diff --git a/Documentation/android.txt b/Documentation/android.txt index 72a62afdf2025..e1e41884895d5 100644 --- a/Documentation/android.txt +++ b/Documentation/android.txt @@ -14,6 +14,12 @@ CONTENTS: 1.3 Recommended enabled config options 2. Contact +0. Getting sources: +----------------- + +git clone --reference /path/to/linux-git/for/speedup/ git://android.git.kernel.org/kernel/msm.git +git checkout -b android-msm-2.6.29 origin/android-msm-2.6.29 + 1. Android ========== @@ -26,6 +32,7 @@ To see a working defconfig look at msm_defconfig or goldfish_defconfig which can be found at http://android.git.kernel.org in kernel/common.git and kernel/msm.git +msm_defconfig should work on qualcomm reference design, HTC Magic and G1/ADP1. 1.1 Required enabled config options ----------------------------------- @@ -114,6 +121,23 @@ SERIAL_CORE SERIAL_CORE_CONSOLE +Board code names +---------------- + +board-halibut - Qualcomm SURF 7201A +board-sapphire - HTC Magic +board-trout - HTC Dream / T-Mobile G1 / Android ADP1 + +Booting your kernel +------------------- + +hold down camera and red button to boot into rainbow screen. Then + +./fastboot boot linux-msm/arch/arm/boot/zImage ramdisk.img + +Machine will freeze at rainbow screen for a while, be +patient. ramdisk.img is required. + 2. Contact ========== website: http://android.git.kernel.org diff --git a/Documentation/arm/msm/avs.txt b/Documentation/arm/msm/avs.txt new file mode 100644 index 0000000000000..c22feec8a13d1 --- /dev/null +++ b/Documentation/arm/msm/avs.txt @@ -0,0 +1,185 @@ +Introduction +============ + +Adaptive Voltage Scaling (AVS) for ARCH_MSM_SCORPION + +The AVS driver adjusts the CPU voltage based on hardware feedback. Using +hardware feedback AVS is able to achieve lower voltages than the equivalent +static voltage scaling (SVS) voltage. + +The Scorpion architecture includes three ring oscillators for AVS. The +ring oscillators provide real time feedback about the voltage requirements +for the current operating conditions. The hardware can sense when the voltage +can be lowered and needs to be raised. + +The AVS software keeps track of the current operating conditions. Periodically +AVS queries the hardware. Every query AVS updates a table of required voltage +indexed by operating conditions, CPU frequency and temperature. + + +Hardware description +==================== + +AVS HW is specific to the Scorpion CPU implementation of ARMv7. The AVS HW +includes three ring oscillators. Each is located near a different +subsystem : CPU, VFP, and L2 cache. For the VFP measurement to be useful, +the VFP needs to execute. + +AVS HW is controlled through ARM CP15 registers + +AVSSCR - AVS Status and Control register +op1 = 7, CRn = c15, CRm = c1, op2 = 7 + +AVSDSCR - AVS Delay Synthesizer and Control and Status register +op1 = 7, CRn = c15, CRm = c0, op2 = 6 + +TSSCR - Temperature Sensor Control and Status register +op1 = 7, CRn = c15, CRm = c1, op2 = 0 + + +Software description +==================== + +AVS adaptively adjusts the CPU voltage for current operating conditions. It +maintains a table of operating voltages indexed by CPU frequency and +relative temperature. + +AVS is notified before and after the frequency change. AVS uses this +information to correct the voltage and correctly maintain the operating voltage +table. + +AVS manages the voltage in a background work queue. Every 50ms, AVS checks if +the HW recommends a voltage decrease or increase. The voltage table is updated +and the voltage is changed. 
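+
+A rough sketch of this periodic check (illustrative only, not the actual
+driver code; avs_update_table() and avs_set_vdd() are hypothetical helpers
+standing in for the table update and voltage change described above):
+
+  #include <linux/jiffies.h>
+  #include <linux/types.h>
+  #include <linux/workqueue.h>
+
+  /* Hypothetical helpers, not part of the kernel: */
+  extern int avs_update_table(u32 avsscr);  /* returns target microvolts */
+  extern void avs_set_vdd(int microvolts);
+
+  static struct delayed_work avs_work;
+
+  static u32 avs_read_avsscr(void)
+  {
+          u32 val;
+
+          /* AVSSCR: op1 = 7, CRn = c15, CRm = c1, op2 = 7 */
+          asm volatile("mrc p15, 7, %0, c15, c1, 7" : "=r" (val));
+          return val;
+  }
+
+  static void avs_work_fn(struct work_struct *work)
+  {
+          /* Query the HW, update the table for the current frequency and
+           * temperature, then apply the new target voltage. */
+          avs_set_vdd(avs_update_table(avs_read_avsscr()));
+
+          schedule_delayed_work(&avs_work, msecs_to_jiffies(50));
+  }
+
+  static void avs_work_start(void)
+  {
+          /* Deferrable: the 50ms check will not wake an idle CPU. */
+          INIT_DELAYED_WORK_DEFERRABLE(&avs_work, avs_work_fn);
+          schedule_delayed_work(&avs_work, msecs_to_jiffies(50));
+  }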
+ +AVS is designed to never underestimate the required operating voltage. +Several fail safes are implemented to ensure the minimum operating +voltage is maintained. + +1. The AVS HW is tuned to overestimate minimum voltage. This +overestimate provides extra operating margin. +2. Initial operating conditions are chosen to be more conservative +than equivalent static operating conditions. This ensures we always +approach the optimal voltage from the same direction. +3. Voltage is adjusted in 25mV increments. This adjustment is less than +the margin built into the AVS HW. This ensures we do not undershoot the +voltage. +4. Although not expected, if the HW requests an increase in voltage for a +single operating frequency, the voltage is increased in every frequency for +that temperature. This allows us to retry approaching the operating minimum. + +If the AVS circuitry requests a voltage increase at the maximum operating +voltage, the request is noted in the kernel log, but the request is ignored. +This condition is never expected to happen. + +Design +====== + +Reduce CPU operating voltage +Never allow CPU voltage to be less than required for proper operation +Immediate voltage changes as required for frequency changes. +Periodic management of CPU voltage +Minimal CPU overhead + + +The HW design team designed the AVS ring oscillator configuration to ensure +a proper operating voltage margin, while safely allowing reduction in CPU +operating voltage. This is implemented by the AVS delay synthesizer +configuration magic number. + +AVS is logically a superset of SVS. Therefore, AVS is implemented as an add-on +to the Static Voltage Scaling driver. + +AVS manages the CPU voltage exclusively. + +When the frequency changes AVS is notified before and after the frequency +change. This allows AVS to increase the voltage before the operating voltage +is too low. It allows AVS to drop the voltage as soon as the frequency +transition is complete. Finally it allows the AVS background processing to +be aware that the operating conditions are not stable. + +AVS manages the voltage in a background work queue. The design uses a +deferrable delayed work queue to schedule background work. The deferrable +queue was chosen to minimize CPU wakeups. This queue type will not wake the +CPU from idle, but will defer until the CPU is woken. + + +Power Management +================ + +AVS is part of the power management architecture for Scorpion. AVS manages +CPU supply voltage. + +AVS is aware of CPU frequency changes. These changes are initiated by +CPUFREQ, WFI, idle power collapse and suspend. + +AVS CP15 registers are preserved through power collapse. + + +SMP/multi-core +============== + +ARCH_SM_SCORPION is not a multicore architecture. It is difficult to +anticipate the changes in HW and SW required to support SMP. + +Security +======== + +None + +Performance +=========== + +None + +Interface +========= + +There is no general purpose interface to AVS. The sole client is SVS +(acpuclock-8x50.c). AVS is initialized and torn down by the +avs_init(...) and avs_exit(...) functions. AVS is notified of CPU +frequency changes by avs_adjust_freq(...). + +Driver parameters +================= + +None + +Config options +============== + +MSM_CPU_AVS enables the driver. + +Dependencies +============ + +AVS is built on top of the Static Voltage Scaling driver (SVS) + +Once AVS is initialized. AVS assumes it is the only process changing the +supply voltage. Other coprocessors must not change the Scorpion voltage. 
+The bootloader must not change the Scorpion Voltage when warm booting from +power collapse. + +User space utilities +==================== + +None + +Other +===== + +None + +Known issues +============ + +None + +To do +===== + +AVS needs to support future Scorpion chipsets. The implementation will be +parameterized, using board files, as new chipset support is added. + + + diff --git a/Documentation/arm/msm/boot.txt b/Documentation/arm/msm/boot.txt new file mode 100644 index 0000000000000..1a41cd5320207 --- /dev/null +++ b/Documentation/arm/msm/boot.txt @@ -0,0 +1,23 @@ +Introduction +============= +The power management integrated circuit (PMIC) records the reason the +Application processor was powered on in Shared Memory. +The hardware and software used is the shared memory interface. This document +is not for the purpose of describing this interface, but to identify the +possible values for this data item. + +Description +=========== +Shared memory item (SMEM_POWER_ON_STATUS_INFO) is read to get access to +this data. The table below identifies the possible values stored. + +power_on_status values set by the PMIC for power on event: +---------------------------------------------------------- +0x01 -- keyboard power on +0x02 -- RTC alarm +0x04 -- cable power on +0x08 -- SMPL +0x10 -- Watch Dog timeout +0x20 -- USB charger +0x40 -- Wall charger +0xFF -- error reading power_on_status value diff --git a/Documentation/arm/msm/emulate_domain_manager.txt b/Documentation/arm/msm/emulate_domain_manager.txt new file mode 100644 index 0000000000000..97a25663df64a --- /dev/null +++ b/Documentation/arm/msm/emulate_domain_manager.txt @@ -0,0 +1,282 @@ +Copyright (c) 2009, Code Aurora Forum. All rights reserved. + +Redistribution and use in source form and compiled forms (SGML, HTML, PDF, +PostScript, RTF and so forth) with or without modification, are permitted +provided that the following conditions are met: + +Redistributions in source form must retain the above copyright notice, this +list of conditions and the following disclaimer as the first lines of this +file unmodified. + +Redistributions in compiled form (transformed to other DTDs, converted to +PDF, PostScript, RTF and other formats) must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +THIS DOCUMENTATION IS PROVIDED BY THE CODE AURORA FORUM "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +AND NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE FREEBSD +DOCUMENTATION PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS DOCUMENTATION, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Introduction +============ + +8x50 chipset requires the ability to disable HW domain manager function. + +The ARM MMU architecture has a feature known as domain manager mode. +Briefly each page table, section, or supersection is assigned a domain. +Each domain can be globally configured to NoAccess, Client, or Manager +mode. 
These global configurations allow the access permissions of the +entire domain to be changed simultaneously. + +The domain manger emulation is required to fix a HW problem on the 8x50 +chipset. The problem is simple to repair except when domain manager mode +is enabled. The emulation allows the problem to be completely resolved. + + +Hardware description +==================== + +When domain manager mode is enabled on a specific domain, the MMU +hardware ignores the access permission bits and the execute never bit. All +accesses, to memory in the domain, are granted full read, write, +execute permissions. + +The mode of each domain is controlled by a field in the cp15 dacr register. +Each domain can be globally configured to NoAccess, Client, or Manager mode. + +See: ARMv7 Architecture Reference Manual + + +Software description +==================== + +In order to disable domain manager mode the equivalent HW functionality must +be emulated in SW. Any attempts to enable domain manager mode, must be +intercepted. + +Because domain manager mode is not enabled, permissions for the +associated domain will remain restricted. Permission faults will be generated. +The permission faults will be intercepted. The faulted pages/sections will +be modified to grant full access and execute permissions. + +The modified page tables must be restored when exiting domain manager mode. + + +Design +====== + +Design Goals: + +Disable Domain Manager Mode +Exact SW emulation of Domain Manager Mode +Minimal Kernel changes +Minimal Security Risk + +Design Decisions: + +Detect kernel page table modifications on restore +Direct ARMv7 HW MMU table manipulation +Restore emulation modified MMU entries on context switch +No need to restore MMU entries for MMU entry copy operations +Invalidate TLB entries on modification +Store Domain Manager bits in memory +8 entry MMU entry cache +Use spin_lock_irqsave to protect domain manipulation +Assume no split MMU table + +Design Discussion: + +Detect kernel page table modifications on restore - +When restoring original page/section permission faults, the submitted design +verifies the MMU entry has not been modified. The kernel modifies MMU +entries for the following purposes : create a memory mapping, release a +memory mapping, add permissions during a permission fault, and map a page +during a translation fault. The submitted design works with the listed +scenarios. The translation fault and permission faults simply do not happen on +relevant entries (valid entries with full access permissions). The alternative +would be to hook every MMU table modification. The alternative greatly +increases complexity and code maintenance issues. + +Direct ARMv7 HW MMU table manipulation - +The natural choice would be to use the kernel provided mechanism to manipulate +MMU page table entries. The ARM MMU interface is described in pgtable.h. +This interface is complicated by the Linux implementation. The level 1 pgd +entries are treated and manipulated as entry pairs. The level 2 entries are +shadowed and cloned. The compromise was chosen to actually use the ARMv7 HW +registers to walk and modify the MMU table entries. The choice limits the +usage of this implementation to ARMv7 and similar ARM MMU architectures. Since +this implementation is targeted at fixing an issue in 8x50 ARMv7, the choice is +logical. The HW manipulation is in distinct low level functions. These could +easily be replaced or generalized to support other architectures as necessary. 
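+
+For reference, the direct table access discussed above reduces to CP15 reads
+of roughly the following shape. This is a simplified sketch, not the code in
+emulate_domain_manager.c, and it relies on the "no split MMU table"
+assumption (TTBCR.N == 0) listed in the design decisions:
+
+  #include <linux/types.h>
+  #include <asm/memory.h>         /* phys_to_virt() */
+
+  /* Return the level 1 descriptor for a virtual address by reading
+   * TTBR0 and indexing the 16KB translation table directly. */
+  static u32 *hw_level1_desc(unsigned long va)
+  {
+          u32 ttbr0;
+
+          asm volatile("mrc p15, 0, %0, c2, c0, 0" : "=r" (ttbr0));
+
+          /* One 32-bit entry per 1MB of virtual address space. */
+          return (u32 *)phys_to_virt((ttbr0 & ~0x3fffUL) +
+                                     ((va >> 20) << 2));
+  }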
+ +Restore emulation modified MMU entries on context switch - +This additional hook was added to minimize performance impact. By guaranteeing +the ASID will not change during the emulation, the emulation may invalidate each +entry by MVA & ASID. Only the affected page table entries will be removed from +the TLB cache. The performance cost of the invalidate on context switch is near +zero. Typically on context switch the domain mode would also change, forcing a +complete restore of all modified MMU entries. The alternative would be to +invalidate the entire TLB every time a table entry is restored. + +No need to restore MMU entries for copy operations - +Operations which copy MMU entries are relatively rare in the kernel. Because +we modify the level 2 pte entries directly in hardware, the Linux shadow copies +are left untouched. The kernel treats the shadow copies as the primary pte +entry. Any pte copy operations would be unaffected by the HW modification. +On translation section fault, pgd entries are copied from the kernel master +page table to the current thread page table. Since we restore MMU entries on +context switch, we guarantee the master table will not contain modifications, +while faulting on a process local entry. Other read, modify write operations +occur during permission fault handling. Since we open permission on modified +entries, these do not need to be restored, because we guarantee these +permission fault operations will not happen. + +Invalidate TLB entries on modification - +No real choice here. This is more of a design requirement. On permission +fault, the MMU entry with restricted permissions will be in the TLB. To open +access permissions, the TLB entry must be invalidated. Otherwise the access +will permission fault again. Upon restoring original MMU entries, the TLB +must be invalidated to restrict memory access. + +Store Domain Manager bits in memory - +There was only one alternative here. 2.6.29 kernel only uses 3 of 16 +possible domains. Additional bits in dacr could be used to store the +manager bits. This would allow faster access to the manager bits. +Overall this would reduce any performance impact. The performance +needs did not seem to justify the added weirdness. + +8 entry MMU entry cache- +The size of the modified MMU entry cache is somewhat arbitrary. The thought +process is that typically, a thread is using two pointers to perform a copy +operation. In this case only 2 entries would be required. One could imagine +a more complicated operation, a masked copy for instance, which would require +more pointers. 8 pointer seemed to be large enough to minimize risk of +permission fault thrashing. The disadvantage of a larger cache would simply +be a longer list of entries to restore. + +Use spin_lock_irqsave to protect domain manipulation - +The obvious choice. + +Assume no split MMU table - +This same assumption is documented in cpu_v7_switch_mm. + + +Power Management +================ + +Not affected. + + +SMP/multi-core +============== + +SMP/multicore not supported. This is intended as a 8x50 workaround. + + +Security +======== + +MMU page/section permissions must be manipulated correctly to emulate domain +manager mode. If page permission are left in full access mode, any process +can read associated memory. + + +Performance +=========== + +Performance should be impacted only minimally. When emulating domain manager +mode, there is overhead added to MMU table/context switches, set_domain() +calls, data aborts, and prefetch aborts. 
+ +Normally the kernel operates with domain != DOMAIN_MANAGER. In this case the +overhead is minimal. An additional check is required to see if domain manager +mode is on. This minimal code is added to each of emulation entry points : +set, data abort, prefetch abort, and MMU table/context switch. + +Initial accesses to a MMU protected page/section will generate a permission +fault. The page will be manipulated to grant full access permissions and +the access will be retried. This will typically require 2-3 page table +walks. + +On a context switch, all modified MMU entries will be restored. On thread +resume, additional accesses will be treated as initial accesses. + + +Interface +========= + +The emulation does not have clients. It is hooked to the kernel through a +small list of functions. + +void emulate_domain_manager_set(u32 domain); +int emulate_domain_manager_data_abort(u32 dfsr, u32 dfar); +int emulate_domain_manager_prefetch_abort(u32 ifsr, u32 ifar); +void emulate_domain_manager_switch_mm( + unsigned long pgd_phys, + struct mm_struct *mm, + void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *)); + +emulate_domain_manager_set() is the set_domain handler. This replaces the +direct manipulation of CP15 dacr with a function call. This allows emulation +to prevent setting dacr manager bits. It also allows emulation to restore +page/section permissions when domain manger is disabled. + +emulate_domain_manager_data_abort() handles data aborts caused by domain +not being set in HW, and handles section/page manipulation. + +emulate_domain_manager_prefetch_abort() is the similar prefetch abort handler. + +emulate_domain_manager_switch_mm() handles MMU table and context switches. +This notifies the emulation that the MMU context is changing. Allowing the +emulation to restore page table entry permission before switching contexts. + + +Config options +============== + +This option is enable/disable by the EMULATE_DOMAIN_MANAGER_V7 option. + + +Dependencies +============ + +Implementation is for ARMv7, MMU, and !SMP. Targets solving issue for 8x50 +chipset. + + +User space utilities +==================== + +None + + +Other +===== + +Code is implemented in kernel/arch/arm/mm. + + +arch/arm/mm/emulate_domain_manager.c contains comments. No additional public +documentation available or planned. + + +Known issues +============ + +No intent to support SMP or non ARMv7 architectures + + +To do +===== + +None + diff --git a/Documentation/arm/msm/kgsl-sysfs.txt b/Documentation/arm/msm/kgsl-sysfs.txt new file mode 100644 index 0000000000000..c572312c43732 --- /dev/null +++ b/Documentation/arm/msm/kgsl-sysfs.txt @@ -0,0 +1,98 @@ +This document lists details for the device specific sysfs attributes +created by the KGSL GPU driver. + +- /sys/devices/platform/kgsl/vmalloc + The total amount of vmalloc()ed memory currently allocated by the driver + (in bytes) + +- /sys/devices/platform/kgsl/vmalloc_max + The maximum amount of vmalloc()ed memory allocated at any one + time by the driver since the system was booted (in bytes) + +- /sys/devices/platform/kgsl/coherent + The total amount of coherent DMA memory currently allocated by the driver + (in bytes) + +- /sys/devices/platform/kgsl/coherent_max + The maximum amount of coherent DMA memory allocated at any one + time by the driver since the system was booted (in bytes) + + +- /sys/devices/platform/kgsl/histogram + A histogram of the sizes of vmalloc allocations by the driver + since the system was booted. 
The allocations are grouped by the order + of the allocation size in pages. For example, order 0 are 1 page + allocations, order 1 are 2 page allocations, order 2 are 4 page allocations, + and so forth, up to order 16 (32768) pages. + +- /sys/devices/platform/kgsl/proc + This directory contains individual entries for each active rendering + process. Rendering instances are created for each unique process that + opens the GPU devices, and are named for the id of the creating process. + In the driver, memory allocations are owned by the process that allocates + them, and outstanding memory is garbage collected when the process closes + the device. + + - /sys/devices/platform/kgsl/proc/NN/vmalloc + The total amount of vmalloc memory currently allocated by the process + (in bytes) + + - /sys/devices/platform/kgsl/proc/NN/vmalloc_max + The maximum amount of vmalloc memory allocated at any one + time by the process since it was created (in bytes) + + - /sys/devices/platform/kgsl/proc/NN/exmem + The total amount of external memory devices currently mapped by the process + (in bytes). This includes PMEM, ASHMEM and external memory pointers from + userspace. + + - /sys/devices/platform/kgsl/proc/NN/exmem_max + The maximum amount of external memory devices allocated at any one + time by the process since it was created (in bytes). This includes PMEM, + ASHMEM and external memory pointers from userspace. + + - /sys/devices/platform/kgsl/proc/NN/flushes + The total number of cache flushes performed by this process since it + was created. + +- /sys/devices/platform/kgsl/pagetables + This directory contains individual entries for each active pagetable. + There will always be a global pagetable with ID 0. If per-process + pagetables are not enabled, pagetable ID 0 will also be the default + pagetable for all processes. If per-process pagetables are enabled, + there will be an entry for each pagetable, named after the ID of the + process that created it. + + - /sys/devices/platform/kgsl/pagetables/NN/entries + The number of concurrent entries mapped in the GPU MMU. + + - /sys/devices/platform/kgsl/pagetables/NN/mapped + The number of bytes currently mapped in the GPU MMU. + + - /sys/devices/platform/kgsl/pagetables/NN/va_range + The virtual address size of the MMU pagetable (in bytes). + + - /sys/devices/platform/kgsl/pagetables/NN/max_mapped + The maximum number of bytes concurrently mapped in the GPU MMU since + the pagetable was created. + + - /sys/devices/platform/kgsl/pagetables/NN/max_entries + The maximum number of entries concurrently mapped in the GPU MMU since + the pagetable was created. + +- /sys/devices/platform/kgsl/msm_kgsl/ + Each individual GPU device (2D or 3D) will have its own device node in + this directory. All platforms will have kgsl-3d0 (3D device), some + devices may have 1 2D device (kgsl-2d0) and others might add a second 2D + device (kgsl-2d1). + + - /sys/devices/platform/kgsl/msm_kgsl/kgsl-XXX/pwrnap + Controls the system ability to nap (lightly sleep between frames). 1 + indicates napping is enabled, 0 indicates it is disabled. Write a 1 or + a 0 to the file to control napping. + + - /sys/devices/platform/kgsl/msm_kgsl/kgsl-XXX/gpuclk + Shows the last active requested speed of the GPU clock in HZ, does not + actually measure the current clock rate. Write a clock speed to the file + corresponding to a supported platform power level to change to that power + level. The bandwidth vote will also be adjusted. 
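+
+Example shell usage (the kgsl-3d0 node name is illustrative; use whichever
+kgsl-XXX nodes are present on your platform):
+
+  # cat /sys/devices/platform/kgsl/vmalloc
+  # echo 0 > /sys/devices/platform/kgsl/msm_kgsl/kgsl-3d0/pwrnap
+  # cat /sys/devices/platform/kgsl/msm_kgsl/kgsl-3d0/gpuclk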
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt index b37d805e19ead..4c371b7d76c12 100644 --- a/Documentation/cpu-freq/governors.txt +++ b/Documentation/cpu-freq/governors.txt @@ -29,6 +29,7 @@ Contents: 2.4 Ondemand 2.5 Conservative 2.6 Interactive +2.7 SmartassV2 3. The Governor Interface in the CPUfreq Core @@ -201,21 +202,78 @@ idle. When the cpu comes out of idle, a timer is configured to fire within 1-2 ticks. If the cpu is very busy between exiting idle and when the timer fires then we assume the cpu is underpowered and ramp to MAX speed. - + If the cpu was not sufficiently busy to immediately ramp to MAX speed, then governor evaluates the cpu load since the last speed adjustment, -choosing th highest value between that longer-term load or the +choosing the highest value between that longer-term load or the short-term load since idle exit to determine the cpu speed to ramp to. -The tuneable value for this governor are: +The tuneable values for this governor are: min_sample_time: The minimum amount of time to spend at the current frequency before ramping down. This is to ensure that the governor has seen enough historic cpu load data to determine the appropriate workload. Default is 80000 uS. -go_maxspeed_load: The CPU load at which to ramp to max speed. Default -is 85. +hispeed_freq: An intermediate "hi speed" at which to initially ramp +when CPU load hits the value specified in go_hispeed_load. If load +stays high for the amount of time specified in above_hispeed_delay, +then speed may be bumped higher. Default is maximum speed. + +go_hispeed_load: The CPU load at which to ramp to the intermediate "hi +speed". Default is 85%. + +above_hispeed_delay: Once speed is set to hispeed_freq, wait for this +long before bumping speed higher in response to continued high load. +Default is 20000 uS. + +timer_rate: Sample rate for reevaluating cpu load when the system is +not idle. Default is 20000 uS. + +input_boost: If non-zero, boost speed of all CPUs to hispeed_freq on +touchscreen activity. Default is 0. + +boost: If non-zero, immediately boost speed of all CPUs to at least +hispeed_freq until zero is written to this attribute. If zero, allow +CPU speeds to drop below hispeed_freq according to load as usual. + +boostpulse: Immediately boost speed of all CPUs to hispeed_freq for +min_sample_time, after which speeds are allowed to drop below +hispeed_freq according to load as usual. + + +2.7 SmartassV2 +--------------- + +The CPUfreq governor "smartassV2", like other governors, aims to balance +performance vs battery life by using low frequencies when load is low and +ramping the frequency when necessary, fast enough to ensure responsiveness. + +The implementation of the governor is roughtly based on the idea of interactive. +The idle loop is used to track when the CPU has idle cycles. The idle loop will +set a relatively high rate timer to sample the load when appropriate, the timer +will measure the load since it was set and schedule a work queue task to do the +actual frequency change when necessary. + +The most important tunable is the "ideal" frequency: this governor will aim +for this frequency, in the sense that it will ramp towards this frequency much +more aggresively than beyond it - both when ramping up from below this frequency +and when ramping down from above this frequency. 
Still, note, that when load is +low enough the governor should choose the lowest available frequency regardless +of the ideal frequency and similarly when load is consistently high enough the +highest available frequency will be used. + +Smartass also tracks the state of the screen, and when screen is off (a.k.a +sleep or suspended in the terms of this governor) a different ideal frequency +is used. This is the only difference between the screen on and screen off +states. Proper tuning of the awake_ideal_freq and sleep_ideal_freq should +allow both high responsiveness when screen is on and utilizing the low +frequency range when load is low, especially when screen is off. + +Finally, smartass is a highly customizable governor with almost everything +tweakable through the sysfs. For a detailed explaination of each tunable, +please see the inline comments at the begging of the code (smartass2.c). + 3. The Governor Interface in the CPUfreq Core diff --git a/Documentation/flexible-arrays.txt b/Documentation/flexible-arrays.txt index cb8a3a00cc926..df904aec99044 100644 --- a/Documentation/flexible-arrays.txt +++ b/Documentation/flexible-arrays.txt @@ -66,10 +66,10 @@ trick is to ensure that any needed memory allocations are done before entering atomic context, using: int flex_array_prealloc(struct flex_array *array, unsigned int start, - unsigned int end, gfp_t flags); + unsigned int nr_elements, gfp_t flags); This function will ensure that memory for the elements indexed in the range -defined by start and end has been allocated. Thereafter, a +defined by start and nr_elements has been allocated. Thereafter, a flex_array_put() call on an element in that range is guaranteed not to block. diff --git a/Documentation/genlock.txt b/Documentation/genlock.txt new file mode 100644 index 0000000000000..cd8261467748a --- /dev/null +++ b/Documentation/genlock.txt @@ -0,0 +1,167 @@ +Introduction + +'genlock' is an in-kernel API and optional userspace interface for a generic +cross-process locking mechanism. The API is designed for situations where +multiple user space processes and/or kernel drivers need to coordinate access +to a shared resource, such as a graphics buffer. The API was designed with +graphics buffers in mind, but is sufficiently generic to allow it to be +independently used with different types of resources. The chief advantage +of genlock over other cross-process locking mechanisms is that the resources +can be accessed by both userspace and kernel drivers which allows resources +to be locked or unlocked by asynchronous events in the kernel without the +intervention of user space. + +As an example, consider a graphics buffer that is shared between a rendering +application and a compositing window manager. The application renders into a +buffer. That buffer is reused by the compositing window manager as a texture. +To avoid corruption, access to the buffer needs to be restricted so that one +is not drawing on the surface while the other is reading. Locks can be +explicitly added between the rendering stages in the processes, but explicit +locks require that the application wait for rendering and purposely release the +lock. An implicit release triggered by an asynchronous event from the GPU +kernel driver, however, will let execution continue without requiring the +intercession of user space. + +SW Goals + +The genlock API implements exclusive write locks and shared read locks meaning +that there can only be one writer at a time, but multiple readers. 
Processes +that are unable to acquire a lock can be optionally blocked until the resource +becomes available. + +Locks are shared between processes. Each process will have its own private +instance for a lock known as a handle. Handles can be shared between user +space and kernel space to allow a kernel driver to unlock or lock a buffer +on behalf of a user process. + +Locks within a process using a single genlock handle follow the same rules for +exclusive write locks with multiple readers. Genlock cannot provide deadlock +protection because the same handle can be used simultaneously by a producer and +consumer. In practice in the event that the client creates a deadlock an error +will still be generated when the timeout expires. + +Kernel API + +Access to the genlock API can either be via the in-kernel API or via an +optional character device (/dev/genlock). The character device is primarily +to be used for legacy resource sharing APIs that cannot be easily changed. +New resource sharing APIs from this point should implement a scheme specific +wrapper for locking. + +To create or attach to an existing lock, a process or kernel driver must first +create a handle. Each handle is linked to a single lock at any time. An entityi +may have multiple handles, each associated with a different lock. Once a handle +has been created, the owner may create a new lock or attach an existing lock +that has been exported from a different handle. + +Once the handle has a lock attached, the owning process may attempt to lock the +buffer for read or write. Write locks are exclusive, meaning that only one +process may acquire it at any given time. Read locks are shared, meaning that +multiple readers can hold the lock at the same time. Attempts to acquire a read +lock with a writer active or a write lock with one or more readers or writers +active will typically cause the process to block until the lock is acquired. +When the lock is released, all waiting processes will be woken up. Ownership +of the lock is reference counted, meaning that any one owner can "lock" +multiple times. The lock will only be released from the owner when all the +references to the lock are released via unlock. + +The owner of a write lock may atomically convert the lock into a read lock +(which will wake up other processes waiting for a read lock) without first +releasing the lock. The owner would simply issue a new request for a read lock. +However, the owner of a read lock cannot convert it into a write lock in the +same manner. To switch from a read lock to a write lock, the owner must +release the lock and then try to reacquire it. + +These are the in-kernel API calls that drivers can use to create and +manipulate handles and locks. Handles can either be created and managed +completely inside of kernel space, or shared from user space via a file +descriptor. + +* struct genlock_handle *genlock_get_handle(void) +Create a new handle. + +* struct genlock_handle * genlock_get_handle_fd(int fd) +Given a valid file descriptor, return the handle associated with that +descriptor. + +* void genlock_put_handle(struct genlock_handle *) +Release a handle. + +* struct genlock * genlock_create_lock(struct genlock_handle *) +Create a new lock and attach it to the handle. Once a lock is attached to a +handle it stays attached until the handle is destroyed. + +* struct genlock * genlock_attach_lock(struct genlock_handle *handle, int fd) +Given a valid file descriptor, get the lock associated with it and attach it to +the handle. 
+ +* int genlock_lock(struct genlock_handle *, int op, int flags, u32 timeout) +Lock or unlock the lock attached to the handle. A zero timeout value will +be treated just like if the GENOCK_NOBLOCK flag is passed; if the lock +can be acquired without blocking then do so otherwise return -EAGAIN. +Function returns -ETIMEDOUT if the timeout expired or 0 if the lock was +acquired. + +* int genlock_wait(struct genloc_handle *, u32 timeout) +Wait for a lock held by the handle to go to the unlocked state. A non-zero +timeout value must be passed. Returns -ETIMEDOUT if the timeout expired or +0 if the lock is in an unlocked state. + +Character Device + +Opening an instance to the /dev/genlock character device will automatically +create a new handle. All ioctl functions with the exception of NEW and +RELEASE use the following parameter structure: + +struct genlock_lock { + int fd; /* Returned by EXPORT, used by ATTACH */ + int op; /* Used by LOCK */ + int flags; /* used by LOCK */ + u32 timeout; /* Used by LOCK and WAIT */ +} + +*GENLOCK_IOC_NEW +Create a new lock and attaches it to the handle. Returns -EINVAL if the handle +already has a lock attached (use GENLOCK_IOC_RELEASE to remove it). Returns +-ENOMEM if the memory for the lock can not be allocated. No data is passed +from the user for this ioctl. + +*GENLOCK_IOC_EXPORT +Export the currently attached lock to a file descriptor. The file descriptor +is returned in genlock_lock.fd. + +*GENLOCK_IOC_ATTACH +Attach an exported lock file descriptor to the current handle. Return -EINVAL +if the handle already has a lock attached (use GENLOCK_IOC_RELEASE to remove +it). Pass the file descriptor in genlock_lock.fd. + +*GENLOCK_IOC_LOCK +Lock or unlock the attached lock. Pass the desired operation in +genlock_lock.op: + * GENLOCK_WRLOCK - write lock + * GENLOCK_RDLOCK - read lock + * GENLOCK_UNLOCK - unlock an existing lock + +Pass flags in genlock_lock.flags: + * GENLOCK_NOBLOCK - Do not block if the lock is already taken + * GENLOCK_WRITE_TO_READ - Convert a write lock that the handle owns to a read + lock. For instance graphics may hold a write lock + while rendering the back buffer then when swapping + convert the lock to a read lock to copy the front + buffer in the next frame for preserved buffers. + +Pass a timeout value in milliseconds in genlock_lock.timeout. +genlock_lock.flags and genlock_lock.timeout are not used for UNLOCK. +Returns -EINVAL if no lock is attached, -EAGAIN if the lock is taken and +NOBLOCK is specified or if the timeout value is zero, -ETIMEDOUT if the timeout +expires or 0 if the lock was successful. + +* GENLOCK_IOC_WAIT +Wait for the lock attached to the handle to be released (i.e. goes to unlock). +This is mainly used for a thread that needs to wait for a peer to release a +lock on the same shared handle. A non-zero timeout value in milliseconds is +passed in genlock_lock.timeout. Returns 0 when the lock has been released, +-EINVAL if a zero timeout is passed, or -ETIMEDOUT if the timeout expires. + +* GENLOCK_IOC_RELEASE +This ioctl has been deprecated. Do not use. diff --git a/Documentation/hid/uhid.txt b/Documentation/hid/uhid.txt new file mode 100644 index 0000000000000..4627c4241ece6 --- /dev/null +++ b/Documentation/hid/uhid.txt @@ -0,0 +1,169 @@ + UHID - User-space I/O driver support for HID subsystem + ======================================================== + +The HID subsystem needs two kinds of drivers. In this document we call them: + + 1. 
The "HID I/O Driver" is the driver that performs raw data I/O to the + low-level device. Internally, they register an hid_ll_driver structure with + the HID core. They perform device setup, read raw data from the device and + push it into the HID subsystem and they provide a callback so the HID + subsystem can send data to the device. + + 2. The "HID Device Driver" is the driver that parses HID reports and reacts on + them. There are generic drivers like "generic-usb" and "generic-bluetooth" + which adhere to the HID specification and provide the standardizes features. + But there may be special drivers and quirks for each non-standard device out + there. Internally, they use the hid_driver structure. + +Historically, the USB stack was the first subsystem to provide an HID I/O +Driver. However, other standards like Bluetooth have adopted the HID specs and +may provide HID I/O Drivers, too. The UHID driver allows to implement HID I/O +Drivers in user-space and feed the data into the kernel HID-subsystem. + +This allows user-space to operate on the same level as USB-HID, Bluetooth-HID +and similar. It does not provide a way to write HID Device Drivers, though. Use +hidraw for this purpose. + +There is an example user-space application in ./samples/uhid/uhid-example.c + +The UHID API +------------ + +UHID is accessed through a character misc-device. The minor-number is allocated +dynamically so you need to rely on udev (or similar) to create the device node. +This is /dev/uhid by default. + +If a new device is detected by your HID I/O Driver and you want to register this +device with the HID subsystem, then you need to open /dev/uhid once for each +device you want to register. All further communication is done by read()'ing or +write()'ing "struct uhid_event" objects. Non-blocking operations are supported +by setting O_NONBLOCK. + +struct uhid_event { + __u32 type; + union { + struct uhid_create_req create; + struct uhid_data_req data; + ... + } u; +}; + +The "type" field contains the ID of the event. Depending on the ID different +payloads are sent. You must not split a single event across multiple read()'s or +multiple write()'s. A single event must always be sent as a whole. Furthermore, +only a single event can be sent per read() or write(). Pending data is ignored. +If you want to handle multiple events in a single syscall, then use vectored +I/O with readv()/writev(). + +The first thing you should do is sending an UHID_CREATE event. This will +register the device. UHID will respond with an UHID_START event. You can now +start sending data to and reading data from UHID. However, unless UHID sends the +UHID_OPEN event, the internally attached HID Device Driver has no user attached. +That is, you might put your device asleep unless you receive the UHID_OPEN +event. If you receive the UHID_OPEN event, you should start I/O. If the last +user closes the HID device, you will receive an UHID_CLOSE event. This may be +followed by an UHID_OPEN event again and so on. There is no need to perform +reference-counting in user-space. That is, you will never receive multiple +UHID_OPEN events without an UHID_CLOSE event. The HID subsystem performs +ref-counting for you. +You may decide to ignore UHID_OPEN/UHID_CLOSE, though. I/O is allowed even +though the device may have no users. + +If you want to send data to the HID subsystem, you send an HID_INPUT event with +your raw data payload. If the kernel wants to send data to the device, you will +read an UHID_OUTPUT or UHID_OUTPUT_EV event. 
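+
+A minimal user-space sketch of the flow described so far (error handling
+trimmed; the report descriptor and vendor/product IDs are placeholders, see
+./samples/uhid/uhid-example.c for a complete program):
+
+  #include <fcntl.h>
+  #include <string.h>
+  #include <unistd.h>
+  #include <linux/input.h>        /* BUS_USB */
+  #include <linux/uhid.h>
+
+  /* Placeholder report descriptor; a real device supplies its own. */
+  static unsigned char rdesc[] = { 0x05, 0x01 /* ... */ };
+
+  static int uhid_send(int fd, const struct uhid_event *ev)
+  {
+          return write(fd, ev, sizeof(*ev)) == sizeof(*ev) ? 0 : -1;
+  }
+
+  int main(void)
+  {
+          struct uhid_event ev;
+          int fd = open("/dev/uhid", O_RDWR | O_CLOEXEC);
+
+          if (fd < 0)
+                  return 1;
+
+          /* Register the device: UHID_CREATE, then wait for UHID_START. */
+          memset(&ev, 0, sizeof(ev));
+          ev.type = UHID_CREATE;
+          strcpy((char *)ev.u.create.name, "example-uhid-device");
+          ev.u.create.rd_data = rdesc;
+          ev.u.create.rd_size = sizeof(rdesc);
+          ev.u.create.bus = BUS_USB;
+          ev.u.create.vendor = 0x1234;    /* placeholder IDs */
+          ev.u.create.product = 0x5678;
+          if (uhid_send(fd, &ev))
+                  return 1;
+
+          /* ... read() UHID_START/UHID_OPEN here, then feed raw reports: */
+          memset(&ev, 0, sizeof(ev));
+          ev.type = UHID_INPUT;
+          ev.u.input.size = 0;            /* fill ev.u.input.data[] first */
+          uhid_send(fd, &ev);
+
+          close(fd);
+          return 0;
+  }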
+ +If your device disconnects, you should send an UHID_DESTROY event. This will +unregister the device. You can now send UHID_CREATE again to register a new +device. +If you close() the fd, the device is automatically unregistered and destroyed +internally. + +write() +------- +write() allows you to modify the state of the device and feed input data into +the kernel. The following types are supported: UHID_CREATE, UHID_DESTROY and +UHID_INPUT. The kernel will parse the event immediately and if the event ID is +not supported, it will return -EOPNOTSUPP. If the payload is invalid, then +-EINVAL is returned, otherwise, the amount of data that was read is returned and +the request was handled successfully. + + UHID_CREATE: + This creates the internal HID device. No I/O is possible until you send this + event to the kernel. The payload is of type struct uhid_create_req and + contains information about your device. You can start I/O now. + + UHID_DESTROY: + This destroys the internal HID device. No further I/O will be accepted. There + may still be pending messages that you can receive with read() but no further + UHID_INPUT events can be sent to the kernel. + You can create a new device by sending UHID_CREATE again. There is no need to + reopen the character device. + + UHID_INPUT: + You must send UHID_CREATE before sending input to the kernel! This event + contains a data-payload. This is the raw data that you read from your device. + The kernel will parse the HID reports and react on it. + + UHID_FEATURE_ANSWER: + If you receive a UHID_FEATURE request you must answer with this request. You + must copy the "id" field from the request into the answer. Set the "err" field + to 0 if no error occured or to EIO if an I/O error occurred. + If "err" is 0 then you should fill the buffer of the answer with the results + of the feature request and set "size" correspondingly. + +read() +------ +read() will return a queued ouput report. These output reports can be of type +UHID_START, UHID_STOP, UHID_OPEN, UHID_CLOSE, UHID_OUTPUT or UHID_OUTPUT_EV. No +reaction is required to any of them but you should handle them according to your +needs. Only UHID_OUTPUT and UHID_OUTPUT_EV have payloads. + + UHID_START: + This is sent when the HID device is started. Consider this as an answer to + UHID_CREATE. This is always the first event that is sent. + + UHID_STOP: + This is sent when the HID device is stopped. Consider this as an answer to + UHID_DESTROY. + If the kernel HID device driver closes the device manually (that is, you + didn't send UHID_DESTROY) then you should consider this device closed and send + an UHID_DESTROY event. You may want to reregister your device, though. This is + always the last message that is sent to you unless you reopen the device with + UHID_CREATE. + + UHID_OPEN: + This is sent when the HID device is opened. That is, the data that the HID + device provides is read by some other process. You may ignore this event but + it is useful for power-management. As long as you haven't received this event + there is actually no other process that reads your data so there is no need to + send UHID_INPUT events to the kernel. + + UHID_CLOSE: + This is sent when there are no more processes which read the HID data. It is + the counterpart of UHID_OPEN and you may as well ignore this event. + + UHID_OUTPUT: + This is sent if the HID device driver wants to send raw data to the I/O + device. You should read the payload and forward it to the device. The payload + is of type "struct uhid_data_req". 
+ This may be received even though you haven't received UHID_OPEN, yet. + + UHID_OUTPUT_EV: + Same as UHID_OUTPUT but this contains a "struct input_event" as payload. This + is called for force-feedback, LED or similar events which are received through + an input device by the HID subsystem. You should convert this into raw reports + and send them to your device similar to events of type UHID_OUTPUT. + + UHID_FEATURE: + This event is sent if the kernel driver wants to perform a feature request as + described in the HID specs. The report-type and report-number are available in + the payload. + The kernel serializes feature requests so there will never be two in parallel. + However, if you fail to respond with a UHID_FEATURE_ANSWER in a time-span of 5 + seconds, then the requests will be dropped and a new one might be sent. + Therefore, the payload also contains an "id" field that identifies every + request. + +Document by: + David Herrmann diff --git a/Documentation/i2c/instantiating-devices b/Documentation/i2c/instantiating-devices index 87da405a85979..9edb75d8c9b94 100644 --- a/Documentation/i2c/instantiating-devices +++ b/Documentation/i2c/instantiating-devices @@ -100,7 +100,7 @@ static int __devinit usb_hcd_pnx4008_probe(struct platform_device *pdev) (...) i2c_adap = i2c_get_adapter(2); memset(&i2c_info, 0, sizeof(struct i2c_board_info)); - strlcpy(i2c_info.name, "isp1301_pnx", I2C_NAME_SIZE); + strlcpy(i2c_info.type, "isp1301_pnx", I2C_NAME_SIZE); isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info, normal_i2c, NULL); i2c_put_adapter(i2c_adap); diff --git a/Documentation/i2c/writing-clients b/Documentation/i2c/writing-clients index 5ebf5af1d7160..5aa53374ea2a8 100644 --- a/Documentation/i2c/writing-clients +++ b/Documentation/i2c/writing-clients @@ -38,7 +38,7 @@ static struct i2c_driver foo_driver = { .name = "foo", }, - .id_table = foo_ids, + .id_table = foo_idtable, .probe = foo_probe, .remove = foo_remove, /* if device autodetection is needed: */ diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt index 4a990317b84a7..376538c984cef 100644 --- a/Documentation/kbuild/kbuild.txt +++ b/Documentation/kbuild/kbuild.txt @@ -196,3 +196,8 @@ to be included in the databases, separated by blank space. E.g.: To get all available archs you can also specify all. E.g.: $ make ALLSOURCE_ARCHS=all tags + +KBUILD_ENABLE_EXTRA_GCC_CHECKS +-------------------------------------------------- +If enabled over the make command line with "W=1", it turns on additional +gcc -W... options for more extensive build-time checking. diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt index 86e3cd0d26a08..d0f4d1d9717b7 100644 --- a/Documentation/kbuild/makefiles.txt +++ b/Documentation/kbuild/makefiles.txt @@ -499,6 +499,18 @@ more details, with real examples. gcc >= 3.00. For gcc < 3.00, -malign-functions=4 is used. Note: cc-option-align uses KBUILD_CFLAGS for $(CC) options + cc-disable-warning + cc-disable-warning checks if gcc supports a given warning and returns + the commandline switch to disable it. This special function is needed, + because gcc 4.4 and later accept any unknown -Wno-* option and only + warn about it if there is another warning in the source file. + + Example: + KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable) + + In the above example, -Wno-unused-but-set-variable will be added to + KBUILD_CFLAGS only if gcc really accepts it. 
+ cc-version cc-version returns a numerical version of the $(CC) compiler version. The format is where both are two digits. So for example diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index f4a04c0c7edca..89099cab0a82c 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -2317,6 +2317,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted. 1: Fast pin select (default) 2: ATC IRMode + snddev_icodec.msm_codec_i2s_slave_mode= [ARM-MSM] + 1, codec is I2S master + 0, MSM is I2S master (default) + softlockup_panic= [KNL] Should the soft-lockup detector generate panics. diff --git a/Documentation/scheduler/sched-BFS.txt b/Documentation/scheduler/sched-BFS.txt new file mode 100644 index 0000000000000..c10d956018f99 --- /dev/null +++ b/Documentation/scheduler/sched-BFS.txt @@ -0,0 +1,347 @@ +BFS - The Brain Fuck Scheduler by Con Kolivas. + +Goals. + +The goal of the Brain Fuck Scheduler, referred to as BFS from here on, is to +completely do away with the complex designs of the past for the cpu process +scheduler and instead implement one that is very simple in basic design. +The main focus of BFS is to achieve excellent desktop interactivity and +responsiveness without heuristics and tuning knobs that are difficult to +understand, impossible to model and predict the effect of, and when tuned to +one workload cause massive detriment to another. + + +Design summary. + +BFS is best described as a single runqueue, O(n) lookup, earliest effective +virtual deadline first design, loosely based on EEVDF (earliest eligible virtual +deadline first) and my previous Staircase Deadline scheduler. Each component +shall be described in order to understand the significance of, and reasoning for +it. The codebase when the first stable version was released was approximately +9000 lines less code than the existing mainline linux kernel scheduler (in +2.6.31). This does not even take into account the removal of documentation and +the cgroups code that is not used. + +Design reasoning. + +The single runqueue refers to the queued but not running processes for the +entire system, regardless of the number of CPUs. The reason for going back to +a single runqueue design is that once multiple runqueues are introduced, +per-CPU or otherwise, there will be complex interactions as each runqueue will +be responsible for the scheduling latency and fairness of the tasks only on its +own runqueue, and to achieve fairness and low latency across multiple CPUs, any +advantage in throughput of having CPU local tasks causes other disadvantages. +This is due to requiring a very complex balancing system to at best achieve some +semblance of fairness across CPUs and can only maintain relatively low latency +for tasks bound to the same CPUs, not across them. To increase said fairness +and latency across CPUs, the advantage of local runqueue locking, which makes +for better scalability, is lost due to having to grab multiple locks. + +A significant feature of BFS is that all accounting is done purely based on CPU +used and nowhere is sleep time used in any way to determine entitlement or +interactivity. Interactivity "estimators" that use some kind of sleep/run +algorithm are doomed to fail to detect all interactive tasks, and to falsely tag +tasks that aren't interactive as being so. 
The reason for this is that it is +close to impossible to determine that when a task is sleeping, whether it is +doing it voluntarily, as in a userspace application waiting for input in the +form of a mouse click or otherwise, or involuntarily, because it is waiting for +another thread, process, I/O, kernel activity or whatever. Thus, such an +estimator will introduce corner cases, and more heuristics will be required to +cope with those corner cases, introducing more corner cases and failed +interactivity detection and so on. Interactivity in BFS is built into the design +by virtue of the fact that tasks that are waking up have not used up their quota +of CPU time, and have earlier effective deadlines, thereby making it very likely +they will preempt any CPU bound task of equivalent nice level. See below for +more information on the virtual deadline mechanism. Even if they do not preempt +a running task, because the rr interval is guaranteed to have a bound upper +limit on how long a task will wait for, it will be scheduled within a timeframe +that will not cause visible interface jitter. + + +Design details. + +Task insertion. + +BFS inserts tasks into each relevant queue as an O(1) insertion into a double +linked list. On insertion, *every* running queue is checked to see if the newly +queued task can run on any idle queue, or preempt the lowest running task on the +system. This is how the cross-CPU scheduling of BFS achieves significantly lower +latency per extra CPU the system has. In this case the lookup is, in the worst +case scenario, O(n) where n is the number of CPUs on the system. + +Data protection. + +BFS has one single lock protecting the process local data of every task in the +global queue. Thus every insertion, removal and modification of task data in the +global runqueue needs to grab the global lock. However, once a task is taken by +a CPU, the CPU has its own local data copy of the running process' accounting +information which only that CPU accesses and modifies (such as during a +timer tick) thus allowing the accounting data to be updated lockless. Once a +CPU has taken a task to run, it removes it from the global queue. Thus the +global queue only ever has, at most, + + (number of tasks requesting cpu time) - (number of logical CPUs) + 1 + +tasks in the global queue. This value is relevant for the time taken to look up +tasks during scheduling. This will increase if many tasks with CPU affinity set +in their policy to limit which CPUs they're allowed to run on if they outnumber +the number of CPUs. The +1 is because when rescheduling a task, the CPU's +currently running task is put back on the queue. Lookup will be described after +the virtual deadline mechanism is explained. + +Virtual deadline. + +The key to achieving low latency, scheduling fairness, and "nice level" +distribution in BFS is entirely in the virtual deadline mechanism. The one +tunable in BFS is the rr_interval, or "round robin interval". This is the +maximum time two SCHED_OTHER (or SCHED_NORMAL, the common scheduling policy) +tasks of the same nice level will be running for, or looking at it the other +way around, the longest duration two tasks of the same nice level will be +delayed for. When a task requests cpu time, it is given a quota (time_slice) +equal to the rr_interval and a virtual deadline. 
The virtual deadline is +offset from the current time in jiffies by this equation: + + jiffies + (prio_ratio * rr_interval) + +The prio_ratio is determined as a ratio compared to the baseline of nice -20 +and increases by 10% per nice level. The deadline is a virtual one only in that +no guarantee is placed that a task will actually be scheduled by this time, but +it is used to compare which task should go next. There are three components to +how a task is next chosen. First is time_slice expiration. If a task runs out +of its time_slice, it is descheduled, the time_slice is refilled, and the +deadline reset to that formula above. Second is sleep, where a task no longer +is requesting CPU for whatever reason. The time_slice and deadline are _not_ +adjusted in this case and are just carried over for when the task is next +scheduled. Third is preemption, and that is when a newly waking task is deemed +higher priority than a currently running task on any cpu by virtue of the fact +that it has an earlier virtual deadline than the currently running task. The +earlier deadline is the key to which task is next chosen for the first and +second cases. Once a task is descheduled, it is put back on the queue, and an +O(n) lookup of all queued-but-not-running tasks is done to determine which has +the earliest deadline and that task is chosen to receive CPU next. + +The CPU proportion of different nice tasks works out to be approximately the + + (prio_ratio difference)^2 + +The reason it is squared is that a task's deadline does not change while it is +running unless it runs out of time_slice. Thus, even if the time actually +passes the deadline of another task that is queued, it will not get CPU time +unless the current running task deschedules, and the time "base" (jiffies) is +constantly moving. + +Task lookup. + +BFS has 103 priority queues. 100 of these are dedicated to the static priority +of realtime tasks, and the remaining 3 are, in order of best to worst priority, +SCHED_ISO (isochronous), SCHED_NORMAL, and SCHED_IDLEPRIO (idle priority +scheduling). When a task of these priorities is queued, a bitmap of running +priorities is set showing which of these priorities has tasks waiting for CPU +time. When a CPU is made to reschedule, the lookup for the next task to get +CPU time is performed in the following way: + +First the bitmap is checked to see what static priority tasks are queued. If +any realtime priorities are found, the corresponding queue is checked and the +first task listed there is taken (provided CPU affinity is suitable) and lookup +is complete. If the priority corresponds to a SCHED_ISO task, they are also +taken in FIFO order (as they behave like SCHED_RR). If the priority corresponds +to either SCHED_NORMAL or SCHED_IDLEPRIO, then the lookup becomes O(n). At this +stage, every task in the runlist that corresponds to that priority is checked +to see which has the earliest set deadline, and (provided it has suitable CPU +affinity) it is taken off the runqueue and given the CPU. If a task has an +expired deadline, it is taken and the rest of the lookup aborted (as they are +chosen in FIFO order). + +Thus, the lookup is O(n) in the worst case only, where n is as described +earlier, as tasks may be chosen before the whole task list is looked over. + + +Scalability. + +The major limitations of BFS will be that of scalability, as the separate +runqueue designs will have less lock contention as the number of CPUs rises. 
+
+However they do not scale linearly even with separate runqueues, as multiple
+runqueues will need to be locked concurrently on such designs to be able to
+achieve fair CPU balancing, to try and achieve some sort of nice-level fairness
+across CPUs, and to achieve low enough latency for tasks on a busy CPU when
+other CPUs would be more suited. BFS has the advantage that it requires no
+balancing algorithm whatsoever, as balancing occurs by proxy simply because
+all CPUs draw off the global runqueue, in priority and deadline order. Despite
+the fact that scalability is _not_ the prime concern of BFS, it both shows very
+good scalability at smaller numbers of CPUs and is likely a more scalable
+design at those CPU counts.
+
+It also has some very low overhead scalability features built into the design,
+added where it has been deemed their overhead is so marginal that they're worth
+having. The first is the local copy of the running process' data to the CPU
+it's running on, to allow that data to be updated lockless where possible. Then
+there is deference paid to the last CPU a task was running on, by trying that
+CPU first when looking for an idle CPU to use the next time it's scheduled.
+Finally there is the notion of "sticky" tasks that are flagged when they are
+involuntarily descheduled, meaning they still want further CPU time. This
+sticky flag is used to bias heavily against those tasks being scheduled on a
+different CPU unless that CPU would be otherwise idle. When a cpu frequency
+governor is used that scales with CPU load, such as ondemand, sticky tasks are
+not scheduled on a different CPU at all, preferring instead to go idle. This
+means the CPU they were bound to is more likely to increase its speed while
+the other CPU will go idle, thus speeding up total task execution time and
+likely decreasing power usage. This is the only scenario where BFS will allow
+a CPU to go idle in preference to scheduling a task on the earliest available
+spare CPU.
+
+The real cost of migrating a task from one CPU to another is entirely dependent
+on the cache footprint of the task, how cache intensive the task is, how long
+it's been running on that CPU to take up the bulk of its cache, how big the CPU
+cache is, how fast and how layered the CPU cache is, how fast a context switch
+is... and so on. In other words, it's close to random in the real world where we
+do more than just one sole workload. The only thing we can be sure of is that
+it's not free. So BFS uses the principle that an idle CPU is a wasted CPU and
+utilising idle CPUs is more important than cache locality, and cache locality
+only plays a part after that.
+
+When choosing an idle CPU for a waking task, the cache locality is determined
+according to where the task last ran, and then idle CPUs are ranked from best
+to worst to choose the most suitable idle CPU based on cache locality, NUMA
+node locality and hyperthread sibling busyness. They are chosen in the
+following order of preference (if idle):
+
+* Same core, idle or busy cache, idle threads.
+* Other core, same cache, idle or busy cache, idle threads.
+* Same node, other CPU, idle cache, idle threads.
+* Same node, other CPU, busy cache, idle threads.
+* Same core, busy threads.
+* Other core, same cache, busy threads.
+* Same node, other CPU, busy threads.
+* Other node, other CPU, idle cache, idle threads.
+* Other node, other CPU, busy cache, idle threads.
+* Other node, other CPU, busy threads.
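+
+(Purely as an illustration of the ranking above, and not the actual BFS code,
+the order can be written down as an ordered enum, where a lower value means a
+more preferred idle CPU:)
+
+	/* Locality rank of a candidate CPU relative to where the waking task
+	 * last ran; lower is better.  Mirrors the preference list above. */
+	enum idle_cpu_rank {
+		RANK_SAME_CORE_IDLE_THREADS,
+		RANK_OTHER_CORE_SHARED_CACHE_IDLE_THREADS,
+		RANK_SAME_NODE_IDLE_CACHE_IDLE_THREADS,
+		RANK_SAME_NODE_BUSY_CACHE_IDLE_THREADS,
+		RANK_SAME_CORE_BUSY_THREADS,
+		RANK_OTHER_CORE_SHARED_CACHE_BUSY_THREADS,
+		RANK_SAME_NODE_BUSY_THREADS,
+		RANK_OTHER_NODE_IDLE_CACHE_IDLE_THREADS,
+		RANK_OTHER_NODE_BUSY_CACHE_IDLE_THREADS,
+		RANK_OTHER_NODE_BUSY_THREADS,
+	};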
+ +This shows the SMT or "hyperthread" awareness in the design as well which will +choose a real idle core first before a logical SMT sibling which already has +tasks on the physical CPU. + +Early benchmarking of BFS suggested scalability dropped off at the 16 CPU mark. +However this benchmarking was performed on an earlier design that was far less +scalable than the current one so it's hard to know how scalable it is in terms +of both CPUs (due to the global runqueue) and heavily loaded machines (due to +O(n) lookup) at this stage. Note that in terms of scalability, the number of +_logical_ CPUs matters, not the number of _physical_ CPUs. Thus, a dual (2x) +quad core (4X) hyperthreaded (2X) machine is effectively a 16X. Newer benchmark +results are very promising indeed, without needing to tweak any knobs, features +or options. Benchmark contributions are most welcome. + + +Features + +As the initial prime target audience for BFS was the average desktop user, it +was designed to not need tweaking, tuning or have features set to obtain benefit +from it. Thus the number of knobs and features has been kept to an absolute +minimum and should not require extra user input for the vast majority of cases. +There are precisely 2 tunables, and 2 extra scheduling policies. The rr_interval +and iso_cpu tunables, and the SCHED_ISO and SCHED_IDLEPRIO policies. In addition +to this, BFS also uses sub-tick accounting. What BFS does _not_ now feature is +support for CGROUPS. The average user should neither need to know what these +are, nor should they need to be using them to have good desktop behaviour. + +rr_interval + +There is only one "scheduler" tunable, the round robin interval. This can be +accessed in + + /proc/sys/kernel/rr_interval + +The value is in milliseconds, and the default value is set to 6ms. Valid values +are from 1 to 1000. Decreasing the value will decrease latencies at the cost of +decreasing throughput, while increasing it will improve throughput, but at the +cost of worsening latencies. The accuracy of the rr interval is limited by HZ +resolution of the kernel configuration. Thus, the worst case latencies are +usually slightly higher than this actual value. BFS uses "dithering" to try and +minimise the effect the Hz limitation has. The default value of 6 is not an +arbitrary one. It is based on the fact that humans can detect jitter at +approximately 7ms, so aiming for much lower latencies is pointless under most +circumstances. It is worth noting this fact when comparing the latency +performance of BFS to other schedulers. Worst case latencies being higher than +7ms are far worse than average latencies not being in the microsecond range. +Experimentation has shown that rr intervals being increased up to 300 can +improve throughput but beyond that, scheduling noise from elsewhere prevents +further demonstrable throughput. + +Isochronous scheduling. + +Isochronous scheduling is a unique scheduling policy designed to provide +near-real-time performance to unprivileged (ie non-root) users without the +ability to starve the machine indefinitely. Isochronous tasks (which means +"same time") are set using, for example, the schedtool application like so: + + schedtool -I -e amarok + +This will start the audio application "amarok" as SCHED_ISO. 
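+
+(schedtool is only a thin wrapper around the sched_setscheduler() system call.
+A minimal equivalent is sketched below; note that SCHED_ISO is not defined by
+mainline headers, so the value used is an assumption based on common BFS
+patches and should be checked against the patched kernel's sched.h.)
+
+	#include <sched.h>
+	#include <stdio.h>
+	#include <unistd.h>
+
+	#ifndef SCHED_ISO
+	#define SCHED_ISO 4	/* assumed value; verify against the patched tree */
+	#endif
+
+	int main(int argc, char **argv)
+	{
+		struct sched_param sp = { .sched_priority = 0 };
+
+		if (argc < 2)
+			return 1;
+		if (sched_setscheduler(0, SCHED_ISO, &sp) == -1)
+			perror("sched_setscheduler");
+		execvp(argv[1], &argv[1]);	/* e.g. "./iso amarok" */
+		perror("execvp");
+		return 1;
+	}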
How SCHED_ISO works
+is that it has a priority level between true realtime tasks and SCHED_NORMAL,
+which allows ISO tasks to preempt all normal tasks, in a SCHED_RR fashion (ie,
+if multiple SCHED_ISO tasks are running, they purely round robin at rr_interval
+rate). However if ISO tasks run for more than a tunable finite amount of time,
+they are then demoted back to SCHED_NORMAL scheduling. This finite amount of
+time is the percentage of _total CPU_ available across the machine, configurable
+as a percentage in the following "resource handling" tunable (as opposed to a
+scheduler tunable):
+
+	/proc/sys/kernel/iso_cpu
+
+and is set to 70% by default. It is calculated over a rolling 5 second average.
+Because it is the total CPU available, it means that on a multi CPU machine, it
+is possible to have an ISO task running with realtime scheduling indefinitely
+on just one CPU, as the other CPUs will be available. Setting this to 100 is
+the equivalent of giving all users SCHED_RR access, and setting it to 0 removes
+the ability to run any pseudo-realtime tasks.
+
+A feature of BFS is that it detects when an application tries to obtain a
+realtime policy (SCHED_RR or SCHED_FIFO) and the caller does not have the
+appropriate privileges to use those policies. When it detects this, it will
+give the task SCHED_ISO policy instead. Thus it is transparent to the user.
+Because some applications constantly set their policy as well as their nice
+level, there is potential for them to undo the SCHED_ISO policy set by the
+user on the command line. To counter this, once a task has been set to
+SCHED_ISO policy, it needs superuser privileges to set it back to SCHED_NORMAL.
+This will ensure the task remains ISO, and all child processes and threads
+will also inherit the ISO policy.
+
+Idleprio scheduling.
+
+Idleprio scheduling is a scheduling policy designed to give out CPU to a task
+_only_ when the CPU would be otherwise idle. The idea behind this is to allow
+ultra low priority tasks to be run in the background that have virtually no
+effect on the foreground tasks. This is ideally suited to distributed computing
+clients (like setiathome, folding, mprime etc) but can also be used to start
+a video encode or the like without any slowdown of other tasks. To prevent this
+policy from grabbing shared resources and holding them indefinitely, if BFS
+detects a state where the task is waiting on I/O, the machine is about to
+suspend to ram, and so on, it will transiently schedule the task as
+SCHED_NORMAL. As per the Isochronous task management, once a task has been
+scheduled as IDLEPRIO, it cannot be put back to SCHED_NORMAL without superuser
+privileges. Tasks can be set to start as SCHED_IDLEPRIO with the schedtool
+command like so:
+
+	schedtool -D -e ./mprime
+
+Subtick accounting.
+
+It is surprisingly difficult to get accurate CPU accounting, and in many cases,
+the accounting is done by simply determining what is happening at the precise
+moment a timer tick fires off. This becomes increasingly inaccurate as the
+timer tick frequency (HZ) is lowered. It is possible to create an application
+which uses almost 100% CPU, yet by being descheduled at the right time, records
+zero CPU usage. While the main problem with this is that there are possible
+security implications, it is also difficult to determine how much CPU a task
+really does use. BFS tries to use the sub-tick accounting from the TSC clock,
+where possible, to determine real CPU usage.
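+
+(The principle can be sketched as follows; the names are invented and this is
+not the BFS implementation. Rather than crediting a whole tick to whichever
+task happens to be running when the tick fires, the scheduler charges the
+outgoing task the nanosecond delta it actually spent on the CPU, read from a
+high resolution clock at each context switch:)
+
+	struct task_acct {
+		u64	last_ran_ns;	/* timestamp when last scheduled in */
+		u64	used_ns;	/* total CPU time consumed */
+	};
+
+	/* Called at every context switch with a nanosecond timestamp, for
+	 * example from sched_clock(). */
+	static void account_switch(struct task_acct *prev,
+				   struct task_acct *next, u64 now_ns)
+	{
+		prev->used_ns += now_ns - prev->last_ran_ns;
+		next->last_ran_ns = now_ns;
+	}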
This is not entirely reliable, but +is far more likely to produce accurate CPU usage data than the existing designs +and will not show tasks as consuming no CPU usage when they actually are. Thus, +the amount of CPU reported as being used by BFS will more accurately represent +how much CPU the task itself is using (as is shown for example by the 'time' +application), so the reported values may be quite different to other schedulers. +Values reported as the 'load' are more prone to problems with this design, but +per process values are closer to real usage. When comparing throughput of BFS +to other designs, it is important to compare the actual completed work in terms +of total wall clock time taken and total work done, rather than the reported +"cpu usage". + + +Con Kolivas Tue, 5 Apr 2011 diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index 11d5ceda5bb06..f43587894b5fa 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt @@ -21,6 +21,7 @@ show up in /proc/sys/kernel: - acct - bootloader_type [ X86 only ] - bootloader_version [ X86 only ] +- boot_reason [ ARM only ] - callhome [ S390 only ] - auto_msgmni - core_pattern @@ -32,6 +33,7 @@ show up in /proc/sys/kernel: - domainname - hostname - hotplug +- iso_cpu - java-appletviewer [ binfmt_java, obsolete ] - java-interpreter [ binfmt_java, obsolete ] - kptr_restrict @@ -55,6 +57,7 @@ show up in /proc/sys/kernel: - randomize_va_space - real-root-dev ==> Documentation/initrd.txt - reboot-cmd [ SPARC only ] +- rr_interval - rtsig-max - rtsig-nr - sem @@ -126,6 +129,19 @@ Documentation/x86/boot.txt for additional information. ============================================================== +boot_reason: + +ARM -- reason for device boot + +A single bit will be set in the unsigned integer value to identify the +reason the device was booted / powered on. The value will be zero if this +feature is not supported on the ARM device being booted. + +See the power-on-status field definitions in +Documentation/arm/msm/boot.txt for Qualcomm's family of devices. + +============================================================== + callhome: Controls the kernel's callhome behavior in case of a kernel panic. @@ -255,6 +271,16 @@ Default value is "/sbin/hotplug". ============================================================== +iso_cpu: (BFS CPU scheduler only). + +This sets the percentage cpu that the unprivileged SCHED_ISO tasks can +run effectively at realtime priority, averaged over a rolling five +seconds over the -whole- system, meaning all cpus. + +Set to 70 (percent) by default. + +============================================================== + l2cr: (PPC only) This flag controls the L2 cache of G3 processor boards. If @@ -442,6 +468,20 @@ rebooting. ??? ============================================================== +rr_interval: (BFS CPU scheduler only) + +This is the smallest duration that any cpu process scheduling unit +will run for. Increasing this value can increase throughput of cpu +bound tasks substantially but at the expense of increased latencies +overall. Conversely decreasing it will decrease average and maximum +latencies but at the expense of throughput. This value is in +milliseconds and the default value chosen depends on the number of +cpus available at scheduler initialisation with a minimum of 6. + +Valid values are from 1-1000. 
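+
+The value can be changed at runtime by writing to the proc file, for example
+with a small helper like the sketch below (purely illustrative; a root shell
+redirect into the file works just as well):
+
+	#include <stdio.h>
+
+	/* Raise rr_interval to 30 ms, trading some latency for throughput.
+	 * Must be run as root. */
+	int main(void)
+	{
+		FILE *f = fopen("/proc/sys/kernel/rr_interval", "w");
+
+		if (!f) {
+			perror("rr_interval");
+			return 1;
+		}
+		fprintf(f, "30\n");
+		return fclose(f) ? 1 : 0;
+	}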
+ +============================================================== + rtsig-max & rtsig-nr: The file rtsig-max can be used to tune the maximum number diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt index 30289fab86ebb..c000e0dc0db3a 100644 --- a/Documentation/sysctl/vm.txt +++ b/Documentation/sysctl/vm.txt @@ -28,6 +28,7 @@ Currently, these files are in /proc/sys/vm: - dirty_writeback_centisecs - drop_caches - extfrag_threshold +- extra_free_kbytes - hugepages_treat_as_movable - hugetlb_shm_group - laptop_mode @@ -168,6 +169,21 @@ fragmentation index is <= extfrag_threshold. The default value is 500. ============================================================== +extra_free_kbytes + +This parameter tells the VM to keep extra free memory between the threshold +where background reclaim (kswapd) kicks in, and the threshold where direct +reclaim (by allocating processes) kicks in. + +This is useful for workloads that require low latency memory allocations +and have a bounded burstiness in memory allocations, for example a +realtime application that receives and transmits network traffic +(causing in-kernel memory allocations) with a maximum total message burst +size of 200MB may need 200MB of extra free memory to avoid direct reclaim +related latencies. + +============================================================== + hugepages_treat_as_movable This parameter is only useful when kernelcore= is specified at boot time to diff --git a/Documentation/usb/linux-cdc-acm.inf b/Documentation/usb/linux-cdc-acm.inf index 612e7220fb295..37a02ce548417 100644 --- a/Documentation/usb/linux-cdc-acm.inf +++ b/Documentation/usb/linux-cdc-acm.inf @@ -90,10 +90,10 @@ ServiceBinary=%12%\USBSER.sys [SourceDisksFiles] [SourceDisksNames] [DeviceList] -%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_0525&PID_A4AB&MI_02 +%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02 [DeviceList.NTamd64] -%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_0525&PID_A4AB&MI_02 +%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02 ;------------------------------------------------------------------------------ diff --git a/Documentation/usb/linux.inf b/Documentation/usb/linux.inf index 4dee95851224a..4ffa715b0ae88 100644 --- a/Documentation/usb/linux.inf +++ b/Documentation/usb/linux.inf @@ -18,15 +18,15 @@ DriverVer = 06/21/2006,6.0.6000.16384 ; Decoration for x86 architecture [LinuxDevices.NTx86] -%LinuxDevice% = RNDIS.NT.5.1, USB\VID_0525&PID_a4a2, USB\VID_0525&PID_a4ab&MI_00 +%LinuxDevice% = RNDIS.NT.5.1, USB\VID_0525&PID_a4a2, USB\VID_1d6b&PID_0104&MI_00 ; Decoration for x64 architecture [LinuxDevices.NTamd64] -%LinuxDevice% = RNDIS.NT.5.1, USB\VID_0525&PID_a4a2, USB\VID_0525&PID_a4ab&MI_00 +%LinuxDevice% = RNDIS.NT.5.1, USB\VID_0525&PID_a4a2, USB\VID_1d6b&PID_0104&MI_00 ; Decoration for ia64 architecture [LinuxDevices.NTia64] -%LinuxDevice% = RNDIS.NT.5.1, USB\VID_0525&PID_a4a2, USB\VID_0525&PID_a4ab&MI_00 +%LinuxDevice% = RNDIS.NT.5.1, USB\VID_0525&PID_a4a2, USB\VID_1d6b&PID_0104&MI_00 ;@@@ This is the common setting for setup [ControlFlags] diff --git a/MAINTAINERS b/MAINTAINERS index f1bc3dc6b3699..6ca300443de60 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5944,7 +5944,6 @@ F: arch/alpha/kernel/srm_env.c STABLE BRANCH M: Greg Kroah-Hartman -M: Chris Wright L: stable@kernel.org S: Maintained @@ -6252,6 +6251,13 @@ S: Maintained F: Documentation/filesystems/ufs.txt F: fs/ufs/ +UHID USERSPACE HID IO DRIVER: +M: David Herrmann +L: 
linux-input@vger.kernel.org +S: Maintained +F: drivers/hid/uhid.c +F: include/linux/uhid.h + ULTRA-WIDEBAND (UWB) SUBSYSTEM: L: linux-usb@vger.kernel.org S: Orphan diff --git a/Makefile b/Makefile index d6592b63c8cb6..0c6ab694aa687 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 38 -EXTRAVERSION = +EXTRAVERSION = .8 NAME = Flesh-Eating Bats with Fangs # *DOCUMENTATION* @@ -102,6 +102,10 @@ ifeq ("$(origin O)", "command line") KBUILD_OUTPUT := $(O) endif +ifeq ("$(origin W)", "command line") + export KBUILD_ENABLE_EXTRA_GCC_CHECKS := $(W) +endif + # That's our default target when none is given on the command line PHONY := _all _all: @@ -167,6 +171,8 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \ -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ -e s/sh[234].*/sh/ ) +SUBARCH := arm + # Cross compiling and selecting different set of gcc/bin-utils # --------------------------------------------------------------------------- # @@ -189,7 +195,7 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \ # Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile export KBUILD_BUILDHOST := $(SUBARCH) ARCH ?= $(SUBARCH) -CROSS_COMPILE ?= $(CONFIG_CROSS_COMPILE:"%"=%) +CROSS_COMPILE ?= arm-eabi- # Architecture as present in compile.h UTS_MACHINE := $(ARCH) @@ -555,6 +561,11 @@ ifndef CONFIG_CC_STACKPROTECTOR KBUILD_CFLAGS += $(call cc-option, -fno-stack-protector) endif +# This warning generated too much noise in a regular build. +# Use make W=1 to enable this warning (see scripts/Makefile.build) +KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable) +KBUILD_CFLAGS += $(call cc-disable-warning, uninitialized) + ifdef CONFIG_FRAME_POINTER KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls else @@ -600,7 +611,7 @@ CHECKFLAGS += $(NOSTDINC_FLAGS) KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,) # disable pointer signed / unsigned warnings in gcc 4.0 -KBUILD_CFLAGS += $(call cc-option,-Wno-pointer-sign,) +KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign) # disable invalid "can't wrap" optimizations for signed / pointers KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) @@ -1262,6 +1273,11 @@ help: @echo ' make O=dir [targets] Locate all output files in "dir", including .config' @echo ' make C=1 [targets] Check all c source with $$CHECK (sparse by default)' @echo ' make C=2 [targets] Force check of all c source with $$CHECK' + @echo ' make W=n [targets] Enable extra gcc checks, n=1,2,3 where' + @echo ' 1: warnings which may be relevant and do not occur too often' + @echo ' 2: warnings which occur quite often but may still be relevant' + @echo ' 3: more obscure warnings, can most likely be ignored' + @echo ' Multiple levels can be combined with W=12 or W=123' @echo '' @echo 'Execute "make" or "make all" to build all targets marked with [*] ' @echo 'For further info see the ./README file' @@ -1368,7 +1384,7 @@ endif # KBUILD_EXTMOD clean: $(clean-dirs) $(call cmd,rmdirs) $(call cmd,rmfiles) - @find $(or $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \ + @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) 
$(RCS_FIND_IGNORE) \ \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \ -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \ -o -name '*.symtypes' -o -name 'modules.order' \ diff --git a/arch/Kconfig b/arch/Kconfig index f78c2be4242b4..923a8cf3911b1 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -151,6 +151,13 @@ config HAVE_HW_BREAKPOINT bool depends on PERF_EVENTS +config HAVE_HW_BRKPT_RESERVED_RW_ACCESS + bool + depends on HAVE_HW_BREAKPOINT + help + Some of the hardware might not have r/w access beyond a certain number + of breakpoint register access. + config HAVE_MIXED_BREAKPOINTS_REGS bool depends on HAVE_HW_BREAKPOINT diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h index 945de222ab91a..e8a761aee088a 100644 --- a/arch/alpha/include/asm/futex.h +++ b/arch/alpha/include/asm/futex.h @@ -29,7 +29,7 @@ : "r" (uaddr), "r"(oparg) \ : "memory") -static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -39,7 +39,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -81,21 +81,23 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { - int prev, cmp; + int ret = 0, cmp; + u32 prev; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; __asm__ __volatile__ ( __ASM_SMP_MB - "1: ldl_l %0,0(%2)\n" - " cmpeq %0,%3,%1\n" - " beq %1,3f\n" - " mov %4,%1\n" - "2: stl_c %1,0(%2)\n" - " beq %1,4f\n" + "1: ldl_l %1,0(%3)\n" + " cmpeq %1,%4,%2\n" + " beq %2,3f\n" + " mov %5,%2\n" + "2: stl_c %2,0(%3)\n" + " beq %2,4f\n" "3: .subsection 2\n" "4: br 1b\n" " .previous\n" @@ -105,11 +107,12 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) " .long 2b-.\n" " lda $31,3b-2b(%0)\n" " .previous\n" - : "=&r"(prev), "=&r"(cmp) + : "+r"(ret), "=&r"(prev), "=&r"(cmp) : "r"(uaddr), "r"((long)oldval), "r"(newval) : "memory"); - return prev; + *uval = prev; + return ret; } #endif /* __KERNEL__ */ diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 166efa2a19cd9..10233cb550d67 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -20,6 +20,7 @@ config ARM select HAVE_KERNEL_GZIP select HAVE_KERNEL_LZO select HAVE_KERNEL_LZMA + select HAVE_KERNEL_XZ select HAVE_IRQ_WORK select HAVE_PERF_EVENTS select PERF_USE_VMALLOC @@ -61,7 +62,7 @@ config GENERIC_CLOCKEVENTS config GENERIC_CLOCKEVENTS_BROADCAST bool depends on GENERIC_CLOCKEVENTS - default y if SMP + default y if SMP && !LOCAL_TIMERS config HAVE_TCM bool @@ -126,15 +127,27 @@ config GENERIC_IRQ_PROBE config GENERIC_LOCKBREAK bool - default y + help + Enable generic lock breaking on preemptible SMP platforms. + Some platforms may be susceptible to livelock with this + item enabled. + default n depends on SMP && PREEMPT +config ARM_TICKET_LOCKS + bool + help + Enable ticket locks, which help preserve fairness among + contended locks and prevent livelock in multicore systems. + Say 'y' if system stability is important. 
+ default y if ARCH_MSM_SCORPIONMP + depends on SMP + config RWSEM_GENERIC_SPINLOCK bool - default y config RWSEM_XCHGADD_ALGORITHM - bool + def_bool y config ARCH_HAS_ILOG2_U32 bool @@ -615,15 +628,23 @@ config ARCH_PXA Support for Intel/Marvell's PXA2xx/PXA3xx processor line. config ARCH_MSM - bool "Qualcomm MSM" + bool "Qualcomm MSM/QSD" + select ARCH_HAS_CPUFREQ + select ARCH_REQUIRE_GPIOLIB select HAVE_CLK + select GENERIC_GPIO +# select GENERIC_TIME select GENERIC_CLOCKEVENTS select ARCH_REQUIRE_GPIOLIB +# select GENERIC_ALLOCATOR +# select ARCH_WANT_OPTIONAL_GPIOLIB +# select CLKDEV_LOOKUP +# select HAVE_SCHED_CLOCK help Support for Qualcomm MSM/QSD based systems. This runs on the - apps processor of the MSM/QSD and depends on a shared memory - interface to the modem processor which runs the baseband - stack and controls some vital subsystems + ARM11/Scorpion apps processor of the MSM/QSD and depends on a + shared memory interface to the ARM9 modem processor which + runs the baseband stack and controls some vital subsystems (clock and power control, etc). config ARCH_SHMOBILE @@ -1033,6 +1054,15 @@ config ARM_TIMER_SP804 source arch/arm/mm/Kconfig +config RESERVE_FIRST_PAGE + bool + default n + help + Reserve the first page at PHYS_OFFSET. The first + physical page is used by many platforms for warm + boot operations. Reserve this page so that it is + not allocated by the kernel. + config IWMMXT bool "Enable iWMMXt support" depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 @@ -1140,7 +1170,7 @@ config ARM_ERRATA_742231 config PL310_ERRATA_588369 bool "Clean & Invalidate maintenance operations do not invalidate clean lines" - depends on CACHE_L2X0 && ARCH_OMAP4 + depends on CACHE_L2X0 help The PL310 L2 cache controller implements three types of Clean & Invalidate maintenance operations: by Physical Address @@ -1149,8 +1179,7 @@ config PL310_ERRATA_588369 clean operation followed immediately by an invalidate operation, both performing to the same memory location. This functionality is not correctly implemented in PL310 as clean lines are not - invalidated as a result of these operations. Note that this errata - uses Texas Instrument's secure monitor api. + invalidated as a result of these operations. config ARM_ERRATA_720789 bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID" @@ -1202,6 +1231,30 @@ config ARM_ERRATA_753970 This has the same effect as the cache sync operation: store buffer drain and waiting for all buffers empty. +config PL310_ERRATA_727915 + bool "Background Clean & Invalidate by Way operation can cause data corruption" + depends on CACHE_L2X0 + help + PL310 implements the Clean & Invalidate by Way L2 cache maintenance + operation (offset 0x7FC). This operation runs in background so that + PL310 can handle normal accesses while it is in progress. Under very + rare circumstances, due to this erratum, write data can be lost when + PL310 treats a cacheable write transaction during a Clean & + Invalidate by Way operation. + +config KSAPI + tristate "KSAPI support (EXPERIMENTAL)" + depends on ARCH_MSM_SCORPION || ARCH_MSM_KRAIT + default n + help + KSAPI: Performance monitoring tool for linux. + KSAPI records performance statistics for Snapdragon linux platform. + It uses the /proc FS as a means to exchange configuration data and + counter statistics. It can monitor the counter statistics for + Scorpion processor supported hardware performance counters on a per + thread basis or AXI counters on an overall system basis. 
+ + endmenu source "arch/arm/common/Kconfig" @@ -1279,9 +1332,9 @@ config SMP depends on REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP || \ MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || \ ARCH_S5PV310 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || \ - ARCH_MSM_SCORPIONMP || ARCH_SHMOBILE + MSM_SMP || ARCH_SHMOBILE select USE_GENERIC_SMP_HELPERS - select HAVE_ARM_SCU if !ARCH_MSM_SCORPIONMP + select HAVE_ARM_SCU if !MSM_SMP help This enables support for systems with more than one CPU. If you have a system with only one CPU, like most personal computers, say N. If @@ -1357,16 +1410,17 @@ config NR_CPUS config HOTPLUG_CPU bool "Support for hot-pluggable CPUs (EXPERIMENTAL)" depends on SMP && HOTPLUG && EXPERIMENTAL - depends on !ARCH_MSM help Say Y here to experiment with turning CPUs off and on. CPUs can be controlled through /sys/devices/system/cpu. config LOCAL_TIMERS bool "Use local timer interrupts" - depends on SMP + depends on SMP && (REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || \ + REALVIEW_EB_A9MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || ARCH_U8500 || \ + MSM_SMP) default y - select HAVE_ARM_TWD if !ARCH_MSM_SCORPIONMP + select HAVE_ARM_TWD if (ARCH_REALVIEW || ARCH_OMAP4 || ARCH_U8500) help Enable support for local timers on SMP platforms, rather then the legacy IPI broadcast method. Local timers allows the system @@ -1473,8 +1527,39 @@ config HW_PERF_EVENTS Enable hardware performance counter support for perf events. If disabled, perf events will use software events only. +config VMALLOC_RESERVE + hex "Reserved vmalloc space" + default 0x08000000 + depends on MMU + help + Reserved vmalloc space if not specified on the kernel commandline. + source "mm/Kconfig" +config ARCH_MEMORY_PROBE + def_bool n + depends on MEMORY_HOTPLUG + +config ARCH_MEMORY_REMOVE + def_bool n + depends on MEMORY_HOTPLUG + +config ARCH_POPULATES_NODE_MAP + def_bool n + depends on MEMORY_HOTPLUG + +config DONT_RESERVE_FROM_MOVABLE_ZONE + def_bool y + depends on MEMORY_HOTPLUG + +config DONT_MAP_HOLE_AFTER_MEMBANK0 + def_bool n + depends on SPARSEMEM + +config FIX_MOVABLE_ZONE + def_bool y + depends on MEMORY_HOTPLUG + config FORCE_MAX_ZONEORDER int "Maximum zone order" if ARCH_SHMOBILE range 11 64 if ARCH_SHMOBILE @@ -1607,6 +1692,17 @@ config DEPRECATED_PARAM_STRUCT This was deprecated in 2001 and announced to live on for 5 years. Some old boot loaders still use this way. +config CP_ACCESS + tristate "CP register access tool" + default n + help + Provide support for Coprocessor register access using /sys + interface. Read and write to CP registers from userspace + through sysfs interface. A sys file (cp_rw) will be created under + /sys/devices/system/cpaccess/cpaccess0. + + If unsure, say N. + endmenu menu "Boot options" @@ -1833,6 +1929,14 @@ source "drivers/cpuidle/Kconfig" endmenu +config CPU_FREQ_MSM + bool + depends on CPU_FREQ && ARCH_MSM + default y + help + This enables the CPUFreq driver for Qualcomm CPUs. + If in doubt, say Y. 
+ menu "Floating point emulation" comment "At least one emulation must be selected" diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 6f7b29294c80f..36d67cc177da0 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -17,7 +17,7 @@ endif OBJCOPYFLAGS :=-O binary -R .comment -S GZFLAGS :=-9 -#KBUILD_CFLAGS +=-pipe +KBUILD_CFLAGS +=-pipe # Explicitly specifiy 32-bit ARM ISA since toolchain default can be -mthumb: KBUILD_CFLAGS +=$(call cc-option,-marm,) @@ -257,6 +257,7 @@ core-y += arch/arm/kernel/ arch/arm/mm/ arch/arm/common/ core-y += $(machdirs) $(platdirs) drivers-$(CONFIG_OPROFILE) += arch/arm/oprofile/ +core-y += arch/arm/perfmon/ libs-y := arch/arm/lib/ $(libs-y) diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore index c6028967d3367..f504fc155e683 100644 --- a/arch/arm/boot/compressed/.gitignore +++ b/arch/arm/boot/compressed/.gitignore @@ -1,7 +1,9 @@ font.c +ashldi3.S lib1funcs.S piggy.gzip piggy.lzo piggy.lzma +piggy.xzkern vmlinux vmlinux.lds diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 0a8f748e506ad..2d9ab571eb0dd 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -4,9 +4,18 @@ # create a compressed vmlinuz image from the original vmlinux # -AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET) -HEAD = head.o -OBJS = misc.o decompress.o +plus_sec := $(call as-instr,.arch_extension sec,+sec) + +AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) +AFLAGS_head.o += -Wa,-march=armv7-a$(plus_sec) +HEAD = head.o + +AFLAGS_misc.o += -Wa,-march=armv7-a$(plus_sec) +MISC = misc.o + +AFLAGS_decompress.o += -Wa,-march=armv7-a$(plus_sec) +DECOMPRESS = decompress.o + FONTC = $(srctree)/drivers/video/console/font_acorn_8x8.c # @@ -59,7 +68,7 @@ ZTEXTADDR := $(CONFIG_ZBOOT_ROM_TEXT) ZBSSADDR := $(CONFIG_ZBOOT_ROM_BSS) else ZTEXTADDR := 0 -ZBSSADDR := ALIGN(4) +ZBSSADDR := ALIGN(8) endif SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/ @@ -67,20 +76,21 @@ SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/ suffix_$(CONFIG_KERNEL_GZIP) = gzip suffix_$(CONFIG_KERNEL_LZO) = lzo suffix_$(CONFIG_KERNEL_LZMA) = lzma +suffix_$(CONFIG_KERNEL_XZ) = xzkern targets := vmlinux vmlinux.lds \ piggy.$(suffix_y) piggy.$(suffix_y).o \ - font.o font.c head.o misc.o $(OBJS) + font.o font.c head.o misc.o decompress.o $(OBJS) # Make sure files are removed during clean -extra-y += piggy.gzip piggy.lzo piggy.lzma lib1funcs.S +extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern lib1funcs.S ashldi3.S ifeq ($(CONFIG_FUNCTION_TRACER),y) ORIG_CFLAGS := $(KBUILD_CFLAGS) KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS)) endif -EXTRA_CFLAGS := -fpic -fno-builtin +EXTRA_CFLAGS := -fpic -fno-builtin -Wno-error=coverage-mismatch EXTRA_AFLAGS := -Wa,-march=all # Supply ZRELADDR to the decompressor via a linker symbol. 
@@ -100,19 +110,28 @@ LDFLAGS_vmlinux += -X LDFLAGS_vmlinux += -T # For __aeabi_uidivmod +AFLAGS_lib1funcs.o += -Wa,-march=armv7-a$(plus_sec) lib1funcs = $(obj)/lib1funcs.o $(obj)/lib1funcs.S: $(srctree)/arch/$(SRCARCH)/lib/lib1funcs.S FORCE $(call cmd,shipped) -$(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.$(suffix_y).o \ - $(addprefix $(obj)/, $(OBJS)) $(lib1funcs) FORCE +# For __aeabi_llsl +AFLAGS_ashldi3.o += -Wa,-march=armv7-a$(plus_sec) +ashldi3 = $(obj)/ashldi3.o + +$(obj)/ashldi3.S: $(srctree)/arch/$(SRCARCH)/lib/ashldi3.S FORCE + $(call cmd,shipped) + +$(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/$(MISC) $(obj)/$(DECOMPRESS) $(obj)/piggy.$(suffix_y).o \ + $(addprefix $(obj)/, $(OBJS)) $(lib1funcs) $(ashldi3) FORCE $(call if_changed,ld) @: $(obj)/piggy.$(suffix_y): $(obj)/../Image FORCE $(call if_changed,$(suffix_y)) +AFLAGS_piggy.$(suffix_y).o += -Wa,-march=armv7-a$(plus_sec) $(obj)/piggy.$(suffix_y).o: $(obj)/piggy.$(suffix_y) FORCE CFLAGS_font.o := -Dstatic= diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c index 4c72a97bc3e14..5210840076169 100644 --- a/arch/arm/boot/compressed/decompress.c +++ b/arch/arm/boot/compressed/decompress.c @@ -44,6 +44,10 @@ extern void error(char *); #include "../../../../lib/decompress_unlzma.c" #endif +#ifdef CONFIG_KERNEL_XZ +#include "../../../../lib/decompress_unxz.c" +#endif + void do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x)) { decompress(input, len, NULL, NULL, output, NULL, error); diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 48cbc0c50116a..eb7ba6a20acf5 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -21,7 +21,7 @@ #if defined(CONFIG_DEBUG_ICEDCC) -#ifdef CONFIG_CPU_V6 +#ifdef defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V7) .macro loadsp, rb, tmp .endm .macro writeb, ch, rb diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c index e653a6d3c8d90..253ecc88a7bb9 100644 --- a/arch/arm/boot/compressed/misc.c +++ b/arch/arm/boot/compressed/misc.c @@ -36,7 +36,7 @@ extern void error(char *x); #ifdef CONFIG_DEBUG_ICEDCC -#ifdef CONFIG_CPU_V6 +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V7) static void icedcc_putc(int ch) { diff --git a/arch/arm/boot/compressed/piggy.xzkern.S b/arch/arm/boot/compressed/piggy.xzkern.S new file mode 100644 index 0000000000000..6c83f7f598b1d --- /dev/null +++ b/arch/arm/boot/compressed/piggy.xzkern.S @@ -0,0 +1,6 @@ + .section .piggydata,#alloc + .globl input_data +input_data: + .incbin "arch/arm/boot/compressed/piggy.xzkern" + .globl input_data_end +input_data_end: diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in index 366a924019ac6..7ece7056ba0ea 100644 --- a/arch/arm/boot/compressed/vmlinux.lds.in +++ b/arch/arm/boot/compressed/vmlinux.lds.in @@ -57,6 +57,7 @@ SECTIONS .bss : { *(.bss) } _end = .; + . 
= ALIGN(8); /* the stack must be 64-bit aligned */ .stack : { *(.stack) } .stab 0 : { *(.stab) } diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile index ad8d13ddeb947..ce8a4f2315922 100644 --- a/arch/arm/common/Makefile +++ b/arch/arm/common/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_ARCH_IXP2000) += uengine.o obj-$(CONFIG_ARCH_IXP23XX) += uengine.o obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o obj-$(CONFIG_COMMON_CLKDEV) += clkdev.o +obj-$(CONFIG_CP_ACCESS) += cpaccess.o obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o obj-$(CONFIG_FIQ_GLUE) += fiq_glue.o fiq_glue_setup.o obj-$(CONFIG_FIQ_DEBUGGER) += fiq_debugger.o diff --git a/arch/arm/common/cpaccess.c b/arch/arm/common/cpaccess.c new file mode 100644 index 0000000000000..241e33953927d --- /dev/null +++ b/arch/arm/common/cpaccess.c @@ -0,0 +1,253 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * CP parameters + */ +struct cp_params { + unsigned long cp; + unsigned long op1; + unsigned long op2; + unsigned long crn; + unsigned long crm; + unsigned long write_value; + char rw; +}; + +static struct semaphore cp_sem; +static int cpu; + +static DEFINE_PER_CPU(struct cp_params, cp_param) + = { 15, 0, 0, 0, 0, 0, 'r' }; + +static struct sysdev_class cpaccess_sysclass = { + .name = "cpaccess", +}; + +/* + * get_asm_value - Dummy fuction + * @write_val: Write value incase of a CP register write operation. + * + * This function is just a placeholder. The first 2 instructions + * will be inserted to perform MRC/MCR instruction and a return. + * See do_cpregister_rw function. Value passed to function is + * accessed from r0 register. + */ +static noinline unsigned long cpaccess_dummy(unsigned long write_val) +{ + asm("mrc p15, 0, r0, c0, c0, 0\n\t"); + asm("bx lr\n\t"); + return 0xBEEF; +} __attribute__((aligned(32))) + +/* + * get_asm_value - Read/Write CP registers + * @ret: Pointer to return value in case of CP register + * read op. + * + */ +static void get_asm_value(void *ret) +{ + *(unsigned long *)ret = + cpaccess_dummy(per_cpu(cp_param.write_value, cpu)); +} + +/* + * dp_cpregister_rw - Read/Write CP registers + * @write: 1 for Write and 0 for Read operation + * + * Returns value read from CP register + */ +static unsigned long do_cpregister_rw(int write) +{ + unsigned long opcode, ret, *p_opcode; + + /* + * Mask the crn, crm, op1, op2 and cp values so they do not + * interfer with other fields of the op code. + */ + per_cpu(cp_param.cp, cpu) &= 0xF; + per_cpu(cp_param.crn, cpu) &= 0xF; + per_cpu(cp_param.crm, cpu) &= 0xF; + per_cpu(cp_param.op1, cpu) &= 0x7; + per_cpu(cp_param.op2, cpu) &= 0x7; + + /* + * Base MRC opcode for MIDR is EE100010, + * MCR is 0xEE000010 + */ + opcode = (write == 1 ? 
0xEE000010 : 0xEE100010); + opcode |= (per_cpu(cp_param.crn, cpu)<<16) | + (per_cpu(cp_param.crm, cpu)<<0) | + (per_cpu(cp_param.op1, cpu)<<21) | + (per_cpu(cp_param.op2, cpu)<<5) | + (per_cpu(cp_param.cp, cpu) << 8); + + /* + * Grab address of the Dummy function, insert MRC/MCR + * instruction and a return instruction ("bx lr"). Do + * a D cache clean and I cache invalidate after inserting + * new code. + */ + p_opcode = (unsigned long *)&cpaccess_dummy; + *p_opcode++ = opcode; + *p_opcode-- = 0xE12FFF1E; + __cpuc_coherent_kern_range((unsigned long)p_opcode, + ((unsigned long)p_opcode + (sizeof(long) * 2))); + +#ifdef CONFIG_SMP + /* + * Use smp_call_function_single to do CPU core specific + * get_asm_value function call. + */ + if (smp_call_function_single(cpu, get_asm_value, &ret, 1)) + printk(KERN_ERR "Error cpaccess smp call single\n"); +#else + get_asm_value(&ret); +#endif + + return ret; +} + +/* + * cp_register_write_sysfs - sysfs interface for writing to + * CP register + * @dev: sys device + * @attr: device attribute + * @buf: write value + * @cnt: not used + * + */ +static ssize_t cp_register_write_sysfs(struct sys_device *dev, + struct sysdev_attribute *attr, const char *buf, size_t cnt) +{ + unsigned long op1, op2, crn, crm, cp = 15, write_value, ret; + char rw; + if (down_timeout(&cp_sem, 6000)) + return -ERESTARTSYS; + + sscanf(buf, "%lu:%lu:%lu:%lu:%lu:%c:%lx:%d", &cp, &op1, &crn, + &crm, &op2, &rw, &write_value, &cpu); + per_cpu(cp_param.cp, cpu) = cp; + per_cpu(cp_param.op1, cpu) = op1; + per_cpu(cp_param.crn, cpu) = crn; + per_cpu(cp_param.crm, cpu) = crm; + per_cpu(cp_param.op2, cpu) = op2; + per_cpu(cp_param.rw, cpu) = rw; + per_cpu(cp_param.write_value, cpu) = write_value; + + if (per_cpu(cp_param.rw, cpu) == 'w') { + do_cpregister_rw(1); + ret = cnt; + } + + if ((per_cpu(cp_param.rw, cpu) != 'w') && + (per_cpu(cp_param.rw, cpu) != 'r')) { + ret = -1; + printk(KERN_INFO "Wrong Entry for 'r' or 'w'. \ + Use cp:op1:crn:crm:op2:r/w:write_value.\n"); + } + + return cnt; +} + +/* + * cp_register_read_sysfs - sysfs interface for reading CP registers + * @dev: sys device + * @attr: device attribute + * @buf: write value + * + * Code to read in the CPxx crn, crm, op1, op2 variables, or into + * the base MRC opcode, store to executable memory, clean/invalidate + * caches and then execute the new instruction and provide the + * result to the caller. 
+ */ +static ssize_t cp_register_read_sysfs(struct sys_device *dev, + struct sysdev_attribute *attr, char *buf) +{ + int ret; + ret = sprintf(buf, "%lx\n", do_cpregister_rw(0)); + + if (cp_sem.count <= 0) + up(&cp_sem); + + return ret; +} + +/* + * Setup sysfs files + */ +SYSDEV_ATTR(cp_rw, 0644, cp_register_read_sysfs, + cp_register_write_sysfs); + +static struct sys_device device_cpaccess = { + .id = 0, + .cls = &cpaccess_sysclass, +}; + +/* + * init_cpaccess_sysfs - initialize sys devices + */ +static int __init init_cpaccess_sysfs(void) +{ + int error = sysdev_class_register(&cpaccess_sysclass); + + if (!error) + error = sysdev_register(&device_cpaccess); + else + printk(KERN_ERR "Error initializing cpaccess \ + interface\n"); + + if (!error) + error = sysdev_create_file(&device_cpaccess, + &attr_cp_rw); + else { + printk(KERN_ERR "Error initializing cpaccess \ + interface\n"); + sysdev_unregister(&device_cpaccess); + sysdev_class_unregister(&cpaccess_sysclass); + } + + sema_init(&cp_sem, 1); + + return error; +} + +static void __exit exit_cpaccess_sysfs(void) +{ + sysdev_remove_file(&device_cpaccess, &attr_cp_rw); + sysdev_unregister(&device_cpaccess); + sysdev_class_unregister(&cpaccess_sysclass); +} + +module_init(init_cpaccess_sysfs); +module_exit(exit_cpaccess_sysfs); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c index 2243772111518..8996f06a76a67 100644 --- a/arch/arm/common/gic.c +++ b/arch/arm/common/gic.c @@ -28,10 +28,12 @@ #include #include #include +#include #include #include #include +#include static DEFINE_SPINLOCK(irq_controller_lock); @@ -42,6 +44,25 @@ struct gic_chip_data { unsigned int irq_offset; void __iomem *dist_base; void __iomem *cpu_base; + unsigned int max_irq; +#ifdef CONFIG_PM + unsigned int wakeup_irqs[32]; + unsigned int enabled_irqs[32]; +#endif +}; + +/* + * Supported arch specific GIC irq extension. + * Default make them NULL. 
+ */ +struct irq_chip gic_arch_extn = { + .irq_eoi = NULL, + .irq_mask = NULL, + .irq_unmask = NULL, + .irq_retrigger = NULL, + .irq_set_type = NULL, + .irq_set_wake = NULL, + .irq_disable = NULL, }; #ifndef MAX_GIC_NR @@ -71,29 +92,136 @@ static inline unsigned int gic_irq(struct irq_data *d) /* * Routines to acknowledge, disable and enable interrupts */ -static void gic_ack_irq(struct irq_data *d) +static void gic_mask_irq(struct irq_data *d) { + u32 mask = 1 << (d->irq % 32); + spin_lock(&irq_controller_lock); - writel(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI); + writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); + if (gic_arch_extn.irq_mask) + gic_arch_extn.irq_mask(d); spin_unlock(&irq_controller_lock); + } -static void gic_mask_irq(struct irq_data *d) +static void gic_unmask_irq(struct irq_data *d) { u32 mask = 1 << (d->irq % 32); spin_lock(&irq_controller_lock); - writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); + if (gic_arch_extn.irq_unmask) + gic_arch_extn.irq_unmask(d); + writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); spin_unlock(&irq_controller_lock); } -static void gic_unmask_irq(struct irq_data *d) +static void gic_disable_irq(struct irq_data *d) { - u32 mask = 1 << (d->irq % 32); + if (gic_arch_extn.irq_disable) + gic_arch_extn.irq_disable(d); +} + +#ifdef CONFIG_PM +static int gic_suspend_one(struct gic_chip_data *gic) +{ + unsigned int i; + void __iomem *base = gic->dist_base; + + for (i = 0; i * 32 < gic->max_irq; i++) { + gic->enabled_irqs[i] + = readl_relaxed(base + GIC_DIST_ENABLE_SET + i * 4); + /* disable all of them */ + writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4); + /* enable the wakeup set */ + writel_relaxed(gic->wakeup_irqs[i], + base + GIC_DIST_ENABLE_SET + i * 4); + } + mb(); + return 0; +} + +static int gic_suspend(void) +{ + int i; + for (i = 0; i < MAX_GIC_NR; i++) + gic_suspend_one(&gic_data[i]); + return 0; +} + +extern int msm_show_resume_irq_mask; + +static void gic_show_resume_irq(struct gic_chip_data *gic) +{ + unsigned int i; + u32 enabled; + unsigned long pending[32]; + void __iomem *base = gic->dist_base; + + if (!msm_show_resume_irq_mask) + return; spin_lock(&irq_controller_lock); - writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); + for (i = 0; i * 32 < gic->max_irq; i++) { + enabled = readl_relaxed(base + GIC_DIST_ENABLE_CLEAR + i * 4); + pending[i] = readl_relaxed(base + GIC_DIST_PENDING_SET + i * 4); + pending[i] &= enabled; + } spin_unlock(&irq_controller_lock); + + for (i = find_first_bit(pending, gic->max_irq); + i < gic->max_irq; + i = find_next_bit(pending, gic->max_irq, i+1)) { + pr_warning("%s: %d triggered", __func__, + i + gic->irq_offset); + } +} + +static void gic_resume_one(struct gic_chip_data *gic) +{ + unsigned int i; + void __iomem *base = gic->dist_base; + + gic_show_resume_irq(gic); + for (i = 0; i * 32 < gic->max_irq; i++) { + /* disable all of them */ + writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4); + /* enable the enabled set */ + writel_relaxed(gic->enabled_irqs[i], + base + GIC_DIST_ENABLE_SET + i * 4); + } + mb(); +} + +static void gic_resume(void) +{ + int i; + for (i = 0; i < MAX_GIC_NR; i++) + gic_resume_one(&gic_data[i]); +} + +static struct syscore_ops gic_syscore_ops = { + .suspend = gic_suspend, + .resume = gic_resume, +}; + +static int __init gic_init_sys(void) +{ + register_syscore_ops(&gic_syscore_ops); + return 0; +} 
+arch_initcall(gic_init_sys); + +#endif + +static void gic_eoi_irq(struct irq_data *d) +{ + if (gic_arch_extn.irq_eoi) { + spin_lock(&irq_controller_lock); + gic_arch_extn.irq_eoi(d); + spin_unlock(&irq_controller_lock); + } + + writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI); } static int gic_set_type(struct irq_data *d, unsigned int type) @@ -116,7 +244,10 @@ static int gic_set_type(struct irq_data *d, unsigned int type) spin_lock(&irq_controller_lock); - val = readl(base + GIC_DIST_CONFIG + confoff); + if (gic_arch_extn.irq_set_type) + gic_arch_extn.irq_set_type(d, type); + + val = readl_relaxed(base + GIC_DIST_CONFIG + confoff); if (type == IRQ_TYPE_LEVEL_HIGH) val &= ~confmask; else if (type == IRQ_TYPE_EDGE_RISING) @@ -126,59 +257,94 @@ static int gic_set_type(struct irq_data *d, unsigned int type) * As recommended by the spec, disable the interrupt before changing * the configuration */ - if (readl(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) { - writel(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff); + if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) { + writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff); enabled = true; } - writel(val, base + GIC_DIST_CONFIG + confoff); + writel_relaxed(val, base + GIC_DIST_CONFIG + confoff); if (enabled) - writel(enablemask, base + GIC_DIST_ENABLE_SET + enableoff); + writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff); spin_unlock(&irq_controller_lock); return 0; } +static int gic_retrigger(struct irq_data *d) +{ + if (gic_arch_extn.irq_retrigger) + return gic_arch_extn.irq_retrigger(d); + + return -ENXIO; +} + #ifdef CONFIG_SMP -static int -gic_set_cpu(struct irq_data *d, const struct cpumask *mask_val, bool force) +static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, + bool force) { void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); unsigned int shift = (d->irq % 4) * 8; unsigned int cpu = cpumask_first(mask_val); - u32 val; - struct irq_desc *desc; + u32 val, mask, bit; - spin_lock(&irq_controller_lock); - desc = irq_to_desc(d->irq); - if (desc == NULL) { - spin_unlock(&irq_controller_lock); + if (cpu >= 8) return -EINVAL; - } + + mask = 0xff << shift; + bit = 1 << (cpu + shift); + + spin_lock(&irq_controller_lock); d->node = cpu; - val = readl(reg) & ~(0xff << shift); - val |= 1 << (cpu + shift); - writel(val, reg); + val = readl_relaxed(reg) & ~mask; + writel_relaxed(val | bit, reg); spin_unlock(&irq_controller_lock); return 0; } #endif +#ifdef CONFIG_PM +static int gic_set_wake(struct irq_data *d, unsigned int on) +{ + int ret = -ENXIO; + unsigned int reg_offset, bit_offset; + unsigned int gicirq = gic_irq(d); + struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d); + + /* per-cpu interrupts cannot be wakeup interrupts */ + WARN_ON(gicirq < 32); + + reg_offset = gicirq / 32; + bit_offset = gicirq % 32; + + if (on) + gic_data->wakeup_irqs[reg_offset] |= 1 << bit_offset; + else + gic_data->wakeup_irqs[reg_offset] &= ~(1 << bit_offset); + + if (gic_arch_extn.irq_set_wake) + ret = gic_arch_extn.irq_set_wake(d, on); + + return ret; +} + +#else +#define gic_set_wake NULL +#endif + static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) { - struct gic_chip_data *chip_data = get_irq_data(irq); - struct irq_chip *chip = get_irq_chip(irq); + struct gic_chip_data *chip_data = irq_get_handler_data(irq); + struct irq_chip *chip = irq_get_chip(irq); unsigned int cascade_irq, gic_irq; unsigned 
long status; - /* primary controller ack'ing */ - chip->irq_ack(&desc->irq_data); + chained_irq_enter(chip, desc); spin_lock(&irq_controller_lock); - status = readl(chip_data->cpu_base + GIC_CPU_INTACK); + status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK); spin_unlock(&irq_controller_lock); gic_irq = (status & 0x3ff); @@ -192,28 +358,30 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) generic_handle_irq(cascade_irq); out: - /* primary controller unmasking */ - chip->irq_unmask(&desc->irq_data); + chained_irq_exit(chip, desc); } static struct irq_chip gic_chip = { .name = "GIC", - .irq_ack = gic_ack_irq, .irq_mask = gic_mask_irq, .irq_unmask = gic_unmask_irq, + .irq_eoi = gic_eoi_irq, .irq_set_type = gic_set_type, + .irq_retrigger = gic_retrigger, #ifdef CONFIG_SMP - .irq_set_affinity = gic_set_cpu, + .irq_set_affinity = gic_set_affinity, #endif + .irq_disable = gic_disable_irq, + .irq_set_wake = gic_set_wake, }; void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) { if (gic_nr >= MAX_GIC_NR) BUG(); - if (set_irq_data(irq, &gic_data[gic_nr]) != 0) + if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0) BUG(); - set_irq_chained_handler(irq, gic_handle_cascade_irq); + irq_set_chained_handler(irq, gic_handle_cascade_irq); } static void __init gic_dist_init(struct gic_chip_data *gic, @@ -226,13 +394,13 @@ static void __init gic_dist_init(struct gic_chip_data *gic, cpumask |= cpumask << 8; cpumask |= cpumask << 16; - writel(0, base + GIC_DIST_CTRL); + writel_relaxed(0, base + GIC_DIST_CTRL); /* * Find out how many interrupts are supported. * The GIC only supports up to 1020 interrupt sources. */ - gic_irqs = readl(base + GIC_DIST_CTR) & 0x1f; + gic_irqs = readl_relaxed(base + GIC_DIST_CTR) & 0x1f; gic_irqs = (gic_irqs + 1) * 32; if (gic_irqs > 1020) gic_irqs = 1020; @@ -241,26 +409,26 @@ static void __init gic_dist_init(struct gic_chip_data *gic, * Set all global interrupts to be level triggered, active low. */ for (i = 32; i < gic_irqs; i += 16) - writel(0, base + GIC_DIST_CONFIG + i * 4 / 16); + writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16); /* * Set all global interrupts to this CPU only. */ for (i = 32; i < gic_irqs; i += 4) - writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); + writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); /* * Set priority on all global interrupts. */ for (i = 32; i < gic_irqs; i += 4) - writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4); + writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4); /* * Disable all interrupts. Leave the PPI and SGIs alone * as these enables are banked registers. */ for (i = 32; i < gic_irqs; i += 32) - writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32); + writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32); /* * Limit number of interrupts registered to the platform maximum @@ -273,13 +441,15 @@ static void __init gic_dist_init(struct gic_chip_data *gic, * Setup the Linux IRQ subsystem. 
*/ for (i = irq_start; i < irq_limit; i++) { - set_irq_chip(i, &gic_chip); - set_irq_chip_data(i, gic); - set_irq_handler(i, handle_level_irq); + irq_set_chip_and_handler(i, &gic_chip, handle_fasteoi_irq); + irq_set_chip_data(i, gic); set_irq_flags(i, IRQF_VALID | IRQF_PROBE); } - writel(1, base + GIC_DIST_CTRL); + gic->max_irq = gic_irqs; + + writel_relaxed(1, base + GIC_DIST_CTRL); + mb(); } static void __cpuinit gic_cpu_init(struct gic_chip_data *gic) @@ -292,17 +462,18 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic) * Deal with the banked PPI and SGI interrupts - disable all * PPI interrupts, ensure all SGI interrupts are enabled. */ - writel(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR); - writel(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET); + writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR); + writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET); /* * Set priority on PPI and SGI interrupts */ for (i = 0; i < 32; i += 4) - writel(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4); + writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4); - writel(0xf0, base + GIC_CPU_PRIMASK); - writel(1, base + GIC_CPU_CTRL); + writel_relaxed(0xf0, base + GIC_CPU_PRIMASK); + writel_relaxed(1, base + GIC_CPU_CTRL); + mb(); } void __init gic_init(unsigned int gic_nr, unsigned int irq_start, @@ -336,7 +507,7 @@ void __cpuinit gic_enable_ppi(unsigned int irq) unsigned long flags; local_irq_save(flags); - irq_to_desc(irq)->status |= IRQ_NOPROBE; + irq_set_status_flags(irq, IRQ_NOPROBE); gic_unmask_irq(irq_get_irq_data(irq)); local_irq_restore(flags); } @@ -346,7 +517,55 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) { unsigned long map = *cpus_addr(*mask); + /* + * Ensure that stores to Normal memory are visible to the + * other CPUs before issuing the IPI. 
+ */ + dsb(); + /* this always happens on GIC0 */ - writel(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT); + writel_relaxed(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT); + mb(); } #endif + +/* before calling this function the interrupts should be disabled + * and the irq must be disabled at gic to avoid spurious interrupts */ +bool gic_is_spi_pending(unsigned int irq) +{ + struct irq_data *d = irq_get_irq_data(irq); + struct gic_chip_data *gic_data = &gic_data[0]; + u32 mask, val; + + WARN_ON(!irqs_disabled()); + spin_lock(&irq_controller_lock); + mask = 1 << (gic_irq(d) % 32); + val = readl(gic_dist_base(d) + + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); + /* warn if the interrupt is enabled */ + WARN_ON(val & mask); + val = readl(gic_dist_base(d) + + GIC_DIST_PENDING_SET + (gic_irq(d) / 32) * 4); + spin_unlock(&irq_controller_lock); + return (bool) (val & mask); +} + +/* before calling this function the interrupts should be disabled + * and the irq must be disabled at gic to avoid spurious interrupts */ +void gic_clear_spi_pending(unsigned int irq) +{ + struct gic_chip_data *gic_data = &gic_data[0]; + struct irq_data *d = irq_get_irq_data(irq); + + u32 mask, val; + WARN_ON(!irqs_disabled()); + spin_lock(&irq_controller_lock); + mask = 1 << (gic_irq(d) % 32); + val = readl(gic_dist_base(d) + + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); + /* warn if the interrupt is enabled */ + WARN_ON(val & mask); + writel(mask, gic_dist_base(d) + + GIC_DIST_PENDING_CLEAR + (gic_irq(d) / 32) * 4); + spin_unlock(&irq_controller_lock); +} diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c index ae5fe7292e0d4..1176613650360 100644 --- a/arch/arm/common/vic.c +++ b/arch/arm/common/vic.c @@ -22,17 +22,16 @@ #include #include #include -#include +#include #include #include #include #include -#if defined(CONFIG_PM) +#ifdef CONFIG_PM /** * struct vic_device - VIC PM device - * @sysdev: The system device which is registered. * @irq: The IRQ number for the base of the VIC. * @base: The register base for the VIC. * @resume_sources: A bitmask of interrupts for resume. @@ -43,8 +42,6 @@ * @protect: Save for VIC_PROTECT. 
*/ struct vic_device { - struct sys_device sysdev; - void __iomem *base; int irq; u32 resume_sources; @@ -59,11 +56,6 @@ struct vic_device { static struct vic_device vic_devices[CONFIG_ARM_VIC_NR]; static int vic_id; - -static inline struct vic_device *to_vic(struct sys_device *sys) -{ - return container_of(sys, struct vic_device, sysdev); -} #endif /* CONFIG_PM */ /** @@ -85,10 +77,9 @@ static void vic_init2(void __iomem *base) writel(32, base + VIC_PL190_DEF_VECT_ADDR); } -#if defined(CONFIG_PM) -static int vic_class_resume(struct sys_device *dev) +#ifdef CONFIG_PM +static void resume_one_vic(struct vic_device *vic) { - struct vic_device *vic = to_vic(dev); void __iomem *base = vic->base; printk(KERN_DEBUG "%s: resuming vic at %p\n", __func__, base); @@ -107,13 +98,18 @@ static int vic_class_resume(struct sys_device *dev) writel(vic->soft_int, base + VIC_INT_SOFT); writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR); +} - return 0; +static void vic_resume(void) +{ + int id; + + for (id = vic_id - 1; id >= 0; id--) + resume_one_vic(vic_devices + id); } -static int vic_class_suspend(struct sys_device *dev, pm_message_t state) +static void suspend_one_vic(struct vic_device *vic) { - struct vic_device *vic = to_vic(dev); void __iomem *base = vic->base; printk(KERN_DEBUG "%s: suspending vic at %p\n", __func__, base); @@ -128,14 +124,21 @@ static int vic_class_suspend(struct sys_device *dev, pm_message_t state) writel(vic->resume_irqs, base + VIC_INT_ENABLE); writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR); +} + +static int vic_suspend(void) +{ + int id; + + for (id = 0; id < vic_id; id++) + suspend_one_vic(vic_devices + id); return 0; } -struct sysdev_class vic_class = { - .name = "vic", - .suspend = vic_class_suspend, - .resume = vic_class_resume, +struct syscore_ops vic_syscore_ops = { + .suspend = vic_suspend, + .resume = vic_resume, }; /** @@ -147,30 +150,8 @@ struct sysdev_class vic_class = { */ static int __init vic_pm_init(void) { - struct vic_device *dev = vic_devices; - int err; - int id; - - if (vic_id == 0) - return 0; - - err = sysdev_class_register(&vic_class); - if (err) { - printk(KERN_ERR "%s: cannot register class\n", __func__); - return err; - } - - for (id = 0; id < vic_id; id++, dev++) { - dev->sysdev.id = id; - dev->sysdev.cls = &vic_class; - - err = sysdev_register(&dev->sysdev); - if (err) { - printk(KERN_ERR "%s: failed to register device\n", - __func__); - return err; - } - } + if (vic_id > 0) + register_syscore_ops(&vic_syscore_ops); return 0; } diff --git a/arch/arm/configs/cayniarb_bravo_defconfig b/arch/arm/configs/cayniarb_bravo_defconfig new file mode 100644 index 0000000000000..dbe07e0d17edd --- /dev/null +++ b/arch/arm/configs/cayniarb_bravo_defconfig @@ -0,0 +1,2133 @@ +# +# Automatically generated make config: don't edit +# Linux/arm 2.6.38.8 Kernel Configuration +# Sun Mar 25 18:22:22 2012 +# +CONFIG_ARM=y +CONFIG_SYS_SUPPORTS_APM_EMULATION=y +CONFIG_GENERIC_GPIO=y +# CONFIG_ARCH_USES_GETTIMEOFFSET is not set +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_HAVE_PROC_CPU=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_ARCH_HAS_CPUFREQ=y +CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_VECTORS_BASE=0xffff0000 +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y 
+CONFIG_HAVE_IRQ_WORK=y + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="" +CONFIG_LOCALVERSION="-cayniarb-ics-stock" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +# CONFIG_KERNEL_GZIP is not set +# CONFIG_KERNEL_LZMA is not set +CONFIG_KERNEL_XZ=y +# CONFIG_KERNEL_LZO is not set +CONFIG_SWAP=y +# CONFIG_SYSVIPC is not set +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set +CONFIG_HAVE_GENERIC_HARDIRQS=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_HARDIRQS=y +# CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED is not set +CONFIG_HAVE_SPARSE_IRQ=y +# CONFIG_GENERIC_PENDING_IRQ is not set +# CONFIG_AUTO_IRQ_AFFINITY is not set +# CONFIG_IRQ_PER_CPU is not set +# CONFIG_SPARSE_IRQ is not set + +# +# RCU Subsystem +# +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_TINY_RCU is not set +CONFIG_TINY_PREEMPT_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_TRACE is not set +# CONFIG_TREE_RCU_TRACE is not set +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=1 +CONFIG_RCU_BOOST_DELAY=500 +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_CGROUPS=y +CONFIG_CGROUP_DEBUG=y +# CONFIG_CGROUP_NS is not set +CONFIG_CGROUP_FREEZER=y +# CONFIG_CGROUP_DEVICE is not set +# CONFIG_CPUSETS is not set +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +# CONFIG_CGROUP_MEM_RES_CTLR is not set +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +# CONFIG_BLK_CGROUP is not set +# CONFIG_NAMESPACES is not set +# CONFIG_SCHED_AUTOGROUP is not set +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_EXPERT=y +CONFIG_EMBEDDED=y +CONFIG_UID16=y +# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +# CONFIG_ELF_CORE is not set +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_ASHMEM=y +CONFIG_AIO=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +# CONFIG_PERF_EVENTS is not set +# CONFIG_PERF_COUNTERS is not set +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_COMPAT_BRK=y +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_DMA_API_DEBUG=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +CONFIG_LBDAF=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_IOSCHED_BFQ=y 
+# CONFIG_CGROUP_BFQIO is not set +# CONFIG_DEFAULT_DEADLINE is not set +# CONFIG_DEFAULT_CFQ is not set +CONFIG_DEFAULT_BFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="bfq" +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +# CONFIG_INLINE_SPIN_UNLOCK is not set +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_READ_TRYLOCK is not set +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +# CONFIG_INLINE_READ_UNLOCK is not set +# CONFIG_INLINE_READ_UNLOCK_BH is not set +# CONFIG_INLINE_READ_UNLOCK_IRQ is not set +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_TRYLOCK is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set +# CONFIG_INLINE_WRITE_UNLOCK is not set +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +# CONFIG_MUTEX_SPIN_ON_OWNER is not set +CONFIG_FREEZER=y + +# +# System Type +# +CONFIG_MMU=y +# CONFIG_ARCH_AAEC2000 is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_REALVIEW is not set +# CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_VEXPRESS is not set +# CONFIG_ARCH_AT91 is not set +# CONFIG_ARCH_BCMRING is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CNS3XXX is not set +# CONFIG_ARCH_GEMINI is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_EP93XX is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_MXS is not set +# CONFIG_ARCH_STMP3XXX is not set +# CONFIG_ARCH_NETX is not set +# CONFIG_ARCH_H720X is not set +# CONFIG_ARCH_IOP13XX is not set +# CONFIG_ARCH_IOP32X is not set +# CONFIG_ARCH_IOP33X is not set +# CONFIG_ARCH_IXP23XX is not set +# CONFIG_ARCH_IXP2000 is not set +# CONFIG_ARCH_IXP4XX is not set +# CONFIG_ARCH_DOVE is not set +# CONFIG_ARCH_KIRKWOOD is not set +# CONFIG_ARCH_LOKI is not set +# CONFIG_ARCH_LPC32XX is not set +# CONFIG_ARCH_MV78XX0 is not set +# CONFIG_ARCH_ORION5X is not set +# CONFIG_ARCH_MMP is not set +# CONFIG_ARCH_KS8695 is not set +# CONFIG_ARCH_NS9XXX is not set +# CONFIG_ARCH_W90X900 is not set +# CONFIG_ARCH_NUC93X is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_PNX4008 is not set +# CONFIG_ARCH_PXA is not set +CONFIG_ARCH_MSM=y +# CONFIG_ARCH_SHMOBILE is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_S3C64XX is not set +# CONFIG_ARCH_S5P64X0 is not set +# CONFIG_ARCH_S5P6442 is not set +# CONFIG_ARCH_S5PC100 is not set +# CONFIG_ARCH_S5PV210 is not set +# CONFIG_ARCH_S5PV310 is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_TCC_926 is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_U300 is not set +# CONFIG_ARCH_U8500 is not set +# CONFIG_ARCH_NOMADIK is not set +# CONFIG_ARCH_DAVINCI is not set +# CONFIG_ARCH_OMAP is not set +# CONFIG_PLAT_SPEAR is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_ARCH_MSM7X00A is not set +# CONFIG_ARCH_MSM7X30 is not set +CONFIG_ARCH_QSD8X50=y +# 
CONFIG_ARCH_MSM8X60 is not set +CONFIG_ARCH_MSM_SCORPION=y +CONFIG_HAS_MSM_DEBUG_UART_PHYS=y +CONFIG_MSM_MDP31=y +# CONFIG_PERFLOCK is not set + +# +# Qualcomm MSM Board Type +# +# CONFIG_MACH_SWORDFISH is not set +CONFIG_MACH_QSD8X50_SURF=y +# CONFIG_MACH_QSD8X50A_ST1_5 is not set +CONFIG_MSM_DEBUG_UART=1 +# CONFIG_MSM_DEBUG_UART_NONE is not set +CONFIG_MSM_DEBUG_UART1=y +# CONFIG_MSM_DEBUG_UART2 is not set +# CONFIG_MSM_DEBUG_UART3 is not set +CONFIG_MSM_PROC_COMM=y +# CONFIG_MACH_MAHIMAHI is not set +# CONFIG_MACH_BRAVO_NONE is not set +CONFIG_MACH_BRAVO=y +# CONFIG_MACH_BRAVOC is not set +# CONFIG_MACH_INCREDIBLE is not set +# CONFIG_MACH_INCREDIBLEC is not set +# CONFIG_MACH_SUPERSONIC is not set +# CONFIG_MACH_QSD8X50_FFA is not set +# CONFIG_HTC_HEADSET is not set +CONFIG_HTC_35MM_JACK=y +# CONFIG_HTC_BATTCHG is not set +CONFIG_HTC_PWRSPLY=y +# CONFIG_HTC_PWRSINK is not set +# CONFIG_HTC_POWER_COLLAPSE_MAGIC is not set +# CONFIG_HTC_ONMODE_CHARGING is not set +CONFIG_CACHE_FLUSH_RANGE_LIMIT=0x40000 +CONFIG_MSM7X00A_USE_GP_TIMER=y +# CONFIG_MSM7X00A_USE_DG_TIMER is not set +CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE_SUSPEND=y +# CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE is not set +# CONFIG_MSM7X00A_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_SLEEP_MODE=0 +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE_SUSPEND is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE=y +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE=1 +CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME=50000000 +CONFIG_MSM7X00A_IDLE_SPIN_TIME=80000 +CONFIG_MSM_IDLE_STATS=y +CONFIG_MSM_IDLE_STATS_FIRST_BUCKET=62500 +CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT=2 +CONFIG_MSM_IDLE_STATS_BUCKET_COUNT=10 +CONFIG_MSM_FIQ_SUPPORT=y +# CONFIG_MSM_SERIAL_DEBUGGER is not set +CONFIG_MSM_SMD=y +CONFIG_MSM_GPIOMUX=y +CONFIG_MSM_DAL=y +CONFIG_MSM_ONCRPCROUTER=y +CONFIG_MSM_CPU_FREQ_SET_MIN_MAX=y +CONFIG_MSM_CPU_FREQ_MAX=998400 +CONFIG_MSM_CPU_FREQ_MIN=245000 +# CONFIG_AXI_SCREEN_POLICY is not set +# CONFIG_MSM_HW3D is not set +CONFIG_HTC_ACOUSTIC_QSD=y +CONFIG_MSM_QDSP6=y +CONFIG_WIFI_CONTROL_FUNC=y +# CONFIG_WIFI_MEM_PREALLOC is not set +CONFIG_ARCH_MSM_FLASHLIGHT=y +# CONFIG_MICROP_COMMON is not set +# CONFIG_HTC_HEADSET_MGR is not set +# CONFIG_VIRTUAL_KPANIC_PARTITION is not set + +# +# System MMU +# + +# +# Processor Type +# +CONFIG_CPU_32v6K=y +CONFIG_CPU_V7=y +CONFIG_CPU_32v7=y +CONFIG_CPU_ABRT_EV7=y +CONFIG_CPU_PABRT_V7=y +CONFIG_CPU_CACHE_V7=y +CONFIG_CPU_CACHE_VIPT=y +CONFIG_CPU_COPY_V6=y +CONFIG_CPU_TLB_V7=y +CONFIG_VERIFY_PERMISSION_FAULT=y +CONFIG_CPU_HAS_ASID=y +CONFIG_CPU_CP15=y +CONFIG_CPU_CP15_MMU=y + +# +# Processor Features +# +CONFIG_ARM_THUMB=y +CONFIG_ARM_THUMBEE=y +# CONFIG_SWP_EMULATE is not set +# CONFIG_CPU_ICACHE_DISABLE is not set +# CONFIG_CPU_DCACHE_DISABLE is not set +# CONFIG_CPU_CACHE_ERR_REPORT is not set +# CONFIG_CPU_BPREDICT_DISABLE is not set +CONFIG_ARM_L1_CACHE_SHIFT=5 +CONFIG_ARM_DMA_MEM_BUFFERABLE=y +CONFIG_CPU_HAS_PMU=y +CONFIG_ARM_ERRATA_430973=y +CONFIG_ARM_ERRATA_458693=y +CONFIG_ARM_ERRATA_460075=y +CONFIG_ARM_ERRATA_743622=y +# CONFIG_FIQ_DEBUGGER is not set + +# +# Bus support +# +# CONFIG_PCI_SYSCALL is not set +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Kernel 
Features +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_VMSPLIT_3G=y +# CONFIG_VMSPLIT_2G is not set +# CONFIG_VMSPLIT_1G is not set +CONFIG_PAGE_OFFSET=0xC0000000 +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_HZ=100 +# CONFIG_THUMB2_KERNEL is not set +CONFIG_AEABI=y +# CONFIG_OABI_COMPAT is not set +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set +# CONFIG_HIGHMEM is not set +CONFIG_VMALLOC_RESERVE=0x08000000 +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_COMPACTION is not set +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_VIRT_TO_BUS=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 +CONFIG_NEED_PER_CPU_KM=y +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_ALIGNMENT_TRAP=y +# CONFIG_UACCESS_WITH_MEMCPY is not set +# CONFIG_SECCOMP is not set +# CONFIG_CC_STACKPROTECTOR is not set +# CONFIG_DEPRECATED_PARAM_STRUCT is not set + +# +# Boot options +# +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_CMDLINE="mem=64M console=ttyMSM0" +# CONFIG_CMDLINE_FORCE is not set +# CONFIG_XIP_KERNEL is not set +# CONFIG_KEXEC is not set +# CONFIG_CRASH_DUMP is not set +# CONFIG_AUTO_ZRELADDR is not set + +# +# CPU Power Management +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TABLE=y +# CONFIG_CPU_FREQ_DEBUG is not set +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_STAT_DETAILS=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_INTERACTIVE=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_IDLE is not set +CONFIG_CPU_FREQ_MSM=y + +# +# Floating point emulation +# + +# +# At least one emulation must be selected +# +CONFIG_VFP=y +CONFIG_VFPv3=y +CONFIG_NEON=y + +# +# Userspace binary formats +# +CONFIG_BINFMT_ELF=y +CONFIG_HAVE_AOUT=y +# CONFIG_BINFMT_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options +# +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_SLEEP=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HAS_WAKELOCK=y +CONFIG_HAS_EARLYSUSPEND=y +CONFIG_WAKELOCK=y +CONFIG_WAKELOCK_STAT=y +CONFIG_USER_WAKELOCK=y +CONFIG_EARLYSUSPEND=y +# CONFIG_NO_USER_SPACE_SCREEN_ACCESS_CONTROL is not set +CONFIG_FB_EARLYSUSPEND=y +# CONFIG_APM_EMULATION is not set +# CONFIG_PM_RUNTIME is not set +CONFIG_PM_OPS=y +# CONFIG_SUSPEND_TIME is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=y +CONFIG_NET_KEY=y +# CONFIG_NET_KEY_MIGRATE is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +# 
CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +CONFIG_INET_ESP=y +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +CONFIG_IPV6=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +# CONFIG_IPV6_ROUTE_INFO is not set +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_INET6_XFRM_TUNNEL=y +CONFIG_INET6_TUNNEL=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +CONFIG_IPV6_SIT=y +# CONFIG_IPV6_SIT_6RD is not set +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=y +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +# CONFIG_IPV6_MROUTE is not set +CONFIG_ANDROID_PARANOID_NETWORK=y +CONFIG_NET_ACTIVITY_STATS=y +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_NETLINK=y +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_TPROXY=y +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=y +CONFIG_NETFILTER_XT_CONNMARK=y + +# +# Xtables targets +# +# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +# CONFIG_NETFILTER_XT_TARGET_CT is not set +# CONFIG_NETFILTER_XT_TARGET_DSCP is not set +CONFIG_NETFILTER_XT_TARGET_HL=y +# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set +CONFIG_NETFILTER_XT_TARGET_LED=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set +# CONFIG_NETFILTER_XT_TARGET_TEE is not set +# CONFIG_NETFILTER_XT_TARGET_TPROXY is not set +CONFIG_NETFILTER_XT_TARGET_TRACE=y +# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set +# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set + +# +# Xtables matches +# +# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +# CONFIG_NETFILTER_XT_MATCH_CPU is not set +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +# CONFIG_NETFILTER_XT_MATCH_DSCP is not set +# CONFIG_NETFILTER_XT_MATCH_ESP is not set +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_HL=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y 
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set +# CONFIG_NETFILTER_XT_MATCH_OSF is not set +# CONFIG_NETFILTER_XT_MATCH_OWNER is not set +CONFIG_NETFILTER_XT_MATCH_POLICY=y +# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set +# CONFIG_NETFILTER_XT_MATCH_REALM is not set +# CONFIG_NETFILTER_XT_MATCH_RECENT is not set +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +# CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_CONNTRACK_PROC_COMPAT=y +# CONFIG_IP_NF_QUEUE is not set +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_ADDRTYPE=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_LOG=y +# CONFIG_IP_NF_TARGET_ULOG is not set +CONFIG_NF_NAT=y +CONFIG_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +# CONFIG_NF_NAT_SNMP_BASIC is not set +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_GRE=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_FTP=y +CONFIG_NF_NAT_IRC=y +CONFIG_NF_NAT_TFTP=y +CONFIG_NF_NAT_AMANDA=y +CONFIG_NF_NAT_PPTP=y +CONFIG_NF_NAT_H323=y +CONFIG_NF_NAT_SIP=y +CONFIG_IP_NF_MANGLE=y +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +# CONFIG_IP_NF_TARGET_ECN is not set +# CONFIG_IP_NF_TARGET_TTL is not set +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV6=y +CONFIG_NF_CONNTRACK_IPV6=y +# CONFIG_IP6_NF_QUEUE is not set +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_EUI64=y +CONFIG_IP6_NF_MATCH_FRAG=y +CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +CONFIG_IP6_NF_MATCH_RT=y +CONFIG_IP6_NF_TARGET_HL=y +CONFIG_IP6_NF_TARGET_LOG=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +# CONFIG_IP6_NF_RAW is not set +# CONFIG_BRIDGE_NF_EBTABLES is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +CONFIG_STP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +# CONFIG_NET_SCH_CBQ is not set +CONFIG_NET_SCH_HTB=y +# CONFIG_NET_SCH_HFSC is not set +# CONFIG_NET_SCH_PRIO is not set +# CONFIG_NET_SCH_MULTIQ is not set +# CONFIG_NET_SCH_RED is not set +# CONFIG_NET_SCH_SFQ is not set +# CONFIG_NET_SCH_TEQL is not set +# CONFIG_NET_SCH_TBF is not set +# 
CONFIG_NET_SCH_GRED is not set +# CONFIG_NET_SCH_DSMARK is not set +# CONFIG_NET_SCH_NETEM is not set +# CONFIG_NET_SCH_DRR is not set +CONFIG_NET_SCH_INGRESS=y + +# +# Classification +# +CONFIG_NET_CLS=y +# CONFIG_NET_CLS_BASIC is not set +# CONFIG_NET_CLS_TCINDEX is not set +# CONFIG_NET_CLS_ROUTE4 is not set +# CONFIG_NET_CLS_FW is not set +CONFIG_NET_CLS_U32=y +# CONFIG_CLS_U32_PERF is not set +# CONFIG_CLS_U32_MARK is not set +# CONFIG_NET_CLS_RSVP is not set +# CONFIG_NET_CLS_RSVP6 is not set +# CONFIG_NET_CLS_FLOW is not set +# CONFIG_NET_CLS_CGROUP is not set +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +# CONFIG_NET_EMATCH_CMP is not set +# CONFIG_NET_EMATCH_NBYTE is not set +CONFIG_NET_EMATCH_U32=y +# CONFIG_NET_EMATCH_META is not set +# CONFIG_NET_EMATCH_TEXT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=y +# CONFIG_GACT_PROB is not set +CONFIG_NET_ACT_MIRRED=y +# CONFIG_NET_ACT_IPT is not set +# CONFIG_NET_ACT_NAT is not set +# CONFIG_NET_ACT_PEDIT is not set +# CONFIG_NET_ACT_SIMP is not set +# CONFIG_NET_ACT_SKBEDIT is not set +# CONFIG_NET_ACT_CSUM is not set +# CONFIG_NET_CLS_IND is not set +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set +# CONFIG_BATMAN_ADV is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +CONFIG_BT=y +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +# CONFIG_BT_BNEP_MC_FILTER is not set +# CONFIG_BT_BNEP_PROTO_FILTER is not set +CONFIG_BT_HIDP=y + +# +# Bluetooth device drivers +# +# CONFIG_BT_HCIBTSDIO is not set +CONFIG_BT_HCIUART=y +CONFIG_BT_HCIUART_H4=y +# CONFIG_BT_HCIUART_BCSP is not set +# CONFIG_BT_HCIUART_ATH3K is not set +CONFIG_BT_HCIUART_LL=y +# CONFIG_BT_HCIVHCI is not set +# CONFIG_BT_MRVL is not set +# CONFIG_AF_RXRPC is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WIRELESS_EXT=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_WEXT_PRIV=y +# CONFIG_CFG80211 is not set +CONFIG_WIRELESS_EXT_SYSFS=y +# CONFIG_LIB80211 is not set + +# +# CFG80211 needs to be enabled for MAC80211 +# + +# +# Some wireless drivers require a rate control algorithm +# +# CONFIG_WIMAX is not set +CONFIG_RFKILL=y +# CONFIG_RFKILL_PM is not set +CONFIG_RFKILL_LEDS=y +# CONFIG_RFKILL_INPUT is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="" +# CONFIG_DEVTMPFS is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +CONFIG_GENLOCK=y +CONFIG_GENLOCK_MISCDEVICE=y +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not 
set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +CONFIG_MTD_MSM_NAND=y +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +CONFIG_MTD_NAND_IDS=y +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_UBI is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=y + +# +# DRBD disabled because PROC_FS, INET or CONNECTOR not selected +# +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_MG_DISK is not set +# CONFIG_BLK_DEV_RBD is not set +CONFIG_MISC_DEVICES=y +# CONFIG_AD525X_DPOT is not set +CONFIG_ANDROID_PMEM=y +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +CONFIG_KERNEL_DEBUGGER_CORE=y +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1780 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_SENSORS_AK8975 is not set +CONFIG_SENSORS_AKM8973=y +# CONFIG_SENSORS_AKM8976 is not set +# CONFIG_VP_A1026 is not set +# CONFIG_DS1682 is not set +CONFIG_UID_STAT=y +# CONFIG_BMP085 is not set +# CONFIG_WL127X_RFKILL is not set +# CONFIG_APANIC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_IWMC3200TOP is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_ATA is not set +CONFIG_MD=y +# CONFIG_BLK_DEV_MD is not set +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=y +# CONFIG_DM_SNAPSHOT is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_RAID is not set +# CONFIG_DM_ZERO is not set +# CONFIG_DM_MULTIPATH is not set +# CONFIG_DM_DELAY is not set +CONFIG_DM_UEVENT=y +CONFIG_NETDEVICES=y +CONFIG_IFB=y +CONFIG_DUMMY=y +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +CONFIG_TUN=y +# CONFIG_VETH is not set +CONFIG_MII=y +# CONFIG_PHYLIB is not set +CONFIG_NET_ETHERNET=y +# CONFIG_AX88796 is not set +CONFIG_SMC91X=y +# CONFIG_DM9000 is not set +# CONFIG_ETHOC is not set +CONFIG_SMC911X=y +# CONFIG_SMSC911X is not set +# CONFIG_DNET is not set +# CONFIG_IBM_NEW_EMAC_ZMII is not set +# CONFIG_IBM_NEW_EMAC_RGMII is not set 
+# CONFIG_IBM_NEW_EMAC_TAH is not set +# CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set +# CONFIG_B44 is not set +# CONFIG_KS8851_MLL is not set +CONFIG_NETDEV_1000=y +# CONFIG_STMMAC_ETH is not set +CONFIG_NETDEV_10000=y +CONFIG_WLAN=y +CONFIG_BCM4329=m +CONFIG_BCM4329_FW_PATH="/vendor/firmware/fw_bcm4329.bin" +CONFIG_BCM4329_NVRAM_PATH="/proc/calibration" +# CONFIG_HOSTAP is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set + +# +# CAIF transport drivers +# +CONFIG_PPP=y +# CONFIG_PPP_MULTILINK is not set +# CONFIG_PPP_FILTER is not set +CONFIG_PPP_ASYNC=y +# CONFIG_PPP_SYNC_TTY is not set +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_MPPE=y +# CONFIG_PPPOE is not set +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +# CONFIG_SLIP is not set +CONFIG_SLHC=y +# CONFIG_NETCONSOLE is not set +CONFIG_MSM_RMNET=y +# CONFIG_MSM_RMNET_DEBUG is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +CONFIG_GAN_ETH=y +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set +CONFIG_INPUT_KEYRESET=y + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ATMEL is not set +# CONFIG_TOUCHSCREEN_COMPATIBLE_REPORT is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_I2C is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_ELAN_I2C_8232 is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_QT602240 is not set +# CONFIG_TOUCHSCREEN_MSM is not set +CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI=y +CONFIG_TOUCHSCREEN_DUPLICATED_FILTER=y +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_W90X900 is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATI_REMOTE is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +CONFIG_INPUT_KEYCHORD=y +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_CAPELLA_CM3602 is 
not set +CONFIG_INPUT_CAPELLA_CM3602_HTC=y +CONFIG_LIGHTSENSOR_MICROP=y +CONFIG_INPUT_OPTICALJOYSTICK=y +CONFIG_OPTICALJOYSTICK_CRUCIAL=y +CONFIG_OPTICALJOYSTICK_CRUCIAL_uP=y +# CONFIG_OPTICALJOYSTICK_CRUCIAL_SPI is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_N_GSM is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_SERIAL_MSM=y +CONFIG_SERIAL_MSM_CONSOLE=y +# CONFIG_SERIAL_MSM_CLOCK_CONTROL is not set +CONFIG_SERIAL_MSM_HS=y +CONFIG_SERIAL_BCM_BT_LPM=y +# CONFIG_SERIAL_TIMBERDALE is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_TTY_PRINTK is not set +# CONFIG_HVC_DCC is not set +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_DCC_TTY is not set +# CONFIG_RAMOOPS is not set +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +# CONFIG_I2C_CHARDEV is not set +# CONFIG_I2C_MUX is not set +CONFIG_I2C_HELPER_AUTO=y + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE is not set +# CONFIG_I2C_GPIO is not set +CONFIG_I2C_MSM=y +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_SPI is not set + +# +# PPS support +# +# CONFIG_PPS is not set + +# +# PPS generators support +# +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set + +# +# Memory mapped GPIO expanders: +# +# CONFIG_GPIO_BASIC_MMIO is not set +# CONFIG_GPIO_IT8761E is not set + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_SX150X is not set +# CONFIG_GPIO_ADP5588 is not set + +# +# PCI GPIO expanders: +# + +# +# SPI GPIO expanders: +# + +# +# AC97 GPIO expanders: +# + +# +# MODULbus GPIO expanders: +# +CONFIG_W1=y + +# +# 1-wire Bus Masters +# +CONFIG_W1_MASTER_DS2482=y +# CONFIG_W1_MASTER_DS1WM is not set +# CONFIG_W1_MASTER_GPIO is not set + +# +# 1-wire Slaves +# +# CONFIG_W1_SLAVE_THERM is not set +# CONFIG_W1_SLAVE_SMEM is not set +# CONFIG_W1_SLAVE_DS2423 is not set +# CONFIG_W1_SLAVE_DS2431 is not set +# CONFIG_W1_SLAVE_DS2433 is not set +# CONFIG_W1_SLAVE_DS2760 is not set +# CONFIG_W1_SLAVE_BQ27000 is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2782 is not set +CONFIG_BATTERY_DS2784=y +# CONFIG_BATTERY_BQ20Z75 is not set +# CONFIG_BATTERY_BQ27x00 is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is 
not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set +CONFIG_MFD_SUPPORT=y +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_ASIC3 is not set +# CONFIG_HTC_EGPIO is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_TPS65200 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_MFD_STMPE is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_T7L66XB is not set +# CONFIG_MFD_TC6387XB is not set +# CONFIG_MFD_TC6393XB is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_WL1273_CORE is not set +CONFIG_REGULATOR=y +CONFIG_REGULATOR_DEBUG=y +# CONFIG_REGULATOR_DUMMY is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_BQ24022 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +CONFIG_REGULATOR_TPS65023=y +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_AD5398 is not set +CONFIG_MEDIA_SUPPORT=y + +# +# Multimedia core support +# +# CONFIG_VIDEO_DEV is not set +# CONFIG_DVB_CORE is not set +# CONFIG_VIDEO_MEDIA is not set + +# +# Multimedia drivers +# +CONFIG_RC_CORE=y +CONFIG_LIRC=y +CONFIG_RC_MAP=y +CONFIG_IR_NEC_DECODER=y +CONFIG_IR_RC5_DECODER=y +CONFIG_IR_RC6_DECODER=y +CONFIG_IR_JVC_DECODER=y +CONFIG_IR_SONY_DECODER=y +CONFIG_IR_RC5_SZ_DECODER=y +CONFIG_IR_LIRC_CODEC=y +# CONFIG_IR_IMON is not set +# CONFIG_IR_MCEUSB is not set +# CONFIG_IR_STREAMZAP is not set +# CONFIG_RC_LOOPBACK is not set + +# +# Qualcomm MSM Camera And Video +# +CONFIG_MSM_CAMERA=y +CONFIG_720P_CAMERA=y +# CONFIG_MSM_CAMERA_DEBUG is not set + +# +# Camera Sensor Selection +# +# CONFIG_MT9T013 is not set +# CONFIG_MT9D112 is not set +# CONFIG_MT9P012 is not set +CONFIG_S5K3E2FX=y +# CONFIG_S5K6AAFX is not set +# CONFIG_OV8810 is not set +# CONFIG_OV9665 is not set +# CONFIG_S5K3H1GX is not set + +# +# Graphics support +# +# CONFIG_DRM is not set +CONFIG_MSM_KGSL=y +# CONFIG_MSM_KGSL_CFF_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX is not set +CONFIG_MSM_KGSL_MMU=y +# CONFIG_KGSL_PER_PROCESS_PAGE_TABLE is not set +CONFIG_MSM_KGSL_PAGE_TABLE_SIZE=0xFFF0000 +CONFIG_MSM_KGSL_MMU_PAGE_FAULT=y +# CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES is not set +# CONFIG_VGASTATE is not set +CONFIG_VIDEO_OUTPUT_CONTROL=y +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# 
CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_WMT_GE_ROPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_FB_MSM=y +CONFIG_FB_MSM_LEGACY_MDP=y +CONFIG_FB_MSM_MDP_PPP=y +CONFIG_FB_MSM_LCDC=y +# CONFIG_FB_MSM_MDDI is not set +# CONFIG_MSM_HDMI is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set +# CONFIG_LOGO is not set +# CONFIG_SOUND is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +# CONFIG_HIDRAW is not set +# CONFIG_HID_PID is not set + +# +# Special HID drivers +# +CONFIG_HID_APPLE=y +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_WACOM is not set +CONFIG_USB_SUPPORT=y +CONFIG_USB_ARCH_HAS_HCD=y +# CONFIG_USB_ARCH_HAS_OHCI is not set +CONFIG_USB_ARCH_HAS_EHCI=y +# CONFIG_USB is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_MUSB_HDRC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_GADGET_SELECTED=y +# CONFIG_USB_GADGET_R8A66597 is not set +# CONFIG_USB_GADGET_PXA_U2O is not set +# CONFIG_USB_GADGET_M66592 is not set +# CONFIG_USB_GADGET_CI13XXX_MSM is not set +CONFIG_USB_GADGET_MSM_72K=y +CONFIG_USB_MSM_72K=y +# CONFIG_USB_MSM_72K_HTC is not set +CONFIG_USB_GADGET_DUALSPEED=y +# CONFIG_USB_ZERO is not set +# CONFIG_USB_ETH is not set +# CONFIG_USB_G_NCM is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_FILE_STORAGE is not set +# CONFIG_USB_MASS_STORAGE is not set +# CONFIG_USB_G_SERIAL is not set +# CONFIG_USB_G_PRINTER is not set +CONFIG_USB_ANDROID=y +# CONFIG_USB_ANDROID_ACM is not set +CONFIG_USB_ANDROID_ADB=y +CONFIG_USB_ANDROID_DIAG=y +CONFIG_USB_ANDROID_MASS_STORAGE=y +# CONFIG_USB_ANDROID_MTP is not set +CONFIG_USB_ANDROID_RNDIS=y +CONFIG_USB_ANDROID_RNDIS_WCEIS=y +CONFIG_USB_ANDROID_ACCESSORY=y +CONFIG_USB_CSW_HACK=y +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set + +# +# OTG and related infrastructure +# +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ULPI is not set +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_MSM_OTG_72K is not set +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_UNSAFE_RESUME=y +# CONFIG_MMC_CLKGATE is not set +CONFIG_MMC_EMBEDDED_SDIO=y +CONFIG_MMC_PARANOID_SD_INIT=y + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=8 +# CONFIG_MMC_BLOCK_BOUNCE is not set +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_SDHCI is not set +CONFIG_MMC_MSM=y +# CONFIG_MMC_DW is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y + +# +# LED drivers +# +# CONFIG_LEDS_PCA9532 is not set +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_GPIO_PLATFORM=y +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 
is not set +CONFIG_LEDS_CPLD=y +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_LT3593 is not set +CONFIG_LEDS_TRIGGERS=y + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set +CONFIG_LEDS_TRIGGER_SLEEP=y + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_NFC_DEVICES is not set +CONFIG_SWITCH=y +CONFIG_SWITCH_GPIO=y +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +# CONFIG_RTC_INTF_SYSFS is not set +# CONFIG_RTC_INTF_PROC is not set +# CONFIG_RTC_INTF_DEV is not set +CONFIG_RTC_INTF_ALARM=y +CONFIG_RTC_INTF_ALARM_DEV=y +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set + +# +# SPI RTC drivers +# + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_CMOS is not set +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_MSM7X00A=y +# CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set +CONFIG_STAGING=y +# CONFIG_STAGING_EXCLUDE_BUILD is not set +# CONFIG_ECHO is not set +# CONFIG_BRCM80211 is not set +# CONFIG_COMEDI is not set + +# +# Android +# +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_LOGGER=y +CONFIG_ANDROID_RAM_CONSOLE=y +CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE=128 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE=16 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE=8 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL=0x11d +# CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT is not set +CONFIG_ANDROID_TIMED_OUTPUT=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +# CONFIG_POHMELFS is not set +# CONFIG_IIO is not set +CONFIG_XVMALLOC=y +CONFIG_ZRAM=y +# CONFIG_ZRAM_DEBUG is not set +# CONFIG_FB_SM7XX is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_ST_BT is not set +# CONFIG_LIRC_STAGING is not set +CONFIG_MACH_NO_WESTBRIDGE=y +# CONFIG_ATH6K_LEGACY is not set +# CONFIG_FT1000 is not set + +# +# Speakup console speech +# +# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set + +# +# File systems +# +# 
CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT23=y +CONFIG_EXT4_FS_XATTR=y +# CONFIG_EXT4_FS_POSIX_ACL is not set +# CONFIG_EXT4_FS_SECURITY is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_FS_POSIX_ACL is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +# CONFIG_DNOTIFY is not set +CONFIG_INOTIFY_USER=y +# CONFIG_FANOTIFY is not set +# CONFIG_QUOTA is not set +# CONFIG_QUOTACTL is not set +# CONFIG_AUTOFS4_FS is not set +CONFIG_FUSE_FS=y +# CONFIG_CUSE is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +# CONFIG_MSDOS_FS is not set +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_YAFFS_FS=y +CONFIG_YAFFS_YAFFS1=y +# CONFIG_YAFFS_9BYTE_TAGS is not set +# CONFIG_YAFFS_DOES_ECC is not set +CONFIG_YAFFS_YAFFS2=y +CONFIG_YAFFS_AUTO_YAFFS2=y +CONFIG_YAFFS_DISABLE_TAGS_ECC=y +# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set +# CONFIG_YAFFS_EMPTY_LOST_AND_FOUND is not set +# CONFIG_YAFFS_DISABLE_BLOCK_REFRESHING is not set +# CONFIG_YAFFS_DISABLE_BACKGROUND is not set +CONFIG_YAFFS_XATTR=y +# CONFIG_JFFS2_FS is not set +# CONFIG_LOGFS is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +# CONFIG_NFS_FS is not set +# CONFIG_NFSD is not set +# CONFIG_CEPH_FS is not set +CONFIG_CIFS=y +# CONFIG_CIFS_STATS is not set +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_ACL is not set +# CONFIG_CIFS_EXPERIMENTAL is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# 
CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +CONFIG_NLS_UTF8=y + +# +# Kernel hacking +# +CONFIG_PRINTK_TIME=y +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SHIRQ is not set +# CONFIG_LOCKUP_DETECTOR is not set +# CONFIG_HARDLOCKUP_DETECTOR is not set +# CONFIG_DETECT_HUNG_TASK is not set +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_BKL is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_SPARSE_RCU_POINTER is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set +CONFIG_FRAME_POINTER=y +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# CONFIG_LKDTM is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +# CONFIG_SYSCTL_SYSCALL_CHECK is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set +# CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_STRICT_DEVMEM is not set +# CONFIG_ARM_UNWIND is not set +# CONFIG_DEBUG_USER is not set +# CONFIG_DEBUG_ERRORS is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_LL is not set +# CONFIG_OC_ETM is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y 
+CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_GHASH is not set +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=y +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_TWOFISH_COMMON=y + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set +CONFIG_CRYPTO_HW=y +# CONFIG_BINARY_PRINTF is not set + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +# CONFIG_XZ_DEC is not set +# CONFIG_XZ_DEC_BCJ is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=y +CONFIG_TEXTSEARCH_BM=y +CONFIG_TEXTSEARCH_FSM=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/arm/configs/cayniarb_bravoc_defconfig b/arch/arm/configs/cayniarb_bravoc_defconfig new file mode 100644 index 0000000000000..5323a4da24e13 --- /dev/null +++ b/arch/arm/configs/cayniarb_bravoc_defconfig @@ -0,0 +1,2127 @@ +# +# Automatically generated make config: don't edit +# Linux/arm 2.6.38.8 Kernel Configuration +# Sun Feb 19 17:03:40 2012 +# +CONFIG_ARM=y +CONFIG_SYS_SUPPORTS_APM_EMULATION=y +CONFIG_GENERIC_GPIO=y +# CONFIG_ARCH_USES_GETTIMEOFFSET is not set +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_HAVE_PROC_CPU=y +CONFIG_STACKTRACE_SUPPORT=y 
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_ARCH_HAS_CPUFREQ=y +CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_VECTORS_BASE=0xffff0000 +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y +CONFIG_HAVE_IRQ_WORK=y + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="" +CONFIG_LOCALVERSION="-cayniarb-ics-stock" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_LZO=y +# CONFIG_KERNEL_GZIP is not set +CONFIG_KERNEL_LZMA=y +# CONFIG_KERNEL_LZO is not set +CONFIG_SWAP=y +# CONFIG_SYSVIPC is not set +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set +CONFIG_HAVE_GENERIC_HARDIRQS=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_HARDIRQS=y +# CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED is not set +CONFIG_HAVE_SPARSE_IRQ=y +# CONFIG_GENERIC_PENDING_IRQ is not set +# CONFIG_AUTO_IRQ_AFFINITY is not set +# CONFIG_IRQ_PER_CPU is not set +# CONFIG_SPARSE_IRQ is not set + +# +# RCU Subsystem +# +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_TINY_RCU is not set +CONFIG_TINY_PREEMPT_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_TRACE is not set +# CONFIG_TREE_RCU_TRACE is not set +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=1 +CONFIG_RCU_BOOST_DELAY=500 +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_CGROUPS=y +CONFIG_CGROUP_DEBUG=y +# CONFIG_CGROUP_NS is not set +CONFIG_CGROUP_FREEZER=y +# CONFIG_CGROUP_DEVICE is not set +# CONFIG_CPUSETS is not set +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +# CONFIG_CGROUP_MEM_RES_CTLR is not set +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +# CONFIG_BLK_CGROUP is not set +# CONFIG_NAMESPACES is not set +# CONFIG_SCHED_AUTOGROUP is not set +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_EXPERT=y +CONFIG_EMBEDDED=y +CONFIG_UID16=y +# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +# CONFIG_ELF_CORE is not set +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_ASHMEM=y +CONFIG_AIO=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +# CONFIG_PERF_EVENTS is not set +# CONFIG_PERF_COUNTERS is not set +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_COMPAT_BRK=y +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_DMA_API_DEBUG=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# 
CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +CONFIG_LBDAF=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_IOSCHED_BFQ=y +# CONFIG_CGROUP_BFQIO is not set +# CONFIG_DEFAULT_DEADLINE is not set +# CONFIG_DEFAULT_CFQ is not set +CONFIG_DEFAULT_BFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="bfq" +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +# CONFIG_INLINE_SPIN_UNLOCK is not set +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_READ_TRYLOCK is not set +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +# CONFIG_INLINE_READ_UNLOCK is not set +# CONFIG_INLINE_READ_UNLOCK_BH is not set +# CONFIG_INLINE_READ_UNLOCK_IRQ is not set +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_TRYLOCK is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set +# CONFIG_INLINE_WRITE_UNLOCK is not set +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +# CONFIG_MUTEX_SPIN_ON_OWNER is not set +CONFIG_FREEZER=y + +# +# System Type +# +CONFIG_MMU=y +# CONFIG_ARCH_AAEC2000 is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_REALVIEW is not set +# CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_VEXPRESS is not set +# CONFIG_ARCH_AT91 is not set +# CONFIG_ARCH_BCMRING is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CNS3XXX is not set +# CONFIG_ARCH_GEMINI is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_EP93XX is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_MXS is not set +# CONFIG_ARCH_STMP3XXX is not set +# CONFIG_ARCH_NETX is not set +# CONFIG_ARCH_H720X is not set +# CONFIG_ARCH_IOP13XX is not set +# CONFIG_ARCH_IOP32X is not set +# CONFIG_ARCH_IOP33X is not set +# CONFIG_ARCH_IXP23XX is not set +# CONFIG_ARCH_IXP2000 is not set +# CONFIG_ARCH_IXP4XX is not set +# CONFIG_ARCH_DOVE is not set +# CONFIG_ARCH_KIRKWOOD is not set +# CONFIG_ARCH_LOKI is not set +# CONFIG_ARCH_LPC32XX is not set +# CONFIG_ARCH_MV78XX0 is not set +# CONFIG_ARCH_ORION5X is not set +# CONFIG_ARCH_MMP is not set +# CONFIG_ARCH_KS8695 is not set +# CONFIG_ARCH_NS9XXX is not set +# CONFIG_ARCH_W90X900 is not set +# CONFIG_ARCH_NUC93X is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_PNX4008 is not set +# CONFIG_ARCH_PXA is not set +CONFIG_ARCH_MSM=y +# CONFIG_ARCH_SHMOBILE is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_S3C64XX is not set +# CONFIG_ARCH_S5P64X0 is not set +# CONFIG_ARCH_S5P6442 is not set +# CONFIG_ARCH_S5PC100 is not set +# CONFIG_ARCH_S5PV210 is not set +# CONFIG_ARCH_S5PV310 is not set +# CONFIG_ARCH_SHARK is not set +# 
CONFIG_ARCH_TCC_926 is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_U300 is not set +# CONFIG_ARCH_U8500 is not set +# CONFIG_ARCH_NOMADIK is not set +# CONFIG_ARCH_DAVINCI is not set +# CONFIG_ARCH_OMAP is not set +# CONFIG_PLAT_SPEAR is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_ARCH_MSM7X00A is not set +# CONFIG_ARCH_MSM7X30 is not set +CONFIG_ARCH_QSD8X50=y +# CONFIG_ARCH_MSM8X60 is not set +CONFIG_ARCH_MSM_SCORPION=y +CONFIG_HAS_MSM_DEBUG_UART_PHYS=y +CONFIG_MSM_MDP31=y +# CONFIG_PERFLOCK is not set + +# +# Qualcomm MSM Board Type +# +# CONFIG_MACH_SWORDFISH is not set +CONFIG_MACH_QSD8X50_SURF=y +# CONFIG_MACH_QSD8X50A_ST1_5 is not set +CONFIG_MSM_DEBUG_UART=1 +# CONFIG_MSM_DEBUG_UART_NONE is not set +CONFIG_MSM_DEBUG_UART1=y +# CONFIG_MSM_DEBUG_UART2 is not set +# CONFIG_MSM_DEBUG_UART3 is not set +CONFIG_MSM_PROC_COMM=y +# CONFIG_MACH_MAHIMAHI is not set +# CONFIG_MACH_BRAVO_NONE is not set +# CONFIG_MACH_BRAVO is not set +CONFIG_MACH_BRAVOC=y +# CONFIG_MACH_INCREDIBLE is not set +# CONFIG_MACH_INCREDIBLEC is not set +# CONFIG_MACH_SUPERSONIC is not set +# CONFIG_MACH_QSD8X50_FFA is not set +# CONFIG_HTC_HEADSET is not set +CONFIG_HTC_35MM_JACK=y +# CONFIG_HTC_BATTCHG is not set +CONFIG_HTC_PWRSPLY=y +# CONFIG_HTC_PWRSINK is not set +# CONFIG_HTC_POWER_COLLAPSE_MAGIC is not set +# CONFIG_HTC_ONMODE_CHARGING is not set +CONFIG_CACHE_FLUSH_RANGE_LIMIT=0x40000 +CONFIG_MSM7X00A_USE_GP_TIMER=y +# CONFIG_MSM7X00A_USE_DG_TIMER is not set +CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE_SUSPEND=y +# CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE is not set +# CONFIG_MSM7X00A_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_SLEEP_MODE=0 +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE_SUSPEND is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE=y +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE=1 +CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME=50000000 +CONFIG_MSM7X00A_IDLE_SPIN_TIME=80000 +CONFIG_MSM_IDLE_STATS=y +CONFIG_MSM_IDLE_STATS_FIRST_BUCKET=62500 +CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT=2 +CONFIG_MSM_IDLE_STATS_BUCKET_COUNT=10 +CONFIG_MSM_FIQ_SUPPORT=y +# CONFIG_MSM_SERIAL_DEBUGGER is not set +CONFIG_MSM_SMD=y +CONFIG_MSM_GPIOMUX=y +CONFIG_MSM_DAL=y +CONFIG_MSM_ONCRPCROUTER=y +CONFIG_MSM_CPU_FREQ_SET_MIN_MAX=y +CONFIG_MSM_CPU_FREQ_MAX=998400 +CONFIG_MSM_CPU_FREQ_MIN=245000 +# CONFIG_AXI_SCREEN_POLICY is not set +# CONFIG_MSM_HW3D is not set +CONFIG_HTC_ACOUSTIC_QSD=y +CONFIG_MSM_QDSP6=y +CONFIG_WIFI_CONTROL_FUNC=y +# CONFIG_WIFI_MEM_PREALLOC is not set +CONFIG_ARCH_MSM_FLASHLIGHT=y +# CONFIG_MICROP_COMMON is not set +# CONFIG_HTC_HEADSET_MGR is not set +# CONFIG_VIRTUAL_KPANIC_PARTITION is not set + +# +# System MMU +# + +# +# Processor Type +# +CONFIG_CPU_32v6K=y +CONFIG_CPU_V7=y +CONFIG_CPU_32v7=y +CONFIG_CPU_ABRT_EV7=y +CONFIG_CPU_PABRT_V7=y +CONFIG_CPU_CACHE_V7=y +CONFIG_CPU_CACHE_VIPT=y +CONFIG_CPU_COPY_V6=y +CONFIG_CPU_TLB_V7=y +CONFIG_VERIFY_PERMISSION_FAULT=y +CONFIG_CPU_HAS_ASID=y +CONFIG_CPU_CP15=y +CONFIG_CPU_CP15_MMU=y + +# +# Processor Features +# +CONFIG_ARM_THUMB=y +CONFIG_ARM_THUMBEE=y +# CONFIG_SWP_EMULATE is not set +# CONFIG_CPU_ICACHE_DISABLE is not set +# CONFIG_CPU_DCACHE_DISABLE is not set +# CONFIG_CPU_CACHE_ERR_REPORT is not set +# 
CONFIG_CPU_BPREDICT_DISABLE is not set +CONFIG_ARM_L1_CACHE_SHIFT=5 +CONFIG_ARM_DMA_MEM_BUFFERABLE=y +CONFIG_CPU_HAS_PMU=y +CONFIG_ARM_ERRATA_430973=y +CONFIG_ARM_ERRATA_458693=y +CONFIG_ARM_ERRATA_460075=y +CONFIG_ARM_ERRATA_743622=y +# CONFIG_FIQ_DEBUGGER is not set + +# +# Bus support +# +# CONFIG_PCI_SYSCALL is not set +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Kernel Features +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_VMSPLIT_3G=y +# CONFIG_VMSPLIT_2G is not set +# CONFIG_VMSPLIT_1G is not set +CONFIG_PAGE_OFFSET=0xC0000000 +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_HZ=100 +# CONFIG_THUMB2_KERNEL is not set +CONFIG_AEABI=y +# CONFIG_OABI_COMPAT is not set +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set +# CONFIG_HIGHMEM is not set +CONFIG_VMALLOC_RESERVE=0x08000000 +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_COMPACTION is not set +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_VIRT_TO_BUS=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 +CONFIG_NEED_PER_CPU_KM=y +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_ALIGNMENT_TRAP=y +# CONFIG_UACCESS_WITH_MEMCPY is not set +# CONFIG_SECCOMP is not set +# CONFIG_CC_STACKPROTECTOR is not set +# CONFIG_DEPRECATED_PARAM_STRUCT is not set + +# +# Boot options +# +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_CMDLINE="mem=64M console=ttyMSM0" +# CONFIG_CMDLINE_FORCE is not set +# CONFIG_XIP_KERNEL is not set +# CONFIG_KEXEC is not set +# CONFIG_CRASH_DUMP is not set +# CONFIG_AUTO_ZRELADDR is not set + +# +# CPU Power Management +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TABLE=y +# CONFIG_CPU_FREQ_DEBUG is not set +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_STAT_DETAILS=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_INTERACTIVE=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_IDLE is not set +CONFIG_CPU_FREQ_MSM=y + +# +# Floating point emulation +# + +# +# At least one emulation must be selected +# +CONFIG_VFP=y +CONFIG_VFPv3=y +CONFIG_NEON=y + +# +# Userspace binary formats +# +CONFIG_BINFMT_ELF=y +CONFIG_HAVE_AOUT=y +# CONFIG_BINFMT_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options +# +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_SLEEP=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HAS_WAKELOCK=y +CONFIG_HAS_EARLYSUSPEND=y +CONFIG_WAKELOCK=y +CONFIG_WAKELOCK_STAT=y +CONFIG_USER_WAKELOCK=y +CONFIG_EARLYSUSPEND=y +# CONFIG_NO_USER_SPACE_SCREEN_ACCESS_CONTROL is not set +CONFIG_FB_EARLYSUSPEND=y +# CONFIG_APM_EMULATION is not set +# CONFIG_PM_RUNTIME is not set +CONFIG_PM_OPS=y +# CONFIG_SUSPEND_TIME is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is 
not set +# CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=y +CONFIG_NET_KEY=y +# CONFIG_NET_KEY_MIGRATE is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +CONFIG_INET_ESP=y +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +CONFIG_IPV6=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +# CONFIG_IPV6_ROUTE_INFO is not set +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_INET6_XFRM_TUNNEL=y +CONFIG_INET6_TUNNEL=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +CONFIG_IPV6_SIT=y +# CONFIG_IPV6_SIT_6RD is not set +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=y +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +# CONFIG_IPV6_MROUTE is not set +CONFIG_ANDROID_PARANOID_NETWORK=y +CONFIG_NET_ACTIVITY_STATS=y +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_NETLINK=y +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_TPROXY=y +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=y +CONFIG_NETFILTER_XT_CONNMARK=y + +# +# Xtables targets +# +# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +# CONFIG_NETFILTER_XT_TARGET_CT is not set +# CONFIG_NETFILTER_XT_TARGET_DSCP is not set +CONFIG_NETFILTER_XT_TARGET_HL=y +# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set +CONFIG_NETFILTER_XT_TARGET_LED=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set +# CONFIG_NETFILTER_XT_TARGET_TEE is not set +# CONFIG_NETFILTER_XT_TARGET_TPROXY is not set +CONFIG_NETFILTER_XT_TARGET_TRACE=y +# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set +# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set + +# +# Xtables matches +# +# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y 
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +# CONFIG_NETFILTER_XT_MATCH_CPU is not set +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +# CONFIG_NETFILTER_XT_MATCH_DSCP is not set +# CONFIG_NETFILTER_XT_MATCH_ESP is not set +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_HL=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set +# CONFIG_NETFILTER_XT_MATCH_OSF is not set +# CONFIG_NETFILTER_XT_MATCH_OWNER is not set +CONFIG_NETFILTER_XT_MATCH_POLICY=y +# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set +# CONFIG_NETFILTER_XT_MATCH_REALM is not set +# CONFIG_NETFILTER_XT_MATCH_RECENT is not set +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +# CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_CONNTRACK_PROC_COMPAT=y +# CONFIG_IP_NF_QUEUE is not set +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_ADDRTYPE=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_LOG=y +# CONFIG_IP_NF_TARGET_ULOG is not set +CONFIG_NF_NAT=y +CONFIG_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +# CONFIG_NF_NAT_SNMP_BASIC is not set +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_GRE=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_FTP=y +CONFIG_NF_NAT_IRC=y +CONFIG_NF_NAT_TFTP=y +CONFIG_NF_NAT_AMANDA=y +CONFIG_NF_NAT_PPTP=y +CONFIG_NF_NAT_H323=y +CONFIG_NF_NAT_SIP=y +CONFIG_IP_NF_MANGLE=y +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +# CONFIG_IP_NF_TARGET_ECN is not set +# CONFIG_IP_NF_TARGET_TTL is not set +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV6=y +CONFIG_NF_CONNTRACK_IPV6=y +# CONFIG_IP6_NF_QUEUE is not set +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_EUI64=y +CONFIG_IP6_NF_MATCH_FRAG=y +CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +CONFIG_IP6_NF_MATCH_RT=y +CONFIG_IP6_NF_TARGET_HL=y +CONFIG_IP6_NF_TARGET_LOG=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +# CONFIG_IP6_NF_RAW is not set +# CONFIG_BRIDGE_NF_EBTABLES is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +CONFIG_STP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# 
CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +# CONFIG_NET_SCH_CBQ is not set +CONFIG_NET_SCH_HTB=y +# CONFIG_NET_SCH_HFSC is not set +# CONFIG_NET_SCH_PRIO is not set +# CONFIG_NET_SCH_MULTIQ is not set +# CONFIG_NET_SCH_RED is not set +# CONFIG_NET_SCH_SFQ is not set +# CONFIG_NET_SCH_TEQL is not set +# CONFIG_NET_SCH_TBF is not set +# CONFIG_NET_SCH_GRED is not set +# CONFIG_NET_SCH_DSMARK is not set +# CONFIG_NET_SCH_NETEM is not set +# CONFIG_NET_SCH_DRR is not set +CONFIG_NET_SCH_INGRESS=y + +# +# Classification +# +CONFIG_NET_CLS=y +# CONFIG_NET_CLS_BASIC is not set +# CONFIG_NET_CLS_TCINDEX is not set +# CONFIG_NET_CLS_ROUTE4 is not set +# CONFIG_NET_CLS_FW is not set +CONFIG_NET_CLS_U32=y +# CONFIG_CLS_U32_PERF is not set +# CONFIG_CLS_U32_MARK is not set +# CONFIG_NET_CLS_RSVP is not set +# CONFIG_NET_CLS_RSVP6 is not set +# CONFIG_NET_CLS_FLOW is not set +# CONFIG_NET_CLS_CGROUP is not set +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +# CONFIG_NET_EMATCH_CMP is not set +# CONFIG_NET_EMATCH_NBYTE is not set +CONFIG_NET_EMATCH_U32=y +# CONFIG_NET_EMATCH_META is not set +# CONFIG_NET_EMATCH_TEXT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=y +# CONFIG_GACT_PROB is not set +CONFIG_NET_ACT_MIRRED=y +# CONFIG_NET_ACT_IPT is not set +# CONFIG_NET_ACT_NAT is not set +# CONFIG_NET_ACT_PEDIT is not set +# CONFIG_NET_ACT_SIMP is not set +# CONFIG_NET_ACT_SKBEDIT is not set +# CONFIG_NET_ACT_CSUM is not set +# CONFIG_NET_CLS_IND is not set +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set +# CONFIG_BATMAN_ADV is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +CONFIG_BT=y +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +# CONFIG_BT_BNEP_MC_FILTER is not set +# CONFIG_BT_BNEP_PROTO_FILTER is not set +CONFIG_BT_HIDP=y + +# +# Bluetooth device drivers +# +# CONFIG_BT_HCIBTSDIO is not set +CONFIG_BT_HCIUART=y +CONFIG_BT_HCIUART_H4=y +# CONFIG_BT_HCIUART_BCSP is not set +# CONFIG_BT_HCIUART_ATH3K is not set +CONFIG_BT_HCIUART_LL=y +# CONFIG_BT_HCIVHCI is not set +# CONFIG_BT_MRVL is not set +# CONFIG_AF_RXRPC is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WIRELESS_EXT=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_WEXT_PRIV=y +# CONFIG_CFG80211 is not set +CONFIG_WIRELESS_EXT_SYSFS=y +# CONFIG_LIB80211 is not set + +# +# CFG80211 needs to be enabled for MAC80211 +# + +# +# Some wireless drivers require a rate control algorithm +# +# CONFIG_WIMAX is not set +CONFIG_RFKILL=y +# CONFIG_RFKILL_PM is not set +CONFIG_RFKILL_LEDS=y +# CONFIG_RFKILL_INPUT is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="" +# CONFIG_DEVTMPFS is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +CONFIG_GENLOCK=y +CONFIG_GENLOCK_MISCDEVICE=y +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AFS_PARTS is not set 
+# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +CONFIG_MTD_MSM_NAND=y +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +CONFIG_MTD_NAND_IDS=y +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_UBI is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=y + +# +# DRBD disabled because PROC_FS, INET or CONNECTOR not selected +# +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_MG_DISK is not set +# CONFIG_BLK_DEV_RBD is not set +CONFIG_MISC_DEVICES=y +# CONFIG_AD525X_DPOT is not set +CONFIG_ANDROID_PMEM=y +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +CONFIG_KERNEL_DEBUGGER_CORE=y +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1780 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_SENSORS_AK8975 is not set +CONFIG_SENSORS_AKM8973=y +# CONFIG_SENSORS_AKM8976 is not set +# CONFIG_VP_A1026 is not set +# CONFIG_DS1682 is not set +CONFIG_UID_STAT=y +# CONFIG_BMP085 is not set +# CONFIG_WL127X_RFKILL is not set +# CONFIG_APANIC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_IWMC3200TOP is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_ATA is not set +CONFIG_MD=y +# CONFIG_BLK_DEV_MD is not set +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=y +# CONFIG_DM_SNAPSHOT is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_RAID is not set +# CONFIG_DM_ZERO is not set +# CONFIG_DM_MULTIPATH is not set +# CONFIG_DM_DELAY is not set +CONFIG_DM_UEVENT=y +CONFIG_NETDEVICES=y +CONFIG_IFB=y +CONFIG_DUMMY=y +# 
CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +CONFIG_TUN=y +# CONFIG_VETH is not set +CONFIG_MII=y +# CONFIG_PHYLIB is not set +CONFIG_NET_ETHERNET=y +# CONFIG_AX88796 is not set +CONFIG_SMC91X=y +# CONFIG_DM9000 is not set +# CONFIG_ETHOC is not set +CONFIG_SMC911X=y +# CONFIG_SMSC911X is not set +# CONFIG_DNET is not set +# CONFIG_IBM_NEW_EMAC_ZMII is not set +# CONFIG_IBM_NEW_EMAC_RGMII is not set +# CONFIG_IBM_NEW_EMAC_TAH is not set +# CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set +# CONFIG_B44 is not set +# CONFIG_KS8851_MLL is not set +CONFIG_NETDEV_1000=y +# CONFIG_STMMAC_ETH is not set +CONFIG_NETDEV_10000=y +CONFIG_WLAN=y +CONFIG_BCM4329=m +CONFIG_BCM4329_FW_PATH="/vendor/firmware/fw_bcm4329.bin" +CONFIG_BCM4329_NVRAM_PATH="/proc/calibration" +# CONFIG_HOSTAP is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set + +# +# CAIF transport drivers +# +CONFIG_PPP=y +# CONFIG_PPP_MULTILINK is not set +# CONFIG_PPP_FILTER is not set +CONFIG_PPP_ASYNC=y +# CONFIG_PPP_SYNC_TTY is not set +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_MPPE=y +# CONFIG_PPPOE is not set +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +# CONFIG_SLIP is not set +CONFIG_SLHC=y +# CONFIG_NETCONSOLE is not set +CONFIG_MSM_RMNET=y +# CONFIG_MSM_RMNET_DEBUG is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +CONFIG_GAN_ETH=y +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set +CONFIG_INPUT_KEYRESET=y + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ATMEL is not set +# CONFIG_TOUCHSCREEN_COMPATIBLE_REPORT is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_I2C is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_ELAN_I2C_8232 is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_QT602240 is not set +# CONFIG_TOUCHSCREEN_MSM is not set +CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI=y +CONFIG_TOUCHSCREEN_DUPLICATED_FILTER=y +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_W90X900 is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATI_REMOTE is not 
set +# CONFIG_INPUT_ATI_REMOTE2 is not set +CONFIG_INPUT_KEYCHORD=y +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_CAPELLA_CM3602 is not set +CONFIG_INPUT_CAPELLA_CM3602_HTC=y +CONFIG_LIGHTSENSOR_MICROP=y +CONFIG_INPUT_OPTICALJOYSTICK=y +CONFIG_OPTICALJOYSTICK_CRUCIAL=y +CONFIG_OPTICALJOYSTICK_CRUCIAL_uP=y +# CONFIG_OPTICALJOYSTICK_CRUCIAL_SPI is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_N_GSM is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_SERIAL_MSM=y +CONFIG_SERIAL_MSM_CONSOLE=y +# CONFIG_SERIAL_MSM_CLOCK_CONTROL is not set +CONFIG_SERIAL_MSM_HS=y +CONFIG_SERIAL_BCM_BT_LPM=y +# CONFIG_SERIAL_TIMBERDALE is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_TTY_PRINTK is not set +# CONFIG_HVC_DCC is not set +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_DCC_TTY is not set +# CONFIG_RAMOOPS is not set +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +# CONFIG_I2C_CHARDEV is not set +# CONFIG_I2C_MUX is not set +CONFIG_I2C_HELPER_AUTO=y + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE is not set +# CONFIG_I2C_GPIO is not set +CONFIG_I2C_MSM=y +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_SPI is not set + +# +# PPS support +# +# CONFIG_PPS is not set + +# +# PPS generators support +# +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set + +# +# Memory mapped GPIO expanders: +# +# CONFIG_GPIO_BASIC_MMIO is not set +# CONFIG_GPIO_IT8761E is not set + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_SX150X is not set +# CONFIG_GPIO_ADP5588 is not set + +# +# PCI GPIO expanders: +# + +# +# SPI GPIO expanders: +# + +# +# AC97 GPIO expanders: +# + +# +# MODULbus GPIO expanders: +# +CONFIG_W1=y + +# +# 1-wire Bus Masters +# +CONFIG_W1_MASTER_DS2482=y +# CONFIG_W1_MASTER_DS1WM is not set +# CONFIG_W1_MASTER_GPIO is not set + +# +# 1-wire Slaves +# +# CONFIG_W1_SLAVE_THERM is not set +# CONFIG_W1_SLAVE_SMEM is not set +# CONFIG_W1_SLAVE_DS2423 is not set +# CONFIG_W1_SLAVE_DS2431 is not set +# CONFIG_W1_SLAVE_DS2433 is not set +# CONFIG_W1_SLAVE_DS2760 is not set +# 
CONFIG_W1_SLAVE_BQ27000 is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2782 is not set +CONFIG_BATTERY_DS2784=y +# CONFIG_BATTERY_BQ20Z75 is not set +# CONFIG_BATTERY_BQ27x00 is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set +CONFIG_MFD_SUPPORT=y +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_ASIC3 is not set +# CONFIG_HTC_EGPIO is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_TPS65200 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_MFD_STMPE is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_T7L66XB is not set +# CONFIG_MFD_TC6387XB is not set +# CONFIG_MFD_TC6393XB is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_WL1273_CORE is not set +CONFIG_REGULATOR=y +CONFIG_REGULATOR_DEBUG=y +# CONFIG_REGULATOR_DUMMY is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_BQ24022 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +CONFIG_REGULATOR_TPS65023=y +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_AD5398 is not set +CONFIG_MEDIA_SUPPORT=y + +# +# Multimedia core support +# +# CONFIG_VIDEO_DEV is not set +# CONFIG_DVB_CORE is not set +# CONFIG_VIDEO_MEDIA is not set + +# +# Multimedia drivers +# +CONFIG_RC_CORE=y +CONFIG_LIRC=y +CONFIG_RC_MAP=y +CONFIG_IR_NEC_DECODER=y +CONFIG_IR_RC5_DECODER=y +CONFIG_IR_RC6_DECODER=y +CONFIG_IR_JVC_DECODER=y +CONFIG_IR_SONY_DECODER=y +CONFIG_IR_RC5_SZ_DECODER=y +CONFIG_IR_LIRC_CODEC=y +# CONFIG_IR_IMON is not set +# CONFIG_IR_MCEUSB is not set +# CONFIG_IR_STREAMZAP is not set +# CONFIG_RC_LOOPBACK is not set + +# +# Qualcomm MSM Camera And Video +# +CONFIG_MSM_CAMERA=y +CONFIG_720P_CAMERA=y +# CONFIG_MSM_CAMERA_DEBUG is not set + +# +# Camera Sensor Selection +# +# CONFIG_MT9T013 is not set +# CONFIG_MT9D112 is not set +# CONFIG_MT9P012 is not set +CONFIG_S5K3E2FX=y +# CONFIG_S5K6AAFX is not set +# CONFIG_OV8810 is not set +# CONFIG_OV9665 is not set +# CONFIG_S5K3H1GX is not set + +# +# Graphics support +# +# CONFIG_DRM is not set +CONFIG_MSM_KGSL=y +# CONFIG_MSM_KGSL_CFF_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX is not set +CONFIG_MSM_KGSL_MMU=y +# CONFIG_KGSL_PER_PROCESS_PAGE_TABLE is not set +CONFIG_MSM_KGSL_PAGE_TABLE_SIZE=0xFFF0000 +CONFIG_MSM_KGSL_MMU_PAGE_FAULT=y +# 
CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES is not set +# CONFIG_VGASTATE is not set +CONFIG_VIDEO_OUTPUT_CONTROL=y +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_WMT_GE_ROPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_FB_MSM=y +CONFIG_FB_MSM_LEGACY_MDP=y +CONFIG_FB_MSM_MDP_PPP=y +CONFIG_FB_MSM_LCDC=y +# CONFIG_FB_MSM_MDDI is not set +# CONFIG_MSM_HDMI is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set +# CONFIG_LOGO is not set +# CONFIG_SOUND is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +# CONFIG_HIDRAW is not set +# CONFIG_HID_PID is not set + +# +# Special HID drivers +# +CONFIG_HID_APPLE=y +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_WACOM is not set +CONFIG_USB_SUPPORT=y +CONFIG_USB_ARCH_HAS_HCD=y +# CONFIG_USB_ARCH_HAS_OHCI is not set +CONFIG_USB_ARCH_HAS_EHCI=y +# CONFIG_USB is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_MUSB_HDRC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_GADGET_SELECTED=y +# CONFIG_USB_GADGET_R8A66597 is not set +# CONFIG_USB_GADGET_PXA_U2O is not set +# CONFIG_USB_GADGET_M66592 is not set +# CONFIG_USB_GADGET_CI13XXX_MSM is not set +CONFIG_USB_GADGET_MSM_72K=y +CONFIG_USB_MSM_72K=y +# CONFIG_USB_MSM_72K_HTC is not set +CONFIG_USB_GADGET_DUALSPEED=y +# CONFIG_USB_ZERO is not set +# CONFIG_USB_ETH is not set +# CONFIG_USB_G_NCM is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_FILE_STORAGE is not set +# CONFIG_USB_MASS_STORAGE is not set +# CONFIG_USB_G_SERIAL is not set +# CONFIG_USB_G_PRINTER is not set +CONFIG_USB_ANDROID=y +# CONFIG_USB_ANDROID_ACM is not set +CONFIG_USB_ANDROID_ADB=y +CONFIG_USB_ANDROID_DIAG=y +CONFIG_USB_ANDROID_MASS_STORAGE=y +# CONFIG_USB_ANDROID_MTP is not set +CONFIG_USB_ANDROID_RNDIS=y +CONFIG_USB_ANDROID_RNDIS_WCEIS=y +CONFIG_USB_ANDROID_ACCESSORY=y +CONFIG_USB_CSW_HACK=y +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set + +# +# OTG and related infrastructure +# +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ULPI is not set +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_MSM_OTG_72K is not set +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_UNSAFE_RESUME=y +# CONFIG_MMC_CLKGATE is not set +CONFIG_MMC_EMBEDDED_SDIO=y +CONFIG_MMC_PARANOID_SD_INIT=y + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=8 +# CONFIG_MMC_BLOCK_BOUNCE is not set 
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_SDHCI is not set +CONFIG_MMC_MSM=y +# CONFIG_MMC_DW is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y + +# +# LED drivers +# +# CONFIG_LEDS_PCA9532 is not set +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_GPIO_PLATFORM=y +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 is not set +CONFIG_LEDS_CPLD=y +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_LT3593 is not set +CONFIG_LEDS_TRIGGERS=y + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set +CONFIG_LEDS_TRIGGER_SLEEP=y + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_NFC_DEVICES is not set +CONFIG_SWITCH=y +CONFIG_SWITCH_GPIO=y +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +# CONFIG_RTC_INTF_SYSFS is not set +# CONFIG_RTC_INTF_PROC is not set +# CONFIG_RTC_INTF_DEV is not set +CONFIG_RTC_INTF_ALARM=y +CONFIG_RTC_INTF_ALARM_DEV=y +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set + +# +# SPI RTC drivers +# + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_CMOS is not set +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_MSM7X00A=y +# CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set +CONFIG_STAGING=y +# CONFIG_STAGING_EXCLUDE_BUILD is not set +# CONFIG_ECHO is not set +# CONFIG_BRCM80211 is not set +# CONFIG_COMEDI is not set + +# +# Android +# +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_LOGGER=y +CONFIG_ANDROID_RAM_CONSOLE=y +CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE=128 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE=16 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE=8 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL=0x11d +# CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT is not set +CONFIG_ANDROID_TIMED_OUTPUT=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +# CONFIG_POHMELFS is not set +# CONFIG_IIO is not set 
+CONFIG_ZRAM=y +# CONFIG_FB_SM7XX is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_ST_BT is not set +# CONFIG_LIRC_STAGING is not set +CONFIG_MACH_NO_WESTBRIDGE=y +# CONFIG_ATH6K_LEGACY is not set +# CONFIG_FT1000 is not set + +# +# Speakup console speech +# +# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set + +# +# File systems +# +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT23=y +CONFIG_EXT4_FS_XATTR=y +# CONFIG_EXT4_FS_POSIX_ACL is not set +# CONFIG_EXT4_FS_SECURITY is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_FS_POSIX_ACL is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +# CONFIG_DNOTIFY is not set +CONFIG_INOTIFY_USER=y +# CONFIG_FANOTIFY is not set +# CONFIG_QUOTA is not set +# CONFIG_QUOTACTL is not set +# CONFIG_AUTOFS4_FS is not set +CONFIG_FUSE_FS=y +# CONFIG_CUSE is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +# CONFIG_MSDOS_FS is not set +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_YAFFS_FS=y +CONFIG_YAFFS_YAFFS1=y +# CONFIG_YAFFS_9BYTE_TAGS is not set +# CONFIG_YAFFS_DOES_ECC is not set +CONFIG_YAFFS_YAFFS2=y +CONFIG_YAFFS_AUTO_YAFFS2=y +CONFIG_YAFFS_DISABLE_TAGS_ECC=y +# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set +# CONFIG_YAFFS_EMPTY_LOST_AND_FOUND is not set +# CONFIG_YAFFS_DISABLE_BLOCK_REFRESHING is not set +# CONFIG_YAFFS_DISABLE_BACKGROUND is not set +CONFIG_YAFFS_XATTR=y +# CONFIG_JFFS2_FS is not set +# CONFIG_LOGFS is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +# CONFIG_NFS_FS is not set +# CONFIG_NFSD is not set +# CONFIG_CEPH_FS is not set +CONFIG_CIFS=y +# CONFIG_CIFS_STATS is not set +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_ACL is not set +# CONFIG_CIFS_EXPERIMENTAL is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# 
CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +CONFIG_NLS_UTF8=y + +# +# Kernel hacking +# +CONFIG_PRINTK_TIME=y +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SHIRQ is not set +# CONFIG_LOCKUP_DETECTOR is not set +# CONFIG_HARDLOCKUP_DETECTOR is not set +# CONFIG_DETECT_HUNG_TASK is not set +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_BKL is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_SPARSE_RCU_POINTER is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set +CONFIG_FRAME_POINTER=y +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# CONFIG_LKDTM is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +# CONFIG_SYSCTL_SYSCALL_CHECK is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set +# CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_STRICT_DEVMEM is not set +# CONFIG_ARM_UNWIND is not set +# CONFIG_DEBUG_USER is not set +# CONFIG_DEBUG_ERRORS is not set +# 
CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_LL is not set +# CONFIG_OC_ETM is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_GHASH is not set +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=y +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_TWOFISH_COMMON=y + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set +CONFIG_CRYPTO_HW=y +# CONFIG_BINARY_PRINTF is not set + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +# CONFIG_XZ_DEC is not set +# CONFIG_XZ_DEC_BCJ is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=y +CONFIG_TEXTSEARCH_BM=y +CONFIG_TEXTSEARCH_FSM=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/arm/configs/cayniarb_incrediblec_defconfig b/arch/arm/configs/cayniarb_incrediblec_defconfig new file mode 100644 index 0000000000000..9a5f520d5f3bf --- /dev/null +++ 
b/arch/arm/configs/cayniarb_incrediblec_defconfig @@ -0,0 +1,2162 @@ +# +# Automatically generated make config: don't edit +# Linux/arm 2.6.38.8 Kernel Configuration +# Sun Feb 19 17:05:09 2012 +# +CONFIG_ARM=y +CONFIG_SYS_SUPPORTS_APM_EMULATION=y +CONFIG_GENERIC_GPIO=y +# CONFIG_ARCH_USES_GETTIMEOFFSET is not set +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_HAVE_PROC_CPU=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_ARCH_HAS_CPUFREQ=y +CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_VECTORS_BASE=0xffff0000 +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y +CONFIG_HAVE_IRQ_WORK=y + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="arm-eabi-" +CONFIG_LOCALVERSION="-cayniarb-ics-stock" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_LZO=y +# CONFIG_KERNEL_GZIP is not set +CONFIG_KERNEL_LZMA=y +# CONFIG_KERNEL_LZO is not set +CONFIG_SWAP=y +# CONFIG_SYSVIPC is not set +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set +CONFIG_HAVE_GENERIC_HARDIRQS=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_HARDIRQS=y +# CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED is not set +CONFIG_HAVE_SPARSE_IRQ=y +# CONFIG_GENERIC_PENDING_IRQ is not set +# CONFIG_AUTO_IRQ_AFFINITY is not set +# CONFIG_IRQ_PER_CPU is not set +# CONFIG_SPARSE_IRQ is not set + +# +# RCU Subsystem +# +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_TINY_RCU is not set +CONFIG_TINY_PREEMPT_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_TRACE is not set +# CONFIG_TREE_RCU_TRACE is not set +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=1 +CONFIG_RCU_BOOST_DELAY=500 +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_CGROUPS=y +# CONFIG_CGROUP_DEBUG is not set +# CONFIG_CGROUP_NS is not set +# CONFIG_CGROUP_FREEZER is not set +# CONFIG_CGROUP_DEVICE is not set +# CONFIG_CPUSETS is not set +CONFIG_CGROUP_CPUACCT=y +# CONFIG_RESOURCE_COUNTERS is not set +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +# CONFIG_BLK_CGROUP is not set +# CONFIG_NAMESPACES is not set +# CONFIG_SCHED_AUTOGROUP is not set +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_EXPERT=y +CONFIG_EMBEDDED=y +CONFIG_UID16=y +# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +# CONFIG_ELF_CORE is not set +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_ASHMEM=y +CONFIG_AIO=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +# CONFIG_PERF_EVENTS is not set +# CONFIG_PERF_COUNTERS is not set +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_COMPAT_BRK=y +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +# 
CONFIG_PROFILING is not set +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_DMA_API_DEBUG=y + +# +# GCOV-based kernel profiling +# +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +CONFIG_LBDAF=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_IOSCHED_BFQ=y +# CONFIG_CGROUP_BFQIO is not set +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_BFQ is not set +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +# CONFIG_INLINE_SPIN_UNLOCK is not set +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_READ_TRYLOCK is not set +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +# CONFIG_INLINE_READ_UNLOCK is not set +# CONFIG_INLINE_READ_UNLOCK_BH is not set +# CONFIG_INLINE_READ_UNLOCK_IRQ is not set +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_TRYLOCK is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set +# CONFIG_INLINE_WRITE_UNLOCK is not set +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +# CONFIG_MUTEX_SPIN_ON_OWNER is not set +CONFIG_FREEZER=y + +# +# System Type +# +CONFIG_MMU=y +# CONFIG_ARCH_AAEC2000 is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_REALVIEW is not set +# CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_VEXPRESS is not set +# CONFIG_ARCH_AT91 is not set +# CONFIG_ARCH_BCMRING is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CNS3XXX is not set +# CONFIG_ARCH_GEMINI is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_EP93XX is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_MXS is not set +# CONFIG_ARCH_STMP3XXX is not set +# CONFIG_ARCH_NETX is not set +# CONFIG_ARCH_H720X is not set +# CONFIG_ARCH_IOP13XX is not set +# CONFIG_ARCH_IOP32X is not set +# CONFIG_ARCH_IOP33X is not set +# CONFIG_ARCH_IXP23XX is not set +# CONFIG_ARCH_IXP2000 is not set +# CONFIG_ARCH_IXP4XX is not set +# CONFIG_ARCH_DOVE is not set +# CONFIG_ARCH_KIRKWOOD is not set +# CONFIG_ARCH_LOKI is not set +# CONFIG_ARCH_LPC32XX is not set +# CONFIG_ARCH_MV78XX0 is not set +# CONFIG_ARCH_ORION5X is not set +# CONFIG_ARCH_MMP is not set +# CONFIG_ARCH_KS8695 is not set +# CONFIG_ARCH_NS9XXX is not set +# CONFIG_ARCH_W90X900 is not set +# CONFIG_ARCH_NUC93X is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_PNX4008 is not set +# CONFIG_ARCH_PXA is not set +CONFIG_ARCH_MSM=y +# 
CONFIG_ARCH_SHMOBILE is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_S3C64XX is not set +# CONFIG_ARCH_S5P64X0 is not set +# CONFIG_ARCH_S5P6442 is not set +# CONFIG_ARCH_S5PC100 is not set +# CONFIG_ARCH_S5PV210 is not set +# CONFIG_ARCH_S5PV310 is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_TCC_926 is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_U300 is not set +# CONFIG_ARCH_U8500 is not set +# CONFIG_ARCH_NOMADIK is not set +# CONFIG_ARCH_DAVINCI is not set +# CONFIG_ARCH_OMAP is not set +# CONFIG_PLAT_SPEAR is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_ARCH_MSM7X00A is not set +# CONFIG_ARCH_MSM7X30 is not set +CONFIG_ARCH_QSD8X50=y +# CONFIG_ARCH_MSM8X60 is not set +CONFIG_ARCH_MSM_SCORPION=y +CONFIG_HAS_MSM_DEBUG_UART_PHYS=y +CONFIG_MSM_MDP31=y +# CONFIG_PERFLOCK is not set + +# +# Qualcomm MSM Board Type +# +# CONFIG_MACH_SWORDFISH is not set +CONFIG_MACH_QSD8X50_SURF=y +# CONFIG_MACH_QSD8X50A_ST1_5 is not set +CONFIG_MSM_DEBUG_UART_NONE=y +# CONFIG_MSM_DEBUG_UART1 is not set +# CONFIG_MSM_DEBUG_UART2 is not set +# CONFIG_MSM_DEBUG_UART3 is not set +CONFIG_MSM_PROC_COMM=y +# CONFIG_MACH_MAHIMAHI is not set +CONFIG_MACH_BRAVO_NONE=y +# CONFIG_MACH_BRAVO is not set +# CONFIG_MACH_BRAVOC is not set +# CONFIG_MACH_INCREDIBLE is not set +CONFIG_MACH_INCREDIBLEC=y +# CONFIG_MACH_SUPERSONIC is not set +# CONFIG_MACH_QSD8X50_FFA is not set +# CONFIG_HTC_HEADSET is not set +# CONFIG_HTC_35MM_JACK is not set +CONFIG_HTC_BATTCHG=y +CONFIG_HTC_BATTCHG_SMEM=y +# CONFIG_HTC_PWRSPLY is not set +# CONFIG_HTC_PWRSINK is not set +# CONFIG_HTC_POWER_COLLAPSE_MAGIC is not set +# CONFIG_HTC_ONMODE_CHARGING is not set +CONFIG_CACHE_FLUSH_RANGE_LIMIT=0x40000 +CONFIG_MSM7X00A_USE_GP_TIMER=y +# CONFIG_MSM7X00A_USE_DG_TIMER is not set +CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE_SUSPEND=y +# CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE is not set +# CONFIG_MSM7X00A_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_SLEEP_MODE=0 +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE_SUSPEND is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE_APPS_SLEEP=y +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE=2 +CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME=20000000 +CONFIG_MSM7X00A_IDLE_SPIN_TIME=80000 +CONFIG_MSM_IDLE_STATS=y +CONFIG_MSM_IDLE_STATS_FIRST_BUCKET=62500 +CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT=2 +CONFIG_MSM_IDLE_STATS_BUCKET_COUNT=10 +CONFIG_MSM_FIQ_SUPPORT=y +CONFIG_MSM_SERIAL_DEBUGGER=y +CONFIG_MSM_SERIAL_DEBUGGER_NO_SLEEP=y +# CONFIG_MSM_SERIAL_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON is not set +# CONFIG_MSM_SERIAL_DEBUGGER_CONSOLE is not set +CONFIG_MSM_SMD=y +CONFIG_MSM_GPIOMUX=y +CONFIG_MSM_DAL=y +CONFIG_MSM_ONCRPCROUTER=y +CONFIG_MSM_CPU_FREQ_SET_MIN_MAX=y +CONFIG_MSM_CPU_FREQ_MAX=998000 +CONFIG_MSM_CPU_FREQ_MIN=245760 +# CONFIG_AXI_SCREEN_POLICY is not set +# CONFIG_MSM_HW3D is not set +CONFIG_HTC_ACOUSTIC_QSD=y +CONFIG_MSM_QDSP6=y +CONFIG_WIFI_CONTROL_FUNC=y +# CONFIG_WIFI_MEM_PREALLOC is not set +CONFIG_ARCH_MSM_FLASHLIGHT=y +CONFIG_MICROP_COMMON=y +CONFIG_HTC_HEADSET_MGR=y +CONFIG_HTC_HEADSET_GPIO=y +CONFIG_HTC_HEADSET_MICROP=y +# CONFIG_VIRTUAL_KPANIC_PARTITION is not set + +# +# System MMU +# + +# +# Processor Type +# 
+CONFIG_CPU_32v6K=y +CONFIG_CPU_V7=y +CONFIG_CPU_32v7=y +CONFIG_CPU_ABRT_EV7=y +CONFIG_CPU_PABRT_V7=y +CONFIG_CPU_CACHE_V7=y +CONFIG_CPU_CACHE_VIPT=y +CONFIG_CPU_COPY_V6=y +CONFIG_CPU_TLB_V7=y +CONFIG_VERIFY_PERMISSION_FAULT=y +CONFIG_CPU_HAS_ASID=y +CONFIG_CPU_CP15=y +CONFIG_CPU_CP15_MMU=y + +# +# Processor Features +# +CONFIG_ARM_THUMB=y +CONFIG_ARM_THUMBEE=y +# CONFIG_SWP_EMULATE is not set +# CONFIG_CPU_ICACHE_DISABLE is not set +# CONFIG_CPU_DCACHE_DISABLE is not set +# CONFIG_CPU_CACHE_ERR_REPORT is not set +# CONFIG_CPU_BPREDICT_DISABLE is not set +CONFIG_ARM_L1_CACHE_SHIFT=5 +CONFIG_ARM_DMA_MEM_BUFFERABLE=y +CONFIG_CPU_HAS_PMU=y +# CONFIG_ARM_ERRATA_430973 is not set +# CONFIG_ARM_ERRATA_458693 is not set +# CONFIG_ARM_ERRATA_460075 is not set +# CONFIG_ARM_ERRATA_743622 is not set +# CONFIG_FIQ_DEBUGGER is not set + +# +# Bus support +# +# CONFIG_PCI_SYSCALL is not set +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Kernel Features +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_VMSPLIT_3G=y +# CONFIG_VMSPLIT_2G is not set +# CONFIG_VMSPLIT_1G is not set +CONFIG_PAGE_OFFSET=0xC0000000 +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_HZ=100 +# CONFIG_THUMB2_KERNEL is not set +CONFIG_AEABI=y +# CONFIG_OABI_COMPAT is not set +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set +# CONFIG_HIGHMEM is not set +CONFIG_VMALLOC_RESERVE=0x08000000 +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_COMPACTION is not set +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_VIRT_TO_BUS=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 +CONFIG_NEED_PER_CPU_KM=y +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_ALIGNMENT_TRAP=y +# CONFIG_UACCESS_WITH_MEMCPY is not set +# CONFIG_SECCOMP is not set +# CONFIG_CC_STACKPROTECTOR is not set +# CONFIG_DEPRECATED_PARAM_STRUCT is not set + +# +# Boot options +# +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_CMDLINE="mem=64M console=ttyMSM0" +# CONFIG_CMDLINE_FORCE is not set +# CONFIG_XIP_KERNEL is not set +# CONFIG_KEXEC is not set +# CONFIG_CRASH_DUMP is not set +# CONFIG_AUTO_ZRELADDR is not set + +# +# CPU Power Management +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TABLE=y +# CONFIG_CPU_FREQ_DEBUG is not set +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_STAT_DETAILS=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_INTERACTIVE=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_IDLE is not set +CONFIG_CPU_FREQ_MSM=y + +# +# Floating point emulation +# + +# +# At least one emulation must be selected +# +CONFIG_VFP=y +CONFIG_VFPv3=y +CONFIG_NEON=y + +# +# Userspace binary formats +# +CONFIG_BINFMT_ELF=y +CONFIG_HAVE_AOUT=y +# CONFIG_BINFMT_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options +# +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_SLEEP=y +CONFIG_SUSPEND=y 
+CONFIG_SUSPEND_FREEZER=y +CONFIG_HAS_WAKELOCK=y +CONFIG_HAS_EARLYSUSPEND=y +CONFIG_WAKELOCK=y +CONFIG_WAKELOCK_STAT=y +CONFIG_USER_WAKELOCK=y +CONFIG_EARLYSUSPEND=y +# CONFIG_NO_USER_SPACE_SCREEN_ACCESS_CONTROL is not set +CONFIG_FB_EARLYSUSPEND=y +# CONFIG_APM_EMULATION is not set +# CONFIG_PM_RUNTIME is not set +CONFIG_PM_OPS=y +# CONFIG_SUSPEND_TIME is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=y +CONFIG_NET_KEY=y +# CONFIG_NET_KEY_MIGRATE is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +CONFIG_INET_ESP=y +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +CONFIG_IPV6=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +# CONFIG_IPV6_ROUTE_INFO is not set +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_INET6_XFRM_TUNNEL=y +CONFIG_INET6_TUNNEL=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +CONFIG_IPV6_SIT=y +# CONFIG_IPV6_SIT_6RD is not set +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=y +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +# CONFIG_IPV6_MROUTE is not set +CONFIG_ANDROID_PARANOID_NETWORK=y +CONFIG_NET_ACTIVITY_STATS=y +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_NETLINK=y +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_TPROXY=y +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=y +CONFIG_NETFILTER_XT_CONNMARK=y + +# +# Xtables targets +# +# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +# CONFIG_NETFILTER_XT_TARGET_CT is not set +# CONFIG_NETFILTER_XT_TARGET_DSCP is not set +CONFIG_NETFILTER_XT_TARGET_HL=y +# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set +CONFIG_NETFILTER_XT_TARGET_LED=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +# 
CONFIG_NETFILTER_XT_TARGET_NFLOG is not set +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set +# CONFIG_NETFILTER_XT_TARGET_TEE is not set +# CONFIG_NETFILTER_XT_TARGET_TPROXY is not set +CONFIG_NETFILTER_XT_TARGET_TRACE=y +# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set +# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set + +# +# Xtables matches +# +# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +# CONFIG_NETFILTER_XT_MATCH_CPU is not set +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +# CONFIG_NETFILTER_XT_MATCH_DSCP is not set +# CONFIG_NETFILTER_XT_MATCH_ESP is not set +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_HL=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set +# CONFIG_NETFILTER_XT_MATCH_OSF is not set +# CONFIG_NETFILTER_XT_MATCH_OWNER is not set +CONFIG_NETFILTER_XT_MATCH_POLICY=y +# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set +# CONFIG_NETFILTER_XT_MATCH_REALM is not set +# CONFIG_NETFILTER_XT_MATCH_RECENT is not set +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +# CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_CONNTRACK_PROC_COMPAT=y +# CONFIG_IP_NF_QUEUE is not set +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_ADDRTYPE=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_LOG=y +# CONFIG_IP_NF_TARGET_ULOG is not set +CONFIG_NF_NAT=y +CONFIG_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +# CONFIG_NF_NAT_SNMP_BASIC is not set +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_GRE=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_FTP=y +CONFIG_NF_NAT_IRC=y +CONFIG_NF_NAT_TFTP=y +CONFIG_NF_NAT_AMANDA=y +CONFIG_NF_NAT_PPTP=y +CONFIG_NF_NAT_H323=y +CONFIG_NF_NAT_SIP=y +CONFIG_IP_NF_MANGLE=y +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +# CONFIG_IP_NF_TARGET_ECN is not set +# CONFIG_IP_NF_TARGET_TTL is not set +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV6=y +CONFIG_NF_CONNTRACK_IPV6=y +# CONFIG_IP6_NF_QUEUE is not set +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_EUI64=y +CONFIG_IP6_NF_MATCH_FRAG=y +CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +CONFIG_IP6_NF_MATCH_RT=y +CONFIG_IP6_NF_TARGET_HL=y +CONFIG_IP6_NF_TARGET_LOG=y 
+CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +# CONFIG_IP6_NF_RAW is not set +# CONFIG_BRIDGE_NF_EBTABLES is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +CONFIG_STP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +# CONFIG_NET_SCH_CBQ is not set +CONFIG_NET_SCH_HTB=y +# CONFIG_NET_SCH_HFSC is not set +# CONFIG_NET_SCH_PRIO is not set +# CONFIG_NET_SCH_MULTIQ is not set +# CONFIG_NET_SCH_RED is not set +# CONFIG_NET_SCH_SFQ is not set +# CONFIG_NET_SCH_TEQL is not set +# CONFIG_NET_SCH_TBF is not set +# CONFIG_NET_SCH_GRED is not set +# CONFIG_NET_SCH_DSMARK is not set +# CONFIG_NET_SCH_NETEM is not set +# CONFIG_NET_SCH_DRR is not set +CONFIG_NET_SCH_INGRESS=y + +# +# Classification +# +CONFIG_NET_CLS=y +# CONFIG_NET_CLS_BASIC is not set +# CONFIG_NET_CLS_TCINDEX is not set +# CONFIG_NET_CLS_ROUTE4 is not set +# CONFIG_NET_CLS_FW is not set +CONFIG_NET_CLS_U32=y +# CONFIG_CLS_U32_PERF is not set +# CONFIG_CLS_U32_MARK is not set +# CONFIG_NET_CLS_RSVP is not set +# CONFIG_NET_CLS_RSVP6 is not set +# CONFIG_NET_CLS_FLOW is not set +# CONFIG_NET_CLS_CGROUP is not set +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +# CONFIG_NET_EMATCH_CMP is not set +# CONFIG_NET_EMATCH_NBYTE is not set +CONFIG_NET_EMATCH_U32=y +# CONFIG_NET_EMATCH_META is not set +# CONFIG_NET_EMATCH_TEXT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=y +# CONFIG_GACT_PROB is not set +CONFIG_NET_ACT_MIRRED=y +# CONFIG_NET_ACT_IPT is not set +# CONFIG_NET_ACT_NAT is not set +# CONFIG_NET_ACT_PEDIT is not set +# CONFIG_NET_ACT_SIMP is not set +# CONFIG_NET_ACT_SKBEDIT is not set +# CONFIG_NET_ACT_CSUM is not set +# CONFIG_NET_CLS_IND is not set +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +CONFIG_BT=y +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +# CONFIG_BT_BNEP_MC_FILTER is not set +# CONFIG_BT_BNEP_PROTO_FILTER is not set +CONFIG_BT_HIDP=y + +# +# Bluetooth device drivers +# +# CONFIG_BT_HCIBTSDIO is not set +CONFIG_BT_HCIUART=y +CONFIG_BT_HCIUART_H4=y +# CONFIG_BT_HCIUART_BCSP is not set +# CONFIG_BT_HCIUART_ATH3K is not set +CONFIG_BT_HCIUART_LL=y +# CONFIG_BT_HCIVHCI is not set +# CONFIG_BT_MRVL is not set +# CONFIG_AF_RXRPC is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WIRELESS_EXT=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_WEXT_PRIV=y +# CONFIG_CFG80211 is not set +CONFIG_WIRELESS_EXT_SYSFS=y +# CONFIG_LIB80211 is not set + +# +# CFG80211 needs to be enabled for MAC80211 +# + +# +# Some wireless drivers require a rate control algorithm +# +# CONFIG_WIMAX is not set +CONFIG_RFKILL=y +# CONFIG_RFKILL_PM is not set +CONFIG_RFKILL_LEDS=y +# CONFIG_RFKILL_INPUT is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="" +# 
CONFIG_DEVTMPFS is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +CONFIG_GENLOCK=y +CONFIG_GENLOCK_MISCDEVICE=y +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +CONFIG_MTD_MSM_NAND=y +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +CONFIG_MTD_NAND_IDS=y +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_UBI is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=y + +# +# DRBD disabled because PROC_FS, INET or CONNECTOR not selected +# +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_MG_DISK is not set +# CONFIG_BLK_DEV_RBD is not set +CONFIG_MISC_DEVICES=y +# CONFIG_AD525X_DPOT is not set +CONFIG_ANDROID_PMEM=y +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +CONFIG_KERNEL_DEBUGGER_CORE=y +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1780 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_SENSORS_AK8975 is not set +CONFIG_SENSORS_AKM8973=y +# CONFIG_SENSORS_AKM8976 is not set +# CONFIG_VP_A1026 is not set +# CONFIG_DS1682 is not set +# CONFIG_TI_DAC7512 is not set +CONFIG_UID_STAT=y +# CONFIG_BMP085 is not set +# CONFIG_WL127X_RFKILL is not set +CONFIG_SENSORS_BMA150_SPI=y +# CONFIG_APANIC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not 
set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_IWMC3200TOP is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_ATA is not set +CONFIG_MD=y +# CONFIG_BLK_DEV_MD is not set +CONFIG_BLK_DEV_DM=y +# CONFIG_DM_DEBUG is not set +CONFIG_DM_CRYPT=y +# CONFIG_DM_SNAPSHOT is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_RAID is not set +# CONFIG_DM_ZERO is not set +# CONFIG_DM_MULTIPATH is not set +# CONFIG_DM_DELAY is not set +CONFIG_DM_UEVENT=y +CONFIG_NETDEVICES=y +CONFIG_IFB=m +CONFIG_DUMMY=y +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +CONFIG_TUN=y +# CONFIG_VETH is not set +CONFIG_MII=y +# CONFIG_PHYLIB is not set +CONFIG_NET_ETHERNET=y +# CONFIG_AX88796 is not set +CONFIG_SMC91X=y +# CONFIG_DM9000 is not set +# CONFIG_ENC28J60 is not set +# CONFIG_ETHOC is not set +# CONFIG_SMC911X is not set +# CONFIG_SMSC911X is not set +# CONFIG_DNET is not set +# CONFIG_IBM_NEW_EMAC_ZMII is not set +# CONFIG_IBM_NEW_EMAC_RGMII is not set +# CONFIG_IBM_NEW_EMAC_TAH is not set +# CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set +# CONFIG_B44 is not set +# CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set +CONFIG_WLAN=y +CONFIG_BCM4329=m +CONFIG_BCM4329_FW_PATH="/vendor/firmware/fw_bcm4329.bin" +CONFIG_BCM4329_NVRAM_PATH="/proc/calibration" +# CONFIG_HOSTAP is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set + +# +# CAIF transport drivers +# +CONFIG_PPP=y +# CONFIG_PPP_MULTILINK is not set +# CONFIG_PPP_FILTER is not set +CONFIG_PPP_ASYNC=y +# CONFIG_PPP_SYNC_TTY is not set +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_MPPE=y +# CONFIG_PPPOE is not set +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +# CONFIG_SLIP is not set +CONFIG_SLHC=y +# CONFIG_NETCONSOLE is not set +CONFIG_MSM_RMNET=y +# CONFIG_MSM_RMNET_DEBUG is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +CONFIG_GAN_ETH=y +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set +CONFIG_INPUT_KEYRESET=y + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_ATMEL=y +CONFIG_TOUCHSCREEN_COMPATIBLE_REPORT=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_I2C is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# 
CONFIG_TOUCHSCREEN_ELAN_I2C_8232 is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_QT602240 is not set +# CONFIG_TOUCHSCREEN_MSM is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_W90X900 is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATI_REMOTE is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +CONFIG_INPUT_KEYCHORD=y +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_CAPELLA_CM3602 is not set +# CONFIG_INPUT_CAPELLA_CM3602_HTC is not set +CONFIG_LIGHTSENSOR_MICROP=y +CONFIG_INPUT_OPTICALJOYSTICK=y +CONFIG_OPTICALJOYSTICK_CRUCIAL=y +CONFIG_OPTICALJOYSTICK_CRUCIAL_uP=y +# CONFIG_OPTICALJOYSTICK_CRUCIAL_SPI is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_N_GSM is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX3107 is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_MSM=y +# CONFIG_SERIAL_MSM_CONSOLE is not set +# CONFIG_SERIAL_MSM_CLOCK_CONTROL is not set +CONFIG_SERIAL_MSM_HS=y +CONFIG_SERIAL_BCM_BT_LPM=y +# CONFIG_SERIAL_TIMBERDALE is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_TTY_PRINTK is not set +# CONFIG_HVC_DCC is not set +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_DCC_TTY is not set +# CONFIG_RAMOOPS is not set +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +# CONFIG_I2C_CHARDEV is not set +# CONFIG_I2C_MUX is not set +CONFIG_I2C_HELPER_AUTO=y + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE is not set +# CONFIG_I2C_GPIO is not set +CONFIG_I2C_MSM=y +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y + +# 
+# SPI Master Controller Drivers +# +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_PXA2XX_PCI is not set +CONFIG_SPI_QSD=y +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_DESIGNWARE is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_TLE62X0 is not set + +# +# PPS support +# +# CONFIG_PPS is not set + +# +# PPS generators support +# +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set + +# +# Memory mapped GPIO expanders: +# +# CONFIG_GPIO_BASIC_MMIO is not set +# CONFIG_GPIO_IT8761E is not set + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_SX150X is not set +# CONFIG_GPIO_ADP5588 is not set + +# +# PCI GPIO expanders: +# + +# +# SPI GPIO expanders: +# +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_74X164 is not set + +# +# AC97 GPIO expanders: +# + +# +# MODULbus GPIO expanders: +# +# CONFIG_W1 is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_DS2784 is not set +# CONFIG_BATTERY_BQ20Z75 is not set +# CONFIG_BATTERY_BQ27x00 is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set +CONFIG_MFD_SUPPORT=y +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_ASIC3 is not set +# CONFIG_HTC_EGPIO is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_TPS65200 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_MFD_STMPE is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_T7L66XB is not set +# CONFIG_MFD_TC6387XB is not set +# CONFIG_MFD_TC6393XB is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13XXX is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_WL1273_CORE is not set +CONFIG_REGULATOR=y +# CONFIG_REGULATOR_DEBUG is not set +# CONFIG_REGULATOR_DUMMY is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_BQ24022 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +CONFIG_REGULATOR_TPS65023=y +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_AD5398 is not set +# CONFIG_REGULATOR_TPS6524X is not set +CONFIG_MEDIA_SUPPORT=y + +# +# Multimedia core support +# +# CONFIG_VIDEO_DEV is not set +# CONFIG_DVB_CORE is not set +# 
CONFIG_VIDEO_MEDIA is not set + +# +# Multimedia drivers +# +CONFIG_RC_CORE=y +CONFIG_LIRC=y +CONFIG_RC_MAP=y +CONFIG_IR_NEC_DECODER=y +CONFIG_IR_RC5_DECODER=y +CONFIG_IR_RC6_DECODER=y +CONFIG_IR_JVC_DECODER=y +CONFIG_IR_SONY_DECODER=y +CONFIG_IR_RC5_SZ_DECODER=y +CONFIG_IR_LIRC_CODEC=y +# CONFIG_IR_IMON is not set +# CONFIG_IR_MCEUSB is not set +# CONFIG_IR_STREAMZAP is not set +# CONFIG_RC_LOOPBACK is not set + +# +# Qualcomm MSM Camera And Video +# +CONFIG_MSM_CAMERA=y +CONFIG_720P_CAMERA=y +# CONFIG_MSM_CAMERA_DEBUG is not set + +# +# Camera Sensor Selection +# +# CONFIG_MT9T013 is not set +# CONFIG_MT9D112 is not set +# CONFIG_MT9P012 is not set +# CONFIG_S5K3E2FX is not set +# CONFIG_S5K6AAFX is not set +CONFIG_OV8810=y +# CONFIG_OV9665 is not set +# CONFIG_S5K3H1GX is not set + +# +# Graphics support +# +# CONFIG_DRM is not set +CONFIG_MSM_KGSL=y +# CONFIG_MSM_KGSL_CFF_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX is not set +CONFIG_MSM_KGSL_MMU=y +# CONFIG_KGSL_PER_PROCESS_PAGE_TABLE is not set +CONFIG_MSM_KGSL_PAGE_TABLE_SIZE=0xFFF0000 +CONFIG_MSM_KGSL_MMU_PAGE_FAULT=y +# CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES is not set +# CONFIG_VGASTATE is not set +CONFIG_VIDEO_OUTPUT_CONTROL=y +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_WMT_GE_ROPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_FB_MSM=y +CONFIG_FB_MSM_LEGACY_MDP=y +CONFIG_FB_MSM_MDP_PPP=y +CONFIG_FB_MSM_LCDC=y +CONFIG_FB_MSM_MDDI=y +# CONFIG_FB_MSM_MDDI_EPSON is not set +# CONFIG_FB_MSM_MDDI_NOVTEC is not set +# CONFIG_MSM_HDMI is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set +# CONFIG_LOGO is not set +# CONFIG_SOUND is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +# CONFIG_HIDRAW is not set +# CONFIG_HID_PID is not set + +# +# Special HID drivers +# +CONFIG_HID_APPLE=y +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_WACOM is not set +CONFIG_USB_SUPPORT=y +CONFIG_USB_ARCH_HAS_HCD=y +# CONFIG_USB_ARCH_HAS_OHCI is not set +CONFIG_USB_ARCH_HAS_EHCI=y +# CONFIG_USB is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_MUSB_HDRC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_GADGET_SELECTED=y +# CONFIG_USB_GADGET_R8A66597 is not set +# CONFIG_USB_GADGET_PXA_U2O is not set +# CONFIG_USB_GADGET_M66592 is not set +# CONFIG_USB_GADGET_CI13XXX_MSM is not set +CONFIG_USB_GADGET_MSM_72K=y +CONFIG_USB_MSM_72K=y +CONFIG_USB_MSM_72K_HTC=y 
+CONFIG_USB_GADGET_DUALSPEED=y +# CONFIG_USB_ZERO is not set +# CONFIG_USB_ETH is not set +# CONFIG_USB_G_NCM is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_FILE_STORAGE is not set +# CONFIG_USB_MASS_STORAGE is not set +# CONFIG_USB_G_SERIAL is not set +# CONFIG_USB_G_PRINTER is not set +CONFIG_USB_ANDROID=y +# CONFIG_USB_ANDROID_ACM is not set +CONFIG_USB_ANDROID_ADB=y +CONFIG_USB_ANDROID_DIAG=y +CONFIG_USB_ANDROID_MASS_STORAGE=y +# CONFIG_USB_ANDROID_MTP is not set +CONFIG_USB_ANDROID_RNDIS=y +# CONFIG_USB_ANDROID_RNDIS_WCEIS is not set +# CONFIG_USB_ANDROID_ACCESSORY is not set +CONFIG_USB_CSW_HACK=y +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set +# CONFIG_USB_ACCESSORY_DETECT is not set +# CONFIG_DOCK_ACCESSORY_DETECT is not set +# CONFIG_USB_BYPASS_VBUS_NOTIFY is not set + +# +# OTG and related infrastructure +# +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ULPI is not set +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_MSM_OTG_72K is not set +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_UNSAFE_RESUME=y +# CONFIG_MMC_CLKGATE is not set +CONFIG_MMC_EMBEDDED_SDIO=y +CONFIG_MMC_PARANOID_SD_INIT=y + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=8 +# CONFIG_MMC_BLOCK_BOUNCE is not set +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_SDHCI is not set +CONFIG_MMC_MSM=y +# CONFIG_MMC_SPI is not set +# CONFIG_MMC_DW is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y + +# +# LED drivers +# +# CONFIG_LEDS_PCA9532 is not set +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_GPIO_PLATFORM=y +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 is not set +CONFIG_LEDS_CPLD=y +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_LT3593 is not set +CONFIG_LEDS_TRIGGERS=y + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set +CONFIG_LEDS_TRIGGER_SLEEP=y + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_NFC_DEVICES is not set +CONFIG_SWITCH=y +CONFIG_SWITCH_GPIO=y +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +# CONFIG_RTC_INTF_SYSFS is not set +# CONFIG_RTC_INTF_PROC is not set +# CONFIG_RTC_INTF_DEV is not set +CONFIG_RTC_INTF_ALARM=y +CONFIG_RTC_INTF_ALARM_DEV=y +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set + +# +# SPI RTC 
drivers +# +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_CMOS is not set +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_MSM7X00A=y +# CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set +CONFIG_STAGING=y +# CONFIG_STAGING_EXCLUDE_BUILD is not set +# CONFIG_ECHO is not set +# CONFIG_BRCM80211 is not set +# CONFIG_COMEDI is not set + +# +# Android +# +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_LOGGER=y +CONFIG_ANDROID_RAM_CONSOLE=y +CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE=128 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE=16 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE=8 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL=0x11d +# CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT is not set +CONFIG_ANDROID_TIMED_OUTPUT=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +# CONFIG_POHMELFS is not set +# CONFIG_IIO is not set +CONFIG_ZRAM=y +# CONFIG_FB_SM7XX is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_ST_BT is not set +# CONFIG_LIRC_STAGING is not set +CONFIG_MACH_NO_WESTBRIDGE=y +# CONFIG_ATH6K_LEGACY is not set +# CONFIG_FT1000 is not set + +# +# Speakup console speech +# +# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set + +# +# File systems +# +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT23=y +CONFIG_EXT4_FS_XATTR=y +# CONFIG_EXT4_FS_POSIX_ACL is not set +# CONFIG_EXT4_FS_SECURITY is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_FS_POSIX_ACL is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +# CONFIG_DNOTIFY is not set +CONFIG_INOTIFY_USER=y +# CONFIG_FANOTIFY is not set +# CONFIG_QUOTA is not set +# CONFIG_QUOTACTL is not set +# CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +# CONFIG_MSDOS_FS is not set +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not 
set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_YAFFS_FS=y +CONFIG_YAFFS_YAFFS1=y +# CONFIG_YAFFS_9BYTE_TAGS is not set +# CONFIG_YAFFS_DOES_ECC is not set +CONFIG_YAFFS_YAFFS2=y +CONFIG_YAFFS_AUTO_YAFFS2=y +CONFIG_YAFFS_DISABLE_TAGS_ECC=y +# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set +# CONFIG_YAFFS_EMPTY_LOST_AND_FOUND is not set +# CONFIG_YAFFS_DISABLE_BLOCK_REFRESHING is not set +# CONFIG_YAFFS_DISABLE_BACKGROUND is not set +CONFIG_YAFFS_XATTR=y +# CONFIG_JFFS2_FS is not set +# CONFIG_LOGFS is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +# CONFIG_NFS_FS is not set +# CONFIG_NFSD is not set +# CONFIG_CEPH_FS is not set +CONFIG_CIFS=y +# CONFIG_CIFS_STATS is not set +CONFIG_CIFS_WEAK_PW_HASH=y +# CONFIG_CIFS_UPCALL is not set +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DFS_UPCALL is not set +# CONFIG_CIFS_ACL is not set +# CONFIG_CIFS_EXPERIMENTAL is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_UTF8 is not set + +# +# Kernel hacking +# +CONFIG_PRINTK_TIME=y +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_DEBUG_FS is not set +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SHIRQ is not set +# CONFIG_LOCKUP_DETECTOR is not set +# CONFIG_HARDLOCKUP_DETECTOR is not set +# CONFIG_DETECT_HUNG_TASK is not set +CONFIG_SCHED_DEBUG=y +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_DEBUG_RT_MUTEXES is not 
set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_BKL is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_SPARSE_RCU_POINTER is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +# CONFIG_SYSCTL_SYSCALL_CHECK is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_STRICT_DEVMEM is not set +CONFIG_ARM_UNWIND=y +# CONFIG_DEBUG_USER is not set +# CONFIG_DEBUG_ERRORS is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_LL is not set +# CONFIG_OC_ETM is not set + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_DEBUG_PROC_KEYS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_GHASH is not set +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# 
Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=y +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_TWOFISH_COMMON=y + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set +CONFIG_CRYPTO_HW=y +# CONFIG_BINARY_PRINTF is not set + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +# CONFIG_XZ_DEC is not set +# CONFIG_XZ_DEC_BCJ is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=y +CONFIG_TEXTSEARCH_BM=y +CONFIG_TEXTSEARCH_FSM=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/arm/configs/cayniarb_mahimahi_defconfig b/arch/arm/configs/cayniarb_mahimahi_defconfig new file mode 100644 index 0000000000000..17a9ed96ad928 --- /dev/null +++ b/arch/arm/configs/cayniarb_mahimahi_defconfig @@ -0,0 +1,2132 @@ +# +# Automatically generated make config: don't edit +# Linux/arm 2.6.38.8 Kernel Configuration +# Sun Mar 25 18:09:45 2012 +# +CONFIG_ARM=y +CONFIG_SYS_SUPPORTS_APM_EMULATION=y +CONFIG_GENERIC_GPIO=y +# CONFIG_ARCH_USES_GETTIMEOFFSET is not set +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_HAVE_PROC_CPU=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_ARCH_HAS_CPUFREQ=y +CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_VECTORS_BASE=0xffff0000 +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y +CONFIG_HAVE_IRQ_WORK=y + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="" +CONFIG_LOCALVERSION="-cayniarb-ics-stock" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +# CONFIG_KERNEL_GZIP is not set +# CONFIG_KERNEL_LZMA is not set +CONFIG_KERNEL_XZ=y +# CONFIG_KERNEL_LZO is not set +CONFIG_SWAP=y +# CONFIG_SYSVIPC is not set +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set +CONFIG_HAVE_GENERIC_HARDIRQS=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_HARDIRQS=y +# CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED is not set +CONFIG_HAVE_SPARSE_IRQ=y +# CONFIG_GENERIC_PENDING_IRQ is not set +# CONFIG_AUTO_IRQ_AFFINITY is not set +# CONFIG_IRQ_PER_CPU is not set +# CONFIG_SPARSE_IRQ is not set + +# +# RCU Subsystem +# +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_TINY_RCU is not set 
+CONFIG_TINY_PREEMPT_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_TRACE is not set +# CONFIG_TREE_RCU_TRACE is not set +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=1 +CONFIG_RCU_BOOST_DELAY=500 +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_CGROUPS=y +CONFIG_CGROUP_DEBUG=y +# CONFIG_CGROUP_NS is not set +CONFIG_CGROUP_FREEZER=y +# CONFIG_CGROUP_DEVICE is not set +# CONFIG_CPUSETS is not set +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +# CONFIG_CGROUP_MEM_RES_CTLR is not set +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +# CONFIG_BLK_CGROUP is not set +# CONFIG_NAMESPACES is not set +# CONFIG_SCHED_AUTOGROUP is not set +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_EXPERT=y +CONFIG_EMBEDDED=y +CONFIG_UID16=y +# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +# CONFIG_ELF_CORE is not set +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_ASHMEM=y +CONFIG_AIO=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +# CONFIG_PERF_EVENTS is not set +# CONFIG_PERF_COUNTERS is not set +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_COMPAT_BRK=y +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_DMA_API_DEBUG=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +CONFIG_LBDAF=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_IOSCHED_BFQ=y +# CONFIG_CGROUP_BFQIO is not set +# CONFIG_DEFAULT_DEADLINE is not set +# CONFIG_DEFAULT_CFQ is not set +CONFIG_DEFAULT_BFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="bfq" +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +# CONFIG_INLINE_SPIN_UNLOCK is not set +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_READ_TRYLOCK is not set +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +# CONFIG_INLINE_READ_UNLOCK is not set +# CONFIG_INLINE_READ_UNLOCK_BH is not set +# CONFIG_INLINE_READ_UNLOCK_IRQ is not set +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# 
CONFIG_INLINE_WRITE_TRYLOCK is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set +# CONFIG_INLINE_WRITE_UNLOCK is not set +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +# CONFIG_MUTEX_SPIN_ON_OWNER is not set +CONFIG_FREEZER=y + +# +# System Type +# +CONFIG_MMU=y +# CONFIG_ARCH_AAEC2000 is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_REALVIEW is not set +# CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_VEXPRESS is not set +# CONFIG_ARCH_AT91 is not set +# CONFIG_ARCH_BCMRING is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CNS3XXX is not set +# CONFIG_ARCH_GEMINI is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_EP93XX is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_MXS is not set +# CONFIG_ARCH_STMP3XXX is not set +# CONFIG_ARCH_NETX is not set +# CONFIG_ARCH_H720X is not set +# CONFIG_ARCH_IOP13XX is not set +# CONFIG_ARCH_IOP32X is not set +# CONFIG_ARCH_IOP33X is not set +# CONFIG_ARCH_IXP23XX is not set +# CONFIG_ARCH_IXP2000 is not set +# CONFIG_ARCH_IXP4XX is not set +# CONFIG_ARCH_DOVE is not set +# CONFIG_ARCH_KIRKWOOD is not set +# CONFIG_ARCH_LOKI is not set +# CONFIG_ARCH_LPC32XX is not set +# CONFIG_ARCH_MV78XX0 is not set +# CONFIG_ARCH_ORION5X is not set +# CONFIG_ARCH_MMP is not set +# CONFIG_ARCH_KS8695 is not set +# CONFIG_ARCH_NS9XXX is not set +# CONFIG_ARCH_W90X900 is not set +# CONFIG_ARCH_NUC93X is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_PNX4008 is not set +# CONFIG_ARCH_PXA is not set +CONFIG_ARCH_MSM=y +# CONFIG_ARCH_SHMOBILE is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_S3C64XX is not set +# CONFIG_ARCH_S5P64X0 is not set +# CONFIG_ARCH_S5P6442 is not set +# CONFIG_ARCH_S5PC100 is not set +# CONFIG_ARCH_S5PV210 is not set +# CONFIG_ARCH_S5PV310 is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_TCC_926 is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_U300 is not set +# CONFIG_ARCH_U8500 is not set +# CONFIG_ARCH_NOMADIK is not set +# CONFIG_ARCH_DAVINCI is not set +# CONFIG_ARCH_OMAP is not set +# CONFIG_PLAT_SPEAR is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_ARCH_MSM7X00A is not set +# CONFIG_ARCH_MSM7X30 is not set +CONFIG_ARCH_QSD8X50=y +# CONFIG_ARCH_MSM8X60 is not set +CONFIG_ARCH_MSM_SCORPION=y +CONFIG_HAS_MSM_DEBUG_UART_PHYS=y +CONFIG_MSM_MDP31=y +# CONFIG_PERFLOCK is not set + +# +# Qualcomm MSM Board Type +# +# CONFIG_MACH_SWORDFISH is not set +CONFIG_MACH_QSD8X50_SURF=y +# CONFIG_MACH_QSD8X50A_ST1_5 is not set +CONFIG_MSM_DEBUG_UART=1 +# CONFIG_MSM_DEBUG_UART_NONE is not set +CONFIG_MSM_DEBUG_UART1=y +# CONFIG_MSM_DEBUG_UART2 is not set +# CONFIG_MSM_DEBUG_UART3 is not set +CONFIG_MSM_PROC_COMM=y +CONFIG_MACH_MAHIMAHI=y +CONFIG_MACH_BRAVO_NONE=y +# CONFIG_MACH_BRAVO is not set +# CONFIG_MACH_BRAVOC is not set +# CONFIG_MACH_INCREDIBLE is not set +# CONFIG_MACH_INCREDIBLEC is not set +# CONFIG_MACH_SUPERSONIC is not set +# CONFIG_MACH_QSD8X50_FFA is not set +# CONFIG_HTC_HEADSET is not set +CONFIG_HTC_35MM_JACK=y +# CONFIG_HTC_BATTCHG is not set +CONFIG_HTC_PWRSPLY=y +# CONFIG_HTC_PWRSINK is not set +# CONFIG_HTC_POWER_COLLAPSE_MAGIC is not set +# CONFIG_HTC_ONMODE_CHARGING is not set +CONFIG_CACHE_FLUSH_RANGE_LIMIT=0x40000 
+CONFIG_MSM7X00A_USE_GP_TIMER=y +# CONFIG_MSM7X00A_USE_DG_TIMER is not set +CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE_SUSPEND=y +# CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE is not set +# CONFIG_MSM7X00A_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_SLEEP_MODE=0 +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE_SUSPEND is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE=y +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE=1 +CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME=50000000 +CONFIG_MSM7X00A_IDLE_SPIN_TIME=80000 +CONFIG_MSM_IDLE_STATS=y +CONFIG_MSM_IDLE_STATS_FIRST_BUCKET=62500 +CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT=2 +CONFIG_MSM_IDLE_STATS_BUCKET_COUNT=10 +CONFIG_MSM_FIQ_SUPPORT=y +# CONFIG_MSM_SERIAL_DEBUGGER is not set +CONFIG_MSM_SMD=y +CONFIG_MSM_GPIOMUX=y +CONFIG_MSM_DAL=y +CONFIG_MSM_ONCRPCROUTER=y +CONFIG_MSM_CPU_FREQ_SET_MIN_MAX=y +CONFIG_MSM_CPU_FREQ_MAX=998400 +CONFIG_MSM_CPU_FREQ_MIN=245000 +# CONFIG_AXI_SCREEN_POLICY is not set +# CONFIG_MSM_HW3D is not set +CONFIG_HTC_ACOUSTIC_QSD=y +CONFIG_MSM_QDSP6=y +CONFIG_WIFI_CONTROL_FUNC=y +# CONFIG_WIFI_MEM_PREALLOC is not set +CONFIG_ARCH_MSM_FLASHLIGHT=y +# CONFIG_MICROP_COMMON is not set +# CONFIG_HTC_HEADSET_MGR is not set +# CONFIG_VIRTUAL_KPANIC_PARTITION is not set + +# +# System MMU +# + +# +# Processor Type +# +CONFIG_CPU_32v6K=y +CONFIG_CPU_V7=y +CONFIG_CPU_32v7=y +CONFIG_CPU_ABRT_EV7=y +CONFIG_CPU_PABRT_V7=y +CONFIG_CPU_CACHE_V7=y +CONFIG_CPU_CACHE_VIPT=y +CONFIG_CPU_COPY_V6=y +CONFIG_CPU_TLB_V7=y +CONFIG_VERIFY_PERMISSION_FAULT=y +CONFIG_CPU_HAS_ASID=y +CONFIG_CPU_CP15=y +CONFIG_CPU_CP15_MMU=y + +# +# Processor Features +# +CONFIG_ARM_THUMB=y +CONFIG_ARM_THUMBEE=y +# CONFIG_SWP_EMULATE is not set +# CONFIG_CPU_ICACHE_DISABLE is not set +# CONFIG_CPU_DCACHE_DISABLE is not set +# CONFIG_CPU_CACHE_ERR_REPORT is not set +# CONFIG_CPU_BPREDICT_DISABLE is not set +CONFIG_ARM_L1_CACHE_SHIFT=5 +CONFIG_ARM_DMA_MEM_BUFFERABLE=y +CONFIG_CPU_HAS_PMU=y +CONFIG_ARM_ERRATA_430973=y +CONFIG_ARM_ERRATA_458693=y +CONFIG_ARM_ERRATA_460075=y +CONFIG_ARM_ERRATA_743622=y +# CONFIG_FIQ_DEBUGGER is not set + +# +# Bus support +# +# CONFIG_PCI_SYSCALL is not set +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Kernel Features +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_VMSPLIT_3G=y +# CONFIG_VMSPLIT_2G is not set +# CONFIG_VMSPLIT_1G is not set +CONFIG_PAGE_OFFSET=0xC0000000 +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_HZ=100 +# CONFIG_THUMB2_KERNEL is not set +CONFIG_AEABI=y +# CONFIG_OABI_COMPAT is not set +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set +# CONFIG_HIGHMEM is not set +CONFIG_VMALLOC_RESERVE=0x08000000 +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_COMPACTION is not set +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_VIRT_TO_BUS=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 +CONFIG_NEED_PER_CPU_KM=y +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_ALIGNMENT_TRAP=y +# 
CONFIG_UACCESS_WITH_MEMCPY is not set +# CONFIG_SECCOMP is not set +# CONFIG_CC_STACKPROTECTOR is not set +# CONFIG_DEPRECATED_PARAM_STRUCT is not set + +# +# Boot options +# +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_CMDLINE="mem=64M console=ttyMSM0" +# CONFIG_CMDLINE_FORCE is not set +# CONFIG_XIP_KERNEL is not set +# CONFIG_KEXEC is not set +# CONFIG_CRASH_DUMP is not set +# CONFIG_AUTO_ZRELADDR is not set + +# +# CPU Power Management +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TABLE=y +# CONFIG_CPU_FREQ_DEBUG is not set +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_STAT_DETAILS=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_INTERACTIVE=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_IDLE is not set +CONFIG_CPU_FREQ_MSM=y + +# +# Floating point emulation +# + +# +# At least one emulation must be selected +# +CONFIG_VFP=y +CONFIG_VFPv3=y +CONFIG_NEON=y + +# +# Userspace binary formats +# +CONFIG_BINFMT_ELF=y +CONFIG_HAVE_AOUT=y +# CONFIG_BINFMT_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options +# +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_SLEEP=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HAS_WAKELOCK=y +CONFIG_HAS_EARLYSUSPEND=y +CONFIG_WAKELOCK=y +CONFIG_WAKELOCK_STAT=y +CONFIG_USER_WAKELOCK=y +CONFIG_EARLYSUSPEND=y +# CONFIG_NO_USER_SPACE_SCREEN_ACCESS_CONTROL is not set +CONFIG_FB_EARLYSUSPEND=y +# CONFIG_APM_EMULATION is not set +# CONFIG_PM_RUNTIME is not set +CONFIG_PM_OPS=y +# CONFIG_SUSPEND_TIME is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=y +CONFIG_NET_KEY=y +# CONFIG_NET_KEY_MIGRATE is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +CONFIG_INET_ESP=y +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +CONFIG_IPV6=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +# CONFIG_IPV6_ROUTE_INFO is not set +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_INET6_XFRM_TUNNEL=y +CONFIG_INET6_TUNNEL=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +CONFIG_IPV6_SIT=y +# CONFIG_IPV6_SIT_6RD is not set 
+CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=y +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +# CONFIG_IPV6_MROUTE is not set +CONFIG_ANDROID_PARANOID_NETWORK=y +CONFIG_NET_ACTIVITY_STATS=y +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_NETLINK=y +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_TPROXY=y +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=y +CONFIG_NETFILTER_XT_CONNMARK=y + +# +# Xtables targets +# +# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +# CONFIG_NETFILTER_XT_TARGET_CT is not set +# CONFIG_NETFILTER_XT_TARGET_DSCP is not set +CONFIG_NETFILTER_XT_TARGET_HL=y +# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set +CONFIG_NETFILTER_XT_TARGET_LED=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set +# CONFIG_NETFILTER_XT_TARGET_TEE is not set +# CONFIG_NETFILTER_XT_TARGET_TPROXY is not set +CONFIG_NETFILTER_XT_TARGET_TRACE=y +# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set +# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set + +# +# Xtables matches +# +# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +# CONFIG_NETFILTER_XT_MATCH_CPU is not set +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +# CONFIG_NETFILTER_XT_MATCH_DSCP is not set +# CONFIG_NETFILTER_XT_MATCH_ESP is not set +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_HL=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set +# CONFIG_NETFILTER_XT_MATCH_OSF is not set +# CONFIG_NETFILTER_XT_MATCH_OWNER is not set +CONFIG_NETFILTER_XT_MATCH_POLICY=y +# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set +# CONFIG_NETFILTER_XT_MATCH_REALM is not set +# CONFIG_NETFILTER_XT_MATCH_RECENT is not set +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +# 
CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_CONNTRACK_PROC_COMPAT=y +# CONFIG_IP_NF_QUEUE is not set +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_ADDRTYPE=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_LOG=y +# CONFIG_IP_NF_TARGET_ULOG is not set +CONFIG_NF_NAT=y +CONFIG_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +# CONFIG_NF_NAT_SNMP_BASIC is not set +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_GRE=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_FTP=y +CONFIG_NF_NAT_IRC=y +CONFIG_NF_NAT_TFTP=y +CONFIG_NF_NAT_AMANDA=y +CONFIG_NF_NAT_PPTP=y +CONFIG_NF_NAT_H323=y +CONFIG_NF_NAT_SIP=y +CONFIG_IP_NF_MANGLE=y +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +# CONFIG_IP_NF_TARGET_ECN is not set +# CONFIG_IP_NF_TARGET_TTL is not set +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV6=y +CONFIG_NF_CONNTRACK_IPV6=y +# CONFIG_IP6_NF_QUEUE is not set +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_EUI64=y +CONFIG_IP6_NF_MATCH_FRAG=y +CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +CONFIG_IP6_NF_MATCH_RT=y +CONFIG_IP6_NF_TARGET_HL=y +CONFIG_IP6_NF_TARGET_LOG=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +# CONFIG_IP6_NF_RAW is not set +# CONFIG_BRIDGE_NF_EBTABLES is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +CONFIG_STP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +# CONFIG_NET_SCH_CBQ is not set +CONFIG_NET_SCH_HTB=y +# CONFIG_NET_SCH_HFSC is not set +# CONFIG_NET_SCH_PRIO is not set +# CONFIG_NET_SCH_MULTIQ is not set +# CONFIG_NET_SCH_RED is not set +# CONFIG_NET_SCH_SFQ is not set +# CONFIG_NET_SCH_TEQL is not set +# CONFIG_NET_SCH_TBF is not set +# CONFIG_NET_SCH_GRED is not set +# CONFIG_NET_SCH_DSMARK is not set +# CONFIG_NET_SCH_NETEM is not set +# CONFIG_NET_SCH_DRR is not set +CONFIG_NET_SCH_INGRESS=y + +# +# Classification +# +CONFIG_NET_CLS=y +# CONFIG_NET_CLS_BASIC is not set +# CONFIG_NET_CLS_TCINDEX is not set +# CONFIG_NET_CLS_ROUTE4 is not set +# CONFIG_NET_CLS_FW is not set +CONFIG_NET_CLS_U32=y +# CONFIG_CLS_U32_PERF is not set +# CONFIG_CLS_U32_MARK is not set +# CONFIG_NET_CLS_RSVP is not set +# CONFIG_NET_CLS_RSVP6 is not set +# CONFIG_NET_CLS_FLOW is not set +# CONFIG_NET_CLS_CGROUP is not set +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +# CONFIG_NET_EMATCH_CMP is not set +# CONFIG_NET_EMATCH_NBYTE is not set +CONFIG_NET_EMATCH_U32=y +# CONFIG_NET_EMATCH_META is not set +# CONFIG_NET_EMATCH_TEXT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=y +# CONFIG_GACT_PROB is not set +CONFIG_NET_ACT_MIRRED=y +# CONFIG_NET_ACT_IPT is not set +# CONFIG_NET_ACT_NAT is not set +# 
CONFIG_NET_ACT_PEDIT is not set +# CONFIG_NET_ACT_SIMP is not set +# CONFIG_NET_ACT_SKBEDIT is not set +# CONFIG_NET_ACT_CSUM is not set +# CONFIG_NET_CLS_IND is not set +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set +# CONFIG_BATMAN_ADV is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +CONFIG_BT=y +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +# CONFIG_BT_BNEP_MC_FILTER is not set +# CONFIG_BT_BNEP_PROTO_FILTER is not set +CONFIG_BT_HIDP=y + +# +# Bluetooth device drivers +# +# CONFIG_BT_HCIBTSDIO is not set +CONFIG_BT_HCIUART=y +CONFIG_BT_HCIUART_H4=y +# CONFIG_BT_HCIUART_BCSP is not set +# CONFIG_BT_HCIUART_ATH3K is not set +CONFIG_BT_HCIUART_LL=y +# CONFIG_BT_HCIVHCI is not set +# CONFIG_BT_MRVL is not set +# CONFIG_AF_RXRPC is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WIRELESS_EXT=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_WEXT_PRIV=y +# CONFIG_CFG80211 is not set +CONFIG_WIRELESS_EXT_SYSFS=y +# CONFIG_LIB80211 is not set + +# +# CFG80211 needs to be enabled for MAC80211 +# + +# +# Some wireless drivers require a rate control algorithm +# +# CONFIG_WIMAX is not set +CONFIG_RFKILL=y +# CONFIG_RFKILL_PM is not set +CONFIG_RFKILL_LEDS=y +# CONFIG_RFKILL_INPUT is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="" +# CONFIG_DEVTMPFS is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +CONFIG_GENLOCK=y +CONFIG_GENLOCK_MISCDEVICE=y +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +CONFIG_MTD_MSM_NAND=y +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +CONFIG_MTD_NAND_IDS=y +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR flash memory 
drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_UBI is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=y + +# +# DRBD disabled because PROC_FS, INET or CONNECTOR not selected +# +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_MG_DISK is not set +# CONFIG_BLK_DEV_RBD is not set +CONFIG_MISC_DEVICES=y +# CONFIG_AD525X_DPOT is not set +CONFIG_ANDROID_PMEM=y +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +CONFIG_KERNEL_DEBUGGER_CORE=y +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1780 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_SENSORS_AK8975 is not set +CONFIG_SENSORS_AKM8973=y +# CONFIG_SENSORS_AKM8976 is not set +CONFIG_VP_A1026=y +# CONFIG_DS1682 is not set +CONFIG_UID_STAT=y +# CONFIG_BMP085 is not set +# CONFIG_WL127X_RFKILL is not set +# CONFIG_APANIC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_IWMC3200TOP is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_ATA is not set +CONFIG_MD=y +# CONFIG_BLK_DEV_MD is not set +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=y +# CONFIG_DM_SNAPSHOT is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_RAID is not set +# CONFIG_DM_ZERO is not set +# CONFIG_DM_MULTIPATH is not set +# CONFIG_DM_DELAY is not set +CONFIG_DM_UEVENT=y +CONFIG_NETDEVICES=y +CONFIG_IFB=y +CONFIG_DUMMY=y +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +CONFIG_TUN=y +# CONFIG_VETH is not set +CONFIG_MII=y +# CONFIG_PHYLIB is not set +CONFIG_NET_ETHERNET=y +# CONFIG_AX88796 is not set +CONFIG_SMC91X=y +# CONFIG_DM9000 is not set +# CONFIG_ETHOC is not set +CONFIG_SMC911X=y +# CONFIG_SMSC911X is not set +# CONFIG_DNET is not set +# CONFIG_IBM_NEW_EMAC_ZMII is not set +# CONFIG_IBM_NEW_EMAC_RGMII is not set +# CONFIG_IBM_NEW_EMAC_TAH is not set +# CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set +# CONFIG_B44 is not set +# CONFIG_KS8851_MLL is not set +CONFIG_NETDEV_1000=y +# CONFIG_STMMAC_ETH is not set +CONFIG_NETDEV_10000=y +CONFIG_WLAN=y +CONFIG_BCM4329=m +CONFIG_BCM4329_FW_PATH="/vendor/firmware/fw_bcm4329.bin" +CONFIG_BCM4329_NVRAM_PATH="/proc/calibration" +# CONFIG_HOSTAP is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set + +# +# CAIF transport drivers +# +CONFIG_PPP=y +# CONFIG_PPP_MULTILINK is not set +# CONFIG_PPP_FILTER is not set +CONFIG_PPP_ASYNC=y +# CONFIG_PPP_SYNC_TTY is not set +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_MPPE=y +# CONFIG_PPPOE is not set +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +# CONFIG_SLIP is not set +CONFIG_SLHC=y +# CONFIG_NETCONSOLE is not set 
+CONFIG_MSM_RMNET=y +# CONFIG_MSM_RMNET_DEBUG is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +CONFIG_GAN_ETH=y +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set +CONFIG_INPUT_KEYRESET=y + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ATMEL is not set +# CONFIG_TOUCHSCREEN_COMPATIBLE_REPORT is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_I2C is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_ELAN_I2C_8232 is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_QT602240 is not set +# CONFIG_TOUCHSCREEN_MSM is not set +CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI=y +CONFIG_TOUCHSCREEN_DUPLICATED_FILTER=y +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_W90X900 is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATI_REMOTE is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +CONFIG_INPUT_KEYCHORD=y +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_CMA3000 is not set +CONFIG_INPUT_CAPELLA_CM3602=y +# CONFIG_INPUT_CAPELLA_CM3602_HTC is not set +# CONFIG_LIGHTSENSOR_MICROP is not set +# CONFIG_INPUT_OPTICALJOYSTICK is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_N_GSM is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_SERIAL_MSM=y +CONFIG_SERIAL_MSM_CONSOLE=y +# CONFIG_SERIAL_MSM_CLOCK_CONTROL is not set +CONFIG_SERIAL_MSM_HS=y +CONFIG_SERIAL_BCM_BT_LPM=y +# CONFIG_SERIAL_TIMBERDALE is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_TTY_PRINTK is not set +# CONFIG_HVC_DCC is not set +# 
CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_DCC_TTY is not set +# CONFIG_RAMOOPS is not set +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +# CONFIG_I2C_CHARDEV is not set +# CONFIG_I2C_MUX is not set +CONFIG_I2C_HELPER_AUTO=y + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE is not set +# CONFIG_I2C_GPIO is not set +CONFIG_I2C_MSM=y +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_SPI is not set + +# +# PPS support +# +# CONFIG_PPS is not set + +# +# PPS generators support +# +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set + +# +# Memory mapped GPIO expanders: +# +# CONFIG_GPIO_BASIC_MMIO is not set +# CONFIG_GPIO_IT8761E is not set + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_SX150X is not set +# CONFIG_GPIO_ADP5588 is not set + +# +# PCI GPIO expanders: +# + +# +# SPI GPIO expanders: +# + +# +# AC97 GPIO expanders: +# + +# +# MODULbus GPIO expanders: +# +CONFIG_W1=y + +# +# 1-wire Bus Masters +# +CONFIG_W1_MASTER_DS2482=y +# CONFIG_W1_MASTER_DS1WM is not set +# CONFIG_W1_MASTER_GPIO is not set + +# +# 1-wire Slaves +# +# CONFIG_W1_SLAVE_THERM is not set +# CONFIG_W1_SLAVE_SMEM is not set +# CONFIG_W1_SLAVE_DS2423 is not set +# CONFIG_W1_SLAVE_DS2431 is not set +# CONFIG_W1_SLAVE_DS2433 is not set +# CONFIG_W1_SLAVE_DS2760 is not set +# CONFIG_W1_SLAVE_BQ27000 is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2782 is not set +CONFIG_BATTERY_DS2784=y +# CONFIG_BATTERY_BQ20Z75 is not set +# CONFIG_BATTERY_BQ27x00 is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set +CONFIG_MFD_SUPPORT=y +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_ASIC3 is not set +# CONFIG_HTC_EGPIO is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_TPS65200 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_MFD_STMPE is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_T7L66XB is not set +# CONFIG_MFD_TC6387XB is not set +# CONFIG_MFD_TC6393XB is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_TPS6586X is not set +# 
CONFIG_MFD_WL1273_CORE is not set +CONFIG_REGULATOR=y +CONFIG_REGULATOR_DEBUG=y +# CONFIG_REGULATOR_DUMMY is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_BQ24022 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +CONFIG_REGULATOR_TPS65023=y +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_AD5398 is not set +CONFIG_MEDIA_SUPPORT=y + +# +# Multimedia core support +# +# CONFIG_VIDEO_DEV is not set +# CONFIG_DVB_CORE is not set +# CONFIG_VIDEO_MEDIA is not set + +# +# Multimedia drivers +# +CONFIG_RC_CORE=y +CONFIG_LIRC=y +CONFIG_RC_MAP=y +CONFIG_IR_NEC_DECODER=y +CONFIG_IR_RC5_DECODER=y +CONFIG_IR_RC6_DECODER=y +CONFIG_IR_JVC_DECODER=y +CONFIG_IR_SONY_DECODER=y +CONFIG_IR_RC5_SZ_DECODER=y +CONFIG_IR_LIRC_CODEC=y +# CONFIG_IR_IMON is not set +# CONFIG_IR_MCEUSB is not set +# CONFIG_IR_STREAMZAP is not set +# CONFIG_RC_LOOPBACK is not set + +# +# Qualcomm MSM Camera And Video +# +CONFIG_MSM_CAMERA=y +CONFIG_720P_CAMERA=y +# CONFIG_MSM_CAMERA_DEBUG is not set + +# +# Camera Sensor Selection +# +# CONFIG_MT9T013 is not set +# CONFIG_MT9D112 is not set +# CONFIG_MT9P012 is not set +CONFIG_S5K3E2FX=y +# CONFIG_S5K6AAFX is not set +# CONFIG_OV8810 is not set +# CONFIG_OV9665 is not set +# CONFIG_S5K3H1GX is not set + +# +# Graphics support +# +# CONFIG_DRM is not set +CONFIG_MSM_KGSL=y +# CONFIG_MSM_KGSL_CFF_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX is not set +CONFIG_MSM_KGSL_MMU=y +# CONFIG_KGSL_PER_PROCESS_PAGE_TABLE is not set +CONFIG_MSM_KGSL_PAGE_TABLE_SIZE=0xFFF0000 +CONFIG_MSM_KGSL_MMU_PAGE_FAULT=y +# CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES is not set +# CONFIG_VGASTATE is not set +CONFIG_VIDEO_OUTPUT_CONTROL=y +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_WMT_GE_ROPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_FB_MSM=y +CONFIG_FB_MSM_LEGACY_MDP=y +CONFIG_FB_MSM_MDP_PPP=y +CONFIG_FB_MSM_LCDC=y +# CONFIG_FB_MSM_MDDI is not set +# CONFIG_MSM_HDMI is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set +# CONFIG_LOGO is not set +# CONFIG_SOUND is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +# CONFIG_HIDRAW is not set +# CONFIG_HID_PID is not set + +# +# Special HID drivers +# +CONFIG_HID_APPLE=y +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_WACOM is not set 
+CONFIG_USB_SUPPORT=y +CONFIG_USB_ARCH_HAS_HCD=y +# CONFIG_USB_ARCH_HAS_OHCI is not set +CONFIG_USB_ARCH_HAS_EHCI=y +# CONFIG_USB is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_MUSB_HDRC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_GADGET_SELECTED=y +# CONFIG_USB_GADGET_R8A66597 is not set +# CONFIG_USB_GADGET_PXA_U2O is not set +# CONFIG_USB_GADGET_M66592 is not set +# CONFIG_USB_GADGET_CI13XXX_MSM is not set +CONFIG_USB_GADGET_MSM_72K=y +CONFIG_USB_MSM_72K=y +# CONFIG_USB_MSM_72K_HTC is not set +CONFIG_USB_GADGET_DUALSPEED=y +# CONFIG_USB_ZERO is not set +# CONFIG_USB_ETH is not set +# CONFIG_USB_G_NCM is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_FILE_STORAGE is not set +# CONFIG_USB_MASS_STORAGE is not set +# CONFIG_USB_G_SERIAL is not set +# CONFIG_USB_G_PRINTER is not set +CONFIG_USB_ANDROID=y +# CONFIG_USB_ANDROID_ACM is not set +CONFIG_USB_ANDROID_ADB=y +CONFIG_USB_ANDROID_DIAG=y +CONFIG_USB_ANDROID_MASS_STORAGE=y +# CONFIG_USB_ANDROID_MTP is not set +CONFIG_USB_ANDROID_RNDIS=y +CONFIG_USB_ANDROID_RNDIS_WCEIS=y +CONFIG_USB_ANDROID_ACCESSORY=y +CONFIG_USB_CSW_HACK=y +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set + +# +# OTG and related infrastructure +# +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ULPI is not set +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_MSM_OTG_72K is not set +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_UNSAFE_RESUME=y +# CONFIG_MMC_CLKGATE is not set +CONFIG_MMC_EMBEDDED_SDIO=y +CONFIG_MMC_PARANOID_SD_INIT=y + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=8 +# CONFIG_MMC_BLOCK_BOUNCE is not set +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_SDHCI is not set +CONFIG_MMC_MSM=y +# CONFIG_MMC_DW is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y + +# +# LED drivers +# +# CONFIG_LEDS_PCA9532 is not set +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_GPIO_PLATFORM=y +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 is not set +CONFIG_LEDS_CPLD=y +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_LT3593 is not set +CONFIG_LEDS_TRIGGERS=y + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set +CONFIG_LEDS_TRIGGER_SLEEP=y + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_NFC_DEVICES is not set +CONFIG_SWITCH=y +CONFIG_SWITCH_GPIO=y +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +# CONFIG_RTC_INTF_SYSFS is not set +# CONFIG_RTC_INTF_PROC is not set +# CONFIG_RTC_INTF_DEV is not set +CONFIG_RTC_INTF_ALARM=y +CONFIG_RTC_INTF_ALARM_DEV=y +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set 
+# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set + +# +# SPI RTC drivers +# + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_CMOS is not set +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_MSM7X00A=y +# CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set +CONFIG_STAGING=y +# CONFIG_STAGING_EXCLUDE_BUILD is not set +# CONFIG_ECHO is not set +# CONFIG_BRCM80211 is not set +# CONFIG_COMEDI is not set + +# +# Android +# +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_LOGGER=y +CONFIG_ANDROID_RAM_CONSOLE=y +CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE=128 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE=16 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE=8 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL=0x11d +# CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT is not set +CONFIG_ANDROID_TIMED_OUTPUT=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +# CONFIG_POHMELFS is not set +# CONFIG_IIO is not set +CONFIG_XVMALLOC=y +CONFIG_ZRAM=y +# CONFIG_ZRAM_DEBUG is not set +# CONFIG_FB_SM7XX is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_ST_BT is not set +# CONFIG_LIRC_STAGING is not set +CONFIG_MACH_NO_WESTBRIDGE=y +# CONFIG_ATH6K_LEGACY is not set +# CONFIG_FT1000 is not set + +# +# Speakup console speech +# +# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set + +# +# File systems +# +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT23=y +CONFIG_EXT4_FS_XATTR=y +# CONFIG_EXT4_FS_POSIX_ACL is not set +# CONFIG_EXT4_FS_SECURITY is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_FS_POSIX_ACL is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +# CONFIG_DNOTIFY is not set +CONFIG_INOTIFY_USER=y +# CONFIG_FANOTIFY is not set +# CONFIG_QUOTA is not set +# CONFIG_QUOTACTL is not set +# CONFIG_AUTOFS4_FS is not set +CONFIG_FUSE_FS=y +# CONFIG_CUSE is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +# CONFIG_MSDOS_FS is not set +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_YAFFS_FS=y +CONFIG_YAFFS_YAFFS1=y +# CONFIG_YAFFS_9BYTE_TAGS is not set +# CONFIG_YAFFS_DOES_ECC is not set +CONFIG_YAFFS_YAFFS2=y +CONFIG_YAFFS_AUTO_YAFFS2=y +CONFIG_YAFFS_DISABLE_TAGS_ECC=y +# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set +# CONFIG_YAFFS_EMPTY_LOST_AND_FOUND is not set +# CONFIG_YAFFS_DISABLE_BLOCK_REFRESHING is not set +# CONFIG_YAFFS_DISABLE_BACKGROUND is not set +CONFIG_YAFFS_XATTR=y +# CONFIG_JFFS2_FS is not set +# CONFIG_LOGFS is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +# CONFIG_NFS_FS is not set +# CONFIG_NFSD is not set +# CONFIG_CEPH_FS is not set +CONFIG_CIFS=y +# CONFIG_CIFS_STATS is not set +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_ACL is not set +# CONFIG_CIFS_EXPERIMENTAL is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +CONFIG_NLS_UTF8=y + +# +# Kernel hacking +# +CONFIG_PRINTK_TIME=y +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SHIRQ is not set +# CONFIG_LOCKUP_DETECTOR is not set +# CONFIG_HARDLOCKUP_DETECTOR is not set 
+CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +CONFIG_SCHED_DEBUG=y +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_BKL is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_SPARSE_RCU_POINTER is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set +CONFIG_FRAME_POINTER=y +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# CONFIG_LKDTM is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +# CONFIG_SYSCTL_SYSCALL_CHECK is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set +# CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_STRICT_DEVMEM is not set +# CONFIG_ARM_UNWIND is not set +CONFIG_DEBUG_USER=y +CONFIG_DEBUG_ERRORS=y +CONFIG_DEBUG_STACK_USAGE=y +# CONFIG_DEBUG_LL is not set +# CONFIG_OC_ETM is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_GHASH is not set 
+CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=y +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_TWOFISH_COMMON=y + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set +CONFIG_CRYPTO_HW=y +# CONFIG_BINARY_PRINTF is not set + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +# CONFIG_XZ_DEC is not set +# CONFIG_XZ_DEC_BCJ is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=y +CONFIG_TEXTSEARCH_BM=y +CONFIG_TEXTSEARCH_FSM=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/arm/configs/cayniarb_supersonic_defconfig b/arch/arm/configs/cayniarb_supersonic_defconfig new file mode 100644 index 0000000000000..233df2be183b4 --- /dev/null +++ b/arch/arm/configs/cayniarb_supersonic_defconfig @@ -0,0 +1,2152 @@ +# +# Automatically generated make config: don't edit +# Linux/arm 2.6.38.8 Kernel Configuration +# Sun Feb 19 17:05:33 2012 +# +CONFIG_ARM=y +CONFIG_SYS_SUPPORTS_APM_EMULATION=y +CONFIG_GENERIC_GPIO=y +# CONFIG_ARCH_USES_GETTIMEOFFSET is not set +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_HAVE_PROC_CPU=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_ARCH_HAS_CPUFREQ=y +CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_VECTORS_BASE=0xffff0000 +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y +CONFIG_HAVE_IRQ_WORK=y + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_LOCK_KERNEL=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="" +CONFIG_LOCALVERSION="-cayniarb-ics-stock" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_LZO=y +# CONFIG_KERNEL_GZIP is not set +CONFIG_KERNEL_LZMA=y +# CONFIG_KERNEL_LZO is not set +CONFIG_SWAP=y +# CONFIG_SYSVIPC is not set +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT 
is not set +CONFIG_HAVE_GENERIC_HARDIRQS=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_HARDIRQS=y +# CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED is not set +CONFIG_HAVE_SPARSE_IRQ=y +# CONFIG_GENERIC_PENDING_IRQ is not set +# CONFIG_AUTO_IRQ_AFFINITY is not set +# CONFIG_IRQ_PER_CPU is not set +# CONFIG_SPARSE_IRQ is not set + +# +# RCU Subsystem +# +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_TINY_RCU is not set +CONFIG_TINY_PREEMPT_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_TRACE is not set +# CONFIG_TREE_RCU_TRACE is not set +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=1 +CONFIG_RCU_BOOST_DELAY=500 +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_CGROUPS=y +# CONFIG_CGROUP_DEBUG is not set +# CONFIG_CGROUP_NS is not set +# CONFIG_CGROUP_FREEZER is not set +# CONFIG_CGROUP_DEVICE is not set +# CONFIG_CPUSETS is not set +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +# CONFIG_CGROUP_MEM_RES_CTLR is not set +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_BLK_CGROUP=y +# CONFIG_DEBUG_BLK_CGROUP is not set +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +# CONFIG_NET_NS is not set +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_EXPERT=y +CONFIG_EMBEDDED=y +CONFIG_UID16=y +# CONFIG_SYSCTL_SYSCALL is not set +# CONFIG_KALLSYMS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +# CONFIG_ELF_CORE is not set +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_ASHMEM=y +CONFIG_AIO=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +# CONFIG_PERF_EVENTS is not set +# CONFIG_PERF_COUNTERS is not set +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_COMPAT_BRK=y +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_DMA_API_DEBUG=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +CONFIG_LBDAF=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_THROTTLING=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +CONFIG_IOSCHED_BFQ=y +CONFIG_CGROUP_BFQIO=y +CONFIG_DEFAULT_BFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="bfq" +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +# CONFIG_INLINE_SPIN_UNLOCK is not set +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# 
CONFIG_INLINE_READ_TRYLOCK is not set +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +# CONFIG_INLINE_READ_UNLOCK is not set +# CONFIG_INLINE_READ_UNLOCK_BH is not set +# CONFIG_INLINE_READ_UNLOCK_IRQ is not set +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_TRYLOCK is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set +# CONFIG_INLINE_WRITE_UNLOCK is not set +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +# CONFIG_MUTEX_SPIN_ON_OWNER is not set +CONFIG_FREEZER=y + +# +# System Type +# +CONFIG_MMU=y +# CONFIG_ARCH_AAEC2000 is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_REALVIEW is not set +# CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_VEXPRESS is not set +# CONFIG_ARCH_AT91 is not set +# CONFIG_ARCH_BCMRING is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CNS3XXX is not set +# CONFIG_ARCH_GEMINI is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_EP93XX is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_MXS is not set +# CONFIG_ARCH_STMP3XXX is not set +# CONFIG_ARCH_NETX is not set +# CONFIG_ARCH_H720X is not set +# CONFIG_ARCH_IOP13XX is not set +# CONFIG_ARCH_IOP32X is not set +# CONFIG_ARCH_IOP33X is not set +# CONFIG_ARCH_IXP23XX is not set +# CONFIG_ARCH_IXP2000 is not set +# CONFIG_ARCH_IXP4XX is not set +# CONFIG_ARCH_DOVE is not set +# CONFIG_ARCH_KIRKWOOD is not set +# CONFIG_ARCH_LOKI is not set +# CONFIG_ARCH_LPC32XX is not set +# CONFIG_ARCH_MV78XX0 is not set +# CONFIG_ARCH_ORION5X is not set +# CONFIG_ARCH_MMP is not set +# CONFIG_ARCH_KS8695 is not set +# CONFIG_ARCH_NS9XXX is not set +# CONFIG_ARCH_W90X900 is not set +# CONFIG_ARCH_NUC93X is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_PNX4008 is not set +# CONFIG_ARCH_PXA is not set +CONFIG_ARCH_MSM=y +# CONFIG_ARCH_SHMOBILE is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_S3C64XX is not set +# CONFIG_ARCH_S5P64X0 is not set +# CONFIG_ARCH_S5P6442 is not set +# CONFIG_ARCH_S5PC100 is not set +# CONFIG_ARCH_S5PV210 is not set +# CONFIG_ARCH_S5PV310 is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_TCC_926 is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_U300 is not set +# CONFIG_ARCH_U8500 is not set +# CONFIG_ARCH_NOMADIK is not set +# CONFIG_ARCH_DAVINCI is not set +# CONFIG_ARCH_OMAP is not set +# CONFIG_PLAT_SPEAR is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_ARCH_MSM7X00A is not set +# CONFIG_ARCH_MSM7X30 is not set +CONFIG_ARCH_QSD8X50=y +# CONFIG_ARCH_MSM8X60 is not set +CONFIG_ARCH_MSM_SCORPION=y +CONFIG_HAS_MSM_DEBUG_UART_PHYS=y +CONFIG_MSM_MDP31=y +# CONFIG_PERFLOCK is not set + +# +# Qualcomm MSM Board Type +# +# CONFIG_MACH_SWORDFISH is not set +CONFIG_MACH_QSD8X50_SURF=y +# CONFIG_MACH_QSD8X50A_ST1_5 is not set +CONFIG_MSM_DEBUG_UART_NONE=y +# CONFIG_MSM_DEBUG_UART1 is not set +# CONFIG_MSM_DEBUG_UART2 is not set +# CONFIG_MSM_DEBUG_UART3 is not set +CONFIG_MSM_PROC_COMM=y +# CONFIG_MACH_MAHIMAHI is not set +CONFIG_MACH_BRAVO_NONE=y +# CONFIG_MACH_BRAVO is not set +# CONFIG_MACH_BRAVOC is not set +# CONFIG_MACH_INCREDIBLE is not set +# 
CONFIG_MACH_INCREDIBLEC is not set +CONFIG_MACH_SUPERSONIC=y +# CONFIG_MACH_QSD8X50_FFA is not set +# CONFIG_HTC_HEADSET is not set +# CONFIG_HTC_35MM_JACK is not set +CONFIG_HTC_BATTCHG=y +CONFIG_HTC_BATTCHG_SMEM=y +# CONFIG_HTC_PWRSPLY is not set +# CONFIG_HTC_PWRSINK is not set +# CONFIG_HTC_POWER_COLLAPSE_MAGIC is not set +# CONFIG_HTC_ONMODE_CHARGING is not set +CONFIG_CACHE_FLUSH_RANGE_LIMIT=0x40000 +CONFIG_MSM7X00A_USE_GP_TIMER=y +# CONFIG_MSM7X00A_USE_DG_TIMER is not set +CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE_SUSPEND=y +# CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE is not set +# CONFIG_MSM7X00A_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_SLEEP_MODE=0 +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE_SUSPEND is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE=y +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE=1 +CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME=50000000 +CONFIG_MSM7X00A_IDLE_SPIN_TIME=80000 +CONFIG_MSM_IDLE_STATS=y +CONFIG_MSM_IDLE_STATS_FIRST_BUCKET=62500 +CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT=2 +CONFIG_MSM_IDLE_STATS_BUCKET_COUNT=10 +CONFIG_MSM_FIQ_SUPPORT=y +# CONFIG_MSM_SERIAL_DEBUGGER is not set +CONFIG_MSM_SMD=y +CONFIG_MSM_GPIOMUX=y +CONFIG_MSM_DAL=y +CONFIG_MSM_ONCRPCROUTER=y +CONFIG_MSM_CPU_FREQ_SET_MIN_MAX=y +CONFIG_MSM_CPU_FREQ_MAX=998400 +CONFIG_MSM_CPU_FREQ_MIN=245000 +# CONFIG_AXI_SCREEN_POLICY is not set +# CONFIG_MSM_HW3D is not set +CONFIG_HTC_ACOUSTIC_QSD=y +CONFIG_MSM_QDSP6=y +CONFIG_WIFI_CONTROL_FUNC=y +CONFIG_WIFI_MEM_PREALLOC=y +# CONFIG_ARCH_MSM_FLASHLIGHT is not set +CONFIG_MICROP_COMMON=y +CONFIG_HTC_HEADSET_MGR=y +CONFIG_HTC_HEADSET_GPIO=y +CONFIG_HTC_HEADSET_MICROP=y +# CONFIG_VIRTUAL_KPANIC_PARTITION is not set + +# +# System MMU +# + +# +# Processor Type +# +CONFIG_CPU_32v6K=y +CONFIG_CPU_V7=y +CONFIG_CPU_32v7=y +CONFIG_CPU_ABRT_EV7=y +CONFIG_CPU_PABRT_V7=y +CONFIG_CPU_CACHE_V7=y +CONFIG_CPU_CACHE_VIPT=y +CONFIG_CPU_COPY_V6=y +CONFIG_CPU_TLB_V7=y +CONFIG_VERIFY_PERMISSION_FAULT=y +CONFIG_CPU_HAS_ASID=y +CONFIG_CPU_CP15=y +CONFIG_CPU_CP15_MMU=y + +# +# Processor Features +# +CONFIG_ARM_THUMB=y +CONFIG_ARM_THUMBEE=y +# CONFIG_SWP_EMULATE is not set +# CONFIG_CPU_ICACHE_DISABLE is not set +# CONFIG_CPU_DCACHE_DISABLE is not set +# CONFIG_CPU_CACHE_ERR_REPORT is not set +# CONFIG_CPU_BPREDICT_DISABLE is not set +CONFIG_ARM_L1_CACHE_SHIFT=5 +CONFIG_ARM_DMA_MEM_BUFFERABLE=y +CONFIG_CPU_HAS_PMU=y +# CONFIG_ARM_ERRATA_430973 is not set +# CONFIG_ARM_ERRATA_458693 is not set +# CONFIG_ARM_ERRATA_460075 is not set +# CONFIG_ARM_ERRATA_743622 is not set +# CONFIG_FIQ_DEBUGGER is not set + +# +# Bus support +# +# CONFIG_PCI_SYSCALL is not set +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Kernel Features +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_VMSPLIT_3G=y +# CONFIG_VMSPLIT_2G is not set +# CONFIG_VMSPLIT_1G is not set +CONFIG_PAGE_OFFSET=0xC0000000 +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_HZ=100 +# CONFIG_THUMB2_KERNEL is not set +CONFIG_AEABI=y +# CONFIG_OABI_COMPAT is not set +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set +# CONFIG_HIGHMEM is not 
set +CONFIG_VMALLOC_RESERVE=0x08000000 +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_COMPACTION is not set +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_VIRT_TO_BUS=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 +CONFIG_NEED_PER_CPU_KM=y +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_ALIGNMENT_TRAP=y +# CONFIG_UACCESS_WITH_MEMCPY is not set +# CONFIG_SECCOMP is not set +# CONFIG_CC_STACKPROTECTOR is not set +# CONFIG_DEPRECATED_PARAM_STRUCT is not set + +# +# Boot options +# +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_CMDLINE="mem=64M console=ttyMSM0" +# CONFIG_CMDLINE_FORCE is not set +# CONFIG_XIP_KERNEL is not set +# CONFIG_KEXEC is not set +# CONFIG_CRASH_DUMP is not set +# CONFIG_AUTO_ZRELADDR is not set + +# +# CPU Power Management +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TABLE=y +# CONFIG_CPU_FREQ_DEBUG is not set +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_STAT_DETAILS=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_INTERACTIVE=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_IDLE is not set +CONFIG_CPU_FREQ_MSM=y + +# +# Floating point emulation +# + +# +# At least one emulation must be selected +# +CONFIG_VFP=y +CONFIG_VFPv3=y +CONFIG_NEON=y + +# +# Userspace binary formats +# +CONFIG_BINFMT_ELF=y +CONFIG_HAVE_AOUT=y +# CONFIG_BINFMT_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options +# +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_SLEEP=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HAS_WAKELOCK=y +CONFIG_HAS_EARLYSUSPEND=y +CONFIG_WAKELOCK=y +CONFIG_WAKELOCK_STAT=y +CONFIG_USER_WAKELOCK=y +CONFIG_EARLYSUSPEND=y +# CONFIG_NO_USER_SPACE_SCREEN_ACCESS_CONTROL is not set +CONFIG_FB_EARLYSUSPEND=y +# CONFIG_APM_EMULATION is not set +# CONFIG_PM_RUNTIME is not set +CONFIG_PM_OPS=y +# CONFIG_SUSPEND_TIME is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=y +CONFIG_NET_KEY=y +# CONFIG_NET_KEY_MIGRATE is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_ASK_IP_FIB_HASH=y +# CONFIG_IP_FIB_TRIE is not set +CONFIG_IP_FIB_HASH=y +CONFIG_IP_MULTIPLE_TABLES=y +# CONFIG_IP_ROUTE_MULTIPATH is not set +# CONFIG_IP_ROUTE_VERBOSE is not set +# CONFIG_IP_PNP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +CONFIG_INET_XFRM_TUNNEL=y +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y 
+CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +CONFIG_IPV6=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +# CONFIG_IPV6_ROUTE_INFO is not set +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_INET6_XFRM_TUNNEL=y +CONFIG_INET6_TUNNEL=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +CONFIG_IPV6_SIT=y +# CONFIG_IPV6_SIT_6RD is not set +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=y +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +# CONFIG_IPV6_MROUTE is not set +CONFIG_ANDROID_PARANOID_NETWORK=y +CONFIG_NET_ACTIVITY_STATS=y +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_NETLINK=y +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +# CONFIG_NF_CONNTRACK_H323 is not set +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_TPROXY=y +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=y +CONFIG_NETFILTER_XT_CONNMARK=y + +# +# Xtables targets +# +# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +# CONFIG_NETFILTER_XT_TARGET_CT is not set +# CONFIG_NETFILTER_XT_TARGET_DSCP is not set +CONFIG_NETFILTER_XT_TARGET_HL=y +# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set +CONFIG_NETFILTER_XT_TARGET_LED=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set +# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set +# CONFIG_NETFILTER_XT_TARGET_TEE is not set +# CONFIG_NETFILTER_XT_TARGET_TPROXY is not set +# CONFIG_NETFILTER_XT_TARGET_TRACE is not set +# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set +# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set + +# +# Xtables matches +# +# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +# CONFIG_NETFILTER_XT_MATCH_CPU is not set +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +# CONFIG_NETFILTER_XT_MATCH_DSCP is not set +# CONFIG_NETFILTER_XT_MATCH_ESP is not set +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_HL=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +# CONFIG_NETFILTER_XT_MATCH_OSF is not set +# CONFIG_NETFILTER_XT_MATCH_OWNER is not set +CONFIG_NETFILTER_XT_MATCH_POLICY=y +# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y 
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set +# CONFIG_NETFILTER_XT_MATCH_REALM is not set +CONFIG_NETFILTER_XT_MATCH_RECENT=y +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +# CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_CONNTRACK_PROC_COMPAT=y +# CONFIG_IP_NF_QUEUE is not set +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_ADDRTYPE=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_LOG=y +# CONFIG_IP_NF_TARGET_ULOG is not set +CONFIG_NF_NAT=y +CONFIG_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +# CONFIG_NF_NAT_SNMP_BASIC is not set +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_GRE=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_FTP=y +CONFIG_NF_NAT_IRC=y +CONFIG_NF_NAT_TFTP=y +CONFIG_NF_NAT_AMANDA=y +CONFIG_NF_NAT_PPTP=y +# CONFIG_NF_NAT_H323 is not set +CONFIG_NF_NAT_SIP=y +CONFIG_IP_NF_MANGLE=y +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +# CONFIG_IP_NF_TARGET_ECN is not set +# CONFIG_IP_NF_TARGET_TTL is not set +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV6=y +CONFIG_NF_CONNTRACK_IPV6=y +# CONFIG_IP6_NF_QUEUE is not set +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_EUI64=y +CONFIG_IP6_NF_MATCH_FRAG=y +CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +CONFIG_IP6_NF_MATCH_RT=y +CONFIG_IP6_NF_TARGET_HL=y +CONFIG_IP6_NF_TARGET_LOG=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +# CONFIG_IP6_NF_RAW is not set +# CONFIG_BRIDGE_NF_EBTABLES is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +CONFIG_STP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +# CONFIG_NET_SCH_CBQ is not set +CONFIG_NET_SCH_HTB=y +# CONFIG_NET_SCH_HFSC is not set +# CONFIG_NET_SCH_PRIO is not set +# CONFIG_NET_SCH_MULTIQ is not set +# CONFIG_NET_SCH_RED is not set +# CONFIG_NET_SCH_SFQ is not set +# CONFIG_NET_SCH_TEQL is not set +# CONFIG_NET_SCH_TBF is not set +# CONFIG_NET_SCH_GRED is not set +# CONFIG_NET_SCH_DSMARK is not set +# CONFIG_NET_SCH_NETEM is not set +# CONFIG_NET_SCH_DRR is not set +CONFIG_NET_SCH_INGRESS=y + +# +# Classification +# +CONFIG_NET_CLS=y +# CONFIG_NET_CLS_BASIC is not set +# CONFIG_NET_CLS_TCINDEX is not set +# CONFIG_NET_CLS_ROUTE4 is not set +# CONFIG_NET_CLS_FW is not set +CONFIG_NET_CLS_U32=y +# 
CONFIG_CLS_U32_PERF is not set +# CONFIG_CLS_U32_MARK is not set +# CONFIG_NET_CLS_RSVP is not set +# CONFIG_NET_CLS_RSVP6 is not set +# CONFIG_NET_CLS_FLOW is not set +# CONFIG_NET_CLS_CGROUP is not set +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +# CONFIG_NET_EMATCH_CMP is not set +# CONFIG_NET_EMATCH_NBYTE is not set +CONFIG_NET_EMATCH_U32=y +# CONFIG_NET_EMATCH_META is not set +# CONFIG_NET_EMATCH_TEXT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=y +# CONFIG_GACT_PROB is not set +CONFIG_NET_ACT_MIRRED=y +# CONFIG_NET_ACT_IPT is not set +# CONFIG_NET_ACT_NAT is not set +# CONFIG_NET_ACT_PEDIT is not set +# CONFIG_NET_ACT_SIMP is not set +# CONFIG_NET_ACT_SKBEDIT is not set +# CONFIG_NET_ACT_CSUM is not set +# CONFIG_NET_CLS_IND is not set +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +CONFIG_BT=y +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +# CONFIG_BT_BNEP_MC_FILTER is not set +# CONFIG_BT_BNEP_PROTO_FILTER is not set +CONFIG_BT_HIDP=y + +# +# Bluetooth device drivers +# +# CONFIG_BT_HCIBTSDIO is not set +CONFIG_BT_HCIUART=y +CONFIG_BT_HCIUART_H4=y +# CONFIG_BT_HCIUART_BCSP is not set +# CONFIG_BT_HCIUART_ATH3K is not set +CONFIG_BT_HCIUART_LL=y +# CONFIG_BT_HCIVHCI is not set +# CONFIG_BT_MRVL is not set +# CONFIG_AF_RXRPC is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WIRELESS_EXT=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_WEXT_PRIV=y +# CONFIG_CFG80211 is not set +CONFIG_WIRELESS_EXT_SYSFS=y +# CONFIG_LIB80211 is not set + +# +# CFG80211 needs to be enabled for MAC80211 +# + +# +# Some wireless drivers require a rate control algorithm +# +CONFIG_WIMAX=y +CONFIG_WIMAX_DEBUG_LEVEL=8 +CONFIG_RFKILL=y +# CONFIG_RFKILL_PM is not set +CONFIG_RFKILL_LEDS=y +# CONFIG_RFKILL_INPUT is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="" +# CONFIG_DEVTMPFS is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +CONFIG_GENLOCK=y +CONFIG_GENLOCK_MISCDEVICE=y +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set 
+# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +CONFIG_MTD_MSM_NAND=y +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +CONFIG_MTD_NAND_IDS=y +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_UBI is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=y + +# +# DRBD disabled because PROC_FS, INET or CONNECTOR not selected +# +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_MG_DISK is not set +# CONFIG_BLK_DEV_RBD is not set +CONFIG_MISC_DEVICES=y +# CONFIG_AD525X_DPOT is not set +CONFIG_ANDROID_PMEM=y +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +CONFIG_KERNEL_DEBUGGER_CORE=y +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1780 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_SENSORS_AK8975 is not set +CONFIG_SENSORS_AKM8973=y +# CONFIG_SENSORS_AKM8976 is not set +CONFIG_VP_A1026=y +# CONFIG_DS1682 is not set +# CONFIG_TI_DAC7512 is not set +CONFIG_UID_STAT=y +# CONFIG_BMP085 is not set +# CONFIG_WL127X_RFKILL is not set +CONFIG_SENSORS_BMA150_SPI=y +# CONFIG_APANIC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_IWMC3200TOP is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_ATA is not set +CONFIG_MD=y +# CONFIG_BLK_DEV_MD is not set +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=y +# CONFIG_DM_SNAPSHOT is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_RAID is not set +# CONFIG_DM_ZERO is not set +# CONFIG_DM_MULTIPATH is not set +# CONFIG_DM_DELAY is not set +CONFIG_DM_UEVENT=y +CONFIG_NETDEVICES=y +# CONFIG_IFB is not set +CONFIG_DUMMY=y +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +CONFIG_TUN=y +# CONFIG_VETH is not set +CONFIG_MII=y +# CONFIG_PHYLIB is not set +# CONFIG_NET_ETHERNET is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set +CONFIG_WLAN=y +CONFIG_BCM4329=m +CONFIG_BCM4329_FW_PATH="/system/etc/firmware/fw_bcm4329.bin" +CONFIG_BCM4329_NVRAM_PATH="/proc/calibration" +# CONFIG_HOSTAP is not set + +# +# WiMAX Wireless Broadband devices +# + +# +# Enable USB support to see WiMAX USB drivers +# +# CONFIG_WIMAX_I2400M_SDIO is not set 
+CONFIG_WIMAX_SQN=m +# CONFIG_WAN is not set + +# +# CAIF transport drivers +# +CONFIG_PPP=y +# CONFIG_PPP_MULTILINK is not set +# CONFIG_PPP_FILTER is not set +CONFIG_PPP_ASYNC=y +# CONFIG_PPP_SYNC_TTY is not set +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_MPPE=y +# CONFIG_PPPOE is not set +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +# CONFIG_SLIP is not set +CONFIG_SLHC=y +# CONFIG_NETCONSOLE is not set +CONFIG_MSM_RMNET=y +# CONFIG_MSM_RMNET_DEBUG is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +CONFIG_GAN_ETH=y +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set +CONFIG_INPUT_KEYRESET=y + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_ATMEL=y +CONFIG_TOUCHSCREEN_COMPATIBLE_REPORT=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_I2C is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_ELAN_I2C_8232 is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_QT602240 is not set +# CONFIG_TOUCHSCREEN_MSM is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_W90X900 is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATI_REMOTE is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +CONFIG_INPUT_KEYCHORD=y +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_CAPELLA_CM3602 is not set +# CONFIG_INPUT_CAPELLA_CM3602_HTC is not set +CONFIG_LIGHTSENSOR_MICROP=y +# CONFIG_INPUT_OPTICALJOYSTICK is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_N_GSM is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +# 
CONFIG_SERIAL_MAX3107 is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_SERIAL_MSM=y +CONFIG_SERIAL_MSM_CONSOLE=y +# CONFIG_SERIAL_MSM_CLOCK_CONTROL is not set +CONFIG_SERIAL_MSM_HS=y +CONFIG_SERIAL_BCM_BT_LPM=y +# CONFIG_SERIAL_TIMBERDALE is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_TTY_PRINTK is not set +# CONFIG_HVC_DCC is not set +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_DCC_TTY is not set +# CONFIG_RAMOOPS is not set +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +# CONFIG_I2C_CHARDEV is not set +# CONFIG_I2C_MUX is not set +CONFIG_I2C_HELPER_AUTO=y + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE is not set +# CONFIG_I2C_GPIO is not set +CONFIG_I2C_MSM=y +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +CONFIG_SPI_DEBUG=y +CONFIG_SPI_MASTER=y + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_PXA2XX_PCI is not set +CONFIG_SPI_QSD=y +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_DESIGNWARE is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_TLE62X0 is not set + +# +# PPS support +# +# CONFIG_PPS is not set + +# +# PPS generators support +# +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set + +# +# Memory mapped GPIO expanders: +# +# CONFIG_GPIO_BASIC_MMIO is not set +# CONFIG_GPIO_IT8761E is not set + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_SX150X is not set +# CONFIG_GPIO_ADP5588 is not set + +# +# PCI GPIO expanders: +# + +# +# SPI GPIO expanders: +# +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_74X164 is not set + +# +# AC97 GPIO expanders: +# + +# +# MODULbus GPIO expanders: +# +# CONFIG_W1 is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_DS2784 is not set +# CONFIG_BATTERY_BQ20Z75 is not set +# CONFIG_BATTERY_BQ27x00 is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set +CONFIG_MFD_SUPPORT=y +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_ASIC3 is not set +# CONFIG_HTC_EGPIO is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is 
not set +CONFIG_TPS65200=y +# CONFIG_TWL4030_CORE is not set +# CONFIG_MFD_STMPE is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_T7L66XB is not set +# CONFIG_MFD_TC6387XB is not set +# CONFIG_MFD_TC6393XB is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13XXX is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_WL1273_CORE is not set +CONFIG_REGULATOR=y +CONFIG_REGULATOR_DEBUG=y +# CONFIG_REGULATOR_DUMMY is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_BQ24022 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +CONFIG_REGULATOR_TPS65023=y +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_AD5398 is not set +# CONFIG_REGULATOR_TPS6524X is not set +CONFIG_MEDIA_SUPPORT=y + +# +# Multimedia core support +# +# CONFIG_VIDEO_DEV is not set +# CONFIG_DVB_CORE is not set +# CONFIG_VIDEO_MEDIA is not set + +# +# Multimedia drivers +# +# CONFIG_RC_CORE is not set + +# +# Qualcomm MSM Camera And Video +# +CONFIG_MSM_CAMERA=y +CONFIG_720P_CAMERA=y +# CONFIG_MSM_CAMERA_DEBUG is not set + +# +# Camera Sensor Selection +# +# CONFIG_MT9T013 is not set +# CONFIG_MT9D112 is not set +# CONFIG_MT9P012 is not set +# CONFIG_S5K3E2FX is not set +CONFIG_S5K6AAFX=y +CONFIG_OV8810=y +CONFIG_OV9665=y +CONFIG_S5K3H1GX=y + +# +# Graphics support +# +# CONFIG_DRM is not set +CONFIG_MSM_KGSL=y +# CONFIG_MSM_KGSL_CFF_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX is not set +CONFIG_MSM_KGSL_MMU=y +# CONFIG_KGSL_PER_PROCESS_PAGE_TABLE is not set +CONFIG_MSM_KGSL_PAGE_TABLE_SIZE=0xFFF0000 +CONFIG_MSM_KGSL_MMU_PAGE_FAULT=y +# CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES is not set +# CONFIG_VGASTATE is not set +CONFIG_VIDEO_OUTPUT_CONTROL=y +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_WMT_GE_ROPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_FB_MSM=y +CONFIG_FB_MSM_LEGACY_MDP=y +CONFIG_FB_MSM_MDP_PPP=y +CONFIG_FB_MSM_LCDC=y +CONFIG_FB_MSM_MDDI=y +CONFIG_FB_MSM_MDDI_EPSON=y +CONFIG_FB_MSM_MDDI_NOVTEC=y 
+CONFIG_MSM_HDMI=y +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set +# CONFIG_LOGO is not set +# CONFIG_SOUND is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +# CONFIG_HIDRAW is not set +# CONFIG_HID_PID is not set + +# +# Special HID drivers +# +CONFIG_HID_APPLE=y +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_WACOM is not set +CONFIG_USB_SUPPORT=y +CONFIG_USB_ARCH_HAS_HCD=y +# CONFIG_USB_ARCH_HAS_OHCI is not set +CONFIG_USB_ARCH_HAS_EHCI=y +# CONFIG_USB is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_MUSB_HDRC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_GADGET_SELECTED=y +# CONFIG_USB_GADGET_R8A66597 is not set +# CONFIG_USB_GADGET_PXA_U2O is not set +# CONFIG_USB_GADGET_M66592 is not set +# CONFIG_USB_GADGET_CI13XXX_MSM is not set +CONFIG_USB_GADGET_MSM_72K=y +CONFIG_USB_MSM_72K=y +CONFIG_USB_MSM_72K_HTC=y +CONFIG_USB_GADGET_DUALSPEED=y +# CONFIG_USB_ZERO is not set +# CONFIG_USB_ETH is not set +# CONFIG_USB_G_NCM is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_FILE_STORAGE is not set +# CONFIG_USB_MASS_STORAGE is not set +# CONFIG_USB_G_SERIAL is not set +# CONFIG_USB_G_PRINTER is not set +CONFIG_USB_ANDROID=y +# CONFIG_USB_ANDROID_ACM is not set +CONFIG_USB_ANDROID_ADB=y +CONFIG_USB_ANDROID_DIAG=y +CONFIG_USB_ANDROID_MASS_STORAGE=y +# CONFIG_USB_ANDROID_MTP is not set +CONFIG_USB_ANDROID_RNDIS=y +CONFIG_USB_ANDROID_RNDIS_WCEIS=y +CONFIG_USB_ANDROID_ACCESSORY=y +CONFIG_USB_CSW_HACK=y +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set +CONFIG_USB_ACCESSORY_DETECT=y +# CONFIG_USB_ACCESSORY_DETECT_BY_ADC is not set +CONFIG_DOCK_ACCESSORY_DETECT=y +# CONFIG_USB_BYPASS_VBUS_NOTIFY is not set + +# +# OTG and related infrastructure +# +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ULPI is not set +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_MSM_OTG_72K is not set +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_UNSAFE_RESUME=y +# CONFIG_MMC_CLKGATE is not set +CONFIG_MMC_EMBEDDED_SDIO=y +CONFIG_MMC_PARANOID_SD_INIT=y + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=8 +# CONFIG_MMC_BLOCK_BOUNCE is not set +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_SDHCI is not set +CONFIG_MMC_MSM=y +# CONFIG_MMC_SPI is not set +# CONFIG_MMC_DW is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y + +# +# LED drivers +# +# CONFIG_LEDS_PCA9532 is not set +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_GPIO_PLATFORM=y +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 is not set +CONFIG_LEDS_CPLD=y +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_LT3593 is not set +CONFIG_LEDS_TRIGGERS=y + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set 
+CONFIG_LEDS_TRIGGER_SLEEP=y + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_NFC_DEVICES is not set +CONFIG_SWITCH=y +CONFIG_SWITCH_GPIO=y +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +# CONFIG_RTC_INTF_SYSFS is not set +# CONFIG_RTC_INTF_PROC is not set +# CONFIG_RTC_INTF_DEV is not set +CONFIG_RTC_INTF_ALARM=y +CONFIG_RTC_INTF_ALARM_DEV=y +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_CMOS is not set +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_MSM7X00A=y +# CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set +CONFIG_STAGING=y +# CONFIG_STAGING_EXCLUDE_BUILD is not set +# CONFIG_ECHO is not set +# CONFIG_BRCM80211 is not set +# CONFIG_COMEDI is not set + +# +# Android +# +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_LOGGER=y +CONFIG_ANDROID_RAM_CONSOLE=y +CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE=128 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE=16 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE=8 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL=0x11d +# CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT is not set +CONFIG_ANDROID_TIMED_OUTPUT=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +# CONFIG_POHMELFS is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_IIO is not set +CONFIG_ZRAM=y +# CONFIG_FB_SM7XX is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_ST_BT is not set +# CONFIG_SMB_FS is not set +CONFIG_MACH_NO_WESTBRIDGE=y +# CONFIG_ATH6K_LEGACY is not set +# CONFIG_FT1000 is not set + +# +# Speakup console speech +# +# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set + +# +# File systems +# +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT23=y +CONFIG_EXT4_FS_XATTR=y +# CONFIG_EXT4_FS_POSIX_ACL 
is not set +# CONFIG_EXT4_FS_SECURITY is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_FS_POSIX_ACL is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +# CONFIG_DNOTIFY is not set +CONFIG_INOTIFY_USER=y +# CONFIG_FANOTIFY is not set +# CONFIG_QUOTA is not set +# CONFIG_QUOTACTL is not set +# CONFIG_AUTOFS4_FS is not set +CONFIG_FUSE_FS=y +# CONFIG_CUSE is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +# CONFIG_MSDOS_FS is not set +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_YAFFS_FS=y +CONFIG_YAFFS_YAFFS1=y +# CONFIG_YAFFS_9BYTE_TAGS is not set +# CONFIG_YAFFS_DOES_ECC is not set +CONFIG_YAFFS_YAFFS2=y +CONFIG_YAFFS_AUTO_YAFFS2=y +CONFIG_YAFFS_DISABLE_TAGS_ECC=y +# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set +# CONFIG_YAFFS_EMPTY_LOST_AND_FOUND is not set +# CONFIG_YAFFS_DISABLE_BLOCK_REFRESHING is not set +# CONFIG_YAFFS_DISABLE_BACKGROUND is not set +CONFIG_YAFFS_XATTR=y +# CONFIG_JFFS2_FS is not set +# CONFIG_LOGFS is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +# CONFIG_NFS_FS is not set +# CONFIG_NFSD is not set +# CONFIG_CEPH_FS is not set +CONFIG_CIFS=y +# CONFIG_CIFS_STATS is not set +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_ACL is not set +# CONFIG_CIFS_EXPERIMENTAL is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# 
CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +CONFIG_NLS_UTF8=y + +# +# Kernel hacking +# +CONFIG_PRINTK_TIME=y +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SHIRQ is not set +# CONFIG_LOCKUP_DETECTOR is not set +# CONFIG_HARDLOCKUP_DETECTOR is not set +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +CONFIG_SCHED_DEBUG=y +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +CONFIG_DEBUG_MUTEXES=y +CONFIG_BKL=y +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_SPARSE_RCU_POINTER is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_INFO is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set +CONFIG_FRAME_POINTER=y +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# CONFIG_LKDTM is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +# CONFIG_SYSCTL_SYSCALL_CHECK is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set +# CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_STRICT_DEVMEM is not set +# CONFIG_ARM_UNWIND is not set +# CONFIG_DEBUG_USER is not set +CONFIG_DEBUG_ERRORS=y +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_LL is not set +# CONFIG_OC_ETM is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y 
+CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_GHASH is not set +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=y +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_TWOFISH_COMMON=y + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set +CONFIG_CRYPTO_HW=y +# CONFIG_BINARY_PRINTF is not set + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +# CONFIG_XZ_DEC is not set +# CONFIG_XZ_DEC_BCJ is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=y +CONFIG_TEXTSEARCH_BM=y +CONFIG_TEXTSEARCH_FSM=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/arm/configs/evervolv_bravo_defconfig b/arch/arm/configs/evervolv_bravo_defconfig new file mode 100644 index 0000000000000..281674c1fbbd4 --- /dev/null +++ b/arch/arm/configs/evervolv_bravo_defconfig @@ -0,0 +1,2184 @@ +# +# Automatically generated make config: don't edit +# Linux/arm 2.6.38.8 Kernel Configuration +# Mon Nov 25 21:10:55 2013 +# +CONFIG_ARM=y +CONFIG_SYS_SUPPORTS_APM_EMULATION=y +CONFIG_GENERIC_GPIO=y +# CONFIG_ARCH_USES_GETTIMEOFFSET is not set +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_HAVE_PROC_CPU=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y 
+CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_ARCH_HAS_CPUFREQ=y +CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_VECTORS_BASE=0xffff0000 +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y +CONFIG_HAVE_IRQ_WORK=y + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="" +CONFIG_LOCALVERSION="-evervolv" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +# CONFIG_KERNEL_GZIP is not set +# CONFIG_KERNEL_LZMA is not set +CONFIG_KERNEL_XZ=y +# CONFIG_KERNEL_LZO is not set +CONFIG_SWAP=y +# CONFIG_SYSVIPC is not set +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set +CONFIG_HAVE_GENERIC_HARDIRQS=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_HARDIRQS=y +# CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED is not set +CONFIG_HAVE_SPARSE_IRQ=y +# CONFIG_GENERIC_PENDING_IRQ is not set +# CONFIG_AUTO_IRQ_AFFINITY is not set +# CONFIG_IRQ_PER_CPU is not set +# CONFIG_SPARSE_IRQ is not set + +# +# RCU Subsystem +# +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_TINY_RCU is not set +CONFIG_TINY_PREEMPT_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_TRACE is not set +# CONFIG_TREE_RCU_TRACE is not set +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=1 +CONFIG_RCU_BOOST_DELAY=500 +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_CGROUPS=y +# CONFIG_CGROUP_DEBUG is not set +# CONFIG_CGROUP_NS is not set +CONFIG_CGROUP_FREEZER=y +# CONFIG_CGROUP_DEVICE is not set +# CONFIG_CPUSETS is not set +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +CONFIG_CGROUP_MEM_RES_CTLR=y +CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y +CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_BLK_CGROUP=y +# CONFIG_DEBUG_BLK_CGROUP is not set +# CONFIG_NAMESPACES is not set +CONFIG_SCHED_AUTOGROUP=y +CONFIG_MM_OWNER=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_EXPERT=y +CONFIG_EMBEDDED=y +CONFIG_UID16=y +# CONFIG_SYSCTL_SYSCALL is not set +# CONFIG_KALLSYMS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +# CONFIG_ELF_CORE is not set +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_ASHMEM=y +CONFIG_AIO=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +# CONFIG_PERF_EVENTS is not set +# CONFIG_PERF_COUNTERS is not set +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_SLUB_DEBUG is not set +CONFIG_COMPAT_BRK=y +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +CONFIG_TRACEPOINTS=y +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_DMA_API_DEBUG=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_RT_MUTEXES=y 
+CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +CONFIG_LBDAF=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set +# CONFIG_BLK_DEV_THROTTLING is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_CFQ_GROUP_IOSCHED=y +CONFIG_IOSCHED_BFQ=y +CONFIG_CGROUP_BFQIO=y +CONFIG_DEFAULT_DEADLINE=y +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_BFQ is not set +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="deadline" +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +# CONFIG_INLINE_SPIN_UNLOCK is not set +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_READ_TRYLOCK is not set +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +# CONFIG_INLINE_READ_UNLOCK is not set +# CONFIG_INLINE_READ_UNLOCK_BH is not set +# CONFIG_INLINE_READ_UNLOCK_IRQ is not set +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_TRYLOCK is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set +# CONFIG_INLINE_WRITE_UNLOCK is not set +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +# CONFIG_MUTEX_SPIN_ON_OWNER is not set +CONFIG_FREEZER=y + +# +# System Type +# +CONFIG_MMU=y +# CONFIG_ARCH_AAEC2000 is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_REALVIEW is not set +# CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_VEXPRESS is not set +# CONFIG_ARCH_AT91 is not set +# CONFIG_ARCH_BCMRING is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CNS3XXX is not set +# CONFIG_ARCH_GEMINI is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_EP93XX is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_MXS is not set +# CONFIG_ARCH_STMP3XXX is not set +# CONFIG_ARCH_NETX is not set +# CONFIG_ARCH_H720X is not set +# CONFIG_ARCH_IOP13XX is not set +# CONFIG_ARCH_IOP32X is not set +# CONFIG_ARCH_IOP33X is not set +# CONFIG_ARCH_IXP23XX is not set +# CONFIG_ARCH_IXP2000 is not set +# CONFIG_ARCH_IXP4XX is not set +# CONFIG_ARCH_DOVE is not set +# CONFIG_ARCH_KIRKWOOD is not set +# CONFIG_ARCH_LOKI is not set +# CONFIG_ARCH_LPC32XX is not set +# CONFIG_ARCH_MV78XX0 is not set +# CONFIG_ARCH_ORION5X is not set +# CONFIG_ARCH_MMP is not set +# CONFIG_ARCH_KS8695 is not set +# CONFIG_ARCH_NS9XXX is not set +# CONFIG_ARCH_W90X900 is not set +# CONFIG_ARCH_NUC93X is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_PNX4008 is not set +# CONFIG_ARCH_PXA is not set +CONFIG_ARCH_MSM=y +# CONFIG_ARCH_SHMOBILE is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_S3C64XX is not set +# CONFIG_ARCH_S5P64X0 is not set +# CONFIG_ARCH_S5P6442 is not set +# CONFIG_ARCH_S5PC100 is not set +# 
CONFIG_ARCH_S5PV210 is not set +# CONFIG_ARCH_S5PV310 is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_TCC_926 is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_U300 is not set +# CONFIG_ARCH_U8500 is not set +# CONFIG_ARCH_NOMADIK is not set +# CONFIG_ARCH_DAVINCI is not set +# CONFIG_ARCH_OMAP is not set +# CONFIG_PLAT_SPEAR is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_ARCH_MSM7X00A is not set +# CONFIG_ARCH_MSM7X30 is not set +CONFIG_ARCH_QSD8X50=y +# CONFIG_ARCH_MSM8X60 is not set +CONFIG_ARCH_MSM_SCORPION=y +CONFIG_HAS_MSM_DEBUG_UART_PHYS=y +CONFIG_MSM_MDP31=y +# CONFIG_PERFLOCK is not set + +# +# Qualcomm MSM Board Type +# +# CONFIG_MACH_SWORDFISH is not set +CONFIG_MACH_QSD8X50_SURF=y +# CONFIG_MACH_QSD8X50A_ST1_5 is not set +CONFIG_MSM_DEBUG_UART=1 +# CONFIG_MSM_DEBUG_UART_NONE is not set +CONFIG_MSM_DEBUG_UART1=y +# CONFIG_MSM_DEBUG_UART2 is not set +# CONFIG_MSM_DEBUG_UART3 is not set +CONFIG_MSM_PROC_COMM=y +# CONFIG_MACH_MAHIMAHI is not set +# CONFIG_MACH_BRAVO_NONE is not set +CONFIG_MACH_BRAVO=y +# CONFIG_MACH_BRAVOC is not set +# CONFIG_MACH_INCREDIBLE is not set +# CONFIG_MACH_INCREDIBLEC is not set +# CONFIG_MACH_SUPERSONIC is not set +# CONFIG_MACH_QSD8X50_FFA is not set +# CONFIG_HTC_HEADSET is not set +CONFIG_HTC_35MM_JACK=y +# CONFIG_HTC_BATTCHG is not set +CONFIG_HTC_PWRSPLY=y +# CONFIG_HTC_PWRSINK is not set +# CONFIG_HTC_POWER_COLLAPSE_MAGIC is not set +# CONFIG_HTC_ONMODE_CHARGING is not set +CONFIG_QSD_SVS=y +CONFIG_QSD_PMIC_DEFAULT_DCDC1=1275 +CONFIG_CACHE_FLUSH_RANGE_LIMIT=0x40000 +CONFIG_PHYS_OFFSET=0x20000000 +CONFIG_MSM7X00A_USE_GP_TIMER=y +# CONFIG_MSM7X00A_USE_DG_TIMER is not set +CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE_SUSPEND=y +# CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE is not set +# CONFIG_MSM7X00A_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_SLEEP_MODE=0 +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE_SUSPEND is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE=y +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE=1 +CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME=50000000 +CONFIG_MSM7X00A_IDLE_SPIN_TIME=80000 +CONFIG_MSM_IDLE_STATS=y +CONFIG_MSM_IDLE_STATS_FIRST_BUCKET=62500 +CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT=2 +CONFIG_MSM_IDLE_STATS_BUCKET_COUNT=10 +CONFIG_MSM_FIQ_SUPPORT=y +# CONFIG_MSM_SERIAL_DEBUGGER is not set +CONFIG_MSM_SMD=y +CONFIG_MSM_GPIOMUX=y +CONFIG_MSM_DAL=y +CONFIG_MSM_ONCRPCROUTER=y +CONFIG_MSM_CPU_FREQ_SET_MIN_MAX=y +CONFIG_MSM_CPU_FREQ_MAX=998400 +CONFIG_MSM_CPU_FREQ_MIN=245000 +# CONFIG_AXI_SCREEN_POLICY is not set +CONFIG_MSM_CPU_AVS=y +CONFIG_MSM_AVS_HW=y +CONFIG_HTC_ACOUSTIC_QSD=y +CONFIG_MSM_QDSP6=y +CONFIG_WIFI_CONTROL_FUNC=y +# CONFIG_WIFI_MEM_PREALLOC is not set +CONFIG_ARCH_MSM_FLASHLIGHT=y +# CONFIG_MICROP_COMMON is not set +# CONFIG_HTC_HEADSET_MGR is not set +# CONFIG_VIRTUAL_KPANIC_PARTITION is not set + +# +# System MMU +# + +# +# Processor Type +# +CONFIG_CPU_32v6K=y +CONFIG_CPU_V7=y +CONFIG_CPU_32v7=y +CONFIG_CPU_ABRT_EV7=y +CONFIG_CPU_PABRT_V7=y +CONFIG_CPU_CACHE_V7=y +CONFIG_CPU_CACHE_VIPT=y +CONFIG_CPU_COPY_V6=y +CONFIG_CPU_TLB_V7=y +CONFIG_VERIFY_PERMISSION_FAULT=y +CONFIG_CPU_HAS_ASID=y +CONFIG_CPU_CP15=y +CONFIG_CPU_CP15_MMU=y + +# +# Processor Features +# +CONFIG_ARM_THUMB=y 
+CONFIG_ARM_THUMBEE=y +# CONFIG_SWP_EMULATE is not set +# CONFIG_CPU_ICACHE_DISABLE is not set +# CONFIG_CPU_DCACHE_DISABLE is not set +# CONFIG_CPU_CACHE_ERR_REPORT is not set +# CONFIG_CPU_BPREDICT_DISABLE is not set +CONFIG_ARM_L1_CACHE_SHIFT=5 +CONFIG_ARM_DMA_MEM_BUFFERABLE=y +# CONFIG_VCM is not set +# CONFIG_STRICT_MEMORY_RWX is not set +# CONFIG_RESERVE_FIRST_PAGE is not set +CONFIG_CPU_HAS_PMU=y +CONFIG_ARM_ERRATA_430973=y +CONFIG_ARM_ERRATA_458693=y +CONFIG_ARM_ERRATA_460075=y +CONFIG_ARM_ERRATA_743622=y +# CONFIG_KSAPI is not set +# CONFIG_FIQ_DEBUGGER is not set + +# +# Bus support +# +# CONFIG_PCI_SYSCALL is not set +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Kernel Features +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_VMSPLIT_3G=y +# CONFIG_VMSPLIT_2G is not set +# CONFIG_VMSPLIT_1G is not set +CONFIG_PAGE_OFFSET=0xC0000000 +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_HZ=100 +# CONFIG_THUMB2_KERNEL is not set +CONFIG_AEABI=y +# CONFIG_OABI_COMPAT is not set +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set +# CONFIG_HIGHMEM is not set +CONFIG_VMALLOC_RESERVE=0x08000000 +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_VIRT_TO_BUS=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 +CONFIG_NEED_PER_CPU_KM=y +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_ALIGNMENT_TRAP=y +# CONFIG_UACCESS_WITH_MEMCPY is not set +# CONFIG_SECCOMP is not set +# CONFIG_CC_STACKPROTECTOR is not set +# CONFIG_DEPRECATED_PARAM_STRUCT is not set +# CONFIG_CP_ACCESS is not set + +# +# Boot options +# +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_CMDLINE="mem=64M console=ttyMSM0" +# CONFIG_CMDLINE_FORCE is not set +# CONFIG_XIP_KERNEL is not set +# CONFIG_KEXEC is not set +# CONFIG_CRASH_DUMP is not set +# CONFIG_AUTO_ZRELADDR is not set + +# +# CPU Power Management +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TABLE=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_STAT_DETAILS=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2 is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_INTERACTIVE=y +# CONFIG_CPU_FREQ_GOV_SMARTASS2 is not set +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +# CONFIG_CPU_IDLE is not set +CONFIG_CPU_FREQ_MSM=y + +# +# Floating point emulation +# + +# +# At least one emulation must be selected +# +CONFIG_VFP=y +CONFIG_VFPv3=y +CONFIG_NEON=y + +# +# Userspace binary formats +# +CONFIG_BINFMT_ELF=y +CONFIG_HAVE_AOUT=y +# CONFIG_BINFMT_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options +# +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_SLEEP=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HAS_WAKELOCK=y +CONFIG_HAS_EARLYSUSPEND=y +CONFIG_WAKELOCK=y +CONFIG_WAKELOCK_STAT=y +CONFIG_USER_WAKELOCK=y 
+CONFIG_EARLYSUSPEND=y +# CONFIG_NO_USER_SPACE_SCREEN_ACCESS_CONTROL is not set +CONFIG_FB_EARLYSUSPEND=y +# CONFIG_APM_EMULATION is not set +# CONFIG_PM_RUNTIME is not set +CONFIG_PM_OPS=y +# CONFIG_SUSPEND_TIME is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=y +CONFIG_NET_KEY=y +# CONFIG_NET_KEY_MIGRATE is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +CONFIG_INET_ESP=y +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +CONFIG_IPV6=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +# CONFIG_IPV6_ROUTE_INFO is not set +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_INET6_XFRM_TUNNEL=y +CONFIG_INET6_TUNNEL=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +CONFIG_IPV6_SIT=y +# CONFIG_IPV6_SIT_6RD is not set +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=y +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +# CONFIG_IPV6_MROUTE is not set +CONFIG_ANDROID_PARANOID_NETWORK=y +CONFIG_NET_ACTIVITY_STATS=y +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_NETLINK=y +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_TPROXY=y +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=y +CONFIG_NETFILTER_XT_CONNMARK=y + +# +# Xtables targets +# +# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +# CONFIG_NETFILTER_XT_TARGET_CT is not set +# CONFIG_NETFILTER_XT_TARGET_DSCP is not set +CONFIG_NETFILTER_XT_TARGET_HL=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LED=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +# CONFIG_NETFILTER_XT_TARGET_RATEEST is not 
set +# CONFIG_NETFILTER_XT_TARGET_TEE is not set +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set +# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set + +# +# Xtables matches +# +# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +# CONFIG_NETFILTER_XT_MATCH_CPU is not set +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +# CONFIG_NETFILTER_XT_MATCH_DSCP is not set +# CONFIG_NETFILTER_XT_MATCH_ESP is not set +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_HL=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set +# CONFIG_NETFILTER_XT_MATCH_OSF is not set +# CONFIG_NETFILTER_XT_MATCH_OWNER is not set +CONFIG_NETFILTER_XT_MATCH_POLICY=y +# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set +# CONFIG_NETFILTER_XT_MATCH_REALM is not set +# CONFIG_NETFILTER_XT_MATCH_RECENT is not set +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +# CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_CONNTRACK_PROC_COMPAT=y +# CONFIG_IP_NF_QUEUE is not set +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_ADDRTYPE=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_LOG=y +# CONFIG_IP_NF_TARGET_ULOG is not set +CONFIG_NF_NAT=y +CONFIG_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +# CONFIG_NF_NAT_SNMP_BASIC is not set +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_GRE=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_FTP=y +CONFIG_NF_NAT_IRC=y +CONFIG_NF_NAT_TFTP=y +CONFIG_NF_NAT_AMANDA=y +CONFIG_NF_NAT_PPTP=y +CONFIG_NF_NAT_H323=y +CONFIG_NF_NAT_SIP=y +CONFIG_IP_NF_MANGLE=y +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +# CONFIG_IP_NF_TARGET_ECN is not set +# CONFIG_IP_NF_TARGET_TTL is not set +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV6=y +CONFIG_NF_CONNTRACK_IPV6=y +# CONFIG_IP6_NF_QUEUE is not set +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_EUI64=y +CONFIG_IP6_NF_MATCH_FRAG=y +CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +CONFIG_IP6_NF_MATCH_RT=y +CONFIG_IP6_NF_TARGET_HL=y +CONFIG_IP6_NF_TARGET_LOG=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +# CONFIG_BRIDGE_NF_EBTABLES is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not 
set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +CONFIG_STP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +# CONFIG_NET_SCH_CBQ is not set +CONFIG_NET_SCH_HTB=y +# CONFIG_NET_SCH_HFSC is not set +# CONFIG_NET_SCH_PRIO is not set +# CONFIG_NET_SCH_MULTIQ is not set +# CONFIG_NET_SCH_RED is not set +# CONFIG_NET_SCH_SFQ is not set +# CONFIG_NET_SCH_TEQL is not set +# CONFIG_NET_SCH_TBF is not set +# CONFIG_NET_SCH_GRED is not set +# CONFIG_NET_SCH_DSMARK is not set +# CONFIG_NET_SCH_NETEM is not set +# CONFIG_NET_SCH_DRR is not set +CONFIG_NET_SCH_INGRESS=y + +# +# Classification +# +CONFIG_NET_CLS=y +# CONFIG_NET_CLS_BASIC is not set +# CONFIG_NET_CLS_TCINDEX is not set +# CONFIG_NET_CLS_ROUTE4 is not set +# CONFIG_NET_CLS_FW is not set +CONFIG_NET_CLS_U32=y +# CONFIG_CLS_U32_PERF is not set +# CONFIG_CLS_U32_MARK is not set +# CONFIG_NET_CLS_RSVP is not set +# CONFIG_NET_CLS_RSVP6 is not set +# CONFIG_NET_CLS_FLOW is not set +# CONFIG_NET_CLS_CGROUP is not set +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +# CONFIG_NET_EMATCH_CMP is not set +# CONFIG_NET_EMATCH_NBYTE is not set +CONFIG_NET_EMATCH_U32=y +# CONFIG_NET_EMATCH_META is not set +# CONFIG_NET_EMATCH_TEXT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=y +# CONFIG_GACT_PROB is not set +CONFIG_NET_ACT_MIRRED=y +# CONFIG_NET_ACT_IPT is not set +# CONFIG_NET_ACT_NAT is not set +# CONFIG_NET_ACT_PEDIT is not set +# CONFIG_NET_ACT_SIMP is not set +# CONFIG_NET_ACT_SKBEDIT is not set +# CONFIG_NET_ACT_CSUM is not set +# CONFIG_NET_CLS_IND is not set +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set +# CONFIG_BATMAN_ADV is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NET_DROP_MONITOR is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +CONFIG_BT=y +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +# CONFIG_BT_BNEP_MC_FILTER is not set +# CONFIG_BT_BNEP_PROTO_FILTER is not set +CONFIG_BT_HIDP=y + +# +# Bluetooth device drivers +# +# CONFIG_BT_HCIBTSDIO is not set +CONFIG_BT_HCIUART=y +CONFIG_BT_HCIUART_H4=y +# CONFIG_BT_HCIUART_BCSP is not set +# CONFIG_BT_HCIUART_ATH3K is not set +CONFIG_BT_HCIUART_LL=y +# CONFIG_BT_HCIVHCI is not set +# CONFIG_BT_MRVL is not set +# CONFIG_AF_RXRPC is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_CFG80211=y +CONFIG_NL80211_TESTMODE=y +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_REG_DEBUG is not set +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +# CONFIG_CFG80211_INTERNAL_REGDB is not set +CONFIG_CFG80211_WEXT=y +CONFIG_WIRELESS_EXT_SYSFS=y +# CONFIG_LIB80211 is not set +CONFIG_CFG80211_ALLOW_RECONNECT=y +# CONFIG_MAC80211 is not set + +# +# Some wireless drivers require a rate control algorithm +# +# CONFIG_WIMAX is not set +CONFIG_RFKILL=y +# CONFIG_RFKILL_PM is not set +CONFIG_RFKILL_LEDS=y +# CONFIG_RFKILL_INPUT is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# 
+CONFIG_UEVENT_HELPER_PATH="" +# CONFIG_DEVTMPFS is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +CONFIG_GENLOCK=y +CONFIG_GENLOCK_MISCDEVICE=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +# CONFIG_SW_SYNC_USER is not set +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +CONFIG_MTD_MSM_NAND=y +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +CONFIG_MTD_NAND_IDS=y +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_UBI is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=y + +# +# DRBD disabled because PROC_FS, INET or CONNECTOR not selected +# +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_MG_DISK is not set +# CONFIG_BLK_DEV_RBD is not set +CONFIG_MISC_DEVICES=y +# CONFIG_AD525X_DPOT is not set +CONFIG_ANDROID_PMEM=y +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +CONFIG_KERNEL_DEBUGGER_CORE=y +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1780 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_SENSORS_AK8975 is not set +CONFIG_SENSORS_AKM8973=y +# CONFIG_SENSORS_AKM8976 is not set +# CONFIG_VP_A1026 is not set +# CONFIG_DS1682 is not set +CONFIG_UID_STAT=y +# CONFIG_BMP085 is not set +# CONFIG_WL127X_RFKILL is not set +# CONFIG_APANIC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_IWMC3200TOP is not set + +# +# Texas 
Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_ATA is not set +CONFIG_MD=y +# CONFIG_BLK_DEV_MD is not set +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=y +# CONFIG_DM_SNAPSHOT is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_RAID is not set +# CONFIG_DM_ZERO is not set +# CONFIG_DM_MULTIPATH is not set +# CONFIG_DM_DELAY is not set +CONFIG_DM_UEVENT=y +CONFIG_NETDEVICES=y +CONFIG_IFB=y +CONFIG_DUMMY=y +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +CONFIG_TUN=y +# CONFIG_VETH is not set +CONFIG_MII=y +# CONFIG_PHYLIB is not set +CONFIG_NET_ETHERNET=y +# CONFIG_AX88796 is not set +CONFIG_SMC91X=y +# CONFIG_DM9000 is not set +# CONFIG_ETHOC is not set +CONFIG_SMC911X=y +# CONFIG_SMSC911X is not set +# CONFIG_DNET is not set +# CONFIG_IBM_NEW_EMAC_ZMII is not set +# CONFIG_IBM_NEW_EMAC_RGMII is not set +# CONFIG_IBM_NEW_EMAC_TAH is not set +# CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set +# CONFIG_B44 is not set +# CONFIG_KS8851_MLL is not set +CONFIG_NETDEV_1000=y +# CONFIG_STMMAC_ETH is not set +CONFIG_NETDEV_10000=y +CONFIG_WLAN=y +# CONFIG_ATH_COMMON is not set +# CONFIG_BCM4329 is not set +CONFIG_BCMDHD=m +CONFIG_BCMDHD_FW_PATH="/vendor/firmware/fw_bcmdhd.bin" +CONFIG_BCMDHD_NVRAM_PATH="/proc/calibration" +# CONFIG_DHD_USE_STATIC_BUF is not set +# CONFIG_DHD_USE_SCHED_SCAN is not set +# CONFIG_DHD_ENABLE_P2P is not set +# CONFIG_HOSTAP is not set +# CONFIG_IWM is not set +# CONFIG_LIBERTAS is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set + +# +# CAIF transport drivers +# +CONFIG_PPP=y +# CONFIG_PPP_MULTILINK is not set +# CONFIG_PPP_FILTER is not set +CONFIG_PPP_ASYNC=y +# CONFIG_PPP_SYNC_TTY is not set +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_MPPE=y +# CONFIG_PPPOE is not set +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +# CONFIG_SLIP is not set +CONFIG_SLHC=y +# CONFIG_NETCONSOLE is not set +CONFIG_MSM_RMNET=y +# CONFIG_MSM_RMNET_DEBUG is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +CONFIG_GAN_ETH=y +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set +CONFIG_INPUT_KEYRESET=y + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ATMEL is not set +# CONFIG_TOUCHSCREEN_COMPATIBLE_REPORT is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_I2C is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# 
CONFIG_TOUCHSCREEN_ELAN_I2C_8232 is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_QT602240 is not set +# CONFIG_TOUCHSCREEN_MSM is not set +CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI=y +CONFIG_TOUCHSCREEN_DUPLICATED_FILTER=y +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_W90X900 is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATI_REMOTE is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +CONFIG_INPUT_KEYCHORD=y +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_CAPELLA_CM3602 is not set +CONFIG_INPUT_CAPELLA_CM3602_HTC=y +CONFIG_LIGHTSENSOR_MICROP=y +CONFIG_INPUT_OPTICALJOYSTICK=y +CONFIG_OPTICALJOYSTICK_CRUCIAL=y +CONFIG_OPTICALJOYSTICK_CRUCIAL_uP=y +# CONFIG_OPTICALJOYSTICK_CRUCIAL_SPI is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_N_GSM is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_SERIAL_MSM=y +CONFIG_SERIAL_MSM_CONSOLE=y +# CONFIG_SERIAL_MSM_CLOCK_CONTROL is not set +CONFIG_SERIAL_MSM_HS=y +CONFIG_SERIAL_BCM_BT_LPM=y +# CONFIG_SERIAL_TIMBERDALE is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_TTY_PRINTK is not set +# CONFIG_HVC_DCC is not set +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_DCC_TTY is not set +# CONFIG_RAMOOPS is not set +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +# CONFIG_I2C_CHARDEV is not set +# CONFIG_I2C_MUX is not set +CONFIG_I2C_HELPER_AUTO=y + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE is not set +# CONFIG_I2C_GPIO is not set +CONFIG_I2C_MSM=y +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_SPI is not set + +# +# PPS support +# +# CONFIG_PPS is not set + +# +# PPS generators support +# +CONFIG_ARCH_REQUIRE_GPIOLIB=y 
+CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set + +# +# Memory mapped GPIO expanders: +# +# CONFIG_GPIO_BASIC_MMIO is not set +# CONFIG_GPIO_IT8761E is not set + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_SX150X is not set +# CONFIG_GPIO_ADP5588 is not set + +# +# PCI GPIO expanders: +# + +# +# SPI GPIO expanders: +# + +# +# AC97 GPIO expanders: +# + +# +# MODULbus GPIO expanders: +# +CONFIG_W1=y + +# +# 1-wire Bus Masters +# +CONFIG_W1_MASTER_DS2482=y +# CONFIG_W1_MASTER_DS1WM is not set +# CONFIG_W1_MASTER_GPIO is not set + +# +# 1-wire Slaves +# +# CONFIG_W1_SLAVE_THERM is not set +# CONFIG_W1_SLAVE_SMEM is not set +# CONFIG_W1_SLAVE_DS2423 is not set +# CONFIG_W1_SLAVE_DS2431 is not set +# CONFIG_W1_SLAVE_DS2433 is not set +# CONFIG_W1_SLAVE_DS2760 is not set +# CONFIG_W1_SLAVE_BQ27000 is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2782 is not set +CONFIG_BATTERY_DS2784=y +# CONFIG_BATTERY_BQ20Z75 is not set +# CONFIG_BATTERY_BQ27x00 is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set +CONFIG_MFD_SUPPORT=y +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_ASIC3 is not set +# CONFIG_HTC_EGPIO is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_TPS65200 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_MFD_STMPE is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_T7L66XB is not set +# CONFIG_MFD_TC6387XB is not set +# CONFIG_MFD_TC6393XB is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_WL1273_CORE is not set +CONFIG_REGULATOR=y +CONFIG_REGULATOR_DEBUG=y +# CONFIG_REGULATOR_DUMMY is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_BQ24022 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +CONFIG_REGULATOR_TPS65023=y +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_AD5398 is not set +CONFIG_MEDIA_SUPPORT=y + +# +# Multimedia core support +# +# CONFIG_VIDEO_DEV is not set +# CONFIG_DVB_CORE is not set +# CONFIG_VIDEO_MEDIA is not set + +# +# Multimedia drivers +# +CONFIG_RC_CORE=y +CONFIG_LIRC=y +CONFIG_RC_MAP=y +CONFIG_IR_NEC_DECODER=y +CONFIG_IR_RC5_DECODER=y +CONFIG_IR_RC6_DECODER=y +CONFIG_IR_JVC_DECODER=y +CONFIG_IR_SONY_DECODER=y +CONFIG_IR_RC5_SZ_DECODER=y +CONFIG_IR_LIRC_CODEC=y +# CONFIG_IR_IMON is not set +# 
CONFIG_IR_MCEUSB is not set +# CONFIG_IR_STREAMZAP is not set +# CONFIG_RC_LOOPBACK is not set + +# +# Qualcomm MSM Camera And Video +# +CONFIG_MSM_CAMERA=y +CONFIG_720P_CAMERA=y +# CONFIG_MSM_CAMERA_DEBUG is not set + +# +# Camera Sensor Selection +# +# CONFIG_MT9T013 is not set +# CONFIG_MT9D112 is not set +# CONFIG_MT9P012 is not set +CONFIG_S5K3E2FX=y +# CONFIG_S5K6AAFX is not set +# CONFIG_OV8810 is not set +# CONFIG_OV9665 is not set +# CONFIG_S5K3H1GX is not set + +# +# Graphics support +# +# CONFIG_DRM is not set +CONFIG_ION=y +CONFIG_ION_MSM=y +CONFIG_MSM_KGSL=y +# CONFIG_MSM_KGSL_CFF_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX is not set +# CONFIG_KGSL_PER_PROCESS_PAGE_TABLE is not set +CONFIG_MSM_KGSL_PAGE_TABLE_SIZE=0xFFF0000 +CONFIG_MSM_KGSL_MMU_PAGE_FAULT=y +# CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES is not set +# CONFIG_VGASTATE is not set +CONFIG_VIDEO_OUTPUT_CONTROL=y +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_WMT_GE_ROPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_FB_MSM=y +CONFIG_FB_MSM_LEGACY_MDP=y +CONFIG_FB_MSM_MDP_PPP=y +CONFIG_FB_MSM_LCDC=y +# CONFIG_FB_MSM_MDDI is not set +# CONFIG_MSM_HDMI is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set +# CONFIG_LOGO is not set +# CONFIG_SOUND is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +# CONFIG_HIDRAW is not set +CONFIG_UHID=y +# CONFIG_HID_PID is not set + +# +# Special HID drivers +# +CONFIG_HID_APPLE=y +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_WACOM is not set +CONFIG_USB_SUPPORT=y +CONFIG_USB_ARCH_HAS_HCD=y +# CONFIG_USB_ARCH_HAS_OHCI is not set +CONFIG_USB_ARCH_HAS_EHCI=y +# CONFIG_USB is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_MUSB_HDRC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_GADGET_SELECTED=y +# CONFIG_USB_GADGET_R8A66597 is not set +# CONFIG_USB_GADGET_PXA_U2O is not set +# CONFIG_USB_GADGET_M66592 is not set +# CONFIG_USB_GADGET_CI13XXX_MSM is not set +CONFIG_USB_GADGET_MSM_72K=y +CONFIG_USB_MSM_72K=y +# CONFIG_USB_MSM_72K_HTC is not set +CONFIG_USB_GADGET_DUALSPEED=y +# CONFIG_USB_ZERO is not set +# CONFIG_USB_ETH is not set +# CONFIG_USB_G_NCM is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_FILE_STORAGE is not set +# CONFIG_USB_MASS_STORAGE is not set +# CONFIG_USB_G_SERIAL is not set +# CONFIG_USB_G_PRINTER is not 
set +CONFIG_USB_ANDROID=y +# CONFIG_USB_ANDROID_ACM is not set +CONFIG_USB_ANDROID_ADB=y +CONFIG_USB_ANDROID_DIAG=y +CONFIG_USB_ANDROID_MASS_STORAGE=y +# CONFIG_USB_ANDROID_MTP is not set +CONFIG_USB_ANDROID_RNDIS=y +CONFIG_USB_ANDROID_RNDIS_WCEIS=y +CONFIG_USB_ANDROID_ACCESSORY=y +CONFIG_USB_CSW_HACK=y +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set + +# +# OTG and related infrastructure +# +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ULPI is not set +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_MSM_OTG_72K is not set +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_UNSAFE_RESUME=y +# CONFIG_MMC_CLKGATE is not set +CONFIG_MMC_EMBEDDED_SDIO=y +CONFIG_MMC_PARANOID_SD_INIT=y + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=8 +# CONFIG_MMC_BLOCK_BOUNCE is not set +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_SDHCI is not set +CONFIG_MMC_MSM=y +# CONFIG_MMC_DW is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y + +# +# LED drivers +# +# CONFIG_LEDS_PCA9532 is not set +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_GPIO_PLATFORM=y +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 is not set +CONFIG_LEDS_CPLD=y +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_LT3593 is not set +CONFIG_LEDS_TRIGGERS=y + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set +CONFIG_LEDS_TRIGGER_SLEEP=y + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_NFC_DEVICES is not set +CONFIG_SWITCH=y +CONFIG_SWITCH_GPIO=y +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +# CONFIG_RTC_INTF_SYSFS is not set +# CONFIG_RTC_INTF_PROC is not set +# CONFIG_RTC_INTF_DEV is not set +CONFIG_RTC_INTF_ALARM=y +CONFIG_RTC_INTF_ALARM_DEV=y +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set + +# +# SPI RTC drivers +# + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_CMOS is not set +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# 
+CONFIG_RTC_DRV_MSM7X00A=y +# CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set +CONFIG_STAGING=y +# CONFIG_STAGING_EXCLUDE_BUILD is not set +# CONFIG_ECHO is not set +# CONFIG_BRCM80211 is not set +# CONFIG_COMEDI is not set + +# +# Android +# +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_LOGGER=y +CONFIG_ANDROID_RAM_CONSOLE=y +CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE=128 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE=16 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE=8 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL=0x11d +# CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT is not set +CONFIG_ANDROID_TIMED_OUTPUT=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +# CONFIG_POHMELFS is not set +# CONFIG_IIO is not set +CONFIG_XVMALLOC=y +CONFIG_ZRAM=y +# CONFIG_ZRAM_DEBUG is not set +# CONFIG_FB_SM7XX is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_ST_BT is not set +# CONFIG_LIRC_STAGING is not set +CONFIG_MACH_NO_WESTBRIDGE=y +# CONFIG_ATH6K_LEGACY is not set +# CONFIG_FT1000 is not set + +# +# Speakup console speech +# +# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set + +# +# File systems +# +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT23=y +CONFIG_EXT4_FS_XATTR=y +# CONFIG_EXT4_FS_POSIX_ACL is not set +# CONFIG_EXT4_FS_SECURITY is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_FS_POSIX_ACL is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +# CONFIG_DNOTIFY is not set +CONFIG_INOTIFY_USER=y +# CONFIG_FANOTIFY is not set +# CONFIG_QUOTA is not set +# CONFIG_QUOTACTL is not set +# CONFIG_AUTOFS4_FS is not set +CONFIG_FUSE_FS=y +# CONFIG_CUSE is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +# CONFIG_MSDOS_FS is not set +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_YAFFS_FS=y +CONFIG_YAFFS_YAFFS1=y +# CONFIG_YAFFS_9BYTE_TAGS is not set +# CONFIG_YAFFS_DOES_ECC is not set +CONFIG_YAFFS_YAFFS2=y +CONFIG_YAFFS_AUTO_YAFFS2=y +CONFIG_YAFFS_DISABLE_TAGS_ECC=y +# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set +# CONFIG_YAFFS_EMPTY_LOST_AND_FOUND is not set +# CONFIG_YAFFS_DISABLE_BLOCK_REFRESHING is not set +# CONFIG_YAFFS_DISABLE_BACKGROUND is not set +CONFIG_YAFFS_XATTR=y +# CONFIG_JFFS2_FS is not set +# CONFIG_LOGFS is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is 
not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +# CONFIG_NFS_FS is not set +# CONFIG_NFSD is not set +# CONFIG_CEPH_FS is not set +CONFIG_CIFS=y +# CONFIG_CIFS_STATS is not set +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_ACL is not set +# CONFIG_CIFS_EXPERIMENTAL is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +CONFIG_NLS_UTF8=y + +# +# Kernel hacking +# +CONFIG_PRINTK_TIME=y +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SHIRQ is not set +# CONFIG_LOCKUP_DETECTOR is not set +# CONFIG_HARDLOCKUP_DETECTOR is not set +# CONFIG_DETECT_HUNG_TASK is not set +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_STATS is not set +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_BKL is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_SPARSE_RCU_POINTER is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set +CONFIG_FRAME_POINTER=y +# 
CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# CONFIG_LKDTM is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +# CONFIG_SYSCTL_SYSCALL_CHECK is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_EVENT_POWER_TRACING_DEPRECATED=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_STRICT_DEVMEM is not set +# CONFIG_ARM_UNWIND is not set +# CONFIG_DEBUG_USER is not set +# CONFIG_DEBUG_ERRORS is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_LL is not set +# CONFIG_OC_ETM is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_GHASH is not set +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=y +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA 
is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_TWOFISH_COMMON=y + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set +CONFIG_CRYPTO_HW=y +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +# CONFIG_XZ_DEC is not set +# CONFIG_XZ_DEC_BCJ is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=y +CONFIG_TEXTSEARCH_BM=y +CONFIG_TEXTSEARCH_FSM=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/arm/configs/evervolv_incrediblec_defconfig b/arch/arm/configs/evervolv_incrediblec_defconfig new file mode 100644 index 0000000000000..40738ac2a9639 --- /dev/null +++ b/arch/arm/configs/evervolv_incrediblec_defconfig @@ -0,0 +1,2217 @@ +# +# Automatically generated make config: don't edit +# Linux/arm 2.6.38.8 Kernel Configuration +# Mon Nov 25 21:11:34 2013 +# +CONFIG_ARM=y +CONFIG_SYS_SUPPORTS_APM_EMULATION=y +CONFIG_GENERIC_GPIO=y +# CONFIG_ARCH_USES_GETTIMEOFFSET is not set +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_HAVE_PROC_CPU=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_ARCH_HAS_CPUFREQ=y +CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_VECTORS_BASE=0xffff0000 +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y +CONFIG_HAVE_IRQ_WORK=y + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="arm-eabi-" +CONFIG_LOCALVERSION="-evervolv" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +# CONFIG_KERNEL_GZIP is not set +CONFIG_KERNEL_LZMA=y +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +CONFIG_SWAP=y +# CONFIG_SYSVIPC is not set +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set +CONFIG_HAVE_GENERIC_HARDIRQS=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_HARDIRQS=y +# CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED is not set +CONFIG_HAVE_SPARSE_IRQ=y +# CONFIG_GENERIC_PENDING_IRQ is not set +# CONFIG_AUTO_IRQ_AFFINITY is not set +# CONFIG_IRQ_PER_CPU is not set +# CONFIG_SPARSE_IRQ is not set + +# +# RCU Subsystem +# +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_TINY_RCU is not set +CONFIG_TINY_PREEMPT_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_TRACE is not set +# CONFIG_TREE_RCU_TRACE is not set 
+CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=1 +CONFIG_RCU_BOOST_DELAY=500 +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_CGROUPS=y +# CONFIG_CGROUP_DEBUG is not set +# CONFIG_CGROUP_NS is not set +CONFIG_CGROUP_FREEZER=y +# CONFIG_CGROUP_DEVICE is not set +# CONFIG_CPUSETS is not set +CONFIG_CGROUP_CPUACCT=y +# CONFIG_RESOURCE_COUNTERS is not set +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +# CONFIG_BLK_CGROUP is not set +# CONFIG_NAMESPACES is not set +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_EXPERT=y +CONFIG_EMBEDDED=y +CONFIG_UID16=y +# CONFIG_SYSCTL_SYSCALL is not set +# CONFIG_KALLSYMS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +# CONFIG_ELF_CORE is not set +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_ASHMEM=y +CONFIG_AIO=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +# CONFIG_PERF_EVENTS is not set +# CONFIG_PERF_COUNTERS is not set +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_COMPAT_BRK=y +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +CONFIG_TRACEPOINTS=y +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_DMA_API_DEBUG=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +CONFIG_LBDAF=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_IOSCHED_BFQ=y +# CONFIG_CGROUP_BFQIO is not set +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_BFQ is not set +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +# CONFIG_INLINE_SPIN_UNLOCK is not set +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_READ_TRYLOCK is not set +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +# CONFIG_INLINE_READ_UNLOCK is not set +# CONFIG_INLINE_READ_UNLOCK_BH is not set +# CONFIG_INLINE_READ_UNLOCK_IRQ is not set +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_TRYLOCK is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not 
set +# CONFIG_INLINE_WRITE_UNLOCK is not set +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +# CONFIG_MUTEX_SPIN_ON_OWNER is not set +CONFIG_FREEZER=y + +# +# System Type +# +CONFIG_MMU=y +# CONFIG_ARCH_AAEC2000 is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_REALVIEW is not set +# CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_VEXPRESS is not set +# CONFIG_ARCH_AT91 is not set +# CONFIG_ARCH_BCMRING is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CNS3XXX is not set +# CONFIG_ARCH_GEMINI is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_EP93XX is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_MXS is not set +# CONFIG_ARCH_STMP3XXX is not set +# CONFIG_ARCH_NETX is not set +# CONFIG_ARCH_H720X is not set +# CONFIG_ARCH_IOP13XX is not set +# CONFIG_ARCH_IOP32X is not set +# CONFIG_ARCH_IOP33X is not set +# CONFIG_ARCH_IXP23XX is not set +# CONFIG_ARCH_IXP2000 is not set +# CONFIG_ARCH_IXP4XX is not set +# CONFIG_ARCH_DOVE is not set +# CONFIG_ARCH_KIRKWOOD is not set +# CONFIG_ARCH_LOKI is not set +# CONFIG_ARCH_LPC32XX is not set +# CONFIG_ARCH_MV78XX0 is not set +# CONFIG_ARCH_ORION5X is not set +# CONFIG_ARCH_MMP is not set +# CONFIG_ARCH_KS8695 is not set +# CONFIG_ARCH_NS9XXX is not set +# CONFIG_ARCH_W90X900 is not set +# CONFIG_ARCH_NUC93X is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_PNX4008 is not set +# CONFIG_ARCH_PXA is not set +CONFIG_ARCH_MSM=y +# CONFIG_ARCH_SHMOBILE is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_S3C64XX is not set +# CONFIG_ARCH_S5P64X0 is not set +# CONFIG_ARCH_S5P6442 is not set +# CONFIG_ARCH_S5PC100 is not set +# CONFIG_ARCH_S5PV210 is not set +# CONFIG_ARCH_S5PV310 is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_TCC_926 is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_U300 is not set +# CONFIG_ARCH_U8500 is not set +# CONFIG_ARCH_NOMADIK is not set +# CONFIG_ARCH_DAVINCI is not set +# CONFIG_ARCH_OMAP is not set +# CONFIG_PLAT_SPEAR is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_ARCH_MSM7X00A is not set +# CONFIG_ARCH_MSM7X30 is not set +CONFIG_ARCH_QSD8X50=y +# CONFIG_ARCH_MSM8X60 is not set +CONFIG_ARCH_MSM_SCORPION=y +CONFIG_HAS_MSM_DEBUG_UART_PHYS=y +CONFIG_MSM_MDP31=y +# CONFIG_PERFLOCK is not set + +# +# Qualcomm MSM Board Type +# +# CONFIG_MACH_SWORDFISH is not set +CONFIG_MACH_QSD8X50_SURF=y +# CONFIG_MACH_QSD8X50A_ST1_5 is not set +CONFIG_MSM_DEBUG_UART_NONE=y +# CONFIG_MSM_DEBUG_UART1 is not set +# CONFIG_MSM_DEBUG_UART2 is not set +# CONFIG_MSM_DEBUG_UART3 is not set +CONFIG_MSM_PROC_COMM=y +# CONFIG_MACH_MAHIMAHI is not set +CONFIG_MACH_BRAVO_NONE=y +# CONFIG_MACH_BRAVO is not set +# CONFIG_MACH_BRAVOC is not set +# CONFIG_MACH_INCREDIBLE is not set +CONFIG_MACH_INCREDIBLEC=y +# CONFIG_MACH_SUPERSONIC is not set +# CONFIG_MACH_QSD8X50_FFA is not set +# CONFIG_HTC_HEADSET is not set +# CONFIG_HTC_35MM_JACK is not set +CONFIG_HTC_BATTCHG=y +CONFIG_HTC_BATTCHG_SMEM=y +# CONFIG_HTC_PWRSPLY is not set +# CONFIG_HTC_PWRSINK is not set +# CONFIG_HTC_POWER_COLLAPSE_MAGIC is not set +# CONFIG_HTC_ONMODE_CHARGING is not set +CONFIG_QSD_SVS=y +CONFIG_QSD_PMIC_DEFAULT_DCDC1=1275 +CONFIG_CACHE_FLUSH_RANGE_LIMIT=0x40000 +CONFIG_PHYS_OFFSET=0x20000000 +CONFIG_MSM7X00A_USE_GP_TIMER=y +# CONFIG_MSM7X00A_USE_DG_TIMER is not set 
+CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE_SUSPEND=y +# CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE is not set +# CONFIG_MSM7X00A_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_SLEEP_MODE=0 +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE_SUSPEND is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE_APPS_SLEEP=y +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE=2 +CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME=20000000 +CONFIG_MSM7X00A_IDLE_SPIN_TIME=80000 +CONFIG_MSM_IDLE_STATS=y +CONFIG_MSM_IDLE_STATS_FIRST_BUCKET=62500 +CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT=2 +CONFIG_MSM_IDLE_STATS_BUCKET_COUNT=10 +CONFIG_MSM_FIQ_SUPPORT=y +CONFIG_MSM_SERIAL_DEBUGGER=y +CONFIG_MSM_SERIAL_DEBUGGER_NO_SLEEP=y +# CONFIG_MSM_SERIAL_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON is not set +# CONFIG_MSM_SERIAL_DEBUGGER_CONSOLE is not set +CONFIG_MSM_SMD=y +CONFIG_MSM_GPIOMUX=y +CONFIG_MSM_DAL=y +CONFIG_MSM_ONCRPCROUTER=y +CONFIG_MSM_CPU_FREQ_SET_MIN_MAX=y +CONFIG_MSM_CPU_FREQ_MAX=998000 +CONFIG_MSM_CPU_FREQ_MIN=245760 +# CONFIG_AXI_SCREEN_POLICY is not set +CONFIG_MSM_CPU_AVS=y +CONFIG_MSM_AVS_HW=y +CONFIG_HTC_ACOUSTIC_QSD=y +CONFIG_MSM_QDSP6=y +CONFIG_WIFI_CONTROL_FUNC=y +# CONFIG_WIFI_MEM_PREALLOC is not set +CONFIG_ARCH_MSM_FLASHLIGHT=y +CONFIG_MICROP_COMMON=y +CONFIG_HTC_HEADSET_MGR=y +CONFIG_HTC_HEADSET_GPIO=y +CONFIG_HTC_HEADSET_MICROP=y +# CONFIG_VIRTUAL_KPANIC_PARTITION is not set + +# +# System MMU +# + +# +# Processor Type +# +CONFIG_CPU_32v6K=y +CONFIG_CPU_V7=y +CONFIG_CPU_32v7=y +CONFIG_CPU_ABRT_EV7=y +CONFIG_CPU_PABRT_V7=y +CONFIG_CPU_CACHE_V7=y +CONFIG_CPU_CACHE_VIPT=y +CONFIG_CPU_COPY_V6=y +CONFIG_CPU_TLB_V7=y +CONFIG_VERIFY_PERMISSION_FAULT=y +CONFIG_CPU_HAS_ASID=y +CONFIG_CPU_CP15=y +CONFIG_CPU_CP15_MMU=y + +# +# Processor Features +# +CONFIG_ARM_THUMB=y +CONFIG_ARM_THUMBEE=y +# CONFIG_SWP_EMULATE is not set +# CONFIG_CPU_ICACHE_DISABLE is not set +# CONFIG_CPU_DCACHE_DISABLE is not set +# CONFIG_CPU_CACHE_ERR_REPORT is not set +# CONFIG_CPU_BPREDICT_DISABLE is not set +CONFIG_ARM_L1_CACHE_SHIFT=5 +CONFIG_ARM_DMA_MEM_BUFFERABLE=y +# CONFIG_VCM is not set +# CONFIG_STRICT_MEMORY_RWX is not set +# CONFIG_RESERVE_FIRST_PAGE is not set +CONFIG_CPU_HAS_PMU=y +# CONFIG_ARM_ERRATA_430973 is not set +# CONFIG_ARM_ERRATA_458693 is not set +# CONFIG_ARM_ERRATA_460075 is not set +# CONFIG_ARM_ERRATA_743622 is not set +# CONFIG_KSAPI is not set +# CONFIG_FIQ_DEBUGGER is not set + +# +# Bus support +# +# CONFIG_PCI_SYSCALL is not set +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Kernel Features +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_VMSPLIT_3G=y +# CONFIG_VMSPLIT_2G is not set +# CONFIG_VMSPLIT_1G is not set +CONFIG_PAGE_OFFSET=0xC0000000 +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_HZ=100 +# CONFIG_THUMB2_KERNEL is not set +CONFIG_AEABI=y +# CONFIG_OABI_COMPAT is not set +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set +# CONFIG_HIGHMEM is not set +CONFIG_VMALLOC_RESERVE=0x08000000 +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_PAGEFLAGS_EXTENDED=y 
+CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_COMPACTION is not set +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_VIRT_TO_BUS=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 +CONFIG_NEED_PER_CPU_KM=y +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_ALIGNMENT_TRAP=y +# CONFIG_UACCESS_WITH_MEMCPY is not set +# CONFIG_SECCOMP is not set +# CONFIG_CC_STACKPROTECTOR is not set +# CONFIG_DEPRECATED_PARAM_STRUCT is not set +# CONFIG_CP_ACCESS is not set + +# +# Boot options +# +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_CMDLINE="mem=64M console=ttyMSM0" +# CONFIG_CMDLINE_FORCE is not set +# CONFIG_XIP_KERNEL is not set +# CONFIG_KEXEC is not set +# CONFIG_CRASH_DUMP is not set +# CONFIG_AUTO_ZRELADDR is not set + +# +# CPU Power Management +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TABLE=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_STAT_DETAILS=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2 is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_INTERACTIVE=y +# CONFIG_CPU_FREQ_GOV_SMARTASS2 is not set +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +# CONFIG_CPU_IDLE is not set +CONFIG_CPU_FREQ_MSM=y + +# +# Floating point emulation +# + +# +# At least one emulation must be selected +# +CONFIG_VFP=y +CONFIG_VFPv3=y +CONFIG_NEON=y + +# +# Userspace binary formats +# +CONFIG_BINFMT_ELF=y +CONFIG_HAVE_AOUT=y +# CONFIG_BINFMT_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options +# +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_SLEEP=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HAS_WAKELOCK=y +CONFIG_HAS_EARLYSUSPEND=y +CONFIG_WAKELOCK=y +CONFIG_WAKELOCK_STAT=y +CONFIG_USER_WAKELOCK=y +CONFIG_EARLYSUSPEND=y +# CONFIG_NO_USER_SPACE_SCREEN_ACCESS_CONTROL is not set +CONFIG_FB_EARLYSUSPEND=y +# CONFIG_APM_EMULATION is not set +# CONFIG_PM_RUNTIME is not set +CONFIG_PM_OPS=y +# CONFIG_SUSPEND_TIME is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=y +CONFIG_NET_KEY=y +# CONFIG_NET_KEY_MIGRATE is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +CONFIG_INET_ESP=y +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +CONFIG_IPV6=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +# 
CONFIG_IPV6_ROUTE_INFO is not set +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_INET6_XFRM_TUNNEL=y +CONFIG_INET6_TUNNEL=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +CONFIG_IPV6_SIT=y +# CONFIG_IPV6_SIT_6RD is not set +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=y +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +# CONFIG_IPV6_MROUTE is not set +CONFIG_ANDROID_PARANOID_NETWORK=y +CONFIG_NET_ACTIVITY_STATS=y +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_NETLINK=y +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_TPROXY=y +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=y +CONFIG_NETFILTER_XT_CONNMARK=y + +# +# Xtables targets +# +# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +# CONFIG_NETFILTER_XT_TARGET_CT is not set +# CONFIG_NETFILTER_XT_TARGET_DSCP is not set +CONFIG_NETFILTER_XT_TARGET_HL=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LED=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set +# CONFIG_NETFILTER_XT_TARGET_TEE is not set +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set +# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set + +# +# Xtables matches +# +# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +# CONFIG_NETFILTER_XT_MATCH_CPU is not set +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +# CONFIG_NETFILTER_XT_MATCH_DSCP is not set +# CONFIG_NETFILTER_XT_MATCH_ESP is not set +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_HL=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set +# CONFIG_NETFILTER_XT_MATCH_OSF is not set +# CONFIG_NETFILTER_XT_MATCH_OWNER is not set +CONFIG_NETFILTER_XT_MATCH_POLICY=y +# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set +# 
CONFIG_NETFILTER_XT_MATCH_REALM is not set +# CONFIG_NETFILTER_XT_MATCH_RECENT is not set +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +# CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_CONNTRACK_PROC_COMPAT=y +# CONFIG_IP_NF_QUEUE is not set +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_ADDRTYPE=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_LOG=y +# CONFIG_IP_NF_TARGET_ULOG is not set +CONFIG_NF_NAT=y +CONFIG_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +# CONFIG_NF_NAT_SNMP_BASIC is not set +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_GRE=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_FTP=y +CONFIG_NF_NAT_IRC=y +CONFIG_NF_NAT_TFTP=y +CONFIG_NF_NAT_AMANDA=y +CONFIG_NF_NAT_PPTP=y +CONFIG_NF_NAT_H323=y +CONFIG_NF_NAT_SIP=y +CONFIG_IP_NF_MANGLE=y +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +# CONFIG_IP_NF_TARGET_ECN is not set +# CONFIG_IP_NF_TARGET_TTL is not set +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV6=y +CONFIG_NF_CONNTRACK_IPV6=y +# CONFIG_IP6_NF_QUEUE is not set +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_EUI64=y +CONFIG_IP6_NF_MATCH_FRAG=y +CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +CONFIG_IP6_NF_MATCH_RT=y +CONFIG_IP6_NF_TARGET_HL=y +CONFIG_IP6_NF_TARGET_LOG=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +# CONFIG_BRIDGE_NF_EBTABLES is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +CONFIG_STP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +# CONFIG_NET_SCH_CBQ is not set +CONFIG_NET_SCH_HTB=y +# CONFIG_NET_SCH_HFSC is not set +# CONFIG_NET_SCH_PRIO is not set +# CONFIG_NET_SCH_MULTIQ is not set +# CONFIG_NET_SCH_RED is not set +# CONFIG_NET_SCH_SFQ is not set +# CONFIG_NET_SCH_TEQL is not set +# CONFIG_NET_SCH_TBF is not set +# CONFIG_NET_SCH_GRED is not set +# CONFIG_NET_SCH_DSMARK is not set +# CONFIG_NET_SCH_NETEM is not set +# CONFIG_NET_SCH_DRR is not set +CONFIG_NET_SCH_INGRESS=y + +# +# Classification +# +CONFIG_NET_CLS=y +# CONFIG_NET_CLS_BASIC is not set +# CONFIG_NET_CLS_TCINDEX is not set +# CONFIG_NET_CLS_ROUTE4 is not set +# CONFIG_NET_CLS_FW is not set +CONFIG_NET_CLS_U32=y +# CONFIG_CLS_U32_PERF is not set +# CONFIG_CLS_U32_MARK is not set +# CONFIG_NET_CLS_RSVP is not set +# CONFIG_NET_CLS_RSVP6 is not set +# CONFIG_NET_CLS_FLOW is not set +# CONFIG_NET_CLS_CGROUP is not set +CONFIG_NET_EMATCH=y 
+CONFIG_NET_EMATCH_STACK=32 +# CONFIG_NET_EMATCH_CMP is not set +# CONFIG_NET_EMATCH_NBYTE is not set +CONFIG_NET_EMATCH_U32=y +# CONFIG_NET_EMATCH_META is not set +# CONFIG_NET_EMATCH_TEXT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=y +# CONFIG_GACT_PROB is not set +CONFIG_NET_ACT_MIRRED=y +# CONFIG_NET_ACT_IPT is not set +# CONFIG_NET_ACT_NAT is not set +# CONFIG_NET_ACT_PEDIT is not set +# CONFIG_NET_ACT_SIMP is not set +# CONFIG_NET_ACT_SKBEDIT is not set +# CONFIG_NET_ACT_CSUM is not set +# CONFIG_NET_CLS_IND is not set +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NET_DROP_MONITOR is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +CONFIG_BT=y +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +# CONFIG_BT_BNEP_MC_FILTER is not set +# CONFIG_BT_BNEP_PROTO_FILTER is not set +CONFIG_BT_HIDP=y + +# +# Bluetooth device drivers +# +# CONFIG_BT_HCIBTSDIO is not set +CONFIG_BT_HCIUART=y +CONFIG_BT_HCIUART_H4=y +# CONFIG_BT_HCIUART_BCSP is not set +# CONFIG_BT_HCIUART_ATH3K is not set +CONFIG_BT_HCIUART_LL=y +# CONFIG_BT_HCIVHCI is not set +# CONFIG_BT_MRVL is not set +# CONFIG_AF_RXRPC is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_CFG80211=y +CONFIG_NL80211_TESTMODE=y +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_REG_DEBUG is not set +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +# CONFIG_CFG80211_INTERNAL_REGDB is not set +CONFIG_CFG80211_WEXT=y +CONFIG_WIRELESS_EXT_SYSFS=y +# CONFIG_LIB80211 is not set +CONFIG_CFG80211_ALLOW_RECONNECT=y +# CONFIG_MAC80211 is not set + +# +# Some wireless drivers require a rate control algorithm +# +# CONFIG_WIMAX is not set +CONFIG_RFKILL=y +# CONFIG_RFKILL_PM is not set +CONFIG_RFKILL_LEDS=y +# CONFIG_RFKILL_INPUT is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="" +# CONFIG_DEVTMPFS is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +CONFIG_GENLOCK=y +CONFIG_GENLOCK_MISCDEVICE=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +# CONFIG_SW_SYNC_USER is not set +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y 
+CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +CONFIG_MTD_MSM_NAND=y +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +CONFIG_MTD_NAND_IDS=y +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_UBI is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=y + +# +# DRBD disabled because PROC_FS, INET or CONNECTOR not selected +# +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_MG_DISK is not set +# CONFIG_BLK_DEV_RBD is not set +CONFIG_MISC_DEVICES=y +# CONFIG_AD525X_DPOT is not set +CONFIG_ANDROID_PMEM=y +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +CONFIG_KERNEL_DEBUGGER_CORE=y +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1780 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_SENSORS_AK8975 is not set +CONFIG_SENSORS_AKM8973=y +# CONFIG_SENSORS_AKM8976 is not set +# CONFIG_VP_A1026 is not set +# CONFIG_DS1682 is not set +# CONFIG_TI_DAC7512 is not set +CONFIG_UID_STAT=y +# CONFIG_BMP085 is not set +# CONFIG_WL127X_RFKILL is not set +CONFIG_SENSORS_BMA150_SPI=y +# CONFIG_APANIC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_IWMC3200TOP is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_ATA is not set +CONFIG_MD=y +# CONFIG_BLK_DEV_MD is not set +CONFIG_BLK_DEV_DM=y +# CONFIG_DM_DEBUG is not set +CONFIG_DM_CRYPT=y +# CONFIG_DM_SNAPSHOT is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_RAID is not set +# CONFIG_DM_ZERO is not set +# CONFIG_DM_MULTIPATH is not set +# CONFIG_DM_DELAY is not set +CONFIG_DM_UEVENT=y +CONFIG_NETDEVICES=y +CONFIG_IFB=m +CONFIG_DUMMY=y +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +CONFIG_TUN=y +# CONFIG_VETH is not set +CONFIG_MII=y +# CONFIG_PHYLIB is not set +CONFIG_NET_ETHERNET=y +# CONFIG_AX88796 is not set +CONFIG_SMC91X=y +# CONFIG_DM9000 is not set +# CONFIG_ENC28J60 is not set +# CONFIG_ETHOC is not set +# CONFIG_SMC911X is not set +# CONFIG_SMSC911X is not set +# CONFIG_DNET is not set +# CONFIG_IBM_NEW_EMAC_ZMII is not set +# CONFIG_IBM_NEW_EMAC_RGMII is not set +# 
CONFIG_IBM_NEW_EMAC_TAH is not set +# CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set +# CONFIG_B44 is not set +# CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set +CONFIG_WLAN=y +# CONFIG_ATH_COMMON is not set +# CONFIG_BCM4329 is not set +CONFIG_BCMDHD=m +CONFIG_BCMDHD_FW_PATH="/vendor/firmware/fw_bcmdhd.bin" +CONFIG_BCMDHD_NVRAM_PATH="/proc/calibration" +# CONFIG_DHD_USE_STATIC_BUF is not set +# CONFIG_DHD_USE_SCHED_SCAN is not set +# CONFIG_DHD_ENABLE_P2P is not set +# CONFIG_HOSTAP is not set +# CONFIG_IWM is not set +# CONFIG_LIBERTAS is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set + +# +# CAIF transport drivers +# +CONFIG_PPP=y +# CONFIG_PPP_MULTILINK is not set +# CONFIG_PPP_FILTER is not set +CONFIG_PPP_ASYNC=y +# CONFIG_PPP_SYNC_TTY is not set +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_MPPE=y +# CONFIG_PPPOE is not set +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +# CONFIG_SLIP is not set +CONFIG_SLHC=y +# CONFIG_NETCONSOLE is not set +CONFIG_MSM_RMNET=y +# CONFIG_MSM_RMNET_DEBUG is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +CONFIG_GAN_ETH=y +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set +CONFIG_INPUT_KEYRESET=y + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_ATMEL=y +CONFIG_TOUCHSCREEN_COMPATIBLE_REPORT=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_I2C is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_ELAN_I2C_8232 is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_QT602240 is not set +# CONFIG_TOUCHSCREEN_MSM is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_W90X900 is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATI_REMOTE is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +CONFIG_INPUT_KEYCHORD=y +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is 
not set +# CONFIG_INPUT_CM109 is not set +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_CAPELLA_CM3602 is not set +# CONFIG_INPUT_CAPELLA_CM3602_HTC is not set +CONFIG_LIGHTSENSOR_MICROP=y +CONFIG_INPUT_OPTICALJOYSTICK=y +CONFIG_OPTICALJOYSTICK_CRUCIAL=y +CONFIG_OPTICALJOYSTICK_CRUCIAL_uP=y +# CONFIG_OPTICALJOYSTICK_CRUCIAL_SPI is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_N_GSM is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX3107 is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_MSM=y +# CONFIG_SERIAL_MSM_CONSOLE is not set +# CONFIG_SERIAL_MSM_CLOCK_CONTROL is not set +CONFIG_SERIAL_MSM_HS=y +CONFIG_SERIAL_BCM_BT_LPM=y +# CONFIG_SERIAL_TIMBERDALE is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_TTY_PRINTK is not set +# CONFIG_HVC_DCC is not set +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_DCC_TTY is not set +# CONFIG_RAMOOPS is not set +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +# CONFIG_I2C_CHARDEV is not set +# CONFIG_I2C_MUX is not set +CONFIG_I2C_HELPER_AUTO=y + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE is not set +# CONFIG_I2C_GPIO is not set +CONFIG_I2C_MSM=y +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_PXA2XX_PCI is not set +CONFIG_SPI_QSD=y +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_DESIGNWARE is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_TLE62X0 is not set + +# +# PPS support +# +# CONFIG_PPS is not set + +# +# PPS generators support +# +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set + +# +# Memory mapped GPIO expanders: +# +# CONFIG_GPIO_BASIC_MMIO is not set +# CONFIG_GPIO_IT8761E is not set + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_SX150X is not set +# CONFIG_GPIO_ADP5588 is not set + +# +# PCI GPIO expanders: +# + +# +# SPI GPIO expanders: +# +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_74X164 is not set + +# +# AC97 GPIO expanders: +# + +# +# 
MODULbus GPIO expanders: +# +# CONFIG_W1 is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_DS2784 is not set +# CONFIG_BATTERY_BQ20Z75 is not set +# CONFIG_BATTERY_BQ27x00 is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set +CONFIG_MFD_SUPPORT=y +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_ASIC3 is not set +# CONFIG_HTC_EGPIO is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_TPS65200 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_MFD_STMPE is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_T7L66XB is not set +# CONFIG_MFD_TC6387XB is not set +# CONFIG_MFD_TC6393XB is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13XXX is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_WL1273_CORE is not set +CONFIG_REGULATOR=y +# CONFIG_REGULATOR_DEBUG is not set +# CONFIG_REGULATOR_DUMMY is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_BQ24022 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +CONFIG_REGULATOR_TPS65023=y +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_AD5398 is not set +# CONFIG_REGULATOR_TPS6524X is not set +CONFIG_MEDIA_SUPPORT=y + +# +# Multimedia core support +# +# CONFIG_VIDEO_DEV is not set +# CONFIG_DVB_CORE is not set +# CONFIG_VIDEO_MEDIA is not set + +# +# Multimedia drivers +# +CONFIG_RC_CORE=y +CONFIG_LIRC=y +CONFIG_RC_MAP=y +CONFIG_IR_NEC_DECODER=y +CONFIG_IR_RC5_DECODER=y +CONFIG_IR_RC6_DECODER=y +CONFIG_IR_JVC_DECODER=y +CONFIG_IR_SONY_DECODER=y +CONFIG_IR_RC5_SZ_DECODER=y +CONFIG_IR_LIRC_CODEC=y +# CONFIG_IR_IMON is not set +# CONFIG_IR_MCEUSB is not set +# CONFIG_IR_STREAMZAP is not set +# CONFIG_RC_LOOPBACK is not set + +# +# Qualcomm MSM Camera And Video +# +CONFIG_MSM_CAMERA=y +CONFIG_720P_CAMERA=y +# CONFIG_MSM_CAMERA_DEBUG is not set + +# +# Camera Sensor Selection +# +# CONFIG_MT9T013 is not set +# CONFIG_MT9D112 is not set +# CONFIG_MT9P012 is not set +# CONFIG_S5K3E2FX is not set +# CONFIG_S5K6AAFX is not set +CONFIG_OV8810=y +# CONFIG_OV9665 is not set +# CONFIG_S5K3H1GX is not set + +# +# Graphics support +# +# CONFIG_DRM is not set +CONFIG_ION=y +CONFIG_ION_MSM=y +CONFIG_MSM_KGSL=y +# CONFIG_MSM_KGSL_CFF_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP is not set +# 
CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX is not set +# CONFIG_KGSL_PER_PROCESS_PAGE_TABLE is not set +CONFIG_MSM_KGSL_PAGE_TABLE_SIZE=0xFFF0000 +CONFIG_MSM_KGSL_MMU_PAGE_FAULT=y +# CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES is not set +# CONFIG_VGASTATE is not set +CONFIG_VIDEO_OUTPUT_CONTROL=y +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_WMT_GE_ROPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_FB_MSM=y +CONFIG_FB_MSM_LEGACY_MDP=y +CONFIG_FB_MSM_MDP_PPP=y +CONFIG_FB_MSM_LCDC=y +CONFIG_FB_MSM_MDDI=y +# CONFIG_FB_MSM_MDDI_EPSON is not set +# CONFIG_FB_MSM_MDDI_NOVTEC is not set +# CONFIG_MSM_HDMI is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set +# CONFIG_LOGO is not set +# CONFIG_SOUND is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +# CONFIG_HIDRAW is not set +CONFIG_UHID=y +# CONFIG_HID_PID is not set + +# +# Special HID drivers +# +CONFIG_HID_APPLE=y +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_WACOM is not set +CONFIG_USB_SUPPORT=y +CONFIG_USB_ARCH_HAS_HCD=y +# CONFIG_USB_ARCH_HAS_OHCI is not set +CONFIG_USB_ARCH_HAS_EHCI=y +# CONFIG_USB is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_MUSB_HDRC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_GADGET_SELECTED=y +# CONFIG_USB_GADGET_R8A66597 is not set +# CONFIG_USB_GADGET_PXA_U2O is not set +# CONFIG_USB_GADGET_M66592 is not set +# CONFIG_USB_GADGET_CI13XXX_MSM is not set +CONFIG_USB_GADGET_MSM_72K=y +CONFIG_USB_MSM_72K=y +CONFIG_USB_MSM_72K_HTC=y +CONFIG_USB_GADGET_DUALSPEED=y +# CONFIG_USB_ZERO is not set +# CONFIG_USB_ETH is not set +# CONFIG_USB_G_NCM is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_FILE_STORAGE is not set +# CONFIG_USB_MASS_STORAGE is not set +# CONFIG_USB_G_SERIAL is not set +# CONFIG_USB_G_PRINTER is not set +CONFIG_USB_ANDROID=y +# CONFIG_USB_ANDROID_ACM is not set +CONFIG_USB_ANDROID_ADB=y +CONFIG_USB_ANDROID_DIAG=y +CONFIG_USB_ANDROID_MASS_STORAGE=y +# CONFIG_USB_ANDROID_MTP is not set +CONFIG_USB_ANDROID_RNDIS=y +# CONFIG_USB_ANDROID_RNDIS_WCEIS is not set +# CONFIG_USB_ANDROID_ACCESSORY is not set +CONFIG_USB_CSW_HACK=y +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set +# CONFIG_USB_ACCESSORY_DETECT is not set +# CONFIG_DOCK_ACCESSORY_DETECT is not set +# CONFIG_USB_BYPASS_VBUS_NOTIFY is not set + +# +# OTG and related infrastructure +# +# CONFIG_USB_GPIO_VBUS is not set +# 
CONFIG_USB_ULPI is not set +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_MSM_OTG_72K is not set +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_UNSAFE_RESUME=y +# CONFIG_MMC_CLKGATE is not set +CONFIG_MMC_EMBEDDED_SDIO=y +CONFIG_MMC_PARANOID_SD_INIT=y + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=8 +# CONFIG_MMC_BLOCK_BOUNCE is not set +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_SDHCI is not set +CONFIG_MMC_MSM=y +# CONFIG_MMC_SPI is not set +# CONFIG_MMC_DW is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y + +# +# LED drivers +# +# CONFIG_LEDS_PCA9532 is not set +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_GPIO_PLATFORM=y +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 is not set +CONFIG_LEDS_CPLD=y +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_LT3593 is not set +CONFIG_LEDS_TRIGGERS=y + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set +CONFIG_LEDS_TRIGGER_SLEEP=y + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_NFC_DEVICES is not set +CONFIG_SWITCH=y +CONFIG_SWITCH_GPIO=y +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +# CONFIG_RTC_INTF_SYSFS is not set +# CONFIG_RTC_INTF_PROC is not set +# CONFIG_RTC_INTF_DEV is not set +CONFIG_RTC_INTF_ALARM=y +CONFIG_RTC_INTF_ALARM_DEV=y +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_CMOS is not set +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_MSM7X00A=y +# CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set +CONFIG_STAGING=y +# CONFIG_STAGING_EXCLUDE_BUILD is not 
set +# CONFIG_ECHO is not set +# CONFIG_BRCM80211 is not set +# CONFIG_COMEDI is not set + +# +# Android +# +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_LOGGER=y +CONFIG_ANDROID_RAM_CONSOLE=y +CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE=128 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE=16 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE=8 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL=0x11d +# CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT is not set +CONFIG_ANDROID_TIMED_OUTPUT=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +# CONFIG_POHMELFS is not set +# CONFIG_IIO is not set +CONFIG_XVMALLOC=y +CONFIG_ZRAM=y +# CONFIG_ZRAM_DEBUG is not set +# CONFIG_FB_SM7XX is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_ST_BT is not set +# CONFIG_LIRC_STAGING is not set +CONFIG_MACH_NO_WESTBRIDGE=y +# CONFIG_ATH6K_LEGACY is not set +# CONFIG_FT1000 is not set + +# +# Speakup console speech +# +# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set + +# +# File systems +# +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT23=y +CONFIG_EXT4_FS_XATTR=y +# CONFIG_EXT4_FS_POSIX_ACL is not set +# CONFIG_EXT4_FS_SECURITY is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_FS_POSIX_ACL is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +# CONFIG_DNOTIFY is not set +CONFIG_INOTIFY_USER=y +# CONFIG_FANOTIFY is not set +# CONFIG_QUOTA is not set +# CONFIG_QUOTACTL is not set +# CONFIG_AUTOFS4_FS is not set +CONFIG_FUSE_FS=y + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +# CONFIG_MSDOS_FS is not set +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_YAFFS_FS=y +CONFIG_YAFFS_YAFFS1=y +# CONFIG_YAFFS_9BYTE_TAGS is not set +# CONFIG_YAFFS_DOES_ECC is not set +CONFIG_YAFFS_YAFFS2=y +CONFIG_YAFFS_AUTO_YAFFS2=y +CONFIG_YAFFS_DISABLE_TAGS_ECC=y +# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set +# CONFIG_YAFFS_EMPTY_LOST_AND_FOUND is not set +# CONFIG_YAFFS_DISABLE_BLOCK_REFRESHING is not set +# CONFIG_YAFFS_DISABLE_BACKGROUND is not set +CONFIG_YAFFS_XATTR=y +# CONFIG_JFFS2_FS is not set +# CONFIG_LOGFS is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +# CONFIG_NFS_FS is not set +# 
CONFIG_NFSD is not set +# CONFIG_CEPH_FS is not set +CONFIG_CIFS=y +# CONFIG_CIFS_STATS is not set +CONFIG_CIFS_WEAK_PW_HASH=y +# CONFIG_CIFS_UPCALL is not set +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DFS_UPCALL is not set +# CONFIG_CIFS_ACL is not set +# CONFIG_CIFS_EXPERIMENTAL is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_UTF8 is not set + +# +# Kernel hacking +# +CONFIG_PRINTK_TIME=y +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SHIRQ is not set +# CONFIG_LOCKUP_DETECTOR is not set +# CONFIG_HARDLOCKUP_DETECTOR is not set +# CONFIG_DETECT_HUNG_TASK is not set +CONFIG_SCHED_DEBUG=y +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_BKL is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_SPARSE_RCU_POINTER is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# 
CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# CONFIG_LKDTM is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +# CONFIG_SYSCTL_SYSCALL_CHECK is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_EVENT_POWER_TRACING_DEPRECATED=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_STRICT_DEVMEM is not set +CONFIG_ARM_UNWIND=y +# CONFIG_DEBUG_USER is not set +# CONFIG_DEBUG_ERRORS is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_LL is not set +# CONFIG_OC_ETM is not set + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_DEBUG_PROC_KEYS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_GHASH is not set +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=y +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set 
+CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_TWOFISH_COMMON=y + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set +CONFIG_CRYPTO_HW=y +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +# CONFIG_XZ_DEC is not set +# CONFIG_XZ_DEC_BCJ is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=y +CONFIG_TEXTSEARCH_BM=y +CONFIG_TEXTSEARCH_FSM=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/arm/configs/evervolv_mahimahi_defconfig b/arch/arm/configs/evervolv_mahimahi_defconfig new file mode 100644 index 0000000000000..9446ff2346f22 --- /dev/null +++ b/arch/arm/configs/evervolv_mahimahi_defconfig @@ -0,0 +1,2183 @@ +# +# Automatically generated make config: don't edit +# Linux/arm 2.6.38.8 Kernel Configuration +# Mon Nov 25 21:05:36 2013 +# +CONFIG_ARM=y +CONFIG_SYS_SUPPORTS_APM_EMULATION=y +CONFIG_GENERIC_GPIO=y +# CONFIG_ARCH_USES_GETTIMEOFFSET is not set +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_HAVE_PROC_CPU=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_ARCH_HAS_CPUFREQ=y +CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_VECTORS_BASE=0xffff0000 +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y +CONFIG_HAVE_IRQ_WORK=y + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="" +CONFIG_LOCALVERSION="-evervolv" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +# CONFIG_KERNEL_GZIP is not set +# CONFIG_KERNEL_LZMA is not set +CONFIG_KERNEL_XZ=y +# CONFIG_KERNEL_LZO is not set +CONFIG_SWAP=y +# CONFIG_SYSVIPC is not set +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set +CONFIG_HAVE_GENERIC_HARDIRQS=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_HARDIRQS=y +# CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED is not set +CONFIG_HAVE_SPARSE_IRQ=y +# CONFIG_GENERIC_PENDING_IRQ is not set +# CONFIG_AUTO_IRQ_AFFINITY is not set +# CONFIG_IRQ_PER_CPU is not set +# CONFIG_SPARSE_IRQ is not set + +# +# RCU Subsystem +# +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_TINY_RCU is not set +CONFIG_TINY_PREEMPT_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_TRACE is not set +# CONFIG_TREE_RCU_TRACE is not set +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=1 +CONFIG_RCU_BOOST_DELAY=500 +CONFIG_IKCONFIG=y 
+CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_CGROUPS=y +# CONFIG_CGROUP_DEBUG is not set +# CONFIG_CGROUP_NS is not set +CONFIG_CGROUP_FREEZER=y +# CONFIG_CGROUP_DEVICE is not set +# CONFIG_CPUSETS is not set +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +CONFIG_CGROUP_MEM_RES_CTLR=y +CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y +CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_BLK_CGROUP=y +# CONFIG_DEBUG_BLK_CGROUP is not set +# CONFIG_NAMESPACES is not set +CONFIG_SCHED_AUTOGROUP=y +CONFIG_MM_OWNER=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_EXPERT=y +CONFIG_EMBEDDED=y +CONFIG_UID16=y +# CONFIG_SYSCTL_SYSCALL is not set +# CONFIG_KALLSYMS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +# CONFIG_ELF_CORE is not set +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_ASHMEM=y +CONFIG_AIO=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +# CONFIG_PERF_EVENTS is not set +# CONFIG_PERF_COUNTERS is not set +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_SLUB_DEBUG is not set +CONFIG_COMPAT_BRK=y +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +CONFIG_TRACEPOINTS=y +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_DMA_API_DEBUG=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +CONFIG_LBDAF=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set +# CONFIG_BLK_DEV_THROTTLING is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_CFQ_GROUP_IOSCHED=y +CONFIG_IOSCHED_BFQ=y +CONFIG_CGROUP_BFQIO=y +CONFIG_DEFAULT_DEADLINE=y +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_BFQ is not set +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="deadline" +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +# CONFIG_INLINE_SPIN_UNLOCK is not set +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_READ_TRYLOCK is not set +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +# CONFIG_INLINE_READ_UNLOCK is not set +# CONFIG_INLINE_READ_UNLOCK_BH is not set +# CONFIG_INLINE_READ_UNLOCK_IRQ is not set +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_TRYLOCK is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# 
CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set +# CONFIG_INLINE_WRITE_UNLOCK is not set +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +# CONFIG_MUTEX_SPIN_ON_OWNER is not set +CONFIG_FREEZER=y + +# +# System Type +# +CONFIG_MMU=y +# CONFIG_ARCH_AAEC2000 is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_REALVIEW is not set +# CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_VEXPRESS is not set +# CONFIG_ARCH_AT91 is not set +# CONFIG_ARCH_BCMRING is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CNS3XXX is not set +# CONFIG_ARCH_GEMINI is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_EP93XX is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_MXS is not set +# CONFIG_ARCH_STMP3XXX is not set +# CONFIG_ARCH_NETX is not set +# CONFIG_ARCH_H720X is not set +# CONFIG_ARCH_IOP13XX is not set +# CONFIG_ARCH_IOP32X is not set +# CONFIG_ARCH_IOP33X is not set +# CONFIG_ARCH_IXP23XX is not set +# CONFIG_ARCH_IXP2000 is not set +# CONFIG_ARCH_IXP4XX is not set +# CONFIG_ARCH_DOVE is not set +# CONFIG_ARCH_KIRKWOOD is not set +# CONFIG_ARCH_LOKI is not set +# CONFIG_ARCH_LPC32XX is not set +# CONFIG_ARCH_MV78XX0 is not set +# CONFIG_ARCH_ORION5X is not set +# CONFIG_ARCH_MMP is not set +# CONFIG_ARCH_KS8695 is not set +# CONFIG_ARCH_NS9XXX is not set +# CONFIG_ARCH_W90X900 is not set +# CONFIG_ARCH_NUC93X is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_PNX4008 is not set +# CONFIG_ARCH_PXA is not set +CONFIG_ARCH_MSM=y +# CONFIG_ARCH_SHMOBILE is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_S3C64XX is not set +# CONFIG_ARCH_S5P64X0 is not set +# CONFIG_ARCH_S5P6442 is not set +# CONFIG_ARCH_S5PC100 is not set +# CONFIG_ARCH_S5PV210 is not set +# CONFIG_ARCH_S5PV310 is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_TCC_926 is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_U300 is not set +# CONFIG_ARCH_U8500 is not set +# CONFIG_ARCH_NOMADIK is not set +# CONFIG_ARCH_DAVINCI is not set +# CONFIG_ARCH_OMAP is not set +# CONFIG_PLAT_SPEAR is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_ARCH_MSM7X00A is not set +# CONFIG_ARCH_MSM7X30 is not set +CONFIG_ARCH_QSD8X50=y +# CONFIG_ARCH_MSM8X60 is not set +CONFIG_ARCH_MSM_SCORPION=y +CONFIG_HAS_MSM_DEBUG_UART_PHYS=y +CONFIG_MSM_MDP31=y +# CONFIG_PERFLOCK is not set + +# +# Qualcomm MSM Board Type +# +# CONFIG_MACH_SWORDFISH is not set +CONFIG_MACH_QSD8X50_SURF=y +# CONFIG_MACH_QSD8X50A_ST1_5 is not set +CONFIG_MSM_DEBUG_UART=1 +# CONFIG_MSM_DEBUG_UART_NONE is not set +CONFIG_MSM_DEBUG_UART1=y +# CONFIG_MSM_DEBUG_UART2 is not set +# CONFIG_MSM_DEBUG_UART3 is not set +CONFIG_MSM_PROC_COMM=y +CONFIG_MACH_MAHIMAHI=y +CONFIG_MACH_BRAVO_NONE=y +# CONFIG_MACH_BRAVO is not set +# CONFIG_MACH_BRAVOC is not set +# CONFIG_MACH_INCREDIBLE is not set +# CONFIG_MACH_INCREDIBLEC is not set +# CONFIG_MACH_SUPERSONIC is not set +# CONFIG_MACH_QSD8X50_FFA is not set +# CONFIG_HTC_HEADSET is not set +CONFIG_HTC_35MM_JACK=y +# CONFIG_HTC_BATTCHG is not set +CONFIG_HTC_PWRSPLY=y +# CONFIG_HTC_PWRSINK is not set +# CONFIG_HTC_POWER_COLLAPSE_MAGIC is not set +# CONFIG_HTC_ONMODE_CHARGING is not set +CONFIG_QSD_SVS=y +CONFIG_QSD_PMIC_DEFAULT_DCDC1=1275 +CONFIG_CACHE_FLUSH_RANGE_LIMIT=0x40000 +CONFIG_PHYS_OFFSET=0x20000000 
+CONFIG_MSM7X00A_USE_GP_TIMER=y +# CONFIG_MSM7X00A_USE_DG_TIMER is not set +CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE_SUSPEND=y +# CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE is not set +# CONFIG_MSM7X00A_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_SLEEP_MODE=0 +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE_SUSPEND is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE=y +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE=1 +CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME=50000000 +CONFIG_MSM7X00A_IDLE_SPIN_TIME=80000 +CONFIG_MSM_IDLE_STATS=y +CONFIG_MSM_IDLE_STATS_FIRST_BUCKET=62500 +CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT=2 +CONFIG_MSM_IDLE_STATS_BUCKET_COUNT=10 +CONFIG_MSM_FIQ_SUPPORT=y +# CONFIG_MSM_SERIAL_DEBUGGER is not set +CONFIG_MSM_SMD=y +CONFIG_MSM_GPIOMUX=y +CONFIG_MSM_DAL=y +CONFIG_MSM_ONCRPCROUTER=y +CONFIG_MSM_CPU_FREQ_SET_MIN_MAX=y +CONFIG_MSM_CPU_FREQ_MAX=998400 +CONFIG_MSM_CPU_FREQ_MIN=245000 +# CONFIG_AXI_SCREEN_POLICY is not set +CONFIG_MSM_CPU_AVS=y +CONFIG_MSM_AVS_HW=y +CONFIG_HTC_ACOUSTIC_QSD=y +CONFIG_MSM_QDSP6=y +CONFIG_WIFI_CONTROL_FUNC=y +# CONFIG_WIFI_MEM_PREALLOC is not set +CONFIG_ARCH_MSM_FLASHLIGHT=y +# CONFIG_MICROP_COMMON is not set +# CONFIG_HTC_HEADSET_MGR is not set +# CONFIG_VIRTUAL_KPANIC_PARTITION is not set + +# +# System MMU +# + +# +# Processor Type +# +CONFIG_CPU_32v6K=y +CONFIG_CPU_V7=y +CONFIG_CPU_32v7=y +CONFIG_CPU_ABRT_EV7=y +CONFIG_CPU_PABRT_V7=y +CONFIG_CPU_CACHE_V7=y +CONFIG_CPU_CACHE_VIPT=y +CONFIG_CPU_COPY_V6=y +CONFIG_CPU_TLB_V7=y +CONFIG_VERIFY_PERMISSION_FAULT=y +CONFIG_CPU_HAS_ASID=y +CONFIG_CPU_CP15=y +CONFIG_CPU_CP15_MMU=y + +# +# Processor Features +# +CONFIG_ARM_THUMB=y +CONFIG_ARM_THUMBEE=y +# CONFIG_SWP_EMULATE is not set +# CONFIG_CPU_ICACHE_DISABLE is not set +# CONFIG_CPU_DCACHE_DISABLE is not set +# CONFIG_CPU_CACHE_ERR_REPORT is not set +# CONFIG_CPU_BPREDICT_DISABLE is not set +CONFIG_ARM_L1_CACHE_SHIFT=5 +CONFIG_ARM_DMA_MEM_BUFFERABLE=y +# CONFIG_VCM is not set +# CONFIG_STRICT_MEMORY_RWX is not set +# CONFIG_RESERVE_FIRST_PAGE is not set +CONFIG_CPU_HAS_PMU=y +CONFIG_ARM_ERRATA_430973=y +CONFIG_ARM_ERRATA_458693=y +CONFIG_ARM_ERRATA_460075=y +CONFIG_ARM_ERRATA_743622=y +# CONFIG_KSAPI is not set +# CONFIG_FIQ_DEBUGGER is not set + +# +# Bus support +# +# CONFIG_PCI_SYSCALL is not set +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Kernel Features +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_VMSPLIT_3G=y +# CONFIG_VMSPLIT_2G is not set +# CONFIG_VMSPLIT_1G is not set +CONFIG_PAGE_OFFSET=0xC0000000 +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_HZ=100 +# CONFIG_THUMB2_KERNEL is not set +CONFIG_AEABI=y +# CONFIG_OABI_COMPAT is not set +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set +# CONFIG_HIGHMEM is not set +CONFIG_VMALLOC_RESERVE=0x08000000 +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 
+CONFIG_VIRT_TO_BUS=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 +CONFIG_NEED_PER_CPU_KM=y +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_ALIGNMENT_TRAP=y +# CONFIG_UACCESS_WITH_MEMCPY is not set +# CONFIG_SECCOMP is not set +# CONFIG_CC_STACKPROTECTOR is not set +# CONFIG_DEPRECATED_PARAM_STRUCT is not set +# CONFIG_CP_ACCESS is not set + +# +# Boot options +# +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_CMDLINE="mem=64M console=ttyMSM0" +# CONFIG_CMDLINE_FORCE is not set +# CONFIG_XIP_KERNEL is not set +# CONFIG_KEXEC is not set +# CONFIG_CRASH_DUMP is not set +# CONFIG_AUTO_ZRELADDR is not set + +# +# CPU Power Management +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TABLE=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_STAT_DETAILS=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2 is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_INTERACTIVE=y +# CONFIG_CPU_FREQ_GOV_SMARTASS2 is not set +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +# CONFIG_CPU_IDLE is not set +CONFIG_CPU_FREQ_MSM=y + +# +# Floating point emulation +# + +# +# At least one emulation must be selected +# +CONFIG_VFP=y +CONFIG_VFPv3=y +CONFIG_NEON=y + +# +# Userspace binary formats +# +CONFIG_BINFMT_ELF=y +CONFIG_HAVE_AOUT=y +# CONFIG_BINFMT_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options +# +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_SLEEP=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HAS_WAKELOCK=y +CONFIG_HAS_EARLYSUSPEND=y +CONFIG_WAKELOCK=y +CONFIG_WAKELOCK_STAT=y +CONFIG_USER_WAKELOCK=y +CONFIG_EARLYSUSPEND=y +# CONFIG_NO_USER_SPACE_SCREEN_ACCESS_CONTROL is not set +CONFIG_FB_EARLYSUSPEND=y +# CONFIG_APM_EMULATION is not set +# CONFIG_PM_RUNTIME is not set +CONFIG_PM_OPS=y +# CONFIG_SUSPEND_TIME is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=y +CONFIG_NET_KEY=y +# CONFIG_NET_KEY_MIGRATE is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +CONFIG_INET_ESP=y +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +CONFIG_IPV6=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +# CONFIG_IPV6_ROUTE_INFO is not set +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y 
+CONFIG_IPV6_MIP6=y +CONFIG_INET6_XFRM_TUNNEL=y +CONFIG_INET6_TUNNEL=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +CONFIG_IPV6_SIT=y +# CONFIG_IPV6_SIT_6RD is not set +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=y +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +# CONFIG_IPV6_MROUTE is not set +CONFIG_ANDROID_PARANOID_NETWORK=y +CONFIG_NET_ACTIVITY_STATS=y +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_NETLINK=y +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_TPROXY=y +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=y +CONFIG_NETFILTER_XT_CONNMARK=y + +# +# Xtables targets +# +# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +# CONFIG_NETFILTER_XT_TARGET_CT is not set +# CONFIG_NETFILTER_XT_TARGET_DSCP is not set +CONFIG_NETFILTER_XT_TARGET_HL=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LED=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set +# CONFIG_NETFILTER_XT_TARGET_TEE is not set +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set +# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set + +# +# Xtables matches +# +# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +# CONFIG_NETFILTER_XT_MATCH_CPU is not set +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +# CONFIG_NETFILTER_XT_MATCH_DSCP is not set +# CONFIG_NETFILTER_XT_MATCH_ESP is not set +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_HL=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set +# CONFIG_NETFILTER_XT_MATCH_OSF is not set +# CONFIG_NETFILTER_XT_MATCH_OWNER is not set +CONFIG_NETFILTER_XT_MATCH_POLICY=y +# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set +# CONFIG_NETFILTER_XT_MATCH_REALM is not set +# CONFIG_NETFILTER_XT_MATCH_RECENT is not set +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set 
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +# CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_CONNTRACK_PROC_COMPAT=y +# CONFIG_IP_NF_QUEUE is not set +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_ADDRTYPE=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_LOG=y +# CONFIG_IP_NF_TARGET_ULOG is not set +CONFIG_NF_NAT=y +CONFIG_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +# CONFIG_NF_NAT_SNMP_BASIC is not set +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_GRE=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_FTP=y +CONFIG_NF_NAT_IRC=y +CONFIG_NF_NAT_TFTP=y +CONFIG_NF_NAT_AMANDA=y +CONFIG_NF_NAT_PPTP=y +CONFIG_NF_NAT_H323=y +CONFIG_NF_NAT_SIP=y +CONFIG_IP_NF_MANGLE=y +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +# CONFIG_IP_NF_TARGET_ECN is not set +# CONFIG_IP_NF_TARGET_TTL is not set +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV6=y +CONFIG_NF_CONNTRACK_IPV6=y +# CONFIG_IP6_NF_QUEUE is not set +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_EUI64=y +CONFIG_IP6_NF_MATCH_FRAG=y +CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +CONFIG_IP6_NF_MATCH_RT=y +CONFIG_IP6_NF_TARGET_HL=y +CONFIG_IP6_NF_TARGET_LOG=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +# CONFIG_BRIDGE_NF_EBTABLES is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +CONFIG_STP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +# CONFIG_NET_SCH_CBQ is not set +CONFIG_NET_SCH_HTB=y +# CONFIG_NET_SCH_HFSC is not set +# CONFIG_NET_SCH_PRIO is not set +# CONFIG_NET_SCH_MULTIQ is not set +# CONFIG_NET_SCH_RED is not set +# CONFIG_NET_SCH_SFQ is not set +# CONFIG_NET_SCH_TEQL is not set +# CONFIG_NET_SCH_TBF is not set +# CONFIG_NET_SCH_GRED is not set +# CONFIG_NET_SCH_DSMARK is not set +# CONFIG_NET_SCH_NETEM is not set +# CONFIG_NET_SCH_DRR is not set +CONFIG_NET_SCH_INGRESS=y + +# +# Classification +# +CONFIG_NET_CLS=y +# CONFIG_NET_CLS_BASIC is not set +# CONFIG_NET_CLS_TCINDEX is not set +# CONFIG_NET_CLS_ROUTE4 is not set +# CONFIG_NET_CLS_FW is not set +CONFIG_NET_CLS_U32=y +# CONFIG_CLS_U32_PERF is not set +# CONFIG_CLS_U32_MARK is not set +# CONFIG_NET_CLS_RSVP is not set +# CONFIG_NET_CLS_RSVP6 is not set +# CONFIG_NET_CLS_FLOW is not set +# CONFIG_NET_CLS_CGROUP is not set +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +# CONFIG_NET_EMATCH_CMP is not set +# CONFIG_NET_EMATCH_NBYTE is not set +CONFIG_NET_EMATCH_U32=y +# CONFIG_NET_EMATCH_META is not 
set +# CONFIG_NET_EMATCH_TEXT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=y +# CONFIG_GACT_PROB is not set +CONFIG_NET_ACT_MIRRED=y +# CONFIG_NET_ACT_IPT is not set +# CONFIG_NET_ACT_NAT is not set +# CONFIG_NET_ACT_PEDIT is not set +# CONFIG_NET_ACT_SIMP is not set +# CONFIG_NET_ACT_SKBEDIT is not set +# CONFIG_NET_ACT_CSUM is not set +# CONFIG_NET_CLS_IND is not set +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set +# CONFIG_BATMAN_ADV is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NET_DROP_MONITOR is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +CONFIG_BT=y +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +# CONFIG_BT_BNEP_MC_FILTER is not set +# CONFIG_BT_BNEP_PROTO_FILTER is not set +CONFIG_BT_HIDP=y + +# +# Bluetooth device drivers +# +# CONFIG_BT_HCIBTSDIO is not set +CONFIG_BT_HCIUART=y +CONFIG_BT_HCIUART_H4=y +# CONFIG_BT_HCIUART_BCSP is not set +# CONFIG_BT_HCIUART_ATH3K is not set +CONFIG_BT_HCIUART_LL=y +# CONFIG_BT_HCIVHCI is not set +# CONFIG_BT_MRVL is not set +# CONFIG_AF_RXRPC is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_CFG80211=y +CONFIG_NL80211_TESTMODE=y +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_REG_DEBUG is not set +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +# CONFIG_CFG80211_INTERNAL_REGDB is not set +CONFIG_CFG80211_WEXT=y +CONFIG_WIRELESS_EXT_SYSFS=y +# CONFIG_LIB80211 is not set +CONFIG_CFG80211_ALLOW_RECONNECT=y +# CONFIG_MAC80211 is not set + +# +# Some wireless drivers require a rate control algorithm +# +# CONFIG_WIMAX is not set +CONFIG_RFKILL=y +# CONFIG_RFKILL_PM is not set +CONFIG_RFKILL_LEDS=y +# CONFIG_RFKILL_INPUT is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="" +# CONFIG_DEVTMPFS is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +CONFIG_GENLOCK=y +CONFIG_GENLOCK_MISCDEVICE=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +# CONFIG_SW_SYNC_USER is not set +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping 
drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +CONFIG_MTD_MSM_NAND=y +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +CONFIG_MTD_NAND_IDS=y +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_UBI is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=y + +# +# DRBD disabled because PROC_FS, INET or CONNECTOR not selected +# +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_MG_DISK is not set +# CONFIG_BLK_DEV_RBD is not set +CONFIG_MISC_DEVICES=y +# CONFIG_AD525X_DPOT is not set +CONFIG_ANDROID_PMEM=y +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +CONFIG_KERNEL_DEBUGGER_CORE=y +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1780 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_SENSORS_AK8975 is not set +CONFIG_SENSORS_AKM8973=y +# CONFIG_SENSORS_AKM8976 is not set +CONFIG_VP_A1026=y +# CONFIG_DS1682 is not set +CONFIG_UID_STAT=y +# CONFIG_BMP085 is not set +# CONFIG_WL127X_RFKILL is not set +# CONFIG_APANIC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_IWMC3200TOP is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_ATA is not set +CONFIG_MD=y +# CONFIG_BLK_DEV_MD is not set +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=y +# CONFIG_DM_SNAPSHOT is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_RAID is not set +# CONFIG_DM_ZERO is not set +# CONFIG_DM_MULTIPATH is not set +# CONFIG_DM_DELAY is not set +CONFIG_DM_UEVENT=y +CONFIG_NETDEVICES=y +CONFIG_IFB=y +CONFIG_DUMMY=y +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +CONFIG_TUN=y +# CONFIG_VETH is not set +CONFIG_MII=y +# CONFIG_PHYLIB is not set +CONFIG_NET_ETHERNET=y +# CONFIG_AX88796 is not set +CONFIG_SMC91X=y +# CONFIG_DM9000 is not set +# CONFIG_ETHOC is not set +CONFIG_SMC911X=y +# CONFIG_SMSC911X is not set +# CONFIG_DNET is not set +# CONFIG_IBM_NEW_EMAC_ZMII is not set +# CONFIG_IBM_NEW_EMAC_RGMII is not set +# CONFIG_IBM_NEW_EMAC_TAH is not set +# CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set +# CONFIG_B44 is not set +# CONFIG_KS8851_MLL is not set +CONFIG_NETDEV_1000=y +# CONFIG_STMMAC_ETH is not set +CONFIG_NETDEV_10000=y +CONFIG_WLAN=y +# CONFIG_ATH_COMMON is not set +# CONFIG_BCM4329 is not set 
+CONFIG_BCMDHD=m +CONFIG_BCMDHD_FW_PATH="/vendor/firmware/fw_bcmdhd.bin" +CONFIG_BCMDHD_NVRAM_PATH="/proc/calibration" +# CONFIG_DHD_USE_STATIC_BUF is not set +# CONFIG_DHD_USE_SCHED_SCAN is not set +# CONFIG_DHD_ENABLE_P2P is not set +# CONFIG_HOSTAP is not set +# CONFIG_IWM is not set +# CONFIG_LIBERTAS is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set + +# +# CAIF transport drivers +# +CONFIG_PPP=y +# CONFIG_PPP_MULTILINK is not set +# CONFIG_PPP_FILTER is not set +CONFIG_PPP_ASYNC=y +# CONFIG_PPP_SYNC_TTY is not set +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_MPPE=y +# CONFIG_PPPOE is not set +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +# CONFIG_SLIP is not set +CONFIG_SLHC=y +# CONFIG_NETCONSOLE is not set +CONFIG_MSM_RMNET=y +# CONFIG_MSM_RMNET_DEBUG is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +CONFIG_GAN_ETH=y +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set +CONFIG_INPUT_KEYRESET=y + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ATMEL is not set +# CONFIG_TOUCHSCREEN_COMPATIBLE_REPORT is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_I2C is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_ELAN_I2C_8232 is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_QT602240 is not set +# CONFIG_TOUCHSCREEN_MSM is not set +CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI=y +CONFIG_TOUCHSCREEN_DUPLICATED_FILTER=y +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_W90X900 is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATI_REMOTE is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +CONFIG_INPUT_KEYCHORD=y +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_CMA3000 is not set +CONFIG_INPUT_CAPELLA_CM3602=y +# CONFIG_INPUT_CAPELLA_CM3602_HTC is not set +# CONFIG_LIGHTSENSOR_MICROP is not set +# CONFIG_INPUT_OPTICALJOYSTICK is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not 
set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_N_GSM is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_SERIAL_MSM=y +CONFIG_SERIAL_MSM_CONSOLE=y +# CONFIG_SERIAL_MSM_CLOCK_CONTROL is not set +CONFIG_SERIAL_MSM_HS=y +CONFIG_SERIAL_BCM_BT_LPM=y +# CONFIG_SERIAL_TIMBERDALE is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_TTY_PRINTK is not set +# CONFIG_HVC_DCC is not set +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_DCC_TTY is not set +# CONFIG_RAMOOPS is not set +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +# CONFIG_I2C_CHARDEV is not set +# CONFIG_I2C_MUX is not set +CONFIG_I2C_HELPER_AUTO=y + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE is not set +# CONFIG_I2C_GPIO is not set +CONFIG_I2C_MSM=y +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_SPI is not set + +# +# PPS support +# +# CONFIG_PPS is not set + +# +# PPS generators support +# +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set + +# +# Memory mapped GPIO expanders: +# +# CONFIG_GPIO_BASIC_MMIO is not set +# CONFIG_GPIO_IT8761E is not set + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_SX150X is not set +# CONFIG_GPIO_ADP5588 is not set + +# +# PCI GPIO expanders: +# + +# +# SPI GPIO expanders: +# + +# +# AC97 GPIO expanders: +# + +# +# MODULbus GPIO expanders: +# +CONFIG_W1=y + +# +# 1-wire Bus Masters +# +CONFIG_W1_MASTER_DS2482=y +# CONFIG_W1_MASTER_DS1WM is not set +# CONFIG_W1_MASTER_GPIO is not set + +# +# 1-wire Slaves +# +# CONFIG_W1_SLAVE_THERM is not set +# CONFIG_W1_SLAVE_SMEM is not set +# CONFIG_W1_SLAVE_DS2423 is not set +# CONFIG_W1_SLAVE_DS2431 is not set +# CONFIG_W1_SLAVE_DS2433 is not set +# CONFIG_W1_SLAVE_DS2760 is not set +# CONFIG_W1_SLAVE_BQ27000 is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2782 is not set +CONFIG_BATTERY_DS2784=y +# CONFIG_BATTERY_BQ20Z75 is not set +# CONFIG_BATTERY_BQ27x00 is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set +CONFIG_MFD_SUPPORT=y +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_ASIC3 is not set +# CONFIG_HTC_EGPIO is not set +# 
CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_TPS65200 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_MFD_STMPE is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_T7L66XB is not set +# CONFIG_MFD_TC6387XB is not set +# CONFIG_MFD_TC6393XB is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_WL1273_CORE is not set +CONFIG_REGULATOR=y +CONFIG_REGULATOR_DEBUG=y +# CONFIG_REGULATOR_DUMMY is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_BQ24022 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +CONFIG_REGULATOR_TPS65023=y +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_AD5398 is not set +CONFIG_MEDIA_SUPPORT=y + +# +# Multimedia core support +# +# CONFIG_VIDEO_DEV is not set +# CONFIG_DVB_CORE is not set +# CONFIG_VIDEO_MEDIA is not set + +# +# Multimedia drivers +# +CONFIG_RC_CORE=y +CONFIG_LIRC=y +CONFIG_RC_MAP=y +CONFIG_IR_NEC_DECODER=y +CONFIG_IR_RC5_DECODER=y +CONFIG_IR_RC6_DECODER=y +CONFIG_IR_JVC_DECODER=y +CONFIG_IR_SONY_DECODER=y +CONFIG_IR_RC5_SZ_DECODER=y +CONFIG_IR_LIRC_CODEC=y +# CONFIG_IR_IMON is not set +# CONFIG_IR_MCEUSB is not set +# CONFIG_IR_STREAMZAP is not set +# CONFIG_RC_LOOPBACK is not set + +# +# Qualcomm MSM Camera And Video +# +CONFIG_MSM_CAMERA=y +CONFIG_720P_CAMERA=y +# CONFIG_MSM_CAMERA_DEBUG is not set + +# +# Camera Sensor Selection +# +# CONFIG_MT9T013 is not set +# CONFIG_MT9D112 is not set +# CONFIG_MT9P012 is not set +CONFIG_S5K3E2FX=y +# CONFIG_S5K6AAFX is not set +# CONFIG_OV8810 is not set +# CONFIG_OV9665 is not set +# CONFIG_S5K3H1GX is not set + +# +# Graphics support +# +# CONFIG_DRM is not set +CONFIG_ION=y +CONFIG_ION_MSM=y +CONFIG_MSM_KGSL=y +# CONFIG_MSM_KGSL_CFF_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX is not set +# CONFIG_KGSL_PER_PROCESS_PAGE_TABLE is not set +CONFIG_MSM_KGSL_PAGE_TABLE_SIZE=0xFFF0000 +CONFIG_MSM_KGSL_MMU_PAGE_FAULT=y +# CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES is not set +# CONFIG_VGASTATE is not set +CONFIG_VIDEO_OUTPUT_CONTROL=y +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_WMT_GE_ROPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer 
hardware drivers +# +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_FB_MSM=y +CONFIG_FB_MSM_LEGACY_MDP=y +CONFIG_FB_MSM_MDP_PPP=y +CONFIG_FB_MSM_LCDC=y +# CONFIG_FB_MSM_MDDI is not set +# CONFIG_MSM_HDMI is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set +# CONFIG_LOGO is not set +# CONFIG_SOUND is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +# CONFIG_HIDRAW is not set +CONFIG_UHID=y +# CONFIG_HID_PID is not set + +# +# Special HID drivers +# +CONFIG_HID_APPLE=y +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_WACOM is not set +CONFIG_USB_SUPPORT=y +CONFIG_USB_ARCH_HAS_HCD=y +# CONFIG_USB_ARCH_HAS_OHCI is not set +CONFIG_USB_ARCH_HAS_EHCI=y +# CONFIG_USB is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_MUSB_HDRC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_GADGET_SELECTED=y +# CONFIG_USB_GADGET_R8A66597 is not set +# CONFIG_USB_GADGET_PXA_U2O is not set +# CONFIG_USB_GADGET_M66592 is not set +# CONFIG_USB_GADGET_CI13XXX_MSM is not set +CONFIG_USB_GADGET_MSM_72K=y +CONFIG_USB_MSM_72K=y +# CONFIG_USB_MSM_72K_HTC is not set +CONFIG_USB_GADGET_DUALSPEED=y +# CONFIG_USB_ZERO is not set +# CONFIG_USB_ETH is not set +# CONFIG_USB_G_NCM is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_FILE_STORAGE is not set +# CONFIG_USB_MASS_STORAGE is not set +# CONFIG_USB_G_SERIAL is not set +# CONFIG_USB_G_PRINTER is not set +CONFIG_USB_ANDROID=y +# CONFIG_USB_ANDROID_ACM is not set +CONFIG_USB_ANDROID_ADB=y +CONFIG_USB_ANDROID_DIAG=y +CONFIG_USB_ANDROID_MASS_STORAGE=y +# CONFIG_USB_ANDROID_MTP is not set +CONFIG_USB_ANDROID_RNDIS=y +CONFIG_USB_ANDROID_RNDIS_WCEIS=y +CONFIG_USB_ANDROID_ACCESSORY=y +CONFIG_USB_CSW_HACK=y +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set + +# +# OTG and related infrastructure +# +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ULPI is not set +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_MSM_OTG_72K is not set +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_UNSAFE_RESUME=y +# CONFIG_MMC_CLKGATE is not set +CONFIG_MMC_EMBEDDED_SDIO=y +CONFIG_MMC_PARANOID_SD_INIT=y + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=8 +# CONFIG_MMC_BLOCK_BOUNCE is not set +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_SDHCI is not set +CONFIG_MMC_MSM=y +# CONFIG_MMC_DW is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y + +# +# LED drivers +# +# CONFIG_LEDS_PCA9532 is not set +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_GPIO_PLATFORM=y +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 is not set +CONFIG_LEDS_CPLD=y +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_LT3593 is not set +CONFIG_LEDS_TRIGGERS=y + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# 
CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set +CONFIG_LEDS_TRIGGER_SLEEP=y + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_NFC_DEVICES is not set +CONFIG_SWITCH=y +CONFIG_SWITCH_GPIO=y +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +# CONFIG_RTC_INTF_SYSFS is not set +# CONFIG_RTC_INTF_PROC is not set +# CONFIG_RTC_INTF_DEV is not set +CONFIG_RTC_INTF_ALARM=y +CONFIG_RTC_INTF_ALARM_DEV=y +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set + +# +# SPI RTC drivers +# + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_CMOS is not set +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_MSM7X00A=y +# CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set +CONFIG_STAGING=y +# CONFIG_STAGING_EXCLUDE_BUILD is not set +# CONFIG_ECHO is not set +# CONFIG_BRCM80211 is not set +# CONFIG_COMEDI is not set + +# +# Android +# +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_LOGGER=y +CONFIG_ANDROID_RAM_CONSOLE=y +CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE=128 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE=16 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE=8 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL=0x11d +# CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT is not set +CONFIG_ANDROID_TIMED_OUTPUT=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +# CONFIG_POHMELFS is not set +# CONFIG_IIO is not set +CONFIG_XVMALLOC=y +CONFIG_ZRAM=y +# CONFIG_ZRAM_DEBUG is not set +# CONFIG_FB_SM7XX is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_ST_BT is not set +# CONFIG_LIRC_STAGING is not set +CONFIG_MACH_NO_WESTBRIDGE=y +# CONFIG_ATH6K_LEGACY is not set +# CONFIG_FT1000 is not set + +# +# Speakup console speech +# +# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set + +# +# File systems +# +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT23=y +CONFIG_EXT4_FS_XATTR=y +# CONFIG_EXT4_FS_POSIX_ACL is not set +# CONFIG_EXT4_FS_SECURITY is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set 
+CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_FS_POSIX_ACL is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +# CONFIG_DNOTIFY is not set +CONFIG_INOTIFY_USER=y +# CONFIG_FANOTIFY is not set +# CONFIG_QUOTA is not set +# CONFIG_QUOTACTL is not set +# CONFIG_AUTOFS4_FS is not set +CONFIG_FUSE_FS=y +# CONFIG_CUSE is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +# CONFIG_MSDOS_FS is not set +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_YAFFS_FS=y +CONFIG_YAFFS_YAFFS1=y +# CONFIG_YAFFS_9BYTE_TAGS is not set +# CONFIG_YAFFS_DOES_ECC is not set +CONFIG_YAFFS_YAFFS2=y +CONFIG_YAFFS_AUTO_YAFFS2=y +CONFIG_YAFFS_DISABLE_TAGS_ECC=y +# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set +# CONFIG_YAFFS_EMPTY_LOST_AND_FOUND is not set +# CONFIG_YAFFS_DISABLE_BLOCK_REFRESHING is not set +# CONFIG_YAFFS_DISABLE_BACKGROUND is not set +CONFIG_YAFFS_XATTR=y +# CONFIG_JFFS2_FS is not set +# CONFIG_LOGFS is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +# CONFIG_NFS_FS is not set +# CONFIG_NFSD is not set +# CONFIG_CEPH_FS is not set +CONFIG_CIFS=y +# CONFIG_CIFS_STATS is not set +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_ACL is not set +# CONFIG_CIFS_EXPERIMENTAL is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is 
not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +CONFIG_NLS_UTF8=y + +# +# Kernel hacking +# +CONFIG_PRINTK_TIME=y +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SHIRQ is not set +# CONFIG_LOCKUP_DETECTOR is not set +# CONFIG_HARDLOCKUP_DETECTOR is not set +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +CONFIG_SCHED_DEBUG=y +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_STATS is not set +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_BKL is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_SPARSE_RCU_POINTER is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set +CONFIG_FRAME_POINTER=y +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# CONFIG_LKDTM is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +# CONFIG_SYSCTL_SYSCALL_CHECK is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_EVENT_POWER_TRACING_DEPRECATED=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_STRICT_DEVMEM is not set +# CONFIG_ARM_UNWIND is not set +CONFIG_DEBUG_USER=y +CONFIG_DEBUG_ERRORS=y +CONFIG_DEBUG_STACK_USAGE=y +# CONFIG_DEBUG_LL is not set +# CONFIG_OC_ETM is not set + 
+# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_GHASH is not set +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=y +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_TWOFISH_COMMON=y + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set +CONFIG_CRYPTO_HW=y +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +# CONFIG_XZ_DEC is not set +# CONFIG_XZ_DEC_BCJ is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=y +CONFIG_TEXTSEARCH_BM=y +CONFIG_TEXTSEARCH_FSM=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/arm/configs/evervolv_supersonic_defconfig b/arch/arm/configs/evervolv_supersonic_defconfig new file mode 100644 index 0000000000000..6d2c57822cc03 --- /dev/null +++ b/arch/arm/configs/evervolv_supersonic_defconfig @@ -0,0 +1,2207 @@ +# +# Automatically 
generated make config: don't edit +# Linux/arm 2.6.38.8 Kernel Configuration +# Mon Nov 25 21:12:01 2013 +# +CONFIG_ARM=y +CONFIG_SYS_SUPPORTS_APM_EMULATION=y +CONFIG_GENERIC_GPIO=y +# CONFIG_ARCH_USES_GETTIMEOFFSET is not set +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_HAVE_PROC_CPU=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_ARCH_HAS_CPUFREQ=y +CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_VECTORS_BASE=0xffff0000 +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y +CONFIG_HAVE_IRQ_WORK=y + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_LOCK_KERNEL=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="" +CONFIG_LOCALVERSION="-evervolv" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +# CONFIG_KERNEL_GZIP is not set +# CONFIG_KERNEL_LZMA is not set +CONFIG_KERNEL_XZ=y +# CONFIG_KERNEL_LZO is not set +CONFIG_SWAP=y +# CONFIG_SYSVIPC is not set +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set +CONFIG_HAVE_GENERIC_HARDIRQS=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_HARDIRQS=y +# CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED is not set +CONFIG_HAVE_SPARSE_IRQ=y +# CONFIG_GENERIC_PENDING_IRQ is not set +# CONFIG_AUTO_IRQ_AFFINITY is not set +# CONFIG_IRQ_PER_CPU is not set +# CONFIG_SPARSE_IRQ is not set + +# +# RCU Subsystem +# +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_TINY_RCU is not set +CONFIG_TINY_PREEMPT_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_TRACE is not set +# CONFIG_TREE_RCU_TRACE is not set +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=1 +CONFIG_RCU_BOOST_DELAY=500 +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_CGROUPS=y +# CONFIG_CGROUP_DEBUG is not set +# CONFIG_CGROUP_NS is not set +CONFIG_CGROUP_FREEZER=y +# CONFIG_CGROUP_DEVICE is not set +# CONFIG_CPUSETS is not set +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +CONFIG_CGROUP_MEM_RES_CTLR=y +CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y +CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_BLK_CGROUP=y +# CONFIG_DEBUG_BLK_CGROUP is not set +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +# CONFIG_NET_NS is not set +CONFIG_SCHED_AUTOGROUP=y +CONFIG_MM_OWNER=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_EXPERT=y +CONFIG_EMBEDDED=y +CONFIG_UID16=y +# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +# CONFIG_ELF_CORE is not set +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_ASHMEM=y +CONFIG_AIO=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +# CONFIG_PERF_EVENTS is not set +# 
CONFIG_PERF_COUNTERS is not set +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_COMPAT_BRK=y +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +CONFIG_TRACEPOINTS=y +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_DMA_API_DEBUG=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +CONFIG_LBDAF=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_THROTTLING=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +CONFIG_IOSCHED_BFQ=y +CONFIG_CGROUP_BFQIO=y +CONFIG_DEFAULT_BFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="bfq" +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +# CONFIG_INLINE_SPIN_UNLOCK is not set +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_READ_TRYLOCK is not set +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +# CONFIG_INLINE_READ_UNLOCK is not set +# CONFIG_INLINE_READ_UNLOCK_BH is not set +# CONFIG_INLINE_READ_UNLOCK_IRQ is not set +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_TRYLOCK is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set +# CONFIG_INLINE_WRITE_UNLOCK is not set +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +# CONFIG_MUTEX_SPIN_ON_OWNER is not set +CONFIG_FREEZER=y + +# +# System Type +# +CONFIG_MMU=y +# CONFIG_ARCH_AAEC2000 is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_REALVIEW is not set +# CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_VEXPRESS is not set +# CONFIG_ARCH_AT91 is not set +# CONFIG_ARCH_BCMRING is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CNS3XXX is not set +# CONFIG_ARCH_GEMINI is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_EP93XX is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_MXS is not set +# CONFIG_ARCH_STMP3XXX is not set +# CONFIG_ARCH_NETX is not set +# CONFIG_ARCH_H720X is not set +# CONFIG_ARCH_IOP13XX is not set +# CONFIG_ARCH_IOP32X is not set +# CONFIG_ARCH_IOP33X is not set +# CONFIG_ARCH_IXP23XX is not set +# CONFIG_ARCH_IXP2000 is not set +# CONFIG_ARCH_IXP4XX is not set +# CONFIG_ARCH_DOVE is not set +# CONFIG_ARCH_KIRKWOOD is not set +# CONFIG_ARCH_LOKI is not set +# CONFIG_ARCH_LPC32XX is not set +# CONFIG_ARCH_MV78XX0 is not set +# CONFIG_ARCH_ORION5X is not set +# CONFIG_ARCH_MMP is not set +# CONFIG_ARCH_KS8695 is not set +# CONFIG_ARCH_NS9XXX is not set +# CONFIG_ARCH_W90X900 is not set 
+# CONFIG_ARCH_NUC93X is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_PNX4008 is not set +# CONFIG_ARCH_PXA is not set +CONFIG_ARCH_MSM=y +# CONFIG_ARCH_SHMOBILE is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_S3C64XX is not set +# CONFIG_ARCH_S5P64X0 is not set +# CONFIG_ARCH_S5P6442 is not set +# CONFIG_ARCH_S5PC100 is not set +# CONFIG_ARCH_S5PV210 is not set +# CONFIG_ARCH_S5PV310 is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_TCC_926 is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_U300 is not set +# CONFIG_ARCH_U8500 is not set +# CONFIG_ARCH_NOMADIK is not set +# CONFIG_ARCH_DAVINCI is not set +# CONFIG_ARCH_OMAP is not set +# CONFIG_PLAT_SPEAR is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_ARCH_MSM7X00A is not set +# CONFIG_ARCH_MSM7X30 is not set +CONFIG_ARCH_QSD8X50=y +# CONFIG_ARCH_MSM8X60 is not set +CONFIG_ARCH_MSM_SCORPION=y +CONFIG_HAS_MSM_DEBUG_UART_PHYS=y +CONFIG_MSM_MDP31=y +# CONFIG_PERFLOCK is not set + +# +# Qualcomm MSM Board Type +# +# CONFIG_MACH_SWORDFISH is not set +CONFIG_MACH_QSD8X50_SURF=y +# CONFIG_MACH_QSD8X50A_ST1_5 is not set +CONFIG_MSM_DEBUG_UART_NONE=y +# CONFIG_MSM_DEBUG_UART1 is not set +# CONFIG_MSM_DEBUG_UART2 is not set +# CONFIG_MSM_DEBUG_UART3 is not set +CONFIG_MSM_PROC_COMM=y +# CONFIG_MACH_MAHIMAHI is not set +CONFIG_MACH_BRAVO_NONE=y +# CONFIG_MACH_BRAVO is not set +# CONFIG_MACH_BRAVOC is not set +# CONFIG_MACH_INCREDIBLE is not set +# CONFIG_MACH_INCREDIBLEC is not set +CONFIG_MACH_SUPERSONIC=y +# CONFIG_MACH_QSD8X50_FFA is not set +# CONFIG_HTC_HEADSET is not set +# CONFIG_HTC_35MM_JACK is not set +CONFIG_HTC_BATTCHG=y +CONFIG_HTC_BATTCHG_SMEM=y +# CONFIG_HTC_PWRSPLY is not set +# CONFIG_HTC_PWRSINK is not set +# CONFIG_HTC_POWER_COLLAPSE_MAGIC is not set +# CONFIG_HTC_ONMODE_CHARGING is not set +CONFIG_QSD_SVS=y +CONFIG_QSD_PMIC_DEFAULT_DCDC1=1275 +CONFIG_CACHE_FLUSH_RANGE_LIMIT=0x40000 +CONFIG_PHYS_OFFSET=0x20000000 +CONFIG_MSM7X00A_USE_GP_TIMER=y +# CONFIG_MSM7X00A_USE_DG_TIMER is not set +CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE_SUSPEND=y +# CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE is not set +# CONFIG_MSM7X00A_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_SLEEP_MODE=0 +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE_SUSPEND is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE=y +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE=1 +CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME=50000000 +CONFIG_MSM7X00A_IDLE_SPIN_TIME=80000 +CONFIG_MSM_IDLE_STATS=y +CONFIG_MSM_IDLE_STATS_FIRST_BUCKET=62500 +CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT=2 +CONFIG_MSM_IDLE_STATS_BUCKET_COUNT=10 +CONFIG_MSM_FIQ_SUPPORT=y +# CONFIG_MSM_SERIAL_DEBUGGER is not set +CONFIG_MSM_SMD=y +CONFIG_MSM_GPIOMUX=y +CONFIG_MSM_DAL=y +CONFIG_MSM_ONCRPCROUTER=y +CONFIG_MSM_CPU_FREQ_SET_MIN_MAX=y +CONFIG_MSM_CPU_FREQ_MAX=998400 +CONFIG_MSM_CPU_FREQ_MIN=245000 +# CONFIG_AXI_SCREEN_POLICY is not set +CONFIG_MSM_CPU_AVS=y +CONFIG_MSM_AVS_HW=y +CONFIG_HTC_ACOUSTIC_QSD=y +CONFIG_MSM_QDSP6=y +CONFIG_WIFI_CONTROL_FUNC=y +CONFIG_WIFI_MEM_PREALLOC=y +# CONFIG_ARCH_MSM_FLASHLIGHT is not set +CONFIG_MICROP_COMMON=y +CONFIG_HTC_HEADSET_MGR=y +CONFIG_HTC_HEADSET_GPIO=y 
+CONFIG_HTC_HEADSET_MICROP=y +# CONFIG_VIRTUAL_KPANIC_PARTITION is not set + +# +# System MMU +# + +# +# Processor Type +# +CONFIG_CPU_32v6K=y +CONFIG_CPU_V7=y +CONFIG_CPU_32v7=y +CONFIG_CPU_ABRT_EV7=y +CONFIG_CPU_PABRT_V7=y +CONFIG_CPU_CACHE_V7=y +CONFIG_CPU_CACHE_VIPT=y +CONFIG_CPU_COPY_V6=y +CONFIG_CPU_TLB_V7=y +CONFIG_VERIFY_PERMISSION_FAULT=y +CONFIG_CPU_HAS_ASID=y +CONFIG_CPU_CP15=y +CONFIG_CPU_CP15_MMU=y + +# +# Processor Features +# +CONFIG_ARM_THUMB=y +CONFIG_ARM_THUMBEE=y +# CONFIG_SWP_EMULATE is not set +# CONFIG_CPU_ICACHE_DISABLE is not set +# CONFIG_CPU_DCACHE_DISABLE is not set +# CONFIG_CPU_CACHE_ERR_REPORT is not set +# CONFIG_CPU_BPREDICT_DISABLE is not set +CONFIG_ARM_L1_CACHE_SHIFT=5 +CONFIG_ARM_DMA_MEM_BUFFERABLE=y +# CONFIG_VCM is not set +# CONFIG_STRICT_MEMORY_RWX is not set +# CONFIG_RESERVE_FIRST_PAGE is not set +CONFIG_CPU_HAS_PMU=y +# CONFIG_ARM_ERRATA_430973 is not set +# CONFIG_ARM_ERRATA_458693 is not set +# CONFIG_ARM_ERRATA_460075 is not set +# CONFIG_ARM_ERRATA_743622 is not set +# CONFIG_KSAPI is not set +# CONFIG_FIQ_DEBUGGER is not set + +# +# Bus support +# +# CONFIG_PCI_SYSCALL is not set +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Kernel Features +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_VMSPLIT_3G=y +# CONFIG_VMSPLIT_2G is not set +# CONFIG_VMSPLIT_1G is not set +CONFIG_PAGE_OFFSET=0xC0000000 +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_HZ=100 +# CONFIG_THUMB2_KERNEL is not set +CONFIG_AEABI=y +# CONFIG_OABI_COMPAT is not set +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set +# CONFIG_HIGHMEM is not set +CONFIG_VMALLOC_RESERVE=0x08000000 +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_COMPACTION is not set +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_VIRT_TO_BUS=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 +CONFIG_NEED_PER_CPU_KM=y +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_ALIGNMENT_TRAP=y +# CONFIG_UACCESS_WITH_MEMCPY is not set +# CONFIG_SECCOMP is not set +# CONFIG_CC_STACKPROTECTOR is not set +# CONFIG_DEPRECATED_PARAM_STRUCT is not set +# CONFIG_CP_ACCESS is not set + +# +# Boot options +# +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_CMDLINE="mem=64M console=ttyMSM0" +# CONFIG_CMDLINE_FORCE is not set +# CONFIG_XIP_KERNEL is not set +# CONFIG_KEXEC is not set +# CONFIG_CRASH_DUMP is not set +# CONFIG_AUTO_ZRELADDR is not set + +# +# CPU Power Management +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TABLE=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_STAT_DETAILS=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2 is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_INTERACTIVE=y +# CONFIG_CPU_FREQ_GOV_SMARTASS2 is not set +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +# CONFIG_CPU_IDLE is not set +CONFIG_CPU_FREQ_MSM=y + +# +# Floating point emulation 
+# + +# +# At least one emulation must be selected +# +CONFIG_VFP=y +CONFIG_VFPv3=y +CONFIG_NEON=y + +# +# Userspace binary formats +# +CONFIG_BINFMT_ELF=y +CONFIG_HAVE_AOUT=y +# CONFIG_BINFMT_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options +# +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_SLEEP=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HAS_WAKELOCK=y +CONFIG_HAS_EARLYSUSPEND=y +CONFIG_WAKELOCK=y +CONFIG_WAKELOCK_STAT=y +CONFIG_USER_WAKELOCK=y +CONFIG_EARLYSUSPEND=y +# CONFIG_NO_USER_SPACE_SCREEN_ACCESS_CONTROL is not set +CONFIG_FB_EARLYSUSPEND=y +# CONFIG_APM_EMULATION is not set +# CONFIG_PM_RUNTIME is not set +CONFIG_PM_OPS=y +# CONFIG_SUSPEND_TIME is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=y +CONFIG_NET_KEY=y +# CONFIG_NET_KEY_MIGRATE is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_ASK_IP_FIB_HASH=y +# CONFIG_IP_FIB_TRIE is not set +CONFIG_IP_FIB_HASH=y +CONFIG_IP_MULTIPLE_TABLES=y +# CONFIG_IP_ROUTE_MULTIPATH is not set +# CONFIG_IP_ROUTE_VERBOSE is not set +# CONFIG_IP_PNP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +CONFIG_INET_XFRM_TUNNEL=y +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +CONFIG_IPV6=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +# CONFIG_IPV6_ROUTE_INFO is not set +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_INET6_XFRM_TUNNEL=y +CONFIG_INET6_TUNNEL=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +CONFIG_IPV6_SIT=y +# CONFIG_IPV6_SIT_6RD is not set +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=y +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +# CONFIG_IPV6_MROUTE is not set +CONFIG_ANDROID_PARANOID_NETWORK=y +CONFIG_NET_ACTIVITY_STATS=y +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_NETLINK=y +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +# CONFIG_NF_CONNTRACK_H323 is not set +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_TPROXY=y +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=y 
+CONFIG_NETFILTER_XT_CONNMARK=y + +# +# Xtables targets +# +# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +# CONFIG_NETFILTER_XT_TARGET_CT is not set +# CONFIG_NETFILTER_XT_TARGET_DSCP is not set +CONFIG_NETFILTER_XT_TARGET_HL=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LED=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set +# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set +# CONFIG_NETFILTER_XT_TARGET_TEE is not set +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +# CONFIG_NETFILTER_XT_TARGET_TRACE is not set +# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set +# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set + +# +# Xtables matches +# +# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +# CONFIG_NETFILTER_XT_MATCH_CPU is not set +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +# CONFIG_NETFILTER_XT_MATCH_DSCP is not set +# CONFIG_NETFILTER_XT_MATCH_ESP is not set +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_HL=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +# CONFIG_NETFILTER_XT_MATCH_OSF is not set +# CONFIG_NETFILTER_XT_MATCH_OWNER is not set +CONFIG_NETFILTER_XT_MATCH_POLICY=y +# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set +# CONFIG_NETFILTER_XT_MATCH_REALM is not set +CONFIG_NETFILTER_XT_MATCH_RECENT=y +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +# CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_CONNTRACK_PROC_COMPAT=y +# CONFIG_IP_NF_QUEUE is not set +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_ADDRTYPE=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_LOG=y +# CONFIG_IP_NF_TARGET_ULOG is not set +CONFIG_NF_NAT=y +CONFIG_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +# CONFIG_NF_NAT_SNMP_BASIC is not set +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_GRE=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_FTP=y +CONFIG_NF_NAT_IRC=y +CONFIG_NF_NAT_TFTP=y +CONFIG_NF_NAT_AMANDA=y +CONFIG_NF_NAT_PPTP=y +# CONFIG_NF_NAT_H323 is not set +CONFIG_NF_NAT_SIP=y +CONFIG_IP_NF_MANGLE=y +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +# CONFIG_IP_NF_TARGET_ECN is not set +# CONFIG_IP_NF_TARGET_TTL is not set +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y + +# +# IPv6: Netfilter Configuration 
+# +CONFIG_NF_DEFRAG_IPV6=y +CONFIG_NF_CONNTRACK_IPV6=y +# CONFIG_IP6_NF_QUEUE is not set +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_EUI64=y +CONFIG_IP6_NF_MATCH_FRAG=y +CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +CONFIG_IP6_NF_MATCH_RT=y +CONFIG_IP6_NF_TARGET_HL=y +CONFIG_IP6_NF_TARGET_LOG=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +# CONFIG_BRIDGE_NF_EBTABLES is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +CONFIG_STP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +# CONFIG_NET_SCH_CBQ is not set +CONFIG_NET_SCH_HTB=y +# CONFIG_NET_SCH_HFSC is not set +# CONFIG_NET_SCH_PRIO is not set +# CONFIG_NET_SCH_MULTIQ is not set +# CONFIG_NET_SCH_RED is not set +# CONFIG_NET_SCH_SFQ is not set +# CONFIG_NET_SCH_TEQL is not set +# CONFIG_NET_SCH_TBF is not set +# CONFIG_NET_SCH_GRED is not set +# CONFIG_NET_SCH_DSMARK is not set +# CONFIG_NET_SCH_NETEM is not set +# CONFIG_NET_SCH_DRR is not set +CONFIG_NET_SCH_INGRESS=y + +# +# Classification +# +CONFIG_NET_CLS=y +# CONFIG_NET_CLS_BASIC is not set +# CONFIG_NET_CLS_TCINDEX is not set +# CONFIG_NET_CLS_ROUTE4 is not set +# CONFIG_NET_CLS_FW is not set +CONFIG_NET_CLS_U32=y +# CONFIG_CLS_U32_PERF is not set +# CONFIG_CLS_U32_MARK is not set +# CONFIG_NET_CLS_RSVP is not set +# CONFIG_NET_CLS_RSVP6 is not set +# CONFIG_NET_CLS_FLOW is not set +# CONFIG_NET_CLS_CGROUP is not set +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +# CONFIG_NET_EMATCH_CMP is not set +# CONFIG_NET_EMATCH_NBYTE is not set +CONFIG_NET_EMATCH_U32=y +# CONFIG_NET_EMATCH_META is not set +# CONFIG_NET_EMATCH_TEXT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=y +# CONFIG_GACT_PROB is not set +CONFIG_NET_ACT_MIRRED=y +# CONFIG_NET_ACT_IPT is not set +# CONFIG_NET_ACT_NAT is not set +# CONFIG_NET_ACT_PEDIT is not set +# CONFIG_NET_ACT_SIMP is not set +# CONFIG_NET_ACT_SKBEDIT is not set +# CONFIG_NET_ACT_CSUM is not set +# CONFIG_NET_CLS_IND is not set +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set +# CONFIG_BATMAN_ADV is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NET_DROP_MONITOR is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +CONFIG_BT=y +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +# CONFIG_BT_BNEP_MC_FILTER is not set +# CONFIG_BT_BNEP_PROTO_FILTER is not set +CONFIG_BT_HIDP=y + +# +# Bluetooth device drivers +# +# CONFIG_BT_HCIBTSDIO is not set +CONFIG_BT_HCIUART=y +CONFIG_BT_HCIUART_H4=y +# CONFIG_BT_HCIUART_BCSP is not set +# CONFIG_BT_HCIUART_ATH3K is not set +CONFIG_BT_HCIUART_LL=y +# CONFIG_BT_HCIVHCI is not set +# CONFIG_BT_MRVL is not set +# CONFIG_AF_RXRPC is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_CFG80211=y +CONFIG_NL80211_TESTMODE=y +# CONFIG_CFG80211_DEVELOPER_WARNINGS is 
not set +# CONFIG_CFG80211_REG_DEBUG is not set +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +# CONFIG_CFG80211_INTERNAL_REGDB is not set +CONFIG_CFG80211_WEXT=y +CONFIG_WIRELESS_EXT_SYSFS=y +# CONFIG_LIB80211 is not set +CONFIG_CFG80211_ALLOW_RECONNECT=y +# CONFIG_MAC80211 is not set + +# +# Some wireless drivers require a rate control algorithm +# +CONFIG_WIMAX=y +CONFIG_WIMAX_DEBUG_LEVEL=8 +CONFIG_RFKILL=y +# CONFIG_RFKILL_PM is not set +CONFIG_RFKILL_LEDS=y +# CONFIG_RFKILL_INPUT is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="" +# CONFIG_DEVTMPFS is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +CONFIG_GENLOCK=y +CONFIG_GENLOCK_MISCDEVICE=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +# CONFIG_SW_SYNC_USER is not set +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +CONFIG_MTD_MSM_NAND=y +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +CONFIG_MTD_NAND_IDS=y +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_UBI is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=y + +# +# DRBD disabled because PROC_FS, INET or CONNECTOR not selected +# +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_MG_DISK is not set +# CONFIG_BLK_DEV_RBD is not set +CONFIG_MISC_DEVICES=y +# CONFIG_AD525X_DPOT is not set +CONFIG_ANDROID_PMEM=y +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +CONFIG_KERNEL_DEBUGGER_CORE=y +# 
CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1780 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_SENSORS_AK8975 is not set +CONFIG_SENSORS_AKM8973=y +# CONFIG_SENSORS_AKM8976 is not set +CONFIG_VP_A1026=y +# CONFIG_DS1682 is not set +# CONFIG_TI_DAC7512 is not set +CONFIG_UID_STAT=y +# CONFIG_BMP085 is not set +# CONFIG_WL127X_RFKILL is not set +CONFIG_SENSORS_BMA150_SPI=y +# CONFIG_APANIC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_IWMC3200TOP is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_ATA is not set +CONFIG_MD=y +# CONFIG_BLK_DEV_MD is not set +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=y +# CONFIG_DM_SNAPSHOT is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_RAID is not set +# CONFIG_DM_ZERO is not set +# CONFIG_DM_MULTIPATH is not set +# CONFIG_DM_DELAY is not set +CONFIG_DM_UEVENT=y +CONFIG_NETDEVICES=y +# CONFIG_IFB is not set +CONFIG_DUMMY=y +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +CONFIG_TUN=y +# CONFIG_VETH is not set +CONFIG_MII=y +# CONFIG_PHYLIB is not set +# CONFIG_NET_ETHERNET is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set +CONFIG_WLAN=y +# CONFIG_ATH_COMMON is not set +# CONFIG_BCM4329 is not set +CONFIG_BCMDHD=m +CONFIG_BCMDHD_FW_PATH="/vendor/firmware/fw_bcmdhd.bin" +CONFIG_BCMDHD_NVRAM_PATH="/proc/calibration" +# CONFIG_DHD_USE_STATIC_BUF is not set +# CONFIG_DHD_USE_SCHED_SCAN is not set +# CONFIG_DHD_ENABLE_P2P is not set +# CONFIG_HOSTAP is not set +# CONFIG_IWM is not set +# CONFIG_LIBERTAS is not set + +# +# WiMAX Wireless Broadband devices +# + +# +# Enable USB support to see WiMAX USB drivers +# +# CONFIG_WIMAX_I2400M_SDIO is not set +CONFIG_WIMAX_SQN=m +# CONFIG_WAN is not set + +# +# CAIF transport drivers +# +CONFIG_PPP=y +# CONFIG_PPP_MULTILINK is not set +# CONFIG_PPP_FILTER is not set +CONFIG_PPP_ASYNC=y +# CONFIG_PPP_SYNC_TTY is not set +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_MPPE=y +# CONFIG_PPPOE is not set +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +# CONFIG_SLIP is not set +CONFIG_SLHC=y +# CONFIG_NETCONSOLE is not set +CONFIG_MSM_RMNET=y +# CONFIG_MSM_RMNET_DEBUG is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +CONFIG_GAN_ETH=y +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set +CONFIG_INPUT_KEYRESET=y + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_ATMEL=y 
+CONFIG_TOUCHSCREEN_COMPATIBLE_REPORT=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_I2C is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_ELAN_I2C_8232 is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_QT602240 is not set +# CONFIG_TOUCHSCREEN_MSM is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_W90X900 is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATI_REMOTE is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +CONFIG_INPUT_KEYCHORD=y +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_CAPELLA_CM3602 is not set +# CONFIG_INPUT_CAPELLA_CM3602_HTC is not set +CONFIG_LIGHTSENSOR_MICROP=y +# CONFIG_INPUT_OPTICALJOYSTICK is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_N_GSM is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX3107 is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_SERIAL_MSM=y +CONFIG_SERIAL_MSM_CONSOLE=y +# CONFIG_SERIAL_MSM_CLOCK_CONTROL is not set +CONFIG_SERIAL_MSM_HS=y +CONFIG_SERIAL_BCM_BT_LPM=y +# CONFIG_SERIAL_TIMBERDALE is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_TTY_PRINTK is not set +# CONFIG_HVC_DCC is not set +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_DCC_TTY is not set +# CONFIG_RAMOOPS is not set +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +# CONFIG_I2C_CHARDEV is not set +# CONFIG_I2C_MUX is not set +CONFIG_I2C_HELPER_AUTO=y + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE is not set +# CONFIG_I2C_GPIO is not set +CONFIG_I2C_MSM=y +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_SIMTEC is 
not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +CONFIG_SPI_DEBUG=y +CONFIG_SPI_MASTER=y + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_PXA2XX_PCI is not set +CONFIG_SPI_QSD=y +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_DESIGNWARE is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_TLE62X0 is not set + +# +# PPS support +# +# CONFIG_PPS is not set + +# +# PPS generators support +# +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set + +# +# Memory mapped GPIO expanders: +# +# CONFIG_GPIO_BASIC_MMIO is not set +# CONFIG_GPIO_IT8761E is not set + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_SX150X is not set +# CONFIG_GPIO_ADP5588 is not set + +# +# PCI GPIO expanders: +# + +# +# SPI GPIO expanders: +# +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_74X164 is not set + +# +# AC97 GPIO expanders: +# + +# +# MODULbus GPIO expanders: +# +# CONFIG_W1 is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_DS2784 is not set +# CONFIG_BATTERY_BQ20Z75 is not set +# CONFIG_BATTERY_BQ27x00 is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set +CONFIG_MFD_SUPPORT=y +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_ASIC3 is not set +# CONFIG_HTC_EGPIO is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +CONFIG_TPS65200=y +# CONFIG_TWL4030_CORE is not set +# CONFIG_MFD_STMPE is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_T7L66XB is not set +# CONFIG_MFD_TC6387XB is not set +# CONFIG_MFD_TC6393XB is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13XXX is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_WL1273_CORE is not set +CONFIG_REGULATOR=y +CONFIG_REGULATOR_DEBUG=y +# CONFIG_REGULATOR_DUMMY is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_BQ24022 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# 
CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +CONFIG_REGULATOR_TPS65023=y +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_AD5398 is not set +# CONFIG_REGULATOR_TPS6524X is not set +CONFIG_MEDIA_SUPPORT=y + +# +# Multimedia core support +# +# CONFIG_VIDEO_DEV is not set +# CONFIG_DVB_CORE is not set +# CONFIG_VIDEO_MEDIA is not set + +# +# Multimedia drivers +# +# CONFIG_RC_CORE is not set + +# +# Qualcomm MSM Camera And Video +# +CONFIG_MSM_CAMERA=y +CONFIG_720P_CAMERA=y +# CONFIG_MSM_CAMERA_DEBUG is not set + +# +# Camera Sensor Selection +# +# CONFIG_MT9T013 is not set +# CONFIG_MT9D112 is not set +# CONFIG_MT9P012 is not set +# CONFIG_S5K3E2FX is not set +CONFIG_S5K6AAFX=y +CONFIG_OV8810=y +CONFIG_OV9665=y +CONFIG_S5K3H1GX=y + +# +# Graphics support +# +# CONFIG_DRM is not set +CONFIG_ION=y +CONFIG_ION_MSM=y +CONFIG_MSM_KGSL=y +# CONFIG_MSM_KGSL_CFF_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX is not set +# CONFIG_KGSL_PER_PROCESS_PAGE_TABLE is not set +CONFIG_MSM_KGSL_PAGE_TABLE_SIZE=0xFFF0000 +CONFIG_MSM_KGSL_MMU_PAGE_FAULT=y +# CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES is not set +# CONFIG_VGASTATE is not set +CONFIG_VIDEO_OUTPUT_CONTROL=y +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_WMT_GE_ROPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_FB_MSM=y +CONFIG_FB_MSM_LEGACY_MDP=y +CONFIG_FB_MSM_MDP_PPP=y +CONFIG_FB_MSM_LCDC=y +CONFIG_FB_MSM_MDDI=y +CONFIG_FB_MSM_MDDI_EPSON=y +CONFIG_FB_MSM_MDDI_NOVTEC=y +CONFIG_MSM_HDMI=y +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set +# CONFIG_LOGO is not set +# CONFIG_SOUND is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +# CONFIG_HIDRAW is not set +CONFIG_UHID=y +# CONFIG_HID_PID is not set + +# +# Special HID drivers +# +CONFIG_HID_APPLE=y +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_WACOM is not set +CONFIG_USB_SUPPORT=y +CONFIG_USB_ARCH_HAS_HCD=y +# CONFIG_USB_ARCH_HAS_OHCI is not set +CONFIG_USB_ARCH_HAS_EHCI=y +# CONFIG_USB is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_MUSB_HDRC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_GADGET_SELECTED=y +# CONFIG_USB_GADGET_R8A66597 is not set +# CONFIG_USB_GADGET_PXA_U2O is not set +# CONFIG_USB_GADGET_M66592 is not set +# CONFIG_USB_GADGET_CI13XXX_MSM is not set +CONFIG_USB_GADGET_MSM_72K=y 
+CONFIG_USB_MSM_72K=y +CONFIG_USB_MSM_72K_HTC=y +CONFIG_USB_GADGET_DUALSPEED=y +# CONFIG_USB_ZERO is not set +# CONFIG_USB_ETH is not set +# CONFIG_USB_G_NCM is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_FILE_STORAGE is not set +# CONFIG_USB_MASS_STORAGE is not set +# CONFIG_USB_G_SERIAL is not set +# CONFIG_USB_G_PRINTER is not set +CONFIG_USB_ANDROID=y +# CONFIG_USB_ANDROID_ACM is not set +CONFIG_USB_ANDROID_ADB=y +CONFIG_USB_ANDROID_DIAG=y +CONFIG_USB_ANDROID_MASS_STORAGE=y +# CONFIG_USB_ANDROID_MTP is not set +CONFIG_USB_ANDROID_RNDIS=y +CONFIG_USB_ANDROID_RNDIS_WCEIS=y +CONFIG_USB_ANDROID_ACCESSORY=y +CONFIG_USB_CSW_HACK=y +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set +CONFIG_USB_ACCESSORY_DETECT=y +# CONFIG_USB_ACCESSORY_DETECT_BY_ADC is not set +CONFIG_DOCK_ACCESSORY_DETECT=y +# CONFIG_USB_BYPASS_VBUS_NOTIFY is not set + +# +# OTG and related infrastructure +# +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ULPI is not set +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_MSM_OTG_72K is not set +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_UNSAFE_RESUME=y +# CONFIG_MMC_CLKGATE is not set +CONFIG_MMC_EMBEDDED_SDIO=y +CONFIG_MMC_PARANOID_SD_INIT=y + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=8 +# CONFIG_MMC_BLOCK_BOUNCE is not set +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_SDHCI is not set +CONFIG_MMC_MSM=y +# CONFIG_MMC_SPI is not set +# CONFIG_MMC_DW is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y + +# +# LED drivers +# +# CONFIG_LEDS_PCA9532 is not set +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_GPIO_PLATFORM=y +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 is not set +CONFIG_LEDS_CPLD=y +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_LT3593 is not set +CONFIG_LEDS_TRIGGERS=y + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set +CONFIG_LEDS_TRIGGER_SLEEP=y + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_NFC_DEVICES is not set +CONFIG_SWITCH=y +CONFIG_SWITCH_GPIO=y +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +# CONFIG_RTC_INTF_SYSFS is not set +# CONFIG_RTC_INTF_PROC is not set +# CONFIG_RTC_INTF_DEV is not set +CONFIG_RTC_INTF_ALARM=y +CONFIG_RTC_INTF_ALARM_DEV=y +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set 
+# CONFIG_RTC_DRV_RX8025 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_CMOS is not set +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_MSM7X00A=y +# CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set +CONFIG_STAGING=y +# CONFIG_STAGING_EXCLUDE_BUILD is not set +# CONFIG_ECHO is not set +# CONFIG_BRCM80211 is not set +# CONFIG_COMEDI is not set + +# +# Android +# +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_LOGGER=y +CONFIG_ANDROID_RAM_CONSOLE=y +CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE=128 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE=16 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE=8 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL=0x11d +# CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT is not set +CONFIG_ANDROID_TIMED_OUTPUT=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +# CONFIG_POHMELFS is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_IIO is not set +CONFIG_XVMALLOC=y +CONFIG_ZRAM=y +# CONFIG_ZRAM_DEBUG is not set +# CONFIG_FB_SM7XX is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_ST_BT is not set +# CONFIG_SMB_FS is not set +CONFIG_MACH_NO_WESTBRIDGE=y +# CONFIG_ATH6K_LEGACY is not set +# CONFIG_FT1000 is not set + +# +# Speakup console speech +# +# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set + +# +# File systems +# +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT23=y +CONFIG_EXT4_FS_XATTR=y +# CONFIG_EXT4_FS_POSIX_ACL is not set +# CONFIG_EXT4_FS_SECURITY is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_FS_POSIX_ACL is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +# CONFIG_DNOTIFY is not set +CONFIG_INOTIFY_USER=y +# CONFIG_FANOTIFY is not set +# CONFIG_QUOTA is not set +# CONFIG_QUOTACTL is not set +# CONFIG_AUTOFS4_FS is not set +CONFIG_FUSE_FS=y +# CONFIG_CUSE is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +# CONFIG_MSDOS_FS is not set +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# 
CONFIG_TMPFS_POSIX_ACL is not set +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_YAFFS_FS=y +CONFIG_YAFFS_YAFFS1=y +# CONFIG_YAFFS_9BYTE_TAGS is not set +# CONFIG_YAFFS_DOES_ECC is not set +CONFIG_YAFFS_YAFFS2=y +CONFIG_YAFFS_AUTO_YAFFS2=y +CONFIG_YAFFS_DISABLE_TAGS_ECC=y +# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set +# CONFIG_YAFFS_EMPTY_LOST_AND_FOUND is not set +# CONFIG_YAFFS_DISABLE_BLOCK_REFRESHING is not set +# CONFIG_YAFFS_DISABLE_BACKGROUND is not set +CONFIG_YAFFS_XATTR=y +# CONFIG_JFFS2_FS is not set +# CONFIG_LOGFS is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +# CONFIG_NFS_FS is not set +# CONFIG_NFSD is not set +# CONFIG_CEPH_FS is not set +CONFIG_CIFS=y +# CONFIG_CIFS_STATS is not set +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_ACL is not set +# CONFIG_CIFS_EXPERIMENTAL is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +CONFIG_NLS_UTF8=y + +# +# Kernel hacking +# +CONFIG_PRINTK_TIME=y +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SHIRQ is not set +# CONFIG_LOCKUP_DETECTOR is not set +# CONFIG_HARDLOCKUP_DETECTOR is not set +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 
+CONFIG_SCHED_DEBUG=y +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +CONFIG_DEBUG_MUTEXES=y +CONFIG_BKL=y +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_SPARSE_RCU_POINTER is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_INFO is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set +CONFIG_FRAME_POINTER=y +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# CONFIG_LKDTM is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +# CONFIG_SYSCTL_SYSCALL_CHECK is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_EVENT_POWER_TRACING_DEPRECATED=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_STRICT_DEVMEM is not set +# CONFIG_ARM_UNWIND is not set +# CONFIG_DEBUG_USER is not set +CONFIG_DEBUG_ERRORS=y +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_LL is not set +# CONFIG_OC_ETM is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM 
is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_GHASH is not set +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=y +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_TWOFISH_COMMON=y + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set +CONFIG_CRYPTO_HW=y +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +# CONFIG_XZ_DEC is not set +# CONFIG_XZ_DEC_BCJ is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=y +CONFIG_TEXTSEARCH_BM=y +CONFIG_TEXTSEARCH_FSM=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/arm/configs/evervolv_ubuntuphone_bravo_defconfig b/arch/arm/configs/evervolv_ubuntuphone_bravo_defconfig new file mode 100644 index 0000000000000..afb841143e3d7 --- /dev/null +++ b/arch/arm/configs/evervolv_ubuntuphone_bravo_defconfig @@ -0,0 +1,2196 @@ +# +# Automatically generated make config: don't edit +# Linux/arm 2.6.38.8 Kernel Configuration +# Fri Feb 22 19:02:05 2013 +# +CONFIG_ARM=y +CONFIG_SYS_SUPPORTS_APM_EMULATION=y +CONFIG_GENERIC_GPIO=y +# CONFIG_ARCH_USES_GETTIMEOFFSET is not set +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_HAVE_PROC_CPU=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_ARCH_HAS_CPUFREQ=y +CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_VECTORS_BASE=0xffff0000 +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y +CONFIG_HAVE_IRQ_WORK=y + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y 
+CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="" +CONFIG_LOCALVERSION="-evervolv-up" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +# CONFIG_KERNEL_GZIP is not set +# CONFIG_KERNEL_LZMA is not set +CONFIG_KERNEL_XZ=y +# CONFIG_KERNEL_LZO is not set +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set +CONFIG_HAVE_GENERIC_HARDIRQS=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_HARDIRQS=y +# CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED is not set +CONFIG_HAVE_SPARSE_IRQ=y +# CONFIG_GENERIC_PENDING_IRQ is not set +# CONFIG_AUTO_IRQ_AFFINITY is not set +# CONFIG_IRQ_PER_CPU is not set +# CONFIG_SPARSE_IRQ is not set + +# +# RCU Subsystem +# +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_TINY_RCU is not set +CONFIG_TINY_PREEMPT_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_TRACE is not set +# CONFIG_TREE_RCU_TRACE is not set +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=1 +CONFIG_RCU_BOOST_DELAY=500 +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_CGROUPS=y +# CONFIG_CGROUP_DEBUG is not set +# CONFIG_CGROUP_NS is not set +CONFIG_CGROUP_FREEZER=y +# CONFIG_CGROUP_DEVICE is not set +# CONFIG_CPUSETS is not set +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +# CONFIG_CGROUP_MEM_RES_CTLR is not set +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_BLK_CGROUP=y +# CONFIG_DEBUG_BLK_CGROUP is not set +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_EXPERT=y +CONFIG_EMBEDDED=y +CONFIG_UID16=y +# CONFIG_SYSCTL_SYSCALL is not set +# CONFIG_KALLSYMS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +# CONFIG_ELF_CORE is not set +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_ASHMEM=y +CONFIG_AIO=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +# CONFIG_PERF_EVENTS is not set +# CONFIG_PERF_COUNTERS is not set +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_SLUB_DEBUG is not set +CONFIG_COMPAT_BRK=y +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +CONFIG_TRACEPOINTS=y +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_DMA_API_DEBUG=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +CONFIG_LBDAF=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set +# CONFIG_BLK_DEV_THROTTLING is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y 
+CONFIG_CFQ_GROUP_IOSCHED=y +CONFIG_IOSCHED_BFQ=y +CONFIG_CGROUP_BFQIO=y +CONFIG_DEFAULT_DEADLINE=y +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_BFQ is not set +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="deadline" +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +# CONFIG_INLINE_SPIN_UNLOCK is not set +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_READ_TRYLOCK is not set +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +# CONFIG_INLINE_READ_UNLOCK is not set +# CONFIG_INLINE_READ_UNLOCK_BH is not set +# CONFIG_INLINE_READ_UNLOCK_IRQ is not set +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_TRYLOCK is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set +# CONFIG_INLINE_WRITE_UNLOCK is not set +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +# CONFIG_MUTEX_SPIN_ON_OWNER is not set +CONFIG_FREEZER=y + +# +# System Type +# +CONFIG_MMU=y +# CONFIG_ARCH_AAEC2000 is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_REALVIEW is not set +# CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_VEXPRESS is not set +# CONFIG_ARCH_AT91 is not set +# CONFIG_ARCH_BCMRING is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CNS3XXX is not set +# CONFIG_ARCH_GEMINI is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_EP93XX is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_MXS is not set +# CONFIG_ARCH_STMP3XXX is not set +# CONFIG_ARCH_NETX is not set +# CONFIG_ARCH_H720X is not set +# CONFIG_ARCH_IOP13XX is not set +# CONFIG_ARCH_IOP32X is not set +# CONFIG_ARCH_IOP33X is not set +# CONFIG_ARCH_IXP23XX is not set +# CONFIG_ARCH_IXP2000 is not set +# CONFIG_ARCH_IXP4XX is not set +# CONFIG_ARCH_DOVE is not set +# CONFIG_ARCH_KIRKWOOD is not set +# CONFIG_ARCH_LOKI is not set +# CONFIG_ARCH_LPC32XX is not set +# CONFIG_ARCH_MV78XX0 is not set +# CONFIG_ARCH_ORION5X is not set +# CONFIG_ARCH_MMP is not set +# CONFIG_ARCH_KS8695 is not set +# CONFIG_ARCH_NS9XXX is not set +# CONFIG_ARCH_W90X900 is not set +# CONFIG_ARCH_NUC93X is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_PNX4008 is not set +# CONFIG_ARCH_PXA is not set +CONFIG_ARCH_MSM=y +# CONFIG_ARCH_SHMOBILE is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_S3C64XX is not set +# CONFIG_ARCH_S5P64X0 is not set +# CONFIG_ARCH_S5P6442 is not set +# CONFIG_ARCH_S5PC100 is not set +# CONFIG_ARCH_S5PV210 is not set +# CONFIG_ARCH_S5PV310 is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_TCC_926 is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_U300 is not set +# CONFIG_ARCH_U8500 is not set +# CONFIG_ARCH_NOMADIK is not set +# CONFIG_ARCH_DAVINCI is not set +# CONFIG_ARCH_OMAP is not set +# CONFIG_PLAT_SPEAR is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_ARCH_MSM7X00A is not set +# CONFIG_ARCH_MSM7X30 is 
not set +CONFIG_ARCH_QSD8X50=y +# CONFIG_ARCH_MSM8X60 is not set +CONFIG_ARCH_MSM_SCORPION=y +CONFIG_HAS_MSM_DEBUG_UART_PHYS=y +CONFIG_MSM_MDP31=y +# CONFIG_PERFLOCK is not set + +# +# Qualcomm MSM Board Type +# +# CONFIG_MACH_SWORDFISH is not set +CONFIG_MACH_QSD8X50_SURF=y +# CONFIG_MACH_QSD8X50A_ST1_5 is not set +CONFIG_MSM_DEBUG_UART=1 +# CONFIG_MSM_DEBUG_UART_NONE is not set +CONFIG_MSM_DEBUG_UART1=y +# CONFIG_MSM_DEBUG_UART2 is not set +# CONFIG_MSM_DEBUG_UART3 is not set +CONFIG_MSM_PROC_COMM=y +# CONFIG_MACH_MAHIMAHI is not set +# CONFIG_MACH_BRAVO_NONE is not set +CONFIG_MACH_BRAVO=y +# CONFIG_MACH_BRAVOC is not set +# CONFIG_MACH_INCREDIBLE is not set +# CONFIG_MACH_INCREDIBLEC is not set +# CONFIG_MACH_SUPERSONIC is not set +# CONFIG_MACH_QSD8X50_FFA is not set +# CONFIG_HTC_HEADSET is not set +CONFIG_HTC_35MM_JACK=y +# CONFIG_HTC_BATTCHG is not set +CONFIG_HTC_PWRSPLY=y +# CONFIG_HTC_PWRSINK is not set +# CONFIG_HTC_POWER_COLLAPSE_MAGIC is not set +# CONFIG_HTC_ONMODE_CHARGING is not set +CONFIG_QSD_SVS=y +CONFIG_QSD_PMIC_DEFAULT_DCDC1=1275 +CONFIG_CACHE_FLUSH_RANGE_LIMIT=0x40000 +CONFIG_PHYS_OFFSET=0x20000000 +CONFIG_MSM7X00A_USE_GP_TIMER=y +# CONFIG_MSM7X00A_USE_DG_TIMER is not set +CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE_SUSPEND=y +# CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE is not set +# CONFIG_MSM7X00A_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_SLEEP_MODE=0 +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE_SUSPEND is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE=y +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE=1 +CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME=50000000 +CONFIG_MSM7X00A_IDLE_SPIN_TIME=80000 +CONFIG_MSM_IDLE_STATS=y +CONFIG_MSM_IDLE_STATS_FIRST_BUCKET=62500 +CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT=2 +CONFIG_MSM_IDLE_STATS_BUCKET_COUNT=10 +CONFIG_MSM_FIQ_SUPPORT=y +# CONFIG_MSM_SERIAL_DEBUGGER is not set +CONFIG_MSM_SMD=y +CONFIG_MSM_GPIOMUX=y +CONFIG_MSM_DAL=y +CONFIG_MSM_ONCRPCROUTER=y +CONFIG_MSM_CPU_FREQ_SET_MIN_MAX=y +CONFIG_MSM_CPU_FREQ_MAX=998400 +CONFIG_MSM_CPU_FREQ_MIN=245000 +# CONFIG_AXI_SCREEN_POLICY is not set +CONFIG_MSM_CPU_AVS=y +CONFIG_MSM_AVS_HW=y +CONFIG_HTC_ACOUSTIC_QSD=y +CONFIG_MSM_QDSP6=y +CONFIG_WIFI_CONTROL_FUNC=y +# CONFIG_WIFI_MEM_PREALLOC is not set +CONFIG_ARCH_MSM_FLASHLIGHT=y +# CONFIG_MICROP_COMMON is not set +# CONFIG_HTC_HEADSET_MGR is not set +# CONFIG_VIRTUAL_KPANIC_PARTITION is not set + +# +# System MMU +# + +# +# Processor Type +# +CONFIG_CPU_32v6K=y +CONFIG_CPU_V7=y +CONFIG_CPU_32v7=y +CONFIG_CPU_ABRT_EV7=y +CONFIG_CPU_PABRT_V7=y +CONFIG_CPU_CACHE_V7=y +CONFIG_CPU_CACHE_VIPT=y +CONFIG_CPU_COPY_V6=y +CONFIG_CPU_TLB_V7=y +CONFIG_VERIFY_PERMISSION_FAULT=y +CONFIG_CPU_HAS_ASID=y +CONFIG_CPU_CP15=y +CONFIG_CPU_CP15_MMU=y + +# +# Processor Features +# +CONFIG_ARM_THUMB=y +CONFIG_ARM_THUMBEE=y +# CONFIG_SWP_EMULATE is not set +# CONFIG_CPU_ICACHE_DISABLE is not set +# CONFIG_CPU_DCACHE_DISABLE is not set +# CONFIG_CPU_CACHE_ERR_REPORT is not set +# CONFIG_CPU_BPREDICT_DISABLE is not set +CONFIG_ARM_L1_CACHE_SHIFT=5 +CONFIG_ARM_DMA_MEM_BUFFERABLE=y +# CONFIG_VCM is not set +# CONFIG_STRICT_MEMORY_RWX is not set +# CONFIG_RESERVE_FIRST_PAGE is not set +CONFIG_CPU_HAS_PMU=y +CONFIG_ARM_ERRATA_430973=y 
+CONFIG_ARM_ERRATA_458693=y +CONFIG_ARM_ERRATA_460075=y +CONFIG_ARM_ERRATA_743622=y +# CONFIG_KSAPI is not set +# CONFIG_FIQ_DEBUGGER is not set + +# +# Bus support +# +# CONFIG_PCI_SYSCALL is not set +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Kernel Features +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_VMSPLIT_3G=y +# CONFIG_VMSPLIT_2G is not set +# CONFIG_VMSPLIT_1G is not set +CONFIG_PAGE_OFFSET=0xC0000000 +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_HZ=100 +# CONFIG_THUMB2_KERNEL is not set +CONFIG_AEABI=y +# CONFIG_OABI_COMPAT is not set +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set +# CONFIG_HIGHMEM is not set +CONFIG_VMALLOC_RESERVE=0x08000000 +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_VIRT_TO_BUS=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 +CONFIG_NEED_PER_CPU_KM=y +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_ALIGNMENT_TRAP=y +# CONFIG_UACCESS_WITH_MEMCPY is not set +# CONFIG_SECCOMP is not set +# CONFIG_CC_STACKPROTECTOR is not set +# CONFIG_DEPRECATED_PARAM_STRUCT is not set +# CONFIG_CP_ACCESS is not set + +# +# Boot options +# +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_CMDLINE="mem=64M console=ttyMSM0" +# CONFIG_CMDLINE_FORCE is not set +# CONFIG_XIP_KERNEL is not set +# CONFIG_KEXEC is not set +# CONFIG_CRASH_DUMP is not set +# CONFIG_AUTO_ZRELADDR is not set + +# +# CPU Power Management +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TABLE=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_STAT_DETAILS=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2 is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_INTERACTIVE=y +# CONFIG_CPU_FREQ_GOV_SMARTASS2 is not set +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +# CONFIG_CPU_IDLE is not set +CONFIG_CPU_FREQ_MSM=y + +# +# Floating point emulation +# + +# +# At least one emulation must be selected +# +CONFIG_VFP=y +CONFIG_VFPv3=y +CONFIG_NEON=y + +# +# Userspace binary formats +# +CONFIG_BINFMT_ELF=y +CONFIG_HAVE_AOUT=y +# CONFIG_BINFMT_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options +# +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_SLEEP=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HAS_WAKELOCK=y +CONFIG_HAS_EARLYSUSPEND=y +CONFIG_WAKELOCK=y +CONFIG_WAKELOCK_STAT=y +CONFIG_USER_WAKELOCK=y +CONFIG_EARLYSUSPEND=y +# CONFIG_NO_USER_SPACE_SCREEN_ACCESS_CONTROL is not set +CONFIG_FB_EARLYSUSPEND=y +# CONFIG_APM_EMULATION is not set +# CONFIG_PM_RUNTIME is not set +CONFIG_PM_OPS=y +# CONFIG_SUSPEND_TIME is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set +# CONFIG_XFRM_SUB_POLICY is not set +# 
CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=y +CONFIG_NET_KEY=y +# CONFIG_NET_KEY_MIGRATE is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +CONFIG_INET_ESP=y +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +CONFIG_IPV6=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +# CONFIG_IPV6_ROUTE_INFO is not set +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_INET6_XFRM_TUNNEL=y +CONFIG_INET6_TUNNEL=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +CONFIG_IPV6_SIT=y +# CONFIG_IPV6_SIT_6RD is not set +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=y +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +# CONFIG_IPV6_MROUTE is not set +# CONFIG_NETLABEL is not set +# CONFIG_ANDROID_PARANOID_NETWORK is not set +CONFIG_NET_ACTIVITY_STATS=y +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_NETLINK=y +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_TPROXY=y +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=y +CONFIG_NETFILTER_XT_CONNMARK=y + +# +# Xtables targets +# +# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +# CONFIG_NETFILTER_XT_TARGET_CT is not set +# CONFIG_NETFILTER_XT_TARGET_DSCP is not set +CONFIG_NETFILTER_XT_TARGET_HL=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LED=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set +# CONFIG_NETFILTER_XT_TARGET_TEE is not set +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set +# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set + +# +# Xtables matches +# +# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y 
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +# CONFIG_NETFILTER_XT_MATCH_CPU is not set +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +# CONFIG_NETFILTER_XT_MATCH_DSCP is not set +# CONFIG_NETFILTER_XT_MATCH_ESP is not set +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_HL=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set +# CONFIG_NETFILTER_XT_MATCH_OSF is not set +# CONFIG_NETFILTER_XT_MATCH_OWNER is not set +CONFIG_NETFILTER_XT_MATCH_POLICY=y +# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set +# CONFIG_NETFILTER_XT_MATCH_REALM is not set +# CONFIG_NETFILTER_XT_MATCH_RECENT is not set +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +# CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_CONNTRACK_PROC_COMPAT=y +# CONFIG_IP_NF_QUEUE is not set +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_ADDRTYPE=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_LOG=y +# CONFIG_IP_NF_TARGET_ULOG is not set +CONFIG_NF_NAT=y +CONFIG_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +# CONFIG_NF_NAT_SNMP_BASIC is not set +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_GRE=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_FTP=y +CONFIG_NF_NAT_IRC=y +CONFIG_NF_NAT_TFTP=y +CONFIG_NF_NAT_AMANDA=y +CONFIG_NF_NAT_PPTP=y +CONFIG_NF_NAT_H323=y +CONFIG_NF_NAT_SIP=y +CONFIG_IP_NF_MANGLE=y +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +# CONFIG_IP_NF_TARGET_ECN is not set +# CONFIG_IP_NF_TARGET_TTL is not set +CONFIG_IP_NF_RAW=y +# CONFIG_IP_NF_SECURITY is not set +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV6=y +CONFIG_NF_CONNTRACK_IPV6=y +# CONFIG_IP6_NF_QUEUE is not set +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_EUI64=y +CONFIG_IP6_NF_MATCH_FRAG=y +CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +CONFIG_IP6_NF_MATCH_RT=y +CONFIG_IP6_NF_TARGET_HL=y +CONFIG_IP6_NF_TARGET_LOG=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +# CONFIG_IP6_NF_SECURITY is not set +# CONFIG_BRIDGE_NF_EBTABLES is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +CONFIG_STP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not set +# 
CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +# CONFIG_NET_SCH_CBQ is not set +CONFIG_NET_SCH_HTB=y +# CONFIG_NET_SCH_HFSC is not set +# CONFIG_NET_SCH_PRIO is not set +# CONFIG_NET_SCH_MULTIQ is not set +# CONFIG_NET_SCH_RED is not set +# CONFIG_NET_SCH_SFQ is not set +# CONFIG_NET_SCH_TEQL is not set +# CONFIG_NET_SCH_TBF is not set +# CONFIG_NET_SCH_GRED is not set +# CONFIG_NET_SCH_DSMARK is not set +# CONFIG_NET_SCH_NETEM is not set +# CONFIG_NET_SCH_DRR is not set +CONFIG_NET_SCH_INGRESS=y + +# +# Classification +# +CONFIG_NET_CLS=y +# CONFIG_NET_CLS_BASIC is not set +# CONFIG_NET_CLS_TCINDEX is not set +# CONFIG_NET_CLS_ROUTE4 is not set +# CONFIG_NET_CLS_FW is not set +CONFIG_NET_CLS_U32=y +# CONFIG_CLS_U32_PERF is not set +# CONFIG_CLS_U32_MARK is not set +# CONFIG_NET_CLS_RSVP is not set +# CONFIG_NET_CLS_RSVP6 is not set +# CONFIG_NET_CLS_FLOW is not set +# CONFIG_NET_CLS_CGROUP is not set +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +# CONFIG_NET_EMATCH_CMP is not set +# CONFIG_NET_EMATCH_NBYTE is not set +CONFIG_NET_EMATCH_U32=y +# CONFIG_NET_EMATCH_META is not set +# CONFIG_NET_EMATCH_TEXT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=y +# CONFIG_GACT_PROB is not set +CONFIG_NET_ACT_MIRRED=y +# CONFIG_NET_ACT_IPT is not set +# CONFIG_NET_ACT_NAT is not set +# CONFIG_NET_ACT_PEDIT is not set +# CONFIG_NET_ACT_SIMP is not set +# CONFIG_NET_ACT_SKBEDIT is not set +# CONFIG_NET_ACT_CSUM is not set +# CONFIG_NET_CLS_IND is not set +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set +# CONFIG_BATMAN_ADV is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NET_DROP_MONITOR is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +CONFIG_BT=y +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +# CONFIG_BT_BNEP_MC_FILTER is not set +# CONFIG_BT_BNEP_PROTO_FILTER is not set +CONFIG_BT_HIDP=y + +# +# Bluetooth device drivers +# +# CONFIG_BT_HCIBTSDIO is not set +CONFIG_BT_HCIUART=y +CONFIG_BT_HCIUART_H4=y +# CONFIG_BT_HCIUART_BCSP is not set +# CONFIG_BT_HCIUART_ATH3K is not set +CONFIG_BT_HCIUART_LL=y +# CONFIG_BT_HCIVHCI is not set +# CONFIG_BT_MRVL is not set +# CONFIG_AF_RXRPC is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_CFG80211=y +CONFIG_NL80211_TESTMODE=y +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_REG_DEBUG is not set +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +# CONFIG_CFG80211_INTERNAL_REGDB is not set +CONFIG_CFG80211_WEXT=y +CONFIG_WIRELESS_EXT_SYSFS=y +# CONFIG_LIB80211 is not set +CONFIG_CFG80211_ALLOW_RECONNECT=y +# CONFIG_MAC80211 is not set + +# +# Some wireless drivers require a rate control algorithm +# +# CONFIG_WIMAX is not set +CONFIG_RFKILL=y +# CONFIG_RFKILL_PM is not set +CONFIG_RFKILL_LEDS=y +# CONFIG_RFKILL_INPUT is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="" +CONFIG_DEVTMPFS=y +# CONFIG_DEVTMPFS_MOUNT is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# 
CONFIG_SYS_HYPERVISOR is not set +CONFIG_GENLOCK=y +CONFIG_GENLOCK_MISCDEVICE=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +CONFIG_MTD_MSM_NAND=y +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +CONFIG_MTD_NAND_IDS=y +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_UBI is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=y + +# +# DRBD disabled because PROC_FS, INET or CONNECTOR not selected +# +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_MG_DISK is not set +# CONFIG_BLK_DEV_RBD is not set +CONFIG_MISC_DEVICES=y +# CONFIG_AD525X_DPOT is not set +CONFIG_ANDROID_PMEM=y +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +CONFIG_KERNEL_DEBUGGER_CORE=y +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1780 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_SENSORS_AK8975 is not set +CONFIG_SENSORS_AKM8973=y +# CONFIG_SENSORS_AKM8976 is not set +# CONFIG_VP_A1026 is not set +# CONFIG_DS1682 is not set +CONFIG_UID_STAT=y +# CONFIG_BMP085 is not set +# CONFIG_WL127X_RFKILL is not set +# CONFIG_APANIC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_IWMC3200TOP is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_ATA is not 
set +CONFIG_MD=y +# CONFIG_BLK_DEV_MD is not set +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=y +# CONFIG_DM_SNAPSHOT is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_RAID is not set +# CONFIG_DM_ZERO is not set +# CONFIG_DM_MULTIPATH is not set +# CONFIG_DM_DELAY is not set +CONFIG_DM_UEVENT=y +CONFIG_NETDEVICES=y +CONFIG_IFB=y +CONFIG_DUMMY=y +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +CONFIG_TUN=y +# CONFIG_VETH is not set +CONFIG_MII=y +# CONFIG_PHYLIB is not set +CONFIG_NET_ETHERNET=y +# CONFIG_AX88796 is not set +CONFIG_SMC91X=y +# CONFIG_DM9000 is not set +# CONFIG_ETHOC is not set +CONFIG_SMC911X=y +# CONFIG_SMSC911X is not set +# CONFIG_DNET is not set +# CONFIG_IBM_NEW_EMAC_ZMII is not set +# CONFIG_IBM_NEW_EMAC_RGMII is not set +# CONFIG_IBM_NEW_EMAC_TAH is not set +# CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set +# CONFIG_B44 is not set +# CONFIG_KS8851_MLL is not set +CONFIG_NETDEV_1000=y +# CONFIG_STMMAC_ETH is not set +CONFIG_NETDEV_10000=y +CONFIG_WLAN=y +# CONFIG_ATH_COMMON is not set +# CONFIG_BCM4329 is not set +CONFIG_BCMDHD=m +CONFIG_BCMDHD_FW_PATH="/vendor/firmware/fw_bcmdhd.bin" +CONFIG_BCMDHD_NVRAM_PATH="/proc/calibration" +# CONFIG_DHD_USE_STATIC_BUF is not set +# CONFIG_DHD_USE_SCHED_SCAN is not set +# CONFIG_DHD_ENABLE_P2P is not set +# CONFIG_HOSTAP is not set +# CONFIG_IWM is not set +# CONFIG_LIBERTAS is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set + +# +# CAIF transport drivers +# +CONFIG_PPP=y +# CONFIG_PPP_MULTILINK is not set +# CONFIG_PPP_FILTER is not set +CONFIG_PPP_ASYNC=y +# CONFIG_PPP_SYNC_TTY is not set +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_MPPE=y +# CONFIG_PPPOE is not set +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +# CONFIG_SLIP is not set +CONFIG_SLHC=y +# CONFIG_NETCONSOLE is not set +CONFIG_MSM_RMNET=y +# CONFIG_MSM_RMNET_DEBUG is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +CONFIG_GAN_ETH=y +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set +CONFIG_INPUT_KEYRESET=y + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ATMEL is not set +# CONFIG_TOUCHSCREEN_COMPATIBLE_REPORT is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_I2C is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_ELAN_I2C_8232 is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not 
set +# CONFIG_TOUCHSCREEN_QT602240 is not set +# CONFIG_TOUCHSCREEN_MSM is not set +CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI=y +CONFIG_TOUCHSCREEN_DUPLICATED_FILTER=y +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_W90X900 is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATI_REMOTE is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +CONFIG_INPUT_KEYCHORD=y +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_CAPELLA_CM3602 is not set +CONFIG_INPUT_CAPELLA_CM3602_HTC=y +CONFIG_LIGHTSENSOR_MICROP=y +CONFIG_INPUT_OPTICALJOYSTICK=y +CONFIG_OPTICALJOYSTICK_CRUCIAL=y +CONFIG_OPTICALJOYSTICK_CRUCIAL_uP=y +# CONFIG_OPTICALJOYSTICK_CRUCIAL_SPI is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_N_GSM is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_SERIAL_MSM=y +CONFIG_SERIAL_MSM_CONSOLE=y +# CONFIG_SERIAL_MSM_CLOCK_CONTROL is not set +CONFIG_SERIAL_MSM_HS=y +CONFIG_SERIAL_BCM_BT_LPM=y +# CONFIG_SERIAL_TIMBERDALE is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +CONFIG_UNIX98_PTYS=y +CONFIG_DEVPTS_MULTIPLE_INSTANCES=y +# CONFIG_LEGACY_PTYS is not set +# CONFIG_TTY_PRINTK is not set +# CONFIG_HVC_DCC is not set +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_DCC_TTY is not set +# CONFIG_RAMOOPS is not set +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +# CONFIG_I2C_CHARDEV is not set +# CONFIG_I2C_MUX is not set +CONFIG_I2C_HELPER_AUTO=y + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE is not set +# CONFIG_I2C_GPIO is not set +CONFIG_I2C_MSM=y +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_SPI is not set + +# +# PPS support +# +# CONFIG_PPS is not set + +# +# PPS generators support +# +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set + +# +# Memory mapped GPIO expanders: +# +# CONFIG_GPIO_BASIC_MMIO is not set +# CONFIG_GPIO_IT8761E is not set + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCF857X is not set +# 
CONFIG_GPIO_SX150X is not set +# CONFIG_GPIO_ADP5588 is not set + +# +# PCI GPIO expanders: +# + +# +# SPI GPIO expanders: +# + +# +# AC97 GPIO expanders: +# + +# +# MODULbus GPIO expanders: +# +CONFIG_W1=y + +# +# 1-wire Bus Masters +# +CONFIG_W1_MASTER_DS2482=y +# CONFIG_W1_MASTER_DS1WM is not set +# CONFIG_W1_MASTER_GPIO is not set + +# +# 1-wire Slaves +# +# CONFIG_W1_SLAVE_THERM is not set +# CONFIG_W1_SLAVE_SMEM is not set +# CONFIG_W1_SLAVE_DS2423 is not set +# CONFIG_W1_SLAVE_DS2431 is not set +# CONFIG_W1_SLAVE_DS2433 is not set +# CONFIG_W1_SLAVE_DS2760 is not set +# CONFIG_W1_SLAVE_BQ27000 is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2782 is not set +CONFIG_BATTERY_DS2784=y +# CONFIG_BATTERY_BQ20Z75 is not set +# CONFIG_BATTERY_BQ27x00 is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set +CONFIG_MFD_SUPPORT=y +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_ASIC3 is not set +# CONFIG_HTC_EGPIO is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_TPS65200 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_MFD_STMPE is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_T7L66XB is not set +# CONFIG_MFD_TC6387XB is not set +# CONFIG_MFD_TC6393XB is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_WL1273_CORE is not set +CONFIG_REGULATOR=y +CONFIG_REGULATOR_DEBUG=y +# CONFIG_REGULATOR_DUMMY is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_BQ24022 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +CONFIG_REGULATOR_TPS65023=y +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_AD5398 is not set +CONFIG_MEDIA_SUPPORT=y + +# +# Multimedia core support +# +# CONFIG_VIDEO_DEV is not set +# CONFIG_DVB_CORE is not set +# CONFIG_VIDEO_MEDIA is not set + +# +# Multimedia drivers +# +CONFIG_RC_CORE=y +CONFIG_LIRC=y +CONFIG_RC_MAP=y +CONFIG_IR_NEC_DECODER=y +CONFIG_IR_RC5_DECODER=y +CONFIG_IR_RC6_DECODER=y +CONFIG_IR_JVC_DECODER=y +CONFIG_IR_SONY_DECODER=y +CONFIG_IR_RC5_SZ_DECODER=y +CONFIG_IR_LIRC_CODEC=y +# CONFIG_IR_IMON is not set +# CONFIG_IR_MCEUSB is not set +# CONFIG_IR_STREAMZAP is not set +# CONFIG_RC_LOOPBACK is not set + +# +# Qualcomm MSM Camera And Video +# +CONFIG_MSM_CAMERA=y +CONFIG_720P_CAMERA=y +# CONFIG_MSM_CAMERA_DEBUG is not set + +# +# Camera Sensor Selection +# +# CONFIG_MT9T013 is not set +# CONFIG_MT9D112 is not set +# CONFIG_MT9P012 is not set 
+CONFIG_S5K3E2FX=y +# CONFIG_S5K6AAFX is not set +# CONFIG_OV8810 is not set +# CONFIG_OV9665 is not set +# CONFIG_S5K3H1GX is not set + +# +# Graphics support +# +# CONFIG_DRM is not set +CONFIG_ION=y +CONFIG_ION_MSM=y +CONFIG_MSM_KGSL=y +# CONFIG_MSM_KGSL_CFF_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX is not set +# CONFIG_KGSL_PER_PROCESS_PAGE_TABLE is not set +CONFIG_MSM_KGSL_PAGE_TABLE_SIZE=0xFFF0000 +CONFIG_MSM_KGSL_MMU_PAGE_FAULT=y +# CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES is not set +# CONFIG_VGASTATE is not set +CONFIG_VIDEO_OUTPUT_CONTROL=y +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_WMT_GE_ROPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_FB_MSM=y +CONFIG_FB_MSM_LEGACY_MDP=y +CONFIG_FB_MSM_MDP_PPP=y +CONFIG_FB_MSM_LCDC=y +# CONFIG_FB_MSM_MDDI is not set +# CONFIG_MSM_HDMI is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set +# CONFIG_LOGO is not set +# CONFIG_SOUND is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +# CONFIG_HIDRAW is not set +CONFIG_UHID=y +# CONFIG_HID_PID is not set + +# +# Special HID drivers +# +CONFIG_HID_APPLE=y +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_WACOM is not set +CONFIG_USB_SUPPORT=y +CONFIG_USB_ARCH_HAS_HCD=y +# CONFIG_USB_ARCH_HAS_OHCI is not set +CONFIG_USB_ARCH_HAS_EHCI=y +# CONFIG_USB is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_MUSB_HDRC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_GADGET_SELECTED=y +# CONFIG_USB_GADGET_R8A66597 is not set +# CONFIG_USB_GADGET_PXA_U2O is not set +# CONFIG_USB_GADGET_M66592 is not set +# CONFIG_USB_GADGET_CI13XXX_MSM is not set +CONFIG_USB_GADGET_MSM_72K=y +CONFIG_USB_MSM_72K=y +# CONFIG_USB_MSM_72K_HTC is not set +CONFIG_USB_GADGET_DUALSPEED=y +# CONFIG_USB_ZERO is not set +# CONFIG_USB_ETH is not set +# CONFIG_USB_G_NCM is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_FILE_STORAGE is not set +# CONFIG_USB_MASS_STORAGE is not set +# CONFIG_USB_G_SERIAL is not set +# CONFIG_USB_G_PRINTER is not set +CONFIG_USB_ANDROID=y +# CONFIG_USB_ANDROID_ACM is not set +CONFIG_USB_ANDROID_ADB=y +CONFIG_USB_ANDROID_DIAG=y +CONFIG_USB_ANDROID_MASS_STORAGE=y +# CONFIG_USB_ANDROID_MTP is not set +CONFIG_USB_ANDROID_RNDIS=y +CONFIG_USB_ANDROID_RNDIS_WCEIS=y +CONFIG_USB_ANDROID_ACCESSORY=y +CONFIG_USB_CSW_HACK=y +# CONFIG_USB_CDC_COMPOSITE is not 
set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set + +# +# OTG and related infrastructure +# +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ULPI is not set +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_MSM_OTG_72K is not set +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_UNSAFE_RESUME=y +# CONFIG_MMC_CLKGATE is not set +CONFIG_MMC_EMBEDDED_SDIO=y +CONFIG_MMC_PARANOID_SD_INIT=y + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=8 +# CONFIG_MMC_BLOCK_BOUNCE is not set +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_SDHCI is not set +CONFIG_MMC_MSM=y +# CONFIG_MMC_DW is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y + +# +# LED drivers +# +# CONFIG_LEDS_PCA9532 is not set +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_GPIO_PLATFORM=y +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 is not set +CONFIG_LEDS_CPLD=y +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_LT3593 is not set +CONFIG_LEDS_TRIGGERS=y + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set +CONFIG_LEDS_TRIGGER_SLEEP=y + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_NFC_DEVICES is not set +CONFIG_SWITCH=y +CONFIG_SWITCH_GPIO=y +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +# CONFIG_RTC_INTF_SYSFS is not set +# CONFIG_RTC_INTF_PROC is not set +# CONFIG_RTC_INTF_DEV is not set +CONFIG_RTC_INTF_ALARM=y +CONFIG_RTC_INTF_ALARM_DEV=y +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set + +# +# SPI RTC drivers +# + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_CMOS is not set +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_MSM7X00A=y +# CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set +CONFIG_STAGING=y +# CONFIG_STAGING_EXCLUDE_BUILD is not set +# CONFIG_ECHO is not set +# CONFIG_BRCM80211 is not set +# CONFIG_COMEDI is not set + +# +# Android +# +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y 
+CONFIG_ANDROID_LOGGER=y +CONFIG_ANDROID_RAM_CONSOLE=y +CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE=128 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE=16 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE=8 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL=0x11d +# CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT is not set +CONFIG_ANDROID_TIMED_OUTPUT=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +# CONFIG_POHMELFS is not set +# CONFIG_IIO is not set +CONFIG_XVMALLOC=y +CONFIG_ZRAM=y +# CONFIG_ZRAM_DEBUG is not set +# CONFIG_FB_SM7XX is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_ST_BT is not set +# CONFIG_LIRC_STAGING is not set +CONFIG_MACH_NO_WESTBRIDGE=y +# CONFIG_ATH6K_LEGACY is not set +# CONFIG_FT1000 is not set + +# +# Speakup console speech +# +# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set + +# +# File systems +# +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT23=y +CONFIG_EXT4_FS_XATTR=y +# CONFIG_EXT4_FS_POSIX_ACL is not set +# CONFIG_EXT4_FS_SECURITY is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_FS_POSIX_ACL is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +# CONFIG_QUOTA is not set +# CONFIG_QUOTACTL is not set +# CONFIG_AUTOFS4_FS is not set +CONFIG_FUSE_FS=y +# CONFIG_CUSE is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +# CONFIG_MSDOS_FS is not set +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_YAFFS_FS=y +CONFIG_YAFFS_YAFFS1=y +# CONFIG_YAFFS_9BYTE_TAGS is not set +# CONFIG_YAFFS_DOES_ECC is not set +CONFIG_YAFFS_YAFFS2=y +CONFIG_YAFFS_AUTO_YAFFS2=y +CONFIG_YAFFS_DISABLE_TAGS_ECC=y +# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set +# CONFIG_YAFFS_EMPTY_LOST_AND_FOUND is not set +# CONFIG_YAFFS_DISABLE_BLOCK_REFRESHING is not set +# CONFIG_YAFFS_DISABLE_BACKGROUND is not set +CONFIG_YAFFS_XATTR=y +# CONFIG_JFFS2_FS is not set +# CONFIG_LOGFS is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +# CONFIG_NFS_FS is not set +# CONFIG_NFSD is not set +# CONFIG_CEPH_FS is not set +CONFIG_CIFS=y +# CONFIG_CIFS_STATS is not set +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_XATTR=y 
+CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_ACL is not set +# CONFIG_CIFS_EXPERIMENTAL is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +CONFIG_NLS_UTF8=y + +# +# Kernel hacking +# +CONFIG_PRINTK_TIME=y +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SHIRQ is not set +# CONFIG_LOCKUP_DETECTOR is not set +# CONFIG_HARDLOCKUP_DETECTOR is not set +# CONFIG_DETECT_HUNG_TASK is not set +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_STATS is not set +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_BKL is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_SPARSE_RCU_POINTER is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set +CONFIG_FRAME_POINTER=y +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# CONFIG_LKDTM is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +# CONFIG_SYSCTL_SYSCALL_CHECK 
is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_EVENT_POWER_TRACING_DEPRECATED=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_STRICT_DEVMEM is not set +# CONFIG_ARM_UNWIND is not set +# CONFIG_DEBUG_USER is not set +# CONFIG_DEBUG_ERRORS is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_LL is not set +# CONFIG_OC_ETM is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +# CONFIG_SECURITYFS is not set +# CONFIG_SECURITY_NETWORK is not set +# CONFIG_SECURITY_PATH is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_IMA is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_GHASH is not set +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=y +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not 
set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_TWOFISH_COMMON=y + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set +CONFIG_CRYPTO_HW=y +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +# CONFIG_XZ_DEC is not set +# CONFIG_XZ_DEC_BCJ is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=y +CONFIG_TEXTSEARCH_BM=y +CONFIG_TEXTSEARCH_FSM=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/arm/configs/evervolv_ubuntuphone_incrediblec_defconfig b/arch/arm/configs/evervolv_ubuntuphone_incrediblec_defconfig new file mode 100644 index 0000000000000..00a29ae73f8fb --- /dev/null +++ b/arch/arm/configs/evervolv_ubuntuphone_incrediblec_defconfig @@ -0,0 +1,2232 @@ +# +# Automatically generated make config: don't edit +# Linux/arm 2.6.38.8 Kernel Configuration +# Sat Feb 23 23:52:39 2013 +# +CONFIG_ARM=y +CONFIG_SYS_SUPPORTS_APM_EMULATION=y +CONFIG_GENERIC_GPIO=y +# CONFIG_ARCH_USES_GETTIMEOFFSET is not set +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_HAVE_PROC_CPU=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_ARCH_HAS_CPUFREQ=y +CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_VECTORS_BASE=0xffff0000 +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y +CONFIG_HAVE_IRQ_WORK=y + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="arm-eabi-" +CONFIG_LOCALVERSION="-evervolv-up" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +# CONFIG_KERNEL_GZIP is not set +CONFIG_KERNEL_LZMA=y +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set +CONFIG_HAVE_GENERIC_HARDIRQS=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_HARDIRQS=y +# CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED is not set +CONFIG_HAVE_SPARSE_IRQ=y +# CONFIG_GENERIC_PENDING_IRQ is not set +# CONFIG_AUTO_IRQ_AFFINITY is not set +# CONFIG_IRQ_PER_CPU is not set +# CONFIG_SPARSE_IRQ is not set + +# +# RCU Subsystem +# +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_TINY_RCU is not set +CONFIG_TINY_PREEMPT_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_TRACE is not set +# CONFIG_TREE_RCU_TRACE is not set +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=1 +CONFIG_RCU_BOOST_DELAY=500 +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y 
+CONFIG_LOG_BUF_SHIFT=17 +CONFIG_CGROUPS=y +# CONFIG_CGROUP_DEBUG is not set +# CONFIG_CGROUP_NS is not set +CONFIG_CGROUP_FREEZER=y +# CONFIG_CGROUP_DEVICE is not set +# CONFIG_CPUSETS is not set +CONFIG_CGROUP_CPUACCT=y +# CONFIG_RESOURCE_COUNTERS is not set +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +# CONFIG_BLK_CGROUP is not set +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_EXPERT=y +CONFIG_EMBEDDED=y +CONFIG_UID16=y +# CONFIG_SYSCTL_SYSCALL is not set +# CONFIG_KALLSYMS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +# CONFIG_ELF_CORE is not set +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_ASHMEM=y +CONFIG_AIO=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +# CONFIG_PERF_EVENTS is not set +# CONFIG_PERF_COUNTERS is not set +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_COMPAT_BRK=y +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +CONFIG_TRACEPOINTS=y +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_DMA_API_DEBUG=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +CONFIG_LBDAF=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_IOSCHED_BFQ=y +# CONFIG_CGROUP_BFQIO is not set +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_BFQ is not set +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +# CONFIG_INLINE_SPIN_UNLOCK is not set +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_READ_TRYLOCK is not set +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +# CONFIG_INLINE_READ_UNLOCK is not set +# CONFIG_INLINE_READ_UNLOCK_BH is not set +# CONFIG_INLINE_READ_UNLOCK_IRQ is not set +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_TRYLOCK is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set +# CONFIG_INLINE_WRITE_UNLOCK is not 
set +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +# CONFIG_MUTEX_SPIN_ON_OWNER is not set +CONFIG_FREEZER=y + +# +# System Type +# +CONFIG_MMU=y +# CONFIG_ARCH_AAEC2000 is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_REALVIEW is not set +# CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_VEXPRESS is not set +# CONFIG_ARCH_AT91 is not set +# CONFIG_ARCH_BCMRING is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CNS3XXX is not set +# CONFIG_ARCH_GEMINI is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_EP93XX is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_MXS is not set +# CONFIG_ARCH_STMP3XXX is not set +# CONFIG_ARCH_NETX is not set +# CONFIG_ARCH_H720X is not set +# CONFIG_ARCH_IOP13XX is not set +# CONFIG_ARCH_IOP32X is not set +# CONFIG_ARCH_IOP33X is not set +# CONFIG_ARCH_IXP23XX is not set +# CONFIG_ARCH_IXP2000 is not set +# CONFIG_ARCH_IXP4XX is not set +# CONFIG_ARCH_DOVE is not set +# CONFIG_ARCH_KIRKWOOD is not set +# CONFIG_ARCH_LOKI is not set +# CONFIG_ARCH_LPC32XX is not set +# CONFIG_ARCH_MV78XX0 is not set +# CONFIG_ARCH_ORION5X is not set +# CONFIG_ARCH_MMP is not set +# CONFIG_ARCH_KS8695 is not set +# CONFIG_ARCH_NS9XXX is not set +# CONFIG_ARCH_W90X900 is not set +# CONFIG_ARCH_NUC93X is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_PNX4008 is not set +# CONFIG_ARCH_PXA is not set +CONFIG_ARCH_MSM=y +# CONFIG_ARCH_SHMOBILE is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_S3C64XX is not set +# CONFIG_ARCH_S5P64X0 is not set +# CONFIG_ARCH_S5P6442 is not set +# CONFIG_ARCH_S5PC100 is not set +# CONFIG_ARCH_S5PV210 is not set +# CONFIG_ARCH_S5PV310 is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_TCC_926 is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_U300 is not set +# CONFIG_ARCH_U8500 is not set +# CONFIG_ARCH_NOMADIK is not set +# CONFIG_ARCH_DAVINCI is not set +# CONFIG_ARCH_OMAP is not set +# CONFIG_PLAT_SPEAR is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_ARCH_MSM7X00A is not set +# CONFIG_ARCH_MSM7X30 is not set +CONFIG_ARCH_QSD8X50=y +# CONFIG_ARCH_MSM8X60 is not set +CONFIG_ARCH_MSM_SCORPION=y +CONFIG_HAS_MSM_DEBUG_UART_PHYS=y +CONFIG_MSM_MDP31=y +# CONFIG_PERFLOCK is not set + +# +# Qualcomm MSM Board Type +# +# CONFIG_MACH_SWORDFISH is not set +CONFIG_MACH_QSD8X50_SURF=y +# CONFIG_MACH_QSD8X50A_ST1_5 is not set +CONFIG_MSM_DEBUG_UART_NONE=y +# CONFIG_MSM_DEBUG_UART1 is not set +# CONFIG_MSM_DEBUG_UART2 is not set +# CONFIG_MSM_DEBUG_UART3 is not set +CONFIG_MSM_PROC_COMM=y +# CONFIG_MACH_MAHIMAHI is not set +CONFIG_MACH_BRAVO_NONE=y +# CONFIG_MACH_BRAVO is not set +# CONFIG_MACH_BRAVOC is not set +# CONFIG_MACH_INCREDIBLE is not set +CONFIG_MACH_INCREDIBLEC=y +# CONFIG_MACH_SUPERSONIC is not set +# CONFIG_MACH_QSD8X50_FFA is not set +# CONFIG_HTC_HEADSET is not set +# CONFIG_HTC_35MM_JACK is not set +CONFIG_HTC_BATTCHG=y +CONFIG_HTC_BATTCHG_SMEM=y +# CONFIG_HTC_PWRSPLY is not set +# CONFIG_HTC_PWRSINK is not set +# CONFIG_HTC_POWER_COLLAPSE_MAGIC is not set +# CONFIG_HTC_ONMODE_CHARGING is not set +CONFIG_QSD_SVS=y +CONFIG_QSD_PMIC_DEFAULT_DCDC1=1275 +CONFIG_CACHE_FLUSH_RANGE_LIMIT=0x40000 +CONFIG_PHYS_OFFSET=0x20000000 +CONFIG_MSM7X00A_USE_GP_TIMER=y +# CONFIG_MSM7X00A_USE_DG_TIMER is not set +CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE_SUSPEND=y +# 
CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE is not set +# CONFIG_MSM7X00A_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_SLEEP_MODE=0 +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE_SUSPEND is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE_APPS_SLEEP=y +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE=2 +CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME=20000000 +CONFIG_MSM7X00A_IDLE_SPIN_TIME=80000 +CONFIG_MSM_IDLE_STATS=y +CONFIG_MSM_IDLE_STATS_FIRST_BUCKET=62500 +CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT=2 +CONFIG_MSM_IDLE_STATS_BUCKET_COUNT=10 +CONFIG_MSM_FIQ_SUPPORT=y +CONFIG_MSM_SERIAL_DEBUGGER=y +CONFIG_MSM_SERIAL_DEBUGGER_NO_SLEEP=y +# CONFIG_MSM_SERIAL_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON is not set +# CONFIG_MSM_SERIAL_DEBUGGER_CONSOLE is not set +CONFIG_MSM_SMD=y +CONFIG_MSM_GPIOMUX=y +CONFIG_MSM_DAL=y +CONFIG_MSM_ONCRPCROUTER=y +CONFIG_MSM_CPU_FREQ_SET_MIN_MAX=y +CONFIG_MSM_CPU_FREQ_MAX=998000 +CONFIG_MSM_CPU_FREQ_MIN=245760 +# CONFIG_AXI_SCREEN_POLICY is not set +CONFIG_MSM_CPU_AVS=y +CONFIG_MSM_AVS_HW=y +CONFIG_HTC_ACOUSTIC_QSD=y +CONFIG_MSM_QDSP6=y +CONFIG_WIFI_CONTROL_FUNC=y +# CONFIG_WIFI_MEM_PREALLOC is not set +CONFIG_ARCH_MSM_FLASHLIGHT=y +CONFIG_MICROP_COMMON=y +CONFIG_HTC_HEADSET_MGR=y +CONFIG_HTC_HEADSET_GPIO=y +CONFIG_HTC_HEADSET_MICROP=y +# CONFIG_VIRTUAL_KPANIC_PARTITION is not set + +# +# System MMU +# + +# +# Processor Type +# +CONFIG_CPU_32v6K=y +CONFIG_CPU_V7=y +CONFIG_CPU_32v7=y +CONFIG_CPU_ABRT_EV7=y +CONFIG_CPU_PABRT_V7=y +CONFIG_CPU_CACHE_V7=y +CONFIG_CPU_CACHE_VIPT=y +CONFIG_CPU_COPY_V6=y +CONFIG_CPU_TLB_V7=y +CONFIG_VERIFY_PERMISSION_FAULT=y +CONFIG_CPU_HAS_ASID=y +CONFIG_CPU_CP15=y +CONFIG_CPU_CP15_MMU=y + +# +# Processor Features +# +CONFIG_ARM_THUMB=y +CONFIG_ARM_THUMBEE=y +# CONFIG_SWP_EMULATE is not set +# CONFIG_CPU_ICACHE_DISABLE is not set +# CONFIG_CPU_DCACHE_DISABLE is not set +# CONFIG_CPU_CACHE_ERR_REPORT is not set +# CONFIG_CPU_BPREDICT_DISABLE is not set +CONFIG_ARM_L1_CACHE_SHIFT=5 +CONFIG_ARM_DMA_MEM_BUFFERABLE=y +# CONFIG_VCM is not set +# CONFIG_STRICT_MEMORY_RWX is not set +# CONFIG_RESERVE_FIRST_PAGE is not set +CONFIG_CPU_HAS_PMU=y +# CONFIG_ARM_ERRATA_430973 is not set +# CONFIG_ARM_ERRATA_458693 is not set +# CONFIG_ARM_ERRATA_460075 is not set +# CONFIG_ARM_ERRATA_743622 is not set +# CONFIG_KSAPI is not set +# CONFIG_FIQ_DEBUGGER is not set + +# +# Bus support +# +# CONFIG_PCI_SYSCALL is not set +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Kernel Features +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_VMSPLIT_3G=y +# CONFIG_VMSPLIT_2G is not set +# CONFIG_VMSPLIT_1G is not set +CONFIG_PAGE_OFFSET=0xC0000000 +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_HZ=100 +# CONFIG_THUMB2_KERNEL is not set +CONFIG_AEABI=y +# CONFIG_OABI_COMPAT is not set +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set +# CONFIG_HIGHMEM is not set +CONFIG_VMALLOC_RESERVE=0x08000000 +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_COMPACTION is not set +# 
CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_VIRT_TO_BUS=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 +CONFIG_NEED_PER_CPU_KM=y +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_ALIGNMENT_TRAP=y +# CONFIG_UACCESS_WITH_MEMCPY is not set +# CONFIG_SECCOMP is not set +# CONFIG_CC_STACKPROTECTOR is not set +# CONFIG_DEPRECATED_PARAM_STRUCT is not set +# CONFIG_CP_ACCESS is not set + +# +# Boot options +# +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_CMDLINE="mem=64M console=ttyMSM0" +# CONFIG_CMDLINE_FORCE is not set +# CONFIG_XIP_KERNEL is not set +# CONFIG_KEXEC is not set +# CONFIG_CRASH_DUMP is not set +# CONFIG_AUTO_ZRELADDR is not set + +# +# CPU Power Management +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TABLE=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_STAT_DETAILS=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2 is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_INTERACTIVE=y +# CONFIG_CPU_FREQ_GOV_SMARTASS2 is not set +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +# CONFIG_CPU_IDLE is not set +CONFIG_CPU_FREQ_MSM=y + +# +# Floating point emulation +# + +# +# At least one emulation must be selected +# +CONFIG_VFP=y +CONFIG_VFPv3=y +CONFIG_NEON=y + +# +# Userspace binary formats +# +CONFIG_BINFMT_ELF=y +CONFIG_HAVE_AOUT=y +# CONFIG_BINFMT_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options +# +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_SLEEP=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HAS_WAKELOCK=y +CONFIG_HAS_EARLYSUSPEND=y +CONFIG_WAKELOCK=y +CONFIG_WAKELOCK_STAT=y +CONFIG_USER_WAKELOCK=y +CONFIG_EARLYSUSPEND=y +# CONFIG_NO_USER_SPACE_SCREEN_ACCESS_CONTROL is not set +CONFIG_FB_EARLYSUSPEND=y +# CONFIG_APM_EMULATION is not set +# CONFIG_PM_RUNTIME is not set +CONFIG_PM_OPS=y +# CONFIG_SUSPEND_TIME is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=y +CONFIG_NET_KEY=y +# CONFIG_NET_KEY_MIGRATE is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +CONFIG_INET_ESP=y +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +CONFIG_IPV6=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +# CONFIG_IPV6_ROUTE_INFO is not set +CONFIG_IPV6_OPTIMISTIC_DAD=y 
+CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_INET6_XFRM_TUNNEL=y +CONFIG_INET6_TUNNEL=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +CONFIG_IPV6_SIT=y +# CONFIG_IPV6_SIT_6RD is not set +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=y +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +# CONFIG_IPV6_MROUTE is not set +# CONFIG_NETLABEL is not set +# CONFIG_ANDROID_PARANOID_NETWORK is not set +CONFIG_NET_ACTIVITY_STATS=y +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_NETLINK=y +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_TPROXY=y +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=y +CONFIG_NETFILTER_XT_CONNMARK=y + +# +# Xtables targets +# +# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +# CONFIG_NETFILTER_XT_TARGET_CT is not set +# CONFIG_NETFILTER_XT_TARGET_DSCP is not set +CONFIG_NETFILTER_XT_TARGET_HL=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LED=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set +# CONFIG_NETFILTER_XT_TARGET_TEE is not set +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set +# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set + +# +# Xtables matches +# +# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +# CONFIG_NETFILTER_XT_MATCH_CPU is not set +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +# CONFIG_NETFILTER_XT_MATCH_DSCP is not set +# CONFIG_NETFILTER_XT_MATCH_ESP is not set +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_HL=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set +# CONFIG_NETFILTER_XT_MATCH_OSF is not set +# CONFIG_NETFILTER_XT_MATCH_OWNER is not set +CONFIG_NETFILTER_XT_MATCH_POLICY=y +# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set +# CONFIG_NETFILTER_XT_MATCH_REALM is not set +# 
CONFIG_NETFILTER_XT_MATCH_RECENT is not set +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +# CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_CONNTRACK_PROC_COMPAT=y +# CONFIG_IP_NF_QUEUE is not set +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_ADDRTYPE=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_LOG=y +# CONFIG_IP_NF_TARGET_ULOG is not set +CONFIG_NF_NAT=y +CONFIG_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +# CONFIG_NF_NAT_SNMP_BASIC is not set +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_GRE=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_FTP=y +CONFIG_NF_NAT_IRC=y +CONFIG_NF_NAT_TFTP=y +CONFIG_NF_NAT_AMANDA=y +CONFIG_NF_NAT_PPTP=y +CONFIG_NF_NAT_H323=y +CONFIG_NF_NAT_SIP=y +CONFIG_IP_NF_MANGLE=y +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +# CONFIG_IP_NF_TARGET_ECN is not set +# CONFIG_IP_NF_TARGET_TTL is not set +CONFIG_IP_NF_RAW=y +# CONFIG_IP_NF_SECURITY is not set +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV6=y +CONFIG_NF_CONNTRACK_IPV6=y +# CONFIG_IP6_NF_QUEUE is not set +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_EUI64=y +CONFIG_IP6_NF_MATCH_FRAG=y +CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +CONFIG_IP6_NF_MATCH_RT=y +CONFIG_IP6_NF_TARGET_HL=y +CONFIG_IP6_NF_TARGET_LOG=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +# CONFIG_IP6_NF_SECURITY is not set +# CONFIG_BRIDGE_NF_EBTABLES is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +CONFIG_STP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +# CONFIG_NET_SCH_CBQ is not set +CONFIG_NET_SCH_HTB=y +# CONFIG_NET_SCH_HFSC is not set +# CONFIG_NET_SCH_PRIO is not set +# CONFIG_NET_SCH_MULTIQ is not set +# CONFIG_NET_SCH_RED is not set +# CONFIG_NET_SCH_SFQ is not set +# CONFIG_NET_SCH_TEQL is not set +# CONFIG_NET_SCH_TBF is not set +# CONFIG_NET_SCH_GRED is not set +# CONFIG_NET_SCH_DSMARK is not set +# CONFIG_NET_SCH_NETEM is not set +# CONFIG_NET_SCH_DRR is not set +CONFIG_NET_SCH_INGRESS=y + +# +# Classification +# +CONFIG_NET_CLS=y +# CONFIG_NET_CLS_BASIC is not set +# CONFIG_NET_CLS_TCINDEX is not set +# CONFIG_NET_CLS_ROUTE4 is not set +# CONFIG_NET_CLS_FW is not set +CONFIG_NET_CLS_U32=y +# CONFIG_CLS_U32_PERF is not set +# CONFIG_CLS_U32_MARK is not set +# CONFIG_NET_CLS_RSVP is not set +# CONFIG_NET_CLS_RSVP6 is not set +# CONFIG_NET_CLS_FLOW is not set +# CONFIG_NET_CLS_CGROUP is not set 
+CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +# CONFIG_NET_EMATCH_CMP is not set +# CONFIG_NET_EMATCH_NBYTE is not set +CONFIG_NET_EMATCH_U32=y +# CONFIG_NET_EMATCH_META is not set +# CONFIG_NET_EMATCH_TEXT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=y +# CONFIG_GACT_PROB is not set +CONFIG_NET_ACT_MIRRED=y +# CONFIG_NET_ACT_IPT is not set +# CONFIG_NET_ACT_NAT is not set +# CONFIG_NET_ACT_PEDIT is not set +# CONFIG_NET_ACT_SIMP is not set +# CONFIG_NET_ACT_SKBEDIT is not set +# CONFIG_NET_ACT_CSUM is not set +# CONFIG_NET_CLS_IND is not set +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NET_DROP_MONITOR is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +CONFIG_BT=y +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +# CONFIG_BT_BNEP_MC_FILTER is not set +# CONFIG_BT_BNEP_PROTO_FILTER is not set +CONFIG_BT_HIDP=y + +# +# Bluetooth device drivers +# +# CONFIG_BT_HCIBTSDIO is not set +CONFIG_BT_HCIUART=y +CONFIG_BT_HCIUART_H4=y +# CONFIG_BT_HCIUART_BCSP is not set +# CONFIG_BT_HCIUART_ATH3K is not set +CONFIG_BT_HCIUART_LL=y +# CONFIG_BT_HCIVHCI is not set +# CONFIG_BT_MRVL is not set +# CONFIG_AF_RXRPC is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_CFG80211=y +CONFIG_NL80211_TESTMODE=y +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_REG_DEBUG is not set +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +# CONFIG_CFG80211_INTERNAL_REGDB is not set +CONFIG_CFG80211_WEXT=y +CONFIG_WIRELESS_EXT_SYSFS=y +# CONFIG_LIB80211 is not set +CONFIG_CFG80211_ALLOW_RECONNECT=y +# CONFIG_MAC80211 is not set + +# +# Some wireless drivers require a rate control algorithm +# +# CONFIG_WIMAX is not set +CONFIG_RFKILL=y +# CONFIG_RFKILL_PM is not set +CONFIG_RFKILL_LEDS=y +# CONFIG_RFKILL_INPUT is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="" +CONFIG_DEVTMPFS=y +# CONFIG_DEVTMPFS_MOUNT is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +CONFIG_GENLOCK=y +CONFIG_GENLOCK_MISCDEVICE=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y 
+CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +CONFIG_MTD_MSM_NAND=y +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +CONFIG_MTD_NAND_IDS=y +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_UBI is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=y + +# +# DRBD disabled because PROC_FS, INET or CONNECTOR not selected +# +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_MG_DISK is not set +# CONFIG_BLK_DEV_RBD is not set +CONFIG_MISC_DEVICES=y +# CONFIG_AD525X_DPOT is not set +CONFIG_ANDROID_PMEM=y +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +CONFIG_KERNEL_DEBUGGER_CORE=y +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1780 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_SENSORS_AK8975 is not set +CONFIG_SENSORS_AKM8973=y +# CONFIG_SENSORS_AKM8976 is not set +# CONFIG_VP_A1026 is not set +# CONFIG_DS1682 is not set +# CONFIG_TI_DAC7512 is not set +CONFIG_UID_STAT=y +# CONFIG_BMP085 is not set +# CONFIG_WL127X_RFKILL is not set +CONFIG_SENSORS_BMA150_SPI=y +# CONFIG_APANIC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_IWMC3200TOP is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_ATA is not set +CONFIG_MD=y +# CONFIG_BLK_DEV_MD is not set +CONFIG_BLK_DEV_DM=y +# CONFIG_DM_DEBUG is not set +CONFIG_DM_CRYPT=y +# CONFIG_DM_SNAPSHOT is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_RAID is not set +# CONFIG_DM_ZERO is not set +# CONFIG_DM_MULTIPATH is not set +# CONFIG_DM_DELAY is not set +CONFIG_DM_UEVENT=y +CONFIG_NETDEVICES=y +CONFIG_IFB=m +CONFIG_DUMMY=y +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +CONFIG_TUN=y +# CONFIG_VETH is not set +CONFIG_MII=y +# CONFIG_PHYLIB is not set +CONFIG_NET_ETHERNET=y +# CONFIG_AX88796 is not set +CONFIG_SMC91X=y +# CONFIG_DM9000 is not set +# CONFIG_ENC28J60 is not set +# CONFIG_ETHOC is not set +# CONFIG_SMC911X is not set +# CONFIG_SMSC911X is not set +# CONFIG_DNET is not set +# CONFIG_IBM_NEW_EMAC_ZMII is not set +# CONFIG_IBM_NEW_EMAC_RGMII is not set +# 
CONFIG_IBM_NEW_EMAC_TAH is not set +# CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set +# CONFIG_B44 is not set +# CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set +CONFIG_WLAN=y +# CONFIG_ATH_COMMON is not set +# CONFIG_BCM4329 is not set +CONFIG_BCMDHD=m +CONFIG_BCMDHD_FW_PATH="/vendor/firmware/fw_bcmdhd.bin" +CONFIG_BCMDHD_NVRAM_PATH="/proc/calibration" +# CONFIG_DHD_USE_STATIC_BUF is not set +# CONFIG_DHD_USE_SCHED_SCAN is not set +# CONFIG_DHD_ENABLE_P2P is not set +# CONFIG_HOSTAP is not set +# CONFIG_IWM is not set +# CONFIG_LIBERTAS is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set + +# +# CAIF transport drivers +# +CONFIG_PPP=y +# CONFIG_PPP_MULTILINK is not set +# CONFIG_PPP_FILTER is not set +CONFIG_PPP_ASYNC=y +# CONFIG_PPP_SYNC_TTY is not set +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_MPPE=y +# CONFIG_PPPOE is not set +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +# CONFIG_SLIP is not set +CONFIG_SLHC=y +# CONFIG_NETCONSOLE is not set +CONFIG_MSM_RMNET=y +# CONFIG_MSM_RMNET_DEBUG is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +CONFIG_GAN_ETH=y +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set +CONFIG_INPUT_KEYRESET=y + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_ATMEL=y +CONFIG_TOUCHSCREEN_COMPATIBLE_REPORT=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_I2C is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_ELAN_I2C_8232 is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_QT602240 is not set +# CONFIG_TOUCHSCREEN_MSM is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_W90X900 is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATI_REMOTE is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +CONFIG_INPUT_KEYCHORD=y +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is 
not set +# CONFIG_INPUT_CM109 is not set +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_CAPELLA_CM3602 is not set +# CONFIG_INPUT_CAPELLA_CM3602_HTC is not set +CONFIG_LIGHTSENSOR_MICROP=y +CONFIG_INPUT_OPTICALJOYSTICK=y +CONFIG_OPTICALJOYSTICK_CRUCIAL=y +CONFIG_OPTICALJOYSTICK_CRUCIAL_uP=y +# CONFIG_OPTICALJOYSTICK_CRUCIAL_SPI is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_N_GSM is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX3107 is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_MSM=y +# CONFIG_SERIAL_MSM_CONSOLE is not set +# CONFIG_SERIAL_MSM_CLOCK_CONTROL is not set +CONFIG_SERIAL_MSM_HS=y +CONFIG_SERIAL_BCM_BT_LPM=y +# CONFIG_SERIAL_TIMBERDALE is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +CONFIG_UNIX98_PTYS=y +CONFIG_DEVPTS_MULTIPLE_INSTANCES=y +# CONFIG_LEGACY_PTYS is not set +# CONFIG_TTY_PRINTK is not set +# CONFIG_HVC_DCC is not set +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_DCC_TTY is not set +# CONFIG_RAMOOPS is not set +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +# CONFIG_I2C_CHARDEV is not set +# CONFIG_I2C_MUX is not set +CONFIG_I2C_HELPER_AUTO=y + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE is not set +# CONFIG_I2C_GPIO is not set +CONFIG_I2C_MSM=y +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_PXA2XX_PCI is not set +CONFIG_SPI_QSD=y +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_DESIGNWARE is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_TLE62X0 is not set + +# +# PPS support +# +# CONFIG_PPS is not set + +# +# PPS generators support +# +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set + +# +# Memory mapped GPIO expanders: +# +# CONFIG_GPIO_BASIC_MMIO is not set +# CONFIG_GPIO_IT8761E is not set + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_SX150X is not set +# CONFIG_GPIO_ADP5588 is not set + +# +# PCI GPIO expanders: +# + +# +# SPI GPIO expanders: +# +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_74X164 is not set + +# +# AC97 GPIO expanders: +# + +# +# MODULbus 
GPIO expanders: +# +# CONFIG_W1 is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_DS2784 is not set +# CONFIG_BATTERY_BQ20Z75 is not set +# CONFIG_BATTERY_BQ27x00 is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set +CONFIG_MFD_SUPPORT=y +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_ASIC3 is not set +# CONFIG_HTC_EGPIO is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_TPS65200 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_MFD_STMPE is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_T7L66XB is not set +# CONFIG_MFD_TC6387XB is not set +# CONFIG_MFD_TC6393XB is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13XXX is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_WL1273_CORE is not set +CONFIG_REGULATOR=y +# CONFIG_REGULATOR_DEBUG is not set +# CONFIG_REGULATOR_DUMMY is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_BQ24022 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +CONFIG_REGULATOR_TPS65023=y +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_AD5398 is not set +# CONFIG_REGULATOR_TPS6524X is not set +CONFIG_MEDIA_SUPPORT=y + +# +# Multimedia core support +# +# CONFIG_VIDEO_DEV is not set +# CONFIG_DVB_CORE is not set +# CONFIG_VIDEO_MEDIA is not set + +# +# Multimedia drivers +# +CONFIG_RC_CORE=y +CONFIG_LIRC=y +CONFIG_RC_MAP=y +CONFIG_IR_NEC_DECODER=y +CONFIG_IR_RC5_DECODER=y +CONFIG_IR_RC6_DECODER=y +CONFIG_IR_JVC_DECODER=y +CONFIG_IR_SONY_DECODER=y +CONFIG_IR_RC5_SZ_DECODER=y +CONFIG_IR_LIRC_CODEC=y +# CONFIG_IR_IMON is not set +# CONFIG_IR_MCEUSB is not set +# CONFIG_IR_STREAMZAP is not set +# CONFIG_RC_LOOPBACK is not set + +# +# Qualcomm MSM Camera And Video +# +CONFIG_MSM_CAMERA=y +CONFIG_720P_CAMERA=y +# CONFIG_MSM_CAMERA_DEBUG is not set + +# +# Camera Sensor Selection +# +# CONFIG_MT9T013 is not set +# CONFIG_MT9D112 is not set +# CONFIG_MT9P012 is not set +# CONFIG_S5K3E2FX is not set +# CONFIG_S5K6AAFX is not set +CONFIG_OV8810=y +# CONFIG_OV9665 is not set +# CONFIG_S5K3H1GX is not set + +# +# Graphics support +# +# CONFIG_DRM is not set +CONFIG_ION=y +CONFIG_ION_MSM=y +CONFIG_MSM_KGSL=y +# CONFIG_MSM_KGSL_CFF_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP is not set +# 
CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX is not set +# CONFIG_KGSL_PER_PROCESS_PAGE_TABLE is not set +CONFIG_MSM_KGSL_PAGE_TABLE_SIZE=0xFFF0000 +CONFIG_MSM_KGSL_MMU_PAGE_FAULT=y +# CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES is not set +# CONFIG_VGASTATE is not set +CONFIG_VIDEO_OUTPUT_CONTROL=y +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_WMT_GE_ROPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_FB_MSM=y +CONFIG_FB_MSM_LEGACY_MDP=y +CONFIG_FB_MSM_MDP_PPP=y +CONFIG_FB_MSM_LCDC=y +CONFIG_FB_MSM_MDDI=y +# CONFIG_FB_MSM_MDDI_EPSON is not set +# CONFIG_FB_MSM_MDDI_NOVTEC is not set +# CONFIG_MSM_HDMI is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set +# CONFIG_LOGO is not set +# CONFIG_SOUND is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +# CONFIG_HIDRAW is not set +CONFIG_UHID=y +# CONFIG_HID_PID is not set + +# +# Special HID drivers +# +CONFIG_HID_APPLE=y +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_WACOM is not set +CONFIG_USB_SUPPORT=y +CONFIG_USB_ARCH_HAS_HCD=y +# CONFIG_USB_ARCH_HAS_OHCI is not set +CONFIG_USB_ARCH_HAS_EHCI=y +# CONFIG_USB is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_MUSB_HDRC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_GADGET_SELECTED=y +# CONFIG_USB_GADGET_R8A66597 is not set +# CONFIG_USB_GADGET_PXA_U2O is not set +# CONFIG_USB_GADGET_M66592 is not set +# CONFIG_USB_GADGET_CI13XXX_MSM is not set +CONFIG_USB_GADGET_MSM_72K=y +CONFIG_USB_MSM_72K=y +CONFIG_USB_MSM_72K_HTC=y +CONFIG_USB_GADGET_DUALSPEED=y +# CONFIG_USB_ZERO is not set +# CONFIG_USB_ETH is not set +# CONFIG_USB_G_NCM is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_FILE_STORAGE is not set +# CONFIG_USB_MASS_STORAGE is not set +# CONFIG_USB_G_SERIAL is not set +# CONFIG_USB_G_PRINTER is not set +CONFIG_USB_ANDROID=y +# CONFIG_USB_ANDROID_ACM is not set +CONFIG_USB_ANDROID_ADB=y +CONFIG_USB_ANDROID_DIAG=y +CONFIG_USB_ANDROID_MASS_STORAGE=y +# CONFIG_USB_ANDROID_MTP is not set +CONFIG_USB_ANDROID_RNDIS=y +# CONFIG_USB_ANDROID_RNDIS_WCEIS is not set +# CONFIG_USB_ANDROID_ACCESSORY is not set +CONFIG_USB_CSW_HACK=y +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set +# CONFIG_USB_ACCESSORY_DETECT is not set +# CONFIG_DOCK_ACCESSORY_DETECT is not set +# CONFIG_USB_BYPASS_VBUS_NOTIFY is not set + +# +# OTG and related infrastructure +# +# CONFIG_USB_GPIO_VBUS is not set +# 
CONFIG_USB_ULPI is not set +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_MSM_OTG_72K is not set +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_UNSAFE_RESUME=y +# CONFIG_MMC_CLKGATE is not set +CONFIG_MMC_EMBEDDED_SDIO=y +CONFIG_MMC_PARANOID_SD_INIT=y + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=8 +# CONFIG_MMC_BLOCK_BOUNCE is not set +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_SDHCI is not set +CONFIG_MMC_MSM=y +# CONFIG_MMC_SPI is not set +# CONFIG_MMC_DW is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y + +# +# LED drivers +# +# CONFIG_LEDS_PCA9532 is not set +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_GPIO_PLATFORM=y +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 is not set +CONFIG_LEDS_CPLD=y +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_LT3593 is not set +CONFIG_LEDS_TRIGGERS=y + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set +CONFIG_LEDS_TRIGGER_SLEEP=y + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_NFC_DEVICES is not set +CONFIG_SWITCH=y +CONFIG_SWITCH_GPIO=y +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +# CONFIG_RTC_INTF_SYSFS is not set +# CONFIG_RTC_INTF_PROC is not set +# CONFIG_RTC_INTF_DEV is not set +CONFIG_RTC_INTF_ALARM=y +CONFIG_RTC_INTF_ALARM_DEV=y +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_CMOS is not set +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_MSM7X00A=y +# CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set +CONFIG_STAGING=y +# CONFIG_STAGING_EXCLUDE_BUILD is not 
set +# CONFIG_ECHO is not set +# CONFIG_BRCM80211 is not set +# CONFIG_COMEDI is not set + +# +# Android +# +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_LOGGER=y +CONFIG_ANDROID_RAM_CONSOLE=y +CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE=128 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE=16 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE=8 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL=0x11d +# CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT is not set +CONFIG_ANDROID_TIMED_OUTPUT=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +# CONFIG_POHMELFS is not set +# CONFIG_IIO is not set +CONFIG_XVMALLOC=y +CONFIG_ZRAM=y +# CONFIG_ZRAM_DEBUG is not set +# CONFIG_FB_SM7XX is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_ST_BT is not set +# CONFIG_LIRC_STAGING is not set +CONFIG_MACH_NO_WESTBRIDGE=y +# CONFIG_ATH6K_LEGACY is not set +# CONFIG_FT1000 is not set + +# +# Speakup console speech +# +# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set + +# +# File systems +# +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT23=y +CONFIG_EXT4_FS_XATTR=y +# CONFIG_EXT4_FS_POSIX_ACL is not set +# CONFIG_EXT4_FS_SECURITY is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_FS_POSIX_ACL is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +# CONFIG_QUOTA is not set +# CONFIG_QUOTACTL is not set +# CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +# CONFIG_MSDOS_FS is not set +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_YAFFS_FS=y +CONFIG_YAFFS_YAFFS1=y +# CONFIG_YAFFS_9BYTE_TAGS is not set +# CONFIG_YAFFS_DOES_ECC is not set +CONFIG_YAFFS_YAFFS2=y +CONFIG_YAFFS_AUTO_YAFFS2=y +CONFIG_YAFFS_DISABLE_TAGS_ECC=y +# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set +# CONFIG_YAFFS_EMPTY_LOST_AND_FOUND is not set +# CONFIG_YAFFS_DISABLE_BLOCK_REFRESHING is not set +# CONFIG_YAFFS_DISABLE_BACKGROUND is not set +CONFIG_YAFFS_XATTR=y +# CONFIG_JFFS2_FS is not set +# CONFIG_LOGFS is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +# 
CONFIG_NFS_FS is not set +# CONFIG_NFSD is not set +# CONFIG_CEPH_FS is not set +CONFIG_CIFS=y +# CONFIG_CIFS_STATS is not set +CONFIG_CIFS_WEAK_PW_HASH=y +# CONFIG_CIFS_UPCALL is not set +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DFS_UPCALL is not set +# CONFIG_CIFS_ACL is not set +# CONFIG_CIFS_EXPERIMENTAL is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_UTF8 is not set + +# +# Kernel hacking +# +CONFIG_PRINTK_TIME=y +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SHIRQ is not set +# CONFIG_LOCKUP_DETECTOR is not set +# CONFIG_HARDLOCKUP_DETECTOR is not set +# CONFIG_DETECT_HUNG_TASK is not set +CONFIG_SCHED_DEBUG=y +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_BKL is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_SPARSE_RCU_POINTER is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is 
not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# CONFIG_LKDTM is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +# CONFIG_SYSCTL_SYSCALL_CHECK is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_EVENT_POWER_TRACING_DEPRECATED=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_STRICT_DEVMEM is not set +CONFIG_ARM_UNWIND=y +# CONFIG_DEBUG_USER is not set +# CONFIG_DEBUG_ERRORS is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_LL is not set +# CONFIG_OC_ETM is not set + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_DEBUG_PROC_KEYS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +# CONFIG_SECURITYFS is not set +# CONFIG_SECURITY_NETWORK is not set +# CONFIG_SECURITY_PATH is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_IMA is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_GHASH is not set +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_ANUBIS is not set 
+CONFIG_CRYPTO_ARC4=y +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_TWOFISH_COMMON=y + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set +CONFIG_CRYPTO_HW=y +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +# CONFIG_XZ_DEC is not set +# CONFIG_XZ_DEC_BCJ is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=y +CONFIG_TEXTSEARCH_BM=y +CONFIG_TEXTSEARCH_FSM=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/arm/configs/evervolv_ubuntuphone_mahimahi_defconfig b/arch/arm/configs/evervolv_ubuntuphone_mahimahi_defconfig new file mode 100644 index 0000000000000..6bfe5c49266f4 --- /dev/null +++ b/arch/arm/configs/evervolv_ubuntuphone_mahimahi_defconfig @@ -0,0 +1,2195 @@ +# +# Automatically generated make config: don't edit +# Linux/arm 2.6.38.8 Kernel Configuration +# Fri Feb 22 10:26:50 2013 +# +CONFIG_ARM=y +CONFIG_SYS_SUPPORTS_APM_EMULATION=y +CONFIG_GENERIC_GPIO=y +# CONFIG_ARCH_USES_GETTIMEOFFSET is not set +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_HAVE_PROC_CPU=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_ARCH_HAS_CPUFREQ=y +CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_VECTORS_BASE=0xffff0000 +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y +CONFIG_HAVE_IRQ_WORK=y + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="" +CONFIG_LOCALVERSION="-evervolv-up" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +# CONFIG_KERNEL_GZIP is not set +# CONFIG_KERNEL_LZMA is not set +CONFIG_KERNEL_XZ=y +# CONFIG_KERNEL_LZO is not set +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set +CONFIG_HAVE_GENERIC_HARDIRQS=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_HARDIRQS=y +# CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED is not set +CONFIG_HAVE_SPARSE_IRQ=y +# CONFIG_GENERIC_PENDING_IRQ is not set +# CONFIG_AUTO_IRQ_AFFINITY is not set +# CONFIG_IRQ_PER_CPU is not set +# CONFIG_SPARSE_IRQ is not set + +# +# RCU Subsystem +# +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_TINY_RCU is not set 
+CONFIG_TINY_PREEMPT_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_TRACE is not set +# CONFIG_TREE_RCU_TRACE is not set +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=1 +CONFIG_RCU_BOOST_DELAY=500 +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_CGROUPS=y +# CONFIG_CGROUP_DEBUG is not set +# CONFIG_CGROUP_NS is not set +CONFIG_CGROUP_FREEZER=y +# CONFIG_CGROUP_DEVICE is not set +# CONFIG_CPUSETS is not set +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +# CONFIG_CGROUP_MEM_RES_CTLR is not set +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_BLK_CGROUP=y +# CONFIG_DEBUG_BLK_CGROUP is not set +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_EXPERT=y +CONFIG_EMBEDDED=y +CONFIG_UID16=y +# CONFIG_SYSCTL_SYSCALL is not set +# CONFIG_KALLSYMS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +# CONFIG_ELF_CORE is not set +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_ASHMEM=y +CONFIG_AIO=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +# CONFIG_PERF_EVENTS is not set +# CONFIG_PERF_COUNTERS is not set +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_SLUB_DEBUG is not set +CONFIG_COMPAT_BRK=y +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +CONFIG_TRACEPOINTS=y +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_DMA_API_DEBUG=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +CONFIG_LBDAF=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set +# CONFIG_BLK_DEV_THROTTLING is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_CFQ_GROUP_IOSCHED=y +CONFIG_IOSCHED_BFQ=y +CONFIG_CGROUP_BFQIO=y +CONFIG_DEFAULT_DEADLINE=y +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_BFQ is not set +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="deadline" +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +# CONFIG_INLINE_SPIN_UNLOCK is not set +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_READ_TRYLOCK is not set +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +# CONFIG_INLINE_READ_UNLOCK is not set +# 
CONFIG_INLINE_READ_UNLOCK_BH is not set +# CONFIG_INLINE_READ_UNLOCK_IRQ is not set +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_TRYLOCK is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set +# CONFIG_INLINE_WRITE_UNLOCK is not set +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +# CONFIG_MUTEX_SPIN_ON_OWNER is not set +CONFIG_FREEZER=y + +# +# System Type +# +CONFIG_MMU=y +# CONFIG_ARCH_AAEC2000 is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_REALVIEW is not set +# CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_VEXPRESS is not set +# CONFIG_ARCH_AT91 is not set +# CONFIG_ARCH_BCMRING is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CNS3XXX is not set +# CONFIG_ARCH_GEMINI is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_EP93XX is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_MXS is not set +# CONFIG_ARCH_STMP3XXX is not set +# CONFIG_ARCH_NETX is not set +# CONFIG_ARCH_H720X is not set +# CONFIG_ARCH_IOP13XX is not set +# CONFIG_ARCH_IOP32X is not set +# CONFIG_ARCH_IOP33X is not set +# CONFIG_ARCH_IXP23XX is not set +# CONFIG_ARCH_IXP2000 is not set +# CONFIG_ARCH_IXP4XX is not set +# CONFIG_ARCH_DOVE is not set +# CONFIG_ARCH_KIRKWOOD is not set +# CONFIG_ARCH_LOKI is not set +# CONFIG_ARCH_LPC32XX is not set +# CONFIG_ARCH_MV78XX0 is not set +# CONFIG_ARCH_ORION5X is not set +# CONFIG_ARCH_MMP is not set +# CONFIG_ARCH_KS8695 is not set +# CONFIG_ARCH_NS9XXX is not set +# CONFIG_ARCH_W90X900 is not set +# CONFIG_ARCH_NUC93X is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_PNX4008 is not set +# CONFIG_ARCH_PXA is not set +CONFIG_ARCH_MSM=y +# CONFIG_ARCH_SHMOBILE is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_S3C64XX is not set +# CONFIG_ARCH_S5P64X0 is not set +# CONFIG_ARCH_S5P6442 is not set +# CONFIG_ARCH_S5PC100 is not set +# CONFIG_ARCH_S5PV210 is not set +# CONFIG_ARCH_S5PV310 is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_TCC_926 is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_U300 is not set +# CONFIG_ARCH_U8500 is not set +# CONFIG_ARCH_NOMADIK is not set +# CONFIG_ARCH_DAVINCI is not set +# CONFIG_ARCH_OMAP is not set +# CONFIG_PLAT_SPEAR is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_ARCH_MSM7X00A is not set +# CONFIG_ARCH_MSM7X30 is not set +CONFIG_ARCH_QSD8X50=y +# CONFIG_ARCH_MSM8X60 is not set +CONFIG_ARCH_MSM_SCORPION=y +CONFIG_HAS_MSM_DEBUG_UART_PHYS=y +CONFIG_MSM_MDP31=y +# CONFIG_PERFLOCK is not set + +# +# Qualcomm MSM Board Type +# +# CONFIG_MACH_SWORDFISH is not set +CONFIG_MACH_QSD8X50_SURF=y +# CONFIG_MACH_QSD8X50A_ST1_5 is not set +CONFIG_MSM_DEBUG_UART=1 +# CONFIG_MSM_DEBUG_UART_NONE is not set +CONFIG_MSM_DEBUG_UART1=y +# CONFIG_MSM_DEBUG_UART2 is not set +# CONFIG_MSM_DEBUG_UART3 is not set +CONFIG_MSM_PROC_COMM=y +CONFIG_MACH_MAHIMAHI=y +CONFIG_MACH_BRAVO_NONE=y +# CONFIG_MACH_BRAVO is not set +# CONFIG_MACH_BRAVOC is not set +# CONFIG_MACH_INCREDIBLE is not set +# CONFIG_MACH_INCREDIBLEC is not set +# CONFIG_MACH_SUPERSONIC is not set +# CONFIG_MACH_QSD8X50_FFA is not set +# CONFIG_HTC_HEADSET is not set +CONFIG_HTC_35MM_JACK=y +# CONFIG_HTC_BATTCHG is not set +CONFIG_HTC_PWRSPLY=y +# CONFIG_HTC_PWRSINK is not set 
+# CONFIG_HTC_POWER_COLLAPSE_MAGIC is not set +# CONFIG_HTC_ONMODE_CHARGING is not set +CONFIG_QSD_SVS=y +CONFIG_QSD_PMIC_DEFAULT_DCDC1=1275 +CONFIG_CACHE_FLUSH_RANGE_LIMIT=0x40000 +CONFIG_PHYS_OFFSET=0x20000000 +CONFIG_MSM7X00A_USE_GP_TIMER=y +# CONFIG_MSM7X00A_USE_DG_TIMER is not set +CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE_SUSPEND=y +# CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE is not set +# CONFIG_MSM7X00A_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_SLEEP_MODE=0 +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE_SUSPEND is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE=y +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE=1 +CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME=50000000 +CONFIG_MSM7X00A_IDLE_SPIN_TIME=80000 +CONFIG_MSM_IDLE_STATS=y +CONFIG_MSM_IDLE_STATS_FIRST_BUCKET=62500 +CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT=2 +CONFIG_MSM_IDLE_STATS_BUCKET_COUNT=10 +CONFIG_MSM_FIQ_SUPPORT=y +# CONFIG_MSM_SERIAL_DEBUGGER is not set +CONFIG_MSM_SMD=y +CONFIG_MSM_GPIOMUX=y +CONFIG_MSM_DAL=y +CONFIG_MSM_ONCRPCROUTER=y +CONFIG_MSM_CPU_FREQ_SET_MIN_MAX=y +CONFIG_MSM_CPU_FREQ_MAX=998400 +CONFIG_MSM_CPU_FREQ_MIN=245000 +# CONFIG_AXI_SCREEN_POLICY is not set +CONFIG_MSM_CPU_AVS=y +CONFIG_MSM_AVS_HW=y +CONFIG_HTC_ACOUSTIC_QSD=y +CONFIG_MSM_QDSP6=y +CONFIG_WIFI_CONTROL_FUNC=y +# CONFIG_WIFI_MEM_PREALLOC is not set +CONFIG_ARCH_MSM_FLASHLIGHT=y +# CONFIG_MICROP_COMMON is not set +# CONFIG_HTC_HEADSET_MGR is not set +# CONFIG_VIRTUAL_KPANIC_PARTITION is not set + +# +# System MMU +# + +# +# Processor Type +# +CONFIG_CPU_32v6K=y +CONFIG_CPU_V7=y +CONFIG_CPU_32v7=y +CONFIG_CPU_ABRT_EV7=y +CONFIG_CPU_PABRT_V7=y +CONFIG_CPU_CACHE_V7=y +CONFIG_CPU_CACHE_VIPT=y +CONFIG_CPU_COPY_V6=y +CONFIG_CPU_TLB_V7=y +CONFIG_VERIFY_PERMISSION_FAULT=y +CONFIG_CPU_HAS_ASID=y +CONFIG_CPU_CP15=y +CONFIG_CPU_CP15_MMU=y + +# +# Processor Features +# +CONFIG_ARM_THUMB=y +CONFIG_ARM_THUMBEE=y +# CONFIG_SWP_EMULATE is not set +# CONFIG_CPU_ICACHE_DISABLE is not set +# CONFIG_CPU_DCACHE_DISABLE is not set +# CONFIG_CPU_CACHE_ERR_REPORT is not set +# CONFIG_CPU_BPREDICT_DISABLE is not set +CONFIG_ARM_L1_CACHE_SHIFT=5 +CONFIG_ARM_DMA_MEM_BUFFERABLE=y +# CONFIG_VCM is not set +# CONFIG_STRICT_MEMORY_RWX is not set +# CONFIG_RESERVE_FIRST_PAGE is not set +CONFIG_CPU_HAS_PMU=y +CONFIG_ARM_ERRATA_430973=y +CONFIG_ARM_ERRATA_458693=y +CONFIG_ARM_ERRATA_460075=y +CONFIG_ARM_ERRATA_743622=y +# CONFIG_KSAPI is not set +# CONFIG_FIQ_DEBUGGER is not set + +# +# Bus support +# +# CONFIG_PCI_SYSCALL is not set +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Kernel Features +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_VMSPLIT_3G=y +# CONFIG_VMSPLIT_2G is not set +# CONFIG_VMSPLIT_1G is not set +CONFIG_PAGE_OFFSET=0xC0000000 +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_HZ=100 +# CONFIG_THUMB2_KERNEL is not set +CONFIG_AEABI=y +# CONFIG_OABI_COMPAT is not set +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set +# CONFIG_HIGHMEM is not set +CONFIG_VMALLOC_RESERVE=0x08000000 +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +CONFIG_FLATMEM=y 
+CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_VIRT_TO_BUS=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 +CONFIG_NEED_PER_CPU_KM=y +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_ALIGNMENT_TRAP=y +# CONFIG_UACCESS_WITH_MEMCPY is not set +# CONFIG_SECCOMP is not set +# CONFIG_CC_STACKPROTECTOR is not set +# CONFIG_DEPRECATED_PARAM_STRUCT is not set +# CONFIG_CP_ACCESS is not set + +# +# Boot options +# +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_CMDLINE="mem=64M console=ttyMSM0" +# CONFIG_CMDLINE_FORCE is not set +# CONFIG_XIP_KERNEL is not set +# CONFIG_KEXEC is not set +# CONFIG_CRASH_DUMP is not set +# CONFIG_AUTO_ZRELADDR is not set + +# +# CPU Power Management +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TABLE=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_STAT_DETAILS=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2 is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_INTERACTIVE=y +# CONFIG_CPU_FREQ_GOV_SMARTASS2 is not set +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +# CONFIG_CPU_IDLE is not set +CONFIG_CPU_FREQ_MSM=y + +# +# Floating point emulation +# + +# +# At least one emulation must be selected +# +CONFIG_VFP=y +CONFIG_VFPv3=y +CONFIG_NEON=y + +# +# Userspace binary formats +# +CONFIG_BINFMT_ELF=y +CONFIG_HAVE_AOUT=y +# CONFIG_BINFMT_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options +# +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_SLEEP=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HAS_WAKELOCK=y +CONFIG_HAS_EARLYSUSPEND=y +CONFIG_WAKELOCK=y +CONFIG_WAKELOCK_STAT=y +CONFIG_USER_WAKELOCK=y +CONFIG_EARLYSUSPEND=y +# CONFIG_NO_USER_SPACE_SCREEN_ACCESS_CONTROL is not set +CONFIG_FB_EARLYSUSPEND=y +# CONFIG_APM_EMULATION is not set +# CONFIG_PM_RUNTIME is not set +CONFIG_PM_OPS=y +# CONFIG_SUSPEND_TIME is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=y +CONFIG_NET_KEY=y +# CONFIG_NET_KEY_MIGRATE is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +CONFIG_INET_ESP=y +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is 
not set +CONFIG_IPV6=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +# CONFIG_IPV6_ROUTE_INFO is not set +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_INET6_XFRM_TUNNEL=y +CONFIG_INET6_TUNNEL=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +CONFIG_IPV6_SIT=y +# CONFIG_IPV6_SIT_6RD is not set +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=y +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +# CONFIG_IPV6_MROUTE is not set +# CONFIG_NETLABEL is not set +# CONFIG_ANDROID_PARANOID_NETWORK is not set +CONFIG_NET_ACTIVITY_STATS=y +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_NETLINK=y +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_TPROXY=y +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=y +CONFIG_NETFILTER_XT_CONNMARK=y + +# +# Xtables targets +# +# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +# CONFIG_NETFILTER_XT_TARGET_CT is not set +# CONFIG_NETFILTER_XT_TARGET_DSCP is not set +CONFIG_NETFILTER_XT_TARGET_HL=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LED=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set +# CONFIG_NETFILTER_XT_TARGET_TEE is not set +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set +# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set + +# +# Xtables matches +# +# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +# CONFIG_NETFILTER_XT_MATCH_CPU is not set +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +# CONFIG_NETFILTER_XT_MATCH_DSCP is not set +# CONFIG_NETFILTER_XT_MATCH_ESP is not set +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_HL=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set +# CONFIG_NETFILTER_XT_MATCH_OSF is not set +# CONFIG_NETFILTER_XT_MATCH_OWNER is not set +CONFIG_NETFILTER_XT_MATCH_POLICY=y +# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y 
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set +# CONFIG_NETFILTER_XT_MATCH_REALM is not set +# CONFIG_NETFILTER_XT_MATCH_RECENT is not set +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +# CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_CONNTRACK_PROC_COMPAT=y +# CONFIG_IP_NF_QUEUE is not set +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_ADDRTYPE=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_LOG=y +# CONFIG_IP_NF_TARGET_ULOG is not set +CONFIG_NF_NAT=y +CONFIG_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +# CONFIG_NF_NAT_SNMP_BASIC is not set +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_GRE=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_FTP=y +CONFIG_NF_NAT_IRC=y +CONFIG_NF_NAT_TFTP=y +CONFIG_NF_NAT_AMANDA=y +CONFIG_NF_NAT_PPTP=y +CONFIG_NF_NAT_H323=y +CONFIG_NF_NAT_SIP=y +CONFIG_IP_NF_MANGLE=y +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +# CONFIG_IP_NF_TARGET_ECN is not set +# CONFIG_IP_NF_TARGET_TTL is not set +CONFIG_IP_NF_RAW=y +# CONFIG_IP_NF_SECURITY is not set +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV6=y +CONFIG_NF_CONNTRACK_IPV6=y +# CONFIG_IP6_NF_QUEUE is not set +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_EUI64=y +CONFIG_IP6_NF_MATCH_FRAG=y +CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +CONFIG_IP6_NF_MATCH_RT=y +CONFIG_IP6_NF_TARGET_HL=y +CONFIG_IP6_NF_TARGET_LOG=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +# CONFIG_IP6_NF_SECURITY is not set +# CONFIG_BRIDGE_NF_EBTABLES is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +CONFIG_STP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +# CONFIG_NET_SCH_CBQ is not set +CONFIG_NET_SCH_HTB=y +# CONFIG_NET_SCH_HFSC is not set +# CONFIG_NET_SCH_PRIO is not set +# CONFIG_NET_SCH_MULTIQ is not set +# CONFIG_NET_SCH_RED is not set +# CONFIG_NET_SCH_SFQ is not set +# CONFIG_NET_SCH_TEQL is not set +# CONFIG_NET_SCH_TBF is not set +# CONFIG_NET_SCH_GRED is not set +# CONFIG_NET_SCH_DSMARK is not set +# CONFIG_NET_SCH_NETEM is not set +# CONFIG_NET_SCH_DRR is not set +CONFIG_NET_SCH_INGRESS=y + +# +# Classification +# +CONFIG_NET_CLS=y +# CONFIG_NET_CLS_BASIC is not set +# CONFIG_NET_CLS_TCINDEX is not set +# CONFIG_NET_CLS_ROUTE4 is not set +# CONFIG_NET_CLS_FW is not set +CONFIG_NET_CLS_U32=y +# CONFIG_CLS_U32_PERF is not set +# CONFIG_CLS_U32_MARK is not set +# 
CONFIG_NET_CLS_RSVP is not set +# CONFIG_NET_CLS_RSVP6 is not set +# CONFIG_NET_CLS_FLOW is not set +# CONFIG_NET_CLS_CGROUP is not set +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +# CONFIG_NET_EMATCH_CMP is not set +# CONFIG_NET_EMATCH_NBYTE is not set +CONFIG_NET_EMATCH_U32=y +# CONFIG_NET_EMATCH_META is not set +# CONFIG_NET_EMATCH_TEXT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=y +# CONFIG_GACT_PROB is not set +CONFIG_NET_ACT_MIRRED=y +# CONFIG_NET_ACT_IPT is not set +# CONFIG_NET_ACT_NAT is not set +# CONFIG_NET_ACT_PEDIT is not set +# CONFIG_NET_ACT_SIMP is not set +# CONFIG_NET_ACT_SKBEDIT is not set +# CONFIG_NET_ACT_CSUM is not set +# CONFIG_NET_CLS_IND is not set +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set +# CONFIG_BATMAN_ADV is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NET_DROP_MONITOR is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +CONFIG_BT=y +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +# CONFIG_BT_BNEP_MC_FILTER is not set +# CONFIG_BT_BNEP_PROTO_FILTER is not set +CONFIG_BT_HIDP=y + +# +# Bluetooth device drivers +# +# CONFIG_BT_HCIBTSDIO is not set +CONFIG_BT_HCIUART=y +CONFIG_BT_HCIUART_H4=y +# CONFIG_BT_HCIUART_BCSP is not set +# CONFIG_BT_HCIUART_ATH3K is not set +CONFIG_BT_HCIUART_LL=y +# CONFIG_BT_HCIVHCI is not set +# CONFIG_BT_MRVL is not set +# CONFIG_AF_RXRPC is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_CFG80211=y +CONFIG_NL80211_TESTMODE=y +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_REG_DEBUG is not set +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +# CONFIG_CFG80211_INTERNAL_REGDB is not set +CONFIG_CFG80211_WEXT=y +CONFIG_WIRELESS_EXT_SYSFS=y +# CONFIG_LIB80211 is not set +CONFIG_CFG80211_ALLOW_RECONNECT=y +# CONFIG_MAC80211 is not set + +# +# Some wireless drivers require a rate control algorithm +# +# CONFIG_WIMAX is not set +CONFIG_RFKILL=y +# CONFIG_RFKILL_PM is not set +CONFIG_RFKILL_LEDS=y +# CONFIG_RFKILL_INPUT is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="" +CONFIG_DEVTMPFS=y +# CONFIG_DEVTMPFS_MOUNT is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +CONFIG_GENLOCK=y +CONFIG_GENLOCK_MISCDEVICE=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is 
not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +CONFIG_MTD_MSM_NAND=y +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +CONFIG_MTD_NAND_IDS=y +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_UBI is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=y + +# +# DRBD disabled because PROC_FS, INET or CONNECTOR not selected +# +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_MG_DISK is not set +# CONFIG_BLK_DEV_RBD is not set +CONFIG_MISC_DEVICES=y +# CONFIG_AD525X_DPOT is not set +CONFIG_ANDROID_PMEM=y +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +CONFIG_KERNEL_DEBUGGER_CORE=y +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1780 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_SENSORS_AK8975 is not set +CONFIG_SENSORS_AKM8973=y +# CONFIG_SENSORS_AKM8976 is not set +CONFIG_VP_A1026=y +# CONFIG_DS1682 is not set +CONFIG_UID_STAT=y +# CONFIG_BMP085 is not set +# CONFIG_WL127X_RFKILL is not set +# CONFIG_APANIC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_IWMC3200TOP is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_ATA is not set +CONFIG_MD=y +# CONFIG_BLK_DEV_MD is not set +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=y +# CONFIG_DM_SNAPSHOT is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_RAID is not set +# CONFIG_DM_ZERO is not set +# CONFIG_DM_MULTIPATH is not set +# CONFIG_DM_DELAY is not set +CONFIG_DM_UEVENT=y +CONFIG_NETDEVICES=y +CONFIG_IFB=y +CONFIG_DUMMY=y +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +CONFIG_TUN=y +# CONFIG_VETH is not set +CONFIG_MII=y +# CONFIG_PHYLIB is not set +CONFIG_NET_ETHERNET=y +# CONFIG_AX88796 is not set +CONFIG_SMC91X=y +# CONFIG_DM9000 is not set +# CONFIG_ETHOC is not set +CONFIG_SMC911X=y +# CONFIG_SMSC911X is not set +# CONFIG_DNET is not set +# CONFIG_IBM_NEW_EMAC_ZMII is not set +# CONFIG_IBM_NEW_EMAC_RGMII is not set +# CONFIG_IBM_NEW_EMAC_TAH is not set +# CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# 
CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set +# CONFIG_B44 is not set +# CONFIG_KS8851_MLL is not set +CONFIG_NETDEV_1000=y +# CONFIG_STMMAC_ETH is not set +CONFIG_NETDEV_10000=y +CONFIG_WLAN=y +# CONFIG_ATH_COMMON is not set +# CONFIG_BCM4329 is not set +CONFIG_BCMDHD=m +CONFIG_BCMDHD_FW_PATH="/vendor/firmware/fw_bcmdhd.bin" +CONFIG_BCMDHD_NVRAM_PATH="/proc/calibration" +# CONFIG_DHD_USE_STATIC_BUF is not set +# CONFIG_DHD_USE_SCHED_SCAN is not set +# CONFIG_DHD_ENABLE_P2P is not set +# CONFIG_HOSTAP is not set +# CONFIG_IWM is not set +# CONFIG_LIBERTAS is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set + +# +# CAIF transport drivers +# +CONFIG_PPP=y +# CONFIG_PPP_MULTILINK is not set +# CONFIG_PPP_FILTER is not set +CONFIG_PPP_ASYNC=y +# CONFIG_PPP_SYNC_TTY is not set +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_MPPE=y +# CONFIG_PPPOE is not set +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +# CONFIG_SLIP is not set +CONFIG_SLHC=y +# CONFIG_NETCONSOLE is not set +CONFIG_MSM_RMNET=y +# CONFIG_MSM_RMNET_DEBUG is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +CONFIG_GAN_ETH=y +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set +CONFIG_INPUT_KEYRESET=y + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ATMEL is not set +# CONFIG_TOUCHSCREEN_COMPATIBLE_REPORT is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_I2C is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_ELAN_I2C_8232 is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_QT602240 is not set +# CONFIG_TOUCHSCREEN_MSM is not set +CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI=y +CONFIG_TOUCHSCREEN_DUPLICATED_FILTER=y +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_W90X900 is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATI_REMOTE is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +CONFIG_INPUT_KEYCHORD=y +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# 
CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_CMA3000 is not set +CONFIG_INPUT_CAPELLA_CM3602=y +# CONFIG_INPUT_CAPELLA_CM3602_HTC is not set +# CONFIG_LIGHTSENSOR_MICROP is not set +# CONFIG_INPUT_OPTICALJOYSTICK is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_N_GSM is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_SERIAL_MSM=y +CONFIG_SERIAL_MSM_CONSOLE=y +# CONFIG_SERIAL_MSM_CLOCK_CONTROL is not set +CONFIG_SERIAL_MSM_HS=y +CONFIG_SERIAL_BCM_BT_LPM=y +# CONFIG_SERIAL_TIMBERDALE is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +CONFIG_UNIX98_PTYS=y +CONFIG_DEVPTS_MULTIPLE_INSTANCES=y +# CONFIG_LEGACY_PTYS is not set +# CONFIG_TTY_PRINTK is not set +# CONFIG_HVC_DCC is not set +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_DCC_TTY is not set +# CONFIG_RAMOOPS is not set +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +# CONFIG_I2C_CHARDEV is not set +# CONFIG_I2C_MUX is not set +CONFIG_I2C_HELPER_AUTO=y + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE is not set +# CONFIG_I2C_GPIO is not set +CONFIG_I2C_MSM=y +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_SPI is not set + +# +# PPS support +# +# CONFIG_PPS is not set + +# +# PPS generators support +# +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set + +# +# Memory mapped GPIO expanders: +# +# CONFIG_GPIO_BASIC_MMIO is not set +# CONFIG_GPIO_IT8761E is not set + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_SX150X is not set +# CONFIG_GPIO_ADP5588 is not set + +# +# PCI GPIO expanders: +# + +# +# SPI GPIO expanders: +# + +# +# AC97 GPIO expanders: +# + +# +# MODULbus GPIO expanders: +# +CONFIG_W1=y + +# +# 1-wire Bus Masters +# +CONFIG_W1_MASTER_DS2482=y +# CONFIG_W1_MASTER_DS1WM is not set +# CONFIG_W1_MASTER_GPIO is not set + +# +# 1-wire Slaves +# +# CONFIG_W1_SLAVE_THERM is not set +# CONFIG_W1_SLAVE_SMEM is not set +# CONFIG_W1_SLAVE_DS2423 is not set +# CONFIG_W1_SLAVE_DS2431 is not set +# CONFIG_W1_SLAVE_DS2433 is not set +# CONFIG_W1_SLAVE_DS2760 is not set +# CONFIG_W1_SLAVE_BQ27000 is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2782 is not set +CONFIG_BATTERY_DS2784=y +# CONFIG_BATTERY_BQ20Z75 is not set +# CONFIG_BATTERY_BQ27x00 is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set 
+# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set +CONFIG_MFD_SUPPORT=y +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_ASIC3 is not set +# CONFIG_HTC_EGPIO is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_TPS65200 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_MFD_STMPE is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_T7L66XB is not set +# CONFIG_MFD_TC6387XB is not set +# CONFIG_MFD_TC6393XB is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_WL1273_CORE is not set +CONFIG_REGULATOR=y +CONFIG_REGULATOR_DEBUG=y +# CONFIG_REGULATOR_DUMMY is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_BQ24022 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +CONFIG_REGULATOR_TPS65023=y +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_AD5398 is not set +CONFIG_MEDIA_SUPPORT=y + +# +# Multimedia core support +# +# CONFIG_VIDEO_DEV is not set +# CONFIG_DVB_CORE is not set +# CONFIG_VIDEO_MEDIA is not set + +# +# Multimedia drivers +# +CONFIG_RC_CORE=y +CONFIG_LIRC=y +CONFIG_RC_MAP=y +CONFIG_IR_NEC_DECODER=y +CONFIG_IR_RC5_DECODER=y +CONFIG_IR_RC6_DECODER=y +CONFIG_IR_JVC_DECODER=y +CONFIG_IR_SONY_DECODER=y +CONFIG_IR_RC5_SZ_DECODER=y +CONFIG_IR_LIRC_CODEC=y +# CONFIG_IR_IMON is not set +# CONFIG_IR_MCEUSB is not set +# CONFIG_IR_STREAMZAP is not set +# CONFIG_RC_LOOPBACK is not set + +# +# Qualcomm MSM Camera And Video +# +CONFIG_MSM_CAMERA=y +CONFIG_720P_CAMERA=y +# CONFIG_MSM_CAMERA_DEBUG is not set + +# +# Camera Sensor Selection +# +# CONFIG_MT9T013 is not set +# CONFIG_MT9D112 is not set +# CONFIG_MT9P012 is not set +CONFIG_S5K3E2FX=y +# CONFIG_S5K6AAFX is not set +# CONFIG_OV8810 is not set +# CONFIG_OV9665 is not set +# CONFIG_S5K3H1GX is not set + +# +# Graphics support +# +# CONFIG_DRM is not set +CONFIG_ION=y +CONFIG_ION_MSM=y +CONFIG_MSM_KGSL=y +# CONFIG_MSM_KGSL_CFF_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX is not set +# CONFIG_KGSL_PER_PROCESS_PAGE_TABLE is not set +CONFIG_MSM_KGSL_PAGE_TABLE_SIZE=0xFFF0000 +CONFIG_MSM_KGSL_MMU_PAGE_FAULT=y +# CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES is not set +# CONFIG_VGASTATE is not set +CONFIG_VIDEO_OUTPUT_CONTROL=y +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# 
CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_WMT_GE_ROPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_FB_MSM=y +CONFIG_FB_MSM_LEGACY_MDP=y +CONFIG_FB_MSM_MDP_PPP=y +CONFIG_FB_MSM_LCDC=y +# CONFIG_FB_MSM_MDDI is not set +# CONFIG_MSM_HDMI is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set +# CONFIG_LOGO is not set +# CONFIG_SOUND is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +# CONFIG_HIDRAW is not set +CONFIG_UHID=y +# CONFIG_HID_PID is not set + +# +# Special HID drivers +# +CONFIG_HID_APPLE=y +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_WACOM is not set +CONFIG_USB_SUPPORT=y +CONFIG_USB_ARCH_HAS_HCD=y +# CONFIG_USB_ARCH_HAS_OHCI is not set +CONFIG_USB_ARCH_HAS_EHCI=y +# CONFIG_USB is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_MUSB_HDRC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_GADGET_SELECTED=y +# CONFIG_USB_GADGET_R8A66597 is not set +# CONFIG_USB_GADGET_PXA_U2O is not set +# CONFIG_USB_GADGET_M66592 is not set +# CONFIG_USB_GADGET_CI13XXX_MSM is not set +CONFIG_USB_GADGET_MSM_72K=y +CONFIG_USB_MSM_72K=y +# CONFIG_USB_MSM_72K_HTC is not set +CONFIG_USB_GADGET_DUALSPEED=y +# CONFIG_USB_ZERO is not set +# CONFIG_USB_ETH is not set +# CONFIG_USB_G_NCM is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_FILE_STORAGE is not set +# CONFIG_USB_MASS_STORAGE is not set +# CONFIG_USB_G_SERIAL is not set +# CONFIG_USB_G_PRINTER is not set +CONFIG_USB_ANDROID=y +# CONFIG_USB_ANDROID_ACM is not set +CONFIG_USB_ANDROID_ADB=y +CONFIG_USB_ANDROID_DIAG=y +CONFIG_USB_ANDROID_MASS_STORAGE=y +# CONFIG_USB_ANDROID_MTP is not set +CONFIG_USB_ANDROID_RNDIS=y +CONFIG_USB_ANDROID_RNDIS_WCEIS=y +CONFIG_USB_ANDROID_ACCESSORY=y +CONFIG_USB_CSW_HACK=y +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set + +# +# OTG and related infrastructure +# +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ULPI is not set +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_MSM_OTG_72K is not set +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_UNSAFE_RESUME=y +# CONFIG_MMC_CLKGATE is not set +CONFIG_MMC_EMBEDDED_SDIO=y +CONFIG_MMC_PARANOID_SD_INIT=y + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=8 +# CONFIG_MMC_BLOCK_BOUNCE is not set +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_SDHCI is not set +CONFIG_MMC_MSM=y +# CONFIG_MMC_DW is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y + +# +# LED drivers +# +# CONFIG_LEDS_PCA9532 is not set +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_GPIO_PLATFORM=y +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_LP5521 is not set +# 
CONFIG_LEDS_LP5523 is not set +CONFIG_LEDS_CPLD=y +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_LT3593 is not set +CONFIG_LEDS_TRIGGERS=y + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set +CONFIG_LEDS_TRIGGER_SLEEP=y + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_NFC_DEVICES is not set +CONFIG_SWITCH=y +CONFIG_SWITCH_GPIO=y +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +# CONFIG_RTC_INTF_SYSFS is not set +# CONFIG_RTC_INTF_PROC is not set +# CONFIG_RTC_INTF_DEV is not set +CONFIG_RTC_INTF_ALARM=y +CONFIG_RTC_INTF_ALARM_DEV=y +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set + +# +# SPI RTC drivers +# + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_CMOS is not set +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_MSM7X00A=y +# CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set +CONFIG_STAGING=y +# CONFIG_STAGING_EXCLUDE_BUILD is not set +# CONFIG_ECHO is not set +# CONFIG_BRCM80211 is not set +# CONFIG_COMEDI is not set + +# +# Android +# +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_LOGGER=y +CONFIG_ANDROID_RAM_CONSOLE=y +CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE=128 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE=16 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE=8 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL=0x11d +# CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT is not set +CONFIG_ANDROID_TIMED_OUTPUT=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +# CONFIG_POHMELFS is not set +# CONFIG_IIO is not set +CONFIG_XVMALLOC=y +CONFIG_ZRAM=y +# CONFIG_ZRAM_DEBUG is not set +# CONFIG_FB_SM7XX is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_ST_BT is not set +# CONFIG_LIRC_STAGING is not set +CONFIG_MACH_NO_WESTBRIDGE=y +# CONFIG_ATH6K_LEGACY is not set +# CONFIG_FT1000 is not set + +# +# Speakup console speech +# +# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set + +# +# File systems 
+# +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT23=y +CONFIG_EXT4_FS_XATTR=y +# CONFIG_EXT4_FS_POSIX_ACL is not set +# CONFIG_EXT4_FS_SECURITY is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_FS_POSIX_ACL is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +# CONFIG_QUOTA is not set +# CONFIG_QUOTACTL is not set +# CONFIG_AUTOFS4_FS is not set +CONFIG_FUSE_FS=y +# CONFIG_CUSE is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +# CONFIG_MSDOS_FS is not set +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_YAFFS_FS=y +CONFIG_YAFFS_YAFFS1=y +# CONFIG_YAFFS_9BYTE_TAGS is not set +# CONFIG_YAFFS_DOES_ECC is not set +CONFIG_YAFFS_YAFFS2=y +CONFIG_YAFFS_AUTO_YAFFS2=y +CONFIG_YAFFS_DISABLE_TAGS_ECC=y +# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set +# CONFIG_YAFFS_EMPTY_LOST_AND_FOUND is not set +# CONFIG_YAFFS_DISABLE_BLOCK_REFRESHING is not set +# CONFIG_YAFFS_DISABLE_BACKGROUND is not set +CONFIG_YAFFS_XATTR=y +# CONFIG_JFFS2_FS is not set +# CONFIG_LOGFS is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +# CONFIG_NFS_FS is not set +# CONFIG_NFSD is not set +# CONFIG_CEPH_FS is not set +CONFIG_CIFS=y +# CONFIG_CIFS_STATS is not set +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_ACL is not set +# CONFIG_CIFS_EXPERIMENTAL is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set 
+# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +CONFIG_NLS_UTF8=y + +# +# Kernel hacking +# +CONFIG_PRINTK_TIME=y +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SHIRQ is not set +# CONFIG_LOCKUP_DETECTOR is not set +# CONFIG_HARDLOCKUP_DETECTOR is not set +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +CONFIG_SCHED_DEBUG=y +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_STATS is not set +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_BKL is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_SPARSE_RCU_POINTER is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set +CONFIG_FRAME_POINTER=y +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# CONFIG_LKDTM is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +# CONFIG_SYSCTL_SYSCALL_CHECK is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_EVENT_POWER_TRACING_DEPRECATED=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# 
CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_STRICT_DEVMEM is not set +# CONFIG_ARM_UNWIND is not set +CONFIG_DEBUG_USER=y +CONFIG_DEBUG_ERRORS=y +CONFIG_DEBUG_STACK_USAGE=y +# CONFIG_DEBUG_LL is not set +# CONFIG_OC_ETM is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +# CONFIG_SECURITYFS is not set +# CONFIG_SECURITY_NETWORK is not set +# CONFIG_SECURITY_PATH is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_IMA is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_GHASH is not set +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=y +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_TWOFISH_COMMON=y + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set +CONFIG_CRYPTO_HW=y +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +# CONFIG_XZ_DEC is not set +# CONFIG_XZ_DEC_BCJ is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y 
+CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=y +CONFIG_TEXTSEARCH_BM=y +CONFIG_TEXTSEARCH_FSM=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/arm/configs/evervolv_ubuntuphone_supersonic_defconfig b/arch/arm/configs/evervolv_ubuntuphone_supersonic_defconfig new file mode 100644 index 0000000000000..8b65603858c93 --- /dev/null +++ b/arch/arm/configs/evervolv_ubuntuphone_supersonic_defconfig @@ -0,0 +1,2215 @@ +# +# Automatically generated make config: don't edit +# Linux/arm 2.6.38.8 Kernel Configuration +# Sat Feb 23 23:53:02 2013 +# +CONFIG_ARM=y +CONFIG_SYS_SUPPORTS_APM_EMULATION=y +CONFIG_GENERIC_GPIO=y +# CONFIG_ARCH_USES_GETTIMEOFFSET is not set +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_HAVE_PROC_CPU=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_ARCH_HAS_CPUFREQ=y +CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_VECTORS_BASE=0xffff0000 +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y +CONFIG_HAVE_IRQ_WORK=y + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_LOCK_KERNEL=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="" +CONFIG_LOCALVERSION="-evervolv-up" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +# CONFIG_KERNEL_GZIP is not set +# CONFIG_KERNEL_LZMA is not set +CONFIG_KERNEL_XZ=y +# CONFIG_KERNEL_LZO is not set +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set +CONFIG_HAVE_GENERIC_HARDIRQS=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_HARDIRQS=y +# CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED is not set +CONFIG_HAVE_SPARSE_IRQ=y +# CONFIG_GENERIC_PENDING_IRQ is not set +# CONFIG_AUTO_IRQ_AFFINITY is not set +# CONFIG_IRQ_PER_CPU is not set +# CONFIG_SPARSE_IRQ is not set + +# +# RCU Subsystem +# +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_TINY_RCU is not set +CONFIG_TINY_PREEMPT_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_TRACE is not set +# CONFIG_TREE_RCU_TRACE is not set +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=1 +CONFIG_RCU_BOOST_DELAY=500 +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_CGROUPS=y +# CONFIG_CGROUP_DEBUG is not set +# CONFIG_CGROUP_NS is not set +CONFIG_CGROUP_FREEZER=y +# CONFIG_CGROUP_DEVICE is not set +# CONFIG_CPUSETS is not set +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +# CONFIG_CGROUP_MEM_RES_CTLR is not set +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_BLK_CGROUP=y +# CONFIG_DEBUG_BLK_CGROUP is not set +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_EXPERT=y +CONFIG_EMBEDDED=y +CONFIG_UID16=y +# CONFIG_SYSCTL_SYSCALL is not set 
+CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +# CONFIG_ELF_CORE is not set +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_ASHMEM=y +CONFIG_AIO=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +# CONFIG_PERF_EVENTS is not set +# CONFIG_PERF_COUNTERS is not set +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_COMPAT_BRK=y +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +CONFIG_TRACEPOINTS=y +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_DMA_API_DEBUG=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +CONFIG_LBDAF=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_THROTTLING=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +CONFIG_IOSCHED_BFQ=y +CONFIG_CGROUP_BFQIO=y +CONFIG_DEFAULT_BFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="bfq" +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +# CONFIG_INLINE_SPIN_UNLOCK is not set +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_READ_TRYLOCK is not set +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +# CONFIG_INLINE_READ_UNLOCK is not set +# CONFIG_INLINE_READ_UNLOCK_BH is not set +# CONFIG_INLINE_READ_UNLOCK_IRQ is not set +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_TRYLOCK is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set +# CONFIG_INLINE_WRITE_UNLOCK is not set +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +# CONFIG_MUTEX_SPIN_ON_OWNER is not set +CONFIG_FREEZER=y + +# +# System Type +# +CONFIG_MMU=y +# CONFIG_ARCH_AAEC2000 is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_REALVIEW is not set +# CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_VEXPRESS is not set +# CONFIG_ARCH_AT91 is not set +# CONFIG_ARCH_BCMRING is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CNS3XXX is not set +# CONFIG_ARCH_GEMINI is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_EP93XX is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_MXS is not set +# CONFIG_ARCH_STMP3XXX is not set +# CONFIG_ARCH_NETX is not set +# CONFIG_ARCH_H720X is not set +# CONFIG_ARCH_IOP13XX is not set +# CONFIG_ARCH_IOP32X is not 
set +# CONFIG_ARCH_IOP33X is not set +# CONFIG_ARCH_IXP23XX is not set +# CONFIG_ARCH_IXP2000 is not set +# CONFIG_ARCH_IXP4XX is not set +# CONFIG_ARCH_DOVE is not set +# CONFIG_ARCH_KIRKWOOD is not set +# CONFIG_ARCH_LOKI is not set +# CONFIG_ARCH_LPC32XX is not set +# CONFIG_ARCH_MV78XX0 is not set +# CONFIG_ARCH_ORION5X is not set +# CONFIG_ARCH_MMP is not set +# CONFIG_ARCH_KS8695 is not set +# CONFIG_ARCH_NS9XXX is not set +# CONFIG_ARCH_W90X900 is not set +# CONFIG_ARCH_NUC93X is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_PNX4008 is not set +# CONFIG_ARCH_PXA is not set +CONFIG_ARCH_MSM=y +# CONFIG_ARCH_SHMOBILE is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_S3C64XX is not set +# CONFIG_ARCH_S5P64X0 is not set +# CONFIG_ARCH_S5P6442 is not set +# CONFIG_ARCH_S5PC100 is not set +# CONFIG_ARCH_S5PV210 is not set +# CONFIG_ARCH_S5PV310 is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_TCC_926 is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_U300 is not set +# CONFIG_ARCH_U8500 is not set +# CONFIG_ARCH_NOMADIK is not set +# CONFIG_ARCH_DAVINCI is not set +# CONFIG_ARCH_OMAP is not set +# CONFIG_PLAT_SPEAR is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_ARCH_MSM7X00A is not set +# CONFIG_ARCH_MSM7X30 is not set +CONFIG_ARCH_QSD8X50=y +# CONFIG_ARCH_MSM8X60 is not set +CONFIG_ARCH_MSM_SCORPION=y +CONFIG_HAS_MSM_DEBUG_UART_PHYS=y +CONFIG_MSM_MDP31=y +# CONFIG_PERFLOCK is not set + +# +# Qualcomm MSM Board Type +# +# CONFIG_MACH_SWORDFISH is not set +CONFIG_MACH_QSD8X50_SURF=y +# CONFIG_MACH_QSD8X50A_ST1_5 is not set +CONFIG_MSM_DEBUG_UART_NONE=y +# CONFIG_MSM_DEBUG_UART1 is not set +# CONFIG_MSM_DEBUG_UART2 is not set +# CONFIG_MSM_DEBUG_UART3 is not set +CONFIG_MSM_PROC_COMM=y +# CONFIG_MACH_MAHIMAHI is not set +CONFIG_MACH_BRAVO_NONE=y +# CONFIG_MACH_BRAVO is not set +# CONFIG_MACH_BRAVOC is not set +# CONFIG_MACH_INCREDIBLE is not set +# CONFIG_MACH_INCREDIBLEC is not set +CONFIG_MACH_SUPERSONIC=y +# CONFIG_MACH_QSD8X50_FFA is not set +# CONFIG_HTC_HEADSET is not set +# CONFIG_HTC_35MM_JACK is not set +CONFIG_HTC_BATTCHG=y +CONFIG_HTC_BATTCHG_SMEM=y +# CONFIG_HTC_PWRSPLY is not set +# CONFIG_HTC_PWRSINK is not set +# CONFIG_HTC_POWER_COLLAPSE_MAGIC is not set +# CONFIG_HTC_ONMODE_CHARGING is not set +CONFIG_QSD_SVS=y +CONFIG_QSD_PMIC_DEFAULT_DCDC1=1275 +CONFIG_CACHE_FLUSH_RANGE_LIMIT=0x40000 +CONFIG_PHYS_OFFSET=0x20000000 +CONFIG_MSM7X00A_USE_GP_TIMER=y +# CONFIG_MSM7X00A_USE_DG_TIMER is not set +CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE_SUSPEND=y +# CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE is not set +# CONFIG_MSM7X00A_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_SLEEP_MODE=0 +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE_SUSPEND is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE=y +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE=1 +CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME=50000000 +CONFIG_MSM7X00A_IDLE_SPIN_TIME=80000 +CONFIG_MSM_IDLE_STATS=y +CONFIG_MSM_IDLE_STATS_FIRST_BUCKET=62500 +CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT=2 +CONFIG_MSM_IDLE_STATS_BUCKET_COUNT=10 +CONFIG_MSM_FIQ_SUPPORT=y +# CONFIG_MSM_SERIAL_DEBUGGER is not set +CONFIG_MSM_SMD=y 
+CONFIG_MSM_GPIOMUX=y +CONFIG_MSM_DAL=y +CONFIG_MSM_ONCRPCROUTER=y +CONFIG_MSM_CPU_FREQ_SET_MIN_MAX=y +CONFIG_MSM_CPU_FREQ_MAX=998400 +CONFIG_MSM_CPU_FREQ_MIN=245000 +# CONFIG_AXI_SCREEN_POLICY is not set +CONFIG_MSM_CPU_AVS=y +CONFIG_MSM_AVS_HW=y +CONFIG_HTC_ACOUSTIC_QSD=y +CONFIG_MSM_QDSP6=y +CONFIG_WIFI_CONTROL_FUNC=y +CONFIG_WIFI_MEM_PREALLOC=y +# CONFIG_ARCH_MSM_FLASHLIGHT is not set +CONFIG_MICROP_COMMON=y +CONFIG_HTC_HEADSET_MGR=y +CONFIG_HTC_HEADSET_GPIO=y +CONFIG_HTC_HEADSET_MICROP=y +# CONFIG_VIRTUAL_KPANIC_PARTITION is not set + +# +# System MMU +# + +# +# Processor Type +# +CONFIG_CPU_32v6K=y +CONFIG_CPU_V7=y +CONFIG_CPU_32v7=y +CONFIG_CPU_ABRT_EV7=y +CONFIG_CPU_PABRT_V7=y +CONFIG_CPU_CACHE_V7=y +CONFIG_CPU_CACHE_VIPT=y +CONFIG_CPU_COPY_V6=y +CONFIG_CPU_TLB_V7=y +CONFIG_VERIFY_PERMISSION_FAULT=y +CONFIG_CPU_HAS_ASID=y +CONFIG_CPU_CP15=y +CONFIG_CPU_CP15_MMU=y + +# +# Processor Features +# +CONFIG_ARM_THUMB=y +CONFIG_ARM_THUMBEE=y +# CONFIG_SWP_EMULATE is not set +# CONFIG_CPU_ICACHE_DISABLE is not set +# CONFIG_CPU_DCACHE_DISABLE is not set +# CONFIG_CPU_CACHE_ERR_REPORT is not set +# CONFIG_CPU_BPREDICT_DISABLE is not set +CONFIG_ARM_L1_CACHE_SHIFT=5 +CONFIG_ARM_DMA_MEM_BUFFERABLE=y +# CONFIG_VCM is not set +# CONFIG_STRICT_MEMORY_RWX is not set +# CONFIG_RESERVE_FIRST_PAGE is not set +CONFIG_CPU_HAS_PMU=y +# CONFIG_ARM_ERRATA_430973 is not set +# CONFIG_ARM_ERRATA_458693 is not set +# CONFIG_ARM_ERRATA_460075 is not set +# CONFIG_ARM_ERRATA_743622 is not set +# CONFIG_KSAPI is not set +# CONFIG_FIQ_DEBUGGER is not set + +# +# Bus support +# +# CONFIG_PCI_SYSCALL is not set +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Kernel Features +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_VMSPLIT_3G=y +# CONFIG_VMSPLIT_2G is not set +# CONFIG_VMSPLIT_1G is not set +CONFIG_PAGE_OFFSET=0xC0000000 +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_HZ=100 +# CONFIG_THUMB2_KERNEL is not set +CONFIG_AEABI=y +# CONFIG_OABI_COMPAT is not set +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set +# CONFIG_HIGHMEM is not set +CONFIG_VMALLOC_RESERVE=0x08000000 +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_COMPACTION is not set +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_VIRT_TO_BUS=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 +CONFIG_NEED_PER_CPU_KM=y +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_ALIGNMENT_TRAP=y +# CONFIG_UACCESS_WITH_MEMCPY is not set +# CONFIG_SECCOMP is not set +# CONFIG_CC_STACKPROTECTOR is not set +# CONFIG_DEPRECATED_PARAM_STRUCT is not set +# CONFIG_CP_ACCESS is not set + +# +# Boot options +# +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_CMDLINE="mem=64M console=ttyMSM0" +# CONFIG_CMDLINE_FORCE is not set +# CONFIG_XIP_KERNEL is not set +# CONFIG_KEXEC is not set +# CONFIG_CRASH_DUMP is not set +# CONFIG_AUTO_ZRELADDR is not set + +# +# CPU Power Management +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TABLE=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_STAT_DETAILS=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# 
CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2 is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_INTERACTIVE=y +# CONFIG_CPU_FREQ_GOV_SMARTASS2 is not set +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +# CONFIG_CPU_IDLE is not set +CONFIG_CPU_FREQ_MSM=y + +# +# Floating point emulation +# + +# +# At least one emulation must be selected +# +CONFIG_VFP=y +CONFIG_VFPv3=y +CONFIG_NEON=y + +# +# Userspace binary formats +# +CONFIG_BINFMT_ELF=y +CONFIG_HAVE_AOUT=y +# CONFIG_BINFMT_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options +# +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_SLEEP=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HAS_WAKELOCK=y +CONFIG_HAS_EARLYSUSPEND=y +CONFIG_WAKELOCK=y +CONFIG_WAKELOCK_STAT=y +CONFIG_USER_WAKELOCK=y +CONFIG_EARLYSUSPEND=y +# CONFIG_NO_USER_SPACE_SCREEN_ACCESS_CONTROL is not set +CONFIG_FB_EARLYSUSPEND=y +# CONFIG_APM_EMULATION is not set +# CONFIG_PM_RUNTIME is not set +CONFIG_PM_OPS=y +# CONFIG_SUSPEND_TIME is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=y +CONFIG_NET_KEY=y +# CONFIG_NET_KEY_MIGRATE is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_ASK_IP_FIB_HASH=y +# CONFIG_IP_FIB_TRIE is not set +CONFIG_IP_FIB_HASH=y +CONFIG_IP_MULTIPLE_TABLES=y +# CONFIG_IP_ROUTE_MULTIPATH is not set +# CONFIG_IP_ROUTE_VERBOSE is not set +# CONFIG_IP_PNP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +CONFIG_INET_XFRM_TUNNEL=y +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +CONFIG_IPV6=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +# CONFIG_IPV6_ROUTE_INFO is not set +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_INET6_XFRM_TUNNEL=y +CONFIG_INET6_TUNNEL=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +CONFIG_IPV6_SIT=y +# CONFIG_IPV6_SIT_6RD is not set +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=y +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +# CONFIG_IPV6_MROUTE is not set +# CONFIG_NETLABEL is not set +# CONFIG_ANDROID_PARANOID_NETWORK is not set +CONFIG_NET_ACTIVITY_STATS=y +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_NETLINK=y +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_EVENTS=y 
+CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +# CONFIG_NF_CONNTRACK_H323 is not set +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_TPROXY=y +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=y +CONFIG_NETFILTER_XT_CONNMARK=y + +# +# Xtables targets +# +# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +# CONFIG_NETFILTER_XT_TARGET_CT is not set +# CONFIG_NETFILTER_XT_TARGET_DSCP is not set +CONFIG_NETFILTER_XT_TARGET_HL=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LED=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set +# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set +# CONFIG_NETFILTER_XT_TARGET_TEE is not set +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +# CONFIG_NETFILTER_XT_TARGET_TRACE is not set +# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set +# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set + +# +# Xtables matches +# +# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +# CONFIG_NETFILTER_XT_MATCH_CPU is not set +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +# CONFIG_NETFILTER_XT_MATCH_DSCP is not set +# CONFIG_NETFILTER_XT_MATCH_ESP is not set +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_HL=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +# CONFIG_NETFILTER_XT_MATCH_OSF is not set +# CONFIG_NETFILTER_XT_MATCH_OWNER is not set +CONFIG_NETFILTER_XT_MATCH_POLICY=y +# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set +# CONFIG_NETFILTER_XT_MATCH_REALM is not set +CONFIG_NETFILTER_XT_MATCH_RECENT=y +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +# CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_CONNTRACK_PROC_COMPAT=y +# CONFIG_IP_NF_QUEUE is not set +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_ADDRTYPE=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_LOG=y +# CONFIG_IP_NF_TARGET_ULOG is not set +CONFIG_NF_NAT=y +CONFIG_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +# CONFIG_NF_NAT_SNMP_BASIC is not set +CONFIG_NF_NAT_PROTO_DCCP=y 
+CONFIG_NF_NAT_PROTO_GRE=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_FTP=y +CONFIG_NF_NAT_IRC=y +CONFIG_NF_NAT_TFTP=y +CONFIG_NF_NAT_AMANDA=y +CONFIG_NF_NAT_PPTP=y +# CONFIG_NF_NAT_H323 is not set +CONFIG_NF_NAT_SIP=y +CONFIG_IP_NF_MANGLE=y +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +# CONFIG_IP_NF_TARGET_ECN is not set +# CONFIG_IP_NF_TARGET_TTL is not set +CONFIG_IP_NF_RAW=y +# CONFIG_IP_NF_SECURITY is not set +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV6=y +CONFIG_NF_CONNTRACK_IPV6=y +# CONFIG_IP6_NF_QUEUE is not set +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_EUI64=y +CONFIG_IP6_NF_MATCH_FRAG=y +CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +CONFIG_IP6_NF_MATCH_RT=y +CONFIG_IP6_NF_TARGET_HL=y +CONFIG_IP6_NF_TARGET_LOG=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +# CONFIG_IP6_NF_SECURITY is not set +# CONFIG_BRIDGE_NF_EBTABLES is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +CONFIG_STP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +# CONFIG_NET_SCH_CBQ is not set +CONFIG_NET_SCH_HTB=y +# CONFIG_NET_SCH_HFSC is not set +# CONFIG_NET_SCH_PRIO is not set +# CONFIG_NET_SCH_MULTIQ is not set +# CONFIG_NET_SCH_RED is not set +# CONFIG_NET_SCH_SFQ is not set +# CONFIG_NET_SCH_TEQL is not set +# CONFIG_NET_SCH_TBF is not set +# CONFIG_NET_SCH_GRED is not set +# CONFIG_NET_SCH_DSMARK is not set +# CONFIG_NET_SCH_NETEM is not set +# CONFIG_NET_SCH_DRR is not set +CONFIG_NET_SCH_INGRESS=y + +# +# Classification +# +CONFIG_NET_CLS=y +# CONFIG_NET_CLS_BASIC is not set +# CONFIG_NET_CLS_TCINDEX is not set +# CONFIG_NET_CLS_ROUTE4 is not set +# CONFIG_NET_CLS_FW is not set +CONFIG_NET_CLS_U32=y +# CONFIG_CLS_U32_PERF is not set +# CONFIG_CLS_U32_MARK is not set +# CONFIG_NET_CLS_RSVP is not set +# CONFIG_NET_CLS_RSVP6 is not set +# CONFIG_NET_CLS_FLOW is not set +# CONFIG_NET_CLS_CGROUP is not set +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +# CONFIG_NET_EMATCH_CMP is not set +# CONFIG_NET_EMATCH_NBYTE is not set +CONFIG_NET_EMATCH_U32=y +# CONFIG_NET_EMATCH_META is not set +# CONFIG_NET_EMATCH_TEXT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=y +# CONFIG_GACT_PROB is not set +CONFIG_NET_ACT_MIRRED=y +# CONFIG_NET_ACT_IPT is not set +# CONFIG_NET_ACT_NAT is not set +# CONFIG_NET_ACT_PEDIT is not set +# CONFIG_NET_ACT_SIMP is not set +# CONFIG_NET_ACT_SKBEDIT is not set +# CONFIG_NET_ACT_CSUM is not set +# CONFIG_NET_CLS_IND is not set +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set +# CONFIG_BATMAN_ADV is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NET_DROP_MONITOR is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +CONFIG_BT=y +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y +CONFIG_BT_RFCOMM=y 
+CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +# CONFIG_BT_BNEP_MC_FILTER is not set +# CONFIG_BT_BNEP_PROTO_FILTER is not set +CONFIG_BT_HIDP=y + +# +# Bluetooth device drivers +# +# CONFIG_BT_HCIBTSDIO is not set +CONFIG_BT_HCIUART=y +CONFIG_BT_HCIUART_H4=y +# CONFIG_BT_HCIUART_BCSP is not set +# CONFIG_BT_HCIUART_ATH3K is not set +CONFIG_BT_HCIUART_LL=y +# CONFIG_BT_HCIVHCI is not set +# CONFIG_BT_MRVL is not set +# CONFIG_AF_RXRPC is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_CFG80211=y +CONFIG_NL80211_TESTMODE=y +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_REG_DEBUG is not set +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +# CONFIG_CFG80211_INTERNAL_REGDB is not set +CONFIG_CFG80211_WEXT=y +CONFIG_WIRELESS_EXT_SYSFS=y +# CONFIG_LIB80211 is not set +CONFIG_CFG80211_ALLOW_RECONNECT=y +# CONFIG_MAC80211 is not set + +# +# Some wireless drivers require a rate control algorithm +# +CONFIG_WIMAX=y +CONFIG_WIMAX_DEBUG_LEVEL=8 +CONFIG_RFKILL=y +# CONFIG_RFKILL_PM is not set +CONFIG_RFKILL_LEDS=y +# CONFIG_RFKILL_INPUT is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="" +CONFIG_DEVTMPFS=y +# CONFIG_DEVTMPFS_MOUNT is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +CONFIG_GENLOCK=y +CONFIG_GENLOCK_MISCDEVICE=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +CONFIG_MTD_MSM_NAND=y +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +CONFIG_MTD_NAND_IDS=y +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_UBI is not set +# 
CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=y + +# +# DRBD disabled because PROC_FS, INET or CONNECTOR not selected +# +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_MG_DISK is not set +# CONFIG_BLK_DEV_RBD is not set +CONFIG_MISC_DEVICES=y +# CONFIG_AD525X_DPOT is not set +CONFIG_ANDROID_PMEM=y +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +CONFIG_KERNEL_DEBUGGER_CORE=y +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1780 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_SENSORS_AK8975 is not set +CONFIG_SENSORS_AKM8973=y +# CONFIG_SENSORS_AKM8976 is not set +CONFIG_VP_A1026=y +# CONFIG_DS1682 is not set +# CONFIG_TI_DAC7512 is not set +CONFIG_UID_STAT=y +# CONFIG_BMP085 is not set +# CONFIG_WL127X_RFKILL is not set +CONFIG_SENSORS_BMA150_SPI=y +# CONFIG_APANIC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_IWMC3200TOP is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_ATA is not set +CONFIG_MD=y +# CONFIG_BLK_DEV_MD is not set +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=y +# CONFIG_DM_SNAPSHOT is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_RAID is not set +# CONFIG_DM_ZERO is not set +# CONFIG_DM_MULTIPATH is not set +# CONFIG_DM_DELAY is not set +CONFIG_DM_UEVENT=y +CONFIG_NETDEVICES=y +# CONFIG_IFB is not set +CONFIG_DUMMY=y +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +CONFIG_TUN=y +# CONFIG_VETH is not set +CONFIG_MII=y +# CONFIG_PHYLIB is not set +# CONFIG_NET_ETHERNET is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set +CONFIG_WLAN=y +# CONFIG_ATH_COMMON is not set +# CONFIG_BCM4329 is not set +CONFIG_BCMDHD=m +CONFIG_BCMDHD_FW_PATH="/vendor/firmware/fw_bcmdhd.bin" +CONFIG_BCMDHD_NVRAM_PATH="/proc/calibration" +# CONFIG_DHD_USE_STATIC_BUF is not set +# CONFIG_DHD_USE_SCHED_SCAN is not set +# CONFIG_DHD_ENABLE_P2P is not set +# CONFIG_HOSTAP is not set +# CONFIG_IWM is not set +# CONFIG_LIBERTAS is not set + +# +# WiMAX Wireless Broadband devices +# + +# +# Enable USB support to see WiMAX USB drivers +# +# CONFIG_WIMAX_I2400M_SDIO is not set +CONFIG_WIMAX_SQN=m +# CONFIG_WAN is not set + +# +# CAIF transport drivers +# +CONFIG_PPP=y +# CONFIG_PPP_MULTILINK is not set +# CONFIG_PPP_FILTER is not set +CONFIG_PPP_ASYNC=y +# CONFIG_PPP_SYNC_TTY is not set +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_MPPE=y +# CONFIG_PPPOE is not set +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +# CONFIG_SLIP is not set +CONFIG_SLHC=y +# CONFIG_NETCONSOLE is not set +CONFIG_MSM_RMNET=y +# CONFIG_MSM_RMNET_DEBUG is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +CONFIG_GAN_ETH=y +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# 
+# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set +CONFIG_INPUT_KEYRESET=y + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_ATMEL=y +CONFIG_TOUCHSCREEN_COMPATIBLE_REPORT=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_I2C is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_ELAN_I2C_8232 is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_QT602240 is not set +# CONFIG_TOUCHSCREEN_MSM is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_W90X900 is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATI_REMOTE is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +CONFIG_INPUT_KEYCHORD=y +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_CAPELLA_CM3602 is not set +# CONFIG_INPUT_CAPELLA_CM3602_HTC is not set +CONFIG_LIGHTSENSOR_MICROP=y +# CONFIG_INPUT_OPTICALJOYSTICK is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_N_GSM is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX3107 is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_SERIAL_MSM=y +CONFIG_SERIAL_MSM_CONSOLE=y +# CONFIG_SERIAL_MSM_CLOCK_CONTROL is not set +CONFIG_SERIAL_MSM_HS=y +CONFIG_SERIAL_BCM_BT_LPM=y +# CONFIG_SERIAL_TIMBERDALE is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +CONFIG_UNIX98_PTYS=y +CONFIG_DEVPTS_MULTIPLE_INSTANCES=y +# CONFIG_LEGACY_PTYS is not set +# CONFIG_TTY_PRINTK is not set +# CONFIG_HVC_DCC is not set +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set 
+# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_DCC_TTY is not set +# CONFIG_RAMOOPS is not set +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +# CONFIG_I2C_CHARDEV is not set +# CONFIG_I2C_MUX is not set +CONFIG_I2C_HELPER_AUTO=y + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE is not set +# CONFIG_I2C_GPIO is not set +CONFIG_I2C_MSM=y +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +CONFIG_SPI_DEBUG=y +CONFIG_SPI_MASTER=y + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_PXA2XX_PCI is not set +CONFIG_SPI_QSD=y +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_DESIGNWARE is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_TLE62X0 is not set + +# +# PPS support +# +# CONFIG_PPS is not set + +# +# PPS generators support +# +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set + +# +# Memory mapped GPIO expanders: +# +# CONFIG_GPIO_BASIC_MMIO is not set +# CONFIG_GPIO_IT8761E is not set + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_SX150X is not set +# CONFIG_GPIO_ADP5588 is not set + +# +# PCI GPIO expanders: +# + +# +# SPI GPIO expanders: +# +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_74X164 is not set + +# +# AC97 GPIO expanders: +# + +# +# MODULbus GPIO expanders: +# +# CONFIG_W1 is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_DS2784 is not set +# CONFIG_BATTERY_BQ20Z75 is not set +# CONFIG_BATTERY_BQ27x00 is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set +CONFIG_MFD_SUPPORT=y +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_ASIC3 is not set +# CONFIG_HTC_EGPIO is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +CONFIG_TPS65200=y +# CONFIG_TWL4030_CORE is not set +# CONFIG_MFD_STMPE is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_T7L66XB is not set +# CONFIG_MFD_TC6387XB is not set +# CONFIG_MFD_TC6393XB is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13XXX is not set +# 
CONFIG_ABX500_CORE is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_WL1273_CORE is not set +CONFIG_REGULATOR=y +CONFIG_REGULATOR_DEBUG=y +# CONFIG_REGULATOR_DUMMY is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_BQ24022 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +CONFIG_REGULATOR_TPS65023=y +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_AD5398 is not set +# CONFIG_REGULATOR_TPS6524X is not set +CONFIG_MEDIA_SUPPORT=y + +# +# Multimedia core support +# +# CONFIG_VIDEO_DEV is not set +# CONFIG_DVB_CORE is not set +# CONFIG_VIDEO_MEDIA is not set + +# +# Multimedia drivers +# +# CONFIG_RC_CORE is not set + +# +# Qualcomm MSM Camera And Video +# +CONFIG_MSM_CAMERA=y +CONFIG_720P_CAMERA=y +# CONFIG_MSM_CAMERA_DEBUG is not set + +# +# Camera Sensor Selection +# +# CONFIG_MT9T013 is not set +# CONFIG_MT9D112 is not set +# CONFIG_MT9P012 is not set +# CONFIG_S5K3E2FX is not set +CONFIG_S5K6AAFX=y +CONFIG_OV8810=y +CONFIG_OV9665=y +CONFIG_S5K3H1GX=y + +# +# Graphics support +# +# CONFIG_DRM is not set +CONFIG_ION=y +CONFIG_ION_MSM=y +CONFIG_MSM_KGSL=y +# CONFIG_MSM_KGSL_CFF_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX is not set +# CONFIG_KGSL_PER_PROCESS_PAGE_TABLE is not set +CONFIG_MSM_KGSL_PAGE_TABLE_SIZE=0xFFF0000 +CONFIG_MSM_KGSL_MMU_PAGE_FAULT=y +# CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES is not set +# CONFIG_VGASTATE is not set +CONFIG_VIDEO_OUTPUT_CONTROL=y +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_WMT_GE_ROPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_FB_MSM=y +CONFIG_FB_MSM_LEGACY_MDP=y +CONFIG_FB_MSM_MDP_PPP=y +CONFIG_FB_MSM_LCDC=y +CONFIG_FB_MSM_MDDI=y +CONFIG_FB_MSM_MDDI_EPSON=y +CONFIG_FB_MSM_MDDI_NOVTEC=y +CONFIG_MSM_HDMI=y +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set +# CONFIG_LOGO is not set +# CONFIG_SOUND is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +# CONFIG_HIDRAW is not set +CONFIG_UHID=y +# CONFIG_HID_PID is not set + +# +# Special HID drivers +# +CONFIG_HID_APPLE=y +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_WACOM is not set +CONFIG_USB_SUPPORT=y +CONFIG_USB_ARCH_HAS_HCD=y +# CONFIG_USB_ARCH_HAS_OHCI is not set +CONFIG_USB_ARCH_HAS_EHCI=y +# CONFIG_USB is not set +# CONFIG_USB_OTG_WHITELIST is not set 
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_MUSB_HDRC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_GADGET_SELECTED=y +# CONFIG_USB_GADGET_R8A66597 is not set +# CONFIG_USB_GADGET_PXA_U2O is not set +# CONFIG_USB_GADGET_M66592 is not set +# CONFIG_USB_GADGET_CI13XXX_MSM is not set +CONFIG_USB_GADGET_MSM_72K=y +CONFIG_USB_MSM_72K=y +CONFIG_USB_MSM_72K_HTC=y +CONFIG_USB_GADGET_DUALSPEED=y +# CONFIG_USB_ZERO is not set +# CONFIG_USB_ETH is not set +# CONFIG_USB_G_NCM is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_FILE_STORAGE is not set +# CONFIG_USB_MASS_STORAGE is not set +# CONFIG_USB_G_SERIAL is not set +# CONFIG_USB_G_PRINTER is not set +CONFIG_USB_ANDROID=y +# CONFIG_USB_ANDROID_ACM is not set +CONFIG_USB_ANDROID_ADB=y +CONFIG_USB_ANDROID_DIAG=y +CONFIG_USB_ANDROID_MASS_STORAGE=y +# CONFIG_USB_ANDROID_MTP is not set +CONFIG_USB_ANDROID_RNDIS=y +CONFIG_USB_ANDROID_RNDIS_WCEIS=y +CONFIG_USB_ANDROID_ACCESSORY=y +CONFIG_USB_CSW_HACK=y +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set +CONFIG_USB_ACCESSORY_DETECT=y +# CONFIG_USB_ACCESSORY_DETECT_BY_ADC is not set +CONFIG_DOCK_ACCESSORY_DETECT=y +# CONFIG_USB_BYPASS_VBUS_NOTIFY is not set + +# +# OTG and related infrastructure +# +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ULPI is not set +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_MSM_OTG_72K is not set +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_UNSAFE_RESUME=y +# CONFIG_MMC_CLKGATE is not set +CONFIG_MMC_EMBEDDED_SDIO=y +CONFIG_MMC_PARANOID_SD_INIT=y + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=8 +# CONFIG_MMC_BLOCK_BOUNCE is not set +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_SDHCI is not set +CONFIG_MMC_MSM=y +# CONFIG_MMC_SPI is not set +# CONFIG_MMC_DW is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y + +# +# LED drivers +# +# CONFIG_LEDS_PCA9532 is not set +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_GPIO_PLATFORM=y +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 is not set +CONFIG_LEDS_CPLD=y +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_LT3593 is not set +CONFIG_LEDS_TRIGGERS=y + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set +CONFIG_LEDS_TRIGGER_SLEEP=y + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_NFC_DEVICES is not set +CONFIG_SWITCH=y +CONFIG_SWITCH_GPIO=y +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +# CONFIG_RTC_INTF_SYSFS is not set +# CONFIG_RTC_INTF_PROC is not set +# CONFIG_RTC_INTF_DEV is not set +CONFIG_RTC_INTF_ALARM=y +CONFIG_RTC_INTF_ALARM_DEV=y +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# 
CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_CMOS is not set +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_MSM7X00A=y +# CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set +CONFIG_STAGING=y +# CONFIG_STAGING_EXCLUDE_BUILD is not set +# CONFIG_ECHO is not set +# CONFIG_BRCM80211 is not set +# CONFIG_COMEDI is not set + +# +# Android +# +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_LOGGER=y +CONFIG_ANDROID_RAM_CONSOLE=y +CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE=128 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE=16 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE=8 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL=0x11d +# CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT is not set +CONFIG_ANDROID_TIMED_OUTPUT=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +# CONFIG_POHMELFS is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_IIO is not set +CONFIG_XVMALLOC=y +CONFIG_ZRAM=y +# CONFIG_ZRAM_DEBUG is not set +# CONFIG_FB_SM7XX is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_ST_BT is not set +# CONFIG_SMB_FS is not set +CONFIG_MACH_NO_WESTBRIDGE=y +# CONFIG_ATH6K_LEGACY is not set +# CONFIG_FT1000 is not set + +# +# Speakup console speech +# +# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set + +# +# File systems +# +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT23=y +CONFIG_EXT4_FS_XATTR=y +# CONFIG_EXT4_FS_POSIX_ACL is not set +# CONFIG_EXT4_FS_SECURITY is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_FS_POSIX_ACL is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +# CONFIG_QUOTA is not set +# CONFIG_QUOTACTL is not set +# 
CONFIG_AUTOFS4_FS is not set +CONFIG_FUSE_FS=y +# CONFIG_CUSE is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +# CONFIG_MSDOS_FS is not set +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_YAFFS_FS=y +CONFIG_YAFFS_YAFFS1=y +# CONFIG_YAFFS_9BYTE_TAGS is not set +# CONFIG_YAFFS_DOES_ECC is not set +CONFIG_YAFFS_YAFFS2=y +CONFIG_YAFFS_AUTO_YAFFS2=y +CONFIG_YAFFS_DISABLE_TAGS_ECC=y +# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set +# CONFIG_YAFFS_EMPTY_LOST_AND_FOUND is not set +# CONFIG_YAFFS_DISABLE_BLOCK_REFRESHING is not set +# CONFIG_YAFFS_DISABLE_BACKGROUND is not set +CONFIG_YAFFS_XATTR=y +# CONFIG_JFFS2_FS is not set +# CONFIG_LOGFS is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +# CONFIG_NFS_FS is not set +# CONFIG_NFSD is not set +# CONFIG_CEPH_FS is not set +CONFIG_CIFS=y +# CONFIG_CIFS_STATS is not set +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_ACL is not set +# CONFIG_CIFS_EXPERIMENTAL is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set 
+CONFIG_NLS_UTF8=y + +# +# Kernel hacking +# +CONFIG_PRINTK_TIME=y +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SHIRQ is not set +# CONFIG_LOCKUP_DETECTOR is not set +# CONFIG_HARDLOCKUP_DETECTOR is not set +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +CONFIG_SCHED_DEBUG=y +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +CONFIG_DEBUG_MUTEXES=y +CONFIG_BKL=y +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_SPARSE_RCU_POINTER is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_INFO is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set +CONFIG_FRAME_POINTER=y +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# CONFIG_LKDTM is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +# CONFIG_SYSCTL_SYSCALL_CHECK is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_EVENT_POWER_TRACING_DEPRECATED=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_STRICT_DEVMEM is not set +# CONFIG_ARM_UNWIND is not set +# CONFIG_DEBUG_USER is not set +CONFIG_DEBUG_ERRORS=y +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_LL is not set +# CONFIG_OC_ETM is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +# CONFIG_SECURITYFS is not set +# CONFIG_SECURITY_NETWORK is not set +# CONFIG_SECURITY_PATH is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_IMA is not set 
+CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_GHASH is not set +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=y +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_TWOFISH_COMMON=y + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set +CONFIG_CRYPTO_HW=y +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +# CONFIG_XZ_DEC is not set +# CONFIG_XZ_DEC_BCJ is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=y +CONFIG_TEXTSEARCH_BM=y +CONFIG_TEXTSEARCH_FSM=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/arm/configs/incrediblec-incredikernel_defconfig b/arch/arm/configs/incrediblec-incredikernel_defconfig new file mode 100644 index 0000000000000..4678c0488ef1f --- /dev/null +++ b/arch/arm/configs/incrediblec-incredikernel_defconfig @@ -0,0 +1,2224 @@ +# +# Automatically generated make config: don't edit +# Linux/arm 2.6.38.8 Kernel Configuration +# Mon Nov 25 21:11:34 2013 +# +CONFIG_ARM=y 
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y +CONFIG_GENERIC_GPIO=y +# CONFIG_ARCH_USES_GETTIMEOFFSET is not set +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_HAVE_PROC_CPU=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_ARCH_HAS_CPUFREQ=y +CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_VECTORS_BASE=0xffff0000 +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y +CONFIG_HAVE_IRQ_WORK=y + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="arm-eabi-" +CONFIG_LOCALVERSION="-incredikernel-kk44" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +# CONFIG_KERNEL_GZIP is not set +CONFIG_KERNEL_LZMA=y +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +CONFIG_SWAP=y +# CONFIG_SYSVIPC is not set +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set +CONFIG_AUDIT=y +CONFIG_AUDITSYSCALL=y +CONFIG_HAVE_GENERIC_HARDIRQS=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_HARDIRQS=y +# CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED is not set +CONFIG_HAVE_SPARSE_IRQ=y +# CONFIG_GENERIC_PENDING_IRQ is not set +# CONFIG_AUTO_IRQ_AFFINITY is not set +# CONFIG_IRQ_PER_CPU is not set +# CONFIG_SPARSE_IRQ is not set + +# +# RCU Subsystem +# +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_TINY_RCU is not set +CONFIG_TINY_PREEMPT_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_TRACE is not set +# CONFIG_TREE_RCU_TRACE is not set +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=1 +CONFIG_RCU_BOOST_DELAY=500 +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_CGROUPS=y +# CONFIG_CGROUP_DEBUG is not set +# CONFIG_CGROUP_NS is not set +CONFIG_CGROUP_FREEZER=y +# CONFIG_CGROUP_DEVICE is not set +# CONFIG_CPUSETS is not set +CONFIG_CGROUP_CPUACCT=y +# CONFIG_RESOURCE_COUNTERS is not set +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +# CONFIG_BLK_CGROUP is not set +# CONFIG_NAMESPACES is not set +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_EXPERT=y +CONFIG_EMBEDDED=y +CONFIG_UID16=y +# CONFIG_SYSCTL_SYSCALL is not set +# CONFIG_KALLSYMS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +# CONFIG_ELF_CORE is not set +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_ASHMEM=y +CONFIG_AIO=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +# CONFIG_PERF_EVENTS is not set +# CONFIG_PERF_COUNTERS is not set +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_COMPAT_BRK=y +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +CONFIG_TRACEPOINTS=y +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_CLK=y 
+CONFIG_HAVE_DMA_API_DEBUG=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +CONFIG_LBDAF=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_IOSCHED_BFQ=y +CONFIG_SCHED_BFS=y +# CONFIG_CGROUP_BFQIO is not set +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_BFQ is not set +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +# CONFIG_INLINE_SPIN_UNLOCK is not set +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_READ_TRYLOCK is not set +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +# CONFIG_INLINE_READ_UNLOCK is not set +# CONFIG_INLINE_READ_UNLOCK_BH is not set +# CONFIG_INLINE_READ_UNLOCK_IRQ is not set +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_TRYLOCK is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set +# CONFIG_INLINE_WRITE_UNLOCK is not set +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +# CONFIG_MUTEX_SPIN_ON_OWNER is not set +CONFIG_FREEZER=y + +# +# System Type +# +CONFIG_MMU=y +# CONFIG_ARCH_AAEC2000 is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_REALVIEW is not set +# CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_VEXPRESS is not set +# CONFIG_ARCH_AT91 is not set +# CONFIG_ARCH_BCMRING is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CNS3XXX is not set +# CONFIG_ARCH_GEMINI is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_EP93XX is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_MXS is not set +# CONFIG_ARCH_STMP3XXX is not set +# CONFIG_ARCH_NETX is not set +# CONFIG_ARCH_H720X is not set +# CONFIG_ARCH_IOP13XX is not set +# CONFIG_ARCH_IOP32X is not set +# CONFIG_ARCH_IOP33X is not set +# CONFIG_ARCH_IXP23XX is not set +# CONFIG_ARCH_IXP2000 is not set +# CONFIG_ARCH_IXP4XX is not set +# CONFIG_ARCH_DOVE is not set +# CONFIG_ARCH_KIRKWOOD is not set +# CONFIG_ARCH_LOKI is not set +# CONFIG_ARCH_LPC32XX is not set +# CONFIG_ARCH_MV78XX0 is not set +# CONFIG_ARCH_ORION5X is not set +# CONFIG_ARCH_MMP is not set +# CONFIG_ARCH_KS8695 is not set +# CONFIG_ARCH_NS9XXX is not set +# CONFIG_ARCH_W90X900 is not set +# CONFIG_ARCH_NUC93X is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_PNX4008 is not set +# CONFIG_ARCH_PXA is not set +CONFIG_ARCH_MSM=y +# CONFIG_ARCH_SHMOBILE is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_S3C2410 is not set +# 
CONFIG_ARCH_S3C64XX is not set +# CONFIG_ARCH_S5P64X0 is not set +# CONFIG_ARCH_S5P6442 is not set +# CONFIG_ARCH_S5PC100 is not set +# CONFIG_ARCH_S5PV210 is not set +# CONFIG_ARCH_S5PV310 is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_TCC_926 is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_U300 is not set +# CONFIG_ARCH_U8500 is not set +# CONFIG_ARCH_NOMADIK is not set +# CONFIG_ARCH_DAVINCI is not set +# CONFIG_ARCH_OMAP is not set +# CONFIG_PLAT_SPEAR is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_ARCH_MSM7X00A is not set +# CONFIG_ARCH_MSM7X30 is not set +CONFIG_ARCH_QSD8X50=y +# CONFIG_ARCH_MSM8X60 is not set +CONFIG_ARCH_MSM_SCORPION=y +CONFIG_HAS_MSM_DEBUG_UART_PHYS=y +CONFIG_MSM_MDP31=y +# CONFIG_PERFLOCK is not set + +# +# Qualcomm MSM Board Type +# +# CONFIG_MACH_SWORDFISH is not set +CONFIG_MACH_QSD8X50_SURF=y +# CONFIG_MACH_QSD8X50A_ST1_5 is not set +CONFIG_MSM_DEBUG_UART_NONE=y +# CONFIG_MSM_DEBUG_UART1 is not set +# CONFIG_MSM_DEBUG_UART2 is not set +# CONFIG_MSM_DEBUG_UART3 is not set +CONFIG_MSM_PROC_COMM=y +# CONFIG_MACH_MAHIMAHI is not set +CONFIG_MACH_BRAVO_NONE=y +# CONFIG_MACH_BRAVO is not set +# CONFIG_MACH_BRAVOC is not set +# CONFIG_MACH_INCREDIBLE is not set +CONFIG_MACH_INCREDIBLEC=y +# CONFIG_MACH_SUPERSONIC is not set +# CONFIG_MACH_QSD8X50_FFA is not set +# CONFIG_HTC_HEADSET is not set +# CONFIG_HTC_35MM_JACK is not set +CONFIG_HTC_BATTCHG=y +CONFIG_HTC_BATTCHG_SMEM=y +# CONFIG_HTC_PWRSPLY is not set +# CONFIG_HTC_PWRSINK is not set +# CONFIG_HTC_POWER_COLLAPSE_MAGIC is not set +# CONFIG_HTC_ONMODE_CHARGING is not set +CONFIG_QSD_SVS=y +CONFIG_QSD_PMIC_DEFAULT_DCDC1=1275 +CONFIG_CACHE_FLUSH_RANGE_LIMIT=0x40000 +CONFIG_PHYS_OFFSET=0x20000000 +CONFIG_MSM7X00A_USE_GP_TIMER=y +# CONFIG_MSM7X00A_USE_DG_TIMER is not set +CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE_SUSPEND=y +# CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE is not set +# CONFIG_MSM7X00A_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_SLEEP_MODE=0 +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE_SUSPEND is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE_APPS_SLEEP=y +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE=2 +CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME=20000000 +CONFIG_MSM7X00A_IDLE_SPIN_TIME=80000 +CONFIG_MSM_IDLE_STATS=y +CONFIG_MSM_IDLE_STATS_FIRST_BUCKET=62500 +CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT=2 +CONFIG_MSM_IDLE_STATS_BUCKET_COUNT=10 +CONFIG_MSM_FIQ_SUPPORT=y +CONFIG_MSM_SERIAL_DEBUGGER=y +CONFIG_MSM_SERIAL_DEBUGGER_NO_SLEEP=y +# CONFIG_MSM_SERIAL_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON is not set +# CONFIG_MSM_SERIAL_DEBUGGER_CONSOLE is not set +CONFIG_MSM_SMD=y +CONFIG_MSM_GPIOMUX=y +CONFIG_MSM_DAL=y +CONFIG_MSM_ONCRPCROUTER=y +CONFIG_MSM_CPU_FREQ_SET_MIN_MAX=y +CONFIG_MSM_CPU_FREQ_MAX=998000 +CONFIG_MSM_CPU_FREQ_MIN=245760 +# CONFIG_AXI_SCREEN_POLICY is not set +CONFIG_MSM_CPU_AVS=y +CONFIG_MSM_AVS_HW=y +CONFIG_HTC_ACOUSTIC_QSD=y +CONFIG_MSM_QDSP6=y +CONFIG_WIFI_CONTROL_FUNC=y +# CONFIG_WIFI_MEM_PREALLOC is not set +CONFIG_ARCH_MSM_FLASHLIGHT=y +CONFIG_MICROP_COMMON=y +CONFIG_HTC_HEADSET_MGR=y +CONFIG_HTC_HEADSET_GPIO=y +CONFIG_HTC_HEADSET_MICROP=y +# CONFIG_VIRTUAL_KPANIC_PARTITION is not set + +# +# System MMU +# + +# +# Processor Type +# +CONFIG_CPU_32v6K=y 
+CONFIG_CPU_V7=y +CONFIG_CPU_32v7=y +CONFIG_CPU_ABRT_EV7=y +CONFIG_CPU_PABRT_V7=y +CONFIG_CPU_CACHE_V7=y +CONFIG_CPU_CACHE_VIPT=y +CONFIG_CPU_COPY_V6=y +CONFIG_CPU_TLB_V7=y +CONFIG_VERIFY_PERMISSION_FAULT=y +CONFIG_CPU_HAS_ASID=y +CONFIG_CPU_CP15=y +CONFIG_CPU_CP15_MMU=y + +# +# Processor Features +# +CONFIG_ARM_THUMB=y +CONFIG_ARM_THUMBEE=y +# CONFIG_SWP_EMULATE is not set +# CONFIG_CPU_ICACHE_DISABLE is not set +# CONFIG_CPU_DCACHE_DISABLE is not set +# CONFIG_CPU_CACHE_ERR_REPORT is not set +# CONFIG_CPU_BPREDICT_DISABLE is not set +CONFIG_ARM_L1_CACHE_SHIFT=5 +CONFIG_ARM_DMA_MEM_BUFFERABLE=y +# CONFIG_VCM is not set +# CONFIG_STRICT_MEMORY_RWX is not set +# CONFIG_RESERVE_FIRST_PAGE is not set +CONFIG_CPU_HAS_PMU=y +# CONFIG_ARM_ERRATA_430973 is not set +# CONFIG_ARM_ERRATA_458693 is not set +# CONFIG_ARM_ERRATA_460075 is not set +# CONFIG_ARM_ERRATA_743622 is not set +# CONFIG_KSAPI is not set +# CONFIG_FIQ_DEBUGGER is not set + +# +# Bus support +# +# CONFIG_PCI_SYSCALL is not set +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Kernel Features +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_VMSPLIT_3G=y +# CONFIG_VMSPLIT_2G is not set +# CONFIG_VMSPLIT_1G is not set +CONFIG_PAGE_OFFSET=0xC0000000 +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_HZ=100 +# CONFIG_THUMB2_KERNEL is not set +CONFIG_AEABI=y +# CONFIG_OABI_COMPAT is not set +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set +# CONFIG_HIGHMEM is not set +CONFIG_VMALLOC_RESERVE=0x08000000 +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_COMPACTION is not set +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_VIRT_TO_BUS=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 +CONFIG_NEED_PER_CPU_KM=y +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_ALIGNMENT_TRAP=y +# CONFIG_UACCESS_WITH_MEMCPY is not set +# CONFIG_SECCOMP is not set +# CONFIG_CC_STACKPROTECTOR is not set +# CONFIG_DEPRECATED_PARAM_STRUCT is not set +# CONFIG_CP_ACCESS is not set + +# +# Boot options +# +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_CMDLINE="mem=64M console=ttyMSM0" +# CONFIG_CMDLINE_FORCE is not set +# CONFIG_XIP_KERNEL is not set +# CONFIG_KEXEC is not set +# CONFIG_CRASH_DUMP is not set +# CONFIG_AUTO_ZRELADDR is not set + +# +# CPU Power Management +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TABLE=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_STAT_DETAILS=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2 is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_INTERACTIVE=y +# CONFIG_CPU_FREQ_GOV_SMARTASS2 is not set +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +# CONFIG_CPU_IDLE is not set +CONFIG_CPU_FREQ_MSM=y + +# +# Floating point emulation +# + +# +# At least one emulation must be selected +# +CONFIG_VFP=y +CONFIG_VFPv3=y +CONFIG_NEON=y + +# +# Userspace binary formats +# 
+CONFIG_BINFMT_ELF=y +CONFIG_HAVE_AOUT=y +# CONFIG_BINFMT_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options +# +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_SLEEP=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HAS_WAKELOCK=y +CONFIG_HAS_EARLYSUSPEND=y +CONFIG_WAKELOCK=y +CONFIG_WAKELOCK_STAT=y +CONFIG_USER_WAKELOCK=y +CONFIG_EARLYSUSPEND=y +# CONFIG_NO_USER_SPACE_SCREEN_ACCESS_CONTROL is not set +CONFIG_FB_EARLYSUSPEND=y +# CONFIG_APM_EMULATION is not set +# CONFIG_PM_RUNTIME is not set +CONFIG_PM_OPS=y +# CONFIG_SUSPEND_TIME is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=y +CONFIG_NET_KEY=y +# CONFIG_NET_KEY_MIGRATE is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +CONFIG_INET_ESP=y +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +CONFIG_IPV6=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +# CONFIG_IPV6_ROUTE_INFO is not set +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_INET6_XFRM_TUNNEL=y +CONFIG_INET6_TUNNEL=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +CONFIG_IPV6_SIT=y +# CONFIG_IPV6_SIT_6RD is not set +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=y +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +# CONFIG_IPV6_MROUTE is not set +CONFIG_ANDROID_PARANOID_NETWORK=y +CONFIG_NET_ACTIVITY_STATS=y +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_NETLINK=y +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_TPROXY=y +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=y +CONFIG_NETFILTER_XT_CONNMARK=y + +# +# Xtables targets +# +# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +# 
CONFIG_NETFILTER_XT_TARGET_CT is not set +# CONFIG_NETFILTER_XT_TARGET_DSCP is not set +CONFIG_NETFILTER_XT_TARGET_HL=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LED=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set +# CONFIG_NETFILTER_XT_TARGET_TEE is not set +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set + +# +# Xtables matches +# +# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +# CONFIG_NETFILTER_XT_MATCH_CPU is not set +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +# CONFIG_NETFILTER_XT_MATCH_DSCP is not set +# CONFIG_NETFILTER_XT_MATCH_ESP is not set +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_HL=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set +# CONFIG_NETFILTER_XT_MATCH_OSF is not set +# CONFIG_NETFILTER_XT_MATCH_OWNER is not set +CONFIG_NETFILTER_XT_MATCH_POLICY=y +# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set +# CONFIG_NETFILTER_XT_MATCH_REALM is not set +# CONFIG_NETFILTER_XT_MATCH_RECENT is not set +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +# CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_CONNTRACK_PROC_COMPAT=y +# CONFIG_IP_NF_QUEUE is not set +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_ADDRTYPE=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_LOG=y +# CONFIG_IP_NF_TARGET_ULOG is not set +CONFIG_NF_NAT=y +CONFIG_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +# CONFIG_NF_NAT_SNMP_BASIC is not set +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_GRE=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_FTP=y +CONFIG_NF_NAT_IRC=y +CONFIG_NF_NAT_TFTP=y +CONFIG_NF_NAT_AMANDA=y +CONFIG_NF_NAT_PPTP=y +CONFIG_NF_NAT_H323=y +CONFIG_NF_NAT_SIP=y +CONFIG_IP_NF_MANGLE=y +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +# CONFIG_IP_NF_TARGET_ECN is not set +# CONFIG_IP_NF_TARGET_TTL is not set +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV6=y +CONFIG_NF_CONNTRACK_IPV6=y +# CONFIG_IP6_NF_QUEUE is not set +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_EUI64=y +CONFIG_IP6_NF_MATCH_FRAG=y 
+CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +CONFIG_IP6_NF_MATCH_RT=y +CONFIG_IP6_NF_TARGET_HL=y +CONFIG_IP6_NF_TARGET_LOG=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +# CONFIG_IP6_NF_RAW is not set +# CONFIG_BRIDGE_NF_EBTABLES is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +CONFIG_STP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +# CONFIG_NET_SCH_CBQ is not set +CONFIG_NET_SCH_HTB=y +# CONFIG_NET_SCH_HFSC is not set +# CONFIG_NET_SCH_PRIO is not set +# CONFIG_NET_SCH_MULTIQ is not set +# CONFIG_NET_SCH_RED is not set +# CONFIG_NET_SCH_SFQ is not set +# CONFIG_NET_SCH_TEQL is not set +# CONFIG_NET_SCH_TBF is not set +# CONFIG_NET_SCH_GRED is not set +# CONFIG_NET_SCH_DSMARK is not set +# CONFIG_NET_SCH_NETEM is not set +# CONFIG_NET_SCH_DRR is not set +CONFIG_NET_SCH_INGRESS=y + +# +# Classification +# +CONFIG_NET_CLS=y +# CONFIG_NET_CLS_BASIC is not set +# CONFIG_NET_CLS_TCINDEX is not set +# CONFIG_NET_CLS_ROUTE4 is not set +# CONFIG_NET_CLS_FW is not set +CONFIG_NET_CLS_U32=y +# CONFIG_CLS_U32_PERF is not set +# CONFIG_CLS_U32_MARK is not set +# CONFIG_NET_CLS_RSVP is not set +# CONFIG_NET_CLS_RSVP6 is not set +# CONFIG_NET_CLS_FLOW is not set +# CONFIG_NET_CLS_CGROUP is not set +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +# CONFIG_NET_EMATCH_CMP is not set +# CONFIG_NET_EMATCH_NBYTE is not set +CONFIG_NET_EMATCH_U32=y +# CONFIG_NET_EMATCH_META is not set +# CONFIG_NET_EMATCH_TEXT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=y +# CONFIG_GACT_PROB is not set +CONFIG_NET_ACT_MIRRED=y +# CONFIG_NET_ACT_IPT is not set +# CONFIG_NET_ACT_NAT is not set +# CONFIG_NET_ACT_PEDIT is not set +# CONFIG_NET_ACT_SIMP is not set +# CONFIG_NET_ACT_SKBEDIT is not set +# CONFIG_NET_ACT_CSUM is not set +# CONFIG_NET_CLS_IND is not set +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NET_DROP_MONITOR is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +CONFIG_BT=y +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +# CONFIG_BT_BNEP_MC_FILTER is not set +# CONFIG_BT_BNEP_PROTO_FILTER is not set +CONFIG_BT_HIDP=y + +# +# Bluetooth device drivers +# +# CONFIG_BT_HCIBTSDIO is not set +CONFIG_BT_HCIUART=y +CONFIG_BT_HCIUART_H4=y +# CONFIG_BT_HCIUART_BCSP is not set +# CONFIG_BT_HCIUART_ATH3K is not set +CONFIG_BT_HCIUART_LL=y +# CONFIG_BT_HCIVHCI is not set +# CONFIG_BT_MRVL is not set +# CONFIG_AF_RXRPC is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_CFG80211=y +CONFIG_NL80211_TESTMODE=y +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_REG_DEBUG is not set +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +# CONFIG_CFG80211_INTERNAL_REGDB is not set +CONFIG_CFG80211_WEXT=y +CONFIG_WIRELESS_EXT_SYSFS=y +# CONFIG_LIB80211 is not 
set +CONFIG_CFG80211_ALLOW_RECONNECT=y +# CONFIG_MAC80211 is not set + +# +# Some wireless drivers require a rate control algorithm +# +# CONFIG_WIMAX is not set +CONFIG_RFKILL=y +# CONFIG_RFKILL_PM is not set +CONFIG_RFKILL_LEDS=y +# CONFIG_RFKILL_INPUT is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="" +# CONFIG_DEVTMPFS is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +CONFIG_GENLOCK=y +CONFIG_GENLOCK_MISCDEVICE=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +# CONFIG_SW_SYNC_USER is not set +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +CONFIG_MTD_MSM_NAND=y +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +CONFIG_MTD_NAND_IDS=y +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_UBI is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=y + +# +# DRBD disabled because PROC_FS, INET or CONNECTOR not selected +# +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_MG_DISK is not set +# CONFIG_BLK_DEV_RBD is not set +CONFIG_MISC_DEVICES=y +# CONFIG_AD525X_DPOT is not set +CONFIG_ANDROID_PMEM=y +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +CONFIG_KERNEL_DEBUGGER_CORE=y +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1780 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# 
CONFIG_SENSORS_AK8975 is not set +CONFIG_SENSORS_AKM8973=y +# CONFIG_SENSORS_AKM8976 is not set +# CONFIG_VP_A1026 is not set +# CONFIG_DS1682 is not set +# CONFIG_TI_DAC7512 is not set +CONFIG_UID_STAT=y +# CONFIG_BMP085 is not set +# CONFIG_WL127X_RFKILL is not set +CONFIG_SENSORS_BMA150_SPI=y +# CONFIG_APANIC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_IWMC3200TOP is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_ATA is not set +CONFIG_MD=y +# CONFIG_BLK_DEV_MD is not set +CONFIG_BLK_DEV_DM=y +# CONFIG_DM_DEBUG is not set +CONFIG_DM_CRYPT=y +# CONFIG_DM_SNAPSHOT is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_RAID is not set +# CONFIG_DM_ZERO is not set +# CONFIG_DM_MULTIPATH is not set +# CONFIG_DM_DELAY is not set +CONFIG_DM_UEVENT=y +CONFIG_NETDEVICES=y +CONFIG_IFB=m +CONFIG_DUMMY=y +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +CONFIG_TUN=y +# CONFIG_VETH is not set +CONFIG_MII=y +# CONFIG_PHYLIB is not set +CONFIG_NET_ETHERNET=y +# CONFIG_AX88796 is not set +CONFIG_SMC91X=y +# CONFIG_DM9000 is not set +# CONFIG_ENC28J60 is not set +# CONFIG_ETHOC is not set +# CONFIG_SMC911X is not set +# CONFIG_SMSC911X is not set +# CONFIG_DNET is not set +# CONFIG_IBM_NEW_EMAC_ZMII is not set +# CONFIG_IBM_NEW_EMAC_RGMII is not set +# CONFIG_IBM_NEW_EMAC_TAH is not set +# CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set +# CONFIG_B44 is not set +# CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set +CONFIG_WLAN=y +# CONFIG_ATH_COMMON is not set +# CONFIG_BCM4329 is not set +CONFIG_BCMDHD=m +CONFIG_BCMDHD_FW_PATH="/vendor/firmware/fw_bcmdhd.bin" +CONFIG_BCMDHD_NVRAM_PATH="/proc/calibration" +# CONFIG_DHD_USE_STATIC_BUF is not set +# CONFIG_DHD_USE_SCHED_SCAN is not set +# CONFIG_DHD_ENABLE_P2P is not set +# CONFIG_HOSTAP is not set +# CONFIG_IWM is not set +# CONFIG_LIBERTAS is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set + +# +# CAIF transport drivers +# +CONFIG_PPP=y +# CONFIG_PPP_MULTILINK is not set +# CONFIG_PPP_FILTER is not set +CONFIG_PPP_ASYNC=y +# CONFIG_PPP_SYNC_TTY is not set +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_MPPE=y +# CONFIG_PPPOE is not set +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +# CONFIG_SLIP is not set +CONFIG_SLHC=y +# CONFIG_NETCONSOLE is not set +CONFIG_MSM_RMNET=y +# CONFIG_MSM_RMNET_DEBUG is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +CONFIG_GAN_ETH=y +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set +CONFIG_INPUT_KEYRESET=y + +# +# 
Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_ATMEL=y +CONFIG_TOUCHSCREEN_COMPATIBLE_REPORT=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_I2C is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_ELAN_I2C_8232 is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_QT602240 is not set +# CONFIG_TOUCHSCREEN_MSM is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_W90X900 is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATI_REMOTE is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +CONFIG_INPUT_KEYCHORD=y +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_CAPELLA_CM3602 is not set +# CONFIG_INPUT_CAPELLA_CM3602_HTC is not set +CONFIG_LIGHTSENSOR_MICROP=y +CONFIG_INPUT_OPTICALJOYSTICK=y +CONFIG_OPTICALJOYSTICK_CRUCIAL=y +CONFIG_OPTICALJOYSTICK_CRUCIAL_uP=y +# CONFIG_OPTICALJOYSTICK_CRUCIAL_SPI is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_N_GSM is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX3107 is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_MSM=y +# CONFIG_SERIAL_MSM_CONSOLE is not set +# CONFIG_SERIAL_MSM_CLOCK_CONTROL is not set +CONFIG_SERIAL_MSM_HS=y +CONFIG_SERIAL_BCM_BT_LPM=y +# CONFIG_SERIAL_TIMBERDALE is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_TTY_PRINTK is not set +# CONFIG_HVC_DCC is not set +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_DCC_TTY is not set +# CONFIG_RAMOOPS is not set +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +# CONFIG_I2C_CHARDEV is not set +# CONFIG_I2C_MUX is not set 
+CONFIG_I2C_HELPER_AUTO=y + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE is not set +# CONFIG_I2C_GPIO is not set +CONFIG_I2C_MSM=y +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_PXA2XX_PCI is not set +CONFIG_SPI_QSD=y +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_DESIGNWARE is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_TLE62X0 is not set + +# +# PPS support +# +# CONFIG_PPS is not set + +# +# PPS generators support +# +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set + +# +# Memory mapped GPIO expanders: +# +# CONFIG_GPIO_BASIC_MMIO is not set +# CONFIG_GPIO_IT8761E is not set + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_SX150X is not set +# CONFIG_GPIO_ADP5588 is not set + +# +# PCI GPIO expanders: +# + +# +# SPI GPIO expanders: +# +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_74X164 is not set + +# +# AC97 GPIO expanders: +# + +# +# MODULbus GPIO expanders: +# +# CONFIG_W1 is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_DS2784 is not set +# CONFIG_BATTERY_BQ20Z75 is not set +# CONFIG_BATTERY_BQ27x00 is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set +CONFIG_MFD_SUPPORT=y +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_ASIC3 is not set +# CONFIG_HTC_EGPIO is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_TPS65200 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_MFD_STMPE is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_T7L66XB is not set +# CONFIG_MFD_TC6387XB is not set +# CONFIG_MFD_TC6393XB is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13XXX is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_WL1273_CORE is not set +CONFIG_REGULATOR=y +# CONFIG_REGULATOR_DEBUG is not set +# CONFIG_REGULATOR_DUMMY is not set +# 
CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_BQ24022 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +CONFIG_REGULATOR_TPS65023=y +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_AD5398 is not set +# CONFIG_REGULATOR_TPS6524X is not set +CONFIG_MEDIA_SUPPORT=y + +# +# Multimedia core support +# +# CONFIG_VIDEO_DEV is not set +# CONFIG_DVB_CORE is not set +# CONFIG_VIDEO_MEDIA is not set + +# +# Multimedia drivers +# +CONFIG_RC_CORE=y +CONFIG_LIRC=y +CONFIG_RC_MAP=y +CONFIG_IR_NEC_DECODER=y +CONFIG_IR_RC5_DECODER=y +CONFIG_IR_RC6_DECODER=y +CONFIG_IR_JVC_DECODER=y +CONFIG_IR_SONY_DECODER=y +CONFIG_IR_RC5_SZ_DECODER=y +CONFIG_IR_LIRC_CODEC=y +# CONFIG_IR_IMON is not set +# CONFIG_IR_MCEUSB is not set +# CONFIG_IR_STREAMZAP is not set +# CONFIG_RC_LOOPBACK is not set + +# +# Qualcomm MSM Camera And Video +# +CONFIG_MSM_CAMERA=y +CONFIG_720P_CAMERA=y +# CONFIG_MSM_CAMERA_DEBUG is not set + +# +# Camera Sensor Selection +# +# CONFIG_MT9T013 is not set +# CONFIG_MT9D112 is not set +# CONFIG_MT9P012 is not set +# CONFIG_S5K3E2FX is not set +# CONFIG_S5K6AAFX is not set +CONFIG_OV8810=y +# CONFIG_OV9665 is not set +# CONFIG_S5K3H1GX is not set + +# +# Graphics support +# +# CONFIG_DRM is not set +CONFIG_ION=y +CONFIG_ION_MSM=y +CONFIG_MSM_KGSL=y +# CONFIG_MSM_KGSL_CFF_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP is not set +# CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX is not set +# CONFIG_KGSL_PER_PROCESS_PAGE_TABLE is not set +CONFIG_MSM_KGSL_PAGE_TABLE_SIZE=0xFFF0000 +CONFIG_MSM_KGSL_MMU_PAGE_FAULT=y +# CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES is not set +# CONFIG_VGASTATE is not set +CONFIG_VIDEO_OUTPUT_CONTROL=y +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_WMT_GE_ROPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_FB_MSM=y +CONFIG_FB_MSM_LEGACY_MDP=y +CONFIG_FB_MSM_MDP_PPP=y +CONFIG_FB_MSM_LCDC=y +CONFIG_FB_MSM_MDDI=y +# CONFIG_FB_MSM_MDDI_EPSON is not set +# CONFIG_FB_MSM_MDDI_NOVTEC is not set +# CONFIG_MSM_HDMI is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set +# CONFIG_LOGO is not set +# CONFIG_SOUND is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +# CONFIG_HIDRAW is not set +CONFIG_UHID=y +# CONFIG_HID_PID is not set + +# +# Special HID drivers +# +CONFIG_HID_APPLE=y +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_WACOM is not set 
+CONFIG_USB_SUPPORT=y +CONFIG_USB_ARCH_HAS_HCD=y +# CONFIG_USB_ARCH_HAS_OHCI is not set +CONFIG_USB_ARCH_HAS_EHCI=y +# CONFIG_USB is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_MUSB_HDRC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_GADGET_SELECTED=y +# CONFIG_USB_GADGET_R8A66597 is not set +# CONFIG_USB_GADGET_PXA_U2O is not set +# CONFIG_USB_GADGET_M66592 is not set +# CONFIG_USB_GADGET_CI13XXX_MSM is not set +CONFIG_USB_GADGET_MSM_72K=y +CONFIG_USB_MSM_72K=y +CONFIG_USB_MSM_72K_HTC=y +CONFIG_USB_GADGET_DUALSPEED=y +# CONFIG_USB_ZERO is not set +# CONFIG_USB_ETH is not set +# CONFIG_USB_G_NCM is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_FILE_STORAGE is not set +# CONFIG_USB_MASS_STORAGE is not set +# CONFIG_USB_G_SERIAL is not set +# CONFIG_USB_G_PRINTER is not set +CONFIG_USB_ANDROID=y +# CONFIG_USB_ANDROID_ACM is not set +CONFIG_USB_ANDROID_ADB=y +CONFIG_USB_ANDROID_DIAG=y +CONFIG_USB_ANDROID_MASS_STORAGE=y +# CONFIG_USB_ANDROID_MTP is not set +CONFIG_USB_ANDROID_RNDIS=y +# CONFIG_USB_ANDROID_RNDIS_WCEIS is not set +# CONFIG_USB_ANDROID_ACCESSORY is not set +CONFIG_USB_CSW_HACK=y +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set +# CONFIG_USB_ACCESSORY_DETECT is not set +# CONFIG_DOCK_ACCESSORY_DETECT is not set +# CONFIG_USB_BYPASS_VBUS_NOTIFY is not set + +# +# OTG and related infrastructure +# +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ULPI is not set +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_MSM_OTG_72K is not set +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_UNSAFE_RESUME=y +# CONFIG_MMC_CLKGATE is not set +CONFIG_MMC_EMBEDDED_SDIO=y +CONFIG_MMC_PARANOID_SD_INIT=y + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=8 +# CONFIG_MMC_BLOCK_BOUNCE is not set +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_SDHCI is not set +CONFIG_MMC_MSM=y +# CONFIG_MMC_SPI is not set +# CONFIG_MMC_DW is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y + +# +# LED drivers +# +# CONFIG_LEDS_PCA9532 is not set +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_GPIO_PLATFORM=y +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 is not set +CONFIG_LEDS_CPLD=y +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_LT3593 is not set +CONFIG_LEDS_TRIGGERS=y + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set +CONFIG_LEDS_TRIGGER_SLEEP=y + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_NFC_DEVICES is not set +CONFIG_SWITCH=y +CONFIG_SWITCH_GPIO=y +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +# CONFIG_RTC_INTF_SYSFS is not set +# CONFIG_RTC_INTF_PROC is not set +# 
CONFIG_RTC_INTF_DEV is not set +CONFIG_RTC_INTF_ALARM=y +CONFIG_RTC_INTF_ALARM_DEV=y +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_CMOS is not set +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_MSM7X00A=y +# CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set +CONFIG_STAGING=y +# CONFIG_STAGING_EXCLUDE_BUILD is not set +# CONFIG_ECHO is not set +# CONFIG_BRCM80211 is not set +# CONFIG_COMEDI is not set + +# +# Android +# +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_LOGGER=y +CONFIG_ANDROID_RAM_CONSOLE=y +CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE=128 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE=16 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE=8 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL=0x11d +# CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT is not set +CONFIG_ANDROID_TIMED_OUTPUT=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +# CONFIG_POHMELFS is not set +# CONFIG_IIO is not set +CONFIG_XVMALLOC=y +CONFIG_ZRAM=y +# CONFIG_ZRAM_DEBUG is not set +# CONFIG_FB_SM7XX is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_ST_BT is not set +# CONFIG_LIRC_STAGING is not set +CONFIG_MACH_NO_WESTBRIDGE=y +# CONFIG_ATH6K_LEGACY is not set +# CONFIG_FT1000 is not set + +# +# Speakup console speech +# +# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set + +# +# File systems +# +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT23=y +CONFIG_EXT4_FS_XATTR=y +# CONFIG_EXT4_FS_POSIX_ACL is not set +# CONFIG_EXT4_FS_SECURITY is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_FS_POSIX_ACL is not set +CONFIG_FILE_LOCKING=y 
+CONFIG_FSNOTIFY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_INOTIFY_USER=y
+# CONFIG_FANOTIFY is not set
+# CONFIG_QUOTA is not set
+# CONFIG_QUOTACTL is not set
+# CONFIG_AUTOFS4_FS is not set
+CONFIG_FUSE_FS=y
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+# CONFIG_MSDOS_FS is not set
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+CONFIG_EXFAT_FS=y
+CONFIG_EXFAT_DEFAULT_CODEPAGE=437
+CONFIG_EXFAT_DEFAULT_IOCHARSET="utf8"
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_YAFFS_FS=y
+CONFIG_YAFFS_YAFFS1=y
+# CONFIG_YAFFS_9BYTE_TAGS is not set
+# CONFIG_YAFFS_DOES_ECC is not set
+CONFIG_YAFFS_YAFFS2=y
+CONFIG_YAFFS_AUTO_YAFFS2=y
+CONFIG_YAFFS_DISABLE_TAGS_ECC=y
+# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
+# CONFIG_YAFFS_EMPTY_LOST_AND_FOUND is not set
+# CONFIG_YAFFS_DISABLE_BLOCK_REFRESHING is not set
+# CONFIG_YAFFS_DISABLE_BACKGROUND is not set
+CONFIG_YAFFS_XATTR=y
+# CONFIG_JFFS2_FS is not set
+# CONFIG_LOGFS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+# CONFIG_NFS_FS is not set
+# CONFIG_NFSD is not set
+# CONFIG_CEPH_FS is not set
+CONFIG_CIFS=y
+# CONFIG_CIFS_STATS is not set
+CONFIG_CIFS_WEAK_PW_HASH=y
+# CONFIG_CIFS_UPCALL is not set
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_DFS_UPCALL is not set
+# CONFIG_CIFS_ACL is not set
+# CONFIG_CIFS_EXPERIMENTAL is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is
not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_UTF8 is not set +CONFIG_DYNAMIC_FSYNC=y + +# +# Kernel hacking +# +CONFIG_PRINTK_TIME=y +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SHIRQ is not set +# CONFIG_LOCKUP_DETECTOR is not set +# CONFIG_HARDLOCKUP_DETECTOR is not set +# CONFIG_DETECT_HUNG_TASK is not set +CONFIG_SCHED_DEBUG=y +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_BKL is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_SPARSE_RCU_POINTER is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# CONFIG_LKDTM is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +# CONFIG_SYSCTL_SYSCALL_CHECK is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_EVENT_POWER_TRACING_DEPRECATED=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_STRICT_DEVMEM is not set +CONFIG_ARM_UNWIND=y +# CONFIG_DEBUG_USER is not set +# CONFIG_DEBUG_ERRORS is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_LL is not set +# CONFIG_OC_ETM is not set + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_DEBUG_PROC_KEYS is not set +# 
CONFIG_SECURITY_DMESG_RESTRICT is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_GHASH is not set +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=y +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_TWOFISH_COMMON=y + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set +CONFIG_CRYPTO_HW=y +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +# CONFIG_XZ_DEC is not set +# CONFIG_XZ_DEC_BCJ is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=y +CONFIG_TEXTSEARCH_BM=y +CONFIG_TEXTSEARCH_FSM=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/arm/configs/mahimahi_defconfig b/arch/arm/configs/mahimahi_defconfig new file mode 100644 index 0000000000000..47bba92c19218 --- /dev/null +++ b/arch/arm/configs/mahimahi_defconfig @@ -0,0 +1,290 @@ +CONFIG_EXPERIMENTAL=y +# CONFIG_SWAP is not set +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_CGROUPS=y 
+CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +CONFIG_CGROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_EMBEDDED=y +# CONFIG_SYSCTL_SYSCALL is not set +# CONFIG_ELF_CORE is not set +CONFIG_ASHMEM=y +CONFIG_SLAB=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_IOSCHED_DEADLINE is not set +CONFIG_ARCH_MSM=y +CONFIG_ARCH_QSD8X50=y +CONFIG_MSM_DEBUG_UART1=y +CONFIG_HTC_35MM_JACK=y +# CONFIG_HTC_PWRSINK is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME=50000000 +CONFIG_MSM_SERIAL_DEBUGGER=y +CONFIG_MSM_SERIAL_DEBUGGER_NO_SLEEP=y +# CONFIG_MSM_HW3D is not set +CONFIG_WIFI_CONTROL_FUNC=y +CONFIG_ARM_THUMBEE=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y +CONFIG_AEABI=y +# CONFIG_OABI_COMPAT is not set +CONFIG_HIGHMEM=y +CONFIG_VMALLOC_RESERVE=0x30000000 +CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_CMDLINE="mem=64M console=ttyMSM,115200n8" +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT_DETAILS=y +CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_VFP=y +CONFIG_NEON=y +CONFIG_PM=y +CONFIG_WAKELOCK=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_INET_ESP=y +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +CONFIG_IPV6=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_TUNNEL=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_LED=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_OWNER=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_ADDRTYPE=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_LOG=y +CONFIG_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y 
+CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_SCH_INGRESS=y +CONFIG_NET_CLS_U32=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=y +CONFIG_NET_ACT_MIRRED=y +CONFIG_BT=y +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +CONFIG_BT_HIDP=y +CONFIG_BT_HCIUART=y +CONFIG_BT_HCIUART_H4=y +CONFIG_BT_HCIUART_LL=y +CONFIG_RFKILL=y +# CONFIG_RFKILL_PM is not set +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_MTD=y +CONFIG_MTD_PARTITIONS=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLOCK=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_MISC_DEVICES=y +CONFIG_SENSORS_AKM8973=y +CONFIG_VP_A1026=y +CONFIG_UID_STAT=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=y +CONFIG_DM_UEVENT=y +CONFIG_NETDEVICES=y +CONFIG_IFB=y +CONFIG_DUMMY=y +CONFIG_BCM4329=m +CONFIG_PPP=y +CONFIG_PPP_ASYNC=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_MPPE=y +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +# CONFIG_INPUT_MOUSEDEV is not set +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_KEYRESET=y +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_KEYCHORD=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +CONFIG_INPUT_CAPELLA_CM3602=y +# CONFIG_SERIO is not set +# CONFIG_VT is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_MSM=y +CONFIG_SERIAL_MSM_CONSOLE=y +# CONFIG_SERIAL_MSM_CLOCK_CONTROL is not set +CONFIG_SERIAL_MSM_HS=y +CONFIG_SERIAL_BCM_BT_LPM=y +# CONFIG_LEGACY_PTYS is not set +# CONFIG_HW_RANDOM is not set +CONFIG_I2C=y +CONFIG_W1_MASTER_DS2482=y +CONFIG_POWER_SUPPLY=y +CONFIG_BATTERY_DS2784=y +# CONFIG_HWMON is not set +CONFIG_REGULATOR=y +CONFIG_REGULATOR_DEBUG=y +CONFIG_REGULATOR_TPS65023=y +CONFIG_MEDIA_SUPPORT=y +# CONFIG_RC_CORE is not set +CONFIG_MSM_CAMERA=y +CONFIG_S5K3E2FX=y +CONFIG_VIDEO_OUTPUT_CONTROL=y +CONFIG_FB=y +# CONFIG_FB_MSM_MDDI is not set +CONFIG_GPU_MSM_KGSL=y +CONFIG_MSM_KGSL_MMU=y +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_GADGET_MSM_72K=y +CONFIG_USB_ANDROID=y +CONFIG_USB_ANDROID_ADB=y +CONFIG_USB_ANDROID_DIAG=y +CONFIG_USB_ANDROID_MASS_STORAGE=y +CONFIG_USB_ANDROID_RNDIS=y +CONFIG_USB_ANDROID_RNDIS_WCEIS=y +CONFIG_MMC=y +CONFIG_MMC_UNSAFE_RESUME=y +CONFIG_MMC_EMBEDDED_SDIO=y +CONFIG_MMC_PARANOID_SD_INIT=y +# CONFIG_MMC_BLOCK_BOUNCE is not set +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +CONFIG_MMC_MSM=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +CONFIG_LEDS_TRIGGER_SLEEP=y +CONFIG_SWITCH=y +CONFIG_SWITCH_GPIO=y +CONFIG_RTC_CLASS=y +# CONFIG_RTC_INTF_SYSFS is not set +# CONFIG_RTC_INTF_PROC is not set +# CONFIG_RTC_INTF_DEV is not set +CONFIG_STAGING=y +# CONFIG_STAGING_EXCLUDE_BUILD is not set +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_LOGGER=y +CONFIG_ANDROID_RAM_CONSOLE=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +# CONFIG_DNOTIFY is not set +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_YAFFS_FS=y +CONFIG_YAFFS_DISABLE_TAGS_ECC=y +CONFIG_NLS_CODEPAGE_437=y 
+CONFIG_NLS_ISO8859_1=y +CONFIG_PRINTK_TIME=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_KERNEL=y +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +# CONFIG_DEBUG_PREEMPT is not set +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_SPINLOCK_SLEEP=y +CONFIG_DEBUG_INFO=y +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +# CONFIG_ARM_UNWIND is not set +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_TWOFISH=y +# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/arm/configs/msm_defconfig b/arch/arm/configs/msm_defconfig old mode 100644 new mode 100755 index 2b8f7affc1eb5..4e1788a381586 --- a/arch/arm/configs/msm_defconfig +++ b/arch/arm/configs/msm_defconfig @@ -1,72 +1,273 @@ CONFIG_EXPERIMENTAL=y +# CONFIG_SWAP is not set CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +CONFIG_CGROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y CONFIG_BLK_DEV_INITRD=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_EMBEDDED=y +# CONFIG_SYSCTL_SYSCALL is not set +# CONFIG_ELF_CORE is not set +CONFIG_ASHMEM=y CONFIG_SLAB=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_IOSCHED_DEADLINE is not set -# CONFIG_IOSCHED_CFQ is not set CONFIG_ARCH_MSM=y -CONFIG_MACH_HALIBUT=y +CONFIG_ARCH_MSM7X00A=y +CONFIG_MSM_DEBUG_UART=3 +CONFIG_MSM_DEBUG_UART3=y +CONFIG_MACH_SAPPHIRE=y +CONFIG_HTC_HEADSET=y +CONFIG_MSM_SERIAL_DEBUGGER=y +CONFIG_WIFI_CONTROL_FUNC=y +CONFIG_WIFI_MEM_PREALLOC=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_PREEMPT=y CONFIG_AEABI=y # CONFIG_OABI_COMPAT is not set +CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 CONFIG_ZBOOT_ROM_TEXT=0x0 CONFIG_ZBOOT_ROM_BSS=0x0 CONFIG_CMDLINE="mem=64M console=ttyMSM,115200n8" +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_PM=y +CONFIG_WAKELOCK=y CONFIG_NET=y +CONFIG_PACKET=y CONFIG_UNIX=y +CONFIG_NET_KEY=y CONFIG_INET=y -# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +CONFIG_INET_ESP=y # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set -# CONFIG_IPV6 is not set +CONFIG_IPV6=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_TUNNEL=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_DEBUG=y +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_LED=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y 
+CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_OWNER=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_ADDRTYPE=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_LOG=y +CONFIG_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_SCH_INGRESS=y +CONFIG_NET_CLS_U32=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=y +CONFIG_NET_ACT_MIRRED=y +CONFIG_BT=y +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +CONFIG_BT_HIDP=y +CONFIG_BT_HCIUART=y +CONFIG_BT_HCIUART_H4=y +CONFIG_BT_HCIUART_LL=y +CONFIG_RFKILL=y +# CONFIG_RFKILL_PM is not set +# CONFIG_FIRMWARE_IN_KERNEL is not set CONFIG_MTD=y CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_MISC_DEVICES=y +CONFIG_UID_STAT=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=y +CONFIG_DM_UEVENT=y CONFIG_NETDEVICES=y +CONFIG_IFB=y CONFIG_DUMMY=y +CONFIG_TUN=y CONFIG_NET_ETHERNET=y CONFIG_SMC91X=y CONFIG_PPP=y CONFIG_PPP_ASYNC=y CONFIG_PPP_DEFLATE=y CONFIG_PPP_BSDCOMP=y -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_PPP_MPPE=y +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +CONFIG_MSM_RMNET_DEBUG=y +# CONFIG_INPUT_MOUSEDEV is not set CONFIG_INPUT_EVDEV=y -# CONFIG_KEYBOARD_ATKBD is not set +CONFIG_INPUT_KEYRESET=y +# CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_ELAN_I2C_8232=y +CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI=y CONFIG_INPUT_MISC=y +CONFIG_INPUT_KEYCHORD=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y # CONFIG_SERIO is not set -CONFIG_VT_HW_CONSOLE_BINDING=y +# CONFIG_VT is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set CONFIG_SERIAL_MSM=y -CONFIG_SERIAL_MSM_CONSOLE=y +CONFIG_SERIAL_MSM_RX_WAKEUP=y +CONFIG_SERIAL_MSM_HS=y # CONFIG_LEGACY_PTYS is not set # CONFIG_HW_RANDOM is not set CONFIG_I2C=y +CONFIG_POWER_SUPPLY=y # CONFIG_HWMON is not set +CONFIG_MEDIA_SUPPORT=y +# CONFIG_IR_CORE is not set +CONFIG_MSM_CAMERA=y +CONFIG_MT9T013=y +CONFIG_DAB=y CONFIG_VIDEO_OUTPUT_CONTROL=y CONFIG_FB=y -CONFIG_FB_MODE_HELPERS=y -CONFIG_FB_TILEBLITTING=y -CONFIG_FB_MSM=y -# CONFIG_VGA_CONSOLE is not set -CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_GADGET_MSM_72K=y +CONFIG_USB_ANDROID=y +CONFIG_USB_ANDROID_ADB=y +CONFIG_USB_ANDROID_MASS_STORAGE=y +CONFIG_USB_ANDROID_RNDIS=y +CONFIG_USB_ANDROID_RNDIS_WCEIS=y +CONFIG_MMC=y +CONFIG_MMC_UNSAFE_RESUME=y +CONFIG_MMC_EMBEDDED_SDIO=y +CONFIG_MMC_PARANOID_SD_INIT=y +# CONFIG_MMC_BLOCK_BOUNCE is not set +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +CONFIG_MMC_MSM=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +CONFIG_LEDS_TRIGGER_SLEEP=y +CONFIG_SWITCH=y +CONFIG_SWITCH_GPIO=y +CONFIG_RTC_CLASS=y +# 
CONFIG_RTC_INTF_SYSFS is not set +# CONFIG_RTC_INTF_PROC is not set +# CONFIG_RTC_INTF_DEV is not set +CONFIG_STAGING=y +# CONFIG_STAGING_EXCLUDE_BUILD is not set +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_LOGGER=y +CONFIG_ANDROID_RAM_CONSOLE=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +# CONFIG_DNOTIFY is not set CONFIG_INOTIFY=y +CONFIG_VFAT_FS=y CONFIG_TMPFS=y +CONFIG_YAFFS_FS=y +CONFIG_YAFFS_DISABLE_TAGS_ECC=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_PRINTK_TIME=y CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y +# CONFIG_STACKTRACE is not set CONFIG_SCHEDSTATS=y -CONFIG_DEBUG_MUTEXES=y -CONFIG_DEBUG_SPINLOCK_SLEEP=y +CONFIG_TIMER_STATS=y +# CONFIG_DEBUG_PREEMPT is not set CONFIG_DEBUG_INFO=y -CONFIG_DEBUG_LL=y +# CONFIG_ARM_UNWIND is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_TWOFISH=y +# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/arm/configs/pershoot_mahimahi_defconfig b/arch/arm/configs/pershoot_mahimahi_defconfig new file mode 100644 index 0000000000000..d6f04950491f1 --- /dev/null +++ b/arch/arm/configs/pershoot_mahimahi_defconfig @@ -0,0 +1,2097 @@ +# +# Automatically generated make config: don't edit +# Linux/arm 2.6.38.5 Kernel Configuration +# Mon May 2 19:07:50 2011 +# +CONFIG_ARM=y +CONFIG_SYS_SUPPORTS_APM_EMULATION=y +CONFIG_GENERIC_GPIO=y +# CONFIG_ARCH_USES_GETTIMEOFFSET is not set +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_HAVE_PROC_CPU=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_ARCH_HAS_CPUFREQ=y +CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_VECTORS_BASE=0xffff0000 +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y +CONFIG_HAVE_IRQ_WORK=y + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_LOCK_KERNEL=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="" +CONFIG_LOCALVERSION="-cyanogenmod" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_LZO=y +# CONFIG_KERNEL_GZIP is not set +CONFIG_KERNEL_LZMA=y +# CONFIG_KERNEL_LZO is not set +# CONFIG_SWAP is not set +# CONFIG_SYSVIPC is not set +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set +CONFIG_HAVE_GENERIC_HARDIRQS=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_HARDIRQS=y +# CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED is not set +CONFIG_HAVE_SPARSE_IRQ=y +# CONFIG_GENERIC_PENDING_IRQ is not set +# CONFIG_AUTO_IRQ_AFFINITY is not set +# CONFIG_IRQ_PER_CPU is not set +# CONFIG_SPARSE_IRQ is not set + +# +# RCU Subsystem +# +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_TINY_RCU is not set +CONFIG_TINY_PREEMPT_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_TRACE is not set +# CONFIG_TREE_RCU_TRACE is not set +CONFIG_RCU_BOOST=y +CONFIG_RCU_BOOST_PRIO=1 +CONFIG_RCU_BOOST_DELAY=500 +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_CGROUPS=y +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_NS=y +CONFIG_CGROUP_FREEZER=y 
+CONFIG_CGROUP_DEVICE=y +# CONFIG_CPUSETS is not set +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +# CONFIG_CGROUP_MEM_RES_CTLR is not set +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_BLK_CGROUP=y +# CONFIG_DEBUG_BLK_CGROUP is not set +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +# CONFIG_NET_NS is not set +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +# CONFIG_RELAY is not set +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_EXPERT=y +CONFIG_EMBEDDED=y +CONFIG_UID16=y +# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +# CONFIG_ELF_CORE is not set +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_ASHMEM=y +CONFIG_AIO=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +# CONFIG_PERF_EVENTS is not set +# CONFIG_PERF_COUNTERS is not set +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_COMPAT_BRK=y +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_DMA_API_DEBUG=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +CONFIG_LBDAF=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_THROTTLING=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +CONFIG_IOSCHED_BFQ=y +CONFIG_CGROUP_BFQIO=y +CONFIG_DEFAULT_BFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="bfq" +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +# CONFIG_INLINE_SPIN_UNLOCK is not set +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_READ_TRYLOCK is not set +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +# CONFIG_INLINE_READ_UNLOCK is not set +# CONFIG_INLINE_READ_UNLOCK_BH is not set +# CONFIG_INLINE_READ_UNLOCK_IRQ is not set +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_TRYLOCK is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set +# CONFIG_INLINE_WRITE_UNLOCK is not set +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set 
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +# CONFIG_MUTEX_SPIN_ON_OWNER is not set +CONFIG_FREEZER=y + +# +# System Type +# +CONFIG_MMU=y +# CONFIG_ARCH_AAEC2000 is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_REALVIEW is not set +# CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_VEXPRESS is not set +# CONFIG_ARCH_AT91 is not set +# CONFIG_ARCH_BCMRING is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CNS3XXX is not set +# CONFIG_ARCH_GEMINI is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_EP93XX is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_MXS is not set +# CONFIG_ARCH_STMP3XXX is not set +# CONFIG_ARCH_NETX is not set +# CONFIG_ARCH_H720X is not set +# CONFIG_ARCH_IOP13XX is not set +# CONFIG_ARCH_IOP32X is not set +# CONFIG_ARCH_IOP33X is not set +# CONFIG_ARCH_IXP23XX is not set +# CONFIG_ARCH_IXP2000 is not set +# CONFIG_ARCH_IXP4XX is not set +# CONFIG_ARCH_DOVE is not set +# CONFIG_ARCH_KIRKWOOD is not set +# CONFIG_ARCH_LOKI is not set +# CONFIG_ARCH_LPC32XX is not set +# CONFIG_ARCH_MV78XX0 is not set +# CONFIG_ARCH_ORION5X is not set +# CONFIG_ARCH_MMP is not set +# CONFIG_ARCH_KS8695 is not set +# CONFIG_ARCH_NS9XXX is not set +# CONFIG_ARCH_W90X900 is not set +# CONFIG_ARCH_NUC93X is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_PNX4008 is not set +# CONFIG_ARCH_PXA is not set +CONFIG_ARCH_MSM=y +# CONFIG_ARCH_SHMOBILE is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_S3C64XX is not set +# CONFIG_ARCH_S5P64X0 is not set +# CONFIG_ARCH_S5P6442 is not set +# CONFIG_ARCH_S5PC100 is not set +# CONFIG_ARCH_S5PV210 is not set +# CONFIG_ARCH_S5PV310 is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_TCC_926 is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_U300 is not set +# CONFIG_ARCH_U8500 is not set +# CONFIG_ARCH_NOMADIK is not set +# CONFIG_ARCH_DAVINCI is not set +# CONFIG_ARCH_OMAP is not set +# CONFIG_PLAT_SPEAR is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_ARCH_MSM7X00A is not set +# CONFIG_ARCH_MSM7X30 is not set +CONFIG_ARCH_QSD8X50=y +# CONFIG_ARCH_MSM8X60 is not set +CONFIG_ARCH_MSM_SCORPION=y +CONFIG_HAS_MSM_DEBUG_UART_PHYS=y +CONFIG_MSM_MDP31=y + +# +# Qualcomm MSM Board Type +# +CONFIG_MACH_SWORDFISH=y +CONFIG_MACH_QSD8X50_SURF=y +# CONFIG_MACH_QSD8X50A_ST1_5 is not set +CONFIG_MSM_DEBUG_UART=1 +# CONFIG_MSM_DEBUG_UART_NONE is not set +CONFIG_MSM_DEBUG_UART1=y +# CONFIG_MSM_DEBUG_UART2 is not set +# CONFIG_MSM_DEBUG_UART3 is not set +CONFIG_MSM_PROC_COMM=y +CONFIG_MACH_MAHIMAHI=y +CONFIG_MACH_QSD8X50_FFA=y +# CONFIG_HTC_HEADSET is not set +CONFIG_HTC_35MM_JACK=y +CONFIG_HTC_PWRSPLY=y +# CONFIG_HTC_PWRSINK is not set +CONFIG_CACHE_FLUSH_RANGE_LIMIT=0x40000 +CONFIG_MSM7X00A_USE_GP_TIMER=y +# CONFIG_MSM7X00A_USE_DG_TIMER is not set +CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE_SUSPEND=y +# CONFIG_MSM7X00A_SLEEP_MODE_POWER_COLLAPSE is not set +# CONFIG_MSM7X00A_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_SLEEP_WAIT_FOR_INTERRUPT is not set +CONFIG_MSM7X00A_SLEEP_MODE=0 +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE_SUSPEND is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE=y +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_APPS_SLEEP is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT is not set +# CONFIG_MSM7X00A_IDLE_SLEEP_WAIT_FOR_INTERRUPT is not set 
+CONFIG_MSM7X00A_IDLE_SLEEP_MODE=1 +CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME=50000000 +CONFIG_MSM7X00A_IDLE_SPIN_TIME=80000 +CONFIG_MSM_IDLE_STATS=y +CONFIG_MSM_IDLE_STATS_FIRST_BUCKET=62500 +CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT=2 +CONFIG_MSM_IDLE_STATS_BUCKET_COUNT=10 +CONFIG_MSM_FIQ_SUPPORT=y +CONFIG_MSM_SERIAL_DEBUGGER=y +CONFIG_MSM_SERIAL_DEBUGGER_NO_SLEEP=y +# CONFIG_MSM_SERIAL_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON is not set +# CONFIG_MSM_SERIAL_DEBUGGER_CONSOLE is not set +CONFIG_MSM_SMD=y +CONFIG_MSM_GPIOMUX=y +CONFIG_MSM_DAL=y +CONFIG_MSM_ONCRPCROUTER=y +CONFIG_MSM_CPU_FREQ_SET_MIN_MAX=y +CONFIG_MSM_CPU_FREQ_MAX=998400 +CONFIG_MSM_CPU_FREQ_MIN=245000 +# CONFIG_AXI_SCREEN_POLICY is not set +CONFIG_MSM_HW3D=y +CONFIG_MSM_QDSP6=y +CONFIG_WIFI_CONTROL_FUNC=y +CONFIG_WIFI_MEM_PREALLOC=y +CONFIG_ARCH_MSM_FLASHLIGHT=y +# CONFIG_VIRTUAL_KPANIC_PARTITION is not set + +# +# System MMU +# + +# +# Processor Type +# +CONFIG_CPU_32v6K=y +CONFIG_CPU_V7=y +CONFIG_CPU_32v7=y +CONFIG_CPU_ABRT_EV7=y +CONFIG_CPU_PABRT_V7=y +CONFIG_CPU_CACHE_V7=y +CONFIG_CPU_CACHE_VIPT=y +CONFIG_CPU_COPY_V6=y +CONFIG_CPU_TLB_V7=y +CONFIG_VERIFY_PERMISSION_FAULT=y +CONFIG_CPU_HAS_ASID=y +CONFIG_CPU_CP15=y +CONFIG_CPU_CP15_MMU=y + +# +# Processor Features +# +CONFIG_ARM_THUMB=y +CONFIG_ARM_THUMBEE=y +# CONFIG_SWP_EMULATE is not set +# CONFIG_CPU_ICACHE_DISABLE is not set +# CONFIG_CPU_DCACHE_DISABLE is not set +# CONFIG_CPU_BPREDICT_DISABLE is not set +CONFIG_ARM_L1_CACHE_SHIFT=5 +CONFIG_ARM_DMA_MEM_BUFFERABLE=y +CONFIG_CPU_HAS_PMU=y +CONFIG_ARM_ERRATA_430973=y +CONFIG_ARM_ERRATA_458693=y +CONFIG_ARM_ERRATA_460075=y +CONFIG_ARM_ERRATA_743622=y +# CONFIG_FIQ_DEBUGGER is not set + +# +# Bus support +# +# CONFIG_PCI_SYSCALL is not set +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Kernel Features +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_VMSPLIT_3G=y +# CONFIG_VMSPLIT_2G is not set +# CONFIG_VMSPLIT_1G is not set +CONFIG_PAGE_OFFSET=0xC0000000 +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_HZ=100 +# CONFIG_THUMB2_KERNEL is not set +CONFIG_AEABI=y +# CONFIG_OABI_COMPAT is not set +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set +# CONFIG_HIGHMEM is not set +CONFIG_VMALLOC_RESERVE=0x08000000 +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_COMPACTION is not set +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_VIRT_TO_BUS=y +# CONFIG_KSM is not set +CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 +CONFIG_NEED_PER_CPU_KM=y +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_ALIGNMENT_TRAP=y +# CONFIG_UACCESS_WITH_MEMCPY is not set +# CONFIG_SECCOMP is not set +# CONFIG_CC_STACKPROTECTOR is not set +# CONFIG_DEPRECATED_PARAM_STRUCT is not set + +# +# Boot options +# +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_CMDLINE="mem=64M console=ttyMSM,115200n8" +# CONFIG_CMDLINE_FORCE is not set +# CONFIG_XIP_KERNEL is not set +# CONFIG_KEXEC is not set +# CONFIG_CRASH_DUMP is not set +# CONFIG_AUTO_ZRELADDR is not set + +# +# CPU Power Management +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_TABLE=y +# CONFIG_CPU_FREQ_DEBUG is not set +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_STAT_DETAILS=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set 
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_INTERACTIVE=y +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +# CONFIG_CPU_IDLE is not set +CONFIG_CPU_FREQ_MSM=y + +# +# Floating point emulation +# + +# +# At least one emulation must be selected +# +CONFIG_VFP=y +CONFIG_VFPv3=y +CONFIG_NEON=y + +# +# Userspace binary formats +# +CONFIG_BINFMT_ELF=y +CONFIG_HAVE_AOUT=y +# CONFIG_BINFMT_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options +# +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_SLEEP=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HAS_WAKELOCK=y +CONFIG_HAS_EARLYSUSPEND=y +CONFIG_WAKELOCK=y +CONFIG_WAKELOCK_STAT=y +CONFIG_USER_WAKELOCK=y +CONFIG_EARLYSUSPEND=y +# CONFIG_NO_USER_SPACE_SCREEN_ACCESS_CONTROL is not set +CONFIG_FB_EARLYSUSPEND=y +# CONFIG_APM_EMULATION is not set +# CONFIG_PM_RUNTIME is not set +CONFIG_PM_OPS=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=y +CONFIG_NET_KEY=y +# CONFIG_NET_KEY_MIGRATE is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +CONFIG_INET_ESP=y +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +CONFIG_IPV6=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +# CONFIG_IPV6_ROUTE_INFO is not set +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_INET6_XFRM_TUNNEL=y +CONFIG_INET6_TUNNEL=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +CONFIG_IPV6_SIT=y +# CONFIG_IPV6_SIT_6RD is not set +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=y +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +# CONFIG_IPV6_MROUTE is not set +CONFIG_ANDROID_PARANOID_NETWORK=y +CONFIG_NET_ACTIVITY_STATS=y +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_NETLINK=y +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y 
+CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=y +CONFIG_NETFILTER_XT_CONNMARK=y + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +# CONFIG_NETFILTER_XT_TARGET_CT is not set +# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set +CONFIG_NETFILTER_XT_TARGET_LED=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set +# CONFIG_NETFILTER_XT_TARGET_TEE is not set +CONFIG_NETFILTER_XT_TARGET_TRACE=y +# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set + +# +# Xtables matches +# +# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +# CONFIG_NETFILTER_XT_MATCH_CPU is not set +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +# CONFIG_NETFILTER_XT_MATCH_DSCP is not set +# CONFIG_NETFILTER_XT_MATCH_ESP is not set +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_HL=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set +# CONFIG_NETFILTER_XT_MATCH_OSF is not set +CONFIG_NETFILTER_XT_MATCH_OWNER=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set +# CONFIG_NETFILTER_XT_MATCH_REALM is not set +# CONFIG_NETFILTER_XT_MATCH_RECENT is not set +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +# CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_CONNTRACK_PROC_COMPAT=y +# CONFIG_IP_NF_QUEUE is not set +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_ADDRTYPE=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_TARGET_LOG=y +# CONFIG_IP_NF_TARGET_ULOG is not set +CONFIG_NF_NAT=y +CONFIG_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +# CONFIG_NF_NAT_SNMP_BASIC is not set +CONFIG_NF_NAT_PROTO_DCCP=y +CONFIG_NF_NAT_PROTO_GRE=y +CONFIG_NF_NAT_PROTO_UDPLITE=y +CONFIG_NF_NAT_PROTO_SCTP=y +CONFIG_NF_NAT_FTP=y +CONFIG_NF_NAT_IRC=y +CONFIG_NF_NAT_TFTP=y +CONFIG_NF_NAT_AMANDA=y +CONFIG_NF_NAT_PPTP=y +CONFIG_NF_NAT_H323=y +CONFIG_NF_NAT_SIP=y +# CONFIG_IP_NF_MANGLE is not set +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y + +# +# IPv6: Netfilter Configuration +# +# CONFIG_NF_DEFRAG_IPV6 is not set +# CONFIG_NF_CONNTRACK_IPV6 is not 
set +# CONFIG_IP6_NF_QUEUE is not set +# CONFIG_IP6_NF_IPTABLES is not set +# CONFIG_BRIDGE_NF_EBTABLES is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +CONFIG_STP=y +CONFIG_BRIDGE=y +CONFIG_BRIDGE_IGMP_SNOOPING=y +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +CONFIG_LLC=y +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +# CONFIG_NET_SCH_CBQ is not set +CONFIG_NET_SCH_HTB=y +# CONFIG_NET_SCH_HFSC is not set +# CONFIG_NET_SCH_PRIO is not set +# CONFIG_NET_SCH_MULTIQ is not set +# CONFIG_NET_SCH_RED is not set +# CONFIG_NET_SCH_SFQ is not set +# CONFIG_NET_SCH_TEQL is not set +# CONFIG_NET_SCH_TBF is not set +# CONFIG_NET_SCH_GRED is not set +# CONFIG_NET_SCH_DSMARK is not set +# CONFIG_NET_SCH_NETEM is not set +# CONFIG_NET_SCH_DRR is not set +CONFIG_NET_SCH_INGRESS=y + +# +# Classification +# +CONFIG_NET_CLS=y +# CONFIG_NET_CLS_BASIC is not set +# CONFIG_NET_CLS_TCINDEX is not set +# CONFIG_NET_CLS_ROUTE4 is not set +# CONFIG_NET_CLS_FW is not set +CONFIG_NET_CLS_U32=y +# CONFIG_CLS_U32_PERF is not set +# CONFIG_CLS_U32_MARK is not set +# CONFIG_NET_CLS_RSVP is not set +# CONFIG_NET_CLS_RSVP6 is not set +# CONFIG_NET_CLS_FLOW is not set +# CONFIG_NET_CLS_CGROUP is not set +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +# CONFIG_NET_EMATCH_CMP is not set +# CONFIG_NET_EMATCH_NBYTE is not set +CONFIG_NET_EMATCH_U32=y +# CONFIG_NET_EMATCH_META is not set +# CONFIG_NET_EMATCH_TEXT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=y +# CONFIG_GACT_PROB is not set +CONFIG_NET_ACT_MIRRED=y +# CONFIG_NET_ACT_IPT is not set +# CONFIG_NET_ACT_NAT is not set +# CONFIG_NET_ACT_PEDIT is not set +# CONFIG_NET_ACT_SIMP is not set +# CONFIG_NET_ACT_SKBEDIT is not set +# CONFIG_NET_ACT_CSUM is not set +# CONFIG_NET_CLS_IND is not set +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +CONFIG_BT=y +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +# CONFIG_BT_BNEP_MC_FILTER is not set +# CONFIG_BT_BNEP_PROTO_FILTER is not set +CONFIG_BT_HIDP=y + +# +# Bluetooth device drivers +# +# CONFIG_BT_HCIBTSDIO is not set +CONFIG_BT_HCIUART=y +CONFIG_BT_HCIUART_H4=y +# CONFIG_BT_HCIUART_BCSP is not set +# CONFIG_BT_HCIUART_ATH3K is not set +CONFIG_BT_HCIUART_LL=y +# CONFIG_BT_HCIVHCI is not set +# CONFIG_BT_MRVL is not set +# CONFIG_AF_RXRPC is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WIRELESS_EXT=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_WEXT_PRIV=y +# CONFIG_CFG80211 is not set +CONFIG_WIRELESS_EXT_SYSFS=y +# CONFIG_LIB80211 is not set + +# +# CFG80211 needs to be enabled for MAC80211 +# + +# +# Some wireless drivers require a rate control algorithm +# +# CONFIG_WIMAX is not set +CONFIG_RFKILL=y +# CONFIG_RFKILL_PM is not set +CONFIG_RFKILL_LEDS=y +# CONFIG_RFKILL_INPUT is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set + +# +# Device Drivers +# + +# +# Generic Driver 
Options +# +CONFIG_UEVENT_HELPER_PATH="" +# CONFIG_DEVTMPFS is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +CONFIG_MTD_MSM_NAND=y +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +CONFIG_MTD_NAND_IDS=y +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set +# CONFIG_MTD_UBI is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=y + +# +# DRBD disabled because PROC_FS, INET or CONNECTOR not selected +# +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_MG_DISK is not set +# CONFIG_BLK_DEV_RBD is not set +CONFIG_MISC_DEVICES=y +# CONFIG_AD525X_DPOT is not set +CONFIG_ANDROID_PMEM=y +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +CONFIG_KERNEL_DEBUGGER_CORE=y +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1780 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_SENSORS_AK8975 is not set +CONFIG_SENSORS_AKM8973=y +# CONFIG_SENSORS_AKM8976 is not set +CONFIG_VP_A1026=y +# CONFIG_DS1682 is not set +CONFIG_UID_STAT=y +# CONFIG_BMP085 is not set +# CONFIG_WL127X_RFKILL is not set +# CONFIG_APANIC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_IWMC3200TOP is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + 
+# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_ATA is not set +CONFIG_MD=y +# CONFIG_BLK_DEV_MD is not set +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=y +# CONFIG_DM_SNAPSHOT is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_RAID is not set +# CONFIG_DM_ZERO is not set +# CONFIG_DM_MULTIPATH is not set +# CONFIG_DM_DELAY is not set +CONFIG_DM_UEVENT=y +CONFIG_NETDEVICES=y +CONFIG_IFB=y +CONFIG_DUMMY=y +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +CONFIG_TUN=m +# CONFIG_VETH is not set +CONFIG_MII=y +# CONFIG_PHYLIB is not set +CONFIG_NET_ETHERNET=y +# CONFIG_AX88796 is not set +CONFIG_SMC91X=y +# CONFIG_DM9000 is not set +# CONFIG_ETHOC is not set +CONFIG_SMC911X=y +# CONFIG_SMSC911X is not set +# CONFIG_DNET is not set +# CONFIG_IBM_NEW_EMAC_ZMII is not set +# CONFIG_IBM_NEW_EMAC_RGMII is not set +# CONFIG_IBM_NEW_EMAC_TAH is not set +# CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set +# CONFIG_B44 is not set +# CONFIG_KS8851_MLL is not set +CONFIG_NETDEV_1000=y +# CONFIG_STMMAC_ETH is not set +CONFIG_NETDEV_10000=y +CONFIG_WLAN=y +CONFIG_BCM4329=m +CONFIG_BCM4329_FW_PATH="/system/etc/firmware/fw_bcm4329.bin" +CONFIG_BCM4329_NVRAM_PATH="/proc/calibration" +# CONFIG_HOSTAP is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set + +# +# CAIF transport drivers +# +CONFIG_PPP=y +# CONFIG_PPP_MULTILINK is not set +# CONFIG_PPP_FILTER is not set +CONFIG_PPP_ASYNC=y +# CONFIG_PPP_SYNC_TTY is not set +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_MPPE=y +# CONFIG_PPPOE is not set +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +# CONFIG_SLIP is not set +CONFIG_SLHC=y +# CONFIG_NETCONSOLE is not set +CONFIG_MSM_RMNET=y +# CONFIG_MSM_RMNET_DEBUG is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +CONFIG_GAN_ETH=y +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set +CONFIG_INPUT_KEYRESET=y + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_I2C is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_ELAN_I2C_8232 is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_QT602240 is not set +# CONFIG_TOUCHSCREEN_MSM is not set +CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI=y 
+CONFIG_TOUCHSCREEN_DUPLICATED_FILTER=y +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_W90X900 is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATI_REMOTE is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +CONFIG_INPUT_KEYCHORD=y +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_CMA3000 is not set +CONFIG_INPUT_CAPELLA_CM3602=y + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_N_GSM is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_SERIAL_MSM=y +CONFIG_SERIAL_MSM_CONSOLE=y +# CONFIG_SERIAL_MSM_CLOCK_CONTROL is not set +CONFIG_SERIAL_MSM_HS=y +CONFIG_SERIAL_BCM_BT_LPM=y +# CONFIG_SERIAL_TIMBERDALE is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_TTY_PRINTK is not set +# CONFIG_HVC_DCC is not set +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_DCC_TTY is not set +# CONFIG_RAMOOPS is not set +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +# CONFIG_I2C_CHARDEV is not set +# CONFIG_I2C_MUX is not set +CONFIG_I2C_HELPER_AUTO=y + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE is not set +# CONFIG_I2C_GPIO is not set +CONFIG_I2C_MSM=y +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_SPI is not set + +# +# PPS support +# +# CONFIG_PPS is not set + +# +# PPS generators support +# +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set + +# +# Memory mapped GPIO expanders: +# +# CONFIG_GPIO_BASIC_MMIO is not set +# CONFIG_GPIO_IT8761E is not set + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_SX150X is not set +# CONFIG_GPIO_ADP5588 is not set + +# +# PCI GPIO expanders: +# + +# +# SPI GPIO expanders: +# + +# +# AC97 GPIO expanders: +# + +# +# MODULbus GPIO expanders: +# +CONFIG_W1=y + +# +# 1-wire Bus Masters +# +CONFIG_W1_MASTER_DS2482=y +# CONFIG_W1_MASTER_DS1WM is not set +# CONFIG_W1_MASTER_GPIO is not set + +# +# 
1-wire Slaves +# +# CONFIG_W1_SLAVE_THERM is not set +# CONFIG_W1_SLAVE_SMEM is not set +# CONFIG_W1_SLAVE_DS2423 is not set +# CONFIG_W1_SLAVE_DS2431 is not set +# CONFIG_W1_SLAVE_DS2433 is not set +# CONFIG_W1_SLAVE_DS2760 is not set +# CONFIG_W1_SLAVE_BQ27000 is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2782 is not set +CONFIG_BATTERY_DS2784=y +# CONFIG_BATTERY_BQ20Z75 is not set +# CONFIG_BATTERY_BQ27x00 is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set +CONFIG_MFD_SUPPORT=y +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_ASIC3 is not set +# CONFIG_HTC_EGPIO is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_MFD_STMPE is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_T7L66XB is not set +# CONFIG_MFD_TC6387XB is not set +# CONFIG_MFD_TC6393XB is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_WL1273_CORE is not set +CONFIG_REGULATOR=y +CONFIG_REGULATOR_DEBUG=y +# CONFIG_REGULATOR_DUMMY is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_BQ24022 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +CONFIG_REGULATOR_TPS65023=y +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_AD5398 is not set +CONFIG_MEDIA_SUPPORT=y + +# +# Multimedia core support +# +# CONFIG_VIDEO_DEV is not set +# CONFIG_DVB_CORE is not set +# CONFIG_VIDEO_MEDIA is not set + +# +# Multimedia drivers +# +# CONFIG_RC_CORE is not set + +# +# Qualcomm MSM Camera And Video +# +CONFIG_MSM_CAMERA=y +CONFIG_720P_CAMERA=y +# CONFIG_MSM_CAMERA_DEBUG is not set + +# +# Camera Sensor Selection +# +# CONFIG_MT9T013 is not set +# CONFIG_MT9D112 is not set +# CONFIG_MT9P012 is not set +CONFIG_S5K3E2FX=y +# CONFIG_S5K6AAFX is not set +# CONFIG_OV8810 is not set +# CONFIG_OV9665 is not set +# CONFIG_S5K3H1GX is not set + +# +# Graphics support +# +# CONFIG_DRM is not set +# CONFIG_VGASTATE is not set +CONFIG_VIDEO_OUTPUT_CONTROL=y +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# 
CONFIG_FB_WMT_GE_ROPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_FB_MSM=y +CONFIG_FB_MSM_LEGACY_MDP=y +CONFIG_FB_MSM_MDP_PPP=y +CONFIG_FB_MSM_LCDC=y +# CONFIG_FB_MSM_MDDI is not set +CONFIG_GPU_MSM_KGSL=y +CONFIG_MSM_KGSL_MMU=y +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set +# CONFIG_LOGO is not set +# CONFIG_SOUND is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +# CONFIG_HIDRAW is not set +# CONFIG_HID_PID is not set + +# +# Special HID drivers +# +# CONFIG_HID_APPLE is not set +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_WACOM is not set +CONFIG_USB_SUPPORT=y +CONFIG_USB_ARCH_HAS_HCD=y +# CONFIG_USB_ARCH_HAS_OHCI is not set +CONFIG_USB_ARCH_HAS_EHCI=y +# CONFIG_USB is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_MUSB_HDRC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_GADGET_SELECTED=y +# CONFIG_USB_GADGET_R8A66597 is not set +# CONFIG_USB_GADGET_PXA_U2O is not set +# CONFIG_USB_GADGET_M66592 is not set +# CONFIG_USB_GADGET_CI13XXX_MSM is not set +CONFIG_USB_GADGET_MSM_72K=y +CONFIG_USB_MSM_72K=y +CONFIG_USB_GADGET_DUALSPEED=y +# CONFIG_USB_ZERO is not set +# CONFIG_USB_ETH is not set +# CONFIG_USB_G_NCM is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_FILE_STORAGE is not set +# CONFIG_USB_MASS_STORAGE is not set +# CONFIG_USB_G_SERIAL is not set +# CONFIG_USB_G_PRINTER is not set +CONFIG_USB_ANDROID=y +# CONFIG_USB_ANDROID_ACM is not set +CONFIG_USB_ANDROID_ADB=y +CONFIG_USB_ANDROID_DIAG=y +CONFIG_USB_ANDROID_MASS_STORAGE=y +# CONFIG_USB_ANDROID_MTP is not set +CONFIG_USB_ANDROID_RNDIS=y +CONFIG_USB_ANDROID_RNDIS_WCEIS=y +CONFIG_USB_ANDROID_ACCESSORY=y +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set + +# +# OTG and related infrastructure +# +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ULPI is not set +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_MSM_OTG_72K is not set +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_UNSAFE_RESUME=y +# CONFIG_MMC_CLKGATE is not set +CONFIG_MMC_EMBEDDED_SDIO=y +CONFIG_MMC_PARANOID_SD_INIT=y + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=8 +# CONFIG_MMC_BLOCK_BOUNCE is not set +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_SDHCI is not set +CONFIG_MMC_MSM=y +# CONFIG_MMC_DW is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y + +# +# LED drivers +# +# CONFIG_LEDS_PCA9532 is not set +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_GPIO_PLATFORM=y +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 is not set +CONFIG_LEDS_CPLD=y +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_REGULATOR is not set +# 
CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_LT3593 is not set +CONFIG_LEDS_TRIGGERS=y + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set +CONFIG_LEDS_TRIGGER_SLEEP=y + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_NFC_DEVICES is not set +CONFIG_SWITCH=y +CONFIG_SWITCH_GPIO=y +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +# CONFIG_RTC_INTF_SYSFS is not set +# CONFIG_RTC_INTF_PROC is not set +# CONFIG_RTC_INTF_DEV is not set +CONFIG_RTC_INTF_ALARM=y +CONFIG_RTC_INTF_ALARM_DEV=y +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set + +# +# SPI RTC drivers +# + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_CMOS is not set +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_MSM7X00A=y +# CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set +CONFIG_STAGING=y +# CONFIG_STAGING_EXCLUDE_BUILD is not set +# CONFIG_ECHO is not set +# CONFIG_BRCM80211 is not set +# CONFIG_COMEDI is not set + +# +# Android +# +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_LOGGER=y +CONFIG_ANDROID_RAM_CONSOLE=y +CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE=128 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE=16 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE=8 +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL=0x11d +# CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT is not set +CONFIG_ANDROID_TIMED_OUTPUT=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +# CONFIG_POHMELFS is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_IIO is not set +# CONFIG_ZRAM is not set +# CONFIG_FB_SM7XX is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_ST_BT is not set +# CONFIG_SMB_FS is not set +CONFIG_MACH_NO_WESTBRIDGE=y +# CONFIG_ATH6K_LEGACY is not set +# CONFIG_FT1000 is not set + +# +# Speakup console speech +# +# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set +# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT2_FS_XIP is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y 
+CONFIG_EXT4_USE_FOR_EXT23=y +CONFIG_EXT4_FS_XATTR=y +# CONFIG_EXT4_FS_POSIX_ACL is not set +# CONFIG_EXT4_FS_SECURITY is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +# CONFIG_DNOTIFY is not set +CONFIG_INOTIFY_USER=y +# CONFIG_FANOTIFY is not set +# CONFIG_QUOTA is not set +# CONFIG_QUOTACTL is not set +# CONFIG_AUTOFS4_FS is not set +CONFIG_FUSE_FS=m +# CONFIG_CUSE is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +# CONFIG_MSDOS_FS is not set +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_YAFFS_FS=y +CONFIG_YAFFS_YAFFS1=y +# CONFIG_YAFFS_9BYTE_TAGS is not set +# CONFIG_YAFFS_DOES_ECC is not set +CONFIG_YAFFS_YAFFS2=y +CONFIG_YAFFS_AUTO_YAFFS2=y +CONFIG_YAFFS_DISABLE_TAGS_ECC=y +# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set +# CONFIG_YAFFS_EMPTY_LOST_AND_FOUND is not set +# CONFIG_YAFFS_DISABLE_BLOCK_REFRESHING is not set +# CONFIG_YAFFS_DISABLE_BACKGROUND is not set +CONFIG_YAFFS_XATTR=y +# CONFIG_JFFS2_FS is not set +# CONFIG_LOGFS is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +CONFIG_NFS_V3=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +# CONFIG_NFS_V4_1 is not set +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +# CONFIG_NFS_USE_NEW_IDMAPPER is not set +# CONFIG_NFSD is not set +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_RPCSEC_GSS_KRB5=m +# CONFIG_CEPH_FS is not set +CONFIG_CIFS=m +CONFIG_CIFS_STATS=y +CONFIG_CIFS_STATS2=y +CONFIG_CIFS_WEAK_PW_HASH=y +# CONFIG_CIFS_UPCALL is not set +CONFIG_CIFS_XATTR=y +# CONFIG_CIFS_POSIX is not set +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DFS_UPCALL is not set +# CONFIG_CIFS_ACL is not set +# CONFIG_CIFS_EXPERIMENTAL is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# 
CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +CONFIG_NLS_UTF8=y + +# +# Kernel hacking +# +CONFIG_PRINTK_TIME=y +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SHIRQ is not set +# CONFIG_LOCKUP_DETECTOR is not set +# CONFIG_HARDLOCKUP_DETECTOR is not set +# CONFIG_DETECT_HUNG_TASK is not set +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +CONFIG_DEBUG_MUTEXES=y +CONFIG_BKL=y +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_SPARSE_RCU_POINTER is not set +# CONFIG_LOCK_STAT is not set +CONFIG_DEBUG_SPINLOCK_SLEEP=y +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_STACKTRACE=y +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set +CONFIG_FRAME_POINTER=y +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# CONFIG_LKDTM is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +# CONFIG_SYSCTL_SYSCALL_CHECK is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_ENABLE_DEFAULT_TRACERS is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# 
CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_STRICT_DEVMEM is not set +# CONFIG_ARM_UNWIND is not set +# CONFIG_DEBUG_USER is not set +# CONFIG_DEBUG_ERRORS is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_LL is not set +# CONFIG_OC_ETM is not set + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_DEBUG_PROC_KEYS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_GHASH is not set +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=y +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_TWOFISH_COMMON=y + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set +CONFIG_CRYPTO_HW=y +# CONFIG_BINARY_PRINTF is not set + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +# CONFIG_XZ_DEC is not set +# CONFIG_XZ_DEC_BCJ is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y 
+CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=y +CONFIG_TEXTSEARCH_BM=y +CONFIG_TEXTSEARCH_FSM=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/arm/configs/surf7x30_defconfig b/arch/arm/configs/surf7x30_defconfig new file mode 100644 index 0000000000000..0e9df11ed7c0f --- /dev/null +++ b/arch/arm/configs/surf7x30_defconfig @@ -0,0 +1,238 @@ +CONFIG_EXPERIMENTAL=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_RESOURCE_COUNTERS=y +CONFIG_CGROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_EMBEDDED=y +# CONFIG_SYSCTL_SYSCALL is not set +# CONFIG_ELF_CORE is not set +CONFIG_ASHMEM=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +CONFIG_ARCH_MSM=y +CONFIG_ARCH_MSM7X30=y +CONFIG_MACH_MSM7X30_FLUID=y +CONFIG_MSM_DEBUG_UART2=y +# CONFIG_HTC_PWRSPLY is not set +# CONFIG_HTC_PWRSINK is not set +CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME=50000000 +# CONFIG_MSM_HW3D is not set +CONFIG_MSM_SSBI=y +CONFIG_WIFI_CONTROL_FUNC=y +CONFIG_ARM_THUMBEE=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y +CONFIG_AEABI=y +# CONFIG_OABI_COMPAT is not set +CONFIG_HIGHMEM=y +CONFIG_VMALLOC_RESERVE=0x30000000 +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_CMDLINE="console=ttyMSM,115200n8" +CONFIG_VFP=y +CONFIG_NEON=y +CONFIG_PM=y +CONFIG_WAKELOCK=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_INET_ESP=y +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +# CONFIG_INET_DIAG is not set +CONFIG_IPV6=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_TUNNEL=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_BT=y +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +CONFIG_BT_HIDP=y +CONFIG_BT_HCIUART=y +CONFIG_BT_HCIUART_H4=y +CONFIG_BT_HCIUART_LL=y +CONFIG_RFKILL=y +# CONFIG_RFKILL_PM is not set +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_MTD=y +CONFIG_MTD_PARTITIONS=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLOCK=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_MISC_DEVICES=y +CONFIG_KERNEL_DEBUGGER_CORE=y +CONFIG_UID_STAT=y +CONFIG_APANIC=y +CONFIG_APANIC_PLABEL="crashdata" +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=y +CONFIG_DM_UEVENT=y +CONFIG_NETDEVICES=y +CONFIG_DUMMY=y +CONFIG_NET_ETHERNET=y +CONFIG_SMC91X=y +CONFIG_PPP=y +CONFIG_PPP_ASYNC=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_MPPE=y +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +# CONFIG_INPUT_MOUSEDEV is not set +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_KEYRESET=y +# CONFIG_KEYBOARD_ATKBD is not set +CONFIG_KEYBOARD_PM8058=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_CYTTSP_I2C=y +CONFIG_TOUCHSCREEN_MSM=y +CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_KEYCHORD=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_SERIO is not set +# CONFIG_VT is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_MSM=y +CONFIG_SERIAL_MSM_CONSOLE=y +# CONFIG_SERIAL_MSM_CLOCK_CONTROL is not set +# CONFIG_LEGACY_PTYS is not set +# 
CONFIG_HW_RANDOM is not set +CONFIG_I2C=y +CONFIG_SPI=y +CONFIG_SPI_QSD=y +CONFIG_POWER_SUPPLY=y +CONFIG_CHARGER_PM8058=y +# CONFIG_HWMON is not set +CONFIG_REGULATOR=y +CONFIG_REGULATOR_DEBUG=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_VIDEO_DEV=y +# CONFIG_VIDEO_ALLOW_V4L1 is not set +# CONFIG_IR_CORE is not set +# CONFIG_MEDIA_TUNER_SIMPLE is not set +# CONFIG_MEDIA_TUNER_TDA8290 is not set +# CONFIG_MEDIA_TUNER_TDA827X is not set +# CONFIG_MEDIA_TUNER_TDA18271 is not set +# CONFIG_MEDIA_TUNER_TDA9887 is not set +# CONFIG_MEDIA_TUNER_TEA5761 is not set +# CONFIG_MEDIA_TUNER_TEA5767 is not set +# CONFIG_MEDIA_TUNER_MT20XX is not set +# CONFIG_MEDIA_TUNER_MT2060 is not set +# CONFIG_MEDIA_TUNER_MT2266 is not set +# CONFIG_MEDIA_TUNER_MT2131 is not set +# CONFIG_MEDIA_TUNER_QT1010 is not set +# CONFIG_MEDIA_TUNER_XC2028 is not set +# CONFIG_MEDIA_TUNER_XC5000 is not set +# CONFIG_MEDIA_TUNER_MXL5005S is not set +# CONFIG_MEDIA_TUNER_MXL5007T is not set +# CONFIG_MEDIA_TUNER_MC44S803 is not set +# CONFIG_MEDIA_TUNER_MAX2165 is not set +# CONFIG_MEDIA_TUNER_TDA18218 is not set +CONFIG_VIDEO_HELPER_CHIPS_AUTO=y +CONFIG_DAB=y +CONFIG_VIDEO_OUTPUT_CONTROL=y +CONFIG_FB=y +CONFIG_GPU_MSM_KGSL=y +CONFIG_MSM_KGSL_MMU=y +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_GADGET_MSM_72K=y +CONFIG_USB_ANDROID=y +CONFIG_USB_ANDROID_ACM=y +CONFIG_USB_ANDROID_ADB=y +CONFIG_USB_ANDROID_DIAG=y +CONFIG_USB_ANDROID_MASS_STORAGE=y +CONFIG_MMC=y +CONFIG_MMC_UNSAFE_RESUME=y +CONFIG_MMC_EMBEDDED_SDIO=y +CONFIG_MMC_PARANOID_SD_INIT=y +# CONFIG_MMC_BLOCK_BOUNCE is not set +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +CONFIG_LEDS_TRIGGER_SLEEP=y +CONFIG_SWITCH=y +CONFIG_SWITCH_GPIO=y +CONFIG_RTC_CLASS=y +# CONFIG_RTC_INTF_SYSFS is not set +# CONFIG_RTC_INTF_PROC is not set +# CONFIG_RTC_INTF_DEV is not set +# CONFIG_RTC_DRV_MSM7X00A is not set +CONFIG_STAGING=y +# CONFIG_STAGING_EXCLUDE_BUILD is not set +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_LOGGER=y +CONFIG_ANDROID_RAM_CONSOLE=y +CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +# CONFIG_DNOTIFY is not set +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_YAFFS_FS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_PRINTK_TIME=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +CONFIG_SLUB_DEBUG_ON=y +# CONFIG_DEBUG_PREEMPT is not set +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_SPINLOCK_SLEEP=y +CONFIG_DEBUG_HIGHMEM=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_VM=y +CONFIG_DEBUG_SG=y +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +# CONFIG_ARM_UNWIND is not set +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_TWOFISH=y +# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/arm/configs/swordfish_defconfig b/arch/arm/configs/swordfish_defconfig new file mode 100644 index 0000000000000..38fc1f1d351dd --- /dev/null +++ b/arch/arm/configs/swordfish_defconfig @@ -0,0 +1,167 @@ +CONFIG_EXPERIMENTAL=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_EMBEDDED=y +# CONFIG_SYSCTL_SYSCALL is not set +# CONFIG_ELF_CORE is not set 
+CONFIG_ASHMEM=y +CONFIG_SLAB=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +CONFIG_ARCH_MSM=y +CONFIG_ARCH_QSD8X50=y +CONFIG_MSM_DEBUG_UART3=y +# CONFIG_HTC_PWRSINK is not set +CONFIG_MSM7X00A_SLEEP_WAIT_FOR_INTERRUPT=y +CONFIG_MSM7X00A_IDLE_SLEEP_WAIT_FOR_INTERRUPT=y +# CONFIG_MSM_IDLE_STATS is not set +# CONFIG_MSM_FIQ_SUPPORT is not set +# CONFIG_MSM_HW3D is not set +# CONFIG_MSM_ADSP is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y +CONFIG_AEABI=y +# CONFIG_OABI_COMPAT is not set +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_CMDLINE="mem=64M console=ttyMSM,115200n8" +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT_DETAILS=y +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +CONFIG_PM=y +CONFIG_WAKELOCK=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +# CONFIG_INET_DIAG is not set +# CONFIG_IPV6 is not set +CONFIG_BT=y +CONFIG_BT_L2CAP=y +CONFIG_BT_SCO=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +CONFIG_BT_HIDP=y +CONFIG_BT_HCIUART=y +CONFIG_BT_HCIUART_H4=y +CONFIG_BT_HCIUART_LL=y +CONFIG_RFKILL=y +# CONFIG_RFKILL_PM is not set +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_MTD=y +CONFIG_MTD_PARTITIONS=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLOCK=y +CONFIG_KERNEL_DEBUGGER_CORE=y +CONFIG_UID_STAT=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=y +CONFIG_DM_UEVENT=y +CONFIG_NETDEVICES=y +CONFIG_DUMMY=y +CONFIG_NET_ETHERNET=y +CONFIG_SMC91X=y +CONFIG_PPP=y +CONFIG_PPP_ASYNC=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_BSDCOMP=y +# CONFIG_INPUT_MOUSEDEV is not set +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_KEYRESET=y +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_MSM=y +CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_KEYCHORD=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_SERIO is not set +# CONFIG_VT is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_MSM=y +CONFIG_SERIAL_MSM_CONSOLE=y +# CONFIG_SERIAL_MSM_CLOCK_CONTROL is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_HW_RANDOM is not set +CONFIG_I2C=y +CONFIG_POWER_SUPPLY=y +# CONFIG_HWMON is not set +CONFIG_VIDEO_OUTPUT_CONTROL=y +CONFIG_FB=y +CONFIG_GPU_MSM_KGSL=y +CONFIG_MMC=y +CONFIG_MMC_UNSAFE_RESUME=y +CONFIG_MMC_EMBEDDED_SDIO=y +CONFIG_MMC_PARANOID_SD_INIT=y +# CONFIG_MMC_BLOCK_BOUNCE is not set +CONFIG_MMC_MSM7X00A=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +CONFIG_LEDS_TRIGGER_SLEEP=y +CONFIG_SWITCH=y +CONFIG_SWITCH_GPIO=y +CONFIG_RTC_CLASS=y +# CONFIG_RTC_INTF_SYSFS is not set +# CONFIG_RTC_INTF_PROC is not set +# CONFIG_RTC_INTF_DEV is not set +# CONFIG_RTC_DRV_MSM7X00A is not set +CONFIG_STAGING=y +# CONFIG_STAGING_EXCLUDE_BUILD is not set +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_LOGGER=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +# CONFIG_DNOTIFY is not set +CONFIG_INOTIFY=y +CONFIG_VFAT_FS=y 
+CONFIG_TMPFS=y +CONFIG_YAFFS_FS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_PRINTK_TIME=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_KERNEL=y +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_SPINLOCK_SLEEP=y +# CONFIG_DEBUG_BUGVERBOSE is not set +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_VM=y +CONFIG_DEBUG_SG=y +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +CONFIG_DEBUG_LL=y +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_TWOFISH=y +# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index f533a6a71924e..994e59b0c8f0f 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -204,6 +204,21 @@ * DMA Cache Coherency * =================== * + * dma_inv_range(start, end) + * + * Invalidate (discard) the specified virtual address range. + * May not write back any entries. If 'start' or 'end' + * are not cache line aligned, those lines must be written + * back. + * - start - virtual start address + * - end - virtual end address + * + * dma_clean_range(start, end) + * + * Clean (write back) the specified virtual address range. + * - start - virtual start address + * - end - virtual end address + * * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. @@ -224,6 +239,8 @@ struct cpu_cache_fns { void (*dma_map_area)(const void *, size_t, int); void (*dma_unmap_area)(const void *, size_t, int); + void (*dma_inv_range)(const void *, const void *); + void (*dma_clean_range)(const void *, const void *); void (*dma_flush_range)(const void *, const void *); }; @@ -250,6 +267,8 @@ extern struct cpu_cache_fns cpu_cache; */ #define dmac_map_area cpu_cache.dma_map_area #define dmac_unmap_area cpu_cache.dma_unmap_area +#define dmac_inv_range cpu_cache.dma_inv_range +#define dmac_clean_range cpu_cache.dma_clean_range #define dmac_flush_range cpu_cache.dma_flush_range #else @@ -278,10 +297,14 @@ extern void __cpuc_flush_dcache_area(void *, size_t); */ #define dmac_map_area __glue(_CACHE,_dma_map_area) #define dmac_unmap_area __glue(_CACHE,_dma_unmap_area) +#define dmac_inv_range __glue(_CACHE,_dma_inv_range) +#define dmac_clean_range __glue(_CACHE,_dma_clean_range) #define dmac_flush_range __glue(_CACHE,_dma_flush_range) extern void dmac_map_area(const void *, size_t, int); extern void dmac_unmap_area(const void *, size_t, int); +extern void dmac_inv_range(const void *, const void *); +extern void dmac_clean_range(const void *, const void *); extern void dmac_flush_range(const void *, const void *); #endif diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index 20ae96cc0020e..8d056b865962c 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h @@ -7,6 +7,7 @@ #define CPUID_CACHETYPE 1 #define CPUID_TCM 2 #define CPUID_TLBTYPE 3 +#define CPUID_MPIDR 5 #define CPUID_EXT_PFR0 "c1, 0" #define CPUID_EXT_PFR1 "c1, 1" @@ -68,6 +69,11 @@ static inline unsigned int __attribute_const__ read_cpuid_tcmstatus(void) return read_cpuid(CPUID_TCM); } +static inline unsigned int __attribute_const__ read_cpuid_mpidr(void) +{ + return read_cpuid(CPUID_MPIDR); +} + /* * Intel's XScale3 core supports some v6 features (supersections, L2) * but advertises itself as v5 as it does not support the v6 ISA. 
For diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h index b2deda1815496..5c6b9a3c5df52 100644 --- a/arch/arm/include/asm/delay.h +++ b/arch/arm/include/asm/delay.h @@ -8,7 +8,7 @@ #include /* HZ */ -extern void __delay(int loops); +extern void __delay(unsigned long loops); /* * This function intentionally does not exist; if you see references to @@ -40,5 +40,8 @@ extern void __const_udelay(unsigned long); __const_udelay((n) * ((2199023U*HZ)>>11))) : \ __udelay(n)) +extern void set_delay_fn(void (*fn)(unsigned long)); +extern void read_current_timer_delay_loop(unsigned long loops); + #endif /* defined(_ARM_DELAY_H) */ diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 4fff837363edd..6f48921b08d61 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -172,6 +172,46 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size, { } + +/* + * dma_coherent_pre_ops - barrier functions for coherent memory before DMA. + * A barrier is required to ensure memory operations are complete before the + * initiation of a DMA xfer. + * If the coherent memory is Strongly Ordered + * - pre ARMv7 and 8x50 guarantees ordering wrt other mem accesses + * - ARMv7 guarantees ordering only within a 1KB block, so we need a barrier + * If coherent memory is normal then we need a barrier to prevent + * reordering + */ +static inline void dma_coherent_pre_ops(void) +{ +#if COHERENT_IS_NORMAL == 1 + dmb(); +#else + if (arch_is_coherent()) + dmb(); + else + barrier(); +#endif +} +/* + * dma_post_coherent_ops - barrier functions for coherent memory after DMA. + * If the coherent memory is Strongly Ordered we dont need a barrier since + * there are no speculative fetches to Strongly Ordered memory. + * If coherent memory is normal then we need a barrier to prevent reordering + */ +static inline void dma_coherent_post_ops(void) +{ +#if COHERENT_IS_NORMAL == 1 + dmb(); +#else + if (arch_is_coherent()) + dmb(); + else + barrier(); +#endif +} + /** * dma_alloc_coherent - allocate consistent memory for DMA * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices @@ -385,6 +425,58 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, return addr; } +/** + * dma_cache_pre_ops - clean or invalidate cache before dma transfer is + * initiated and perform a barrier operation. + * @virtual_addr: A kernel logical or kernel virtual address + * @size: size of buffer to map + * @dir: DMA transfer direction + * + * Ensure that any data held in the cache is appropriately discarded + * or written back. + * + */ +static inline void dma_cache_pre_ops(void *virtual_addr, + size_t size, enum dma_data_direction dir) +{ + extern void ___dma_single_cpu_to_dev(const void *, size_t, + enum dma_data_direction); + + BUG_ON(!valid_dma_direction(dir)); + + if (!arch_is_coherent()) + ___dma_single_cpu_to_dev(virtual_addr, size, dir); +} + +/** + * dma_cache_post_ops - clean or invalidate cache after dma transfer is + * initiated and perform a barrier operation. + * @virtual_addr: A kernel logical or kernel virtual address + * @size: size of buffer to map + * @dir: DMA transfer direction + * + * Ensure that any data held in the cache is appropriately discarded + * or written back. 
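A minimal usage sketch of the cache helpers introduced here (the my_hw_* hooks and the example_rx wrapper are hypothetical placeholders, not taken from this patch): dma_cache_pre_ops() writes back or discards the buffer's cache lines before the device touches memory, and dma_cache_post_ops(), defined just below, discards anything the CPU may have speculatively refetched while the transfer ran.

	#include <linux/dma-mapping.h>

	/* hypothetical device hooks, stand-ins for a real DMA engine */
	extern void my_hw_start_rx(void *buf, size_t len);
	extern void my_hw_wait_rx_done(void);

	static void example_rx(void *buf, size_t len)
	{
		/* make the CPU cache safe before the device writes the buffer */
		dma_cache_pre_ops(buf, len, DMA_FROM_DEVICE);

		my_hw_start_rx(buf, len);
		my_hw_wait_rx_done();

		/* drop lines speculatively fetched while the DMA was in flight */
		dma_cache_post_ops(buf, len, DMA_FROM_DEVICE);
	}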
+ * + */ +static inline void dma_cache_post_ops(void *virtual_addr, + size_t size, enum dma_data_direction dir) +{ + extern void ___dma_single_cpu_to_dev(const void *, size_t, + enum dma_data_direction); + + BUG_ON(!valid_dma_direction(dir)); + + if (arch_has_speculative_dfetch() && !arch_is_coherent() + && dir != DMA_TO_DEVICE) + /* + * Treat DMA_BIDIRECTIONAL and DMA_FROM_DEVICE + * identically: invalidate + */ + ___dma_single_cpu_to_dev(virtual_addr, + size, DMA_FROM_DEVICE); +} + /** * dma_map_page - map a portion of a page for streaming DMA * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h index af18ceaacf5d2..0854849e4d969 100644 --- a/arch/arm/include/asm/domain.h +++ b/arch/arm/include/asm/domain.h @@ -2,6 +2,7 @@ * arch/arm/include/asm/domain.h * * Copyright (C) 1999 Russell King. + * Copyright (c) 2009, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -27,8 +28,13 @@ * * 36-bit addressing and supersections are only available on * CPUs based on ARMv6+ or the Intel XSC3 core. + * + * We cannot use domain 0 for the kernel on QSD8x50 since the kernel domain + * is set to manager mode when set_fs(KERNEL_DS) is called. Setting domain 0 + * to manager mode will disable the workaround for a cpu bug that can cause an + * invalid fault status and/or tlb corruption (CONFIG_VERIFY_PERMISSION_FAULT). */ -#ifndef CONFIG_IO_36 +#if !defined(CONFIG_IO_36) && !defined(CONFIG_VERIFY_PERMISSION_FAULT) #define DOMAIN_KERNEL 0 #define DOMAIN_TABLE 0 #define DOMAIN_USER 1 @@ -56,6 +62,17 @@ #ifndef __ASSEMBLY__ #ifdef CONFIG_CPU_USE_DOMAINS +#ifdef CONFIG_EMULATE_DOMAIN_MANAGER_V7 +void emulate_domain_manager_set(u32 domain); +int emulate_domain_manager_data_abort(u32 dfsr, u32 dfar); +int emulate_domain_manager_prefetch_abort(u32 ifsr, u32 ifar); +void emulate_domain_manager_switch_mm( + unsigned long pgd_phys, + struct mm_struct *mm, + void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *)); + +#define set_domain(x) emulate_domain_manager_set(x) +#else #define set_domain(x) \ do { \ __asm__ __volatile__( \ @@ -63,6 +80,7 @@ : : "r" (x)); \ isb(); \ } while (0) +#endif #define modify_domain(dom,type) \ do { \ diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S index ec0bbf79c71fd..febdb7d4c5922 100644 --- a/arch/arm/include/asm/entry-macro-multi.S +++ b/arch/arm/include/asm/entry-macro-multi.S @@ -2,8 +2,8 @@ * Interrupt handling. 
Preserves r7, r8, r9 */ .macro arch_irq_handler_default - get_irqnr_preamble r5, lr -1: get_irqnr_and_base r0, r6, r5, lr + get_irqnr_preamble r6, lr +1: get_irqnr_and_base r0, r2, r6, lr movne r1, sp @ @ routine called with r0 = irq number, r1 = struct pt_regs * @@ -15,17 +15,17 @@ /* * XXX * - * this macro assumes that irqstat (r6) and base (r5) are + * this macro assumes that irqstat (r2) and base (r6) are * preserved from get_irqnr_and_base above */ - ALT_SMP(test_for_ipi r0, r6, r5, lr) + ALT_SMP(test_for_ipi r0, r2, r6, lr) ALT_UP_B(9997f) movne r1, sp adrne lr, BSYM(1b) bne do_IPI #ifdef CONFIG_LOCAL_TIMERS - test_for_ltirq r0, r6, r5, lr + test_for_ltirq r0, r2, r6, lr movne r0, sp adrne lr, BSYM(1b) bne do_local_timer @@ -38,7 +38,7 @@ .align 5 .global \symbol_name \symbol_name: - mov r4, lr + mov r8, lr arch_irq_handler_default - mov pc, r4 + mov pc, r8 .endm diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h index b33fe7065b386..8c73900da9ed0 100644 --- a/arch/arm/include/asm/futex.h +++ b/arch/arm/include/asm/futex.h @@ -3,16 +3,74 @@ #ifdef __KERNEL__ +#if defined(CONFIG_CPU_USE_DOMAINS) && defined(CONFIG_SMP) +/* ARM doesn't provide unprivileged exclusive memory accessors */ +#include +#else + +#include +#include +#include + +#define __futex_atomic_ex_table(err_reg) \ + "3:\n" \ + " .pushsection __ex_table,\"a\"\n" \ + " .align 3\n" \ + " .long 1b, 4f, 2b, 4f\n" \ + " .popsection\n" \ + " .pushsection .fixup,\"ax\"\n" \ + "4: mov %0, " err_reg "\n" \ + " b 3b\n" \ + " .popsection" + #ifdef CONFIG_SMP -#include +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ + smp_mb(); \ + __asm__ __volatile__( \ + "1: ldrex %1, [%2]\n" \ + " " insn "\n" \ + "2: strex %1, %0, [%2]\n" \ + " teq %1, #0\n" \ + " bne 1b\n" \ + " mov %0, #0\n" \ + __futex_atomic_ex_table("%4") \ + : "=&r" (ret), "=&r" (oldval) \ + : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ + : "cc", "memory") + +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + int ret; + u32 val; + + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + return -EFAULT; + + smp_mb(); + __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" + "1: ldrex %1, [%4]\n" + " teq %1, %2\n" + " ite eq @ explicit IT needed for the 2b label\n" + "2: strexeq %0, %3, [%4]\n" + " movne %0, #0\n" + " teq %0, #0\n" + " bne 1b\n" + __futex_atomic_ex_table("%5") + : "=&r" (ret), "=&r" (val) + : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) + : "cc", "memory"); + smp_mb(); + + *uval = val; + return ret; +} #else /* !SMP, we can work around lack of atomic ops by disabling preemption */ -#include #include -#include -#include #include #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ @@ -21,21 +79,39 @@ " " insn "\n" \ "2: " T(str) " %0, [%2]\n" \ " mov %0, #0\n" \ - "3:\n" \ - " .pushsection __ex_table,\"a\"\n" \ - " .align 3\n" \ - " .long 1b, 4f, 2b, 4f\n" \ - " .popsection\n" \ - " .pushsection .fixup,\"ax\"\n" \ - "4: mov %0, %4\n" \ - " b 3b\n" \ - " .popsection" \ + __futex_atomic_ex_table("%4") \ : "=&r" (ret), "=&r" (oldval) \ : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ : "cc", "memory") static inline int -futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + int ret = 0; + u32 val; + + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + return -EFAULT; + + __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" + "1: " T(ldr) " %1, [%4]\n" + " teq 
%1, %2\n" + " it eq @ explicit IT needed for the 2b label\n" + "2: " T(streq) " %3, [%4]\n" + __futex_atomic_ex_table("%5") + : "+r" (ret), "=&r" (val) + : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) + : "cc", "memory"); + + *uval = val; + return ret; +} + +#endif /* !SMP */ + +static inline int +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -46,7 +122,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); /* implies preempt_disable() */ @@ -87,40 +163,6 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) return ret; } -static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) -{ - int val; - - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) - return -EFAULT; - - pagefault_disable(); /* implies preempt_disable() */ - - __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" - "1: " T(ldr) " %0, [%3]\n" - " teq %0, %1\n" - " it eq @ explicit IT needed for the 2b label\n" - "2: " T(streq) " %2, [%3]\n" - "3:\n" - " .pushsection __ex_table,\"a\"\n" - " .align 3\n" - " .long 1b, 4f, 2b, 4f\n" - " .popsection\n" - " .pushsection .fixup,\"ax\"\n" - "4: mov %0, %4\n" - " b 3b\n" - " .popsection" - : "=&r" (val) - : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) - : "cc", "memory"); - - pagefault_enable(); /* subsumes preempt_enable() */ - - return val; -} - -#endif /* !SMP */ - +#endif /* !(CPU_USE_DOMAINS && SMP) */ #endif /* __KERNEL__ */ #endif /* _ASM_ARM_FUTEX_H */ diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h index 16bd48031583d..1fc2f49a9546d 100644 --- a/arch/arm/include/asm/hardware/cache-l2x0.h +++ b/arch/arm/include/asm/hardware/cache-l2x0.h @@ -59,6 +59,7 @@ /* Registers shifts and masks */ #define L2X0_CACHE_ID_PART_MASK (0xf << 6) #define L2X0_CACHE_ID_PART_L210 (1 << 6) +#define L2X0_CACHE_ID_PART_L220 (2 << 6) #define L2X0_CACHE_ID_PART_L310 (3 << 6) #define L2X0_AUX_CTRL_MASK 0xc0000fff @@ -71,9 +72,13 @@ #define L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT 28 #define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT 29 #define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT 30 +#define L2X0_AUX_CTRL_EVNT_MON_BUS_EN_SHIFT 20 #ifndef __ASSEMBLY__ extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask); +extern void l2x0_suspend(void); +extern void l2x0_resume(int collapsed); +extern void l2x0_cache_sync(void); #endif #endif diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h index 84557d3210013..dd20b946d06e8 100644 --- a/arch/arm/include/asm/hardware/gic.h +++ b/arch/arm/include/asm/hardware/gic.h @@ -40,6 +40,9 @@ void gic_secondary_init(unsigned int); void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); void gic_raise_softirq(const struct cpumask *mask, unsigned int irq); void gic_enable_ppi(unsigned int); +void gic_show_resume_irq(unsigned int gic_nr); +bool gic_is_spi_pending(unsigned int irq); +void gic_clear_spi_pending(unsigned int irq); #endif #endif diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index d66605dea55a2..761c29e01eee9 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -248,6 +248,8 @@ extern void _memset_io(volatile void __iomem *, int, size_t); #define ioremap(cookie,size) 
__arch_ioremap((cookie), (size), MT_DEVICE) #define ioremap_nocache(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE) +#define ioremap_strongly_ordered(cookie, size) __arch_ioremap(cookie, size, \ + MT_DEVICE_STRONGLY_ORDERED) #define ioremap_cached(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE_CACHED) #define ioremap_wc(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE_WC) #define iounmap __arch_iounmap diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h index 2721a5814cb93..5a526afb5f185 100644 --- a/arch/arm/include/asm/irq.h +++ b/arch/arm/include/asm/irq.h @@ -23,6 +23,7 @@ struct pt_regs; extern void migrate_irqs(void); extern void asm_do_IRQ(unsigned int, struct pt_regs *); +void handle_IRQ(unsigned int, struct pt_regs *); void init_IRQ(void); #endif diff --git a/arch/arm/include/asm/mach/flash.h b/arch/arm/include/asm/mach/flash.h index 4ca69fe2c850c..36938ea24a3dd 100644 --- a/arch/arm/include/asm/mach/flash.h +++ b/arch/arm/include/asm/mach/flash.h @@ -17,6 +17,7 @@ struct mtd_info; * map_name: the map probe function name * name: flash device name (eg, as used with mtdparts=) * width: width of mapped device + * interleave: interleave mode feature support * init: method called at driver/device initialisation * exit: method called at driver/device removal * set_vpp: method called to enable or disable VPP @@ -28,6 +29,7 @@ struct flash_platform_data { const char *map_name; const char *name; unsigned int width; + unsigned int interleave; int (*init)(void); void (*exit)(void); void (*set_vpp)(int on); diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h index d2fedb5aeb1f3..a124dde6f4b9a 100644 --- a/arch/arm/include/asm/mach/map.h +++ b/arch/arm/include/asm/mach/map.h @@ -29,11 +29,21 @@ struct map_desc { #define MT_MEMORY_NONCACHED 11 #define MT_MEMORY_DTCM 12 #define MT_MEMORY_ITCM 13 +#define MT_DEVICE_STRONGLY_ORDERED 14 +#define MT_MEMORY_R 15 +#define MT_MEMORY_RW 16 +#define MT_MEMORY_RX 17 #ifdef CONFIG_MMU extern void iotable_init(struct map_desc *, int); -struct mem_type; +struct mem_type { + pteval_t prot_pte; + unsigned int prot_l1; + unsigned int prot_sect; + unsigned int domain; +}; + extern const struct mem_type *get_mem_type(unsigned int type); /* * external interface to remap single page with appropriate type diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h new file mode 100644 index 0000000000000..57216658dd9a9 --- /dev/null +++ b/arch/arm/include/asm/mach/mmc.h @@ -0,0 +1,149 @@ +/* + * arch/arm/include/asm/mach/mmc.h + */ +#ifndef ASMARM_MACH_MMC_H +#define ASMARM_MACH_MMC_H + +#include +#include +#include +#include + +#define SDC_DAT1_DISABLE 0 +#define SDC_DAT1_ENABLE 1 +#define SDC_DAT1_ENWAKE 2 +#define SDC_DAT1_DISWAKE 3 + +struct embedded_sdio_data { + struct sdio_cis cis; + struct sdio_cccr cccr; + struct sdio_embedded_func *funcs; + int num_funcs; +}; + +/* This structure keeps information per regulator */ +struct msm_mmc_reg_data { + /* voltage regulator handle */ + struct regulator *reg; + /* regulator name */ + const char *name; + /* voltage level to be set */ + unsigned int low_vol_level; + unsigned int high_vol_level; + /* Load values for low power and high power mode */ + unsigned int lpm_uA; + unsigned int hpm_uA; + /* + * is set voltage supported for this regulator? + * false => set voltage is not supported + * true => set voltage is supported + */ + bool set_voltage_sup; + /* is this regulator enabled? 
*/ + bool is_enabled; + /* is this regulator needs to be always on? */ + bool always_on; + /* is low power mode setting required for this regulator? */ + bool lpm_sup; +}; + +/* + * This structure keeps information for all the + * regulators required for a SDCC slot. + */ +struct msm_mmc_slot_reg_data { + struct msm_mmc_reg_data *vdd_data; /* keeps VDD/VCC regulator info */ + struct msm_mmc_reg_data *vccq_data; /* keeps VCCQ regulator info */ + struct msm_mmc_reg_data *vddp_data; /* keeps VDD Pad regulator info */ +}; + +struct msm_mmc_gpio { + u32 no; + const char *name; + bool is_always_on; + bool is_enabled; +}; + +struct msm_mmc_gpio_data { + struct msm_mmc_gpio *gpio; + u8 size; +}; + +struct msm_mmc_pad_pull { + enum msm_tlmm_pull_tgt no; + u32 val; +}; + +struct msm_mmc_pad_pull_data { + struct msm_mmc_pad_pull *on; + struct msm_mmc_pad_pull *off; + u8 size; +}; + +struct msm_mmc_pad_drv { + enum msm_tlmm_hdrive_tgt no; + u32 val; +}; + +struct msm_mmc_pad_drv_data { + struct msm_mmc_pad_drv *on; + struct msm_mmc_pad_drv *off; + u8 size; +}; + +struct msm_mmc_pad_data { + struct msm_mmc_pad_pull_data *pull; + struct msm_mmc_pad_drv_data *drv; +}; + +struct msm_mmc_pin_data { + /* + * = 1 if controller pins are using gpios + * = 0 if controller has dedicated MSM pads + */ + u8 is_gpio; + u8 cfg_sts; + struct msm_mmc_gpio_data *gpio_data; + struct msm_mmc_pad_data *pad_data; +}; + +struct mmc_platform_data { + unsigned int ocr_mask; /* available voltages */ + /* + * XPC controls the maximum current in the + * default speed mode of SDXC card. + */ + unsigned int xpc_cap; + /* Supported UHS-I Modes */ + unsigned int uhs_caps; + u32 (*translate_vdd)(struct device *, unsigned int); + void (*sdio_lpm_gpio_setup)(struct device *, unsigned int); + unsigned int (*status)(struct device *); + unsigned int status_irq; + unsigned int status_gpio; + struct embedded_sdio_data *embedded_sdio; + unsigned int sdiowakeup_irq; + int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id); + unsigned long irq_flags; + unsigned long mmc_bus_width; + int (*wpswitch) (struct device *); + unsigned int msmsdcc_fmin; + unsigned int msmsdcc_fmid; + unsigned int msmsdcc_fmax; + bool nonremovable; + bool pclk_src_dfab; + int (*cfg_mpm_sdiowakeup)(struct device *, unsigned); + bool sdcc_v4_sup; + unsigned int wpswitch_gpio; + unsigned char wpswitch_polarity; + struct msm_mmc_slot_reg_data *vreg_data; + int is_sdio_al_client; + unsigned int *sup_clk_table; + unsigned char sup_clk_cnt; + struct msm_mmc_pin_data *pin_data; + bool disable_bam; + bool disable_runtime_pm; + bool disable_cmd23; +}; + +#endif diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h index 883f6be5117a7..d5adaae5ee2c0 100644 --- a/arch/arm/include/asm/mach/time.h +++ b/arch/arm/include/asm/mach/time.h @@ -34,7 +34,6 @@ * timer interrupt which may be pending. */ struct sys_timer { - struct sys_device dev; void (*init)(void); void (*suspend)(void); void (*resume)(void); diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index d0ee74b7cf86b..308b0adf9d9a3 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -256,6 +256,13 @@ static inline __deprecated void *bus_to_virt(unsigned long x) #define arch_is_coherent() 0 #endif +/* + * Set if the architecture speculatively fetches data into cache. 
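As a sketch of how this hook is meant to be overridden (the header placement and config guard are assumptions, not taken from this patch): a machine whose CPU does speculate into the data cache can define the macro in its <mach/memory.h>, which asm/memory.h typically includes before reaching the 0 fallback just below, so that dma_cache_post_ops() performs the extra post-DMA invalidation.

	/* e.g. in a (hypothetical) mach-level include/mach/memory.h */
	#ifdef CONFIG_ARCH_MSM_SCORPION
	/* data-cache speculation means device-to-memory buffers must be
	 * invalidated again once the transfer has completed */
	#define arch_has_speculative_dfetch()	1
	#endif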
+ */ +#ifndef arch_has_speculative_dfetch +#define arch_has_speculative_dfetch() 0 +#endif + #endif #include diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h index 93226cf23ae0a..fd3f17ef94a04 100644 --- a/arch/arm/include/asm/mutex.h +++ b/arch/arm/include/asm/mutex.h @@ -41,6 +41,8 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) __res |= __ex_flag; if (unlikely(__res != 0)) fail_fn(count); + else + smp_rmb(); } static inline int @@ -61,6 +63,9 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) __res |= __ex_flag; if (unlikely(__res != 0)) __res = fail_fn(count); + else + smp_rmb(); + return __res; } @@ -74,6 +79,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) { int __ex_flag, __res, __orig; + smp_wmb(); __asm__ ( "ldrex %0, [%3] \n\t" @@ -119,6 +125,8 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag) : "r" (&count->counter) : "cc", "memory" ); + if (__orig) + smp_rmb(); return __orig; } diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h index fc19009252753..d8387437ec5aa 100644 --- a/arch/arm/include/asm/outercache.h +++ b/arch/arm/include/asm/outercache.h @@ -21,6 +21,8 @@ #ifndef __ASM_OUTERCACHE_H #define __ASM_OUTERCACHE_H +#include + struct outer_cache_fns { void (*inv_range)(unsigned long, unsigned long); void (*clean_range)(unsigned long, unsigned long); @@ -31,23 +33,24 @@ struct outer_cache_fns { #ifdef CONFIG_OUTER_CACHE_SYNC void (*sync)(void); #endif + void (*set_debug)(unsigned long); }; #ifdef CONFIG_OUTER_CACHE extern struct outer_cache_fns outer_cache; -static inline void outer_inv_range(unsigned long start, unsigned long end) +static inline void outer_inv_range(phys_addr_t start, phys_addr_t end) { if (outer_cache.inv_range) outer_cache.inv_range(start, end); } -static inline void outer_clean_range(unsigned long start, unsigned long end) +static inline void outer_clean_range(phys_addr_t start, phys_addr_t end) { if (outer_cache.clean_range) outer_cache.clean_range(start, end); } -static inline void outer_flush_range(unsigned long start, unsigned long end) +static inline void outer_flush_range(phys_addr_t start, phys_addr_t end) { if (outer_cache.flush_range) outer_cache.flush_range(start, end); @@ -73,11 +76,11 @@ static inline void outer_disable(void) #else -static inline void outer_inv_range(unsigned long start, unsigned long end) +static inline void outer_inv_range(phys_addr_t start, phys_addr_t end) { } -static inline void outer_clean_range(unsigned long start, unsigned long end) +static inline void outer_clean_range(phys_addr_t start, phys_addr_t end) { } -static inline void outer_flush_range(unsigned long start, unsigned long end) +static inline void outer_flush_range(phys_addr_t start, phys_addr_t end) { } static inline void outer_flush_all(void) { } static inline void outer_inv_all(void) { } diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index f51a69595f6ed..68a71effe1b90 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -201,6 +201,11 @@ typedef struct page *pgtable_t; extern int pfn_valid(unsigned long); #endif +#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE +extern int _early_pfn_valid(unsigned long); +#define early_pfn_valid(pfn) (_early_pfn_valid(pfn)) +#endif + #include #endif /* !__ASSEMBLY__ */ diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index c4aa4e8c6af9c..0e1fd19010525 
100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h @@ -24,6 +24,9 @@ enum arm_perf_pmu_ids { ARM_PERF_PMU_ID_V6MP, ARM_PERF_PMU_ID_CA8, ARM_PERF_PMU_ID_CA9, + ARM_PERF_PMU_ID_SCORPION, + ARM_PERF_PMU_ID_SCORPIONMP, + ARM_PERF_PMU_ID_KRAIT, ARM_NUM_PMU_IDS, }; diff --git a/arch/arm/include/asm/perftypes.h b/arch/arm/include/asm/perftypes.h new file mode 100644 index 0000000000000..d659a9cdad2c4 --- /dev/null +++ b/arch/arm/include/asm/perftypes.h @@ -0,0 +1,48 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Code Aurora nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +/* +** perftypes.h +** DESCRIPTION +** ksapi.ko function hooks header file +*/ + +#ifndef __PERFTYPES_H__ +#define __PERFTYPES_H__ + +typedef void (*VPVF)(void); +typedef void (*VPULF)(unsigned long); +typedef void (*VPULULF)(unsigned long, unsigned long); + +extern VPVF pp_interrupt_out_ptr; +extern VPVF pp_interrupt_in_ptr; +extern VPULF pp_process_remove_ptr; +extern void perf_mon_interrupt_in(void); +extern void perf_mon_interrupt_out(void); + +#endif diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index ebcb6432f45f8..47192284ae608 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -22,7 +22,9 @@ #include #include +#include #include +#include /* * Just any arbitrary offset to the start of the vmalloc VM area: the @@ -232,16 +234,30 @@ extern pgprot_t pgprot_kernel; #define pgprot_writecombine(prot) \ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE) +#define pgprot_device(prot) \ + __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_NONSHARED) + +#define pgprot_writethroughcache(prot) \ + __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_WRITETHROUGH) + +#define pgprot_writebackcache(prot) \ + __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_WRITEBACK) + +#define pgprot_writebackwacache(prot) \ + __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_WRITEALLOC) + #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE #define pgprot_dmacoherent(prot) \ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN) #define __HAVE_PHYS_MEM_ACCESS_PROT +#define COHERENT_IS_NORMAL 1 struct file; extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot); #else #define pgprot_dmacoherent(prot) \ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN) +#define COHERENT_IS_NORMAL 0 #endif #endif /* __ASSEMBLY__ */ @@ -468,8 +484,15 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) * remap a physical page `pfn' of size `size' with page protection `prot' * into virtual address `from' */ +#ifndef HAS_ARCH_IO_REMAP_PFN_RANGE #define io_remap_pfn_range(vma,from,pfn,size,prot) \ - remap_pfn_range(vma, from, pfn, size, prot) + remap_pfn_range(vma,from,pfn,size,prot) +#else +extern int arch_io_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t prot); +#define io_remap_pfn_range(vma,from,pfn,size,prot) \ + arch_io_remap_pfn_range(vma,from,pfn,size,prot) +#endif + #define pgtable_cache_init() do { } while (0) diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index 8ccea012722cb..5422501275171 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h @@ -14,6 +14,7 @@ enum arm_pmu_type { ARM_PMU_DEVICE_CPU = 0, + ARM_PMU_DEVICE_L2 = 1, ARM_NUM_PMU_DEVICES, }; diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h index 67357baaeeebd..9d223c02911a4 100644 --- a/arch/arm/include/asm/processor.h +++ b/arch/arm/include/asm/processor.h @@ -29,6 +29,8 @@ #define STACK_TOP_MAX TASK_SIZE #endif +extern unsigned int boot_reason; + union debug_insn { u32 arm; u16 thumb; diff --git a/arch/arm/include/asm/remote_spinlock.h b/arch/arm/include/asm/remote_spinlock.h new file mode 100644 index 0000000000000..de39fdd8a86f5 --- /dev/null +++ b/arch/arm/include/asm/remote_spinlock.h @@ -0,0 +1,34 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#ifndef __ASM_REMOTE_SPINLOCK_H +#define __ASM_REMOTE_SPINLOCK_H + +#include + +#endif /* __ASM_REMOTE_SPINLOCK_H */ diff --git a/arch/arm/include/asm/rwsem.h b/arch/arm/include/asm/rwsem.h new file mode 100644 index 0000000000000..5532e242a8648 --- /dev/null +++ b/arch/arm/include/asm/rwsem.h @@ -0,0 +1,180 @@ +/* rwsem.h: R/W semaphores implemented using ARM atomic functions. + * + * Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _ASM_ARM_RWSEM_H +#define _ASM_ARM_RWSEM_H + +#ifndef _LINUX_RWSEM_H +#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" +#endif + +#ifdef __KERNEL__ +#include +#include +#include +#include + +/* + * the semaphore definition + */ +struct rw_semaphore { + long count; +#define RWSEM_UNLOCKED_VALUE 0x00000000 +#define RWSEM_ACTIVE_BIAS 0x00000001 +#define RWSEM_ACTIVE_MASK 0x0000ffff +#define RWSEM_WAITING_BIAS (-0x00010000) +#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS +#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) + spinlock_t wait_lock; + struct list_head wait_list; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } +#else +# define __RWSEM_DEP_MAP_INIT(lockname) +#endif + +#define __RWSEM_INITIALIZER(name) \ + { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ + LIST_HEAD_INIT((name).wait_list) \ + __RWSEM_DEP_MAP_INIT(name) } + +#define DECLARE_RWSEM(name) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name) + +extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); + +extern void __init_rwsem(struct rw_semaphore *sem, const char *name, + struct lock_class_key *key); + +#define init_rwsem(sem) \ +do { \ + static struct lock_class_key __key; \ + \ + __init_rwsem((sem), #sem, &__key); \ +} while (0) + +/* + * lock for reading + */ +static inline void __down_read(struct rw_semaphore *sem) +{ + if (atomic_inc_return((atomic_t *)(&sem->count)) < 0) + rwsem_down_read_failed(sem); +} + +static inline int __down_read_trylock(struct rw_semaphore *sem) +{ + int tmp; + + while ((tmp = sem->count) >= 0) { + if (tmp == cmpxchg(&sem->count, tmp, + tmp + RWSEM_ACTIVE_READ_BIAS)) { + return 1; + } + } + return 0; +} + +/* + * lock for writing + */ +static inline void __down_write(struct rw_semaphore *sem) +{ + int tmp; + + tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS, + (atomic_t *)(&sem->count)); + if (tmp != RWSEM_ACTIVE_WRITE_BIAS) + rwsem_down_write_failed(sem); +} + +static inline int __down_write_trylock(struct rw_semaphore *sem) +{ + int tmp; + + tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, + RWSEM_ACTIVE_WRITE_BIAS); + return tmp == RWSEM_UNLOCKED_VALUE; +} + +/* + * unlock after reading + */ +static inline void __up_read(struct rw_semaphore *sem) +{ + int tmp; + + tmp = atomic_dec_return((atomic_t *)(&sem->count)); + if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0) + rwsem_wake(sem); +} + +/* + * unlock after writing + */ +static inline void __up_write(struct rw_semaphore *sem) +{ + if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS, + (atomic_t *)(&sem->count)) < 0) + rwsem_wake(sem); +} + +/* + * implement atomic add functionality + */ +static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) +{ + atomic_add(delta, (atomic_t *)(&sem->count)); +} + +/* + * downgrade write lock to read lock + */ +static inline void __downgrade_write(struct rw_semaphore *sem) +{ + int tmp; + + tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count)); + if (tmp < 0) + rwsem_downgrade_wake(sem); +} + +static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) +{ + __down_write(sem); +} + +/* + * 
implement exchange and add functionality + */ +static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) +{ + return atomic_add_return(delta, (atomic_t *)(&sem->count)); +} + +static inline int rwsem_is_locked(struct rw_semaphore *sem) +{ + return (sem->count != 0); +} + +#endif /* __KERNEL__ */ +#endif /* _ASM_ARM_RWSEM_H */ diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index f1e5a9bca2491..25a0f16845b03 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h @@ -126,6 +126,30 @@ struct tag_cmdline { char cmdline[1]; /* this is the minimum size */ }; +#ifdef CONFIG_MICROP_COMMON +/* Microp version */ +#define ATAG_MICROP_VERSION 0x5441000a + +struct tag_microp_version { + char ver[4]; +}; + +/* Light sensor calibration value */ +#define ATAG_ALS 0x5441001b + +struct tag_als_kadc { + __u32 kadc; +}; + +/* Proximity sensor calibration values */ +#define ATAG_PS 0x5441001c + +struct tag_ps_kparam { + __u32 kparam1; + __u32 kparam2; +}; +#endif + /* acorn RiscPC specific information */ #define ATAG_ACORN 0x41000101 @@ -153,6 +177,11 @@ struct tag { struct tag_initrd initrd; struct tag_serialnr serialnr; struct tag_revision revision; +#ifdef CONFIG_MICROP_COMMON + struct tag_microp_version microp_version; + struct tag_als_kadc als_kadc; + struct tag_ps_kparam ps_kparam; +#endif struct tag_videolfb videolfb; struct tag_cmdline cmdline; @@ -221,6 +250,19 @@ extern struct meminfo meminfo; #define bank_phys_end(bank) ((bank)->start + (bank)->size) #define bank_phys_size(bank) (bank)->size + +/* + * Early command line parameters. + */ +struct early_params { + const char *arg; + void (*fn)(char **p); +}; + +#define __early_param(name,fn) \ +static struct early_params __early_##fn __used \ +__attribute__((__section__(".early_param.init"))) = { name, fn } + #endif /* __KERNEL__ */ #endif diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index 17eb355707dd3..38a9ce4bf7473 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -5,6 +5,17 @@ #error SMP not supported on pre-ARMv6 CPUs #endif +/* + * Portions based on arch/ia64/include/asm/spinlock.h + * + * Copyright (C) 1998-2003 Hewlett-Packard Co + * David Mosberger-Tang + * Copyright (C) 1999 Walt Drummond + * Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This file is used for SMP configurations only. + */ + static inline void dsb_sev(void) { #if __LINUX_ARM_ARCH__ >= 7 @@ -21,6 +32,7 @@ static inline void dsb_sev(void) #endif } +#ifndef CONFIG_ARM_TICKET_LOCKS /* * ARMv6 Spin-locking. * @@ -91,6 +103,131 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) dsb_sev(); } +#else +/* + * ARM Ticket spin-locking + * + * Ticket locks are conceptually two parts, one indicating the current head of + * the queue, and the other indicating the current tail. The lock is acquired + * by atomically noting the tail and incrementing it by one (thus adding + * ourself to the queue and noting our position), then waiting until the head + * becomes equal to the the initial value of the tail. 
+ * + * Unlocked value: 0 + * Locked value: now_serving != next_ticket + * + * 31 17 16 15 14 0 + * +----------------------------------------------------+ + * | now_serving | next_ticket | + * +----------------------------------------------------+ + */ + +#define TICKET_SHIFT 16 +#define TICKET_BITS 16 +#define TICKET_MASK 0xFFFF + +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) + +static inline void arch_spin_lock(arch_spinlock_t *lock) +{ + unsigned long tmp, ticket, next_ticket; + + /* Grab the next ticket and wait for it to be "served" */ + __asm__ __volatile__( +"1: ldrex %[ticket], [%[lockaddr]]\n" +" uadd16 %[next_ticket], %[ticket], %[val1]\n" +" strex %[tmp], %[next_ticket], [%[lockaddr]]\n" +" teq %[tmp], #0\n" +" bne 1b\n" +" uxth %[ticket], %[ticket]\n" +"2:\n" +#ifdef CONFIG_CPU_32v6K +" wfene\n" +#endif +" ldr %[tmp], [%[lockaddr]]\n" +" cmp %[ticket], %[tmp], lsr #16\n" +" bne 2b" + : [ticket]"=&r" (ticket), [tmp]"=&r" (tmp), [next_ticket]"=&r" (next_ticket) + : [lockaddr]"r" (&lock->lock), [val1]"r" (1) + : "cc"); + smp_mb(); +} + +static inline int arch_spin_trylock(arch_spinlock_t *lock) +{ + unsigned long tmp, ticket, next_ticket; + + /* Grab lock if now_serving == next_ticket and access is exclusive */ + __asm__ __volatile__( +" ldrex %[ticket], [%[lockaddr]]\n" +" ror %[tmp], %[ticket], #16\n" +" eors %[tmp], %[tmp], %[ticket]\n" +" bne 1f\n" +" uadd16 %[next_ticket], %[ticket], %[val1]\n" +" strex %[tmp], %[next_ticket], [%[lockaddr]]\n" +"1:" + : [ticket]"=&r" (ticket), [tmp]"=&r" (tmp), + [next_ticket]"=&r" (next_ticket) + : [lockaddr]"r" (&lock->lock), [val1]"r" (1) + : "cc"); + if (!tmp) + smp_mb(); + return !tmp; +} + +static inline void arch_spin_unlock(arch_spinlock_t *lock) +{ + unsigned long ticket, tmp; + + smp_mb(); + + /* Bump now_serving by 1 */ + __asm__ __volatile__( +"1: ldrex %[ticket], [%[lockaddr]]\n" +" uadd16 %[ticket], %[ticket], %[serving1]\n" +" strex %[tmp], %[ticket], [%[lockaddr]]\n" +" teq %[tmp], #0\n" +" bne 1b" + : [ticket]"=&r" (ticket), [tmp]"=&r" (tmp) + : [lockaddr]"r" (&lock->lock), [serving1]"r" (0x00010000) + : "cc"); + dsb_sev(); +} + +static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) +{ + unsigned long ticket; + + /* Wait for now_serving == next_ticket */ + __asm__ __volatile__( +#ifdef CONFIG_CPU_32v6K +" cmpne %[lockaddr], %[lockaddr]\n" +"1: wfene\n" +#else +"1:\n" +#endif +" ldr %[ticket], [%[lockaddr]]\n" +" eor %[ticket], %[ticket], %[ticket], lsr #16\n" +" uxth %[ticket], %[ticket]\n" +" cmp %[ticket], #0\n" +" bne 1b" + : [ticket]"=&r" (ticket) + : [lockaddr]"r" (&lock->lock) + : "cc"); +} + +static inline int arch_spin_is_locked(arch_spinlock_t *lock) +{ + unsigned long tmp = ACCESS_ONCE(lock->lock); + return (((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK) != 0; +} + +static inline int arch_spin_is_contended(arch_spinlock_t *lock) +{ + unsigned long tmp = ACCESS_ONCE(lock->lock); + return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; +} +#endif /* * RWLOCKS @@ -125,7 +262,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) unsigned long tmp; __asm__ __volatile__( -"1: ldrex %0, [%1]\n" +" ldrex %0, [%1]\n" " teq %0, #0\n" " strexeq %0, %2, [%1]" : "=&r" (tmp) @@ -213,7 +350,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) unsigned long tmp, tmp2 = 1; __asm__ __volatile__( -"1: ldrex %0, [%2]\n" +" ldrex %0, [%2]\n" " adds %0, %0, #1\n" " strexpl %1, %0, [%2]\n" : "=&r" (tmp), "+r" (tmp2) diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 
97f6d60297d57..9e51aef4637a6 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -69,6 +69,8 @@ #define __exception_irq_entry __exception #endif +void cpu_idle_wait(void); + struct thread_info; struct task_struct; @@ -77,6 +79,12 @@ extern unsigned int system_rev; extern unsigned int system_serial_low; extern unsigned int system_serial_high; extern unsigned int mem_fclk_21285; +#ifdef CONFIG_MICROP_COMMON +extern char microp_ver[4]; +extern unsigned int als_kadc; +extern unsigned int ps_kparam1; +extern unsigned int ps_kparam2; +#endif struct pt_regs; diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 7b5cc8dae06e6..bcb6d9d9919a1 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -75,7 +75,7 @@ struct thread_info { .flags = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ - .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ + .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ domain_val(DOMAIN_IO, DOMAIN_CLIENT), \ .restart_block = { \ diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index d2005de383b8c..ab2a488ae77e8 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -433,7 +433,7 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) if (tlb_flag(TLB_V6_I_PAGE)) asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc"); if (tlb_flag(TLB_V7_UIS_PAGE)) -#ifdef CONFIG_ARM_ERRATA_720789 +#if defined(CONFIG_ARM_ERRATA_720789) || defined(CONFIG_ARCH_MSM8X60) asm("mcr p15, 0, %0, c8, c3, 3" : : "r" (uaddr & PAGE_MASK) : "cc"); #else asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc"); @@ -480,7 +480,11 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr) if (tlb_flag(TLB_V6_I_PAGE)) asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc"); if (tlb_flag(TLB_V7_UIS_PAGE)) +#ifdef CONFIG_ARCH_MSM8X60 + asm("mcr p15, 0, %0, c8, c3, 3" : : "r" (kaddr) : "cc"); +#else asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (kaddr) : "cc"); +#endif if (tlb_flag(TLB_BTB)) { /* flush the branch target cache */ diff --git a/arch/arm/include/asm/vfp.h b/arch/arm/include/asm/vfp.h index f4ab34fd4f72c..a4459dcfaea48 100644 --- a/arch/arm/include/asm/vfp.h +++ b/arch/arm/include/asm/vfp.h @@ -21,7 +21,7 @@ #define FPSID_FORMAT_MASK (0x3 << FPSID_FORMAT_BIT) #define FPSID_NODOUBLE (1<<20) #define FPSID_ARCH_BIT (16) -#define FPSID_ARCH_MASK (0xF << FPSID_ARCH_BIT) +#define FPSID_ARCH_MASK (0x7F << FPSID_ARCH_BIT) #define FPSID_PART_BIT (8) #define FPSID_PART_MASK (0xFF << FPSID_PART_BIT) #define FPSID_VARIANT_BIT (4) @@ -82,3 +82,9 @@ #define VFPOPDESC_UNUSED_BIT (24) #define VFPOPDESC_UNUSED_MASK (0xFF << VFPOPDESC_UNUSED_BIT) #define VFPOPDESC_OPDESC_MASK (~(VFPOPDESC_LENGTH_MASK | VFPOPDESC_UNUSED_MASK)) + +#ifndef __ASSEMBLY__ +int vfp_flush_context(void); +void vfp_reinit(void); +#endif + diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index e5e1e5387678f..687a26d2961ca 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -52,10 +52,6 @@ extern void fpundefinstr(void); EXPORT_SYMBOL(__backtrace); - /* platform dependent support */ -EXPORT_SYMBOL(__udelay); -EXPORT_SYMBOL(__const_udelay); - /* networking */ EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial_copy_from_user); @@ -112,9 +108,6 @@ EXPORT_SYMBOL(__put_user_4); EXPORT_SYMBOL(__put_user_8); #endif - /* crypto hash */ 
-EXPORT_SYMBOL(sha_transform); - /* gcc lib functions */ EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__ashrdi3); diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S index a0f07521ca8a2..c1bf7c5c5803a 100644 --- a/arch/arm/kernel/debug.S +++ b/arch/arm/kernel/debug.S @@ -25,7 +25,7 @@ .macro addruart, rp, rv .endm -#if defined(CONFIG_CPU_V6) +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V7) .macro senduart, rd, rx mcr p14, 0, \rd, c0, c5, 0 diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 94fbf2c72ab9a..7d674b29923b4 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -4,6 +4,7 @@ * Copyright (C) 1996,1997,1998 Russell King. * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk) * nommu support by Hyok S. Choi (hyok.choi@samsung.com) + * Copyright (c) 2009, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -28,18 +29,17 @@ #include /* - * Interrupt handling. Preserves r7, r8, r9 + * Interrupt handling. */ .macro irq_handler #ifdef CONFIG_MULTI_IRQ_HANDLER - ldr r5, =handle_arch_irq + ldr r1, =handle_arch_irq mov r0, sp - ldr r5, [r5] adr lr, BSYM(9997f) - teq r5, #0 - movne pc, r5 -#endif + ldr pc, [r1] +#else arch_irq_handler_default +#endif 9997: .endm @@ -729,7 +729,14 @@ ENTRY(__switch_to) ldr r7, [r7, #TSK_STACK_CANARY] #endif #ifdef CONFIG_CPU_USE_DOMAINS +#ifdef CONFIG_EMULATE_DOMAIN_MANAGER_V7 + stmdb r13!, {r0-r3, lr} + mov r0, r6 + bl emulate_domain_manager_set + ldmia r13!, {r0-r3, lr} +#else mcr p15, 0, r6, c3, c0, 0 @ Set domain register +#endif #endif mov r5, r0 add r4, r2, #TI_CPU_SAVE diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index f06ff9feb0dbb..67a243e7da9fa 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -4,6 +4,7 @@ * Copyright (C) 1994-2002 Russell King * Copyright (c) 2003 ARM Limited * All Rights Reserved + * Copyright (c) 2009, Code Aurora Forum. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -341,7 +342,7 @@ __secondary_data: * r13 = *virtual* address to jump to upon completion */ __enable_mmu: -#ifdef CONFIG_ALIGNMENT_TRAP +#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6 orr r0, r0, #CR_A #else bic r0, r0, #CR_A @@ -355,10 +356,17 @@ __enable_mmu: #ifdef CONFIG_CPU_ICACHE_DISABLE bic r0, r0, #CR_I #endif - mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ +#ifdef CONFIG_EMULATE_DOMAIN_MANAGER_V7 + mov r5, #(domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \ + domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \ + domain_val(DOMAIN_TABLE, DOMAIN_CLIENT) | \ + domain_val(DOMAIN_IO, DOMAIN_CLIENT)) +#else + mov r5, #(domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ + domain_val(DOMAIN_TABLE, DOMAIN_CLIENT) | \ domain_val(DOMAIN_IO, DOMAIN_CLIENT)) +#endif mcr p15, 0, r5, c3, c0, 0 @ load domain access register mcr p15, 0, r4, c2, c0, 0 @ load page table pointer b __turn_mmu_on diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 44b84fe6e1b0f..b362f437c30b4 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -833,6 +833,18 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, return ret; } +static void reset_brps_reserved_reg(int n) +{ + int i; + + /* we must also reset any reserved registers. */ + for (i = 0; i < n; ++i) { + write_wb_reg(ARM_BASE_BCR + i, 0UL); + write_wb_reg(ARM_BASE_BVR + i, 0UL); + } + +} + /* * One-time initialisation. */ @@ -868,16 +880,23 @@ static void reset_ctrl_regs(void *info) */ asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0)); isb(); + + /* + * Clear any configured vector-catch events before + * enabling monitor mode. + */ + asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0)); + isb(); } if (enable_monitor_mode()) return; - /* We must also reset any reserved registers. */ - for (i = 0; i < core_num_brps + core_num_reserved_brps; ++i) { - write_wb_reg(ARM_BASE_BCR + i, 0UL); - write_wb_reg(ARM_BASE_BVR + i, 0UL); - } +#ifdef CONFIG_HAVE_HW_BRKPT_RESERVED_RW_ACCESS + reset_brps_reserved_reg(core_num_brps); +#else + reset_brps_reserved_reg(core_num_brps + core_num_reserved_brps); +#endif for (i = 0; i < core_num_wrps; ++i) { write_wb_reg(ARM_BASE_WCR + i, 0UL); diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 28536e352deb5..d18a37e6218fc 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -42,6 +42,8 @@ #include #include +#include + /* * No architecture-specific irq_finish function defined in arm/arch/irqs.h. */ @@ -112,15 +114,16 @@ int show_interrupts(struct seq_file *p, void *v) } /* - * do_IRQ handles all hardware IRQ's. Decoded IRQs should not - * come via this function. Instead, they should provide their - * own 'handler' + * handle_IRQ handles all hardware IRQ's. Decoded IRQs should + * not come via this function. Instead, they should provide their + * own 'handler'. Used by platform code implementing C-based 1st + * level decoding. 
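A sketch of the kind of first-level decoder this interface targets (the controller base, register offset, and function name are hypothetical placeholders, not taken from this patch): platform code reads its pending register and hands each decoded source to handle_IRQ().

	#include <linux/types.h>
	#include <linux/io.h>
	#include <linux/bitops.h>
	#include <asm/irq.h>

	/* hypothetical interrupt-controller mapping */
	extern void __iomem *example_intc_base;
	#define EXAMPLE_INTC_PENDING	0x00	/* placeholder register offset */

	static void example_decode_irqs(struct pt_regs *regs)
	{
		u32 pending;

		/* hand every pending source to the generic IRQ layer */
		while ((pending = readl(example_intc_base + EXAMPLE_INTC_PENDING)))
			handle_IRQ(__ffs(pending), regs);
	}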
*/ -asmlinkage void __exception_irq_entry -asm_do_IRQ(unsigned int irq, struct pt_regs *regs) +void handle_IRQ(unsigned int irq, struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); + perf_mon_interrupt_in(); irq_enter(); /* @@ -140,6 +143,16 @@ asm_do_IRQ(unsigned int irq, struct pt_regs *regs) irq_exit(); set_irq_regs(old_regs); + perf_mon_interrupt_out(); +} + +/* + * asm_do_IRQ is the interface to be used from assembly code. + */ +asmlinkage void __exception_irq_entry +asm_do_IRQ(unsigned int irq, struct pt_regs *regs) +{ + handle_IRQ(irq, regs); } void set_irq_flags(unsigned int irq, unsigned int iflags) diff --git a/arch/arm/kernel/leds.c b/arch/arm/kernel/leds.c index 31a316c1777b8..136e8376a3eb4 100644 --- a/arch/arm/kernel/leds.c +++ b/arch/arm/kernel/leds.c @@ -9,7 +9,10 @@ */ #include #include +#include +#include #include +#include #include @@ -69,34 +72,54 @@ static ssize_t leds_store(struct sys_device *dev, static SYSDEV_ATTR(event, 0200, NULL, leds_store); -static int leds_suspend(struct sys_device *dev, pm_message_t state) +static struct sysdev_class leds_sysclass = { + .name = "leds", +}; + +static struct sys_device leds_device = { + .id = 0, + .cls = &leds_sysclass, +}; + +static int leds_suspend(void) { leds_event(led_stop); return 0; } -static int leds_resume(struct sys_device *dev) +static void leds_resume(void) { leds_event(led_start); - return 0; } -static int leds_shutdown(struct sys_device *dev) +static void leds_shutdown(void) { leds_event(led_halted); - return 0; } -static struct sysdev_class leds_sysclass = { - .name = "leds", +static struct syscore_ops leds_syscore_ops = { .shutdown = leds_shutdown, .suspend = leds_suspend, .resume = leds_resume, }; -static struct sys_device leds_device = { - .id = 0, - .cls = &leds_sysclass, +static int leds_idle_notifier(struct notifier_block *nb, unsigned long val, + void *data) +{ + switch (val) { + case IDLE_START: + leds_event(led_idle_start); + break; + case IDLE_END: + leds_event(led_idle_end); + break; + } + + return 0; +} + +static struct notifier_block leds_idle_nb = { + .notifier_call = leds_idle_notifier, }; static int __init leds_init(void) @@ -107,6 +130,12 @@ static int __init leds_init(void) ret = sysdev_register(&leds_device); if (ret == 0) ret = sysdev_create_file(&leds_device, &attr_event); + + if (ret == 0) { + register_syscore_ops(&leds_syscore_ops); + idle_notifier_register(&leds_idle_nb); + } + return ret; } diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 30ead135ff5f4..0a4256224b71c 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -81,6 +81,7 @@ void machine_kexec(struct kimage *image) unsigned long reboot_code_buffer_phys; void *reboot_code_buffer; + arch_kexec(); page_list = image->head & PAGE_MASK; @@ -113,5 +114,5 @@ void machine_kexec(struct kimage *image) cpu_proc_fin(); outer_inv_all(); flush_cache_all(); - cpu_reset(reboot_code_buffer_phys); + __virt_to_phys(cpu_reset)(reboot_code_buffer_phys); } diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index d150ad1ccb5d8..bc2a90da25048 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -71,6 +72,10 @@ struct arm_pmu { enum arm_perf_pmu_ids id; const char *name; irqreturn_t (*handle_irq)(int irq_num, void *dev); +#ifdef CONFIG_SMP + void (*secondary_enable)(unsigned int irq); + void (*secondary_disable)(unsigned int irq); +#endif void 
(*enable)(struct hw_perf_event *evt, int idx); void (*disable)(struct hw_perf_event *evt, int idx); int (*get_event_idx)(struct cpu_hw_events *cpuc, @@ -204,10 +209,9 @@ armpmu_event_set_period(struct perf_event *event, static u64 armpmu_event_update(struct perf_event *event, struct hw_perf_event *hwc, - int idx) + int idx, int overflow) { - int shift = 64 - 32; - s64 prev_raw_count, new_raw_count; + u64 prev_raw_count, new_raw_count; u64 delta; again: @@ -218,8 +222,13 @@ armpmu_event_update(struct perf_event *event, new_raw_count) != prev_raw_count) goto again; - delta = (new_raw_count << shift) - (prev_raw_count << shift); - delta >>= shift; + new_raw_count &= armpmu->max_period; + prev_raw_count &= armpmu->max_period; + + if (overflow) + delta = armpmu->max_period - prev_raw_count + new_raw_count; + else + delta = new_raw_count - prev_raw_count; local64_add(delta, &event->count); local64_sub(delta, &hwc->period_left); @@ -236,7 +245,7 @@ armpmu_read(struct perf_event *event) if (hwc->idx < 0) return; - armpmu_event_update(event, hwc, hwc->idx); + armpmu_event_update(event, hwc, hwc->idx, 0); } static void @@ -254,7 +263,7 @@ armpmu_stop(struct perf_event *event, int flags) if (!(hwc->state & PERF_HES_STOPPED)) { armpmu->disable(hwc, hwc->idx); barrier(); /* why? */ - armpmu_event_update(event, hwc, hwc->idx); + armpmu_event_update(event, hwc, hwc->idx, 0); hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; } } @@ -407,6 +416,10 @@ armpmu_reserve_hardware(void) pr_warning("unable to request IRQ%d for ARM perf " "counters\n", irq); break; +#ifdef CONFIG_SMP + } else if (armpmu->secondary_enable) { + armpmu->secondary_enable(irq); +#endif } } @@ -430,8 +443,13 @@ armpmu_release_hardware(void) for (i = pmu_device->num_resources - 1; i >= 0; --i) { irq = platform_get_irq(pmu_device, i); - if (irq >= 0) + if (irq >= 0) { free_irq(irq, NULL); +#ifdef CONFIG_SMP + if (armpmu->secondary_disable) + armpmu->secondary_disable(irq); +#endif + } } armpmu->stop(); @@ -608,6 +626,10 @@ static struct pmu pmu = { #include "perf_event_xscale.c" #include "perf_event_v6.c" #include "perf_event_v7.c" +#include "perf_event_msm.c" +#include "perf_event_msm_l2.c" +#include "perf_event_msm_krait.c" +#include "perf_event_msm_krait_l2.c" static int __init init_hw_perf_events(void) @@ -645,6 +667,22 @@ init_hw_perf_events(void) armpmu = xscale2pmu_init(); break; } + /* Qualcomm CPUs */ + } else if (0x51 == implementor) { + switch (part_number) { + case 0x00F0: /* 8x50 & 7x30*/ + armpmu = armv7_scorpion_pmu_init(); + break; + case 0x02D0: /* 8x60 */ + armpmu = armv7_scorpionmp_pmu_init(); + scorpionmp_l2_pmu_init(); + break; + case 0x0490: /* 8960 sim */ + case 0x04D0: /* 8960 */ + armpmu = armv7_krait_pmu_init(); + krait_l2_pmu_init(); + break; + } } if (armpmu) { @@ -714,7 +752,8 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) tail = (struct frame_tail __user *)regs->ARM_fp - 1; - while (tail && !((unsigned long)tail & 0x3)) + while ((entry->nr < PERF_MAX_STACK_DEPTH) && + tail && !((unsigned long)tail & 0x3)) tail = user_backtrace(tail, entry); } diff --git a/arch/arm/kernel/perf_event_msm.c b/arch/arm/kernel/perf_event_msm.c new file mode 100644 index 0000000000000..be088d132f384 --- /dev/null +++ b/arch/arm/kernel/perf_event_msm.c @@ -0,0 +1,710 @@ +/* + * Copyright (c) 2011, Code Aurora Forum. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include "../vfp/vfpinstr.h" + +#ifdef CONFIG_CPU_V7 +enum scorpion_perf_common { + SCORPION_EVT_START_IDX = 0x4c, + SCORPION_ICACHE_EXPL_INV = 0x4c, + SCORPION_ICACHE_MISS = 0x4d, + SCORPION_ICACHE_ACCESS = 0x4e, + SCORPION_ICACHE_CACHEREQ_L2 = 0x4f, + SCORPION_ICACHE_NOCACHE_L2 = 0x50, + SCORPION_HIQUP_NOPED = 0x51, + SCORPION_DATA_ABORT = 0x52, + SCORPION_IRQ = 0x53, + SCORPION_FIQ = 0x54, + SCORPION_ALL_EXCPT = 0x55, + SCORPION_UNDEF = 0x56, + SCORPION_SVC = 0x57, + SCORPION_SMC = 0x58, + SCORPION_PREFETCH_ABORT = 0x59, + SCORPION_INDEX_CHECK = 0x5a, + SCORPION_NULL_CHECK = 0x5b, + SCORPION_ICIMVAU_IMPL_ICIALLU = 0x5c, + SCORPION_NONICIALLU_BTAC_INV = 0x5d, + SCORPION_IMPL_ICIALLU = 0x5e, + SCORPION_EXPL_ICIALLU = 0x5f, + SCORPION_SPIPE_ONLY_CYCLES = 0x60, + SCORPION_XPIPE_ONLY_CYCLES = 0x61, + SCORPION_DUAL_CYCLES = 0x62, + SCORPION_DISPATCH_ANY_CYCLES = 0x63, + SCORPION_FIFO_FULLBLK_CMT = 0x64, + SCORPION_FAIL_COND_INST = 0x65, + SCORPION_PASS_COND_INST = 0x66, + SCORPION_ALLOW_VU_CLK = 0x67, + SCORPION_VU_IDLE = 0x68, + SCORPION_ALLOW_L2_CLK = 0x69, + SCORPION_L2_IDLE = 0x6a, + SCORPION_DTLB_IMPL_INV_SCTLR_DACR = 0x6b, + SCORPION_DTLB_EXPL_INV = 0x6c, + SCORPION_DTLB_MISS = 0x6d, + SCORPION_DTLB_ACCESS = 0x6e, + SCORPION_ITLB_MISS = 0x6f, + SCORPION_ITLB_IMPL_INV = 0x70, + SCORPION_ITLB_EXPL_INV = 0x71, + SCORPION_UTLB_D_MISS = 0x72, + SCORPION_UTLB_D_ACCESS = 0x73, + SCORPION_UTLB_I_MISS = 0x74, + SCORPION_UTLB_I_ACCESS = 0x75, + SCORPION_UTLB_INV_ASID = 0x76, + SCORPION_UTLB_INV_MVA = 0x77, + SCORPION_UTLB_INV_ALL = 0x78, + SCORPION_S2_HOLD_RDQ_UNAVAIL = 0x79, + SCORPION_S2_HOLD = 0x7a, + SCORPION_S2_HOLD_DEV_OP = 0x7b, + SCORPION_S2_HOLD_ORDER = 0x7c, + SCORPION_S2_HOLD_BARRIER = 0x7d, + SCORPION_VIU_DUAL_CYCLE = 0x7e, + SCORPION_VIU_SINGLE_CYCLE = 0x7f, + SCORPION_VX_PIPE_WAR_STALL_CYCLES = 0x80, + SCORPION_VX_PIPE_WAW_STALL_CYCLES = 0x81, + SCORPION_VX_PIPE_RAW_STALL_CYCLES = 0x82, + SCORPION_VX_PIPE_LOAD_USE_STALL = 0x83, + SCORPION_VS_PIPE_WAR_STALL_CYCLES = 0x84, + SCORPION_VS_PIPE_WAW_STALL_CYCLES = 0x85, + SCORPION_VS_PIPE_RAW_STALL_CYCLES = 0x86, + SCORPION_EXCEPTIONS_INV_OPERATION = 0x87, + SCORPION_EXCEPTIONS_DIV_BY_ZERO = 0x88, + SCORPION_COND_INST_FAIL_VX_PIPE = 0x89, + SCORPION_COND_INST_FAIL_VS_PIPE = 0x8a, + SCORPION_EXCEPTIONS_OVERFLOW = 0x8b, + SCORPION_EXCEPTIONS_UNDERFLOW = 0x8c, + SCORPION_EXCEPTIONS_DENORM = 0x8d, +}; + +enum scorpion_perf_smp { + SCORPIONMP_NUM_BARRIERS = 0x8e, + SCORPIONMP_BARRIER_CYCLES = 0x8f, +}; + +enum scorpion_perf_up { + SCORPION_BANK_AB_HIT = 0x8e, + SCORPION_BANK_AB_ACCESS = 0x8f, + SCORPION_BANK_CD_HIT = 0x90, + SCORPION_BANK_CD_ACCESS = 0x91, + SCORPION_BANK_AB_DSIDE_HIT = 0x92, + SCORPION_BANK_AB_DSIDE_ACCESS = 0x93, + SCORPION_BANK_CD_DSIDE_HIT = 0x94, + SCORPION_BANK_CD_DSIDE_ACCESS = 0x95, + SCORPION_BANK_AB_ISIDE_HIT = 0x96, + SCORPION_BANK_AB_ISIDE_ACCESS = 0x97, + SCORPION_BANK_CD_ISIDE_HIT = 0x98, + SCORPION_BANK_CD_ISIDE_ACCESS = 0x99, + SCORPION_ISIDE_RD_WAIT = 0x9a, + SCORPION_DSIDE_RD_WAIT = 0x9b, + SCORPION_BANK_BYPASS_WRITE = 0x9c, 
+ SCORPION_BANK_AB_NON_CASTOUT = 0x9d, + SCORPION_BANK_AB_L2_CASTOUT = 0x9e, + SCORPION_BANK_CD_NON_CASTOUT = 0x9f, + SCORPION_BANK_CD_L2_CASTOUT = 0xa0, +}; + +static const unsigned armv7_scorpion_perf_map[PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, + [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, +}; + +static const unsigned armv7_scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(L1D)] = { + /* + * The performance counters don't differentiate between read + * and write accesses/misses so this isn't strictly correct, + * but it's the best we can do. Writes and reads get + * combined. + */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS, + [C(RESULT_MISS)] = SCORPION_ICACHE_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS, + [C(RESULT_MISS)] = SCORPION_ICACHE_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(DTLB)] = { + /* + * Only ITLB misses and DTLB refills are supported. + * If users want the DTLB refills misses a raw counter + * must be used. + */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS, + [C(RESULT_MISS)] = SCORPION_DTLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS, + [C(RESULT_MISS)] = SCORPION_DTLB_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = SCORPION_ITLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = SCORPION_ITLB_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] + = ARMV7_PERFCTR_PC_BRANCH_MIS_USED, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_PC_BRANCH_MIS_USED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] + = ARMV7_PERFCTR_PC_BRANCH_MIS_USED, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_PC_BRANCH_MIS_USED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, +}; + +struct scorpion_evt { + /* + * The scorpion_evt_type field corresponds to the actual Scorpion + * event codes. 
These map many-to-one to the armv7 defined codes + */ + u32 scorpion_evt_type; + + /* + * The group_setval field corresponds to the value that the group + * register needs to be set to. This value is deduced from the row + * and column that the event belongs to in the event table + */ + u32 group_setval; + + /* + * The groupcode corresponds to the group that the event belongs to. + * Scorpion has 5 groups of events LPM0, LPM1, LPM2, L2LPM and VLPM + * going from 0 to 4 in terms of the codes used + */ + u8 groupcode; + + /* + * The armv7_evt_type field corresponds to the armv7 defined event + * code that the Scorpion events map to + */ + u32 armv7_evt_type; +}; + +static const struct scorpion_evt scorpion_event[] = { + {SCORPION_ICACHE_EXPL_INV, 0x80000500, 0, 0x4d}, + {SCORPION_ICACHE_MISS, 0x80050000, 0, 0x4e}, + {SCORPION_ICACHE_ACCESS, 0x85000000, 0, 0x4f}, + {SCORPION_ICACHE_CACHEREQ_L2, 0x86000000, 0, 0x4f}, + {SCORPION_ICACHE_NOCACHE_L2, 0x87000000, 0, 0x4f}, + {SCORPION_HIQUP_NOPED, 0x80080000, 0, 0x4e}, + {SCORPION_DATA_ABORT, 0x8000000a, 0, 0x4c}, + {SCORPION_IRQ, 0x80000a00, 0, 0x4d}, + {SCORPION_FIQ, 0x800a0000, 0, 0x4e}, + {SCORPION_ALL_EXCPT, 0x8a000000, 0, 0x4f}, + {SCORPION_UNDEF, 0x8000000b, 0, 0x4c}, + {SCORPION_SVC, 0x80000b00, 0, 0x4d}, + {SCORPION_SMC, 0x800b0000, 0, 0x4e}, + {SCORPION_PREFETCH_ABORT, 0x8b000000, 0, 0x4f}, + {SCORPION_INDEX_CHECK, 0x8000000c, 0, 0x4c}, + {SCORPION_NULL_CHECK, 0x80000c00, 0, 0x4d}, + {SCORPION_ICIMVAU_IMPL_ICIALLU, 0x8000000d, 0, 0x4c}, + {SCORPION_NONICIALLU_BTAC_INV, 0x80000d00, 0, 0x4d}, + {SCORPION_IMPL_ICIALLU, 0x800d0000, 0, 0x4e}, + {SCORPION_EXPL_ICIALLU, 0x8d000000, 0, 0x4f}, + + {SCORPION_SPIPE_ONLY_CYCLES, 0x80000600, 1, 0x51}, + {SCORPION_XPIPE_ONLY_CYCLES, 0x80060000, 1, 0x52}, + {SCORPION_DUAL_CYCLES, 0x86000000, 1, 0x53}, + {SCORPION_DISPATCH_ANY_CYCLES, 0x89000000, 1, 0x53}, + {SCORPION_FIFO_FULLBLK_CMT, 0x8000000d, 1, 0x50}, + {SCORPION_FAIL_COND_INST, 0x800d0000, 1, 0x52}, + {SCORPION_PASS_COND_INST, 0x8d000000, 1, 0x53}, + {SCORPION_ALLOW_VU_CLK, 0x8000000e, 1, 0x50}, + {SCORPION_VU_IDLE, 0x80000e00, 1, 0x51}, + {SCORPION_ALLOW_L2_CLK, 0x800e0000, 1, 0x52}, + {SCORPION_L2_IDLE, 0x8e000000, 1, 0x53}, + + {SCORPION_DTLB_IMPL_INV_SCTLR_DACR, 0x80000001, 2, 0x54}, + {SCORPION_DTLB_EXPL_INV, 0x80000100, 2, 0x55}, + {SCORPION_DTLB_MISS, 0x80010000, 2, 0x56}, + {SCORPION_DTLB_ACCESS, 0x81000000, 2, 0x57}, + {SCORPION_ITLB_MISS, 0x80000200, 2, 0x55}, + {SCORPION_ITLB_IMPL_INV, 0x80020000, 2, 0x56}, + {SCORPION_ITLB_EXPL_INV, 0x82000000, 2, 0x57}, + {SCORPION_UTLB_D_MISS, 0x80000003, 2, 0x54}, + {SCORPION_UTLB_D_ACCESS, 0x80000300, 2, 0x55}, + {SCORPION_UTLB_I_MISS, 0x80030000, 2, 0x56}, + {SCORPION_UTLB_I_ACCESS, 0x83000000, 2, 0x57}, + {SCORPION_UTLB_INV_ASID, 0x80000400, 2, 0x55}, + {SCORPION_UTLB_INV_MVA, 0x80040000, 2, 0x56}, + {SCORPION_UTLB_INV_ALL, 0x84000000, 2, 0x57}, + {SCORPION_S2_HOLD_RDQ_UNAVAIL, 0x80000800, 2, 0x55}, + {SCORPION_S2_HOLD, 0x88000000, 2, 0x57}, + {SCORPION_S2_HOLD_DEV_OP, 0x80000900, 2, 0x55}, + {SCORPION_S2_HOLD_ORDER, 0x80090000, 2, 0x56}, + {SCORPION_S2_HOLD_BARRIER, 0x89000000, 2, 0x57}, + + {SCORPION_VIU_DUAL_CYCLE, 0x80000001, 4, 0x5c}, + {SCORPION_VIU_SINGLE_CYCLE, 0x80000100, 4, 0x5d}, + {SCORPION_VX_PIPE_WAR_STALL_CYCLES, 0x80000005, 4, 0x5c}, + {SCORPION_VX_PIPE_WAW_STALL_CYCLES, 0x80000500, 4, 0x5d}, + {SCORPION_VX_PIPE_RAW_STALL_CYCLES, 0x80050000, 4, 0x5e}, + {SCORPION_VX_PIPE_LOAD_USE_STALL, 0x80000007, 4, 0x5c}, + {SCORPION_VS_PIPE_WAR_STALL_CYCLES, 0x80000008, 4, 0x5c}, + 
{SCORPION_VS_PIPE_WAW_STALL_CYCLES, 0x80000800, 4, 0x5d}, + {SCORPION_VS_PIPE_RAW_STALL_CYCLES, 0x80080000, 4, 0x5e}, + {SCORPION_EXCEPTIONS_INV_OPERATION, 0x8000000b, 4, 0x5c}, + {SCORPION_EXCEPTIONS_DIV_BY_ZERO, 0x80000b00, 4, 0x5d}, + {SCORPION_COND_INST_FAIL_VX_PIPE, 0x800b0000, 4, 0x5e}, + {SCORPION_COND_INST_FAIL_VS_PIPE, 0x8b000000, 4, 0x5f}, + {SCORPION_EXCEPTIONS_OVERFLOW, 0x8000000c, 4, 0x5c}, + {SCORPION_EXCEPTIONS_UNDERFLOW, 0x80000c00, 4, 0x5d}, + {SCORPION_EXCEPTIONS_DENORM, 0x8c000000, 4, 0x5f}, + +#ifdef CONFIG_MSM_SMP + {SCORPIONMP_NUM_BARRIERS, 0x80000e00, 3, 0x59}, + {SCORPIONMP_BARRIER_CYCLES, 0x800e0000, 3, 0x5a}, +#else + {SCORPION_BANK_AB_HIT, 0x80000001, 3, 0x58}, + {SCORPION_BANK_AB_ACCESS, 0x80000100, 3, 0x59}, + {SCORPION_BANK_CD_HIT, 0x80010000, 3, 0x5a}, + {SCORPION_BANK_CD_ACCESS, 0x81000000, 3, 0x5b}, + {SCORPION_BANK_AB_DSIDE_HIT, 0x80000002, 3, 0x58}, + {SCORPION_BANK_AB_DSIDE_ACCESS, 0x80000200, 3, 0x59}, + {SCORPION_BANK_CD_DSIDE_HIT, 0x80020000, 3, 0x5a}, + {SCORPION_BANK_CD_DSIDE_ACCESS, 0x82000000, 3, 0x5b}, + {SCORPION_BANK_AB_ISIDE_HIT, 0x80000003, 3, 0x58}, + {SCORPION_BANK_AB_ISIDE_ACCESS, 0x80000300, 3, 0x59}, + {SCORPION_BANK_CD_ISIDE_HIT, 0x80030000, 3, 0x5a}, + {SCORPION_BANK_CD_ISIDE_ACCESS, 0x83000000, 3, 0x5b}, + {SCORPION_ISIDE_RD_WAIT, 0x80000009, 3, 0x58}, + {SCORPION_DSIDE_RD_WAIT, 0x80090000, 3, 0x5a}, + {SCORPION_BANK_BYPASS_WRITE, 0x8000000a, 3, 0x58}, + {SCORPION_BANK_AB_NON_CASTOUT, 0x8000000c, 3, 0x58}, + {SCORPION_BANK_AB_L2_CASTOUT, 0x80000c00, 3, 0x59}, + {SCORPION_BANK_CD_NON_CASTOUT, 0x800c0000, 3, 0x5a}, + {SCORPION_BANK_CD_L2_CASTOUT, 0x8c000000, 3, 0x5b}, +#endif +}; + +static unsigned int get_scorpion_evtinfo(unsigned int scorpion_evt_type, + struct scorpion_evt *evtinfo) +{ + u32 idx; + + if (scorpion_evt_type < SCORPION_EVT_START_IDX || scorpion_evt_type >= + (ARRAY_SIZE(scorpion_event) + SCORPION_EVT_START_IDX)) + return -EINVAL; + idx = scorpion_evt_type - SCORPION_EVT_START_IDX; + if (scorpion_event[idx].scorpion_evt_type == scorpion_evt_type) { + evtinfo->group_setval = scorpion_event[idx].group_setval; + evtinfo->groupcode = scorpion_event[idx].groupcode; + evtinfo->armv7_evt_type = scorpion_event[idx].armv7_evt_type; + return scorpion_event[idx].armv7_evt_type; + } + return -EINVAL; +} + +static u32 scorpion_read_lpm0(void) +{ + u32 val; + + asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val)); + return val; +} + +static void scorpion_write_lpm0(u32 val) +{ + asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val)); +} + +static u32 scorpion_read_lpm1(void) +{ + u32 val; + + asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val)); + return val; +} + +static void scorpion_write_lpm1(u32 val) +{ + asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val)); +} + +static u32 scorpion_read_lpm2(void) +{ + u32 val; + + asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val)); + return val; +} + +static void scorpion_write_lpm2(u32 val) +{ + asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val)); +} + +static u32 scorpion_read_l2lpm(void) +{ + u32 val; + + asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val)); + return val; +} + +static void scorpion_write_l2lpm(u32 val) +{ + asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val)); +} + +static u32 scorpion_read_vlpm(void) +{ + u32 val; + + asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val)); + return val; +} + +static void scorpion_write_vlpm(u32 val) +{ + asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val)); +} + +/* + * The Scorpion processor supports 
performance monitoring for Venum unit. + * In order to access the performance monitor registers corresponding to + * VFP, CPACR and FPEXC registers need to be set up beforehand. + * Also, they need to be recovered once the access is done. + * This is the reason for having pre and post functions + */ + +static DEFINE_PER_CPU(u32, venum_orig_val); +static DEFINE_PER_CPU(u32, fp_orig_val); + +static void scorpion_pre_vlpm(void) +{ + u32 venum_new_val; + u32 fp_new_val; + + /* CPACR Enable CP10 access*/ + venum_orig_val = get_copro_access(); + venum_new_val = venum_orig_val | CPACC_SVC(10); + set_copro_access(venum_new_val); + /* Enable FPEXC */ + fp_orig_val = fmrx(FPEXC); + fp_new_val = fp_orig_val | FPEXC_EN; + fmxr(FPEXC, fp_new_val); +} + +static void scorpion_post_vlpm(void) +{ + /* Restore FPEXC*/ + fmxr(FPEXC, fp_orig_val); + isb(); + /* Restore CPACR*/ + set_copro_access(venum_orig_val); +} + +struct scorpion_access_funcs { + u32 (*read) (void); + void (*write) (u32); + void (*pre) (void); + void (*post) (void); +}; + +/* + * The scorpion_functions array is used to set up the event register codes + * based on the group to which an event belongs to. + * Having the following array modularizes the code for doing that. + */ +struct scorpion_access_funcs scorpion_functions[] = { + {scorpion_read_lpm0, scorpion_write_lpm0, NULL, NULL}, + {scorpion_read_lpm1, scorpion_write_lpm1, NULL, NULL}, + {scorpion_read_lpm2, scorpion_write_lpm2, NULL, NULL}, + {scorpion_read_l2lpm, scorpion_write_l2lpm, NULL, NULL}, + {scorpion_read_vlpm, scorpion_write_vlpm, scorpion_pre_vlpm, + scorpion_post_vlpm}, +}; + +static inline u32 scorpion_get_columnmask(u32 evt_code) +{ + const u32 columnmasks[] = {0xffffff00, 0xffff00ff, 0xff00ffff, + 0x80ffffff}; + + return columnmasks[evt_code & 0x3]; +} + +static void scorpion_evt_setup(u32 gr, u32 setval, u32 evt_code) +{ + u32 val; + + if (scorpion_functions[gr].pre) + scorpion_functions[gr].pre(); + val = scorpion_get_columnmask(evt_code) & scorpion_functions[gr].read(); + val = val | setval; + scorpion_functions[gr].write(val); + if (scorpion_functions[gr].post) + scorpion_functions[gr].post(); +} + +static void scorpion_clear_pmuregs(void) +{ + unsigned long flags; + + scorpion_write_lpm0(0); + scorpion_write_lpm1(0); + scorpion_write_lpm2(0); + scorpion_write_l2lpm(0); + raw_spin_lock_irqsave(&pmu_lock, flags); + scorpion_pre_vlpm(); + scorpion_write_vlpm(0); + scorpion_post_vlpm(); + raw_spin_unlock_irqrestore(&pmu_lock, flags); +} + +static void scorpion_clearpmu(u32 grp, u32 val, u32 evt_code) +{ + u32 orig_pmuval, new_pmuval; + + if (scorpion_functions[grp].pre) + scorpion_functions[grp].pre(); + orig_pmuval = scorpion_functions[grp].read(); + val = val & ~scorpion_get_columnmask(evt_code); + new_pmuval = orig_pmuval & ~val; + scorpion_functions[grp].write(new_pmuval); + if (scorpion_functions[grp].post) + scorpion_functions[grp].post(); +} + +static void scorpion_pmu_disable_event(struct hw_perf_event *hwc, int idx) +{ + unsigned long flags; + u32 val = 0; + u32 gr; + unsigned long event; + struct scorpion_evt evtinfo; + + /* Disable counter and interrupt */ + raw_spin_lock_irqsave(&pmu_lock, flags); + + /* Disable counter */ + armv7_pmnc_disable_counter(idx); + + /* + * Clear lpm code (if destined for PMNx counters) + * We don't need to set the event if it's a cycle count + */ + if (idx != ARMV7_CYCLE_COUNTER) { + val = hwc->config_base; + val &= ARMV7_EVTSEL_MASK; + if (val > 0x40) { + event = get_scorpion_evtinfo(val, &evtinfo); + if (event == -EINVAL) + 
goto scorpion_dis_out; + val = evtinfo.group_setval; + gr = evtinfo.groupcode; + scorpion_clearpmu(gr, val, evtinfo.armv7_evt_type); + } + } + /* Disable interrupt for this counter */ + armv7_pmnc_disable_intens(idx); + +scorpion_dis_out: + raw_spin_unlock_irqrestore(&pmu_lock, flags); +} + +static void scorpion_pmu_enable_event(struct hw_perf_event *hwc, int idx) +{ + unsigned long flags; + u32 val = 0; + u32 gr; + unsigned long event; + struct scorpion_evt evtinfo; + + /* + * Enable counter and interrupt, and set the counter to count + * the event that we're interested in. + */ + raw_spin_lock_irqsave(&pmu_lock, flags); + + /* Disable counter */ + armv7_pmnc_disable_counter(idx); + + /* + * Set event (if destined for PMNx counters) + * We don't need to set the event if it's a cycle count + */ + if (idx != ARMV7_CYCLE_COUNTER) { + val = hwc->config_base; + val &= ARMV7_EVTSEL_MASK; + if (val < 0x40) { + armv7_pmnc_write_evtsel(idx, hwc->config_base); + } else { + event = get_scorpion_evtinfo(val, &evtinfo); + + if (event == -EINVAL) + goto scorpion_out; + /* + * Set event (if destined for PMNx counters) + * We don't need to set the event if it's a cycle count + */ + armv7_pmnc_write_evtsel(idx, event); + val = 0x0; + asm volatile("mcr p15, 0, %0, c9, c15, 0" : : + "r" (val)); + val = evtinfo.group_setval; + gr = evtinfo.groupcode; + scorpion_evt_setup(gr, val, evtinfo.armv7_evt_type); + } + } + + /* Enable interrupt for this counter */ + armv7_pmnc_enable_intens(idx); + + /* Enable counter */ + armv7_pmnc_enable_counter(idx); + +scorpion_out: + raw_spin_unlock_irqrestore(&pmu_lock, flags); +} + +#ifdef CONFIG_SMP +static void scorpion_secondary_enable_callback(void *info) +{ + int irq = *(unsigned int *)info; + + if (get_irq_chip(irq)->irq_unmask) + get_irq_chip(irq)->irq_unmask(irq_get_irq_data(irq)); +} +static void scorpion_secondary_disable_callback(void *info) +{ + int irq = *(unsigned int *)info; + + if (get_irq_chip(irq)->irq_mask) + get_irq_chip(irq)->irq_mask(irq_get_irq_data(irq)); +} + +static void scorpion_secondary_enable(unsigned int irq) +{ + smp_call_function(scorpion_secondary_enable_callback, &irq, 1); +} + +static void scorpion_secondary_disable(unsigned int irq) +{ + smp_call_function(scorpion_secondary_disable_callback, &irq, 1); +} +#endif + +static struct arm_pmu scorpion_pmu = { + .handle_irq = armv7pmu_handle_irq, +#ifdef CONFIG_SMP + .secondary_enable = scorpion_secondary_enable, + .secondary_disable = scorpion_secondary_disable, +#endif + .enable = scorpion_pmu_enable_event, + .disable = scorpion_pmu_disable_event, + .read_counter = armv7pmu_read_counter, + .write_counter = armv7pmu_write_counter, + .raw_event_mask = 0xFF, + .get_event_idx = armv7pmu_get_event_idx, + .start = armv7pmu_start, + .stop = armv7pmu_stop, + .max_period = (1LLU << 32) - 1, +}; + +static const struct arm_pmu *__init armv7_scorpion_pmu_init(void) +{ + scorpion_pmu.id = ARM_PERF_PMU_ID_SCORPION; + scorpion_pmu.name = "ARMv7 Scorpion"; + scorpion_pmu.cache_map = &armv7_scorpion_perf_cache_map; + scorpion_pmu.event_map = &armv7_scorpion_perf_map; + scorpion_pmu.num_events = armv7_reset_read_pmnc(); + scorpion_clear_pmuregs(); + return &scorpion_pmu; +} + +static const struct arm_pmu *__init armv7_scorpionmp_pmu_init(void) +{ + scorpion_pmu.id = ARM_PERF_PMU_ID_SCORPIONMP; + scorpion_pmu.name = "ARMv7 Scorpion-MP"; + scorpion_pmu.cache_map = &armv7_scorpion_perf_cache_map; + scorpion_pmu.event_map = &armv7_scorpion_perf_map; + scorpion_pmu.num_events = armv7_reset_read_pmnc(); + 
scorpion_clear_pmuregs(); + return &scorpion_pmu; +} +#else +static const struct arm_pmu *__init armv7_scorpion_pmu_init(void) +{ + return NULL; +} +static const struct arm_pmu *__init armv7_scorpionmp_pmu_init(void) +{ + return NULL; +} +#endif /* CONFIG_CPU_V7 */ diff --git a/arch/arm/kernel/perf_event_msm_krait.c b/arch/arm/kernel/perf_event_msm_krait.c new file mode 100644 index 0000000000000..8c0f09c1ab241 --- /dev/null +++ b/arch/arm/kernel/perf_event_msm_krait.c @@ -0,0 +1,399 @@ +/* + * Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include + +#ifdef CONFIG_CPU_V7 +#define KRAIT_EVT_PREFIX 1 +#define KRAIT_MAX_L1_REG 2 +/* + event encoding: prccg + p = prefix (1 for Krait L1) + r = register + cc = code + g = group +*/ +#define KRAIT_L1_ICACHE_MISS 0x10010 +#define KRAIT_L1_ICACHE_ACCESS 0x10011 +#define KRAIT_DTLB_ACCESS 0x121B2 +#define KRAIT_ITLB_ACCESS 0x121C0 + +u32 evt_type_base[] = {0x4c, 0x50, 0x54}; + +static const unsigned armv7_krait_perf_map[PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, + [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, +}; + +static const unsigned armv7_krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(L1D)] = { + /* + * The performance counters don't differentiate between read + * and write accesses/misses so this isn't strictly correct, + * but it's the best we can do. Writes and reads get + * combined. 
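+ * (For example, the generic L1-dcache-load-misses and
+ * L1-dcache-store-misses events as requested through the perf tool
+ * both end up counting ARMV7_PERFCTR_DCACHE_REFILL here.)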
+ */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = KRAIT_L1_ICACHE_ACCESS, + [C(RESULT_MISS)] = KRAIT_L1_ICACHE_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = KRAIT_L1_ICACHE_ACCESS, + [C(RESULT_MISS)] = KRAIT_L1_ICACHE_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = KRAIT_DTLB_ACCESS, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = KRAIT_DTLB_ACCESS, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = KRAIT_ITLB_ACCESS, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = KRAIT_ITLB_ACCESS, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] + = ARMV7_PERFCTR_PC_BRANCH_MIS_USED, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_PC_BRANCH_MIS_USED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] + = ARMV7_PERFCTR_PC_BRANCH_MIS_USED, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_PC_BRANCH_MIS_USED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, +}; + +struct krait_evt { + /* + * The group_setval field corresponds to the value that the group + * register needs to be set to. This value is calculated from the row + * and column that the event belongs to in the event table + */ + u32 group_setval; + + /* + * The groupcode corresponds to the group that the event belongs to. 
+ * Krait has 3 groups of events PMRESR0, 1, 2 + * going from 0 to 2 in terms of the codes used + */ + u8 groupcode; + + /* + * The armv7_evt_type field corresponds to the armv7 defined event + * code that the Krait events map to + */ + u32 armv7_evt_type; +}; + +static unsigned int get_krait_evtinfo(unsigned int krait_evt_type, + struct krait_evt *evtinfo) +{ + u8 prefix; + u8 reg; + u8 code; + u8 group; + + prefix = (krait_evt_type & 0xF0000) >> 16; + reg = (krait_evt_type & 0x0F000) >> 12; + code = (krait_evt_type & 0x00FF0) >> 4; + group = krait_evt_type & 0x0000F; + + if ((prefix != KRAIT_EVT_PREFIX) || (group > 3) || + (reg > KRAIT_MAX_L1_REG)) + return -EINVAL; + + evtinfo->group_setval = 0x80000000 | (code << (group * 8)); + evtinfo->groupcode = reg; + evtinfo->armv7_evt_type = evt_type_base[reg] | group; + + return evtinfo->armv7_evt_type; +} + +static u32 krait_read_pmresr0(void) +{ + u32 val; + + asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val)); + return val; +} + +static void krait_write_pmresr0(u32 val) +{ + asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val)); +} + +static u32 krait_read_pmresr1(void) +{ + u32 val; + + asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val)); + return val; +} + +static void krait_write_pmresr1(u32 val) +{ + asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val)); +} + +static u32 krait_read_pmresr2(void) +{ + u32 val; + + asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val)); + return val; +} + +static void krait_write_pmresr2(u32 val) +{ + asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val)); +} + +struct krait_access_funcs { + u32 (*read) (void); + void (*write) (u32); +}; + +/* + * The krait_functions array is used to set up the event register codes + * based on the group to which an event belongs. + * Having the following array modularizes the code for doing that. 
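+ *
+ * Worked example (editorial note): KRAIT_L1_ICACHE_ACCESS (0x10011)
+ * decodes in get_krait_evtinfo() to prefix 1, reg 0, code 0x01 and
+ * group 1, giving group_setval = 0x80000000 | (0x01 << 8) = 0x80000100,
+ * groupcode = 0 and armv7_evt_type = evt_type_base[0] | 1 = 0x4d;
+ * krait_evt_setup() then programs the event through krait_functions[0],
+ * i.e. the krait_read/write_pmresr0 accessors above.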
+ */ +struct krait_access_funcs krait_functions[] = { + {krait_read_pmresr0, krait_write_pmresr0}, + {krait_read_pmresr1, krait_write_pmresr1}, + {krait_read_pmresr2, krait_write_pmresr2}, +}; + +static inline u32 krait_get_columnmask(u32 evt_code) +{ + const u32 columnmasks[] = {0xffffff00, 0xffff00ff, 0xff00ffff, + 0x80ffffff}; + + return columnmasks[evt_code & 0x3]; +} + +static void krait_evt_setup(u32 gr, u32 setval, u32 evt_code) +{ + u32 val; + + val = krait_get_columnmask(evt_code) & krait_functions[gr].read(); + val = val | setval; + krait_functions[gr].write(val); +} + +static void krait_clear_pmuregs(void) +{ + krait_write_pmresr0(0); + krait_write_pmresr1(0); + krait_write_pmresr2(0); +} + +static void krait_clearpmu(u32 grp, u32 val, u32 evt_code) +{ + u32 new_pmuval; + + new_pmuval = krait_functions[grp].read() & + krait_get_columnmask(evt_code); + krait_functions[grp].write(new_pmuval); +} + +static void krait_pmu_disable_event(struct hw_perf_event *hwc, int idx) +{ + unsigned long flags; + u32 val = 0; + u32 gr; + unsigned long event; + struct krait_evt evtinfo; + + /* Disable counter and interrupt */ + raw_spin_lock_irqsave(&pmu_lock, flags); + + /* Disable counter */ + armv7_pmnc_disable_counter(idx); + + /* + * Clear pmresr code (if destined for PMNx counters) + * We don't need to set the event if it's a cycle count + */ + if (idx != ARMV7_CYCLE_COUNTER) { + val = hwc->config_base; + if (val > 0x40) { + event = get_krait_evtinfo(val, &evtinfo); + if (event == -EINVAL) + goto krait_dis_out; + val = evtinfo.group_setval; + gr = evtinfo.groupcode; + krait_clearpmu(gr, val, evtinfo.armv7_evt_type); + } + } + /* Disable interrupt for this counter */ + armv7_pmnc_disable_intens(idx); + +krait_dis_out: + raw_spin_unlock_irqrestore(&pmu_lock, flags); +} + +static void krait_pmu_enable_event(struct hw_perf_event *hwc, int idx) +{ + unsigned long flags; + u32 val = 0; + u32 gr; + unsigned long event; + struct krait_evt evtinfo; + + /* + * Enable counter and interrupt, and set the counter to count + * the event that we're interested in. 
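+ *
+ * (Editorial note: the code below first disables the counter, then for
+ * Krait-specific codes (>= 0x40) decodes them with get_krait_evtinfo(),
+ * writes the mapped ARMv7 code to EVTSEL and programs the matching
+ * PMRESRn group register via krait_evt_setup(); plain ARMv7 codes
+ * (< 0x40) are written to EVTSEL directly. The interrupt and the
+ * counter are then re-enabled.)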
+ */ + raw_spin_lock_irqsave(&pmu_lock, flags); + + /* Disable counter */ + armv7_pmnc_disable_counter(idx); + + /* + * Set event (if destined for PMNx counters) + * We don't need to set the event if it's a cycle count + */ + if (idx != ARMV7_CYCLE_COUNTER) { + val = hwc->config_base; + if (val < 0x40) { + armv7_pmnc_write_evtsel(idx, hwc->config_base); + } else { + event = get_krait_evtinfo(val, &evtinfo); + + if (event == -EINVAL) + goto krait_out; + /* + * Set event (if destined for PMNx counters) + * We don't need to set the event if it's a cycle count + */ + armv7_pmnc_write_evtsel(idx, event); + val = 0x0; + asm volatile("mcr p15, 0, %0, c9, c15, 0" : : + "r" (val)); + val = evtinfo.group_setval; + gr = evtinfo.groupcode; + krait_evt_setup(gr, val, evtinfo.armv7_evt_type); + } + } + + /* Enable interrupt for this counter */ + armv7_pmnc_enable_intens(idx); + + /* Enable counter */ + armv7_pmnc_enable_counter(idx); + +krait_out: + raw_spin_unlock_irqrestore(&pmu_lock, flags); +} + +static struct arm_pmu krait_pmu = { + .handle_irq = armv7pmu_handle_irq, +#ifdef CONFIG_ARCH_MSM_SMP + .secondary_enable = scorpion_secondary_enable, + .secondary_disable = scorpion_secondary_disable, +#endif + .enable = krait_pmu_enable_event, + .disable = krait_pmu_disable_event, + .read_counter = armv7pmu_read_counter, + .write_counter = armv7pmu_write_counter, + .raw_event_mask = 0xFFFFF, + .get_event_idx = armv7pmu_get_event_idx, + .start = armv7pmu_start, + .stop = armv7pmu_stop, + .max_period = (1LLU << 32) - 1, +}; + +static const struct arm_pmu *__init armv7_krait_pmu_init(void) +{ + krait_pmu.id = ARM_PERF_PMU_ID_KRAIT; + krait_pmu.name = "ARMv7 Krait"; + krait_pmu.cache_map = &armv7_krait_perf_cache_map; + krait_pmu.event_map = &armv7_krait_perf_map; + krait_pmu.num_events = armv7_reset_read_pmnc(); + krait_clear_pmuregs(); + return &krait_pmu; +} + +#else +static const struct arm_pmu *__init armv7_krait_pmu_init(void) +{ + return NULL; +} +#endif /* CONFIG_CPU_V7 */ diff --git a/arch/arm/kernel/perf_event_msm_krait_l2.c b/arch/arm/kernel/perf_event_msm_krait_l2.c new file mode 100644 index 0000000000000..97b81b6d14f48 --- /dev/null +++ b/arch/arm/kernel/perf_event_msm_krait_l2.c @@ -0,0 +1,649 @@ +/* + * Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifdef CONFIG_CPU_HAS_L2_PMU + +#include + +#include + +#define MAX_L2_PERIOD ((1ULL << 32) - 1) +#define MAX_KRAIT_L2_CTRS 5 + +#define L2PMCCNTR 0x409 +#define L2PMCCNTCR 0x408 +#define L2PMCCNTSR 0x40A +#define L2CYCLE_CTR_BIT 31 +#define L2CYCLE_CTR_EVENT_IDX 4 +#define L2CYCLE_CTR_RAW_CODE 0xfe + +#define L2PMOVSR 0x406 + +#define L2PMCR 0x400 +#define L2PMCR_RESET_ALL 0x6 +#define L2PMCR_GLOBAL_ENABLE 0x1 +#define L2PMCR_GLOBAL_DISABLE 0x0 + +#define L2PMCNTENSET 0x403 +#define L2PMCNTENCLR 0x402 + +#define L2PMINTENSET 0x405 +#define L2PMINTENCLR 0x404 + +#define IA_L2PMXEVCNTCR_BASE 0x420 +#define IA_L2PMXEVTYPER_BASE 0x424 +#define IA_L2PMRESX_BASE 0x410 +#define IA_L2PMXEVFILTER_BASE 0x423 +#define IA_L2PMXEVCNTR_BASE 0x421 + +/* event format is -e rsRCCG See get_event_desc() */ + +#define EVENT_REG_MASK 0xf000 +#define EVENT_GROUPSEL_MASK 0x000f +#define EVENT_GROUPCODE_MASK 0x0ff0 +#define EVENT_REG_SHIFT 12 +#define EVENT_GROUPCODE_SHIFT 4 + +#define RESRX_VALUE_EN 0x80000000 + +static struct platform_device *l2_pmu_device; + +struct hw_krait_l2_pmu { + struct perf_event *events[MAX_KRAIT_L2_CTRS]; + unsigned long active_mask[BITS_TO_LONGS(MAX_KRAIT_L2_CTRS)]; + raw_spinlock_t lock; +}; + +struct hw_krait_l2_pmu hw_krait_l2_pmu; + +struct event_desc { + int event_groupsel; + int event_reg; + int event_group_code; +}; + +void get_event_desc(u64 config, struct event_desc *evdesc) +{ + /* L2PMEVCNTRX */ + evdesc->event_reg = (config & EVENT_REG_MASK) >> EVENT_REG_SHIFT; + /* Group code (row ) */ + evdesc->event_group_code = + (config & EVENT_GROUPCODE_MASK) >> EVENT_GROUPCODE_SHIFT; + /* Group sel (col) */ + evdesc->event_groupsel = (config & EVENT_GROUPSEL_MASK); + + pr_debug("%s: reg: %x, group_code: %x, groupsel: %x\n", __func__, + evdesc->event_reg, evdesc->event_group_code, + evdesc->event_groupsel); +} + +static void set_evcntcr(int ctr) +{ + u32 evtcr_reg = (ctr * 16) + IA_L2PMXEVCNTCR_BASE; + + set_l2_indirect_reg(evtcr_reg, 0x0); +} + +static void set_evtyper(int event_groupsel, int event_reg, int ctr) +{ + u32 evtype_reg = (ctr * 16) + IA_L2PMXEVTYPER_BASE; + u32 evtype_val = event_groupsel + (4 * event_reg); + + set_l2_indirect_reg(evtype_reg, evtype_val); +} + +static void set_evres(int event_groupsel, int event_reg, int event_group_code) +{ + u32 group_reg = event_reg + IA_L2PMRESX_BASE; + u32 group_val = + RESRX_VALUE_EN | (event_group_code << (8 * event_groupsel)); + u32 resr_val; + u32 group_byte = 0xff; + u32 group_mask = ~(group_byte << (8 * event_groupsel)); + + resr_val = get_l2_indirect_reg(group_reg); + resr_val &= group_mask; + resr_val |= group_val; + + set_l2_indirect_reg(group_reg, resr_val); +} + +static void set_evfilter(int ctr) +{ + u32 filter_reg = (ctr * 16) + IA_L2PMXEVFILTER_BASE; + u32 filter_val = 0x000f0030 | 1 << smp_processor_id(); + + set_l2_indirect_reg(filter_reg, filter_val); +} + +static void enable_intenset(u32 idx) +{ + if (idx == L2CYCLE_CTR_EVENT_IDX) + set_l2_indirect_reg(L2PMINTENSET, 1 << L2CYCLE_CTR_BIT); + else + set_l2_indirect_reg(L2PMINTENSET, 1 << idx); +} + +static void disable_intenclr(u32 idx) +{ + if (idx == L2CYCLE_CTR_EVENT_IDX) + set_l2_indirect_reg(L2PMINTENCLR, 1 << L2CYCLE_CTR_BIT); + else + set_l2_indirect_reg(L2PMINTENCLR, 1 << idx); +} + +static void enable_counter(u32 idx) +{ + if (idx == L2CYCLE_CTR_EVENT_IDX) + set_l2_indirect_reg(L2PMCNTENSET, 1 << L2CYCLE_CTR_BIT); + else + set_l2_indirect_reg(L2PMCNTENSET, 1 << idx); +} + +static void disable_counter(u32 idx) +{ + if (idx == L2CYCLE_CTR_EVENT_IDX) + 
set_l2_indirect_reg(L2PMCNTENCLR, 1 << L2CYCLE_CTR_BIT); + else + set_l2_indirect_reg(L2PMCNTENCLR, 1 << idx); +} + +static u64 read_counter(u32 idx) +{ + u32 val; + u32 counter_reg = (idx * 16) + IA_L2PMXEVCNTR_BASE; + + if (idx == L2CYCLE_CTR_EVENT_IDX) + val = get_l2_indirect_reg(L2PMCCNTR); + else + val = get_l2_indirect_reg(counter_reg); + + return val; +} + +static void write_counter(u32 idx, u32 val) +{ + u32 counter_reg = (idx * 16) + IA_L2PMXEVCNTR_BASE; + + if (idx == L2CYCLE_CTR_EVENT_IDX) + set_l2_indirect_reg(L2PMCCNTR, val); + else + set_l2_indirect_reg(counter_reg, val); +} + +static int +pmu_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, int idx) +{ + s64 left = local64_read(&hwc->period_left); + s64 period = hwc->sample_period; + int ret = 0; + + if (unlikely(left <= -period)) { + left = period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (unlikely(left <= 0)) { + left += period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (left > (s64) MAX_L2_PERIOD) + left = MAX_L2_PERIOD; + + local64_set(&hwc->prev_count, (u64)-left); + + write_counter(idx, (u64) (-left) & 0xffffffff); + + perf_event_update_userpage(event); + + return ret; +} + +static u64 +pmu_event_update(struct perf_event *event, struct hw_perf_event *hwc, int idx, + int overflow) +{ + u64 prev_raw_count, new_raw_count; + u64 delta; + +again: + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = read_counter(idx); + + if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + new_raw_count &= MAX_L2_PERIOD; + prev_raw_count &= MAX_L2_PERIOD; + + if (overflow) + delta = MAX_L2_PERIOD - prev_raw_count + new_raw_count; + else + delta = new_raw_count - prev_raw_count; + + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); + + pr_debug("%s: new: %lld, prev: %lld, event: %ld count: %lld\n", + __func__, new_raw_count, prev_raw_count, + hwc->config_base, local64_read(&event->count)); + + return new_raw_count; +} + +static void krait_l2_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + pmu_event_update(event, hwc, hwc->idx, 0); +} + +static void krait_l2_stop_counter(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (!(hwc->state & PERF_HES_STOPPED)) { + disable_intenclr(idx); + disable_counter(idx); + + pmu_event_update(event, hwc, idx, 0); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; + } + + pr_debug("%s: event: %ld ctr: %d stopped\n", __func__, hwc->config_base, + idx); +} + +static void krait_l2_start_counter(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + struct event_desc evdesc; + + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + + hwc->state = 0; + + pmu_event_set_period(event, hwc, idx); + + if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) + goto out; + + set_evcntcr(idx); + + memset(&evdesc, 0, sizeof(evdesc)); + + get_event_desc(hwc->config_base, &evdesc); + + set_evtyper(evdesc.event_groupsel, evdesc.event_reg, idx); + + set_evres(evdesc.event_groupsel, evdesc.event_reg, + evdesc.event_group_code); + + set_evfilter(idx); + +out: + enable_intenset(idx); + enable_counter(idx); + + pr_debug + ("%s: ctr: %d group: %ld group_code: %lld started from cpu:%d\n", + __func__, idx, hwc->config_base, hwc->config, smp_processor_id()); +} 
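+
+/*
+ * Editorial sketch, not part of this patch: tracing the "rsRCCG" raw
+ * event format through the helpers above for a hypothetical raw code
+ * 0x2013.  get_event_desc() yields event_reg = 2, event_group_code =
+ * 0x01 and event_groupsel = 3; krait_l2_start_counter() would then
+ * write 3 + (4 * 2) = 0xb to the counter's event type register
+ * ((ctr * 16) + IA_L2PMXEVTYPER_BASE) and merge
+ * RESRX_VALUE_EN | (0x01 << (8 * 3)) = 0x81000000 into the group
+ * register at IA_L2PMRESX_BASE + 2.
+ */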
+ +static void krait_l2_del_event(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + unsigned long iflags; + + raw_spin_lock_irqsave(&hw_krait_l2_pmu.lock, iflags); + + clear_bit(idx, (long unsigned int *)(&hw_krait_l2_pmu.active_mask)); + + krait_l2_stop_counter(event, PERF_EF_UPDATE); + hw_krait_l2_pmu.events[idx] = NULL; + hwc->idx = -1; + + raw_spin_unlock_irqrestore(&hw_krait_l2_pmu.lock, iflags); + + pr_debug("%s: event: %ld deleted\n", __func__, hwc->config_base); + + perf_event_update_userpage(event); +} + +static int krait_l2_add_event(struct perf_event *event, int flags) +{ + int ctr = 0; + struct hw_perf_event *hwc = &event->hw; + unsigned long iflags; + int err = 0; + + perf_pmu_disable(event->pmu); + + raw_spin_lock_irqsave(&hw_krait_l2_pmu.lock, iflags); + + /* Cycle counter has a resrvd index */ + if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) { + if (hw_krait_l2_pmu.events[L2CYCLE_CTR_EVENT_IDX]) { + pr_err("%s: Stale cycle ctr event ptr !\n", __func__); + err = -EINVAL; + goto out; + } + hwc->idx = L2CYCLE_CTR_EVENT_IDX; + hw_krait_l2_pmu.events[L2CYCLE_CTR_EVENT_IDX] = event; + set_bit(L2CYCLE_CTR_EVENT_IDX, + (long unsigned int *)&hw_krait_l2_pmu.active_mask); + goto skip_ctr_loop; + } + + for (ctr = 0; ctr < MAX_KRAIT_L2_CTRS - 1; ctr++) { + if (!hw_krait_l2_pmu.events[ctr]) { + hwc->idx = ctr; + hw_krait_l2_pmu.events[ctr] = event; + set_bit(ctr, + (long unsigned int *) + &hw_krait_l2_pmu.active_mask); + break; + } + } + + if (hwc->idx < 0) { + err = -ENOSPC; + pr_err("%s: No space for event: %llx!!\n", __func__, + event->attr.config); + goto out; + } + +skip_ctr_loop: + + disable_counter(hwc->idx); + + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + + if (flags & PERF_EF_START) + krait_l2_start_counter(event, PERF_EF_RELOAD); + + perf_event_update_userpage(event); + + pr_debug("%s: event: %ld, ctr: %d added from cpu:%d\n", + __func__, hwc->config_base, hwc->idx, smp_processor_id()); +out: + raw_spin_unlock_irqrestore(&hw_krait_l2_pmu.lock, iflags); + + /* Resume the PMU even if this event could not be added */ + perf_pmu_enable(event->pmu); + + return err; +} + +static void krait_l2_pmu_enable(struct pmu *pmu) +{ + isb(); + set_l2_indirect_reg(L2PMCR, L2PMCR_GLOBAL_ENABLE); +} + +static void krait_l2_pmu_disable(struct pmu *pmu) +{ + set_l2_indirect_reg(L2PMCR, L2PMCR_GLOBAL_DISABLE); + isb(); +} + +u32 get_reset_pmovsr(void) +{ + int val; + + val = get_l2_indirect_reg(L2PMOVSR); + /* reset it */ + val &= 0xffffffff; + set_l2_indirect_reg(L2PMOVSR, val); + + return val; +} + +static irqreturn_t krait_l2_handle_irq(int irq_num, void *dev) +{ + unsigned long pmovsr; + struct perf_sample_data data; + struct pt_regs *regs; + struct perf_event *event; + struct hw_perf_event *hwc; + int bitp; + int idx = 0; + + pmovsr = get_reset_pmovsr(); + + if (!(pmovsr & 0xffffffff)) + return IRQ_NONE; + + regs = get_irq_regs(); + + perf_sample_data_init(&data, 0); + + raw_spin_lock(&hw_krait_l2_pmu.lock); + + while (pmovsr) { + bitp = __ffs(pmovsr); + + if (bitp == L2CYCLE_CTR_BIT) + idx = L2CYCLE_CTR_EVENT_IDX; + else + idx = bitp; + + event = hw_krait_l2_pmu.events[idx]; + + if (!event) + goto next; + + if (!test_bit(idx, hw_krait_l2_pmu.active_mask)) + goto next; + + hwc = &event->hw; + pmu_event_update(event, hwc, idx, 1); + data.period = event->hw.last_period; + + if (!pmu_event_set_period(event, hwc, idx)) + goto next; + + if (perf_event_overflow(event, 0, &data, regs)) + disable_counter(hwc->idx); +next: + pmovsr &= 
(pmovsr - 1); + } + + raw_spin_unlock(&hw_krait_l2_pmu.lock); + + irq_work_run(); + + return IRQ_HANDLED; +} + +static atomic_t active_l2_events = ATOMIC_INIT(0); +static DEFINE_MUTEX(krait_pmu_reserve_mutex); + +static int pmu_reserve_hardware(void) +{ + int i, err = -ENODEV, irq; + + l2_pmu_device = reserve_pmu(ARM_PMU_DEVICE_L2); + + if (IS_ERR(l2_pmu_device)) { + pr_warning("unable to reserve pmu\n"); + return PTR_ERR(l2_pmu_device); + } + + if (l2_pmu_device->num_resources < 1) { + pr_err("no irqs for PMUs defined\n"); + return -ENODEV; + } + + if (strncmp(l2_pmu_device->name, "l2-arm-pmu", 6)) { + pr_err("Incorrect pdev reserved !\n"); + return -EINVAL; + } + + for (i = 0; i < l2_pmu_device->num_resources; ++i) { + irq = platform_get_irq(l2_pmu_device, i); + if (irq < 0) + continue; + + err = request_irq(irq, krait_l2_handle_irq, + IRQF_DISABLED | IRQF_NOBALANCING, + "krait-l2-pmu", NULL); + if (err) { + pr_warning("unable to request IRQ%d for Krait L2 perf " + "counters\n", irq); + break; + } + + get_irq_chip(irq)->irq_unmask(irq_get_irq_data(irq)); + } + + if (err) { + for (i = i - 1; i >= 0; --i) { + irq = platform_get_irq(l2_pmu_device, i); + if (irq >= 0) + free_irq(irq, NULL); + } + release_pmu(l2_pmu_device); + l2_pmu_device = NULL; + } + + return err; +} + +static void pmu_release_hardware(void) +{ + int i, irq; + + for (i = l2_pmu_device->num_resources - 1; i >= 0; --i) { + irq = platform_get_irq(l2_pmu_device, i); + if (irq >= 0) + free_irq(irq, NULL); + } + + krait_l2_pmu_disable(NULL); + + release_pmu(l2_pmu_device); + l2_pmu_device = NULL; +} + +static void pmu_perf_event_destroy(struct perf_event *event) +{ + if (atomic_dec_and_mutex_lock + (&active_l2_events, &krait_pmu_reserve_mutex)) { + pmu_release_hardware(); + mutex_unlock(&krait_pmu_reserve_mutex); + } +} + +static int krait_l2_event_init(struct perf_event *event) +{ + int err = 0; + struct hw_perf_event *hwc = &event->hw; + int status = 0; + + switch (event->attr.type) { + case PERF_TYPE_SHARED: + break; + + default: + return -ENOENT; + } + + hwc->idx = -1; + + event->destroy = pmu_perf_event_destroy; + + if (!atomic_inc_not_zero(&active_l2_events)) { + /* 0 active events */ + mutex_lock(&krait_pmu_reserve_mutex); + err = pmu_reserve_hardware(); + mutex_unlock(&krait_pmu_reserve_mutex); + if (!err) + atomic_inc(&active_l2_events); + else + return err; + } + + hwc->config_base = event->attr.config; + hwc->config = 0; + hwc->event_base = 0; + + /* Only one CPU can control the cycle counter */ + if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) { + /* Check if its already running */ + status = get_l2_indirect_reg(L2PMCCNTSR); + if (status == 0x2) { + err = -ENOSPC; + goto out; + } + } + + if (!hwc->sample_period) { + hwc->sample_period = MAX_L2_PERIOD; + hwc->last_period = hwc->sample_period; + local64_set(&hwc->period_left, hwc->sample_period); + } + + pr_debug("%s: event: %lld init'd\n", __func__, event->attr.config); + +out: + if (err < 0) + pmu_perf_event_destroy(event); + + return err; +} + +static struct pmu krait_l2_pmu = { + .pmu_enable = krait_l2_pmu_enable, + .pmu_disable = krait_l2_pmu_disable, + .event_init = krait_l2_event_init, + .add = krait_l2_add_event, + .del = krait_l2_del_event, + .start = krait_l2_start_counter, + .stop = krait_l2_stop_counter, + .read = krait_l2_read, +}; + +static const struct arm_pmu *__init krait_l2_pmu_init(void) +{ + /* Register our own PMU here */ + perf_pmu_register(&krait_l2_pmu, "Krait L2", PERF_TYPE_SHARED); + + memset(&hw_krait_l2_pmu, 0, sizeof(hw_krait_l2_pmu)); + + 
/* Reset all ctrs */ + set_l2_indirect_reg(L2PMCR, L2PMCR_RESET_ALL); + + /* Avoid spurious interrupt if any */ + get_reset_pmovsr(); + + /* Don't return an arm_pmu here */ + return NULL; +} +#else + +static const struct arm_pmu *__init krait_l2_pmu_init(void) +{ + return NULL; +} +#endif diff --git a/arch/arm/kernel/perf_event_msm_l2.c b/arch/arm/kernel/perf_event_msm_l2.c new file mode 100644 index 0000000000000..46733ab1d73b4 --- /dev/null +++ b/arch/arm/kernel/perf_event_msm_l2.c @@ -0,0 +1,974 @@ +/* + * Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifdef CONFIG_CPU_HAS_L2_PMU + +#include + +#define MAX_BB_L2_PERIOD ((1ULL << 32) - 1) +#define MAX_BB_L2_CTRS 5 +#define BB_L2CYCLE_CTR_BIT 31 +#define BB_L2CYCLE_CTR_EVENT_IDX 4 +#define BB_L2CYCLE_CTR_RAW_CODE 0xfe +#define SCORPIONL2_PMNC_E (1 << 0) /* Enable all counters */ + +/* + * Lock to protect r/m/w sequences to the L2 PMU. + */ +DEFINE_RAW_SPINLOCK(bb_l2_pmu_lock); + +static struct platform_device *bb_l2_pmu_device; + +struct hw_bb_l2_pmu { + struct perf_event *events[MAX_BB_L2_CTRS]; + unsigned long active_mask[BITS_TO_LONGS(MAX_BB_L2_CTRS)]; + raw_spinlock_t lock; +}; + +struct hw_bb_l2_pmu hw_bb_l2_pmu; + +struct bb_l2_scorp_evt { + u32 evt_type; + u32 val; + u8 grp; + u32 evt_type_act; +}; + +enum scorpion_perf_types { + SCORPIONL2_TOTAL_BANK_REQ = 0x90, + SCORPIONL2_DSIDE_READ = 0x91, + SCORPIONL2_DSIDE_WRITE = 0x92, + SCORPIONL2_ISIDE_READ = 0x93, + SCORPIONL2_L2CACHE_ISIDE_READ = 0x94, + SCORPIONL2_L2CACHE_BANK_REQ = 0x95, + SCORPIONL2_L2CACHE_DSIDE_READ = 0x96, + SCORPIONL2_L2CACHE_DSIDE_WRITE = 0x97, + SCORPIONL2_L2NOCACHE_DSIDE_WRITE = 0x98, + SCORPIONL2_L2NOCACHE_ISIDE_READ = 0x99, + SCORPIONL2_L2NOCACHE_TOTAL_REQ = 0x9a, + SCORPIONL2_L2NOCACHE_DSIDE_READ = 0x9b, + SCORPIONL2_DSIDE_READ_NOL1 = 0x9c, + SCORPIONL2_L2CACHE_WRITETHROUGH = 0x9d, + SCORPIONL2_BARRIERS = 0x9e, + SCORPIONL2_HARDWARE_TABLE_WALKS = 0x9f, + SCORPIONL2_MVA_POC = 0xa0, + SCORPIONL2_L2CACHE_HW_TABLE_WALKS = 0xa1, + SCORPIONL2_SETWAY_CACHE_OPS = 0xa2, + SCORPIONL2_DSIDE_WRITE_HITS = 0xa3, + SCORPIONL2_ISIDE_READ_HITS = 0xa4, + SCORPIONL2_CACHE_DSIDE_READ_NOL1 = 0xa5, + SCORPIONL2_TOTAL_CACHE_HITS = 0xa6, + SCORPIONL2_CACHE_MATCH_MISS = 0xa7, + SCORPIONL2_DREAD_HIT_L1_DATA = 0xa8, + SCORPIONL2_L2LINE_LOCKED = 0xa9, + SCORPIONL2_HW_TABLE_WALK_HIT = 0xaa, + SCORPIONL2_CACHE_MVA_POC = 0xab, + SCORPIONL2_L2ALLOC_DWRITE_MISS = 0xac, + SCORPIONL2_CORRECTED_TAG_ARRAY = 0xad, + SCORPIONL2_CORRECTED_DATA_ARRAY = 0xae, + SCORPIONL2_CORRECTED_REPLACEMENT_ARRAY = 0xaf, + SCORPIONL2_PMBUS_MPAAF = 0xb0, + SCORPIONL2_PMBUS_MPWDAF = 0xb1, + SCORPIONL2_PMBUS_MPBRT = 0xb2, + SCORPIONL2_CPU0_GRANT = 0xb3, + SCORPIONL2_CPU1_GRANT = 0xb4, + SCORPIONL2_CPU0_NOGRANT = 0xb5, + SCORPIONL2_CPU1_NOGRANT = 0xb6, + SCORPIONL2_CPU0_LOSING_ARB = 0xb7, + SCORPIONL2_CPU1_LOSING_ARB = 0xb8, + SCORPIONL2_SLAVEPORT_NOGRANT = 0xb9, + SCORPIONL2_SLAVEPORT_BPQ_FULL = 0xba, + SCORPIONL2_SLAVEPORT_LOSING_ARB = 0xbb, + SCORPIONL2_SLAVEPORT_GRANT = 0xbc, + SCORPIONL2_SLAVEPORT_GRANTLOCK = 0xbd, + 
SCORPIONL2_L2EM_STREX_PASS = 0xbe, + SCORPIONL2_L2EM_STREX_FAIL = 0xbf, + SCORPIONL2_LDREX_RESERVE_L2EM = 0xc0, + SCORPIONL2_SLAVEPORT_LDREX = 0xc1, + SCORPIONL2_CPU0_L2EM_CLEARED = 0xc2, + SCORPIONL2_CPU1_L2EM_CLEARED = 0xc3, + SCORPIONL2_SLAVEPORT_L2EM_CLEARED = 0xc4, + SCORPIONL2_CPU0_CLAMPED = 0xc5, + SCORPIONL2_CPU1_CLAMPED = 0xc6, + SCORPIONL2_CPU0_WAIT = 0xc7, + SCORPIONL2_CPU1_WAIT = 0xc8, + SCORPIONL2_CPU0_NONAMBAS_WAIT = 0xc9, + SCORPIONL2_CPU1_NONAMBAS_WAIT = 0xca, + SCORPIONL2_CPU0_DSB_WAIT = 0xcb, + SCORPIONL2_CPU1_DSB_WAIT = 0xcc, + SCORPIONL2_AXI_READ = 0xcd, + SCORPIONL2_AXI_WRITE = 0xce, + + SCORPIONL2_1BEAT_WRITE = 0xcf, + SCORPIONL2_2BEAT_WRITE = 0xd0, + SCORPIONL2_4BEAT_WRITE = 0xd1, + SCORPIONL2_8BEAT_WRITE = 0xd2, + SCORPIONL2_12BEAT_WRITE = 0xd3, + SCORPIONL2_16BEAT_WRITE = 0xd4, + SCORPIONL2_1BEAT_DSIDE_READ = 0xd5, + SCORPIONL2_2BEAT_DSIDE_READ = 0xd6, + SCORPIONL2_4BEAT_DSIDE_READ = 0xd7, + SCORPIONL2_8BEAT_DSIDE_READ = 0xd8, + SCORPIONL2_CSYS_READ_1BEAT = 0xd9, + SCORPIONL2_CSYS_READ_2BEAT = 0xda, + SCORPIONL2_CSYS_READ_4BEAT = 0xdb, + SCORPIONL2_CSYS_READ_8BEAT = 0xdc, + SCORPIONL2_4BEAT_IFETCH_READ = 0xdd, + SCORPIONL2_8BEAT_IFETCH_READ = 0xde, + SCORPIONL2_CSYS_WRITE_1BEAT = 0xdf, + SCORPIONL2_CSYS_WRITE_2BEAT = 0xe0, + SCORPIONL2_AXI_READ_DATA_BEAT = 0xe1, + SCORPIONL2_AXI_WRITE_EVT1 = 0xe2, + SCORPIONL2_AXI_WRITE_EVT2 = 0xe3, + SCORPIONL2_LDREX_REQ = 0xe4, + SCORPIONL2_STREX_PASS = 0xe5, + SCORPIONL2_STREX_FAIL = 0xe6, + SCORPIONL2_CPREAD = 0xe7, + SCORPIONL2_CPWRITE = 0xe8, + SCORPIONL2_BARRIER_REQ = 0xe9, + SCORPIONL2_AXI_READ_SLVPORT = 0xea, + SCORPIONL2_AXI_WRITE_SLVPORT = 0xeb, + SCORPIONL2_AXI_READ_SLVPORT_DATABEAT = 0xec, + SCORPIONL2_AXI_WRITE_SLVPORT_DATABEAT = 0xed, + SCORPIONL2_SNOOPKILL_PREFILTER = 0xee, + SCORPIONL2_SNOOPKILL_FILTEROUT = 0xef, + SCORPIONL2_SNOOPED_IC = 0xf0, + SCORPIONL2_SNOOPED_BP = 0xf1, + SCORPIONL2_SNOOPED_BARRIERS = 0xf2, + SCORPIONL2_SNOOPED_TLB = 0xf3, + BB_L2_MAX_EVT, +}; + +static const struct bb_l2_scorp_evt sc_evt[] = { + {SCORPIONL2_TOTAL_BANK_REQ, 0x80000001, 0, 0x00}, + {SCORPIONL2_DSIDE_READ, 0x80000100, 0, 0x01}, + {SCORPIONL2_DSIDE_WRITE, 0x80010000, 0, 0x02}, + {SCORPIONL2_ISIDE_READ, 0x81000000, 0, 0x03}, + {SCORPIONL2_L2CACHE_ISIDE_READ, 0x80000002, 0, 0x00}, + {SCORPIONL2_L2CACHE_BANK_REQ, 0x80000200, 0, 0x01}, + {SCORPIONL2_L2CACHE_DSIDE_READ, 0x80020000, 0, 0x02}, + {SCORPIONL2_L2CACHE_DSIDE_WRITE, 0x82000000, 0, 0x03}, + {SCORPIONL2_L2NOCACHE_DSIDE_WRITE, 0x80000003, 0, 0x00}, + {SCORPIONL2_L2NOCACHE_ISIDE_READ, 0x80000300, 0, 0x01}, + {SCORPIONL2_L2NOCACHE_TOTAL_REQ, 0x80030000, 0, 0x02}, + {SCORPIONL2_L2NOCACHE_DSIDE_READ, 0x83000000, 0, 0x03}, + {SCORPIONL2_DSIDE_READ_NOL1, 0x80000004, 0, 0x00}, + {SCORPIONL2_L2CACHE_WRITETHROUGH, 0x80000400, 0, 0x01}, + {SCORPIONL2_BARRIERS, 0x84000000, 0, 0x03}, + {SCORPIONL2_HARDWARE_TABLE_WALKS, 0x80000005, 0, 0x00}, + {SCORPIONL2_MVA_POC, 0x80000500, 0, 0x01}, + {SCORPIONL2_L2CACHE_HW_TABLE_WALKS, 0x80050000, 0, 0x02}, + {SCORPIONL2_SETWAY_CACHE_OPS, 0x85000000, 0, 0x03}, + {SCORPIONL2_DSIDE_WRITE_HITS, 0x80000006, 0, 0x00}, + {SCORPIONL2_ISIDE_READ_HITS, 0x80000600, 0, 0x01}, + {SCORPIONL2_CACHE_DSIDE_READ_NOL1, 0x80060000, 0, 0x02}, + {SCORPIONL2_TOTAL_CACHE_HITS, 0x86000000, 0, 0x03}, + {SCORPIONL2_CACHE_MATCH_MISS, 0x80000007, 0, 0x00}, + {SCORPIONL2_DREAD_HIT_L1_DATA, 0x87000000, 0, 0x03}, + {SCORPIONL2_L2LINE_LOCKED, 0x80000008, 0, 0x00}, + {SCORPIONL2_HW_TABLE_WALK_HIT, 0x80000800, 0, 0x01}, + {SCORPIONL2_CACHE_MVA_POC, 0x80080000, 0, 0x02}, + 
{SCORPIONL2_L2ALLOC_DWRITE_MISS, 0x88000000, 0, 0x03}, + {SCORPIONL2_CORRECTED_TAG_ARRAY, 0x80001A00, 0, 0x01}, + {SCORPIONL2_CORRECTED_DATA_ARRAY, 0x801A0000, 0, 0x02}, + {SCORPIONL2_CORRECTED_REPLACEMENT_ARRAY, 0x9A000000, 0, 0x03}, + {SCORPIONL2_PMBUS_MPAAF, 0x80001C00, 0, 0x01}, + {SCORPIONL2_PMBUS_MPWDAF, 0x801C0000, 0, 0x02}, + {SCORPIONL2_PMBUS_MPBRT, 0x9C000000, 0, 0x03}, + + {SCORPIONL2_CPU0_GRANT, 0x80000001, 1, 0x04}, + {SCORPIONL2_CPU1_GRANT, 0x80000100, 1, 0x05}, + {SCORPIONL2_CPU0_NOGRANT, 0x80020000, 1, 0x06}, + {SCORPIONL2_CPU1_NOGRANT, 0x82000000, 1, 0x07}, + {SCORPIONL2_CPU0_LOSING_ARB, 0x80040000, 1, 0x06}, + {SCORPIONL2_CPU1_LOSING_ARB, 0x84000000, 1, 0x07}, + {SCORPIONL2_SLAVEPORT_NOGRANT, 0x80000007, 1, 0x04}, + {SCORPIONL2_SLAVEPORT_BPQ_FULL, 0x80000700, 1, 0x05}, + {SCORPIONL2_SLAVEPORT_LOSING_ARB, 0x80070000, 1, 0x06}, + {SCORPIONL2_SLAVEPORT_GRANT, 0x87000000, 1, 0x07}, + {SCORPIONL2_SLAVEPORT_GRANTLOCK, 0x80000008, 1, 0x04}, + {SCORPIONL2_L2EM_STREX_PASS, 0x80000009, 1, 0x04}, + {SCORPIONL2_L2EM_STREX_FAIL, 0x80000900, 1, 0x05}, + {SCORPIONL2_LDREX_RESERVE_L2EM, 0x80090000, 1, 0x06}, + {SCORPIONL2_SLAVEPORT_LDREX, 0x89000000, 1, 0x07}, + {SCORPIONL2_CPU0_L2EM_CLEARED, 0x800A0000, 1, 0x06}, + {SCORPIONL2_CPU1_L2EM_CLEARED, 0x8A000000, 1, 0x07}, + {SCORPIONL2_SLAVEPORT_L2EM_CLEARED, 0x80000B00, 1, 0x05}, + {SCORPIONL2_CPU0_CLAMPED, 0x8000000E, 1, 0x04}, + {SCORPIONL2_CPU1_CLAMPED, 0x80000E00, 1, 0x05}, + {SCORPIONL2_CPU0_WAIT, 0x800F0000, 1, 0x06}, + {SCORPIONL2_CPU1_WAIT, 0x8F000000, 1, 0x07}, + {SCORPIONL2_CPU0_NONAMBAS_WAIT, 0x80000010, 1, 0x04}, + {SCORPIONL2_CPU1_NONAMBAS_WAIT, 0x80001000, 1, 0x05}, + {SCORPIONL2_CPU0_DSB_WAIT, 0x80000014, 1, 0x04}, + {SCORPIONL2_CPU1_DSB_WAIT, 0x80001400, 1, 0x05}, + + {SCORPIONL2_AXI_READ, 0x80000001, 2, 0x08}, + {SCORPIONL2_AXI_WRITE, 0x80000100, 2, 0x09}, + {SCORPIONL2_1BEAT_WRITE, 0x80010000, 2, 0x0a}, + {SCORPIONL2_2BEAT_WRITE, 0x80010000, 2, 0x0b}, + {SCORPIONL2_4BEAT_WRITE, 0x80000002, 2, 0x08}, + {SCORPIONL2_8BEAT_WRITE, 0x80000200, 2, 0x09}, + {SCORPIONL2_12BEAT_WRITE, 0x80020000, 2, 0x0a}, + {SCORPIONL2_16BEAT_WRITE, 0x82000000, 2, 0x0b}, + {SCORPIONL2_1BEAT_DSIDE_READ, 0x80000003, 2, 0x08}, + {SCORPIONL2_2BEAT_DSIDE_READ, 0x80000300, 2, 0x09}, + {SCORPIONL2_4BEAT_DSIDE_READ, 0x80030000, 2, 0x0a}, + {SCORPIONL2_8BEAT_DSIDE_READ, 0x83000000, 2, 0x0b}, + {SCORPIONL2_CSYS_READ_1BEAT, 0x80000004, 2, 0x08}, + {SCORPIONL2_CSYS_READ_2BEAT, 0x80000400, 2, 0x09}, + {SCORPIONL2_CSYS_READ_4BEAT, 0x80040000, 2, 0x0a}, + {SCORPIONL2_CSYS_READ_8BEAT, 0x84000000, 2, 0x0b}, + {SCORPIONL2_4BEAT_IFETCH_READ, 0x80000005, 2, 0x08}, + {SCORPIONL2_8BEAT_IFETCH_READ, 0x80000500, 2, 0x09}, + {SCORPIONL2_CSYS_WRITE_1BEAT, 0x80050000, 2, 0x0a}, + {SCORPIONL2_CSYS_WRITE_2BEAT, 0x85000000, 2, 0x0b}, + {SCORPIONL2_AXI_READ_DATA_BEAT, 0x80000600, 2, 0x09}, + {SCORPIONL2_AXI_WRITE_EVT1, 0x80060000, 2, 0x0a}, + {SCORPIONL2_AXI_WRITE_EVT2, 0x86000000, 2, 0x0b}, + {SCORPIONL2_LDREX_REQ, 0x80000007, 2, 0x08}, + {SCORPIONL2_STREX_PASS, 0x80000700, 2, 0x09}, + {SCORPIONL2_STREX_FAIL, 0x80070000, 2, 0x0a}, + {SCORPIONL2_CPREAD, 0x80000008, 2, 0x08}, + {SCORPIONL2_CPWRITE, 0x80000800, 2, 0x09}, + {SCORPIONL2_BARRIER_REQ, 0x88000000, 2, 0x0b}, + + {SCORPIONL2_AXI_READ_SLVPORT, 0x80000001, 3, 0x0c}, + {SCORPIONL2_AXI_WRITE_SLVPORT, 0x80000100, 3, 0x0d}, + {SCORPIONL2_AXI_READ_SLVPORT_DATABEAT, 0x80010000, 3, 0x0e}, + {SCORPIONL2_AXI_WRITE_SLVPORT_DATABEAT, 0x81000000, 3, 0x0f}, + + {SCORPIONL2_SNOOPKILL_PREFILTER, 0x80000001, 4, 0x10}, + 
{SCORPIONL2_SNOOPKILL_FILTEROUT, 0x80000100, 4, 0x11}, + {SCORPIONL2_SNOOPED_IC, 0x80000002, 4, 0x10}, + {SCORPIONL2_SNOOPED_BP, 0x80000200, 4, 0x11}, + {SCORPIONL2_SNOOPED_BARRIERS, 0x80020000, 4, 0x12}, + {SCORPIONL2_SNOOPED_TLB, 0x82000000, 4, 0x13}, +}; + +static u32 bb_l2_read_l2pm0(void) +{ + u32 val; + asm volatile ("mrc p15, 3, %0, c15, c7, 0" : "=r" (val)); + return val; +} + +static void bb_l2_write_l2pm0(u32 val) +{ + asm volatile ("mcr p15, 3, %0, c15, c7, 0" : : "r" (val)); +} + +static u32 bb_l2_read_l2pm1(void) +{ + u32 val; + asm volatile ("mrc p15, 3, %0, c15, c7, 1" : "=r" (val)); + return val; +} + +static void bb_l2_write_l2pm1(u32 val) +{ + asm volatile ("mcr p15, 3, %0, c15, c7, 1" : : "r" (val)); +} + +static u32 bb_l2_read_l2pm2(void) +{ + u32 val; + asm volatile ("mrc p15, 3, %0, c15, c7, 2" : "=r" (val)); + return val; +} + +static void bb_l2_write_l2pm2(u32 val) +{ + asm volatile ("mcr p15, 3, %0, c15, c7, 2" : : "r" (val)); +} + +static u32 bb_l2_read_l2pm3(void) +{ + u32 val; + asm volatile ("mrc p15, 3, %0, c15, c7, 3" : "=r" (val)); + return val; +} + +static void bb_l2_write_l2pm3(u32 val) +{ + asm volatile ("mcr p15, 3, %0, c15, c7, 3" : : "r" (val)); +} + +static u32 bb_l2_read_l2pm4(void) +{ + u32 val; + asm volatile ("mrc p15, 3, %0, c15, c7, 4" : "=r" (val)); + return val; +} + +static void bb_l2_write_l2pm4(u32 val) +{ + asm volatile ("mcr p15, 3, %0, c15, c7, 4" : : "r" (val)); +} + +struct bb_scorpion_access_funcs { + u32(*read) (void); + void (*write) (u32); + void (*pre) (void); + void (*post) (void); +}; + +struct bb_scorpion_access_funcs bb_l2_func[] = { + {bb_l2_read_l2pm0, bb_l2_write_l2pm0, NULL, NULL}, + {bb_l2_read_l2pm1, bb_l2_write_l2pm1, NULL, NULL}, + {bb_l2_read_l2pm2, bb_l2_write_l2pm2, NULL, NULL}, + {bb_l2_read_l2pm3, bb_l2_write_l2pm3, NULL, NULL}, + {bb_l2_read_l2pm4, bb_l2_write_l2pm4, NULL, NULL}, +}; + +#define COLMN0MASK 0x000000ff +#define COLMN1MASK 0x0000ff00 +#define COLMN2MASK 0x00ff0000 + +static u32 bb_l2_get_columnmask(u32 setval) +{ + if (setval & COLMN0MASK) + return 0xffffff00; + else if (setval & COLMN1MASK) + return 0xffff00ff; + else if (setval & COLMN2MASK) + return 0xff00ffff; + else + return 0x80ffffff; +} + +static void bb_l2_evt_setup(u32 gr, u32 setval) +{ + u32 val; + if (bb_l2_func[gr].pre) + bb_l2_func[gr].pre(); + val = bb_l2_get_columnmask(setval) & bb_l2_func[gr].read(); + val = val | setval; + bb_l2_func[gr].write(val); + if (bb_l2_func[gr].post) + bb_l2_func[gr].post(); +} + +#define BB_L2_EVT_START_IDX 0x90 +#define BB_L2_INV_EVTYPE 0 + +static unsigned int get_bb_l2_evtinfo(unsigned int evt_type, + struct bb_l2_scorp_evt *evtinfo) +{ + u32 idx; + if (evt_type < BB_L2_EVT_START_IDX || evt_type >= BB_L2_MAX_EVT) + return BB_L2_INV_EVTYPE; + idx = evt_type - BB_L2_EVT_START_IDX; + if (sc_evt[idx].evt_type == evt_type) { + evtinfo->val = sc_evt[idx].val; + evtinfo->grp = sc_evt[idx].grp; + evtinfo->evt_type_act = sc_evt[idx].evt_type_act; + return sc_evt[idx].evt_type_act; + } + return BB_L2_INV_EVTYPE; +} + +static inline void bb_l2_pmnc_write(unsigned long val) +{ + val &= 0xff; + asm volatile ("mcr p15, 3, %0, c15, c4, 0" : : "r" (val)); +} + +static inline unsigned long bb_l2_pmnc_read(void) +{ + u32 val; + asm volatile ("mrc p15, 3, %0, c15, c4, 0" : "=r" (val)); + return val; +} + +static void bb_l2_set_evcntcr(void) +{ + u32 val = 0x0; + asm volatile ("mcr p15, 3, %0, c15, c6, 4" : : "r" (val)); +} + +static inline void bb_l2_set_evtyper(int ctr, int val) +{ + /* select ctr */ + asm volatile 
("mcr p15, 3, %0, c15, c6, 0" : : "r" (ctr)); + + /* write into EVTYPER */ + asm volatile ("mcr p15, 3, %0, c15, c6, 7" : : "r" (val)); +} + +static void bb_l2_set_evfilter(void) +{ + u32 filter_val = 0x000f0030 | 1 << smp_processor_id(); + + asm volatile ("mcr p15, 3, %0, c15, c6, 3" : : "r" (filter_val)); +} + +static void bb_l2_enable_intenset(u32 idx) +{ + if (idx == BB_L2CYCLE_CTR_EVENT_IDX) { + asm volatile ("mcr p15, 3, %0, c15, c5, 1" : : "r" + (1 << BB_L2CYCLE_CTR_BIT)); + } else { + asm volatile ("mcr p15, 3, %0, c15, c5, 1" : : "r" (1 << idx)); + } +} + +static void bb_l2_disable_intenclr(u32 idx) +{ + if (idx == BB_L2CYCLE_CTR_EVENT_IDX) { + asm volatile ("mcr p15, 3, %0, c15, c5, 0" : : "r" + (1 << BB_L2CYCLE_CTR_BIT)); + } else { + asm volatile ("mcr p15, 3, %0, c15, c5, 0" : : "r" (1 << idx)); + } +} + +static void bb_l2_enable_counter(u32 idx) +{ + if (idx == BB_L2CYCLE_CTR_EVENT_IDX) { + asm volatile ("mcr p15, 3, %0, c15, c4, 3" : : "r" + (1 << BB_L2CYCLE_CTR_BIT)); + } else { + asm volatile ("mcr p15, 3, %0, c15, c4, 3" : : "r" (1 << idx)); + } +} + +static void bb_l2_disable_counter(u32 idx) +{ + if (idx == BB_L2CYCLE_CTR_EVENT_IDX) { + asm volatile ("mcr p15, 3, %0, c15, c4, 2" : : "r" + (1 << BB_L2CYCLE_CTR_BIT)); + + } else { + asm volatile ("mcr p15, 3, %0, c15, c4, 2" : : "r" (1 << idx)); + } +} + +static u64 bb_l2_read_counter(u32 idx) +{ + u32 val; + unsigned long flags; + + if (idx == BB_L2CYCLE_CTR_EVENT_IDX) { + asm volatile ("mrc p15, 3, %0, c15, c4, 5" : "=r" (val)); + } else { + raw_spin_lock_irqsave(&bb_l2_pmu_lock, flags); + asm volatile ("mcr p15, 3, %0, c15, c6, 0" : : "r" (idx)); + + /* read val from counter */ + asm volatile ("mrc p15, 3, %0, c15, c6, 5" : "=r" (val)); + raw_spin_unlock_irqrestore(&bb_l2_pmu_lock, flags); + } + + return val; +} + +static void bb_l2_write_counter(u32 idx, u32 val) +{ + unsigned long flags; + + if (idx == BB_L2CYCLE_CTR_EVENT_IDX) { + asm volatile ("mcr p15, 3, %0, c15, c4, 5" : : "r" (val)); + } else { + raw_spin_lock_irqsave(&bb_l2_pmu_lock, flags); + /* select counter */ + asm volatile ("mcr p15, 3, %0, c15, c6, 0" : : "r" (idx)); + + /* write val into counter */ + asm volatile ("mcr p15, 3, %0, c15, c6, 5" : : "r" (val)); + raw_spin_unlock_irqrestore(&bb_l2_pmu_lock, flags); + } +} + +static int +bb_pmu_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, int idx) +{ + s64 left = local64_read(&hwc->period_left); + s64 period = hwc->sample_period; + int ret = 0; + + if (unlikely(left <= -period)) { + left = period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (unlikely(left <= 0)) { + left += period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (left > (s64) MAX_BB_L2_PERIOD) + left = MAX_BB_L2_PERIOD; + + local64_set(&hwc->prev_count, (u64)-left); + + bb_l2_write_counter(idx, (u64) (-left) & 0xffffffff); + + perf_event_update_userpage(event); + + return ret; +} + +static u64 +bb_pmu_event_update(struct perf_event *event, struct hw_perf_event *hwc, + int idx, int overflow) +{ + u64 prev_raw_count, new_raw_count; + u64 delta; + +again: + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = bb_l2_read_counter(idx); + + if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + new_raw_count &= MAX_BB_L2_PERIOD; + prev_raw_count &= MAX_BB_L2_PERIOD; + + if (overflow) { + delta = MAX_BB_L2_PERIOD - prev_raw_count + new_raw_count; + pr_err("%s: delta: 
%lld\n", __func__, delta); + } else + delta = new_raw_count - prev_raw_count; + + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); + + pr_debug("%s: new: %lld, prev: %lld, event: %ld count: %lld\n", + __func__, new_raw_count, prev_raw_count, + hwc->config_base, local64_read(&event->count)); + + return new_raw_count; +} + +static void bb_l2_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + bb_pmu_event_update(event, hwc, hwc->idx, 0); +} + +static void bb_l2_stop_counter(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (!(hwc->state & PERF_HES_STOPPED)) { + bb_l2_disable_intenclr(idx); + bb_l2_disable_counter(idx); + + bb_pmu_event_update(event, hwc, idx, 0); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; + } + + pr_debug("%s: event: %ld ctr: %d stopped\n", __func__, hwc->config_base, + idx); +} + +static void bb_l2_start_counter(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + struct bb_l2_scorp_evt evtinfo; + int evtype = hwc->config_base; + int ev_typer; + unsigned long iflags; + int cpu_id = smp_processor_id(); + + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + + hwc->state = 0; + + bb_pmu_event_set_period(event, hwc, idx); + + if (hwc->config_base == BB_L2CYCLE_CTR_RAW_CODE) + goto out; + + memset(&evtinfo, 0, sizeof(evtinfo)); + + ev_typer = get_bb_l2_evtinfo(evtype, &evtinfo); + + raw_spin_lock_irqsave(&bb_l2_pmu_lock, iflags); + + bb_l2_set_evtyper(idx, ev_typer); + + bb_l2_set_evcntcr(); + + bb_l2_set_evfilter(); + + bb_l2_evt_setup(evtinfo.grp, evtinfo.val); + + raw_spin_unlock_irqrestore(&bb_l2_pmu_lock, iflags); + +out: + + bb_l2_enable_intenset(idx); + + bb_l2_enable_counter(idx); + + pr_debug("%s: idx: %d, event: %d, val: %x, cpu: %d\n", + __func__, idx, evtype, evtinfo.val, cpu_id); +} + +static void bb_l2_del_event(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + unsigned long iflags; + + raw_spin_lock_irqsave(&hw_bb_l2_pmu.lock, iflags); + + clear_bit(idx, (long unsigned int *)(&hw_bb_l2_pmu.active_mask)); + + bb_l2_stop_counter(event, PERF_EF_UPDATE); + hw_bb_l2_pmu.events[idx] = NULL; + hwc->idx = -1; + + raw_spin_unlock_irqrestore(&hw_bb_l2_pmu.lock, iflags); + + pr_debug("%s: event: %ld deleted\n", __func__, hwc->config_base); + + perf_event_update_userpage(event); +} + +static int bb_l2_add_event(struct perf_event *event, int flags) +{ + int ctr = 0; + struct hw_perf_event *hwc = &event->hw; + unsigned long iflags; + int err = 0; + + perf_pmu_disable(event->pmu); + + raw_spin_lock_irqsave(&hw_bb_l2_pmu.lock, iflags); + + /* Cycle counter has a resrvd index */ + if (hwc->config_base == BB_L2CYCLE_CTR_RAW_CODE) { + if (hw_bb_l2_pmu.events[BB_L2CYCLE_CTR_EVENT_IDX]) { + pr_err("%s: Stale cycle ctr event ptr !\n", __func__); + err = -EINVAL; + goto out; + } + hwc->idx = BB_L2CYCLE_CTR_EVENT_IDX; + hw_bb_l2_pmu.events[BB_L2CYCLE_CTR_EVENT_IDX] = event; + set_bit(BB_L2CYCLE_CTR_EVENT_IDX, + (long unsigned int *)&hw_bb_l2_pmu.active_mask); + goto skip_ctr_loop; + } + + for (ctr = 0; ctr < MAX_BB_L2_CTRS - 1; ctr++) { + if (!hw_bb_l2_pmu.events[ctr]) { + hwc->idx = ctr; + hw_bb_l2_pmu.events[ctr] = event; + set_bit(ctr, (long unsigned int *) + &hw_bb_l2_pmu.active_mask); + break; + } + } + + if (hwc->idx < 0) { + err = -ENOSPC; + pr_err("%s: No space for event: %llx!!\n", __func__, + event->attr.config); + 
goto out; + } + +skip_ctr_loop: + + bb_l2_disable_counter(hwc->idx); + + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + + if (flags & PERF_EF_START) + bb_l2_start_counter(event, PERF_EF_RELOAD); + + perf_event_update_userpage(event); + + pr_debug("%s: event: %ld, ctr: %d added from cpu:%d\n", + __func__, hwc->config_base, hwc->idx, smp_processor_id()); +out: + raw_spin_unlock_irqrestore(&hw_bb_l2_pmu.lock, iflags); + + /* Resume the PMU even if this event could not be added */ + perf_pmu_enable(event->pmu); + + return err; +} + +static void bb_l2_pmu_enable(struct pmu *pmu) +{ + unsigned long flags; + isb(); + raw_spin_lock_irqsave(&bb_l2_pmu_lock, flags); + /* Enable all counters */ + bb_l2_pmnc_write(bb_l2_pmnc_read() | SCORPIONL2_PMNC_E); + raw_spin_unlock_irqrestore(&bb_l2_pmu_lock, flags); +} + +static void bb_l2_pmu_disable(struct pmu *pmu) +{ + unsigned long flags; + raw_spin_lock_irqsave(&bb_l2_pmu_lock, flags); + /* Disable all counters */ + bb_l2_pmnc_write(bb_l2_pmnc_read() & ~SCORPIONL2_PMNC_E); + raw_spin_unlock_irqrestore(&bb_l2_pmu_lock, flags); + isb(); +} + +static inline u32 bb_l2_get_reset_pmovsr(void) +{ + u32 val; + + /* Read */ + asm volatile ("mrc p15, 3, %0, c15, c4, 1" : "=r" (val)); + + /* Write to clear flags */ + val &= 0xffffffff; + asm volatile ("mcr p15, 3, %0, c15, c4, 1" : : "r" (val)); + + return val; +} + +static irqreturn_t bb_l2_handle_irq(int irq_num, void *dev) +{ + unsigned long pmovsr; + struct perf_sample_data data; + struct pt_regs *regs; + struct perf_event *event; + struct hw_perf_event *hwc; + int bitp; + int idx = 0; + + pmovsr = bb_l2_get_reset_pmovsr(); + + if (!(pmovsr & 0xffffffff)) + return IRQ_NONE; + + regs = get_irq_regs(); + + perf_sample_data_init(&data, 0); + + raw_spin_lock(&hw_bb_l2_pmu.lock); + + while (pmovsr) { + bitp = __ffs(pmovsr); + + if (bitp == BB_L2CYCLE_CTR_BIT) + idx = BB_L2CYCLE_CTR_EVENT_IDX; + else + idx = bitp; + + event = hw_bb_l2_pmu.events[idx]; + + if (!event) + goto next; + + if (!test_bit(idx, hw_bb_l2_pmu.active_mask)) + goto next; + + hwc = &event->hw; + bb_pmu_event_update(event, hwc, idx, 1); + data.period = event->hw.last_period; + + if (!bb_pmu_event_set_period(event, hwc, idx)) + goto next; + + if (perf_event_overflow(event, 0, &data, regs)) + bb_l2_disable_counter(hwc->idx); +next: + pmovsr &= (pmovsr - 1); + } + + raw_spin_unlock(&hw_bb_l2_pmu.lock); + + irq_work_run(); + + return IRQ_HANDLED; +} + +static atomic_t active_bb_l2_events = ATOMIC_INIT(0); +static DEFINE_MUTEX(bb_pmu_reserve_mutex); + +static int bb_pmu_reserve_hardware(void) +{ + int i, err = -ENODEV, irq; + + bb_l2_pmu_device = reserve_pmu(ARM_PMU_DEVICE_L2); + + if (IS_ERR(bb_l2_pmu_device)) { + pr_warning("unable to reserve pmu\n"); + return PTR_ERR(bb_l2_pmu_device); + } + + if (bb_l2_pmu_device->num_resources < 1) { + pr_err("no irqs for PMUs defined\n"); + return -ENODEV; + } + + if (strncmp(bb_l2_pmu_device->name, "l2-arm-pmu", 6)) { + pr_err("Incorrect pdev reserved !\n"); + return -EINVAL; + } + + for (i = 0; i < bb_l2_pmu_device->num_resources; ++i) { + irq = platform_get_irq(bb_l2_pmu_device, i); + if (irq < 0) + continue; + + err = request_irq(irq, bb_l2_handle_irq, + IRQF_DISABLED | IRQF_NOBALANCING, + "bb-l2-pmu", NULL); + if (err) { + pr_warning("unable to request IRQ%d for Krait L2 perf " + "counters\n", irq); + break; + } + + get_irq_chip(irq)->irq_unmask(irq_get_irq_data(irq)); + } + + if (err) { + for (i = i - 1; i >= 0; --i) { + irq = platform_get_irq(bb_l2_pmu_device, i); + if (irq >= 0) + free_irq(irq, NULL); 
+ } + release_pmu(bb_l2_pmu_device); + bb_l2_pmu_device = NULL; + } + + return err; +} + +static void bb_pmu_release_hardware(void) +{ + int i, irq; + + for (i = bb_l2_pmu_device->num_resources - 1; i >= 0; --i) { + irq = platform_get_irq(bb_l2_pmu_device, i); + if (irq >= 0) + free_irq(irq, NULL); + } + + bb_l2_pmu_disable(NULL); + + release_pmu(bb_l2_pmu_device); + bb_l2_pmu_device = NULL; +} + +static void bb_pmu_perf_event_destroy(struct perf_event *event) +{ + if (atomic_dec_and_mutex_lock + (&active_bb_l2_events, &bb_pmu_reserve_mutex)) { + bb_pmu_release_hardware(); + mutex_unlock(&bb_pmu_reserve_mutex); + } +} + +static int bb_l2_event_init(struct perf_event *event) +{ + int err = 0; + struct hw_perf_event *hwc = &event->hw; + int status = 0; + + switch (event->attr.type) { + case PERF_TYPE_SHARED: + break; + + default: + return -ENOENT; + } + + hwc->idx = -1; + + event->destroy = bb_pmu_perf_event_destroy; + + if (!atomic_inc_not_zero(&active_bb_l2_events)) { + /* 0 active events */ + mutex_lock(&bb_pmu_reserve_mutex); + err = bb_pmu_reserve_hardware(); + mutex_unlock(&bb_pmu_reserve_mutex); + if (!err) + atomic_inc(&active_bb_l2_events); + else + return err; + } + + hwc->config_base = event->attr.config & 0xff; + hwc->config = 0; + hwc->event_base = 0; + + /* Only one CPU can control the cycle counter */ + if (hwc->config_base == BB_L2CYCLE_CTR_RAW_CODE) { + /* Check if its already running */ + asm volatile ("mrc p15, 3, %0, c15, c4, 6" : "=r" (status)); + if (status == 0x2) { + err = -ENOSPC; + goto out; + } + } + + if (!hwc->sample_period) { + hwc->sample_period = MAX_BB_L2_PERIOD; + hwc->last_period = hwc->sample_period; + local64_set(&hwc->period_left, hwc->sample_period); + } + + pr_debug("%s: event: %lld init'd\n", __func__, event->attr.config); + +out: + if (err < 0) + bb_pmu_perf_event_destroy(event); + + return err; +} + +static struct pmu bb_l2_pmu = { + .pmu_enable = bb_l2_pmu_enable, + .pmu_disable = bb_l2_pmu_disable, + .event_init = bb_l2_event_init, + .add = bb_l2_add_event, + .del = bb_l2_del_event, + .start = bb_l2_start_counter, + .stop = bb_l2_stop_counter, + .read = bb_l2_read, +}; + +static const struct arm_pmu *__init scorpionmp_l2_pmu_init(void) +{ + /* Register our own PMU here */ + perf_pmu_register(&bb_l2_pmu, "BB L2", PERF_TYPE_SHARED); + + memset(&hw_bb_l2_pmu, 0, sizeof(hw_bb_l2_pmu)); + + /* Avoid spurious interrupts at startup */ + bb_l2_get_reset_pmovsr(); + + /* Don't return an arm_pmu here */ + return NULL; +} +#else + +static const struct arm_pmu *__init scorpionmp_l2_pmu_init(void) +{ + return NULL; +} + +#endif diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c index c058bfc8532be..66ce900b1c5aa 100644 --- a/arch/arm/kernel/perf_event_v6.c +++ b/arch/arm/kernel/perf_event_v6.c @@ -474,7 +474,7 @@ armv6pmu_handle_irq(int irq_num, continue; hwc = &event->hw; - armpmu_event_update(event, hwc, idx); + armpmu_event_update(event, hwc, idx, 1); data.period = event->hw.last_period; if (!armpmu_event_set_period(event, hwc, idx)) continue; diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index 2e1402556fa0c..2aa83c2f82d21 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -466,6 +466,7 @@ static inline unsigned long armv7_pmnc_read(void) static inline void armv7_pmnc_write(unsigned long val) { val &= ARMV7_PMNC_MASK; + isb(); asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val)); } @@ -502,6 +503,7 @@ static inline int armv7_pmnc_select_counter(unsigned int 
idx) val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK; asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val)); + isb(); return idx; } @@ -780,7 +782,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) continue; hwc = &event->hw; - armpmu_event_update(event, hwc, idx); + armpmu_event_update(event, hwc, idx, 1); data.period = event->hw.last_period; if (!armpmu_event_set_period(event, hwc, idx)) continue; diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c index 28cd3b025bc36..39affbe4fdb24 100644 --- a/arch/arm/kernel/perf_event_xscale.c +++ b/arch/arm/kernel/perf_event_xscale.c @@ -246,7 +246,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev) continue; hwc = &event->hw; - armpmu_event_update(event, hwc, idx); + armpmu_event_update(event, hwc, idx, 1); data.period = event->hw.last_period; if (!armpmu_event_set_period(event, hwc, idx)) continue; @@ -578,7 +578,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev) continue; hwc = &event->hw; - armpmu_event_update(event, hwc, idx); + armpmu_event_update(event, hwc, idx, 1); data.period = event->hw.last_period; if (!armpmu_event_set_period(event, hwc, idx)) continue; diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c index 2c79eec192629..1df39b8321fd6 100644 --- a/arch/arm/kernel/pmu.c +++ b/arch/arm/kernel/pmu.c @@ -3,6 +3,7 @@ * * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles * Copyright (C) 2010 ARM Ltd, Will Deacon + * Copyright (c) 2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -45,16 +46,41 @@ static int __devinit pmu_device_probe(struct platform_device *pdev) return 0; } -static struct platform_driver pmu_driver = { +static struct platform_driver cpu_pmu_driver = { .driver = { - .name = "arm-pmu", + .name = "cpu-arm-pmu", }, .probe = pmu_device_probe, }; +static struct platform_driver l2_pmu_driver = { + .driver = { + .name = "l2-arm-pmu", + }, + .probe = pmu_device_probe, +}; + +static struct platform_driver *pmu_drivers[] __initdata = { + &cpu_pmu_driver, + &l2_pmu_driver, +}; + static int __init register_pmu_driver(void) { - return platform_driver_register(&pmu_driver); + int i; + int err; + + for (i = 0; i < ARM_NUM_PMU_DEVICES; i++) { + err = platform_driver_register(pmu_drivers[i]); + if (err) { + pr_err("%s: failed to register id:%d\n", __func__, i); + while (--i >= 0) + platform_driver_unregister(pmu_drivers[i]); + break; + } + } + + return err; } device_initcall(register_pmu_driver); diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 8335b86166200..2dc0e81c0ef2f 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -32,7 +32,6 @@ #include #include -#include #include #include #include @@ -182,8 +181,8 @@ void cpu_idle(void) /* endless idle loop with no priority at all */ while (1) { + idle_notifier_call_chain(IDLE_START); tick_nohz_stop_sched_tick(1); - leds_event(led_idle_start); while (!need_resched()) { #ifdef CONFIG_HOTPLUG_CPU if (cpu_is_offline(smp_processor_id())) @@ -207,8 +206,8 @@ void cpu_idle(void) local_irq_enable(); } } - leds_event(led_idle_end); tick_nohz_restart_sched_tick(); + idle_notifier_call_chain(IDLE_END); preempt_enable_no_resched(); schedule(); preempt_disable(); diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 5ea4fb718b970..9f39fcc1a8208 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -85,6 +85,20 @@ unsigned 
int __atags_pointer __initdata; unsigned int system_rev; EXPORT_SYMBOL(system_rev); +#ifdef CONFIG_MICROP_COMMON +char microp_ver[4]; +EXPORT_SYMBOL(microp_ver); + +unsigned int als_kadc; +EXPORT_SYMBOL(als_kadc); + +unsigned int ps_kparam1; +EXPORT_SYMBOL(ps_kparam1); + +unsigned int ps_kparam2; +EXPORT_SYMBOL(ps_kparam2); +#endif + unsigned int system_serial_low; EXPORT_SYMBOL(system_serial_low); @@ -94,6 +108,8 @@ EXPORT_SYMBOL(system_serial_high); unsigned int elf_hwcap __read_mostly; EXPORT_SYMBOL(elf_hwcap); +unsigned int boot_reason; +EXPORT_SYMBOL(boot_reason); #ifdef MULTI_CPU struct processor processor __read_mostly; @@ -646,10 +662,44 @@ static int __init parse_tag_revision(const struct tag *tag) __tagtable(ATAG_REVISION, parse_tag_revision); +#ifdef CONFIG_MICROP_COMMON +static int __init parse_tag_microp_version(const struct tag *tag) +{ + int i; + + for (i = 0; i < 4; i++) + microp_ver[i] = tag->u.microp_version.ver[i]; + + return 0; +} + +__tagtable(ATAG_MICROP_VERSION, parse_tag_microp_version); + +static int __init parse_tag_als_calibration(const struct tag *tag) +{ + als_kadc = tag->u.als_kadc.kadc; + + return 0; +} + +__tagtable(ATAG_ALS, parse_tag_als_calibration); + +static int __init parse_tag_ps_calibration(const struct tag *tag) +{ + ps_kparam1 = tag->u.ps_kparam.kparam1; + ps_kparam2 = tag->u.ps_kparam.kparam2; + + return 0; +} + +__tagtable(ATAG_PS, parse_tag_ps_calibration); +#endif + static int __init parse_tag_cmdline(const struct tag *tag) { #ifndef CONFIG_CMDLINE_FORCE strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE); + #else pr_warning("Ignoring tag cmdline (using the default kernel command line)\n"); #endif /* CONFIG_CMDLINE_FORCE */ diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 4539ebcb089fa..0f31dbc836d27 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 ARM Limited, All Rights Reserved. * * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as + * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include @@ -278,8 +278,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void) struct mm_struct *mm = &init_mm; unsigned int cpu = smp_processor_id(); - printk("CPU%u: Booted secondary processor\n", cpu); - /* * All kernel threads share the same mm context; grab a * reference and switch to it.
@@ -470,11 +468,7 @@ static void smp_timer_broadcast(const struct cpumask *mask) { smp_cross_call(mask, IPI_TIMER); } -#else -#define smp_timer_broadcast NULL -#endif -#ifndef CONFIG_LOCAL_TIMERS static void broadcast_timer_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { @@ -489,6 +483,7 @@ static void local_timer_setup(struct clock_event_device *evt) evt->rating = 400; evt->mult = 1; evt->set_mode = broadcast_timer_set_mode; + evt->broadcast = smp_timer_broadcast; clockevents_register_device(evt); } @@ -500,7 +495,6 @@ void __cpuinit percpu_timer_setup(void) struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu); evt->cpumask = cpumask_of(cpu); - evt->broadcast = smp_timer_broadcast; local_timer_setup(evt); } @@ -589,6 +583,11 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs) void smp_send_reschedule(int cpu) { + + if (unlikely(cpu_is_offline(cpu))) { + WARN_ON(1); + return; + } smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); } diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index 60636f499cb3e..377dbe972b30b 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -127,8 +127,7 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk) twd_calibrate_rate(); clk->name = "local_timer"; - clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | - CLOCK_EVT_FEAT_C3STOP; + clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; clk->rating = 350; clk->set_mode = twd_set_mode; clk->set_next_event = twd_set_next_event; diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index 4ad8da15ef2b3..af0aaebf4de62 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -311,7 +311,7 @@ asmlinkage long sys_oabi_semtimedop(int semid, long err; int i; - if (nsops < 1) + if (nsops < 1 || nsops > SEMOPM) return -EINVAL; sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL); if (!sops) diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c index 3d76bf2337347..768861a488978 100644 --- a/arch/arm/kernel/time.c +++ b/arch/arm/kernel/time.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include @@ -117,48 +117,37 @@ void timer_tick(void) #endif #if defined(CONFIG_PM) && !defined(CONFIG_GENERIC_CLOCKEVENTS) -static int timer_suspend(struct sys_device *dev, pm_message_t state) +static int timer_suspend(void) { - struct sys_timer *timer = container_of(dev, struct sys_timer, dev); - - if (timer->suspend != NULL) - timer->suspend(); + if (system_timer->suspend) + system_timer->suspend(); return 0; } -static int timer_resume(struct sys_device *dev) +static void timer_resume(void) { - struct sys_timer *timer = container_of(dev, struct sys_timer, dev); - - if (timer->resume != NULL) - timer->resume(); - - return 0; + if (system_timer->resume) + system_timer->resume(); } #else #define timer_suspend NULL #define timer_resume NULL #endif -static struct sysdev_class timer_sysclass = { - .name = "timer", +static struct syscore_ops timer_syscore_ops = { .suspend = timer_suspend, .resume = timer_resume, }; -static int __init timer_init_sysfs(void) +static int __init timer_init_syscore_ops(void) { - int ret = sysdev_class_register(&timer_sysclass); - if (ret == 0) { - system_timer->dev.cls = &timer_sysclass; - ret = sysdev_register(&system_timer->dev); - } + register_syscore_ops(&timer_syscore_ops); - return ret; + return 0; } -device_initcall(timer_init_sysfs); +device_initcall(timer_init_syscore_ops); void 
__init time_init(void) { diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index d71941d661cb9..2e76788f88a72 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -455,6 +455,10 @@ do_cache_op(unsigned long start, unsigned long end, int flags) up_read(&mm->mmap_sem); flush_cache_user_range(start, end); + +#ifdef CONFIG_ARCH_MSM7X27 + mb(); +#endif return; } up_read(&mm->mmap_sem); diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 61462790757f5..479ddae2b2be0 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -7,6 +7,9 @@ #include #include #include +#ifdef CONFIG_STRICT_MEMORY_RWX +#include +#endif #define PROC_INFO \ VMLINUX_SYMBOL(__proc_info_begin) = .; \ @@ -104,6 +107,9 @@ SECTIONS #endif } +#ifdef CONFIG_STRICT_MEMORY_RWX + . = ALIGN(1< + * Copyright (C) 2005-2006 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include + +/* + * Oh, if only we had a cycle counter... + */ +void delay_loop(unsigned long loops) +{ + asm volatile( + "1: subs %0, %0, #1 \n" + " bhi 1b \n" + : /* No output */ + : "r" (loops) + ); +} + +#ifdef ARCH_HAS_READ_CURRENT_TIMER +/* + * Assuming read_current_timer() is monotonically increasing + * across calls. + */ +void read_current_timer_delay_loop(unsigned long loops) +{ + unsigned long bclock, now; + + read_current_timer(&bclock); + do { + read_current_timer(&now); + } while ((now - bclock) < loops); +} +#endif + +static void (*delay_fn)(unsigned long) = delay_loop; + +void set_delay_fn(void (*fn)(unsigned long)) +{ + delay_fn = fn; +} + +/* + * loops = usecs * HZ * loops_per_jiffy / 1000000 + */ +void __delay(unsigned long loops) +{ + delay_fn(loops); +} +EXPORT_SYMBOL(__delay); + +/* + * 0 <= xloops <= 0x7fffff06 + * loops_per_jiffy <= 0x01ffffff (max. 3355 bogomips) + */ +void __const_udelay(unsigned long xloops) +{ + unsigned long lpj; + unsigned long loops; + + xloops >>= 14; /* max = 0x01ffffff */ + lpj = loops_per_jiffy >> 10; /* max = 0x0001ffff */ + loops = lpj * xloops; /* max = 0x00007fff */ + loops >>= 6; /* max = 2^32-1 */ + + if (loops) + __delay(loops); +} +EXPORT_SYMBOL(__const_udelay); + +/* + * usecs <= 2000 + * HZ <= 1000 + */ +void __udelay(unsigned long usecs) +{ + __const_udelay(usecs * ((2199023UL*HZ)>>11)); +} +EXPORT_SYMBOL(__udelay); diff --git a/arch/arm/lib/lib1funcs.S b/arch/arm/lib/lib1funcs.S index 6dc06487f3c3e..63b75df56d2fb 100644 --- a/arch/arm/lib/lib1funcs.S +++ b/arch/arm/lib/lib1funcs.S @@ -35,7 +35,7 @@ Boston, MA 02111-1307, USA. */ #include #include - +#include .macro ARM_DIV_BODY dividend, divisor, result, curbit @@ -207,6 +207,7 @@ Boston, MA 02111-1307, USA. */ ENTRY(__udivsi3) ENTRY(__aeabi_uidiv) +UNWIND(.fnstart) subs r2, r1, #1 moveq pc, lr @@ -230,10 +231,12 @@ ENTRY(__aeabi_uidiv) mov r0, r0, lsr r2 mov pc, lr +UNWIND(.fnend) ENDPROC(__udivsi3) ENDPROC(__aeabi_uidiv) ENTRY(__umodsi3) +UNWIND(.fnstart) subs r2, r1, #1 @ compare divisor with 1 bcc Ldiv0 @@ -247,10 +250,12 @@ ENTRY(__umodsi3) mov pc, lr +UNWIND(.fnend) ENDPROC(__umodsi3) ENTRY(__divsi3) ENTRY(__aeabi_idiv) +UNWIND(.fnstart) cmp r1, #0 eor ip, r0, r1 @ save the sign of the result. 
@@ -287,10 +292,12 @@ ENTRY(__aeabi_idiv) rsbmi r0, r0, #0 mov pc, lr +UNWIND(.fnend) ENDPROC(__divsi3) ENDPROC(__aeabi_idiv) ENTRY(__modsi3) +UNWIND(.fnstart) cmp r1, #0 beq Ldiv0 @@ -310,11 +317,14 @@ ENTRY(__modsi3) rsbmi r0, r0, #0 mov pc, lr +UNWIND(.fnend) ENDPROC(__modsi3) #ifdef CONFIG_AEABI ENTRY(__aeabi_uidivmod) +UNWIND(.fnstart) +UNWIND(.save {r0, r1, ip, lr} ) stmfd sp!, {r0, r1, ip, lr} bl __aeabi_uidiv @@ -323,10 +333,12 @@ ENTRY(__aeabi_uidivmod) sub r1, r1, r3 mov pc, lr +UNWIND(.fnend) ENDPROC(__aeabi_uidivmod) ENTRY(__aeabi_idivmod) - +UNWIND(.fnstart) +UNWIND(.save {r0, r1, ip, lr} ) stmfd sp!, {r0, r1, ip, lr} bl __aeabi_idiv ldmfd sp!, {r1, r2, ip, lr} @@ -334,15 +346,18 @@ ENTRY(__aeabi_idivmod) sub r1, r1, r3 mov pc, lr +UNWIND(.fnend) ENDPROC(__aeabi_idivmod) #endif -Ldiv0: - +ENTRY(Ldiv0) +UNWIND(.fnstart) +UNWIND(.pad #4) +UNWIND(.save {lr}) str lr, [sp, #-8]! bl __div0 mov r0, #0 @ About as wrong as it could be. ldr pc, [sp], #8 - - +UNWIND(.fnend) +ENDPROC(Ldiv0) diff --git a/arch/arm/lib/sha1.S b/arch/arm/lib/sha1.S deleted file mode 100644 index eb0edb80d7b84..0000000000000 --- a/arch/arm/lib/sha1.S +++ /dev/null @@ -1,211 +0,0 @@ -/* - * linux/arch/arm/lib/sha1.S - * - * SHA transform optimized for ARM - * - * Copyright: (C) 2005 by Nicolas Pitre - * Created: September 17, 2005 - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * The reference implementation for this code is linux/lib/sha1.c - */ - -#include - - .text - - -/* - * void sha_transform(__u32 *digest, const char *in, __u32 *W) - * - * Note: the "in" ptr may be unaligned. - */ - -ENTRY(sha_transform) - - stmfd sp!, {r4 - r8, lr} - - @ for (i = 0; i < 16; i++) - @ W[i] = be32_to_cpu(in[i]); - -#ifdef __ARMEB__ - mov r4, r0 - mov r0, r2 - mov r2, #64 - bl memcpy - mov r2, r0 - mov r0, r4 -#else - mov r3, r2 - mov lr, #16 -1: ldrb r4, [r1], #1 - ldrb r5, [r1], #1 - ldrb r6, [r1], #1 - ldrb r7, [r1], #1 - subs lr, lr, #1 - orr r5, r5, r4, lsl #8 - orr r6, r6, r5, lsl #8 - orr r7, r7, r6, lsl #8 - str r7, [r3], #4 - bne 1b -#endif - - @ for (i = 0; i < 64; i++) - @ W[i+16] = ror(W[i+13] ^ W[i+8] ^ W[i+2] ^ W[i], 31); - - sub r3, r2, #4 - mov lr, #64 -2: ldr r4, [r3, #4]! - subs lr, lr, #1 - ldr r5, [r3, #8] - ldr r6, [r3, #32] - ldr r7, [r3, #52] - eor r4, r4, r5 - eor r4, r4, r6 - eor r4, r4, r7 - mov r4, r4, ror #31 - str r4, [r3, #64] - bne 2b - - /* - * The SHA functions are: - * - * f1(B,C,D) = (D ^ (B & (C ^ D))) - * f2(B,C,D) = (B ^ C ^ D) - * f3(B,C,D) = ((B & C) | (D & (B | C))) - * - * Then the sub-blocks are processed as follows: - * - * A' = ror(A, 27) + f(B,C,D) + E + K + *W++ - * B' = A - * C' = ror(B, 2) - * D' = C - * E' = D - * - * We therefore unroll each loop 5 times to avoid register shuffling. - * Also the ror for C (and also D and E which are successivelyderived - * from it) is applied in place to cut on an additional mov insn for - * each round. 
- */ - - .macro sha_f1, A, B, C, D, E - ldr r3, [r2], #4 - eor ip, \C, \D - add \E, r1, \E, ror #2 - and ip, \B, ip, ror #2 - add \E, \E, \A, ror #27 - eor ip, ip, \D, ror #2 - add \E, \E, r3 - add \E, \E, ip - .endm - - .macro sha_f2, A, B, C, D, E - ldr r3, [r2], #4 - add \E, r1, \E, ror #2 - eor ip, \B, \C, ror #2 - add \E, \E, \A, ror #27 - eor ip, ip, \D, ror #2 - add \E, \E, r3 - add \E, \E, ip - .endm - - .macro sha_f3, A, B, C, D, E - ldr r3, [r2], #4 - add \E, r1, \E, ror #2 - orr ip, \B, \C, ror #2 - add \E, \E, \A, ror #27 - and ip, ip, \D, ror #2 - add \E, \E, r3 - and r3, \B, \C, ror #2 - orr ip, ip, r3 - add \E, \E, ip - .endm - - ldmia r0, {r4 - r8} - - mov lr, #4 - ldr r1, .L_sha_K + 0 - - /* adjust initial values */ - mov r6, r6, ror #30 - mov r7, r7, ror #30 - mov r8, r8, ror #30 - -3: subs lr, lr, #1 - sha_f1 r4, r5, r6, r7, r8 - sha_f1 r8, r4, r5, r6, r7 - sha_f1 r7, r8, r4, r5, r6 - sha_f1 r6, r7, r8, r4, r5 - sha_f1 r5, r6, r7, r8, r4 - bne 3b - - ldr r1, .L_sha_K + 4 - mov lr, #4 - -4: subs lr, lr, #1 - sha_f2 r4, r5, r6, r7, r8 - sha_f2 r8, r4, r5, r6, r7 - sha_f2 r7, r8, r4, r5, r6 - sha_f2 r6, r7, r8, r4, r5 - sha_f2 r5, r6, r7, r8, r4 - bne 4b - - ldr r1, .L_sha_K + 8 - mov lr, #4 - -5: subs lr, lr, #1 - sha_f3 r4, r5, r6, r7, r8 - sha_f3 r8, r4, r5, r6, r7 - sha_f3 r7, r8, r4, r5, r6 - sha_f3 r6, r7, r8, r4, r5 - sha_f3 r5, r6, r7, r8, r4 - bne 5b - - ldr r1, .L_sha_K + 12 - mov lr, #4 - -6: subs lr, lr, #1 - sha_f2 r4, r5, r6, r7, r8 - sha_f2 r8, r4, r5, r6, r7 - sha_f2 r7, r8, r4, r5, r6 - sha_f2 r6, r7, r8, r4, r5 - sha_f2 r5, r6, r7, r8, r4 - bne 6b - - ldmia r0, {r1, r2, r3, ip, lr} - add r4, r1, r4 - add r5, r2, r5 - add r6, r3, r6, ror #2 - add r7, ip, r7, ror #2 - add r8, lr, r8, ror #2 - stmia r0, {r4 - r8} - - ldmfd sp!, {r4 - r8, pc} - -ENDPROC(sha_transform) - - .align 2 -.L_sha_K: - .word 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6 - - -/* - * void sha_init(__u32 *buf) - */ - - .align 2 -.L_sha_initial_digest: - .word 0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0 - -ENTRY(sha_init) - - str lr, [sp, #-4]! - adr r1, .L_sha_initial_digest - ldmia r1, {r1, r2, r3, ip, lr} - stmia r0, {r1, r2, r3, ip, lr} - ldr pc, [sp], #4 - -ENDPROC(sha_init) diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c index 0ca90b834586d..556bbd468db3d 100644 --- a/arch/arm/mach-davinci/board-dm644x-evm.c +++ b/arch/arm/mach-davinci/board-dm644x-evm.c @@ -440,11 +440,6 @@ evm_u35_setup(struct i2c_client *client, int gpio, unsigned ngpio, void *c) gpio_request(gpio + 7, "nCF_SEL"); gpio_direction_output(gpio + 7, 1); - /* irlml6401 switches over 1A, in under 8 msec; - * now it can be managed by nDRV_VBUS ... 
- */ - davinci_setup_usb(1000, 8); - return 0; } @@ -705,6 +700,9 @@ static __init void davinci_evm_init(void) davinci_serial_init(&uart_config); dm644x_init_asp(&dm644x_evm_snd_data); + /* irlml6401 switches over 1A, in under 8 msec */ + davinci_setup_usb(1000, 8); + soc_info->emac_pdata->phy_id = DM644X_EVM_PHY_ID; /* Register the fixup for PHY on DaVinci */ phy_register_fixup_for_uid(LXT971_PHY_ID, LXT971_PHY_MASK, diff --git a/arch/arm/mach-davinci/cpufreq.c b/arch/arm/mach-davinci/cpufreq.c index 4a68c2b1ec11f..5f05e746fef35 100644 --- a/arch/arm/mach-davinci/cpufreq.c +++ b/arch/arm/mach-davinci/cpufreq.c @@ -94,9 +94,7 @@ static int davinci_target(struct cpufreq_policy *policy, if (freqs.old == freqs.new) return ret; - cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, - dev_driver_string(cpufreq.dev), - "transition: %u --> %u\n", freqs.old, freqs.new); + dev_dbg(&cpufreq.dev, "transition: %u --> %u\n", freqs.old, freqs.new); ret = cpufreq_frequency_table_target(policy, pdata->freq_table, freqs.new, relation, &idx); diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig index 5d3d9ade12fba..1061558420e39 100644 --- a/arch/arm/mach-msm/Kconfig +++ b/arch/arm/mach-msm/Kconfig @@ -6,7 +6,6 @@ choice config ARCH_MSM7X00A bool "MSM7x00A / MSM7x01A" - select MACH_TROUT if !MACH_HALIBUT select ARCH_MSM_ARM11 select MSM_SMD select MSM_SMD_PKG3 @@ -19,22 +18,25 @@ config ARCH_MSM7X30 select MACH_MSM7X30_SURF # if ! select ARCH_MSM_SCORPION select MSM_SMD - select MSM_VIC select CPU_V7 select MSM_GPIOMUX select MSM_PROC_COMM select HAS_MSM_DEBUG_UART_PHYS + select VERIFY_PERMISSION_FAULT + select MSM_DAL config ARCH_QSD8X50 bool "QSD8X50" select MACH_QSD8X50_SURF if !MACH_QSD8X50A_ST1_5 select ARCH_MSM_SCORPION select MSM_SMD - select MSM_VIC select CPU_V7 select MSM_GPIOMUX select MSM_PROC_COMM select HAS_MSM_DEBUG_UART_PHYS + select VERIFY_PERMISSION_FAULT + select MSM_DAL + select REGULATOR config ARCH_MSM8X60 bool "MSM8X60" @@ -47,7 +49,7 @@ config ARCH_MSM8X60 select MSM_GPIOMUX select IOMMU_API select MSM_SCM if SMP - + select VERIFY_PERMISSION_FAULT endchoice config MSM_SOC_REV_A @@ -63,21 +65,132 @@ config ARCH_MSM_SCORPION config HAS_MSM_DEBUG_UART_PHYS bool -config MSM_VIC +config MSM_MDP22 + bool + depends on ARCH_MSM7X00A + default y + +config MSM_MDP31 + bool + depends on ARCH_QSD8X50 + default y + +config MSM_MDP40 + bool + depends on ARCH_MSM7X30 + default y + +config PERFLOCK + depends on CPU_FREQ + depends on ARCH_QSD8X50 || ARCH_MSM7X30 || ARCH_MSM7X00A || ARCH_MSM7227 || ARCH_MSM7225 + default n + bool "HTC Performance Lock" + +config PERFLOCK_BOOT_LOCK + depends on PERFLOCK + depends on ARCH_QSD8X50 || ARCH_MSM7X30 || ARCH_MSM7X00A || ARCH_MSM7227 || ARCH_MSM7225 + default n + bool "Boot Time Performance Lock" + +config PERFLOCK_SCREEN_POLICY + depends on PERFLOCK + depends on ARCH_QSD8X50 || ARCH_MSM7X00A || ARCH_MSM7227 || ARCH_MSM7225 + default n + bool "Change Cpufreq Policy while Screen ON/OFF" + +config PERFLOCK_SCREEN_ON_MIN + depends on PERFLOCK_SCREEN_POLICY + int "Minimum speed while screen on" + default MSM_CPU_FREQ_MIN + +config PERFLOCK_SCREEN_ON_MAX + depends on PERFLOCK_SCREEN_POLICY + int "Maximum speed while screen on" + default MSM_CPU_FREQ_MAX + +config PERFLOCK_SCREEN_OFF_MIN + depends on PERFLOCK_SCREEN_POLICY + int "Minimum speed while screen off" + default MSM_CPU_FREQ_MIN + +config PERFLOCK_SCREEN_OFF_MAX + depends on PERFLOCK_SCREEN_POLICY + int "Maximum speed while screen off" + default MSM_CPU_FREQ_MAX + +config MSM_REMOTE_SPINLOCK_DEKKERS + 
bool + +config MSM_REMOTE_SPINLOCK_SWP bool +config MSM_REMOTE_SPINLOCK_LDREX + bool + +config MSM_REMOTE_SPINLOCK + bool + depends on MSM_REMOTE_SPINLOCK_LDREX || MSM_REMOTE_SPINLOCK_SWP || \ + MSM_REMOTE_SPINLOCK_DEKKERS + default y + +config MSM_LEGACY_7X00A_AMSS + bool + +config MSM_AMSS_VERSION + int + default 6210 if MSM_AMSS_VERSION_6210 + default 6220 if MSM_AMSS_VERSION_6220 + default 6225 if MSM_AMSS_VERSION_6225 + default 6350 if MSM_AMSS_VERSION_6350 + default 3200 if MSM_AMSS_VERSION_3200 + +choice + prompt "AMSS modem firmware version" + + depends on ARCH_MSM7X00A + default MSM_AMSS_VERSION_6225 + + config MSM_AMSS_VERSION_6210 + bool "6.2.10" + select MSM_LEGACY_7X00A_AMSS + + config MSM_AMSS_VERSION_6220 + bool "6.2.20" + select MSM_LEGACY_7X00A_AMSS + + config MSM_AMSS_VERSION_6225 + bool "6.2.20 + New ADSP" + select MSM_LEGACY_7X00A_AMSS + + config MSM_AMSS_VERSION_6350 + bool "6.3.50" + + config MSM_AMSS_VERSION_3200 + bool "3.2.00" +endchoice + menu "Qualcomm MSM Board Type" config MACH_HALIBUT depends on ARCH_MSM depends on ARCH_MSM7X00A + default n bool "Halibut Board (QCT SURF7201A)" help Support for the Qualcomm SURF7201A eval board. +config MACH_SAPPHIRE + depends on ARCH_MSM + depends on ARCH_MSM7X00A + default y + bool "HTC Magic (aka sapphire)" + help + Support for the HTC Dream, T-Mobile G1, Android ADP1 devices. + config MACH_TROUT depends on ARCH_MSM depends on ARCH_MSM7X00A + default n bool "HTC Dream (aka trout)" help Support for the HTC Dream, T-Mobile G1, Android ADP1 devices. @@ -88,12 +201,82 @@ config MACH_MSM7X30_SURF help Support for the Qualcomm MSM7x30 SURF eval board. +config MACH_MSM7X30_FLUID + depends on ARCH_MSM7X30 + select MACH_MSM7X30_SURF + bool "MSM7x30 FLUID" + help + Support for the Qualcomm MSM7x30 FLUID eval board. + +choice + depends on ARCH_QSD8X50 + prompt "Bravo" + default MACH_BRAVO_NONE + help + Select this to support the Bravo GSM or CDMA device + + config MACH_BRAVO_NONE + bool "None" + + config MACH_BRAVO + bool "GSM" + help + Select this to support the Bravo GSM device + + config MACH_BRAVOC + bool "CDMA" + help + Select this to support the Bravo CDMA device +endchoice + +config MACH_INCREDIBLE + depends on ARCH_QSD8X50 + default n + bool "Incredible" + help + Select this to support the Incredible device + +config MACH_INCREDIBLEC + depends on ARCH_QSD8X50 + default n + bool "IncredibleC" + help + Select this to support the IncredibleC device + +config MACH_MAHIMAHI + depends on ARCH_QSD8X50 + default y + bool "Mahi-Mahi" + help + Select this to support the Mahi-Mahi device + +config MACH_SWORDFISH + depends on ARCH_QSD8X50 + default n + bool "Swordfish Board (QCT SURF8250)" + help + Support for the Qualcomm SURF8250 eval board. + +config MACH_SUPERSONIC + depends on ARCH_QSD8X50 + default n + bool "Supersonic (HTC EVO 4G)" + help + Select this to support the Supersonic device + config MACH_QSD8X50_SURF depends on ARCH_QSD8X50 bool "QSD8x50 SURF" help Support for the Qualcomm QSD8x50 SURF eval board. +config MACH_QSD8X50_FFA + depends on ARCH_QSD8X50 + default n + bool "8x50-ffa" + help + Select this to support the 8x50 ffa device + config MACH_QSD8X50A_ST1_5 depends on ARCH_QSD8X50 select MSM_SOC_REV_A @@ -163,6 +346,251 @@ config MSM_SMD_PKG3 config MSM_PROC_COMM bool +config HTC_HEADSET + tristate "HTC 2 Wire detection driver" + default n + help + Provides support for detecting HTC 2 wire devices, such as wired + headset, on the trout platform. Can be used with the msm serial + debugger, but not with serial console. 
+ +config HTC_35MM_JACK + bool "HTC 3.5mm headset jack" + default n + help + Provides support for 3.5mm headset jack devices, like wired headsets. + +config TROUT_BATTCHG + depends on (MACH_TROUT || MACH_SAPPHIRE) && POWER_SUPPLY + default y + bool "Trout battery / charger driver" + +config HTC_BATTCHG + depends on MSM_ONCRPCROUTER && POWER_SUPPLY + default n + bool "HTC battery / charger driver" + +config HTC_BATTCHG_SMEM + depends on HTC_BATTCHG + default n + bool "Read Battery Info via SMEM" + +config HTC_PWRSPLY + depends on MSM_ONCRPCROUTER && POWER_SUPPLY && !TROUT_BATTCHG + default y + bool "HTC Power supply driver" + help + Used by HTC devices with a dedicated battery gauge. + +config HTC_PWRSINK + depends on MSM_SMD + default y + bool "HTC Power Sink Driver" + +config HTC_POWER_COLLAPSE_MAGIC + default n + bool "Check Power Collapse State" + +config HTC_ONMODE_CHARGING + default n + bool "Low-power hibernate charging support" + +config QSD_SVS + bool "QSD Static Voltage Scaling" + depends on (ARCH_MSM_SCORPION) + default y + help + Enables static voltage scaling using the TPS65023 PMIC. + +config QSD_PMIC_DEFAULT_DCDC1 + int "PMIC default output voltage" + depends on (ARCH_MSM_SCORPION) + default 1275 + help + This is the PMIC voltage at Linux kernel boot. + +config CACHE_FLUSH_RANGE_LIMIT + hex "Cache flush range limit" + default 0x40000 + help + When flushing a cache range larger than this (hex) limit, flush the + entire cache instead. Flushing a large range can be slower than + flushing, then refilling, the entire cache. + +config PHYS_OFFSET + hex "Physical Offset" + default "0x40800000" if ARCH_MSM9615 + default "0x80200000" if ARCH_APQ8064 + default "0x80200000" if ARCH_MSM8960 + default "0x10000000" if ARCH_FSM9XXX + default "0x00200000" if !MSM_STACKED_MEMORY + default "0x00000000" if ARCH_QSD8X50 && MSM_SOC_REV_A + default "0x20000000" if ARCH_QSD8X50 + default "0x40200000" if ARCH_MSM8X60 + default "0x10000000" + +choice + prompt "Default Timer" + default MSM7X00A_USE_GP_TIMER + + config MSM7X00A_USE_GP_TIMER + bool "GP Timer" + help + Low resolution timer that allows power collapse from idle. + + config MSM7X00A_USE_DG_TIMER + bool "DG Timer" + help + High resolution timer. +endchoice + +choice + prompt "Suspend sleep mode" + default MSM7X00A_SLEEP_MODE_POWER_COLLAPSE_SUSPEND + help + Allows overriding the sleep mode used. Leave at power + collapse suspend unless the arm9 image has problems. + + config MSM7X00A_SLEEP_MODE_POWER_COLLAPSE_SUSPEND + bool "Power collapse suspend" + help + Lowest sleep state. Returns through reset vector. + + config MSM7X00A_SLEEP_MODE_POWER_COLLAPSE + bool "Power collapse" + help + Sleep state that returns through reset vector. + + config MSM7X00A_SLEEP_MODE_APPS_SLEEP + bool "Apps Sleep" + + config MSM7X00A_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT + bool "Ramp down cpu clock and wait for interrupt" + + config MSM7X00A_SLEEP_WAIT_FOR_INTERRUPT + bool "Wait for interrupt" +endchoice + +config MSM7X00A_SLEEP_MODE + int + default 0 if MSM7X00A_SLEEP_MODE_POWER_COLLAPSE_SUSPEND + default 1 if MSM7X00A_SLEEP_MODE_POWER_COLLAPSE + default 2 if MSM7X00A_SLEEP_MODE_APPS_SLEEP + default 3 if MSM7X00A_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT + default 4 if MSM7X00A_SLEEP_WAIT_FOR_INTERRUPT + +choice + prompt "Idle sleep mode" + default MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE + help + Allows overriding the sleep mode used from idle. Leave at power + collapse suspend unless the arm9 image has problems.
+ + config MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE_SUSPEND + bool "Power collapse suspend" + help + Lowest sleep state. Returns through reset vector. + + config MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE + bool "Power collapse" + help + Sleep state that returns through reset vector. + + config MSM7X00A_IDLE_SLEEP_MODE_APPS_SLEEP + bool "Apps Sleep" + + config MSM7X00A_IDLE_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT + bool "Ramp down cpu clock and wait for interrupt" + + config MSM7X00A_IDLE_SLEEP_WAIT_FOR_INTERRUPT + bool "Wait for interrupt" +endchoice + +config MSM7X00A_IDLE_SLEEP_MODE + int + default 0 if MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE_SUSPEND + default 1 if MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE + default 2 if MSM7X00A_IDLE_SLEEP_MODE_APPS_SLEEP + default 3 if MSM7X00A_IDLE_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT + default 4 if MSM7X00A_IDLE_SLEEP_WAIT_FOR_INTERRUPT + +config MSM7X00A_IDLE_SLEEP_MIN_TIME + int "Minimum idle time before sleep" + default 20000000 + help + Minimum idle time in nanoseconds before entering low power mode. + +config MSM7X00A_IDLE_SPIN_TIME + int "Idle spin time before cpu ramp down" + default 80000 + help + Spin time in nanoseconds before ramping down cpu clock and entering + any low power state. + +menuconfig MSM_IDLE_STATS + bool "Collect idle statistics" + default y + help + Collect idle statistics and export them in proc/msm_pm_stats. + +if MSM_IDLE_STATS + +config MSM_IDLE_STATS_FIRST_BUCKET + int "First bucket time" + default 62500 + help + Upper time limit in nanoseconds of first bucket. + +config MSM_IDLE_STATS_BUCKET_SHIFT + int "Bucket shift" + default 2 + +config MSM_IDLE_STATS_BUCKET_COUNT + int "Bucket count" + default 10 + +endif # MSM_IDLE_STATS + +config MSM_FIQ_SUPPORT + default y + bool "Enable installation of an FIQ handler." + +config MSM_SERIAL_DEBUGGER + select MSM_FIQ_SUPPORT + select KERNEL_DEBUGGER_CORE + default n + bool "FIQ Mode Serial Debugger" + help + The FIQ serial debugger can accept commands even when the + kernel is unresponsive due to being stuck with interrupts + disabled. Depends on the kernel debugger core in drivers/misc. + +config MSM_SERIAL_DEBUGGER_NO_SLEEP + depends on MSM_SERIAL_DEBUGGER + default n + bool "Keep serial debugger active" + help + Enables the serial debugger at boot. Passing + msm_serial_debugger.no_sleep on the kernel commandline will + override this config option. + +config MSM_SERIAL_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON + depends on MSM_SERIAL_DEBUGGER + default n + bool "Don't disable wakeup IRQ when debugger is active" + help + Don't disable the wakeup irq when enabling the uart clock. This will + cause extra interrupts, but it makes the serial debugger usable with + radio builds that ignore the uart clock request in power collapse. + +config MSM_SERIAL_DEBUGGER_CONSOLE + depends on MSM_SERIAL_DEBUGGER + default n + bool "Console on FIQ Serial Debugger port" + help + Enables a console so that printk messages are displayed on + the debugger serial port as they occur. + config MSM_SMD bool @@ -177,4 +605,196 @@ config IOMMU_API config MSM_SCM bool + +config MSM_DAL + default n + bool "MSM Driver Access Layer (DAL RPC)" + help + Support for the DAL RPC interface used to communicate with + the baseband processor or DSP in newer Qualcomm MSM/QSD + chips.
+
+config MSM_ONCRPCROUTER
+ depends on MSM_SMD
+ default y
+ bool "MSM ONCRPC router support"
+ help
+ Support for the MSM ONCRPC router for communication between
+ the ARM9 and ARM11 processors.
+
+config MSM_RPCSERVERS
+ depends on MSM_ONCRPCROUTER && ARCH_MSM7X00A
+ default y
+ bool "Kernel side RPC server bundle"
+ help
+ Bundle of kernel-side ONCRPC servers, such as the watchdog
+ keepalive and remote time servers.
+
+if CPU_FREQ_MSM
+
+config MSM_CPU_FREQ_SET_MIN_MAX
+ bool "Set Min/Max CPU frequencies."
+ default n
+ help
+ Allow setting min and max CPU frequencies. Sysfs can be used
+ to override these values.
+
+config MSM_CPU_FREQ_MAX
+ int "Max CPU Frequency"
+ depends on MSM_CPU_FREQ_SET_MIN_MAX
+ default 384000
+
+config MSM_CPU_FREQ_MIN
+ int "Min CPU Frequency"
+ depends on MSM_CPU_FREQ_SET_MIN_MAX
+ default 245760
+
+endif # CPU_FREQ_MSM
+
+config AXI_SCREEN_POLICY
+ depends on ARCH_QSD8X50 || ARCH_MSM7X00A || ARCH_MSM7227 || ARCH_MSM7225
+ depends on HAS_EARLYSUSPEND
+ bool "Use higher AXI bus while screen ON"
+ default y
+ help
+ Simple AXI scaling based on screen ON/OFF and PWRC.
+
+config MSM_CPU_AVS
+ bool "Enable software controlled Adaptive Voltage Scaling (AVS)"
+ depends on (ARCH_MSM_SCORPION && QSD_SVS)
+ depends on ARCH_QSD8X50
+ default n
+ select MSM_AVS_HW
+ help
+ This enables software control of the Adaptive Voltage Scaling feature
+ in Qualcomm ARMv7 CPUs. It adjusts the voltage for each frequency
+ based on feedback from three ring oscillators in the CPU.
+
+config MSM_AVS_HW
+ bool "Enable Adaptive Voltage Scaling (AVS)"
+ default n
+ help
+ Enable AVS hardware to fine tune voltage at each frequency. The
+ AVS hardware block associated with each Qualcomm ARMv7 core can
+ fine tune the voltage based on the feedback from the ring
+ oscillators.
+
+config MSM_ADSP
+ depends on ARCH_MSM7X00A
+ tristate "MSM ADSP driver"
+ default y
+ help
+ Provides access to registers needed by the userspace aDSP library.
+
+config HTC_ACOUSTIC
+ tristate "HTC acoustic driver"
+ depends on ARCH_MSM7X00A
+ default y
+ help
+ Provides user space access to shared memory allocated via RPC
+ for the acoustic driver.
+
+config HTC_ACOUSTIC_QSD
+ tristate "HTC acoustic driver for QSD"
+ depends on ARCH_QSD8X50
+ default y
+ help
+ Provides user space access to shared memory allocated via RPC.
+ Also provides headset amp, mic bias and speaker amp control.
+
+config MSM_ADSP_REPORT_EVENTS
+ bool "Report modem events from the DSP"
+ default y
+ depends on MSM_ADSP
+ help
+ Normally, only messages from the aDSP are reported to userspace.
+ With this option, we report events from the aDSP as well.
+
+config MSM_QDSP6
+ tristate "QDSP6 support"
+ depends on ARCH_QSD8X50
+ default y
+ help
+ Enable support for qdsp6. This provides audio and video functionality.
+
+config MSM_QDSP5V2
+ tristate "QDSP5V2 support"
+ depends on ARCH_MSM7X30
+ default y
+ help
+ Enable support for qdsp5v2, which provides audio processing on 7x30.
+
+config MSM_SSBI
+ tristate "SSBI support"
+ depends on ARCH_MSM7X30
+ default n
+ help
+ Enable support for the SSBI bus. This is required for communicating
+ with Qualcomm PMICs and audio codecs.
+
+config WIFI_CONTROL_FUNC
+ bool "Enable WiFi control function abstraction"
+ help
+ Enables power/reset/card-detect function abstraction.
+
+config WIFI_MEM_PREALLOC
+ depends on WIFI_CONTROL_FUNC
+ bool "Preallocate memory for WiFi buffers"
+ help
+ Preallocates memory buffers for the WiFi driver.
+
+config ARCH_MSM_FLASHLIGHT
+ bool "Flashlight Driver"
+ depends on ARCH_MSM
+ help
+ Flashlight driver for MSM-based devices.
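
MSM_CPU_FREQ_MIN and MSM_CPU_FREQ_MAX are expressed in kHz and only take effect when MSM_CPU_FREQ_SET_MIN_MAX is enabled; sysfs can still override them at run time, as the help text notes. The sketch below shows one way such bounds could be combined with an acpu_freq_tbl-style table (like the one acpuclock-qsd8x50.c adds later in this patch) to pick a supported operating point. It is only an illustration: the table values are a hand-picked subset and pick_acpu_khz() is a hypothetical helper, not code from this patch.

/*
 * Illustrative sketch only, not the shipped acpuclock/cpufreq code.
 * Clamps a requested frequency to the Kconfig bounds, then selects the
 * highest table entry at or below the clamped request.
 */
#include <stdio.h>

#define MSM_CPU_FREQ_MIN 245760  /* CONFIG_MSM_CPU_FREQ_MIN, kHz */
#define MSM_CPU_FREQ_MAX 384000  /* CONFIG_MSM_CPU_FREQ_MAX, kHz */

/* Example subset of the QSD8x50 ACPU frequency table, zero-terminated. */
static const unsigned int acpu_khz_tbl[] = {
	19200, 128000, 245000, 384000, 576000, 768000, 998400, 1113600, 0
};

static unsigned int pick_acpu_khz(unsigned int req_khz)
{
	unsigned int best = acpu_khz_tbl[0];
	int i;

	if (req_khz < MSM_CPU_FREQ_MIN)
		req_khz = MSM_CPU_FREQ_MIN;
	if (req_khz > MSM_CPU_FREQ_MAX)
		req_khz = MSM_CPU_FREQ_MAX;

	for (i = 0; acpu_khz_tbl[i]; i++)
		if (acpu_khz_tbl[i] <= req_khz)
			best = acpu_khz_tbl[i];
	return best;
}

int main(void)
{
	printf("request 1113600 -> %u kHz\n", pick_acpu_khz(1113600)); /* capped at 384000 */
	printf("request 19200   -> %u kHz\n", pick_acpu_khz(19200));   /* floored, gives 245000 */
	return 0;
}

Choosing the highest table entry at or below the clamped request mirrors the "highest speed at or below 'rate'" rule that acpuclk_set_rate() follows in acpuclock-arm11.c.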
+
+config MICROP_COMMON
+ tristate "MICROP COMMON Driver"
+ depends on I2C
+ help
+ HTC Micro-P common support.
+
+config HTC_HEADSET_MGR
+ tristate "HTC headset manager driver"
+ default n
+ help
+ Provides support for the HTC headset manager.
+
+config HTC_HEADSET_GPIO
+ tristate "HTC GPIO headset detection driver"
+ depends on HTC_HEADSET_MGR
+ default n
+ help
+ Provides support for HTC GPIO headset detection.
+
+config HTC_HEADSET_MICROP
+ tristate "HTC Micro-P headset detection driver"
+ depends on HTC_HEADSET_MGR && MICROP_COMMON
+ default n
+ help
+ Provides support for HTC Micro-P headset detection.
+
+config VIRTUAL_KPANIC_PARTITION
+ bool "Create virtual kpanic partition"
+ default n
+ help
+ Creates a virtual mtd partition named 'kpanic', stealing space from
+ the specified mtd partition label.
+ *** DO NOT USE IF YOU ARE USING OTA/RECOVERY ***
+
+config VIRTUAL_KPANIC_PSIZE
+ depends on VIRTUAL_KPANIC_PARTITION
+ int "Default kpanic partition size"
+ default 1048576
+ help
+ Sets the size of the virtual kpanic partition to create.
+
+config VIRTUAL_KPANIC_SRC
+ depends on VIRTUAL_KPANIC_PARTITION
+ string "Partition to steal from"
+ default "cache"
+ help
+ Sets the partition to steal space from when creating the virtual one.
+
 endif
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 94195c190e13a..c5ef66eb4d6b1 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -1,35 +1,121 @@
-obj-y += io.o idle.o timer.o
+obj-y += io.o dma.o memory.o timer.o
+obj-y += clock.o
+obj-$(CONFIG_CPU_FREQ_MSM) += cpufreq.o
+obj-y += socinfo.o
+
+obj-$(CONFIG_MSM_PROC_COMM) += proc_comm.o
 ifndef CONFIG_ARCH_MSM8X60
-obj-y += acpuclock-arm11.o
-obj-y += dma.o
+ obj-$(CONFIG_MSM_PROC_COMM) += clock-pcom.o
+ obj-$(CONFIG_MSM_PROC_COMM) += vreg.o
+ ifdef CONFIG_MSM_PROC_COMM
+ifndef CONFIG_ARCH_FSM9XXX
+ obj-$(CONFIG_REGULATOR) += footswitch-pcom.o
+endif
+ endif
 endif
-ifdef CONFIG_MSM_VIC
-obj-y += irq-vic.o
-else
+obj-y += drv_callback.o
+obj-y += htc_board_tags.o
+
+obj-$(CONFIG_ARCH_MSM_ARM11) += acpuclock-arm11.o idle.o
+obj-$(CONFIG_ARCH_MSM_SCORPION) += arch-init-scorpion.o
+
+obj-$(CONFIG_ARCH_QSD8X50) += acpuclock-qsd8x50.o
+obj-$(CONFIG_ARCH_MSM7X30) += acpuclock-7x30.o
+obj-$(CONFIG_MSM_CPU_AVS) += avs.o
+obj-$(CONFIG_MSM_AVS_HW) += avs_hw.o
+
 ifndef CONFIG_ARCH_MSM8X60
 obj-y += irq.o
 endif
-endif
 obj-$(CONFIG_ARCH_MSM8X60) += clock-dummy.o iommu.o iommu_dev.o devices-msm8x60-iommu.o
-obj-$(CONFIG_MSM_PROC_COMM) += proc_comm.o clock-pcom.o vreg.o
-obj-$(CONFIG_MSM_PROC_COMM) += clock.o
+obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
+obj-$(CONFIG_SMP) += headsmp.o platsmp.o
+obj-$(CONFIG_CPU_V7) += idle-v7.o
+obj-$(CONFIG_MSM_SCM) += scm.o scm-boot.o
 obj-$(CONFIG_ARCH_QSD8X50) += sirc.o
+obj-$(CONFIG_MSM_FIQ_SUPPORT) += fiq_glue.o
+obj-$(CONFIG_MSM_SMD_LOGGING) += smem_log.o
 obj-$(CONFIG_MSM_SMD) += smd.o smd_debug.o
+obj-$(CONFIG_MSM_SMD) += smd_tty.o smd_qmi.o
 obj-$(CONFIG_MSM_SMD) += last_radio_log.o
-obj-$(CONFIG_MSM_SCM) += scm.o scm-boot.o
-obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
-obj-$(CONFIG_SMP) += headsmp.o platsmp.o
+ifndef CONFIG_ARCH_MSM9615
+ifndef CONFIG_ARCH_MSM8960
+ifndef CONFIG_ARCH_MSM8X60
+ obj-$(CONFIG_MSM_SMD) += pmic.o
+endif
+endif
+endif
+ifndef CONFIG_ARCH_MSM8960
+ifndef CONFIG_ARCH_MSM8X60
+ifndef CONFIG_ARCH_APQ8064
+ obj-y += nand_partitions.o
+endif
+endif
+endif
+
+obj-$(CONFIG_MSM_DAL) += dal.o
+
+obj-$(CONFIG_MSM_ONCRPCROUTER) += smd_rpcrouter.o
+obj-$(CONFIG_MSM_ONCRPCROUTER) += smd_rpcrouter_device.o
+obj-$(CONFIG_MSM_ONCRPCROUTER) += 
smd_rpcrouter_servers.o +obj-$(CONFIG_MSM_RPCSERVERS) += rpc_server_dog_keepalive.o +obj-$(CONFIG_MSM_RPCSERVERS) += rpc_server_time_remote.o +obj-$(CONFIG_MSM_ADSP) += qdsp5/ +obj-$(CONFIG_MSM_QDSP5V2) += qdsp5v2/ +obj-$(CONFIG_MSM_QDSP6) += qdsp6/ +obj-$(CONFIG_MSM_REMOTE_SPINLOCK) += remote_spinlock.o +obj-$(CONFIG_MSM_SSBI) += ssbi.o +ifdef CONFIG_PM + obj-y += pm.o +else + obj-y += no-pm.o +endif + +obj-$(CONFIG_MACH_TROUT) += board-trout-rfkill.o +obj-$(CONFIG_MACH_TROUT) += board-trout-wifi.o +obj-$(CONFIG_MACH_TROUT) += devices_htc.o +obj-$(CONFIG_TROUT_BATTCHG) += htc_battery_trout.o + +obj-$(CONFIG_MACH_SAPPHIRE) += board-sapphire.o board-sapphire-gpio.o +obj-$(CONFIG_MACH_SAPPHIRE) += board-sapphire-keypad.o board-sapphire-panel.o +obj-$(CONFIG_MACH_SAPPHIRE) += board-sapphire-mmc.o board-sapphire-wifi.o +obj-$(CONFIG_MACH_SAPPHIRE) += board-sapphire-rfkill.o msm_vibrator.o +obj-$(CONFIG_MACH_SAPPHIRE) += devices_htc.o +obj-$(CONFIG_MACH_SAPPHIRE) += htc_akm_cal.o htc_wifi_nvs.o + +obj-$(CONFIG_MACH_MAHIMAHI) += board-mahimahi.o board-mahimahi-panel.o +obj-$(CONFIG_MACH_MAHIMAHI) += board-mahimahi-keypad.o board-mahimahi-mmc.o +obj-$(CONFIG_MACH_MAHIMAHI) += board-mahimahi-rfkill.o htc_wifi_nvs.o htc_awb_cal.o +obj-$(CONFIG_MACH_MAHIMAHI) += board-mahimahi-wifi.o board-mahimahi-audio.o +obj-$(CONFIG_MACH_MAHIMAHI) += msm_vibrator.o +obj-$(CONFIG_MACH_MAHIMAHI) += board-mahimahi-microp.o +obj-$(CONFIG_MACH_MAHIMAHI) += htc_acoustic_qsd.o +obj-$(CONFIG_MACH_MAHIMAHI) += board-mahimahi-tpa2018d1.o +obj-$(CONFIG_MACH_MAHIMAHI) += board-mahimahi-smb329.o -obj-$(CONFIG_MACH_TROUT) += board-trout.o board-trout-gpio.o board-trout-mmc.o devices-msm7x00.o -obj-$(CONFIG_MACH_TROUT) += board-trout.o board-trout-gpio.o board-trout-mmc.o board-trout-panel.o devices-msm7x00.o obj-$(CONFIG_MACH_HALIBUT) += board-halibut.o devices-msm7x00.o -obj-$(CONFIG_ARCH_MSM7X30) += board-msm7x30.o devices-msm7x30.o -obj-$(CONFIG_ARCH_QSD8X50) += board-qsd8x50.o devices-qsd8x50.o +obj-$(CONFIG_MACH_HALIBUT) += board-halibut-keypad.o +obj-$(CONFIG_MACH_HALIBUT) += board-halibut-panel.o fish_battery.o + +obj-$(CONFIG_MACH_SWORDFISH) += board-swordfish.o +obj-$(CONFIG_MACH_SWORDFISH) += board-swordfish-keypad.o fish_battery.o +obj-$(CONFIG_MACH_SWORDFISH) += board-swordfish-panel.o +obj-$(CONFIG_MACH_SWORDFISH) += board-swordfish-mmc.o + +obj-$(CONFIG_ARCH_MSM7X30) += devices-msm7x30.o +obj-$(CONFIG_ARCH_QSD8X50) += devices-qsd8x50.o +obj-$(CONFIG_MACH_QSD8X50_SURF) += board-qsd8x50.o +obj-$(CONFIG_MACH_QSD8X50A_ST1_5) += board-qsd8x50.o +obj-$(CONFIG_MACH_QSD8X50_FFA) += board-qsd8x50.o obj-$(CONFIG_ARCH_MSM8X60) += board-msm8x60.o +obj-$(CONFIG_MACH_MSM7X30_SURF) += board-msm7x30.o +obj-$(CONFIG_MACH_MSM7X30_SURF) += board-msm7x30-panel.o +obj-$(CONFIG_MACH_MSM7X30_SURF) += board-msm7x30-audio.o + obj-$(CONFIG_ARCH_MSM7X30) += gpiomux-7x30.o gpiomux-v1.o gpiomux.o obj-$(CONFIG_ARCH_QSD8X50) += gpiomux-8x50.o gpiomux-v1.o gpiomux.o obj-$(CONFIG_ARCH_MSM8X60) += gpiomux-8x60.o gpiomux-v2.o gpiomux.o @@ -38,3 +124,57 @@ obj-y += gpio-v2.o else obj-y += gpio.o endif + +obj-$(CONFIG_MACH_SUPERSONIC) += board-supersonic.o board-supersonic-panel.o +obj-$(CONFIG_MACH_SUPERSONIC) += board-supersonic-keypad.o board-supersonic-mmc.o +obj-$(CONFIG_MACH_SUPERSONIC) += board-supersonic-rfkill.o board-supersonic-audio.o +obj-$(CONFIG_MACH_SUPERSONIC) += board-supersonic-wifi.o htc_awb_cal.o +obj-$(CONFIG_MACH_SUPERSONIC) += msm_vibrator.o +obj-$(CONFIG_MACH_SUPERSONIC) += board-supersonic-microp.o 
+obj-$(CONFIG_MACH_SUPERSONIC) += board-supersonic-tpa2018d1.o +obj-$(CONFIG_MACH_SUPERSONIC) += board-supersonic-tpa6130.o +obj-$(CONFIG_MACH_SUPERSONIC) += htc_wifi_nvs.o htc_bluetooth.o +obj-$(CONFIG_MACH_SUPERSONIC) += htc_acoustic_qsd.o +obj-$(CONFIG_MACH_SUPERSONIC) += board-supersonic-flashlight.o + +obj-$(CONFIG_MACH_INCREDIBLEC) += board-incrediblec.o board-incrediblec-panel.o board-incrediblec-tv.o +obj-$(CONFIG_MACH_INCREDIBLEC) += board-incrediblec-keypad.o board-incrediblec-mmc.o +obj-$(CONFIG_MACH_INCREDIBLEC) += board-incrediblec-rfkill.o htc_wifi_nvs.o board-incrediblec-audio.o +obj-$(CONFIG_MACH_INCREDIBLEC) += board-incrediblec-wifi.o htc_awb_cal.o +obj-$(CONFIG_MACH_INCREDIBLEC) += board-incrediblec-microp.o htc_bluetooth.o +obj-$(CONFIG_MACH_INCREDIBLEC) += msm_vibrator.o +obj-$(CONFIG_MACH_INCREDIBLEC) += proc_engineerid.o board-incrediblec-tpa6130.o +obj-$(CONFIG_MACH_INCREDIBLEC) += htc_acoustic_qsd.o + +obj-$(CONFIG_MACH_BRAVO) += board-bravo.o board-bravo-panel.o +obj-$(CONFIG_MACH_BRAVO) += board-bravo-keypad.o board-bravo-mmc.o +obj-$(CONFIG_MACH_BRAVO) += board-bravo-rfkill.o htc_wifi_nvs.o htc_awb_cal.o +obj-$(CONFIG_MACH_BRAVO) += board-bravo-wifi.o board-bravo-audio.o +obj-$(CONFIG_MACH_BRAVO) += msm_vibrator.o +obj-$(CONFIG_MACH_BRAVO) += board-bravo-microp.o +obj-$(CONFIG_MACH_BRAVO) += board-bravo-tpa2018d1.o +obj-$(CONFIG_MACH_BRAVO) += board-bravo-smb329.o + +obj-$(CONFIG_MACH_BRAVOC) += board-bravo.o board-bravo-panel.o +obj-$(CONFIG_MACH_BRAVOC) += board-bravo-keypad.o board-bravo-mmc.o +obj-$(CONFIG_MACH_BRAVOC) += board-bravo-rfkill.o htc_wifi_nvs.o htc_awb_cal.o +obj-$(CONFIG_MACH_BRAVOC) += board-bravo-wifi.o board-bravo-audio.o +obj-$(CONFIG_MACH_BRAVOC) += msm_vibrator.o +obj-$(CONFIG_MACH_BRAVOC) += board-bravo-microp.o +obj-$(CONFIG_MACH_BRAVOC) += board-bravo-tpa2018d1.o +obj-$(CONFIG_MACH_BRAVOC) += board-bravo-smb329.o + +obj-$(CONFIG_HTC_ACOUSTIC) += htc_acoustic.o +obj-$(CONFIG_HTC_ACOUSTIC_QSD) += htc_acoustic_qsd.o +obj-$(CONFIG_HTC_BATTCHG) += htc_battery.o +obj-$(CONFIG_HTC_PWRSINK) += htc_pwrsink.o +obj-$(CONFIG_HTC_PWRSPLY) += htc_power_supply.o +obj-$(CONFIG_HTC_HEADSET) += htc_headset.o +obj-$(CONFIG_HTC_35MM_JACK) += htc_35mm_jack.o +obj-$(CONFIG_HTC_HEADSET_MGR) += htc_headset_mgr.o +obj-$(CONFIG_HTC_HEADSET_GPIO) += htc_headset_gpio.o +obj-$(CONFIG_HTC_HEADSET_MICROP) += htc_headset_microp.o +obj-$(CONFIG_PERFLOCK) += perflock.o +obj-$(CONFIG_PERFLOCK) += htc_set_perflock.o +obj-$(CONFIG_MICROP_COMMON) += atmega_microp_common.o +obj-$(CONFIG_ARCH_MSM_FLASHLIGHT) += msm_flashlight.o diff --git a/arch/arm/mach-msm/Makefile.boot b/arch/arm/mach-msm/Makefile.boot index 24dfbf8c07c47..b9c45d980051b 100644 --- a/arch/arm/mach-msm/Makefile.boot +++ b/arch/arm/mach-msm/Makefile.boot @@ -1,3 +1,58 @@ - zreladdr-y := 0x10008000 -params_phys-y := 0x10000100 -initrd_phys-y := 0x10800000 +# MSM7x01A + zreladdr-$(CONFIG_ARCH_MSM7X01A) := 0x10008000 +params_phys-$(CONFIG_ARCH_MSM7X01A) := 0x10000100 +initrd_phys-$(CONFIG_ARCH_MSM7X01A) := 0x10800000 + +# MSM7x25 + zreladdr-$(CONFIG_ARCH_MSM7X25) := 0x00208000 +params_phys-$(CONFIG_ARCH_MSM7X25) := 0x00200100 +initrd_phys-$(CONFIG_ARCH_MSM7X25) := 0x0A000000 + +# MSM7x27 + zreladdr-$(CONFIG_ARCH_MSM7X27) := 0x00208000 +params_phys-$(CONFIG_ARCH_MSM7X27) := 0x00200100 +initrd_phys-$(CONFIG_ARCH_MSM7X27) := 0x0A000000 + +# MSM7x27A + zreladdr-$(CONFIG_ARCH_MSM7X27A) := 0x00208000 +params_phys-$(CONFIG_ARCH_MSM7X27A) := 0x00200100 + +# MSM7x30 + zreladdr-$(CONFIG_ARCH_MSM7X30) := 0x00208000 
+params_phys-$(CONFIG_ARCH_MSM7X30) := 0x00200100 +initrd_phys-$(CONFIG_ARCH_MSM7X30) := 0x01200000 + +ifeq ($(CONFIG_MSM_SOC_REV_A),y) +# QSD8x50A + zreladdr-$(CONFIG_ARCH_QSD8X50) := 0x00008000 +params_phys-$(CONFIG_ARCH_QSD8X50) := 0x00000100 +initrd_phys-$(CONFIG_ARCH_QSD8X50) := 0x04000000 +else +# QSD8x50 + zreladdr-$(CONFIG_ARCH_QSD8X50) := 0x20008000 +params_phys-$(CONFIG_ARCH_QSD8X50) := 0x20000100 +initrd_phys-$(CONFIG_ARCH_QSD8X50) := 0x24000000 +#initrd_phys-$(CONFIG_ARCH_QSD8X50) := 0x21000000 +endif + +# MSM8x60 + zreladdr-$(CONFIG_ARCH_MSM8X60) := 0x40208000 + +# MSM8960 + zreladdr-$(CONFIG_ARCH_MSM8960) := 0x80208000 + +# APQ8064 + zreladdr-$(CONFIG_ARCH_APQ8064) := 0x80208000 + +# MSM9615 + zreladdr-$(CONFIG_ARCH_MSM9615) := 0x40808000 + +# FSM9XXX + zreladdr-$(CONFIG_ARCH_FSM9XXX) := 0x10008000 +params_phys-$(CONFIG_ARCH_FSM9XXX) := 0x10000100 +initrd_phys-$(CONFIG_ARCH_FSM9XXX) := 0x12000000 + +# override for Sapphire + zreladdr-$(CONFIG_MACH_SAPPHIRE) := 0x02008000 +params_phys-$(CONFIG_MACH_SAPPHIRE) := 0x02000100 +initrd_phys-$(CONFIG_MACH_SAPPHIRE) := 0x02800000 diff --git a/arch/arm/mach-msm/acpuclock-7x30.c b/arch/arm/mach-msm/acpuclock-7x30.c new file mode 100644 index 0000000000000..03087e06ac498 --- /dev/null +++ b/arch/arm/mach-msm/acpuclock-7x30.c @@ -0,0 +1,70 @@ +/* + * + * Copyright (C) 2007 Google, Inc. + * Copyright (c) 2007-2009, Code Aurora Forum. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "acpuclock.h" + +unsigned long acpuclk_power_collapse(void) +{ + return 0; +} + +unsigned long acpuclk_wait_for_irq(void) +{ + return 0; +} + +unsigned long acpuclk_get_wfi_rate(void) +{ + return 0; +} + +int acpuclk_set_rate(unsigned long rate, int for_power_collapse) +{ + return 0; +} + +unsigned long acpuclk_get_rate(void) +{ + return 0; +} + +uint32_t acpuclk_get_switch_time(void) +{ + return 0; +} + +static void __init acpuclk_init(void) +{ +} + +void __init msm_acpu_clock_init(struct msm_acpu_clock_platform_data *clkdata) +{ + pr_info("acpu_clock_init()\n"); + acpuclk_init(); +} diff --git a/arch/arm/mach-msm/acpuclock-arm11.c b/arch/arm/mach-msm/acpuclock-arm11.c index 7ffbd987eb5d8..1900b04cf85ea 100644 --- a/arch/arm/mach-msm/acpuclock-arm11.c +++ b/arch/arm/mach-msm/acpuclock-arm11.c @@ -95,6 +95,8 @@ struct clkctl_acpu_speed { short up; }; +static unsigned long max_axi_rate; + /* * ACPU speed table. Complete table is shown but certain speeds are commented * out to optimized speed switching. Initialize loops_per_jiffy to 0. 
@@ -176,11 +178,12 @@ static int pc_pll_request(unsigned id, unsigned on) * ARM11 'owned' clock control *---------------------------------------------------------------------------*/ -unsigned long acpuclk_power_collapse(void) { +unsigned long acpuclk_power_collapse(int from_idle) { int ret = acpuclk_get_rate(); ret *= 1000; if (ret > drv_state.power_collapse_khz) - acpuclk_set_rate(drv_state.power_collapse_khz, 1); + acpuclk_set_rate(drv_state.power_collapse_khz, + (from_idle ? SETRATE_PC_IDLE : SETRATE_PC)); return ret; } @@ -193,7 +196,7 @@ unsigned long acpuclk_wait_for_irq(void) { int ret = acpuclk_get_rate(); ret *= 1000; if (ret > drv_state.wait_for_irq_khz) - acpuclk_set_rate(drv_state.wait_for_irq_khz, 1); + acpuclk_set_rate(drv_state.wait_for_irq_khz, SETRATE_SWFI); return ret; } @@ -290,7 +293,7 @@ static void acpuclk_set_div(const struct clkctl_acpu_speed *hunt_s) { } } -int acpuclk_set_rate(unsigned long rate, int for_power_collapse) +int acpuclk_set_rate(unsigned long rate, enum setrate_reason reason) { uint32_t reg_clkctl; struct clkctl_acpu_speed *cur_s, *tgt_s, *strt_s; @@ -315,7 +318,7 @@ int acpuclk_set_rate(unsigned long rate, int for_power_collapse) return -EINVAL; /* Choose the highest speed speed at or below 'rate' with same PLL. */ - if (for_power_collapse && tgt_s->a11clk_khz < cur_s->a11clk_khz) { + if (reason != SETRATE_CPUFREQ && tgt_s->a11clk_khz < cur_s->a11clk_khz) { while (tgt_s->pll != ACPU_PLL_TCXO && tgt_s->pll != cur_s->pll) tgt_s--; } @@ -323,7 +326,7 @@ int acpuclk_set_rate(unsigned long rate, int for_power_collapse) if (strt_s->pll != ACPU_PLL_TCXO) plls_enabled |= 1 << strt_s->pll; - if (!for_power_collapse) { + if (reason == SETRATE_CPUFREQ) { mutex_lock(&drv_state.lock); if (strt_s->pll != tgt_s->pll && tgt_s->pll != ACPU_PLL_TCXO) { rc = pc_pll_request(tgt_s->pll, 1); @@ -343,7 +346,7 @@ int acpuclk_set_rate(unsigned long rate, int for_power_collapse) } } - /* Set wait states for CPU inbetween frequency changes */ + /* Set wait states for CPU in/between frequency changes */ reg_clkctl = readl(A11S_CLK_CNTL_ADDR); reg_clkctl |= (100 << 16); /* set WT_ST_CNT */ writel(reg_clkctl, A11S_CLK_CNTL_ADDR); @@ -378,7 +381,7 @@ int acpuclk_set_rate(unsigned long rate, int for_power_collapse) printk(KERN_DEBUG "%s: STEP khz = %u, pll = %d\n", __FUNCTION__, cur_s->a11clk_khz, cur_s->pll); #endif - if (!for_power_collapse&& cur_s->pll != ACPU_PLL_TCXO + if (reason == SETRATE_CPUFREQ && cur_s->pll != ACPU_PLL_TCXO && !(plls_enabled & (1 << cur_s->pll))) { rc = pc_pll_request(cur_s->pll, 1); if (rc < 0) { @@ -397,7 +400,7 @@ int acpuclk_set_rate(unsigned long rate, int for_power_collapse) } /* Nothing else to do for power collapse. */ - if (for_power_collapse) + if (reason != SETRATE_CPUFREQ) return 0; /* Disable PLLs we are not using anymore. 
*/ @@ -428,14 +431,14 @@ int acpuclk_set_rate(unsigned long rate, int for_power_collapse) printk(KERN_DEBUG "%s: ACPU speed change complete\n", __FUNCTION__); #endif out: - if (!for_power_collapse) + if (reason == SETRATE_CPUFREQ) mutex_unlock(&drv_state.lock); return rc; } static void __init acpuclk_init(void) { - struct clkctl_acpu_speed *speed; + struct clkctl_acpu_speed *speed, *max_s; uint32_t div, sel; int rc; @@ -471,9 +474,21 @@ static void __init acpuclk_init(void) if (rc < 0) pr_err("Setting AXI min rate failed!\n"); + for (speed = acpu_freq_tbl; speed->a11clk_khz != 0; speed++) + ; + + max_s = speed - 1; + max_axi_rate = max_s->axiclk_khz * 1000; + printk(KERN_INFO "ACPU running at %d KHz\n", speed->a11clk_khz); } +unsigned long acpuclk_get_max_axi_rate(void) +{ + return max_axi_rate; +} +EXPORT_SYMBOL(acpuclk_get_max_axi_rate); + unsigned long acpuclk_get_rate(void) { WARN_ONCE(drv_state.current_speed == NULL, diff --git a/arch/arm/mach-msm/acpuclock-qsd8x50.c b/arch/arm/mach-msm/acpuclock-qsd8x50.c new file mode 100644 index 0000000000000..69acc9021016e --- /dev/null +++ b/arch/arm/mach-msm/acpuclock-qsd8x50.c @@ -0,0 +1,615 @@ +/* + * Copyright (c) 2009 Google, Inc. + * Copyright (c) 2008 QUALCOMM Incorporated. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "acpuclock.h" +#include "avs.h" +#include "proc_comm.h" + +#if 0 +#define DEBUG(x...) pr_info(x) +#else +#define DEBUG(x...) 
do {} while (0) +#endif + +#define SHOT_SWITCH 4 +#define HOP_SWITCH 5 +#define SIMPLE_SLEW 6 +#define COMPLEX_SLEW 7 + +#define SPSS_CLK_CNTL_ADDR (MSM_CSR_BASE + 0x100) +#define SPSS_CLK_SEL_ADDR (MSM_CSR_BASE + 0x104) + +/* Scorpion PLL registers */ +#define SCPLL_CTL_ADDR (MSM_SCPLL_BASE + 0x4) +#define SCPLL_STATUS_ADDR (MSM_SCPLL_BASE + 0x18) +#define SCPLL_FSM_CTL_EXT_ADDR (MSM_SCPLL_BASE + 0x10) + +struct clkctl_acpu_speed { + unsigned acpu_khz; + unsigned clk_cfg; + unsigned clk_sel; + unsigned sc_l_value; + unsigned lpj; + int vdd; + unsigned axiclk_khz; +}; + +static unsigned long max_axi_rate; + +struct regulator { + struct device *dev; + struct list_head list; + int uA_load; + int min_uV; + int max_uV; + char *supply_name; + struct device_attribute dev_attr; + struct regulator_dev *rdev; +}; + +/* clock sources */ +#define CLK_TCXO 0 /* 19.2 MHz */ +#define CLK_GLOBAL_PLL 1 /* 768 MHz */ +#define CLK_MODEM_PLL 4 /* 245 MHz (UMTS) or 235.93 MHz (CDMA) */ + +#define CCTL(src, div) (((src) << 4) | (div - 1)) + +/* core sources */ +#define SRC_RAW 0 /* clock from SPSS_CLK_CNTL */ +#define SRC_SCPLL 1 /* output of scpll 128-1113 MHZ */ +#define SRC_AXI 2 /* 128 MHz */ +#define SRC_PLL1 3 /* 768 MHz */ + +struct clkctl_acpu_speed acpu_freq_tbl[] = { + { 19200, CCTL(CLK_TCXO, 1), SRC_RAW, 0, 0, 975, 14000 }, + { 128000, CCTL(CLK_TCXO, 1), SRC_AXI, 0, 0, 975, 14000 }, + { 245000, CCTL(CLK_MODEM_PLL, 1), SRC_RAW, 0, 0, 1025, 29000 }, + /* Work around for acpu resume hung, GPLL is turn off by arm9 */ + /*{ 256000, CCTL(CLK_GLOBAL_PLL, 3), SRC_RAW, 0, 0, 1000, 29000 },*/ + { 384000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0A, 0, 1025, 58000 }, + { 422400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0B, 0, 1050, 117000 }, + { 460800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0C, 0, 1050, 117000 }, + { 499200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0D, 0, 1075, 117000 }, + { 537600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0E, 0, 1075, 117000 }, + { 576000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0F, 0, 1100, 117000 }, + { 614400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x10, 0, 1100, 117000 }, + { 652800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x11, 0, 1125, 117000 }, + { 691200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x12, 0, 1150, 117000 }, + { 729600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x13, 0, 1175, 117000 }, + { 768000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x14, 0, 1200, 128000 }, + { 806400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x15, 0, 1225, 128000 }, + { 844800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x16, 0, 1250, 128000 }, + { 883200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x17, 0, 1275, 128000 }, + { 921600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x18, 0, 1275, 128000 }, + { 960000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x19, 0, 1275, 128000 }, + { 998400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1A, 0, 1275, 128000 }, + { 1036800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1B, 0, 1275, 128000 }, + { 1075200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1C, 0, 1275, 128000 }, + { 1113600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1D, 0, 1275, 128000 }, + { 1152000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1E, 0, 1325, 128000 }, + { 1190400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1F, 0, 1325, 128000 }, + { 0 }, +}; + +/* select the standby clock that is used when switching scpll + * frequencies + * + * Currently: MPLL + */ +struct clkctl_acpu_speed *acpu_stby = &acpu_freq_tbl[2]; +#define IS_ACPU_STANDBY(x) (((x)->clk_cfg == acpu_stby->clk_cfg) && \ + ((x)->clk_sel == acpu_stby->clk_sel)) + +struct clkctl_acpu_speed *acpu_mpll = &acpu_freq_tbl[2]; + +#ifdef CONFIG_CPU_FREQ_TABLE +static struct cpufreq_frequency_table freq_table[ARRAY_SIZE(acpu_freq_tbl)]; + +static void 
__init acpuclk_init_cpufreq_table(void) +{ + int i; + int vdd; + for (i = 0; acpu_freq_tbl[i].acpu_khz; i++) { + freq_table[i].index = i; + freq_table[i].frequency = CPUFREQ_ENTRY_INVALID; + + /* Define speeds that we want to skip */ + if (acpu_freq_tbl[i].acpu_khz == 19200 || + acpu_freq_tbl[i].acpu_khz == 128000 || + acpu_freq_tbl[i].acpu_khz == 256000) + continue; + + vdd = acpu_freq_tbl[i].vdd; + /* Allow mpll and the first scpll speeds */ + if (acpu_freq_tbl[i].acpu_khz == acpu_mpll->acpu_khz || + acpu_freq_tbl[i].acpu_khz == 384000) { + freq_table[i].frequency = acpu_freq_tbl[i].acpu_khz; + continue; + } + + /* Add to the table */ + freq_table[i].frequency = acpu_freq_tbl[i].acpu_khz; + } + + freq_table[i].index = i; + freq_table[i].frequency = CPUFREQ_TABLE_END; + + cpufreq_frequency_table_get_attr(freq_table, smp_processor_id()); +} +#else +#define acpuclk_init_cpufreq_table() do {} while (0); +#endif + +struct clock_state { + struct clkctl_acpu_speed *current_speed; + struct mutex lock; + uint32_t acpu_switch_time_us; + uint32_t max_speed_delta_khz; + uint32_t vdd_switch_time_us; + unsigned long power_collapse_khz; + unsigned long wait_for_irq_khz; + struct clk* clk_ebi1; + struct regulator *regulator; + int (*acpu_set_vdd) (int mvolts); +}; + +static struct clock_state drv_state = { 0 }; + +struct clk *clk_get(struct device *dev, const char *id); +unsigned long clk_get_rate(struct clk *clk); +int clk_set_rate(struct clk *clk, unsigned long rate); + +static DEFINE_SPINLOCK(acpu_lock); + +#define PLLMODE_POWERDOWN 0 +#define PLLMODE_BYPASS 1 +#define PLLMODE_STANDBY 2 +#define PLLMODE_FULL_CAL 4 +#define PLLMODE_HALF_CAL 5 +#define PLLMODE_STEP_CAL 6 +#define PLLMODE_NORMAL 7 +#define PLLMODE_MASK 7 + +static void scpll_power_down(void) +{ + uint32_t val; + + /* Wait for any frequency switches to finish. */ + while (readl(SCPLL_STATUS_ADDR) & 0x1) + ; + + /* put the pll in standby mode */ + val = readl(SCPLL_CTL_ADDR); + val = (val & (~PLLMODE_MASK)) | PLLMODE_STANDBY; + writel(val, SCPLL_CTL_ADDR); + dmb(); + + /* wait to stabilize in standby mode */ + udelay(10); + + val = (val & (~PLLMODE_MASK)) | PLLMODE_POWERDOWN; + writel(val, SCPLL_CTL_ADDR); + dmb(); +} + +static void scpll_set_freq(uint32_t lval) +{ + uint32_t val, ctl; + + if (lval > 33) + lval = 33; + if (lval < 10) + lval = 10; + + /* wait for any calibrations or frequency switches to finish */ + while (readl(SCPLL_STATUS_ADDR) & 0x3) + ; + + ctl = readl(SCPLL_CTL_ADDR); + + if ((ctl & PLLMODE_MASK) != PLLMODE_NORMAL) { + /* put the pll in standby mode */ + writel((ctl & (~PLLMODE_MASK)) | PLLMODE_STANDBY, SCPLL_CTL_ADDR); + dmb(); + + /* wait to stabilize in standby mode */ + udelay(10); + + /* switch to 384 MHz */ + val = readl(SCPLL_FSM_CTL_EXT_ADDR); + val = (val & (~0x1FF)) | (0x0A << 3) | SHOT_SWITCH; + writel(val, SCPLL_FSM_CTL_EXT_ADDR); + dmb(); + + ctl = readl(SCPLL_CTL_ADDR); + writel(ctl | PLLMODE_NORMAL, SCPLL_CTL_ADDR); + dmb(); + + /* wait for frequency switch to finish */ + while (readl(SCPLL_STATUS_ADDR) & 0x1) + ; + + /* completion bit is not reliable for SHOT switch */ + udelay(25); + } + + /* write the new L val and switch mode */ + val = readl(SCPLL_FSM_CTL_EXT_ADDR); + val = (val & (~0x1FF)) | (lval << 3) | HOP_SWITCH; + writel(val, SCPLL_FSM_CTL_EXT_ADDR); + dmb(); + + ctl = readl(SCPLL_CTL_ADDR); + writel(ctl | PLLMODE_NORMAL, SCPLL_CTL_ADDR); + dmb(); + + /* wait for frequency switch to finish */ + while (readl(SCPLL_STATUS_ADDR) & 0x1) + ; +} + +/* this is still a bit weird... 
*/ +static void select_clock(unsigned src, unsigned config) +{ + uint32_t val; + + if (src == SRC_RAW) { + uint32_t sel = readl(SPSS_CLK_SEL_ADDR); + unsigned shift = (sel & 1) ? 8 : 0; + + /* set other clock source to the new configuration */ + val = readl(SPSS_CLK_CNTL_ADDR); + val = (val & (~(0x7F << shift))) | (config << shift); + writel(val, SPSS_CLK_CNTL_ADDR); + + /* switch to other clock source */ + writel(sel ^ 1, SPSS_CLK_SEL_ADDR); + + dmb(); /* necessary? */ + } + + /* switch to new source */ + val = readl(SPSS_CLK_SEL_ADDR) & (~6); + writel(val | ((src & 3) << 1), SPSS_CLK_SEL_ADDR); +} + +static int acpu_set_vdd(int vdd) +{ + int rc = 0; + + if (!drv_state.regulator || IS_ERR(drv_state.regulator)) { + drv_state.regulator = regulator_get(NULL, "acpu_vcore"); + if (IS_ERR(drv_state.regulator)) { + pr_info("acpu_set_vdd %d no regulator\n", vdd); + /* Assume that the PMIC supports scaling the processor + * to its maximum frequency at its default voltage. + */ + return -ENODEV; + } + pr_info("acpu_set_vdd got regulator\n"); + } + + rc = tps65023_set_dcdc1_level(drv_state.regulator->rdev, vdd); + + if (rc == -ENODEV && vdd <= CONFIG_QSD_PMIC_DEFAULT_DCDC1) + return 0; + + return rc; +} + +static int acpuclk_set_vdd_level(int vdd) +{ + if (drv_state.acpu_set_vdd) + return drv_state.acpu_set_vdd(vdd); + else { + /* Assume that the PMIC supports scaling the processor + * to its maximum frequency at its default voltage. + */ + return 0; + } +} + +int acpuclk_set_rate(unsigned long rate, enum setrate_reason reason) +{ + struct clkctl_acpu_speed *cur, *next; + unsigned long flags; + int rc = 0; + int freq_index = 0; + + cur = drv_state.current_speed; + + /* convert to KHz */ + rate /= 1000; + + DEBUG("acpuclk_set_rate(%d,%d)\n", (int) rate, reason); + + if (rate == 0 || rate == cur->acpu_khz) + return 0; + + next = acpu_freq_tbl; + for (;;) { + if (next->acpu_khz == rate) + break; + if (next->acpu_khz == 0) + return -EINVAL; + next++; + freq_index++; + } + + if (reason == SETRATE_CPUFREQ) { + mutex_lock(&drv_state.lock); +#ifdef CONFIG_MSM_CPU_AVS + /* Notify avs before changing frequency */ + rc = avs_adjust_freq(freq_index, 1); + if (rc) { + printk(KERN_ERR + "acpuclock: Unable to increase ACPU " + "vdd: %d.\n", (int) rate); + mutex_unlock(&drv_state.lock); + return rc; + } +#endif + /* Increase VDD if needed. 
*/ + if (next->vdd > cur->vdd) { + rc = acpuclk_set_vdd_level(next->vdd); + if (rc) { + pr_err("acpuclock: Unable to increase ACPU VDD from %d to %d setting rate to %d.\n", cur->vdd, next->vdd, (int) rate); + mutex_unlock(&drv_state.lock); + return rc; + } + } + } + + spin_lock_irqsave(&acpu_lock, flags); + + DEBUG("sel=%d cfg=%02x lv=%02x -> sel=%d, cfg=%02x lv=%02x\n", + cur->clk_sel, cur->clk_cfg, cur->sc_l_value, + next->clk_sel, next->clk_cfg, next->sc_l_value); + + if (next->clk_sel == SRC_SCPLL) { + if (!IS_ACPU_STANDBY(cur)) + select_clock(acpu_stby->clk_sel, acpu_stby->clk_cfg); + loops_per_jiffy = next->lpj; + scpll_set_freq(next->sc_l_value); + select_clock(SRC_SCPLL, 0); + } else { + loops_per_jiffy = next->lpj; + if (cur->clk_sel == SRC_SCPLL) { + select_clock(acpu_stby->clk_sel, acpu_stby->clk_cfg); + select_clock(next->clk_sel, next->clk_cfg); + scpll_power_down(); + } else { + select_clock(next->clk_sel, next->clk_cfg); + } + } + + drv_state.current_speed = next; + + spin_unlock_irqrestore(&acpu_lock, flags); + +#ifndef CONFIG_AXI_SCREEN_POLICY + if (reason == SETRATE_CPUFREQ || reason == SETRATE_PC) { + if (cur->axiclk_khz != next->axiclk_khz) + clk_set_rate(drv_state.clk_ebi1, next->axiclk_khz * 1000); + DEBUG("acpuclk_set_rate switch axi to %d\n", + clk_get_rate(drv_state.clk_ebi1)); + } +#endif + if (reason == SETRATE_CPUFREQ) { +#ifdef CONFIG_MSM_CPU_AVS + /* notify avs after changing frequency */ + rc = avs_adjust_freq(freq_index, 0); + if (rc) + printk(KERN_ERR + "acpuclock: Unable to drop ACPU vdd: %d.\n", (int) rate); +#endif + /* Drop VDD level if we can. */ + if (next->vdd < cur->vdd) { + rc = acpuclk_set_vdd_level(next->vdd); + if (rc) + pr_err("acpuclock: Unable to drop ACPU VDD from%d to %d setting rate to %d.\n", cur->vdd, next->vdd, (int) rate); + } + mutex_unlock(&drv_state.lock); + } + + return 0; +} + +static unsigned __init acpuclk_find_speed(void) +{ + uint32_t sel, val; + + sel = readl(SPSS_CLK_SEL_ADDR); + switch ((sel & 6) >> 1) { + case 1: + val = readl(SCPLL_FSM_CTL_EXT_ADDR); + val = (val >> 3) & 0x3f; + return val * 38400; + case 2: + return 128000; + default: + pr_err("acpu_find_speed: failed\n"); + BUG(); + return 0; + } +} + +#define PCOM_MODEM_PLL 0 +static int pll_request(unsigned id, unsigned on) +{ + on = !!on; + return msm_proc_comm(PCOM_CLKCTL_RPC_PLL_REQUEST, &id, &on); +} + +static void __init acpuclk_init(void) +{ + struct clkctl_acpu_speed *speed, *max_s; + unsigned init_khz; + + init_khz = acpuclk_find_speed(); + + /* request the modem pll, and then drop it. We don't want to keep a + * ref to it, but we do want to make sure that it is initialized at + * this point. The ARM9 will ensure that the MPLL is always on + * once it is fully booted, but it may not be up by the time we get + * to here. So, our pll_request for it will block until the mpll is + * actually up. We want it up because we will want to use it as a + * temporary step during frequency scaling. */ + pll_request(PCOM_MODEM_PLL, 1); + pll_request(PCOM_MODEM_PLL, 0); + + if (!(readl(MSM_CLK_CTL_BASE + 0x300) & 1)) { + pr_err("%s: MPLL IS NOT ON!!! RUN AWAY!!\n", __func__); + BUG(); + } + + /* Move to 768MHz for boot, which is a safe frequency + * for all versions of Scorpion at the moment. 
+ */ + speed = acpu_freq_tbl; + for (;;) { + if (speed->acpu_khz == 806400) + break; + if (speed->acpu_khz == 0) { + pr_err("acpuclk_init: cannot find 806MHz\n"); + BUG(); + } + speed++; + } + + if (init_khz != speed->acpu_khz) { + /* Bootloader needs to have SCPLL operating, but we're + * going to step over to the standby clock and make sure + * we select the right frequency on SCPLL and then + * step back to it, to make sure we're sane here. + */ + select_clock(acpu_stby->clk_sel, acpu_stby->clk_cfg); + scpll_power_down(); + scpll_set_freq(speed->sc_l_value); + select_clock(SRC_SCPLL, 0); + } + drv_state.current_speed = speed; + + for (speed = acpu_freq_tbl; speed->acpu_khz; speed++) + speed->lpj = cpufreq_scale(loops_per_jiffy, + init_khz, speed->acpu_khz); + + loops_per_jiffy = drv_state.current_speed->lpj; + + for (speed = acpu_freq_tbl; speed->acpu_khz != 0; speed++) + ; + + max_s = speed - 1; + max_axi_rate = max_s->axiclk_khz * 1000; +} + +unsigned long acpuclk_get_max_axi_rate(void) +{ + return max_axi_rate; +} +EXPORT_SYMBOL(acpuclk_get_max_axi_rate); + +unsigned long acpuclk_get_rate(void) +{ + return drv_state.current_speed->acpu_khz; +} + +uint32_t acpuclk_get_switch_time(void) +{ + return drv_state.acpu_switch_time_us; +} + +unsigned long acpuclk_power_collapse(int from_idle) +{ + int ret = acpuclk_get_rate(); + enum setrate_reason reason = (from_idle) ? SETRATE_PC_IDLE : SETRATE_PC; + if (ret > drv_state.power_collapse_khz) + acpuclk_set_rate(drv_state.power_collapse_khz * 1000, reason); + return ret * 1000; +} + +unsigned long acpuclk_get_wfi_rate(void) +{ + return drv_state.wait_for_irq_khz * 1000; +} + +unsigned long acpuclk_wait_for_irq(void) +{ + int ret = acpuclk_get_rate(); + if (ret > drv_state.wait_for_irq_khz) + acpuclk_set_rate(drv_state.wait_for_irq_khz * 1000, 1); + return ret * 1000; +} + +#ifdef CONFIG_MSM_CPU_AVS +static int __init acpu_avs_init(int (*set_vdd) (int), int khz) +{ + int i; + int freq_count = 0; + int freq_index = -1; + + for (i = 0; acpu_freq_tbl[i].acpu_khz; i++) { + freq_count++; + if (acpu_freq_tbl[i].acpu_khz == khz) + freq_index = i; + } + + return avs_init(set_vdd, freq_count, freq_index); +} +#endif + +void __init msm_acpu_clock_init(struct msm_acpu_clock_platform_data *clkdata) +{ + spin_lock_init(&acpu_lock); + mutex_init(&drv_state.lock); + + drv_state.acpu_switch_time_us = clkdata->acpu_switch_time_us; + drv_state.max_speed_delta_khz = clkdata->max_speed_delta_khz; + drv_state.vdd_switch_time_us = clkdata->vdd_switch_time_us; + drv_state.power_collapse_khz = clkdata->power_collapse_khz; + drv_state.wait_for_irq_khz = clkdata->wait_for_irq_khz; + drv_state.acpu_set_vdd = acpu_set_vdd; + + if (clkdata->mpll_khz) + acpu_mpll->acpu_khz = clkdata->mpll_khz; + + acpuclk_init(); + acpuclk_init_cpufreq_table(); + drv_state.clk_ebi1 = clk_get(NULL,"ebi1_clk"); +#ifndef CONFIG_AXI_SCREEN_POLICY + clk_set_rate(drv_state.clk_ebi1, drv_state.current_speed->axiclk_khz * 1000); +#endif +#ifdef CONFIG_MSM_CPU_AVS + if (!acpu_avs_init(drv_state.acpu_set_vdd, + drv_state.current_speed->acpu_khz)) { + /* avs init successful. 
avs will handle voltage changes */ + drv_state.acpu_set_vdd = NULL; + } +#endif +} diff --git a/arch/arm/mach-msm/acpuclock.h b/arch/arm/mach-msm/acpuclock.h index 415de2eb9a5ee..921b6c5a08bc5 100644 --- a/arch/arm/mach-msm/acpuclock.h +++ b/arch/arm/mach-msm/acpuclock.h @@ -20,11 +20,18 @@ #ifndef __ARCH_ARM_MACH_MSM_ACPUCLOCK_H #define __ARCH_ARM_MACH_MSM_ACPUCLOCK_H -int acpuclk_set_rate(unsigned long rate, int for_power_collapse); +enum setrate_reason { + SETRATE_CPUFREQ = 0, + SETRATE_SWFI, + SETRATE_PC, + SETRATE_PC_IDLE, +}; + +int acpuclk_set_rate(unsigned long rate, enum setrate_reason reason); +unsigned long acpuclk_power_collapse(int from_idle); unsigned long acpuclk_get_rate(void); uint32_t acpuclk_get_switch_time(void); unsigned long acpuclk_wait_for_irq(void); -unsigned long acpuclk_power_collapse(void); unsigned long acpuclk_get_wfi_rate(void); diff --git a/arch/arm/mach-msm/arch-init-scorpion.S b/arch/arm/mach-msm/arch-init-scorpion.S new file mode 100644 index 0000000000000..2eaf4eafb4480 --- /dev/null +++ b/arch/arm/mach-msm/arch-init-scorpion.S @@ -0,0 +1,484 @@ +/* + * Copyright (c) 2008, QUALCOMM Incorporated. + * Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * Copyright (c) 2008-2009, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google, Inc. nor the names of its contributors + * may be used to endorse or promote products derived from this + * software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + + +/* TODO: + * - style cleanup + * - do we need to do *all* of this at boot? 
+ */ + +.text +.code 32 + +#define DSB .byte 0x4f, 0xf0, 0x7f, 0xf5 +#define ISB .byte 0x6f, 0xf0, 0x7f, 0xf5 + +.equ TCSR_SPARE2, 0xA8700060 + +SET_SA: + ldr r0, =TCSR_SPARE2 + ldr r12, [r0] + + /* pack bits 8,2,0 into 2,1,0 */ + and r0, r12, #0x001 + and r1, r12, #0x004 + and r2, r12, #0x100 + orr r0, r1, lsr #1 + orr r0, r2, lsr #6 + + adr r1, table_l1_acc + mov r0, r0, lsl #2 + ldr r3, [r1, r0] + + /* write 3800XXXX to PVR0F0 */ + orr r0, r3, #0x38000000 + mcr p15, 0, r0, c15, c15, 0 + + /* write XXXX0000 to PVR2F0 */ + mov r1, r3, lsl #16 + mcr p15, 2, r1, c15, c15, 0 + + adr r1, table_l2_acc + and r0, r12, #0x008 + and r2, r12, #0x002 + orr r0, r0, r2, lsl #1 + ldr r2, [r1, r0] + + /* write to L2VR3F1 */ + mcr p15, 3, r2, c15, c15, 1 + + bx lr + +table_l1_acc: + .word 0xFC00 + .word 0xFC00 + .word 0x7C00 + .word 0xFC00 + .word 0x3C00 + .word 0x0400 + .word 0x0C00 + .word 0x1C00 + +table_l2_acc: + .word 0x010102 + .word 0x010102 + .word 0x010101 + .word 0x212102 + +.globl __cpu_early_init +__cpu_early_init: + //; Zero out r0 for use throughout this code. All other GPRs + //; (r1-r3) are set throughout this code to help establish + //; a consistent startup state for any code that follows. + //; Users should add code at the end of this routine to establish + //; their own stack address (r13), add translation page tables, enable + //; the caches, etc. + MOV r0, #0x0 + + + //; Remove hardcoded cache settings. appsbl_handler.s calls Set_SA + //; API to dynamically configure cache for slow/nominal/fast parts + + //; DCIALL to invalidate L2 cache bank (needs to be run 4 times, once per bank) + //; This must be done early in code (prior to enabling the caches) + MOV r1, #0x2 + MCR p15, 0, r1, c9, c0, 6 //; DCIALL bank D ([15:14] == 2'b00) + ORR r1, r1, #0x00004000 + MCR p15, 0, r1, c9, c0, 6 //; DCIALL bank C ([15:14] == 2'b01) + ADD r1, r1, #0x00004000 + MCR p15, 0, r1, c9, c0, 6 //; DCIALL bank B ([15:14] == 2'b10) + ADD r1, r1, #0x00004000 + MCR p15, 0, r1, c9, c0, 6 //; DCIALL bank A ([15:14] == 2'b11) + + //; Initialize the BPCR - setup Global History Mask (GHRM) to all 1's + //; and have all address bits (AM) participate. 
+ //; Different settings can be used to improve performance + // MOVW r1, #0x01FF +.word 0xe30011ff // hardcoded MOVW instruction due to lack of compiler support + // MOVT r1, #0x01FF +.word 0xe34011ff // hardcoded MOVT instruction due to lack of compiler support + MCR p15, 7, r1, c15, c0, 2 //; WCP15_BPCR + + + //; Initialize all I$ Victim Registers to 0 for startup + MCR p15, 0, r0, c9, c1, 0 //; WCP15_ICVIC0 r0 + MCR p15, 0, r0, c9, c1, 1 //; WCP15_ICVIC1 r0 + MCR p15, 0, r0, c9, c1, 2 //; WCP15_ICVIC2 r0 + MCR p15, 0, r0, c9, c1, 3 //; WCP15_ICVIC3 r0 + MCR p15, 0, r0, c9, c1, 4 //; WCP15_ICVIC4 r0 + MCR p15, 0, r0, c9, c1, 5 //; WCP15_ICVIC5 r0 + MCR p15, 0, r0, c9, c1, 6 //; WCP15_ICVIC5 r0 + MCR p15, 0, r0, c9, c1, 7 //; WCP15_ICVIC7 r0 + + //; Initialize all I$ Locked Victim Registers (Unlocked Floors) to 0 + MCR p15, 1, r0, c9, c1, 0 //; WCP15_ICFLOOR0 r0 + MCR p15, 1, r0, c9, c1, 1 //; WCP15_ICFLOOR1 r0 + MCR p15, 1, r0, c9, c1, 2 //; WCP15_ICFLOOR2 r0 + MCR p15, 1, r0, c9, c1, 3 //; WCP15_ICFLOOR3 r0 + MCR p15, 1, r0, c9, c1, 4 //; WCP15_ICFLOOR4 r0 + MCR p15, 1, r0, c9, c1, 5 //; WCP15_ICFLOOR5 r0 + MCR p15, 1, r0, c9, c1, 6 //; WCP15_ICFLOOR6 r0 + MCR p15, 1, r0, c9, c1, 7 //; WCP15_ICFLOOR7 r0 + + //; Initialize all D$ Victim Registers to 0 + MCR p15, 2, r0, c9, c1, 0 //; WP15_DCVIC0 r0 + MCR p15, 2, r0, c9, c1, 1 //; WP15_DCVIC1 r0 + MCR p15, 2, r0, c9, c1, 2 //; WP15_DCVIC2 r0 + MCR p15, 2, r0, c9, c1, 3 //; WP15_DCVIC3 r0 + MCR p15, 2, r0, c9, c1, 4 //; WP15_DCVIC4 r0 + MCR p15, 2, r0, c9, c1, 5 //; WP15_DCVIC5 r0 + MCR p15, 2, r0, c9, c1, 6 //; WP15_DCVIC6 r0 + MCR p15, 2, r0, c9, c1, 7 //; WP15_DCVIC7 r0 + + //; Initialize all D$ Locked VDCtim Registers (Unlocked Floors) to 0 + MCR p15, 3, r0, c9, c1, 0 //; WCP15_DCFLOOR0 r0 + MCR p15, 3, r0, c9, c1, 1 //; WCP15_DCFLOOR1 r0 + MCR p15, 3, r0, c9, c1, 2 //; WCP15_DCFLOOR2 r0 + MCR p15, 3, r0, c9, c1, 3 //; WCP15_DCFLOOR3 r0 + MCR p15, 3, r0, c9, c1, 4 //; WCP15_DCFLOOR4 r0 + MCR p15, 3, r0, c9, c1, 5 //; WCP15_DCFLOOR5 r0 + MCR p15, 3, r0, c9, c1, 6 //; WCP15_DCFLOOR6 r0 + MCR p15, 3, r0, c9, c1, 7 //; WCP15_DCFLOOR7 r0 + + //; Initialize ASID to zero + MCR p15, 0, r0, c13, c0, 1 //; WCP15_CONTEXTIDR r0 + + //; ICIALL to invalidate entire I-Cache + MCR p15, 0, r0, c7, c5, 0 //; ICIALLU + + //; DCIALL to invalidate entire D-Cache + MCR p15, 0, r0, c9, c0, 6 //; DCIALL r0 + + + //; The VBAR (Vector Base Address Register) should be initialized + //; early in your code. We are setting it to zero + MCR p15, 0, r0, c12, c0, 0 //; WCP15_VBAR r0 + + //; Ensure the MCR's above have completed their operation before continuing + DSB + ISB + + //;------------------------------------------------------------------- + //; There are a number of registers that must be set prior to enabling + //; the MMU. The DCAR is one of these registers. We are setting + //; it to zero (no access) to easily detect improper setup in subsequent + //; code sequences + //;------------------------------------------------------------------- + //; Setup DACR (Domain Access Control Register) to zero + MCR p15, 0, r0, c3, c0, 0 //; WCP15_DACR r0 + + //; Setup DCLKCR to allow normal D-Cache line fills + MCR p15, 1, r0, c9, c0, 7 //; WCP15_DCLKCR r0 + + //; Initialize the ADFSR and EFSR registers. 
+ MCR p15, 0, r0, c5, c1, 0 //; ADFSR + MCR p15, 7, r0, c15, c0, 1 //; EFSR + + //; Setup the TLBLKCR + //; Victim = 6'b000000; Floor = 6'b000000; + //; IASIDCFG = 2'b00 (State-Machine); IALLCFG = 2'b01 (Flash); BNA = 1'b0; + MOV r1, #0x02 + MCR p15, 0, r1, c10, c1, 3 //; WCP15_TLBLKCR r1 + + //;Make sure TLBLKCR is complete before continuing + ISB + + //; Invalidate the UTLB + MCR p15, 0, r0, c8, c7, 0 //; UTLBIALL + + //; Make sure UTLB request has been presented to macro before continuing + ISB + + //; setup L2CR1 to some default Instruction and data prefetching values + //; Users may want specific settings for various performance enhancements + //; In Halcyon we do not have broadcasting barriers. So we need to turn + // ; on bit 8 of L2CR1; which DBB:( Disable barrier broadcast ) + MOV r2, #0x100 + MCR p15, 3, r2, c15, c0, 3 //; WCP15_L2CR1 r0 + + + //; Enable Z bit to enable branch prediction (default is off) + MRC p15, 0, r2, c1, c0, 0 //; RCP15_SCTLR r2 + ORR r2, r2, #0x00000800 + MCR p15, 0, r2, c1, c0, 0 //; WCP15_SCTLR r2 + +#ifdef CONFIG_ARCH_QSD8X50 + /* disable predecode repair cache for thumb2 (DPRC, set bit 4 in PVR0F2) */ + mrc p15, 0, r2, c15, c15, 2 + orr r2, r2, #0x10 + mcr p15, 0, r2, c15, c15, 2 +#endif + + mov r1, lr + //; Make sure Link stack is initialized with branch and links to sequential addresses + //; This aids in creating a predictable startup environment + BL SEQ1 +SEQ1: BL SEQ2 +SEQ2: BL SEQ3 +SEQ3: BL SEQ4 +SEQ4: BL SEQ5 +SEQ5: BL SEQ6 +SEQ6: BL SEQ7 +SEQ7: BL SEQ8 +SEQ8: + mov lr, r1 + + //; REMOVE FOLLOWING THREE INSTRUCTIONS WHEN POWER COLLAPSE IS ENA + //;Make sure the DBGOSLSR[LOCK] bit is cleared to allow access to the debug registers + //; Writing anything but the "secret code" to the DBGOSLAR clears the DBGOSLSR[LOCK] bit + MCR p14, 0, r0, c1, c0, 4 //; WCP14_DBGOSLAR r0 + + + //; Read the DBGPRSR to clear the DBGPRSR[STICKYPD] + //; Any read to DBGPRSR clear the STICKYPD bit + //; ISB guarantees the read completes before attempting to + //; execute a CP14 instruction. + MRC p14, 0, r3, c1, c5, 4 //; RCP14_DBGPRSR r3 + ISB + + //; Initialize the Watchpoint Control Registers to zero (optional) + //;;; MCR p14, 0, r0, c0, c0, 7 ; WCP14_DBGWCR0 r0 + //;;; MCR p14, 0, r0, c0, c1, 7 ; WCP14_DBGWCR1 r0 + + + //;---------------------------------------------------------------------- + //; The saved Program Status Registers (SPSRs) should be setup + //; prior to any automatic mode switches. The following + //; code sets these registers up to a known state. Users will need to + //; customize these settings to meet their needs. + //;---------------------------------------------------------------------- + MOV r2, #0x1f + MOV r1, #0x17 //;ABT mode + msr cpsr_c, r1 //;ABT mode + msr spsr_cxfs, r2 //;clear the spsr + MOV r1, #0x1b //;UND mode + msr cpsr_c, r1 //;UND mode + msr spsr_cxfs, r2 //;clear the spsr + MOV r1, #0x11 //;FIQ mode + msr cpsr_c, r1 //;FIQ mode + msr spsr_cxfs, r2 //;clear the spsr + MOV r1, #0x12 //;IRQ mode + msr cpsr_c, r1 //;IRQ mode + msr spsr_cxfs, r2 //;clear the spsr + MOV r1, #0x16 //;Monitor mode + msr cpsr_c, r1 //;Monitor mode + msr spsr_cxfs, r2 //;clear the spsr + MOV r1, #0x13 //;SVC mode + msr cpsr_c, r1 //;SVC mode + msr spsr_cxfs, r2 //;clear the spsr + + + //;---------------------------------------------------------------------- + //; Enabling Error reporting is something users may want to do at + //; some other point in time. We have chosen some default settings + //; that should be reviewed. 
Most of these registers come up in an + //; unpredictable state after reset. + //;---------------------------------------------------------------------- +//;Start of error and control setting + + //; setup L2CR0 with various L2/TCM control settings + //; enable out of order bus attributes and error reporting + //; this register comes up unpredictable after reset + // MOVW r1, #0x0F0F +.word 0xe3001f0f // hardcoded MOVW instruction due to lack of compiler support + // MOVT r1, #0xC005 +.word 0xe34c1005 // hardcoded MOVW instruction due to lack of compiler support + MCR p15, 3, r1, c15, c0, 1 //; WCP15_L2CR0 r1 + + //; setup L2CPUCR + //; MOV r2, #0xFF + //; Enable I and D cache parity + //;L2CPUCR[7:5] = 3~Rh7 ~V enable parity error reporting for modified, + //;tag, and data parity errors + MOV r2, #0xe0 + MCR p15, 3, r2, c15, c0, 2 //; WCP15_L2CPUCR r2 + + //; setup SPCR + //; enable all error reporting (reset value is unpredicatble for most bits) + MOV r3, #0x0F + MCR p15, 0, r3, c9, c7, 0 //; WCP15_SPCR r3 + + //; setup DMACHCRs (reset value unpredictable) + //; control setting and enable all error reporting + MOV r1, #0x0F + + //; DMACHCR0 = 0000000F + MOV r2, #0x00 //; channel 0 + MCR p15, 0, r2, c11, c0, 0 //; WCP15_DMASELR r2 + MCR p15, 0, r1, c11, c0, 2 //; WCP15_DMACHCR r1 + + //; DMACHCR1 = 0000000F + MOV r2, #0x01 //; channel 1 + MCR p15, 0, r2, c11, c0, 0 //; WCP15_DMASELR r2 + MCR p15, 0, r1, c11, c0, 2 //; WCP15_DMACHCR r1 + + //; DMACHCR2 = 0000000F + MOV r2, #0x02 //; channel 2 + MCR p15, 0, r2, c11, c0, 0 //; WCP15_DMASELR r2 + MCR p15, 0, r1, c11, c0, 2 //; WCP15_DMACHCR r1 + + //; DMACHCR3 = 0000000F + MOV r2, #0x03 //; channel 3 + MCR p15, 0, r2, c11, c0, 0 //; WCP15_DMASELR r2 + MCR p15, 0, r1, c11, c0, 2 //; WCP15_DMACHCR r1 + + //; Set ACTLR (reset unpredictable) + //; Set AVIVT control, error reporting, etc. + //; MOV r3, #0x07 + //; Enable I and D cache parity + //;ACTLR[2:0] = 3'h7 - enable parity error reporting from L2/I$/D$) + //;ACTLR[5:4] = 2'h3 - enable parity + //;ACTLR[19:18] =2'h3 - always generate and check parity(when MMU disabled). + //;Value to be written #0xC0037 + // MOVW r3, #0x0037 +.word 0xe3003037 // hardcoded MOVW instruction due to lack of compiler support + // MOVT r3, #0x000C +.word 0xe340300c // hardcoded MOVW instruction due to lack of compiler support + //; read the version_id to determine if d-cache should be disabled + LDR r2, = 0xa8e00270 //;Read HW_REVISION_NUMBER, HWIO_HW_REVISION_NUMBER_ADDR + LDR r2,[r2] + AND r2,r2,#0xf0000000 //;hw_revision mask off bits 28-31 + //;if HW_revision is 1.0 or older, (revision==0) + CMP r2,#0 + //; Disable d-cache on older QSD8650 (Rev 1.0) silicon + orreq r3, r3, #0x4000 //;disable dcache + MCR p15, 0, r3, c1, c0, 1 //; WCP15_ACTLR r3 + +//;End of error and control setting + + //;---------------------------------------------------------------------- + //; Unlock ETM and read StickyPD to halt the ETM clocks from running. + //; This is required for power saving whether the ETM is used or not. 
+ //;---------------------------------------------------------------------- + + //;Clear ETMOSLSR[LOCK] bit + MOV r1, #0x00000000 + MCR p14, 1, r1, c1, c0, 4 //; WCP14_ETMOSLAR r1 + + //;Clear ETMPDSR[STICKYPD] bit + MRC p14, 1, r2, c1, c5, 4 //; RCP14_ETMPDSR r2 + +/* +#ifdef APPSBL_ETM_ENABLE + ;---------------------------------------------------------------------- + ; Optionally Enable the ETM (Embedded Trace Macro) which is used for debug + ;---------------------------------------------------------------------- + + ; enable ETM clock if disabled + MRC p15, 7, r1, c15, c0, 5 ; RCP15_CPMR r1 + ORR r1, r1, #0x00000008 + MCR p15, 7, r1, c15, c0, 5 ; WCP15_CPMR r1 + ISB + + ; set trigger event to counter1 being zero + MOV r3, #0x00000040 + MCR p14, 1, r3, c0, c2, 0 ; WCP14_ETMTRIGGER r3 + + ; clear ETMSR + MOV r2, #0x00000000 + MCR p14, 1, r2, c0, c4, 0 ; WCP14_ETMSR r2 + + ; clear trace enable single address comparator usage + MCR p14, 1, r2, c0, c7, 0 ; WCP14_ETMTECR2 r2 + + ; set trace enable to always + MOV r2, #0x0000006F + MCR p14, 1, r2, c0, c8, 0 ; WCP14_ETMTEEVR r2 + + ; clear trace enable address range comparator usage and exclude nothing + MOV r2, #0x01000000 + MCR p14, 1, r2, c0, c9, 0 ; WCP14_ETMTECR1 r2 + + ; set view data to always + MOV r2, #0x0000006F + MCR p14, 1, r2, c0, c12, 0 ; WCP14_ETMVDEVR r2 + + ; clear view data single address comparator usage + MOV r2, #0x00000000 + MCR p14, 1, r2, c0, c13, 0 ; WCP14_ETMVDCR1 r2 + + ; clear view data address range comparator usage and exclude nothing + MOV r2, #0x00010000 + MCR p14, 1, r2, c0, c15, 0 ; WCP14_ETMVDCR3 r2 + + ; set counter1 to 194 + MOV r2, #0x000000C2 + MCR p14, 1, r2, c0, c0, 5 ; WCP14_ETMCNTRLDVR1 r2 + + ; set counter1 to never reload + MOV r2, #0x0000406F + MCR p14, 1, r2, c0, c8, 5 ; WCP14_ETMCNTRLDEVR1 r2 + + ; set counter1 to decrement every cycle + MOV r2, #0x0000006F + MCR p14, 1, r2, c0, c4, 5 ; WCP14_ETMCNTENR1 r2 + + ; Set trace synchronization frequency 1024 bytes + MOV r2, #0x00000400 + MCR p14, 1, r2, c0, c8, 7 ; WCP14_ETMSYNCFR r2 + + ; Program etm control register + ; - Set the CPU to ETM clock ratio to 1:1 + ; - Set the ETM to perform data address tracing + MOV r2, #0x00002008 + MCR p14, 1, r2, c0, c0, 0 ; WCP14_ETMCR r2 + ISB +#endif *//* APPSBL_ETM_ENABLE */ + +/* +#ifdef APPSBL_VFP_ENABLE + ;---------------------------------------------------------------------- + ; Perform the following operations if you intend to make use of + ; the VFP/Neon unit. Note that the FMXR instruction requires a CPU ID + ; indicating the VFP unit is present (i.e.Cortex-A8). . + ; Some tools will require full double precision floating point support + ; which will become available in Scorpion pass 2 + ;---------------------------------------------------------------------- + ; allow full access to CP 10 and 11 space for VFP/NEON use + MRC p15, 0, r1, c1, c0, 2 ; Read CP Access Control Register + ORR r1, r1, #0x00F00000 ; enable full access for p10,11 + MCR p15, 0, r1, c1, c0, 2 ; Write CPACR + + ;make sure the CPACR is complete before continuing + ISB + + ; Enable VFP itself (certain OSes may want to dynamically set/clear + ; the enable bit based on the application being executed + MOV r1, #0x40000000 + FMXR FPEXC, r1 +#endif *//* APPSBL_VFP_ENABLE */ + + /* we have no stack, so just tail-call into the SET_SA routine... 
*/ + b SET_SA + +.ltorg diff --git a/arch/arm/mach-msm/atmega_microp_common.c b/arch/arm/mach-msm/atmega_microp_common.c new file mode 100644 index 0000000000000..21b50ea9759cf --- /dev/null +++ b/arch/arm/mach-msm/atmega_microp_common.c @@ -0,0 +1,834 @@ +/* arch/arm/mach-msm/atmega_microp_common.c + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "proc_comm.h" + + +#define I2C_READ_RETRY_TIMES 10 +#define I2C_WRITE_RETRY_TIMES 10 +#define MICROP_I2C_WRITE_BLOCK_SIZE 80 + +static struct i2c_client *private_microp_client; +static struct microp_ops *board_ops; + +static int microp_rw_delay; + +static char *hex2string(uint8_t *data, int len) +{ + static char buf[MICROP_I2C_WRITE_BLOCK_SIZE*4]; + int i; + + i = (sizeof(buf) - 1) / 4; + if (len > i) + len = i; + + for (i = 0; i < len; i++) + sprintf(buf + i * 4, "[%02X]", data[i]); + + return buf; +} + +static int i2c_read_block(struct i2c_client *client, uint8_t addr, + uint8_t *data, int length) +{ + int retry; + struct microp_i2c_client_data *cdata; + struct i2c_msg msgs[] = { + { + .addr = client->addr, + .flags = 0, + .len = 1, + .buf = &addr, + }, + { + .addr = client->addr, + .flags = I2C_M_RD, + .len = length, + .buf = data, + } + }; + + cdata = i2c_get_clientdata(client); + mutex_lock(&cdata->microp_i2c_rw_mutex); + msleep(1); + for (retry = 0; retry <= I2C_READ_RETRY_TIMES; retry++) { + if (i2c_transfer(client->adapter, msgs, 2) == 2) + break; + msleep(microp_rw_delay); + } + mutex_unlock(&cdata->microp_i2c_rw_mutex); + dev_dbg(&client->dev, "R [%02X] = %s\n", + addr, hex2string(data, length)); + + if (retry > I2C_READ_RETRY_TIMES) { + dev_err(&client->dev, "i2c_read_block retry over %d\n", + I2C_READ_RETRY_TIMES); + return -EIO; + } + + return 0; +} + +static int i2c_write_block(struct i2c_client *client, uint8_t addr, + uint8_t *data, int length) +{ + int retry; + uint8_t buf[MICROP_I2C_WRITE_BLOCK_SIZE]; + int i; + struct microp_i2c_client_data *cdata; + struct i2c_msg msg[] = { + { + .addr = client->addr, + .flags = 0, + .len = length + 1, + .buf = buf, + } + }; + + dev_dbg(&client->dev, "W [%02X] = %s\n", + addr, hex2string(data, length)); + + cdata = i2c_get_clientdata(client); + if (length + 1 > MICROP_I2C_WRITE_BLOCK_SIZE) { + dev_err(&client->dev, "i2c_write_block length too long\n"); + return -E2BIG; + } + + buf[0] = addr; + for (i = 0; i < length; i++) + buf[i+1] = data[i]; + + mutex_lock(&cdata->microp_i2c_rw_mutex); + msleep(1); + for (retry = 0; retry <= I2C_WRITE_RETRY_TIMES; retry++) { + if (i2c_transfer(client->adapter, msg, 1) == 1) + break; + msleep(microp_rw_delay); + } + if (retry > I2C_WRITE_RETRY_TIMES) { + dev_err(&client->dev, "i2c_write_block retry over %d\n", + I2C_WRITE_RETRY_TIMES); + mutex_unlock(&cdata->microp_i2c_rw_mutex); + return -EIO; + } + if (addr == MICROP_I2C_WCMD_LCM_BURST_EN) + udelay(500);/*1.5ms for microp SPI write */ + 
mutex_unlock(&cdata->microp_i2c_rw_mutex); + + return 0; +} + +int microp_i2c_read(uint8_t addr, uint8_t *data, int length) +{ + struct i2c_client *client = private_microp_client; + + if (!client) { + printk(KERN_ERR "%s: dataset: client is empty\n", __func__); + return -EIO; + } + + if (i2c_read_block(client, addr, data, length) < 0) { + dev_err(&client->dev, "%s: write microp i2c fail\n", __func__); + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL(microp_i2c_read); + +int microp_i2c_write(uint8_t addr, uint8_t *data, int length) +{ + struct i2c_client *client = private_microp_client; + + if (!client) { + printk(KERN_ERR "%s: dataset: client is empty\n", __func__); + return -EIO; + } + + if (i2c_write_block(client, addr, data, length) < 0) { + dev_err(&client->dev, "%s: write microp i2c fail\n", __func__); + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL(microp_i2c_write); + +void microp_mobeam_enable(int enable) +{ + if (enable) + microp_rw_delay = 500; + else + microp_rw_delay = 5; +} +EXPORT_SYMBOL(microp_mobeam_enable); + +void microp_register_ops(struct microp_ops *ops) +{ + board_ops = ops; +} + +int microp_function_check(struct i2c_client *client, uint8_t category) +{ + struct microp_i2c_platform_data *pdata; + int i, ret = -1; + + pdata = client->dev.platform_data; + + for (i = 0; i < pdata->num_functions; i++) { + if (pdata->microp_function[i].category == category) { + ret = i; + break; + } + } + if (ret < 0) + pr_err("%s: No function %d !!\n", __func__, category); + + return ret; +} + +int microp_write_interrupt(struct i2c_client *client, + uint16_t interrupt, uint8_t enable) +{ + uint8_t data[2], addr; + int ret = -1; + + if (enable) + addr = MICROP_I2C_WCMD_GPI_INT_CTL_EN; + else + addr = MICROP_I2C_WCMD_GPI_INT_CTL_DIS; + + data[0] = interrupt >> 8; + data[1] = interrupt & 0xFF; + ret = i2c_write_block(client, addr, data, 2); + + if (ret < 0) + dev_err(&client->dev, "%s: %s 0x%x interrupt failed\n", + __func__, (enable ? "enable" : "disable"), interrupt); + return ret; +} + +int microp_read_adc(uint8_t *data) +{ + struct i2c_client *client; + struct microp_i2c_client_data *cdata; + int ret = 0; + + client = private_microp_client; + cdata = i2c_get_clientdata(client); + + mutex_lock(&cdata->microp_adc_mutex); + if (i2c_write_block(client, MICROP_I2C_WCMD_READ_ADC_VALUE_REQ, + data, 2) < 0) { + dev_err(&client->dev, "%s: request adc fail\n", __func__); + ret = -EIO; + goto exit; + } + memset(data, 0x00, sizeof(data)); + if (i2c_read_block(client, MICROP_I2C_RCMD_ADC_VALUE, data, 2) < 0) { + dev_err(&client->dev, "%s: read adc fail\n", __func__); + ret = -EIO; + goto exit; + } +exit: + mutex_unlock(&cdata->microp_adc_mutex); + return ret; +} + +EXPORT_SYMBOL(microp_read_adc); + +int microp_read_gpio_status(uint8_t *data) +{ + struct i2c_client *client; + struct microp_i2c_platform_data *pdata; + int length; + + client = private_microp_client; + pdata = client->dev.platform_data; + + if (pdata->cmd_diff & CMD_83_DIFF) + length = 2; + else + length = 3; + memset(data, 0x00, sizeof(data)); + if (i2c_read_block(client, MICROP_I2C_RCMD_GPIO_STATUS, + data, length) < 0) { + dev_err(&client->dev, "%s: read gpio status fail\n", __func__); + return -EIO; + } + return 0; +} + +static void microp_pm_power_off(struct i2c_client *client) +{ + return; +} + +static void microp_reset_system(void) +{ + return; +} + +static int microp_oj_intr_enable(struct i2c_client *client, uint8_t enable) +{ + struct microp_i2c_client_data *cdata; + + cdata = i2c_get_clientdata(client); + enable = enable ? 
1 : 0; + return microp_write_interrupt(client, + cdata->int_pin.int_oj, enable); +} + +static int microp_spi_enable(struct i2c_client *client, uint8_t enable) +{ + uint8_t data; + int ret = 0; + + data = enable ? 1 : 0; + ret = i2c_write_block(client, MICROP_I2C_WCMD_SPI_EN, &data, 1); + if (ret != 0) + printk(KERN_ERR "%s: set SPI %s fail\n", __func__, + (enable ? "enable" : "disable")); + + return ret; +} + +int microp_spi_vote_enable(int spi_device, uint8_t enable) +{ + struct i2c_client *client = private_microp_client; + struct microp_i2c_client_data *cdata; + struct microp_i2c_platform_data *pdata; + uint8_t data[2] = {0, 0}; + int ret = 0; + + if (!client) { + printk(KERN_ERR "%s: dataset: client is empty\n", __func__); + return -EIO; + } + cdata = i2c_get_clientdata(client); + pdata = client->dev.platform_data; + + if (spi_device == SPI_OJ) + microp_oj_intr_enable(client, enable); + + mutex_lock(&cdata->microp_adc_mutex); + if (enable) + cdata->spi_devices_vote |= spi_device; + else + cdata->spi_devices_vote &= ~spi_device; + + ret = i2c_read_block(client, MICROP_I2C_RCMD_SPI_BL_STATUS, data, 2); + if (ret != 0) { + printk(KERN_ERR "%s: read SPI/BL status fail\n", __func__); + goto exit; + } + + if ((data[1] & 0x01) == + ((pdata->spi_devices & cdata->spi_devices_vote) ? 1 : 0)) + goto exit; + + if (pdata->spi_devices & cdata->spi_devices_vote) + enable = 1; + else + enable = 0; + mutex_unlock(&cdata->microp_adc_mutex); + + ret = microp_spi_enable(client, enable); + return ret; + +exit: + mutex_unlock(&cdata->microp_adc_mutex); + return ret; + +} + +EXPORT_SYMBOL(microp_spi_vote_enable); + +static void microp_reset_microp(struct i2c_client *client) +{ + struct microp_i2c_platform_data *pdata; + + pdata = client->dev.platform_data; + + gpio_set_value(pdata->gpio_reset, 0); + udelay(120); + gpio_set_value(pdata->gpio_reset, 1); + mdelay(5); +} + +static ssize_t microp_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct microp_i2c_client_data *cdata; + + cdata = i2c_get_clientdata(to_i2c_client(dev)); + + return sprintf(buf, "%04X\n", cdata->version); +} + +static DEVICE_ATTR(version, 0644, microp_version_show, NULL); + +static ssize_t microp_reset_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client; + struct microp_i2c_client_data *cdata; + int val; + + val = -1; + sscanf(buf, "%u", &val); + if (val != 1) + return -EINVAL; + + client = to_i2c_client(dev); + cdata = i2c_get_clientdata(client); + + microp_reset_microp(client); + if (board_ops->init_microp_func) + board_ops->init_microp_func(client); + + return count; +} + +static DEVICE_ATTR(reset, 0644, NULL, microp_reset_store); + +static ssize_t microp_gpio_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + uint8_t data[3] = {0, 0, 0}; + int ret; + + microp_read_gpio_status(data); + ret = sprintf(buf, "PB = 0x%x, PC = 0x%x, PD = 0x%x\n", + data[0], data[1], data[2]); + + return ret; +} + +static ssize_t microp_gpio_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client; + struct microp_i2c_client_data *cdata; + int enable = 0, tmp[3] = {0, 0, 0}; + uint8_t addr, data[3] = {0, 0, 0}; + + sscanf(buf, "%d %d %d %d", &enable, &tmp[0], &tmp[1], &tmp[2]); + + if (enable != 0 && enable != 1) + return -EINVAL; + + client = to_i2c_client(dev); + cdata = i2c_get_clientdata(client); + + if (enable) + addr = MICROP_I2C_WCMD_GPO_LED_STATUS_EN; + else + 
addr = MICROP_I2C_WCMD_GPO_LED_STATUS_DIS; + data[0] = (uint8_t)tmp[0]; + data[1] = (uint8_t)tmp[1]; + data[2] = (uint8_t)tmp[2]; + i2c_write_block(client, addr, data, 3); + + return count; +} + +static DEVICE_ATTR(gpio, 0644, microp_gpio_show, + microp_gpio_store); + +static irqreturn_t microp_intr_irq_handler(int irq, void *dev_id) +{ + struct i2c_client *client; + struct microp_i2c_client_data *cdata; + + client = to_i2c_client(dev_id); + cdata = i2c_get_clientdata(client); + + disable_irq_nosync(client->irq); + queue_work(cdata->microp_queue, &cdata->microp_intr_work); + return IRQ_HANDLED; +} + +static void microp_int_dispatch(u32 status) +{ + unsigned int mask; + int irq; + + while (status) { + mask = status & -status; + irq = fls(mask) - 1; + status &= ~mask; + generic_handle_irq(FIRST_MICROP_IRQ + irq); + } +} + +static enum hrtimer_restart hr_dispath_irq_func(struct hrtimer *data) +{ + struct i2c_client *client = private_microp_client; + struct microp_i2c_client_data *cdata; + + cdata = i2c_get_clientdata(client); + microp_int_dispatch(cdata->intr_status); + cdata->intr_status = 0; + return HRTIMER_NORESTART; +} + + +static void microp_intr_work_func(struct work_struct *work) +{ + struct i2c_client *client = private_microp_client; + struct microp_i2c_client_data *cdata; + struct microp_i2c_platform_data *pdata; + uint8_t data[3]; + uint16_t intr_status = 0; + int sd_insert = 0; + ktime_t zero_debounce; + + zero_debounce = ktime_set(0, 0); /* No debounce time */ + + if (!client) { + printk(KERN_ERR "%s: dataset: client is empty\n", __func__); + return; + } + + cdata = i2c_get_clientdata(client); + pdata = client->dev.platform_data; + + memset(data, 0x00, sizeof(data)); + if (i2c_read_block(client, MICROP_I2C_RCMD_GPI_INT_STATUS, + data, 2) < 0) + dev_err(&client->dev, "%s: read interrupt status fail\n", + __func__); + intr_status = data[0]<<8 | data[1]; + if (i2c_write_block(client, MICROP_I2C_WCMD_GPI_INT_STATUS_CLR, + data, 2) < 0) + dev_err(&client->dev, "%s: clear interrupt status fail\n", + __func__); + + if (intr_status & cdata->int_pin.int_reset) { + dev_info(&client->dev, "Reset button is pressed\n"); + microp_reset_system(); + } + if (intr_status & cdata->int_pin.int_simcard) { + dev_info(&client->dev, "SIM Card is plugged/unplugged\n"); + microp_pm_power_off(client); + } + + if (intr_status & cdata->int_pin.int_sdcard) { + dev_info(&client->dev, "SD Card is plugged/unplugged\n"); + msleep(300); + microp_read_gpio_status(data); + sd_insert = ((data[0] << 16 | data[1] << 8 | data[2]) + & cdata->gpio.sdcard) ? 
1 : 0; + if (sd_insert != cdata->sdcard_is_in) { + cdata->sdcard_is_in = sd_insert; + cnf_driver_event("sdcard_detect", &cdata->sdcard_is_in); + } + } + + cdata->intr_status = intr_status; + hrtimer_start(&cdata->gen_irq_timer, zero_debounce, HRTIMER_MODE_REL); + enable_irq(client->irq); +} + +#ifdef CONFIG_HAS_EARLYSUSPEND +static void microp_early_suspend(struct early_suspend *h) +{ + struct microp_i2c_client_data *cdata; + struct i2c_client *client = private_microp_client; + struct microp_i2c_platform_data *pdata; + + if (!client) { + printk(KERN_ERR "%s: dataset: client is empty\n", __func__); + return; + } + cdata = i2c_get_clientdata(client); + pdata = client->dev.platform_data; + + atomic_set(&cdata->microp_is_suspend, 1); +} + +static void microp_late_resume(struct early_suspend *h) +{ + struct i2c_client *client = private_microp_client; + struct microp_i2c_client_data *cdata; + struct microp_i2c_platform_data *pdata; + + if (!client) { + printk(KERN_ERR "%s: dataset: client is empty\n", __func__); + return; + } + cdata = i2c_get_clientdata(client); + pdata = client->dev.platform_data; + + atomic_set(&cdata->microp_is_suspend, 0); +} +#endif + +static int __devexit microp_i2c_remove(struct i2c_client *client) +{ + struct microp_i2c_platform_data *pdata; + struct microp_i2c_client_data *cdata; + + pdata = client->dev.platform_data; + cdata = i2c_get_clientdata(client); + +#ifdef CONFIG_HAS_EARLYSUSPEND + unregister_early_suspend(&cdata->early_suspend); +#endif + + if (client->irq) + free_irq(client->irq, &client->dev); + + gpio_free(pdata->gpio_reset); + + device_remove_file(&client->dev, &dev_attr_reset); + device_remove_file(&client->dev, &dev_attr_version); + device_remove_file(&client->dev, &dev_attr_gpio); + destroy_workqueue(cdata->microp_queue); + kfree(cdata); + + return 0; +} + +static int microp_i2c_suspend(struct i2c_client *client, + pm_message_t mesg) +{ + return 0; +} + +static int microp_i2c_resume(struct i2c_client *client) +{ + return 0; +} + +static void register_microp_devices(struct platform_device *devices, int num) +{ + int i; + for (i = 0; i < num; i++) { + platform_device_register(devices + i); + dev_set_drvdata(&(devices + i)->dev, private_microp_client); + } +} + +static int microp_i2c_probe(struct i2c_client *client + , const struct i2c_device_id *id) +{ + struct microp_i2c_platform_data *pdata; + struct microp_i2c_client_data *cdata; + uint8_t data[6]; + int ret; + + cdata = kzalloc(sizeof(struct microp_i2c_client_data), GFP_KERNEL); + if (!cdata) { + ret = -ENOMEM; + dev_err(&client->dev, "failed on allocat cdata\n"); + goto err_cdata; + } + + i2c_set_clientdata(client, cdata); + + mutex_init(&cdata->microp_adc_mutex); + mutex_init(&cdata->microp_i2c_rw_mutex); + + private_microp_client = client; + pdata = client->dev.platform_data; + if (!pdata) { + ret = -EBUSY; + dev_err(&client->dev, "failed on get pdata\n"); + goto err_exit; + } + pdata->dev_id = (void *)&client->dev; + microp_rw_delay = 5; + + ret = i2c_read_block(client, MICROP_I2C_RCMD_VERSION, data, 2); + if (ret || !(data[0] && data[1])) { + ret = -ENODEV; + dev_err(&client->dev, "failed on get microp version\n"); + goto err_exit; + } + dev_info(&client->dev, "microp version [%02X][%02X]\n", + data[0], data[1]); + + ret = gpio_request(pdata->gpio_reset, "atmega_microp"); + if (ret < 0) { + dev_err(&client->dev, "failed on request gpio reset\n"); + goto err_exit; + } + ret = gpio_direction_output(pdata->gpio_reset, 1); + if (ret < 0) { + dev_err(&client->dev, + "failed on gpio_direction_output 
reset\n"); + goto err_gpio_reset; + } + + cdata->version = data[0] << 8 | data[1]; + atomic_set(&cdata->microp_is_suspend, 0); + + cdata->spi_devices_vote = pdata->spi_devices_init; + + cdata->intr_status = 0; + hrtimer_init(&cdata->gen_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + cdata->gen_irq_timer.function = hr_dispath_irq_func; + + cdata->microp_queue = create_singlethread_workqueue("microp_work_q"); + if (cdata->microp_queue == NULL) { + ret = -ENOMEM; + goto err_create_work_queue; + } + + if (client->irq) { + INIT_WORK(&cdata->microp_intr_work, microp_intr_work_func); + + ret = request_irq(client->irq, microp_intr_irq_handler, + IRQF_TRIGGER_LOW, "microp_intrrupt", + &client->dev); + if (ret) { + dev_err(&client->dev, "request_irq failed\n"); + goto err_intr; + } + ret = set_irq_wake(client->irq, 1); + if (ret) { + dev_err(&client->dev, "set_irq_wake failed\n"); + goto err_intr; + } + } + +#ifdef CONFIG_HAS_EARLYSUSPEND + cdata->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1; + cdata->early_suspend.suspend = microp_early_suspend; + cdata->early_suspend.resume = microp_late_resume; + register_early_suspend(&cdata->early_suspend); +#endif + ret = device_create_file(&client->dev, &dev_attr_reset); + ret = device_create_file(&client->dev, &dev_attr_version); + ret = device_create_file(&client->dev, &dev_attr_gpio); + + register_microp_devices(pdata->microp_devices, pdata->num_devices); + if (board_ops->init_microp_func) { + ret = board_ops->init_microp_func(client); + if (ret) { + dev_err(&client->dev, + "failed on microp function initialize\n"); + goto err_fun_init; + } + } + + return 0; + +err_fun_init: +#ifdef CONFIG_HAS_EARLYSUSPEND + unregister_early_suspend(&cdata->early_suspend); +#endif + device_remove_file(&client->dev, &dev_attr_reset); + device_remove_file(&client->dev, &dev_attr_version); + device_remove_file(&client->dev, &dev_attr_gpio); + destroy_workqueue(cdata->microp_queue); +err_intr: +err_create_work_queue: + kfree(cdata); +err_gpio_reset: + gpio_free(pdata->gpio_reset); +err_exit: + private_microp_client = NULL; +err_cdata: + return ret; +} + +static const struct i2c_device_id microp_i2c_id[] = { + { MICROP_I2C_NAME, 0 }, + { } +}; + +static struct i2c_driver microp_i2c_driver = { + .driver = { + .name = MICROP_I2C_NAME, + }, + .id_table = microp_i2c_id, + .probe = microp_i2c_probe, + .suspend = microp_i2c_suspend, + .resume = microp_i2c_resume, + .remove = __devexit_p(microp_i2c_remove), +}; + +static void microp_irq_ack(unsigned int irq) +{ + ; +} + +static void microp_irq_mask(unsigned int irq) +{ + ; +} + +static void microp_irq_unmask(unsigned int irq) +{ + ; +} + +static struct irq_chip microp_irq_chip = { + .name = "microp", + .disable = microp_irq_mask, + .ack = microp_irq_ack, + .mask = microp_irq_mask, + .unmask = microp_irq_unmask, +}; + +static int __init microp_common_init(void) +{ + int ret; + int n, MICROP_IRQ_END = FIRST_MICROP_IRQ + NR_MICROP_IRQS; + + for (n = FIRST_MICROP_IRQ; n < MICROP_IRQ_END; n++) { + set_irq_chip(n, µp_irq_chip); + set_irq_handler(n, handle_level_irq); + set_irq_flags(n, IRQF_VALID); + } + + ret = i2c_add_driver(µp_i2c_driver); + if (ret) + return ret; + return 0; +} + +static void __exit microp_common_exit(void) +{ + i2c_del_driver(µp_i2c_driver); +} + +module_init(microp_common_init); +module_exit(microp_common_exit); + +MODULE_AUTHOR("Eric Huang "); +MODULE_DESCRIPTION("Atmega MicroP driver"); +MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-msm/avs.c b/arch/arm/mach-msm/avs.c new file mode 100644 index 
0000000000000..ab857f644a34d --- /dev/null +++ b/arch/arm/mach-msm/avs.c @@ -0,0 +1,387 @@ +/* + * Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Code Aurora Forum nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * Alternatively, provided that this notice is retained in full, this software + * may be relicensed by the recipient under the terms of the GNU General Public + * License version 2 ("GPL") and only version 2, in which case the provisions of + * the GPL apply INSTEAD OF those given above. If the recipient relicenses the + * software under the GPL, then the identification text in the MODULE_LICENSE + * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a + * recipient changes the license terms to the GPL, subsequent recipients shall + * not relicense under alternate licensing terms, including the BSD or dual + * BSD/GPL terms. In addition, the following license statement immediately + * below and between the words START and END shall also then apply when this + * software is relicensed under the GPL: + * + * START + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 and only version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * END + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "avs.h" + +#define AVSDSCR_INPUT 0x01004860 /* magic # from circuit designer */ +#define TSCSR_INPUT 0x00000001 /* enable temperature sense */ + +#define TEMPRS 16 /* total number of temperature regions */ +#define GET_TEMPR() (avs_get_tscsr() >> 28) /* scale TSCSR[CTEMP] to regions */ + +struct mutex avs_lock; + +static struct avs_state_s +{ + u32 freq_cnt; /* Frequencies supported list */ + short *avs_v; /* Dyanmically allocated storage for + * 2D table of voltages over temp & + * freq. Used as a set of 1D tables. + * Each table is for a single temp. + * For usage see avs_get_voltage + */ + int (*set_vdd) (int); /* Function Ptr for setting voltage */ + int changing; /* Clock frequency is changing */ + u32 freq_idx; /* Current frequency index */ + int vdd; /* Current ACPU voltage */ +} avs_state; + +struct clkctl_acpu_speed { + unsigned acpu_khz; + int min_vdd; + int max_vdd; +}; + + +struct clkctl_acpu_speed acpu_vdd_tbl[] = { + { 19200, 950, 975 }, + { 128000, 950, 975 }, + { 245000, 950, 1025 }, + { 384000, 950, 1025 }, + { 422400, 950, 1050 }, + { 460800, 975, 1050 }, + { 499200, 1000, 1075 }, + { 537600, 1000, 1075 }, + { 576000, 1025, 1100 }, + { 614400, 1050, 1100 }, + { 652800, 1075, 1125 }, + { 691200, 1100, 1150 }, + { 729600, 1125, 1175 }, + { 768000, 1150, 1200 }, + { 806400, 1175, 1225 }, + { 844800, 1200, 1250 }, + { 883200, 1200, 1275 }, + { 921600, 1225, 1275 }, + { 960000, 1225, 1275 }, + { 998400, 1225, 1275 }, + { 1036800, 1275, 1275 }, + { 1075200, 1275, 1275 }, + { 1113600, 1275, 1275 }, + { 1152000, 1300, VOLTAGE_MAX }, + { 1190400, 1300, VOLTAGE_MAX }, + { 0 }, +}; + +/* + * Update the AVS voltage vs frequency table, for current temperature + * Adjust based on the AVS delay circuit hardware status + */ +static void avs_update_voltage_table(short *vdd_table) +{ + u32 avscsr; + int cpu; + int vu; + int l2; + int i; + u32 cur_freq_idx; + short cur_voltage; + + cur_freq_idx = avs_state.freq_idx; + cur_voltage = avs_state.vdd; + + avscsr = avs_test_delays(); +/* AVSDEBUG("avscsr=%x, avsdscr=%x\n", avscsr, avs_get_avsdscr());*/ + + /* + * Read the results for the various unit's AVS delay circuits + * 2=> up, 1=>down, 0=>no-change + */ + cpu = ((avscsr >> 23) & 2) + ((avscsr >> 16) & 1); + vu = ((avscsr >> 28) & 2) + ((avscsr >> 21) & 1); + l2 = ((avscsr >> 29) & 2) + ((avscsr >> 22) & 1); + + if ((cpu == 3) || (vu == 3) || (l2 == 3)) { + printk(KERN_ERR "AVS: Dly Synth O/P error\n"); + } else if ((cpu == 2) || (l2 == 2) || (vu == 2)) { + /* + * even if one oscillator asks for up, increase the voltage, + * as its an indication we are running outside the + * critical acceptable range of v-f combination. 
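		 *
		 * (Editor's note, not part of the original comment: each of
		 * cpu/vu/l2 above is a two-bit code assembled from AVSCSR bits,
		 * where 2 = raise the voltage, 1 = voltage may be lowered,
		 * 0 = no change and 3 = delay synthesizer error.  A worked
		 * example of this "up" path: with VOLTAGE_STEP = 5 mV and
		 * cur_voltage = 1225 mV, every vdd_table[] entry becomes
		 * min(1230, acpu_vdd_tbl[i].max_vdd), i.e. 1230 mV for the
		 * 998400 kHz row but only 975 mV for the 19200 kHz row.)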
+ */ + AVSDEBUG("cpu=%d l2=%d vu=%d\n", cpu, l2, vu); + AVSDEBUG("Voltage up at %d\n", cur_freq_idx); + + if (cur_voltage >= VOLTAGE_MAX || cur_voltage >= acpu_vdd_tbl[cur_freq_idx].max_vdd) + AVSDEBUG(KERN_ERR + "AVS: Voltage can not get high enough!\n"); + + /* Raise the voltage for all frequencies */ + for (i = 0; i < avs_state.freq_cnt; i++) { + vdd_table[i] = cur_voltage + VOLTAGE_STEP; + if (vdd_table[i] > VOLTAGE_MAX) + vdd_table[i] = VOLTAGE_MAX; + else if (vdd_table[i] > acpu_vdd_tbl[i].max_vdd) + vdd_table[i] = acpu_vdd_tbl[i].max_vdd; + } + } else if ((cpu == 1) && (l2 == 1) && (vu == 1)) { + if ((cur_voltage - VOLTAGE_STEP >= VOLTAGE_MIN) && + (cur_voltage - VOLTAGE_STEP >= acpu_vdd_tbl[cur_freq_idx].min_vdd) && + (cur_voltage <= vdd_table[cur_freq_idx])) { + vdd_table[cur_freq_idx] = cur_voltage - VOLTAGE_STEP; + AVSDEBUG("Voltage down for %d and lower levels\n", + cur_freq_idx); + + /* clamp to this voltage for all lower levels */ + for (i = 0; i < cur_freq_idx; i++) { + if (vdd_table[i] > vdd_table[cur_freq_idx]) + vdd_table[i] = vdd_table[cur_freq_idx]; + } + } + } +} + +/* + * Return the voltage for the target performance freq_idx and optionally + * use AVS hardware to check the present voltage freq_idx + */ +static short avs_get_target_voltage(int freq_idx, bool update_table) +{ + unsigned cur_tempr = GET_TEMPR(); + unsigned temp_index = cur_tempr*avs_state.freq_cnt; + + /* Table of voltages vs frequencies for this temp */ + short *vdd_table = avs_state.avs_v + temp_index; + + if (update_table) + avs_update_voltage_table(vdd_table); + + if (vdd_table[freq_idx] > acpu_vdd_tbl[freq_idx].max_vdd) { + AVSDEBUG("%dmV too high for %d.\n", vdd_table[freq_idx], acpu_vdd_tbl[freq_idx].acpu_khz); + vdd_table[freq_idx] = acpu_vdd_tbl[freq_idx].max_vdd; + } + if (vdd_table[freq_idx] < acpu_vdd_tbl[freq_idx].min_vdd) { + AVSDEBUG("%dmV too low for %d.\n", vdd_table[freq_idx], acpu_vdd_tbl[freq_idx].acpu_khz); + vdd_table[freq_idx] = acpu_vdd_tbl[freq_idx].min_vdd; + } + + return vdd_table[freq_idx]; +} + + +/* + * Set the voltage for the freq_idx and optionally + * use AVS hardware to update the voltage + */ +static int avs_set_target_voltage(int freq_idx, bool update_table) +{ + int ctr = 5, rc = 0, new_voltage; + + if (freq_idx < 0 || freq_idx >= avs_state.freq_cnt) { + AVSDEBUG("Out of range :%d\n", freq_idx); + return -EINVAL; + } + + new_voltage = avs_get_target_voltage(freq_idx, update_table); + if (avs_state.vdd != new_voltage) { + AVSDEBUG("AVS setting V to %d mV @%d MHz\n", + new_voltage, acpu_vdd_tbl[freq_idx].acpu_khz / 1000); + rc = avs_state.set_vdd(new_voltage); + while (rc && ctr) { + rc = avs_state.set_vdd(new_voltage); + ctr--; + if (rc) { + AVSDEBUG(KERN_ERR "avs_set_target_voltage: Unable to set V to %d mV (attempt: %d)\n", new_voltage, 5 - ctr); + mdelay(1); + } + } + if (rc) + return rc; + avs_state.vdd = new_voltage; + } + return rc; +} + +/* + * Notify avs of clk frquency transition begin & end + */ +int avs_adjust_freq(u32 freq_idx, int begin) +{ + int rc = 0; + + if (!avs_state.set_vdd) { + /* AVS not initialized */ + return 0; + } + + if (freq_idx < 0 || freq_idx >= avs_state.freq_cnt) { + AVSDEBUG("Out of range :%d\n", freq_idx); + return -EINVAL; + } + + mutex_lock(&avs_lock); + if ((begin && (freq_idx > avs_state.freq_idx)) || + (!begin && (freq_idx < avs_state.freq_idx))) { + /* Update voltage before increasing frequency & + * after decreasing frequency + */ + rc = avs_set_target_voltage(freq_idx, 0); + if (rc) + goto aaf_out; + + avs_state.freq_idx = 
freq_idx; + } + avs_state.changing = begin; +aaf_out: + mutex_unlock(&avs_lock); + + return rc; +} + + +static struct delayed_work avs_work; +static struct workqueue_struct *kavs_wq; +#define AVS_DELAY ((CONFIG_HZ * 50 + 999) / 1000) + +static void do_avs_timer(struct work_struct *work) +{ + int cur_freq_idx; + + mutex_lock(&avs_lock); + if (!avs_state.changing) { + /* Only adjust the voltage if clk is stable */ + cur_freq_idx = avs_state.freq_idx; + avs_set_target_voltage(cur_freq_idx, 1); + } + mutex_unlock(&avs_lock); + queue_delayed_work_on(0, kavs_wq, &avs_work, AVS_DELAY); +} + + +static void __init avs_timer_init(void) +{ + INIT_DELAYED_WORK_DEFERRABLE(&avs_work, do_avs_timer); + queue_delayed_work_on(0, kavs_wq, &avs_work, AVS_DELAY); +} + +static void __exit avs_timer_exit(void) +{ + cancel_delayed_work(&avs_work); +} + +static int __init avs_work_init(void) +{ + kavs_wq = create_workqueue("avs"); + if (!kavs_wq) { + printk(KERN_ERR "AVS initialization failed\n"); + return -EFAULT; + } + printk(KERN_ERR "AVS initialization success\n"); + avs_timer_init(); + + return 1; +} + +static void __exit avs_work_exit(void) +{ + avs_timer_exit(); + destroy_workqueue(kavs_wq); +} + +int __init avs_init(int (*set_vdd)(int), u32 freq_cnt, u32 freq_idx) +{ + int i; + + mutex_init(&avs_lock); + + if (freq_cnt == 0) + return -EINVAL; + + avs_state.freq_cnt = freq_cnt; + + if (freq_idx >= avs_state.freq_cnt) + return -EINVAL; + + avs_state.avs_v = kmalloc(TEMPRS * avs_state.freq_cnt * + sizeof(avs_state.avs_v[0]), GFP_KERNEL); + + if (avs_state.avs_v == 0) + return -ENOMEM; + + for (i = 0; i < TEMPRS*avs_state.freq_cnt; i++) + avs_state.avs_v[i] = VOLTAGE_MAX; + + avs_reset_delays(AVSDSCR_INPUT); + avs_set_tscsr(TSCSR_INPUT); + + avs_state.set_vdd = set_vdd; + avs_state.changing = 0; + avs_state.freq_idx = -1; + avs_state.vdd = -1; + avs_adjust_freq(freq_idx, 0); + + avs_work_init(); + + return 0; +} + +void __exit avs_exit() +{ + avs_work_exit(); + + kfree(avs_state.avs_v); +} + + diff --git a/arch/arm/mach-msm/avs.h b/arch/arm/mach-msm/avs.h new file mode 100644 index 0000000000000..4e6898359842c --- /dev/null +++ b/arch/arm/mach-msm/avs.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef AVS_H +#define AVS_H + +#define VOLTAGE_MIN 950 /* mV */ +#define VOLTAGE_MAX 1275 +#define VOLTAGE_STEP 5 + +int __init avs_init(int (*set_vdd)(int), u32 freq_cnt, u32 freq_idx); +void __exit avs_exit(void); + +int avs_adjust_freq(u32 freq_index, int begin); + +/* Routines exported from avs_hw.S */ +#ifdef CONFIG_MSM_CPU_AVS +u32 avs_test_delays(void); +#else +static inline u32 avs_test_delays(void) +{ return 0; } +#endif + +#ifdef CONFIG_MSM_AVS_HW +u32 avs_reset_delays(u32 avsdscr); +u32 avs_get_avscsr(void); +u32 avs_get_avsdscr(void); +u32 avs_get_tscsr(void); +void avs_set_tscsr(u32 to_tscsr); +void avs_disable(void); +#else +static inline u32 avs_reset_delays(u32 avsdscr) +{ return 0; } +static inline u32 avs_get_avscsr(void) +{ return 0; } +static inline u32 avs_get_avsdscr(void) +{ return 0; } +static inline u32 avs_get_tscsr(void) +{ return 0; } +static inline void avs_set_tscsr(u32 to_tscsr) {} +static inline void avs_disable(void) {} +#endif + +/*#define AVSDEBUG(x...) pr_info("AVS: " x);*/ +#define AVSDEBUG(...) + +#define AVS_DISABLE(cpu) do { \ + if (get_cpu() == (cpu)) \ + avs_disable(); \ + put_cpu(); \ + } while (0); + +#define AVS_ENABLE(cpu, x) do { \ + if (get_cpu() == (cpu)) \ + avs_reset_delays((x)); \ + put_cpu(); \ + } while (0); + +#endif /* AVS_H */ diff --git a/arch/arm/mach-msm/avs_hw.S b/arch/arm/mach-msm/avs_hw.S new file mode 100644 index 0000000000000..5e1530e4672b8 --- /dev/null +++ b/arch/arm/mach-msm/avs_hw.S @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + + .text + +#ifdef CONFIG_MSM_CPU_AVS + .global avs_test_delays +avs_test_delays: + +/* Read r1=CPMR and enable Never Sleep for VSLPDLY */ + mrc p15, 7, r1, c15, c0, 5 + orr r12, r1, #3, 24 + mcr p15, 7, r12, c15, c0, 5 + +/* Read r2=CPACR and enable full access to CP10 and CP11 space */ + mrc p15, 0, r2, c1, c0, 2 + orr r12, r2, #(0xf << 20) + mcr p15, 0, r12, c1, c0, 2 + isb + +/* Read r3=FPEXC and or in FP enable, VFP/ASE enable = FPEXC[30]; */ + fmrx r3, fpexc + orr r12, r3, #1, 2 + fmxr fpexc, r12 + +/* + * Do floating-point operations to prime the VFP pipeline. Use + * fcpyd d0, d0 as a floating point nop. This avoids changing VFP + * state. 
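 *
 * (Editor's note: as a whole this routine saves CPMR, CPACR and FPEXC,
 * sets the "never sleep" VSLPDLY bits, opens full CP10/CP11 access,
 * enables the VFP, issues these three no-op copies so the VFP is really
 * executing when AVSCSR is sampled, and then restores FPEXC, CPACR and
 * CPMR before returning.)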
+ */ + fcpyd d0, d0 + fcpyd d0, d0 + fcpyd d0, d0 + +/* Read r0=AVSCSR to get status from CPU, VFP, and L2 ring oscillators */ + mrc p15, 7, r0, c15, c1, 7 + +/* Restore FPEXC */ + fmxr fpexc, r3 + +/* Restore CPACR */ + MCR p15, 0, r2, c1, c0, 2 + +/* Restore CPMR */ + mcr p15, 7, r1, c15, c0, 5 + isb + + bx lr +#endif + + + .global avs_get_avscsr +/* Read r0=AVSCSR to get status from CPU, VFP, and L2 ring oscillators */ + +avs_get_avscsr: + mrc p15, 7, r0, c15, c1, 7 + bx lr + + .global avs_get_avsdscr +/* Read r0=AVSDSCR to get the AVS Delay Synthesizer control settings */ + +avs_get_avsdscr: + mrc p15, 7, r0, c15, c0, 6 + bx lr + + + + + .global avs_get_tscsr +/* Read r0=TSCSR to get temperature sensor control and status */ + +avs_get_tscsr: + mrc p15, 7, r0, c15, c1, 0 + bx lr + + .global avs_set_tscsr +/* Write TSCSR=r0 to set temperature sensor control and status */ + +avs_set_tscsr: + mcr p15, 7, r0, c15, c1, 0 + bx lr + + + + + + .global avs_reset_delays +avs_reset_delays: + +/* AVSDSCR(dly) to program delay */ + mcr p15, 7, r0, c15, c0, 6 + +/* Read r0=AVSDSCR */ + mrc p15, 7, r0, c15, c0, 6 + +/* AVSCSR(0x61) to enable CPU, V and L2 AVS module */ + mov r3, #0x61 + mcr p15, 7, r3, c15, c1, 7 + + bx lr + + + + .global avs_disable +avs_disable: + +/* Clear AVSCSR */ + mov r0, #0 + +/* Write AVSCSR */ + mcr p15, 7, r0, c15, c1, 7 + + bx lr + + .end + + diff --git a/arch/arm/mach-msm/board-bravo-audio.c b/arch/arm/mach-msm/board-bravo-audio.c new file mode 100644 index 0000000000000..d9ee10da2d6cf --- /dev/null +++ b/arch/arm/mach-msm/board-bravo-audio.c @@ -0,0 +1,270 @@ +/* arch/arm/mach-msm/board-bravo-audio.c + * + * Copyright (C) 2009 HTC Corporation + * Copyright (C) 2009 Google Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include + +#include "board-bravo.h" +#include "proc_comm.h" +#include "pmic.h" +#include "board-bravo-tpa2018d1.h" + +#if 0 +#define D(fmt, args...) printk(KERN_INFO "Audio: "fmt, ##args) +#else +#define D(fmt, args...) 
do {} while (0) +#endif + +static struct mutex mic_lock; +static struct mutex bt_sco_lock; + +static struct q6_hw_info q6_audio_hw[Q6_HW_COUNT] = { + [Q6_HW_HANDSET] = { + .min_gain = -1500, + .max_gain = 1199, + }, + [Q6_HW_HEADSET] = { + .min_gain = -2000, + .max_gain = 1199, + }, + [Q6_HW_SPEAKER] = { + .min_gain = -1100, + .max_gain = 400, + }, + [Q6_HW_TTY] = { + .min_gain = -1600, + .max_gain = 400, + }, + [Q6_HW_BT_SCO] = { + .min_gain = -1600, + .max_gain = 400, + }, + [Q6_HW_BT_A2DP] = { + .min_gain = -1600, + .max_gain = 400, + }, +}; + +void bravo_headset_enable(int en) +{ + D("%s %d\n", __func__, en); + /* enable audio amp */ + if (en) mdelay(15); + gpio_set_value(BRAVO_AUD_JACKHP_EN, !!en); +} + +void bravo_speaker_enable(int en) +{ + struct spkr_config_mode scm; + memset(&scm, 0, sizeof(scm)); + + D("%s %d\n", __func__, en); + if (en) { + scm.is_right_chan_en = 0; + scm.is_left_chan_en = 1; + scm.is_stereo_en = 0; + scm.is_hpf_en = 1; + pmic_spkr_en_mute(LEFT_SPKR, 0); + pmic_spkr_en_mute(RIGHT_SPKR, 0); + pmic_set_spkr_configuration(&scm); + pmic_spkr_en(LEFT_SPKR, 1); + pmic_spkr_en(RIGHT_SPKR, 0); + + /* unmute */ + pmic_spkr_en_mute(LEFT_SPKR, 1); + } else { + pmic_spkr_en_mute(LEFT_SPKR, 0); + + pmic_spkr_en(LEFT_SPKR, 0); + pmic_spkr_en(RIGHT_SPKR, 0); + + pmic_set_spkr_configuration(&scm); + } + if (is_cdma_version(system_rev)) + tpa2018d1_set_speaker_amp(en); +} + +void bravo_receiver_enable(int en) +{ + if (is_cdma_version(system_rev)) { + struct spkr_config_mode scm; + memset(&scm, 0, sizeof(scm)); + + D("%s %d\n", __func__, en); + if (en) { + scm.is_right_chan_en = 1; + scm.is_left_chan_en = 0; + scm.is_stereo_en = 0; + scm.is_hpf_en = 1; + pmic_spkr_en_mute(RIGHT_SPKR, 0); + pmic_set_spkr_configuration(&scm); + pmic_spkr_en(RIGHT_SPKR, 1); + + /* unmute */ + pmic_spkr_en_mute(RIGHT_SPKR, 1); + } else { + pmic_spkr_en_mute(RIGHT_SPKR, 0); + + pmic_spkr_en(RIGHT_SPKR, 0); + + pmic_set_spkr_configuration(&scm); + } + } +} + +static uint32_t bt_sco_enable[] = { + PCOM_GPIO_CFG(BRAVO_BT_PCM_OUT, 1, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(BRAVO_BT_PCM_IN, 1, GPIO_INPUT, + GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(BRAVO_BT_PCM_SYNC, 2, GPIO_INPUT, + GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(BRAVO_BT_PCM_CLK, 2, GPIO_INPUT, + GPIO_NO_PULL, GPIO_2MA), +}; + +static uint32_t bt_sco_disable[] = { + PCOM_GPIO_CFG(BRAVO_BT_PCM_OUT, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(BRAVO_BT_PCM_IN, 0, GPIO_INPUT, + GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(BRAVO_BT_PCM_SYNC, 0, GPIO_INPUT, + GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(BRAVO_BT_PCM_CLK, 0, GPIO_INPUT, + GPIO_NO_PULL, GPIO_2MA), +}; + +void bravo_bt_sco_enable(int en) +{ + static int bt_sco_refcount; + D("%s %d\n", __func__, en); + + mutex_lock(&bt_sco_lock); + if (en) { + if (++bt_sco_refcount == 1) + config_gpio_table(bt_sco_enable, + ARRAY_SIZE(bt_sco_enable)); + } else { + if (--bt_sco_refcount == 0) { + config_gpio_table(bt_sco_disable, + ARRAY_SIZE(bt_sco_disable)); + gpio_set_value(BRAVO_BT_PCM_OUT, 0); + } + } + mutex_unlock(&bt_sco_lock); +} + +void bravo_mic_enable(int en) +{ + static int old_state = 0, new_state = 0; + + D("%s %d\n", __func__, en); + + mutex_lock(&mic_lock); + if (!!en) + new_state++; + else + new_state--; + + if (new_state == 1 && old_state == 0) { + gpio_set_value(BRAVO_AUD_2V5_EN, 1); + mdelay(60); + } else if (new_state == 0 && old_state == 1) + gpio_set_value(BRAVO_AUD_2V5_EN, 0); + else + D("%s: do nothing %d %d\n", __func__, old_state, new_state); + + 
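	/*
	 * Editor's note: old_state/new_state form a simple enable count for the
	 * shared 2.5 V mic supply; the rail is only toggled on the 0 -> 1 and
	 * 1 -> 0 transitions, with a 60 ms settle delay after switching it on,
	 * and the whole sequence is serialized by mic_lock.
	 */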
old_state = new_state; + mutex_unlock(&mic_lock); +} + +void bravo_analog_init(void) +{ + D("%s\n", __func__); + /* stereo pmic init */ + pmic_spkr_set_gain(LEFT_SPKR, SPKR_GAIN_PLUS12DB); + pmic_spkr_set_gain(RIGHT_SPKR, SPKR_GAIN_PLUS12DB); + pmic_spkr_en_right_chan(OFF_CMD); + pmic_spkr_en_left_chan(OFF_CMD); + pmic_spkr_add_right_left_chan(OFF_CMD); + pmic_spkr_en_stereo(OFF_CMD); + pmic_spkr_select_usb_with_hpf_20hz(OFF_CMD); + pmic_spkr_bypass_mux(OFF_CMD); + pmic_spkr_en_hpf(ON_CMD); + pmic_spkr_en_sink_curr_from_ref_volt_cir(OFF_CMD); + pmic_spkr_set_mux_hpf_corner_freq(SPKR_FREQ_0_73KHZ); + pmic_mic_set_volt(MIC_VOLT_1_80V); + + gpio_request(BRAVO_AUD_JACKHP_EN, "aud_jackhp_en"); + gpio_request(BRAVO_BT_PCM_OUT, "bt_pcm_out"); + + gpio_direction_output(BRAVO_AUD_JACKHP_EN, 0); + + mutex_lock(&bt_sco_lock); + config_gpio_table(bt_sco_disable, + ARRAY_SIZE(bt_sco_disable)); + gpio_direction_output(BRAVO_BT_PCM_OUT, 0); + mutex_unlock(&bt_sco_lock); +} + +int bravo_get_rx_vol(uint8_t hw, int level) +{ + int vol; + struct q6_hw_info *info; + + if (level > 100) + level = 100; + else if (level < 0) + level = 0; + + if (is_cdma_version(system_rev) && hw == Q6_HW_HANDSET) { + int handset_volume[6] = { -1600, -1300, -1000, -600, -300, 0 }; + vol = handset_volume[5 * level / 100]; + } else { + info = &q6_audio_hw[hw]; + vol = info->min_gain + ((info->max_gain - info->min_gain) * level) / 100; + } + + D("%s %d\n", __func__, vol); + return vol; +} + +static struct qsd_acoustic_ops acoustic = { + .enable_mic_bias = bravo_mic_enable, +}; + +static struct q6audio_analog_ops ops = { + .init = bravo_analog_init, + .speaker_enable = bravo_speaker_enable, + .headset_enable = bravo_headset_enable, + .receiver_enable = bravo_receiver_enable, + .bt_sco_enable = bravo_bt_sco_enable, + .int_mic_enable = bravo_mic_enable, + .ext_mic_enable = bravo_mic_enable, + .get_rx_vol = bravo_get_rx_vol, +}; + +void __init bravo_audio_init(void) +{ + mutex_init(&mic_lock); + mutex_init(&bt_sco_lock); + q6audio_register_analog_ops(&ops); + acoustic_register_ops(&acoustic); + if (is_cdma_version(system_rev)) + q6audio_set_acdb_file("default_mos.acdb"); +} diff --git a/arch/arm/mach-msm/board-bravo-keypad.c b/arch/arm/mach-msm/board-bravo-keypad.c new file mode 100644 index 0000000000000..8208f20cdc8fc --- /dev/null +++ b/arch/arm/mach-msm/board-bravo-keypad.c @@ -0,0 +1,159 @@ +/* arch/arm/mach-msm/board-bravo-keypad.c + * + * Copyright (C) 2009 Google, Inc + * Copyright (C) 2009 HTC Corporation. + * Copyright (C) 2010 Giulio Cervera + * Copyright (C) 2010 Diogo Ferreira + * + * Author: Dima Zavin + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "board-bravo.h" + +static unsigned int bravo_col_gpios[] = { + BRAVO_GPIO_KP_MKOUT0, + BRAVO_GPIO_KP_MKOUT1, + BRAVO_GPIO_KP_MKOUT2, +}; + +static unsigned int bravo_row_gpios[] = { + BRAVO_GPIO_KP_MPIN0, + BRAVO_GPIO_KP_MPIN1, + BRAVO_GPIO_KP_MPIN2, +}; + +#define KEYMAP_INDEX(col, row) ((col)*ARRAY_SIZE(bravo_row_gpios) + (row)) +#define KEYMAP_SIZE (ARRAY_SIZE(bravo_col_gpios) * \ + ARRAY_SIZE(bravo_row_gpios)) + +/* keypad */ +static const unsigned short bravo_keymap[KEYMAP_SIZE] = { + [KEYMAP_INDEX(0, 0)] = KEY_VOLUMEUP, + [KEYMAP_INDEX(0, 1)] = KEY_VOLUMEDOWN, + [KEYMAP_INDEX(1, 1)] = BTN_MOUSE, /* OJ Action key */ + [KEYMAP_INDEX(1, 0)] = KEY_MENU, + [KEYMAP_INDEX(1, 2)] = KEY_SEARCH, + [KEYMAP_INDEX(2, 0)] = KEY_HOME, + [KEYMAP_INDEX(2, 2)] = KEY_BACK, +}; + +static struct gpio_event_matrix_info bravo_keypad_matrix_info = { + .info.func = gpio_event_matrix_func, + .keymap = bravo_keymap, + .output_gpios = bravo_col_gpios, + .input_gpios = bravo_row_gpios, + .noutputs = ARRAY_SIZE(bravo_col_gpios), + .ninputs = ARRAY_SIZE(bravo_row_gpios), + .settle_time.tv.nsec = 40 * NSEC_PER_USEC, + .poll_time.tv.nsec = 20 * NSEC_PER_MSEC, + .debounce_delay.tv.nsec = 5 * NSEC_PER_MSEC, + .flags = (GPIOKPF_LEVEL_TRIGGERED_IRQ | + GPIOKPF_REMOVE_PHANTOM_KEYS | + GPIOKPF_PRINT_UNMAPPED_KEYS), +#ifdef CONFIG_OPTICALJOYSTICK_CRUCIAL + .info.oj_btn = true, +#endif +}; + +static struct gpio_event_direct_entry bravo_keypad_key_map[] = { + { + .gpio = BRAVO_GPIO_POWER_KEY, + .code = KEY_POWER, + }, +}; + +static struct gpio_event_input_info bravo_keypad_key_info = { + .info.func = gpio_event_input_func, + .info.no_suspend = true, + .flags = GPIOEDF_PRINT_KEYS, + .type = EV_KEY, + .debounce_time.tv.nsec = 5 * NSEC_PER_MSEC, + .keymap = bravo_keypad_key_map, + .keymap_size = ARRAY_SIZE(bravo_keypad_key_map) +}; + +static struct gpio_event_info *bravo_input_info[] = { + &bravo_keypad_matrix_info.info, + &bravo_keypad_key_info.info, +}; + +static struct gpio_event_platform_data bravo_input_data = { + .names = { +#ifdef CONFIG_MACH_BRAVO + "bravo-keypad", +#else + "bravoc-keypad", +#endif + NULL, + }, + .info = bravo_input_info, + .info_count = ARRAY_SIZE(bravo_input_info), +}; + +static struct platform_device bravo_input_device = { + .name = GPIO_EVENT_DEV_NAME, + .id = 0, + .dev = { + .platform_data = &bravo_input_data, + }, +}; + +static int bravo_reset_keys_up[] = { + KEY_VOLUMEUP, + 0, +}; + +static struct keyreset_platform_data bravo_reset_keys_pdata = { + .keys_up = bravo_reset_keys_up, + .keys_down = { + KEY_POWER, + KEY_VOLUMEDOWN, + BTN_MOUSE, + 0 + }, +}; + +struct platform_device bravo_reset_keys_device = { + .name = KEYRESET_NAME, + .dev = { + .platform_data = &bravo_reset_keys_pdata, + }, +}; + +static int __init bravo_init_keypad(void) +{ + int ret; + + if (!machine_is_bravo() && !machine_is_bravoc()) + return 0; + + ret = platform_device_register(&bravo_reset_keys_device); + if (ret != 0) + return ret; + + ret = platform_device_register(&bravo_input_device); + if (ret != 0) + return ret; + + return 0; +} + +device_initcall(bravo_init_keypad); diff --git a/arch/arm/mach-msm/board-bravo-microp.c b/arch/arm/mach-msm/board-bravo-microp.c new file mode 100644 index 0000000000000..9fc4fbf44c65b --- /dev/null +++ b/arch/arm/mach-msm/board-bravo-microp.c @@ -0,0 +1,1885 @@ +/* board-bravo-microp.c + * + * Copyright (C) 2009 Google. + * Copyright (C) 2009 HTC Corporation. 
+ * Copyright (C) 2010 Giulio Cervera + * Copyright (C) 2010 Diogo Ferreira + * + * The Microp on bravo is an i2c device that supports + * the following functions + * - LEDs (Green, Amber, Blue, Button-backlight) + * - Lightsensor + * - Headset & Remotekeys + * - G-sensor + * - Interrupts + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. +*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +//#include +#include +#include +#include +#include +#include +#include + +#include +#include "board-bravo.h" + +#define READ_GPI_STATE_HPIN (1<<2) +#define READ_GPI_STATE_SDCARD (1<<0) + +/*#define DEBUG_BMA150 */ +#ifdef DEBUG_BMA150 +/* Debug logging of accelleration data */ +#define GSENSOR_LOG_MAX 2048 /* needs to be power of 2 */ +#define GSENSOR_LOG_MASK (GSENSOR_LOG_MAX - 1) + +struct gsensor_log { + ktime_t timestamp; + short x; + short y; + short z; +}; + +static DEFINE_MUTEX(gsensor_log_lock); +static struct gsensor_log gsensor_log[GSENSOR_LOG_MAX]; +static unsigned gsensor_log_head; +static unsigned gsensor_log_tail; + +void gsensor_log_status(ktime_t time, short x, short y, short z) +{ + unsigned n; + mutex_lock(&gsensor_log_lock); + n = gsensor_log_head; + gsensor_log[n].timestamp = time; + gsensor_log[n].x = x; + gsensor_log[n].y = y; + gsensor_log[n].z = z; + n = (n + 1) & GSENSOR_LOG_MASK; + if (n == gsensor_log_tail) + gsensor_log_tail = (gsensor_log_tail + 1) & GSENSOR_LOG_MASK; + gsensor_log_head = n; + mutex_unlock(&gsensor_log_lock); +} + +static int gsensor_log_print(struct seq_file *sf, void *private) +{ + unsigned n; + + mutex_lock(&gsensor_log_lock); + seq_printf(sf, "timestamp X Y Z\n"); + for (n = gsensor_log_tail; + n != gsensor_log_head; + n = (n + 1) & GSENSOR_LOG_MASK) { + seq_printf(sf, "%10d.%010d %6d %6d %6d\n", + gsensor_log[n].timestamp.tv.sec, + gsensor_log[n].timestamp.tv.nsec, + gsensor_log[n].x, gsensor_log[n].y, + gsensor_log[n].z); + } + mutex_unlock(&gsensor_log_lock); + return 0; +} + +static int gsensor_log_open(struct inode *inode, struct file *file) +{ + return single_open(file, gsensor_log_print, NULL); +} + +static struct file_operations gsensor_log_fops = { + .open = gsensor_log_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif /* def DEBUG_BMA150 */ + +static struct mutex gsensor_RW_mutex; +static struct mutex gsensor_set_mode_mutex; + +static int microp_headset_has_mic(void); +static int microp_enable_headset_plug_event(void); +static int microp_enable_key_event(void); +static int microp_disable_key_event(void); + +static struct h35mm_platform_data bravo_h35mm_data = { + .plug_event_enable = microp_enable_headset_plug_event, + .headset_has_mic = microp_headset_has_mic, + .key_event_enable = microp_enable_key_event, + .key_event_disable = microp_disable_key_event, +}; + +static struct platform_device bravo_h35mm = { + .name = "htc_headset", + .id = -1, + .dev = { + .platform_data = &bravo_h35mm_data, + }, +}; + +enum led_type { + GREEN_LED, + AMBER_LED, + BLUE_LED, + 
BUTTONS_LED, + NUM_LEDS, +}; + +static uint16_t remote_key_adc_table[6] = { + 0, 33, 43, 110, 129, 220 +}; + +static struct wake_lock microp_i2c_wakelock; + +static struct i2c_client *private_microp_client; + +struct microp_int_pin { + uint16_t int_gsensor; + uint16_t int_lsensor; + uint16_t int_reset; + uint16_t int_simcard; + uint16_t int_hpin; + uint16_t int_remotekey; +}; + +struct microp_led_data { + int type; + struct led_classdev ldev; + struct mutex led_data_mutex; + struct work_struct brightness_work; + spinlock_t brightness_lock; + enum led_brightness brightness; + uint8_t mode; + uint8_t blink; +}; + +struct microp_i2c_work { + struct work_struct work; + struct i2c_client *client; + int (*intr_debounce)(uint8_t *pin_status); + void (*intr_function)(uint8_t *pin_status); +}; + +struct microp_i2c_client_data { + struct microp_led_data leds[NUM_LEDS]; + uint16_t version; + struct microp_i2c_work work; + struct delayed_work hpin_debounce_work; + struct delayed_work ls_read_work; + struct early_suspend early_suspend; + uint8_t enable_early_suspend; + uint8_t enable_reset_button; + int microp_is_suspend; + int auto_backlight_enabled; + uint8_t button_led_value; + int headset_is_in; + int is_hpin_pin_stable; + uint32_t spi_devices_vote; + uint32_t spi_devices; + struct mutex microp_i2c_rw_mutex; + struct mutex microp_adc_mutex; + struct hrtimer gen_irq_timer; + uint16_t intr_status; +}; + +static char *hex2string(uint8_t *data, int len) +{ + static char buf[101]; + int i; + + i = (sizeof(buf) - 1) / 4; + if (len > i) + len = i; + + for (i = 0; i < len; i++) + sprintf(buf + i * 4, "[%02X]", data[i]); + + return buf; +} + +#define I2C_READ_RETRY_TIMES 10 +#define I2C_WRITE_RETRY_TIMES 10 +#define MICROP_I2C_WRITE_BLOCK_SIZE 80 + +static int i2c_read_block(struct i2c_client *client, uint8_t addr, + uint8_t *data, int length) +{ + int retry; + struct microp_i2c_client_data *cdata; + struct i2c_msg msgs[] = { + { + .addr = client->addr, + .flags = 0, + .len = 1, + .buf = &addr, + }, + { + .addr = client->addr, + .flags = I2C_M_RD, + .len = length, + .buf = data, + } + }; + + cdata = i2c_get_clientdata(client); + mutex_lock(&cdata->microp_i2c_rw_mutex); + mdelay(1); + for (retry = 0; retry <= I2C_READ_RETRY_TIMES; retry++) { + if (i2c_transfer(client->adapter, msgs, 2) == 2) + break; + msleep(10); + } + mutex_unlock(&cdata->microp_i2c_rw_mutex); + dev_dbg(&client->dev, "R [%02X] = %s\n", + addr, hex2string(data, length)); + + if (retry > I2C_READ_RETRY_TIMES) { + dev_err(&client->dev, "i2c_read_block retry over %d\n", + I2C_READ_RETRY_TIMES); + return -EIO; + } + + return 0; +} + +static int i2c_write_block(struct i2c_client *client, uint8_t addr, + uint8_t *data, int length) +{ + int retry; + uint8_t buf[MICROP_I2C_WRITE_BLOCK_SIZE]; + int i; + struct microp_i2c_client_data *cdata; + struct i2c_msg msg[] = { + { + .addr = client->addr, + .flags = 0, + .len = length + 1, + .buf = buf, + } + }; + + dev_dbg(&client->dev, "W [%02X] = %s\n", + addr, hex2string(data, length)); + + cdata = i2c_get_clientdata(client); + if (length + 1 > MICROP_I2C_WRITE_BLOCK_SIZE) { + dev_err(&client->dev, "i2c_write_block length too long\n"); + return -E2BIG; + } + + buf[0] = addr; + for (i = 0; i < length; i++) + buf[i+1] = data[i]; + + mutex_lock(&cdata->microp_i2c_rw_mutex); + mdelay(1); + for (retry = 0; retry <= I2C_WRITE_RETRY_TIMES; retry++) { + if (i2c_transfer(client->adapter, msg, 1) == 1) + break; + msleep(10); + } + if (retry > I2C_WRITE_RETRY_TIMES) { + dev_err(&client->dev, "i2c_write_block retry 
over %d\n", + I2C_WRITE_RETRY_TIMES); + mutex_unlock(&cdata->microp_i2c_rw_mutex); + return -EIO; + } + mutex_unlock(&cdata->microp_i2c_rw_mutex); + + return 0; +} + +struct i2c_client *get_microp_client(void) +{ + return private_microp_client; +} + +int microp_i2c_read(uint8_t addr, uint8_t *data, int length) +{ + struct i2c_client *client = private_microp_client; + + if (!client) { + printk(KERN_ERR "%s: dataset: client is empty\n", __func__); + return -EIO; + } + + if (i2c_read_block(client, addr, data, length) < 0) { + dev_err(&client->dev, "%s: write microp i2c fail\n", __func__); + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL(microp_i2c_read); + +int microp_i2c_write(uint8_t addr, uint8_t *data, int length) +{ + struct i2c_client *client = private_microp_client; + + if (!client) { + printk(KERN_ERR "%s: dataset: client is empty\n", __func__); + return -EIO; + } + + if (i2c_write_block(client, addr, data, length) < 0) { + dev_err(&client->dev, "%s: write microp i2c fail\n", __func__); + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL(microp_i2c_write); + +int microp_read_adc(uint8_t channel, uint16_t *value) +{ + struct i2c_client *client; + struct microp_i2c_client_data *cdata; + int ret; + uint8_t cmd[2], data[2]; + + client = private_microp_client; + cdata = i2c_get_clientdata(client); + cmd[0] = 0; + cmd[1] = channel; + mutex_lock(&cdata->microp_adc_mutex); + ret = i2c_write_block(client, MICROP_I2C_WCMD_READ_ADC_VALUE_REQ, + cmd, 2); + if (ret < 0) { + dev_err(&client->dev, "%s: request adc fail\n", __func__); + mutex_unlock(&cdata->microp_adc_mutex); + return -EIO; + } + + ret = i2c_read_block(client, MICROP_I2C_RCMD_ADC_VALUE, data, 2); + if (ret < 0) { + dev_err(&client->dev, "%s: read adc fail\n", __func__); + mutex_unlock(&cdata->microp_adc_mutex); + return -EIO; + } + + *value = data[0] << 8 | data[1]; + mutex_unlock(&cdata->microp_adc_mutex); + return 0; +} +EXPORT_SYMBOL(microp_read_adc); + +static int microp_read_gpi_status(struct i2c_client *client, uint16_t *status) +{ + uint8_t data[3]; + int ret; + + ret = i2c_read_block(client, MICROP_I2C_RCMD_GPI_STATUS, data, 3); + if (ret < 0) { + dev_err(&client->dev, "%s: read failed\n", __func__); + return -EIO; + } + *status = (data[0] << 16 | data[1] << 8 | data[2]); + return 0; +} + +static int microp_interrupt_enable(struct i2c_client *client, + uint16_t interrupt_mask) +{ + uint8_t data[2]; + int ret = -1; + + data[0] = interrupt_mask >> 8; + data[1] = interrupt_mask & 0xFF; + ret = i2c_write_block(client, MICROP_I2C_WCMD_GPI_INT_CTL_EN, data, 2); + + if (ret < 0) + dev_err(&client->dev, "%s: enable 0x%x interrupt failed\n", + __func__, interrupt_mask); + return ret; +} + +static int microp_interrupt_disable(struct i2c_client *client, + uint16_t interrupt_mask) +{ + uint8_t data[2]; + int ret = -1; + + data[0] = interrupt_mask >> 8; + data[1] = interrupt_mask & 0xFF; + ret = i2c_write_block(client, MICROP_I2C_WCMD_GPI_INT_CTL_DIS, data, 2); + + if (ret < 0) + dev_err(&client->dev, "%s: disable 0x%x interrupt failed\n", + __func__, interrupt_mask); + return ret; +} + +int microp_write_interrupt(struct i2c_client *client, + uint16_t interrupt, uint8_t enable) +{ + int ret; + + if (enable) { + ret = microp_interrupt_enable(client, interrupt); + printk("%s: microp_interrupt_enable called\n", __func__ ); + } else { + ret = microp_interrupt_disable(client, interrupt); + printk("%s: microp_interrupt_disable called\n", __func__ ); + } + + return ret; +} +EXPORT_SYMBOL(microp_write_interrupt); + +/* + * SD slot card-detect 
support + */ +static unsigned int sdslot_cd = 0; +static void (*sdslot_status_cb)(int card_present, void *dev_id); +static void *sdslot_mmc_dev; + +int bravo_microp_sdslot_status_register( + void (*cb)(int card_present, void *dev_id), + void *dev_id) +{ + if (sdslot_status_cb) + return -EBUSY; + sdslot_status_cb = cb; + sdslot_mmc_dev = dev_id; + return 0; +} + +unsigned int bravo_microp_sdslot_status(struct device *dev) +{ + return sdslot_cd; +} + +static void bravo_microp_sdslot_update_status(int status) +{ + sdslot_cd = !(status & READ_GPI_STATE_SDCARD); + if (sdslot_status_cb) + sdslot_status_cb(sdslot_cd, sdslot_mmc_dev); +} + +/* + *Headset Support +*/ +static void hpin_debounce_do_work(struct work_struct *work) +{ + uint16_t gpi_status = 0; + struct microp_i2c_client_data *cdata; + int insert = 0; + struct i2c_client *client; + + client = private_microp_client; + cdata = i2c_get_clientdata(client); + + microp_read_gpi_status(client, &gpi_status); + insert = (gpi_status & READ_GPI_STATE_HPIN) ? 0 : 1; + if (insert != cdata->headset_is_in) { + cdata->headset_is_in = insert; + pr_debug("headset %s\n", insert ? "inserted" : "removed"); + htc_35mm_jack_plug_event(cdata->headset_is_in, + &cdata->is_hpin_pin_stable); + } +} + +static int microp_enable_headset_plug_event(void) +{ + int ret; + struct i2c_client *client; + struct microp_i2c_client_data *cdata; + uint16_t stat; + + client = private_microp_client; + cdata = i2c_get_clientdata(client); + + /* enable microp interrupt to detect changes */ + ret = microp_interrupt_enable(client, IRQ_HEADSETIN); + if (ret < 0) { + dev_err(&client->dev, "%s: failed to enable irqs\n", + __func__); + return 0; + } + /* see if headset state has changed */ + microp_read_gpi_status(client, &stat); + stat = !(stat & READ_GPI_STATE_HPIN); + if(cdata->headset_is_in != stat) { + cdata->headset_is_in = stat; + pr_debug("Headset state changed\n"); + htc_35mm_jack_plug_event(stat, &cdata->is_hpin_pin_stable); + } + + return 1; +} + +static int microp_headset_detect_mic(void) +{ + uint16_t data; + + microp_read_adc(MICROP_REMOTE_KEY_ADC_CHAN, &data); + if (data >= 200) + return 1; + else + return 0; +} + +static int microp_headset_has_mic(void) +{ + int mic1 = -1; + int mic2 = -1; + int count = 0; + + mic2 = microp_headset_detect_mic(); + + /* debounce the detection wait until 2 consecutive read are equal */ + while ((mic1 != mic2) && (count < 10)) { + mic1 = mic2; + msleep(600); + mic2 = microp_headset_detect_mic(); + count++; + } + + pr_info("%s: microphone (%d) %s\n", __func__, count, + mic1 ? 
"present" : "not present"); + + return mic1; +} + +static int microp_enable_key_event(void) +{ + int ret; + struct i2c_client *client; + + client = private_microp_client; + + if (!is_cdma_version(system_rev)) + gpio_set_value(BRAVO_GPIO_35MM_KEY_INT_SHUTDOWN, 1); + + /* turn on key interrupt */ + /* enable microp interrupt to detect changes */ + ret = microp_interrupt_enable(client, IRQ_REMOTEKEY); + if (ret < 0) { + dev_err(&client->dev, "%s: failed to enable irqs\n", + __func__); + return ret; + } + return 0; +} + +static int microp_disable_key_event(void) +{ + int ret; + struct i2c_client *client; + + client = private_microp_client; + + /* shutdown key interrupt */ + if (!is_cdma_version(system_rev)) + gpio_set_value(BRAVO_GPIO_35MM_KEY_INT_SHUTDOWN, 0); + + /* disable microp interrupt to detect changes */ + ret = microp_interrupt_disable(client, IRQ_REMOTEKEY); + if (ret < 0) { + dev_err(&client->dev, "%s: failed to disable irqs\n", + __func__); + return ret; + } + return 0; +} + +static int get_remote_keycode(int *keycode) +{ + struct i2c_client *client = private_microp_client; + int ret; + uint8_t data[2]; + + ret = i2c_read_block(client, MICROP_I2C_RCMD_REMOTE_KEYCODE, data, 2); + if (ret < 0) { + dev_err(&client->dev, "%s: read remote keycode fail\n", + __func__); + return -EIO; + } + pr_debug("%s: key = 0x%x\n", __func__, data[1]); + if (!data[1]) { + *keycode = 0; + return 1; /* no keycode */ + } else { + *keycode = data[1]; + } + return 0; +} + +static ssize_t microp_i2c_remotekey_adc_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client; + uint16_t value; + int i, button = 0; + int ret; + + client = to_i2c_client(dev); + + microp_read_adc(MICROP_REMOTE_KEY_ADC_CHAN, &value); + + for (i = 0; i < 3; i++) { + if ((value >= remote_key_adc_table[2 * i]) && + (value <= remote_key_adc_table[2 * i + 1])) { + button = i + 1; + } + + } + + ret = sprintf(buf, "Remote Key[0x%03X] => button %d\n", + value, button); + + return ret; +} + +static DEVICE_ATTR(key_adc, 0644, microp_i2c_remotekey_adc_show, NULL); + +/* + * LED support +*/ +static int microp_i2c_write_led_mode(struct i2c_client *client, + struct led_classdev *led_cdev, + uint8_t mode, uint16_t off_timer) +{ + struct microp_i2c_client_data *cdata; + struct microp_led_data *ldata; + uint8_t data[7]; + int ret; + + cdata = i2c_get_clientdata(client); + ldata = container_of(led_cdev, struct microp_led_data, ldev); + + if (ldata->type == GREEN_LED) { + data[0] = 0x01; + data[1] = mode; + data[2] = off_timer >> 8; + data[3] = off_timer & 0xFF; + data[4] = 0x00; + data[5] = 0x00; + data[6] = 0x00; + } else if (ldata->type == AMBER_LED) { + data[0] = 0x02; + data[1] = 0x00; + data[2] = 0x00; + data[3] = 0x00; + data[4] = mode; + data[5] = off_timer >> 8; + data[6] = off_timer & 0xFF; + } else if (ldata->type == BLUE_LED) { + data[0] = 0x04; + data[1] = mode; + data[2] = off_timer >> 8; + data[3] = off_timer & 0xFF; + data[4] = 0x00; + data[5] = 0x00; + data[6] = 0x00; + } + + ret = i2c_write_block(client, MICROP_I2C_WCMD_LED_MODE, data, 7); + if (ret == 0) { + mutex_lock(&ldata->led_data_mutex); + if (mode > 1) + ldata->blink = mode; + else + ldata->mode = mode; + mutex_unlock(&ldata->led_data_mutex); + } + return ret; +} + +static ssize_t microp_i2c_led_blink_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct led_classdev *led_cdev; + struct microp_led_data *ldata; + int ret; + + led_cdev = (struct led_classdev *)dev_get_drvdata(dev); + ldata = 
container_of(led_cdev, struct microp_led_data, ldev); + + mutex_lock(&ldata->led_data_mutex); + ret = sprintf(buf, "%d\n", ldata->blink ? ldata->blink - 1 : 0); + mutex_unlock(&ldata->led_data_mutex); + + return ret; +} + +static ssize_t microp_i2c_led_blink_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct led_classdev *led_cdev; + struct microp_led_data *ldata; + struct i2c_client *client; + int val, ret; + uint8_t mode; + + val = -1; + sscanf(buf, "%u", &val); + + led_cdev = (struct led_classdev *)dev_get_drvdata(dev); + ldata = container_of(led_cdev, struct microp_led_data, ldev); + client = to_i2c_client(dev->parent); + + mutex_lock(&ldata->led_data_mutex); + switch (val) { + case 0: /* stop flashing */ + mode = ldata->mode; + ldata->blink = 0; + break; + case 1: + case 2: + case 3: + mode = val + 1; + break; + + default: + mutex_unlock(&ldata->led_data_mutex); + return -EINVAL; + } + mutex_unlock(&ldata->led_data_mutex); + + ret = microp_i2c_write_led_mode(client, led_cdev, mode, 0xffff); + if (ret) + dev_err(&client->dev, "%s set blink failed\n", led_cdev->name); + + return count; +} + +static DEVICE_ATTR(blink, 0644, microp_i2c_led_blink_show, + microp_i2c_led_blink_store); + +static ssize_t microp_i2c_led_off_timer_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct microp_i2c_client_data *cdata; + struct led_classdev *led_cdev; + struct microp_led_data *ldata; + struct i2c_client *client; + uint8_t data[2]; + int ret, offtime; + + led_cdev = (struct led_classdev *)dev_get_drvdata(dev); + ldata = container_of(led_cdev, struct microp_led_data, ldev); + client = to_i2c_client(dev->parent); + cdata = i2c_get_clientdata(client); + + dev_dbg(&client->dev, "Getting %s remaining time\n", led_cdev->name); + + if (ldata->type == GREEN_LED) { + ret = i2c_read_block(client, + MICROP_I2C_RCMD_GREEN_LED_REMAIN_TIME, data, 2); + } else if (ldata->type == AMBER_LED) { + ret = i2c_read_block(client, + MICROP_I2C_RCMD_AMBER_LED_REMAIN_TIME, + data, 2); + } else if (ldata->type == BLUE_LED) { + ret = i2c_read_block(client, + MICROP_I2C_RCMD_BLUE_LED_REMAIN_TIME, data, 2); + } else { + dev_err(&client->dev, "Unknown led %s\n", ldata->ldev.name); + return -EINVAL; + } + + if (ret) { + dev_err(&client->dev, + "%s get off_timer failed\n", led_cdev->name); + } + offtime = (int)((data[1] | data[0] << 8) * 2); + + ret = sprintf(buf, "Time remains %d:%d\n", offtime / 60, offtime % 60); + return ret; +} + +static ssize_t microp_i2c_led_off_timer_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct led_classdev *led_cdev; + struct microp_led_data *ldata; + struct i2c_client *client; + int min, sec, ret; + uint16_t off_timer; + + min = -1; + sec = -1; + sscanf(buf, "%d %d", &min, &sec); + + if (min < 0 || min > 255) + return -EINVAL; + if (sec < 0 || sec > 255) + return -EINVAL; + + led_cdev = (struct led_classdev *)dev_get_drvdata(dev); + ldata = container_of(led_cdev, struct microp_led_data, ldev); + client = to_i2c_client(dev->parent); + + dev_dbg(&client->dev, "Setting %s off_timer to %d min %d sec\n", + led_cdev->name, min, sec); + + if (!min && !sec) + off_timer = 0xFFFF; + else + off_timer = (min * 60 + sec) / 2; + + ret = microp_i2c_write_led_mode(client, led_cdev, + ldata->mode, off_timer); + if (ret) { + dev_err(&client->dev, + "%s set off_timer %d min %d sec failed\n", + led_cdev->name, min, sec); + } + return count; +} + +static DEVICE_ATTR(off_timer, 0644, 
microp_i2c_led_off_timer_show, + microp_i2c_led_off_timer_store); + +static void microp_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + unsigned long flags; + struct i2c_client *client = to_i2c_client(led_cdev->dev->parent); + struct microp_led_data *ldata = + container_of(led_cdev, struct microp_led_data, ldev); + + dev_dbg(&client->dev, "Setting %s brightness current %d new %d\n", + led_cdev->name, led_cdev->brightness, brightness); + + if (brightness > 255) + brightness = 255; + led_cdev->brightness = brightness; + + spin_lock_irqsave(&ldata->brightness_lock, flags); + ldata->brightness = brightness; + spin_unlock_irqrestore(&ldata->brightness_lock, flags); + + schedule_work(&ldata->brightness_work); +} + +static void microp_led_brightness_set_work(struct work_struct *work) +{ + unsigned long flags; + struct microp_led_data *ldata = + container_of(work, struct microp_led_data, brightness_work); + struct led_classdev *led_cdev = &ldata->ldev; + + struct i2c_client *client = to_i2c_client(led_cdev->dev->parent); + + enum led_brightness brightness; + int ret; + uint8_t mode; + + spin_lock_irqsave(&ldata->brightness_lock, flags); + brightness = ldata->brightness; + spin_unlock_irqrestore(&ldata->brightness_lock, flags); + + if (brightness) + mode = 1; + else + mode = 0; + + ret = microp_i2c_write_led_mode(client, led_cdev, mode, 0xffff); + if (ret) { + dev_err(&client->dev, + "led_brightness_set failed to set mode\n"); + } +} + +static void microp_led_brightness_gpo_set_work(struct work_struct *work) +{ + unsigned long flags; + struct microp_led_data *ldata = + container_of(work, struct microp_led_data, brightness_work); + + enum led_brightness brightness; + int ret; + uint8_t addr, data[3] = {0x00,0x02,0x00}, enable; + + spin_lock_irqsave(&ldata->brightness_lock, flags); + brightness = ldata->brightness; + spin_unlock_irqrestore(&ldata->brightness_lock, flags); + + enable = brightness ? 1 : 0; + if (enable) + addr = MICROP_I2C_WCMD_GPO_LED_STATUS_EN; + else + addr = MICROP_I2C_WCMD_GPO_LED_STATUS_DIS; + + ret = microp_i2c_write (addr, data, 3); + if (ret < 0) + pr_err("%s failed on set gpo led mode:%d\n", __func__, brightness); +} + +struct device_attribute *green_amber_attrs[] = { + &dev_attr_blink, + &dev_attr_off_timer, +}; + +static void microp_led_buttons_brightness_set_work(struct work_struct *work) +{ + + unsigned long flags; + struct microp_led_data *ldata = + container_of(work, struct microp_led_data, brightness_work); + struct led_classdev *led_cdev = &ldata->ldev; + + struct i2c_client *client = to_i2c_client(led_cdev->dev->parent); + struct microp_i2c_client_data *cdata = i2c_get_clientdata(client); + + uint8_t data[4] = {0, 0, 0}; + int ret = 0; + enum led_brightness brightness; + uint8_t value; + + spin_lock_irqsave(&ldata->brightness_lock, flags); + brightness = ldata->brightness; + spin_unlock_irqrestore(&ldata->brightness_lock, flags); + + value = brightness >= 255 ? 
0xFF : 0; + + /* avoid a flicker that can occur when writing the same value */ + if (cdata->button_led_value == value) + return; + cdata->button_led_value = value; + + /* in 40ms */ + data[0] = 0x05; + /* duty cycle 0-255 */ + data[1] = value; + /* bit2 == change brightness */ + data[3] = 0x04; + + ret = i2c_write_block(client, MICROP_I2C_WCMD_BUTTONS_LED_CTRL, + data, 4); + if (ret < 0) + dev_err(&client->dev, "%s failed on set buttons\n", __func__); +} + +static int microp_oj_interrupt_mode(struct i2c_client *client, uint8_t enable) +{ + int ret; + + if (enable) { + ret = microp_interrupt_enable(client, IRQ_OJ); + printk("%s: microp_interrupt_enable called\n", __func__ ); + } else { + ret = microp_interrupt_disable(client, IRQ_OJ); + printk("%s: microp_interrupt_disable called\n", __func__ ); + } + + return ret; +} + +static int microp_spi_enable(uint8_t on) +{ + struct i2c_client *client; + int ret; + + client = private_microp_client; + ret = i2c_write_block(client, MICROP_I2C_WCMD_SPI_EN, &on, 1); + if (ret < 0) { + dev_err(&client->dev,"%s: i2c_write_block fail\n", __func__); + return ret; + } + + msleep(10); + return ret; +} + +/* Lookup active SPI devices and only turn it off when no device + * is using it + * */ +int microp_spi_vote_enable(int spi_device, uint8_t enable) { + //XXX need to check that all that crap in the HTC kernel is needed + struct i2c_client *client = private_microp_client; + struct microp_i2c_client_data *cdata = i2c_get_clientdata(client); + uint8_t data[2] = {0, 0}; + int ret = 0; + + if (!client) { + printk(KERN_ERR "%s: dataset: client is empty\n", __func__); + return -EIO; + } + + if (spi_device == SPI_OJ) { + microp_oj_interrupt_mode(client, enable); + printk(KERN_ERR "%s: Changing OJ interrupt mode [%d]\n", __func__, enable); + } + + mutex_lock(&cdata->microp_adc_mutex); + /* Add/remove it from the poll */ + if (enable) + cdata->spi_devices_vote |= spi_device; + else + cdata->spi_devices_vote &= ~spi_device; + + ret = i2c_read_block(client, MICROP_I2C_RCMD_SPI_BL_STATUS, data, 2); + if (ret != 0) { + printk(KERN_ERR "%s: read SPI/BL status fail\n", __func__); + mutex_unlock(&cdata->microp_adc_mutex); + return ret; + } + + if ((data[1] & 0x01) == + ((cdata->spi_devices & cdata->spi_devices_vote) ? 
1 : 0)) { + printk(KERN_ERR "%s: already in voted state, [spi_device %d,enable %d], [spi_status %d, spi_devices_vote %d]\n", __func__, spi_device, enable, data[1]&0x01, cdata->spi_devices_vote); + mutex_unlock(&cdata->microp_adc_mutex); + return ret; + } + + if (cdata->spi_devices & cdata->spi_devices_vote) + enable = 1; + else + enable = 0; + + printk(KERN_ERR "%s: Changing SPI [%d]\n", __func__, enable); + + mutex_unlock(&cdata->microp_adc_mutex); + ret = microp_spi_enable(enable); + return ret; +} +EXPORT_SYMBOL(microp_spi_vote_enable); + +/* + * G-sensor + */ +static int gsensor_read_reg(uint8_t reg, uint8_t *data) +{ + struct i2c_client *client = private_microp_client; + int ret; + uint8_t tmp[2]; + + mutex_lock(&gsensor_RW_mutex); + + ret = i2c_write_block(client, MICROP_I2C_WCMD_GSENSOR_REG_DATA_REQ, + ®, 1); + if (ret < 0) { + dev_err(&client->dev,"%s: i2c_write_block fail\n", __func__); + mutex_unlock(&gsensor_RW_mutex); + return ret; + } + msleep(10); + + ret = i2c_read_block(client, MICROP_I2C_RCMD_GSENSOR_REG_DATA, tmp, 2); + if (ret < 0) { + dev_err(&client->dev,"%s: i2c_read_block fail\n", __func__); + mutex_unlock(&gsensor_RW_mutex); + return ret; + } + *data = tmp[1]; + + mutex_unlock(&gsensor_RW_mutex); + + return ret; +} + +static int gsensor_write_reg(uint8_t reg, uint8_t data) +{ + struct i2c_client *client = private_microp_client; + int ret; + uint8_t tmp[2]; + + mutex_lock(&gsensor_RW_mutex); + + tmp[0] = reg; + tmp[1] = data; + ret = i2c_write_block(client, MICROP_I2C_WCMD_GSENSOR_REG, tmp, 2); + if (ret < 0) { + dev_err(&client->dev,"%s: i2c_write_block fail\n", __func__); + mutex_unlock(&gsensor_RW_mutex); + return ret; + } + + mutex_unlock(&gsensor_RW_mutex); + + return ret; +} + +static int gsensor_read_acceleration(short *buf) +{ + struct i2c_client *client = private_microp_client; + int ret; + uint8_t tmp[6]; + struct microp_i2c_client_data *cdata; + + mutex_lock(&gsensor_RW_mutex); + + cdata = i2c_get_clientdata(client); + + tmp[0] = 1; + ret = i2c_write_block(client, MICROP_I2C_WCMD_GSENSOR_DATA_REQ, + tmp, 1); + if (ret < 0) { + dev_err(&client->dev,"%s: i2c_write_block fail\n", __func__); + mutex_unlock(&gsensor_RW_mutex); + return ret; + } + + msleep(10); + + ret = i2c_read_block(client, MICROP_I2C_RCMD_GSENSOR_DATA, + tmp, 6); + if (ret < 0) { + dev_err(&client->dev, "%s: i2c_read_block fail\n", + __func__); + mutex_unlock(&gsensor_RW_mutex); + return ret; + } + buf[0] = (short)(tmp[0] << 8 | tmp[1]); + buf[0] >>= 6; + buf[1] = (short)(tmp[2] << 8 | tmp[3]); + buf[1] >>= 6; + buf[2] = (short)(tmp[4] << 8 | tmp[5]); + buf[2] >>= 6; + +#ifdef DEBUG_BMA150 + /* Log this to debugfs */ + gsensor_log_status(ktime_get(), buf[0], buf[1], buf[2]); +#endif + + mutex_unlock(&gsensor_RW_mutex); + + return 1; +} + +static int gsensor_init_hw(void) +{ + uint8_t reg; + int ret; + + pr_debug("%s\n", __func__); + + ret = gsensor_read_reg(RANGE_BWIDTH_REG, ®); + if (ret < 0 ) + return -EIO; + reg &= 0xe0; + ret = gsensor_write_reg(RANGE_BWIDTH_REG, reg); + if (ret < 0 ) + return -EIO; + + ret = gsensor_read_reg(SMB150_CONF2_REG, ®); + if (ret < 0 ) + return -EIO; + reg |= (1 << 3); + ret = gsensor_write_reg(SMB150_CONF2_REG, reg); + + return ret; +} + +static int bma150_set_mode(char mode) +{ + uint8_t reg; + int ret; + + mutex_lock(&gsensor_set_mode_mutex); + + pr_debug("%s mode = %d\n", __func__, mode); + if (mode == BMA_MODE_NORMAL) + microp_spi_vote_enable(SPI_GSENSOR, 1); + + ret = gsensor_read_reg(SMB150_CTRL_REG, ®); + if (ret < 0 ) { + 
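+		/* could not read the BMA150 control register; release the
+		 * mode lock and report an I/O error
+		 */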
mutex_unlock(&gsensor_set_mode_mutex); + return -EIO; + } + reg = (reg & 0xfe) | mode; + ret = gsensor_write_reg(SMB150_CTRL_REG, reg); + + if (mode == BMA_MODE_SLEEP) + microp_spi_vote_enable(SPI_GSENSOR, 0); + + mutex_unlock(&gsensor_set_mode_mutex); + + return ret; +} + +static int gsensor_read(uint8_t *data) +{ + int ret; + uint8_t reg = data[0]; + + ret = gsensor_read_reg(reg, &data[1]); + pr_debug("%s reg = %x data = %x\n", __func__, reg, data[1]); + return ret; +} + +static int gsensor_write(uint8_t *data) +{ + int ret; + uint8_t reg = data[0]; + + pr_debug("%s reg = %x data = %x\n", __func__, reg, data[1]); + ret = gsensor_write_reg(reg, data[1]); + return ret; +} + +static DEFINE_MUTEX(bma150_lock); + +static int bma150_open(struct inode *inode, struct file *file) +{ + pr_debug("%s\n", __func__); + return nonseekable_open(inode, file); +} + +static int bma150_release(struct inode *inode, struct file *file) +{ + return 0; +} + +static long bma150_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + char rwbuf[8]; + int ret = -1; + short buf[8], temp; + + switch (cmd) { + case BMA_IOCTL_READ: + case BMA_IOCTL_WRITE: + case BMA_IOCTL_SET_MODE: + if (copy_from_user(&rwbuf, argp, sizeof(rwbuf))) + return -EFAULT; + break; + case BMA_IOCTL_READ_ACCELERATION: + if (copy_from_user(&buf, argp, sizeof(buf))) + return -EFAULT; + break; + default: + break; + } + + mutex_lock(&bma150_lock); + switch (cmd) { + case BMA_IOCTL_INIT: + ret = gsensor_init_hw(); + if (ret < 0) + goto err; + break; + + case BMA_IOCTL_READ: + if (rwbuf[0] < 1) { + ret = -EINVAL; + goto err; + } + ret = gsensor_read(rwbuf); + if (ret < 0) + goto err; + break; + case BMA_IOCTL_WRITE: + if (rwbuf[0] < 2) { + ret = -EINVAL; + goto err; + } + ret = gsensor_write(rwbuf); + if (ret < 0) + goto err; + break; + case BMA_IOCTL_READ_ACCELERATION: + ret = gsensor_read_acceleration(&buf[0]); + if (ret < 0) + goto err; + break; + case BMA_IOCTL_SET_MODE: + bma150_set_mode(rwbuf[0]); + break; + case BMA_IOCTL_GET_INT: + temp = 0; + break; + default: + ret = -ENOTTY; + goto err; + } + mutex_unlock(&bma150_lock); + + switch (cmd) { + case BMA_IOCTL_READ: + if (copy_to_user(argp, &rwbuf, sizeof(rwbuf))) + return -EFAULT; + break; + case BMA_IOCTL_READ_ACCELERATION: + if (copy_to_user(argp, &buf, sizeof(buf))) + return -EFAULT; + break; + case BMA_IOCTL_GET_INT: + if (copy_to_user(argp, &temp, sizeof(temp))) + return -EFAULT; + break; + default: + break; + } + + return 0; +err: + mutex_unlock(&bma150_lock); + return ret; +} + +static struct file_operations bma_fops = { + .owner = THIS_MODULE, + .open = bma150_open, + .release = bma150_release, + .unlocked_ioctl = bma150_ioctl, +}; + +static struct miscdevice spi_bma_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = BMA150_G_SENSOR_NAME, + .fops = &bma_fops, +}; + +/* + * Interrupt + */ +static irqreturn_t microp_i2c_intr_irq_handler(int irq, void *dev_id) +{ + struct i2c_client *client; + struct microp_i2c_client_data *cdata; + + client = to_i2c_client(dev_id); + cdata = i2c_get_clientdata(client); + + dev_dbg(&client->dev, "intr_irq_handler\n"); + + disable_irq_nosync(client->irq); + schedule_work(&cdata->work.work); + return IRQ_HANDLED; +} + +static void microp_int_dispatch(u32 status) +{ + unsigned int mask; + int irq; + + while (status) { + mask = status & -status; + irq = fls(mask) - 1; + status &= ~mask; + generic_handle_irq(FIRST_MICROP_IRQ + irq); + } +} + +static enum hrtimer_restart hr_dispath_irq_func(struct hrtimer 
*data) +{ + struct i2c_client *client = private_microp_client; + struct microp_i2c_client_data *cdata; + + cdata = i2c_get_clientdata(client); + microp_int_dispatch(cdata->intr_status); + cdata->intr_status = 0; + return HRTIMER_NORESTART; +} + +static void microp_i2c_intr_work_func(struct work_struct *work) +{ + struct microp_i2c_work *up_work; + struct i2c_client *client; + struct microp_i2c_client_data *cdata; + uint8_t data[3]; + uint16_t intr_status = 0, gpi_status = 0; + int keycode = 0, ret = 0; + ktime_t zero_debounce; + + up_work = container_of(work, struct microp_i2c_work, work); + client = up_work->client; + cdata = i2c_get_clientdata(client); + + ret = i2c_read_block(client, MICROP_I2C_RCMD_GPI_INT_STATUS, data, 2); + if (ret < 0) { + dev_err(&client->dev, "%s: read interrupt status fail\n", + __func__); + } + + intr_status = data[0]<<8 | data[1]; + ret = i2c_write_block(client, MICROP_I2C_WCMD_GPI_INT_STATUS_CLR, + data, 2); + if (ret < 0) { + dev_err(&client->dev, "%s: clear interrupt status fail\n", + __func__); + } + pr_debug("intr_status=0x%02x\n", intr_status); + + if (intr_status & IRQ_SDCARD) { + microp_read_gpi_status(client, &gpi_status); + bravo_microp_sdslot_update_status(gpi_status); + } + + if (intr_status & IRQ_HEADSETIN) { + cdata->is_hpin_pin_stable = 0; + wake_lock_timeout(µp_i2c_wakelock, 3*HZ); + if (!cdata->headset_is_in) + schedule_delayed_work(&cdata->hpin_debounce_work, + msecs_to_jiffies(500)); + else + schedule_delayed_work(&cdata->hpin_debounce_work, + msecs_to_jiffies(300)); + } + + if (intr_status & IRQ_REMOTEKEY) { + if ((get_remote_keycode(&keycode) == 0) && + (cdata->is_hpin_pin_stable)) { + htc_35mm_key_event(keycode, &cdata->is_hpin_pin_stable); + } + } + + cdata->intr_status = intr_status; + zero_debounce = ktime_set(0, 0); /* No debounce time */ + hrtimer_start(&cdata->gen_irq_timer, zero_debounce, HRTIMER_MODE_REL); + + enable_irq(client->irq); +} + +static int microp_function_initialize(struct i2c_client *client) +{ + struct microp_i2c_client_data *cdata; + uint8_t data[20]; + uint16_t stat, interrupts = 0; + int i; + int ret; + struct led_classdev *led_cdev; + + cdata = i2c_get_clientdata(client); + + /* Headset */ + for (i = 0; i < 6; i++) { + data[i] = (uint8_t)(remote_key_adc_table[i] >> 8); + data[i + 6] = (uint8_t)(remote_key_adc_table[i]); + } + ret = i2c_write_block(client, + MICROP_I2C_WCMD_REMOTEKEY_TABLE, data, 12); + if (ret) + goto exit; + + INIT_DELAYED_WORK( + &cdata->hpin_debounce_work, hpin_debounce_do_work); + + /* SD Card */ + interrupts |= IRQ_SDCARD; + + /* set LED initial state */ + for (i = 0; i < BLUE_LED; i++) { + led_cdev = &cdata->leds[i].ldev; + microp_i2c_write_led_mode(client, led_cdev, 0, 0xffff); + } + +#ifdef CONFIG_OPTICALJOYSTICK_CRUCIAL + /* OJ interrupt */ + interrupts |= IRQ_OJ; +#endif + + /* enable the interrupts */ + ret = microp_interrupt_enable(client, interrupts); + if (ret < 0) { + dev_err(&client->dev, "%s: failed to enable gpi irqs\n", + __func__); + goto err_irq_en; + } + + microp_read_gpi_status(client, &stat); + bravo_microp_sdslot_update_status(stat); + + return 0; + +err_irq_en: + gpio_free(BRAVO_GPIO_LS_EN_N); +exit: + return ret; +} + +#ifdef CONFIG_HAS_EARLYSUSPEND +void microp_early_suspend(struct early_suspend *h) +{ + struct microp_i2c_client_data *cdata; + struct i2c_client *client = private_microp_client; + int ret; + + if (!client) { + pr_err("%s: dataset: client is empty\n", __func__); + return; + } + cdata = i2c_get_clientdata(client); + + cdata->microp_is_suspend = 1; + + 
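+	/* Mask the microp IRQ while early-suspended.  The hard IRQ handler
+	 * disables the IRQ and the interrupt work re-enables it, so when a
+	 * pending work item is cancelled here before it ran, enable_irq()
+	 * is called once to keep the disable depth balanced.
+	 */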
disable_irq(client->irq); + ret = cancel_work_sync(&cdata->work.work); + if (ret != 0) { + enable_irq(client->irq); + } +} + +void microp_early_resume(struct early_suspend *h) +{ + struct i2c_client *client = private_microp_client; + struct microp_i2c_client_data *cdata; + + if (!client) { + pr_err("%s: dataset: client is empty\n", __func__); + return; + } + cdata = i2c_get_clientdata(client); + + cdata->microp_is_suspend = 0; + enable_irq(client->irq); +} +#endif + +static int microp_i2c_suspend(struct i2c_client *client, + pm_message_t mesg) +{ + return 0; +} + +static int microp_i2c_resume(struct i2c_client *client) +{ + return 0; +} + +static void register_microp_devices(struct platform_device *devices, int num) +{ + int i; + for (i = 0; i < num; i++) + platform_device_register((devices + i)); +} + +static struct { + const char *name; + void (*led_set_work)(struct work_struct *); + struct device_attribute **attrs; + int attr_cnt; +} microp_leds[] = { + [GREEN_LED] = { + .name = "green", + .led_set_work = microp_led_brightness_set_work, + .attrs = green_amber_attrs, + .attr_cnt = ARRAY_SIZE(green_amber_attrs) + }, + [AMBER_LED] = { + .name = "amber", + .led_set_work = microp_led_brightness_set_work, + .attrs = green_amber_attrs, + .attr_cnt = ARRAY_SIZE(green_amber_attrs) + }, + [BLUE_LED] = { + .name = "blue", + .led_set_work = microp_led_brightness_gpo_set_work, + .attrs = NULL, + .attr_cnt = 0 + }, + [BUTTONS_LED] = { + .name = "button-backlight", + .led_set_work = microp_led_buttons_brightness_set_work + }, +}; + +static int microp_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct microp_i2c_client_data *cdata; + struct microp_i2c_platform_data *pdata; + uint8_t data[6]; + int ret; + int i; + int j; + + private_microp_client = client; + cdata = kzalloc(sizeof(struct microp_i2c_client_data), GFP_KERNEL); + if (!cdata) { + ret = -ENOMEM; + dev_err(&client->dev, "failed on allocat cdata\n"); + goto err_cdata; + } + + i2c_set_clientdata(client, cdata); + + mutex_init(&cdata->microp_adc_mutex); + mutex_init(&cdata->microp_i2c_rw_mutex); + + pdata = client->dev.platform_data; + if (!pdata) { + ret = -EBUSY; + dev_err(&client->dev, "failed on get pdata\n"); + goto err_exit; + } + pdata->dev_id = (void *)&client->dev; + + ret = i2c_read_block(client, MICROP_I2C_RCMD_VERSION, data, 2); + if (ret || !(data[0] && data[1])) { + ret = -ENODEV; + dev_err(&client->dev, "failed on get microp version\n"); + goto err_exit; + } + dev_info(&client->dev, "microp version [%02X][%02X]\n", + data[0], data[1]); + + ret = gpio_request(BRAVO_GPIO_UP_RESET_N, "microp_i2c_wm"); + if (ret < 0) { + dev_err(&client->dev, "failed on request gpio reset\n"); + goto err_exit; + } + ret = gpio_direction_output(BRAVO_GPIO_UP_RESET_N, 1); + if (ret < 0) { + dev_err(&client->dev, + "failed on gpio_direction_output reset\n"); + goto err_gpio_reset; + } + + cdata->version = data[0] << 8 | data[1]; + cdata->microp_is_suspend = 0; + cdata->spi_devices_vote = 0; + cdata->spi_devices = SPI_OJ | SPI_GSENSOR; + + cdata->intr_status = 0; + hrtimer_init(&cdata->gen_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + cdata->gen_irq_timer.function = hr_dispath_irq_func; + + wake_lock_init(µp_i2c_wakelock, WAKE_LOCK_SUSPEND, + "microp_i2c_present"); + + /* LEDs */ + ret = 0; + for (i = 0; i < ARRAY_SIZE(microp_leds) && !ret; ++i) { + struct microp_led_data *ldata = &cdata->leds[i]; + + ldata->type = i; + ldata->ldev.name = microp_leds[i].name; + ldata->ldev.brightness_set = microp_brightness_set; + 
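+		/* per-LED state: a mutex guarding the mode/blink settings, a
+		 * work item for deferred brightness updates and a spinlock
+		 * protecting the cached brightness value
+		 */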
mutex_init(&ldata->led_data_mutex); + INIT_WORK(&ldata->brightness_work, microp_leds[i].led_set_work); + spin_lock_init(&ldata->brightness_lock); + ret = led_classdev_register(&client->dev, &ldata->ldev); + if (ret) { + ldata->ldev.name = NULL; + break; + } + + for (j = 0; j < microp_leds[i].attr_cnt && !ret; ++j) + ret = device_create_file(ldata->ldev.dev, + microp_leds[i].attrs[j]); + } + if (ret) { + dev_err(&client->dev, "failed to add leds\n"); + goto err_add_leds; + } + + /* Headset */ + cdata->headset_is_in = 0; + cdata->is_hpin_pin_stable = 1; + platform_device_register(&bravo_h35mm); + + ret = device_create_file(&client->dev, &dev_attr_key_adc); + + /* G-sensor */ + ret = misc_register(&spi_bma_device); + if (ret < 0) { + pr_err("%s: init bma150 misc_register fail\n", + __func__); + goto err_register_bma150; + } + + mutex_init(&gsensor_RW_mutex); + mutex_init(&gsensor_set_mode_mutex); + + microp_spi_vote_enable(SPI_GSENSOR, 1); + +#ifdef DEBUG_BMA150 + debugfs_create_file("gsensor_log", 0444, NULL, NULL, &gsensor_log_fops); +#endif + /* Setup IRQ handler */ + INIT_WORK(&cdata->work.work, microp_i2c_intr_work_func); + cdata->work.client = client; + + ret = request_irq(client->irq, + microp_i2c_intr_irq_handler, + IRQF_TRIGGER_LOW, + "microp_interrupt", + &client->dev); + if (ret) { + dev_err(&client->dev, "request_irq failed\n"); + goto err_intr; + } + ret = set_irq_wake(client->irq, 1); + if (ret) { + dev_err(&client->dev, "set_irq_wake failed\n"); + goto err_intr; + } + +#ifdef CONFIG_HAS_EARLYSUSPEND + if (cdata->enable_early_suspend) { + cdata->early_suspend.level = + EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1; + cdata->early_suspend.suspend = microp_early_suspend; + cdata->early_suspend.resume = microp_early_resume; + register_early_suspend(&cdata->early_suspend); + } +#endif + + ret = microp_function_initialize(client); + if (ret) { + dev_err(&client->dev, "failed on microp function initialize\n"); + goto err_fun_init; + } + + register_microp_devices(pdata->microp_devices, pdata->num_devices); + + return 0; + +err_fun_init: +err_intr: + misc_deregister(&spi_bma_device); + +err_register_bma150: + platform_device_unregister(&bravo_h35mm); + device_remove_file(&client->dev, &dev_attr_key_adc); + +err_add_leds: + for (i = 0; i < ARRAY_SIZE(microp_leds); ++i) { + if (!cdata->leds[i].ldev.name) + continue; + led_classdev_unregister(&cdata->leds[i].ldev); + for (j = 0; j < microp_leds[i].attr_cnt; ++j) + device_remove_file(cdata->leds[i].ldev.dev, + microp_leds[i].attrs[j]); + } + + wake_lock_destroy(µp_i2c_wakelock); + kfree(cdata); + i2c_set_clientdata(client, NULL); + +err_cdata: +err_gpio_reset: + gpio_free(BRAVO_GPIO_UP_RESET_N); +err_exit: + return ret; +} + +static int __devexit microp_i2c_remove(struct i2c_client *client) +{ + struct microp_i2c_client_data *cdata; + int i; + int j; + + cdata = i2c_get_clientdata(client); + + for (i = 0; i < ARRAY_SIZE(microp_leds); ++i) { + struct microp_led_data *ldata = &cdata->leds[i]; + cancel_work_sync(&ldata->brightness_work); + } + +#ifdef CONFIG_HAS_EARLYSUSPEND + if (cdata->enable_early_suspend) { + unregister_early_suspend(&cdata->early_suspend); + } +#endif + + for (i = 0; i < ARRAY_SIZE(microp_leds); ++i) { + if (!cdata->leds[i].ldev.name) + continue; + led_classdev_unregister(&cdata->leds[i].ldev); + for (j = 0; j < microp_leds[i].attr_cnt; ++j) + device_remove_file(cdata->leds[i].ldev.dev, + microp_leds[i].attrs[j]); + } + + free_irq(client->irq, &client->dev); + + gpio_free(BRAVO_GPIO_UP_RESET_N); + + 
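+	/* unregister the headset (h35mm) platform device and the BMA150
+	 * G-sensor misc device that were registered in probe
+	 */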
platform_device_unregister(&bravo_h35mm);
+
+	/* G-sensor */
+	misc_deregister(&spi_bma_device);
+
+	kfree(cdata);
+
+	return 0;
+}
+
+static const struct i2c_device_id microp_i2c_id[] = {
+	{ MICROP_I2C_NAME, 0 },
+	{ }
+};
+
+static struct i2c_driver microp_i2c_driver = {
+	.driver = {
+		.name = MICROP_I2C_NAME,
+	},
+	.id_table = microp_i2c_id,
+	.probe = microp_i2c_probe,
+	.suspend = microp_i2c_suspend,
+	.resume = microp_i2c_resume,
+	.remove = __devexit_p(microp_i2c_remove),
+};
+
+static void microp_irq_ack(unsigned int irq)
+{
+	;
+}
+
+static void microp_irq_mask(unsigned int irq)
+{
+	;
+}
+
+static void microp_irq_unmask(unsigned int irq)
+{
+	;
+}
+
+static struct irq_chip microp_irq_chip = {
+	.name = "microp",
+	.disable = microp_irq_mask,
+	.ack = microp_irq_ack,
+	.mask = microp_irq_mask,
+	.unmask = microp_irq_unmask,
+};
+
+static int __init microp_i2c_init(void)
+{
+	int n, MICROP_IRQ_END = FIRST_MICROP_IRQ + NR_MICROP_IRQS;
+	for (n = FIRST_MICROP_IRQ; n < MICROP_IRQ_END; n++) {
+		set_irq_chip(n, &microp_irq_chip);
+		set_irq_handler(n, handle_level_irq);
+		set_irq_flags(n, IRQF_VALID);
+	}
+
+	return i2c_add_driver(&microp_i2c_driver);
+}
+
+static void __exit microp_i2c_exit(void)
+{
+	i2c_del_driver(&microp_i2c_driver);
+}
+
+module_init(microp_i2c_init);
+module_exit(microp_i2c_exit);
+
+MODULE_AUTHOR("Eric Olsen ");
+MODULE_DESCRIPTION("MicroP I2C driver");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-msm/board-bravo-mmc.c b/arch/arm/mach-msm/board-bravo-mmc.c
new file mode 100644
index 0000000000000..c47acf718e581
--- /dev/null
+++ b/arch/arm/mach-msm/board-bravo-mmc.c
@@ -0,0 +1,446 @@
+/* linux/arch/arm/mach-msm/board-bravo-mmc.c
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Copyright (C) 2009 HTC Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#include "board-bravo.h" +#include "devices.h" +#include "proc_comm.h" + +#undef BRAVO_DEBUG_MMC + +static bool opt_disable_sdcard; +static int __init bravo_disablesdcard_setup(char *str) +{ + opt_disable_sdcard = (bool)simple_strtol(str, NULL, 0); + return 1; +} + +__setup("board_bravo.disable_sdcard=", bravo_disablesdcard_setup); + +static uint32_t sdcard_on_gpio_table[] = { + PCOM_GPIO_CFG(62, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* CLK */ + PCOM_GPIO_CFG(63, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* CMD */ + PCOM_GPIO_CFG(64, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(65, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(66, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(67, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT0 */ +}; + +static uint32_t sdcard_off_gpio_table[] = { + PCOM_GPIO_CFG(62, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CLK */ + PCOM_GPIO_CFG(63, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CMD */ + PCOM_GPIO_CFG(64, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(65, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(66, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(67, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT0 */ +}; + +static struct vreg *sdslot_vreg; +static uint32_t sdslot_vdd = 0xffffffff; +static uint32_t sdslot_vreg_enabled; + +static struct { + int mask; + int level; +} mmc_vdd_table[] = { + { MMC_VDD_165_195, 1800 }, + { MMC_VDD_20_21, 2050 }, + { MMC_VDD_21_22, 2150 }, + { MMC_VDD_22_23, 2250 }, + { MMC_VDD_23_24, 2350 }, + { MMC_VDD_24_25, 2450 }, + { MMC_VDD_25_26, 2550 }, + { MMC_VDD_26_27, 2650 }, + { MMC_VDD_27_28, 2750 }, + { MMC_VDD_28_29, 2850 }, + { MMC_VDD_29_30, 2950 }, +}; + +static uint32_t bravo_sdslot_switchvdd(struct device *dev, unsigned int vdd) +{ + int i; + int ret; + + if (vdd == sdslot_vdd) + return 0; + + sdslot_vdd = vdd; + + if (vdd == 0) { + config_gpio_table(sdcard_off_gpio_table, + ARRAY_SIZE(sdcard_off_gpio_table)); + vreg_disable(sdslot_vreg); + sdslot_vreg_enabled = 0; + return 0; + } + + if (!sdslot_vreg_enabled) { + ret = vreg_enable(sdslot_vreg); + if (ret) + pr_err("%s: Error enabling vreg (%d)\n", __func__, ret); + config_gpio_table(sdcard_on_gpio_table, + ARRAY_SIZE(sdcard_on_gpio_table)); + sdslot_vreg_enabled = 1; + } + + for (i = 0; i < ARRAY_SIZE(mmc_vdd_table); i++) { + if (mmc_vdd_table[i].mask != (1 << vdd)) + continue; + ret = vreg_set_level(sdslot_vreg, mmc_vdd_table[i].level); + if (ret) + pr_err("%s: Error setting level (%d)\n", __func__, ret); + return 0; + } + + pr_err("%s: Invalid VDD (%d) specified\n", __func__, vdd); + return 0; +} + +static uint32_t bravo_cdma_sdslot_switchvdd(struct device *dev, unsigned int vdd) +{ + if (!vdd == !sdslot_vdd) + return 0; + + /* In CDMA version, the vdd of sdslot is not configurable, and it is + * fixed in 2.85V by hardware design. + */ + + sdslot_vdd = vdd ? 
MMC_VDD_28_29 : 0; + + if (vdd) { + gpio_set_value(BRAVO_CDMA_SD_2V85_EN, 1); + config_gpio_table(sdcard_on_gpio_table, + ARRAY_SIZE(sdcard_on_gpio_table)); + } else { + config_gpio_table(sdcard_off_gpio_table, + ARRAY_SIZE(sdcard_off_gpio_table)); + gpio_set_value(BRAVO_CDMA_SD_2V85_EN, 0); + } + + sdslot_vreg_enabled = !!vdd; + + return 0; +} + +static unsigned int bravo_sdslot_status(struct device *dev) +{ + return !gpio_get_value(BRAVO_GPIO_SDMC_CD_N); +} + +#define BRAVO_MMC_VDD (MMC_VDD_165_195 | MMC_VDD_20_21 | \ + MMC_VDD_21_22 | MMC_VDD_22_23 | \ + MMC_VDD_23_24 | MMC_VDD_24_25 | \ + MMC_VDD_25_26 | MMC_VDD_26_27 | \ + MMC_VDD_27_28 | MMC_VDD_28_29 | \ + MMC_VDD_29_30) + +int bravo_microp_sdslot_status_register(void (*cb)(int, void *), void *); +unsigned int bravo_microp_sdslot_status(struct device *); + +static struct msm_mmc_platform_data bravo_sdslot_data = { + .ocr_mask = BRAVO_MMC_VDD, + .status = bravo_microp_sdslot_status, + .register_status_notify = bravo_microp_sdslot_status_register, + .translate_vdd = bravo_sdslot_switchvdd, +}; + +static uint32_t wifi_on_gpio_table[] = { + PCOM_GPIO_CFG(51, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(52, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(53, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(54, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT0 */ + PCOM_GPIO_CFG(55, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* CMD */ + PCOM_GPIO_CFG(56, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* CLK */ + PCOM_GPIO_CFG(152, 0, GPIO_INPUT, GPIO_NO_PULL, GPIO_4MA), /* WLAN IRQ */ +}; + +static uint32_t wifi_off_gpio_table[] = { + PCOM_GPIO_CFG(51, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(52, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(53, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(54, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT0 */ + PCOM_GPIO_CFG(55, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CMD */ + PCOM_GPIO_CFG(56, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CLK */ + PCOM_GPIO_CFG(152, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* WLAN IRQ */ +}; + +/* BCM4329 returns wrong sdio_vsn(1) when we read cccr, + * we use predefined value (sdio_vsn=2) here to initial sdio driver well + */ +static struct embedded_sdio_data bravo_wifi_emb_data = { + .cccr = { + .sdio_vsn = 2, + .multi_block = 1, + .low_speed = 0, + .wide_bus = 0, + .high_power = 1, + .high_speed = 1, + }, + .cis = { + .vendor = 0x02d0, + .device = 0x4329, + }, +}; + +static int bravo_wifi_cd = 0; /* WIFI virtual 'card detect' status */ +static void (*wifi_status_cb)(int card_present, void *dev_id); +static void *wifi_status_cb_devid; + +static int bravo_wifi_status_register( + void (*callback)(int card_present, void *dev_id), + void *dev_id) +{ + if (wifi_status_cb) + return -EAGAIN; + wifi_status_cb = callback; + wifi_status_cb_devid = dev_id; + return 0; +} + +static unsigned int bravo_wifi_status(struct device *dev) +{ + return bravo_wifi_cd; +} + +static struct msm_mmc_platform_data bravo_wifi_data = { + .ocr_mask = MMC_VDD_28_29, + .built_in = 1, + .status = bravo_wifi_status, + .register_status_notify = bravo_wifi_status_register, + .embedded_sdio = &bravo_wifi_emb_data, +}; + +int bravo_wifi_set_carddetect(int val) +{ + pr_info("%s: %d\n", __func__, val); + bravo_wifi_cd = val; + if (wifi_status_cb) { + wifi_status_cb(val, wifi_status_cb_devid); + } else + pr_warning("%s: Nobody to notify\n", __func__); + return 0; +} + +static int 
bravo_wifi_power_state; + +int bravo_wifi_power(int on) +{ + printk("%s: %d\n", __func__, on); + + if (on) { + config_gpio_table(wifi_on_gpio_table, + ARRAY_SIZE(wifi_on_gpio_table)); + mdelay(50); + } else { + config_gpio_table(wifi_off_gpio_table, + ARRAY_SIZE(wifi_off_gpio_table)); + } + + mdelay(100); + gpio_set_value(BRAVO_GPIO_WIFI_SHUTDOWN_N, on); /* WIFI_SHUTDOWN */ + mdelay(200); + + bravo_wifi_power_state = on; + return 0; +} + +static int bravo_wifi_reset_state; + +int bravo_wifi_reset(int on) +{ + printk("%s: do nothing\n", __func__); + bravo_wifi_reset_state = on; + return 0; +} + +int __init bravo_init_mmc(unsigned int sys_rev, unsigned debug_uart) +{ + uint32_t id; + + printk("%s()+\n", __func__); + + /* initial WIFI_SHUTDOWN# */ + id = PCOM_GPIO_CFG(BRAVO_GPIO_WIFI_SHUTDOWN_N, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); + + msm_add_sdcc(1, &bravo_wifi_data, 0, 0); + + if (debug_uart) { + pr_info("%s: sdcard disabled due to debug uart\n", __func__); + goto done; + } + if (opt_disable_sdcard) { + pr_info("%s: sdcard disabled on cmdline\n", __func__); + goto done; + } + + sdslot_vreg_enabled = 0; + + if (is_cdma_version(sys_rev)) { + /* In the CDMA version, sdslot is supplied by a gpio. */ + int rc = gpio_request(BRAVO_CDMA_SD_2V85_EN, "sdslot_en"); + if (rc < 0) { + pr_err("%s: gpio_request(%d) failed: %d\n", __func__, + BRAVO_CDMA_SD_2V85_EN, rc); + return rc; + } + bravo_sdslot_data.translate_vdd = bravo_cdma_sdslot_switchvdd; + } else { + /* in UMTS version, sdslot is supplied by pmic */ + sdslot_vreg = vreg_get(0, "gp6"); + if (IS_ERR(sdslot_vreg)) + return PTR_ERR(sdslot_vreg); + } + +// if (system_rev > 0) + bravo_sdslot_data.status = bravo_sdslot_status; + msm_add_sdcc(2, &bravo_sdslot_data, 0, 0); +// else { +// bravo_sdslot_data.status = bravo_sdslot_status; +// bravo_sdslot_data.register_status_notify = NULL; +// set_irq_wake(MSM_GPIO_TO_INT(BRAVO_GPIO_SDMC_CD_N), 1); +// msm_add_sdcc(2, &bravo_sdslot_data, +// MSM_GPIO_TO_INT(BRAVO_GPIO_SDMC_CD_N), +// IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE); +// } + +done: + printk("%s()-\n", __func__); + return 0; +} + +#if defined(BRAVO_DEBUG_MMC) && defined(CONFIG_DEBUG_FS) + +static int bravommc_dbg_wifi_reset_set(void *data, u64 val) +{ + bravo_wifi_reset((int) val); + return 0; +} + +static int bravommc_dbg_wifi_reset_get(void *data, u64 *val) +{ + *val = bravo_wifi_reset_state; + return 0; +} + +static int bravommc_dbg_wifi_cd_set(void *data, u64 val) +{ + bravo_wifi_set_carddetect((int) val); + return 0; +} + +static int bravommc_dbg_wifi_cd_get(void *data, u64 *val) +{ + *val = bravo_wifi_cd; + return 0; +} + +static int bravommc_dbg_wifi_pwr_set(void *data, u64 val) +{ + bravo_wifi_power((int) val); + return 0; +} + +static int bravommc_dbg_wifi_pwr_get(void *data, u64 *val) +{ + *val = bravo_wifi_power_state; + return 0; +} + +static int bravommc_dbg_sd_pwr_set(void *data, u64 val) +{ + bravo_sdslot_switchvdd(NULL, (unsigned int) val); + return 0; +} + +static int bravommc_dbg_sd_pwr_get(void *data, u64 *val) +{ + *val = sdslot_vdd; + return 0; +} + +static int bravommc_dbg_sd_cd_set(void *data, u64 val) +{ + return -ENOSYS; +} + +static int bravommc_dbg_sd_cd_get(void *data, u64 *val) +{ + *val = bravo_sdslot_data.status(NULL); + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(bravommc_dbg_wifi_reset_fops, + bravommc_dbg_wifi_reset_get, + bravommc_dbg_wifi_reset_set, "%llu\n"); + +DEFINE_SIMPLE_ATTRIBUTE(bravommc_dbg_wifi_cd_fops, + bravommc_dbg_wifi_cd_get, + 
bravommc_dbg_wifi_cd_set, "%llu\n"); + +DEFINE_SIMPLE_ATTRIBUTE(bravommc_dbg_wifi_pwr_fops, + bravommc_dbg_wifi_pwr_get, + bravommc_dbg_wifi_pwr_set, "%llu\n"); + +DEFINE_SIMPLE_ATTRIBUTE(bravommc_dbg_sd_pwr_fops, + bravommc_dbg_sd_pwr_get, + bravommc_dbg_sd_pwr_set, "%llu\n"); + +DEFINE_SIMPLE_ATTRIBUTE(bravommc_dbg_sd_cd_fops, + bravommc_dbg_sd_cd_get, + bravommc_dbg_sd_cd_set, "%llu\n"); + +static int __init bravommc_dbg_init(void) +{ + struct dentry *dent; + + if (!machine_is_bravo() && !machine_is_bravoc()) + return 0; + + dent = debugfs_create_dir("bravo_mmc_dbg", 0); + if (IS_ERR(dent)) + return PTR_ERR(dent); + + debugfs_create_file("wifi_reset", 0644, dent, NULL, + &bravommc_dbg_wifi_reset_fops); + debugfs_create_file("wifi_cd", 0644, dent, NULL, + &bravommc_dbg_wifi_cd_fops); + debugfs_create_file("wifi_pwr", 0644, dent, NULL, + &bravommc_dbg_wifi_pwr_fops); + debugfs_create_file("sd_pwr", 0644, dent, NULL, + &bravommc_dbg_sd_pwr_fops); + debugfs_create_file("sd_cd", 0644, dent, NULL, + &bravommc_dbg_sd_cd_fops); + return 0; +} + +device_initcall(bravommc_dbg_init); +#endif diff --git a/arch/arm/mach-msm/board-bravo-panel.c b/arch/arm/mach-msm/board-bravo-panel.c new file mode 100644 index 0000000000000..76b39f0ea5663 --- /dev/null +++ b/arch/arm/mach-msm/board-bravo-panel.c @@ -0,0 +1,1271 @@ +/* arch/arm/mach-msm/board-bravo-panel.c + * + * Copyright (c) 2009 Google Inc. + * Author: Dima Zavin + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include "proc_comm.h" + +#include "board-bravo.h" +#include "devices.h" + +#define SPI_CONFIG (0x00000000) +#define SPI_IO_CONTROL (0x00000004) +#define SPI_OPERATIONAL (0x00000030) +#define SPI_ERROR_FLAGS_EN (0x00000038) +#define SPI_ERROR_FLAGS (0x00000038) +#define SPI_OUTPUT_FIFO (0x00000100) + +static void __iomem *spi_base; +static struct clk *spi_clk ; +static struct vreg *vreg_lcm_rftx_2v6; +static struct vreg *vreg_lcm_aux_2v6; + +#define SAMSUNG_PANEL 0 +/*Bitwise mask for SONY PANEL ONLY*/ +#define SONY_PANEL 0x1 /*Set bit 0 as 1 when it is SONY PANEL*/ +#define SONY_PWM_SPI 0x2 /*Set bit 1 as 1 as PWM_SPI mode, otherwise it is PWM_MICROP mode*/ +#define SONY_GAMMA 0x4 /*Set bit 2 as 1 when panel contains GAMMA table in its NVM*/ +#define SONY_RGB666 0x8 /*Set bit 3 as 1 when panel is 18 bit, otherwise it is 16 bit*/ + +extern int panel_type; + +static int is_sony_spi(void) +{ + return (panel_type & SONY_PWM_SPI ? 1 : 0); +} + +static int is_sony_with_gamma(void) +{ + return (panel_type & SONY_GAMMA ? 1 : 0); +} + +static int is_sony_RGB666(void) +{ + return (panel_type & SONY_RGB666 ? 
1 : 0); +} + +static int qspi_send(uint32_t id, uint8_t data) +{ + uint32_t err; + + /* bit-5: OUTPUT_FIFO_NOT_EMPTY */ + while (readl(spi_base + SPI_OPERATIONAL) & (1<<5)) { + if ((err = readl(spi_base + SPI_ERROR_FLAGS))) { + pr_err("%s: ERROR: SPI_ERROR_FLAGS=0x%08x\n", __func__, + err); + return -EIO; + } + } + writel((0x7000 | (id << 9) | data) << 16, spi_base + SPI_OUTPUT_FIFO); + udelay(100); + + return 0; +} + +static int qspi_send_9bit(uint32_t id, uint8_t data) +{ + uint32_t err; + + while (readl(spi_base + SPI_OPERATIONAL) & (1<<5)) { + err = readl(spi_base + SPI_ERROR_FLAGS); + if (err) { + pr_err("%s: ERROR: SPI_ERROR_FLAGS=0x%08x\n", __func__, + err); + return -EIO; + } + } + writel(((id << 8) | data) << 23, spi_base + SPI_OUTPUT_FIFO); + udelay(100); + + return 0; +} + +static int lcm_writeb(uint8_t reg, uint8_t val) +{ + qspi_send(0x0, reg); + qspi_send(0x1, val); + return 0; +} + +static int lcm_writew(uint8_t reg, uint16_t val) +{ + qspi_send(0x0, reg); + qspi_send(0x1, val >> 8); + qspi_send(0x1, val & 0xff); + return 0; +} + +static struct resource resources_msm_fb[] = { + { + .start = MSM_FB_BASE, + .end = MSM_FB_BASE + MSM_FB_SIZE - 1, + .flags = IORESOURCE_MEM, + }, +}; + +struct lcm_tbl { + uint8_t reg; + uint8_t val; +}; + +static struct lcm_tbl samsung_oled_rgb565_init_table[] = { + { 0x31, 0x08 }, + { 0x32, 0x14 }, + { 0x30, 0x2 }, + { 0x27, 0x1 }, + { 0x12, 0x8 }, + { 0x13, 0x8 }, + { 0x15, 0x0 }, + { 0x16, 0x02 }, + { 0x39, 0x24 }, + { 0x17, 0x22 }, + { 0x18, 0x33 }, + { 0x19, 0x3 }, + { 0x1A, 0x1 }, + { 0x22, 0xA4 }, + { 0x23, 0x0 }, + { 0x26, 0xA0 }, +}; + +static struct lcm_tbl samsung_oled_rgb666_init_table[] = { + { 0x31, 0x08 }, + { 0x32, 0x14 }, + { 0x30, 0x2 }, + { 0x27, 0x1 }, + { 0x12, 0x8 }, + { 0x13, 0x8 }, + { 0x15, 0x0 }, + { 0x16, 0x01 }, + { 0x39, 0x24 }, + { 0x17, 0x22 }, + { 0x18, 0x33 }, + { 0x19, 0x3 }, + { 0x1A, 0x1 }, + { 0x22, 0xA4 }, + { 0x23, 0x0 }, + { 0x26, 0xA0 }, +}; + +static struct lcm_tbl *init_tablep = samsung_oled_rgb565_init_table; +static size_t init_table_sz = ARRAY_SIZE(samsung_oled_rgb565_init_table); + +#define OLED_GAMMA_TABLE_SIZE (7 * 3) +static struct lcm_tbl samsung_oled_gamma_table[][OLED_GAMMA_TABLE_SIZE] = { + /* level 10 */ + { + /* Gamma-R */ + { 0x40, 0x0 }, + { 0x41, 0x3f }, + { 0x42, 0x3f }, + { 0x43, 0x35 }, + { 0x44, 0x30 }, + { 0x45, 0x2c }, + { 0x46, 0x13 }, + /* Gamma -G */ + { 0x50, 0x0 }, + { 0x51, 0x0 }, + { 0x52, 0x0 }, + { 0x53, 0x0 }, + { 0x54, 0x27 }, + { 0x55, 0x2b }, + { 0x56, 0x12 }, + /* Gamma -B */ + { 0x60, 0x0 }, + { 0x61, 0x3f }, + { 0x62, 0x3f }, + { 0x63, 0x34 }, + { 0x64, 0x2f }, + { 0x65, 0x2b }, + { 0x66, 0x1b }, + }, + + /* level 40 */ + { + /* Gamma -R */ + { 0x40, 0x0 }, + { 0x41, 0x3f }, + { 0x42, 0x3e }, + { 0x43, 0x2e }, + { 0x44, 0x2d }, + { 0x45, 0x28 }, + { 0x46, 0x21 }, + /* Gamma -G */ + { 0x50, 0x0 }, + { 0x51, 0x0 }, + { 0x52, 0x0 }, + { 0x53, 0x21 }, + { 0x54, 0x2a }, + { 0x55, 0x28 }, + { 0x56, 0x20 }, + /* Gamma -B */ + { 0x60, 0x0 }, + { 0x61, 0x3f }, + { 0x62, 0x3e }, + { 0x63, 0x2d }, + { 0x64, 0x2b }, + { 0x65, 0x26 }, + { 0x66, 0x2d }, + }, + + /* level 70 */ + { + /* Gamma -R */ + { 0x40, 0x0 }, + { 0x41, 0x3f }, + { 0x42, 0x35 }, + { 0x43, 0x2c }, + { 0x44, 0x2b }, + { 0x45, 0x26 }, + { 0x46, 0x29 }, + /* Gamma -G */ + { 0x50, 0x0 }, + { 0x51, 0x0 }, + { 0x52, 0x0 }, + { 0x53, 0x25 }, + { 0x54, 0x29 }, + { 0x55, 0x26 }, + { 0x56, 0x28 }, + /* Gamma -B */ + { 0x60, 0x0 }, + { 0x61, 0x3f }, + { 0x62, 0x34 }, + { 0x63, 0x2b }, + { 0x64, 0x2a }, + { 0x65, 0x23 }, + 
{ 0x66, 0x37 }, + }, + + /* level 100 */ + { + /* Gamma -R */ + { 0x40, 0x0 }, + { 0x41, 0x3f }, + { 0x42, 0x30 }, + { 0x43, 0x2a }, + { 0x44, 0x2b }, + { 0x45, 0x24 }, + { 0x46, 0x2f }, + /* Gamma -G */ + { 0x50, 0x0 }, + { 0x51, 0x0 }, + { 0x52, 0x0 }, + { 0x53, 0x25 }, + { 0x54, 0x29 }, + { 0x55, 0x24 }, + { 0x56, 0x2e }, + /* Gamma -B */ + { 0x60, 0x0 }, + { 0x61, 0x3f }, + { 0x62, 0x2f }, + { 0x63, 0x29 }, + { 0x64, 0x29 }, + { 0x65, 0x21 }, + { 0x66, 0x3f }, + }, + + /* level 130 */ + { + /* Gamma -R */ + { 0x40, 0x0 }, + { 0x41, 0x3f }, + { 0x42, 0x2e }, + { 0x43, 0x29 }, + { 0x44, 0x2a }, + { 0x45, 0x23 }, + { 0x46, 0x34 }, + /* Gamma -G */ + { 0x50, 0x0 }, + { 0x51, 0x0 }, + { 0x52, 0xa }, + { 0x53, 0x25 }, + { 0x54, 0x28 }, + { 0x55, 0x23 }, + { 0x56, 0x33 }, + /* Gamma -B */ + { 0x60, 0x0 }, + { 0x61, 0x3f }, + { 0x62, 0x2d }, + { 0x63, 0x28 }, + { 0x64, 0x27 }, + { 0x65, 0x20 }, + { 0x66, 0x46 }, + }, + + /* level 160 */ + { + /* Gamma -R */ + { 0x40, 0x0 }, + { 0x41, 0x3f }, + { 0x42, 0x2b }, + { 0x43, 0x29 }, + { 0x44, 0x28 }, + { 0x45, 0x23 }, + { 0x46, 0x38 }, + /* Gamma -G */ + { 0x50, 0x0 }, + { 0x51, 0x0 }, + { 0x52, 0xb }, + { 0x53, 0x25 }, + { 0x54, 0x27 }, + { 0x55, 0x23 }, + { 0x56, 0x37 }, + /* Gamma -B */ + { 0x60, 0x0 }, + { 0x61, 0x3f }, + { 0x62, 0x29 }, + { 0x63, 0x28 }, + { 0x64, 0x25 }, + { 0x65, 0x20 }, + { 0x66, 0x4b }, + }, + + /* level 190 */ + { + /* Gamma -R */ + { 0x40, 0x0 }, + { 0x41, 0x3f }, + { 0x42, 0x29 }, + { 0x43, 0x29 }, + { 0x44, 0x27 }, + { 0x45, 0x22 }, + { 0x46, 0x3c }, + /* Gamma -G */ + { 0x50, 0x0 }, + { 0x51, 0x0 }, + { 0x52, 0x10 }, + { 0x53, 0x26 }, + { 0x54, 0x26 }, + { 0x55, 0x22 }, + { 0x56, 0x3b }, + /* Gamma -B */ + { 0x60, 0x0 }, + { 0x61, 0x3f }, + { 0x62, 0x28 }, + { 0x63, 0x28 }, + { 0x64, 0x24 }, + { 0x65, 0x1f }, + { 0x66, 0x50 }, + }, + + /* level 220 */ + { + /* Gamma -R */ + { 0x40, 0x0 }, + { 0x41, 0x3f }, + { 0x42, 0x28 }, + { 0x43, 0x28 }, + { 0x44, 0x28 }, + { 0x45, 0x20 }, + { 0x46, 0x40 }, + /* Gamma -G */ + { 0x50, 0x0 }, + { 0x51, 0x0 }, + { 0x52, 0x11 }, + { 0x53, 0x25 }, + { 0x54, 0x27 }, + { 0x55, 0x20 }, + { 0x56, 0x3f }, + /* Gamma -B */ + { 0x60, 0x0 }, + { 0x61, 0x3f }, + { 0x62, 0x27 }, + { 0x63, 0x26 }, + { 0x64, 0x26 }, + { 0x65, 0x1c }, + { 0x66, 0x56 }, + }, + + /* level 250 */ + { + /* Gamma -R */ + { 0x40, 0x0 }, + { 0x41, 0x3f }, + { 0x42, 0x2a }, + { 0x43, 0x27 }, + { 0x44, 0x27 }, + { 0x45, 0x1f }, + { 0x46, 0x44 }, + /* Gamma -G */ + { 0x50, 0x0 }, + { 0x51, 0x0 }, + { 0x52, 0x17 }, + { 0x53, 0x24 }, + { 0x54, 0x26 }, + { 0x55, 0x1f }, + { 0x56, 0x43 }, + /* Gamma -B */ + { 0x60, 0x0 }, + { 0x61, 0x3f }, + { 0x62, 0x2a }, + { 0x63, 0x25 }, + { 0x64, 0x24 }, + { 0x65, 0x1b }, + { 0x66, 0x5c }, + }, +}; +#define SAMSUNG_OLED_NUM_LEVELS ARRAY_SIZE(samsung_oled_gamma_table) + +#define SAMSUNG_OLED_MIN_VAL 10 +#define SAMSUNG_OLED_MAX_VAL 250 +#define SAMSUNG_OLED_DEFAULT_VAL (SAMSUNG_OLED_MIN_VAL + \ + (SAMSUNG_OLED_MAX_VAL - \ + SAMSUNG_OLED_MIN_VAL) / 2) + +#define SAMSUNG_OLED_LEVEL_STEP ((SAMSUNG_OLED_MAX_VAL - \ + SAMSUNG_OLED_MIN_VAL) / \ + (SAMSUNG_OLED_NUM_LEVELS - 1)) + +#define LCM_GPIO_CFG(gpio, func) \ +PCOM_GPIO_CFG(gpio, func, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA) +static uint32_t samsung_oled_on_gpio_table[] = { + LCM_GPIO_CFG(BRAVO_LCD_R1, 1), + LCM_GPIO_CFG(BRAVO_LCD_R2, 1), + LCM_GPIO_CFG(BRAVO_LCD_R3, 1), + LCM_GPIO_CFG(BRAVO_LCD_R4, 1), + LCM_GPIO_CFG(BRAVO_LCD_R5, 1), + LCM_GPIO_CFG(BRAVO_LCD_G0, 1), + LCM_GPIO_CFG(BRAVO_LCD_G1, 1), + LCM_GPIO_CFG(BRAVO_LCD_G2, 1), + 
LCM_GPIO_CFG(BRAVO_LCD_G3, 1), + LCM_GPIO_CFG(BRAVO_LCD_G4, 1), + LCM_GPIO_CFG(BRAVO_LCD_G5, 1), + LCM_GPIO_CFG(BRAVO_LCD_B1, 1), + LCM_GPIO_CFG(BRAVO_LCD_B2, 1), + LCM_GPIO_CFG(BRAVO_LCD_B3, 1), + LCM_GPIO_CFG(BRAVO_LCD_B4, 1), + LCM_GPIO_CFG(BRAVO_LCD_B5, 1), + LCM_GPIO_CFG(BRAVO_LCD_PCLK, 1), + LCM_GPIO_CFG(BRAVO_LCD_VSYNC, 1), + LCM_GPIO_CFG(BRAVO_LCD_HSYNC, 1), + LCM_GPIO_CFG(BRAVO_LCD_DE, 1), +}; + +static uint32_t samsung_oled_off_gpio_table[] = { + LCM_GPIO_CFG(BRAVO_LCD_R1, 0), + LCM_GPIO_CFG(BRAVO_LCD_R2, 0), + LCM_GPIO_CFG(BRAVO_LCD_R3, 0), + LCM_GPIO_CFG(BRAVO_LCD_R4, 0), + LCM_GPIO_CFG(BRAVO_LCD_R5, 0), + LCM_GPIO_CFG(BRAVO_LCD_G0, 0), + LCM_GPIO_CFG(BRAVO_LCD_G1, 0), + LCM_GPIO_CFG(BRAVO_LCD_G2, 0), + LCM_GPIO_CFG(BRAVO_LCD_G3, 0), + LCM_GPIO_CFG(BRAVO_LCD_G4, 0), + LCM_GPIO_CFG(BRAVO_LCD_G5, 0), + LCM_GPIO_CFG(BRAVO_LCD_B1, 0), + LCM_GPIO_CFG(BRAVO_LCD_B2, 0), + LCM_GPIO_CFG(BRAVO_LCD_B3, 0), + LCM_GPIO_CFG(BRAVO_LCD_B4, 0), + LCM_GPIO_CFG(BRAVO_LCD_B5, 0), + LCM_GPIO_CFG(BRAVO_LCD_PCLK, 0), + LCM_GPIO_CFG(BRAVO_LCD_VSYNC, 0), + LCM_GPIO_CFG(BRAVO_LCD_HSYNC, 0), + LCM_GPIO_CFG(BRAVO_LCD_DE, 0), +}; +#undef LCM_GPIO_CFG + +#define SONY_TFT_DEF_USER_VAL 102 +#define SONY_TFT_MIN_USER_VAL 30 +#define SONY_TFT_MAX_USER_VAL 255 +#define SONY_TFT_DEF_PANEL_VAL 120 +#define SONY_TFT_MIN_PANEL_VAL 8 +#define SONY_TFT_MAX_PANEL_VAL 255 +#define SONY_TFT_DEF_PANEL_UP_VAL 132 +#define SONY_TFT_MIN_PANEL_UP_VAL 9 +#define SONY_TFT_MAX_PANEL_UP_VAL 255 + +static DEFINE_MUTEX(panel_lock); +static struct work_struct brightness_delayed_work; +static DEFINE_SPINLOCK(brightness_lock); +static uint8_t new_val = SAMSUNG_OLED_DEFAULT_VAL; +static uint8_t last_val = SAMSUNG_OLED_DEFAULT_VAL; +static uint8_t table_sel_vals[] = { 0x43, 0x34 }; +static int table_sel_idx = 0; +static uint8_t tft_panel_on; + +static void gamma_table_bank_select(void) +{ + lcm_writeb(0x39, table_sel_vals[table_sel_idx]); + table_sel_idx ^= 1; +} + +static void samsung_oled_set_gamma_val(int val) +{ + int i; + int level; + int frac; + + val = clamp(val, SAMSUNG_OLED_MIN_VAL, SAMSUNG_OLED_MAX_VAL); + val = (val / 2) * 2; + + level = (val - SAMSUNG_OLED_MIN_VAL) / SAMSUNG_OLED_LEVEL_STEP; + frac = (val - SAMSUNG_OLED_MIN_VAL) % SAMSUNG_OLED_LEVEL_STEP; + + clk_enable(spi_clk); + + for (i = 0; i < OLED_GAMMA_TABLE_SIZE; ++i) { + unsigned int v1; + unsigned int v2 = 0; + u8 v; + if (frac == 0) { + v = samsung_oled_gamma_table[level][i].val; + } else { + + v1 = samsung_oled_gamma_table[level][i].val; + v2 = samsung_oled_gamma_table[level+1][i].val; + v = (v1 * (SAMSUNG_OLED_LEVEL_STEP - frac) + + v2 * frac) / SAMSUNG_OLED_LEVEL_STEP; + } + lcm_writeb(samsung_oled_gamma_table[level][i].reg, v); + } + + gamma_table_bank_select(); + clk_disable(spi_clk); + last_val = val; +} + +static void samsung_oled_panel_config_gpio_table(uint32_t *table, int len) +{ + int n; + unsigned id; + for (n = 0; n < len; n++) { + id = table[n]; + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); + } +} + +static int samsung_oled_panel_gpio_switch (int on) +{ + samsung_oled_panel_config_gpio_table ( + !!on ? 
samsung_oled_on_gpio_table : samsung_oled_off_gpio_table, + ARRAY_SIZE(samsung_oled_on_gpio_table)); + + return 0; +} + +static int samsung_oled_panel_init(struct msm_lcdc_panel_ops *ops) +{ + pr_info("%s: +()\n", __func__); + mutex_lock(&panel_lock); + + clk_enable(spi_clk); + /* Set the gamma write target to 4, leave the current gamma set at 2 */ + lcm_writeb(0x39, 0x24); + clk_disable(spi_clk); + + mutex_unlock(&panel_lock); + pr_info("%s: -()\n", __func__); + return 0; +} + +static int samsung_oled_panel_unblank(struct msm_lcdc_panel_ops *ops) +{ + int i; + + pr_info("%s: +()\n", __func__); + + mutex_lock(&panel_lock); + samsung_oled_panel_gpio_switch(1); + + gpio_set_value(BRAVO_GPIO_LCD_RST_N, 1); + udelay(50); + gpio_set_value(BRAVO_GPIO_LCD_RST_N, 0); + udelay(20); + gpio_set_value(BRAVO_GPIO_LCD_RST_N, 1); + msleep(20); + + clk_enable(spi_clk); + + for (i = 0; i < init_table_sz; i++) + lcm_writeb(init_tablep[i].reg, init_tablep[i].val); + + lcm_writew(0xef, 0xd0e8); + lcm_writeb(0x1d, 0xa0); + table_sel_idx = 0; + gamma_table_bank_select(); + samsung_oled_set_gamma_val(last_val); + msleep(250); + lcm_writeb(0x14, 0x03); + clk_disable(spi_clk); + + mutex_unlock(&panel_lock); + + pr_info("%s: -()\n", __func__); + return 0; +} + +static int samsung_oled_panel_blank(struct msm_lcdc_panel_ops *ops) +{ + pr_info("%s: +()\n", __func__); + mutex_lock(&panel_lock); + + clk_enable(spi_clk); + lcm_writeb(0x14, 0x0); + mdelay(1); + lcm_writeb(0x1d, 0xa1); + clk_disable(spi_clk); + msleep(200); + + gpio_set_value(BRAVO_GPIO_LCD_RST_N, 0); + samsung_oled_panel_gpio_switch(0); + + mutex_unlock(&panel_lock); + pr_info("%s: -()\n", __func__); + return 0; +} + +#define LCM_GPIO_CFG(gpio, func, str) \ + PCOM_GPIO_CFG(gpio, func, GPIO_OUTPUT, GPIO_NO_PULL, str) + +static uint32_t sony_tft_display_on_gpio_table[] = { + LCM_GPIO_CFG(BRAVO_LCD_R1, 1, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_R2, 1, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_R3, 1, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_R4, 1, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_R5, 1, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_G0, 1, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_G1, 1, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_G2, 1, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_G3, 1, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_G4, 1, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_G5, 1, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_B1, 1, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_B2, 1, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_B3, 1, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_B4, 1, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_B5, 1, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_PCLK, 1, GPIO_4MA), + LCM_GPIO_CFG(BRAVO_LCD_VSYNC, 1, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_HSYNC, 1, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_DE, 1, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_SPI_CLK, 1, GPIO_4MA), + LCM_GPIO_CFG(BRAVO_LCD_SPI_DO, 1, GPIO_4MA), + LCM_GPIO_CFG(BRAVO_LCD_SPI_CSz, 1, GPIO_4MA), +}; + +static uint32_t sony_tft_display_off_gpio_table[] = { + LCM_GPIO_CFG(BRAVO_LCD_R1, 0, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_R2, 0, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_R3, 0, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_R4, 0, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_R5, 0, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_G0, 0, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_G1, 0, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_G2, 0, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_G3, 0, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_G4, 0, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_G5, 0, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_B1, 0, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_B2, 0, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_B3, 0, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_B4, 0, GPIO_8MA), 
+ LCM_GPIO_CFG(BRAVO_LCD_B5, 0, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_PCLK, 0, GPIO_4MA), + LCM_GPIO_CFG(BRAVO_LCD_VSYNC, 0, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_HSYNC, 0, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_DE, 0, GPIO_8MA), + LCM_GPIO_CFG(BRAVO_LCD_SPI_CLK, 0, GPIO_4MA), + LCM_GPIO_CFG(BRAVO_LCD_SPI_DO, 0, GPIO_4MA), + LCM_GPIO_CFG(BRAVO_LCD_SPI_CSz, 0, GPIO_4MA), +}; + +#undef LCM_GPIO_CFG + +#define SONY_TFT_DEF_PANEL_DELTA \ + (SONY_TFT_DEF_PANEL_VAL - SONY_TFT_MIN_PANEL_VAL) +#define SONY_TFT_DEF_USER_DELTA \ + (SONY_TFT_DEF_USER_VAL - SONY_TFT_MIN_USER_VAL) + +static void sony_tft_set_pwm_val(int val) +{ + uint8_t data[4] = {0,0,0,0}; + unsigned int min_pwm, def_pwm, max_pwm; + + pr_info("%s: %d\n", __func__, val); + + last_val = val; + + if (!tft_panel_on) + return; + + if(!is_sony_spi()) { + min_pwm = SONY_TFT_MIN_PANEL_UP_VAL; + def_pwm = SONY_TFT_DEF_PANEL_UP_VAL; + max_pwm = SONY_TFT_MAX_PANEL_UP_VAL; + } else { + min_pwm = SONY_TFT_MIN_PANEL_VAL; + def_pwm = SONY_TFT_DEF_PANEL_VAL; + max_pwm = SONY_TFT_MAX_PANEL_VAL; + } + + if (val <= SONY_TFT_DEF_USER_VAL) { + if (val <= SONY_TFT_MIN_USER_VAL) + val = min_pwm; + else + val = (def_pwm - min_pwm) * + (val - SONY_TFT_MIN_USER_VAL) / + SONY_TFT_DEF_USER_DELTA + + min_pwm; + } else { + val = (max_pwm - def_pwm) * + (val - SONY_TFT_DEF_USER_VAL) / + (SONY_TFT_MAX_USER_VAL - SONY_TFT_DEF_USER_VAL) + + def_pwm; + } + + if (!is_sony_spi()) { + data[0] = 5; + data[1] = val; + data[3] = 1; + microp_i2c_write(0x25, data, 4); + } else { + clk_enable(spi_clk); + qspi_send_9bit(0x0, 0x51); + qspi_send_9bit(0x1, val); + qspi_send_9bit(0x0, 0x53); + qspi_send_9bit(0x1, 0x24); + clk_disable(spi_clk); + } +} + +#undef SONY_TFT_DEF_PANEL_DELTA +#undef SONY_TFT_DEF_USER_DELTA + +static void sony_tft_panel_config_gpio_table(uint32_t *table, int len) +{ + int n; + unsigned id; + for (n = 0; n < len; n++) { + id = table[n]; + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); + } +} + +static int sony_tft_panel_power(int on) +{ + unsigned id, on_off; + + if (on) { + on_off = 0; + + vreg_enable(vreg_lcm_aux_2v6); + vreg_enable(vreg_lcm_rftx_2v6); + + id = PM_VREG_PDOWN_AUX_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + + id = PM_VREG_PDOWN_RFTX_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + + gpio_set_value(BRAVO_GPIO_LCD_RST_N, 1); + mdelay(10); + gpio_set_value(BRAVO_GPIO_LCD_RST_N, 0); + udelay(500); + gpio_set_value(BRAVO_GPIO_LCD_RST_N, 1); + mdelay(10); + sony_tft_panel_config_gpio_table( + sony_tft_display_on_gpio_table, + ARRAY_SIZE(sony_tft_display_on_gpio_table)); + } else { + on_off = 1; + + gpio_set_value(BRAVO_GPIO_LCD_RST_N, 0); + + mdelay(120); + + vreg_disable(vreg_lcm_rftx_2v6); + vreg_disable(vreg_lcm_aux_2v6); + + id = PM_VREG_PDOWN_RFTX_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + + id = PM_VREG_PDOWN_AUX_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + sony_tft_panel_config_gpio_table( + sony_tft_display_off_gpio_table, + ARRAY_SIZE(sony_tft_display_off_gpio_table)); + } + return 0; +} + +static int sony_tft_panel_init(struct msm_lcdc_panel_ops *ops) +{ + return 0; +} + +static void sony_tft_panel_without_gamma_init(void) +{ + pr_info("%s: init gamma setting", __func__); + + qspi_send_9bit(0x0, 0xF1); + qspi_send_9bit(0x1, 0x5A); + qspi_send_9bit(0x1, 0x5A); + // FAh RGB + qspi_send_9bit(0x0, 0xFA); + // Red + qspi_send_9bit(0x1, 0x32); + qspi_send_9bit(0x1, 0x3F); + qspi_send_9bit(0x1, 0x3F); + qspi_send_9bit(0x1, 0x29); + qspi_send_9bit(0x1, 0x3E); + qspi_send_9bit(0x1, 0x3C); + 
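+	/*
+	 * Explanatory note: qspi_send_9bit(0x0, x) appears to send x as a
+	 * command/register byte and qspi_send_9bit(0x1, x) as a data byte
+	 * (the first argument looks like the 9th command/data bit of the
+	 * 3-wire SPI protocol).  Each colour channel of the FAh and FBh
+	 * gamma registers receives 15 data bytes.  A table-driven sketch of
+	 * the red FAh block (same values as the unrolled writes here):
+	 *
+	 *	static const uint8_t fa_red[15] = {
+	 *		0x32, 0x3f, 0x3f, 0x29, 0x3e, 0x3c, 0x3d, 0x2c,
+	 *		0x27, 0x3d, 0x2e, 0x31, 0x3a, 0x34, 0x36,
+	 *	};
+	 *	int i;
+	 *	qspi_send_9bit(0x0, 0xfa);
+	 *	for (i = 0; i < ARRAY_SIZE(fa_red); i++)
+	 *		qspi_send_9bit(0x1, fa_red[i]);
+	 *
+	 * The unrolled writes are left as-is since they presumably mirror a
+	 * vendor-supplied init sequence.
+	 */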
qspi_send_9bit(0x1, 0x3D); + qspi_send_9bit(0x1, 0x2C); + qspi_send_9bit(0x1, 0x27); + qspi_send_9bit(0x1, 0x3D); + qspi_send_9bit(0x1, 0x2E); + qspi_send_9bit(0x1, 0x31); + qspi_send_9bit(0x1, 0x3A); + qspi_send_9bit(0x1, 0x34); + qspi_send_9bit(0x1, 0x36); + // Green + qspi_send_9bit(0x1, 0x1A); + qspi_send_9bit(0x1, 0x3F); + qspi_send_9bit(0x1, 0x3F); + qspi_send_9bit(0x1, 0x2E); + qspi_send_9bit(0x1, 0x40); + qspi_send_9bit(0x1, 0x3C); + qspi_send_9bit(0x1, 0x3C); + qspi_send_9bit(0x1, 0x2B); + qspi_send_9bit(0x1, 0x25); + qspi_send_9bit(0x1, 0x39); + qspi_send_9bit(0x1, 0x25); + qspi_send_9bit(0x1, 0x23); + qspi_send_9bit(0x1, 0x2A); + qspi_send_9bit(0x1, 0x20); + qspi_send_9bit(0x1, 0x22); + // Blue + qspi_send_9bit(0x1, 0x00); + qspi_send_9bit(0x1, 0x3F); + qspi_send_9bit(0x1, 0x3F); + qspi_send_9bit(0x1, 0x2F); + qspi_send_9bit(0x1, 0x3E); + qspi_send_9bit(0x1, 0x3C); + qspi_send_9bit(0x1, 0x3C); + qspi_send_9bit(0x1, 0x2A); + qspi_send_9bit(0x1, 0x23); + qspi_send_9bit(0x1, 0x35); + qspi_send_9bit(0x1, 0x1E); + qspi_send_9bit(0x1, 0x18); + qspi_send_9bit(0x1, 0x1C); + qspi_send_9bit(0x1, 0x0C); + qspi_send_9bit(0x1, 0x0E); + // FBh RGB + qspi_send_9bit(0x0, 0xFB); + // Red + qspi_send_9bit(0x1, 0x00); + qspi_send_9bit(0x1, 0x0D); + qspi_send_9bit(0x1, 0x09); + qspi_send_9bit(0x1, 0x0C); + qspi_send_9bit(0x1, 0x26); + qspi_send_9bit(0x1, 0x2E); + qspi_send_9bit(0x1, 0x31); + qspi_send_9bit(0x1, 0x22); + qspi_send_9bit(0x1, 0x19); + qspi_send_9bit(0x1, 0x33); + qspi_send_9bit(0x1, 0x22); + qspi_send_9bit(0x1, 0x23); + qspi_send_9bit(0x1, 0x21); + qspi_send_9bit(0x1, 0x17); + qspi_send_9bit(0x1, 0x00); + // Green + qspi_send_9bit(0x1, 0x00); + qspi_send_9bit(0x1, 0x25); + qspi_send_9bit(0x1, 0x1D); + qspi_send_9bit(0x1, 0x1F); + qspi_send_9bit(0x1, 0x35); + qspi_send_9bit(0x1, 0x3C); + qspi_send_9bit(0x1, 0x3A); + qspi_send_9bit(0x1, 0x26); + qspi_send_9bit(0x1, 0x1B); + qspi_send_9bit(0x1, 0x34); + qspi_send_9bit(0x1, 0x23); + qspi_send_9bit(0x1, 0x23); + qspi_send_9bit(0x1, 0x1F); + qspi_send_9bit(0x1, 0x12); + qspi_send_9bit(0x1, 0x00); + // Blue + qspi_send_9bit(0x1, 0x00); + qspi_send_9bit(0x1, 0x3F); + qspi_send_9bit(0x1, 0x31); + qspi_send_9bit(0x1, 0x33); + qspi_send_9bit(0x1, 0x43); + qspi_send_9bit(0x1, 0x48); + qspi_send_9bit(0x1, 0x41); + qspi_send_9bit(0x1, 0x2A); + qspi_send_9bit(0x1, 0x1D); + qspi_send_9bit(0x1, 0x35); + qspi_send_9bit(0x1, 0x23); + qspi_send_9bit(0x1, 0x23); + qspi_send_9bit(0x1, 0x21); + qspi_send_9bit(0x1, 0x10); + qspi_send_9bit(0x1, 0x00); + // F3h Power control + qspi_send_9bit(0x0, 0xF3); + qspi_send_9bit(0x1, 0x00); + qspi_send_9bit(0x1, 0x10); + qspi_send_9bit(0x1, 0x25); + qspi_send_9bit(0x1, 0x01); + qspi_send_9bit(0x1, 0x2D); + qspi_send_9bit(0x1, 0x2D); + qspi_send_9bit(0x1, 0x24); + qspi_send_9bit(0x1, 0x2D); + qspi_send_9bit(0x1, 0x10); + qspi_send_9bit(0x1, 0x10); + qspi_send_9bit(0x1, 0x0A); + qspi_send_9bit(0x1, 0x37); + // F4h VCOM Control + qspi_send_9bit(0x0, 0xF4); + qspi_send_9bit(0x1, 0x88); + qspi_send_9bit(0x1, 0x20); + qspi_send_9bit(0x1, 0x00); + qspi_send_9bit(0x1, 0xAF); + qspi_send_9bit(0x1, 0x64); + qspi_send_9bit(0x1, 0x00); + qspi_send_9bit(0x1, 0xAA); + qspi_send_9bit(0x1, 0x64); + qspi_send_9bit(0x1, 0x00); + qspi_send_9bit(0x1, 0x00); + //Change to level 1 + qspi_send_9bit(0x0, 0xF0); + qspi_send_9bit(0x1, 0x5A); + qspi_send_9bit(0x1, 0x5A); +} + +static int sony_tft_panel_unblank(struct msm_lcdc_panel_ops *ops) +{ + pr_info("%s: +()\n", __func__); + + mutex_lock(&panel_lock); + + if (tft_panel_on) { + pr_info("%s: 
-() already unblanked\n", __func__); + goto done; + } + + sony_tft_panel_power(1); + msleep(45); + + clk_enable(spi_clk); + qspi_send_9bit(0x0, 0x11); + msleep(5); + qspi_send_9bit(0x0, 0x3a); + if (is_sony_RGB666()) + qspi_send_9bit(0x1, 0x06); + else + qspi_send_9bit(0x1, 0x05); + msleep(100); + qspi_send_9bit(0x0, 0x29); + msleep(20); + + //init gamma setting + if(!is_sony_with_gamma()) + sony_tft_panel_without_gamma_init(); + + /* unlock register page for pwm setting */ + if (is_sony_spi()) { + qspi_send_9bit(0x0, 0xf0); + qspi_send_9bit(0x1, 0x5a); + qspi_send_9bit(0x1, 0x5a); + qspi_send_9bit(0x0, 0xf1); + qspi_send_9bit(0x1, 0x5a); + qspi_send_9bit(0x1, 0x5a); + qspi_send_9bit(0x0, 0xd0); + qspi_send_9bit(0x1, 0x5a); + qspi_send_9bit(0x1, 0x5a); + + qspi_send_9bit(0x0, 0xc2); + qspi_send_9bit(0x1, 0x53); + qspi_send_9bit(0x1, 0x12); + } + clk_disable(spi_clk); + msleep(100); + tft_panel_on = 1; + sony_tft_set_pwm_val(last_val); + + pr_info("%s: -()\n", __func__); +done: + mutex_unlock(&panel_lock); + return 0; +} + +static int sony_tft_panel_blank(struct msm_lcdc_panel_ops *ops) +{ + uint8_t data[4] = {0, 0, 0, 0}; + pr_info("%s: +()\n", __func__); + + mutex_lock(&panel_lock); + + clk_enable(spi_clk); + qspi_send_9bit(0x0, 0x28); + qspi_send_9bit(0x0, 0x10); + clk_disable(spi_clk); + + msleep(40); + sony_tft_panel_power(0); + tft_panel_on = 0; + + mutex_unlock(&panel_lock); + + if (!is_sony_spi()) { + data[0] = 5; + data[1] = 0; + data[3] = 1; + microp_i2c_write(0x25, data, 4); + } + + pr_info("%s: -()\n", __func__); + return 0; +} + +static struct msm_lcdc_panel_ops bravo_lcdc_amoled_panel_ops = { + .init = samsung_oled_panel_init, + .blank = samsung_oled_panel_blank, + .unblank = samsung_oled_panel_unblank, +}; + +static struct msm_lcdc_panel_ops bravo_lcdc_tft_panel_ops = { + .init = sony_tft_panel_init, + .blank = sony_tft_panel_blank, + .unblank = sony_tft_panel_unblank, +}; + + +static struct msm_lcdc_timing bravo_lcdc_amoled_timing = { + .clk_rate = 24576000, + .hsync_pulse_width = 4, + .hsync_back_porch = 8, + .hsync_front_porch = 8, + .hsync_skew = 0, + .vsync_pulse_width = 2, + .vsync_back_porch = 8, + .vsync_front_porch = 8, + .vsync_act_low = 1, + .hsync_act_low = 1, + .den_act_low = 1, +}; + +static struct msm_lcdc_timing bravo_lcdc_tft_timing = { + .clk_rate = 24576000, + .hsync_pulse_width = 2, + .hsync_back_porch = 20, + .hsync_front_porch = 20, + .hsync_skew = 0, + .vsync_pulse_width = 2, + .vsync_back_porch = 6, + .vsync_front_porch = 4, + .vsync_act_low = 1, + .hsync_act_low = 1, + .den_act_low = 0, +}; + +static struct msm_fb_data bravo_lcdc_fb_data = { + .xres = 480, + .yres = 800, + .width = 48, + .height = 80, + .output_format = MSM_MDP_OUT_IF_FMT_RGB565, +}; + +static struct msm_lcdc_platform_data bravo_lcdc_amoled_platform_data = { + .panel_ops = &bravo_lcdc_amoled_panel_ops, + .timing = &bravo_lcdc_amoled_timing, + .fb_id = 0, + .fb_data = &bravo_lcdc_fb_data, + .fb_resource = &resources_msm_fb[0], +}; + +static struct msm_lcdc_platform_data bravo_lcdc_tft_platform_data = { + .panel_ops = &bravo_lcdc_tft_panel_ops, + .timing = &bravo_lcdc_tft_timing, + .fb_id = 0, + .fb_data = &bravo_lcdc_fb_data, + .fb_resource = &resources_msm_fb[0], +}; + +static struct platform_device bravo_lcdc_amoled_device = { + .name = "msm_mdp_lcdc", + .id = -1, + .dev = { + .platform_data = &bravo_lcdc_amoled_platform_data, + }, +}; + +static struct platform_device bravo_lcdc_tft_device = { + .name = "msm_mdp_lcdc", + .id = -1, + .dev = { + .platform_data = 
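+		/*
+		 * Note: only one of the two "msm_mdp_lcdc" platform devices is
+		 * ever registered; bravo_init_panel() below reads
+		 * BRAVO_GPIO_LCD_ID0 and registers either the AMOLED device or
+		 * this Sony TFT device accordingly.
+		 */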
&bravo_lcdc_tft_platform_data, + }, +}; + +static int bravo_init_spi_hack(void) +{ + int ret; + + spi_base = ioremap(MSM_SPI_PHYS, MSM_SPI_SIZE); + if (!spi_base) + return -1; + + spi_clk = clk_get(&msm_device_spi.dev, "spi_clk"); + if (IS_ERR(spi_clk)) { + pr_err("%s: unable to get spi_clk\n", __func__); + ret = PTR_ERR(spi_clk); + goto err_clk_get; + } + + clk_enable(spi_clk); + + printk("spi: SPI_CONFIG=%x\n", readl(spi_base + SPI_CONFIG)); + printk("spi: SPI_IO_CONTROL=%x\n", readl(spi_base + SPI_IO_CONTROL)); + printk("spi: SPI_OPERATIONAL=%x\n", readl(spi_base + SPI_OPERATIONAL)); + printk("spi: SPI_ERROR_FLAGS_EN=%x\n", + readl(spi_base + SPI_ERROR_FLAGS_EN)); + printk("spi: SPI_ERROR_FLAGS=%x\n", readl(spi_base + SPI_ERROR_FLAGS)); + printk("-%s()\n", __FUNCTION__); + clk_disable(spi_clk); + + return 0; + +err_clk_get: + iounmap(spi_base); + return ret; +} + +static void bravo_brightness_set(struct led_classdev *led_cdev, + enum led_brightness val) +{ + unsigned long flags; + led_cdev->brightness = val; + + spin_lock_irqsave(&brightness_lock, flags); + new_val = val; + spin_unlock_irqrestore(&brightness_lock, flags); + + schedule_work(&brightness_delayed_work); +} + +static void bravo_brightness_amoled_set_work(struct work_struct *work_ptr) +{ + unsigned long flags; + uint8_t val; + + spin_lock_irqsave(&brightness_lock, flags); + val = new_val; + spin_unlock_irqrestore(&brightness_lock, flags); + + mutex_lock(&panel_lock); + samsung_oled_set_gamma_val(val); + mutex_unlock(&panel_lock); +} + +static void bravo_brightness_tft_set_work(struct work_struct *work_ptr) +{ + unsigned long flags; + uint8_t val; + + spin_lock_irqsave(&brightness_lock, flags); + val = new_val; + spin_unlock_irqrestore(&brightness_lock, flags); + + mutex_lock(&panel_lock); + sony_tft_set_pwm_val(val); + mutex_unlock(&panel_lock); +} + +static struct led_classdev bravo_brightness_led = { + .name = "lcd-backlight", + .brightness = LED_FULL, + .brightness_set = bravo_brightness_set, +}; + +int __init bravo_init_panel(void) +{ + int ret; + + if (!machine_is_bravo() && !machine_is_bravoc()) + return 0; + + if (system_rev > 0xC0) { + /* CDMA version (except for EVT1) supports RGB666 */ + init_tablep = samsung_oled_rgb666_init_table; + init_table_sz = ARRAY_SIZE(samsung_oled_rgb666_init_table); + bravo_lcdc_fb_data.output_format = MSM_MDP_OUT_IF_FMT_RGB666; + } + + ret = platform_device_register(&msm_device_mdp); + if (ret != 0) + return ret; + + ret = bravo_init_spi_hack(); + if (ret != 0) + return ret; + + if (gpio_get_value(BRAVO_GPIO_LCD_ID0)) { + pr_info("%s: tft panel\n", __func__); + vreg_lcm_rftx_2v6 = vreg_get(0, "rftx"); + if (IS_ERR(vreg_lcm_rftx_2v6)) + return PTR_ERR(vreg_lcm_rftx_2v6); + vreg_set_level(vreg_lcm_rftx_2v6, 2600); + +#ifdef CONFIG_MACH_BRAVO + vreg_lcm_aux_2v6 = vreg_get(0, "gp4"); +#else + vreg_lcm_aux_2v6 = vreg_get(0, "gp6"); +#endif + if (IS_ERR(vreg_lcm_aux_2v6)) + return PTR_ERR(vreg_lcm_aux_2v6); + vreg_set_level(vreg_lcm_aux_2v6, 2600); + + if (gpio_get_value(BRAVO_GPIO_LCD_RST_N)) + tft_panel_on = 1; + ret = platform_device_register(&bravo_lcdc_tft_device); + INIT_WORK(&brightness_delayed_work, bravo_brightness_tft_set_work); + } else { + pr_info("%s: amoled panel\n", __func__); + ret = platform_device_register(&bravo_lcdc_amoled_device); + INIT_WORK(&brightness_delayed_work, bravo_brightness_amoled_set_work); + } + + if (ret != 0) + return ret; + + ret = led_classdev_register(NULL, &bravo_brightness_led); + if (ret != 0) { + pr_err("%s: Cannot register brightness led\n", 
__func__); + return ret; + } + + return 0; +} + +device_initcall(bravo_init_panel); diff --git a/arch/arm/mach-msm/board-bravo-rfkill.c b/arch/arm/mach-msm/board-bravo-rfkill.c new file mode 100644 index 0000000000000..f8f24f4d11f27 --- /dev/null +++ b/arch/arm/mach-msm/board-bravo-rfkill.c @@ -0,0 +1,122 @@ +/* + * Copyright (C) 2009 Google, Inc. + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "board-bravo.h" + +static struct rfkill *bt_rfk; +static const char bt_name[] = "bcm4329"; + +static int bluetooth_set_power(void *data, bool blocked) +{ + if (!blocked) { + gpio_direction_output(BRAVO_GPIO_BT_RESET_N, 1); + gpio_direction_output(BRAVO_GPIO_BT_SHUTDOWN_N, 1); + } else { + gpio_direction_output(BRAVO_GPIO_BT_SHUTDOWN_N, 0); + gpio_direction_output(BRAVO_GPIO_BT_RESET_N, 0); + } + return 0; +} + +static struct rfkill_ops bravo_rfkill_ops = { + .set_block = bluetooth_set_power, +}; + +static int bravo_rfkill_probe(struct platform_device *pdev) +{ + int rc = 0; + bool default_state = true; /* off */ + + rc = gpio_request(BRAVO_GPIO_BT_RESET_N, "bt_reset"); + if (rc) + goto err_gpio_reset; + rc = gpio_request(BRAVO_GPIO_BT_SHUTDOWN_N, "bt_shutdown"); + if (rc) + goto err_gpio_shutdown; + + bluetooth_set_power(NULL, default_state); + + bt_rfk = rfkill_alloc(bt_name, &pdev->dev, RFKILL_TYPE_BLUETOOTH, + &bravo_rfkill_ops, NULL); + if (!bt_rfk) { + rc = -ENOMEM; + goto err_rfkill_alloc; + } + + rfkill_set_states(bt_rfk, default_state, false); + + /* userspace cannot take exclusive control */ + + rc = rfkill_register(bt_rfk); + if (rc) + goto err_rfkill_reg; + + return 0; + +err_rfkill_reg: + rfkill_destroy(bt_rfk); +err_rfkill_alloc: + gpio_free(BRAVO_GPIO_BT_SHUTDOWN_N); +err_gpio_shutdown: + gpio_free(BRAVO_GPIO_BT_RESET_N); +err_gpio_reset: + return rc; +} + +static int bravo_rfkill_remove(struct platform_device *dev) +{ + rfkill_unregister(bt_rfk); + rfkill_destroy(bt_rfk); + gpio_free(BRAVO_GPIO_BT_SHUTDOWN_N); + gpio_free(BRAVO_GPIO_BT_RESET_N); + + return 0; +} + +static struct platform_driver bravo_rfkill_driver = { + .probe = bravo_rfkill_probe, + .remove = bravo_rfkill_remove, + .driver = { + .name = "bravo_rfkill", + .owner = THIS_MODULE, + }, +}; + +static int __init bravo_rfkill_init(void) +{ + if (!machine_is_bravo() && !machine_is_bravoc()) + return 0; + + return platform_driver_register(&bravo_rfkill_driver); +} + +static void __exit bravo_rfkill_exit(void) +{ + platform_driver_unregister(&bravo_rfkill_driver); +} + +module_init(bravo_rfkill_init); +module_exit(bravo_rfkill_exit); +MODULE_DESCRIPTION("bravo rfkill"); +MODULE_AUTHOR("Nick Pelly "); +MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-msm/board-bravo-smb329.c b/arch/arm/mach-msm/board-bravo-smb329.c new file mode 100755 index 0000000000000..9d51005e2bce1 --- /dev/null +++ b/arch/arm/mach-msm/board-bravo-smb329.c @@ -0,0 +1,177 @@ +/* drivers/i2c/chips/smb329.c + * + * SMB329B Switch Charger (SUMMIT Microelectronics) + * + * Copyright (C) 2009 HTC Corporation + * Author: 
Justin Lin + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "board-bravo-smb329.h" + +static struct smb329_data { + struct i2c_client *client; + uint8_t version; + struct work_struct work; + struct mutex state_lock; + int chg_state; +} smb329; + +static int smb329_i2c_write(uint8_t *value, uint8_t reg, uint8_t num_bytes) +{ + int ret; + struct i2c_msg msg; + + /* write the first byte of buffer as the register address */ + value[0] = reg; + msg.addr = smb329.client->addr; + msg.len = num_bytes + 1; + msg.flags = 0; + msg.buf = value; + + ret = i2c_transfer(smb329.client->adapter, &msg, 1); + + return (ret >= 0) ? 0 : ret; +} + +static int smb329_i2c_read(uint8_t *value, uint8_t reg, uint8_t num_bytes) +{ + int ret; + struct i2c_msg msg[2]; + + /* setup the address to read */ + msg[0].addr = smb329.client->addr; + msg[0].len = 1; + msg[0].flags = 0; + msg[0].buf = ® + + /* setup the read buffer */ + msg[1].addr = smb329.client->addr; + msg[1].flags = I2C_M_RD; + msg[1].len = num_bytes; + msg[1].buf = value; + + ret = i2c_transfer(smb329.client->adapter, msg, 2); + + return (ret >= 0) ? 0 : ret; +} + +static int smb329_i2c_write_byte(uint8_t value, uint8_t reg) +{ + int ret; + uint8_t buf[2] = { 0 }; + + buf[1] = value; + ret = smb329_i2c_write(buf, reg, 1); + if (ret) + pr_err("smb329: write byte error (%d)\n", ret); + + return ret; +} + +static int smb329_i2c_read_byte(uint8_t *value, uint8_t reg) +{ + int ret = smb329_i2c_read(value, reg, 1); + if (ret) + pr_err("smb329: read byte error (%d)\n", ret); + + return ret; +} + +int smb329_set_charger_ctrl(uint32_t ctl) +{ + mutex_lock(&smb329.state_lock); + smb329.chg_state = ctl; + schedule_work(&smb329.work); + mutex_unlock(&smb329.state_lock); + return 0; +} + +static void smb329_work_func(struct work_struct *work) +{ + mutex_lock(&smb329.state_lock); + + switch (smb329.chg_state) { + case SMB329_ENABLE_FAST_CHG: + pr_info("smb329: charger on (fast)\n"); + smb329_i2c_write_byte(0x84, 0x31); + smb329_i2c_write_byte(0x08, 0x05); + if ((smb329.version & 0x18) == 0x0) + smb329_i2c_write_byte(0xA9, 0x00); + break; + + case SMB329_DISABLE_CHG: + case SMB329_ENABLE_SLOW_CHG: + pr_info("smb329: charger off/slow\n"); + smb329_i2c_write_byte(0x88, 0x31); + smb329_i2c_write_byte(0x08, 0x05); + break; + default: + pr_err("smb329: unknown charger state %d\n", + smb329.chg_state); + } + + mutex_unlock(&smb329.state_lock); +} + +static int smb329_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C) == 0) { + dev_dbg(&client->dev, "[SMB329]:I2C fail\n"); + return -EIO; + } + + smb329.client = client; + mutex_init(&smb329.state_lock); + INIT_WORK(&smb329.work, smb329_work_func); + + smb329_i2c_read_byte(&smb329.version, 0x3B); + pr_info("smb329 version: 0x%02x\n", smb329.version); + + return 0; +} + +static const struct i2c_device_id smb329_id[] = { + { "smb329", 0 }, + { }, +}; + +static struct i2c_driver smb329_driver = { + .driver.name = "smb329", + .id_table = 
smb329_id, + .probe = smb329_probe, +}; + +static int __init smb329_init(void) +{ + int ret = i2c_add_driver(&smb329_driver); + if (ret) + pr_err("smb329_init: failed\n"); + + return ret; +} + +module_init(smb329_init); + +MODULE_AUTHOR("Justin Lin "); +MODULE_DESCRIPTION("SUMMIT Microelectronics SMB329B switch charger"); +MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-msm/board-bravo-smb329.h b/arch/arm/mach-msm/board-bravo-smb329.h new file mode 100644 index 0000000000000..13b326fa71dfa --- /dev/null +++ b/arch/arm/mach-msm/board-bravo-smb329.h @@ -0,0 +1,32 @@ +/* include/linux/smb329.h - smb329 switch charger driver + * + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_SMB329_H +#define _LINUX_SMB329_H + +#ifdef __KERNEL__ + +enum { + SMB329_DISABLE_CHG, + SMB329_ENABLE_SLOW_CHG, + SMB329_ENABLE_FAST_CHG, +}; + +extern int smb329_set_charger_ctrl(uint32_t ctl); + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_SMB329_H */ + diff --git a/arch/arm/mach-msm/board-bravo-tpa2018d1.c b/arch/arm/mach-msm/board-bravo-tpa2018d1.c new file mode 100644 index 0000000000000..afabb7966aecf --- /dev/null +++ b/arch/arm/mach-msm/board-bravo-tpa2018d1.c @@ -0,0 +1,368 @@ +/* drivers/i2c/chips/tpa2018d1.c + * + * TI TPA2018D1 Speaker Amplifier + * + * Copyright (C) 2009 HTC Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +/* TODO: content validation in TPA2018_SET_CONFIG */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "board-bravo-tpa2018d1.h" + +static struct i2c_client *this_client; +static struct tpa2018d1_platform_data *pdata; +static int is_on; +static char spk_amp_cfg[8]; +static const char spk_amp_on[8] = { /* same length as spk_amp_cfg */ + 0x01, 0xc3, 0x20, 0x01, 0x00, 0x08, 0x1a, 0x21 +}; +static const char spk_amp_off[] = {0x01, 0xa2}; + +static DEFINE_MUTEX(spk_amp_lock); +static int tpa2018d1_opened; +static char *config_data; +static int tpa2018d1_num_modes; + +#define DEBUG 0 + +static int tpa2018_i2c_write(const char *txData, int length) +{ + struct i2c_msg msg[] = { + { + .addr = this_client->addr, + .flags = 0, + .len = length, + .buf = txData, + }, + }; + + if (i2c_transfer(this_client->adapter, msg, 1) < 0) { + pr_err("%s: I2C transfer error\n", __func__); + return -EIO; + } else + return 0; +} + +static int tpa2018_i2c_read(char *rxData, int length) +{ + struct i2c_msg msgs[] = { + { + .addr = this_client->addr, + .flags = I2C_M_RD, + .len = length, + .buf = rxData, + }, + }; + + if (i2c_transfer(this_client->adapter, msgs, 1) < 0) { + pr_err("%s: I2C transfer error\n", __func__); + return -EIO; + } + +#if DEBUG + do { + int i = 0; + for (i = 0; i < length; i++) + pr_info("%s: rx[%d] = %2x\n", + __func__, i, rxData[i]); + } while(0); +#endif + + return 0; +} + +static int tpa2018d1_open(struct inode *inode, struct file *file) +{ + int rc = 0; + + mutex_lock(&spk_amp_lock); + + if (tpa2018d1_opened) { + pr_err("%s: busy\n", __func__); + rc = -EBUSY; + goto done; + } + + tpa2018d1_opened = 1; +done: + mutex_unlock(&spk_amp_lock); + return rc; +} + +static int tpa2018d1_release(struct inode *inode, struct file *file) +{ + mutex_lock(&spk_amp_lock); + tpa2018d1_opened = 0; + mutex_unlock(&spk_amp_lock); + + return 0; +} + +static int tpa2018d1_read_config(void __user *argp) +{ + int rc = 0; + unsigned char reg_idx = 0x01; + unsigned char tmp[7]; + + if (!is_on) { + gpio_set_value(pdata->gpio_tpa2018_spk_en, 1); + msleep(5); /* According to TPA2018D1 Spec */ + } + + rc = tpa2018_i2c_write(®_idx, sizeof(reg_idx)); + if (rc < 0) + goto err; + + rc = tpa2018_i2c_read(tmp, sizeof(tmp)); + if (rc < 0) + goto err; + + if (copy_to_user(argp, &tmp, sizeof(tmp))) + rc = -EFAULT; + +err: + if (!is_on) + gpio_set_value(pdata->gpio_tpa2018_spk_en, 0); + return rc; +} + +static long tpa2018d1_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + int rc = 0; + int mode = -1; + int offset = 0; + struct tpa2018d1_config_data cfg; + + mutex_lock(&spk_amp_lock); + + switch (cmd) { + case TPA2018_SET_CONFIG: + if (copy_from_user(spk_amp_cfg, argp, sizeof(spk_amp_cfg))) + rc = -EFAULT; + break; + + case TPA2018_READ_CONFIG: + rc = tpa2018d1_read_config(argp); + break; + + case TPA2018_SET_MODE: + if (copy_from_user(&mode, argp, sizeof(mode))) { + rc = -EFAULT; + break; + } + if (mode >= tpa2018d1_num_modes || mode < 0) { + pr_err("%s: unsupported tpa2018d1 mode %d\n", + __func__, mode); + rc = -EINVAL; + break; + } + if (!config_data) { + pr_err("%s: no config data!\n", __func__); + rc = -EIO; + break; + } + memcpy(spk_amp_cfg, config_data + mode * TPA2018D1_CMD_LEN, + TPA2018D1_CMD_LEN); + break; + + case TPA2018_SET_PARAM: + if (copy_from_user(&cfg, argp, sizeof(cfg))) { + pr_err("%s: copy from user failed.\n", __func__); + rc = -EFAULT; + break; + } + 
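+		/*
+		 * Validate the user-supplied header before touching
+		 * config_data: mode_num bounds the mode table and data_len
+		 * must equal mode_num * TPA2018D1_CMD_LEN (8 bytes per mode),
+		 * e.g. 10 modes require data_len == 80.  Both checks run below
+		 * before the kmalloc and the copy of the command data itself.
+		 */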
tpa2018d1_num_modes = cfg.mode_num; + if (tpa2018d1_num_modes > TPA2018_NUM_MODES) { + pr_err("%s: invalid number of modes %d\n", __func__, + tpa2018d1_num_modes); + rc = -EINVAL; + break; + } + if (cfg.data_len != tpa2018d1_num_modes*TPA2018D1_CMD_LEN) { + pr_err("%s: invalid data length %d, expecting %d\n", + __func__, cfg.data_len, + tpa2018d1_num_modes * TPA2018D1_CMD_LEN); + rc = -EINVAL; + break; + } + /* Free the old data */ + if (config_data) + kfree(config_data); + config_data = kmalloc(cfg.data_len, GFP_KERNEL); + if (!config_data) { + pr_err("%s: out of memory\n", __func__); + rc = -ENOMEM; + break; + } + if (copy_from_user(config_data, cfg.cmd_data, cfg.data_len)) { + pr_err("%s: copy data from user failed.\n", __func__); + kfree(config_data); + config_data = NULL; + rc = -EFAULT; + break; + } + /* replace default setting with playback setting */ + if (tpa2018d1_num_modes >= TPA2018_MODE_PLAYBACK) { + offset = TPA2018_MODE_PLAYBACK * TPA2018D1_CMD_LEN; + memcpy(spk_amp_cfg, config_data + offset, + TPA2018D1_CMD_LEN); + } + break; + + default: + pr_err("%s: invalid command %d\n", __func__, _IOC_NR(cmd)); + rc = -EINVAL; + break; + } + mutex_unlock(&spk_amp_lock); + return rc; +} + +static struct file_operations tpa2018d1_fops = { + .owner = THIS_MODULE, + .open = tpa2018d1_open, + .release = tpa2018d1_release, + .unlocked_ioctl = tpa2018d1_ioctl, +}; + +static struct miscdevice tpa2018d1_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "tpa2018d1", + .fops = &tpa2018d1_fops, +}; + +void tpa2018d1_set_speaker_amp(int on) +{ + if (!pdata) { + pr_err("%s: no platform data!\n", __func__); + return; + } + mutex_lock(&spk_amp_lock); + if (on && !is_on) { + gpio_set_value(pdata->gpio_tpa2018_spk_en, 1); + msleep(5); /* According to TPA2018D1 Spec */ + + if (tpa2018_i2c_write(spk_amp_cfg, sizeof(spk_amp_cfg)) == 0) { + is_on = 1; + pr_info("%s: ON\n", __func__); + } + } else if (!on && is_on) { + if (tpa2018_i2c_write(spk_amp_off, sizeof(spk_amp_off)) == 0) { + is_on = 0; + msleep(2); + gpio_set_value(pdata->gpio_tpa2018_spk_en, 0); + pr_info("%s: OFF\n", __func__); + } + } + mutex_unlock(&spk_amp_lock); +} + +static int tpa2018d1_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + int ret = 0; + + pdata = client->dev.platform_data; + + if (!pdata) { + ret = -EINVAL; + pr_err("%s: platform data is NULL\n", __func__); + goto err_no_pdata; + } + + this_client = client; + + ret = gpio_request(pdata->gpio_tpa2018_spk_en, "tpa2018"); + if (ret < 0) { + pr_err("%s: gpio request aud_spk_en pin failed\n", __func__); + goto err_free_gpio; + } + + ret = gpio_direction_output(pdata->gpio_tpa2018_spk_en, 1); + if (ret < 0) { + pr_err("%s: request aud_spk_en gpio direction failed\n", + __func__); + goto err_free_gpio; + } + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + pr_err("%s: i2c check functionality error\n", __func__); + ret = -ENODEV; + goto err_free_gpio; + } + + gpio_set_value(pdata->gpio_tpa2018_spk_en, 0); /* Default Low */ + + ret = misc_register(&tpa2018d1_device); + if (ret) { + pr_err("%s: tpa2018d1_device register failed\n", __func__); + goto err_free_gpio; + } + memcpy(spk_amp_cfg, spk_amp_on, sizeof(spk_amp_on)); + return 0; + +err_free_gpio: + gpio_free(pdata->gpio_tpa2018_spk_en); +err_no_pdata: + return ret; +} + +static int tpa2018d1_suspend(struct i2c_client *client, pm_message_t mesg) +{ + return 0; +} + +static int tpa2018d1_resume(struct i2c_client *client) +{ + return 0; +} + +static const struct i2c_device_id tpa2018d1_id[] = { + 
{ TPA2018D1_I2C_NAME, 0 }, + { } +}; + +static struct i2c_driver tpa2018d1_driver = { + .probe = tpa2018d1_probe, + .suspend = tpa2018d1_suspend, + .resume = tpa2018d1_resume, + .id_table = tpa2018d1_id, + .driver = { + .name = TPA2018D1_I2C_NAME, + }, +}; + +static int __init tpa2018d1_init(void) +{ + pr_info("%s\n", __func__); + return i2c_add_driver(&tpa2018d1_driver); +} + +module_init(tpa2018d1_init); + +MODULE_DESCRIPTION("tpa2018d1 speaker amp driver"); +MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-msm/board-bravo-tpa2018d1.h b/arch/arm/mach-msm/board-bravo-tpa2018d1.h new file mode 100644 index 0000000000000..dc11012209454 --- /dev/null +++ b/arch/arm/mach-msm/board-bravo-tpa2018d1.h @@ -0,0 +1,35 @@ +/* include/linux/tpa2018d1.h - tpa2018d1 speaker amplifier driver + * + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + + +#ifndef __ASM_ARM_ARCH_TPA2018D1_H +#define __ASM_ARM_ARCH_TPA2018D1_H + +#define TPA2018D1_I2C_NAME "tpa2018d1" +#define TPA2018D1_CMD_LEN 8 + +struct tpa2018d1_platform_data { + uint32_t gpio_tpa2018_spk_en; +}; + +struct tpa2018d1_config_data { + unsigned char *cmd_data; /* [mode][cmd_len][cmds..] */ + unsigned int mode_num; + unsigned int data_len; +}; + +extern void tpa2018d1_set_speaker_amp(int on); + +#endif /* __ASM_ARM_ARCH_TPA2018D1_H */ diff --git a/arch/arm/mach-msm/board-bravo-wifi.c b/arch/arm/mach-msm/board-bravo-wifi.c new file mode 100644 index 0000000000000..abf30df4e1f2d --- /dev/null +++ b/arch/arm/mach-msm/board-bravo-wifi.c @@ -0,0 +1,152 @@ +/* linux/arch/arm/mach-msm/board-bravo-wifi.c +*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "board-bravo.h" + +int bravo_wifi_power(int on); +int bravo_wifi_reset(int on); +int bravo_wifi_set_carddetect(int on); + +#if defined(CONFIG_DHD_USE_STATIC_BUF) || defined(CONFIG_BCM4329_DHD_USE_STATIC_BUF) + +#define PREALLOC_WLAN_NUMBER_OF_SECTIONS 4 +#define PREALLOC_WLAN_NUMBER_OF_BUFFERS 160 +#define PREALLOC_WLAN_SECTION_HEADER 24 + +#define WLAN_SECTION_SIZE_0 (PREALLOC_WLAN_NUMBER_OF_BUFFERS * 128) +#define WLAN_SECTION_SIZE_1 (PREALLOC_WLAN_NUMBER_OF_BUFFERS * 128) +#define WLAN_SECTION_SIZE_2 (PREALLOC_WLAN_NUMBER_OF_BUFFERS * 512) +#define WLAN_SECTION_SIZE_3 (PREALLOC_WLAN_NUMBER_OF_BUFFERS * 1024) + +#define WLAN_SKB_BUF_NUM 16 + +static struct sk_buff *wlan_static_skb[WLAN_SKB_BUF_NUM]; + +typedef struct wifi_mem_prealloc_struct { + void *mem_ptr; + unsigned long size; +} wifi_mem_prealloc_t; + +static wifi_mem_prealloc_t wifi_mem_array[PREALLOC_WLAN_NUMBER_OF_SECTIONS] = { + { NULL, (WLAN_SECTION_SIZE_0 + PREALLOC_WLAN_SECTION_HEADER) }, + { NULL, (WLAN_SECTION_SIZE_1 + PREALLOC_WLAN_SECTION_HEADER) }, + { NULL, (WLAN_SECTION_SIZE_2 + PREALLOC_WLAN_SECTION_HEADER) }, + { NULL, (WLAN_SECTION_SIZE_3 + PREALLOC_WLAN_SECTION_HEADER) } +}; + +static void *bravo_wifi_mem_prealloc(int section, unsigned long size) +{ + if (section == PREALLOC_WLAN_NUMBER_OF_SECTIONS) + return wlan_static_skb; + if ((section < 0) || (section > PREALLOC_WLAN_NUMBER_OF_SECTIONS)) + return NULL; 
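+	/*
+	 * Prealloc contract used by the bcmdhd driver: section index 4
+	 * (PREALLOC_WLAN_NUMBER_OF_SECTIONS) returns the static skb pool,
+	 * sections 0-3 return the buffers kmalloc'd in bravo_init_wifi_mem().
+	 * With 160 buffers per section the reservations work out to
+	 * 160 * 128 = 20480 bytes for sections 0 and 1, 160 * 512 = 81920 for
+	 * section 2 and 160 * 1024 = 163840 for section 3, each plus the
+	 * 24-byte section header; larger requests hit the NULL return below.
+	 */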
+ if (wifi_mem_array[section].size < size) + return NULL; + return wifi_mem_array[section].mem_ptr; +} +#endif + +int __init bravo_init_wifi_mem(void) +{ +#if defined(CONFIG_DHD_USE_STATIC_BUF) || defined(CONFIG_BCM4329_DHD_USE_STATIC_BUF) + int i; + + for(i=0;( i < WLAN_SKB_BUF_NUM );i++) { + if (i < (WLAN_SKB_BUF_NUM/2)) + wlan_static_skb[i] = dev_alloc_skb(4096); + else + wlan_static_skb[i] = dev_alloc_skb(8192); + } + for(i=0;( i < PREALLOC_WLAN_NUMBER_OF_SECTIONS );i++) { + wifi_mem_array[i].mem_ptr = kmalloc(wifi_mem_array[i].size, + GFP_KERNEL); + if (wifi_mem_array[i].mem_ptr == NULL) + return -ENOMEM; + } +#endif + return 0; +} + +static struct resource bravo_wifi_resources[] = { + [0] = { + .name = "bcmdhd_wlan_irq", + .start = MSM_GPIO_TO_INT(BRAVO_GPIO_WIFI_IRQ), + .end = MSM_GPIO_TO_INT(BRAVO_GPIO_WIFI_IRQ), + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE, + }, +}; + +static struct wifi_platform_data bravo_wifi_control = { + .set_power = bravo_wifi_power, + .set_reset = bravo_wifi_reset, + .set_carddetect = bravo_wifi_set_carddetect, +#if defined(CONFIG_DHD_USE_STATIC_BUF) || defined(CONFIG_BCM4329_DHD_USE_STATIC_BUF) + .mem_prealloc = bravo_wifi_mem_prealloc, +#else + .mem_prealloc = NULL, +#endif +}; + +static struct platform_device bravo_wifi_device = { + .name = "bcmdhd_wlan", + .id = 1, + .num_resources = ARRAY_SIZE(bravo_wifi_resources), + .resource = bravo_wifi_resources, + .dev = { + .platform_data = &bravo_wifi_control, + }, +}; + +extern unsigned char *get_wifi_nvs_ram(void); +extern int wifi_calibration_size_set(void); + +static unsigned bravo_wifi_update_nvs(char *str, int add_flag) +{ +#define NVS_LEN_OFFSET 0x0C +#define NVS_DATA_OFFSET 0x40 + unsigned char *ptr; + unsigned len; + + if (!str) + return -EINVAL; + ptr = get_wifi_nvs_ram(); + /* Size in format LE assumed */ + memcpy(&len, ptr + NVS_LEN_OFFSET, sizeof(len)); + /* if the last byte in NVRAM is 0, trim it */ + if (ptr[NVS_DATA_OFFSET + len - 1] == 0) + len -= 1; + if (add_flag) { + strcpy(ptr + NVS_DATA_OFFSET + len, str); + len += strlen(str); + } else { + if (strnstr(ptr + NVS_DATA_OFFSET, str, len)) + len -= strlen(str); + } + memcpy(ptr + NVS_LEN_OFFSET, &len, sizeof(len)); + wifi_calibration_size_set(); + return 0; +} + +static int __init bravo_wifi_init(void) +{ + if (!machine_is_bravo() && !machine_is_bravoc()) + return 0; + + printk("%s: start\n", __func__); + bravo_wifi_update_nvs("sd_oobonly=1\r\n", 0); + bravo_wifi_update_nvs("btc_params70=0x32\r\n", 1); + bravo_init_wifi_mem(); + return platform_device_register(&bravo_wifi_device); +} + +late_initcall(bravo_wifi_init); diff --git a/arch/arm/mach-msm/board-bravo.c b/arch/arm/mach-msm/board-bravo.c new file mode 100644 index 0000000000000..8bf88229a3220 --- /dev/null +++ b/arch/arm/mach-msm/board-bravo.c @@ -0,0 +1,1344 @@ +/* arch/arm/mach-msm/board-bravo.c + * + * Copyright (C) 2009 Google, Inc. + * Copyright (C) 2009 HTC Corporation. + * Author: Dima Zavin + * Copyright (C) 2010 Giulio Cervera + * Copyright (C) 2010 Diogo Ferreira + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include <../../../drivers/staging/android/timed_gpio.h> +#include <../../../drivers/w1/w1.h> +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_PERFLOCK +#include +#endif +#include +#include +#include + +#include "board-bravo.h" +#include "devices.h" +#include "proc_comm.h" +#include "board-bravo-tpa2018d1.h" +#include "board-bravo-smb329.h" + +#include +#include +#include "footswitch.h" +#include + +#ifdef CONFIG_OPTICALJOYSTICK_CRUCIAL +#include +#endif + +static uint debug_uart; + +module_param_named(debug_uart, debug_uart, uint, 0); + +extern void notify_usb_connected(int); +extern void msm_init_pmic_vibrator(void); +extern void __init bravo_audio_init(void); + +extern int microp_headset_has_mic(void); + +static int bravo_phy_init_seq[] = { + 0x0C, 0x31, + 0x31, 0x32, + 0x1D, 0x0D, + 0x1D, 0x10, + -1 +}; + +static void bravo_usb_phy_reset(void) +{ + u32 id; + int ret; + + id = PCOM_CLKRGM_APPS_RESET_USB_PHY; + ret = msm_proc_comm(PCOM_CLK_REGIME_SEC_RESET_ASSERT, &id, NULL); + if (ret) { + pr_err("%s: Cannot assert (%d)\n", __func__, ret); + return; + } + + msleep(1); + + id = PCOM_CLKRGM_APPS_RESET_USB_PHY; + ret = msm_proc_comm(PCOM_CLK_REGIME_SEC_RESET_DEASSERT, &id, NULL); + if (ret) { + pr_err("%s: Cannot assert (%d)\n", __func__, ret); + return; + } +} + +static void bravo_usb_hw_reset(bool enable) +{ + u32 id; + int ret; + u32 func; + + id = PCOM_CLKRGM_APPS_RESET_USBH; + if (enable) + func = PCOM_CLK_REGIME_SEC_RESET_ASSERT; + else + func = PCOM_CLK_REGIME_SEC_RESET_DEASSERT; + ret = msm_proc_comm(func, &id, NULL); + if (ret) + pr_err("%s: Cannot set reset to %d (%d)\n", __func__, enable, + ret); +} + +static struct msm_hsusb_platform_data msm_hsusb_pdata = { + .phy_init_seq = bravo_phy_init_seq, + .phy_reset = bravo_usb_phy_reset, + .hw_reset = bravo_usb_hw_reset, + .usb_connected = notify_usb_connected, +}; + +static char *usb_functions_ums[] = { + "usb_mass_storage", +}; + +static char *usb_functions_ums_adb[] = { + "usb_mass_storage", + "adb", +}; + +static char *usb_functions_rndis[] = { + "rndis", +}; + +static char *usb_functions_rndis_adb[] = { + "rndis", + "adb", +}; + +#ifdef CONFIG_USB_ANDROID_ACCESSORY +static char *usb_functions_accessory[] = { "accessory" }; +static char *usb_functions_accessory_adb[] = { "accessory", "adb" }; +#endif + +#ifdef CONFIG_USB_ANDROID_DIAG +static char *usb_functions_adb_diag[] = { + "usb_mass_storage", + "adb", + "diag", +}; +#endif + +static char *usb_functions_all[] = { +#ifdef CONFIG_USB_ANDROID_RNDIS + "rndis", +#endif +#ifdef CONFIG_USB_ANDROID_ACCESSORY + "accessory", +#endif + "usb_mass_storage", + "adb", +#ifdef CONFIG_USB_ANDROID_ACM + "acm", +#endif +#ifdef CONFIG_USB_ANDROID_DIAG + "diag", +#endif +}; + +static struct android_usb_product usb_products[] = { + { + .product_id = 0x0ff9, + .num_functions = ARRAY_SIZE(usb_functions_ums), + .functions = usb_functions_ums, + }, + { + .product_id = 0x0c87, + .num_functions = ARRAY_SIZE(usb_functions_ums_adb), + .functions = usb_functions_ums_adb, + }, + { + .product_id = 0x0FFE, + .num_functions = ARRAY_SIZE(usb_functions_rndis), + .functions = usb_functions_rndis, + }, + /* + XXX: there isn't a equivalent in htc's kernel + { + .product_id = 0x4e14, + .num_functions = ARRAY_SIZE(usb_functions_rndis_adb), + 
.functions = usb_functions_rndis_adb, + }, */ +#ifdef CONFIG_USB_ANDROID_ACCESSORY + { + .vendor_id = USB_ACCESSORY_VENDOR_ID, + .product_id = USB_ACCESSORY_PRODUCT_ID, + .num_functions = ARRAY_SIZE(usb_functions_accessory), + .functions = usb_functions_accessory, + }, + { + .vendor_id = USB_ACCESSORY_VENDOR_ID, + .product_id = USB_ACCESSORY_ADB_PRODUCT_ID, + .num_functions = ARRAY_SIZE(usb_functions_accessory_adb), + .functions = usb_functions_accessory_adb, + }, +#endif +#ifdef CONFIG_USB_ANDROID_DIAG + { + .product_id = 0x0c07, + .num_functions = ARRAY_SIZE(usb_functions_adb_diag), + .functions = usb_functions_adb_diag, + }, +#endif +}; + +static struct usb_mass_storage_platform_data mass_storage_pdata = { + .nluns = 1, + .vendor = "HTC", + .product = "Desire", + .release = 0x0100, +}; + +static struct platform_device usb_mass_storage_device = { + .name = "usb_mass_storage", + .id = -1, + .dev = { + .platform_data = &mass_storage_pdata, + }, +}; + +#ifdef CONFIG_USB_ANDROID_RNDIS +static struct usb_ether_platform_data rndis_pdata = { + /* ethaddr is filled by board_serialno_setup */ + .vendorID = 0x0bb4, + .vendorDescr = "HTC", +}; + +static struct platform_device rndis_device = { + .name = "rndis", + .id = -1, + .dev = { + .platform_data = &rndis_pdata, + }, +}; +#endif + +static struct android_usb_platform_data android_usb_pdata = { + .vendor_id = 0x0bb4, + .product_id = 0x0c02, + .version = 0x0100, + .product_name = "Android Phone", + .manufacturer_name = "HTC", + .num_products = ARRAY_SIZE(usb_products), + .products = usb_products, + .num_functions = ARRAY_SIZE(usb_functions_all), + .functions = usb_functions_all, +}; + +static struct platform_device android_usb_device = { + .name = "android_usb", + .id = -1, + .dev = { + .platform_data = &android_usb_pdata, + }, +}; + +static struct platform_device bravo_rfkill = { + .name = "bravo_rfkill", + .id = -1, +}; + +/* start kgsl */ +static struct resource kgsl_3d0_resources[] = { + { + .name = KGSL_3D0_REG_MEMORY, + .start = 0xA0000000, + .end = 0xA001ffff, + .flags = IORESOURCE_MEM, + }, + { + .name = KGSL_3D0_IRQ, + .start = INT_GRAPHICS, + .end = INT_GRAPHICS, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct kgsl_device_platform_data kgsl_3d0_pdata = { + .pwrlevel = { + { + .gpu_freq = 0, + .bus_freq = 128000000, + }, + }, + .init_level = 0, + .num_levels = 1, + .set_grp_async = NULL, + .idle_timeout = HZ/5, + .clk_map = KGSL_CLK_GRP | KGSL_CLK_IMEM, +}; + +struct platform_device msm_kgsl_3d0 = { + .name = "kgsl-3d0", + .id = 0, + .num_resources = ARRAY_SIZE(kgsl_3d0_resources), + .resource = kgsl_3d0_resources, + .dev = { + .platform_data = &kgsl_3d0_pdata, + }, +}; +/* end kgsl */ + +/* start footswitch regulator */ +struct platform_device *msm_footswitch_devices[] = { + FS_PCOM(FS_GFX3D, "fs_gfx3d"), +}; + +unsigned msm_num_footswitch_devices = ARRAY_SIZE(msm_footswitch_devices); +/* end footswitch regulator */ + +/* pmem heaps */ +#ifndef CONFIG_ION_MSM +static struct android_pmem_platform_data mdp_pmem_pdata = { + .name = "pmem", + .start = MSM_PMEM_MDP_BASE, + .size = MSM_PMEM_MDP_SIZE, +/* .no_allocator = 0,*/ + .allocator_type = PMEM_ALLOCATORTYPE_ALLORNOTHING, + .cached = 1, +}; + +static struct platform_device android_pmem_mdp_device = { + .name = "android_pmem", + .id = 0, + .dev = { + .platform_data = &mdp_pmem_pdata + }, +}; +#endif + +static struct android_pmem_platform_data android_pmem_adsp_pdata = { + .name = "pmem_adsp", + .start = MSM_PMEM_ADSP_BASE, + .size = MSM_PMEM_ADSP_SIZE, +/* .no_allocator = 0,*/ + 
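+	/*
+	 * Heap layout note: with CONFIG_ION_MSM only the MDP/surfaceflinger
+	 * heap moves to an ION carveout (ion_pdata below reuses
+	 * MSM_PMEM_MDP_BASE/SIZE); this ADSP heap stays a plain pmem heap, as
+	 * does the venc heap when CONFIG_720P_CAMERA is enabled.
+	 */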
.allocator_type = PMEM_ALLOCATORTYPE_BITMAP, + .cached = 1, +}; + +static struct platform_device android_pmem_adsp_device = { + .name = "android_pmem", + .id = 1, + .dev = { + .platform_data = &android_pmem_adsp_pdata, + }, +}; + +static struct android_pmem_platform_data android_pmem_venc_pdata = { + .name = "pmem_venc", + .start = MSM_PMEM_VENC_BASE, + .size = MSM_PMEM_VENC_SIZE, +/* .no_allocator = 0,*/ + .allocator_type = PMEM_ALLOCATORTYPE_BITMAP, + .cached = 1, +}; + +static struct platform_device android_pmem_venc_device = { + .name = "android_pmem", + .id = 3, + .dev = { + .platform_data = &android_pmem_venc_pdata, + }, +}; +/* end pmem heaps */ + +/* ion heaps */ +#ifdef CONFIG_ION_MSM +static struct ion_co_heap_pdata co_ion_pdata = { + .adjacent_mem_id = INVALID_HEAP_ID, + .align = PAGE_SIZE, +}; + +static struct ion_platform_data ion_pdata = { + .nr = 2, + .heaps = { + { + .id = ION_SYSTEM_HEAP_ID, + .type = ION_HEAP_TYPE_SYSTEM, + .name = ION_VMALLOC_HEAP_NAME, + }, + /* PMEM_MDP = SF */ + { + .id = ION_SF_HEAP_ID, + .type = ION_HEAP_TYPE_CARVEOUT, + .name = ION_SF_HEAP_NAME, + .base = MSM_PMEM_MDP_BASE, + .size = MSM_PMEM_MDP_SIZE, + .memory_type = ION_EBI_TYPE, + .extra_data = (void *)&co_ion_pdata, + }, + } +}; + +static struct platform_device ion_dev = { + .name = "ion-msm", + .id = 1, + .dev = { .platform_data = &ion_pdata }, +}; +#endif +/* end ion heaps */ + +static struct resource ram_console_resources[] = { + { + .start = MSM_RAM_CONSOLE_BASE, + .end = MSM_RAM_CONSOLE_BASE + MSM_RAM_CONSOLE_SIZE - 1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct platform_device ram_console_device = { + .name = "ram_console", + .id = -1, + .num_resources = ARRAY_SIZE(ram_console_resources), + .resource = ram_console_resources, +}; + +static int bravo_ts_power(int on) +{ + pr_info("%s: power %d\n", __func__, on); + + if (on) { + /* level shifter should be off */ + gpio_set_value(BRAVO_GPIO_TP_EN, 1); + msleep(120); + /* enable touch panel level shift */ + gpio_set_value(BRAVO_GPIO_TP_LS_EN, 1); + msleep(3); + } else { + gpio_set_value(BRAVO_GPIO_TP_LS_EN, 0); + gpio_set_value(BRAVO_GPIO_TP_EN, 0); + udelay(50); + } + + return 0; +} + +static struct synaptics_i2c_rmi_platform_data bravo_synaptics_ts_data[] = { + { + .version = 0x100, + .power = bravo_ts_power, + .flags = SYNAPTICS_FLIP_Y | SYNAPTICS_SNAP_TO_INACTIVE_EDGE, + .inactive_left = -1 * 0x10000 / 480, + .inactive_right = -1 * 0x10000 / 480, + .inactive_top = -5 * 0x10000 / 800, + .inactive_bottom = -5 * 0x10000 / 800, + .sensitivity_adjust = 12, + } +}; + +static struct akm8973_platform_data compass_platform_data = { + .layouts = BRAVO_LAYOUTS, + .project_name = BRAVO_PROJECT_NAME, + .reset = BRAVO_GPIO_COMPASS_RST_N, + .intr = BRAVO_GPIO_COMPASS_INT_N, +}; + +static struct regulator_consumer_supply tps65023_dcdc1_supplies[] = { + { + .supply = "acpu_vcore", + }, +}; + +static struct regulator_init_data tps65023_data[5] = { + { + .constraints = { + .name = "dcdc1", /* VREG_MSMC2_1V29 */ + .min_uV = 975000, +#ifdef CONFIG_JESUS_PHONE + .max_uV = 1350000, +#else + .max_uV = 1275000, +#endif + .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, + }, + .consumer_supplies = tps65023_dcdc1_supplies, + .num_consumer_supplies = ARRAY_SIZE(tps65023_dcdc1_supplies), + }, + /* dummy values for unused regulators to not crash driver: */ + { + .constraints = { + .name = "dcdc2", /* VREG_MSMC1_1V26 */ + .min_uV = 1260000, + .max_uV = 1260000, + }, + }, + { + .constraints = { + .name = "dcdc3", /* unused */ + .min_uV = 800000, + .max_uV = 3300000, 
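+		/*
+		 * Only dcdc1 ("acpu_vcore") is actively managed: it feeds the
+		 * application CPU (Scorpion) core and allows
+		 * REGULATOR_CHANGE_VOLTAGE between 975 mV and 1.275 V (1.35 V
+		 * with CONFIG_JESUS_PHONE).  The remaining entries carry
+		 * placeholder constraints only so the tps65023 driver can
+		 * bind.  A consumer would do something like (illustrative
+		 * sketch, not code from this file):
+		 *
+		 *	struct regulator *vcore = regulator_get(NULL, "acpu_vcore");
+		 *	regulator_set_voltage(vcore, 1075000, 1075000);
+		 */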
+ }, + }, + { + .constraints = { + .name = "ldo1", /* unused */ + .min_uV = 1000000, + .max_uV = 3150000, + }, + }, + { + .constraints = { + .name = "ldo2", /* V_USBPHY_3V3 */ + .min_uV = 3300000, + .max_uV = 3300000, + }, + }, +}; + +static void ds2482_set_slp_n(unsigned n) +{ + gpio_direction_output(BRAVO_GPIO_DS2482_SLP_N, n); +} + +static int capella_cm3602_power(int pwr_device, uint8_t enable); +static struct microp_function_config microp_functions[] = { + { + .name = "light_sensor", + .category = MICROP_FUNCTION_LSENSOR, + .levels = { 0x000, 0x001, 0x00F, 0x01E, 0x03C, 0x121, 0x190, 0x2BA, 0x35C, 0x3FF }, + .channel = 6, + .int_pin = IRQ_LSENSOR, + .golden_adc = 0xC0, + .ls_power = capella_cm3602_power, + }, +}; + +static struct lightsensor_platform_data lightsensor_data = { + .config = µp_functions[0], + .irq = MSM_uP_TO_INT(9), +}; + +static struct platform_device microp_devices[] = { + { + .name = "lightsensor_microp", + .dev = { + .platform_data = &lightsensor_data, + }, + }, +}; + +static struct microp_i2c_platform_data microp_data = { + .num_functions = ARRAY_SIZE(microp_functions), + .microp_function = microp_functions, + .num_devices = ARRAY_SIZE(microp_devices), + .microp_devices = microp_devices, + .gpio_reset = BRAVO_GPIO_UP_RESET_N, + .spi_devices = SPI_OJ | SPI_GSENSOR, +}; + +static struct tpa2018d1_platform_data tpa2018_data = { + .gpio_tpa2018_spk_en = BRAVO_CDMA_GPIO_AUD_SPK_AMP_EN, +}; + +static struct i2c_board_info base_i2c_devices[] = { + { + I2C_BOARD_INFO(SYNAPTICS_I2C_RMI_NAME, 0x40), + .platform_data = bravo_synaptics_ts_data, + .irq = MSM_GPIO_TO_INT(BRAVO_GPIO_TP_INT_N) + }, + { + I2C_BOARD_INFO("bravo-microp", 0xCC >> 1), + .platform_data = µp_data, + .irq = MSM_GPIO_TO_INT(BRAVO_GPIO_UP_INT_N) + }, + { + I2C_BOARD_INFO("ds2482", 0x30 >> 1), + .platform_data = ds2482_set_slp_n, + }, + { + I2C_BOARD_INFO(AKM8973_I2C_NAME, 0x1C), + .platform_data = &compass_platform_data, + .irq = MSM_GPIO_TO_INT(BRAVO_GPIO_COMPASS_INT_N), + }, + { + I2C_BOARD_INFO("s5k3e2fx", 0x20 >> 1), + }, + { + I2C_BOARD_INFO("tps65023", 0x48), + .platform_data = tps65023_data, + }, +}; + +static struct i2c_board_info rev_CX_i2c_devices[] = { + { + I2C_BOARD_INFO("tpa2018d1", 0x58), + .platform_data = &tpa2018_data, + }, + { + I2C_BOARD_INFO("smb329", 0x6E >> 1), + }, +}; + +static uint32_t camera_off_gpio_table[] = { + /* CAMERA */ + PCOM_GPIO_CFG(0, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT0 */ + PCOM_GPIO_CFG(1, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(2, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(3, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(4, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT4 */ + PCOM_GPIO_CFG(5, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT5 */ + PCOM_GPIO_CFG(6, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT6 */ + PCOM_GPIO_CFG(7, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT7 */ + PCOM_GPIO_CFG(8, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT8 */ + PCOM_GPIO_CFG(9, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT9 */ + PCOM_GPIO_CFG(10, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT10 */ + PCOM_GPIO_CFG(11, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT11 */ + PCOM_GPIO_CFG(12, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* PCLK */ + PCOM_GPIO_CFG(13, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* HSYNC */ + PCOM_GPIO_CFG(14, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* VSYNC */ + PCOM_GPIO_CFG(15, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* MCLK */ +}; + +static 
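+/*
+ * Camera pin tables: the "off" table above parks DAT0-DAT11, PCLK, HSYNC and
+ * VSYNC as pulled-down 4 mA inputs (MCLK stays a 4 mA output), while the "on"
+ * table below moves the pins to function 1 - presumably the camera/VFE
+ * interface - with pull-ups, raising PCLK to 16 mA and MCLK to 8 mA drive.
+ */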
uint32_t camera_on_gpio_table[] = { + /* CAMERA */ + PCOM_GPIO_CFG(0, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT0 */ + PCOM_GPIO_CFG(1, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT1 */ + PCOM_GPIO_CFG(2, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT2 */ + PCOM_GPIO_CFG(3, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT3 */ + PCOM_GPIO_CFG(4, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT4 */ + PCOM_GPIO_CFG(5, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT5 */ + PCOM_GPIO_CFG(6, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT6 */ + PCOM_GPIO_CFG(7, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT7 */ + PCOM_GPIO_CFG(8, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT8 */ + PCOM_GPIO_CFG(9, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT9 */ + PCOM_GPIO_CFG(10, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT10 */ + PCOM_GPIO_CFG(11, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT11 */ + PCOM_GPIO_CFG(12, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_16MA), /* PCLK */ + PCOM_GPIO_CFG(13, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* HSYNC */ + PCOM_GPIO_CFG(14, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* VSYNC */ + PCOM_GPIO_CFG(15, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* MCLK */ +}; + +void config_camera_on_gpios(void) +{ + config_gpio_table(camera_on_gpio_table, + ARRAY_SIZE(camera_on_gpio_table)); +} + +void config_camera_off_gpios(void) +{ + config_gpio_table(camera_off_gpio_table, + ARRAY_SIZE(camera_off_gpio_table)); +} + +static struct resource msm_camera_resources[] = { + { + .start = MSM_VFE_PHYS, + .end = MSM_VFE_PHYS + MSM_VFE_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_VFE, + INT_VFE, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct msm_camera_device_platform_data msm_camera_device_data = { + .camera_gpio_on = config_camera_on_gpios, + .camera_gpio_off = config_camera_off_gpios, + .ioext.mdcphy = MSM_MDC_PHYS, + .ioext.mdcsz = MSM_MDC_SIZE, + .ioext.appphy = MSM_CLK_CTL_PHYS, + .ioext.appsz = MSM_CLK_CTL_SIZE, +}; + +static struct camera_flash_cfg msm_camera_sensor_flash_cfg = { + .camera_flash = flashlight_control, + .num_flash_levels = FLASHLIGHT_NUM, + .low_temp_limit = 5, + .low_cap_limit = 15, +}; + +static struct msm_camera_sensor_info msm_camera_sensor_s5k3e2fx_data = { + .sensor_name = "s5k3e2fx", + .sensor_reset = 144, /* CAM1_RST */ + .sensor_pwd = 143, /* CAM1_PWDN, enabled in a9 */ + /*.vcm_pwd = 31,*/ /* CAM1_VCM_EN, enabled in a9 */ + .pdata = &msm_camera_device_data, + .resource = msm_camera_resources, + .num_resources = ARRAY_SIZE(msm_camera_resources), + .flash_cfg = &msm_camera_sensor_flash_cfg, +}; + +static struct platform_device msm_camera_sensor_s5k3e2fx = { + .name = "msm_camera_s5k3e2fx", + .dev = { + .platform_data = &msm_camera_sensor_s5k3e2fx_data, + }, +}; + +static int __capella_cm3602_power(int on) +{ + printk(KERN_DEBUG "%s: Turn the capella_cm3602 power %s\n", + __func__, (on) ? "on" : "off"); + if (on) { + gpio_direction_output(BRAVO_GPIO_LS_EN_N, 0); + gpio_direction_output(BRAVO_GPIO_PROXIMITY_EN, 1); + } else { + gpio_direction_output(BRAVO_GPIO_LS_EN_N, 1); + } + return 0; +}; + +static DEFINE_MUTEX(capella_cm3602_lock); +static int als_power_control; + +static int capella_cm3602_power(int pwr_device, uint8_t enable) +{ + /* TODO eolsen Add Voltage reg control */ + unsigned int old_status = 0; + int ret = 0, on = 0; + mutex_lock(&capella_cm3602_lock); + + old_status = als_power_control; + if (enable) + als_power_control |= pwr_device; + else + als_power_control &= ~pwr_device; + + on = als_power_control ? 
1 : 0; + if (old_status == 0 && on) + ret = __capella_cm3602_power(1); + else if (!on) + ret = __capella_cm3602_power(0); + + mutex_unlock(&capella_cm3602_lock); + return ret; +}; + +static struct capella_cm3602_platform_data capella_cm3602_pdata = { + .power = capella_cm3602_power, + .p_en = BRAVO_GPIO_PROXIMITY_EN, + .p_out = BRAVO_GPIO_PROXIMITY_INT_N, + .irq = MSM_GPIO_TO_INT(BRAVO_GPIO_PROXIMITY_INT_N), +}; + +static struct platform_device capella_cm3602 = { + .name = CAPELLA_CM3602, + .id = -1, + .dev = { + .platform_data = &capella_cm3602_pdata + } +}; + +static uint32_t flashlight_gpio_table[] = { + PCOM_GPIO_CFG(BRAVO_GPIO_FLASHLIGHT_TORCH, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(BRAVO_GPIO_FLASHLIGHT_FLASH, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA), +}; + +static uint32_t flashlight_gpio_table_rev_CX[] = { + PCOM_GPIO_CFG(BRAVO_CDMA_GPIO_FLASHLIGHT_TORCH, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(BRAVO_GPIO_FLASHLIGHT_FLASH, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA), +}; + +static int config_bravo_flashlight_gpios(void) +{ + if (is_cdma_version(system_rev)) { + config_gpio_table(flashlight_gpio_table_rev_CX, + ARRAY_SIZE(flashlight_gpio_table_rev_CX)); + } else { + config_gpio_table(flashlight_gpio_table, + ARRAY_SIZE(flashlight_gpio_table)); + } + return 0; +} + +static struct flashlight_platform_data bravo_flashlight_data = { + .gpio_init = config_bravo_flashlight_gpios, + .torch = BRAVO_GPIO_FLASHLIGHT_TORCH, + .flash = BRAVO_GPIO_FLASHLIGHT_FLASH, + .flash_duration_ms = 600 +}; + +static struct platform_device bravo_flashlight_device = { + .name = "flashlight", + .dev = { + .platform_data = &bravo_flashlight_data, + }, +}; + +static struct timed_gpio timed_gpios[] = { + { + .name = "vibrator", + .gpio = BRAVO_GPIO_VIBRATOR_ON, + .max_timeout = 15000, + }, +}; + +static struct timed_gpio_platform_data timed_gpio_data = { + .num_gpios = ARRAY_SIZE(timed_gpios), + .gpios = timed_gpios, +}; + +static struct platform_device bravo_timed_gpios = { + .name = "timed-gpio", + .id = -1, + .dev = { + .platform_data = &timed_gpio_data, + }, +}; + +static struct msm_serial_hs_platform_data msm_uart_dm1_pdata = { + .rx_wakeup_irq = -1, + .inject_rx_on_wakeup = 0, + .exit_lpm_cb = bcm_bt_lpm_exit_lpm_locked, +}; + +static struct bcm_bt_lpm_platform_data bcm_bt_lpm_pdata = { + .gpio_wake = BRAVO_GPIO_BT_WAKE, + .gpio_host_wake = BRAVO_GPIO_BT_HOST_WAKE, + .request_clock_off_locked = msm_hs_request_clock_off_locked, + .request_clock_on_locked = msm_hs_request_clock_on_locked, +}; + +struct platform_device bcm_bt_lpm_device = { + .name = "bcm_bt_lpm", + .id = 0, + .dev = { + .platform_data = &bcm_bt_lpm_pdata, + }, +}; + +static int ds2784_charge(int on, int fast) +{ + if (is_cdma_version(system_rev)) { + if (!on) + smb329_set_charger_ctrl(SMB329_DISABLE_CHG); + else + smb329_set_charger_ctrl(fast ? 
SMB329_ENABLE_FAST_CHG : SMB329_ENABLE_SLOW_CHG); + } + else + gpio_direction_output(BRAVO_GPIO_BATTERY_CHARGER_CURRENT, !!fast); + gpio_direction_output(BRAVO_GPIO_BATTERY_CHARGER_EN, !on); + return 0; +} + +static int w1_ds2784_add_slave(struct w1_slave *sl) +{ + struct dd { + struct platform_device pdev; + struct ds2784_platform_data pdata; + } *p; + + int rc; + + p = kzalloc(sizeof(struct dd), GFP_KERNEL); + if (!p) { + pr_err("%s: out of memory\n", __func__); + return -ENOMEM; + } + + rc = gpio_request(BRAVO_GPIO_BATTERY_CHARGER_EN, "charger_en"); + if (rc < 0) { + pr_err("%s: gpio_request(%d) failed: %d\n", __func__, + BRAVO_GPIO_BATTERY_CHARGER_EN, rc); + kfree(p); + return rc; + } + + if (!is_cdma_version(system_rev)) { + rc = gpio_request(BRAVO_GPIO_BATTERY_CHARGER_CURRENT, "charger_current"); + if (rc < 0) { + pr_err("%s: gpio_request(%d) failed: %d\n", __func__, + BRAVO_GPIO_BATTERY_CHARGER_CURRENT, rc); + gpio_free(BRAVO_GPIO_BATTERY_CHARGER_EN); + kfree(p); + return rc; + } + } + + p->pdev.name = "ds2784-battery"; + p->pdev.id = -1; + p->pdev.dev.platform_data = &p->pdata; + p->pdata.charge = ds2784_charge; + p->pdata.w1_slave = sl; + + platform_device_register(&p->pdev); + + return 0; +} + +static struct w1_family_ops w1_ds2784_fops = { + .add_slave = w1_ds2784_add_slave, +}; + +static struct w1_family w1_ds2784_family = { + .fid = W1_FAMILY_DS2784, + .fops = &w1_ds2784_fops, +}; + +static int __init ds2784_battery_init(void) +{ + return w1_register_family(&w1_ds2784_family); +} + +#ifdef CONFIG_OPTICALJOYSTICK_CRUCIAL +static void curcial_oj_shutdown(int enable) +{ + uint8_t cmd[3]; + + memset(cmd, 0x00, sizeof(uint8_t)*3); + cmd[2] = 0x20; + // microp firmware(v04) non-shutdown by default + microp_i2c_write(0x90, cmd, 3); + pr_err("%s\n", __func__); +} + +#define CURCIAL_OJ_POWER 150 +static int curcial_oj_poweron(int on) +{ + uint8_t data[2]; + +#ifdef CONFIG_MACH_BRAVO + struct vreg *oj_power = vreg_get(0, "gp2"); + if (IS_ERR(oj_power)) { + pr_err("%s: Error power domain\n", __func__); + return 0; + } + + if (on) { + vreg_set_level(oj_power, 2750); + vreg_enable(oj_power); + } else { + /* for microp firmware(v04) setting*/ + microp_i2c_read(MICROP_I2C_RCMD_VERSION, data, 2); + if (data[0] < 4) { + printk("Microp firmware version: %d\n", data[0]); + return 1; + } + vreg_disable(oj_power); + } + pr_err("%s: OJ power enable(%d)\n", __func__, on); +#else + /* for microp firmware(v04) setting*/ + if (on == 0) { + microp_i2c_read(MICROP_I2C_RCMD_VERSION, data, 2); + if (data[0] < 4) { + printk("Microp firmware version:%d\n",data[0]); + return 1; + } + } + + gpio_set_value(CURCIAL_OJ_POWER, on); + + if (gpio_get_value(CURCIAL_OJ_POWER) != on) { + printk(KERN_ERR "%s:OJ:power status fail \n", __func__); + return 0; + } + printk(KERN_ERR "%s:OJ:power status ok \n", __func__); +#endif + return 1; +} + +static void curcial_oj_adjust_xy(uint8_t *data, int16_t *mSumDeltaX, int16_t *mSumDeltaY) +{ + int8_t deltaX; + int8_t deltaY; + + if (data[2] == 0x80) + data[2] = 0x81; + if (data[1] == 0x80) + data[1] = 0x81; + if (1) { + deltaX = (1)*((int8_t) data[2]); /*X=2*/ + deltaY = (-1)*((int8_t) data[1]); /*Y=1*/ + } else { + deltaX = (-1)*((int8_t) data[1]); + deltaY = (1)*((int8_t) data[2]); + } + *mSumDeltaX += -((int16_t)deltaX); + *mSumDeltaY += -((int16_t)deltaY); +} + +#define BRAVO_MICROP_VER 0x03 +static struct curcial_oj_platform_data bravo_oj_data = { + .oj_poweron = curcial_oj_poweron, + .oj_shutdown = curcial_oj_shutdown, + .oj_adjust_xy = curcial_oj_adjust_xy, + 
.microp_version = BRAVO_MICROP_VER, + .mdelay_time = 0, + .normal_th = 8, + .xy_ratio = 15, +#ifdef CONFIG_MACH_BRAVO + .interval = 0, + .swap = false, + .y = -1, +#else + .interval = 10, + .swap = true, + .y = 1, +#endif + .x = 1, + .share_power = false, + .debugflag = 0, + .ap_code = false, + .sht_tbl = {0, 1000, 1250, 1500, 1750, 2000, 3000}, + .pxsum_tbl = {0, 0, 90, 100, 110, 120, 130}, + .degree = 7, + .Xsteps = {0, 1, 2, 3, 4, 5, 6, 8, 10, 12, + 14, 16, 18, 20, 22, 24, 26, 27, 28, 29, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}, + .Ysteps = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}, + .irq = MSM_uP_TO_INT(12), +}; + +static struct platform_device bravo_oj = { + .name = CURCIAL_OJ_NAME, + .id = -1, + .dev = { + .platform_data = &bravo_oj_data, + } +}; +#endif + +static struct platform_device *devices[] __initdata = { +#if !defined(CONFIG_MSM_SERIAL_DEBUGGER) + &msm_device_uart1, +#endif + &bcm_bt_lpm_device, + &msm_device_uart_dm1, + &ram_console_device, + &bravo_rfkill, + &msm_device_smd, + &msm_device_nand, + &msm_device_hsusb, + &usb_mass_storage_device, +#ifdef CONFIG_USB_ANDROID_RNDIS + &rndis_device, +#endif + &android_usb_device, +#ifndef CONFIG_ION_MSM + &android_pmem_mdp_device, +#else + &ion_dev, +#endif + &android_pmem_adsp_device, +#ifdef CONFIG_720P_CAMERA + &android_pmem_venc_device, +#endif + &msm_kgsl_3d0, + &msm_device_i2c, + &msm_camera_sensor_s5k3e2fx, + &bravo_flashlight_device, +#ifdef CONFIG_OPTICALJOYSTICK_CRUCIAL + &bravo_oj, +#endif + &capella_cm3602, +}; + +static uint32_t bt_gpio_table[] = { + PCOM_GPIO_CFG(BRAVO_GPIO_BT_UART1_RTS, 2, GPIO_OUTPUT, + GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(BRAVO_GPIO_BT_UART1_CTS, 2, GPIO_INPUT, + GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(BRAVO_GPIO_BT_UART1_RX, 2, GPIO_INPUT, + GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(BRAVO_GPIO_BT_UART1_TX, 2, GPIO_OUTPUT, + GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(BRAVO_GPIO_BT_RESET_N, 0, GPIO_OUTPUT, + GPIO_PULL_DOWN, GPIO_4MA), + PCOM_GPIO_CFG(BRAVO_GPIO_BT_SHUTDOWN_N, 0, GPIO_OUTPUT, + GPIO_PULL_DOWN, GPIO_4MA), + PCOM_GPIO_CFG(BRAVO_GPIO_BT_WAKE, 0, GPIO_OUTPUT, + GPIO_PULL_DOWN, GPIO_4MA), + PCOM_GPIO_CFG(BRAVO_GPIO_BT_HOST_WAKE, 0, GPIO_INPUT, + GPIO_PULL_DOWN, GPIO_4MA), +}; + +static uint32_t bt_gpio_table_rev_CX[] = { + PCOM_GPIO_CFG(BRAVO_GPIO_BT_UART1_RTS, 2, GPIO_OUTPUT, + GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(BRAVO_GPIO_BT_UART1_CTS, 2, GPIO_INPUT, + GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(BRAVO_GPIO_BT_UART1_RX, 2, GPIO_INPUT, + GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(BRAVO_GPIO_BT_UART1_TX, 2, GPIO_OUTPUT, + GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(BRAVO_GPIO_BT_RESET_N, 0, GPIO_OUTPUT, + GPIO_PULL_DOWN, GPIO_4MA), + PCOM_GPIO_CFG(BRAVO_GPIO_BT_SHUTDOWN_N, 0, GPIO_OUTPUT, + GPIO_PULL_DOWN, GPIO_4MA), + PCOM_GPIO_CFG(BRAVO_CDMA_GPIO_BT_WAKE, 0, GPIO_OUTPUT, + GPIO_PULL_DOWN, GPIO_4MA), + PCOM_GPIO_CFG(BRAVO_GPIO_BT_HOST_WAKE, 0, GPIO_INPUT, + GPIO_PULL_DOWN, GPIO_4MA), +}; + +static uint32_t misc_gpio_table[] = { + PCOM_GPIO_CFG(BRAVO_GPIO_LCD_RST_N, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(BRAVO_GPIO_LED_3V3_EN, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(BRAVO_GPIO_DOCK, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_4MA), +}; + +static uint32_t key_int_shutdown_gpio_table[] = { + PCOM_GPIO_CFG(BRAVO_GPIO_35MM_KEY_INT_SHUTDOWN, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA), +}; + +static void bravo_headset_init(void) +{ + if (is_cdma_version(system_rev)) + return; + 
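	/* GSM variants only (CDMA returns above): configure the 3.5mm remote-key
+	 * interrupt shutdown GPIO as an output and drive it low. */
+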
config_gpio_table(key_int_shutdown_gpio_table, + ARRAY_SIZE(key_int_shutdown_gpio_table)); + gpio_set_value(BRAVO_GPIO_35MM_KEY_INT_SHUTDOWN, 0); +} + +#define ATAG_BDADDR 0x43294329 /* bravo bluetooth address tag */ +#define ATAG_BDADDR_SIZE 4 +#define BDADDR_STR_SIZE 18 + +static char bdaddr[BDADDR_STR_SIZE]; + +module_param_string(bdaddr, bdaddr, sizeof(bdaddr), 0400); +MODULE_PARM_DESC(bdaddr, "bluetooth address"); + +static int __init parse_tag_bdaddr(const struct tag *tag) +{ + unsigned char *b = (unsigned char *)&tag->u; + + if (tag->hdr.size != ATAG_BDADDR_SIZE) + return -EINVAL; + + snprintf(bdaddr, BDADDR_STR_SIZE, "%02X:%02X:%02X:%02X:%02X:%02X", + b[0], b[1], b[2], b[3], b[4], b[5]); + + return 0; +} + +__tagtable(ATAG_BDADDR, parse_tag_bdaddr); + +static int __init bravo_board_serialno_setup(char *serialno) +{ +#ifdef CONFIG_USB_ANDROID_RNDIS + int i; + char *src = serialno; + + /* create a fake MAC address from our serial number. + * first byte is 0x02 to signify locally administered. + */ + rndis_pdata.ethaddr[0] = 0x02; + for (i = 0; *src; i++) { + /* XOR the USB serial across the remaining bytes */ + rndis_pdata.ethaddr[i % (ETH_ALEN - 1) + 1] ^= *src++; + } +#endif + + android_usb_pdata.serial_number = serialno; + msm_hsusb_pdata.serial_number = serialno; + return 1; +} +__setup("androidboot.serialno=", bravo_board_serialno_setup); + +static struct msm_acpu_clock_platform_data bravo_clock_data = { + .acpu_switch_time_us = 20, + .max_speed_delta_khz = 256000, + .vdd_switch_time_us = 62, + .power_collapse_khz = 245000, + .wait_for_irq_khz = 245000, + .mpll_khz = 245000 +}; + +static struct msm_acpu_clock_platform_data bravo_cdma_clock_data = { + .acpu_switch_time_us = 20, + .max_speed_delta_khz = 256000, + .vdd_switch_time_us = 62, + .power_collapse_khz = 235930, + .wait_for_irq_khz = 235930, + .mpll_khz = 235930 +}; + +#ifdef CONFIG_PERFLOCK +static unsigned bravo_perf_acpu_table[] = { + 245000000, + 576000000, + 998400000, +}; + +static struct perflock_platform_data bravo_perflock_data = { + .perf_acpu_table = bravo_perf_acpu_table, + .table_size = ARRAY_SIZE(bravo_perf_acpu_table), +}; +#endif + +static void bravo_reset(void) +{ + gpio_set_value(BRAVO_GPIO_PS_HOLD, 0); +}; + +int bravo_init_mmc(int sysrev, unsigned debug_uart); + +static const struct smd_tty_channel_desc smd_cdma_default_channels[] = { + { .id = 0, .name = "SMD_DS" }, + { .id = 19, .name = "SMD_DATA3" }, + { .id = 27, .name = "SMD_GPSNMEA" } +}; + +static void __init bravo_init(void) +{ + int ret; + + printk("bravo_init() revision=%d\n", system_rev); + + if (is_cdma_version(system_rev)) + smd_set_channel_list(smd_cdma_default_channels, + ARRAY_SIZE(smd_cdma_default_channels)); + + msm_hw_reset_hook = bravo_reset; + + bravo_board_serialno_setup(board_serialno()); + + if (is_cdma_version(system_rev)) + msm_acpu_clock_init(&bravo_cdma_clock_data); + else + msm_acpu_clock_init(&bravo_clock_data); + +#ifdef CONFIG_PERFLOCK + perflock_init(&bravo_perflock_data); +#endif + + msm_serial_debug_init(MSM_UART1_PHYS, INT_UART1, + &msm_device_uart1.dev, 1, MSM_GPIO_TO_INT(139)); + + config_gpio_table(misc_gpio_table, ARRAY_SIZE(misc_gpio_table)); + + if (is_cdma_version(system_rev)) { + bcm_bt_lpm_pdata.gpio_wake = BRAVO_CDMA_GPIO_BT_WAKE; + bravo_flashlight_data.torch = BRAVO_CDMA_GPIO_FLASHLIGHT_TORCH; + config_gpio_table(bt_gpio_table_rev_CX, ARRAY_SIZE(bt_gpio_table_rev_CX)); + } else { + config_gpio_table(bt_gpio_table, ARRAY_SIZE(bt_gpio_table)); + } + + gpio_request(BRAVO_GPIO_TP_LS_EN, "tp_ls_en"); + 
gpio_direction_output(BRAVO_GPIO_TP_LS_EN, 0); + gpio_request(BRAVO_GPIO_TP_EN, "tp_en"); + gpio_direction_output(BRAVO_GPIO_TP_EN, 0); +// gpio_request(BRAVO_GPIO_PROXIMITY_EN, "proximity_en"); +// gpio_direction_output(BRAVO_GPIO_PROXIMITY_EN, 1); + gpio_request(BRAVO_GPIO_LS_EN_N, "ls_en"); + gpio_request(BRAVO_GPIO_COMPASS_RST_N, "compass_rst"); + gpio_direction_output(BRAVO_GPIO_COMPASS_RST_N, 1); + gpio_request(BRAVO_GPIO_COMPASS_INT_N, "compass_int"); + gpio_direction_input(BRAVO_GPIO_COMPASS_INT_N); + + gpio_request(BRAVO_GPIO_DS2482_SLP_N, "ds2482_slp_n"); + + msm_device_hsusb.dev.platform_data = &msm_hsusb_pdata; + msm_device_uart_dm1.dev.platform_data = &msm_uart_dm1_pdata; + + platform_add_devices(devices, ARRAY_SIZE(devices)); + + platform_add_devices(msm_footswitch_devices, + msm_num_footswitch_devices); + + i2c_register_board_info(0, base_i2c_devices, + ARRAY_SIZE(base_i2c_devices)); + + if (is_cdma_version(system_rev)) { + i2c_register_board_info(0, rev_CX_i2c_devices, + ARRAY_SIZE(rev_CX_i2c_devices)); + } + + ret = bravo_init_mmc(system_rev, debug_uart); + if (ret != 0) + pr_crit("%s: Unable to initialize MMC\n", __func__); + + bravo_audio_init(); + bravo_headset_init(); + + platform_device_register(&bravo_timed_gpios); + + ds2784_battery_init(); +} + +static void __init bravo_fixup(struct machine_desc *desc, struct tag *tags, + char **cmdline, struct meminfo *mi) +{ + mi->nr_banks = 2; + mi->bank[0].start = PHYS_OFFSET; + mi->bank[0].size = MSM_EBI1_BANK0_SIZE; + mi->bank[1].start = MSM_EBI1_BANK1_BASE; + mi->bank[1].size = MSM_EBI1_BANK1_SIZE; +} + +static void __init bravo_map_io(void) +{ + msm_map_qsd8x50_io(); + msm_clock_init(msm_clocks_8x50, msm_num_clocks_8x50); + if (socinfo_init() < 0) + printk(KERN_ERR "%s: socinfo_init() failed!\n",__func__); +} + +extern struct sys_timer msm_timer; + +#ifdef CONFIG_MACH_BRAVO +MACHINE_START(BRAVO, "bravo") +#else +MACHINE_START(BRAVOC, "bravoc") +#endif + .boot_params = 0x20000100, + .fixup = bravo_fixup, + .map_io = bravo_map_io, + .init_irq = msm_init_irq, + .init_machine = bravo_init, + .timer = &msm_timer, +MACHINE_END diff --git a/arch/arm/mach-msm/board-bravo.h b/arch/arm/mach-msm/board-bravo.h new file mode 100644 index 0000000000000..f3e95cf8785c0 --- /dev/null +++ b/arch/arm/mach-msm/board-bravo.h @@ -0,0 +1,185 @@ +/* arch/arm/mach-msm/board-bravo.h + * + * Copyright (C) 2009 HTC Corporation. + * Author: Haley Teng + * Copyright (C) 2010 Giulio Cervera + * Copyright (C) 2010 Diogo Ferreira + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+*/ + +#ifndef __ARCH_ARM_MACH_MSM_BOARD_BRAVO_H +#define __ARCH_ARM_MACH_MSM_BOARD_BRAVO_H + +#include + +#define MSM_SMI_BASE 0x02B00000 +#define MSM_SMI_SIZE 0x01500000 + +#define MSM_PMEM_VENC_BASE 0x02B00000 +#define MSM_PMEM_VENC_SIZE 0x00800000 + +#define MSM_GPU_MEM_BASE 0x03300000 +#define MSM_GPU_MEM_SIZE 0x00500000 + +#define MSM_RAM_CONSOLE_BASE 0x03A00000 +#define MSM_RAM_CONSOLE_SIZE 0x00040000 + +#define MSM_FB_BASE 0x03B00000 +#define MSM_FB_SIZE 0x00300000 + +#define MSM_EBI1_BANK0_BASE 0x20000000 +#define MSM_EBI1_BANK0_SIZE 0x0E800000 + +#define MSM_EBI1_BANK1_BASE 0x30000000 +#define MSM_EBI1_BANK1_SIZE 0x0B700000 + +#define MSM_PMEM_MDP_BASE 0x3B700000 +#define MSM_PMEM_MDP_SIZE 0x02000000 + +#define MSM_PMEM_ADSP_BASE 0x3D700000 +#define MSM_PMEM_ADSP_SIZE 0x02900000 + +#define BRAVO_GPIO_PS_HOLD 25 + +#define BRAVO_GPIO_OJ_ACTION_XB 33 + +#define BRAVO_GPIO_UP_INT_N 35 +#define BRAVO_GPIO_UP_RESET_N 82 +#define BRAVO_GPIO_LS_EN_N 119 + +#define BRAVO_GPIO_TP_INT_N 92 +#define BRAVO_GPIO_TP_LS_EN 93 +#define BRAVO_GPIO_TP_EN 160 + +#define BRAVO_GPIO_POWER_KEY 94 +#define BRAVO_GPIO_SDMC_CD_N 153 + +#define BRAVO_GPIO_WIFI_SHUTDOWN_N 127 +#define BRAVO_GPIO_WIFI_IRQ 152 + +#define BRAVO_GPIO_BALL_UP 38 +#define BRAVO_GPIO_BALL_DOWN 37 +#define BRAVO_GPIO_BALL_LEFT 145 +#define BRAVO_GPIO_BALL_RIGHT 21 + +#define BRAVO_GPIO_BT_UART1_RTS 43 +#define BRAVO_GPIO_BT_UART1_CTS 44 +#define BRAVO_GPIO_BT_UART1_RX 45 +#define BRAVO_GPIO_BT_UART1_TX 46 +#define BRAVO_GPIO_BT_RESET_N 146 +#define BRAVO_GPIO_BT_SHUTDOWN_N 128 + +#define BRAVO_GPIO_BT_WAKE 57 +#define BRAVO_GPIO_BT_HOST_WAKE 86 + +#define BRAVO_GPIO_PROXIMITY_INT_N 90 +#define BRAVO_GPIO_PROXIMITY_EN 120 + +#define BRAVO_GPIO_DS2482_SLP_N 87 +#define BRAVO_GPIO_VIBRATOR_ON 89 + +#define BRAVO_CDMA_GPIO_BT_WAKE 28 +#define BRAVO_CDMA_GPIO_FLASHLIGHT_TORCH 26 + +#define BRAVO_CDMA_SD_2V85_EN 100 +#define BRAVO_CDMA_JOG_2V6_EN 150 + +/* Compass */ +#define BRAVO_GPIO_COMPASS_INT_N 153 +#define BRAVO_GPIO_COMPASS_RST_N 107 +#ifdef CONFIG_MACH_BRAVO +#define BRAVO_PROJECT_NAME "bravo" +#else +#define BRAVO_PROJECT_NAME "bravoc" +#endif +#define BRAVO_LAYOUTS { \ + { {-1, 0, 0}, { 0, -1, 0}, {0, 0, 1} }, \ + { { 0, -1, 0}, { 1, 0, 0}, {0, 0, -1} }, \ + { { 0, -1, 0}, { 1, 0, 0}, {0, 0, 1} }, \ + { {-1, 0, 0}, { 0, 0, -1}, {0, 1, 0} } \ +} + +/* Audio */ +#define BRAVO_AUD_JACKHP_EN 157 +#define BRAVO_AUD_2V5_EN 158 +#define BRAVO_AUD_MICPATH_SEL 111 + +#define BRAVO_GPIO_AUD_SPK_AMP_EN 104 + +/* Bluetooth PCM */ +#define BRAVO_BT_PCM_OUT 68 +#define BRAVO_BT_PCM_IN 69 +#define BRAVO_BT_PCM_SYNC 70 +#define BRAVO_BT_PCM_CLK 71 +/* flash light */ +#define BRAVO_GPIO_FLASHLIGHT_TORCH 58 +#define BRAVO_GPIO_FLASHLIGHT_FLASH 84 + +/* keypad */ +#define BRAVO_GPIO_KP_MKOUT0 33 +#define BRAVO_GPIO_KP_MKOUT1 32 +#define BRAVO_GPIO_KP_MKOUT2 31 +#define BRAVO_GPIO_KP_MPIN0 42 +#define BRAVO_GPIO_KP_MPIN1 41 +#define BRAVO_GPIO_KP_MPIN2 40 + +#define BRAVO_GPIO_LED_3V3_EN 85 +#define BRAVO_GPIO_LCD_RST_N 29 +#define BRAVO_GPIO_LCD_ID0 147 + +/* 3.5mm remote control key interrupt shutdown signal */ +#define BRAVO_GPIO_35MM_KEY_INT_SHUTDOWN 19 + +#define BRAVO_GPIO_DOCK 106 + +#define BRAVO_CDMA_GPIO_AUD_SPK_AMP_EN 104 + +#define BRAVO_GPIO_BATTERY_DETECTION 39 +#define BRAVO_GPIO_BATTERY_CHARGER_EN 22 +#define BRAVO_GPIO_BATTERY_CHARGER_CURRENT 16 + +/* display relative */ +#define BRAVO_LCD_SPI_CLK (17) +#define BRAVO_LCD_SPI_DO (18) +#define BRAVO_LCD_SPI_CSz (20) +#define BRAVO_LCD_RSTz (29) +#define BRAVO_LCD_R1 (114) +#define 
BRAVO_LCD_R2 (115) +#define BRAVO_LCD_R3 (116) +#define BRAVO_LCD_R4 (117) +#define BRAVO_LCD_R5 (118) +#define BRAVO_LCD_G0 (121) +#define BRAVO_LCD_G1 (122) +#define BRAVO_LCD_G2 (123) +#define BRAVO_LCD_G3 (124) +#define BRAVO_LCD_G4 (125) +#define BRAVO_LCD_G5 (126) +#define BRAVO_LCD_B1 (130) +#define BRAVO_LCD_B2 (131) +#define BRAVO_LCD_B3 (132) +#define BRAVO_LCD_B4 (133) +#define BRAVO_LCD_B5 (134) +#define BRAVO_LCD_PCLK (135) +#define BRAVO_LCD_VSYNC (136) +#define BRAVO_LCD_HSYNC (137) +#define BRAVO_LCD_DE (138) + +/* know revision + 0x02 = GSM amoled (dev phone) + 0x05 = CDMA lcd + 0x81 = GSM amoled + 0x83 = GSM lcd + 0x84 = GSM lcd +*/ + +#define is_cdma_version(rev) (rev == 0x05) + +#endif /* __ARCH_ARM_MACH_MSM_BOARD_BRAVO_H */ diff --git a/arch/arm/mach-msm/board-halibut-keypad.c b/arch/arm/mach-msm/board-halibut-keypad.c new file mode 100644 index 0000000000000..49c1075627d32 --- /dev/null +++ b/arch/arm/mach-msm/board-halibut-keypad.c @@ -0,0 +1,177 @@ +/* linux/arch/arm/mach-msm/board-halibut-keypad.c + * + * Copyright (C) 2007 Google, Inc. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include + +#undef MODULE_PARAM_PREFIX +#define MODULE_PARAM_PREFIX "board_halibut." +static int halibut_ffa; +module_param_named(ffa, halibut_ffa, int, S_IRUGO | S_IWUSR | S_IWGRP); + +#define SCAN_FUNCTION_KEYS 0 /* don't turn this on without updating the ffa support */ + +static unsigned int halibut_row_gpios[] = { + 31, 32, 33, 34, 35, 41 +#if SCAN_FUNCTION_KEYS + , 42 +#endif +}; + +static unsigned int halibut_col_gpios[] = { 36, 37, 38, 39, 40 }; + +/* FFA: + 36: KEYSENSE_N(0) + 37: KEYSENSE_N(1) + 38: KEYSENSE_N(2) + 39: KEYSENSE_N(3) + 40: KEYSENSE_N(4) + + 31: KYPD_17 + 32: KYPD_15 + 33: KYPD_13 + 34: KYPD_11 + 35: KYPD_9 + 41: KYPD_MEMO +*/ + +#define KEYMAP_INDEX(row, col) ((row)*ARRAY_SIZE(halibut_col_gpios) + (col)) + +static const unsigned short halibut_keymap[ARRAY_SIZE(halibut_col_gpios) * ARRAY_SIZE(halibut_row_gpios)] = { + [KEYMAP_INDEX(0, 0)] = KEY_5, + [KEYMAP_INDEX(0, 1)] = KEY_9, + [KEYMAP_INDEX(0, 2)] = 229, /* SOFT1 */ + [KEYMAP_INDEX(0, 3)] = KEY_6, + [KEYMAP_INDEX(0, 4)] = KEY_LEFT, + + [KEYMAP_INDEX(1, 0)] = KEY_0, + [KEYMAP_INDEX(1, 1)] = KEY_RIGHT, + [KEYMAP_INDEX(1, 2)] = KEY_1, + [KEYMAP_INDEX(1, 3)] = 228, /* KEY_SHARP */ + [KEYMAP_INDEX(1, 4)] = KEY_SEND, + + [KEYMAP_INDEX(2, 0)] = KEY_VOLUMEUP, + [KEYMAP_INDEX(2, 1)] = KEY_HOME, /* FA */ + [KEYMAP_INDEX(2, 2)] = KEY_F8, /* QCHT */ + [KEYMAP_INDEX(2, 3)] = KEY_F6, /* R+ */ + [KEYMAP_INDEX(2, 4)] = KEY_F7, /* R- */ + + [KEYMAP_INDEX(3, 0)] = KEY_UP, + [KEYMAP_INDEX(3, 1)] = KEY_CLEAR, + [KEYMAP_INDEX(3, 2)] = KEY_4, + [KEYMAP_INDEX(3, 3)] = KEY_MUTE, /* SPKR */ + [KEYMAP_INDEX(3, 4)] = KEY_2, + + [KEYMAP_INDEX(4, 0)] = 230, /* SOFT2 */ + [KEYMAP_INDEX(4, 1)] = 232, /* KEY_CENTER */ + [KEYMAP_INDEX(4, 2)] = KEY_DOWN, + [KEYMAP_INDEX(4, 3)] = KEY_BACK, /* FB */ + [KEYMAP_INDEX(4, 4)] = KEY_8, + + [KEYMAP_INDEX(5, 0)] = KEY_VOLUMEDOWN, + [KEYMAP_INDEX(5, 1)] = 227, /* KEY_STAR */ + [KEYMAP_INDEX(5, 2)] = KEY_MAIL, /* MESG */ + 
[KEYMAP_INDEX(5, 3)] = KEY_3, + [KEYMAP_INDEX(5, 4)] = KEY_7, + +#if SCAN_FUNCTION_KEYS + [KEYMAP_INDEX(6, 0)] = KEY_F5, + [KEYMAP_INDEX(6, 1)] = KEY_F4, + [KEYMAP_INDEX(6, 2)] = KEY_F3, + [KEYMAP_INDEX(6, 3)] = KEY_F2, + [KEYMAP_INDEX(6, 4)] = KEY_F1 +#endif +}; + +static const unsigned short halibut_keymap_ffa[ARRAY_SIZE(halibut_col_gpios) * ARRAY_SIZE(halibut_row_gpios)] = { + /*[KEYMAP_INDEX(0, 0)] = ,*/ + /*[KEYMAP_INDEX(0, 1)] = ,*/ + [KEYMAP_INDEX(0, 2)] = KEY_1, + [KEYMAP_INDEX(0, 3)] = KEY_SEND, + [KEYMAP_INDEX(0, 4)] = KEY_LEFT, + + [KEYMAP_INDEX(1, 0)] = KEY_3, + [KEYMAP_INDEX(1, 1)] = KEY_RIGHT, + [KEYMAP_INDEX(1, 2)] = KEY_VOLUMEUP, + /*[KEYMAP_INDEX(1, 3)] = ,*/ + [KEYMAP_INDEX(1, 4)] = KEY_6, + + [KEYMAP_INDEX(2, 0)] = KEY_HOME, /* A */ + [KEYMAP_INDEX(2, 1)] = KEY_BACK, /* B */ + [KEYMAP_INDEX(2, 2)] = KEY_0, + [KEYMAP_INDEX(2, 3)] = 228, /* KEY_SHARP */ + [KEYMAP_INDEX(2, 4)] = KEY_9, + + [KEYMAP_INDEX(3, 0)] = KEY_UP, + [KEYMAP_INDEX(3, 1)] = 232, /* KEY_CENTER */ /* i */ + [KEYMAP_INDEX(3, 2)] = KEY_4, + /*[KEYMAP_INDEX(3, 3)] = ,*/ + [KEYMAP_INDEX(3, 4)] = KEY_2, + + [KEYMAP_INDEX(4, 0)] = KEY_VOLUMEDOWN, + [KEYMAP_INDEX(4, 1)] = KEY_SOUND, + [KEYMAP_INDEX(4, 2)] = KEY_DOWN, + [KEYMAP_INDEX(4, 3)] = KEY_8, + [KEYMAP_INDEX(4, 4)] = KEY_5, + + /*[KEYMAP_INDEX(5, 0)] = ,*/ + [KEYMAP_INDEX(5, 1)] = 227, /* KEY_STAR */ + [KEYMAP_INDEX(5, 2)] = 230, /*SOFT2*/ /* 2 */ + [KEYMAP_INDEX(5, 3)] = KEY_MENU, /* 1 */ + [KEYMAP_INDEX(5, 4)] = KEY_7, +}; + +static struct gpio_event_matrix_info halibut_matrix_info = { + .info.func = gpio_event_matrix_func, + .keymap = halibut_keymap, + .output_gpios = halibut_row_gpios, + .input_gpios = halibut_col_gpios, + .noutputs = ARRAY_SIZE(halibut_row_gpios), + .ninputs = ARRAY_SIZE(halibut_col_gpios), + .settle_time.tv.nsec = 0, + .poll_time.tv.nsec = 20 * NSEC_PER_MSEC, + .flags = GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_DRIVE_INACTIVE | GPIOKPF_PRINT_UNMAPPED_KEYS /*| GPIOKPF_PRINT_MAPPED_KEYS*/ +}; + +struct gpio_event_info *halibut_keypad_info[] = { + &halibut_matrix_info.info +}; + +static struct gpio_event_platform_data halibut_keypad_data = { + .name = "halibut_keypad", + .info = halibut_keypad_info, + .info_count = ARRAY_SIZE(halibut_keypad_info) +}; + +static struct platform_device halibut_keypad_device = { + .name = GPIO_EVENT_DEV_NAME, + .id = -1, + .dev = { + .platform_data = &halibut_keypad_data, + }, +}; + +static int __init halibut_init_keypad(void) +{ + if (!machine_is_halibut()) + return 0; + if (halibut_ffa) + halibut_matrix_info.keymap = halibut_keymap_ffa; + return platform_device_register(&halibut_keypad_device); +} + +device_initcall(halibut_init_keypad); diff --git a/arch/arm/mach-msm/board-halibut-panel.c b/arch/arm/mach-msm/board-halibut-panel.c new file mode 100644 index 0000000000000..a498c65344b52 --- /dev/null +++ b/arch/arm/mach-msm/board-halibut-panel.c @@ -0,0 +1,73 @@ +/* linux/arch/arm/mach-msm/board-halibut-mddi.c +** Author: Brian Swetland +*/ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include "proc_comm.h" +#include "devices.h" +#include "board-halibut.h" + +static void halibut_mddi_power_client(struct msm_mddi_client_data *mddi, + int on) +{ +} + +static struct resource resources_msm_fb = { + .start = MSM_FB_BASE, + .end = MSM_FB_BASE + MSM_FB_SIZE - 1, + .flags = IORESOURCE_MEM, +}; + +static struct msm_fb_data fb_data = { + .xres = 800, + .yres = 480, + .output_format = 0, +}; + +static struct 
msm_mddi_platform_data mddi_pdata = { + .clk_rate = 122880000, + .power_client = halibut_mddi_power_client, + .fb_resource = &resources_msm_fb, + .num_clients = 1, + .client_platform_data = { + { + .product_id = (0x4474 << 16 | 0xc065), + .name = "mddi_c_dummy", + .id = 0, + .client_data = &fb_data, + .clk_rate = 0, + }, + }, +}; + +int __init halibut_init_panel(void) +{ + int rc; + + if (!machine_is_halibut()) + return 0; + + rc = platform_device_register(&msm_device_mdp); + if (rc) + return rc; + + msm_device_mddi0.dev.platform_data = &mddi_pdata; + return platform_device_register(&msm_device_mddi0); +} + +device_initcall(halibut_init_panel); diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c index 75dabb16c8023..42a4dc38a6d65 100644 --- a/arch/arm/mach-msm/board-halibut.c +++ b/arch/arm/mach-msm/board-halibut.c @@ -20,8 +20,11 @@ #include #include #include +#include #include +#include +#include #include #include #include @@ -31,11 +34,18 @@ #include #include #include +#include +#include #include #include +#include +#include +#include #include "devices.h" +#include "board-halibut.h" +#include "proc_comm.h" static struct resource smc91x_resources[] = { [0] = { @@ -57,13 +67,345 @@ static struct platform_device smc91x_device = { .resource = smc91x_resources, }; +static struct i2c_board_info i2c_devices[] = { +#ifdef CONFIG_MT9D112 + { + I2C_BOARD_INFO("mt9d112", 0x78 >> 1), + }, +#endif +#ifdef CONFIG_S5K3E2FX + { + I2C_BOARD_INFO("s5k3e2fx", 0x20 >> 1), + }, +#endif +#ifdef CONFIG_MT9P012 + { + I2C_BOARD_INFO("mt9p012", 0x6C >> 1), + }, +#endif +#if defined(CONFIG_MT9T013) || defined(CONFIG_SENSORS_MT9T013) + { + I2C_BOARD_INFO("mt9t013", 0x6C), // 0x78>>1 + }, +#endif +}; + +#ifdef CONFIG_MSM_CAMERA +static uint32_t camera_off_gpio_table[] = { + /* parallel CAMERA interfaces */ + PCOM_GPIO_CFG(0, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT0 */ + PCOM_GPIO_CFG(1, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT1 */ + PCOM_GPIO_CFG(2, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT2 */ + PCOM_GPIO_CFG(3, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT3 */ + PCOM_GPIO_CFG(4, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT4 */ + PCOM_GPIO_CFG(5, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT5 */ + PCOM_GPIO_CFG(6, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT6 */ + PCOM_GPIO_CFG(7, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT7 */ + PCOM_GPIO_CFG(8, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT8 */ + PCOM_GPIO_CFG(9, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT9 */ + PCOM_GPIO_CFG(10, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT10 */ + PCOM_GPIO_CFG(11, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT11 */ + PCOM_GPIO_CFG(12, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* PCLK */ + PCOM_GPIO_CFG(13, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* HSYNC_IN */ + PCOM_GPIO_CFG(14, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* VSYNC_IN */ + PCOM_GPIO_CFG(15, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), /* MCLK */ +}; + +static uint32_t camera_on_gpio_table[] = { + /* parallel CAMERA interfaces */ + PCOM_GPIO_CFG(0, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT0 */ + PCOM_GPIO_CFG(1, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT1 */ + PCOM_GPIO_CFG(2, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT2 */ + PCOM_GPIO_CFG(3, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT3 */ + PCOM_GPIO_CFG(4, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT4 */ + PCOM_GPIO_CFG(5, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT5 */ + PCOM_GPIO_CFG(6, 1, 
GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT6 */ + PCOM_GPIO_CFG(7, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT7 */ + PCOM_GPIO_CFG(8, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT8 */ + PCOM_GPIO_CFG(9, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT9 */ + PCOM_GPIO_CFG(10, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT10 */ + PCOM_GPIO_CFG(11, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT11 */ + PCOM_GPIO_CFG(12, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_16MA), /* PCLK */ + PCOM_GPIO_CFG(13, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* HSYNC_IN */ + PCOM_GPIO_CFG(14, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* VSYNC_IN */ + PCOM_GPIO_CFG(15, 1, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_16MA), /* MCLK */ +}; + +static void config_camera_on_gpios(void) +{ + config_gpio_table(camera_on_gpio_table, + ARRAY_SIZE(camera_on_gpio_table)); +} + +static void config_camera_off_gpios(void) +{ + config_gpio_table(camera_off_gpio_table, + ARRAY_SIZE(camera_off_gpio_table)); +} + +static struct msm_camera_device_platform_data msm_camera_device_data = { + .camera_gpio_on = config_camera_on_gpios, + .camera_gpio_off = config_camera_off_gpios, + .ioext.mdcphy = MSM_MDC_PHYS, + .ioext.mdcsz = MSM_MDC_SIZE, + .ioext.appphy = MSM_CLK_CTL_PHYS, + .ioext.appsz = MSM_CLK_CTL_SIZE, +}; + +#ifdef CONFIG_MT9D112 +static struct msm_camera_sensor_info msm_camera_sensor_mt9d112_data = { + .sensor_name = "mt9d112", + .sensor_reset = 89, + .sensor_pwd = 85, + .vcm_pwd = 0, + .pdata = &msm_camera_device_data, +}; + +static struct platform_device msm_camera_sensor_mt9d112 = { + .name = "msm_camera_mt9d112", + .dev = { + .platform_data = &msm_camera_sensor_mt9d112_data, + }, +}; +#endif + +#ifdef CONFIG_S5K3E2FX +static struct msm_camera_sensor_info msm_camera_sensor_s5k3e2fx_data = { + .sensor_name = "s5k3e2fx", + .sensor_reset = 89, + .sensor_pwd = 85, + .vcm_pwd = 0, + .pdata = &msm_camera_device_data, +}; + +static struct platform_device msm_camera_sensor_s5k3e2fx = { + .name = "msm_camera_s5k3e2fx", + .dev = { + .platform_data = &msm_camera_sensor_s5k3e2fx_data, + }, +}; +#endif + +#ifdef CONFIG_MT9P012 +static struct msm_camera_sensor_info msm_camera_sensor_mt9p012_data = { + .sensor_name = "mt9p012", + .sensor_reset = 89, + .sensor_pwd = 85, + .vcm_pwd = 88, + .pdata = &msm_camera_device_data, +}; + +static struct platform_device msm_camera_sensor_mt9p012 = { + .name = "msm_camera_mt9p012", + .dev = { + .platform_data = &msm_camera_sensor_mt9p012_data, + }, +}; +#endif + +#ifdef CONFIG_MT9T013 +static struct msm_camera_sensor_info msm_camera_sensor_mt9t013_data = { + .sensor_name = "mt9t013", + .sensor_reset = 89, + .sensor_pwd = 85, + .vcm_pwd = 0, + .pdata = &msm_camera_device_data, +}; + +static struct platform_device msm_camera_sensor_mt9t013 = { + .name = "msm_camera_mt9t013", + .dev = { + .platform_data = &msm_camera_sensor_mt9t013_data, + }, +}; +#endif +#endif /*CONFIG_MSM_CAMERA*/ + +#define SND(desc, num) { .name = #desc, .id = num } +static struct snd_endpoint snd_endpoints_list[] = { + SND(HANDSET, 0), + SND(HEADSET, 2), + SND(SPEAKER, 6), + SND(BT, 12), + SND(CURRENT, 25), +}; +#undef SND + +static struct msm_snd_endpoints halibut_snd_endpoints = { + .endpoints = snd_endpoints_list, + .num = sizeof(snd_endpoints_list) / sizeof(struct snd_endpoint) +}; + +static struct platform_device halibut_snd = { + .name = "msm_snd", + .id = -1, + .dev = { + .platform_data = &halibut_snd_endpoints + }, +}; + +static struct android_pmem_platform_data android_pmem_pdata = { + .name = "pmem", + .start = 
MSM_PMEM_MDP_BASE, + .size = MSM_PMEM_MDP_SIZE, + .no_allocator = 0, + .cached = 1, +}; + +static struct android_pmem_platform_data android_pmem_camera_pdata = { + .name = "pmem_camera", + .start = MSM_PMEM_CAMERA_BASE, + .size = MSM_PMEM_CAMERA_SIZE, + .no_allocator = 1, + .cached = 1, +}; + +static struct android_pmem_platform_data android_pmem_adsp_pdata = { + .name = "pmem_adsp", + .start = MSM_PMEM_ADSP_BASE, + .size = MSM_PMEM_ADSP_SIZE, + .no_allocator = 0, + .cached = 0, +}; + +static struct android_pmem_platform_data android_pmem_gpu0_pdata = { + .name = "pmem_gpu0", + .start = MSM_PMEM_GPU0_BASE, + .size = MSM_PMEM_GPU0_SIZE, + .no_allocator = 1, + .cached = 0, +}; + +static struct android_pmem_platform_data android_pmem_gpu1_pdata = { + .name = "pmem_gpu1", + .start = MSM_PMEM_GPU1_BASE, + .size = MSM_PMEM_GPU1_SIZE, + .no_allocator = 1, + .cached = 0, +}; + +static struct platform_device android_pmem_device = { + .name = "android_pmem", + .id = 0, + .dev = { .platform_data = &android_pmem_pdata }, +}; + +static struct platform_device android_pmem_adsp_device = { + .name = "android_pmem", + .id = 1, + .dev = { .platform_data = &android_pmem_adsp_pdata }, +}; + +static struct platform_device android_pmem_gpu0_device = { + .name = "android_pmem", + .id = 2, + .dev = { .platform_data = &android_pmem_gpu0_pdata }, +}; + +static struct platform_device android_pmem_gpu1_device = { + .name = "android_pmem", + .id = 3, + .dev = { .platform_data = &android_pmem_gpu1_pdata }, +}; + +static struct platform_device android_pmem_camera_device = { + .name = "android_pmem", + .id = 4, + .dev = { .platform_data = &android_pmem_camera_pdata }, +}; + +static int halibut_phy_init_seq[] = { 0x1D, 0x0D, 0x1D, 0x10, -1 }; + +static struct msm_hsusb_platform_data msm_hsusb_pdata = { + .phy_init_seq = halibut_phy_init_seq, +}; + +static struct usb_mass_storage_platform_data mass_storage_pdata = { + .nluns = 1, + .vendor = "Qualcomm", + .product = "Halibut", + .release = 0x0100, +}; + +static struct platform_device usb_mass_storage_device = { + .name = "usb_mass_storage", + .id = -1, + .dev = { + .platform_data = &mass_storage_pdata, + }, +}; + +static char *usb_functions[] = { "usb_mass_storage" }; +static char *usb_functions_adb[] = { "usb_mass_storage", "adb" }; + +static struct android_usb_product usb_products[] = { + { + .product_id = 0x0c01, + .num_functions = ARRAY_SIZE(usb_functions), + .functions = usb_functions, + }, + { + .product_id = 0x0c02, + .num_functions = ARRAY_SIZE(usb_functions_adb), + .functions = usb_functions_adb, + }, +}; + +static struct android_usb_platform_data android_usb_pdata = { + .vendor_id = 0x18d1, + .product_id = 0x0c01, + .version = 0x0100, + .serial_number = "42", + .product_name = "Halibutdroid", + .manufacturer_name = "Qualcomm", + .num_products = ARRAY_SIZE(usb_products), + .products = usb_products, + .num_functions = ARRAY_SIZE(usb_functions_adb), + .functions = usb_functions_adb, +}; + +static struct platform_device android_usb_device = { + .name = "android_usb", + .id = -1, + .dev = { + .platform_data = &android_usb_pdata, + }, +}; + +static struct platform_device fish_battery_device = { + .name = "fish_battery", +}; + static struct platform_device *devices[] __initdata = { +#if !defined(CONFIG_MSM_SERIAL_DEBUGGER) &msm_device_uart3, +#endif &msm_device_smd, &msm_device_nand, &msm_device_hsusb, + &usb_mass_storage_device, + &android_usb_device, &msm_device_i2c, &smc91x_device, + &halibut_snd, +#ifdef CONFIG_MT9T013 + &msm_camera_sensor_mt9t013, +#endif +#ifdef 
CONFIG_MT9D112 + &msm_camera_sensor_mt9d112, +#endif +#ifdef CONFIG_S5K3E2FX + &msm_camera_sensor_s5k3e2fx, +#endif +#ifdef CONFIG_MT9P012 + &msm_camera_sensor_mt9p012, +#endif + &android_pmem_device, + &android_pmem_adsp_device, + &android_pmem_gpu0_device, + &android_pmem_gpu1_device, + &android_pmem_camera_device, + &fish_battery_device, }; extern struct sys_timer msm_timer; @@ -73,15 +415,38 @@ static void __init halibut_init_irq(void) msm_init_irq(); } +static struct msm_acpu_clock_platform_data halibut_clock_data = { + .acpu_switch_time_us = 50, + .max_speed_delta_khz = 256000, + .vdd_switch_time_us = 62, + .power_collapse_khz = 19200000, + .wait_for_irq_khz = 128000000, +}; + +extern void msm_serial_debug_init(unsigned int base, int irq, + struct device *clk_device, int signal_irq); + static void __init halibut_init(void) { +#if defined(CONFIG_MSM_SERIAL_DEBUGGER) + msm_serial_debug_init(MSM_UART3_PHYS, INT_UART3, + &msm_device_uart3.dev, 1); +#endif + msm_device_hsusb.dev.platform_data = &msm_hsusb_pdata; + msm_acpu_clock_init(&halibut_clock_data); +#ifdef CONFIG_MSM_CAMERA + config_camera_off_gpios(); /* might not be necessary */ +#endif + i2c_register_board_info(0, i2c_devices, ARRAY_SIZE(i2c_devices)); platform_add_devices(devices, ARRAY_SIZE(devices)); + i2c_register_board_info(0, i2c_devices, ARRAY_SIZE(i2c_devices)); + msm_hsusb_set_vbus_state(1); } static void __init halibut_fixup(struct machine_desc *desc, struct tag *tags, char **cmdline, struct meminfo *mi) { - mi->nr_banks=1; + mi->nr_banks = 1; mi->bank[0].start = PHYS_OFFSET; mi->bank[0].size = (101*1024*1024); } @@ -94,6 +459,8 @@ static void __init halibut_map_io(void) MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)") #ifdef CONFIG_MSM_DEBUG_UART + .phys_io = MSM_DEBUG_UART_PHYS, + .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc, #endif .boot_params = 0x10000100, .fixup = halibut_fixup, diff --git a/arch/arm/mach-msm/board-halibut.h b/arch/arm/mach-msm/board-halibut.h new file mode 100644 index 0000000000000..edcdacb34c274 --- /dev/null +++ b/arch/arm/mach-msm/board-halibut.h @@ -0,0 +1,20 @@ +/* linux/arch/arm/mach-msm/board-trout.h + * ** Author: Brian Swetland + * */ +#ifndef __ARCH_ARM_MACH_MSM_BOARD_HALIBUT_H +#define __ARCH_ARM_MACH_MSM_BOARD_HALIBUT_H + +#define MSM_PMEM_GPU0_BASE (0x10000000 + 64*SZ_1M) +#define MSM_PMEM_GPU0_SIZE 0x800000 +#define MSM_PMEM_MDP_BASE (MSM_PMEM_GPU0_BASE + MSM_PMEM_GPU0_SIZE) +#define MSM_PMEM_MDP_SIZE 0x800000 +#define MSM_PMEM_ADSP_BASE (MSM_PMEM_MDP_BASE + MSM_PMEM_MDP_SIZE) +#define MSM_PMEM_ADSP_SIZE 0x800000 +#define MSM_PMEM_GPU1_BASE (MSM_PMEM_ADSP_BASE + MSM_PMEM_ADSP_SIZE) +#define MSM_PMEM_GPU1_SIZE 0x800000 +#define MSM_FB_BASE (MSM_PMEM_GPU1_BASE + MSM_PMEM_GPU1_SIZE) +#define MSM_FB_SIZE 0x200000 +#define MSM_PMEM_CAMERA_BASE (MSM_FB_BASE + MSM_FB_SIZE) +#define MSM_PMEM_CAMERA_SIZE 0xA00000 + +#endif diff --git a/arch/arm/mach-msm/board-incrediblec-audio.c b/arch/arm/mach-msm/board-incrediblec-audio.c new file mode 100644 index 0000000000000..0550afb75431d --- /dev/null +++ b/arch/arm/mach-msm/board-incrediblec-audio.c @@ -0,0 +1,275 @@ +/* arch/arm/mach-msm/board-incrediblec-audio.c + * + * Copyright (C) 2009 HTC Corporation + * Copyright (C) 2009 Google Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "board-incrediblec.h" +#include "proc_comm.h" +#include "pmic.h" + +#if 1 +#define D(fmt, args...) printk(KERN_INFO "Audio: "fmt, ##args) +#else +#define D(fmt, args...) do {} while (0) +#endif + +static struct mutex mic_lock; +static struct mutex bt_sco_lock; +static int headset_status = 0; + +static struct q6_hw_info q6_audio_hw[Q6_HW_COUNT] = { + [Q6_HW_HANDSET] = { + .min_gain = -1500, + .max_gain = 1199, + }, + [Q6_HW_HEADSET] = { + .min_gain = -2000, + .max_gain = 1199, + }, + [Q6_HW_SPEAKER] = { + .min_gain = -1100, + .max_gain = 400, + }, + [Q6_HW_TTY] = { + .min_gain = -1600, + .max_gain = 400, + }, + [Q6_HW_BT_SCO] = { + .min_gain = -1600, + .max_gain = 400, + }, + [Q6_HW_BT_A2DP] = { + .min_gain = -1600, + .max_gain = 400, + }, +}; + +void incrediblec_headset_enable(int en) +{ + D("%s %d\n", __func__, en); + /* enable audio amp */ + if (en != headset_status) { + headset_status = en; + if(en) { + gpio_set_value(INCREDIBLEC_AUD_JACKHP_EN, 1); + mdelay(10); + set_headset_amp(1); + } else { + set_headset_amp(0); + gpio_set_value(INCREDIBLEC_AUD_JACKHP_EN, 0); + } + } +} + +void incrediblec_speaker_enable(int en) +{ + struct spkr_config_mode scm; + memset(&scm, 0, sizeof(scm)); + + D("%s %d\n", __func__, en); + if (en) { + scm.is_right_chan_en = 0; + scm.is_left_chan_en = 1; + scm.is_stereo_en = 0; + scm.is_hpf_en = 1; + pmic_spkr_en_mute(LEFT_SPKR, 0); + pmic_set_spkr_configuration(&scm); + pmic_spkr_en(LEFT_SPKR, 1); + + /* unmute */ + pmic_spkr_en_mute(LEFT_SPKR, 1); + } else { + pmic_spkr_en_mute(LEFT_SPKR, 0); + + pmic_spkr_en(LEFT_SPKR, 0); + + pmic_set_spkr_configuration(&scm); + } +} + +void incrediblec_receiver_enable(int en) +{ + /* After XB*/ + if (system_rev >= 1) { + struct spkr_config_mode scm; + memset(&scm, 0, sizeof(scm)); + + D("%s %d\n", __func__, en); + if (en) { + scm.is_right_chan_en = 1; + scm.is_left_chan_en = 0; + scm.is_stereo_en = 0; + scm.is_hpf_en = 1; + pmic_spkr_en_mute(RIGHT_SPKR, 0); + pmic_set_spkr_configuration(&scm); + pmic_spkr_en(RIGHT_SPKR, 1); + + /* unmute */ + pmic_spkr_en_mute(RIGHT_SPKR, 1); + } else { + pmic_spkr_en_mute(RIGHT_SPKR, 0); + + pmic_spkr_en(RIGHT_SPKR, 0); + + pmic_set_spkr_configuration(&scm); + } + } +} + +static uint32_t bt_sco_enable[] = { + PCOM_GPIO_CFG(INCREDIBLEC_BT_PCM_OUT, 1, GPIO_INPUT, + GPIO_PULL_DOWN, GPIO_2MA), + PCOM_GPIO_CFG(INCREDIBLEC_BT_PCM_IN, 1, GPIO_INPUT, + GPIO_PULL_DOWN, GPIO_2MA), + PCOM_GPIO_CFG(INCREDIBLEC_BT_PCM_SYNC, 2, GPIO_INPUT, + GPIO_PULL_DOWN, GPIO_2MA), + PCOM_GPIO_CFG(INCREDIBLEC_BT_PCM_CLK, 2, GPIO_INPUT, + GPIO_PULL_DOWN, GPIO_2MA), +}; + +static uint32_t bt_sco_disable[] = { + PCOM_GPIO_CFG(INCREDIBLEC_BT_PCM_OUT, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(INCREDIBLEC_BT_PCM_IN, 0, GPIO_INPUT, + GPIO_PULL_UP, GPIO_2MA), + PCOM_GPIO_CFG(INCREDIBLEC_BT_PCM_SYNC, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(INCREDIBLEC_BT_PCM_CLK, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA), +}; + +void incrediblec_bt_sco_enable(int en) +{ + static int bt_sco_refcount; + D("%s %d\n", __func__, en); + mutex_lock(&bt_sco_lock); + if (en) { + if (++bt_sco_refcount == 1) + config_gpio_table(bt_sco_enable, + ARRAY_SIZE(bt_sco_enable)); + } 
else { + if (--bt_sco_refcount == 0) { + config_gpio_table(bt_sco_disable, ARRAY_SIZE(bt_sco_disable)); + gpio_set_value(INCREDIBLEC_BT_PCM_OUT, 0); + gpio_set_value(INCREDIBLEC_BT_PCM_SYNC,0); + gpio_set_value(INCREDIBLEC_BT_PCM_CLK,0); + } + } + mutex_unlock(&bt_sco_lock); +} + +void incrediblec_int_mic_enable(int en) +{ + D("%s %d\n", __func__, en); + if (en) + pmic_mic_en(ON_CMD); + else + pmic_mic_en(OFF_CMD); +} + +void incrediblec_ext_mic_enable(int en) +{ + static int old_state = 0, new_state = 0; + + D("%s %d\n", __func__, en); + + mutex_lock(&mic_lock); + if (!!en) + new_state++; + else + new_state--; + + if (new_state == 1 && old_state == 0) { + gpio_set_value(INCREDIBLEC_AUD_2V5_EN, 1); + } else if (new_state == 0 && old_state == 1) + gpio_set_value(INCREDIBLEC_AUD_2V5_EN, 0); + else + D("%s: do nothing %d %d\n", __func__, old_state, new_state); + + old_state = new_state; + mutex_unlock(&mic_lock); +} + +void incrediblec_analog_init(void) +{ + D("%s\n", __func__); + /* stereo pmic init */ + pmic_spkr_set_gain(LEFT_SPKR, SPKR_GAIN_PLUS12DB); + pmic_spkr_set_gain(RIGHT_SPKR, SPKR_GAIN_00DB); + pmic_spkr_en_right_chan(OFF_CMD); + pmic_spkr_en_left_chan(OFF_CMD); + pmic_spkr_add_right_left_chan(OFF_CMD); + pmic_spkr_en_stereo(OFF_CMD); + pmic_spkr_select_usb_with_hpf_20hz(OFF_CMD); + pmic_spkr_bypass_mux(OFF_CMD); + pmic_spkr_en_hpf(ON_CMD); + pmic_spkr_en_sink_curr_from_ref_volt_cir(OFF_CMD); + pmic_spkr_set_mux_hpf_corner_freq(SPKR_FREQ_0_73KHZ); + pmic_mic_set_volt(MIC_VOLT_1_80V); + pmic_set_speaker_delay(SPKR_DLY_100MS); + + gpio_request(INCREDIBLEC_AUD_JACKHP_EN, "aud_jackhp_en"); + gpio_direction_output(INCREDIBLEC_AUD_JACKHP_EN, 1); + gpio_set_value(INCREDIBLEC_AUD_JACKHP_EN, 0); + + mutex_lock(&bt_sco_lock); + config_gpio_table(bt_sco_disable, ARRAY_SIZE(bt_sco_disable)); + gpio_set_value(INCREDIBLEC_BT_PCM_OUT, 0); + gpio_set_value(INCREDIBLEC_BT_PCM_SYNC,0); + gpio_set_value(INCREDIBLEC_BT_PCM_CLK,0); + mutex_unlock(&bt_sco_lock); +} + +int incrediblec_get_rx_vol(uint8_t hw, int level) +{ + struct q6_hw_info *info; + int vol; + + info = &q6_audio_hw[hw]; + vol = info->min_gain + ((info->max_gain - info->min_gain) * level) / 100; + D("%s %d\n", __func__, vol); + return vol; +} + +static struct qsd_acoustic_ops acoustic = { + .enable_mic_bias = incrediblec_ext_mic_enable, +}; + +static struct q6audio_analog_ops ops = { + .init = incrediblec_analog_init, + .speaker_enable = incrediblec_speaker_enable, + .headset_enable = incrediblec_headset_enable, + .receiver_enable = incrediblec_receiver_enable, + .bt_sco_enable = incrediblec_bt_sco_enable, + .int_mic_enable = incrediblec_int_mic_enable, + .ext_mic_enable = incrediblec_ext_mic_enable, + .get_rx_vol = incrediblec_get_rx_vol, +}; + +void __init incrediblec_audio_init(void) +{ + mutex_init(&mic_lock); + mutex_init(&bt_sco_lock); + q6audio_register_analog_ops(&ops); + acoustic_register_ops(&acoustic); +} + diff --git a/arch/arm/mach-msm/board-incrediblec-keypad.c b/arch/arm/mach-msm/board-incrediblec-keypad.c new file mode 100644 index 0000000000000..0880ae6737978 --- /dev/null +++ b/arch/arm/mach-msm/board-incrediblec-keypad.c @@ -0,0 +1,137 @@ +/* arch/arm/mach-msm/board-incrediblec-keypad.c + * + * Copyright (C) 2009 Google, Inc + * Copyright (C) 2009 HTC Corporation. + * + * Author: Dima Zavin + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "board-incrediblec.h" + + +const struct gpio_event_direct_entry incrediblec_keypad_nav_map_x0[] = { + { + .gpio = INCREDIBLEC_GPIO_POWER_KEY, + .code = KEY_POWER + }, + { + .gpio = INCREDIBLEC_GPIO_VOLUME_UP, + .code = KEY_VOLUMEUP + }, + { + .gpio = INCREDIBLEC_GPIO_VOLUME_DOWN, + .code = KEY_VOLUMEDOWN + }, +}; + +const struct gpio_event_direct_entry incrediblec_keypad_nav_map_x1[] = { + { + .gpio = INCREDIBLEC_GPIO_POWER_KEY, + .code = KEY_POWER + }, + { + .gpio = INCREDIBLEC_GPIO_VOLUME_UP, + .code = KEY_VOLUMEUP + }, + { + .gpio = INCREDIBLEC_GPIO_VOLUME_DOWN, + .code = KEY_VOLUMEDOWN + }, + { + .gpio = INCREDIBLEC_GPIO_OJ_ACTION_XB, + .code = BTN_MOUSE + }, +}; + +static struct gpio_event_input_info incrediblec_keypad_nav_info = { + .info.func = gpio_event_input_func, + .info.oj_btn = true, + .flags = GPIOEDF_PRINT_KEYS, + .type = EV_KEY, + .keymap = incrediblec_keypad_nav_map_x1, + .debounce_time.tv.nsec = 5 * NSEC_PER_MSEC, + .keymap_size = ARRAY_SIZE(incrediblec_keypad_nav_map_x1) +}; + +static struct gpio_event_info *incrediblec_keypad_info[] = { + &incrediblec_keypad_nav_info.info, +}; + +static struct gpio_event_platform_data incrediblec_keypad_data = { + .name = "incrediblec-keypad", + .info = incrediblec_keypad_info, + .info_count = ARRAY_SIZE(incrediblec_keypad_info) +}; + +static struct platform_device incrediblec_keypad_device = { + .name = GPIO_EVENT_DEV_NAME, + .id = 0, + .dev = { + .platform_data = &incrediblec_keypad_data, + }, +}; + +static int incrediblec_reset_keys_up[] = { + KEY_VOLUMEUP, + 0 +}; + +static struct keyreset_platform_data incrediblec_reset_keys_pdata = { + .keys_up = incrediblec_reset_keys_up, + .keys_down = { + KEY_POWER, + KEY_VOLUMEDOWN, + BTN_MOUSE, + 0 + }, +}; + +static struct platform_device incrediblec_reset_keys_device = { + .name = KEYRESET_NAME, + .dev.platform_data = &incrediblec_reset_keys_pdata, +}; +static int __init incrediblec_init_keypad(void) +{ + int ret; + + if (!machine_is_incrediblec()) + return 0; + + if (system_rev < 2) { + incrediblec_keypad_nav_info.keymap = + incrediblec_keypad_nav_map_x0; + incrediblec_keypad_nav_info.keymap_size = + ARRAY_SIZE(incrediblec_keypad_nav_map_x0); + } + + if (platform_device_register(&incrediblec_reset_keys_device)) + printk(KERN_WARNING "%s: register reset key fail\n", __func__); + + ret = platform_device_register(&incrediblec_keypad_device); + if (ret != 0) + return ret; + + return 0; +} + +device_initcall(incrediblec_init_keypad); + + diff --git a/arch/arm/mach-msm/board-incrediblec-microp.c b/arch/arm/mach-msm/board-incrediblec-microp.c new file mode 100644 index 0000000000000..5f386156609e2 --- /dev/null +++ b/arch/arm/mach-msm/board-incrediblec-microp.c @@ -0,0 +1,454 @@ +/* arch/arm/mach-msm/board-incrediblec-microp.c + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. +*/ +#ifdef CONFIG_MICROP_COMMON +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "board-incrediblec.h" + + +#define INT_PSENSOR (1<<11) + +static int misc_opened; +static struct i2c_client *incrediblec_microp_client; + +static void p_sensor_do_work(struct work_struct *w); +static DECLARE_WORK(p_sensor_work, p_sensor_do_work); + +struct wake_lock proximity_wake_lock; + +static struct capella_cm3602_data { + struct input_dev *input_dev; + struct capella_cm3602_platform_data *pdata; + int enabled; + struct workqueue_struct *p_sensor_wq; +} the_data; + +static int psensor_intr_enable(uint8_t enable) +{ + int ret; + uint8_t addr, data[2]; + + if (enable) + addr = MICROP_I2C_WCMD_GPI_INT_CTL_EN; + else + addr = MICROP_I2C_WCMD_GPI_INT_CTL_DIS; + + data[0] = INT_PSENSOR >> 8; + data[1] = INT_PSENSOR & 0xFF; + ret = microp_i2c_write(addr, data, 2); + if (ret < 0) + pr_err("%s: %s p-sensor interrupt failed\n", + __func__, (enable ? "enable" : "disable")); + + return ret; +} + +static int incrediblec_microp_function_init(struct i2c_client *client) +{ + struct microp_i2c_platform_data *pdata; + struct microp_i2c_client_data *cdata; + uint8_t data[20]; + int i, j; + int ret; + + incrediblec_microp_client = client; + pdata = client->dev.platform_data; + cdata = i2c_get_clientdata(client); + + /* Headset remote key */ + ret = microp_function_check(client, MICROP_FUNCTION_REMOTEKEY); + if (ret >= 0) { + i = ret; + pdata->function_node[MICROP_FUNCTION_REMOTEKEY] = i; + cdata->int_pin.int_remotekey = + pdata->microp_function[i].int_pin; + + for (j = 0; j < 6; j++) { + data[j] = (uint8_t)(pdata->microp_function[i].levels[j] >> 8); + data[j + 6] = (uint8_t)(pdata->microp_function[i].levels[j]); + } + ret = microp_i2c_write(MICROP_I2C_WCMD_REMOTEKEY_TABLE, + data, 12); + if (ret) + goto exit; + } + + /* Reset button interrupt */ + data[0] = 0x08; + ret = microp_i2c_write(MICROP_I2C_WCMD_MISC, data, 1); + if (ret) + goto exit; + + /* OJ interrupt */ + ret = microp_function_check(client, MICROP_FUNCTION_OJ); + if (ret >= 0) { + i = ret; + cdata->int_pin.int_oj = pdata->microp_function[i].int_pin; + + ret = microp_write_interrupt(client, cdata->int_pin.int_oj, 1); + if (ret) + goto exit; + } + + /* Proximity interrupt */ + ret = microp_function_check(client, MICROP_FUNCTION_P); + if (ret >= 0) { + i = ret; + cdata->int_pin.int_psensor = pdata->microp_function[i].int_pin; + cdata->gpio.psensor = pdata->microp_function[i].mask_r[0] << 16 + | pdata->microp_function[i].mask_r[1] << 8 + | pdata->microp_function[i].mask_r[2]; + cdata->fnode.psensor = i; + } + + return 0; + +exit: + return ret; +} + +static int report_psensor_data(void) +{ + int ret, ps_data = 0; + uint8_t data[3] = {0, 0, 0}; + + ret = microp_i2c_read(MICROP_I2C_RCMD_GPIO_STATUS, data, 3); + if (ret < 0) + pr_err("%s: read data failed\n", __func__); + else { + ps_data = (data[2] & 0x10) ? 1 : 0; + pr_info("proximity %s\n", ps_data ? 
"FAR" : "NEAR"); + + /* 0 is close, 1 is far */ + input_report_abs(the_data.input_dev, ABS_DISTANCE, ps_data); + input_sync(the_data.input_dev); + + wake_lock_timeout(&proximity_wake_lock, 2*HZ); + } + + return ret; +} + +static int capella_cm3602_enable(struct capella_cm3602_data *data) +{ + int rc; + pr_info("%s\n", __func__); + if (data->enabled) { + pr_info("%s: already enabled\n", __func__); + return 0; + } + + /* dummy report */ + input_report_abs(data->input_dev, ABS_DISTANCE, -1); + input_sync(data->input_dev); + + rc = data->pdata->power(PS_PWR_ON, 1); + if (rc < 0) + return -EIO; + + rc = gpio_direction_output(data->pdata->p_en, 0); + if (rc < 0) { + pr_err("%s: set psesnor enable failed!!", + __func__); + return -EIO; + } + msleep(220); + rc = psensor_intr_enable(1); + if (rc < 0) + return -EIO; + + data->enabled = 1; + report_psensor_data(); + + return rc; +} + +static int capella_cm3602_disable(struct capella_cm3602_data *data) +{ + int rc = -EIO; + pr_info("%s\n", __func__); + if (!data->enabled) { + pr_info("%s: already disabled\n", __func__); + return 0; + } + + rc = psensor_intr_enable(0); + if (rc < 0) + return -EIO; + + rc = gpio_direction_output(data->pdata->p_en, 1); + if (rc < 0) { + pr_err("%s: set GPIO failed!!", __func__); + return -EIO; + } + + rc = data->pdata->power(PS_PWR_ON, 0); + if (rc < 0) + return -EIO; + + data->enabled = 0; + return rc; +} + +static ssize_t capella_cm3602_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret; + + ret = sprintf(buf, "proximity enabled = %d\n", the_data.enabled); + + return ret; +} + +static ssize_t capella_cm3602_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count + ) +{ + ssize_t val; + + val = -1; + sscanf(buf, "%u", &val); + if (val < 0 || val > 1) + return -EINVAL; + + /* Enable capella_cm3602*/ + if (val == 1) + capella_cm3602_enable(&the_data); + + /* Disable capella_cm3602*/ + if (val == 0) + capella_cm3602_disable(&the_data); + + return count; +} + +static DEVICE_ATTR(proximity, 0644, capella_cm3602_show, capella_cm3602_store); + +static int capella_cm3602_open(struct inode *inode, struct file *file) +{ + pr_info("%s\n", __func__); + if (misc_opened) + return -EBUSY; + misc_opened = 1; + return 0; +} + +static int capella_cm3602_release(struct inode *inode, struct file *file) +{ + pr_info("%s\n", __func__); + misc_opened = 0; + return capella_cm3602_disable(&the_data); +} + +static long capella_cm3602_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + int val; + pr_info("%s cmd %d\n", __func__, _IOC_NR(cmd)); + switch (cmd) { + case CAPELLA_CM3602_IOCTL_ENABLE: + if (get_user(val, (unsigned long __user *)arg)) + return -EFAULT; + if (val) + return capella_cm3602_enable(&the_data); + else + return capella_cm3602_disable(&the_data); + break; + case CAPELLA_CM3602_IOCTL_GET_ENABLED: + return put_user(the_data.enabled, (unsigned long __user *)arg); + break; + default: + pr_err("%s: invalid cmd %d\n", __func__, _IOC_NR(cmd)); + return -EINVAL; + } +} +static void p_sensor_do_work(struct work_struct *w) +{ + report_psensor_data(); +} + +static irqreturn_t p_sensor_irq_handler(int irq, void *data) +{ + struct capella_cm3602_data *ip = data; + queue_work(ip->p_sensor_wq, &p_sensor_work); + + return IRQ_HANDLED; +} + +static struct file_operations capella_cm3602_fops = { + .owner = THIS_MODULE, + .open = capella_cm3602_open, + .release = capella_cm3602_release, + .unlocked_ioctl = capella_cm3602_ioctl +}; + +static struct miscdevice 
capella_cm3602_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "cm3602", + .fops = &capella_cm3602_fops +}; + +static int capella_cm3602_probe(struct platform_device *pdev) +{ + int rc = -1; + struct input_dev *input_dev; + struct capella_cm3602_data *ip; + struct capella_cm3602_platform_data *pdata; + + struct class *proximity_attr_class; + struct device *proximity_attr_dev; + + pr_info("%s: probe\n", __func__); + + pdata = dev_get_platdata(&pdev->dev); + + ip = &the_data; + platform_set_drvdata(pdev, ip); + + input_dev = input_allocate_device(); + if (!input_dev) { + pr_err("%s: could not allocate input device\n", __func__); + rc = -ENOMEM; + goto done; + } + ip->input_dev = input_dev; + ip->pdata = pdata; + input_set_drvdata(input_dev, ip); + + input_dev->name = "proximity"; + + set_bit(EV_ABS, input_dev->evbit); + input_set_abs_params(input_dev, ABS_DISTANCE, 0, 1, 0, 0); + + rc = input_register_device(input_dev); + if (rc < 0) { + pr_err("%s: could not register input device\n", __func__); + goto err_free_input_device; + } + + rc = misc_register(&capella_cm3602_misc); + if (rc < 0) { + pr_err("%s: could not register misc device\n", __func__); + goto err_unregister_input_device; + } + + wake_lock_init(&proximity_wake_lock, WAKE_LOCK_SUSPEND, "proximity"); + + proximity_attr_class = class_create(THIS_MODULE, "sensors"); + if (IS_ERR(proximity_attr_class)) { + pr_err("%s: class_create failed\n", __func__); + rc = PTR_ERR(proximity_attr_class); + proximity_attr_class = NULL; + goto err_create_class; + } + + proximity_attr_dev = device_create(proximity_attr_class, + NULL, 0, "%s", "proximity_sensor"); + if (unlikely(IS_ERR(proximity_attr_dev))) { + pr_err("%s: device create failed\n", __func__); + rc = PTR_ERR(proximity_attr_dev); + proximity_attr_dev = NULL; + goto err_create_proximity_attr_device; + } + + rc = device_create_file(proximity_attr_dev, &dev_attr_proximity); + if (rc) { + pr_err("%s: device_create_file failed\n", __func__); + goto err_create_proximity_device_file; + } + + ip->p_sensor_wq = create_workqueue("p-sensor_microp_wq"); + if (ip->p_sensor_wq == NULL) { + pr_err("%s: create_workqueue failed\n", __func__); + goto err_create_workqueue; + } + + rc = gpio_request(pdata->p_en, "gpio_proximity_en"); + if (rc < 0) { + pr_err("%s: gpio %d request failed (%d)\n", + __func__, pdata->p_en, rc); + goto err_request_proximity_en; + } + + rc = request_irq(pdata->p_out, p_sensor_irq_handler, + IRQF_TRIGGER_NONE, "p-sensor_microp", ip); + if (rc < 0) { + pr_err("%s: request_irq(%d) failed for (%d)\n", + __func__, pdata->p_out, rc); + goto err_request_proximity_irq; + } + + + goto done; + +err_request_proximity_irq: + gpio_free(pdata->p_en); +err_request_proximity_en: + destroy_workqueue(ip->p_sensor_wq); +err_create_workqueue: + device_remove_file(proximity_attr_dev, &dev_attr_proximity); +err_create_proximity_device_file: + device_unregister(proximity_attr_dev); +err_create_proximity_attr_device: + class_destroy(proximity_attr_class); +err_create_class: + misc_deregister(&capella_cm3602_misc); +err_unregister_input_device: + input_unregister_device(input_dev); +err_free_input_device: + input_free_device(input_dev); +done: + return rc; +} + +static struct microp_ops ops = { + .init_microp_func = incrediblec_microp_function_init, +}; + +void __init incrediblec_microp_init(void) +{ + microp_register_ops(&ops); +} + +static struct platform_driver capella_cm3602_driver = { + .probe = capella_cm3602_probe, + .driver = { + .name = "incrediblec_proximity", + .owner = THIS_MODULE + }, +}; + 
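+/* Descriptive note: the initcall below registers the proximity-sensor platform
+ * driver at device_initcall time, and only on incrediblec hardware. */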
+static int __init incrediblec_capella_cm3602_init(void) +{ + if (!machine_is_incrediblec()) + return 0; + + return platform_driver_register(&capella_cm3602_driver); +} + +device_initcall(incrediblec_capella_cm3602_init); +#endif diff --git a/arch/arm/mach-msm/board-incrediblec-mmc.c b/arch/arm/mach-msm/board-incrediblec-mmc.c new file mode 100644 index 0000000000000..42971046a8388 --- /dev/null +++ b/arch/arm/mach-msm/board-incrediblec-mmc.c @@ -0,0 +1,314 @@ +/* linux/arch/arm/mach-msm/board-incrediblec-mmc.c + * + * Copyright (C) 2009 Google, Inc. + * Copyright (C) 2009 HTC Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include "board-incrediblec.h" +#include "devices.h" +#include "proc_comm.h" + +#define DEBUG_SDSLOT_VDD 1 + +static bool opt_disable_sdcard; +static int __init incrediblec_disablesdcard_setup(char *str) +{ + opt_disable_sdcard = (bool)simple_strtol(str, NULL, 0); + return 1; +} + +__setup("board_incrediblec.disable_sdcard=", incrediblec_disablesdcard_setup); + +static uint32_t sdcard_on_gpio_table[] = { + PCOM_GPIO_CFG(62, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* CLK */ + PCOM_GPIO_CFG(63, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* CMD */ + PCOM_GPIO_CFG(64, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* DAT3 */ + PCOM_GPIO_CFG(65, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* DAT2 */ + PCOM_GPIO_CFG(66, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* DAT1 */ + PCOM_GPIO_CFG(67, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* DAT0 */ +}; + +static uint32_t sdcard_off_gpio_table[] = { + PCOM_GPIO_CFG(62, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CLK */ + PCOM_GPIO_CFG(63, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CMD */ + PCOM_GPIO_CFG(64, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(65, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(66, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(67, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT0 */ +}; + +static uint32_t movinand_on_gpio_table[] = { + PCOM_GPIO_CFG(88, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* CLK */ + PCOM_GPIO_CFG(89, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* CMD */ + PCOM_GPIO_CFG(90, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* DAT3 */ + PCOM_GPIO_CFG(91, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* DAT2 */ + PCOM_GPIO_CFG(92, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* DAT1 */ + PCOM_GPIO_CFG(93, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* DAT0 */ + PCOM_GPIO_CFG(158, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* DAT4 */ + PCOM_GPIO_CFG(159, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* DAT5 */ + PCOM_GPIO_CFG(160, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* DAT6 */ + PCOM_GPIO_CFG(161, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* DAT7 */ +}; + +static struct vreg *sdslot_vreg; +static uint32_t sdslot_vdd = 0xffffffff; +static uint32_t sdslot_vreg_enabled; + +static struct { + int mask; + int level; +} mmc_vdd_table[] = { + { MMC_VDD_28_29, 2850 }, + { MMC_VDD_29_30, 2900 }, +}; + +static 
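/*
 * This SD-slot VDD hook maps the single MMC_VDD_* bit requested by the MMC
 * core onto a vreg level in millivolts via mmc_vdd_table; a request of
 * vdd == 0 powers the slot down and parks the card pins using
 * sdcard_off_gpio_table.
 */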
uint32_t incrediblec_sdslot_switchvdd(struct device *dev, unsigned int vdd) +{ + int i; + int ret; + + if (vdd == sdslot_vdd) + return 0; + + sdslot_vdd = vdd; + + if (vdd == 0) { +#if DEBUG_SDSLOT_VDD + printk(KERN_INFO "%s: Disabling SD slot power\n", __func__); +#endif + config_gpio_table(sdcard_off_gpio_table, + ARRAY_SIZE(sdcard_off_gpio_table)); + vreg_disable(sdslot_vreg); + sdslot_vreg_enabled = 0; + return 0; + } + + if (!sdslot_vreg_enabled) { + mdelay(5); + ret = vreg_enable(sdslot_vreg); + if (ret) + pr_err("%s: Error enabling vreg (%d)\n", __func__, ret); + udelay(500); + config_gpio_table(sdcard_on_gpio_table, + ARRAY_SIZE(sdcard_on_gpio_table)); + sdslot_vreg_enabled = 1; + } + + for (i = 0; i < ARRAY_SIZE(mmc_vdd_table); i++) { + if (mmc_vdd_table[i].mask != (1 << vdd)) + continue; + ret = vreg_set_level(sdslot_vreg, mmc_vdd_table[i].level); + if (ret) + pr_err("%s: Error setting level (%d)\n", __func__, ret); +#if DEBUG_SDSLOT_VDD + printk(KERN_INFO "%s: Setting level to %u (%s)\n", + __func__, mmc_vdd_table[i].level, + ret?"Failed":"Success"); +#endif + return 0; + } + + pr_err("%s: Invalid VDD (%d) specified\n", __func__, vdd); + return 0; +} + +static unsigned int incrediblec_sdslot_status(struct device *dev) +{ + return !gpio_get_value(INCREDIBLEC_GPIO_SDMC_CD_N); +} + +#define INCREDIBLEC_MMC_VDD (MMC_VDD_28_29 | MMC_VDD_29_30) + +static unsigned int incrediblec_sdslot_type = MMC_TYPE_SD; + +static struct msm_mmc_platform_data incrediblec_sdslot_data = { + .ocr_mask = INCREDIBLEC_MMC_VDD, + .status = incrediblec_sdslot_status, + .translate_vdd = incrediblec_sdslot_switchvdd, + .slot_type = &incrediblec_sdslot_type, +}; + +static unsigned int incrediblec_mmc_type = MMC_TYPE_MMC; + +static struct msm_mmc_platform_data incrediblec_movinand_data = { + .ocr_mask = INCREDIBLEC_MMC_VDD, + .slot_type = &incrediblec_mmc_type, +}; + +/* ---- WIFI ---- */ + +static uint32_t wifi_on_gpio_table[] = { + PCOM_GPIO_CFG(51, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(52, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(53, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(54, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT0 */ + PCOM_GPIO_CFG(55, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* CMD */ + PCOM_GPIO_CFG(56, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* CLK */ + PCOM_GPIO_CFG(152, 0, GPIO_INPUT, GPIO_NO_PULL, GPIO_4MA), /* WLAN IRQ */ +}; + +static uint32_t wifi_off_gpio_table[] = { + PCOM_GPIO_CFG(51, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(52, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(53, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(54, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT0 */ + PCOM_GPIO_CFG(55, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CMD */ + PCOM_GPIO_CFG(56, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CLK */ + PCOM_GPIO_CFG(152, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* WLAN IRQ */ +}; + +/* BCM4329 returns wrong sdio_vsn(1) when we read cccr, + * we use predefined value (sdio_vsn=2) here to initial sdio driver well + */ +static struct embedded_sdio_data incrediblec_wifi_emb_data = { + .cccr = { + .sdio_vsn = 2, + .multi_block = 1, + .low_speed = 0, + .wide_bus = 0, + .high_power = 1, + .high_speed = 1, + }, + .cis = { + .vendor = 0x02d0, + .device = 0x4329, + }, +}; + +static void (*wifi_status_cb)(int card_present, void *dev_id); +static void *wifi_status_cb_devid; + +static int +incrediblec_wifi_status_register(void 
(*callback)(int card_present, void *dev_id), + void *dev_id) +{ + if (wifi_status_cb) + return -EAGAIN; + wifi_status_cb = callback; + wifi_status_cb_devid = dev_id; + return 0; +} + +static int incrediblec_wifi_cd; /* WiFi virtual 'card detect' status */ + +static unsigned int incrediblec_wifi_status(struct device *dev) +{ + return incrediblec_wifi_cd; +} + +static struct msm_mmc_platform_data incrediblec_wifi_data = { + .ocr_mask = MMC_VDD_28_29, + .built_in = 1, + .status = incrediblec_wifi_status, + .register_status_notify = incrediblec_wifi_status_register, + .embedded_sdio = &incrediblec_wifi_emb_data, +}; + +int incrediblec_wifi_set_carddetect(int val) +{ + printk(KERN_INFO "%s: %d\n", __func__, val); + incrediblec_wifi_cd = val; + if (wifi_status_cb) + wifi_status_cb(val, wifi_status_cb_devid); + else + printk(KERN_WARNING "%s: Nobody to notify\n", __func__); + return 0; +} +EXPORT_SYMBOL(incrediblec_wifi_set_carddetect); + +static int incrediblec_wifi_power_state; + +int incrediblec_wifi_power(int on) +{ + printk(KERN_INFO "%s: %d\n", __func__, on); + + if (on) { + config_gpio_table(wifi_on_gpio_table, + ARRAY_SIZE(wifi_on_gpio_table)); + mdelay(50); + } else { + config_gpio_table(wifi_off_gpio_table, + ARRAY_SIZE(wifi_off_gpio_table)); + } + + mdelay(100); + gpio_set_value(INCREDIBLEC_GPIO_WIFI_SHUTDOWN_N, on); /* WIFI_SHUTDOWN */ + mdelay(200); + + incrediblec_wifi_power_state = on; + return 0; +} + +static int incrediblec_wifi_reset_state; + +int incrediblec_wifi_reset(int on) +{ + printk(KERN_INFO "%s: do nothing\n", __func__); + incrediblec_wifi_reset_state = on; + return 0; +} + + +int __init incrediblec_init_mmc(unsigned int sys_rev) +{ + uint32_t id; + + printk(KERN_INFO "%s()+\n", __func__); + + /* initial WIFI_SHUTDOWN# */ + id = PCOM_GPIO_CFG(INCREDIBLEC_GPIO_WIFI_SHUTDOWN_N, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); + gpio_set_value(INCREDIBLEC_GPIO_WIFI_SHUTDOWN_N, 0); + + msm_add_sdcc(1, &incrediblec_wifi_data, 0, 0); + + if (opt_disable_sdcard) { + pr_info("%s: sdcard disabled on cmdline\n", __func__); + goto done; + } + + sdslot_vreg_enabled = 0; + + sdslot_vreg = vreg_get(0, "gp6"); + if (IS_ERR(sdslot_vreg)) + return PTR_ERR(sdslot_vreg); + + set_irq_wake(MSM_GPIO_TO_INT(INCREDIBLEC_GPIO_SDMC_CD_N), 1); + + msm_add_sdcc(2, &incrediblec_sdslot_data, + MSM_GPIO_TO_INT(INCREDIBLEC_GPIO_SDMC_CD_N), + IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE); + +done: + config_gpio_table(movinand_on_gpio_table, + ARRAY_SIZE(movinand_on_gpio_table)); + msm_add_sdcc(3, &incrediblec_movinand_data, 0, 0); /* SDC3: MoviNAND */ + + printk(KERN_INFO "%s()-\n", __func__); + return 0; +} diff --git a/arch/arm/mach-msm/board-incrediblec-panel.c b/arch/arm/mach-msm/board-incrediblec-panel.c new file mode 100644 index 0000000000000..d3a81eba81a8b --- /dev/null +++ b/arch/arm/mach-msm/board-incrediblec-panel.c @@ -0,0 +1,1255 @@ +/* arch/arm/mach-msm/board-incrediblec-panel.c + * + * Copyright (c) 2009 Google Inc. + * Author: Dima Zavin + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include "proc_comm.h" + +#include "board-incrediblec.h" +#include "devices.h" + +#define SPI_CONFIG (0x00000000) +#define SPI_IO_CONTROL (0x00000004) +#define SPI_OPERATIONAL (0x00000030) +#define SPI_ERROR_FLAGS_EN (0x00000038) +#define SPI_ERROR_FLAGS (0x00000038) +#define SPI_OUTPUT_FIFO (0x00000100) + +static void __iomem *spi_base; +static struct clk *spi_clk ; +static struct vreg *vreg_lcm_rftx_2v6; +static struct vreg *vreg_lcm_aux_2v6; + +#define SAMSUNG_PANEL 0 +/*Bitwise mask for SONY PANEL ONLY*/ +#define SONY_PANEL 0x1 /*Set bit 0 as 1 when it is SONY PANEL*/ +#define SONY_PWM_SPI 0x2 /*Set bit 1 as 1 as PWM_SPI mode, otherwise it is PWM_MICROP mode*/ +#define SONY_GAMMA 0x4 /*Set bit 2 as 1 when panel contains GAMMA table in its NVM*/ +#define SONY_RGB666 0x8 /*Set bit 3 as 1 when panel is 18 bit, otherwise it is 16 bit*/ + +extern int panel_type; + +static int is_sony_spi(void) +{ + return (panel_type & SONY_PWM_SPI ? 1 : 0); +} + +static int is_sony_with_gamma(void) +{ + return (panel_type & SONY_GAMMA ? 1 : 0); +} + +static int is_sony_RGB666(void) +{ + return (panel_type & SONY_RGB666 ? 1 : 0); +} + +static int qspi_send(uint32_t id, uint8_t data) +{ + uint32_t err; + + /* bit-5: OUTPUT_FIFO_NOT_EMPTY */ + while (readl(spi_base + SPI_OPERATIONAL) & (1<<5)) { + if ((err = readl(spi_base + SPI_ERROR_FLAGS))) { + pr_err("%s: ERROR: SPI_ERROR_FLAGS=0x%08x\n", __func__, + err); + return -EIO; + } + } + writel((0x7000 | (id << 9) | data) << 16, spi_base + SPI_OUTPUT_FIFO); + udelay(100); + + return 0; +} + +static int qspi_send_9bit(uint32_t id, uint8_t data) +{ + uint32_t err; + + while (readl(spi_base + SPI_OPERATIONAL) & (1<<5)) { + err = readl(spi_base + SPI_ERROR_FLAGS); + if (err) { + pr_err("%s: ERROR: SPI_ERROR_FLAGS=0x%08x\n", __func__, + err); + return -EIO; + } + } + writel(((id << 8) | data) << 23, spi_base + SPI_OUTPUT_FIFO); + udelay(100); + + return 0; +} + +static int lcm_writeb(uint8_t reg, uint8_t val) +{ + qspi_send(0x0, reg); + qspi_send(0x1, val); + return 0; +} + +static int lcm_writew(uint8_t reg, uint16_t val) +{ + qspi_send(0x0, reg); + qspi_send(0x1, val >> 8); + qspi_send(0x1, val & 0xff); + return 0; +} + +static struct resource resources_msm_fb[] = { + { + .start = MSM_FB_BASE, + .end = MSM_FB_BASE + MSM_FB_SIZE - 1, + .flags = IORESOURCE_MEM, + }, +}; + +struct lcm_tbl { + uint8_t reg; + uint8_t val; +}; + +static struct lcm_tbl samsung_oled_rgb565_init_table[] = { + { 0x31, 0x08 }, + { 0x32, 0x14 }, + { 0x30, 0x2 }, + { 0x27, 0x1 }, + { 0x12, 0x8 }, + { 0x13, 0x8 }, + { 0x15, 0x0 }, + { 0x16, 0x02 }, + { 0x39, 0x44 }, + { 0x17, 0x22 }, + { 0x18, 0x33 }, + { 0x19, 0x3 }, + { 0x1A, 0x1 }, + { 0x22, 0xA4 }, + { 0x23, 0x0 }, + { 0x26, 0xA0 }, + { 0x1D, 0xA0 }, +}; + +static struct lcm_tbl samsung_oled_rgb666_init_table[] = { + { 0x31, 0x08 }, + { 0x32, 0x14 }, + { 0x30, 0x2 }, + { 0x27, 0x1 }, + { 0x12, 0x8 }, + { 0x13, 0x8 }, + { 0x15, 0x0 }, + { 0x16, 0x01 }, + { 0x16, 0x01 }, + { 0x39, 0x44 }, + { 0x17, 0x22 }, + { 0x18, 0x33 }, + { 0x19, 0x3 }, + { 0x1A, 0x1 }, + { 0x22, 0xA4 }, + { 0x23, 0x0 }, + { 0x26, 0xA0 }, + { 0x1D, 0xA0 }, +}; + +static struct lcm_tbl *init_tablep = samsung_oled_rgb565_init_table; +static size_t init_table_sz = ARRAY_SIZE(samsung_oled_rgb565_init_table); + +#define OLED_GAMMA_TABLE_SIZE (7 * 3) +static struct lcm_tbl 
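/*
 * Nine gamma banks, one per brightness level from 10 to 250 in steps of 30
 * (see SAMSUNG_OLED_LEVEL_STEP below); each bank holds OLED_GAMMA_TABLE_SIZE
 * (7 x 3) register/value pairs, seven per colour channel in the 0x4x, 0x5x
 * and 0x6x register groups.
 */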
samsung_oled_gamma_table[][OLED_GAMMA_TABLE_SIZE] = { + /* level 10 */ + { + {0x40, 0x0}, + {0x41, 0x3}, + {0x42, 0x40}, + {0x43, 0x39}, + {0x44, 0x32}, + {0x45, 0x2e}, + {0x46, 0xc }, + {0x50, 0x0 }, + {0x51, 0x0 }, + {0x52, 0x0 }, + {0x53, 0x00}, + {0x54, 0x26}, + {0x55, 0x2d}, + {0x56, 0xb }, + {0x60, 0x0 }, + {0x61, 0x3f}, + {0x62, 0x40}, + {0x63, 0x38}, + {0x64, 0x31}, + {0x65, 0x2d}, + {0x66, 0x12}, + }, + + /*level 40*/ + { + {0x40, 0x0 }, + {0x41, 0x3f}, + {0x42, 0x3e}, + {0x43, 0x2e}, + {0x44, 0x2d}, + {0x45, 0x28}, + {0x46, 0x21}, + {0x50, 0x0 }, + {0x51, 0x0 }, + {0x52, 0x0 }, + {0x53, 0x21}, + {0x54, 0x2a}, + {0x55, 0x28}, + {0x56, 0x20}, + {0x60, 0x0 }, + {0x61, 0x3f}, + {0x62, 0x3e}, + {0x63, 0x2d}, + {0x64, 0x2b}, + {0x65, 0x26}, + {0x66, 0x2d}, + }, + + /*level 70*/ + { + {0x40, 0x0 }, + {0x41, 0x3f}, + {0x42, 0x35}, + {0x43, 0x2c}, + {0x44, 0x2b}, + {0x45, 0x26}, + {0x46, 0x29}, + {0x50, 0x0 }, + {0x51, 0x0 }, + {0x52, 0x0 }, + {0x53, 0x25}, + {0x54, 0x29}, + {0x55, 0x26}, + {0x56, 0x28}, + {0x60, 0x0 }, + {0x61, 0x3f}, + {0x62, 0x34}, + {0x63, 0x2b}, + {0x64, 0x2a}, + {0x65, 0x23}, + {0x66, 0x37}, + }, + + /*level 100*/ + { + {0x40, 0x0 }, + {0x41, 0x3f}, + {0x42, 0x30}, + {0x43, 0x2a}, + {0x44, 0x2b}, + {0x45, 0x24}, + {0x46, 0x2f}, + {0x50, 0x0 }, + {0x51, 0x0 }, + {0x52, 0x0 }, + {0x53, 0x25}, + {0x54, 0x29}, + {0x55, 0x24}, + {0x56, 0x2e}, + {0x60, 0x0 }, + {0x61, 0x3f}, + {0x62, 0x2f}, + {0x63, 0x29}, + {0x64, 0x29}, + {0x65, 0x21}, + {0x66, 0x3f}, + }, + + /*level 130*/ + { + {0x40, 0x0 }, + {0x41, 0x3f}, + {0x42, 0x2e}, + {0x43, 0x29}, + {0x44, 0x2a}, + {0x45, 0x23}, + {0x46, 0x34}, + {0x50, 0x0 }, + {0x51, 0x0 }, + {0x52, 0xa }, + {0x53, 0x25}, + {0x54, 0x28}, + {0x55, 0x23}, + {0x56, 0x33}, + {0x60, 0x0 }, + {0x61, 0x3f}, + {0x62, 0x2d}, + {0x63, 0x28}, + {0x64, 0x27}, + {0x65, 0x20}, + {0x66, 0x46}, + }, + + /*level 160*/ + { + {0x40, 0x0 }, + {0x41, 0x3f}, + {0x42, 0x2b}, + {0x43, 0x29}, + {0x44, 0x28}, + {0x45, 0x23}, + {0x46, 0x38}, + {0x50, 0x0 }, + {0x51, 0x0 }, + {0x52, 0xb }, + {0x53, 0x25}, + {0x54, 0x27}, + {0x55, 0x23}, + {0x56, 0x37}, + {0x60, 0x0 }, + {0x61, 0x3f}, + {0x62, 0x29}, + {0x63, 0x28}, + {0x64, 0x25}, + {0x65, 0x20}, + {0x66, 0x4b}, + }, + + /*level 190*/ + { + {0x40, 0x0 }, + {0x41, 0x3f}, + {0x42, 0x29}, + {0x43, 0x29}, + {0x44, 0x27}, + {0x45, 0x22}, + {0x46, 0x3c}, + {0x50, 0x0 }, + {0x51, 0x0 }, + {0x52, 0x10}, + {0x53, 0x26}, + {0x54, 0x26}, + {0x55, 0x22}, + {0x56, 0x3b}, + {0x60, 0x0 }, + {0x61, 0x3f}, + {0x62, 0x28}, + {0x63, 0x28}, + {0x64, 0x24}, + {0x65, 0x1f}, + {0x66, 0x50}, + }, + + /*level 220*/ + { + {0x40, 0x0 }, + {0x41, 0x3f}, + {0x42, 0x28}, + {0x43, 0x28}, + {0x44, 0x28}, + {0x45, 0x20}, + {0x46, 0x40}, + {0x50, 0x0 }, + {0x51, 0x0 }, + {0x52, 0x11}, + {0x53, 0x25}, + {0x54, 0x27}, + {0x55, 0x20}, + {0x56, 0x3f}, + {0x60, 0x0 }, + {0x61, 0x3f}, + {0x62, 0x27}, + {0x63, 0x26}, + {0x64, 0x26}, + {0x65, 0x1c}, + {0x66, 0x56}, + }, + + /*level 250*/ + { + {0x40, 0x0 }, + {0x41, 0x3f}, + {0x42, 0x2a}, + {0x43, 0x27}, + {0x44, 0x27}, + {0x45, 0x1f}, + {0x46, 0x44}, + {0x50, 0x0 }, + {0x51, 0x0 }, + {0x52, 0x17}, + {0x53, 0x24}, + {0x54, 0x26}, + {0x55, 0x1f}, + {0x56, 0x43}, + {0x60, 0x0 }, + {0x61, 0x3f}, + {0x62, 0x2a}, + {0x63, 0x25}, + {0x64, 0x24}, + {0x65, 0x1b}, + {0x66, 0x5c}, + }, +}; + +#define SAMSUNG_OLED_NUM_LEVELS ARRAY_SIZE(samsung_oled_gamma_table) + +#define SAMSUNG_OLED_MIN_VAL 10 +#define SAMSUNG_OLED_MAX_VAL 250 +#define SAMSUNG_OLED_DEFAULT_VAL (SAMSUNG_OLED_MIN_VAL + \ + (SAMSUNG_OLED_MAX_VAL - \ + 
SAMSUNG_OLED_MIN_VAL) / 2) + +#define SAMSUNG_OLED_LEVEL_STEP ((SAMSUNG_OLED_MAX_VAL - \ + SAMSUNG_OLED_MIN_VAL) / \ + (SAMSUNG_OLED_NUM_LEVELS - 1)) + +#define LCM_GPIO_CFG(gpio, func) \ +PCOM_GPIO_CFG(gpio, func, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA) +static uint32_t samsung_oled_on_gpio_table[] = { + LCM_GPIO_CFG(INCREDIBLEC_LCD_R0, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_R1, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_R2, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_R3, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_R4, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_R5, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_G0, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_G1, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_G2, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_G3, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_G4, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_G5, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_B0, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_B1, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_B2, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_B3, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_B4, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_B5, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_PCLK, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_VSYNC, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_HSYNC, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_DE, 1), +}; + +static uint32_t samsung_oled_off_gpio_table[] = { + LCM_GPIO_CFG(INCREDIBLEC_LCD_R0, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_R1, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_R2, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_R3, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_R4, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_R5, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_G0, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_G1, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_G2, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_G3, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_G4, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_G5, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_B0, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_B1, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_B2, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_B3, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_B4, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_B5, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_PCLK, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_VSYNC, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_HSYNC, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_DE, 0), +}; +#undef LCM_GPIO_CFG + +#define SONY_TFT_DEF_USER_VAL 102 +#define SONY_TFT_MIN_USER_VAL 30 +#define SONY_TFT_MAX_USER_VAL 255 +#define SONY_TFT_DEF_PANEL_VAL 120 +#define SONY_TFT_MIN_PANEL_VAL 8 +#define SONY_TFT_MAX_PANEL_VAL 255 +#define SONY_TFT_DEF_PANEL_UP_VAL 132 +#define SONY_TFT_MIN_PANEL_UP_VAL 9 +#define SONY_TFT_MAX_PANEL_UP_VAL 255 + +static DEFINE_MUTEX(panel_lock); +static struct work_struct brightness_delayed_work; +static DEFINE_SPINLOCK(brightness_lock); +static uint8_t new_val = SAMSUNG_OLED_DEFAULT_VAL; +static uint8_t last_val = SAMSUNG_OLED_DEFAULT_VAL; +static uint8_t table_sel_vals[] = { 0x43, 0x34 }; +static int table_sel_idx = 0; +static uint8_t tft_panel_on; + +static void gamma_table_bank_select(void) +{ + lcm_writeb(0x39, table_sel_vals[table_sel_idx]); + table_sel_idx ^= 1; +} + +static void samsung_oled_set_gamma_val(int val) +{ + int i; + int level; + int frac; + + val = clamp(val, SAMSUNG_OLED_MIN_VAL, SAMSUNG_OLED_MAX_VAL); + val = (val / 2) * 2; + + level = (val - SAMSUNG_OLED_MIN_VAL) / SAMSUNG_OLED_LEVEL_STEP; + frac = (val - SAMSUNG_OLED_MIN_VAL) % SAMSUNG_OLED_LEVEL_STEP; + + clk_enable(spi_clk); + + for (i = 0; i < OLED_GAMMA_TABLE_SIZE; ++i) { + unsigned int v1; + unsigned int v2 = 0; + u8 v; + if (frac == 0) { + v = samsung_oled_gamma_table[level][i].val; + } else { + + v1 = samsung_oled_gamma_table[level][i].val; + v2 = samsung_oled_gamma_table[level+1][i].val; + v = (v1 * 
(SAMSUNG_OLED_LEVEL_STEP - frac) + + v2 * frac) / SAMSUNG_OLED_LEVEL_STEP; + } + lcm_writeb(samsung_oled_gamma_table[level][i].reg, v); + } + + gamma_table_bank_select(); + clk_disable(spi_clk); + last_val = val; +} + +static void samsung_oled_panel_config_gpio_table(uint32_t *table, int len) +{ + int n; + unsigned id; + for (n = 0; n < len; n++) { + id = table[n]; + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); + } +} + +static int samsung_oled_panel_gpio_switch (int on) +{ + samsung_oled_panel_config_gpio_table ( + !!on ? samsung_oled_on_gpio_table : samsung_oled_off_gpio_table, + ARRAY_SIZE(samsung_oled_on_gpio_table)); + + return 0; +} + +static int samsung_oled_panel_init(struct msm_lcdc_panel_ops *ops) +{ + pr_info("%s: +()\n", __func__); + mutex_lock(&panel_lock); + + clk_enable(spi_clk); + /* Set the gamma write target to 4, leave the current gamma set at 2 */ + lcm_writeb(0x39, 0x24); + clk_disable(spi_clk); + + mutex_unlock(&panel_lock); + pr_info("%s: -()\n", __func__); + return 0; +} + +static int samsung_oled_panel_unblank(struct msm_lcdc_panel_ops *ops) +{ + int i; + + pr_info("%s: +()\n", __func__); + + mutex_lock(&panel_lock); + samsung_oled_panel_gpio_switch(1); + + gpio_set_value(INCREDIBLEC_LCD_RST_ID1, 1); + udelay(50); + gpio_set_value(INCREDIBLEC_LCD_RST_ID1, 0); + udelay(20); + gpio_set_value(INCREDIBLEC_LCD_RST_ID1, 1); + msleep(20); + + clk_enable(spi_clk); + + for (i = 0; i < init_table_sz; i++) + lcm_writeb(init_tablep[i].reg, init_tablep[i].val); + + lcm_writew(0xef, 0xd0e8); + lcm_writeb(0x1d, 0xa0); + table_sel_idx = 0; + gamma_table_bank_select(); + samsung_oled_set_gamma_val(last_val); + msleep(250); + lcm_writeb(0x14, 0x03); + clk_disable(spi_clk); + + mutex_unlock(&panel_lock); + + pr_info("%s: -()\n", __func__); + return 0; +} + +static int samsung_oled_panel_blank(struct msm_lcdc_panel_ops *ops) +{ + pr_info("%s: +()\n", __func__); + mutex_lock(&panel_lock); + + clk_enable(spi_clk); + lcm_writeb(0x14, 0x0); + mdelay(1); + lcm_writeb(0x1d, 0xa1); + clk_disable(spi_clk); + msleep(200); + + gpio_set_value(INCREDIBLEC_LCD_RST_ID1, 0); + samsung_oled_panel_gpio_switch(0); + + mutex_unlock(&panel_lock); + pr_info("%s: -()\n", __func__); + return 0; +} + +#define LCM_GPIO_CFG(gpio, func) \ + PCOM_GPIO_CFG(gpio, func, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA) + +static uint32_t sony_tft_display_on_gpio_table[] = { + LCM_GPIO_CFG(INCREDIBLEC_LCD_R0, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_R1, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_R2, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_R3, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_R4, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_R5, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_G0, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_G1, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_G2, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_G3, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_G4, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_G5, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_B0, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_B1, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_B2, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_B3, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_B4, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_B5, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_PCLK, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_VSYNC, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_HSYNC, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_DE, 1), + LCM_GPIO_CFG(INCREDIBLEC_SPI_CLK, 1), + LCM_GPIO_CFG(INCREDIBLEC_SPI_CS, 1), + LCM_GPIO_CFG(INCREDIBLEC_LCD_ID0, 1), + LCM_GPIO_CFG(INCREDIBLEC_SPI_DO, 1), +}; + +static uint32_t sony_tft_display_off_gpio_table[] = { + LCM_GPIO_CFG(INCREDIBLEC_LCD_R0, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_R1, 0), + 
LCM_GPIO_CFG(INCREDIBLEC_LCD_R2, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_R3, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_R4, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_R5, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_G0, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_G1, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_G2, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_G3, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_G4, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_G5, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_B0, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_B1, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_B2, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_B3, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_B4, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_B5, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_PCLK, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_VSYNC, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_HSYNC, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_DE, 0), + LCM_GPIO_CFG(INCREDIBLEC_SPI_CLK, 0), + LCM_GPIO_CFG(INCREDIBLEC_SPI_CS, 0), + LCM_GPIO_CFG(INCREDIBLEC_LCD_ID0, 0), + LCM_GPIO_CFG(INCREDIBLEC_SPI_DO, 0), + +}; + +#undef LCM_GPIO_CFG + +#define SONY_TFT_DEF_PANEL_DELTA \ + (SONY_TFT_DEF_PANEL_VAL - SONY_TFT_MIN_PANEL_VAL) +#define SONY_TFT_DEF_USER_DELTA \ + (SONY_TFT_DEF_USER_VAL - SONY_TFT_MIN_USER_VAL) + +static void sony_tft_set_pwm_val(int val) +{ + uint8_t data[4] = {0,0,0,0}; + unsigned int min_pwm, def_pwm, max_pwm; + + pr_info("%s: %d\n", __func__, val); + + last_val = val; + + if (!tft_panel_on) + return; + + if(!is_sony_spi()) { + min_pwm = SONY_TFT_MIN_PANEL_UP_VAL; + def_pwm = SONY_TFT_DEF_PANEL_UP_VAL; + max_pwm = SONY_TFT_MAX_PANEL_UP_VAL; + } else { + min_pwm = SONY_TFT_MIN_PANEL_VAL; + def_pwm = SONY_TFT_DEF_PANEL_VAL; + max_pwm = SONY_TFT_MAX_PANEL_VAL; + } + + if (val <= SONY_TFT_DEF_USER_VAL) { + if (val <= SONY_TFT_MIN_USER_VAL) + val = min_pwm; + else + val = (def_pwm - min_pwm) * + (val - SONY_TFT_MIN_USER_VAL) / + SONY_TFT_DEF_USER_DELTA + + min_pwm; + } else { + val = (max_pwm - def_pwm) * + (val - SONY_TFT_DEF_USER_VAL) / + (SONY_TFT_MAX_USER_VAL - SONY_TFT_DEF_USER_VAL) + + def_pwm; + } + + if (!is_sony_spi()) { + data[0] = 5; + data[1] = val; + data[3] = 1; + microp_i2c_write(0x25, data, 4); + } else { + clk_enable(spi_clk); + qspi_send_9bit(0x0, 0x51); + qspi_send_9bit(0x1, val); + qspi_send_9bit(0x0, 0x53); + qspi_send_9bit(0x1, 0x24); + clk_disable(spi_clk); + } +} + +#undef SONY_TFT_DEF_PANEL_DELTA +#undef SONY_TFT_DEF_USER_DELTA + +static void sony_tft_panel_config_gpio_table(uint32_t *table, int len) +{ + int n; + unsigned id; + for (n = 0; n < len; n++) { + id = table[n]; + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); + } +} + +static int sony_tft_panel_power(int on) +{ + unsigned id, on_off; + + if (on) { + on_off = 0; + + vreg_enable(vreg_lcm_aux_2v6); + vreg_enable(vreg_lcm_rftx_2v6); + + id = PM_VREG_PDOWN_AUX_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + + id = PM_VREG_PDOWN_RFTX_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + + gpio_set_value(INCREDIBLEC_LCD_RST_ID1, 1); + mdelay(10); + gpio_set_value(INCREDIBLEC_LCD_RST_ID1, 0); + udelay(500); + gpio_set_value(INCREDIBLEC_LCD_RST_ID1, 1); + mdelay(10); + sony_tft_panel_config_gpio_table( + sony_tft_display_on_gpio_table, + ARRAY_SIZE(sony_tft_display_on_gpio_table)); + } else { + on_off = 1; + + gpio_set_value(INCREDIBLEC_LCD_RST_ID1, 0); + + mdelay(120); + + vreg_disable(vreg_lcm_rftx_2v6); + vreg_disable(vreg_lcm_aux_2v6); + + id = PM_VREG_PDOWN_RFTX_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + + id = PM_VREG_PDOWN_AUX_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + sony_tft_panel_config_gpio_table( + 
sony_tft_display_off_gpio_table, + ARRAY_SIZE(sony_tft_display_off_gpio_table)); + } + return 0; +} + +static int sony_tft_panel_init(struct msm_lcdc_panel_ops *ops) +{ + return 0; +} + +static void sony_tft_panel_without_gamma_init(void) +{ + pr_info("%s: init gamma setting", __func__); + + qspi_send_9bit(0x0, 0xF1); + qspi_send_9bit(0x1, 0x5A); + qspi_send_9bit(0x1, 0x5A); + // FAh RGB + qspi_send_9bit(0x0, 0xFA); + // Red + qspi_send_9bit(0x1, 0x32); + qspi_send_9bit(0x1, 0x3F); + qspi_send_9bit(0x1, 0x3F); + qspi_send_9bit(0x1, 0x29); + qspi_send_9bit(0x1, 0x3E); + qspi_send_9bit(0x1, 0x3C); + qspi_send_9bit(0x1, 0x3D); + qspi_send_9bit(0x1, 0x2C); + qspi_send_9bit(0x1, 0x27); + qspi_send_9bit(0x1, 0x3D); + qspi_send_9bit(0x1, 0x2E); + qspi_send_9bit(0x1, 0x31); + qspi_send_9bit(0x1, 0x3A); + qspi_send_9bit(0x1, 0x34); + qspi_send_9bit(0x1, 0x36); + // Green + qspi_send_9bit(0x1, 0x1A); + qspi_send_9bit(0x1, 0x3F); + qspi_send_9bit(0x1, 0x3F); + qspi_send_9bit(0x1, 0x2E); + qspi_send_9bit(0x1, 0x40); + qspi_send_9bit(0x1, 0x3C); + qspi_send_9bit(0x1, 0x3C); + qspi_send_9bit(0x1, 0x2B); + qspi_send_9bit(0x1, 0x25); + qspi_send_9bit(0x1, 0x39); + qspi_send_9bit(0x1, 0x25); + qspi_send_9bit(0x1, 0x23); + qspi_send_9bit(0x1, 0x2A); + qspi_send_9bit(0x1, 0x20); + qspi_send_9bit(0x1, 0x22); + // Blue + qspi_send_9bit(0x1, 0x00); + qspi_send_9bit(0x1, 0x3F); + qspi_send_9bit(0x1, 0x3F); + qspi_send_9bit(0x1, 0x2F); + qspi_send_9bit(0x1, 0x3E); + qspi_send_9bit(0x1, 0x3C); + qspi_send_9bit(0x1, 0x3C); + qspi_send_9bit(0x1, 0x2A); + qspi_send_9bit(0x1, 0x23); + qspi_send_9bit(0x1, 0x35); + qspi_send_9bit(0x1, 0x1E); + qspi_send_9bit(0x1, 0x18); + qspi_send_9bit(0x1, 0x1C); + qspi_send_9bit(0x1, 0x0C); + qspi_send_9bit(0x1, 0x0E); + // FBh RGB + qspi_send_9bit(0x0, 0xFB); + // Red + qspi_send_9bit(0x1, 0x00); + qspi_send_9bit(0x1, 0x0D); + qspi_send_9bit(0x1, 0x09); + qspi_send_9bit(0x1, 0x0C); + qspi_send_9bit(0x1, 0x26); + qspi_send_9bit(0x1, 0x2E); + qspi_send_9bit(0x1, 0x31); + qspi_send_9bit(0x1, 0x22); + qspi_send_9bit(0x1, 0x19); + qspi_send_9bit(0x1, 0x33); + qspi_send_9bit(0x1, 0x22); + qspi_send_9bit(0x1, 0x23); + qspi_send_9bit(0x1, 0x21); + qspi_send_9bit(0x1, 0x17); + qspi_send_9bit(0x1, 0x00); + // Green + qspi_send_9bit(0x1, 0x00); + qspi_send_9bit(0x1, 0x25); + qspi_send_9bit(0x1, 0x1D); + qspi_send_9bit(0x1, 0x1F); + qspi_send_9bit(0x1, 0x35); + qspi_send_9bit(0x1, 0x3C); + qspi_send_9bit(0x1, 0x3A); + qspi_send_9bit(0x1, 0x26); + qspi_send_9bit(0x1, 0x1B); + qspi_send_9bit(0x1, 0x34); + qspi_send_9bit(0x1, 0x23); + qspi_send_9bit(0x1, 0x23); + qspi_send_9bit(0x1, 0x1F); + qspi_send_9bit(0x1, 0x12); + qspi_send_9bit(0x1, 0x00); + // Blue + qspi_send_9bit(0x1, 0x00); + qspi_send_9bit(0x1, 0x3F); + qspi_send_9bit(0x1, 0x31); + qspi_send_9bit(0x1, 0x33); + qspi_send_9bit(0x1, 0x43); + qspi_send_9bit(0x1, 0x48); + qspi_send_9bit(0x1, 0x41); + qspi_send_9bit(0x1, 0x2A); + qspi_send_9bit(0x1, 0x1D); + qspi_send_9bit(0x1, 0x35); + qspi_send_9bit(0x1, 0x23); + qspi_send_9bit(0x1, 0x23); + qspi_send_9bit(0x1, 0x21); + qspi_send_9bit(0x1, 0x10); + qspi_send_9bit(0x1, 0x00); + // F3h Power control + qspi_send_9bit(0x0, 0xF3); + qspi_send_9bit(0x1, 0x00); + qspi_send_9bit(0x1, 0x10); + qspi_send_9bit(0x1, 0x25); + qspi_send_9bit(0x1, 0x01); + qspi_send_9bit(0x1, 0x2D); + qspi_send_9bit(0x1, 0x2D); + qspi_send_9bit(0x1, 0x24); + qspi_send_9bit(0x1, 0x2D); + qspi_send_9bit(0x1, 0x10); + qspi_send_9bit(0x1, 0x10); + qspi_send_9bit(0x1, 0x0A); + qspi_send_9bit(0x1, 0x37); + // F4h VCOM Control 
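/*
 * 0xF4 selects the VCOM control block; the ten data writes that follow
 * program it with fixed, panel-specific values whose individual meanings
 * are not documented in this file.
 */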
+ qspi_send_9bit(0x0, 0xF4); + qspi_send_9bit(0x1, 0x88); + qspi_send_9bit(0x1, 0x20); + qspi_send_9bit(0x1, 0x00); + qspi_send_9bit(0x1, 0xAF); + qspi_send_9bit(0x1, 0x64); + qspi_send_9bit(0x1, 0x00); + qspi_send_9bit(0x1, 0xAA); + qspi_send_9bit(0x1, 0x64); + qspi_send_9bit(0x1, 0x00); + qspi_send_9bit(0x1, 0x00); + //Change to level 1 + qspi_send_9bit(0x0, 0xF0); + qspi_send_9bit(0x1, 0x5A); + qspi_send_9bit(0x1, 0x5A); +} + +static int sony_tft_panel_unblank(struct msm_lcdc_panel_ops *ops) +{ + pr_info("%s: +()\n", __func__); + + mutex_lock(&panel_lock); + + if (tft_panel_on) { + pr_info("%s: -() already unblanked\n", __func__); + goto done; + } + + sony_tft_panel_power(1); + msleep(45); + + clk_enable(spi_clk); + qspi_send_9bit(0x0, 0x11); + msleep(5); + qspi_send_9bit(0x0, 0x3a); + if (is_sony_RGB666()) + qspi_send_9bit(0x1, 0x06); + else + qspi_send_9bit(0x1, 0x05); + msleep(100); + qspi_send_9bit(0x0, 0x29); + msleep(20); + + //init gamma setting + if(!is_sony_with_gamma()) + sony_tft_panel_without_gamma_init(); + + /* unlock register page for pwm setting */ + if (is_sony_spi()) { + qspi_send_9bit(0x0, 0xf0); + qspi_send_9bit(0x1, 0x5a); + qspi_send_9bit(0x1, 0x5a); + qspi_send_9bit(0x0, 0xf1); + qspi_send_9bit(0x1, 0x5a); + qspi_send_9bit(0x1, 0x5a); + qspi_send_9bit(0x0, 0xd0); + qspi_send_9bit(0x1, 0x5a); + qspi_send_9bit(0x1, 0x5a); + + qspi_send_9bit(0x0, 0xc2); + qspi_send_9bit(0x1, 0x53); + qspi_send_9bit(0x1, 0x12); + } + clk_disable(spi_clk); + msleep(100); + tft_panel_on = 1; + sony_tft_set_pwm_val(last_val); + + pr_info("%s: -()\n", __func__); +done: + mutex_unlock(&panel_lock); + return 0; +} + +static int sony_tft_panel_blank(struct msm_lcdc_panel_ops *ops) +{ + uint8_t data[4] = {0, 0, 0, 0}; + pr_info("%s: +()\n", __func__); + + mutex_lock(&panel_lock); + + clk_enable(spi_clk); + qspi_send_9bit(0x0, 0x28); + qspi_send_9bit(0x0, 0x10); + clk_disable(spi_clk); + + msleep(40); + sony_tft_panel_power(0); + tft_panel_on = 0; + + mutex_unlock(&panel_lock); + + if (!is_sony_spi()) { + data[0] = 5; + data[1] = 0; + data[3] = 1; + microp_i2c_write(0x25, data, 4); + } + + pr_info("%s: -()\n", __func__); + return 0; +} + +static struct msm_lcdc_panel_ops incrediblec_lcdc_amoled_panel_ops = { + .init = samsung_oled_panel_init, + .blank = samsung_oled_panel_blank, + .unblank = samsung_oled_panel_unblank, +}; + +static struct msm_lcdc_panel_ops incrediblec_lcdc_tft_panel_ops = { + .init = sony_tft_panel_init, + .blank = sony_tft_panel_blank, + .unblank = sony_tft_panel_unblank, +}; + + +static struct msm_lcdc_timing incrediblec_lcdc_amoled_timing = { + .clk_rate = 24576000, + .hsync_pulse_width = 4, + .hsync_back_porch = 8, + .hsync_front_porch = 8, + .hsync_skew = 0, + .vsync_pulse_width = 2, + .vsync_back_porch = 8, + .vsync_front_porch = 8, + .vsync_act_low = 1, + .hsync_act_low = 1, + .den_act_low = 1, +}; + +static struct msm_lcdc_timing incrediblec_lcdc_tft_timing = { + .clk_rate = 24576000, + .hsync_pulse_width = 2, + .hsync_back_porch = 20, + .hsync_front_porch = 20, + .hsync_skew = 0, + .vsync_pulse_width = 2, + .vsync_back_porch = 6, + .vsync_front_porch = 4, + .vsync_act_low = 1, + .hsync_act_low = 1, + .den_act_low = 0, +}; + +static struct msm_fb_data incrediblec_lcdc_fb_data = { + .xres = 480, + .yres = 800, + .width = 48, + .height = 80, + .output_format = MSM_MDP_OUT_IF_FMT_RGB565, +}; + +static struct msm_lcdc_platform_data incrediblec_lcdc_amoled_platform_data = { + .panel_ops = &incrediblec_lcdc_amoled_panel_ops, + .timing = &incrediblec_lcdc_amoled_timing, + 
.fb_id = 0, + .fb_data = &incrediblec_lcdc_fb_data, + .fb_resource = &resources_msm_fb[0], +}; + +static struct msm_lcdc_platform_data incrediblec_lcdc_tft_platform_data = { + .panel_ops = &incrediblec_lcdc_tft_panel_ops, + .timing = &incrediblec_lcdc_tft_timing, + .fb_id = 0, + .fb_data = &incrediblec_lcdc_fb_data, + .fb_resource = &resources_msm_fb[0], +}; + +static struct platform_device incrediblec_lcdc_amoled_device = { + .name = "msm_mdp_lcdc", + .id = -1, + .dev = { + .platform_data = &incrediblec_lcdc_amoled_platform_data, + }, +}; + +static struct platform_device incrediblec_lcdc_tft_device = { + .name = "msm_mdp_lcdc", + .id = -1, + .dev = { + .platform_data = &incrediblec_lcdc_tft_platform_data, + }, +}; + +static int incrediblec_init_spi_hack(void) +{ + int ret; + + spi_base = ioremap(MSM_SPI_PHYS, MSM_SPI_SIZE); + if (!spi_base) + return -1; + + spi_clk = clk_get(&msm_device_spi.dev, "spi_clk"); + if (IS_ERR(spi_clk)) { + pr_err("%s: unable to get spi_clk\n", __func__); + ret = PTR_ERR(spi_clk); + goto err_clk_get; + } + + clk_enable(spi_clk); + + printk("spi: SPI_CONFIG=%x\n", readl(spi_base + SPI_CONFIG)); + printk("spi: SPI_IO_CONTROL=%x\n", readl(spi_base + SPI_IO_CONTROL)); + printk("spi: SPI_OPERATIONAL=%x\n", readl(spi_base + SPI_OPERATIONAL)); + printk("spi: SPI_ERROR_FLAGS_EN=%x\n", + readl(spi_base + SPI_ERROR_FLAGS_EN)); + printk("spi: SPI_ERROR_FLAGS=%x\n", readl(spi_base + SPI_ERROR_FLAGS)); + printk("-%s()\n", __FUNCTION__); + clk_disable(spi_clk); + + return 0; + +err_clk_get: + iounmap(spi_base); + return ret; +} + +static void incrediblec_brightness_set(struct led_classdev *led_cdev, + enum led_brightness val) +{ + unsigned long flags; + led_cdev->brightness = val; + + spin_lock_irqsave(&brightness_lock, flags); + new_val = val; + spin_unlock_irqrestore(&brightness_lock, flags); + + schedule_work(&brightness_delayed_work); +} + +static void incrediblec_brightness_amoled_set_work(struct work_struct *work_ptr) +{ + unsigned long flags; + uint8_t val; + + spin_lock_irqsave(&brightness_lock, flags); + val = new_val; + spin_unlock_irqrestore(&brightness_lock, flags); + + mutex_lock(&panel_lock); + samsung_oled_set_gamma_val(val); + mutex_unlock(&panel_lock); +} + +static void incrediblec_brightness_tft_set_work(struct work_struct *work_ptr) +{ + unsigned long flags; + uint8_t val; + + spin_lock_irqsave(&brightness_lock, flags); + val = new_val; + spin_unlock_irqrestore(&brightness_lock, flags); + + mutex_lock(&panel_lock); + sony_tft_set_pwm_val(val); + mutex_unlock(&panel_lock); +} + +static struct led_classdev incrediblec_brightness_led = { + .name = "lcd-backlight", + .brightness = LED_FULL, + .brightness_set = incrediblec_brightness_set, +}; + +int __init incrediblec_init_panel(void) +{ + int ret; + + if (system_rev >= 1) { + /* CDMA version (except for EVT1) supports RGB666 */ + init_tablep = samsung_oled_rgb666_init_table; + init_table_sz = ARRAY_SIZE(samsung_oled_rgb666_init_table); + incrediblec_lcdc_fb_data.output_format = MSM_MDP_OUT_IF_FMT_RGB666; + } + + ret = platform_device_register(&msm_device_mdp); + if (ret != 0) + return ret; + + ret = incrediblec_init_spi_hack(); + if (ret != 0) + return ret; + + if (gpio_get_value(INCREDIBLEC_LCD_ID0)) { + pr_info("%s: tft panel\n", __func__); + vreg_lcm_rftx_2v6 = vreg_get(0, "rftx"); + if (IS_ERR(vreg_lcm_rftx_2v6)) + return PTR_ERR(vreg_lcm_rftx_2v6); + vreg_set_level(vreg_lcm_rftx_2v6, 2600); + +#ifdef CONFIG_MACH_INCREDIBLEC + vreg_lcm_aux_2v6 = vreg_get(0, "gp4"); +#else + vreg_lcm_aux_2v6 = vreg_get(0, 
"gp6"); +#endif + if (IS_ERR(vreg_lcm_aux_2v6)) + return PTR_ERR(vreg_lcm_aux_2v6); + vreg_set_level(vreg_lcm_aux_2v6, 2600); + + if (gpio_get_value(INCREDIBLEC_LCD_RST_ID1)) + tft_panel_on = 1; + ret = platform_device_register(&incrediblec_lcdc_tft_device); + INIT_WORK(&brightness_delayed_work, incrediblec_brightness_tft_set_work); + } else { + pr_info("%s: amoled panel\n", __func__); + ret = platform_device_register(&incrediblec_lcdc_amoled_device); + INIT_WORK(&brightness_delayed_work, incrediblec_brightness_amoled_set_work); + } + + if (ret != 0) + return ret; + + ret = led_classdev_register(NULL, &incrediblec_brightness_led); + if (ret != 0) { + pr_err("%s: Cannot register brightness led\n", __func__); + return ret; + } + + return 0; +} + +device_initcall(incrediblec_init_panel); diff --git a/arch/arm/mach-msm/board-incrediblec-rfkill.c b/arch/arm/mach-msm/board-incrediblec-rfkill.c new file mode 100644 index 0000000000000..c42f4b4a7f0c0 --- /dev/null +++ b/arch/arm/mach-msm/board-incrediblec-rfkill.c @@ -0,0 +1,126 @@ +/* + * Copyright (C) 2009 Google, Inc. + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* Control bluetooth power for incrediblec platform */ + +#include +#include +#include +#include +#include +#include +#include + +#include "board-incrediblec.h" + + +static struct rfkill *bt_rfk; +static const char bt_name[] = "bcm4329"; + +static int bluetooth_set_power(void *data, bool blocked) +{ + if (!blocked) { + gpio_direction_output(INCREDIBLEC_GPIO_BT_RESET_N, 1); + gpio_direction_output(INCREDIBLEC_GPIO_BT_SHUTDOWN_N, 1); + } else { + gpio_direction_output(INCREDIBLEC_GPIO_BT_SHUTDOWN_N, 0); + gpio_direction_output(INCREDIBLEC_GPIO_BT_RESET_N, 0); + } + return 0; +} + +static struct rfkill_ops incrediblec_rfkill_ops = { + .set_block = bluetooth_set_power, +}; + +static int incrediblec_rfkill_probe(struct platform_device *pdev) +{ + int rc = 0; + bool default_state = true; /* off */ + + rc = gpio_request(INCREDIBLEC_GPIO_BT_RESET_N, "bt_reset"); + if (rc) + goto err_gpio_reset; + rc = gpio_request(INCREDIBLEC_GPIO_BT_SHUTDOWN_N, "bt_shutdown"); + if (rc) + goto err_gpio_shutdown; + + bluetooth_set_power(NULL, default_state); + + bt_rfk = rfkill_alloc(bt_name, &pdev->dev, RFKILL_TYPE_BLUETOOTH, + &incrediblec_rfkill_ops, NULL); + if (!bt_rfk) { + rc = -ENOMEM; + goto err_rfkill_alloc; + } + + rfkill_set_states(bt_rfk, default_state, false); + + /* userspace cannot take exclusive control */ + + rc = rfkill_register(bt_rfk); + if (rc) + goto err_rfkill_reg; + + return 0; + +err_rfkill_reg: + rfkill_destroy(bt_rfk); +err_rfkill_alloc: + gpio_free(INCREDIBLEC_GPIO_BT_SHUTDOWN_N); +err_gpio_shutdown: + gpio_free(INCREDIBLEC_GPIO_BT_RESET_N); +err_gpio_reset: + return rc; +} + +static int incrediblec_rfkill_remove(struct platform_device *dev) +{ + rfkill_unregister(bt_rfk); + rfkill_destroy(bt_rfk); + gpio_free(INCREDIBLEC_GPIO_BT_SHUTDOWN_N); + gpio_free(INCREDIBLEC_GPIO_BT_RESET_N); + + return 0; +} + +static struct platform_driver incrediblec_rfkill_driver = { + .probe = incrediblec_rfkill_probe, + .remove = 
incrediblec_rfkill_remove, + .driver = { + .name = "incrediblec_rfkill", + .owner = THIS_MODULE, + }, +}; + +static int __init incrediblec_rfkill_init(void) +{ + if (!machine_is_incrediblec()) + return 0; + + return platform_driver_register(&incrediblec_rfkill_driver); +} + +static void __exit incrediblec_rfkill_exit(void) +{ + platform_driver_unregister(&incrediblec_rfkill_driver); +} + + +module_init(incrediblec_rfkill_init); +module_exit(incrediblec_rfkill_exit); +MODULE_DESCRIPTION("incrediblec rfkill"); +MODULE_AUTHOR("Nick Pelly "); +MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-msm/board-incrediblec-tpa6130.c b/arch/arm/mach-msm/board-incrediblec-tpa6130.c new file mode 100644 index 0000000000000..bd91aeae5f587 --- /dev/null +++ b/arch/arm/mach-msm/board-incrediblec-tpa6130.c @@ -0,0 +1,185 @@ +/* driver/i2c/chip/tap6130.c + * + * TI TPA6130 Headset Amp + * + * Copyright (C) 2009 HTC Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define HEADSET_MTOA_PROG 0x30100003 +#define HEADSET_MTOA_VERS 0 +#define HTC_HEADSET_NULL_PROC 0 +#define HTC_HEADSET_CTL_PROC 1 + +static struct i2c_client *this_client; +struct mutex amp_mutex; +static struct tpa6130_platform_data *pdata; + +static int i2c_on; +char buffer[2]; + +static int I2C_TxData(char *txData, int length) +{ + struct i2c_msg msg[] = { + { + .addr = this_client->addr, + .flags = 0, + .len = length, + .buf = txData, + }, + }; + + if (i2c_transfer(this_client->adapter, msg, 1) < 0) { + pr_err("tpa6130 :I2C transfer error\n"); + return -EIO; + } else + return 0; +} + +void set_headset_amp(int on) +{ + mutex_lock(&_mutex); + if (on && !i2c_on) { + buffer[0] = 0x01; + buffer[1] = 0xC0; + buffer[2] = 0x3E; + if (I2C_TxData(buffer, 3) == 0) { + i2c_on = 1; + pr_err("tpa6130: turn on headset amp !\n"); + } + } else if (!on && i2c_on) { + buffer[0] = 0x01; + buffer[1] = 0xC1; + if (I2C_TxData(buffer, 2) == 0) { + i2c_on = 0; + pr_err("tpa6130: turn off headset amp !\n"); + } + } + mutex_unlock(&_mutex); +} + +static int handle_headset_call(struct msm_rpc_server *server, + struct rpc_request_hdr *req, unsigned len) +{ + struct rpc_headset_amp_ctl_args *args; + + if (!pdata->enable_rpc_server) + return 0; + + switch (req->procedure) { + case HTC_HEADSET_NULL_PROC: + return 0; + case HTC_HEADSET_CTL_PROC: + args = (struct rpc_headset_amp_ctl_args *)(req + 1); + args->on = be32_to_cpu(args->on); + if (args->on) { + gpio_set_value(pdata->gpio_hp_sd, 1); + msleep(10); + set_headset_amp(args->on); + } else if (!args->on) { + set_headset_amp(args->on); + gpio_set_value(pdata->gpio_hp_sd, 0); + } + return 0; + default: + pr_err("tpa6130a: the wrong proc for headset server\n"); + } + return -ENODEV; +} + +static struct msm_rpc_server headset_server = { + .prog = HEADSET_MTOA_PROG, + .vers = HEADSET_MTOA_VERS, + .rpc_call = handle_headset_call +}; + +int tpa6130_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + int ret = 0; + + pdata = client->dev.platform_data; + if (pdata == NULL) { + pr_err("tpa6130: 
platform data is NULL\n"); + goto fault; + } + + if (pdata->enable_rpc_server) { + msm_rpc_create_server(&headset_server); + + ret = gpio_request(pdata->gpio_hp_sd, "tpa6130"); + if (ret < 0) { + pr_err("tap6130a : gpio request failed\n"); + goto fault; + } + + ret = gpio_direction_output(pdata->gpio_hp_sd, 1); + if (ret < 0) { + pr_err("tap6130a: request reset gpio failed\n"); + goto fault; + } + gpio_set_value(pdata->gpio_hp_sd, 0); + } + + this_client = client; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + pr_err("tpa6130a: i2c check functionality error\n"); + goto fault; + } + + return 0; +fault: + return -ENODEV; +} + +static int tpa6130_remove(struct i2c_client *client) +{ + return 0; +} +static const struct i2c_device_id tpa6130_id[] = { + { TPA6130_I2C_NAME, 0 }, + { } +}; + +static struct i2c_driver tpa6130_driver = { + .probe = tpa6130_probe, + .remove = tpa6130_remove, + .id_table = tpa6130_id, + .driver = { + .name = TPA6130_I2C_NAME, + }, +}; + +static int __init tpa6130_init(void) +{ + pr_err("tpa6130 HP AMP: init\n"); + mutex_init(&_mutex); + return i2c_add_driver(&tpa6130_driver); +} + +static void __exit tpa6130_exit(void) +{ + i2c_del_driver(&tpa6130_driver); +} + +module_init(tpa6130_init); +module_exit(tpa6130_exit); diff --git a/arch/arm/mach-msm/board-incrediblec-tv.c b/arch/arm/mach-msm/board-incrediblec-tv.c new file mode 100644 index 0000000000000..8cb9197d47a93 --- /dev/null +++ b/arch/arm/mach-msm/board-incrediblec-tv.c @@ -0,0 +1,125 @@ +/* + * Copyright (C) 2008 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "board-incrediblec.h" +#include "devices.h" +#include "proc_comm.h" + +static struct resource msm_tvenc_resources[] = { + { + .name = "msm_tv", + .start = 0xaa400000, + .end = 0xaa400000 + 0x1000 - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = MSM_TV_FB_BASE, + .end = MSM_TV_FB_BASE + MSM_TV_FB_SIZE - 1, + .flags = IORESOURCE_MEM, + } +}; + +static struct msm_fb_data incrediblec_tvenc_fb_data = { + .xres = 480, + .yres = 720, + /* Typical geometry of 17" CRT */ + .width = 338, + .height = 270, + .output_format = MDP_YCRYCB_H2V1, +}; + +static int incrediblec_tv_video_relay(int on_off) +{ + pr_info("[tv]: %s(%d)\n", __func__, on_off); + on_off = !!on_off; + gpio_set_value(INCREDIBLEC_VIDEO_SHDN_N, on_off); + if (system_rev < 2) + gpio_set_value(INCREDIBLEC_AV_SWITCH, on_off); + + return 0; +} + +static struct msm_tvenc_platform_data incrediblec_tvenc_platform_data = { + .fb_id = 1, + .fb_data = &incrediblec_tvenc_fb_data, + .fb_resource = &msm_tvenc_resources[1], + .video_relay = &incrediblec_tv_video_relay, +}; + +static struct platform_device msm_tvenc_device = { + .name = "msm_tv", + .id = 0, + .num_resources = ARRAY_SIZE(msm_tvenc_resources), + .resource = msm_tvenc_resources, + .dev = { + .platform_data = &incrediblec_tvenc_platform_data, + }, +}; + +int __init incrediblec_init_tv(void) +{ + int ret, engid; + uint32_t config; + + if (!machine_is_incrediblec()) + return 0; + + engid = incrediblec_get_engineerid(); + if (0 == engid || 0xF == engid) { + msm_tvenc_resources[1].start = MSM_TV_FB_XA_BASE; + msm_tvenc_resources[1].end = msm_tvenc_resources[1].start + + MSM_TV_FB_SIZE - 1; + } else if (engid >= 3) { + msm_tvenc_resources[1].start = + MSM_TV_FB_BASE + MSM_MEM_128MB_OFFSET; + msm_tvenc_resources[1].end = msm_tvenc_resources[1].start + + MSM_TV_FB_SIZE - 1; + } + + config = PCOM_GPIO_CFG(INCREDIBLEC_VIDEO_SHDN_N, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_16MA); + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &config, 0); + gpio_set_value(INCREDIBLEC_VIDEO_SHDN_N, 1); + config = PCOM_GPIO_CFG(INCREDIBLEC_TV_LOAD_DET, 0, GPIO_INPUT, + GPIO_PULL_DOWN, GPIO_16MA); + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &config, 0); + + if (system_rev < 2) { + config = PCOM_GPIO_CFG(INCREDIBLEC_AV_SWITCH, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_16MA); + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &config, 0); + } + + if ((ret = platform_device_register(&msm_tvenc_device)) != 0) + return ret; + + return 0; +} + +device_initcall(incrediblec_init_tv); diff --git a/arch/arm/mach-msm/board-incrediblec-wifi.c b/arch/arm/mach-msm/board-incrediblec-wifi.c new file mode 100644 index 0000000000000..261e3d8c8e0bf --- /dev/null +++ b/arch/arm/mach-msm/board-incrediblec-wifi.c @@ -0,0 +1,152 @@ +/* linux/arch/arm/mach-msm/board-incrediblec-wifi.c +*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "board-incrediblec.h" + +int incrediblec_wifi_power(int on); +int incrediblec_wifi_reset(int on); +int incrediblec_wifi_set_carddetect(int on); + +#if defined(CONFIG_DHD_USE_STATIC_BUF) || defined(CONFIG_BCM4329_DHD_USE_STATIC_BUF) + +#define PREALLOC_WLAN_NUMBER_OF_SECTIONS 4 +#define PREALLOC_WLAN_NUMBER_OF_BUFFERS 160 +#define PREALLOC_WLAN_SECTION_HEADER 24 + +#define WLAN_SECTION_SIZE_0 (PREALLOC_WLAN_NUMBER_OF_BUFFERS * 128) +#define WLAN_SECTION_SIZE_1 (PREALLOC_WLAN_NUMBER_OF_BUFFERS 
* 128) +#define WLAN_SECTION_SIZE_2 (PREALLOC_WLAN_NUMBER_OF_BUFFERS * 512) +#define WLAN_SECTION_SIZE_3 (PREALLOC_WLAN_NUMBER_OF_BUFFERS * 1024) + +#define WLAN_SKB_BUF_NUM 16 + +static struct sk_buff *wlan_static_skb[WLAN_SKB_BUF_NUM]; + +typedef struct wifi_mem_prealloc_struct { + void *mem_ptr; + unsigned long size; +} wifi_mem_prealloc_t; + +static wifi_mem_prealloc_t wifi_mem_array[PREALLOC_WLAN_NUMBER_OF_SECTIONS] = { + { NULL, (WLAN_SECTION_SIZE_0 + PREALLOC_WLAN_SECTION_HEADER) }, + { NULL, (WLAN_SECTION_SIZE_1 + PREALLOC_WLAN_SECTION_HEADER) }, + { NULL, (WLAN_SECTION_SIZE_2 + PREALLOC_WLAN_SECTION_HEADER) }, + { NULL, (WLAN_SECTION_SIZE_3 + PREALLOC_WLAN_SECTION_HEADER) } +}; + +static void *incrediblec_wifi_mem_prealloc(int section, unsigned long size) +{ + if (section == PREALLOC_WLAN_NUMBER_OF_SECTIONS) + return wlan_static_skb; + if ((section < 0) || (section > PREALLOC_WLAN_NUMBER_OF_SECTIONS)) + return NULL; + if (wifi_mem_array[section].size < size) + return NULL; + return wifi_mem_array[section].mem_ptr; +} +#endif + +int __init incrediblec_init_wifi_mem(void) +{ +#if defined(CONFIG_DHD_USE_STATIC_BUF) || defined(CONFIG_BCM4329_DHD_USE_STATIC_BUF) + int i; + + for(i=0;( i < WLAN_SKB_BUF_NUM );i++) { + if (i < (WLAN_SKB_BUF_NUM/2)) + wlan_static_skb[i] = dev_alloc_skb(4096); + else + wlan_static_skb[i] = dev_alloc_skb(8192); + } + for(i=0;( i < PREALLOC_WLAN_NUMBER_OF_SECTIONS );i++) { + wifi_mem_array[i].mem_ptr = kmalloc(wifi_mem_array[i].size, + GFP_KERNEL); + if (wifi_mem_array[i].mem_ptr == NULL) + return -ENOMEM; + } +#endif + return 0; +} + +static struct resource incrediblec_wifi_resources[] = { + [0] = { + .name = "bcmdhd_wlan_irq", + .start = MSM_GPIO_TO_INT(INCREDIBLEC_GPIO_WIFI_IRQ), + .end = MSM_GPIO_TO_INT(INCREDIBLEC_GPIO_WIFI_IRQ), + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE, + }, +}; + +static struct wifi_platform_data incrediblec_wifi_control = { + .set_power = incrediblec_wifi_power, + .set_reset = incrediblec_wifi_reset, + .set_carddetect = incrediblec_wifi_set_carddetect, +#if defined(CONFIG_DHD_USE_STATIC_BUF) || defined(CONFIG_BCM4329_DHD_USE_STATIC_BUF) + .mem_prealloc = incrediblec_wifi_mem_prealloc, +#else + .mem_prealloc = NULL, +#endif +}; + +static struct platform_device incrediblec_wifi_device = { + .name = "bcmdhd_wlan", + .id = 1, + .num_resources = ARRAY_SIZE(incrediblec_wifi_resources), + .resource = incrediblec_wifi_resources, + .dev = { + .platform_data = &incrediblec_wifi_control, + }, +}; + +extern unsigned char *get_wifi_nvs_ram(void); +extern int wifi_calibration_size_set(void); + +static unsigned incrediblec_wifi_update_nvs(char *str, int add_flag) +{ +#define NVS_LEN_OFFSET 0x0C +#define NVS_DATA_OFFSET 0x40 + unsigned char *ptr; + unsigned len; + + if (!str) + return -EINVAL; + ptr = get_wifi_nvs_ram(); + /* Size in format LE assumed */ + memcpy(&len, ptr + NVS_LEN_OFFSET, sizeof(len)); + /* if the last byte in NVRAM is 0, trim it */ + if (ptr[NVS_DATA_OFFSET + len - 1] == 0) + len -= 1; + if (add_flag) { + strcpy(ptr + NVS_DATA_OFFSET + len, str); + len += strlen(str); + } else { + if (strnstr(ptr + NVS_DATA_OFFSET, str, len)) + len -= strlen(str); + } + memcpy(ptr + NVS_LEN_OFFSET, &len, sizeof(len)); + wifi_calibration_size_set(); + return 0; +} + +static int __init incrediblec_wifi_init(void) +{ + if (!machine_is_incrediblec()) + return 0; + + printk("%s: start\n", __func__); + incrediblec_wifi_update_nvs("sd_oobonly=1\r\n", 0); + incrediblec_wifi_update_nvs("btc_params70=0x32\r\n", 1); + 
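	/*
	 * Patch the Broadcom calibration blob before registering the bcmdhd
	 * device: incrediblec_wifi_update_nvs() above removes the string when
	 * add_flag is 0 ("sd_oobonly=1" is stripped) and appends it when
	 * add_flag is 1 ("btc_params70=0x32", presumably a BT-coexistence
	 * tuning parameter, is added).
	 */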
incrediblec_init_wifi_mem(); + return platform_device_register(&incrediblec_wifi_device); +} + +late_initcall(incrediblec_wifi_init); diff --git a/arch/arm/mach-msm/board-incrediblec.c b/arch/arm/mach-msm/board-incrediblec.c new file mode 100644 index 0000000000000..d4d5c9973e4a2 --- /dev/null +++ b/arch/arm/mach-msm/board-incrediblec.c @@ -0,0 +1,1516 @@ +/* linux/arch/arm/mach-msm/board-incrediblec.c + * + * Copyright (C) 2009 Google, Inc. + * Copyright (C) 2009 HTC Corporation. + * Author: Dima Zavin + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "board-incrediblec.h" +#include "devices.h" +#include "proc_comm.h" +#include "smd_private.h" +#if 1 /*allenou, bt for bcm, 2009/7/8 */ +#include +#endif +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include "footswitch.h" +#include + +#define SMEM_SPINLOCK_I2C 6 +#define INCREDIBLEC_MICROP_VER 0x04 + +static uint opt_usb_h2w_sw; +module_param_named(usb_h2w_sw, opt_usb_h2w_sw, uint, 0); + +void msm_init_pmic_vibrator(void); +extern void __init incrediblec_audio_init(void); +#ifdef CONFIG_MICROP_COMMON +void __init incrediblec_microp_init(void); +#endif + +unsigned int engineerid; + +static struct htc_battery_platform_data htc_battery_pdev_data = { +/* .gpio_mbat_in = INCREDIBLEC_GPIO_MBAT_IN,*/ +/* .gpio_mchg_en_n = INCREDIBLEC_GPIO_MCHG_EN_N,*/ +/* .gpio_iset = INCREDIBLEC_GPIO_ISET,*/ + .guage_driver = GUAGE_MODEM, + .m2a_cable_detect = 1, + .charger = SWITCH_CHARGER, +}; + +static struct platform_device htc_battery_pdev = { + .name = "htc_battery", + .id = -1, + .dev = { + .platform_data = &htc_battery_pdev_data, + }, +}; +static int capella_cm3602_power(int pwr_device, uint8_t enable); +/*XA, XB*/ +static struct microp_function_config microp_functions[] = { + { + .name = "microp_intrrupt", + .category = MICROP_FUNCTION_INTR, + }, + { + .name = "reset-int", + .category = MICROP_FUNCTION_RESET_INT, + .int_pin = 1 << 8, + }, + { + .name = "oj", + .category = MICROP_FUNCTION_OJ, + .int_pin = 1 << 12, + }, + { + .name = "proximity", + .category = MICROP_FUNCTION_P, + .int_pin = 1 << 11, + .mask_r = {0x00, 0x00, 0x10}, + .mask_w = {0x00, 0x00, 0x04}, + }, +}; + +/*For XC: Change ALS chip from CM3602 to CM3605*/ +static struct microp_function_config microp_functions_1[] = { + { + .name = "remote-key", + .category = MICROP_FUNCTION_REMOTEKEY, + .levels = {0, 33, 50, 110, 160, 220}, + .channel = 1, + .int_pin = 1 << 5, + }, + { + .name = "microp_intrrupt", + .category = MICROP_FUNCTION_INTR, + }, + { + .name = "reset-int", + .category = MICROP_FUNCTION_RESET_INT, + .int_pin = 1 << 8, + }, + { + .name = "oj", + .category = MICROP_FUNCTION_OJ, + .int_pin = 1 << 12, + }, + { + .name = "proximity", + .category = MICROP_FUNCTION_P, + .int_pin = 1 << 
11, + .mask_r = {0x00, 0x00, 0x10}, + .mask_w = {0x00, 0x00, 0x04}, + }, +}; + +static struct microp_function_config microp_lightsensor = { + .name = "light_sensor", + .category = MICROP_FUNCTION_LSENSOR, + .levels = { 1, 11, 16, 22, 75, 209, 362, 488, 560, 0x3FF }, + .channel = 3, + .int_pin = 1 << 9, + .golden_adc = 0xD2, + .mask_w = {0x00, 0x00, 0x04}, + .ls_power = capella_cm3602_power, +}; + +static struct lightsensor_platform_data lightsensor_data = { + .config = &microp_lightsensor, + .irq = MSM_uP_TO_INT(9), +}; + +static struct microp_led_config led_config[] = { + { + .name = "amber", + .type = LED_RGB, + }, + { + .name = "green", + .type = LED_RGB, + }, +}; + +static struct microp_led_platform_data microp_leds_data = { + .num_leds = ARRAY_SIZE(led_config), + .led_config = led_config, +}; + +static struct bma150_platform_data incrediblec_g_sensor_pdata = { + .microp_new_cmd = 1, +}; + +/* Proximity Sensor (Capella_CM3602)*/ + +static int __capella_cm3602_power(int on) +{ + uint8_t data[3], addr; + int ret; + + printk(KERN_DEBUG "%s: Turn the capella_cm3602 power %s\n", + __func__, (on) ? "on" : "off"); + if (on) + gpio_direction_output(INCREDIBLEC_GPIO_PROXIMITY_EN_N, 1); + + data[0] = 0x00; + data[1] = 0x00; + data[2] = 0x04; + addr = on ? MICROP_I2C_WCMD_GPO_LED_STATUS_EN : + MICROP_I2C_WCMD_GPO_LED_STATUS_DIS; + ret = microp_i2c_write(addr, data, 3); + if (ret < 0) + pr_err("%s: %s capella power failed\n", + __func__, (on ? "enable" : "disable")); + + if (!on) + gpio_direction_output(INCREDIBLEC_GPIO_PROXIMITY_EN_N, 0); + + return ret; +} + +static DEFINE_MUTEX(capella_cm3602_lock); +static unsigned int als_power_control; + +static int capella_cm3602_power(int pwr_device, uint8_t enable) +{ + unsigned int old_status = 0; + int ret = 0, on = 0; + mutex_lock(&capella_cm3602_lock); + + old_status = als_power_control; + if (enable) + als_power_control |= pwr_device; + else + als_power_control &= ~pwr_device; + + on = als_power_control ? 
1 : 0; + if (old_status == 0 && on) + ret = __capella_cm3602_power(1); + else if (!on) + ret = __capella_cm3602_power(0); + + mutex_unlock(&capella_cm3602_lock); + return ret; +} + +static struct capella_cm3602_platform_data capella_cm3602_pdata = { + .power = capella_cm3602_power, + .p_en = INCREDIBLEC_GPIO_PROXIMITY_EN_N, + .p_out = MSM_uP_TO_INT(11), +}; +/* End Proximity Sensor (Capella_CM3602)*/ + +static struct htc_headset_microp_platform_data htc_headset_microp_data = { + .remote_int = 1 << 5, + .remote_irq = MSM_uP_TO_INT(5), + .remote_enable_pin = 0, + .adc_channel = 0x01, + .adc_remote = {0, 33, 50, 110, 160, 220}, +}; + +static struct platform_device microp_devices[] = { + { + .name = "lightsensor_microp", + .dev = { + .platform_data = &lightsensor_data, + }, + }, + { + .name = "leds-microp", + .id = -1, + .dev = { + .platform_data = &microp_leds_data, + }, + }, + { + .name = BMA150_G_SENSOR_NAME, + .dev = { + .platform_data = &incrediblec_g_sensor_pdata, + }, + }, + { + .name = "incrediblec_proximity", + .id = -1, + .dev = { + .platform_data = &capella_cm3602_pdata, + }, + }, + { + .name = "HTC_HEADSET_MICROP", + .id = -1, + .dev = { + .platform_data = &htc_headset_microp_data, + }, + }, +}; + +static struct microp_i2c_platform_data microp_data = { + .num_functions = ARRAY_SIZE(microp_functions), + .microp_function = microp_functions, + .num_devices = ARRAY_SIZE(microp_devices), + .microp_devices = microp_devices, + .gpio_reset = INCREDIBLEC_GPIO_UP_RESET_N, + .microp_ls_on = LS_PWR_ON | PS_PWR_ON, + .spi_devices = SPI_OJ | SPI_GSENSOR, +}; + +static struct gpio_led incrediblec_led_list[] = { + { + .name = "button-backlight", + .gpio = INCREDIBLEC_AP_KEY_LED_EN, + .active_low = 0, + }, +}; + +static struct gpio_led_platform_data incrediblec_leds_data = { + .num_leds = ARRAY_SIZE(incrediblec_led_list), + .leds = incrediblec_led_list, +}; + +static struct platform_device incrediblec_leds = { + .name = "leds-gpio", + .id = -1, + .dev = { + .platform_data = &incrediblec_leds_data, + }, +}; + +static int incrediblec_phy_init_seq[] = { 0x1D, 0x0D, 0x1D, 0x10, -1 }; + +extern void msm_hsusb_8x50_phy_reset(void); + +static struct msm_hsusb_platform_data msm_hsusb_pdata = { + .phy_init_seq = incrediblec_phy_init_seq, + .phy_reset = msm_hsusb_8x50_phy_reset, + .usb_id_pin_gpio = INCREDIBLEC_GPIO_USB_ID_PIN, + .accessory_detect = 1, /* detect by ID pin gpio */ +}; + +static char *usb_functions_ums[] = { + "usb_mass_storage", +}; + +static char *usb_functions_ums_adb[] = { + "usb_mass_storage", + "adb", +}; + +static char *usb_functions_rndis[] = { + "rndis", +}; + +static char *usb_functions_rndis_adb[] = { + "rndis", + "adb", +}; + +#ifdef CONFIG_USB_ANDROID_ACCESSORY +static char *usb_functions_accessory[] = { "accessory" }; +static char *usb_functions_accessory_adb[] = { "accessory", "adb" }; +#endif + +#ifdef CONFIG_USB_ANDROID_DIAG +static char *usb_functions_adb_diag[] = { + "usb_mass_storage", + "adb", + "diag", +}; +#endif + +static char *usb_functions_all[] = { +#ifdef CONFIG_USB_ANDROID_RNDIS + "rndis", +#endif + "usb_mass_storage", + "adb", +#ifdef CONFIG_USB_ANDROID_ACM + "acm", +#endif +#ifdef CONFIG_USB_ANDROID_DIAG + "diag", +#endif +#ifdef CONFIG_USB_ANDROID_ACCESSORY + "accessory", +#endif +}; + +static struct android_usb_product usb_products[] = { + { + .product_id = 0x0ff9, + .num_functions = ARRAY_SIZE(usb_functions_ums), + .functions = usb_functions_ums, + }, + { + .product_id = 0x0c8d, + .num_functions = ARRAY_SIZE(usb_functions_ums_adb), + .functions = 
usb_functions_ums_adb, + }, + { + .product_id = 0x0c03, + .num_functions = ARRAY_SIZE(usb_functions_rndis), + .functions = usb_functions_rndis, + }, + { + .product_id = 0x0c04, + .num_functions = ARRAY_SIZE(usb_functions_rndis_adb), + .functions = usb_functions_rndis_adb, + }, +#ifdef CONFIG_USB_ANDROID_ACCESSORY + { + .product_id = USB_ACCESSORY_PRODUCT_ID, + .num_functions = ARRAY_SIZE(usb_functions_accessory), + .functions = usb_functions_accessory, + }, + { + .product_id = USB_ACCESSORY_ADB_PRODUCT_ID, + .num_functions = ARRAY_SIZE(usb_functions_accessory_adb), + .functions = usb_functions_accessory_adb, + }, +#endif +#ifdef CONFIG_USB_ANDROID_DIAG + { + .product_id = 0x0c07, + .num_functions = ARRAY_SIZE(usb_functions_adb_diag), + .functions = usb_functions_adb_diag, + }, +#endif +}; + +static struct usb_mass_storage_platform_data mass_storage_pdata = { + .nluns = 3, + .vendor = "HTC", + .product = "Incredible", + .release = 0x0100, +}; + +static struct platform_device usb_mass_storage_device = { + .name = "usb_mass_storage", + .id = -1, + .dev = { + .platform_data = &mass_storage_pdata, + }, +}; + +#ifdef CONFIG_USB_ANDROID_RNDIS +static struct usb_ether_platform_data rndis_pdata = { + /* ethaddr is filled by incrediblec_board_serialno_setup */ + .vendorID = 0x18d1, + .vendorDescr = "Google, Inc.", +}; + +static struct platform_device rndis_device = { + .name = "rndis", + .id = -1, + .dev = { + .platform_data = &rndis_pdata, + }, +}; +#endif + +static struct android_usb_platform_data android_usb_pdata = { + .vendor_id = 0x0bb4, + .product_id = 0x0c9e, + .version = 0x0100, + .product_name = "Android Phone", + .manufacturer_name = "HTC", + .num_products = ARRAY_SIZE(usb_products), + .products = usb_products, + .num_functions = ARRAY_SIZE(usb_functions_all), + .functions = usb_functions_all, +}; + +static struct platform_device android_usb_device = { + .name = "android_usb", + .id = -1, + .dev = { + .platform_data = &android_usb_pdata, + }, +}; + + +static struct platform_device incrediblec_rfkill = { + .name = "incrediblec_rfkill", + .id = -1, +}; + +static struct spi_platform_data incrediblec_spi_pdata = { + .clk_rate = 1200000, +}; + +/* start kgsl */ +static struct resource kgsl_3d0_resources[] = { + { + .name = KGSL_3D0_REG_MEMORY, + .start = 0xA0000000, + .end = 0xA001ffff, + .flags = IORESOURCE_MEM, + }, + { + .name = KGSL_3D0_IRQ, + .start = INT_GRAPHICS, + .end = INT_GRAPHICS, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct kgsl_device_platform_data kgsl_3d0_pdata = { + .pwrlevel = { + { + .gpu_freq = 0, + .bus_freq = 128000000, + }, + }, + .init_level = 0, + .num_levels = 1, + .set_grp_async = NULL, + .idle_timeout = HZ/5, + .clk_map = KGSL_CLK_GRP | KGSL_CLK_IMEM, +}; + +struct platform_device msm_kgsl_3d0 = { + .name = "kgsl-3d0", + .id = 0, + .num_resources = ARRAY_SIZE(kgsl_3d0_resources), + .resource = kgsl_3d0_resources, + .dev = { + .platform_data = &kgsl_3d0_pdata, + }, +}; +/* end kgsl */ + +/* start footswitch regulator */ +struct platform_device *msm_footswitch_devices[] = { + FS_PCOM(FS_GFX3D, "fs_gfx3d"), +}; + +unsigned msm_num_footswitch_devices = ARRAY_SIZE(msm_footswitch_devices); +/* end footswitch regulator */ + +/* pmem heaps */ +#ifndef CONFIG_ION_MSM +static struct android_pmem_platform_data mdp_pmem_pdata = { + .name = "pmem", + .start = MSM_PMEM_MDP_BASE, + .size = MSM_PMEM_MDP_SIZE, +/* .no_allocator = 0,*/ + .allocator_type = PMEM_ALLOCATORTYPE_ALLORNOTHING, + .cached = 1, +}; + +static struct platform_device android_pmem_mdp_device = { + .name = 
"android_pmem", + .id = 0, + .dev = { + .platform_data = &mdp_pmem_pdata + }, +}; +#endif + +static struct android_pmem_platform_data android_pmem_adsp_pdata = { + .name = "pmem_adsp", + .start = MSM_PMEM_ADSP_BASE, + .size = MSM_PMEM_ADSP_SIZE, +/* .no_allocator = 0,*/ + .allocator_type = PMEM_ALLOCATORTYPE_BITMAP, + .cached = 1, +}; + +static struct platform_device android_pmem_adsp_device = { + .name = "android_pmem", + .id = 4, + .dev = { + .platform_data = &android_pmem_adsp_pdata, + }, +}; + +#ifdef CONFIG_720P_CAMERA +static struct android_pmem_platform_data android_pmem_venc_pdata = { + .name = "pmem_venc", + .start = MSM_PMEM_VENC_BASE, + .size = MSM_PMEM_VENC_SIZE, +/* .no_allocator = 0,*/ + .allocator_type = PMEM_ALLOCATORTYPE_BITMAP, + .cached = 1, +}; + +static struct platform_device android_pmem_venc_device = { + .name = "android_pmem", + .id = 5, + .dev = { + .platform_data = &android_pmem_venc_pdata, + }, +}; +#else +static struct android_pmem_platform_data android_pmem_camera_pdata = { + .name = "pmem_camera", + .start = MSM_PMEM_CAMERA_BASE, + .size = MSM_PMEM_CAMERA_SIZE, +/* .no_allocator = 0,*/ + .allocator_type = PMEM_ALLOCATORTYPE_BITMAP, + .cached = 1, +}; + +static struct platform_device android_pmem_camera_device = { + .name = "android_pmem", + .id = 5, + .dev = { + .platform_data = &android_pmem_camera_pdata, + }, +}; +#endif +/* end pmem heaps */ + +/* ion heaps */ +#ifdef CONFIG_ION_MSM +static struct ion_co_heap_pdata co_ion_pdata = { + .adjacent_mem_id = INVALID_HEAP_ID, + .align = PAGE_SIZE, +}; + +static struct ion_platform_data ion_pdata = { + .nr = 2, + .heaps = { + { + .id = ION_SYSTEM_HEAP_ID, + .type = ION_HEAP_TYPE_SYSTEM, + .name = ION_VMALLOC_HEAP_NAME, + }, + /* PMEM_MDP = SF */ + { + .id = ION_SF_HEAP_ID, + .type = ION_HEAP_TYPE_CARVEOUT, + .name = ION_SF_HEAP_NAME, + .base = MSM_PMEM_MDP_BASE, + .size = MSM_PMEM_MDP_SIZE, + .memory_type = ION_EBI_TYPE, + .extra_data = (void *)&co_ion_pdata, + }, + } +}; + +static struct platform_device ion_dev = { + .name = "ion-msm", + .id = 1, + .dev = { .platform_data = &ion_pdata }, +}; +#endif +/* end ion heaps */ + +static struct resource ram_console_resources[] = { + { + .start = MSM_RAM_CONSOLE_BASE, + .end = MSM_RAM_CONSOLE_BASE + MSM_RAM_CONSOLE_SIZE - 1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct platform_device ram_console_device = { + .name = "ram_console", + .id = -1, + .num_resources = ARRAY_SIZE(ram_console_resources), + .resource = ram_console_resources, +}; + +static int incrediblec_atmel_ts_power(int on) +{ + printk(KERN_INFO "incrediblec_atmel_ts_power(%d)\n", on); + if (on) { + gpio_set_value(INCREDIBLEC_GPIO_TP_RST, 0); + msleep(5); + gpio_set_value(INCREDIBLEC_GPIO_TP_EN, 1); + msleep(5); + gpio_set_value(INCREDIBLEC_GPIO_TP_RST, 1); + msleep(40); + } else { + gpio_set_value(INCREDIBLEC_GPIO_TP_EN, 0); + msleep(2); + } + return 0; +} + +struct atmel_i2c_platform_data incrediblec_atmel_ts_data[] = { + { + .version = 0x016, + .abs_x_min = 1, + .abs_x_max = 1023, + .abs_y_min = 2, + .abs_y_max = 966, + .abs_pressure_min = 0, + .abs_pressure_max = 255, + .abs_width_min = 0, + .abs_width_max = 20, + .gpio_irq = INCREDIBLEC_GPIO_TP_INT_N, + .power = incrediblec_atmel_ts_power, + .config_T6 = {0, 0, 0, 0, 0, 0}, + .config_T7 = {50, 15, 25}, + .config_T8 = {10, 0, 20, 10, 0, 0, 5, 15}, + .config_T9 = {139, 0, 0, 18, 12, 0, 16, 38, 3, 7, 0, 5, 2, 15, 2, 10, 25, 5, 0, 0, 0, 0, 0, 0, 0, 0, 159, 47, 149, 81, 40}, + .config_T15 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T19 = {0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0}, + .config_T20 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T22 = {15, 0, 0, 0, 0, 0, 0, 0, 16, 0, 1, 0, 7, 18, 25, 30, 0}, + .config_T23 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T24 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T25 = {3, 0, 200, 50, 64, 31, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T27 = {0, 0, 0, 0, 0, 0, 0}, + .config_T28 = {0, 0, 2, 4, 8, 60}, + .object_crc = {0xDB, 0xBF, 0x60}, + .cable_config = {35, 30, 8, 16}, + .GCAF_level = {20, 24, 28, 40, 63}, + .filter_level = {15, 60, 963, 1008}, + }, + { + .version = 0x015, + .abs_x_min = 13, + .abs_x_max = 1009, + .abs_y_min = 15, + .abs_y_max = 960, + .abs_pressure_min = 0, + .abs_pressure_max = 255, + .abs_width_min = 0, + .abs_width_max = 20, + .gpio_irq = INCREDIBLEC_GPIO_TP_INT_N, + .power = incrediblec_atmel_ts_power, + .config_T6 = {0, 0, 0, 0, 0, 0}, + .config_T7 = {50, 15, 25}, + .config_T8 = {12, 0, 20, 20, 0, 0, 20, 0}, + .config_T9 = {139, 0, 0, 18, 12, 0, 32, 40, 2, 7, 0, 5, 2, 0, 2, 10, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 159, 47, 149, 81}, + .config_T15 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T19 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T20 = {7, 0, 0, 0, 0, 0, 0, 30, 20, 4, 15, 5}, + .config_T22 = {7, 0, 0, 25, 0, -25, 255, 4, 50, 0, 1, 10, 15, 20, 25, 30, 4}, + .config_T23 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T24 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T25 = {3, 0, 200, 50, 64, 31, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T27 = {0, 0, 0, 0, 0, 0, 0}, + .config_T28 = {0, 0, 2, 4, 8, 60}, + .object_crc = {0x19, 0x87, 0x7E}, + }, + { + .version = 0x014, + .abs_x_min = 13, + .abs_x_max = 1009, + .abs_y_min = 15, + .abs_y_max = 960, + .abs_pressure_min = 0, + .abs_pressure_max = 255, + .abs_width_min = 0, + .abs_width_max = 20, + .gpio_irq = INCREDIBLEC_GPIO_TP_INT_N, + .power = incrediblec_atmel_ts_power, + .config_T6 = {0, 0, 0, 0, 0, 0}, + .config_T7 = {50, 15, 25}, + .config_T8 = {12, 0, 20, 20, 0, 0, 10, 15}, + .config_T9 = {3, 0, 0, 18, 12, 0, 48, 45, 2, 7, 0, 0, 0, 0, 2, 10, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 143, 47, 143, 81}, + .config_T15 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T19 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T20 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T22 = {5, 0, 0, 25, 0, -25, 255, 4, 50, 0, 1, 10, 15, 20, 25, 30, 4}, + .config_T23 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T24 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T25 = {3, 0, 200, 50, 64, 31, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T27 = {0, 0, 0, 0, 0, 0, 0}, + .config_T28 = {0, 0, 2, 4, 8, 60}, + } +}; + +static struct regulator_consumer_supply tps65023_dcdc1_supplies[] = { + { + .supply = "acpu_vcore", + }, +}; + +static struct regulator_init_data tps65023_data[5] = { + { + .constraints = { + .name = "dcdc1", /* VREG_MSMC2_1V29 */ + .min_uV = 925000, + .max_uV = 1350000, + .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, + }, + .consumer_supplies = tps65023_dcdc1_supplies, + .num_consumer_supplies = ARRAY_SIZE(tps65023_dcdc1_supplies), + }, + /* dummy values for unused regulators to not crash driver: */ + { + .constraints = { + .name = "dcdc2", /* VREG_MSMC1_1V26 */ + .min_uV = 1260000, + .max_uV = 1260000, + }, + }, + { + .constraints = { + .name = "dcdc3", /* unused */ + .min_uV = 800000, + .max_uV = 3300000, + }, + }, + { + .constraints = { + .name = "ldo1", /* unused */ + .min_uV = 1000000, + .max_uV = 3150000, + }, + }, + { + .constraints = { + .name = "ldo2", /* 
V_USBPHY_3V3 */ + .min_uV = 3300000, + .max_uV = 3300000, + }, + }, +}; + + +static struct htc_headset_mgr_platform_data htc_headset_mgr_data = { +}; + +static struct platform_device htc_headset_mgr = { + .name = "HTC_HEADSET_MGR", + .id = -1, + .dev = { + .platform_data = &htc_headset_mgr_data, + }, +}; + +static struct htc_headset_gpio_platform_data htc_headset_gpio_data = { + .hpin_gpio = INCREDIBLEC_GPIO_35MM_HEADSET_DET, + .key_enable_gpio = 0, + .mic_select_gpio = 0, +}; + +static struct platform_device htc_headset_gpio = { + .name = "HTC_HEADSET_GPIO", + .id = -1, + .dev = { + .platform_data = &htc_headset_gpio_data, + }, +}; + +static struct akm8973_platform_data compass_platform_data = { + .layouts = INCREDIBLEC_LAYOUTS, + .project_name = INCREDIBLEC_PROJECT_NAME, + .reset = INCREDIBLEC_GPIO_COMPASS_RST_N, + .intr = INCREDIBLEC_GPIO_COMPASS_INT_N, +}; + +static struct tpa6130_platform_data headset_amp_platform_data = { + .enable_rpc_server = 0, +}; + +static struct i2c_board_info i2c_devices[] = { + { + I2C_BOARD_INFO(ATMEL_QT602240_NAME, 0x94 >> 1), + .platform_data = &incrediblec_atmel_ts_data, + .irq = MSM_GPIO_TO_INT(INCREDIBLEC_GPIO_TP_INT_N) + }, + { + I2C_BOARD_INFO(MICROP_I2C_NAME, 0xCC >> 1), + .platform_data = &microp_data, + .irq = MSM_GPIO_TO_INT(INCREDIBLEC_GPIO_UP_INT_N) + }, + { + I2C_BOARD_INFO("ds2482", 0x30 >> 1), + /*.platform_data = &microp_data,*/ + /*.irq = MSM_GPIO_TO_INT(PASSION_GPIO_UP_INT_N)*/ + }, + { + I2C_BOARD_INFO("smb329", 0x6E >> 1), + }, + { + I2C_BOARD_INFO("akm8973", 0x1C), + .platform_data = &compass_platform_data, + .irq = MSM_GPIO_TO_INT(INCREDIBLEC_GPIO_COMPASS_INT_N), + }, +#ifdef CONFIG_MSM_CAMERA +#ifdef CONFIG_OV8810 + { + I2C_BOARD_INFO("ov8810", 0x6C >> 1), + }, +#endif +#endif /* CONFIG_MSM_CAMERA */ + + { + I2C_BOARD_INFO(TPA6130_I2C_NAME, 0xC0 >> 1), + .platform_data = &headset_amp_platform_data, + }, + { + I2C_BOARD_INFO("tps65023", 0x48), + .platform_data = tps65023_data, + }, +}; + +static uint32_t camera_off_gpio_table[] = { + +#if 0 /* CAMERA OFF*/ + PCOM_GPIO_CFG(0, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT0 */ + PCOM_GPIO_CFG(1, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(2, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(3, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(4, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT4 */ + PCOM_GPIO_CFG(5, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT5 */ + PCOM_GPIO_CFG(6, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT6 */ + PCOM_GPIO_CFG(7, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT7 */ + PCOM_GPIO_CFG(8, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT8 */ + PCOM_GPIO_CFG(9, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT9 */ + PCOM_GPIO_CFG(10, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT10 */ + PCOM_GPIO_CFG(11, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT11 */ + PCOM_GPIO_CFG(12, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* PCLK */ + PCOM_GPIO_CFG(13, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* HSYNC */ + PCOM_GPIO_CFG(14, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* VSYNC */ + PCOM_GPIO_CFG(15, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* MCLK */ +#endif + /* CAMERA SUSPEND*/ + PCOM_GPIO_CFG(0, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT0 */ + PCOM_GPIO_CFG(1, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(2, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(3, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(4, 0, GPIO_OUTPUT, 
GPIO_NO_PULL, GPIO_4MA), /* DAT4 */ + PCOM_GPIO_CFG(5, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT5 */ + PCOM_GPIO_CFG(6, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT6 */ + PCOM_GPIO_CFG(7, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT7 */ + PCOM_GPIO_CFG(8, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT8 */ + PCOM_GPIO_CFG(9, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT9 */ + PCOM_GPIO_CFG(10, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT10 */ + PCOM_GPIO_CFG(11, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT11 */ + PCOM_GPIO_CFG(12, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* PCLK */ + PCOM_GPIO_CFG(13, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* HSYNC */ + PCOM_GPIO_CFG(14, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* VSYNC */ + PCOM_GPIO_CFG(15, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* MCLK */ + PCOM_GPIO_CFG(99, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), /* CAM1_RST */ + PCOM_GPIO_CFG(INCREDIBLEC_CAM_PWD, + 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), /* CAM1_PWD */ +}; + +static uint32_t camera_on_gpio_table[] = { + /* CAMERA ON */ + PCOM_GPIO_CFG(0, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT0 */ + PCOM_GPIO_CFG(1, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT1 */ + PCOM_GPIO_CFG(2, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT2 */ + PCOM_GPIO_CFG(3, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT3 */ + PCOM_GPIO_CFG(4, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT4 */ + PCOM_GPIO_CFG(5, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT5 */ + PCOM_GPIO_CFG(6, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT6 */ + PCOM_GPIO_CFG(7, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT7 */ + PCOM_GPIO_CFG(8, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT8 */ + PCOM_GPIO_CFG(9, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT9 */ + PCOM_GPIO_CFG(10, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT10 */ + PCOM_GPIO_CFG(11, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT11 */ + PCOM_GPIO_CFG(12, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_16MA), /* PCLK */ + PCOM_GPIO_CFG(13, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* HSYNC */ + PCOM_GPIO_CFG(14, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* VSYNC */ + PCOM_GPIO_CFG(15, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_16MA), /* MCLK */ +}; + +static void config_camera_on_gpios(void) +{ + config_gpio_table(camera_on_gpio_table, + ARRAY_SIZE(camera_on_gpio_table)); +} + +static void config_camera_off_gpios(void) +{ + config_gpio_table(camera_off_gpio_table, + ARRAY_SIZE(camera_off_gpio_table)); +} + +enum msm_camera_source camera_source; +static void incrediblec_camera_set_source(enum msm_camera_source source) +{ + camera_source = source; +} + +static struct resource msm_camera_resources[] = { + { + .start = MSM_VFE_PHYS, + .end = MSM_VFE_PHYS + MSM_VFE_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_VFE, + INT_VFE, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct msm_camera_device_platform_data msm_camera_device_data = { + .camera_gpio_on = config_camera_on_gpios, + .camera_gpio_off = config_camera_off_gpios, + .ioext.mdcphy = MSM_MDC_PHYS, + .ioext.mdcsz = MSM_MDC_SIZE, + .ioext.appphy = MSM_CLK_CTL_PHYS, + .ioext.appsz = MSM_CLK_CTL_SIZE, +}; + +static struct camera_flash_cfg msm_camera_sensor_flash_cfg = { + .camera_flash = flashlight_control, + .num_flash_levels = FLASHLIGHT_NUM, + .low_temp_limit = 10, + .low_cap_limit = 15, +}; + +static struct msm_camera_sensor_info msm_camera_sensor_ov8810_data = { + .sensor_name = "ov8810", + .sensor_reset = INCREDIBLEC_CAM_RST, /* CAM1_RST */ + .sensor_pwd = INCREDIBLEC_CAM_PWD, /* CAM1_PWDN, enabled in a9 
*/ + .camera_set_source = incrediblec_camera_set_source, + .pdata = &msm_camera_device_data, + .resource = msm_camera_resources, + .num_resources = ARRAY_SIZE(msm_camera_resources), + .waked_up = 0, + .need_suspend = 0, + .flash_cfg = &msm_camera_sensor_flash_cfg, +}; + +static struct platform_device msm_camera_sensor_ov8810 = { + .name = "msm_camera_ov8810", + .dev = { + .platform_data = &msm_camera_sensor_ov8810_data, + }, +}; + +static void config_incrediblec_flashlight_gpios(void) +{ + static uint32_t flashlight_gpio_table[] = { + PCOM_GPIO_CFG(INCREDIBLEC_GPIO_FLASHLIGHT_TORCH, 0, + GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(INCREDIBLEC_GPIO_FLASHLIGHT_FLASH, 0, + GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(INCREDIBLEC_GPIO_FLASHLIGHT_FLASH_ADJ, 0, + GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), + }; + + config_gpio_table(flashlight_gpio_table, + ARRAY_SIZE(flashlight_gpio_table)); +} + +static struct flashlight_platform_data incrediblec_flashlight_data = { + .gpio_init = config_incrediblec_flashlight_gpios, + .torch = INCREDIBLEC_GPIO_FLASHLIGHT_TORCH, + .flash = INCREDIBLEC_GPIO_FLASHLIGHT_FLASH, + .flash_adj = INCREDIBLEC_GPIO_FLASHLIGHT_FLASH_ADJ, + .flash_duration_ms = 600, + .led_count = 1, +}; + +static struct platform_device incrediblec_flashlight_device = { + .name = "flashlight", + .dev = { + .platform_data = &incrediblec_flashlight_data, + }, +}; + +static void curcial_oj_shutdown(int enable) +{ + uint8_t cmd[3]; + memset(cmd, 0, sizeof(uint8_t)*3); + + cmd[2] = 0x80; + if (enable) + microp_i2c_write(0x91, cmd, 3); + else + microp_i2c_write(0x90, cmd, 3); +} + +static int curcial_oj_poweron(int on) +{ + struct vreg *oj_power = vreg_get(0, "synt"); + if (IS_ERR(oj_power)) { + printk(KERN_ERR "%s: Error power domain\n", __func__); + return 0; + } + + if (on) { + vreg_set_level(oj_power, 2750); + vreg_enable(oj_power); + } else + vreg_disable(oj_power); + + printk(KERN_INFO "%s: OJ power enable(%d)\n", __func__, on); + return 1; +}; +static void curcial_oj_adjust_xy(uint8_t *data, int16_t *mSumDeltaX, int16_t *mSumDeltaY) +{ + int8_t deltaX; + int8_t deltaY; + + + if (data[2] == 0x80) + data[2] = 0x81; + if (data[1] == 0x80) + data[1] = 0x81; + if (0) { + deltaX = (1)*((int8_t) data[2]); /*X=2*/ + deltaY = (1)*((int8_t) data[1]); /*Y=1*/ + } else { + deltaX = (1)*((int8_t) data[1]); + deltaY = (1)*((int8_t) data[2]); + } + *mSumDeltaX += -((int16_t)deltaX); + *mSumDeltaY += -((int16_t)deltaY); +} +static struct curcial_oj_platform_data incrediblec_oj_data = { + .oj_poweron = curcial_oj_poweron, + .oj_shutdown = curcial_oj_shutdown, + .oj_adjust_xy = curcial_oj_adjust_xy, + .microp_version = INCREDIBLEC_MICROP_VER, + .debugflag = 0, + .mdelay_time = 0, + .normal_th = 8, + .xy_ratio = 15, + .interval = 20, + .swap = true, + .ap_code = false, + .x = 1, + .y = 1, + .share_power = true, + .Xsteps = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}, + .Ysteps = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}, + .sht_tbl = {0, 2000, 2250, 2500, 2750, 3000}, + .pxsum_tbl = {0, 0, 40, 50, 60, 70}, + .degree = 6, + .irq = MSM_uP_TO_INT(12), +}; + +static struct platform_device incrediblec_oj = { + .name = CURCIAL_OJ_NAME, + .id = -1, + .dev = { + .platform_data = &incrediblec_oj_data, + } +}; + +static struct msm_serial_hs_platform_data msm_uart_dm1_pdata = { + .rx_wakeup_irq = -1, + .inject_rx_on_wakeup = 0, + .exit_lpm_cb = bcm_bt_lpm_exit_lpm_locked, +}; + +static struct 
bcm_bt_lpm_platform_data bcm_bt_lpm_pdata = { + .gpio_wake = INCREDIBLEC_GPIO_BT_CHIP_WAKE, + .gpio_host_wake = INCREDIBLEC_GPIO_BT_HOST_WAKE, + .request_clock_off_locked = msm_hs_request_clock_off_locked, + .request_clock_on_locked = msm_hs_request_clock_on_locked, +}; + +struct platform_device bcm_bt_lpm_device = { + .name = "bcm_bt_lpm", + .id = 0, + .dev = { + .platform_data = &bcm_bt_lpm_pdata, + }, +}; + +static struct platform_device *devices[] __initdata = { + &msm_device_uart1, + &bcm_bt_lpm_device, + &msm_device_uart_dm1, + &htc_battery_pdev, + &htc_headset_mgr, + &htc_headset_gpio, + &ram_console_device, + &incrediblec_rfkill, + &msm_device_smd, + &msm_device_nand, + &msm_device_hsusb, + &usb_mass_storage_device, +#ifdef CONFIG_USB_ANDROID_RNDIS + &rndis_device, +#endif + &android_usb_device, +#ifndef CONFIG_ION_MSM + &android_pmem_mdp_device, +#else + &ion_dev, +#endif + &android_pmem_adsp_device, +#ifdef CONFIG_720P_CAMERA + &android_pmem_venc_device, +#else + &android_pmem_camera_device, +#endif + &msm_camera_sensor_ov8810, + &msm_kgsl_3d0, + &msm_device_i2c, + &incrediblec_flashlight_device, + &incrediblec_leds, + +#if defined(CONFIG_SPI_QSD) + &msm_device_spi, +#endif + &incrediblec_oj, +}; + +static uint32_t bt_gpio_table_rev_CX[] = { + PCOM_GPIO_CFG(INCREDIBLEC_GPIO_BT_UART1_RTS, 2, GPIO_OUTPUT, + GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(INCREDIBLEC_GPIO_BT_UART1_CTS, 2, GPIO_INPUT, + GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(INCREDIBLEC_GPIO_BT_UART1_RX, 2, GPIO_INPUT, + GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(INCREDIBLEC_GPIO_BT_UART1_TX, 2, GPIO_OUTPUT, + GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(INCREDIBLEC_GPIO_BT_RESET_N, 0, GPIO_OUTPUT, + GPIO_PULL_DOWN, GPIO_4MA), + PCOM_GPIO_CFG(INCREDIBLEC_GPIO_BT_SHUTDOWN_N, 0, GPIO_OUTPUT, + GPIO_PULL_DOWN, GPIO_4MA), + PCOM_GPIO_CFG(INCREDIBLEC_GPIO_BT_CHIP_WAKE, 0, GPIO_OUTPUT, + GPIO_PULL_DOWN, GPIO_4MA), + PCOM_GPIO_CFG(INCREDIBLEC_GPIO_BT_HOST_WAKE, 0, GPIO_INPUT, + GPIO_PULL_DOWN, GPIO_4MA), +}; + + +static uint32_t usb_phy_3v3_table[] = { + PCOM_GPIO_CFG(INCREDIBLEC_USB_PHY_3V3_ENABLE, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA) +}; + +static uint32_t incrediblec_uart_gpio_table[] = { + /* RX */ + PCOM_GPIO_CFG(INCREDIBLEC_GPIO_UART3_RX, 3, GPIO_INPUT, GPIO_NO_PULL, + GPIO_4MA), + /* TX */ + PCOM_GPIO_CFG(INCREDIBLEC_GPIO_UART3_TX, 3, GPIO_INPUT, GPIO_NO_PULL, + GPIO_4MA), +}; + +static void incrediblec_config_uart_gpios(void) +{ + config_gpio_table(incrediblec_uart_gpio_table, + ARRAY_SIZE(incrediblec_uart_gpio_table)); +} + + +#define ATAG_BDADDR 0x43294329 +#define ATAG_BDADDR_SIZE 4 +#define BDADDR_STR_SIZE 18 + +static char bdaddr[BDADDR_STR_SIZE]; + +module_param_string(bdaddr, bdaddr, sizeof(bdaddr), 0400); +MODULE_PARM_DESC(bdaddr, "bluetooth address"); + +static int __init parse_tag_bdaddr(const struct tag *tag) +{ + unsigned char *b = (unsigned char *)&tag->u; + + if (tag->hdr.size != ATAG_BDADDR_SIZE) + return -EINVAL; + + snprintf(bdaddr, BDADDR_STR_SIZE, "%02X:%02X:%02X:%02X:%02X:%02X", + b[0], b[1], b[2], b[3], b[4], b[5]); + + return 0; +} + +__tagtable(ATAG_BDADDR, parse_tag_bdaddr); + +static int __init incrediblec_board_serialno_setup(char *serialno) +{ +#ifdef CONFIG_USB_ANDROID_RNDIS + int i; + char *src = serialno; + + /* create a fake MAC address from our serial number. + * first byte is 0x02 to signify locally administered. 
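+ * (0x02 is the IEEE locally-administered bit of the first octet, so the
+ * made-up address cannot collide with a vendor-assigned one; the loop below
+ * folds the USB serial number into the remaining five bytes with XOR.)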
+ */ + rndis_pdata.ethaddr[0] = 0x02; + for (i = 0; *src; i++) { + /* XOR the USB serial across the remaining bytes */ + rndis_pdata.ethaddr[i % (ETH_ALEN - 1) + 1] ^= *src++; + } +#endif + + android_usb_pdata.serial_number = serialno; + msm_hsusb_pdata.serial_number = serialno; + return 1; +} + +static struct msm_acpu_clock_platform_data incrediblec_clock_data = { + .acpu_switch_time_us = 20, + .max_speed_delta_khz = 256000, + .vdd_switch_time_us = 62, + .power_collapse_khz = 245000, + .wait_for_irq_khz = 245000, +}; + +static unsigned incrediblec_perf_acpu_table[] = { + 245000000, + 576000000, + 998400000, +}; + +static struct perflock_platform_data incrediblec_perflock_data = { + .perf_acpu_table = incrediblec_perf_acpu_table, + .table_size = ARRAY_SIZE(incrediblec_perf_acpu_table), +}; + +int incrediblec_init_mmc(int sysrev); + +static int OJ_BMA_power(void) +{ + int ret; + struct vreg *vreg = vreg_get(0, "synt"); + + if (!vreg) { + printk(KERN_ERR "%s: vreg error\n", __func__); + return -EIO; + } + ret = vreg_set_level(vreg, 2850); + + ret = vreg_enable(vreg); + if (ret < 0) + printk(KERN_ERR "%s: vreg enable failed\n", __func__); + + return 0; +} + +unsigned int incrediblec_get_engineerid(void) +{ + return engineerid; +} + +static ssize_t incrediblec_virtual_keys_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + if (engineerid > 1 && system_rev > 1) { + /* center: x: home: 45, menu: 152, back: 318, search 422, y: 830 */ + return sprintf(buf, + __stringify(EV_KEY) ":" __stringify(KEY_HOME) ":47:830:74:50" + ":" __stringify(EV_KEY) ":" __stringify(KEY_MENU) ":155:830:80:50" + ":" __stringify(EV_KEY) ":" __stringify(KEY_BACK) ":337:830:90:50" + ":" __stringify(EV_KEY) ":" __stringify(KEY_SEARCH) ":434:830:60:50" + "\n"); + } else { + /* center: x: home: 50, menu: 184, back: 315, search 435, y: 830*/ + return sprintf(buf, + __stringify(EV_KEY) ":" __stringify(KEY_HOME) ":50:830:98:50" + ":" __stringify(EV_KEY) ":" __stringify(KEY_MENU) ":184:830:120:50" + ":" __stringify(EV_KEY) ":" __stringify(KEY_BACK) ":315:830:100:50" + ":" __stringify(EV_KEY) ":" __stringify(KEY_SEARCH) ":435:830:88:50" + "\n"); + } + +} + +static struct kobj_attribute incrediblec_virtual_keys_attr = { + .attr = { + .name = "virtualkeys.atmel-touchscreen", + .mode = S_IRUGO, + }, + .show = &incrediblec_virtual_keys_show, +}; + +static struct attribute *incrediblec_properties_attrs[] = { + &incrediblec_virtual_keys_attr.attr, + NULL +}; + +static struct attribute_group incrediblec_properties_attr_group = { + .attrs = incrediblec_properties_attrs, +}; + +static void incrediblec_reset(void) +{ + gpio_set_value(INCREDIBLEC_GPIO_PS_HOLD, 0); +} + +static void __init incrediblec_init(void) +{ + int ret; + struct kobject *properties_kobj; + + printk("incrediblec_init() revision=%d, engineerid=%d\n", system_rev, engineerid); + + msm_hw_reset_hook = incrediblec_reset; + + if (0 == engineerid || 0xF == engineerid) { +#ifdef CONFIG_ION_MSM + ion_pdata.heaps[1].base = MSM_PMEM_MDP_XA_BASE; +#else + mdp_pmem_pdata.start = MSM_PMEM_MDP_XA_BASE; +#endif + android_pmem_adsp_pdata.start = MSM_PMEM_ADSP_XA_BASE; + } else if (engineerid >= 3) { +#ifdef CONFIG_ION_MSM + ion_pdata.heaps[1].base = MSM_PMEM_MDP_BASE + MSM_MEM_128MB_OFFSET; +#else + mdp_pmem_pdata.start = MSM_PMEM_MDP_BASE + MSM_MEM_128MB_OFFSET; +#endif + android_pmem_adsp_pdata.start = MSM_PMEM_ADSP_BASE + MSM_MEM_128MB_OFFSET; + } + + incrediblec_board_serialno_setup(board_serialno()); + + OJ_BMA_power(); + + 
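+ /* Register the ACPU clock-switching parameters and the perflock
+ * frequency table before the platform devices are added below. */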
msm_acpu_clock_init(&incrediblec_clock_data); + + perflock_init(&incrediblec_perflock_data); + +#if defined(CONFIG_MSM_SERIAL_DEBUGGER) + msm_serial_debug_init(MSM_UART1_PHYS, INT_UART1, + &msm_device_uart1.dev, 1, INT_UART1_RX); +#endif + + bcm_bt_lpm_pdata.gpio_wake = INCREDIBLEC_GPIO_BT_CHIP_WAKE; + config_gpio_table(bt_gpio_table_rev_CX, ARRAY_SIZE(bt_gpio_table_rev_CX)); + +#ifdef CONFIG_SPI_QSD + msm_device_spi.dev.platform_data = &incrediblec_spi_pdata; +#endif + + #ifdef CONFIG_SERIAL_MSM_HS + msm_device_uart_dm1.dev.platform_data = &msm_uart_dm1_pdata; + msm_device_uart_dm1.name = "msm_serial_hs"; /* for bcm */ + #endif + + incrediblec_config_uart_gpios(); + config_gpio_table(usb_phy_3v3_table, ARRAY_SIZE(usb_phy_3v3_table)); + config_gpio_table(camera_off_gpio_table, + ARRAY_SIZE(camera_off_gpio_table)); + /*gpio_direction_output(INCREDIBLEC_GPIO_TP_LS_EN, 0);*/ + gpio_direction_output(INCREDIBLEC_GPIO_TP_EN, 0); + + incrediblec_audio_init(); +#ifdef CONFIG_MICROP_COMMON + incrediblec_microp_init(); +#endif + + if (system_rev >= 2) { + microp_data.num_functions = ARRAY_SIZE(microp_functions_1); + microp_data.microp_function = microp_functions_1; + } + + platform_add_devices(devices, ARRAY_SIZE(devices)); + + platform_add_devices(msm_footswitch_devices, + msm_num_footswitch_devices); + + if (!opt_usb_h2w_sw) { + msm_device_hsusb.dev.platform_data = &msm_hsusb_pdata; + } + if (system_rev > 2) { + incrediblec_atmel_ts_data[0].config_T9[7] = 33; + incrediblec_atmel_ts_data[0].object_crc[0] = 0x2E; + incrediblec_atmel_ts_data[0].object_crc[1] = 0x80; + incrediblec_atmel_ts_data[0].object_crc[2] = 0xE0; + } + i2c_register_board_info(0, i2c_devices, ARRAY_SIZE(i2c_devices)); + + ret = incrediblec_init_mmc(system_rev); + if (ret != 0) + pr_crit("%s: Unable to initialize MMC\n", __func__); + + properties_kobj = kobject_create_and_add("board_properties", NULL); + if (properties_kobj) + ret = sysfs_create_group(properties_kobj, + &incrediblec_properties_attr_group); + if (!properties_kobj || ret) + pr_err("failed to create board_properties\n"); + + msm_init_pmic_vibrator(); + +} + +static void __init incrediblec_fixup(struct machine_desc *desc, struct tag *tags, + char **cmdline, struct meminfo *mi) +{ + engineerid = parse_tag_engineerid(tags); + mi->nr_banks = 1; + mi->bank[0].start = PHYS_OFFSET; + if (0 == engineerid || 0xF == engineerid) + mi->bank[0].size = (MSM_LINUX_XA_SIZE); + else if (engineerid <= 2) { /* 4G3G */ + mi->bank[0].size = MSM_EBI1_BANK0_SIZE; + mi->nr_banks++; + mi->bank[1].start = MSM_EBI1_BANK1_BASE; + mi->bank[1].size = MSM_EBI1_BANK1_SIZE; + } else { + mi->bank[0].size = MSM_EBI1_BANK0_SIZE; + mi->nr_banks++; + mi->bank[1].start = MSM_EBI1_BANK1_BASE; + mi->bank[1].size = MSM_EBI1_BANK1_SIZE + MSM_MEM_128MB_OFFSET; + } +} + +static void __init incrediblec_map_io(void) +{ + msm_map_qsd8x50_io(); + msm_clock_init(msm_clocks_8x50, msm_num_clocks_8x50); + if (socinfo_init() < 0) + printk(KERN_ERR "%s: socinfo_init() failed!\n",__func__); +} + +extern struct sys_timer msm_timer; + +MACHINE_START(INCREDIBLEC, "incrediblec") +#ifdef CONFIG_MSM_DEBUG_UART + .phys_io = MSM_DEBUG_UART_PHYS, + .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc, +#endif + .boot_params = 0x20000100, + .fixup = incrediblec_fixup, + .map_io = incrediblec_map_io, + .init_irq = msm_init_irq, + .init_machine = incrediblec_init, + .timer = &msm_timer, +MACHINE_END diff --git a/arch/arm/mach-msm/board-incrediblec.h b/arch/arm/mach-msm/board-incrediblec.h new file mode 100644 index 
0000000000000..14fb432ad8ecf --- /dev/null +++ b/arch/arm/mach-msm/board-incrediblec.h @@ -0,0 +1,197 @@ +/* arch/arm/mach-msm/board-incrediblec.h + * + * Copyright (C) 2009 HTC Corporation. + * Author: Haley Teng + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. +*/ + +#ifndef __ARCH_ARM_MACH_MSM_BOARD_INCREDIBLEC_H +#define __ARCH_ARM_MACH_MSM_BOARD_INCREDIBLEC_H + +#include + + +#define MSM_SMI_BASE 0x02B00000 +#define MSM_SMI_SIZE 0x01500000 + +#ifdef CONFIG_720P_CAMERA +#define MSM_PMEM_VENC_BASE 0x02B00000 +#define MSM_PMEM_VENC_SIZE 0x00800000 +/* rest 4MB SMI */ +#else +#define MSM_PMEM_CAMERA_BASE 0x02B00000 +#define MSM_PMEM_CAMERA_SIZE 0x00C00000 +#endif + +#define MSM_GPU_MEM_BASE 0x03700000 +#define MSM_GPU_MEM_SIZE 0x00300000 + +#define MSM_RAM_CONSOLE_BASE 0x03A00000 +#define MSM_RAM_CONSOLE_SIZE 0x00040000 + +#define MSM_FB_BASE 0x03B00000 +#define MSM_FB_SIZE 0x00300000 + +#define MSM_EBI1_BANK0_BASE 0x20000000 +#define MSM_EBI1_BANK0_SIZE 0x0E800000 + +#define MSM_EBI1_BANK1_BASE 0x30000000 +#define MSM_EBI1_BANK1_SIZE 0x03E00000 + +#define MSM_PMEM_MDP_BASE 0x33E00000 +#define MSM_PMEM_MDP_SIZE 0x02000000 + +#define MSM_PMEM_ADSP_BASE 0x35E00000 +#define MSM_PMEM_ADSP_SIZE 0x02000000 + +#define MSM_TV_FB_BASE 0x37E00000 +#define MSM_TV_FB_SIZE 0x00200000 + +#define MSM_MEM_128MB_OFFSET 0x08000000 + +/* 4G2G MCP */ +#define MSM_PMEM_ADSP_XA_BASE 0x29000000 + +#define MSM_TV_FB_XA_BASE 0x2B900000 + +#define MSM_PMEM_MDP_XA_BASE 0x2BB00000 + +#define MSM_GPU_MEM_XA_BASE 0x03700000 + +#define MSM_LINUX_XA_SIZE 0x09000000 +/* 4G2G END */ + +#define INCREDIBLEC_GPIO_UP_INT_N 35 +#define INCREDIBLEC_GPIO_UP_RESET_N 108 + +#define INCREDIBLEC_GPIO_TP_RST 34 +#define INCREDIBLEC_GPIO_TP_INT_N 145 +/*#define INCREDIBLEC_GPIO_TP_LS_EN 93*/ +#define INCREDIBLEC_GPIO_TP_EN 98 + +#define INCREDIBLEC_GPIO_SDMC_CD_N 28 + +/* BT */ +#define INCREDIBLEC_GPIO_BT_UART1_RTS (43) +#define INCREDIBLEC_GPIO_BT_UART1_CTS (44) +#define INCREDIBLEC_GPIO_BT_UART1_RX (45) +#define INCREDIBLEC_GPIO_BT_UART1_TX (46) +#define INCREDIBLEC_GPIO_BT_RESET_N (146) +#define INCREDIBLEC_GPIO_BT_HOST_WAKE (86) +#define INCREDIBLEC_GPIO_BT_CHIP_WAKE (87) +#define INCREDIBLEC_GPIO_BT_SHUTDOWN_N (128) + +#define INCREDIBLEC_GPIO_COMPASS_RST_N 107 +#define INCREDIBLEC_GPIO_COMPASS_INT_N 36 +#define INCREDIBLEC_PROJECT_NAME "incrediblec" +#define INCREDIBLEC_LAYOUTS { \ + { { 0, 1, 0}, { -1, 0, 0}, {0, 0, 1} }, \ + { { 0, -1, 0}, { -1, 0, 0}, {0, 0, 1} }, \ + { { -1, 0, 0}, { 0, -1, 0}, {0, 0, 1} }, \ + { { 1, 0, 0}, { 0, 0, 1}, {0, 1, 0} } \ + } + +/* Proximity */ +#define INCREDIBLEC_GPIO_PROXIMITY_EN_N 120 + +/* Battery */ +#define INCREDIBLEC_GPIO_MBAT_IN 39 +#define INCREDIBLEC_GPIO_MCHG_EN_N 22 +#define INCREDIBLEC_GPIO_ISET 16 + +/*Audio */ +#define INCREDIBLEC_AUD_JACKHP_EN 157 +#define INCREDIBLEC_AUD_2V5_EN 26 + +/* Bluetooth PCM */ +#define INCREDIBLEC_BT_PCM_OUT 68 +#define INCREDIBLEC_BT_PCM_IN 69 +#define INCREDIBLEC_BT_PCM_SYNC 70 +#define INCREDIBLEC_BT_PCM_CLK 71 + +#define INCREDIBLEC_GPIO_MENU_KEY 40 +#define INCREDIBLEC_GPIO_VOLUME_UP 41 +#define INCREDIBLEC_GPIO_VOLUME_DOWN 42 +#define 
INCREDIBLEC_GPIO_POWER_KEY 94 +#define INCREDIBLEC_GPIO_OJ_ACTION_XB 33 + +/* flash light */ +#define INCREDIBLEC_GPIO_FLASHLIGHT_FLASH (84) +#define INCREDIBLEC_GPIO_FLASHLIGHT_TORCH (85) +#define INCREDIBLEC_GPIO_FLASHLIGHT_FLASH_ADJ (31) + +/* 35mm headset */ +#define INCREDIBLEC_GPIO_35MM_HEADSET_DET (153) +#define INCREDIBLEC_GPIO_CABLE_IN1 (38) +#define INCREDIBLEC_GPIO_CABLE_IN2 (37) +#define INCREDIBLEC_GPIO_H2W_DATA (139) +#define INCREDIBLEC_GPIO_H2W_CLK (140) +#define INCREDIBLEC_GPIO_UART3_RX (139) +#define INCREDIBLEC_GPIO_UART3_TX (140) + +/* Wifi */ +#define INCREDIBLEC_GPIO_WIFI_SHUTDOWN_N 127 +#define INCREDIBLEC_GPIO_WIFI_IRQ 152 + +/* SPI */ +#define INCREDIBLEC_SPI_CLK (17) +#define INCREDIBLEC_SPI_DO (18) +#define INCREDIBLEC_SPI_CS (20) + +#define INCREDIBLEC_LCD_RST_ID1 (29) +#define INCREDIBLEC_LCD_ID0 (32) + +/* TV-out */ +#define INCREDIBLEC_TV_LOAD_DET (82) +#define INCREDIBLEC_VIDEO_SHDN_N (109) +#define INCREDIBLEC_AV_SWITCH (119) + +/* LCD */ +#define INCREDIBLEC_LCD_R0 (113) +#define INCREDIBLEC_LCD_R1 (114) +#define INCREDIBLEC_LCD_R2 (115) +#define INCREDIBLEC_LCD_R3 (116) +#define INCREDIBLEC_LCD_R4 (117) +#define INCREDIBLEC_LCD_R5 (118) +#define INCREDIBLEC_LCD_G0 (121) +#define INCREDIBLEC_LCD_G1 (122) +#define INCREDIBLEC_LCD_G2 (123) +#define INCREDIBLEC_LCD_G3 (124) +#define INCREDIBLEC_LCD_G4 (125) +#define INCREDIBLEC_LCD_G5 (126) +#define INCREDIBLEC_LCD_B0 (129) +#define INCREDIBLEC_LCD_B1 (130) +#define INCREDIBLEC_LCD_B2 (131) +#define INCREDIBLEC_LCD_B3 (132) +#define INCREDIBLEC_LCD_B4 (133) +#define INCREDIBLEC_LCD_B5 (134) +#define INCREDIBLEC_LCD_PCLK (135) +#define INCREDIBLEC_LCD_VSYNC (136) +#define INCREDIBLEC_LCD_HSYNC (137) +#define INCREDIBLEC_LCD_DE (138) + +/* USB PHY 3V3 enable*/ +#define INCREDIBLEC_USB_PHY_3V3_ENABLE (104) +#define INCREDIBLEC_GPIO_USB_CABLE_IN_PIN (144) +#define INCREDIBLEC_GPIO_USB_ID_PIN (112) + +/* AP Key Led turn on*/ +#define INCREDIBLEC_AP_KEY_LED_EN (143) + +/*Camera*/ +#define INCREDIBLEC_CAM_PWD (100) +#define INCREDIBLEC_CAM_RST (99) + +#define INCREDIBLEC_GPIO_PS_HOLD (25) + +unsigned int incrediblec_get_engineerid(void); + +#endif /* __ARCH_ARM_MACH_MSM_BOARD_INCREDIBLEC_H */ diff --git a/arch/arm/mach-msm/board-mahimahi-audio.c b/arch/arm/mach-msm/board-mahimahi-audio.c new file mode 100644 index 0000000000000..94d70f507bc70 --- /dev/null +++ b/arch/arm/mach-msm/board-mahimahi-audio.c @@ -0,0 +1,273 @@ +/* arch/arm/mach-msm/board-mahimahi-audio.c + * + * Copyright (C) 2009 HTC Corporation + * Copyright (C) 2009 Google Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include + +#include "board-mahimahi.h" +#include "proc_comm.h" +#include "pmic.h" +#include "board-mahimahi-tpa2018d1.h" + +#if 0 +#define D(fmt, args...) printk(KERN_INFO "Audio: "fmt, ##args) +#else +#define D(fmt, args...) 
do {} while (0) +#endif + +static struct mutex mic_lock; +static struct mutex bt_sco_lock; + +static struct q6_hw_info q6_audio_hw[Q6_HW_COUNT] = { + [Q6_HW_HANDSET] = { + .min_gain = -1500, + .max_gain = 1199, + }, + [Q6_HW_HEADSET] = { + .min_gain = -2000, + .max_gain = 1199, + }, + [Q6_HW_SPEAKER] = { + .min_gain = -1100, + .max_gain = 400, + }, + [Q6_HW_TTY] = { + .min_gain = -1600, + .max_gain = 400, + }, + [Q6_HW_BT_SCO] = { + .min_gain = -1600, + .max_gain = 400, + }, + [Q6_HW_BT_A2DP] = { + .min_gain = -1600, + .max_gain = 400, + }, +}; + +void mahimahi_headset_enable(int en) +{ + D("%s %d\n", __func__, en); + /* enable audio amp */ + if (en) mdelay(15); + gpio_set_value(MAHIMAHI_AUD_JACKHP_EN, !!en); +} + +void mahimahi_speaker_enable(int en) +{ + struct spkr_config_mode scm; + memset(&scm, 0, sizeof(scm)); + + D("%s %d\n", __func__, en); + if (en) { + scm.is_right_chan_en = 0; + scm.is_left_chan_en = 1; + scm.is_stereo_en = 0; + scm.is_hpf_en = 1; + pmic_spkr_en_mute(LEFT_SPKR, 0); + pmic_spkr_en_mute(RIGHT_SPKR, 0); + pmic_set_spkr_configuration(&scm); + pmic_spkr_en(LEFT_SPKR, 1); + pmic_spkr_en(RIGHT_SPKR, 0); + + /* unmute */ + pmic_spkr_en_mute(LEFT_SPKR, 1); + } else { + pmic_spkr_en_mute(LEFT_SPKR, 0); + + pmic_spkr_en(LEFT_SPKR, 0); + pmic_spkr_en(RIGHT_SPKR, 0); + + pmic_set_spkr_configuration(&scm); + } + + if (is_cdma_version(system_rev)) + tpa2018d1_set_speaker_amp(en); +} + +void mahimahi_receiver_enable(int en) +{ + if (is_cdma_version(system_rev) && + ((system_rev == 0xC1) || (system_rev == 0xC2))) { + struct spkr_config_mode scm; + memset(&scm, 0, sizeof(scm)); + + D("%s %d\n", __func__, en); + if (en) { + scm.is_right_chan_en = 1; + scm.is_left_chan_en = 0; + scm.is_stereo_en = 0; + scm.is_hpf_en = 1; + pmic_spkr_en_mute(RIGHT_SPKR, 0); + pmic_set_spkr_configuration(&scm); + pmic_spkr_en(RIGHT_SPKR, 1); + + /* unmute */ + pmic_spkr_en_mute(RIGHT_SPKR, 1); + } else { + pmic_spkr_en_mute(RIGHT_SPKR, 0); + + pmic_spkr_en(RIGHT_SPKR, 0); + + pmic_set_spkr_configuration(&scm); + } + } +} + +static uint32_t bt_sco_enable[] = { + PCOM_GPIO_CFG(MAHIMAHI_BT_PCM_OUT, 1, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(MAHIMAHI_BT_PCM_IN, 1, GPIO_INPUT, + GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(MAHIMAHI_BT_PCM_SYNC, 2, GPIO_INPUT, + GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(MAHIMAHI_BT_PCM_CLK, 2, GPIO_INPUT, + GPIO_NO_PULL, GPIO_2MA), +}; + +static uint32_t bt_sco_disable[] = { + PCOM_GPIO_CFG(MAHIMAHI_BT_PCM_OUT, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(MAHIMAHI_BT_PCM_IN, 0, GPIO_INPUT, + GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(MAHIMAHI_BT_PCM_SYNC, 0, GPIO_INPUT, + GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(MAHIMAHI_BT_PCM_CLK, 0, GPIO_INPUT, + GPIO_NO_PULL, GPIO_2MA), +}; + +void mahimahi_bt_sco_enable(int en) +{ + static int bt_sco_refcount; + D("%s %d\n", __func__, en); + + mutex_lock(&bt_sco_lock); + if (en) { + if (++bt_sco_refcount == 1) + config_gpio_table(bt_sco_enable, + ARRAY_SIZE(bt_sco_enable)); + } else { + if (--bt_sco_refcount == 0) { + config_gpio_table(bt_sco_disable, + ARRAY_SIZE(bt_sco_disable)); + gpio_set_value(MAHIMAHI_BT_PCM_OUT, 0); + } + } + mutex_unlock(&bt_sco_lock); +} + +void mahimahi_mic_enable(int en) +{ + static int old_state = 0, new_state = 0; + + D("%s %d\n", __func__, en); + + mutex_lock(&mic_lock); + if (!!en) + new_state++; + else + new_state--; + + if (new_state == 1 && old_state == 0) { + gpio_set_value(MAHIMAHI_AUD_2V5_EN, 1); + mdelay(60); + } else if (new_state == 0 && old_state == 1) + 
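+ /* last user of the mic has released it: cut the 2.5V rail that
+ * supplies the microphone bias */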
gpio_set_value(MAHIMAHI_AUD_2V5_EN, 0); + else + D("%s: do nothing %d %d\n", __func__, old_state, new_state); + + old_state = new_state; + mutex_unlock(&mic_lock); +} + +void mahimahi_analog_init(void) +{ + D("%s\n", __func__); + /* stereo pmic init */ + pmic_spkr_set_gain(LEFT_SPKR, SPKR_GAIN_PLUS12DB); + pmic_spkr_set_gain(RIGHT_SPKR, SPKR_GAIN_PLUS12DB); + pmic_spkr_en_right_chan(OFF_CMD); + pmic_spkr_en_left_chan(OFF_CMD); + pmic_spkr_add_right_left_chan(OFF_CMD); + pmic_spkr_en_stereo(OFF_CMD); + pmic_spkr_select_usb_with_hpf_20hz(OFF_CMD); + pmic_spkr_bypass_mux(OFF_CMD); + pmic_spkr_en_hpf(ON_CMD); + pmic_spkr_en_sink_curr_from_ref_volt_cir(OFF_CMD); + pmic_spkr_set_mux_hpf_corner_freq(SPKR_FREQ_0_73KHZ); + pmic_mic_set_volt(MIC_VOLT_1_80V); + + gpio_request(MAHIMAHI_AUD_JACKHP_EN, "aud_jackhp_en"); + gpio_request(MAHIMAHI_BT_PCM_OUT, "bt_pcm_out"); + + gpio_direction_output(MAHIMAHI_AUD_JACKHP_EN, 0); + + mutex_lock(&bt_sco_lock); + config_gpio_table(bt_sco_disable, + ARRAY_SIZE(bt_sco_disable)); + gpio_direction_output(MAHIMAHI_BT_PCM_OUT, 0); + mutex_unlock(&bt_sco_lock); +} + +int mahimahi_get_rx_vol(uint8_t hw, int level) +{ + int vol; + + if (level > 100) + level = 100; + else if (level < 0) + level = 0; + + if (is_cdma_version(system_rev) && hw == Q6_HW_HANDSET) { + int handset_volume[6] = { -1600, -1300, -1000, -600, -300, 0 }; + vol = handset_volume[5 * level / 100]; + } else { + struct q6_hw_info *info; + info = &q6_audio_hw[hw]; + vol = info->min_gain + ((info->max_gain - info->min_gain) * level) / 100; + } + + D("%s %d\n", __func__, vol); + return vol; +} + +static struct qsd_acoustic_ops acoustic = { + .enable_mic_bias = mahimahi_mic_enable, +}; + +static struct q6audio_analog_ops ops = { + .init = mahimahi_analog_init, + .speaker_enable = mahimahi_speaker_enable, + .headset_enable = mahimahi_headset_enable, + .receiver_enable = mahimahi_receiver_enable, + .bt_sco_enable = mahimahi_bt_sco_enable, + .int_mic_enable = mahimahi_mic_enable, + .ext_mic_enable = mahimahi_mic_enable, + .get_rx_vol = mahimahi_get_rx_vol, +}; + +void __init mahimahi_audio_init(void) +{ + mutex_init(&mic_lock); + mutex_init(&bt_sco_lock); + q6audio_register_analog_ops(&ops); + acoustic_register_ops(&acoustic); + if (is_cdma_version(system_rev) && + ((system_rev == 0xC1) || (system_rev == 0xC2))) + q6audio_set_acdb_file("default_PMIC.acdb"); +} diff --git a/arch/arm/mach-msm/board-mahimahi-keypad.c b/arch/arm/mach-msm/board-mahimahi-keypad.c new file mode 100644 index 0000000000000..ab38847d15e21 --- /dev/null +++ b/arch/arm/mach-msm/board-mahimahi-keypad.c @@ -0,0 +1,265 @@ +/* arch/arm/mach-msm/board-mahimahi-keypad.c + * + * Copyright (C) 2009 Google, Inc + * Copyright (C) 2009 HTC Corporation. + * + * Author: Dima Zavin + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "board-mahimahi.h" + +struct jog_axis_info { + struct gpio_event_axis_info info; + uint16_t in_state; + uint16_t out_state; +}; + +static struct vreg *jog_vreg; +static bool jog_just_on; +static unsigned long jog_on_jiffies; + +static unsigned int mahimahi_col_gpios[] = { 33, 32, 31 }; +static unsigned int mahimahi_row_gpios[] = { 42, 41, 40 }; + +#define KEYMAP_INDEX(col, row) ((col)*ARRAY_SIZE(mahimahi_row_gpios) + (row)) +#define KEYMAP_SIZE (ARRAY_SIZE(mahimahi_col_gpios) * \ + ARRAY_SIZE(mahimahi_row_gpios)) + +/* keypad */ +static const unsigned short mahimahi_keymap[KEYMAP_SIZE] = { + [KEYMAP_INDEX(0, 0)] = KEY_VOLUMEUP, + [KEYMAP_INDEX(0, 1)] = KEY_VOLUMEDOWN, + [KEYMAP_INDEX(1, 1)] = MATRIX_KEY(1, BTN_MOUSE), +}; + +static const unsigned short mahimahi_cdma_keymap[KEYMAP_SIZE] = { + [KEYMAP_INDEX(0, 0)] = KEY_VOLUMEUP, + [KEYMAP_INDEX(0, 1)] = KEY_VOLUMEDOWN, + [KEYMAP_INDEX(1, 1)] = MATRIX_KEY(1, BTN_MOUSE), + + /* Key (2, 2) is not a physical key on mahimahi. The purpose of + * registering the unused matrix key as a dummy key is to make + * userland able to send/receive the key event for some requested tests + * in lab. of some CDMA carriers (e.g. Verizon). + */ + [KEYMAP_INDEX(2, 2)] = KEY_END, +}; + +static struct gpio_event_matrix_info mahimahi_keypad_matrix_info = { + .info.func = gpio_event_matrix_func, + .keymap = mahimahi_keymap, + .output_gpios = mahimahi_col_gpios, + .input_gpios = mahimahi_row_gpios, + .noutputs = ARRAY_SIZE(mahimahi_col_gpios), + .ninputs = ARRAY_SIZE(mahimahi_row_gpios), + .settle_time.tv.nsec = 40 * NSEC_PER_USEC, + .poll_time.tv.nsec = 20 * NSEC_PER_MSEC, + .flags = (GPIOKPF_LEVEL_TRIGGERED_IRQ | + GPIOKPF_REMOVE_PHANTOM_KEYS | + GPIOKPF_PRINT_UNMAPPED_KEYS), +}; + +static struct gpio_event_direct_entry mahimahi_keypad_key_map[] = { + { + .gpio = MAHIMAHI_GPIO_POWER_KEY, + .code = KEY_POWER, + }, +}; + +static struct gpio_event_input_info mahimahi_keypad_key_info = { + .info.func = gpio_event_input_func, + .info.no_suspend = true, + .debounce_time.tv.nsec = 5 * NSEC_PER_MSEC, + .flags = 0, + .type = EV_KEY, + .keymap = mahimahi_keypad_key_map, + .keymap_size = ARRAY_SIZE(mahimahi_keypad_key_map) +}; + +/* jogball */ +static uint16_t jogball_axis_map(struct gpio_event_axis_info *info, uint16_t in) +{ + struct jog_axis_info *ai = + container_of(info, struct jog_axis_info, info); + uint16_t out = ai->out_state; + + if (jog_just_on) { + if (jiffies == jog_on_jiffies || jiffies == jog_on_jiffies + 1) + goto ignore; + jog_just_on = 0; + } + if((ai->in_state ^ in) & 1) + out--; + if((ai->in_state ^ in) & 2) + out++; + ai->out_state = out; +ignore: + ai->in_state = in; + return out; +} + +static int jogball_power(const struct gpio_event_platform_data *pdata, bool on) +{ + if (on) { + vreg_enable(jog_vreg); + jog_just_on = 1; + jog_on_jiffies = jiffies; + } else { + vreg_disable(jog_vreg); + } + + return 0; +} + +static int jogball_power_cdma(const struct gpio_event_platform_data *pdata, bool on) +{ + if (on) { + gpio_set_value(MAHIMAHI_CDMA_JOG_2V6_EN, 1); + jog_just_on = 1; + jog_on_jiffies = jiffies; + } else { + gpio_set_value(MAHIMAHI_CDMA_JOG_2V6_EN, 0); + } + + return 0; +} + +static uint32_t jogball_x_gpios[] = { + MAHIMAHI_GPIO_BALL_LEFT, MAHIMAHI_GPIO_BALL_RIGHT, +}; +static uint32_t jogball_y_gpios[] = { + MAHIMAHI_GPIO_BALL_UP, MAHIMAHI_GPIO_BALL_DOWN, +}; + +static struct jog_axis_info jogball_x_axis = { + .info = { + .info.func = 
gpio_event_axis_func, + .count = ARRAY_SIZE(jogball_x_gpios), + .dev = 1, + .type = EV_REL, + .code = REL_X, + .decoded_size = 1U << ARRAY_SIZE(jogball_x_gpios), + .map = jogball_axis_map, + .gpio = jogball_x_gpios, + .flags = GPIOEAF_PRINT_UNKNOWN_DIRECTION, + } +}; + +static struct jog_axis_info jogball_y_axis = { + .info = { + .info.func = gpio_event_axis_func, + .count = ARRAY_SIZE(jogball_y_gpios), + .dev = 1, + .type = EV_REL, + .code = REL_Y, + .decoded_size = 1U << ARRAY_SIZE(jogball_y_gpios), + .map = jogball_axis_map, + .gpio = jogball_y_gpios, + .flags = GPIOEAF_PRINT_UNKNOWN_DIRECTION, + } +}; + +static struct gpio_event_info *mahimahi_input_info[] = { + &mahimahi_keypad_matrix_info.info, + &mahimahi_keypad_key_info.info, + &jogball_x_axis.info.info, + &jogball_y_axis.info.info, +}; + +static struct gpio_event_platform_data mahimahi_input_data = { + .names = { + "mahimahi-keypad", + "mahimahi-nav", + NULL, + }, + .info = mahimahi_input_info, + .info_count = ARRAY_SIZE(mahimahi_input_info), + .power = jogball_power, +}; + +static struct platform_device mahimahi_input_device = { + .name = GPIO_EVENT_DEV_NAME, + .id = 0, + .dev = { + .platform_data = &mahimahi_input_data, + }, +}; + +static int mahimahi_reset_keys_up[] = { + KEY_VOLUMEUP, + 0, +}; + +static struct keyreset_platform_data mahimahi_reset_keys_pdata = { + .keys_up = mahimahi_reset_keys_up, + .keys_down = { + KEY_POWER, + KEY_VOLUMEDOWN, + BTN_MOUSE, + 0 + }, +}; + +struct platform_device mahimahi_reset_keys_device = { + .name = KEYRESET_NAME, + .dev = { + .platform_data = &mahimahi_reset_keys_pdata, + }, +}; + + +static int __init mahimahi_init_keypad_jogball(void) +{ + int ret; + + if (!machine_is_mahimahi()) + return 0; + + ret = platform_device_register(&mahimahi_reset_keys_device); + if (ret != 0) + return ret; + + if (is_cdma_version(system_rev)) { + mahimahi_keypad_matrix_info.keymap = mahimahi_cdma_keymap; + /* In the CDMA version, jogball power is supplied by a gpio. */ + ret = gpio_request(MAHIMAHI_CDMA_JOG_2V6_EN, "jog_en"); + if (ret < 0) { + pr_err("%s: gpio_request(%d) failed: %d\n", __func__, + MAHIMAHI_CDMA_JOG_2V6_EN, ret); + return ret; + } + mahimahi_input_data.power = jogball_power_cdma; + } else { + /* in UMTS version, jogball power is supplied by pmic */ + jog_vreg = vreg_get(&mahimahi_input_device.dev, "gp2"); + if (jog_vreg == NULL) + return -ENOENT; + } + + ret = platform_device_register(&mahimahi_input_device); + if (ret != 0) + return ret; + + return 0; +} + +device_initcall(mahimahi_init_keypad_jogball); diff --git a/arch/arm/mach-msm/board-mahimahi-microp.c b/arch/arm/mach-msm/board-mahimahi-microp.c new file mode 100644 index 0000000000000..8161517089c9a --- /dev/null +++ b/arch/arm/mach-msm/board-mahimahi-microp.c @@ -0,0 +1,2273 @@ +/* board-mahimahi-microp.c + * Copyright (C) 2009 Google. + * Copyright (C) 2009 HTC Corporation. + * + * The Microp on mahimahi is an i2c device that supports + * the following functions + * - LEDs (Green, Amber, Jogball backlight) + * - Lightsensor + * - Headset remotekeys + * - G-sensor + * - Interrupts + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. +*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "board-mahimahi.h" + + +#define MICROP_I2C_NAME "mahimahi-microp" + +#define MICROP_LSENSOR_ADC_CHAN 6 +#define MICROP_REMOTE_KEY_ADC_CHAN 7 + +#define MICROP_I2C_WCMD_MISC 0x20 +#define MICROP_I2C_WCMD_SPI_EN 0x21 +#define MICROP_I2C_WCMD_AUTO_BL_CTL 0x23 +#define MICROP_I2C_RCMD_SPI_BL_STATUS 0x24 +#define MICROP_I2C_WCMD_BUTTONS_LED_CTRL 0x25 +#define MICROP_I2C_RCMD_VERSION 0x30 +#define MICROP_I2C_WCMD_ADC_TABLE 0x42 +#define MICROP_I2C_WCMD_LED_MODE 0x53 +#define MICROP_I2C_RCMD_GREEN_LED_REMAIN_TIME 0x54 +#define MICROP_I2C_RCMD_AMBER_RED_LED_REMAIN_TIME 0x55 +#define MICROP_I2C_RCMD_BLUE_LED_REMAIN_TIME 0x57 +#define MICROP_I2C_WCMD_JOGBALL_LED_MODE 0x5A +#define MICROP_I2C_RCMD_JOGBALL_LED_REMAIN_TIME 0x5B +#define MICROP_I2C_WCMD_JOGBALL_LED_PWM_SET 0x5C +#define MICROP_I2C_WCMD_JOGBALL_LED_PERIOD_SET 0x5D +#define MICROP_I2C_WCMD_READ_ADC_VALUE_REQ 0x60 +#define MICROP_I2C_RCMD_ADC_VALUE 0x62 +#define MICROP_I2C_WCMD_REMOTEKEY_TABLE 0x63 +#define MICROP_I2C_WCMD_LCM_REGISTER 0x70 +#define MICROP_I2C_WCMD_GSENSOR_REG 0x73 +#define MICROP_I2C_WCMD_GSENSOR_REG_DATA_REQ 0x74 +#define MICROP_I2C_RCMD_GSENSOR_REG_DATA 0x75 +#define MICROP_I2C_WCMD_GSENSOR_DATA_REQ 0x76 +#define MICROP_I2C_RCMD_GSENSOR_X_DATA 0x77 +#define MICROP_I2C_RCMD_GSENSOR_Y_DATA 0x78 +#define MICROP_I2C_RCMD_GSENSOR_Z_DATA 0x79 +#define MICROP_I2C_RCMD_GSENSOR_DATA 0x7A +#define MICROP_I2C_WCMD_OJ_REG 0x7B +#define MICROP_I2C_WCMD_OJ_REG_DATA_REQ 0x7C +#define MICROP_I2C_RCMD_OJ_REG_DATA 0x7D +#define MICROP_I2C_WCMD_OJ_POS_DATA_REQ 0x7E +#define MICROP_I2C_RCMD_OJ_POS_DATA 0x7F +#define MICROP_I2C_WCMD_GPI_INT_CTL_EN 0x80 +#define MICROP_I2C_WCMD_GPI_INT_CTL_DIS 0x81 +#define MICROP_I2C_RCMD_GPI_INT_STATUS 0x82 +#define MICROP_I2C_RCMD_GPI_STATUS 0x83 +#define MICROP_I2C_WCMD_GPI_INT_STATUS_CLR 0x84 +#define MICROP_I2C_RCMD_GPI_INT_SETTING 0x85 +#define MICROP_I2C_RCMD_REMOTE_KEYCODE 0x87 +#define MICROP_I2C_WCMD_REMOTE_KEY_DEBN_TIME 0x88 +#define MICROP_I2C_WCMD_REMOTE_PLUG_DEBN_TIME 0x89 +#define MICROP_I2C_WCMD_SIMCARD_DEBN_TIME 0x8A +#define MICROP_I2C_WCMD_GPO_LED_STATUS_EN 0x90 +#define MICROP_I2C_WCMD_GPO_LED_STATUS_DIS 0x91 + +#define IRQ_GSENSOR (1<<10) +#define IRQ_LSENSOR (1<<9) +#define IRQ_REMOTEKEY (1<<7) +#define IRQ_HEADSETIN (1<<2) +#define IRQ_SDCARD (1<<0) + +#define READ_GPI_STATE_HPIN (1<<2) +#define READ_GPI_STATE_SDCARD (1<<0) + +#define ALS_CALIBRATE_MODE 147 + +/* Check pattern, to check if ALS has been calibrated */ +#define ALS_CALIBRATED 0x6DA5 + +/* delay for deferred light sensor read */ +#define LS_READ_DELAY (HZ/2) + +/*#define DEBUG_BMA150 */ +#ifdef DEBUG_BMA150 +/* Debug logging of accelleration data */ +#define GSENSOR_LOG_MAX 2048 /* needs to be power of 2 */ +#define GSENSOR_LOG_MASK (GSENSOR_LOG_MAX - 1) + +struct gsensor_log { + ktime_t timestamp; + short x; + short y; + short z; +}; + +static DEFINE_MUTEX(gsensor_log_lock); +static struct gsensor_log gsensor_log[GSENSOR_LOG_MAX]; +static unsigned gsensor_log_head; +static unsigned gsensor_log_tail; + +void gsensor_log_status(ktime_t time, short x, short y, short z) +{ + unsigned n; + mutex_lock(&gsensor_log_lock); + n = gsensor_log_head; + gsensor_log[n].timestamp = time; + gsensor_log[n].x = x; + 
gsensor_log[n].y = y; + gsensor_log[n].z = z; + n = (n + 1) & GSENSOR_LOG_MASK; + if (n == gsensor_log_tail) + gsensor_log_tail = (gsensor_log_tail + 1) & GSENSOR_LOG_MASK; + gsensor_log_head = n; + mutex_unlock(&gsensor_log_lock); +} + +static int gsensor_log_print(struct seq_file *sf, void *private) +{ + unsigned n; + + mutex_lock(&gsensor_log_lock); + seq_printf(sf, "timestamp X Y Z\n"); + for (n = gsensor_log_tail; + n != gsensor_log_head; + n = (n + 1) & GSENSOR_LOG_MASK) { + seq_printf(sf, "%10d.%010d %6d %6d %6d\n", + gsensor_log[n].timestamp.tv.sec, + gsensor_log[n].timestamp.tv.nsec, + gsensor_log[n].x, gsensor_log[n].y, + gsensor_log[n].z); + } + mutex_unlock(&gsensor_log_lock); + return 0; +} + +static int gsensor_log_open(struct inode *inode, struct file *file) +{ + return single_open(file, gsensor_log_print, NULL); +} + +static struct file_operations gsensor_log_fops = { + .open = gsensor_log_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif /* def DEBUG_BMA150 */ + +static int microp_headset_has_mic(void); +static int microp_enable_headset_plug_event(void); +static int microp_enable_key_event(void); +static int microp_disable_key_event(void); + +static struct h35mm_platform_data mahimahi_h35mm_data = { + .plug_event_enable = microp_enable_headset_plug_event, + .headset_has_mic = microp_headset_has_mic, + .key_event_enable = microp_enable_key_event, + .key_event_disable = microp_disable_key_event, +}; + +static struct platform_device mahimahi_h35mm = { + .name = "htc_headset", + .id = -1, + .dev = { + .platform_data = &mahimahi_h35mm_data, + }, +}; + +enum led_type { + GREEN_LED, + AMBER_LED, + RED_LED, + BLUE_LED, + JOGBALL_LED, + BUTTONS_LED, + NUM_LEDS, +}; + +static uint16_t lsensor_adc_table[10] = { + 0x000, 0x001, 0x00F, 0x01E, 0x03C, 0x121, 0x190, 0x2BA, 0x35C, 0x3FF +}; + +static uint16_t remote_key_adc_table[6] = { + 0, 33, 43, 110, 129, 220 +}; + +static uint32_t golden_adc = 0xC0; +static uint32_t als_kadc; + +static struct wake_lock microp_i2c_wakelock; + +static struct i2c_client *private_microp_client; + +struct microp_int_pin { + uint16_t int_gsensor; + uint16_t int_lsensor; + uint16_t int_reset; + uint16_t int_simcard; + uint16_t int_hpin; + uint16_t int_remotekey; +}; + +struct microp_led_data { + int type; + struct led_classdev ldev; + struct mutex led_data_mutex; + struct work_struct brightness_work; + spinlock_t brightness_lock; + enum led_brightness brightness; + uint8_t mode; + uint8_t blink; +}; + +struct microp_i2c_work { + struct work_struct work; + struct i2c_client *client; + int (*intr_debounce)(uint8_t *pin_status); + void (*intr_function)(uint8_t *pin_status); +}; + +struct microp_i2c_client_data { + struct microp_led_data leds[NUM_LEDS]; + uint16_t version; + struct microp_i2c_work work; + struct delayed_work hpin_debounce_work; + struct delayed_work ls_read_work; + struct early_suspend early_suspend; + uint8_t enable_early_suspend; + uint8_t enable_reset_button; + int microp_is_suspend; + int auto_backlight_enabled; + uint8_t light_sensor_enabled; + uint8_t force_light_sensor_read; + uint8_t button_led_value; + int headset_is_in; + int is_hpin_pin_stable; + struct input_dev *ls_input_dev; + uint32_t als_kadc; + uint32_t als_gadc; + uint8_t als_calibrating; +}; + +static char *hex2string(uint8_t *data, int len) +{ + static char buf[101]; + int i; + + i = (sizeof(buf) - 1) / 4; + if (len > i) + len = i; + + for (i = 0; i < len; i++) + sprintf(buf + i * 4, "[%02X]", data[i]); + + return buf; +} + +#define 
I2C_READ_RETRY_TIMES 10 +#define I2C_WRITE_RETRY_TIMES 10 + +static int i2c_read_block(struct i2c_client *client, uint8_t addr, + uint8_t *data, int length) +{ + int retry; + int ret; + struct i2c_msg msgs[] = { + { + .addr = client->addr, + .flags = 0, + .len = 1, + .buf = &addr, + }, + { + .addr = client->addr, + .flags = I2C_M_RD, + .len = length, + .buf = data, + } + }; + + mdelay(1); + for (retry = 0; retry <= I2C_READ_RETRY_TIMES; retry++) { + ret = i2c_transfer(client->adapter, msgs, 2); + if (ret == 2) { + dev_dbg(&client->dev, "R [%02X] = %s\n", addr, + hex2string(data, length)); + return 0; + } + msleep(10); + } + + dev_err(&client->dev, "i2c_read_block retry over %d\n", + I2C_READ_RETRY_TIMES); + return -EIO; +} + +#define MICROP_I2C_WRITE_BLOCK_SIZE 21 +static int i2c_write_block(struct i2c_client *client, uint8_t addr, + uint8_t *data, int length) +{ + int retry; + uint8_t buf[MICROP_I2C_WRITE_BLOCK_SIZE]; + int ret; + + struct i2c_msg msg[] = { + { + .addr = client->addr, + .flags = 0, + .len = length + 1, + .buf = buf, + } + }; + + dev_dbg(&client->dev, "W [%02X] = %s\n", addr, + hex2string(data, length)); + + if (length + 1 > MICROP_I2C_WRITE_BLOCK_SIZE) { + dev_err(&client->dev, "i2c_write_block length too long\n"); + return -E2BIG; + } + + buf[0] = addr; + memcpy((void *)&buf[1], (void *)data, length); + + mdelay(1); + for (retry = 0; retry <= I2C_WRITE_RETRY_TIMES; retry++) { + ret = i2c_transfer(client->adapter, msg, 1); + if (ret == 1) + return 0; + msleep(10); + } + dev_err(&client->dev, "i2c_write_block retry over %d\n", + I2C_WRITE_RETRY_TIMES); + return -EIO; +} + +static int microp_read_adc(uint8_t channel, uint16_t *value) +{ + struct i2c_client *client; + int ret; + uint8_t cmd[2], data[2]; + + client = private_microp_client; + cmd[0] = 0; + cmd[1] = channel; + ret = i2c_write_block(client, MICROP_I2C_WCMD_READ_ADC_VALUE_REQ, + cmd, 2); + if (ret < 0) { + dev_err(&client->dev, "%s: request adc fail\n", __func__); + return -EIO; + } + + ret = i2c_read_block(client, MICROP_I2C_RCMD_ADC_VALUE, data, 2); + if (ret < 0) { + dev_err(&client->dev, "%s: read adc fail\n", __func__); + return -EIO; + } + *value = data[0] << 8 | data[1]; + return 0; +} + +static int microp_read_gpi_status(struct i2c_client *client, uint16_t *status) +{ + uint8_t data[2]; + int ret; + + ret = i2c_read_block(client, MICROP_I2C_RCMD_GPI_STATUS, data, 2); + if (ret < 0) { + dev_err(&client->dev, "%s: read failed\n", __func__); + return -EIO; + } + *status = (data[0] << 8) | data[1]; + return 0; +} + +static int microp_interrupt_enable(struct i2c_client *client, + uint16_t interrupt_mask) +{ + uint8_t data[2]; + int ret = -1; + + data[0] = interrupt_mask >> 8; + data[1] = interrupt_mask & 0xFF; + ret = i2c_write_block(client, MICROP_I2C_WCMD_GPI_INT_CTL_EN, data, 2); + + if (ret < 0) + dev_err(&client->dev, "%s: enable 0x%x interrupt failed\n", + __func__, interrupt_mask); + return ret; +} + +static int microp_interrupt_disable(struct i2c_client *client, + uint16_t interrupt_mask) +{ + uint8_t data[2]; + int ret = -1; + + data[0] = interrupt_mask >> 8; + data[1] = interrupt_mask & 0xFF; + ret = i2c_write_block(client, MICROP_I2C_WCMD_GPI_INT_CTL_DIS, data, 2); + + if (ret < 0) + dev_err(&client->dev, "%s: disable 0x%x interrupt failed\n", + __func__, interrupt_mask); + return ret; +} + + +/* + * SD slot card-detect support + */ +static unsigned int sdslot_cd = 0; +static void (*sdslot_status_cb)(int card_present, void *dev_id); +static void *sdslot_mmc_dev; + +int 
mahimahi_microp_sdslot_status_register( + void (*cb)(int card_present, void *dev_id), + void *dev_id) +{ + if (sdslot_status_cb) + return -EBUSY; + sdslot_status_cb = cb; + sdslot_mmc_dev = dev_id; + return 0; +} + +unsigned int mahimahi_microp_sdslot_status(struct device *dev) +{ + return sdslot_cd; +} + +static void mahimahi_microp_sdslot_update_status(int status) +{ + sdslot_cd = !(status & READ_GPI_STATE_SDCARD); + if (sdslot_status_cb) + sdslot_status_cb(sdslot_cd, sdslot_mmc_dev); +} + +/* + *Headset Support +*/ +static void hpin_debounce_do_work(struct work_struct *work) +{ + uint16_t gpi_status = 0; + struct microp_i2c_client_data *cdata; + int insert = 0; + struct i2c_client *client; + + client = private_microp_client; + cdata = i2c_get_clientdata(client); + + microp_read_gpi_status(client, &gpi_status); + insert = (gpi_status & READ_GPI_STATE_HPIN) ? 0 : 1; + if (insert != cdata->headset_is_in) { + cdata->headset_is_in = insert; + pr_debug("headset %s\n", insert ? "inserted" : "removed"); + htc_35mm_jack_plug_event(cdata->headset_is_in, + &cdata->is_hpin_pin_stable); + } +} + +static int microp_enable_headset_plug_event(void) +{ + int ret; + struct i2c_client *client; + struct microp_i2c_client_data *cdata; + uint16_t stat; + + client = private_microp_client; + cdata = i2c_get_clientdata(client); + + /* enable microp interrupt to detect changes */ + ret = microp_interrupt_enable(client, IRQ_HEADSETIN); + if (ret < 0) { + dev_err(&client->dev, "%s: failed to enable irqs\n", + __func__); + return 0; + } + /* see if headset state has changed */ + microp_read_gpi_status(client, &stat); + stat = !(stat & READ_GPI_STATE_HPIN); + if(cdata->headset_is_in != stat) { + cdata->headset_is_in = stat; + pr_debug("Headset state changed\n"); + htc_35mm_jack_plug_event(stat, &cdata->is_hpin_pin_stable); + } + + return 1; +} + +static int microp_headset_detect_mic(void) +{ + uint16_t data; + + microp_read_adc(MICROP_REMOTE_KEY_ADC_CHAN, &data); + if (data >= 200) + return 1; + else + return 0; +} + +static int microp_headset_has_mic(void) +{ + int mic1 = -1; + int mic2 = -1; + int count = 0; + + mic2 = microp_headset_detect_mic(); + + /* debounce the detection wait until 2 consecutive read are equal */ + while ((mic1 != mic2) && (count < 10)) { + mic1 = mic2; + msleep(600); + mic2 = microp_headset_detect_mic(); + count++; + } + + pr_info("%s: microphone (%d) %s\n", __func__, count, + mic1 ? 
"present" : "not present"); + + return mic1; +} + +static int microp_enable_key_event(void) +{ + int ret; + struct i2c_client *client; + + client = private_microp_client; + + if (!is_cdma_version(system_rev)) + gpio_set_value(MAHIMAHI_GPIO_35MM_KEY_INT_SHUTDOWN, 1); + + /* turn on key interrupt */ + /* enable microp interrupt to detect changes */ + ret = microp_interrupt_enable(client, IRQ_REMOTEKEY); + if (ret < 0) { + dev_err(&client->dev, "%s: failed to enable irqs\n", + __func__); + return ret; + } + return 0; +} + +static int microp_disable_key_event(void) +{ + int ret; + struct i2c_client *client; + + client = private_microp_client; + + /* shutdown key interrupt */ + if (!is_cdma_version(system_rev)) + gpio_set_value(MAHIMAHI_GPIO_35MM_KEY_INT_SHUTDOWN, 0); + + /* disable microp interrupt to detect changes */ + ret = microp_interrupt_disable(client, IRQ_REMOTEKEY); + if (ret < 0) { + dev_err(&client->dev, "%s: failed to disable irqs\n", + __func__); + return ret; + } + return 0; +} + +static int get_remote_keycode(int *keycode) +{ + struct i2c_client *client = private_microp_client; + int ret; + uint8_t data[2]; + + ret = i2c_read_block(client, MICROP_I2C_RCMD_REMOTE_KEYCODE, data, 2); + if (ret < 0) { + dev_err(&client->dev, "%s: read remote keycode fail\n", + __func__); + return -EIO; + } + pr_debug("%s: key = 0x%x\n", __func__, data[1]); + if (!data[1]) { + *keycode = 0; + return 1; /* no keycode */ + } else { + *keycode = data[1]; + } + return 0; +} + +static ssize_t microp_i2c_remotekey_adc_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client; + uint16_t value; + int i, button = 0; + int ret; + + client = to_i2c_client(dev); + + microp_read_adc(MICROP_REMOTE_KEY_ADC_CHAN, &value); + + for (i = 0; i < 3; i++) { + if ((value >= remote_key_adc_table[2 * i]) && + (value <= remote_key_adc_table[2 * i + 1])) { + button = i + 1; + } + + } + + ret = sprintf(buf, "Remote Key[0x%03X] => button %d\n", + value, button); + + return ret; +} + +static DEVICE_ATTR(key_adc, 0644, microp_i2c_remotekey_adc_show, NULL); + +/* + * LED support +*/ +static int microp_i2c_write_led_mode(struct i2c_client *client, + struct led_classdev *led_cdev, + uint8_t mode, uint16_t off_timer) +{ + struct microp_i2c_client_data *cdata; + struct microp_led_data *ldata; + uint8_t data[7]; + int ret; + + cdata = i2c_get_clientdata(client); + ldata = container_of(led_cdev, struct microp_led_data, ldev); + + + if (ldata->type == GREEN_LED) { + data[0] = 0x01; + data[1] = mode; + data[2] = off_timer >> 8; + data[3] = off_timer & 0xFF; + data[4] = 0x00; + data[5] = 0x00; + data[6] = 0x00; + } else if (ldata->type == AMBER_LED) { + data[0] = 0x02; + data[1] = 0x00; + data[2] = 0x00; + data[3] = 0x00; + data[4] = mode; + data[5] = off_timer >> 8; + data[6] = off_timer & 0xFF; + } else if (ldata->type == RED_LED) { + data[0] = 0x02; + data[1] = 0x00; + data[2] = 0x00; + data[3] = 0x00; + data[4] = mode? 
5: 0; + data[5] = off_timer >> 8; + data[6] = off_timer & 0xFF; + } else if (ldata->type == BLUE_LED) { + data[0] = 0x04; + data[1] = mode; + data[2] = off_timer >> 8; + data[3] = off_timer & 0xFF; + data[4] = 0x00; + data[5] = 0x00; + data[6] = 0x00; + } + + ret = i2c_write_block(client, MICROP_I2C_WCMD_LED_MODE, data, 7); + if (ret == 0) { + mutex_lock(&ldata->led_data_mutex); + if (mode > 1) + ldata->blink = mode; + else + ldata->mode = mode; + mutex_unlock(&ldata->led_data_mutex); + } + return ret; +} + +static ssize_t microp_i2c_led_blink_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct led_classdev *led_cdev; + struct microp_led_data *ldata; + int ret; + + led_cdev = (struct led_classdev *)dev_get_drvdata(dev); + ldata = container_of(led_cdev, struct microp_led_data, ldev); + + mutex_lock(&ldata->led_data_mutex); + ret = sprintf(buf, "%d\n", ldata->blink ? ldata->blink - 1 : 0); + mutex_unlock(&ldata->led_data_mutex); + + return ret; +} + +static ssize_t microp_i2c_led_blink_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct led_classdev *led_cdev; + struct microp_led_data *ldata; + struct i2c_client *client; + int val, ret; + uint8_t mode; + + val = -1; + sscanf(buf, "%u", &val); + + led_cdev = (struct led_classdev *)dev_get_drvdata(dev); + ldata = container_of(led_cdev, struct microp_led_data, ldev); + client = to_i2c_client(dev->parent); + + mutex_lock(&ldata->led_data_mutex); + switch (val) { + case 0: /* stop flashing */ + mode = ldata->mode; + ldata->blink = 0; + break; + case 1: + case 2: + case 3: + mode = val + 1; + break; + + default: + mutex_unlock(&ldata->led_data_mutex); + return -EINVAL; + } + mutex_unlock(&ldata->led_data_mutex); + + ret = microp_i2c_write_led_mode(client, led_cdev, mode, 0xffff); + if (ret) + dev_err(&client->dev, "%s set blink failed\n", led_cdev->name); + + return count; +} + +static DEVICE_ATTR(blink, 0644, microp_i2c_led_blink_show, + microp_i2c_led_blink_store); + +static ssize_t microp_i2c_led_off_timer_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct microp_i2c_client_data *cdata; + struct led_classdev *led_cdev; + struct microp_led_data *ldata; + struct i2c_client *client; + uint8_t data[2]; + int ret, offtime; + + + led_cdev = (struct led_classdev *)dev_get_drvdata(dev); + ldata = container_of(led_cdev, struct microp_led_data, ldev); + client = to_i2c_client(dev->parent); + cdata = i2c_get_clientdata(client); + + dev_dbg(&client->dev, "Getting %s remaining time\n", led_cdev->name); + + if (ldata->type == GREEN_LED) { + ret = i2c_read_block(client, + MICROP_I2C_RCMD_GREEN_LED_REMAIN_TIME, data, 2); + } else if (ldata->type == AMBER_LED) { + ret = i2c_read_block(client, + MICROP_I2C_RCMD_AMBER_RED_LED_REMAIN_TIME, + data, 2); + } else if (ldata->type == RED_LED) { + ret = i2c_read_block(client, + MICROP_I2C_RCMD_AMBER_RED_LED_REMAIN_TIME, + data, 2); + } else if (ldata->type == BLUE_LED) { + ret = i2c_read_block(client, + MICROP_I2C_RCMD_BLUE_LED_REMAIN_TIME, data, 2); + } else { + dev_err(&client->dev, "Unknown led %s\n", ldata->ldev.name); + return -EINVAL; + } + + if (ret) { + dev_err(&client->dev, + "%s get off_timer failed\n", led_cdev->name); + } + offtime = (int)((data[1] | data[0] << 8) * 2); + + ret = sprintf(buf, "Time remains %d:%d\n", offtime / 60, offtime % 60); + return ret; +} + +static ssize_t microp_i2c_led_off_timer_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + 
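+	/*
+	 * Input is "<minutes> <seconds>", each 0-255.  "0 0" is treated as
+	 * "never expire" (0xffff); anything else is converted to the 2-second
+	 * units the microp expects, so "1 30" becomes 45.  Purely illustrative
+	 * usage (the sysfs path depends on the registered led name):
+	 *
+	 *   echo "1 30" > /sys/class/leds/green/off_timer
+	 */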
struct led_classdev *led_cdev; + struct microp_led_data *ldata; + struct i2c_client *client; + int min, sec, ret; + uint16_t off_timer; + + min = -1; + sec = -1; + sscanf(buf, "%d %d", &min, &sec); + + if (min < 0 || min > 255) + return -EINVAL; + if (sec < 0 || sec > 255) + return -EINVAL; + + led_cdev = (struct led_classdev *)dev_get_drvdata(dev); + ldata = container_of(led_cdev, struct microp_led_data, ldev); + client = to_i2c_client(dev->parent); + + dev_dbg(&client->dev, "Setting %s off_timer to %d min %d sec\n", + led_cdev->name, min, sec); + + if (!min && !sec) + off_timer = 0xFFFF; + else + off_timer = (min * 60 + sec) / 2; + + ret = microp_i2c_write_led_mode(client, led_cdev, + ldata->mode, off_timer); + if (ret) { + dev_err(&client->dev, + "%s set off_timer %d min %d sec failed\n", + led_cdev->name, min, sec); + } + return count; +} + +static DEVICE_ATTR(off_timer, 0644, microp_i2c_led_off_timer_show, + microp_i2c_led_off_timer_store); + +static ssize_t microp_i2c_jogball_color_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct led_classdev *led_cdev; + struct microp_led_data *ldata; + struct i2c_client *client; + int rpwm, gpwm, bpwm, ret; + uint8_t data[4]; + + rpwm = -1; + gpwm = -1; + bpwm = -1; + sscanf(buf, "%d %d %d", &rpwm, &gpwm, &bpwm); + + if (rpwm < 0 || rpwm > 255) + return -EINVAL; + if (gpwm < 0 || gpwm > 255) + return -EINVAL; + if (bpwm < 0 || bpwm > 255) + return -EINVAL; + + led_cdev = (struct led_classdev *)dev_get_drvdata(dev); + ldata = container_of(led_cdev, struct microp_led_data, ldev); + client = to_i2c_client(dev->parent); + + dev_dbg(&client->dev, "Setting %s color to R=%d, G=%d, B=%d\n", + led_cdev->name, rpwm, gpwm, bpwm); + + data[0] = rpwm; + data[1] = gpwm; + data[2] = bpwm; + data[3] = 0x00; + + ret = i2c_write_block(client, MICROP_I2C_WCMD_JOGBALL_LED_PWM_SET, + data, 4); + if (ret) { + dev_err(&client->dev, + "%s set color R=%d G=%d B=%d failed\n", + led_cdev->name, rpwm, gpwm, bpwm); + } + return count; +} + +static DEVICE_ATTR(color, 0644, NULL, microp_i2c_jogball_color_store); + +static ssize_t microp_i2c_jogball_period_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct led_classdev *led_cdev; + struct microp_led_data *ldata; + struct i2c_client *client; + int period = -1; + int ret; + uint8_t data[4]; + + sscanf(buf, "%d", &period); + + if (period < 2 || period > 12) + return -EINVAL; + + led_cdev = (struct led_classdev *)dev_get_drvdata(dev); + ldata = container_of(led_cdev, struct microp_led_data, ldev); + client = to_i2c_client(dev->parent); + + dev_info(&client->dev, "Setting Jogball flash period to %d\n", period); + + data[0] = 0x00; + data[1] = period; + + ret = i2c_write_block(client, MICROP_I2C_WCMD_JOGBALL_LED_PERIOD_SET, + data, 2); + if (ret) { + dev_err(&client->dev, "%s set period=%d failed\n", + led_cdev->name, period); + } + return count; +} + +static DEVICE_ATTR(period, 0644, NULL, microp_i2c_jogball_period_store); + +static void microp_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + unsigned long flags; + struct i2c_client *client = to_i2c_client(led_cdev->dev->parent); + struct microp_led_data *ldata = + container_of(led_cdev, struct microp_led_data, ldev); + + dev_dbg(&client->dev, "Setting %s brightness current %d new %d\n", + led_cdev->name, led_cdev->brightness, brightness); + + if (brightness > 255) + brightness = 255; + led_cdev->brightness = brightness; + + 
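+	/*
+	 * Only record the requested level here and let brightness_work issue
+	 * the actual I2C write: brightness_set may be called from contexts
+	 * where a blocking i2c_transfer() is not safe, so the update is
+	 * deferred to the shared workqueue.
+	 */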
spin_lock_irqsave(&ldata->brightness_lock, flags); + ldata->brightness = brightness; + spin_unlock_irqrestore(&ldata->brightness_lock, flags); + + schedule_work(&ldata->brightness_work); +} + +static void microp_led_brightness_set_work(struct work_struct *work) +{ + unsigned long flags; + struct microp_led_data *ldata = + container_of(work, struct microp_led_data, brightness_work); + struct led_classdev *led_cdev = &ldata->ldev; + + struct i2c_client *client = to_i2c_client(led_cdev->dev->parent); + + enum led_brightness brightness; + int ret; + uint8_t mode; + + spin_lock_irqsave(&ldata->brightness_lock, flags); + brightness = ldata->brightness; + spin_unlock_irqrestore(&ldata->brightness_lock, flags); + + if (brightness) + mode = 1; + else + mode = 0; + + ret = microp_i2c_write_led_mode(client, led_cdev, mode, 0xffff); + if (ret) { + dev_err(&client->dev, + "led_brightness_set failed to set mode\n"); + } +} + +struct device_attribute *green_amber_attrs[] = { + &dev_attr_blink, + &dev_attr_off_timer, +}; + +struct device_attribute *jogball_attrs[] = { + &dev_attr_color, + &dev_attr_period, +}; + +static void microp_led_buttons_brightness_set_work(struct work_struct *work) +{ + + unsigned long flags; + struct microp_led_data *ldata = + container_of(work, struct microp_led_data, brightness_work); + struct led_classdev *led_cdev = &ldata->ldev; + + struct i2c_client *client = to_i2c_client(led_cdev->dev->parent); + struct microp_i2c_client_data *cdata = i2c_get_clientdata(client); + + + uint8_t data[4] = {0, 0, 0}; + int ret = 0; + enum led_brightness brightness; + uint8_t value; + + + spin_lock_irqsave(&ldata->brightness_lock, flags); + brightness = ldata->brightness; + spin_unlock_irqrestore(&ldata->brightness_lock, flags); + + value = brightness >= 255 ? 
0x20 : 0; + + /* avoid a flicker that can occur when writing the same value */ + if (cdata->button_led_value == value) + return; + cdata->button_led_value = value; + + /* in 40ms */ + data[0] = 0x05; + /* duty cycle 0-255 */ + data[1] = value; + /* bit2 == change brightness */ + data[3] = 0x04; + + ret = i2c_write_block(client, MICROP_I2C_WCMD_BUTTONS_LED_CTRL, + data, 4); + if (ret < 0) + dev_err(&client->dev, "%s failed on set buttons\n", __func__); +} + +static void microp_led_jogball_brightness_set_work(struct work_struct *work) +{ + unsigned long flags; + struct microp_led_data *ldata = + container_of(work, struct microp_led_data, brightness_work); + struct led_classdev *led_cdev = &ldata->ldev; + + struct i2c_client *client = to_i2c_client(led_cdev->dev->parent); + uint8_t data[3] = {0, 0, 0}; + int ret = 0; + enum led_brightness brightness; + + spin_lock_irqsave(&ldata->brightness_lock, flags); + brightness = ldata->brightness; + spin_unlock_irqrestore(&ldata->brightness_lock, flags); + + switch (brightness) { + case 0: + data[0] = 0; + break; + case 3: + data[0] = 1; + data[1] = data[2] = 0xFF; + break; + case 7: + data[0] = 2; + data[1] = 0; + data[2] = 60; + break; + default: + dev_warn(&client->dev, "%s: unknown value: %d\n", + __func__, brightness); + break; + } + ret = i2c_write_block(client, MICROP_I2C_WCMD_JOGBALL_LED_MODE, + data, 3); + if (ret < 0) + dev_err(&client->dev, "%s failed on set jogball mode:0x%2.2X\n", + __func__, data[0]); +} + +/* + * Light Sensor Support + */ +static int microp_i2c_auto_backlight_mode(struct i2c_client *client, + uint8_t enabled) +{ + uint8_t data[2]; + int ret = 0; + + data[0] = 0; + if (enabled) + data[1] = 1; + else + data[1] = 0; + + ret = i2c_write_block(client, MICROP_I2C_WCMD_AUTO_BL_CTL, data, 2); + if (ret != 0) + pr_err("%s: set auto light sensor fail\n", __func__); + + return ret; +} + +static int lightsensor_enable(void) +{ + struct i2c_client *client; + struct microp_i2c_client_data *cdata; + int ret; + + client = private_microp_client; + cdata = i2c_get_clientdata(client); + + if (cdata->microp_is_suspend) { + pr_err("%s: abort, uP is going to suspend after #\n", + __func__); + return -EIO; + } + + disable_irq(client->irq); + ret = microp_i2c_auto_backlight_mode(client, 1); + if (ret < 0) { + pr_err("%s: set auto light sensor fail\n", __func__); + enable_irq(client->irq); + return ret; + } + + cdata->auto_backlight_enabled = 1; + /* TEMPORARY HACK: schedule a deferred light sensor read + * to work around sensor manager race condition + */ + schedule_delayed_work(&cdata->ls_read_work, LS_READ_DELAY); + schedule_work(&cdata->work.work); + + return 0; +} + +static int lightsensor_disable(void) +{ + /* update trigger data when done */ + struct i2c_client *client; + struct microp_i2c_client_data *cdata; + int ret; + + client = private_microp_client; + cdata = i2c_get_clientdata(client); + + if (cdata->microp_is_suspend) { + pr_err("%s: abort, uP is going to suspend after #\n", + __func__); + return -EIO; + } + + cancel_delayed_work(&cdata->ls_read_work); + + ret = microp_i2c_auto_backlight_mode(client, 0); + if (ret < 0) + pr_err("%s: disable auto light sensor fail\n", + __func__); + else + cdata->auto_backlight_enabled = 0; + return 0; +} + +static int microp_lightsensor_read(uint16_t *adc_value, + uint8_t *adc_level) +{ + struct i2c_client *client; + struct microp_i2c_client_data *cdata; + uint8_t i; + int ret; + + client = private_microp_client; + cdata = i2c_get_clientdata(client); + + ret = 
microp_read_adc(MICROP_LSENSOR_ADC_CHAN, adc_value); + if (ret != 0) + return -1; + + if (*adc_value > 0x3FF) { + pr_warning("%s: get wrong value: 0x%X\n", + __func__, *adc_value); + return -1; + } else { + if (!cdata->als_calibrating) { + *adc_value = *adc_value + * cdata->als_gadc / cdata->als_kadc; + if (*adc_value > 0x3FF) + *adc_value = 0x3FF; + } + + *adc_level = ARRAY_SIZE(lsensor_adc_table) - 1; + for (i = 0; i < ARRAY_SIZE(lsensor_adc_table); i++) { + if (*adc_value <= lsensor_adc_table[i]) { + *adc_level = i; + break; + } + } + pr_debug("%s: ADC value: 0x%X, level: %d #\n", + __func__, *adc_value, *adc_level); + } + + return 0; +} + +static ssize_t microp_i2c_lightsensor_adc_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + uint8_t adc_level = 0; + uint16_t adc_value = 0; + int ret; + + ret = microp_lightsensor_read(&adc_value, &adc_level); + + ret = sprintf(buf, "ADC[0x%03X] => level %d\n", adc_value, adc_level); + + return ret; +} + +static DEVICE_ATTR(ls_adc, 0644, microp_i2c_lightsensor_adc_show, NULL); + +static ssize_t microp_i2c_ls_auto_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client; + uint8_t data[2] = {0, 0}; + int ret; + + client = to_i2c_client(dev); + + i2c_read_block(client, MICROP_I2C_RCMD_SPI_BL_STATUS, data, 2); + ret = sprintf(buf, "Light sensor Auto = %d, SPI enable = %d\n", + data[0], data[1]); + + return ret; +} + +static ssize_t microp_i2c_ls_auto_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client; + struct microp_i2c_client_data *cdata; + uint8_t enable = 0; + int ls_auto; + + ls_auto = -1; + sscanf(buf, "%d", &ls_auto); + + if (ls_auto != 0 && ls_auto != 1 && ls_auto != ALS_CALIBRATE_MODE) + return -EINVAL; + + client = to_i2c_client(dev); + cdata = i2c_get_clientdata(client); + + if (ls_auto) { + enable = 1; + cdata->als_calibrating = (ls_auto == ALS_CALIBRATE_MODE) ? 1 : 0; + cdata->auto_backlight_enabled = 1; + } else { + enable = 0; + cdata->als_calibrating = 0; + cdata->auto_backlight_enabled = 0; + } + + microp_i2c_auto_backlight_mode(client, enable); + + return count; +} + +static DEVICE_ATTR(ls_auto, 0644, microp_i2c_ls_auto_show, + microp_i2c_ls_auto_store); + +DEFINE_MUTEX(api_lock); +static int lightsensor_opened; + +static int lightsensor_open(struct inode *inode, struct file *file) +{ + int rc = 0; + pr_debug("%s\n", __func__); + mutex_lock(&api_lock); + if (lightsensor_opened) { + pr_err("%s: already opened\n", __func__); + rc = -EBUSY; + } + lightsensor_opened = 1; + mutex_unlock(&api_lock); + return rc; +} + +static int lightsensor_release(struct inode *inode, struct file *file) +{ + pr_debug("%s\n", __func__); + mutex_lock(&api_lock); + lightsensor_opened = 0; + mutex_unlock(&api_lock); + return 0; +} + +static long lightsensor_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int rc, val; + struct i2c_client *client; + struct microp_i2c_client_data *cdata; + + mutex_lock(&api_lock); + + client = private_microp_client; + cdata = i2c_get_clientdata(client); + + pr_debug("%s cmd %d\n", __func__, _IOC_NR(cmd)); + + switch (cmd) { + case LIGHTSENSOR_IOCTL_ENABLE: + if (get_user(val, (unsigned long __user *)arg)) { + rc = -EFAULT; + break; + } + rc = val ? 
lightsensor_enable() : lightsensor_disable(); + break; + case LIGHTSENSOR_IOCTL_GET_ENABLED: + val = cdata->auto_backlight_enabled; + pr_debug("%s enabled %d\n", __func__, val); + rc = put_user(val, (unsigned long __user *)arg); + break; + default: + pr_err("%s: invalid cmd %d\n", __func__, _IOC_NR(cmd)); + rc = -EINVAL; + } + + mutex_unlock(&api_lock); + return rc; +} + +static struct file_operations lightsensor_fops = { + .owner = THIS_MODULE, + .open = lightsensor_open, + .release = lightsensor_release, + .unlocked_ioctl = lightsensor_ioctl +}; + +struct miscdevice lightsensor_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "lightsensor", + .fops = &lightsensor_fops +}; + +/* + * G-sensor + */ +static int microp_spi_enable(uint8_t on) +{ + struct i2c_client *client; + int ret; + + client = private_microp_client; + ret = i2c_write_block(client, MICROP_I2C_WCMD_SPI_EN, &on, 1); + if (ret < 0) { + dev_err(&client->dev,"%s: i2c_write_block fail\n", __func__); + return ret; + } + msleep(10); + return ret; +} + +static int gsensor_read_reg(uint8_t reg, uint8_t *data) +{ + struct i2c_client *client; + int ret; + uint8_t tmp[2]; + + client = private_microp_client; + ret = i2c_write_block(client, MICROP_I2C_WCMD_GSENSOR_REG_DATA_REQ, + ®, 1); + if (ret < 0) { + dev_err(&client->dev,"%s: i2c_write_block fail\n", __func__); + return ret; + } + msleep(10); + + ret = i2c_read_block(client, MICROP_I2C_RCMD_GSENSOR_REG_DATA, tmp, 2); + if (ret < 0) { + dev_err(&client->dev,"%s: i2c_read_block fail\n", __func__); + return ret; + } + *data = tmp[1]; + return ret; +} + +static int gsensor_write_reg(uint8_t reg, uint8_t data) +{ + struct i2c_client *client; + int ret; + uint8_t tmp[2]; + + client = private_microp_client; + + tmp[0] = reg; + tmp[1] = data; + ret = i2c_write_block(client, MICROP_I2C_WCMD_GSENSOR_REG, tmp, 2); + if (ret < 0) { + dev_err(&client->dev,"%s: i2c_write_block fail\n", __func__); + return ret; + } + + return ret; +} + +static int gsensor_read_acceleration(short *buf) +{ + struct i2c_client *client; + int ret; + uint8_t tmp[6]; + struct microp_i2c_client_data *cdata; + + client = private_microp_client; + + cdata = i2c_get_clientdata(client); + + tmp[0] = 1; + ret = i2c_write_block(client, MICROP_I2C_WCMD_GSENSOR_DATA_REQ, + tmp, 1); + if (ret < 0) { + dev_err(&client->dev,"%s: i2c_write_block fail\n", __func__); + return ret; + } + + msleep(10); + + if (cdata->version <= 0x615) { + /* + * Note the data is a 10bit signed value from the chip. 
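+		 * The two bytes are combined MSB first, which leaves the
+		 * valid bits in the top of the 16-bit word; the arithmetic
+		 * right shift by 6 below then sign-extends the result into
+		 * the -512..511 range (e.g. raw 0xffc0 becomes -1).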
+ */ + ret = i2c_read_block(client, MICROP_I2C_RCMD_GSENSOR_X_DATA, + tmp, 2); + if (ret < 0) { + dev_err(&client->dev, "%s: i2c_read_block fail\n", + __func__); + return ret; + } + buf[0] = (short)(tmp[0] << 8 | tmp[1]); + buf[0] >>= 6; + + ret = i2c_read_block(client, MICROP_I2C_RCMD_GSENSOR_Y_DATA, + tmp, 2); + if (ret < 0) { + dev_err(&client->dev, "%s: i2c_read_block fail\n", + __func__); + return ret; + } + buf[1] = (short)(tmp[0] << 8 | tmp[1]); + buf[1] >>= 6; + + ret = i2c_read_block(client, MICROP_I2C_RCMD_GSENSOR_Z_DATA, + tmp, 2); + if (ret < 0) { + dev_err(&client->dev, "%s: i2c_read_block fail\n", + __func__); + return ret; + } + buf[2] = (short)(tmp[0] << 8 | tmp[1]); + buf[2] >>= 6; + } else { + ret = i2c_read_block(client, MICROP_I2C_RCMD_GSENSOR_DATA, + tmp, 6); + if (ret < 0) { + dev_err(&client->dev, "%s: i2c_read_block fail\n", + __func__); + return ret; + } + buf[0] = (short)(tmp[0] << 8 | tmp[1]); + buf[0] >>= 6; + buf[1] = (short)(tmp[2] << 8 | tmp[3]); + buf[1] >>= 6; + buf[2] = (short)(tmp[4] << 8 | tmp[5]); + buf[2] >>= 6; + } + +#ifdef DEBUG_BMA150 + /* Log this to debugfs */ + gsensor_log_status(ktime_get(), buf[0], buf[1], buf[2]); +#endif + return 1; +} + +static int gsensor_init_hw(void) +{ + uint8_t reg; + int ret; + + pr_debug("%s\n", __func__); + + microp_spi_enable(1); + + ret = gsensor_read_reg(RANGE_BWIDTH_REG, ®); + if (ret < 0 ) + return -EIO; + reg &= 0xe0; + ret = gsensor_write_reg(RANGE_BWIDTH_REG, reg); + if (ret < 0 ) + return -EIO; + + ret = gsensor_read_reg(SMB150_CONF2_REG, ®); + if (ret < 0 ) + return -EIO; + reg |= (1 << 3); + ret = gsensor_write_reg(SMB150_CONF2_REG, reg); + + return ret; +} + +static int bma150_set_mode(char mode) +{ + uint8_t reg; + int ret; + + pr_debug("%s mode = %d\n", __func__, mode); + if (mode == BMA_MODE_NORMAL) + microp_spi_enable(1); + + + ret = gsensor_read_reg(SMB150_CTRL_REG, ®); + if (ret < 0 ) + return -EIO; + reg = (reg & 0xfe) | mode; + ret = gsensor_write_reg(SMB150_CTRL_REG, reg); + + if (mode == BMA_MODE_SLEEP) + microp_spi_enable(0); + + return ret; +} +static int gsensor_read(uint8_t *data) +{ + int ret; + uint8_t reg = data[0]; + + ret = gsensor_read_reg(reg, &data[1]); + pr_debug("%s reg = %x data = %x\n", __func__, reg, data[1]); + return ret; +} + +static int gsensor_write(uint8_t *data) +{ + int ret; + uint8_t reg = data[0]; + + pr_debug("%s reg = %x data = %x\n", __func__, reg, data[1]); + ret = gsensor_write_reg(reg, data[1]); + return ret; +} + +static DEFINE_MUTEX(bma150_lock); + +static int bma150_open(struct inode *inode, struct file *file) +{ + pr_debug("%s\n", __func__); + return nonseekable_open(inode, file); +} + +static int bma150_release(struct inode *inode, struct file *file) +{ + return 0; +} + +static long bma150_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + char rwbuf[8]; + int ret = -1; + short buf[8], temp; + + switch (cmd) { + case BMA_IOCTL_READ: + case BMA_IOCTL_WRITE: + case BMA_IOCTL_SET_MODE: + if (copy_from_user(&rwbuf, argp, sizeof(rwbuf))) + return -EFAULT; + break; + case BMA_IOCTL_READ_ACCELERATION: + if (copy_from_user(&buf, argp, sizeof(buf))) + return -EFAULT; + break; + default: + break; + } + + mutex_lock(&bma150_lock); + switch (cmd) { + case BMA_IOCTL_INIT: + ret = gsensor_init_hw(); + if (ret < 0) + goto err; + break; + + case BMA_IOCTL_READ: + if (rwbuf[0] < 1) { + ret = -EINVAL; + goto err; + } + ret = gsensor_read(rwbuf); + if (ret < 0) + goto err; + break; + case BMA_IOCTL_WRITE: + if 
(rwbuf[0] < 2) { + ret = -EINVAL; + goto err; + } + ret = gsensor_write(rwbuf); + if (ret < 0) + goto err; + break; + case BMA_IOCTL_READ_ACCELERATION: + ret = gsensor_read_acceleration(&buf[0]); + if (ret < 0) + goto err; + break; + case BMA_IOCTL_SET_MODE: + bma150_set_mode(rwbuf[0]); + break; + case BMA_IOCTL_GET_INT: + temp = 0; + break; + default: + ret = -ENOTTY; + goto err; + } + mutex_unlock(&bma150_lock); + + switch (cmd) { + case BMA_IOCTL_READ: + if (copy_to_user(argp, &rwbuf, sizeof(rwbuf))) + return -EFAULT; + break; + case BMA_IOCTL_READ_ACCELERATION: + if (copy_to_user(argp, &buf, sizeof(buf))) + return -EFAULT; + break; + case BMA_IOCTL_GET_INT: + if (copy_to_user(argp, &temp, sizeof(temp))) + return -EFAULT; + break; + default: + break; + } + + return 0; + +err: + mutex_unlock(&bma150_lock); + return ret; +} + +static struct file_operations bma_fops = { + .owner = THIS_MODULE, + .open = bma150_open, + .release = bma150_release, + .unlocked_ioctl = bma150_ioctl, +}; + +static struct miscdevice spi_bma_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = BMA150_G_SENSOR_NAME, + .fops = &bma_fops, +}; + +/* + * Interrupt + */ +static irqreturn_t microp_i2c_intr_irq_handler(int irq, void *dev_id) +{ + struct i2c_client *client; + struct microp_i2c_client_data *cdata; + + client = to_i2c_client(dev_id); + cdata = i2c_get_clientdata(client); + + dev_dbg(&client->dev, "intr_irq_handler\n"); + + disable_irq_nosync(client->irq); + schedule_work(&cdata->work.work); + return IRQ_HANDLED; +} + +static void microp_i2c_intr_work_func(struct work_struct *work) +{ + struct microp_i2c_work *up_work; + struct i2c_client *client; + struct microp_i2c_client_data *cdata; + uint8_t data[3], adc_level; + uint16_t intr_status = 0, adc_value, gpi_status = 0; + int keycode = 0, ret = 0; + + up_work = container_of(work, struct microp_i2c_work, work); + client = up_work->client; + cdata = i2c_get_clientdata(client); + + ret = i2c_read_block(client, MICROP_I2C_RCMD_GPI_INT_STATUS, data, 2); + if (ret < 0) { + dev_err(&client->dev, "%s: read interrupt status fail\n", + __func__); + } + + intr_status = data[0]<<8 | data[1]; + ret = i2c_write_block(client, MICROP_I2C_WCMD_GPI_INT_STATUS_CLR, + data, 2); + if (ret < 0) { + dev_err(&client->dev, "%s: clear interrupt status fail\n", + __func__); + } + pr_debug("intr_status=0x%02x\n", intr_status); + + if ((intr_status & IRQ_LSENSOR) || cdata->force_light_sensor_read) { + ret = microp_lightsensor_read(&adc_value, &adc_level); + if (cdata->force_light_sensor_read) { + /* report an invalid value first to ensure we trigger an event + * when adc_level is zero. 
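+			 * The input core suppresses ABS reports whose value
+			 * matches the previously reported one, so without this
+			 * dummy -1 a forced read that returns the same level
+			 * again would never reach userspace.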
+ */ + input_report_abs(cdata->ls_input_dev, ABS_MISC, -1); + input_sync(cdata->ls_input_dev); + cdata->force_light_sensor_read = 0; + } + input_report_abs(cdata->ls_input_dev, ABS_MISC, (int)adc_level); + input_sync(cdata->ls_input_dev); + } + + if (intr_status & IRQ_SDCARD) { + microp_read_gpi_status(client, &gpi_status); + mahimahi_microp_sdslot_update_status(gpi_status); + } + + if (intr_status & IRQ_HEADSETIN) { + cdata->is_hpin_pin_stable = 0; + wake_lock_timeout(µp_i2c_wakelock, 3*HZ); + if (!cdata->headset_is_in) + schedule_delayed_work(&cdata->hpin_debounce_work, + msecs_to_jiffies(500)); + else + schedule_delayed_work(&cdata->hpin_debounce_work, + msecs_to_jiffies(300)); + } + if (intr_status & IRQ_REMOTEKEY) { + if ((get_remote_keycode(&keycode) == 0) && + (cdata->is_hpin_pin_stable)) { + htc_35mm_key_event(keycode, &cdata->is_hpin_pin_stable); + } + } + + enable_irq(client->irq); +} + +static void ls_read_do_work(struct work_struct *work) +{ + struct i2c_client *client = private_microp_client; + struct microp_i2c_client_data *cdata = i2c_get_clientdata(client); + + /* force a light sensor reading */ + disable_irq(client->irq); + cdata->force_light_sensor_read = 1; + schedule_work(&cdata->work.work); +} + +static int microp_function_initialize(struct i2c_client *client) +{ + struct microp_i2c_client_data *cdata; + uint8_t data[20]; + uint16_t stat, interrupts = 0; + int i; + int ret; + struct led_classdev *led_cdev; + + cdata = i2c_get_clientdata(client); + + /* Light Sensor */ + if (als_kadc >> 16 == ALS_CALIBRATED) + cdata->als_kadc = als_kadc & 0xFFFF; + else { + cdata->als_kadc = 0; + pr_info("%s: no ALS calibrated\n", __func__); + } + + if (cdata->als_kadc && golden_adc) { + cdata->als_kadc = + (cdata->als_kadc > 0 && cdata->als_kadc < 0x400) + ? cdata->als_kadc : golden_adc; + cdata->als_gadc = + (golden_adc > 0) + ? 
golden_adc : cdata->als_kadc; + } else { + cdata->als_kadc = 1; + cdata->als_gadc = 1; + } + pr_info("%s: als_kadc=0x%x, als_gadc=0x%x\n", + __func__, cdata->als_kadc, cdata->als_gadc); + + for (i = 0; i < 10; i++) { + data[i] = (uint8_t)(lsensor_adc_table[i] + * cdata->als_kadc / cdata->als_gadc >> 8); + data[i + 10] = (uint8_t)(lsensor_adc_table[i] + * cdata->als_kadc / cdata->als_gadc); + } + ret = i2c_write_block(client, MICROP_I2C_WCMD_ADC_TABLE, data, 20); + if (ret) + goto exit; + + ret = gpio_request(MAHIMAHI_GPIO_LS_EN_N, "microp_i2c"); + if (ret < 0) { + dev_err(&client->dev, "failed on request gpio ls_on\n"); + goto exit; + } + ret = gpio_direction_output(MAHIMAHI_GPIO_LS_EN_N, 0); + if (ret < 0) { + dev_err(&client->dev, "failed on gpio_direction_output" + "ls_on\n"); + goto err_gpio_ls; + } + cdata->light_sensor_enabled = 1; + + /* Headset */ + for (i = 0; i < 6; i++) { + data[i] = (uint8_t)(remote_key_adc_table[i] >> 8); + data[i + 6] = (uint8_t)(remote_key_adc_table[i]); + } + ret = i2c_write_block(client, + MICROP_I2C_WCMD_REMOTEKEY_TABLE, data, 12); + if (ret) + goto exit; + + INIT_DELAYED_WORK( + &cdata->hpin_debounce_work, hpin_debounce_do_work); + INIT_DELAYED_WORK( + &cdata->ls_read_work, ls_read_do_work); + + /* SD Card */ + interrupts |= IRQ_SDCARD; + + /* set LED initial state */ + for (i = 0; i < BLUE_LED; i++) { + led_cdev = &cdata->leds[i].ldev; + microp_i2c_write_led_mode(client, led_cdev, 0, 0xffff); + } + + /* enable the interrupts */ + ret = microp_interrupt_enable(client, interrupts); + if (ret < 0) { + dev_err(&client->dev, "%s: failed to enable gpi irqs\n", + __func__); + goto err_irq_en; + } + + microp_read_gpi_status(client, &stat); + mahimahi_microp_sdslot_update_status(stat); + + return 0; + +err_irq_en: +err_gpio_ls: + gpio_free(MAHIMAHI_GPIO_LS_EN_N); +exit: + return ret; +} + +#ifdef CONFIG_HAS_EARLYSUSPEND +void microp_early_suspend(struct early_suspend *h) +{ + struct microp_i2c_client_data *cdata; + struct i2c_client *client = private_microp_client; + int ret; + + if (!client) { + pr_err("%s: dataset: client is empty\n", __func__); + return; + } + cdata = i2c_get_clientdata(client); + + cdata->microp_is_suspend = 1; + + disable_irq(client->irq); + ret = cancel_work_sync(&cdata->work.work); + if (ret != 0) { + enable_irq(client->irq); + } + + if (cdata->auto_backlight_enabled) + microp_i2c_auto_backlight_mode(client, 0); + if (cdata->light_sensor_enabled == 1) { + gpio_set_value(MAHIMAHI_GPIO_LS_EN_N, 1); + cdata->light_sensor_enabled = 0; + } +} + +void microp_early_resume(struct early_suspend *h) +{ + struct i2c_client *client = private_microp_client; + struct microp_i2c_client_data *cdata; + + if (!client) { + pr_err("%s: dataset: client is empty\n", __func__); + return; + } + cdata = i2c_get_clientdata(client); + + gpio_set_value(MAHIMAHI_GPIO_LS_EN_N, 0); + cdata->light_sensor_enabled = 1; + + if (cdata->auto_backlight_enabled) + microp_i2c_auto_backlight_mode(client, 1); + + cdata->microp_is_suspend = 0; + enable_irq(client->irq); +} +#endif + +static int microp_i2c_suspend(struct i2c_client *client, + pm_message_t mesg) +{ + return 0; +} + +static int microp_i2c_resume(struct i2c_client *client) +{ + return 0; +} + +static struct { + const char *name; + void (*led_set_work)(struct work_struct *); + struct device_attribute **attrs; + int attr_cnt; +} microp_leds[] = { + [GREEN_LED] = { + .name = "green", + .led_set_work = microp_led_brightness_set_work, + .attrs = green_amber_attrs, + .attr_cnt = ARRAY_SIZE(green_amber_attrs) + }, + 
[AMBER_LED] = { + .name = "amber", + .led_set_work = microp_led_brightness_set_work, + .attrs = green_amber_attrs, + .attr_cnt = ARRAY_SIZE(green_amber_attrs) + }, + [RED_LED] = { + .name = "red", + .led_set_work = microp_led_brightness_set_work, + .attrs = green_amber_attrs, + .attr_cnt = ARRAY_SIZE(green_amber_attrs) + }, + [BLUE_LED] = { + .name = "blue", + .led_set_work = microp_led_brightness_set_work, + .attrs = green_amber_attrs, + .attr_cnt = ARRAY_SIZE(green_amber_attrs) + }, + [JOGBALL_LED] = { + .name = "jogball-backlight", + .led_set_work = microp_led_jogball_brightness_set_work, + .attrs = jogball_attrs, + .attr_cnt = ARRAY_SIZE(jogball_attrs) + }, + [BUTTONS_LED] = { + .name = "button-backlight", + .led_set_work = microp_led_buttons_brightness_set_work + }, +}; + +static int microp_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct microp_i2c_client_data *cdata; + uint8_t data[6]; + int ret; + int i; + int j; + + private_microp_client = client; + ret = i2c_read_block(client, MICROP_I2C_RCMD_VERSION, data, 2); + if (ret || !(data[0] && data[1])) { + ret = -ENODEV; + dev_err(&client->dev, "failed on get microp version\n"); + goto err_exit; + } + dev_info(&client->dev, "microp version [%02X][%02X]\n", + data[0], data[1]); + + ret = gpio_request(MAHIMAHI_GPIO_UP_RESET_N, "microp_i2c_wm"); + if (ret < 0) { + dev_err(&client->dev, "failed on request gpio reset\n"); + goto err_exit; + } + ret = gpio_direction_output(MAHIMAHI_GPIO_UP_RESET_N, 1); + if (ret < 0) { + dev_err(&client->dev, + "failed on gpio_direction_output reset\n"); + goto err_gpio_reset; + } + + cdata = kzalloc(sizeof(struct microp_i2c_client_data), GFP_KERNEL); + if (!cdata) { + ret = -ENOMEM; + dev_err(&client->dev, "failed on allocat cdata\n"); + goto err_cdata; + } + + i2c_set_clientdata(client, cdata); + cdata->version = data[0] << 8 | data[1]; + cdata->microp_is_suspend = 0; + cdata->auto_backlight_enabled = 0; + cdata->light_sensor_enabled = 0; + + wake_lock_init(µp_i2c_wakelock, WAKE_LOCK_SUSPEND, + "microp_i2c_present"); + + /* Light Sensor */ + ret = device_create_file(&client->dev, &dev_attr_ls_adc); + ret = device_create_file(&client->dev, &dev_attr_ls_auto); + cdata->ls_input_dev = input_allocate_device(); + if (!cdata->ls_input_dev) { + pr_err("%s: could not allocate input device\n", __func__); + ret = -ENOMEM; + goto err_request_input_dev; + } + cdata->ls_input_dev->name = "lightsensor-level"; + set_bit(EV_ABS, cdata->ls_input_dev->evbit); + input_set_abs_params(cdata->ls_input_dev, ABS_MISC, 0, 9, 0, 0); + + ret = input_register_device(cdata->ls_input_dev); + if (ret < 0) { + dev_err(&client->dev, "%s: can not register input device\n", + __func__); + goto err_register_input_dev; + } + + ret = misc_register(&lightsensor_misc); + if (ret < 0) { + dev_err(&client->dev, "%s: can not register misc device\n", + __func__); + goto err_register_misc_register; + } + + /* LEDs */ + ret = 0; + for (i = 0; i < ARRAY_SIZE(microp_leds) && !ret; ++i) { + struct microp_led_data *ldata = &cdata->leds[i]; + + ldata->type = i; + ldata->ldev.name = microp_leds[i].name; + ldata->ldev.brightness_set = microp_brightness_set; + mutex_init(&ldata->led_data_mutex); + INIT_WORK(&ldata->brightness_work, microp_leds[i].led_set_work); + spin_lock_init(&ldata->brightness_lock); + ret = led_classdev_register(&client->dev, &ldata->ldev); + if (ret) { + ldata->ldev.name = NULL; + break; + } + + for (j = 0; j < microp_leds[i].attr_cnt && !ret; ++j) + ret = device_create_file(ldata->ldev.dev, + 
microp_leds[i].attrs[j]); + } + if (ret) { + dev_err(&client->dev, "failed to add leds\n"); + goto err_add_leds; + } + + /* Headset */ + cdata->headset_is_in = 0; + cdata->is_hpin_pin_stable = 1; + platform_device_register(&mahimahi_h35mm); + + ret = device_create_file(&client->dev, &dev_attr_key_adc); + + /* G-sensor */ + ret = misc_register(&spi_bma_device); + if (ret < 0) { + pr_err("%s: init bma150 misc_register fail\n", + __func__); + goto err_register_bma150; + } +#ifdef DEBUG_BMA150 + debugfs_create_file("gsensor_log", 0444, NULL, NULL, &gsensor_log_fops); +#endif + /* Setup IRQ handler */ + INIT_WORK(&cdata->work.work, microp_i2c_intr_work_func); + cdata->work.client = client; + + ret = request_irq(client->irq, + microp_i2c_intr_irq_handler, + IRQF_TRIGGER_LOW, + "microp_interrupt", + &client->dev); + if (ret) { + dev_err(&client->dev, "request_irq failed\n"); + goto err_intr; + } + ret = set_irq_wake(client->irq, 1); + if (ret) { + dev_err(&client->dev, "set_irq_wake failed\n"); + goto err_intr; + } + +#ifdef CONFIG_HAS_EARLYSUSPEND + if (cdata->enable_early_suspend) { + cdata->early_suspend.level = + EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1; + cdata->early_suspend.suspend = microp_early_suspend; + cdata->early_suspend.resume = microp_early_resume; + register_early_suspend(&cdata->early_suspend); + } +#endif + + ret = microp_function_initialize(client); + if (ret) { + dev_err(&client->dev, "failed on microp function initialize\n"); + goto err_fun_init; + } + + return 0; + +err_fun_init: +err_intr: + misc_deregister(&spi_bma_device); + +err_register_bma150: + platform_device_unregister(&mahimahi_h35mm); + device_remove_file(&client->dev, &dev_attr_key_adc); + +err_add_leds: + for (i = 0; i < ARRAY_SIZE(microp_leds); ++i) { + if (!cdata->leds[i].ldev.name) + continue; + led_classdev_unregister(&cdata->leds[i].ldev); + for (j = 0; j < microp_leds[i].attr_cnt; ++j) + device_remove_file(cdata->leds[i].ldev.dev, + microp_leds[i].attrs[j]); + } + + misc_deregister(&lightsensor_misc); + +err_register_misc_register: + input_unregister_device(cdata->ls_input_dev); + +err_register_input_dev: + input_free_device(cdata->ls_input_dev); + +err_request_input_dev: + wake_lock_destroy(µp_i2c_wakelock); + device_remove_file(&client->dev, &dev_attr_ls_adc); + device_remove_file(&client->dev, &dev_attr_ls_auto); + kfree(cdata); + i2c_set_clientdata(client, NULL); + +err_cdata: +err_gpio_reset: + gpio_free(MAHIMAHI_GPIO_UP_RESET_N); +err_exit: + return ret; +} + +static int __devexit microp_i2c_remove(struct i2c_client *client) +{ + struct microp_i2c_client_data *cdata; + int i; + int j; + + cdata = i2c_get_clientdata(client); + + for (i = 0; i < ARRAY_SIZE(microp_leds); ++i) { + struct microp_led_data *ldata = &cdata->leds[i]; + cancel_work_sync(&ldata->brightness_work); + } + +#ifdef CONFIG_HAS_EARLYSUSPEND + if (cdata->enable_early_suspend) { + unregister_early_suspend(&cdata->early_suspend); + } +#endif + + for (i = 0; i < ARRAY_SIZE(microp_leds); ++i) { + if (!cdata->leds[i].ldev.name) + continue; + led_classdev_unregister(&cdata->leds[i].ldev); + for (j = 0; j < microp_leds[i].attr_cnt; ++j) + device_remove_file(cdata->leds[i].ldev.dev, + microp_leds[i].attrs[j]); + } + + free_irq(client->irq, &client->dev); + + gpio_free(MAHIMAHI_GPIO_UP_RESET_N); + + misc_deregister(&lightsensor_misc); + input_unregister_device(cdata->ls_input_dev); + input_free_device(cdata->ls_input_dev); + device_remove_file(&client->dev, &dev_attr_ls_adc); + device_remove_file(&client->dev, &dev_attr_key_adc); + 
device_remove_file(&client->dev, &dev_attr_ls_auto); + + platform_device_unregister(&mahimahi_h35mm); + + /* G-sensor */ + misc_deregister(&spi_bma_device); + + kfree(cdata); + + return 0; +} + +#define ATAG_ALS 0x5441001b +static int __init parse_tag_als_kadc(const struct tag *tags) +{ + int found = 0; + struct tag *t = (struct tag *)tags; + + for (; t->hdr.size; t = tag_next(t)) { + if (t->hdr.tag == ATAG_ALS) { + found = 1; + break; + } + } + + if (found) + als_kadc = t->u.revision.rev; + pr_debug("%s: als_kadc = 0x%x\n", __func__, als_kadc); + return 0; +} +__tagtable(ATAG_ALS, parse_tag_als_kadc); + +static const struct i2c_device_id microp_i2c_id[] = { + { MICROP_I2C_NAME, 0 }, + { } +}; + +static struct i2c_driver microp_i2c_driver = { + .driver = { + .name = MICROP_I2C_NAME, + }, + .id_table = microp_i2c_id, + .probe = microp_i2c_probe, + .suspend = microp_i2c_suspend, + .resume = microp_i2c_resume, + .remove = __devexit_p(microp_i2c_remove), +}; + + +static int __init microp_i2c_init(void) +{ + return i2c_add_driver(&microp_i2c_driver); +} + +static void __exit microp_i2c_exit(void) +{ + i2c_del_driver(&microp_i2c_driver); +} + +module_init(microp_i2c_init); +module_exit(microp_i2c_exit); + +MODULE_AUTHOR("Eric Olsen "); +MODULE_DESCRIPTION("MicroP I2C driver"); +MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-msm/board-mahimahi-mmc.c b/arch/arm/mach-msm/board-mahimahi-mmc.c new file mode 100644 index 0000000000000..d4b4256ec17b0 --- /dev/null +++ b/arch/arm/mach-msm/board-mahimahi-mmc.c @@ -0,0 +1,445 @@ +/* linux/arch/arm/mach-msm/board-mahimahi-mmc.c + * + * Copyright (C) 2009 Google, Inc. + * Copyright (C) 2009 HTC Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#include "board-mahimahi.h" +#include "devices.h" +#include "proc_comm.h" + +#undef MAHIMAHI_DEBUG_MMC + +static bool opt_disable_sdcard; +static int __init mahimahi_disablesdcard_setup(char *str) +{ + opt_disable_sdcard = (bool)simple_strtol(str, NULL, 0); + return 1; +} + +__setup("board_mahimahi.disable_sdcard=", mahimahi_disablesdcard_setup); + +static uint32_t sdcard_on_gpio_table[] = { + PCOM_GPIO_CFG(62, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* CLK */ + PCOM_GPIO_CFG(63, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* CMD */ + PCOM_GPIO_CFG(64, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(65, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(66, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(67, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT0 */ +}; + +static uint32_t sdcard_off_gpio_table[] = { + PCOM_GPIO_CFG(62, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CLK */ + PCOM_GPIO_CFG(63, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CMD */ + PCOM_GPIO_CFG(64, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(65, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(66, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(67, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT0 */ +}; + +static struct vreg *sdslot_vreg; +static uint32_t sdslot_vdd = 0xffffffff; +static uint32_t sdslot_vreg_enabled; + +static struct { + int mask; + int level; +} mmc_vdd_table[] = { + { MMC_VDD_165_195, 1800 }, + { MMC_VDD_20_21, 2050 }, + { MMC_VDD_21_22, 2150 }, + { MMC_VDD_22_23, 2250 }, + { MMC_VDD_23_24, 2350 }, + { MMC_VDD_24_25, 2450 }, + { MMC_VDD_25_26, 2550 }, + { MMC_VDD_26_27, 2650 }, + { MMC_VDD_27_28, 2750 }, + { MMC_VDD_28_29, 2850 }, + { MMC_VDD_29_30, 2950 }, +}; + +static uint32_t mahimahi_sdslot_switchvdd(struct device *dev, unsigned int vdd) +{ + int i; + int ret; + + if (vdd == sdslot_vdd) + return 0; + + sdslot_vdd = vdd; + + if (vdd == 0) { + config_gpio_table(sdcard_off_gpio_table, + ARRAY_SIZE(sdcard_off_gpio_table)); + vreg_disable(sdslot_vreg); + sdslot_vreg_enabled = 0; + return 0; + } + + if (!sdslot_vreg_enabled) { + ret = vreg_enable(sdslot_vreg); + if (ret) + pr_err("%s: Error enabling vreg (%d)\n", __func__, ret); + config_gpio_table(sdcard_on_gpio_table, + ARRAY_SIZE(sdcard_on_gpio_table)); + sdslot_vreg_enabled = 1; + } + + for (i = 0; i < ARRAY_SIZE(mmc_vdd_table); i++) { + if (mmc_vdd_table[i].mask != (1 << vdd)) + continue; + ret = vreg_set_level(sdslot_vreg, mmc_vdd_table[i].level); + if (ret) + pr_err("%s: Error setting level (%d)\n", __func__, ret); + return 0; + } + + pr_err("%s: Invalid VDD (%d) specified\n", __func__, vdd); + return 0; +} + +static uint32_t mahimahi_cdma_sdslot_switchvdd(struct device *dev, unsigned int vdd) +{ + if (!vdd == !sdslot_vdd) + return 0; + + /* In CDMA version, the vdd of sdslot is not configurable, and it is + * fixed in 2.85V by hardware design. + */ + + sdslot_vdd = vdd ? 
MMC_VDD_28_29 : 0; + + if (vdd) { + gpio_set_value(MAHIMAHI_CDMA_SD_2V85_EN, 1); + config_gpio_table(sdcard_on_gpio_table, + ARRAY_SIZE(sdcard_on_gpio_table)); + } else { + config_gpio_table(sdcard_off_gpio_table, + ARRAY_SIZE(sdcard_off_gpio_table)); + gpio_set_value(MAHIMAHI_CDMA_SD_2V85_EN, 0); + } + + sdslot_vreg_enabled = !!vdd; + + return 0; +} + +static unsigned int mahimahi_sdslot_status_rev0(struct device *dev) +{ + return !gpio_get_value(MAHIMAHI_GPIO_SDMC_CD_REV0_N); +} + +#define MAHIMAHI_MMC_VDD (MMC_VDD_165_195 | MMC_VDD_20_21 | \ + MMC_VDD_21_22 | MMC_VDD_22_23 | \ + MMC_VDD_23_24 | MMC_VDD_24_25 | \ + MMC_VDD_25_26 | MMC_VDD_26_27 | \ + MMC_VDD_27_28 | MMC_VDD_28_29 | \ + MMC_VDD_29_30) + +int mahimahi_microp_sdslot_status_register(void (*cb)(int, void *), void *); +unsigned int mahimahi_microp_sdslot_status(struct device *); + +static struct msm_mmc_platform_data mahimahi_sdslot_data = { + .ocr_mask = MAHIMAHI_MMC_VDD, + .status = mahimahi_microp_sdslot_status, + .register_status_notify = mahimahi_microp_sdslot_status_register, + .translate_vdd = mahimahi_sdslot_switchvdd, +}; + +static uint32_t wifi_on_gpio_table[] = { + PCOM_GPIO_CFG(51, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(52, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(53, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(54, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT0 */ + PCOM_GPIO_CFG(55, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* CMD */ + PCOM_GPIO_CFG(56, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* CLK */ + PCOM_GPIO_CFG(152, 0, GPIO_INPUT, GPIO_NO_PULL, GPIO_4MA), /* WLAN IRQ */ +}; + +static uint32_t wifi_off_gpio_table[] = { + PCOM_GPIO_CFG(51, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(52, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(53, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(54, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT0 */ + PCOM_GPIO_CFG(55, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CMD */ + PCOM_GPIO_CFG(56, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CLK */ + PCOM_GPIO_CFG(152, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* WLAN IRQ */ +}; + +/* BCM4329 returns wrong sdio_vsn(1) when we read cccr, + * we use predefined value (sdio_vsn=2) here to initial sdio driver well + */ +static struct embedded_sdio_data mahimahi_wifi_emb_data = { + .cccr = { + .sdio_vsn = 2, + .multi_block = 1, + .low_speed = 0, + .wide_bus = 0, + .high_power = 1, + .high_speed = 1, + }, + .cis = { + .vendor = 0x02d0, + .device = 0x4329, + }, +}; + +static int mahimahi_wifi_cd = 0; /* WIFI virtual 'card detect' status */ +static void (*wifi_status_cb)(int card_present, void *dev_id); +static void *wifi_status_cb_devid; + +static int mahimahi_wifi_status_register( + void (*callback)(int card_present, void *dev_id), + void *dev_id) +{ + if (wifi_status_cb) + return -EAGAIN; + wifi_status_cb = callback; + wifi_status_cb_devid = dev_id; + return 0; +} + +static unsigned int mahimahi_wifi_status(struct device *dev) +{ + return mahimahi_wifi_cd; +} + +static struct msm_mmc_platform_data mahimahi_wifi_data = { + .ocr_mask = MMC_VDD_28_29, + .built_in = 1, + .status = mahimahi_wifi_status, + .register_status_notify = mahimahi_wifi_status_register, + .embedded_sdio = &mahimahi_wifi_emb_data, +}; + +int mahimahi_wifi_set_carddetect(int val) +{ + pr_info("%s: %d\n", __func__, val); + mahimahi_wifi_cd = val; + if (wifi_status_cb) { + wifi_status_cb(val, wifi_status_cb_devid); + } else + 
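/* no MMC host has registered a status callback yet, so the card-detect change cannot be propagated */
+		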
pr_warning("%s: Nobody to notify\n", __func__); + return 0; +} + +static int mahimahi_wifi_power_state; + +int mahimahi_wifi_power(int on) +{ + printk("%s: %d\n", __func__, on); + + if (on) { + config_gpio_table(wifi_on_gpio_table, + ARRAY_SIZE(wifi_on_gpio_table)); + mdelay(50); + } else { + config_gpio_table(wifi_off_gpio_table, + ARRAY_SIZE(wifi_off_gpio_table)); + } + + mdelay(100); + gpio_set_value(MAHIMAHI_GPIO_WIFI_SHUTDOWN_N, on); /* WIFI_SHUTDOWN */ + mdelay(200); + + mahimahi_wifi_power_state = on; + return 0; +} + +static int mahimahi_wifi_reset_state; + +int mahimahi_wifi_reset(int on) +{ + printk("%s: do nothing\n", __func__); + mahimahi_wifi_reset_state = on; + return 0; +} + +int __init mahimahi_init_mmc(unsigned int sys_rev, unsigned debug_uart) +{ + uint32_t id; + + printk("%s()+\n", __func__); + + /* initial WIFI_SHUTDOWN# */ + id = PCOM_GPIO_CFG(MAHIMAHI_GPIO_WIFI_SHUTDOWN_N, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); + + msm_add_sdcc(1, &mahimahi_wifi_data, 0, 0); + + if (debug_uart) { + pr_info("%s: sdcard disabled due to debug uart\n", __func__); + goto done; + } + if (opt_disable_sdcard) { + pr_info("%s: sdcard disabled on cmdline\n", __func__); + goto done; + } + + sdslot_vreg_enabled = 0; + + if (is_cdma_version(sys_rev)) { + /* In the CDMA version, sdslot is supplied by a gpio. */ + int rc = gpio_request(MAHIMAHI_CDMA_SD_2V85_EN, "sdslot_en"); + if (rc < 0) { + pr_err("%s: gpio_request(%d) failed: %d\n", __func__, + MAHIMAHI_CDMA_SD_2V85_EN, rc); + return rc; + } + mahimahi_sdslot_data.translate_vdd = mahimahi_cdma_sdslot_switchvdd; + } else { + /* in UMTS version, sdslot is supplied by pmic */ + sdslot_vreg = vreg_get(0, "gp6"); + if (IS_ERR(sdslot_vreg)) + return PTR_ERR(sdslot_vreg); + } + + if (system_rev > 0) + msm_add_sdcc(2, &mahimahi_sdslot_data, 0, 0); + else { + mahimahi_sdslot_data.status = mahimahi_sdslot_status_rev0; + mahimahi_sdslot_data.register_status_notify = NULL; + set_irq_wake(MSM_GPIO_TO_INT(MAHIMAHI_GPIO_SDMC_CD_REV0_N), 1); + msm_add_sdcc(2, &mahimahi_sdslot_data, + MSM_GPIO_TO_INT(MAHIMAHI_GPIO_SDMC_CD_REV0_N), + IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE); + } + +done: + printk("%s()-\n", __func__); + return 0; +} + +#if defined(MAHIMAHI_DEBUG_MMC) && defined(CONFIG_DEBUG_FS) + +static int mahimahimmc_dbg_wifi_reset_set(void *data, u64 val) +{ + mahimahi_wifi_reset((int) val); + return 0; +} + +static int mahimahimmc_dbg_wifi_reset_get(void *data, u64 *val) +{ + *val = mahimahi_wifi_reset_state; + return 0; +} + +static int mahimahimmc_dbg_wifi_cd_set(void *data, u64 val) +{ + mahimahi_wifi_set_carddetect((int) val); + return 0; +} + +static int mahimahimmc_dbg_wifi_cd_get(void *data, u64 *val) +{ + *val = mahimahi_wifi_cd; + return 0; +} + +static int mahimahimmc_dbg_wifi_pwr_set(void *data, u64 val) +{ + mahimahi_wifi_power((int) val); + return 0; +} + +static int mahimahimmc_dbg_wifi_pwr_get(void *data, u64 *val) +{ + *val = mahimahi_wifi_power_state; + return 0; +} + +static int mahimahimmc_dbg_sd_pwr_set(void *data, u64 val) +{ + mahimahi_sdslot_switchvdd(NULL, (unsigned int) val); + return 0; +} + +static int mahimahimmc_dbg_sd_pwr_get(void *data, u64 *val) +{ + *val = sdslot_vdd; + return 0; +} + +static int mahimahimmc_dbg_sd_cd_set(void *data, u64 val) +{ + return -ENOSYS; +} + +static int mahimahimmc_dbg_sd_cd_get(void *data, u64 *val) +{ + *val = mahimahi_sdslot_data.status(NULL); + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(mahimahimmc_dbg_wifi_reset_fops, + 
mahimahimmc_dbg_wifi_reset_get, + mahimahimmc_dbg_wifi_reset_set, "%llu\n"); + +DEFINE_SIMPLE_ATTRIBUTE(mahimahimmc_dbg_wifi_cd_fops, + mahimahimmc_dbg_wifi_cd_get, + mahimahimmc_dbg_wifi_cd_set, "%llu\n"); + +DEFINE_SIMPLE_ATTRIBUTE(mahimahimmc_dbg_wifi_pwr_fops, + mahimahimmc_dbg_wifi_pwr_get, + mahimahimmc_dbg_wifi_pwr_set, "%llu\n"); + +DEFINE_SIMPLE_ATTRIBUTE(mahimahimmc_dbg_sd_pwr_fops, + mahimahimmc_dbg_sd_pwr_get, + mahimahimmc_dbg_sd_pwr_set, "%llu\n"); + +DEFINE_SIMPLE_ATTRIBUTE(mahimahimmc_dbg_sd_cd_fops, + mahimahimmc_dbg_sd_cd_get, + mahimahimmc_dbg_sd_cd_set, "%llu\n"); + +static int __init mahimahimmc_dbg_init(void) +{ + struct dentry *dent; + + if (!machine_is_mahimahi()) + return 0; + + dent = debugfs_create_dir("mahimahi_mmc_dbg", 0); + if (IS_ERR(dent)) + return PTR_ERR(dent); + + debugfs_create_file("wifi_reset", 0644, dent, NULL, + &mahimahimmc_dbg_wifi_reset_fops); + debugfs_create_file("wifi_cd", 0644, dent, NULL, + &mahimahimmc_dbg_wifi_cd_fops); + debugfs_create_file("wifi_pwr", 0644, dent, NULL, + &mahimahimmc_dbg_wifi_pwr_fops); + debugfs_create_file("sd_pwr", 0644, dent, NULL, + &mahimahimmc_dbg_sd_pwr_fops); + debugfs_create_file("sd_cd", 0644, dent, NULL, + &mahimahimmc_dbg_sd_cd_fops); + return 0; +} + +device_initcall(mahimahimmc_dbg_init); +#endif diff --git a/arch/arm/mach-msm/board-mahimahi-panel.c b/arch/arm/mach-msm/board-mahimahi-panel.c new file mode 100644 index 0000000000000..64b66b7a37d4b --- /dev/null +++ b/arch/arm/mach-msm/board-mahimahi-panel.c @@ -0,0 +1,998 @@ +/* linux/arch/arm/mach-msm/board-mahimahi-panel.c + * + * Copyright (c) 2009 Google Inc. + * Author: Dima Zavin + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include "proc_comm.h" + +#include "board-mahimahi.h" +#include "devices.h" + + +#define SPI_CONFIG (0x00000000) +#define SPI_IO_CONTROL (0x00000004) +#define SPI_OPERATIONAL (0x00000030) +#define SPI_ERROR_FLAGS_EN (0x00000038) +#define SPI_ERROR_FLAGS (0x00000038) +#define SPI_OUTPUT_FIFO (0x00000100) + +static void __iomem *spi_base; +static struct clk *spi_clk ; +static struct vreg *vreg_lcm_rftx_2v6; +static struct vreg *vreg_lcm_aux_2v6; + +static int qspi_send(uint32_t id, uint8_t data) +{ + uint32_t err; + + /* bit-5: OUTPUT_FIFO_NOT_EMPTY */ + while (readl(spi_base + SPI_OPERATIONAL) & (1<<5)) { + if ((err = readl(spi_base + SPI_ERROR_FLAGS))) { + pr_err("%s: ERROR: SPI_ERROR_FLAGS=0x%08x\n", __func__, + err); + return -EIO; + } + } + writel((0x7000 | (id << 9) | data) << 16, spi_base + SPI_OUTPUT_FIFO); + udelay(100); + + return 0; +} + +static int qspi_send_9bit(uint32_t id, uint8_t data) +{ + uint32_t err; + + while (readl(spi_base + SPI_OPERATIONAL) & (1<<5)) { + err = readl(spi_base + SPI_ERROR_FLAGS); + if (err) { + pr_err("%s: ERROR: SPI_ERROR_FLAGS=0x%08x\n", __func__, + err); + return -EIO; + } + } + writel(((id << 8) | data) << 23, spi_base + SPI_OUTPUT_FIFO); + udelay(100); + + return 0; +} + +static int lcm_writeb(uint8_t reg, uint8_t val) +{ + qspi_send(0x0, reg); + qspi_send(0x1, val); + return 0; +} + +static int lcm_writew(uint8_t reg, uint16_t val) +{ + qspi_send(0x0, reg); + qspi_send(0x1, val >> 8); + qspi_send(0x1, val & 0xff); + return 0; +} + +static struct resource resources_msm_fb[] = { + { + .start = MSM_FB_BASE, + .end = MSM_FB_BASE + MSM_FB_SIZE - 1, + .flags = IORESOURCE_MEM, + }, +}; + +struct lcm_tbl { + uint8_t reg; + uint8_t val; +}; + +static struct lcm_tbl samsung_oled_rgb565_init_table[] = { + { 0x31, 0x08 }, + { 0x32, 0x14 }, + { 0x30, 0x2 }, + { 0x27, 0x1 }, + { 0x12, 0x8 }, + { 0x13, 0x8 }, + { 0x15, 0x0 }, + { 0x16, 0x02 }, + { 0x39, 0x24 }, + { 0x17, 0x22 }, + { 0x18, 0x33 }, + { 0x19, 0x3 }, + { 0x1A, 0x1 }, + { 0x22, 0xA4 }, + { 0x23, 0x0 }, + { 0x26, 0xA0 }, +}; + +static struct lcm_tbl samsung_oled_rgb666_init_table[] = { + { 0x31, 0x08 }, + { 0x32, 0x14 }, + { 0x30, 0x2 }, + { 0x27, 0x1 }, + { 0x12, 0x8 }, + { 0x13, 0x8 }, + { 0x15, 0x0 }, + { 0x16, 0x01 }, + { 0x39, 0x24 }, + { 0x17, 0x22 }, + { 0x18, 0x33 }, + { 0x19, 0x3 }, + { 0x1A, 0x1 }, + { 0x22, 0xA4 }, + { 0x23, 0x0 }, + { 0x26, 0xA0 }, +}; + +static struct lcm_tbl *init_tablep = samsung_oled_rgb565_init_table; +static size_t init_table_sz = ARRAY_SIZE(samsung_oled_rgb565_init_table); + +#define OLED_GAMMA_TABLE_SIZE (7 * 3) +static struct lcm_tbl samsung_oled_gamma_table[][OLED_GAMMA_TABLE_SIZE] = { + /* level 10 */ + { + /* Gamma-R */ + { 0x40, 0x0 }, + { 0x41, 0x3f }, + { 0x42, 0x3f }, + { 0x43, 0x35 }, + { 0x44, 0x30 }, + { 0x45, 0x2c }, + { 0x46, 0x13 }, + /* Gamma -G */ + { 0x50, 0x0 }, + { 0x51, 0x0 }, + { 0x52, 0x0 }, + { 0x53, 0x0 }, + { 0x54, 0x27 }, + { 0x55, 0x2b }, + { 0x56, 0x12 }, + /* Gamma -B */ + { 0x60, 0x0 }, + { 0x61, 0x3f }, + { 0x62, 0x3f }, + { 0x63, 0x34 }, + { 0x64, 0x2f }, + { 0x65, 0x2b }, + { 0x66, 0x1b }, + }, + + /* level 40 */ + { + /* Gamma -R */ + { 0x40, 0x0 }, + { 0x41, 0x3f }, + { 0x42, 0x3e }, + { 0x43, 0x2e }, + { 0x44, 0x2d }, + { 0x45, 0x28 }, + { 0x46, 0x21 }, + /* Gamma -G */ + { 0x50, 0x0 }, + { 0x51, 0x0 }, + { 0x52, 0x0 }, + { 0x53, 0x21 }, + { 0x54, 0x2a }, + { 0x55, 0x28 }, + { 
0x56, 0x20 }, + /* Gamma -B */ + { 0x60, 0x0 }, + { 0x61, 0x3f }, + { 0x62, 0x3e }, + { 0x63, 0x2d }, + { 0x64, 0x2b }, + { 0x65, 0x26 }, + { 0x66, 0x2d }, + }, + + /* level 70 */ + { + /* Gamma -R */ + { 0x40, 0x0 }, + { 0x41, 0x3f }, + { 0x42, 0x35 }, + { 0x43, 0x2c }, + { 0x44, 0x2b }, + { 0x45, 0x26 }, + { 0x46, 0x29 }, + /* Gamma -G */ + { 0x50, 0x0 }, + { 0x51, 0x0 }, + { 0x52, 0x0 }, + { 0x53, 0x25 }, + { 0x54, 0x29 }, + { 0x55, 0x26 }, + { 0x56, 0x28 }, + /* Gamma -B */ + { 0x60, 0x0 }, + { 0x61, 0x3f }, + { 0x62, 0x34 }, + { 0x63, 0x2b }, + { 0x64, 0x2a }, + { 0x65, 0x23 }, + { 0x66, 0x37 }, + }, + + /* level 100 */ + { + /* Gamma -R */ + { 0x40, 0x0 }, + { 0x41, 0x3f }, + { 0x42, 0x30 }, + { 0x43, 0x2a }, + { 0x44, 0x2b }, + { 0x45, 0x24 }, + { 0x46, 0x2f }, + /* Gamma -G */ + { 0x50, 0x0 }, + { 0x51, 0x0 }, + { 0x52, 0x0 }, + { 0x53, 0x25 }, + { 0x54, 0x29 }, + { 0x55, 0x24 }, + { 0x56, 0x2e }, + /* Gamma -B */ + { 0x60, 0x0 }, + { 0x61, 0x3f }, + { 0x62, 0x2f }, + { 0x63, 0x29 }, + { 0x64, 0x29 }, + { 0x65, 0x21 }, + { 0x66, 0x3f }, + }, + + /* level 130 */ + { + /* Gamma -R */ + { 0x40, 0x0 }, + { 0x41, 0x3f }, + { 0x42, 0x2e }, + { 0x43, 0x29 }, + { 0x44, 0x2a }, + { 0x45, 0x23 }, + { 0x46, 0x34 }, + /* Gamma -G */ + { 0x50, 0x0 }, + { 0x51, 0x0 }, + { 0x52, 0xa }, + { 0x53, 0x25 }, + { 0x54, 0x28 }, + { 0x55, 0x23 }, + { 0x56, 0x33 }, + /* Gamma -B */ + { 0x60, 0x0 }, + { 0x61, 0x3f }, + { 0x62, 0x2d }, + { 0x63, 0x28 }, + { 0x64, 0x27 }, + { 0x65, 0x20 }, + { 0x66, 0x46 }, + }, + + /* level 160 */ + { + /* Gamma -R */ + { 0x40, 0x0 }, + { 0x41, 0x3f }, + { 0x42, 0x2b }, + { 0x43, 0x29 }, + { 0x44, 0x28 }, + { 0x45, 0x23 }, + { 0x46, 0x38 }, + /* Gamma -G */ + { 0x50, 0x0 }, + { 0x51, 0x0 }, + { 0x52, 0xb }, + { 0x53, 0x25 }, + { 0x54, 0x27 }, + { 0x55, 0x23 }, + { 0x56, 0x37 }, + /* Gamma -B */ + { 0x60, 0x0 }, + { 0x61, 0x3f }, + { 0x62, 0x29 }, + { 0x63, 0x28 }, + { 0x64, 0x25 }, + { 0x65, 0x20 }, + { 0x66, 0x4b }, + }, + + /* level 190 */ + { + /* Gamma -R */ + { 0x40, 0x0 }, + { 0x41, 0x3f }, + { 0x42, 0x29 }, + { 0x43, 0x29 }, + { 0x44, 0x27 }, + { 0x45, 0x22 }, + { 0x46, 0x3c }, + /* Gamma -G */ + { 0x50, 0x0 }, + { 0x51, 0x0 }, + { 0x52, 0x10 }, + { 0x53, 0x26 }, + { 0x54, 0x26 }, + { 0x55, 0x22 }, + { 0x56, 0x3b }, + /* Gamma -B */ + { 0x60, 0x0 }, + { 0x61, 0x3f }, + { 0x62, 0x28 }, + { 0x63, 0x28 }, + { 0x64, 0x24 }, + { 0x65, 0x1f }, + { 0x66, 0x50 }, + }, + + /* level 220 */ + { + /* Gamma -R */ + { 0x40, 0x0 }, + { 0x41, 0x3f }, + { 0x42, 0x28 }, + { 0x43, 0x28 }, + { 0x44, 0x28 }, + { 0x45, 0x20 }, + { 0x46, 0x40 }, + /* Gamma -G */ + { 0x50, 0x0 }, + { 0x51, 0x0 }, + { 0x52, 0x11 }, + { 0x53, 0x25 }, + { 0x54, 0x27 }, + { 0x55, 0x20 }, + { 0x56, 0x3f }, + /* Gamma -B */ + { 0x60, 0x0 }, + { 0x61, 0x3f }, + { 0x62, 0x27 }, + { 0x63, 0x26 }, + { 0x64, 0x26 }, + { 0x65, 0x1c }, + { 0x66, 0x56 }, + }, + + /* level 250 */ + { + /* Gamma -R */ + { 0x40, 0x0 }, + { 0x41, 0x3f }, + { 0x42, 0x2a }, + { 0x43, 0x27 }, + { 0x44, 0x27 }, + { 0x45, 0x1f }, + { 0x46, 0x44 }, + /* Gamma -G */ + { 0x50, 0x0 }, + { 0x51, 0x0 }, + { 0x52, 0x17 }, + { 0x53, 0x24 }, + { 0x54, 0x26 }, + { 0x55, 0x1f }, + { 0x56, 0x43 }, + /* Gamma -B */ + { 0x60, 0x0 }, + { 0x61, 0x3f }, + { 0x62, 0x2a }, + { 0x63, 0x25 }, + { 0x64, 0x24 }, + { 0x65, 0x1b }, + { 0x66, 0x5c }, + }, +}; +#define SAMSUNG_OLED_NUM_LEVELS ARRAY_SIZE(samsung_oled_gamma_table) + +#define SAMSUNG_OLED_MIN_VAL 10 +#define SAMSUNG_OLED_MAX_VAL 250 +#define SAMSUNG_OLED_DEFAULT_VAL (SAMSUNG_OLED_MIN_VAL + \ + 
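/* = 10 + (250 - 10) / 2 = 130; the 9 gamma tables above span 10..250, so LEVEL_STEP below works out to 30 */ \
+				 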
(SAMSUNG_OLED_MAX_VAL - \ + SAMSUNG_OLED_MIN_VAL) / 2) + +#define SAMSUNG_OLED_LEVEL_STEP ((SAMSUNG_OLED_MAX_VAL - \ + SAMSUNG_OLED_MIN_VAL) / \ + (SAMSUNG_OLED_NUM_LEVELS - 1)) + + +#define SONY_TFT_DEF_USER_VAL 102 +#define SONY_TFT_MIN_USER_VAL 30 +#define SONY_TFT_MAX_USER_VAL 255 +#define SONY_TFT_DEF_PANEL_VAL 155 +#define SONY_TFT_MIN_PANEL_VAL 26 +#define SONY_TFT_MAX_PANEL_VAL 255 + + +static DEFINE_MUTEX(panel_lock); +static struct work_struct brightness_delayed_work; +static DEFINE_SPINLOCK(brightness_lock); +static uint8_t new_val = SAMSUNG_OLED_DEFAULT_VAL; +static uint8_t last_val = SAMSUNG_OLED_DEFAULT_VAL; +static uint8_t table_sel_vals[] = { 0x43, 0x34 }; +static int table_sel_idx = 0; +static uint8_t tft_panel_on; + +static void gamma_table_bank_select(void) +{ + lcm_writeb(0x39, table_sel_vals[table_sel_idx]); + table_sel_idx ^= 1; +} + +static void samsung_oled_set_gamma_val(int val) +{ + int i; + int level; + int frac; + + val = clamp(val, SAMSUNG_OLED_MIN_VAL, SAMSUNG_OLED_MAX_VAL); + val = (val / 2) * 2; + + level = (val - SAMSUNG_OLED_MIN_VAL) / SAMSUNG_OLED_LEVEL_STEP; + frac = (val - SAMSUNG_OLED_MIN_VAL) % SAMSUNG_OLED_LEVEL_STEP; + + clk_enable(spi_clk); + + for (i = 0; i < OLED_GAMMA_TABLE_SIZE; ++i) { + unsigned int v1; + unsigned int v2 = 0; + u8 v; + if (frac == 0) { + v = samsung_oled_gamma_table[level][i].val; + } else { + + v1 = samsung_oled_gamma_table[level][i].val; + v2 = samsung_oled_gamma_table[level+1][i].val; + v = (v1 * (SAMSUNG_OLED_LEVEL_STEP - frac) + + v2 * frac) / SAMSUNG_OLED_LEVEL_STEP; + } + lcm_writeb(samsung_oled_gamma_table[level][i].reg, v); + } + + gamma_table_bank_select(); + clk_disable(spi_clk); + last_val = val; +} + +static int samsung_oled_panel_init(struct msm_lcdc_panel_ops *ops) +{ + pr_info("%s: +()\n", __func__); + mutex_lock(&panel_lock); + + clk_enable(spi_clk); + /* Set the gamma write target to 4, leave the current gamma set at 2 */ + lcm_writeb(0x39, 0x24); + clk_disable(spi_clk); + + mutex_unlock(&panel_lock); + pr_info("%s: -()\n", __func__); + return 0; +} + +static int samsung_oled_panel_unblank(struct msm_lcdc_panel_ops *ops) +{ + int i; + + pr_info("%s: +()\n", __func__); + + mutex_lock(&panel_lock); + + gpio_set_value(MAHIMAHI_GPIO_LCD_RST_N, 1); + udelay(50); + gpio_set_value(MAHIMAHI_GPIO_LCD_RST_N, 0); + udelay(20); + gpio_set_value(MAHIMAHI_GPIO_LCD_RST_N, 1); + msleep(20); + + clk_enable(spi_clk); + + for (i = 0; i < init_table_sz; i++) + lcm_writeb(init_tablep[i].reg, init_tablep[i].val); + + lcm_writew(0xef, 0xd0e8); + lcm_writeb(0x1d, 0xa0); + table_sel_idx = 0; + gamma_table_bank_select(); + samsung_oled_set_gamma_val(last_val); + msleep(250); + lcm_writeb(0x14, 0x03); + clk_disable(spi_clk); + + mutex_unlock(&panel_lock); + + pr_info("%s: -()\n", __func__); + return 0; +} + +static int samsung_oled_panel_blank(struct msm_lcdc_panel_ops *ops) +{ + pr_info("%s: +()\n", __func__); + mutex_lock(&panel_lock); + + clk_enable(spi_clk); + lcm_writeb(0x14, 0x0); + mdelay(1); + lcm_writeb(0x1d, 0xa1); + clk_disable(spi_clk); + msleep(200); + + gpio_set_value(MAHIMAHI_GPIO_LCD_RST_N, 0); + + mutex_unlock(&panel_lock); + pr_info("%s: -()\n", __func__); + return 0; +} + +struct lcm_cmd { + int reg; + uint32_t val; + unsigned delay; +}; + +#define LCM_GPIO_CFG(gpio, func, str) \ + PCOM_GPIO_CFG(gpio, func, GPIO_OUTPUT, GPIO_NO_PULL, str) + +static uint32_t sony_tft_display_on_gpio_table[] = { + LCM_GPIO_CFG(MAHIMAHI_LCD_R1, 1, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_R2, 1, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_R3, 
1, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_R4, 1, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_R5, 1, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_G0, 1, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_G1, 1, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_G2, 1, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_G3, 1, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_G4, 1, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_G5, 1, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_B1, 1, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_B2, 1, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_B3, 1, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_B4, 1, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_B5, 1, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_PCLK, 1, GPIO_4MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_VSYNC, 1, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_HSYNC, 1, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_DE, 1, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_SPI_CLK, 1, GPIO_4MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_SPI_DO, 1, GPIO_4MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_SPI_CSz, 1, GPIO_4MA), +}; + +static uint32_t sony_tft_display_off_gpio_table[] = { + LCM_GPIO_CFG(MAHIMAHI_LCD_R1, 0, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_R2, 0, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_R3, 0, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_R4, 0, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_R5, 0, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_G0, 0, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_G1, 0, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_G2, 0, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_G3, 0, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_G4, 0, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_G5, 0, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_B1, 0, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_B2, 0, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_B3, 0, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_B4, 0, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_B5, 0, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_PCLK, 0, GPIO_4MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_VSYNC, 0, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_HSYNC, 0, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_DE, 0, GPIO_8MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_SPI_CLK, 0, GPIO_4MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_SPI_DO, 0, GPIO_4MA), + LCM_GPIO_CFG(MAHIMAHI_LCD_SPI_CSz, 0, GPIO_4MA), +}; + +#undef LCM_GPIO_CFG + +#define SONY_TFT_DEF_PANEL_DELTA \ + (SONY_TFT_DEF_PANEL_VAL - SONY_TFT_MIN_PANEL_VAL) +#define SONY_TFT_DEF_USER_DELTA \ + (SONY_TFT_DEF_USER_VAL - SONY_TFT_MIN_USER_VAL) + +static void sony_tft_set_pwm_val(int val) +{ + pr_info("%s: %d\n", __func__, val); + + last_val = val; + + if (!tft_panel_on) + return; + + if (val <= SONY_TFT_DEF_USER_VAL) { + if (val <= SONY_TFT_MIN_USER_VAL) + val = SONY_TFT_MIN_PANEL_VAL; + else + val = SONY_TFT_DEF_PANEL_DELTA * + (val - SONY_TFT_MIN_USER_VAL) / + SONY_TFT_DEF_USER_DELTA + + SONY_TFT_MIN_PANEL_VAL; + } else + val = (SONY_TFT_MAX_PANEL_VAL - SONY_TFT_DEF_PANEL_VAL) * + (val - SONY_TFT_DEF_USER_VAL) / + (SONY_TFT_MAX_USER_VAL - SONY_TFT_DEF_USER_VAL) + + SONY_TFT_DEF_PANEL_VAL; + + clk_enable(spi_clk); + qspi_send_9bit(0x0, 0x51); + qspi_send_9bit(0x1, val); + qspi_send_9bit(0x0, 0x53); + qspi_send_9bit(0x1, 0x24); + clk_disable(spi_clk); +} + +#undef SONY_TFT_DEF_PANEL_DELTA +#undef SONY_TFT_DEF_USER_DELTA + +static void sony_tft_panel_config_gpio_table(uint32_t *table, int len) +{ + int n; + unsigned id; + for (n = 0; n < len; n++) { + id = table[n]; + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); + } +} + + +static int sony_tft_panel_power(int on) +{ + unsigned id, on_off; + + if (on) { + on_off = 0; + + vreg_enable(vreg_lcm_aux_2v6); + vreg_enable(vreg_lcm_rftx_2v6); + + id = PM_VREG_PDOWN_AUX_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + + 
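/* on_off is still 0 here: release the pull-down on the RFTX rail as well before toggling the panel reset line below */
+		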
id = PM_VREG_PDOWN_RFTX_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + mdelay(10); + gpio_set_value(MAHIMAHI_GPIO_LCD_RST_N, 1); + mdelay(10); + gpio_set_value(MAHIMAHI_GPIO_LCD_RST_N, 0); + udelay(500); + gpio_set_value(MAHIMAHI_GPIO_LCD_RST_N, 1); + mdelay(10); + sony_tft_panel_config_gpio_table( + sony_tft_display_on_gpio_table, + ARRAY_SIZE(sony_tft_display_on_gpio_table)); + } else { + on_off = 1; + + gpio_set_value(MAHIMAHI_GPIO_LCD_RST_N, 0); + + mdelay(120); + + vreg_disable(vreg_lcm_rftx_2v6); + vreg_disable(vreg_lcm_aux_2v6); + + id = PM_VREG_PDOWN_RFTX_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + + id = PM_VREG_PDOWN_AUX_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + sony_tft_panel_config_gpio_table( + sony_tft_display_off_gpio_table, + ARRAY_SIZE(sony_tft_display_off_gpio_table)); + } + return 0; +} + +static int sony_tft_panel_init(struct msm_lcdc_panel_ops *ops) +{ + return 0; +} + +static int sony_tft_panel_unblank(struct msm_lcdc_panel_ops *ops) +{ + pr_info("%s: +()\n", __func__); + + mutex_lock(&panel_lock); + + if (tft_panel_on) { + pr_info("%s: -() already unblanked\n", __func__); + goto done; + } + + sony_tft_panel_power(1); + msleep(45); + + clk_enable(spi_clk); + qspi_send_9bit(0x0, 0x11); + msleep(5); + qspi_send_9bit(0x0, 0x3a); + qspi_send_9bit(0x1, 0x05); + msleep(100); + qspi_send_9bit(0x0, 0x29); + /* unlock register page for pwm setting */ + qspi_send_9bit(0x0, 0xf0); + qspi_send_9bit(0x1, 0x5a); + qspi_send_9bit(0x1, 0x5a); + qspi_send_9bit(0x0, 0xf1); + qspi_send_9bit(0x1, 0x5a); + qspi_send_9bit(0x1, 0x5a); + qspi_send_9bit(0x0, 0xd0); + qspi_send_9bit(0x1, 0x5a); + qspi_send_9bit(0x1, 0x5a); + + qspi_send_9bit(0x0, 0xc2); + qspi_send_9bit(0x1, 0x53); + qspi_send_9bit(0x1, 0x12); + clk_disable(spi_clk); + msleep(100); + tft_panel_on = 1; + sony_tft_set_pwm_val(last_val); + + pr_info("%s: -()\n", __func__); +done: + mutex_unlock(&panel_lock); + return 0; +} + +static int sony_tft_panel_blank(struct msm_lcdc_panel_ops *ops) +{ + pr_info("%s: +()\n", __func__); + + mutex_lock(&panel_lock); + + clk_enable(spi_clk); + qspi_send_9bit(0x0, 0x28); + qspi_send_9bit(0x0, 0x10); + clk_disable(spi_clk); + + msleep(40); + sony_tft_panel_power(0); + tft_panel_on = 0; + + mutex_unlock(&panel_lock); + + pr_info("%s: -()\n", __func__); + return 0; +} + +static struct msm_lcdc_panel_ops mahimahi_lcdc_amoled_panel_ops = { + .init = samsung_oled_panel_init, + .blank = samsung_oled_panel_blank, + .unblank = samsung_oled_panel_unblank, +}; + +static struct msm_lcdc_panel_ops mahimahi_lcdc_tft_panel_ops = { + .init = sony_tft_panel_init, + .blank = sony_tft_panel_blank, + .unblank = sony_tft_panel_unblank, +}; + + +static struct msm_lcdc_timing mahimahi_lcdc_amoled_timing = { + .clk_rate = 24576000, + .hsync_pulse_width = 4, + .hsync_back_porch = 8, + .hsync_front_porch = 8, + .hsync_skew = 0, + .vsync_pulse_width = 2, + .vsync_back_porch = 8, + .vsync_front_porch = 8, + .vsync_act_low = 1, + .hsync_act_low = 1, + .den_act_low = 1, +}; + +static struct msm_lcdc_timing mahimahi_lcdc_tft_timing = { + .clk_rate = 24576000, + .hsync_pulse_width = 2, + .hsync_back_porch = 20, + .hsync_front_porch = 20, + .hsync_skew = 0, + .vsync_pulse_width = 2, + .vsync_back_porch = 6, + .vsync_front_porch = 4, + .vsync_act_low = 1, + .hsync_act_low = 1, + .den_act_low = 0, +}; + +static struct msm_fb_data mahimahi_lcdc_fb_data = { + .xres = 480, + .yres = 800, + .width = 48, + .height = 80, + .output_format = MSM_MDP_OUT_IF_FMT_RGB565, +}; + +static struct 
msm_lcdc_platform_data mahimahi_lcdc_amoled_platform_data = { + .panel_ops = &mahimahi_lcdc_amoled_panel_ops, + .timing = &mahimahi_lcdc_amoled_timing, + .fb_id = 0, + .fb_data = &mahimahi_lcdc_fb_data, + .fb_resource = &resources_msm_fb[0], +}; + +static struct msm_lcdc_platform_data mahimahi_lcdc_tft_platform_data = { + .panel_ops = &mahimahi_lcdc_tft_panel_ops, + .timing = &mahimahi_lcdc_tft_timing, + .fb_id = 0, + .fb_data = &mahimahi_lcdc_fb_data, + .fb_resource = &resources_msm_fb[0], +}; + +static struct platform_device mahimahi_lcdc_amoled_device = { + .name = "msm_mdp_lcdc", + .id = -1, + .dev = { + .platform_data = &mahimahi_lcdc_amoled_platform_data, + }, +}; + +static struct platform_device mahimahi_lcdc_tft_device = { + .name = "msm_mdp_lcdc", + .id = -1, + .dev = { + .platform_data = &mahimahi_lcdc_tft_platform_data, + }, +}; + +static int mahimahi_init_spi_hack(void) +{ + int ret; + + spi_base = ioremap(MSM_SPI_PHYS, MSM_SPI_SIZE); + if (!spi_base) + return -1; + + spi_clk = clk_get(&msm_device_spi.dev, "spi_clk"); + if (IS_ERR(spi_clk)) { + pr_err("%s: unable to get spi_clk\n", __func__); + ret = PTR_ERR(spi_clk); + goto err_clk_get; + } + + clk_enable(spi_clk); + + printk("spi: SPI_CONFIG=%x\n", readl(spi_base + SPI_CONFIG)); + printk("spi: SPI_IO_CONTROL=%x\n", readl(spi_base + SPI_IO_CONTROL)); + printk("spi: SPI_OPERATIONAL=%x\n", readl(spi_base + SPI_OPERATIONAL)); + printk("spi: SPI_ERROR_FLAGS_EN=%x\n", + readl(spi_base + SPI_ERROR_FLAGS_EN)); + printk("spi: SPI_ERROR_FLAGS=%x\n", readl(spi_base + SPI_ERROR_FLAGS)); + printk("-%s()\n", __FUNCTION__); + clk_disable(spi_clk); + + return 0; + +err_clk_get: + iounmap(spi_base); + return ret; +} + +static void mahimahi_brightness_set(struct led_classdev *led_cdev, + enum led_brightness val) +{ + unsigned long flags; + led_cdev->brightness = val; + + spin_lock_irqsave(&brightness_lock, flags); + new_val = val; + spin_unlock_irqrestore(&brightness_lock, flags); + + schedule_work(&brightness_delayed_work); +} + +static void mahimahi_brightness_amoled_set_work(struct work_struct *work_ptr) +{ + unsigned long flags; + uint8_t val; + + spin_lock_irqsave(&brightness_lock, flags); + val = new_val; + spin_unlock_irqrestore(&brightness_lock, flags); + + mutex_lock(&panel_lock); + samsung_oled_set_gamma_val(val); + mutex_unlock(&panel_lock); +} + +static void mahimahi_brightness_tft_set_work(struct work_struct *work_ptr) +{ + unsigned long flags; + uint8_t val; + + spin_lock_irqsave(&brightness_lock, flags); + val = new_val; + spin_unlock_irqrestore(&brightness_lock, flags); + + mutex_lock(&panel_lock); + sony_tft_set_pwm_val(val); + mutex_unlock(&panel_lock); +} + +static struct led_classdev mahimahi_brightness_led = { + .name = "lcd-backlight", + .brightness = LED_FULL, + .brightness_set = mahimahi_brightness_set, +}; + +int __init mahimahi_init_panel(void) +{ + int ret; + + if (!machine_is_mahimahi()) + return 0; + + if (system_rev > 0xC0) { + /* CDMA version (except for EVT1) supports RGB666 */ + init_tablep = samsung_oled_rgb666_init_table; + init_table_sz = ARRAY_SIZE(samsung_oled_rgb666_init_table); + mahimahi_lcdc_fb_data.output_format = MSM_MDP_OUT_IF_FMT_RGB666; + } + + ret = platform_device_register(&msm_device_mdp); + if (ret != 0) + return ret; + + ret = mahimahi_init_spi_hack(); + if (ret != 0) + return ret; + + if (gpio_get_value(MAHIMAHI_GPIO_LCD_ID0)) { + pr_info("%s: tft panel\n", __func__); + vreg_lcm_rftx_2v6 = vreg_get(0, "rftx"); + if (IS_ERR(vreg_lcm_rftx_2v6)) + return PTR_ERR(vreg_lcm_rftx_2v6); + 
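/* the Sony TFT panel's RFTX rail runs at 2.6V (2600 mV), matching the _2v6 naming of the vreg handle */
+		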
vreg_set_level(vreg_lcm_rftx_2v6, 2600); + + vreg_lcm_aux_2v6 = vreg_get(0, "gp4"); + if (IS_ERR(vreg_lcm_aux_2v6)) + return PTR_ERR(vreg_lcm_aux_2v6); + + if (gpio_get_value(MAHIMAHI_GPIO_LCD_RST_N)) + tft_panel_on = 1; + ret = platform_device_register(&mahimahi_lcdc_tft_device); + INIT_WORK(&brightness_delayed_work, mahimahi_brightness_tft_set_work); + } else { + pr_info("%s: amoled panel\n", __func__); + ret = platform_device_register(&mahimahi_lcdc_amoled_device); + INIT_WORK(&brightness_delayed_work, mahimahi_brightness_amoled_set_work); + } + + if (ret != 0) + return ret; + + ret = led_classdev_register(NULL, &mahimahi_brightness_led); + if (ret != 0) { + pr_err("%s: Cannot register brightness led\n", __func__); + return ret; + } + + return 0; +} + +device_initcall(mahimahi_init_panel); diff --git a/arch/arm/mach-msm/board-mahimahi-rfkill.c b/arch/arm/mach-msm/board-mahimahi-rfkill.c new file mode 100644 index 0000000000000..05c9bb0b4d556 --- /dev/null +++ b/arch/arm/mach-msm/board-mahimahi-rfkill.c @@ -0,0 +1,122 @@ +/* + * Copyright (C) 2009 Google, Inc. + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "board-mahimahi.h" + +static struct rfkill *bt_rfk; +static const char bt_name[] = "bcm4329"; + +static int bluetooth_set_power(void *data, bool blocked) +{ + if (!blocked) { + gpio_direction_output(MAHIMAHI_GPIO_BT_RESET_N, 1); + gpio_direction_output(MAHIMAHI_GPIO_BT_SHUTDOWN_N, 1); + } else { + gpio_direction_output(MAHIMAHI_GPIO_BT_SHUTDOWN_N, 0); + gpio_direction_output(MAHIMAHI_GPIO_BT_RESET_N, 0); + } + return 0; +} + +static struct rfkill_ops mahimahi_rfkill_ops = { + .set_block = bluetooth_set_power, +}; + +static int mahimahi_rfkill_probe(struct platform_device *pdev) +{ + int rc = 0; + bool default_state = true; /* off */ + + rc = gpio_request(MAHIMAHI_GPIO_BT_RESET_N, "bt_reset"); + if (rc) + goto err_gpio_reset; + rc = gpio_request(MAHIMAHI_GPIO_BT_SHUTDOWN_N, "bt_shutdown"); + if (rc) + goto err_gpio_shutdown; + + bluetooth_set_power(NULL, default_state); + + bt_rfk = rfkill_alloc(bt_name, &pdev->dev, RFKILL_TYPE_BLUETOOTH, + &mahimahi_rfkill_ops, NULL); + if (!bt_rfk) { + rc = -ENOMEM; + goto err_rfkill_alloc; + } + + rfkill_set_states(bt_rfk, default_state, false); + + /* userspace cannot take exclusive control */ + + rc = rfkill_register(bt_rfk); + if (rc) + goto err_rfkill_reg; + + return 0; + +err_rfkill_reg: + rfkill_destroy(bt_rfk); +err_rfkill_alloc: + gpio_free(MAHIMAHI_GPIO_BT_SHUTDOWN_N); +err_gpio_shutdown: + gpio_free(MAHIMAHI_GPIO_BT_RESET_N); +err_gpio_reset: + return rc; +} + +static int mahimahi_rfkill_remove(struct platform_device *dev) +{ + rfkill_unregister(bt_rfk); + rfkill_destroy(bt_rfk); + gpio_free(MAHIMAHI_GPIO_BT_SHUTDOWN_N); + gpio_free(MAHIMAHI_GPIO_BT_RESET_N); + + return 0; +} + +static struct platform_driver mahimahi_rfkill_driver = { + .probe = mahimahi_rfkill_probe, + .remove = mahimahi_rfkill_remove, + .driver = { + .name = "mahimahi_rfkill", + .owner = THIS_MODULE, + }, +}; + +static int __init 
mahimahi_rfkill_init(void)
+{
+	if (!machine_is_mahimahi())
+		return 0;
+
+	return platform_driver_register(&mahimahi_rfkill_driver);
+}
+
+static void __exit mahimahi_rfkill_exit(void)
+{
+	platform_driver_unregister(&mahimahi_rfkill_driver);
+}
+
+module_init(mahimahi_rfkill_init);
+module_exit(mahimahi_rfkill_exit);
+MODULE_DESCRIPTION("mahimahi rfkill");
+MODULE_AUTHOR("Nick Pelly ");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-msm/board-mahimahi-smb329.c b/arch/arm/mach-msm/board-mahimahi-smb329.c
new file mode 100755
index 0000000000000..b80db78491e19
--- /dev/null
+++ b/arch/arm/mach-msm/board-mahimahi-smb329.c
@@ -0,0 +1,177 @@
+/* drivers/i2c/chips/smb329.c
+ *
+ * SMB329B Switch Charger (SUMMIT Microelectronics)
+ *
+ * Copyright (C) 2009 HTC Corporation
+ * Author: Justin Lin 
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "board-mahimahi-smb329.h"
+
+static struct smb329_data {
+	struct i2c_client *client;
+	uint8_t version;
+	struct work_struct work;
+	struct mutex state_lock;
+	int chg_state;
+} smb329;
+
+static int smb329_i2c_write(uint8_t *value, uint8_t reg, uint8_t num_bytes)
+{
+	int ret;
+	struct i2c_msg msg;
+
+	/* write the first byte of buffer as the register address */
+	value[0] = reg;
+	msg.addr = smb329.client->addr;
+	msg.len = num_bytes + 1;
+	msg.flags = 0;
+	msg.buf = value;
+
+	ret = i2c_transfer(smb329.client->adapter, &msg, 1);
+
+	return (ret >= 0) ? 0 : ret;
+}
+
+static int smb329_i2c_read(uint8_t *value, uint8_t reg, uint8_t num_bytes)
+{
+	int ret;
+	struct i2c_msg msg[2];
+
+	/* setup the address to read */
+	msg[0].addr = smb329.client->addr;
+	msg[0].len = 1;
+	msg[0].flags = 0;
+	msg[0].buf = &reg;
+
+	/* setup the read buffer */
+	msg[1].addr = smb329.client->addr;
+	msg[1].flags = I2C_M_RD;
+	msg[1].len = num_bytes;
+	msg[1].buf = value;
+
+	ret = i2c_transfer(smb329.client->adapter, msg, 2);
+
+	return (ret >= 0) ?
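/* i2c_transfer() returns the number of messages processed on success, so collapse any non-negative result to 0 */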
0 : ret; +} + +static int smb329_i2c_write_byte(uint8_t value, uint8_t reg) +{ + int ret; + uint8_t buf[2] = { 0 }; + + buf[1] = value; + ret = smb329_i2c_write(buf, reg, 1); + if (ret) + pr_err("smb329: write byte error (%d)\n", ret); + + return ret; +} + +static int smb329_i2c_read_byte(uint8_t *value, uint8_t reg) +{ + int ret = smb329_i2c_read(value, reg, 1); + if (ret) + pr_err("smb329: read byte error (%d)\n", ret); + + return ret; +} + +int smb329_set_charger_ctrl(uint32_t ctl) +{ + mutex_lock(&smb329.state_lock); + smb329.chg_state = ctl; + schedule_work(&smb329.work); + mutex_unlock(&smb329.state_lock); + return 0; +} + +static void smb329_work_func(struct work_struct *work) +{ + mutex_lock(&smb329.state_lock); + + switch (smb329.chg_state) { + case SMB329_ENABLE_FAST_CHG: + pr_info("smb329: charger on (fast)\n"); + smb329_i2c_write_byte(0x84, 0x31); + smb329_i2c_write_byte(0x08, 0x05); + if ((smb329.version & 0x18) == 0x0) + smb329_i2c_write_byte(0xA9, 0x00); + break; + + case SMB329_DISABLE_CHG: + case SMB329_ENABLE_SLOW_CHG: + pr_info("smb329: charger off/slow\n"); + smb329_i2c_write_byte(0x88, 0x31); + smb329_i2c_write_byte(0x08, 0x05); + break; + default: + pr_err("smb329: unknown charger state %d\n", + smb329.chg_state); + } + + mutex_unlock(&smb329.state_lock); +} + +static int smb329_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C) == 0) { + dev_dbg(&client->dev, "[SMB329]:I2C fail\n"); + return -EIO; + } + + smb329.client = client; + mutex_init(&smb329.state_lock); + INIT_WORK(&smb329.work, smb329_work_func); + + smb329_i2c_read_byte(&smb329.version, 0x3B); + pr_info("smb329 version: 0x%02x\n", smb329.version); + + return 0; +} + +static const struct i2c_device_id smb329_id[] = { + { "smb329", 0 }, + { }, +}; + +static struct i2c_driver smb329_driver = { + .driver.name = "smb329", + .id_table = smb329_id, + .probe = smb329_probe, +}; + +static int __init smb329_init(void) +{ + int ret = i2c_add_driver(&smb329_driver); + if (ret) + pr_err("smb329_init: failed\n"); + + return ret; +} + +module_init(smb329_init); + +MODULE_AUTHOR("Justin Lin "); +MODULE_DESCRIPTION("SUMMIT Microelectronics SMB329B switch charger"); +MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-msm/board-mahimahi-smb329.h b/arch/arm/mach-msm/board-mahimahi-smb329.h new file mode 100644 index 0000000000000..13b326fa71dfa --- /dev/null +++ b/arch/arm/mach-msm/board-mahimahi-smb329.h @@ -0,0 +1,32 @@ +/* include/linux/smb329.h - smb329 switch charger driver + * + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _LINUX_SMB329_H +#define _LINUX_SMB329_H + +#ifdef __KERNEL__ + +enum { + SMB329_DISABLE_CHG, + SMB329_ENABLE_SLOW_CHG, + SMB329_ENABLE_FAST_CHG, +}; + +extern int smb329_set_charger_ctrl(uint32_t ctl); + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_SMB329_H */ + diff --git a/arch/arm/mach-msm/board-mahimahi-tpa2018d1.c b/arch/arm/mach-msm/board-mahimahi-tpa2018d1.c new file mode 100644 index 0000000000000..7f02762ef50ee --- /dev/null +++ b/arch/arm/mach-msm/board-mahimahi-tpa2018d1.c @@ -0,0 +1,368 @@ +/* drivers/i2c/chips/tpa2018d1.c + * + * TI TPA2018D1 Speaker Amplifier + * + * Copyright (C) 2009 HTC Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* TODO: content validation in TPA2018_SET_CONFIG */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "board-mahimahi-tpa2018d1.h" + +static struct i2c_client *this_client; +static struct tpa2018d1_platform_data *pdata; +static int is_on; +static char spk_amp_cfg[8]; +static const char spk_amp_on[8] = { /* same length as spk_amp_cfg */ + 0x01, 0xc3, 0x20, 0x01, 0x00, 0x08, 0x1a, 0x21 +}; +static const char spk_amp_off[] = {0x01, 0xa2}; + +static DEFINE_MUTEX(spk_amp_lock); +static int tpa2018d1_opened; +static char *config_data; +static int tpa2018d1_num_modes; + +#define DEBUG 0 + +static int tpa2018_i2c_write(const char *txData, int length) +{ + struct i2c_msg msg[] = { + { + .addr = this_client->addr, + .flags = 0, + .len = length, + .buf = txData, + }, + }; + + if (i2c_transfer(this_client->adapter, msg, 1) < 0) { + pr_err("%s: I2C transfer error\n", __func__); + return -EIO; + } else + return 0; +} + +static int tpa2018_i2c_read(char *rxData, int length) +{ + struct i2c_msg msgs[] = { + { + .addr = this_client->addr, + .flags = I2C_M_RD, + .len = length, + .buf = rxData, + }, + }; + + if (i2c_transfer(this_client->adapter, msgs, 1) < 0) { + pr_err("%s: I2C transfer error\n", __func__); + return -EIO; + } + +#if DEBUG + do { + int i = 0; + for (i = 0; i < length; i++) + pr_info("%s: rx[%d] = %2x\n", + __func__, i, rxData[i]); + } while(0); +#endif + + return 0; +} + +static int tpa2018d1_open(struct inode *inode, struct file *file) +{ + int rc = 0; + + mutex_lock(&spk_amp_lock); + + if (tpa2018d1_opened) { + pr_err("%s: busy\n", __func__); + rc = -EBUSY; + goto done; + } + + tpa2018d1_opened = 1; +done: + mutex_unlock(&spk_amp_lock); + return rc; +} + +static int tpa2018d1_release(struct inode *inode, struct file *file) +{ + mutex_lock(&spk_amp_lock); + tpa2018d1_opened = 0; + mutex_unlock(&spk_amp_lock); + + return 0; +} + +static int tpa2018d1_read_config(void __user *argp) +{ + int rc = 0; + unsigned char reg_idx = 0x01; + unsigned char tmp[7]; + + if (!is_on) { + gpio_set_value(pdata->gpio_tpa2018_spk_en, 1); + msleep(5); /* According to TPA2018D1 Spec */ + } + + rc = tpa2018_i2c_write(®_idx, sizeof(reg_idx)); + if (rc < 0) + goto err; + + rc = tpa2018_i2c_read(tmp, sizeof(tmp)); + if (rc < 0) + goto err; + + if (copy_to_user(argp, &tmp, sizeof(tmp))) + rc = -EFAULT; + +err: + if (!is_on) + 
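/* the amp was powered up only for this register read, so drop the enable GPIO again on the way out */
+		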
gpio_set_value(pdata->gpio_tpa2018_spk_en, 0); + return rc; +} + +static long tpa2018d1_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + int rc = 0; + int mode = -1; + int offset = 0; + struct tpa2018d1_config_data cfg; + + mutex_lock(&spk_amp_lock); + + switch (cmd) { + case TPA2018_SET_CONFIG: + if (copy_from_user(spk_amp_cfg, argp, sizeof(spk_amp_cfg))) + rc = -EFAULT; + break; + + case TPA2018_READ_CONFIG: + rc = tpa2018d1_read_config(argp); + break; + + case TPA2018_SET_MODE: + if (copy_from_user(&mode, argp, sizeof(mode))) { + rc = -EFAULT; + break; + } + if (mode >= tpa2018d1_num_modes || mode < 0) { + pr_err("%s: unsupported tpa2018d1 mode %d\n", + __func__, mode); + rc = -EINVAL; + break; + } + if (!config_data) { + pr_err("%s: no config data!\n", __func__); + rc = -EIO; + break; + } + memcpy(spk_amp_cfg, config_data + mode * TPA2018D1_CMD_LEN, + TPA2018D1_CMD_LEN); + break; + + case TPA2018_SET_PARAM: + if (copy_from_user(&cfg, argp, sizeof(cfg))) { + pr_err("%s: copy from user failed.\n", __func__); + rc = -EFAULT; + break; + } + tpa2018d1_num_modes = cfg.mode_num; + if (tpa2018d1_num_modes > TPA2018_NUM_MODES) { + pr_err("%s: invalid number of modes %d\n", __func__, + tpa2018d1_num_modes); + rc = -EINVAL; + break; + } + if (cfg.data_len != tpa2018d1_num_modes*TPA2018D1_CMD_LEN) { + pr_err("%s: invalid data length %d, expecting %d\n", + __func__, cfg.data_len, + tpa2018d1_num_modes * TPA2018D1_CMD_LEN); + rc = -EINVAL; + break; + } + /* Free the old data */ + if (config_data) + kfree(config_data); + config_data = kmalloc(cfg.data_len, GFP_KERNEL); + if (!config_data) { + pr_err("%s: out of memory\n", __func__); + rc = -ENOMEM; + break; + } + if (copy_from_user(config_data, cfg.cmd_data, cfg.data_len)) { + pr_err("%s: copy data from user failed.\n", __func__); + kfree(config_data); + config_data = NULL; + rc = -EFAULT; + break; + } + /* replace default setting with playback setting */ + if (tpa2018d1_num_modes >= TPA2018_MODE_PLAYBACK) { + offset = TPA2018_MODE_PLAYBACK * TPA2018D1_CMD_LEN; + memcpy(spk_amp_cfg, config_data + offset, + TPA2018D1_CMD_LEN); + } + break; + + default: + pr_err("%s: invalid command %d\n", __func__, _IOC_NR(cmd)); + rc = -EINVAL; + break; + } + mutex_unlock(&spk_amp_lock); + return rc; +} + +static struct file_operations tpa2018d1_fops = { + .owner = THIS_MODULE, + .open = tpa2018d1_open, + .release = tpa2018d1_release, + .unlocked_ioctl = tpa2018d1_ioctl, +}; + +static struct miscdevice tpa2018d1_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "tpa2018d1", + .fops = &tpa2018d1_fops, +}; + +void tpa2018d1_set_speaker_amp(int on) +{ + if (!pdata) { + pr_err("%s: no platform data!\n", __func__); + return; + } + mutex_lock(&spk_amp_lock); + if (on && !is_on) { + gpio_set_value(pdata->gpio_tpa2018_spk_en, 1); + msleep(5); /* According to TPA2018D1 Spec */ + + if (tpa2018_i2c_write(spk_amp_cfg, sizeof(spk_amp_cfg)) == 0) { + is_on = 1; + pr_info("%s: ON\n", __func__); + } + } else if (!on && is_on) { + if (tpa2018_i2c_write(spk_amp_off, sizeof(spk_amp_off)) == 0) { + is_on = 0; + msleep(2); + gpio_set_value(pdata->gpio_tpa2018_spk_en, 0); + pr_info("%s: OFF\n", __func__); + } + } + mutex_unlock(&spk_amp_lock); +} + +static int tpa2018d1_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + int ret = 0; + + pdata = client->dev.platform_data; + + if (!pdata) { + ret = -EINVAL; + pr_err("%s: platform data is NULL\n", __func__); + goto err_no_pdata; + } + + this_client = client; + 
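+	/* claim the speaker-amp enable GPIO; it is driven low a little
+	 * further down so the amp stays off until the first
+	 * tpa2018d1_set_speaker_amp(1) call
+	 */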
+ ret = gpio_request(pdata->gpio_tpa2018_spk_en, "tpa2018"); + if (ret < 0) { + pr_err("%s: gpio request aud_spk_en pin failed\n", __func__); + goto err_free_gpio; + } + + ret = gpio_direction_output(pdata->gpio_tpa2018_spk_en, 1); + if (ret < 0) { + pr_err("%s: request aud_spk_en gpio direction failed\n", + __func__); + goto err_free_gpio; + } + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + pr_err("%s: i2c check functionality error\n", __func__); + ret = -ENODEV; + goto err_free_gpio; + } + + gpio_set_value(pdata->gpio_tpa2018_spk_en, 0); /* Default Low */ + + ret = misc_register(&tpa2018d1_device); + if (ret) { + pr_err("%s: tpa2018d1_device register failed\n", __func__); + goto err_free_gpio; + } + memcpy(spk_amp_cfg, spk_amp_on, sizeof(spk_amp_on)); + return 0; + +err_free_gpio: + gpio_free(pdata->gpio_tpa2018_spk_en); +err_no_pdata: + return ret; +} + +static int tpa2018d1_suspend(struct i2c_client *client, pm_message_t mesg) +{ + return 0; +} + +static int tpa2018d1_resume(struct i2c_client *client) +{ + return 0; +} + +static const struct i2c_device_id tpa2018d1_id[] = { + { TPA2018D1_I2C_NAME, 0 }, + { } +}; + +static struct i2c_driver tpa2018d1_driver = { + .probe = tpa2018d1_probe, + .suspend = tpa2018d1_suspend, + .resume = tpa2018d1_resume, + .id_table = tpa2018d1_id, + .driver = { + .name = TPA2018D1_I2C_NAME, + }, +}; + +static int __init tpa2018d1_init(void) +{ + pr_info("%s\n", __func__); + return i2c_add_driver(&tpa2018d1_driver); +} + +module_init(tpa2018d1_init); + +MODULE_DESCRIPTION("tpa2018d1 speaker amp driver"); +MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-msm/board-mahimahi-tpa2018d1.h b/arch/arm/mach-msm/board-mahimahi-tpa2018d1.h new file mode 100644 index 0000000000000..dc11012209454 --- /dev/null +++ b/arch/arm/mach-msm/board-mahimahi-tpa2018d1.h @@ -0,0 +1,35 @@ +/* include/linux/tpa2018d1.h - tpa2018d1 speaker amplifier driver + * + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + + +#ifndef __ASM_ARM_ARCH_TPA2018D1_H +#define __ASM_ARM_ARCH_TPA2018D1_H + +#define TPA2018D1_I2C_NAME "tpa2018d1" +#define TPA2018D1_CMD_LEN 8 + +struct tpa2018d1_platform_data { + uint32_t gpio_tpa2018_spk_en; +}; + +struct tpa2018d1_config_data { + unsigned char *cmd_data; /* [mode][cmd_len][cmds..] 
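- mode_num blocks of TPA2018D1_CMD_LEN bytes each; data_len must equal mode_num * TPA2018D1_CMD_LEN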
*/ + unsigned int mode_num; + unsigned int data_len; +}; + +extern void tpa2018d1_set_speaker_amp(int on); + +#endif /* __ASM_ARM_ARCH_TPA2018D1_H */ diff --git a/arch/arm/mach-msm/board-mahimahi-wifi.c b/arch/arm/mach-msm/board-mahimahi-wifi.c new file mode 100644 index 0000000000000..04c0bbeece98b --- /dev/null +++ b/arch/arm/mach-msm/board-mahimahi-wifi.c @@ -0,0 +1,152 @@ +/* linux/arch/arm/mach-msm/board-mahimahi-wifi.c +*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "board-mahimahi.h" + +int mahimahi_wifi_power(int on); +int mahimahi_wifi_reset(int on); +int mahimahi_wifi_set_carddetect(int on); + +#if defined(CONFIG_DHD_USE_STATIC_BUF) || defined(CONFIG_BCM4329_DHD_USE_STATIC_BUF) + +#define PREALLOC_WLAN_NUMBER_OF_SECTIONS 4 +#define PREALLOC_WLAN_NUMBER_OF_BUFFERS 160 +#define PREALLOC_WLAN_SECTION_HEADER 24 + +#define WLAN_SECTION_SIZE_0 (PREALLOC_WLAN_NUMBER_OF_BUFFERS * 128) +#define WLAN_SECTION_SIZE_1 (PREALLOC_WLAN_NUMBER_OF_BUFFERS * 128) +#define WLAN_SECTION_SIZE_2 (PREALLOC_WLAN_NUMBER_OF_BUFFERS * 512) +#define WLAN_SECTION_SIZE_3 (PREALLOC_WLAN_NUMBER_OF_BUFFERS * 1024) + +#define WLAN_SKB_BUF_NUM 16 + +static struct sk_buff *wlan_static_skb[WLAN_SKB_BUF_NUM]; + +typedef struct wifi_mem_prealloc_struct { + void *mem_ptr; + unsigned long size; +} wifi_mem_prealloc_t; + +static wifi_mem_prealloc_t wifi_mem_array[PREALLOC_WLAN_NUMBER_OF_SECTIONS] = { + { NULL, (WLAN_SECTION_SIZE_0 + PREALLOC_WLAN_SECTION_HEADER) }, + { NULL, (WLAN_SECTION_SIZE_1 + PREALLOC_WLAN_SECTION_HEADER) }, + { NULL, (WLAN_SECTION_SIZE_2 + PREALLOC_WLAN_SECTION_HEADER) }, + { NULL, (WLAN_SECTION_SIZE_3 + PREALLOC_WLAN_SECTION_HEADER) } +}; + +static void *mahimahi_wifi_mem_prealloc(int section, unsigned long size) +{ + if (section == PREALLOC_WLAN_NUMBER_OF_SECTIONS) + return wlan_static_skb; + if ((section < 0) || (section > PREALLOC_WLAN_NUMBER_OF_SECTIONS)) + return NULL; + if (wifi_mem_array[section].size < size) + return NULL; + return wifi_mem_array[section].mem_ptr; +} +#endif + +int __init mahimahi_init_wifi_mem(void) +{ +#if defined(CONFIG_DHD_USE_STATIC_BUF) || defined(CONFIG_BCM4329_DHD_USE_STATIC_BUF) + int i; + + for(i=0;( i < WLAN_SKB_BUF_NUM );i++) { + if (i < (WLAN_SKB_BUF_NUM/2)) + wlan_static_skb[i] = dev_alloc_skb(4096); + else + wlan_static_skb[i] = dev_alloc_skb(8192); + } + for(i=0;( i < PREALLOC_WLAN_NUMBER_OF_SECTIONS );i++) { + wifi_mem_array[i].mem_ptr = kmalloc(wifi_mem_array[i].size, + GFP_KERNEL); + if (wifi_mem_array[i].mem_ptr == NULL) + return -ENOMEM; + } +#endif + return 0; +} + +static struct resource mahimahi_wifi_resources[] = { + [0] = { + .name = "bcmdhd_wlan_irq", + .start = MSM_GPIO_TO_INT(MAHIMAHI_GPIO_WIFI_IRQ), + .end = MSM_GPIO_TO_INT(MAHIMAHI_GPIO_WIFI_IRQ), + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE, + }, +}; + +static struct wifi_platform_data mahimahi_wifi_control = { + .set_power = mahimahi_wifi_power, + .set_reset = mahimahi_wifi_reset, + .set_carddetect = mahimahi_wifi_set_carddetect, +#if defined(CONFIG_DHD_USE_STATIC_BUF) || defined(CONFIG_BCM4329_DHD_USE_STATIC_BUF) + .mem_prealloc = mahimahi_wifi_mem_prealloc, +#else + .mem_prealloc = NULL, +#endif +}; + +static struct platform_device mahimahi_wifi_device = { + .name = "bcmdhd_wlan", + .id = 1, + .num_resources = ARRAY_SIZE(mahimahi_wifi_resources), + .resource = mahimahi_wifi_resources, + .dev = { + .platform_data = &mahimahi_wifi_control, + }, +}; + +extern unsigned char 
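/* NVS calibration blob, presumably placed in RAM by the bootloader; mahimahi_wifi_update_nvs() below patches its length field when appending or trimming parameters */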
*get_wifi_nvs_ram(void); +extern int wifi_calibration_size_set(void); + +static unsigned mahimahi_wifi_update_nvs(char *str, int add_flag) +{ +#define NVS_LEN_OFFSET 0x0C +#define NVS_DATA_OFFSET 0x40 + unsigned char *ptr; + unsigned len; + + if (!str) + return -EINVAL; + ptr = get_wifi_nvs_ram(); + /* Size in format LE assumed */ + memcpy(&len, ptr + NVS_LEN_OFFSET, sizeof(len)); + /* if the last byte in NVRAM is 0, trim it */ + if (ptr[NVS_DATA_OFFSET + len - 1] == 0) + len -= 1; + if (add_flag) { + strcpy(ptr + NVS_DATA_OFFSET + len, str); + len += strlen(str); + } else { + if (strnstr(ptr + NVS_DATA_OFFSET, str, len)) + len -= strlen(str); + } + memcpy(ptr + NVS_LEN_OFFSET, &len, sizeof(len)); + wifi_calibration_size_set(); + return 0; +} + +static int __init mahimahi_wifi_init(void) +{ + if (!machine_is_mahimahi()) + return 0; + + printk("%s: start\n", __func__); + mahimahi_wifi_update_nvs("sd_oobonly=1\r\n", 0); + mahimahi_wifi_update_nvs("btc_params70=0x32\r\n", 1); + mahimahi_init_wifi_mem(); + return platform_device_register(&mahimahi_wifi_device); +} + +late_initcall(mahimahi_wifi_init); diff --git a/arch/arm/mach-msm/board-mahimahi.c b/arch/arm/mach-msm/board-mahimahi.c index ef3ebf2f763be..4f0cbbffe0e17 100644 --- a/arch/arm/mach-msm/board-mahimahi.c +++ b/arch/arm/mach-msm/board-mahimahi.c @@ -17,11 +17,24 @@ #include #include +#include #include #include #include #include #include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include <../../../drivers/staging/android/timed_gpio.h> +#include <../../../drivers/w1/w1.h> #include #include @@ -30,27 +43,1174 @@ #include #include +#include +#include +#include #include - +#include +#include +#include +#include +#ifdef CONFIG_PERFLOCK +#include +#endif +#include #include "board-mahimahi.h" #include "devices.h" #include "proc_comm.h" +#include "board-mahimahi-tpa2018d1.h" +#include "board-mahimahi-smb329.h" + +#include +#include +#include "footswitch.h" +#include static uint debug_uart; module_param_named(debug_uart, debug_uart, uint, 0); +extern void notify_usb_connected(int); +extern void msm_init_pmic_vibrator(void); +extern void __init mahimahi_audio_init(void); + +extern int microp_headset_has_mic(void); + +static int mahimahi_phy_init_seq[] = { + 0x0C, 0x31, + 0x31, 0x32, + 0x1D, 0x0D, + 0x1D, 0x10, + -1 }; + +static void mahimahi_usb_phy_reset(void) +{ + u32 id; + int ret; + + id = PCOM_CLKRGM_APPS_RESET_USB_PHY; + ret = msm_proc_comm(PCOM_CLK_REGIME_SEC_RESET_ASSERT, &id, NULL); + if (ret) { + pr_err("%s: Cannot assert (%d)\n", __func__, ret); + return; + } + + msleep(1); + + id = PCOM_CLKRGM_APPS_RESET_USB_PHY; + ret = msm_proc_comm(PCOM_CLK_REGIME_SEC_RESET_DEASSERT, &id, NULL); + if (ret) { + pr_err("%s: Cannot assert (%d)\n", __func__, ret); + return; + } +} + +static void mahimahi_usb_hw_reset(bool enable) +{ + u32 id; + int ret; + u32 func; + + id = PCOM_CLKRGM_APPS_RESET_USBH; + if (enable) + func = PCOM_CLK_REGIME_SEC_RESET_ASSERT; + else + func = PCOM_CLK_REGIME_SEC_RESET_DEASSERT; + ret = msm_proc_comm(func, &id, NULL); + if (ret) + pr_err("%s: Cannot set reset to %d (%d)\n", __func__, enable, + ret); +} + + +static struct msm_hsusb_platform_data msm_hsusb_pdata = { + .phy_init_seq = mahimahi_phy_init_seq, + .phy_reset = mahimahi_usb_phy_reset, + .hw_reset = mahimahi_usb_hw_reset, + .usb_connected = notify_usb_connected, +}; + +static char *usb_functions_ums[] = { + "usb_mass_storage", +}; + +static char *usb_functions_ums_adb[] = { + "usb_mass_storage", + "adb", +}; + 
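+/* Each android_usb product below pairs one of these function lists with its
+ * own product ID, so the host re-enumerates the gadget whenever the enabled
+ * function set changes.
+ */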
+static char *usb_functions_rndis[] = { + "rndis", +}; + +static char *usb_functions_rndis_adb[] = { + "rndis", + "adb", +}; + +#ifdef CONFIG_USB_ANDROID_ACCESSORY +static char *usb_functions_accessory[] = { "accessory" }; +static char *usb_functions_accessory_adb[] = { "accessory", "adb" }; +#endif + +#ifdef CONFIG_USB_ANDROID_DIAG +static char *usb_functions_adb_diag[] = { + "usb_mass_storage", + "adb", + "diag", +}; +#endif + +static char *usb_functions_all[] = { +#ifdef CONFIG_USB_ANDROID_RNDIS + "rndis", +#endif +#ifdef CONFIG_USB_ANDROID_ACCESSORY + "accessory", +#endif + "usb_mass_storage", + "adb", +#ifdef CONFIG_USB_ANDROID_ACM + "acm", +#endif +#ifdef CONFIG_USB_ANDROID_DIAG + "diag", +#endif +}; + +static struct android_usb_product usb_products[] = { + { + .product_id = 0x4e11, + .num_functions = ARRAY_SIZE(usb_functions_ums), + .functions = usb_functions_ums, + }, + { + .product_id = 0x4e12, + .num_functions = ARRAY_SIZE(usb_functions_ums_adb), + .functions = usb_functions_ums_adb, + }, + { + .product_id = 0x4e13, + .num_functions = ARRAY_SIZE(usb_functions_rndis), + .functions = usb_functions_rndis, + }, + { + .product_id = 0x4e14, + .num_functions = ARRAY_SIZE(usb_functions_rndis_adb), + .functions = usb_functions_rndis_adb, + }, +#ifdef CONFIG_USB_ANDROID_ACCESSORY + { + .vendor_id = USB_ACCESSORY_VENDOR_ID, + .product_id = USB_ACCESSORY_PRODUCT_ID, + .num_functions = ARRAY_SIZE(usb_functions_accessory), + .functions = usb_functions_accessory, + }, + { + .vendor_id = USB_ACCESSORY_VENDOR_ID, + .product_id = USB_ACCESSORY_ADB_PRODUCT_ID, + .num_functions = ARRAY_SIZE(usb_functions_accessory_adb), + .functions = usb_functions_accessory_adb, + }, +#endif +#ifdef CONFIG_USB_ANDROID_DIAG + { + .product_id = 0x4e17, + .num_functions = ARRAY_SIZE(usb_functions_adb_diag), + .functions = usb_functions_adb_diag, + }, +#endif +}; + +static struct usb_mass_storage_platform_data mass_storage_pdata = { + .nluns = 1, + .vendor = "Google, Inc.", + .product = "Nexus One", + .release = 0x0100, +}; + +static struct platform_device usb_mass_storage_device = { + .name = "usb_mass_storage", + .id = -1, + .dev = { + .platform_data = &mass_storage_pdata, + }, +}; + +#ifdef CONFIG_USB_ANDROID_RNDIS +static struct usb_ether_platform_data rndis_pdata = { + /* ethaddr is filled by board_serialno_setup */ + .vendorID = 0x18d1, + .vendorDescr = "Google, Inc.", +}; + +static struct platform_device rndis_device = { + .name = "rndis", + .id = -1, + .dev = { + .platform_data = &rndis_pdata, + }, +}; +#endif + +static struct android_usb_platform_data android_usb_pdata = { + .vendor_id = 0x18d1, + .product_id = 0x4e11, + .version = 0x0100, + .product_name = "Nexus One", + .manufacturer_name = "Google, Inc.", + .num_products = ARRAY_SIZE(usb_products), + .products = usb_products, + .num_functions = ARRAY_SIZE(usb_functions_all), + .functions = usb_functions_all, +}; + +static struct platform_device android_usb_device = { + .name = "android_usb", + .id = -1, + .dev = { + .platform_data = &android_usb_pdata, + }, +}; + +static struct platform_device mahimahi_rfkill = { + .name = "mahimahi_rfkill", + .id = -1, +}; + +/* start kgsl */ +static struct resource kgsl_3d0_resources[] = { + { + .name = KGSL_3D0_REG_MEMORY, + .start = 0xA0000000, + .end = 0xA001ffff, + .flags = IORESOURCE_MEM, + }, + { + .name = KGSL_3D0_IRQ, + .start = INT_GRAPHICS, + .end = INT_GRAPHICS, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct kgsl_device_platform_data kgsl_3d0_pdata = { + .pwrlevel = { + { + .gpu_freq = 0, + .bus_freq = 
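/* single power level: gpu_freq 0 appears to leave the core clock alone while the AXI bus is asked for 128 MHz */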
128000000, + }, + }, + .init_level = 0, + .num_levels = 1, + .set_grp_async = NULL, + .idle_timeout = HZ/5, + .clk_map = KGSL_CLK_GRP | KGSL_CLK_IMEM, +}; + +struct platform_device msm_kgsl_3d0 = { + .name = "kgsl-3d0", + .id = 0, + .num_resources = ARRAY_SIZE(kgsl_3d0_resources), + .resource = kgsl_3d0_resources, + .dev = { + .platform_data = &kgsl_3d0_pdata, + }, +}; +/* end kgsl */ + +/* start footswitch regulator */ +struct platform_device *msm_footswitch_devices[] = { + FS_PCOM(FS_GFX3D, "fs_gfx3d"), +}; + +unsigned msm_num_footswitch_devices = ARRAY_SIZE(msm_footswitch_devices); +/* end footswitch regulator */ + +/* pmem heaps */ +#ifndef CONFIG_ION_MSM +static struct android_pmem_platform_data mdp_pmem_pdata = { + .name = "pmem", + .start = MSM_PMEM_MDP_BASE, + .size = MSM_PMEM_MDP_SIZE, +/* .no_allocator = 0,*/ + .allocator_type = PMEM_ALLOCATORTYPE_ALLORNOTHING, + .cached = 1, +}; + +static struct platform_device android_pmem_mdp_device = { + .name = "android_pmem", + .id = 0, + .dev = { + .platform_data = &mdp_pmem_pdata + }, +}; +#endif + +static struct android_pmem_platform_data android_pmem_adsp_pdata = { + .name = "pmem_adsp", + .start = MSM_PMEM_ADSP_BASE, + .size = MSM_PMEM_ADSP_SIZE, +/* .no_allocator = 0,*/ + .allocator_type = PMEM_ALLOCATORTYPE_BITMAP, + .cached = 1, +}; + +static struct platform_device android_pmem_adsp_device = { + .name = "android_pmem", + .id = 1, + .dev = { + .platform_data = &android_pmem_adsp_pdata, + }, +}; + +static struct android_pmem_platform_data android_pmem_venc_pdata = { + .name = "pmem_venc", + .start = MSM_PMEM_VENC_BASE, + .size = MSM_PMEM_VENC_SIZE, +/* .no_allocator = 0,*/ + .allocator_type = PMEM_ALLOCATORTYPE_BITMAP, + .cached = 1, +}; + +static struct platform_device android_pmem_venc_device = { + .name = "android_pmem", + .id = 3, + .dev = { + .platform_data = &android_pmem_venc_pdata, + }, +}; +/* end pmem heaps */ + +/* ion heaps */ +#ifdef CONFIG_ION_MSM +static struct ion_co_heap_pdata co_ion_pdata = { + .adjacent_mem_id = INVALID_HEAP_ID, + .align = PAGE_SIZE, +}; + +static struct ion_platform_data ion_pdata = { + .nr = 2, + .heaps = { + { + .id = ION_SYSTEM_HEAP_ID, + .type = ION_HEAP_TYPE_SYSTEM, + .name = ION_VMALLOC_HEAP_NAME, + }, + /* PMEM_MDP = SF */ + { + .id = ION_SF_HEAP_ID, + .type = ION_HEAP_TYPE_CARVEOUT, + .name = ION_SF_HEAP_NAME, + .base = MSM_PMEM_MDP_BASE, + .size = MSM_PMEM_MDP_SIZE, + .memory_type = ION_EBI_TYPE, + .extra_data = (void *)&co_ion_pdata, + }, + } +}; + +static struct platform_device ion_dev = { + .name = "ion-msm", + .id = 1, + .dev = { .platform_data = &ion_pdata }, +}; +#endif +/* end ion heaps */ + +static struct resource ram_console_resources[] = { + { + .start = MSM_RAM_CONSOLE_BASE, + .end = MSM_RAM_CONSOLE_BASE + MSM_RAM_CONSOLE_SIZE - 1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct platform_device ram_console_device = { + .name = "ram_console", + .id = -1, + .num_resources = ARRAY_SIZE(ram_console_resources), + .resource = ram_console_resources, +}; + +static int mahimahi_ts_power(int on) +{ + pr_info("%s: power %d\n", __func__, on); + + if (on) { + /* level shifter should be off */ + gpio_set_value(MAHIMAHI_GPIO_TP_EN, 1); + msleep(120); + /* enable touch panel level shift */ + gpio_set_value(MAHIMAHI_GPIO_TP_LS_EN, 1); + msleep(3); + } else { + gpio_set_value(MAHIMAHI_GPIO_TP_LS_EN, 0); + gpio_set_value(MAHIMAHI_GPIO_TP_EN, 0); + udelay(50); + } + + return 0; +} + +static struct synaptics_i2c_rmi_platform_data mahimahi_synaptics_ts_data[] = { + { + .version = 0x105, + .power 
= mahimahi_ts_power, + .flags = SYNAPTICS_FLIP_Y, + .inactive_left = -15 * 0x10000 / 480, + .inactive_right = -15 * 0x10000 / 480, + .inactive_top = -15 * 0x10000 / 800, + .inactive_bottom = -50 * 0x10000 / 800, + .sensitivity_adjust = 9, + }, + { + .flags = SYNAPTICS_FLIP_Y, + .inactive_left = -15 * 0x10000 / 480, + .inactive_right = -15 * 0x10000 / 480, + .inactive_top = -15 * 0x10000 / 800, + .inactive_bottom = -40 * 0x10000 / 800, + .sensitivity_adjust = 12, + }, +}; + +static struct a1026_platform_data a1026_data = { + .gpio_a1026_micsel = MAHIMAHI_AUD_MICPATH_SEL, + .gpio_a1026_wakeup = MAHIMAHI_AUD_A1026_WAKEUP, + .gpio_a1026_reset = MAHIMAHI_AUD_A1026_RESET, + .gpio_a1026_clk = MAHIMAHI_AUD_A1026_CLK, + /*.gpio_a1026_int = MAHIMAHI_AUD_A1026_INT,*/ +}; + +static struct akm8973_platform_data compass_platform_data = { + .layouts = MAHIMAHI_LAYOUTS, + .project_name = MAHIMAHI_PROJECT_NAME, + .reset = MAHIMAHI_GPIO_COMPASS_RST_N, + .intr = MAHIMAHI_GPIO_COMPASS_INT_N, +}; + +static struct regulator_consumer_supply tps65023_dcdc1_supplies[] = { + { + .supply = "acpu_vcore", + }, +}; + +static struct regulator_init_data tps65023_data[5] = { + { + .constraints = { + .name = "dcdc1", /* VREG_MSMC2_1V29 */ + .min_uV = 975000, + .max_uV = 1275000, + .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, + }, + .consumer_supplies = tps65023_dcdc1_supplies, + .num_consumer_supplies = ARRAY_SIZE(tps65023_dcdc1_supplies), + }, + /* dummy values for unused regulators to not crash driver: */ + { + .constraints = { + .name = "dcdc2", /* VREG_MSMC1_1V26 */ + .min_uV = 1260000, + .max_uV = 1260000, + }, + }, + { + .constraints = { + .name = "dcdc3", /* unused */ + .min_uV = 800000, + .max_uV = 3300000, + }, + }, + { + .constraints = { + .name = "ldo1", /* unused */ + .min_uV = 1000000, + .max_uV = 3150000, + }, + }, + { + .constraints = { + .name = "ldo2", /* V_USBPHY_3V3 */ + .min_uV = 3300000, + .max_uV = 3300000, + }, + }, +}; + + +static void ds2482_set_slp_n(unsigned n) +{ + gpio_direction_output(MAHIMAHI_GPIO_DS2482_SLP_N, n); +} + +static struct tpa2018d1_platform_data tpa2018_data = { + .gpio_tpa2018_spk_en = MAHIMAHI_CDMA_GPIO_AUD_SPK_AMP_EN, +}; + +static struct i2c_board_info base_i2c_devices[] = { + { + I2C_BOARD_INFO("ds2482", 0x30 >> 1), + .platform_data = ds2482_set_slp_n, + }, + { + I2C_BOARD_INFO(SYNAPTICS_I2C_RMI_NAME, 0x40), + .platform_data = mahimahi_synaptics_ts_data, + .irq = MSM_GPIO_TO_INT(MAHIMAHI_GPIO_TP_INT_N) + }, + { + I2C_BOARD_INFO("mahimahi-microp", 0x66), + .irq = MSM_GPIO_TO_INT(MAHIMAHI_GPIO_UP_INT_N) + }, + { + I2C_BOARD_INFO("s5k3e2fx", 0x20 >> 1), + }, + { + I2C_BOARD_INFO("tps65023", 0x48), + .platform_data = tps65023_data, + }, +}; + +static struct i2c_board_info rev0_i2c_devices[] = { + { + I2C_BOARD_INFO(AKM8973_I2C_NAME, 0x1C), + .platform_data = &compass_platform_data, + .irq = MSM_GPIO_TO_INT(MAHIMAHI_REV0_GPIO_COMPASS_INT_N), + }, +}; + +static struct i2c_board_info rev1_i2c_devices[] = { + { + I2C_BOARD_INFO("audience_a1026", 0x3E), + .platform_data = &a1026_data, + /*.irq = MSM_GPIO_TO_INT(MAHIMAHI_AUD_A1026_INT)*/ + }, + { + I2C_BOARD_INFO(AKM8973_I2C_NAME, 0x1C), + .platform_data = &compass_platform_data, + .irq = MSM_GPIO_TO_INT(MAHIMAHI_GPIO_COMPASS_INT_N), + }, +}; + +static struct i2c_board_info rev_CX_i2c_devices[] = { + { + I2C_BOARD_INFO("tpa2018d1", 0x58), + .platform_data = &tpa2018_data, + }, + { + I2C_BOARD_INFO("smb329", 0x6E >> 1), + }, +}; + +static uint32_t camera_off_gpio_table[] = { + /* CAMERA */ + PCOM_GPIO_CFG(0, 0, GPIO_INPUT, 
GPIO_PULL_DOWN, GPIO_4MA), /* DAT0 */ + PCOM_GPIO_CFG(1, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(2, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(3, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(4, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT4 */ + PCOM_GPIO_CFG(5, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT5 */ + PCOM_GPIO_CFG(6, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT6 */ + PCOM_GPIO_CFG(7, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT7 */ + PCOM_GPIO_CFG(8, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT8 */ + PCOM_GPIO_CFG(9, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT9 */ + PCOM_GPIO_CFG(10, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT10 */ + PCOM_GPIO_CFG(11, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* DAT11 */ + PCOM_GPIO_CFG(12, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* PCLK */ + PCOM_GPIO_CFG(13, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* HSYNC */ + PCOM_GPIO_CFG(14, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* VSYNC */ + PCOM_GPIO_CFG(15, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* MCLK */ +}; + +static uint32_t camera_on_gpio_table[] = { + /* CAMERA */ + PCOM_GPIO_CFG(0, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT0 */ + PCOM_GPIO_CFG(1, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT1 */ + PCOM_GPIO_CFG(2, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT2 */ + PCOM_GPIO_CFG(3, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT3 */ + PCOM_GPIO_CFG(4, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT4 */ + PCOM_GPIO_CFG(5, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT5 */ + PCOM_GPIO_CFG(6, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT6 */ + PCOM_GPIO_CFG(7, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT7 */ + PCOM_GPIO_CFG(8, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT8 */ + PCOM_GPIO_CFG(9, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT9 */ + PCOM_GPIO_CFG(10, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT10 */ + PCOM_GPIO_CFG(11, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT11 */ + PCOM_GPIO_CFG(12, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_16MA), /* PCLK */ + PCOM_GPIO_CFG(13, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* HSYNC */ + PCOM_GPIO_CFG(14, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* VSYNC */ + PCOM_GPIO_CFG(15, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* MCLK */ +}; + +void config_camera_on_gpios(void) +{ + config_gpio_table(camera_on_gpio_table, + ARRAY_SIZE(camera_on_gpio_table)); +} + +void config_camera_off_gpios(void) +{ + config_gpio_table(camera_off_gpio_table, + ARRAY_SIZE(camera_off_gpio_table)); +} + +static struct resource msm_camera_resources[] = { + { + .start = MSM_VFE_PHYS, + .end = MSM_VFE_PHYS + MSM_VFE_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_VFE, + INT_VFE, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct msm_camera_device_platform_data msm_camera_device_data = { + .camera_gpio_on = config_camera_on_gpios, + .camera_gpio_off = config_camera_off_gpios, + .ioext.mdcphy = MSM_MDC_PHYS, + .ioext.mdcsz = MSM_MDC_SIZE, + .ioext.appphy = MSM_CLK_CTL_PHYS, + .ioext.appsz = MSM_CLK_CTL_SIZE, +}; + +static struct camera_flash_cfg msm_camera_sensor_flash_cfg = { + .camera_flash = flashlight_control, + .num_flash_levels = FLASHLIGHT_NUM, + .low_temp_limit = 5, + .low_cap_limit = 15, + +}; + +static struct msm_camera_sensor_info msm_camera_sensor_s5k3e2fx_data = { + .sensor_name = "s5k3e2fx", + .sensor_reset = 144, /* CAM1_RST */ + .sensor_pwd = 143, /* CAM1_PWDN, enabled in a9 */ + /*.vcm_pwd = 31, */ /* CAM1_VCM_EN, enabled in a9 */ + 
.pdata = &msm_camera_device_data, + .resource = msm_camera_resources, + .num_resources = ARRAY_SIZE(msm_camera_resources), + .flash_cfg = &msm_camera_sensor_flash_cfg, +}; + +static struct platform_device msm_camera_sensor_s5k3e2fx = { + .name = "msm_camera_s5k3e2fx", + .dev = { + .platform_data = &msm_camera_sensor_s5k3e2fx_data, + }, +}; + +static int capella_cm3602_power(int on) +{ + /* TODO eolsen Add Voltage reg control */ + if (on) { + gpio_direction_output(MAHIMAHI_GPIO_PROXIMITY_EN, 0); + } else { + gpio_direction_output(MAHIMAHI_GPIO_PROXIMITY_EN, 1); + } + + return 0; +} + + +static struct capella_cm3602_platform_data capella_cm3602_pdata = { + .power = capella_cm3602_power, + .p_out = MAHIMAHI_GPIO_PROXIMITY_INT_N +}; + +static struct platform_device capella_cm3602 = { + .name = CAPELLA_CM3602, + .id = -1, + .dev = { + .platform_data = &capella_cm3602_pdata + } +}; + +static uint32_t flashlight_gpio_table[] = { + PCOM_GPIO_CFG(MAHIMAHI_GPIO_FLASHLIGHT_TORCH, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(MAHIMAHI_GPIO_FLASHLIGHT_FLASH, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA), +}; + +static uint32_t flashlight_gpio_table_rev_CX[] = { + PCOM_GPIO_CFG(MAHIMAHI_CDMA_GPIO_FLASHLIGHT_TORCH, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(MAHIMAHI_GPIO_FLASHLIGHT_FLASH, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA), +}; + + +static int config_mahimahi_flashlight_gpios(void) +{ + if (is_cdma_version(system_rev)) { + config_gpio_table(flashlight_gpio_table_rev_CX, + ARRAY_SIZE(flashlight_gpio_table_rev_CX)); + } else { + config_gpio_table(flashlight_gpio_table, + ARRAY_SIZE(flashlight_gpio_table)); + } + return 0; +} + +static struct flashlight_platform_data mahimahi_flashlight_data = { + .gpio_init = config_mahimahi_flashlight_gpios, + .torch = MAHIMAHI_GPIO_FLASHLIGHT_TORCH, + .flash = MAHIMAHI_GPIO_FLASHLIGHT_FLASH, + .flash_duration_ms = 600 +}; + +static struct platform_device mahimahi_flashlight_device = { + .name = "flashlight", + .dev = { + .platform_data = &mahimahi_flashlight_data, + }, +}; + +static struct timed_gpio timed_gpios[] = { + { + .name = "vibrator", + .gpio = MAHIMAHI_GPIO_VIBRATOR_ON, + .max_timeout = 15000, + }, +}; + +static struct timed_gpio_platform_data timed_gpio_data = { + .num_gpios = ARRAY_SIZE(timed_gpios), + .gpios = timed_gpios, +}; + +static struct platform_device mahimahi_timed_gpios = { + .name = "timed-gpio", + .id = -1, + .dev = { + .platform_data = &timed_gpio_data, + }, +}; + +static struct msm_serial_hs_platform_data msm_uart_dm1_pdata = { + .rx_wakeup_irq = -1, + .inject_rx_on_wakeup = 0, + .exit_lpm_cb = bcm_bt_lpm_exit_lpm_locked, +}; + +static struct bcm_bt_lpm_platform_data bcm_bt_lpm_pdata = { + .gpio_wake = MAHIMAHI_GPIO_BT_WAKE, + .gpio_host_wake = MAHIMAHI_GPIO_BT_HOST_WAKE, + .request_clock_off_locked = msm_hs_request_clock_off_locked, + .request_clock_on_locked = msm_hs_request_clock_on_locked, +}; + +struct platform_device bcm_bt_lpm_device = { + .name = "bcm_bt_lpm", + .id = 0, + .dev = { + .platform_data = &bcm_bt_lpm_pdata, + }, +}; + +static int ds2784_charge(int on, int fast) +{ + if (is_cdma_version(system_rev)) { + if (!on) + smb329_set_charger_ctrl(SMB329_DISABLE_CHG); + else + smb329_set_charger_ctrl(fast ? 
SMB329_ENABLE_FAST_CHG : SMB329_ENABLE_SLOW_CHG); + } + else + gpio_direction_output(MAHIMAHI_GPIO_BATTERY_CHARGER_CURRENT, !!fast); + gpio_direction_output(MAHIMAHI_GPIO_BATTERY_CHARGER_EN, !on); + return 0; +} + +static int w1_ds2784_add_slave(struct w1_slave *sl) +{ + struct dd { + struct platform_device pdev; + struct ds2784_platform_data pdata; + } *p; + + int rc; + + p = kzalloc(sizeof(struct dd), GFP_KERNEL); + if (!p) { + pr_err("%s: out of memory\n", __func__); + return -ENOMEM; + } + + rc = gpio_request(MAHIMAHI_GPIO_BATTERY_CHARGER_EN, "charger_en"); + if (rc < 0) { + pr_err("%s: gpio_request(%d) failed: %d\n", __func__, + MAHIMAHI_GPIO_BATTERY_CHARGER_EN, rc); + kfree(p); + return rc; + } + + if (!is_cdma_version(system_rev)) { + rc = gpio_request(MAHIMAHI_GPIO_BATTERY_CHARGER_CURRENT, "charger_current"); + if (rc < 0) { + pr_err("%s: gpio_request(%d) failed: %d\n", __func__, + MAHIMAHI_GPIO_BATTERY_CHARGER_CURRENT, rc); + gpio_free(MAHIMAHI_GPIO_BATTERY_CHARGER_EN); + kfree(p); + return rc; + } + } + + p->pdev.name = "ds2784-battery"; + p->pdev.id = -1; + p->pdev.dev.platform_data = &p->pdata; + p->pdata.charge = ds2784_charge; + p->pdata.w1_slave = sl; + + platform_device_register(&p->pdev); + + return 0; +} + +static struct w1_family_ops w1_ds2784_fops = { + .add_slave = w1_ds2784_add_slave, +}; + +static struct w1_family w1_ds2784_family = { + .fid = W1_FAMILY_DS2784, + .fops = &w1_ds2784_fops, +}; + +static int __init ds2784_battery_init(void) +{ + return w1_register_family(&w1_ds2784_family); +} + static struct platform_device *devices[] __initdata = { #if !defined(CONFIG_MSM_SERIAL_DEBUGGER) &msm_device_uart1, #endif + &bcm_bt_lpm_device, &msm_device_uart_dm1, + &ram_console_device, + &mahimahi_rfkill, + &msm_device_smd, &msm_device_nand, + &msm_device_hsusb, + &usb_mass_storage_device, +#ifdef CONFIG_USB_ANDROID_RNDIS + &rndis_device, +#endif + &android_usb_device, +#ifndef CONFIG_ION_MSM + &android_pmem_mdp_device, +#else + &ion_dev, +#endif + &android_pmem_adsp_device, +#ifdef CONFIG_720P_CAMERA + &android_pmem_venc_device, +#endif + &msm_kgsl_3d0, + &msm_device_i2c, + &capella_cm3602, + &msm_camera_sensor_s5k3e2fx, + &mahimahi_flashlight_device, +}; + +static uint32_t bt_gpio_table[] = { + PCOM_GPIO_CFG(MAHIMAHI_GPIO_BT_UART1_RTS, 2, GPIO_OUTPUT, + GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(MAHIMAHI_GPIO_BT_UART1_CTS, 2, GPIO_INPUT, + GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(MAHIMAHI_GPIO_BT_UART1_RX, 2, GPIO_INPUT, + GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(MAHIMAHI_GPIO_BT_UART1_TX, 2, GPIO_OUTPUT, + GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(MAHIMAHI_GPIO_BT_RESET_N, 0, GPIO_OUTPUT, + GPIO_PULL_DOWN, GPIO_4MA), + PCOM_GPIO_CFG(MAHIMAHI_GPIO_BT_SHUTDOWN_N, 0, GPIO_OUTPUT, + GPIO_PULL_DOWN, GPIO_4MA), + PCOM_GPIO_CFG(MAHIMAHI_GPIO_BT_WAKE, 0, GPIO_OUTPUT, + GPIO_PULL_DOWN, GPIO_4MA), + PCOM_GPIO_CFG(MAHIMAHI_GPIO_BT_HOST_WAKE, 0, GPIO_INPUT, + GPIO_PULL_DOWN, GPIO_4MA), +}; + +static uint32_t bt_gpio_table_rev_CX[] = { + PCOM_GPIO_CFG(MAHIMAHI_GPIO_BT_UART1_RTS, 2, GPIO_OUTPUT, + GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(MAHIMAHI_GPIO_BT_UART1_CTS, 2, GPIO_INPUT, + GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(MAHIMAHI_GPIO_BT_UART1_RX, 2, GPIO_INPUT, + GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(MAHIMAHI_GPIO_BT_UART1_TX, 2, GPIO_OUTPUT, + GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(MAHIMAHI_GPIO_BT_RESET_N, 0, GPIO_OUTPUT, + GPIO_PULL_DOWN, GPIO_4MA), + PCOM_GPIO_CFG(MAHIMAHI_GPIO_BT_SHUTDOWN_N, 0, GPIO_OUTPUT, + GPIO_PULL_DOWN, GPIO_4MA), + 
PCOM_GPIO_CFG(MAHIMAHI_CDMA_GPIO_BT_WAKE, 0, GPIO_OUTPUT, + GPIO_PULL_DOWN, GPIO_4MA), + PCOM_GPIO_CFG(MAHIMAHI_GPIO_BT_HOST_WAKE, 0, GPIO_INPUT, + GPIO_PULL_DOWN, GPIO_4MA), +}; + +static uint32_t misc_gpio_table[] = { + PCOM_GPIO_CFG(MAHIMAHI_GPIO_LCD_RST_N, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(MAHIMAHI_GPIO_LED_3V3_EN, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(MAHIMAHI_GPIO_DOCK, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_4MA), +}; + +static uint32_t key_int_shutdown_gpio_table[] = { + PCOM_GPIO_CFG(MAHIMAHI_GPIO_35MM_KEY_INT_SHUTDOWN, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA), +}; + +static void mahimahi_headset_init(void) +{ + if (is_cdma_version(system_rev)) + return; + config_gpio_table(key_int_shutdown_gpio_table, + ARRAY_SIZE(key_int_shutdown_gpio_table)); + gpio_set_value(MAHIMAHI_GPIO_35MM_KEY_INT_SHUTDOWN, 0); +} + +#define ATAG_BDADDR 0x43294329 /* mahimahi bluetooth address tag */ +#define ATAG_BDADDR_SIZE 4 +#define BDADDR_STR_SIZE 18 + +static char bdaddr[BDADDR_STR_SIZE]; + +module_param_string(bdaddr, bdaddr, sizeof(bdaddr), 0400); +MODULE_PARM_DESC(bdaddr, "bluetooth address"); + +static int __init parse_tag_bdaddr(const struct tag *tag) +{ + unsigned char *b = (unsigned char *)&tag->u; + + if (tag->hdr.size != ATAG_BDADDR_SIZE) + return -EINVAL; + + snprintf(bdaddr, BDADDR_STR_SIZE, "%02X:%02X:%02X:%02X:%02X:%02X", + b[0], b[1], b[2], b[3], b[4], b[5]); + + return 0; +} + +__tagtable(ATAG_BDADDR, parse_tag_bdaddr); + +static int __init mahimahi_board_serialno_setup(char *serialno) +{ +#ifdef CONFIG_USB_ANDROID_RNDIS + int i; + char *src = serialno; + + /* create a fake MAC address from our serial number. + * first byte is 0x02 to signify locally administered. + */ + rndis_pdata.ethaddr[0] = 0x02; + for (i = 0; *src; i++) { + /* XOR the USB serial across the remaining bytes */ + rndis_pdata.ethaddr[i % (ETH_ALEN - 1) + 1] ^= *src++; + } +#endif + + android_usb_pdata.serial_number = serialno; + msm_hsusb_pdata.serial_number = serialno; + return 1; +} +__setup("androidboot.serialno=", mahimahi_board_serialno_setup); + +static struct msm_acpu_clock_platform_data mahimahi_clock_data = { + .acpu_switch_time_us = 20, + .max_speed_delta_khz = 256000, + .vdd_switch_time_us = 62, + .power_collapse_khz = 245000, + .wait_for_irq_khz = 245000, + .mpll_khz = 245000 +}; + +static struct msm_acpu_clock_platform_data mahimahi_cdma_clock_data = { + .acpu_switch_time_us = 20, + .max_speed_delta_khz = 256000, + .vdd_switch_time_us = 62, + .power_collapse_khz = 235930, + .wait_for_irq_khz = 235930, + .mpll_khz = 235930 +}; + +#ifdef CONFIG_PERFLOCK +static unsigned mahimahi_perf_acpu_table[] = { + 245000000, + 576000000, + 998400000, +}; + +static struct perflock_platform_data mahimahi_perflock_data = { + .perf_acpu_table = mahimahi_perf_acpu_table, + .table_size = ARRAY_SIZE(mahimahi_perf_acpu_table), +}; +#endif + +static ssize_t mahimahi_virtual_keys_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + if (system_rev > 2 && system_rev != 0xC0) { + /* center: x: back: 60, menu: 172, home: 298, search 412, y: 840 */ + return sprintf(buf, + __stringify(EV_KEY) ":" __stringify(KEY_BACK) ":55:840:90:60" + ":" __stringify(EV_KEY) ":" __stringify(KEY_MENU) ":172:840:125:60" + ":" __stringify(EV_KEY) ":" __stringify(KEY_HOME) ":298:840:115:60" + ":" __stringify(EV_KEY) ":" __stringify(KEY_SEARCH) ":412:840:95:60" + "\n"); + } else { + /* center: x: home: 55, menu: 185, back: 305, search 425, y: 835 */ + return sprintf(buf, + 
__stringify(EV_KEY) ":" __stringify(KEY_HOME) ":55:835:70:55" + ":" __stringify(EV_KEY) ":" __stringify(KEY_MENU) ":185:835:100:55" + ":" __stringify(EV_KEY) ":" __stringify(KEY_BACK) ":305:835:70:55" + ":" __stringify(EV_KEY) ":" __stringify(KEY_SEARCH) ":425:835:70:55" + "\n"); + } +} + +static struct kobj_attribute mahimahi_virtual_keys_attr = { + .attr = { + .name = "virtualkeys.synaptics-rmi-touchscreen", + .mode = S_IRUGO, + }, + .show = &mahimahi_virtual_keys_show, +}; + +static struct attribute *mahimahi_properties_attrs[] = { + &mahimahi_virtual_keys_attr.attr, + NULL +}; + +static struct attribute_group mahimahi_properties_attr_group = { + .attrs = mahimahi_properties_attrs, +}; + +static void mahimahi_reset(void) +{ + gpio_set_value(MAHIMAHI_GPIO_PS_HOLD, 0); +} + +int mahimahi_init_mmc(int sysrev, unsigned debug_uart); + +static const struct smd_tty_channel_desc smd_cdma_default_channels[] = { + { .id = 0, .name = "SMD_DS" }, + { .id = 19, .name = "SMD_DATA3" }, + { .id = 27, .name = "SMD_GPSNMEA" } }; static void __init mahimahi_init(void) { + int ret; + struct kobject *properties_kobj; + + printk("mahimahi_init() revision=%d\n", system_rev); + + if (is_cdma_version(system_rev)) + smd_set_channel_list(smd_cdma_default_channels, + ARRAY_SIZE(smd_cdma_default_channels)); + + msm_hw_reset_hook = mahimahi_reset; + + mahimahi_board_serialno_setup(board_serialno()); + + if (is_cdma_version(system_rev)) + msm_acpu_clock_init(&mahimahi_cdma_clock_data); + else + msm_acpu_clock_init(&mahimahi_clock_data); + +#ifdef CONFIG_PERFLOCK + perflock_init(&mahimahi_perflock_data); +#endif + + msm_serial_debug_init(MSM_UART1_PHYS, INT_UART1, + &msm_device_uart1.dev, 1, MSM_GPIO_TO_INT(139)); + + config_gpio_table(misc_gpio_table, ARRAY_SIZE(misc_gpio_table)); + + if (is_cdma_version(system_rev)) { + bcm_bt_lpm_pdata.gpio_wake = MAHIMAHI_CDMA_GPIO_BT_WAKE; + mahimahi_flashlight_data.torch = MAHIMAHI_CDMA_GPIO_FLASHLIGHT_TORCH; + config_gpio_table(bt_gpio_table_rev_CX, ARRAY_SIZE(bt_gpio_table_rev_CX)); + } else { + config_gpio_table(bt_gpio_table, ARRAY_SIZE(bt_gpio_table)); + } + + gpio_request(MAHIMAHI_GPIO_TP_LS_EN, "tp_ls_en"); + gpio_direction_output(MAHIMAHI_GPIO_TP_LS_EN, 0); + gpio_request(MAHIMAHI_GPIO_TP_EN, "tp_en"); + gpio_direction_output(MAHIMAHI_GPIO_TP_EN, 0); + gpio_request(MAHIMAHI_GPIO_PROXIMITY_EN, "proximity_en"); + gpio_direction_output(MAHIMAHI_GPIO_PROXIMITY_EN, 1); + gpio_request(MAHIMAHI_GPIO_COMPASS_RST_N, "compass_rst"); + gpio_direction_output(MAHIMAHI_GPIO_COMPASS_RST_N, 1); + gpio_request(MAHIMAHI_GPIO_COMPASS_INT_N, "compass_int"); + gpio_direction_input(MAHIMAHI_GPIO_COMPASS_INT_N); + + gpio_request(MAHIMAHI_GPIO_DS2482_SLP_N, "ds2482_slp_n"); + + msm_device_hsusb.dev.platform_data = &msm_hsusb_pdata; + msm_device_uart_dm1.dev.platform_data = &msm_uart_dm1_pdata; + platform_add_devices(devices, ARRAY_SIZE(devices)); + + platform_add_devices(msm_footswitch_devices, + msm_num_footswitch_devices); + + i2c_register_board_info(0, base_i2c_devices, + ARRAY_SIZE(base_i2c_devices)); + + if (system_rev == 0) { + /* Only board after XB with Audience A1026 */ + i2c_register_board_info(0, rev0_i2c_devices, + ARRAY_SIZE(rev0_i2c_devices)); + } + + if (system_rev > 0) { + /* Only board after XB with Audience A1026 */ + i2c_register_board_info(0, rev1_i2c_devices, + ARRAY_SIZE(rev1_i2c_devices)); + } + + if (is_cdma_version(system_rev)) { + /* Only CDMA version with TI TPA2018D1 Speaker Amp. 
*/ + i2c_register_board_info(0, rev_CX_i2c_devices, + ARRAY_SIZE(rev_CX_i2c_devices)); + if ((system_rev & 0x0F) == 0x00) { + a1026_data.gpio_a1026_clk = MAHIMAHI_CDMA_XA_AUD_A1026_CLK; + } else if ((system_rev & 0x0F) >= 0x01) { + a1026_data.gpio_a1026_wakeup = MAHIMAHI_CDMA_XB_AUD_A1026_WAKEUP; + a1026_data.gpio_a1026_reset = MAHIMAHI_CDMA_XB_AUD_A1026_RESET; + a1026_data.gpio_a1026_clk = MAHIMAHI_CDMA_XB_AUD_A1026_CLK; + } + } + + ret = mahimahi_init_mmc(system_rev, debug_uart); + if (ret != 0) + pr_crit("%s: Unable to initialize MMC\n", __func__); + + properties_kobj = kobject_create_and_add("board_properties", NULL); + if (properties_kobj) + ret = sysfs_create_group(properties_kobj, + &mahimahi_properties_attr_group); + if (!properties_kobj || ret) + pr_err("failed to create board_properties\n"); + + mahimahi_audio_init(); + mahimahi_headset_init(); + + if (system_rev > 0) + platform_device_register(&mahimahi_timed_gpios); + else + msm_init_pmic_vibrator(); + + ds2784_battery_init(); } static void __init mahimahi_fixup(struct machine_desc *desc, struct tag *tags, @@ -58,17 +1218,17 @@ static void __init mahimahi_fixup(struct machine_desc *desc, struct tag *tags, { mi->nr_banks = 2; mi->bank[0].start = PHYS_OFFSET; - mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET); - mi->bank[0].size = (219*1024*1024); - mi->bank[1].start = MSM_HIGHMEM_BASE; - mi->bank[1].node = PHYS_TO_NID(MSM_HIGHMEM_BASE); - mi->bank[1].size = MSM_HIGHMEM_SIZE; + mi->bank[0].size = MSM_EBI1_BANK0_SIZE; + mi->bank[1].start = MSM_EBI1_BANK1_BASE; + mi->bank[1].size = MSM_EBI1_BANK1_SIZE; } static void __init mahimahi_map_io(void) { - msm_map_common_io(); - msm_clock_init(); + msm_map_qsd8x50_io(); + msm_clock_init(msm_clocks_8x50, msm_num_clocks_8x50); + if (socinfo_init() < 0) + printk(KERN_ERR "%s: socinfo_init() failed!\n",__func__); } extern struct sys_timer msm_timer; diff --git a/arch/arm/mach-msm/board-mahimahi.h b/arch/arm/mach-msm/board-mahimahi.h new file mode 100644 index 0000000000000..3d8a364d8492a --- /dev/null +++ b/arch/arm/mach-msm/board-mahimahi.h @@ -0,0 +1,172 @@ +/* arch/arm/mach-msm/board-mahimahi.h + * + * Copyright (C) 2009 HTC Corporation. + * Author: Haley Teng + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+*/ + +#ifndef __ARCH_ARM_MACH_MSM_BOARD_MAHIMAHI_H +#define __ARCH_ARM_MACH_MSM_BOARD_MAHIMAHI_H + +#include + +#define MSM_SMI_BASE 0x02B00000 +#define MSM_SMI_SIZE 0x01500000 + +#define MSM_PMEM_VENC_BASE 0x02B00000 +#define MSM_PMEM_VENC_SIZE 0x00800000 + +#define MSM_GPU_MEM_BASE 0x03300000 +#define MSM_GPU_MEM_SIZE 0x00500000 + +#define MSM_RAM_CONSOLE_BASE 0x03A00000 +#define MSM_RAM_CONSOLE_SIZE 0x00040000 + +#define MSM_FB_BASE 0x03B00000 +#define MSM_FB_SIZE 0x00300000 + +#define MSM_EBI1_BANK0_BASE 0x20000000 +#define MSM_EBI1_BANK0_SIZE 0x0E800000 + +#define MSM_EBI1_BANK1_BASE 0x30000000 +#define MSM_EBI1_BANK1_SIZE 0x0B700000 + +#define MSM_PMEM_MDP_BASE 0x3B700000 +#define MSM_PMEM_MDP_SIZE 0x02000000 + +#define MSM_PMEM_ADSP_BASE 0x3D700000 +#define MSM_PMEM_ADSP_SIZE 0x02900000 + +#define MAHIMAHI_GPIO_PS_HOLD 25 + +#define MAHIMAHI_GPIO_UP_INT_N 35 +#define MAHIMAHI_GPIO_UP_RESET_N 82 +#define MAHIMAHI_GPIO_LS_EN_N 119 + +#define MAHIMAHI_GPIO_TP_INT_N 92 +#define MAHIMAHI_GPIO_TP_LS_EN 93 +#define MAHIMAHI_GPIO_TP_EN 160 + +#define MAHIMAHI_GPIO_POWER_KEY 94 +#define MAHIMAHI_GPIO_SDMC_CD_REV0_N 153 + +#define MAHIMAHI_GPIO_WIFI_SHUTDOWN_N 127 +#define MAHIMAHI_GPIO_WIFI_IRQ 152 + +#define MAHIMAHI_GPIO_BALL_UP 38 +#define MAHIMAHI_GPIO_BALL_DOWN 37 +#define MAHIMAHI_GPIO_BALL_LEFT 145 +#define MAHIMAHI_GPIO_BALL_RIGHT 21 + +#define MAHIMAHI_GPIO_BT_UART1_RTS 43 +#define MAHIMAHI_GPIO_BT_UART1_CTS 44 +#define MAHIMAHI_GPIO_BT_UART1_RX 45 +#define MAHIMAHI_GPIO_BT_UART1_TX 46 +#define MAHIMAHI_GPIO_BT_RESET_N 146 +#define MAHIMAHI_GPIO_BT_SHUTDOWN_N 128 + +#define MAHIMAHI_GPIO_BT_WAKE 57 +#define MAHIMAHI_GPIO_BT_HOST_WAKE 86 + +#define MAHIMAHI_GPIO_PROXIMITY_INT_N 90 +#define MAHIMAHI_GPIO_PROXIMITY_EN 120 + +#define MAHIMAHI_GPIO_DS2482_SLP_N 87 +#define MAHIMAHI_GPIO_VIBRATOR_ON 89 +/* Compass */ +#define MAHIMAHI_REV0_GPIO_COMPASS_INT_N 36 + +#define MAHIMAHI_GPIO_COMPASS_INT_N 153 +#define MAHIMAHI_GPIO_COMPASS_RST_N 107 +#define MAHIMAHI_PROJECT_NAME "mahimahi" +#define MAHIMAHI_LAYOUTS { \ + { {-1, 0, 0}, { 0, -1, 0}, {0, 0, 1} }, \ + { { 0, -1, 0}, { 1, 0, 0}, {0, 0, -1} }, \ + { { 0, -1, 0}, { 1, 0, 0}, {0, 0, 1} }, \ + { {-1, 0, 0}, { 0, 0, -1}, {0, 1, 0} } \ +} + +/* Audio */ +#define MAHIMAHI_AUD_JACKHP_EN 157 +#define MAHIMAHI_AUD_2V5_EN 158 +#define MAHIMAHI_AUD_MICPATH_SEL 111 +#define MAHIMAHI_AUD_A1026_INT 112 +#define MAHIMAHI_AUD_A1026_WAKEUP 113 +#define MAHIMAHI_AUD_A1026_RESET 129 +#define MAHIMAHI_AUD_A1026_CLK -1 +#define MAHIMAHI_CDMA_XA_AUD_A1026_CLK 105 +/* NOTE: MAHIMAHI_CDMA_XB_AUD_A1026_WAKEUP on CDMA is the same GPIO as + * MAHIMAHI_GPIO_BATTERY_CHARGER_CURRENT on UMTS. Also, + * MAHIMAHI_CDMA_XB_AUD_A1026_RESET is the same as + * GPIO MAHIMAHI_GPIO_35MM_KEY_INT_SHUTDOWN on UMTS. 
+ */ +#define MAHIMAHI_CDMA_XB_AUD_A1026_WAKEUP 16 +#define MAHIMAHI_CDMA_XB_AUD_A1026_RESET 19 +#define MAHIMAHI_CDMA_XB_AUD_A1026_CLK -1 + +/* Bluetooth PCM */ +#define MAHIMAHI_BT_PCM_OUT 68 +#define MAHIMAHI_BT_PCM_IN 69 +#define MAHIMAHI_BT_PCM_SYNC 70 +#define MAHIMAHI_BT_PCM_CLK 71 +/* flash light */ +#define MAHIMAHI_GPIO_FLASHLIGHT_TORCH 58 +#define MAHIMAHI_GPIO_FLASHLIGHT_FLASH 84 + +#define MAHIMAHI_GPIO_LED_3V3_EN 85 +#define MAHIMAHI_GPIO_LCD_RST_N 29 +#define MAHIMAHI_GPIO_LCD_ID0 147 + +/* 3.5mm remote control key interrupt shutdown signal */ +#define MAHIMAHI_GPIO_35MM_KEY_INT_SHUTDOWN 19 + +#define MAHIMAHI_GPIO_DOCK 106 + +/* speaker amplifier enable pin for mahimahi CDMA version */ +#define MAHIMAHI_CDMA_GPIO_AUD_SPK_AMP_EN 104 + +#define MAHIMAHI_GPIO_BATTERY_DETECTION 39 +#define MAHIMAHI_GPIO_BATTERY_CHARGER_EN 22 +#define MAHIMAHI_GPIO_BATTERY_CHARGER_CURRENT 16 + +#define MAHIMAHI_CDMA_GPIO_BT_WAKE 28 +#define MAHIMAHI_CDMA_GPIO_FLASHLIGHT_TORCH 26 + +#define MAHIMAHI_CDMA_SD_2V85_EN 100 +#define MAHIMAHI_CDMA_JOG_2V6_EN 150 +/* display relative */ +#define MAHIMAHI_LCD_SPI_CLK (17) +#define MAHIMAHI_LCD_SPI_DO (18) +#define MAHIMAHI_LCD_SPI_CSz (20) +#define MAHIMAHI_LCD_RSTz (29) +#define MAHIMAHI_LCD_R1 (114) +#define MAHIMAHI_LCD_R2 (115) +#define MAHIMAHI_LCD_R3 (116) +#define MAHIMAHI_LCD_R4 (117) +#define MAHIMAHI_LCD_R5 (118) +#define MAHIMAHI_LCD_G0 (121) +#define MAHIMAHI_LCD_G1 (122) +#define MAHIMAHI_LCD_G2 (123) +#define MAHIMAHI_LCD_G3 (124) +#define MAHIMAHI_LCD_G4 (125) +#define MAHIMAHI_LCD_G5 (126) +#define MAHIMAHI_LCD_B1 (130) +#define MAHIMAHI_LCD_B2 (131) +#define MAHIMAHI_LCD_B3 (132) +#define MAHIMAHI_LCD_B4 (133) +#define MAHIMAHI_LCD_B5 (134) +#define MAHIMAHI_LCD_PCLK (135) +#define MAHIMAHI_LCD_VSYNC (136) +#define MAHIMAHI_LCD_HSYNC (137) +#define MAHIMAHI_LCD_DE (138) +#define is_cdma_version(rev) (((rev) & 0xF0) == 0xC0) + +#endif /* __ARCH_ARM_MACH_MSM_BOARD_MAHIMAHI_H */ diff --git a/arch/arm/mach-msm/board-msm7x30-audio.c b/arch/arm/mach-msm/board-msm7x30-audio.c new file mode 100644 index 0000000000000..b83a66ea32682 --- /dev/null +++ b/arch/arm/mach-msm/board-msm7x30-audio.c @@ -0,0 +1,59 @@ +/* linux/arch/arm/mach-msm/board-msm7x30-audio.c + * + * Copyright (C) 2011 Google, Inc. + * + * Author: Dima Zavin + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include + +#include + +#include "gpiomux.h" +#include "board-msm7x30.h" + +static void msm7x30_speaker_amp_init(void) +{ + msm_gpiomux_write(82, 0, + GPIOMUX_FUNC_GPIO | + GPIOMUX_PULL_NONE | + GPIOMUX_DIR_OUTPUT | + GPIOMUX_DRV_2MA | GPIOMUX_VALID); + + gpio_request(82, "poweramp"); + gpio_direction_output(82, 1); +} + +static void msm7x30_marimba_init(void) +{ + struct vreg *vr; + + vr = vreg_get(NULL, "s3"); + vreg_set_level(vr, 1800); + vreg_enable(vr); + + vr = vreg_get(NULL, "gp16"); + vreg_set_level(vr, 1200); + vreg_enable(vr); + + vr = vreg_get(NULL, "usb2"); + vreg_set_level(vr, 1800); + vreg_enable(vr); +} + +void msm7x30_board_audio_init(void) +{ + msm7x30_marimba_init(); + msm7x30_speaker_amp_init(); +} diff --git a/arch/arm/mach-msm/board-msm7x30-panel.c b/arch/arm/mach-msm/board-msm7x30-panel.c new file mode 100644 index 0000000000000..4f40834058152 --- /dev/null +++ b/arch/arm/mach-msm/board-msm7x30-panel.c @@ -0,0 +1,969 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include "gpiomux.h" +#include "devices.h" +#include "pmic.h" +#include "board-msm7x30.h" + +#define MSM_FB_SIZE (0x00500000UL) + +#define CLK_NS_TO_RATE(ns) (1000000000UL / (ns)) + +#define MDDI_CLIENT_CORE_BASE (0x00108000UL) +#define LCD_CONTROL_BLOCK_BASE (0x00110000UL) +#define SPI_BLOCK_BASE (0x00120000UL) +#define PWM_BLOCK_BASE (0x00140000UL) +#define GPIO_BLOCK_BASE (0x00150000UL) +#define SYSTEM_BLOCK1_BASE (0x00160000UL) +#define SYSTEM_BLOCK2_BASE (0x00170000UL) + +#define TTBUSSEL (MDDI_CLIENT_CORE_BASE | 0x18) +#define DPSET0 (MDDI_CLIENT_CORE_BASE | 0x1C) +#define DPSET1 (MDDI_CLIENT_CORE_BASE | 0x20) +#define DPSUS (MDDI_CLIENT_CORE_BASE | 0x24) +#define DPRUN (MDDI_CLIENT_CORE_BASE | 0x28) +#define SYSCKENA (MDDI_CLIENT_CORE_BASE | 0x2C) + +#define BITMAP0 (MDDI_CLIENT_CORE_BASE|0x44) +#define BITMAP1 (MDDI_CLIENT_CORE_BASE|0x48) +#define BITMAP2 (MDDI_CLIENT_CORE_BASE|0x4C) +#define BITMAP3 (MDDI_CLIENT_CORE_BASE|0x50) +#define BITMAP4 (MDDI_CLIENT_CORE_BASE|0x54) + +#define SRST (LCD_CONTROL_BLOCK_BASE|0x00) +#define PORT_ENB (LCD_CONTROL_BLOCK_BASE|0x04) +#define START (LCD_CONTROL_BLOCK_BASE|0x08) +#define PORT (LCD_CONTROL_BLOCK_BASE|0x0C) + +#define INTFLG (LCD_CONTROL_BLOCK_BASE|0x18) +#define INTMSK (LCD_CONTROL_BLOCK_BASE|0x1C) +#define MPLFBUF (LCD_CONTROL_BLOCK_BASE|0x20) + +#define PXL (LCD_CONTROL_BLOCK_BASE|0x30) +#define HCYCLE (LCD_CONTROL_BLOCK_BASE|0x34) +#define HSW (LCD_CONTROL_BLOCK_BASE|0x38) +#define HDE_START (LCD_CONTROL_BLOCK_BASE|0x3C) +#define HDE_SIZE (LCD_CONTROL_BLOCK_BASE|0x40) +#define VCYCLE (LCD_CONTROL_BLOCK_BASE|0x44) +#define VSW (LCD_CONTROL_BLOCK_BASE|0x48) +#define 
VDE_START (LCD_CONTROL_BLOCK_BASE|0x4C) +#define VDE_SIZE (LCD_CONTROL_BLOCK_BASE|0x50) +#define WAKEUP (LCD_CONTROL_BLOCK_BASE|0x54) +#define REGENB (LCD_CONTROL_BLOCK_BASE|0x5C) +#define VSYNIF (LCD_CONTROL_BLOCK_BASE|0x60) +#define WRSTB (LCD_CONTROL_BLOCK_BASE|0x64) +#define RDSTB (LCD_CONTROL_BLOCK_BASE|0x68) +#define ASY_DATA (LCD_CONTROL_BLOCK_BASE|0x6C) +#define ASY_DATB (LCD_CONTROL_BLOCK_BASE|0x70) +#define ASY_DATC (LCD_CONTROL_BLOCK_BASE|0x74) +#define ASY_DATD (LCD_CONTROL_BLOCK_BASE|0x78) +#define ASY_DATE (LCD_CONTROL_BLOCK_BASE|0x7C) +#define ASY_DATF (LCD_CONTROL_BLOCK_BASE|0x80) +#define ASY_DATG (LCD_CONTROL_BLOCK_BASE|0x84) +#define ASY_DATH (LCD_CONTROL_BLOCK_BASE|0x88) +#define ASY_CMDSET (LCD_CONTROL_BLOCK_BASE|0x8C) +#define MONI (LCD_CONTROL_BLOCK_BASE|0xB0) +#define VPOS (LCD_CONTROL_BLOCK_BASE|0xC0) + +#define SSICTL (SPI_BLOCK_BASE|0x00) +#define SSITIME (SPI_BLOCK_BASE|0x04) +#define SSITX (SPI_BLOCK_BASE|0x08) +#define SSIINTS (SPI_BLOCK_BASE|0x14) + +#define TIMER0LOAD (PWM_BLOCK_BASE|0x00) +#define TIMER0CTRL (PWM_BLOCK_BASE|0x08) +#define PWM0OFF (PWM_BLOCK_BASE|0x1C) +#define TIMER1LOAD (PWM_BLOCK_BASE|0x20) +#define TIMER1CTRL (PWM_BLOCK_BASE|0x28) +#define PWM1OFF (PWM_BLOCK_BASE|0x3C) +#define TIMER2LOAD (PWM_BLOCK_BASE|0x40) +#define TIMER2CTRL (PWM_BLOCK_BASE|0x48) +#define PWM2OFF (PWM_BLOCK_BASE|0x5C) +#define PWMCR (PWM_BLOCK_BASE|0x68) + +#define GPIODATA (GPIO_BLOCK_BASE|0x00) +#define GPIODIR (GPIO_BLOCK_BASE|0x04) +#define GPIOIS (GPIO_BLOCK_BASE|0x08) +#define GPIOIEV (GPIO_BLOCK_BASE|0x10) +#define GPIOIC (GPIO_BLOCK_BASE|0x20) +#define GPIOPC (GPIO_BLOCK_BASE|0x28) + +#define WKREQ (SYSTEM_BLOCK1_BASE|0x00) +#define CLKENB (SYSTEM_BLOCK1_BASE|0x04) +#define DRAMPWR (SYSTEM_BLOCK1_BASE|0x08) +#define INTMASK (SYSTEM_BLOCK1_BASE|0x0C) +#define CNT_DIS (SYSTEM_BLOCK1_BASE|0x10) + +#define GPIOSEL (SYSTEM_BLOCK2_BASE|0x00) + +struct mddi_table { + uint32_t reg; + uint32_t value; +}; +static struct mddi_table mddi_toshiba_init_table[] = { + { DPSET0, 0x4bec0066 }, + { DPSET1, 0x00000113 }, + { DPSUS, 0x00000000 }, + { DPRUN, 0x00000001 }, + { 1, 5 }, + { SYSCKENA, 0x00000001 }, + { CLKENB, 0x0000A0E9 }, + { GPIODATA, 0x03FF0000 }, + { GPIODIR, 0x0000024D }, + { GPIOSEL, 0x00000173 }, + { GPIOPC, 0x03C300C0 }, + { WKREQ, 0x00000000 }, + + { GPIOIS, 0x00000000 }, + { GPIOIEV, 0x00000001 }, + { GPIOIC, 0x000003FF }, + { GPIODATA, 0x00040004 }, + + { GPIODATA, 0x00080008 }, + { DRAMPWR, 0x00000001 }, + { CLKENB, 0x0000A0EB }, + { PWMCR, 0x00000000 }, + { 1, 1 }, + { SSICTL, 0x00060399 }, + { SSITIME, 0x00000100 }, + { CNT_DIS, 0x00000002 }, + { SSICTL, 0x0006039b }, + { SSITX, 0x00000000 }, + { 1, 7 }, + { SSITX, 0x00000000 }, + { 1, 7 }, + { SSITX, 0x00000000 }, + { 1, 7 }, + { SSITX, 0x000800BA }, + { SSITX, 0x00000111 }, + { SSITX, 0x00080036 }, + { SSITX, 0x00000100 }, + { 1, 1 }, + { SSITX, 0x0008003A }, + { SSITX, 0x00000160 }, + { SSITX, 0x000800B1 }, + { SSITX, 0x0000015D }, + { 1, 1 }, + { SSITX, 0x000800B2 }, + { SSITX, 0x00000133 }, + { SSITX, 0x000800B3 }, + { SSITX, 0x00000122 }, + { 1, 1 }, + { SSITX, 0x000800B4 }, + { SSITX, 0x00000102 }, + { SSITX, 0x000800B5 }, + { SSITX, 0x0000011E }, + { 1, 1 }, + { SSITX, 0x000800B6 }, + { SSITX, 0x00000127 }, + { SSITX, 0x000800B7 }, + { SSITX, 0x00000103 }, + { 1, 1 }, + { SSITX, 0x000800B9 }, + { SSITX, 0x00000124 }, + { SSITX, 0x000800BD }, + { SSITX, 0x000001A1 }, + { 1, 1 }, + { SSITX, 0x000800BB }, + { SSITX, 0x00000100 }, + { SSITX, 0x000800BF }, + { SSITX, 0x00000101 }, + { 1, 1 }, + { 
SSITX, 0x000800BE }, + { SSITX, 0x00000100 }, + { SSITX, 0x000800C0 }, + { SSITX, 0x00000111 }, + { 1, 1 }, + { SSITX, 0x000800C1 }, + { SSITX, 0x00000111 }, + { SSITX, 0x000800C2 }, + { SSITX, 0x00000111 }, + { 1, 1 }, + { SSITX, 0x000800C3 }, + { SSITX, 0x00080132 }, + { SSITX, 0x00000132 }, + { 1, 1 }, + { SSITX, 0x000800C4 }, + { SSITX, 0x00080132 }, + { SSITX, 0x00000132 }, + { 1, 1 }, + { SSITX, 0x000800C5 }, + { SSITX, 0x00080132 }, + { SSITX, 0x00000132 }, + { 1, 1 }, + { SSITX, 0x000800C6 }, + { SSITX, 0x00080132 }, + { SSITX, 0x00000132 }, + { 1, 1 }, + { SSITX, 0x000800C7 }, + { SSITX, 0x00080164 }, + { SSITX, 0x00000145 }, + { 1, 1 }, + { SSITX, 0x000800C8 }, + { SSITX, 0x00000144 }, + { SSITX, 0x000800C9 }, + { SSITX, 0x00000152 }, + { 1, 1 }, + { SSITX, 0x000800CA }, + { SSITX, 0x00000100 }, + { 1, 1 }, + { SSITX, 0x000800EC }, + { SSITX, 0x00080101 }, + { SSITX, 0x000001FC }, + { 1, 1 }, + { SSITX, 0x000800CF }, + { SSITX, 0x00000101 }, + { 1, 1 }, + { SSITX, 0x000800D0 }, + { SSITX, 0x00080110 }, + { SSITX, 0x00000104 }, + { 1, 1 }, + { SSITX, 0x000800D1 }, + { SSITX, 0x00000101 }, + { 1, 1 }, + { SSITX, 0x000800D2 }, + { SSITX, 0x00080100 }, + { SSITX, 0x00000128 }, + { 1, 1 }, + { SSITX, 0x000800D3 }, + { SSITX, 0x00080100 }, + { SSITX, 0x00000128 }, + { 1, 1 }, + { SSITX, 0x000800D4 }, + { SSITX, 0x00080126 }, + { SSITX, 0x000001A4 }, + { 1, 1 }, + { SSITX, 0x000800D5 }, + { SSITX, 0x00000120 }, + { 1, 1 }, + { SSITX, 0x000800EF }, + { SSITX, 0x00080132 }, + { SSITX, 0x00000100 }, + { 1, 1 }, + { BITMAP0, 0x032001E0 }, + { BITMAP1, 0x032001E0 }, + { BITMAP2, 0x014000F0 }, + { BITMAP3, 0x014000F0 }, + { BITMAP4, 0x014000F0 }, + { CLKENB, 0x0000A1EB }, + { PORT_ENB, 0x00000001 }, + { PORT, 0x00000004 }, + { PXL, 0x00000002 }, + { MPLFBUF, 0x00000000 }, + { HCYCLE, 0x000000FD }, + { HSW, 0x00000003 }, + { HDE_START, 0x00000007 }, + { HDE_SIZE, 0x000000EF }, + { VCYCLE, 0x00000325 }, + { VSW, 0x00000001 }, + { VDE_START, 0x00000003 }, + { VDE_SIZE, 0x0000031F }, + { START, 0x00000001 }, + { 1, 32 }, + { SSITX, 0x000800BC }, + { SSITX, 0x00000180 }, + { SSITX, 0x0008003B }, + { SSITX, 0x00000100 }, + { 1, 1 }, + { SSITX, 0x000800B0 }, + { SSITX, 0x00000116 }, + { 1, 1 }, + { SSITX, 0x000800B8 }, + { SSITX, 0x000801FF }, + { SSITX, 0x000001F5 }, + { 1, 1 }, + { SSITX, 0x00000011 }, + { 1, 5 }, + { SSITX, 0x00000029 }, +}; + +#define MSM7X30_SURF_DEFAULT_BACKLIGHT_BRIGHTNESS 15 + +static int msm7x30_backlight_off; +static int msm7x30_backlight_brightness = + MSM7X30_SURF_DEFAULT_BACKLIGHT_BRIGHTNESS; + +static DEFINE_MUTEX(msm7x30_backlight_lock); + + +static void msm7x30_set_backlight_level(uint8_t level) +{ + if (machine_is_msm7x30_fluid()) { + gpio_set_value(MSM7X30_PM8058_GPIO(25), !!level); + } else { + pmic_set_led_intensity(LED_LCD, + MSM7X30_SURF_DEFAULT_BACKLIGHT_BRIGHTNESS * + level / LED_FULL); + } +} + +static void msm7x30_process_mddi_table(struct msm_mddi_client_data *client_data, + const struct mddi_table *table, + size_t count) +{ + int i; + for (i = 0; i < count; i++) { + uint32_t reg = table[i].reg; + uint32_t value = table[i].value; + + if (reg == 0) + udelay(value); + else if (reg == 1) + msleep(value); + else + client_data->remote_write(client_data, value, reg); + } +} + +static unsigned wega_reset_gpio = 180; +static unsigned fluid_vee_reset_gpio = 20; + +static struct pm8058_pin_config msm7x30_mddi_sleep_clk_cfg_on = { + .vin_src = PM8058_GPIO_VIN_SRC_VREG_S3, + .dir = PM8058_GPIO_OUTPUT, + .pull_up = PM8058_GPIO_PULL_NONE, + .strength = 
PM8058_GPIO_STRENGTH_HIGH, + .func = PM8058_GPIO_FUNC_2, +}; + +static struct pm8058_pin_config msm7x30_mddi_sleep_clk_cfg_off = { + .vin_src = PM8058_GPIO_VIN_SRC_VREG_S3, + .dir = PM8058_GPIO_OUTPUT, + .pull_up = PM8058_GPIO_PULL_NONE, + .strength = PM8058_GPIO_STRENGTH_HIGH, + .func = PM8058_GPIO_FUNC_NORMAL, +}; + +static struct pm8058_pin_config msm7x30_fluid_backlight = { + .vin_src = PM8058_GPIO_VIN_SRC_VREG_S3, + .dir = PM8058_GPIO_OUTPUT, + .pull_up = PM8058_GPIO_PULL_NONE, + .strength = PM8058_GPIO_STRENGTH_HIGH, + .func = PM8058_GPIO_FUNC_NORMAL, +}; + +static void msm7x30_power_panel(int on) +{ + int rc = 0; + struct vreg *vreg_ldo12, *vreg_ldo15 = NULL; + struct vreg *vreg_ldo20, *vreg_ldo16, *vreg_ldo8 = NULL; + + if (on) { + // XXX enable wega reset gpio + + /* reset Toshiba WeGA chip -- toggle reset pin -- gpio_180 */ + gpio_set_value(180, 0); /* bring reset line low to hold reset*/ + } + + /* Toshiba WeGA power -- has 3 power source */ + /* 1.5V -- LDO20*/ + vreg_ldo20 = vreg_get(NULL, "gp13"); + + if (IS_ERR(vreg_ldo20) || vreg_ldo20 == NULL) { + rc = PTR_ERR(vreg_ldo20); + pr_err("%s: gp13 vreg get failed (%d)\n", + __func__, rc); + return; + } + + /* 1.8V -- LDO12 */ + vreg_ldo12 = vreg_get(NULL, "gp9"); + + if (IS_ERR(vreg_ldo12) || vreg_ldo12 == NULL) { + rc = PTR_ERR(vreg_ldo12); + pr_err("%s: gp9 vreg get failed (%d)\n", + __func__, rc); + return; + } + + /* 2.6V -- LDO16 */ + vreg_ldo16 = vreg_get(NULL, "gp10"); + + if (IS_ERR(vreg_ldo16) || vreg_ldo16 == NULL) { + rc = PTR_ERR(vreg_ldo16); + pr_err("%s: gp10 vreg get failed (%d)\n", + __func__, rc); + return; + } + + if (machine_is_msm7x30_fluid()) { + /* 1.8V -- LDO8 */ + vreg_ldo8 = vreg_get(NULL, "gp7"); + + if (IS_ERR(vreg_ldo8) || vreg_ldo8 == NULL) { + rc = PTR_ERR(vreg_ldo8); + pr_err("%s: gp7 vreg get failed (%d)\n", + __func__, rc); + return; + } + } else { + /* lcd panel power */ + /* 3.1V -- LDO15 */ + vreg_ldo15 = vreg_get(NULL, "gp6"); + + if (IS_ERR(vreg_ldo15) || vreg_ldo15 == NULL) { + rc = PTR_ERR(vreg_ldo15); + pr_err("%s: gp6 vreg get failed (%d)\n", + __func__, rc); + return; + } + } + + rc = vreg_set_level(vreg_ldo20, 1500); + if (rc) { + pr_err("%s: vreg LDO20 set level failed (%d)\n", + __func__, rc); + return; + } + + rc = vreg_set_level(vreg_ldo12, 1800); + if (rc) { + pr_err("%s: vreg LDO12 set level failed (%d)\n", + __func__, rc); + return; + } + + rc = vreg_set_level(vreg_ldo16, 2600); + if (rc) { + pr_err("%s: vreg LDO16 set level failed (%d)\n", + __func__, rc); + return; + } + + if (machine_is_msm7x30_fluid()) { + rc = vreg_set_level(vreg_ldo8, 1800); + if (rc) { + pr_err("%s: vreg LDO8 set level failed (%d)\n", + __func__, rc); + return; + } + } else { + rc = vreg_set_level(vreg_ldo15, 3100); + if (rc) { + pr_err("%s: vreg LDO15 set level failed (%d)\n", + __func__, rc); + return; + } + } + + if (on) { + rc = vreg_enable(vreg_ldo20); + if (rc) { + pr_err("%s: LDO20 vreg enable failed (%d)\n", + __func__, rc); + return; + } + + rc = vreg_enable(vreg_ldo12); + if (rc) { + pr_err("%s: LDO12 vreg enable failed (%d)\n", + __func__, rc); + return; + } + + rc = vreg_enable(vreg_ldo16); + if (rc) { + pr_err("%s: LDO16 vreg enable failed (%d)\n", + __func__, rc); + return; + } + + if (machine_is_msm7x30_fluid()) { + rc = vreg_enable(vreg_ldo8); + if (rc) { + pr_err("%s: LDO8 vreg enable failed (%d)\n", + __func__, rc); + return; + } + } else { + rc = vreg_enable(vreg_ldo15); + if (rc) { + pr_err("%s: LDO15 vreg enable failed (%d)\n", + __func__, rc); + return; + } + } + + mdelay(5); /* 
ensure power is stable */ + + if (machine_is_msm7x30_fluid()) { + // XXX enable vee reset gpio + + /* assert vee reset_n */ + gpio_set_value(20, 1); + gpio_set_value(20, 0); + mdelay(1); + gpio_set_value(20, 1); + } + + gpio_set_value(180, 1); /* bring reset line high */ + mdelay(10); /* 10 msec before IO can be accessed */ + + rc = pm8058_gpio_mux(MSM7X30_PM8058_GPIO(37), + &msm7x30_mddi_sleep_clk_cfg_on); + if (rc) + pr_err("%s: pm8058_gpio_mux failure\n", __func__); + } else { + vreg_disable(vreg_ldo20); + vreg_disable(vreg_ldo16); + + gpio_set_value(180, 0); /* bring reset line low */ + + if (machine_is_msm7x30_fluid()) + vreg_disable(vreg_ldo8); + else + vreg_disable(vreg_ldo15); + + mdelay(5); /* ensure power is stable */ + + vreg_disable(vreg_ldo12); + + if (machine_is_msm7x30_fluid()) { + // XXX disable vee_reset_gpio + } + + rc = pm8058_gpio_mux(MSM7X30_PM8058_GPIO(37), + &msm7x30_mddi_sleep_clk_cfg_off); + if (rc) + pr_err("%s: pm8058_gpio_mux failure\n", __func__); + } +} + +static void msm7x30_mddi_power_client(struct msm_mddi_client_data *mddi, int on) +{ + msm7x30_power_panel(on); +} + +static int msm7x30_mddi_toshiba_client_init( + struct msm_mddi_bridge_platform_data *bridge_data, + struct msm_mddi_client_data *client_data) +{ + client_data->auto_hibernate(client_data, 0); + msm7x30_process_mddi_table(client_data, mddi_toshiba_init_table, + ARRAY_SIZE(mddi_toshiba_init_table)); + client_data->auto_hibernate(client_data, 1); + + return 0; +} + +static int msm7x30_mddi_toshiba_client_uninit( + struct msm_mddi_bridge_platform_data *bridge_data, + struct msm_mddi_client_data *client_data) +{ + return 0; +} + +static int msm7x30_mddi_panel_unblank( + struct msm_mddi_bridge_platform_data *bridge_data, + struct msm_mddi_client_data *client_data) +{ + mutex_lock(&msm7x30_backlight_lock); + msm7x30_set_backlight_level(msm7x30_backlight_brightness); + msm7x30_backlight_off = 0; + mutex_unlock(&msm7x30_backlight_lock); + + return 0; +} + +static int msm7x30_mddi_panel_blank( + struct msm_mddi_bridge_platform_data *bridge_data, + struct msm_mddi_client_data *client_data) +{ + mutex_lock(&msm7x30_backlight_lock); + msm7x30_set_backlight_level(0); + msm7x30_backlight_off = 1; + mutex_unlock(&msm7x30_backlight_lock); + + return 0; +} + +static void msm7x30_brightness_set(struct led_classdev *led_cdev, + enum led_brightness value) +{ + mutex_lock(&msm7x30_backlight_lock); + msm7x30_backlight_brightness = value; + if (!msm7x30_backlight_off) + msm7x30_set_backlight_level(msm7x30_backlight_brightness); + mutex_unlock(&msm7x30_backlight_lock); +} + +static struct led_classdev msm7x30_backlight_led = { + .name = "lcd-backlight", + .brightness = MSM7X30_SURF_DEFAULT_BACKLIGHT_BRIGHTNESS, + .brightness_set = msm7x30_brightness_set, +}; + +static int msm7x30_backlight_probe(struct platform_device *pdev) +{ + led_classdev_register(&pdev->dev, &msm7x30_backlight_led); + return 0; +} + +static int msm7x30_backlight_remove(struct platform_device *pdev) +{ + led_classdev_unregister(&msm7x30_backlight_led); + return 0; +} + +static struct platform_driver msm7x30_backlight_driver = { + .probe = msm7x30_backlight_probe, + .remove = msm7x30_backlight_remove, + .driver = { + .name = "msm7x30-backlight", + .owner = THIS_MODULE, + }, +}; + +static struct resource resources_msm_fb[] = { + { + .flags = IORESOURCE_MEM, + }, +}; + +void __init msm7x30_allocate_fbmem(void) +{ + unsigned long base; + unsigned long size; + + size = MSM_FB_SIZE; + base = memblock_alloc(size, SZ_1M); + memblock_free(base, size); + 
memblock_remove(base, size); + resources_msm_fb[0].start = base; + resources_msm_fb[0].end = resources_msm_fb[0].start + size - 1; + pr_info("%s: allocated %lu bytes at 0x%lx\n", __func__, size, base); +} + +#define TOSHIBAWEGA_MFR_NAME 0xd263 +#define TOSHIBAWEGA_PRODUCT_CODE 0x8722 + +static void toshibawega_fixup(uint16_t *mfr_name, uint16_t *product_code) +{ + *mfr_name = TOSHIBAWEGA_MFR_NAME ; + *product_code = TOSHIBAWEGA_PRODUCT_CODE ; +} + +static struct msm_mddi_bridge_platform_data toshiba_client_data = { + .init = msm7x30_mddi_toshiba_client_init, + .uninit = msm7x30_mddi_toshiba_client_uninit, + .blank = msm7x30_mddi_panel_blank, + .unblank = msm7x30_mddi_panel_unblank, + .fb_data = { + .xres = 480, + .yres = 800, + .width = 45, + .height = 67, + .output_format = MSM_MDP_OUT_IF_FMT_RGB888, + }, +}; + +static struct msm_mddi_platform_data mddi_pdata = { + .clk_rate = 445500000, + .fixup = toshibawega_fixup, + .power_client = msm7x30_mddi_power_client, + .fb_resource = resources_msm_fb, + .num_clients = 1, + .client_platform_data = { + { + .product_id = (0xd263 << 16 | 0x8722), + .name = "mddi_c_simple", + .id = 0, + .client_data = &toshiba_client_data, + .clk_rate = 0, + }, + }, +}; + +/********************** FLUID LCDC PANEL CODE */ +struct { + u8 addr; + u8 data; +} fluid_sharp_init_tbl[] = { + { 15, 0x01 }, + { 5, 0x01 }, + { 7, 0x10 }, + { 9, 0x1E }, + { 10, 0x04 }, + { 17, 0xFF }, + { 21, 0x8A }, + { 22, 0x00 }, + { 23, 0x82 }, + { 24, 0x24 }, + { 25, 0x22 }, + { 26, 0x6D }, + { 27, 0xEB }, + { 28, 0xB9 }, + { 29, 0x3A }, + { 49, 0x1A }, + { 50, 0x16 }, + { 51, 0x05 }, + { 55, 0x7F }, + { 56, 0x15 }, + { 57, 0x7B }, + { 60, 0x05 }, + { 61, 0x0C }, + { 62, 0x80 }, + { 63, 0x00 }, + { 92, 0x90 }, + { 97, 0x01 }, + { 98, 0xFF }, + { 113, 0x11 }, + { 114, 0x02 }, + { 115, 0x08 }, + { 123, 0xAB }, + { 124, 0x04 }, + { 6, 0x02 }, + { 133, 0x00 }, + { 134, 0xFE }, + { 135, 0x22 }, + { 136, 0x0B }, + { 137, 0xFF }, + { 138, 0x0F }, + { 139, 0x00 }, + { 140, 0xFE }, + { 141, 0x22 }, + { 142, 0x0B }, + { 143, 0xFF }, + { 144, 0x0F }, + { 145, 0x00 }, + { 146, 0xFE }, + { 147, 0x22 }, + { 148, 0x0B }, + { 149, 0xFF }, + { 150, 0x0F }, + { 202, 0x30 }, + { 30, 0x01 }, + { 4, 0x01 }, + { 31, 0x41 }, +}; + +static struct spi_device *lcdc_spi_client; + +static int fluid_spi_write(u8 reg, u8 data) +{ + u8 tx_buf[2]; + int rc; + struct spi_message m; + struct spi_transfer t = { + .tx_buf = tx_buf, + }; + + if (!lcdc_spi_client) { + pr_err("%s: lcdc_spi_client is NULL\n", __func__); + return -EINVAL; + } + + spi_setup(lcdc_spi_client); + spi_message_init(&m); + spi_message_add_tail(&t, &m); + + tx_buf[0] = reg; + tx_buf[1] = data; + t.rx_buf = NULL; + t.len = 2; + rc = spi_sync(lcdc_spi_client, &m); + return rc; +} + +int fluid_panel_blank(struct msm_lcdc_panel_ops *ops) +{ + /* TODO: Turn backlight off? */ + return 0; +} + +int fluid_panel_unblank(struct msm_lcdc_panel_ops *ops) +{ + /* TODO: Turn backlight on? 
*/ + return 0; +} + +int fluid_panel_init(struct msm_lcdc_panel_ops *ops) +{ + int i; + + msm7x30_power_panel(true); + + for (i = 0; i < ARRAY_SIZE(fluid_sharp_init_tbl); i++) + fluid_spi_write(fluid_sharp_init_tbl[i].addr, + fluid_sharp_init_tbl[i].data); + mdelay(10); + fluid_spi_write(31, 0xC1); + mdelay(10); + fluid_spi_write(31, 0xD9); + fluid_spi_write(31, 0xDF); + + return 0; +} + +static struct msm_lcdc_timing fluid_lcdc_timing = { + .clk_rate = 24576000, + .hsync_pulse_width = 10, + .hsync_back_porch = 20, + .hsync_front_porch = 10, + .hsync_skew = 0, + .vsync_pulse_width = 2, + .vsync_back_porch = 2, + .vsync_front_porch = 2, + .vsync_act_low = 1, + .hsync_act_low = 1, + .den_act_low = 0, +}; + +static struct msm_fb_data fluid_lcdc_fb_data = { + .xres = 480, + .yres = 800, + .width = 57, + .height = 94, + .output_format = MSM_MDP_OUT_IF_FMT_RGB666, +}; + +static struct msm_lcdc_panel_ops fluid_lcdc_panel_ops = { + .init = fluid_panel_init, + .blank = fluid_panel_blank, + .unblank = fluid_panel_unblank, +}; + +static struct msm_lcdc_platform_data fluid_lcdc_platform_data = { + .panel_ops = &fluid_lcdc_panel_ops, + .timing = &fluid_lcdc_timing, + .fb_id = 0, + .fb_data = &fluid_lcdc_fb_data, + .fb_resource = resources_msm_fb, +}; + +static struct platform_device fluid_lcdc_device = { + .name = "msm_mdp_lcdc", + .id = -1, + .dev = { + .platform_data = &fluid_lcdc_platform_data, + }, +}; + +static int __devinit fluid_lcdc_sharp_spi_probe(struct spi_device *spi) +{ + int rc; + + lcdc_spi_client = spi; + lcdc_spi_client->bits_per_word = 32; + + rc = platform_device_register(&fluid_lcdc_device); + if (rc) + return rc; + return 0; +} + +static int __devexit fluid_lcdc_sharp_spi_remove(struct spi_device *spi) +{ + lcdc_spi_client = NULL; + return 0; +} + +static struct spi_driver fluid_lcdc_sharp_spi_driver = { + .driver = { + .name = "fluid_lcdc_sharp", + .owner = THIS_MODULE, + }, + .probe = fluid_lcdc_sharp_spi_probe, + .remove = __devexit_p(fluid_lcdc_sharp_spi_remove), +}; + +static struct spi_board_info fluid_lcdc_sharp_spi_board_info[] __initdata = { + { + .modalias = "fluid_lcdc_sharp", + .mode = SPI_MODE_1, + .bus_num = 0, + .chip_select = 0, + .max_speed_hz = 26331429, + } +}; + +static struct platform_device msm7x30_backlight = { + .name = "msm7x30-backlight", +}; + +int __init msm7x30_init_panel(void) +{ + int rc; + + msm_gpiomux_write(180, 0, + GPIOMUX_FUNC_GPIO | + GPIOMUX_PULL_NONE | + GPIOMUX_DIR_OUTPUT | + GPIOMUX_DRV_2MA | GPIOMUX_VALID); + msm_gpiomux_write(20, 0, + GPIOMUX_FUNC_GPIO | + GPIOMUX_PULL_NONE | + GPIOMUX_DIR_OUTPUT | + GPIOMUX_DRV_2MA | GPIOMUX_VALID); + + if (machine_is_msm7x30_fluid()) { + int i; + int mux_val = GPIOMUX_FUNC_1 | GPIOMUX_PULL_NONE | + GPIOMUX_DIR_OUTPUT | + GPIOMUX_DRV_2MA | GPIOMUX_VALID; + + msm_gpiomux_write(22, 0, mux_val); + msm_gpiomux_write(25, 0, mux_val); + + for (i = 90; i <= 109; i++) + msm_gpiomux_write(i, 0, mux_val); + } + + rc = platform_device_register(&msm_device_mdp); + if (rc) + return rc; + + if (machine_is_msm7x30_fluid()) { + rc = spi_register_driver(&fluid_lcdc_sharp_spi_driver); + if (rc) + return rc; + spi_register_board_info(fluid_lcdc_sharp_spi_board_info, + ARRAY_SIZE(fluid_lcdc_sharp_spi_board_info)); + pm8058_gpio_mux(MSM7X30_PM8058_GPIO(25), + &msm7x30_fluid_backlight); + gpio_request(MSM7X30_PM8058_GPIO(25), "lcd_backlight"); + } else { + msm_device_mddi0.dev.platform_data = &mddi_pdata; + rc = platform_device_register(&msm_device_mddi0); + if (rc) + return rc; + } + + 
platform_device_register(&msm7x30_backlight); + return platform_driver_register(&msm7x30_backlight_driver); +} + +device_initcall(msm7x30_init_panel); diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c index 6f3b9735e9708..6595eb6545c11 100644 --- a/arch/arm/mach-msm/board-msm7x30.c +++ b/arch/arm/mach-msm/board-msm7x30.c @@ -20,9 +20,17 @@ #include #include #include +#include #include +#include #include -#include +#include +#include +#include +#include +#include +#include +#include #include #include @@ -33,33 +41,315 @@ #include #include #include +#include +#include +#include #include #include "devices.h" #include "proc_comm.h" +#include "clock-pcom.h" +#include "gpiomux.h" +#include "board-msm7x30.h" extern struct sys_timer msm_timer; -static int hsusb_phy_init_seq[] = { - 0x30, 0x32, /* Enable and set Pre-Emphasis Depth to 20% */ - 0x02, 0x36, /* Disable CDR Auto Reset feature */ +static int msm7x30_phy_init_seq[] = { + 0x0C, 0x31, + 0x31, 0x32, + 0x1D, 0x0D, + 0x1D, 0x10, -1 }; -static struct msm_otg_platform_data msm_otg_pdata = { - .phy_init_seq = hsusb_phy_init_seq, - .mode = USB_PERIPHERAL, - .otg_control = OTG_PHY_CONTROL, +static void msm7x30_usb_phy_reset(void) +{ + u32 id; + int ret; + + id = P_USB_PHY_CLK; + ret = msm_proc_comm(PCOM_CLKCTL_RPC_RESET_ASSERT, &id, NULL); + if (ret) { + pr_err("%s: Cannot assert (%d)\n", __func__, ret); + return; + } + + msleep(1); + + id = P_USB_PHY_CLK; + ret = msm_proc_comm(PCOM_CLKCTL_RPC_RESET_DEASSERT, &id, NULL); + if (ret) { + pr_err("%s: Cannot assert (%d)\n", __func__, ret); + return; + } +} + +static void msm7x30_usb_hw_reset(bool enable) +{ + u32 id; + int ret; + u32 func; + + id = P_USB_HS_CLK; + if (enable) + func = PCOM_CLKCTL_RPC_RESET_ASSERT; + else + func = PCOM_CLKCTL_RPC_RESET_DEASSERT; + ret = msm_proc_comm(func, &id, NULL); + if (ret) + pr_err("%s: Cannot set reset to %d (%d)\n", __func__, enable, + ret); +} + +static struct msm_hsusb_platform_data msm_hsusb_pdata = { + .phy_init_seq = msm7x30_phy_init_seq, + .phy_reset = msm7x30_usb_phy_reset, + .hw_reset = msm7x30_usb_hw_reset, + .usb_connected = pm8058_notify_charger_connected, +}; + +static struct wake_lock vbus_wake_lock; + +static void msm7x30_vbus_present(bool present) +{ + pr_info("usb_cable_status: %s\n", present ? 
"inserted" : "removed"); + if (present) + wake_lock(&vbus_wake_lock); + msm_hsusb_set_vbus_state(present); + if (!present) + wake_unlock(&vbus_wake_lock); +} + +static int msm7x30_pcom_charge(u32 max_current, bool is_ac) +{ + u32 status = 0; + u32 pc_ids[] = { + PCOM_CHG_USB_IS_PC_CONNECTED, + PCOM_CHG_USB_IS_CHARGER_CONNECTED, + }; + int ret = 0; + + pr_info("%s(%u,%d)\n", __func__, max_current, is_ac); + + if (max_current) { + /* enable charging */ + status = 0; + msm_proc_comm(pc_ids[!!is_ac], &status, 0); + if (!status) { + pr_err("%s: can't set chg type (ac=%d)\n", __func__, + is_ac); + ret = -EINVAL; + goto err; + } + msm_proc_comm(PCOM_CHG_USB_IS_AVAILABLE, &max_current, &status); + if (!status) { + pr_err("%s: set_i failed %u\n", __func__, max_current); + ret = -EINVAL; + goto err; + } + } else { + msm_proc_comm(PCOM_CHG_USB_IS_AVAILABLE, &max_current, &status); + if (!status) { + pr_err("%s: set_i failed %u\n", __func__, max_current); + ret = -EINVAL; + goto err; + } + msm_proc_comm(PCOM_CHG_USB_IS_DISCONNECTED, &status, 0); + if (!status) { + pr_err("%s: can't set disconnect\n", __func__); + ret = -EINVAL; + goto err; + } + } + +err: + return ret; +} + +static struct pm8058_charger_platform_data msm7x30_pmic_charger_pdata = { + .vbus_present = msm7x30_vbus_present, + .charge = msm7x30_pcom_charge, + .supplied_to = NULL, + .num_supplicants = 0, +}; + +static char *usb_functions[] = { + "usb_mass_storage", +}; + +static char *usb_functions_adb[] = { + "usb_mass_storage", + "adb", +}; + +static char *usb_functions_all[] = { + "usb_mass_storage", + "adb", +}; + +static struct android_usb_product usb_products[] = { + { + .product_id = 0x4e11, + .num_functions = ARRAY_SIZE(usb_functions), + .functions = usb_functions, + }, + { + .product_id = 0x4e12, + .num_functions = ARRAY_SIZE(usb_functions_adb), + .functions = usb_functions_adb, + }, +}; + +static struct android_usb_platform_data android_usb_pdata = { + .vendor_id = 0x18d1, + .product_id = 0x4e11, + .version = 0x0100, + .product_name = "Surf7x30", + .manufacturer_name = "Qualcomm, Inc.", + .num_products = ARRAY_SIZE(usb_products), + .products = usb_products, + .num_functions = ARRAY_SIZE(usb_functions_all), + .functions = usb_functions_all, +}; + +static struct platform_device android_usb_device = { + .name = "android_usb", + .id = -1, + .dev = { + .platform_data = &android_usb_pdata, + }, +}; + +static struct usb_mass_storage_platform_data mass_storage_pdata = { + .nluns = 1, + .vendor = "Qualcomm, Inc.", + .product = "Surf7x30", + .release = 0x0100, +}; + +static struct platform_device usb_mass_storage_device = { + .name = "usb_mass_storage", + .id = -1, + .dev = { + .platform_data = &mass_storage_pdata, + }, +}; + +static struct android_pmem_platform_data pmem_pdata = { + .name = "pmem", + .size = MSM7X30_SURF_PMEM_SIZE, + .no_allocator = 0, + .cached = 1, +}; + +static struct android_pmem_platform_data pmem_adsp_pdata = { + .name = "pmem_adsp", + .size = MSM7X30_SURF_PMEM_ADSP_SIZE, + .no_allocator = 0, + .cached = 0, +}; + +static struct platform_device android_pmem_device = { + .name = "android_pmem", + .id = 0, + .dev = { + .platform_data = &pmem_pdata + }, +}; + +static struct platform_device android_pmem_adsp_device = { + .name = "android_pmem", + .id = 1, + .dev = { + .platform_data = &pmem_adsp_pdata, + }, }; +static struct resource msm_kgsl_resources[] = { + { + .name = "kgsl_phys_memory", + .flags = IORESOURCE_MEM, + }, + { + .name = "kgsl_reg_memory", + .start = MSM_GPU_REG_PHYS, + .end = MSM_GPU_REG_PHYS + 
MSM_GPU_REG_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_GRAPHICS, + .end = INT_GRAPHICS, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device msm_kgsl_device = { + .name = "kgsl", + .id = -1, + .resource = msm_kgsl_resources, + .num_resources = ARRAY_SIZE(msm_kgsl_resources), +}; + +#define PWR_RAIL_GRP_CLK 8 +static int msm7x30_kgsl_power_rail_mode(int follow_clk) +{ + int mode = follow_clk ? 0 : 1; + int rail_id = PWR_RAIL_GRP_CLK; + + return msm_proc_comm(PCOM_CLKCTL_RPC_RAIL_CONTROL, &rail_id, &mode); +} + +static int msm7x30_kgsl_power(bool on) +{ + int cmd; + int id; + + if (on) { + /* turn clock on, turn power on, turn clock off */ + cmd = PCOM_CLKCTL_RPC_RAIL_ENABLE; + id = P_GRP_3D_CLK; + msm_proc_comm(cmd, &id, NULL); + + cmd = PCOM_CLKCTL_RPC_ENABLE; + id = PWR_RAIL_GRP_CLK; + msm_proc_comm(cmd, &id, NULL); + + cmd = PCOM_CLKCTL_RPC_RAIL_DISABLE; + id = P_GRP_3D_CLK; + msm_proc_comm(cmd, &id, NULL); + } else { + /* turn clock on, turn power off, turn clock off */ + cmd = PCOM_CLKCTL_RPC_RAIL_ENABLE; + id = P_GRP_3D_CLK; + msm_proc_comm(cmd, &id, NULL); + + cmd = PCOM_CLKCTL_RPC_DISABLE; + id = PWR_RAIL_GRP_CLK; + msm_proc_comm(cmd, &id, NULL); + + cmd = PCOM_CLKCTL_RPC_RAIL_DISABLE; + id = P_GRP_3D_CLK; + msm_proc_comm(cmd, &id, NULL); + } + + return 0; +} + static struct platform_device *devices[] __initdata = { -#if defined(CONFIG_SERIAL_MSM) || defined(CONFIG_MSM_SERIAL_DEBUGGER) +#if defined(CONFIG_SERIAL_MSM) && !defined(CONFIG_MSM_SERIAL_DEBUGGER) &msm_device_uart2, #endif &msm_device_smd, - &msm_device_otg, + &msm_device_nand, + &msm_device_i2c, + &msm_device_i2c2, &msm_device_hsusb, - &msm_device_hsusb_host, + &usb_mass_storage_device, + &android_usb_device, + &android_pmem_device, + &android_pmem_adsp_device, + &msm_kgsl_device, + &msm_device_spi, }; static void __init msm7x30_init_irq(void) @@ -67,13 +357,472 @@ static void __init msm7x30_init_irq(void) msm_init_irq(); } +static struct pm8058_pin_config msm7x30_kpd_input_gpio_cfg = { + .vin_src = PM8058_GPIO_VIN_SRC_VREG_S3, + .dir = PM8058_GPIO_INPUT, + .pull_up = PM8058_GPIO_PULL_UP_31P5, + .strength = PM8058_GPIO_STRENGTH_OFF, + .func = PM8058_GPIO_FUNC_NORMAL, + .flags = PM8058_GPIO_INV_IRQ_POL +}; + +static struct pm8058_pin_config msm7x30_kpd_output_gpio_cfg = { + .vin_src = PM8058_GPIO_VIN_SRC_VREG_S3, + .dir = PM8058_GPIO_OUTPUT, + .pull_up = PM8058_GPIO_PULL_NONE, + .strength = PM8058_GPIO_STRENGTH_LOW, + .func = PM8058_GPIO_FUNC_1, + .flags = (PM8058_GPIO_OPEN_DRAIN | + PM8058_GPIO_INV_IRQ_POL), +}; + +static unsigned int msm7x30_pmic_col_gpios[] = { + MSM7X30_PM8058_GPIO(0), MSM7X30_PM8058_GPIO(1), + MSM7X30_PM8058_GPIO(2), MSM7X30_PM8058_GPIO(3), + MSM7X30_PM8058_GPIO(4), MSM7X30_PM8058_GPIO(5), + MSM7X30_PM8058_GPIO(6), MSM7X30_PM8058_GPIO(7), +}; +static unsigned int msm7x30_pmic_row_gpios[] = { + MSM7X30_PM8058_GPIO(8), MSM7X30_PM8058_GPIO(9), + MSM7X30_PM8058_GPIO(10), MSM7X30_PM8058_GPIO(11), + MSM7X30_PM8058_GPIO(12), MSM7X30_PM8058_GPIO(13), + MSM7X30_PM8058_GPIO(14), MSM7X30_PM8058_GPIO(15), + MSM7X30_PM8058_GPIO(16), MSM7X30_PM8058_GPIO(17), + MSM7X30_PM8058_GPIO(18), MSM7X30_PM8058_GPIO(19), +}; + +#define KEYMAP_NUM_ROWS ARRAY_SIZE(msm7x30_pmic_row_gpios) +#define KEYMAP_NUM_COLS ARRAY_SIZE(msm7x30_pmic_col_gpios) +#define KEYMAP_INDEX(row, col) (((row) * KEYMAP_NUM_COLS) + (col)) +#define KEYMAP_SIZE (KEYMAP_NUM_ROWS * KEYMAP_NUM_COLS) + +static int msm7x30_pmic_keypad_init(struct device *dev) +{ + int i; + + for (i = 0; i < KEYMAP_NUM_COLS; ++i) + 
pm8058_gpio_mux(msm7x30_pmic_col_gpios[i], + &msm7x30_kpd_input_gpio_cfg); + for (i = 0; i < KEYMAP_NUM_ROWS; ++i) + pm8058_gpio_mux(msm7x30_pmic_row_gpios[i], + &msm7x30_kpd_output_gpio_cfg); + return 0; +} + +static const unsigned short msm7x30_fluid_pmic_keymap[KEYMAP_SIZE] = { + [KEYMAP_INDEX(0, 0)] = KEY_7, + [KEYMAP_INDEX(0, 1)] = KEY_ENTER, + [KEYMAP_INDEX(0, 2)] = KEY_UP, + [KEYMAP_INDEX(0, 4)] = KEY_DOWN, + + [KEYMAP_INDEX(1, 0)] = KEY_POWER, + [KEYMAP_INDEX(1, 1)] = KEY_SELECT, + [KEYMAP_INDEX(1, 2)] = KEY_1, + [KEYMAP_INDEX(1, 3)] = KEY_VOLUMEUP, + [KEYMAP_INDEX(1, 4)] = KEY_VOLUMEDOWN, +}; + +static const unsigned short msm7x30_surf_pmic_keymap[KEYMAP_SIZE] = { + [KEYMAP_INDEX(0, 0)] = KEY_7, + [KEYMAP_INDEX(0, 1)] = KEY_DOWN, + [KEYMAP_INDEX(0, 2)] = KEY_UP, + [KEYMAP_INDEX(0, 3)] = KEY_RIGHT, + [KEYMAP_INDEX(0, 4)] = KEY_ENTER, + [KEYMAP_INDEX(0, 5)] = KEY_L, + [KEYMAP_INDEX(0, 6)] = KEY_BACK, + [KEYMAP_INDEX(0, 7)] = KEY_M, + + [KEYMAP_INDEX(1, 0)] = KEY_LEFT, + [KEYMAP_INDEX(1, 1)] = KEY_SEND, + [KEYMAP_INDEX(1, 2)] = KEY_1, + [KEYMAP_INDEX(1, 3)] = KEY_4, + [KEYMAP_INDEX(1, 4)] = KEY_CLEAR, + [KEYMAP_INDEX(1, 5)] = KEY_MSDOS, + [KEYMAP_INDEX(1, 6)] = KEY_SPACE, + [KEYMAP_INDEX(1, 7)] = KEY_COMMA, + + [KEYMAP_INDEX(2, 0)] = KEY_6, + [KEYMAP_INDEX(2, 1)] = KEY_5, + [KEYMAP_INDEX(2, 2)] = KEY_8, + [KEYMAP_INDEX(2, 3)] = KEY_3, + [KEYMAP_INDEX(2, 4)] = KEY_NUMERIC_STAR, + [KEYMAP_INDEX(2, 5)] = KEY_UP, + [KEYMAP_INDEX(2, 6)] = KEY_DOWN, + [KEYMAP_INDEX(2, 7)] = KEY_LEFTSHIFT, + + [KEYMAP_INDEX(3, 0)] = KEY_9, + [KEYMAP_INDEX(3, 1)] = KEY_NUMERIC_POUND, + [KEYMAP_INDEX(3, 2)] = KEY_0, + [KEYMAP_INDEX(3, 3)] = KEY_2, + [KEYMAP_INDEX(3, 4)] = KEY_SLEEP, + [KEYMAP_INDEX(3, 5)] = KEY_F1, + [KEYMAP_INDEX(3, 6)] = KEY_F2, + [KEYMAP_INDEX(3, 7)] = KEY_F3, + + [KEYMAP_INDEX(4, 0)] = KEY_BACK, + [KEYMAP_INDEX(4, 1)] = KEY_HOME, + [KEYMAP_INDEX(4, 2)] = KEY_MENU, + [KEYMAP_INDEX(4, 3)] = KEY_VOLUMEUP, + [KEYMAP_INDEX(4, 4)] = KEY_VOLUMEDOWN, + [KEYMAP_INDEX(4, 5)] = KEY_F4, + [KEYMAP_INDEX(4, 6)] = KEY_F5, + [KEYMAP_INDEX(4, 7)] = KEY_F6, + + [KEYMAP_INDEX(5, 0)] = KEY_R, + [KEYMAP_INDEX(5, 1)] = KEY_T, + [KEYMAP_INDEX(5, 2)] = KEY_Y, + [KEYMAP_INDEX(5, 3)] = KEY_LEFTALT, + [KEYMAP_INDEX(5, 4)] = KEY_KPENTER, + [KEYMAP_INDEX(5, 5)] = KEY_Q, + [KEYMAP_INDEX(5, 6)] = KEY_W, + [KEYMAP_INDEX(5, 7)] = KEY_E, + + [KEYMAP_INDEX(6, 0)] = KEY_F, + [KEYMAP_INDEX(6, 1)] = KEY_G, + [KEYMAP_INDEX(6, 2)] = KEY_H, + [KEYMAP_INDEX(6, 3)] = KEY_CAPSLOCK, + [KEYMAP_INDEX(6, 4)] = KEY_PAGEUP, + [KEYMAP_INDEX(6, 5)] = KEY_A, + [KEYMAP_INDEX(6, 6)] = KEY_S, + [KEYMAP_INDEX(6, 7)] = KEY_D, + + [KEYMAP_INDEX(7, 0)] = KEY_V, + [KEYMAP_INDEX(7, 1)] = KEY_B, + [KEYMAP_INDEX(7, 2)] = KEY_N, + [KEYMAP_INDEX(7, 3)] = KEY_MENU, + [KEYMAP_INDEX(7, 4)] = KEY_PAGEDOWN, + [KEYMAP_INDEX(7, 5)] = KEY_Z, + [KEYMAP_INDEX(7, 6)] = KEY_X, + [KEYMAP_INDEX(7, 7)] = KEY_C, + + [KEYMAP_INDEX(8, 0)] = KEY_P, + [KEYMAP_INDEX(8, 1)] = KEY_J, + [KEYMAP_INDEX(8, 2)] = KEY_K, + [KEYMAP_INDEX(8, 3)] = KEY_INSERT, + [KEYMAP_INDEX(8, 4)] = KEY_LINEFEED, + [KEYMAP_INDEX(8, 5)] = KEY_U, + [KEYMAP_INDEX(8, 6)] = KEY_I, + [KEYMAP_INDEX(8, 7)] = KEY_O, + + [KEYMAP_INDEX(9, 0)] = KEY_4, + [KEYMAP_INDEX(9, 1)] = KEY_5, + [KEYMAP_INDEX(9, 2)] = KEY_6, + [KEYMAP_INDEX(9, 3)] = KEY_7, + [KEYMAP_INDEX(9, 4)] = KEY_8, + [KEYMAP_INDEX(9, 5)] = KEY_1, + [KEYMAP_INDEX(9, 6)] = KEY_2, + [KEYMAP_INDEX(9, 7)] = KEY_3, + + [KEYMAP_INDEX(10, 0)] = KEY_F7, + [KEYMAP_INDEX(10, 1)] = KEY_F8, + [KEYMAP_INDEX(10, 2)] = KEY_F9, + [KEYMAP_INDEX(10, 3)] = KEY_F10, + 
[KEYMAP_INDEX(10, 4)] = KEY_FN, + [KEYMAP_INDEX(10, 5)] = KEY_9, + [KEYMAP_INDEX(10, 6)] = KEY_0, + [KEYMAP_INDEX(10, 7)] = KEY_DOT, + + [KEYMAP_INDEX(11, 0)] = KEY_LEFTCTRL, + [KEYMAP_INDEX(11, 1)] = KEY_F11, + [KEYMAP_INDEX(11, 2)] = KEY_ENTER, + [KEYMAP_INDEX(11, 3)] = KEY_SEARCH, + [KEYMAP_INDEX(11, 4)] = KEY_DELETE, + [KEYMAP_INDEX(11, 5)] = KEY_RIGHT, + [KEYMAP_INDEX(11, 6)] = KEY_LEFT, + [KEYMAP_INDEX(11, 7)] = KEY_RIGHTSHIFT, +}; + +static struct pm8058_keypad_platform_data msm7x30_pmic_keypad_pdata = { + .name = "msm7x30-keypad", + .num_drv = KEYMAP_NUM_ROWS, + .num_sns = KEYMAP_NUM_COLS, + .scan_delay_shift = 5, + .drv_hold_clks = 4, + .debounce_ms = 10, + .init = msm7x30_pmic_keypad_init, +}; + +static struct pm8058_platform_data msm7x30_pm8058_pdata = { + .irq_base = MSM7X30_PM8058_IRQ_BASE, + .gpio_base = MSM7X30_PM8058_GPIO_BASE, + .keypad_pdata = &msm7x30_pmic_keypad_pdata, + .charger_pdata = &msm7x30_pmic_charger_pdata, +}; + +static struct msm_ssbi_platform_data msm7x30_ssbi_pmic_pdata = { + .slave = { + .name = "pm8058-core", + .irq = MSM_GPIO_TO_INT(MSM7X30_GPIO_PMIC_INT_N), + .platform_data = &msm7x30_pm8058_pdata, + }, + .rspinlock_name = "D:PMIC_SSBI", +}; + +static int __init msm7x30_ssbi_pmic_init(void) +{ + int ret; + + pr_info("%s()\n", __func__); + msm_gpiomux_write(MSM7X30_GPIO_PMIC_INT_N, 0, + GPIOMUX_FUNC_GPIO | + GPIOMUX_PULL_NONE | + GPIOMUX_DIR_INPUT | + GPIOMUX_DRV_2MA | GPIOMUX_VALID); + ret = gpiochip_reserve(msm7x30_pm8058_pdata.gpio_base, + PM8058_NUM_GPIOS); + WARN(ret, "can't reserve pm8058 gpios. badness will ensue...\n"); + msm_device_ssbi_pmic.dev.platform_data = &msm7x30_ssbi_pmic_pdata; + return platform_device_register(&msm_device_ssbi_pmic); +} + +static struct vreg *vreg_l15; /* gp6 */ +static struct vreg *vreg_l8; /* gp7 */ +static struct vreg *vreg_l16; /* gp10 */ + +static struct vreg *_get_vreg(char *name, u32 mv, bool en) +{ + struct vreg *vr; + + vr = vreg_get(NULL, name); + if (IS_ERR(vr)) + return NULL; + if (mv != 0) + vreg_set_level(vr, mv); + if (en) + vreg_enable(vr); + return vr; +} + +static void _put_vreg(struct vreg *vr) +{ + if (vr) { + vreg_disable(vr); + vreg_put(vr); + } +} + +static void fluid_cyttsp_init(void) +{ + vreg_l8 = _get_vreg("gp7", 1800, true); + vreg_l16 = _get_vreg("gp10", 2600, true); + vreg_l15 = _get_vreg("gp6", 3050, true); + + if (!vreg_l8 || !vreg_l16 || !vreg_l15) { + pr_err("%s: can't get vregs\n", __func__); + _put_vreg(vreg_l8); + _put_vreg(vreg_l15); + _put_vreg(vreg_l16); + return; + } + + /* enable interrupt gpio */ + msm_gpiomux_write(MSM7X30_FLUID_GPIO_TOUCH_INT_N, 0, + GPIOMUX_FUNC_GPIO | + GPIOMUX_PULL_UP | + GPIOMUX_DIR_INPUT | + GPIOMUX_DRV_6MA | GPIOMUX_VALID); +} + +static int fluid_cyttsp_resume(struct i2c_client *client) +{ + mdelay(10); + return CY_OK; +} + +static struct cyttsp_platform_data fluid_cyttsp_pdata = { + .panel_maxx = 479, + .panel_maxy = 799, + .disp_maxx = 469, + .disp_maxy = 799, + .disp_minx = 10, + .disp_miny = 0, + .flags = 0, + .gen = CY_GEN3, /* or */ + .use_st = CY_USE_ST, + .use_mt = CY_USE_MT, + .use_hndshk = CY_SEND_HNDSHK, + .use_trk_id = CY_USE_TRACKING_ID, + .use_sleep = CY_USE_SLEEP, + .use_gestures = CY_USE_GESTURES, + .gest_set = CY_GEST_GRP1 | CY_GEST_GRP2 | + CY_GEST_GRP3 | CY_GEST_GRP4 | + CY_ACT_DIST, + .act_intrvl = CY_ACT_INTRVL_DFLT, + .tch_tmout = CY_TCH_TMOUT_DFLT, + .lp_intrvl = CY_LP_INTRVL_DFLT, + .resume = fluid_cyttsp_resume, + .init = NULL, +}; + +static struct i2c_board_info fluid_i2c_0_board_info[] = { + { + I2C_BOARD_INFO("cyttsp-i2c", 
0x24), + .platform_data = &fluid_cyttsp_pdata, + .irq = MSM_GPIO_TO_INT(MSM7X30_FLUID_GPIO_TOUCH_INT_N), + } +}; + +static struct i2c_board_info surf_i2c_devices[] = { + /* marimba master is implied at 0x0c */ + { + I2C_BOARD_INFO("marimba-codec", 0x77), + }, +}; + +static int msm7x30_i2c_0_init(void) +{ + msm_gpiomux_write(70, 0, + GPIOMUX_FUNC_1 | + GPIOMUX_PULL_NONE | + GPIOMUX_DIR_INPUT | + GPIOMUX_DRV_16MA | GPIOMUX_VALID); + msm_gpiomux_write(71, 0, + GPIOMUX_FUNC_1 | + GPIOMUX_PULL_NONE | + GPIOMUX_DIR_INPUT | + GPIOMUX_DRV_16MA | GPIOMUX_VALID); +} + +static int msm7x30_spi_init(void) +{ + msm_gpiomux_write(45, 0, + GPIOMUX_FUNC_1 | + GPIOMUX_PULL_NONE | + GPIOMUX_DIR_INPUT | + GPIOMUX_DRV_2MA | GPIOMUX_VALID); + msm_gpiomux_write(46, 0, + GPIOMUX_FUNC_1 | + GPIOMUX_PULL_NONE | + GPIOMUX_DIR_INPUT | + GPIOMUX_DRV_2MA | GPIOMUX_VALID); + msm_gpiomux_write(47, 0, + GPIOMUX_FUNC_1 | + GPIOMUX_PULL_NONE | + GPIOMUX_DIR_INPUT | + GPIOMUX_DRV_2MA | GPIOMUX_VALID); + msm_gpiomux_write(48, 0, + GPIOMUX_FUNC_1 | + GPIOMUX_PULL_NONE | + GPIOMUX_DIR_INPUT | + GPIOMUX_DRV_2MA | GPIOMUX_VALID); +} + +static struct msm_spi_platform_data msm7x30_spi_pdata = { + .max_clock_speed = 26331429, +}; + +static ssize_t fluid_virtual_keys_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, + __stringify(EV_KEY) ":" __stringify(KEY_BACK) ":50:842:80:100" + ":" __stringify(EV_KEY) ":" __stringify(KEY_MENU) ":170:842:80:100" + ":" __stringify(EV_KEY) ":" __stringify(KEY_HOME) ":290:842:80:100" + ":" __stringify(EV_KEY) ":" __stringify(KEY_SEARCH) ":410:842:80:100" + "\n"); +} + +static struct kobj_attribute fluid_virtual_keys_attr = { + .attr = { + .name = "virtualkeys.cyttsp-i2c", + .mode = S_IRUGO, + }, + .show = &fluid_virtual_keys_show, +}; + +static struct attribute *fluid_properties_attrs[] = { + &fluid_virtual_keys_attr.attr, + NULL +}; + +static struct attribute_group fluid_properties_attr_group = { + .attrs = fluid_properties_attrs, +}; + +static int fluid_board_props_init(void) +{ + int rc; + struct kobject *properties_kobj; + + properties_kobj = kobject_create_and_add("board_properties", NULL); + if (!properties_kobj) { + rc = -ENOMEM; + goto err_kobj_create; + } + + rc = sysfs_create_group(properties_kobj, &fluid_properties_attr_group); + if (rc) + goto err_sysfs_create; + + return 0; + +err_sysfs_create: + kobject_put(properties_kobj); +err_kobj_create: + pr_err("failed to create board_properties\n"); + return rc; +} + + +extern void msm_serial_debug_init(unsigned int base, int irq, + struct device *clk_device, int signal_irq, + int wakeup_irq); + static void __init msm7x30_init(void) { - msm_device_otg.dev.platform_data = &msm_otg_pdata; - msm_device_hsusb.dev.parent = &msm_device_otg.dev; - msm_device_hsusb_host.dev.parent = &msm_device_otg.dev; + wake_lock_init(&vbus_wake_lock, WAKE_LOCK_SUSPEND, "board-vbus"); +#ifdef CONFIG_DEBUG_LL + { + /* HACK: get a fake clock request for uart2 for debug_ll */ + struct clk *uart2_clk; + uart2_clk = clk_get(&msm_device_uart2.dev, "uart_clk"); + if (IS_ERR(uart2_clk)) + uart2_clk = NULL; + else + clk_enable(uart2_clk); + } +#endif + +#if defined(CONFIG_MSM_SERIAL_DEBUGGER) + msm_serial_debug_init(MSM_UART2_PHYS, INT_UART2, + &msm_device_uart2.dev, 23, MSM_GPIO_TO_INT(51)); +#endif + + if (machine_is_msm7x30_fluid()) + msm7x30_pmic_keypad_pdata.keymap = msm7x30_fluid_pmic_keymap; + else + msm7x30_pmic_keypad_pdata.keymap = msm7x30_surf_pmic_keymap; + + msm7x30_ssbi_pmic_init(); + msm7x30_i2c_0_init(); + 
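+	/*
+	 * msm7x30_i2c_0_init() and msm7x30_spi_init() only configure the
+	 * pad muxing; the I2C and SPI controller devices themselves are
+	 * registered later through platform_add_devices() below.
+	 */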
msm7x30_spi_init(); + + /* set the gpu power rail to manual mode so clk en/dis will not + * turn off gpu power, and hang it on resume */ + msm7x30_kgsl_power_rail_mode(0); + msm7x30_kgsl_power(true); + + msm_device_hsusb.dev.platform_data = &msm_hsusb_pdata; + msm_device_spi.dev.platform_data = &msm7x30_spi_pdata; platform_add_devices(devices, ARRAY_SIZE(devices)); + + if (machine_is_msm7x30_fluid()) { + fluid_cyttsp_init(); + i2c_register_board_info(0, fluid_i2c_0_board_info, + ARRAY_SIZE(fluid_i2c_0_board_info)); + } + i2c_register_board_info(1, surf_i2c_devices, + ARRAY_SIZE(surf_i2c_devices)); + + if (machine_is_msm7x30_fluid()) + fluid_board_props_init(); + + msm7x30_board_audio_init(); + + msm_hsusb_set_vbus_state(1); + msm_hsusb_set_vbus_state(0); + msm_hsusb_set_vbus_state(1); } static void __init msm7x30_map_io(void) @@ -82,6 +831,40 @@ static void __init msm7x30_map_io(void) msm_clock_init(msm_clocks_7x30, msm_num_clocks_7x30); } +extern void __init msm7x30_allocate_fbmem(void); + +static phys_addr_t _reserve_mem(const char *name, unsigned long size, + unsigned long align) +{ + unsigned long base; + + size = ALIGN(size, align); + base = memblock_alloc(size, align); + memblock_free(base, size); + memblock_remove(base, size); + pr_info("msm7x30_surf: reserved memory for %s @ 0x%08lx (%lu bytes)\n", + name, base, size); + return base; +} + +static void __init msm7x30_reserve(void) +{ + struct resource *mem_res; + + msm7x30_allocate_fbmem(); + + pmem_pdata.start = _reserve_mem("pmem", pmem_pdata.size, SZ_1M); + pmem_adsp_pdata.start = _reserve_mem("pmem_adsp", pmem_adsp_pdata.size, + SZ_1M); + + mem_res = platform_get_resource_byname(&msm_kgsl_device, IORESOURCE_MEM, + "kgsl_phys_memory"); + BUG_ON(!mem_res); + mem_res->start = _reserve_mem("gpu_mem", MSM7X30_SURF_GPU_MEM_SIZE, + SZ_1M); + mem_res->end = mem_res->start + MSM7X30_SURF_GPU_MEM_SIZE - 1; +} + MACHINE_START(MSM7X30_SURF, "QCT MSM7X30 SURF") #ifdef CONFIG_MSM_DEBUG_UART #endif @@ -90,6 +873,7 @@ MACHINE_START(MSM7X30_SURF, "QCT MSM7X30 SURF") .init_irq = msm7x30_init_irq, .init_machine = msm7x30_init, .timer = &msm_timer, + .reserve = msm7x30_reserve, MACHINE_END MACHINE_START(MSM7X30_FFA, "QCT MSM7X30 FFA") @@ -100,6 +884,7 @@ MACHINE_START(MSM7X30_FFA, "QCT MSM7X30 FFA") .init_irq = msm7x30_init_irq, .init_machine = msm7x30_init, .timer = &msm_timer, + .reserve = msm7x30_reserve, MACHINE_END MACHINE_START(MSM7X30_FLUID, "QCT MSM7X30 FLUID") @@ -110,4 +895,5 @@ MACHINE_START(MSM7X30_FLUID, "QCT MSM7X30 FLUID") .init_irq = msm7x30_init_irq, .init_machine = msm7x30_init, .timer = &msm_timer, + .reserve = msm7x30_reserve, MACHINE_END diff --git a/arch/arm/mach-msm/board-msm7x30.h b/arch/arm/mach-msm/board-msm7x30.h new file mode 100644 index 0000000000000..976febe02ac42 --- /dev/null +++ b/arch/arm/mach-msm/board-msm7x30.h @@ -0,0 +1,37 @@ +/* linux/arch/arm/mach-msm/board-msm7x30.h + * + * Copyright (C) 2011 Google, Inc. + * Author: Dima Zavin + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __ARCH_ARM_MACH_MSM_BOARD_MSM7X30_H +#define __ARCH_ARM_MACH_MSM_BOARD_MSM7X30_H + +#include +#include + +#define MSM7X30_PM8058_GPIO_BASE FIRST_BOARD_GPIO +#define MSM7X30_PM8058_GPIO(x) (MSM7X30_PM8058_GPIO_BASE + (x)) +#define MSM7X30_PM8058_IRQ_BASE FIRST_BOARD_IRQ + +#define MSM7X30_GPIO_PMIC_INT_N 27 +#define MSM7X30_FLUID_GPIO_TOUCH_INT_N 150 + +#define MSM7X30_SURF_PMEM_SIZE 0x02000000 +#define MSM7X30_SURF_PMEM_ADSP_SIZE 0x01800000 + +#define MSM7X30_SURF_GPU_MEM_SIZE 0x00500000 + +void msm7x30_board_audio_init(void); + +#endif diff --git a/arch/arm/mach-msm/board-sapphire-gpio.c b/arch/arm/mach-msm/board-sapphire-gpio.c new file mode 100644 index 0000000000000..a75df356dd996 --- /dev/null +++ b/arch/arm/mach-msm/board-sapphire-gpio.c @@ -0,0 +1,324 @@ +/* arch/arm/mach-msm/board-sapphire-gpio.c + * Copyright (C) 2007-2009 HTC Corporation. + * Author: Thomas Tsai + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. +*/ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "board-sapphire.h" + +#ifdef DEBUG_SAPPHIRE_GPIO +#define DBG(fmt, arg...) printk(KERN_INFO "%s: " fmt "\n", __func__, ## arg) +#else +#define DBG(fmt, arg...) do {} while (0) +#endif + +#define SAPPHIRE_CPLD_INT_STATUS (SAPPHIRE_CPLD_BASE + 0x0E) +#define SAPPHIRE_CPLD_INT_LEVEL (SAPPHIRE_CPLD_BASE + 0x08) +#define SAPPHIRE_CPLD_INT_MASK (SAPPHIRE_CPLD_BASE + 0x0C) + +/*CPLD misc reg offset*/ +static const int _g_CPLD_MISCn_Offset[] = { 0x0A, /*misc1 reg*/ + 0x00, /*misc2 reg*/ + 0x02, /*misc3 reg*/ + 0x04, /*misc4 reg*/ + 0x06}; /*misc5 reg*/ +/*CPLD INT Bank*/ +/*BANK0: int1 status, int2 level, int3 mask*/ +static const int _g_INT_BANK_Offset[][3] = {{0x0E, 0x08, 0x0C} }; + +static uint8_t sapphire_cpld_initdata[4] = { + [0] = 0x80, /* for serial debug UART3, low current misc2*/ + [1] = 0x34, /* jog & tp enable, I2C pull misc3*/ + [3] = 0x04, /* mmdi 32k en misc5*/ +}; + +/*save current working int mask, so the value can be restored after resume. +Sapphire has only bank0.*/ +static uint8_t sapphire_int_mask[] = { + [0] = 0xfb, /* enable all interrupts, bit 2 is not used */ +}; + +/*Sleep have to prepare the wake up source in advance. 
+default to disable all wakeup sources when suspend.*/ +static uint8_t sapphire_sleep_int_mask[] = { + [0] = 0x00, /* bit2 is not used */ +}; + +static int sapphire_suspended; + +static int sapphire_gpio_get(struct gpio_chip *chip, unsigned offset) +{ + unsigned n = chip->base + offset; + + if (n < SAPPHIRE_GPIO_INT_B0_BASE) /*MISCn*/ + return !!(readb(CPLD_GPIO_REG(n)) & CPLD_GPIO_BIT_POS_MASK(n)); + else if (n <= SAPPHIRE_GPIO_END) /*gpio n is INT pin*/ + return !!(readb(CPLD_INT_LEVEL_REG_G(n)) & + CPLD_GPIO_BIT_POS_MASK(n)); + return 0; +} + +/*CPLD Write only register :MISC2, MISC3, MISC4, MISC5 => reg=0,2,4,6 +Reading from write-only registers is undefined, so the writing value +should be kept in shadow for later usage.*/ +static void sapphire_gpio_set(struct gpio_chip *chip, unsigned offset, int on) +{ + unsigned n = chip->base + offset; + unsigned long flags; + uint8_t reg_val; + if (n > SAPPHIRE_GPIO_END) + return; + + local_irq_save(flags); + reg_val = readb(CPLD_GPIO_REG(n)); + if (on) + reg_val |= CPLD_GPIO_BIT_POS_MASK(n); + else + reg_val &= ~CPLD_GPIO_BIT_POS_MASK(n); + writeb(reg_val, CPLD_GPIO_REG(n)); + + DBG("gpio=%d, l=0x%x\r\n", n, readb(SAPPHIRE_CPLD_INT_LEVEL)); + + local_irq_restore(flags); +} + +static int sapphire_gpio_direction_output(struct gpio_chip *chip, + unsigned offset, int value) +{ + sapphire_gpio_set(chip, offset, value); + return 0; +} + +static int sapphire_gpio_direction_input(struct gpio_chip *chip, + unsigned offset) +{ + return 0; +} + +static int sapphire_gpio_to_irq(struct gpio_chip *chip, unsigned offset) +{ + unsigned gpio = chip->base + offset; + int irq; + + DBG("gpio=%d, l=0x%x\r\n", gpio, readb(SAPPHIRE_CPLD_INT_LEVEL)); + DBG("SAPPHIRE_GPIO_INT_B0_BASE=%d, SAPPHIRE_GPIO_LAST_INT=%d\r\n", + SAPPHIRE_GPIO_INT_B0_BASE, SAPPHIRE_GPIO_LAST_INT); + if ((gpio < SAPPHIRE_GPIO_INT_B0_BASE) || + (gpio > SAPPHIRE_GPIO_LAST_INT)) + return -ENOENT; + irq = SAPPHIRE_GPIO_TO_INT(gpio); + DBG("irq=%d\r\n", irq); + return irq; +} + +/*write 1 to clear INT status bit.*/ +static void sapphire_gpio_irq_ack(unsigned int irq) +{ + /*write 1 to clear*/ + writeb(SAPPHIRE_INT_BIT_MASK(irq), CPLD_INT_STATUS_REG(irq)); +} + +/*unmask/enable the INT +static void sapphire_gpio_irq_unmask(unsigned int irq)*/ +static void sapphire_gpio_irq_enable(unsigned int irq) +{ + unsigned long flags; + uint8_t reg_val; + + local_irq_save(flags); /*disabling all interrupts*/ + + reg_val = readb(CPLD_INT_MASK_REG(irq)) | SAPPHIRE_INT_BIT_MASK(irq); + DBG("(irq=%d,0x%x, 0x%x)\r\n", irq, CPLD_INT_MASK_REG(irq), + SAPPHIRE_INT_BIT_MASK(irq)); + DBG("sapphire_suspended=%d\r\n", sapphire_suspended); + /*printk(KERN_INFO "sapphire_gpio_irq_mask irq %d => %d:%02x\n", + irq, bank, reg_val);*/ + if (!sapphire_suspended) + writeb(reg_val, CPLD_INT_MASK_REG(irq)); + + reg_val = readb(CPLD_INT_MASK_REG(irq)); + DBG("reg_val= 0x%x\r\n", reg_val); + DBG("l=0x%x\r\n", readb(SAPPHIRE_CPLD_INT_LEVEL)); + + local_irq_restore(flags); /*restore the interrupts*/ +} + +/*mask/disable INT +static void sapphire_gpio_irq_mask(unsigned int irq)*/ +static void sapphire_gpio_irq_disable(unsigned int irq) +{ + unsigned long flags; + uint8_t reg_val; + + local_irq_save(flags); + reg_val = readb(CPLD_INT_MASK_REG(irq)) & ~SAPPHIRE_INT_BIT_MASK(irq); + /*CPLD INT MASK is r/w now.*/ + + /*printk(KERN_INFO "sapphire_gpio_irq_unmask irq %d => %d:%02x\n", + irq, bank, reg_val);*/ + DBG("(%d,0x%x, 0x%x, 0x%x)\r\n", irq, reg_val, CPLD_INT_MASK_REG(irq), + SAPPHIRE_INT_BIT_MASK(irq)); + DBG("sapphire_suspended=%d\r\n", 
sapphire_suspended); + if (!sapphire_suspended) + writeb(reg_val, CPLD_INT_MASK_REG(irq)); + + reg_val = readb(CPLD_INT_MASK_REG(irq)); + DBG("reg_val= 0x%x\r\n", reg_val); + DBG("l=0x%x\r\n", readb(SAPPHIRE_CPLD_INT_LEVEL)); + + local_irq_restore(flags); +} + +/*preparing enable/disable wake source before sleep*/ +int sapphire_gpio_irq_set_wake(unsigned int irq, unsigned int on) +{ + unsigned long flags; + uint8_t mask = SAPPHIRE_INT_BIT_MASK(irq); + + local_irq_save(flags); + + if (on) /*wake on -> mask the bit*/ + sapphire_sleep_int_mask[CPLD_INT_TO_BANK(irq)] |= mask; + else /*no wake -> unmask the bit*/ + sapphire_sleep_int_mask[CPLD_INT_TO_BANK(irq)] &= ~mask; + local_irq_restore(flags); + return 0; +} + +/*Sapphire has only one INT Bank.*/ +static void sapphire_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +{ + int j; + unsigned v; + int int_base = SAPPHIRE_INT_START; + + v = readb(SAPPHIRE_CPLD_INT_STATUS); /*INT1 status reg, BANK0*/ + + for (j = 0; j < 8 ; j++) { /*8 bit per bank*/ + if (v & (1U << j)) { /*got the INT Bit*/ + DBG("generic_handle_irq j=0x%x\r\n", j); + generic_handle_irq(int_base + j); + } + } + + desc->chip->ack(irq); /*clear CPLD INT in SOC side.*/ + DBG("irq=%d, l=0x%x\r\n", irq, readb(SAPPHIRE_CPLD_INT_LEVEL)); +} + +/*Save current working sources before sleep, so we can restore it after + * resume.*/ +static int sapphire_sysdev_suspend(struct sys_device *dev, pm_message_t state) +{ + sapphire_suspended = 1; + /*save current masking*/ + sapphire_int_mask[0] = readb(SAPPHIRE_CPLD_BASE + + SAPPHIRE_GPIO_INT_B0_MASK_REG); + + /*set waking source before sleep.*/ + writeb(sapphire_sleep_int_mask[0], + SAPPHIRE_CPLD_BASE + SAPPHIRE_GPIO_INT_B0_MASK_REG); + + return 0; +} + +/*All the registers will be kept till a power loss...*/ +int sapphire_sysdev_resume(struct sys_device *dev) +{ + /*restore the working mask saved before sleep*/ + writeb(sapphire_int_mask[0], SAPPHIRE_CPLD_BASE + + SAPPHIRE_GPIO_INT_B0_MASK_REG); + sapphire_suspended = 0; + return 0; +} + +/** + * linux/irq.h :: struct irq_chip + * @enable: enable the interrupt (defaults to chip->unmask if NULL) + * @disable: disable the interrupt (defaults to chip->mask if NULL) + * @ack: start of a new interrupt + * @mask: mask an interrupt source + * @mask_ack: ack and mask an interrupt source + * @unmask: unmask an interrupt source + */ +static struct irq_chip sapphire_gpio_irq_chip = { + .name = "sapphiregpio", + .ack = sapphire_gpio_irq_ack, + .mask = sapphire_gpio_irq_disable, /*sapphire_gpio_irq_mask,*/ + .unmask = sapphire_gpio_irq_enable, /*sapphire_gpio_irq_unmask,*/ + .set_wake = sapphire_gpio_irq_set_wake, + /*.set_type = sapphire_gpio_irq_set_type,*/ +}; + +static struct gpio_chip sapphire_gpio_chip = { + .base = SAPPHIRE_GPIO_START, + .ngpio = SAPPHIRE_GPIO_END - SAPPHIRE_GPIO_START + 1, + .direction_output = sapphire_gpio_direction_output, + .direction_input = sapphire_gpio_direction_input, + .get = sapphire_gpio_get, + .set = sapphire_gpio_set, + .to_irq = sapphire_gpio_to_irq, +}; + +struct sysdev_class sapphire_sysdev_class = { + .name = "sapphiregpio_irq", + .suspend = sapphire_sysdev_suspend, + .resume = sapphire_sysdev_resume, +}; + +static struct sys_device sapphire_irq_device = { + .cls = &sapphire_sysdev_class, +}; + +int __init sapphire_init_gpio(void) +{ + int i; + if (!machine_is_sapphire()) + return 0; + + DBG("%d,%d\r\n", SAPPHIRE_INT_START, SAPPHIRE_INT_END); + DBG("NR_MSM_IRQS=%d, NR_GPIO_IRQS=%d\r\n", NR_MSM_IRQS, NR_GPIO_IRQS); + for (i = SAPPHIRE_INT_START; i <= 
SAPPHIRE_INT_END; i++) { + set_irq_chip(i, &sapphire_gpio_irq_chip); + set_irq_handler(i, handle_edge_irq); + set_irq_flags(i, IRQF_VALID); + } + + gpiochip_add(&sapphire_gpio_chip); + + /*setup CPLD INT connecting to SOC's gpio 17 */ + set_irq_type(MSM_GPIO_TO_INT(17), IRQF_TRIGGER_HIGH); + set_irq_chained_handler(MSM_GPIO_TO_INT(17), sapphire_gpio_irq_handler); + set_irq_wake(MSM_GPIO_TO_INT(17), 1); + + if (sysdev_class_register(&sapphire_sysdev_class) == 0) + sysdev_register(&sapphire_irq_device); + + return 0; +} + +int sapphire_init_cpld(unsigned int sys_rev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sapphire_cpld_initdata); i++) + writeb(sapphire_cpld_initdata[i], SAPPHIRE_CPLD_BASE + i * 2); + return 0; +} diff --git a/arch/arm/mach-msm/board-sapphire-h2w.c b/arch/arm/mach-msm/board-sapphire-h2w.c new file mode 100644 index 0000000000000..aa83e216974dc --- /dev/null +++ b/arch/arm/mach-msm/board-sapphire-h2w.c @@ -0,0 +1,545 @@ +/* + * H2W device detection driver. + * + * Copyright (C) 2008 HTC Corporation. + * Copyright (C) 2008 Google, Inc. + * + * Authors: + * Laurence Chen + * Nick Pelly + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* For detecting HTC 2 Wire devices, such as wired headset. + + Logically, the H2W driver is always present, and H2W state (hi->state) + indicates what is currently plugged into the H2W interface. + + When the headset is plugged in, CABLE_IN1 is pulled low. When the headset + button is pressed, CABLE_IN2 is pulled low. These two lines are shared with + the TX and RX (respectively) of UART3 - used for serial debugging. + + This headset driver keeps the CPLD configured as UART3 for as long as + possible, so that we can do serial FIQ debugging even when the kernel is + locked and this driver no longer runs. So it only configures the CPLD to + GPIO while the headset is plugged in, and for 10ms during detection work. + + Unfortunately we can't leave the CPLD as UART3 while a headset is plugged + in, UART3 is pullup on TX but the headset is pull-down, causing a 55 mA + drain on sapphire. + + The headset detection work involves setting CPLD to GPIO, and then pulling + CABLE_IN1 high with a stronger pullup than usual. A H2W headset will still + pull this line low, whereas other attachments such as a serial console + would get pulled up by this stronger pullup. + + Headset insertion/removal causes UEvent's to be sent, and + /sys/class/switch/h2w/state to be updated. + + Button presses are interpreted as input event (KEY_MEDIA). Button presses + are ignored if the headset is plugged in, so the buttons on 11 pin -> 3.5mm + jack adapters do not work until a headset is plugged into the adapter. This + is to avoid serial RX traffic causing spurious button press events. + + We tend to check the status of CABLE_IN1 a few more times than strictly + necessary during headset detection, to avoid spurious headset insertion + events caused by serial debugger TX traffic. 
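+
+   In code terms: SAPPHIRE_GPIO_CABLE_IN1 is the headset-detect line
+   (shared with UART3 TX), SAPPHIRE_GPIO_CABLE_IN2 is the button line
+   (shared with UART3 RX), and configure_cpld() drives the
+   SAPPHIRE_GPIO_H2W_SEL0/SEL1 pins to route these pads to either UART3
+   or the CPLD GPIOs.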
+*/ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "board-sapphire.h" + +#ifdef CONFIG_DEBUG_SAPPHIRE_H2W +#define H2W_DBG(fmt, arg...) printk(KERN_INFO "[H2W] %s " fmt "\n", __FUNCTION__, ## arg) +#else +#define H2W_DBG(fmt, arg...) do {} while (0) +#endif + +static struct workqueue_struct *g_detection_work_queue; +static void detection_work(struct work_struct *work); +static DECLARE_WORK(g_detection_work, detection_work); +enum { + NO_DEVICE = 0, + HTC_HEADSET = 1, +}; + +enum { + UART3 = 0, + GPIO = 1, +}; + +struct h2w_info { + struct switch_dev sdev; + struct input_dev *input; + + atomic_t btn_state; + int ignore_btn; + + unsigned int irq; + unsigned int irq_btn; + + struct hrtimer timer; + ktime_t debounce_time; + + struct hrtimer btn_timer; + ktime_t btn_debounce_time; +}; +static struct h2w_info *hi; + +static ssize_t sapphire_h2w_print_name(struct switch_dev *sdev, char *buf) +{ + switch (switch_get_state(&hi->sdev)) { + case NO_DEVICE: + return sprintf(buf, "No Device\n"); + case HTC_HEADSET: + return sprintf(buf, "Headset\n"); + } + return -EINVAL; +} + +static void configure_cpld(int route) +{ + H2W_DBG(" route = %s", route == UART3 ? "UART3" : "GPIO"); + switch (route) { + case UART3: + gpio_set_value(SAPPHIRE_GPIO_H2W_SEL0, 0); + gpio_set_value(SAPPHIRE_GPIO_H2W_SEL1, 1); + break; + case GPIO: + gpio_set_value(SAPPHIRE_GPIO_H2W_SEL0, 0); + gpio_set_value(SAPPHIRE_GPIO_H2W_SEL1, 0); + break; + } +} + +static void button_pressed(void) +{ + H2W_DBG(""); + atomic_set(&hi->btn_state, 1); + input_report_key(hi->input, KEY_MEDIA, 1); + input_sync(hi->input); +} + +static void button_released(void) +{ + H2W_DBG(""); + atomic_set(&hi->btn_state, 0); + input_report_key(hi->input, KEY_MEDIA, 0); + input_sync(hi->input); +} + +#ifdef CONFIG_MSM_SERIAL_DEBUGGER +extern void msm_serial_debug_enable(int); +#endif + +static void insert_headset(void) +{ + unsigned long irq_flags; + + H2W_DBG(""); + + switch_set_state(&hi->sdev, HTC_HEADSET); + configure_cpld(GPIO); + +#ifdef CONFIG_MSM_SERIAL_DEBUGGER + msm_serial_debug_enable(false); +#endif + + + /* On some non-standard headset adapters (usually those without a + * button) the btn line is pulled down at the same time as the detect + * line. We can check here by sampling the button line, if it is + * low then it is probably a bad adapter so ignore the button. + * If the button is released then we stop ignoring the button, so that + * the user can recover from the situation where a headset is plugged + * in with button held down. 
+ */ + hi->ignore_btn = !gpio_get_value(SAPPHIRE_GPIO_CABLE_IN2); + + /* Enable button irq */ + local_irq_save(irq_flags); + enable_irq(hi->irq_btn); + local_irq_restore(irq_flags); + + hi->debounce_time = ktime_set(0, 20000000); /* 20 ms */ +} + +static void remove_headset(void) +{ + unsigned long irq_flags; + + H2W_DBG(""); + + switch_set_state(&hi->sdev, NO_DEVICE); + configure_cpld(UART3); + + /* Disable button */ + local_irq_save(irq_flags); + disable_irq(hi->irq_btn); + local_irq_restore(irq_flags); + + if (atomic_read(&hi->btn_state)) + button_released(); + + hi->debounce_time = ktime_set(0, 100000000); /* 100 ms */ +} + +static void detection_work(struct work_struct *work) +{ + unsigned long irq_flags; + int clk, cable_in1; + + H2W_DBG(""); + + if (gpio_get_value(SAPPHIRE_GPIO_CABLE_IN1) != 0) { + /* Headset not plugged in */ + if (switch_get_state(&hi->sdev) == HTC_HEADSET) + remove_headset(); + return; + } + + /* Something plugged in, lets make sure its a headset */ + + /* Switch CPLD to GPIO to do detection */ + configure_cpld(GPIO); + /* Disable headset interrupt while detecting.*/ + local_irq_save(irq_flags); + disable_irq(hi->irq); + local_irq_restore(irq_flags); + + /* Set GPIO_CABLE_IN1 as output high */ + gpio_direction_output(SAPPHIRE_GPIO_CABLE_IN1, 1); + /* Delay 10ms for pin stable. */ + msleep(10); + /* Save H2W_CLK */ + clk = gpio_get_value(SAPPHIRE_GPIO_H2W_CLK_GPI); + /* Set GPIO_CABLE_IN1 as input */ + gpio_direction_input(SAPPHIRE_GPIO_CABLE_IN1); + + /* Restore IRQs */ + local_irq_save(irq_flags); + enable_irq(hi->irq); + local_irq_restore(irq_flags); + + cable_in1 = gpio_get_value(SAPPHIRE_GPIO_CABLE_IN1); + + if (cable_in1 == 0 && clk == 0) { + if (switch_get_state(&hi->sdev) == NO_DEVICE) + insert_headset(); + } else { + configure_cpld(UART3); + H2W_DBG("CABLE_IN1 was low, but not a headset " + "(recent cable_in1 = %d, clk = %d)", cable_in1, clk); + } +} + +static enum hrtimer_restart button_event_timer_func(struct hrtimer *data) +{ + H2W_DBG(""); + + if (switch_get_state(&hi->sdev) == HTC_HEADSET) { + if (gpio_get_value(SAPPHIRE_GPIO_CABLE_IN2)) { + if (hi->ignore_btn) + hi->ignore_btn = 0; + else if (atomic_read(&hi->btn_state)) + button_released(); + } else { + if (!hi->ignore_btn && !atomic_read(&hi->btn_state)) + button_pressed(); + } + } + + return HRTIMER_NORESTART; +} + +static enum hrtimer_restart detect_event_timer_func(struct hrtimer *data) +{ + H2W_DBG(""); + + queue_work(g_detection_work_queue, &g_detection_work); + return HRTIMER_NORESTART; +} + +static irqreturn_t detect_irq_handler(int irq, void *dev_id) +{ + int value1, value2; + int retry_limit = 10; + + H2W_DBG(""); + do { + value1 = gpio_get_value(SAPPHIRE_GPIO_CABLE_IN1); + set_irq_type(hi->irq, value1 ? + IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH); + value2 = gpio_get_value(SAPPHIRE_GPIO_CABLE_IN1); + } while (value1 != value2 && retry_limit-- > 0); + + H2W_DBG("value2 = %d (%d retries)", value2, (10-retry_limit)); + + if ((switch_get_state(&hi->sdev) == NO_DEVICE) ^ value2) { + if (switch_get_state(&hi->sdev) == HTC_HEADSET) + hi->ignore_btn = 1; + /* Do the rest of the work in timer context */ + hrtimer_start(&hi->timer, hi->debounce_time, HRTIMER_MODE_REL); + } + + return IRQ_HANDLED; +} + +static irqreturn_t button_irq_handler(int irq, void *dev_id) +{ + int value1, value2; + int retry_limit = 10; + + H2W_DBG(""); + do { + value1 = gpio_get_value(SAPPHIRE_GPIO_CABLE_IN2); + set_irq_type(hi->irq_btn, value1 ? 
+ IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH); + value2 = gpio_get_value(SAPPHIRE_GPIO_CABLE_IN2); + } while (value1 != value2 && retry_limit-- > 0); + + H2W_DBG("value2 = %d (%d retries)", value2, (10-retry_limit)); + + hrtimer_start(&hi->btn_timer, hi->btn_debounce_time, HRTIMER_MODE_REL); + + return IRQ_HANDLED; +} + +#if defined(CONFIG_DEBUG_FS) +static void h2w_debug_set(void *data, u64 val) +{ + switch_set_state(&hi->sdev, (int)val); +} + +static u64 h2w_debug_get(void *data) +{ + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(h2w_debug_fops, h2w_debug_get, h2w_debug_set, "%llu\n"); +static int __init h2w_debug_init(void) +{ + struct dentry *dent; + + dent = debugfs_create_dir("h2w", 0); + if (IS_ERR(dent)) + return PTR_ERR(dent); + + debugfs_create_file("state", 0644, dent, NULL, &h2w_debug_fops); + + return 0; +} + +device_initcall(h2w_debug_init); +#endif + +static int sapphire_h2w_probe(struct platform_device *pdev) +{ + int ret; + unsigned long irq_flags; + + printk(KERN_INFO "H2W: Registering H2W (headset) driver\n"); + hi = kzalloc(sizeof(struct h2w_info), GFP_KERNEL); + if (!hi) + return -ENOMEM; + + atomic_set(&hi->btn_state, 0); + hi->ignore_btn = 0; + + hi->debounce_time = ktime_set(0, 100000000); /* 100 ms */ + hi->btn_debounce_time = ktime_set(0, 10000000); /* 10 ms */ + hi->sdev.name = "h2w"; + hi->sdev.print_name = sapphire_h2w_print_name; + + ret = switch_dev_register(&hi->sdev); + if (ret < 0) + goto err_switch_dev_register; + + g_detection_work_queue = create_workqueue("detection"); + if (g_detection_work_queue == NULL) { + ret = -ENOMEM; + goto err_create_work_queue; + } + + ret = gpio_request(SAPPHIRE_GPIO_CABLE_IN1, "h2w_detect"); + if (ret < 0) + goto err_request_detect_gpio; + + ret = gpio_request(SAPPHIRE_GPIO_CABLE_IN2, "h2w_button"); + if (ret < 0) + goto err_request_button_gpio; + + ret = gpio_direction_input(SAPPHIRE_GPIO_CABLE_IN1); + if (ret < 0) + goto err_set_detect_gpio; + + ret = gpio_direction_input(SAPPHIRE_GPIO_CABLE_IN2); + if (ret < 0) + goto err_set_button_gpio; + + hi->irq = gpio_to_irq(SAPPHIRE_GPIO_CABLE_IN1); + if (hi->irq < 0) { + ret = hi->irq; + goto err_get_h2w_detect_irq_num_failed; + } + + hi->irq_btn = gpio_to_irq(SAPPHIRE_GPIO_CABLE_IN2); + if (hi->irq_btn < 0) { + ret = hi->irq_btn; + goto err_get_button_irq_num_failed; + } + + /* Set CPLD MUX to H2W <-> CPLD GPIO */ + configure_cpld(UART3); + /* Set the CPLD connected H2W GPIO's to input */ + gpio_set_value(SAPPHIRE_GPIO_H2W_CLK_DIR, 0); + gpio_set_value(SAPPHIRE_GPIO_H2W_DAT_DIR, 0); + + hrtimer_init(&hi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hi->timer.function = detect_event_timer_func; + hrtimer_init(&hi->btn_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hi->btn_timer.function = button_event_timer_func; + + ret = request_irq(hi->irq, detect_irq_handler, + IRQF_TRIGGER_LOW, "h2w_detect", NULL); + if (ret < 0) + goto err_request_detect_irq; + + /* Disable button until plugged in */ + set_irq_flags(hi->irq_btn, IRQF_VALID | IRQF_NOAUTOEN); + ret = request_irq(hi->irq_btn, button_irq_handler, + IRQF_TRIGGER_LOW, "h2w_button", NULL); + if (ret < 0) + goto err_request_h2w_headset_button_irq; + + ret = set_irq_wake(hi->irq, 1); + if (ret < 0) + goto err_request_input_dev; + ret = set_irq_wake(hi->irq_btn, 1); + if (ret < 0) + goto err_request_input_dev; + + hi->input = input_allocate_device(); + if (!hi->input) { + ret = -ENOMEM; + goto err_request_input_dev; + } + + hi->input->name = "h2w headset"; + hi->input->evbit[0] = BIT_MASK(EV_KEY); + hi->input->keybit[BIT_WORD(KEY_MEDIA)] = 
BIT_MASK(KEY_MEDIA);
+
+	ret = input_register_device(hi->input);
+	if (ret < 0)
+		goto err_register_input_dev;
+
+	return 0;
+
+err_register_input_dev:
+	input_free_device(hi->input);
+err_request_input_dev:
+	free_irq(hi->irq_btn, 0);
+err_request_h2w_headset_button_irq:
+	free_irq(hi->irq, 0);
+err_request_detect_irq:
+err_get_button_irq_num_failed:
+err_get_h2w_detect_irq_num_failed:
+err_set_button_gpio:
+err_set_detect_gpio:
+	gpio_free(SAPPHIRE_GPIO_CABLE_IN2);
+err_request_button_gpio:
+	gpio_free(SAPPHIRE_GPIO_CABLE_IN1);
+err_request_detect_gpio:
+	destroy_workqueue(g_detection_work_queue);
+err_create_work_queue:
+	switch_dev_unregister(&hi->sdev);
+err_switch_dev_register:
+	printk(KERN_ERR "H2W: Failed to register driver\n");
+
+	return ret;
+}
+
+static int sapphire_h2w_remove(struct platform_device *pdev)
+{
+	H2W_DBG("");
+	if (switch_get_state(&hi->sdev))
+		remove_headset();
+	input_unregister_device(hi->input);
+	gpio_free(SAPPHIRE_GPIO_CABLE_IN2);
+	gpio_free(SAPPHIRE_GPIO_CABLE_IN1);
+	free_irq(hi->irq_btn, 0);
+	free_irq(hi->irq, 0);
+	destroy_workqueue(g_detection_work_queue);
+	switch_dev_unregister(&hi->sdev);
+
+	return 0;
+}
+
+static struct platform_device sapphire_h2w_device = {
+	.name = "sapphire-h2w",
+};
+
+static struct platform_driver sapphire_h2w_driver = {
+	.probe = sapphire_h2w_probe,
+	.remove = sapphire_h2w_remove,
+	.driver = {
+		.name = "sapphire-h2w",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init sapphire_h2w_init(void)
+{
+	int ret;
+	if (!machine_is_sapphire())
+		return 0;
+	H2W_DBG("");
+	ret = platform_driver_register(&sapphire_h2w_driver);
+	if (ret)
+		return ret;
+	return platform_device_register(&sapphire_h2w_device);
+}
+
+static void __exit sapphire_h2w_exit(void)
+{
+	platform_device_unregister(&sapphire_h2w_device);
+	platform_driver_unregister(&sapphire_h2w_driver);
+}
+
+module_init(sapphire_h2w_init);
+module_exit(sapphire_h2w_exit);
+
+MODULE_AUTHOR("Laurence Chen ");
+MODULE_DESCRIPTION("HTC 2 Wire detection driver for sapphire");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-msm/board-sapphire-keypad.c b/arch/arm/mach-msm/board-sapphire-keypad.c
new file mode 100755
index 0000000000000..a48c2d3f375cd
--- /dev/null
+++ b/arch/arm/mach-msm/board-sapphire-keypad.c
@@ -0,0 +1,131 @@
+/* arch/arm/mach-msm/board-sapphire-keypad.c
+ * Copyright (C) 2007-2009 HTC Corporation.
+ * Author: Thomas Tsai
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#include
+#include
+#include
+#include
+#include
+#include "board-sapphire.h"
+static char *keycaps = "--qwerty";
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "board_sapphire."
+module_param_named(keycaps, keycaps, charp, 0);
+
+
+static unsigned int sapphire_col_gpios[] = { 35, 34 };
+
+/* KP_MKIN2 (GPIO40) is not used?
*/ +static unsigned int sapphire_row_gpios[] = { 42, 41 }; + +#define KEYMAP_INDEX(col, row) ((col)*ARRAY_SIZE(sapphire_row_gpios) + (row)) + +/*scan matrix key*/ +/* HOME(up) MENU (up) Back Search */ +static const unsigned short sapphire_keymap2[ARRAY_SIZE(sapphire_col_gpios) * ARRAY_SIZE(sapphire_row_gpios)] = { + [KEYMAP_INDEX(0, 0)] = KEY_COMPOSE, + [KEYMAP_INDEX(0, 1)] = KEY_BACK, + + [KEYMAP_INDEX(1, 0)] = KEY_MENU, + [KEYMAP_INDEX(1, 1)] = KEY_SEND, +}; + +/* HOME(up) + MENU (down)*/ +static const unsigned short sapphire_keymap1[ARRAY_SIZE(sapphire_col_gpios) * + ARRAY_SIZE(sapphire_row_gpios)] = { + [KEYMAP_INDEX(0, 0)] = KEY_BACK, + [KEYMAP_INDEX(0, 1)] = KEY_MENU, + + [KEYMAP_INDEX(1, 0)] = KEY_HOME, + [KEYMAP_INDEX(1, 1)] = KEY_SEND, +}; + +/* MENU(up) + HOME (down)*/ +static const unsigned short sapphire_keymap0[ARRAY_SIZE(sapphire_col_gpios) * + ARRAY_SIZE(sapphire_row_gpios)] = { + [KEYMAP_INDEX(0, 0)] = KEY_BACK, + [KEYMAP_INDEX(0, 1)] = KEY_HOME, + + [KEYMAP_INDEX(1, 0)] = KEY_MENU, + [KEYMAP_INDEX(1, 1)] = KEY_SEND, +}; + +static struct gpio_event_matrix_info sapphire_keypad_matrix_info = { + .info.func = gpio_event_matrix_func, + .keymap = sapphire_keymap2, + .output_gpios = sapphire_col_gpios, + .input_gpios = sapphire_row_gpios, + .noutputs = ARRAY_SIZE(sapphire_col_gpios), + .ninputs = ARRAY_SIZE(sapphire_row_gpios), + .settle_time.tv.nsec = 40 * NSEC_PER_USEC, + .poll_time.tv.nsec = 20 * NSEC_PER_MSEC, + .debounce_delay.tv.nsec = 50 * NSEC_PER_MSEC, + .flags = GPIOKPF_LEVEL_TRIGGERED_IRQ | + GPIOKPF_REMOVE_PHANTOM_KEYS | + GPIOKPF_PRINT_UNMAPPED_KEYS /*| GPIOKPF_PRINT_MAPPED_KEYS*/ +}; + +static struct gpio_event_direct_entry sapphire_keypad_nav_map[] = { + { SAPPHIRE_POWER_KEY, KEY_END }, + { SAPPHIRE_VOLUME_UP, KEY_VOLUMEUP }, + { SAPPHIRE_VOLUME_DOWN, KEY_VOLUMEDOWN }, +}; + +static struct gpio_event_input_info sapphire_keypad_nav_info = { + .info.func = gpio_event_input_func, + .flags = 0, + .type = EV_KEY, + .keymap = sapphire_keypad_nav_map, + .debounce_time.tv.nsec = 20 * NSEC_PER_MSEC, + .keymap_size = ARRAY_SIZE(sapphire_keypad_nav_map) +}; + +static struct gpio_event_info *sapphire_keypad_info[] = { + &sapphire_keypad_matrix_info.info, + &sapphire_keypad_nav_info.info, +}; + +static struct gpio_event_platform_data sapphire_keypad_data = { + .name = "sapphire-keypad", + .info = sapphire_keypad_info, + .info_count = ARRAY_SIZE(sapphire_keypad_info) +}; + +static struct platform_device sapphire_keypad_device = { + .name = GPIO_EVENT_DEV_NAME, + .id = 0, + .dev = { + .platform_data = &sapphire_keypad_data, + }, +}; + +static int __init sapphire_init_keypad(void) +{ + if (!machine_is_sapphire()) + return 0; + + switch (sapphire_get_hwid()) { + case 0: + sapphire_keypad_matrix_info.keymap = sapphire_keymap0; + break; + default: + if(system_rev != 0x80) + sapphire_keypad_matrix_info.keymap = sapphire_keymap1; + break; + } + return platform_device_register(&sapphire_keypad_device); +} + +device_initcall(sapphire_init_keypad); + diff --git a/arch/arm/mach-msm/board-sapphire-mmc.c b/arch/arm/mach-msm/board-sapphire-mmc.c new file mode 100755 index 0000000000000..252831d5f1e46 --- /dev/null +++ b/arch/arm/mach-msm/board-sapphire-mmc.c @@ -0,0 +1,482 @@ +/* linux/arch/arm/mach-msm/board-sapphire-mmc.c + * Copyright (C) 2007-2009 HTC Corporation. 
+ * Author: Thomas Tsai + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include + +#include "devices.h" +#include "board-sapphire.h" +#include "proc_comm.h" + +#define DEBUG_SDSLOT_VDD 0 + +/* ---- COMMON ---- */ +static void config_gpio_table(uint32_t *table, int len) +{ + int n; + unsigned id; + for (n = 0; n < len; n++) { + id = table[n]; + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); + } +} + +/* ---- SDCARD ---- */ + +static uint32_t sdcard_on_gpio_table[] = { + PCOM_GPIO_CFG(62, 2, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* CLK */ + PCOM_GPIO_CFG(63, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* CMD */ + PCOM_GPIO_CFG(64, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* DAT3 */ + PCOM_GPIO_CFG(65, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* DAT2 */ + PCOM_GPIO_CFG(66, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(67, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT0 */ +}; + +static uint32_t sdcard_off_gpio_table[] = { + PCOM_GPIO_CFG(62, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CLK */ + PCOM_GPIO_CFG(63, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CMD */ + PCOM_GPIO_CFG(64, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(65, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(66, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(67, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT0 */ +}; + +static uint opt_disable_sdcard; + +static int __init sapphire_disablesdcard_setup(char *str) +{ + int cal = simple_strtol(str, NULL, 0); + + opt_disable_sdcard = cal; + return 1; +} + +__setup("board_sapphire.disable_sdcard=", sapphire_disablesdcard_setup); + +static struct vreg *vreg_sdslot; /* SD slot power */ + +struct mmc_vdd_xlat { + int mask; + int level; +}; + +static struct mmc_vdd_xlat mmc_vdd_table[] = { + { MMC_VDD_165_195, 1800 }, + { MMC_VDD_20_21, 2050 }, + { MMC_VDD_21_22, 2150 }, + { MMC_VDD_22_23, 2250 }, + { MMC_VDD_23_24, 2350 }, + { MMC_VDD_24_25, 2450 }, + { MMC_VDD_25_26, 2550 }, + { MMC_VDD_26_27, 2650 }, + { MMC_VDD_27_28, 2750 }, + { MMC_VDD_28_29, 2850 }, + { MMC_VDD_29_30, 2950 }, +}; + +static unsigned int sdslot_vdd = 0xffffffff; +static unsigned int sdslot_vreg_enabled; + +static uint32_t sapphire_sdslot_switchvdd(struct device *dev, unsigned int vdd) +{ + int i, rc; + + BUG_ON(!vreg_sdslot); + + if (vdd == sdslot_vdd) + return 0; + + sdslot_vdd = vdd; + + if (vdd == 0) { +#if DEBUG_SDSLOT_VDD + printk(KERN_DEBUG "%s: Disabling SD slot power\n", __func__); +#endif + config_gpio_table(sdcard_off_gpio_table, + ARRAY_SIZE(sdcard_off_gpio_table)); + vreg_disable(vreg_sdslot); + sdslot_vreg_enabled = 0; + return 0; + } + + if (!sdslot_vreg_enabled) { + rc = vreg_enable(vreg_sdslot); + if (rc) { + printk(KERN_ERR "%s: Error enabling vreg (%d)\n", + __func__, rc); + } + config_gpio_table(sdcard_on_gpio_table, + ARRAY_SIZE(sdcard_on_gpio_table)); + sdslot_vreg_enabled = 1; + } + + for (i = 0; i < ARRAY_SIZE(mmc_vdd_table); i++) { + if 
(mmc_vdd_table[i].mask == (1 << vdd)) { +#if DEBUG_SDSLOT_VDD + printk(KERN_DEBUG "%s: Setting level to %u\n", + __func__, mmc_vdd_table[i].level); +#endif + rc = vreg_set_level(vreg_sdslot, + mmc_vdd_table[i].level); + if (rc) { + printk(KERN_ERR + "%s: Error setting vreg level (%d)\n", + __func__, rc); + } + return 0; + } + } + + printk(KERN_ERR "%s: Invalid VDD %d specified\n", __func__, vdd); + return 0; +} + +static unsigned int sapphire_sdslot_status(struct device *dev) +{ + unsigned int status; + + status = (unsigned int) gpio_get_value(SAPPHIRE_GPIO_SDMC_CD_N); + return !status; +} + +#define SAPPHIRE_MMC_VDD (MMC_VDD_165_195 | MMC_VDD_20_21 | MMC_VDD_21_22 \ + | MMC_VDD_22_23 | MMC_VDD_23_24 | MMC_VDD_24_25 \ + | MMC_VDD_25_26 | MMC_VDD_26_27 | MMC_VDD_27_28 \ + | MMC_VDD_28_29 | MMC_VDD_29_30) + +static struct msm_mmc_platform_data sapphire_sdslot_data = { + .ocr_mask = SAPPHIRE_MMC_VDD, + .status = sapphire_sdslot_status, + .translate_vdd = sapphire_sdslot_switchvdd, +}; + +/* ---- WIFI ---- */ + +static uint32_t wifi_on_gpio_table[] = { + PCOM_GPIO_CFG(51, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(52, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(53, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(54, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT0 */ + PCOM_GPIO_CFG(55, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* CMD */ + PCOM_GPIO_CFG(56, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* CLK */ + PCOM_GPIO_CFG(29, 0, GPIO_INPUT, GPIO_NO_PULL, GPIO_4MA), /* WLAN IRQ */ +}; + +static uint32_t wifi_off_gpio_table[] = { + PCOM_GPIO_CFG(51, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(52, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(53, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(54, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT0 */ + PCOM_GPIO_CFG(55, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CMD */ + PCOM_GPIO_CFG(56, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CLK */ + PCOM_GPIO_CFG(29, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* WLAN IRQ */ +}; + +static struct vreg *vreg_wifi_osc; /* WIFI 32khz oscilator */ +static int sapphire_wifi_cd = 0; /* WIFI virtual 'card detect' status */ + +static struct sdio_embedded_func wifi_func = { + .f_class = SDIO_CLASS_WLAN, + .f_maxblksize = 512, +}; + +static struct embedded_sdio_data sapphire_wifi_emb_data = { + .cis = { + .vendor = 0x104c, + .device = 0x9066, + .blksize = 512, + .max_dtr = 20000000, + }, + .cccr = { + .multi_block = 0, + .low_speed = 0, + .wide_bus = 1, + .high_power = 0, + .high_speed = 0, + }, + .funcs = &wifi_func, + .num_funcs = 1, +}; + +static void (*wifi_status_cb)(int card_present, void *dev_id); +static void *wifi_status_cb_devid; + +static int sapphire_wifi_status_register(void (*callback)(int card_present, + void *dev_id), + void *dev_id) +{ + if (wifi_status_cb) + return -EAGAIN; + wifi_status_cb = callback; + wifi_status_cb_devid = dev_id; + return 0; +} + +static unsigned int sapphire_wifi_status(struct device *dev) +{ + return sapphire_wifi_cd; +} + +int sapphire_wifi_set_carddetect(int val) +{ + printk(KERN_DEBUG "%s: %d\n", __func__, val); + sapphire_wifi_cd = val; + if (wifi_status_cb) + wifi_status_cb(val, wifi_status_cb_devid); + else + printk(KERN_WARNING "%s: Nobody to notify\n", __func__); + return 0; +} +#ifndef CONFIG_WIFI_CONTROL_FUNC +EXPORT_SYMBOL(sapphire_wifi_set_carddetect); +#endif + +int sapphire_wifi_power_state=0; +int sapphire_bt_power_state=0; + +int 
sapphire_wifi_power(int on)
+{
+	int rc;
+
+	printk(KERN_DEBUG "%s: %d\n", __func__, on);
+
+	if (on) {
+		config_gpio_table(wifi_on_gpio_table,
+				  ARRAY_SIZE(wifi_on_gpio_table));
+		rc = vreg_enable(vreg_wifi_osc);
+		if (rc)
+			return rc;
+		htc_pwrsink_set(PWRSINK_WIFI, 70);
+	} else {
+		config_gpio_table(wifi_off_gpio_table,
+				  ARRAY_SIZE(wifi_off_gpio_table));
+		htc_pwrsink_set(PWRSINK_WIFI, 0);
+	}
+	gpio_set_value(SAPPHIRE_GPIO_MAC_32K_EN, on);
+	mdelay(100);
+	gpio_set_value(SAPPHIRE_GPIO_WIFI_EN, on);
+	mdelay(100);
+	if (!on) {
+		if (!sapphire_bt_power_state) {
+			vreg_disable(vreg_wifi_osc);
+			printk(KERN_DEBUG "WiFi disable vreg_wifi_osc.\n");
+		} else
+			printk(KERN_DEBUG "WiFi shouldn't disable vreg_wifi_osc. BT is using it!!\n");
+	}
+	sapphire_wifi_power_state = on;
+	return 0;
+}
+#ifndef CONFIG_WIFI_CONTROL_FUNC
+EXPORT_SYMBOL(sapphire_wifi_power);
+#endif
+
+/* Enable VREG_MMC pin to turn on fastclock oscillator : colin */
+int sapphire_bt_fastclock_power(int on)
+{
+	int rc;
+
+	printk(KERN_DEBUG "sapphire_bt_fastclock_power on = %d\n", on);
+	if (vreg_wifi_osc) {
+		if (on) {
+			rc = vreg_enable(vreg_wifi_osc);
+			printk(KERN_DEBUG "BT vreg_enable vreg_mmc, rc=%d\n",
+			       rc);
+			if (rc) {
+				printk(KERN_ERR "Error turning on sapphire_bt_fastclock_power, rc=%d\n", rc);
+				return rc;
+			}
+		} else {
+			if (!sapphire_wifi_power_state) {
+				vreg_disable(vreg_wifi_osc);
+				printk(KERN_DEBUG "BT disable vreg_wifi_osc.\n");
+			} else
+				printk(KERN_DEBUG "BT shouldn't disable vreg_wifi_osc. WiFi is using it!!\n");
+		}
+	}
+	sapphire_bt_power_state = on;
+	return 0;
+}
+EXPORT_SYMBOL(sapphire_bt_fastclock_power);
+
+static int sapphire_wifi_reset_state;
+void sapphire_wifi_reset(int on)
+{
+	printk(KERN_DEBUG "%s: %d\n", __func__, on);
+	gpio_set_value(SAPPHIRE_GPIO_WIFI_PA_RESETX, !on);
+	sapphire_wifi_reset_state = on;
+	mdelay(50);
+}
+#ifndef CONFIG_WIFI_CONTROL_FUNC
+EXPORT_SYMBOL(sapphire_wifi_reset);
+#endif
+
+static struct msm_mmc_platform_data sapphire_wifi_data = {
+	.ocr_mask = MMC_VDD_28_29,
+	.status = sapphire_wifi_status,
+	.register_status_notify = sapphire_wifi_status_register,
+	.embedded_sdio = &sapphire_wifi_emb_data,
+};
+
+int __init sapphire_init_mmc(unsigned int sys_rev)
+{
+	wifi_status_cb = NULL;
+
+	sdslot_vreg_enabled = 0;
+
+	vreg_sdslot = vreg_get(0, "gp6");
+	if (IS_ERR(vreg_sdslot))
+		return PTR_ERR(vreg_sdslot);
+	vreg_wifi_osc = vreg_get(0, "mmc");
+	if (IS_ERR(vreg_wifi_osc))
+		return PTR_ERR(vreg_wifi_osc);
+
+	set_irq_wake(SAPPHIRE_GPIO_TO_INT(SAPPHIRE_GPIO_SDMC_CD_N), 1);
+
+	msm_add_sdcc(1, &sapphire_wifi_data, 0, 0);
+
+	if (!opt_disable_sdcard)
+		msm_add_sdcc(2, &sapphire_sdslot_data,
+			     SAPPHIRE_GPIO_TO_INT(SAPPHIRE_GPIO_SDMC_CD_N), 0);
+	else
+		printk(KERN_INFO "sapphire: SD-Card interface disabled\n");
+	return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static int sapphiremmc_dbg_wifi_reset_set(void *data, u64 val)
+{
+	sapphire_wifi_reset((int) val);
+	return 0;
+}
+
+static int sapphiremmc_dbg_wifi_reset_get(void *data, u64 *val)
+{
+	*val = sapphire_wifi_reset_state;
+	return 0;
+}
+
+static int sapphiremmc_dbg_wifi_cd_set(void *data, u64 val)
+{
+	sapphire_wifi_set_carddetect((int) val);
+	return 0;
+}
+
+static int sapphiremmc_dbg_wifi_cd_get(void *data, u64 *val)
+{
+	*val = sapphire_wifi_cd;
+	return 0;
+}
+
+static int sapphiremmc_dbg_wifi_pwr_set(void *data, u64 val)
+{
+	sapphire_wifi_power((int) val);
+	return 0;
+}
+
+static int sapphiremmc_dbg_wifi_pwr_get(void *data, u64 *val)
+{
+	*val = sapphire_wifi_power_state;
+	return 0;
+}
+
+static int sapphiremmc_dbg_sd_pwr_set(void
*data, u64 val) +{ + sapphire_sdslot_switchvdd(NULL, (unsigned int) val); + return 0; +} + +static int sapphiremmc_dbg_sd_pwr_get(void *data, u64 *val) +{ + *val = sdslot_vdd; + return 0; +} + +static int sapphiremmc_dbg_sd_cd_set(void *data, u64 val) +{ + return -ENOSYS; +} + +static int sapphiremmc_dbg_sd_cd_get(void *data, u64 *val) +{ + *val = sapphire_sdslot_status(NULL); + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(sapphiremmc_dbg_wifi_reset_fops, + sapphiremmc_dbg_wifi_reset_get, + sapphiremmc_dbg_wifi_reset_set, "%llu\n"); + +DEFINE_SIMPLE_ATTRIBUTE(sapphiremmc_dbg_wifi_cd_fops, + sapphiremmc_dbg_wifi_cd_get, + sapphiremmc_dbg_wifi_cd_set, "%llu\n"); + +DEFINE_SIMPLE_ATTRIBUTE(sapphiremmc_dbg_wifi_pwr_fops, + sapphiremmc_dbg_wifi_pwr_get, + sapphiremmc_dbg_wifi_pwr_set, "%llu\n"); + +DEFINE_SIMPLE_ATTRIBUTE(sapphiremmc_dbg_sd_pwr_fops, + sapphiremmc_dbg_sd_pwr_get, + sapphiremmc_dbg_sd_pwr_set, "%llu\n"); + +DEFINE_SIMPLE_ATTRIBUTE(sapphiremmc_dbg_sd_cd_fops, + sapphiremmc_dbg_sd_cd_get, + sapphiremmc_dbg_sd_cd_set, "%llu\n"); + +static int __init sapphiremmc_dbg_init(void) +{ + struct dentry *dent; + + if (!machine_is_sapphire()) + return 0; + + dent = debugfs_create_dir("sapphiremmc_dbg", 0); + if (IS_ERR(dent)) + return PTR_ERR(dent); + + debugfs_create_file("wifi_reset", 0644, dent, NULL, + &sapphiremmc_dbg_wifi_reset_fops); + debugfs_create_file("wifi_cd", 0644, dent, NULL, + &sapphiremmc_dbg_wifi_cd_fops); + debugfs_create_file("wifi_pwr", 0644, dent, NULL, + &sapphiremmc_dbg_wifi_pwr_fops); + + debugfs_create_file("sd_pwr", 0644, dent, NULL, + &sapphiremmc_dbg_sd_pwr_fops); + debugfs_create_file("sd_cd", 0644, dent, NULL, + &sapphiremmc_dbg_sd_cd_fops); + + return 0; +} + +device_initcall(sapphiremmc_dbg_init); + +#endif diff --git a/arch/arm/mach-msm/board-sapphire-panel.c b/arch/arm/mach-msm/board-sapphire-panel.c new file mode 100644 index 0000000000000..775ff5b194346 --- /dev/null +++ b/arch/arm/mach-msm/board-sapphire-panel.c @@ -0,0 +1,1271 @@ +/* linux/arch/arm/mach-msm/board-sapphire-panel.c + * Copyright (C) 2007-2009 HTC Corporation. + * Author: Thomas Tsai + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+*/ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include "board-sapphire.h" +#include "proc_comm.h" +#include "devices.h" + +#define DEBUG_SAPPHIRE_PANEL 0 +#define userid 0xD10 + +#define VSYNC_GPIO 97 + +enum sapphire_panel_type { + SAPPHIRE_PANEL_SHARP = 0, + SAPPHIRE_PANEL_TOPPOLY, + NUM_OF_SAPPHIRE_PANELS, +}; +static int g_panel_id = -1 ; +static int g_panel_inited = 0 ; + +#define SAPPHIRE_DEFAULT_BACKLIGHT_BRIGHTNESS 132 +#define GOOGLE_DEFAULT_BACKLIGHT_BRIGHTNESS 102 +#define SDBB SAPPHIRE_DEFAULT_BACKLIGHT_BRIGHTNESS +#define GDBB GOOGLE_DEFAULT_BACKLIGHT_BRIGHTNESS + +static int sapphire_backlight_off; +static int sapphire_backlight_brightness = + SAPPHIRE_DEFAULT_BACKLIGHT_BRIGHTNESS; + +static uint8_t sapphire_backlight_last_level = 33; +static DEFINE_MUTEX(sapphire_backlight_lock); + +/* Divide dimming level into 12 sections, and restrict maximum level to 27 */ +#define DIMMING_STEPS 12 +static unsigned dimming_levels[NUM_OF_SAPPHIRE_PANELS][DIMMING_STEPS] = { + {0, 1, 2, 3, 6, 9, 11, 13, 16, 19, 22, 25}, /* Sharp */ + {0, 1, 2, 4, 7, 10, 13, 15, 18, 21, 24, 27}, /* Toppolly */ +}; +static unsigned pwrsink_percents[] = {0, 6, 8, 15, 26, 34, 46, 54, 65, 77, 87, + 100}; + +static void sapphire_set_backlight_level(uint8_t level) +{ + unsigned dimming_factor = 255/DIMMING_STEPS + 1; + int index, new_level ; + unsigned percent; + unsigned long flags; + int i = 0; + + /* Non-linear transform for the difference between two + * kind of default backlight settings. + */ + new_level = level<=GDBB ? + level*SDBB/GDBB : (SDBB + (level-GDBB)*(255-SDBB) / (255-GDBB)) ; + index = new_level/dimming_factor ; + +#if DEBUG_SAPPHIRE_PANEL + printk(KERN_INFO "level=%d, new level=%d, dimming_levels[%d]=%d\n", + level, new_level, index, dimming_levels[g_panel_id][index]); +#endif + percent = pwrsink_percents[index]; + level = dimming_levels[g_panel_id][index]; + + if (sapphire_backlight_last_level == level) + return; + + if (level == 0) { + gpio_set_value(27, 0); + msleep(2); + } else { + local_irq_save(flags); + if (sapphire_backlight_last_level == 0) { + gpio_set_value(27, 1); + udelay(40); + sapphire_backlight_last_level = 33; + } + i = (sapphire_backlight_last_level - level + 33) % 33; + while (i-- > 0) { + gpio_set_value(27, 0); + udelay(1); + gpio_set_value(27, 1); + udelay(1); + } + local_irq_restore(flags); + } + sapphire_backlight_last_level = level; + htc_pwrsink_set(PWRSINK_BACKLIGHT, percent); +} + +#define MDDI_CLIENT_CORE_BASE 0x108000 +#define LCD_CONTROL_BLOCK_BASE 0x110000 +#define SPI_BLOCK_BASE 0x120000 +#define I2C_BLOCK_BASE 0x130000 +#define PWM_BLOCK_BASE 0x140000 +#define GPIO_BLOCK_BASE 0x150000 +#define SYSTEM_BLOCK1_BASE 0x160000 +#define SYSTEM_BLOCK2_BASE 0x170000 + + +#define DPSUS (MDDI_CLIENT_CORE_BASE|0x24) +#define SYSCLKENA (MDDI_CLIENT_CORE_BASE|0x2C) +#define PWM0OFF (PWM_BLOCK_BASE|0x1C) + +#define V_VDDE2E_VDD2_GPIO 0 +#define V_VDDE2E_VDD2_GPIO_5M 89 +#define MDDI_RST_N 82 + +#define MDDICAP0 (MDDI_CLIENT_CORE_BASE|0x00) +#define MDDICAP1 (MDDI_CLIENT_CORE_BASE|0x04) +#define MDDICAP2 (MDDI_CLIENT_CORE_BASE|0x08) +#define MDDICAP3 (MDDI_CLIENT_CORE_BASE|0x0C) +#define MDCAPCHG (MDDI_CLIENT_CORE_BASE|0x10) +#define MDCRCERC (MDDI_CLIENT_CORE_BASE|0x14) +#define TTBUSSEL (MDDI_CLIENT_CORE_BASE|0x18) +#define DPSET0 (MDDI_CLIENT_CORE_BASE|0x1C) +#define DPSET1 (MDDI_CLIENT_CORE_BASE|0x20) +#define DPSUS (MDDI_CLIENT_CORE_BASE|0x24) +#define DPRUN 
(MDDI_CLIENT_CORE_BASE|0x28) +#define SYSCKENA (MDDI_CLIENT_CORE_BASE|0x2C) +#define TESTMODE (MDDI_CLIENT_CORE_BASE|0x30) +#define FIFOMONI (MDDI_CLIENT_CORE_BASE|0x34) +#define INTMONI (MDDI_CLIENT_CORE_BASE|0x38) +#define MDIOBIST (MDDI_CLIENT_CORE_BASE|0x3C) +#define MDIOPSET (MDDI_CLIENT_CORE_BASE|0x40) +#define BITMAP0 (MDDI_CLIENT_CORE_BASE|0x44) +#define BITMAP1 (MDDI_CLIENT_CORE_BASE|0x48) +#define BITMAP2 (MDDI_CLIENT_CORE_BASE|0x4C) +#define BITMAP3 (MDDI_CLIENT_CORE_BASE|0x50) +#define BITMAP4 (MDDI_CLIENT_CORE_BASE|0x54) + +#define SRST (LCD_CONTROL_BLOCK_BASE|0x00) +#define PORT_ENB (LCD_CONTROL_BLOCK_BASE|0x04) +#define START (LCD_CONTROL_BLOCK_BASE|0x08) +#define PORT (LCD_CONTROL_BLOCK_BASE|0x0C) +#define CMN (LCD_CONTROL_BLOCK_BASE|0x10) +#define GAMMA (LCD_CONTROL_BLOCK_BASE|0x14) +#define INTFLG (LCD_CONTROL_BLOCK_BASE|0x18) +#define INTMSK (LCD_CONTROL_BLOCK_BASE|0x1C) +#define MPLFBUF (LCD_CONTROL_BLOCK_BASE|0x20) +#define HDE_LEFT (LCD_CONTROL_BLOCK_BASE|0x24) +#define VDE_TOP (LCD_CONTROL_BLOCK_BASE|0x28) +#define PXL (LCD_CONTROL_BLOCK_BASE|0x30) +#define HCYCLE (LCD_CONTROL_BLOCK_BASE|0x34) +#define HSW (LCD_CONTROL_BLOCK_BASE|0x38) +#define HDE_START (LCD_CONTROL_BLOCK_BASE|0x3C) +#define HDE_SIZE (LCD_CONTROL_BLOCK_BASE|0x40) +#define VCYCLE (LCD_CONTROL_BLOCK_BASE|0x44) +#define VSW (LCD_CONTROL_BLOCK_BASE|0x48) +#define VDE_START (LCD_CONTROL_BLOCK_BASE|0x4C) +#define VDE_SIZE (LCD_CONTROL_BLOCK_BASE|0x50) +#define WAKEUP (LCD_CONTROL_BLOCK_BASE|0x54) +#define WSYN_DLY (LCD_CONTROL_BLOCK_BASE|0x58) +#define REGENB (LCD_CONTROL_BLOCK_BASE|0x5C) +#define VSYNIF (LCD_CONTROL_BLOCK_BASE|0x60) +#define WRSTB (LCD_CONTROL_BLOCK_BASE|0x64) +#define RDSTB (LCD_CONTROL_BLOCK_BASE|0x68) +#define ASY_DATA (LCD_CONTROL_BLOCK_BASE|0x6C) +#define ASY_DATB (LCD_CONTROL_BLOCK_BASE|0x70) +#define ASY_DATC (LCD_CONTROL_BLOCK_BASE|0x74) +#define ASY_DATD (LCD_CONTROL_BLOCK_BASE|0x78) +#define ASY_DATE (LCD_CONTROL_BLOCK_BASE|0x7C) +#define ASY_DATF (LCD_CONTROL_BLOCK_BASE|0x80) +#define ASY_DATG (LCD_CONTROL_BLOCK_BASE|0x84) +#define ASY_DATH (LCD_CONTROL_BLOCK_BASE|0x88) +#define ASY_CMDSET (LCD_CONTROL_BLOCK_BASE|0x8C) + +#define SSICTL (SPI_BLOCK_BASE|0x00) +#define SSITIME (SPI_BLOCK_BASE|0x04) +#define SSITX (SPI_BLOCK_BASE|0x08) +#define SSIRX (SPI_BLOCK_BASE|0x0C) +#define SSIINTC (SPI_BLOCK_BASE|0x10) +#define SSIINTS (SPI_BLOCK_BASE|0x14) +#define SSIDBG1 (SPI_BLOCK_BASE|0x18) +#define SSIDBG2 (SPI_BLOCK_BASE|0x1C) +#define SSIID (SPI_BLOCK_BASE|0x20) + +#define WKREQ (SYSTEM_BLOCK1_BASE|0x00) +#define CLKENB (SYSTEM_BLOCK1_BASE|0x04) +#define DRAMPWR (SYSTEM_BLOCK1_BASE|0x08) +#define INTMASK (SYSTEM_BLOCK1_BASE|0x0C) +#define GPIOSEL (SYSTEM_BLOCK2_BASE|0x00) + +#define GPIODATA (GPIO_BLOCK_BASE|0x00) +#define GPIODIR (GPIO_BLOCK_BASE|0x04) +#define GPIOIS (GPIO_BLOCK_BASE|0x08) +#define GPIOIBE (GPIO_BLOCK_BASE|0x0C) +#define GPIOIEV (GPIO_BLOCK_BASE|0x10) +#define GPIOIE (GPIO_BLOCK_BASE|0x14) +#define GPIORIS (GPIO_BLOCK_BASE|0x18) +#define GPIOMIS (GPIO_BLOCK_BASE|0x1C) +#define GPIOIC (GPIO_BLOCK_BASE|0x20) +#define GPIOOMS (GPIO_BLOCK_BASE|0x24) +#define GPIOPC (GPIO_BLOCK_BASE|0x28) +#define GPIOID (GPIO_BLOCK_BASE|0x30) + +#define SPI_WRITE(reg, val) \ + { SSITX, 0x00010000 | (((reg) & 0xff) << 8) | ((val) & 0xff) }, \ + { 0, 5 }, + +#define SPI_WRITE1(reg) \ + { SSITX, (reg) & 0xff }, \ + { 0, 5 }, + +struct mddi_table { + uint32_t reg; + uint32_t value; +}; +static struct mddi_table mddi_toshiba_init_table[] = { + { DPSET0, 0x09e90046 }, + { DPSET1, 
0x00000118 }, + { DPSUS, 0x00000000 }, + { DPRUN, 0x00000001 }, + { 1, 14 }, /* msleep 14 */ + { SYSCKENA, 0x00000001 }, + /*{ CLKENB, 0x000000EF } */ + { CLKENB, 0x0000A1EF }, /* # SYS.CLKENB # Enable clocks for each module (without DCLK , i2cCLK) */ + /*{ CLKENB, 0x000025CB }, Clock enable register */ + + { GPIODATA, 0x02000200 }, /* # GPI .GPIODATA # GPIO2(RESET_LCD_N) set to 0 , GPIO3(eDRAM_Power) set to 0 */ + { GPIODIR, 0x000030D }, /* 24D # GPI .GPIODIR # Select direction of GPIO port (0,2,3,6,9 output) */ + { GPIOSEL, 0/*0x00000173*/}, /* # SYS.GPIOSEL # GPIO port multiplexing control */ + { GPIOPC, 0x03C300C0 }, /* # GPI .GPIOPC # GPIO2,3 PD cut */ + { WKREQ, 0x00000000 }, /* # SYS.WKREQ # Wake-up request event is VSYNC alignment */ + + { GPIOIBE, 0x000003FF }, + { GPIOIS, 0x00000000 }, + { GPIOIC, 0x000003FF }, + { GPIOIE, 0x00000000 }, + + { GPIODATA, 0x00040004 }, /* # GPI .GPIODATA # eDRAM VD supply */ + { 1, 1 }, /* msleep 1 */ + { GPIODATA, 0x02040004 }, /* # GPI .GPIODATA # eDRAM VD supply */ + { DRAMPWR, 0x00000001 }, /* eDRAM power */ +}; + +static struct mddi_table mddi_toshiba_panel_init_table[] = { + { SRST, 0x00000003 }, /* FIFO/LCDC not reset */ + { PORT_ENB, 0x00000001 }, /* Enable sync. Port */ + { START, 0x00000000 }, /* To stop operation */ + /*{ START, 0x00000001 }, To start operation */ + { PORT, 0x00000004 }, /* Polarity of VS/HS/DE. */ + { CMN, 0x00000000 }, + { GAMMA, 0x00000000 }, /* No Gamma correction */ + { INTFLG, 0x00000000 }, /* VSYNC interrupt flag clear/status */ + { INTMSK, 0x00000000 }, /* VSYNC interrupt mask is off. */ + { MPLFBUF, 0x00000000 }, /* Select frame buffer's base address. */ + { HDE_LEFT, 0x00000000 }, /* The value of HDE_LEFT. */ + { VDE_TOP, 0x00000000 }, /* The value of VDE_TPO. */ + { PXL, 0x00000001 }, /* 1. RGB666 */ + /* 2. Data is valid from 1st frame of beginning. */ + { HDE_START, 0x00000006 }, /* HDE_START= 14 PCLK */ + { HDE_SIZE, 0x0000009F }, /* HDE_SIZE=320 PCLK */ + { HSW, 0x00000004 }, /* HSW= 10 PCLK */ + { VSW, 0x00000001 }, /* VSW=2 HCYCLE */ + { VDE_START, 0x00000003 }, /* VDE_START=4 HCYCLE */ + { VDE_SIZE, 0x000001DF }, /* VDE_SIZE=480 HCYCLE */ + { WAKEUP, 0x000001e2 }, /* Wakeup position in VSYNC mode. */ + { WSYN_DLY, 0x00000000 }, /* Wakeup position in VSIN mode. */ + { REGENB, 0x00000001 }, /* Set 1 to enable to change the value of registers. */ + { CLKENB, 0x000025CB }, /* Clock enable register */ + + { SSICTL, 0x00000170 }, /* SSI control register */ + { SSITIME, 0x00000250 }, /* SSI timing control register */ + { SSICTL, 0x00000172 }, /* SSI control register */ +}; + + +static struct mddi_table mddi_sharp_init_table[] = { + { VCYCLE, 0x000001eb }, + { HCYCLE, 0x000000ae }, + { REGENB, 0x00000001 }, /* Set 1 to enable to change the value of registers. 
*/ + { GPIODATA, 0x00040000 }, /* GPIO2 low */ + { GPIODIR, 0x00000004 }, /* GPIO2 out */ + { 1, 1 }, /* msleep 1 */ + { GPIODATA, 0x00040004 }, /* GPIO2 high */ + { 1, 10 }, /* msleep 10 */ + SPI_WRITE(0x5f, 0x01) + SPI_WRITE1(0x11) + { 1, 200 }, /* msleep 200 */ + SPI_WRITE1(0x29) + SPI_WRITE1(0xde) + { START, 0x00000001 }, /* To start operation */ +}; + +static struct mddi_table mddi_sharp_deinit_table[] = { + { 1, 200 }, /* msleep 200 */ + SPI_WRITE(0x10, 0x1) + { 1, 100 }, /* msleep 100 */ + { GPIODATA, 0x00040004 }, /* GPIO2 high */ + { GPIODIR, 0x00000004 }, /* GPIO2 out */ + { GPIODATA, 0x00040000 }, /* GPIO2 low */ + { 1, 10 }, /* msleep 10 */ +}; + +static struct mddi_table mddi_tpo_init_table[] = { + { VCYCLE, 0x000001e5 }, + { HCYCLE, 0x000000ac }, + { REGENB, 0x00000001 }, /* Set 1 to enable to change the value of registers. */ + { 0, 20 }, /* udelay 20 */ + { GPIODATA, 0x00000004 }, /* GPIO2 high */ + { GPIODIR, 0x00000004 }, /* GPIO2 out */ + { 0, 20 }, /* udelay 20 */ + + SPI_WRITE(0x08, 0x01) + { 0, 500 }, /* udelay 500 */ + SPI_WRITE(0x08, 0x00) + SPI_WRITE(0x02, 0x00) + SPI_WRITE(0x03, 0x04) + SPI_WRITE(0x04, 0x0e) + SPI_WRITE(0x09, 0x02) + SPI_WRITE(0x0b, 0x08) + SPI_WRITE(0x0c, 0x53) + SPI_WRITE(0x0d, 0x01) + SPI_WRITE(0x0e, 0xe0) + SPI_WRITE(0x0f, 0x01) + SPI_WRITE(0x10, 0x58) + SPI_WRITE(0x20, 0x1e) + SPI_WRITE(0x21, 0x0a) + SPI_WRITE(0x22, 0x0a) + SPI_WRITE(0x23, 0x1e) + SPI_WRITE(0x25, 0x32) + SPI_WRITE(0x26, 0x00) + SPI_WRITE(0x27, 0xac) + SPI_WRITE(0x29, 0x06) + SPI_WRITE(0x2a, 0xa4) + SPI_WRITE(0x2b, 0x45) + SPI_WRITE(0x2c, 0x45) + SPI_WRITE(0x2d, 0x15) + SPI_WRITE(0x2e, 0x5a) + SPI_WRITE(0x2f, 0xff) + SPI_WRITE(0x30, 0x6b) + SPI_WRITE(0x31, 0x0d) + SPI_WRITE(0x32, 0x48) + SPI_WRITE(0x33, 0x82) + SPI_WRITE(0x34, 0xbd) + SPI_WRITE(0x35, 0xe7) + SPI_WRITE(0x36, 0x18) + SPI_WRITE(0x37, 0x94) + SPI_WRITE(0x38, 0x01) + SPI_WRITE(0x39, 0x5d) + SPI_WRITE(0x3a, 0xae) + SPI_WRITE(0x3b, 0xff) + SPI_WRITE(0x07, 0x09) + { 0, 10 }, /* udelay 10 */ + { START, 0x00000001 }, /* To start operation */ +}; + +static struct mddi_table mddi_tpo_deinit_table[] = { + SPI_WRITE(0x07, 0x19) + { START, 0x00000000 }, /* To stop operation */ + { GPIODATA, 0x00040004 }, /* GPIO2 high */ + { GPIODIR, 0x00000004 }, /* GPIO2 out */ + { GPIODATA, 0x00040000 }, /* GPIO2 low */ + { 0, 5 }, /* usleep 5 */ +}; + + +#define GPIOSEL_VWAKEINT (1U << 0) +#define INTMASK_VWAKEOUT (1U << 0) + +static void sapphire_process_mddi_table( + struct msm_mddi_client_data *client_data, + const struct mddi_table *table, + size_t count) +{ + int i; + for (i = 0; i < count; i++) { + uint32_t reg = table[i].reg; + uint32_t value = table[i].value; + + if (reg == 0) + udelay(value); + else if (reg == 1) + msleep(value); + else + client_data->remote_write(client_data, value, reg); + } +} + +static struct vreg *vreg_lcm_2v85; + +static void sapphire_mddi_power_client(struct msm_mddi_client_data *client_data, + int on) +{ + unsigned id, on_off; +#if DEBUG_SAPPHIRE_PANEL + printk(KERN_INFO "sapphire_mddi_client_power:%d\r\n", on); +#endif + if (on) { + on_off = 0; + id = PM_VREG_PDOWN_MDDI_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + + gpio_set_value(SAPPHIRE_MDDI_1V5_EN, 1); + mdelay(5); /* delay time >5ms and <10ms */ + + if (is_12pin_camera()) + gpio_set_value(V_VDDE2E_VDD2_GPIO_5M, 1); + else + gpio_set_value(V_VDDE2E_VDD2_GPIO, 1); + + gpio_set_value(SAPPHIRE_GPIO_MDDI_32K_EN, 1); + msleep(3); + id = PM_VREG_PDOWN_AUX_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + vreg_enable(vreg_lcm_2v85); + 
		msleep(3);
+	} else {
+		gpio_set_value(SAPPHIRE_GPIO_MDDI_32K_EN, 0);
+		gpio_set_value(MDDI_RST_N, 0);
+		msleep(10);
+		vreg_disable(vreg_lcm_2v85);
+		on_off = 1;
+		id = PM_VREG_PDOWN_AUX_ID;
+		msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id);
+		msleep(5);
+		if (is_12pin_camera())
+			gpio_set_value(V_VDDE2E_VDD2_GPIO_5M, 0);
+		else
+			gpio_set_value(V_VDDE2E_VDD2_GPIO, 0);
+
+		msleep(200);
+		gpio_set_value(SAPPHIRE_MDDI_1V5_EN, 0);
+		id = PM_VREG_PDOWN_MDDI_ID;
+		msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id);
+	}
+}
+
+static int sapphire_mddi_toshiba_client_init(
+	struct msm_mddi_bridge_platform_data *bridge_data,
+	struct msm_mddi_client_data *client_data)
+{
+	int panel_id;
+
+	/* Set MDDI_RST_N for the respective MDDI client
+	 * (it was originally set in sapphire_mddi_power_client()).
+	 */
+	gpio_set_value(MDDI_RST_N, 1);
+	msleep(10);
+
+	client_data->auto_hibernate(client_data, 0);
+	sapphire_process_mddi_table(client_data, mddi_toshiba_init_table,
+				    ARRAY_SIZE(mddi_toshiba_init_table));
+	client_data->auto_hibernate(client_data, 1);
+	g_panel_id = panel_id =
+		(client_data->remote_read(client_data, GPIODATA) >> 4) & 3;
+	if (panel_id > 1) {
+#if DEBUG_SAPPHIRE_PANEL
+		printk(KERN_ERR "unknown panel id at mddi_enable\n");
+#endif
+		return -1;
+	}
+	return 0;
+}
+
+static int sapphire_mddi_toshiba_client_uninit(
+	struct msm_mddi_bridge_platform_data *bridge_data,
+	struct msm_mddi_client_data *client_data)
+{
+	gpio_set_value(MDDI_RST_N, 0);
+	msleep(10);
+
+	return 0;
+}
+
+static int sapphire_mddi_panel_unblank(
+	struct msm_mddi_bridge_platform_data *bridge_data,
+	struct msm_mddi_client_data *client_data)
+{
+	int panel_id, ret = 0;
+
+	sapphire_set_backlight_level(0);
+	client_data->auto_hibernate(client_data, 0);
+	sapphire_process_mddi_table(client_data, mddi_toshiba_panel_init_table,
+				    ARRAY_SIZE(mddi_toshiba_panel_init_table));
+	panel_id = (client_data->remote_read(client_data, GPIODATA) >> 4) & 3;
+	switch (panel_id) {
+	case 0:
+#if DEBUG_SAPPHIRE_PANEL
+		printk(KERN_DEBUG "init sharp panel\n");
+#endif
+		sapphire_process_mddi_table(client_data,
+					    mddi_sharp_init_table,
+					    ARRAY_SIZE(mddi_sharp_init_table));
+		break;
+	case 1:
+#if DEBUG_SAPPHIRE_PANEL
+		printk(KERN_DEBUG "init tpo panel\n");
+#endif
+		sapphire_process_mddi_table(client_data,
+					    mddi_tpo_init_table,
+					    ARRAY_SIZE(mddi_tpo_init_table));
+		break;
+	default:
+		printk(KERN_DEBUG "unknown panel_id: %d\n", panel_id);
+		ret = -1;
+	}
+	mutex_lock(&sapphire_backlight_lock);
+	sapphire_set_backlight_level(sapphire_backlight_brightness);
+	sapphire_backlight_off = 0;
+	mutex_unlock(&sapphire_backlight_lock);
+	client_data->auto_hibernate(client_data, 1);
+	/* reenable vsync */
+	client_data->remote_write(client_data, GPIOSEL_VWAKEINT,
+				  GPIOSEL);
+	client_data->remote_write(client_data, INTMASK_VWAKEOUT,
+				  INTMASK);
+	return ret;
+}
+
+static int sapphire_mddi_panel_blank(
+	struct msm_mddi_bridge_platform_data *bridge_data,
+	struct msm_mddi_client_data *client_data)
+{
+	int panel_id, ret = 0;
+
+	panel_id = (client_data->remote_read(client_data, GPIODATA) >> 4) & 3;
+	client_data->auto_hibernate(client_data, 0);
+	switch (panel_id) {
+	case 0:
+		printk(KERN_DEBUG "deinit sharp panel\n");
+		sapphire_process_mddi_table(client_data,
+					    mddi_sharp_deinit_table,
+					    ARRAY_SIZE(mddi_sharp_deinit_table));
+		break;
+	case 1:
+		printk(KERN_DEBUG "deinit tpo panel\n");
+		sapphire_process_mddi_table(client_data,
+					    mddi_tpo_deinit_table,
+					    ARRAY_SIZE(mddi_tpo_deinit_table));
+		break;
+	default:
+		printk(KERN_DEBUG
"unknown panel_id: %d\n", panel_id); + ret = -1; + }; + client_data->auto_hibernate(client_data, 1); + mutex_lock(&sapphire_backlight_lock); + sapphire_set_backlight_level(0); + sapphire_backlight_off = 1; + mutex_unlock(&sapphire_backlight_lock); + client_data->remote_write(client_data, 0, SYSCLKENA); + client_data->remote_write(client_data, 1, DPSUS); + + return ret; +} + + +/* Initial sequence of sharp panel with Novatek NT35399 MDDI client */ +static const struct mddi_table sharp2_init_table[] = { + { 0x02A0, 0x00 }, + { 0x02A1, 0x00 }, + { 0x02A2, 0x3F }, + { 0x02A3, 0x01 }, + { 0x02B0, 0x00 }, + { 0x02B1, 0x00 }, + { 0x02B2, 0xDF }, + { 0x02B3, 0x01 }, + { 0x02D0, 0x00 }, + { 0x02D1, 0x00 }, + { 0x02D2, 0x00 }, + { 0x02D3, 0x00 }, + { 0x0350, 0x80 }, /* Set frame tearing effect(FTE) position */ + { 0x0351, 0x00 }, + { 0x0360, 0x30 }, + { 0x0361, 0xC1 }, + { 0x0362, 0x00 }, + { 0x0370, 0x00 }, + { 0x0371, 0xEF }, + { 0x0372, 0x01 }, + + { 0x0B00, 0x10 }, + + { 0x0B10, 0x00 }, + { 0x0B20, 0x22 }, + { 0x0B30, 0x46 }, + { 0x0B40, 0x07 }, + { 0x0B41, 0x1C }, + { 0x0B50, 0x0F }, + { 0x0B51, 0x7A }, + { 0x0B60, 0x16 }, + { 0x0B70, 0x0D }, + { 0x0B80, 0x04 }, + { 0x0B90, 0x07 }, + { 0x0BA0, 0x04 }, + { 0x0BA1, 0x86 }, + { 0x0BB0, 0xFF }, + { 0x0BB1, 0x01 }, + { 0x0BB2, 0xF7 }, + { 0x0BB3, 0x01 }, + { 0x0BC0, 0x00 }, + { 0x0BC1, 0x00 }, + { 0x0BC2, 0x00 }, + { 0x0BC3, 0x00 }, + { 0x0BE0, 0x01 }, + { 0x0BE1, 0x3F }, + + { 0x0BF0, 0x03 }, + + { 0x0C10, 0x02 }, + + { 0x0C30, 0x22 }, + { 0x0C31, 0x20 }, + { 0x0C40, 0x48 }, + { 0x0C41, 0x06 }, + + { 0xE00, 0x0028}, + { 0xE01, 0x002F}, + { 0xE02, 0x0032}, + { 0xE03, 0x000A}, + { 0xE04, 0x0023}, + { 0xE05, 0x0024}, + { 0xE06, 0x0022}, + { 0xE07, 0x0012}, + { 0xE08, 0x000D}, + { 0xE09, 0x0035}, + { 0xE0A, 0x000E}, + { 0xE0B, 0x001A}, + { 0xE0C, 0x003C}, + { 0xE0D, 0x003A}, + { 0xE0E, 0x0050}, + { 0xE0F, 0x0069}, + { 0xE10, 0x0006}, + { 0xE11, 0x001F}, + { 0xE12, 0x0035}, + { 0xE13, 0x0020}, + { 0xE14, 0x0043}, + { 0xE15, 0x0030}, + { 0xE16, 0x003C}, + { 0xE17, 0x0010}, + { 0xE18, 0x0009}, + { 0xE19, 0x0051}, + { 0xE1A, 0x001D}, + { 0xE1B, 0x003C}, + { 0xE1C, 0x0053}, + { 0xE1D, 0x0041}, + { 0xE1E, 0x0045}, + { 0xE1F, 0x004B}, + { 0xE20, 0x000A}, + { 0xE21, 0x0014}, + { 0xE22, 0x001C}, + { 0xE23, 0x0013}, + { 0xE24, 0x002E}, + { 0xE25, 0x0029}, + { 0xE26, 0x001B}, + { 0xE27, 0x0014}, + { 0xE28, 0x000E}, + { 0xE29, 0x0032}, + { 0xE2A, 0x000D}, + { 0xE2B, 0x001B}, + { 0xE2C, 0x0033}, + { 0xE2D, 0x0033}, + { 0xE2E, 0x005B}, + { 0xE2F, 0x0069}, + { 0xE30, 0x0006}, + { 0xE31, 0x0014}, + { 0xE32, 0x003D}, + { 0xE33, 0x0029}, + { 0xE34, 0x0042}, + { 0xE35, 0x0032}, + { 0xE36, 0x003F}, + { 0xE37, 0x000E}, + { 0xE38, 0x0008}, + { 0xE39, 0x0059}, + { 0xE3A, 0x0015}, + { 0xE3B, 0x002E}, + { 0xE3C, 0x0049}, + { 0xE3D, 0x0058}, + { 0xE3E, 0x0061}, + { 0xE3F, 0x006B}, + { 0xE40, 0x000A}, + { 0xE41, 0x001A}, + { 0xE42, 0x0022}, + { 0xE43, 0x0014}, + { 0xE44, 0x002F}, + { 0xE45, 0x002A}, + { 0xE46, 0x001A}, + { 0xE47, 0x0014}, + { 0xE48, 0x000E}, + { 0xE49, 0x002F}, + { 0xE4A, 0x000F}, + { 0xE4B, 0x001B}, + { 0xE4C, 0x0030}, + { 0xE4D, 0x002C}, + { 0xE4E, 0x0051}, + { 0xE4F, 0x0069}, + { 0xE50, 0x0006}, + { 0xE51, 0x001E}, + { 0xE52, 0x0043}, + { 0xE53, 0x002F}, + { 0xE54, 0x0043}, + { 0xE55, 0x0032}, + { 0xE56, 0x0043}, + { 0xE57, 0x000D}, + { 0xE58, 0x0008}, + { 0xE59, 0x0059}, + { 0xE5A, 0x0016}, + { 0xE5B, 0x0030}, + { 0xE5C, 0x004B}, + { 0xE5D, 0x0051}, + { 0xE5E, 0x005A}, + { 0xE5F, 0x006B}, + + { 0x0290, 0x01 }, +}; + +#undef TPO2_ONE_GAMMA +/* Initial 
sequence of TPO panel with Novatek NT35399 MDDI client */ + +static const struct mddi_table tpo2_init_table[] = { + /* Panel interface control */ + { 0xB30, 0x44 }, + { 0xB40, 0x00 }, + { 0xB41, 0x87 }, + { 0xB50, 0x06 }, + { 0xB51, 0x7B }, + { 0xB60, 0x0E }, + { 0xB70, 0x0F }, + { 0xB80, 0x03 }, + { 0xB90, 0x00 }, + { 0x350, 0x70 }, /* FTE is at line 0x70 */ + + /* Entry Mode */ + { 0x360, 0x30 }, + { 0x361, 0xC1 }, + { 0x362, 0x04 }, + +/* 0x2 for gray scale gamma correction, 0x12 for RGB gamma correction */ +#ifdef TPO2_ONE_GAMMA + { 0xB00, 0x02 }, +#else + { 0xB00, 0x12 }, +#endif + /* Driver output control */ + { 0x371, 0xEF }, + { 0x372, 0x03 }, + + /* DCDC on glass control */ + { 0xC31, 0x10 }, + { 0xBA0, 0x00 }, + { 0xBA1, 0x86 }, + + /* VCOMH voltage control */ + { 0xC50, 0x3b }, + + /* Special function control */ + { 0xC10, 0x82 }, + + /* Power control */ + { 0xC40, 0x44 }, + { 0xC41, 0x02 }, + + /* Source output control */ + { 0xBE0, 0x01 }, + { 0xBE1, 0x00 }, + + /* Windows address setting */ + { 0x2A0, 0x00 }, + { 0x2A1, 0x00 }, + { 0x2A2, 0x3F }, + { 0x2A3, 0x01 }, + { 0x2B0, 0x00 }, + { 0x2B1, 0x00 }, + { 0x2B2, 0xDF }, + { 0x2B3, 0x01 }, + + /* RAM address setting */ + { 0x2D0, 0x00 }, + { 0x2D1, 0x00 }, + { 0x2D2, 0x00 }, + { 0x2D3, 0x00 }, + + { 0xF20, 0x55 }, + { 0xF21, 0xAA }, + { 0xF22, 0x66 }, + { 0xF57, 0x45 }, + +/* + * The NT35399 provides gray or RGB gamma correction table, + * which determinated by register-0xb00, and following table + */ +#ifdef TPO2_ONE_GAMMA + /* Positive Gamma setting */ + { 0xE00, 0x04 }, + { 0xE01, 0x12 }, + { 0xE02, 0x18 }, + { 0xE03, 0x10 }, + { 0xE04, 0x29 }, + { 0xE05, 0x26 }, + { 0xE06, 0x1f }, + { 0xE07, 0x11 }, + { 0xE08, 0x0c }, + { 0xE09, 0x3a }, + { 0xE0A, 0x0d }, + { 0xE0B, 0x28 }, + { 0xE0C, 0x40 }, + { 0xE0D, 0x4e }, + { 0xE0E, 0x6f }, + { 0xE0F, 0x5E }, + + /* Negative Gamma setting */ + { 0xE10, 0x0B }, + { 0xE11, 0x00 }, + { 0xE12, 0x00 }, + { 0xE13, 0x1F }, + { 0xE14, 0x4b }, + { 0xE15, 0x33 }, + { 0xE16, 0x13 }, + { 0xE17, 0x12 }, + { 0xE18, 0x0d }, + { 0xE19, 0x2f }, + { 0xE1A, 0x16 }, + { 0xE1B, 0x2e }, + { 0xE1C, 0x49 }, + { 0xE1D, 0x41 }, + { 0xE1E, 0x46 }, + { 0xE1F, 0x55 }, +#else + /* Red Positive Gamma */ + { 0xE00, 0x0f }, + { 0xE01, 0x19 }, + { 0xE02, 0x22 }, + { 0xE03, 0x0b }, + { 0xE04, 0x23 }, + { 0xE05, 0x23 }, + { 0xE06, 0x14 }, + { 0xE07, 0x13 }, + { 0xE08, 0x0f }, + { 0xE09, 0x2a }, + { 0xE0A, 0x0d }, + { 0xE0B, 0x26 }, + { 0xE0C, 0x43 }, + { 0xE0D, 0x20 }, + { 0xE0E, 0x2a }, + { 0xE0F, 0x5c }, + + /* Red Negative Gamma */ + { 0xE10, 0x0d }, + { 0xE11, 0x45 }, + { 0xE12, 0x4c }, + { 0xE13, 0x1c }, + { 0xE14, 0x4d }, + { 0xE15, 0x33 }, + { 0xE16, 0x23 }, + { 0xE17, 0x0f }, + { 0xE18, 0x0b }, + { 0xE19, 0x3a }, + { 0xE1A, 0x19 }, + { 0xE1B, 0x32 }, + { 0xE1C, 0x4e }, + { 0xE1D, 0x37 }, + { 0xE1E, 0x38 }, + { 0xE1F, 0x3b }, + + /* Green Positive Gamma */ + { 0xE20, 0x00 }, + { 0xE21, 0x09 }, + { 0xE22, 0x10 }, + { 0xE23, 0x0f }, + { 0xE24, 0x29 }, + { 0xE25, 0x23 }, + { 0xE26, 0x0b }, + { 0xE27, 0x14 }, + { 0xE28, 0x12 }, + { 0xE29, 0x25 }, + { 0xE2A, 0x12 }, + { 0xE2B, 0x2f }, + { 0xE2C, 0x43 }, + { 0xE2D, 0x2d }, + { 0xE2E, 0x52 }, + { 0xE2F, 0x61 }, + + /* Green Negative Gamma */ + { 0xE30, 0x08 }, + { 0xE31, 0x1d }, + { 0xE32, 0x3f }, + { 0xE33, 0x1c }, + { 0xE34, 0x44 }, + { 0xE35, 0x2e }, + { 0xE36, 0x28 }, + { 0xE37, 0x0c }, + { 0xE38, 0x0a }, + { 0xE39, 0x42 }, + { 0xE3A, 0x17 }, + { 0xE3B, 0x30 }, + { 0xE3C, 0x4b }, + { 0xE3D, 0x3f }, + { 0xE3E, 0x43 }, + { 0xE3F, 0x45 }, + + /* Blue Positive Gamma 
*/ + { 0xE40, 0x32 }, + { 0xE41, 0x32 }, + { 0xE42, 0x31 }, + { 0xE43, 0x06 }, + { 0xE44, 0x08 }, + { 0xE45, 0x0d }, + { 0xE46, 0x04 }, + { 0xE47, 0x14 }, + { 0xE48, 0x0f }, + { 0xE49, 0x1d }, + { 0xE4A, 0x1a }, + { 0xE4B, 0x39 }, + { 0xE4C, 0x4c }, + { 0xE4D, 0x1e }, + { 0xE4E, 0x43 }, + { 0xE4F, 0x61 }, + + /* Blue Negative Gamma */ + { 0xE50, 0x08 }, + { 0xE51, 0x2c }, + { 0xE52, 0x4e }, + { 0xE53, 0x13 }, + { 0xE54, 0x3a }, + { 0xE55, 0x26 }, + { 0xE56, 0x30 }, + { 0xE57, 0x0f }, + { 0xE58, 0x0a }, + { 0xE59, 0x49 }, + { 0xE5A, 0x34 }, + { 0xE5B, 0x4a }, + { 0xE5C, 0x53 }, + { 0xE5D, 0x28 }, + { 0xE5E, 0x26 }, + { 0xE5F, 0x27 }, + +#endif + /* Sleep in mode */ + { 0x110, 0x00 }, + { 0x1, 0x23 }, + /* Display on mode */ + { 0x290, 0x00 }, + { 0x1, 0x27 }, + /* Driver output control */ + { 0x372, 0x01 }, + { 0x1, 0x40 }, + /* Display on mode */ + { 0x290, 0x01 }, +}; + +static const struct mddi_table tpo2_display_on[] = { + { 0x290, 0x01 }, +}; + +static const struct mddi_table tpo2_display_off[] = { + { 0x110, 0x01 }, + { 0x290, 0x00 }, + { 0x1, 100 }, +}; + +static const struct mddi_table tpo2_power_off[] = { + { 0x0110, 0x01 }, +}; + +static int nt35399_detect_panel(struct msm_mddi_client_data *client_data) +{ + int id = -1, i ; + + /* If the MDDI client is failed to report the panel ID, + * perform retrial 5 times. + */ + for( i=0; i < 5; i++ ) { + client_data->remote_write(client_data, 0, 0x110); + msleep(5); + id = client_data->remote_read(client_data, userid) ; + if( id == 0 || id == 1 ) { + if(i==0) { + printk(KERN_ERR "%s: got valid panel ID=%d, " + "without retry\n", + __FUNCTION__, id); + } + else { + printk(KERN_ERR "%s: got valid panel ID=%d, " + "after %d retry\n", + __FUNCTION__, id, i+1); + } + break ; + } + printk(KERN_ERR "%s: got invalid panel ID:%d, trial #%d\n", + __FUNCTION__, id, i+1); + + gpio_set_value(MDDI_RST_N, 0); + msleep(5); + + gpio_set_value(MDDI_RST_N, 1); + msleep(10); + gpio_set_value(MDDI_RST_N, 0); + udelay(100); + gpio_set_value(MDDI_RST_N, 1); + mdelay(10); + } + printk(KERN_INFO "%s: final panel id=%d\n", __FUNCTION__, id); + + switch(id) { + case 0: + return SAPPHIRE_PANEL_TOPPOLY; + case 1: + return SAPPHIRE_PANEL_SHARP; + default : + printk(KERN_ERR "%s(): Invalid panel ID: %d, " + "treat as sharp panel.", __FUNCTION__, id); + return SAPPHIRE_PANEL_SHARP; + } +} + +static int nt35399_client_init( + struct msm_mddi_bridge_platform_data *bridge_data, + struct msm_mddi_client_data *client_data) +{ + int panel_id; + + if (g_panel_inited == 0) { + g_panel_id = panel_id = nt35399_detect_panel(client_data); + g_panel_inited = 1 ; + } else { + gpio_set_value(MDDI_RST_N, 1); + msleep(10); + gpio_set_value(MDDI_RST_N, 0); + udelay(100); + gpio_set_value(MDDI_RST_N, 1); + mdelay(10); + + g_panel_id = panel_id = nt35399_detect_panel(client_data); + if (panel_id == -1) { + printk("Invalid panel id\n"); + return -1; + } + + client_data->auto_hibernate(client_data, 0); + if (panel_id == SAPPHIRE_PANEL_TOPPOLY) { + sapphire_process_mddi_table(client_data, tpo2_init_table, + ARRAY_SIZE(tpo2_init_table)); + } else if(panel_id == SAPPHIRE_PANEL_SHARP) { + sapphire_process_mddi_table(client_data, sharp2_init_table, + ARRAY_SIZE(sharp2_init_table)); + } + + client_data->auto_hibernate(client_data, 1); + } + + return 0; +} + +static int nt35399_client_uninit( + struct msm_mddi_bridge_platform_data *bridge_data, + struct msm_mddi_client_data *cdata) +{ + return 0; +} + +static int nt35399_panel_unblank( + struct msm_mddi_bridge_platform_data *bridge_data, + struct 
msm_mddi_client_data *client_data) +{ + int ret = 0; + + mdelay(20); + sapphire_set_backlight_level(0); + client_data->auto_hibernate(client_data, 0); + + mutex_lock(&sapphire_backlight_lock); + sapphire_set_backlight_level(sapphire_backlight_brightness); + sapphire_backlight_off = 0; + mutex_unlock(&sapphire_backlight_lock); + + client_data->auto_hibernate(client_data, 1); + + return ret; +} + +static int nt35399_panel_blank( + struct msm_mddi_bridge_platform_data *bridge_data, + struct msm_mddi_client_data *client_data) +{ + int ret = 0; + + client_data->auto_hibernate(client_data, 0); + sapphire_process_mddi_table(client_data, tpo2_display_off, + ARRAY_SIZE(tpo2_display_off)); + client_data->auto_hibernate(client_data, 1); + + mutex_lock(&sapphire_backlight_lock); + sapphire_set_backlight_level(0); + sapphire_backlight_off = 1; + mutex_unlock(&sapphire_backlight_lock); + + return ret; +} + +static void sapphire_brightness_set(struct led_classdev *led_cdev, enum led_brightness value) +{ + mutex_lock(&sapphire_backlight_lock); + sapphire_backlight_brightness = value; + if (!sapphire_backlight_off) + sapphire_set_backlight_level(sapphire_backlight_brightness); + mutex_unlock(&sapphire_backlight_lock); +} + +static struct led_classdev sapphire_backlight_led = { + .name = "lcd-backlight", + .brightness = SAPPHIRE_DEFAULT_BACKLIGHT_BRIGHTNESS, + .brightness_set = sapphire_brightness_set, +}; + +static int sapphire_backlight_probe(struct platform_device *pdev) +{ + led_classdev_register(&pdev->dev, &sapphire_backlight_led); + return 0; +} + +static int sapphire_backlight_remove(struct platform_device *pdev) +{ + led_classdev_unregister(&sapphire_backlight_led); + return 0; +} + +static struct platform_driver sapphire_backlight_driver = { + .probe = sapphire_backlight_probe, + .remove = sapphire_backlight_remove, + .driver = { + .name = "sapphire-backlight", + .owner = THIS_MODULE, + }, +}; + +static struct resource resources_msm_fb[] = { + { + .start = SMI64_MSM_FB_BASE, + .end = SMI64_MSM_FB_BASE + SMI64_MSM_FB_SIZE - 1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct msm_mddi_bridge_platform_data toshiba_client_data = { + .init = sapphire_mddi_toshiba_client_init, + .uninit = sapphire_mddi_toshiba_client_uninit, + .blank = sapphire_mddi_panel_blank, + .unblank = sapphire_mddi_panel_unblank, + .fb_data = { + .xres = 320, + .yres = 480, + .width = 45, + .height = 67, + .output_format = 0, + }, +}; + +#define NT35399_MFR_NAME 0x0bda +#define NT35399_PRODUCT_CODE 0x8a47 + +static void nt35399_fixup(uint16_t * mfr_name, uint16_t * product_code) +{ + printk(KERN_DEBUG "%s: enter.\n", __func__); + *mfr_name = NT35399_MFR_NAME ; + *product_code= NT35399_PRODUCT_CODE ; +} + +static struct msm_mddi_bridge_platform_data nt35399_client_data = { + + .init = nt35399_client_init, + .uninit = nt35399_client_uninit, + .blank = nt35399_panel_blank, + .unblank = nt35399_panel_unblank, + .fb_data = { + .xres = 320, + .yres = 480, + .output_format = 0, + }, +}; + +static struct msm_mddi_platform_data mddi_pdata = { + .clk_rate = 122880000, + .power_client = sapphire_mddi_power_client, + .fixup = nt35399_fixup, + .vsync_irq = MSM_GPIO_TO_INT(VSYNC_GPIO), + .fb_resource = resources_msm_fb, + .num_clients = 2, + .client_platform_data = { + { + .product_id = (0xd263 << 16 | 0), + .name = "mddi_c_d263_0000", + .id = 0, + .client_data = &toshiba_client_data, + .clk_rate = 0, + }, + { + .product_id = + (NT35399_MFR_NAME << 16 | NT35399_PRODUCT_CODE), + .name = "mddi_c_simple" , + .id = 0, + .client_data = 
&nt35399_client_data, + .clk_rate = 0, + }, + }, +}; + +static struct platform_device sapphire_backlight = { + .name = "sapphire-backlight", +}; + +int __init sapphire_init_panel(void) +{ + int rc = -1; + uint32_t config = PCOM_GPIO_CFG(27, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA); /* GPIO27 */ + + if (!machine_is_sapphire()) + return 0; + + /* checking board as soon as possible */ + printk("sapphire_init_panel:machine_is_sapphire=%d, machine_arch_type=%d, MACH_TYPE_SAPPHIRE=%d\r\n", machine_is_sapphire(), machine_arch_type, MACH_TYPE_SAPPHIRE); + if (!machine_is_sapphire()) + return 0; + + vreg_lcm_2v85 = vreg_get(0, "gp4"); + if (IS_ERR(vreg_lcm_2v85)) + return PTR_ERR(vreg_lcm_2v85); + + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &config, 0); + + /* setup FB by SMI size */ + if (sapphire_get_smi_size() == 32) { + resources_msm_fb[0].start = SMI32_MSM_FB_BASE; + resources_msm_fb[0].end = SMI32_MSM_FB_BASE + SMI32_MSM_FB_SIZE - 1; + } + + rc = gpio_request(VSYNC_GPIO, "vsync"); + if (rc) + return rc; + rc = gpio_direction_input(VSYNC_GPIO); + if (rc) + return rc; + rc = platform_device_register(&msm_device_mdp); + if (rc) + return rc; + msm_device_mddi0.dev.platform_data = &mddi_pdata; + rc = platform_device_register(&msm_device_mddi0); + if (rc) + return rc; + platform_device_register(&sapphire_backlight); + return platform_driver_register(&sapphire_backlight_driver); +} + +device_initcall(sapphire_init_panel); diff --git a/arch/arm/mach-msm/board-sapphire-rfkill.c b/arch/arm/mach-msm/board-sapphire-rfkill.c new file mode 100644 index 0000000000000..fba0a16618ebe --- /dev/null +++ b/arch/arm/mach-msm/board-sapphire-rfkill.c @@ -0,0 +1,104 @@ +/* linux/arch/arm/mach-msm/board-sapphire-rfkill.c + * Copyright (C) 2007-2009 HTC Corporation. + * Author: Thomas Tsai + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+*/ + +/* Control bluetooth power for sapphire platform */ + +#include +#include +#include +#include +#include +#include +#include +#include "board-sapphire.h" + +static struct rfkill *bt_rfk; +static const char bt_name[] = "brf6300"; + +extern int sapphire_bt_fastclock_power(int on); + +static int bluetooth_set_power(void *data, bool blocked) +{ + if (!blocked) { + sapphire_bt_fastclock_power(1); + gpio_set_value(SAPPHIRE_GPIO_BT_32K_EN, 1); + udelay(10); + gpio_direction_output(101, 1); + } else { + gpio_direction_output(101, 0); + gpio_set_value(SAPPHIRE_GPIO_BT_32K_EN, 0); + sapphire_bt_fastclock_power(0); + } + return 0; +} + +static struct rfkill_ops sapphire_rfkill_ops = { + .set_block = bluetooth_set_power, +}; + +static int sapphire_rfkill_probe(struct platform_device *pdev) +{ + int rc = 0; + bool default_state = true; /* off */ + + bluetooth_set_power(NULL, default_state); + + bt_rfk = rfkill_alloc(bt_name, &pdev->dev, RFKILL_TYPE_BLUETOOTH, + &sapphire_rfkill_ops, NULL); + if (!bt_rfk) + return -ENOMEM; + + /* userspace cannot take exclusive control */ + + rfkill_set_states(bt_rfk, default_state, false); + + rc = rfkill_register(bt_rfk); + + if (rc) + rfkill_destroy(bt_rfk); + return rc; +} + +static int sapphire_rfkill_remove(struct platform_device *dev) +{ + rfkill_unregister(bt_rfk); + rfkill_destroy(bt_rfk); + + return 0; +} + +static struct platform_driver sapphire_rfkill_driver = { + .probe = sapphire_rfkill_probe, + .remove = sapphire_rfkill_remove, + .driver = { + .name = "sapphire_rfkill", + .owner = THIS_MODULE, + }, +}; + +static int __init sapphire_rfkill_init(void) +{ + return platform_driver_register(&sapphire_rfkill_driver); +} + +static void __exit sapphire_rfkill_exit(void) +{ + platform_driver_unregister(&sapphire_rfkill_driver); +} + +module_init(sapphire_rfkill_init); +module_exit(sapphire_rfkill_exit); +MODULE_DESCRIPTION("sapphire rfkill"); +MODULE_AUTHOR("Nick Pelly "); +MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-msm/board-sapphire-wifi.c b/arch/arm/mach-msm/board-sapphire-wifi.c new file mode 100644 index 0000000000000..43f827c60f13e --- /dev/null +++ b/arch/arm/mach-msm/board-sapphire-wifi.c @@ -0,0 +1,74 @@ +/* arch/arm/mach-msm/board-sapphire-wifi.c + * + * Copyright (C) 2008 Google, Inc. + * Author: Dmitry Shmidt + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifdef CONFIG_WIFI_CONTROL_FUNC +#include +#include +#include +#include +#include +#include + +extern int sapphire_wifi_set_carddetect(int val); +extern int sapphire_wifi_power(int on); +extern int sapphire_wifi_reset(int on); + +#ifdef CONFIG_WIFI_MEM_PREALLOC +typedef struct wifi_mem_prealloc_struct { + void *mem_ptr; + unsigned long size; +} wifi_mem_prealloc_t; + +static wifi_mem_prealloc_t wifi_mem_array[WMPA_NUMBER_OF_SECTIONS] = { + { NULL, (WMPA_SECTION_SIZE_0 + WMPA_SECTION_HEADER) }, + { NULL, (WMPA_SECTION_SIZE_1 + WMPA_SECTION_HEADER) }, + { NULL, (WMPA_SECTION_SIZE_2 + WMPA_SECTION_HEADER) } +}; + +static void *sapphire_wifi_mem_prealloc(int section, unsigned long size) +{ + if ((section < 0) || (section >= WMPA_NUMBER_OF_SECTIONS)) + return NULL; + if (wifi_mem_array[section].size < size) + return NULL; + return wifi_mem_array[section].mem_ptr; +} + +int __init sapphire_init_wifi_mem (void) +{ + int i; + + for (i = 0; (i < WMPA_NUMBER_OF_SECTIONS); i++) { + wifi_mem_array[i].mem_ptr = vmalloc(wifi_mem_array[i].size); + if (wifi_mem_array[i].mem_ptr == NULL) + return -ENOMEM; + } + return 0; +} +#endif + +struct wifi_platform_data sapphire_wifi_control = { + .set_power = sapphire_wifi_power, + .set_reset = sapphire_wifi_reset, + .set_carddetect = sapphire_wifi_set_carddetect, +#ifdef CONFIG_WIFI_MEM_PREALLOC + .mem_prealloc = sapphire_wifi_mem_prealloc, +#else + .mem_prealloc = NULL, +#endif +}; + +#endif diff --git a/arch/arm/mach-msm/board-sapphire.c b/arch/arm/mach-msm/board-sapphire.c index 8919ffb171960..229d7284eb166 100644 --- a/arch/arm/mach-msm/board-sapphire.c +++ b/arch/arm/mach-msm/board-sapphire.c @@ -17,8 +17,17 @@ #include #include #include +#include #include +#include +#include +#include +#include +#include +#include +#include #include +#include #include @@ -31,40 +40,1159 @@ #include #include #include -#include #include #include #include +#include #include #include -#include "gpio_chip.h" +#include +#include + #include "board-sapphire.h" + +#include +#include +#include +#include +#include + +#ifdef CONFIG_WIFI_CONTROL_FUNC +#ifdef CONFIG_WIFI_MEM_PREALLOC +extern int sapphire_init_wifi_mem(void); +#endif +extern struct wifi_platform_data sapphire_wifi_control; +#endif + #include "proc_comm.h" #include "devices.h" void msm_init_irq(void); void msm_init_gpio(void); +void msm_init_pmic_vibrator(void); + +extern int sapphire_init_mmc(unsigned int); + +struct sapphire_axis_info { + struct gpio_event_axis_info info; + uint16_t in_state; + uint16_t out_state; + uint16_t temp_state; + uint16_t threshold; +}; +static bool nav_just_on; +static int nav_on_jiffies; +static int smi_sz = 64; +static unsigned int hwid = 0; +static unsigned int skuid = 0; +static unsigned engineerid = (0x01 << 1); /* default is 3M sensor */ + +uint16_t sapphire_axis_map(struct gpio_event_axis_info *info, uint16_t in) +{ + struct sapphire_axis_info *ai = container_of(info, struct sapphire_axis_info, info); + uint16_t out = ai->out_state; + + if (nav_just_on) { + if (jiffies == nav_on_jiffies || jiffies == nav_on_jiffies + 1) + goto ignore; + nav_just_on = 0; + } + if ((ai->in_state ^ in) & 1) + out--; + if ((ai->in_state ^ in) & 2) + out++; + ai->out_state = out; +ignore: + ai->in_state = in; + if (ai->out_state - ai->temp_state == ai->threshold) { + ai->temp_state++; + ai->out_state = ai->temp_state; + } else if (ai->temp_state - ai->out_state == ai->threshold) { + ai->temp_state--; + ai->out_state = ai->temp_state; + } else if (abs(ai->out_state - ai->temp_state) > ai->threshold) 
+ ai->temp_state = ai->out_state; + + return ai->temp_state; +} + +int sapphire_nav_power(const struct gpio_event_platform_data *pdata, bool on) +{ + gpio_set_value(SAPPHIRE_GPIO_JOG_EN, on); + if (on) { + nav_just_on = 1; + nav_on_jiffies = jiffies; + } + return 0; +} + +static uint32_t sapphire_x_axis_gpios[] = { + SAPPHIRE_BALL_LEFT_0, SAPPHIRE_BALL_RIGHT_0 +}; + +static struct sapphire_axis_info sapphire_x_axis = { + .threshold = 2, + .info = { + .info.func = gpio_event_axis_func, + .count = ARRAY_SIZE(sapphire_x_axis_gpios), + .type = EV_REL, + .code = REL_X, + .decoded_size = 1U << ARRAY_SIZE(sapphire_x_axis_gpios), + .map = sapphire_axis_map, + .gpio = sapphire_x_axis_gpios, + .flags = GPIOEAF_PRINT_UNKNOWN_DIRECTION /*| GPIOEAF_PRINT_RAW | GPIOEAF_PRINT_EVENT */ + } +}; + +static uint32_t sapphire_y_axis_gpios[] = { + SAPPHIRE_BALL_UP_0, SAPPHIRE_BALL_DOWN_0 +}; + +static struct sapphire_axis_info sapphire_y_axis = { + .threshold = 2, + .info = { + .info.func = gpio_event_axis_func, + .count = ARRAY_SIZE(sapphire_y_axis_gpios), + .type = EV_REL, + .code = REL_Y, + .decoded_size = 1U << ARRAY_SIZE(sapphire_y_axis_gpios), + .map = sapphire_axis_map, + .gpio = sapphire_y_axis_gpios, + .flags = GPIOEAF_PRINT_UNKNOWN_DIRECTION /*| GPIOEAF_PRINT_RAW | GPIOEAF_PRINT_EVENT */ + } +}; + +static struct gpio_event_direct_entry sapphire_nav_buttons[] = { + { SAPPHIRE_GPIO_NAVI_ACT_N, BTN_MOUSE }, +}; + +static struct gpio_event_input_info sapphire_nav_button_info = { + .info.func = gpio_event_input_func, + .flags = GPIOEDF_PRINT_KEYS | GPIOEDF_PRINT_KEY_DEBOUNCE, + .poll_time.tv.nsec = 40 * NSEC_PER_MSEC, + .type = EV_KEY, + .keymap = sapphire_nav_buttons, + .keymap_size = ARRAY_SIZE(sapphire_nav_buttons) +}; + +static struct gpio_event_info *sapphire_nav_info[] = { + &sapphire_x_axis.info.info, + &sapphire_y_axis.info.info, + &sapphire_nav_button_info.info +}; + +static struct gpio_event_platform_data sapphire_nav_data = { + .name = "sapphire-nav", + .info = sapphire_nav_info, + .info_count = ARRAY_SIZE(sapphire_nav_info), + .power = sapphire_nav_power, +}; + +static struct platform_device sapphire_nav_device = { + .name = GPIO_EVENT_DEV_NAME, + .id = 2, + .dev = { + .platform_data = &sapphire_nav_data, + }, +}; + +/* a new search button to be a wake-up source */ +static struct gpio_event_direct_entry sapphire_search_button_v1[] = { + { SAPPHIRE_GPIO_SEARCH_ACT_N, KEY_COMPOSE }, /* CPLD Key Search*/ +}; + +static struct gpio_event_direct_entry sapphire_search_button_v2[] = { + { SAPPHIRE_GPIO_SEARCH_ACT_N, KEY_HOME }, /* CPLD Key Home */ +}; + +static struct gpio_event_input_info sapphire_search_button_info = { + .info.func = gpio_event_input_func, + /* .flags = GPIOEDF_PRINT_KEYS | GPIOEDF_PRINT_KEY_DEBOUNCE, */ + .flags = 0, + .poll_time.tv.nsec = 40 * NSEC_PER_MSEC, + .type = EV_KEY, + .keymap = sapphire_search_button_v2, + .keymap_size = ARRAY_SIZE(sapphire_search_button_v2) +}; + +static struct gpio_event_info *sapphire_search_info[] = { + &sapphire_search_button_info.info +}; + +static struct gpio_event_platform_data sapphire_search_button_data = { + .name = "sapphire-nav-button", + .info = sapphire_search_info, + .info_count = ARRAY_SIZE(sapphire_search_info), +}; + +static struct platform_device sapphire_search_button_device = { + .name = GPIO_EVENT_DEV_NAME, + .id = 1, + .dev = { + .platform_data = &sapphire_search_button_data, + }, +}; + +static int sapphire_reset_keys_up[] = { + BTN_MOUSE, + 0 +}; + +static struct keyreset_platform_data sapphire_reset_keys_pdata = { + .keys_up = 
sapphire_reset_keys_up, + .keys_down = { + KEY_SEND, + KEY_MENU, + KEY_END, + 0 + }, +}; + +struct platform_device sapphire_reset_keys_device = { + .name = KEYRESET_NAME, + .dev.platform_data = &sapphire_reset_keys_pdata, +}; + +static int gpio_tp_ls_en = SAPPHIRE_TP_LS_EN; + +static int sapphire_ts_power(int on) +{ + if (on) { + gpio_set_value(SAPPHIRE_GPIO_TP_EN, 1); + /* touchscreen must be powered before we enable i2c pullup */ + msleep(2); + /* enable touch panel level shift */ + gpio_direction_output(gpio_tp_ls_en, 1); + msleep(2); + } else { + gpio_direction_output(gpio_tp_ls_en, 0); + udelay(50); + gpio_set_value(SAPPHIRE_GPIO_TP_EN, 0); + } + + return 0; +} + +static struct synaptics_i2c_rmi_platform_data sapphire_ts_data[] = { +{ + .version = 0x0101, + .power = sapphire_ts_power, + .flags = SYNAPTICS_FLIP_Y | SYNAPTICS_SNAP_TO_INACTIVE_EDGE, + .inactive_left = -50 * 0x10000 / 4334, + .inactive_right = -50 * 0x10000 / 4334, + .inactive_top = -40 * 0x10000 / 6696, + .inactive_bottom = -40 * 0x10000 / 6696, + .snap_left_on = 50 * 0x10000 / 4334, + .snap_left_off = 60 * 0x10000 / 4334, + .snap_right_on = 50 * 0x10000 / 4334, + .snap_right_off = 60 * 0x10000 / 4334, + .snap_top_on = 100 * 0x10000 / 6696, + .snap_top_off = 110 * 0x10000 / 6696, + .snap_bottom_on = 100 * 0x10000 / 6696, + .snap_bottom_off = 110 * 0x10000 / 6696, + }, + { + .flags = SYNAPTICS_FLIP_Y | SYNAPTICS_SNAP_TO_INACTIVE_EDGE, + .inactive_left = ((4674 - 4334) / 2 + 200) * 0x10000 / 4334, + .inactive_right = ((4674 - 4334) / 2 + 200) * 0x10000 / 4334, + .inactive_top = ((6946 - 6696) / 2) * 0x10000 / 6696, + .inactive_bottom = ((6946 - 6696) / 2) * 0x10000 / 6696, + } +}; + +static struct akm8976_platform_data compass_platform_data = { + .reset = SAPPHIRE_GPIO_COMPASS_RST_N, + .clk_on = SAPPHIRE_GPIO_COMPASS_32K_EN, + .intr = SAPPHIRE_GPIO_COMPASS_IRQ, +}; + +static struct elan_i2c_platform_data elan_i2c_data[] = { + { + .version = 0x104, + .abs_x_min = 0, + .abs_y_min = 0, + .intr_gpio = SAPPHIRE_GPIO_TP_ATT_N, + .power = sapphire_ts_power, + }, + { + .version = 0x103, + .abs_x_min = 0, + .abs_x_max = 512 * 2, + .abs_y_min = 0, + .abs_y_max = 896 * 2, + .intr_gpio = SAPPHIRE_GPIO_TP_ATT_N, + .power = sapphire_ts_power, + }, + { + .version = 0x102, + .abs_x_min = 0, + .abs_x_max = 384, + .abs_y_min = 0, + .abs_y_max = 576, + .intr_gpio = SAPPHIRE_GPIO_TP_ATT_N, + .power = sapphire_ts_power, + }, + { + .version = 0x101, + .abs_x_min = 32 + 1, + .abs_x_max = 352 - 1, + .abs_y_min = 32 + 1, + .abs_y_max = 544 - 1, + .intr_gpio = SAPPHIRE_GPIO_TP_ATT_N, + .power = sapphire_ts_power, + } +}; + +static struct i2c_board_info i2c_devices[] = { + { + I2C_BOARD_INFO(SYNAPTICS_I2C_RMI_NAME, 0x20), + .platform_data = sapphire_ts_data, + .irq = SAPPHIRE_GPIO_TO_INT(SAPPHIRE_GPIO_TP_ATT_N) + }, + { + I2C_BOARD_INFO(ELAN_8232_I2C_NAME, 0x10), + .platform_data = &elan_i2c_data, + .irq = SAPPHIRE_GPIO_TO_INT(SAPPHIRE_GPIO_TP_ATT_N), + }, + { + I2C_BOARD_INFO("akm8976", 0x1C), + .platform_data = &compass_platform_data, + .irq = SAPPHIRE_GPIO_TO_INT(SAPPHIRE_GPIO_COMPASS_IRQ), + }, +#ifdef CONFIG_MSM_CAMERA +#ifdef CONFIG_MT9P012 + { + I2C_BOARD_INFO("mt9p012", 0x6C >> 1), + }, +#endif +#ifdef CONFIG_MT9T013 + { + I2C_BOARD_INFO("mt9t013", 0x6C), + }, +#endif +#endif/*CONIFIG_MSM_CAMERA*/ +#ifdef CONFIG_SENSORS_MT9T013 + { + I2C_BOARD_INFO("mt9t013", 0x6C >> 1), + }, +#endif +}; + +#ifdef CONFIG_LEDS_CPLD +static struct resource cpldled_resources[] = { + { + .start = SAPPHIRE_CPLD_LED_BASE, + .end = SAPPHIRE_CPLD_LED_BASE + 
SAPPHIRE_CPLD_LED_SIZE - 1, + .flags = IORESOURCE_MEM, + } +}; + +static struct platform_device android_CPLD_leds = { + .name = "leds-cpld", + .id = -1, + .num_resources = ARRAY_SIZE(cpldled_resources), + .resource = cpldled_resources, +}; +#endif + +static struct gpio_led android_led_list[] = { + { + .name = "button-backlight", + .gpio = SAPPHIRE_GPIO_APKEY_LED_EN, + }, +}; + +static struct gpio_led_platform_data android_leds_data = { + .num_leds = ARRAY_SIZE(android_led_list), + .leds = android_led_list, +}; + +static struct platform_device android_leds = { + .name = "leds-gpio", + .id = -1, + .dev = { + .platform_data = &android_leds_data, + }, +}; + +#ifdef CONFIG_HTC_HEADSET +/* RTS/CTS to GPO/GPI. */ +static uint32_t uart1_on_gpio_table[] = { + /* allenou, uart hs test, 2008/11/18 */ + #ifdef CONFIG_SERIAL_MSM_HS + /* RTS */ + PCOM_GPIO_CFG(SAPPHIRE_GPIO_UART1_RTS, 2, + GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), + /* CTS */ + PCOM_GPIO_CFG(SAPPHIRE_GPIO_UART1_CTS, 2, + GPIO_INPUT, GPIO_PULL_UP, GPIO_8MA), + #else + /* RTS */ + PCOM_GPIO_CFG(SAPPHIRE_GPIO_UART1_RTS, 1, + GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), + /* CTS */ + PCOM_GPIO_CFG(SAPPHIRE_GPIO_UART1_CTS, 1, + GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), + #endif +}; + +/* RTS,CTS to BT. */ +static uint32_t uart1_off_gpio_table[] = { + /* RTS */ + PCOM_GPIO_CFG(SAPPHIRE_GPIO_UART1_RTS, 0, + GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), + /* CTS */ + PCOM_GPIO_CFG(SAPPHIRE_GPIO_UART1_CTS, 0, + GPIO_INPUT, GPIO_NO_PULL, GPIO_2MA), +}; + +/* Sapphire: Switch between UART3 and GPIO */ +static uint32_t uart3_on_gpio_table[] = { + /* RX */ + PCOM_GPIO_CFG(SAPPHIRE_GPIO_UART3_RX, 1, + GPIO_INPUT, GPIO_NO_PULL, GPIO_2MA), + /* TX */ + PCOM_GPIO_CFG(SAPPHIRE_GPIO_UART3_TX, 1, + GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), +}; + +/* set TX,RX to GPI */ +static uint32_t uart3_off_gpi_table[] = { + /* RX, H2W DATA */ + PCOM_GPIO_CFG(SAPPHIRE_GPIO_H2W_DATA, 0, + GPIO_INPUT, GPIO_NO_PULL, GPIO_2MA), + /* TX, H2W CLK */ + PCOM_GPIO_CFG(SAPPHIRE_GPIO_H2W_CLK, 0, + GPIO_INPUT, GPIO_KEEPER, GPIO_2MA), +}; + +static int sapphire_h2w_path = H2W_GPIO; + +static void h2w_config_cpld(int route) +{ + switch (route) { + case H2W_UART1: + /* Make sure uart1 funtion pin opened. */ + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, + uart1_on_gpio_table+0, 0); + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, + uart1_on_gpio_table+1, 0); + gpio_set_value(SAPPHIRE_GPIO_H2W_SEL0, 1); + gpio_set_value(SAPPHIRE_GPIO_H2W_SEL1, 0); + sapphire_h2w_path = H2W_UART1; + printk(KERN_INFO "H2W route = H2W-UART1, BT-X, UART3-X \n"); + break; + case H2W_BT: + gpio_set_value(SAPPHIRE_GPIO_H2W_SEL0, 1); + gpio_set_value(SAPPHIRE_GPIO_H2W_SEL1, 1); + /* UART1 RTS/CTS to GPO/GPI. */ + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, + uart1_off_gpio_table+0, 0); + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, + uart1_off_gpio_table+1, 0); + sapphire_h2w_path = H2W_BT; + printk(KERN_INFO "H2W route = H2W-BT, UART1-X, UART3-X \n"); + break; + case H2W_UART3: + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, + uart3_on_gpio_table+0, 0); + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, + uart3_on_gpio_table+1, 0); + gpio_set_value(SAPPHIRE_GPIO_H2W_SEL0, 0); + gpio_set_value(SAPPHIRE_GPIO_H2W_SEL1, 1); + /* Make sure uart1 funtion pin opened. 
*/ + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, + uart1_on_gpio_table+0, 0); + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, + uart1_on_gpio_table+1, 0); + sapphire_h2w_path = H2W_UART3; + printk(KERN_INFO "H2W route = H2W-UART3, BT-UART1 \n"); + break; + case H2W_GPIO: /*H2W_UART3 TX,RX are changed to H2W_GPIO */ + default: + gpio_set_value(SAPPHIRE_GPIO_H2W_SEL0, 0); + gpio_set_value(SAPPHIRE_GPIO_H2W_SEL1, 0); + /* Set the CPLD connected H2W GPIO's to input */ + gpio_set_value(SAPPHIRE_GPIO_H2W_CLK_DIR, 0); + gpio_set_value(SAPPHIRE_GPIO_H2W_DAT_DIR, 0); + /* TX,RX GPI first. */ + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, + uart3_off_gpi_table+0, 0); + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, + uart3_off_gpi_table+1, 0); + /* Make sure uart1 funtion pin opened. */ + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, + uart1_on_gpio_table+0, 0); + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, + uart1_on_gpio_table+1, 0); + sapphire_h2w_path = H2W_GPIO; + printk(KERN_INFO "H2W route = H2W-GPIO, BT-UART1 \n"); + break; + } +} + +static void h2w_init_cpld(void) +{ + h2w_config_cpld(H2W_UART3); +} + +static int h2w_dat_value; +static void set_h2w_dat(int n) +{ + h2w_dat_value = n; + gpio_set_value(SAPPHIRE_GPIO_H2W_DATA, n); +} + +static int h2w_clk_value; +static void set_h2w_clk(int n) +{ + h2w_clk_value = n; + gpio_set_value(SAPPHIRE_GPIO_H2W_CLK, n); +} + +static void set_h2w_dat_dir(int n) +{ + if (n == 0) /* input */ + gpio_direction_input(SAPPHIRE_GPIO_H2W_DATA); + else + gpio_direction_output(SAPPHIRE_GPIO_H2W_DATA, h2w_dat_value); + + gpio_set_value(SAPPHIRE_GPIO_H2W_DAT_DIR, n); + +} + +static void set_h2w_clk_dir(int n) +{ + if (n == 0) /* input */ + gpio_direction_input(SAPPHIRE_GPIO_H2W_CLK); + else + gpio_direction_output(SAPPHIRE_GPIO_H2W_CLK, h2w_clk_value); + + gpio_set_value(SAPPHIRE_GPIO_H2W_CLK_DIR, n); +} + +static int get_h2w_dat(void) +{ + return gpio_get_value(SAPPHIRE_GPIO_H2W_DATA); +} + +static int get_h2w_clk(void) +{ + return gpio_get_value(SAPPHIRE_GPIO_H2W_CLK); +} + +static int set_h2w_path(const char *val, struct kernel_param *kp) +{ + int ret = -EINVAL; + + ret = param_set_int(val, kp); + if (ret) + return ret; + + switch (sapphire_h2w_path) { + case H2W_GPIO: + case H2W_UART1: + case H2W_UART3: + case H2W_BT: + break; + default: + sapphire_h2w_path = -1; + return -EINVAL; + } + + h2w_config_cpld(sapphire_h2w_path); + return ret; +} +module_param_call(h2w_path, set_h2w_path, param_get_int, + &sapphire_h2w_path, S_IWUSR | S_IRUGO); + + +static struct h2w_platform_data sapphire_h2w_data = { + .power_name = "wlan", + .cable_in1 = SAPPHIRE_GPIO_CABLE_IN1, + .cable_in2 = SAPPHIRE_GPIO_CABLE_IN2, + .h2w_clk = SAPPHIRE_GPIO_H2W_CLK, + .h2w_data = SAPPHIRE_GPIO_H2W_DATA, + .headset_mic_35mm = SAPPHIRE_GPIO_AUD_HSMIC_DET_N, + .debug_uart = H2W_UART3, + .config_cpld = h2w_config_cpld, + .init_cpld = h2w_init_cpld, + .set_dat = set_h2w_dat, + .set_clk = set_h2w_clk, + .set_dat_dir = set_h2w_dat_dir, + .set_clk_dir = set_h2w_clk_dir, + .get_dat = get_h2w_dat, + .get_clk = get_h2w_clk, +}; + +static struct platform_device sapphire_h2w = { + .name = "h2w", + .id = -1, + .dev = { + .platform_data = &sapphire_h2w_data, + }, +}; +#endif + +static void sapphire_phy_reset(void) +{ + gpio_set_value(SAPPHIRE_GPIO_USB_PHY_RST_N, 0); + mdelay(10); + gpio_set_value(SAPPHIRE_GPIO_USB_PHY_RST_N, 1); + mdelay(10); +} + +static struct pwr_sink sapphire_pwrsink_table[] = { + { + .id = PWRSINK_AUDIO, + .ua_max = 100000, + }, + { + .id = PWRSINK_BACKLIGHT, + .ua_max = 125000, + }, + { + .id = 
PWRSINK_LED_BUTTON, + .ua_max = 0, + }, + { + .id = PWRSINK_LED_KEYBOARD, + .ua_max = 0, + }, + { + .id = PWRSINK_GP_CLK, + .ua_max = 0, + }, + { + .id = PWRSINK_BLUETOOTH, + .ua_max = 15000, + }, + { + .id = PWRSINK_CAMERA, + .ua_max = 0, + }, + { + .id = PWRSINK_SDCARD, + .ua_max = 0, + }, + { + .id = PWRSINK_VIDEO, + .ua_max = 0, + }, + { + .id = PWRSINK_WIFI, + .ua_max = 200000, + }, + { + .id = PWRSINK_SYSTEM_LOAD, + .ua_max = 100000, + .percent_util = 38, + }, +}; + +static int sapphire_pwrsink_resume_early(struct platform_device *pdev) +{ + htc_pwrsink_set(PWRSINK_SYSTEM_LOAD, 7); + return 0; +} + +static void sapphire_pwrsink_resume_late(struct early_suspend *h) +{ + htc_pwrsink_set(PWRSINK_SYSTEM_LOAD, 38); +} + +static void sapphire_pwrsink_suspend_early(struct early_suspend *h) +{ + htc_pwrsink_set(PWRSINK_SYSTEM_LOAD, 7); +} + +static int sapphire_pwrsink_suspend_late(struct platform_device *pdev, pm_message_t state) +{ + htc_pwrsink_set(PWRSINK_SYSTEM_LOAD, 1); + return 0; +} + +static struct pwr_sink_platform_data sapphire_pwrsink_data = { + .num_sinks = ARRAY_SIZE(sapphire_pwrsink_table), + .sinks = sapphire_pwrsink_table, + .suspend_late = sapphire_pwrsink_suspend_late, + .resume_early = sapphire_pwrsink_resume_early, + .suspend_early = sapphire_pwrsink_suspend_early, + .resume_late = sapphire_pwrsink_resume_late, +}; + +static struct platform_device sapphire_pwr_sink = { + .name = "htc_pwrsink", + .id = -1, + .dev = { + .platform_data = &sapphire_pwrsink_data, + }, +}; + +static struct platform_device sapphire_rfkill = { + .name = "sapphire_rfkill", + .id = -1, +}; + +static struct msm_pmem_setting pmem_setting_32 = { + .pmem_start = SMI32_MSM_PMEM_MDP_BASE, + .pmem_size = SMI32_MSM_PMEM_MDP_SIZE, + .pmem_adsp_start = SMI32_MSM_PMEM_ADSP_BASE, + .pmem_adsp_size = SMI32_MSM_PMEM_ADSP_SIZE, + .pmem_gpu0_start = MSM_PMEM_GPU0_BASE, + .pmem_gpu0_size = MSM_PMEM_GPU0_SIZE, + .pmem_gpu1_start = MSM_PMEM_GPU1_BASE, + .pmem_gpu1_size = MSM_PMEM_GPU1_SIZE, + .pmem_camera_start = 0, + .pmem_camera_size = 0, + .ram_console_start = MSM_RAM_CONSOLE_BASE, + .ram_console_size = MSM_RAM_CONSOLE_SIZE, +}; + +static struct msm_pmem_setting pmem_setting_64 = { + .pmem_start = SMI64_MSM_PMEM_MDP_BASE, + .pmem_size = SMI64_MSM_PMEM_MDP_SIZE, + .pmem_adsp_start = SMI64_MSM_PMEM_ADSP_BASE, + .pmem_adsp_size = SMI64_MSM_PMEM_ADSP_SIZE, + .pmem_gpu0_start = MSM_PMEM_GPU0_BASE, + .pmem_gpu0_size = MSM_PMEM_GPU0_SIZE, + .pmem_gpu1_start = MSM_PMEM_GPU1_BASE, + .pmem_gpu1_size = MSM_PMEM_GPU1_SIZE, + .pmem_camera_start = SMI64_MSM_PMEM_CAMERA_BASE, + .pmem_camera_size = SMI64_MSM_PMEM_CAMERA_SIZE, + .ram_console_start = MSM_RAM_CONSOLE_BASE, + .ram_console_size = MSM_RAM_CONSOLE_SIZE, +}; + +#ifdef CONFIG_WIFI_CONTROL_FUNC +static struct platform_device sapphire_wifi = { + .name = "msm_wifi", + .id = 1, + .num_resources = 0, + .resource = NULL, + .dev = { + .platform_data = &sapphire_wifi_control, + }, +}; +#endif + +#define SND(num, desc) { .name = desc, .id = num } +static struct snd_endpoint snd_endpoints_list[] = { + SND(0, "HANDSET"), + SND(1, "SPEAKER"), + SND(2, "HEADSET"), + SND(3, "BT"), + SND(44, "BT_EC_OFF"), + SND(10, "HEADSET_AND_SPEAKER"), + SND(256, "CURRENT"), + + /* Bluetooth accessories. 
*/
+
+	SND(12, "HTC BH S100"),
+	SND(13, "HTC BH M100"),
+	SND(14, "Motorola H500"),
+	SND(15, "Nokia HS-36W"),
+	SND(16, "PLT 510v.D"),
+	SND(17, "M2500 by Plantronics"),
+	SND(18, "Nokia HDW-3"),
+	SND(19, "HBH-608"),
+	SND(20, "HBH-DS970"),
+	SND(21, "i.Tech BlueBAND"),
+	SND(22, "Nokia BH-800"),
+	SND(23, "Motorola H700"),
+	SND(24, "HTC BH M200"),
+	SND(25, "Jabra JX10"),
+	SND(26, "320Plantronics"),
+	SND(27, "640Plantronics"),
+	SND(28, "Jabra BT500"),
+	SND(29, "Motorola HT820"),
+	SND(30, "HBH-IV840"),
+	SND(31, "6XXPlantronics"),
+	SND(32, "3XXPlantronics"),
+	SND(33, "HBH-PV710"),
+	SND(34, "Motorola H670"),
+	SND(35, "HBM-300"),
+	SND(36, "Nokia BH-208"),
+	SND(37, "Samsung WEP410"),
+	SND(38, "Jabra BT8010"),
+	SND(39, "Motorola S9"),
+	SND(40, "Jabra BT620s"),
+	SND(41, "Nokia BH-902"),
+	SND(42, "HBH-DS220"),
+	SND(43, "HBH-DS980"),
+};
+#undef SND
+
+static struct msm_snd_endpoints sapphire_snd_endpoints = {
+	.endpoints = snd_endpoints_list,
+	.num = ARRAY_SIZE(snd_endpoints_list),
+};
+
+static struct platform_device sapphire_snd = {
+	.name = "msm_snd",
+	.id = -1,
+	.dev = {
+		.platform_data = &sapphire_snd_endpoints,
+	},
+};
+
+#ifdef CONFIG_MSM_CAMERA
+void config_sapphire_camera_on_gpios(void);
+void config_sapphire_camera_off_gpios(void);
+static struct msm_camera_device_platform_data msm_camera_device_data = {
+	.camera_gpio_on = config_sapphire_camera_on_gpios,
+	.camera_gpio_off = config_sapphire_camera_off_gpios,
+	.ioext.mdcphy = MSM_MDC_PHYS,
+	.ioext.mdcsz = MSM_MDC_SIZE,
+	.ioext.appphy = MSM_CLK_CTL_PHYS,
+	.ioext.appsz = MSM_CLK_CTL_SIZE,
+};
+
+#ifdef CONFIG_MT9T013
+static struct msm_camera_sensor_info msm_camera_sensor_mt9t013_data = {
+	.sensor_name = "mt9t013",
+	.sensor_reset = 108,
+	.sensor_pwd = 85,
+	.vcm_pwd = SAPPHIRE_GPIO_VCM_PWDN,
+	.pdata = &msm_camera_device_data,
+};
+
+static struct platform_device msm_camera_sensor_mt9t013 = {
+	.name = "msm_camera_mt9t013",
+	.dev = {
+		.platform_data = &msm_camera_sensor_mt9t013_data,
+	},
+};
+#endif
+
+#ifdef CONFIG_MT9P012
+static struct msm_camera_sensor_info msm_camera_sensor_mt9p012_data = {
+	.sensor_name = "mt9p012",
+	.sensor_reset = 108,
+	.sensor_pwd = 85,
+	.vcm_pwd = SAPPHIRE_GPIO_VCM_PWDN,
+	.pdata = &msm_camera_device_data,
+};
+
+static struct platform_device msm_camera_sensor_mt9p012 = {
+	.name = "msm_camera_mt9p012",
+	.dev = {
+		.platform_data = &msm_camera_sensor_mt9p012_data,
+	},
+};
+#endif
+#endif /* CONFIG_MSM_CAMERA */
+
+#ifdef CONFIG_SENSORS_MT9T013
+static struct msm_camera_legacy_device_platform_data msm_camera_device_mt9t013 = {
+	.sensor_reset = 108,
+	.sensor_pwd = 85,
+	.vcm_pwd = SAPPHIRE_GPIO_VCM_PWDN,
+	.config_gpio_on = config_sapphire_camera_on_gpios,
+	.config_gpio_off = config_sapphire_camera_off_gpios,
+};
+
+static struct platform_device sapphire_camera = {
+	.name = "camera",
+	.dev = {
+		.platform_data = &msm_camera_device_mt9t013,
+	},
+};
+#endif

 static struct platform_device *devices[] __initdata = {
 	&msm_device_smd,
-	&msm_device_dmov,
 	&msm_device_nand,
+	&msm_device_i2c,
 	&msm_device_uart1,
+#if !defined(CONFIG_MSM_SERIAL_DEBUGGER) && !defined(CONFIG_TROUT_H2W)
 	&msm_device_uart3,
+#endif
+#ifdef CONFIG_SERIAL_MSM_HS
+	&msm_device_uart_dm1,
+#endif
+	&sapphire_nav_device,
+	&sapphire_search_button_device,
+	&sapphire_reset_keys_device,
+	&android_leds,
+#ifdef CONFIG_LEDS_CPLD
+	&android_CPLD_leds,
+#endif
+#ifdef CONFIG_HTC_HEADSET
+	&sapphire_h2w,
+#endif
+#ifdef CONFIG_MT9T013
+	&msm_camera_sensor_mt9t013,
+#endif
+#ifdef CONFIG_MT9P012
+
&msm_camera_sensor_mt9p012, +#endif + &sapphire_rfkill, +#ifdef CONFIG_WIFI_CONTROL_FUNC + &sapphire_wifi, +#endif + +#ifdef CONFIG_HTC_PWRSINK + &sapphire_pwr_sink, +#endif + &sapphire_snd, +#ifdef CONFIG_SENSORS_MT9T013 + &sapphire_camera, +#endif }; extern struct sys_timer msm_timer; static void __init sapphire_init_irq(void) { + printk(KERN_DEBUG "sapphire_init_irq()\n"); msm_init_irq(); } +static uint cpld_iset; +static uint cpld_charger_en; +static uint cpld_usb_h2w_sw; +static uint opt_disable_uart3; + +module_param_named(iset, cpld_iset, uint, 0); +module_param_named(charger_en, cpld_charger_en, uint, 0); +module_param_named(usb_h2w_sw, cpld_usb_h2w_sw, uint, 0); +module_param_named(disable_uart3, opt_disable_uart3, uint, 0); + +static void sapphire_reset(void) +{ + gpio_set_value(SAPPHIRE_GPIO_PS_HOLD, 0); +} + +static uint32_t gpio_table[] = { + /* BLUETOOTH */ +#ifdef CONFIG_SERIAL_MSM_HS + PCOM_GPIO_CFG(43, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* RTS */ + PCOM_GPIO_CFG(44, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* CTS */ + PCOM_GPIO_CFG(45, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* RX */ + PCOM_GPIO_CFG(46, 3, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* TX */ +#else + PCOM_GPIO_CFG(43, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* RTS */ + PCOM_GPIO_CFG(44, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* CTS */ + PCOM_GPIO_CFG(45, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* RX */ + PCOM_GPIO_CFG(46, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* TX */ +#endif +}; + + +static uint32_t camera_off_gpio_table[] = { + /* CAMERA */ + PCOM_GPIO_CFG(2, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(3, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(4, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT4 */ + PCOM_GPIO_CFG(5, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT5 */ + PCOM_GPIO_CFG(6, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT6 */ + PCOM_GPIO_CFG(7, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT7 */ + PCOM_GPIO_CFG(8, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT8 */ + PCOM_GPIO_CFG(9, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT9 */ + PCOM_GPIO_CFG(10, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT10 */ + PCOM_GPIO_CFG(11, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT11 */ + PCOM_GPIO_CFG(12, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* PCLK */ + PCOM_GPIO_CFG(13, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* HSYNC_IN */ + PCOM_GPIO_CFG(14, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* VSYNC_IN */ + PCOM_GPIO_CFG(15, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* MCLK */ +}; + +static uint32_t camera_on_gpio_table[] = { + /* CAMERA */ + PCOM_GPIO_CFG(2, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT2 */ + PCOM_GPIO_CFG(3, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT3 */ + PCOM_GPIO_CFG(4, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT4 */ + PCOM_GPIO_CFG(5, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT5 */ + PCOM_GPIO_CFG(6, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT6 */ + PCOM_GPIO_CFG(7, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT7 */ + PCOM_GPIO_CFG(8, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT8 */ + PCOM_GPIO_CFG(9, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT9 */ + PCOM_GPIO_CFG(10, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT10 */ + PCOM_GPIO_CFG(11, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT11 */ + PCOM_GPIO_CFG(12, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_16MA), /* PCLK */ + PCOM_GPIO_CFG(13, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* HSYNC_IN */ + PCOM_GPIO_CFG(14, 1, GPIO_INPUT, 
GPIO_PULL_DOWN, GPIO_2MA), /* VSYNC_IN */ + PCOM_GPIO_CFG(15, 1, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_16MA), /* MCLK */ +}; + +static uint32_t camera_off_gpio_12pins_table[] = { + /* CAMERA */ + PCOM_GPIO_CFG(0, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT0 */ + PCOM_GPIO_CFG(1, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(2, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(3, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(4, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT4 */ + PCOM_GPIO_CFG(5, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT5 */ + PCOM_GPIO_CFG(6, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT6 */ + PCOM_GPIO_CFG(7, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT7 */ + PCOM_GPIO_CFG(8, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT8 */ + PCOM_GPIO_CFG(9, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT9 */ + PCOM_GPIO_CFG(10, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT10 */ + PCOM_GPIO_CFG(11, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT11 */ + PCOM_GPIO_CFG(12, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* PCLK */ + PCOM_GPIO_CFG(13, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* HSYNC_IN */ + PCOM_GPIO_CFG(14, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* VSYNC_IN */ + PCOM_GPIO_CFG(15, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* MCLK */ +}; + +static uint32_t camera_on_gpio_12pins_table[] = { + /* CAMERA */ + PCOM_GPIO_CFG(0, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT0 */ + PCOM_GPIO_CFG(1, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT1 */ + PCOM_GPIO_CFG(2, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT2 */ + PCOM_GPIO_CFG(3, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT3 */ + PCOM_GPIO_CFG(4, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT4 */ + PCOM_GPIO_CFG(5, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT5 */ + PCOM_GPIO_CFG(6, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT6 */ + PCOM_GPIO_CFG(7, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT7 */ + PCOM_GPIO_CFG(8, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT8 */ + PCOM_GPIO_CFG(9, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT9 */ + PCOM_GPIO_CFG(10, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT10 */ + PCOM_GPIO_CFG(11, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT11 */ + PCOM_GPIO_CFG(12, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_16MA), /* PCLK */ + PCOM_GPIO_CFG(13, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* HSYNC_IN */ + PCOM_GPIO_CFG(14, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* VSYNC_IN */ + PCOM_GPIO_CFG(15, 1, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_16MA), /* MCLK */ +}; + +static void config_gpio_table(uint32_t *table, int len) +{ + int n; + unsigned id; + for (n = 0; n < len; n++) { + id = table[n]; + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); + } +} + +void config_sapphire_camera_on_gpios(void) +{ + /*Add for judage it's 10 pins or 12 pins platform ----->*/ + if (is_12pin_camera()) { + config_gpio_table(camera_on_gpio_12pins_table, + ARRAY_SIZE(camera_on_gpio_12pins_table)); + } else { + config_gpio_table(camera_on_gpio_table, + ARRAY_SIZE(camera_on_gpio_table)); + } + /*End Of Add for judage it's 10 pins or 12 pins platform*/ +} + +void config_sapphire_camera_off_gpios(void) +{ + /*Add for judage it's 10 pins or 12 pins platform ----->*/ + if (is_12pin_camera()) { + config_gpio_table(camera_off_gpio_12pins_table, + ARRAY_SIZE(camera_off_gpio_12pins_table)); + } else { + config_gpio_table(camera_off_gpio_table, + ARRAY_SIZE(camera_off_gpio_table)); + } + /*End Of Add for judage it's 10 pins or 12 pins 
platform*/ +} + +static void __init config_gpios(void) +{ + config_gpio_table(gpio_table, ARRAY_SIZE(gpio_table)); + config_sapphire_camera_off_gpios(); +} + +static struct msm_acpu_clock_platform_data sapphire_clock_data = { + .acpu_switch_time_us = 20, + .max_speed_delta_khz = 256000, + .vdd_switch_time_us = 62, + .power_collapse_khz = 19200000, + .wait_for_irq_khz = 128000000, +}; + +#ifdef CONFIG_SERIAL_MSM_HS +static struct msm_serial_hs_platform_data msm_uart_dm1_pdata = { + .rx_wakeup_irq = MSM_GPIO_TO_INT(45), + .inject_rx_on_wakeup = 1, + .rx_to_inject = 0x32, +}; +#endif + +extern int sapphire_init_gpio(void); + static void __init sapphire_init(void) { + int rc; + printk("sapphire_init() revision = 0x%X\n", system_rev); + + sapphire_init_gpio(); + /* + * Setup common MSM GPIOS + */ + config_gpios(); + rc = gpio_request(SAPPHIRE_GPIO_TP_EN, NULL); + if (rc < 0) + pr_err("%s: gpio_request(%d) failure %d\n", + __func__, SAPPHIRE_GPIO_TP_EN, rc); + + msm_hw_reset_hook = sapphire_reset; + + msm_acpu_clock_init(&sapphire_clock_data); + + /* adjust GPIOs based on bootloader request */ + printk("sapphire_init: cpld_usb_hw2_sw = %d\n", cpld_usb_h2w_sw); + gpio_set_value(SAPPHIRE_GPIO_USB_H2W_SW, cpld_usb_h2w_sw); + +#if defined(CONFIG_MSM_SERIAL_DEBUGGER) + if (!opt_disable_uart3) + msm_serial_debug_init(MSM_UART3_PHYS, INT_UART3, + &msm_device_uart3.dev, 1, + MSM_GPIO_TO_INT(86)); +#endif + + /* gpio_configure(108, IRQF_TRIGGER_LOW); */ + + /* H2W pins <-> UART3, Bluetooth <-> UART1 */ + gpio_set_value(SAPPHIRE_GPIO_H2W_SEL0, 0); + gpio_set_value(SAPPHIRE_GPIO_H2W_SEL1, 1); + /* put the AF VCM in powerdown mode to avoid noise */ + gpio_set_value(SAPPHIRE_GPIO_VCM_PWDN, !sapphire_is_5M_camera()); + + mdelay(100); + + printk(KERN_DEBUG "sapphire_is_5M_camera=%d\n", + sapphire_is_5M_camera()); + printk(KERN_DEBUG "is_12pin_camera=%d\n", is_12pin_camera()); +#ifdef CONFIG_SERIAL_MSM_HS + msm_device_uart_dm1.dev.platform_data = &msm_uart_dm1_pdata; +#endif + msm_add_usb_devices(sapphire_phy_reset); + + if (32 == smi_sz) + msm_add_mem_devices(&pmem_setting_32); + else + msm_add_mem_devices(&pmem_setting_64); + + rc = sapphire_init_mmc(system_rev); + if (rc) + printk(KERN_CRIT "%s: MMC init failure (%d)\n", __func__, rc); + +#ifdef CONFIG_WIFI_MEM_PREALLOC + rc = sapphire_init_wifi_mem(); + if (rc) { + printk(KERN_CRIT "%s: WiFi memory init failure (%d)\n", + __func__, rc); + } +#endif + msm_init_pmic_vibrator(); + + if(system_rev != 0x80) + sapphire_search_button_info.keymap = sapphire_search_button_v1; + + if (is_12pin_camera()) + gpio_tp_ls_en = SAPPHIRE20_TP_LS_EN; + gpio_request(gpio_tp_ls_en, "tp_ls_en"); + + i2c_register_board_info(0, i2c_devices, ARRAY_SIZE(i2c_devices)); platform_add_devices(devices, ARRAY_SIZE(devices)); } @@ -77,22 +1205,83 @@ static struct map_desc sapphire_io_desc[] __initdata = { } }; + +unsigned int sapphire_get_hwid(void) +{ + return hwid; +} + +unsigned int sapphire_get_skuid(void) +{ + return skuid; +} + +unsigned sapphire_engineerid(void) +{ + return engineerid; +} + +int sapphire_is_5M_camera(void) +{ + int ret = 0; + if (sapphire_get_skuid() == 0x1FF00 && !(sapphire_engineerid() & 0x02)) + ret = 1; + else if (sapphire_get_skuid() == 0x20100 && !(sapphire_engineerid() & 0x02)) + ret = 1; + return ret; +} + +/* it can support 3M and 5M sensor */ +unsigned int is_12pin_camera(void) +{ + unsigned int ret = 0; + + if (sapphire_get_skuid() == 0x1FF00 || sapphire_get_skuid() == 0x20100) + ret = 1; + else + ret = 0; + return ret; +} + +int 
sapphire_get_smi_size(void) +{ + printk(KERN_DEBUG "get_smi_size=%d\n", smi_sz); + return smi_sz; +} + static void __init sapphire_fixup(struct machine_desc *desc, struct tag *tags, char **cmdline, struct meminfo *mi) { - int smi_sz = parse_tag_smi((const struct tag *)tags); + smi_sz = parse_tag_smi((const struct tag *)tags); + printk("sapphire_fixup:smisize=%d\n", smi_sz); + hwid = parse_tag_hwid((const struct tag *)tags); + printk("sapphire_fixup:hwid=0x%x\n", hwid); + skuid = parse_tag_skuid((const struct tag *)tags); + printk("sapphire_fixup:skuid=0x%x\n", skuid); + engineerid = parse_tag_engineerid((const struct tag *)tags); + printk("sapphire_fixup:engineerid=0x%x\n", engineerid); - mi->nr_banks = 1; - mi->bank[0].start = PHYS_OFFSET; - mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET); if (smi_sz == 32) { + mi->nr_banks = 1; + mi->bank[0].start = PHYS_OFFSET; mi->bank[0].size = (84*1024*1024); } else if (smi_sz == 64) { - mi->bank[0].size = (101*1024*1024); + mi->nr_banks = 2; + mi->bank[0].start = SMI64_MSM_LINUX_BASE_1; + mi->bank[0].size = (32*1024*1024); + mi->bank[1].start = SMI64_MSM_LINUX_BASE_2; + mi->bank[1].size = (84*1024*1024); } else { - /* Give a default value when not get smi size */ + printk(KERN_ERR "can not get smi size\n"); + + /*Give a default value when not get smi size*/ smi_sz = 64; - mi->bank[0].size = (101*1024*1024); + mi->nr_banks = 2; + mi->bank[0].start = SMI64_MSM_LINUX_BASE_1; + mi->bank[0].size = (32*1024*1024); + mi->bank[1].start = SMI64_MSM_LINUX_BASE_2; + mi->bank[1].size = (84*1024*1024); + printk(KERN_ERR "use default : smisize=%d\n", smi_sz); } } @@ -100,14 +1289,14 @@ static void __init sapphire_map_io(void) { msm_map_common_io(); iotable_init(sapphire_io_desc, ARRAY_SIZE(sapphire_io_desc)); - msm_clock_init(); + msm_clock_init(msm_clocks_7x01a, msm_num_clocks_7x01a); } MACHINE_START(SAPPHIRE, "sapphire") /* Maintainer: Brian Swetland */ #ifdef CONFIG_MSM_DEBUG_UART #endif - .boot_params = PHYS_OFFSET + 0x100, + .boot_params = 0x02000100, .fixup = sapphire_fixup, .map_io = sapphire_map_io, .init_irq = sapphire_init_irq, diff --git a/arch/arm/mach-msm/board-sapphire.h b/arch/arm/mach-msm/board-sapphire.h new file mode 100644 index 0000000000000..fd4ccd8ec140f --- /dev/null +++ b/arch/arm/mach-msm/board-sapphire.h @@ -0,0 +1,223 @@ +/* linux/arch/arm/mach-msm/board-sapphire.h + * Copyright (C) 2007-2009 HTC Corporation. + * Author: Thomas Tsai + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+*/ +#ifndef __ARCH_ARM_MACH_MSM_BOARD_SAPPHIRE_H +#define __ARCH_ARM_MACH_MSM_BOARD_SAPPHIRE_H + +#include + +#define MSM_SMI_BASE 0x00000000 +#define MSM_SMI_SIZE 0x00800000 + +#define MSM_EBI_BASE 0x10000000 +#define MSM_EBI_SIZE 0x07100000 + +#define MSM_PMEM_GPU0_BASE 0x00000000 +#define MSM_PMEM_GPU0_SIZE 0x00700000 + +#define SMI64_MSM_PMEM_MDP_BASE 0x15900000 +#define SMI64_MSM_PMEM_MDP_SIZE 0x00800000 + +#define SMI64_MSM_PMEM_ADSP_BASE 0x16100000 +#define SMI64_MSM_PMEM_ADSP_SIZE 0x00800000 + +#define SMI64_MSM_PMEM_CAMERA_BASE 0x15400000 +#define SMI64_MSM_PMEM_CAMERA_SIZE 0x00500000 + +#define SMI64_MSM_FB_BASE 0x00700000 +#define SMI64_MSM_FB_SIZE 0x00100000 + +#define SMI64_MSM_LINUX_BASE MSM_EBI_BASE +#define SMI64_MSM_LINUX_SIZE 0x068e0000 + +#define SMI64_MSM_LINUX_BASE_1 0x02000000 +#define SMI64_MSM_LINUX_SIZE_1 0x02000000 + +#define SMI64_MSM_LINUX_BASE_2 MSM_EBI_BASE +#define SMI64_MSM_LINUX_SIZE_2 0x05400000 + +#define SMI32_MSM_LINUX_BASE MSM_EBI_BASE +#define SMI32_MSM_LINUX_SIZE 0x5400000 + +#define SMI32_MSM_PMEM_MDP_BASE SMI32_MSM_LINUX_BASE + SMI32_MSM_LINUX_SIZE +#define SMI32_MSM_PMEM_MDP_SIZE 0x800000 + +#define SMI32_MSM_PMEM_ADSP_BASE SMI32_MSM_PMEM_MDP_BASE + SMI32_MSM_PMEM_MDP_SIZE +#define SMI32_MSM_PMEM_ADSP_SIZE 0x800000 + +#define SMI32_MSM_FB_BASE SMI32_MSM_PMEM_ADSP_BASE + SMI32_MSM_PMEM_ADSP_SIZE +#define SMI32_MSM_FB_SIZE 0x9b000 + + +#define MSM_PMEM_GPU1_SIZE 0x800000 +#define MSM_PMEM_GPU1_BASE (MSM_RAM_CONSOLE_BASE + MSM_RAM_CONSOLE_SIZE) + +#define MSM_RAM_CONSOLE_BASE 0x169E0000 +#define MSM_RAM_CONSOLE_SIZE 128 * SZ_1K + +#if (SMI32_MSM_FB_BASE + SMI32_MSM_FB_SIZE) >= (MSM_PMEM_GPU1_BASE) +#error invalid memory map +#endif + +#if (SMI64_MSM_FB_BASE + SMI64_MSM_FB_SIZE) >= (MSM_PMEM_GPU1_BASE) +#error invalid memory map +#endif + +#define DECLARE_MSM_IOMAP +#include + +/* +** SOC GPIO +*/ +#define SAPPHIRE_BALL_UP_0 94 +#define SAPPHIRE_BALL_LEFT_0 18 +#define SAPPHIRE_BALL_DOWN_0 49 +#define SAPPHIRE_BALL_RIGHT_0 19 + +#define SAPPHIRE_POWER_KEY 20 +#define SAPPHIRE_VOLUME_UP 36 +#define SAPPHIRE_VOLUME_DOWN 39 + +#define SAPPHIRE_GPIO_PS_HOLD (25) +#define SAPPHIRE_MDDI_1V5_EN (28) +#define SAPPHIRE_BL_PWM (27) +#define SAPPHIRE_TP_LS_EN (1) +#define SAPPHIRE20_TP_LS_EN (88) + +/* H2W */ +#define SAPPHIRE_GPIO_CABLE_IN1 (83) +#define SAPPHIRE_GPIO_CABLE_IN2 (37) +#define SAPPHIRE_GPIO_UART3_RX (86) +#define SAPPHIRE_GPIO_UART3_TX (87) +#define SAPPHIRE_GPIO_H2W_DATA (86) +#define SAPPHIRE_GPIO_H2W_CLK (87) + +#define SAPPHIRE_GPIO_UART1_RTS (43) +#define SAPPHIRE_GPIO_UART1_CTS (44) + +/* +** CPLD GPIO +** +** Sapphire Altera CPLD can keep the registers value and +** doesn't need a shadow to backup. +**/ +#define SAPPHIRE_CPLD_BASE 0xFA000000 /* VA */ +#define SAPPHIRE_CPLD_START 0x98000000 /* PA */ +#define SAPPHIRE_CPLD_SIZE SZ_4K + +#define SAPPHIRE_GPIO_START (128) /* Pseudo GPIO number */ + +/* Sapphire has one INT BANK only. */ +#define SAPPHIRE_GPIO_INT_B0_MASK_REG (0x0c) /*INT3 MASK*/ +#define SAPPHIRE_GPIO_INT_B0_STAT_REG (0x0e) /*INT1 STATUS*/ + +/* LED control register */ +#define SAPPHIRE_CPLD_LED_BASE (SAPPHIRE_CPLD_BASE + 0x10) /* VA */ +#define SAPPHIRE_CPLD_LED_START (SAPPHIRE_CPLD_START + 0x10) /* PA */ +#define SAPPHIRE_CPLD_LED_SIZE 0x08 + +/* MISCn: GPO pin to Enable/Disable some functions. 
*/ +#define SAPPHIRE_GPIO_MISC1_BASE (SAPPHIRE_GPIO_START + 0x00) +#define SAPPHIRE_GPIO_MISC2_BASE (SAPPHIRE_GPIO_START + 0x08) +#define SAPPHIRE_GPIO_MISC3_BASE (SAPPHIRE_GPIO_START + 0x10) +#define SAPPHIRE_GPIO_MISC4_BASE (SAPPHIRE_GPIO_START + 0x18) +#define SAPPHIRE_GPIO_MISC5_BASE (SAPPHIRE_GPIO_START + 0x20) + +/* INT BANK0: INT1: int status, INT2: int level, INT3: int Mask */ +#define SAPPHIRE_GPIO_INT_B0_BASE (SAPPHIRE_GPIO_START + 0x28) + +/* MISCn GPIO: */ +#define SAPPHIRE_GPIO_CPLD128_VER_0 (SAPPHIRE_GPIO_MISC1_BASE + 4) +#define SAPPHIRE_GPIO_CPLD128_VER_1 (SAPPHIRE_GPIO_MISC1_BASE + 5) +#define SAPPHIRE_GPIO_CPLD128_VER_2 (SAPPHIRE_GPIO_MISC1_BASE + 6) +#define SAPPHIRE_GPIO_CPLD128_VER_3 (SAPPHIRE_GPIO_MISC1_BASE + 7) + +#define SAPPHIRE_GPIO_H2W_DAT_DIR (SAPPHIRE_GPIO_MISC2_BASE + 2) +#define SAPPHIRE_GPIO_H2W_CLK_DIR (SAPPHIRE_GPIO_MISC2_BASE + 3) +#define SAPPHIRE_GPIO_H2W_SEL0 (SAPPHIRE_GPIO_MISC2_BASE + 6) +#define SAPPHIRE_GPIO_H2W_SEL1 (SAPPHIRE_GPIO_MISC2_BASE + 7) + +#define SAPPHIRE_GPIO_I2C_PULL (SAPPHIRE_GPIO_MISC3_BASE + 2) +#define SAPPHIRE_GPIO_TP_EN (SAPPHIRE_GPIO_MISC3_BASE + 4) +#define SAPPHIRE_GPIO_JOG_EN (SAPPHIRE_GPIO_MISC3_BASE + 5) +#define SAPPHIRE_GPIO_JOG_LED_EN (SAPPHIRE_GPIO_MISC3_BASE + 6) +#define SAPPHIRE_GPIO_APKEY_LED_EN (SAPPHIRE_GPIO_MISC3_BASE + 7) + +#define SAPPHIRE_GPIO_VCM_PWDN (SAPPHIRE_GPIO_MISC4_BASE + 0) +#define SAPPHIRE_GPIO_USB_H2W_SW (SAPPHIRE_GPIO_MISC4_BASE + 1) +#define SAPPHIRE_GPIO_COMPASS_RST_N (SAPPHIRE_GPIO_MISC4_BASE + 2) +#define SAPPHIRE_GPIO_USB_PHY_RST_N (SAPPHIRE_GPIO_MISC4_BASE + 5) +#define SAPPHIRE_GPIO_WIFI_PA_RESETX (SAPPHIRE_GPIO_MISC4_BASE + 6) +#define SAPPHIRE_GPIO_WIFI_EN (SAPPHIRE_GPIO_MISC4_BASE + 7) + +#define SAPPHIRE_GPIO_BT_32K_EN (SAPPHIRE_GPIO_MISC5_BASE + 0) +#define SAPPHIRE_GPIO_MAC_32K_EN (SAPPHIRE_GPIO_MISC5_BASE + 1) +#define SAPPHIRE_GPIO_MDDI_32K_EN (SAPPHIRE_GPIO_MISC5_BASE + 2) +#define SAPPHIRE_GPIO_COMPASS_32K_EN (SAPPHIRE_GPIO_MISC5_BASE + 3) + +/* INT STATUS/LEVEL/MASK : INT GPIO should be the last. */ +#define SAPPHIRE_GPIO_NAVI_ACT_N (SAPPHIRE_GPIO_INT_B0_BASE + 0) +#define SAPPHIRE_GPIO_COMPASS_IRQ (SAPPHIRE_GPIO_INT_B0_BASE + 1) +#define SAPPHIRE_GPIO_SEARCH_ACT_N (SAPPHIRE_GPIO_INT_B0_BASE + 2) +#define SAPPHIRE_GPIO_AUD_HSMIC_DET_N (SAPPHIRE_GPIO_INT_B0_BASE + 3) +#define SAPPHIRE_GPIO_SDMC_CD_N (SAPPHIRE_GPIO_INT_B0_BASE + 4) +#define SAPPHIRE_GPIO_CAM_BTN_STEP1_N (SAPPHIRE_GPIO_INT_B0_BASE + 5) +#define SAPPHIRE_GPIO_CAM_BTN_STEP2_N (SAPPHIRE_GPIO_INT_B0_BASE + 6) +#define SAPPHIRE_GPIO_TP_ATT_N (SAPPHIRE_GPIO_INT_B0_BASE + 7) + +#define SAPPHIRE_GPIO_END SAPPHIRE_GPIO_TP_ATT_N +#define SAPPHIRE_GPIO_LAST_INT (SAPPHIRE_GPIO_TP_ATT_N) + +/* Bit position in the CPLD MISCn by the CPLD GPIOn: only bit0-7 is used. 
*/ +#define CPLD_GPIO_BIT_POS_MASK(n) (1U << ((n) & 7)) +#define CPLD_GPIO_REG_OFFSET(n) _g_CPLD_MISCn_Offset[((n)-SAPPHIRE_GPIO_START) >> 3] +#define CPLD_GPIO_REG(n) (CPLD_GPIO_REG_OFFSET(n) + SAPPHIRE_CPLD_BASE) + +/* +** CPLD INT Start +*/ +#define SAPPHIRE_INT_START (NR_MSM_IRQS + NR_GPIO_IRQS) /* pseudo number for CPLD INT */ +/* Using INT status/Bank0 for GPIO to INT */ +#define SAPPHIRE_GPIO_TO_INT(n) ((n-SAPPHIRE_GPIO_INT_B0_BASE) + SAPPHIRE_INT_START) +#define SAPPHIRE_INT_END (SAPPHIRE_GPIO_TO_INT(SAPPHIRE_GPIO_END)) + +/* get the INT reg by GPIO number */ +#define CPLD_INT_GPIO_TO_BANK(n) (((n)-SAPPHIRE_GPIO_INT_B0_BASE) >> 3) +#define CPLD_INT_STATUS_REG_OFFSET_G(n) _g_INT_BANK_Offset[CPLD_INT_GPIO_TO_BANK(n)][0] +#define CPLD_INT_LEVEL_REG_OFFSET_G(n) _g_INT_BANK_Offset[CPLD_INT_GPIO_TO_BANK(n)][1] +#define CPLD_INT_MASK_REG_OFFSET_G(n) _g_INT_BANK_Offset[CPLD_INT_GPIO_TO_BANK(n)][2] +#define CPLD_INT_STATUS_REG_G(n) (SAPPHIRE_CPLD_BASE + CPLD_INT_STATUS_REG_OFFSET_G(n)) +#define CPLD_INT_LEVEL_REG_G(n) (SAPPHIRE_CPLD_BASE + CPLD_INT_LEVEL_REG_OFFSET_G(n)) +#define CPLD_INT_MASK_REG_G(n) (SAPPHIRE_CPLD_BASE + CPLD_INT_MASK_REG_OFFSET_G(n)) + +/* get the INT reg by INT number */ +#define CPLD_INT_TO_BANK(i) ((i-SAPPHIRE_INT_START) >> 3) +#define CPLD_INT_STATUS_REG_OFFSET(i) _g_INT_BANK_Offset[CPLD_INT_TO_BANK(i)][0] +#define CPLD_INT_LEVEL_REG_OFFSET(i) _g_INT_BANK_Offset[CPLD_INT_TO_BANK(i)][1] +#define CPLD_INT_MASK_REG_OFFSET(i) _g_INT_BANK_Offset[CPLD_INT_TO_BANK(i)][2] +#define CPLD_INT_STATUS_REG(i) (SAPPHIRE_CPLD_BASE + CPLD_INT_STATUS_REG_OFFSET(i)) +#define CPLD_INT_LEVEL_REG(i) (SAPPHIRE_CPLD_BASE + CPLD_INT_LEVEL_REG_OFFSET(i)) +#define CPLD_INT_MASK_REG(i) (SAPPHIRE_CPLD_BASE + CPLD_INT_MASK_REG_OFFSET(i) ) + +/* return the bit mask by INT number */ +#define SAPPHIRE_INT_BIT_MASK(i) (1U << ((i - SAPPHIRE_INT_START) & 7)) + +void config_sapphire_camera_on_gpios(void); +void config_sapphire_camera_off_gpios(void); +int sapphire_get_smi_size(void); +unsigned int sapphire_get_hwid(void); +unsigned int sapphire_get_skuid(void); +unsigned int is_12pin_camera(void); +int sapphire_is_5M_camera(void); + +#endif /* GUARD */ diff --git a/arch/arm/mach-msm/board-supersonic-audio.c b/arch/arm/mach-msm/board-supersonic-audio.c new file mode 100644 index 0000000000000..d986b6f509192 --- /dev/null +++ b/arch/arm/mach-msm/board-supersonic-audio.c @@ -0,0 +1,318 @@ +/* arch/arm/mach-msm/board-supersonic-audio.c + * + * Copyright (C) 2009 HTC Corporation + * Copyright (C) 2009 Google Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "board-supersonic.h" +#include "proc_comm.h" +#include "pmic.h" +#include "board-supersonic-tpa2018d1.h" + +#if 1 +#define D(fmt, args...) printk(KERN_INFO "Audio: "fmt, ##args) +#else +#define D(fmt, args...) 
do {} while (0) +#endif + +static struct mutex mic_lock; +static struct mutex bt_sco_lock; +static struct mutex hdmi_i2s_lock; +static int headset_status = 0; + +static struct q6_hw_info q6_audio_hw[Q6_HW_COUNT] = { + [Q6_HW_HANDSET] = { + .min_gain = -1500, + .max_gain = 1199, + }, + [Q6_HW_HEADSET] = { + .min_gain = -2000, + .max_gain = 1199, + }, + [Q6_HW_SPEAKER] = { + .min_gain = -1100, + .max_gain = 400, + }, + [Q6_HW_TTY] = { + .min_gain = -1600, + .max_gain = 400, + }, + [Q6_HW_BT_SCO] = { + .min_gain = -1600, + .max_gain = 400, + }, + [Q6_HW_BT_A2DP] = { + .min_gain = -2000, + .max_gain = 0, + }, +}; + +void supersonic_headset_enable(int en) +{ + D("%s %d\n", __func__, en); + /* enable audio amp */ + if (en != headset_status) { + headset_status = en; + if(en) { + gpio_set_value(SUPERSONIC_AUD_JACKHP_EN, 1); + mdelay(10); + if (system_rev == 0) + set_headset_amp(1); + } else { + if (system_rev == 0) + set_headset_amp(0); + gpio_set_value(SUPERSONIC_AUD_JACKHP_EN, 0); + } + } +} + +void supersonic_speaker_enable(int en) +{ + struct spkr_config_mode scm; + memset(&scm, 0, sizeof(scm)); + + D("%s %d\n", __func__, en); + if (en) { + scm.is_right_chan_en = 0; + scm.is_left_chan_en = 1; + scm.is_stereo_en = 0; + scm.is_hpf_en = 1; + pmic_spkr_en_mute(LEFT_SPKR, 0); + pmic_set_spkr_configuration(&scm); + pmic_spkr_en(LEFT_SPKR, 1); + + /* unmute */ + pmic_spkr_en_mute(LEFT_SPKR, 1); + } else { + pmic_spkr_en_mute(LEFT_SPKR, 0); + + pmic_spkr_en(LEFT_SPKR, 0); + + pmic_set_spkr_configuration(&scm); + } + tpa2018d1_set_speaker_amp(en); +} + +void supersonic_receiver_enable(int en) +{ + /* After XB*/ + if (system_rev >= 1) { + struct spkr_config_mode scm; + memset(&scm, 0, sizeof(scm)); + + D("%s %d\n", __func__, en); + if (en) { + scm.is_right_chan_en = 1; + scm.is_left_chan_en = 0; + scm.is_stereo_en = 0; + scm.is_hpf_en = 1; + pmic_spkr_en_mute(RIGHT_SPKR, 0); + pmic_set_spkr_configuration(&scm); + pmic_spkr_en(RIGHT_SPKR, 1); + + /* unmute */ + pmic_spkr_en_mute(RIGHT_SPKR, 1); + } else { + pmic_spkr_en_mute(RIGHT_SPKR, 0); + + pmic_spkr_en(RIGHT_SPKR, 0); + + pmic_set_spkr_configuration(&scm); + } + } +} + +static uint32_t hdmi_i2s_enable[] = { + PCOM_GPIO_CFG(SUPERSONIC_I2S_CLK, 2, GPIO_OUTPUT, + GPIO_PULL_DOWN, GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_I2S_WS, 1, GPIO_OUTPUT, + GPIO_PULL_DOWN, GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_I2S_DOUT, 1, GPIO_OUTPUT, + GPIO_PULL_DOWN, GPIO_2MA), +}; + +static uint32_t hdmi_i2s_disable[] = { + PCOM_GPIO_CFG(SUPERSONIC_I2S_CLK, 0, GPIO_OUTPUT, + GPIO_PULL_DOWN, GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_I2S_WS, 0, GPIO_OUTPUT, + GPIO_PULL_DOWN, GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_I2S_DOUT, 0, GPIO_OUTPUT, + GPIO_PULL_DOWN, GPIO_2MA), +}; + +void supersonic_hdmi_i2s_enable(int en) +{ + static int hdmi_i2s_refcount; + D("%s %d\n", __func__, en); + mutex_lock(&hdmi_i2s_lock); + if (en) { + if (++hdmi_i2s_refcount == 1) + config_gpio_table(hdmi_i2s_enable, + ARRAY_SIZE(hdmi_i2s_enable)); + } else { + if (--hdmi_i2s_refcount == 0) { + config_gpio_table(hdmi_i2s_disable, ARRAY_SIZE(hdmi_i2s_disable)); + } + } + mutex_unlock(&hdmi_i2s_lock); +} + + +static uint32_t bt_sco_enable[] = { + PCOM_GPIO_CFG(SUPERSONIC_BT_PCM_OUT, 1, GPIO_INPUT, + GPIO_PULL_DOWN, GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_BT_PCM_IN, 1, GPIO_INPUT, + GPIO_PULL_DOWN, GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_BT_PCM_SYNC, 2, GPIO_INPUT, + GPIO_PULL_DOWN, GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_BT_PCM_CLK, 2, GPIO_INPUT, + GPIO_PULL_DOWN, GPIO_2MA), +}; + +static uint32_t bt_sco_disable[] = { + 
PCOM_GPIO_CFG(SUPERSONIC_BT_PCM_OUT, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_BT_PCM_IN, 0, GPIO_INPUT, + GPIO_PULL_UP, GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_BT_PCM_SYNC, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_BT_PCM_CLK, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA), +}; + +void supersonic_bt_sco_enable(int en) +{ + static int bt_sco_refcount; + D("%s %d\n", __func__, en); + mutex_lock(&bt_sco_lock); + if (en) { + if (++bt_sco_refcount == 1) + config_gpio_table(bt_sco_enable, + ARRAY_SIZE(bt_sco_enable)); + } else { + if (--bt_sco_refcount == 0) { + config_gpio_table(bt_sco_disable, ARRAY_SIZE(bt_sco_disable)); + gpio_set_value(SUPERSONIC_BT_PCM_OUT, 0); + gpio_set_value(SUPERSONIC_BT_PCM_SYNC,0); + gpio_set_value(SUPERSONIC_BT_PCM_CLK,0); + } + } + mutex_unlock(&bt_sco_lock); +} + +void supersonic_int_mic_enable(int en) +{ + D("%s %d\n", __func__, en); + if (en) + pmic_mic_en(ON_CMD); + else + pmic_mic_en(OFF_CMD); +} + +void supersonic_ext_mic_enable(int en) +{ + static int old_state = 0, new_state = 0; + + D("%s %d\n", __func__, en); + + mutex_lock(&mic_lock); + if (!!en) + new_state++; + else + new_state--; + + if (new_state == 1 && old_state == 0) { + gpio_set_value(SUPERSONIC_AUD_2V5_EN, 1); + } else if (new_state == 0 && old_state == 1) + gpio_set_value(SUPERSONIC_AUD_2V5_EN, 0); + else + D("%s: do nothing %d %d\n", __func__, old_state, new_state); + + old_state = new_state; + mutex_unlock(&mic_lock); +} + +void supersonic_analog_init(void) +{ + D("%s\n", __func__); + /* stereo pmic init */ + pmic_spkr_set_gain(LEFT_SPKR, SPKR_GAIN_PLUS12DB); + pmic_spkr_set_gain(RIGHT_SPKR, SPKR_GAIN_00DB); + pmic_spkr_en_right_chan(OFF_CMD); + pmic_spkr_en_left_chan(OFF_CMD); + pmic_spkr_add_right_left_chan(OFF_CMD); + pmic_spkr_en_stereo(OFF_CMD); + pmic_spkr_select_usb_with_hpf_20hz(OFF_CMD); + pmic_spkr_bypass_mux(OFF_CMD); + pmic_spkr_en_hpf(ON_CMD); + pmic_spkr_en_sink_curr_from_ref_volt_cir(OFF_CMD); + pmic_spkr_set_mux_hpf_corner_freq(SPKR_FREQ_0_73KHZ); + pmic_mic_set_volt(MIC_VOLT_1_80V); + pmic_set_speaker_delay(SPKR_DLY_100MS); + + gpio_request(SUPERSONIC_AUD_JACKHP_EN, "aud_jackhp_en"); + gpio_direction_output(SUPERSONIC_AUD_JACKHP_EN, 0); + gpio_set_value(SUPERSONIC_AUD_JACKHP_EN, 0); + + mutex_lock(&bt_sco_lock); + config_gpio_table(bt_sco_disable, ARRAY_SIZE(bt_sco_disable)); + gpio_set_value(SUPERSONIC_BT_PCM_OUT, 0); + gpio_set_value(SUPERSONIC_BT_PCM_SYNC,0); + gpio_set_value(SUPERSONIC_BT_PCM_CLK,0); + mutex_unlock(&bt_sco_lock); +} + +int supersonic_get_rx_vol(uint8_t hw, int level) +{ + struct q6_hw_info *info; + int vol; + + info = &q6_audio_hw[hw]; + vol = info->min_gain + ((info->max_gain - info->min_gain) * level) / 100; + D("%s %d\n", __func__, vol); + return vol; +} + +static struct qsd_acoustic_ops acoustic = { + .enable_mic_bias = supersonic_ext_mic_enable, +}; + +static struct q6audio_analog_ops ops = { + .init = supersonic_analog_init, + .speaker_enable = supersonic_speaker_enable, + .headset_enable = supersonic_headset_enable, + .receiver_enable = supersonic_receiver_enable, + .bt_sco_enable = supersonic_bt_sco_enable, + .int_mic_enable = supersonic_int_mic_enable, + .ext_mic_enable = supersonic_ext_mic_enable, + .i2s_enable = supersonic_hdmi_i2s_enable, + .get_rx_vol = supersonic_get_rx_vol, +}; + +void __init supersonic_audio_init(void) +{ + mutex_init(&mic_lock); + mutex_init(&bt_sco_lock); + mutex_init(&hdmi_i2s_lock); + q6audio_register_analog_ops(&ops); + acoustic_register_ops(&acoustic); +} + diff --git 
a/arch/arm/mach-msm/board-supersonic-flashlight.c b/arch/arm/mach-msm/board-supersonic-flashlight.c
new file mode 100644
index 0000000000000..8264e1d79bb5a
--- /dev/null
+++ b/arch/arm/mach-msm/board-supersonic-flashlight.c
@@ -0,0 +1,490 @@
+/*
+ * arch/arm/mach-msm/msm_flashlight.c - The flashlight driver
+ * Copyright (C) 2009 HTC Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+/*#include */
+#include "board-supersonic-flashlight.h"
+#include
+
+struct flashlight_struct {
+	struct led_classdev fl_lcdev;
+	struct early_suspend early_suspend_flashlight;
+	struct hrtimer timer;
+	struct wake_lock wake_lock;
+	spinlock_t spin_lock;
+	uint32_t gpio_torch;
+	uint32_t gpio_flash;
+	uint32_t gpio_flash_adj;
+	uint32_t flash_sw_timeout_ms;
+	enum flashlight_mode_flags mode_status;
+	unsigned long spinlock_flags;
+	unsigned flash_adj_gpio_status;
+	/* inactive: 0x0
+	 * active: 0x1
+	 * force disable flashlight function: 0x2 */
+	uint8_t flash_adj_value;
+	uint8_t led_count;
+};
+
+/* Disabled: the flash_adj GPIO does not need to be adjusted. */
+/* #define FLASHLIGHT_ADJ_FUNC */
+
+static struct flashlight_struct *this_fl_str;
+
+static void flashlight_hw_command(uint8_t addr, uint8_t data)
+{
+	uint8_t loop_i, loop_j;
+	const uint8_t fl_addr_to_rising_count[4] = { 17, 18, 19, 20 };
+	uint8_t loop_tmp;
+	if (!this_fl_str->gpio_torch && !this_fl_str->gpio_flash) {
+		printk(KERN_ERR "%s: GPIOs not configured, 
torch: %d, flash: %d\n", + __func__, this_fl_str->gpio_torch, + this_fl_str->gpio_flash); + return; + } + for (loop_j = 0; loop_j < 2; loop_j++) { + if (!loop_j) + loop_tmp = fl_addr_to_rising_count[addr]; + else + loop_tmp = data; + for (loop_i = 0; loop_i < loop_tmp; loop_i++) { + gpio_direction_output(this_fl_str->gpio_torch, 0); + udelay(2); + gpio_direction_output(this_fl_str->gpio_torch, 1); + udelay(2); + } + udelay(500); + } +} + +static void flashlight_turn_off(void) +{ + gpio_direction_output(this_fl_str->gpio_flash, 0); + gpio_direction_output(this_fl_str->gpio_torch, 0); + this_fl_str->mode_status = FL_MODE_OFF; + this_fl_str->fl_lcdev.brightness = LED_OFF; + wake_unlock(&this_fl_str->wake_lock); +} + +static enum hrtimer_restart flashlight_hrtimer_func(struct hrtimer *timer) +{ + struct flashlight_struct *fl_str = container_of(timer, + struct flashlight_struct, timer); + wake_unlock(&fl_str->wake_lock); + spin_lock_irqsave(&fl_str->spin_lock, fl_str->spinlock_flags); + flashlight_turn_off(); + spin_unlock_irqrestore(&fl_str->spin_lock, fl_str->spinlock_flags); + printk(KERN_INFO "%s: turn off flash mode\n", __func__); + return HRTIMER_NORESTART; +} + +int flashlight_control(int mode) +{ + int ret = 0; + uint32_t flash_ns = ktime_to_ns(ktime_get()); + +#if 0 /* disable flash_adj_value check now */ + if (this_fl_str->flash_adj_value == 2) { + printk(KERN_WARNING "%s: force disable function!\n", __func__); + return -EIO; + } +#endif + spin_lock_irqsave(&this_fl_str->spin_lock, + this_fl_str->spinlock_flags); + if (this_fl_str->mode_status == FL_MODE_FLASH) { + hrtimer_cancel(&this_fl_str->timer); + wake_unlock(&this_fl_str->wake_lock); + flashlight_turn_off(); + } + switch (mode) { + case FL_MODE_OFF: + flashlight_turn_off(); + break; + case FL_MODE_TORCH: + flashlight_hw_command(3, 1); + flashlight_hw_command(0, 15); + flashlight_hw_command(2, 4); + this_fl_str->mode_status = FL_MODE_TORCH; + this_fl_str->fl_lcdev.brightness = LED_HALF; + break; + case FL_MODE_TORCH_LED_A: + flashlight_hw_command(3, 1); + flashlight_hw_command(0, 15); + flashlight_hw_command(2, 3); + this_fl_str->mode_status = FL_MODE_TORCH_LED_A; + this_fl_str->fl_lcdev.brightness = 1; + break; + case FL_MODE_TORCH_LED_B: + flashlight_hw_command(3, 1); + flashlight_hw_command(0, 15); + flashlight_hw_command(2, 2); + this_fl_str->mode_status = FL_MODE_TORCH_LED_B; + this_fl_str->fl_lcdev.brightness = 2; + break; + case FL_MODE_FLASH: + flashlight_hw_command(2, 4); + gpio_direction_output(this_fl_str->gpio_flash, 1); + this_fl_str->mode_status = FL_MODE_FLASH; + this_fl_str->fl_lcdev.brightness = LED_FULL; + hrtimer_start(&this_fl_str->timer, + ktime_set(this_fl_str->flash_sw_timeout_ms / 1000, + (this_fl_str->flash_sw_timeout_ms % 1000) * + NSEC_PER_MSEC), HRTIMER_MODE_REL); + wake_lock(&this_fl_str->wake_lock); + break; + case FL_MODE_PRE_FLASH: + flashlight_hw_command(3, 1); + flashlight_hw_command(0, 9); + flashlight_hw_command(2, 4); + this_fl_str->mode_status = FL_MODE_PRE_FLASH; + this_fl_str->fl_lcdev.brightness = LED_HALF + 1; + break; + case FL_MODE_TORCH_LEVEL_1: + flashlight_hw_command(3, 8); + flashlight_hw_command(0, 15); + flashlight_hw_command(2, 4); + this_fl_str->mode_status = FL_MODE_TORCH_LEVEL_1; + this_fl_str->fl_lcdev.brightness = LED_HALF - 2; + break; + case FL_MODE_TORCH_LEVEL_2: + flashlight_hw_command(3, 4); + flashlight_hw_command(0, 15); + flashlight_hw_command(2, 4); + this_fl_str->mode_status = FL_MODE_TORCH_LEVEL_2; + this_fl_str->fl_lcdev.brightness = LED_HALF - 1; + break; + 
case FL_MODE_DEATH_RAY: + pr_info("%s: death ray\n", __func__); + hrtimer_cancel(&this_fl_str->timer); + gpio_direction_output(this_fl_str->gpio_flash, 0); + udelay(40); + gpio_direction_output(this_fl_str->gpio_flash, 1); + this_fl_str->mode_status = 0; + this_fl_str->fl_lcdev.brightness = 3; + wake_lock(&this_fl_str->wake_lock); + break; + default: + printk(KERN_ERR "%s: unknown flash_light flags: %d\n", + __func__, mode); + ret = -EINVAL; + break; + } + + printk(KERN_DEBUG "%s: mode: %d, %u\n", FLASHLIGHT_NAME, mode, + flash_ns/(1000*1000)); + + spin_unlock_irqrestore(&this_fl_str->spin_lock, + this_fl_str->spinlock_flags); + return ret; +} + +static void fl_lcdev_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct flashlight_struct *fl_str; + enum flashlight_mode_flags mode; + + fl_str = container_of(led_cdev, struct flashlight_struct, fl_lcdev); + if (brightness > 0 && brightness <= LED_HALF) { + /* Torch mode */ + if (brightness == (LED_HALF - 2)) + mode = FL_MODE_TORCH_LEVEL_1; + else if (brightness == (LED_HALF - 1)) + mode = FL_MODE_TORCH_LEVEL_2; + else if (brightness == 1 && fl_str->led_count) + mode = FL_MODE_TORCH_LED_A; + else if (brightness == 2 && fl_str->led_count) + mode = FL_MODE_TORCH_LED_B; + else if (brightness == 3) + mode = FL_MODE_DEATH_RAY; + else + mode = FL_MODE_TORCH; + } else if (brightness > LED_HALF && brightness <= LED_FULL) { + /* Flashlight mode */ + if (brightness == (LED_HALF + 1)) + mode = FL_MODE_PRE_FLASH; /* pre-flash mode */ + else + mode = FL_MODE_FLASH; + } else + /* off and else */ + mode = FL_MODE_OFF; + flashlight_control(mode); + + return; +} + +static void flashlight_early_suspend(struct early_suspend *handler) +{ + struct flashlight_struct *fl_str = container_of(handler, + struct flashlight_struct, early_suspend_flashlight); + if (fl_str != NULL && fl_str->mode_status) { + spin_lock_irqsave(&fl_str->spin_lock, fl_str->spinlock_flags); + flashlight_turn_off(); + spin_unlock_irqrestore(&fl_str->spin_lock, + fl_str->spinlock_flags); + } +} + +static void flashlight_late_resume(struct early_suspend *handler) +{ + /* + struct flashlight_struct *fl_str = container_of(handler, + struct flashlight_struct, early_suspend_flashlight); + */ +} + +static int flashlight_setup_gpio(struct flashlight_platform_data *flashlight, + struct flashlight_struct *fl_str) +{ + int ret = 0; + if (flashlight->gpio_init) + flashlight->gpio_init(); + if (flashlight->torch) { + ret = gpio_request(flashlight->torch, "fl_torch"); + if (ret < 0) { + printk(KERN_ERR "%s: gpio_request(torch) failed\n", + __func__); + return ret; + } + fl_str->gpio_torch = flashlight->torch; + } + + if (flashlight->flash) { + ret = gpio_request(flashlight->flash, "fl_flash"); + if (ret < 0) { + printk(KERN_ERR "%s: gpio_request(flash) failed\n", + __func__); + return ret; + } + fl_str->gpio_flash = flashlight->flash; + } + + if (flashlight->flash_adj) { + ret = gpio_request(flashlight->flash_adj, "fl_flash_adj"); + if (ret < 0) { + printk(KERN_ERR "%s: gpio_request(flash_adj) failed\n", + __func__); + return ret; + } + fl_str->gpio_flash_adj = flashlight->flash_adj; + gpio_set_value(fl_str->gpio_flash_adj, 0); + fl_str->flash_adj_gpio_status = 0; + printk(KERN_DEBUG "%s: enable flash_adj function\n", + FLASHLIGHT_NAME); + } + if (flashlight->flash_duration_ms) + fl_str->flash_sw_timeout_ms = flashlight->flash_duration_ms; + else /* load default value */ + fl_str->flash_sw_timeout_ms = 600; + return ret; +} + +static int flashlight_free_gpio(struct 
flashlight_platform_data *flashlight, + struct flashlight_struct *fl_str) +{ + int ret = 0; + if (fl_str->gpio_torch) { + gpio_free(flashlight->torch); + fl_str->gpio_torch = 0; + } + + if (fl_str->gpio_flash) { + gpio_free(flashlight->flash); + fl_str->gpio_flash = 0; + } + + if (fl_str->gpio_flash_adj) { + gpio_free(flashlight->flash_adj); + fl_str->gpio_flash_adj = 0; + } + + return ret; +} + +#ifdef FLASHLIGHT_ADJ_FUNC +static ssize_t show_flash_adj(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t length; + length = sprintf(buf, "%d\n", this_fl_str->flash_adj_value); + return length; +} + +static ssize_t store_flash_adj(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + static int tmp, adj_tmp; + if ((buf[0] == '0' || buf[0] == '1' || buf[0] == '2') + && buf[1] == '\n') { + spin_lock_irqsave(&this_fl_str->spin_lock, + this_fl_str->spinlock_flags); + tmp = buf[0] - 0x30; + if (tmp == this_fl_str->flash_adj_value) { + spin_unlock_irqrestore(&this_fl_str->spin_lock, + this_fl_str->spinlock_flags); + printk(KERN_NOTICE "%s: status is same(%d)\n", + __func__, this_fl_str->flash_adj_value); + return count; + } + adj_tmp = this_fl_str->gpio_flash_adj; + switch (tmp) { + case 2: + flashlight_turn_off(); + break; + case 1: + /* + if (this_fl_str->flash_adj_gpio_status) { + gpio_set_value(adj_tmp, 0); + this_fl_str->flash_adj_gpio_status = 0; + } + */ + break; + case 0: + /* + if (!this_fl_str->flash_adj_gpio_status) { + gpio_set_value(adj_tmp, 1); + this_fl_str->flash_adj_gpio_status = 1; + } + */ + break; + } + this_fl_str->flash_adj_value = tmp; + spin_unlock_irqrestore(&this_fl_str->spin_lock, + this_fl_str->spinlock_flags); + } + return count; +} + +static DEVICE_ATTR(flash_adj, 0666, show_flash_adj, store_flash_adj); +#endif + +static int flashlight_probe(struct platform_device *pdev) +{ + + struct flashlight_platform_data *flashlight = pdev->dev.platform_data; + struct flashlight_struct *fl_str; + int err = 0; + + fl_str = kzalloc(sizeof(struct flashlight_struct), GFP_KERNEL); + if (!fl_str) { + printk(KERN_ERR "%s: kzalloc fail !!!\n", __func__); + return -ENOMEM; + } + + err = flashlight_setup_gpio(flashlight, fl_str); + if (err < 0) { + printk(KERN_ERR "%s: setup GPIO fail !!!\n", __func__); + goto fail_free_mem; + } + spin_lock_init(&fl_str->spin_lock); + wake_lock_init(&fl_str->wake_lock, WAKE_LOCK_SUSPEND, pdev->name); + fl_str->fl_lcdev.name = pdev->name; + fl_str->fl_lcdev.brightness_set = fl_lcdev_brightness_set; + fl_str->fl_lcdev.brightness = 0; + err = led_classdev_register(&pdev->dev, &fl_str->fl_lcdev); + if (err < 0) { + printk(KERN_ERR "failed on led_classdev_register\n"); + goto fail_free_gpio; + } +#ifdef FLASHLIGHT_ADJ_FUNC + if (fl_str->gpio_flash_adj) { + printk(KERN_DEBUG "%s: flash_adj exist, create attr file\n", + __func__); + err = device_create_file(fl_str->fl_lcdev.dev, + &dev_attr_flash_adj); + if (err != 0) + printk(KERN_WARNING "dev_attr_flash_adj failed\n"); + } +#endif +#ifdef CONFIG_HAS_EARLYSUSPEND + fl_str->early_suspend_flashlight.suspend = flashlight_early_suspend; + fl_str->early_suspend_flashlight.resume = flashlight_late_resume; + register_early_suspend(&fl_str->early_suspend_flashlight); +#endif + hrtimer_init(&fl_str->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + fl_str->timer.function = flashlight_hrtimer_func; + fl_str->led_count = flashlight->led_count; + + this_fl_str = fl_str; + printk(KERN_INFO "%s: The Flashlight Driver is ready\n", __func__); + return 0; + +fail_free_gpio: + 
wake_lock_destroy(&fl_str->wake_lock); + flashlight_free_gpio(flashlight, fl_str); +fail_free_mem: + kfree(fl_str); + printk(KERN_ERR "%s: The Flashlight driver is Failure\n", __func__); + return err; +} + +static int flashlight_remove(struct platform_device *pdev) +{ + struct flashlight_platform_data *flashlight = pdev->dev.platform_data; + + flashlight_turn_off(); + hrtimer_cancel(&this_fl_str->timer); + unregister_early_suspend(&this_fl_str->early_suspend_flashlight); +#ifdef FLASHLIGHT_ADJ_FUNC + if (this_fl_str->gpio_flash_adj) { + device_remove_file(this_fl_str->fl_lcdev.dev, + &dev_attr_flash_adj); + } +#endif + led_classdev_unregister(&this_fl_str->fl_lcdev); + wake_lock_destroy(&this_fl_str->wake_lock); + flashlight_free_gpio(flashlight, this_fl_str); + + kfree(this_fl_str); + return 0; +} + +static struct platform_driver flashlight_driver = { + .probe = flashlight_probe, + .remove = flashlight_remove, + .driver = { + .name = FLASHLIGHT_NAME, + .owner = THIS_MODULE, + }, +}; + +static int __init flashlight_init(void) +{ + return platform_driver_register(&flashlight_driver); +} + +static void __exit flashlight_exit(void) +{ + platform_driver_unregister(&flashlight_driver); +} + +module_init(flashlight_init); +module_exit(flashlight_exit); + +MODULE_DESCRIPTION("flash light driver"); +MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-msm/board-supersonic-flashlight.h b/arch/arm/mach-msm/board-supersonic-flashlight.h new file mode 100644 index 0000000000000..08efd10298f50 --- /dev/null +++ b/arch/arm/mach-msm/board-supersonic-flashlight.h @@ -0,0 +1,57 @@ +/* + * arch/arm/mach-msm/include/mach/msm_flashlight.h - The flashlight header + * Copyright (C) 2009 HTC Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef __ASM_ARCH_MSM8X50_FLASHLIGHT_H +#define __ASM_ARCH_MSM8X50_FLASHLIGHT_H +#include + +#define FLASHLIGHT_NAME "flashlight" + +#define FLASHLIGHT_OFF 0 +#define FLASHLIGHT_TORCH 1 +#define FLASHLIGHT_FLASH 2 +#define FLASHLIGHT_NUM 3 + +enum flashlight_mode_flags { + FL_MODE_OFF = 0, + FL_MODE_TORCH, + FL_MODE_FLASH, + FL_MODE_PRE_FLASH, + FL_MODE_TORCH_LED_A, + FL_MODE_TORCH_LED_B, + FL_MODE_TORCH_LEVEL_1, + FL_MODE_TORCH_LEVEL_2, + FL_MODE_DEATH_RAY, +}; + +struct flashlight_platform_data { + void (*gpio_init) (void); + uint32_t torch; + uint32_t flash; + uint32_t flash_adj; + uint32_t flash_duration_ms; + uint8_t led_count; /* 0: 1 LED, 1: 2 LED */ +}; + +int flashlight_control(int mode); +int aat1271_flashlight_control(int mode); +int adp1650_flashlight_control(int mode); + +#undef __ASM_ARCH_MSM8X50_FLASHLIGHT_H +#endif diff --git a/arch/arm/mach-msm/board-supersonic-keypad.c b/arch/arm/mach-msm/board-supersonic-keypad.c new file mode 100644 index 0000000000000..3132719c41d2c --- /dev/null +++ b/arch/arm/mach-msm/board-supersonic-keypad.c @@ -0,0 +1,96 @@ +/* arch/arm/mach-msm/board-supersonic-keypad.c + * + * Copyright (C) 2009 Google, Inc + * Copyright (C) 2009 HTC Corporation. + * + * Author: Dima Zavin + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "board-supersonic.h" + + +static struct gpio_event_direct_entry supersonic_keypad_nav_map[] = { + { SUPERSONIC_POWER_KEY, KEY_POWER }, + { SUPERSONIC_VOLUME_UP, KEY_VOLUMEUP }, + { SUPERSONIC_VOLUME_DOWN, KEY_VOLUMEDOWN }, +}; + +static struct gpio_event_input_info supersonic_keypad_nav_info = { + .info.func = gpio_event_input_func, + .flags = GPIOEDF_PRINT_KEYS, + .type = EV_KEY, + .keymap = supersonic_keypad_nav_map, + .debounce_time.tv.nsec = 20 * NSEC_PER_MSEC, + .keymap_size = ARRAY_SIZE(supersonic_keypad_nav_map) +}; + +static struct gpio_event_info *supersonic_keypad_info[] = { + &supersonic_keypad_nav_info.info, +}; + +static struct gpio_event_platform_data supersonic_keypad_data = { + .name = "supersonic-keypad", + .info = supersonic_keypad_info, + .info_count = ARRAY_SIZE(supersonic_keypad_info) +}; + +static struct platform_device supersonic_keypad_device = { + .name = GPIO_EVENT_DEV_NAME, + .id = 0, + .dev = { + .platform_data = &supersonic_keypad_data, + }, +}; + +static struct keyreset_platform_data supersonic_reset_keys_pdata = { + .keys_down = { + KEY_POWER, + KEY_VOLUMEUP, + KEY_VOLUMEDOWN, + 0 + }, +}; + +static struct platform_device supersonic_reset_keys_device = { + .name = KEYRESET_NAME, + .dev.platform_data = &supersonic_reset_keys_pdata, +}; + +static int __init supersonic_init_keypad(void) +{ + int ret; + + if (!machine_is_supersonic()) + return 0; + + if (platform_device_register(&supersonic_reset_keys_device)) + printk(KERN_WARNING "%s: register reset key fail\n", __func__); + + ret = platform_device_register(&supersonic_keypad_device); + if (ret != 0) + return ret; + + return 0; +} + +device_initcall(supersonic_init_keypad); + + diff --git a/arch/arm/mach-msm/board-supersonic-microp.c 
b/arch/arm/mach-msm/board-supersonic-microp.c new file mode 100644 index 0000000000000..c1b8f4def28d2 --- /dev/null +++ b/arch/arm/mach-msm/board-supersonic-microp.c @@ -0,0 +1,437 @@ +/* arch/arm/mach-msm/board-supersonic-microp.c + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. +*/ +#ifdef CONFIG_MICROP_COMMON +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "board-supersonic.h" + +#define INT_PSENSOR (1<<4) + +static int misc_opened; +static unsigned int als_power_control; +static struct mutex als_power_lock; + +static void p_sensor_do_work(struct work_struct *w); +static DECLARE_WORK(p_sensor_work, p_sensor_do_work); + +struct wake_lock proximity_wake_lock; + +static struct capella_cm3602_data { + struct input_dev *input_dev; + struct capella_cm3602_platform_data *pdata; + int enabled; + struct workqueue_struct *p_sensor_wq; +} the_data; + +static int psensor_intr_enable(uint8_t enable) +{ + int ret; + uint8_t addr, data[2]; + + if (enable) + addr = MICROP_I2C_WCMD_GPI_INT_CTL_EN; + else + addr = MICROP_I2C_WCMD_GPI_INT_CTL_DIS; + + data[0] = INT_PSENSOR >> 8; + data[1] = INT_PSENSOR & 0xFF; + ret = microp_i2c_write(addr, data, 2); + if (ret < 0) + pr_err("%s: %s p-sensor interrupt failed\n", + __func__, (enable ? "enable" : "disable")); + + return ret; +} + +static int supersonic_microp_function_init(struct i2c_client *client) +{ + struct microp_i2c_platform_data *pdata; + struct microp_i2c_client_data *cdata; + uint8_t data[20]; + int i, j; + int ret; + + pdata = client->dev.platform_data; + cdata = i2c_get_clientdata(client); + + /* Headset remote key */ + ret = microp_function_check(client, MICROP_FUNCTION_REMOTEKEY); + if (ret >= 0) { + i = ret; + pdata->function_node[MICROP_FUNCTION_REMOTEKEY] = i; + cdata->int_pin.int_remotekey = + pdata->microp_function[i].int_pin; + + for (j = 0; j < 6; j++) { + data[j] = (uint8_t)(pdata->microp_function[i].levels[j] >> 8); + data[j + 6] = (uint8_t)(pdata->microp_function[i].levels[j]); + } + ret = microp_i2c_write(MICROP_I2C_WCMD_REMOTEKEY_TABLE, + data, 12); + if (ret) + goto exit; + } + + /* Reset button interrupt */ + ret = microp_write_interrupt(client, (1<<8), 1); + if (ret) + goto exit; + + /* HDMI interrupt */ + ret = microp_write_interrupt(client, (1 << 1), 1); + if (ret) + goto exit; + + return 0; + +exit: + return ret; +} + +static int report_psensor_data(void) +{ + int ret, ps_data = 0; + uint8_t data[3] = {0, 0, 0}; + + ret = microp_i2c_read(MICROP_I2C_RCMD_GPIO_STATUS, data, 3); + if (ret < 0) + pr_err("%s: read data failed\n", __func__); + else { + ps_data = (data[2] & 0x10) ? 1 : 0; + pr_info("proximity %s\n", ps_data ? 
"FAR" : "NEAR"); + + /* 0 is close, 1 is far */ + input_report_abs(the_data.input_dev, ABS_DISTANCE, ps_data); + input_sync(the_data.input_dev); + + wake_lock_timeout(&proximity_wake_lock, 2*HZ); + } + + return ret; +} + +static int capella_cm3602_enable(struct capella_cm3602_data *data) +{ + int rc; + pr_info("%s\n", __func__); + if (data->enabled) { + pr_info("%s: already enabled\n", __func__); + return 0; + } + + /* dummy report */ + input_report_abs(data->input_dev, ABS_DISTANCE, -1); + input_sync(data->input_dev); + + rc = data->pdata->power(PS_PWR_ON, 1); + if (rc < 0) + return -EIO; + + rc = gpio_direction_output(data->pdata->p_en, 0); + if (rc < 0) { + pr_err("%s: set psesnor enable failed!!", + __func__); + return -EIO; + } + msleep(220); + rc = psensor_intr_enable(1); + if (rc < 0) + return -EIO; + + data->enabled = 1; + report_psensor_data(); + + return rc; +} + +static int capella_cm3602_disable(struct capella_cm3602_data *data) +{ + int rc = -EIO; + pr_info("%s\n", __func__); + if (!data->enabled) { + pr_info("%s: already disabled\n", __func__); + return 0; + } + + rc = psensor_intr_enable(0); + if (rc < 0) + return -EIO; + + rc = gpio_direction_output(data->pdata->p_en, 1); + if (rc < 0) { + pr_err("%s: set GPIO failed!!", __func__); + return -EIO; + } + + rc = data->pdata->power(PS_PWR_ON, 0); + if (rc < 0) + return -EIO; + + data->enabled = 0; + return rc; +} + +static ssize_t capella_cm3602_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret; + + ret = sprintf(buf, "proximity enabled = %d\n", the_data.enabled); + + return ret; +} + +static ssize_t capella_cm3602_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count + ) +{ + ssize_t val; + + val = -1; + sscanf(buf, "%u", &val); + if (val < 0 || val > 1) + return -EINVAL; + + /* Enable capella_cm3602*/ + if (val == 1) + capella_cm3602_enable(&the_data); + + /* Disable capella_cm3602*/ + if (val == 0) + capella_cm3602_disable(&the_data); + + return count; +} + +static DEVICE_ATTR(proximity, 0644, capella_cm3602_show, capella_cm3602_store); + +static int capella_cm3602_open(struct inode *inode, struct file *file) +{ + pr_info("%s\n", __func__); + if (misc_opened) + return -EBUSY; + misc_opened = 1; + return 0; +} + +static int capella_cm3602_release(struct inode *inode, struct file *file) +{ + pr_info("%s\n", __func__); + misc_opened = 0; + return capella_cm3602_disable(&the_data); +} + +static long capella_cm3602_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + int val; + pr_info("%s cmd %d\n", __func__, _IOC_NR(cmd)); + switch (cmd) { + case CAPELLA_CM3602_IOCTL_ENABLE: + if (get_user(val, (unsigned long __user *)arg)) + return -EFAULT; + if (val) + return capella_cm3602_enable(&the_data); + else + return capella_cm3602_disable(&the_data); + break; + case CAPELLA_CM3602_IOCTL_GET_ENABLED: + return put_user(the_data.enabled, (unsigned long __user *)arg); + break; + default: + pr_err("%s: invalid cmd %d\n", __func__, _IOC_NR(cmd)); + return -EINVAL; + } +} + +static void p_sensor_do_work(struct work_struct *w) +{ + report_psensor_data(); +} + +static irqreturn_t p_sensor_irq_handler(int irq, void *data) +{ + struct capella_cm3602_data *ip = data; + queue_work(ip->p_sensor_wq, &p_sensor_work); + + return IRQ_HANDLED; +} + +static struct file_operations capella_cm3602_fops = { + .owner = THIS_MODULE, + .open = capella_cm3602_open, + .release = capella_cm3602_release, + .unlocked_ioctl = capella_cm3602_ioctl +}; + +static struct miscdevice 
capella_cm3602_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "cm3602", + .fops = &capella_cm3602_fops +}; + +static int capella_cm3602_probe(struct platform_device *pdev) +{ + int rc = -1; + struct input_dev *input_dev; + struct capella_cm3602_data *ip; + struct capella_cm3602_platform_data *pdata; + + struct class *proximity_attr_class; + struct device *proximity_attr_dev; + + pr_info("%s: probe\n", __func__); + + pdata = pdev->dev.platform_data; + + ip = &the_data; + platform_set_drvdata(pdev, ip); + + input_dev = input_allocate_device(); + if (!input_dev) { + pr_err("%s: could not allocate input device\n", __func__); + rc = -ENOMEM; + goto done; + } + ip->input_dev = input_dev; + ip->pdata = pdata; + input_set_drvdata(input_dev, ip); + + input_dev->name = "proximity"; + + set_bit(EV_ABS, input_dev->evbit); + input_set_abs_params(input_dev, ABS_DISTANCE, 0, 1, 0, 0); + + rc = input_register_device(input_dev); + if (rc < 0) { + pr_err("%s: could not register input device\n", __func__); + goto err_free_input_device; + } + + rc = misc_register(&capella_cm3602_misc); + if (rc < 0) { + pr_err("%s: could not register misc device\n", __func__); + goto err_unregister_input_device; + } + + wake_lock_init(&proximity_wake_lock, WAKE_LOCK_SUSPEND, "proximity"); + + proximity_attr_class = class_create(THIS_MODULE, "sensors"); + if (IS_ERR(proximity_attr_class)) { + pr_err("%s: class_create failed\n", __func__); + rc = PTR_ERR(proximity_attr_class); + proximity_attr_class = NULL; + goto err_create_class; + } + + proximity_attr_dev = device_create(proximity_attr_class, + NULL, 0, "%s", "proximity_sensor"); + if (unlikely(IS_ERR(proximity_attr_dev))) { + pr_err("%s: device create failed\n", __func__); + rc = PTR_ERR(proximity_attr_dev); + proximity_attr_dev = NULL; + goto err_create_proximity_attr_device; + } + + rc = device_create_file(proximity_attr_dev, &dev_attr_proximity); + if (rc) { + pr_err("%s: device_create_file failed\n", __func__); + goto err_create_proximity_device_file; + } + + ip->p_sensor_wq = create_workqueue("p-sensor_microp_wq"); + if (ip->p_sensor_wq == NULL) { + pr_err("%s: create_workqueue failed\n", __func__); + goto err_create_workqueue; + } + + rc = gpio_request(pdata->p_en, "gpio_proximity_en"); + if (rc < 0) { + pr_err("%s: gpio %d request failed (%d)\n", + __func__, pdata->p_en, rc); + goto err_request_proximity_en; + } + + rc = request_irq(pdata->p_out, p_sensor_irq_handler, + IRQF_TRIGGER_NONE, "p-sensor_microp", ip); + if (rc < 0) { + pr_err("%s: request_irq(%d) failed for (%d)\n", + __func__, pdata->p_out, rc); + goto err_request_proximity_irq; + } + + goto done; + +err_request_proximity_irq: + gpio_free(pdata->p_en); +err_request_proximity_en: + destroy_workqueue(ip->p_sensor_wq); +err_create_workqueue: + device_remove_file(proximity_attr_dev, &dev_attr_proximity); +err_create_proximity_device_file: + device_unregister(proximity_attr_dev); +err_create_proximity_attr_device: + class_destroy(proximity_attr_class); +err_create_class: + misc_deregister(&capella_cm3602_misc); +err_unregister_input_device: + input_unregister_device(input_dev); +err_free_input_device: + input_free_device(input_dev); +done: + return rc; +} + +static struct microp_ops ops = { + .init_microp_func = supersonic_microp_function_init, +}; + +void __init supersonic_microp_init(void) +{ + microp_register_ops(&ops); +} + +static struct platform_driver capella_cm3602_driver = { + .probe = capella_cm3602_probe, + .driver = { + .name = "supersonic_proximity", + .owner = THIS_MODULE + }, +}; + +static 
int __init supersonic_capella_cm3602_init(void) +{ + if (!machine_is_supersonic()) + return 0; + + return platform_driver_register(&capella_cm3602_driver); +} + +device_initcall(supersonic_capella_cm3602_init); + +#endif diff --git a/arch/arm/mach-msm/board-supersonic-mmc.c b/arch/arm/mach-msm/board-supersonic-mmc.c new file mode 100644 index 0000000000000..75a7041eaa5ed --- /dev/null +++ b/arch/arm/mach-msm/board-supersonic-mmc.c @@ -0,0 +1,666 @@ +/* linux/arch/arm/mach-msm/board-supersonic-mmc.c + * + * Copyright (C) 2009 Google, Inc. + * Copyright (C) 2009 HTC Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "board-supersonic.h" +#include "devices.h" +#include "proc_comm.h" + +#define DEBUG_SDSLOT_VDD 1 + +static bool opt_disable_sdcard; +static int __init supersonic_disablesdcard_setup(char *str) +{ + opt_disable_sdcard = (bool)simple_strtol(str, NULL, 0); + return 1; +} + +__setup("board_supersonic.disable_sdcard=", supersonic_disablesdcard_setup); + +static uint32_t sdcard_on_gpio_table[] = { + PCOM_GPIO_CFG(62, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* CLK */ + PCOM_GPIO_CFG(63, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* CMD */ + PCOM_GPIO_CFG(64, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* DAT3 */ + PCOM_GPIO_CFG(65, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* DAT2 */ + PCOM_GPIO_CFG(66, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(67, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT0 */ +}; + +static uint32_t sdcard_off_gpio_table[] = { + PCOM_GPIO_CFG(62, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CLK */ + PCOM_GPIO_CFG(63, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CMD */ + PCOM_GPIO_CFG(64, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(65, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(66, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(67, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT0 */ +}; + +static struct vreg *sdslot_vreg; +static uint32_t sdslot_vdd = 0xffffffff; +static uint32_t sdslot_vreg_enabled; + +static struct { + int mask; + int level; +} mmc_vdd_table[] = { + { MMC_VDD_28_29, 2850 }, + { MMC_VDD_29_30, 2900 }, +}; + +static uint32_t supersonic_sdslot_switchvdd(struct device *dev, unsigned int vdd) +{ + int i; + int ret; + + if (vdd == sdslot_vdd) + return 0; + + sdslot_vdd = vdd; + + if (vdd == 0) { +#if DEBUG_SDSLOT_VDD + printk(KERN_INFO "%s: Disabling SD slot power\n", __func__); +#endif + config_gpio_table(sdcard_off_gpio_table, + ARRAY_SIZE(sdcard_off_gpio_table)); + vreg_disable(sdslot_vreg); + sdslot_vreg_enabled = 0; + return 0; + } + + if (!sdslot_vreg_enabled) { + mdelay(5); + ret = vreg_enable(sdslot_vreg); + if (ret) + pr_err("%s: Error enabling vreg (%d)\n", __func__, ret); + udelay(500); + config_gpio_table(sdcard_on_gpio_table, + ARRAY_SIZE(sdcard_on_gpio_table)); + sdslot_vreg_enabled = 1; + } + + for (i = 0; i < ARRAY_SIZE(mmc_vdd_table); i++) { + if (mmc_vdd_table[i].mask != (1 << vdd)) + 
continue; + ret = vreg_set_level(sdslot_vreg, mmc_vdd_table[i].level); + if (ret) + pr_err("%s: Error setting level (%d)\n", __func__, ret); +#if DEBUG_SDSLOT_VDD + printk(KERN_INFO "%s: Setting level to %u (%s)\n", + __func__, mmc_vdd_table[i].level, + ret?"Failed":"Success"); +#endif + return 0; + } + + pr_err("%s: Invalid VDD (%d) specified\n", __func__, vdd); + return 0; +} + +static unsigned int supersonic_sdslot_status(struct device *dev) +{ + return (system_rev > 0)?1:!gpio_get_value(SUPERSONIC_GPIO_SDMC_CD_N); +} + +#define SUPERSONIC_MMC_VDD (MMC_VDD_28_29 | MMC_VDD_29_30) + +static unsigned int supersonic_sdslot_type = MMC_TYPE_SD; + +static struct msm_mmc_platform_data supersonic_sdslot_data = { + .ocr_mask = SUPERSONIC_MMC_VDD, + .status = supersonic_sdslot_status, + .translate_vdd = supersonic_sdslot_switchvdd, + .slot_type = &supersonic_sdslot_type, +}; + +/* ---- WIFI ---- */ + +static uint32_t wifi_on_gpio_table[] = { + PCOM_GPIO_CFG(51, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(52, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(53, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(54, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT0 */ + PCOM_GPIO_CFG(55, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* CMD */ + PCOM_GPIO_CFG(56, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* CLK */ + PCOM_GPIO_CFG(152, 0, GPIO_INPUT, GPIO_NO_PULL, GPIO_4MA), /* WLAN IRQ */ +}; + +static uint32_t wifi_off_gpio_table[] = { + PCOM_GPIO_CFG(51, 0, GPIO_INPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(52, 0, GPIO_INPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(53, 0, GPIO_INPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(54, 0, GPIO_INPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT0 */ + PCOM_GPIO_CFG(55, 0, GPIO_INPUT, GPIO_PULL_UP, GPIO_4MA), /* CMD */ + PCOM_GPIO_CFG(56, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CLK */ + PCOM_GPIO_CFG(152, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA), /* WLAN IRQ */ +}; + +/* BCM4329 returns wrong sdio_vsn(1) when we read cccr, + * we use predefined value (sdio_vsn=2) here to initial sdio driver well + */ +static struct embedded_sdio_data supersonic_wifi_emb_data = { + .cccr = { + .sdio_vsn = 2, + .multi_block = 1, + .low_speed = 0, + .wide_bus = 0, + .high_power = 1, + .high_speed = 1, + }, + .cis = { + .vendor = 0x02d0, + .device = 0x4329, + }, +}; + +static void (*wifi_status_cb)(int card_present, void *dev_id); +static void *wifi_status_cb_devid; + +static int +supersonic_wifi_status_register(void (*callback)(int card_present, void *dev_id), + void *dev_id) +{ + if (wifi_status_cb) + return -EAGAIN; + wifi_status_cb = callback; + wifi_status_cb_devid = dev_id; + return 0; +} + +static int supersonic_wifi_cd; /* WiFi virtual 'card detect' status */ + +static unsigned int supersonic_wifi_status(struct device *dev) +{ + return supersonic_wifi_cd; +} + +static struct msm_mmc_platform_data supersonic_wifi_data = { + .ocr_mask = MMC_VDD_28_29, + .built_in = 1, + .status = supersonic_wifi_status, + .register_status_notify = supersonic_wifi_status_register, + .embedded_sdio = &supersonic_wifi_emb_data, +}; + +int supersonic_wifi_set_carddetect(int val) +{ + printk(KERN_INFO "%s: %d\n", __func__, val); + supersonic_wifi_cd = val; + if (wifi_status_cb) + wifi_status_cb(val, wifi_status_cb_devid); + else + printk(KERN_WARNING "%s: Nobody to notify\n", __func__); + return 0; +} +EXPORT_SYMBOL(supersonic_wifi_set_carddetect); + +static int supersonic_wifi_power_state; + +int 
supersonic_wifi_power(int on) +{ + printk(KERN_INFO "%s: %d\n", __func__, on); + + if (on) { + config_gpio_table(wifi_on_gpio_table, + ARRAY_SIZE(wifi_on_gpio_table)); + mdelay(50); + } else { + config_gpio_table(wifi_off_gpio_table, + ARRAY_SIZE(wifi_off_gpio_table)); + } + + mdelay(100); + gpio_set_value(SUPERSONIC_GPIO_WIFI_SHUTDOWN_N, on); /* WIFI_SHUTDOWN */ + mdelay(200); + + supersonic_wifi_power_state = on; + return 0; +} + +static int supersonic_wifi_reset_state; + +int supersonic_wifi_reset(int on) +{ + printk(KERN_INFO "%s: do nothing\n", __func__); + supersonic_wifi_reset_state = on; + return 0; +} + + +/* ---------------- WiMAX GPIO Settings --------------- */ +static uint32_t wimax_power_pin_gpio_table[] = { + PCOM_GPIO_CFG(48, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), + PCOM_GPIO_CFG(106, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), + PCOM_GPIO_CFG(154, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), + PCOM_GPIO_CFG(155, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), + PCOM_GPIO_CFG(156, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA) +}; + +static uint32_t wimax_on_gpio_table[] = { + PCOM_GPIO_CFG(88, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* CLK */ + PCOM_GPIO_CFG(89, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* CMD */ + PCOM_GPIO_CFG(90, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* DAT3 */ + PCOM_GPIO_CFG(91, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* DAT2 */ + PCOM_GPIO_CFG(92, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* DAT1 */ + PCOM_GPIO_CFG(93, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* DAT0 */ + /*WiMax_Host_2*/ + PCOM_GPIO_CFG(159, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), +}; + +static uint32_t wimax_off_gpio_table[] = { + PCOM_GPIO_CFG(88, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* CLK */ + PCOM_GPIO_CFG(89, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* CMD */ + PCOM_GPIO_CFG(90, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* DAT3 */ + PCOM_GPIO_CFG(91, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* DAT2 */ + PCOM_GPIO_CFG(92, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* DAT1 */ + PCOM_GPIO_CFG(93, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* DAT0 */ + /*WiMax_Host_2*/ + PCOM_GPIO_CFG(159, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), +}; + + +static void (*wimax_status_cb)(int card_present, void *dev_id); +static void *wimax_status_cb_devid; +static int mmc_wimax_cd = 0; +static int mmc_wimax_sdio_status = 0; +static int mmc_wimax_netlog_status = 0; +static int mmc_wimax_sdio_interrupt_log_status = 0; +static int mmc_wimax_netlog_withraw_status = 0; +static int mmc_wimax_cliam_host_status = 0; +static int mmc_wimax_busclk_pwrsave = 1; // Default is dynamic CLK OFF +static int mmc_wimax_CMD53_timeout_trigger_counter = 0; +static int mmc_wimax_hostwakeup_gpio = 40; // GPIO40 +static int mmc_wimax_thp_log_status = 0; +static int mmc_wimax_sdio_hw_reset = 0; // Rollback to default disabled HW RESET +static int mmc_wimax_packet_filter = 0; + +static int supersonic_wimax_status_register(void (*callback)(int card_present, void *dev_id), void *dev_id) +{ + if (wimax_status_cb) + return -EAGAIN; + printk("%s\n", __func__); + wimax_status_cb = callback; + wimax_status_cb_devid = dev_id; + return 0; +} + +static unsigned int supersonic_wimax_status(struct device *dev) +{ + printk("%s\n", __func__); + return mmc_wimax_cd; +} + +void mmc_wimax_set_carddetect(int val) +{ + printk("%s: %d\n", __func__, val); + mmc_wimax_cd = val; + if (wimax_status_cb) { + wimax_status_cb(val, wimax_status_cb_devid); + } else + printk(KERN_WARNING "%s: Nobody to notify\n", __func__); +} +EXPORT_SYMBOL(mmc_wimax_set_carddetect); + 
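+/*
+ * The WiMAX slot registered below (supersonic_wimax_data) has no physical
+ * card-detect line.  Card presence is emulated in software: the external
+ * WiMAX driver is expected to call mmc_wimax_set_carddetect(), which updates
+ * mmc_wimax_cd and then invokes the status-notify callback that the MMC host
+ * registered through supersonic_wimax_status_register(), mirroring the WiFi
+ * slot handling above.
+ */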
+static unsigned int supersonic_wimax_type = MMC_TYPE_SDIO_WIMAX; + +static struct msm_mmc_platform_data supersonic_wimax_data = { + .ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30, + .built_in = 1, + .status = supersonic_wimax_status, + .register_status_notify = supersonic_wimax_status_register, + .embedded_sdio = NULL, + .slot_type = &supersonic_wimax_type, +}; + +struct _vreg +{ + const char *name; + unsigned id; +}; + + +/* 2 : wimax UART, 1 : CPU uart, 0 : usb +CPU_WIMAX_SW -> GPIO160 (SUPERSONIC_WIMAX_CPU_UARTz_SW) +USB_UART#_SW -> GPIO33 (SUPERSONIC_USB_UARTz_SW) + +XA : GPIO33 = 0 -> USB + GPIO33 = 1 -> CPU UART + +XB : GPIO33 = 0 -> USB + GPIO33 = 1 , GPIO160 = 0 -> CPU UART + GPIO33 = 1 , GPIO160 = 1 -> Wimax UART +*/ +int wimax_uart_switch = 0; +int mmc_wimax_uart_switch(int uart) +{ + printk("%s uart:%d\n", __func__, uart); + wimax_uart_switch = uart; + + gpio_set_value(SUPERSONIC_USB_UARTz_SW, uart?1:0); + if(system_rev && uart) + gpio_set_value(SUPERSONIC_WIMAX_CPU_UARTz_SW, uart==2?1:0); + return uart?1:0; +} +EXPORT_SYMBOL(mmc_wimax_uart_switch); + +int mmc_wimax_get_uart_switch(void) +{ + printk("%s uart:%d\n", __func__, wimax_uart_switch); + return wimax_uart_switch?1:0; +} +EXPORT_SYMBOL(mmc_wimax_get_uart_switch); + +static int supersonic_wimax_power_state; + + +int mmc_wimax_power(int on) +{ + printk("%s\n", __func__); + + if (on) { + /*Power ON sequence*/ + gpio_set_value(154, 1); + gpio_set_value(48, 1); + mdelay(5); + gpio_set_value(106, 0); + gpio_set_value(156, 1); + gpio_set_value(155, 1); + mdelay(5); + gpio_set_value(106, 1); + mdelay(1150); + + config_gpio_table(wimax_on_gpio_table, + ARRAY_SIZE(wimax_on_gpio_table)); + } else { + /*Power OFF sequence*/ + config_gpio_table(wimax_off_gpio_table, + ARRAY_SIZE(wimax_off_gpio_table)); + gpio_set_value(88, 0); /*WiMax_SDIO_CLK_1 OL*/ + gpio_set_value(159, 0); /*WiMax_Host_2 OL*/ + + gpio_set_value(106, 1); + mdelay(5); + gpio_set_value(156, 0); + gpio_set_value(155, 0); + gpio_set_value(106, 0); + mdelay(5); + gpio_set_value(154, 0); + gpio_set_value(48, 0); + mdelay(5); + + } +supersonic_wimax_power_state = on; + return 0; +} +EXPORT_SYMBOL(mmc_wimax_power); + +int mmc_wimax_set_status(int on) +{ + printk(KERN_INFO "%s on:%d\n", __func__, on); + mmc_wimax_sdio_status = on; + return 0; +} +EXPORT_SYMBOL(mmc_wimax_set_status); + +int mmc_wimax_get_status(void) +{ + //printk(KERN_INFO "%s status:%d\n", __func__, mmc_wimax_sdio_status); + return mmc_wimax_sdio_status; +} +EXPORT_SYMBOL(mmc_wimax_get_status); + +int mmc_wimax_set_cliam_host_status(int on) +{ + printk(KERN_INFO "%s on:%d\n", __func__, on); + mmc_wimax_cliam_host_status = on; + return 0; +} +EXPORT_SYMBOL(mmc_wimax_set_cliam_host_status); + +int mmc_wimax_get_cliam_host_status(void) +{ + //printk(KERN_INFO "%s status:%d\n", __func__, mmc_wimax_sdio_status); + return mmc_wimax_cliam_host_status; +} +EXPORT_SYMBOL(mmc_wimax_get_cliam_host_status); + +int mmc_wimax_set_netlog_status(int on) +{ + printk(KERN_INFO "%s on:%d\n", __func__, on); + mmc_wimax_netlog_status = on; + return 0; +} +EXPORT_SYMBOL(mmc_wimax_set_netlog_status); + +int mmc_wimax_get_netlog_status(void) +{ + //printk(KERN_INFO "%s status:%d\n", __func__, mmc_wimax_sdio_status); + return mmc_wimax_netlog_status; +} +EXPORT_SYMBOL(mmc_wimax_get_netlog_status); + +int mmc_wimax_set_netlog_withraw_status(int on) +{ + printk(KERN_INFO "%s on:%d\n", __func__, on); + mmc_wimax_netlog_withraw_status = on; + return 0; +} +EXPORT_SYMBOL(mmc_wimax_set_netlog_withraw_status); + +int 
mmc_wimax_get_netlog_withraw_status(void) +{ + //printk(KERN_INFO "%s status:%d\n", __func__, mmc_wimax_netlog_withraw_status); + return mmc_wimax_netlog_withraw_status; +} +EXPORT_SYMBOL(mmc_wimax_get_netlog_withraw_status); + +int mmc_wimax_set_sdio_interrupt_log(int on) +{ + printk(KERN_INFO "%s on:%d\n", __func__, on); + mmc_wimax_sdio_interrupt_log_status = on; + return 0; +} +EXPORT_SYMBOL(mmc_wimax_set_sdio_interrupt_log); + +int mmc_wimax_get_sdio_interrupt_log(void) +{ + //printk(KERN_INFO "%s status:%d\n", __func__, mmc_wimax_sdio_interrupt_log_status); + return mmc_wimax_sdio_interrupt_log_status; +} +EXPORT_SYMBOL(mmc_wimax_get_sdio_interrupt_log); + +int mmc_wimax_set_packet_filter(int on) +{ + printk(KERN_INFO "%s on:%d\n", __func__, on); + mmc_wimax_packet_filter = on; + return 0; +} +EXPORT_SYMBOL(mmc_wimax_set_packet_filter); + +int mmc_wimax_get_packet_filter(void) +{ + //printk(KERN_INFO "%s status:%d\n", __func__, mmc_wimax_packet_filter); + return mmc_wimax_packet_filter; +} +EXPORT_SYMBOL(mmc_wimax_get_packet_filter); + +int mmc_wimax_set_thp_log(int on) +{ + printk(KERN_INFO "%s on:%d\n", __func__, on); + mmc_wimax_thp_log_status = on; + return 0; +} +EXPORT_SYMBOL(mmc_wimax_set_thp_log); + +int mmc_wimax_get_thp_log(void) +{ + //printk(KERN_INFO "%s status:%d\n", __func__, mmc_wimax_thp_log_status); + return mmc_wimax_thp_log_status; +} +EXPORT_SYMBOL(mmc_wimax_get_thp_log); + +int mmc_wimax_set_busclk_pwrsave(int on) +{ + printk(KERN_INFO "%s on:%d\n", __func__, on); + mmc_wimax_busclk_pwrsave = on; + return 0; +} +EXPORT_SYMBOL(mmc_wimax_set_busclk_pwrsave); + +int mmc_wimax_get_busclk_pwrsave(void) +{ + //printk(KERN_INFO "%s status:%d\n", __func__, mmc_wimax_busclk_pwrsave); + return mmc_wimax_busclk_pwrsave; +} +EXPORT_SYMBOL(mmc_wimax_get_busclk_pwrsave); + +int mmc_wimax_set_sdio_hw_reset(int on) +{ + printk(KERN_INFO "%s on:%d\n", __func__, on); + mmc_wimax_sdio_hw_reset = on; + return 0; +} +EXPORT_SYMBOL(mmc_wimax_set_sdio_hw_reset); + +int mmc_wimax_get_sdio_hw_reset(void) +{ + //printk(KERN_INFO "%s status:%d\n", __func__, mmc_wimax_sdio_hw_reset); + return mmc_wimax_sdio_hw_reset; +} +EXPORT_SYMBOL(mmc_wimax_get_sdio_hw_reset); + +int mmc_wimax_set_CMD53_timeout_trigger_counter(int counter) +{ + printk(KERN_INFO "%s counter:%d\n", __func__, counter); + mmc_wimax_CMD53_timeout_trigger_counter = counter; + return 0; +} +EXPORT_SYMBOL(mmc_wimax_set_CMD53_timeout_trigger_counter); + +int mmc_wimax_get_CMD53_timeout_trigger_counter(void) +{ + //printk(KERN_INFO "%s counter:%d\n", __func__, mmc_wimax_CMD53_timeout_trigger_counter); + return mmc_wimax_CMD53_timeout_trigger_counter; +} +EXPORT_SYMBOL(mmc_wimax_get_CMD53_timeout_trigger_counter); + +int mmc_wimax_get_hostwakeup_gpio(void) +{ + return mmc_wimax_hostwakeup_gpio; +} +EXPORT_SYMBOL(mmc_wimax_get_hostwakeup_gpio); + +static int mmc_wimax_is_gpio_irq_enabled = 0; + +int mmc_wimax_set_gpio_irq_enabled(int on) +{ + printk(KERN_INFO "%s on:%d\n", __func__, on); + mmc_wimax_is_gpio_irq_enabled = on; + return 0; +} +EXPORT_SYMBOL(mmc_wimax_set_gpio_irq_enabled); + +int mmc_wimax_get_gpio_irq_enabled(void) +{ + return mmc_wimax_is_gpio_irq_enabled; +} +EXPORT_SYMBOL(mmc_wimax_get_gpio_irq_enabled); + +void mmc_wimax_enable_host_wakeup(int on) +{ + if (mmc_wimax_sdio_status) + { + if (on) { + if (!mmc_wimax_is_gpio_irq_enabled) { + printk("set GPIO%d as waketup source\n", mmc_wimax_get_hostwakeup_gpio()); + enable_irq(MSM_GPIO_TO_INT(mmc_wimax_get_hostwakeup_gpio())); + 
enable_irq_wake(MSM_GPIO_TO_INT(mmc_wimax_get_hostwakeup_gpio())); + mmc_wimax_is_gpio_irq_enabled = 1; + } + } + else { + if (mmc_wimax_is_gpio_irq_enabled) { + printk("disable GPIO%d wakeup source\n", mmc_wimax_get_hostwakeup_gpio()); + disable_irq_wake(MSM_GPIO_TO_INT(mmc_wimax_get_hostwakeup_gpio())); + disable_irq_nosync(MSM_GPIO_TO_INT(mmc_wimax_get_hostwakeup_gpio())); + mmc_wimax_is_gpio_irq_enabled = 0; + } + } + } + else { + printk("%s mmc_wimax_sdio_status is OFF\n", __func__); + } +} +EXPORT_SYMBOL(mmc_wimax_enable_host_wakeup); + +int __init supersonic_init_mmc(unsigned int sys_rev) +{ + uint32_t id; + + printk(KERN_INFO "%s()+\n", __func__); + + /* initial WIFI_SHUTDOWN# */ + id = PCOM_GPIO_CFG(SUPERSONIC_GPIO_WIFI_SHUTDOWN_N, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); + gpio_set_value(SUPERSONIC_GPIO_WIFI_SHUTDOWN_N, 0); + + msm_add_sdcc(1, &supersonic_wifi_data, 0, 0); + + /* Initial WiMAX */ + printk("config wimax power gpio table\n"); + config_gpio_table(wimax_power_pin_gpio_table, + ARRAY_SIZE(wimax_power_pin_gpio_table)); + + msm_add_sdcc(3, &supersonic_wimax_data,0,0); + + if (opt_disable_sdcard) { + pr_info("%s: sdcard disabled on cmdline\n", __func__); + goto done; + } + + sdslot_vreg_enabled = 0; + + sdslot_vreg = vreg_get(0, "gp6"); + if (IS_ERR(sdslot_vreg)) + return PTR_ERR(sdslot_vreg); + + if (system_rev == 0) { /* XA board */ + set_irq_wake(MSM_GPIO_TO_INT(SUPERSONIC_GPIO_SDMC_CD_N), 1); + + msm_add_sdcc(2, &supersonic_sdslot_data, + MSM_GPIO_TO_INT(SUPERSONIC_GPIO_SDMC_CD_N), + IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE); + } else + msm_add_sdcc(2, &supersonic_sdslot_data, 0, 0); + +done: + printk(KERN_INFO "%s()-\n", __func__); + return 0; +} diff --git a/arch/arm/mach-msm/board-supersonic-panel.c b/arch/arm/mach-msm/board-supersonic-panel.c new file mode 100644 index 0000000000000..e9702419f1733 --- /dev/null +++ b/arch/arm/mach-msm/board-supersonic-panel.c @@ -0,0 +1,928 @@ +/* linux/arch/arm/mach-msm/board-supersonic-panel.c + * + * Copyright (C) 2008 HTC Corporation. + * Author: Jay Tu + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +/* #include */ + +#include "board-supersonic.h" +#include "devices.h" +#include "proc_comm.h" + +#if 1 +#define B(s...) printk(s) +#else +#define B(s...) 
do {} while(0) +#endif +extern int panel_type; +enum { + PANEL_SHARP, + PANEL_AUO, +}; + +static struct cabc_t { + struct led_classdev lcd_backlight; + struct msm_mddi_client_data *client_data; + struct mutex lock; + unsigned long status; +} cabc; + +enum { + GATE_ON = 1 << 0, +}; +static struct vreg *vreg_lcd_2v8; +static struct vreg *vreg_lcd_1v8; + +#define REG_WAIT (0xffff) +struct nov_regs { + unsigned reg; + unsigned val; +} nov_init_seq[] = { + {0xc000, 0x86}, + {0xc001, 0x00}, + {0xc002, 0x86}, + {0xc003, 0x00}, + {0xc100, 0x40}, + {0xc200, 0x02}, + {0xc202, 0x32}, + {0xe000, 0x0e}, + {0xe001, 0x34}, + {0xe002, 0x3F}, + {0xe003, 0x49}, + {0xe004, 0x1D}, + {0xe005, 0x2C}, + {0xe006, 0x5F}, + {0xe007, 0x3A}, + {0xe008, 0x20}, + {0xe009, 0x28}, + {0xe00a, 0x80}, + {0xe00b, 0x13}, + {0xe00c, 0x32}, + {0xe00d, 0x56}, + {0xe00e, 0x79}, + {0xe00f, 0xB8}, + {0xe010, 0x55}, + {0xe011, 0x57}, + {0xe100, 0x0e}, + {0xe101, 0x34}, + {0xe102, 0x3F}, + {0xe103, 0x49}, + {0xe104, 0x1D}, + {0xe105, 0x2C}, + {0xe106, 0x5F}, + {0xe107, 0x3A}, + {0xe108, 0x20}, + {0xe109, 0x28}, + {0xe10a, 0x80}, + {0xe10b, 0x13}, + {0xe10c, 0x32}, + {0xe10d, 0x56}, + {0xe10e, 0x79}, + {0xe10f, 0xB8}, + {0xe110, 0x55}, + {0xe111, 0x57}, + + {0xe200, 0x0E}, + {0xe201, 0x34}, + {0xe202, 0x3F}, + {0xe203, 0x49}, + {0xe204, 0x1D}, + {0xe205, 0x2C}, + {0xe206, 0x5F}, + {0xe207, 0x3A}, + {0xe208, 0x20}, + {0xe209, 0x28}, + {0xe20A, 0x80}, + {0xe20B, 0x13}, + {0xe20C, 0x32}, + {0xe20D, 0x56}, + {0xe20E, 0x79}, + {0xe20F, 0xB8}, + {0xe210, 0x55}, + {0xe211, 0x57}, + + {0xe300, 0x0E}, + {0xe301, 0x34}, + {0xe302, 0x3F}, + {0xe303, 0x49}, + {0xe304, 0x1D}, + {0xe305, 0x2C}, + {0xe306, 0x5F}, + {0xe307, 0x3A}, + {0xe308, 0x20}, + {0xe309, 0x28}, + {0xe30A, 0x80}, + {0xe30B, 0x13}, + {0xe30C, 0x32}, + {0xe30D, 0x56}, + {0xe30E, 0x79}, + {0xe30F, 0xB8}, + {0xe310, 0x55}, + {0xe311, 0x57}, + {0xe400, 0x0E}, + {0xe401, 0x34}, + {0xe402, 0x3F}, + {0xe403, 0x49}, + {0xe404, 0x1D}, + {0xe405, 0x2C}, + {0xe406, 0x5F}, + {0xe407, 0x3A}, + {0xe408, 0x20}, + {0xe409, 0x28}, + {0xe40A, 0x80}, + {0xe40B, 0x13}, + {0xe40C, 0x32}, + {0xe40D, 0x56}, + {0xe40E, 0x79}, + {0xe40F, 0xB8}, + {0xe410, 0x55}, + {0xe411, 0x57}, + {0xe500, 0x0E}, + {0xe501, 0x34}, + {0xe502, 0x3F}, + {0xe503, 0x49}, + {0xe504, 0x1D}, + {0xe505, 0x2C}, + {0xe506, 0x5F}, + {0xe507, 0x3A}, + {0xe508, 0x20}, + {0xe509, 0x28}, + {0xe50A, 0x80}, + {0xe50B, 0x13}, + {0xe50C, 0x32}, + {0xe50D, 0x56}, + {0xe50E, 0x79}, + {0xe50F, 0xB8}, + {0xe510, 0x55}, + {0xe511, 0x57}, + + {0x3a00, 0x05}, + + /* cabc */ + {0x4e00, 0x00}, + {0x5e00, 0x00}, + {0x6a01, 0x00}, + {0x6a02, 0x03}, + {0x5100, 0xff}, + {0x5301, 0x10}, + {0x6A18, 0xff}, + {0x6A17, 0x01}, + {0xF402, 0x14}, + + {0x3500, 0x00}, + {0x1100, 0x0}, + {REG_WAIT, 120}, +}; + +struct s1d_regs { + unsigned reg; + unsigned val; +} s1d13775_init_seq[] = { + {0x001C, 0x1500}, + {0x0020, 0x3043}, + {0x0024, 0x401A}, + {0x0028, 0x031A}, + {0x002C, 0x0001}, + {REG_WAIT, 0x0004}, /* increase delay 1ms -> 4ms */ + {0x0084, 0x0215}, + {0x0088, 0x0038}, + {0x008C, 0x2113}, + {0x002C, 0x0002}, + {REG_WAIT, 0x0004}, /* increase delay 1ms -> 4ms */ + {0x002C, 0x0003}, + {0x0100, 0x3702}, + {0x0104, 0x0180}, + {0x0140, 0x003F}, + {0x0144, 0x00EF}, + {0x0148, 0x0016}, + {0x014C, 0x0005}, + {0x0150, 0x0006}, + {0x0154, 0x032B}, + {0x0158, 0x031F}, + {0x015C, 0x0009}, + {0x0160, 0x0002}, + {0x0164, 0x0003}, + {0x0168, 0x00A2}, + {0x0180, 0x0057}, + {0x0184, 0x00DB}, + {0x0188, 0x00E3}, + {0x018C, 0x0000}, + {0x0190, 0x0000}, + {0x0280, 0x0000}, + 
{0x0284, 0x0002}, + {0x0288, 0x0000}, + {0x028C, 0x0001}, + {0x0294, 0x0000}, + {0x0400, 0x8000}, + {0x0404, 0x10C8}, + {0x0480, 0x0001}, + {0x0500, 0x0000}, + {0x0504, 0x0011}, + {0x0508, 0x0000}, + {0x0510, 0x0000}, + {0x0518, 0x002E}, + {0x051C, 0x00c7}, + {0x0520, 0x01DF}, + {0x0524, 0x031f}, + {0x0528, 0x0000}, + {0x052C, 0x0000}, + {0x0530, 0x0000}, + {0x0534, 0x0000}, + + {0x0604, 0x0108}, + {0x060C, 0x0000}, + {0x0610, 0x00ff}, + + {0x0648, 0x0020}, + {0x0800, 0x0000}, + {0x0804, 0x000A}, + {0x0808, 0x0400}, + {0x080C, 0x0400}, + {0x0814, 0x0000}, + {0x081C, 0x0000}, + {0x0824, 0x002E}, + {0x0828, 0x00C7}, + {0x082C, 0x01DF}, + {0x0830, 0x031F}, + {0x0834, 0x0000}, + {0x0838, 0x0000}, + {0x083C, 0x0000}, + {0x0840, 0x0000}, + {0x0844, 0x01DF}, + {0x0848, 0x031F}, + {0x0870, 0x0064}, + {0x0874, 0x0064}, + {0x0878, 0x00C7}, + {0x087C, 0x00C7}, + {0x1410, 0x0004}, + {0x1414, 0x00FF}, + {0x1420, 0x0000}, + {0x1424, 0x0000}, + {0x1428, 0x01DF}, + {0x142C, 0x031F}, + {0x1430, 0xDC00}, + {0x1434, 0x0005}, + {0x1440, 0x0000}, + {0x1444, 0x0000}, + {0x1448, 0x01DF}, + {0x144C, 0x031F}, + {0x1450, 0x0000}, + {0x1454, 0x0000}, + {0x1458, 0x01DF}, + {0x145C, 0x031F}, + {0x1460, 0x0000}, + {0x1464, 0x0000}, + {0x1468, 0x01DF}, + {0x146C, 0x031F}, + {0x1470, 0x0000}, + {0x1474, 0x0000}, + {0x1478, 0x01DF}, + {0x147C, 0x031F}, + {0x14A4, 0x0110}, + {0x14A8, 0xAFC8}, + {0x14AC, 0x0FF0}, + {0x14B0, 0x0202}, + {0x14B4, 0x0080}, + {0x14A0, 0x0002}, + {0x1508, 0x0000}, + {0x150C, 0x0000}, + {0x1510, 0x0000}, + {0x1514, 0x0000}, + {0x1520, 0x0000}, + {0x1524, 0x0000}, + {0x1528, 0x0000}, + {0x152C, 0x0000}, + {0x1530, 0x0000}, + {0x1534, 0x0000}, + {0x1538, 0x0000}, + {0x153C, 0x0000}, + {0x1540, 0x0000}, + {0x1544, 0x0000}, + {0x1548, 0x0000}, + {0x154C, 0x0000}, + {0x1550, 0x0000}, + {0x1554, 0x0000}, + {0x1558, 0x0000}, + {0x1600, 0x0000}, + {0x1604, 0x0020}, + {0x1608, 0x0040}, + {0x160C, 0x0060}, + {0x1610, 0x0080}, + {0x1614, 0x00A0}, + {0x1618, 0x00C0}, + {0x161C, 0x00E0}, + {0x1620, 0x0100}, + {0x1624, 0x0000}, + {0x1628, 0x0020}, + {0x162C, 0x0040}, + {0x1630, 0x0060}, + {0x1634, 0x0080}, + {0x1638, 0x00A0}, + {0x163C, 0x00C0}, + {0x1640, 0x00E0}, + {0x1644, 0x0100}, + {0x1648, 0x0000}, + {0x164C, 0x0020}, + {0x1650, 0x0040}, + {0x1654, 0x0060}, + {0x1658, 0x0080}, + {0x165C, 0x00A0}, + {0x1660, 0x00C0}, + {0x1664, 0x00E0}, + {0x1668, 0x0100}, + {0x1680, 0x0000}, + {0x1684, 0x0000}, + {0x1688, 0x0000}, + {0x168C, 0x0000}, + {0x1694, 0x0000}, + {0x16A0, 0x0000}, + {0x16A4, 0x0000}, + {0x16A8, 0x0000}, + {0x16AC, 0x0000}, + {0x16B4, 0x0000}, + {0x16C0, 0x0000}, + {0x16C4, 0x0000}, + {0x16C8, 0x0000}, + {0x16CC, 0x0000}, + {0x16D4, 0x0000}, + {0x16E0, 0x0000}, + {0x16E4, 0x0000}, + {0x16E8, 0x0000}, + {0x16EC, 0x0000}, + {0x16F4, 0x0000}, + {0x1700, 0x0000}, + {0x1704, 0x0000}, + {0x1708, 0x0000}, + {0x170C, 0x0000}, + {0x1714, 0x0000}, + {0x1720, 0x0000}, + {0x1724, 0x0000}, + {0x1728, 0x0000}, + {0x172C, 0x0000}, + {0x1734, 0x0000}, + {0x1740, 0x0000}, + {0x1744, 0x0000}, + {0x1748, 0x0000}, + {0x174C, 0x0000}, + {0x1754, 0x0000}, + {0x1760, 0x0000}, + {0x1764, 0x0000}, + {0x1768, 0x0000}, + {0x176C, 0x0000}, + {0x1774, 0x0000}, + {0x0300, 0x7000}, + {0x0304, 0x0000}, + {0x0308, 0x0000}, + {0x030C, 0x0000}, + {0x0310, 0x0000}, + {0x0314, 0x0000}, + {0x0318, 0xF7FF}, + {0x031C, 0xFFFF}, + {0x0320, 0x000F}, + {0x0324, 0x0000}, + {0x0328, 0x0000}, + {0x032C, 0x0000}, +}; + +struct s1d_regs pwm_seq[] = { + {0x001C, 0x0010}, + {0x14A0, 0x0001}, + {0x14A4, 0x0110}, + {0x14B0, 0x3030}, + {0x14A8, 
0x09C4}, + {0x14AC, 0x0FF0}, +}; +extern int qspi_send_9bit(unsigned char id, unsigned data); +extern int qspi_send_16bit(unsigned char id, unsigned data); + +static void suc_set_brightness(struct led_classdev *led_cdev, + enum led_brightness val) +{ + struct msm_mddi_client_data *client = cabc.client_data; + unsigned int shrink_br = val; + + printk(KERN_DEBUG "set brightness = %d\n", val); + if (test_bit(GATE_ON, &cabc.status) == 0) + return; + + if (val < 30) + shrink_br = 5; + else if ((val >= 30) && (val <= 143)) + shrink_br = 104 * (val - 30) / 113 + 5; + else + shrink_br = 145 * (val - 144) / 111 + 110; + mutex_lock(&cabc.lock); + if (panel_type == PANEL_SHARP) { + int i, reg, val; + for (i = 0; i < ARRAY_SIZE(pwm_seq); i++) { + reg = pwm_seq[i].reg; + val = pwm_seq[i].val; + if (reg == REG_WAIT) + msleep(val); + else + client->remote_write(client, cpu_to_le32(val), reg); + } + client->remote_write(client, shrink_br, 0x14B4); + } else { + qspi_send_16bit(0x1, 0x55); + qspi_send_16bit(0x0, 0x00); + qspi_send_16bit(0x2, 0x00); + + qspi_send_16bit(0x1, 0x51); + qspi_send_16bit(0x0, 0x00); + qspi_send_16bit(0x2, shrink_br); + } + mutex_unlock(&cabc.lock); +} + +static enum led_brightness +suc_get_brightness(struct led_classdev *led_cdev) +{ + struct msm_mddi_client_data *client = cabc.client_data; + if (panel_type == PANEL_SHARP) + return client->remote_read(client, 0x14B4); + else + return client->remote_read(client, 0x5100); +} + +#define DEFAULT_BRIGHTNESS 100 +static void suc_backlight_switch(int on) +{ + enum led_brightness val; + + if (on) { + printk(KERN_DEBUG "turn on backlight\n"); + set_bit(GATE_ON, &cabc.status); + val = cabc.lcd_backlight.brightness; + + /* LED core uses get_brightness for default value + * If the physical layer is not ready, we should + * not count on it */ + if (val == 0) + val = DEFAULT_BRIGHTNESS; + suc_set_brightness(&cabc.lcd_backlight, val); + } else { + clear_bit(GATE_ON, &cabc.status); + suc_set_brightness(&cabc.lcd_backlight, 0); + } +} + +static int suc_backlight_probe(struct platform_device *pdev) +{ + int err = -EIO; + + mutex_init(&cabc.lock); + cabc.client_data = pdev->dev.platform_data; + cabc.lcd_backlight.name = "lcd-backlight"; + cabc.lcd_backlight.brightness_set = suc_set_brightness; + cabc.lcd_backlight.brightness_get = suc_get_brightness; + err = led_classdev_register(&pdev->dev, &cabc.lcd_backlight); + if (err) + goto err_register_lcd_bl; + return 0; + +err_register_lcd_bl: + led_classdev_unregister(&cabc.lcd_backlight); + return err; +} + +/* ------------------------------------------------------------------- */ + +static struct resource resources_msm_fb[] = { + { + .start = MSM_FB_BASE, + .end = MSM_FB_BASE + MSM_FB_SIZE - 1, + .flags = IORESOURCE_MEM, + }, +}; + +static int +supersonic_mddi_init(struct msm_mddi_bridge_platform_data *bridge_data, + struct msm_mddi_client_data *client_data) +{ + int i = 0, ret; + unsigned reg, val; + + if (panel_type == PANEL_SHARP) { + client_data->auto_hibernate(client_data, 0); + for (i = 0; i < ARRAY_SIZE(s1d13775_init_seq); i++) { + reg = s1d13775_init_seq[i].reg; + val = s1d13775_init_seq[i].val; + if (reg == REG_WAIT) + msleep(val); + else + client_data->remote_write(client_data, cpu_to_le32(val), reg); + } + client_data->auto_hibernate(client_data, 1); + + struct spi_cmd { + unsigned char reg; + unsigned char val; + unsigned int delay; + } sharp_spi[] = { + {0x0, 0x11, 100}, + + {0x0, 0xB9, 0}, + {0x1, 0xFF, 0}, + {0x1, 0x83, 0}, + {0x1, 0x63, 0}, + + {0x0, 0x3A, 0}, + {0x1, 0x50, 0}, + }; + + 
/* FIXME */ + + for (i = 0; i < ARRAY_SIZE(sharp_spi); i++) { + ret = qspi_send_9bit(sharp_spi[i].reg, sharp_spi[i].val); + if (ret < 0) + printk("%s: spi_write fail!\n", __func__); + else if (sharp_spi[i].delay) + msleep(sharp_spi[i].delay); + } + } + else { + client_data->auto_hibernate(client_data, 0); + for (i = 0; i < ARRAY_SIZE(nov_init_seq); i++) { + reg = cpu_to_le32(nov_init_seq[i].reg); + val = cpu_to_le32(nov_init_seq[i].val); + if (reg == REG_WAIT) + msleep(val); + else + client_data->remote_write(client_data, val, reg); + } + client_data->auto_hibernate(client_data, 1); + } + return 0; +} + +static int +supersonic_mddi_uninit(struct msm_mddi_bridge_platform_data *bridge_data, + struct msm_mddi_client_data *client_data) +{ + if (panel_type == PANEL_SHARP) { + int i, ret; + struct spi_cmd { + unsigned char reg; + unsigned char val; + unsigned int delay; + } sharp_spi[] = { + {0x0, 0x28, 0}, + {0x0, 0x10, 100}, + }; + + /* FIXME */ + + for (i = 0; i < ARRAY_SIZE(sharp_spi); i++) { + ret = qspi_send_9bit(sharp_spi[i].reg, sharp_spi[i].val); + if (ret < 0) + printk("%s: spi_write fail!\n", __func__); + else if (sharp_spi[i].delay) + msleep(sharp_spi[i].delay); + } + } + else + client_data->remote_write(client_data, 0, 0x2800); + + return 0; +} + +/* FIXME: remove after XA03 */ +static int backlight_control(int on) +{ + struct i2c_adapter *adap = i2c_get_adapter(0); + struct i2c_msg msg; + u8 buf[] = {0x90, 0x00, 0x00, 0x08}; + int ret = -EIO, max_retry = 3; + + msg.addr = 0xcc >> 1; + msg.flags = 0; + msg.len = sizeof(buf); + msg.buf = buf; + + if (on == 0) + buf[0] = 0x91; + + while (max_retry--) { + ret = i2c_transfer(adap, &msg, 1); + if (ret != 1) + msleep(1); + else { + ret = 0; + break; + } + ret = -EIO; + } + + if (ret) + printk(KERN_ERR "backlight control fail\n"); + return 0; +} + +static int +supersonic_panel_blank(struct msm_mddi_bridge_platform_data *bridge_data, + struct msm_mddi_client_data *client_data) +{ + B(KERN_DEBUG "%s\n", __func__); + suc_backlight_switch(LED_OFF); + backlight_control(0); + return 0; +} + +static int +supersonic_panel_unblank(struct msm_mddi_bridge_platform_data *bridge_data, + struct msm_mddi_client_data *client_data) +{ + B(KERN_DEBUG "%s\n", __func__); + if (panel_type == PANEL_AUO) { + suc_backlight_switch(LED_FULL); + client_data->remote_write(client_data, 0x01, 0xB101); + client_data->remote_write(client_data, 0x82, 0xB102); + client_data->remote_write(client_data, 0x5A, 0xB107); + client_data->remote_write(client_data, 0x00, 0x4400); + client_data->remote_write(client_data, 0xC8, 0x4401); + client_data->remote_write(client_data, 0x00, 0x2900); + msleep(100); + client_data->remote_write(client_data, 0x24, 0x5300); + } else { + suc_backlight_switch(LED_FULL); + client_data->remote_write(client_data, 0x3043, 0x0020); + client_data->remote_write(client_data, 0x10C8, 0x0404); + client_data->remote_write(client_data, 0x4000, 0x0600); + msleep(10); + qspi_send_9bit(0x0, 0x29); + client_data->remote_write(client_data, 0x7000, 0x0324); + client_data->remote_write(client_data, 0x4000, 0x0600); + } + + backlight_control(1); + return 0; +} + +static struct msm_mddi_bridge_platform_data novatec_client_data = { + .init = supersonic_mddi_init, + .uninit = supersonic_mddi_uninit, + .blank = supersonic_panel_blank, + .unblank = supersonic_panel_unblank, + .fb_data = { + .xres = 480, + .yres = 800, + .width = 48, + .height = 80, + .output_format = 0, + }, + .panel_conf = { + .caps = MSMFB_CAP_CABC, + }, +}; + +static struct msm_mddi_bridge_platform_data 
epson_client_data = { + .init = supersonic_mddi_init, + .uninit = supersonic_mddi_uninit, + .blank = supersonic_panel_blank, + .unblank = supersonic_panel_unblank, + .fb_data = { + .xres = 480, + .yres = 800, + .width = 48, + .height = 80, + .output_format = 0, + }, + .panel_conf = { + .caps = MSMFB_CAP_CABC, + }, +}; + + +#define SPI_CLK 17 +#define SPI_DO 18 +#define SPI_DI 19 +#define SPI_CS 20 + +#define LCM_GPIO_CFG(gpio, func) \ +PCOM_GPIO_CFG(gpio, func, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_16MA) +static uint32_t spi_on_gpio_table[] = { + LCM_GPIO_CFG(SPI_CLK, 1), + LCM_GPIO_CFG(SPI_CS, 1), + LCM_GPIO_CFG(SPI_DO, 1), + PCOM_GPIO_CFG(SPI_DI, 1, GPIO_INPUT, GPIO_NO_PULL, GPIO_16MA), +}; + +static uint32_t spi_off_gpio_table[] = { + LCM_GPIO_CFG(SPI_CLK, 0), + LCM_GPIO_CFG(SPI_CS, 0), + LCM_GPIO_CFG(SPI_DO, 0), + PCOM_GPIO_CFG(SPI_DI, 0, GPIO_INPUT, GPIO_NO_PULL, GPIO_16MA), +}; + +static int spi_gpio_switch(int on) +{ + config_gpio_table( + !!on ? spi_on_gpio_table : spi_off_gpio_table, + ARRAY_SIZE(spi_on_gpio_table)); + + return 0; +} + +static void +mddi_novatec_power(struct msm_mddi_client_data *client_data, int on) +{ + unsigned id, on_off = 1; + + B(KERN_DEBUG "%s: power %s.\n", __func__, on ? "on" : "off"); + + if (on) { + on_off = 1; + /* 2V8 */ + id = PM_VREG_PDOWN_SYNT_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + vreg_enable(vreg_lcd_2v8); + + /* 1V8 */ + id = PM_VREG_PDOWN_AUX_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + vreg_enable(vreg_lcd_1v8); + msleep(15); + + gpio_set_value(SUPERSONIC_LCD_RST, 1); + msleep(1); + gpio_set_value(SUPERSONIC_LCD_RST, 0); + msleep(5); + gpio_set_value(SUPERSONIC_LCD_RST, 1); + msleep(50); + spi_gpio_switch(1); + } else { + on_off = 0; + gpio_set_value(SUPERSONIC_LCD_RST, 0); + msleep(120); + + /* 1V8 */ + id = PM_VREG_PDOWN_AUX_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + vreg_disable(vreg_lcd_1v8); + + /* 2V8 */ + id = PM_VREG_PDOWN_SYNT_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + vreg_disable(vreg_lcd_2v8); + spi_gpio_switch(0); + } +} + +static void +mddi_epson_power(struct msm_mddi_client_data *client_data, int on) +{ + unsigned id, on_off = 1; + + B(KERN_DEBUG "%s: power %s.\n", __func__, on ? 
"on" : "off"); + + if (on) { + on_off = 1; + /* 2V8 */ + gpio_set_value(149, 1); + id = PM_VREG_PDOWN_SYNT_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + vreg_enable(vreg_lcd_2v8); + msleep(5); + /* 1V8 */ + gpio_set_value(16, 1); + id = PM_VREG_PDOWN_AUX_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + vreg_enable(vreg_lcd_1v8); + msleep(10); + + gpio_set_value(151, 1); + msleep(2); + + gpio_set_value(SUPERSONIC_LCD_RST, 1); + msleep(1); + gpio_set_value(SUPERSONIC_LCD_RST, 0); + msleep(5); + gpio_set_value(SUPERSONIC_LCD_RST, 1); + msleep(50); + spi_gpio_switch(1); + } else { + on_off = 0; + gpio_set_value(SUPERSONIC_LCD_RST, 0); + msleep(2); + gpio_set_value(151, 0); + msleep(120); + + /* 1V8 */ + gpio_set_value(16, 0); + id = PM_VREG_PDOWN_AUX_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + vreg_disable(vreg_lcd_1v8); + msleep(5); + /* 2V8 */ + gpio_set_value(149, 0); + id = PM_VREG_PDOWN_SYNT_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + vreg_disable(vreg_lcd_2v8); + spi_gpio_switch(0); + } +} + +static struct msm_mddi_platform_data mddi_pdata = { + .clk_rate = 384000000, + .fb_resource = resources_msm_fb, + .num_clients = 2, + .client_platform_data = { + { + .product_id = (0xb9f6 << 16 | 0x5582), + .name = "mddi_c_b9f6_5582", + .id = 1, + .client_data = &novatec_client_data, + .clk_rate = 0, + }, + { + .product_id = (0x4ca3 << 16 | 0x0000), + .name = "mddi_c_4ca3_0000", + .id = 0, + .client_data = &epson_client_data, + .clk_rate = 0, + }, + }, +}; + +static struct platform_driver suc_backlight_driver = { + .probe = suc_backlight_probe, + .driver = { + .owner = THIS_MODULE, + }, +}; + +static struct msm_mdp_platform_data mdp_pdata = { + .dma_channel = MDP_DMA_S, +}; + +int __init supersonic_init_panel(void) +{ + int rc; + + B(KERN_INFO "%s: enter.\n", __func__); + + vreg_lcd_1v8 = vreg_get(0, "gp4"); + if (IS_ERR(vreg_lcd_1v8)) + return PTR_ERR(vreg_lcd_1v8); + + vreg_lcd_2v8 = vreg_get(0, "synt"); + if (IS_ERR(vreg_lcd_2v8)) + return PTR_ERR(vreg_lcd_2v8); + + if (panel_type == PANEL_SHARP) + mdp_pdata.overrides |= MSM_MDP_PANEL_IGNORE_PIXEL_DATA; + else + mdp_pdata.overrides &= ~MSM_MDP_PANEL_IGNORE_PIXEL_DATA; + + msm_device_mdp.dev.platform_data = &mdp_pdata; + rc = platform_device_register(&msm_device_mdp); + if (rc) + return rc; + + if (panel_type) + mddi_pdata.power_client = mddi_novatec_power; + else + mddi_pdata.power_client = mddi_epson_power; + + msm_device_mddi0.dev.platform_data = &mddi_pdata; + rc = platform_device_register(&msm_device_mddi0); + if (rc) + return rc; + + if (panel_type) + suc_backlight_driver.driver.name = "nov_cabc"; + else + suc_backlight_driver.driver.name = "eps_cabc"; + rc = platform_driver_register(&suc_backlight_driver); + if (rc) + return rc; + + return 0; +} diff --git a/arch/arm/mach-msm/board-supersonic-rfkill.c b/arch/arm/mach-msm/board-supersonic-rfkill.c new file mode 100644 index 0000000000000..8236350f862de --- /dev/null +++ b/arch/arm/mach-msm/board-supersonic-rfkill.c @@ -0,0 +1,353 @@ +/* + * Copyright (C) 2009 Google, Inc. + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + */ + +/* Control bluetooth power for supersonic platform */ + +#include +#include +#include +#include +#include +#include +#include + +#include "proc_comm.h" +#include "board-supersonic.h" + +#define HTC_RFKILL_DBG + +static struct rfkill *bt_rfk; +static const char bt_name[] = "bcm4329"; +static int pre_state; + +static uint32_t supersonic_bt_init_table[] = { + /* BT_RTS */ + PCOM_GPIO_CFG(SUPERSONIC_GPIO_BT_UART1_RTS, + 0, + GPIO_OUTPUT, + GPIO_NO_PULL, + GPIO_8MA), + /* BT_CTS */ + PCOM_GPIO_CFG(SUPERSONIC_GPIO_BT_UART1_CTS, + 0, + GPIO_INPUT, + GPIO_PULL_UP, + GPIO_8MA), + /* BT_RX */ + PCOM_GPIO_CFG(SUPERSONIC_GPIO_BT_UART1_RX, + 0, + GPIO_INPUT, + GPIO_PULL_UP, + GPIO_8MA), + /* BT_TX */ + PCOM_GPIO_CFG(SUPERSONIC_GPIO_BT_UART1_TX, + 0, + GPIO_OUTPUT, + GPIO_NO_PULL, + GPIO_8MA), + + /* BT_SHUTDOWN_N */ + PCOM_GPIO_CFG(SUPERSONIC_GPIO_BT_SHUTDOWN_N, + 0, + GPIO_OUTPUT, + GPIO_NO_PULL, + GPIO_4MA), + /* BT_RESET_N */ + PCOM_GPIO_CFG(SUPERSONIC_GPIO_BT_RESET_N, + 0, + GPIO_OUTPUT, + GPIO_NO_PULL, + GPIO_4MA), + + /* BT_HOST_WAKE */ + PCOM_GPIO_CFG(SUPERSONIC_GPIO_BT_HOST_WAKE, + 0, + GPIO_INPUT, + GPIO_PULL_UP, + GPIO_4MA), + /* BT_CHIP_WAKE */ + PCOM_GPIO_CFG(SUPERSONIC_GPIO_BT_CHIP_WAKE, + 0, + GPIO_OUTPUT, + GPIO_NO_PULL, + GPIO_4MA), +}; + +static uint32_t supersonic_bt_on_table[] = { + /* BT_RTS */ + PCOM_GPIO_CFG(SUPERSONIC_GPIO_BT_UART1_RTS, + 2, + GPIO_OUTPUT, + GPIO_PULL_UP, + GPIO_8MA), + /* BT_CTS */ + PCOM_GPIO_CFG(SUPERSONIC_GPIO_BT_UART1_CTS, + 2, + GPIO_INPUT, + GPIO_PULL_UP, + GPIO_8MA), + /* BT_RX */ + PCOM_GPIO_CFG(SUPERSONIC_GPIO_BT_UART1_RX, + 2, + GPIO_INPUT, + GPIO_PULL_UP, + GPIO_8MA), + /* BT_TX */ + PCOM_GPIO_CFG(SUPERSONIC_GPIO_BT_UART1_TX, + 2, + GPIO_OUTPUT, + GPIO_PULL_UP, + GPIO_8MA), + + /* BT_HOST_WAKE */ + PCOM_GPIO_CFG(SUPERSONIC_GPIO_BT_HOST_WAKE, + 0, + GPIO_INPUT, + GPIO_NO_PULL, + GPIO_4MA), + /* BT_CHIP_WAKE */ + PCOM_GPIO_CFG(SUPERSONIC_GPIO_BT_CHIP_WAKE, + 0, + GPIO_OUTPUT, + GPIO_NO_PULL, + GPIO_4MA), +}; + +static uint32_t supersonic_bt_off_table[] = { + /* BT_RTS */ + PCOM_GPIO_CFG(SUPERSONIC_GPIO_BT_UART1_RTS, + 0, + GPIO_OUTPUT, + GPIO_NO_PULL, + GPIO_8MA), + /* BT_CTS */ + PCOM_GPIO_CFG(SUPERSONIC_GPIO_BT_UART1_CTS, + 0, + GPIO_INPUT, + GPIO_PULL_UP, + GPIO_8MA), + /* BT_RX */ + PCOM_GPIO_CFG(SUPERSONIC_GPIO_BT_UART1_RX, + 0, + GPIO_INPUT, + GPIO_PULL_UP, + GPIO_8MA), + /* BT_TX */ + PCOM_GPIO_CFG(SUPERSONIC_GPIO_BT_UART1_TX, + 0, + GPIO_OUTPUT, + GPIO_NO_PULL, + GPIO_8MA), + + /* BT_HOST_WAKE */ + PCOM_GPIO_CFG(SUPERSONIC_GPIO_BT_HOST_WAKE, + 0, + GPIO_INPUT, + GPIO_PULL_UP, + GPIO_4MA), + /* BT_CHIP_WAKE */ + PCOM_GPIO_CFG(SUPERSONIC_GPIO_BT_CHIP_WAKE, + 0, + GPIO_OUTPUT, + GPIO_NO_PULL, + GPIO_4MA), +}; + +static void config_bt_table(uint32_t *table, int len) +{ + int n; + unsigned id; + for (n = 0; n < len; n++) { + id = table[n]; + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); + } +} + +static void supersonic_config_bt_init(void) +{ + /* set bt initial configuration*/ + config_bt_table(supersonic_bt_init_table, + ARRAY_SIZE(supersonic_bt_init_table)); + /* BT_RESET_N */ + gpio_direction_output(SUPERSONIC_GPIO_BT_RESET_N, 0); + + mdelay(5); + + /* BT_SHUTDOWN_N */ + gpio_direction_output(SUPERSONIC_GPIO_BT_SHUTDOWN_N, 1); + + /* BT_RESET_N */ + + gpio_direction_output(SUPERSONIC_GPIO_BT_RESET_N, 1); + + mdelay(15); + + /* BT_RESET_N */ + gpio_direction_output(SUPERSONIC_GPIO_BT_RESET_N, 0); + + /* BT_SHUTDOWN_N */ + 
gpio_direction_output(SUPERSONIC_GPIO_BT_SHUTDOWN_N, 0); + + /* BT_RTS */ + gpio_direction_output(SUPERSONIC_GPIO_BT_UART1_RTS, 0); + + /* BT_TX */ + gpio_direction_output(SUPERSONIC_GPIO_BT_UART1_TX, 0); + + /* BT_CHIP_WAKE */ + gpio_direction_output(SUPERSONIC_GPIO_BT_CHIP_WAKE, 0); + +} + +static void supersonic_config_bt_on(void) +{ + + #ifdef HTC_RFKILL_DBG + printk(KERN_INFO "-- RK ON --\n"); + #endif + + /* set bt on configuration*/ + config_bt_table(supersonic_bt_on_table, + ARRAY_SIZE(supersonic_bt_on_table)); + mdelay(5); + /* BT_SHUTDOWN_N */ + gpio_direction_output(SUPERSONIC_GPIO_BT_SHUTDOWN_N, 1); + + /* BT_RESET_N */ + gpio_direction_output(SUPERSONIC_GPIO_BT_RESET_N, 1); + + mdelay(5); + + /* BT_CHIP_WAKE */ + gpio_direction_output(SUPERSONIC_GPIO_BT_CHIP_WAKE, 1); + +} + +static void supersonic_config_bt_off(void) +{ + #ifdef HTC_RFKILL_DBG + printk(KERN_INFO "-- RK OFF --\n"); + #endif + + /* BT_RESET_N */ + gpio_direction_output(SUPERSONIC_GPIO_BT_RESET_N, 0); + + /* BT_SHUTDOWN_N */ + gpio_direction_output(SUPERSONIC_GPIO_BT_SHUTDOWN_N, 0); + + + config_bt_table(supersonic_bt_off_table, + ARRAY_SIZE(supersonic_bt_off_table)); + mdelay(5); + + /* BT_RTS */ + gpio_direction_output(SUPERSONIC_GPIO_BT_UART1_RTS, 0); + + /* BT_TX */ + gpio_direction_output(SUPERSONIC_GPIO_BT_UART1_TX, 0); + + /* BT_CHIP_WAKE */ + gpio_direction_output(SUPERSONIC_GPIO_BT_CHIP_WAKE, 0); +} + +static int bluetooth_set_power(void *data, bool blocked) +{ + if (pre_state == blocked) { + #ifdef HTC_RFKILL_DBG + printk(KERN_INFO "-- SAME ST --\n"); + #endif + return 0; + } else + pre_state = blocked; + + if (!blocked) + supersonic_config_bt_on(); + else + supersonic_config_bt_off(); + + return 0; +} + +static struct rfkill_ops supersonic_rfkill_ops = { + .set_block = bluetooth_set_power, +}; + +static int supersonic_rfkill_probe(struct platform_device *pdev) +{ + int rc = 0; + bool default_state = true; /* off */ + + supersonic_config_bt_init(); /* bt gpio initial config */ + + bluetooth_set_power(NULL, default_state); + + bt_rfk = rfkill_alloc(bt_name, &pdev->dev, RFKILL_TYPE_BLUETOOTH, + &supersonic_rfkill_ops, NULL); + if (!bt_rfk) { + rc = -ENOMEM; + goto err_rfkill_reset; + } + + rfkill_set_states(bt_rfk, default_state, false); + + /* userspace cannot take exclusive control */ + rc = rfkill_register(bt_rfk); + if (rc) + goto err_rfkill_reg; + + return 0; + +err_rfkill_reg: + rfkill_destroy(bt_rfk); +err_rfkill_reset: + return rc; +} + +static int supersonic_rfkill_remove(struct platform_device *dev) +{ + rfkill_unregister(bt_rfk); + rfkill_destroy(bt_rfk); + + return 0; +} + +static struct platform_driver supersonic_rfkill_driver = { + .probe = supersonic_rfkill_probe, + .remove = supersonic_rfkill_remove, + .driver = { + .name = "supersonic_rfkill", + .owner = THIS_MODULE, + }, +}; + +static int __init supersonic_rfkill_init(void) +{ + pre_state = -1; + if (!machine_is_supersonic()) + return 0; + + return platform_driver_register(&supersonic_rfkill_driver); +} + +static void __exit supersonic_rfkill_exit(void) +{ + platform_driver_unregister(&supersonic_rfkill_driver); +} + +module_init(supersonic_rfkill_init); +module_exit(supersonic_rfkill_exit); +MODULE_DESCRIPTION("supersonic rfkill"); +MODULE_AUTHOR("Nick Pelly "); +MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-msm/board-supersonic-tpa2018d1.c b/arch/arm/mach-msm/board-supersonic-tpa2018d1.c new file mode 100644 index 0000000000000..bf601df41468a --- /dev/null +++ b/arch/arm/mach-msm/board-supersonic-tpa2018d1.c @@ -0,0 +1,352 @@ +/* 
driver/i2c/chip/tap2018d1.c + * + * TI TPA2018D1 Speaker Amp + * + * Copyright (C) 2009 HTC Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "board-supersonic-tpa2018d1.h" + +#define DEBUG (0) + +static struct i2c_client *this_client; +static struct tpa2018d1_platform_data *pdata; + +struct mutex spk_amp_lock; +static int tpa2018d1_opened; +static int last_spkamp_state; +static char SPK_AMP_CFG[8]; +static char DEFAULT_SPK_AMP_ON[] = + {0x01, 0xc3, 0x20, 0x01, 0x00, 0x08, 0x1a, 0x21}; +static char SPK_AMP_0FF[] = {0x01, 0xa2}; +static char *config_data; +static int tpa2018d1_num_modes; + +static int tpa2018_i2c_write(char *txData, int length) +{ + struct i2c_msg msg[] = { + { + .addr = this_client->addr, + .flags = 0, + .len = length, + .buf = txData, + }, + }; + + if (i2c_transfer(this_client->adapter, msg, 1) < 0) { + pr_err("%s: I2C transfer error\n", __func__); + return -EIO; + } else + return 0; +} + +static int tpa2018_i2c_read(char *rxData, int length) +{ + int rc; + struct i2c_msg msgs[] = { + { + .addr = this_client->addr, + .flags = I2C_M_RD, + .len = length, + .buf = rxData, + }, + }; + + rc = i2c_transfer(this_client->adapter, msgs, 1); + if (rc < 0) { + pr_err("%s: transfer error %d\n", __func__, rc); + return rc; + } + +#if DEBUG + { + int i = 0; + for (i = 0; i < length; i++) + pr_info("%s: rx[%d] = %2x\n", __func__, i, rxData[i]); + } +#endif + + return 0; +} + +static int tpa2018d1_open(struct inode *inode, struct file *file) +{ + int rc = 0; + + mutex_lock(&spk_amp_lock); + + if (tpa2018d1_opened) { + pr_err("%s: busy\n", __func__); + rc = -EBUSY; + goto done; + } + + tpa2018d1_opened = 1; +done: + mutex_unlock(&spk_amp_lock); + return rc; +} + +static int tpa2018d1_release(struct inode *inode, struct file *file) +{ + mutex_lock(&spk_amp_lock); + tpa2018d1_opened = 0; + mutex_unlock(&spk_amp_lock); + + return 0; +} + +void tpa2018d1_set_speaker_amp(int on) +{ + mutex_lock(&spk_amp_lock); + if (on && !last_spkamp_state) { + gpio_set_value(pdata->gpio_tpa2018_spk_en, 1); + mdelay(5); /* According to TPA2018D1 Spec */ + if (tpa2018_i2c_write(SPK_AMP_CFG, sizeof(SPK_AMP_CFG)) == 0) { + last_spkamp_state = 1; + pr_info("%s: ON, value = %x %x\n", __func__, SPK_AMP_CFG[0], SPK_AMP_CFG[1]); + } + } else if (!on && last_spkamp_state) { + if (tpa2018_i2c_write(SPK_AMP_0FF, sizeof(SPK_AMP_0FF)) == 0) { + last_spkamp_state = 0; + mdelay(2); + gpio_set_value(pdata->gpio_tpa2018_spk_en, 0); + pr_info("%s: OFF\n", __func__); + } + } + mutex_unlock(&spk_amp_lock); +} + +static long +tpa2018d1_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + void __user *argp = (void __user *)arg; + int rc = 0; + unsigned char tmp[7]; + int mode = -1; + int offset = 0; + unsigned char reg_idx[1] = {0x01}; + struct tpa2018d1_config_data cfg; + + switch (cmd) { + case TPA2018_SET_CONFIG: + if (copy_from_user(SPK_AMP_CFG, argp, sizeof(SPK_AMP_CFG))) + /* TODO: content validation? 
*/ + return -EFAULT; + break; + case TPA2018_SET_MODE: + if (copy_from_user(&mode, argp, sizeof(mode))) + return -EFAULT; + if (mode >= tpa2018d1_num_modes || mode < 0) { + pr_err("unsupported tpa2018d1 mode %d\n", mode); + return -EINVAL; + } + memcpy(SPK_AMP_CFG, config_data + mode * TPA2018D1_CMD_LEN, + TPA2018D1_CMD_LEN); + break; + case TPA2018_READ_CONFIG: + mutex_lock(&spk_amp_lock); + if (!last_spkamp_state) { + gpio_set_value(pdata->gpio_tpa2018_spk_en, 1); + mdelay(5); /* According to TPA2018D1 Spec */ + } + + rc = tpa2018_i2c_write(reg_idx, sizeof(reg_idx)); + if (rc < 0) + goto err; + + rc = tpa2018_i2c_read(tmp, sizeof(tmp)); + if (rc < 0) + goto err; + + if (copy_to_user(argp, &tmp, sizeof(tmp))) + rc = -EFAULT; +err: + if (!last_spkamp_state) + gpio_set_value(pdata->gpio_tpa2018_spk_en, 0); + mutex_unlock(&spk_amp_lock); + break; + case TPA2018_SET_PARAM: + cfg.mode_num = 0; + cfg.cmd_data = 0; + if (copy_from_user(&cfg, argp, sizeof(cfg))) { + pr_err("%s: copy from user failed.\n", __func__); + return -EFAULT; + } + tpa2018d1_num_modes = cfg.mode_num; + if (tpa2018d1_num_modes > TPA2018_NUM_MODES) { + pr_err("%s: invalid number of modes %d\n", __func__, + tpa2018d1_num_modes); + return -EINVAL; + } + if (cfg.data_len != tpa2018d1_num_modes*TPA2018D1_CMD_LEN) { + pr_err("%s: invalid data length %d, expecting %d\n", + __func__, cfg.data_len, + tpa2018d1_num_modes * TPA2018D1_CMD_LEN); + return -EINVAL; + } + config_data = kmalloc(cfg.data_len, GFP_KERNEL); + if (!config_data) { + pr_err("%s: out of memory\n", __func__); + return -ENOMEM; + } + if (copy_from_user(config_data, cfg.cmd_data, cfg.data_len)) { + pr_err("%s: copy data from user failed.\n", __func__); + kfree(config_data); + return -EFAULT; + } + /* replace default setting with playback setting */ + if (tpa2018d1_num_modes >= TPA2018_MODE_PLAYBACK) { + offset = TPA2018_MODE_PLAYBACK * TPA2018D1_CMD_LEN; + memcpy(SPK_AMP_CFG, config_data + offset, + TPA2018D1_CMD_LEN); + } + break; + default: + pr_err("%s: Invalid command\n", __func__); + rc = -EINVAL; + break; + } + return rc; +} + +static struct file_operations tpa2018d1_fops = { + .owner = THIS_MODULE, + .open = tpa2018d1_open, + .release = tpa2018d1_release, + .unlocked_ioctl = tpa2018d1_ioctl, +}; + +static struct miscdevice tpa2018d1_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "tpa2018d1", + .fops = &tpa2018d1_fops, +}; + +int tpa2018d1_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + int ret = 0; + + pdata = client->dev.platform_data; + + if (pdata == NULL) { + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (pdata == NULL) { + ret = -ENOMEM; + pr_err("%s: platform data is NULL\n", __func__); + goto err_alloc_data_failed; + } + } + + this_client = client; + + ret = gpio_request(pdata->gpio_tpa2018_spk_en, "tpa2018"); + if (ret < 0) { + pr_err("%s: gpio request aud_spk_en pin failed\n", __func__); + goto err_free_gpio_all; + } + + ret = gpio_direction_output(pdata->gpio_tpa2018_spk_en, 1); + if (ret < 0) { + pr_err("%s: request aud_spk_en gpio direction failed\n", + __func__); + goto err_free_gpio_all; + } + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + pr_err("%s: i2c check functionality error\n", __func__); + ret = -ENODEV; + goto err_free_gpio_all; + } + + gpio_set_value(pdata->gpio_tpa2018_spk_en, 0); /* Default Low */ + + ret = misc_register(&tpa2018d1_device); + if (ret) { + pr_err("%s: tpa2018d1_device register failed\n", __func__); + goto err_free_gpio_all; + } + memcpy(SPK_AMP_CFG, DEFAULT_SPK_AMP_ON, 
sizeof(DEFAULT_SPK_AMP_ON)); + return 0; + +err_free_gpio_all: + gpio_free(pdata->gpio_tpa2018_spk_en); +err_alloc_data_failed: + return ret; +} + +static int tpa2018d1_remove(struct i2c_client *client) +{ + struct tpa2018d1_platform_data *p2018data = i2c_get_clientdata(client); + kfree(p2018data); + + return 0; +} + +static int tpa2018d1_suspend(struct i2c_client *client, pm_message_t mesg) +{ + return 0; +} + +static int tpa2018d1_resume(struct i2c_client *client) +{ + return 0; +} + +static const struct i2c_device_id tpa2018d1_id[] = { + { TPA2018D1_I2C_NAME, 0 }, + { } +}; + +static struct i2c_driver tpa2018d1_driver = { + .probe = tpa2018d1_probe, + .remove = tpa2018d1_remove, + .suspend = tpa2018d1_suspend, + .resume = tpa2018d1_resume, + .id_table = tpa2018d1_id, + .driver = { + .name = TPA2018D1_I2C_NAME, + }, +}; + +static int __init tpa2018d1_init(void) +{ + pr_info("%s\n", __func__); + mutex_init(&spk_amp_lock); + return i2c_add_driver(&tpa2018d1_driver); +} + +static void __exit tpa2018d1_exit(void) +{ + i2c_del_driver(&tpa2018d1_driver); +} + +module_init(tpa2018d1_init); +module_exit(tpa2018d1_exit); + +MODULE_DESCRIPTION("TPA2018D1 Speaker Amp driver"); +MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-msm/board-supersonic-tpa2018d1.h b/arch/arm/mach-msm/board-supersonic-tpa2018d1.h new file mode 100644 index 0000000000000..dc11012209454 --- /dev/null +++ b/arch/arm/mach-msm/board-supersonic-tpa2018d1.h @@ -0,0 +1,35 @@ +/* include/linux/tpa2018d1.h - tpa2018d1 speaker amplifier driver + * + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + + +#ifndef __ASM_ARM_ARCH_TPA2018D1_H +#define __ASM_ARM_ARCH_TPA2018D1_H + +#define TPA2018D1_I2C_NAME "tpa2018d1" +#define TPA2018D1_CMD_LEN 8 + +struct tpa2018d1_platform_data { + uint32_t gpio_tpa2018_spk_en; +}; + +struct tpa2018d1_config_data { + unsigned char *cmd_data; /* [mode][cmd_len][cmds..] */ + unsigned int mode_num; + unsigned int data_len; +}; + +extern void tpa2018d1_set_speaker_amp(int on); + +#endif /* __ASM_ARM_ARCH_TPA2018D1_H */ diff --git a/arch/arm/mach-msm/board-supersonic-tpa6130.c b/arch/arm/mach-msm/board-supersonic-tpa6130.c new file mode 100644 index 0000000000000..bd91aeae5f587 --- /dev/null +++ b/arch/arm/mach-msm/board-supersonic-tpa6130.c @@ -0,0 +1,185 @@ +/* driver/i2c/chip/tap6130.c + * + * TI TPA6130 Headset Amp + * + * Copyright (C) 2009 HTC Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define HEADSET_MTOA_PROG 0x30100003 +#define HEADSET_MTOA_VERS 0 +#define HTC_HEADSET_NULL_PROC 0 +#define HTC_HEADSET_CTL_PROC 1 + +static struct i2c_client *this_client; +struct mutex amp_mutex; +static struct tpa6130_platform_data *pdata; + +static int i2c_on; +char buffer[3]; + +static int I2C_TxData(char *txData, int length) +{ + struct i2c_msg msg[] = { + { + .addr = this_client->addr, + .flags = 0, + .len = length, + .buf = txData, + }, + }; + + if (i2c_transfer(this_client->adapter, msg, 1) < 0) { + pr_err("tpa6130: I2C transfer error\n"); + return -EIO; + } else + return 0; +} + +void set_headset_amp(int on) +{ + mutex_lock(&amp_mutex); + if (on && !i2c_on) { + buffer[0] = 0x01; + buffer[1] = 0xC0; + buffer[2] = 0x3E; + if (I2C_TxData(buffer, 3) == 0) { + i2c_on = 1; + pr_err("tpa6130: turn on headset amp !\n"); + } + } else if (!on && i2c_on) { + buffer[0] = 0x01; + buffer[1] = 0xC1; + if (I2C_TxData(buffer, 2) == 0) { + i2c_on = 0; + pr_err("tpa6130: turn off headset amp !\n"); + } + } + mutex_unlock(&amp_mutex); +} + +static int handle_headset_call(struct msm_rpc_server *server, + struct rpc_request_hdr *req, unsigned len) +{ + struct rpc_headset_amp_ctl_args *args; + + if (!pdata->enable_rpc_server) + return 0; + + switch (req->procedure) { + case HTC_HEADSET_NULL_PROC: + return 0; + case HTC_HEADSET_CTL_PROC: + args = (struct rpc_headset_amp_ctl_args *)(req + 1); + args->on = be32_to_cpu(args->on); + if (args->on) { + gpio_set_value(pdata->gpio_hp_sd, 1); + msleep(10); + set_headset_amp(args->on); + } else if (!args->on) { + set_headset_amp(args->on); + gpio_set_value(pdata->gpio_hp_sd, 0); + } + return 0; + default: + pr_err("tpa6130a: the wrong proc for headset server\n"); + } + return -ENODEV; +} + +static struct msm_rpc_server headset_server = { + .prog = HEADSET_MTOA_PROG, + .vers = HEADSET_MTOA_VERS, + .rpc_call = handle_headset_call +}; + +int tpa6130_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + int ret = 0; + + pdata = client->dev.platform_data; + if (pdata == NULL) { + pr_err("tpa6130: platform data is NULL\n"); + goto fault; + } + + if (pdata->enable_rpc_server) { + msm_rpc_create_server(&headset_server); + + ret = gpio_request(pdata->gpio_hp_sd, "tpa6130"); + if (ret < 0) { + pr_err("tpa6130a: gpio request failed\n"); + goto fault; + } + + ret = gpio_direction_output(pdata->gpio_hp_sd, 1); + if (ret < 0) { + pr_err("tpa6130a: request reset gpio failed\n"); + goto fault; + } + gpio_set_value(pdata->gpio_hp_sd, 0); + } + + this_client = client; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + pr_err("tpa6130a: i2c check functionality error\n"); + goto fault; + } + + return 0; +fault: + return -ENODEV; +} + +static int tpa6130_remove(struct i2c_client *client) +{ + return 0; +} +static const struct i2c_device_id tpa6130_id[] = { + { TPA6130_I2C_NAME, 0 }, + { } +}; + +static struct i2c_driver tpa6130_driver = { + .probe = tpa6130_probe, + .remove = tpa6130_remove, + .id_table = tpa6130_id, + .driver = { + .name = TPA6130_I2C_NAME, + }, +}; + +static int __init tpa6130_init(void) +{ + pr_err("tpa6130 HP AMP: init\n"); + mutex_init(&amp_mutex); + return i2c_add_driver(&tpa6130_driver); +} + +static void __exit tpa6130_exit(void) +{ + i2c_del_driver(&tpa6130_driver); +} + +module_init(tpa6130_init); +module_exit(tpa6130_exit); diff --git a/arch/arm/mach-msm/board-supersonic-wifi.c b/arch/arm/mach-msm/board-supersonic-wifi.c new 
file mode 100644 index 0000000000000..42c9c76d81ac4 --- /dev/null +++ b/arch/arm/mach-msm/board-supersonic-wifi.c @@ -0,0 +1,155 @@ +/* linux/arch/arm/mach-msm/board-supersonic-wifi.c +*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "board-supersonic.h" + +int supersonic_wifi_power(int on); +int supersonic_wifi_reset(int on); +int supersonic_wifi_set_carddetect(int on); + +#if defined(CONFIG_DHD_USE_STATIC_BUF) || defined(CONFIG_BCM4329_DHD_USE_STATIC_BUF) + +#define PREALLOC_WLAN_NUMBER_OF_SECTIONS 4 +#define PREALLOC_WLAN_NUMBER_OF_BUFFERS 160 +#define PREALLOC_WLAN_SECTION_HEADER 24 + +#define WLAN_SECTION_SIZE_0 (PREALLOC_WLAN_NUMBER_OF_BUFFERS * 128) +#define WLAN_SECTION_SIZE_1 (PREALLOC_WLAN_NUMBER_OF_BUFFERS * 128) +#define WLAN_SECTION_SIZE_2 (PREALLOC_WLAN_NUMBER_OF_BUFFERS * 512) +#define WLAN_SECTION_SIZE_3 (PREALLOC_WLAN_NUMBER_OF_BUFFERS * 1024) + +#define WLAN_SKB_BUF_NUM 16 + +static struct sk_buff *wlan_static_skb[WLAN_SKB_BUF_NUM]; + +typedef struct wifi_mem_prealloc_struct { + void *mem_ptr; + unsigned long size; +} wifi_mem_prealloc_t; + +static wifi_mem_prealloc_t wifi_mem_array[PREALLOC_WLAN_NUMBER_OF_SECTIONS] = { + { NULL, (WLAN_SECTION_SIZE_0 + PREALLOC_WLAN_SECTION_HEADER) }, + { NULL, (WLAN_SECTION_SIZE_1 + PREALLOC_WLAN_SECTION_HEADER) }, + { NULL, (WLAN_SECTION_SIZE_2 + PREALLOC_WLAN_SECTION_HEADER) }, + { NULL, (WLAN_SECTION_SIZE_3 + PREALLOC_WLAN_SECTION_HEADER) } +}; + +static void *supersonic_wifi_mem_prealloc(int section, unsigned long size) +{ + if (section == PREALLOC_WLAN_NUMBER_OF_SECTIONS) + return wlan_static_skb; + if ((section < 0) || (section > PREALLOC_WLAN_NUMBER_OF_SECTIONS)) + return NULL; + if (wifi_mem_array[section].size < size) + return NULL; + return wifi_mem_array[section].mem_ptr; +} +#endif + +int __init supersonic_init_wifi_mem(void) +{ +#if defined(CONFIG_DHD_USE_STATIC_BUF) || defined(CONFIG_BCM4329_DHD_USE_STATIC_BUF) + int i; + + for(i=0;( i < WLAN_SKB_BUF_NUM );i++) { + if (i < (WLAN_SKB_BUF_NUM/2)) + wlan_static_skb[i] = dev_alloc_skb(4096); + else + wlan_static_skb[i] = dev_alloc_skb(8192); + } + for(i=0;( i < PREALLOC_WLAN_NUMBER_OF_SECTIONS );i++) { + wifi_mem_array[i].mem_ptr = kmalloc(wifi_mem_array[i].size, + GFP_KERNEL); + if (wifi_mem_array[i].mem_ptr == NULL) + return -ENOMEM; + } +#endif + return 0; +} + +static struct resource supersonic_wifi_resources[] = { + [0] = { + .name = "bcmdhd_wlan_irq", + .start = MSM_GPIO_TO_INT(SUPERSONIC_GPIO_WIFI_IRQ), + .end = MSM_GPIO_TO_INT(SUPERSONIC_GPIO_WIFI_IRQ), + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE, + }, +}; + +static struct wifi_platform_data supersonic_wifi_control = { + .set_power = supersonic_wifi_power, + .set_reset = supersonic_wifi_reset, + .set_carddetect = supersonic_wifi_set_carddetect, +#if defined(CONFIG_DHD_USE_STATIC_BUF) || defined(CONFIG_BCM4329_DHD_USE_STATIC_BUF) + .mem_prealloc = supersonic_wifi_mem_prealloc, +#else + .mem_prealloc = NULL, +#endif +}; + +static struct platform_device supersonic_wifi_device = { + .name = "bcmdhd_wlan", + .id = 1, + .num_resources = ARRAY_SIZE(supersonic_wifi_resources), + .resource = supersonic_wifi_resources, + .dev = { + .platform_data = &supersonic_wifi_control, + }, +}; + +extern unsigned char *get_wifi_nvs_ram(void); +extern int wifi_calibration_size_set(void); + +static unsigned supersonic_wifi_update_nvs(char *str, int add_flag) +{ +#define NVS_LEN_OFFSET 0x0C +#define NVS_DATA_OFFSET 0x40 + unsigned char *ptr; + 
 unsigned len; + + if (!str) + return -EINVAL; + ptr = get_wifi_nvs_ram(); + /* Size in format LE assumed */ + memcpy(&len, ptr + NVS_LEN_OFFSET, sizeof(len)); + + /* the last byte in NVRAM is 0, trim it */ + if (ptr[NVS_DATA_OFFSET + len - 1] == 0) + len -= 1; + + if (add_flag) { + strcpy(ptr + NVS_DATA_OFFSET + len, str); + len += strlen(str); + } else { + if (strnstr(ptr + NVS_DATA_OFFSET, str, len)) + len -= strlen(str); + } + memcpy(ptr + NVS_LEN_OFFSET, &len, sizeof(len)); + wifi_calibration_size_set(); + return 0; +} + +static int __init supersonic_wifi_init(void) +{ + if (!machine_is_supersonic()) + return 0; + + printk(KERN_INFO "%s: start\n", __func__); + supersonic_wifi_update_nvs("sd_oobonly=1\r\n", 0); + supersonic_wifi_update_nvs("btc_params80=0\n", 1); + supersonic_wifi_update_nvs("btc_params70=0x32\n", 1); + supersonic_init_wifi_mem(); + return platform_device_register(&supersonic_wifi_device); +} + +late_initcall(supersonic_wifi_init); diff --git a/arch/arm/mach-msm/board-supersonic.c b/arch/arm/mach-msm/board-supersonic.c new file mode 100644 index 0000000000000..260654d5b9b97 --- /dev/null +++ b/arch/arm/mach-msm/board-supersonic.c @@ -0,0 +1,1720 @@ +/* linux/arch/arm/mach-msm/board-supersonic.c + * + * Copyright (C) 2009 Google, Inc. + * Copyright (C) 2009 HTC Corporation. + * Author: Dima Zavin + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +//#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +// #include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "board-supersonic.h" +#include "devices.h" +#include "proc_comm.h" +#include "smd_private.h" +#include +#include +#include +#include "board-supersonic-flashlight.h" +#include +#include + +#include +#include +#include + + +#ifdef CONFIG_MICROP_COMMON +#include +#endif + +#include +#include + +#include "board-supersonic-tpa2018d1.h" + +#include +#include +#include "footswitch.h" +#include + +#define SMEM_SPINLOCK_I2C 6 + +#ifdef CONFIG_ARCH_QSD8X50 +extern unsigned char *get_bt_bd_ram(void); +#endif + +static unsigned skuid; + +static uint opt_usb_h2w_sw; +module_param_named(usb_h2w_sw, opt_usb_h2w_sw, uint, 0); + +void msm_init_pmic_vibrator(void); +static void config_supersonic_usb_id_gpios(bool output); +extern void __init supersonic_audio_init(void); +extern void __init supersonic_init_panel(void); +#ifdef CONFIG_MICROP_COMMON +void __init supersonic_microp_init(void); +#endif +static struct htc_battery_platform_data htc_battery_pdev_data = { + .gpio_mbat_in = SUPERSONIC_GPIO_MBAT_IN, + .gpio_mchg_en_n = SUPERSONIC_GPIO_MCHG_EN_N, + .gpio_iset = SUPERSONIC_GPIO_ISET, + .guage_driver = GUAGE_MODEM, + .m2a_cable_detect = 1, + .charger = SWITCH_CHARGER, + /* After the state of SUC XA, MCHG_EN is changed to CHG_INT */ + .int_data.chg_int = MSM_GPIO_TO_INT(SUPERSONIC_GPIO_MCHG_EN_N), +}; + +static struct platform_device htc_battery_pdev = { + .name = "htc_battery", + .id = -1, + .dev = { + .platform_data = &htc_battery_pdev_data, + }, +}; + +#ifdef CONFIG_MICROP_COMMON +static int capella_cm3602_power(int pwr_device, uint8_t enable); +static struct microp_function_config microp_functions[] = { + { + .name = "reset-int", + .category = MICROP_FUNCTION_RESET_INT, + .int_pin = 1 << 8, + }, +}; + +static struct microp_function_config microp_lightsensor = { + .name = "light_sensor", + .category = MICROP_FUNCTION_LSENSOR, + .levels = { 3, 7, 12, 57, 114, 279, 366, 453, 540, 0x3FF }, + .channel = 3, + .int_pin = 1 << 9, + .golden_adc = 0x118, + .ls_power = capella_cm3602_power, +}; + +static struct lightsensor_platform_data lightsensor_data = { + .config = &microp_lightsensor, + .irq = MSM_uP_TO_INT(9), +}; + +static struct microp_led_config led_config[] = { + { + .name = "amber", + .type = LED_RGB, + }, + { + .name = "green", + .type = LED_RGB, + }, + { + .name = "wimax", + .type = LED_WIMAX, + }, +}; + +static struct microp_led_platform_data microp_leds_data = { + .num_leds = ARRAY_SIZE(led_config), + .led_config = led_config, +}; + +static struct bma150_platform_data supersonic_g_sensor_pdata = { + .microp_new_cmd = 1, +}; + +/* Proximity Sensor (Capella_CM3602)*/ +static int __capella_cm3602_power(int on) +{ + int ret; + struct vreg *vreg = vreg_get(0, "gp1"); + if (!vreg) { + printk(KERN_ERR "%s: vreg error\n", __func__); + return -EIO; + } + ret = vreg_set_level(vreg, 2800); + + printk(KERN_DEBUG "%s: Turn the capella_cm3602 power %s\n", + __func__, (on) ? 
"on" : "off"); + if (on) { + gpio_direction_output(SUPERSONIC_GPIO_PROXIMITY_EN_N, 1); + ret = vreg_enable(vreg); + if (ret < 0) + printk(KERN_ERR "%s: vreg enable failed\n", __func__); + } else { + vreg_disable(vreg); + gpio_direction_output(SUPERSONIC_GPIO_PROXIMITY_EN_N, 0); + } + + return ret; +} + +static DEFINE_MUTEX(capella_cm3602_lock); +static unsigned int als_power_control; + +static int capella_cm3602_power(int pwr_device, uint8_t enable) +{ + unsigned int old_status = 0; + int ret = 0, on = 0; + mutex_lock(&capella_cm3602_lock); + + old_status = als_power_control; + if (enable) + als_power_control |= pwr_device; + else + als_power_control &= ~pwr_device; + + on = als_power_control ? 1 : 0; + if (old_status == 0 && on) + ret = __capella_cm3602_power(1); + else if (!on) + ret = __capella_cm3602_power(0); + + mutex_unlock(&capella_cm3602_lock); + return ret; +} + +static struct capella_cm3602_platform_data capella_cm3602_pdata = { + .power = capella_cm3602_power, + .p_en = SUPERSONIC_GPIO_PROXIMITY_EN_N, + .p_out = MSM_uP_TO_INT(4), +}; +/* End Proximity Sensor (Capella_CM3602)*/ + +static struct htc_headset_microp_platform_data htc_headset_microp_data = { + .remote_int = 1 << 7, + .remote_irq = MSM_uP_TO_INT(7), + .remote_enable_pin = 0, + .adc_channel = 0x01, + .adc_remote = {0, 33, 50, 110, 160, 220}, +}; + +static struct platform_device microp_devices[] = { + { + .name = "lightsensor_microp", + .dev = { + .platform_data = &lightsensor_data, + }, + }, + { + .name = "leds-microp", + .id = -1, + .dev = { + .platform_data = &microp_leds_data, + }, + }, + { + .name = BMA150_G_SENSOR_NAME, + .dev = { + .platform_data = &supersonic_g_sensor_pdata, + }, + }, + { + .name = "supersonic_proximity", + .id = -1, + .dev = { + .platform_data = &capella_cm3602_pdata, + }, + }, + { + .name = "HTC_HEADSET_MICROP", + .id = -1, + .dev = { + .platform_data = &htc_headset_microp_data, + }, + }, +}; + +static struct microp_i2c_platform_data microp_data = { + .num_functions = ARRAY_SIZE(microp_functions), + .microp_function = microp_functions, + .num_devices = ARRAY_SIZE(microp_devices), + .microp_devices = microp_devices, + .gpio_reset = SUPERSONIC_GPIO_UP_RESET_N, + .microp_ls_on = LS_PWR_ON | PS_PWR_ON, + .spi_devices = SPI_GSENSOR, +}; +#endif + +static struct gpio_led supersonic_led_list[] = { + { + .name = "button-backlight", + .gpio = SUPERSONIC_AP_KEY_LED_EN, + .active_low = 0, + }, +}; + +static struct gpio_led_platform_data supersonic_leds_data = { + .num_leds = ARRAY_SIZE(supersonic_led_list), + .leds = supersonic_led_list, +}; + +static struct platform_device supersonic_leds = { + .name = "leds-gpio", + .id = -1, + .dev = { + .platform_data = &supersonic_leds_data, + }, +}; + +static int supersonic_phy_init_seq[] = { 0xC, 0x31, 0x30, 0x32, 0x1D, 0x0D, 0x1D, 0x10, -1 }; + + +// USB cable out: supersonic_uart_usb_switch(1) +// USB cable in: supersonic_uart_usb_switch(0) +static void supersonic_uart_usb_switch(int uart) +{ + printk(KERN_INFO "%s:uart:%d\n", __func__, uart); + gpio_set_value(SUPERSONIC_USB_UARTz_SW, uart?1:0); // XA and for USB cable in to reset wimax UART + + if(system_rev && uart) // XB + { + if (gpio_get_value(SUPERSONIC_WIMAX_CPU_UARTz_SW)) // Wimax UART + { + printk(KERN_INFO "%s:Wimax UART\n", __func__); + gpio_set_value(SUPERSONIC_USB_UARTz_SW,1); + gpio_set_value(SUPERSONIC_WIMAX_CPU_UARTz_SW,1); + } + else // USB, CPU UART + { + printk(KERN_INFO "%s:Non wimax UART\n", __func__); + gpio_set_value(SUPERSONIC_WIMAX_CPU_UARTz_SW, uart==2?1:0); + } + } +} + +extern void 
msm_hsusb_8x50_phy_reset(void); + +static struct msm_hsusb_platform_data msm_hsusb_pdata = { + .phy_init_seq = supersonic_phy_init_seq, + .phy_reset = msm_hsusb_8x50_phy_reset, + .usb_id_pin_gpio = SUPERSONIC_GPIO_USB_ID_PIN, + .accessory_detect = 1, /* detect by ID pin gpio */ + .usb_uart_switch = supersonic_uart_usb_switch, +}; + + +static char *usb_functions_ums[] = { + "usb_mass_storage", +}; + +static char *usb_functions_ums_adb[] = { + "usb_mass_storage", + "adb", +}; + +static char *usb_functions_rndis[] = { + "rndis", +}; + +static char *usb_functions_rndis_adb[] = { + "rndis", + "adb", +}; + +#ifdef CONFIG_USB_ANDROID_ACCESSORY +static char *usb_functions_accessory[] = { "accessory" }; +static char *usb_functions_accessory_adb[] = { "accessory", "adb" }; +#endif + +#ifdef CONFIG_USB_ANDROID_DIAG +static char *usb_functions_adb_diag[] = { + "usb_mass_storage", + "adb", + "diag", +}; +#endif + +static char *usb_functions_all[] = { +#ifdef CONFIG_USB_ANDROID_RNDIS + "rndis", +#endif + "usb_mass_storage", + "adb", +#ifdef CONFIG_USB_ANDROID_ACM + "acm", +#endif +#ifdef CONFIG_USB_ANDROID_DIAG + "diag", +#endif +#ifdef CONFIG_USB_ANDROID_ACCESSORY + "accessory", +#endif +}; + +static struct android_usb_product usb_products[] = { + { + .product_id = 0x0ff9, + .num_functions = ARRAY_SIZE(usb_functions_ums), + .functions = usb_functions_ums, + }, + { + .product_id = 0x0c8d, + .num_functions = ARRAY_SIZE(usb_functions_ums_adb), + .functions = usb_functions_ums_adb, + }, + { + .product_id = 0x0c03, + .num_functions = ARRAY_SIZE(usb_functions_rndis), + .functions = usb_functions_rndis, + }, + { + .product_id = 0x0c04, + .num_functions = ARRAY_SIZE(usb_functions_rndis_adb), + .functions = usb_functions_rndis_adb, + }, +#ifdef CONFIG_USB_ANDROID_ACCESSORY + { + .product_id = USB_ACCESSORY_PRODUCT_ID, + .num_functions = ARRAY_SIZE(usb_functions_accessory), + .functions = usb_functions_accessory, + }, + { + .product_id = USB_ACCESSORY_ADB_PRODUCT_ID, + .num_functions = ARRAY_SIZE(usb_functions_accessory_adb), + .functions = usb_functions_accessory_adb, + }, +#endif +#ifdef CONFIG_USB_ANDROID_DIAG + { + .product_id = 0x0c07, + .num_functions = ARRAY_SIZE(usb_functions_adb_diag), + .functions = usb_functions_adb_diag, + }, +#endif + +}; + +static struct usb_mass_storage_platform_data mass_storage_pdata = { + .nluns = 1, + .vendor = "HTC", + .product = "Supersonic", + .release = 0x0100, +}; + +static struct platform_device usb_mass_storage_device = { + .name = "usb_mass_storage", + .id = -1, + .dev = { + .platform_data = &mass_storage_pdata, + }, +}; + +#ifdef CONFIG_USB_ANDROID_RNDIS +static struct usb_ether_platform_data rndis_pdata = { + /* ethaddr is filled by board_serialno_setup */ + .vendorID = 0x18d1, + .vendorDescr = "Google, Inc.", +}; + +static struct platform_device rndis_device = { + .name = "rndis", + .id = -1, + .dev = { + .platform_data = &rndis_pdata, + }, +}; +#endif + +static struct android_usb_platform_data android_usb_pdata = { + .vendor_id = 0x0bb4, + .product_id = 0x0c8d, + .version = 0x0100, + .product_name = "Android Phone", + .manufacturer_name = "HTC", + .num_products = ARRAY_SIZE(usb_products), + .products = usb_products, + .num_functions = ARRAY_SIZE(usb_functions_all), + .functions = usb_functions_all, +}; + +static struct platform_device android_usb_device = { + .name = "android_usb", + .id = -1, + .dev = { + .platform_data = &android_usb_pdata, + }, +}; + + +/* 2 : wimax UART, 1 : CPU uart, 0 : usb +CPU_WIMAX_SW -> GPIO160 +USB_UART#_SW -> GPIO33 + +XA : GPIO33 = 0 
-> USB + GPIO33 = 1 -> CPU UART + +XB : GPIO33 = 0 -> USB + GPIO33 = 1 , GPIO160 = 0 -> CPU UART // SUPERSONIC_WIMAX_CPU_UARTz_SW (GPIO160) + GPIO33 = 1 , GPIO160 = 1 -> Wimax UART // SUPERSONIC_USB_UARTz_SW (GPIO33) +*/ + + +static int __init supersonic_board_serialno_setup(char *serialno) +{ +#ifdef CONFIG_USB_ANDROID_RNDIS + int i; + char *src = serialno; + + /* create a fake MAC address from our serial number. + * first byte is 0x02 to signify locally administered. + */ + rndis_pdata.ethaddr[0] = 0x02; + for (i = 0; *src; i++) { + /* XOR the USB serial across the remaining bytes */ + rndis_pdata.ethaddr[i % (ETH_ALEN - 1) + 1] ^= *src++; + } +#endif + + android_usb_pdata.serial_number = serialno; + msm_hsusb_pdata.serial_number = serialno; + return 1; +} +__setup("androidboot.serialno=", supersonic_board_serialno_setup); + + +static struct platform_device supersonic_rfkill = { + .name = "supersonic_rfkill", + .id = -1, +}; + +static struct spi_platform_data supersonic_spi_pdata = { + .clk_rate = 1200000, +}; + +/* start kgsl */ +static struct resource kgsl_3d0_resources[] = { + { + .name = KGSL_3D0_REG_MEMORY, + .start = 0xA0000000, + .end = 0xA001ffff, + .flags = IORESOURCE_MEM, + }, + { + .name = KGSL_3D0_IRQ, + .start = INT_GRAPHICS, + .end = INT_GRAPHICS, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct kgsl_device_platform_data kgsl_3d0_pdata = { + .pwrlevel = { + { + .gpu_freq = 0, + .bus_freq = 128000000, + }, + }, + .init_level = 0, + .num_levels = 1, + .set_grp_async = NULL, + .idle_timeout = HZ/5, + .clk_map = KGSL_CLK_GRP | KGSL_CLK_IMEM, +}; + +struct platform_device msm_kgsl_3d0 = { + .name = "kgsl-3d0", + .id = 0, + .num_resources = ARRAY_SIZE(kgsl_3d0_resources), + .resource = kgsl_3d0_resources, + .dev = { + .platform_data = &kgsl_3d0_pdata, + }, +}; +/* end kgsl */ + +/* start footswitch regulator */ +struct platform_device *msm_footswitch_devices[] = { + FS_PCOM(FS_GFX3D, "fs_gfx3d"), +}; + +unsigned msm_num_footswitch_devices = ARRAY_SIZE(msm_footswitch_devices); +/* end footswitch regulator */ + +/* pmem heaps */ +#ifndef CONFIG_ION_MSM +static struct android_pmem_platform_data mdp_pmem_pdata = { + .name = "pmem", + .start = MSM_PMEM_MDP_BASE, + .size = MSM_PMEM_MDP_SIZE, +/* .no_allocator = 0,*/ + .allocator_type = PMEM_ALLOCATORTYPE_ALLORNOTHING, + .cached = 1, +}; + +static struct platform_device android_pmem_mdp_device = { + .name = "android_pmem", + .id = 0, + .dev = { + .platform_data = &mdp_pmem_pdata + }, +}; +#endif + +static struct android_pmem_platform_data android_pmem_adsp_pdata = { + .name = "pmem_adsp", + .start = MSM_PMEM_ADSP_BASE, + .size = MSM_PMEM_ADSP_SIZE, +/* .no_allocator = 0,*/ + .allocator_type = PMEM_ALLOCATORTYPE_BITMAP, + .cached = 1, +}; + +static struct platform_device android_pmem_adsp_device = { + .name = "android_pmem", + .id = 4, + .dev = { + .platform_data = &android_pmem_adsp_pdata, + }, +}; + +static struct android_pmem_platform_data android_pmem_venc_pdata = { + .name = "pmem_venc", + .start = MSM_PMEM_VENC_BASE, + .size = MSM_PMEM_VENC_SIZE, +/* .no_allocator = 0,*/ + .allocator_type = PMEM_ALLOCATORTYPE_BITMAP, + .cached = 1, +}; + +static struct platform_device android_pmem_venc_device = { + .name = "android_pmem", + .id = 6, + .dev = { + .platform_data = &android_pmem_venc_pdata, + }, +}; + +#ifdef CONFIG_BUILD_CIQ +static struct android_pmem_platform_data android_pmem_ciq_pdata = { + .name = "pmem_ciq", + .start = MSM_PMEM_CIQ_BASE, + .size = MSM_PMEM_CIQ_SIZE, +/* .no_allocator = 0,*/ + .allocator_type = 
PMEM_ALLOCATORTYPE_BITMAP, + .cached = 0, +}; + +static struct platform_device android_pmem_ciq_device = { + .name = "android_pmem", + .id = 7, + .dev = { .platform_data = &android_pmem_ciq_pdata }, +}; + +static struct android_pmem_platform_data android_pmem_ciq1_pdata = { + .name = "pmem_ciq1", + .start = MSM_PMEM_CIQ1_BASE, + .size = MSM_PMEM_CIQ1_SIZE, +/* .no_allocator = 0,*/ + .allocator_type = PMEM_ALLOCATORTYPE_BITMAP, + .cached = 0, +}; + +static struct platform_device android_pmem_ciq1_device = { + .name = "android_pmem", + .id = 8, + .dev = { .platform_data = &android_pmem_ciq1_pdata }, +}; + +static struct android_pmem_platform_data android_pmem_ciq2_pdata = { + .name = "pmem_ciq2", + .start = MSM_PMEM_CIQ2_BASE, + .size = MSM_PMEM_CIQ2_SIZE, +/* .no_allocator = 0,*/ + .allocator_type = PMEM_ALLOCATORTYPE_BITMAP, + .cached = 0, +}; + +static struct platform_device android_pmem_ciq2_device = { + .name = "android_pmem", + .id = 9, + .dev = { .platform_data = &android_pmem_ciq2_pdata }, +}; + +static struct android_pmem_platform_data android_pmem_ciq3_pdata = { + .name = "pmem_ciq3", + .start = MSM_PMEM_CIQ3_BASE, + .size = MSM_PMEM_CIQ3_SIZE, +/* .no_allocator = 0,*/ + .allocator_type = PMEM_ALLOCATORTYPE_BITMAP, + .cached = 0, +}; + +static struct platform_device android_pmem_ciq3_device = { + .name = "android_pmem", + .id = 10, + .dev = { .platform_data = &android_pmem_ciq3_pdata }, +}; +#endif +/* end pmem heaps */ + +/* ion heaps */ +#ifdef CONFIG_ION_MSM +static struct ion_co_heap_pdata co_ion_pdata = { + .adjacent_mem_id = INVALID_HEAP_ID, + .align = PAGE_SIZE, +}; + +static struct ion_platform_data ion_pdata = { + .nr = 2, + .heaps = { + { + .id = ION_SYSTEM_HEAP_ID, + .type = ION_HEAP_TYPE_SYSTEM, + .name = ION_VMALLOC_HEAP_NAME, + }, + /* PMEM_MDP = SF */ + { + .id = ION_SF_HEAP_ID, + .type = ION_HEAP_TYPE_CARVEOUT, + .name = ION_SF_HEAP_NAME, + .base = MSM_PMEM_MDP_BASE, + .size = MSM_PMEM_MDP_SIZE, + .memory_type = ION_EBI_TYPE, + .extra_data = (void *)&co_ion_pdata, + }, + } +}; + +static struct platform_device ion_dev = { + .name = "ion-msm", + .id = 1, + .dev = { .platform_data = &ion_pdata }, +}; +#endif +/* end ion heaps */ + +static struct resource ram_console_resources[] = { + { + .start = MSM_RAM_CONSOLE_BASE, + .end = MSM_RAM_CONSOLE_BASE + MSM_RAM_CONSOLE_SIZE - 1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct platform_device ram_console_device = { + .name = "ram_console", + .id = -1, + .num_resources = ARRAY_SIZE(ram_console_resources), + .resource = ram_console_resources, +}; + +static int supersonic_atmel_ts_power(int on) +{ + printk(KERN_INFO "supersonic_atmel_ts_power(%d)\n", on); + if (on) { + gpio_set_value(SUPERSONIC_GPIO_TP_RST, 0); + msleep(5); + gpio_set_value(SUPERSONIC_GPIO_TP_EN, 1); + msleep(5); + gpio_set_value(SUPERSONIC_GPIO_TP_RST, 1); + msleep(40); + } else { + gpio_set_value(SUPERSONIC_GPIO_TP_EN, 0); + msleep(2); + } + return 0; +} + +struct atmel_i2c_platform_data supersonic_atmel_ts_data[] = { + { + .version = 0x016, + .abs_x_min = 34, + .abs_x_max = 990, + .abs_y_min = 15, + .abs_y_max = 950, + .abs_pressure_min = 0, + .abs_pressure_max = 255, + .abs_width_min = 0, + .abs_width_max = 20, + .gpio_irq = SUPERSONIC_GPIO_TP_INT_N, + .power = supersonic_atmel_ts_power, + .config_T6 = {0, 0, 0, 0, 0, 0}, + .config_T7 = {50, 15, 50}, + .config_T8 = {10, 0, 20, 10, 0, 0, 5, 0}, + .config_T9 = {139, 0, 0, 18, 12, 0, 16, 32, 3, 5, 0, 5, 2, 14, 5, 10, 25, 10, 0, 0, 0, 0, 0, 0, 0, 0, 143, 25, 146, 10, 40}, + .config_T15 = {0, 0, 0, 0, 0, 
0, 0, 0, 0, 0}, + .config_T19 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T20 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T22 = {15, 0, 0, 0, 0, 0, 0, 0, 14, 0, 1, 8, 12, 16, 30, 40, 0}, + .config_T23 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T24 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T25 = {3, 0, 200, 50, 64, 31, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T27 = {0, 0, 0, 0, 0, 0, 0}, + .config_T28 = {0, 0, 2, 4, 8, 60}, + .object_crc = {0x63, 0x27, 0x8E}, + .cable_config = {30, 30, 8, 16}, + .GCAF_level = {20, 24, 28, 40, 63}, + .filter_level = {46, 100, 923, 978}, + }, + { + .version = 0x015, + .abs_x_min = 10, + .abs_x_max = 1012, + .abs_y_min = 15, + .abs_y_max = 960, + .abs_pressure_min = 0, + .abs_pressure_max = 255, + .abs_width_min = 0, + .abs_width_max = 20, + .gpio_irq = SUPERSONIC_GPIO_TP_INT_N, + .power = supersonic_atmel_ts_power, + .config_T6 = {0, 0, 0, 0, 0, 0}, + .config_T7 = {100, 10, 50}, + .config_T8 = {8, 0, 50, 50, 0, 0, 50, 0}, + .config_T9 = {139, 0, 0, 18, 12, 0, 16, 32, 3, 5, 0, 5, 2, 14, 5, 10, 25, 10, 0, 0, 0, 0, 0, 0, 0, 0, 143, 25, 146, 10, 20}, + .config_T15 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T19 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T20 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T22 = {7, 0, 0, 25, 0, -25, 255, 4, 50, 0, 1, 10, 15, 20, 25, 30, 4}, + .config_T23 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T24 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T25 = {3, 0, 200, 50, 64, 31, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T27 = {0, 0, 0, 0, 0, 0, 0}, + .config_T28 = {0, 0, 2, 4, 8, 60}, + .object_crc = {0x87, 0xAD, 0xF5}, + }, + { + .version = 0x014, + .abs_x_min = 10, + .abs_x_max = 1012, + .abs_y_min = 15, + .abs_y_max = 960, + .abs_pressure_min = 0, + .abs_pressure_max = 255, + .abs_width_min = 0, + .abs_width_max = 20, + .gpio_irq = SUPERSONIC_GPIO_TP_INT_N, + .power = supersonic_atmel_ts_power, + .config_T6 = {0, 0, 0, 0, 0, 0}, + .config_T7 = {100, 10, 50}, + .config_T8 = {8, 0, 50, 50, 0, 0, 50, 0}, + .config_T9 = {139, 0, 0, 18, 12, 0, 16, 32, 3, 5, 0, 5, 2, 14, 5, 10, 25, 10, 0, 0, 0, 0, 0, 0, 0, 0, 143, 25, 146, 10, 20}, + .config_T15 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T19 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T20 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T22 = {7, 0, 0, 25, 0, -25, 255, 4, 50, 0, 1, 10, 15, 20, 25, 30, 4}, + .config_T23 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T24 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T25 = {3, 0, 200, 50, 64, 31, 0, 0, 0, 0, 0, 0, 0, 0}, + .config_T27 = {0, 0, 0, 0, 0, 0, 0}, + .config_T28 = {0, 0, 2, 4, 8}, + } +}; + +static struct regulator_consumer_supply tps65023_dcdc1_supplies[] = { + { + .supply = "acpu_vcore", + }, +}; + +static struct regulator_init_data tps65023_data[5] = { + { + .constraints = { + .name = "dcdc1", /* VREG_MSMC2_1V29 */ + .min_uV = 975000, + .max_uV = 1300000, + .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, + }, + .consumer_supplies = tps65023_dcdc1_supplies, + .num_consumer_supplies = ARRAY_SIZE(tps65023_dcdc1_supplies), + }, + /* dummy values for unused regulators to not crash driver: */ + { + .constraints = { + .name = "dcdc2", /* VREG_MSMC1_1V26 */ + .min_uV = 1260000, + .max_uV = 1260000, + }, + }, + { + .constraints = { + .name = "dcdc3", /* unused */ + .min_uV = 800000, + .max_uV = 3300000, + }, + }, + { + .constraints = { + .name = "ldo1", /* unused */ + .min_uV = 1000000, + .max_uV = 3150000, + }, + }, + { 
+ .constraints = { + .name = "ldo2", /* V_USBPHY_3V3 */ + .min_uV = 3300000, + .max_uV = 3300000, + }, + }, +}; + +static struct htc_headset_mgr_platform_data htc_headset_mgr_data = { +}; + +static struct platform_device htc_headset_mgr = { + .name = "HTC_HEADSET_MGR", + .id = -1, + .dev = { + .platform_data = &htc_headset_mgr_data, + }, +}; + +static struct htc_headset_gpio_platform_data htc_headset_gpio_data = { + .hpin_gpio = SUPERSONIC_GPIO_35MM_HEADSET_DET, + .key_enable_gpio = 0, + .mic_select_gpio = 0, +}; + +static struct platform_device htc_headset_gpio = { + .name = "HTC_HEADSET_GPIO", + .id = -1, + .dev = { + .platform_data = &htc_headset_gpio_data, + }, +}; + +static struct akm8973_platform_data compass_platform_data = { + .layouts = SUPERSONIC_LAYOUTS, + .project_name = SUPERSONIC_PROJECT_NAME, + .reset = SUPERSONIC_GPIO_COMPASS_RST_N, + .intr = SUPERSONIC_GPIO_COMPASS_INT_N, +}; + +static struct tpa2018d1_platform_data tpa2018_data = { + .gpio_tpa2018_spk_en = SUPERSONIC_AUD_SPK_EN, +}; + +/* + * HDMI platform data + */ + +#if 1 +#define HDMI_DBG(s...) printk("[hdmi]" s) +#else +#define HDMI_DBG(s...) do {} while (0) +#endif + +static int hdmi_power(int on) +{ + HDMI_DBG("%s(%d)\n", __func__, on); + + switch(on) { + /* Power on/off sequence for normal or D2 sleep mode */ + case 0: + gpio_set_value(HDMI_RST, 0); + msleep(2); + gpio_set_value(V_HDMI_3V3_EN, 0); + gpio_set_value(V_VGA_5V_SIL9022A_EN, 0); + msleep(2); + gpio_set_value(V_HDMI_1V2_EN, 0); + break; + case 1: + gpio_set_value(V_HDMI_1V2_EN, 1); + msleep(2); + gpio_set_value(V_VGA_5V_SIL9022A_EN, 1); + gpio_set_value(V_HDMI_3V3_EN, 1); + msleep(2); + gpio_set_value(HDMI_RST, 1); + msleep(2); + break; + + /* Power on/off sequence for D3 sleep mode */ + case 2: + gpio_set_value(V_HDMI_3V3_EN, 0); + break; + case 3: + gpio_set_value(HDMI_RST, 0); + msleep(2); + gpio_set_value(V_HDMI_3V3_EN, 1); + gpio_set_value(V_VGA_5V_SIL9022A_EN, 1); + msleep(50); + gpio_set_value(HDMI_RST, 1); + msleep(10); + break; + case 4: + gpio_set_value(V_VGA_5V_SIL9022A_EN, 0); + break; + case 5: + gpio_set_value(V_VGA_5V_SIL9022A_EN, 1); + break; + + default: + return -EINVAL; + } + return 0; +} + +static uint32_t hdmi_gpio_on_table[] = { + PCOM_GPIO_CFG(SUPERSONIC_LCD_R0, 1, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_R1, 1, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_R2, 1, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_R3, 1, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_R4, 1, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + + PCOM_GPIO_CFG(SUPERSONIC_LCD_G0, 1, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_G1, 1, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_G2, 1, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_G3, 1, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_G4, 1, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_G5, 1, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + + PCOM_GPIO_CFG(SUPERSONIC_LCD_B0, 1, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_B1, 1, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_B2, 1, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_B3, 1, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_B4, 1, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + + PCOM_GPIO_CFG(SUPERSONIC_LCD_PCLK, 1, GPIO_OUTPUT, GPIO_NO_PULL, + 
GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_VSYNC, 1, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_HSYNC, 1, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_DE, 1, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), +}; + +static uint32_t hdmi_gpio_off_table[] = { + PCOM_GPIO_CFG(SUPERSONIC_LCD_R0, 0, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_R1, 0, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_R2, 0, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_R3, 0, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_R4, 0, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + + PCOM_GPIO_CFG(SUPERSONIC_LCD_G0, 0, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_G1, 0, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_G2, 0, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_G3, 0, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_G4, 0, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_G5, 0, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + + PCOM_GPIO_CFG(SUPERSONIC_LCD_B0, 0, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_B1, 0, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_B2, 0, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_B3, 0, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_B4, 0, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + + PCOM_GPIO_CFG(SUPERSONIC_LCD_PCLK, 0, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_VSYNC, 0, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_HSYNC, 0, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_LCD_DE, 0, GPIO_OUTPUT, GPIO_NO_PULL, + GPIO_2MA), +}; + + +static void suc_hdmi_gpio_on(void) +{ + HDMI_DBG("%s\n", __func__); + + config_gpio_table(hdmi_gpio_on_table, ARRAY_SIZE(hdmi_gpio_on_table)); +} + +static void suc_hdmi_gpio_off(void) +{ + int i = 0; + + HDMI_DBG("%s\n", __func__); + config_gpio_table(hdmi_gpio_off_table, ARRAY_SIZE(hdmi_gpio_off_table)); + + for (i = SUPERSONIC_LCD_R0; i <= SUPERSONIC_LCD_R4; i++) + gpio_set_value(i, 0); + for (i = SUPERSONIC_LCD_G0; i <= SUPERSONIC_LCD_G5; i++) + gpio_set_value(i, 0); + for (i = SUPERSONIC_LCD_B0; i <= SUPERSONIC_LCD_DE; i++) + gpio_set_value(i, 0); +} + +static struct hdmi_platform_data hdmi_device_data = { + .hdmi_res = { + .start = MSM_HDMI_FB_BASE, + .end = MSM_HDMI_FB_BASE + MSM_HDMI_FB_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + .power = hdmi_power, + .hdmi_gpio_on = suc_hdmi_gpio_on, + .hdmi_gpio_off = suc_hdmi_gpio_off, +}; + +static struct tpa6130_platform_data headset_amp_platform_data = { + .enable_rpc_server = 0, +}; + +static struct i2c_board_info i2c_devices[] = { + { + I2C_BOARD_INFO(ATMEL_QT602240_NAME, 0x94 >> 1), + .platform_data = &supersonic_atmel_ts_data, + .irq = MSM_GPIO_TO_INT(SUPERSONIC_GPIO_TP_INT_N) + }, +#ifdef CONFIG_MICROP_COMMON + { + I2C_BOARD_INFO(MICROP_I2C_NAME, 0xCC >> 1), + .platform_data = &microp_data, + .irq = MSM_GPIO_TO_INT(SUPERSONIC_GPIO_UP_INT_N) + }, +#endif + { + I2C_BOARD_INFO("smb329", 0x6E >> 1), + }, + { + I2C_BOARD_INFO("tps65200", 0xD4 >> 1), + }, + { + I2C_BOARD_INFO("akm8973", 0x1C), + .platform_data = &compass_platform_data, + .irq = MSM_GPIO_TO_INT(SUPERSONIC_GPIO_COMPASS_INT_N), + }, + { + I2C_BOARD_INFO("s5k3h1gx", 0x20 >> 1), + },/*samsung for 2nd source main camera*/ + { + 
I2C_BOARD_INFO("s5k6aafx", 0x78 >> 1), + },/*samsung 2nd camera 2nd source*/ + { + I2C_BOARD_INFO("ov8810", 0x6C >> 1), + }, + { + I2C_BOARD_INFO("ov9665", 0x60 >> 1), + }, + { + I2C_BOARD_INFO(TPA6130_I2C_NAME, 0xC0 >> 1), + .platform_data = &headset_amp_platform_data, + }, + { + I2C_BOARD_INFO("tps65023", 0x48), + .platform_data = tps65023_data, + }, + { + I2C_BOARD_INFO("tpa2018d1", 0x58), + .platform_data = &tpa2018_data, + }, + { + I2C_BOARD_INFO("SiL902x-hdmi", 0x76 >> 1), + .platform_data = &hdmi_device_data, + .irq = MSM_uP_TO_INT(1), + }, +}; + +#define ATAG_BDADDR 0x43294329 +#define ATAG_BDADDR_SIZE 4 +#define BDADDR_STR_SIZE 18 + +static char bdaddr[BDADDR_STR_SIZE]; + +module_param_string(bdaddr, bdaddr, sizeof(bdaddr), 0400); +MODULE_PARM_DESC(bdaddr, "bluetooth address"); + +static int __init parse_tag_bdaddr(const struct tag *tag) +{ + unsigned char *b = (unsigned char *)&tag->u; + + if (tag->hdr.size != ATAG_BDADDR_SIZE) + return -EINVAL; + + snprintf(bdaddr, BDADDR_STR_SIZE, "%02X:%02X:%02X:%02X:%02X:%02X", + b[0], b[1], b[2], b[3], b[4], b[5]); + + return 0; +} + +__tagtable(ATAG_BDADDR, parse_tag_bdaddr); + +static uint32_t camera_off_gpio_table[] = { + /* CAMERA SUSPEND*/ + PCOM_GPIO_CFG(0, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT0 */ + PCOM_GPIO_CFG(1, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(2, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(3, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(4, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT4 */ + PCOM_GPIO_CFG(5, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT5 */ + PCOM_GPIO_CFG(6, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT6 */ + PCOM_GPIO_CFG(7, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT7 */ + PCOM_GPIO_CFG(8, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT8 */ + PCOM_GPIO_CFG(9, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT9 */ + PCOM_GPIO_CFG(10, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT10 */ + PCOM_GPIO_CFG(11, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT11 */ + PCOM_GPIO_CFG(12, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* PCLK */ + PCOM_GPIO_CFG(13, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* HSYNC */ + PCOM_GPIO_CFG(14, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* VSYNC */ + PCOM_GPIO_CFG(15, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* MCLK */ +}; + +static uint32_t camera_on_gpio_table[] = { + /* CAMERA */ + PCOM_GPIO_CFG(0, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT0 */ + PCOM_GPIO_CFG(1, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT1 */ + PCOM_GPIO_CFG(2, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT2 */ + PCOM_GPIO_CFG(3, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT3 */ + PCOM_GPIO_CFG(4, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT4 */ + PCOM_GPIO_CFG(5, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT5 */ + PCOM_GPIO_CFG(6, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT6 */ + PCOM_GPIO_CFG(7, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT7 */ + PCOM_GPIO_CFG(8, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT8 */ + PCOM_GPIO_CFG(9, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT9 */ + PCOM_GPIO_CFG(10, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT10 */ + PCOM_GPIO_CFG(11, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* DAT11 */ + PCOM_GPIO_CFG(12, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_16MA), /* PCLK */ + PCOM_GPIO_CFG(13, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* HSYNC */ + PCOM_GPIO_CFG(14, 1, GPIO_INPUT, GPIO_PULL_UP, GPIO_2MA), /* VSYNC */ + PCOM_GPIO_CFG(15, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_16MA), /* MCLK 
*/ +}; + +static void config_camera_on_gpios(void) +{ + config_gpio_table(camera_on_gpio_table, + ARRAY_SIZE(camera_on_gpio_table)); +} + +static void config_camera_off_gpios(void) +{ + config_gpio_table(camera_off_gpio_table, + ARRAY_SIZE(camera_off_gpio_table)); +} + + +static struct resource msm_camera_resources[] = { + { + .start = MSM_VFE_PHYS, + .end = MSM_VFE_PHYS + MSM_VFE_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_VFE, + .end = INT_VFE, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct msm_camera_device_platform_data msm_camera_device_data = { + .camera_gpio_on = config_camera_on_gpios, + .camera_gpio_off = config_camera_off_gpios, + .ioext.mdcphy = MSM_MDC_PHYS, + .ioext.mdcsz = MSM_MDC_SIZE, + .ioext.appphy = MSM_CLK_CTL_PHYS, + .ioext.appsz = MSM_CLK_CTL_SIZE, +}; + +static void supersonic_maincam_clk_switch(void){ + int rc = 0; + pr_info("Supersonic: clk switch (supersonic)(maincam)\n"); + rc = gpio_request(SUPERSONIC_CLK_SWITCH, "maincam"); + if (rc < 0) + pr_err("GPIO (%d) request fail\n", SUPERSONIC_CLK_SWITCH); + else + gpio_direction_output(SUPERSONIC_CLK_SWITCH, 0); + gpio_free(SUPERSONIC_CLK_SWITCH); + + return; +} + +static void supersonic_seccam_clk_switch(void){ + int rc = 0; + pr_info("Supersonic: Doing clk switch (supersonic)(2ndcam)\n"); + rc = gpio_request(SUPERSONIC_CLK_SWITCH, "seccam"); + if (rc < 0) + pr_err("GPIO (%d) request fail\n", SUPERSONIC_CLK_SWITCH); + else + gpio_direction_output(SUPERSONIC_CLK_SWITCH, 1); + gpio_free(SUPERSONIC_CLK_SWITCH); + + return; +} + +enum msm_camera_source camera_source; +static void supersonic_set_source(enum msm_camera_source source) +{ + camera_source = source; +} + +enum msm_camera_source supersonic_get_source(void){ + return camera_source; +} + +static int camera_main_probed = 0; +static int supersonic_camera_main_get_probe(void) +{ + return camera_main_probed; +} +static void supersonic_camera_main_set_probe(int probed) +{ + camera_main_probed = probed; +} + +static int camera_sec_probed = 0; +static int supersonic_camera_sec_get_probe(void) +{ + return camera_sec_probed; +} +static void supersonic_camera_sec_set_probe(int probed) +{ + camera_sec_probed = probed; +} + +static struct camera_flash_cfg msm_camera_sensor_flash_cfg = { + .camera_flash = flashlight_control, + .num_flash_levels = FLASHLIGHT_NUM, + .low_temp_limit = 10, + .low_cap_limit = 15, +}; + +/*2nd source for 2nd camera*/ +static struct msm_camera_sensor_info msm_camera_sensor_s5k6aafx_data = { + .sensor_name = "s5k6aafx", + .sensor_reset = SUPERSONIC_MAINCAM_RST, + .sensor_pwd = SUPERSONIC_2NDCAM_PWD, + .camera_clk_switch = supersonic_seccam_clk_switch, + .camera_main_get_probe = supersonic_camera_sec_get_probe, + .camera_main_set_probe = supersonic_camera_sec_set_probe, + .pdata = &msm_camera_device_data, + .resource = msm_camera_resources, + .num_resources = ARRAY_SIZE(msm_camera_resources), + .waked_up = 0, + .need_suspend = 0, +}; + +static struct platform_device msm_camera_sensor_s5k6aafx = { + .name = "msm_camera_s5k6aafx", + .dev = { + .platform_data = &msm_camera_sensor_s5k6aafx_data, + }, +}; + +/*samsung for 2nd source main camera*/ +static struct msm_camera_sensor_info msm_camera_sensor_s5k3h1_data = { + .sensor_name = "s5k3h1gx", + .sensor_reset = SUPERSONIC_MAINCAM_RST, + .sensor_pwd = SUPERSONIC_MAINCAM_PWD, + .camera_clk_switch = supersonic_maincam_clk_switch, + .camera_set_source = supersonic_set_source, + .camera_main_get_probe = supersonic_camera_main_get_probe, + .camera_main_set_probe = 
supersonic_camera_main_set_probe, + .pdata = &msm_camera_device_data, + .resource = msm_camera_resources, + .num_resources = ARRAY_SIZE(msm_camera_resources), + .flash_cfg = &msm_camera_sensor_flash_cfg, +}; + +static struct platform_device msm_camera_sensor_s5k3h1 = { + .name = "msm_camera_s5k3h1gx", + .dev = { + .platform_data = &msm_camera_sensor_s5k3h1_data, + }, +}; +static struct msm_camera_sensor_info msm_camera_sensor_ov8810_data = { + .sensor_name = "ov8810", + .sensor_reset = SUPERSONIC_MAINCAM_RST, + .sensor_pwd = SUPERSONIC_MAINCAM_PWD, + .camera_clk_switch = supersonic_maincam_clk_switch, + .camera_set_source = supersonic_set_source, + .camera_main_get_probe = supersonic_camera_main_get_probe, + .camera_main_set_probe = supersonic_camera_main_set_probe, + .pdata = &msm_camera_device_data, + .resource = msm_camera_resources, + .num_resources = ARRAY_SIZE(msm_camera_resources), + .waked_up = 0, + .need_suspend = 0, + .flash_cfg = &msm_camera_sensor_flash_cfg, +}; + +static struct platform_device msm_camera_sensor_ov8810 = { + .name = "msm_camera_ov8810", + .dev = { + .platform_data = &msm_camera_sensor_ov8810_data, + }, +}; + +static struct msm_camera_sensor_info msm_camera_sensor_ov9665_data = { + .sensor_name = "ov9665", + .sensor_reset = SUPERSONIC_MAINCAM_RST, + .sensor_pwd = SUPERSONIC_2NDCAM_PWD, + .camera_clk_switch = supersonic_seccam_clk_switch, + .camera_get_source = supersonic_get_source, + .camera_main_get_probe = supersonic_camera_sec_get_probe, + .camera_main_set_probe = supersonic_camera_sec_set_probe, + .pdata = &msm_camera_device_data, + .resource = msm_camera_resources, + .num_resources = ARRAY_SIZE(msm_camera_resources), + .waked_up = 0, + .need_suspend = 0, +}; + +static struct platform_device msm_camera_sensor_ov9665 = { + .name = "msm_camera_ov9665", + .dev = { + .platform_data = &msm_camera_sensor_ov9665_data, + }, +}; +static void config_supersonic_flashlight_gpios(void) +{ + static uint32_t flashlight_gpio_table[] = { + PCOM_GPIO_CFG(SUPERSONIC_GPIO_FLASHLIGHT_TORCH, 0, + GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_GPIO_FLASHLIGHT_FLASH, 0, + GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), + PCOM_GPIO_CFG(SUPERSONIC_GPIO_FLASHLIGHT_FLASH_ADJ, 0, + GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), + }; + config_gpio_table(flashlight_gpio_table, + ARRAY_SIZE(flashlight_gpio_table)); +} + +static struct flashlight_platform_data supersonic_flashlight_data = { + .gpio_init = config_supersonic_flashlight_gpios, + .torch = SUPERSONIC_GPIO_FLASHLIGHT_TORCH, + .flash = SUPERSONIC_GPIO_FLASHLIGHT_FLASH, + .flash_adj = SUPERSONIC_GPIO_FLASHLIGHT_FLASH_ADJ, + .flash_duration_ms = 600, + .led_count = 1, +}; + +static struct platform_device supersonic_flashlight_device = { + .name = FLASHLIGHT_NAME, + .dev = { + .platform_data = &supersonic_flashlight_data, + }, +}; + +static struct msm_serial_hs_platform_data msm_uart_dm1_pdata = { + .rx_wakeup_irq = -1, + .inject_rx_on_wakeup = 0, + .exit_lpm_cb = bcm_bt_lpm_exit_lpm_locked, +}; + +static struct bcm_bt_lpm_platform_data bcm_bt_lpm_pdata = { + .gpio_wake = SUPERSONIC_GPIO_BT_CHIP_WAKE, + .gpio_host_wake = SUPERSONIC_GPIO_BT_HOST_WAKE, + .request_clock_off_locked = msm_hs_request_clock_off_locked, + .request_clock_on_locked = msm_hs_request_clock_on_locked, +}; + +struct platform_device bcm_bt_lpm_device = { + .name = "bcm_bt_lpm", + .id = 0, + .dev = { + .platform_data = &bcm_bt_lpm_pdata, + }, +}; + +static struct platform_device *devices[] __initdata = { +#ifndef CONFIG_MSM_SERIAL_DEBUGGER + &msm_device_uart1, 
+#endif + &bcm_bt_lpm_device, + &msm_device_uart_dm1, + &htc_battery_pdev, + &htc_headset_mgr, + &htc_headset_gpio, + &ram_console_device, + &supersonic_rfkill, + &msm_device_smd, + &msm_device_nand, + &msm_device_hsusb, + &usb_mass_storage_device, +#ifdef CONFIG_USB_ANDROID_RNDIS + &rndis_device, +#endif + &android_usb_device, +#ifndef CONFIG_ION_MSM + &android_pmem_mdp_device, +#else + &ion_dev, +#endif + &android_pmem_adsp_device, +// &android_pmem_camera_device, +#ifdef CONFIG_720P_CAMERA + &android_pmem_venc_device, +#endif +#ifdef CONFIG_BUILD_CIQ + &android_pmem_ciq_device, + &android_pmem_ciq1_device, + &android_pmem_ciq2_device, + &android_pmem_ciq3_device, +#endif + &msm_camera_sensor_s5k3h1, + &msm_camera_sensor_ov8810, + &msm_camera_sensor_s5k6aafx, + &msm_kgsl_3d0, + &msm_device_i2c, + &msm_camera_sensor_ov9665, + &supersonic_flashlight_device, + &supersonic_leds, +#if defined(CONFIG_SPI_QSD) + &msm_device_spi, +#endif +}; + +static uint32_t usb_phy_3v3_table[] = { + PCOM_GPIO_CFG(SUPERSONIC_USB_PHY_3V3_ENABLE, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA) +}; + +static uint32_t usb_ID_PIN_input_table[] = { + PCOM_GPIO_CFG(SUPERSONIC_GPIO_USB_ID_PIN, 0, GPIO_INPUT, GPIO_NO_PULL, GPIO_4MA), +}; + +static uint32_t usb_ID_PIN_ouput_table[] = { + PCOM_GPIO_CFG(SUPERSONIC_GPIO_USB_ID_PIN, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), +}; + +static void config_supersonic_usb_id_gpios(bool output) +{ + if (output){ + config_gpio_table(usb_ID_PIN_ouput_table, ARRAY_SIZE(usb_ID_PIN_ouput_table)); + gpio_set_value(SUPERSONIC_GPIO_USB_ID_PIN, 1); + printk(KERN_INFO "%s %d output high\n", __func__, SUPERSONIC_GPIO_USB_ID_PIN); + }else{ + config_gpio_table(usb_ID_PIN_input_table, ARRAY_SIZE(usb_ID_PIN_input_table)); + printk(KERN_INFO "%s %d input none pull\n", __func__, SUPERSONIC_GPIO_USB_ID_PIN); + } +} + +static struct msm_acpu_clock_platform_data supersonic_clock_data = { + .acpu_switch_time_us = 20, + .max_speed_delta_khz = 256000, + .vdd_switch_time_us = 62, + .power_collapse_khz = 245000, + .wait_for_irq_khz = 245000, +}; + +int supersonic_init_mmc(int sysrev); + +static int OJ_BMA_power(void) +{ + int ret; + struct vreg *vreg = vreg_get(0, "synt"); + + if (!vreg) { + printk(KERN_ERR "%s: vreg error\n", __func__); + return -EIO; + } + ret = vreg_set_level(vreg, 2850); + + ret = vreg_enable(vreg); + if (ret < 0) + printk(KERN_ERR "%s: vreg enable failed\n", __func__); + + return 0; +} + +unsigned supersonic_get_skuid(void) +{ + return skuid; +} + +static ssize_t supersonic_virtual_keys_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, + __stringify(EV_KEY) ":" __stringify(KEY_HOME) ":43:835:86:50" + ":" __stringify(EV_KEY) ":" __stringify(KEY_MENU) ":165:835:100:50" + ":" __stringify(EV_KEY) ":" __stringify(KEY_BACK) ":300:835:110:50" + ":" __stringify(EV_KEY) ":" __stringify(KEY_SEARCH) ":425:835:90:50" + "\n"); +} + +static struct kobj_attribute supersonic_virtual_keys_attr = { + .attr = { + .name = "virtualkeys.atmel-touchscreen", + .mode = S_IRUGO, + }, + .show = &supersonic_virtual_keys_show, +}; + +static struct attribute *supersonic_properties_attrs[] = { + &supersonic_virtual_keys_attr.attr, + NULL +}; + +static struct attribute_group supersonic_properties_attr_group = { + .attrs = supersonic_properties_attrs, +}; + +static void supersonic_reset(void) +{ + gpio_set_value(SUPERSONIC_GPIO_PS_HOLD, 0); +} + +/* system_rev == higher 16bits of PCBID +XA -> 0000FFFF -> 0x0000 +XB -> 0101FFFF -> 0x0101 +XC -> 0202FFFF -> 0x0202 +*/ +static void 
__init supersonic_init(void) +{ + int ret; + struct kobject *properties_kobj; + + printk("supersonic_init() revision=%d\n", system_rev); + + msm_hw_reset_hook = supersonic_reset; + + supersonic_board_serialno_setup(board_serialno()); + + OJ_BMA_power(); + + msm_acpu_clock_init(&supersonic_clock_data); + +#if defined(CONFIG_MSM_SERIAL_DEBUGGER) + msm_serial_debug_init(MSM_UART1_PHYS, INT_UART1, + &msm_device_uart1.dev, 1, MSM_GPIO_TO_INT(SUPERSONIC_GPIO_UART1_RX)); +#endif + +#ifdef CONFIG_SPI_QSD + msm_device_spi.dev.platform_data = &supersonic_spi_pdata; +#endif + + msm_device_uart_dm1.dev.platform_data = &msm_uart_dm1_pdata; + + config_gpio_table(usb_phy_3v3_table, ARRAY_SIZE(usb_phy_3v3_table)); + config_gpio_table(camera_off_gpio_table, + ARRAY_SIZE(camera_off_gpio_table)); + gpio_request(SUPERSONIC_GPIO_TP_EN, "tp_en"); + gpio_direction_output(SUPERSONIC_GPIO_TP_EN, 0); + + supersonic_audio_init(); + supersonic_init_panel(); +#ifdef CONFIG_MICROP_COMMON + supersonic_microp_init(); +#endif + + platform_add_devices(devices, ARRAY_SIZE(devices)); + + platform_add_devices(msm_footswitch_devices, + msm_num_footswitch_devices); + + if (!opt_usb_h2w_sw) { + msm_device_hsusb.dev.platform_data = &msm_hsusb_pdata; + config_supersonic_usb_id_gpios(0); + } + i2c_register_board_info(0, i2c_devices, ARRAY_SIZE(i2c_devices)); + + ret = supersonic_init_mmc(system_rev); + if (ret != 0) + pr_crit("%s: Unable to initialize MMC\n", __func__); + + properties_kobj = kobject_create_and_add("board_properties", NULL); + if (properties_kobj) + ret = sysfs_create_group(properties_kobj, + &supersonic_properties_attr_group); + if (!properties_kobj || ret) + pr_err("failed to create board_properties\n"); + + msm_init_pmic_vibrator(); +} + +static void __init supersonic_fixup(struct machine_desc *desc, struct tag *tags, + char **cmdline, struct meminfo *mi) +{ + skuid = parse_tag_skuid((const struct tag *)tags); + printk(KERN_INFO "supersonic_fixup:skuid=0x%x\n", skuid); + /* First Bank 256MB */ + mi->nr_banks = 1; + mi->bank[0].start = PHYS_OFFSET; +// mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET); + mi->bank[0].size = MSM_EBI1_BANK0_SIZE; /*(219*1024*1024);*/ + + /* Second Bank 128MB */ + mi->nr_banks++; + mi->bank[1].start = MSM_EBI1_BANK1_BASE; +// mi->bank[1].node = PHYS_TO_NID(MSM_EBI1_BANK1_BASE); + mi->bank[1].size = MSM_EBI1_BANK1_SIZE; +} + +static void __init supersonic_map_io(void) +{ + msm_map_qsd8x50_io(); + msm_clock_init(msm_clocks_8x50, msm_num_clocks_8x50); + if (socinfo_init() < 0) + printk(KERN_ERR "%s: socinfo_init() failed!\n",__func__); +} + +extern struct sys_timer msm_timer; + +MACHINE_START(SUPERSONIC, "supersonic") +#ifdef CONFIG_MSM_DEBUG_UART + .phys_io = MSM_DEBUG_UART_PHYS, + .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc, +#endif + .boot_params = 0x20000100, + .fixup = supersonic_fixup, + .map_io = supersonic_map_io, + .init_irq = msm_init_irq, + .init_machine = supersonic_init, + .timer = &msm_timer, +MACHINE_END diff --git a/arch/arm/mach-msm/board-supersonic.h b/arch/arm/mach-msm/board-supersonic.h new file mode 100644 index 0000000000000..d71d6c7eb1264 --- /dev/null +++ b/arch/arm/mach-msm/board-supersonic.h @@ -0,0 +1,195 @@ +/* arch/arm/mach-msm/board-supersonic.h + * + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. +*/ + +#ifndef __ARCH_ARM_MACH_MSM_BOARD_SUPERSONIC_H +#define __ARCH_ARM_MACH_MSM_BOARD_SUPERSONIC_H + +#include + +#define MSM_SMI_BASE 0x02B00000 +#define MSM_SMI_SIZE 0x01500000 + +#define MSM_HDMI_FB_BASE 0x02B00000 +#define MSM_HDMI_FB_SIZE 0x00400000 + +#define MSM_PMEM_VENC_BASE 0x02F00000 +#define MSM_PMEM_VENC_SIZE 0x00800000 + +#define MSM_GPU_MEM_BASE 0x03700000 +#define MSM_GPU_MEM_SIZE 0x00500000 + +#define MSM_RAM_CONSOLE_BASE 0x03C00000 +#define MSM_RAM_CONSOLE_SIZE 0x00040000 + +#ifdef CONFIG_BUILD_CIQ +#define MSM_PMEM_CIQ_BASE MSM_RAM_CONSOLE_BASE + MSM_RAM_CONSOLE_SIZE +#define MSM_PMEM_CIQ_SIZE SZ_64K +#define MSM_PMEM_CIQ1_BASE MSM_PMEM_CIQ_BASE +#define MSM_PMEM_CIQ1_SIZE MSM_PMEM_CIQ_SIZE +#define MSM_PMEM_CIQ2_BASE MSM_PMEM_CIQ_BASE +#define MSM_PMEM_CIQ2_SIZE MSM_PMEM_CIQ_SIZE +#define MSM_PMEM_CIQ3_BASE MSM_PMEM_CIQ_BASE +#define MSM_PMEM_CIQ3_SIZE MSM_PMEM_CIQ_SIZE +#endif + +#define MSM_FB_BASE 0x03D00000 +#define MSM_FB_SIZE 0x00300000 + +#define MSM_EBI1_BANK0_BASE 0x20000000 +//#define MSM_EBI1_BANK0_SIZE 0x0E000000 /* radio < 3210 */ +#define MSM_EBI1_BANK0_SIZE 0x0E800000 /*for radio >=3210 */ + +/* 4Gb/512MB DRAM */ +#define MSM_EBI1_BANK1_BASE 0x30000000 +#define MSM_EBI1_BANK1_SIZE 0x0C000000 + +#define MSM_PMEM_MDP_BASE 0x3C000000 +#define MSM_PMEM_MDP_SIZE 0x02000000 + +#define MSM_PMEM_ADSP_BASE 0x3E000000 +#define MSM_PMEM_ADSP_SIZE 0x02000000 + +#define SUPERSONIC_GPIO_UP_INT_N 35 +#define SUPERSONIC_GPIO_UP_RESET_N 108 + +#define SUPERSONIC_GPIO_TP_RST 34 +#define SUPERSONIC_GPIO_TP_INT_N 38 +#define SUPERSONIC_GPIO_TP_EN 100 /* V_TP3V3_EN */ + +//#define SUPERSONIC_GPIO_POWER_KEY 94 +#define SUPERSONIC_GPIO_SDMC_CD_N 28 + +/* BT */ +#define SUPERSONIC_GPIO_BT_UART1_RTS (43) +#define SUPERSONIC_GPIO_BT_UART1_CTS (44) +#define SUPERSONIC_GPIO_BT_UART1_RX (45) +#define SUPERSONIC_GPIO_BT_UART1_TX (46) +#define SUPERSONIC_GPIO_BT_RESET_N (27) +#define SUPERSONIC_GPIO_BT_SHUTDOWN_N (146) +#define SUPERSONIC_GPIO_BT_HOST_WAKE (86) +#define SUPERSONIC_GPIO_BT_CHIP_WAKE (87) + +#define SUPERSONIC_GPIO_COMPASS_RST_N 107 +#define SUPERSONIC_GPIO_COMPASS_INT_N 36 +#define SUPERSONIC_PROJECT_NAME "supersonic" +#define SUPERSONIC_LAYOUTS { \ + { { 0, 1, 0}, { -1, 0, 0}, {0, 0, 1} }, \ + { { 0, -1, 0}, { -1, 0, 0}, {0, 0, 1} }, \ + { { -1, 0, 0}, { 0, -1, 0}, {0, 0, 1} }, \ + { { 1, 0, 0}, { 0, 0, 1}, {0, 1, 0} } \ + } + +/* Proximity */ +#define SUPERSONIC_GPIO_PROXIMITY_EN_N 109 + +/* Battery */ +#define SUPERSONIC_GPIO_MBAT_IN 39 +#define SUPERSONIC_GPIO_MCHG_EN_N 22 +#define SUPERSONIC_GPIO_ISET 16 + +/*Audio */ +#define SUPERSONIC_AUD_JACKHP_EN 157 +#define SUPERSONIC_AUD_2V5_EN 26 +#define SUPERSONIC_AUD_SPK_EN 129 + +/* Bluetooth PCM */ +#define SUPERSONIC_BT_PCM_OUT 68 +#define SUPERSONIC_BT_PCM_IN 69 +#define SUPERSONIC_BT_PCM_SYNC 70 +#define SUPERSONIC_BT_PCM_CLK 71 + +//#define SUPERSONIC_MENU_KEY 40 +#define SUPERSONIC_VOLUME_UP 41 +#define SUPERSONIC_VOLUME_DOWN 42 +#define SUPERSONIC_POWER_KEY 94 + +/* flash light */ +#define SUPERSONIC_GPIO_FLASHLIGHT_FLASH (84) +#define SUPERSONIC_GPIO_FLASHLIGHT_TORCH (85) +#define SUPERSONIC_GPIO_FLASHLIGHT_FLASH_ADJ (31) + +/* AP Key Led turn on*/ +#define SUPERSONIC_AP_KEY_LED_EN (32) + +/* UART/USB switch : high -> UART, low -> HSUSB */ +#define 
SUPERSONIC_USB_UARTz_SW 33 +#define SUPERSONIC_WIMAX_CPU_UARTz_SW 160 + +/* USB PHY 3V3 enable*/ +#define SUPERSONIC_USB_PHY_3V3_ENABLE (104) +#define SUPERSONIC_GPIO_USB_CABLE_IN_PIN (82) +#define SUPERSONIC_GPIO_USB_ID_PIN (37) +/* 35mm headset */ +#define SUPERSONIC_GPIO_35MM_HEADSET_DET (153) +#if 0 /* TODO */ +//#define SUPERSONIC_GPIO_H2W_POWER (27) +//#define SUPERSONIC_GPIO_CABLE_IN1 (38) +#define SUPERSONIC_GPIO_CABLE_IN (37) +//#define SUPERSONIC_GPIO_H2W_DATA (139) +//#define SUPERSONIC_GPIO_H2W_CLK (140) +#endif + +/* UART1*/ +#define SUPERSONIC_GPIO_UART1_RX (139) +#define SUPERSONIC_GPIO_UART1_TX (140) + +/* Wifi */ +#define SUPERSONIC_GPIO_WIFI_SHUTDOWN_N 147 +#define SUPERSONIC_GPIO_WIFI_IRQ 152 +/*camera*/ +#define SUPERSONIC_MAINCAM_PWD 105 +#define SUPERSONIC_MAINCAM_RST 99 +#define SUPERSONIC_2NDCAM_PWD 120 +#define SUPERSONIC_CLK_SWITCH 102 + +#define SUPERSONIC_LCD_RST (113) +unsigned supersonic_get_skuid(void); + +/* HDMI */ +#define HDMI_RST (111) +#define V_HDMI_1V2_EN (119) +#define V_VGA_5V_SIL9022A_EN (127) +#define V_HDMI_3V3_EN (128) +#define SUPERSONIC_I2S_CLK (142) +#define SUPERSONIC_I2S_WS (143) +#define SUPERSONIC_I2S_DOUT (145) + +/* LCD RGB */ +#define SUPERSONIC_LCD_R0 (114) +#define SUPERSONIC_LCD_R1 (115) +#define SUPERSONIC_LCD_R2 (116) +#define SUPERSONIC_LCD_R3 (117) +#define SUPERSONIC_LCD_R4 (118) + +#define SUPERSONIC_LCD_G0 (121) +#define SUPERSONIC_LCD_G1 (122) +#define SUPERSONIC_LCD_G2 (123) +#define SUPERSONIC_LCD_G3 (124) +#define SUPERSONIC_LCD_G4 (125) +#define SUPERSONIC_LCD_G5 (126) + +#define SUPERSONIC_LCD_B0 (130) +#define SUPERSONIC_LCD_B1 (131) +#define SUPERSONIC_LCD_B2 (132) +#define SUPERSONIC_LCD_B3 (133) +#define SUPERSONIC_LCD_B4 (134) + +#define SUPERSONIC_LCD_PCLK (135) +#define SUPERSONIC_LCD_VSYNC (136) +#define SUPERSONIC_LCD_HSYNC (137) +#define SUPERSONIC_LCD_DE (138) + +#define SUPERSONIC_GPIO_PS_HOLD (25) + +#endif /* __ARCH_ARM_MACH_MSM_BOARD_SUPERSONIC_H */ diff --git a/arch/arm/mach-msm/board-swordfish-keypad.c b/arch/arm/mach-msm/board-swordfish-keypad.c new file mode 100644 index 0000000000000..f2c2f3962f651 --- /dev/null +++ b/arch/arm/mach-msm/board-swordfish-keypad.c @@ -0,0 +1,177 @@ +/* linux/arch/arm/mach-msm/board-swordfish-keypad.c + * + * Copyright (C) 2007 Google, Inc. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include + +#undef MODULE_PARAM_PREFIX +#define MODULE_PARAM_PREFIX "board_swordfish." 
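+/* Note: because of the MODULE_PARAM_PREFIX above, the built-in parameter
+ * declared below is exposed as "board_swordfish.ffa" (kernel command line
+ * board_swordfish.ffa=1, or /sys/module/board_swordfish/parameters/ffa) and
+ * selects the FFA keymap in swordfish_init_keypad(). */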
+static int swordfish_ffa; +module_param_named(ffa, swordfish_ffa, int, S_IRUGO | S_IWUSR | S_IWGRP); + +#define SCAN_FUNCTION_KEYS 0 /* don't turn this on without updating the ffa support */ + +static unsigned int swordfish_row_gpios[] = { + 31, 32, 33, 34, 35, 41 +#if SCAN_FUNCTION_KEYS + , 42 +#endif +}; + +static unsigned int swordfish_col_gpios[] = { 36, 37, 38, 39, 40 }; + +/* FFA: + 36: KEYSENSE_N(0) + 37: KEYSENSE_N(1) + 38: KEYSENSE_N(2) + 39: KEYSENSE_N(3) + 40: KEYSENSE_N(4) + + 31: KYPD_17 + 32: KYPD_15 + 33: KYPD_13 + 34: KYPD_11 + 35: KYPD_9 + 41: KYPD_MEMO +*/ + +#define KEYMAP_INDEX(row, col) ((row)*ARRAY_SIZE(swordfish_col_gpios) + (col)) + +static const unsigned short swordfish_keymap[ARRAY_SIZE(swordfish_col_gpios) * ARRAY_SIZE(swordfish_row_gpios)] = { + [KEYMAP_INDEX(0, 0)] = KEY_5, + [KEYMAP_INDEX(0, 1)] = KEY_9, + [KEYMAP_INDEX(0, 2)] = 229, /* SOFT1 */ + [KEYMAP_INDEX(0, 3)] = KEY_6, + [KEYMAP_INDEX(0, 4)] = KEY_LEFT, + + [KEYMAP_INDEX(1, 0)] = KEY_0, + [KEYMAP_INDEX(1, 1)] = KEY_RIGHT, + [KEYMAP_INDEX(1, 2)] = KEY_1, + [KEYMAP_INDEX(1, 3)] = 228, /* KEY_SHARP */ + [KEYMAP_INDEX(1, 4)] = KEY_SEND, + + [KEYMAP_INDEX(2, 0)] = KEY_VOLUMEUP, + [KEYMAP_INDEX(2, 1)] = KEY_HOME, /* FA */ + [KEYMAP_INDEX(2, 2)] = KEY_F8, /* QCHT */ + [KEYMAP_INDEX(2, 3)] = KEY_F6, /* R+ */ + [KEYMAP_INDEX(2, 4)] = KEY_F7, /* R- */ + + [KEYMAP_INDEX(3, 0)] = KEY_UP, + [KEYMAP_INDEX(3, 1)] = KEY_CLEAR, + [KEYMAP_INDEX(3, 2)] = KEY_4, + [KEYMAP_INDEX(3, 3)] = KEY_MUTE, /* SPKR */ + [KEYMAP_INDEX(3, 4)] = KEY_2, + + [KEYMAP_INDEX(4, 0)] = 230, /* SOFT2 */ + [KEYMAP_INDEX(4, 1)] = 232, /* KEY_CENTER */ + [KEYMAP_INDEX(4, 2)] = KEY_DOWN, + [KEYMAP_INDEX(4, 3)] = KEY_BACK, /* FB */ + [KEYMAP_INDEX(4, 4)] = KEY_8, + + [KEYMAP_INDEX(5, 0)] = KEY_VOLUMEDOWN, + [KEYMAP_INDEX(5, 1)] = 227, /* KEY_STAR */ + [KEYMAP_INDEX(5, 2)] = KEY_MAIL, /* MESG */ + [KEYMAP_INDEX(5, 3)] = KEY_3, + [KEYMAP_INDEX(5, 4)] = KEY_7, + +#if SCAN_FUNCTION_KEYS + [KEYMAP_INDEX(6, 0)] = KEY_F5, + [KEYMAP_INDEX(6, 1)] = KEY_F4, + [KEYMAP_INDEX(6, 2)] = KEY_F3, + [KEYMAP_INDEX(6, 3)] = KEY_F2, + [KEYMAP_INDEX(6, 4)] = KEY_F1 +#endif +}; + +static const unsigned short swordfish_keymap_ffa[ARRAY_SIZE(swordfish_col_gpios) * ARRAY_SIZE(swordfish_row_gpios)] = { + /*[KEYMAP_INDEX(0, 0)] = ,*/ + /*[KEYMAP_INDEX(0, 1)] = ,*/ + [KEYMAP_INDEX(0, 2)] = KEY_1, + [KEYMAP_INDEX(0, 3)] = KEY_SEND, + [KEYMAP_INDEX(0, 4)] = KEY_LEFT, + + [KEYMAP_INDEX(1, 0)] = KEY_3, + [KEYMAP_INDEX(1, 1)] = KEY_RIGHT, + [KEYMAP_INDEX(1, 2)] = KEY_VOLUMEUP, + /*[KEYMAP_INDEX(1, 3)] = ,*/ + [KEYMAP_INDEX(1, 4)] = KEY_6, + + [KEYMAP_INDEX(2, 0)] = KEY_HOME, /* A */ + [KEYMAP_INDEX(2, 1)] = KEY_BACK, /* B */ + [KEYMAP_INDEX(2, 2)] = KEY_0, + [KEYMAP_INDEX(2, 3)] = 228, /* KEY_SHARP */ + [KEYMAP_INDEX(2, 4)] = KEY_9, + + [KEYMAP_INDEX(3, 0)] = KEY_UP, + [KEYMAP_INDEX(3, 1)] = 232, /* KEY_CENTER */ /* i */ + [KEYMAP_INDEX(3, 2)] = KEY_4, + /*[KEYMAP_INDEX(3, 3)] = ,*/ + [KEYMAP_INDEX(3, 4)] = KEY_2, + + [KEYMAP_INDEX(4, 0)] = KEY_VOLUMEDOWN, + [KEYMAP_INDEX(4, 1)] = KEY_SOUND, + [KEYMAP_INDEX(4, 2)] = KEY_DOWN, + [KEYMAP_INDEX(4, 3)] = KEY_8, + [KEYMAP_INDEX(4, 4)] = KEY_5, + + /*[KEYMAP_INDEX(5, 0)] = ,*/ + [KEYMAP_INDEX(5, 1)] = 227, /* KEY_STAR */ + [KEYMAP_INDEX(5, 2)] = 230, /*SOFT2*/ /* 2 */ + [KEYMAP_INDEX(5, 3)] = KEY_MENU, /* 1 */ + [KEYMAP_INDEX(5, 4)] = KEY_7, +}; + +static struct gpio_event_matrix_info swordfish_matrix_info = { + .info.func = gpio_event_matrix_func, + .keymap = swordfish_keymap, + .output_gpios = swordfish_row_gpios, + .input_gpios = 
swordfish_col_gpios, + .noutputs = ARRAY_SIZE(swordfish_row_gpios), + .ninputs = ARRAY_SIZE(swordfish_col_gpios), + .settle_time.tv.nsec = 0, + .poll_time.tv.nsec = 20 * NSEC_PER_MSEC, + .flags = GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_DRIVE_INACTIVE | GPIOKPF_PRINT_UNMAPPED_KEYS /*| GPIOKPF_PRINT_MAPPED_KEYS*/ +}; + +struct gpio_event_info *swordfish_keypad_info[] = { + &swordfish_matrix_info.info +}; + +static struct gpio_event_platform_data swordfish_keypad_data = { + .name = "swordfish_keypad", + .info = swordfish_keypad_info, + .info_count = ARRAY_SIZE(swordfish_keypad_info) +}; + +static struct platform_device swordfish_keypad_device = { + .name = GPIO_EVENT_DEV_NAME, + .id = -1, + .dev = { + .platform_data = &swordfish_keypad_data, + }, +}; + +static int __init swordfish_init_keypad(void) +{ + if (!machine_is_swordfish()) + return 0; + if (swordfish_ffa) + swordfish_matrix_info.keymap = swordfish_keymap_ffa; + return platform_device_register(&swordfish_keypad_device); +} + +device_initcall(swordfish_init_keypad); diff --git a/arch/arm/mach-msm/board-swordfish-mmc.c b/arch/arm/mach-msm/board-swordfish-mmc.c new file mode 100644 index 0000000000000..d4424e3a7ca3a --- /dev/null +++ b/arch/arm/mach-msm/board-swordfish-mmc.c @@ -0,0 +1,244 @@ +/* linux/arch/arm/mach-msm/board-swordfish-mmc.c + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "proc_comm.h" +#include "devices.h" + +#define FPGA_BASE 0x70000000 +#define FPGA_SDIO_STATUS 0x280 + +static void __iomem *fpga_base; + +#define DEBUG_SWORDFISH_MMC 1 + +extern int msm_add_sdcc(unsigned int controller, struct mmc_platform_data *plat, + unsigned int stat_irq, unsigned long stat_irq_flags); + +static unsigned sdc1_gpio_table[] = { + PCOM_GPIO_CFG(51, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(52, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(53, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(54, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(55, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(56, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), +}; + +static unsigned sdc2_gpio_table[] = { + PCOM_GPIO_CFG(62, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), + PCOM_GPIO_CFG(63, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(64, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(65, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(66, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(67, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), +}; + +static unsigned sdc3_gpio_table[] = { + PCOM_GPIO_CFG(88, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), + PCOM_GPIO_CFG(89, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(90, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(91, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(92, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(93, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), +}; + +static unsigned sdc4_gpio_table[] = { + PCOM_GPIO_CFG(142, 3, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), + PCOM_GPIO_CFG(143, 3, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(144, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(145, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(146, 3, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), + PCOM_GPIO_CFG(147, 3, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), +}; + +struct sdc_info { + unsigned *table; + unsigned len; +}; + +static struct sdc_info sdcc_gpio_tables[] = { + [0] = { + .table = sdc1_gpio_table, + .len = ARRAY_SIZE(sdc1_gpio_table), + }, + [1] = { + .table = sdc2_gpio_table, + .len = ARRAY_SIZE(sdc2_gpio_table), + }, + [2] = { + .table = sdc3_gpio_table, + .len = ARRAY_SIZE(sdc3_gpio_table), + }, + [3] = { + .table = sdc4_gpio_table, + .len = ARRAY_SIZE(sdc4_gpio_table), + }, +}; + +static int swordfish_sdcc_setup_gpio(int dev_id, unsigned enable) +{ + struct sdc_info *info; + + if (dev_id < 1 || dev_id > 4) + return -1; + + info = &sdcc_gpio_tables[dev_id - 1]; + return config_gpio_table(info->table, info->len, enable); +} + +struct mmc_vdd_xlat { + int mask; + int level; +}; + +static struct mmc_vdd_xlat mmc_vdd_table[] = { + { MMC_VDD_165_195, 1800 }, + { MMC_VDD_20_21, 2050 }, + { MMC_VDD_21_22, 2150 }, + { MMC_VDD_22_23, 2250 }, + { MMC_VDD_23_24, 2350 }, + { MMC_VDD_24_25, 2450 }, + { MMC_VDD_25_26, 2550 }, + { MMC_VDD_26_27, 2650 }, + { MMC_VDD_27_28, 2750 }, + { MMC_VDD_28_29, 2850 }, + { MMC_VDD_29_30, 2950 }, +}; + +static struct vreg *vreg_sdcc; +static unsigned int vreg_sdcc_enabled; +static unsigned int sdcc_vdd = 0xffffffff; + +static uint32_t sdcc_translate_vdd(struct device *dev, unsigned int vdd) +{ + int i; + int rc = 0; + struct platform_device *pdev; + + pdev = container_of(dev, struct platform_device, dev); + BUG_ON(!vreg_sdcc); + + if (vdd == 
sdcc_vdd) + return 0; + + sdcc_vdd = vdd; + + /* enable/disable the signals to the slot */ + swordfish_sdcc_setup_gpio(pdev->id, !!vdd); + + /* power down */ + if (vdd == 0) { +#if DEBUG_SWORDFISH_MMC + pr_info("%s: disable sdcc power\n", __func__); +#endif + vreg_disable(vreg_sdcc); + vreg_sdcc_enabled = 0; + return 0; + } + + if (!vreg_sdcc_enabled) { + rc = vreg_enable(vreg_sdcc); + if (rc) + pr_err("%s: Error enabling vreg (%d)\n", __func__, rc); + vreg_sdcc_enabled = 1; + } + + for (i = 0; i < ARRAY_SIZE(mmc_vdd_table); i++) { + if (mmc_vdd_table[i].mask != (1 << vdd)) + continue; +#if DEBUG_SWORDFISH_MMC + pr_info("%s: Setting level to %u\n", __func__, + mmc_vdd_table[i].level); +#endif + rc = vreg_set_level(vreg_sdcc, mmc_vdd_table[i].level); + if (rc) + pr_err("%s: Error setting vreg level (%d)\n", __func__, rc); + return 0; + } + + pr_err("%s: Invalid VDD %d specified\n", __func__, vdd); + return 0; +} + +static unsigned int swordfish_sdcc_slot_status (struct device *dev) +{ + struct platform_device *pdev; + uint32_t sdcc_stat; + + pdev = container_of(dev, struct platform_device, dev); + + sdcc_stat = readl(fpga_base + FPGA_SDIO_STATUS); + + /* bit 0 - sdcc1 crd_det + * bit 1 - sdcc1 wr_prt + * bit 2 - sdcc2 crd_det + * bit 3 - sdcc2 wr_prt + * etc... + */ + + /* crd_det is active low */ + return !(sdcc_stat & (1 << ((pdev->id - 1) << 1))); +} + +#define SWORDFISH_MMC_VDD (MMC_VDD_165_195 | MMC_VDD_20_21 | MMC_VDD_21_22 \ + | MMC_VDD_22_23 | MMC_VDD_23_24 | MMC_VDD_24_25 \ + | MMC_VDD_25_26 | MMC_VDD_26_27 | MMC_VDD_27_28 \ + | MMC_VDD_28_29 | MMC_VDD_29_30) + +static struct mmc_platform_data swordfish_sdcc_data = { + .ocr_mask = SWORDFISH_MMC_VDD/*MMC_VDD_27_28 | MMC_VDD_28_29*/, + .status = swordfish_sdcc_slot_status, + .translate_vdd = sdcc_translate_vdd, +}; + +int __init swordfish_init_mmc(void) +{ + vreg_sdcc_enabled = 0; + vreg_sdcc = vreg_get(NULL, "gp5"); + if (IS_ERR(vreg_sdcc)) { + pr_err("%s: vreg get failed (%ld)\n", + __func__, PTR_ERR(vreg_sdcc)); + return PTR_ERR(vreg_sdcc); + } + + fpga_base = ioremap(FPGA_BASE, SZ_4K); + if (!fpga_base) { + pr_err("%s: Can't ioremap FPGA base address (0x%08x)\n", + __func__, FPGA_BASE); + vreg_put(vreg_sdcc); + return -EIO; + } + + msm_add_sdcc(1, &swordfish_sdcc_data, 0, 0); + msm_add_sdcc(4, &swordfish_sdcc_data, 0, 0); + + return 0; +} + diff --git a/arch/arm/mach-msm/board-swordfish-panel.c b/arch/arm/mach-msm/board-swordfish-panel.c new file mode 100644 index 0000000000000..cf5f3f62b767d --- /dev/null +++ b/arch/arm/mach-msm/board-swordfish-panel.c @@ -0,0 +1,116 @@ +/* linux/arch/arm/mach-msm/board-swordfish-panel.c + * + * Copyright (c) 2009 Google Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Author: Dima Zavin + */ + +#include +#include +#include +#include + +#include +#include + +#include + +#include "board-swordfish.h" +#include "devices.h" + +#define CLK_NS_TO_RATE(ns) (1000000000UL / (ns)) + +int swordfish_panel_blank(struct msm_lcdc_panel_ops *ops) +{ + /* TODO: Turn backlight off? 
*/ + return 0; +} + +int swordfish_panel_unblank(struct msm_lcdc_panel_ops *ops) +{ + /* TODO: Turn backlight on? */ + return 0; +} + +int swordfish_panel_init(struct msm_lcdc_panel_ops *ops) +{ + return 0; +} + +static struct resource resources_msm_fb[] = { + { + .start = MSM_FB_BASE, + .end = MSM_FB_BASE + MSM_FB_SIZE, + .flags = IORESOURCE_MEM, + }, +}; + +static struct msm_lcdc_timing swordfish_lcdc_timing = { + .clk_rate = CLK_NS_TO_RATE(26), + .hsync_pulse_width = 60, + .hsync_back_porch = 81, + .hsync_front_porch = 81, + .hsync_skew = 0, + .vsync_pulse_width = 2, + .vsync_back_porch = 20, + .vsync_front_porch = 27, + .vsync_act_low = 0, + .hsync_act_low = 0, + .den_act_low = 0, +}; + +static struct msm_fb_data swordfish_lcdc_fb_data = { + .xres = 800, + .yres = 480, + .width = 94, + .height = 57, + .output_format = 0, +}; + +static struct msm_lcdc_panel_ops swordfish_lcdc_panel_ops = { + .init = swordfish_panel_init, + .blank = swordfish_panel_blank, + .unblank = swordfish_panel_unblank, +}; + +static struct msm_lcdc_platform_data swordfish_lcdc_platform_data = { + .panel_ops = &swordfish_lcdc_panel_ops, + .timing = &swordfish_lcdc_timing, + .fb_id = 0, + .fb_data = &swordfish_lcdc_fb_data, + .fb_resource = &resources_msm_fb[0], +}; + +static struct platform_device swordfish_lcdc_device = { + .name = "msm_mdp_lcdc", + .id = -1, + .dev = { + .platform_data = &swordfish_lcdc_platform_data, + }, +}; + +int __init swordfish_init_panel(void) +{ + int rc; + if (!machine_is_swordfish()) + return 0; + + if ((rc = platform_device_register(&msm_device_mdp)) != 0) + return rc; + + if ((rc = platform_device_register(&swordfish_lcdc_device)) != 0) + return rc; + + return 0; +} + +device_initcall(swordfish_init_panel); diff --git a/arch/arm/mach-msm/board-swordfish.c b/arch/arm/mach-msm/board-swordfish.c new file mode 100644 index 0000000000000..12948eb9e1703 --- /dev/null +++ b/arch/arm/mach-msm/board-swordfish.c @@ -0,0 +1,356 @@ +/* linux/arch/arm/mach-msm/board-swordfish.c + * + * Copyright (C) 2009 Google, Inc. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "board-swordfish.h" +#include "devices.h" +#include "proc_comm.h" + +extern int swordfish_init_mmc(void); + +static struct resource smc91x_resources[] = { + [0] = { + .start = 0x70000300, + .end = 0x70000400, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = MSM_GPIO_TO_INT(156), + .end = MSM_GPIO_TO_INT(156), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device smc91x_device = { + .name = "smc91x", + .id = 0, + .num_resources = ARRAY_SIZE(smc91x_resources), + .resource = smc91x_resources, +}; + +static int swordfish_phy_init_seq[] = { + 0x0C, 0x31, + 0x1D, 0x0D, + 0x1D, 0x10, + -1 +}; + +static void swordfish_usb_phy_reset(void) +{ + u32 id; + int ret; + + id = PCOM_CLKRGM_APPS_RESET_USB_PHY; + ret = msm_proc_comm(PCOM_CLK_REGIME_SEC_RESET_ASSERT, &id, NULL); + if (ret) { + pr_err("%s: Cannot assert (%d)\n", __func__, ret); + return; + } + + msleep(1); + + id = PCOM_CLKRGM_APPS_RESET_USB_PHY; + ret = msm_proc_comm(PCOM_CLK_REGIME_SEC_RESET_DEASSERT, &id, NULL); + if (ret) { + pr_err("%s: Cannot assert (%d)\n", __func__, ret); + return; + } +} + +static void swordfish_usb_hw_reset(bool enable) +{ + u32 id; + int ret; + u32 func; + + id = PCOM_CLKRGM_APPS_RESET_USBH; + if (enable) + func = PCOM_CLK_REGIME_SEC_RESET_ASSERT; + else + func = PCOM_CLK_REGIME_SEC_RESET_DEASSERT; + ret = msm_proc_comm(func, &id, NULL); + if (ret) + pr_err("%s: Cannot set reset to %d (%d)\n", __func__, enable, + ret); +} + + +static struct msm_hsusb_platform_data msm_hsusb_pdata = { + .phy_init_seq = swordfish_phy_init_seq, + .phy_reset = swordfish_usb_phy_reset, + .hw_reset = swordfish_usb_hw_reset, +}; + +static struct usb_mass_storage_platform_data mass_storage_pdata = { + .nluns = 1, + .vendor = "Qualcomm", + .product = "Swordfish", + .release = 0x0100, +}; + +static struct platform_device usb_mass_storage_device = { + .name = "usb_mass_storage", + .id = -1, + .dev = { + .platform_data = &mass_storage_pdata, + }, +}; + +static struct resource msm_kgsl_resources[] = { + { + .name = "kgsl_reg_memory", + .start = MSM_GPU_REG_PHYS, + .end = MSM_GPU_REG_PHYS + MSM_GPU_REG_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .name = "kgsl_phys_memory", + .start = MSM_GPU_MEM_BASE, + .end = MSM_GPU_MEM_BASE + MSM_GPU_MEM_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_GRAPHICS, + .end = INT_GRAPHICS, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device msm_kgsl_device = { + .name = "kgsl", + .id = -1, + .resource = msm_kgsl_resources, + .num_resources = ARRAY_SIZE(msm_kgsl_resources), +}; + +static struct android_pmem_platform_data mdp_pmem_pdata = { + .name = "pmem", + .start = MSM_PMEM_MDP_BASE, + .size = MSM_PMEM_MDP_SIZE, + .no_allocator = 0, + .cached = 1, +}; + +static struct android_pmem_platform_data android_pmem_gpu0_pdata = { + .name = "pmem_gpu0", + .start = MSM_PMEM_GPU0_BASE, + .size = MSM_PMEM_GPU0_SIZE, + .no_allocator = 0, + .cached = 0, +}; + +static struct android_pmem_platform_data android_pmem_gpu1_pdata = { + .name = "pmem_gpu1", + .start = MSM_PMEM_GPU1_BASE, + .size = MSM_PMEM_GPU1_SIZE, + .no_allocator = 0, + .cached = 0, +}; + +static struct android_pmem_platform_data android_pmem_adsp_pdata = { + .name = "pmem_adsp", + .start = MSM_PMEM_ADSP_BASE, + .size = MSM_PMEM_ADSP_SIZE, + .no_allocator = 0, + .cached = 0, +}; + +static struct 
platform_device android_pmem_mdp_device = { + .name = "android_pmem", + .id = 0, + .dev = { + .platform_data = &mdp_pmem_pdata + }, +}; + +static struct platform_device android_pmem_adsp_device = { + .name = "android_pmem", + .id = 1, + .dev = { + .platform_data = &android_pmem_adsp_pdata, + }, +}; + +static struct platform_device android_pmem_gpu0_device = { + .name = "android_pmem", + .id = 2, + .dev = { + .platform_data = &android_pmem_gpu0_pdata, + }, +}; + +static struct platform_device android_pmem_gpu1_device = { + .name = "android_pmem", + .id = 3, + .dev = { + .platform_data = &android_pmem_gpu1_pdata, + }, +}; + +static char *usb_functions[] = { "usb_mass_storage" }; +static char *usb_functions_adb[] = { "usb_mass_storage", "adb" }; + +static struct android_usb_product usb_products[] = { + { + .product_id = 0x0c01, + .num_functions = ARRAY_SIZE(usb_functions), + .functions = usb_functions, + }, + { + .product_id = 0x0c02, + .num_functions = ARRAY_SIZE(usb_functions_adb), + .functions = usb_functions_adb, + }, +}; + +static struct android_usb_platform_data android_usb_pdata = { + .vendor_id = 0x18d1, + .product_id = 0x0d01, + .version = 0x0100, + .serial_number = "42", + .product_name = "Swordfishdroid", + .manufacturer_name = "Qualcomm", + .num_products = ARRAY_SIZE(usb_products), + .products = usb_products, + .num_functions = ARRAY_SIZE(usb_functions_adb), + .functions = usb_functions_adb, +}; + +static struct platform_device android_usb_device = { + .name = "android_usb", + .id = -1, + .dev = { + .platform_data = &android_usb_pdata, + }, +}; + +static struct platform_device fish_battery_device = { + .name = "fish_battery", +}; + +static struct msm_ts_platform_data swordfish_ts_pdata = { + .min_x = 296, + .max_x = 3800, + .min_y = 296, + .max_y = 3800, + .min_press = 0, + .max_press = 256, + .inv_x = 4096, + .inv_y = 4096, +}; + +static struct platform_device *devices[] __initdata = { +#if !defined(CONFIG_MSM_SERIAL_DEBUGGER) + &msm_device_uart3, +#endif + &msm_device_smd, + &msm_device_nand, + &msm_device_hsusb, + &usb_mass_storage_device, + &android_usb_device, + &fish_battery_device, + &smc91x_device, + &msm_device_touchscreen, + &android_pmem_mdp_device, + &android_pmem_adsp_device, + &android_pmem_gpu0_device, + &android_pmem_gpu1_device, + &msm_kgsl_device, +}; + +extern struct sys_timer msm_timer; + +static struct msm_acpu_clock_platform_data swordfish_clock_data = { + .acpu_switch_time_us = 20, + .max_speed_delta_khz = 256000, + .vdd_switch_time_us = 62, + .power_collapse_khz = 128000000, + .wait_for_irq_khz = 128000000, +}; + +void msm_serial_debug_init(unsigned int base, int irq, + struct device *clk_device, int signal_irq); + +static void __init swordfish_init(void) +{ + int rc; + + msm_acpu_clock_init(&swordfish_clock_data); +#if defined(CONFIG_MSM_SERIAL_DEBUGGER) + msm_serial_debug_init(MSM_UART3_PHYS, INT_UART3, + &msm_device_uart3.dev, 1); +#endif + msm_device_hsusb.dev.platform_data = &msm_hsusb_pdata; + msm_device_touchscreen.dev.platform_data = &swordfish_ts_pdata; + platform_add_devices(devices, ARRAY_SIZE(devices)); + msm_hsusb_set_vbus_state(1); + rc = swordfish_init_mmc(); + if (rc) + pr_crit("%s: MMC init failure (%d)\n", __func__, rc); +} + +static void __init swordfish_fixup(struct machine_desc *desc, struct tag *tags, + char **cmdline, struct meminfo *mi) +{ + mi->nr_banks = 1; + mi->bank[0].start = PHYS_OFFSET; + mi->bank[0].size = (101*1024*1024); +} + +static void __init swordfish_map_io(void) +{ + msm_map_qsd8x50_io(); + 
msm_clock_init(msm_clocks_8x50, msm_num_clocks_8x50); +} + +MACHINE_START(SWORDFISH, "Swordfish Board (QCT SURF8250)") + .boot_params = 0x20000100, + .fixup = swordfish_fixup, + .map_io = swordfish_map_io, + .init_irq = msm_init_irq, + .init_machine = swordfish_init, + .timer = &msm_timer, +MACHINE_END + +MACHINE_START(QSD8X50_FFA, "qsd8x50 FFA Board (QCT FFA8250)") + .boot_params = 0x20000100, + .fixup = swordfish_fixup, + .map_io = swordfish_map_io, + .init_irq = msm_init_irq, + .init_machine = swordfish_init, + .timer = &msm_timer, +MACHINE_END diff --git a/arch/arm/mach-msm/board-swordfish.h b/arch/arm/mach-msm/board-swordfish.h new file mode 100644 index 0000000000000..b9ea54f680dbc --- /dev/null +++ b/arch/arm/mach-msm/board-swordfish.h @@ -0,0 +1,48 @@ +/* arch/arm/mach-msm/board-swordfish.h + * + * Copyright (C) 2009 Google Inc. + * Author: Dima Zavin + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. +*/ + +#ifndef __ARCH_ARM_MACH_MSM_BOARD_SWORDFISH_H +#define __ARCH_ARM_MACH_MSM_BOARD_SWORDFISH_H + +#include + +#define MSM_SMI_BASE 0x02B00000 +#define MSM_SMI_SIZE 0x01500000 + +#define MSM_PMEM_MDP_BASE 0x03000000 +#define MSM_PMEM_MDP_SIZE 0x01000000 + +#define MSM_EBI1_BASE 0x20000000 +#define MSM_EBI1_SIZE 0x0E000000 + +#define MSM_PMEM_ADSP_BASE 0x2A300000 +#define MSM_PMEM_ADSP_SIZE 0x02000000 + +#define MSM_PMEM_GPU1_BASE 0x2C300000 +#define MSM_PMEM_GPU1_SIZE 0x01400000 + +#define MSM_PMEM_GPU0_BASE 0x2D700000 +#define MSM_PMEM_GPU0_SIZE 0x00400000 + +#define MSM_GPU_MEM_BASE 0x2DB00000 +#define MSM_GPU_MEM_SIZE 0x00200000 + +#define MSM_RAM_CONSOLE_BASE 0x2DD00000 +#define MSM_RAM_CONSOLE_SIZE 0x00040000 + +#define MSM_FB_BASE 0x2DE00000 +#define MSM_FB_SIZE 0x00200000 + +#endif /* __ARCH_ARM_MACH_MSM_BOARD_SWORDFISH_H */ diff --git a/arch/arm/mach-msm/board-trout-gpio.c b/arch/arm/mach-msm/board-trout-gpio.c index a604ec1e44bf2..4446a922e0b9e 100644 --- a/arch/arm/mach-msm/board-trout-gpio.c +++ b/arch/arm/mach-msm/board-trout-gpio.c @@ -1,164 +1,180 @@ -/* - * linux/arch/arm/mach-msm/gpio.c +/* arch/arm/mach-msm/board-trout-gpio.c * - * Copyright (C) 2005 HP Labs * Copyright (C) 2008 Google, Inc. - * Copyright (C) 2009 Pavel Machek * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * */ #include -#include -#include +#include #include -#include +#include +#include #include +#include +#include + +#include + #include "board-trout.h" +#undef MODULE_PARAM_PREFIX +#define MODULE_PARAM_PREFIX "board_trout." 
+ +static uint cpld_usb_h2w_sw; +module_param_named(usb_h2w_sw, cpld_usb_h2w_sw, uint, 0); + +static uint8_t trout_cpld_shadow[4] = { +#if defined(CONFIG_MSM_DEBUG_UART1) + /* H2W pins <-> UART1 */ + [0] = 0x40, // for serial debug, low current +#else + /* H2W pins <-> UART3, Bluetooth <-> UART1 */ + [0] = 0x80, // for serial debug, low current +#endif + [1] = 0x04, // I2C_PULL + [3] = 0x04, // mmdi 32k en +}; static uint8_t trout_int_mask[2] = { - [0] = 0xff, /* mask all interrupts */ - [1] = 0xff, + [0] = 0xff, /* mask all interrupts */ + [1] = 0xff, }; static uint8_t trout_sleep_int_mask[] = { - [0] = 0xff, - [1] = 0xff, + [0] = 0xff, + [1] = 0xff, }; +static int trout_suspended; -struct msm_gpio_chip { - struct gpio_chip chip; - void __iomem *reg; /* Base of register bank */ - u8 shadow; -}; - -#define to_msm_gpio_chip(c) container_of(c, struct msm_gpio_chip, chip) - -static int msm_gpiolib_get(struct gpio_chip *chip, unsigned offset) +static int trout_gpio_get(struct gpio_chip *chip, unsigned offset) { - struct msm_gpio_chip *msm_gpio = to_msm_gpio_chip(chip); - unsigned mask = 1 << offset; + unsigned n = chip->base + offset; + uint8_t b; + int reg; + if (n >= TROUT_GPIO_VIRTUAL_BASE) + n += TROUT_GPIO_VIRTUAL_TO_REAL_OFFSET; + b = 1U << (n & 7); + reg = (n & 0x78) >> 2; // assumes base is 128 + return !!(readb(TROUT_CPLD_BASE + reg) & b); +} - return !!(readb(msm_gpio->reg) & mask); +static void update_pwrsink(unsigned gpio, unsigned on) +{ + switch(gpio) { + case TROUT_GPIO_UI_LED_EN: + htc_pwrsink_set(PWRSINK_LED_BUTTON, on ? 100 : 0); + break; + case TROUT_GPIO_QTKEY_LED_EN: + htc_pwrsink_set(PWRSINK_LED_KEYBOARD, on ? 100 : 0); + break; + } } -static void msm_gpiolib_set(struct gpio_chip *chip, unsigned offset, int val) +static uint8_t trout_gpio_write_shadow(unsigned n, unsigned on) { - struct msm_gpio_chip *msm_gpio = to_msm_gpio_chip(chip); - unsigned mask = 1 << offset; + uint8_t b = 1U << (n & 7); + int reg = (n & 0x78) >> 2; // assumes base is 128 - if (val) - msm_gpio->shadow |= mask; + if(on) + return trout_cpld_shadow[reg >> 1] |= b; else - msm_gpio->shadow &= ~mask; - - writeb(msm_gpio->shadow, msm_gpio->reg); + return trout_cpld_shadow[reg >> 1] &= ~b; } -static int msm_gpiolib_direction_input(struct gpio_chip *chip, - unsigned offset) +static void trout_gpio_set(struct gpio_chip *chip, unsigned offset, int on) { - msm_gpiolib_set(chip, offset, 0); - return 0; + unsigned n = chip->base + offset; + int reg = (n & 0x78) >> 2; // assumes base is 128 + unsigned long flags; + uint8_t reg_val; + + if ((reg >> 1) >= ARRAY_SIZE(trout_cpld_shadow)) { + printk(KERN_ERR "trout_gpio_write called on input %d\n", n); + return; + } + + local_irq_save(flags); + update_pwrsink(n, on); + reg_val = trout_gpio_write_shadow(n, on); + writeb(reg_val, TROUT_CPLD_BASE + reg); + local_irq_restore(flags); } -static int msm_gpiolib_direction_output(struct gpio_chip *chip, - unsigned offset, int val) +static int trout_gpio_direction_output(struct gpio_chip *chip, + unsigned offset, int value) { - msm_gpiolib_set(chip, offset, val); + trout_gpio_set(chip, offset, value); return 0; } static int trout_gpio_to_irq(struct gpio_chip *chip, unsigned offset) { - struct msm_gpio_chip *msm_gpio = to_msm_gpio_chip(chip); - - return TROUT_GPIO_TO_INT(offset + chip->base); + unsigned gpio = chip->base + offset; + + if ((gpio < TROUT_GPIO_BANK0_FIRST_INT_SOURCE || + gpio > TROUT_GPIO_BANK0_LAST_INT_SOURCE) && + (gpio < TROUT_GPIO_BANK1_FIRST_INT_SOURCE || + gpio > TROUT_GPIO_BANK1_LAST_INT_SOURCE)) + return 
-ENOENT; + return TROUT_GPIO_TO_INT(gpio); } -#define TROUT_GPIO_BANK(name, reg_num, base_gpio, shadow_val) \ - { \ - .chip = { \ - .label = name, \ - .direction_input = msm_gpiolib_direction_input,\ - .direction_output = msm_gpiolib_direction_output, \ - .get = msm_gpiolib_get, \ - .set = msm_gpiolib_set, \ - .to_irq = trout_gpio_to_irq, \ - .base = base_gpio, \ - .ngpio = 8, \ - }, \ - .reg = (void *) reg_num + TROUT_CPLD_BASE, \ - .shadow = shadow_val, \ - } - -static struct msm_gpio_chip msm_gpio_banks[] = { -#if defined(CONFIG_MSM_DEBUG_UART1) - /* H2W pins <-> UART1 */ - TROUT_GPIO_BANK("MISC2", 0x00, TROUT_GPIO_MISC2_BASE, 0x40), -#else - /* H2W pins <-> UART3, Bluetooth <-> UART1 */ - TROUT_GPIO_BANK("MISC2", 0x00, TROUT_GPIO_MISC2_BASE, 0x80), -#endif - /* I2C pull */ - TROUT_GPIO_BANK("MISC3", 0x02, TROUT_GPIO_MISC3_BASE, 0x04), - TROUT_GPIO_BANK("MISC4", 0x04, TROUT_GPIO_MISC4_BASE, 0), - /* mmdi 32k en */ - TROUT_GPIO_BANK("MISC5", 0x06, TROUT_GPIO_MISC5_BASE, 0x04), - TROUT_GPIO_BANK("INT2", 0x08, TROUT_GPIO_INT2_BASE, 0), - TROUT_GPIO_BANK("MISC1", 0x0a, TROUT_GPIO_MISC1_BASE, 0), - TROUT_GPIO_BANK("VIRTUAL", 0x12, TROUT_GPIO_VIRTUAL_BASE, 0), -}; - -static void trout_gpio_irq_ack(struct irq_data *d) +static void trout_gpio_irq_ack(unsigned int irq) { - int bank = TROUT_INT_TO_BANK(d->irq); - uint8_t mask = TROUT_INT_TO_MASK(d->irq); + int bank = TROUT_INT_TO_BANK(irq); + uint8_t mask = TROUT_INT_TO_MASK(irq); int reg = TROUT_BANK_TO_STAT_REG(bank); - /*printk(KERN_INFO "trout_gpio_irq_ack irq %d\n", d->irq);*/ + /*printk(KERN_INFO "trout_gpio_irq_ack irq %d\n", irq);*/ writeb(mask, TROUT_CPLD_BASE + reg); } -static void trout_gpio_irq_mask(struct irq_data *d) +static void trout_gpio_irq_mask(unsigned int irq) { unsigned long flags; uint8_t reg_val; - int bank = TROUT_INT_TO_BANK(d->irq); - uint8_t mask = TROUT_INT_TO_MASK(d->irq); + int bank = TROUT_INT_TO_BANK(irq); + uint8_t mask = TROUT_INT_TO_MASK(irq); int reg = TROUT_BANK_TO_MASK_REG(bank); local_irq_save(flags); reg_val = trout_int_mask[bank] |= mask; /*printk(KERN_INFO "trout_gpio_irq_mask irq %d => %d:%02x\n", - d->irq, bank, reg_val);*/ - writeb(reg_val, TROUT_CPLD_BASE + reg); + irq, bank, reg_val);*/ + if (!trout_suspended) + writeb(reg_val, TROUT_CPLD_BASE + reg); local_irq_restore(flags); } -static void trout_gpio_irq_unmask(struct irq_data *d) +static void trout_gpio_irq_unmask(unsigned int irq) { unsigned long flags; uint8_t reg_val; - int bank = TROUT_INT_TO_BANK(d->irq); - uint8_t mask = TROUT_INT_TO_MASK(d->irq); + int bank = TROUT_INT_TO_BANK(irq); + uint8_t mask = TROUT_INT_TO_MASK(irq); int reg = TROUT_BANK_TO_MASK_REG(bank); local_irq_save(flags); reg_val = trout_int_mask[bank] &= ~mask; /*printk(KERN_INFO "trout_gpio_irq_unmask irq %d => %d:%02x\n", - d->irq, bank, reg_val);*/ - writeb(reg_val, TROUT_CPLD_BASE + reg); + irq, bank, reg_val);*/ + if (!trout_suspended) + writeb(reg_val, TROUT_CPLD_BASE + reg); local_irq_restore(flags); } -int trout_gpio_irq_set_wake(struct irq_data *d, unsigned int on) +int trout_gpio_irq_set_wake(unsigned int irq, unsigned int on) { unsigned long flags; - int bank = TROUT_INT_TO_BANK(d->irq); - uint8_t mask = TROUT_INT_TO_MASK(d->irq); + int bank = TROUT_INT_TO_BANK(irq); + uint8_t mask = TROUT_INT_TO_MASK(irq); local_irq_save(flags); if(on) @@ -198,38 +214,89 @@ static void trout_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) } int_base += TROUT_INT_BANK0_COUNT; } - desc->irq_data.chip->irq_ack(&desc->irq_data); + desc->chip->ack(irq); +} + +static int 
trout_sysdev_suspend(struct sys_device *dev, pm_message_t state) +{ + trout_suspended = 1; + writeb(trout_sleep_int_mask[0], + TROUT_CPLD_BASE + TROUT_GPIO_INT_MASK0_REG); + writeb(trout_sleep_int_mask[1], + TROUT_CPLD_BASE + TROUT_GPIO_INT_MASK1_REG); + writeb(trout_sleep_int_mask[0], + TROUT_CPLD_BASE + TROUT_GPIO_INT_STAT0_REG); + writeb(trout_sleep_int_mask[1], + TROUT_CPLD_BASE + TROUT_GPIO_INT_STAT1_REG); + return 0; +} + +int trout_sysdev_resume(struct sys_device *dev) +{ + writeb(trout_int_mask[0], TROUT_CPLD_BASE + TROUT_GPIO_INT_MASK0_REG); + writeb(trout_int_mask[1], TROUT_CPLD_BASE + TROUT_GPIO_INT_MASK1_REG); + trout_suspended = 0; + return 0; } static struct irq_chip trout_gpio_irq_chip = { - .name = "troutgpio", - .irq_ack = trout_gpio_irq_ack, - .irq_mask = trout_gpio_irq_mask, - .irq_unmask = trout_gpio_irq_unmask, - .irq_set_wake = trout_gpio_irq_set_wake, + .name = "troutgpio", + .ack = trout_gpio_irq_ack, + .mask = trout_gpio_irq_mask, + .unmask = trout_gpio_irq_unmask, + .set_wake = trout_gpio_irq_set_wake, + //.set_type = trout_gpio_irq_set_type, }; -/* - * Called from the processor-specific init to enable GPIO pin support. - */ -int __init trout_init_gpio(void) +static struct gpio_chip trout_gpio_chip = { + .base = TROUT_GPIO_START, + .ngpio = TROUT_GPIO_END - TROUT_GPIO_START + 1, + .direction_output = trout_gpio_direction_output, + .get = trout_gpio_get, + .set = trout_gpio_set, + .to_irq = trout_gpio_to_irq, +}; + +struct sysdev_class trout_sysdev_class = { + .name = "troutgpio_irq", + .suspend = trout_sysdev_suspend, + .resume = trout_sysdev_resume, +}; + +static struct sys_device trout_irq_device = { + .cls = &trout_sysdev_class, +}; + +static int __init trout_init_gpio(void) { int i; + + if (!machine_is_trout()) + return 0; + + /* adjust GPIOs based on bootloader request */ + pr_info("trout_init_gpio: cpld_usb_hw2_sw = %d\n", cpld_usb_h2w_sw); + trout_gpio_write_shadow(TROUT_GPIO_USB_H2W_SW, cpld_usb_h2w_sw); + + for(i = 0; i < ARRAY_SIZE(trout_cpld_shadow); i++) + writeb(trout_cpld_shadow[i], TROUT_CPLD_BASE + i * 2); + for(i = TROUT_INT_START; i <= TROUT_INT_END; i++) { set_irq_chip(i, &trout_gpio_irq_chip); set_irq_handler(i, handle_edge_irq); set_irq_flags(i, IRQF_VALID); } - for (i = 0; i < ARRAY_SIZE(msm_gpio_banks); i++) - gpiochip_add(&msm_gpio_banks[i].chip); + gpiochip_add(&trout_gpio_chip); set_irq_type(MSM_GPIO_TO_INT(17), IRQF_TRIGGER_HIGH); set_irq_chained_handler(MSM_GPIO_TO_INT(17), trout_gpio_irq_handler); set_irq_wake(MSM_GPIO_TO_INT(17), 1); + if(sysdev_class_register(&trout_sysdev_class) == 0) + sysdev_register(&trout_irq_device); + return 0; } postcore_initcall(trout_init_gpio); - diff --git a/arch/arm/mach-msm/board-trout-keypad.c b/arch/arm/mach-msm/board-trout-keypad.c new file mode 100644 index 0000000000000..0299d0686de9f --- /dev/null +++ b/arch/arm/mach-msm/board-trout-keypad.c @@ -0,0 +1,345 @@ +/* arch/arm/mach-msm/board-trout-keypad.c + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include + +#include "board-trout.h" + +static char *keycaps = "--qwerty"; +#undef MODULE_PARAM_PREFIX +#define MODULE_PARAM_PREFIX "board_trout." +module_param_named(keycaps, keycaps, charp, 0); + + +static unsigned int trout_col_gpios[] = { 35, 34, 33, 32, 31, 23, 30, 78 }; +static unsigned int trout_row_gpios[] = { 42, 41, 40, 39, 38, 37, 36 }; + +#define KEYMAP_INDEX(col, row) ((col)*ARRAY_SIZE(trout_row_gpios) + (row)) + +static const unsigned short trout_keymap[ARRAY_SIZE(trout_col_gpios) * ARRAY_SIZE(trout_row_gpios)] = { + [KEYMAP_INDEX(0, 0)] = KEY_BACK, + [KEYMAP_INDEX(0, 1)] = KEY_HOME, +// [KEYMAP_INDEX(0, 2)] = KEY_, + [KEYMAP_INDEX(0, 3)] = KEY_BACKSPACE, + [KEYMAP_INDEX(0, 4)] = KEY_ENTER, + [KEYMAP_INDEX(0, 5)] = KEY_RIGHTALT, + [KEYMAP_INDEX(0, 6)] = KEY_P, + + [KEYMAP_INDEX(1, 0)] = KEY_MENU, +// [KEYMAP_INDEX(1, 0)] = 229, // SOFT1 + [KEYMAP_INDEX(1, 1)] = KEY_SEND, + [KEYMAP_INDEX(1, 2)] = KEY_END, + [KEYMAP_INDEX(1, 3)] = KEY_LEFTALT, + [KEYMAP_INDEX(1, 4)] = KEY_A, + [KEYMAP_INDEX(1, 5)] = KEY_LEFTSHIFT, + [KEYMAP_INDEX(1, 6)] = KEY_Q, + + [KEYMAP_INDEX(2, 0)] = KEY_U, + [KEYMAP_INDEX(2, 1)] = KEY_7, + [KEYMAP_INDEX(2, 2)] = KEY_K, + [KEYMAP_INDEX(2, 3)] = KEY_J, + [KEYMAP_INDEX(2, 4)] = KEY_M, + [KEYMAP_INDEX(2, 5)] = KEY_SLASH, + [KEYMAP_INDEX(2, 6)] = KEY_8, + + [KEYMAP_INDEX(3, 0)] = KEY_5, + [KEYMAP_INDEX(3, 1)] = KEY_6, + [KEYMAP_INDEX(3, 2)] = KEY_B, + [KEYMAP_INDEX(3, 3)] = KEY_H, + [KEYMAP_INDEX(3, 4)] = KEY_N, + [KEYMAP_INDEX(3, 5)] = KEY_SPACE, + [KEYMAP_INDEX(3, 6)] = KEY_Y, + + [KEYMAP_INDEX(4, 0)] = KEY_4, + [KEYMAP_INDEX(4, 1)] = KEY_R, + [KEYMAP_INDEX(4, 2)] = KEY_V, + [KEYMAP_INDEX(4, 3)] = KEY_G, + [KEYMAP_INDEX(4, 4)] = KEY_C, + //[KEYMAP_INDEX(4, 5)] = KEY_, + [KEYMAP_INDEX(4, 6)] = KEY_T, + + [KEYMAP_INDEX(5, 0)] = KEY_2, + [KEYMAP_INDEX(5, 1)] = KEY_W, + [KEYMAP_INDEX(5, 2)] = KEY_COMPOSE, + [KEYMAP_INDEX(5, 3)] = KEY_VOLUMEUP, + [KEYMAP_INDEX(5, 4)] = KEY_S, + [KEYMAP_INDEX(5, 5)] = KEY_Z, + [KEYMAP_INDEX(5, 6)] = KEY_1, + + [KEYMAP_INDEX(6, 0)] = KEY_I, + [KEYMAP_INDEX(6, 1)] = KEY_0, + [KEYMAP_INDEX(6, 2)] = KEY_O, + [KEYMAP_INDEX(6, 3)] = KEY_L, + [KEYMAP_INDEX(6, 4)] = KEY_DOT, + [KEYMAP_INDEX(6, 5)] = KEY_COMMA, + [KEYMAP_INDEX(6, 6)] = KEY_9, + + [KEYMAP_INDEX(7, 0)] = KEY_3, + [KEYMAP_INDEX(7, 1)] = KEY_E, + [KEYMAP_INDEX(7, 2)] = KEY_EMAIL, // @ + [KEYMAP_INDEX(7, 3)] = KEY_VOLUMEDOWN, + [KEYMAP_INDEX(7, 4)] = KEY_X, + [KEYMAP_INDEX(7, 5)] = KEY_F, + [KEYMAP_INDEX(7, 6)] = KEY_D +}; + +static unsigned int trout_col_gpios_evt2[] = { 35, 34, 33, 32, 31, 23, 30, 109 }; +static unsigned int trout_row_gpios_evt2[] = { 42, 41, 40, 39, 38, 37, 36 }; + +static const unsigned short trout_keymap_evt2_1[ARRAY_SIZE(trout_col_gpios) * ARRAY_SIZE(trout_row_gpios)] = { + [KEYMAP_INDEX(0, 0)] = KEY_BACK, + [KEYMAP_INDEX(0, 1)] = KEY_HOME, +// [KEYMAP_INDEX(0, 2)] = KEY_, + [KEYMAP_INDEX(0, 3)] = KEY_BACKSPACE, + [KEYMAP_INDEX(0, 4)] = KEY_ENTER, + [KEYMAP_INDEX(0, 5)] = KEY_RIGHTSHIFT, + [KEYMAP_INDEX(0, 6)] = KEY_P, + + [KEYMAP_INDEX(1, 0)] = KEY_MENU, + [KEYMAP_INDEX(1, 1)] = KEY_SEND, +// [KEYMAP_INDEX(1, 2)] = KEY_, + [KEYMAP_INDEX(1, 3)] = KEY_LEFTSHIFT, + [KEYMAP_INDEX(1, 4)] = KEY_A, + [KEYMAP_INDEX(1, 5)] = KEY_COMPOSE, + [KEYMAP_INDEX(1, 6)] = KEY_Q, + + [KEYMAP_INDEX(2, 0)] = KEY_U, + [KEYMAP_INDEX(2, 1)] = KEY_7, + [KEYMAP_INDEX(2, 2)] = KEY_K, + [KEYMAP_INDEX(2, 3)] = KEY_J, + [KEYMAP_INDEX(2, 4)] = KEY_M, + [KEYMAP_INDEX(2, 5)] = KEY_SLASH, + [KEYMAP_INDEX(2, 6)] = KEY_8, + + 
[KEYMAP_INDEX(3, 0)] = KEY_5, + [KEYMAP_INDEX(3, 1)] = KEY_6, + [KEYMAP_INDEX(3, 2)] = KEY_B, + [KEYMAP_INDEX(3, 3)] = KEY_H, + [KEYMAP_INDEX(3, 4)] = KEY_N, + [KEYMAP_INDEX(3, 5)] = KEY_SPACE, + [KEYMAP_INDEX(3, 6)] = KEY_Y, + + [KEYMAP_INDEX(4, 0)] = KEY_4, + [KEYMAP_INDEX(4, 1)] = KEY_R, + [KEYMAP_INDEX(4, 2)] = KEY_V, + [KEYMAP_INDEX(4, 3)] = KEY_G, + [KEYMAP_INDEX(4, 4)] = KEY_C, +// [KEYMAP_INDEX(4, 5)] = KEY_, + [KEYMAP_INDEX(4, 6)] = KEY_T, + + [KEYMAP_INDEX(5, 0)] = KEY_2, + [KEYMAP_INDEX(5, 1)] = KEY_W, + [KEYMAP_INDEX(5, 2)] = KEY_LEFTALT, + [KEYMAP_INDEX(5, 3)] = KEY_VOLUMEUP, + [KEYMAP_INDEX(5, 4)] = KEY_S, + [KEYMAP_INDEX(5, 5)] = KEY_Z, + [KEYMAP_INDEX(5, 6)] = KEY_1, + + [KEYMAP_INDEX(6, 0)] = KEY_I, + [KEYMAP_INDEX(6, 1)] = KEY_0, + [KEYMAP_INDEX(6, 2)] = KEY_O, + [KEYMAP_INDEX(6, 3)] = KEY_L, + [KEYMAP_INDEX(6, 4)] = KEY_COMMA, + [KEYMAP_INDEX(6, 5)] = KEY_DOT, + [KEYMAP_INDEX(6, 6)] = KEY_9, + + [KEYMAP_INDEX(7, 0)] = KEY_3, + [KEYMAP_INDEX(7, 1)] = KEY_E, + [KEYMAP_INDEX(7, 2)] = KEY_EMAIL, // @ + [KEYMAP_INDEX(7, 3)] = KEY_VOLUMEDOWN, + [KEYMAP_INDEX(7, 4)] = KEY_X, + [KEYMAP_INDEX(7, 5)] = KEY_F, + [KEYMAP_INDEX(7, 6)] = KEY_D +}; + +static const unsigned short trout_keymap_evt2_2[ARRAY_SIZE(trout_col_gpios) * ARRAY_SIZE(trout_row_gpios)] = { + [KEYMAP_INDEX(0, 0)] = KEY_BACK, + [KEYMAP_INDEX(0, 1)] = KEY_HOME, +// [KEYMAP_INDEX(0, 2)] = KEY_, + [KEYMAP_INDEX(0, 3)] = KEY_BACKSPACE, + [KEYMAP_INDEX(0, 4)] = KEY_ENTER, + [KEYMAP_INDEX(0, 5)] = KEY_RIGHTSHIFT, + [KEYMAP_INDEX(0, 6)] = KEY_P, + + [KEYMAP_INDEX(1, 0)] = KEY_MENU, /* external menu key */ + [KEYMAP_INDEX(1, 1)] = KEY_SEND, +// [KEYMAP_INDEX(1, 2)] = KEY_, + [KEYMAP_INDEX(1, 3)] = KEY_LEFTSHIFT, + [KEYMAP_INDEX(1, 4)] = KEY_A, + [KEYMAP_INDEX(1, 5)] = KEY_F1, /* qwerty menu key */ + [KEYMAP_INDEX(1, 6)] = KEY_Q, + + [KEYMAP_INDEX(2, 0)] = KEY_U, + [KEYMAP_INDEX(2, 1)] = KEY_7, + [KEYMAP_INDEX(2, 2)] = KEY_K, + [KEYMAP_INDEX(2, 3)] = KEY_J, + [KEYMAP_INDEX(2, 4)] = KEY_M, + [KEYMAP_INDEX(2, 5)] = KEY_DOT, + [KEYMAP_INDEX(2, 6)] = KEY_8, + + [KEYMAP_INDEX(3, 0)] = KEY_5, + [KEYMAP_INDEX(3, 1)] = KEY_6, + [KEYMAP_INDEX(3, 2)] = KEY_B, + [KEYMAP_INDEX(3, 3)] = KEY_H, + [KEYMAP_INDEX(3, 4)] = KEY_N, + [KEYMAP_INDEX(3, 5)] = KEY_SPACE, + [KEYMAP_INDEX(3, 6)] = KEY_Y, + + [KEYMAP_INDEX(4, 0)] = KEY_4, + [KEYMAP_INDEX(4, 1)] = KEY_R, + [KEYMAP_INDEX(4, 2)] = KEY_V, + [KEYMAP_INDEX(4, 3)] = KEY_G, + [KEYMAP_INDEX(4, 4)] = KEY_C, + [KEYMAP_INDEX(4, 5)] = KEY_EMAIL, // @ + [KEYMAP_INDEX(4, 6)] = KEY_T, + + [KEYMAP_INDEX(5, 0)] = KEY_2, + [KEYMAP_INDEX(5, 1)] = KEY_W, + [KEYMAP_INDEX(5, 2)] = KEY_LEFTALT, + [KEYMAP_INDEX(5, 3)] = KEY_VOLUMEUP, + [KEYMAP_INDEX(5, 4)] = KEY_S, + [KEYMAP_INDEX(5, 5)] = KEY_Z, + [KEYMAP_INDEX(5, 6)] = KEY_1, + + [KEYMAP_INDEX(6, 0)] = KEY_I, + [KEYMAP_INDEX(6, 1)] = KEY_0, + [KEYMAP_INDEX(6, 2)] = KEY_O, + [KEYMAP_INDEX(6, 3)] = KEY_L, + [KEYMAP_INDEX(6, 4)] = KEY_COMMA, + [KEYMAP_INDEX(6, 5)] = KEY_RIGHTALT, + [KEYMAP_INDEX(6, 6)] = KEY_9, + + [KEYMAP_INDEX(7, 0)] = KEY_3, + [KEYMAP_INDEX(7, 1)] = KEY_E, + [KEYMAP_INDEX(7, 2)] = KEY_COMPOSE, + [KEYMAP_INDEX(7, 3)] = KEY_VOLUMEDOWN, + [KEYMAP_INDEX(7, 4)] = KEY_X, + [KEYMAP_INDEX(7, 5)] = KEY_F, + [KEYMAP_INDEX(7, 6)] = KEY_D +}; + +static struct gpio_event_matrix_info trout_keypad_matrix_info = { + .info.func = gpio_event_matrix_func, + .keymap = trout_keymap, + .output_gpios = trout_col_gpios, + .input_gpios = trout_row_gpios, + .noutputs = ARRAY_SIZE(trout_col_gpios), + .ninputs = ARRAY_SIZE(trout_row_gpios), + .settle_time.tv.nsec = 40 
* NSEC_PER_USEC, + .poll_time.tv.nsec = 20 * NSEC_PER_MSEC, + .flags = GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_REMOVE_PHANTOM_KEYS |GPIOKPF_PRINT_UNMAPPED_KEYS /*| GPIOKPF_PRINT_MAPPED_KEYS*/ +}; + +static struct gpio_event_direct_entry trout_keypad_nav_map[] = { + { TROUT_POWER_KEY, KEY_POWER }, + { TROUT_GPIO_CAM_BTN_STEP1_N, KEY_CAMERA-1 }, //steal KEY_HP + { TROUT_GPIO_CAM_BTN_STEP2_N, KEY_CAMERA }, +}; + +static struct gpio_event_direct_entry trout_keypad_nav_map_evt2[] = { + { TROUT_POWER_KEY, KEY_END }, + { TROUT_GPIO_CAM_BTN_STEP1_N, KEY_CAMERA-1 }, //steal KEY_HP + { TROUT_GPIO_CAM_BTN_STEP2_N, KEY_CAMERA }, +}; + +static struct gpio_event_input_info trout_keypad_nav_info = { + .info.func = gpio_event_input_func, + .flags = 0, + .type = EV_KEY, + .keymap = trout_keypad_nav_map, + .keymap_size = ARRAY_SIZE(trout_keypad_nav_map) +}; + +static struct gpio_event_direct_entry trout_keypad_switch_map[] = { + { TROUT_GPIO_SLIDING_DET, SW_LID } +}; + +static struct gpio_event_input_info trout_keypad_switch_info = { + .info.func = gpio_event_input_func, + .flags = 0, + .type = EV_SW, + .keymap = trout_keypad_switch_map, + .keymap_size = ARRAY_SIZE(trout_keypad_switch_map) +}; + +static struct gpio_event_info *trout_keypad_info[] = { + &trout_keypad_matrix_info.info, + &trout_keypad_nav_info.info, + &trout_keypad_switch_info.info, +}; + +static struct gpio_event_platform_data trout_keypad_data = { + .name = "trout-keypad", + .info = trout_keypad_info, + .info_count = ARRAY_SIZE(trout_keypad_info) +}; + +static struct platform_device trout_keypad_device = { + .name = GPIO_EVENT_DEV_NAME, + .id = 0, + .dev = { + .platform_data = &trout_keypad_data, + }, +}; + +static int __init trout_init_keypad(void) +{ + if (!machine_is_trout()) + return 0; + + switch (system_rev) { + case 0: + /* legacy default keylayout */ + break; + case 1: + /* v1 has a new keyboard layout */ + trout_keypad_matrix_info.keymap = trout_keymap_evt2_1; + trout_keypad_matrix_info.output_gpios = trout_col_gpios_evt2; + trout_keypad_matrix_info.input_gpios = trout_row_gpios_evt2; + + /* v1 has new direct keys */ + trout_keypad_nav_info.keymap = trout_keypad_nav_map_evt2; + trout_keypad_nav_info.keymap_size = ARRAY_SIZE(trout_keypad_nav_map_evt2); + + /* userspace needs to know about these changes as well */ + trout_keypad_data.name = "trout-keypad-v2"; + break; + default: /* 2, 3, 4 currently */ + /* v2 has a new keyboard layout */ + trout_keypad_matrix_info.keymap = trout_keymap_evt2_2; + trout_keypad_matrix_info.output_gpios = trout_col_gpios_evt2; + trout_keypad_matrix_info.input_gpios = trout_row_gpios_evt2; + + /* v2 has new direct keys */ + trout_keypad_nav_info.keymap = trout_keypad_nav_map_evt2; + trout_keypad_nav_info.keymap_size = ARRAY_SIZE(trout_keypad_nav_map_evt2); + + /* userspace needs to know about these changes as well */ + if (!strcmp(keycaps, "qwertz")) { + trout_keypad_data.name = "trout-keypad-qwertz"; + } else { + trout_keypad_data.name = "trout-keypad-v3"; + } + break; + } + return platform_device_register(&trout_keypad_device); +} + +device_initcall(trout_init_keypad); + diff --git a/arch/arm/mach-msm/board-trout-mmc.c b/arch/arm/mach-msm/board-trout-mmc.c index 44be8464657b3..3fe9c00ae0d10 100644 --- a/arch/arm/mach-msm/board-trout-mmc.c +++ b/arch/arm/mach-msm/board-trout-mmc.c @@ -15,6 +15,7 @@ #include #include +#include #include @@ -166,16 +167,158 @@ static struct msm_mmc_platform_data trout_sdslot_data = { .translate_vdd = trout_sdslot_switchvdd, }; +/* ---- WIFI ---- */ + +static uint32_t 
wifi_on_gpio_table[] = { + PCOM_GPIO_CFG(51, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(52, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(53, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(54, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT0 */ + PCOM_GPIO_CFG(55, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* CMD */ + PCOM_GPIO_CFG(56, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* CLK */ + PCOM_GPIO_CFG(29, 0, GPIO_INPUT, GPIO_NO_PULL, GPIO_4MA), /* WLAN IRQ */ +}; + +static uint32_t wifi_off_gpio_table[] = { + PCOM_GPIO_CFG(51, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(52, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(53, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT1 */ + PCOM_GPIO_CFG(54, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT0 */ + PCOM_GPIO_CFG(55, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CMD */ + PCOM_GPIO_CFG(56, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CLK */ + PCOM_GPIO_CFG(29, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* WLAN IRQ */ +}; + +static struct vreg *vreg_wifi_osc; /* WIFI 32khz oscilator */ +static int trout_wifi_cd = 0; /* WIFI virtual 'card detect' status */ + +static struct sdio_embedded_func wifi_func = { + .f_class = SDIO_CLASS_WLAN, + .f_maxblksize = 512, +}; + +static struct embedded_sdio_data trout_wifi_emb_data = { + .cis = { + .vendor = 0x104c, + .device = 0x9066, + .blksize = 512, + /*.max_dtr = 24000000, Max of chip - no worky on Trout */ + .max_dtr = 20000000, + }, + .cccr = { + .multi_block = 0, + .low_speed = 0, + .wide_bus = 1, + .high_power = 0, + .high_speed = 0, + }, + .funcs = &wifi_func, + .num_funcs = 1, +}; + +static void (*wifi_status_cb)(int card_present, void *dev_id); +static void *wifi_status_cb_devid; + +static int trout_wifi_status_register(void (*callback)(int card_present, void *dev_id), void *dev_id) +{ + if (wifi_status_cb) + return -EAGAIN; + wifi_status_cb = callback; + wifi_status_cb_devid = dev_id; + return 0; +} + +static unsigned int trout_wifi_status(struct device *dev) +{ + return trout_wifi_cd; +} + +int trout_wifi_set_carddetect(int val) +{ + printk("%s: %d\n", __func__, val); + trout_wifi_cd = val; + if (wifi_status_cb) { + wifi_status_cb(val, wifi_status_cb_devid); + } else + printk(KERN_WARNING "%s: Nobody to notify\n", __func__); + return 0; +} +#ifndef CONFIG_WIFI_CONTROL_FUNC +EXPORT_SYMBOL(trout_wifi_set_carddetect); +#endif + +static int trout_wifi_power_state; + +int trout_wifi_power(int on) +{ + int rc; + + printk("%s: %d\n", __func__, on); + + if (on) { + config_gpio_table(wifi_on_gpio_table, + ARRAY_SIZE(wifi_on_gpio_table)); + rc = vreg_enable(vreg_wifi_osc); + if (rc) + return rc; + htc_pwrsink_set(PWRSINK_WIFI, 70); + } else { + config_gpio_table(wifi_off_gpio_table, + ARRAY_SIZE(wifi_off_gpio_table)); + htc_pwrsink_set(PWRSINK_WIFI, 0); + } + gpio_set_value( TROUT_GPIO_MAC_32K_EN, on); + mdelay(100); + gpio_set_value( TROUT_GPIO_WIFI_EN, on); + mdelay(100); + if (!on) { + vreg_disable(vreg_wifi_osc); + } + trout_wifi_power_state = on; + return 0; +} +#ifndef CONFIG_WIFI_CONTROL_FUNC +EXPORT_SYMBOL(trout_wifi_power); +#endif + +static int trout_wifi_reset_state; +int trout_wifi_reset(int on) +{ + printk("%s: %d\n", __func__, on); + gpio_set_value( TROUT_GPIO_WIFI_PA_RESETX, !on ); + trout_wifi_reset_state = on; + mdelay(50); + return 0; +} +#ifndef CONFIG_WIFI_CONTROL_FUNC +EXPORT_SYMBOL(trout_wifi_reset); +#endif + +static struct msm_mmc_platform_data trout_wifi_data = { + .ocr_mask = 
MMC_VDD_28_29, + .status = trout_wifi_status, + .register_status_notify = trout_wifi_status_register, + .embedded_sdio = &trout_wifi_emb_data, +}; + int __init trout_init_mmc(unsigned int sys_rev) { + wifi_status_cb = NULL; + sdslot_vreg_enabled = 0; vreg_sdslot = vreg_get(0, "gp6"); if (IS_ERR(vreg_sdslot)) return PTR_ERR(vreg_sdslot); + vreg_wifi_osc = vreg_get(0, "mmc"); + if (IS_ERR(vreg_wifi_osc)) + return PTR_ERR(vreg_wifi_osc); + set_irq_wake(TROUT_GPIO_TO_INT(TROUT_GPIO_SDMC_CD_N), 1); + msm_add_sdcc(1, &trout_wifi_data, 0, 0); + if (!opt_disable_sdcard) msm_add_sdcc(2, &trout_sdslot_data, TROUT_GPIO_TO_INT(TROUT_GPIO_SDMC_CD_N), 0); @@ -184,3 +327,110 @@ int __init trout_init_mmc(unsigned int sys_rev) return 0; } +#if defined(CONFIG_DEBUG_FS) +static int troutmmc_dbg_wifi_reset_set(void *data, u64 val) +{ + trout_wifi_reset((int) val); + return 0; +} + +static int troutmmc_dbg_wifi_reset_get(void *data, u64 *val) +{ + *val = trout_wifi_reset_state; + return 0; +} + +static int troutmmc_dbg_wifi_cd_set(void *data, u64 val) +{ + trout_wifi_set_carddetect((int) val); + return 0; +} + +static int troutmmc_dbg_wifi_cd_get(void *data, u64 *val) +{ + *val = trout_wifi_cd; + return 0; +} + +static int troutmmc_dbg_wifi_pwr_set(void *data, u64 val) +{ + trout_wifi_power((int) val); + return 0; +} + +static int troutmmc_dbg_wifi_pwr_get(void *data, u64 *val) +{ + + *val = trout_wifi_power_state; + return 0; +} + +static int troutmmc_dbg_sd_pwr_set(void *data, u64 val) +{ + trout_sdslot_switchvdd(NULL, (unsigned int) val); + return 0; +} + +static int troutmmc_dbg_sd_pwr_get(void *data, u64 *val) +{ + *val = sdslot_vdd; + return 0; +} + +static int troutmmc_dbg_sd_cd_set(void *data, u64 val) +{ + return -ENOSYS; +} + +static int troutmmc_dbg_sd_cd_get(void *data, u64 *val) +{ + *val = trout_sdslot_status(NULL); + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(troutmmc_dbg_wifi_reset_fops, + troutmmc_dbg_wifi_reset_get, + troutmmc_dbg_wifi_reset_set, "%llu\n"); + +DEFINE_SIMPLE_ATTRIBUTE(troutmmc_dbg_wifi_cd_fops, + troutmmc_dbg_wifi_cd_get, + troutmmc_dbg_wifi_cd_set, "%llu\n"); + +DEFINE_SIMPLE_ATTRIBUTE(troutmmc_dbg_wifi_pwr_fops, + troutmmc_dbg_wifi_pwr_get, + troutmmc_dbg_wifi_pwr_set, "%llu\n"); + +DEFINE_SIMPLE_ATTRIBUTE(troutmmc_dbg_sd_pwr_fops, + troutmmc_dbg_sd_pwr_get, + troutmmc_dbg_sd_pwr_set, "%llu\n"); + +DEFINE_SIMPLE_ATTRIBUTE(troutmmc_dbg_sd_cd_fops, + troutmmc_dbg_sd_cd_get, + troutmmc_dbg_sd_cd_set, "%llu\n"); + +static int __init troutmmc_dbg_init(void) +{ + struct dentry *dent; + + dent = debugfs_create_dir("troutmmc_dbg", 0); + if (IS_ERR(dent)) + return PTR_ERR(dent); + + debugfs_create_file("wifi_reset", 0644, dent, NULL, + &troutmmc_dbg_wifi_reset_fops); + debugfs_create_file("wifi_cd", 0644, dent, NULL, + &troutmmc_dbg_wifi_cd_fops); + debugfs_create_file("wifi_pwr", 0644, dent, NULL, + &troutmmc_dbg_wifi_pwr_fops); + + debugfs_create_file("sd_pwr", 0644, dent, NULL, + &troutmmc_dbg_sd_pwr_fops); + debugfs_create_file("sd_cd", 0644, dent, NULL, + &troutmmc_dbg_sd_cd_fops); + + return 0; +} + +device_initcall(troutmmc_dbg_init); + +#endif diff --git a/arch/arm/mach-msm/board-trout-panel.c b/arch/arm/mach-msm/board-trout-panel.c index 729bb49a44caf..7e978b88cb13c 100644 --- a/arch/arm/mach-msm/board-trout-panel.c +++ b/arch/arm/mach-msm/board-trout-panel.c @@ -16,12 +16,76 @@ #include #include +#include #include "board-trout.h" #include "proc_comm.h" #include "devices.h" #define TROUT_DEFAULT_BACKLIGHT_BRIGHTNESS 255 +#define VSYNC_GPIO 97 + +static struct clk *gp_clk; 
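+/* Backlight state shared between the LED class callback and the MDDI panel blank/unblank hooks; see trout_backlight_lock below. */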
+static int trout_backlight_off; +static int trout_backlight_brightness = TROUT_DEFAULT_BACKLIGHT_BRIGHTNESS; +static int trout_new_backlight = 1; +static uint8_t trout_backlight_last_level = 33; +static DEFINE_MUTEX(trout_backlight_lock); + +static void trout_set_backlight_level(uint8_t level) +{ + unsigned percent = ((int)level * 100) / 255; + + if (trout_new_backlight) { + unsigned long flags; + int i = 0; + level = (int)level * 34 / 256; + + if (trout_backlight_last_level == level) + return; + + if (level == 0) { + gpio_set_value(27, 0); + msleep(2); + } else { + local_irq_save(flags); + if (trout_backlight_last_level == 0) { + gpio_set_value(27, 1); + udelay(40); + trout_backlight_last_level = 33; + } + i = (trout_backlight_last_level - level + 33) % 33; + while (i-- > 0) { + gpio_set_value(27, 0); + udelay(1); + gpio_set_value(27, 1); + udelay(1); + } + local_irq_restore(flags); + } + trout_backlight_last_level = level; + } + else { + if(level) { + clk_enable(gp_clk); + writel((1U << 16) | (~level & 0xffff), + MSM_CLK_CTL_BASE + 0x58); + /* Going directly to a 100% duty cycle does not + * seem to work */ + if(level == 255) { + writel((~127 << 16) | 0xb20, + MSM_CLK_CTL_BASE + 0x5c); + udelay(1); + } + writel((~127 << 16) | 0xb58, MSM_CLK_CTL_BASE + 0x5c); + } + else { + writel(0x0, MSM_CLK_CTL_BASE + 0x5c); + clk_disable(gp_clk); + } + } + htc_pwrsink_set(PWRSINK_BACKLIGHT, percent); +} #define MDDI_CLIENT_CORE_BASE 0x108000 #define LCD_CONTROL_BLOCK_BASE 0x110000 @@ -147,7 +211,9 @@ static struct mddi_table mddi_toshiba_init_table[] = { { DPRUN, 0x00000001 }, { 1, 14 }, /* msleep 14 */ { SYSCKENA, 0x00000001 }, + //{ CLKENB, 0x000000EF }, { CLKENB, 0x0000A1EF }, /* # SYS.CLKENB # Enable clocks for each module (without DCLK , i2cCLK) */ + //{ CLKENB, 0x000025CB }, /* Clock enable register */ { GPIODATA, 0x02000200 }, /* # GPI .GPIODATA # GPIO2(RESET_LCD_N) set to 0 , GPIO3(eDRAM_Power) set to 0 */ { GPIODIR, 0x000030D }, /* 24D # GPI .GPIODIR # Select direction of GPIO port (0,2,3,6,9 output) */ @@ -166,20 +232,136 @@ static struct mddi_table mddi_toshiba_init_table[] = { { DRAMPWR, 0x00000001 }, /* eDRAM power */ }; -#define GPIOSEL_VWAKEINT (1U << 0) -#define INTMASK_VWAKEOUT (1U << 0) +static struct mddi_table mddi_toshiba_panel_init_table[] = { + { SRST, 0x00000003 }, /* FIFO/LCDC not reset */ + { PORT_ENB, 0x00000001 }, /* Enable sync. Port */ + { START, 0x00000000 }, /* To stop operation */ + //{ START, 0x00000001 }, /* To start operation */ + { PORT, 0x00000004 }, /* Polarity of VS/HS/DE. */ + { CMN, 0x00000000 }, + { GAMMA, 0x00000000 }, /* No Gamma correction */ + { INTFLG, 0x00000000 }, /* VSYNC interrupt flag clear/status */ + { INTMSK, 0x00000000 }, /* VSYNC interrupt mask is off. */ + { MPLFBUF, 0x00000000 }, /* Select frame buffer's base address. */ + { HDE_LEFT, 0x00000000 }, /* The value of HDE_LEFT. */ + { VDE_TOP, 0x00000000 }, /* The value of VDE_TOP. */ + { PXL, 0x00000001 }, /* 1. RGB666 */ + /* 2. Data is valid from 1st frame of beginning. */ + { HDE_START, 0x00000006 }, /* HDE_START= 14 PCLK */ + { HDE_SIZE, 0x0000009F }, /* HDE_SIZE=320 PCLK */ + { HSW, 0x00000004 }, /* HSW= 10 PCLK */ + { VSW, 0x00000001 }, /* VSW=2 HCYCLE */ + { VDE_START, 0x00000003 }, /* VDE_START=4 HCYCLE */ + { VDE_SIZE, 0x000001DF }, /* VDE_SIZE=480 HCYCLE */ + { WAKEUP, 0x000001e2 }, /* Wakeup position in VSYNC mode. */ + { WSYN_DLY, 0x00000000 }, /* Wakeup position in VSIN mode. */ + { REGENB, 0x00000001 }, /* Set 1 to enable to change the value of registers. 
*/ + { CLKENB, 0x000025CB }, /* Clock enable register */ + + { SSICTL, 0x00000170 }, /* SSI control register */ + { SSITIME, 0x00000250 }, /* SSI timing control register */ + { SSICTL, 0x00000172 }, /* SSI control register */ +}; -static struct clk *gp_clk; -static int trout_new_backlight = 1; -static struct vreg *vreg_mddi_1v5; -static struct vreg *vreg_lcm_2v85; +static struct mddi_table mddi_sharp_init_table[] = { + { VCYCLE, 0x000001eb }, + { HCYCLE, 0x000000ae }, + { REGENB, 0x00000001 }, /* Set 1 to enable to change the value of registers. */ + { GPIODATA, 0x00040000 }, /* GPIO2 low */ + { GPIODIR, 0x00000004 }, /* GPIO2 out */ + { 1, 1 }, /* msleep 1 */ + { GPIODATA, 0x00040004 }, /* GPIO2 high */ + { 1, 10 }, /* msleep 10 */ + SPI_WRITE(0x5f, 0x01) + SPI_WRITE1(0x11) + { 1, 200 }, /* msleep 200 */ + SPI_WRITE1(0x29) + SPI_WRITE1(0xde) + { START, 0x00000001 }, /* To start operation */ +}; + +static struct mddi_table mddi_sharp_deinit_table[] = { + { 1, 200 }, /* msleep 200 */ + SPI_WRITE(0x10, 0x1) + { 1, 100 }, /* msleep 100 */ + { GPIODATA, 0x00040004 }, /* GPIO2 high */ + { GPIODIR, 0x00000004 }, /* GPIO2 out */ + { GPIODATA, 0x00040000 }, /* GPIO2 low */ + { 1, 10 }, /* msleep 10 */ +}; + +static struct mddi_table mddi_tpo_init_table[] = { + { VCYCLE, 0x000001e5 }, + { HCYCLE, 0x000000ac }, + { REGENB, 0x00000001 }, /* Set 1 to enable to change the value of registers. */ + { 0, 20 }, /* udelay 20 */ + { GPIODATA, 0x00000004 }, /* GPIO2 high */ + { GPIODIR, 0x00000004 }, /* GPIO2 out */ + { 0, 20 }, /* udelay 20 */ + + SPI_WRITE(0x08, 0x01) + { 0, 500 }, /* udelay 500 */ + SPI_WRITE(0x08, 0x00) + SPI_WRITE(0x02, 0x00) + SPI_WRITE(0x03, 0x04) + SPI_WRITE(0x04, 0x0e) + SPI_WRITE(0x09, 0x02) + SPI_WRITE(0x0b, 0x08) + SPI_WRITE(0x0c, 0x53) + SPI_WRITE(0x0d, 0x01) + SPI_WRITE(0x0e, 0xe0) + SPI_WRITE(0x0f, 0x01) + SPI_WRITE(0x10, 0x58) + SPI_WRITE(0x20, 0x1e) + SPI_WRITE(0x21, 0x0a) + SPI_WRITE(0x22, 0x0a) + SPI_WRITE(0x23, 0x1e) + SPI_WRITE(0x25, 0x32) + SPI_WRITE(0x26, 0x00) + SPI_WRITE(0x27, 0xac) + SPI_WRITE(0x29, 0x06) + SPI_WRITE(0x2a, 0xa4) + SPI_WRITE(0x2b, 0x45) + SPI_WRITE(0x2c, 0x45) + SPI_WRITE(0x2d, 0x15) + SPI_WRITE(0x2e, 0x5a) + SPI_WRITE(0x2f, 0xff) + SPI_WRITE(0x30, 0x6b) + SPI_WRITE(0x31, 0x0d) + SPI_WRITE(0x32, 0x48) + SPI_WRITE(0x33, 0x82) + SPI_WRITE(0x34, 0xbd) + SPI_WRITE(0x35, 0xe7) + SPI_WRITE(0x36, 0x18) + SPI_WRITE(0x37, 0x94) + SPI_WRITE(0x38, 0x01) + SPI_WRITE(0x39, 0x5d) + SPI_WRITE(0x3a, 0xae) + SPI_WRITE(0x3b, 0xff) + SPI_WRITE(0x07, 0x09) + { 0, 10 }, /* udelay 10 */ + { START, 0x00000001 }, /* To start operation */ +}; + +static struct mddi_table mddi_tpo_deinit_table[] = { + SPI_WRITE(0x07, 0x19) + { START, 0x00000000 }, /* To stop operation */ + { GPIODATA, 0x00040004 }, /* GPIO2 high */ + { GPIODIR, 0x00000004 }, /* GPIO2 out */ + { GPIODATA, 0x00040000 }, /* GPIO2 low */ + { 0, 5 }, /* usleep 5 */ +}; + + +#define GPIOSEL_VWAKEINT (1U << 0) +#define INTMASK_VWAKEOUT (1U << 0) static void trout_process_mddi_table(struct msm_mddi_client_data *client_data, struct mddi_table *table, size_t count) { int i; - for (i = 0; i < count; i++) { + for(i = 0; i < count; i++) { uint32_t reg = table[i].reg; uint32_t value = table[i].value; @@ -192,6 +374,45 @@ static void trout_process_mddi_table(struct msm_mddi_client_data *client_data, } } +static struct vreg *vreg_mddi_1v5; +static struct vreg *vreg_lcm_2v85; + +static void trout_mddi_power_client(struct msm_mddi_client_data *client_data, + int on) +{ + unsigned id, on_off; + if(on) { + on_off = 0; + id = 
PM_VREG_PDOWN_MDDI_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + vreg_enable(vreg_mddi_1v5); + mdelay(5); // delay time >5ms and <10ms + gpio_set_value(V_VDDE2E_VDD2_GPIO, 1); + gpio_set_value(TROUT_GPIO_MDDI_32K_EN, 1); + msleep(3); + id = PM_VREG_PDOWN_AUX_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + vreg_enable(vreg_lcm_2v85); + msleep(3); + gpio_set_value(MDDI_RST_N, 1); + msleep(10); + } else { + gpio_set_value(TROUT_GPIO_MDDI_32K_EN, 0); + gpio_set_value(MDDI_RST_N, 0); + msleep(10); + vreg_disable(vreg_lcm_2v85); + on_off = 1; + id = PM_VREG_PDOWN_AUX_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + msleep(5); + gpio_set_value(V_VDDE2E_VDD2_GPIO, 0); + msleep(200); + vreg_disable(vreg_mddi_1v5); + id = PM_VREG_PDOWN_MDDI_ID; + msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id); + } +} + static int trout_mddi_toshiba_client_init( struct msm_mddi_bridge_platform_data *bridge_data, struct msm_mddi_client_data *client_data) @@ -204,7 +425,7 @@ static int trout_mddi_toshiba_client_init( client_data->auto_hibernate(client_data, 1); panel_id = (client_data->remote_read(client_data, GPIODATA) >> 4) & 3; if (panel_id > 1) { - printk(KERN_WARNING "unknown panel id at mddi_enable\n"); + printk("unknown panel id at mddi_enable\n"); return -1; } return 0; @@ -217,6 +438,117 @@ static int trout_mddi_toshiba_client_uninit( return 0; } +static int trout_mddi_panel_unblank( + struct msm_mddi_bridge_platform_data *bridge_data, + struct msm_mddi_client_data *client_data) +{ + + int panel_id, ret = 0; + + trout_set_backlight_level(0); + client_data->auto_hibernate(client_data, 0); + trout_process_mddi_table(client_data, mddi_toshiba_panel_init_table, + ARRAY_SIZE(mddi_toshiba_panel_init_table)); + panel_id = (client_data->remote_read(client_data, GPIODATA) >> 4) & 3; + switch(panel_id) { + case 0: + printk("init sharp panel\n"); + trout_process_mddi_table(client_data, + mddi_sharp_init_table, + ARRAY_SIZE(mddi_sharp_init_table)); + break; + case 1: + printk("init tpo panel\n"); + trout_process_mddi_table(client_data, + mddi_tpo_init_table, + ARRAY_SIZE(mddi_tpo_init_table)); + break; + default: + printk("unknown panel_id: %d\n", panel_id); + ret = -1; + }; + mutex_lock(&trout_backlight_lock); + trout_set_backlight_level(trout_backlight_brightness); + trout_backlight_off = 0; + mutex_unlock(&trout_backlight_lock); + client_data->auto_hibernate(client_data, 1); + client_data->remote_write(client_data, GPIOSEL_VWAKEINT, GPIOSEL); + client_data->remote_write(client_data, INTMASK_VWAKEOUT, INTMASK); + return ret; + +} + +static int trout_mddi_panel_blank( + struct msm_mddi_bridge_platform_data *bridge_data, + struct msm_mddi_client_data *client_data) +{ + int panel_id, ret = 0; + + panel_id = (client_data->remote_read(client_data, GPIODATA) >> 4) & 3; + client_data->auto_hibernate(client_data, 0); + switch(panel_id) { + case 0: + printk("deinit sharp panel\n"); + trout_process_mddi_table(client_data, + mddi_sharp_deinit_table, + ARRAY_SIZE(mddi_sharp_deinit_table)); + break; + case 1: + printk("deinit tpo panel\n"); + trout_process_mddi_table(client_data, + mddi_tpo_deinit_table, + ARRAY_SIZE(mddi_tpo_deinit_table)); + break; + default: + printk("unknown panel_id: %d\n", panel_id); + ret = -1; + }; + client_data->auto_hibernate(client_data, 1); + mutex_lock(&trout_backlight_lock); + trout_set_backlight_level(0); + trout_backlight_off = 1; + mutex_unlock(&trout_backlight_lock); + client_data->remote_write(client_data, 0, SYSCLKENA); + client_data->remote_write(client_data, 1, 
DPSUS); + return ret; +} + +static void trout_brightness_set(struct led_classdev *led_cdev, enum led_brightness value) +{ + mutex_lock(&trout_backlight_lock); + trout_backlight_brightness = value; + if(!trout_backlight_off) + trout_set_backlight_level(trout_backlight_brightness); + mutex_unlock(&trout_backlight_lock); +} + +static struct led_classdev trout_backlight_led = { + .name = "lcd-backlight", + .brightness = TROUT_DEFAULT_BACKLIGHT_BRIGHTNESS, + .brightness_set = trout_brightness_set, +}; + +static int trout_backlight_probe(struct platform_device *pdev) +{ + led_classdev_register(&pdev->dev, &trout_backlight_led); + return 0; +} + +static int trout_backlight_remove(struct platform_device *pdev) +{ + led_classdev_unregister(&trout_backlight_led); + return 0; +} + +static struct platform_driver trout_backlight_driver = { + .probe = trout_backlight_probe, + .remove = trout_backlight_remove, + .driver = { + .name = "trout-backlight", + .owner = THIS_MODULE, + }, +}; + static struct resource resources_msm_fb[] = { { .start = MSM_FB_BASE, @@ -228,6 +560,8 @@ static struct resource resources_msm_fb[] = { struct msm_mddi_bridge_platform_data toshiba_client_data = { .init = trout_mddi_toshiba_client_init, .uninit = trout_mddi_toshiba_client_uninit, + .blank = trout_mddi_panel_blank, + .unblank = trout_mddi_panel_unblank, .fb_data = { .xres = 320, .yres = 480, @@ -239,25 +573,33 @@ struct msm_mddi_bridge_platform_data toshiba_client_data = { static struct msm_mddi_platform_data mddi_pdata = { .clk_rate = 122880000, + .power_client = trout_mddi_power_client, + .vsync_irq = MSM_GPIO_TO_INT(VSYNC_GPIO), .fb_resource = resources_msm_fb, .num_clients = 1, .client_platform_data = { { .product_id = (0xd263 << 16 | 0), .name = "mddi_c_d263_0000", + //.name = "mddi_c_dummy", .id = 0, .client_data = &toshiba_client_data, + //.client_data = &toshiba_client_data.fb_data, .clk_rate = 0, }, }, }; +static struct platform_device trout_backlight = { + .name = "trout-backlight", +}; + int __init trout_init_panel(void) { int rc; - if (!machine_is_trout()) - return 0; + if (!machine_is_trout()) + return 0; vreg_mddi_1v5 = vreg_get(0, "gp2"); if (IS_ERR(vreg_mddi_1v5)) return PTR_ERR(vreg_mddi_1v5); @@ -270,7 +612,8 @@ int __init trout_init_panel(void) uint32_t config = PCOM_GPIO_CFG(27, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA); msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &config, 0); - } else { + } + else { uint32_t config = PCOM_GPIO_CFG(27, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA); msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &config, 0); @@ -287,11 +630,21 @@ int __init trout_init_panel(void) "failed\n"); } + rc = gpio_request(VSYNC_GPIO, "vsync"); + if (rc) + return rc; + rc = gpio_direction_input(VSYNC_GPIO); + if (rc) + return rc; rc = platform_device_register(&msm_device_mdp); if (rc) return rc; msm_device_mddi0.dev.platform_data = &mddi_pdata; - return platform_device_register(&msm_device_mddi0); + rc = platform_device_register(&msm_device_mddi0); + if (rc) + return rc; + platform_device_register(&trout_backlight); + return platform_driver_register(&trout_backlight_driver); } device_initcall(trout_init_panel); diff --git a/arch/arm/mach-msm/board-trout-rfkill.c b/arch/arm/mach-msm/board-trout-rfkill.c new file mode 100644 index 0000000000000..e68eb2ae4c517 --- /dev/null +++ b/arch/arm/mach-msm/board-trout-rfkill.c @@ -0,0 +1,101 @@ +/* + * Copyright (C) 2008 Google, Inc. 
+ * Author: Nick Pelly + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* Control bluetooth power for trout platform */ + +#include +#include +#include +#include +#include +#include + +#include "board-trout.h" + +static struct rfkill *bt_rfk; +static const char bt_name[] = "brf6300"; + +static int bluetooth_set_power(void *data, bool blocked) +{ + if (!blocked) { + gpio_set_value(TROUT_GPIO_BT_32K_EN, 1); + udelay(10); + gpio_direction_output(101, 1); + } else { + gpio_direction_output(101, 0); + gpio_set_value(TROUT_GPIO_BT_32K_EN, 0); + } + return 0; +} + +static struct rfkill_ops trout_rfkill_ops = { + .set_block = bluetooth_set_power, +}; + +static int trout_rfkill_probe(struct platform_device *pdev) +{ + int rc = 0; + bool default_state = true; /* off */ + + bluetooth_set_power(NULL, default_state); + + bt_rfk = rfkill_alloc(bt_name, &pdev->dev, RFKILL_TYPE_BLUETOOTH, + &trout_rfkill_ops, NULL); + if (!bt_rfk) + return -ENOMEM; + + rfkill_set_states(bt_rfk, default_state, false); + + /* userspace cannot take exclusive control */ + + rc = rfkill_register(bt_rfk); + + if (rc) + rfkill_destroy(bt_rfk); + return rc; +} + +static int trout_rfkill_remove(struct platform_device *dev) +{ + rfkill_unregister(bt_rfk); + rfkill_destroy(bt_rfk); + + return 0; +} + +static struct platform_driver trout_rfkill_driver = { + .probe = trout_rfkill_probe, + .remove = trout_rfkill_remove, + .driver = { + .name = "trout_rfkill", + .owner = THIS_MODULE, + }, +}; + +static int __init trout_rfkill_init(void) +{ + return platform_driver_register(&trout_rfkill_driver); +} + +static void __exit trout_rfkill_exit(void) +{ + platform_driver_unregister(&trout_rfkill_driver); +} + +module_init(trout_rfkill_init); +module_exit(trout_rfkill_exit); +MODULE_DESCRIPTION("trout rfkill"); +MODULE_AUTHOR("Nick Pelly "); +MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-msm/board-trout-wifi.c b/arch/arm/mach-msm/board-trout-wifi.c new file mode 100644 index 0000000000000..51b26a4053695 --- /dev/null +++ b/arch/arm/mach-msm/board-trout-wifi.c @@ -0,0 +1,74 @@ +/* arch/arm/mach-msm/board-trout-wifi.c + * + * Copyright (C) 2008 Google, Inc. + * Author: Dmitry Shmidt + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifdef CONFIG_WIFI_CONTROL_FUNC +#include +#include +#include +#include +#include +#include + +extern int trout_wifi_set_carddetect(int val); +extern int trout_wifi_power(int on); +extern int trout_wifi_reset(int on); + +#ifdef CONFIG_WIFI_MEM_PREALLOC +typedef struct wifi_mem_prealloc_struct { + void *mem_ptr; + unsigned long size; +} wifi_mem_prealloc_t; + +static wifi_mem_prealloc_t wifi_mem_array[WMPA_NUMBER_OF_SECTIONS] = { + { NULL, (WMPA_SECTION_SIZE_0 + WMPA_SECTION_HEADER) }, + { NULL, (WMPA_SECTION_SIZE_1 + WMPA_SECTION_HEADER) }, + { NULL, (WMPA_SECTION_SIZE_2 + WMPA_SECTION_HEADER) } +}; + +static void *trout_wifi_mem_prealloc(int section, unsigned long size) +{ + if( (section < 0) || (section >= WMPA_NUMBER_OF_SECTIONS) ) + return NULL; + if( wifi_mem_array[section].size < size ) + return NULL; + return wifi_mem_array[section].mem_ptr; +} + +int __init trout_init_wifi_mem( void ) +{ + int i; + + for(i=0;( i < WMPA_NUMBER_OF_SECTIONS );i++) { + wifi_mem_array[i].mem_ptr = vmalloc(wifi_mem_array[i].size); + if( wifi_mem_array[i].mem_ptr == NULL ) + return -ENOMEM; + } + return 0; +} +#endif + +struct wifi_platform_data trout_wifi_control = { + .set_power = trout_wifi_power, + .set_reset = trout_wifi_reset, + .set_carddetect = trout_wifi_set_carddetect, +#ifdef CONFIG_WIFI_MEM_PREALLOC + .mem_prealloc = trout_wifi_mem_prealloc, +#else + .mem_prealloc = NULL, +#endif +}; + +#endif diff --git a/arch/arm/mach-msm/board-trout.c b/arch/arm/mach-msm/board-trout.c index 73f1460665421..ddff77eae5263 100644 --- a/arch/arm/mach-msm/board-trout.c +++ b/arch/arm/mach-msm/board-trout.c @@ -1,7 +1,6 @@ -/* linux/arch/arm/mach-msm/board-trout.c +/* arch/arm/mach-msm/board-trout.c * - * Copyright (C) 2009 Google, Inc. - * Author: Brian Swetland + * Copyright (C) 2008 Google, Inc. 
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -17,56 +16,805 @@ #include #include #include +#include +#include +#include +#include +#include +#include +#include +#include <../../../drivers/staging/android/timed_gpio.h> +#include +#include +#include +#include +#include + +#include +#include #include #include #include +#include +#include +#include +#include + +#include +#include #include +#include +#include +#include + +#include +#include +#include + +#include "board-trout.h" + #include -#include -#include +#include +#include +#include +#include +#ifdef CONFIG_HTC_HEADSET +#include +#endif +#ifdef CONFIG_WIFI_CONTROL_FUNC +#include +#endif +#include "proc_comm.h" #include "devices.h" -#include "board-trout.h" + +void msm_init_irq(void); +void msm_init_gpio(void); extern int trout_init_mmc(unsigned int); +#ifdef CONFIG_WIFI_CONTROL_FUNC +#ifdef CONFIG_WIFI_MEM_PREALLOC +extern int trout_init_wifi_mem(void); +#endif +extern struct wifi_platform_data trout_wifi_control; +#endif + +struct trout_axis_info { + struct gpio_event_axis_info info; + uint16_t in_state; + uint16_t out_state; +}; +static bool nav_just_on; +static int nav_on_jiffies; + +uint16_t trout_axis_map(struct gpio_event_axis_info *info, uint16_t in) +{ + struct trout_axis_info *ai = container_of(info, struct trout_axis_info, info); + uint16_t out = ai->out_state; + + if (nav_just_on) { + if (jiffies == nav_on_jiffies || jiffies == nav_on_jiffies + 1) + goto ignore; + nav_just_on = 0; + } + if((ai->in_state ^ in) & 1) + out--; + if((ai->in_state ^ in) & 2) + out++; + ai->out_state = out; +ignore: + ai->in_state = in; + return out; +} + +int trout_nav_power(const struct gpio_event_platform_data *pdata, bool on) +{ + gpio_set_value(TROUT_GPIO_JOG_EN, on); + if (on) { + nav_just_on = 1; + nav_on_jiffies = jiffies; + } + return 0; +} + +static uint32_t trout_4_x_axis_gpios[] = { + TROUT_4_BALL_LEFT_0, TROUT_4_BALL_RIGHT_0 +}; +static uint32_t trout_5_x_axis_gpios[] = { + TROUT_5_BALL_LEFT_0, TROUT_5_BALL_RIGHT_0 +}; + +static struct trout_axis_info trout_x_axis = { + .info = { + .info.func = gpio_event_axis_func, + .count = ARRAY_SIZE(trout_5_x_axis_gpios), + .type = EV_REL, + .code = REL_X, + .decoded_size = 1U << ARRAY_SIZE(trout_5_x_axis_gpios), + .map = trout_axis_map, + .gpio = trout_5_x_axis_gpios, + .flags = GPIOEAF_PRINT_UNKNOWN_DIRECTION /*| GPIOEAF_PRINT_RAW | GPIOEAF_PRINT_EVENT */ + } +}; + +static uint32_t trout_4_y_axis_gpios[] = { + TROUT_4_BALL_UP_0, TROUT_4_BALL_DOWN_0 +}; +static uint32_t trout_5_y_axis_gpios[] = { + TROUT_5_BALL_UP_0, TROUT_5_BALL_DOWN_0 +}; + +static struct trout_axis_info trout_y_axis = { + .info = { + .info.func = gpio_event_axis_func, + .count = ARRAY_SIZE(trout_5_y_axis_gpios), + .type = EV_REL, + .code = REL_Y, + .decoded_size = 1U << ARRAY_SIZE(trout_5_y_axis_gpios), + .map = trout_axis_map, + .gpio = trout_5_y_axis_gpios, + .flags = GPIOEAF_PRINT_UNKNOWN_DIRECTION /*| GPIOEAF_PRINT_RAW | GPIOEAF_PRINT_EVENT */ + } +}; + +static struct gpio_event_direct_entry trout_nav_buttons[] = { + { TROUT_GPIO_NAVI_ACT_N, BTN_MOUSE } +}; + +static struct gpio_event_input_info trout_nav_button_info = { + .info.func = gpio_event_input_func, + .flags = 0, + .type = EV_KEY, + .keymap = trout_nav_buttons, + .keymap_size = ARRAY_SIZE(trout_nav_buttons) +}; + +static struct gpio_event_info *trout_nav_info[] = { + &trout_x_axis.info.info, + &trout_y_axis.info.info, + &trout_nav_button_info.info +}; + +static struct 
gpio_event_platform_data trout_nav_data = { + .name = "trout-nav", + .info = trout_nav_info, + .info_count = ARRAY_SIZE(trout_nav_info), + .power = trout_nav_power, +}; + +static struct platform_device trout_nav_device = { + .name = GPIO_EVENT_DEV_NAME, + .id = 2, + .dev = { + .platform_data = &trout_nav_data, + }, +}; + +static int trout_reset_keys_up[] = { + BTN_MOUSE, + 0 +}; + +static struct keyreset_platform_data trout_reset_keys_pdata = { + .keys_up = trout_reset_keys_up, + .keys_down = { + KEY_SEND, + KEY_MENU, + KEY_END, + 0 + }, +}; + +struct platform_device trout_reset_keys_device = { + .name = KEYRESET_NAME, + .dev.platform_data = &trout_reset_keys_pdata, +}; + +static int trout_ts_power(int on) +{ + int tp_ls_gpio = system_rev < 5 ? TROUT_4_TP_LS_EN : TROUT_5_TP_LS_EN; + if (on) { + gpio_set_value(TROUT_GPIO_TP_I2C_PULL, 1); + gpio_set_value(TROUT_GPIO_TP_EN, 1); + /* touchscreen must be powered before we enable i2c pullup */ + msleep(2); + /* enable touch panel level shift */ + gpio_set_value(tp_ls_gpio, 1); + msleep(2); + } + else { + gpio_set_value(tp_ls_gpio, 0); + udelay(50); + gpio_set_value(TROUT_GPIO_TP_EN, 0); + gpio_set_value(TROUT_GPIO_TP_I2C_PULL, 0); + } + return 0; +} + +static struct synaptics_i2c_rmi_platform_data trout_ts_data[] = { + { + .version = 0x010c, + .power = trout_ts_power, + .flags = SYNAPTICS_FLIP_Y | SYNAPTICS_SNAP_TO_INACTIVE_EDGE, + .inactive_left = -100 * 0x10000 / 4334, + .inactive_right = -100 * 0x10000 / 4334, + .inactive_top = -40 * 0x10000 / 6696, + .inactive_bottom = -40 * 0x10000 / 6696, + .snap_left_on = 300 * 0x10000 / 4334, + .snap_left_off = 310 * 0x10000 / 4334, + .snap_right_on = 300 * 0x10000 / 4334, + .snap_right_off = 310 * 0x10000 / 4334, + .snap_top_on = 100 * 0x10000 / 6696, + .snap_top_off = 110 * 0x10000 / 6696, + .snap_bottom_on = 100 * 0x10000 / 6696, + .snap_bottom_off = 110 * 0x10000 / 6696, + }, + { + .flags = SYNAPTICS_FLIP_Y | SYNAPTICS_SNAP_TO_INACTIVE_EDGE, + .inactive_left = ((4674 - 4334) / 2 + 200) * 0x10000 / 4334, + .inactive_right = ((4674 - 4334) / 2 + 200) * 0x10000 / 4334, + .inactive_top = ((6946 - 6696) / 2) * 0x10000 / 6696, + .inactive_bottom = ((6946 - 6696) / 2) * 0x10000 / 6696, + } +}; + +static struct akm8976_platform_data compass_platform_data = { + .reset = TROUT_GPIO_COMPASS_RST_N, + .clk_on = TROUT_GPIO_COMPASS_32K_EN, + .intr = TROUT_GPIO_COMPASS_IRQ, +}; + +static struct i2c_board_info i2c_devices[] = { + { + I2C_BOARD_INFO(SYNAPTICS_I2C_RMI_NAME, 0x20), + .platform_data = trout_ts_data, + .irq = TROUT_GPIO_TO_INT(TROUT_GPIO_TP_ATT_N) + }, + { + I2C_BOARD_INFO("elan-touch", 0x10), + .irq = TROUT_GPIO_TO_INT(TROUT_GPIO_TP_ATT_N), + }, + { + I2C_BOARD_INFO("akm8976", 0x1C), + .platform_data = &compass_platform_data, + .irq = TROUT_GPIO_TO_INT(TROUT_GPIO_COMPASS_IRQ), + }, + { + I2C_BOARD_INFO("pca963x", 0x62), + }, +#if defined(CONFIG_MSM_CAMERA) && defined(CONFIG_MT9T013) + { + I2C_BOARD_INFO("mt9t013", 0x6C), + }, +#endif +#ifdef CONFIG_SENSORS_MT9T013 + { + I2C_BOARD_INFO("mt9t013", 0x6C >> 1), + }, +#endif +}; + +static struct timed_gpio timed_gpios[] = { + { + .name = "vibrator", + .gpio = TROUT_GPIO_HAPTIC_PWM, + .max_timeout = 15000, + }, + { + .name = "flash", + .gpio = TROUT_GPIO_FLASH_EN, + .max_timeout = 400, + }, +}; + +static struct timed_gpio_platform_data timed_gpio_data = { + .num_gpios = ARRAY_SIZE(timed_gpios), + .gpios = timed_gpios, +}; + +static struct platform_device android_timed_gpios = { + .name = "timed-gpio", + .id = -1, + .dev = { + .platform_data = &timed_gpio_data, 
+ }, +}; + +static struct gpio_led android_led_list[] = { + { + .name = "spotlight", + .gpio = TROUT_GPIO_SPOTLIGHT_EN, + }, + { + .name = "keyboard-backlight", + .gpio = TROUT_GPIO_QTKEY_LED_EN, + }, + { + .name = "button-backlight", + .gpio = TROUT_GPIO_UI_LED_EN, + }, +}; + +static struct gpio_led_platform_data android_leds_data = { + .num_leds = ARRAY_SIZE(android_led_list), + .leds = android_led_list, +}; + +static struct platform_device android_leds = { + .name = "leds-gpio", + .id = -1, + .dev = { + .platform_data = &android_leds_data, + }, +}; + +static struct gpio_switch_platform_data sd_door_switch_data = { + .name = "sd-door", + .gpio = TROUT_GPIO_SD_DOOR_N, + .state_on = "open", + .state_off = "closed", +}; + +static struct platform_device sd_door_switch = { + .name = "switch-gpio", + .id = -1, + .dev = { + .platform_data = &sd_door_switch_data, + }, +}; + +#ifdef CONFIG_HTC_HEADSET +static void h2w_config_cpld(int route) +{ + switch (route) { + case H2W_UART3: + gpio_set_value(TROUT_GPIO_H2W_SEL0, 0); + gpio_set_value(TROUT_GPIO_H2W_SEL1, 1); + break; + case H2W_GPIO: + gpio_set_value(TROUT_GPIO_H2W_SEL0, 0); + gpio_set_value(TROUT_GPIO_H2W_SEL1, 0); + break; + } +} + +static void h2w_init_cpld(void) +{ + h2w_config_cpld(H2W_UART3); + gpio_set_value(TROUT_GPIO_H2W_CLK_DIR, 0); + gpio_set_value(TROUT_GPIO_H2W_DAT_DIR, 0); +} + +static struct h2w_platform_data trout_h2w_data = { + .cable_in1 = TROUT_GPIO_CABLE_IN1, + .cable_in2 = TROUT_GPIO_CABLE_IN2, + .h2w_clk = TROUT_GPIO_H2W_CLK_GPI, + .h2w_data = TROUT_GPIO_H2W_DAT_GPI, + .debug_uart = H2W_UART3, + .config_cpld = h2w_config_cpld, + .init_cpld = h2w_init_cpld, +}; + +static struct platform_device trout_h2w = { + .name = "h2w", + .id = -1, + .dev = { + .platform_data = &trout_h2w_data, + }, +}; +#endif + +static void trout_phy_reset(void) +{ + gpio_set_value(TROUT_GPIO_USB_PHY_RST_N, 0); + mdelay(10); + gpio_set_value(TROUT_GPIO_USB_PHY_RST_N, 1); + mdelay(10); +} + +static void config_camera_on_gpios(void); +static void config_camera_off_gpios(void); + +#ifdef CONFIG_MSM_CAMERA +static struct msm_camera_device_platform_data msm_camera_device_data = { + .camera_gpio_on = config_camera_on_gpios, + .camera_gpio_off = config_camera_off_gpios, + .ioext.mdcphy = MSM_MDC_PHYS, + .ioext.mdcsz = MSM_MDC_SIZE, + .ioext.appphy = MSM_CLK_CTL_PHYS, + .ioext.appsz = MSM_CLK_CTL_SIZE, +}; + +#ifdef CONFIG_MT9T013 +static struct msm_camera_sensor_info msm_camera_sensor_mt9t013_data = { + .sensor_name = "mt9t013", + .sensor_reset = 108, + .sensor_pwd = 85, + .vcm_pwd = TROUT_GPIO_VCM_PWDN, + .pdata = &msm_camera_device_data, +}; + +static struct platform_device msm_camera_sensor_mt9t013 = { + .name = "msm_camera_mt9t013", + .dev = { + .platform_data = &msm_camera_sensor_mt9t013_data, + }, +}; +#endif +#endif + +#ifdef CONFIG_SENSORS_MT9T013 +static struct msm_camera_legacy_device_platform_data msm_camera_device_mt9t013 = { + .sensor_reset = 108, + .sensor_pwd = 85, + .vcm_pwd = TROUT_GPIO_VCM_PWDN, + .config_gpio_on = config_camera_on_gpios, + .config_gpio_off = config_camera_off_gpios, +}; + +static struct platform_device trout_camera = { + .name = "camera", + .dev = { + .platform_data = &msm_camera_device_mt9t013, + }, +}; +#endif + +static struct pwr_sink trout_pwrsink_table[] = { + { + .id = PWRSINK_AUDIO, + .ua_max = 90000, + }, + { + .id = PWRSINK_BACKLIGHT, + .ua_max = 128000, + }, + { + .id = PWRSINK_LED_BUTTON, + .ua_max = 17000, + }, + { + .id = PWRSINK_LED_KEYBOARD, + .ua_max = 22000, + }, + { + .id = PWRSINK_GP_CLK, + .ua_max = 
30000, + }, + { + .id = PWRSINK_BLUETOOTH, + .ua_max = 15000, + }, + { + .id = PWRSINK_CAMERA, + .ua_max = 0, + }, + { + .id = PWRSINK_SDCARD, + .ua_max = 0, + }, + { + .id = PWRSINK_VIDEO, + .ua_max = 0, + }, + { + .id = PWRSINK_WIFI, + .ua_max = 200000, + }, + { + .id = PWRSINK_SYSTEM_LOAD, + .ua_max = 100000, + .percent_util = 38, + }, +}; + +static struct pwr_sink_platform_data trout_pwrsink_data = { + .num_sinks = ARRAY_SIZE(trout_pwrsink_table), + .sinks = trout_pwrsink_table, + .suspend_late = NULL, + .resume_early = NULL, + .suspend_early = NULL, + .resume_late = NULL, +}; + +static struct platform_device trout_pwr_sink = { + .name = "htc_pwrsink", + .id = -1, + .dev = { + .platform_data = &trout_pwrsink_data, + }, +}; + +static struct platform_device trout_rfkill = { + .name = "trout_rfkill", + .id = -1, +}; + +static struct msm_pmem_setting pmem_setting = { + .pmem_start = MSM_PMEM_MDP_BASE, + .pmem_size = MSM_PMEM_MDP_SIZE, + .pmem_adsp_start = MSM_PMEM_ADSP_BASE, + .pmem_adsp_size = MSM_PMEM_ADSP_SIZE, + .pmem_gpu0_start = MSM_PMEM_GPU0_BASE, + .pmem_gpu0_size = MSM_PMEM_GPU0_SIZE, + .pmem_gpu1_start = MSM_PMEM_GPU1_BASE, + .pmem_gpu1_size = MSM_PMEM_GPU1_SIZE, + .pmem_camera_start = MSM_PMEM_CAMERA_BASE, + .pmem_camera_size = MSM_PMEM_CAMERA_SIZE, + .ram_console_start = MSM_RAM_CONSOLE_BASE, + .ram_console_size = MSM_RAM_CONSOLE_SIZE, +}; + +#ifdef CONFIG_WIFI_CONTROL_FUNC +static struct platform_device trout_wifi = { + .name = "msm_wifi", + .id = 1, + .num_resources = 0, + .resource = NULL, + .dev = { + .platform_data = &trout_wifi_control, + }, +}; +#endif + +#define SND(num, desc) { .name = desc, .id = num } +static struct snd_endpoint snd_endpoints_list[] = { + SND(0, "HANDSET"), + SND(1, "SPEAKER"), + SND(2, "HEADSET"), + SND(3, "BT"), + SND(44, "BT_EC_OFF"), + SND(10, "HEADSET_AND_SPEAKER"), + SND(256, "CURRENT"), + + /* Bluetooth accessories. 
*/ + + SND(12, "HTC BH S100"), + SND(13, "HTC BH M100"), + SND(14, "Motorola H500"), + SND(15, "Nokia HS-36W"), + SND(16, "PLT 510v.D"), + SND(17, "M2500 by Plantronics"), + SND(18, "Nokia HDW-3"), + SND(19, "HBH-608"), + SND(20, "HBH-DS970"), + SND(21, "i.Tech BlueBAND"), + SND(22, "Nokia BH-800"), + SND(23, "Motorola H700"), + SND(24, "HTC BH M200"), + SND(25, "Jabra JX10"), + SND(26, "320Plantronics"), + SND(27, "640Plantronics"), + SND(28, "Jabra BT500"), + SND(29, "Motorola HT820"), + SND(30, "HBH-IV840"), + SND(31, "6XXPlantronics"), + SND(32, "3XXPlantronics"), + SND(33, "HBH-PV710"), + SND(34, "Motorola H670"), + SND(35, "HBM-300"), + SND(36, "Nokia BH-208"), + SND(37, "Samsung WEP410"), + SND(38, "Jabra BT8010"), + SND(39, "Motorola S9"), + SND(40, "Jabra BT620s"), + SND(41, "Nokia BH-902"), + SND(42, "HBH-DS220"), + SND(43, "HBH-DS980"), +}; +#undef SND + +static struct msm_snd_endpoints trout_snd_endpoints = { + .endpoints = snd_endpoints_list, + .num = ARRAY_SIZE(snd_endpoints_list), +}; + +static struct platform_device trout_snd = { + .name = "msm_snd", + .id = -1, + .dev = { + .platform_data = &trout_snd_endpoints, + }, +}; static struct platform_device *devices[] __initdata = { - &msm_device_uart3, &msm_device_smd, &msm_device_nand, - &msm_device_hsusb, &msm_device_i2c, + &msm_device_uart1, +#if !defined(CONFIG_MSM_SERIAL_DEBUGGER) && !defined(CONFIG_TROUT_H2W) + &msm_device_uart3, +#endif +#ifdef CONFIG_SERIAL_MSM_HS + &msm_device_uart_dm1, +#endif + &trout_nav_device, + &trout_reset_keys_device, + &android_leds, + &sd_door_switch, + &android_timed_gpios, +#ifdef CONFIG_MT9T013 + &msm_camera_sensor_mt9t013, +#endif +#ifdef CONFIG_SENSORS_MT9T013 + &trout_camera, +#endif + &trout_rfkill, +#ifdef CONFIG_WIFI_CONTROL_FUNC + &trout_wifi, +#endif +#ifdef CONFIG_HTC_HEADSET + &trout_h2w, +#endif +#ifdef CONFIG_HTC_PWRSINK + &trout_pwr_sink, +#endif + &trout_snd, }; extern struct sys_timer msm_timer; static void __init trout_init_irq(void) { + printk("trout_init_irq()\n"); msm_init_irq(); } -static void __init trout_fixup(struct machine_desc *desc, struct tag *tags, - char **cmdline, struct meminfo *mi) +static uint opt_disable_uart3; + +module_param_named(disable_uart3, opt_disable_uart3, uint, 0); + +static void trout_reset(void) { - mi->nr_banks = 1; - mi->bank[0].start = PHYS_OFFSET; - mi->bank[0].size = (101*1024*1024); + gpio_set_value(TROUT_GPIO_PS_HOLD, 0); +} + +static uint32_t gpio_table[] = { + /* BLUETOOTH */ +#ifdef CONFIG_SERIAL_MSM_HS + PCOM_GPIO_CFG(43, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* RTS */ + PCOM_GPIO_CFG(44, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* CTS */ + PCOM_GPIO_CFG(45, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* RX */ + PCOM_GPIO_CFG(46, 3, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* TX */ +#else + PCOM_GPIO_CFG(43, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* RTS */ + PCOM_GPIO_CFG(44, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* CTS */ + PCOM_GPIO_CFG(45, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* RX */ + PCOM_GPIO_CFG(46, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* TX */ +#endif +}; + + +static uint32_t camera_off_gpio_table[] = { + /* CAMERA */ + PCOM_GPIO_CFG(2, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT2 */ + PCOM_GPIO_CFG(3, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT3 */ + PCOM_GPIO_CFG(4, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT4 */ + PCOM_GPIO_CFG(5, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT5 */ + PCOM_GPIO_CFG(6, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT6 */ + PCOM_GPIO_CFG(7, 0, GPIO_OUTPUT, GPIO_NO_PULL, 
GPIO_4MA), /* DAT7 */ + PCOM_GPIO_CFG(8, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT8 */ + PCOM_GPIO_CFG(9, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT9 */ + PCOM_GPIO_CFG(10, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT10 */ + PCOM_GPIO_CFG(11, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT11 */ + PCOM_GPIO_CFG(12, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* PCLK */ + PCOM_GPIO_CFG(13, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* HSYNC_IN */ + PCOM_GPIO_CFG(14, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* VSYNC_IN */ + PCOM_GPIO_CFG(15, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* MCLK */ +}; + +static uint32_t camera_on_gpio_table[] = { + /* CAMERA */ + PCOM_GPIO_CFG(2, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT2 */ + PCOM_GPIO_CFG(3, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT3 */ + PCOM_GPIO_CFG(4, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT4 */ + PCOM_GPIO_CFG(5, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT5 */ + PCOM_GPIO_CFG(6, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT6 */ + PCOM_GPIO_CFG(7, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT7 */ + PCOM_GPIO_CFG(8, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT8 */ + PCOM_GPIO_CFG(9, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT9 */ + PCOM_GPIO_CFG(10, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT10 */ + PCOM_GPIO_CFG(11, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT11 */ + PCOM_GPIO_CFG(12, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_16MA), /* PCLK */ + PCOM_GPIO_CFG(13, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* HSYNC_IN */ + PCOM_GPIO_CFG(14, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* VSYNC_IN */ + PCOM_GPIO_CFG(15, 1, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_16MA), /* MCLK */ +}; + +static void config_camera_on_gpios(void) +{ + config_gpio_table(camera_on_gpio_table, + ARRAY_SIZE(camera_on_gpio_table)); +} + +static void config_camera_off_gpios(void) +{ + config_gpio_table(camera_off_gpio_table, + ARRAY_SIZE(camera_off_gpio_table)); +} + +static void __init config_gpios(void) +{ + config_gpio_table(gpio_table, ARRAY_SIZE(gpio_table)); + config_camera_off_gpios(); } +static struct msm_acpu_clock_platform_data trout_clock_data = { + .acpu_switch_time_us = 20, + .max_speed_delta_khz = 256000, + .vdd_switch_time_us = 62, + .power_collapse_khz = 19200000, + .wait_for_irq_khz = 128000000, +}; + +#ifdef CONFIG_SERIAL_MSM_HS +static struct msm_serial_hs_platform_data msm_uart_dm1_pdata = { + .rx_wakeup_irq = MSM_GPIO_TO_INT(45), + .inject_rx_on_wakeup = 1, + .rx_to_inject = 0x32, +}; +#endif + static void __init trout_init(void) { int rc; - platform_add_devices(devices, ARRAY_SIZE(devices)); + printk("trout_init() revision=%d\n", system_rev); + + /* + * Setup common MSM GPIOS + */ + config_gpios(); + + msm_hw_reset_hook = trout_reset; + + gpio_direction_output(system_rev < 5 ? 
+ TROUT_4_TP_LS_EN : TROUT_5_TP_LS_EN, 0); + + msm_acpu_clock_init(&trout_clock_data); + +#if defined(CONFIG_MSM_SERIAL_DEBUGGER) + if (!opt_disable_uart3) + msm_serial_debug_init(MSM_UART3_PHYS, INT_UART3, + &msm_device_uart3.dev, 1, + MSM_GPIO_TO_INT(86)); +#endif -#ifdef CONFIG_MMC - rc = trout_init_mmc(system_rev); - if (rc) - printk(KERN_CRIT "%s: MMC init failure (%d)\n", __func__, rc); + /* gpio_configure(108, IRQF_TRIGGER_LOW); */ + + /* put the AF VCM in powerdown mode to avoid noise */ + gpio_set_value(TROUT_GPIO_VCM_PWDN, 1); + mdelay(100); + + if (system_rev < 5) { + trout_x_axis.info.gpio = trout_4_x_axis_gpios; + trout_y_axis.info.gpio = trout_4_y_axis_gpios; + } + +#ifdef CONFIG_SERIAL_MSM_HS + msm_device_uart_dm1.dev.platform_data = &msm_uart_dm1_pdata; #endif + msm_add_usb_devices(trout_phy_reset); + msm_add_mem_devices(&pmem_setting); + + rc = trout_init_mmc(system_rev); + if (rc) + printk(KERN_CRIT "%s: MMC init failure (%d)\n", __func__, rc); + +#ifdef CONFIG_WIFI_MEM_PREALLOC + rc = trout_init_wifi_mem(); + if (rc) + printk(KERN_CRIT "%s: WiFi Memory init failure (%d)\n", __func__, rc); +#endif + + platform_add_devices(devices, ARRAY_SIZE(devices)); + i2c_register_board_info(0, i2c_devices, ARRAY_SIZE(i2c_devices)); + + /* SD card door should wake the device */ + set_irq_wake(TROUT_GPIO_TO_INT(TROUT_GPIO_SD_DOOR_N), 1); } static struct map_desc trout_io_desc[] __initdata = { @@ -78,26 +826,31 @@ static struct map_desc trout_io_desc[] __initdata = { } }; +static void __init trout_fixup(struct machine_desc *desc, struct tag *tags, + char **cmdline, struct meminfo *mi) +{ + mi->nr_banks=1; + mi->bank[0].start = PHYS_OFFSET; + mi->bank[0].size = (101*1024*1024); +} + static void __init trout_map_io(void) { msm_map_common_io(); iotable_init(trout_io_desc, ARRAY_SIZE(trout_io_desc)); - -#ifdef CONFIG_MSM_DEBUG_UART3 - /* route UART3 to the "H2W" extended usb connector */ - writeb(0x80, TROUT_CPLD_BASE + 0x00); -#endif - msm_clock_init(msm_clocks_7x01a, msm_num_clocks_7x01a); } -MACHINE_START(TROUT, "HTC Dream") +MACHINE_START(TROUT, "trout") +/* Maintainer: Brian Swetland */ #ifdef CONFIG_MSM_DEBUG_UART + .phys_io = MSM_DEBUG_UART_PHYS, + .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc, #endif - .boot_params = 0x10000100, - .fixup = trout_fixup, - .map_io = trout_map_io, - .init_irq = trout_init_irq, - .init_machine = trout_init, - .timer = &msm_timer, + .boot_params = 0x10000100, + .fixup = trout_fixup, + .map_io = trout_map_io, + .init_irq = trout_init_irq, + .init_machine = trout_init, + .timer = &msm_timer, MACHINE_END diff --git a/arch/arm/mach-msm/board-trout.h b/arch/arm/mach-msm/board-trout.h index 651851c3e1dd1..287c9471adfa1 100644 --- a/arch/arm/mach-msm/board-trout.h +++ b/arch/arm/mach-msm/board-trout.h @@ -58,7 +58,7 @@ #define TROUT_4_TP_LS_EN 19 #define TROUT_5_TP_LS_EN 1 -#define TROUT_CPLD_BASE 0xE8100000 +#define TROUT_CPLD_BASE 0xFA000000 #define TROUT_CPLD_START 0x98000000 #define TROUT_CPLD_SIZE SZ_4K diff --git a/arch/arm/mach-msm/clock-pcom.h b/arch/arm/mach-msm/clock-pcom.h index 17d027b235011..04f73322adcb9 100644 --- a/arch/arm/mach-msm/clock-pcom.h +++ b/arch/arm/mach-msm/clock-pcom.h @@ -72,8 +72,13 @@ #define P_USB_HS_P_CLK 37 /* High speed USB pbus clock */ #define P_USB_OTG_CLK 38 /* Full speed USB clock */ #define P_VDC_CLK 39 /* Video controller clock */ -#define P_VFE_MDC_CLK 40 /* Camera / Video Front End clock */ -#define P_VFE_CLK 41 /* VFE MDDI client clock */ +#if !defined(CONFIG_MSM_LEGACY_7X00A_AMSS) +#define P_VFE_MDC_CLK 40 
/* VFE MDDI client clock */ +#define P_VFE_CLK 41 /* Camera / Video Front End clock */ +#else /* For other radio code bases */ +#define P_VFE_MDC_CLK 41 /* VFE MDDI client clock */ +#define P_VFE_CLK 40 /* Camera / Video Front End clock */ +#endif #define P_MDP_LCDC_PCLK_CLK 42 #define P_MDP_LCDC_PAD_PCLK_CLK 43 #define P_MDP_VSYNC_CLK 44 @@ -89,7 +94,7 @@ #define P_USB_HS2_CORE_CLK 54 /* High speed USB 2 core clock */ #define P_USB_HS3_CORE_CLK 55 /* High speed USB 3 core clock */ #define P_CAM_M_CLK 56 -#define P_CAMIF_PAD_P_CLK 57 +#define P_QUP_I2C_P_CLK 57 #define P_GRP_2D_CLK 58 #define P_GRP_2D_P_CLK 59 #define P_I2S_CLK 60 @@ -137,6 +142,7 @@ struct clk_ops; extern struct clk_ops clk_ops_pcom; +enum clk_reset_action; int pc_clk_reset(unsigned id, enum clk_reset_action action); diff --git a/arch/arm/mach-msm/clock.c b/arch/arm/mach-msm/clock.c index 2069bfaa3a261..50bff73dd2d1c 100644 --- a/arch/arm/mach-msm/clock.c +++ b/arch/arm/mach-msm/clock.c @@ -20,10 +20,13 @@ #include #include #include +#include #include #include #include #include +#include +#include #include #include "clock.h" @@ -32,7 +35,7 @@ static DEFINE_MUTEX(clocks_mutex); static DEFINE_SPINLOCK(clocks_lock); -static LIST_HEAD(clocks); +static HLIST_HEAD(clocks); struct clk *msm_clocks; unsigned msm_num_clocks; @@ -43,25 +46,54 @@ unsigned msm_num_clocks; static DECLARE_BITMAP(clock_map_enabled, NR_CLKS); static DEFINE_SPINLOCK(clock_map_lock); +static struct clk *clk_allocate_handle(struct clk *sclk) +{ + unsigned long flags; + struct clk_handle *clkh = kzalloc(sizeof(*clkh), GFP_KERNEL); + if (!clkh) + return ERR_PTR(-ENOMEM); + clkh->clk.flags = CLKFLAG_HANDLE; + clkh->source = sclk; + + spin_lock_irqsave(&clocks_lock, flags); + hlist_add_head(&clkh->clk.list, &sclk->handles); + spin_unlock_irqrestore(&clocks_lock, flags); + return &clkh->clk; +} + +static struct clk *source_clk(struct clk *clk) +{ + struct clk_handle *clkh; + + if (clk->flags & CLKFLAG_HANDLE) { + clkh = container_of(clk, struct clk_handle, clk); + clk = clkh->source; + } + return clk; +} + /* * Standard clock functions defined in include/linux/clk.h */ struct clk *clk_get(struct device *dev, const char *id) { struct clk *clk; + struct hlist_node *pos; mutex_lock(&clocks_mutex); - list_for_each_entry(clk, &clocks, list) + hlist_for_each_entry(clk, pos, &clocks, list) if (!strcmp(id, clk->name) && clk->dev == dev) goto found_it; - list_for_each_entry(clk, &clocks, list) + hlist_for_each_entry(clk, pos, &clocks, list) if (!strcmp(id, clk->name) && clk->dev == NULL) goto found_it; clk = ERR_PTR(-ENOENT); found_it: + if (!IS_ERR(clk) && (clk->flags & CLKFLAG_SHARED)) + clk = clk_allocate_handle(clk); mutex_unlock(&clocks_mutex); return clk; } @@ -69,6 +101,22 @@ EXPORT_SYMBOL(clk_get); void clk_put(struct clk *clk) { + struct clk_handle *clkh; + unsigned long flags; + + if (WARN_ON(IS_ERR(clk))) + return; + + if (!(clk->flags & CLKFLAG_HANDLE)) + return; + + clk_set_rate(clk, 0); + + spin_lock_irqsave(&clocks_lock, flags); + clkh = container_of(clk, struct clk_handle, clk); + hlist_del(&clk->list); + kfree(clkh); + spin_unlock_irqrestore(&clocks_lock, flags); } EXPORT_SYMBOL(clk_put); @@ -76,6 +124,7 @@ int clk_enable(struct clk *clk) { unsigned long flags; spin_lock_irqsave(&clocks_lock, flags); + clk = source_clk(clk); clk->count++; if (clk->count == 1) { clk->ops->enable(clk->id); @@ -92,6 +141,7 @@ void clk_disable(struct clk *clk) { unsigned long flags; spin_lock_irqsave(&clocks_lock, flags); + clk = source_clk(clk); BUG_ON(clk->count == 0); 
clk->count--; if (clk->count == 0) { @@ -114,28 +164,53 @@ EXPORT_SYMBOL(clk_reset); unsigned long clk_get_rate(struct clk *clk) { + clk = source_clk(clk); return clk->ops->get_rate(clk->id); } EXPORT_SYMBOL(clk_get_rate); +static unsigned long clk_find_min_rate_locked(struct clk *clk) +{ + unsigned long rate = 0; + struct clk_handle *clkh; + struct hlist_node *pos; + + hlist_for_each_entry(clkh, pos, &clk->handles, clk.list) + if (clkh->rate > rate) + rate = clkh->rate; + return rate; +} + int clk_set_rate(struct clk *clk, unsigned long rate) { int ret; + unsigned long flags; + + spin_lock_irqsave(&clocks_lock, flags); + if (clk->flags & CLKFLAG_HANDLE) { + struct clk_handle *clkh; + clkh = container_of(clk, struct clk_handle, clk); + clkh->rate = rate; + clk = clkh->source; + rate = clk_find_min_rate_locked(clk); + } + if (clk->flags & CLKFLAG_MAX) { ret = clk->ops->set_max_rate(clk->id, rate); if (ret) - return ret; + goto err; } if (clk->flags & CLKFLAG_MIN) { ret = clk->ops->set_min_rate(clk->id, rate); if (ret) - return ret; + goto err; } - if (clk->flags & CLKFLAG_MAX || clk->flags & CLKFLAG_MIN) - return ret; - - return clk->ops->set_rate(clk->id, rate); + if (!(clk->flags & (CLKFLAG_MAX | CLKFLAG_MIN))) + ret = clk->ops->set_rate(clk->id, rate); +err: + spin_unlock_irqrestore(&clocks_lock, flags); + return ret; } EXPORT_SYMBOL(clk_set_rate); @@ -173,16 +248,49 @@ int clk_set_flags(struct clk *clk, unsigned long flags) { if (clk == NULL || IS_ERR(clk)) return -EINVAL; + clk = source_clk(clk); return clk->ops->set_flags(clk->id, flags); } EXPORT_SYMBOL(clk_set_flags); -/* EBI1 is the only shared clock that several clients want to vote on as of - * this commit. If this changes in the future, then it might be better to - * make clk_min_rate handle the voting or make ebi1_clk_set_min_rate more - * generic to support different clocks. 
- */ -static struct clk *ebi1_clk; +void clk_enter_sleep(int from_idle) +{ +} + +void clk_exit_sleep(void) +{ +} + +int clks_print_running(void) +{ + struct clk *clk; + int clk_on_count = 0; + struct hlist_node *pos; + char buf[100]; + char *pbuf = buf; + int size = sizeof(buf); + int wr; + unsigned long flags; + + spin_lock_irqsave(&clocks_lock, flags); + + hlist_for_each_entry(clk, pos, &clocks, list) { + if (clk->count) { + clk_on_count++; + wr = snprintf(pbuf, size, " %s", clk->name); + if (wr >= size) + break; + pbuf += wr; + size -= wr; + } + } + if (clk_on_count) + pr_info("clocks on:%s\n", buf); + + spin_unlock_irqrestore(&clocks_lock, flags); + return !clk_on_count; +} +EXPORT_SYMBOL(clks_print_running); static void __init set_clock_ops(struct clk *clk) { @@ -202,13 +310,9 @@ void __init msm_clock_init(struct clk *clock_tbl, unsigned num_clocks) msm_num_clocks = num_clocks; for (n = 0; n < msm_num_clocks; n++) { set_clock_ops(&msm_clocks[n]); - list_add_tail(&msm_clocks[n].list, &clocks); + hlist_add_head(&msm_clocks[n].list, &clocks); } mutex_unlock(&clocks_mutex); - - ebi1_clk = clk_get(NULL, "ebi1_clk"); - BUG_ON(ebi1_clk == NULL); - } #if defined(CONFIG_DEBUG_FS) @@ -277,6 +381,75 @@ static int clock_debug_local_get(void *data, u64 *val) return 0; } +static void *clk_info_seq_start(struct seq_file *seq, loff_t *ppos) +{ + struct hlist_node *pos; + int i = *ppos; + mutex_lock(&clocks_mutex); + hlist_for_each(pos, &clocks) + if (i-- == 0) + return hlist_entry(pos, struct clk, list); + return NULL; +} + +static void *clk_info_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct clk *clk = v; + ++*pos; + return hlist_entry(clk->list.next, struct clk, list); +} + +static void clk_info_seq_stop(struct seq_file *seq, void *v) +{ + mutex_unlock(&clocks_mutex); +} + +static int clk_info_seq_show(struct seq_file *seq, void *v) +{ + struct clk *clk = v; + unsigned long flags; + struct clk_handle *clkh; + struct hlist_node *pos; + + seq_printf(seq, "Clock %s\n", clk->name); + seq_printf(seq, " Id %d\n", clk->id); + seq_printf(seq, " Count %d\n", clk->count); + seq_printf(seq, " Flags %x\n", clk->flags); + seq_printf(seq, " Dev %p %s\n", + clk->dev, clk->dev ? 
dev_name(clk->dev) : ""); + seq_printf(seq, " Handles %p\n", clk->handles.first); + spin_lock_irqsave(&clocks_lock, flags); + hlist_for_each_entry(clkh, pos, &clk->handles, clk.list) + seq_printf(seq, " Requested rate %ld\n", clkh->rate); + spin_unlock_irqrestore(&clocks_lock, flags); + + seq_printf(seq, " Enabled %d\n", clk->ops->is_enabled(clk->id)); + seq_printf(seq, " Rate %ld\n", clk_get_rate(clk)); + + seq_printf(seq, "\n"); + return 0; +} + +static struct seq_operations clk_info_seqops = { + .start = clk_info_seq_start, + .next = clk_info_seq_next, + .stop = clk_info_seq_stop, + .show = clk_info_seq_show, +}; + +static int clk_info_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &clk_info_seqops); +} + +static const struct file_operations clk_info_fops = { + .owner = THIS_MODULE, + .open = clk_info_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + DEFINE_SIMPLE_ATTRIBUTE(clock_rate_fops, clock_debug_rate_get, clock_debug_rate_set, "%llu\n"); DEFINE_SIMPLE_ATTRIBUTE(clock_enable_fops, clock_debug_enable_get, @@ -288,7 +461,7 @@ static int __init clock_debug_init(void) { struct dentry *dent_rate, *dent_enable, *dent_local; struct clk *clock; - unsigned n = 0; + struct hlist_node *pos; char temp[50], *ptr; dent_rate = debugfs_create_dir("clk_rate", 0); @@ -303,7 +476,10 @@ static int __init clock_debug_init(void) if (IS_ERR(dent_local)) return PTR_ERR(dent_local); - while ((clock = msm_clock_get_nth(n++)) != 0) { + debugfs_create_file("clk_info", 0x444, 0, NULL, &clk_info_fops); + + mutex_lock(&clocks_mutex); + hlist_for_each_entry(clock, pos, &clocks, list) { strncpy(temp, clock->dbg_name, ARRAY_SIZE(temp)-1); for (ptr = temp; *ptr; ptr++) *ptr = tolower(*ptr); @@ -314,10 +490,11 @@ static int __init clock_debug_init(void) debugfs_create_file(temp, S_IRUGO, dent_local, clock, &clock_local_fops); } + mutex_unlock(&clocks_mutex); return 0; } -device_initcall(clock_debug_init); +late_initcall(clock_debug_init); #endif /* The bootloader and/or AMSS may have left various clocks enabled. 
@@ -328,10 +505,11 @@ static int __init clock_late_init(void) { unsigned long flags; struct clk *clk; + struct hlist_node *pos; unsigned count = 0; mutex_lock(&clocks_mutex); - list_for_each_entry(clk, &clocks, list) { + hlist_for_each_entry(clk, pos, &clocks, list) { if (clk->flags & CLKFLAG_AUTO_OFF) { spin_lock_irqsave(&clocks_lock, flags); if (!clk->count) { diff --git a/arch/arm/mach-msm/clock.h b/arch/arm/mach-msm/clock.h index c270b552ed135..b06b9aa7382c8 100644 --- a/arch/arm/mach-msm/clock.h +++ b/arch/arm/mach-msm/clock.h @@ -27,11 +27,15 @@ #define CLKFLAG_NOINVERT 0x00000002 #define CLKFLAG_NONEST 0x00000004 #define CLKFLAG_NORESET 0x00000008 +#define CLKFLAG_HANDLE 0x00000010 #define CLK_FIRST_AVAILABLE_FLAG 0x00000100 #define CLKFLAG_AUTO_OFF 0x00000200 #define CLKFLAG_MIN 0x00000400 #define CLKFLAG_MAX 0x00000800 +#define CLKFLAG_SHARED 0x00001000 +#define CLKFLAG_ARCH_QSD8X50 (0x00020000) +#define CLKFLAG_ARCH_ALL (0xffff0000) struct clk_ops { int (*enable)(unsigned id); @@ -55,8 +59,15 @@ struct clk { const char *name; struct clk_ops *ops; const char *dbg_name; - struct list_head list; + struct hlist_node list; struct device *dev; + struct hlist_head handles; +}; + +struct clk_handle { + struct clk clk; + struct clk *source; + unsigned long rate; }; #define A11S_CLK_CNTL_ADDR (MSM_CSR_BASE + 0x100) @@ -69,10 +80,10 @@ struct clk { #define CLOCK_DBG_NAME(x) #endif -#define CLOCK(clk_name, clk_id, clk_dev, clk_flags) { \ +#define CLOCK(clk_name, clk_id, clk_dev, clk_flags, clk_arch) { \ .name = clk_name, \ .id = clk_id, \ - .flags = clk_flags, \ + .flags = (clk_flags) | ((clk_arch) & CLKFLAG_ARCH_ALL), \ .dev = clk_dev, \ CLOCK_DBG_NAME(#clk_id) \ } @@ -105,5 +116,8 @@ int msm_clock_get_name(uint32_t id, char *name, uint32_t size); int ebi1_clk_set_min_rate(enum clkvote_client client, unsigned long rate); unsigned long clk_get_max_axi_khz(void); +void clk_enter_sleep(int from_idle); +void clk_exit_sleep(void); + #endif diff --git a/arch/arm/mach-msm/cpufreq.c b/arch/arm/mach-msm/cpufreq.c new file mode 100644 index 0000000000000..f1d27ceca2e65 --- /dev/null +++ b/arch/arm/mach-msm/cpufreq.c @@ -0,0 +1,144 @@ +/* arch/arm/mach-msm/cpufreq.c + * + * MSM architecture cpufreq driver + * + * Copyright (C) 2007 Google, Inc. + * Copyright (c) 2007-2010, Code Aurora Forum. All rights reserved. + * Author: Mike A. Chan + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include "acpuclock.h" + +static int override_cpu; + +static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq) +{ + int ret = 0; + struct cpufreq_freqs freqs; + + freqs.old = policy->cur; + if (override_cpu) { + if (policy->cur == policy->max) + return 0; + else + freqs.new = policy->max; + } else + freqs.new = new_freq; + freqs.cpu = policy->cpu; + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + ret = acpuclk_set_rate(new_freq * 1000, 0); + if (!ret) + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + + return ret; +} + +static int msm_cpufreq_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + int index; + struct cpufreq_frequency_table *table = + cpufreq_frequency_get_table(policy->cpu); + + if (cpufreq_frequency_table_target(policy, table, target_freq, relation, + &index)) { + pr_err("cpufreq: invalid target_freq: %d\n", target_freq); + return -EINVAL; + } + + if (policy->cur == table[index].frequency) + return 0; + +#ifdef CONFIG_CPU_FREQ_DEBUG + pr_debug("CPU[%d] target %d relation %d (%d-%d) selected %d\n", + policy->cpu, target_freq, relation, + policy->min, policy->max, table[index].frequency); +#endif + + return set_cpu_freq(policy, table[index].frequency); +} + +static int msm_cpufreq_verify(struct cpufreq_policy *policy) +{ + cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, + policy->cpuinfo.max_freq); + return 0; +} + +static int msm_cpufreq_init(struct cpufreq_policy *policy) +{ + struct cpufreq_frequency_table *table = + cpufreq_frequency_get_table(policy->cpu); + + BUG_ON(cpufreq_frequency_table_cpuinfo(policy, table)); + policy->cur = acpuclk_get_rate(); +#ifdef CONFIG_MSM_CPU_FREQ_SET_MIN_MAX + policy->min = CONFIG_MSM_CPU_FREQ_MIN; + policy->max = CONFIG_MSM_CPU_FREQ_MAX; +#endif + policy->cpuinfo.transition_latency = + acpuclk_get_switch_time() * NSEC_PER_USEC; + return 0; +} + +static ssize_t store_mfreq(struct sysdev_class *class, + struct sysdev_class_attribute *attr, + const char *buf, size_t count) +{ + u64 val; + + if (strict_strtoull(buf, 0, &val) < 0) { + pr_err("Invalid parameter to mfreq\n"); + return 0; + } + if (val) + override_cpu = 1; + else + override_cpu = 0; + return count; +} + +static SYSDEV_CLASS_ATTR(mfreq, 0200, NULL, store_mfreq); + +static struct freq_attr *msm_cpufreq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static struct cpufreq_driver msm_cpufreq_driver = { + /* lps calculations are handled here. */ + .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS, + .init = msm_cpufreq_init, + .verify = msm_cpufreq_verify, + .target = msm_cpufreq_target, + .name = "msm", + .attr = msm_cpufreq_attr, +}; + +static int __init msm_cpufreq_register(void) +{ + int err = sysfs_create_file(&cpu_sysdev_class.kset.kobj, + &attr_mfreq.attr); + if (err) + pr_err("Failed to create sysfs mfreq\n"); + + return cpufreq_register_driver(&msm_cpufreq_driver); +} + +late_initcall(msm_cpufreq_register); diff --git a/arch/arm/mach-msm/dal.c b/arch/arm/mach-msm/dal.c new file mode 100644 index 0000000000000..971abbbcefc3a --- /dev/null +++ b/arch/arm/mach-msm/dal.c @@ -0,0 +1,627 @@ +/* arch/arm/mach-msm/qdsp6/dal.c + * + * Copyright (C) 2009 Google, Inc. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include "dal.h" + +#define DAL_TRACE 0 + +struct dal_hdr { + uint32_t length:16; /* message length (header inclusive) */ + uint32_t version:8; /* DAL protocol version */ + uint32_t priority:7; + uint32_t async:1; + uint32_t ddi:16; /* DDI method number */ + uint32_t prototype:8; /* DDI serialization format */ + uint32_t msgid:8; /* message id (DDI, ATTACH, DETACH, ...) */ + void *from; + void *to; +} __attribute__((packed)); + +#define TRACE_DATA_MAX 128 +#define TRACE_LOG_MAX 32 +#define TRACE_LOG_MASK (TRACE_LOG_MAX - 1) + +struct dal_trace { + unsigned timestamp; + struct dal_hdr hdr; + uint32_t data[TRACE_DATA_MAX]; +}; + +#define DAL_HDR_SIZE (sizeof(struct dal_hdr)) +#define DAL_DATA_MAX 512 +#define DAL_MSG_MAX (DAL_HDR_SIZE + DAL_DATA_MAX) + +#define DAL_VERSION 0x11 + +#define DAL_MSGID_DDI 0x00 +#define DAL_MSGID_ATTACH 0x01 +#define DAL_MSGID_DETACH 0x02 +#define DAL_MSGID_ASYNCH 0xC0 +#define DAL_MSGID_REPLY 0x80 + +struct dal_channel { + struct list_head list; + struct list_head clients; + + /* synchronization for changing channel state, + * adding/removing clients, smd callbacks, etc + */ + spinlock_t lock; + + struct smd_channel *sch; + char *name; + + /* events are delivered at IRQ context immediately, so + * we only need one assembly buffer for the entire channel + */ + struct dal_hdr hdr; + unsigned char data[DAL_DATA_MAX]; + + unsigned count; + void *ptr; + + /* client which the current inbound message is for */ + struct dal_client *active; +}; + +struct dal_client { + struct list_head list; + struct dal_channel *dch; + void *cookie; + dal_event_func_t event; + + /* opaque handle for the far side */ + void *remote; + + /* dal rpc calls are fully synchronous -- only one call may be + * active per client at a time + */ + struct mutex write_lock; + wait_queue_head_t wait; + + unsigned char data[DAL_DATA_MAX]; + + void *reply; + int reply_max; + int status; + unsigned msgid; /* msgid of expected reply */ + + spinlock_t tr_lock; + unsigned tr_head; + unsigned tr_tail; + struct dal_trace *tr_log; +}; + +static unsigned now(void) +{ + struct timespec ts; + ktime_get_ts(&ts); + return (ts.tv_nsec / 1000000) + (ts.tv_sec * 1000); +} + +void dal_trace(struct dal_client *c) +{ + if (c->tr_log) + return; + c->tr_log = kzalloc(sizeof(struct dal_trace) * TRACE_LOG_MAX, + GFP_KERNEL); +} + +void dal_trace_print(struct dal_hdr *hdr, unsigned *data, int len, unsigned when) +{ + int i; + printk("DAL %08x -> %08x L=%03x A=%d D=%04x P=%02x M=%02x T=%d", + (unsigned) hdr->from, (unsigned) hdr->to, + hdr->length, hdr->async, + hdr->ddi, hdr->prototype, hdr->msgid, + when); + len /= 4; + for (i = 0; i < len; i++) { + if (!(i & 7)) + printk("\n%03x", i * 4); + printk(" %08x", data[i]); + } + printk("\n"); +} + +void dal_trace_dump(struct dal_client *c) +{ + struct dal_trace *dt; + unsigned n, len; + + if (!c->tr_log) + return; + + for (n = c->tr_tail; n != c->tr_head; n = (n + 1) & TRACE_LOG_MASK) { + dt = c->tr_log + n; + len = dt->hdr.length - sizeof(dt->hdr); + if (len > TRACE_DATA_MAX) + len = TRACE_DATA_MAX; + dal_trace_print(&dt->hdr, dt->data, len, dt->timestamp); + } +} + +static void dal_trace_log(struct 
dal_client *c, + struct dal_hdr *hdr, void *data, unsigned len) +{ + unsigned long flags; + unsigned t, n; + struct dal_trace *dt; + + t = now(); + if (len > TRACE_DATA_MAX) + len = TRACE_DATA_MAX; + + spin_lock_irqsave(&c->tr_lock, flags); + n = (c->tr_head + 1) & TRACE_LOG_MASK; + if (c->tr_tail == n) + c->tr_tail = (c->tr_tail + 1) & TRACE_LOG_MASK; + dt = c->tr_log + n; + dt->timestamp = t; + memcpy(&dt->hdr, hdr, sizeof(struct dal_hdr)); + memcpy(dt->data, data, len); + c->tr_head = n; + + spin_unlock_irqrestore(&c->tr_lock, flags); +} + + +static void dal_channel_notify(void *priv, unsigned event) +{ + struct dal_channel *dch = priv; + struct dal_hdr *hdr = &dch->hdr; + struct dal_client *client; + unsigned long flags; + int len; + int r; + + spin_lock_irqsave(&dch->lock, flags); + +again: + if (dch->count == 0) { + if (smd_read_avail(dch->sch) < DAL_HDR_SIZE) + goto done; + + smd_read(dch->sch, hdr, DAL_HDR_SIZE); + + if (hdr->length < DAL_HDR_SIZE) + goto done; + + if (hdr->length > DAL_MSG_MAX) + panic("oversize message"); + + dch->count = hdr->length - DAL_HDR_SIZE; + + /* locate the client this message is targeted to */ + list_for_each_entry(client, &dch->clients, list) { + if (dch->hdr.to == client) { + dch->active = client; + dch->ptr = client->data; + goto check_data; + } + } + pr_err("$$$ receiving unknown message len = %d $$$\n", + dch->count); + dch->active = 0; + dch->ptr = dch->data; + } + +check_data: + len = dch->count; + if (len > 0) { + if (smd_read_avail(dch->sch) < len) + goto done; + + r = smd_read(dch->sch, dch->ptr, len); + if (r != len) + panic("invalid read"); + +#if DAL_TRACE + pr_info("dal recv %p <- %p %02x:%04x:%02x %d\n", + hdr->to, hdr->from, hdr->msgid, hdr->ddi, + hdr->prototype, hdr->length - sizeof(*hdr)); + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, dch->ptr, len); +#endif + dch->count = 0; + + client = dch->active; + if (!client) { + pr_err("dal: message to %p discarded\n", dch->hdr.to); + goto again; + } + + if (client->tr_log) + dal_trace_log(client, hdr, dch->ptr, len); + + if (hdr->msgid == DAL_MSGID_ASYNCH) { + if (client->event) + client->event(dch->ptr, len, client->cookie); + else + pr_err("dal: client %p has no event handler\n", + client); + goto again; + } + + if (hdr->msgid == client->msgid) { + if (!client->remote) + client->remote = hdr->from; + if (len > client->reply_max) + len = client->reply_max; + memcpy(client->reply, client->data, len); + client->status = len; + wake_up(&client->wait); + goto again; + } + + pr_err("dal: cannot find client %p\n", dch->hdr.to); + goto again; + } + +done: + spin_unlock_irqrestore(&dch->lock, flags); +} + +static LIST_HEAD(dal_channel_list); +static DEFINE_MUTEX(dal_channel_list_lock); + +static struct dal_channel *dal_open_channel(const char *name) +{ + struct dal_channel *dch; + + /* quick sanity check to avoid trying to talk to + * some non-DAL channel... 
+ */ + if (strncmp(name, "DSP_DAL", 7) && strncmp(name, "SMD_DAL", 7)) + return 0; + + mutex_lock(&dal_channel_list_lock); + + list_for_each_entry(dch, &dal_channel_list, list) { + if (!strcmp(dch->name, name)) + goto found_it; + } + + dch = kzalloc(sizeof(*dch) + strlen(name) + 1, GFP_KERNEL); + if (!dch) + goto fail; + + dch->name = (char *) (dch + 1); + strcpy(dch->name, name); + spin_lock_init(&dch->lock); + INIT_LIST_HEAD(&dch->clients); + + list_add(&dch->list, &dal_channel_list); + +found_it: + if (!dch->sch) { + if (smd_open(name, &dch->sch, dch, dal_channel_notify)) + dch = NULL; + /* FIXME: wait for channel to open before returning */ + msleep(100); + } + +fail: + mutex_unlock(&dal_channel_list_lock); + + return dch; +} + +int dal_call_raw(struct dal_client *client, + struct dal_hdr *hdr, + void *data, int data_len, + void *reply, int reply_max) +{ + struct dal_channel *dch = client->dch; + unsigned long flags; + + client->reply = reply; + client->reply_max = reply_max; + client->msgid = hdr->msgid | DAL_MSGID_REPLY; + client->status = -EBUSY; + +#if DAL_TRACE + pr_info("dal send %p -> %p %02x:%04x:%02x %d\n", + hdr->from, hdr->to, hdr->msgid, hdr->ddi, + hdr->prototype, hdr->length - sizeof(*hdr)); + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, data, data_len); +#endif + + if (client->tr_log) + dal_trace_log(client, hdr, data, data_len); + + spin_lock_irqsave(&dch->lock, flags); + /* FIXME: ensure entire message is written or none. */ + smd_write(dch->sch, hdr, sizeof(*hdr)); + smd_write(dch->sch, data, data_len); + spin_unlock_irqrestore(&dch->lock, flags); + + if (!wait_event_timeout(client->wait, (client->status != -EBUSY), 5*HZ)) { + dal_trace_dump(client); + pr_err("dal: call timed out. dsp is probably dead.\n"); + dal_trace_print(hdr, data, data_len, 0); +#if defined(CONFIG_MSM_QDSP6) + q6audio_dsp_not_responding(); +#endif + } + + return client->status; +} + +int dal_call(struct dal_client *client, + unsigned ddi, unsigned prototype, + void *data, int data_len, + void *reply, int reply_max) +{ + struct dal_hdr hdr; + int r; + + memset(&hdr, 0, sizeof(hdr)); + + hdr.length = data_len + sizeof(hdr); + hdr.version = DAL_VERSION; + hdr.msgid = DAL_MSGID_DDI; + hdr.ddi = ddi; + hdr.prototype = prototype; + hdr.from = client; + hdr.to = client->remote; + + if (hdr.length > DAL_MSG_MAX) + return -EINVAL; + + mutex_lock(&client->write_lock); + r = dal_call_raw(client, &hdr, data, data_len, reply, reply_max); + mutex_unlock(&client->write_lock); +#if 0 + if ((r > 3) && (((uint32_t*) reply)[0] == 0)) { + pr_info("dal call OK\n"); + } else { + pr_info("dal call ERROR\n"); + } +#endif + return r; +} + +struct dal_msg_attach { + uint32_t device_id; + char attach[64]; + char service_name[32]; +} __attribute__((packed)); + +struct dal_reply_attach { + uint32_t status; + char name[64]; +}; + +struct dal_client *dal_attach(uint32_t device_id, const char *name, + dal_event_func_t func, void *cookie) +{ + struct dal_hdr hdr; + struct dal_msg_attach msg; + struct dal_reply_attach reply; + struct dal_channel *dch; + struct dal_client *client; + unsigned long flags; + int r; + + dch = dal_open_channel(name); + if (!dch) + return 0; + + client = kzalloc(sizeof(*client), GFP_KERNEL); + if (!client) + return 0; + + client->dch = dch; + client->event = func; + client->cookie = cookie; + mutex_init(&client->write_lock); + spin_lock_init(&client->tr_lock); + init_waitqueue_head(&client->wait); + + spin_lock_irqsave(&dch->lock, flags); + list_add(&client->list, &dch->clients); + 
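+	/* Publishing the client on dch->clients before the lock is dropped is
+	 * what lets dal_channel_notify(), invoked from the SMD callback, match
+	 * hdr->to against this client and deliver the ATTACH reply that the
+	 * dal_call_raw() below waits for. */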
spin_unlock_irqrestore(&dch->lock, flags); + + memset(&hdr, 0, sizeof(hdr)); + memset(&msg, 0, sizeof(msg)); + + hdr.length = sizeof(hdr) + sizeof(msg); + hdr.version = DAL_VERSION; + hdr.msgid = DAL_MSGID_ATTACH; + hdr.from = client; + msg.device_id = device_id; + + r = dal_call_raw(client, &hdr, &msg, sizeof(msg), + &reply, sizeof(reply)); + + if ((r == sizeof(reply)) && (reply.status == 0)) { + reply.name[63] = 0; + pr_info("dal_attach: status = %d, name = '%s'\n", + reply.status, reply.name); + return client; + } + + pr_err("dal_attach: failure\n"); + + dal_detach(client); + return 0; +} + +int dal_detach(struct dal_client *client) +{ + struct dal_channel *dch; + unsigned long flags; + + mutex_lock(&client->write_lock); + if (client->remote) { + struct dal_hdr hdr; + uint32_t data; + + memset(&hdr, 0, sizeof(hdr)); + hdr.length = sizeof(hdr) + sizeof(data); + hdr.version = DAL_VERSION; + hdr.msgid = DAL_MSGID_DETACH; + hdr.from = client; + hdr.to = client->remote; + data = (uint32_t) client; + + dal_call_raw(client, &hdr, &data, sizeof(data), + &data, sizeof(data)); + } + + dch = client->dch; + spin_lock_irqsave(&dch->lock, flags); + if (dch->active == client) { + /* We have received a message header for this client + * but not the body of the message. Ensure that when + * the body arrives we don't write it into the now-closed + * client. In *theory* this should never happen. + */ + dch->active = 0; + dch->ptr = dch->data; + } + list_del(&client->list); + spin_unlock_irqrestore(&dch->lock, flags); + + mutex_unlock(&client->write_lock); + + kfree(client); + return 0; +} + +void *dal_get_remote_handle(struct dal_client *client) +{ + return client->remote; +} + +/* convenience wrappers */ + +int dal_call_f0(struct dal_client *client, uint32_t ddi, uint32_t arg1) +{ + uint32_t tmp = arg1; + int res; + res = dal_call(client, ddi, 0, &tmp, sizeof(tmp), &tmp, sizeof(tmp)); + if (res >= 4) + return (int) tmp; + return res; +} + +int dal_call_f1(struct dal_client *client, uint32_t ddi, uint32_t arg1, uint32_t arg2) +{ + uint32_t tmp[2]; + int res; + tmp[0] = arg1; + tmp[1] = arg2; + res = dal_call(client, ddi, 1, tmp, sizeof(tmp), tmp, sizeof(uint32_t)); + if (res >= 4) + return (int) tmp[0]; + return res; +} + +int dal_call_f5(struct dal_client *client, uint32_t ddi, void *ibuf, uint32_t ilen) +{ + uint32_t tmp[128]; + int res; + int param_idx = 0; + + if (ilen + 4 > DAL_DATA_MAX) + return -EINVAL; + + tmp[param_idx] = ilen; + param_idx++; + + memcpy(&tmp[param_idx], ibuf, ilen); + param_idx += DIV_ROUND_UP(ilen, 4); + + res = dal_call(client, ddi, 5, tmp, param_idx * 4, tmp, sizeof(tmp)); + + if (res >= 4) + return (int) tmp[0]; + return res; +} + +int dal_call_f9(struct dal_client *client, uint32_t ddi, void *obuf, + uint32_t olen) +{ + uint32_t tmp[128]; + int res; + + if (olen > sizeof(tmp) - 8) + return -EINVAL; + tmp[0] = olen; + + res = dal_call(client, ddi, 9, tmp, sizeof(uint32_t), tmp, + sizeof(tmp)); + + if (res >= 4) + res = (int)tmp[0]; + + if (!res) { + if (tmp[1] > olen) + return -EIO; + memcpy(obuf, &tmp[2], tmp[1]); + } + return res; +} + +int dal_call_f13(struct dal_client *client, uint32_t ddi, void *ibuf1, + uint32_t ilen1, void *ibuf2, uint32_t ilen2, void *obuf, + uint32_t olen) +{ + uint32_t tmp[128]; + int res; + int param_idx = 0; + + if (ilen1 + ilen2 + 8 > DAL_DATA_MAX) + return -EINVAL; + + tmp[param_idx] = ilen1; + param_idx++; + + memcpy(&tmp[param_idx], ibuf1, ilen1); + param_idx += DIV_ROUND_UP(ilen1, 4); + + tmp[param_idx++] = ilen2; + memcpy(&tmp[param_idx], 
ibuf2, ilen2); + param_idx += DIV_ROUND_UP(ilen2, 4); + + tmp[param_idx++] = olen; + res = dal_call(client, ddi, 13, tmp, param_idx * 4, tmp, sizeof(tmp)); + + if (res >= 4) + res = (int)tmp[0]; + + if (!res) { + if (tmp[1] > olen) + return -EIO; + memcpy(obuf, &tmp[2], tmp[1]); + } + return res; +} diff --git a/arch/arm/mach-msm/dal.h b/arch/arm/mach-msm/dal.h new file mode 100644 index 0000000000000..6e4368295be31 --- /dev/null +++ b/arch/arm/mach-msm/dal.h @@ -0,0 +1,73 @@ +/* arch/arm/mach-msm/qdsp6/dal.h + * + * Copyright (C) 2009 Google, Inc. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _MACH_MSM_DAL_ +#define _MACH_MSM_DAL_ + +struct dal_client; + +struct dal_info { + uint32_t size; + uint32_t version; + char name[32]; +}; + +typedef void (*dal_event_func_t)(void *data, int len, void *cookie); + +struct dal_client *dal_attach(uint32_t device_id, const char *name, + dal_event_func_t func, void *cookie); + +int dal_detach(struct dal_client *client); + +int dal_call(struct dal_client *client, + unsigned ddi, unsigned prototype, + void *data, int data_len, + void *reply, int reply_max); + +void dal_trace(struct dal_client *client); +void dal_trace_dump(struct dal_client *client); + +/* function to call before panic on stalled dal calls */ +void dal_set_oops(struct dal_client *client, void (*oops)(void)); + +/* convenience wrappers */ +int dal_call_f0(struct dal_client *client, uint32_t ddi, + uint32_t arg1); +int dal_call_f1(struct dal_client *client, uint32_t ddi, + uint32_t arg1, uint32_t arg2); +int dal_call_f5(struct dal_client *client, uint32_t ddi, + void *ibuf, uint32_t ilen); +int dal_call_f9(struct dal_client *client, uint32_t ddi, + void *obuf, uint32_t olen); +int dal_call_f13(struct dal_client *client, uint32_t ddi, void *ibuf1, + uint32_t ilen1, void *ibuf2, uint32_t ilen2, void *obuf, + uint32_t olen); + +/* common DAL operations */ +enum { + DAL_OP_ATTACH = 0, + DAL_OP_DETACH, + DAL_OP_INIT, + DAL_OP_DEINIT, + DAL_OP_OPEN, + DAL_OP_CLOSE, + DAL_OP_INFO, + DAL_OP_POWEREVENT, + DAL_OP_SYSREQUEST, + DAL_OP_FIRST_DEVICE_API, +}; + +#endif diff --git a/arch/arm/mach-msm/devices-msm7x00.c b/arch/arm/mach-msm/devices-msm7x00.c index fb548a8a21dbb..19e4dce17a06b 100644 --- a/arch/arm/mach-msm/devices-msm7x00.c +++ b/arch/arm/mach-msm/devices-msm7x00.c @@ -15,10 +15,14 @@ #include #include +#include +#include #include #include +#include #include "devices.h" +#include "proc_comm.h" #include #include @@ -88,6 +92,92 @@ struct platform_device msm_device_uart3 = { .resource = resources_uart3, }; +static struct resource msm_uart1_dm_resources[] = { + { + .start = MSM_UART1DM_PHYS, + .end = MSM_UART1DM_PHYS + PAGE_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_UART1DM_IRQ, + .end = INT_UART1DM_IRQ, + .flags = IORESOURCE_IRQ, + }, + { + .start = INT_UART1DM_RX, + .end = INT_UART1DM_RX, + .flags = IORESOURCE_IRQ, + }, + { + .start = DMOV_HSUART1_TX_CHAN, + .end = DMOV_HSUART1_RX_CHAN, + .name = "uartdm_channels", + .flags = IORESOURCE_DMA, + }, + { + .start = DMOV_HSUART1_TX_CRCI, + .end = DMOV_HSUART1_RX_CRCI, + 
.name = "uartdm_crci", + .flags = IORESOURCE_DMA, + }, +}; + +static u64 msm_uart_dm1_dma_mask = DMA_BIT_MASK(32); + +struct platform_device msm_device_uart_dm1 = { + .name = "msm_serial_hs", + .id = 0, + .num_resources = ARRAY_SIZE(msm_uart1_dm_resources), + .resource = msm_uart1_dm_resources, + .dev = { + .dma_mask = &msm_uart_dm1_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, +}; + +static struct resource msm_uart2_dm_resources[] = { + { + .start = MSM_UART2DM_PHYS, + .end = MSM_UART2DM_PHYS + PAGE_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_UART2DM_IRQ, + .end = INT_UART2DM_IRQ, + .flags = IORESOURCE_IRQ, + }, + { + .start = INT_UART2DM_RX, + .end = INT_UART2DM_RX, + .flags = IORESOURCE_IRQ, + }, + { + .start = DMOV_HSUART2_TX_CHAN, + .end = DMOV_HSUART2_RX_CHAN, + .name = "uartdm_channels", + .flags = IORESOURCE_DMA, + }, + { + .start = DMOV_HSUART2_TX_CRCI, + .end = DMOV_HSUART2_RX_CRCI, + .name = "uartdm_crci", + .flags = IORESOURCE_DMA, + }, +}; + +static u64 msm_uart_dm2_dma_mask = DMA_BIT_MASK(32); + +struct platform_device msm_device_uart_dm2 = { + .name = "msm_serial_hs", + .id = 1, + .num_resources = ARRAY_SIZE(msm_uart2_dm_resources), + .resource = msm_uart2_dm_resources, + .dev = { + .dma_mask = &msm_uart_dm2_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, +}; + static struct resource resources_i2c[] = { { .start = MSM_I2C_PHYS, @@ -108,6 +198,30 @@ struct platform_device msm_device_i2c = { .resource = resources_i2c, }; +#define GPIO_I2C_CLK 60 +#define GPIO_I2C_DAT 61 +void msm_set_i2c_mux(bool gpio, int *gpio_clk, int *gpio_dat) +{ + unsigned id; + if (gpio) { + id = PCOM_GPIO_CFG(GPIO_I2C_CLK, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA); + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); + id = PCOM_GPIO_CFG(GPIO_I2C_DAT, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA); + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); + *gpio_clk = GPIO_I2C_CLK; + *gpio_dat = GPIO_I2C_DAT; + } else { + id = PCOM_GPIO_CFG(GPIO_I2C_CLK, 1, GPIO_INPUT, + GPIO_NO_PULL, GPIO_8MA); + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); + id = PCOM_GPIO_CFG(GPIO_I2C_DAT , 1, GPIO_INPUT, + GPIO_NO_PULL, GPIO_8MA); + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); + } +} + static struct resource resources_hsusb[] = { { .start = MSM_HSUSB_PHYS, @@ -414,24 +528,52 @@ struct platform_device msm_device_mdp = { .resource = resources_mdp, }; +static struct resource resources_tssc[] = { + { + .start = MSM_TSSC_PHYS, + .end = MSM_TSSC_PHYS + MSM_TSSC_SIZE - 1, + .name = "tssc", + .flags = IORESOURCE_MEM, + }, + { + .start = INT_TCHSCRN1, + .end = INT_TCHSCRN1, + .name = "tssc1", + .flags = IORESOURCE_IRQ | IRQF_TRIGGER_RISING, + }, + { + .start = INT_TCHSCRN2, + .end = INT_TCHSCRN2, + .name = "tssc2", + .flags = IORESOURCE_IRQ | IRQF_TRIGGER_RISING, + }, +}; + +struct platform_device msm_device_touchscreen = { + .name = "msm_touchscreen", + .id = 0, + .num_resources = ARRAY_SIZE(resources_tssc), + .resource = resources_tssc, +}; + struct clk msm_clocks_7x01a[] = { CLK_PCOM("adm_clk", ADM_CLK, NULL, 0), CLK_PCOM("adsp_clk", ADSP_CLK, NULL, 0), - CLK_PCOM("ebi1_clk", EBI1_CLK, NULL, 0), + CLK_PCOM("ebi1_clk", EBI1_CLK, NULL, CLK_MIN), CLK_PCOM("ebi2_clk", EBI2_CLK, NULL, 0), CLK_PCOM("ecodec_clk", ECODEC_CLK, NULL, 0), - CLK_PCOM("emdh_clk", EMDH_CLK, NULL, OFF), - CLK_PCOM("gp_clk", GP_CLK, NULL, 0), + CLK_PCOM("mddi_clk", EMDH_CLK, &msm_device_mddi1.dev, OFF), + CLK_PCOM("gp_clk", GP_CLK, NULL, 0), CLK_PCOM("grp_clk", GRP_3D_CLK, NULL, OFF), CLK_PCOM("i2c_clk", 
I2C_CLK, &msm_device_i2c.dev, 0), CLK_PCOM("icodec_rx_clk", ICODEC_RX_CLK, NULL, 0), CLK_PCOM("icodec_tx_clk", ICODEC_TX_CLK, NULL, 0), CLK_PCOM("imem_clk", IMEM_CLK, NULL, OFF), CLK_PCOM("mdc_clk", MDC_CLK, NULL, 0), - CLK_PCOM("mdp_clk", MDP_CLK, NULL, OFF), + CLK_PCOM("mdp_clk", MDP_CLK, &msm_device_mdp.dev, OFF), CLK_PCOM("pbus_clk", PBUS_CLK, NULL, 0), CLK_PCOM("pcm_clk", PCM_CLK, NULL, 0), - CLK_PCOM("mddi_clk", PMDH_CLK, NULL, OFF | CLK_MINMAX), + CLK_PCOM("mddi_clk", PMDH_CLK, &msm_device_mddi0.dev, OFF | CLK_MINMAX), CLK_PCOM("sdac_clk", SDAC_CLK, NULL, OFF), CLK_PCOM("sdc_clk", SDC1_CLK, &msm_device_sdc1.dev, OFF), CLK_PCOM("sdc_pclk", SDC1_P_CLK, &msm_device_sdc1.dev, OFF), @@ -446,14 +588,14 @@ struct clk msm_clocks_7x01a[] = { CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0), CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0), CLK_PCOM("uart_clk", UART1_CLK, &msm_device_uart1.dev, OFF), - CLK_PCOM("uart_clk", UART2_CLK, &msm_device_uart2.dev, 0), + CLK_PCOM("uart_clk", UART2_CLK, &msm_device_uart2.dev, OFF), CLK_PCOM("uart_clk", UART3_CLK, &msm_device_uart3.dev, OFF), - CLK_PCOM("uart1dm_clk", UART1DM_CLK, NULL, OFF), - CLK_PCOM("uart2dm_clk", UART2DM_CLK, NULL, 0), + CLK_PCOM("uartdm_clk", UART1DM_CLK, &msm_device_uart_dm1.dev, OFF), + CLK_PCOM("uartdm_clk", UART2DM_CLK, &msm_device_uart_dm2.dev, OFF), CLK_PCOM("usb_hs_clk", USB_HS_CLK, &msm_device_hsusb.dev, OFF), CLK_PCOM("usb_hs_pclk", USB_HS_P_CLK, &msm_device_hsusb.dev, OFF), CLK_PCOM("usb_otg_clk", USB_OTG_CLK, NULL, 0), - CLK_PCOM("vdc_clk", VDC_CLK, NULL, OFF ), + CLK_PCOM("vdc_clk", VDC_CLK, NULL, OFF | CLK_MINMAX), CLK_PCOM("vfe_clk", VFE_CLK, NULL, OFF), CLK_PCOM("vfe_mdc_clk", VFE_MDC_CLK, NULL, OFF), }; diff --git a/arch/arm/mach-msm/devices-msm7x30.c b/arch/arm/mach-msm/devices-msm7x30.c index 4e9a0ab3e9377..56ba5d9f5fde5 100644 --- a/arch/arm/mach-msm/devices-msm7x30.c +++ b/arch/arm/mach-msm/devices-msm7x30.c @@ -23,6 +23,7 @@ #include #include "devices.h" +#include "proc_comm.h" #include "smd_private.h" #include @@ -31,6 +32,19 @@ #include +static struct resource resources_uart1[] = { + { + .start = INT_UART1, + .end = INT_UART1, + .flags = IORESOURCE_IRQ, + }, + { + .start = MSM_UART1_PHYS, + .end = MSM_UART1_PHYS + MSM_UART1_SIZE - 1, + .flags = IORESOURCE_MEM, + }, +}; + static struct resource resources_uart2[] = { { .start = INT_UART2, @@ -44,6 +58,26 @@ static struct resource resources_uart2[] = { }, }; +static struct resource resources_uart3[] = { + { + .start = INT_UART3, + .end = INT_UART3, + .flags = IORESOURCE_IRQ, + }, + { + .start = MSM_UART3_PHYS, + .end = MSM_UART3_PHYS + MSM_UART3_SIZE - 1, + .flags = IORESOURCE_MEM, + }, +}; + +struct platform_device msm_device_uart1 = { + .name = "msm_serial", + .id = 0, + .num_resources = ARRAY_SIZE(resources_uart1), + .resource = resources_uart1, +}; + struct platform_device msm_device_uart2 = { .name = "msm_serial", .id = 1, @@ -51,11 +85,555 @@ struct platform_device msm_device_uart2 = { .resource = resources_uart2, }; +struct platform_device msm_device_uart3 = { + .name = "msm_serial", + .id = 2, + .num_resources = ARRAY_SIZE(resources_uart3), + .resource = resources_uart3, +}; + struct platform_device msm_device_smd = { .name = "msm_smd", .id = -1, }; +static struct resource msm_uart1_dm_resources[] = { + { + .start = MSM_UART1DM_PHYS, + .end = MSM_UART1DM_PHYS + PAGE_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_UART1DM_IRQ, + .end = INT_UART1DM_IRQ, + .flags = IORESOURCE_IRQ, + }, + { + .start = INT_UART1DM_RX, + .end = INT_UART1DM_RX, + .flags = 
IORESOURCE_IRQ, + }, + { + .start = DMOV_HSUART1_TX_CHAN, + .end = DMOV_HSUART1_RX_CHAN, + .name = "uartdm_channels", + .flags = IORESOURCE_DMA, + }, + { + .start = DMOV_HSUART1_TX_CRCI, + .end = DMOV_HSUART1_RX_CRCI, + .name = "uartdm_crci", + .flags = IORESOURCE_DMA, + }, +}; + +static u64 msm_uart_dm1_dma_mask = DMA_BIT_MASK(32); + +struct platform_device msm_device_uart_dm1 = { + .name = "msm_serial_hs", + .id = 0, + .num_resources = ARRAY_SIZE(msm_uart1_dm_resources), + .resource = msm_uart1_dm_resources, + .dev = { + .dma_mask = &msm_uart_dm1_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, +}; + +static struct resource msm_uart2_dm_resources[] = { + { + .start = MSM_UART2DM_PHYS, + .end = MSM_UART2DM_PHYS + PAGE_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_UART2DM_IRQ, + .end = INT_UART2DM_IRQ, + .flags = IORESOURCE_IRQ, + }, + { + .start = INT_UART2DM_RX, + .end = INT_UART2DM_RX, + .flags = IORESOURCE_IRQ, + }, + { + .start = DMOV_HSUART2_TX_CHAN, + .end = DMOV_HSUART2_RX_CHAN, + .name = "uartdm_channels", + .flags = IORESOURCE_DMA, + }, + { + .start = DMOV_HSUART2_TX_CRCI, + .end = DMOV_HSUART2_RX_CRCI, + .name = "uartdm_crci", + .flags = IORESOURCE_DMA, + }, +}; + +static u64 msm_uart_dm2_dma_mask = DMA_BIT_MASK(32); + +struct platform_device msm_device_uart_dm2 = { + .name = "msm_serial_hs", + .id = 1, + .num_resources = ARRAY_SIZE(msm_uart2_dm_resources), + .resource = msm_uart2_dm_resources, + .dev = { + .dma_mask = &msm_uart_dm2_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, +}; + +static struct resource resources_i2c[] = { + { + .start = MSM_I2C_PHYS, + .end = MSM_I2C_PHYS + MSM_I2C_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_PWB_I2C, + .end = INT_PWB_I2C, + .flags = IORESOURCE_IRQ, + }, +}; + +struct platform_device msm_device_i2c = { + .name = "msm_i2c", + .id = 0, + .num_resources = ARRAY_SIZE(resources_i2c), + .resource = resources_i2c, +}; + +#define GPIO_I2C_CLK 70 +#define GPIO_I2C_DAT 71 +void msm_set_i2c_mux(bool gpio, int *gpio_clk, int *gpio_dat) +{ + unsigned id; + if (gpio) { + id = PCOM_GPIO_CFG(GPIO_I2C_CLK, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA); + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); + id = PCOM_GPIO_CFG(GPIO_I2C_DAT, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA); + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); + *gpio_clk = GPIO_I2C_CLK; + *gpio_dat = GPIO_I2C_DAT; + } else { + id = PCOM_GPIO_CFG(GPIO_I2C_CLK, 1, GPIO_INPUT, + GPIO_NO_PULL, GPIO_8MA); + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); + id = PCOM_GPIO_CFG(GPIO_I2C_DAT , 1, GPIO_INPUT, + GPIO_NO_PULL, GPIO_8MA); + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); + } +} + +static struct resource resources_i2c2[] = { + { + .start = MSM_I2C_2_PHYS, + .end = MSM_I2C_2_PHYS + MSM_I2C_2_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_PWB_I2C_2, + .end = INT_PWB_I2C_2, + .flags = IORESOURCE_IRQ, + }, +}; + +struct platform_device msm_device_i2c2 = { + .name = "msm_i2c", + .id = 1, + .num_resources = ARRAY_SIZE(resources_i2c2), + .resource = resources_i2c2, +}; + +static struct resource resources_qup[] = { + { + .name = "qup_phys_addr", + .start = MSM_QUP_PHYS, + .end = MSM_QUP_PHYS + MSM_QUP_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .name = "gsbi_qup_i2c_addr", + .start = MSM_GSBI_QUP_I2C_PHYS, + .end = MSM_GSBI_QUP_I2C_PHYS + MSM_GSBI_QUP_I2C_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .name = "qup_in_intr", + .start = INT_PWB_QUP_IN, + .end = INT_PWB_QUP_IN, + .flags = IORESOURCE_IRQ, + }, + { + 
.name = "qup_out_intr", + .start = INT_PWB_QUP_OUT, + .end = INT_PWB_QUP_OUT, + .flags = IORESOURCE_IRQ, + }, + { + .name = "qup_err_intr", + .start = INT_PWB_QUP_ERR, + .end = INT_PWB_QUP_ERR, + .flags = IORESOURCE_IRQ, + }, +}; + +struct platform_device msm_device_qup_i2c = { + .name = "qup_i2c", + .id = 4, + .num_resources = ARRAY_SIZE(resources_qup), + .resource = resources_qup, +}; + +struct flash_platform_data msm_nand_data = { + .parts = NULL, + .nr_parts = 0, +}; + +static struct resource resources_nand[] = { + [0] = { + .start = 7, + .end = 7, + .flags = IORESOURCE_DMA, + }, +}; + +struct platform_device msm_device_nand = { + .name = "msm_nand", + .id = -1, + .num_resources = ARRAY_SIZE(resources_nand), + .resource = resources_nand, + .dev = { + .platform_data = &msm_nand_data, + }, +}; + +static struct resource resources_sdc1[] = { + { + .start = MSM_SDC1_PHYS, + .end = MSM_SDC1_PHYS + MSM_SDC1_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_SDC1_0, + .end = INT_SDC1_0, + .flags = IORESOURCE_IRQ, + .name = "cmd_irq", + }, + { + .start = INT_SDC1_1, + .end = INT_SDC1_1, + .flags = IORESOURCE_IRQ, + .name = "pio_irq", + }, + { + .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED, + .name = "status_irq" + }, + { + .start = 8, + .end = 8, + .flags = IORESOURCE_DMA, + }, +}; + +static struct resource resources_sdc2[] = { + { + .start = MSM_SDC2_PHYS, + .end = MSM_SDC2_PHYS + MSM_SDC2_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_SDC2_0, + .end = INT_SDC2_0, + .flags = IORESOURCE_IRQ, + .name = "cmd_irq", + }, + { + .start = INT_SDC2_1, + .end = INT_SDC2_1, + .flags = IORESOURCE_IRQ, + .name = "pio_irq", + }, + { + .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED, + .name = "status_irq" + }, + { + .start = 8, + .end = 8, + .flags = IORESOURCE_DMA, + }, +}; + +static struct resource resources_sdc3[] = { + { + .start = MSM_SDC3_PHYS, + .end = MSM_SDC3_PHYS + MSM_SDC3_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_SDC3_0, + .end = INT_SDC3_0, + .flags = IORESOURCE_IRQ, + .name = "cmd_irq", + }, + { + .start = INT_SDC3_1, + .end = INT_SDC3_1, + .flags = IORESOURCE_IRQ, + .name = "pio_irq", + }, + { + .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED, + .name = "status_irq" + }, + { + .start = 8, + .end = 8, + .flags = IORESOURCE_DMA, + }, +}; + +static struct resource resources_sdc4[] = { + { + .start = MSM_SDC4_PHYS, + .end = MSM_SDC4_PHYS + MSM_SDC4_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_SDC4_0, + .end = INT_SDC4_0, + .flags = IORESOURCE_IRQ, + .name = "cmd_irq", + }, + { + .start = INT_SDC4_1, + .end = INT_SDC4_1, + .flags = IORESOURCE_IRQ, + .name = "pio_irq", + }, + { + .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED, + .name = "status_irq" + }, + { + .start = 8, + .end = 8, + .flags = IORESOURCE_DMA, + }, +}; + +struct platform_device msm_device_sdc1 = { + .name = "msm_sdcc", + .id = 1, + .num_resources = ARRAY_SIZE(resources_sdc1), + .resource = resources_sdc1, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, +}; + +struct platform_device msm_device_sdc2 = { + .name = "msm_sdcc", + .id = 2, + .num_resources = ARRAY_SIZE(resources_sdc2), + .resource = resources_sdc2, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, +}; + +struct platform_device msm_device_sdc3 = { + .name = "msm_sdcc", + .id = 3, + .num_resources = ARRAY_SIZE(resources_sdc3), + .resource = resources_sdc3, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, +}; + +struct platform_device msm_device_sdc4 = { + .name = "msm_sdcc", + .id = 4, + .num_resources = 
ARRAY_SIZE(resources_sdc4), + .resource = resources_sdc4, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, +}; + +static struct platform_device *msm_sdcc_devices[] __initdata = { + &msm_device_sdc1, + &msm_device_sdc2, + &msm_device_sdc3, + &msm_device_sdc4, +}; + +int __init msm_add_sdcc(unsigned int controller, + struct msm_mmc_platform_data *plat, + unsigned int stat_irq, unsigned long stat_irq_flags) +{ + struct platform_device *pdev; + struct resource *res; + + if (controller < 1 || controller > 4) + return -EINVAL; + + pdev = msm_sdcc_devices[controller-1]; + pdev->dev.platform_data = plat; + + res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "status_irq"); + if (!res) + return -EINVAL; + else if (stat_irq) { + res->start = res->end = stat_irq; + res->flags &= ~IORESOURCE_DISABLED; + res->flags |= stat_irq_flags; + } + + return platform_device_register(pdev); +} + +static struct resource resources_mddi0[] = { + { + .start = MSM_PMDH_PHYS, + .end = MSM_PMDH_PHYS + MSM_PMDH_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_MDDI_PRI, + .end = INT_MDDI_PRI, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource resources_mddi1[] = { + { + .start = MSM_EMDH_PHYS, + .end = MSM_EMDH_PHYS + MSM_EMDH_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_MDDI_EXT, + .end = INT_MDDI_EXT, + .flags = IORESOURCE_IRQ, + }, +}; + +struct platform_device msm_device_mddi0 = { + .name = "msm_mddi", + .id = 0, + .num_resources = ARRAY_SIZE(resources_mddi0), + .resource = resources_mddi0, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, +}; + +struct platform_device msm_device_mddi1 = { + .name = "msm_mddi", + .id = 1, + .num_resources = ARRAY_SIZE(resources_mddi1), + .resource = resources_mddi1, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, +}; + +static struct resource resources_mdp[] = { + { + .start = MSM_MDP_PHYS, + .end = MSM_MDP_PHYS + MSM_MDP_SIZE - 1, + .name = "mdp", + .flags = IORESOURCE_MEM + }, + { + .start = INT_MDP, + .end = INT_MDP, + .flags = IORESOURCE_IRQ, + }, +}; + +struct platform_device msm_device_mdp = { + .name = "msm_mdp", + .id = 0, + .num_resources = ARRAY_SIZE(resources_mdp), + .resource = resources_mdp, +}; + + +static struct resource resources_ssbi_pmic[] = { + { + .start = MSM_PMIC_SSBI_PHYS, + .end = MSM_PMIC_SSBI_PHYS + MSM_PMIC_SSBI_SIZE - 1, + .flags = IORESOURCE_MEM, + }, +}; + +struct platform_device msm_device_ssbi_pmic = { + .name = "msm_ssbi", + .id = -1, + .resource = resources_ssbi_pmic, + .num_resources = ARRAY_SIZE(resources_ssbi_pmic), +}; + +static struct resource resources_spi[] = { + { + .start = MSM_SPI_PHYS, + .end = MSM_SPI_PHYS + MSM_SPI_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_SPI_INPUT, + .end = INT_SPI_INPUT, + .name = "irq_in", + .flags = IORESOURCE_IRQ, + }, + { + .start = INT_SPI_OUTPUT, + .end = INT_SPI_OUTPUT, + .name = "irq_out", + .flags = IORESOURCE_IRQ, + }, + { + .start = INT_SPI_ERROR, + .end = INT_SPI_ERROR, + .name = "irq_err", + .flags = IORESOURCE_IRQ, + }, +}; + +struct platform_device msm_device_spi = { + .name = "msm_spi", + .id = 0, + .num_resources = ARRAY_SIZE(resources_spi), + .resource = resources_spi, +}; + +static struct resource msm_vidc_720p_resources[] = { + { + .start = 0xA3B00000, + .end = 0xA3B00000 + SZ_4K - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_MFC720, + .end = INT_MFC720, + .flags = IORESOURCE_IRQ, + }, +}; + +struct platform_device msm_device_vidc_720p = { + .name = "msm_vidc_720p", + .id = 0, + .num_resources = 
ARRAY_SIZE(msm_vidc_720p_resources), + .resource = msm_vidc_720p_resources, +}; + static struct resource resources_otg[] = { { .start = MSM_HSUSB_PHYS, @@ -131,11 +709,10 @@ struct clk msm_clocks_7x30[] = { CLK_PCOM("adm_clk", ADM_CLK, NULL, 0), CLK_PCOM("adsp_clk", ADSP_CLK, NULL, 0), CLK_PCOM("cam_m_clk", CAM_M_CLK, NULL, 0), - CLK_PCOM("camif_pad_pclk", CAMIF_PAD_P_CLK, NULL, OFF), - CLK_PCOM("ebi1_clk", EBI1_CLK, NULL, CLK_MIN), + CLK_PCOM("ebi1_clk", EBI1_CLK, NULL, CLK_MIN | CLKFLAG_SHARED), CLK_PCOM("ecodec_clk", ECODEC_CLK, NULL, 0), - CLK_PCOM("emdh_clk", EMDH_CLK, NULL, OFF | CLK_MINMAX), - CLK_PCOM("emdh_pclk", EMDH_P_CLK, NULL, OFF), + CLK_PCOM("emdh_clk", EMDH_CLK, &msm_device_mddi1.dev, OFF | CLK_MINMAX), + CLK_PCOM("emdh_pclk", EMDH_P_CLK, &msm_device_mddi1.dev, OFF), CLK_PCOM("gp_clk", GP_CLK, NULL, 0), CLK_PCOM("grp_2d_clk", GRP_2D_CLK, NULL, 0), CLK_PCOM("grp_2d_pclk", GRP_2D_P_CLK, NULL, 0), @@ -144,44 +721,58 @@ struct clk msm_clocks_7x30[] = { CLK_7X30S("grp_src_clk", GRP_3D_SRC_CLK, GRP_3D_CLK, NULL, 0), CLK_PCOM("hdmi_clk", HDMI_CLK, NULL, 0), CLK_PCOM("imem_clk", IMEM_CLK, NULL, OFF), + CLK_PCOM("i2c_clk", I2C_CLK, &msm_device_i2c.dev, OFF), + CLK_PCOM("i2c_clk", I2C_2_CLK, &msm_device_i2c2.dev, OFF), CLK_PCOM("jpeg_clk", JPEG_CLK, NULL, OFF), CLK_PCOM("jpeg_pclk", JPEG_P_CLK, NULL, OFF), CLK_PCOM("lpa_codec_clk", LPA_CODEC_CLK, NULL, 0), CLK_PCOM("lpa_core_clk", LPA_CORE_CLK, NULL, 0), CLK_PCOM("lpa_pclk", LPA_P_CLK, NULL, 0), CLK_PCOM("mdc_clk", MDC_CLK, NULL, 0), - CLK_PCOM("mddi_clk", PMDH_CLK, NULL, OFF | CLK_MINMAX), - CLK_PCOM("mddi_pclk", PMDH_P_CLK, NULL, 0), - CLK_PCOM("mdp_clk", MDP_CLK, NULL, OFF), - CLK_PCOM("mdp_pclk", MDP_P_CLK, NULL, 0), - CLK_PCOM("mdp_lcdc_pclk_clk", MDP_LCDC_PCLK_CLK, NULL, 0), - CLK_PCOM("mdp_lcdc_pad_pclk_clk", MDP_LCDC_PAD_PCLK_CLK, NULL, 0), - CLK_PCOM("mdp_vsync_clk", MDP_VSYNC_CLK, NULL, 0), + CLK_PCOM("mddi_clk", PMDH_CLK, &msm_device_mddi0.dev, OFF | CLK_MINMAX), + CLK_PCOM("mddi_pclk", PMDH_P_CLK, &msm_device_mddi0.dev, OFF | CLK_MINMAX), + CLK_PCOM("mdp_clk", MDP_CLK, &msm_device_mdp.dev, OFF), + CLK_PCOM("mdp_pclk", MDP_P_CLK, &msm_device_mdp.dev, OFF), + CLK_PCOM("lcdc_pclk_clk", MDP_LCDC_PCLK_CLK, &msm_device_mdp.dev, 0), + CLK_PCOM("lcdc_pad_pclk_clk", MDP_LCDC_PAD_PCLK_CLK, &msm_device_mdp.dev, 0), + CLK_PCOM("mdp_vsync_clk", MDP_VSYNC_CLK, &msm_device_mdp.dev, 0), CLK_PCOM("mfc_clk", MFC_CLK, NULL, 0), CLK_PCOM("mfc_div2_clk", MFC_DIV2_CLK, NULL, 0), CLK_PCOM("mfc_pclk", MFC_P_CLK, NULL, 0), CLK_PCOM("mi2s_m_clk", MI2S_M_CLK, NULL, 0), CLK_PCOM("mi2s_s_clk", MI2S_S_CLK, NULL, 0), - CLK_PCOM("mi2s_codec_rx_m_clk", MI2S_CODEC_RX_M_CLK, NULL, 0), - CLK_PCOM("mi2s_codec_rx_s_clk", MI2S_CODEC_RX_S_CLK, NULL, 0), - CLK_PCOM("mi2s_codec_tx_m_clk", MI2S_CODEC_TX_M_CLK, NULL, 0), - CLK_PCOM("mi2s_codec_tx_s_clk", MI2S_CODEC_TX_S_CLK, NULL, 0), + CLK_PCOM("mi2s_codec_rx_mclk", MI2S_CODEC_RX_M_CLK, NULL, 0), + CLK_PCOM("mi2s_codec_rx_sclk", MI2S_CODEC_RX_S_CLK, NULL, 0), + CLK_PCOM("mi2s_codec_tx_mclk", MI2S_CODEC_TX_M_CLK, NULL, 0), + CLK_PCOM("mi2s_codec_tx_sclk", MI2S_CODEC_TX_S_CLK, NULL, 0), CLK_PCOM("pbus_clk", PBUS_CLK, NULL, CLK_MIN), CLK_PCOM("pcm_clk", PCM_CLK, NULL, 0), CLK_PCOM("rotator_clk", AXI_ROTATOR_CLK, NULL, 0), CLK_PCOM("rotator_imem_clk", ROTATOR_IMEM_CLK, NULL, OFF), CLK_PCOM("rotator_pclk", ROTATOR_P_CLK, NULL, OFF), CLK_PCOM("sdac_clk", SDAC_CLK, NULL, OFF), - CLK_PCOM("spi_clk", SPI_CLK, NULL, 0), - CLK_PCOM("spi_pclk", SPI_P_CLK, NULL, 0), + CLK_PCOM("sdc_clk", SDC1_CLK, &msm_device_sdc1.dev, OFF), + 
CLK_PCOM("sdc_pclk", SDC1_P_CLK, &msm_device_sdc1.dev, OFF), + CLK_PCOM("sdc_clk", SDC2_CLK, &msm_device_sdc2.dev, OFF), + CLK_PCOM("sdc_pclk", SDC2_P_CLK, &msm_device_sdc2.dev, OFF), + CLK_PCOM("sdc_clk", SDC3_CLK, &msm_device_sdc3.dev, OFF), + CLK_PCOM("sdc_pclk", SDC3_P_CLK, &msm_device_sdc3.dev, OFF), + CLK_PCOM("sdc_clk", SDC4_CLK, &msm_device_sdc4.dev, OFF), + CLK_PCOM("sdc_pclk", SDC4_P_CLK, &msm_device_sdc4.dev, OFF), + CLK_PCOM("spi_clk", SPI_CLK, &msm_device_spi.dev, 0), + CLK_PCOM("spi_pclk", SPI_P_CLK, &msm_device_spi.dev, 0), CLK_7X30S("tv_src_clk", TV_CLK, TV_ENC_CLK, NULL, 0), CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0), CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0), - CLK_PCOM("uart_clk", UART2_CLK, &msm_device_uart2.dev, 0), + CLK_PCOM("uart_clk", UART1_CLK, &msm_device_uart1.dev, OFF), + CLK_PCOM("uart_clk", UART2_CLK, &msm_device_uart2.dev, OFF), + CLK_PCOM("uart_clk", UART3_CLK, &msm_device_uart3.dev, OFF), + CLK_PCOM("uartdm_clk", UART1DM_CLK, &msm_device_uart_dm1.dev, OFF), + CLK_PCOM("uartdm_clk", UART2DM_CLK, &msm_device_uart_dm2.dev, OFF), CLK_PCOM("usb_phy_clk", USB_PHY_CLK, NULL, 0), CLK_PCOM("usb_hs_clk", USB_HS_CLK, NULL, OFF), CLK_PCOM("usb_hs_pclk", USB_HS_P_CLK, NULL, OFF), - CLK_PCOM("usb_hs_core_clk", USB_HS_CORE_CLK, NULL, OFF), + CLK_PCOM("usb_hs_core_clk", USB_HS_CORE_CLK, &msm_device_hsusb.dev, OFF), CLK_PCOM("usb_hs2_clk", USB_HS2_CLK, NULL, OFF), CLK_PCOM("usb_hs2_pclk", USB_HS2_P_CLK, NULL, OFF), CLK_PCOM("usb_hs2_core_clk", USB_HS2_CORE_CLK, NULL, OFF), @@ -194,6 +785,8 @@ struct clk msm_clocks_7x30[] = { CLK_PCOM("vfe_mdc_clk", VFE_MDC_CLK, NULL, 0), CLK_PCOM("vfe_pclk", VFE_P_CLK, NULL, OFF), CLK_PCOM("vpe_clk", VPE_CLK, NULL, 0), + CLK_PCOM("qup_clk", QUP_I2C_CLK, &msm_device_qup_i2c.dev, OFF), + CLK_PCOM("qup_pclk", QUP_I2C_P_CLK, &msm_device_qup_i2c.dev, OFF), /* 7x30 v2 hardware only. 
*/ CLK_PCOM("csi_clk", CSI0_CLK, NULL, 0), diff --git a/arch/arm/mach-msm/devices-qsd8x50.c b/arch/arm/mach-msm/devices-qsd8x50.c index a4b798f20ccb7..2aed947569932 100644 --- a/arch/arm/mach-msm/devices-qsd8x50.c +++ b/arch/arm/mach-msm/devices-qsd8x50.c @@ -13,9 +13,11 @@ * */ +#include #include #include - +#include +#include #include #include #include @@ -23,11 +25,250 @@ #include #include "devices.h" +#include "proc_comm.h" #include +#include +#include #include +#include +#include +#include +#ifdef CONFIG_PMIC8058 +#include +#endif + +int usb_phy_error; + +#define HSUSB_API_INIT_PHY_PROC 2 +#define HSUSB_API_PROG 0x30000064 +#define HSUSB_API_VERS MSM_RPC_VERS(1, 1) + +static void *usb_base; +#define MSM_USB_BASE ((unsigned)usb_base) +static unsigned ulpi_read(void __iomem *usb_base, unsigned reg) +{ + unsigned timeout = 100000; + + /* initiate read operation */ + writel(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg), + USB_ULPI_VIEWPORT); + + /* wait for completion */ + while ((readl(USB_ULPI_VIEWPORT) & ULPI_RUN) && (--timeout)) + cpu_relax(); + + if (timeout == 0) { + printk(KERN_ERR "ulpi_read: timeout %08x\n", + readl(USB_ULPI_VIEWPORT)); + return 0xffffffff; + } + return ULPI_DATA_READ(readl(USB_ULPI_VIEWPORT)); +} + +static int ulpi_write(void __iomem *usb_base, unsigned val, unsigned reg) +{ + unsigned timeout = 10000; + + /* initiate write operation */ + writel(ULPI_RUN | ULPI_WRITE | + ULPI_ADDR(reg) | ULPI_DATA(val), + USB_ULPI_VIEWPORT); + + /* wait for completion */ + while ((readl(USB_ULPI_VIEWPORT) & ULPI_RUN) && (--timeout)) + cpu_relax(); + + if (timeout == 0) { + printk(KERN_ERR "ulpi_write: timeout\n"); + return -1; + } + + return 0; +} + +#define CLKRGM_APPS_RESET_USBH 37 +#define CLKRGM_APPS_RESET_USB_PHY 34 +static void msm_hsusb_apps_reset_link(int reset) +{ + int ret; + unsigned usb_id = CLKRGM_APPS_RESET_USBH; + + if (reset) + ret = msm_proc_comm(PCOM_CLK_REGIME_SEC_RESET_ASSERT, + &usb_id, NULL); + else + ret = msm_proc_comm(PCOM_CLK_REGIME_SEC_RESET_DEASSERT, + &usb_id, NULL); + if (ret) + printk(KERN_INFO "%s: Cannot set reset to %d (%d)\n", + __func__, reset, ret); +} + +static void msm_hsusb_apps_reset_phy(void) +{ + int ret; + unsigned usb_phy_id = CLKRGM_APPS_RESET_USB_PHY; + + ret = msm_proc_comm(PCOM_CLK_REGIME_SEC_RESET_ASSERT, + &usb_phy_id, NULL); + if (ret) { + printk(KERN_INFO "%s: Cannot assert (%d)\n", __func__, ret); + return; + } + msleep(1); + ret = msm_proc_comm(PCOM_CLK_REGIME_SEC_RESET_DEASSERT, + &usb_phy_id, NULL); + if (ret) { + printk(KERN_INFO "%s: Cannot assert (%d)\n", __func__, ret); + return; + } +} + +#define ULPI_VERIFY_MAX_LOOP_COUNT 3 +static int msm_hsusb_phy_verify_access(void __iomem *usb_base) +{ + int temp; + + for (temp = 0; temp < ULPI_VERIFY_MAX_LOOP_COUNT; temp++) { + if (ulpi_read(usb_base, ULPI_DEBUG) != (unsigned)-1) + break; + msm_hsusb_apps_reset_phy(); + } + + if (temp == ULPI_VERIFY_MAX_LOOP_COUNT) { + pr_err("%s: ulpi read failed for %d times\n", + __func__, ULPI_VERIFY_MAX_LOOP_COUNT); + return -1; + } + + return 0; +} + +static unsigned msm_hsusb_ulpi_read_with_reset(void __iomem *usb_base, unsigned reg) +{ + int temp; + unsigned res; + + for (temp = 0; temp < ULPI_VERIFY_MAX_LOOP_COUNT; temp++) { + res = ulpi_read(usb_base, reg); + if (res != -1) + return res; + msm_hsusb_apps_reset_phy(); + } + + pr_err("%s: ulpi read failed for %d times\n", + __func__, ULPI_VERIFY_MAX_LOOP_COUNT); + + return -1; +} + +static int msm_hsusb_ulpi_write_with_reset(void __iomem *usb_base, + unsigned val, unsigned reg) +{ + int temp; + 
int res; + + for (temp = 0; temp < ULPI_VERIFY_MAX_LOOP_COUNT; temp++) { + res = ulpi_write(usb_base, val, reg); + if (!res) + return 0; + msm_hsusb_apps_reset_phy(); + } + + pr_err("%s: ulpi write failed for %d times\n", + __func__, ULPI_VERIFY_MAX_LOOP_COUNT); + return -1; +} + +static int msm_hsusb_phy_caliberate(void __iomem *usb_base) +{ + int ret; + unsigned res; + + ret = msm_hsusb_phy_verify_access(usb_base); + if (ret) + return -ETIMEDOUT; + + res = msm_hsusb_ulpi_read_with_reset(usb_base, ULPI_FUNC_CTRL_CLR); + if (res == -1) + return -ETIMEDOUT; + + res = msm_hsusb_ulpi_write_with_reset(usb_base, + res | ULPI_SUSPENDM, + ULPI_FUNC_CTRL_CLR); + if (res) + return -ETIMEDOUT; + + msm_hsusb_apps_reset_phy(); + + return msm_hsusb_phy_verify_access(usb_base); +} + +#define USB_LINK_RESET_TIMEOUT (msecs_to_jiffies(10)) +void msm_hsusb_8x50_phy_reset(void) +{ + u32 temp; + unsigned long timeout; + printk(KERN_INFO "msm_hsusb_phy_reset\n"); + usb_base = ioremap(MSM_HSUSB_PHYS, 4096); + + msm_hsusb_apps_reset_link(1); + msm_hsusb_apps_reset_phy(); + msm_hsusb_apps_reset_link(0); + + /* select ULPI phy */ + temp = (readl(USB_PORTSC) & ~PORTSC_PTS_MASK); + writel(temp | PORTSC_PTS_ULPI, USB_PORTSC); + + if (msm_hsusb_phy_caliberate(usb_base)) { + usb_phy_error = 1; + return; + } + + /* soft reset phy */ + writel(USBCMD_RESET, USB_USBCMD); + timeout = jiffies + USB_LINK_RESET_TIMEOUT; + while (readl(USB_USBCMD) & USBCMD_RESET) { + if (time_after(jiffies, timeout)) { + pr_err("usb link reset timeout\n"); + break; + } + msleep(1); + } + usb_phy_error = 0; + + return; +} + +static struct resource resources_uart1[] = { + { + .start = INT_UART1, + .end = INT_UART1, + .flags = IORESOURCE_IRQ, + }, + { + .start = MSM_UART1_PHYS, + .end = MSM_UART1_PHYS + MSM_UART1_SIZE - 1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct resource resources_uart2[] = { + { + .start = INT_UART2, + .end = INT_UART2, + .flags = IORESOURCE_IRQ, + }, + { + .start = MSM_UART2_PHYS, + .end = MSM_UART2_PHYS + MSM_UART2_SIZE - 1, + .flags = IORESOURCE_MEM, + }, +}; + static struct resource resources_uart3[] = { { .start = INT_UART3, @@ -41,6 +282,20 @@ static struct resource resources_uart3[] = { }, }; +struct platform_device msm_device_uart1 = { + .name = "msm_serial", + .id = 0, + .num_resources = ARRAY_SIZE(resources_uart1), + .resource = resources_uart1, +}; + +struct platform_device msm_device_uart2 = { + .name = "msm_serial", + .id = 1, + .num_resources = ARRAY_SIZE(resources_uart2), + .resource = resources_uart2, +}; + struct platform_device msm_device_uart3 = { .name = "msm_serial", .id = 2, @@ -53,6 +308,542 @@ struct platform_device msm_device_smd = { .id = -1, }; +static struct resource msm_uart1_dm_resources[] = { + { + .start = MSM_UART1DM_PHYS, + .end = MSM_UART1DM_PHYS + PAGE_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_UART1DM_IRQ, + .end = INT_UART1DM_IRQ, + .flags = IORESOURCE_IRQ, + }, + { + .start = INT_UART1DM_RX, + .end = INT_UART1DM_RX, + .flags = IORESOURCE_IRQ, + }, + { + .start = DMOV_HSUART1_TX_CHAN, + .end = DMOV_HSUART1_RX_CHAN, + .name = "uartdm_channels", + .flags = IORESOURCE_DMA, + }, + { + .start = DMOV_HSUART1_TX_CRCI, + .end = DMOV_HSUART1_RX_CRCI, + .name = "uartdm_crci", + .flags = IORESOURCE_DMA, + }, +}; + +static u64 msm_uart_dm1_dma_mask = DMA_BIT_MASK(32); + +struct platform_device msm_device_uart_dm1 = { + .name = "msm_serial_hs", + .id = 0, + .num_resources = ARRAY_SIZE(msm_uart1_dm_resources), + .resource = msm_uart1_dm_resources, + .dev = { + .dma_mask = 
&msm_uart_dm1_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, +}; + +static struct resource msm_uart2_dm_resources[] = { + { + .start = MSM_UART2DM_PHYS, + .end = MSM_UART2DM_PHYS + PAGE_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_UART2DM_IRQ, + .end = INT_UART2DM_IRQ, + .flags = IORESOURCE_IRQ, + }, + { + .start = INT_UART2DM_RX, + .end = INT_UART2DM_RX, + .flags = IORESOURCE_IRQ, + }, + { + .start = DMOV_HSUART2_TX_CHAN, + .end = DMOV_HSUART2_RX_CHAN, + .name = "uartdm_channels", + .flags = IORESOURCE_DMA, + }, + { + .start = DMOV_HSUART2_TX_CRCI, + .end = DMOV_HSUART2_RX_CRCI, + .name = "uartdm_crci", + .flags = IORESOURCE_DMA, + }, +}; + +static u64 msm_uart_dm2_dma_mask = DMA_BIT_MASK(32); + +struct platform_device msm_device_uart_dm2 = { + .name = "msm_serial_hs", + .id = 1, + .num_resources = ARRAY_SIZE(msm_uart2_dm_resources), + .resource = msm_uart2_dm_resources, + .dev = { + .dma_mask = &msm_uart_dm2_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, +}; + +static struct resource resources_i2c[] = { + { + .start = MSM_I2C_PHYS, + .end = MSM_I2C_PHYS + MSM_I2C_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_PWB_I2C, + .end = INT_PWB_I2C, + .flags = IORESOURCE_IRQ, + }, +}; + +struct platform_device msm_device_i2c = { + .name = "msm_i2c", + .id = 0, + .num_resources = ARRAY_SIZE(resources_i2c), + .resource = resources_i2c, +}; + +#define GPIO_I2C_CLK 95 +#define GPIO_I2C_DAT 96 +static int gpio_i2c_clk = -1; +static int gpio_i2c_dat = -1; +void msm_set_i2c_mux(bool gpio, int *gpio_clk, int *gpio_dat) +{ + unsigned id; + + if (gpio_i2c_clk < 0) { + gpio_request(GPIO_I2C_CLK, "i2c-clk"); + gpio_i2c_clk = GPIO_I2C_CLK; + } + if (gpio_i2c_dat < 0) { + gpio_request(GPIO_I2C_DAT, "i2c-dat"); + gpio_i2c_dat = GPIO_I2C_DAT; + } + + if (gpio) { + id = PCOM_GPIO_CFG(GPIO_I2C_CLK, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA); + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); + id = PCOM_GPIO_CFG(GPIO_I2C_DAT, 0, GPIO_OUTPUT, + GPIO_NO_PULL, GPIO_2MA); + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); + *gpio_clk = GPIO_I2C_CLK; + *gpio_dat = GPIO_I2C_DAT; + } else { + id = PCOM_GPIO_CFG(GPIO_I2C_CLK, 1, GPIO_INPUT, + GPIO_NO_PULL, GPIO_8MA); + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); + id = PCOM_GPIO_CFG(GPIO_I2C_DAT , 1, GPIO_INPUT, + GPIO_NO_PULL, GPIO_8MA); + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); + } +} + +struct flash_platform_data msm_nand_data = { + .parts = NULL, + .nr_parts = 0, +}; + +static struct resource resources_nand[] = { + [0] = { + .start = 7, + .end = 7, + .flags = IORESOURCE_DMA, + }, +}; + +struct platform_device msm_device_nand = { + .name = "msm_nand", + .id = -1, + .num_resources = ARRAY_SIZE(resources_nand), + .resource = resources_nand, + .dev = { + .platform_data = &msm_nand_data, + }, +}; + +static struct resource resources_sdc1[] = { + { + .start = MSM_SDC1_PHYS, + .end = MSM_SDC1_PHYS + MSM_SDC1_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_SDC1_0, + .end = INT_SDC1_0, + .flags = IORESOURCE_IRQ, + .name = "cmd_irq", + }, + { + .start = INT_SDC1_1, + .end = INT_SDC1_1, + .flags = IORESOURCE_IRQ, + .name = "pio_irq", + }, + { + .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED, + .name = "status_irq" + }, + { + .start = 8, + .end = 8, + .flags = IORESOURCE_DMA, + }, +}; + +static struct resource resources_sdc2[] = { + { + .start = MSM_SDC2_PHYS, + .end = MSM_SDC2_PHYS + MSM_SDC2_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_SDC2_0, + .end = INT_SDC2_0, + .flags = 
IORESOURCE_IRQ, + .name = "cmd_irq", + }, + { + .start = INT_SDC2_1, + .end = INT_SDC2_1, + .flags = IORESOURCE_IRQ, + .name = "pio_irq", + }, + { + .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED, + .name = "status_irq" + }, + { + .start = 8, + .end = 8, + .flags = IORESOURCE_DMA, + }, +}; + +static struct resource resources_sdc3[] = { + { + .start = MSM_SDC3_PHYS, + .end = MSM_SDC3_PHYS + MSM_SDC3_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_SDC3_0, + .end = INT_SDC3_0, + .flags = IORESOURCE_IRQ, + .name = "cmd_irq", + }, + { + .start = INT_SDC3_1, + .end = INT_SDC3_1, + .flags = IORESOURCE_IRQ, + .name = "pio_irq", + }, + { + .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED, + .name = "status_irq" + }, + { + .start = 8, + .end = 8, + .flags = IORESOURCE_DMA, + }, +}; + +static struct resource resources_sdc4[] = { + { + .start = MSM_SDC4_PHYS, + .end = MSM_SDC4_PHYS + MSM_SDC4_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_SDC4_0, + .end = INT_SDC4_0, + .flags = IORESOURCE_IRQ, + .name = "cmd_irq", + }, + { + .start = INT_SDC4_1, + .end = INT_SDC4_1, + .flags = IORESOURCE_IRQ, + .name = "pio_irq", + }, + { + .flags = IORESOURCE_IRQ | IORESOURCE_DISABLED, + .name = "status_irq" + }, + { + .start = 8, + .end = 8, + .flags = IORESOURCE_DMA, + }, +}; + +struct platform_device msm_device_sdc1 = { + .name = "msm_sdcc", + .id = 1, + .num_resources = ARRAY_SIZE(resources_sdc1), + .resource = resources_sdc1, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, +}; + +struct platform_device msm_device_sdc2 = { + .name = "msm_sdcc", + .id = 2, + .num_resources = ARRAY_SIZE(resources_sdc2), + .resource = resources_sdc2, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, +}; + +struct platform_device msm_device_sdc3 = { + .name = "msm_sdcc", + .id = 3, + .num_resources = ARRAY_SIZE(resources_sdc3), + .resource = resources_sdc3, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, +}; + +struct platform_device msm_device_sdc4 = { + .name = "msm_sdcc", + .id = 4, + .num_resources = ARRAY_SIZE(resources_sdc4), + .resource = resources_sdc4, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, +}; + +static struct platform_device *msm_sdcc_devices[] __initdata = { + &msm_device_sdc1, + &msm_device_sdc2, + &msm_device_sdc3, + &msm_device_sdc4, +}; + +int __init msm_add_sdcc(unsigned int controller, struct msm_mmc_platform_data *plat, + unsigned int stat_irq, unsigned long stat_irq_flags) +{ + struct platform_device *pdev; + struct resource *res; + + if (controller < 1 || controller > 4) + return -EINVAL; + + pdev = msm_sdcc_devices[controller-1]; + pdev->dev.platform_data = plat; + + res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "status_irq"); + if (!res) + return -EINVAL; + else if (stat_irq) { + res->start = res->end = stat_irq; + res->flags &= ~IORESOURCE_DISABLED; + res->flags |= stat_irq_flags; + } + + return platform_device_register(pdev); +} + +static struct resource resources_mddi0[] = { + { + .start = MSM_PMDH_PHYS, + .end = MSM_PMDH_PHYS + MSM_PMDH_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_MDDI_PRI, + .end = INT_MDDI_PRI, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource resources_mddi1[] = { + { + .start = MSM_EMDH_PHYS, + .end = MSM_EMDH_PHYS + MSM_EMDH_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = INT_MDDI_EXT, + .end = INT_MDDI_EXT, + .flags = IORESOURCE_IRQ, + }, +}; + +struct platform_device msm_device_mddi0 = { + .name = "msm_mddi", + .id = 0, + .num_resources = ARRAY_SIZE(resources_mddi0), + .resource = 
resources_mddi0, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, +}; + +struct platform_device msm_device_mddi1 = { + .name = "msm_mddi", + .id = 1, + .num_resources = ARRAY_SIZE(resources_mddi1), + .resource = resources_mddi1, + .dev = { + .coherent_dma_mask = 0xffffffff, + }, +}; + +static struct resource resources_mdp[] = { + { + .start = MSM_MDP_PHYS, + .end = MSM_MDP_PHYS + MSM_MDP_SIZE - 1, + .name = "mdp", + .flags = IORESOURCE_MEM + }, + { + .start = INT_MDP, + .end = INT_MDP, + .flags = IORESOURCE_IRQ, + }, +}; + +struct platform_device msm_device_mdp = { + .name = "msm_mdp", + .id = 0, + .num_resources = ARRAY_SIZE(resources_mdp), + .resource = resources_mdp, +}; + +static struct resource resources_tssc[] = { + { + .start = MSM_TSSC_PHYS, + .end = MSM_TSSC_PHYS + MSM_TSSC_SIZE - 1, + .name = "tssc", + .flags = IORESOURCE_MEM, + }, + { + .start = INT_TCHSCRN1, + .end = INT_TCHSCRN1, + .name = "tssc1", + .flags = IORESOURCE_IRQ | IRQF_TRIGGER_RISING, + }, + { + .start = INT_TCHSCRN2, + .end = INT_TCHSCRN2, + .name = "tssc2", + .flags = IORESOURCE_IRQ | IRQF_TRIGGER_RISING, + }, +}; + +struct platform_device msm_device_touchscreen = { + .name = "msm_touchscreen", + .id = 0, + .num_resources = ARRAY_SIZE(resources_tssc), + .resource = resources_tssc, +}; + +#if defined(CONFIG_ARCH_QSD8X50) +static struct resource resources_spi[] = { + { + .name = "spi_base", + .start = MSM_SPI_PHYS, + .end = MSM_SPI_PHYS + MSM_SPI_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .name = "spi_irq_in", + .start = INT_SPI_INPUT, + .end = INT_SPI_INPUT, + .flags = IORESOURCE_IRQ, + }, + { + .name = "spi_irq_out", + .start = INT_SPI_OUTPUT, + .end = INT_SPI_OUTPUT, + .flags = IORESOURCE_IRQ, + }, + { + .name = "spi_irq_err", + .start = INT_SPI_ERROR, + .end = INT_SPI_ERROR, + .flags = IORESOURCE_IRQ, + }, +#if defined(CONFIG_SPI_QSD) + { + .name = "spi_clk", + .start = 17, + .end = 1, + .flags = IORESOURCE_IRQ, + }, + { + .name = "spi_mosi", + .start = 18, + .end = 1, + .flags = IORESOURCE_IRQ, + }, + { + .name = "spi_miso", + .start = 19, + .end = 1, + .flags = IORESOURCE_IRQ, + }, + { + .name = "spi_cs0", + .start = 20, + .end = 1, + .flags = IORESOURCE_IRQ, + }, + { + .name = "spi_pwr", + .start = 21, + .end = 0, + .flags = IORESOURCE_IRQ, + }, + { + .name = "spi_irq_cs0", + .start = 22, + .end = 0, + .flags = IORESOURCE_IRQ, + }, +#endif +}; + +struct platform_device msm_device_spi = { +#if defined(CONFIG_SPI_QSD) + .name = "spi_qsd", +#else + .name = "msm_spi", +#endif + .id = 0, + .num_resources = ARRAY_SIZE(resources_spi), + .resource = resources_spi, +}; +#endif + +#define CLK_ALL(name, id, dev, flags) \ + CLOCK(name, id, dev, flags, CLKFLAG_ARCH_ALL) +#define CLK_7X00A(name, id, dev, flags) \ + CLOCK(name, id, dev, flags, CLKFLAG_ARCH_MSM7X00A) +#define CLK_8X50(name, id, dev, flags) \ + CLOCK(name, id, dev, flags, CLKFLAG_ARCH_QSD8X50) + +#define OFF CLKFLAG_AUTO_OFF +#define MINMAX (CLKFLAG_USE_MIN_TO_SET | CLKFLAG_USE_MAX_TO_SET) +#define USE_MIN (CLKFLAG_USE_MIN_TO_SET | CLKFLAG_SHARED) + static struct resource resources_otg[] = { { .start = MSM_HSUSB_PHYS, @@ -126,32 +917,45 @@ struct platform_device msm_device_hsusb_host = { struct clk msm_clocks_8x50[] = { CLK_PCOM("adm_clk", ADM_CLK, NULL, 0), - CLK_PCOM("ebi1_clk", EBI1_CLK, NULL, CLK_MIN), + CLK_PCOM("ebi1_clk", EBI1_CLK, NULL, CLK_MIN | CLKFLAG_SHARED), CLK_PCOM("ebi2_clk", EBI2_CLK, NULL, 0), CLK_PCOM("ecodec_clk", ECODEC_CLK, NULL, 0), - CLK_PCOM("emdh_clk", EMDH_CLK, NULL, OFF | CLK_MINMAX), + CLK_PCOM("mddi_clk", EMDH_CLK, 
&msm_device_mddi1.dev, OFF | CLK_MINMAX), CLK_PCOM("gp_clk", GP_CLK, NULL, 0), CLK_PCOM("grp_clk", GRP_3D_CLK, NULL, 0), + CLK_PCOM("i2c_clk", I2C_CLK, &msm_device_i2c.dev, 0), CLK_PCOM("icodec_rx_clk", ICODEC_RX_CLK, NULL, 0), CLK_PCOM("icodec_tx_clk", ICODEC_TX_CLK, NULL, 0), CLK_PCOM("imem_clk", IMEM_CLK, NULL, OFF), CLK_PCOM("mdc_clk", MDC_CLK, NULL, 0), - CLK_PCOM("mddi_clk", PMDH_CLK, NULL, OFF | CLK_MINMAX), - CLK_PCOM("mdp_clk", MDP_CLK, NULL, OFF), - CLK_PCOM("mdp_lcdc_pclk_clk", MDP_LCDC_PCLK_CLK, NULL, 0), - CLK_PCOM("mdp_lcdc_pad_pclk_clk", MDP_LCDC_PAD_PCLK_CLK, NULL, 0), - CLK_PCOM("mdp_vsync_clk", MDP_VSYNC_CLK, NULL, 0), + CLK_PCOM("mddi_clk", PMDH_CLK, &msm_device_mddi0.dev, OFF | CLK_MINMAX), + CLK_PCOM("mdp_clk", MDP_CLK, &msm_device_mdp.dev, OFF), + CLK_PCOM("lcdc_pclk_clk", MDP_LCDC_PCLK_CLK, &msm_device_mdp.dev, 0), + CLK_PCOM("lcdc_pad_pclk_clk", MDP_LCDC_PAD_PCLK_CLK, &msm_device_mdp.dev, 0), + CLK_PCOM("mdp_vsync_clk", MDP_VSYNC_CLK, &msm_device_mdp.dev, 0), CLK_PCOM("pbus_clk", PBUS_CLK, NULL, CLK_MIN), CLK_PCOM("pcm_clk", PCM_CLK, NULL, 0), CLK_PCOM("sdac_clk", SDAC_CLK, NULL, OFF), - CLK_PCOM("spi_clk", SPI_CLK, NULL, 0), + CLK_PCOM("spi_clk", SPI_CLK, &msm_device_spi.dev, 0), + CLK_PCOM("sdc_clk", SDC1_CLK, &msm_device_sdc1.dev, OFF), + CLK_PCOM("sdc_pclk", SDC1_P_CLK, &msm_device_sdc1.dev, OFF), + CLK_PCOM("sdc_clk", SDC2_CLK, &msm_device_sdc2.dev, OFF), + CLK_PCOM("sdc_pclk", SDC2_P_CLK, &msm_device_sdc2.dev, OFF), + CLK_PCOM("sdc_clk", SDC3_CLK, &msm_device_sdc3.dev, OFF), + CLK_PCOM("sdc_pclk", SDC3_P_CLK, &msm_device_sdc3.dev, OFF), + CLK_PCOM("sdc_clk", SDC4_CLK, &msm_device_sdc4.dev, OFF), + CLK_PCOM("sdc_pclk", SDC4_P_CLK, &msm_device_sdc4.dev, OFF), CLK_PCOM("tsif_clk", TSIF_CLK, NULL, 0), CLK_PCOM("tsif_ref_clk", TSIF_REF_CLK, NULL, 0), CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0), CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0), + CLK_PCOM("uart_clk", UART1_CLK, &msm_device_uart1.dev, OFF), + CLK_PCOM("uart_clk", UART2_CLK, &msm_device_uart2.dev, OFF), CLK_PCOM("uart_clk", UART3_CLK, &msm_device_uart3.dev, OFF), - CLK_PCOM("usb_hs_clk", USB_HS_CLK, NULL, OFF), - CLK_PCOM("usb_hs_pclk", USB_HS_P_CLK, NULL, OFF), + CLK_PCOM("uartdm_clk", UART1DM_CLK, &msm_device_uart_dm1.dev, OFF), + CLK_PCOM("uartdm_clk", UART2DM_CLK, &msm_device_uart_dm2.dev, OFF), + CLK_PCOM("usb_hs_clk", USB_HS_CLK, &msm_device_hsusb.dev, OFF), + CLK_PCOM("usb_hs_pclk", USB_HS_P_CLK, &msm_device_hsusb.dev, OFF), CLK_PCOM("usb_otg_clk", USB_OTG_CLK, NULL, 0), CLK_PCOM("vdc_clk", VDC_CLK, NULL, OFF | CLK_MIN), CLK_PCOM("vfe_clk", VFE_CLK, NULL, OFF), @@ -162,7 +966,14 @@ struct clk msm_clocks_8x50[] = { CLK_PCOM("usb_hs3_clk", USB_HS3_CLK, NULL, OFF), CLK_PCOM("usb_hs3_pclk", USB_HS3_P_CLK, NULL, OFF), CLK_PCOM("usb_phy_clk", USB_PHY_CLK, NULL, 0), + }; +void msm_i2c_gpio_init(void) +{ + gpio_request(GPIO_I2C_CLK, "i2c_clk"); + gpio_request(GPIO_I2C_DAT, "i2c_data"); +} + unsigned msm_num_clocks_8x50 = ARRAY_SIZE(msm_clocks_8x50); diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h index 87c70bfce2bde..460590f1b5702 100644 --- a/arch/arm/mach-msm/devices.h +++ b/arch/arm/mach-msm/devices.h @@ -22,6 +22,9 @@ extern struct platform_device msm_device_uart1; extern struct platform_device msm_device_uart2; extern struct platform_device msm_device_uart3; +extern struct platform_device msm_device_uart_dm1; +extern struct platform_device msm_device_uart_dm2; + extern struct platform_device msm_device_sdc1; extern struct platform_device msm_device_sdc2; extern struct 
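/*
 * Note on the clock table above (illustrative, not taken from this patch):
 * several entries now share a name ("sdc_clk", "uart_clk", "mddi_clk") and are
 * told apart by the device pointer, so each driver looks its clock up against
 * its own platform device. Driver-side sketch, error handling omitted:
 *
 *    struct clk *clk = clk_get(&pdev->dev, "sdc_clk");
 *
 *    clk_set_rate(clk, 144000);
 *    clk_enable(clk);
 *    ...
 *    clk_disable(clk);
 *    clk_put(clk);
 */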
platform_device msm_device_sdc3; @@ -32,14 +35,23 @@ extern struct platform_device msm_device_otg; extern struct platform_device msm_device_hsusb_host; extern struct platform_device msm_device_i2c; +extern struct platform_device msm_device_i2c2; + +extern struct platform_device msm_device_qup_i2c; extern struct platform_device msm_device_smd; extern struct platform_device msm_device_nand; +extern struct platform_device msm_device_mddi0; +extern struct platform_device msm_device_mddi1; +extern struct platform_device msm_device_mdp; extern struct platform_device msm_device_mddi0; extern struct platform_device msm_device_mddi1; extern struct platform_device msm_device_mdp; +extern struct platform_device msm_device_touchscreen; +extern struct platform_device msm_device_spi; +extern struct platform_device msm_device_ssbi_pmic; extern struct clk msm_clocks_7x01a[]; extern unsigned msm_num_clocks_7x01a; @@ -49,5 +61,8 @@ extern unsigned msm_num_clocks_7x30; extern struct clk msm_clocks_8x50[]; extern unsigned msm_num_clocks_8x50; +extern struct platform_device msm_device_vidc_720p; + +extern struct platform_device msm_footswitch; #endif diff --git a/arch/arm/mach-msm/devices_htc.c b/arch/arm/mach-msm/devices_htc.c new file mode 100644 index 0000000000000..a69053f6692ed --- /dev/null +++ b/arch/arm/mach-msm/devices_htc.c @@ -0,0 +1,357 @@ +/* linux/arch/arm/mach-msm/devices.c + * + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2007-2009 HTC Corporation. + * Author: Thomas Tsai + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +#include +#include +#include +#include "devices.h" +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if 0 +struct platform_device *devices[] __initdata = { + &msm_device_nand, + &msm_device_smd, + &msm_device_i2c, +}; + +void __init msm_add_devices(void) +{ + platform_add_devices(devices, ARRAY_SIZE(devices)); +} +#endif + +#define HSUSB_API_INIT_PHY_PROC 2 +#define HSUSB_API_PROG 0x30000064 +#define HSUSB_API_VERS MSM_RPC_VERS(1,1) + +static void internal_phy_reset(void) +{ + struct msm_rpc_endpoint *usb_ep; + int rc; + struct hsusb_phy_start_req { + struct rpc_request_hdr hdr; + } req; + + printk(KERN_INFO "msm_hsusb_phy_reset\n"); + + usb_ep = msm_rpc_connect(HSUSB_API_PROG, HSUSB_API_VERS, 0); + if (IS_ERR(usb_ep)) { + printk(KERN_ERR "%s: init rpc failed! error: %ld\n", + __func__, PTR_ERR(usb_ep)); + goto close; + } + rc = msm_rpc_call(usb_ep, HSUSB_API_INIT_PHY_PROC, + &req, sizeof(req), 5 * HZ); + if (rc < 0) + printk(KERN_ERR "%s: rpc call failed! 
(%d)\n", __func__, rc); + +close: + msm_rpc_close(usb_ep); +} + +/* adjust eye diagram, disable vbusvalid interrupts */ +static int hsusb_phy_init_seq[] = { 0x40, 0x31, 0x1D, 0x0D, 0x1D, 0x10, -1 }; + +struct msm_hsusb_platform_data msm_hsusb_pdata = { + .phy_reset = internal_phy_reset, + .phy_init_seq = hsusb_phy_init_seq, + .usb_connected = notify_usb_connected, +}; + +static struct usb_mass_storage_platform_data mass_storage_pdata = { + .nluns = 1, + .vendor = "HTC ", + .product = "Android Phone ", + .release = 0x0100, +}; + +static struct platform_device usb_mass_storage_device = { + .name = "usb_mass_storage", + .id = -1, + .dev = { + .platform_data = &mass_storage_pdata, + }, +}; + +#ifdef CONFIG_USB_ANDROID_RNDIS +static struct usb_ether_platform_data rndis_pdata = { + /* ethaddr is filled by board_serialno_setup */ + .vendorID = 0x0bb4, + .vendorDescr = "HTC", +}; + +static struct platform_device rndis_device = { + .name = "rndis", + .id = -1, + .dev = { + .platform_data = &rndis_pdata, + }, +}; +#endif + +static char *usb_functions_ums[] = { + "usb_mass_storage", +}; + +static char *usb_functions_ums_adb[] = { + "usb_mass_storage", + "adb", +}; + +static char *usb_functions_rndis[] = { + "rndis", +}; + +static char *usb_functions_rndis_adb[] = { + "rndis", + "adb", +}; + +static char *usb_functions_all[] = { +#ifdef CONFIG_USB_ANDROID_RNDIS + "rndis", +#endif + "usb_mass_storage", + "adb", +#ifdef CONFIG_USB_ANDROID_ACM + "acm", +#endif +}; + +static struct android_usb_product usb_products[] = { + { + .product_id = 0x0c01, + .num_functions = ARRAY_SIZE(usb_functions_ums), + .functions = usb_functions_ums, + }, + { + .product_id = 0x0c02, + .num_functions = ARRAY_SIZE(usb_functions_ums_adb), + .functions = usb_functions_ums_adb, + }, + { + .product_id = 0x0ffe, + .num_functions = ARRAY_SIZE(usb_functions_rndis), + .functions = usb_functions_rndis, + }, + { + .product_id = 0x0ffc, + .num_functions = ARRAY_SIZE(usb_functions_rndis_adb), + .functions = usb_functions_rndis_adb, + }, +}; + +static struct android_usb_platform_data android_usb_pdata = { + .vendor_id = 0x0bb4, + .product_id = 0x0c01, + .version = 0x0100, + .product_name = "Android Phone", + .manufacturer_name = "HTC", + .num_products = ARRAY_SIZE(usb_products), + .products = usb_products, + .num_functions = ARRAY_SIZE(usb_functions_all), + .functions = usb_functions_all, +}; + +static struct platform_device android_usb_device = { + .name = "android_usb", + .id = -1, + .dev = { + .platform_data = &android_usb_pdata, + }, +}; + +void __init msm_add_usb_devices(void (*phy_reset) (void)) +{ + /* setup */ + android_usb_pdata.serial_number = board_serialno(); + + if (phy_reset) + msm_hsusb_pdata.phy_reset = phy_reset; + msm_device_hsusb.dev.platform_data = &msm_hsusb_pdata; + platform_device_register(&msm_device_hsusb); +#ifdef CONFIG_USB_ANDROID_RNDIS + platform_device_register(&rndis_device); +#endif + platform_device_register(&usb_mass_storage_device); + platform_device_register(&android_usb_device); +} + +static struct android_pmem_platform_data pmem_pdata = { + .name = "pmem", + .no_allocator = 1, + .cached = 1, +}; + +static struct android_pmem_platform_data pmem_adsp_pdata = { + .name = "pmem_adsp", + .no_allocator = 0, + .cached = 0, +}; + +static struct android_pmem_platform_data pmem_camera_pdata = { + .name = "pmem_camera", + .no_allocator = 1, + .cached = 0, +}; + +static struct platform_device pmem_device = { + .name = "android_pmem", + .id = 0, + .dev = { .platform_data = &pmem_pdata }, +}; + +static struct 
platform_device pmem_adsp_device = { + .name = "android_pmem", + .id = 1, + .dev = { .platform_data = &pmem_adsp_pdata }, +}; + +static struct platform_device pmem_camera_device = { + .name = "android_pmem", + .id = 2, + .dev = { .platform_data = &pmem_camera_pdata }, +}; + +static struct resource ram_console_resource[] = { + { + .flags = IORESOURCE_MEM, + } +}; + +static struct platform_device ram_console_device = { + .name = "ram_console", + .id = -1, + .num_resources = ARRAY_SIZE(ram_console_resource), + .resource = ram_console_resource, +}; + +static struct resource resources_hw3d[] = { + { + .start = 0xA0000000, + .end = 0xA00fffff, + .flags = IORESOURCE_MEM, + .name = "regs", + }, + { + .flags = IORESOURCE_MEM, + .name = "smi", + }, + { + .flags = IORESOURCE_MEM, + .name = "ebi", + }, + { + .start = INT_GRAPHICS, + .end = INT_GRAPHICS, + .flags = IORESOURCE_IRQ, + .name = "gfx", + }, +}; + +static struct platform_device hw3d_device = { + .name = "msm_hw3d", + .id = 0, + .num_resources = ARRAY_SIZE(resources_hw3d), + .resource = resources_hw3d, +}; + +void __init msm_add_mem_devices(struct msm_pmem_setting *setting) +{ + if (setting->pmem_size) { + pmem_pdata.start = setting->pmem_start; + pmem_pdata.size = setting->pmem_size; + platform_device_register(&pmem_device); + } + + if (setting->pmem_adsp_size) { + pmem_adsp_pdata.start = setting->pmem_adsp_start; + pmem_adsp_pdata.size = setting->pmem_adsp_size; + platform_device_register(&pmem_adsp_device); + } + + if (setting->pmem_gpu0_size && setting->pmem_gpu1_size) { + struct resource *res; + + res = platform_get_resource_byname(&hw3d_device, IORESOURCE_MEM, + "smi"); + res->start = setting->pmem_gpu0_start; + res->end = res->start + setting->pmem_gpu0_size - 1; + + res = platform_get_resource_byname(&hw3d_device, IORESOURCE_MEM, + "ebi"); + res->start = setting->pmem_gpu1_start; + res->end = res->start + setting->pmem_gpu1_size - 1; + platform_device_register(&hw3d_device); + } + + if (setting->pmem_camera_size) { + pmem_camera_pdata.start = setting->pmem_camera_start; + pmem_camera_pdata.size = setting->pmem_camera_size; + platform_device_register(&pmem_camera_device); + } + + if (setting->ram_console_size) { + ram_console_resource[0].start = setting->ram_console_start; + ram_console_resource[0].end = setting->ram_console_start + + setting->ram_console_size - 1; + platform_device_register(&ram_console_device); + } +} + +#define PM_LIBPROG 0x30000061 +#if (CONFIG_MSM_AMSS_VERSION == 6220) || (CONFIG_MSM_AMSS_VERSION == 6225) +#define PM_LIBVERS 0xfb837d0b +#else +#define PM_LIBVERS 0x10001 +#endif + +#if 0 +static struct platform_device *msm_serial_devices[] __initdata = { + &msm_device_uart1, + &msm_device_uart2, + &msm_device_uart3, + #ifdef CONFIG_SERIAL_MSM_HS + &msm_device_uart_dm1, + &msm_device_uart_dm2, + #endif +}; + +int __init msm_add_serial_devices(unsigned num) +{ + if (num > MSM_SERIAL_NUM) + return -EINVAL; + + return platform_device_register(msm_serial_devices[num]); +} +#endif + diff --git a/arch/arm/mach-msm/dma.c b/arch/arm/mach-msm/dma.c index 02cae5e2951c2..d6650f66b086a 100644 --- a/arch/arm/mach-msm/dma.c +++ b/arch/arm/mach-msm/dma.c @@ -1,6 +1,8 @@ /* linux/arch/arm/mach-msm/dma.c * * Copyright (C) 2007 Google, Inc. + * Copyright (c) 2008 QUALCOMM Incorporated. + * Copyright (c) 2008 QUALCOMM USA, INC. 
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -18,6 +20,7 @@ #include #include #include +#include #include #define MSM_DMOV_CHANNEL_COUNT 16 @@ -51,8 +54,9 @@ void msm_dmov_stop_cmd(unsigned id, struct msm_dmov_cmd *cmd, int graceful) { writel((graceful << 31), DMOV_FLUSH0(id)); } +EXPORT_SYMBOL(msm_dmov_stop_cmd); -void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd) +void msm_dmov_enqueue_cmd_ext(unsigned id, struct msm_dmov_cmd *cmd) { unsigned long irq_flags; unsigned int status; @@ -89,6 +93,29 @@ void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd) } spin_unlock_irqrestore(&msm_dmov_lock, irq_flags); } +EXPORT_SYMBOL(msm_dmov_enqueue_cmd_ext); + +void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd) +{ + /* Disable callback function (for backwards compatibility) */ + cmd->execute_func = NULL; + + msm_dmov_enqueue_cmd_ext(id, cmd); +} +EXPORT_SYMBOL(msm_dmov_enqueue_cmd); + +void msm_dmov_flush(unsigned int id) +{ + unsigned long irq_flags; + spin_lock_irqsave(&msm_dmov_lock, irq_flags); + /* XXX not checking if flush cmd sent already */ + if (!list_empty(&active_commands[id])) { + PRINT_IO("msm_dmov_flush(%d), send flush cmd\n", id); + writel(DMOV_FLUSH_TYPE, DMOV_FLUSH0(id)); + } + spin_unlock_irqrestore(&msm_dmov_lock, irq_flags); +} +EXPORT_SYMBOL(msm_dmov_flush); struct msm_dmov_exec_cmdptr_cmd { struct msm_dmov_cmd dmov_cmd; @@ -268,4 +295,3 @@ static int __init msm_init_datamover(void) } arch_initcall(msm_init_datamover); - diff --git a/arch/arm/mach-msm/drv_callback.c b/arch/arm/mach-msm/drv_callback.c new file mode 100644 index 0000000000000..3ba36f96ecddb --- /dev/null +++ b/arch/arm/mach-msm/drv_callback.c @@ -0,0 +1,60 @@ +/* linux/arch/arm/mach-msm/drv_callback.c + * + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include + +static DECLARE_RWSEM(cnf_driver_list_lock); +static LIST_HEAD(cnf_driver_list); + +int cnf_driver_register(struct cnf_driver *driver) +{ + if(driver){ + rwlock_init(&driver->cnfdrv_list_lock); + + down_write(&cnf_driver_list_lock); + list_add_tail(&driver->next_drv, &cnf_driver_list); + up_write(&cnf_driver_list_lock); + + return 0; + } + else{ + printk(KERN_WARNING "Configurable driver %s failed to " + "register (NULL driver)\n", driver->name); + return -1; + } +} + +int cnf_driver_event(const char *name, void *argu) +{ + struct list_head *listptr; + int ret = -EINVAL; + + down_read(&cnf_driver_list_lock); + list_for_each(listptr, &cnf_driver_list){ + struct cnf_driver *driver; + + driver = list_entry(listptr, struct cnf_driver, next_drv); + if(strcmp(driver->name, name) == 0){ + ret = driver->func(argu); + break; + } + } + up_read(&cnf_driver_list_lock); + + return ret; +} diff --git a/arch/arm/mach-msm/fiq_glue.S b/arch/arm/mach-msm/fiq_glue.S new file mode 100644 index 0000000000000..df1c7084fe115 --- /dev/null +++ b/arch/arm/mach-msm/fiq_glue.S @@ -0,0 +1,112 @@ +/* arch/arm/mach-msm/fiq_glue.S + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include + + .text + + .global fiq_glue_end + + /* fiq stack: r0-r15,cpsr,spsr of interrupted mode */ + +ENTRY(fiq_glue) + /* store pc, cpsr from previous mode */ + mrs r12, spsr + sub r11, lr, #4 + subs r10, #1 + bne nested_fiq + + stmfd sp!, {r11-r12, lr} + + /* store r8-r14 from previous mode */ + sub sp, sp, #(7 * 4) + stmia sp, {r8-r14}^ + nop + + /* store r0-r7 from previous mode */ + stmfd sp!, {r0-r7} + + /* setup func(data,regs) arguments */ + mov r0, r9 + mov r1, sp + mov r3, r8 + + mov r7, sp + + /* Get sp and lr from non-user modes */ + and r4, r12, #MODE_MASK + cmp r4, #USR_MODE + beq fiq_from_usr_mode + + mov r7, sp + orr r4, r4, #(PSR_I_BIT | PSR_F_BIT) + msr cpsr_c, r4 + str sp, [r7, #(4 * 13)] + str lr, [r7, #(4 * 14)] + mrs r5, spsr + str r5, [r7, #(4 * 17)] + + cmp r4, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT) + /* use fiq stack if we reenter this mode */ + subne sp, r7, #(4 * 3) + +fiq_from_usr_mode: + msr cpsr_c, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT) + mov r2, sp + sub sp, r7, #12 + stmfd sp!, {r2, ip, lr} + /* call func(data,regs) */ + blx r3 + ldmfd sp, {r2, ip, lr} + mov sp, r2 + + /* restore/discard saved state */ + cmp r4, #USR_MODE + beq fiq_from_usr_mode_exit + + msr cpsr_c, r4 + ldr sp, [r7, #(4 * 13)] + ldr lr, [r7, #(4 * 14)] + msr spsr_cxsf, r5 + +fiq_from_usr_mode_exit: + msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT) + + ldmfd sp!, {r0-r7} + add sp, sp, #(7 * 4) + ldmfd sp!, {r11-r12, lr} +exit_fiq: + msr spsr_cxsf, r12 + add r10, #1 + movs pc, r11 + +nested_fiq: + orr r12, r12, #(PSR_F_BIT) + b exit_fiq + +fiq_glue_end: + +ENTRY(fiq_glue_setup) /* func, data, sp */ + mrs r3, cpsr + msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT) + movs r8, r0 + mov r9, r1 + mov sp, r2 + moveq r10, #0 + movne r10, #1 + msr cpsr_c, r3 + bx lr + diff --git a/arch/arm/mach-msm/fish_battery.c 
b/arch/arm/mach-msm/fish_battery.c new file mode 100644 index 0000000000000..19fbb91fe83ae --- /dev/null +++ b/arch/arm/mach-msm/fish_battery.c @@ -0,0 +1,145 @@ +/* arch/arm/mach-msm/fish_battery.c + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * based on: arch/arm/mach-msm/htc_battery.c + */ + +#include +#include +#include +#include +#include +#include + +static enum power_supply_property fish_battery_properties[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_CAPACITY, +}; + +static enum power_supply_property fish_power_properties[] = { + POWER_SUPPLY_PROP_ONLINE, +}; + +static char *supply_list[] = { + "battery", +}; + +static int fish_power_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val); + +static int fish_battery_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val); + +static struct power_supply fish_power_supplies[] = { + { + .name = "battery", + .type = POWER_SUPPLY_TYPE_BATTERY, + .properties = fish_battery_properties, + .num_properties = ARRAY_SIZE(fish_battery_properties), + .get_property = fish_battery_get_property, + }, + { + .name = "ac", + .type = POWER_SUPPLY_TYPE_MAINS, + .supplied_to = supply_list, + .num_supplicants = ARRAY_SIZE(supply_list), + .properties = fish_power_properties, + .num_properties = ARRAY_SIZE(fish_power_properties), + .get_property = fish_power_get_property, + }, +}; + +static int fish_power_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + switch (psp) { + case POWER_SUPPLY_PROP_ONLINE: + if (psy->type == POWER_SUPPLY_TYPE_MAINS) + val->intval = 1; + else + val->intval = 0; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int fish_battery_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + switch (psp) { + case POWER_SUPPLY_PROP_STATUS: + val->intval = POWER_SUPPLY_STATUS_FULL; + break; + case POWER_SUPPLY_PROP_HEALTH: + val->intval = POWER_SUPPLY_HEALTH_GOOD; + break; + case POWER_SUPPLY_PROP_PRESENT: + val->intval = 1; + break; + case POWER_SUPPLY_PROP_TECHNOLOGY: + val->intval = POWER_SUPPLY_TECHNOLOGY_UNKNOWN; + break; + case POWER_SUPPLY_PROP_CAPACITY: + val->intval = 100; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int fish_battery_probe(struct platform_device *pdev) +{ + int i; + int rc; + + /* init power supplier framework */ + for (i = 0; i < ARRAY_SIZE(fish_power_supplies); i++) { + rc = power_supply_register(&pdev->dev, &fish_power_supplies[i]); + if (rc) + pr_err("%s: Failed to register power supply (%d)\n", + __func__, rc); + } + + return 0; +} + +static struct platform_driver fish_battery_driver = { + .probe = fish_battery_probe, + .driver = { + .name = "fish_battery", + .owner = THIS_MODULE, + }, +}; + +static int __init fish_battery_init(void) +{ + platform_driver_register(&fish_battery_driver); + return 0; 
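/*
 * Illustrative sketch (not taken from this patch): the driver above always
 * reports a present, full battery and binds by platform-device name, so a
 * board only needs to register a matching device:
 *
 *    static struct platform_device fish_battery_device = {
 *            .name = "fish_battery",
 *            .id   = -1,
 *    };
 *
 *    platform_device_register(&fish_battery_device);
 */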
+} + +module_init(fish_battery_init); +MODULE_DESCRIPTION("Qualcomm fish battery driver"); +MODULE_LICENSE("GPL"); + diff --git a/arch/arm/mach-msm/footswitch-pcom.c b/arch/arm/mach-msm/footswitch-pcom.c new file mode 100644 index 0000000000000..8eec807464f23 --- /dev/null +++ b/arch/arm/mach-msm/footswitch-pcom.c @@ -0,0 +1,332 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include "footswitch.h" +#include "proc_comm.h" + +/* PCOM power rail IDs */ +#define PCOM_FS_GRP 8 +#define PCOM_FS_GRP_2D 58 +#define PCOM_FS_MDP 14 +#define PCOM_FS_MFC 68 +#define PCOM_FS_ROTATOR 90 +#define PCOM_FS_VFE 41 +#define PCOM_FS_VPE 76 + +#define PCOM_RAIL_MODE_AUTO 0 +#define PCOM_RAIL_MODE_MANUAL 1 + +/** + * struct footswitch - Per-footswitch data and state + * @rdev: Regulator framework device + * @desc: Regulator descriptor + * @init_data: Regulator platform data + * @pcom_id: Proc-comm ID of the footswitch + * @is_enabled: Flag set when footswitch is enabled + * @is_manual: Flag set when footswitch is in manual proc-comm mode + * @has_ahb_clk: Flag set if footswitched core has an ahb_clk + * @has_src_clk: Flag set if footswitched core has a src_clk + * @src_clk: Controls the core clock's rate + * @core_clk: Clocks the core + * @ahb_clk: Clocks the core's register interface + * @src_clk_init_rate: Rate to use for src_clk if it has not been set yet + * @is_rate_set: Flag set if core_clk's rate has been set + */ +struct footswitch { + struct regulator_dev *rdev; + struct regulator_desc desc; + struct regulator_init_data init_data; + unsigned pcom_id; + bool is_enabled; + bool is_manual; + struct clk *src_clk; + struct clk *core_clk; + struct clk *ahb_clk; + const bool has_ahb_clk; + const bool has_src_clk; + const int src_clk_init_rate; + bool is_rate_set; +}; + +static inline int set_rail_mode(int pcom_id, int mode) +{ + int rc; + + rc = msm_proc_comm(PCOM_CLKCTL_RPC_RAIL_CONTROL, &pcom_id, &mode); + if (!rc && pcom_id) + rc = -EINVAL; + + return rc; +} + +static inline int set_rail_state(int pcom_id, int state) +{ + int rc; + + rc = msm_proc_comm(state, &pcom_id, NULL); + if (!rc && pcom_id) + rc = -EINVAL; + + return rc; +} + +static int enable_clocks(struct footswitch *fs) +{ + fs->is_rate_set = !!(clk_get_rate(fs->src_clk)); + if (!fs->is_rate_set) + clk_set_rate(fs->src_clk, fs->src_clk_init_rate); + clk_enable(fs->core_clk); + + if (fs->ahb_clk) + clk_enable(fs->ahb_clk); + + return 0; +} + +static void disable_clocks(struct footswitch *fs) +{ + if (fs->ahb_clk) + clk_disable(fs->ahb_clk); + clk_disable(fs->core_clk); +} + +static int footswitch_is_enabled(struct regulator_dev *rdev) +{ + struct footswitch *fs = rdev_get_drvdata(rdev); + + return fs->is_enabled; +} + +static int footswitch_enable(struct regulator_dev *rdev) +{ + struct footswitch *fs = rdev_get_drvdata(rdev); + int rc; + + rc = enable_clocks(fs); + if (rc) + return rc; + + rc = set_rail_state(fs->pcom_id, PCOM_CLKCTL_RPC_RAIL_ENABLE); + if 
(!rc) + fs->is_enabled = true; + + disable_clocks(fs); + + return rc; +} + +static int footswitch_disable(struct regulator_dev *rdev) +{ + struct footswitch *fs = rdev_get_drvdata(rdev); + int rc; + + rc = enable_clocks(fs); + if (rc) + return rc; + + rc = set_rail_state(fs->pcom_id, PCOM_CLKCTL_RPC_RAIL_DISABLE); + if (!rc) + fs->is_enabled = false; + + disable_clocks(fs); + + return rc; +} + +static struct regulator_ops footswitch_ops = { + .is_enabled = footswitch_is_enabled, + .enable = footswitch_enable, + .disable = footswitch_disable, +}; + +#define FOOTSWITCH(_id, _pcom_id, _name, _src_clk, _rate, _ahb_clk) \ + [_id] = { \ + .desc = { \ + .id = _id, \ + .name = _name, \ + .ops = &footswitch_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + }, \ + .pcom_id = _pcom_id, \ + .has_src_clk = _src_clk, \ + .src_clk_init_rate = _rate, \ + .has_ahb_clk = _ahb_clk, \ + } +static struct footswitch footswitches[] = { + FOOTSWITCH(FS_GFX3D, PCOM_FS_GRP, + "fs_gfx3d", true, 24576000, true), + FOOTSWITCH(FS_GFX2D0, PCOM_FS_GRP_2D, + "fs_gfx2d0", false, 24576000, true), + FOOTSWITCH(FS_MDP, PCOM_FS_MDP, + "fs_mdp", false, 24576000, true), + FOOTSWITCH(FS_MFC, PCOM_FS_MFC, + "fs_mfc", false, 24576000, true), + FOOTSWITCH(FS_ROT, PCOM_FS_ROTATOR, + "fs_rot", false, 0, true), + FOOTSWITCH(FS_VFE, PCOM_FS_VFE, + "fs_vfe", false, 24576000, true), + FOOTSWITCH(FS_VPE, PCOM_FS_VPE, + "fs_vpe", false, 24576000, false), +}; + +static int get_clocks(struct device *dev, struct footswitch *fs) +{ + int rc; + + /* + * Some SoCs may not have a separate rate-settable clock. + * If one can't be found, try to use the core clock for + * rate-setting instead. + */ + if (fs->has_src_clk) { + fs->src_clk = clk_get(dev, "src_clk"); + if (IS_ERR(fs->src_clk)) + fs->src_clk = clk_get(dev, "core_clk"); + } else { + fs->src_clk = clk_get(dev, "core_clk"); + } + if (IS_ERR(fs->src_clk)) { + pr_err("clk_get(src_clk) failed\n"); + rc = PTR_ERR(fs->src_clk); + goto err_src_clk; + } + + fs->core_clk = clk_get(dev, "core_clk"); + if (IS_ERR(fs->core_clk)) { + pr_err("clk_get(core_clk) failed\n"); + rc = PTR_ERR(fs->core_clk); + goto err_core_clk; + } + + if (fs->has_ahb_clk) { + fs->ahb_clk = clk_get(dev, "iface_clk"); + if (IS_ERR(fs->ahb_clk)) { + pr_err("clk_get(iface_clk) failed\n"); + rc = PTR_ERR(fs->ahb_clk); + goto err_ahb_clk; + } + } + + return 0; + +err_ahb_clk: + clk_put(fs->core_clk); +err_core_clk: + clk_put(fs->src_clk); +err_src_clk: + return rc; +} + +static void put_clocks(struct footswitch *fs) +{ + clk_put(fs->src_clk); + clk_put(fs->core_clk); + clk_put(fs->ahb_clk); +} + +static int footswitch_probe(struct platform_device *pdev) +{ + struct footswitch *fs; + struct regulator_init_data *init_data; + int rc; + + if (pdev == NULL) + return -EINVAL; + + if (pdev->id >= MAX_FS) + return -ENODEV; + + fs = &footswitches[pdev->id]; + if (!fs->is_manual) { + pr_err("%s is not in manual mode\n", fs->desc.name); + return -EINVAL; + } + init_data = pdev->dev.platform_data; + + rc = get_clocks(&pdev->dev, fs); + if (rc) + return rc; + + fs->rdev = regulator_register(&fs->desc, &pdev->dev, init_data, fs); + if (IS_ERR(fs->rdev)) { + pr_err("regulator_register(%s) failed\n", fs->desc.name); + rc = PTR_ERR(fs->rdev); + goto err_register; + } + + return 0; + +err_register: + put_clocks(fs); + + return rc; +} + +static int __devexit footswitch_remove(struct platform_device *pdev) +{ + struct footswitch *fs = &footswitches[pdev->id]; + + regulator_unregister(fs->rdev); + set_rail_mode(fs->pcom_id, 
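/*
 * Illustrative sketch (not taken from this patch): each footswitch above is
 * exposed as a regulator ("fs_mdp", "fs_gfx3d", ...), so a consumer driver
 * gates its core power rail through the normal regulator API. Error handling
 * omitted:
 *
 *    struct regulator *fs = regulator_get(dev, "fs_mdp");
 *
 *    regulator_enable(fs);    // rail switched on via proc_comm
 *    ...
 *    regulator_disable(fs);
 *    regulator_put(fs);
 */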
PCOM_RAIL_MODE_AUTO); + put_clocks(fs); + + return 0; +} + +static struct platform_driver footswitch_driver = { + .probe = footswitch_probe, + .remove = __devexit_p(footswitch_remove), + .driver = { + .name = "footswitch-pcom", + .owner = THIS_MODULE, + }, +}; + +static int __init footswitch_init(void) +{ + struct footswitch *fs; + int ret; + + /* + * Enable all footswitches in manual mode (ie. not controlled along + * with pcom clocks). + */ + for (fs = footswitches; fs < footswitches + ARRAY_SIZE(footswitches); + fs++) { + set_rail_state(fs->pcom_id, PCOM_CLKCTL_RPC_RAIL_ENABLE); + ret = set_rail_mode(fs->pcom_id, PCOM_RAIL_MODE_MANUAL); + if (!ret) + fs->is_manual = 1; + } + + return platform_driver_register(&footswitch_driver); +} +subsys_initcall(footswitch_init); + +static void __exit footswitch_exit(void) +{ + platform_driver_unregister(&footswitch_driver); +} +module_exit(footswitch_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("proc_comm rail footswitch"); +MODULE_ALIAS("platform:footswitch-pcom"); diff --git a/arch/arm/mach-msm/footswitch.h b/arch/arm/mach-msm/footswitch.h new file mode 100644 index 0000000000000..3d9b4795f314d --- /dev/null +++ b/arch/arm/mach-msm/footswitch.h @@ -0,0 +1,66 @@ +/* Copyright (c) 2010-2011 Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Code Aurora nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#ifndef __MSM_FOOTSWITCH__ +#define __MSM_FOOTSWITCH__ + +#include + +/* Device IDs */ +#define FS_GFX2D0 0 +#define FS_GFX2D1 1 +#define FS_GFX3D 2 +#define FS_IJPEG 3 +#define FS_MDP 4 +#define FS_MFC 5 +#define FS_ROT 6 +#define FS_VED 7 +#define FS_VFE 8 +#define FS_VPE 9 +#define MAX_FS 10 + +#define FS_GENERIC(_drv_name, _id, _name) (&(struct platform_device){ \ + .name = (_drv_name), \ + .id = (_id), \ + .dev = { \ + .platform_data = &(struct regulator_init_data){ \ + .constraints = { \ + .valid_modes_mask = REGULATOR_MODE_NORMAL, \ + .valid_ops_mask = REGULATOR_CHANGE_STATUS, \ + }, \ + .num_consumer_supplies = 1, \ + .consumer_supplies = \ + &(struct regulator_consumer_supply) \ + REGULATOR_SUPPLY((_name), NULL), \ + } \ + }, \ +}) +#define FS_PCOM(_id, _name) FS_GENERIC("footswitch-pcom", (_id), (_name)) +#define FS_8X60(_id, _name) FS_GENERIC("footswitch-8x60", (_id), (_name)) + +#endif diff --git a/arch/arm/mach-msm/gpio.c b/arch/arm/mach-msm/gpio.c index 176af9dcb8ee4..7a3d75e95c8b3 100644 --- a/arch/arm/mach-msm/gpio.c +++ b/arch/arm/mach-msm/gpio.c @@ -20,10 +20,12 @@ #include #include #include +#include #include "gpio_hw.h" #include "gpiomux.h" -#define FIRST_GPIO_IRQ MSM_GPIO_TO_INT(0) +#include "proc_comm.h" +#include "smd_private.h" #define MSM_GPIO_BANK(bank, first, last) \ { \ @@ -50,6 +52,14 @@ } \ } +enum { + GPIO_DEBUG_SLEEP = 1U << 0, +}; + +static int msm_gpio_debug_mask; +module_param_named(debug_mask, msm_gpio_debug_mask, int, + S_IRUGO | S_IWUSR | S_IWGRP); + #define MSM_GPIO_BROKEN_INT_CLEAR 1 struct msm_gpio_regs { @@ -345,6 +355,129 @@ static struct irq_chip msm_gpio_irq_chip = { .irq_set_type = msm_gpio_irq_set_type, }; +#define NUM_GPIO_SMEM_BANKS 6 +#define GPIO_SMEM_NUM_GROUPS 2 +#define GPIO_SMEM_MAX_PC_INTERRUPTS 8 +struct tramp_gpio_smem { + uint16_t num_fired[GPIO_SMEM_NUM_GROUPS]; + uint16_t fired[GPIO_SMEM_NUM_GROUPS][GPIO_SMEM_MAX_PC_INTERRUPTS]; + uint32_t enabled[NUM_GPIO_SMEM_BANKS]; + uint32_t detection[NUM_GPIO_SMEM_BANKS]; + uint32_t polarity[NUM_GPIO_SMEM_BANKS]; +}; + +static void msm_gpio_sleep_int(unsigned long arg) +{ + int i, j; + struct tramp_gpio_smem *smem_gpio; + + BUILD_BUG_ON(NR_GPIO_IRQS > NUM_GPIO_SMEM_BANKS * 32); + + smem_gpio = smem_alloc(SMEM_GPIO_INT, sizeof(*smem_gpio)); + if (smem_gpio == NULL) + return; + + local_irq_disable(); + for (i = 0; i < GPIO_SMEM_NUM_GROUPS; i++) { + int count = smem_gpio->num_fired[i]; + for (j = 0; j < count; j++) { + /* TODO: Check mask */ + generic_handle_irq( + MSM_GPIO_TO_INT(smem_gpio->fired[i][j])); + } + } + local_irq_enable(); +} + +static DECLARE_TASKLET(msm_gpio_sleep_int_tasklet, msm_gpio_sleep_int, 0); + +void msm_gpio_enter_sleep(int from_idle) +{ + int i; + struct tramp_gpio_smem *smem_gpio; + + smem_gpio = smem_alloc(SMEM_GPIO_INT, sizeof(*smem_gpio)); + + if (smem_gpio) { + for (i = 0; i < ARRAY_SIZE(smem_gpio->enabled); i++) { + smem_gpio->enabled[i] = 0; + smem_gpio->detection[i] = 0; + smem_gpio->polarity[i] = 0; + } + } + + for (i = 0; i < ARRAY_SIZE(msm_gpio_chips); i++) { + writel(msm_gpio_chips[i].int_enable[!from_idle], + msm_gpio_chips[i].regs.int_en); + if (smem_gpio) { + uint32_t tmp; + int start, index, shiftl, shiftr; + start = msm_gpio_chips[i].chip.base; + index = start / 32; + shiftl = start % 32; + shiftr = 32 - shiftl; + tmp = msm_gpio_chips[i].int_enable[!from_idle]; + smem_gpio->enabled[index] |= tmp << shiftl; + smem_gpio->enabled[index+1] |= tmp >> shiftr; + smem_gpio->detection[index] |= + readl(msm_gpio_chips[i].regs.int_edge) << + shiftl; + 
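/*
 * Illustrative sketch (not taken from this patch): the FS_PCOM() helper in
 * footswitch.h above builds an anonymous platform_device, so a board file can
 * list its footswitches directly in a device table:
 *
 *    static struct platform_device *board_devices[] __initdata = {
 *            FS_PCOM(FS_MDP,   "fs_mdp"),
 *            FS_PCOM(FS_GFX3D, "fs_gfx3d"),
 *    };
 *
 *    platform_add_devices(board_devices, ARRAY_SIZE(board_devices));
 */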
smem_gpio->detection[index+1] |= + readl(msm_gpio_chips[i].regs.int_edge) >> + shiftr; + smem_gpio->polarity[index] |= + readl(msm_gpio_chips[i].regs.int_pos) << shiftl; + smem_gpio->polarity[index+1] |= + readl(msm_gpio_chips[i].regs.int_pos) >> shiftr; + } + } + + if (smem_gpio) { + if (msm_gpio_debug_mask & GPIO_DEBUG_SLEEP) + for (i = 0; i < ARRAY_SIZE(smem_gpio->enabled); i++) { + printk("msm_gpio_enter_sleep gpio %d-%d: enable" + " %08x, edge %08x, polarity %08x\n", + i * 32, i * 32 + 31, + smem_gpio->enabled[i], + smem_gpio->detection[i], + smem_gpio->polarity[i]); + } + for (i = 0; i < GPIO_SMEM_NUM_GROUPS; i++) + smem_gpio->num_fired[i] = 0; + } +} + +void msm_gpio_exit_sleep(void) +{ + int i; + struct tramp_gpio_smem *smem_gpio; + + smem_gpio = smem_alloc(SMEM_GPIO_INT, sizeof(*smem_gpio)); + + for (i = 0; i < ARRAY_SIZE(msm_gpio_chips); i++) { + writel(msm_gpio_chips[i].int_enable[0], + msm_gpio_chips[i].regs.int_en); + } + + if (smem_gpio && (smem_gpio->num_fired[0] || smem_gpio->num_fired[1])) { + if (msm_gpio_debug_mask & GPIO_DEBUG_SLEEP) + printk(KERN_INFO "gpio: fired %x %x\n", + smem_gpio->num_fired[0], smem_gpio->num_fired[1]); + tasklet_schedule(&msm_gpio_sleep_int_tasklet); + } +} + +void config_gpio_table(uint32_t *table, int len) +{ + int n; + unsigned id; + for (n = 0; n < len; n++) { + id = table[n]; + msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); + } +} +EXPORT_SYMBOL(config_gpio_table); + static int __init msm_init_gpio(void) { int i, j = 0; diff --git a/arch/arm/mach-msm/gpiomux-v1.h b/arch/arm/mach-msm/gpiomux-v1.h index 71d86feba4509..a523ad25d0cbc 100644 --- a/arch/arm/mach-msm/gpiomux-v1.h +++ b/arch/arm/mach-msm/gpiomux-v1.h @@ -64,4 +64,8 @@ enum { GPIOMUX_PULL_UP = 3UL << 15, }; +enum { + GPIOMUX_DIR_INPUT = 0UL << 14, + GPIOMUX_DIR_OUTPUT = 1UL << 14, +}; #endif diff --git a/arch/arm/mach-msm/htc_35mm_jack.c b/arch/arm/mach-msm/htc_35mm_jack.c new file mode 100644 index 0000000000000..60d407a2f0a82 --- /dev/null +++ b/arch/arm/mach-msm/htc_35mm_jack.c @@ -0,0 +1,397 @@ +/* arch/arm/mach-msm/htc_35mm_jack.c + * + * Copyright (C) 2009 HTC, Inc. + * Author: Arec Kao + * Copyright (C) 2009 Google, Inc. + * Author: Eric Olsen + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_HTC_AUDIOJACK +#include +#endif + +/* #define CONFIG_DEBUG_H2W */ + +#define H2WI(fmt, arg...) \ + printk(KERN_INFO "[H2W] %s " fmt "\r\n", __func__, ## arg) +#define H2WE(fmt, arg...) \ + printk(KERN_ERR "[H2W] %s " fmt "\r\n", __func__, ## arg) + +#ifdef CONFIG_DEBUG_H2W +#define H2W_DBG(fmt, arg...) \ + printk(KERN_INFO "[H2W] %s " fmt "\r\n", __func__, ## arg) +#else +#define H2W_DBG(fmt, arg...) 
do {} while (0) +#endif + +void detect_h2w_do_work(struct work_struct *w); + +static struct workqueue_struct *detect_wq; +static struct workqueue_struct *button_wq; + +static DECLARE_DELAYED_WORK(detect_h2w_work, detect_h2w_do_work); + +static void insert_35mm_do_work(struct work_struct *work); +static DECLARE_WORK(insert_35mm_work, insert_35mm_do_work); +static void remove_35mm_do_work(struct work_struct *work); +static DECLARE_WORK(remove_35mm_work, remove_35mm_do_work); +static void button_35mm_do_work(struct work_struct *work); +static DECLARE_WORK(button_35mm_work, button_35mm_do_work); + +struct h35_info { + struct mutex mutex_lock; + struct switch_dev hs_change; + unsigned long insert_jiffies; + int ext_35mm_status; + int is_ext_insert; + int key_code; + int mic_bias_state; + int *is_hpin_stable; + struct input_dev *input; + + struct wake_lock headset_wake_lock; +}; + +static struct h35mm_platform_data *pd; +static struct h35_info *hi; + +static ssize_t h35mm_print_name(struct switch_dev *sdev, char *buf) +{ + return sprintf(buf, "Headset\n"); +} + +static void button_35mm_do_work(struct work_struct *work) +{ + int key = 0; + int pressed = 0; + + if (!hi->is_ext_insert) { + /* no headset ignor key event */ + H2WI("3.5mm headset is plugged out, skip report key event"); + return; + } + + switch (hi->key_code) { + case 0x1: /* Play/Pause */ + H2WI("3.5mm RC: Play Pressed"); + key = KEY_MEDIA; + pressed = 1; + break; + case 0x2: + H2WI("3.5mm RC: BACKWARD Pressed"); + key = KEY_PREVIOUSSONG; + pressed = 1; + break; + case 0x3: + H2WI("3.5mm RC: FORWARD Pressed"); + key = KEY_NEXTSONG; + pressed = 1; + break; + case 0x81: /* Play/Pause */ + H2WI("3.5mm RC: Play Released"); + key = KEY_MEDIA; + pressed = 0; + break; + case 0x82: + H2WI("3.5mm RC: BACKWARD Released"); + key = KEY_PREVIOUSSONG; + pressed = 0; + break; + case 0x83: + H2WI("3.5mm RC: FORWARD Released"); + key = KEY_NEXTSONG; + pressed = 0; + break; + default: + H2WI("3.5mm RC: Unknown Button (0x%x) Pressed", hi->key_code); + return; + } + input_report_key(hi->input, key, pressed); + input_sync(hi->input); + + wake_lock_timeout(&hi->headset_wake_lock, 1.5*HZ); +} + +static void remove_35mm_do_work(struct work_struct *work) +{ + wake_lock_timeout(&hi->headset_wake_lock, 2.5*HZ); + + H2W_DBG(""); + /*To solve the insert, remove, insert headset problem*/ + if (time_before_eq(jiffies, hi->insert_jiffies)) + msleep(800); + + if (hi->is_ext_insert) { + H2WI("Skip 3.5mm headset plug out!!!"); + if (hi->is_hpin_stable) + *(hi->is_hpin_stable) = 1; + return; + } + + pr_info("3.5mm_headset plug out\n"); + + if (pd->key_event_disable != NULL) + pd->key_event_disable(); + + if (hi->mic_bias_state) { + turn_mic_bias_on(0); + hi->mic_bias_state = 0; + } + hi->ext_35mm_status = 0; + if (hi->is_hpin_stable) + *(hi->is_hpin_stable) = 0; + + /* Notify framework via switch class */ + mutex_lock(&hi->mutex_lock); + switch_set_state(&hi->hs_change, hi->ext_35mm_status); + mutex_unlock(&hi->mutex_lock); +} + +static void insert_35mm_do_work(struct work_struct *work) +{ + H2W_DBG(""); + hi->insert_jiffies = jiffies + 1*HZ; + + wake_lock_timeout(&hi->headset_wake_lock, 1.5*HZ); + + if (hi->is_ext_insert) { + pr_info("3.5mm_headset plug in\n"); + + if (pd->key_event_enable != NULL) + pd->key_event_enable(); + + /* Turn On Mic Bias */ + if (!hi->mic_bias_state) { + turn_mic_bias_on(1); + hi->mic_bias_state = 1; + /* Wait for pin stable */ + msleep(300); + } + + /* Detect headset with or without microphone */ + if(pd->headset_has_mic) { + if 
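/*
 * Illustrative sketch (not taken from this patch): the jack logic in this file
 * is driven through board callbacks in struct h35mm_platform_data (definition
 * not shown here); the field names below are inferred from how pd is used, and
 * the board_* helpers are hypothetical.
 *
 *    static struct h35mm_platform_data board_jack_pdata = {
 *            .plug_event_enable = board_jack_plug_event_enable,
 *            .headset_has_mic   = board_jack_headset_has_mic,
 *            .key_event_enable  = board_jack_key_event_enable,
 *            .key_event_disable = board_jack_key_event_disable,
 *    };
 *
 * paired with a platform_device named "htc_headset" whose dev.platform_data
 * points at it.
 */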
(pd->headset_has_mic() == 0) { + /* without microphone */ + pr_info("3.5mm without microphone\n"); + hi->ext_35mm_status = BIT_HEADSET_NO_MIC; + } else { /* with microphone */ + pr_info("3.5mm with microphone\n"); + hi->ext_35mm_status = BIT_HEADSET; + } + } else { + /* Assume no mic */ + pr_info("3.5mm without microphone\n"); + hi->ext_35mm_status = BIT_HEADSET_NO_MIC; + } + + /* Notify framework via switch class */ + mutex_lock(&hi->mutex_lock); + switch_set_state(&hi->hs_change, hi->ext_35mm_status); + mutex_unlock(&hi->mutex_lock); + + if (hi->is_hpin_stable) + *(hi->is_hpin_stable) = 1; + } +} + +int htc_35mm_key_event(int keycode, int *hpin_stable) +{ + hi->key_code = keycode; + hi->is_hpin_stable = hpin_stable; + + if ((hi->ext_35mm_status & BIT_HEADSET) == 0) { + *(hi->is_hpin_stable) = 0; + + pr_info("Key press with no mic. Retrying detection\n"); + queue_work(detect_wq, &insert_35mm_work); + } else + queue_work(button_wq, &button_35mm_work); + + return 0; +} + +int htc_35mm_jack_plug_event(int insert, int *hpin_stable) +{ + if (!hi) { + pr_err("Plug event before driver init\n"); + return -1; + } + + mutex_lock(&hi->mutex_lock); + hi->is_ext_insert = insert; + hi->is_hpin_stable = hpin_stable; + mutex_unlock(&hi->mutex_lock); + + H2WI(" %d", hi->is_ext_insert); + if (!hi->is_ext_insert) + queue_work(detect_wq, &remove_35mm_work); + else + queue_work(detect_wq, &insert_35mm_work); + return 1; +} + +static int htc_35mm_probe(struct platform_device *pdev) +{ + int ret; + + pd = pdev->dev.platform_data; + + pr_info("H2W: htc_35mm_jack driver register\n"); + + hi = kzalloc(sizeof(struct h35_info), GFP_KERNEL); + if (!hi) + return -ENOMEM; + + hi->ext_35mm_status = 0; + hi->is_ext_insert = 0; + hi->mic_bias_state = 0; + + mutex_init(&hi->mutex_lock); + + wake_lock_init(&hi->headset_wake_lock, WAKE_LOCK_SUSPEND, "headset"); + + hi->hs_change.name = "h2w"; + hi->hs_change.print_name = h35mm_print_name; + ret = switch_dev_register(&hi->hs_change); + if (ret < 0) + goto err_switch_dev_register; + + detect_wq = create_workqueue("detection"); + if (detect_wq == NULL) { + ret = -ENOMEM; + goto err_create_detect_work_queue; + } + + button_wq = create_workqueue("button"); + if (button_wq == NULL) { + ret = -ENOMEM; + goto err_create_button_work_queue; + } + + hi->input = input_allocate_device(); + if (!hi->input) { + ret = -ENOMEM; + goto err_request_input_dev; + } + + hi->input->name = "h2w headset"; + set_bit(EV_SYN, hi->input->evbit); + set_bit(EV_KEY, hi->input->evbit); + set_bit(KEY_MEDIA, hi->input->keybit); + set_bit(KEY_NEXTSONG, hi->input->keybit); + set_bit(KEY_PLAYPAUSE, hi->input->keybit); + set_bit(KEY_PREVIOUSSONG, hi->input->keybit); + set_bit(KEY_MUTE, hi->input->keybit); + set_bit(KEY_VOLUMEUP, hi->input->keybit); + set_bit(KEY_VOLUMEDOWN, hi->input->keybit); + set_bit(KEY_END, hi->input->keybit); + set_bit(KEY_SEND, hi->input->keybit); + + ret = input_register_device(hi->input); + if (ret < 0) + goto err_register_input_dev; + + /* Enable plug events*/ + if (pd->plug_event_enable == NULL) { + ret = -ENOMEM; + goto err_enable_plug_event; + } + if (pd->plug_event_enable() != 1) { + ret = -ENOMEM; + goto err_enable_plug_event; + } + + return 0; + +err_enable_plug_event: +err_register_input_dev: + input_free_device(hi->input); +err_request_input_dev: + destroy_workqueue(button_wq); +err_create_button_work_queue: + destroy_workqueue(detect_wq); +err_create_detect_work_queue: + switch_dev_unregister(&hi->hs_change); +err_switch_dev_register: + kzfree(hi); + pr_err("H2W: Failed to 
register driver\n"); + + return ret; +} + +static int htc_35mm_remove(struct platform_device *pdev) +{ + H2W_DBG(""); + switch_dev_unregister(&hi->hs_change); + kzfree(hi); + +#if 0 /* Add keys later */ + input_unregister_device(hi->input); +#endif + return 0; +} + +static struct platform_driver htc_35mm_driver = { + .probe = htc_35mm_probe, + .remove = htc_35mm_remove, + .driver = { + .name = "htc_headset", + .owner = THIS_MODULE, + }, +}; + +static int __init htc_35mm_init(void) +{ + H2W_DBG(""); + return platform_driver_register(&htc_35mm_driver); +} + +static void __exit htc_35mm_exit(void) +{ + platform_driver_unregister(&htc_35mm_driver); +} + +module_init(htc_35mm_init); +module_exit(htc_35mm_exit); + +MODULE_AUTHOR("Eric Olsen "); +MODULE_DESCRIPTION("HTC 3.5MM Driver"); +MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-msm/htc_acoustic.c b/arch/arm/mach-msm/htc_acoustic.c new file mode 100644 index 0000000000000..2360b37f55e28 --- /dev/null +++ b/arch/arm/mach-msm/htc_acoustic.c @@ -0,0 +1,266 @@ +/* arch/arm/mach-msm/htc_acoustic.c + * + * Copyright (C) 2007-2008 HTC Corporation + * Author: Laurence Chen + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "smd_private.h" + +#define ACOUSTIC_IOCTL_MAGIC 'p' +#define ACOUSTIC_ARM11_DONE _IOW(ACOUSTIC_IOCTL_MAGIC, 22, unsigned int) + +#define HTCRPOG 0x30100002 +#define HTCVERS MSM_RPC_VERS(0,0) +#define ONCRPC_SET_MIC_BIAS_PROC (1) +#define ONCRPC_ACOUSTIC_INIT_PROC (5) +#define ONCRPC_ALLOC_ACOUSTIC_MEM_PROC (6) + +#define HTC_ACOUSTIC_TABLE_SIZE (0x10000) + +#define D(fmt, args...) printk(KERN_INFO "htc-acoustic: "fmt, ##args) +#define E(fmt, args...) printk(KERN_ERR "htc-acoustic: "fmt, ##args) + +struct set_smem_req { + struct rpc_request_hdr hdr; + uint32_t size; +}; + +struct set_smem_rep { + struct rpc_reply_hdr hdr; + int n; +}; + +struct set_acoustic_req { + struct rpc_request_hdr hdr; +}; + +struct set_acoustic_rep { + struct rpc_reply_hdr hdr; + int n; +}; + +static uint32_t htc_acoustic_vir_addr; +static struct msm_rpc_endpoint *endpoint; +static struct mutex api_lock; +static struct mutex rpc_connect_mutex; + +static int is_rpc_connect(void) +{ + mutex_lock(&rpc_connect_mutex); + if (endpoint == NULL) { + endpoint = msm_rpc_connect(HTCRPOG, HTCVERS, 0); + if (IS_ERR(endpoint)) { + pr_err("%s: init rpc failed! 
rc = %ld\n", + __func__, PTR_ERR(endpoint)); + mutex_unlock(&rpc_connect_mutex); + return 0; + } + } + mutex_unlock(&rpc_connect_mutex); + return 1; +} + +int turn_mic_bias_on(int on) +{ + struct mic_bias_req { + struct rpc_request_hdr hdr; + uint32_t on; + } req; + + if (!is_rpc_connect()) + return -1; + + req.on = cpu_to_be32(on); + return msm_rpc_call(endpoint, ONCRPC_SET_MIC_BIAS_PROC, + &req, sizeof(req), 5 * HZ); +} +EXPORT_SYMBOL(turn_mic_bias_on); + +static int acoustic_mmap(struct file *file, struct vm_area_struct *vma) +{ + unsigned long pgoff, delta; + int rc = -EINVAL; + size_t size; + + D("mmap\n"); + + mutex_lock(&api_lock); + + size = vma->vm_end - vma->vm_start; + + if (vma->vm_pgoff != 0) { + E("mmap failed: page offset %lx\n", vma->vm_pgoff); + goto done; + } + + if (!htc_acoustic_vir_addr) { + E("mmap failed: smem region not allocated\n"); + rc = -EIO; + goto done; + } + + pgoff = MSM_SHARED_RAM_PHYS + + (htc_acoustic_vir_addr - (uint32_t)MSM_SHARED_RAM_BASE); + delta = PAGE_ALIGN(pgoff) - pgoff; + + if (size + delta > HTC_ACOUSTIC_TABLE_SIZE) { + E("mmap failed: size %d\n", size); + goto done; + } + + pgoff += delta; + vma->vm_flags |= VM_IO | VM_RESERVED; + + rc = io_remap_pfn_range(vma, vma->vm_start, pgoff >> PAGE_SHIFT, + size, vma->vm_page_prot); + + if (rc < 0) + E("mmap failed: remap error %d\n", rc); + +done: mutex_unlock(&api_lock); + return rc; +} + +static int acoustic_open(struct inode *inode, struct file *file) +{ + int rc = -EIO; + struct set_smem_req req_smem; + struct set_smem_rep rep_smem; + + D("open\n"); + + mutex_lock(&api_lock); + + if (!htc_acoustic_vir_addr) { + if (!is_rpc_connect()) + goto done; + + req_smem.size = cpu_to_be32(HTC_ACOUSTIC_TABLE_SIZE); + rc = msm_rpc_call_reply(endpoint, + ONCRPC_ALLOC_ACOUSTIC_MEM_PROC, + &req_smem, sizeof(req_smem), + &rep_smem, sizeof(rep_smem), + 5 * HZ); + + if (rep_smem.n != 0 || rc < 0) { + E("open failed: ALLOC_ACOUSTIC_MEM_PROC error %d.\n", + rc); + goto done; + } + htc_acoustic_vir_addr = + (uint32_t)smem_alloc(SMEM_ID_VENDOR1, + HTC_ACOUSTIC_TABLE_SIZE); + if (!htc_acoustic_vir_addr) { + E("open failed: smem_alloc error\n"); + goto done; + } + } + + rc = 0; +done: + mutex_unlock(&api_lock); + return rc; +} + +static int acoustic_release(struct inode *inode, struct file *file) +{ + D("release\n"); + return 0; +} + +static long acoustic_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int rc, reply_value; + struct set_acoustic_req req; + struct set_acoustic_rep rep; + + D("ioctl\n"); + + mutex_lock(&api_lock); + + switch (cmd) { + case ACOUSTIC_ARM11_DONE: + D("ioctl: ACOUSTIC_ARM11_DONE called %d.\n", current->pid); + rc = msm_rpc_call_reply(endpoint, + ONCRPC_ACOUSTIC_INIT_PROC, &req, + sizeof(req), &rep, sizeof(rep), + 5 * HZ); + + reply_value = be32_to_cpu(rep.n); + if (reply_value != 0 || rc < 0) { + E("ioctl failed: ONCRPC_ACOUSTIC_INIT_PROC "\ + "error %d.\n", rc); + if (rc >= 0) + rc = -EIO; + break; + } + D("ioctl: ONCRPC_ACOUSTIC_INIT_PROC success.\n"); + break; + default: + E("ioctl: invalid command\n"); + rc = -EINVAL; + } + + mutex_unlock(&api_lock); + return 0; +} + + +static struct file_operations acoustic_fops = { + .owner = THIS_MODULE, + .open = acoustic_open, + .release = acoustic_release, + .mmap = acoustic_mmap, + .unlocked_ioctl = acoustic_ioctl, +}; + +static struct miscdevice acoustic_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "htc-acoustic", + .fops = &acoustic_fops, +}; + +static int __init acoustic_init(void) +{ + mutex_init(&api_lock); + 
mutex_init(&rpc_connect_mutex); + return misc_register(&acoustic_misc); +} + +static void __exit acoustic_exit(void) +{ + misc_deregister(&acoustic_misc); +} + +module_init(acoustic_init); +module_exit(acoustic_exit); + +MODULE_AUTHOR("Laurence Chen "); +MODULE_DESCRIPTION("HTC acoustic driver"); +MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-msm/htc_acoustic_qsd.c b/arch/arm/mach-msm/htc_acoustic_qsd.c new file mode 100644 index 0000000000000..d925329e1fb96 --- /dev/null +++ b/arch/arm/mach-msm/htc_acoustic_qsd.c @@ -0,0 +1,449 @@ +/* arch/arm/mach-msm/htc_acoustic_qsd.c + * + * Copyright (C) 2009 HTC Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "smd_private.h" + +#define ACOUSTIC_IOCTL_MAGIC 'p' +#define ACOUSTIC_UPDATE_ADIE \ + _IOW(ACOUSTIC_IOCTL_MAGIC, 24, unsigned int) + +#define HTCACOUSTICPROG 0x30100003 +#define HTCACOUSTICVERS 0 +#define ONCRPC_ALLOC_ACOUSTIC_MEM_PROC (1) +#define ONCRPC_UPDATE_ADIE_PROC (2) +#define ONCRPC_ENABLE_AUX_PGA_LOOPBACK_PROC (3) +#define ONCRPC_FORCE_HEADSET_SPEAKER_PROC (4) +#define ONCRPC_SET_AUX_PGA_GAIN_PROC (5) + +#define HTC_ACOUSTIC_TABLE_SIZE (0x20000) + +#define D(fmt, args...) printk(KERN_INFO "htc-acoustic: "fmt, ##args) +#define E(fmt, args...) printk(KERN_ERR "htc-acoustic: "fmt, ##args) + +static uint32_t htc_acoustic_vir_addr; +static struct msm_rpc_endpoint *endpoint; +static struct mutex api_lock; +static struct mutex rpc_connect_lock; +static struct qsd_acoustic_ops *the_ops; +struct class *htc_class; +static int hac_enable_flag; +struct mutex acoustic_lock; + +void acoustic_register_ops(struct qsd_acoustic_ops *ops) +{ + the_ops = ops; +} + +static int is_rpc_connect(void) +{ + mutex_lock(&rpc_connect_lock); + if (endpoint == NULL) { + endpoint = msm_rpc_connect(HTCACOUSTICPROG, + HTCACOUSTICVERS, 0); + if (IS_ERR(endpoint)) { + pr_err("%s: init rpc failed! 
rc = %ld\n", + __func__, PTR_ERR(endpoint)); + mutex_unlock(&rpc_connect_lock); + return -1; + } + } + mutex_unlock(&rpc_connect_lock); + return 0; +} + +int enable_mic_bias(int on) +{ + D("%s called %d\n", __func__, on); + if (the_ops->enable_mic_bias) + the_ops->enable_mic_bias(on); + + return 0; +} + +int turn_mic_bias_on(int on) +{ + return enable_mic_bias(on); +} +EXPORT_SYMBOL(turn_mic_bias_on); + +int enable_mos_test(int enable) +{ + static int mos_test_enable; + int res = 0; + if (enable != mos_test_enable) { + D("%s called %d\n", __func__, enable); + if (enable) + res = q6audio_reinit_acdb("default_mos.acdb"); + else + res = q6audio_reinit_acdb("default.acdb"); + mos_test_enable = enable; + } + return res; +} +EXPORT_SYMBOL(enable_mos_test); + +int force_headset_speaker_on(int enable) +{ + struct speaker_headset_req { + struct rpc_request_hdr hdr; + uint32_t enable; + } spkr_req; + + D("%s called %d\n", __func__, enable); + + if (is_rpc_connect() == -1) + return -1; + + spkr_req.enable = cpu_to_be32(enable); + return msm_rpc_call(endpoint, + ONCRPC_FORCE_HEADSET_SPEAKER_PROC, + &spkr_req, sizeof(spkr_req), 5 * HZ); +} +EXPORT_SYMBOL(force_headset_speaker_on); + +int enable_aux_loopback(uint32_t enable) +{ + struct aux_loopback_req { + struct rpc_request_hdr hdr; + uint32_t enable; + } aux_req; + + D("%s called %d\n", __func__, enable); + + if (is_rpc_connect() == -1) + return -1; + + aux_req.enable = cpu_to_be32(enable); + return msm_rpc_call(endpoint, + ONCRPC_ENABLE_AUX_PGA_LOOPBACK_PROC, + &aux_req, sizeof(aux_req), 5 * HZ); +} +EXPORT_SYMBOL(enable_aux_loopback); + +int set_aux_gain(int level) +{ + struct aux_gain_req { + struct rpc_request_hdr hdr; + int level; + } aux_req; + + D("%s called %d\n", __func__, level); + + if (is_rpc_connect() == -1) + return -1; + + aux_req.level = cpu_to_be32(level); + return msm_rpc_call(endpoint, + ONCRPC_SET_AUX_PGA_GAIN_PROC, + &aux_req, sizeof(aux_req), 5 * HZ); +} +EXPORT_SYMBOL(set_aux_gain); + +static int acoustic_mmap(struct file *file, struct vm_area_struct *vma) +{ + unsigned long pgoff; + int rc = -EINVAL; + size_t size; + + D("mmap\n"); + + mutex_lock(&api_lock); + + size = vma->vm_end - vma->vm_start; + + if (vma->vm_pgoff != 0) { + E("mmap failed: page offset %lx\n", vma->vm_pgoff); + goto done; + } + + if (!htc_acoustic_vir_addr) { + E("mmap failed: smem region not allocated\n"); + rc = -EIO; + goto done; + } + + pgoff = MSM_SHARED_RAM_PHYS + + (htc_acoustic_vir_addr - (uint32_t)MSM_SHARED_RAM_BASE); + pgoff = ((pgoff + 4095) & ~4095); + htc_acoustic_vir_addr = ((htc_acoustic_vir_addr + 4095) & ~4095); + + if (pgoff <= 0) { + E("pgoff wrong. 
%ld\n", pgoff); + goto done; + } + + if (size <= HTC_ACOUSTIC_TABLE_SIZE) { + pgoff = pgoff >> PAGE_SHIFT; + } else { + E("size > HTC_ACOUSTIC_TABLE_SIZE %d\n", size); + goto done; + } + + vma->vm_flags |= VM_IO | VM_RESERVED; + rc = io_remap_pfn_range(vma, vma->vm_start, pgoff, + size, vma->vm_page_prot); + + if (rc < 0) + E("mmap failed: remap error %d\n", rc); + +done: mutex_unlock(&api_lock); + return rc; +} + +static int acoustic_open(struct inode *inode, struct file *file) +{ + int reply_value; + int rc = -EIO; + struct set_smem_req { + struct rpc_request_hdr hdr; + uint32_t size; + } req_smem; + + struct set_smem_rep { + struct rpc_reply_hdr hdr; + int n; + } rep_smem; + + D("open\n"); + + mutex_lock(&api_lock); + + if (!htc_acoustic_vir_addr) { + if (is_rpc_connect() == -1) + goto done; + + req_smem.size = cpu_to_be32(HTC_ACOUSTIC_TABLE_SIZE); + rc = msm_rpc_call_reply(endpoint, + ONCRPC_ALLOC_ACOUSTIC_MEM_PROC, + &req_smem, sizeof(req_smem), + &rep_smem, sizeof(rep_smem), + 5 * HZ); + + reply_value = be32_to_cpu(rep_smem.n); + if (reply_value != 0 || rc < 0) { + E("open failed: ALLOC_ACOUSTIC_MEM_PROC error %d.\n", + rc); + goto done; + } + htc_acoustic_vir_addr = + (uint32_t)smem_alloc(SMEM_ID_VENDOR1, + HTC_ACOUSTIC_TABLE_SIZE); + if (!htc_acoustic_vir_addr) { + E("open failed: smem_alloc error\n"); + goto done; + } + } + + rc = 0; +done: + mutex_unlock(&api_lock); + return rc; +} + +static int acoustic_release(struct inode *inode, struct file *file) +{ + D("release\n"); + return 0; +} + +static long acoustic_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int rc, reply_value; + + D("ioctl\n"); + + mutex_lock(&api_lock); + + switch (cmd) { + case ACOUSTIC_UPDATE_ADIE: { + struct update_adie_req { + struct rpc_request_hdr hdr; + int id; + } adie_req; + + struct update_adie_rep { + struct rpc_reply_hdr hdr; + int ret; + } adie_rep; + + D("ioctl: ACOUSTIC_UPDATE_ADIE called %d.\n", current->pid); + + adie_req.id = cpu_to_be32(-1); /* update all codecs */ + rc = msm_rpc_call_reply(endpoint, + ONCRPC_UPDATE_ADIE_PROC, &adie_req, + sizeof(adie_req), &adie_rep, + sizeof(adie_rep), 5 * HZ); + + reply_value = be32_to_cpu(adie_rep.ret); + if (reply_value != 0 || rc < 0) { + E("ioctl failed: ONCRPC_UPDATE_ADIE_PROC "\ + "error %d.\n", rc); + if (rc >= 0) + rc = -EIO; + break; + } + D("ioctl: ONCRPC_UPDATE_ADIE_PROC success.\n"); + break; + } + default: + E("ioctl: invalid command\n"); + rc = -EINVAL; + } + + mutex_unlock(&api_lock); + return rc; +} + +struct rpc_set_uplink_mute_args { + int mute; +}; + +static struct file_operations acoustic_fops = { + .owner = THIS_MODULE, + .open = acoustic_open, + .release = acoustic_release, + .mmap = acoustic_mmap, + .unlocked_ioctl = acoustic_ioctl, +}; + +static struct miscdevice acoustic_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "htc-acoustic", + .fops = &acoustic_fops, +}; + +static ssize_t htc_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + + char *s = buf; + mutex_lock(&acoustic_lock); + s += sprintf(s, "%d\n", hac_enable_flag); + mutex_unlock(&acoustic_lock); + return s - buf; + +} +static ssize_t htc_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + + if (count == (strlen("enable") + 1) && + strncmp(buf, "enable", strlen("enable")) == 0) { + mutex_lock(&acoustic_lock); + + if (hac_enable_flag == 0) + pr_info("Enable HAC\n"); + hac_enable_flag = 1; + + mutex_unlock(&acoustic_lock); + return count; + } + if (count == (strlen("disable") + 1) && + 
strncmp(buf, "disable", strlen("disable")) == 0) { + mutex_lock(&acoustic_lock); + + if (hac_enable_flag == 1) + pr_info("Disable HAC\n"); + hac_enable_flag = 0; + + mutex_unlock(&acoustic_lock); + return count; + } + pr_err("hac_flag_store: invalid argument\n"); + return -EINVAL; + +} + +static DEVICE_ATTR(flag, 0666, htc_show, htc_store); + +static int __init acoustic_init(void) +{ + int ret = 0; + mutex_init(&api_lock); + mutex_init(&rpc_connect_lock); + ret = misc_register(&acoustic_misc); + if (ret < 0) { + pr_err("failed to register misc device!\n"); + return ret; + } + + htc_class = class_create(THIS_MODULE, "htc_acoustic"); + if (IS_ERR(htc_class)) { + ret = PTR_ERR(htc_class); + htc_class = NULL; + goto err_create_class; + } + acoustic_misc.this_device = + device_create(htc_class, NULL, 0 , NULL, "hac"); + if (IS_ERR(acoustic_misc.this_device)) { + ret = PTR_ERR(acoustic_misc.this_device); + acoustic_misc.this_device = NULL; + goto err_create_class; + } + + ret = device_create_file(acoustic_misc.this_device, &dev_attr_flag); + if (ret < 0) + goto err_create_class_device; + + mutex_init(&acoustic_lock); + +#if defined(CONFIG_HTC_HEADSET_MGR) + struct headset_notifier notifier; + notifier.id = HEADSET_REG_MIC_BIAS; + notifier.func = enable_mic_bias; + headset_notifier_register(¬ifier); +#endif + + return 0; + +err_create_class_device: + device_destroy(htc_class, 0); +err_create_class: + return ret; + +} + +static void __exit acoustic_exit(void) +{ + device_remove_file(acoustic_misc.this_device, &dev_attr_flag); + device_destroy(htc_class, 0); + class_destroy(htc_class); + + misc_deregister(&acoustic_misc); +} + +module_init(acoustic_init); +module_exit(acoustic_exit); + diff --git a/arch/arm/mach-msm/htc_akm_cal.c b/arch/arm/mach-msm/htc_akm_cal.c new file mode 100644 index 0000000000000..943083fe0fbe8 --- /dev/null +++ b/arch/arm/mach-msm/htc_akm_cal.c @@ -0,0 +1,64 @@ +/* arch/arm/mach-msm/htc_akm_cal.c + * + * Code to extract compass calibration information from ATAG set up + * by the bootloader. + * + * Copyright (C) 2007-2008 HTC Corporation + * Author: Farmer Tseng + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include + +#include + +/* configuration tags specific to AKM8976 */ +#define ATAG_AKM8976 0x89768976 /* AKM8976 */ + +#define MAX_CALI_SIZE 0x1000U + +static char akm_cal_ram[MAX_CALI_SIZE]; + +char *get_akm_cal_ram(void) +{ + return(akm_cal_ram); +} +EXPORT_SYMBOL(get_akm_cal_ram); + +static int __init parse_tag_akm(const struct tag *tag) +{ + unsigned char *dptr = (unsigned char *)(&tag->u); + unsigned size; + + size = min((tag->hdr.size - 2) * sizeof(__u32), MAX_CALI_SIZE); + + printk(KERN_INFO "AKM Data size = %d , 0x%x, size = %d\n", + tag->hdr.size, tag->hdr.tag, size); + +#ifdef ATAG_COMPASS_DEBUG + unsigned i; + unsigned char *ptr; + + ptr = dptr; + printk(KERN_INFO + "AKM Data size = %d , 0x%x\n", + tag->hdr.size, tag->hdr.tag); + for (i = 0; i < size; i++) + printk(KERN_INFO "%02x ", *ptr++); +#endif + memcpy((void *)akm_cal_ram, (void *)dptr, size); + return 0; +} + +__tagtable(ATAG_AKM8976, parse_tag_akm); diff --git a/arch/arm/mach-msm/htc_awb_cal.c b/arch/arm/mach-msm/htc_awb_cal.c new file mode 100644 index 0000000000000..012f063ada5c1 --- /dev/null +++ b/arch/arm/mach-msm/htc_awb_cal.c @@ -0,0 +1,146 @@ +/* arch/arm/mach-msm/htc_awb_cal.c */ +/* Code to extract Camera AWB calibration information from ATAG +set up by the bootloader. + +Copyright (C) 2008 Google, Inc. +Author: Dmitry Shmidt + +This software is licensed under the terms of the GNU General Public +License version 2, as published by the Free Software Foundation, and +may be copied, distributed, and modified under those terms. +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+*/ + +#include +#include +#include +#include +#include + +/* for outputing file to filesystem : /data/awb_calibration_data_hboot.txt */ +#include +#include + +/* configuration tags specific to msm */ +#define ATAG_MSM_AWB_CAL 0x59504550 /* MSM CAMERA AWB Calibration */ + +#ifdef CONFIG_ARCH_MSM7X30 +#define AWB_CAL_MAX_SIZE 0x1000U /* 0x1000 = 4096 bytes */ +#else +#define AWB_CAL_MAX_SIZE 0x800U /* 0x800 = 2048 bytes */ +#endif + +struct qct_lsc_struct{ + unsigned long int lsc_verify; + unsigned long int lsc_fuseid[4]; + float pCalcParam[17*13*4]; + unsigned long int lsc_checksum; +}; + +struct qct_awb_lsc_struct{ + unsigned long int caBuff[8];/* AWB Calibartion */ + struct qct_lsc_struct qct_lsc_data;/* LSC Calibration */ +}; + +static unsigned char cam_awb_ram[AWB_CAL_MAX_SIZE]; + +int gCAM_AWB_CAL_LEN; + +unsigned char *get_cam_awb_cal(void) +{ + return cam_awb_ram; +} + +EXPORT_SYMBOL(get_cam_awb_cal); + +static int __init parse_tag_cam_awb_cal(const struct tag *tag) +{ + unsigned char *dptr = (unsigned char *)(&tag->u); + unsigned size; + + size = min((tag->hdr.size - 2) * sizeof(__u32), AWB_CAL_MAX_SIZE); + + printk(KERN_INFO "CAM_AWB_CAL Data size = %d , 0x%x, size = %d\n", + tag->hdr.size, tag->hdr.tag, size); + + gCAM_AWB_CAL_LEN = size; + memcpy(cam_awb_ram, dptr, size); + + +#ifdef ATAG_CAM_AWB_CAL_DEBUG + { + int *pint, i; + + printk(KERN_INFO "parse_tag_cam_awb_cal():\n"); + + pint = (int *)cam_awb_ram; + + for (i = 0; i < 1024; i++) + printk(KERN_INFO "%x\n", pint[i]); + + } +#endif + + return 0; +} + +__tagtable(ATAG_MSM_AWB_CAL, parse_tag_cam_awb_cal); + + +static ssize_t awb_calibration_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t ret = 0; + unsigned char *ptr; + + ptr = get_cam_awb_cal(); + /* fixed : workaround because of defined 8 parameters now */ + + ret = sizeof(struct qct_awb_lsc_struct);/* 8*4; */ + memcpy(buf, ptr, ret); + + +#ifdef ATAG_CAM_AWB_CAL_DEBUG + { + int i, *pint; + printk(KERN_INFO "awb_calibration_show():\n"); + pint = (int *)buf; + for (i = 0; i < 898; i++) + printk(KERN_INFO "%x\n", pint[i]); + + } +#endif + + return ret; +} + +static DEVICE_ATTR(awb_cal, 0444, awb_calibration_show, NULL); + +static struct kobject *cam_awb_cal; + +static int cam_get_awb_cal(void) +{ + int ret ; + + /* Create /sys/android_camera_awb_cal/awb_cal */ + cam_awb_cal = kobject_create_and_add("android_camera_awb_cal", NULL); + if (cam_awb_cal == NULL) { + pr_info("cam_get_awb_cal: subsystem_register failed\n"); + ret = -ENOMEM; + return ret ; + } + + /* dev_attr_[register_name]<== DEVICE_ATTR(awb_cal, 0444, + awb_calibration_show, NULL); */ + ret = sysfs_create_file(cam_awb_cal, &dev_attr_awb_cal.attr); + if (ret) { + pr_info("cam_get_awb_cal:: sysfs_create_file failed\n"); + kobject_del(cam_awb_cal); + } + return 0 ; +} + +late_initcall(cam_get_awb_cal); diff --git a/arch/arm/mach-msm/htc_battery.c b/arch/arm/mach-msm/htc_battery.c new file mode 100644 index 0000000000000..77781093a44c7 --- /dev/null +++ b/arch/arm/mach-msm/htc_battery.c @@ -0,0 +1,1751 @@ +/* arch/arm/mach-msm/htc_battery.c + * + * Copyright (C) 2008 HTC Corporation. + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* Jay, to register display notifier */ +#include +#include +#include +#include +#ifdef CONFIG_HTC_BATTCHG_SMEM +#include "smd_private.h" +#endif + +#if defined(CONFIG_TROUT_BATTCHG_DOCK) +#include +#endif +#ifdef CONFIG_BATTERY_DS2784 +#include +#elif CONFIG_BATTERY_DS2746 +#include +#endif + +#include + +static struct wake_lock vbus_wake_lock; + +enum { + HTC_BATT_DEBUG_M2A_RPC = 1U << 0, + HTC_BATT_DEBUG_A2M_RPC = 1U << 1, + HTC_BATT_DEBUG_UEVT = 1U << 2, + HTC_BATT_DEBUG_USER_QUERY = 1U << 3, + HTC_BATT_DEBUG_USB_NOTIFY = 1U << 4, + HTC_BATT_DEBUG_SMEM = 1U << 5, +}; +static int htc_batt_debug_mask = HTC_BATT_DEBUG_M2A_RPC | HTC_BATT_DEBUG_A2M_RPC + | HTC_BATT_DEBUG_UEVT | HTC_BATT_DEBUG_USB_NOTIFY | HTC_BATT_DEBUG_SMEM; +module_param_named(debug_mask, htc_batt_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); + +#define BATT_LOG(x...) do { \ +struct timespec ts; \ +struct rtc_time tm; \ +getnstimeofday(&ts); \ +rtc_time_to_tm(ts.tv_sec, &tm); \ +printk(KERN_INFO "batt: " x); \ +printk(" at %lld (%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", \ +ktime_to_ns(ktime_get()), tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, \ +tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); \ +} while (0) + +#define BATT_ERR(x...) do { \ +struct timespec ts; \ +struct rtc_time tm; \ +getnstimeofday(&ts); \ +rtc_time_to_tm(ts.tv_sec, &tm); \ +printk(KERN_ERR "batt: err:" x); \ +printk(" at %lld (%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", \ +ktime_to_ns(ktime_get()), tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, \ +tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); \ +} while (0) + +/* rpc related */ +#define APP_BATT_PDEV_NAME "rs30100001:00000000" +#define APP_BATT_PROG 0x30100001 +#define APP_BATT_VER MSM_RPC_VERS(0,0) +#define HTC_PROCEDURE_BATTERY_NULL 0 +#define HTC_PROCEDURE_GET_BATT_LEVEL 1 +#define HTC_PROCEDURE_GET_BATT_INFO 2 +#define HTC_PROCEDURE_GET_CABLE_STATUS 3 +#define HTC_PROCEDURE_SET_BATT_DELTA 4 +#define HTC_PROCEDURE_CHARGER_SWITCH 6 +#define HTC_PROCEDURE_SET_FULL_LEVEL 7 +#define HTC_PROCEDURE_GET_USB_ACCESSORY_ADC_LEVEL 10 + +const char *charger_tags[] = {"none", "USB", "AC"}; + +struct htc_battery_info { + int device_id; + int present; + unsigned long update_time; + + /* lock to protect the battery info */ + struct mutex lock; + + /* lock held while calling the arm9 to query the battery info */ + struct mutex rpc_lock; + struct battery_info_reply rep; + int (*func_show_batt_attr)(struct device_attribute *attr, char *buf); + int gpio_mbat_in; + int gpio_usb_id; + int gpio_mchg_en_n; + int gpio_iset; + int guage_driver; + int m2a_cable_detect; + int charger; +}; + +static struct msm_rpc_endpoint *endpoint; + +static struct htc_battery_info htc_batt_info; + +/* Remove cache mechanism to prevent cable status not sync. 
*/ +static unsigned int cache_time; + +static int htc_battery_initial = 0; +static int htc_full_level_flag = 0; + +static enum power_supply_property htc_battery_properties[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_CAPACITY, +}; + +static enum power_supply_property htc_power_properties[] = { + POWER_SUPPLY_PROP_ONLINE, +}; + +static char *supply_list[] = { + "battery", +}; + +/* HTC dedicated attributes */ +static ssize_t htc_battery_show_property(struct device *dev, + struct device_attribute *attr, + char *buf); + +static int htc_power_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val); + +static int htc_battery_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val); + +static struct power_supply htc_power_supplies[] = { + { + .name = "battery", + .type = POWER_SUPPLY_TYPE_BATTERY, + .properties = htc_battery_properties, + .num_properties = ARRAY_SIZE(htc_battery_properties), + .get_property = htc_battery_get_property, + }, + { + .name = "usb", + .type = POWER_SUPPLY_TYPE_USB, + .supplied_to = supply_list, + .num_supplicants = ARRAY_SIZE(supply_list), + .properties = htc_power_properties, + .num_properties = ARRAY_SIZE(htc_power_properties), + .get_property = htc_power_get_property, + }, + { + .name = "ac", + .type = POWER_SUPPLY_TYPE_MAINS, + .supplied_to = supply_list, + .num_supplicants = ARRAY_SIZE(supply_list), + .properties = htc_power_properties, + .num_properties = ARRAY_SIZE(htc_power_properties), + .get_property = htc_power_get_property, + }, +}; + +static int update_batt_info(void); +static void usb_status_notifier_func(int online); +//static int g_usb_online; +static struct t_usb_status_notifier usb_status_notifier = { + .name = "htc_battery", + .func = usb_status_notifier_func, +}; + +/* Move cable detection/notification to standard PMIC RPC. */ +static BLOCKING_NOTIFIER_HEAD(cable_status_notifier_list); +int register_notifier_cable_status(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&cable_status_notifier_list, nb); +} + +int unregister_notifier_cable_status(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&cable_status_notifier_list, nb); +} + +/* -------------------------------------------------------------------------- */ +/* For sleep charging screen. */ +static int zcharge_enabled; +static int htc_is_zcharge_enabled(void) +{ + return zcharge_enabled; +} +static int __init enable_zcharge_setup(char *str) +{ + int cal = simple_strtol(str, NULL, 0); + + zcharge_enabled = cal; + return 1; +} +__setup("enable_zcharge=", enable_zcharge_setup); + +static int htc_is_cable_in(void) +{ + if (!htc_batt_info.update_time) { + BATT_ERR("%s: battery driver hasn't been initialized yet.", __func__); + return -EINVAL; + } + return (htc_batt_info.rep.charging_source != CHARGER_BATTERY) ? 1 : 0; +} + +/** + * htc_power_policy - check if it obeys our policy + * return 0 for no errors, to indicate it follows policy. + * non zero otherwise. 
+ **/ +static int __htc_power_policy(void) +{ + if (!zcharge_enabled) + return 0; + + if (htc_is_cable_in()) + return 1; + + return 0; +} + +/* + * Jay, 7/1/09' + */ +static int htc_power_policy(struct notifier_block *nfb, + unsigned long action, void *ignored) +{ + int rc; + switch (action) { + case NOTIFY_POWER: + pr_info("%s: enter.\n", __func__); + rc = __htc_power_policy(); + if (rc) + return NOTIFY_STOP; + else + return NOTIFY_OK; + } + return NOTIFY_DONE; /* we did not care other action here */ +} + +unsigned int batt_get_status(enum power_supply_property psp) +{ + union power_supply_propval val; + + if (update_batt_info()) + return -EINVAL; + + switch (psp) { + case POWER_SUPPLY_PROP_CAPACITY: + mutex_lock(&htc_batt_info.lock); + val.intval = htc_batt_info.rep.level; + mutex_unlock(&htc_batt_info.lock); + /* prevent shutdown before battery driver ready. */ + if (htc_batt_info.device_id == 0) + val.intval = 55; /* 55 == ?? */ + break; + case POWER_SUPPLY_PROP_TEMP: + mutex_lock(&htc_batt_info.lock); + val.intval = htc_batt_info.rep.batt_temp; + mutex_unlock(&htc_batt_info.lock); + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + mutex_lock(&htc_batt_info.lock); + val.intval = htc_batt_info.rep.batt_vol; + mutex_unlock(&htc_batt_info.lock); + break; + default: + return -EINVAL; + } + + return val.intval; +} + +#if defined(CONFIG_DEBUG_FS) +static int htc_battery_set_charging(enum batt_ctl_t ctl); +static int batt_debug_set(void *data, u64 val) +{ + return htc_battery_set_charging((enum batt_ctl_t) val); +} + +static int batt_debug_get(void *data, u64 *val) +{ + return -ENOSYS; +} + +DEFINE_SIMPLE_ATTRIBUTE(batt_debug_fops, batt_debug_get, batt_debug_set, "%llu\n"); +static int __init batt_debug_init(void) +{ + struct dentry *dent; + + dent = debugfs_create_dir("htc_battery", 0); + if (IS_ERR(dent)) + return PTR_ERR(dent); + + debugfs_create_file("charger_state", 0644, dent, NULL, &batt_debug_fops); + + return 0; +} + +device_initcall(batt_debug_init); +#endif + +static int init_batt_gpio(void) +{ + + if (htc_batt_info.gpio_mbat_in > 0 && + gpio_request(htc_batt_info.gpio_mbat_in, "batt_detect") < 0) + goto gpio_failed; + if (htc_batt_info.gpio_mchg_en_n > 0 && + gpio_request(htc_batt_info.gpio_mchg_en_n, "charger_en") < 0) + goto gpio_failed; + if (htc_batt_info.gpio_iset > 0 && + gpio_request(htc_batt_info.gpio_iset, "charge_current") < 0) + goto gpio_failed; + + return 0; + +gpio_failed: + return -EINVAL; + +} + +/* + * battery_charging_ctrl - battery charing control. 
+ * @ctl: battery control command + * + */ +int battery_charging_ctrl(enum batt_ctl_t ctl) +{ + int result = 0; + + switch (ctl) { + case DISABLE: + BATT_LOG("charger OFF"); + /* 0 for enable; 1 disable */ + result = gpio_direction_output(htc_batt_info.gpio_mchg_en_n, 1); + break; + case ENABLE_SLOW_CHG: + BATT_LOG("charger ON (SLOW)"); + result = gpio_direction_output(htc_batt_info.gpio_iset, 0); + result = gpio_direction_output(htc_batt_info.gpio_mchg_en_n, 0); + break; + case ENABLE_FAST_CHG: + BATT_LOG("charger ON (FAST)"); + result = gpio_direction_output(htc_batt_info.gpio_iset, 1); + result = gpio_direction_output(htc_batt_info.gpio_mchg_en_n, 0); + break; + default: + BATT_ERR("%s: Not supported battery ctr called.!", __func__); + result = -EINVAL; + break; + } + + return result; +} + +static int htc_battery_set_charging(enum batt_ctl_t ctl) +{ + int rc; + + if ((rc = battery_charging_ctrl(ctl)) < 0) + goto result; + + if (!htc_battery_initial) { + htc_batt_info.rep.charging_enabled = ctl & 0x3; + } else { + mutex_lock(&htc_batt_info.lock); + htc_batt_info.rep.charging_enabled = ctl & 0x3; + mutex_unlock(&htc_batt_info.lock); + } +result: + return rc; +} + +static int htc_battery_status_update(u32 curr_level) +{ + int notify; + if (!htc_battery_initial) + return 0; + + mutex_lock(&htc_batt_info.lock); + notify = (htc_batt_info.rep.level != curr_level); + htc_batt_info.rep.level = curr_level; + mutex_unlock(&htc_batt_info.lock); +#if 0 + if (notify) { + power_supply_changed(&htc_power_supplies[CHARGER_BATTERY]); + if (htc_batt_debug_mask & HTC_BATT_DEBUG_UEVT) + BATT_LOG("power_supply_changed: battery"); + } +#else + /* we don't check level here for charging over temp RPC call */ + power_supply_changed(&htc_power_supplies[CHARGER_BATTERY]); + if (htc_batt_debug_mask & HTC_BATT_DEBUG_UEVT) + BATT_LOG("batt:power_supply_changed: battery"); +#endif + return 0; +} + +static void update_wake_lock(int status) +{ + if (status == CHARGER_USB) { + wake_lock(&vbus_wake_lock); + } else if (__htc_power_policy()) { + /* Lock suspend for DOPOD charging animation */ + wake_lock(&vbus_wake_lock); + } else { + /* give userspace some time to see the uevent and update + * LED state or whatnot... 
+ */ + wake_lock_timeout(&vbus_wake_lock, HZ * 5); + } +} + +#ifdef CONFIG_HTC_BATTCHG_SMEM +static int htc_set_smem_cable_type(u32 cable_type); +#else +static int htc_set_smem_cable_type(u32 cable_type) { return -1; } +#endif +#if 1 //JH //this is for packet filter (notify port list while USB in/out) +int update_port_list_charging_state(int enable); +#endif +static int htc_cable_status_update(int status) +{ + int rc = 0; +// unsigned last_source; + + if (!htc_battery_initial) + return 0; + + if (status < CHARGER_BATTERY || status > CHARGER_AC) { + BATT_ERR("%s: Not supported cable status received!", __func__); + return -EINVAL; + } + + mutex_lock(&htc_batt_info.lock); +#if 1 + pr_info("batt: %s: %d -> %d\n", __func__, htc_batt_info.rep.charging_source, status); + if (status == htc_batt_info.rep.charging_source) { + /* When cable overvoltage(5V => 7V) A9 will report the same source, so only sent the uevent */ + if (status == CHARGER_USB) { + power_supply_changed(&htc_power_supplies[CHARGER_USB]); + if (htc_batt_debug_mask & HTC_BATT_DEBUG_UEVT) + BATT_LOG("batt:(htc_cable_status_update)power_supply_changed: OverVoltage"); + } + mutex_unlock(&htc_batt_info.lock); + return 0; + } + + /* TODO: replace charging_source to vbus_present */ + htc_batt_info.rep.charging_source = status; + /* ARM9 should know the status it notifies, + * keep this code for old projects. */ + /* htc_set_smem_cable_type(status); */ + + update_wake_lock(status); + /*ARM9 report CHARGER_AC while plug in htc_adaptor which is identify by usbid*/ + /*don't need to notify usb driver*/ + if ((htc_batt_info.guage_driver == GUAGE_MODEM) && (status == CHARGER_AC)) { + htc_set_smem_cable_type(CHARGER_AC); + power_supply_changed(&htc_power_supplies[CHARGER_AC]); + } else + msm_hsusb_set_vbus_state(!!htc_batt_info.rep.charging_source); + + /* TODO: use power_supply_change to notify battery drivers. */ + if (htc_batt_info.guage_driver == GUAGE_DS2784 || + htc_batt_info.guage_driver == GUAGE_DS2746) + blocking_notifier_call_chain(&cable_status_notifier_list, + status, NULL); + + if (status == CHARGER_BATTERY) { + htc_set_smem_cable_type(CHARGER_BATTERY); + power_supply_changed(&htc_power_supplies[CHARGER_BATTERY]); + if (htc_batt_debug_mask & HTC_BATT_DEBUG_UEVT) + BATT_LOG("batt:(htc_cable_status_update)power_supply_changed: battery"); + } + +#else + /* A9 reports USB charging when helf AC cable in and China AC charger. */ + /* notify userspace USB charging first, + and then usb driver will notify AC while D+/D- Line short. */ + /* China AC detection: + * Write SMEM as USB first, and update SMEM to AC + * if receives AC notification */ + last_source = htc_batt_info.rep.charging_source; + if (status == CHARGER_USB && g_usb_online == 0) { + htc_set_smem_cable_type(CHARGER_USB); + htc_batt_info.rep.charging_source = CHARGER_USB; + } else { + htc_set_smem_cable_type(status); + htc_batt_info.rep.charging_source = status; + /* usb driver will not notify usb offline. 
*/ + if (status == CHARGER_BATTERY && g_usb_online != 0) + g_usb_online = 0; + } + + msm_hsusb_set_vbus_state(status == CHARGER_USB); + if (htc_batt_info.guage_driver == GUAGE_DS2784 || + htc_batt_info.guage_driver == GUAGE_DS2746) + blocking_notifier_call_chain(&cable_status_notifier_list, + htc_batt_info.rep.charging_source, NULL); + + if (htc_batt_info.rep.charging_source != last_source) { +#if 1 //JH //this is for packet filter (notify port list while USB in/out) + update_port_list_charging_state(!(htc_batt_info.rep.charging_source == CHARGER_BATTERY)); +#endif + /* Lock suspend only when USB in for ADB or other USB functions. */ + if (htc_batt_info.rep.charging_source == CHARGER_USB) { + wake_lock(&vbus_wake_lock); + } else if (__htc_power_policy()) { + /* Lock suspend for DOPOD charging animation */ + wake_lock(&vbus_wake_lock); + } else { + if (htc_batt_info.rep.charging_source == CHARGER_AC + && last_source == CHARGER_USB) + BATT_ERR("%s: USB->AC\n", __func__); + /* give userspace some time to see the uevent and update + * LED state or whatnot... + */ + wake_lock_timeout(&vbus_wake_lock, HZ * 5); + } + if (htc_batt_info.rep.charging_source == CHARGER_BATTERY || last_source == CHARGER_BATTERY) + power_supply_changed(&htc_power_supplies[CHARGER_BATTERY]); + if (htc_batt_info.rep.charging_source == CHARGER_USB || last_source == CHARGER_USB) + power_supply_changed(&htc_power_supplies[CHARGER_USB]); + if (htc_batt_info.rep.charging_source == CHARGER_AC || last_source == CHARGER_AC) + power_supply_changed(&htc_power_supplies[CHARGER_AC]); + if (htc_batt_debug_mask & HTC_BATT_DEBUG_UEVT) + BATT_LOG("power_supply_changed: %s -> %s", + charger_tags[last_source], charger_tags[htc_batt_info.rep.charging_source]); + } +#endif + mutex_unlock(&htc_batt_info.lock); + + return rc; +} + +#ifdef CONFIG_USB_ACCESSORY_DETECT_BY_ADC +int htc_get_usb_accessory_adc_level(uint32_t *buffer) +{ + struct rpc_request_hdr req; + + struct htc_get_usb_adc_value_rep { + struct rpc_reply_hdr hdr; + uint32_t adc_value; + } rep; + + int rc; + printk(KERN_INFO "%s\n", __func__); + + if (buffer == NULL) { + printk(KERN_INFO "%s: buffer null\n", __func__); + return -EINVAL; + } + + rc = msm_rpc_call_reply(endpoint, HTC_PROCEDURE_GET_USB_ACCESSORY_ADC_LEVEL, + &req, sizeof(req), + &rep, sizeof(rep), + 5 * HZ); + if (rc < 0) { + printk(KERN_INFO "%s: msm_rpc_call_reply fail\n", __func__); + return rc; + } + *buffer = be32_to_cpu(rep.adc_value); + + printk(KERN_INFO "%s: adc = %d\n", __func__, *buffer); + return 0; +} +EXPORT_SYMBOL(htc_get_usb_accessory_adc_level); +#endif + +/* A9 reports USB charging when helf AC cable in and China AC charger. */ +/* notify userspace USB charging first, +and then usb driver will notify AC while D+/D- Line short. */ +static void usb_status_notifier_func(int online) +{ +#if 1 + pr_info("batt:online=%d",online); + /* TODO: replace charging_source to usb_status */ + htc_batt_info.rep.charging_source = online; + htc_set_smem_cable_type(htc_batt_info.rep.charging_source); + + /* TODO: use power_supply_change to notify battery drivers. 
*/ + if (htc_batt_info.guage_driver == GUAGE_DS2784 || htc_batt_info.guage_driver == GUAGE_DS2746) + blocking_notifier_call_chain(&cable_status_notifier_list, + htc_batt_info.rep.charging_source, NULL); + + power_supply_changed(&htc_power_supplies[CHARGER_AC]); + power_supply_changed(&htc_power_supplies[CHARGER_USB]); + power_supply_changed(&htc_power_supplies[CHARGER_BATTERY]); + update_wake_lock(htc_batt_info.rep.charging_source); +#else + mutex_lock(&htc_batt_info.lock); + if (htc_batt_debug_mask & HTC_BATT_DEBUG_USB_NOTIFY) + BATT_LOG("%s: online=%d, g_usb_online=%d", __func__, online, g_usb_online); + if (g_usb_online != online) { + g_usb_online = online; + if (online == CHARGER_AC && htc_batt_info.rep.charging_source == CHARGER_USB) { + mutex_unlock(&htc_batt_info.lock); + htc_cable_status_update(CHARGER_AC); + mutex_lock(&htc_batt_info.lock); + } + } + mutex_unlock(&htc_batt_info.lock); +#endif +} + +static int htc_get_batt_info(struct battery_info_reply *buffer) +{ + struct rpc_request_hdr req; + + struct htc_get_batt_info_rep { + struct rpc_reply_hdr hdr; + struct battery_info_reply info; + } rep; + + int rc; + + if (buffer == NULL) + return -EINVAL; + + rc = msm_rpc_call_reply(endpoint, HTC_PROCEDURE_GET_BATT_INFO, + &req, sizeof(req), + &rep, sizeof(rep), + 5 * HZ); + if ( rc < 0 ) + return rc; + + mutex_lock(&htc_batt_info.lock); + buffer->batt_id = be32_to_cpu(rep.info.batt_id); + buffer->batt_vol = be32_to_cpu(rep.info.batt_vol); + buffer->batt_temp = be32_to_cpu(rep.info.batt_temp); + buffer->batt_current = be32_to_cpu(rep.info.batt_current); + buffer->level = be32_to_cpu(rep.info.level); + /* Move the rules of charging_source to cable_status_update. */ + /* buffer->charging_source = be32_to_cpu(rep.info.charging_source); */ + buffer->charging_enabled = be32_to_cpu(rep.info.charging_enabled); + buffer->full_bat = be32_to_cpu(rep.info.full_bat); + /* Over_vchg only update in SMEM from A9 */ + /* buffer->over_vchg = be32_to_cpu(rep.info.over_vchg); */ + mutex_unlock(&htc_batt_info.lock); + + if (htc_batt_debug_mask & HTC_BATT_DEBUG_A2M_RPC) + BATT_LOG("A2M_RPC: get_batt_info: batt_id=%d, batt_vol=%d, batt_temp=%d, " + "batt_current=%d, level=%d, charging_source=%d, " + "charging_enabled=%d, full_bat=%d, over_vchg=%d", + buffer->batt_id, buffer->batt_vol, buffer->batt_temp, + buffer->batt_current, buffer->level, buffer->charging_source, + buffer->charging_enabled, buffer->full_bat, buffer->over_vchg); + + return 0; +} + +#ifdef CONFIG_HTC_BATTCHG_SMEM +struct htc_batt_info_full { + u32 batt_id; + u32 batt_vol; + u32 batt_vol_last; + u32 batt_temp; + s32 batt_current; + s32 batt_current_last; + u32 batt_discharge_current; + + u32 VREF_2; + u32 VREF; + u32 ADC4096_VREF; + + u32 Rtemp; + s32 Temp; + s32 Temp_last; + + u32 pd_M; + u32 MBAT_pd; + s32 I_MBAT; + + u32 pd_temp; + u32 percent_last; + u32 percent_update; + u32 dis_percent; + + u32 vbus; + u32 usbid; + u32 charging_source; + + u32 MBAT_IN; + u32 full_bat; + + u32 eval_current; + u32 eval_current_last; + u32 charging_enabled; + + u32 timeout; + u32 fullcharge; + u32 level; + u32 delta; + + u32 chg_time; + s32 level_change; + u32 sleep_timer_count; + u32 OT_led_on; + u32 overloading_charge; + + u32 a2m_cable_type; + u32 vchg; // VCHG => 0: Not, 1: In + u32 over_vchg; /*over voltage charger detection, 0:VCHG normal(below 6V) 1:VCHG over(upper 6V)*/ + u32 reserve4; + u32 reserve5; +}; + +/* SMEM_BATT_INFO is allocated by A9 after first A2M RPC is sent. 
*/ +static struct htc_batt_info_full *smem_batt_info; + +static int htc_get_batt_info_smem(struct battery_info_reply *buffer) +{ + if (!smem_batt_info) { + smem_batt_info = smem_alloc(SMEM_BATT_INFO, + sizeof(struct htc_batt_info_full)); + if (!smem_batt_info) { + BATT_ERR("battery SMEM allocate fail, " + "use RPC instead of"); + return htc_get_batt_info(buffer); + } + } + + if (!buffer) + return -EINVAL; + + mutex_lock(&htc_batt_info.lock); + buffer->batt_id = smem_batt_info->batt_id; + buffer->batt_vol = smem_batt_info->batt_vol; + buffer->batt_temp = smem_batt_info->Temp; + buffer->batt_current = smem_batt_info->batt_current; + buffer->eval_current = smem_batt_info->eval_current; + /* Fix issue that recharging percent drop to 99%. */ + /* The level in SMEM is for A9 internal use, + * always use value reported by M2A level update RPC. */ +#if 0 + buffer->level = smem_batt_info->percent_update; +#endif + /* Move the rules of charging_source to cable_status_update. */ + /* buffer->charging_source = be32_to_cpu(smem_batt_info->charging_source); */ + buffer->charging_enabled = smem_batt_info->charging_enabled; + buffer->full_bat = smem_batt_info->full_bat; + buffer->over_vchg = smem_batt_info->over_vchg; + mutex_unlock(&htc_batt_info.lock); + + if (htc_batt_debug_mask & HTC_BATT_DEBUG_SMEM) + BATT_LOG("SMEM_BATT: get_batt_info: batt_id=%d, batt_vol=%d, batt_temp=%d, " + "batt_current=%d, eval_current=%d, level=%d, charging_source=%d, " + "charging_enabled=%d, full_bat=%d, over_vchg=%d", + buffer->batt_id, buffer->batt_vol, buffer->batt_temp, + buffer->batt_current, buffer->eval_current, buffer->level, buffer->charging_source, + buffer->charging_enabled, buffer->full_bat, buffer->over_vchg); + + return 0; +} + +static ssize_t htc_battery_show_smem(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int len = 0; + + if (!smem_batt_info) { + smem_batt_info = smem_alloc(SMEM_BATT_INFO, + sizeof(struct htc_batt_info_full)); + if (!smem_batt_info) { + BATT_ERR("Show SMEM: allocate fail"); + return 0; + } + } + + if (!strcmp(attr->attr.name, "smem_raw")) { + len = sizeof(struct htc_batt_info_full); + memcpy(buf, smem_batt_info, len); + } else if (!strcmp(attr->attr.name, "smem_text")) { + len += scnprintf(buf + len, PAGE_SIZE - len, + "batt_id: %d\n" + "batt_vol: %d\n" + "batt_vol_last: %d\n" + "batt_temp: %d\n" + "batt_current: %d\n" + "batt_current_last: %d\n" + "batt_discharge_current: %d\n" + "VREF_2: %d\n" + "VREF: %d\n" + "ADC4096_VREF: %d\n" + "Rtemp: %d\n" + "Temp: %d\n" + "Temp_last: %d\n" + "pd_M: %d\n" + "MBAT_pd: %d\n" + "I_MBAT: %d\n" + "pd_temp: %d\n" + "percent_last: %d\n" + "percent_update: %d\n" + "dis_percent: %d\n" + "vbus: %d\n" + "usbid: %d\n" + "charging_source: %d\n" + "MBAT_IN: %d\n" + "full_bat: %d\n" + "eval_current: %d\n" + "eval_current_last: %d\n" + "charging_enabled: %d\n" + "timeout: %d\n" + "fullcharge: %d\n" + "level: %d\n" + "delta: %d\n" + "chg_time: %d\n" + "level_change: %d\n" + "sleep_timer_count: %d\n" + "OT_led_on: %d\n" + "overloading_charge: %d\n" + "a2m_cable_type: %d\n" + "vchg: %d\n" + "over_vchg: %d\n", + smem_batt_info->batt_id, + smem_batt_info->batt_vol, + smem_batt_info->batt_vol_last, + smem_batt_info->batt_temp, + smem_batt_info->batt_current, + smem_batt_info->batt_current_last, + smem_batt_info->batt_discharge_current, + smem_batt_info->VREF_2, + smem_batt_info->VREF, + smem_batt_info->ADC4096_VREF, + smem_batt_info->Rtemp, + smem_batt_info->Temp, + smem_batt_info->Temp_last, + smem_batt_info->pd_M, + smem_batt_info->MBAT_pd, 
+ smem_batt_info->I_MBAT, + smem_batt_info->pd_temp, + smem_batt_info->percent_last, + smem_batt_info->percent_update, + smem_batt_info->dis_percent, + smem_batt_info->vbus, + smem_batt_info->usbid, + smem_batt_info->charging_source, + smem_batt_info->MBAT_IN, + smem_batt_info->full_bat, + smem_batt_info->eval_current, + smem_batt_info->eval_current_last, + smem_batt_info->charging_enabled, + smem_batt_info->timeout, + smem_batt_info->fullcharge, + smem_batt_info->level, + smem_batt_info->delta, + smem_batt_info->chg_time, + smem_batt_info->level_change, + smem_batt_info->sleep_timer_count, + smem_batt_info->OT_led_on, + smem_batt_info->overloading_charge, + smem_batt_info->a2m_cable_type, + smem_batt_info->vchg, + smem_batt_info->over_vchg); + } + + return len; +} + +static int htc_set_smem_cable_type(u32 cable_type) +{ + if (!smem_batt_info) { + smem_batt_info = smem_alloc(SMEM_BATT_INFO, + sizeof(struct htc_batt_info_full)); + if (!smem_batt_info) { + BATT_ERR("Update SMEM: allocate fail"); + return -EINVAL; + } + } + + smem_batt_info->a2m_cable_type = cable_type; + BATT_LOG("Update SMEM: cable type %d", cable_type); + + return 0; +} +#endif +static ssize_t htc_battery_show_batt_attr(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + switch (htc_batt_info.guage_driver) { + case GUAGE_MODEM: +#ifdef CONFIG_HTC_BATTCHG_SMEM + return htc_battery_show_smem(dev, attr, buf); +#endif + break; + case GUAGE_DS2784: + case GUAGE_DS2746: + return htc_batt_info.func_show_batt_attr(attr, buf); + break; + } + return 0; +} + +/* -------------------------------------------------------------------------- */ +static int htc_power_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + enum charger_type_t charger; + + mutex_lock(&htc_batt_info.lock); + + charger = htc_batt_info.rep.charging_source; + /* ARM9 decides charging_enabled value by battery id */ + if (htc_batt_info.rep.batt_id == 255) + charger = CHARGER_BATTERY; + + mutex_unlock(&htc_batt_info.lock); + + switch (psp) { + case POWER_SUPPLY_PROP_ONLINE: + if (psy->type == POWER_SUPPLY_TYPE_MAINS) { + val->intval = (charger == CHARGER_AC ? 1 : 0); + if (htc_batt_debug_mask & HTC_BATT_DEBUG_USER_QUERY) + BATT_LOG("%s: %s: online=%d", __func__, psy->name, val->intval); + } else if (psy->type == POWER_SUPPLY_TYPE_USB) { + val->intval = (charger == CHARGER_USB ? 
1 : 0); + if (htc_batt_debug_mask & HTC_BATT_DEBUG_USER_QUERY) + BATT_LOG("%s: %s: online=%d", __func__, psy->name, val->intval); + } else + val->intval = 0; + break; + default: + return -EINVAL; + } + + return 0; +} + +/* Once charge full, set this flag */ +static int htc_charge_full = 0; + +static int htc_battery_get_charging_status(void) +{ + u32 level; + enum charger_type_t charger; + int ret; + + mutex_lock(&htc_batt_info.lock); + + charger = htc_batt_info.rep.charging_source; + + /* ARM9 decides charging_enabled value by battery id */ + if (htc_batt_info.rep.batt_id == 255) + charger = CHARGER_UNKNOWN; + + switch (charger) { + case CHARGER_BATTERY: + htc_charge_full = 0; + ret = POWER_SUPPLY_STATUS_NOT_CHARGING; + break; + case CHARGER_USB: + case CHARGER_AC: + if ((htc_charge_full) && (htc_batt_info.rep.full_level == 100)) { + htc_batt_info.rep.level = 100; + } + + level = htc_batt_info.rep.level; + if (level == 100){ + htc_charge_full = 1;} + if (htc_charge_full) + ret = POWER_SUPPLY_STATUS_FULL; + else if (htc_batt_info.rep.charging_enabled != 0) + ret = POWER_SUPPLY_STATUS_CHARGING; + else + ret = POWER_SUPPLY_STATUS_DISCHARGING; + break; + default: + ret = POWER_SUPPLY_STATUS_UNKNOWN; + } + mutex_unlock(&htc_batt_info.lock); + return ret; +} + +static int htc_battery_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + switch (psp) { + case POWER_SUPPLY_PROP_STATUS: + val->intval = htc_battery_get_charging_status(); + if (htc_batt_debug_mask & HTC_BATT_DEBUG_USER_QUERY) + BATT_LOG("%s: %s: status=%d", __func__, psy->name, val->intval); + break; + case POWER_SUPPLY_PROP_HEALTH: + val->intval = POWER_SUPPLY_HEALTH_GOOD; + if (machine_is_paradise() && (htc_batt_info.rep.batt_temp >= 500 || + htc_batt_info.rep.batt_temp <= 0)) + val->intval = POWER_SUPPLY_HEALTH_OVERHEAT; + else if (!machine_is_paradise() && (htc_batt_info.rep.batt_temp >= 480 || + htc_batt_info.rep.batt_temp <= 0)) + val->intval = POWER_SUPPLY_HEALTH_OVERHEAT; + if (htc_batt_debug_mask & HTC_BATT_DEBUG_USER_QUERY) + BATT_LOG("%s: %s: health=%d", __func__, psy->name, val->intval); + break; + case POWER_SUPPLY_PROP_PRESENT: + val->intval = htc_batt_info.present; + if (htc_batt_debug_mask & HTC_BATT_DEBUG_USER_QUERY) + BATT_LOG("%s: %s: present=%d", __func__, psy->name, val->intval); + break; + case POWER_SUPPLY_PROP_TECHNOLOGY: + val->intval = POWER_SUPPLY_TECHNOLOGY_LION; + if (htc_batt_debug_mask & HTC_BATT_DEBUG_USER_QUERY) + BATT_LOG("%s: %s: technology=%d", __func__, psy->name, val->intval); + break; + case POWER_SUPPLY_PROP_CAPACITY: + mutex_lock(&htc_batt_info.lock); + val->intval = htc_batt_info.rep.level; + /* prevent shutdown before battery driver ready. */ + if (htc_batt_info.device_id == 0) + val->intval = 55; /* 55 == ?? 
*/ + mutex_unlock(&htc_batt_info.lock); + if (htc_batt_debug_mask & HTC_BATT_DEBUG_USER_QUERY) + BATT_LOG("%s: %s: capacity=%d", __func__, psy->name, val->intval); + break; + default: + return -EINVAL; + } + + return 0; +} + +#define HTC_BATTERY_ATTR(_name) \ +{ \ + .attr = { .name = #_name, .mode = S_IRUGO }, \ + .show = htc_battery_show_property, \ + .store = NULL, \ +} + +static struct device_attribute htc_battery_attrs[] = { + HTC_BATTERY_ATTR(batt_id), + HTC_BATTERY_ATTR(batt_vol), + HTC_BATTERY_ATTR(batt_temp), + HTC_BATTERY_ATTR(batt_current), + HTC_BATTERY_ATTR(charging_source), + HTC_BATTERY_ATTR(charging_enabled), + HTC_BATTERY_ATTR(full_bat), + HTC_BATTERY_ATTR(over_vchg), +/*[FIXME]__ATTR(batt_attr_raw, S_IRUGO, htc_battery_show_batt_attr, NULL),*/ +#ifdef CONFIG_HTC_BATTCHG_SMEM + __ATTR(smem_raw, S_IRUGO, htc_battery_show_smem, NULL), + __ATTR(smem_text, S_IRUGO, htc_battery_show_smem, NULL), +#else + __ATTR(batt_attr_text, S_IRUGO, htc_battery_show_batt_attr, NULL), +#endif +}; + +enum { + BATT_ID = 0, + BATT_VOL, + BATT_TEMP, + BATT_CURRENT, + CHARGING_SOURCE, + CHARGING_ENABLED, + FULL_BAT, + OVER_VCHG, +}; + +static int htc_rpc_set_delta(unsigned delta) +{ + struct set_batt_delta_req { + struct rpc_request_hdr hdr; + uint32_t data; + } req; + + req.data = cpu_to_be32(delta); + return msm_rpc_call(endpoint, HTC_PROCEDURE_SET_BATT_DELTA, + &req, sizeof(req), 5 * HZ); +} + +static int htc_rpc_charger_switch(unsigned enable) +{ + struct charger_switch_req { + struct rpc_request_hdr hdr; + uint32_t data; + } req; + + req.data = cpu_to_be32(enable); + return msm_rpc_call(endpoint, HTC_PROCEDURE_CHARGER_SWITCH, + &req, sizeof(req), 5 * HZ); +} + +static int htc_rpc_set_full_level(unsigned level) +{ + struct set_batt_full_level_req { + struct rpc_request_hdr hdr; + uint32_t data; + } req; + + req.data = cpu_to_be32(level); + return msm_rpc_call(endpoint, HTC_PROCEDURE_SET_FULL_LEVEL, + &req, sizeof(req), 5 * HZ); +} + +static ssize_t htc_battery_set_delta(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int rc; + unsigned long delta = 0; + + delta = simple_strtoul(buf, NULL, 10); + + if (delta > 100) + return -EINVAL; + + mutex_lock(&htc_batt_info.rpc_lock); + rc = htc_rpc_set_delta(delta); + mutex_unlock(&htc_batt_info.rpc_lock); + if (rc < 0) + return rc; + return count; +} + +/* +* For PA and QA test +* 0x10-> fake temp to 250 +* 0x11->TBD if needed +* 0x12->TBD if needed +* .... 
+*/ +static ssize_t htc_battery_debug_flag(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned long debug_flag; + debug_flag = simple_strtoul(buf, NULL, 10); + + if (debug_flag > 100 || debug_flag == 0) + return -EINVAL; + + mutex_lock(&htc_batt_info.lock); + blocking_notifier_call_chain(&cable_status_notifier_list, + debug_flag, 0); + mutex_unlock(&htc_batt_info.lock); + return 0; + +} + +static ssize_t htc_battery_charger_switch(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int rc; + unsigned long enable = 0; + + enable = simple_strtoul(buf, NULL, 10); + + if (enable > 1) + return -EINVAL; + + mutex_lock(&htc_batt_info.rpc_lock); + rc = htc_rpc_charger_switch(enable); + mutex_unlock(&htc_batt_info.rpc_lock); + if (rc < 0) + return rc; + return count; +} + + +static ssize_t htc_battery_set_full_level(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int rc = 0; + unsigned long percent = 100; + unsigned long param = 0; + + percent = simple_strtoul(buf, NULL, 10); + + if (percent > 100 || percent == 0) + return -EINVAL; + + switch (htc_batt_info.guage_driver) { + case GUAGE_MODEM: + mutex_lock(&htc_batt_info.rpc_lock); + htc_batt_info.rep.full_level = percent; + rc = htc_rpc_set_full_level(percent); + mutex_unlock(&htc_batt_info.rpc_lock); + break; + case GUAGE_DS2784: + case GUAGE_DS2746: + if (htc_full_level_flag == 0) { + mutex_lock(&htc_batt_info.lock); + htc_full_level_flag = 1; + htc_batt_info.rep.full_level = percent; + param = percent; + blocking_notifier_call_chain(&cable_status_notifier_list, + 0xff, (void *) ¶m); + mutex_unlock(&htc_batt_info.lock); + } + rc = 0; + break; + } + if (rc < 0) + return rc; + return rc; +} + +static struct device_attribute htc_set_delta_attrs[] = { + __ATTR(delta, S_IWUSR | S_IWGRP, NULL, htc_battery_set_delta), + __ATTR(full_level, S_IWUSR | S_IWGRP, NULL, htc_battery_set_full_level), + __ATTR(batt_debug_flag,S_IWUSR | S_IWGRP, NULL, htc_battery_debug_flag), + __ATTR(charger_control, S_IWUSR | S_IWGRP, NULL, htc_battery_charger_switch), +}; + +static int htc_battery_create_attrs(struct device * dev) +{ + int i = 0, j = 0, rc = 0; + + for (i = 0; i < ARRAY_SIZE(htc_battery_attrs); i++) { + rc = device_create_file(dev, &htc_battery_attrs[i]); + if (rc) + goto htc_attrs_failed; + } + + for (j = 0; j < ARRAY_SIZE(htc_set_delta_attrs); j++) { + rc = device_create_file(dev, &htc_set_delta_attrs[j]); + if (rc) + goto htc_delta_attrs_failed; + } + + goto succeed; + +htc_attrs_failed: + while (i--) + device_remove_file(dev, &htc_battery_attrs[i]); +htc_delta_attrs_failed: + while (j--) + device_remove_file(dev, &htc_set_delta_attrs[j]); +succeed: + return rc; +} + +static int update_batt_info(void) +{ + int ret = 0; + + /* FIXME */ + switch (htc_batt_info.guage_driver) { + case GUAGE_MODEM: +#ifdef CONFIG_HTC_BATTCHG_SMEM + if (htc_get_batt_info_smem(&htc_batt_info.rep) < 0) { + BATT_ERR("%s: smem read failed!!!", __func__); + ret = -1; + } +#else + if (htc_get_batt_info(&htc_batt_info.rep) < 0) { + BATT_ERR("%s: rpc failed!!!", __func__); + ret = -1; + } +#endif + break; +#ifdef CONFIG_BATTERY_DS2784 + case GUAGE_DS2784: + if (ds2784_get_battery_info(&htc_batt_info.rep)) { + BATT_ERR("%s: ds2784 read failed!!!", __func__); + ret = -1; + } + break; +#elif CONFIG_BATTERY_DS2746 + case GUAGE_DS2746: + if (ds2746_get_battery_info(&htc_batt_info.rep)) { + BATT_ERR("%s: ds2746 read failed!!!", __func__); + ret = -1; + } + break; 
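+	/*
+	 * Each gauge path above flattens a failed read into ret = -1, so
+	 * callers of update_batt_info() only need to test for a non-zero
+	 * return to detect failure.
+	 */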
+#endif + + default: + return -EINVAL; + } + + return ret; +} + +static ssize_t htc_battery_show_property(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int i = 0; + const ptrdiff_t off = attr - htc_battery_attrs; + + /* rpc lock is used to prevent two threads from calling + * into the get info rpc at the same time + */ + + mutex_lock(&htc_batt_info.rpc_lock); + /* check cache time to decide if we need to update */ + if (htc_batt_info.update_time && + time_before(jiffies, htc_batt_info.update_time + + msecs_to_jiffies(cache_time))) { + BATT_LOG("%s: use cached values", __func__); + goto dont_need_update; + } + + if (!update_batt_info()) + htc_batt_info.update_time = jiffies; + +dont_need_update: + mutex_unlock(&htc_batt_info.rpc_lock); + + mutex_lock(&htc_batt_info.lock); + switch (off) { + case BATT_ID: + i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", + htc_batt_info.rep.batt_id); + break; + case BATT_VOL: + i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", + htc_batt_info.rep.batt_vol); + break; + case BATT_TEMP: + i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", + htc_batt_info.rep.batt_temp); + break; + case BATT_CURRENT: + i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", + htc_batt_info.rep.batt_current); + break; + case CHARGING_SOURCE: + i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", + htc_batt_info.rep.charging_source); + break; + case CHARGING_ENABLED: + i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", + htc_batt_info.rep.charging_enabled); + break; + case FULL_BAT: + i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", + htc_batt_info.rep.full_bat); + break; + case OVER_VCHG: + i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", + htc_batt_info.rep.over_vchg); + break; + default: + i = -EINVAL; + } + mutex_unlock(&htc_batt_info.lock); + + if (htc_batt_debug_mask & HTC_BATT_DEBUG_USER_QUERY) { + if (i < 0) + BATT_LOG("%s: battery: attribute is not supported: %d", __func__, off); + else + BATT_LOG("%s: battery: %s=%s", __func__, attr->attr.name, buf); + } + return i; +} + +static irqreturn_t tps65200_int_detection(int irq, void *data) +{ + struct htc_battery_tps65200_int *ip = data; + + BATT_LOG("%s: over voltage is detected.", __func__); + + disable_irq_nosync(ip->chg_int); + + ip->tps65200_reg = 0; + + schedule_delayed_work(&ip->int_work, msecs_to_jiffies(200)); + + return IRQ_HANDLED; +} + +static void htc_battery_tps65200_int_func(struct work_struct *work) +{ + struct htc_battery_tps65200_int *ip; + int fault_bit; + ip = container_of(work, struct htc_battery_tps65200_int, + int_work.work); + + switch (ip->tps65200_reg) { + case CHECK_INT1: + /* read twice. First read to trigger TPS65200 clear fault bit + on INT1. 
Second read to make sure that fault bit is cleared + and call off ovp function.*/ + fault_bit = tps_set_charger_ctrl(CHECK_INT1); + BATT_LOG("INT1 value: %d", fault_bit); + fault_bit = tps_set_charger_ctrl(CHECK_INT1); + + if (fault_bit) { +#ifdef CONFIG_HTC_BATTCHG_SMEM + smem_batt_info->over_vchg = 1; +#else + htc_batt_info.rep.over_vchg = 1; +#endif + power_supply_changed(&htc_power_supplies[CHARGER_BATTERY]); + schedule_delayed_work(&ip->int_work, + msecs_to_jiffies(5000)); + BATT_LOG("OVER_VOLTAGE: " + "over voltage fault bit on TPS65200 is raised:" + " %d", fault_bit); + } else { +#ifdef CONFIG_HTC_BATTCHG_SMEM + smem_batt_info->over_vchg = 0; +#else + htc_batt_info.rep.over_vchg = 0; +#endif + cancel_delayed_work(&ip->int_work); + enable_irq(ip->chg_int); + } + break; + default: + fault_bit = tps_set_charger_ctrl(CHECK_INT2); + BATT_LOG("Read TPS65200 INT2 register value: %x", fault_bit); + if (fault_bit) { + fault_bit = tps_set_charger_ctrl(CHECK_INT2); + BATT_LOG("Read TPS65200 INT2 register value: %x" + , fault_bit); + fault_bit = tps_set_charger_ctrl(CHECK_INT2); + BATT_LOG("Read TPS65200 INT2 register value: %x" + , fault_bit); + fault_bit = tps_set_charger_ctrl(CHECK_CONTROL); +#ifdef CONFIG_HTC_BATTCHG_SMEM + smem_batt_info->reserve4 = 1; +#endif + cancel_delayed_work(&ip->int_work); + enable_irq(ip->chg_int); + } else { + fault_bit = tps_set_charger_ctrl(CHECK_INT1); + BATT_LOG("Read TPS65200 INT1 register value: %x" + , fault_bit); + if (fault_bit) { + ip->tps65200_reg = CHECK_INT1; + schedule_delayed_work(&ip->int_work, + msecs_to_jiffies(200)); + } + } + break; + } +} + +static int htc_battery_core_probe(struct platform_device *pdev) +{ + int i, rc; + + /* init battery gpio */ + if (htc_batt_info.charger == LINEAR_CHARGER) { + if ((rc = init_batt_gpio()) < 0) { + BATT_ERR("%s: init battery gpio failed!", __func__); + return rc; + } + } + + /* init structure data member */ + htc_batt_info.update_time = jiffies; + /* A9 will shutdown the phone if battery is pluged out, so this value is always 1. + htc_batt_info.present = gpio_get_value(GPIO_TROUT_MBAT_IN); + */ + htc_batt_info.present = 1; + + /* init rpc */ + endpoint = msm_rpc_connect(APP_BATT_PROG, APP_BATT_VER, 0); + if (IS_ERR(endpoint)) { + BATT_ERR("%s: init rpc failed! 
rc = %ld", + __func__, PTR_ERR(endpoint)); + return -EINVAL; + } + + /* init power supplier framework */ + for (i = 0; i < ARRAY_SIZE(htc_power_supplies); i++) { + rc = power_supply_register(&pdev->dev, &htc_power_supplies[i]); + if (rc) + BATT_ERR("%s: Failed to register power supply (%d)", __func__, rc); + } + + /* create htc detail attributes */ + htc_battery_create_attrs(htc_power_supplies[CHARGER_BATTERY].dev); + + /* After battery driver gets initialized, send rpc request to inquiry + * the battery status in case of we lost some info + */ + htc_battery_initial = 1; + + mutex_lock(&htc_batt_info.rpc_lock); + htc_batt_info.rep.charging_source = CHARGER_BATTERY; + if (htc_get_batt_info(&htc_batt_info.rep) < 0) + BATT_ERR("%s: get info failed", __func__); + + if (htc_rpc_set_delta(1) < 0) + BATT_ERR("%s: set delta failed", __func__); + htc_batt_info.update_time = jiffies; + mutex_unlock(&htc_batt_info.rpc_lock); + + return 0; +} + +static struct platform_driver htc_battery_core_driver = { + .probe = htc_battery_core_probe, + .driver = { + .name = APP_BATT_PDEV_NAME, + .owner = THIS_MODULE, + }, +}; + +/* batt_mtoa server definitions */ +#define BATT_MTOA_PROG 0x30100000 +#define BATT_MTOA_VERS 0 +#define RPC_BATT_MTOA_NULL 0 +#define RPC_BATT_MTOA_SET_CHARGING_PROC 1 +#define RPC_BATT_MTOA_CABLE_STATUS_UPDATE_PROC 2 +#define RPC_BATT_MTOA_LEVEL_UPDATE_PROC 3 + +struct rpc_batt_mtoa_set_charging_args { + int enable; +}; + +struct rpc_batt_mtoa_cable_status_update_args { + int status; +}; + +struct rpc_dem_battery_update_args { + uint32_t level; +}; + +static int handle_battery_call(struct msm_rpc_server *server, + struct rpc_request_hdr *req, unsigned len) +{ + switch (req->procedure) { + case RPC_BATT_MTOA_NULL: + return 0; + + case RPC_BATT_MTOA_SET_CHARGING_PROC: { + struct rpc_batt_mtoa_set_charging_args *args; + args = (struct rpc_batt_mtoa_set_charging_args *)(req + 1); + args->enable = be32_to_cpu(args->enable); + if (htc_batt_debug_mask & HTC_BATT_DEBUG_M2A_RPC) + BATT_LOG("M2A_RPC: set_charging: %d", args->enable); + if (htc_batt_info.charger == SWITCH_CHARGER) + blocking_notifier_call_chain(&cable_status_notifier_list, + args->enable, NULL); + else { + htc_battery_set_charging(args->enable); + } + return 0; + } + case RPC_BATT_MTOA_CABLE_STATUS_UPDATE_PROC: { + struct rpc_batt_mtoa_cable_status_update_args *args; + args = (struct rpc_batt_mtoa_cable_status_update_args *)(req + 1); + args->status = be32_to_cpu(args->status); + if (htc_batt_debug_mask & HTC_BATT_DEBUG_M2A_RPC) + BATT_LOG("M2A_RPC: cable_update: %s", charger_tags[args->status]); +#if 0 + /* FIXME: work arround for usb function, remove it after battery driver ready */ + if (machine_is_incrediblec() && args->status == CHARGER_AC) + args->status = CHARGER_USB; +#endif + htc_cable_status_update(args->status); + #if defined(CONFIG_TROUT_BATTCHG_DOCK) + dock_detect_start(args->status); + #endif + return 0; + } + case RPC_BATT_MTOA_LEVEL_UPDATE_PROC: { + struct rpc_dem_battery_update_args *args; + args = (struct rpc_dem_battery_update_args *)(req + 1); + args->level = be32_to_cpu(args->level); + if (htc_batt_debug_mask & HTC_BATT_DEBUG_M2A_RPC) + BATT_LOG("M2A_RPC: level_update: %d", args->level); + htc_battery_status_update(args->level); + return 0; + } + default: + BATT_ERR("%s: program 0x%08x:%d: unknown procedure %d", + __func__, req->prog, req->vers, req->procedure); + return -ENODEV; + } +} + +static struct msm_rpc_server battery_server = { + .prog = BATT_MTOA_PROG, + .vers = BATT_MTOA_VERS, + .rpc_call = 
handle_battery_call, +}; + +#if defined(CONFIG_BATTERY_DS2784) || defined(CONFIG_BATTERY_DS2746) +static int ds2784_notifier_func(struct notifier_block *nfb, + unsigned long action, void *param) +{ + u8 arg = 0; + + if (param) + arg = *(u8 *)param; + + BATT_LOG("ds2784_notify: %ld %d", action, arg); + switch (action) { + case DS2784_CHARGING_CONTROL: + if (htc_batt_info.charger == LINEAR_CHARGER) + battery_charging_ctrl(arg); +// else if(htc_batt_info.charger == SWITCH_CHARGER) +// set_charger_ctrl(arg); + break; + case DS2784_LEVEL_UPDATE: + htc_battery_status_update(arg); + break; + case DS2784_BATTERY_FAULT: + case DS2784_OVER_TEMP: + htc_battery_status_update(htc_batt_info.rep.level); + break; + default: + return NOTIFY_BAD; + } + + return NOTIFY_OK; /* we did not care other action here */ +} + +static struct notifier_block ds2784_notifier = { + .notifier_call = ds2784_notifier_func, +}; + +#endif + +static int htc_battery_probe(struct platform_device *pdev) +{ + int rc = 0; + struct htc_battery_platform_data *pdata = pdev->dev.platform_data; + + htc_batt_info.device_id = pdev->id; + htc_batt_info.gpio_usb_id = pdata->gpio_usb_id; + htc_batt_info.guage_driver = pdata->guage_driver; + htc_batt_info.m2a_cable_detect = pdata->m2a_cable_detect; + htc_batt_info.func_show_batt_attr = pdata->func_show_batt_attr; + htc_batt_info.charger = pdata->charger; + htc_batt_info.rep.full_level = 100; + + if (htc_batt_info.charger == LINEAR_CHARGER) { + htc_batt_info.gpio_mbat_in = pdata->gpio_mbat_in; + htc_batt_info.gpio_mchg_en_n = pdata->gpio_mchg_en_n; + htc_batt_info.gpio_iset = pdata->gpio_iset; + } + + if (pdata->guage_driver == GUAGE_MODEM || + pdata->m2a_cable_detect) + msm_rpc_create_server(&battery_server); +#ifdef CONFIG_BATTERY_DS2784 + if (pdata->guage_driver == GUAGE_DS2784) + ds2784_register_notifier(&ds2784_notifier); +#elif CONFIG_BATTERY_DS2746 + if (pdata->guage_driver == GUAGE_DS2746) + ds2746_register_notifier(&ds2784_notifier); +#endif + + if (system_rev >= 1) { + if (pdata->int_data.chg_int) { + BATT_LOG("init over voltage interrupt detection."); + INIT_DELAYED_WORK(&pdata->int_data.int_work, + htc_battery_tps65200_int_func); + + rc = request_irq(pdata->int_data.chg_int, + tps65200_int_detection, + IRQF_TRIGGER_LOW, + "over_voltage_interrupt", + &pdata->int_data); + + if (rc) { + BATT_LOG("request irq failed"); + return rc; + } + } + } + + return 0; +} + +int get_cable_status(void) +{ +// if(htc_batt_info.rep.charging_source == CHARGER_AC || htc_batt_info.rep.charging_source == CHARGER_USB) +// htc_cable_status_update(htc_batt_info.rep.charging_source); + return htc_batt_info.rep.charging_source; +} + +static struct platform_driver htc_battery_driver = { + .probe = htc_battery_probe, + .driver = { + .name = "htc_battery", + .owner = THIS_MODULE, + }, +}; + +static struct notifier_block batt_notify = { + .notifier_call = htc_power_policy, +}; + +static BLOCKING_NOTIFIER_HEAD(battery_notifier_list); +int batt_register_client(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&battery_notifier_list, nb); +} + +int batt_unregister_client(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&battery_notifier_list, nb); +} + +int batt_notifier_call_chain(unsigned long val, void *v) +{ + return blocking_notifier_call_chain(&battery_notifier_list, val, v); +} + +static int __init htc_battery_init(void) +{ + wake_lock_init(&vbus_wake_lock, WAKE_LOCK_SUSPEND, "vbus_present"); + mutex_init(&htc_batt_info.lock); + 
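+	/* rpc_lock serializes threads calling into the A9 battery RPC endpoint. */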
mutex_init(&htc_batt_info.rpc_lock); + usb_register_notifier(&usb_status_notifier); + platform_driver_register(&htc_battery_driver); + platform_driver_register(&htc_battery_core_driver); + batt_register_client(&batt_notify); + /* Jay, The msm_fb need to consult htc_battery for power policy */ + display_notifier(htc_power_policy, NOTIFY_POWER); + return 0; +} + +module_init(htc_battery_init); +MODULE_DESCRIPTION("HTC Battery Driver"); +MODULE_LICENSE("GPL"); +EXPORT_SYMBOL(htc_is_cable_in); +EXPORT_SYMBOL(htc_is_zcharge_enabled); diff --git a/arch/arm/mach-msm/htc_battery_trout.c b/arch/arm/mach-msm/htc_battery_trout.c new file mode 100644 index 0000000000000..e1dbbf55675e3 --- /dev/null +++ b/arch/arm/mach-msm/htc_battery_trout.c @@ -0,0 +1,796 @@ +/* arch/arm/mach-msm/htc_battery.c + * + * Copyright (C) 2008 HTC Corporation. + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static struct wake_lock vbus_wake_lock; + +#define TRACE_BATT 0 + +#if TRACE_BATT +#include + +#define BATT(x...) do { \ +struct timespec ts; \ +struct rtc_time tm; \ +getnstimeofday(&ts); \ +rtc_time_to_tm(ts.tv_sec, &tm); \ +printk(KERN_INFO "[BATT] " x); \ +printk(" at %lld (%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", \ +ktime_to_ns(ktime_get()), tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, \ +tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); \ +} while (0) +#else +#define BATT(x...) do {} while (0) +#endif + +/* rpc related */ +#define APP_BATT_PDEV_NAME "rs30100001:00000000" +#define APP_BATT_PROG 0x30100001 +#define APP_BATT_VER MSM_RPC_VERS(0,0) +#define HTC_PROCEDURE_BATTERY_NULL 0 +#define HTC_PROCEDURE_GET_BATT_LEVEL 1 +#define HTC_PROCEDURE_GET_BATT_INFO 2 +#define HTC_PROCEDURE_GET_CABLE_STATUS 3 +#define HTC_PROCEDURE_SET_BATT_DELTA 4 + +/* module debugger */ +#define HTC_BATTERY_DEBUG 1 +#define BATTERY_PREVENTION 1 + +/* Enable this will shut down if no battery */ +#define ENABLE_BATTERY_DETECTION 0 +/* Sapphire pin changes: + * USB_ID (GPIO 90) is renamed to AC_IN (GPIO 30) + * CHARGER_EN (CPLD MISC2 bit[0]) is move to PMIC (MPP_14). + * ISET (CPLD MISC2 bit[1]) is move to PMIC (MPP_13). 
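+ * Charging control has therefore moved to A9/the PMIC on Sapphire; the GPIO-based
+ * battery_charging_ctrl() below is effectively a no-op on non-trout hardware.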
*/ +#define GPIO_SAPPHIRE_USB_ID 30 + +#define GPIO_BATTERY_DETECTION 21 +#define GPIO_BATTERY_CHARGER_EN 128 + +/* Charge current selection */ +#define GPIO_BATTERY_CHARGER_CURRENT 129 + +typedef enum { + DISABLE = 0, + ENABLE_SLOW_CHG, + ENABLE_FAST_CHG +} batt_ctl_t; + +/* This order is the same as htc_power_supplies[] + * And it's also the same as htc_cable_status_update() + */ +typedef enum { + CHARGER_BATTERY = 0, + CHARGER_USB, + CHARGER_AC +} charger_type_t; + +const char *charger_tags[] = {"none", "USB", "AC"}; + +struct battery_info_reply { + u32 batt_id; /* Battery ID from ADC */ + u32 batt_vol; /* Battery voltage from ADC */ + u32 batt_temp; /* Battery Temperature (C) from formula and ADC */ + u32 batt_current; /* Battery current from ADC */ + u32 level; /* formula */ + u32 charging_source; /* 0: no cable, 1:usb, 2:AC */ + u32 charging_enabled; /* 0: Disable, 1: Enable */ + u32 full_bat; /* Full capacity of battery (mAh) */ +}; + +struct htc_battery_info { + int present; + unsigned long update_time; + + /* lock to protect the battery info */ + struct mutex lock; + + /* lock held while calling the arm9 to query the battery info */ + struct mutex rpc_lock; + struct battery_info_reply rep; +}; + +static struct msm_rpc_endpoint *endpoint; + +static struct htc_battery_info htc_batt_info; + +static unsigned int cache_time = 1000; + +static int htc_battery_initial = 0; + +static enum power_supply_property htc_battery_properties[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_CAPACITY, +}; + +static enum power_supply_property htc_power_properties[] = { + POWER_SUPPLY_PROP_ONLINE, +}; + +static char *supply_list[] = { + "battery", +}; + +/* HTC dedicated attributes */ +static ssize_t htc_battery_show_property(struct device *dev, + struct device_attribute *attr, + char *buf); + +static int htc_power_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val); + +static int htc_battery_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val); + +static struct power_supply htc_power_supplies[] = { + { + .name = "battery", + .type = POWER_SUPPLY_TYPE_BATTERY, + .properties = htc_battery_properties, + .num_properties = ARRAY_SIZE(htc_battery_properties), + .get_property = htc_battery_get_property, + }, + { + .name = "usb", + .type = POWER_SUPPLY_TYPE_USB, + .supplied_to = supply_list, + .num_supplicants = ARRAY_SIZE(supply_list), + .properties = htc_power_properties, + .num_properties = ARRAY_SIZE(htc_power_properties), + .get_property = htc_power_get_property, + }, + { + .name = "ac", + .type = POWER_SUPPLY_TYPE_MAINS, + .supplied_to = supply_list, + .num_supplicants = ARRAY_SIZE(supply_list), + .properties = htc_power_properties, + .num_properties = ARRAY_SIZE(htc_power_properties), + .get_property = htc_power_get_property, + }, +}; + +static int g_usb_online; + +/* -------------------------------------------------------------------------- */ + +#if defined(CONFIG_DEBUG_FS) +int htc_battery_set_charging(batt_ctl_t ctl); +static int batt_debug_set(void *data, u64 val) +{ + return htc_battery_set_charging((batt_ctl_t) val); +} + +static int batt_debug_get(void *data, u64 *val) +{ + return -ENOSYS; +} + +DEFINE_SIMPLE_ATTRIBUTE(batt_debug_fops, batt_debug_get, batt_debug_set, "%llu\n"); +static int __init batt_debug_init(void) +{ + struct dentry *dent; + + dent = debugfs_create_dir("htc_battery", 0); + 
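+	/* Bail out if the debugfs directory could not be created. */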
if (IS_ERR(dent)) + return PTR_ERR(dent); + + debugfs_create_file("charger_state", 0644, dent, NULL, &batt_debug_fops); + + return 0; +} + +device_initcall(batt_debug_init); +#endif + +static int init_batt_gpio(void) +{ + if (!machine_is_trout()) + return 0; + + if (gpio_request(GPIO_BATTERY_DETECTION, "batt_detect") < 0) + goto gpio_failed; + if (gpio_request(GPIO_BATTERY_CHARGER_EN, "charger_en") < 0) + goto gpio_failed; + if (gpio_request(GPIO_BATTERY_CHARGER_CURRENT, "charge_current") < 0) + goto gpio_failed; + + return 0; + +gpio_failed: + return -EINVAL; + +} + +/* + * battery_charging_ctrl - battery charing control. + * @ctl: battery control command + * + */ +static int battery_charging_ctrl(batt_ctl_t ctl) +{ + int result = 0; + + /* The charing operations are move to A9 in Sapphire. */ + if (!machine_is_trout()) + return result; + + switch (ctl) { + case DISABLE: + BATT("charger OFF"); + /* 0 for enable; 1 disable */ + result = gpio_direction_output(GPIO_BATTERY_CHARGER_EN, 1); + break; + case ENABLE_SLOW_CHG: + BATT("charger ON (SLOW)"); + result = gpio_direction_output(GPIO_BATTERY_CHARGER_CURRENT, 0); + result = gpio_direction_output(GPIO_BATTERY_CHARGER_EN, 0); + break; + case ENABLE_FAST_CHG: + BATT("charger ON (FAST)"); + result = gpio_direction_output(GPIO_BATTERY_CHARGER_CURRENT, 1); + result = gpio_direction_output(GPIO_BATTERY_CHARGER_EN, 0); + break; + default: + printk(KERN_ERR "Not supported battery ctr called.!\n"); + result = -EINVAL; + break; + } + + return result; +} + +int htc_battery_set_charging(batt_ctl_t ctl) +{ + int rc; + + if ((rc = battery_charging_ctrl(ctl)) < 0) + goto result; + + if (!htc_battery_initial) { + htc_batt_info.rep.charging_enabled = ctl & 0x3; + } else { + mutex_lock(&htc_batt_info.lock); + htc_batt_info.rep.charging_enabled = ctl & 0x3; + mutex_unlock(&htc_batt_info.lock); + } +result: + return rc; +} + +int htc_battery_status_update(u32 curr_level) +{ + int notify; + if (!htc_battery_initial) + return 0; + + mutex_lock(&htc_batt_info.lock); + notify = (htc_batt_info.rep.level != curr_level); + htc_batt_info.rep.level = curr_level; + mutex_unlock(&htc_batt_info.lock); + + if (notify) + power_supply_changed(&htc_power_supplies[CHARGER_BATTERY]); + return 0; +} + +int htc_cable_status_update(int status) +{ + int rc = 0; + unsigned last_source; + + if (!htc_battery_initial) + return 0; + + if (status < CHARGER_BATTERY || status > CHARGER_AC) { + BATT("%s: Not supported cable status received!", __func__); + return -EINVAL; + } + mutex_lock(&htc_batt_info.lock); + /* A9 reports USB charging when helf AC cable in and China AC charger. */ + /* Work arround: notify userspace AC charging first, + and notify USB charging again when receiving usb connected notificaiton from usb driver. */ + last_source = htc_batt_info.rep.charging_source; + if (status == CHARGER_USB && g_usb_online == 0) + htc_batt_info.rep.charging_source = CHARGER_AC; + else { + htc_batt_info.rep.charging_source = status; + /* usb driver will not notify usb offline. */ + if (status == CHARGER_BATTERY && g_usb_online == 1) + g_usb_online = 0; + } + + /* TODO: Don't call usb driver again with the same cable status. */ + msm_hsusb_set_vbus_state(status == CHARGER_USB); + + if (htc_batt_info.rep.charging_source != last_source) { + if (htc_batt_info.rep.charging_source == CHARGER_USB || + htc_batt_info.rep.charging_source == CHARGER_AC) { + wake_lock(&vbus_wake_lock); + } else { + /* give userspace some time to see the uevent and update + * LED state or whatnot... 
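+			 * The half-second timeout below keeps us awake just long
+			 * enough for the uevent to be delivered.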
+ */ + wake_lock_timeout(&vbus_wake_lock, HZ / 2); + } + if (htc_batt_info.rep.charging_source == CHARGER_BATTERY || last_source == CHARGER_BATTERY) + power_supply_changed(&htc_power_supplies[CHARGER_BATTERY]); + if (htc_batt_info.rep.charging_source == CHARGER_USB || last_source == CHARGER_USB) + power_supply_changed(&htc_power_supplies[CHARGER_USB]); + if (htc_batt_info.rep.charging_source == CHARGER_AC || last_source == CHARGER_AC) + power_supply_changed(&htc_power_supplies[CHARGER_AC]); + } + mutex_unlock(&htc_batt_info.lock); + + return rc; +} + +/* A9 reports USB charging when helf AC cable in and China AC charger. */ +/* Work arround: notify userspace AC charging first, +and notify USB charging again when receiving usb connected notification from usb driver. */ +void notify_usb_connected(int online) +{ + mutex_lock(&htc_batt_info.lock); + + BATT("%s: online=%d, g_usb_online=%d", __func__, online, g_usb_online); + + if (g_usb_online != online) { + g_usb_online = online; + if (online && htc_batt_info.rep.charging_source == CHARGER_AC) { + mutex_unlock(&htc_batt_info.lock); + htc_cable_status_update(CHARGER_USB); + mutex_lock(&htc_batt_info.lock); + } else if (online) { + BATT("warning: usb connected but charging source=%d", htc_batt_info.rep.charging_source); + } + } + mutex_unlock(&htc_batt_info.lock); +} + +static int htc_get_batt_info(struct battery_info_reply *buffer) +{ + struct rpc_request_hdr req; + + struct htc_get_batt_info_rep { + struct rpc_reply_hdr hdr; + struct battery_info_reply info; + } rep; + + int rc; + + if (buffer == NULL) + return -EINVAL; + + rc = msm_rpc_call_reply(endpoint, HTC_PROCEDURE_GET_BATT_INFO, + &req, sizeof(req), + &rep, sizeof(rep), + 5 * HZ); + if ( rc < 0 ) + return rc; + + mutex_lock(&htc_batt_info.lock); + buffer->batt_id = be32_to_cpu(rep.info.batt_id); + buffer->batt_vol = be32_to_cpu(rep.info.batt_vol); + buffer->batt_temp = be32_to_cpu(rep.info.batt_temp); + buffer->batt_current = be32_to_cpu(rep.info.batt_current); + buffer->level = be32_to_cpu(rep.info.level); + /* Move the rules of charging_source to cable_status_update. */ + /* buffer->charging_source = be32_to_cpu(rep.info.charging_source); */ + buffer->charging_enabled = be32_to_cpu(rep.info.charging_enabled); + buffer->full_bat = be32_to_cpu(rep.info.full_bat); + mutex_unlock(&htc_batt_info.lock); + + return 0; +} + +/* -------------------------------------------------------------------------- */ +static int htc_power_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + charger_type_t charger; + + mutex_lock(&htc_batt_info.lock); + charger = htc_batt_info.rep.charging_source; + mutex_unlock(&htc_batt_info.lock); + + switch (psp) { + case POWER_SUPPLY_PROP_ONLINE: + if (psy->type == POWER_SUPPLY_TYPE_MAINS) + val->intval = (charger == CHARGER_AC ? 1 : 0); + else if (psy->type == POWER_SUPPLY_TYPE_USB) + val->intval = (charger == CHARGER_USB ? 
1 : 0); + else + val->intval = 0; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int htc_battery_get_charging_status(void) +{ + u32 level; + charger_type_t charger; + int ret; + + mutex_lock(&htc_batt_info.lock); + charger = htc_batt_info.rep.charging_source; + + switch (charger) { + case CHARGER_BATTERY: + ret = POWER_SUPPLY_STATUS_NOT_CHARGING; + break; + case CHARGER_USB: + case CHARGER_AC: + level = htc_batt_info.rep.level; + if (level == 100) + ret = POWER_SUPPLY_STATUS_FULL; + else + ret = POWER_SUPPLY_STATUS_CHARGING; + break; + default: + ret = POWER_SUPPLY_STATUS_UNKNOWN; + } + mutex_unlock(&htc_batt_info.lock); + return ret; +} + +static int htc_battery_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + switch (psp) { + case POWER_SUPPLY_PROP_STATUS: + val->intval = htc_battery_get_charging_status(); + break; + case POWER_SUPPLY_PROP_HEALTH: + val->intval = POWER_SUPPLY_HEALTH_GOOD; + break; + case POWER_SUPPLY_PROP_PRESENT: + val->intval = htc_batt_info.present; + break; + case POWER_SUPPLY_PROP_TECHNOLOGY: + val->intval = POWER_SUPPLY_TECHNOLOGY_LION; + break; + case POWER_SUPPLY_PROP_CAPACITY: + mutex_lock(&htc_batt_info.lock); + val->intval = htc_batt_info.rep.level; + mutex_unlock(&htc_batt_info.lock); + break; + default: + return -EINVAL; + } + + return 0; +} + +#define HTC_BATTERY_ATTR(_name) \ +{ \ + .attr = { .name = #_name, .mode = S_IRUGO, .owner = THIS_MODULE }, \ + .show = htc_battery_show_property, \ + .store = NULL, \ +} + +static struct device_attribute htc_battery_attrs[] = { + HTC_BATTERY_ATTR(batt_id), + HTC_BATTERY_ATTR(batt_vol), + HTC_BATTERY_ATTR(batt_temp), + HTC_BATTERY_ATTR(batt_current), + HTC_BATTERY_ATTR(charging_source), + HTC_BATTERY_ATTR(charging_enabled), + HTC_BATTERY_ATTR(full_bat), +}; + +enum { + BATT_ID = 0, + BATT_VOL, + BATT_TEMP, + BATT_CURRENT, + CHARGING_SOURCE, + CHARGING_ENABLED, + FULL_BAT, +}; + +static int htc_rpc_set_delta(unsigned delta) +{ + struct set_batt_delta_req { + struct rpc_request_hdr hdr; + uint32_t data; + } req; + + req.data = cpu_to_be32(delta); + return msm_rpc_call(endpoint, HTC_PROCEDURE_SET_BATT_DELTA, + &req, sizeof(req), 5 * HZ); +} + + +static ssize_t htc_battery_set_delta(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int rc; + unsigned long delta = 0; + + delta = simple_strtoul(buf, NULL, 10); + + if (delta > 100) + return -EINVAL; + + mutex_lock(&htc_batt_info.rpc_lock); + rc = htc_rpc_set_delta(delta); + mutex_unlock(&htc_batt_info.rpc_lock); + if (rc < 0) + return rc; + return count; +} + +static struct device_attribute htc_set_delta_attrs[] = { + __ATTR(delta, S_IWUSR | S_IWGRP, NULL, htc_battery_set_delta), +}; + +static int htc_battery_create_attrs(struct device * dev) +{ + int i, j, rc; + + for (i = 0; i < ARRAY_SIZE(htc_battery_attrs); i++) { + rc = device_create_file(dev, &htc_battery_attrs[i]); + if (rc) + goto htc_attrs_failed; + } + + for (j = 0; j < ARRAY_SIZE(htc_set_delta_attrs); j++) { + rc = device_create_file(dev, &htc_set_delta_attrs[j]); + if (rc) + goto htc_delta_attrs_failed; + } + + goto succeed; + +htc_attrs_failed: + while (i--) + device_remove_file(dev, &htc_battery_attrs[i]); +htc_delta_attrs_failed: + while (j--) + device_remove_file(dev, &htc_set_delta_attrs[i]); +succeed: + return rc; +} + +static ssize_t htc_battery_show_property(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int i = 0; + const ptrdiff_t off = attr - 
htc_battery_attrs; + + /* rpc lock is used to prevent two threads from calling + * into the get info rpc at the same time + */ + + mutex_lock(&htc_batt_info.rpc_lock); + /* check cache time to decide if we need to update */ + if (htc_batt_info.update_time && + time_before(jiffies, htc_batt_info.update_time + + msecs_to_jiffies(cache_time))) + goto dont_need_update; + + if (htc_get_batt_info(&htc_batt_info.rep) < 0) { + printk(KERN_ERR "%s: rpc failed!!!\n", __FUNCTION__); + } else { + htc_batt_info.update_time = jiffies; + } +dont_need_update: + mutex_unlock(&htc_batt_info.rpc_lock); + + mutex_lock(&htc_batt_info.lock); + switch (off) { + case BATT_ID: + i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", + htc_batt_info.rep.batt_id); + break; + case BATT_VOL: + i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", + htc_batt_info.rep.batt_vol); + break; + case BATT_TEMP: + i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", + htc_batt_info.rep.batt_temp); + break; + case BATT_CURRENT: + i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", + htc_batt_info.rep.batt_current); + break; + case CHARGING_SOURCE: + i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", + htc_batt_info.rep.charging_source); + break; + case CHARGING_ENABLED: + i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", + htc_batt_info.rep.charging_enabled); + break; + case FULL_BAT: + i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", + htc_batt_info.rep.full_bat); + break; + default: + i = -EINVAL; + } + mutex_unlock(&htc_batt_info.lock); + + return i; +} + +static int htc_battery_probe(struct platform_device *pdev) +{ + int i, rc; + + /* init battery gpio */ + if ((rc = init_batt_gpio()) < 0) { + printk(KERN_ERR "%s: init battery gpio failed!\n", __FUNCTION__); + return rc; + } + + /* init structure data member */ + htc_batt_info.update_time = jiffies; + /* A9 will shutdown the phone if battery is pluged out, so this value is always 1. + htc_batt_info.present = gpio_get_value(GPIO_TROUT_MBAT_IN); + */ + htc_batt_info.present = 1; + + /* init rpc */ + endpoint = msm_rpc_connect(APP_BATT_PROG, APP_BATT_VER, 0); + if (IS_ERR(endpoint)) { + printk(KERN_ERR "%s: init rpc failed! 
rc = %ld\n", + __FUNCTION__, PTR_ERR(endpoint)); + return rc; + } + + /* init power supplier framework */ + for (i = 0; i < ARRAY_SIZE(htc_power_supplies); i++) { + rc = power_supply_register(&pdev->dev, &htc_power_supplies[i]); + if (rc) + printk(KERN_ERR "Failed to register power supply (%d)\n", rc); + } + + /* create htc detail attributes */ + htc_battery_create_attrs(htc_power_supplies[CHARGER_BATTERY].dev); + + /* After battery driver gets initialized, send rpc request to inquiry + * the battery status in case of we lost some info + */ + htc_battery_initial = 1; + + mutex_lock(&htc_batt_info.rpc_lock); + htc_batt_info.rep.charging_source = CHARGER_BATTERY; + if (htc_get_batt_info(&htc_batt_info.rep) < 0) + printk(KERN_ERR "%s: get info failed\n", __FUNCTION__); + + if (htc_rpc_set_delta(1) < 0) + printk(KERN_ERR "%s: set delta failed\n", __FUNCTION__); + htc_batt_info.update_time = jiffies; + mutex_unlock(&htc_batt_info.rpc_lock); + + return 0; +} + +static struct platform_driver htc_battery_driver = { + .probe = htc_battery_probe, + .driver = { + .name = APP_BATT_PDEV_NAME, + .owner = THIS_MODULE, + }, +}; + +/* batt_mtoa server definitions */ +#define BATT_MTOA_PROG 0x30100000 +#define BATT_MTOA_VERS 0 +#define RPC_BATT_MTOA_NULL 0 +#define RPC_BATT_MTOA_SET_CHARGING_PROC 1 +#define RPC_BATT_MTOA_CABLE_STATUS_UPDATE_PROC 2 +#define RPC_BATT_MTOA_LEVEL_UPDATE_PROC 3 + +struct rpc_batt_mtoa_set_charging_args { + int enable; +}; + +struct rpc_batt_mtoa_cable_status_update_args { + int status; +}; + +struct rpc_dem_battery_update_args { + uint32_t level; +}; + +static int handle_battery_call(struct msm_rpc_server *server, + struct rpc_request_hdr *req, unsigned len) +{ + switch (req->procedure) { + case RPC_BATT_MTOA_NULL: + return 0; + + case RPC_BATT_MTOA_SET_CHARGING_PROC: { + struct rpc_batt_mtoa_set_charging_args *args; + args = (struct rpc_batt_mtoa_set_charging_args *)(req + 1); + args->enable = be32_to_cpu(args->enable); + BATT("set_charging: enable=%d",args->enable); + htc_battery_set_charging(args->enable); + return 0; + } + case RPC_BATT_MTOA_CABLE_STATUS_UPDATE_PROC: { + struct rpc_batt_mtoa_cable_status_update_args *args; + args = (struct rpc_batt_mtoa_cable_status_update_args *)(req + 1); + args->status = be32_to_cpu(args->status); + BATT("cable_status_update: status=%d",args->status); + htc_cable_status_update(args->status); + return 0; + } + case RPC_BATT_MTOA_LEVEL_UPDATE_PROC: { + struct rpc_dem_battery_update_args *args; + args = (struct rpc_dem_battery_update_args *)(req + 1); + args->level = be32_to_cpu(args->level); + BATT("dem_battery_update: level=%d",args->level); + htc_battery_status_update(args->level); + return 0; + } + default: + printk(KERN_ERR "%s: program 0x%08x:%d: unknown procedure %d\n", + __FUNCTION__, req->prog, req->vers, req->procedure); + return -ENODEV; + } +} + +static struct msm_rpc_server battery_server = { + .prog = BATT_MTOA_PROG, + .vers = BATT_MTOA_VERS, + .rpc_call = handle_battery_call, +}; + +static int __init htc_battery_init(void) +{ + wake_lock_init(&vbus_wake_lock, WAKE_LOCK_SUSPEND, "vbus_present"); + mutex_init(&htc_batt_info.lock); + mutex_init(&htc_batt_info.rpc_lock); + msm_rpc_create_server(&battery_server); + platform_driver_register(&htc_battery_driver); + return 0; +} + +module_init(htc_battery_init); +MODULE_DESCRIPTION("HTC Battery Driver"); +MODULE_LICENSE("GPL"); + diff --git a/arch/arm/mach-msm/htc_bluetooth.c b/arch/arm/mach-msm/htc_bluetooth.c new file mode 100644 index 0000000000000..edb3b5205b39a --- /dev/null +++ 
b/arch/arm/mach-msm/htc_bluetooth.c @@ -0,0 +1,61 @@ +/* arch/arm/mach-msm/htc_bluetooth.c + * + * Code to extract Bluetooth bd_address information + * from ATAG set up by the bootloader. + * + * Copyright (C) 2009 HTC Corporation + * Author:Yomin Lin + * + */ + +#include +#include +#include + +#include + +/* configuration tags specific to Bluetooth*/ +#define ATAG_BLUETOOTH 0x43294329 + +#define ATAG_BT_DEBUG +#define MAX_BT_SIZE 0x8U +static unsigned char bt_bd_ram[MAX_BT_SIZE]; + +unsigned char *get_bt_bd_ram(void) +{ + return (bt_bd_ram); +} + +EXPORT_SYMBOL(get_bt_bd_ram); +#ifdef ATAG_BT_DEBUG +static int __init parse_tag_bt(const struct tag *tag) +{ + unsigned char *dptr=(unsigned char *)(&tag->u); + unsigned size; + unsigned i; + unsigned char *ptr; + + size=min((tag->hdr.size-2)*sizeof(__u32),MAX_BT_SIZE); + + ptr=dptr; + printk("BT Data size= %d, 0x%x,", tag->hdr.size, tag->hdr.tag); + for(i=0;iu); + unsigned size; + + size=min((tag->hdr.size-2)*sizeof(__u32),MAX_BT_SIZE); + memcpy((void *)bt_bd_ram ,(void *)dptr, size); + return 0; +} +#endif + __tagtable(ATAG_BLUETOOTH, parse_tag_bt); diff --git a/arch/arm/mach-msm/htc_board_tags.c b/arch/arm/mach-msm/htc_board_tags.c new file mode 100644 index 0000000000000..f0cb064c47fe4 --- /dev/null +++ b/arch/arm/mach-msm/htc_board_tags.c @@ -0,0 +1,257 @@ +/* linux/arch/arm/mach-msm/devices.c + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "smd_private.h" + +#include +#include +#include +#include +#include + +static char *df_serialno = "000000000000"; +static char *board_sn; + +#define MFG_GPIO_TABLE_MAX_SIZE 0x400 +static unsigned char mfg_gpio_table[MFG_GPIO_TABLE_MAX_SIZE]; + +static int mfg_mode; +int __init board_mfg_mode_init(char *s) +{ + if (!strcmp(s, "normal")) + mfg_mode = 0; + else if (!strcmp(s, "factory2")) + mfg_mode = 1; + else if (!strcmp(s, "recovery")) + mfg_mode = 2; + else if (!strcmp(s, "charge")) + mfg_mode = 3; + else if (!strcmp(s, "power_test")) + mfg_mode = 4; + else if (!strcmp(s, "offmode_charging")) + mfg_mode = 5; + + return 1; +} +__setup("androidboot.mode=", board_mfg_mode_init); + + +int board_mfg_mode(void) +{ + return mfg_mode; +} + +EXPORT_SYMBOL(board_mfg_mode); + +static int __init board_serialno_setup(char *serialno) +{ + char *str; + + /* use default serial number when mode is factory2 */ + if (board_mfg_mode() == 1 || !strlen(serialno)) + str = df_serialno; + else + str = serialno; + board_sn = str; + return 1; +} +__setup("androidboot.serialno=", board_serialno_setup); + +char *board_serialno(void) +{ + return board_sn; +} + +EXPORT_SYMBOL(board_serialno); + +#define ATAG_SMI 0x4d534D71 +/* setup calls mach->fixup, then parse_tags, parse_cmdline + * We need to setup meminfo in mach->fixup, so this function + * will need to traverse each tag to find smi tag. 
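+ * (The other __tagtable parsers below reuse the same tag_next() walk, stopping
+ * when hdr.size is 0, so they can likewise be handed the head of the tag list,
+ * e.g. parse_tag_smi(tags) from a board's ->fixup() hook.)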
+ */ +int __init parse_tag_smi(const struct tag *tags) +{ + int smi_sz = 0, find = 0; + struct tag *t = (struct tag *)tags; + + for (; t->hdr.size; t = tag_next(t)) { + if (t->hdr.tag == ATAG_SMI) { + printk(KERN_DEBUG "find the smi tag\n"); + find = 1; + break; + } + } + if (!find) + return -1; + + printk(KERN_DEBUG "parse_tag_smi: smi size = %d\n", t->u.mem.size); + smi_sz = t->u.mem.size; + return smi_sz; +} +__tagtable(ATAG_SMI, parse_tag_smi); + +#define ATAG_HWID 0x4d534D72 +int __init parse_tag_hwid(const struct tag *tags) +{ + int hwid = 0, find = 0; + struct tag *t = (struct tag *)tags; + + for (; t->hdr.size; t = tag_next(t)) { + if (t->hdr.tag == ATAG_HWID) { + printk(KERN_DEBUG "find the hwid tag\n"); + find = 1; + break; + } + } + + if (find) + hwid = t->u.revision.rev; + printk(KERN_DEBUG "parse_tag_hwid: hwid = 0x%x\n", hwid); + return hwid; +} +__tagtable(ATAG_HWID, parse_tag_hwid); + +#define ATAG_SKUID 0x4d534D73 +int __init parse_tag_skuid(const struct tag *tags) +{ + int skuid = 0, find = 0; + struct tag *t = (struct tag *)tags; + + for (; t->hdr.size; t = tag_next(t)) { + if (t->hdr.tag == ATAG_SKUID) { + printk(KERN_DEBUG "find the skuid tag\n"); + find = 1; + break; + } + } + + if (find) + skuid = t->u.revision.rev; + printk(KERN_DEBUG "parse_tag_skuid: hwid = 0x%x\n", skuid); + return skuid; +} +__tagtable(ATAG_SKUID, parse_tag_skuid); + +#define ATAG_HERO_PANEL_TYPE 0x4d534D74 +int panel_type; +int __init tag_panel_parsing(const struct tag *tags) +{ + panel_type = tags->u.revision.rev; + + printk(KERN_DEBUG "%s: panel type = %d\n", __func__, + panel_type); + + return panel_type; +} +__tagtable(ATAG_HERO_PANEL_TYPE, tag_panel_parsing); + +#define ATAG_ENGINEERID 0x4d534D75 +unsigned engineer_id; +int __init parse_tag_engineerid(const struct tag *tags) +{ + int engineerid = 0, find = 0; + struct tag *t = (struct tag *)tags; + + for (; t->hdr.size; t = tag_next(t)) { + if (t->hdr.tag == ATAG_ENGINEERID) { + printk(KERN_DEBUG "find the engineer tag\n"); + find = 1; + break; + } + } + + if (find) { + engineer_id = t->u.revision.rev; + engineerid = t->u.revision.rev; + } + printk(KERN_DEBUG "parse_tag_engineerid: 0x%x\n", engineerid); + return engineerid; +} +__tagtable(ATAG_ENGINEERID, parse_tag_engineerid); + +#define ATAG_MFG_GPIO_TABLE 0x59504551 +int __init parse_tag_mfg_gpio_table(const struct tag *tags) +{ + unsigned char *dptr = (unsigned char *)(&tags->u); + __u32 size; + + size = min((__u32)(tags->hdr.size - 2) * sizeof(__u32), (__u32)MFG_GPIO_TABLE_MAX_SIZE); + memcpy(mfg_gpio_table, dptr, size); + return 0; +} +__tagtable(ATAG_MFG_GPIO_TABLE, parse_tag_mfg_gpio_table); + +char * board_get_mfg_sleep_gpio_table(void) +{ + return mfg_gpio_table; +} +EXPORT_SYMBOL(board_get_mfg_sleep_gpio_table); + +static char *emmc_tag; +static int __init board_set_emmc_tag(char *get_hboot_emmc) +{ + if (strlen(get_hboot_emmc)) + emmc_tag = get_hboot_emmc; + else + emmc_tag = NULL; + return 1; +} +__setup("androidboot.emmc=", board_set_emmc_tag); + +int board_emmc_boot(void) +{ + if (emmc_tag) { + if (!strcmp(emmc_tag, "true")) + return 1; + } + + return 0; +} + +#define ATAG_MEMSIZE 0x5441001e +unsigned memory_size; +int __init parse_tag_memsize(const struct tag *tags) +{ + int mem_size = 0, find = 0; + struct tag *t = (struct tag *)tags; + + for (; t->hdr.size; t = tag_next(t)) { + if (t->hdr.tag == ATAG_MEMSIZE) { + printk(KERN_DEBUG "find the memsize tag\n"); + find = 1; + break; + } + } + + if (find) { + memory_size = t->u.revision.rev; + mem_size = t->u.revision.rev; + } 
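+	/* Keep the value in the global memory_size as well as returning it. */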
+ printk(KERN_DEBUG "parse_tag_memsize: %d\n", memory_size); + return mem_size; +} +__tagtable(ATAG_MEMSIZE, parse_tag_memsize); + diff --git a/arch/arm/mach-msm/htc_headset.c b/arch/arm/mach-msm/htc_headset.c new file mode 100644 index 0000000000000..f9a00b79f9d1c --- /dev/null +++ b/arch/arm/mach-msm/htc_headset.c @@ -0,0 +1,1332 @@ +/* + * H2W device detection driver. + * + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC, Inc. + * + * Authors: + * Laurence Chen + * Nick Pelly + * Thomas Tsai + * Farmer Tseng + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + */ + +/* For detecting HTC 2 Wire devices, such as wired headset. + + Logically, the H2W driver is always present, and H2W state (hi->state) + indicates what is currently plugged into the H2W interface. + + When the headset is plugged in, CABLE_IN1 is pulled low. When the headset + button is pressed, CABLE_IN2 is pulled low. These two lines are shared with + the TX and RX (respectively) of UART3 - used for serial debugging. + + This headset driver keeps the CPLD configured as UART3 for as long as + possible, so that we can do serial FIQ debugging even when the kernel is + locked and this driver no longer runs. So it only configures the CPLD to + GPIO while the headset is plugged in, and for 10ms during detection work. + + Unfortunately we can't leave the CPLD as UART3 while a headset is plugged + in, UART3 is pullup on TX but the headset is pull-down, causing a 55 mA + drain on trout. + + The headset detection work involves setting CPLD to GPIO, and then pulling + CABLE_IN1 high with a stronger pullup than usual. A H2W headset will still + pull this line low, whereas other attachments such as a serial console + would get pulled up by this stronger pullup. + + Headset insertion/removal causes UEvent's to be sent, and + /sys/class/switch/h2w/state to be updated. + + Button presses are interpreted as input event (KEY_MEDIA). Button presses + are ignored if the headset is plugged in, so the buttons on 11 pin -> 3.5mm + jack adapters do not work until a headset is plugged into the adapter. This + is to avoid serial RX traffic causing spurious button press events. + + We tend to check the status of CABLE_IN1 a few more times than strictly + necessary during headset detection, to avoid spurious headset insertion + events caused by serial debugger TX traffic. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define H2WI(fmt, arg...) \ + printk(KERN_INFO "[H2W] %s " fmt "\r\n", __func__, ## arg) +#define H2WE(fmt, arg...) \ + printk(KERN_ERR "[H2W] %s " fmt "\r\n", __func__, ## arg) + +#ifdef CONFIG_DEBUG_H2W +#define H2W_DBG(fmt, arg...) printk(KERN_INFO "[H2W] %s " fmt "\r\n", __func__, ## arg) +#else +#define H2W_DBG(fmt, arg...) 
do {} while (0) +#endif + +static struct workqueue_struct *g_detection_work_queue; +static void detection_work(struct work_struct *work); +static DECLARE_WORK(g_detection_work, detection_work); + +struct h2w_info { + struct switch_dev sdev; + struct input_dev *input; + struct mutex mutex_lock; + + atomic_t btn_state; + int ignore_btn; + + unsigned int irq; + unsigned int irq_btn; + unsigned int irq_btn_35mm; + + int cable_in1; + int cable_in2; + int h2w_clk; + int h2w_data; + int debug_uart; + int headset_mic_35mm; + + void (*config_cpld) (int); + void (*init_cpld) (void); + /* for h2w */ + void (*set_dat)(int); + void (*set_clk)(int); + void (*set_dat_dir)(int); + void (*set_clk_dir)(int); + int (*get_dat)(void); + int (*get_clk)(void); + + int htc_headset_flag; + int btn_11pin_35mm_flag; + + struct hrtimer timer; + ktime_t debounce_time; + + struct hrtimer btn_timer; + ktime_t btn_debounce_time; + + struct hrtimer btn35mm_timer; + ktime_t btn35mm_debounce_time; + + H2W_INFO h2w_info; + H2W_SPEED speed; + struct vreg *vreg_h2w; +}; +static struct h2w_info *hi; + +static ssize_t h2w_print_name(struct switch_dev *sdev, char *buf) +{ + switch (switch_get_state(&hi->sdev)) { + case H2W_NO_DEVICE: + return sprintf(buf, "No Device\n"); + case H2W_HTC_HEADSET: + return sprintf(buf, "Headset\n"); + } + return -EINVAL; +} + +static void button_pressed(void) +{ + printk(KERN_INFO "[H2W] button_pressed\n"); + atomic_set(&hi->btn_state, 1); + input_report_key(hi->input, KEY_MEDIA, 1); + input_sync(hi->input); +} + +static void button_released(void) +{ + printk(KERN_INFO "[H2W] button_released\n"); + atomic_set(&hi->btn_state, 0); + input_report_key(hi->input, KEY_MEDIA, 0); + input_sync(hi->input); +} + +/***************** + * H2W proctocol * + *****************/ +static inline void h2w_begin_command(void) +{ + /* Disable H2W interrupt */ + set_irq_type(hi->irq_btn, IRQF_TRIGGER_HIGH); + disable_irq(hi->irq); + disable_irq(hi->irq_btn); + + /* Set H2W_CLK as output low */ + hi->set_clk(0); + hi->set_clk_dir(1); +} + +static inline void h2w_end_command(void) +{ + /* Set H2W_CLK as input */ + hi->set_clk_dir(0); + + /* Enable H2W interrupt */ + enable_irq(hi->irq); + enable_irq(hi->irq_btn); + set_irq_type(hi->irq_btn, IRQF_TRIGGER_RISING); +} + +static inline void one_clock_write(unsigned short flag) +{ + if (flag) + hi->set_dat(1); + else + hi->set_dat(0); + + udelay(hi->speed); + hi->set_clk(1); + udelay(hi->speed); + hi->set_clk(0); +} + +static inline void one_clock_write_RWbit(unsigned short flag) +{ + if (flag) + hi->set_dat(1); + else + hi->set_dat(0); + + udelay(hi->speed); + hi->set_clk(1); + udelay(hi->speed); + hi->set_clk(0); + hi->set_dat_dir(0); + udelay(hi->speed); +} + +static inline void h2w_reset(void) +{ + /* Set H2W_DAT as output low */ + hi->set_dat(0); + hi->set_dat_dir(1); + + udelay(hi->speed); + hi->set_clk(1); + udelay(4 * hi->speed); + hi->set_dat(1); + udelay(hi->speed); + hi->set_dat(0); + udelay(hi->speed); + hi->set_clk(0); + udelay(hi->speed); +} + +static inline void h2w_start(void) +{ + udelay(hi->speed); + hi->set_clk(1); + udelay(2 * hi->speed); + hi->set_clk(0); + udelay(hi->speed); +} + +static inline int h2w_ack(void) +{ + int retry_times = 0; + +ack_resend: + if (retry_times == MAX_ACK_RESEND_TIMES) + return -1; + + udelay(hi->speed); + hi->set_clk(1); + udelay(2 * hi->speed); + + if (!hi->get_dat()) { + retry_times++; + hi->set_clk(0); + udelay(hi->speed); + goto ack_resend; + } + + hi->set_clk(0); + udelay(hi->speed); + return 0; +} + +static unsigned char 
h2w_readc(void) +{ + unsigned char h2w_read_data = 0x0; + int index; + + for (index = 0; index < 8; index++) { + hi->set_clk(0); + udelay(hi->speed); + hi->set_clk(1); + udelay(hi->speed); + if (hi->get_dat()) + h2w_read_data |= (1 << (7 - index)); + } + hi->set_clk(0); + udelay(hi->speed); + + return h2w_read_data; +} + +static int h2w_readc_cmd(H2W_ADDR address) +{ + int ret = -1, retry_times = 0; + unsigned char read_data; + +read_resend: + if (retry_times == MAX_HOST_RESEND_TIMES) + goto err_read; + + h2w_reset(); + h2w_start(); + /* Write address */ + one_clock_write(address & 0x1000); + one_clock_write(address & 0x0800); + one_clock_write(address & 0x0400); + one_clock_write(address & 0x0200); + one_clock_write(address & 0x0100); + one_clock_write(address & 0x0080); + one_clock_write(address & 0x0040); + one_clock_write(address & 0x0020); + one_clock_write(address & 0x0010); + one_clock_write(address & 0x0008); + one_clock_write(address & 0x0004); + one_clock_write(address & 0x0002); + one_clock_write(address & 0x0001); + one_clock_write_RWbit(1); + if (h2w_ack() < 0) { + H2W_DBG("Addr NO ACK(%d).\n", retry_times); + retry_times++; + hi->set_clk(0); + mdelay(RESEND_DELAY); + goto read_resend; + } + + read_data = h2w_readc(); + + if (h2w_ack() < 0) { + H2W_DBG("Data NO ACK(%d).\n", retry_times); + retry_times++; + hi->set_clk(0); + mdelay(RESEND_DELAY); + goto read_resend; + } + ret = (int)read_data; + +err_read: + if (ret < 0) + H2WE("NO ACK.\n"); + + return ret; +} + +static int h2w_writec_cmd(H2W_ADDR address, unsigned char data) +{ + int ret = -1; + int retry_times = 0; + +write_resend: + if (retry_times == MAX_HOST_RESEND_TIMES) + goto err_write; + + h2w_reset(); + h2w_start(); + + /* Write address */ + one_clock_write(address & 0x1000); + one_clock_write(address & 0x0800); + one_clock_write(address & 0x0400); + one_clock_write(address & 0x0200); + one_clock_write(address & 0x0100); + one_clock_write(address & 0x0080); + one_clock_write(address & 0x0040); + one_clock_write(address & 0x0020); + one_clock_write(address & 0x0010); + one_clock_write(address & 0x0008); + one_clock_write(address & 0x0004); + one_clock_write(address & 0x0002); + one_clock_write(address & 0x0001); + one_clock_write_RWbit(0); + if (h2w_ack() < 0) { + H2W_DBG("Addr NO ACK(%d).\n", retry_times); + retry_times++; + hi->set_clk(0); + mdelay(RESEND_DELAY); + goto write_resend; + } + + /* Write data */ + hi->set_dat_dir(1); + one_clock_write(data & 0x0080); + one_clock_write(data & 0x0040); + one_clock_write(data & 0x0020); + one_clock_write(data & 0x0010); + one_clock_write(data & 0x0008); + one_clock_write(data & 0x0004); + one_clock_write(data & 0x0002); + one_clock_write_RWbit(data & 0x0001); + if (h2w_ack() < 0) { + H2W_DBG("Data NO ACK(%d).\n", retry_times); + retry_times++; + hi->set_clk(0); + mdelay(RESEND_DELAY); + goto write_resend; + } + ret = 0; + +err_write: + if (ret < 0) + H2WE("NO ACK.\n"); + + return ret; +} + +static int h2w_get_fnkey(void) +{ + int ret; + h2w_begin_command(); + ret = h2w_readc_cmd(H2W_FNKEY_UPDOWN); + h2w_end_command(); + return ret; +} + +static int h2w_dev_init(H2W_INFO *ph2w_info) +{ + int ret = -1; + unsigned char ascr0 = 0; + int h2w_sys = 0, maxgpadd = 0, maxadd = 0, key = 0; + + hi->speed = H2W_50KHz; + h2w_begin_command(); + + /* read H2W_SYSTEM */ + h2w_sys = h2w_readc_cmd(H2W_SYSTEM); + if (h2w_sys == -1) { + H2WE("read H2W_SYSTEM(0x0000) failed.\n"); + goto err_plugin; + } + ph2w_info->ACC_CLASS = (h2w_sys & 0x03); + ph2w_info->AUDIO_DEVICE = (h2w_sys & 0x04) > 0 ? 
1 : 0; + ph2w_info->HW_REV = (h2w_sys & 0x18) >> 3; + ph2w_info->SLEEP_PR = (h2w_sys & 0x20) >> 5; + ph2w_info->CLK_SP = (h2w_sys & 0xC0) >> 6; + + /* enter init mode */ + if (h2w_writec_cmd(H2W_ASCR0, H2W_ASCR_DEVICE_INI) < 0) { + H2WE("write H2W_ASCR0(0x0002) failed.\n"); + goto err_plugin; + } + udelay(10); + + /* read H2W_MAX_GP_ADD */ + maxgpadd = h2w_readc_cmd(H2W_MAX_GP_ADD); + if (maxgpadd == -1) { + H2WE("write H2W_MAX_GP_ADD(0x0001) failed.\n"); + goto err_plugin; + } + ph2w_info->CLK_SP += (maxgpadd & 0x60) >> 3; + ph2w_info->MAX_GP_ADD = (maxgpadd & 0x1F); + + /* read key group */ + if (ph2w_info->MAX_GP_ADD >= 1) { + ph2w_info->KEY_MAXADD = h2w_readc_cmd(H2W_KEY_MAXADD); + if (ph2w_info->KEY_MAXADD == -1) + goto err_plugin; + if (ph2w_info->KEY_MAXADD >= 1) { + key = h2w_readc_cmd(H2W_ASCII_DOWN); + if (key < 0) + goto err_plugin; + ph2w_info->ASCII_DOWN = (key == 0xFF) ? 1 : 0; + } + if (ph2w_info->KEY_MAXADD >= 2) { + key = h2w_readc_cmd(H2W_ASCII_UP); + if (key == -1) + goto err_plugin; + ph2w_info->ASCII_UP = (key == 0xFF) ? 1 : 0; + } + if (ph2w_info->KEY_MAXADD >= 3) { + key = h2w_readc_cmd(H2W_FNKEY_UPDOWN); + if (key == -1) + goto err_plugin; + ph2w_info->FNKEY_UPDOWN = (key == 0xFF) ? 1 : 0; + } + if (ph2w_info->KEY_MAXADD >= 4) { + key = h2w_readc_cmd(H2W_KD_STATUS); + if (key == -1) + goto err_plugin; + ph2w_info->KD_STATUS = (key == 0x01) ? 1 : 0; + } + } + + /* read led group */ + if (ph2w_info->MAX_GP_ADD >= 2) { + ph2w_info->LED_MAXADD = h2w_readc_cmd(H2W_LED_MAXADD); + if (ph2w_info->LED_MAXADD == -1) + goto err_plugin; + if (ph2w_info->LED_MAXADD >= 1) { + key = h2w_readc_cmd(H2W_LEDCT0); + if (key == -1) + goto err_plugin; + ph2w_info->LEDCT0 = (key == 0x02) ? 1 : 0; + } + } + + /* read group 3, 4, 5 */ + if (ph2w_info->MAX_GP_ADD >= 3) { + maxadd = h2w_readc_cmd(H2W_CRDL_MAXADD); + if (maxadd == -1) + goto err_plugin; + } + if (ph2w_info->MAX_GP_ADD >= 4) { + maxadd = h2w_readc_cmd(H2W_CARKIT_MAXADD); + if (maxadd == -1) + goto err_plugin; + } + if (ph2w_info->MAX_GP_ADD >= 5) { + maxadd = h2w_readc_cmd(H2W_USBHOST_MAXADD); + if (maxadd == -1) + goto err_plugin; + } + + /* read medical group */ + if (ph2w_info->MAX_GP_ADD >= 6) { + ph2w_info->MED_MAXADD = h2w_readc_cmd(H2W_MED_MAXADD); + if (ph2w_info->MED_MAXADD == -1) + goto err_plugin; + if (ph2w_info->MED_MAXADD >= 1) { + key = h2w_readc_cmd(H2W_MED_CONTROL); + if (key == -1) + goto err_plugin; + ph2w_info->DATA_EN = (key & 0x01); + ph2w_info->AP_EN = (key & 0x02) >> 1; + ph2w_info->AP_ID = (key & 0x1c) >> 2; + } + if (ph2w_info->MED_MAXADD >= 2) { + key = h2w_readc_cmd(H2W_MED_IN_DATA); + if (key == -1) + goto err_plugin; + } + } + + if (ph2w_info->AUDIO_DEVICE) + ascr0 = H2W_ASCR_AUDIO_IN | H2W_ASCR_ACT_EN; + else + ascr0 = H2W_ASCR_ACT_EN; + + if (h2w_writec_cmd(H2W_ASCR0, ascr0) < 0) + goto err_plugin; + udelay(10); + + ret = 0; + + /* adjust speed */ + if (ph2w_info->MAX_GP_ADD == 2) { + /* Remote control */ + hi->speed = H2W_250KHz; + } else if (ph2w_info->MAX_GP_ADD == 6) { + if (ph2w_info->MED_MAXADD >= 1) { + key = h2w_readc_cmd(H2W_MED_CONTROL); + if (key == -1) + goto err_plugin; + ph2w_info->DATA_EN = (key & 0x01); + ph2w_info->AP_EN = (key & 0x02) >> 1; + ph2w_info->AP_ID = (key & 0x1c) >> 2; + } + } + +err_plugin: + h2w_end_command(); + + return ret; +} + +static inline void h2w_dev_power_on(int on) +{ + if (!hi->vreg_h2w) + return; + + if (on) + vreg_enable(hi->vreg_h2w); + else + vreg_disable(hi->vreg_h2w); +} + +static int h2w_dev_detect(void) +{ + int ret = -1; + int retry_times; + + 
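+	/* Try up to five times: power the accessory, give it 100ms to settle,
+	 * then probe it over the two-wire bus; only a remote control
+	 * (MAX_GP_ADD == 2) is accepted here. */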
for (retry_times = 5; retry_times; retry_times--) { + /* Enable H2W Power */ + h2w_dev_power_on(1); + msleep(100); + memset(&hi->h2w_info, 0, sizeof(H2W_INFO)); + if (h2w_dev_init(&hi->h2w_info) < 0) { + h2w_dev_power_on(0); + msleep(100); + } else if (hi->h2w_info.MAX_GP_ADD == 2) { + ret = 0; + break; + } else { + printk(KERN_INFO "h2w_detect: detect error(%d)\n" + , hi->h2w_info.MAX_GP_ADD); + h2w_dev_power_on(0); + msleep(100); + } + printk(KERN_INFO "h2w_detect(%d)\n" + , hi->h2w_info.MAX_GP_ADD); + } + H2W_DBG("h2w_detect:(%d)\n", retry_times); + return ret; +} + +static void remove_headset(void) +{ + unsigned long irq_flags; + + H2W_DBG(""); + + mutex_lock(&hi->mutex_lock); + switch_set_state(&hi->sdev, switch_get_state(&hi->sdev) & + ~(BIT_HEADSET | BIT_HEADSET_NO_MIC)); + mutex_unlock(&hi->mutex_lock); + hi->init_cpld(); + + /* Disable button */ + switch (hi->htc_headset_flag) { + case H2W_HTC_HEADSET: + local_irq_save(irq_flags); + disable_irq(hi->irq_btn); + local_irq_restore(irq_flags); + + if (atomic_read(&hi->btn_state)) + button_released(); + printk(KERN_INFO "remove htc headset\n"); + break; + case NORMAL_HEARPHONE: + if (hi->btn_11pin_35mm_flag) { + disable_irq(hi->irq_btn_35mm); + turn_mic_bias_on(0); + hi->btn_11pin_35mm_flag = 0; + if (atomic_read(&hi->btn_state)) + button_released(); + } + printk(KERN_INFO "remove 11pin 3.5mm headset\n"); + break; + case H2W_DEVICE: + h2w_dev_power_on(0); + set_irq_type(hi->irq_btn, IRQF_TRIGGER_LOW); + disable_irq(hi->irq_btn); + /* 10ms (5-15 with 10ms tick) */ + hi->btn_debounce_time = ktime_set(0, 10000000); + hi->set_clk_dir(0); + hi->set_dat_dir(0); + printk(KERN_INFO "remove h2w device\n"); + break; + } + + hi->htc_headset_flag = 0; + hi->debounce_time = ktime_set(0, 100000000); /* 100 ms */ + +} + +#ifdef CONFIG_MSM_SERIAL_DEBUGGER +extern void msm_serial_debug_enable(int); +#endif + +static void insert_headset(int type) +{ + unsigned long irq_flags; + int state; + + H2W_DBG(""); + + hi->htc_headset_flag = type; + state = BIT_HEADSET | BIT_HEADSET_NO_MIC; + + state = switch_get_state(&hi->sdev); + state &= ~(BIT_HEADSET_NO_MIC | BIT_HEADSET); + switch (type) { + case H2W_HTC_HEADSET: + printk(KERN_INFO "insert_headset H2W_HTC_HEADSET\n"); + state |= BIT_HEADSET; + hi->ignore_btn = !gpio_get_value(hi->cable_in2); + /* Enable button irq */ + local_irq_save(irq_flags); + enable_irq(hi->irq_btn); + local_irq_restore(irq_flags); + hi->debounce_time = ktime_set(0, 200000000); /* 20 ms */ + break; + case NORMAL_HEARPHONE: + if (hi->headset_mic_35mm) { + /* support 3.5mm earphone with mic */ + printk(KERN_INFO "11pin_3.5mm_headset plug in\n"); + /* Turn On Mic Bias */ + turn_mic_bias_on(1); + /* Wait pin be stable */ + msleep(200); + /* Detect headset with or without microphone */ + if (gpio_get_value(hi->headset_mic_35mm)) { + /* without microphone */ + turn_mic_bias_on(0); + state |= BIT_HEADSET_NO_MIC; + printk(KERN_INFO + "11pin_3.5mm without microphone\n"); + } else { /* with microphone */ + state |= BIT_HEADSET; + /* Enable button irq */ + if (!hi->btn_11pin_35mm_flag) { + set_irq_type(hi->irq_btn_35mm, + IRQF_TRIGGER_HIGH); + enable_irq(hi->irq_btn_35mm); + hi->btn_11pin_35mm_flag = 1; + } + printk(KERN_INFO + "11pin_3.5mm with microphone\n"); + } + } else /* not support 3.5mm earphone with mic */ + state |= BIT_HEADSET_NO_MIC; + hi->debounce_time = ktime_set(0, 500000000); /* 500 ms */ + break; + case H2W_DEVICE: + printk(KERN_INFO "insert_headset H2W_DEVICE\n"); + if (!hi->set_dat) { + printk(KERN_INFO "Don't support 
H2W_DEVICE\n"); + hi->htc_headset_flag = 0; + return; + } + if (h2w_dev_detect() < 0) { + printk(KERN_INFO "H2W_DEVICE -- Non detect\n"); + remove_headset(); + } else { + printk(KERN_INFO "H2W_DEVICE -- detect\n"); + hi->btn_debounce_time = ktime_set(0, 0); + local_irq_save(irq_flags); + enable_irq(hi->irq_btn); + set_irq_type(hi->irq_btn, IRQF_TRIGGER_RISING); + local_irq_restore(irq_flags); + state |= BIT_HEADSET; + } + break; + case H2W_USB_CRADLE: + printk(KERN_INFO "insert_headset USB_CRADLE\n"); + state |= BIT_HEADSET_NO_MIC; + break; + case H2W_UART_DEBUG: + printk(KERN_INFO "switch to H2W_UART_DEBUG\n"); + hi->config_cpld(hi->debug_uart); + default: + return; + } + mutex_lock(&hi->mutex_lock); + switch_set_state(&hi->sdev, state); + mutex_unlock(&hi->mutex_lock); + +#ifdef CONFIG_MSM_SERIAL_DEBUGGER + msm_serial_debug_enable(false); +#endif + +} +#if 0 +static void remove_headset(void) +{ + unsigned long irq_flags; + + H2W_DBG(""); + + switch_set_state(&hi->sdev, H2W_NO_DEVICE); + + hi->init_cpld(); + + /* Disable button */ + local_irq_save(irq_flags); + disable_irq(hi->irq_btn); + local_irq_restore(irq_flags); + + if (atomic_read(&hi->btn_state)) + button_released(); + + hi->debounce_time = ktime_set(0, 100000000); /* 100 ms */ +} +#endif +static int is_accessary_pluged_in(void) +{ + int type = 0; + int clk1 = 0, dat1 = 0, clk2 = 0, dat2 = 0, clk3 = 0, dat3 = 0; + + /* Step1: save H2W_CLK and H2W_DAT */ + /* Delay 10ms for pin stable. */ + msleep(10); + clk1 = gpio_get_value(hi->h2w_clk); + dat1 = gpio_get_value(hi->h2w_data); + + /* + * Step2: set GPIO_CABLE_IN1 as output high and GPIO_CABLE_IN2 as + * input + */ + gpio_direction_output(hi->cable_in1, 1); + gpio_direction_input(hi->cable_in2); + /* Delay 10ms for pin stable. */ + msleep(10); + /* Step 3: save H2W_CLK and H2W_DAT */ + clk2 = gpio_get_value(hi->h2w_clk); + dat2 = gpio_get_value(hi->h2w_data); + + /* + * Step 4: set GPIO_CABLE_IN1 as input and GPIO_CABLE_IN2 as output + * high + */ + gpio_direction_input(hi->cable_in1); + gpio_direction_output(hi->cable_in2, 1); + /* Delay 10ms for pin stable. 
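+	 * The (CLK, DAT) levels sampled under each of the three drive conditions
+	 * are matched against known patterns at the end of this function to tell
+	 * headsets, H2W remotes, cradles and the UART debug board apart.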
*/ + msleep(10); + /* Step 5: save H2W_CLK and H2W_DAT */ + clk3 = gpio_get_value(hi->h2w_clk); + dat3 = gpio_get_value(hi->h2w_data); + + /* Step 6: set both GPIO_CABLE_IN1 and GPIO_CABLE_IN2 as input */ + gpio_direction_input(hi->cable_in1); + gpio_direction_input(hi->cable_in2); + + H2WI("(%d,%d) (%d,%d) (%d,%d)", + clk1, dat1, clk2, dat2, clk3, dat3); + + if ((clk1 == 0) && (dat1 == 1) && + (clk2 == 0) && (dat2 == 1) && + (clk3 == 0) && (dat3 == 1)) + type = H2W_HTC_HEADSET; + else if ((clk1 == 0) && (dat1 == 0) && + (clk2 == 0) && (dat2 == 0) && + (clk3 == 0) && (dat3 == 0)) + type = NORMAL_HEARPHONE; + else if ((clk1 == 0) && (dat1 == 0) && + (clk2 == 1) && (dat2 == 0) && + (clk3 == 0) && (dat3 == 1)) + type = H2W_DEVICE; + else if ((clk1 == 0) && (dat1 == 0) && + (clk2 == 1) && (dat2 == 1) && + (clk3 == 1) && (dat3 == 1)) + type = H2W_USB_CRADLE; + else if ((clk1 == 0) && (dat1 == 1) && + (clk2 == 1) && (dat2 == 1) && + (clk3 == 0) && (dat3 == 1)) + type = H2W_UART_DEBUG; + else + type = H2W_NO_DEVICE; + + return type; +} + + +static void detection_work(struct work_struct *work) +{ + unsigned long irq_flags; + int type; + + H2W_DBG(""); + + if (gpio_get_value(hi->cable_in1) != 0) { + /* Headset not plugged in */ + if (switch_get_state(&hi->sdev) != H2W_NO_DEVICE) + remove_headset(); + return; + } + + /* Something plugged in, lets make sure its a headset */ + + /* Switch CPLD to GPIO to do detection */ + hi->config_cpld(H2W_GPIO); + + /* Disable headset interrupt while detecting.*/ + local_irq_save(irq_flags); + disable_irq(hi->irq); + local_irq_restore(irq_flags); + + /* Something plugged in, lets make sure its a headset */ + type = is_accessary_pluged_in(); + + /* Restore IRQs */ + local_irq_save(irq_flags); + enable_irq(hi->irq); + local_irq_restore(irq_flags); + + insert_headset(type); +} + +void headset_button_event(int is_press) +{ + if (!is_press) { + if (hi->ignore_btn) + hi->ignore_btn = 0; + else if (atomic_read(&hi->btn_state)) + button_released(); + } else { + if (!hi->ignore_btn && !atomic_read(&hi->btn_state)) + button_pressed(); + } +} + +static enum hrtimer_restart button_35mm_event_timer_func(struct hrtimer *data) +{ + if (gpio_get_value(hi->headset_mic_35mm)) { + headset_button_event(1); + /* 10 ms */ + hi->btn35mm_debounce_time = ktime_set(0, 10000000); + } else { + headset_button_event(0); + /* 100 ms */ + hi->btn35mm_debounce_time = ktime_set(0, 100000000); + } + + return HRTIMER_NORESTART; +} + +static enum hrtimer_restart button_event_timer_func(struct hrtimer *data) +{ + int key, press, keyname, h2w_key = 1; + + H2W_DBG(""); + + if (switch_get_state(&hi->sdev) & BIT_HEADSET) { + switch (hi->htc_headset_flag) { + case H2W_HTC_HEADSET: + if (!gpio_get_value(hi->cable_in2)) + headset_button_event(1); /* press */ + else + headset_button_event(0); + break; + case H2W_DEVICE: + if ((hi->get_dat() == 1) && (hi->get_clk() == 1)) { + /* Don't do anything because H2W pull out. */ + H2WE("Remote Control pull out.\n"); + } else { + key = h2w_get_fnkey(); + press = (key > 0x7F) ? 
0 : 1; + keyname = key & 0x7F; + /* H2WI("key = %d, press = %d, + keyname = %d \n", + key, press, keyname); */ + switch (keyname) { + case H2W_KEY_PLAY: + H2WI("H2W_KEY_PLAY"); + key = KEY_PLAYPAUSE; + break; + case H2W_KEY_FORWARD: + H2WI("H2W_KEY_FORWARD"); + key = KEY_NEXTSONG; + break; + case H2W_KEY_BACKWARD: + H2WI("H2W_KEY_BACKWARD"); + key = KEY_PREVIOUSSONG; + break; + case H2W_KEY_VOLUP: + H2WI("H2W_KEY_VOLUP"); + key = KEY_VOLUMEUP; + break; + case H2W_KEY_VOLDOWN: + H2WI("H2W_KEY_VOLDOWN"); + key = KEY_VOLUMEDOWN; + break; + case H2W_KEY_PICKUP: + H2WI("H2W_KEY_PICKUP"); + key = KEY_SEND; + break; + case H2W_KEY_HANGUP: + H2WI("H2W_KEY_HANGUP"); + key = KEY_END; + break; + case H2W_KEY_MUTE: + H2WI("H2W_KEY_MUTE"); + key = KEY_MUTE; + break; + case H2W_KEY_HOLD: + H2WI("H2W_KEY_HOLD"); + break; + default: + H2WI("default"); + h2w_key = 0; + } + if (h2w_key) { + if (press) + H2WI("Press\n"); + else + H2WI("Release\n"); + input_report_key(hi->input, key, press); + } + } + break; + } /* end switch */ + } + + return HRTIMER_NORESTART; +} + +static enum hrtimer_restart detect_event_timer_func(struct hrtimer *data) +{ + H2W_DBG(""); + + queue_work(g_detection_work_queue, &g_detection_work); + return HRTIMER_NORESTART; +} + +static irqreturn_t detect_irq_handler(int irq, void *dev_id) +{ + int value1, value2; + int retry_limit = 10; + + H2W_DBG(""); + set_irq_type(hi->irq_btn, IRQF_TRIGGER_LOW); + do { + value1 = gpio_get_value(hi->cable_in1); + set_irq_type(hi->irq, value1 ? + IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH); + value2 = gpio_get_value(hi->cable_in1); + } while (value1 != value2 && retry_limit-- > 0); + + H2W_DBG("value2 = %d (%d retries), device=%d", + value2, (10-retry_limit), switch_get_state(&hi->sdev)); + + if ((switch_get_state(&hi->sdev) == H2W_NO_DEVICE) ^ value2) { + if (switch_get_state(&hi->sdev) & BIT_HEADSET) + hi->ignore_btn = 1; + /* Do the rest of the work in timer context */ + hrtimer_start(&hi->timer, hi->debounce_time, HRTIMER_MODE_REL); + } + + return IRQ_HANDLED; +} + +static irqreturn_t button_irq_handler(int irq, void *dev_id) +{ + int value1, value2; + int retry_limit = 10; + + H2W_DBG(""); + do { + value1 = gpio_get_value(hi->cable_in2); + if (hi->htc_headset_flag != H2W_DEVICE) + set_irq_type(hi->irq_btn, value1 ? + IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH); + value2 = gpio_get_value(hi->cable_in2); + } while (value1 != value2 && retry_limit-- > 0); + + H2W_DBG("value2 = %d (%d retries)", value2, (10-retry_limit)); + + hrtimer_start(&hi->btn_timer, hi->btn_debounce_time, HRTIMER_MODE_REL); + + return IRQ_HANDLED; +} + +static irqreturn_t button_35mm_irq_handler(int irq, void *dev_id) +{ + int value1, value2; + int retry_limit = 10; + + H2W_DBG(""); + do { + value1 = gpio_get_value(hi->headset_mic_35mm); + set_irq_type(hi->irq_btn_35mm, value1 ? 
+ IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH); + value2 = gpio_get_value(hi->headset_mic_35mm); + } while (value1 != value2 && retry_limit-- > 0); + + H2W_DBG("value2 = %d (%d retries)", value2, (10-retry_limit)); + + hrtimer_start(&hi->btn35mm_timer, + hi->btn35mm_debounce_time, + HRTIMER_MODE_REL); + + return IRQ_HANDLED; + +} + +#if defined(CONFIG_DEBUG_FS) +static int h2w_debug_set(void *data, u64 val) +{ + mutex_lock(&hi->mutex_lock); + switch_set_state(&hi->sdev, (int)val); + mutex_unlock(&hi->mutex_lock); + return 0; +} + +static int h2w_debug_get(void *data, u64 *val) +{ + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(h2w_debug_fops, h2w_debug_get, h2w_debug_set, "%llu\n"); +static int __init h2w_debug_init(void) +{ + struct dentry *dent; + + dent = debugfs_create_dir("h2w", 0); + if (IS_ERR(dent)) + return PTR_ERR(dent); + + debugfs_create_file("state", 0644, dent, NULL, &h2w_debug_fops); + + return 0; +} + +device_initcall(h2w_debug_init); +#endif + +static int h2w_probe(struct platform_device *pdev) +{ + int ret; + struct h2w_platform_data *pdata = pdev->dev.platform_data; + + printk(KERN_INFO "H2W: Registering H2W (headset) driver\n"); + hi = kzalloc(sizeof(struct h2w_info), GFP_KERNEL); + if (!hi) + return -ENOMEM; + + atomic_set(&hi->btn_state, 0); + hi->ignore_btn = 0; + + hi->debounce_time = ktime_set(0, 100000000); /* 100 ms */ + hi->btn_debounce_time = ktime_set(0, 10000000); /* 10 ms */ + hi->btn35mm_debounce_time = ktime_set(0, 50000000); /* 50 ms */ + + hi->htc_headset_flag = 0; + hi->btn_11pin_35mm_flag = 0; + hi->cable_in1 = pdata->cable_in1; + hi->cable_in2 = pdata->cable_in2; + hi->h2w_clk = pdata->h2w_clk; + hi->h2w_data = pdata->h2w_data; + hi->debug_uart = pdata->debug_uart; + hi->headset_mic_35mm = pdata->headset_mic_35mm; + hi->config_cpld = pdata->config_cpld; + hi->init_cpld = pdata->init_cpld; + hi->set_dat = pdata->set_dat; + hi->set_clk = pdata->set_clk; + hi->set_dat_dir = pdata->set_dat_dir; + hi->set_clk_dir = pdata->set_clk_dir; + hi->get_dat = pdata->get_dat; + hi->get_clk = pdata->get_clk; + hi->speed = H2W_50KHz; + /* obtain needed VREGs */ + if (pdata->power_name) + hi->vreg_h2w = vreg_get(0, pdata->power_name); + + mutex_init(&hi->mutex_lock); + + hi->sdev.name = "h2w"; + hi->sdev.print_name = h2w_print_name; + + ret = switch_dev_register(&hi->sdev); + if (ret < 0) + goto err_switch_dev_register; + + g_detection_work_queue = create_workqueue("detection"); + if (g_detection_work_queue == NULL) { + ret = -ENOMEM; + goto err_create_work_queue; + } + + if (hi->headset_mic_35mm) { + ret = gpio_request(hi->headset_mic_35mm, "3.5mm_mic_detect"); + if (ret < 0) + goto err_request_35mm_mic_detect_gpio; + + ret = gpio_direction_input(hi->headset_mic_35mm); + if (ret < 0) + goto err_set_35mm_mic_detect_gpio; + + hi->irq_btn_35mm = gpio_to_irq(hi->headset_mic_35mm); + if (hi->irq_btn_35mm < 0) { + ret = hi->irq_btn_35mm; + goto err_request_btn_35mm_irq; + } + set_irq_flags(hi->irq_btn_35mm, IRQF_VALID | IRQF_NOAUTOEN); + ret = request_irq(hi->irq_btn_35mm, + button_35mm_irq_handler, + IRQF_TRIGGER_HIGH, "35mm_button", NULL); + if (ret < 0) + goto err_request_btn_35mm_irq; + } + + ret = gpio_request(hi->cable_in1, "h2w_detect"); + if (ret < 0) + goto err_request_detect_gpio; + + ret = gpio_request(hi->cable_in2, "h2w_button"); + if (ret < 0) + goto err_request_button_gpio; + + ret = gpio_direction_input(hi->cable_in1); + if (ret < 0) + goto err_set_detect_gpio; + + ret = gpio_direction_input(hi->cable_in2); + if (ret < 0) + goto err_set_button_gpio; + + hi->irq = 
gpio_to_irq(hi->cable_in1); + if (hi->irq < 0) { + ret = hi->irq; + goto err_get_h2w_detect_irq_num_failed; + } + + hi->irq_btn = gpio_to_irq(hi->cable_in2); + if (hi->irq_btn < 0) { + ret = hi->irq_btn; + goto err_get_button_irq_num_failed; + } + + /* Set CPLD MUX to H2W <-> CPLD GPIO */ + hi->init_cpld(); + + hrtimer_init(&hi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hi->timer.function = detect_event_timer_func; + hrtimer_init(&hi->btn_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hi->btn_timer.function = button_event_timer_func; + hrtimer_init(&hi->btn35mm_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hi->btn35mm_timer.function = button_35mm_event_timer_func; + + ret = request_irq(hi->irq, detect_irq_handler, + IRQF_TRIGGER_LOW, "h2w_detect", NULL); + if (ret < 0) + goto err_request_detect_irq; + + /* Disable button until plugged in */ + set_irq_flags(hi->irq_btn, IRQF_VALID | IRQF_NOAUTOEN); + ret = request_irq(hi->irq_btn, button_irq_handler, + IRQF_TRIGGER_LOW, "h2w_button", NULL); + if (ret < 0) + goto err_request_h2w_headset_button_irq; + + ret = set_irq_wake(hi->irq, 1); + if (ret < 0) + goto err_request_input_dev; + + ret = set_irq_wake(hi->irq_btn, 1); + if (ret < 0) + goto err_request_input_dev; + + + + hi->input = input_allocate_device(); + if (!hi->input) { + ret = -ENOMEM; + goto err_request_input_dev; + } + + hi->input->name = "h2w headset"; + set_bit(EV_SYN, hi->input->evbit); + set_bit(EV_KEY, hi->input->evbit); + set_bit(KEY_MEDIA, hi->input->keybit); + set_bit(KEY_NEXTSONG, hi->input->keybit); + set_bit(KEY_PLAYPAUSE, hi->input->keybit); + set_bit(KEY_PREVIOUSSONG, hi->input->keybit); + set_bit(KEY_MUTE, hi->input->keybit); + set_bit(KEY_VOLUMEUP, hi->input->keybit); + set_bit(KEY_VOLUMEDOWN, hi->input->keybit); + set_bit(KEY_END, hi->input->keybit); + set_bit(KEY_SEND, hi->input->keybit); + + ret = input_register_device(hi->input); + if (ret < 0) + goto err_register_input_dev; + + return 0; + +err_register_input_dev: + input_free_device(hi->input); +err_request_input_dev: + free_irq(hi->irq_btn, 0); +err_request_h2w_headset_button_irq: + free_irq(hi->irq, 0); +err_request_detect_irq: +err_get_button_irq_num_failed: +err_get_h2w_detect_irq_num_failed: +err_set_button_gpio: +err_set_detect_gpio: + gpio_free(hi->cable_in2); +err_request_button_gpio: + gpio_free(hi->cable_in1); +err_request_detect_gpio: + if (hi->headset_mic_35mm) + free_irq(hi->irq_btn_35mm, 0); +err_request_btn_35mm_irq: +err_set_35mm_mic_detect_gpio: + if (hi->headset_mic_35mm) + gpio_free(hi->headset_mic_35mm); +err_request_35mm_mic_detect_gpio: + destroy_workqueue(g_detection_work_queue); +err_create_work_queue: + switch_dev_unregister(&hi->sdev); +err_switch_dev_register: + printk(KERN_ERR "H2W: Failed to register driver\n"); + + return ret; +} + +static int h2w_remove(struct platform_device *pdev) +{ + H2W_DBG(""); + if (switch_get_state(&hi->sdev)) + remove_headset(); + input_unregister_device(hi->input); + gpio_free(hi->cable_in2); + gpio_free(hi->cable_in1); + free_irq(hi->irq_btn, 0); + free_irq(hi->irq, 0); + if (hi->headset_mic_35mm) { + gpio_free(hi->headset_mic_35mm); + free_irq(hi->irq_btn_35mm, 0); + } + destroy_workqueue(g_detection_work_queue); + switch_dev_unregister(&hi->sdev); + + return 0; +} + + +static struct platform_driver h2w_driver = { + .probe = h2w_probe, + .remove = h2w_remove, + .driver = { + .name = "h2w", + .owner = THIS_MODULE, + }, +}; + +static int __init h2w_init(void) +{ + H2W_DBG(""); + return platform_driver_register(&h2w_driver); +} + +static void __exit 
h2w_exit(void) +{ + platform_driver_unregister(&h2w_driver); +} + +module_init(h2w_init); +module_exit(h2w_exit); + +MODULE_AUTHOR("Laurence Chen "); +MODULE_DESCRIPTION("HTC 2 Wire detection driver"); +MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-msm/htc_headset_gpio.c b/arch/arm/mach-msm/htc_headset_gpio.c new file mode 100644 index 0000000000000..c8fa9c4675927 --- /dev/null +++ b/arch/arm/mach-msm/htc_headset_gpio.c @@ -0,0 +1,271 @@ +/* + * + * /arch/arm/mach-msm/htc_headset_gpio.c + * + * HTC GPIO headset detection driver. + * + * Copyright (C) 2010 HTC, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define DRIVER_NAME "HS_GPIO" + +/* #define DEBUG */ + +#ifdef DEBUG +#define AJ_DBG(fmt, arg...) \ + printk(KERN_INFO "[Audio Jack] %s " fmt "\r\n", __func__, ## arg) +#else +#define AJ_DBG(fmt, arg...) do {} while (0) +#endif + +struct audio_jack_info { + unsigned int irq_jack; + int audio_jack_detect; + int key_enable_gpio; + int mic_select_gpio; + int audio_jack_flag; + + struct hrtimer detection_timer; + ktime_t debounce_time; + + struct work_struct work; + + spinlock_t spin_lock; + + struct wake_lock audiojack_wake_lock; +}; + +static struct audio_jack_info *pjack_info; + +void hs_gpio_key_enable(int enable) +{ + DBG_MSG(); + + if (pjack_info->key_enable_gpio) + gpio_set_value(pjack_info->key_enable_gpio, enable); +} + +void hs_gpio_mic_select(int enable) +{ + DBG_MSG(); + + if (pjack_info->mic_select_gpio) + gpio_set_value(pjack_info->mic_select_gpio, enable); +} + +static irqreturn_t detect_irq_handler(int irq, void *dev_id) +{ + int value1, value2; + int retry_limit = 10; + + hs_notify_hpin_irq(); + + AJ_DBG(""); + + do { + value1 = gpio_get_value(pjack_info->audio_jack_detect); + set_irq_type(pjack_info->irq_jack, value1 ? 
+ IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH); + value2 = gpio_get_value(pjack_info->audio_jack_detect); + } while (value1 != value2 && retry_limit-- > 0); + + AJ_DBG("value2 = %d (%d retries)", value2, (10-retry_limit)); + + if ((pjack_info->audio_jack_flag == 0) ^ value2) { + wake_lock_timeout(&pjack_info->audiojack_wake_lock, 4*HZ); + + /* Do the rest of the work in timer context */ + hrtimer_start(&pjack_info->detection_timer, + pjack_info->debounce_time, HRTIMER_MODE_REL); + } + + return IRQ_HANDLED; +} + +static enum hrtimer_restart detect_35mm_event_timer_func(struct hrtimer *data) +{ + int state; + + AJ_DBG(""); + state = !gpio_get_value(pjack_info->audio_jack_detect); + if (pjack_info->audio_jack_flag != state) { + pjack_info->audio_jack_flag = state; + schedule_work(&pjack_info->work); + } + + return HRTIMER_NORESTART; +} + +static void audiojack_work_func(struct work_struct *work) +{ + int is_insert; + unsigned long flags = 0; + + spin_lock_irqsave(&pjack_info->spin_lock, flags); + is_insert = pjack_info->audio_jack_flag; + spin_unlock_irqrestore(&pjack_info->spin_lock, flags); + + htc_35mm_remote_notify_insert_ext_headset(is_insert); + + if (is_insert) + pjack_info->debounce_time = ktime_set(0, 200000000); + else + pjack_info->debounce_time = ktime_set(0, 500000000); +} + +static void hs_gpio_register(void) +{ + struct headset_notifier notifier; + + if (pjack_info->mic_select_gpio) { + notifier.id = HEADSET_REG_MIC_SELECT; + notifier.func = hs_gpio_mic_select; + headset_notifier_register(¬ifier); + } + + if (pjack_info->key_enable_gpio) { + notifier.id = HEADSET_REG_KEY_ENABLE; + notifier.func = hs_gpio_key_enable; + headset_notifier_register(¬ifier); + } +} + +static int audiojack_probe(struct platform_device *pdev) +{ + int ret; + struct htc_headset_gpio_platform_data *pdata = pdev->dev.platform_data; + + SYS_MSG("++++++++++++++++++++"); + + pjack_info = kzalloc(sizeof(struct audio_jack_info), GFP_KERNEL); + if (!pjack_info) + return -ENOMEM; + + pjack_info->audio_jack_detect = pdata->hpin_gpio; + pjack_info->key_enable_gpio = pdata->key_enable_gpio; + pjack_info->mic_select_gpio = pdata->mic_select_gpio; + pjack_info->audio_jack_flag = 0; + + pjack_info->debounce_time = ktime_set(0, 500000000); + hrtimer_init(&pjack_info->detection_timer, + CLOCK_MONOTONIC, HRTIMER_MODE_REL); + pjack_info->detection_timer.function = detect_35mm_event_timer_func; + + INIT_WORK(&pjack_info->work, audiojack_work_func); + + spin_lock_init(&pjack_info->spin_lock); + wake_lock_init(&pjack_info->audiojack_wake_lock, + WAKE_LOCK_SUSPEND, "audiojack"); + + if (pjack_info->audio_jack_detect) { + ret = gpio_request(pjack_info->audio_jack_detect, + "3.5mm_detect"); + if (ret < 0) + goto err_request_detect_gpio; + + ret = gpio_direction_input(pjack_info->audio_jack_detect); + if (ret < 0) + goto err_set_detect_gpio; + + pjack_info->irq_jack = + gpio_to_irq(pjack_info->audio_jack_detect); + if (pjack_info->irq_jack < 0) { + ret = pjack_info->irq_jack; + goto err_request_detect_irq; + } + + ret = request_irq(pjack_info->irq_jack, + detect_irq_handler, + IRQF_TRIGGER_LOW, "35mm_headset", NULL); + if (ret < 0) + goto err_request_detect_irq; + + ret = set_irq_wake(pjack_info->irq_jack, 1); + if (ret < 0) + goto err_set_irq_wake; + } + + hs_gpio_register(); + + SYS_MSG("--------------------"); + + return 0; + +err_set_irq_wake: + if (pjack_info->audio_jack_detect) + free_irq(pjack_info->irq_jack, 0); +err_request_detect_irq: +err_set_detect_gpio: + if (pjack_info->audio_jack_detect) + 
gpio_free(pjack_info->audio_jack_detect); +err_request_detect_gpio: + printk(KERN_ERR "Audiojack: Failed in audiojack_probe\n"); + + return ret; +} + +static int audiojack_remove(struct platform_device *pdev) +{ + if (pjack_info->audio_jack_detect) + free_irq(pjack_info->irq_jack, 0); + + if (pjack_info->audio_jack_detect) + gpio_free(pjack_info->audio_jack_detect); + + return 0; +} + +static struct platform_driver audiojack_driver = { + .probe = audiojack_probe, + .remove = audiojack_remove, + .driver = { + .name = "HTC_HEADSET_GPIO", + .owner = THIS_MODULE, + }, +}; + +static int __init audiojack_init(void) +{ + return platform_driver_register(&audiojack_driver); +} + +static void __exit audiojack_exit(void) +{ + platform_driver_unregister(&audiojack_driver); +} + +module_init(audiojack_init); +module_exit(audiojack_exit); + +MODULE_DESCRIPTION("HTC GPIO headset detection driver"); +MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-msm/htc_headset_mgr.c b/arch/arm/mach-msm/htc_headset_mgr.c new file mode 100644 index 0000000000000..e16f46a158882 --- /dev/null +++ b/arch/arm/mach-msm/htc_headset_mgr.c @@ -0,0 +1,925 @@ +/* + * + * /arch/arm/mach-msm/htc_headset_mgr.c + * + * HTC headset manager driver. + * + * Copyright (C) 2010 HTC, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include + +#define DRIVER_NAME "HS_MGR" + +/* #define CONFIG_DEBUG_H2W */ + +/*Delay 200ms when 11pin device plug in*/ +#define H2W_DETECT_DELAY msecs_to_jiffies(200) +#define BUTTON_H2W_DELAY msecs_to_jiffies(10) + +#define H2WI(fmt, arg...) \ + printk(KERN_INFO "[H2W] %s " fmt "\r\n", __func__, ## arg) +#define H2WE(fmt, arg...) \ + printk(KERN_ERR "[H2W] %s " fmt "\r\n", __func__, ## arg) + +#ifdef CONFIG_DEBUG_H2W +#define H2W_DBG(fmt, arg...) \ + printk(KERN_INFO "[H2W] %s " fmt "\r\n", __func__, ## arg) +#else +#define H2W_DBG(fmt, arg...) 
do {} while (0) +#endif + +static struct workqueue_struct *detect_wq; + +static void insert_35mm_do_work(struct work_struct *work); +static DECLARE_WORK(insert_35mm_work, insert_35mm_do_work); +static void remove_35mm_do_work(struct work_struct *work); +static DECLARE_WORK(remove_35mm_work, remove_35mm_do_work); + +static struct workqueue_struct *button_wq; + +static void button_35mm_do_work(struct work_struct *w); +static DECLARE_DELAYED_WORK(button_35mm_work, button_35mm_do_work); + +static int hs_mgr_rpc_call(struct msm_rpc_server *server, + struct rpc_request_hdr *req, unsigned len); + +static struct msm_rpc_server hs_rpc_server = { + .prog = HS_RPC_SERVER_PROG, + .vers = HS_RPC_SERVER_VERS, + .rpc_call = hs_mgr_rpc_call, +}; + +struct button_work { + struct delayed_work key_work; + int key_code; +}; + +static struct h2w_info *hi; +static struct hs_notifier_func hs_mgr_notifier; + +void hs_notify_hpin_irq(void) +{ + hi->hpin_jiffies = jiffies; + SYS_MSG("HPIN IRQ"); +} + +int hs_hpin_stable(void) +{ + unsigned long last_hpin_jiffies = 0; + unsigned long unstable_jiffies = 1.2 * HZ; + + last_hpin_jiffies = hi->hpin_jiffies; + + if (time_before_eq(jiffies, last_hpin_jiffies + unstable_jiffies)) + return 0; + + return 1; +} + +int headset_notifier_register(struct headset_notifier *notifier) +{ + if (!notifier->func) { + SYS_MSG("NULL register function"); + return 0; + } + + switch (notifier->id) { + case HEADSET_REG_REMOTE_ADC: + SYS_MSG("Register REMOTE_ADC notifier"); + hs_mgr_notifier.remote_adc = notifier->func; + break; + case HEADSET_REG_RPC_KEY: + SYS_MSG("Register RPC_KEY notifier"); + hs_mgr_notifier.rpc_key = notifier->func; + break; + case HEADSET_REG_MIC_STATUS: + SYS_MSG("Register MIC_STATUS notifier"); + hs_mgr_notifier.mic_status = notifier->func; + break; + case HEADSET_REG_MIC_BIAS: + SYS_MSG("Register MIC_BIAS notifier"); + hs_mgr_notifier.mic_bias_enable = notifier->func; + break; + case HEADSET_REG_MIC_SELECT: + SYS_MSG("Register MIC_SELECT notifier"); + hs_mgr_notifier.mic_select = notifier->func; + break; + case HEADSET_REG_KEY_INT_ENABLE: + SYS_MSG("Register KEY_INT_ENABLE notifier"); + hs_mgr_notifier.key_int_enable = notifier->func; + break; + case HEADSET_REG_KEY_ENABLE: + SYS_MSG("Register KEY_ENABLE notifier"); + hs_mgr_notifier.key_enable = notifier->func; + break; + default: + SYS_MSG("Unknown register ID"); + return 0; + } + + return 1; +} + +static int hs_mgr_rpc_call(struct msm_rpc_server *server, + struct rpc_request_hdr *req, unsigned len) +{ + struct hs_rpc_server_args_key *args_key; + + DBG_MSG(""); + + switch (req->procedure) { + case HS_RPC_SERVER_PROC_NULL: + SYS_MSG("RPC_SERVER_NULL"); + break; + case HS_RPC_SERVER_PROC_KEY: + args_key = (struct hs_rpc_server_args_key *)(req + 1); + args_key->adc = be32_to_cpu(args_key->adc); + SYS_MSG("RPC_SERVER_KEY ADC = %u (0x%X)", + args_key->adc, args_key->adc); + if (hs_mgr_notifier.rpc_key) + hs_mgr_notifier.rpc_key(args_key->adc); + else + SYS_MSG("RPC_KEY notify function doesn't exist"); + break; + default: + SYS_MSG("Unknown RPC procedure"); + return -EINVAL; + } + + return 0; +} + +static ssize_t h2w_print_name(struct switch_dev *sdev, char *buf) +{ + return sprintf(buf, "Headset\n"); +} + +void button_pressed(int type) +{ + printk(KERN_INFO "[H2W] button_pressed %d\n", type); + atomic_set(&hi->btn_state, type); + input_report_key(hi->input, type, 1); + input_sync(hi->input); +} + +void button_released(int type) +{ + printk(KERN_INFO "[H2W] button_released %d\n", type); + atomic_set(&hi->btn_state, 0); 
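+ /* Note: btn_state caches the keycode reported on press, so this release path and the remove paths that call button_released(atomic_read(&hi->btn_state)) can emit the matching key-up event. */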
+ input_report_key(hi->input, type, 0); + input_sync(hi->input); +} + +void headset_button_event(int is_press, int type) +{ + if (!hs_hpin_stable()) { + H2WI("The HPIN is unstable, SKIP THE BUTTON EVENT."); + return; + } + + if (!is_press) { + if (hi->ignore_btn) + hi->ignore_btn = 0; + else + button_released(type); + } else { + if (!hi->ignore_btn && !atomic_read(&hi->btn_state)) + button_pressed(type); + } +} + +static void set_35mm_hw_state(int state) +{ + if (hi->mic_bias_state != state && hs_mgr_notifier.mic_bias_enable) { + hs_mgr_notifier.mic_bias_enable(state); + hi->mic_bias_state = state; + if (state) /* Wait for MIC bias stable */ + msleep(HS_DELAY_MIC_BIAS); + } + + if (hs_mgr_notifier.mic_select) + hs_mgr_notifier.mic_select(state); + + if (hs_mgr_notifier.key_enable) + hs_mgr_notifier.key_enable(state); + + if (hs_mgr_notifier.key_int_enable) + hs_mgr_notifier.key_int_enable(state); +} + +static void insert_11pin_35mm(int *state) +{ + int mic = HEADSET_NO_MIC; + + SYS_MSG("Insert USB 3.5mm headset"); + set_35mm_hw_state(1); + + if (hs_mgr_notifier.mic_status) + mic = hs_mgr_notifier.mic_status(); + + if (mic == HEADSET_NO_MIC) { + /* without microphone */ + *state |= BIT_HEADSET_NO_MIC; + hi->h2w_35mm_status = HTC_35MM_NO_MIC; + printk(KERN_INFO "11pin_3.5mm without microphone\n"); + } else { /* with microphone */ + *state |= BIT_HEADSET; + hi->h2w_35mm_status = HTC_35MM_MIC; + printk(KERN_INFO "11pin_3.5mm with microphone\n"); + } +} + +static void remove_11pin_35mm(void) +{ + SYS_MSG("Remove USB 3.5mm headset"); + + set_35mm_hw_state(0); + + if (atomic_read(&hi->btn_state)) + button_released(atomic_read(&hi->btn_state)); + hi->h2w_35mm_status = HTC_35MM_UNPLUG; +} + +static void button_35mm_do_work(struct work_struct *w) +{ + int key; + struct button_work *work; + + work = container_of(w, struct button_work, key_work.work); + hi->key_level_flag = work->key_code; + + if (!hi->is_ext_insert && !hi->h2w_35mm_status) { + kfree(work); + H2WI("3.5mm headset is plugged out, skip report key event"); + return; + } + + if (hi->key_level_flag) { + switch (hi->key_level_flag) { + case 1: + H2WI("3.5mm RC: Play Pressed"); + key = HS_MGR_KEYCODE_MEDIA; + break; + case 2: + H2WI("3.5mm RC: BACKWARD Pressed"); + key = HS_MGR_KEYCODE_BACKWARD; + break; + case 3: + H2WI("3.5mm RC: FORWARD Pressed"); + key = HS_MGR_KEYCODE_FORWARD; + break; + default: + H2WI("3.5mm RC: WRONG Button Pressed"); + return; + } + headset_button_event(1, key); + } else { /* key release */ + if (atomic_read(&hi->btn_state)) + headset_button_event(0, atomic_read(&hi->btn_state)); + else + H2WI("3.5mm RC: WRONG Button Release"); + } + + wake_lock_timeout(&hi->headset_wake_lock, 1.5 * HZ); + + kfree(work); +} + +static void enable_metrico_headset(int enable) +{ + if (enable && !hi->metrico_status) { +#if 0 + enable_mos_test(1); +#endif + hi->metrico_status = 1; + printk(KERN_INFO "Enable metrico headset\n"); + } + + if (!enable && hi->metrico_status) { +#if 0 + enable_mos_test(0); +#endif + hi->metrico_status = 0; + printk(KERN_INFO "Disable metrico headset\n"); + } +} + +static void remove_35mm_do_work(struct work_struct *work) +{ + int state; + + wake_lock_timeout(&hi->headset_wake_lock, 2.5 * HZ); + + H2W_DBG(""); + /*To solve the insert, remove, insert headset problem*/ + if (time_before_eq(jiffies, hi->insert_jiffies)) + msleep(800); + if (hi->is_ext_insert) { + H2WI("Skip 3.5mm headset plug out!!!"); + return; + } + + SYS_MSG("Remove 3.5mm headset"); + set_35mm_hw_state(0); + + /* For HW Metrico lab test */ + if 
(hi->metrico_status) + enable_metrico_headset(0); + + if (atomic_read(&hi->btn_state)) + button_released(atomic_read(&hi->btn_state)); + hi->ext_35mm_status = HTC_35MM_UNPLUG; + + mutex_lock(&hi->mutex_lock); + state = switch_get_state(&hi->sdev); + + if (hi->usb_dev_type == USB_HEADSET) { + hi->usb_dev_status = STATUS_CONNECTED_ENABLED; + state &= ~BIT_HEADSET; + state |= BIT_HEADSET_NO_MIC; + switch_set_state(&hi->sdev, state); + mutex_unlock(&hi->mutex_lock); + } else if (hi->usb_dev_type == H2W_TVOUT) { + state &= ~BIT_HEADSET; + state |= BIT_HEADSET_NO_MIC; + switch_set_state(&hi->sdev, state); +#if 0 + } else if (hi->cable_in1 && !gpio_get_value(hi->cable_in1)) { + state &= ~BIT_35MM_HEADSET; + switch_set_state(&hi->sdev, state); + queue_delayed_work(detect_wq, &detect_h2w_work, + HS_DELAY_ZERO_JIFFIES); +#endif + } else { + state &= ~(BIT_HEADSET | BIT_HEADSET_NO_MIC); + switch_set_state(&hi->sdev, state); + } + + mutex_unlock(&hi->mutex_lock); +} + +static void insert_35mm_do_work(struct work_struct *work) +{ + int state; + int i, mic1, mic2; + + H2W_DBG(""); + hi->insert_jiffies = jiffies + HZ; + + wake_lock_timeout(&hi->headset_wake_lock, 1.5 * HZ); + +#if 0 + if (hi->usb_dev_type && hi->is_ext_insert && + hi->usb_dev_type != H2W_TVOUT && hi->usb_dev_type != USB_HEADSET) + remove_headset(); + else if (hi->usb_dev_type == USB_HEADSET) + hi->usb_dev_status = STATUS_CONNECTED_DISABLED; +#endif + + if (hi->usb_dev_type == USB_HEADSET) + hi->usb_dev_status = STATUS_CONNECTED_DISABLED; + + if (hi->is_ext_insert) { + SYS_MSG("Insert 3.5mm headset"); + set_35mm_hw_state(1); + hi->ignore_btn = 0; + + mic1 = mic2 = HEADSET_NO_MIC; + if (hs_mgr_notifier.mic_status) { + if (hi->ext_35mm_status == HTC_35MM_NO_MIC || + hi->h2w_35mm_status == HTC_35MM_NO_MIC) + for (i = 0; i < 10; i++) { + mic1 = hs_mgr_notifier.mic_status(); + msleep(HS_DELAY_MIC_DETECT); + mic2 = hs_mgr_notifier.mic_status(); + if (mic1 == mic2) + break; + } + else + mic1 = mic2 = hs_mgr_notifier.mic_status(); + } + + /* For HW Metrico lab test */ + if (mic2 == HEADSET_METRICO && !hi->metrico_status) + enable_metrico_headset(1); + + mutex_lock(&hi->mutex_lock); + state = switch_get_state(&hi->sdev); + state &= ~(BIT_HEADSET | BIT_HEADSET_NO_MIC); + if (mic2 == HEADSET_NO_MIC || mic1 != mic2) { + state |= BIT_HEADSET_NO_MIC; + printk(KERN_INFO "3.5mm_headset without microphone\n"); + } else { + state |= BIT_HEADSET; + printk(KERN_INFO "3.5mm_headset with microphone\n"); + } + + + switch_set_state(&hi->sdev, state); + if (state & BIT_HEADSET_NO_MIC) + hi->ext_35mm_status = HTC_35MM_NO_MIC; + else + hi->ext_35mm_status = HTC_35MM_MIC; + mutex_unlock(&hi->mutex_lock); + } +} + +int htc_35mm_remote_notify_insert_ext_headset(int insert) +{ + if (hi) { + mutex_lock(&hi->mutex_lock); + hi->is_ext_insert = insert; + mutex_unlock(&hi->mutex_lock); + + H2WI(" %d", hi->is_ext_insert); + if (!hi->is_ext_insert) + queue_work(detect_wq, &remove_35mm_work); + else + queue_work(detect_wq, &insert_35mm_work); + } + return 1; +} + +int htc_35mm_remote_notify_microp_ready(void) +{ + if (hi) { + if (hi->is_ext_insert) + queue_work(detect_wq, &insert_35mm_work); +#if 0 + if (hi->h2w_35mm_status) + insert_headset(NORMAL_HEARPHONE); +#endif + } + return 1; +} + +int htc_35mm_remote_notify_button_status(int key_level) +{ + struct button_work *work; + + if (hi->ext_35mm_status == HTC_35MM_NO_MIC || + hi->h2w_35mm_status == HTC_35MM_NO_MIC) { + SYS_MSG("MIC re-detection"); + msleep(HS_DELAY_MIC_DETECT); + queue_work(detect_wq, &insert_35mm_work); + } 
else if (!hs_hpin_stable()) { + H2WI("The HPIN is unstable, SKIP THE BUTTON EVENT."); + return 1; + } else { + work = kzalloc(sizeof(struct button_work), GFP_KERNEL); + if (!work) { + printk(KERN_INFO "Failed to allocate button memory\n"); + return 1; + } + work->key_code = key_level; + INIT_DELAYED_WORK(&work->key_work, button_35mm_do_work); + queue_delayed_work(button_wq, &work->key_work, + HS_JIFFIES_BUTTON); + } + + return 1; +} + +static void usb_headset_detect(int type) +{ + int state; + + mutex_lock(&hi->mutex_lock); + state = switch_get_state(&hi->sdev); + + switch (type) { + case NO_DEVICE: + if (hi->usb_dev_type == USB_HEADSET) { + printk(KERN_INFO "Remove USB headset\n"); + hi->usb_dev_type = NO_DEVICE; + hi->usb_dev_status = STATUS_DISCONNECTED; + state &= ~BIT_USB_HEADSET; + if (!hi->is_ext_insert) + state &= ~BIT_HEADSET_NO_MIC; + } + break; + case USB_HEADSET: + printk(KERN_INFO "Insert USB headset\n"); + hi->usb_dev_type = USB_HEADSET; + if (hi->is_ext_insert) { + printk(KERN_INFO "Disable USB headset\n"); + hi->usb_dev_status = STATUS_CONNECTED_DISABLED; + state |= BIT_USB_HEADSET; + } else { + printk(KERN_INFO "Enable USB headset\n"); + hi->usb_dev_status = STATUS_CONNECTED_ENABLED; + state |= (BIT_USB_HEADSET | BIT_HEADSET_NO_MIC); + } + break; + default: + printk(KERN_INFO "Unknown headset type\n"); + } + + switch_set_state(&hi->sdev, state); + mutex_unlock(&hi->mutex_lock); +} + +void headset_ext_detect(int type) +{ + switch (type) { + case NO_DEVICE: + if (hi->usb_dev_type == USB_HEADSET) + usb_headset_detect(type); + break; + case USB_HEADSET: + usb_headset_detect(type); + break; + default: + printk(KERN_INFO "Unknown headset type\n"); + } +} + +int switch_send_event(unsigned int bit, int on) +{ + unsigned long state; + + mutex_lock(&hi->mutex_lock); + state = switch_get_state(&hi->sdev); + state &= ~(bit); + + if (on) + state |= bit; + + switch_set_state(&hi->sdev, state); + mutex_unlock(&hi->mutex_lock); + return 0; +} + +static ssize_t tty_flag_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + char *s = buf; + mutex_lock(&hi->mutex_lock); + s += sprintf(s, "%d\n", hi->tty_enable_flag); + mutex_unlock(&hi->mutex_lock); + return (s - buf); +} + +static ssize_t tty_flag_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int state; + + mutex_lock(&hi->mutex_lock); + state = switch_get_state(&hi->sdev); + state &= ~(BIT_TTY_FULL | BIT_TTY_VCO | BIT_TTY_HCO); + + if (count == (strlen("enable") + 1) && + strncmp(buf, "enable", strlen("enable")) == 0) { + hi->tty_enable_flag = 1; + switch_set_state(&hi->sdev, state | BIT_TTY_FULL); + mutex_unlock(&hi->mutex_lock); + printk(KERN_INFO "Enable TTY FULL\n"); + return count; + } + if (count == (strlen("vco_enable") + 1) && + strncmp(buf, "vco_enable", strlen("vco_enable")) == 0) { + hi->tty_enable_flag = 2; + switch_set_state(&hi->sdev, state | BIT_TTY_VCO); + mutex_unlock(&hi->mutex_lock); + printk(KERN_INFO "Enable TTY VCO\n"); + return count; + } + if (count == (strlen("hco_enable") + 1) && + strncmp(buf, "hco_enable", strlen("hco_enable")) == 0) { + hi->tty_enable_flag = 3; + switch_set_state(&hi->sdev, state | BIT_TTY_HCO); + mutex_unlock(&hi->mutex_lock); + printk(KERN_INFO "Enable TTY HCO\n"); + return count; + } + if (count == (strlen("disable") + 1) && + strncmp(buf, "disable", strlen("disable")) == 0) { + hi->tty_enable_flag = 0; + switch_set_state(&hi->sdev, state); + mutex_unlock(&hi->mutex_lock); + printk(KERN_INFO "Disable TTY\n"); + return 
count; + } + printk(KERN_ERR "tty_enable_flag_store: invalid argument\n"); + return -EINVAL; +} +static DEVICE_ACCESSORY_ATTR(tty, 0666, tty_flag_show, tty_flag_store); + +static ssize_t fm_flag_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int state; + + mutex_lock(&hi->mutex_lock); + state = switch_get_state(&hi->sdev); + state &= ~(BIT_FM_HEADSET | BIT_FM_SPEAKER); + + if (count == (strlen("fm_headset") + 1) && + strncmp(buf, "fm_headset", strlen("fm_headset")) == 0) { + hi->fm_flag = 1; + state |= BIT_FM_HEADSET; + printk(KERN_INFO "Enable FM HEADSET\n"); + } else if (count == (strlen("fm_speaker") + 1) && + strncmp(buf, "fm_speaker", strlen("fm_speaker")) == 0) { + hi->fm_flag = 2; + state |= BIT_FM_SPEAKER; + printk(KERN_INFO "Enable FM SPEAKER\n"); + } else if (count == (strlen("disable") + 1) && + strncmp(buf, "disable", strlen("disable")) == 0) { + hi->fm_flag = 0 ; + printk(KERN_INFO "Disable FM\n"); + } else { + mutex_unlock(&hi->mutex_lock); + printk(KERN_ERR "fm_enable_flag_store: invalid argument\n"); + return -EINVAL; + } + switch_set_state(&hi->sdev, state); + mutex_unlock(&hi->mutex_lock); + return count; +} + +static ssize_t fm_flag_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + char *s = buf; + char *show_str; + mutex_lock(&hi->mutex_lock); + if (hi->fm_flag == 0) + show_str = "disable"; + if (hi->fm_flag == 1) + show_str = "fm_headset"; + if (hi->fm_flag == 2) + show_str = "fm_speaker"; + + s += sprintf(s, "%s\n", show_str); + mutex_unlock(&hi->mutex_lock); + return (s - buf); +} +static DEVICE_ACCESSORY_ATTR(fm, 0666, fm_flag_show, fm_flag_store); + +static int register_common_headset(struct h2w_info *h2w, int create_attr) +{ + int ret = 0; + hi = h2w; + + hi->htc_accessory_class = class_create(THIS_MODULE, "htc_accessory"); + if (IS_ERR(hi->htc_accessory_class)) { + ret = PTR_ERR(hi->htc_accessory_class); + hi->htc_accessory_class = NULL; + goto err_create_class; + } + + hi->tty_dev = device_create(hi->htc_accessory_class, + NULL, 0, "%s", "tty"); + if (unlikely(IS_ERR(hi->tty_dev))) { + ret = PTR_ERR(hi->tty_dev); + hi->tty_dev = NULL; + goto err_create_tty_device; + } + + /* register the attributes */ + ret = device_create_file(hi->tty_dev, &dev_attr_tty); + if (ret) + goto err_create_tty_device_file; + + hi->fm_dev = device_create(hi->htc_accessory_class, + NULL, 0, "%s", "fm"); + if (unlikely(IS_ERR(hi->fm_dev))) { + ret = PTR_ERR(hi->fm_dev); + hi->fm_dev = NULL; + goto err_create_fm_device; + } + + /* register the attributes */ + ret = device_create_file(hi->fm_dev, &dev_attr_fm); + if (ret) + goto err_create_fm_device_file; + + return 0; + +err_create_fm_device_file: + device_unregister(hi->fm_dev); +err_create_fm_device: + device_remove_file(hi->tty_dev, &dev_attr_tty); +err_create_tty_device_file: + device_unregister(hi->tty_dev); +err_create_tty_device: + class_destroy(hi->htc_accessory_class); +err_create_class: + + return ret; +} + +static void unregister_common_headset(struct h2w_info *h2w) +{ + hi = h2w; + device_remove_file(hi->tty_dev, &dev_attr_tty); + device_unregister(hi->tty_dev); + device_remove_file(hi->fm_dev, &dev_attr_fm); + device_unregister(hi->fm_dev); + class_destroy(hi->htc_accessory_class); +} + +static int htc_35mm_probe(struct platform_device *pdev) +{ + int ret; + + struct htc_headset_mgr_platform_data *pdata = pdev->dev.platform_data; + + SYS_MSG("++++++++++++++++++++"); + + hi = kzalloc(sizeof(struct h2w_info), GFP_KERNEL); + if (!hi) + return -ENOMEM; + + 
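+ /* Note: manager state lives in the single global h2w_info allocated above; the rest of probe fills it from platform data, then registers the switch device, the detection/button work queues, the input device, and (when DRIVER_HS_MGR_RPC_SERVER is set) the RPC server. */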
hi->driver_flag = pdata->driver_flag; + hi->hpin_jiffies = jiffies; + + hi->ext_35mm_status = 0; + hi->h2w_35mm_status = 0; + hi->is_ext_insert = 0; + hi->mic_bias_state = 0; + hi->key_level_flag = -1; + + atomic_set(&hi->btn_state, 0); + hi->ignore_btn = 0; + hi->usb_dev_type = NO_DEVICE; + hi->tty_enable_flag = 0; + hi->fm_flag = 0; + hi->mic_switch_flag = 1; + hi->rc_flag = 0; + + hi->insert_11pin_35mm = insert_11pin_35mm; + hi->remove_11pin_35mm = remove_11pin_35mm; + + mutex_init(&hi->mutex_lock); + mutex_init(&hi->mutex_rc_lock); + + wake_lock_init(&hi->headset_wake_lock, WAKE_LOCK_SUSPEND, "headset"); + + hi->sdev.name = "h2w"; + hi->sdev.print_name = h2w_print_name; + + ret = switch_dev_register(&hi->sdev); + if (ret < 0) + goto err_switch_dev_register; + + detect_wq = create_workqueue("detection"); + if (detect_wq == NULL) { + ret = -ENOMEM; + goto err_create_detect_work_queue; + } + button_wq = create_workqueue("button"); + if (button_wq == NULL) { + ret = -ENOMEM; + goto err_create_button_work_queue; + } + + hi->input = input_allocate_device(); + if (!hi->input) { + ret = -ENOMEM; + goto err_request_input_dev; + } + + hi->input->name = "h2w headset"; + set_bit(EV_SYN, hi->input->evbit); + set_bit(EV_KEY, hi->input->evbit); + set_bit(KEY_END, hi->input->keybit); + set_bit(KEY_MUTE, hi->input->keybit); + set_bit(KEY_VOLUMEDOWN, hi->input->keybit); + set_bit(KEY_VOLUMEUP, hi->input->keybit); + set_bit(KEY_NEXTSONG, hi->input->keybit); + set_bit(KEY_PLAYPAUSE, hi->input->keybit); + set_bit(KEY_PREVIOUSSONG, hi->input->keybit); + set_bit(KEY_MEDIA, hi->input->keybit); + set_bit(KEY_SEND, hi->input->keybit); + + ret = input_register_device(hi->input); + if (ret < 0) + goto err_register_input_dev; + + ret = register_common_headset(hi, 0); + if (ret) + goto err_register_common_headset; + + if (hi->driver_flag & DRIVER_HS_MGR_RPC_SERVER) { + /* Create RPC server */ + ret = msm_rpc_create_server(&hs_rpc_server); + if (ret < 0) { + SYS_MSG("Failed to create RPC server"); + goto err_create_rpc_server; + } + SYS_MSG("Create RPC server successfully"); + } + + SYS_MSG("--------------------"); + + return 0; + +err_create_rpc_server: + +err_register_common_headset: + input_unregister_device(hi->input); + +err_register_input_dev: + input_free_device(hi->input); + +err_request_input_dev: + destroy_workqueue(button_wq); + +err_create_button_work_queue: + destroy_workqueue(detect_wq); + +err_create_detect_work_queue: + switch_dev_unregister(&hi->sdev); + +err_switch_dev_register: + + printk(KERN_ERR "H2W: Failed to register driver\n"); + + return ret; +} + +static int htc_35mm_remove(struct platform_device *pdev) +{ + H2W_DBG(""); + +#if 0 + if ((switch_get_state(&hi->sdev) & + (BIT_HEADSET | BIT_HEADSET_NO_MIC)) != 0) + remove_headset(); +#endif + + unregister_common_headset(hi); + input_unregister_device(hi->input); + destroy_workqueue(detect_wq); + destroy_workqueue(button_wq); + switch_dev_unregister(&hi->sdev); + + return 0; +} + +static struct platform_driver htc_35mm_driver = { + .probe = htc_35mm_probe, + .remove = htc_35mm_remove, + .driver = { + .name = "HTC_HEADSET_MGR", + .owner = THIS_MODULE, + }, +}; + + +static int __init htc_35mm_init(void) +{ + H2W_DBG(""); + return platform_driver_register(&htc_35mm_driver); +} + +static void __exit htc_35mm_exit(void) +{ + platform_driver_unregister(&htc_35mm_driver); +} + +module_init(htc_35mm_init); +module_exit(htc_35mm_exit); + +MODULE_DESCRIPTION("HTC headset manager driver"); +MODULE_LICENSE("GPL"); diff --git 
a/arch/arm/mach-msm/htc_headset_microp.c b/arch/arm/mach-msm/htc_headset_microp.c new file mode 100644 index 0000000000000..363d256546709 --- /dev/null +++ b/arch/arm/mach-msm/htc_headset_microp.c @@ -0,0 +1,419 @@ +/* + * + * /arch/arm/mach-msm/htc_headset_microp.c + * + * HTC Micro-P headset detection driver. + * + * Copyright (C) 2010 HTC, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include + +#include + +#include +#include + +#define DRIVER_NAME "HS_MICROP" + +static struct htc_headset_microp_info *hi; + +static struct workqueue_struct *detect_wq; +static void detect_microp_work_func(struct work_struct *work); +static DECLARE_DELAYED_WORK(detect_microp_work, detect_microp_work_func); + +static struct workqueue_struct *button_wq; +static void button_microp_work_func(struct work_struct *work); +static DECLARE_WORK(button_microp_work, button_microp_work_func); + +static void hs_microp_key_enable(int enable) +{ + uint8_t addr; + uint8_t data[3]; + + DBG_MSG(); + + if (hi->pdata.remote_enable_pin) { + addr = (enable) ? MICROP_I2C_WCMD_GPO_LED_STATUS_EN : + MICROP_I2C_WCMD_GPO_LED_STATUS_DIS; + data[0] = (hi->pdata.remote_enable_pin >> 16) & 0xFF; + data[1] = (hi->pdata.remote_enable_pin >> 8) & 0xFF; + data[2] = (hi->pdata.remote_enable_pin >> 0) & 0xFF; + microp_i2c_write(addr, data, 3); + } +} + +static int headset_microp_enable_interrupt(int interrupt, int enable) +{ + uint8_t addr = 0x00; + uint8_t data[2]; + + DBG_MSG(); + + addr = (enable) ? 
MICROP_I2C_WCMD_GPI_INT_CTL_EN : + MICROP_I2C_WCMD_GPI_INT_CTL_DIS; + + memset(data, 0x00, sizeof(data)); + data[0] = (uint8_t) (interrupt >> 8); + data[1] = (uint8_t) interrupt; + + return microp_i2c_write(addr, data, 2); +} + +static int headset_microp_enable_button_int(int enable) +{ + int ret = 0; + + DBG_MSG(); + + if (hi->pdata.remote_int) + ret = headset_microp_enable_interrupt(hi->pdata.remote_int, + enable); + + return ret; +} + +static int headset_microp_remote_adc(int *adc) +{ + int ret = 0; + uint8_t data[2]; + + DBG_MSG(); + + data[0] = 0x00; + data[1] = hi->pdata.adc_channel; + ret = microp_read_adc(data); + if (ret != 0) { + SYS_MSG("Failed to read Micro-P ADC"); + return 0; + } + + *adc = data[0] << 8 | data[1]; + SYS_MSG("Remote ADC %d (0x%X)", *adc, *adc); + + return 1; +} + +static int headset_microp_mic_status(void) +{ + int ret = HEADSET_NO_MIC; + int adc = 0; + + DBG_MSG(); + + ret = headset_microp_remote_adc(&adc); + if (!ret) { + SYS_MSG("Failed to read Micro-P remote ADC"); + return HEADSET_NO_MIC; + } + + if (hi->pdata.adc_metrico[0] && hi->pdata.adc_metrico[1] && + adc >= hi->pdata.adc_metrico[0] && adc <= hi->pdata.adc_metrico[1]) + ret = HEADSET_METRICO; /* For Metrico lab test */ + else if (adc >= HS_DEF_MIC_ADC_10_BIT) + ret = HEADSET_MIC; + else + ret = HEADSET_NO_MIC; + + return ret; +} + +static void detect_microp_work_func(struct work_struct *work) +{ + int insert = 0; + int gpio_status = 0; + uint8_t data[3]; + + DBG_MSG(); + + microp_read_gpio_status(data); + gpio_status = data[0] << 16 | data[1] << 8 | data[2]; + insert = (gpio_status & hi->hpin_gpio_mask) ? 0 : 1; + htc_35mm_remote_notify_insert_ext_headset(insert); + + hi->hpin_debounce = (insert) ? HS_JIFFIES_REMOVE : HS_JIFFIES_INSERT; +} + +static void button_microp_work_func(struct work_struct *work) +{ + int ret = 0; + int keycode = -1; + uint8_t data[2]; + + DBG_MSG(); + + memset(data, 0x00, sizeof(data)); + ret = microp_i2c_read(MICROP_I2C_RCMD_REMOTE_KEYCODE, data, 2); + if (ret != 0) { + SYS_MSG("Failed to read Micro-P remote key code"); + return; + } + + if (!data[1]) + keycode = -1; /* no key code */ + else if (data[1] & 0x80) + keycode = 0; /* release key code */ + else + keycode = (int) data[1]; + + SYS_MSG("Key code %d", keycode); + + htc_35mm_remote_notify_button_status(keycode); +} + +static irqreturn_t htc_headset_microp_detect_irq(int irq, void *data) +{ + hs_notify_hpin_irq(); + + DBG_MSG(); + + queue_delayed_work(detect_wq, &detect_microp_work, hi->hpin_debounce); + + return IRQ_HANDLED; +} + +static irqreturn_t htc_headset_microp_button_irq(int irq, void *data) +{ + DBG_MSG(); + + queue_work(button_wq, &button_microp_work); + + return IRQ_HANDLED; +} + +static void hs_microp_register(void) +{ + struct headset_notifier notifier; + + if (hi->pdata.adc_channel) { + notifier.id = HEADSET_REG_REMOTE_ADC; + notifier.func = headset_microp_remote_adc; + headset_notifier_register(¬ifier); + + notifier.id = HEADSET_REG_MIC_STATUS; + notifier.func = headset_microp_mic_status; + headset_notifier_register(¬ifier); + } + + if (hi->pdata.remote_int) { + notifier.id = HEADSET_REG_KEY_INT_ENABLE; + notifier.func = headset_microp_enable_button_int; + headset_notifier_register(¬ifier); + } + + if (hi->pdata.remote_enable_pin) { + notifier.id = HEADSET_REG_KEY_ENABLE; + notifier.func = hs_microp_key_enable; + headset_notifier_register(¬ifier); + } +} + +static int htc_headset_microp_probe(struct platform_device *pdev) +{ + int i = 0; + int ret = 0; + uint8_t data[12]; + + struct 
htc_headset_microp_platform_data *pdata = NULL; + + SYS_MSG("++++++++++++++++++++"); + + pdata = pdev->dev.platform_data; + + hi = kzalloc(sizeof(struct htc_headset_microp_info), GFP_KERNEL); + if (!hi) { + SYS_MSG("Failed to allocate memory for headset info"); + return -ENOMEM; + } + + hi->pdata.hpin_int = pdata->hpin_int; + hi->pdata.hpin_irq = pdata->hpin_irq; + if (pdata->hpin_mask[0] || pdata->hpin_mask[1] || pdata->hpin_mask[2]) + memcpy(hi->pdata.hpin_mask, pdata->hpin_mask, + sizeof(hi->pdata.hpin_mask)); + + hi->pdata.remote_int = pdata->remote_int; + hi->pdata.remote_irq = pdata->remote_irq; + hi->pdata.remote_enable_pin = pdata->remote_enable_pin; + hi->pdata.adc_channel = pdata->adc_channel; + + if (pdata->adc_remote[5]) + memcpy(hi->pdata.adc_remote, pdata->adc_remote, + sizeof(hi->pdata.adc_remote)); + + if (pdata->adc_metrico[0] && pdata->adc_metrico[1]) + memcpy(hi->pdata.adc_metrico, pdata->adc_metrico, + sizeof(hi->pdata.adc_metrico)); + + hi->hpin_debounce = HS_JIFFIES_INSERT; + + if (hi->pdata.hpin_int) { + hi->hpin_gpio_mask = pdata->hpin_mask[0] << 16 | + pdata->hpin_mask[1] << 8 | + pdata->hpin_mask[2]; + } + + detect_wq = create_workqueue("detect"); + if (detect_wq == NULL) { + ret = -ENOMEM; + SYS_MSG("Failed to create detect workqueue"); + goto err_create_detect_work_queue; + } + + button_wq = create_workqueue("button"); + if (button_wq == NULL) { + ret = -ENOMEM; + SYS_MSG("Failed to create button workqueue"); + goto err_create_button_work_queue; + } + + if (hi->pdata.hpin_int) { + ret = headset_microp_enable_interrupt(hi->pdata.hpin_int, 1); + if (ret != 0) { + SYS_MSG("Failed to enable Micro-P HPIN interrupt"); + goto err_enable_microp_hpin_interrupt; + } + } + + if (hi->pdata.hpin_irq) { + ret = request_irq(hi->pdata.hpin_irq, + htc_headset_microp_detect_irq, + IRQF_TRIGGER_NONE, + "HTC_HEADSET_MICROP_BUTTON", NULL); + if (ret < 0) { + ret = -EINVAL; + SYS_MSG("Failed to request Micro-P IRQ (ERROR %d)", + ret); + goto err_request_microp_detect_irq; + } + } + + if (hi->pdata.adc_remote[5]) { + memset(data, 0x00, sizeof(data)); + for (i = 0; i < 6; i++) + data[i + 6] = (uint8_t) hi->pdata.adc_remote[i]; + ret = microp_i2c_write(MICROP_I2C_WCMD_REMOTEKEY_TABLE, + data, 12); + + if (ret != 0) { + ret = -EIO; + SYS_MSG("Failed to write Micro-P ADC table"); + goto err_write_microp_adc_table; + } + } + + if (hi->pdata.remote_int) { + ret = headset_microp_enable_interrupt(hi->pdata.remote_int, 1); + if (ret != 0) { + SYS_MSG("Failed to enable Micro-P remote interrupt"); + goto err_enable_microp_remote_interrupt; + } + } + + if (hi->pdata.remote_irq) { + ret = request_irq(hi->pdata.remote_irq, + htc_headset_microp_button_irq, + IRQF_TRIGGER_NONE, + "HTC_HEADSET_MICROP_BUTTON", NULL); + if (ret < 0) { + ret = -EINVAL; + SYS_MSG("Failed to request Micro-P IRQ (ERROR %d)", + ret); + goto err_request_microp_button_irq; + } + } + + hs_microp_register(); + + SYS_MSG("--------------------"); + + return 0; + +err_request_microp_button_irq: + if (hi->pdata.remote_int) + headset_microp_enable_interrupt(hi->pdata.remote_int, 0); + +err_enable_microp_remote_interrupt: +err_write_microp_adc_table: + if (hi->pdata.hpin_irq) + free_irq(hi->pdata.hpin_irq, 0); + +err_request_microp_detect_irq: + if (hi->pdata.hpin_int) + headset_microp_enable_interrupt(hi->pdata.hpin_int, 0); + +err_enable_microp_hpin_interrupt: + destroy_workqueue(button_wq); + +err_create_button_work_queue: + destroy_workqueue(detect_wq); + +err_create_detect_work_queue: + kfree(hi); + + return ret; +} + +static int 
htc_headset_microp_remove(struct platform_device *pdev) +{ + DBG_MSG(); + + if (hi->pdata.remote_irq) + free_irq(hi->pdata.remote_irq, 0); + + if (hi->pdata.remote_int) + headset_microp_enable_interrupt(hi->pdata.remote_int, 0); + + if (hi->pdata.hpin_irq) + free_irq(hi->pdata.hpin_irq, 0); + + if (hi->pdata.hpin_int) + headset_microp_enable_interrupt(hi->pdata.hpin_int, 0); + + destroy_workqueue(button_wq); + destroy_workqueue(detect_wq); + + kfree(hi); + + return 0; +} + +static struct platform_driver htc_headset_microp_driver = { + .probe = htc_headset_microp_probe, + .remove = htc_headset_microp_remove, + .driver = { + .name = "HTC_HEADSET_MICROP", + .owner = THIS_MODULE, + }, +}; + +static int __init htc_headset_microp_init(void) +{ + DBG_MSG(); + return platform_driver_register(&htc_headset_microp_driver); +} + +static void __exit htc_headset_microp_exit(void) +{ + DBG_MSG(); + platform_driver_unregister(&htc_headset_microp_driver); +} + +module_init(htc_headset_microp_init); +module_exit(htc_headset_microp_exit); + +MODULE_DESCRIPTION("HTC Micro-P headset detection driver"); +MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-msm/htc_power_supply.c b/arch/arm/mach-msm/htc_power_supply.c new file mode 100644 index 0000000000000..bd286c9f3a8b1 --- /dev/null +++ b/arch/arm/mach-msm/htc_power_supply.c @@ -0,0 +1,616 @@ +/* arch/arm/mach-msm/htc_battery.c + * + * Copyright (C) 2008 HTC Corporation. + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "board-mahimahi.h" + +extern void notify_usb_connected(int); + +static char *supply_list[] = { + "battery", +}; + +static struct switch_dev dock_switch = { + .name = "dock", +}; + +static int vbus_present; +static int usb_status; +static bool dock_mains; + +struct dock_state { + struct mutex lock; + u32 t; + u32 last_edge_t[2]; + u32 last_edge_i[2]; + bool level; + bool dock_connected_unknown; +}; + +static struct workqueue_struct *dock_wq; +static struct work_struct dock_work; +static struct wake_lock dock_work_wake_lock; +static struct dock_state ds = { + .lock = __MUTEX_INITIALIZER(ds.lock), +}; + +#define _GPIO_DOCK MAHIMAHI_GPIO_DOCK + +#define dock_out(n) gpio_direction_output(_GPIO_DOCK, n) +#define dock_out2(n) gpio_set_value(_GPIO_DOCK, n) +#define dock_in() gpio_direction_input(_GPIO_DOCK) +#define dock_read() gpio_get_value(_GPIO_DOCK) + +#define MFM_DELAY_NS 10000 + +static int dock_get_edge(struct dock_state *s, u32 timeout, u32 tmin, u32 tmax) +{ + bool lin; + bool in = s->level; + u32 t; + do { + lin = in; + in = dock_read(); + t = msm_read_fast_timer(); + if (in != lin) { + s->last_edge_t[in] = t; + s->last_edge_i[in] = 0; + s->level = in; + if ((s32)(t - tmin) < 0 || (s32)(t - tmax) > 0) + return -1; + return 1; + } + } while((s32)(t - timeout) < 0); + return 0; +} + +static bool dock_sync(struct dock_state *s, u32 timeout) +{ + u32 t; + + s->level = dock_read(); + t = msm_read_fast_timer(); + + if (!dock_get_edge(s, t + timeout, 0, 0)) + return false; + s->last_edge_i[s->level] = 2; + return !!dock_get_edge(s, + s->last_edge_t[s->level] + MFM_DELAY_NS * 4, 0, 0); +} + +static int dock_get_next_bit(struct dock_state *s) +{ + u32 i = s->last_edge_i[!s->level] + ++s->last_edge_i[s->level]; + u32 target = s->last_edge_t[!s->level] + MFM_DELAY_NS * i; + u32 timeout = target + MFM_DELAY_NS / 2; + u32 tmin = target - MFM_DELAY_NS / 4; + u32 tmax = target + MFM_DELAY_NS / 4; + return dock_get_edge(s, timeout, tmin, tmax); +} + +static u32 dock_get_bits(struct dock_state *s, int count, int *errp) +{ + u32 data = 0; + u32 m = 1; + int ret; + int err = 0; + while (count--) { + ret = dock_get_next_bit(s); + if (ret) + data |= m; + if (ret < 0) + err++; + m <<= 1; + } + if (errp) + *errp = err; + return data; +} + +static void dock_delay(u32 timeout) +{ + timeout += msm_read_fast_timer(); + while (((s32)(msm_read_fast_timer() - timeout)) < 0) + ; +} + +static int dock_send_bits(struct dock_state *s, u32 data, int count, int period) +{ + u32 t, t0, to; + + dock_out2(s->level); + t = to = 0; + t0 = msm_read_fast_timer(); + + while (count--) { + if (data & 1) + dock_out2((s->level = !s->level)); + + t = msm_read_fast_timer() - t0; + if (t - to > period / 2) { + pr_info("dock: to = %d, t = %d\n", to, t); + return -EIO; + } + + to += MFM_DELAY_NS; + do { + t = msm_read_fast_timer() - t0; + } while (t < to); + if (t - to > period / 4) { + pr_info("dock: to = %d, t = %d\n", to, t); + return -EIO; + } + data >>= 1; + } + return 0; +} + +static u32 mfm_encode(u16 data, int count, bool p) +{ + u32 mask; + u32 mfm = 0; + u32 clock = ~data & ~(data << 1 | !!p); + for (mask = 1UL << (count - 1); mask; mask >>= 1) { + mfm |= (data & mask); + mfm <<= 1; + mfm |= (clock & mask); + } + return mfm; +} + +static u32 mfm_decode(u32 mfm) +{ + u32 data = 0; + u32 clock = 0; + u32 mask = 1; + while (mfm) { + if (mfm & 1) + clock |= mask; + mfm >>= 1; + if 
(mfm & 1) + data |= mask; + mfm >>= 1; + mask <<= 1; + } + return data; +} + +static int dock_command(struct dock_state *s, u16 cmd, int len, int retlen) +{ + u32 mfm; + int count; + u32 data = cmd; + int ret; + int err = -1; + unsigned long flags; + + data = data << 2 | 3; /* add 0101 mfm data*/ + mfm = mfm_encode(data, len, false); + count = len * 2 + 2; + + msm_enable_fast_timer(); + local_irq_save(flags); + ret = dock_send_bits(s, mfm, count, MFM_DELAY_NS); + if (!ret) { + dock_in(); + if (dock_sync(s, MFM_DELAY_NS * 5)) + ret = dock_get_bits(s, retlen * 2, &err); + else + ret = -1; + dock_out(s->level); + } + local_irq_restore(flags); + + dock_delay((ret < 0) ? MFM_DELAY_NS * 6 : MFM_DELAY_NS * 2); + msm_disable_fast_timer(); + if (ret < 0) { + pr_warning("dock_command: %x: no response\n", cmd); + return ret; + } + data = mfm_decode(ret); + mfm = mfm_encode(data, retlen, true); + if (mfm != ret || err) { + pr_warning("dock_command: %x: bad response, " + "data %x, mfm %x %x, err %d\n", + cmd, data, mfm, ret, err); + return -EIO; + } + return data; +} + +static int dock_command_retry(struct dock_state *s, u16 cmd, size_t len, size_t retlen) +{ + int retry = 20; + int ret; + while (retry--) { + ret = dock_command(s, cmd, len, retlen); + if (ret >= 0) + return ret; + if (retry != 19) + msleep(10); + } + s->dock_connected_unknown = true; + return -EIO; +} + +static int dock_read_single(struct dock_state *s, int addr) +{ + int ret = -1, last; + int retry = 20; + while (retry--) { + last = ret; + ret = dock_command_retry(s, addr << 1, 6, 8); + if (ret < 0 || ret == last) + return ret; + } + return -EIO; +} + +static int dock_read_multi(struct dock_state *s, int addr, u8 *data, size_t len) +{ + int ret; + int i; + u8 suml, sumr = -1; + int retry = 20; + while (retry--) { + suml = 0; + for (i = 0; i <= len; i++) { + ret = dock_command_retry(s, (addr + i) << 1, 6, 8); + if (ret < 0) + return ret; + if (i < len) { + data[i] = ret; + suml += ret; + } else + sumr = ret; + } + if (sumr == suml) + return 0; + + pr_warning("dock_read_multi(%x): bad checksum, %x != %x\n", + addr, sumr, suml); + } + return -EIO; +} + +static int dock_write_byte(struct dock_state *s, int addr, u8 data) +{ + return dock_command_retry(s, 1 | addr << 1 | data << 4, 6 + 8, 1); +} + +static int dock_write_multi(struct dock_state *s, int addr, u8 *data, size_t len) +{ + int ret; + int i; + u8 sum; + int retry = 2; + while (retry--) { + sum = 0; + for (i = 0; i < len; i++) { + sum += data[i]; + ret = dock_write_byte(s, addr + i, data[i]); + if (ret < 0) + return ret; + } + ret = dock_write_byte(s, addr + len, sum); + if (ret <= 0) + return ret; + } + return -EIO; +} + +static int dock_acquire(struct dock_state *s) +{ + mutex_lock(&s->lock); + dock_in(); + if (dock_read()) { + /* Allow some time for the dock pull-down resistor to discharge + * the capasitor. 
+ */ + msleep(20); + if (dock_read()) { + mutex_unlock(&s->lock); + return -ENOENT; + } + } + dock_out(0); + s->level = false; + return 0; +} + +static void dock_release(struct dock_state *s) +{ + dock_in(); + mutex_unlock(&s->lock); +} + +enum { + DOCK_TYPE = 0x0, + DOCK_BT_ADDR = 0x1, /* - 0x7 */ + + DOCK_PIN_CODE = 0x0, +}; + +static ssize_t bt_addr_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret; + u8 bt_addr[6]; + + ret = dock_acquire(&ds); + if (ret < 0) + return ret; + ret = dock_read_multi(&ds, DOCK_BT_ADDR, bt_addr, 6); + dock_release(&ds); + if (ret < 0) + return ret; + + return sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n", + bt_addr[0], bt_addr[1], bt_addr[2], + bt_addr[3], bt_addr[4], bt_addr[5]); +} +static DEVICE_ATTR(bt_addr, S_IRUGO | S_IWUSR, bt_addr_show, NULL); + +static ssize_t bt_pin_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret, i; + u8 pin[4]; + + if (size < 4) + return -EINVAL; + + for (i = 0; i < sizeof(pin); i++) { + if ((pin[i] = buf[i] - '0') > 10) + return -EINVAL; + } + + ret = dock_acquire(&ds); + if (ret < 0) + return ret; + ret = dock_write_multi(&ds, DOCK_PIN_CODE, pin, 4); + dock_release(&ds); + if (ret < 0) + return ret; + + return size; +} +static DEVICE_ATTR(bt_pin, S_IRUGO | S_IWUSR, NULL, bt_pin_store); + + +static int power_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + if (psp != POWER_SUPPLY_PROP_ONLINE) + return -EINVAL; + + if (psy->type == POWER_SUPPLY_TYPE_MAINS) + val->intval = (vbus_present && (usb_status == 2 || dock_mains)); + else + val->intval = vbus_present; + return 0; +} + +static enum power_supply_property power_properties[] = { + POWER_SUPPLY_PROP_ONLINE, +}; + +static struct power_supply ac_supply = { + .name = "ac", + .type = POWER_SUPPLY_TYPE_MAINS, + .supplied_to = supply_list, + .num_supplicants = ARRAY_SIZE(supply_list), + .properties = power_properties, + .num_properties = ARRAY_SIZE(power_properties), + .get_property = power_get_property, +}; + +static struct power_supply usb_supply = { + .name = "usb", + .type = POWER_SUPPLY_TYPE_USB, + .supplied_to = supply_list, + .num_supplicants = ARRAY_SIZE(supply_list), + .properties = power_properties, + .num_properties = ARRAY_SIZE(power_properties), + .get_property = power_get_property, +}; + +/* rpc related */ +#define APP_BATT_PDEV_NAME "rs30100001:00000000" +#define APP_BATT_PROG 0x30100001 +#define APP_BATT_VER MSM_RPC_VERS(0,0) +#define HTC_PROCEDURE_BATTERY_NULL 0 +#define HTC_PROCEDURE_GET_BATT_LEVEL 1 +#define HTC_PROCEDURE_GET_BATT_INFO 2 +#define HTC_PROCEDURE_GET_CABLE_STATUS 3 +#define HTC_PROCEDURE_SET_BATT_DELTA 4 + +static struct msm_rpc_endpoint *endpoint; + +struct battery_info_reply { + u32 batt_id; /* Battery ID from ADC */ + u32 batt_vol; /* Battery voltage from ADC */ + u32 batt_temp; /* Battery Temperature (C) from formula and ADC */ + u32 batt_current; /* Battery current from ADC */ + u32 level; /* formula */ + u32 charging_source; /* 0: no cable, 1:usb, 2:AC */ + u32 charging_enabled; /* 0: Disable, 1: Enable */ + u32 full_bat; /* Full capacity of battery (mAh) */ +}; + +static void dock_work_proc(struct work_struct *work) +{ + int dockid; + + if (!vbus_present || dock_acquire(&ds)) + goto no_dock; + + if (ds.dock_connected_unknown) { + /* force a new dock notification if a command failed */ + switch_set_state(&dock_switch, 0); + ds.dock_connected_unknown = false; + } + + dockid = dock_read_single(&ds, 
DOCK_TYPE); + dock_release(&ds); + + pr_info("Detected dock with ID %02x\n", dockid); + if (dockid >= 0) { + msm_hsusb_set_vbus_state(0); + dock_mains = !!(dockid & 0x80); + switch_set_state(&dock_switch, (dockid & 1) ? 2 : 1); + goto done; + } +no_dock: + dock_mains = false; + switch_set_state(&dock_switch, 0); + msm_hsusb_set_vbus_state(vbus_present); +done: + power_supply_changed(&ac_supply); + power_supply_changed(&usb_supply); + wake_unlock(&dock_work_wake_lock); +} + +static int htc_battery_probe(struct platform_device *pdev) +{ + struct rpc_request_hdr req; + struct htc_get_batt_info_rep { + struct rpc_reply_hdr hdr; + struct battery_info_reply info; + } rep; + + int rc; + + endpoint = msm_rpc_connect(APP_BATT_PROG, APP_BATT_VER, 0); + if (IS_ERR(endpoint)) { + printk(KERN_ERR "%s: init rpc failed! rc = %ld\n", + __FUNCTION__, PTR_ERR(endpoint)); + return PTR_ERR(endpoint); + } + + /* must do this or we won't get cable status updates */ + rc = msm_rpc_call_reply(endpoint, HTC_PROCEDURE_GET_BATT_INFO, + &req, sizeof(req), + &rep, sizeof(rep), + 5 * HZ); + if (rc < 0) + printk(KERN_ERR "%s: get info failed\n", __FUNCTION__); + + power_supply_register(&pdev->dev, &ac_supply); + power_supply_register(&pdev->dev, &usb_supply); + + INIT_WORK(&dock_work, dock_work_proc); + dock_wq = create_singlethread_workqueue("dock"); + + return 0; +} + +static struct platform_driver htc_battery_driver = { + .probe = htc_battery_probe, + .driver = { + .name = APP_BATT_PDEV_NAME, + .owner = THIS_MODULE, + }, +}; + +/* batt_mtoa server definitions */ +#define BATT_MTOA_PROG 0x30100000 +#define BATT_MTOA_VERS 0 +#define RPC_BATT_MTOA_NULL 0 +#define RPC_BATT_MTOA_SET_CHARGING_PROC 1 +#define RPC_BATT_MTOA_CABLE_STATUS_UPDATE_PROC 2 +#define RPC_BATT_MTOA_LEVEL_UPDATE_PROC 3 + +struct rpc_batt_mtoa_cable_status_update_args { + int status; +}; + +static int handle_battery_call(struct msm_rpc_server *server, + struct rpc_request_hdr *req, unsigned len) +{ + struct rpc_batt_mtoa_cable_status_update_args *args; + + if (req->procedure != RPC_BATT_MTOA_CABLE_STATUS_UPDATE_PROC) + return 0; + + args = (struct rpc_batt_mtoa_cable_status_update_args *)(req + 1); + args->status = be32_to_cpu(args->status); + pr_info("cable_status_update: status=%d\n",args->status); + + args->status = !!args->status; + + vbus_present = args->status; + wake_lock(&dock_work_wake_lock); + queue_work(dock_wq, &dock_work); + return 0; +} + +void notify_usb_connected(int status) +{ + printk("### notify_usb_connected(%d) ###\n", status); + usb_status = status; + power_supply_changed(&ac_supply); + power_supply_changed(&usb_supply); +} + +int is_ac_power_supplied(void) +{ + return vbus_present && (usb_status == 2 || dock_mains); +} + +static struct msm_rpc_server battery_server = { + .prog = BATT_MTOA_PROG, + .vers = BATT_MTOA_VERS, + .rpc_call = handle_battery_call, +}; + +static int __init htc_battery_init(void) +{ + int ret; + gpio_request(_GPIO_DOCK, "dock"); + dock_in(); + wake_lock_init(&dock_work_wake_lock, WAKE_LOCK_SUSPEND, "dock"); + platform_driver_register(&htc_battery_driver); + msm_rpc_create_server(&battery_server); + if (switch_dev_register(&dock_switch) == 0) { + ret = device_create_file(dock_switch.dev, &dev_attr_bt_addr); + WARN_ON(ret); + ret = device_create_file(dock_switch.dev, &dev_attr_bt_pin); + WARN_ON(ret); + } + + return 0; +} + +module_init(htc_battery_init); +MODULE_DESCRIPTION("HTC Battery Driver"); +MODULE_LICENSE("GPL"); + diff --git a/arch/arm/mach-msm/htc_pwrsink.c b/arch/arm/mach-msm/htc_pwrsink.c new file 
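For reference, a minimal user-space sketch of reading the dock's bt_addr attribute created by the battery driver above. The sysfs path assumes the switch device registered earlier in this file is named "dock", so the attribute appears under /sys/class/switch/dock/; adjust if the switch name differs.

#include <stdio.h>

int main(void)
{
        char addr[32] = "";
        /* path assumes switch_dev_register() used the name "dock" */
        FILE *f = fopen("/sys/class/switch/dock/bt_addr", "r");

        if (!f)
                return 1;
        if (fgets(addr, sizeof(addr), f))
                printf("dock bluetooth address: %s", addr);
        fclose(f);
        return 0;
}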
mode 100644 index 0000000000000..f4e8b3859422d --- /dev/null +++ b/arch/arm/mach-msm/htc_pwrsink.c @@ -0,0 +1,289 @@ +/* arch/arm/mach-msm/htc_pwrsink.c + * + * Copyright (C) 2008 HTC Corporation + * Copyright (C) 2008 Google, Inc. + * Author: San Mehat + * Kant Kang + * Eiven Peng + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "smd_private.h" + +enum { + PWRSINK_DEBUG_CURR_CHANGE = 1U << 0, + PWRSINK_DEBUG_CURR_CHANGE_AUDIO = 1U << 1, +}; +static int pwrsink_debug_mask; +module_param_named(debug_mask, pwrsink_debug_mask, int, + S_IRUGO | S_IWUSR | S_IWGRP); + +static int initialized; +static unsigned audio_path = 1; /* HTC_SND_DEVICE_SPEAKER = 1 */ +static struct pwr_sink_audio audio_sink_array[PWRSINK_AUDIO_LAST + 1]; +static struct pwr_sink *sink_array[PWRSINK_LAST + 1]; +static DEFINE_SPINLOCK(sink_lock); +static DEFINE_SPINLOCK(audio_sink_lock); +static unsigned long total_sink; +static uint32_t *smem_total_sink; + +int htc_pwrsink_set(pwrsink_id_type id, unsigned percent_utilized) +{ + unsigned long flags; + + if (!smem_total_sink) + smem_total_sink = smem_alloc(SMEM_ID_VENDOR0, sizeof(uint32_t)); + + if (!initialized) + return -EAGAIN; + + if (id < 0 || id > PWRSINK_LAST) + return -EINVAL; + + spin_lock_irqsave(&sink_lock, flags); + + if (!sink_array[id]) { + spin_unlock_irqrestore(&sink_lock, flags); + return -ENOENT; + } + + if (sink_array[id]->percent_util == percent_utilized) { + spin_unlock_irqrestore(&sink_lock, flags); + return 0; + } + + total_sink -= (sink_array[id]->ua_max * + sink_array[id]->percent_util / 100); + sink_array[id]->percent_util = percent_utilized; + total_sink += (sink_array[id]->ua_max * + sink_array[id]->percent_util / 100); + + if (smem_total_sink) + *smem_total_sink = total_sink / 1000; + + pr_debug("htc_pwrsink: ID %d, Util %d%%, Total %lu uA %s\n", + id, percent_utilized, total_sink, + smem_total_sink ? "SET" : ""); + + spin_unlock_irqrestore(&sink_lock, flags); + + return 0; +} +EXPORT_SYMBOL(htc_pwrsink_set); + +static void compute_audio_current(void) +{ + /* unsigned long flags; */ + unsigned max_percent = 0; + int i, active_audio_sinks = 0; + pwrsink_audio_id_type last_active_audio_sink = 0; + + /* Make sure this segment will be spinlocked + before computing by calling function. */ + /* spin_lock_irqsave(&audio_sink_lock, flags); */ + for (i = 0; i <= PWRSINK_AUDIO_LAST; ++i) { + max_percent = (audio_sink_array[i].percent > max_percent) ? + audio_sink_array[i].percent : max_percent; + if (audio_sink_array[i].percent > 0) { + active_audio_sinks++; + last_active_audio_sink = i; + } + } + if (active_audio_sinks == 0) + htc_pwrsink_set(PWRSINK_AUDIO, 0); + else if (active_audio_sinks == 1) { + pwrsink_audio_id_type laas = last_active_audio_sink; + /* TODO: add volume and routing path current. 
*/ + if (audio_path == 1) /* Speaker */ + htc_pwrsink_set(PWRSINK_AUDIO, + audio_sink_array[laas].percent); + else + htc_pwrsink_set(PWRSINK_AUDIO, + audio_sink_array[laas].percent * 9 / 10); + } else if (active_audio_sinks > 1) { + /* TODO: add volume and routing path current. */ + if (audio_path == 1) /* Speaker */ + htc_pwrsink_set(PWRSINK_AUDIO, max_percent); + else + htc_pwrsink_set(PWRSINK_AUDIO, max_percent * 9 / 10); + } + /* spin_unlock_irqrestore(&audio_sink_lock, flags); */ + + if (pwrsink_debug_mask & PWRSINK_DEBUG_CURR_CHANGE_AUDIO) + pr_info("%s: active_audio_sinks=%d, audio_path=%d\n", __func__, + active_audio_sinks, audio_path); +} + +int htc_pwrsink_audio_set(pwrsink_audio_id_type id, unsigned percent_utilized) +{ + unsigned long flags; + + if (id < 0 || id > PWRSINK_AUDIO_LAST) + return -EINVAL; + + if (pwrsink_debug_mask & PWRSINK_DEBUG_CURR_CHANGE_AUDIO) + pr_info("%s: id=%d, percent=%d, percent_old=%d\n", __func__, + id, percent_utilized, audio_sink_array[id].percent); + + spin_lock_irqsave(&audio_sink_lock, flags); + if (audio_sink_array[id].percent == percent_utilized) { + spin_unlock_irqrestore(&audio_sink_lock, flags); + return 0; + } + audio_sink_array[id].percent = percent_utilized; + spin_unlock_irqrestore(&audio_sink_lock, flags); + compute_audio_current(); + return 0; +} +EXPORT_SYMBOL(htc_pwrsink_audio_set); + +int htc_pwrsink_audio_volume_set(pwrsink_audio_id_type id, unsigned volume) +{ + unsigned long flags; + + if (id < 0 || id > PWRSINK_AUDIO_LAST) + return -EINVAL; + + if (pwrsink_debug_mask & PWRSINK_DEBUG_CURR_CHANGE_AUDIO) + pr_info("%s: id=%d, volume=%d, volume_old=%d\n", __func__, + id, volume, audio_sink_array[id].volume); + + spin_lock_irqsave(&audio_sink_lock, flags); + if (audio_sink_array[id].volume == volume) { + spin_unlock_irqrestore(&audio_sink_lock, flags); + return 0; + } + audio_sink_array[id].volume = volume; + spin_unlock_irqrestore(&audio_sink_lock, flags); + compute_audio_current(); + return 0; +} +EXPORT_SYMBOL(htc_pwrsink_audio_volume_set); + +int htc_pwrsink_audio_path_set(unsigned path) +{ + unsigned long flags; + + if (pwrsink_debug_mask & PWRSINK_DEBUG_CURR_CHANGE_AUDIO) + pr_info("%s: path=%d, path_old=%d\n", + __func__, path, audio_path); + + spin_lock_irqsave(&audio_sink_lock, flags); + if (audio_path == path) { + spin_unlock_irqrestore(&audio_sink_lock, flags); + return 0; + } + audio_path = path; + spin_unlock_irqrestore(&audio_sink_lock, flags); + compute_audio_current(); + return 0; +} +EXPORT_SYMBOL(htc_pwrsink_audio_path_set); + +void htc_pwrsink_suspend_early(struct early_suspend *h) +{ + htc_pwrsink_set(PWRSINK_SYSTEM_LOAD, 7); +} + +int htc_pwrsink_suspend_late(struct device *dev) +{ + struct pwr_sink_platform_data *pdata = dev_get_platdata(dev); + + if (pdata && pdata->suspend_late) + pdata->suspend_late(to_platform_device(dev), PMSG_SUSPEND); + else + htc_pwrsink_set(PWRSINK_SYSTEM_LOAD, 1); + return 0; +} + +int htc_pwrsink_resume_early(struct device *dev) +{ + struct pwr_sink_platform_data *pdata = dev_get_platdata(dev);; + + if (pdata && pdata->resume_early) + pdata->resume_early(to_platform_device(dev)); + else + htc_pwrsink_set(PWRSINK_SYSTEM_LOAD, 7); + return 0; +} + +void htc_pwrsink_resume_late(struct early_suspend *h) +{ + htc_pwrsink_set(PWRSINK_SYSTEM_LOAD, 38); +} + +#ifdef CONFIG_WAKELOCK +struct early_suspend htc_pwrsink_early_suspend = { + .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, + .suspend = htc_pwrsink_suspend_early, + .resume = htc_pwrsink_resume_late, +}; +#endif + +static int __devinit 
htc_pwrsink_probe(struct platform_device *pdev) +{ + struct pwr_sink_platform_data *pdata = pdev->dev.platform_data; + int i; + + if (!pdata) + return -EINVAL; + + total_sink = 0; + for (i = 0; i < pdata->num_sinks; i++) { + sink_array[pdata->sinks[i].id] = &pdata->sinks[i]; + total_sink += (pdata->sinks[i].ua_max * + pdata->sinks[i].percent_util / 100); + } + + initialized = 1; + +#ifdef CONFIG_WAKELOCK + if (pdata->suspend_early) + htc_pwrsink_early_suspend.suspend = pdata->suspend_early; + if (pdata->resume_late) + htc_pwrsink_early_suspend.resume = pdata->resume_late; +#endif + register_early_suspend(&htc_pwrsink_early_suspend); + + return 0; +} + +static struct dev_pm_ops htc_pwrsink_pm_ops = { + .suspend_noirq = htc_pwrsink_suspend_late, + .resume_noirq = htc_pwrsink_resume_early, +}; + +static struct platform_driver htc_pwrsink_driver = { + .probe = htc_pwrsink_probe, + .driver = { + .name = "htc_pwrsink", + .owner = THIS_MODULE, + .pm = &htc_pwrsink_pm_ops, + }, +}; + +static int __init htc_pwrsink_init(void) +{ + initialized = 0; + memset(sink_array, 0, sizeof(sink_array)); + return platform_driver_register(&htc_pwrsink_driver); +} + +module_init(htc_pwrsink_init); diff --git a/arch/arm/mach-msm/htc_set_perflock.c b/arch/arm/mach-msm/htc_set_perflock.c new file mode 100644 index 0000000000000..3509f08ef6820 --- /dev/null +++ b/arch/arm/mach-msm/htc_set_perflock.c @@ -0,0 +1,78 @@ +/* arch/arm/mach-msm/htc_set_perflock.c + * + * Copyright (C) 2008 HTC Corporation + * Author: Eiven Peng + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
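A hypothetical board-file hookup for the pwrsink driver above. Its probe routine walks pdata->sinks[] and seeds total_sink from ua_max and percent_util, so a board would register platform data along these lines; the sink IDs come from the driver, while the current figures and device name suffix are purely illustrative.

static struct pwr_sink board_pwrsinks[] = {
        { .id = PWRSINK_SYSTEM_LOAD, .ua_max = 100000, .percent_util = 38 },
        { .id = PWRSINK_AUDIO,       .ua_max =  90000, .percent_util = 0  },
};

static struct pwr_sink_platform_data board_pwrsink_data = {
        .num_sinks = ARRAY_SIZE(board_pwrsinks),
        .sinks     = board_pwrsinks,
};

static struct platform_device board_pwrsink_device = {
        .name = "htc_pwrsink",
        .id   = -1,
        .dev  = { .platform_data = &board_pwrsink_data },
};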
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +static struct mutex lock; +static struct perf_lock media_perf_lock; +static uint32_t num = 0; + +static int perflock_open(struct inode *inode, struct file *file) +{ + mutex_lock(&lock); + if (num == 0) { + perf_lock(&media_perf_lock); + printk(KERN_DEBUG "[perflock] Perflock enabled.\n"); + } + num++; + printk(KERN_DEBUG "[perflock] Perflock node is opened by [%s]/[PID=%d],numbers of opened nodes = [%d].\n", + current->comm, current->pid, num); + mutex_unlock(&lock); + + return 0; +} + +static int perflock_release(struct inode *inode, struct file *file) +{ + mutex_lock(&lock); + num--; + printk(KERN_DEBUG "[perflock] Perflock node is closed by [%s]/[PID=%d], numbers of opened nodes = [%d].\n", + current->comm, current->pid, num); + if (num == 0) { + perf_unlock(&media_perf_lock); + printk(KERN_DEBUG "[perflock] Perflock disabled.\n"); + } + mutex_unlock(&lock); + + return 0; +} + +static struct file_operations perflock_fops = { + .owner = THIS_MODULE, + .open = perflock_open, + .release = perflock_release, +}; + +struct miscdevice perflock_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "perflock", + .fops = &perflock_fops, +}; + +static int __init set_perflock_init(void) { + mutex_init(&lock); + perf_lock_init(&media_perf_lock, PERF_LOCK_HIGHEST, "media"); + return misc_register(&perflock_misc); +} + +device_initcall(set_perflock_init); diff --git a/arch/arm/mach-msm/htc_wifi_nvs.c b/arch/arm/mach-msm/htc_wifi_nvs.c new file mode 100644 index 0000000000000..db6f678958a2d --- /dev/null +++ b/arch/arm/mach-msm/htc_wifi_nvs.c @@ -0,0 +1,105 @@ +/* arch/arm/mach-msm/htc_wifi_nvs.c + * + * Code to extract WiFi calibration information from ATAG set up + * by the bootloader. + * + * Copyright (C) 2008 Google, Inc. + * Author: Dmitry Shmidt + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
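An illustrative user-space client of the perflock misc device registered above: the first open takes the "media" perf lock at PERF_LOCK_HIGHEST and the last close drops it. The /dev/perflock path assumes the usual devtmpfs/udev naming for a misc device.

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/perflock", O_RDONLY); /* first open -> perf_lock() */

        if (fd < 0)
                return 1;
        /* ... run the latency-sensitive media workload here ... */
        close(fd);                                /* last close -> perf_unlock() */
        return 0;
}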
+ * + */ + +#include +#include +#include +#include + +#include + +/* configuration tags specific to msm */ +#define ATAG_MSM_WIFI 0x57494649 /* MSM WiFi */ + +#define NVS_MAX_SIZE 0x800U +#define NVS_LEN_OFFSET 0x0C +#define NVS_DATA_OFFSET 0x40 + +static unsigned char wifi_nvs_ram[NVS_MAX_SIZE]; +static struct proc_dir_entry *wifi_calibration; + +unsigned char *get_wifi_nvs_ram( void ) +{ + return( wifi_nvs_ram ); +} +EXPORT_SYMBOL(get_wifi_nvs_ram); + +static int __init parse_tag_msm_wifi(const struct tag *tag) +{ + unsigned char *dptr = (unsigned char *)(&tag->u); + unsigned size; +#ifdef ATAG_MSM_WIFI_DEBUG + unsigned i; +#endif + + size = min((tag->hdr.size - 2) * sizeof(__u32), NVS_MAX_SIZE); +#ifdef ATAG_MSM_WIFI_DEBUG + printk("WiFi Data size = %d , 0x%x\n", tag->hdr.size, tag->hdr.tag); + for (i = 0; i < size; i++) + printk("%02x ", *dptr++); +#endif + memcpy( (void *)wifi_nvs_ram, (void *)dptr, size ); + return 0; +} + +__tagtable(ATAG_MSM_WIFI, parse_tag_msm_wifi); + +static unsigned wifi_get_nvs_size( void ) +{ + unsigned char *ptr; + unsigned len; + + ptr = get_wifi_nvs_ram(); + /* Size in format LE assumed */ + memcpy(&len, ptr + NVS_LEN_OFFSET, sizeof(len)); + len = min(len, (NVS_MAX_SIZE - NVS_DATA_OFFSET)); + return len; +} + +int wifi_calibration_size_set(void) +{ + if (wifi_calibration != NULL) + wifi_calibration->size = wifi_get_nvs_size(); + return 0; +} + +static int wifi_calibration_read_proc(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + unsigned char *ptr; + unsigned len; + + ptr = get_wifi_nvs_ram(); + len = min(wifi_get_nvs_size(), (unsigned)count); + memcpy(page, ptr + NVS_DATA_OFFSET, len); + return len; +} + +static int __init wifi_nvs_init(void) +{ + wifi_calibration = create_proc_entry("calibration", 0444, NULL); + if (wifi_calibration != NULL) { + wifi_calibration->size = wifi_get_nvs_size(); + wifi_calibration->read_proc = wifi_calibration_read_proc; + wifi_calibration->write_proc = NULL; + } + return 0; +} + +device_initcall(wifi_nvs_init); diff --git a/arch/arm/mach-msm/idle-v7.S b/arch/arm/mach-msm/idle-v7.S new file mode 100644 index 0000000000000..70dfb2b6c7c0d --- /dev/null +++ b/arch/arm/mach-msm/idle-v7.S @@ -0,0 +1,220 @@ +/* + * Idle processing for ARMv7-based Qualcomm SoCs. + * + * Copyright (C) 2007 Google, Inc. + * Copyright (c) 2007-2009, Code Aurora Forum. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
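A sketch of how a WLAN driver might consume the calibration blob that htc_wifi_nvs.c above preserves from the ATAG. The offsets mirror NVS_LEN_OFFSET (0x0C) and NVS_DATA_OFFSET (0x40) from that file; the consumer function name is made up for illustration.

#include <linux/string.h>

extern unsigned char *get_wifi_nvs_ram(void);

static unsigned example_copy_wifi_cal(unsigned char *dst, unsigned dst_len)
{
        unsigned char *nvs = get_wifi_nvs_ram();
        unsigned len;

        memcpy(&len, nvs + 0x0C, sizeof(len)); /* NVS_LEN_OFFSET, little-endian assumed */
        if (len > dst_len)
                len = dst_len;
        memcpy(dst, nvs + 0x40, len);          /* NVS_DATA_OFFSET */
        return len;
}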
+ * + */ + +#include +#include +#include + +ENTRY(msm_arch_idle) + wfi + bx lr + +ENTRY(msm_pm_collapse) +#if defined(CONFIG_MSM_FIQ_SUPPORT) + cpsid f +#endif + + ldr r0, =saved_state + stmia r0!, {r4-r14} + mrc p15, 0, r1, c1, c0, 0 /* MMU control */ + mrc p15, 0, r2, c2, c0, 0 /* TTBR0 */ + mrc p15, 0, r3, c3, c0, 0 /* dacr */ + mrc p15, 3, r4, c15, c0, 3 /* L2CR1 is the L2 cache control reg 1 */ + mrc p15, 0, r5, c10, c2, 0 /* PRRR */ + mrc p15, 0, r6, c10, c2, 1 /* NMRR */ + mrc p15, 0, r7, c1, c0, 1 /* ACTLR */ + mrc p15, 0, r8, c2, c0, 1 /* TTBR1 */ + mrc p15, 0, r9, c13, c0, 3 /* TPIDRURO */ + mrc p15, 0, ip, c13, c0, 1 /* context ID */ + stmia r0!, {r1-r9, ip} +#ifdef CONFIG_MSM_CPU_AVS + mrc p15, 7, r1, c15, c1, 7 /* AVSCSR is the Adaptive Voltage Scaling + * Control and Status Register */ + mrc p15, 7, r2, c15, c0, 6 /* AVSDSCR is the Adaptive Voltage + * Scaling Delay Synthesizer Control + * Register */ + mrc p15, 7, r3, c15, c1, 0 /* TSCSR is the Temperature Status and + * Control Register + */ + stmia r0!, {r1-r3} +#endif + +#ifdef CONFIG_VFP + .fpu neon + VFPFSTMIA r0, r1 /* Save VFP working registers */ + fmrx r1, fpexc + fmrx r2, fpscr + stmia r0!, {r1, r2} /* Save VFP state registers */ +#endif + bl v7_flush_dcache_all + + mrc p15, 0, r1, c1, c0, 0 /* read current CR */ + bic r0, r1, #(1 << 2) /* clear dcache bit */ + bic r0, r0, #(1 << 12) /* clear icache bit */ + mcr p15, 0, r0, c1, c0, 0 /* disable d/i cache */ + + dsb + wfi + + mcr p15, 0, r1, c1, c0, 0 /* restore d/i cache */ + isb + +#if defined(CONFIG_MSM_FIQ_SUPPORT) + cpsie f +#endif + + ldr r0, =saved_state /* restore registers */ + ldmfd r0, {r4-r14} + mov r0, #0 /* return power collapse failed */ + bx lr + +ENTRY(msm_pm_collapse_exit) +#if 0 /* serial debug */ + mov r0, #0x80000016 + mcr p15, 0, r0, c15, c2, 4 + mov r0, #0xA9000000 + add r0, r0, #0x00A00000 /* UART1 */ + /*add r0, r0, #0x00C00000*/ /* UART3 */ + mov r1, #'A' + str r1, [r0, #0x00C] +#endif + +#if 0 + //; REMOVE FOLLOWING THREE INSTRUCTIONS WHEN POWER COLLAPSE IS ENA + //;Make sure the DBGOSLSR[LOCK] bit is cleared to allow access to the debug registers + //; Writing anything but the "secret code" to the DBGOSLAR clears the DBGOSLSR[LOCK] bit + MCR p14, 0, r0, c1, c0, 4 //; WCP14_DBGOSLAR r0 + + + //; Read the DBGPRSR to clear the DBGPRSR[STICKYPD] + //; Any read to DBGPRSR clear the STICKYPD bit + //; ISB guarantees the read completes before attempting to + //; execute a CP14 instruction. 
+ MRC p14, 0, r3, c1, c5, 4 //; RCP14_DBGPRSR r3 + ISB +#endif + +#if 0 /* allow jtag reconnect */ +1: + b 1b +#endif + + bl __cpu_early_init + + ldr r1, =saved_state_end + ldr r2, =msm_pm_collapse_exit + adr r3, msm_pm_collapse_exit + add r1, r1, r3 + sub r1, r1, r2 +#ifdef CONFIG_MSM_CPU_AVS + ldmdb r1!, {r2-r4} + mcr p15, 7, r4, c15, c1, 0 /* TSCSR */ + mcr p15, 7, r3, c15, c0, 6 /* AVSDSCR */ + mcr p15, 7, r2, c15, c1, 7 /* AVSCSR */ +#endif +#ifdef CONFIG_VFP + mrc p15, 0, r2, c1, c0, 2 /* Read CP Access Control Register */ + orr r2, r2, #0x00F00000 /* Enable full access for p10,11 */ + mcr p15, 0, r2, c1, c0, 2 /* Write CPACR */ + isb + mov r2, #0x40000000 /* Enable VFP */ + fmxr fpexc, r2 + isb + ldmdb r1!, {r2, r3} /* Read saved VFP state registers */ + sub r1, r1, #32*8 /* Jump to start of vfp regs area */ + VFPFLDMIA r1, r4 /* Restore VFP working registers, + * r1 incremented to end of vfp + * regs area */ + sub r1, r1, #32*8 /* Jump back to start of vfp regs area */ + fmxr fpscr, r3 /* Restore FPSCR */ + fmxr fpexc, r2 /* Restore FPEXC last */ +#endif + ldmdb r1!, {r2-r11} + mcr p15, 0, r4, c3, c0, 0 /* dacr */ + mcr p15, 0, r3, c2, c0, 0 /* TTBR0 */ + mcr p15, 3, r5, c15, c0, 3 /* L2CR1 */ + mcr p15, 0, r6, c10, c2, 0 /* PRRR */ + mcr p15, 0, r7, c10, c2, 1 /* NMRR */ + mcr p15, 0, r8, c1, c0, 1 /* ACTLR */ + mcr p15, 0, r9, c2, c0, 1 /* TTBR1 */ + mcr p15, 0, r10, c13, c0, 3 /* TPIDRURO */ + mcr p15, 0, r11, c13, c0, 1 /* context ID */ + isb + ldmdb r1!, {r4-r14} + /* Add 1:1 map in the PMD to allow smooth switch when turning on MMU */ + and r3, r3, #~0x7F /* mask off lower 7 bits of TTB */ + adr r0, msm_pm_mapped_pa /* get address of the mapped instr */ + lsr r1, r0, #20 /* get the addr range of addr in MB */ + lsl r1, r1, #2 /* multiply by 4 to get to the pg index */ + add r3, r3, r1 /* pgd + pgd_index(addr) */ + ldr r1, [r3] /* save current entry to r1 */ + lsr r0, #20 /* align current addr to 1MB boundary */ + lsl r0, #20 + /* Create new entry for this 1MB page */ + orr r0, r0, #0x4 /* PMD_SECT_BUFFERED */ + orr r0, r0, #0x400 /* PMD_SECT_AP_WRITE */ + orr r0, r0, #0x2 /* PMD_TYPE_SECT|PMD_DOMAIN(DOMAIN_KERNEL) */ + str r0, [r3] /* put new entry into the MMU table */ + mcr p15, 0, r3, c7, c10, 1 /* flush_pmd */ + dsb + isb + mcr p15, 0, r2, c1, c0, 0 /* MMU control */ + isb +msm_pm_mapped_pa: + /* Switch to virtual */ + adr r2, msm_pm_pa_to_va + ldr r0, =msm_pm_pa_to_va + mov pc, r0 +msm_pm_pa_to_va: + sub r0, r0, r2 + /* Restore r1 in MMU table */ + add r3, r3, r0 + str r1, [r3] + mcr p15, 0, r3, c7, c10, 1 /* flush_pmd */ + dsb + isb + mcr p15, 0, r3, c8, c7, 0 /* UTLBIALL */ + mcr p15, 0, r3, c7, c5, 6 /* BPIALL */ + dsb + isb + stmfd sp!, {lr} + bl v7_flush_kern_cache_all + ldmfd sp!, {lr} + mov r0, #1 + bx lr + nop + nop + nop + nop + nop +1: b 1b + + + .data + +saved_state: + .space 4 * 11 /* r4-14 */ + .space 4 * 10 /* cp15 */ +#ifdef CONFIG_MSM_CPU_AVS + .space 4 * 3 /* AVS control registers */ +#endif +#ifdef CONFIG_VFP + .space 8 * 32 /* VFP working registers */ + .space 4 * 2 /* VFP state registers */ +#endif +saved_state_end: + diff --git a/arch/arm/mach-msm/idle.S b/arch/arm/mach-msm/idle.S index 6a94f05271377..d4c3e81747312 100644 --- a/arch/arm/mach-msm/idle.S +++ b/arch/arm/mach-msm/idle.S @@ -1,9 +1,9 @@ -/* arch/arm/mach-msm/include/mach/idle.S +/* arch/arm/mach-msm/idle.S * - * Idle processing for MSM7K - work around bugs with SWFI. + * Idle processing for MSM7X00A - work around bugs with SWFI. * * Copyright (c) 2007 QUALCOMM Incorporated. 
- * Copyright (C) 2007 Google, Inc. + * Copyright (C) 2007 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -14,23 +14,93 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - */ - + */ + #include #include -ENTRY(arch_idle) -#ifdef CONFIG_MSM7X00A_IDLE - mrc p15, 0, r1, c1, c0, 0 /* read current CR */ +ENTRY(msm_pm_collapse) + ldr r0, =saved_state + stmia r0!, {r4-r14} + mrc p15, 0, r1, c1, c0, 0 /* MMU control */ + mrc p15, 0, r2, c2, c0, 0 /* ttb */ + mrc p15, 0, r3, c3, c0, 0 /* dacr */ + mrc p15, 0, ip, c13, c0, 1 /* context ID */ + stmia r0!, {r1-r3, ip} +#if defined(CONFIG_OPROFILE) + mrc p15, 0, r1, c15, c12, 0 /* pmnc */ + mrc p15, 0, r2, c15, c12, 1 /* ccnt */ + mrc p15, 0, r3, c15, c12, 2 /* pmn0 */ + mrc p15, 0, ip, c15, c12, 3 /* pmn1 */ + stmia r0!, {r1-r3, ip} +#endif + /* fall though */ +ENTRY(msm_arch_idle) +#if defined(CONFIG_MSM_FIQ_SUPPORT) + cpsid f +#endif + mrc p15, 0, r1, c1, c0, 0 /* read current CR */ bic r0, r1, #(1 << 2) /* clear dcache bit */ bic r0, r0, #(1 << 12) /* clear icache bit */ mcr p15, 0, r0, c1, c0, 0 /* disable d/i cache */ - mov r0, #0 /* prepare wfi value */ + mov r0, #0 /* prepare wfi value */ /* also used as return value from msm_pm_collapse */ mcr p15, 0, r0, c7, c10, 0 /* flush the cache */ mcr p15, 0, r0, c7, c10, 4 /* memory barrier */ mcr p15, 0, r0, c7, c0, 4 /* wait for interrupt */ mcr p15, 0, r1, c1, c0, 0 /* restore d/i cache */ +#if defined(CONFIG_MSM_FIQ_SUPPORT) + cpsie f #endif mov pc, lr + +ENTRY(msm_pm_collapse_exit) +#if 0 /* serial debug */ + mov r0, #0x80000016 + mcr p15, 0, r0, c15, c2, 4 + mov r0, #0xA9000000 + add r0, r0, #0x00A00000 /* UART1 */ + /*add r0, r0, #0x00C00000*/ /* UART3 */ + mov r1, #'A' + str r1, [r0, #0x00C] +#endif + ldr r1, =saved_state_end + ldr r2, =msm_pm_collapse_exit + adr r3, msm_pm_collapse_exit + add r1, r1, r3 + sub r1, r1, r2 +#if defined(CONFIG_OPROFILE) + ldmdb r1!, {r2-r5} + mcr p15, 0, r3, c15, c12, 1 /* ccnt */ + mcr p15, 0, r4, c15, c12, 2 /* pmn0 */ + mcr p15, 0, r5, c15, c12, 3 /* pmn1 */ + mcr p15, 0, r2, c15, c12, 0 /* pmnc */ +#endif + ldmdb r1!, {r2-r5} + mcr p15, 0, r4, c3, c0, 0 /* dacr */ + mcr p15, 0, r3, c2, c0, 0 /* ttb */ + mcr p15, 0, r5, c13, c0, 1 /* context ID */ + ldmdb r1!, {r4-r14} + mov r0, #1 + + mcr p15, 0, r2, c1, c0, 0 /* MMU control */ + mov pc, lr + nop + nop + nop + nop + nop +1: b 1b + + + .data + +saved_state: + .space 4 * 11 /* r4-14 */ + .space 4 * 4 /* cp15 - MMU control, ttb, dacr, context ID */ +#if defined(CONFIG_OPROFILE) + .space 4 * 4 /* more cp15 - pmnc, ccnt, pmn0, pmn1 */ +#endif +saved_state_end: + diff --git a/arch/arm/mach-msm/include/mach/atmega_microp.h b/arch/arm/mach-msm/include/mach/atmega_microp.h new file mode 100644 index 0000000000000..2b04dc58b3015 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/atmega_microp.h @@ -0,0 +1,265 @@ +/* include/asm/mach-msm/atmega_microp.h + * + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + */ + + +#ifndef _LINUX_ATMEGA_MICROP_H +#define _LINUX_ATMEGA_MICROP_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define MICROP_I2C_NAME "atmega-microp" + +#define MICROP_FUNCTION_LSENSOR 1 +#define MICROP_FUNCTION_REMOTEKEY 2 +#define MICROP_FUNCTION_LCD_BL 3 +#define MICROP_FUNCTION_RMK_VALUE 4 +#define MICROP_FUNCTION_INTR 11 +#define MICROP_FUNCTION_GSENSOR 12 +#define MICROP_FUNCTION_LED 13 +#define MICROP_FUNCTION_HPIN 14 +#define MICROP_FUNCTION_RESET_INT 15 +#define MICROP_FUNCTION_SIM_CARD 16 +#define MICROP_FUNCTION_SDCARD 17 +#define MICROP_FUNCTION_OJ 18 +#define MICROP_FUNCTION_P 19 + +#define HEADSET_NO_MIC 0 +#define HEADSET_MIC 1 +#define HEADSET_METRICO 2 + +#define LED_RGB (1 << 0) +#define LED_JOGBALL (1 << 1) +#define LED_GPO (1 << 2) +#define LED_PWM (1 << 3) +#define LED_WIMAX (1 << 4) +#define LED_MOBEAM (1 << 5) + +#define SPI_GSENSOR (1 << 0) +#define SPI_LCM (1 << 1) +#define SPI_OJ (1 << 2) + +#define LS_PWR_ON (1 << 0) +#define PS_PWR_ON (1 << 1) + +#define ALS_BACKLIGHT (1 << 0) +#define ALS_VKEY_LED (1 << 1) + +#define CMD_83_DIFF (1 << 0) +#define CMD_25_DIFF (1 << 1) + +#define ALS_CALIBRATED 0x6DA5 + +#define MICROP_I2C_WCMD_MISC 0x20 +#define MICROP_I2C_WCMD_SPI_EN 0x21 +#define MICROP_I2C_WCMD_LCM_BL_MANU_CTL 0x22 +#define MICROP_I2C_WCMD_AUTO_BL_CTL 0x23 +#define MICROP_I2C_RCMD_SPI_BL_STATUS 0x24 +#define MICROP_I2C_WCMD_LED_PWM 0x25 +#define MICROP_I2C_WCMD_BL_EN 0x26 +#define MICROP_I2C_RCMD_VERSION 0x30 +#define MICROP_I2C_WCMD_ADC_TABLE 0x42 +#define MICROP_I2C_WCMD_LED_MODE 0x53 +#define MICROP_I2C_RCMD_GREEN_LED_REMAIN_TIME 0x54 +#define MICROP_I2C_RCMD_AMBER_LED_REMAIN_TIME 0x55 +#define MICROP_I2C_RCMD_LED_REMAIN_TIME 0x56 +#define MICROP_I2C_RCMD_BLUE_LED_REMAIN_TIME 0x57 +#define MICROP_I2C_RCMD_LED_STATUS 0x58 +#define MICROP_I2C_WCMD_JOGBALL_LED_MODE 0x5A +#define MICROP_I2C_WCMD_JOGBALL_LED_PWM_SET 0x5C +#define MICROP_I2C_WCMD_READ_ADC_VALUE_REQ 0x60 +#define MICROP_I2C_RCMD_ADC_VALUE 0x62 +#define MICROP_I2C_WCMD_REMOTEKEY_TABLE 0x63 +#define MICROP_I2C_WCMD_LCM_BURST 0x6A +#define MICROP_I2C_WCMD_LCM_BURST_EN 0x6B +#define MICROP_I2C_WCMD_LCM_REGISTER 0x70 +#define MICROP_I2C_WCMD_GSENSOR_REG 0x73 +#define MICROP_I2C_WCMD_GSENSOR_REG_DATA_REQ 0x74 +#define MICROP_I2C_RCMD_GSENSOR_REG_DATA 0x75 +#define MICROP_I2C_WCMD_GSENSOR_DATA_REQ 0x76 +#define MICROP_I2C_RCMD_GSENSOR_X_DATA 0x77 +#define MICROP_I2C_RCMD_GSENSOR_Y_DATA 0x78 +#define MICROP_I2C_RCMD_GSENSOR_Z_DATA 0x79 +#define MICROP_I2C_RCMD_GSENSOR_DATA 0x7A +#define MICROP_I2C_WCMD_OJ_REG 0x7B +#define MICROP_I2C_WCMD_OJ_REG_DATA_REQ 0x7C +#define MICROP_I2C_RCMD_OJ_REG_DATA 0x7D +#define MICROP_I2C_WCMD_OJ_POS_DATA_REQ 0x7E +#define MICROP_I2C_RCMD_OJ_POS_DATA 0x7F +#define MICROP_I2C_WCMD_GPI_INT_CTL_EN 0x80 +#define MICROP_I2C_WCMD_GPI_INT_CTL_DIS 0x81 +#define MICROP_I2C_RCMD_GPI_INT_STATUS 0x82 +#define MICROP_I2C_RCMD_GPIO_STATUS 0x83 +#define MICROP_I2C_WCMD_GPI_INT_STATUS_CLR 0x84 +#define MICROP_I2C_RCMD_GPI_INT_SETTING 0x85 +#define MICROP_I2C_RCMD_REMOTE_KEYCODE 0x87 +#define MICROP_I2C_WCMD_REMOTE_KEY_DEBN_TIME 0x88 +#define MICROP_I2C_WCMD_REMOTE_PLUG_DEBN_TIME 0x89 +#define MICROP_I2C_WCMD_SIMCARD_DEBN_TIME 0x8A +#define MICROP_I2C_WCMD_GPO_LED_STATUS_EN 0x90 +#define MICROP_I2C_WCMD_GPO_LED_STATUS_DIS 0x91 +#define MICROP_I2C_WCMD_OJ_INT_STATUS 0xA8 +#define MICROP_I2C_RCMD_MOBEAM_STATUS 0xB1 +#define MICROP_I2C_WCMD_MOBEAM_DL 0xB2 +#define 
MICROP_I2C_WCMD_MOBEAM_SEND 0xB3 + +struct microp_function_config { + const char *name; + uint8_t category; + uint8_t init_value; + uint8_t channel; + uint8_t fade_time; + uint32_t sub_categ; + uint16_t levels[10]; + uint16_t dutys[10]; + uint16_t int_pin; + uint16_t golden_adc; + uint8_t mask_r[3]; + uint8_t mask_w[3]; + uint32_t ls_gpio_on; + int (*ls_power)(int, uint8_t); +}; + +struct microp_i2c_platform_data { + struct microp_function_config *microp_function; + struct platform_device *microp_devices; + int num_devices; + int num_functions; + uint32_t gpio_reset; + uint32_t microp_ls_on; + void *dev_id; + uint8_t microp_mic_status; + uint8_t function_node[20]; + uint32_t cmd_diff; + uint32_t spi_devices; + uint32_t spi_devices_init; +}; + +struct microp_led_config { + const char *name; + uint32_t type; + uint8_t init_value; + uint8_t fade_time; + uint16_t led_pin; + uint8_t mask_w[3]; +}; + +struct microp_led_platform_data { + struct microp_led_config *led_config; + int num_leds; +}; + +struct microp_int_pin { + uint16_t int_gsensor; + uint16_t int_lsensor; + uint16_t int_reset; + uint16_t int_simcard; + uint16_t int_hpin; + uint16_t int_remotekey; + uint16_t int_sdcard; + uint16_t int_oj; + uint16_t int_psensor; +}; + +struct microp_gpio_status { + uint32_t hpin; + uint32_t sdcard; + uint32_t psensor; +}; + +struct microp_function_node { + uint8_t lsensor; + uint8_t psensor; +}; + +struct microp_led_data { + struct led_classdev ldev; + struct microp_led_config *led_config; + struct mutex led_data_mutex; + spinlock_t brightness_lock; + enum led_brightness brightness; + uint8_t mode; + uint8_t blink; +}; + +struct microp_i2c_client_data { + struct mutex microp_adc_mutex; + struct mutex microp_i2c_rw_mutex; + uint16_t version; + struct workqueue_struct *microp_queue; + struct work_struct microp_intr_work; + struct delayed_work ls_on_work; + struct delayed_work hpin_enable_intr_work; + struct delayed_work hpin_debounce_work; + struct early_suspend early_suspend; + struct microp_int_pin int_pin; + struct microp_gpio_status gpio; + struct microp_function_node fnode; + struct wake_lock hpin_wake_lock; + + atomic_t microp_is_suspend; + atomic_t als_intr_enabled; + atomic_t als_intr_enable_flag; + int headset_is_in; + int sdcard_is_in; + uint32_t spi_devices_vote; + uint32_t pwr_devices_vote; + uint32_t als_func; + struct hrtimer gen_irq_timer; + uint16_t intr_status; +}; + +struct lightsensor_platform_data{ + struct i2c_client *client; + struct microp_function_config *config; + int irq; + int old_intr_cmd; +}; + +struct microp_ops { + int (*init_microp_func)(struct i2c_client *); + int (*als_pwr_enable)(int pwr_device, uint8_t en); + int (*als_intr_enable)(struct i2c_client *, + uint32_t als_func, uint8_t en); + void (*als_level_change)(struct i2c_client *, uint8_t *data); + void (*headset_enable)(int en); + void (*spi_enable)(int en); + void (*notifier_func)(struct i2c_client *, struct microp_led_data *); + void (*led_gpio_set)(struct microp_led_data *); +}; + +int microp_i2c_read(uint8_t addr, uint8_t *data, int length); +int microp_i2c_write(uint8_t addr, uint8_t *data, int length); +int microp_function_check(struct i2c_client *client, uint8_t category); +int microp_read_gpio_status(uint8_t *data); +int microp_write_interrupt(struct i2c_client *client, + uint16_t interrupt, uint8_t enable); +void microp_get_als_kvalue(int i); +int microp_spi_vote_enable(int spi_device, uint8_t enable); +void microp_register_ops(struct microp_ops *ops); + +int microp_read_adc(uint8_t *data); +void 
microp_mobeam_enable(int enable); + +#endif /* _LINUX_ATMEGA_MICROP_H */ diff --git a/arch/arm/mach-msm/include/mach/bcm_bt_lpm.h b/arch/arm/mach-msm/include/mach/bcm_bt_lpm.h new file mode 100644 index 0000000000000..c22429718809a --- /dev/null +++ b/arch/arm/mach-msm/include/mach/bcm_bt_lpm.h @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2009 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __ASM_ARCH_BCM_BT_LPM_H +#define __ASM_ARCH_BCM_BT_LPM_H + +#include + +/* Uart driver must call this every time it beings TX, to ensure + * this driver keeps WAKE asserted during TX. Called with uart + * spinlock held. */ +extern void bcm_bt_lpm_exit_lpm_locked(struct uart_port *uport); + +struct bcm_bt_lpm_platform_data { + unsigned int gpio_wake; /* CPU -> BCM wakeup gpio */ + unsigned int gpio_host_wake; /* BCM -> CPU wakeup gpio */ + + /* Callback to request the uart driver to clock off. + * Called with uart spinlock held. */ + void (*request_clock_off_locked)(struct uart_port *uport); + /* Callback to request the uart driver to clock on. + * Called with uart spinlock held. */ + void (*request_clock_on_locked)(struct uart_port *uport); +}; + +#endif diff --git a/arch/arm/mach-msm/include/mach/board-bravo-microp-common.h b/arch/arm/mach-msm/include/mach/board-bravo-microp-common.h new file mode 100644 index 0000000000000..112121521d8fe --- /dev/null +++ b/arch/arm/mach-msm/include/mach/board-bravo-microp-common.h @@ -0,0 +1,166 @@ +/* arch/arm/mach-msm/board-bravo.h + * + * Copyright (C) 2009 HTC Corporation. + * Author: Haley Teng + * Copyright (C) 2010 Kali- + * Copyright (C) 2010 Diogo Ferreira + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
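A sketch of the uart-side contract spelled out in bcm_bt_lpm.h above: every TX start must assert the BT wake handshake while the port spinlock is held. Only the bcm_bt_lpm_exit_lpm_locked() call is prescribed by the header; the surrounding start_tx hook is hypothetical.

#include <linux/serial_core.h>
#include <mach/bcm_bt_lpm.h>

static void example_start_tx_locked(struct uart_port *uport)
{
        /* caller holds uport->lock, as the header requires */
        bcm_bt_lpm_exit_lpm_locked(uport);
        /* ... then kick the TX FIFO / DMA as the uart driver normally would ... */
}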
+*/ + +#ifndef _LINUX_BOARD_BRAVO_MICROP_COMMON_H +#define _LINUX_BOARD_BRAVO_MICROP_COMMON_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MICROP_I2C_NAME "bravo-microp" + +#define MICROP_LSENSOR_ADC_CHAN 6 +#define MICROP_REMOTE_KEY_ADC_CHAN 7 + +#define MICROP_I2C_WCMD_MISC 0x20 +#define MICROP_I2C_WCMD_SPI_EN 0x21 +#define MICROP_I2C_WCMD_AUTO_BL_CTL 0x23 +#define MICROP_I2C_RCMD_SPI_BL_STATUS 0x24 +#define MICROP_I2C_WCMD_BUTTONS_LED_CTRL 0x25 +#define MICROP_I2C_RCMD_VERSION 0x30 +#define MICROP_I2C_WCMD_ADC_TABLE 0x42 +#define MICROP_I2C_WCMD_LED_MODE 0x53 +#define MICROP_I2C_RCMD_GREEN_LED_REMAIN_TIME 0x54 +#define MICROP_I2C_RCMD_AMBER_LED_REMAIN_TIME 0x55 +#define MICROP_I2C_RCMD_BLUE_LED_REMAIN_TIME 0x57 +#define MICROP_I2C_WCMD_READ_ADC_VALUE_REQ 0x60 +#define MICROP_I2C_RCMD_ADC_VALUE 0x62 +#define MICROP_I2C_WCMD_REMOTEKEY_TABLE 0x63 +#define MICROP_I2C_WCMD_LCM_REGISTER 0x70 +#define MICROP_I2C_WCMD_GSENSOR_REG 0x73 +#define MICROP_I2C_WCMD_GSENSOR_REG_DATA_REQ 0x74 +#define MICROP_I2C_RCMD_GSENSOR_REG_DATA 0x75 +#define MICROP_I2C_WCMD_GSENSOR_DATA_REQ 0x76 +#define MICROP_I2C_RCMD_GSENSOR_X_DATA 0x77 +#define MICROP_I2C_RCMD_GSENSOR_Y_DATA 0x78 +#define MICROP_I2C_RCMD_GSENSOR_Z_DATA 0x79 +#define MICROP_I2C_RCMD_GSENSOR_DATA 0x7A +#define MICROP_I2C_WCMD_OJ_REG 0x7B +#define MICROP_I2C_WCMD_OJ_REG_DATA_REQ 0x7C +#define MICROP_I2C_RCMD_OJ_REG_DATA 0x7D +#define MICROP_I2C_WCMD_OJ_POS_DATA_REQ 0x7E +#define MICROP_I2C_RCMD_OJ_POS_DATA 0x7F +#define MICROP_I2C_WCMD_GPI_INT_CTL_EN 0x80 +#define MICROP_I2C_WCMD_GPI_INT_CTL_DIS 0x81 +#define MICROP_I2C_RCMD_GPI_INT_STATUS 0x82 +#define MICROP_I2C_RCMD_GPI_STATUS 0x83 +#define MICROP_I2C_WCMD_GPI_INT_STATUS_CLR 0x84 +#define MICROP_I2C_RCMD_GPI_INT_SETTING 0x85 +#define MICROP_I2C_RCMD_REMOTE_KEYCODE 0x87 +#define MICROP_I2C_WCMD_REMOTE_KEY_DEBN_TIME 0x88 +#define MICROP_I2C_WCMD_REMOTE_PLUG_DEBN_TIME 0x89 +#define MICROP_I2C_WCMD_SIMCARD_DEBN_TIME 0x8A +#define MICROP_I2C_WCMD_GPO_LED_STATUS_EN 0x90 +#define MICROP_I2C_WCMD_GPO_LED_STATUS_DIS 0x91 +#define MICROP_I2C_WCMD_OJ_INT_STATUS 0xA8 + +#define IRQ_OJ (1<<12) +#define IRQ_GSENSOR (1<<10) +#define IRQ_LSENSOR (1<<9) +#define IRQ_REMOTEKEY (1<<7) +#define IRQ_HEADSETIN (1<<2) +#define IRQ_SDCARD (1<<0) + +#define SPI_GSENSOR (1 << 0) +#define SPI_LCM (1 << 1) +#define SPI_OJ (1 << 2) + +#define MICROP_FUNCTION_LSENSOR 1 +#define MICROP_FUNCTION_REMOTEKEY 2 +#define MICROP_FUNCTION_LCD_BL 3 +#define MICROP_FUNCTION_RMK_VALUE 4 +#define MICROP_FUNCTION_INTR 11 +#define MICROP_FUNCTION_GSENSOR 12 +#define MICROP_FUNCTION_LED 13 +#define MICROP_FUNCTION_HPIN 14 +#define MICROP_FUNCTION_RESET_INT 15 +#define MICROP_FUNCTION_SIM_CARD 16 +#define MICROP_FUNCTION_SDCARD 17 +#define MICROP_FUNCTION_OJ 18 +#define MICROP_FUNCTION_P 19 + +#define LS_PWR_ON (1 << 0) +#define ALS_CALIBRATED 0x6DA5 +#define ATAG_ALS 0x5441001b + +/* I2C functions for drivers */ +int microp_i2c_read(uint8_t addr, uint8_t *data, int length); +int microp_i2c_write(uint8_t addr, uint8_t *data, int length); +int microp_read_adc(uint8_t channel, uint16_t *value); +int microp_spi_vote_enable(int spi_device, uint8_t enable); +int microp_write_interrupt(struct i2c_client *client, + uint16_t interrupt, uint8_t enable); +struct i2c_client *get_microp_client(void); + +struct microp_function_config { + const char *name; + uint8_t category; + uint8_t init_value; + uint8_t channel; + uint8_t fade_time; + uint32_t sub_categ; + uint16_t levels[10]; + 
uint16_t dutys[10]; + uint16_t int_pin; + uint16_t golden_adc; + uint8_t mask_r[3]; + uint8_t mask_w[3]; + uint32_t ls_gpio_on; + int (*ls_power)(int, uint8_t); +}; + +struct microp_i2c_platform_data { + struct microp_function_config *microp_function; + struct platform_device *microp_devices; + int num_devices; + int num_functions; + uint32_t gpio_reset; + uint32_t microp_ls_on; + void *dev_id; + uint8_t microp_mic_status; + uint8_t function_node[20]; + uint32_t cmd_diff; + uint32_t spi_devices; + uint32_t spi_devices_init; +}; + +struct lightsensor_platform_data{ + struct i2c_client *client; + struct microp_function_config *config; + int irq; + int old_intr_cmd; +}; + +struct microp_ops { + int (*init_microp_func)(struct i2c_client *); + int (*als_pwr_enable)(int pwr_device, uint8_t en); + int (*als_intr_enable)(struct i2c_client *, + uint32_t als_func, uint8_t en); + void (*als_level_change)(struct i2c_client *, uint8_t *data); + void (*headset_enable)(int en); + void (*spi_enable)(int en); +}; + +#endif /* _LINUX_BOARD_BRAVO_MICROP_COMMON_H */ diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h index 6abf4a6eadc19..cd8c2d4da9d9d 100644 --- a/arch/arm/mach-msm/include/mach/board.h +++ b/arch/arm/mach-msm/include/mach/board.h @@ -19,32 +19,190 @@ #include #include +#include /* platform device data structures */ - -struct msm_acpu_clock_platform_data -{ +struct msm_acpu_clock_platform_data { uint32_t acpu_switch_time_us; uint32_t max_speed_delta_khz; uint32_t vdd_switch_time_us; + unsigned long mpll_khz; unsigned long power_collapse_khz; unsigned long wait_for_irq_khz; }; +struct msm_camera_io_ext { + uint32_t mdcphy; + uint32_t mdcsz; + uint32_t appphy; + uint32_t appsz; + uint32_t camifpadphy; + uint32_t camifpadsz; + uint32_t csiphy; + uint32_t csisz; + uint32_t csiirq; +}; + +struct msm_camera_device_platform_data { + void (*camera_gpio_on) (void); + void (*camera_gpio_off)(void); + struct msm_camera_io_ext ioext; +}; +enum msm_camera_csi_data_format { + CSI_8BIT, + CSI_10BIT, + CSI_12BIT, +}; +struct msm_camera_csi_params { + enum msm_camera_csi_data_format data_format; + uint8_t lane_cnt; + uint8_t lane_assign; + uint8_t settle_cnt; + uint8_t dpcm_scheme; +}; + +struct msm_camera_legacy_device_platform_data { + int sensor_reset; + int sensor_pwd; + int vcm_pwd; + void (*config_gpio_on) (void); + void (*config_gpio_off)(void); + struct msm_camsensor_device_platform_data *sensor_info; +}; + +#define MSM_CAMERA_FLASH_NONE 0 +#define MSM_CAMERA_FLASH_LED 1 +#define MSM_CAMERA_FLASH_SRC_PMIC (0x00000001<<0) +#define MSM_CAMERA_FLASH_SRC_PWM (0x00000001<<1) + +struct msm_camera_sensor_flash_pmic { + uint32_t low_current; + uint32_t high_current; +}; + +struct msm_camera_sensor_flash_pwm { + uint32_t freq; + uint32_t max_load; + uint32_t low_load; + uint32_t high_load; + uint32_t channel; +}; + +struct msm_camera_sensor_flash_src { + int flash_sr_type; + + union { + struct msm_camera_sensor_flash_pmic pmic_src; + struct msm_camera_sensor_flash_pwm pwm_src; + } _fsrc; +}; + +struct msm_camera_sensor_flash_data { + int flash_type; + struct msm_camera_sensor_flash_src *flash_src; +}; + +struct camera_flash_cfg { + int num_flash_levels; + int (*camera_flash)(int level); + uint16_t low_temp_limit; + uint16_t low_cap_limit; + uint8_t postpone_led_mode; +}; + +enum msm_camera_source{ + MAIN_SOURCE, + SECOND_SOURCE, +}; + +struct msm_camera_sensor_info { + const char *sensor_name; + int sensor_reset; + int sensor_pwd; + int vcm_pwd; + 
void(*camera_clk_switch)(void); + /*power*/ + char *camera_analog_pwd; + char *camera_io_pwd; + char *camera_vcm_pwd; + char *camera_digital_pwd; + int analog_pwd1_gpio; + int (*camera_power_on)(void); + int (*camera_power_off)(void); + void(*camera_set_source)(enum msm_camera_source); + enum msm_camera_source(*camera_get_source)(void); + int (*camera_main_get_probe)(void); + void (*camera_main_set_probe)(int); + int mclk; + int flash_type; /* for back support */ + uint8_t led_high_enabled; + int need_suspend; + struct msm_camera_device_platform_data *pdata; + struct resource *resource; + uint8_t num_resources; + uint32_t waked_up; + wait_queue_head_t event_wait; + uint32_t kpi_sensor_start; + uint32_t kpi_sensor_end; + struct camera_flash_cfg* flash_cfg; + int csi_if; + struct msm_camera_csi_params csi_params; + int sensor_lc_disable; /* for sensor lens correction support */ + uint8_t (*preview_skip_frame)(void); +}; struct clk; +struct snd_endpoint { + int id; + const char *name; +}; + +struct msm_snd_endpoints { + struct snd_endpoint *endpoints; + unsigned num; +}; + extern struct sys_timer msm_timer; /* common init routines for use by arch/arm/mach-msm/board-*.c */ - void __init msm_add_devices(void); void __init msm_map_common_io(void); void __init msm_init_irq(void); -void __init msm_init_gpio(void); void __init msm_clock_init(struct clk *clock_tbl, unsigned num_clocks); void __init msm_acpu_clock_init(struct msm_acpu_clock_platform_data *); int __init msm_add_sdcc(unsigned int controller, struct msm_mmc_platform_data *plat, unsigned int stat_irq, unsigned long stat_irq_flags); +#if defined(CONFIG_USB_FUNCTION_MSM_HSUSB) || defined(CONFIG_USB_MSM_72K) +void msm_hsusb_set_vbus_state(int online); +/* START: add USB connected notify function */ +struct t_usb_status_notifier{ + struct list_head notifier_link; + const char *name; + void (*func)(int online); +}; + int usb_register_notifier(struct t_usb_status_notifier *); + static LIST_HEAD(g_lh_usb_notifier_list); +/* END: add USB connected notify function */ +#else +static inline void msm_hsusb_set_vbus_state(int online) {} +#endif + +char *board_serialno(void); +int __init parse_tag_skuid(const struct tag *tags); +int __init parse_tag_engineerid(const struct tag *tags); +int __init parse_tag_memsize(const struct tag *tags); +int board_mfg_mode(void); +void __init msm_snddev_init(void); +void msm_snddev_poweramp_on(void); +void msm_snddev_poweramp_off(void); +void msm_snddev_hsed_pamp_on(void); +void msm_snddev_hsed_pamp_off(void); +void msm_snddev_tx_route_config(void); +void msm_snddev_tx_route_deconfig(void); + +extern int emmc_partition_read_proc(char *page, char **start, off_t off, + int count, int *eof, void *data); + #endif diff --git a/arch/arm/mach-msm/include/mach/board_htc.h b/arch/arm/mach-msm/include/mach/board_htc.h new file mode 100644 index 0000000000000..9647ed1c4b3dc --- /dev/null +++ b/arch/arm/mach-msm/include/mach/board_htc.h @@ -0,0 +1,84 @@ +/* arch/arm/mach-msm/include/mach/BOARD_HTC.h + * Copyright (C) 2007-2009 HTC Corporation. + * Author: Thomas Tsai + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
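A sketch of a client of the USB-connected notifier block added to board.h above. Only struct t_usb_status_notifier and usb_register_notifier() come from the header; the callback and init function are illustrative.

#include <linux/init.h>
#include <linux/kernel.h>
#include <mach/board.h>

static void example_vbus_notify(int online)
{
        pr_info("example: vbus %s\n", online ? "present" : "absent");
}

static struct t_usb_status_notifier example_usb_notifier = {
        .name = "example_vbus",
        .func = example_vbus_notify,
};

static int __init example_usb_notifier_init(void)
{
        return usb_register_notifier(&example_usb_notifier);
}
late_initcall(example_usb_notifier_init);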
+ */ +#ifndef __ASM_ARCH_MSM_BOARD_HTC_H +#define __ASM_ARCH_MSM_BOARD_HTC_H + +#include +#include +#include + +struct msm_pmem_setting{ + resource_size_t pmem_start; + resource_size_t pmem_size; + resource_size_t pmem_adsp_start; + resource_size_t pmem_adsp_size; + resource_size_t pmem_gpu0_start; + resource_size_t pmem_gpu0_size; + resource_size_t pmem_gpu1_start; + resource_size_t pmem_gpu1_size; + resource_size_t pmem_camera_start; + resource_size_t pmem_camera_size; + resource_size_t ram_console_start; + resource_size_t ram_console_size; +}; + +enum { + MSM_SERIAL_UART1 = 0, + MSM_SERIAL_UART2, + MSM_SERIAL_UART3, +#ifdef CONFIG_SERIAL_MSM_HS + MSM_SERIAL_UART1DM, + MSM_SERIAL_UART2DM, +#endif + MSM_SERIAL_NUM, +}; + + +/* common init routines for use by arch/arm/mach-msm/board-*.c */ + +void __init msm_add_usb_devices(void (*phy_reset) (void)); +void __init msm_add_usb_id_pin_function(void (*config_usb_id_gpios)(bool enable)); +void __init msm_add_usb_id_pin_gpio(int usb_id_pin_io); +void __init msm_enable_car_kit_detect(bool enable); +void __init msm_change_usb_id(__u16 vendor_id, __u16 product_id); +void __init msm_add_mem_devices(struct msm_pmem_setting *setting); +void __init msm_init_pmic_vibrator(void); + +struct mmc_platform_data; +int __init msm_add_sdcc_devices(unsigned int controller, struct mmc_platform_data *plat); +int __init msm_add_serial_devices(unsigned uart); + +int __init board_mfg_mode(void); +int __init parse_tag_smi(const struct tag *tags); +int __init parse_tag_hwid(const struct tag * tags); +int __init parse_tag_skuid(const struct tag * tags); +int __init tag_panel_parsing(const struct tag *tags); +int parse_tag_engineerid(const struct tag * tags); + +void notify_usb_connected(int online); + +char *board_serialno(void); + +/* + * Obviously, we need these in all project. + * To export a function to get these is too lousy. + * Each BSP can include board.h to get these. + * + * Jay, 15/May/09' + * */ +extern int panel_type; +extern unsigned engineer_id; +extern int usb_phy_error; + +#endif diff --git a/arch/arm/mach-msm/include/mach/camera.h b/arch/arm/mach-msm/include/mach/camera.h new file mode 100644 index 0000000000000..a517d29077c09 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/camera.h @@ -0,0 +1,351 @@ +/* + * Copyright (C) 2008-2009 QUALCOMM Incorporated. + */ + +#ifndef __ASM__ARCH_CAMERA_H +#define __ASM__ARCH_CAMERA_H + +#include +#include +#include +#include +#include +#include "linux/types.h" + +#include +#include + +#ifdef CONFIG_MSM_CAMERA_DEBUG +#define CDBG(fmt, args...) printk(KERN_INFO "msm_camera: " fmt, ##args) +#else +#define CDBG(fmt, args...) 
do { } while (0) +#endif + +#define MSM_CAMERA_MSG 0 +#define MSM_CAMERA_EVT 1 +#define NUM_WB_EXP_NEUTRAL_REGION_LINES 4 +#define NUM_WB_EXP_STAT_OUTPUT_BUFFERS 3 +#define NUM_AUTOFOCUS_MULTI_WINDOW_GRIDS 16 +#define NUM_STAT_OUTPUT_BUFFERS 3 +#define NUM_AF_STAT_OUTPUT_BUFFERS 3 + +enum msm_queue { + MSM_CAM_Q_CTRL, /* control command or control command status */ + MSM_CAM_Q_VFE_EVT, /* adsp event */ + MSM_CAM_Q_VFE_MSG, /* adsp message */ + MSM_CAM_Q_V4L2_REQ, /* v4l2 request */ +}; + +enum vfe_resp_msg { + VFE_EVENT, + VFE_MSG_GENERAL, + VFE_MSG_SNAPSHOT, +#ifndef CONFIG_720P_CAMERA + VFE_MSG_OUTPUT1, + VFE_MSG_OUTPUT2, +#else + VFE_MSG_OUTPUT_P, /* preview (continuous mode ) */ + VFE_MSG_OUTPUT_T, /* thumbnail (snapshot mode )*/ + VFE_MSG_OUTPUT_S, /* main image (snapshot mode )*/ + VFE_MSG_OUTPUT_V, /* video (continuous mode ) */ +#endif + VFE_MSG_STATS_AF, + VFE_MSG_STATS_WE, +}; + +#define VFE31_OUTPUT_MODE_PT (0x1 << 0) +#define VFE31_OUTPUT_MODE_S (0x1 << 1) +#define VFE31_OUTPUT_MODE_V (0x1 << 2) + +struct msm_vfe_phy_info { + uint32_t sbuf_phy; + uint32_t y_phy; + uint32_t cbcr_phy; + uint8_t output_id; /* VFE31_OUTPUT_MODE_PT/S/V */ +}; + +struct msm_vfe_resp { + enum vfe_resp_msg type; + struct msm_vfe_evt_msg evt_msg; + struct msm_vfe_phy_info phy; + void *extdata; + int32_t extlen; +}; + +struct msm_vfe_callback { + void (*vfe_resp)(struct msm_vfe_resp *, + enum msm_queue, void *syncdata, + gfp_t gfp); + void* (*vfe_alloc)(int, void *syncdata, gfp_t gfp); + void (*vfe_free)(void *ptr); +}; + +struct msm_camvfe_fn { + int (*vfe_init)(struct msm_vfe_callback *, struct platform_device *); + int (*vfe_enable)(struct camera_enable_cmd *); + int (*vfe_config)(struct msm_vfe_cfg_cmd *, void *); + int (*vfe_disable)(struct camera_enable_cmd *, + struct platform_device *dev); + void (*vfe_release)(struct platform_device *); +}; + +struct msm_sensor_ctrl { + int (*s_init)(struct msm_camera_sensor_info *); + int (*s_release)(void); + int (*s_config)(void __user *); + int node; +}; + +/* this structure is used in kernel */ +struct msm_queue_cmd { + struct list_head list_config; + struct list_head list_control; + struct list_head list_frame; + struct list_head list_pict; + enum msm_queue type; + void *command; + int on_heap; + struct timespec ts; +}; + +struct msm_device_queue { + struct list_head list; + spinlock_t lock; + wait_queue_head_t wait; + int max; + int len; + const char *name; +}; + +struct msm_sync { + /* These two queues are accessed from a process context only. They contain + * pmem descriptors for the preview frames and the stats coming from the + * camera sensor. + */ + struct hlist_head pmem_frames; + struct hlist_head pmem_stats; + + /* The message queue is used by the control thread to send commands + * to the config thread, and also by the DSP to send messages to the + * config thread. Thus it is the only queue that is accessed from + * both interrupt and process context. + */ + struct msm_device_queue event_q; + + /* This queue contains preview frames. It is accessed by the DSP (in + * in interrupt context, and by the frame thread. + */ + struct msm_device_queue frame_q; + int unblock_poll_frame; + + /* This queue contains snapshot frames. It is accessed by the DSP (in + * interrupt context, and by the control thread. 
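A sketch of the sensor_probe contract implied by struct msm_sensor_ctrl here and by msm_camera_drv_start() declared later in this header: the probe fills in the s_init/s_release/s_config hooks. The example_* routines stand in for a real sensor driver's implementations.

static int example_sensor_init(struct msm_camera_sensor_info *info)
{
        return 0;       /* power up and detect the sensor here */
}

static int example_sensor_release(void)
{
        return 0;       /* power the sensor back down */
}

static int example_sensor_config(void __user *argp)
{
        return 0;       /* handle sensor configuration ioctls */
}

static int example_sensor_probe(struct msm_camera_sensor_info *info,
                                struct msm_sensor_ctrl *s)
{
        s->s_init    = example_sensor_init;
        s->s_release = example_sensor_release;
        s->s_config  = example_sensor_config;
        return 0;
}

static int example_camera_probe(struct platform_device *pdev)
{
        return msm_camera_drv_start(pdev, example_sensor_probe);
}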
+ */ + struct msm_device_queue pict_q; + int get_pic_abort; + + struct msm_camera_sensor_info *sdata; + struct msm_camvfe_fn vfefn; + struct msm_sensor_ctrl sctrl; + struct wake_lock wake_suspend_lock; + struct wake_lock wake_lock; + struct platform_device *pdev; + uint8_t opencnt; + void *cropinfo; + int croplen; + + uint32_t pp_mask; + struct msm_queue_cmd *pp_prev; + struct msm_queue_cmd *pp_snap; + + /* When this flag is set, we send preview-frame notifications to config + * as well as to the frame queue. By default, the flag is cleared. + */ + uint32_t report_preview_to_config; + + const char *apps_id; + + struct mutex lock; + struct list_head list; +}; + +#define MSM_APPS_ID_V4L2 "msm_v4l2" +#define MSM_APPS_ID_PROP "msm_qct" + +struct msm_device { + struct msm_sync *sync; /* most-frequently accessed */ + struct device *device; + struct cdev cdev; + /* opened is meaningful only for the config and frame nodes, + * which may be opened only once. + */ + atomic_t opened; +}; + +struct msm_control_device { + struct msm_device *pmsm; + + /* Used for MSM_CAM_IOCTL_CTRL_CMD_DONE responses */ + uint8_t ctrl_data[50]; + struct msm_ctrl_cmd ctrl; + struct msm_queue_cmd qcmd; + + /* This queue used by the config thread to send responses back to the + * control thread. It is accessed only from a process context. + */ + struct msm_device_queue ctrl_q; +}; + +struct register_address_value_pair { + uint16_t register_address; + uint16_t register_value; +}; + +struct msm_pmem_region { + struct hlist_node list; + unsigned long paddr; +//#ifdef CONFIG_MSM_CAMERA_LEGACY + unsigned long kvaddr; +//#endif + unsigned long len; + struct file *file; + struct msm_pmem_info info; +}; + +struct axidata { + uint32_t bufnum1; + uint32_t bufnum2; +//#ifdef CONFIG_720P_CAMERA + uint32_t bufnum3; +//#endif + struct msm_pmem_region *region; +}; + +#ifdef CONFIG_MSM_CAMERA_FLASH + int msm_camera_flash_set_led_state( + struct msm_camera_sensor_flash_data *fdata, + unsigned led_state); +#else + static inline int msm_camera_flash_set_led_state( + struct msm_camera_sensor_flash_data *fdata, + unsigned led_state) + { + return -ENOTSUPP; + } +#endif + +#ifdef CONFIG_MSM_CAMERA_V4L2 +/* Below functions are added for V4L2 kernel APIs */ +struct msm_v4l2_driver { + struct msm_sync *sync; + int (*open)(struct msm_sync *, const char *apps_id); + int (*release)(struct msm_sync *); + int (*ctrl)(struct msm_sync *, struct msm_ctrl_cmd *); + int (*reg_pmem)(struct msm_sync *, struct msm_pmem_info *); + int (*get_frame) (struct msm_sync *, struct msm_frame *); + int (*put_frame) (struct msm_sync *, struct msm_frame *); + int (*get_pict) (struct msm_sync *, struct msm_ctrl_cmd *); + unsigned int (*drv_poll) (struct msm_sync *, struct file *, + struct poll_table_struct *); +}; + +int msm_v4l2_register(struct msm_v4l2_driver *); +int msm_v4l2_unregister(struct msm_v4l2_driver *); +#endif + +void msm_camvfe_init(void); +int msm_camvfe_check(void *); +void msm_camvfe_fn_init(struct msm_camvfe_fn *, void *); +int msm_camera_drv_start(struct platform_device *dev, + int (*sensor_probe)(struct msm_camera_sensor_info *, + struct msm_sensor_ctrl *)); + +enum msm_camio_clk_type { + CAMIO_VFE_MDC_CLK, + CAMIO_MDC_CLK, + CAMIO_VFE_CLK, + CAMIO_VFE_AXI_CLK, +//#ifdef CONFIG_MSM_CAMERA_7X30 + CAMIO_VFE_CLK_FOR_MIPI_2_LANE, + CAMIO_VFE_CAMIF_CLK, + CAMIO_VFE_PBDG_CLK, + CAMIO_CAM_MCLK_CLK, + CAMIO_CAMIF_PAD_PBDG_CLK, + CAMIO_CSI_CLK, + CAMIO_CSI_VFE_CLK, + CAMIO_CSI_PCLK, +//#endif + CAMIO_MAX_CLK +}; + +enum msm_camio_clk_src_type { + 
MSM_CAMIO_CLK_SRC_INTERNAL, + MSM_CAMIO_CLK_SRC_EXTERNAL, + MSM_CAMIO_CLK_SRC_MAX +}; + +enum msm_s_test_mode { + S_TEST_OFF, + S_TEST_1, + S_TEST_2, + S_TEST_3 +}; + +enum msm_s_resolution { + S_QTR_SIZE, + S_FULL_SIZE, + S_INVALID_SIZE +}; + +enum msm_s_reg_update { + /* Sensor egisters that need to be updated during initialization */ + S_REG_INIT, + /* Sensor egisters that needs periodic I2C writes */ + S_UPDATE_PERIODIC, + /* All the sensor Registers will be updated */ + S_UPDATE_ALL, + /* Not valid update */ + S_UPDATE_INVALID +}; + +enum msm_s_setting { + S_RES_PREVIEW, + S_RES_CAPTURE +}; + +int msm_camio_enable(struct platform_device *dev); + +int msm_camio_clk_enable(enum msm_camio_clk_type clk); +int msm_camio_clk_disable(enum msm_camio_clk_type clk); +int msm_camio_clk_config(uint32_t freq); +void msm_camio_clk_rate_set(int rate); +void msm_camio_clk_axi_rate_set(int rate); + +void msm_camio_camif_pad_reg_reset(void); +void msm_camio_camif_pad_reg_reset_2(void); + +void msm_camio_vfe_blk_reset(void); + +void msm_camio_clk_sel(enum msm_camio_clk_src_type); +void msm_camio_disable(struct platform_device *); +int msm_camio_probe_on(struct platform_device *); +int msm_camio_probe_off(struct platform_device *); + +#ifdef CONFIG_MSM_CAMERA_7X30 +void msm_camio_clk_rate_set_2(struct clk *clk, int rate); +void msm_disable_io_gpio_clk(struct platform_device *); +int msm_camio_csi_config(struct msm_camera_csi_params *csi_params); +int request_axi_qos(uint32_t freq); +int update_axi_qos(uint32_t freq); +void release_axi_qos(void); +int msm_camio_read_camif_status(void); + +void msm_io_w(u32 data, void __iomem *addr); +void msm_io_w_mb(u32 data, void __iomem *addr); +u32 msm_io_r(void __iomem *addr); +u32 msm_io_r_mb(void __iomem *addr); +void msm_io_dump(void __iomem *addr, int size); +void msm_io_memcpy(void __iomem *dest_addr, void __iomem *src_addr, u32 len); +#endif + +#endif diff --git a/arch/arm/mach-msm/include/mach/clk.h b/arch/arm/mach-msm/include/mach/clk.h index c05ca40478c78..998f1ac2a1764 100644 --- a/arch/arm/mach-msm/include/mach/clk.h +++ b/arch/arm/mach-msm/include/mach/clk.h @@ -54,4 +54,5 @@ int clk_reset(struct clk *clk, enum clk_reset_action action); /* Set clock-specific configuration parameters */ int clk_set_flags(struct clk *clk, unsigned long flags); +unsigned long acpuclk_get_max_axi_rate(void); #endif diff --git a/arch/arm/mach-msm/include/mach/dma.h b/arch/arm/mach-msm/include/mach/dma.h index 05583f5695244..f1ccd428e6056 100644 --- a/arch/arm/mach-msm/include/mach/dma.h +++ b/arch/arm/mach-msm/include/mach/dma.h @@ -1,6 +1,8 @@ /* linux/include/asm-arm/arch-msm/dma.h * * Copyright (C) 2007 Google, Inc. + * Copyright (c) 2008 QUALCOMM Incorporated. + * Copyright (c) 2008 QUALCOMM USA, INC. 
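A sketch of a sensor power-up path using the camio clock helpers declared above. Which clocks a given board actually needs, and the 24 MHz MCLK figure, are illustrative assumptions.

static int example_camio_clk_on(void)
{
        int rc;

        rc = msm_camio_clk_enable(CAMIO_VFE_CLK);
        if (rc)
                return rc;

        rc = msm_camio_clk_enable(CAMIO_CAM_MCLK_CLK);
        if (rc) {
                msm_camio_clk_disable(CAMIO_VFE_CLK);
                return rc;
        }

        msm_camio_clk_rate_set(24000000);       /* sensor MCLK, board-specific */
        return 0;
}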
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -34,7 +36,9 @@ struct msm_dmov_cmd { #ifndef CONFIG_ARCH_MSM8X60 void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd); +void msm_dmov_enqueue_cmd_ext(unsigned id, struct msm_dmov_cmd *cmd); void msm_dmov_stop_cmd(unsigned id, struct msm_dmov_cmd *cmd, int graceful); +void msm_dmov_flush(unsigned int id); int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr); #else static inline @@ -77,6 +81,7 @@ int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr) { return -EIO; } #define DMOV_FLUSH3(ch) DMOV_SD_AARM(0x140, ch) #define DMOV_FLUSH4(ch) DMOV_SD_AARM(0x180, ch) #define DMOV_FLUSH5(ch) DMOV_SD_AARM(0x1C0, ch) +#define DMOV_FLUSH_TYPE (1 << 31) #define DMOV_STATUS(ch) DMOV_SD_AARM(0x200, ch) #define DMOV_STATUS_RSLT_COUNT(n) (((n) >> 29)) @@ -108,6 +113,19 @@ int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr) { return -EIO; } #define DMOV_USB_CHAN 11 +#define DMOV_HSUART1_TX_CHAN 4 +#define DMOV_HSUART1_TX_CRCI 8 + +#define DMOV_HSUART1_RX_CHAN 9 +#define DMOV_HSUART1_RX_CRCI 9 + +#define DMOV_HSUART2_TX_CHAN 4 +#define DMOV_HSUART2_TX_CRCI 14 + +#define DMOV_HSUART2_RX_CHAN 11 +#define DMOV_HSUART2_RX_CRCI 15 + + /* no client rate control ifc (eg, ram) */ #define DMOV_NONE_CRCI 0 diff --git a/arch/arm/mach-msm/include/mach/drv_callback.h b/arch/arm/mach-msm/include/mach/drv_callback.h new file mode 100644 index 0000000000000..56ed3ccfe33b6 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/drv_callback.h @@ -0,0 +1,28 @@ +/* linux/arch/arm/mach-msm/drv_callback.h + * + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +struct cnf_driver { + const char *name; + int (*func) (void *); + + /* configurable driver list lock */ + rwlock_t cnfdrv_list_lock; + struct list_head next_drv; +}; + +int cnf_driver_register(struct cnf_driver *); +int cnf_driver_event(const char *, void *argu); diff --git a/arch/arm/mach-msm/include/mach/fiq.h b/arch/arm/mach-msm/include/mach/fiq.h new file mode 100644 index 0000000000000..64a0ea90c25bd --- /dev/null +++ b/arch/arm/mach-msm/include/mach/fiq.h @@ -0,0 +1,34 @@ +/* linux/include/asm-arm/arch-msm/irqs.h + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __ASM_ARCH_MSM_FIQ_H +#define __ASM_ARCH_MSM_FIQ_H + +/* cause an interrupt to be an FIQ instead of a regular IRQ */ +void msm_fiq_select(int number); +void msm_fiq_unselect(int number); + +/* enable/disable an interrupt that is an FIQ (not safe from FIQ context) */ +void msm_fiq_enable(int number); +void msm_fiq_disable(int number); + +/* install an FIQ handler */ +int msm_fiq_set_handler(void (*func)(void *data, void *regs, void *svc_sp), + void *data); + +/* cause an edge triggered interrupt to fire (safe from FIQ context */ +void msm_trigger_irq(int number); + +#endif diff --git a/arch/arm/mach-msm/include/mach/gpio.h b/arch/arm/mach-msm/include/mach/gpio.h index 36ad50d3bfaa8..0971190a7826f 100644 --- a/arch/arm/mach-msm/include/mach/gpio.h +++ b/arch/arm/mach-msm/include/mach/gpio.h @@ -17,6 +17,63 @@ #define __ASM_ARCH_MSM_GPIO_H #include +#include + +#define FIRST_BOARD_GPIO NR_GPIO_IRQS + +/* GPIO TLMM (Top Level Multiplexing) Definitions */ + +/* GPIO TLMM: Function -- GPIO specific */ + +/* GPIO TLMM: Direction */ +enum { + GPIO_CFG_INPUT, + GPIO_CFG_OUTPUT, +}; + +/* GPIO TLMM: Pullup/Pulldown */ +enum { + GPIO_CFG_NO_PULL, + GPIO_CFG_PULL_DOWN, + GPIO_CFG_KEEPER, + GPIO_CFG_PULL_UP, +}; + +/* GPIO TLMM: Drive Strength */ +enum { + GPIO_CFG_2MA, + GPIO_CFG_4MA, + GPIO_CFG_6MA, + GPIO_CFG_8MA, + GPIO_CFG_10MA, + GPIO_CFG_12MA, + GPIO_CFG_14MA, + GPIO_CFG_16MA, +}; + +enum { + GPIO_CFG_ENABLE, + GPIO_CFG_DISABLE, +}; + +extern void config_gpio_table(uint32_t *table, int len); +extern int gpio_configure(unsigned int gpio, unsigned long flags); + +#define GPIO_CFG(gpio, func, dir, pull, drvstr) \ + ((((gpio) & 0x3FF) << 4) | \ + ((func) & 0xf) | \ + (((dir) & 0x1) << 14) | \ + (((pull) & 0x3) << 15) | \ + (((drvstr) & 0xF) << 17)) + +/** + * extract GPIO pin from bit-field used for gpio_tlmm_config + */ +#define GPIO_PIN(gpio_cfg) (((gpio_cfg) >> 4) & 0x3ff) +#define GPIO_FUNC(gpio_cfg) (((gpio_cfg) >> 0) & 0xf) +#define GPIO_DIR(gpio_cfg) (((gpio_cfg) >> 14) & 0x1) +#define GPIO_PULL(gpio_cfg) (((gpio_cfg) >> 15) & 0x3) +#define GPIO_DRVSTR(gpio_cfg) (((gpio_cfg) >> 17) & 0xf) #define gpio_get_value __gpio_get_value #define gpio_set_value __gpio_set_value diff --git a/arch/arm/mach-msm/include/mach/htc_35mm_jack.h b/arch/arm/mach-msm/include/mach/htc_35mm_jack.h new file mode 100644 index 0000000000000..5ce1e2a1e4780 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/htc_35mm_jack.h @@ -0,0 +1,31 @@ +/* arch/arm/mach-msm/include/mach/htc_35mm_jack.h + * + * Copyright (C) 2009 HTC, Inc. + * Author: Arec Kao + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
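To make the GPIO_CFG() bit-field layout above concrete, here is a hedged sketch of how board code might pack a small TLMM table and apply it with config_gpio_table(); the pin numbers, functions, and drive strengths are purely illustrative, and the <mach/gpio.h> include path is assumed.

/* Hedged sketch: pack two illustrative pins into TLMM words and apply them. */
#include <linux/kernel.h>
#include <mach/gpio.h>

static uint32_t example_gpio_table[] = {
	/* GPIO 27 as function 1 output, no pull, 8 mA drive (illustrative) */
	GPIO_CFG(27, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA),
	/* GPIO 42 as a plain input with pull-up, 2 mA drive (illustrative) */
	GPIO_CFG(42, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA),
};

static void example_apply_gpio_table(void)
{
	config_gpio_table(example_gpio_table, ARRAY_SIZE(example_gpio_table));

	/* The extraction macros recover individual fields from a packed word:
	 * GPIO_PIN(example_gpio_table[0]) evaluates to 27 and
	 * GPIO_DRVSTR(example_gpio_table[0]) to GPIO_CFG_8MA.
	 */
}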
+ * + */ + +#ifndef HTC_35MM_REMOTE_H +#define HTC_35MM_REMOTE_H + +/* Driver interfaces */ +int htc_35mm_jack_plug_event(int insert, int *hpin_stable); +int htc_35mm_key_event(int key, int *hpin_stable); + +/* Platform Specific Callbacks */ +struct h35mm_platform_data { + int (*plug_event_enable)(void); + int (*headset_has_mic)(void); + int (*key_event_enable)(void); + int (*key_event_disable)(void); +}; +#endif diff --git a/arch/arm/mach-msm/include/mach/htc_acoustic_qsd.h b/arch/arm/mach-msm/include/mach/htc_acoustic_qsd.h new file mode 100644 index 0000000000000..d42c6e009ce57 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/htc_acoustic_qsd.h @@ -0,0 +1,31 @@ +/* include/asm/mach-msm/htc_acoustic_qsd.h + * + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef _ARCH_ARM_MACH_MSM_HTC_ACOUSTIC_QSD_H_ +#define _ARCH_ARM_MACH_MSM_HTC_ACOUSTIC_QSD_H_ + +struct qsd_acoustic_ops { + void (*enable_mic_bias)(int en); +}; + +void acoustic_register_ops(struct qsd_acoustic_ops *ops); + +int turn_mic_bias_on(int on); +int force_headset_speaker_on(int enable); +int enable_aux_loopback(uint32_t enable); +int set_aux_gain(int level); +int enable_mos_test(int enable); + +#endif + diff --git a/arch/arm/mach-msm/include/mach/htc_battery.h b/arch/arm/mach-msm/include/mach/htc_battery.h new file mode 100644 index 0000000000000..fa9f6fd37bc70 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/htc_battery.h @@ -0,0 +1,127 @@ +/* + * Copyright (C) 2007 HTC Incorporated + * Author: Jay Tu (jay_tu@htc.com) + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef _HTC_BATTERY_H_ +#define _HTC_BATTERY_H_ +#include +#include + +#define BATT_EVENT_SUSPEND 0x01 + +#define CHECK_CHG 0X64 +#define SET_ICL500 0X65 +#define SET_ICL100 0X66 +#define CHECK_INT2 0X67 +#define OVERTEMP_VREG_4060 0XC8 +#define NORMALTEMP_VREG_4200 0XC9 +#define CHECK_INT1 0XCA +#define CHECK_CONTROL 0xCB +/* information about the system we're running on */ +extern unsigned int system_rev; + +enum batt_ctl_t { + DISABLE = 0, + ENABLE_SLOW_CHG, + ENABLE_FAST_CHG +}; + +/* This order is the same as htc_power_supplies[] + * And it's also the same as htc_cable_status_update() + */ +enum charger_type_t { + CHARGER_UNKNOWN = -1, + CHARGER_BATTERY = 0, + CHARGER_USB, + CHARGER_AC +}; + +enum { + GUAGE_NONE, + GUAGE_MODEM, + GUAGE_DS2784, + GUAGE_DS2746, +}; + +enum { + LINEAR_CHARGER, + SWITCH_CHARGER, +}; + +struct battery_info_reply { + u32 batt_id; /* Battery ID from ADC */ + u32 batt_vol; /* Battery voltage from ADC */ + s32 batt_temp; /* Battery Temperature (C) from formula and ADC */ + s32 batt_current; /* Battery current from ADC */ + u32 level; /* formula */ + u32 charging_source; /* 0: no cable, 1:usb, 2:AC */ + u32 charging_enabled; /* 0: Disable, 1: Enable */ + u32 full_bat; /* Full capacity of battery (mAh) */ + u32 full_level; /* Full Level */ + u32 over_vchg; /* 0:normal, 1:over voltage charger */ + s32 eval_current; /* System loading current from ADC */ +}; + +struct htc_battery_tps65200_int { + int chg_int; + int tps65200_reg; + struct delayed_work int_work; +}; + +struct htc_battery_platform_data { + int (*func_show_batt_attr)(struct device_attribute *attr, + char *buf); + int gpio_mbat_in; + int gpio_usb_id; + int gpio_mchg_en_n; + int gpio_iset; + int guage_driver; + int m2a_cable_detect; + int charger; + struct htc_battery_tps65200_int int_data; +}; + +#if CONFIG_HTC_BATTCHG +extern int register_notifier_cable_status(struct notifier_block *nb); +extern int unregister_notifier_cable_status(struct notifier_block *nb); +#else +static int register_notifier_cable_status(struct notifier_block *nb) { return 0; } +static int unregister_notifier_cable_status(struct notifier_block *nb) { return 0; } +#endif + +#ifdef CONFIG_BATTERY_DS2784 +extern int battery_charging_ctrl(enum batt_ctl_t ctl); +#endif +extern int get_cable_status(void); +#ifdef CONFIG_HTC_BATTCHG +extern int batt_register_client(struct notifier_block *nb); +extern int batt_unregister_client(struct notifier_block *nb); +extern int batt_notifier_call_chain(unsigned long val, void *v); +#else +static int batt_register_client(struct notifier_block *nb) +{ + return 0; +} + +static int batt_unregister_client(struct notifier_block *nb) +{ + return 0; +} + +static int batt_notifier_call_chain(unsigned long val, void *v) +{ + return 0; +} +#endif + +extern unsigned int batt_get_status(enum power_supply_property psp); +#endif diff --git a/arch/arm/mach-msm/include/mach/htc_headset.h b/arch/arm/mach-msm/include/mach/htc_headset.h new file mode 100644 index 0000000000000..5bea7a2bed3d7 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/htc_headset.h @@ -0,0 +1,179 @@ +/* + * Copyright (C) 2008 HTC, Inc. + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
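A hedged sketch of a client of the cable-status notifier declared above follows. It assumes the notifier chain reports one of the charger_type_t values as the event argument; the callback body is illustrative only.

/* Hedged sketch: react to cable status changes from the battery driver. */
#include <linux/notifier.h>
#include <mach/htc_battery.h>

static int example_cable_event(struct notifier_block *nb,
			       unsigned long event, void *data)
{
	switch (event) {
	case CHARGER_AC:
	case CHARGER_USB:
		/* a cable was attached; start charging-related work here */
		break;
	case CHARGER_BATTERY:
	default:
		/* cable removed or unknown source */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cable_nb = {
	.notifier_call = example_cable_event,
};

static int example_cable_init(void)
{
	return register_notifier_cable_status(&example_cable_nb);
}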
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __ASM_ARCH_HTC_HEADSET_H +#define __ASM_ARCH_HTC_HEADSET_H + +struct h2w_platform_data { + char *power_name; + int cable_in1; + int cable_in2; + int h2w_clk; + int h2w_data; + int debug_uart; + int headset_mic_35mm; + void (*config_cpld)(int); + void (*init_cpld)(void); + void (*set_dat)(int); + void (*set_clk)(int); + void (*set_dat_dir)(int); + void (*set_clk_dir)(int); + int (*get_dat)(void); + int (*get_clk)(void); +}; + +#define BIT_HEADSET (1 << 0) +#define BIT_HEADSET_NO_MIC (1 << 1) +#define BIT_TTY (1 << 2) +#define BIT_FM_HEADSET (1 << 3) +#define BIT_FM_SPEAKER (1 << 4) +#define BIT_TTY_VCO (1 << 5) +#define BIT_TTY_HCO (1 << 6) +#define BIT_35MM_HEADSET (1 << 7) + +enum { + H2W_NO_DEVICE = 0, + H2W_HTC_HEADSET = 1, + NORMAL_HEARPHONE= 2, + H2W_DEVICE = 3, + H2W_USB_CRADLE = 4, + H2W_UART_DEBUG = 5, +}; + +enum { + H2W_GPIO = 0, + H2W_UART1 = 1, + H2W_UART3 = 2, + H2W_BT = 3 +}; + +#define RESEND_DELAY (3) /* ms */ +#define MAX_ACK_RESEND_TIMES (6) /* follow spec */ +#define MAX_HOST_RESEND_TIMES (3) /* follow spec */ +#define MAX_HYGEIA_RESEND_TIMES (5) + +#define H2W_ASCR_DEVICE_INI (0x01) +#define H2W_ASCR_ACT_EN (0x02) +#define H2W_ASCR_PHONE_IN (0x04) +#define H2W_ASCR_RESET (0x08) +#define H2W_ASCR_AUDIO_IN (0x10) + +#define H2W_LED_OFF (0x0) +#define H2W_LED_BKL (0x1) +#define H2W_LED_MTL (0x2) + +typedef enum { + /* === system group 0x0000~0x00FF === */ + /* (R) Accessory type register */ + H2W_SYSTEM = 0x0000, + /* (R) Maximum group address */ + H2W_MAX_GP_ADD = 0x0001, + /* (R/W) Accessory system control register0 */ + H2W_ASCR0 = 0x0002, + + /* === key group 0x0100~0x01FF === */ + /* (R) Key group maximum sub address */ + H2W_KEY_MAXADD = 0x0100, + /* (R) ASCII key press down flag */ + H2W_ASCII_DOWN = 0x0101, + /* (R) ASCII key release up flag */ + H2W_ASCII_UP = 0x0102, + /* (R) Function key status flag */ + H2W_FNKEY_UPDOWN = 0x0103, + /* (R/W) Key device status */ + H2W_KD_STATUS = 0x0104, + + /* === led group 0x0200~0x02FF === */ + /* (R) LED group maximum sub address */ + H2W_LED_MAXADD = 0x0200, + /* (R/W) LED control register0 */ + H2W_LEDCT0 = 0x0201, + + /* === crdl group 0x0300~0x03FF === */ + /* (R) Cardle group maximum sub address */ + H2W_CRDL_MAXADD = 0x0300, + /* (R/W) Cardle group function control register0 */ + H2W_CRDLCT0 = 0x0301, + + /* === car kit group 0x0400~0x04FF === */ + H2W_CARKIT_MAXADD = 0x0400, + + /* === usb host group 0x0500~0x05FF === */ + H2W_USBHOST_MAXADD = 0x0500, + + /* === medical group 0x0600~0x06FF === */ + H2W_MED_MAXADD = 0x0600, + H2W_MED_CONTROL = 0x0601, + H2W_MED_IN_DATA = 0x0602, +} H2W_ADDR; + + +typedef struct H2W_INFO { + /* system group */ + unsigned char CLK_SP; + int SLEEP_PR; + unsigned char HW_REV; + int AUDIO_DEVICE; + unsigned char ACC_CLASS; + unsigned char MAX_GP_ADD; + + /* key group */ + int KEY_MAXADD; + int ASCII_DOWN; + int ASCII_UP; + int FNKEY_UPDOWN; + int KD_STATUS; + + /* led group */ + int LED_MAXADD; + int LEDCT0; + + /* medical group */ + int MED_MAXADD; + unsigned char AP_ID; + unsigned char AP_EN; + unsigned char DATA_EN; +} H2W_INFO; + +typedef enum { + H2W_500KHz = 1, + H2W_250KHz = 2, + H2W_166KHz = 3, + H2W_125KHz = 4, + H2W_100KHz = 5, + H2W_83KHz = 6, + H2W_71KHz = 7, + H2W_62KHz = 8, + H2W_55KHz = 9, + H2W_50KHz = 10, 
+} H2W_SPEED; + +typedef enum { + H2W_KEY_INVALID = -1, + H2W_KEY_PLAY = 0, + H2W_KEY_FORWARD = 1, + H2W_KEY_BACKWARD = 2, + H2W_KEY_VOLUP = 3, + H2W_KEY_VOLDOWN = 4, + H2W_KEY_PICKUP = 5, + H2W_KEY_HANGUP = 6, + H2W_KEY_MUTE = 7, + H2W_KEY_HOLD = 8, + H2W_NUM_KEYFUNC = 9, +} KEYFUNC; + +extern int turn_mic_bias_on(int on); + +#endif diff --git a/arch/arm/mach-msm/include/mach/htc_headset_gpio.h b/arch/arm/mach-msm/include/mach/htc_headset_gpio.h new file mode 100644 index 0000000000000..f18ba47a67c5a --- /dev/null +++ b/arch/arm/mach-msm/include/mach/htc_headset_gpio.h @@ -0,0 +1,29 @@ +/* + * + * /arch/arm/mach-msm/include/mach/htc_headset_gpio.h + * + * HTC GPIO headset detection driver. + * + * Copyright (C) 2010 HTC, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef HTC_HEADSET_GPIO_H +#define HTC_HEADSET_GPIO_H + +struct htc_headset_gpio_platform_data { + unsigned int hpin_gpio; + unsigned int key_enable_gpio; + unsigned int mic_select_gpio; +}; + +#endif diff --git a/arch/arm/mach-msm/include/mach/htc_headset_mgr.h b/arch/arm/mach-msm/include/mach/htc_headset_mgr.h new file mode 100644 index 0000000000000..c01f847bcd633 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/htc_headset_mgr.h @@ -0,0 +1,394 @@ +/* + * + * /arch/arm/mach-msm/include/mach/htc_headset_mgr.h + * + * HTC headset manager driver. + * + * Copyright (C) 2010 HTC, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef HTC_HEADSET_MGR_H +#define HTC_HEADSET_MGR_H + +#include + +#include +#include +#include + +#define SYS_MSG(fmt, arg...) \ + printk(KERN_INFO "[" DRIVER_NAME "] (%s) " fmt "\n", __func__, ## arg) +#if 0 +#define DBG_MSG(fmt, arg...) \ + printk(KERN_INFO "##### [" DRIVER_NAME "] (%s) " fmt "\n", \ + __func__, ## arg) +#else +#define DBG_MSG(fmt, arg...) 
{} +#endif + +#define DEVICE_ACCESSORY_ATTR(_name, _mode, _show, _store) \ + struct device_attribute dev_attr_##_name = \ + __ATTR(flag, _mode, _show, _store) + +#define DRIVER_HS_MGR_RPC_SERVER (1 << 0) + +#define HS_DEF_MIC_ADC_10_BIT 200 +#define HS_DEF_MIC_ADC_16_BIT 14894 /* (0.5 / 2.2) * (2 ^ 16) */ + +#define HS_DELAY_ZERO 0 +#define HS_DELAY_MIC_BIAS 200 +#define HS_DELAY_MIC_DETECT 500 +#define HS_DELAY_INSERT 500 +#define HS_DELAY_REMOVE 200 +#define HS_DELAY_BUTTON 500 + +#define HS_JIFFIES_ZERO msecs_to_jiffies(HS_DELAY_ZERO) +#define HS_JIFFIES_MIC_BIAS msecs_to_jiffies(HS_DELAY_MIC_BIAS) +#define HS_JIFFIES_MIC_DETECT msecs_to_jiffies(HS_DELAY_MIC_DETECT) +#define HS_JIFFIES_INSERT msecs_to_jiffies(HS_DELAY_INSERT) +#define HS_JIFFIES_REMOVE msecs_to_jiffies(HS_DELAY_REMOVE) +#define HS_JIFFIES_BUTTON msecs_to_jiffies(HS_DELAY_BUTTON) + +/* Definitions for Headset RPC Server */ +#define HS_RPC_SERVER_PROG 0x30100004 +#define HS_RPC_SERVER_VERS 0x00000000 +#define HS_RPC_SERVER_PROC_NULL 0 +#define HS_RPC_SERVER_PROC_KEY 1 + +/* Definitions for Headset RPC Client */ +#define HS_RPC_CLIENT_PROG 0x30100005 +#define HS_RPC_CLIENT_VERS 0x00000000 +#define HS_RPC_CLIENT_PROC_NULL 0 +#define HS_RPC_CLIENT_PROC_ADC 1 + +#define HS_MGR_KEYCODE_END KEY_END /* 107 */ +#define HS_MGR_KEYCODE_MUTE KEY_MUTE /* 113 */ +#define HS_MGR_KEYCODE_VOLDOWN KEY_VOLUMEDOWN /* 114 */ +#define HS_MGR_KEYCODE_VOLUP KEY_VOLUMEUP /* 115 */ +#define HS_MGR_KEYCODE_FORWARD KEY_NEXTSONG /* 163 */ +#define HS_MGR_KEYCODE_PLAY KEY_PLAYPAUSE /* 164 */ +#define HS_MGR_KEYCODE_BACKWARD KEY_PREVIOUSSONG /* 165 */ +#define HS_MGR_KEYCODE_MEDIA KEY_MEDIA /* 226 */ +#define HS_MGR_KEYCODE_SEND KEY_SEND /* 231 */ + +#define HEADSET_NO_MIC 0 +#define HEADSET_MIC 1 +#define HEADSET_METRICO 2 + +#define HTC_35MM_UNPLUG 0 +#define HTC_35MM_NO_MIC 1 +#define HTC_35MM_MIC 2 + +enum { + HEADSET_REG_REMOTE_ADC, + HEADSET_REG_RPC_KEY, + HEADSET_REG_MIC_STATUS, + HEADSET_REG_MIC_BIAS, + HEADSET_REG_MIC_SELECT, + HEADSET_REG_KEY_INT_ENABLE, + HEADSET_REG_KEY_ENABLE, +}; + +enum { + HS_MGR_KEY_INVALID = -1, + HS_MGR_KEY_NONE = 0, + HS_MGR_KEY_PLAY = 1, + HS_MGR_KEY_BACKWARD = 2, + HS_MGR_KEY_FORWARD = 3, +}; + +struct hs_rpc_server_args_key { + uint32_t adc; +}; + +struct hs_rpc_client_req_adc { + struct rpc_request_hdr hdr; +}; + +struct hs_rpc_client_rep_adc { + struct rpc_reply_hdr hdr; + uint32_t adc; +}; + +struct headset_notifier { + int id; + void *func; +}; + +struct hs_notifier_func { + int (*remote_adc)(int *); + void (*rpc_key)(int); + int (*mic_status)(void); + int (*mic_bias_enable)(int); + void (*mic_select)(int); + int (*key_int_enable)(int); + void (*key_enable)(int); +}; + +struct htc_headset_mgr_platform_data { + unsigned int driver_flag; + + int cable_in1; + int cable_in2; + int h2w_clk; + int h2w_data; + int debug_uart; + int headset_mic_35mm; + + void (*h2w_power)(int); + void (*config)(int); + void (*set_dat)(int); + void (*set_clk)(int); + void (*set_dat_dir)(int); + void (*set_clk_dir)(int); + int (*get_dat)(void); + int (*get_clk)(void); +}; + +#define BIT_HEADSET (1 << 0) +#define BIT_HEADSET_NO_MIC (1 << 1) +#define BIT_TTY_FULL (1 << 2) +#define BIT_FM_HEADSET (1 << 3) +#define BIT_FM_SPEAKER (1 << 4) +#define BIT_TTY_VCO (1 << 5) +#define BIT_TTY_HCO (1 << 6) +#define BIT_35MM_HEADSET (1 << 7) +#define BIT_TV_OUT (1 << 8) +#define BIT_USB_CRADLE (1 << 9) +#define BIT_TV_OUT_AUDIO (1 << 10) +#define BIT_HDMI_CABLE (1 << 11) +#define BIT_HDMI_AUDIO (1 << 12) +#define BIT_USB_HEADSET (1 << 13) + +enum { 
+ STATUS_DISCONNECTED = 0, + STATUS_CONNECTED_ENABLED = 1, + STATUS_CONNECTED_DISABLED = 2, +}; + +enum { + H2W_GPIO = 0, + H2W_UART1 = 1, + H2W_UART3 = 2, + H2W_BT = 3 +}; + +enum { + NO_DEVICE = 0, + HTC_HEADSET = 1, + NORMAL_HEARPHONE = 2, + H2W_DEVICE = 3, + USB_CRADLE = 4, + UART_DEBUG = 5, + H2W_TVOUT = 6, + USB_HEADSET = 7, +}; + +#define RESEND_DELAY (3) /* ms */ +#define MAX_ACK_RESEND_TIMES (6) /* follow spec */ +#define MAX_HOST_RESEND_TIMES (3) /* follow spec */ +#define MAX_HYGEIA_RESEND_TIMES (5) + +#define H2W_ASCR_DEVICE_INI (0x01) +#define H2W_ASCR_ACT_EN (0x02) +#define H2W_ASCR_PHONE_IN (0x04) +#define H2W_ASCR_RESET (0x08) +#define H2W_ASCR_AUDIO_IN (0x10) + +#define H2W_LED_OFF (0x0) +#define H2W_LED_BKL (0x1) +#define H2W_LED_MTL (0x2) + +#define H2W_PhoneIn (0x01) +#define H2W_MuteLed (0x02) + +typedef enum { + /* === system group 0x0000~0x00FF === */ + /* (R) Accessory type register */ + H2W_SYSTEM = 0x0000, + /* (R) Maximum group address */ + H2W_MAX_GP_ADD = 0x0001, + /* (R/W) Accessory system control register0 */ + H2W_ASCR0 = 0x0002, + + /* === key group 0x0100~0x01FF === */ + /* (R) Key group maximum sub address */ + H2W_KEY_MAXADD = 0x0100, + /* (R) ASCII key press down flag */ + H2W_ASCII_DOWN = 0x0101, + /* (R) ASCII key release up flag */ + H2W_ASCII_UP = 0x0102, + /* (R) Function key status flag */ + H2W_FNKEY_UPDOWN = 0x0103, + /* (R/W) Key device status */ + H2W_KD_STATUS = 0x0104, + + /* === led group 0x0200~0x02FF === */ + /* (R) LED group maximum sub address */ + H2W_LED_MAXADD = 0x0200, + /* (R/W) LED control register0 */ + H2W_LEDCT0 = 0x0201, + + /* === crdl group 0x0300~0x03FF === */ + /* (R) Cardle group maximum sub address */ + H2W_CRDL_MAXADD = 0x0300, + /* (R/W) Cardle group function control register0 */ + H2W_CRDLCT0 = 0x0301, + + /* === car kit group 0x0400~0x04FF === */ + H2W_CARKIT_MAXADD = 0x0400, + + /* === usb host group 0x0500~0x05FF === */ + H2W_USBHOST_MAXADD = 0x0500, + + /* === medical group 0x0600~0x06FF === */ + H2W_MED_MAXADD = 0x0600, + H2W_MED_CONTROL = 0x0601, + H2W_MED_IN_DATA = 0x0602, +} H2W_ADDR; + +typedef struct H2W_INFO { + /* system group */ + unsigned char CLK_SP; + int SLEEP_PR; + unsigned char HW_REV; + int AUDIO_DEVICE; + unsigned char ACC_CLASS; + unsigned char MAX_GP_ADD; + + /* key group */ + int KEY_MAXADD; + int ASCII_DOWN; + int ASCII_UP; + int FNKEY_UPDOWN; + int KD_STATUS; + + /* led group */ + int LED_MAXADD; + int LEDCT0; + + /* medical group */ + int MED_MAXADD; + unsigned char AP_ID; + unsigned char AP_EN; + unsigned char DATA_EN; +} H2W_INFO; + +typedef enum { + H2W_500KHz = 1, + H2W_250KHz = 2, + H2W_166KHz = 3, + H2W_125KHz = 4, + H2W_100KHz = 5, + H2W_83KHz = 6, + H2W_71KHz = 7, + H2W_62KHz = 8, + H2W_55KHz = 9, + H2W_50KHz = 10, +} H2W_SPEED; + +struct h2w_info { + unsigned int driver_flag; + + unsigned long hpin_jiffies; + + struct class *htc_accessory_class; + struct device *tty_dev; + struct device *fm_dev; + struct device *mic_dev; + struct device *mute_dev; + struct device *phonein_dev; + struct mutex mutex_lock; + struct mutex mutex_rc_lock; + + struct switch_dev sdev; + struct input_dev *input; + unsigned long insert_jiffies; + + int ignore_btn; + atomic_t btn_state; + + int tty_enable_flag; + int fm_flag; + int mic_switch_flag; + int rc_flag; + + unsigned int irq; + unsigned int irq_btn; + unsigned int irq_btn_35mm; + + int cable_in1; + int cable_in2; + int h2w_clk; + int h2w_data; + int debug_uart; + int headset_mic_35mm; + + /* The variables were used by 35mm headset*/ + int 
key_level_flag; + int ext_35mm_status; + int h2w_35mm_status; + int is_ext_insert; + int mic_bias_state; + int metrico_status; /* For HW Metrico lab test */ + + /* The variables are used by USB headset */ + int usb_dev_type; + int usb_dev_status; + + void (*insert_11pin_35mm)(int *); + void (*remove_11pin_35mm)(void); + + void (*configure) (int); + int (*get_path) (void); + void (*h2w_power)(int); + void (*set_dat)(int); + void (*set_clk)(int); + void (*set_dat_dir)(int); + void (*set_clk_dir)(int); + int (*get_dat)(void); + int (*get_clk)(void); + + H2W_INFO h2w_info; + H2W_SPEED speed; + + struct wake_lock headset_wake_lock; +}; + +int headset_notifier_register(struct headset_notifier *notifier); + +void insert_headset(int); +void remove_headset(void); + +void headset_button_event(int is_press, int type); +void button_pressed(int type); +void button_released(int type); + +void button_h2w_do_work(struct work_struct *w); +void detect_h2w_do_work(struct work_struct *w); + +void headset_ext_detect(int type); + +extern int switch_send_event(unsigned int bit, int on); + +/* notify the 3.5mm driver of events */ +int htc_35mm_remote_notify_ext_headset_irq(int insert); +int htc_35mm_remote_notify_insert_ext_headset(int insert); +int htc_35mm_remote_notify_microp_ready(void); +int htc_35mm_remote_notify_button_status(int key_level); +int htc_35mm_remote_notify_irq_enable(int enable); + +void hs_notify_hpin_irq(void); +int hs_hpin_stable(void); + +#endif diff --git a/arch/arm/mach-msm/include/mach/htc_headset_microp.h b/arch/arm/mach-msm/include/mach/htc_headset_microp.h new file mode 100644 index 0000000000000..5239c0cb2b318 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/htc_headset_microp.h @@ -0,0 +1,48 @@ +/* + * + * /arch/arm/mach-msm/include/mach/htc_headset_microp.h + * + * HTC Micro-P headset detection driver. + * + * Copyright (C) 2010 HTC, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef HTC_HEADSET_MICROP_H +#define HTC_HEADSET_MICROP_H + +struct htc_headset_microp_platform_data { + /* Headset detection */ + int hpin_int; + unsigned int hpin_irq; + uint8_t hpin_mask[3]; + + /* Remote key detection */ + int remote_int; + unsigned int remote_irq; + + /* Remote key interrupt enable */ + unsigned int remote_enable_pin; + + /* ADC tables */ + uint8_t adc_channel; + uint16_t adc_remote[6]; + uint16_t adc_metrico[2]; +}; + +struct htc_headset_microp_info { + struct htc_headset_microp_platform_data pdata; + int hpin_gpio_mask; + unsigned int hpin_debounce; +}; + +#endif diff --git a/arch/arm/mach-msm/include/mach/htc_pwrsink.h b/arch/arm/mach-msm/include/mach/htc_pwrsink.h new file mode 100644 index 0000000000000..c7a91f1d906cf --- /dev/null +++ b/arch/arm/mach-msm/include/mach/htc_pwrsink.h @@ -0,0 +1,87 @@ +/* include/asm/mach-msm/htc_pwrsink.h + * + * Copyright (C) 2007 Google, Inc. + * Copyright (C) 2008 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef _ARCH_ARM_MACH_MSM_HTC_PWRSINK_H_ +#define _ARCH_ARM_MACH_MSM_HTC_PWRSINK_H_ + +#include +#include + +typedef enum { + PWRSINK_AUDIO_PCM = 0, + PWRSINK_AUDIO_MP3, + PWRSINK_AUDIO_AAC, + + PWRSINK_AUDIO_LAST = PWRSINK_AUDIO_AAC, + PWRSINK_AUDIO_INVALID +} pwrsink_audio_id_type; + +struct pwr_sink_audio { + unsigned volume; + unsigned percent; +}; + +typedef enum { + PWRSINK_SYSTEM_LOAD = 0, + PWRSINK_AUDIO, + PWRSINK_BACKLIGHT, + PWRSINK_LED_BUTTON, + PWRSINK_LED_KEYBOARD, + PWRSINK_GP_CLK, + PWRSINK_BLUETOOTH, + PWRSINK_CAMERA, + PWRSINK_SDCARD, + PWRSINK_VIDEO, + PWRSINK_WIFI, + + PWRSINK_LAST = PWRSINK_WIFI, + PWRSINK_INVALID +} pwrsink_id_type; + +struct pwr_sink { + pwrsink_id_type id; + unsigned ua_max; + unsigned percent_util; +}; + +struct pwr_sink_platform_data { + unsigned num_sinks; + struct pwr_sink *sinks; + int (*suspend_late)(struct platform_device *, pm_message_t state); + int (*resume_early)(struct platform_device *); + void (*suspend_early)(struct early_suspend *); + void (*resume_late)(struct early_suspend *); +}; + +#ifndef CONFIG_HTC_PWRSINK +static inline int htc_pwrsink_set(pwrsink_id_type id, unsigned percent) +{ + return 0; +} +static inline int htc_pwrsink_audio_set(pwrsink_audio_id_type id, + unsigned percent_utilized) { return 0; } +static inline int htc_pwrsink_audio_volume_set( + pwrsink_audio_id_type id, unsigned volume) { return 0; } +static inline int htc_pwrsink_audio_path_set(unsigned path) { return 0; } +#else +extern int htc_pwrsink_set(pwrsink_id_type id, unsigned percent); +extern int htc_pwrsink_audio_set(pwrsink_audio_id_type id, + unsigned percent_utilized); +extern int htc_pwrsink_audio_volume_set(pwrsink_audio_id_type id, + unsigned volume); +extern int htc_pwrsink_audio_path_set(unsigned path); +#endif + +#endif diff --git a/arch/arm/mach-msm/include/mach/iommu.h b/arch/arm/mach-msm/include/mach/iommu.h index 296c0f10f230e..305af33e28a48 100644 --- a/arch/arm/mach-msm/include/mach/iommu.h +++ b/arch/arm/mach-msm/include/mach/iommu.h @@ -99,13 +99,6 @@ struct msm_iommu_ctx_drvdata { struct list_head attached_elm; }; -/* - * Look up an IOMMU context device by its context name. NULL if none found. - * Useful for testing and drivers that do not yet fully have IOMMU stuff in - * their platform devices. - */ -struct device *msm_iommu_get_ctx(const char *ctx_name); - /* * Interrupt handler for the IOMMU context fault interrupt. Hooking the * interrupt is not supported in the API yet, but this will print an error @@ -113,4 +106,18 @@ struct device *msm_iommu_get_ctx(const char *ctx_name); */ irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id); +#ifdef CONFIG_MSM_IOMMU +/* + * Look up an IOMMU context device by its context name. NULL if none found. + * Useful for testing and drivers that do not yet fully have IOMMU stuff in + * their platform devices. 
+ */ +struct device *msm_iommu_get_ctx(const char *ctx_name); +#else +static inline struct device *msm_iommu_get_ctx(const char *ctx_name) +{ + return NULL; +} +#endif + #endif diff --git a/arch/arm/mach-msm/include/mach/iommu_domains.h b/arch/arm/mach-msm/include/mach/iommu_domains.h new file mode 100644 index 0000000000000..958b61d9c0dd7 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/iommu_domains.h @@ -0,0 +1,110 @@ +/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _ARCH_IOMMU_DOMAINS_H +#define _ARCH_IOMMU_DOMAINS_H + +enum { + VIDEO_DOMAIN, + CAMERA_DOMAIN, + DISPLAY_DOMAIN, + ROTATOR_DOMAIN, + MAX_DOMAINS +}; + +enum { + VIDEO_FIRMWARE_POOL, + VIDEO_MAIN_POOL, + GEN_POOL, +}; + + +#if defined(CONFIG_MSM_IOMMU) + +extern struct iommu_domain *msm_get_iommu_domain(int domain_num); + +extern unsigned long msm_allocate_iova_address(unsigned int iommu_domain, + unsigned int partition_no, + unsigned long size, + unsigned long align); + +extern void msm_free_iova_address(unsigned long iova, + unsigned int iommu_domain, + unsigned int partition_no, + unsigned long size); + +extern unsigned long msm_subsystem_get_domain_no(int subsys_id); + +extern unsigned long msm_subsystem_get_partition_no(int subsys_id); + +extern int msm_use_iommu(void); + +extern int msm_iommu_map_extra(struct iommu_domain *domain, + unsigned long start_iova, + unsigned long size, + unsigned long page_size, + int cached); + +extern void msm_iommu_unmap_extra(struct iommu_domain *domain, + unsigned long start_iova, + unsigned long size, + unsigned long page_size); + +#else +static inline struct iommu_domain + *msm_get_iommu_domain(int subsys_id) { return NULL; } + + + +static inline unsigned long msm_allocate_iova_address(unsigned int iommu_domain, + unsigned int partition_no, + unsigned long size, + unsigned long align) { return 0; } + +static inline void msm_free_iova_address(unsigned long iova, + unsigned int iommu_domain, + unsigned int partition_no, + unsigned long size) { return; } + +static inline unsigned long msm_subsystem_get_domain_no(int subsys_id) +{ + return 0xFFFFFFFF; +} + +static inline unsigned long msm_subsystem_get_partition_no(int subsys_id) +{ + return 0xFFFFFFFF; +} + +static inline int msm_use_iommu(void) +{ + return 0; +} + +static inline int msm_iommu_map_extra(struct iommu_domain *domain, + unsigned long start_iova, + unsigned long size, + unsigned long page_size, + int cached) +{ + return -ENODEV; +} + +static inline void msm_iommu_unmap_extra(struct iommu_domain *domain, + unsigned long start_iova, + unsigned long size, + unsigned long page_size) +{ +} +#endif + +#endif diff --git a/arch/arm/mach-msm/include/mach/iommu_hw-8xxx.h b/arch/arm/mach-msm/include/mach/iommu_hw-8xxx.h index c2c3da9444f48..d2463bf78fea9 100644 --- a/arch/arm/mach-msm/include/mach/iommu_hw-8xxx.h +++ b/arch/arm/mach-msm/include/mach/iommu_hw-8xxx.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. 
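As a hedged illustration of the domain helpers declared above, the sketch below reserves and releases an IOVA window in the video domain; the choice of VIDEO_MAIN_POOL as the partition number and the 1 MB / 4 KB sizes are assumptions for illustration.

/* Hedged sketch: carve out an illustrative IOVA range when an IOMMU exists. */
#include <mach/iommu_domains.h>

static unsigned long example_reserve_iova(void)
{
	if (!msm_use_iommu())
		return 0;	/* no IOMMU on this target; fall back to physical */

	return msm_allocate_iova_address(VIDEO_DOMAIN,
					 VIDEO_MAIN_POOL,	/* partition: assumed */
					 1UL << 20,		/* 1 MB window */
					 1UL << 12);		/* 4 KB alignment */
}

static void example_release_iova(unsigned long iova)
{
	if (iova)
		msm_free_iova_address(iova, VIDEO_DOMAIN,
				      VIDEO_MAIN_POOL, 1UL << 20);
}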
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -623,20 +623,6 @@ do { \ #define SET_INDEX(b, c, v) SET_CONTEXT_FIELD(b, c, V2PSR, INDEX, v) -/* V2Pxx UW UR PW PR */ -#define SET_V2PUW_INDEX(b, c, v) SET_CONTEXT_FIELD(b, c, V2PUW, V2Pxx_INDEX, v) -#define SET_V2PUW_VA(b, c, v) SET_CONTEXT_FIELD(b, c, V2PUW, V2Pxx_VA, v) - -#define SET_V2PUR_INDEX(b, c, v) SET_CONTEXT_FIELD(b, c, V2PUR, V2Pxx_INDEX, v) -#define SET_V2PUR_VA(b, c, v) SET_CONTEXT_FIELD(b, c, V2PUR, V2Pxx_VA, v) - -#define SET_V2PPW_INDEX(b, c, v) SET_CONTEXT_FIELD(b, c, V2PPW, V2Pxx_INDEX, v) -#define SET_V2PPW_VA(b, c, v) SET_CONTEXT_FIELD(b, c, V2PPW, V2Pxx_VA, v) - -#define SET_V2PPR_INDEX(b, c, v) SET_CONTEXT_FIELD(b, c, V2PPR, V2Pxx_INDEX, v) -#define SET_V2PPR_VA(b, c, v) SET_CONTEXT_FIELD(b, c, V2PPR, V2Pxx_VA, v) - - /* Context Register getters */ /* ACTLR */ #define GET_CFERE(b, c) GET_CONTEXT_FIELD(b, c, ACTLR, CFERE) @@ -824,20 +810,6 @@ do { \ #define GET_INDEX(b, c) GET_CONTEXT_FIELD(b, c, V2PSR, INDEX) -/* V2Pxx UW UR PW PR */ -#define GET_V2PUW_INDEX(b, c) GET_CONTEXT_FIELD(b, c, V2PUW, V2Pxx_INDEX) -#define GET_V2PUW_VA(b, c) GET_CONTEXT_FIELD(b, c, V2PUW, V2Pxx_VA) - -#define GET_V2PUR_INDEX(b, c) GET_CONTEXT_FIELD(b, c, V2PUR, V2Pxx_INDEX) -#define GET_V2PUR_VA(b, c) GET_CONTEXT_FIELD(b, c, V2PUR, V2Pxx_VA) - -#define GET_V2PPW_INDEX(b, c) GET_CONTEXT_FIELD(b, c, V2PPW, V2Pxx_INDEX) -#define GET_V2PPW_VA(b, c) GET_CONTEXT_FIELD(b, c, V2PPW, V2Pxx_VA) - -#define GET_V2PPR_INDEX(b, c) GET_CONTEXT_FIELD(b, c, V2PPR, V2Pxx_INDEX) -#define GET_V2PPR_VA(b, c) GET_CONTEXT_FIELD(b, c, V2PPR, V2Pxx_VA) - - /* Global Registers */ #define M2VCBR_N (0xFF000) #define CBACR_N (0xFF800) diff --git a/arch/arm/mach-msm/include/mach/ion.h b/arch/arm/mach-msm/include/mach/ion.h new file mode 100644 index 0000000000000..b472d27fc47db --- /dev/null +++ b/arch/arm/mach-msm/include/mach/ion.h @@ -0,0 +1,29 @@ +/** + * + * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __MACH_ION_H_ +#define __MACH_ION_H_ + +enum ion_memory_types { + ION_EBI_TYPE, + ION_SMI_TYPE, +}; + +enum ion_permission_type { + IPT_TYPE_MM_CARVEOUT = 0, + IPT_TYPE_MFC_SHAREDMEM = 1, + IPT_TYPE_MDP_WRITEBACK = 2, +}; + +#endif diff --git a/arch/arm/mach-msm/include/mach/irqs-7x00.h b/arch/arm/mach-msm/include/mach/irqs-7x00.h index f1fe70612fe9b..65e18d3940709 100644 --- a/arch/arm/mach-msm/include/mach/irqs-7x00.h +++ b/arch/arm/mach-msm/include/mach/irqs-7x00.h @@ -71,5 +71,6 @@ #define NR_MSM_IRQS 64 #define NR_GPIO_IRQS 122 #define NR_BOARD_IRQS 64 +#define NR_SIRC_IRQS 0 #endif diff --git a/arch/arm/mach-msm/include/mach/irqs-7x30.h b/arch/arm/mach-msm/include/mach/irqs-7x30.h index 67c5396514feb..c564576cfdf33 100644 --- a/arch/arm/mach-msm/include/mach/irqs-7x30.h +++ b/arch/arm/mach-msm/include/mach/irqs-7x30.h @@ -155,16 +155,11 @@ #define INT_MDDI_CLIENT INT_MDC #define INT_NAND_WR_ER_DONE INT_EBI2_WR_ER_DONE #define INT_NAND_OP_DONE INT_EBI2_OP_DONE +#define INT_GRAPHICS INT_GRP_3D #define NR_MSM_IRQS 128 +#define NR_SIRC_IRQS 0 #define NR_GPIO_IRQS 182 -#define PMIC8058_IRQ_BASE (NR_MSM_IRQS + NR_GPIO_IRQS) -#define NR_PMIC8058_GPIO_IRQS 40 -#define NR_PMIC8058_MPP_IRQS 12 -#define NR_PMIC8058_MISC_IRQS 8 -#define NR_PMIC8058_IRQS (NR_PMIC8058_GPIO_IRQS +\ - NR_PMIC8058_MPP_IRQS +\ - NR_PMIC8058_MISC_IRQS) -#define NR_BOARD_IRQS NR_PMIC8058_IRQS +#define NR_BOARD_IRQS 64 #endif /* __ASM_ARCH_MSM_IRQS_7X30_H */ diff --git a/arch/arm/mach-msm/include/mach/irqs.h b/arch/arm/mach-msm/include/mach/irqs.h index 8679a45647447..f32831bc6f96f 100644 --- a/arch/arm/mach-msm/include/mach/irqs.h +++ b/arch/arm/mach-msm/include/mach/irqs.h @@ -19,6 +19,14 @@ #define MSM_IRQ_BIT(irq) (1 << ((irq) & 31)) +#define NR_BOARD_IRQS 64 +#define NR_MICROP_IRQS 16 + +#define FIRST_SIRC_IRQ (NR_MSM_IRQS) +#define FIRST_GPIO_IRQ (NR_MSM_IRQS + NR_SIRC_IRQS) +#define FIRST_BOARD_IRQ (NR_MSM_IRQS + NR_SIRC_IRQS + NR_GPIO_IRQS) +#define FIRST_MICROP_IRQ (FIRST_BOARD_IRQ + NR_BOARD_IRQS) + #if defined(CONFIG_ARCH_MSM7X30) #include "irqs-7x30.h" #elif defined(CONFIG_ARCH_QSD8X50) @@ -32,8 +40,17 @@ #error "Unknown architecture specification" #endif -#define NR_IRQS (NR_MSM_IRQS + NR_GPIO_IRQS + NR_BOARD_IRQS) -#define MSM_GPIO_TO_INT(n) (NR_MSM_IRQS + (n)) -#define MSM_INT_TO_REG(base, irq) (base + irq / 32) +#if defined(CONFIG_MACH_BRAVO) || defined(CONFIG_MACH_BRAVOC) || defined(CONFIG_MACH_INCREDIBLEC) || defined(CONFIG_MACH_SUPERSONIC) +#define NR_IRQS (NR_MSM_IRQS + NR_SIRC_IRQS + NR_GPIO_IRQS + NR_BOARD_IRQS \ + + NR_MICROP_IRQS) +#define MSM_INT_TO_GPIO(n) ((n) - NR_MSM_IRQS) +#define MSM_uP_TO_INT(n) (FIRST_MICROP_IRQ + (n)) +#define MSM_INT_TO_GPIO(n) ((n) - NR_MSM_IRQS) +#define MSM_uP_TO_INT(n) (FIRST_MICROP_IRQ + (n)) +#else +#define NR_IRQS (NR_MSM_IRQS + NR_SIRC_IRQS + NR_GPIO_IRQS + NR_BOARD_IRQS) +#endif +#define MSM_GPIO_TO_INT(n) (FIRST_GPIO_IRQ + (n)) +#define MSM_INT_TO_REG(base, irq) (base + irq / 32) #endif diff --git a/arch/arm/mach-msm/include/mach/memory.h b/arch/arm/mach-msm/include/mach/memory.h index 070e17d237f19..903b7b7b106d1 100644 --- a/arch/arm/mach-msm/include/mach/memory.h +++ b/arch/arm/mach-msm/include/mach/memory.h @@ -1,6 +1,7 @@ /* arch/arm/mach-msm/include/mach/memory.h * * Copyright (C) 2007 Google, Inc. + * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. 
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -12,22 +13,117 @@ * GNU General Public License for more details. * */ - #ifndef __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H +#include /* physical offset of RAM */ -#if defined(CONFIG_ARCH_QSD8X50) && defined(CONFIG_MSM_SOC_REV_A) -#define PHYS_OFFSET UL(0x00000000) -#elif defined(CONFIG_ARCH_QSD8X50) -#define PHYS_OFFSET UL(0x20000000) -#elif defined(CONFIG_ARCH_MSM7X30) -#define PHYS_OFFSET UL(0x00200000) -#elif defined(CONFIG_ARCH_MSM8X60) -#define PHYS_OFFSET UL(0x40200000) -#else -#define PHYS_OFFSET UL(0x10000000) +#define PHYS_OFFSET UL(CONFIG_PHYS_OFFSET) + +#define MAX_PHYSMEM_BITS 32 +#define SECTION_SIZE_BITS 29 + +/* Maximum number of Memory Regions */ +#define MAX_NR_REGIONS 4 + +/* Certain configurations of MSM7x30 have multiple memory banks. +* One or more of these banks can contain holes in the memory map as well. +* These macros define appropriate conversion routines between the physical +* and virtual address domains for supporting these configurations using +* SPARSEMEM and a 3G/1G VM split. +*/ + +#if defined(CONFIG_ARCH_MSM7X30) + +#define EBI0_PHYS_OFFSET PHYS_OFFSET +#define EBI0_PAGE_OFFSET PAGE_OFFSET +#define EBI0_SIZE 0x10000000 + +#define EBI1_PHYS_OFFSET 0x40000000 +#define EBI1_PAGE_OFFSET (EBI0_PAGE_OFFSET + EBI0_SIZE) + +#if (defined(CONFIG_SPARSEMEM) && defined(CONFIG_VMSPLIT_3G)) + +#define __phys_to_virt(phys) \ + ((phys) >= EBI1_PHYS_OFFSET ? \ + (phys) - EBI1_PHYS_OFFSET + EBI1_PAGE_OFFSET : \ + (phys) - EBI0_PHYS_OFFSET + EBI0_PAGE_OFFSET) + +#define __virt_to_phys(virt) \ + ((virt) >= EBI1_PAGE_OFFSET ? \ + (virt) - EBI1_PAGE_OFFSET + EBI1_PHYS_OFFSET : \ + (virt) - EBI0_PAGE_OFFSET + EBI0_PHYS_OFFSET) + +#endif + +#endif + +#define HAS_ARCH_IO_REMAP_PFN_RANGE + +#ifndef __ASSEMBLY__ +void *alloc_bootmem_aligned(unsigned long size, unsigned long alignment); +void *allocate_contiguous_ebi(unsigned long, unsigned long, int); +unsigned long allocate_contiguous_ebi_nomap(unsigned long, unsigned long); +void clean_and_invalidate_caches(unsigned long, unsigned long, unsigned long); +void clean_caches(unsigned long, unsigned long, unsigned long); +void invalidate_caches(unsigned long, unsigned long, unsigned long); +int platform_physical_remove_pages(u64, u64); +int platform_physical_active_pages(u64, u64); +int platform_physical_low_power_pages(u64, u64); + +extern int (*change_memory_power)(u64, u64, int); + +#if defined(CONFIG_ARCH_MSM_ARM11) || defined(CONFIG_ARCH_MSM_CORTEX_A5) +void write_to_strongly_ordered_memory(void); +void map_page_strongly_ordered(void); #endif +#ifdef CONFIG_CACHE_L2X0 +extern void l2x0_cache_sync(void); +#define finish_arch_switch(prev) do { l2x0_cache_sync(); } while (0) #endif +#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) +extern void store_ttbr0(void); +#define finish_arch_switch(prev) do { store_ttbr0(); } while (0) +#endif + +#ifdef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0 +extern unsigned long membank0_size; +extern unsigned long membank1_start; + +#define MEMBANK0_PHYS_OFFSET PHYS_OFFSET +#define MEMBANK0_PAGE_OFFSET PAGE_OFFSET + +#define MEMBANK1_PHYS_OFFSET (membank1_start) +#define MEMBANK1_PAGE_OFFSET (MEMBANK0_PAGE_OFFSET + (membank0_size)) + +#define __phys_to_virt(phys) \ + ((MEMBANK1_PHYS_OFFSET && ((phys) >= MEMBANK1_PHYS_OFFSET)) ? 
\ + (phys) - MEMBANK1_PHYS_OFFSET + MEMBANK1_PAGE_OFFSET : \ + (phys) - MEMBANK0_PHYS_OFFSET + MEMBANK0_PAGE_OFFSET) + +#define __virt_to_phys(virt) \ + ((MEMBANK1_PHYS_OFFSET && ((virt) >= MEMBANK1_PAGE_OFFSET)) ? \ + (virt) - MEMBANK1_PAGE_OFFSET + MEMBANK1_PHYS_OFFSET : \ + (virt) - MEMBANK0_PAGE_OFFSET + MEMBANK0_PHYS_OFFSET) +#endif + +#endif + +#if defined CONFIG_ARCH_MSM_SCORPION || defined CONFIG_ARCH_MSM_KRAIT +#define arch_has_speculative_dfetch() 1 +#endif + +#endif + +/* these correspond to values known by the modem */ +#define MEMORY_DEEP_POWERDOWN 0 +#define MEMORY_SELF_REFRESH 1 +#define MEMORY_ACTIVE 2 + +#define NPA_MEMORY_NODE_NAME "/mem/apps/ddr_dpd" + +#ifndef CONFIG_ARCH_MSM7X27 +#define CONSISTENT_DMA_SIZE (SZ_1M * 14) +#endif diff --git a/arch/arm/mach-msm/include/mach/mmc.h b/arch/arm/mach-msm/include/mach/mmc.h index d54b6b086cff4..71768d3d3d5c7 100644 --- a/arch/arm/mach-msm/include/mach/mmc.h +++ b/arch/arm/mach-msm/include/mach/mmc.h @@ -17,10 +17,13 @@ struct embedded_sdio_data { struct msm_mmc_platform_data { unsigned int ocr_mask; /* available voltages */ + int built_in; /* built-in device flag */ u32 (*translate_vdd)(struct device *, unsigned int); unsigned int (*status)(struct device *); struct embedded_sdio_data *embedded_sdio; int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id); + unsigned int *slot_type; + unsigned dat0_gpio; }; #endif diff --git a/arch/arm/mach-msm/include/mach/msm_adsp.h b/arch/arm/mach-msm/include/mach/msm_adsp.h new file mode 100644 index 0000000000000..a081683328a31 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/msm_adsp.h @@ -0,0 +1,112 @@ +/* include/asm-arm/arch-msm/msm_adsp.h + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __ASM__ARCH_MSM_ADSP_H +#define __ASM__ARCH_MSM_ADSP_H + +struct msm_adsp_module; + +struct msm_adsp_ops { + /* event is called from interrupt context when a message + * arrives from the DSP. Use the provided function pointer + * to copy the message into a local buffer. Do NOT call + * it multiple times. + */ + void (*event)(void *driver_data, unsigned id, size_t len, + void (*getevent)(void *ptr, size_t len)); +}; + +/* Get, Put, Enable, and Disable are synchronous and must only + * be called from thread context. Enable and Disable will block + * up to one second in the event of a fatal DSP error but are + * much faster otherwise. + */ +int msm_adsp_get(const char *name, struct msm_adsp_module **module, + struct msm_adsp_ops *ops, void *driver_data); +void msm_adsp_put(struct msm_adsp_module *module); +int msm_adsp_enable(struct msm_adsp_module *module); +int msm_adsp_disable(struct msm_adsp_module *module); +int adsp_set_clkrate(struct msm_adsp_module *module, unsigned long clk_rate); + +/* Write is safe to call from interrupt context. 
+ */ +int msm_adsp_write(struct msm_adsp_module *module, + unsigned queue_id, + void *data, size_t len); + +#if CONFIG_MSM_AMSS_VERSION >= 6350 +/* Command Queue Indexes */ +#define QDSP_lpmCommandQueue 0 +#define QDSP_mpuAfeQueue 1 +#define QDSP_mpuGraphicsCmdQueue 2 +#define QDSP_mpuModmathCmdQueue 3 +#define QDSP_mpuVDecCmdQueue 4 +#define QDSP_mpuVDecPktQueue 5 +#define QDSP_mpuVEncCmdQueue 6 +#define QDSP_rxMpuDecCmdQueue 7 +#define QDSP_rxMpuDecPktQueue 8 +#define QDSP_txMpuEncQueue 9 +#define QDSP_uPAudPPCmd1Queue 10 +#define QDSP_uPAudPPCmd2Queue 11 +#define QDSP_uPAudPPCmd3Queue 12 +#define QDSP_uPAudPlay0BitStreamCtrlQueue 13 +#define QDSP_uPAudPlay1BitStreamCtrlQueue 14 +#define QDSP_uPAudPlay2BitStreamCtrlQueue 15 +#define QDSP_uPAudPlay3BitStreamCtrlQueue 16 +#define QDSP_uPAudPlay4BitStreamCtrlQueue 17 +#define QDSP_uPAudPreProcCmdQueue 18 +#define QDSP_uPAudRecBitStreamQueue 19 +#define QDSP_uPAudRecCmdQueue 20 +#define QDSP_uPDiagQueue 21 +#define QDSP_uPJpegActionCmdQueue 22 +#define QDSP_uPJpegCfgCmdQueue 23 +#define QDSP_uPVocProcQueue 24 +#define QDSP_vfeCommandQueue 25 +#define QDSP_vfeCommandScaleQueue 26 +#define QDSP_vfeCommandTableQueue 27 +#define QDSP_MAX_NUM_QUEUES 28 +#else +/* Command Queue Indexes */ +#define QDSP_lpmCommandQueue 0 +#define QDSP_mpuAfeQueue 1 +#define QDSP_mpuGraphicsCmdQueue 2 +#define QDSP_mpuModmathCmdQueue 3 +#define QDSP_mpuVDecCmdQueue 4 +#define QDSP_mpuVDecPktQueue 5 +#define QDSP_mpuVEncCmdQueue 6 +#define QDSP_rxMpuDecCmdQueue 7 +#define QDSP_rxMpuDecPktQueue 8 +#define QDSP_txMpuEncQueue 9 +#define QDSP_uPAudPPCmd1Queue 10 +#define QDSP_uPAudPPCmd2Queue 11 +#define QDSP_uPAudPPCmd3Queue 12 +#define QDSP_uPAudPlay0BitStreamCtrlQueue 13 +#define QDSP_uPAudPlay1BitStreamCtrlQueue 14 +#define QDSP_uPAudPlay2BitStreamCtrlQueue 15 +#define QDSP_uPAudPlay3BitStreamCtrlQueue 16 +#define QDSP_uPAudPlay4BitStreamCtrlQueue 17 +#define QDSP_uPAudPreProcCmdQueue 18 +#define QDSP_uPAudRecBitStreamQueue 19 +#define QDSP_uPAudRecCmdQueue 20 +#define QDSP_uPJpegActionCmdQueue 21 +#define QDSP_uPJpegCfgCmdQueue 22 +#define QDSP_uPVocProcQueue 23 +#define QDSP_vfeCommandQueue 24 +#define QDSP_vfeCommandScaleQueue 25 +#define QDSP_vfeCommandTableQueue 26 +#define QDSP_QUEUE_MAX 26 +#endif + +#endif diff --git a/arch/arm/mach-msm/include/mach/msm_audio_aac.h b/arch/arm/mach-msm/include/mach/msm_audio_aac.h new file mode 100644 index 0000000000000..1aea0faa3d31e --- /dev/null +++ b/arch/arm/mach-msm/include/mach/msm_audio_aac.h @@ -0,0 +1,75 @@ +/* arch/arm/mach-msm/include/mach/msm_audio_aac.h + * + * Copyright (c) 2009 QUALCOMM USA, INC. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * See the GNU General Public License for more details. + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can find it at http://www.fsf.org. 
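Tying the comments above together, here is a hedged sketch of a client that acquires an ADSP module, enables it, and queues one command. The "AUDPPTASK" module name, the command layout, and the queue choice are illustrative assumptions, not values defined by this header.

/* Hedged sketch: acquire, enable, and write to an ADSP module. */
#include <mach/msm_adsp.h>

struct example_cmd {
	uint16_t cmd_id;
	uint16_t arg;
};

static void example_adsp_event(void *driver_data, unsigned id, size_t len,
			       void (*getevent)(void *ptr, size_t len))
{
	struct example_cmd msg;

	/* copy the DSP message into a local buffer, exactly once */
	if (len <= sizeof(msg))
		getevent(&msg, len);
}

static struct msm_adsp_ops example_ops = {
	.event = example_adsp_event,
};

static int example_adsp_start(struct msm_adsp_module **module)
{
	int rc;
	struct example_cmd cmd = { .cmd_id = 1, .arg = 0 };	/* illustrative */

	rc = msm_adsp_get("AUDPPTASK", module, &example_ops, NULL);
	if (rc)
		return rc;

	rc = msm_adsp_enable(*module);
	if (rc) {
		msm_adsp_put(*module);
		return rc;
	}

	/* msm_adsp_write() is also safe from interrupt context */
	return msm_adsp_write(*module, QDSP_uPAudPPCmd1Queue,
			      &cmd, sizeof(cmd));
}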
+ * + */ + +#ifndef __MSM_AUDIO_AAC_H +#define __MSM_AUDIO_AAC_H + +#include + +#define AUDIO_SET_AAC_CONFIG _IOW(AUDIO_IOCTL_MAGIC, \ + (AUDIO_MAX_COMMON_IOCTL_NUM+0), unsigned) +#define AUDIO_GET_AAC_CONFIG _IOR(AUDIO_IOCTL_MAGIC, \ + (AUDIO_MAX_COMMON_IOCTL_NUM+1), unsigned) + +#define AUDIO_AAC_FORMAT_ADTS -1 +#define AUDIO_AAC_FORMAT_RAW 0x0000 +#define AUDIO_AAC_FORMAT_PSUEDO_RAW 0x0001 +#define AUDIO_AAC_FORMAT_LOAS 0x0002 + +#define AUDIO_AAC_OBJECT_LC 0x0002 +#define AUDIO_AAC_OBJECT_LTP 0x0004 +#define AUDIO_AAC_OBJECT_ERLC 0x0011 + +#define AUDIO_AAC_SEC_DATA_RES_ON 0x0001 +#define AUDIO_AAC_SEC_DATA_RES_OFF 0x0000 + +#define AUDIO_AAC_SCA_DATA_RES_ON 0x0001 +#define AUDIO_AAC_SCA_DATA_RES_OFF 0x0000 + +#define AUDIO_AAC_SPEC_DATA_RES_ON 0x0001 +#define AUDIO_AAC_SPEC_DATA_RES_OFF 0x0000 + +#define AUDIO_AAC_SBR_ON_FLAG_ON 0x0001 +#define AUDIO_AAC_SBR_ON_FLAG_OFF 0x0000 + +#define AUDIO_AAC_SBR_PS_ON_FLAG_ON 0x0001 +#define AUDIO_AAC_SBR_PS_ON_FLAG_OFF 0x0000 + +/* Primary channel on both left and right channels */ +#define AUDIO_AAC_DUAL_MONO_PL_PR 0 +/* Secondary channel on both left and right channels */ +#define AUDIO_AAC_DUAL_MONO_SL_SR 1 +/* Primary channel on right channel and 2nd on left channel */ +#define AUDIO_AAC_DUAL_MONO_SL_PR 2 +/* 2nd channel on right channel and primary on left channel */ +#define AUDIO_AAC_DUAL_MONO_PL_SR 3 + +struct msm_audio_aac_config { + signed short format; + unsigned short audio_object; + unsigned short ep_config; /* 0 ~ 3 useful only obj = ERLC */ + unsigned short aac_section_data_resilience_flag; + unsigned short aac_scalefactor_data_resilience_flag; + unsigned short aac_spectral_data_resilience_flag; + unsigned short sbr_on_flag; + unsigned short sbr_ps_on_flag; + unsigned short dual_mono_mode; + unsigned short channel_configuration; +}; + +#endif /* __MSM_AUDIO_AAC_H */ diff --git a/arch/arm/mach-msm/include/mach/msm_audio_qcp.h b/arch/arm/mach-msm/include/mach/msm_audio_qcp.h new file mode 100644 index 0000000000000..28c234667e00a --- /dev/null +++ b/arch/arm/mach-msm/include/mach/msm_audio_qcp.h @@ -0,0 +1,59 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Code Aurora nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef __MSM_AUDIO_QCP_H +#define __MSM_AUDIO_QCP_H + +#include + +#define CDMA_RATE_BLANK 0x00 +#define CDMA_RATE_EIGHTH 0x01 +#define CDMA_RATE_QUARTER 0x02 +#define CDMA_RATE_HALF 0x03 +#define CDMA_RATE_FULL 0x04 +#define CDMA_RATE_ERASURE 0x05 + +struct msm_audio_qcelp_config { + uint32_t channels; + uint32_t cdma_rate; + uint32_t min_bit_rate; + uint32_t max_bit_rate; +}; + +struct msm_audio_evrc_config { + uint32_t channels; + uint32_t cdma_rate; + uint32_t min_bit_rate; + uint32_t max_bit_rate; + uint8_t bit_rate_reduction; + uint8_t hi_pass_filter; + uint8_t noise_suppressor; + uint8_t post_filter; +}; + +#endif /* __MSM_AUDIO_QCP_H */ diff --git a/arch/arm/mach-msm/include/mach/msm_bus.h b/arch/arm/mach-msm/include/mach/msm_bus.h new file mode 100644 index 0000000000000..095af3a6853cb --- /dev/null +++ b/arch/arm/mach-msm/include/mach/msm_bus.h @@ -0,0 +1,134 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, and the entire permission notice in its entirety, + * including the disclaimer of warranties. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * ALTERNATIVELY, this product may be distributed under the terms of + * the GNU General Public License, version 2, in which case the provisions + * of the GPL version 2 are required INSTEAD OF the BSD license. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF + * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. 
+ */ + +#ifndef _ARCH_ARM_MACH_MSM_BUS_H +#define _ARCH_ARM_MACH_MSM_BUS_H + +#include +#include + +/* + * Macros for clients to convert their data to ib and ab + * Ws : Time window over which to transfer the data in SECONDS + * Bs : Size of the data block in bytes + * Per : Recurrence period + * Tb : Throughput bandwidth to prevent stalling + * R : Ratio of actual bandwidth used to Tb + * Ib : Instantaneous bandwidth + * Ab : Arbitrated bandwidth + * + * IB_RECURRBLOCK and AB_RECURRBLOCK: + * These are used if the requirement is to transfer a + * recurring block of data over a known time window. + * + * IB_THROUGHPUTBW and AB_THROUGHPUTBW: + * These are used for CPU style masters. Here the requirement + * is to have minimum throughput bandwidth available to avoid + * stalling. + */ +#define IB_RECURRBLOCK(Ws, Bs) ((Ws) == 0 ? 0 : ((Bs)/(Ws))) +#define AB_RECURRBLOCK(Ws, Per) ((Ws) == 0 ? 0 : ((Bs)/(Per))) +#define IB_THROUGHPUTBW(Tb) (Tb) +#define AB_THROUGHPUTBW(Tb, R) ((Tb) * (R)) + +struct msm_bus_vectors { + int src; /* Master */ + int dst; /* Slave */ + unsigned int ab; /* Arbitrated bandwidth */ + unsigned int ib; /* Instantaneous bandwidth */ +}; + +struct msm_bus_paths { + int num_paths; + struct msm_bus_vectors *vectors; +}; + +struct msm_bus_scale_pdata { + struct msm_bus_paths *usecase; + int num_usecases; + const char *name; + /* + * If the active_only flag is set to 1, the BW request is applied + * only when at least one CPU is active (powered on). If the flag + * is set to 0, then the BW request is always applied irrespective + * of the CPU state. + */ + unsigned int active_only; +}; + +/* Scaling APIs */ + +/* + * This function returns a handle to the client. This should be used to + * call msm_bus_scale_client_update_request. + * The function returns 0 if bus driver is unable to register a client + */ + +#ifdef CONFIG_MSM_BUS_SCALING +uint32_t msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata); +int msm_bus_scale_client_update_request(uint32_t cl, unsigned int index); +void msm_bus_scale_unregister_client(uint32_t cl); +/* AXI Port configuration APIs */ +int msm_bus_axi_porthalt(int master_port); +int msm_bus_axi_portunhalt(int master_port); + +#else +static inline uint32_t +msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata) +{ + return 1; +} + +static inline int +msm_bus_scale_client_update_request(uint32_t cl, unsigned int index) +{ + return 0; +} + +static inline void +msm_bus_scale_unregister_client(uint32_t cl) +{ +} + +static inline int msm_bus_axi_porthalt(int master_port) +{ + return 0; +} + +static inline int msm_bus_axi_portunhalt(int master_port) +{ + return 0; +} +#endif + +#endif /*_ARCH_ARM_MACH_MSM_BUS_H*/ diff --git a/arch/arm/mach-msm/include/mach/msm_fast_timer.h b/arch/arm/mach-msm/include/mach/msm_fast_timer.h new file mode 100644 index 0000000000000..e1660c192a3ca --- /dev/null +++ b/arch/arm/mach-msm/include/mach/msm_fast_timer.h @@ -0,0 +1,19 @@ +/* arch/arm/mach-msm/include/mach/msm_fast_timer.h + * + * Copyright (C) 2009 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +void msm_enable_fast_timer(void); +void msm_disable_fast_timer(void); +u32 msm_read_fast_timer(void); + diff --git a/arch/arm/mach-msm/include/mach/msm_fb.h b/arch/arm/mach-msm/include/mach/msm_fb.h index 1f4fc81b3d8fc..c3d535020c83f 100644 --- a/arch/arm/mach-msm/include/mach/msm_fb.h +++ b/arch/arm/mach-msm/include/mach/msm_fb.h @@ -18,9 +18,30 @@ #define _MSM_FB_H_ #include +#include +#include +#include + struct mddi_info; +/* output interface format */ +#define MSM_MDP_OUT_IF_FMT_RGB565 0 +#define MSM_MDP_OUT_IF_FMT_RGB666 1 +#define MSM_MDP_OUT_IF_FMT_RGB888 2 + +/* mdp override operations */ +#define MSM_MDP_PANEL_IGNORE_PIXEL_DATA (1 << 0) +#define MSM_MDP_PANEL_FLIP_UD (1 << 1) +#define MSM_MDP_PANEL_FLIP_LR (1 << 2) +#define MSM_MDP4_MDDI_DMA_SWITCH (1 << 3) +#define MSM_MDP_DMA_PACK_ALIGN_LSB (1 << 4) +#define MSM_MDP_RGB_PANEL_SELE_REFRESH (1 << 5) + +/* mddi type */ +#define MSM_MDP_MDDI_TYPE_I 0 +#define MSM_MDP_MDDI_TYPE_II 1 + struct msm_fb_data { int xres; /* x resolution in pixels */ int yres; /* y resolution in pixels */ @@ -34,12 +55,33 @@ struct msmfb_callback { }; enum { - MSM_MDDI_PMDH_INTERFACE, + MSM_MDDI_PMDH_INTERFACE = 0, MSM_MDDI_EMDH_INTERFACE, MSM_EBI2_INTERFACE, + MSM_LCDC_INTERFACE, + MSM_TV_INTERFACE, + + MSM_MDP_NUM_INTERFACES = MSM_TV_INTERFACE + 1 }; #define MSMFB_CAP_PARTIAL_UPDATES (1 << 0) +#define MSMFB_CAP_CABC (1 << 1) + +struct msm_lcdc_timing { + unsigned int clk_rate; /* dclk freq */ + unsigned int hsync_pulse_width; /* in dclks */ + unsigned int hsync_back_porch; /* in dclks */ + unsigned int hsync_front_porch; /* in dclks */ + unsigned int hsync_skew; /* in dclks */ + unsigned int vsync_pulse_width; /* in lines */ + unsigned int vsync_back_porch; /* in lines */ + unsigned int vsync_front_porch; /* in lines */ + + /* control signal polarity */ + unsigned int vsync_act_low:1; + unsigned int hsync_act_low:1; + unsigned int den_act_low:1; +}; struct msm_panel_data { /* turns off the fb memory */ @@ -50,9 +92,15 @@ struct msm_panel_data { int (*blank)(struct msm_panel_data *); /* turns on the panel */ int (*unblank)(struct msm_panel_data *); + /* for msmfb shutdown() */ + int (*shutdown)(struct msm_panel_data *); void (*wait_vsync)(struct msm_panel_data *); void (*request_vsync)(struct msm_panel_data *, struct msmfb_callback *); void (*clear_vsync)(struct msm_panel_data *); + void (*dump_vsync)(void); + /* change timing on the fly */ + int (*adjust_timing)(struct msm_panel_data *, struct msm_lcdc_timing *, + u32 xres, u32 yres); /* from the enum above */ unsigned interface_type; /* data to be passed to the fb driver */ @@ -60,6 +108,33 @@ struct msm_panel_data { /* capabilities supported by the panel */ uint32_t caps; + /* + * For samsung driver IC, we always need to indicate where + * to draw. So we pass update_into to mddi client. 
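+ * The update_info rectangle below is that region: left/top are inclusive,
+ * eright/ebottom are exclusive, so a full-frame update on, say, a
+ * 480x800 panel would be {0, 0, 480, 800}.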
+ * + */ + struct { + int left; + int top; + int eright; /* exclusive */ + int ebottom; /* exclusive */ + } update_info; +}; + +enum { + MDP_DMA_P = 0, + MDP_DMA_S, +}; + +struct msm_mdp_platform_data { + /* from the enum above */ + int dma_channel; + unsigned overrides; + unsigned color_format; + int tearing_check; + unsigned sync_config; + unsigned sync_thresh; + unsigned sync_start_pos; }; struct msm_mddi_client_data { @@ -68,6 +143,8 @@ struct msm_mddi_client_data { void (*activate_link)(struct msm_mddi_client_data *); void (*remote_write)(struct msm_mddi_client_data *, uint32_t val, uint32_t reg); + void (*remote_write_vals)(struct msm_mddi_client_data *, uint8_t * val, + uint32_t reg, unsigned int nr_bytes); uint32_t (*remote_read)(struct msm_mddi_client_data *, uint32_t reg); void (*auto_hibernate)(struct msm_mddi_client_data *, int); /* custom data that needs to be passed from the board file to a @@ -85,9 +162,12 @@ struct msm_mddi_platform_data { /* fixup the mfr name, product id */ void (*fixup)(uint16_t *mfr_name, uint16_t *product_id); + int vsync_irq; + struct resource *fb_resource; /*optional*/ /* number of clients in the list that follows */ int num_clients; + unsigned type; /* array of client information of clients */ struct { unsigned product_id; /* mfr id in top 16 bits, product id @@ -110,17 +190,105 @@ struct msm_mddi_platform_data { } client_platform_data[]; }; +struct msm_lcdc_panel_ops { + int (*init)(struct msm_lcdc_panel_ops *); + int (*uninit)(struct msm_lcdc_panel_ops *); + int (*blank)(struct msm_lcdc_panel_ops *); + int (*unblank)(struct msm_lcdc_panel_ops *); + int (*shutdown)(struct msm_lcdc_panel_ops *); +#ifdef CONFIG_PANEL_SELF_REFRESH + int (*refresh_enable)(struct msm_lcdc_panel_ops *); + int (*refresh_disable)(struct msm_lcdc_panel_ops *); +#endif +}; + +struct msm_lcdc_platform_data { + struct msm_lcdc_panel_ops *panel_ops; + struct msm_lcdc_timing *timing; + int fb_id; + struct msm_fb_data *fb_data; + struct resource *fb_resource; +}; + +struct msm_tvenc_platform_data { + struct msm_tvenc_panel_ops *panel_ops; + int fb_id; + struct msm_fb_data *fb_data; + struct resource *fb_resource; + int (*video_relay)(int on_off); +}; + struct mdp_blit_req; struct fb_info; +struct mdp_overlay; +struct msmfb_overlay_data; struct mdp_device { struct device dev; - void (*dma)(struct mdp_device *mpd, uint32_t addr, + void (*dma)(struct mdp_device *mdp, uint32_t addr, uint32_t stride, uint32_t w, uint32_t h, uint32_t x, uint32_t y, struct msmfb_callback *callback, int interface); - void (*dma_wait)(struct mdp_device *mdp); + void (*dma_wait)(struct mdp_device *mdp, int interface); int (*blit)(struct mdp_device *mdp, struct fb_info *fb, struct mdp_blit_req *req); +#ifdef CONFIG_FB_MSM_OVERLAY + int (*overlay_get)(struct mdp_device *mdp, struct fb_info *fb, + struct mdp_overlay *req); + int (*overlay_set)(struct mdp_device *mdp, struct fb_info *fb, + struct mdp_overlay *req); + int (*overlay_unset)(struct mdp_device *mdp, struct fb_info *fb, + int ndx); + int (*overlay_play)(struct mdp_device *mdp, struct fb_info *fb, + struct msmfb_overlay_data *req, struct file **p_src_file); +#endif void (*set_grp_disp)(struct mdp_device *mdp, uint32_t disp_id); + void (*configure_dma)(struct mdp_device *mdp); + int (*check_output_format)(struct mdp_device *mdp, int bpp); + int (*set_output_format)(struct mdp_device *mdp, int bpp); + void (*set_panel_size)(struct mdp_device *mdp, int width, int height); + unsigned color_format; + unsigned overrides; + uint32_t width; /*panel width*/ + 
uint32_t height; /*panel height*/ +}; + +struct msmfb_info { + struct fb_info *fb; + struct msm_panel_data *panel; + int xres; + int yres; + unsigned output_format; + unsigned yoffset; + unsigned frame_requested; + unsigned frame_done; + int sleeping; + unsigned update_frame; + struct { + int left; + int top; + int eright; /* exclusive */ + int ebottom; /* exclusive */ + } update_info; + char *black; +#ifdef CONFIG_HTC_ONMODE_CHARGING + struct early_suspend onchg_earlier_suspend; + struct early_suspend onchg_suspend; +#endif + struct early_suspend earlier_suspend; + struct early_suspend early_suspend; + + struct wake_lock idle_lock; + spinlock_t update_lock; + struct mutex panel_init_lock; + wait_queue_head_t frame_wq; + struct workqueue_struct *resume_workqueue; + struct work_struct resume_work; + struct work_struct msmfb_resume_work; + struct msmfb_callback dma_callback; + struct msmfb_callback vsync_callback; + struct hrtimer fake_vsync; + ktime_t vsync_request_time; + unsigned fb_resumed; + struct ion_client *iclient; }; struct class_interface; @@ -128,6 +296,24 @@ int register_mdp_client(struct class_interface *class_intf); /**** private client data structs go below this line ***/ +/* + * Panel private data, include backlight stuff + * 9/28 09', Jay + * */ +struct panel_data { + int panel_id; + u32 caps; + int shrink; + /* backlight data */ + u8 *pwm; + int min_level; + /* default_br used in turn on backlight, must sync with setting in user space */ + int default_br; + int (*shrink_br)(int brightness); + int (*change_cabcmode)(struct msm_mddi_client_data *client_data, + int mode, u8 dimming); +}; + struct msm_mddi_bridge_platform_data { /* from board file */ int (*init)(struct msm_mddi_bridge_platform_data *, @@ -139,9 +325,56 @@ struct msm_mddi_bridge_platform_data { struct msm_mddi_client_data *); int (*unblank)(struct msm_mddi_bridge_platform_data *, struct msm_mddi_client_data *); + int (*shutdown)(struct msm_mddi_bridge_platform_data *, + struct msm_mddi_client_data *); struct msm_fb_data fb_data; + struct panel_data panel_conf; + /* for those MDDI client which need to re-position display region + after each update or static electricity strike. It should be + implemented in board-xxx-panel due to the function itself need to + send the screen dimensional info of screen to MDDI client. + */ + void (*adjust)(struct msm_mddi_client_data *); +#define SAMSUNG_D 0 +#define SAMSUNG_S6 1 + int bridge_type; + int panel_type; + uint32_t caps; + /* backlight data */ + u8 *pwm; }; +/* + * This is used to communicate event between msm_fb, mddi, mddi_client, + * and board. + * It's mainly used to reset the display system. + * Also, it is used for battery power policy. 
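+ *
+ * Illustrative usage from a board or panel file (disp_event and
+ * reinit_panel are made-up names; only the NOTIFY_* values and the
+ * helpers declared below come from this header):
+ *
+ *	static int disp_event(struct notifier_block *nb,
+ *			      unsigned long val, void *data)
+ *	{
+ *		if (val == NOTIFY_MSM_FB)
+ *			reinit_panel();		board-specific recovery
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	display_notifier(disp_event, 0);	called once at init time
+ *
+ * display_notifier() statically allocates the notifier_block and calls
+ * register_display_notifier(); events are delivered to every registered
+ * callback via display_notifier_call_chain(NOTIFY_xxx, data).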
+ * + */ +#define NOTIFY_MDDI 0x00000000 +#define NOTIFY_POWER 0x00000001 +#define NOTIFY_MSM_FB 0x00000010 +extern int register_display_notifier(struct notifier_block *nb); +extern int display_notifier_call_chain(unsigned long val, void *data); + +#define display_notifier(fn, pri) { \ + static struct notifier_block fn##_nb = \ + { .notifier_call = fn, .priority = pri }; \ + register_display_notifier(&fn##_nb); \ +} + +#if (defined(CONFIG_USB_FUNCTION_PROJECTOR) || defined(CONFIG_USB_ANDROID_PROJECTOR)) +/* For USB Projector to quick access the frame buffer info */ +struct msm_fb_info { + unsigned char *fb_addr; + int msmfb_area; + int xres; + int yres; +}; + +extern int msmfb_get_var(struct msm_fb_info *tmp); +extern int msmfb_get_fb_area(void); +#endif #endif diff --git a/arch/arm/mach-msm/include/mach/msm_flashlight.h b/arch/arm/mach-msm/include/mach/msm_flashlight.h new file mode 100644 index 0000000000000..08efd10298f50 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/msm_flashlight.h @@ -0,0 +1,57 @@ +/* + * arch/arm/mach-msm/include/mach/msm_flashlight.h - The flashlight header + * Copyright (C) 2009 HTC Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __ASM_ARCH_MSM8X50_FLASHLIGHT_H +#define __ASM_ARCH_MSM8X50_FLASHLIGHT_H +#include + +#define FLASHLIGHT_NAME "flashlight" + +#define FLASHLIGHT_OFF 0 +#define FLASHLIGHT_TORCH 1 +#define FLASHLIGHT_FLASH 2 +#define FLASHLIGHT_NUM 3 + +enum flashlight_mode_flags { + FL_MODE_OFF = 0, + FL_MODE_TORCH, + FL_MODE_FLASH, + FL_MODE_PRE_FLASH, + FL_MODE_TORCH_LED_A, + FL_MODE_TORCH_LED_B, + FL_MODE_TORCH_LEVEL_1, + FL_MODE_TORCH_LEVEL_2, + FL_MODE_DEATH_RAY, +}; + +struct flashlight_platform_data { + void (*gpio_init) (void); + uint32_t torch; + uint32_t flash; + uint32_t flash_adj; + uint32_t flash_duration_ms; + uint8_t led_count; /* 0: 1 LED, 1: 2 LED */ +}; + +int flashlight_control(int mode); +int aat1271_flashlight_control(int mode); +int adp1650_flashlight_control(int mode); + +#undef __ASM_ARCH_MSM8X50_FLASHLIGHT_H +#endif diff --git a/arch/arm/mach-msm/include/mach/msm_hdmi.h b/arch/arm/mach-msm/include/mach/msm_hdmi.h new file mode 100644 index 0000000000000..74b45a1b57c61 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/msm_hdmi.h @@ -0,0 +1,25 @@ +/* arch/arm/mach-msm/include/mach/msm_hdmi.h + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _MSM_HDMI_H_ +#define _MSM_HDMI_H_ + +struct hdmi_platform_data { + struct resource hdmi_res; + /* power on hdmi chip */ + int (*power)(int on); /* mandatory */ + void (*hdmi_gpio_on)(void); /* optional */ + void (*hdmi_gpio_off)(void); /* optional */ +}; +#endif diff --git a/arch/arm/mach-msm/include/mach/msm_hsusb.h b/arch/arm/mach-msm/include/mach/msm_hsusb.h new file mode 100644 index 0000000000000..9a8598a70896d --- /dev/null +++ b/arch/arm/mach-msm/include/mach/msm_hsusb.h @@ -0,0 +1,55 @@ +/* linux/include/asm-arm/arch-msm/hsusb.h + * + * Copyright (C) 2008 Google, Inc. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __ASM_ARCH_MSM_HSUSB_H +#define __ASM_ARCH_MSM_HSUSB_H + +#include + +/* platform device data for msm_hsusb driver */ + +struct msm_hsusb_platform_data { + /* hard reset the ULPI PHY */ + void (*phy_reset)(void); + void (*phy_shutdown)(void); + + /* (de)assert the reset to the usb core */ + void (*hw_reset)(bool enable); + + /* for notification when USB is connected or disconnected */ + void (*usb_connected)(int); + /* 1 : uart, 0 : usb */ + void (*usb_uart_switch)(int); + void (*config_usb_id_gpios)(bool enable); + void (*usb_hub_enable)(bool); + void (*serial_debug_gpios)(int); + int (*china_ac_detect)(void); + void (*disable_usb_charger)(void); + /* val, reg pairs terminated by -1 */ + int *phy_init_seq; + + char *serial_number; + int usb_id_pin_gpio; + int dock_pin_gpio; + int id_pin_irq; + bool enable_car_kit_detect; + __u8 accessory_detect; + bool dock_detect; +}; + +int usb_get_connect_type(void); + +#endif diff --git a/arch/arm/mach-msm/include/mach/msm_hsusb_hw.h b/arch/arm/mach-msm/include/mach/msm_hsusb_hw.h new file mode 100644 index 0000000000000..d7dae7a1f17e1 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/msm_hsusb_hw.h @@ -0,0 +1,250 @@ +/* + * Copyright (C) 2007 Google, Inc. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __LINUX_USB_GADGET_MSM72K_UDC_H__ +#define __LINUX_USB_GADGET_MSM72K_UDC_H__ + +/*-------------------------------------------------------------------------*/ + +#define xprintk(level, fmt, args...) \ + printk(level "%s: " fmt , driver_name , ## args) + +#ifdef DEBUG +#undef DEBUG +#define DEBUG(fmt, args...) \ + xprintk(KERN_DEBUG , fmt , ## args) +#else +#define DEBUG(fmt,args...) \ + do { } while (0) +#endif /* DEBUG */ + +#ifdef VERBOSE +#define VDEBUG DEBUG +#else +#define VDEBUG(fmt,args...) \ + do { } while (0) +#endif /* VERBOSE */ + +#define ERROR(fmt,args...) \ + xprintk(KERN_ERR , fmt , ## args) +#define INFO(fmt,args...) 
\ + xprintk(KERN_INFO , fmt , ## args) + +/*-------------------------------------------------------------------------*/ + + +#define USB_ID (MSM_USB_BASE + 0x0000) +#define USB_HWGENERAL (MSM_USB_BASE + 0x0004) +#define USB_HWHOST (MSM_USB_BASE + 0x0008) +#define USB_HWDEVICE (MSM_USB_BASE + 0x000C) +#define USB_HWTXBUF (MSM_USB_BASE + 0x0010) +#define USB_HWRXBUF (MSM_USB_BASE + 0x0014) + +#ifdef CONFIG_ARCH_MSM7X00A +#define USB_SBUSCFG (MSM_USB_BASE + 0x0090) +#else +#define USB_AHBBURST (MSM_USB_BASE + 0x0090) +#define USB_AHBMODE (MSM_USB_BASE + 0x0098) +#endif + +#define USB_CAPLENGTH (MSM_USB_BASE + 0x0100) /* 8 bit */ +#define USB_HCIVERSION (MSM_USB_BASE + 0x0102) /* 16 bit */ +#define USB_HCSPARAMS (MSM_USB_BASE + 0x0104) +#define USB_HCCPARAMS (MSM_USB_BASE + 0x0108) +#define USB_DCIVERSION (MSM_USB_BASE + 0x0120) /* 16 bit */ +#define USB_USBCMD (MSM_USB_BASE + 0x0140) +#define USB_USBSTS (MSM_USB_BASE + 0x0144) +#define USB_USBINTR (MSM_USB_BASE + 0x0148) +#define USB_FRINDEX (MSM_USB_BASE + 0x014C) +#define USB_DEVICEADDR (MSM_USB_BASE + 0x0154) +#define USB_ENDPOINTLISTADDR (MSM_USB_BASE + 0x0158) +#define USB_BURSTSIZE (MSM_USB_BASE + 0x0160) +#define USB_TXFILLTUNING (MSM_USB_BASE + 0x0164) +#define USB_ULPI_VIEWPORT (MSM_USB_BASE + 0x0170) +#define USB_ENDPTNAK (MSM_USB_BASE + 0x0178) +#define USB_ENDPTNAKEN (MSM_USB_BASE + 0x017C) +#define USB_PORTSC (MSM_USB_BASE + 0x0184) +#define USB_OTGSC (MSM_USB_BASE + 0x01A4) +#define USB_USBMODE (MSM_USB_BASE + 0x01A8) +#define USB_ENDPTSETUPSTAT (MSM_USB_BASE + 0x01AC) +#define USB_ENDPTPRIME (MSM_USB_BASE + 0x01B0) +#define USB_ENDPTFLUSH (MSM_USB_BASE + 0x01B4) +#define USB_ENDPTSTAT (MSM_USB_BASE + 0x01B8) +#define USB_ENDPTCOMPLETE (MSM_USB_BASE + 0x01BC) +#define USB_ENDPTCTRL(n) (MSM_USB_BASE + 0x01C0 + (4 * (n))) + + +#define USBCMD_RESET 2 +#define USBCMD_ATTACH 1 +#define USBCMD_ATDTW (1 << 14) + +#define USBMODE_DEVICE 2 +#define USBMODE_HOST 3 + +struct ept_queue_head { + unsigned config; + unsigned active; /* read-only */ + + unsigned next; + unsigned info; + unsigned page0; + unsigned page1; + unsigned page2; + unsigned page3; + unsigned page4; + unsigned reserved_0; + + unsigned char setup_data[8]; + + unsigned reserved_1; + unsigned reserved_2; + unsigned reserved_3; + unsigned reserved_4; +}; + +#define CONFIG_MAX_PKT(n) ((n) << 16) +#define CONFIG_ZLT (1 << 29) /* stop on zero-len xfer */ +#define CONFIG_IOS (1 << 15) /* IRQ on setup */ + +struct ept_queue_item { + unsigned next; + unsigned info; + unsigned page0; + unsigned page1; + unsigned page2; + unsigned page3; + unsigned page4; + unsigned reserved; +}; + +#define TERMINATE 1 + +#define INFO_BYTES(n) ((n) << 16) +#define INFO_IOC (1 << 15) +#define INFO_ACTIVE (1 << 7) +#define INFO_HALTED (1 << 6) +#define INFO_BUFFER_ERROR (1 << 5) +#define INFO_TXN_ERROR (1 << 3) + + +#define STS_NAKI (1 << 16) /* */ +#define STS_SLI (1 << 8) /* R/WC - suspend state entered */ +#define STS_SRI (1 << 7) /* R/WC - SOF recv'd */ +#define STS_URI (1 << 6) /* R/WC - RESET recv'd - write to clear */ +#define STS_FRI (1 << 3) /* R/WC - Frame List Rollover */ +#define STS_PCI (1 << 2) /* R/WC - Port Change Detect */ +#define STS_UEI (1 << 1) /* R/WC - USB Error */ +#define STS_UI (1 << 0) /* R/WC - USB Transaction Complete */ + + +/* bits used in all the endpoint status registers */ +#define EPT_TX(n) (1 << ((n) + 16)) +#define EPT_RX(n) (1 << (n)) + + +#define CTRL_TXE (1 << 23) +#define CTRL_TXR (1 << 22) +#define CTRL_TXI (1 << 21) +#define CTRL_TXD (1 << 17) +#define 
CTRL_TXS (1 << 16) +#define CTRL_RXE (1 << 7) +#define CTRL_RXR (1 << 6) +#define CTRL_RXI (1 << 5) +#define CTRL_RXD (1 << 1) +#define CTRL_RXS (1 << 0) + +#define CTRL_TXT_MASK (3 << 18) +#define CTRL_TXT_CTRL (0 << 18) +#define CTRL_TXT_ISOCH (1 << 18) +#define CTRL_TXT_BULK (2 << 18) +#define CTRL_TXT_INT (3 << 18) +#define CTRL_TXT_EP_TYPE_SHIFT 18 + +#define CTRL_RXT_MASK (3 << 2) +#define CTRL_RXT_CTRL (0 << 2) +#define CTRL_RXT_ISOCH (1 << 2) +#define CTRL_RXT_BULK (2 << 2) +#define CTRL_RXT_INT (3 << 2) +#define CTRL_RXT_EP_TYPE_SHIFT 2 + +#define ULPI_WAKEUP (1 << 31) +#define ULPI_RUN (1 << 30) +#define ULPI_WRITE (1 << 29) +#define ULPI_READ (0 << 29) +#define ULPI_STATE_NORMAL (1 << 27) +#define ULPI_ADDR(n) (((n) & 255) << 16) +#define ULPI_DATA(n) ((n) & 255) +#define ULPI_DATA_READ(n) (((n) >> 8) & 255) + +#define ULPI_DEBUG_REG (0x15) +#define ULPI_SCRATCH_REG (0x16) + +#define ULPI_FUNC_CTRL_CLR (0x06) +#define ULPI_FUNC_SUSPENDM (1 << 6) + + +/* USB_PORTSC bits for determining port speed */ +#define PORTSC_PSPD_FS (0 << 26) +#define PORTSC_PSPD_LS (1 << 26) +#define PORTSC_PSPD_HS (2 << 26) +#define PORTSC_PSPD_MASK (3 << 26) +/* suspend and remote wakeup */ +#define PORTSC_FPR (1 << 6) +#define PORTSC_SUSP (1 << 7) + +#define OTGSC_BSVIE (1 << 27) /* R/W - BSV Interrupt Enable */ +#define OTGSC_DPIE (1 << 30) /* R/W - DataPulse Interrupt Enable */ +#define OTGSC_1MSE (1 << 29) /* R/W - 1ms Interrupt Enable */ +#define OTGSC_BSEIE (1 << 28) /* R/W - BSE Interrupt Enable */ +#define OTGSC_ASVIE (1 << 26) /* R/W - ASV Interrupt Enable */ +#define OTGSC_ASEIE (1 << 25) /* R/W - ASE Interrupt Enable */ +#define OTGSC_IDIE (1 << 24) /* R/W - ID Interrupt Enable */ +#define OTGSC_BSVIS (1 << 19) /* R/W - BSV Interrupt Status */ +#define OTGSC_IDPU (1 << 5) +#define OTGSC_ID (1 << 8) +#define OTGSC_IDIS (1 << 16) +#define B_SESSION_VALID (1 << 11) +#define OTGSC_INTR_MASK (OTGSC_BSVIE | OTGSC_DPIE | OTGSC_1MSE | \ + OTGSC_BSEIE | OTGSC_ASVIE | OTGSC_ASEIE | \ + OTGSC_IDIE) +#define OTGSC_INTR_STS_MASK (0x7f << 16) +#define CURRENT_CONNECT_STATUS (1 << 0) + +/* test mode support */ +#define J_TEST (0x0100) +#define K_TEST (0x0200) +#define SE0_NAK_TEST (0x0300) +#define TST_PKT_TEST (0x0400) +#define PORTSC_PTC (0xf << 16) +#define PORTSC_PTC_J_STATE (0x01 << 16) +#define PORTSC_PTC_K_STATE (0x02 << 16) +#define PORTSC_PTC_SE0_NAK (0x03 << 16) +#define PORTSC_PTC_TST_PKT (0x04 << 16) + +#define PORTSC_PTS_MASK (3 << 30) +#define PORTSC_PTS_ULPI (2 << 30) +#define PORTSC_PTS_SERIAL (3 << 30) + +#define PORTSC_CCS (1 << 0) /* current connect status */ +#define PORTSC_FPR (1 << 6) /* R/W - State normal => suspend */ +#define PORTSC_SUSP (1 << 7) /* Read - Port in suspend state */ +#define PORTSC_PORT_RESET (1 << 8) +#define PORTSC_LS (3 << 10) /* Read - Port's Line status */ +#define PORTSC_PHCD (1 << 23) /* phy suspend mode */ + +#define ULPI_DEBUG 0x15 +#define ULPI_SUSPENDM (1 << 6) + +#endif /* __LINUX_USB_GADGET_MSM72K_UDC_H__ */ diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h b/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h index cfff0e74f128f..571391b605bbb 100644 --- a/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h +++ b/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h @@ -43,35 +43,38 @@ #define IOMEM(x) ((void __force __iomem *)(x)) #endif -#define MSM_VIC_BASE IOMEM(0xE0000000) +#define MSM_VIC_BASE IOMEM(0xF8000000) #define MSM_VIC_PHYS 0xC0000000 #define MSM_VIC_SIZE SZ_4K -#define MSM_CSR_BASE IOMEM(0xE0001000) +#define MSM_CSR_BASE IOMEM(0xF8001000) 
#define MSM_CSR_PHYS 0xC0100000 #define MSM_CSR_SIZE SZ_4K -#define MSM_GPT_PHYS MSM_CSR_PHYS -#define MSM_GPT_BASE MSM_CSR_BASE -#define MSM_GPT_SIZE SZ_4K +#define MSM_TMR_PHYS MSM_CSR_PHYS +#define MSM_TMR_BASE MSM_CSR_BASE +#define MSM_TMR_SIZE SZ_4K -#define MSM_DMOV_BASE IOMEM(0xE0002000) +#define MSM_GPT_BASE MSM_TMR_BASE +#define MSM_DGT_BASE (MSM_TMR_BASE + 0x10) + +#define MSM_DMOV_BASE IOMEM(0xF8002000) #define MSM_DMOV_PHYS 0xA9700000 #define MSM_DMOV_SIZE SZ_4K -#define MSM_GPIO1_BASE IOMEM(0xE0003000) +#define MSM_GPIO1_BASE IOMEM(0xF8003000) #define MSM_GPIO1_PHYS 0xA9200000 #define MSM_GPIO1_SIZE SZ_4K -#define MSM_GPIO2_BASE IOMEM(0xE0004000) +#define MSM_GPIO2_BASE IOMEM(0xF8004000) #define MSM_GPIO2_PHYS 0xA9300000 #define MSM_GPIO2_SIZE SZ_4K -#define MSM_CLK_CTL_BASE IOMEM(0xE0005000) +#define MSM_CLK_CTL_BASE IOMEM(0xF8005000) #define MSM_CLK_CTL_PHYS 0xA8600000 #define MSM_CLK_CTL_SIZE SZ_4K -#define MSM_SHARED_RAM_BASE IOMEM(0xE0100000) +#define MSM_SHARED_RAM_BASE IOMEM(0xF8100000) #define MSM_SHARED_RAM_PHYS 0x01F00000 #define MSM_SHARED_RAM_SIZE SZ_1M @@ -85,7 +88,7 @@ #define MSM_UART3_SIZE SZ_4K #ifdef CONFIG_MSM_DEBUG_UART -#define MSM_DEBUG_UART_BASE 0xE1000000 +#define MSM_DEBUG_UART_BASE 0xF9000000 #if CONFIG_MSM_DEBUG_UART == 1 #define MSM_DEBUG_UART_PHYS MSM_UART1_PHYS #elif CONFIG_MSM_DEBUG_UART == 2 @@ -108,6 +111,9 @@ #define MSM_SDC4_PHYS 0xA0700000 #define MSM_SDC4_SIZE SZ_4K +#define MSM_NAND_PHYS 0xA0A00000 +#define MSM_NAND_SIZE SZ_4K + #define MSM_I2C_PHYS 0xA9900000 #define MSM_I2C_SIZE SZ_4K @@ -123,12 +129,25 @@ #define MSM_MDP_PHYS 0xAA200000 #define MSM_MDP_SIZE 0x000F0000 +#define MSM_MDC_BASE IOMEM(0xF8200000) #define MSM_MDC_PHYS 0xAA500000 #define MSM_MDC_SIZE SZ_1M +#define MSM_AD5_BASE IOMEM(0xF8300000) #define MSM_AD5_PHYS 0xAC000000 #define MSM_AD5_SIZE (SZ_1M*13) +#define MSM_VFE_PHYS 0xA0F00000 +#define MSM_VFE_SIZE SZ_1M + +#define MSM_UART1DM_PHYS 0xA0200000 +#define MSM_UART2DM_PHYS 0xA0300000 + +#define MSM_SSBI_PHYS 0xA8100000 +#define MSM_SSBI_SIZE SZ_4K + +#define MSM_TSSC_PHYS 0xAA300000 +#define MSM_TSSC_SIZE SZ_4K #if defined(CONFIG_ARCH_MSM7X30) #define MSM_GCC_BASE IOMEM(0xF8009000) diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h b/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h index 0fd7b68ca1141..9166b4c12a33a 100644 --- a/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h +++ b/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h @@ -35,11 +35,11 @@ * */ -#define MSM_VIC_BASE IOMEM(0xE0000000) +#define MSM_VIC_BASE IOMEM(0xF8000000) #define MSM_VIC_PHYS 0xC0080000 #define MSM_VIC_SIZE SZ_4K -#define MSM_CSR_BASE IOMEM(0xE0001000) +#define MSM_CSR_BASE IOMEM(0xF8001000) #define MSM_CSR_PHYS 0xC0100000 #define MSM_CSR_SIZE SZ_4K @@ -50,43 +50,43 @@ #define MSM_GPT_BASE (MSM_TMR_BASE + 0x4) #define MSM_DGT_BASE (MSM_TMR_BASE + 0x24) -#define MSM_DMOV_BASE IOMEM(0xE0002000) +#define MSM_DMOV_BASE IOMEM(0xF8002000) #define MSM_DMOV_PHYS 0xAC400000 #define MSM_DMOV_SIZE SZ_4K -#define MSM_GPIO1_BASE IOMEM(0xE0003000) +#define MSM_GPIO1_BASE IOMEM(0xF8003000) #define MSM_GPIO1_PHYS 0xAC001000 #define MSM_GPIO1_SIZE SZ_4K -#define MSM_GPIO2_BASE IOMEM(0xE0004000) +#define MSM_GPIO2_BASE IOMEM(0xF8004000) #define MSM_GPIO2_PHYS 0xAC101000 #define MSM_GPIO2_SIZE SZ_4K -#define MSM_CLK_CTL_BASE IOMEM(0xE0005000) +#define MSM_CLK_CTL_BASE IOMEM(0xF8005000) #define MSM_CLK_CTL_PHYS 0xAB800000 #define MSM_CLK_CTL_SIZE SZ_4K -#define MSM_CLK_CTL_SH2_BASE IOMEM(0xE0006000) +#define MSM_CLK_CTL_SH2_BASE IOMEM(0xF8006000) 
#define MSM_CLK_CTL_SH2_PHYS 0xABA01000 #define MSM_CLK_CTL_SH2_SIZE SZ_4K -#define MSM_ACC_BASE IOMEM(0xE0007000) +#define MSM_ACC_BASE IOMEM(0xF8007000) #define MSM_ACC_PHYS 0xC0101000 #define MSM_ACC_SIZE SZ_4K -#define MSM_SAW_BASE IOMEM(0xE0008000) +#define MSM_SAW_BASE IOMEM(0xF8008000) #define MSM_SAW_PHYS 0xC0102000 #define MSM_SAW_SIZE SZ_4K -#define MSM_GCC_BASE IOMEM(0xE0009000) +#define MSM_GCC_BASE IOMEM(0xF8009000) #define MSM_GCC_PHYS 0xC0182000 #define MSM_GCC_SIZE SZ_4K -#define MSM_TCSR_BASE IOMEM(0xE000A000) +#define MSM_TCSR_BASE IOMEM(0xF800A000) #define MSM_TCSR_PHYS 0xAB600000 #define MSM_TCSR_SIZE SZ_4K -#define MSM_SHARED_RAM_BASE IOMEM(0xE0100000) +#define MSM_SHARED_RAM_BASE IOMEM(0xF8100000) #define MSM_SHARED_RAM_PHYS 0x00100000 #define MSM_SHARED_RAM_SIZE SZ_1M @@ -100,7 +100,7 @@ #define MSM_UART3_SIZE SZ_4K #ifdef CONFIG_MSM_DEBUG_UART -#define MSM_DEBUG_UART_BASE 0xE1000000 +#define MSM_DEBUG_UART_BASE 0xF9000000 #if CONFIG_MSM_DEBUG_UART == 1 #define MSM_DEBUG_UART_PHYS MSM_UART1_PHYS #elif CONFIG_MSM_DEBUG_UART == 2 @@ -111,15 +111,69 @@ #define MSM_DEBUG_UART_SIZE SZ_4K #endif -#define MSM_MDC_BASE IOMEM(0xE0200000) +#define MSM_MDC_BASE IOMEM(0xF8200000) #define MSM_MDC_PHYS 0xAA500000 #define MSM_MDC_SIZE SZ_1M -#define MSM_AD5_BASE IOMEM(0xE0300000) +#define MSM_AD5_BASE IOMEM(0xF8300000) #define MSM_AD5_PHYS 0xA7000000 #define MSM_AD5_SIZE (SZ_1M*13) #define MSM_HSUSB_PHYS 0xA3600000 #define MSM_HSUSB_SIZE SZ_1K +#define MSM_VFE_PHYS 0xA0F00000 +#define MSM_VFE_SIZE SZ_1M + +#define MSM_I2C_SIZE SZ_4K +#define MSM_I2C_PHYS 0xACD00000 + +#define MSM_I2C_2_PHYS 0xACF00000 +#define MSM_I2C_2_SIZE SZ_4K + +#define MSM_QUP_PHYS 0xA8301000 +#define MSM_QUP_SIZE SZ_4K + +#define MSM_GSBI_QUP_I2C_PHYS 0xA8300000 +#define MSM_GSBI_QUP_I2C_SIZE 4 + +#define MSM_PMDH_PHYS 0xAD600000 +#define MSM_PMDH_SIZE SZ_4K + +#define MSM_EMDH_PHYS 0xAD700000 +#define MSM_EMDH_SIZE SZ_4K + +#define MSM_MDP_PHYS 0xA3F00000 +#define MSM_MDP_SIZE 0x000F0000 + +#define MSM_UART1DM_PHYS 0xA0200000 +#define MSM_UART2DM_PHYS 0xA0300000 + +#define MSM_TSSC_PHYS 0xAA300000 +#define MSM_TSSC_SIZE SZ_4K + +#define MSM_SDC1_PHYS 0xA0400000 +#define MSM_SDC1_SIZE SZ_4K + +#define MSM_SDC2_PHYS 0xA0500000 +#define MSM_SDC2_SIZE SZ_4K + +#define MSM_SDC3_PHYS 0xA3000000 +#define MSM_SDC3_SIZE SZ_4K + +#define MSM_SDC4_PHYS 0xA3100000 +#define MSM_SDC4_SIZE SZ_4K + +#define MSM_NAND_PHYS 0xA0200000 +#define MSM_NAND_SIZE SZ_4K + +#define MSM_PMIC_SSBI_PHYS 0xAD900000 +#define MSM_PMIC_SSBI_SIZE SZ_4K + +#define MSM_GPU_REG_PHYS 0xA3500000 +#define MSM_GPU_REG_SIZE SZ_128K + +#define MSM_SPI_PHYS 0xA8000000 +#define MSM_SPI_SIZE SZ_4K + #endif diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h b/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h index acc819eb76e56..eed8606bfb075 100644 --- a/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h +++ b/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h @@ -35,11 +35,11 @@ * */ -#define MSM_VIC_BASE IOMEM(0xE0000000) +#define MSM_VIC_BASE IOMEM(0xF8000000) #define MSM_VIC_PHYS 0xAC000000 #define MSM_VIC_SIZE SZ_4K -#define MSM_CSR_BASE IOMEM(0xE0001000) +#define MSM_CSR_BASE IOMEM(0xF8001000) #define MSM_CSR_PHYS 0xAC100000 #define MSM_CSR_SIZE SZ_4K @@ -50,38 +50,42 @@ #define MSM_GPT_BASE MSM_TMR_BASE #define MSM_DGT_BASE (MSM_TMR_BASE + 0x10) -#define MSM_DMOV_BASE IOMEM(0xE0002000) +#define MSM_DMOV_BASE IOMEM(0xF8002000) #define MSM_DMOV_PHYS 0xA9700000 #define MSM_DMOV_SIZE SZ_4K -#define MSM_GPIO1_BASE IOMEM(0xE0003000) +#define 
MSM_GPIO1_BASE IOMEM(0xF8003000) #define MSM_GPIO1_PHYS 0xA9000000 #define MSM_GPIO1_SIZE SZ_4K -#define MSM_GPIO2_BASE IOMEM(0xE0004000) +#define MSM_GPIO2_BASE IOMEM(0xF8004000) #define MSM_GPIO2_PHYS 0xA9100000 #define MSM_GPIO2_SIZE SZ_4K -#define MSM_CLK_CTL_BASE IOMEM(0xE0005000) +#define MSM_CLK_CTL_BASE IOMEM(0xF8005000) #define MSM_CLK_CTL_PHYS 0xA8600000 #define MSM_CLK_CTL_SIZE SZ_4K -#define MSM_SIRC_BASE IOMEM(0xE1006000) +#define MSM_SIRC_BASE IOMEM(0xF8006000) #define MSM_SIRC_PHYS 0xAC200000 #define MSM_SIRC_SIZE SZ_4K -#define MSM_SCPLL_BASE IOMEM(0xE1007000) +#define MSM_SCPLL_BASE IOMEM(0xF8007000) #define MSM_SCPLL_PHYS 0xA8800000 #define MSM_SCPLL_SIZE SZ_4K +#define MSM_TCSR_BASE IOMEM(0xF8008000) +#define MSM_TCSR_PHYS 0xA8700000 +#define MSM_TCSR_SIZE SZ_4K + #ifdef CONFIG_MSM_SOC_REV_A -#define MSM_SMI_BASE 0xE0000000 +#define MSM_8K_SMI_BASE 0xE0000000 #else -#define MSM_SMI_BASE 0x00000000 +#define MSM_8K_SMI_BASE 0x00000000 #endif -#define MSM_SHARED_RAM_BASE IOMEM(0xE0100000) -#define MSM_SHARED_RAM_PHYS (MSM_SMI_BASE + 0x00100000) +#define MSM_SHARED_RAM_BASE IOMEM(0xF8100000) +#define MSM_SHARED_RAM_PHYS (MSM_8K_SMI_BASE + 0x00100000) #define MSM_SHARED_RAM_SIZE SZ_1M #define MSM_UART1_PHYS 0xA9A00000 @@ -94,7 +98,7 @@ #define MSM_UART3_SIZE SZ_4K #ifdef CONFIG_MSM_DEBUG_UART -#define MSM_DEBUG_UART_BASE 0xE1000000 +#define MSM_DEBUG_UART_BASE 0xF9000000 #if CONFIG_MSM_DEBUG_UART == 1 #define MSM_DEBUG_UART_PHYS MSM_UART1_PHYS #elif CONFIG_MSM_DEBUG_UART == 2 @@ -105,14 +109,16 @@ #define MSM_DEBUG_UART_SIZE SZ_4K #endif -#define MSM_MDC_BASE IOMEM(0xE0200000) +#define MSM_MDC_BASE IOMEM(0xF8200000) #define MSM_MDC_PHYS 0xAA500000 #define MSM_MDC_SIZE SZ_1M -#define MSM_AD5_BASE IOMEM(0xE0300000) +#define MSM_AD5_BASE IOMEM(0xF8300000) #define MSM_AD5_PHYS 0xAC000000 #define MSM_AD5_SIZE (SZ_1M*13) +#define MSM_VFE_PHYS 0xA0F00000 +#define MSM_VFE_SIZE SZ_1M #define MSM_I2C_SIZE SZ_4K #define MSM_I2C_PHYS 0xA9900000 @@ -120,8 +126,17 @@ #define MSM_HSUSB_PHYS 0xA0800000 #define MSM_HSUSB_SIZE SZ_1K -#define MSM_NAND_PHYS 0xA0A00000 +#define MSM_PMDH_PHYS 0xAA600000 +#define MSM_PMDH_SIZE SZ_4K + +#define MSM_EMDH_PHYS 0xAA700000 +#define MSM_EMDH_SIZE SZ_4K + +#define MSM_MDP_PHYS 0xAA200000 +#define MSM_MDP_SIZE 0x000F0000 +#define MSM_NAND_PHYS 0xA0A00000 +#define MSM_NAND_SIZE SZ_4K #define MSM_TSIF_PHYS (0xa0100000) #define MSM_TSIF_SIZE (0x200) @@ -131,17 +146,25 @@ #define MSM_UART1DM_PHYS 0xA0200000 #define MSM_UART2DM_PHYS 0xA0900000 +#define MSM_TSSC_PHYS 0xAA300000 +#define MSM_TSSC_SIZE SZ_4K -#define MSM_SDC1_PHYS 0xA0400000 +#define MSM_SDC1_PHYS 0xA0300000 #define MSM_SDC1_SIZE SZ_4K -#define MSM_SDC2_PHYS 0xA0500000 +#define MSM_SDC2_PHYS 0xA0400000 #define MSM_SDC2_SIZE SZ_4K -#define MSM_SDC3_PHYS 0xA0600000 +#define MSM_SDC3_PHYS 0xA0500000 #define MSM_SDC3_SIZE SZ_4K -#define MSM_SDC4_PHYS 0xA0700000 +#define MSM_SDC4_PHYS 0xA0600000 #define MSM_SDC4_SIZE SZ_4K +#define MSM_GPU_REG_PHYS 0xA0000000 +#define MSM_GPU_REG_SIZE 0x00020000 + +#define MSM_SPI_PHYS 0xA1200000 +#define MSM_SPI_SIZE SZ_4K + #endif diff --git a/arch/arm/mach-msm/include/mach/msm_memtypes.h b/arch/arm/mach-msm/include/mach/msm_memtypes.h new file mode 100644 index 0000000000000..406260a92d227 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/msm_memtypes.h @@ -0,0 +1,81 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +/* The MSM Hardware supports multiple flavors of physical memory. + * This file captures hardware specific information of these types. +*/ + +#ifndef __ASM_ARCH_MSM_MEMTYPES_H +#define __ASM_ARCH_MSM_MEMTYPES_H + +#include +#include + +int __init meminfo_init(unsigned int, unsigned int); +/* Redundant check to prevent this from being included outside of 7x30 */ +#if defined(CONFIG_ARCH_MSM7X30) +unsigned int get_num_populated_chipselects(void); +#endif + +unsigned int get_num_memory_banks(void); +unsigned int get_memory_bank_size(unsigned int); +unsigned int get_memory_bank_start(unsigned int); +int soc_change_memory_power(u64, u64, int); + +enum { + MEMTYPE_NONE = -1, + MEMTYPE_SMI_KERNEL = 0, + MEMTYPE_SMI, + MEMTYPE_EBI0, + MEMTYPE_EBI1, + MEMTYPE_MAX, +}; + +void msm_reserve(void); + +#define MEMTYPE_FLAGS_FIXED 0x1 +#define MEMTYPE_FLAGS_1M_ALIGN 0x2 + +struct memtype_reserve { + unsigned long start; + unsigned long size; + unsigned long limit; + int flags; +}; + +struct reserve_info { + struct memtype_reserve *memtype_reserve_table; + void (*calculate_reserve_sizes)(void); + int (*paddr_to_memtype)(unsigned int); + unsigned long low_unstable_address; + unsigned long max_unstable_size; + unsigned long bank_size; +}; + +extern struct reserve_info *reserve_info; +#endif diff --git a/arch/arm/mach-msm/include/mach/msm_qdsp6_audio.h b/arch/arm/mach-msm/include/mach/msm_qdsp6_audio.h new file mode 100644 index 0000000000000..fcc8ce39ceb33 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/msm_qdsp6_audio.h @@ -0,0 +1,121 @@ +/* arch/arm/mach-msm/include/mach/msm_qdsp6_audio.h + * + * Copyright (C) 2009 Google, Inc. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _MACH_MSM_QDSP6_Q6AUDIO_ +#define _MACH_MSM_QDSP6_Q6AUDIO_ + +#define AUDIO_FLAG_READ 0 +#define AUDIO_FLAG_WRITE 1 + +#include + +struct audio_buffer { + dma_addr_t phys; + void *data; + uint32_t size; + uint32_t used; /* 1 = CPU is waiting for DSP to consume this buf */ +}; + +struct audio_client { + struct audio_buffer buf[2]; + int cpu_buf; /* next buffer the CPU will touch */ + int dsp_buf; /* next buffer the DSP will touch */ + int running; + int session; + + wait_queue_head_t wait; + struct dal_client *client; + + int cb_status; + uint32_t flags; +}; + +#define Q6_HW_HANDSET 0 +#define Q6_HW_HEADSET 1 +#define Q6_HW_SPEAKER 2 +#define Q6_HW_TTY 3 +#define Q6_HW_BT_SCO 4 +#define Q6_HW_BT_A2DP 5 + +#define Q6_HW_COUNT 6 + +struct q6_hw_info { + int min_gain; + int max_gain; +}; + +/* Obtain a 16bit signed, interleaved audio channel of the specified + * rate (Hz) and channels (1 or 2), with two buffers of bufsz bytes. + */ +struct audio_client *q6audio_open_pcm(uint32_t bufsz, uint32_t rate, + uint32_t channels, uint32_t flags, + uint32_t acdb_id); + +struct audio_client *q6voice_open(uint32_t flags, uint32_t acdb_id); + +struct audio_client *q6audio_open_mp3(uint32_t bufsz, uint32_t rate, + uint32_t channels, uint32_t acdb_id); + +struct audio_client *q6fm_open(void); + +struct audio_client *q6audio_open_aac(uint32_t bufsz, uint32_t rate, + uint32_t flags, void *data, uint32_t acdb_id); + +struct audio_client *q6audio_open_qcelp(uint32_t bufsz, uint32_t rate, + void *data, uint32_t acdb_id); + +int q6audio_close(struct audio_client *ac); +int q6voice_close(struct audio_client *ac); +int q6audio_mp3_close(struct audio_client *ac); +int q6fm_close(struct audio_client *ac); +int q6audio_aac_close(struct audio_client *ac); +int q6audio_qcelp_close(struct audio_client *ac); + +int q6audio_read(struct audio_client *ac, struct audio_buffer *ab); +int q6audio_write(struct audio_client *ac, struct audio_buffer *ab); +int q6audio_async(struct audio_client *ac); + +int q6audio_do_routing(uint32_t route, uint32_t acdb_id); +int q6audio_set_tx_mute(int mute); +int q6audio_reinit_acdb(char* filename); +int q6audio_update_acdb(uint32_t id_src, uint32_t id_dst); +int q6audio_set_rx_volume(int level); +int q6audio_set_stream_volume(struct audio_client *ac, int vol); + +struct q6audio_analog_ops { + void (*init)(void); + void (*speaker_enable)(int en); + void (*headset_enable)(int en); + void (*receiver_enable)(int en); + void (*bt_sco_enable)(int en); + void (*int_mic_enable)(int en); + void (*ext_mic_enable)(int en); + void (*i2s_enable)(int en); + int (*get_rx_vol)(uint8_t hw, int level); +}; + +#ifdef CONFIG_MSM_QDSP6 +void q6audio_register_analog_ops(struct q6audio_analog_ops *ops); +void q6audio_set_acdb_file(char* filename); +#else +static inline void q6audio_register_analog_ops(struct q6audio_analog_ops *ops) {} +static inline void q6audio_set_acdb_file(char* filename) {} +#endif + +/* signal non-recoverable DSP error so we can log and/or panic */ +void q6audio_dsp_not_responding(void); + +#endif diff --git a/arch/arm/mach-msm/include/mach/msm_qup.h b/arch/arm/mach-msm/include/mach/msm_qup.h new file mode 100644 index 0000000000000..e95b5d662f5be --- /dev/null +++ b/arch/arm/mach-msm/include/mach/msm_qup.h @@ -0,0 +1,16 @@ 
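+/* Board-supplied configuration for the QUP-based I2C controller. The
+ * field descriptions below are inferred, not taken from the driver:
+ * clk_freq is the bus speed in Hz, pri_clk/pri_dat and aux_clk/aux_dat
+ * are GPIO numbers for the primary and auxiliary SCL/SDA lines, and
+ * msm_i2c_config_gpio() lets the board mux those pins per interface.
+ */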
+#ifndef _MACH_MSM_QUP_H +#define _MACH_MSM_QUP_H + +struct msm_qup_i2c_platform_data { + int clk_freq; + uint32_t rmutex; + const char *rsl_id; + uint32_t pm_lat; + int pri_clk; + int pri_dat; + int aux_clk; + int aux_dat; + void (*msm_i2c_config_gpio)(int iface, int config_type); +}; + +#endif diff --git a/arch/arm/mach-msm/include/mach/msm_rpcrouter.h b/arch/arm/mach-msm/include/mach/msm_rpcrouter.h new file mode 100644 index 0000000000000..b2f8e3ed0278c --- /dev/null +++ b/arch/arm/mach-msm/include/mach/msm_rpcrouter.h @@ -0,0 +1,180 @@ +/** include/asm-arm/arch-msm/msm_rpcrouter.h + * + * Copyright (C) 2007 Google, Inc. + * Copyright (c) 2007-2009 QUALCOMM Incorporated + * Author: San Mehat + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __ASM__ARCH_MSM_RPCROUTER_H +#define __ASM__ARCH_MSM_RPCROUTER_H + +#include +#include +#include + +#if !defined(CONFIG_MSM_LEGACY_7X00A_AMSS) +/* RPC API version structure + * Version bit 31 : 1->hashkey versioning, + * 0->major-minor (backward compatible) versioning + * hashkey versioning: + * Version bits 31-0 hashkey + * major-minor (backward compatible) versioning + * Version bits 30-28 reserved (no match) + * Version bits 27-16 major (must match) + * Version bits 15-0 minor (greater or equal) + */ +#define RPC_VERSION_MODE_MASK 0x80000000 +#define RPC_VERSION_MAJOR_MASK 0x0fff0000 +#define RPC_VERSION_MAJOR_OFFSET 16 +#define RPC_VERSION_MINOR_MASK 0x0000ffff + +#define MSM_RPC_VERS(major, minor) \ + ((uint32_t)((((major) << RPC_VERSION_MAJOR_OFFSET) & \ + RPC_VERSION_MAJOR_MASK) | \ + ((minor) & RPC_VERSION_MINOR_MASK))) +#define MSM_RPC_GET_MAJOR(vers) (((vers) & RPC_VERSION_MAJOR_MASK) >> \ + RPC_VERSION_MAJOR_OFFSET) +#define MSM_RPC_GET_MINOR(vers) ((vers) & RPC_VERSION_MINOR_MASK) +#else +#define MSM_RPC_VERS(major, minor) (major) +#define MSM_RPC_GET_MAJOR(vers) (vers) +#define MSM_RPC_GET_MINOR(vers) 0 +#endif + +struct msm_rpc_endpoint; + +struct rpcsvr_platform_device +{ + struct platform_device base; + uint32_t prog; + uint32_t vers; +}; + +#define RPC_DATA_IN 0 +/* + * Structures for sending / receiving direct RPC requests + * XXX: Any cred/verif lengths > 0 not supported + */ + +struct rpc_request_hdr +{ + uint32_t xid; + uint32_t type; /* 0 */ + uint32_t rpc_vers; /* 2 */ + uint32_t prog; + uint32_t vers; + uint32_t procedure; + uint32_t cred_flavor; + uint32_t cred_length; + uint32_t verf_flavor; + uint32_t verf_length; +}; + +typedef struct +{ + uint32_t low; + uint32_t high; +} rpc_reply_progmismatch_data; + +typedef struct +{ +} rpc_denied_reply_hdr; + +typedef struct +{ + uint32_t verf_flavor; + uint32_t verf_length; + uint32_t accept_stat; +#define RPC_ACCEPTSTAT_SUCCESS 0 +#define RPC_ACCEPTSTAT_PROG_UNAVAIL 1 +#define RPC_ACCEPTSTAT_PROG_MISMATCH 2 +#define RPC_ACCEPTSTAT_PROC_UNAVAIL 3 +#define RPC_ACCEPTSTAT_GARBAGE_ARGS 4 +#define RPC_ACCEPTSTAT_SYSTEM_ERR 5 +#define RPC_ACCEPTSTAT_PROG_LOCKED 6 + /* + * Following data is dependant on accept_stat + * If ACCEPTSTAT == PROG_MISMATCH then there is a + * 'rpc_reply_progmismatch_data' structure following the header. 
+ * Otherwise the data is procedure specific + */ +} rpc_accepted_reply_hdr; + +struct rpc_reply_hdr +{ + uint32_t xid; + uint32_t type; + uint32_t reply_stat; +#define RPCMSG_REPLYSTAT_ACCEPTED 0 +#define RPCMSG_REPLYSTAT_DENIED 1 + union { + rpc_accepted_reply_hdr acc_hdr; + rpc_denied_reply_hdr dny_hdr; + } data; +}; + +/* flags for msm_rpc_connect() */ +#define MSM_RPC_UNINTERRUPTIBLE 0x0001 +#define MSM_RPC_ENABLE_RECEIVE (0x10000) + +/* use IS_ERR() to check for failure */ +struct msm_rpc_endpoint *msm_rpc_open(void); +/* Connect with the specified server version */ +struct msm_rpc_endpoint *msm_rpc_connect(uint32_t prog, uint32_t vers, unsigned flags); +uint32_t msm_rpc_get_vers(struct msm_rpc_endpoint *ept); +/* check if server version can handle client requested version */ +int msm_rpc_is_compatible_version(uint32_t server_version, + uint32_t client_version); + +int msm_rpc_close(struct msm_rpc_endpoint *ept); +int msm_rpc_write(struct msm_rpc_endpoint *ept, + void *data, int len); +int msm_rpc_read(struct msm_rpc_endpoint *ept, + void **data, unsigned len, long timeout); +void msm_rpc_setup_req(struct rpc_request_hdr *hdr, + uint32_t prog, uint32_t vers, uint32_t proc); +int msm_rpc_register_server(struct msm_rpc_endpoint *ept, + uint32_t prog, uint32_t vers); +int msm_rpc_unregister_server(struct msm_rpc_endpoint *ept, + uint32_t prog, uint32_t vers); + +/* simple blocking rpc call + * + * request is mandatory and must have a rpc_request_hdr + * at the start. The header will be filled out for you. + * + * reply provides a buffer for replies of reply_max_size + */ +int msm_rpc_call_reply(struct msm_rpc_endpoint *ept, uint32_t proc, + void *request, int request_size, + void *reply, int reply_max_size, + long timeout); +int msm_rpc_call(struct msm_rpc_endpoint *ept, uint32_t proc, + void *request, int request_size, + long timeout); + +struct msm_rpc_server +{ + struct list_head list; + uint32_t flags; + + uint32_t prog; + uint32_t vers; + + int (*rpc_call)(struct msm_rpc_server *server, + struct rpc_request_hdr *req, unsigned len); +}; + +int msm_rpc_create_server(struct msm_rpc_server *server); + +#endif diff --git a/arch/arm/mach-msm/include/mach/msm_serial_debugger.h b/arch/arm/mach-msm/include/mach/msm_serial_debugger.h new file mode 100644 index 0000000000000..f490b1be4f21a --- /dev/null +++ b/arch/arm/mach-msm/include/mach/msm_serial_debugger.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2009 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __ASM_ARCH_MSM_SERIAL_DEBUGGER_H +#define __ASM_ARCH_MSM_SERIAL_DEBUGGER_H + +#if defined(CONFIG_MSM_SERIAL_DEBUGGER) +void msm_serial_debug_init(unsigned int base, int irq, + struct device *clk_device, int signal_irq, int wakeup_irq); +#else +static inline void msm_serial_debug_init(unsigned int base, int irq, + struct device *clk_device, int signal_irq, int wakeup_irq) {} +#endif + +#endif diff --git a/arch/arm/mach-msm/include/mach/msm_serial_hs.h b/arch/arm/mach-msm/include/mach/msm_serial_hs.h new file mode 100644 index 0000000000000..fe54b02f622d2 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/msm_serial_hs.h @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2008 Google, Inc. + * Author: Nick Pelly + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __ASM_ARCH_MSM_SERIAL_HS_H +#define __ASM_ARCH_MSM_SERIAL_HS_H + +#include + +/* API to request the uart clock off or on for low power management + * Clients should call request_clock_off() when no uart data is expected, + * and must call request_clock_on() before any further uart data can be + * received. */ +extern void msm_hs_request_clock_off(struct uart_port *uport); +extern void msm_hs_request_clock_on(struct uart_port *uport); +/* uport->lock must be held when calling _locked() */ +extern void msm_hs_request_clock_off_locked(struct uart_port *uport); +extern void msm_hs_request_clock_on_locked(struct uart_port *uport); + +/* Optional platform device data for msm_serial_hs driver. + * Used to configure low power rx wakeup */ +struct msm_serial_hs_platform_data { + int rx_wakeup_irq; /* wakeup irq */ + /* bool: inject char into rx tty on wakeup */ + unsigned char inject_rx_on_wakeup; + char rx_to_inject; + + void (*exit_lpm_cb)(struct uart_port *); +}; + +#endif diff --git a/arch/arm/mach-msm/include/mach/msm_smd.h b/arch/arm/mach-msm/include/mach/msm_smd.h index 029463ec8756f..7ed29393cc7b9 100644 --- a/arch/arm/mach-msm/include/mach/msm_smd.h +++ b/arch/arm/mach-msm/include/mach/msm_smd.h @@ -106,4 +106,11 @@ typedef enum { SMD_NUM_PORTS, } smd_port_id_type; +struct smd_tty_channel_desc { + int id; + const char *name; +}; + +int smd_set_channel_list(const struct smd_tty_channel_desc *, int len); + #endif diff --git a/arch/arm/mach-msm/include/mach/msm_spi.h b/arch/arm/mach-msm/include/mach/msm_spi.h new file mode 100644 index 0000000000000..433b2bdd57c16 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/msm_spi.h @@ -0,0 +1,37 @@ +/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Code Aurora nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +/* + * SPI driver for Qualcomm QSD platforms. + */ + +struct msm_spi_platform_data { + u32 max_clock_speed; + int (*gpio_config)(void); + void (*gpio_release)(void); + int (*dma_config)(void); +}; diff --git a/arch/arm/mach-msm/include/mach/msm_ssbi.h b/arch/arm/mach-msm/include/mach/msm_ssbi.h new file mode 100644 index 0000000000000..4d79078c33237 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/msm_ssbi.h @@ -0,0 +1,35 @@ +/* arch/arm/mach-msm/include/mach/msm_ssbi.h + * + * Copyright (C) 2010 Google, Inc. + * Author: Dima Zavin + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __ASM_ARCH_MSM_SSBI_H +#define __ASM_ARCH_MSM_SSBI_H + +#include + +struct msm_ssbi_slave_info { + const char *name; + int irq; + void *platform_data; +}; + +struct msm_ssbi_platform_data { + struct msm_ssbi_slave_info slave; + const char *rspinlock_name; +}; + +int msm_ssbi_write(struct device *dev, u16 addr, u8 *buf, int len); +int msm_ssbi_read(struct device *dev, u16 addr, u8 *buf, int len); +#endif diff --git a/arch/arm/mach-msm/include/mach/msm_ts.h b/arch/arm/mach-msm/include/mach/msm_ts.h new file mode 100644 index 0000000000000..cb41aed7f0705 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/msm_ts.h @@ -0,0 +1,51 @@ +/* arch/arm/mach-msm/include/mach/msm_ts.h + * + * Internal platform definitions for msm/qsd touchscreen devices + * + * Copyright (C) 2008 Google Incorporated + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __ASM_ARCH_MSM_TS_H +#define __ASM_ARCH_MSM_TS_H + +#include + +/* The dimensions for the virtual key are for the other axis, i.e. 
if + * virtual keys are in the Y dimension then min/max is the range in the X + * dimension where that key would be activated */ +struct ts_virt_key { + int key; + int min; + int max; +}; + +struct msm_ts_virtual_keys { + struct ts_virt_key *keys; + int num_keys; +}; + +struct msm_ts_platform_data { + uint32_t min_x; + uint32_t max_x; + uint32_t min_y; + uint32_t max_y; + uint32_t min_press; + uint32_t max_press; + struct msm_ts_virtual_keys *vkeys_x; + uint32_t virt_x_start; + struct msm_ts_virtual_keys *vkeys_y; + uint32_t virt_y_start; + uint32_t inv_x; + uint32_t inv_y; +}; + +#endif /* __ASM_ARCH_MSM_TS_H */ diff --git a/arch/arm/mach-msm/include/mach/perflock.h b/arch/arm/mach-msm/include/mach/perflock.h new file mode 100644 index 0000000000000..34fcd7534e778 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/perflock.h @@ -0,0 +1,68 @@ +/* arch/arm/mach-msm/perflock.h + * + * MSM performance lock driver header + * + * Copyright (C) 2008 HTC Corporation + * Author: Eiven Peng + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __ARCH_ARM_MACH_PERF_LOCK_H +#define __ARCH_ARM_MACH_PERF_LOCK_H + +#include + +/* + * Performance level determine differnt EBI1 rate + */ + +enum { + PERF_LOCK_MEDIUM, /* Medium performance */ + PERF_LOCK_HIGH, /* High performance */ + PERF_LOCK_HIGHEST, /* Highest performance */ + PERF_LOCK_INVALID, +}; + +struct perf_lock { + struct list_head link; + unsigned int flags; + unsigned int level; + const char *name; +}; + +struct perflock_platform_data { + unsigned int *perf_acpu_table; + unsigned int table_size; +}; + +#ifndef CONFIG_PERFLOCK +static inline void __init perflock_init( + struct perflock_platform_data *pdata) { return; } +static inline void perf_lock_init(struct perf_lock *lock, + unsigned int level, const char *name) { return; } +static inline void perf_lock(struct perf_lock *lock) { return; } +static inline void perf_unlock(struct perf_lock *lock) { return; } +static inline int is_perf_lock_active(struct perf_lock *lock) { return 0; } +static inline int is_perf_locked(void) { return 0; } +#else +extern void __init perflock_init(struct perflock_platform_data *pdata); +extern void perf_lock_init(struct perf_lock *lock, + unsigned int level, const char *name); +extern void perf_lock(struct perf_lock *lock); +extern void perf_unlock(struct perf_lock *lock); +extern int is_perf_lock_active(struct perf_lock *lock); +extern int is_perf_locked(void); +#endif + + +#endif + diff --git a/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audplaycmdi.h b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audplaycmdi.h new file mode 100644 index 0000000000000..ece4bc7f9e6eb --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audplaycmdi.h @@ -0,0 +1,94 @@ +#ifndef QDSP5AUDPLAYCMDI_H +#define QDSP5AUDPLAYCMDI_H + +/*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====* + + Q D S P 5 A U D I O P L A Y T A S K C O M M A N D S + +GENERAL DESCRIPTION + Command Interface for AUDPLAYTASK on QDSP5 + +REFERENCES + None + +EXTERNALIZED FUNCTIONS + + audplay_cmd_dec_data_avail + Send buffer to AUDPLAY task + + +Copyright(c) 1992 - 
2009 by QUALCOMM, Incorporated. + +This software is licensed under the terms of the GNU General Public +License version 2, as published by the Free Software Foundation, and +may be copied, distributed, and modified under those terms. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====*/ +/*=========================================================================== + + EDIT HISTORY FOR FILE + +This section contains comments describing changes made to this file. +Notice that changes are listed in reverse chronological order. + +$Header: //source/qcom/qct/multimedia2/Audio/drivers/QDSP5Driver/QDSP5Interface/main/latest/qdsp5audplaycmdi.h#2 $ + +===========================================================================*/ + +#define AUDPLAY_CMD_BITSTREAM_DATA_AVAIL 0x0000 +#define AUDPLAY_CMD_BITSTREAM_DATA_AVAIL_LEN \ + sizeof(audplay_cmd_bitstream_data_avail) + +/* Type specification of dec_data_avail message sent to AUDPLAYTASK +*/ +typedef struct { + /*command ID*/ + unsigned int cmd_id; + + /* Decoder ID for which message is being sent */ + unsigned int decoder_id; + + /* Start address of data in ARM global memory */ + unsigned int buf_ptr; + + /* Number of 16-bit words of bit-stream data contiguously available at the + * above-mentioned address. */ + unsigned int buf_size; + + /* Partition number used by audPlayTask to communicate with DSP's RTOS + * kernel */ + unsigned int partition_number; +} __attribute__((packed)) audplay_cmd_bitstream_data_avail; + +#define AUDPLAY_CMD_HPCM_BUF_CFG 0x0003 +#define AUDPLAY_CMD_HPCM_BUF_CFG_LEN \ + sizeof(struct audplay_cmd_hpcm_buf_cfg) + +struct audplay_cmd_hpcm_buf_cfg { + unsigned int cmd_id; + unsigned int hostpcm_config; + unsigned int feedback_frequency; + unsigned int byte_swap; + unsigned int max_buffers; + unsigned int partition_number; +} __attribute__((packed)); + +#define AUDPLAY_CMD_BUFFER_REFRESH 0x0004 +#define AUDPLAY_CMD_BUFFER_REFRESH_LEN \ + sizeof(struct audplay_cmd_buffer_update) + +struct audplay_cmd_buffer_refresh { + unsigned int cmd_id; + unsigned int num_buffers; + unsigned int buf_read_count; + unsigned int buf0_address; + unsigned int buf0_length; + unsigned int buf1_address; + unsigned int buf1_length; +} __attribute__((packed)); +#endif /* QDSP5AUDPLAYCMD_H */ diff --git a/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audplaymsg.h b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audplaymsg.h new file mode 100644 index 0000000000000..c63034b8bf137 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audplaymsg.h @@ -0,0 +1,70 @@ +#ifndef QDSP5AUDPLAYMSG_H +#define QDSP5AUDPLAYMSG_H + +/*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====* + + Q D S P 5 A U D I O P L A Y T A S K M S G + +GENERAL DESCRIPTION + Message sent by AUDPLAY task + +REFERENCES + None + + +Copyright(c) 1992 - 2009 by QUALCOMM, Incorporated. + +This software is licensed under the terms of the GNU General Public +License version 2, as published by the Free Software Foundation, and +may be copied, distributed, and modified under those terms. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +GNU General Public License for more details. + +*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====*/ +/*=========================================================================== + + EDIT HISTORY FOR FILE + +This section contains comments describing changes made to this file. +Notice that changes are listed in reverse chronological order. + +$Header: //source/qcom/qct/multimedia2/Audio/drivers/QDSP5Driver/QDSP5Interface/main/latest/qdsp5audplaymsg.h#3 $ + +===========================================================================*/ +#define AUDPLAY_MSG_DEC_NEEDS_DATA 0x0001 +#define AUDPLAY_MSG_DEC_NEEDS_DATA_MSG_LEN \ + sizeof(audplay_msg_dec_needs_data) + +typedef struct{ + /* reserved*/ + unsigned int dec_id; + + /* The read pointer offset of external memory until which the + * bitstream has been DMAed in. */ + unsigned int adecDataReadPtrOffset; + + /* The buffer size of external memory. */ + unsigned int adecDataBufSize; + + unsigned int bitstream_free_len; + unsigned int bitstream_write_ptr; + unsigned int bitstarem_buf_start; + unsigned int bitstream_buf_len; +} __attribute__((packed)) audplay_msg_dec_needs_data; + +#define AUDPLAY_MSG_BUFFER_UPDATE 0x0004 +#define AUDPLAY_MSG_BUFFER_UPDATE_LEN \ + sizeof(struct audplay_msg_buffer_update) + +struct audplay_msg_buffer_update { + unsigned int buffer_write_count; + unsigned int num_of_buffer; + unsigned int buf0_address; + unsigned int buf0_length; + unsigned int buf1_address; + unsigned int buf1_length; +} __attribute__((packed)); +#endif /* QDSP5AUDPLAYMSG_H */ diff --git a/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audppcmdi.h b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audppcmdi.h new file mode 100644 index 0000000000000..8bee9c62980b5 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audppcmdi.h @@ -0,0 +1,914 @@ +#ifndef QDSP5AUDPPCMDI_H +#define QDSP5AUDPPCMDI_H + +/*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====* + + A U D I O P O S T P R O C E S S I N G I N T E R N A L C O M M A N D S + +GENERAL DESCRIPTION + This file contains defintions of format blocks of commands + that are accepted by AUDPP Task + +REFERENCES + None + +EXTERNALIZED FUNCTIONS + None + +Copyright(c) 1992 - 2008 by QUALCOMM, Incorporated. + +This software is licensed under the terms of the GNU General Public +License version 2, as published by the Free Software Foundation, and +may be copied, distributed, and modified under those terms. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====*/ +/*=========================================================================== + + EDIT HISTORY FOR FILE + +This section contains comments describing changes made to this file. +Notice that changes are listed in reverse chronological order. 
+ +$Header: //source/qcom/qct/multimedia2/Audio/drivers/QDSP5Driver/QDSP5Interface/main/latest/qdsp5audppcmdi.h#2 $ + +===========================================================================*/ + +/* + * ARM to AUDPPTASK Commands + * + * ARM uses three command queues to communicate with AUDPPTASK + * 1)uPAudPPCmd1Queue : Used for more frequent and shorter length commands + * Location : MEMA + * Buffer Size : 6 words + * No of buffers in a queue : 20 for gaming audio and 5 for other images + * 2)uPAudPPCmd2Queue : Used for commands which are not much lengthier + * Location : MEMA + * Buffer Size : 23 + * No of buffers in a queue : 2 + * 3)uPAudOOCmd3Queue : Used for lengthier and more frequent commands + * Location : MEMA + * Buffer Size : 145 + * No of buffers in a queue : 3 + */ + +/* + * Commands Related to uPAudPPCmd1Queue + */ + +/* + * Command Structure to enable or disable the active decoders + */ + +#define AUDPP_CMD_CFG_DEC_TYPE 0x0001 +#define AUDPP_CMD_CFG_DEC_TYPE_LEN sizeof(audpp_cmd_cfg_dec_type) + +/* Enable the decoder */ +#define AUDPP_CMD_DEC_TYPE_M 0x000F + +#define AUDPP_CMD_ENA_DEC_V 0x4000 +#define AUDPP_CMD_DIS_DEC_V 0x0000 +#define AUDPP_CMD_DEC_STATE_M 0x4000 + +#define AUDPP_CMD_UPDATDE_CFG_DEC 0x8000 +#define AUDPP_CMD_DONT_UPDATE_CFG_DEC 0x0000 + + +/* Type specification of cmd_cfg_dec */ + +typedef struct { + unsigned short cmd_id; + unsigned short dec0_cfg; + unsigned short dec1_cfg; + unsigned short dec2_cfg; + unsigned short dec3_cfg; + unsigned short dec4_cfg; +} __attribute__((packed)) audpp_cmd_cfg_dec_type; + +/* + * Command Structure to Pause , Resume and flushes the selected audio decoders + */ + +#define AUDPP_CMD_DEC_CTRL 0x0002 +#define AUDPP_CMD_DEC_CTRL_LEN sizeof(audpp_cmd_dec_ctrl) + +/* Decoder control commands for pause, resume and flush */ +#define AUDPP_CMD_FLUSH_V 0x2000 + +#define AUDPP_CMD_PAUSE_V 0x4000 +#define AUDPP_CMD_RESUME_V 0x0000 + +#define AUDPP_CMD_UPDATE_V 0x8000 +#define AUDPP_CMD_IGNORE_V 0x0000 + + +/* Type Spec for decoder control command*/ + +typedef struct { + unsigned short cmd_id; + unsigned short dec0_ctrl; + unsigned short dec1_ctrl; + unsigned short dec2_ctrl; + unsigned short dec3_ctrl; + unsigned short dec4_ctrl; +} __attribute__((packed)) audpp_cmd_dec_ctrl; + +/* + * Command Structure to Configure the AVSync FeedBack Mechanism + */ + +#define AUDPP_CMD_AVSYNC 0x0003 +#define AUDPP_CMD_AVSYNC_LEN sizeof(audpp_cmd_avsync) + +typedef struct { + unsigned short cmd_id; + unsigned short object_number; + unsigned short interrupt_interval_lsw; + unsigned short interrupt_interval_msw; +} __attribute__((packed)) audpp_cmd_avsync; + +/* + * Command Structure to enable or disable(sleep) the AUDPPTASK + */ + +#define AUDPP_CMD_CFG 0x0004 +#define AUDPP_CMD_CFG_LEN sizeof(audpp_cmd_cfg) + +#define AUDPP_CMD_CFG_SLEEP 0x0000 +#define AUDPP_CMD_CFG_ENABLE 0xFFFF + +typedef struct { + unsigned short cmd_id; + unsigned short cfg; +} __attribute__((packed)) audpp_cmd_cfg; + +/* + * Command Structure to Inject or drop the specified no of samples + */ + +#define AUDPP_CMD_ADJUST_SAMP 0x0005 +#define AUDPP_CMD_ADJUST_SAMP_LEN sizeof(audpp_cmd_adjust_samp) + +#define AUDPP_CMD_SAMP_DROP -1 +#define AUDPP_CMD_SAMP_INSERT 0x0001 + +#define AUDPP_CMD_NUM_SAMPLES 0x0001 + +typedef struct { + unsigned short cmd_id; + unsigned short object_no; + signed short sample_insert_or_drop; + unsigned short num_samples; +} __attribute__((packed)) audpp_cmd_adjust_samp; + +/* + * Command Structure to Configure AVSync Feedback Mechanism + */ + 
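+/* The interrupt interval below is a 32-bit value split into 16-bit
+ * LSW/MSW words (its units are not spelled out in this header), and the
+ * sample/byte counters each span three 16-bit words. A minimal sketch of
+ * the assumed packing on the ARM side, with `cmd` a hypothetical
+ * audpp_cmd_avsync_cmd_2 instance:
+ *
+ *	cmd.interrupt_interval_lsw = interval & 0xFFFF;
+ *	cmd.interrupt_interval_msw = (interval >> 16) & 0xFFFF;
+ */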
+#define AUDPP_CMD_AVSYNC_CMD_2 0x0006 +#define AUDPP_CMD_AVSYNC_CMD_2_LEN sizeof(audpp_cmd_avsync_cmd_2) + +typedef struct { + unsigned short cmd_id; + unsigned short object_number; + unsigned short interrupt_interval_lsw; + unsigned short interrupt_interval_msw; + unsigned short sample_counter_dlsw; + unsigned short sample_counter_dmsw; + unsigned short sample_counter_msw; + unsigned short byte_counter_dlsw; + unsigned short byte_counter_dmsw; + unsigned short byte_counter_msw; +} __attribute__((packed)) audpp_cmd_avsync_cmd_2; + +/* + * Command Structure to Configure AVSync Feedback Mechanism + */ + +#define AUDPP_CMD_AVSYNC_CMD_3 0x0007 +#define AUDPP_CMD_AVSYNC_CMD_3_LEN sizeof(audpp_cmd_avsync_cmd_3) + +typedef struct { + unsigned short cmd_id; + unsigned short object_number; + unsigned short interrupt_interval_lsw; + unsigned short interrupt_interval_msw; + unsigned short sample_counter_dlsw; + unsigned short sample_counter_dmsw; + unsigned short sample_counter_msw; + unsigned short byte_counter_dlsw; + unsigned short byte_counter_dmsw; + unsigned short byte_counter_msw; +} __attribute__((packed)) audpp_cmd_avsync_cmd_3; + +#define AUDPP_CMD_ROUTING_MODE 0x0008 +#define AUDPP_CMD_ROUTING_MODE_LEN \ +sizeof(struct audpp_cmd_routing_mode) + +struct audpp_cmd_routing_mode { + unsigned short cmd_id; + unsigned short object_number; + unsigned short routing_mode; +} __attribute__((packed)); + +/* + * Commands Related to uPAudPPCmd2Queue + */ + +/* + * Command Structure to configure Per decoder Parameters (Common) + */ + +#define AUDPP_CMD_CFG_ADEC_PARAMS 0x0000 +#define AUDPP_CMD_CFG_ADEC_PARAMS_COMMON_LEN \ + sizeof(audpp_cmd_cfg_adec_params_common) + +#define AUDPP_CMD_STATUS_MSG_FLAG_ENA_FCM 0x4000 +#define AUDPP_CMD_STATUS_MSG_FLAG_DIS_FCM 0x0000 + +#define AUDPP_CMD_STATUS_MSG_FLAG_ENA_DCM 0x8000 +#define AUDPP_CMD_STATUS_MSG_FLAG_DIS_DCM 0x0000 + +/* Sampling frequency*/ +#define AUDPP_CMD_SAMP_RATE_96000 0x0000 +#define AUDPP_CMD_SAMP_RATE_88200 0x0001 +#define AUDPP_CMD_SAMP_RATE_64000 0x0002 +#define AUDPP_CMD_SAMP_RATE_48000 0x0003 +#define AUDPP_CMD_SAMP_RATE_44100 0x0004 +#define AUDPP_CMD_SAMP_RATE_32000 0x0005 +#define AUDPP_CMD_SAMP_RATE_24000 0x0006 +#define AUDPP_CMD_SAMP_RATE_22050 0x0007 +#define AUDPP_CMD_SAMP_RATE_16000 0x0008 +#define AUDPP_CMD_SAMP_RATE_12000 0x0009 +#define AUDPP_CMD_SAMP_RATE_11025 0x000A +#define AUDPP_CMD_SAMP_RATE_8000 0x000B + + +/* + * Type specification of cmd_adec_cfg sent to all decoder + */ + +typedef struct { + unsigned short cmd_id; + unsigned short length; + unsigned short dec_id; + unsigned short status_msg_flag; + unsigned short decoder_frame_counter_msg_period; + unsigned short input_sampling_frequency; +} __attribute__((packed)) audpp_cmd_cfg_adec_params_common; + +/* + * Command Structure to configure Per decoder Parameters (Wav) + */ + +#define AUDPP_CMD_CFG_ADEC_PARAMS_WAV_LEN \ + sizeof(audpp_cmd_cfg_adec_params_wav) + + +#define AUDPP_CMD_WAV_STEREO_CFG_MONO 0x0001 +#define AUDPP_CMD_WAV_STEREO_CFG_STEREO 0x0002 + +#define AUDPP_CMD_WAV_PCM_WIDTH_8 0x0000 +#define AUDPP_CMD_WAV_PCM_WIDTH_16 0x0001 +#define AUDPP_CMD_WAV_PCM_WIDTH_32 0x0002 + +typedef struct { + audpp_cmd_cfg_adec_params_common common; + unsigned short stereo_cfg; + unsigned short pcm_width; + unsigned short sign; +} __attribute__((packed)) audpp_cmd_cfg_adec_params_wav; + +/* + * Command Structure to configure Per decoder Parameters (ADPCM) + */ + +#define AUDPP_CMD_CFG_ADEC_PARAMS_ADPCM_LEN \ + sizeof(audpp_cmd_cfg_adec_params_adpcm) + + +#define 
AUDPP_CMD_ADPCM_STEREO_CFG_MONO 0x0001 +#define AUDPP_CMD_ADPCM_STEREO_CFG_STEREO 0x0002 + +typedef struct { + audpp_cmd_cfg_adec_params_common common; + unsigned short stereo_cfg; + unsigned short block_size; +} __attribute__((packed)) audpp_cmd_cfg_adec_params_adpcm; + +/* + * Command Structure to configure Per decoder Parameters (MP3) + */ + +#define AUDPP_CMD_CFG_ADEC_PARAMS_MP3_LEN \ + sizeof(audpp_cmd_cfg_adec_params_mp3) + +typedef struct { + audpp_cmd_cfg_adec_params_common common; +} __attribute__((packed)) audpp_cmd_cfg_adec_params_mp3; + + +/* + * Command Structure to configure Per decoder Parameters (AAC) + */ + +#define AUDPP_CMD_CFG_ADEC_PARAMS_AAC_LEN \ + sizeof(audpp_cmd_cfg_adec_params_aac) + + +#define AUDPP_CMD_AAC_FORMAT_ADTS -1 +#define AUDPP_CMD_AAC_FORMAT_RAW 0x0000 +#define AUDPP_CMD_AAC_FORMAT_PSUEDO_RAW 0x0001 +#define AUDPP_CMD_AAC_FORMAT_LOAS 0x0002 + +#define AUDPP_CMD_AAC_AUDIO_OBJECT_LC 0x0002 +#define AUDPP_CMD_AAC_AUDIO_OBJECT_LTP 0x0004 +#define AUDPP_CMD_AAC_AUDIO_OBJECT_ERLC 0x0011 + +#define AUDPP_CMD_AAC_SBR_ON_FLAG_ON 0x0001 +#define AUDPP_CMD_AAC_SBR_ON_FLAG_OFF 0x0000 + +#define AUDPP_CMD_AAC_SBR_PS_ON_FLAG_ON 0x0001 +#define AUDPP_CMD_AAC_SBR_PS_ON_FLAG_OFF 0x0000 + +typedef struct { + audpp_cmd_cfg_adec_params_common common; + signed short format; + unsigned short audio_object; + unsigned short ep_config; + unsigned short aac_section_data_resilience_flag; + unsigned short aac_scalefactor_data_resilience_flag; + unsigned short aac_spectral_data_resilience_flag; + unsigned short sbr_on_flag; + unsigned short sbr_ps_on_flag; + unsigned short dual_mono_mode; + unsigned short channel_configuration; +} __attribute__((packed)) audpp_cmd_cfg_adec_params_aac; + +/* + * Command Structure to configure Per decoder Parameters (V13K) + */ + +#define AUDPP_CMD_CFG_ADEC_PARAMS_V13K_LEN \ + sizeof(struct audpp_cmd_cfg_adec_params_v13k) + + +#define AUDPP_CMD_STEREO_CFG_MONO 0x0001 +#define AUDPP_CMD_STEREO_CFG_STEREO 0x0002 + +struct audpp_cmd_cfg_adec_params_v13k { + audpp_cmd_cfg_adec_params_common common; + unsigned short stereo_cfg; +} __attribute__((packed)); + +#define AUDPP_CMD_CFG_ADEC_PARAMS_EVRC_LEN \ + sizeof(struct audpp_cmd_cfg_adec_params_evrc) + +struct audpp_cmd_cfg_adec_params_evrc { + audpp_cmd_cfg_adec_params_common common; + unsigned short stereo_cfg; +} __attribute__ ((packed)); + +/* + * Command Structure to configure the HOST PCM interface + */ + +#define AUDPP_CMD_PCM_INTF 0x0001 +#define AUDPP_CMD_PCM_INTF_2 0x0002 +#define AUDPP_CMD_PCM_INTF_LEN sizeof(audpp_cmd_pcm_intf) + +#define AUDPP_CMD_PCM_INTF_MONO_V 0x0001 +#define AUDPP_CMD_PCM_INTF_STEREO_V 0x0002 + +/* These two values differentiate the two types of commands that could be issued + * Interface configuration command and Buffer update command */ + +#define AUDPP_CMD_PCM_INTF_CONFIG_CMD_V 0x0000 +#define AUDPP_CMD_PCM_INTF_BUFFER_CMD_V -1 + +#define AUDPP_CMD_PCM_INTF_RX_ENA_M 0x000F +#define AUDPP_CMD_PCM_INTF_RX_ENA_ARMTODSP_V 0x0008 +#define AUDPP_CMD_PCM_INTF_RX_ENA_DSPTOARM_V 0x0004 + +/* These flags control the enabling and disabling of the interface together + * with host interface bit mask. 
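+ * (AUDPP_CMD_PCM_INTF_ENA_V / AUDPP_CMD_PCM_INTF_DIS_V presumably supply the
+ * 0xFFFF / 0x0000 values expected in the dsp_to_arm_flag and arm_to_rx_flag
+ * fields of audpp_cmd_pcm_intf below; -1 stored in a 16-bit field reads back
+ * as 0xFFFF.)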
*/ + +#define AUDPP_CMD_PCM_INTF_ENA_V -1 +#define AUDPP_CMD_PCM_INTF_DIS_V 0x0000 + + +#define AUDPP_CMD_PCM_INTF_FULL_DUPLEX 0x0 +#define AUDPP_CMD_PCM_INTF_HALF_DUPLEX_TODSP 0x1 + + +#define AUDPP_CMD_PCM_INTF_OBJECT_NUM 0x5 +#define AUDPP_CMD_PCM_INTF_COMMON_OBJECT_NUM 0x6 + + +typedef struct { + unsigned short cmd_id; + unsigned short object_num; + signed short config; + unsigned short intf_type; + + /* DSP -> ARM Configuration */ + unsigned short read_buf1LSW; + unsigned short read_buf1MSW; + unsigned short read_buf1_len; + + unsigned short read_buf2LSW; + unsigned short read_buf2MSW; + unsigned short read_buf2_len; + /* 0:HOST_PCM_INTF disable + ** 0xFFFF: HOST_PCM_INTF enable + */ + signed short dsp_to_arm_flag; + unsigned short partition_number; + + /* ARM -> DSP Configuration */ + unsigned short write_buf1LSW; + unsigned short write_buf1MSW; + unsigned short write_buf1_len; + + unsigned short write_buf2LSW; + unsigned short write_buf2MSW; + unsigned short write_buf2_len; + + /* 0:HOST_PCM_INTF disable + ** 0xFFFF: HOST_PCM_INTF enable + */ + signed short arm_to_rx_flag; + unsigned short weight_decoder_to_rx; + unsigned short weight_arm_to_rx; + + unsigned short partition_number_arm_to_dsp; + unsigned short sample_rate; + unsigned short channel_mode; +} __attribute__((packed)) audpp_cmd_pcm_intf; + +/* + ** BUFFER UPDATE COMMAND + */ +#define AUDPP_CMD_PCM_INTF_SEND_BUF_PARAMS_LEN \ + sizeof(audpp_cmd_pcm_intf_send_buffer) + +typedef struct { + unsigned short cmd_id; + unsigned short host_pcm_object; + /* set config = 0xFFFF for configuration*/ + signed short config; + unsigned short intf_type; + unsigned short dsp_to_arm_buf_id; + unsigned short arm_to_dsp_buf_id; + unsigned short arm_to_dsp_buf_len; +} __attribute__((packed)) audpp_cmd_pcm_intf_send_buffer; + + +/* + * Commands Related to uPAudPPCmd3Queue + */ + +/* + * Command Structure to configure post processing params (Commmon) + */ + +#define AUDPP_CMD_CFG_OBJECT_PARAMS 0x0000 +#define AUDPP_CMD_CFG_OBJECT_PARAMS_COMMON_LEN \ + sizeof(audpp_cmd_cfg_object_params_common) + +#define AUDPP_CMD_OBJ0_UPDATE 0x8000 +#define AUDPP_CMD_OBJ0_DONT_UPDATE 0x0000 + +#define AUDPP_CMD_OBJ1_UPDATE 0x8000 +#define AUDPP_CMD_OBJ1_DONT_UPDATE 0x0000 + +#define AUDPP_CMD_OBJ2_UPDATE 0x8000 +#define AUDPP_CMD_OBJ2_DONT_UPDATE 0x0000 + +#define AUDPP_CMD_OBJ3_UPDATE 0x8000 +#define AUDPP_CMD_OBJ3_DONT_UPDATE 0x0000 + +#define AUDPP_CMD_OBJ4_UPDATE 0x8000 +#define AUDPP_CMD_OBJ4_DONT_UPDATE 0x0000 + +#define AUDPP_CMD_HPCM_UPDATE 0x8000 +#define AUDPP_CMD_HPCM_DONT_UPDATE 0x0000 + +#define AUDPP_CMD_COMMON_CFG_UPDATE 0x8000 +#define AUDPP_CMD_COMMON_CFG_DONT_UPDATE 0x0000 + +typedef struct { + unsigned short cmd_id; + unsigned short obj0_cfg; + unsigned short obj1_cfg; + unsigned short obj2_cfg; + unsigned short obj3_cfg; + unsigned short obj4_cfg; + unsigned short host_pcm_obj_cfg; + unsigned short comman_cfg; + unsigned short command_type; +} __attribute__((packed)) audpp_cmd_cfg_object_params_common; + +/* + * Command Structure to configure post processing params (Volume) + */ + +#define AUDPP_CMD_CFG_OBJECT_PARAMS_VOLUME_LEN \ + sizeof(audpp_cmd_cfg_object_params_volume) + +typedef struct { + audpp_cmd_cfg_object_params_common common; + unsigned short volume; + unsigned short pan; +} __attribute__((packed)) audpp_cmd_cfg_object_params_volume; + +/* + * Command Structure to configure post processing params (PCM Filter) --DOUBT + */ + +typedef struct { + unsigned short numerator_b0_filter_lsw; + unsigned short numerator_b0_filter_msw; + 
unsigned short numerator_b1_filter_lsw; + unsigned short numerator_b1_filter_msw; + unsigned short numerator_b2_filter_lsw; + unsigned short numerator_b2_filter_msw; +} __attribute__((packed)) numerator; + +typedef struct { + unsigned short denominator_a0_filter_lsw; + unsigned short denominator_a0_filter_msw; + unsigned short denominator_a1_filter_lsw; + unsigned short denominator_a1_filter_msw; +} __attribute__((packed)) denominator; + +typedef struct { + unsigned short shift_factor_0; +} __attribute__((packed)) shift_factor; + +typedef struct { + unsigned short pan_filter_0; +} __attribute__((packed)) pan; + +typedef struct { + numerator numerator_filter; + denominator denominator_filter; + shift_factor shift_factor_filter; + pan pan_filter; +} __attribute__((packed)) filter_1; + +typedef struct { + numerator numerator_filter[2]; + denominator denominator_filter[2]; + shift_factor shift_factor_filter[2]; + pan pan_filter[2]; +} __attribute__((packed)) filter_2; + +typedef struct { + numerator numerator_filter[3]; + denominator denominator_filter[3]; + shift_factor shift_factor_filter[3]; + pan pan_filter[3]; +} __attribute__((packed)) filter_3; + +typedef struct { + numerator numerator_filter[4]; + denominator denominator_filter[4]; + shift_factor shift_factor_filter[4]; + pan pan_filter[4]; +} __attribute__((packed)) filter_4; + +#define AUDPP_CMD_CFG_OBJECT_PARAMS_PCM_LEN \ + sizeof(audpp_cmd_cfg_object_params_pcm) + + +typedef struct { + audpp_cmd_cfg_object_params_common common; + unsigned short active_flag; + unsigned short num_bands; + union { + filter_1 filter_1_params; + filter_2 filter_2_params; + filter_3 filter_3_params; + filter_4 filter_4_params; + } __attribute__((packed)) params_filter; +} __attribute__((packed)) audpp_cmd_cfg_object_params_pcm; + + +/* + * Command Structure to configure post processing parameters (equalizer) + */ + +#define AUDPP_CMD_CFG_OBJECT_PARAMS_EQALIZER_LEN \ + sizeof(audpp_cmd_cfg_object_params_eqalizer) + +typedef struct { + unsigned short numerator_coeff_0_lsw; + unsigned short numerator_coeff_0_msw; + unsigned short numerator_coeff_1_lsw; + unsigned short numerator_coeff_1_msw; + unsigned short numerator_coeff_2_lsw; + unsigned short numerator_coeff_2_msw; +} __attribute__((packed)) eq_numerator; + +typedef struct { + unsigned short denominator_coeff_0_lsw; + unsigned short denominator_coeff_0_msw; + unsigned short denominator_coeff_1_lsw; + unsigned short denominator_coeff_1_msw; +} __attribute__((packed)) eq_denominator; + +typedef struct { + unsigned short shift_factor; +} __attribute__((packed)) eq_shiftfactor; + +typedef struct { + eq_numerator numerator; + eq_denominator denominator; + eq_shiftfactor shiftfactor; +} __attribute__((packed)) eq_coeff_1; + +typedef struct { + eq_numerator numerator[2]; + eq_denominator denominator[2]; + eq_shiftfactor shiftfactor[2]; +} __attribute__((packed)) eq_coeff_2; + +typedef struct { + eq_numerator numerator[3]; + eq_denominator denominator[3]; + eq_shiftfactor shiftfactor[3]; +} __attribute__((packed)) eq_coeff_3; + +typedef struct { + eq_numerator numerator[4]; + eq_denominator denominator[4]; + eq_shiftfactor shiftfactor[4]; +} __attribute__((packed)) eq_coeff_4; + +typedef struct { + eq_numerator numerator[5]; + eq_denominator denominator[5]; + eq_shiftfactor shiftfactor[5]; +} __attribute__((packed)) eq_coeff_5; + +typedef struct { + eq_numerator numerator[6]; + eq_denominator denominator[6]; + eq_shiftfactor shiftfactor[6]; +} __attribute__((packed)) eq_coeff_6; + +typedef struct { + 
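+	/* One numerator/denominator/shift-factor set per band; this variant
+	 * describes a 7-band equalizer configuration (see the eq_coeff union
+	 * in audpp_cmd_cfg_object_params_eqalizer below). */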
eq_numerator numerator[7]; + eq_denominator denominator[7]; + eq_shiftfactor shiftfactor[7]; +} __attribute__((packed)) eq_coeff_7; + +typedef struct { + eq_numerator numerator[8]; + eq_denominator denominator[8]; + eq_shiftfactor shiftfactor[8]; +} __attribute__((packed)) eq_coeff_8; + +typedef struct { + eq_numerator numerator[9]; + eq_denominator denominator[9]; + eq_shiftfactor shiftfactor[9]; +} __attribute__((packed)) eq_coeff_9; + +typedef struct { + eq_numerator numerator[10]; + eq_denominator denominator[10]; + eq_shiftfactor shiftfactor[10]; +} __attribute__((packed)) eq_coeff_10; + +typedef struct { + eq_numerator numerator[11]; + eq_denominator denominator[11]; + eq_shiftfactor shiftfactor[11]; +} __attribute__((packed)) eq_coeff_11; + +typedef struct { + eq_numerator numerator[12]; + eq_denominator denominator[12]; + eq_shiftfactor shiftfactor[12]; +} __attribute__((packed)) eq_coeff_12; + + +typedef struct { + audpp_cmd_cfg_object_params_common common; + unsigned short eq_flag; + unsigned short num_bands; + union { + eq_coeff_1 eq_coeffs_1; + eq_coeff_2 eq_coeffs_2; + eq_coeff_3 eq_coeffs_3; + eq_coeff_4 eq_coeffs_4; + eq_coeff_5 eq_coeffs_5; + eq_coeff_6 eq_coeffs_6; + eq_coeff_7 eq_coeffs_7; + eq_coeff_8 eq_coeffs_8; + eq_coeff_9 eq_coeffs_9; + eq_coeff_10 eq_coeffs_10; + eq_coeff_11 eq_coeffs_11; + eq_coeff_12 eq_coeffs_12; + } __attribute__((packed)) eq_coeff; +} __attribute__((packed)) audpp_cmd_cfg_object_params_eqalizer; + + +/* + * Command Structure to configure post processing parameters (ADRC) + */ + +#define AUDPP_CMD_CFG_OBJECT_PARAMS_ADRC_LEN \ + sizeof(audpp_cmd_cfg_object_params_adrc) + + +#define AUDPP_CMD_ADRC_FLAG_DIS 0x0000 +#define AUDPP_CMD_ADRC_FLAG_ENA -1 + +typedef struct { + audpp_cmd_cfg_object_params_common common; + signed short adrc_flag; + unsigned short compression_th; + unsigned short compression_slope; + unsigned short rms_time; + unsigned short attack_const_lsw; + unsigned short attack_const_msw; + unsigned short release_const_lsw; + unsigned short release_const_msw; + unsigned short adrc_system_delay; +} __attribute__((packed)) audpp_cmd_cfg_object_params_adrc; + +/* + * Command Structure to configure post processing parameters(Spectrum Analizer) + */ + +#define AUDPP_CMD_CFG_OBJECT_PARAMS_SPECTRAM_LEN \ + sizeof(audpp_cmd_cfg_object_params_spectram) + + +typedef struct { + audpp_cmd_cfg_object_params_common common; + unsigned short sample_interval; + unsigned short num_coeff; +} __attribute__((packed)) audpp_cmd_cfg_object_params_spectram; + +/* + * Command Structure to configure post processing parameters (QConcert) + */ + +#define AUDPP_CMD_CFG_OBJECT_PARAMS_QCONCERT_LEN \ + sizeof(audpp_cmd_cfg_object_params_qconcert) + + +#define AUDPP_CMD_QCON_ENA_FLAG_ENA -1 +#define AUDPP_CMD_QCON_ENA_FLAG_DIS 0x0000 + +#define AUDPP_CMD_QCON_OP_MODE_HEADPHONE -1 +#define AUDPP_CMD_QCON_OP_MODE_SPEAKER_FRONT 0x0000 +#define AUDPP_CMD_QCON_OP_MODE_SPEAKER_SIDE 0x0001 +#define AUDPP_CMD_QCON_OP_MODE_SPEAKER_DESKTOP 0x0002 + +#define AUDPP_CMD_QCON_GAIN_UNIT 0x7FFF +#define AUDPP_CMD_QCON_GAIN_SIX_DB 0x4027 + + +#define AUDPP_CMD_QCON_EXPANSION_MAX 0x7FFF + + +typedef struct { + audpp_cmd_cfg_object_params_common common; + signed short enable_flag; + signed short output_mode; + signed short gain; + signed short expansion; + signed short delay; + unsigned short stages_per_mode; +} __attribute__((packed)) audpp_cmd_cfg_object_params_qconcert; + +/* + * Command Structure to configure post processing parameters (Side Chain) + */ + +#define 
AUDPP_CMD_CFG_OBJECT_PARAMS_SIDECHAIN_LEN \ + sizeof(audpp_cmd_cfg_object_params_sidechain) + + +#define AUDPP_CMD_SIDECHAIN_ACTIVE_FLAG_DIS 0x0000 +#define AUDPP_CMD_SIDECHAIN_ACTIVE_FLAG_ENA -1 + +typedef struct { + audpp_cmd_cfg_object_params_common common; + signed short active_flag; + unsigned short num_bands; + union { + filter_1 filter_1_params; + filter_2 filter_2_params; + filter_3 filter_3_params; + filter_4 filter_4_params; + } __attribute__((packed)) params_filter; +} __attribute__((packed)) audpp_cmd_cfg_object_params_sidechain; + + +/* + * Command Structure to configure post processing parameters (QAFX) + */ + +#define AUDPP_CMD_CFG_OBJECT_PARAMS_QAFX_LEN \ + sizeof(audpp_cmd_cfg_object_params_qafx) + +#define AUDPP_CMD_QAFX_ENA_DISA 0x0000 +#define AUDPP_CMD_QAFX_ENA_ENA_CFG -1 +#define AUDPP_CMD_QAFX_ENA_DIS_CFG 0x0001 + +#define AUDPP_CMD_QAFX_CMD_TYPE_ENV 0x0100 +#define AUDPP_CMD_QAFX_CMD_TYPE_OBJ 0x0010 +#define AUDPP_CMD_QAFX_CMD_TYPE_QUERY 0x1000 + +#define AUDPP_CMD_QAFX_CMDS_ENV_OP_MODE 0x0100 +#define AUDPP_CMD_QAFX_CMDS_ENV_LIS_POS 0x0101 +#define AUDPP_CMD_QAFX_CMDS_ENV_LIS_ORI 0x0102 +#define AUDPP_CMD_QAFX_CMDS_ENV_LIS_VEL 0X0103 +#define AUDPP_CMD_QAFX_CMDS_ENV_ENV_RES 0x0107 + +#define AUDPP_CMD_QAFX_CMDS_OBJ_SAMP_FREQ 0x0010 +#define AUDPP_CMD_QAFX_CMDS_OBJ_VOL 0x0011 +#define AUDPP_CMD_QAFX_CMDS_OBJ_DIST 0x0012 +#define AUDPP_CMD_QAFX_CMDS_OBJ_POS 0x0013 +#define AUDPP_CMD_QAFX_CMDS_OBJ_VEL 0x0014 + + +typedef struct { + audpp_cmd_cfg_object_params_common common; + signed short enable; + unsigned short command_type; + unsigned short num_commands; + unsigned short commands; +} __attribute__((packed)) audpp_cmd_cfg_object_params_qafx; + +/* + * Command Structure to enable , disable or configure the reverberation effect + * (Common) + */ + +#define AUDPP_CMD_REVERB_CONFIG 0x0001 +#define AUDPP_CMD_REVERB_CONFIG_COMMON_LEN \ + sizeof(audpp_cmd_reverb_config_common) + +#define AUDPP_CMD_ENA_ENA 0xFFFF +#define AUDPP_CMD_ENA_DIS 0x0000 +#define AUDPP_CMD_ENA_CFG 0x0001 + +#define AUDPP_CMD_CMD_TYPE_ENV 0x0104 +#define AUDPP_CMD_CMD_TYPE_OBJ 0x0015 +#define AUDPP_CMD_CMD_TYPE_QUERY 0x1000 + + +typedef struct { + unsigned short cmd_id; + unsigned short enable; + unsigned short cmd_type; +} __attribute__((packed)) audpp_cmd_reverb_config_common; + +/* + * Command Structure to enable , disable or configure the reverberation effect + * (ENV-0x0104) + */ + +#define AUDPP_CMD_REVERB_CONFIG_ENV_104_LEN \ + sizeof(audpp_cmd_reverb_config_env_104) + +typedef struct { + audpp_cmd_reverb_config_common common; + unsigned short env_gain; + unsigned short decay_msw; + unsigned short decay_lsw; + unsigned short decay_timeratio_msw; + unsigned short decay_timeratio_lsw; + unsigned short delay_time; + unsigned short reverb_gain; + unsigned short reverb_delay; +} __attribute__((packed)) audpp_cmd_reverb_config_env_104; + +/* + * Command Structure to enable , disable or configure the reverberation effect + * (ENV-0x0015) + */ + +#define AUDPP_CMD_REVERB_CONFIG_ENV_15_LEN \ + sizeof(audpp_cmd_reverb_config_env_15) + +typedef struct { + audpp_cmd_reverb_config_common common; + unsigned short object_num; + unsigned short absolute_gain; +} __attribute__((packed)) audpp_cmd_reverb_config_env_15; + + +#endif /* QDSP5AUDPPCMDI_H */ + diff --git a/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audppmsg.h b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audppmsg.h new file mode 100644 index 0000000000000..e229df3ffd880 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audppmsg.h @@ -0,0 
+1,318 @@ +#ifndef QDSP5AUDPPMSG_H +#define QDSP5AUDPPMSG_H + +/*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====* + + Q D S P 5 A U D I O P O S T P R O C E S S I N G M S G + +GENERAL DESCRIPTION + Messages sent by AUDPPTASK to ARM + +REFERENCES + None + +EXTERNALIZED FUNCTIONS + None + +Copyright(c) 1992 - 2009 by QUALCOMM, Incorporated. + +This software is licensed under the terms of the GNU General Public +License version 2, as published by the Free Software Foundation, and +may be copied, distributed, and modified under those terms. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====*/ +/*=========================================================================== + + EDIT HISTORY FOR FILE + +This section contains comments describing changes made to this file. +Notice that changes are listed in reverse chronological order. + + $Header: //source/qcom/qct/multimedia2/Audio/drivers/QDSP5Driver/QDSP5Interface/main/latest/qdsp5audppmsg.h#4 $ + +===========================================================================*/ + +/* + * AUDPPTASK uses audPPuPRlist to send messages to the ARM + * Location : MEMA + * Buffer Size : 45 + * No of Buffers in a queue : 5 for gaming audio and 1 for other images + */ + +/* + * MSG to Informs the ARM os Success/Failure of bringing up the decoder + */ + +#define AUDPP_MSG_STATUS_MSG 0x0001 +#define AUDPP_MSG_STATUS_MSG_LEN \ + sizeof(audpp_msg_status_msg) + +#define AUDPP_MSG_STATUS_SLEEP 0x0000 +#define AUDPP_MSG__STATUS_INIT 0x0001 +#define AUDPP_MSG_MSG_STATUS_CFG 0x0002 +#define AUDPP_MSG_STATUS_PLAY 0x0003 + +#define AUDPP_MSG_REASON_MIPS 0x0000 +#define AUDPP_MSG_REASON_MEM 0x0001 + +typedef struct{ + unsigned short dec_id; + unsigned short status; + unsigned short reason; +} __attribute__((packed)) audpp_msg_status_msg; + +/* + * MSG to communicate the spectrum analyzer output bands to the ARM + */ +#define AUDPP_MSG_SPA_BANDS 0x0002 +#define AUDPP_MSG_SPA_BANDS_LEN \ + sizeof(audpp_msg_spa_bands) + +typedef struct { + unsigned short current_object; + unsigned short spa_band_1; + unsigned short spa_band_2; + unsigned short spa_band_3; + unsigned short spa_band_4; + unsigned short spa_band_5; + unsigned short spa_band_6; + unsigned short spa_band_7; + unsigned short spa_band_8; + unsigned short spa_band_9; + unsigned short spa_band_10; + unsigned short spa_band_11; + unsigned short spa_band_12; + unsigned short spa_band_13; + unsigned short spa_band_14; + unsigned short spa_band_15; + unsigned short spa_band_16; + unsigned short spa_band_17; + unsigned short spa_band_18; + unsigned short spa_band_19; + unsigned short spa_band_20; + unsigned short spa_band_21; + unsigned short spa_band_22; + unsigned short spa_band_23; + unsigned short spa_band_24; + unsigned short spa_band_25; + unsigned short spa_band_26; + unsigned short spa_band_27; + unsigned short spa_band_28; + unsigned short spa_band_29; + unsigned short spa_band_30; + unsigned short spa_band_31; + unsigned short spa_band_32; +} __attribute__((packed)) audpp_msg_spa_bands; + +/* + * MSG to communicate the PCM I/O buffer status to ARM + */ +#define AUDPP_MSG_HOST_PCM_INTF_MSG 0x0003 +#define AUDPP_MSG_HOST_PCM_INTF_MSG_LEN \ + sizeof(audpp_msg_host_pcm_intf_msg) + +#define AUDPP_MSG_HOSTPCM_ID_TX_ARM 0x0000 
+#define AUDPP_MSG_HOSTPCM_ID_ARM_TX 0x0001 +#define AUDPP_MSG_HOSTPCM_ID_RX_ARM 0x0002 +#define AUDPP_MSG_HOSTPCM_ID_ARM_RX 0x0003 + +#define AUDPP_MSG_SAMP_FREQ_INDX_96000 0x0000 +#define AUDPP_MSG_SAMP_FREQ_INDX_88200 0x0001 +#define AUDPP_MSG_SAMP_FREQ_INDX_64000 0x0002 +#define AUDPP_MSG_SAMP_FREQ_INDX_48000 0x0003 +#define AUDPP_MSG_SAMP_FREQ_INDX_44100 0x0004 +#define AUDPP_MSG_SAMP_FREQ_INDX_32000 0x0005 +#define AUDPP_MSG_SAMP_FREQ_INDX_24000 0x0006 +#define AUDPP_MSG_SAMP_FREQ_INDX_22050 0x0007 +#define AUDPP_MSG_SAMP_FREQ_INDX_16000 0x0008 +#define AUDPP_MSG_SAMP_FREQ_INDX_12000 0x0009 +#define AUDPP_MSG_SAMP_FREQ_INDX_11025 0x000A +#define AUDPP_MSG_SAMP_FREQ_INDX_8000 0x000B + +#define AUDPP_MSG_CHANNEL_MODE_MONO 0x0001 +#define AUDPP_MSG_CHANNEL_MODE_STEREO 0x0002 + +typedef struct{ + unsigned short obj_num; + unsigned short numbers_of_samples; + unsigned short host_pcm_id; + unsigned short buf_indx; + unsigned short samp_freq_indx; + unsigned short channel_mode; +} __attribute__((packed)) audpp_msg_host_pcm_intf_msg; + + +/* + * MSG to communicate 3D position of the source and listener , source volume + * source rolloff, source orientation + */ + +#define AUDPP_MSG_QAFX_POS 0x0004 +#define AUDPP_MSG_QAFX_POS_LEN \ + sizeof(audpp_msg_qafx_pos) + +typedef struct { + unsigned short current_object; + unsigned short x_pos_lis_msw; + unsigned short x_pos_lis_lsw; + unsigned short y_pos_lis_msw; + unsigned short y_pos_lis_lsw; + unsigned short z_pos_lis_msw; + unsigned short z_pos_lis_lsw; + unsigned short x_fwd_msw; + unsigned short x_fwd_lsw; + unsigned short y_fwd_msw; + unsigned short y_fwd_lsw; + unsigned short z_fwd_msw; + unsigned short z_fwd_lsw; + unsigned short x_up_msw; + unsigned short x_up_lsw; + unsigned short y_up_msw; + unsigned short y_up_lsw; + unsigned short z_up_msw; + unsigned short z_up_lsw; + unsigned short x_vel_lis_msw; + unsigned short x_vel_lis_lsw; + unsigned short y_vel_lis_msw; + unsigned short y_vel_lis_lsw; + unsigned short z_vel_lis_msw; + unsigned short z_vel_lis_lsw; + unsigned short threed_enable_flag; + unsigned short volume; + unsigned short x_pos_source_msw; + unsigned short x_pos_source_lsw; + unsigned short y_pos_source_msw; + unsigned short y_pos_source_lsw; + unsigned short z_pos_source_msw; + unsigned short z_pos_source_lsw; + unsigned short max_dist_0_msw; + unsigned short max_dist_0_lsw; + unsigned short min_dist_0_msw; + unsigned short min_dist_0_lsw; + unsigned short roll_off_factor; + unsigned short mute_after_max_flag; + unsigned short x_vel_source_msw; + unsigned short x_vel_source_lsw; + unsigned short y_vel_source_msw; + unsigned short y_vel_source_lsw; + unsigned short z_vel_source_msw; + unsigned short z_vel_source_lsw; +} __attribute__((packed)) audpp_msg_qafx_pos; + +/* + * MSG to provide AVSYNC feedback from DSP to ARM + */ + +#define AUDPP_MSG_AVSYNC_MSG 0x0005 +#define AUDPP_MSG_AVSYNC_MSG_LEN \ + sizeof(audpp_msg_avsync_msg) + +typedef struct { + unsigned short active_flag; + unsigned short num_samples_counter0_HSW; + unsigned short num_samples_counter0_MSW; + unsigned short num_samples_counter0_LSW; + unsigned short num_bytes_counter0_HSW; + unsigned short num_bytes_counter0_MSW; + unsigned short num_bytes_counter0_LSW; + unsigned short samp_freq_obj_0; + unsigned short samp_freq_obj_1; + unsigned short samp_freq_obj_2; + unsigned short samp_freq_obj_3; + unsigned short samp_freq_obj_4; + unsigned short samp_freq_obj_5; + unsigned short samp_freq_obj_6; + unsigned short samp_freq_obj_7; + unsigned short samp_freq_obj_8; 
+ unsigned short samp_freq_obj_9; + unsigned short samp_freq_obj_10; + unsigned short samp_freq_obj_11; + unsigned short samp_freq_obj_12; + unsigned short samp_freq_obj_13; + unsigned short samp_freq_obj_14; + unsigned short samp_freq_obj_15; + unsigned short num_samples_counter4_HSW; + unsigned short num_samples_counter4_MSW; + unsigned short num_samples_counter4_LSW; + unsigned short num_bytes_counter4_HSW; + unsigned short num_bytes_counter4_MSW; + unsigned short num_bytes_counter4_LSW; +} __attribute__((packed)) audpp_msg_avsync_msg; + +/* + * MSG to provide PCM DMA Missed feedback from the DSP to ARM + */ + +#define AUDPP_MSG_PCMDMAMISSED 0x0006 +#define AUDPP_MSG_PCMDMAMISSED_LEN \ + sizeof(audpp_msg_pcmdmamissed); + +typedef struct{ + /* + ** Bit 0 0 = PCM DMA not missed for object 0 + ** 1 = PCM DMA missed for object0 + ** Bit 1 0 = PCM DMA not missed for object 1 + ** 1 = PCM DMA missed for object1 + ** Bit 2 0 = PCM DMA not missed for object 2 + ** 1 = PCM DMA missed for object2 + ** Bit 3 0 = PCM DMA not missed for object 3 + ** 1 = PCM DMA missed for object3 + ** Bit 4 0 = PCM DMA not missed for object 4 + ** 1 = PCM DMA missed for object4 + */ + unsigned short pcmdmamissed; +} __attribute__((packed)) audpp_msg_pcmdmamissed; + +/* + * MSG to AUDPP enable or disable feedback form DSP to ARM + */ + +#define AUDPP_MSG_CFG_MSG 0x0007 +#define AUDPP_MSG_CFG_MSG_LEN \ + sizeof(audpp_msg_cfg_msg) + +#define AUDPP_MSG_ENA_ENA 0xFFFF +#define AUDPP_MSG_ENA_DIS 0x0000 + +typedef struct{ + /* Enabled - 0xffff + ** Disabled - 0 + */ + unsigned short enabled; +} __attribute__((packed)) audpp_msg_cfg_msg; + +/* + * MSG to communicate the reverb per object volume + */ + +#define AUDPP_MSG_QREVERB_VOLUME 0x0008 +#define AUDPP_MSG_QREVERB_VOLUME_LEN \ + sizeof(audpp_msg_qreverb_volume) + + +typedef struct { + unsigned short obj_0_gain; + unsigned short obj_1_gain; + unsigned short obj_2_gain; + unsigned short obj_3_gain; + unsigned short obj_4_gain; + unsigned short hpcm_obj_volume; +} __attribute__((packed)) audpp_msg_qreverb_volume; + +#define AUDPP_MSG_ROUTING_ACK 0x0009 +#define AUDPP_MSG_ROUTING_ACK_LEN \ + sizeof(struct audpp_msg_routing_ack) + +struct audpp_msg_routing_ack { + unsigned short dec_id; + unsigned short routing_mode; +} __attribute__((packed)); + +#define AUDPP_MSG_FLUSH_ACK 0x000A + +#endif /* QDSP5AUDPPMSG_H */ diff --git a/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audpreproccmdi.h b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audpreproccmdi.h new file mode 100644 index 0000000000000..cd9d59068561d --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audpreproccmdi.h @@ -0,0 +1,256 @@ +#ifndef QDSP5AUDPREPROCCMDI_H +#define QDSP5AUDPREPROCCMDI_H + +/*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====* + + A U D I O P R E P R O C E S S I N G I N T E R N A L C O M M A N D S + +GENERAL DESCRIPTION + This file contains defintions of format blocks of commands + that are accepted by AUDPREPROC Task + +REFERENCES + None + +EXTERNALIZED FUNCTIONS + None + +Copyright(c) 1992 - 2008 by QUALCOMM, Incorporated. + +This software is licensed under the terms of the GNU General Public +License version 2, as published by the Free Software Foundation, and +may be copied, distributed, and modified under those terms. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====*/ +/*=========================================================================== + + EDIT HISTORY FOR FILE + +This section contains comments describing changes made to this file. +Notice that changes are listed in reverse chronological order. + +$Header: //source/qcom/qct/multimedia2/Audio/drivers/QDSP5Driver/QDSP5Interface/main/latest/qdsp5audpreproccmdi.h#2 $ + +===========================================================================*/ + +/* + * AUDIOPREPROC COMMANDS: + * ARM uses uPAudPreProcCmdQueue to communicate with AUDPREPROCTASK + * Location : MEMB + * Buffer size : 51 + * Number of buffers in a queue : 3 + */ + +/* + * Command to configure the parameters of AGC + */ + +#define AUDPREPROC_CMD_CFG_AGC_PARAMS 0x0000 +#define AUDPREPROC_CMD_CFG_AGC_PARAMS_LEN \ + sizeof(audpreproc_cmd_cfg_agc_params) + +#define AUDPREPROC_CMD_TX_AGC_PARAM_MASK_COMP_SLOPE 0x0009 +#define AUDPREPROC_CMD_TX_AGC_PARAM_MASK_COMP_TH 0x000A +#define AUDPREPROC_CMD_TX_AGC_PARAM_MASK_EXP_SLOPE 0x000B +#define AUDPREPROC_CMD_TX_AGC_PARAM_MASK_EXP_TH 0x000C +#define AUDPREPROC_CMD_TX_AGC_PARAM_MASK_COMP_AIG_FLAG 0x000D +#define AUDPREPROC_CMD_TX_AGC_PARAM_MASK_COMP_STATIC_GAIN 0x000E +#define AUDPREPROC_CMD_TX_AGC_PARAM_MASK_TX_AGC_ENA_FLAG 0x000F + +#define AUDPREPROC_CMD_TX_AGC_ENA_FLAG_ENA -1 +#define AUDPREPROC_CMD_TX_AGC_ENA_FLAG_DIS 0x0000 + +#define AUDPREPROC_CMD_ADP_GAIN_FLAG_ENA_ADP_GAIN -1 +#define AUDPREPROC_CMD_ADP_GAIN_FLAG_ENA_STATIC_GAIN 0x0000 + +#define AUDPREPROC_CMD_PARAM_MASK_RMS_TAY 0x0004 +#define AUDPREPROC_CMD_PARAM_MASK_RELEASEK 0x0005 +#define AUDPREPROC_CMD_PARAM_MASK_DELAY 0x0006 +#define AUDPREPROC_CMD_PARAM_MASK_ATTACKK 0x0007 +#define AUDPREPROC_CMD_PARAM_MASK_LEAKRATE_SLOW 0x0008 +#define AUDPREPROC_CMD_PARAM_MASK_LEAKRATE_FAST 0x0009 +#define AUDPREPROC_CMD_PARAM_MASK_AIG_RELEASEK 0x000A +#define AUDPREPROC_CMD_PARAM_MASK_AIG_MIN 0x000B +#define AUDPREPROC_CMD_PARAM_MASK_AIG_MAX 0x000C +#define AUDPREPROC_CMD_PARAM_MASK_LEAK_UP 0x000D +#define AUDPREPROC_CMD_PARAM_MASK_LEAK_DOWN 0x000E +#define AUDPREPROC_CMD_PARAM_MASK_AIG_ATTACKK 0x000F + +typedef struct { + unsigned short cmd_id; + unsigned short tx_agc_param_mask; + unsigned short tx_agc_enable_flag; + unsigned short static_gain; + signed short adaptive_gain_flag; + unsigned short expander_th; + unsigned short expander_slope; + unsigned short compressor_th; + unsigned short compressor_slope; + unsigned short param_mask; + unsigned short aig_attackk; + unsigned short aig_leak_down; + unsigned short aig_leak_up; + unsigned short aig_max; + unsigned short aig_min; + unsigned short aig_releasek; + unsigned short aig_leakrate_fast; + unsigned short aig_leakrate_slow; + unsigned short attackk_msw; + unsigned short attackk_lsw; + unsigned short delay; + unsigned short releasek_msw; + unsigned short releasek_lsw; + unsigned short rms_tav; +} __attribute__((packed)) audpreproc_cmd_cfg_agc_params; + + +/* + * Command to configure the params of Advanved AGC + */ + +#define AUDPREPROC_CMD_CFG_AGC_PARAMS_2 0x0001 +#define AUDPREPROC_CMD_CFG_AGC_PARAMS_2_LEN \ + sizeof(audpreproc_cmd_cfg_agc_params_2) + +#define AUDPREPROC_CMD_2_TX_AGC_ENA_FLAG_ENA -1; +#define AUDPREPROC_CMD_2_TX_AGC_ENA_FLAG_DIS 0x0000; + +typedef struct { + unsigned short cmd_id; + unsigned short agc_param_mask; + signed short tx_agc_enable_flag; + unsigned short comp_static_gain; + unsigned short exp_th; + unsigned short exp_slope; + unsigned short comp_th; + unsigned short comp_slope; 
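+	/* The comp_attackk / comp_releasek constants below are 32-bit values
+	 * split into _msw/_lsw halves, mirroring attackk/releasek in the basic
+	 * AGC command above. */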
+ unsigned short comp_rms_tav; + unsigned short comp_samp_mask; + unsigned short comp_attackk_msw; + unsigned short comp_attackk_lsw; + unsigned short comp_releasek_msw; + unsigned short comp_releasek_lsw; + unsigned short comp_delay; + unsigned short comp_makeup_gain; +} __attribute__((packed)) audpreproc_cmd_cfg_agc_params_2; + +/* + * Command to configure params for ns + */ + +#define AUDPREPROC_CMD_CFG_NS_PARAMS 0x0002 +#define AUDPREPROC_CMD_CFG_NS_PARAMS_LEN \ + sizeof(audpreproc_cmd_cfg_ns_params) + +#define AUDPREPROC_CMD_EC_MODE_NEW_NLMS_ENA 0x0001 +#define AUDPREPROC_CMD_EC_MODE_NEW_NLMS_DIS 0x0000 +#define AUDPREPROC_CMD_EC_MODE_NEW_DES_ENA 0x0002 +#define AUDPREPROC_CMD_EC_MODE_NEW_DES_DIS 0x0000 +#define AUDPREPROC_CMD_EC_MODE_NEW_NS_ENA 0x0004 +#define AUDPREPROC_CMD_EC_MODE_NEW_NS_DIS 0x0000 +#define AUDPREPROC_CMD_EC_MODE_NEW_CNI_ENA 0x0008 +#define AUDPREPROC_CMD_EC_MODE_NEW_CNI_DIS 0x0000 + +#define AUDPREPROC_CMD_EC_MODE_NEW_NLES_ENA 0x0010 +#define AUDPREPROC_CMD_EC_MODE_NEW_NLES_DIS 0x0000 +#define AUDPREPROC_CMD_EC_MODE_NEW_HB_ENA 0x0020 +#define AUDPREPROC_CMD_EC_MODE_NEW_HB_DIS 0x0000 +#define AUDPREPROC_CMD_EC_MODE_NEW_VA_ENA 0x0040 +#define AUDPREPROC_CMD_EC_MODE_NEW_VA_DIS 0x0000 +#define AUDPREPROC_CMD_EC_MODE_NEW_PCD_ENA 0x0080 +#define AUDPREPROC_CMD_EC_MODE_NEW_PCD_DIS 0x0000 +#define AUDPREPROC_CMD_EC_MODE_NEW_FEHI_ENA 0x0100 +#define AUDPREPROC_CMD_EC_MODE_NEW_FEHI_DIS 0x0000 +#define AUDPREPROC_CMD_EC_MODE_NEW_NEHI_ENA 0x0200 +#define AUDPREPROC_CMD_EC_MODE_NEW_NEHI_DIS 0x0000 +#define AUDPREPROC_CMD_EC_MODE_NEW_NLPP_ENA 0x0400 +#define AUDPREPROC_CMD_EC_MODE_NEW_NLPP_DIS 0x0000 +#define AUDPREPROC_CMD_EC_MODE_NEW_FNE_ENA 0x0800 +#define AUDPREPROC_CMD_EC_MODE_NEW_FNE_DIS 0x0000 +#define AUDPREPROC_CMD_EC_MODE_NEW_PRENLMS_ENA 0x1000 +#define AUDPREPROC_CMD_EC_MODE_NEW_PRENLMS_DIS 0x0000 + +typedef struct { + unsigned short cmd_id; + unsigned short ec_mode_new; + unsigned short dens_gamma_n; + unsigned short dens_nfe_block_size; + unsigned short dens_limit_ns; + unsigned short dens_limit_ns_d; + unsigned short wb_gamma_e; + unsigned short wb_gamma_n; +} __attribute__((packed)) audpreproc_cmd_cfg_ns_params; + +/* + * Command to configure parameters for IIR tuning filter + */ + +#define AUDPREPROC_CMD_CFG_IIR_TUNING_FILTER_PARAMS 0x0003 +#define AUDPREPROC_CMD_CFG_IIR_TUNING_FILTER_PARAMS_LEN \ + sizeof(audpreproc_cmd_cfg_iir_tuning_filter_params) + +#define AUDPREPROC_CMD_IIR_ACTIVE_FLAG_DIS 0x0000 +#define AUDPREPROC_CMD_IIR_ACTIVE_FLAG_ENA 0x0001 + +typedef struct { + unsigned short cmd_id; + unsigned short active_flag; + unsigned short num_bands; + unsigned short numerator_coeff_b0_filter0_lsw; + unsigned short numerator_coeff_b0_filter0_msw; + unsigned short numerator_coeff_b1_filter0_lsw; + unsigned short numerator_coeff_b1_filter0_msw; + unsigned short numerator_coeff_b2_filter0_lsw; + unsigned short numerator_coeff_b2_filter0_msw; + unsigned short numerator_coeff_b0_filter1_lsw; + unsigned short numerator_coeff_b0_filter1_msw; + unsigned short numerator_coeff_b1_filter1_lsw; + unsigned short numerator_coeff_b1_filter1_msw; + unsigned short numerator_coeff_b2_filter1_lsw; + unsigned short numerator_coeff_b2_filter1_msw; + unsigned short numerator_coeff_b0_filter2_lsw; + unsigned short numerator_coeff_b0_filter2_msw; + unsigned short numerator_coeff_b1_filter2_lsw; + unsigned short numerator_coeff_b1_filter2_msw; + unsigned short numerator_coeff_b2_filter2_lsw; + unsigned short numerator_coeff_b2_filter2_msw; + unsigned short 
numerator_coeff_b0_filter3_lsw; + unsigned short numerator_coeff_b0_filter3_msw; + unsigned short numerator_coeff_b1_filter3_lsw; + unsigned short numerator_coeff_b1_filter3_msw; + unsigned short numerator_coeff_b2_filter3_lsw; + unsigned short numerator_coeff_b2_filter3_msw; + unsigned short denominator_coeff_a0_filter0_lsw; + unsigned short denominator_coeff_a0_filter0_msw; + unsigned short denominator_coeff_a1_filter0_lsw; + unsigned short denominator_coeff_a1_filter0_msw; + unsigned short denominator_coeff_a0_filter1_lsw; + unsigned short denominator_coeff_a0_filter1_msw; + unsigned short denominator_coeff_a1_filter1_lsw; + unsigned short denominator_coeff_a1_filter1_msw; + unsigned short denominator_coeff_a0_filter2_lsw; + unsigned short denominator_coeff_a0_filter2_msw; + unsigned short denominator_coeff_a1_filter2_lsw; + unsigned short denominator_coeff_a1_filter2_msw; + unsigned short denominator_coeff_a0_filter3_lsw; + unsigned short denominator_coeff_a0_filter3_msw; + unsigned short denominator_coeff_a1_filter3_lsw; + unsigned short denominator_coeff_a1_filter3_msw; + + unsigned short shift_factor_filter0; + unsigned short shift_factor_filter1; + unsigned short shift_factor_filter2; + unsigned short shift_factor_filter3; + + unsigned short channel_selected0; + unsigned short channel_selected1; + unsigned short channel_selected2; + unsigned short channel_selected3; +} __attribute__((packed))audpreproc_cmd_cfg_iir_tuning_filter_params; + +#endif diff --git a/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audpreprocmsg.h b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audpreprocmsg.h new file mode 100644 index 0000000000000..9187f45a586ed --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audpreprocmsg.h @@ -0,0 +1,85 @@ +#ifndef QDSP5AUDPREPROCMSG_H +#define QDSP5AUDPREPROCMSG_H + +/*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====* + + A U D I O P R E P R O C E S S I N G M E S S A G E S + +GENERAL DESCRIPTION + This file contains defintions of format blocks of messages + that are rcvd by AUDPREPROC Task + +REFERENCES + None + +EXTERNALIZED FUNCTIONS + None + +Copyright(c) 1992 - 2008 by QUALCOMM, Incorporated. + +This software is licensed under the terms of the GNU General Public +License version 2, as published by the Free Software Foundation, and +may be copied, distributed, and modified under those terms. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====*/ +/*=========================================================================== + + EDIT HISTORY FOR FILE + +This section contains comments describing changes made to this file. +Notice that changes are listed in reverse chronological order. 
+ + $Header: //source/qcom/qct/multimedia2/Audio/drivers/QDSP5Driver/QDSP5Interface/main/latest/qdsp5audpreprocmsg.h#3 $ + +===========================================================================*/ + +/* + * ADSPREPROCTASK Messages + * AUDPREPROCTASK uses audPreProcUpRlist to communicate with ARM + * Location : MEMA + * Message Length : 2 + */ + +/* + * Message to indicate particular feature has been enabled or disabled + */ + + +#define AUDPREPROC_MSG_CMD_CFG_DONE_MSG 0x0000 +#define AUDPREPROC_MSG_CMD_CFG_DONE_MSG_LEN \ + sizeof(audpreproc_msg_cmd_cfg_done_msg) + +#define AUDPREPROC_MSG_TYPE_AGC 0x0000 +#define AUDPREPROC_MSG_TYPE_NOISE_REDUCTION 0x0001 +#define AUDPREPROC_MSG_TYPE_IIR_FILTER 0x0002 + + +#define AUDPREPROC_MSG_STATUS_FLAG_ENA -1 +#define AUDPREPROC_MSG_STATUS_FLAG_DIS 0x0000 + +typedef struct { + unsigned short type; + signed short status_flag; +} __attribute__((packed)) audpreproc_msg_cmd_cfg_done_msg; + + +/* + * Message to indicate particular feature has selected for wrong samp freq + */ + +#define AUDPREPROC_MSG_ERROR_MSG_ID 0x0001 +#define AUDPREPROC_MSG_ERROR_MSG_ID_LEN \ + sizeof(audpreproc_msg_error_msg_id) + +#define AUDPREPROC_MSG_ERR_INDEX_NS 0x0000 + +typedef struct { + unsigned short err_index; +} __attribute__((packed)) audpreproc_msg_error_msg_id; + +#endif diff --git a/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audreccmdi.h b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audreccmdi.h new file mode 100644 index 0000000000000..e88bd5d0af25b --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audreccmdi.h @@ -0,0 +1,176 @@ +#ifndef QDSP5AUDRECCMDI_H +#define QDSP5AUDRECCMDI_H + +/*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====* + + A U D I O R E C O R D I N T E R N A L C O M M A N D S + +GENERAL DESCRIPTION + This file contains defintions of format blocks of commands + that are accepted by AUDREC Task + +REFERENCES + None + +EXTERNALIZED FUNCTIONS + None + +Copyright(c) 1992 - 2008 by QUALCOMM, Incorporated. + +This software is licensed under the terms of the GNU General Public +License version 2, as published by the Free Software Foundation, and +may be copied, distributed, and modified under those terms. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====*/ + +/*=========================================================================== + + EDIT HISTORY FOR FILE + +This section contains comments describing changes made to this file. +Notice that changes are listed in reverse chronological order. 
+ + $Header: //source/qcom/qct/multimedia2/Audio/drivers/QDSP5Driver/QDSP5Interface/main/latest/qdsp5audreccmdi.h#3 $ + +============================================================================*/ + +/* + * AUDRECTASK COMMANDS + * ARM uses 2 queues to communicate with the AUDRECTASK + * 1.uPAudRecCmdQueue + * Location :MEMC + * Buffer Size : 8 + * No of Buffers in a queue : 3 + * 2.audRecUpBitStreamQueue + * Location : MEMC + * Buffer Size : 4 + * No of buffers in a queue : 2 + */ + +/* + * Commands on uPAudRecCmdQueue + */ + +/* + * Command to initiate and terminate the audio recording section + */ + +#define AUDREC_CMD_CFG 0x0000 +#define AUDREC_CMD_CFG_LEN sizeof(audrec_cmd_cfg) + +#define AUDREC_CMD_TYPE_0_INDEX_WAV 0x0000 +#define AUDREC_CMD_TYPE_0_INDEX_AAC 0x0001 + +#define AUDREC_CMD_TYPE_0_ENA 0x4000 +#define AUDREC_CMD_TYPE_0_DIS 0x0000 + +#define AUDREC_CMD_TYPE_0_NOUPDATE 0x0000 +#define AUDREC_CMD_TYPE_0_UPDATE 0x8000 + +#define AUDREC_CMD_TYPE_1_INDEX_SBC 0x0002 + +#define AUDREC_CMD_TYPE_1_ENA 0x4000 +#define AUDREC_CMD_TYPE_1_DIS 0x0000 + +#define AUDREC_CMD_TYPE_1_NOUPDATE 0x0000 +#define AUDREC_CMD_TYPE_1_UPDATE 0x8000 + +typedef struct { + unsigned short cmd_id; + unsigned short type_0; + unsigned short type_1; +} __attribute__((packed)) audrec_cmd_cfg; + + +/* + * Command to configure the recording parameters for RecType0(AAC/WAV) encoder + */ + +#define AUDREC_CMD_AREC0PARAM_CFG 0x0001 +#define AUDREC_CMD_AREC0PARAM_CFG_LEN \ + sizeof(audrec_cmd_arec0param_cfg) + +#define AUDREC_CMD_SAMP_RATE_INDX_8000 0x000B +#define AUDREC_CMD_SAMP_RATE_INDX_11025 0x000A +#define AUDREC_CMD_SAMP_RATE_INDX_12000 0x0009 +#define AUDREC_CMD_SAMP_RATE_INDX_16000 0x0008 +#define AUDREC_CMD_SAMP_RATE_INDX_22050 0x0007 +#define AUDREC_CMD_SAMP_RATE_INDX_24000 0x0006 +#define AUDREC_CMD_SAMP_RATE_INDX_32000 0x0005 +#define AUDREC_CMD_SAMP_RATE_INDX_44100 0x0004 +#define AUDREC_CMD_SAMP_RATE_INDX_48000 0x0003 + +#define AUDREC_CMD_STEREO_MODE_MONO 0x0000 +#define AUDREC_CMD_STEREO_MODE_STEREO 0x0001 + +typedef struct { + unsigned short cmd_id; + unsigned short ptr_to_extpkt_buffer_msw; + unsigned short ptr_to_extpkt_buffer_lsw; + unsigned short buf_len; + unsigned short samp_rate_index; + unsigned short stereo_mode; + unsigned short rec_quality; +} __attribute__((packed)) audrec_cmd_arec0param_cfg; + +/* + * Command to configure the recording parameters for RecType1(SBC) encoder + */ + +#define AUDREC_CMD_AREC1PARAM_CFG 0x0002 +#define AUDREC_CMD_AREC1PARAM_CFG_LEN \ + sizeof(audrec_cmd_arec1param_cfg) + +#define AUDREC_CMD_PARAM_BUF_BLOCKS_4 0x0000 +#define AUDREC_CMD_PARAM_BUF_BLOCKS_8 0x0001 +#define AUDREC_CMD_PARAM_BUF_BLOCKS_12 0x0002 +#define AUDREC_CMD_PARAM_BUF_BLOCKS_16 0x0003 + +#define AUDREC_CMD_PARAM_BUF_SUB_BANDS_8 0x0010 +#define AUDREC_CMD_PARAM_BUF_MODE_MONO 0x0000 +#define AUDREC_CMD_PARAM_BUF_MODE_DUAL 0x0040 +#define AUDREC_CMD_PARAM_BUF_MODE_STEREO 0x0050 +#define AUDREC_CMD_PARAM_BUF_MODE_JSTEREO 0x0060 +#define AUDREC_CMD_PARAM_BUF_LOUDNESS 0x0000 +#define AUDREC_CMD_PARAM_BUF_SNR 0x0100 +#define AUDREC_CMD_PARAM_BUF_BASIC_VER 0x0000 + +typedef struct { + unsigned short cmd_id; + unsigned short ptr_to_extpkt_buffer_msw; + unsigned short ptr_to_extpkt_buffer_lsw; + unsigned short buf_len; + unsigned short param_buf; + unsigned short bit_rate_0; + unsigned short bit_rate_1; +} __attribute__((packed)) audrec_cmd_arec1param_cfg; + + +/* + * Commands on audRecUpBitStreamQueue + */ + +/* + * Command to indicate the current packet read count + */ + +#define 
AUDREC_CMD_PACKET_EXT_PTR 0x0000 +#define AUDREC_CMD_PACKET_EXT_PTR_LEN \ + sizeof(audrec_cmd_packet_ext_ptr) + +#define AUDREC_CMD_TYPE_0 0x0000 +#define AUDREC_CMD_TYPE_1 0x0001 + +typedef struct { + unsigned short cmd_id; + unsigned short type; + unsigned short curr_rec_count_msw; + unsigned short curr_rec_count_lsw; +} __attribute__((packed)) audrec_cmd_packet_ext_ptr; + +#endif diff --git a/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audrecmsg.h b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audrecmsg.h new file mode 100644 index 0000000000000..bb6eb5093cf5e --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audrecmsg.h @@ -0,0 +1,127 @@ +#ifndef QDSP5AUDRECMSGI_H +#define QDSP5AUDRECMSGI_H + +/*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====* + + A U D I O R E C O R D M E S S A G E S + +GENERAL DESCRIPTION + This file contains defintions of format blocks of messages + that are sent by AUDREC Task + +REFERENCES + None + +EXTERNALIZED FUNCTIONS + None + +Copyright(c) 1992 - 2008 by QUALCOMM, Incorporated. + +This software is licensed under the terms of the GNU General Public +License version 2, as published by the Free Software Foundation, and +may be copied, distributed, and modified under those terms. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====*/ + +/*=========================================================================== + + EDIT HISTORY FOR FILE + +This section contains comments describing changes made to this file. +Notice that changes are listed in reverse chronological order. 
+ + $Header: //source/qcom/qct/multimedia2/Audio/drivers/QDSP5Driver/QDSP5Interface/main/latest/qdsp5audrecmsg.h#3 $ + +============================================================================*/ + +/* + * AUDRECTASK MESSAGES + * AUDRECTASK uses audRecUpRlist to communicate with ARM + * Location : MEMC + * Buffer size : 4 + * No of buffers in a queue : 2 + */ + +/* + * Message to notify that config command is done + */ + +#define AUDREC_MSG_CMD_CFG_DONE_MSG 0x0002 +#define AUDREC_MSG_CMD_CFG_DONE_MSG_LEN \ + sizeof(audrec_msg_cmd_cfg_done_msg) + + +#define AUDREC_MSG_CFG_DONE_TYPE_0_ENA 0x4000 +#define AUDREC_MSG_CFG_DONE_TYPE_0_DIS 0x0000 + +#define AUDREC_MSG_CFG_DONE_TYPE_0_NO_UPDATE 0x0000 +#define AUDREC_MSG_CFG_DONE_TYPE_0_UPDATE 0x8000 + +#define AUDREC_MSG_CFG_DONE_TYPE_1_ENA 0x4000 +#define AUDREC_MSG_CFG_DONE_TYPE_1_DIS 0x0000 + +#define AUDREC_MSG_CFG_DONE_TYPE_1_NO_UPDATE 0x0000 +#define AUDREC_MSG_CFG_DONE_TYPE_1_UPDATE 0x8000 + +typedef struct { + unsigned short type_0; + unsigned short type_1; +} __attribute__((packed))audrec_msg_cmd_cfg_done_msg; + + +/* + * Message to notify arec0/1 cfg done and recording params revd by task + */ + +#define AUDREC_MSG_CMD_AREC_PARAM_CFG_DONE_MSG 0x0003 +#define AUDREC_MSG_CMD_AREC_PARAM_CFG_DONE_MSG_LEN \ + sizeof(audrec_msg_cmd_arec_param_cfg_done_msg) + +#define AUDREC_MSG_AREC_PARAM_TYPE_0 0x0000 +#define AUDREC_MSG_AREC_PARAM_TYPE_1 0x0001 + +typedef struct { + unsigned short type; +} __attribute__((packed))audrec_msg_cmd_arec_param_cfg_done_msg; + + +/* + * Message to notify no more buffers are available in ext mem to DME + */ + +#define AUDREC_MSG_FATAL_ERR_MSG 0x0004 +#define AUDREC_MSG_FATAL_ERR_MSG_LEN \ + sizeof(audrec_msg_fatal_err_msg) + +#define AUDREC_MSG_FATAL_ERR_TYPE_0 0x0000 +#define AUDREC_MSG_FATAL_ERR_TYPE_1 0x0001 + +typedef struct { + unsigned short type; +} __attribute__((packed))audrec_msg_fatal_err_msg; + +/* + * Message to notify DME deliverd the encoded pkt to ext pkt buffer + */ + +#define AUDREC_MSG_PACKET_READY_MSG 0x0005 +#define AUDREC_MSG_PACKET_READY_MSG_LEN \ + sizeof(audrec_msg_packet_ready_msg) + +#define AUDREC_MSG_PACKET_READY_TYPE_0 0x0000 +#define AUDREC_MSG_PACKET_READY_TYPE_1 0x0001 + +typedef struct { + unsigned short type; + unsigned short pkt_counter_msw; + unsigned short pkt_counter_lsw; + unsigned short pkt_read_cnt_msw; + unsigned short pkt_read_cnt_lsw; +} __attribute__((packed))audrec_msg_packet_ready_msg; + +#endif diff --git a/arch/arm/mach-msm/include/mach/qdsp5/qdsp5jpegcmdi.h b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5jpegcmdi.h new file mode 100644 index 0000000000000..d8170f0b6a6e1 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5jpegcmdi.h @@ -0,0 +1,376 @@ +#ifndef QDSP5VIDJPEGCMDI_H +#define QDSP5VIDJPEGCMDI_H + +/*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====* + + J P E G I N T E R N A L C O M M A N D S + +GENERAL DESCRIPTION + This file contains defintions of format blocks of commands + that are accepted by JPEG Task + +REFERENCES + None + +EXTERNALIZED FUNCTIONS + None + +Copyright(c) 1992 - 2008 by QUALCOMM, Incorporated. + +This software is licensed under the terms of the GNU General Public +License version 2, as published by the Free Software Foundation, and +may be copied, distributed, and modified under those terms. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +GNU General Public License for more details. + +*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====*/ +/*=========================================================================== + + EDIT HISTORY FOR FILE + +This section contains comments describing changes made to this file. +Notice that changes are listed in reverse chronological order. + +$Header: //source/qcom/qct/multimedia2/AdspSvc/7XXX/qdsp5cmd/video/qdsp5jpegcmdi.h#2 $ $DateTime: 2008/07/30 10:50:23 $ $Author: pavanr $ +Revision History: +when who what, where, why +-------- --- ---------------------------------------------------------- +06/09/08 sv initial version +===========================================================================*/ + +/* + * ARM to JPEG configuration commands are passed through the + * uPJpegCfgCmdQueue + */ + +/* + * Command to configure JPEG Encoder + */ + +#define JPEG_CMD_ENC_CFG 0x0000 +#define JPEG_CMD_ENC_CFG_LEN sizeof(jpeg_cmd_enc_cfg) + +#define JPEG_CMD_ENC_PROCESS_CFG_OP_ROTATION_0 0x0000 +#define JPEG_CMD_ENC_PROCESS_CFG_OP_ROTATION_90 0x0100 +#define JPEG_CMD_ENC_PROCESS_CFG_OP_ROTATION_180 0x0200 +#define JPEG_CMD_ENC_PROCESS_CFG_OP_ROTATION_270 0x0300 +#define JPEG_CMD_ENC_PROCESS_CFG_IP_DATA_FORMAT_M 0x0003 +#define JPEG_CMD_ENC_PROCESS_CFG_IP_DATA_FORMAT_H2V2 0x0000 +#define JPEG_CMD_ENC_PROCESS_CFG_IP_DATA_FORMAT_H2V1 0x0001 +#define JPEG_CMD_ENC_PROCESS_CFG_IP_DATA_FORMAT_H1V2 0x0002 + +#define JPEG_CMD_IP_SIZE_CFG_LUMA_HEIGHT_M 0x0000FFFF +#define JPEG_CMD_IP_SIZE_CFG_LUMA_WIDTH_M 0xFFFF0000 +#define JPEG_CMD_ENC_UPSAMP_IP_SIZE_CFG_ENA 0x0001 +#define JPEG_CMD_ENC_UPSAMP_IP_SIZE_CFG_DIS 0x0000 + +#define JPEG_CMD_FRAG_SIZE_LUMA_HEIGHT_M 0xFFFF + +typedef struct { + unsigned int cmd_id; + unsigned int process_cfg; + unsigned int ip_size_cfg; + unsigned int op_size_cfg; + unsigned int frag_cfg; + unsigned int frag_cfg_part[16]; + + unsigned int part_num; + + unsigned int op_buf_0_cfg_part1; + unsigned int op_buf_0_cfg_part2; + unsigned int op_buf_1_cfg_part1; + unsigned int op_buf_1_cfg_part2; + + unsigned int luma_qunt_table[32]; + unsigned int chroma_qunt_table[32]; + + unsigned int upsamp_ip_size_cfg; + unsigned int upsamp_ip_frame_off; + unsigned int upsamp_pp_filter_coeff[64]; +} __attribute__((packed)) jpeg_cmd_enc_cfg; + +/* + * Command to configure JPEG Decoder + */ + +#define JPEG_CMD_DEC_CFG 0x0001 +#define JPEG_CMD_DEC_CFG_LEN sizeof(jpeg_cmd_dec_cfg) + +#define JPEG_CMD_DEC_OP_DATA_FORMAT_M 0x0001 +#define JPEG_CMD_DEC_OP_DATA_FORMAT_H2V2 0x0000 +#define JPEG_CMD_DEC_OP_DATA_FORMAT_H2V1 0x0001 + +#define JPEG_CMD_DEC_OP_DATA_FORMAT_SCALE_FACTOR_8 0x000000 +#define JPEG_CMD_DEC_OP_DATA_FORMAT_SCALE_FACTOR_4 0x010000 +#define JPEG_CMD_DEC_OP_DATA_FORMAT_SCALE_FACTOR_2 0x020000 +#define JPEG_CMD_DEC_OP_DATA_FORMAT_SCALE_FACTOR_1 0x030000 + +#define JPEG_CMD_DEC_IP_STREAM_BUF_CFG_PART3_NOT_FINAL 0x0000 +#define JPEG_CMD_DEC_IP_STREAM_BUF_CFG_PART3_FINAL 0x0001 + + +typedef struct { + unsigned int cmd_id; + unsigned int img_dimension_cfg; + unsigned int op_data_format; + unsigned int restart_interval; + unsigned int ip_buf_partition_num; + unsigned int ip_stream_buf_cfg_part1; + unsigned int ip_stream_buf_cfg_part2; + unsigned int ip_stream_buf_cfg_part3; + unsigned int op_stream_buf_0_cfg_part1; + unsigned int op_stream_buf_0_cfg_part2; + unsigned int op_stream_buf_0_cfg_part3; + unsigned int op_stream_buf_1_cfg_part1; + unsigned int op_stream_buf_1_cfg_part2; + unsigned int op_stream_buf_1_cfg_part3; + unsigned int luma_qunt_table_0_3; + 
unsigned int luma_qunt_table_4_7; + unsigned int luma_qunt_table_8_11; + unsigned int luma_qunt_table_12_15; + unsigned int luma_qunt_table_16_19; + unsigned int luma_qunt_table_20_23; + unsigned int luma_qunt_table_24_27; + unsigned int luma_qunt_table_28_31; + unsigned int luma_qunt_table_32_35; + unsigned int luma_qunt_table_36_39; + unsigned int luma_qunt_table_40_43; + unsigned int luma_qunt_table_44_47; + unsigned int luma_qunt_table_48_51; + unsigned int luma_qunt_table_52_55; + unsigned int luma_qunt_table_56_59; + unsigned int luma_qunt_table_60_63; + unsigned int chroma_qunt_table_0_3; + unsigned int chroma_qunt_table_4_7; + unsigned int chroma_qunt_table_8_11; + unsigned int chroma_qunt_table_12_15; + unsigned int chroma_qunt_table_16_19; + unsigned int chroma_qunt_table_20_23; + unsigned int chroma_qunt_table_24_27; + unsigned int chroma_qunt_table_28_31; + unsigned int chroma_qunt_table_32_35; + unsigned int chroma_qunt_table_36_39; + unsigned int chroma_qunt_table_40_43; + unsigned int chroma_qunt_table_44_47; + unsigned int chroma_qunt_table_48_51; + unsigned int chroma_qunt_table_52_55; + unsigned int chroma_qunt_table_56_59; + unsigned int chroma_qunt_table_60_63; + unsigned int luma_dc_hm_code_cnt_table_0_3; + unsigned int luma_dc_hm_code_cnt_table_4_7; + unsigned int luma_dc_hm_code_cnt_table_8_11; + unsigned int luma_dc_hm_code_cnt_table_12_15; + unsigned int luma_dc_hm_code_val_table_0_3; + unsigned int luma_dc_hm_code_val_table_4_7; + unsigned int luma_dc_hm_code_val_table_8_11; + unsigned int chroma_dc_hm_code_cnt_table_0_3; + unsigned int chroma_dc_hm_code_cnt_table_4_7; + unsigned int chroma_dc_hm_code_cnt_table_8_11; + unsigned int chroma_dc_hm_code_cnt_table_12_15; + unsigned int chroma_dc_hm_code_val_table_0_3; + unsigned int chroma_dc_hm_code_val_table_4_7; + unsigned int chroma_dc_hm_code_val_table_8_11; + unsigned int luma_ac_hm_code_cnt_table_0_3; + unsigned int luma_ac_hm_code_cnt_table_4_7; + unsigned int luma_ac_hm_code_cnt_table_8_11; + unsigned int luma_ac_hm_code_cnt_table_12_15; + unsigned int luma_ac_hm_code_val_table_0_3; + unsigned int luma_ac_hm_code_val_table_4_7; + unsigned int luma_ac_hm_code_val_table_8_11; + unsigned int luma_ac_hm_code_val_table_12_15; + unsigned int luma_ac_hm_code_val_table_16_19; + unsigned int luma_ac_hm_code_val_table_20_23; + unsigned int luma_ac_hm_code_val_table_24_27; + unsigned int luma_ac_hm_code_val_table_28_31; + unsigned int luma_ac_hm_code_val_table_32_35; + unsigned int luma_ac_hm_code_val_table_36_39; + unsigned int luma_ac_hm_code_val_table_40_43; + unsigned int luma_ac_hm_code_val_table_44_47; + unsigned int luma_ac_hm_code_val_table_48_51; + unsigned int luma_ac_hm_code_val_table_52_55; + unsigned int luma_ac_hm_code_val_table_56_59; + unsigned int luma_ac_hm_code_val_table_60_63; + unsigned int luma_ac_hm_code_val_table_64_67; + unsigned int luma_ac_hm_code_val_table_68_71; + unsigned int luma_ac_hm_code_val_table_72_75; + unsigned int luma_ac_hm_code_val_table_76_79; + unsigned int luma_ac_hm_code_val_table_80_83; + unsigned int luma_ac_hm_code_val_table_84_87; + unsigned int luma_ac_hm_code_val_table_88_91; + unsigned int luma_ac_hm_code_val_table_92_95; + unsigned int luma_ac_hm_code_val_table_96_99; + unsigned int luma_ac_hm_code_val_table_100_103; + unsigned int luma_ac_hm_code_val_table_104_107; + unsigned int luma_ac_hm_code_val_table_108_111; + unsigned int luma_ac_hm_code_val_table_112_115; + unsigned int luma_ac_hm_code_val_table_116_119; + unsigned int luma_ac_hm_code_val_table_120_123; + 
unsigned int luma_ac_hm_code_val_table_124_127; + unsigned int luma_ac_hm_code_val_table_128_131; + unsigned int luma_ac_hm_code_val_table_132_135; + unsigned int luma_ac_hm_code_val_table_136_139; + unsigned int luma_ac_hm_code_val_table_140_143; + unsigned int luma_ac_hm_code_val_table_144_147; + unsigned int luma_ac_hm_code_val_table_148_151; + unsigned int luma_ac_hm_code_val_table_152_155; + unsigned int luma_ac_hm_code_val_table_156_159; + unsigned int luma_ac_hm_code_val_table_160_161; + unsigned int chroma_ac_hm_code_cnt_table_0_3; + unsigned int chroma_ac_hm_code_cnt_table_4_7; + unsigned int chroma_ac_hm_code_cnt_table_8_11; + unsigned int chroma_ac_hm_code_cnt_table_12_15; + unsigned int chroma_ac_hm_code_val_table_0_3; + unsigned int chroma_ac_hm_code_val_table_4_7; + unsigned int chroma_ac_hm_code_val_table_8_11; + unsigned int chroma_ac_hm_code_val_table_12_15; + unsigned int chroma_ac_hm_code_val_table_16_19; + unsigned int chroma_ac_hm_code_val_table_20_23; + unsigned int chroma_ac_hm_code_val_table_24_27; + unsigned int chroma_ac_hm_code_val_table_28_31; + unsigned int chroma_ac_hm_code_val_table_32_35; + unsigned int chroma_ac_hm_code_val_table_36_39; + unsigned int chroma_ac_hm_code_val_table_40_43; + unsigned int chroma_ac_hm_code_val_table_44_47; + unsigned int chroma_ac_hm_code_val_table_48_51; + unsigned int chroma_ac_hm_code_val_table_52_55; + unsigned int chroma_ac_hm_code_val_table_56_59; + unsigned int chroma_ac_hm_code_val_table_60_63; + unsigned int chroma_ac_hm_code_val_table_64_67; + unsigned int chroma_ac_hm_code_val_table_68_71; + unsigned int chroma_ac_hm_code_val_table_72_75; + unsigned int chroma_ac_hm_code_val_table_76_79; + unsigned int chroma_ac_hm_code_val_table_80_83; + unsigned int chroma_ac_hm_code_val_table_84_87; + unsigned int chroma_ac_hm_code_val_table_88_91; + unsigned int chroma_ac_hm_code_val_table_92_95; + unsigned int chroma_ac_hm_code_val_table_96_99; + unsigned int chroma_ac_hm_code_val_table_100_103; + unsigned int chroma_ac_hm_code_val_table_104_107; + unsigned int chroma_ac_hm_code_val_table_108_111; + unsigned int chroma_ac_hm_code_val_table_112_115; + unsigned int chroma_ac_hm_code_val_table_116_119; + unsigned int chroma_ac_hm_code_val_table_120_123; + unsigned int chroma_ac_hm_code_val_table_124_127; + unsigned int chroma_ac_hm_code_val_table_128_131; + unsigned int chroma_ac_hm_code_val_table_132_135; + unsigned int chroma_ac_hm_code_val_table_136_139; + unsigned int chroma_ac_hm_code_val_table_140_143; + unsigned int chroma_ac_hm_code_val_table_144_147; + unsigned int chroma_ac_hm_code_val_table_148_151; + unsigned int chroma_ac_hm_code_val_table_152_155; + unsigned int chroma_ac_hm_code_val_table_156_159; + unsigned int chroma_ac_hm_code_val_table_160_161; +} __attribute__((packed)) jpeg_cmd_dec_cfg; + + +/* + * ARM to JPEG configuration commands are passed through the + * uPJpegActionCmdQueue + */ + +/* + * Command to start the encode process + */ + +#define JPEG_CMD_ENC_ENCODE 0x0000 +#define JPEG_CMD_ENC_ENCODE_LEN sizeof(jpeg_cmd_enc_encode) + + +typedef struct { + unsigned short cmd_id; +} __attribute__((packed)) jpeg_cmd_enc_encode; + + +/* + * Command to transition from current state of encoder to IDLE state + */ + +#define JPEG_CMD_ENC_IDLE 0x0001 +#define JPEG_CMD_ENC_IDLE_LEN sizeof(jpeg_cmd_enc_idle) + + +typedef struct { + unsigned short cmd_id; +} __attribute__((packed)) jpeg_cmd_enc_idle; + + +/* + * Command to inform the encoder that another buffer is ready + */ + +#define JPEG_CMD_ENC_OP_CONSUMED 0x0002 
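/*
 * Editor's illustrative sketch -- not part of the original patch.
 * The action-queue commands defined above (JPEG_CMD_ENC_ENCODE,
 * JPEG_CMD_ENC_IDLE) are one-word triggers sent by the ARM through
 * uPJpegActionCmdQueue.  The sketch below shows how a driver might
 * build and submit the ENCODE trigger.  jpeg_action_cmd_write() is a
 * hypothetical stand-in for the platform's ADSP queue-write helper;
 * it is not defined by this header.
 */
int jpeg_action_cmd_write(void *cmd_buf, unsigned int cmd_len);

static inline int jpeg_start_encode(void)
{
        jpeg_cmd_enc_encode cmd;

        cmd.cmd_id = JPEG_CMD_ENC_ENCODE;       /* 0x0000: start encoding */
        return jpeg_action_cmd_write(&cmd, JPEG_CMD_ENC_ENCODE_LEN);
}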
+#define JPEG_CMD_ENC_OP_CONSUMED_LEN sizeof(jpeg_cmd_enc_op_consumed) + + +typedef struct { + unsigned int cmd_id; + unsigned int op_buf_addr; + unsigned int op_buf_size; +} __attribute__((packed)) jpeg_cmd_enc_op_consumed; + + +/* + * Command to start the decoding process + */ + +#define JPEG_CMD_DEC_DECODE 0x0003 +#define JPEG_CMD_DEC_DECODE_LEN sizeof(jpeg_cmd_dec_decode) + + +typedef struct { + unsigned short cmd_id; +} __attribute__((packed)) jpeg_cmd_dec_decode; + + +/* + * Command to transition from the current state of decoder to IDLE + */ + +#define JPEG_CMD_DEC_IDLE 0x0004 +#define JPEG_CMD_DEC_IDLE_LEN sizeof(jpeg_cmd_dec_idle) + + +typedef struct { + unsigned short cmd_id; +} __attribute__((packed)) jpeg_cmd_dec_idle; + + +/* + * Command to inform that an op buffer is ready for use + */ + +#define JPEG_CMD_DEC_OP_CONSUMED 0x0005 +#define JPEG_CMD_DEC_OP_CONSUMED_LEN sizeof(jpeg_cmd_dec_op_consumed) + + +typedef struct { + unsigned int cmd_id; + unsigned int luma_op_buf_addr; + unsigned int luma_op_buf_size; + unsigned int chroma_op_buf_addr; +} __attribute__((packed)) jpeg_cmd_dec_op_consumed; + + +/* + * Command to pass a new ip buffer to the jpeg decoder + */ + +#define JPEG_CMD_DEC_IP 0x0006 +#define JPEG_CMD_DEC_IP_LEN sizeof(jpeg_cmd_dec_ip_len) + +#define JPEG_CMD_EOI_INDICATOR_NOT_END 0x0000 +#define JPEG_CMD_EOI_INDICATOR_END 0x0001 + +typedef struct { + unsigned int cmd_id; + unsigned int ip_buf_addr; + unsigned int ip_buf_size; + unsigned int eoi_indicator; +} __attribute__((packed)) jpeg_cmd_dec_ip; + + + +#endif diff --git a/arch/arm/mach-msm/include/mach/qdsp5/qdsp5jpegmsg.h b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5jpegmsg.h new file mode 100644 index 0000000000000..d11aa3fbccb65 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5jpegmsg.h @@ -0,0 +1,177 @@ +#ifndef QDSP5VIDJPEGMSGI_H +#define QDSP5VIDJPEGMSGI_H + +/*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====* + + J P E G I N T E R N A L M E S S A G E S + +GENERAL DESCRIPTION + This file contains defintions of format blocks of messages + that are sent by JPEG Task + +REFERENCES + None + +EXTERNALIZED FUNCTIONS + None + +Copyright(c) 1992 - 2008 by QUALCOMM, Incorporated. + +This software is licensed under the terms of the GNU General Public +License version 2, as published by the Free Software Foundation, and +may be copied, distributed, and modified under those terms. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====*/ +/*=========================================================================== + + EDIT HISTORY FOR FILE + +This section contains comments describing changes made to this file. +Notice that changes are listed in reverse chronological order. 
+ +$Header: //source/qcom/qct/multimedia2/AdspSvc/7XXX/qdsp5cmd/video/qdsp5jpegmsg.h#2 $ $DateTime: 2008/07/30 10:50:23 $ $Author: pavanr $ +Revision History: + +when who what, where, why +-------- --- ---------------------------------------------------------- +05/10/08 sv initial version +===========================================================================*/ + +/* + * Messages from JPEG task to ARM through jpeguPMsgQueue + */ + +/* + * Message is ACK for CMD_JPEGE_ENCODE cmd + */ + +#define JPEG_MSG_ENC_ENCODE_ACK 0x0000 +#define JPEG_MSG_ENC_ENCODE_ACK_LEN \ + sizeof(jpeg_msg_enc_encode_ack) + +typedef struct { +} __attribute__((packed)) jpeg_msg_enc_encode_ack; + + +/* + * Message informs the up when op buffer is ready for consumption and + * when encoding is complete or errors + */ + +#define JPEG_MSG_ENC_OP_PRODUCED 0x0001 +#define JPEG_MSG_ENC_OP_PRODUCED_LEN \ + sizeof(jpeg_msg_enc_op_produced) + +#define JPEG_MSGOP_OP_BUF_STATUS_ENC_DONE_PROGRESS 0x0000 +#define JPEG_MSGOP_OP_BUF_STATUS_ENC_DONE_COMPLETE 0x0001 +#define JPEG_MSGOP_OP_BUF_STATUS_ENC_ERR 0x10000 + +typedef struct { + unsigned int op_buf_addr; + unsigned int op_buf_size; + unsigned int op_buf_status; +} __attribute__((packed)) jpeg_msg_enc_op_produced; + + +/* + * Message to ack CMD_JPEGE_IDLE + */ + +#define JPEG_MSG_ENC_IDLE_ACK 0x0002 +#define JPEG_MSG_ENC_IDLE_ACK_LEN sizeof(jpeg_msg_enc_idle_ack) + + +typedef struct { +} __attribute__ ((packed)) jpeg_msg_enc_idle_ack; + + +/* + * Message to indicate the illegal command + */ + +#define JPEG_MSG_ENC_ILLEGAL_COMMAND 0x0003 +#define JPEG_MSG_ENC_ILLEGAL_COMMAND_LEN \ + sizeof(jpeg_msg_enc_illegal_command) + +typedef struct { + unsigned int status; +} __attribute__((packed)) jpeg_msg_enc_illegal_command; + + +/* + * Message to ACK CMD_JPEGD_DECODE + */ + +#define JPEG_MSG_DEC_DECODE_ACK 0x0004 +#define JPEG_MSG_DEC_DECODE_ACK_LEN \ + sizeof(jpeg_msg_dec_decode_ack) + + +typedef struct { +} __attribute__((packed)) jpeg_msg_dec_decode_ack; + + +/* + * Message to inform up that an op buffer is ready for consumption and when + * decoding is complete or an error occurs + */ + +#define JPEG_MSG_DEC_OP_PRODUCED 0x0005 +#define JPEG_MSG_DEC_OP_PRODUCED_LEN \ + sizeof(jpeg_msg_dec_op_produced) + +#define JPEG_MSG_DEC_OP_BUF_STATUS_PROGRESS 0x0000 +#define JPEG_MSG_DEC_OP_BUF_STATUS_DONE 0x0001 + +typedef struct { + unsigned int luma_op_buf_addr; + unsigned int chroma_op_buf_addr; + unsigned int num_mcus; + unsigned int op_buf_status; +} __attribute__((packed)) jpeg_msg_dec_op_produced; + +/* + * Message to ack CMD_JPEGD_IDLE cmd + */ + +#define JPEG_MSG_DEC_IDLE_ACK 0x0006 +#define JPEG_MSG_DEC_IDLE_ACK_LEN sizeof(jpeg_msg_dec_idle_ack) + + +typedef struct { +} __attribute__((packed)) jpeg_msg_dec_idle_ack; + + +/* + * Message to indicate illegal cmd was received + */ + +#define JPEG_MSG_DEC_ILLEGAL_COMMAND 0x0007 +#define JPEG_MSG_DEC_ILLEGAL_COMMAND_LEN \ + sizeof(jpeg_msg_dec_illegal_command) + + +typedef struct { + unsigned int status; +} __attribute__((packed)) jpeg_msg_dec_illegal_command; + +/* + * Message to request up for the next segment of ip bit stream + */ + +#define JPEG_MSG_DEC_IP_REQUEST 0x0008 +#define JPEG_MSG_DEC_IP_REQUEST_LEN \ + sizeof(jpeg_msg_dec_ip_request) + + +typedef struct { +} __attribute__((packed)) jpeg_msg_dec_ip_request; + + + +#endif diff --git a/arch/arm/mach-msm/include/mach/qdsp5/qdsp5lpmcmdi.h b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5lpmcmdi.h new file mode 100644 index 0000000000000..6c76e2c20cf40 --- /dev/null +++ 
b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5lpmcmdi.h @@ -0,0 +1,82 @@ +#ifndef QDSP5LPMCMDI_H +#define QDSP5LPMCMDI_H + +/*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====* + + L P M I N T E R N A L C O M M A N D S + +GENERAL DESCRIPTION + This file contains defintions of format blocks of commands + that are accepted by LPM Task + +REFERENCES + None + +EXTERNALIZED FUNCTIONS + None + +Copyright(c) 1992 - 2008 by QUALCOMM, Incorporated. + +This software is licensed under the terms of the GNU General Public +License version 2, as published by the Free Software Foundation, and +may be copied, distributed, and modified under those terms. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====*/ +/*=========================================================================== + + EDIT HISTORY FOR FILE + +This section contains comments describing changes made to this file. +Notice that changes are listed in reverse chronological order. + + +$Header: //source/qcom/qct/multimedia2/AdspSvc/7XXX/qdsp5cmd/video/qdsp5lpmcmdi.h#2 $ $DateTime: 2008/07/30 10:50:23 $ $Author: pavanr $ +Revision History: + +when who what, where, why +-------- --- ---------------------------------------------------------- +06/12/08 sv initial version +===========================================================================*/ + + +/* + * Command to start LPM processing based on the config params + */ + +#define LPM_CMD_START 0x0000 +#define LPM_CMD_START_LEN sizeof(lpm_cmd_start) + +#define LPM_CMD_SPATIAL_FILTER_PART_OPMODE_0 0x00000000 +#define LPM_CMD_SPATIAL_FILTER_PART_OPMODE_1 0x00010000 +typedef struct { + unsigned int cmd_id; + unsigned int ip_data_cfg_part1; + unsigned int ip_data_cfg_part2; + unsigned int ip_data_cfg_part3; + unsigned int ip_data_cfg_part4; + unsigned int op_data_cfg_part1; + unsigned int op_data_cfg_part2; + unsigned int op_data_cfg_part3; + unsigned int spatial_filter_part[32]; +} __attribute__((packed)) lpm_cmd_start; + + + +/* + * Command to stop LPM processing + */ + +#define LPM_CMD_IDLE 0x0001 +#define LPM_CMD_IDLE_LEN sizeof(lpm_cmd_idle) + +typedef struct { + unsigned int cmd_id; +} __attribute__((packed)) lpm_cmd_idle; + + +#endif diff --git a/arch/arm/mach-msm/include/mach/qdsp5/qdsp5lpmmsg.h b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5lpmmsg.h new file mode 100644 index 0000000000000..3d1039d6ba427 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5lpmmsg.h @@ -0,0 +1,80 @@ +#ifndef QDSP5LPMMSGI_H +#define QDSP5LPMMSGI_H + +/*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====* + + L P M I N T E R N A L M E S S A G E S + +GENERAL DESCRIPTION + This file contains defintions of format blocks of commands + that are accepted by LPM Task + +REFERENCES + None + +EXTERNALIZED FUNCTIONS + None + +Copyright(c) 1992 - 2008 by QUALCOMM, Incorporated. + +This software is licensed under the terms of the GNU General Public +License version 2, as published by the Free Software Foundation, and +may be copied, distributed, and modified under those terms. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +GNU General Public License for more details. + +*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====*/ +/*=========================================================================== + + EDIT HISTORY FOR FILE + +This section contains comments describing changes made to this file. +Notice that changes are listed in reverse chronological order. + +$Header: //source/qcom/qct/multimedia2/AdspSvc/7XXX/qdsp5cmd/video/qdsp5lpmmsg.h#2 $ $DateTime: 2008/07/30 10:50:23 $ $Author: pavanr $ +Revision History: + +when who what, where, why +-------- --- ---------------------------------------------------------- +06/12/08 sv initial version +===========================================================================*/ + +/* + * Message to acknowledge CMD_LPM_IDLE command + */ + +#define LPM_MSG_IDLE_ACK 0x0000 +#define LPM_MSG_IDLE_ACK_LEN sizeof(lpm_msg_idle_ack) + +typedef struct { +} __attribute__((packed)) lpm_msg_idle_ack; + + +/* + * Message to acknowledge CMD_LPM_START command + */ + + +#define LPM_MSG_START_ACK 0x0001 +#define LPM_MSG_START_ACK_LEN sizeof(lpm_msg_start_ack) + + +typedef struct { +} __attribute__((packed)) lpm_msg_start_ack; + + +/* + * Message to notify the ARM that LPM processing is complete + */ + +#define LPM_MSG_DONE 0x0002 +#define LPM_MSG_DONE_LEN sizeof(lpm_msg_done) + +typedef struct { +} __attribute__((packed)) lpm_msg_done; + + +#endif diff --git a/arch/arm/mach-msm/include/mach/qdsp5/qdsp5vdeccmdi.h b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5vdeccmdi.h new file mode 100644 index 0000000000000..3a32ee99c6e4d --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5vdeccmdi.h @@ -0,0 +1,235 @@ +#ifndef QDSP5VIDDECCMDI_H +#define QDSP5VIDDECCMDI_H + +/*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====* + + V I D E O D E C O D E R I N T E R N A L C O M M A N D S + +GENERAL DESCRIPTION + This file contains defintions of format blocks of commands + that are accepted by VIDDEC Task + +REFERENCES + None + +EXTERNALIZED FUNCTIONS + None + +Copyright(c) 1992 - 2008 by QUALCOMM, Incorporated. + +This software is licensed under the terms of the GNU General Public +License version 2, as published by the Free Software Foundation, and +may be copied, distributed, and modified under those terms. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====*/ +/*=========================================================================== + + EDIT HISTORY FOR FILE + +This section contains comments describing changes made to this file. +Notice that changes are listed in reverse chronological order. 
+ +$Header: //source/qcom/qct/multimedia2/AdspSvc/7XXX/qdsp5cmd/video/qdsp5vdeccmdi.h#2 $ $DateTime: 2008/07/30 10:50:23 $ $Author: pavanr $ +Revision History: + +when who what, where, why +-------- --- ---------------------------------------------------------- +05/10/08 ac initial version +===========================================================================*/ + + +/* + * Command to inform VIDDEC that new subframe packet is ready + */ + +#define VIDDEC_CMD_SUBFRAME_PKT 0x0000 +#define VIDDEC_CMD_SUBFRAME_PKT_LEN \ + sizeof(viddec_cmd_subframe_pkt) + +#define VIDDEC_CMD_SF_INFO_1_DM_DMA_STATS_EXCHANGE_FLAG_DM 0x0000 +#define VIDDEC_CMD_SF_INFO_1_DM_DMA_STATS_EXCHANGE_FLAG_DMA 0x0001 + +#define VIDDEC_CMD_SF_INFO_0_SUBFRAME_CONTI 0x0000 +#define VIDDEC_CMD_SF_INFO_0_SUBFRAME_FIRST 0x0001 +#define VIDDEC_CMD_SF_INFO_0_SUBFRAME_LAST 0x0002 +#define VIDDEC_CMD_SF_INFO_0_SUBFRAME_FIRST_AND_LAST 0x0003 + +#define VIDDEC_CMD_CODEC_SELECTION_WORD_MPEG_4 0x0000 +#define VIDDEC_CMD_CODEC_SELECTION_WORD_H_263_P0 0x0001 +#define VIDDEC_CMD_CODEC_SELECTION_WORD_H_264 0x0002 +#define VIDDEC_CMD_CODEC_SELECTION_WORD_H_263_p3 0x0003 +#define VIDDEC_CMD_CODEC_SELECTION_WORD_RV9 0x0004 +#define VIDDEC_CMD_CODEC_SELECTION_WORD_WMV9 0x0005 +#define VIDDEC_CMD_CODEC_SELECTION_WORD_SMCDB 0x0006 +#define VIDDEC_CMD_CODEC_SELECTION_WORD_QFRE 0x0007 +#define VIDDEC_CMD_CODEC_SELECTION_WORD_VLD 0x0008 + +typedef struct { + unsigned short cmd_id; + unsigned short packet_seq_number; + unsigned short codec_instance_id; + unsigned short subframe_packet_size_high; + unsigned short subframe_packet_size_low; + unsigned short subframe_packet_high; + unsigned short subframe_packet_low; + unsigned short subframe_packet_partition; + unsigned short statistics_packet_size_high; + unsigned short statistics_packet_size_low; + unsigned short statistics_packet_high; + unsigned short statistics_packet_low; + unsigned short statistics_partition; + unsigned short subframe_info_1; + unsigned short subframe_info_0; + unsigned short codec_selection_word; + unsigned short num_mbs; +} __attribute__((packed)) viddec_cmd_subframe_pkt; + + +/* + * Command to inform VIDDEC task that post processing is required for the frame + */ + +#define VIDDEC_CMD_PP_ENABLE 0x0001 +#define VIDDEC_CMD_PP_ENABLE_LEN \ + sizeof(viddec_cmd_pp_enable) + +#define VIDDEC_CMD_PP_INFO_0_DM_DMA_LS_EXCHANGE_FLAG_DM 0x0000 +#define VIDDEC_CMD_PP_INFO_0_DM_DMA_LS_EXCHANGE_FLAG_DMA 0x0001 + +typedef struct { + unsigned short cmd_id; + unsigned short packet_seq_num; + unsigned short codec_instance_id; + unsigned short postproc_info_0; + unsigned short codec_selection_word; + unsigned short pp_output_addr_high; + unsigned short pp_output_addr_low; + unsigned short postproc_info_1; + unsigned short load_sharing_packet_size_high; + unsigned short load_sharing_packet_size_low; + unsigned short load_sharing_packet_high; + unsigned short load_sharing_packet_low; + unsigned short load_sharing_partition; + unsigned short pp_param_0; + unsigned short pp_param_1; + unsigned short pp_param_2; + unsigned short pp_param_3; +} __attribute__((packed)) viddec_cmd_pp_enable; + + +/* + * FRAME Header Packet : It is at the start of new frame + */ + +#define VIDDEC_CMD_FRAME_HEADER_PACKET 0x0002 +#define VIDDEC_CMD_FRAME_HEADER_PACKET_LEN \ + sizeof(viddec_cmd_frame_header_packet) + +#define VIDDEC_CMD_FRAME_INFO_0_ERROR_SKIP 0x0000 +#define VIDDEC_CMD_FRAME_INFO_0_ERROR_BLACK 0x0800 + +typedef struct { + unsigned short packet_id; + unsigned short x_dimension; + unsigned short 
y_dimension; + unsigned short line_width; + unsigned short frame_info_0; + unsigned short frame_buffer_0_high; + unsigned short frame_buffer_0_low; + unsigned short frame_buffer_1_high; + unsigned short frame_buffer_1_low; + unsigned short frame_buffer_2_high; + unsigned short frame_buffer_2_low; + unsigned short frame_buffer_3_high; + unsigned short frame_buffer_3_low; + unsigned short frame_buffer_4_high; + unsigned short frame_buffer_4_low; + unsigned short frame_buffer_5_high; + unsigned short frame_buffer_5_low; + unsigned short frame_buffer_6_high; + unsigned short frame_buffer_6_low; + unsigned short frame_buffer_7_high; + unsigned short frame_buffer_7_low; + unsigned short frame_buffer_8_high; + unsigned short frame_buffer_8_low; + unsigned short frame_buffer_9_high; + unsigned short frame_buffer_9_low; + unsigned short frame_buffer_10_high; + unsigned short frame_buffer_10_low; + unsigned short frame_buffer_11_high; + unsigned short frame_buffer_11_low; + unsigned short frame_buffer_12_high; + unsigned short frame_buffer_12_low; + unsigned short frame_buffer_13_high; + unsigned short frame_buffer_13_low; + unsigned short frame_buffer_14_high; + unsigned short frame_buffer_14_low; + unsigned short frame_buffer_15_high; + unsigned short frame_buffer_15_low; + unsigned short output_frame_buffer_high; + unsigned short output_frame_buffer_low; + unsigned short end_of_packet_marker; +} __attribute__((packed)) viddec_cmd_frame_header_packet; + + +/* + * SLICE HEADER PACKET + * I-Slice and P-Slice + */ + +#define VIDDEC_CMD_SLICE_HEADER_PKT_ISLICE 0x0003 +#define VIDDEC_CMD_SLICE_HEADER_PKT_ISLICE_LEN \ + sizeof(viddec_cmd_slice_header_pkt_islice) + +#define VIDDEC_CMD_ISLICE_INFO_1_MOD_SLICE_TYPE_PSLICE 0x0000 +#define VIDDEC_CMD_ISLICE_INFO_1_MOD_SLICE_TYPE_BSLICE 0x0100 +#define VIDDEC_CMD_ISLICE_INFO_1_MOD_SLICE_TYPE_ISLICE 0x0200 +#define VIDDEC_CMD_ISLICE_INFO_1_MOD_SLICE_TYPE_SPSLICE 0x0300 +#define VIDDEC_CMD_ISLICE_INFO_1_MOD_SLICE_TYPE_SISLICE 0x0400 +#define VIDDEC_CMD_ISLICE_INFO_1_NOPADDING 0x0000 +#define VIDDEC_CMD_ISLICE_INFO_1_PADDING 0x0800 + +#define VIDDEC_CMD_ISLICE_EOP_MARKER 0x7FFF + +typedef struct { + unsigned short cmd_id; + unsigned short packet_id; + unsigned short slice_info_0; + unsigned short slice_info_1; + unsigned short slice_info_2; + unsigned short num_bytes_in_rbsp_high; + unsigned short num_bytes_in_rbsp_low; + unsigned short num_bytes_in_rbsp_consumed; + unsigned short end_of_packet_marker; +} __attribute__((packed)) viddec_cmd_slice_header_pkt_islice; + + +#define VIDDEC_CMD_SLICE_HEADER_PKT_PSLICE 0x0003 +#define VIDDEC_CMD_SLICE_HEADER_PKT_PSLICE_LEN \ + sizeof(viddec_cmd_slice_header_pkt_pslice) + + +typedef struct { + unsigned short cmd_id; + unsigned short packet_id; + unsigned short slice_info_0; + unsigned short slice_info_1; + unsigned short slice_info_2; + unsigned short slice_info_3; + unsigned short refidx_l0_map_tab_info_0; + unsigned short refidx_l0_map_tab_info_1; + unsigned short refidx_l0_map_tab_info_2; + unsigned short refidx_l0_map_tab_info_3; + unsigned short num_bytes_in_rbsp_high; + unsigned short num_bytes_in_rbsp_low; + unsigned short num_bytes_in_rbsp_consumed; + unsigned short end_of_packet_marker; +} __attribute__((packed)) viddec_cmd_slice_header_pkt_pslice; + + +#endif diff --git a/arch/arm/mach-msm/include/mach/qdsp5/qdsp5vdecmsg.h b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5vdecmsg.h new file mode 100644 index 0000000000000..c1744c1644dd2 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5vdecmsg.h @@ -0,0 
+1,107 @@ +#ifndef QDSP5VIDDECMSGI_H +#define QDSP5VIDDECMSGI_H + +/*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====* + + V I D E O D E C O D E R I N T E R N A L M E S S A G E S + +GENERAL DESCRIPTION + This file contains defintions of format blocks of messages + that are sent by VIDDEC Task + +REFERENCES + None + +EXTERNALIZED FUNCTIONS + None + +Copyright(c) 1992 - 2008 by QUALCOMM, Incorporated. + +This software is licensed under the terms of the GNU General Public +License version 2, as published by the Free Software Foundation, and +may be copied, distributed, and modified under those terms. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====*/ +/*=========================================================================== + + EDIT HISTORY FOR FILE + +This section contains comments describing changes made to this file. +Notice that changes are listed in reverse chronological order. + +$Header: //source/qcom/qct/multimedia2/AdspSvc/7XXX/qdsp5cmd/video/qdsp5vdecmsg.h#2 $ $DateTime: 2008/07/30 10:50:23 $ $Author: pavanr $ +Revision History: + +when who what, where, why +-------- --- ---------------------------------------------------------- +05/10/08 ac initial version +===========================================================================*/ + +/* + * Message to inform ARM which VDEC_SUBFRAME_PKT_CMD processed by VIDDEC TASK + */ + +#define VIDDEC_MSG_SUBF_DONE 0x0000 +#define VIDDEC_MSG_SUBF_DONE_LEN \ + sizeof(viddec_msg_subf_done) + +typedef struct { + unsigned short packet_seq_number; + unsigned short codec_instance_id; +} __attribute__((packed)) viddec_msg_subf_done; + + +/* + * Message to inform ARM one frame has been decoded + */ + +#define VIDDEC_MSG_FRAME_DONE 0x0001 +#define VIDDEC_MSG_FRAME_DONE_LEN \ + sizeof(viddec_msg_frame_done) + +typedef struct { + unsigned short packet_seq_number; + unsigned short codec_instance_id; +} __attribute__((packed)) viddec_msg_frame_done; + + +/* + * Message to inform ARM that post processing frame has been decoded + */ + +#define VIDDEC_MSG_PP_ENABLE_CMD_DONE 0x0002 +#define VIDDEC_MSG_PP_ENABLE_CMD_DONE_LEN \ + sizeof(viddec_msg_pp_enable_cmd_done) + +typedef struct { + unsigned short packet_seq_number; + unsigned short codec_instance_id; +} __attribute__((packed)) viddec_msg_pp_enable_cmd_done; + + +/* + * Message to inform ARM that one post processing frame has been decoded + */ + + +#define VIDDEC_MSG_PP_FRAME_DONE 0x0003 +#define VIDDEC_MSG_PP_FRAME_DONE_LEN \ + sizeof(viddec_msg_pp_frame_done) + +#define VIDDEC_MSG_DISP_WORTHY_DISP 0x0000 +#define VIDDEC_MSG_DISP_WORTHY_DISP_NONE 0xFFFF + + +typedef struct { + unsigned short packet_seq_number; + unsigned short codec_instance_id; + unsigned short display_worthy; +} __attribute__((packed)) viddec_msg_pp_frame_done; + + +#endif diff --git a/arch/arm/mach-msm/include/mach/qdsp5/qdsp5venccmdi.h b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5venccmdi.h new file mode 100755 index 0000000000000..819544d186da4 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5venccmdi.h @@ -0,0 +1,212 @@ +#ifndef QDSP5VIDENCCMDI_H +#define QDSP5VIDENCCMDI_H + +/*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====* + + V I D E O E N C O D E R I N T E R N A L C O M M A N D S + +GENERAL 
DESCRIPTION + This file contains defintions of format blocks of commands + that are accepted by VIDENC Task + +REFERENCES + None + +EXTERNALIZED FUNCTIONS + None + +Copyright(c) 2008 by QUALCOMM, Incorporated. +*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====*/ +/*=========================================================================== + + EDIT HISTORY FOR FILE + +This section contains comments describing changes made to this file. +Notice that changes are listed in reverse chronological order. + +Revision History: + +when who what, where, why +-------- --- ---------------------------------------------------------- +09/25/08 umeshp initial version +===========================================================================*/ + + #define VIDENC_CMD_CFG 0x0000 + #define VIDENC_CMD_ACTIVE 0x0001 + #define VIDENC_CMD_IDLE 0x0002 + #define VIDENC_CMD_FRAME_START 0x0003 + #define VIDENC_CMD_STATUS_QUERY 0x0004 + #define VIDENC_CMD_RC_CFG 0x0005 + #define VIDENC_CMD_DIS_CFG 0x0006 + #define VIDENC_CMD_DIS 0x0007 + #define VIDENC_CMD_INTRA_REFRESH 0x0008 + #define VIDENC_CMD_DIGITAL_ZOOM 0x0009 + + +/* + * Command to pass the frame message information to VIDENC + */ + + +#define VIDENC_CMD_FRAME_START_LEN \ + sizeof(videnc_cmd_frame_start) + +typedef struct { + unsigned short cmd_id; + unsigned short frame_info; + unsigned short frame_rho_budget_word_high; + unsigned short frame_rho_budget_word_low; + unsigned short input_luma_addr_high; + unsigned short input_luma_addr_low; + unsigned short input_chroma_addr_high; + unsigned short input_chroma_addr_low; + unsigned short ref_vop_buf_ptr_high; + unsigned short ref_vop_buf_ptr_low; + unsigned short enc_pkt_buf_ptr_high; + unsigned short enc_pkt_buf_ptr_low; + unsigned short enc_pkt_buf_size_high; + unsigned short enc_pkt_buf_size_low; + unsigned short unfilt_recon_vop_buf_ptr_high; + unsigned short unfilt_recon_vop_buf_ptr_low; + unsigned short filt_recon_vop_buf_ptr_high; + unsigned short filt_recon_vop_buf_ptr_low; +} __attribute__((packed)) videnc_cmd_frame_start; + +/* + * Command to pass the frame-level digital stabilization parameters to VIDENC + */ + + +#define VIDENC_CMD_DIS_LEN \ + sizeof(videnc_cmd_dis) + +typedef struct { + unsigned short cmd_id; + unsigned short vfe_out_prev_luma_addr_high; + unsigned short vfe_out_prev_luma_addr_low; + unsigned short stabilization_info; +} __attribute__((packed)) videnc_cmd_dis; + +/* + * Command to pass the codec related parameters to VIDENC + */ + + +#define VIDENC_CMD_CFG_LEN \ + sizeof(videnc_cmd_cfg) + +typedef struct { + unsigned short cmd_id; + unsigned short cfg_info_0; + unsigned short cfg_info_1; + unsigned short four_mv_threshold; + unsigned short ise_fse_mv_cost_fac; + unsigned short venc_frame_dim; + unsigned short venc_DM_partition; +} __attribute__((packed)) videnc_cmd_cfg; + +/* + * Command to start the video encoding + */ + + +#define VIDENC_CMD_ACTIVE_LEN \ + sizeof(videnc_cmd_active) + +typedef struct { + unsigned short cmd_id; +} __attribute__((packed)) videnc_cmd_active; + +/* + * Command to stop the video encoding + */ + + +#define VIDENC_CMD_IDLE_LEN \ + sizeof(videnc_cmd_idle) + +typedef struct { + unsigned short cmd_id; +} __attribute__((packed)) videnc_cmd_idle; + +/* + * Command to query staus of VIDENC + */ + + +#define VIDENC_CMD_STATUS_QUERY_LEN \ + sizeof(videnc_cmd_status_query) + +typedef struct { + unsigned short cmd_id; +} __attribute__((packed)) videnc_cmd_status_query; + +/* + * Command to set rate control for a frame + */ + + +#define 
VIDENC_CMD_RC_CFG_LEN \ + sizeof(videnc_cmd_rc_cfg) + +typedef struct { + unsigned short cmd_id; + unsigned short max_frame_qp_delta; + unsigned short max_min_frame_qp; +} __attribute__((packed)) videnc_cmd_rc_cfg; + +/* + * Command to set intra-refreshing + */ + + +#define VIDENC_CMD_INTRA_REFRESH_LEN \ + sizeof(videnc_cmd_intra_refresh) + +typedef struct { + unsigned short cmd_id; + unsigned short num_mb_refresh; + unsigned short mb_index[15]; +} __attribute__((packed)) videnc_cmd_intra_refresh; + +/* + * Command to pass digital zoom information to the VIDENC + */ +#define VIDENC_CMD_DIGITAL_ZOOM_LEN \ + sizeof(videnc_cmd_digital_zoom) + +typedef struct { + unsigned short cmd_id; + unsigned short digital_zoom_en; + unsigned short luma_frame_shift_X; + unsigned short luma_frame_shift_Y; + unsigned short up_ip_luma_rows; + unsigned short up_ip_luma_cols; + unsigned short up_ip_chroma_rows; + unsigned short up_ip_chroma_cols; + unsigned short luma_ph_incr_V_low; + unsigned short luma_ph_incr_V_high; + unsigned short luma_ph_incr_H_low; + unsigned short luma_ph_incr_H_high; + unsigned short chroma_ph_incr_V_low; + unsigned short chroma_ph_incr_V_high; + unsigned short chroma_ph_incr_H_low; + unsigned short chroma_ph_incr_H_high; +} __attribute__((packed)) videnc_cmd_digital_zoom; + +/* + * Command to configure digital stabilization parameters + */ + +#define VIDENC_CMD_DIS_CFG_LEN \ + sizeof(videnc_cmd_dis_cfg) + +typedef struct { + unsigned short cmd_id; + unsigned short image_stab_subf_start_row_col; + unsigned short image_stab_subf_dim; + unsigned short image_stab_info_0; +} __attribute__((packed)) videnc_cmd_dis_cfg; + + +#endif diff --git a/arch/arm/mach-msm/include/mach/qdsp5/qdsp5vfecmdi.h b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5vfecmdi.h new file mode 100644 index 0000000000000..f76d4e4263e93 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5vfecmdi.h @@ -0,0 +1,910 @@ +#ifndef QDSP5VFECMDI_H +#define QDSP5VFECMDI_H + +/*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====* + + V F E I N T E R N A L C O M M A N D S + +GENERAL DESCRIPTION + This file contains defintions of format blocks of commands + that are accepted by VFE Task + +REFERENCES + None + +EXTERNALIZED FUNCTIONS + None + +Copyright(c) 1992 - 2008 by QUALCOMM, Incorporated. + +This software is licensed under the terms of the GNU General Public +License version 2, as published by the Free Software Foundation, and +may be copied, distributed, and modified under those terms. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====*/ +/*=========================================================================== + + EDIT HISTORY FOR FILE + +This section contains comments describing changes made to this file. +Notice that changes are listed in reverse chronological order. 
+ +$Header: //source/qcom/qct/multimedia2/AdspSvc/7XXX/qdsp5cmd/video/qdsp5vfecmdi.h#2 $ $DateTime: 2008/07/30 10:50:23 $ $Author: pavanr $ +Revision History: + +when who what, where, why +-------- --- ---------------------------------------------------------- +06/12/08 sv initial version +===========================================================================*/ + +/****************************************************************************** + * Commands through vfeCommandScaleQueue + *****************************************************************************/ + +/* + * Command to program scaler for op1 . max op of scaler is VGA + */ + + +#define VFE_CMD_SCALE_OP1_CFG 0x0000 +#define VFE_CMD_SCALE_OP1_CFG_LEN \ + sizeof(vfe_cmd_scale_op1_cfg) + +#define VFE_CMD_SCALE_OP1_SEL_IP_SEL_Y_STANDARD 0x0000 +#define VFE_CMD_SCALE_OP1_SEL_IP_SEL_Y_CASCADED 0x0001 +#define VFE_CMD_SCALE_OP1_SEL_H_Y_SCALER_DIS 0x0000 +#define VFE_CMD_SCALE_OP1_SEL_H_Y_SCALER_ENA 0x0002 +#define VFE_CMD_SCALE_OP1_SEL_H_PP_Y_SCALER_DIS 0x0000 +#define VFE_CMD_SCALE_OP1_SEL_H_PP_Y_SCALER_ENA 0x0004 +#define VFE_CMD_SCALE_OP1_SEL_V_Y_SCALER_DIS 0x0000 +#define VFE_CMD_SCALE_OP1_SEL_V_Y_SCALER_ENA 0x0008 +#define VFE_CMD_SCALE_OP1_SEL_V_PP_Y_SCALER_DIS 0x0000 +#define VFE_CMD_SCALE_OP1_SEL_V_PP_Y_SCALER_ENA 0x0010 +#define VFE_CMD_SCALE_OP1_SEL_IP_SEL_CBCR_STANDARD 0x0000 +#define VFE_CMD_SCALE_OP1_SEL_IP_SEL_CBCR_CASCADED 0x0020 +#define VFE_CMD_SCALE_OP1_SEL_H_CBCR_SCALER_DIS 0x0000 +#define VFE_CMD_SCALE_OP1_SEL_H_CBCR_SCALER_ENA 0x0040 +#define VFE_CMD_SCALE_OP1_SEL_V_CBCR_SCALER_DIS 0x0000 +#define VFE_CMD_SCALE_OP1_SEL_V_CBCR_SCALER_ENA 0x0080 + +#define VFE_CMD_OP1_PP_Y_SCALER_CFG_PART1_DONT_LOAD_COEFFS 0x80000000 +#define VFE_CMD_OP1_PP_Y_SCALER_CFG_PART1_LOAD_COEFFS 0x80000000 + +typedef struct { + unsigned int cmd_id; + unsigned int scale_op1_sel; + unsigned int y_scaler_cfg_part1; + unsigned int y_scaler_cfg_part2; + unsigned int cbcr_scaler_cfg_part1; + unsigned int cbcr_scaler_cfg_part2; + unsigned int cbcr_scaler_cfg_part3; + unsigned int pp_y_scaler_cfg_part1; + unsigned int pp_y_scaler_cfg_part2; + unsigned int y_scaler_v_coeff_bank_part1[16]; + unsigned int y_scaler_v_coeff_bank_part2[16]; + unsigned int y_scaler_h_coeff_bank_part1[16]; + unsigned int y_scaler_h_coeff_bank_part2[16]; +} __attribute__((packed)) vfe_cmd_scale_op1_cfg; + + +/* + * Command to program scaler for op2 + */ + +#define VFE_CMD_SCALE_OP2_CFG 0x0001 +#define VFE_CMD_SCALE_OP2_CFG_LEN \ + sizeof(vfe_cmd_scale_op2_cfg) + +#define VFE_CMD_SCALE_OP2_SEL_IP_SEL_Y_STANDARD 0x0000 +#define VFE_CMD_SCALE_OP2_SEL_IP_SEL_Y_CASCADED 0x0001 +#define VFE_CMD_SCALE_OP2_SEL_H_Y_SCALER_DIS 0x0000 +#define VFE_CMD_SCALE_OP2_SEL_H_Y_SCALER_ENA 0x0002 +#define VFE_CMD_SCALE_OP2_SEL_H_PP_Y_SCALER_DIS 0x0000 +#define VFE_CMD_SCALE_OP2_SEL_H_PP_Y_SCALER_ENA 0x0004 +#define VFE_CMD_SCALE_OP2_SEL_V_Y_SCALER_DIS 0x0000 +#define VFE_CMD_SCALE_OP2_SEL_V_Y_SCALER_ENA 0x0008 +#define VFE_CMD_SCALE_OP2_SEL_V_PP_Y_SCALER_DIS 0x0000 +#define VFE_CMD_SCALE_OP2_SEL_V_PP_Y_SCALER_ENA 0x0010 +#define VFE_CMD_SCALE_OP2_SEL_IP_SEL_CBCR_STANDARD 0x0000 +#define VFE_CMD_SCALE_OP2_SEL_IP_SEL_CBCR_CASCADED 0x0020 +#define VFE_CMD_SCALE_OP2_SEL_H_CBCR_SCALER_DIS 0x0000 +#define VFE_CMD_SCALE_OP2_SEL_H_CBCR_SCALER_ENA 0x0040 +#define VFE_CMD_SCALE_OP2_SEL_V_CBCR_SCALER_DIS 0x0000 +#define VFE_CMD_SCALE_OP2_SEL_V_CBCR_SCALER_ENA 0x0080 + +#define VFE_CMD_OP2_PP_Y_SCALER_CFG_PART1_DONT_LOAD_COEFFS 0x80000000 +#define VFE_CMD_OP2_PP_Y_SCALER_CFG_PART1_LOAD_COEFFS 
0x80000000 + +typedef struct { + unsigned int cmd_id; + unsigned int scale_op2_sel; + unsigned int y_scaler_cfg_part1; + unsigned int y_scaler_cfg_part2; + unsigned int cbcr_scaler_cfg_part1; + unsigned int cbcr_scaler_cfg_part2; + unsigned int cbcr_scaler_cfg_part3; + unsigned int pp_y_scaler_cfg_part1; + unsigned int pp_y_scaler_cfg_part2; + unsigned int y_scaler_v_coeff_bank_part1[16]; + unsigned int y_scaler_v_coeff_bank_part2[16]; + unsigned int y_scaler_h_coeff_bank_part1[16]; + unsigned int y_scaler_h_coeff_bank_part2[16]; +} __attribute__((packed)) vfe_cmd_scale_op2_cfg; + + +/****************************************************************************** + * Commands through vfeCommandTableQueue + *****************************************************************************/ + +/* + * Command to program the AXI ip paths + */ + +#define VFE_CMD_AXI_IP_CFG 0x0000 +#define VFE_CMD_AXI_IP_CFG_LEN sizeof(vfe_cmd_axi_ip_cfg) + +#define VFE_CMD_IP_SEL_IP_FORMAT_8 0x0000 +#define VFE_CMD_IP_SEL_IP_FORMAT_10 0x0001 +#define VFE_CMD_IP_SEL_IP_FORMAT_12 0x0002 + +typedef struct { + unsigned int cmd_id; + unsigned int ip_sel; + unsigned int ip_cfg_part1; + unsigned int ip_cfg_part2; + unsigned int ip_unpack_cfg_part[6]; + unsigned int ip_buf_addr[8]; +} __attribute__ ((packed)) vfe_cmd_axi_ip_cfg; + + +/* + * Command to program axi op paths + */ + +#define VFE_CMD_AXI_OP_CFG 0x0001 +#define VFE_CMD_AXI_OP_CFG_LEN sizeof(vfe_cmd_axi_op_cfg) + +#define VFE_CMD_OP_SEL_OP1 0x0000 +#define VFE_CMD_OP_SEL_OP2 0x0001 +#define VFE_CMD_OP_SEL_OP1_OP2 0x0002 +#define VFE_CMD_OP_SEL_CTOA 0x0003 +#define VFE_CMD_OP_SEL_CTOA_OP1 0x0004 +#define VFE_CMD_OP_SEL_CTOA_OP2 0x0005 +#define VFE_CMD_OP_SEL_OP_FORMAT_8 0x0000 +#define VFE_CMD_OP_SEL_OP_FORMAT_10 0x0008 +#define VFE_CMD_OP_SEL_OP_FORMAT_12 0x0010 + + +typedef struct { + unsigned int cmd_id; + unsigned int op_sel; + unsigned int op1_y_cfg_part1; + unsigned int op1_y_cfg_part2; + unsigned int op1_cbcr_cfg_part1; + unsigned int op1_cbcr_cfg_part2; + unsigned int op2_y_cfg_part1; + unsigned int op2_y_cfg_part2; + unsigned int op2_cbcr_cfg_part1; + unsigned int op2_cbcr_cfg_part2; + unsigned int op1_buf1_addr[16]; + unsigned int op2_buf1_addr[16]; +} __attribute__((packed)) vfe_cmd_axi_op_cfg; + + + + +/* + * Command to program the roll off correction module + */ + +#define VFE_CMD_ROLLOFF_CFG 0x0002 +#define VFE_CMD_ROLLOFF_CFG_LEN \ + sizeof(vfe_cmd_rolloff_cfg) + + +typedef struct { + unsigned int cmd_id; + unsigned int correction_opt_center_pos; + unsigned int radius_square_entry[32]; + unsigned int red_table_entry[32]; + unsigned int green_table_entry[32]; + unsigned int blue_table_entry[32]; +} __attribute__((packed)) vfe_cmd_rolloff_cfg; + +/* + * Command to program RGB gamma table + */ + +#define VFE_CMD_RGB_GAMMA_CFG 0x0003 +#define VFE_CMD_RGB_GAMMA_CFG_LEN \ + sizeof(vfe_cmd_rgb_gamma_cfg) + +#define VFE_CMD_RGB_GAMMA_SEL_LINEAR 0x0000 +#define VFE_CMD_RGB_GAMMA_SEL_PW_LINEAR 0x0001 +typedef struct { + unsigned int cmd_id; + unsigned int rgb_gamma_sel; + unsigned int rgb_gamma_entry[256]; +} __attribute__((packed)) vfe_cmd_rgb_gamma_cfg; + + +/* + * Command to program luma gamma table for the noise reduction path + */ + +#define VFE_CMD_Y_GAMMA_CFG 0x0004 +#define VFE_CMD_Y_GAMMA_CFG_LEN \ + sizeof(vfe_cmd_y_gamma_cfg) + +#define VFE_CMD_Y_GAMMA_SEL_LINEAR 0x0000 +#define VFE_CMD_Y_GAMMA_SEL_PW_LINEAR 0x0001 + +typedef struct { + unsigned int cmd_id; + unsigned int y_gamma_sel; + unsigned int y_gamma_entry[256]; +} __attribute__((packed)) 
vfe_cmd_y_gamma_cfg; + + + +/****************************************************************************** + * Commands through vfeCommandQueue + *****************************************************************************/ + +/* + * Command to reset the VFE to a known good state.All previously programmed + * Params will be lost + */ + + +#define VFE_CMD_RESET 0x0000 +#define VFE_CMD_RESET_LEN sizeof(vfe_cmd_reset) + + +typedef struct { + unsigned short cmd_id; +} __attribute__((packed)) vfe_cmd_reset; + + +/* + * Command to start VFE processing based on the config params + */ + + +#define VFE_CMD_START 0x0001 +#define VFE_CMD_START_LEN sizeof(vfe_cmd_start) + +#define VFE_CMD_STARTUP_PARAMS_SRC_CAMIF 0x0000 +#define VFE_CMD_STARTUP_PARAMS_SRC_AXI 0x0001 +#define VFE_CMD_STARTUP_PARAMS_MODE_CONTINUOUS 0x0000 +#define VFE_CMD_STARTUP_PARAMS_MODE_SNAPSHOT 0x0002 + +#define VFE_CMD_IMAGE_PL_BLACK_LVL_CORR_DIS 0x0000 +#define VFE_CMD_IMAGE_PL_BLACK_LVL_CORR_ENA 0x0001 +#define VFE_CMD_IMAGE_PL_ROLLOFF_CORR_DIS 0x0000 +#define VFE_CMD_IMAGE_PL_ROLLOFF_CORR_ENA 0x0002 +#define VFE_CMD_IMAGE_PL_WHITE_BAL_DIS 0x0000 +#define VFE_CMD_IMAGE_PL_WHITE_BAL_ENA 0x0004 +#define VFE_CMD_IMAGE_PL_RGB_GAMMA_DIS 0x0000 +#define VFE_CMD_IMAGE_PL_RGB_GAMMA_ENA 0x0008 +#define VFE_CMD_IMAGE_PL_LUMA_NOISE_RED_PATH_DIS 0x0000 +#define VFE_CMD_IMAGE_PL_LUMA_NOISE_RED_PATH_ENA 0x0010 +#define VFE_CMD_IMAGE_PL_ADP_FILTER_DIS 0x0000 +#define VFE_CMD_IMAGE_PL_ADP_FILTER_ENA 0x0020 +#define VFE_CMD_IMAGE_PL_CHROMA_SAMP_DIS 0x0000 +#define VFE_CMD_IMAGE_PL_CHROMA_SAMP_ENA 0x0040 + + +typedef struct { + unsigned int cmd_id; + unsigned int startup_params; + unsigned int image_pipeline; + unsigned int frame_dimension; +} __attribute__((packed)) vfe_cmd_start; + + +/* + * Command to halt all processing + */ + +#define VFE_CMD_STOP 0x0002 +#define VFE_CMD_STOP_LEN sizeof(vfe_cmd_stop) + +typedef struct { + unsigned short cmd_id; +} __attribute__((packed)) vfe_cmd_stop; + + +/* + * Command to commit the params that have been programmed to take + * effect on the next frame + */ + +#define VFE_CMD_UPDATE 0x0003 +#define VFE_CMD_UPDATE_LEN sizeof(vfe_cmd_update) + + +typedef struct { + unsigned short cmd_id; +} __attribute__((packed)) vfe_cmd_update; + + +/* + * Command to program CAMIF module + */ + +#define VFE_CMD_CAMIF_CFG 0x0004 +#define VFE_CMD_CAMIF_CFG_LEN sizeof(vfe_cmd_camif_cfg) + +#define VFE_CMD_CFG_VSYNC_SYNC_EDGE_HIGH 0x0000 +#define VFE_CMD_CFG_VSYNC_SYNC_EDGE_LOW 0x0002 +#define VFE_CMD_CFG_HSYNC_SYNC_EDGE_HIGH 0x0000 +#define VFE_CMD_CFG_HSYNC_SYNC_EDGE_LOW 0x0004 +#define VFE_CMD_CFG_SYNC_MODE_APS 0x0000 +#define VFE_CMD_CFG_SYNC_MODE_EFS 0X0008 +#define VFE_CMD_CFG_SYNC_MODE_ELS 0x0010 +#define VFE_CMD_CFG_SYNC_MODE_RVD 0x0018 +#define VFE_CMD_CFG_VFE_SUBSAMP_EN_DIS 0x0000 +#define VFE_CMD_CFG_VFE_SUBSAMP_EN_ENA 0x0020 +#define VFE_CMD_CFG_BUS_SUBSAMP_EN_DIS 0x0000 +#define VFE_CMD_CFG_BUS_SUBSAMP_EN_ENA 0x0080 +#define VFE_CMD_CFG_IRQ_SUBSAMP_EN_DIS 0x0000 +#define VFE_CMD_CFG_IRQ_SUBSAMP_EN_ENA 0x0800 + +#define VFE_CMD_SUBSAMP2_CFG_PIXEL_SKIP_16 0x0000 +#define VFE_CMD_SUBSAMP2_CFG_PIXEL_SKIP_12 0x0010 + +#define VFE_CMD_EPOCH_IRQ_1_DIS 0x0000 +#define VFE_CMD_EPOCH_IRQ_1_ENA 0x4000 +#define VFE_CMD_EPOCH_IRQ_2_DIS 0x0000 +#define VFE_CMD_EPOCH_IRQ_2_ENA 0x8000 + +typedef struct { + unsigned int cmd_id; + unsigned int cfg; + unsigned int efs_cfg; + unsigned int frame_cfg; + unsigned int window_width_cfg; + unsigned int window_height_cfg; + unsigned int subsamp1_cfg; + unsigned int subsamp2_cfg; + 
unsigned int epoch_irq; +} __attribute__((packed)) vfe_cmd_camif_cfg; + + + +/* + * Command to program the black level module + */ + +#define VFE_CMD_BLACK_LVL_CFG 0x0005 +#define VFE_CMD_BLACK_LVL_CFG_LEN sizeof(vfe_cmd_black_lvl_cfg) + +#define VFE_CMD_BL_SEL_MANUAL 0x0000 +#define VFE_CMD_BL_SEL_AUTO 0x0001 + +typedef struct { + unsigned int cmd_id; + unsigned int black_lvl_sel; + unsigned int cfg_part[3]; +} __attribute__((packed)) vfe_cmd_black_lvl_cfg; + + +/* + * Command to program the active region by cropping the region of interest + */ + +#define VFE_CMD_ACTIVE_REGION_CFG 0x0006 +#define VFE_CMD_ACTIVE_REGION_CFG_LEN \ + sizeof(vfe_cmd_active_region_cfg) + + +typedef struct { + unsigned int cmd_id; + unsigned int cfg_part1; + unsigned int cfg_part2; +} __attribute__((packed)) vfe_cmd_active_region_cfg; + + + +/* + * Command to program the defective pixel correction(DPC) , + * adaptive bayer filter (ABF) and demosaic modules + */ + +#define VFE_CMD_DEMOSAIC_CFG 0x0007 +#define VFE_CMD_DEMOSAIC_CFG_LEN sizeof(vfe_cmd_demosaic_cfg) + +#define VFE_CMD_DEMOSAIC_PART1_ABF_EN_DIS 0x0000 +#define VFE_CMD_DEMOSAIC_PART1_ABF_EN_ENA 0x0001 +#define VFE_CMD_DEMOSAIC_PART1_DPC_EN_DIS 0x0000 +#define VFE_CMD_DEMOSAIC_PART1_DPC_EN_ENA 0x0002 +#define VFE_CMD_DEMOSAIC_PART1_FORCE_ABF_OFF 0x0000 +#define VFE_CMD_DEMOSAIC_PART1_FORCE_ABF_ON 0x0004 +#define VFE_CMD_DEMOSAIC_PART1_SLOPE_SHIFT_1 0x00000000 +#define VFE_CMD_DEMOSAIC_PART1_SLOPE_SHIFT_2 0x10000000 +#define VFE_CMD_DEMOSAIC_PART1_SLOPE_SHIFT_4 0x20000000 +#define VFE_CMD_DEMOSAIC_PART1_SLOPE_SHIFT_8 0x30000000 +#define VFE_CMD_DEMOSAIC_PART1_SLOPE_SHIFT_1_2 0x50000000 +#define VFE_CMD_DEMOSAIC_PART1_SLOPE_SHIFT_1_4 0x60000000 +#define VFE_CMD_DEMOSAIC_PART1_SLOPE_SHIFT_1_8 0x70000000 + +typedef struct { + unsigned int cmd_id; + unsigned int demosaic_part1; + unsigned int demosaic_part2; + unsigned int demosaic_part3; + unsigned int demosaic_part4; + unsigned int demosaic_part5; +} __attribute__((packed)) vfe_cmd_demosaic_cfg; + + +/* + * Command to program the ip format + */ + +#define VFE_CMD_IP_FORMAT_CFG 0x0008 +#define VFE_CMD_IP_FORMAT_CFG_LEN \ + sizeof(vfe_cmd_ip_format_cfg) + +#define VFE_CMD_IP_FORMAT_SEL_RGRG 0x0000 +#define VFE_CMD_IP_FORMAT_SEL_GRGR 0x0001 +#define VFE_CMD_IP_FORMAT_SEL_BGBG 0x0002 +#define VFE_CMD_IP_FORMAT_SEL_GBGB 0x0003 +#define VFE_CMD_IP_FORMAT_SEL_YCBYCR 0x0004 +#define VFE_CMD_IP_FORMAT_SEL_YCRYCB 0x0005 +#define VFE_CMD_IP_FORMAT_SEL_CBYCRY 0x0006 +#define VFE_CMD_IP_FORMAT_SEL_CRYCBY 0x0007 +#define VFE_CMD_IP_FORMAT_SEL_NO_CHROMA 0x0000 +#define VFE_CMD_IP_FORMAT_SEL_CHROMA 0x0008 + + +typedef struct { + unsigned int cmd_id; + unsigned int ip_format_sel; + unsigned int balance_gains_part1; + unsigned int balance_gains_part2; +} __attribute__((packed)) vfe_cmd_ip_format_cfg; + + + +/* + * Command to program max and min allowed op values + */ + +#define VFE_CMD_OP_CLAMP_CFG 0x0009 +#define VFE_CMD_OP_CLAMP_CFG_LEN \ + sizeof(vfe_cmd_op_clamp_cfg) + +typedef struct { + unsigned int cmd_id; + unsigned int op_clamp_max; + unsigned int op_clamp_min; +} __attribute__((packed)) vfe_cmd_op_clamp_cfg; + + +/* + * Command to program chroma sub sample module + */ + +#define VFE_CMD_CHROMA_SUBSAMPLE_CFG 0x000A +#define VFE_CMD_CHROMA_SUBSAMPLE_CFG_LEN \ + sizeof(vfe_cmd_chroma_subsample_cfg) + +#define VFE_CMD_CHROMA_SUBSAMP_SEL_H_INTERESTIAL_SAMPS 0x0000 +#define VFE_CMD_CHROMA_SUBSAMP_SEL_H_COSITED_SAMPS 0x0001 +#define VFE_CMD_CHROMA_SUBSAMP_SEL_V_INTERESTIAL_SAMPS 0x0000 +#define 
VFE_CMD_CHROMA_SUBSAMP_SEL_V_COSITED_SAMPS 0x0002 +#define VFE_CMD_CHROMA_SUBSAMP_SEL_H_SUBSAMP_DIS 0x0000 +#define VFE_CMD_CHROMA_SUBSAMP_SEL_H_SUBSAMP_ENA 0x0004 +#define VFE_CMD_CHROMA_SUBSAMP_SEL_V_SUBSAMP_DIS 0x0000 +#define VFE_CMD_CHROMA_SUBSAMP_SEL_V_SUBSAMP_ENA 0x0008 + +typedef struct { + unsigned int cmd_id; + unsigned int chroma_subsamp_sel; +} __attribute__((packed)) vfe_cmd_chroma_subsample_cfg; + + +/* + * Command to program the white balance module + */ + +#define VFE_CMD_WHITE_BALANCE_CFG 0x000B +#define VFE_CMD_WHITE_BALANCE_CFG_LEN \ + sizeof(vfe_cmd_white_balance_cfg) + +typedef struct { + unsigned int cmd_id; + unsigned int white_balance_gains; +} __attribute__((packed)) vfe_cmd_white_balance_cfg; + + +/* + * Command to program the color processing module + */ + +#define VFE_CMD_COLOR_PROCESS_CFG 0x000C +#define VFE_CMD_COLOR_PROCESS_CFG_LEN \ + sizeof(vfe_cmd_color_process_cfg) + +#define VFE_CMD_COLOR_CORRE_PART7_Q7_FACTORS 0x0000 +#define VFE_CMD_COLOR_CORRE_PART7_Q8_FACTORS 0x0001 +#define VFE_CMD_COLOR_CORRE_PART7_Q9_FACTORS 0x0002 +#define VFE_CMD_COLOR_CORRE_PART7_Q10_FACTORS 0x0003 + +typedef struct { + unsigned int cmd_id; + unsigned int color_correction_part1; + unsigned int color_correction_part2; + unsigned int color_correction_part3; + unsigned int color_correction_part4; + unsigned int color_correction_part5; + unsigned int color_correction_part6; + unsigned int color_correction_part7; + unsigned int chroma_enhance_part1; + unsigned int chroma_enhance_part2; + unsigned int chroma_enhance_part3; + unsigned int chroma_enhance_part4; + unsigned int chroma_enhance_part5; + unsigned int luma_calc_part1; + unsigned int luma_calc_part2; +} __attribute__((packed)) vfe_cmd_color_process_cfg; + + +/* + * Command to program adaptive filter module + */ + +#define VFE_CMD_ADP_FILTER_CFG 0x000D +#define VFE_CMD_ADP_FILTER_CFG_LEN \ + sizeof(vfe_cmd_adp_filter_cfg) + +#define VFE_CMD_ASF_CFG_PART_SMOOTH_FILTER_DIS 0x0000 +#define VFE_CMD_ASF_CFG_PART_SMOOTH_FILTER_ENA 0x0001 +#define VFE_CMD_ASF_CFG_PART_NO_SHARP_MODE 0x0000 +#define VFE_CMD_ASF_CFG_PART_SINGLE_FILTER 0x0002 +#define VFE_CMD_ASF_CFG_PART_DUAL_FILTER 0x0004 +#define VFE_CMD_ASF_CFG_PART_SHARP_MODE 0x0007 + +typedef struct { + unsigned int cmd_id; + unsigned int asf_cfg_part[7]; +} __attribute__((packed)) vfe_cmd_adp_filter_cfg; + + +/* + * Command to program for frame skip pattern for op1 and op2 + */ + +#define VFE_CMD_FRAME_SKIP_CFG 0x000E +#define VFE_CMD_FRAME_SKIP_CFG_LEN \ + sizeof(vfe_cmd_frame_skip_cfg) + +typedef struct { + unsigned int cmd_id; + unsigned int frame_skip_pattern_op1; + unsigned int frame_skip_pattern_op2; +} __attribute__((packed)) vfe_cmd_frame_skip_cfg; + + +/* + * Command to program field-of-view crop for digital zoom + */ + +#define VFE_CMD_FOV_CROP 0x000F +#define VFE_CMD_FOV_CROP_LEN sizeof(vfe_cmd_fov_crop) + +typedef struct { + unsigned int cmd_id; + unsigned int fov_crop_part1; + unsigned int fov_crop_part2; +} __attribute__((packed)) vfe_cmd_fov_crop; + + + +/* + * Command to program auto focus(AF) statistics module + */ + +#define VFE_CMD_STATS_AUTOFOCUS_CFG 0x0010 +#define VFE_CMD_STATS_AUTOFOCUS_CFG_LEN \ + sizeof(vfe_cmd_stats_autofocus_cfg) + +#define VFE_CMD_AF_STATS_SEL_STATS_DIS 0x0000 +#define VFE_CMD_AF_STATS_SEL_STATS_ENA 0x0001 +#define VFE_CMD_AF_STATS_SEL_PRI_FIXED 0x0000 +#define VFE_CMD_AF_STATS_SEL_PRI_VAR 0x0002 +#define VFE_CMD_AF_STATS_CFG_PART_METRIC_SUM 0x00000000 +#define VFE_CMD_AF_STATS_CFG_PART_METRIC_MAX 0x00200000 + +typedef struct { + 
unsigned int cmd_id; + unsigned int af_stats_sel; + unsigned int af_stats_cfg_part[8]; + unsigned int af_stats_op_buf_hdr; + unsigned int af_stats_op_buf[3]; +} __attribute__((packed)) vfe_cmd_stats_autofocus_cfg; + + +/* + * Command to program White balance(wb) and exposure (exp) + * statistics module + */ + +#define VFE_CMD_STATS_WB_EXP_CFG 0x0011 +#define VFE_CMD_STATS_WB_EXP_CFG_LEN \ + sizeof(vfe_cmd_stats_wb_exp_cfg) + +#define VFE_CMD_WB_EXP_STATS_SEL_STATS_DIS 0x0000 +#define VFE_CMD_WB_EXP_STATS_SEL_STATS_ENA 0x0001 +#define VFE_CMD_WB_EXP_STATS_SEL_PRI_FIXED 0x0000 +#define VFE_CMD_WB_EXP_STATS_SEL_PRI_VAR 0x0002 + +#define VFE_CMD_WB_EXP_STATS_CFG_PART1_EXP_REG_8_8 0x0000 +#define VFE_CMD_WB_EXP_STATS_CFG_PART1_EXP_REG_16_16 0x0001 +#define VFE_CMD_WB_EXP_STATS_CFG_PART1_EXP_SREG_8_8 0x0000 +#define VFE_CMD_WB_EXP_STATS_CFG_PART1_EXP_SREG_4_4 0x0002 + +typedef struct { + unsigned int cmd_id; + unsigned int wb_exp_stats_sel; + unsigned int wb_exp_stats_cfg_part1; + unsigned int wb_exp_stats_cfg_part2; + unsigned int wb_exp_stats_cfg_part3; + unsigned int wb_exp_stats_cfg_part4; + unsigned int wb_exp_stats_op_buf_hdr; + unsigned int wb_exp_stats_op_buf[3]; +} __attribute__((packed)) vfe_cmd_stats_wb_exp_cfg; + + +/* + * Command to program histogram(hg) stats module + */ + +#define VFE_CMD_STATS_HG_CFG 0x0012 +#define VFE_CMD_STATS_HG_CFG_LEN \ + sizeof(vfe_cmd_stats_hg_cfg) + +#define VFE_CMD_HG_STATS_SEL_PRI_FIXED 0x0000 +#define VFE_CMD_HG_STATS_SEL_PRI_VAR 0x0002 + +typedef struct { + unsigned int cmd_id; + unsigned int hg_stats_sel; + unsigned int hg_stats_cfg_part1; + unsigned int hg_stats_cfg_part2; + unsigned int hg_stats_op_buf_hdr; + unsigned int hg_stats_op_buf; +} __attribute__((packed)) vfe_cmd_stats_hg_cfg; + + +/* + * Command to acknowledge last MSG_VFE_OP1 message + */ + +#define VFE_CMD_OP1_ACK 0x0013 +#define VFE_CMD_OP1_ACK_LEN sizeof(vfe_cmd_op1_ack) + +typedef struct { + unsigned int cmd_id; + unsigned int op1_buf_y_addr; + unsigned int op1_buf_cbcr_addr; +} __attribute__((packed)) vfe_cmd_op1_ack; + + + +/* + * Command to acknowledge last MSG_VFE_OP2 message + */ + +#define VFE_CMD_OP2_ACK 0x0014 +#define VFE_CMD_OP2_ACK_LEN sizeof(vfe_cmd_op2_ack) + +typedef struct { + unsigned int cmd_id; + unsigned int op2_buf_y_addr; + unsigned int op2_buf_cbcr_addr; +} __attribute__((packed)) vfe_cmd_op2_ack; + + + +/* + * Command to acknowledge MSG_VFE_STATS_AUTOFOCUS msg + */ + +#define VFE_CMD_STATS_AF_ACK 0x0015 +#define VFE_CMD_STATS_AF_ACK_LEN sizeof(vfe_cmd_stats_af_ack) + + +typedef struct { + unsigned int cmd_id; + unsigned int af_stats_op_buf; +} __attribute__((packed)) vfe_cmd_stats_af_ack; + + +/* + * Command to acknowledge MSG_VFE_STATS_WB_EXP msg + */ + +#define VFE_CMD_STATS_WB_EXP_ACK 0x0016 +#define VFE_CMD_STATS_WB_EXP_ACK_LEN sizeof(vfe_cmd_stats_wb_exp_ack) + +typedef struct { + unsigned int cmd_id; + unsigned int wb_exp_stats_op_buf; +} __attribute__((packed)) vfe_cmd_stats_wb_exp_ack; + + +/* + * Command to acknowledge MSG_VFE_EPOCH1 message + */ + +#define VFE_CMD_EPOCH1_ACK 0x0017 +#define VFE_CMD_EPOCH1_ACK_LEN sizeof(vfe_cmd_epoch1_ack) + +typedef struct { + unsigned short cmd_id; +} __attribute__((packed)) vfe_cmd_epoch1_ack; + + +/* + * Command to acknowledge MSG_VFE_EPOCH2 message + */ + +#define VFE_CMD_EPOCH2_ACK 0x0018 +#define VFE_CMD_EPOCH2_ACK_LEN sizeof(vfe_cmd_epoch2_ack) + +typedef struct { + unsigned short cmd_id; +} __attribute__((packed)) vfe_cmd_epoch2_ack; + + + +/* + * Command to configure, enable or disable synchronous timer1 + 
*/ + +#define VFE_CMD_SYNC_TIMER1_CFG 0x0019 +#define VFE_CMD_SYNC_TIMER1_CFG_LEN \ + sizeof(vfe_cmd_sync_timer1_cfg) + +#define VFE_CMD_SYNC_T1_CFG_PART1_TIMER_DIS 0x0000 +#define VFE_CMD_SYNC_T1_CFG_PART1_TIMER_ENA 0x0001 +#define VFE_CMD_SYNC_T1_CFG_PART1_POL_HIGH 0x0000 +#define VFE_CMD_SYNC_T1_CFG_PART1_POL_LOW 0x0002 + +typedef struct { + unsigned int cmd_id; + unsigned int sync_t1_cfg_part1; + unsigned int sync_t1_h_sync_countdown; + unsigned int sync_t1_pclk_countdown; + unsigned int sync_t1_duration; +} __attribute__((packed)) vfe_cmd_sync_timer1_cfg; + + +/* + * Command to configure, enable or disable synchronous timer2 + */ + +#define VFE_CMD_SYNC_TIMER2_CFG 0x001A +#define VFE_CMD_SYNC_TIMER2_CFG_LEN \ + sizeof(vfe_cmd_sync_timer2_cfg) + +#define VFE_CMD_SYNC_T2_CFG_PART1_TIMER_DIS 0x0000 +#define VFE_CMD_SYNC_T2_CFG_PART1_TIMER_ENA 0x0001 +#define VFE_CMD_SYNC_T2_CFG_PART1_POL_HIGH 0x0000 +#define VFE_CMD_SYNC_T2_CFG_PART1_POL_LOW 0x0002 + +typedef struct { + unsigned int cmd_id; + unsigned int sync_t2_cfg_part1; + unsigned int sync_t2_h_sync_countdown; + unsigned int sync_t2_pclk_countdown; + unsigned int sync_t2_duration; +} __attribute__((packed)) vfe_cmd_sync_timer2_cfg; + + +/* + * Command to configure and start asynchronous timer1 + */ + +#define VFE_CMD_ASYNC_TIMER1_START 0x001B +#define VFE_CMD_ASYNC_TIMER1_START_LEN \ + sizeof(vfe_cmd_async_timer1_start) + +#define VFE_CMD_ASYNC_T1_POLARITY_A_HIGH 0x0000 +#define VFE_CMD_ASYNC_T1_POLARITY_A_LOW 0x0001 +#define VFE_CMD_ASYNC_T1_POLARITY_B_HIGH 0x0000 +#define VFE_CMD_ASYNC_T1_POLARITY_B_LOW 0x0002 + +typedef struct { + unsigned int cmd_id; + unsigned int async_t1a_cfg; + unsigned int async_t1b_cfg; + unsigned int async_t1_polarity; +} __attribute__((packed)) vfe_cmd_async_timer1_start; + + +/* + * Command to configure and start asynchronous timer2 + */ + +#define VFE_CMD_ASYNC_TIMER2_START 0x001C +#define VFE_CMD_ASYNC_TIMER2_START_LEN \ + sizeof(vfe_cmd_async_timer2_start) + +#define VFE_CMD_ASYNC_T2_POLARITY_A_HIGH 0x0000 +#define VFE_CMD_ASYNC_T2_POLARITY_A_LOW 0x0001 +#define VFE_CMD_ASYNC_T2_POLARITY_B_HIGH 0x0000 +#define VFE_CMD_ASYNC_T2_POLARITY_B_LOW 0x0002 + +typedef struct { + unsigned int cmd_id; + unsigned int async_t2a_cfg; + unsigned int async_t2b_cfg; + unsigned int async_t2_polarity; +} __attribute__((packed)) vfe_cmd_async_timer2_start; + + +/* + * Command to program partial configurations of auto focus(af) + */ + +#define VFE_CMD_STATS_AF_UPDATE 0x001D +#define VFE_CMD_STATS_AF_UPDATE_LEN \ + sizeof(vfe_cmd_stats_af_update) + +#define VFE_CMD_AF_UPDATE_PART1_WINDOW_ONE 0x00000000 +#define VFE_CMD_AF_UPDATE_PART1_WINDOW_MULTI 0x80000000 + +typedef struct { + unsigned int cmd_id; + unsigned int af_update_part1; + unsigned int af_update_part2; +} __attribute__((packed)) vfe_cmd_stats_af_update; + + +/* + * Command to program partial cfg of wb and exp + */ + +#define VFE_CMD_STATS_WB_EXP_UPDATE 0x001E +#define VFE_CMD_STATS_WB_EXP_UPDATE_LEN \ + sizeof(vfe_cmd_stats_wb_exp_update) + +#define VFE_CMD_WB_EXP_UPDATE_PART1_REGIONS_8_8 0x0000 +#define VFE_CMD_WB_EXP_UPDATE_PART1_REGIONS_16_16 0x0001 +#define VFE_CMD_WB_EXP_UPDATE_PART1_SREGIONS_8_8 0x0000 +#define VFE_CMD_WB_EXP_UPDATE_PART1_SREGIONS_4_4 0x0002 + +typedef struct { + unsigned int cmd_id; + unsigned int wb_exp_update_part1; + unsigned int wb_exp_update_part2; + unsigned int wb_exp_update_part3; + unsigned int wb_exp_update_part4; +} __attribute__((packed)) vfe_cmd_stats_wb_exp_update; + + + +/* + * Command to reprogram the CAMIF FRAME CONFIG 
settings + */ + +#define VFE_CMD_UPDATE_CAMIF_FRAME_CFG 0x001F +#define VFE_CMD_UPDATE_CAMIF_FRAME_CFG_LEN \ + sizeof(vfe_cmd_update_camif_frame_cfg) + +typedef struct { + unsigned int cmd_id; + unsigned int camif_frame_cfg; +} __attribute__((packed)) vfe_cmd_update_camif_frame_cfg; + + +#endif diff --git a/arch/arm/mach-msm/include/mach/qdsp5/qdsp5vfemsg.h b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5vfemsg.h new file mode 100644 index 0000000000000..0053cfb65ba18 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5vfemsg.h @@ -0,0 +1,290 @@ +#ifndef QDSP5VFEMSGI_H +#define QDSP5VFEMSGI_H + +/*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====* + + V F E I N T E R N A L M E S S A G E S + +GENERAL DESCRIPTION + This file contains definitions of format blocks of commands + that are sent by the VFE Task + +REFERENCES + None + +EXTERNALIZED FUNCTIONS + None + +Copyright(c) 1992 - 2008 by QUALCOMM, Incorporated. + +This software is licensed under the terms of the GNU General Public +License version 2, as published by the Free Software Foundation, and +may be copied, distributed, and modified under those terms. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====*/ +/*=========================================================================== + + EDIT HISTORY FOR FILE + +This section contains comments describing changes made to this file. +Notice that changes are listed in reverse chronological order. + +$Header: //source/qcom/qct/multimedia2/AdspSvc/7XXX/qdsp5cmd/video/qdsp5vfemsg.h#2 $ $DateTime: 2008/07/30 10:50:23 $ $Author: pavanr $ +Revision History: + +when who what, where, why +-------- --- ---------------------------------------------------------- +06/12/08 sv initial version +===========================================================================*/ + + +/* + * Message to acknowledge CMD_VFE_RESET command + */ + +#define VFE_MSG_RESET_ACK 0x0000 +#define VFE_MSG_RESET_ACK_LEN sizeof(vfe_msg_reset_ack) + +typedef struct { +} __attribute__((packed)) vfe_msg_reset_ack; + + +/* + * Message to acknowledge CMD_VFE_START command + */ + +#define VFE_MSG_START_ACK 0x0001 +#define VFE_MSG_START_ACK_LEN sizeof(vfe_msg_start_ack) + +typedef struct { +} __attribute__((packed)) vfe_msg_start_ack; + +/* + * Message to acknowledge CMD_VFE_STOP command + */ + +#define VFE_MSG_STOP_ACK 0x0002 +#define VFE_MSG_STOP_ACK_LEN sizeof(vfe_msg_stop_ack) + +typedef struct { +} __attribute__((packed)) vfe_msg_stop_ack; + + +/* + * Message to acknowledge CMD_VFE_UPDATE command + */ + +#define VFE_MSG_UPDATE_ACK 0x0003 +#define VFE_MSG_UPDATE_ACK_LEN sizeof(vfe_msg_update_ack) + +typedef struct { +} __attribute__((packed)) vfe_msg_update_ack; + + +/* + * Message to notify the ARM that snapshot processing is complete + * and that the VFE is now STATE_VFE_IDLE + */ + +#define VFE_MSG_SNAPSHOT_DONE 0x0004 +#define VFE_MSG_SNAPSHOT_DONE_LEN \ + sizeof(vfe_msg_snapshot_done) + +typedef struct { +} __attribute__((packed)) vfe_msg_snapshot_done; + + + +/* + * Message to notify ARM that illegal cmd was received and + * system is in the IDLE state + */ + +#define VFE_MSG_ILLEGAL_CMD 0x0005 +#define VFE_MSG_ILLEGAL_CMD_LEN \ + sizeof(vfe_msg_illegal_cmd) + +typedef struct { + unsigned int status; +} __attribute__((packed)) 
vfe_msg_illegal_cmd; + + +/* + * Message to notify ARM that op1 buf is full and ready + */ + +#define VFE_MSG_OP1 0x0006 +#define VFE_MSG_OP1_LEN sizeof(vfe_msg_op1) + +typedef struct { + unsigned int op1_buf_y_addr; + unsigned int op1_buf_cbcr_addr; + unsigned int black_level_even_col; + unsigned int black_level_odd_col; + unsigned int defect_pixels_detected; + unsigned int asf_max_edge; +} __attribute__((packed)) vfe_msg_op1; + + +/* + * Message to notify ARM that op2 buf is full and ready + */ + +#define VFE_MSG_OP2 0x0007 +#define VFE_MSG_OP2_LEN sizeof(vfe_msg_op2) + +typedef struct { + unsigned int op2_buf_y_addr; + unsigned int op2_buf_cbcr_addr; + unsigned int black_level_even_col; + unsigned int black_level_odd_col; + unsigned int defect_pixels_detected; + unsigned int asf_max_edge; +} __attribute__((packed)) vfe_msg_op2; + + +/* + * Message to notify ARM that autofocus(af) stats are ready + */ + +#define VFE_MSG_STATS_AF 0x0008 +#define VFE_MSG_STATS_AF_LEN sizeof(vfe_msg_stats_af) + +typedef struct { + unsigned int af_stats_op_buffer; +} __attribute__((packed)) vfe_msg_stats_af; + + +/* + * Message to notify ARM that white balance(wb) and exposure (exp) + * stats are ready + */ + +#define VFE_MSG_STATS_WB_EXP 0x0009 +#define VFE_MSG_STATS_WB_EXP_LEN \ + sizeof(vfe_msg_stats_wb_exp) + +typedef struct { + unsigned int wb_exp_stats_op_buf; +} __attribute__((packed)) vfe_msg_stats_wb_exp; + + +/* + * Message to notify the ARM that histogram(hg) stats are ready + */ + +#define VFE_MSG_STATS_HG 0x000A +#define VFE_MSG_STATS_HG_LEN sizeof(vfe_msg_stats_hg) + +typedef struct { + unsigned int hg_stats_op_buf; +} __attribute__((packed)) vfe_msg_stats_hg; + + +/* + * Message to notify the ARM that epoch1 event occurred in the CAMIF + */ + +#define VFE_MSG_EPOCH1 0x000B +#define VFE_MSG_EPOCH1_LEN sizeof(vfe_msg_epoch1) + +typedef struct { +} __attribute__((packed)) vfe_msg_epoch1; + + +/* + * Message to notify the ARM that epoch2 event occurred in the CAMIF + */ + +#define VFE_MSG_EPOCH2 0x000C +#define VFE_MSG_EPOCH2_LEN sizeof(vfe_msg_epoch2) + +typedef struct { +} __attribute__((packed)) vfe_msg_epoch2; + + +/* + * Message to notify the ARM that sync timer1 op is completed + */ + +#define VFE_MSG_SYNC_T1_DONE 0x000D +#define VFE_MSG_SYNC_T1_DONE_LEN sizeof(vfe_msg_sync_t1_done) + +typedef struct { +} __attribute__((packed)) vfe_msg_sync_t1_done; + + +/* + * Message to notify the ARM that sync timer2 op is completed + */ + +#define VFE_MSG_SYNC_T2_DONE 0x000E +#define VFE_MSG_SYNC_T2_DONE_LEN sizeof(vfe_msg_sync_t2_done) + +typedef struct { +} __attribute__((packed)) vfe_msg_sync_t2_done; + + +/* + * Message to notify the ARM that async t1 operation completed + */ + +#define VFE_MSG_ASYNC_T1_DONE 0x000F +#define VFE_MSG_ASYNC_T1_DONE_LEN sizeof(vfe_msg_async_t1_done) + +typedef struct { +} __attribute__((packed)) vfe_msg_async_t1_done; + + + +/* + * Message to notify the ARM that async t2 operation completed + */ + +#define VFE_MSG_ASYNC_T2_DONE 0x0010 +#define VFE_MSG_ASYNC_T2_DONE_LEN sizeof(vfe_msg_async_t2_done) + +typedef struct { +} __attribute__((packed)) vfe_msg_async_t2_done; + + + +/* + * Message to notify the ARM that an error has occurred + */ + +#define VFE_MSG_ERROR 0x0011 +#define VFE_MSG_ERROR_LEN sizeof(vfe_msg_error) + +#define VFE_MSG_ERR_COND_NO_CAMIF_ERR 0x0000 +#define VFE_MSG_ERR_COND_CAMIF_ERR 0x0001 +#define VFE_MSG_ERR_COND_OP1_Y_NO_BUS_OF 0x0000 +#define VFE_MSG_ERR_COND_OP1_Y_BUS_OF 0x0002 +#define VFE_MSG_ERR_COND_OP1_CBCR_NO_BUS_OF 0x0000 +#define 
VFE_MSG_ERR_COND_OP1_CBCR_BUS_OF 0x0004 +#define VFE_MSG_ERR_COND_OP2_Y_NO_BUS_OF 0x0000 +#define VFE_MSG_ERR_COND_OP2_Y_BUS_OF 0x0008 +#define VFE_MSG_ERR_COND_OP2_CBCR_NO_BUS_OF 0x0000 +#define VFE_MSG_ERR_COND_OP2_CBCR_BUS_OF 0x0010 +#define VFE_MSG_ERR_COND_AF_NO_BUS_OF 0x0000 +#define VFE_MSG_ERR_COND_AF_BUS_OF 0x0020 +#define VFE_MSG_ERR_COND_WB_EXP_NO_BUS_OF 0x0000 +#define VFE_MSG_ERR_COND_WB_EXP_BUS_OF 0x0040 +#define VFE_MSG_ERR_COND_NO_AXI_ERR 0x0000 +#define VFE_MSG_ERR_COND_AXI_ERR 0x0080 + +#define VFE_MSG_CAMIF_STS_IDLE 0x0000 +#define VFE_MSG_CAMIF_STS_CAPTURE_DATA 0x0001 + +typedef struct { + unsigned int err_cond; + unsigned int camif_sts; +} __attribute__((packed)) vfe_msg_error; + + +#endif diff --git a/arch/arm/mach-msm/include/mach/remote_spinlock.h b/arch/arm/mach-msm/include/mach/remote_spinlock.h new file mode 100644 index 0000000000000..6b25ec20dbaaf --- /dev/null +++ b/arch/arm/mach-msm/include/mach/remote_spinlock.h @@ -0,0 +1,92 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __ASM__ARCH_MSM_REMOTE_SPINLOCK_H +#define __ASM__ARCH_MSM_REMOTE_SPINLOCK_H + +#include +#include + +/* Remote spinlock type definitions. 
*/ +struct raw_remote_spinlock; +typedef struct { + spinlock_t local; +#if defined(CONFIG_MSM_REMOTE_SPINLOCK) + struct raw_remote_spinlock *remote; +#endif +} remote_spinlock_t; + +#if defined(CONFIG_MSM_REMOTE_SPINLOCK) +int _remote_spin_lock_init(remote_spinlock_t *lock, const char *name); +void _remote_spin_lock(remote_spinlock_t *lock); +void _remote_spin_unlock(remote_spinlock_t *lock); +#else +static inline int _remote_spin_lock_init(remote_spinlock_t *lock, + const char *name) { return 0; } +static inline void _remote_spin_lock(remote_spinlock_t *lock) { } +static inline void _remote_spin_unlock(remote_spinlock_t *lock) { } +#endif + +/* Note: only the below functions constitute the supported interface */ +static inline int remote_spin_lock_init(remote_spinlock_t *lock, + const char *name) +{ + spin_lock_init(&lock->local); + return _remote_spin_lock_init(lock, name); +} + +#define remote_spin_lock(lock) \ + do { \ + typecheck(remote_spinlock_t *, lock); \ + spin_lock(&((lock)->local)); \ + _remote_spin_lock(lock); \ + } while (0) + +#define remote_spin_unlock(lock) \ + do { \ + typecheck(remote_spinlock_t *, lock); \ + _remote_spin_unlock(lock); \ + spin_unlock(&((lock)->local)); \ + } while (0) + + +#define remote_spin_lock_irqsave(lock,flags) \ + do { \ + typecheck(remote_spinlock_t *, lock); \ + spin_lock_irqsave(&((lock)->local), (flags)); \ + _remote_spin_lock(lock); \ + } while (0) + +#define remote_spin_unlock_irqrestore(lock,flags) \ + do { \ + typecheck(remote_spinlock_t *, lock); \ + _remote_spin_unlock(lock); \ + spin_unlock_irqrestore(&((lock)->local), (flags)); \ + } while (0) + +#endif /* __ASM__ARCH_MSM_REMOTE_SPINLOCK_H */ diff --git a/arch/arm/mach-msm/include/mach/scm.h b/arch/arm/mach-msm/include/mach/scm.h new file mode 100644 index 0000000000000..3374fb7c846fa --- /dev/null +++ b/arch/arm/mach-msm/include/mach/scm.h @@ -0,0 +1,77 @@ +/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __MACH_SCM_H +#define __MACH_SCM_H + +#define SCM_SVC_BOOT 0x1 +#define SCM_SVC_PIL 0x2 +#define SCM_SVC_UTIL 0x3 +#define SCM_SVC_TZ 0x4 +#define SCM_SVC_IO 0x5 +#define SCM_SVC_INFO 0x6 +#define SCM_SVC_SSD 0x7 +#define SCM_SVC_FUSE 0x8 +#define SCM_SVC_PWR 0x9 +#define SCM_SVC_CP 0xC +#define SCM_SVC_DCVS 0xD +#define SCM_SVC_TZSCHEDULER 0xFC + +#ifdef CONFIG_MSM_SCM +extern int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len, + void *resp_buf, size_t resp_len); + +extern s32 scm_call_atomic1(u32 svc, u32 cmd, u32 arg1); +extern s32 scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2); +extern s32 scm_call_atomic4_3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3, + u32 arg4, u32 *ret1, u32 *ret2); + +#define SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF)) + +extern u32 scm_get_version(void); +extern int scm_is_call_available(u32 svc_id, u32 cmd_id); + +#else + +static inline int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, + size_t cmd_len, void *resp_buf, size_t resp_len) +{ + return 0; +} + +static inline s32 scm_call_atomic1(u32 svc, u32 cmd, u32 arg1) +{ + return 0; +} + +static inline s32 scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2) +{ + return 0; +} + +static inline s32 scm_call_atomic4_3(u32 svc, u32 cmd, u32 arg1, u32 arg2, + u32 arg3, u32 arg4, u32 *ret1, u32 *ret2) +{ + return 0; +} + +static inline u32 scm_get_version(void) +{ + return 0; +} + +static inline int scm_is_call_available(u32 svc_id, u32 cmd_id) +{ + return 0; +} + +#endif +#endif diff --git a/arch/arm/mach-msm/include/mach/sirc.h b/arch/arm/mach-msm/include/mach/sirc.h index 7281337ee28db..c23d429422d78 100644 --- a/arch/arm/mach-msm/include/mach/sirc.h +++ b/arch/arm/mach-msm/include/mach/sirc.h @@ -42,6 +42,7 @@ struct sirc_regs_t { struct sirc_cascade_regs { void *int_status; unsigned int cascade_irq; + unsigned int cascade_fiq; }; void msm_init_sirc(void); @@ -56,8 +57,6 @@ void msm_sirc_exit_sleep(void); * Secondary interrupt controller interrupts */ -#define FIRST_SIRC_IRQ (NR_MSM_IRQS + NR_GPIO_IRQS) - #define INT_UART1 (FIRST_SIRC_IRQ + 0) #define INT_UART2 (FIRST_SIRC_IRQ + 1) #define INT_UART3 (FIRST_SIRC_IRQ + 2) diff --git a/arch/arm/mach-msm/include/mach/smem_log.h b/arch/arm/mach-msm/include/mach/smem_log.h new file mode 100644 index 0000000000000..65a42a27f76f4 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/smem_log.h @@ -0,0 +1,232 @@ +/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Code Aurora nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include +#include + +#define SMEM_LOG_BASE 0x30 + +#define SMIOC_SETMODE _IOW(SMEM_LOG_BASE, 1, int) +#define SMIOC_SETLOG _IOW(SMEM_LOG_BASE, 2, int) + +#define SMIOC_TEXT 0x00000001 +#define SMIOC_BINARY 0x00000002 +#define SMIOC_LOG 0x00000003 +#define SMIOC_STATIC_LOG 0x00000004 + +/* Event indentifier format: + * bit 31-28 is processor ID 8 => apps, 4 => Q6, 0 => modem + * bits 27-16 are subsystem id (event base) + * bits 15-0 are event id + */ + +#define PROC 0xF0000000 +#define SUB 0x0FFF0000 +#define ID 0x0000FFFF + +#define SMEM_LOG_PROC_ID_MODEM 0x00000000 +#define SMEM_LOG_PROC_ID_Q6 0x40000000 +#define SMEM_LOG_PROC_ID_APPS 0x80000000 + +#define SMEM_LOG_CONT 0x10000000 + +#define SMEM_LOG_DEBUG_EVENT_BASE 0x00000000 +#define SMEM_LOG_ONCRPC_EVENT_BASE 0x00010000 +#define SMEM_LOG_SMEM_EVENT_BASE 0x00020000 +#define SMEM_LOG_TMC_EVENT_BASE 0x00030000 +#define SMEM_LOG_TIMETICK_EVENT_BASE 0x00040000 +#define SMEM_LOG_DEM_EVENT_BASE 0x00050000 +#define SMEM_LOG_ERROR_EVENT_BASE 0x00060000 +#define SMEM_LOG_DCVS_EVENT_BASE 0x00070000 +#define SMEM_LOG_SLEEP_EVENT_BASE 0x00080000 +#define SMEM_LOG_RPC_ROUTER_EVENT_BASE 0x00090000 +#if defined(CONFIG_MSM_N_WAY_SMSM) +#define DEM_SMSM_ISR (SMEM_LOG_DEM_EVENT_BASE + 0x1) +#define DEM_STATE_CHANGE (SMEM_LOG_DEM_EVENT_BASE + 0x2) +#define DEM_STATE_MACHINE_ENTER (SMEM_LOG_DEM_EVENT_BASE + 0x3) +#define DEM_ENTER_SLEEP (SMEM_LOG_DEM_EVENT_BASE + 0x4) +#define DEM_END_SLEEP (SMEM_LOG_DEM_EVENT_BASE + 0x5) +#define DEM_SETUP_SLEEP (SMEM_LOG_DEM_EVENT_BASE + 0x6) +#define DEM_SETUP_POWER_COLLAPSE (SMEM_LOG_DEM_EVENT_BASE + 0x7) +#define DEM_SETUP_SUSPEND (SMEM_LOG_DEM_EVENT_BASE + 0x8) +#define DEM_EARLY_EXIT (SMEM_LOG_DEM_EVENT_BASE + 0x9) +#define DEM_WAKEUP_REASON (SMEM_LOG_DEM_EVENT_BASE + 0xA) +#define DEM_DETECT_WAKEUP (SMEM_LOG_DEM_EVENT_BASE + 0xB) +#define DEM_DETECT_RESET (SMEM_LOG_DEM_EVENT_BASE + 0xC) +#define DEM_DETECT_SLEEPEXIT (SMEM_LOG_DEM_EVENT_BASE + 0xD) +#define DEM_DETECT_RUN (SMEM_LOG_DEM_EVENT_BASE + 0xE) +#define DEM_APPS_SWFI (SMEM_LOG_DEM_EVENT_BASE + 0xF) +#define DEM_SEND_WAKEUP (SMEM_LOG_DEM_EVENT_BASE + 0x10) +#define DEM_ASSERT_OKTS (SMEM_LOG_DEM_EVENT_BASE + 0x11) +#define DEM_NEGATE_OKTS (SMEM_LOG_DEM_EVENT_BASE + 0x12) +#define DEM_PROC_COMM_CMD (SMEM_LOG_DEM_EVENT_BASE + 0x13) +#define DEM_REMOVE_PROC_PWR (SMEM_LOG_DEM_EVENT_BASE + 0x14) +#define DEM_RESTORE_PROC_PWR (SMEM_LOG_DEM_EVENT_BASE + 0x15) +#define DEM_SMI_CLK_DISABLED (SMEM_LOG_DEM_EVENT_BASE + 0x16) +#define DEM_SMI_CLK_ENABLED (SMEM_LOG_DEM_EVENT_BASE + 0x17) +#define DEM_MAO_INTS (SMEM_LOG_DEM_EVENT_BASE + 0x18) +#define DEM_APPS_WAKEUP_INT (SMEM_LOG_DEM_EVENT_BASE + 0x19) +#define DEM_PROC_WAKEUP (SMEM_LOG_DEM_EVENT_BASE + 0x1A) +#define DEM_PROC_POWERUP (SMEM_LOG_DEM_EVENT_BASE + 0x1B) +#define DEM_TIMER_EXPIRED (SMEM_LOG_DEM_EVENT_BASE + 0x1C) +#define DEM_SEND_BATTERY_INFO (SMEM_LOG_DEM_EVENT_BASE + 0x1D) +#define DEM_REMOTE_PWR_CB (SMEM_LOG_DEM_EVENT_BASE + 0x24) +#define DEM_TIME_SYNC_START 
(SMEM_LOG_DEM_EVENT_BASE + 0x1E) +#define DEM_TIME_SYNC_SEND_VALUE (SMEM_LOG_DEM_EVENT_BASE + 0x1F) +#define DEM_TIME_SYNC_DONE (SMEM_LOG_DEM_EVENT_BASE + 0x20) +#define DEM_TIME_SYNC_REQUEST (SMEM_LOG_DEM_EVENT_BASE + 0x21) +#define DEM_TIME_SYNC_POLL (SMEM_LOG_DEM_EVENT_BASE + 0x22) +#define DEM_TIME_SYNC_INIT (SMEM_LOG_DEM_EVENT_BASE + 0x23) +#define DEM_INIT (SMEM_LOG_DEM_EVENT_BASE + 0x25) +#else +#define DEM_NO_SLEEP (SMEM_LOG_DEM_EVENT_BASE + 1) +#define DEM_INSUF_TIME (SMEM_LOG_DEM_EVENT_BASE + 2) +#define DEMAPPS_ENTER_SLEEP (SMEM_LOG_DEM_EVENT_BASE + 3) +#define DEMAPPS_DETECT_WAKEUP (SMEM_LOG_DEM_EVENT_BASE + 4) +#define DEMAPPS_END_APPS_TCXO (SMEM_LOG_DEM_EVENT_BASE + 5) +#define DEMAPPS_ENTER_SLEEPEXIT (SMEM_LOG_DEM_EVENT_BASE + 6) +#define DEMAPPS_END_APPS_SLEEP (SMEM_LOG_DEM_EVENT_BASE + 7) +#define DEMAPPS_SETUP_APPS_PWRCLPS (SMEM_LOG_DEM_EVENT_BASE + 8) +#define DEMAPPS_PWRCLPS_EARLY_EXIT (SMEM_LOG_DEM_EVENT_BASE + 9) +#define DEMMOD_SEND_WAKEUP (SMEM_LOG_DEM_EVENT_BASE + 0xA) +#define DEMMOD_NO_APPS_VOTE (SMEM_LOG_DEM_EVENT_BASE + 0xB) +#define DEMMOD_NO_TCXO_SLEEP (SMEM_LOG_DEM_EVENT_BASE + 0xC) +#define DEMMOD_BT_CLOCK (SMEM_LOG_DEM_EVENT_BASE + 0xD) +#define DEMMOD_UART_CLOCK (SMEM_LOG_DEM_EVENT_BASE + 0xE) +#define DEMMOD_OKTS (SMEM_LOG_DEM_EVENT_BASE + 0xF) +#define DEM_SLEEP_INFO (SMEM_LOG_DEM_EVENT_BASE + 0x10) +#define DEMMOD_TCXO_END (SMEM_LOG_DEM_EVENT_BASE + 0x11) +#define DEMMOD_END_SLEEP_SIG (SMEM_LOG_DEM_EVENT_BASE + 0x12) +#define DEMMOD_SETUP_APPSSLEEP (SMEM_LOG_DEM_EVENT_BASE + 0x13) +#define DEMMOD_ENTER_TCXO (SMEM_LOG_DEM_EVENT_BASE + 0x14) +#define DEMMOD_WAKE_APPS (SMEM_LOG_DEM_EVENT_BASE + 0x15) +#define DEMMOD_POWER_COLLAPSE_APPS (SMEM_LOG_DEM_EVENT_BASE + 0x16) +#define DEMMOD_RESTORE_APPS_PWR (SMEM_LOG_DEM_EVENT_BASE + 0x17) +#define DEMAPPS_ASSERT_OKTS (SMEM_LOG_DEM_EVENT_BASE + 0x18) +#define DEMAPPS_RESTART_START_TIMER (SMEM_LOG_DEM_EVENT_BASE + 0x19) +#define DEMAPPS_ENTER_RUN (SMEM_LOG_DEM_EVENT_BASE + 0x1A) +#define DEMMOD_MAO_INTS (SMEM_LOG_DEM_EVENT_BASE + 0x1B) +#define DEMMOD_POWERUP_APPS_CALLED (SMEM_LOG_DEM_EVENT_BASE + 0x1C) +#define DEMMOD_PC_TIMER_EXPIRED (SMEM_LOG_DEM_EVENT_BASE + 0x1D) +#define DEM_DETECT_SLEEPEXIT (SMEM_LOG_DEM_EVENT_BASE + 0x1E) +#define DEM_DETECT_RUN (SMEM_LOG_DEM_EVENT_BASE + 0x1F) +#define DEM_SET_APPS_TIMER (SMEM_LOG_DEM_EVENT_BASE + 0x20) +#define DEM_NEGATE_OKTS (SMEM_LOG_DEM_EVENT_BASE + 0x21) +#define DEMMOD_APPS_WAKEUP_INT (SMEM_LOG_DEM_EVENT_BASE + 0x22) +#define DEMMOD_APPS_SWFI (SMEM_LOG_DEM_EVENT_BASE + 0x23) +#define DEM_SEND_BATTERY_INFO (SMEM_LOG_DEM_EVENT_BASE + 0x24) +#define DEM_SMI_CLK_DISABLED (SMEM_LOG_DEM_EVENT_BASE + 0x25) +#define DEM_SMI_CLK_ENABLED (SMEM_LOG_DEM_EVENT_BASE + 0x26) +#define DEMAPPS_SETUP_APPS_SUSPEND (SMEM_LOG_DEM_EVENT_BASE + 0x27) +#define DEM_RPC_EARLY_EXIT (SMEM_LOG_DEM_EVENT_BASE + 0x28) +#define DEMAPPS_WAKEUP_REASON (SMEM_LOG_DEM_EVENT_BASE + 0x29) +#define DEM_INIT (SMEM_LOG_DEM_EVENT_BASE + 0x30) +#endif +#define DEMMOD_UMTS_BASE (SMEM_LOG_DEM_EVENT_BASE + 0x8000) +#define DEMMOD_GL1_GO_TO_SLEEP (DEMMOD_UMTS_BASE + 0x0000) +#define DEMMOD_GL1_SLEEP_START (DEMMOD_UMTS_BASE + 0x0001) +#define DEMMOD_GL1_AFTER_GSM_CLK_ON (DEMMOD_UMTS_BASE + 0x0002) +#define DEMMOD_GL1_BEFORE_RF_ON (DEMMOD_UMTS_BASE + 0x0003) +#define DEMMOD_GL1_AFTER_RF_ON (DEMMOD_UMTS_BASE + 0x0004) +#define DEMMOD_GL1_FRAME_TICK (DEMMOD_UMTS_BASE + 0x0005) +#define DEMMOD_GL1_WCDMA_START (DEMMOD_UMTS_BASE + 0x0006) +#define DEMMOD_GL1_WCDMA_ENDING (DEMMOD_UMTS_BASE + 0x0007) +#define 
DEMMOD_UMTS_NOT_OKTS (DEMMOD_UMTS_BASE + 0x0008) +#define DEMMOD_UMTS_START_TCXO_SHUTDOWN (DEMMOD_UMTS_BASE + 0x0009) +#define DEMMOD_UMTS_END_TCXO_SHUTDOWN (DEMMOD_UMTS_BASE + 0x000A) +#define DEMMOD_UMTS_START_ARM_HALT (DEMMOD_UMTS_BASE + 0x000B) +#define DEMMOD_UMTS_END_ARM_HALT (DEMMOD_UMTS_BASE + 0x000C) +#define DEMMOD_UMTS_NEXT_WAKEUP_SCLK (DEMMOD_UMTS_BASE + 0x000D) +#define TIME_REMOTE_LOG_EVENT_START (SMEM_LOG_TIMETICK_EVENT_BASE + 0) +#define TIME_REMOTE_LOG_EVENT_GOTO_WAIT (SMEM_LOG_TIMETICK_EVENT_BASE + 1) +#define TIME_REMOTE_LOG_EVENT_GOTO_INIT (SMEM_LOG_TIMETICK_EVENT_BASE + 2) +#define ERR_ERROR_FATAL (SMEM_LOG_ERROR_EVENT_BASE + 1) +#define ERR_ERROR_FATAL_TASK (SMEM_LOG_ERROR_EVENT_BASE + 2) +#define DCVSAPPS_LOG_IDLE (SMEM_LOG_DCVS_EVENT_BASE + 0x0) +#define DCVSAPPS_LOG_ERR (SMEM_LOG_DCVS_EVENT_BASE + 0x1) +#define DCVSAPPS_LOG_CHG (SMEM_LOG_DCVS_EVENT_BASE + 0x2) +#define DCVSAPPS_LOG_REG (SMEM_LOG_DCVS_EVENT_BASE + 0x3) +#define DCVSAPPS_LOG_DEREG (SMEM_LOG_DCVS_EVENT_BASE + 0x4) +#define SMEM_LOG_EVENT_CB (SMEM_LOG_SMEM_EVENT_BASE + 0) +#define SMEM_LOG_EVENT_START (SMEM_LOG_SMEM_EVENT_BASE + 1) +#define SMEM_LOG_EVENT_INIT (SMEM_LOG_SMEM_EVENT_BASE + 2) +#define SMEM_LOG_EVENT_RUNNING (SMEM_LOG_SMEM_EVENT_BASE + 3) +#define SMEM_LOG_EVENT_STOP (SMEM_LOG_SMEM_EVENT_BASE + 4) +#define SMEM_LOG_EVENT_RESTART (SMEM_LOG_SMEM_EVENT_BASE + 5) +#define SMEM_LOG_EVENT_SS (SMEM_LOG_SMEM_EVENT_BASE + 6) +#define SMEM_LOG_EVENT_READ (SMEM_LOG_SMEM_EVENT_BASE + 7) +#define SMEM_LOG_EVENT_WRITE (SMEM_LOG_SMEM_EVENT_BASE + 8) +#define SMEM_LOG_EVENT_SIGS1 (SMEM_LOG_SMEM_EVENT_BASE + 9) +#define SMEM_LOG_EVENT_SIGS2 (SMEM_LOG_SMEM_EVENT_BASE + 10) +#define SMEM_LOG_EVENT_WRITE_DM (SMEM_LOG_SMEM_EVENT_BASE + 11) +#define SMEM_LOG_EVENT_READ_DM (SMEM_LOG_SMEM_EVENT_BASE + 12) +#define SMEM_LOG_EVENT_SKIP_DM (SMEM_LOG_SMEM_EVENT_BASE + 13) +#define SMEM_LOG_EVENT_STOP_DM (SMEM_LOG_SMEM_EVENT_BASE + 14) +#define SMEM_LOG_EVENT_ISR (SMEM_LOG_SMEM_EVENT_BASE + 15) +#define SMEM_LOG_EVENT_TASK (SMEM_LOG_SMEM_EVENT_BASE + 16) +#define SMEM_LOG_EVENT_RS (SMEM_LOG_SMEM_EVENT_BASE + 17) +#define ONCRPC_LOG_EVENT_SMD_WAIT (SMEM_LOG_ONCRPC_EVENT_BASE + 0) +#define ONCRPC_LOG_EVENT_RPC_WAIT (SMEM_LOG_ONCRPC_EVENT_BASE + 1) +#define ONCRPC_LOG_EVENT_RPC_BOTH_WAIT (SMEM_LOG_ONCRPC_EVENT_BASE + 2) +#define ONCRPC_LOG_EVENT_RPC_INIT (SMEM_LOG_ONCRPC_EVENT_BASE + 3) +#define ONCRPC_LOG_EVENT_RUNNING (SMEM_LOG_ONCRPC_EVENT_BASE + 4) +#define ONCRPC_LOG_EVENT_APIS_INITED (SMEM_LOG_ONCRPC_EVENT_BASE + 5) +#define ONCRPC_LOG_EVENT_AMSS_RESET (SMEM_LOG_ONCRPC_EVENT_BASE + 6) +#define ONCRPC_LOG_EVENT_SMD_RESET (SMEM_LOG_ONCRPC_EVENT_BASE + 7) +#define ONCRPC_LOG_EVENT_ONCRPC_RESET (SMEM_LOG_ONCRPC_EVENT_BASE + 8) +#define ONCRPC_LOG_EVENT_CB (SMEM_LOG_ONCRPC_EVENT_BASE + 9) +#define ONCRPC_LOG_EVENT_STD_CALL (SMEM_LOG_ONCRPC_EVENT_BASE + 10) +#define ONCRPC_LOG_EVENT_STD_REPLY (SMEM_LOG_ONCRPC_EVENT_BASE + 11) +#define ONCRPC_LOG_EVENT_STD_CALL_ASYNC (SMEM_LOG_ONCRPC_EVENT_BASE + 12) +#define NO_SLEEP_OLD (SMEM_LOG_SLEEP_EVENT_BASE + 0x1) +#define INSUF_TIME (SMEM_LOG_SLEEP_EVENT_BASE + 0x2) +#define MOD_UART_CLOCK (SMEM_LOG_SLEEP_EVENT_BASE + 0x3) +#define SLEEP_INFO (SMEM_LOG_SLEEP_EVENT_BASE + 0x4) +#define MOD_TCXO_END (SMEM_LOG_SLEEP_EVENT_BASE + 0x5) +#define MOD_ENTER_TCXO (SMEM_LOG_SLEEP_EVENT_BASE + 0x6) +#define NO_SLEEP_NEW (SMEM_LOG_SLEEP_EVENT_BASE + 0x7) +#define RPC_ROUTER_LOG_EVENT_UNKNOWN (SMEM_LOG_RPC_ROUTER_EVENT_BASE) +#define RPC_ROUTER_LOG_EVENT_MSG_READ 
(SMEM_LOG_RPC_ROUTER_EVENT_BASE + 1) +#define RPC_ROUTER_LOG_EVENT_MSG_WRITTEN (SMEM_LOG_RPC_ROUTER_EVENT_BASE + 2) +#define RPC_ROUTER_LOG_EVENT_MSG_CFM_REQ (SMEM_LOG_RPC_ROUTER_EVENT_BASE + 3) +#define RPC_ROUTER_LOG_EVENT_MSG_CFM_SNT (SMEM_LOG_RPC_ROUTER_EVENT_BASE + 4) +#define RPC_ROUTER_LOG_EVENT_MID_READ (SMEM_LOG_RPC_ROUTER_EVENT_BASE + 5) +#define RPC_ROUTER_LOG_EVENT_MID_WRITTEN (SMEM_LOG_RPC_ROUTER_EVENT_BASE + 6) +#define RPC_ROUTER_LOG_EVENT_MID_CFM_REQ (SMEM_LOG_RPC_ROUTER_EVENT_BASE + 7) + +void smem_log_event(uint32_t id, uint32_t data1, uint32_t data2, + uint32_t data3); +void smem_log_event6(uint32_t id, uint32_t data1, uint32_t data2, + uint32_t data3, uint32_t data4, uint32_t data5, + uint32_t data6); +void smem_log_event_to_static(uint32_t id, uint32_t data1, uint32_t data2, + uint32_t data3); +void smem_log_event6_to_static(uint32_t id, uint32_t data1, uint32_t data2, + uint32_t data3, uint32_t data4, uint32_t data5, + uint32_t data6); + diff --git a/arch/arm/mach-msm/include/mach/socinfo.h b/arch/arm/mach-msm/include/mach/socinfo.h new file mode 100644 index 0000000000000..fba6efe251014 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/socinfo.h @@ -0,0 +1,257 @@ +/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _ARCH_ARM_MACH_MSM_SOCINFO_H_ +#define _ARCH_ARM_MACH_MSM_SOCINFO_H_ + +#include +#include +#include +#include +#include + +#include +#include +/* + * SOC version type with major number in the upper 16 bits and minor + * number in the lower 16 bits. 
For example: + * 1.0 -> 0x00010000 + * 2.3 -> 0x00020003 + */ +#define SOCINFO_VERSION_MAJOR(ver) ((ver & 0xffff0000) >> 16) +#define SOCINFO_VERSION_MINOR(ver) (ver & 0x0000ffff) + +#ifdef CONFIG_OF +#define early_machine_is_copper() \ + of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmcopper") +#define machine_is_copper() \ + of_machine_is_compatible("qcom,msmcopper") +#else +#define early_machine_is_copper() 0 +#define machine_is_copper() 0 +#endif + +enum msm_cpu { + MSM_CPU_UNKNOWN = 0, + MSM_CPU_7X01, + MSM_CPU_7X25, + MSM_CPU_7X27, + MSM_CPU_8X50, + MSM_CPU_8X50A, + MSM_CPU_7X30, + MSM_CPU_8X55, + MSM_CPU_8X60, + MSM_CPU_8960, + MSM_CPU_7X27A, + FSM_CPU_9XXX, + MSM_CPU_7X25A, + MSM_CPU_7X25AA, + MSM_CPU_8064, + MSM_CPU_8930, + MSM_CPU_7X27AA, + MSM_CPU_9615, + MSM_CPU_COPPER, +}; + +enum msm_cpu socinfo_get_msm_cpu(void); +uint32_t socinfo_get_id(void); +uint32_t socinfo_get_version(void); +char *socinfo_get_build_id(void); +uint32_t socinfo_get_platform_type(void); +uint32_t socinfo_get_platform_subtype(void); +uint32_t socinfo_get_platform_version(void); +int __init socinfo_init(void) __must_check; +const int read_msm_cpu_type(void); +const int get_core_count(void); +const int cpu_is_krait_v1(void); + +static inline int cpu_is_msm7x01(void) +{ +#ifdef CONFIG_ARCH_MSM7X01A + enum msm_cpu cpu = socinfo_get_msm_cpu(); + + BUG_ON(cpu == MSM_CPU_UNKNOWN); + return cpu == MSM_CPU_7X01; +#else + return 0; +#endif +} + +static inline int cpu_is_msm7x25(void) +{ +#ifdef CONFIG_ARCH_MSM7X25 + enum msm_cpu cpu = socinfo_get_msm_cpu(); + + BUG_ON(cpu == MSM_CPU_UNKNOWN); + return cpu == MSM_CPU_7X25; +#else + return 0; +#endif +} + +static inline int cpu_is_msm7x27(void) +{ +#ifdef CONFIG_ARCH_MSM7X27 + enum msm_cpu cpu = socinfo_get_msm_cpu(); + + BUG_ON(cpu == MSM_CPU_UNKNOWN); + return cpu == MSM_CPU_7X27; +#else + return 0; +#endif +} + +static inline int cpu_is_msm7x27a(void) +{ +#ifdef CONFIG_ARCH_MSM7X27A + enum msm_cpu cpu = socinfo_get_msm_cpu(); + + BUG_ON(cpu == MSM_CPU_UNKNOWN); + return cpu == MSM_CPU_7X27A; +#else + return 0; +#endif +} + +static inline int cpu_is_msm7x27aa(void) +{ +#ifdef CONFIG_ARCH_MSM7X27A + enum msm_cpu cpu = socinfo_get_msm_cpu(); + + BUG_ON(cpu == MSM_CPU_UNKNOWN); + return cpu == MSM_CPU_7X27AA; +#else + return 0; +#endif +} + +static inline int cpu_is_msm7x25a(void) +{ +#ifdef CONFIG_ARCH_MSM7X27A + enum msm_cpu cpu = socinfo_get_msm_cpu(); + + BUG_ON(cpu == MSM_CPU_UNKNOWN); + return cpu == MSM_CPU_7X25A; +#else + return 0; +#endif +} + +static inline int cpu_is_msm7x25aa(void) +{ +#ifdef CONFIG_ARCH_MSM7X27A + enum msm_cpu cpu = socinfo_get_msm_cpu(); + + BUG_ON(cpu == MSM_CPU_UNKNOWN); + return cpu == MSM_CPU_7X25AA; +#else + return 0; +#endif +} + +static inline int cpu_is_msm7x30(void) +{ +#ifdef CONFIG_ARCH_MSM7X30 + enum msm_cpu cpu = socinfo_get_msm_cpu(); + + BUG_ON(cpu == MSM_CPU_UNKNOWN); + return cpu == MSM_CPU_7X30; +#else + return 0; +#endif +} + +static inline int cpu_is_qsd8x50(void) +{ +#ifdef CONFIG_ARCH_QSD8X50 + enum msm_cpu cpu = socinfo_get_msm_cpu(); + + BUG_ON(cpu == MSM_CPU_UNKNOWN); + return cpu == MSM_CPU_8X50; +#else + return 0; +#endif +} + +static inline int cpu_is_msm8x55(void) +{ +#ifdef CONFIG_ARCH_MSM7X30 + enum msm_cpu cpu = socinfo_get_msm_cpu(); + + BUG_ON(cpu == MSM_CPU_UNKNOWN); + return cpu == MSM_CPU_8X55; +#else + return 0; +#endif +} + +static inline int cpu_is_msm8x60(void) +{ +#ifdef CONFIG_ARCH_MSM8X60 + return read_msm_cpu_type() == MSM_CPU_8X60; +#else + return 0; +#endif +} + +static inline int 
cpu_is_msm8960(void) +{ +#ifdef CONFIG_ARCH_MSM8960 + return read_msm_cpu_type() == MSM_CPU_8960; +#else + return 0; +#endif +} + +static inline int cpu_is_apq8064(void) +{ +#ifdef CONFIG_ARCH_APQ8064 + return read_msm_cpu_type() == MSM_CPU_8064; +#else + return 0; +#endif +} + +static inline int cpu_is_msm8930(void) +{ +#ifdef CONFIG_ARCH_MSM8930 + return read_msm_cpu_type() == MSM_CPU_8930; +#else + return 0; +#endif +} + +static inline int cpu_is_fsm9xxx(void) +{ +#ifdef CONFIG_ARCH_FSM9XXX + enum msm_cpu cpu = socinfo_get_msm_cpu(); + + BUG_ON(cpu == MSM_CPU_UNKNOWN); + return cpu == FSM_CPU_9XXX; +#else + return 0; +#endif +} + +static inline int cpu_is_msm9615(void) +{ +#ifdef CONFIG_ARCH_MSM9615 + enum msm_cpu cpu = socinfo_get_msm_cpu(); + + BUG_ON(cpu == MSM_CPU_UNKNOWN); + return cpu == MSM_CPU_9615; +#else + return 0; +#endif +} +#endif diff --git a/arch/arm/mach-msm/include/mach/system.h b/arch/arm/mach-msm/include/mach/system.h index d2e83f42ba165..23de62af3302c 100644 --- a/arch/arm/mach-msm/include/mach/system.h +++ b/arch/arm/mach-msm/include/mach/system.h @@ -26,3 +26,7 @@ static inline void arch_reset(char mode, const char *cmd) * PSHOLD line on the PMIC to hard reset the system */ extern void (*msm_hw_reset_hook)(void); + +void msm_set_i2c_mux(bool gpio, int *gpio_clk, int *gpio_dat); + +void msm_i2c_gpio_init(void); diff --git a/arch/arm/mach-msm/include/mach/tpa6130.h b/arch/arm/mach-msm/include/mach/tpa6130.h new file mode 100644 index 0000000000000..42124366081c1 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/tpa6130.h @@ -0,0 +1,21 @@ +/* + * Definitions for tpa6130a headset amp chip. + */ +#ifndef TPA6130_H +#define TPA6130_H + +#include + +#define TPA6130_I2C_NAME "tpa6130" +void set_headset_amp(int on); + +struct tpa6130_platform_data { + int gpio_hp_sd; + int enable_rpc_server; +}; + +struct rpc_headset_amp_ctl_args { + int on; +}; +#endif + diff --git a/arch/arm/mach-msm/include/mach/vmalloc.h b/arch/arm/mach-msm/include/mach/vmalloc.h index d138448eff16d..8d7bc7c4bb25b 100644 --- a/arch/arm/mach-msm/include/mach/vmalloc.h +++ b/arch/arm/mach-msm/include/mach/vmalloc.h @@ -16,7 +16,8 @@ #ifndef __ASM_ARCH_MSM_VMALLOC_H #define __ASM_ARCH_MSM_VMALLOC_H -#define VMALLOC_END 0xd0000000UL +/* IO devices are mapped at 0xF8000000 and above */ +#define VMALLOC_END 0xf8000000UL #endif diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c index 1260007a9dd1a..80a81cab0a4e2 100644 --- a/arch/arm/mach-msm/io.c +++ b/arch/arm/mach-msm/io.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -40,11 +41,13 @@ static struct map_desc msm_io_desc[] __initdata = { MSM_DEVICE(VIC), MSM_DEVICE(CSR), - MSM_DEVICE(GPT), + MSM_DEVICE(TMR), MSM_DEVICE(DMOV), MSM_DEVICE(GPIO1), MSM_DEVICE(GPIO2), MSM_DEVICE(CLK_CTL), + MSM_DEVICE(AD5), + MSM_DEVICE(MDC), #ifdef CONFIG_MSM_DEBUG_UART MSM_DEVICE(DEBUG_UART), #endif @@ -83,6 +86,7 @@ static struct map_desc qsd8x50_io_desc[] __initdata = { MSM_DEVICE(SCPLL), MSM_DEVICE(AD5), MSM_DEVICE(MDC), + MSM_DEVICE(TCSR), #ifdef CONFIG_MSM_DEBUG_UART MSM_DEVICE(DEBUG_UART), #endif @@ -96,6 +100,19 @@ static struct map_desc qsd8x50_io_desc[] __initdata = { void __init msm_map_qsd8x50_io(void) { + unsigned int unused; + + /* The bootloader may not have done it, so disable predecode repair + * cache for thumb2 (DPRC, set bit 4 in PVR0F2) due to a bug. 
+ */ + asm volatile ("mrc p15, 0, %0, c15, c15, 2\n\t" + "orr %0, %0, #0x10\n\t" + "mcr p15, 0, %0, c15, c15, 2" + : "=&r" (unused)); + /* clear out EFSR and ADFSR on boot */ + asm volatile ("mcr p15, 7, %0, c15, c0, 1\n\t" + "mcr p15, 0, %0, c5, c1, 0" + : : "r" (0)); iotable_init(qsd8x50_io_desc, ARRAY_SIZE(qsd8x50_io_desc)); } #endif /* CONFIG_ARCH_QSD8X50 */ @@ -145,6 +162,10 @@ static struct map_desc msm7x30_io_desc[] __initdata = { void __init msm_map_msm7x30_io(void) { + /* clear out EFSR and ADFSR on boot */ + asm volatile ("mcr p15, 7, %0, c15, c0, 1\n\t" + "mcr p15, 0, %0, c5, c1, 0" + : : "r" (0)); iotable_init(msm7x30_io_desc, ARRAY_SIZE(msm7x30_io_desc)); } #endif /* CONFIG_ARCH_MSM7X30 */ @@ -152,6 +173,7 @@ void __init msm_map_msm7x30_io(void) void __iomem * __msm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) { +#ifdef CONFIG_ARCH_MSM_ARM11 if (mtype == MT_DEVICE) { /* The peripherals in the 88000000 - D0000000 range * are only accessible by type MT_DEVICE_NONSHARED. @@ -160,7 +182,7 @@ __msm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) if ((phys_addr >= 0x88000000) && (phys_addr < 0xD0000000)) mtype = MT_DEVICE_NONSHARED; } - +#endif return __arm_ioremap_caller(phys_addr, size, mtype, __builtin_return_address(0)); } diff --git a/arch/arm/mach-msm/iommu.c b/arch/arm/mach-msm/iommu.c index e2d58e4cb0d73..446102453d7ee 100644 --- a/arch/arm/mach-msm/iommu.c +++ b/arch/arm/mach-msm/iommu.c @@ -176,7 +176,7 @@ static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable) SET_M(base, ctx, 1); } -static int msm_iommu_domain_init(struct iommu_domain *domain) +static int msm_iommu_domain_init(struct iommu_domain *domain, int flags) { struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL); @@ -534,7 +534,7 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain, /* Invalidate context TLB */ SET_CTX_TLBIALL(base, ctx, 0); - SET_V2PPR_VA(base, ctx, va >> V2Pxx_VA_SHIFT); + SET_V2PPR(base, ctx, va & V2Pxx_VA); par = GET_PAR(base, ctx); diff --git a/arch/arm/mach-msm/iommu_domains.c b/arch/arm/mach-msm/iommu_domains.c new file mode 100644 index 0000000000000..290ba332b86d6 --- /dev/null +++ b/arch/arm/mach-msm/iommu_domains.c @@ -0,0 +1,253 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +struct msm_iommu_domain { + int domain_idx; + int iova_pool_idx; +}; + +enum { + GLOBAL_DOMAIN, + VIDEO_DOMAIN, + EMPTY_DOMAIN, + MAX_DOMAINS +}; + +enum { + GLOBAL_MEMORY_POOL, + VIDEO_FIRMWARE_POOL, + VIDEO_ALLOC_POOL, +}; + +struct { + char *name; + int domain; +} msm_iommu_ctx_names[] = { + /* Camera */ + { + .name = "vpe_src", + .domain = GLOBAL_DOMAIN, + }, + /* Camera */ + { + .name = "vpe_dst", + .domain = GLOBAL_DOMAIN, + }, + /* Camera */ + { + .name = "vfe_imgwr", + .domain = GLOBAL_DOMAIN, + }, + /* Camera */ + { + .name = "vfe_misc", + .domain = GLOBAL_DOMAIN, + }, + /* Camera */ + { + .name = "ijpeg_src", + .domain = GLOBAL_DOMAIN, + }, + /* Camera */ + { + .name = "ijpeg_dst", + .domain = GLOBAL_DOMAIN, + }, + /* Camera */ + { + .name = "jpegd_src", + .domain = GLOBAL_DOMAIN, + }, + /* Camera */ + { + .name = "jpegd_dst", + .domain = GLOBAL_DOMAIN, + }, + /* Display */ + { + .name = "mdp_vg1", + .domain = GLOBAL_DOMAIN, + }, + /* Display */ + { + .name = "mdp_vg2", + .domain = GLOBAL_DOMAIN, + }, + /* Display */ + { + .name = "mdp_rgb1", + .domain = GLOBAL_DOMAIN, + }, + /* Display */ + { + .name = "mdp_rgb2", + .domain = GLOBAL_DOMAIN, + }, + /* Rotator */ + { + .name = "rot_src", + .domain = GLOBAL_DOMAIN, + }, + /* Rotator */ + { + .name = "rot_dst", + .domain = GLOBAL_DOMAIN, + }, + /* Video */ + { + .name = "vcodec_a_mm1", + .domain = VIDEO_DOMAIN, + }, + /* Video */ + { + .name = "vcodec_b_mm2", + .domain = VIDEO_DOMAIN, + }, + /* Video */ + { + .name = "vcodec_a_stream", + .domain = VIDEO_DOMAIN, + }, +}; + +static struct iommu_domain *msm_iommu_domains[MAX_DOMAINS]; + +static struct mem_pool msm_iommu_iova_pools[] = { + [GLOBAL_MEMORY_POOL] = { + .paddr = SZ_4K, + .size = SZ_2G - SZ_4K, + }, + /* + * The video hardware has several constraints: + * 1) The start address for firmware must be 128K aligned + * 2) The video firmware must exist at a lower address than + * all other video allocations + * 3) Video allocations cannot be more than 256MB away from the + * firmware + * + * Splitting the video pools makes sure that firmware will + * always be lower than regular allocations and the maximum + * size of 256MB will be enforced. 
+ */ + [VIDEO_FIRMWARE_POOL] = { + .paddr = SZ_128K, + .size = SZ_16M - SZ_128K, + }, + [VIDEO_ALLOC_POOL] = { + .paddr = SZ_16M, + .size = SZ_256M - SZ_16M - SZ_128K, + } +}; + +static struct msm_iommu_domain msm_iommu_subsystems[] = { + [MSM_SUBSYSTEM_VIDEO] = { + .domain_idx = VIDEO_DOMAIN, + .iova_pool_idx = VIDEO_ALLOC_POOL, + }, + [MSM_SUBSYSTEM_VIDEO_FWARE] = { + .domain_idx = VIDEO_DOMAIN, + .iova_pool_idx = VIDEO_FIRMWARE_POOL, + }, + [MSM_SUBSYSTEM_CAMERA] = { + .domain_idx = GLOBAL_DOMAIN, + .iova_pool_idx = GLOBAL_MEMORY_POOL, + }, + [MSM_SUBSYSTEM_DISPLAY] = { + .domain_idx = GLOBAL_DOMAIN, + .iova_pool_idx = GLOBAL_MEMORY_POOL, + }, + [MSM_SUBSYSTEM_ROTATOR] = { + .domain_idx = GLOBAL_DOMAIN, + .iova_pool_idx = GLOBAL_MEMORY_POOL, + }, +}; + +struct iommu_domain *msm_subsystem_get_domain(int subsys_id) +{ + int id = msm_iommu_subsystems[subsys_id].domain_idx; + + return msm_iommu_domains[id]; +} + +struct mem_pool *msm_subsystem_get_pool(int subsys_id) +{ + int id = msm_iommu_subsystems[subsys_id].iova_pool_idx; + + return &msm_iommu_iova_pools[id]; +} + +static int __init msm_subsystem_iommu_init(void) +{ + int i; + + for (i = 0; i < (ARRAY_SIZE(msm_iommu_domains) - 1); i++) + msm_iommu_domains[i] = iommu_domain_alloc(0); + + for (i = 0; i < ARRAY_SIZE(msm_iommu_iova_pools); i++) { + mutex_init(&msm_iommu_iova_pools[i].pool_mutex); + msm_iommu_iova_pools[i].gpool = gen_pool_create(PAGE_SHIFT, -1); + + if (!msm_iommu_iova_pools[i].gpool) { + pr_err("%s: could not allocate iova pool. iommu" + " programming will not work with iova space" + " %d\n", __func__, i); + continue; + } + + if (gen_pool_add(msm_iommu_iova_pools[i].gpool, + msm_iommu_iova_pools[i].paddr, + msm_iommu_iova_pools[i].size, + -1)) { + pr_err("%s: could not add memory to iova pool. iommu" + " programming will not work with iova space" + " %d\n", __func__, i); + gen_pool_destroy(msm_iommu_iova_pools[i].gpool); + msm_iommu_iova_pools[i].gpool = NULL; + continue; + } + } + + for (i = 0; i < ARRAY_SIZE(msm_iommu_ctx_names); i++) { + int domain_idx; + struct device *ctx = msm_iommu_get_ctx( + msm_iommu_ctx_names[i].name); + + if (!ctx) + continue; + + domain_idx = msm_iommu_ctx_names[i].domain; + + if (!msm_iommu_domains[domain_idx]) + continue; + + if (iommu_attach_device(msm_iommu_domains[domain_idx], ctx)) { + pr_err("%s: could not attach domain %d to context %s." + " iommu programming will not occur.\n", + __func__, domain_idx, + msm_iommu_ctx_names[i].name); + msm_iommu_subsystems[i].domain_idx = EMPTY_DOMAIN; + continue; + } + } + + return 0; +} +device_initcall(msm_subsystem_iommu_init); diff --git a/arch/arm/mach-msm/irq-vic.c b/arch/arm/mach-msm/irq-vic.c deleted file mode 100644 index 68c28bbdc9695..0000000000000 --- a/arch/arm/mach-msm/irq-vic.c +++ /dev/null @@ -1,364 +0,0 @@ -/* - * Copyright (C) 2007 Google, Inc. - * Copyright (c) 2009, Code Aurora Forum. All rights reserved. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include - -#include - -#include "smd_private.h" - -enum { - IRQ_DEBUG_SLEEP_INT_TRIGGER = 1U << 0, - IRQ_DEBUG_SLEEP_INT = 1U << 1, - IRQ_DEBUG_SLEEP_ABORT = 1U << 2, - IRQ_DEBUG_SLEEP = 1U << 3, - IRQ_DEBUG_SLEEP_REQUEST = 1U << 4, -}; -static int msm_irq_debug_mask; -module_param_named(debug_mask, msm_irq_debug_mask, int, - S_IRUGO | S_IWUSR | S_IWGRP); - -#define VIC_REG(off) (MSM_VIC_BASE + (off)) -#define VIC_INT_TO_REG_ADDR(base, irq) (base + (irq / 32) * 4) -#define VIC_INT_TO_REG_INDEX(irq) ((irq >> 5) & 3) - -#define VIC_INT_SELECT0 VIC_REG(0x0000) /* 1: FIQ, 0: IRQ */ -#define VIC_INT_SELECT1 VIC_REG(0x0004) /* 1: FIQ, 0: IRQ */ -#define VIC_INT_SELECT2 VIC_REG(0x0008) /* 1: FIQ, 0: IRQ */ -#define VIC_INT_SELECT3 VIC_REG(0x000C) /* 1: FIQ, 0: IRQ */ -#define VIC_INT_EN0 VIC_REG(0x0010) -#define VIC_INT_EN1 VIC_REG(0x0014) -#define VIC_INT_EN2 VIC_REG(0x0018) -#define VIC_INT_EN3 VIC_REG(0x001C) -#define VIC_INT_ENCLEAR0 VIC_REG(0x0020) -#define VIC_INT_ENCLEAR1 VIC_REG(0x0024) -#define VIC_INT_ENCLEAR2 VIC_REG(0x0028) -#define VIC_INT_ENCLEAR3 VIC_REG(0x002C) -#define VIC_INT_ENSET0 VIC_REG(0x0030) -#define VIC_INT_ENSET1 VIC_REG(0x0034) -#define VIC_INT_ENSET2 VIC_REG(0x0038) -#define VIC_INT_ENSET3 VIC_REG(0x003C) -#define VIC_INT_TYPE0 VIC_REG(0x0040) /* 1: EDGE, 0: LEVEL */ -#define VIC_INT_TYPE1 VIC_REG(0x0044) /* 1: EDGE, 0: LEVEL */ -#define VIC_INT_TYPE2 VIC_REG(0x0048) /* 1: EDGE, 0: LEVEL */ -#define VIC_INT_TYPE3 VIC_REG(0x004C) /* 1: EDGE, 0: LEVEL */ -#define VIC_INT_POLARITY0 VIC_REG(0x0050) /* 1: NEG, 0: POS */ -#define VIC_INT_POLARITY1 VIC_REG(0x0054) /* 1: NEG, 0: POS */ -#define VIC_INT_POLARITY2 VIC_REG(0x0058) /* 1: NEG, 0: POS */ -#define VIC_INT_POLARITY3 VIC_REG(0x005C) /* 1: NEG, 0: POS */ -#define VIC_NO_PEND_VAL VIC_REG(0x0060) - -#if defined(CONFIG_ARCH_MSM_SCORPION) -#define VIC_NO_PEND_VAL_FIQ VIC_REG(0x0064) -#define VIC_INT_MASTEREN VIC_REG(0x0068) /* 1: IRQ, 2: FIQ */ -#define VIC_CONFIG VIC_REG(0x006C) /* 1: USE SC VIC */ -#else -#define VIC_INT_MASTEREN VIC_REG(0x0064) /* 1: IRQ, 2: FIQ */ -#define VIC_PROTECTION VIC_REG(0x006C) /* 1: ENABLE */ -#define VIC_CONFIG VIC_REG(0x0068) /* 1: USE ARM1136 VIC */ -#endif - -#define VIC_IRQ_STATUS0 VIC_REG(0x0080) -#define VIC_IRQ_STATUS1 VIC_REG(0x0084) -#define VIC_IRQ_STATUS2 VIC_REG(0x0088) -#define VIC_IRQ_STATUS3 VIC_REG(0x008C) -#define VIC_FIQ_STATUS0 VIC_REG(0x0090) -#define VIC_FIQ_STATUS1 VIC_REG(0x0094) -#define VIC_FIQ_STATUS2 VIC_REG(0x0098) -#define VIC_FIQ_STATUS3 VIC_REG(0x009C) -#define VIC_RAW_STATUS0 VIC_REG(0x00A0) -#define VIC_RAW_STATUS1 VIC_REG(0x00A4) -#define VIC_RAW_STATUS2 VIC_REG(0x00A8) -#define VIC_RAW_STATUS3 VIC_REG(0x00AC) -#define VIC_INT_CLEAR0 VIC_REG(0x00B0) -#define VIC_INT_CLEAR1 VIC_REG(0x00B4) -#define VIC_INT_CLEAR2 VIC_REG(0x00B8) -#define VIC_INT_CLEAR3 VIC_REG(0x00BC) -#define VIC_SOFTINT0 VIC_REG(0x00C0) -#define VIC_SOFTINT1 VIC_REG(0x00C4) -#define VIC_SOFTINT2 VIC_REG(0x00C8) -#define VIC_SOFTINT3 VIC_REG(0x00CC) -#define VIC_IRQ_VEC_RD VIC_REG(0x00D0) /* pending int # */ -#define VIC_IRQ_VEC_PEND_RD VIC_REG(0x00D4) /* pending vector addr */ -#define VIC_IRQ_VEC_WR VIC_REG(0x00D8) - -#if defined(CONFIG_ARCH_MSM_SCORPION) -#define VIC_FIQ_VEC_RD VIC_REG(0x00DC) -#define VIC_FIQ_VEC_PEND_RD VIC_REG(0x00E0) -#define VIC_FIQ_VEC_WR VIC_REG(0x00E4) -#define VIC_IRQ_IN_SERVICE VIC_REG(0x00E8) -#define VIC_IRQ_IN_STACK VIC_REG(0x00EC) -#define 
VIC_FIQ_IN_SERVICE VIC_REG(0x00F0) -#define VIC_FIQ_IN_STACK VIC_REG(0x00F4) -#define VIC_TEST_BUS_SEL VIC_REG(0x00F8) -#define VIC_IRQ_CTRL_CONFIG VIC_REG(0x00FC) -#else -#define VIC_IRQ_IN_SERVICE VIC_REG(0x00E0) -#define VIC_IRQ_IN_STACK VIC_REG(0x00E4) -#define VIC_TEST_BUS_SEL VIC_REG(0x00E8) -#endif - -#define VIC_VECTPRIORITY(n) VIC_REG(0x0200+((n) * 4)) -#define VIC_VECTADDR(n) VIC_REG(0x0400+((n) * 4)) - -#if defined(CONFIG_ARCH_MSM7X30) -#define VIC_NUM_REGS 4 -#else -#define VIC_NUM_REGS 2 -#endif - -#if VIC_NUM_REGS == 2 -#define DPRINT_REGS(base_reg, format, ...) \ - printk(KERN_INFO format " %x %x\n", ##__VA_ARGS__, \ - readl(base_reg ## 0), readl(base_reg ## 1)) -#define DPRINT_ARRAY(array, format, ...) \ - printk(KERN_INFO format " %x %x\n", ##__VA_ARGS__, \ - array[0], array[1]) -#elif VIC_NUM_REGS == 4 -#define DPRINT_REGS(base_reg, format, ...) \ - printk(KERN_INFO format " %x %x %x %x\n", ##__VA_ARGS__, \ - readl(base_reg ## 0), readl(base_reg ## 1), \ - readl(base_reg ## 2), readl(base_reg ## 3)) -#define DPRINT_ARRAY(array, format, ...) \ - printk(KERN_INFO format " %x %x %x %x\n", ##__VA_ARGS__, \ - array[0], array[1], \ - array[2], array[3]) -#else -#error "VIC_NUM_REGS set to illegal value" -#endif - -static uint32_t msm_irq_smsm_wake_enable[2]; -static struct { - uint32_t int_en[2]; - uint32_t int_type; - uint32_t int_polarity; - uint32_t int_select; -} msm_irq_shadow_reg[VIC_NUM_REGS]; -static uint32_t msm_irq_idle_disable[VIC_NUM_REGS]; - -#define SMSM_FAKE_IRQ (0xff) -static uint8_t msm_irq_to_smsm[NR_IRQS] = { - [INT_MDDI_EXT] = 1, - [INT_MDDI_PRI] = 2, - [INT_MDDI_CLIENT] = 3, - [INT_USB_OTG] = 4, - - [INT_PWB_I2C] = 5, - [INT_SDC1_0] = 6, - [INT_SDC1_1] = 7, - [INT_SDC2_0] = 8, - - [INT_SDC2_1] = 9, - [INT_ADSP_A9_A11] = 10, - [INT_UART1] = 11, - [INT_UART2] = 12, - - [INT_UART3] = 13, - [INT_UART1_RX] = 14, - [INT_UART2_RX] = 15, - [INT_UART3_RX] = 16, - - [INT_UART1DM_IRQ] = 17, - [INT_UART1DM_RX] = 18, - [INT_KEYSENSE] = 19, -#if !defined(CONFIG_ARCH_MSM7X30) - [INT_AD_HSSD] = 20, -#endif - - [INT_NAND_WR_ER_DONE] = 21, - [INT_NAND_OP_DONE] = 22, - [INT_TCHSCRN1] = 23, - [INT_TCHSCRN2] = 24, - - [INT_TCHSCRN_SSBI] = 25, - [INT_USB_HS] = 26, - [INT_UART2DM_RX] = 27, - [INT_UART2DM_IRQ] = 28, - - [INT_SDC4_1] = 29, - [INT_SDC4_0] = 30, - [INT_SDC3_1] = 31, - [INT_SDC3_0] = 32, - - /* fake wakeup interrupts */ - [INT_GPIO_GROUP1] = SMSM_FAKE_IRQ, - [INT_GPIO_GROUP2] = SMSM_FAKE_IRQ, - [INT_A9_M2A_0] = SMSM_FAKE_IRQ, - [INT_A9_M2A_1] = SMSM_FAKE_IRQ, - [INT_A9_M2A_5] = SMSM_FAKE_IRQ, - [INT_GP_TIMER_EXP] = SMSM_FAKE_IRQ, - [INT_DEBUG_TIMER_EXP] = SMSM_FAKE_IRQ, - [INT_ADSP_A11] = SMSM_FAKE_IRQ, -#ifdef CONFIG_ARCH_QSD8X50 - [INT_SIRC_0] = SMSM_FAKE_IRQ, - [INT_SIRC_1] = SMSM_FAKE_IRQ, -#endif -}; - -static inline void msm_irq_write_all_regs(void __iomem *base, unsigned int val) -{ - int i; - - for (i = 0; i < VIC_NUM_REGS; i++) - writel(val, base + (i * 4)); -} - -static void msm_irq_ack(struct irq_data *d) -{ - void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_CLEAR0, d->irq); - writel(1 << (d->irq & 31), reg); -} - -static void msm_irq_mask(struct irq_data *d) -{ - void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_ENCLEAR0, d->irq); - unsigned index = VIC_INT_TO_REG_INDEX(d->irq); - uint32_t mask = 1UL << (d->irq & 31); - int smsm_irq = msm_irq_to_smsm[d->irq]; - - msm_irq_shadow_reg[index].int_en[0] &= ~mask; - writel(mask, reg); - if (smsm_irq == 0) - msm_irq_idle_disable[index] &= ~mask; - else { - mask = 1UL << (smsm_irq - 1); - msm_irq_smsm_wake_enable[0] 
&= ~mask; - } -} - -static void msm_irq_unmask(struct irq_data *d) -{ - void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_ENSET0, d->irq); - unsigned index = VIC_INT_TO_REG_INDEX(d->irq); - uint32_t mask = 1UL << (d->irq & 31); - int smsm_irq = msm_irq_to_smsm[d->irq]; - - msm_irq_shadow_reg[index].int_en[0] |= mask; - writel(mask, reg); - - if (smsm_irq == 0) - msm_irq_idle_disable[index] |= mask; - else { - mask = 1UL << (smsm_irq - 1); - msm_irq_smsm_wake_enable[0] |= mask; - } -} - -static int msm_irq_set_wake(struct irq_data *d, unsigned int on) -{ - unsigned index = VIC_INT_TO_REG_INDEX(d->irq); - uint32_t mask = 1UL << (d->irq & 31); - int smsm_irq = msm_irq_to_smsm[d->irq]; - - if (smsm_irq == 0) { - printk(KERN_ERR "msm_irq_set_wake: bad wakeup irq %d\n", d->irq); - return -EINVAL; - } - if (on) - msm_irq_shadow_reg[index].int_en[1] |= mask; - else - msm_irq_shadow_reg[index].int_en[1] &= ~mask; - - if (smsm_irq == SMSM_FAKE_IRQ) - return 0; - - mask = 1UL << (smsm_irq - 1); - if (on) - msm_irq_smsm_wake_enable[1] |= mask; - else - msm_irq_smsm_wake_enable[1] &= ~mask; - return 0; -} - -static int msm_irq_set_type(struct irq_data *d, unsigned int flow_type) -{ - void __iomem *treg = VIC_INT_TO_REG_ADDR(VIC_INT_TYPE0, d->irq); - void __iomem *preg = VIC_INT_TO_REG_ADDR(VIC_INT_POLARITY0, d->irq); - unsigned index = VIC_INT_TO_REG_INDEX(d->irq); - int b = 1 << (d->irq & 31); - uint32_t polarity; - uint32_t type; - - polarity = msm_irq_shadow_reg[index].int_polarity; - if (flow_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW)) - polarity |= b; - if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_HIGH)) - polarity &= ~b; - writel(polarity, preg); - msm_irq_shadow_reg[index].int_polarity = polarity; - - type = msm_irq_shadow_reg[index].int_type; - if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) { - type |= b; - irq_desc[d->irq].handle_irq = handle_edge_irq; - } - if (flow_type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) { - type &= ~b; - irq_desc[d->irq].handle_irq = handle_level_irq; - } - writel(type, treg); - msm_irq_shadow_reg[index].int_type = type; - return 0; -} - -static struct irq_chip msm_irq_chip = { - .name = "msm", - .irq_disable = msm_irq_mask, - .irq_ack = msm_irq_ack, - .irq_mask = msm_irq_mask, - .irq_unmask = msm_irq_unmask, - .irq_set_wake = msm_irq_set_wake, - .irq_set_type = msm_irq_set_type, -}; - -void __init msm_init_irq(void) -{ - unsigned n; - - /* select level interrupts */ - msm_irq_write_all_regs(VIC_INT_TYPE0, 0); - - /* select highlevel interrupts */ - msm_irq_write_all_regs(VIC_INT_POLARITY0, 0); - - /* select IRQ for all INTs */ - msm_irq_write_all_regs(VIC_INT_SELECT0, 0); - - /* disable all INTs */ - msm_irq_write_all_regs(VIC_INT_EN0, 0); - - /* don't use vic */ - writel(0, VIC_CONFIG); - - /* enable interrupt controller */ - writel(3, VIC_INT_MASTEREN); - - for (n = 0; n < NR_MSM_IRQS; n++) { - set_irq_chip(n, &msm_irq_chip); - set_irq_handler(n, handle_level_irq); - set_irq_flags(n, IRQF_VALID); - } -} diff --git a/arch/arm/mach-msm/irq.c b/arch/arm/mach-msm/irq.c index 0b27d899f40e7..8b8bd9b5cb727 100644 --- a/arch/arm/mach-msm/irq.c +++ b/arch/arm/mach-msm/irq.c @@ -16,101 +16,475 @@ #include #include #include +#include #include #include #include #include #include +#include +#include + #include #include +#include + +#include "sirc.h" +#include "smd_private.h" + +enum { + IRQ_DEBUG_SLEEP_INT_TRIGGER = 1U << 0, + IRQ_DEBUG_SLEEP_INT = 1U << 1, + IRQ_DEBUG_SLEEP_ABORT = 1U << 2, + IRQ_DEBUG_SLEEP = 1U << 3, + IRQ_DEBUG_SLEEP_REQUEST = 1U 
<< 4, +}; +static int msm_irq_debug_mask; +module_param_named(debug_mask, msm_irq_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); #define VIC_REG(off) (MSM_VIC_BASE + (off)) +#define __bank(irq) (((irq) / 32) & 0x3) -#define VIC_INT_SELECT0 VIC_REG(0x0000) /* 1: FIQ, 0: IRQ */ -#define VIC_INT_SELECT1 VIC_REG(0x0004) /* 1: FIQ, 0: IRQ */ -#define VIC_INT_EN0 VIC_REG(0x0010) -#define VIC_INT_EN1 VIC_REG(0x0014) -#define VIC_INT_ENCLEAR0 VIC_REG(0x0020) -#define VIC_INT_ENCLEAR1 VIC_REG(0x0024) -#define VIC_INT_ENSET0 VIC_REG(0x0030) -#define VIC_INT_ENSET1 VIC_REG(0x0034) -#define VIC_INT_TYPE0 VIC_REG(0x0040) /* 1: EDGE, 0: LEVEL */ -#define VIC_INT_TYPE1 VIC_REG(0x0044) /* 1: EDGE, 0: LEVEL */ -#define VIC_INT_POLARITY0 VIC_REG(0x0050) /* 1: NEG, 0: POS */ -#define VIC_INT_POLARITY1 VIC_REG(0x0054) /* 1: NEG, 0: POS */ +#define VIC_INT_SELECT(n) VIC_REG(0x0000+((n) * 4)) /* 1: FIQ, 0: IRQ */ +#define VIC_INT_EN(n) VIC_REG(0x0010+((n) * 4)) +#define VIC_INT_ENCLEAR(n) VIC_REG(0x0020+((n) * 4)) +#define VIC_INT_ENSET(n) VIC_REG(0x0030+((n) * 4)) +#define VIC_INT_TYPE(n) VIC_REG(0x0040+((n) * 4)) /* 1: EDGE, 0: LEVEL */ +#define VIC_INT_POLARITY(n) VIC_REG(0x0050+((n) * 4)) /* 1: NEG, 0: POS */ #define VIC_NO_PEND_VAL VIC_REG(0x0060) + +#if defined(CONFIG_ARCH_MSM_SCORPION) +#define VIC_NO_PEND_VAL_FIQ VIC_REG(0x0064) +#define VIC_INT_MASTEREN VIC_REG(0x0068) /* 1: IRQ, 2: FIQ */ +#define VIC_CONFIG VIC_REG(0x006C) /* 1: USE SC VIC */ +#else #define VIC_INT_MASTEREN VIC_REG(0x0064) /* 1: IRQ, 2: FIQ */ -#define VIC_PROTECTION VIC_REG(0x006C) /* 1: ENABLE */ #define VIC_CONFIG VIC_REG(0x0068) /* 1: USE ARM1136 VIC */ -#define VIC_IRQ_STATUS0 VIC_REG(0x0080) -#define VIC_IRQ_STATUS1 VIC_REG(0x0084) -#define VIC_FIQ_STATUS0 VIC_REG(0x0090) -#define VIC_FIQ_STATUS1 VIC_REG(0x0094) -#define VIC_RAW_STATUS0 VIC_REG(0x00A0) -#define VIC_RAW_STATUS1 VIC_REG(0x00A4) -#define VIC_INT_CLEAR0 VIC_REG(0x00B0) -#define VIC_INT_CLEAR1 VIC_REG(0x00B4) -#define VIC_SOFTINT0 VIC_REG(0x00C0) -#define VIC_SOFTINT1 VIC_REG(0x00C4) +#define VIC_PROTECTION VIC_REG(0x006C) /* 1: ENABLE */ +#endif +#define VIC_IRQ_STATUS(n) VIC_REG(0x0080+((n) * 4)) +#define VIC_FIQ_STATUS(n) VIC_REG(0x0090+((n) * 4)) +#define VIC_RAW_STATUS(n) VIC_REG(0x00A0+((n) * 4)) +#define VIC_INT_CLEAR(n) VIC_REG(0x00B0+((n) * 4)) +#define VIC_SOFTINT(n) VIC_REG(0x00C0+((n) * 4)) #define VIC_IRQ_VEC_RD VIC_REG(0x00D0) /* pending int # */ #define VIC_IRQ_VEC_PEND_RD VIC_REG(0x00D4) /* pending vector addr */ #define VIC_IRQ_VEC_WR VIC_REG(0x00D8) + +#if defined(CONFIG_ARCH_MSM_SCORPION) +#define VIC_FIQ_VEC_RD VIC_REG(0x00DC) +#define VIC_FIQ_VEC_PEND_RD VIC_REG(0x00E0) +#define VIC_FIQ_VEC_WR VIC_REG(0x00E4) +#define VIC_IRQ_IN_SERVICE VIC_REG(0x00E8) +#define VIC_IRQ_IN_STACK VIC_REG(0x00EC) +#define VIC_FIQ_IN_SERVICE VIC_REG(0x00F0) +#define VIC_FIQ_IN_STACK VIC_REG(0x00F4) +#define VIC_TEST_BUS_SEL VIC_REG(0x00F8) +#define VIC_IRQ_CTRL_CONFIG VIC_REG(0x00FC) +#else #define VIC_IRQ_IN_SERVICE VIC_REG(0x00E0) #define VIC_IRQ_IN_STACK VIC_REG(0x00E4) #define VIC_TEST_BUS_SEL VIC_REG(0x00E8) +#endif #define VIC_VECTPRIORITY(n) VIC_REG(0x0200+((n) * 4)) #define VIC_VECTADDR(n) VIC_REG(0x0400+((n) * 4)) +#if defined(CONFIG_ARCH_MSM7X30) +#define VIC_NUM_BANKS 4 +#else +#define VIC_NUM_BANKS 2 +#endif + +static uint32_t msm_irq_smsm_wake_enable[2]; +static struct { + uint32_t int_en[2]; + uint32_t int_type; + uint32_t int_polarity; + uint32_t int_select; +} msm_irq_shadow_reg[VIC_NUM_BANKS]; +static uint32_t msm_irq_idle_disable[VIC_NUM_BANKS]; + 
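+/*
+ * Notes on the bookkeeping above: int_en[0] mirrors the normal enable
+ * mask and int_en[1] the wakeup-enable mask set via irq_set_wake();
+ * int_type, int_polarity and int_select are cached so the controller
+ * can be reprogrammed by msm_irq_exit_sleep1() after sleep.
+ * msm_irq_idle_disable marks enabled interrupts with no SMSM mapping
+ * (the modem cannot wake on them, so they block idle sleep), while
+ * msm_irq_smsm_wake_enable is the wakeup mask handed to the modem.
+ */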
+#ifndef CONFIG_ARCH_MSM_SCORPION +#define INT_INFO_SMSM_ID SMEM_SMSM_INT_INFO +struct smsm_interrupt_info *smsm_int_info; +#else +#define INT_INFO_SMSM_ID SMEM_APPS_DEM_SLAVE_DATA +struct msm_dem_slave_data *smsm_int_info; +#endif + + +#define SMSM_FAKE_IRQ (0xff) +static uint8_t msm_irq_to_smsm[NR_MSM_IRQS + NR_SIRC_IRQS] = { + [INT_MDDI_EXT] = 1, + [INT_MDDI_PRI] = 2, + [INT_MDDI_CLIENT] = 3, + [INT_USB_OTG] = 4, + + /* [INT_PWB_I2C] = 5 -- not usable */ + [INT_SDC1_0] = 6, + [INT_SDC1_1] = 7, + [INT_SDC2_0] = 8, + + [INT_SDC2_1] = 9, + [INT_ADSP_A9_A11] = 10, + [INT_UART1] = 11, + [INT_UART2] = 12, + + [INT_UART3] = 13, + [INT_UART1_RX] = 14, + [INT_UART2_RX] = 15, + [INT_UART3_RX] = 16, + + [INT_UART1DM_IRQ] = 17, + [INT_UART1DM_RX] = 18, + [INT_KEYSENSE] = 19, +#if !defined(CONFIG_ARCH_MSM7X30) + [INT_AD_HSSD] = 20, +#endif + + [INT_NAND_WR_ER_DONE] = 21, + [INT_NAND_OP_DONE] = 22, + [INT_TCHSCRN1] = 23, + [INT_TCHSCRN2] = 24, + + [INT_TCHSCRN_SSBI] = 25, + [INT_USB_HS] = 26, + [INT_UART2DM_RX] = 27, + [INT_UART2DM_IRQ] = 28, + + [INT_SDC4_1] = 29, + [INT_SDC4_0] = 30, + [INT_SDC3_1] = 31, + [INT_SDC3_0] = 32, + + /* fake wakeup interrupts */ + [INT_GPIO_GROUP1] = SMSM_FAKE_IRQ, + [INT_GPIO_GROUP2] = SMSM_FAKE_IRQ, + [INT_A9_M2A_0] = SMSM_FAKE_IRQ, + [INT_A9_M2A_1] = SMSM_FAKE_IRQ, + [INT_A9_M2A_5] = SMSM_FAKE_IRQ, + [INT_GP_TIMER_EXP] = SMSM_FAKE_IRQ, + [INT_DEBUG_TIMER_EXP] = SMSM_FAKE_IRQ, + [INT_ADSP_A11] = SMSM_FAKE_IRQ, + +#if defined(CONFIG_ARCH_QSD8X50) + [INT_SIRC_0] = SMSM_FAKE_IRQ, + [INT_SIRC_1] = SMSM_FAKE_IRQ, +#endif +}; + static void msm_irq_ack(struct irq_data *d) { - void __iomem *reg = VIC_INT_CLEAR0 + ((d->irq & 32) ? 4 : 0); + void __iomem *reg = VIC_INT_CLEAR(__bank(d->irq)); writel(1 << (d->irq & 31), reg); } static void msm_irq_mask(struct irq_data *d) { - void __iomem *reg = VIC_INT_ENCLEAR0 + ((d->irq & 32) ? 4 : 0); - writel(1 << (d->irq & 31), reg); + void __iomem *reg = VIC_INT_ENCLEAR(__bank(d->irq)); + unsigned index = __bank(d->irq); + uint32_t mask = 1UL << (d->irq & 31); + int smsm_irq = msm_irq_to_smsm[d->irq]; + + msm_irq_shadow_reg[index].int_en[0] &= ~mask; + writel(mask, reg); + if (smsm_irq == 0) + msm_irq_idle_disable[index] &= ~mask; + else { + mask = 1UL << (smsm_irq - 1); + msm_irq_smsm_wake_enable[0] &= ~mask; + } } static void msm_irq_unmask(struct irq_data *d) { - void __iomem *reg = VIC_INT_ENSET0 + ((d->irq & 32) ? 
4 : 0); - writel(1 << (d->irq & 31), reg); + void __iomem *reg = VIC_INT_ENSET(__bank(d->irq)); + unsigned index = __bank(d->irq); + uint32_t mask = 1UL << (d->irq & 31); + int smsm_irq = msm_irq_to_smsm[d->irq]; + + msm_irq_shadow_reg[index].int_en[0] |= mask; + writel(mask, reg); + + if (smsm_irq == 0) + msm_irq_idle_disable[index] |= mask; + else { + mask = 1UL << (smsm_irq - 1); + msm_irq_smsm_wake_enable[0] |= mask; + } } static int msm_irq_set_wake(struct irq_data *d, unsigned int on) { - return -EINVAL; + unsigned irq = d->irq; + unsigned index = __bank(irq); + uint32_t mask = 1UL << (irq & 31); + int smsm_irq = msm_irq_to_smsm[irq]; + + if (smsm_irq == 0) { + printk(KERN_ERR "msm_irq_set_wake: bad wakeup irq %d\n", irq); + return -EINVAL; + } + if (on) + msm_irq_shadow_reg[index].int_en[1] |= mask; + else + msm_irq_shadow_reg[index].int_en[1] &= ~mask; + + if (smsm_irq == SMSM_FAKE_IRQ) + return 0; + + mask = 1UL << (smsm_irq - 1); + if (on) + msm_irq_smsm_wake_enable[1] |= mask; + else + msm_irq_smsm_wake_enable[1] &= ~mask; + return 0; } static int msm_irq_set_type(struct irq_data *d, unsigned int flow_type) { - void __iomem *treg = VIC_INT_TYPE0 + ((d->irq & 32) ? 4 : 0); - void __iomem *preg = VIC_INT_POLARITY0 + ((d->irq & 32) ? 4 : 0); + void __iomem *treg = VIC_INT_TYPE(__bank(d->irq)); + void __iomem *preg = VIC_INT_POLARITY(__bank(d->irq)); + unsigned index = __bank(d->irq); int b = 1 << (d->irq & 31); + uint32_t polarity; + uint32_t type; + polarity = msm_irq_shadow_reg[index].int_polarity; if (flow_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW)) - writel(readl(preg) | b, preg); + polarity |= b; if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_HIGH)) - writel(readl(preg) & (~b), preg); + polarity &= ~b; + writel(polarity, preg); + msm_irq_shadow_reg[index].int_polarity = polarity; + type = msm_irq_shadow_reg[index].int_type; if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) { - writel(readl(treg) | b, treg); + type |= b; irq_desc[d->irq].handle_irq = handle_edge_irq; } if (flow_type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) { - writel(readl(treg) & (~b), treg); + type &= ~b; irq_desc[d->irq].handle_irq = handle_level_irq; } + writel(type, treg); + msm_irq_shadow_reg[index].int_type = type; return 0; } +int msm_irq_pending(void) +{ + int i; + + for (i = 0; i < VIC_NUM_BANKS; ++i) + if (readl(VIC_IRQ_STATUS(i))) + return 1; + return 0; +} + +static void print_vic_irq_stat(void) +{ + int i; + + for (i = 0; i < VIC_NUM_BANKS; i++) + printk(" %x", readl(VIC_IRQ_STATUS(i))); + printk("\n"); +} + +static void print_irq_array(uint32_t *arr, int cnt) +{ + int i; + + for (i = 0; i < cnt; i++) + printk(" %x", arr[i]); + printk("\n"); +} + +int msm_irq_idle_sleep_allowed(void) +{ + int i; + + if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP_REQUEST) { + printk(KERN_INFO "%s: disable", __func__); + print_irq_array(msm_irq_idle_disable, VIC_NUM_BANKS); + } + + for (i = 0; i < VIC_NUM_BANKS; ++i) + if (msm_irq_idle_disable[i]) + return 0; + return !!smsm_int_info; +} + +/* If arm9_wake is set: pass control to the other core. + * If from_idle is not set: disable non-wakeup interrupts. 
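+ *
+ * msm_irq_enter_sleep1() publishes the wakeup mask to the modem through
+ * the shared smsm_int_info structure; msm_irq_enter_sleep2() then masks
+ * the VIC and returns -EAGAIN if an enabled interrupt is already
+ * pending, so the caller can abort the sleep attempt.  On the way out,
+ * msm_irq_exit_sleep1() restores the shadowed VIC state,
+ * msm_irq_exit_sleep2() soft-triggers interrupts the modem reports as
+ * pending, and msm_irq_exit_sleep3() logs the wakeup reason when
+ * debugging is enabled.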
+ */ +void msm_irq_enter_sleep1(bool arm9_wake, int from_idle) +{ + if (!arm9_wake || !smsm_int_info) + return; + smsm_int_info->interrupt_mask = msm_irq_smsm_wake_enable[!from_idle]; + smsm_int_info->pending_interrupts = 0; +} + +int msm_irq_enter_sleep2(bool arm9_wake, int from_idle) +{ + int limit = 10; + uint32_t pending[VIC_NUM_BANKS]; + int i; + uint32_t any = 0; + + if (from_idle && !arm9_wake) + return 0; + + /* edge triggered interrupt may get lost if this mode is used */ + WARN_ON_ONCE(!arm9_wake && !from_idle); + + if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP) { + printk(KERN_INFO "%s: change irq, pend", __func__); + print_vic_irq_stat(); + } + + for (i = 0; i < VIC_NUM_BANKS; ++i) { + pending[i] = readl(VIC_IRQ_STATUS(i)); + pending[i] &= msm_irq_shadow_reg[i].int_en[!from_idle]; + /* Clear INT_A9_M2A_5 since requesting sleep triggers it */ + if (i == (INT_A9_M2A_5 / 32)) + pending[i] &= ~(1U << (INT_A9_M2A_5 % 32)); + any |= pending[i]; + } + + if (any) { + if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP_ABORT) { + printk(KERN_INFO "%s abort", __func__); + print_irq_array(pending, VIC_NUM_BANKS); + } + return -EAGAIN; + } + + for (i = 0; i < VIC_NUM_BANKS; ++i) + writel(0, VIC_INT_EN(i)); + + while (limit-- > 0) { + int pend_irq; + int irq = readl(VIC_IRQ_VEC_RD); + if (irq == -1) + break; + pend_irq = readl(VIC_IRQ_VEC_PEND_RD); + if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP_INT) + printk(KERN_INFO "msm_irq_enter_sleep cleared " + "int %d (%d)\n", irq, pend_irq); + } + + if (arm9_wake) { + msm_irq_set_type(irq_get_irq_data(INT_A9_M2A_6), + IRQF_TRIGGER_RISING); + msm_irq_ack(irq_get_irq_data(INT_A9_M2A_6)); + writel(1U << INT_A9_M2A_6, VIC_INT_ENSET(0)); + } else { + for (i = 0; i < VIC_NUM_BANKS; ++i) + writel(msm_irq_shadow_reg[i].int_en[1], + VIC_INT_ENSET(i)); + } + + return 0; +} + +void msm_irq_exit_sleep1(void) +{ + int i; + + msm_irq_ack(irq_get_irq_data(INT_A9_M2A_6)); + msm_irq_ack(irq_get_irq_data(INT_PWB_I2C)); + for (i = 0; i < VIC_NUM_BANKS; i++) { + writel(msm_irq_shadow_reg[i].int_type, VIC_INT_TYPE(i)); + writel(msm_irq_shadow_reg[i].int_polarity, VIC_INT_POLARITY(i)); + writel(msm_irq_shadow_reg[i].int_en[0], VIC_INT_EN(i)); + writel(msm_irq_shadow_reg[i].int_select, VIC_INT_SELECT(i)); + } + writel(3, VIC_INT_MASTEREN); + if (!smsm_int_info) { + printk(KERN_ERR "msm_irq_exit_sleep \n"); + return; + } + if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP) { + printk(KERN_INFO "%s %x %x %x now", __func__, + smsm_int_info->interrupt_mask, + smsm_int_info->pending_interrupts, + smsm_int_info->wakeup_reason); + print_vic_irq_stat(); + } +} + +void msm_irq_exit_sleep2(void) +{ + int i; + uint32_t pending; + + if (!smsm_int_info) { + printk(KERN_ERR "msm_irq_exit_sleep \n"); + return; + } + if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP) { + printk(KERN_INFO "%s %x %x %x now", __func__, + smsm_int_info->interrupt_mask, + smsm_int_info->pending_interrupts, + smsm_int_info->wakeup_reason); + print_vic_irq_stat(); + } + pending = smsm_int_info->pending_interrupts; + for (i = 0; pending && i < ARRAY_SIZE(msm_irq_to_smsm); i++) { + unsigned bank = __bank(i); + uint32_t reg_mask = 1UL << (i & 31); + int smsm_irq = msm_irq_to_smsm[i]; + uint32_t smsm_mask; + if (smsm_irq == 0) + continue; + smsm_mask = 1U << (smsm_irq - 1); + if (!(pending & smsm_mask)) + continue; + pending &= ~smsm_mask; + if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP_INT) { + printk(KERN_INFO "%s: irq %d still pending %x now", + __func__, i, pending); + print_vic_irq_stat(); + } +#if 0 /* debug intetrrupt trigger */ + if 
(readl(VIC_IRQ_STATUS(bank)) & reg_mask) + writel(reg_mask, VIC_INT_CLEAR(bank)); +#endif + if (readl(VIC_IRQ_STATUS(bank)) & reg_mask) + continue; + writel(reg_mask, VIC_SOFTINT(bank)); + if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP_INT_TRIGGER) { + printk(KERN_INFO "%s: irq %d need trigger, now", + __func__, i); + print_vic_irq_stat(); + } + } +} + +void msm_irq_exit_sleep3(void) +{ + if (!smsm_int_info) { + printk(KERN_ERR "msm_irq_exit_sleep \n"); + return; + } + if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP) { + printk(KERN_INFO "%s %x %x %x state %x now", __func__, + smsm_int_info->interrupt_mask, + smsm_int_info->pending_interrupts, + smsm_int_info->wakeup_reason, + smsm_get_state(SMSM_STATE_MODEM)); + print_vic_irq_stat(); + } +} + static struct irq_chip msm_irq_chip = { .name = "msm", + .irq_disable = msm_irq_mask, .irq_ack = msm_irq_ack, .irq_mask = msm_irq_mask, .irq_unmask = msm_irq_unmask, @@ -122,31 +496,163 @@ void __init msm_init_irq(void) { unsigned n; - /* select level interrupts */ - writel(0, VIC_INT_TYPE0); - writel(0, VIC_INT_TYPE1); + for (n = 0; n < VIC_NUM_BANKS; ++n) { + /* select level interrupts */ + writel(0, VIC_INT_TYPE(n)); - /* select highlevel interrupts */ - writel(0, VIC_INT_POLARITY0); - writel(0, VIC_INT_POLARITY1); + /* select highlevel interrupts */ + writel(0, VIC_INT_POLARITY(n)); - /* select IRQ for all INTs */ - writel(0, VIC_INT_SELECT0); - writel(0, VIC_INT_SELECT1); + /* select IRQ for all INTs */ + writel(0, VIC_INT_SELECT(n)); - /* disable all INTs */ - writel(0, VIC_INT_EN0); - writel(0, VIC_INT_EN1); + /* disable all INTs */ + writel(0, VIC_INT_EN(n)); + } /* don't use 1136 vic */ writel(0, VIC_CONFIG); /* enable interrupt controller */ - writel(1, VIC_INT_MASTEREN); + writel(3, VIC_INT_MASTEREN); for (n = 0; n < NR_MSM_IRQS; n++) { set_irq_chip(n, &msm_irq_chip); set_irq_handler(n, handle_level_irq); set_irq_flags(n, IRQF_VALID); } + + msm_init_sirc(); +} + +static int __init msm_init_irq_late(void) +{ + smsm_int_info = smem_alloc(INT_INFO_SMSM_ID, sizeof(*smsm_int_info)); + if (!smsm_int_info) + pr_err("set_wakeup_mask NO INT_INFO (%d)\n", INT_INFO_SMSM_ID); + return 0; +} +late_initcall(msm_init_irq_late); + +#if defined(CONFIG_MSM_FIQ_SUPPORT) +void msm_trigger_irq(int irq) +{ + void __iomem *reg = VIC_SOFTINT(__bank(irq)); + uint32_t mask = 1UL << (irq & 31); + writel(mask, reg); +} + +void msm_fiq_enable(int irq) +{ + struct irq_data *d = irq_get_irq_data(irq); + unsigned long flags; + local_irq_save(flags); + d->chip->irq_unmask(d); + local_irq_restore(flags); +} + +void msm_fiq_disable(int irq) +{ + struct irq_data *d = irq_get_irq_data(irq); + unsigned long flags; + local_irq_save(flags); + d->chip->irq_mask(d); + local_irq_restore(flags); +} + +static void _msm_fiq_select(int irq) +{ + void __iomem *reg = VIC_INT_SELECT(__bank(irq)); + unsigned index = __bank(irq); + uint32_t mask = 1UL << (irq & 31); + unsigned long flags; + + local_irq_save(flags); + msm_irq_shadow_reg[index].int_select |= mask; + writel(msm_irq_shadow_reg[index].int_select, reg); + local_irq_restore(flags); +} + +static void _msm_fiq_unselect(int irq) +{ + void __iomem *reg = VIC_INT_SELECT(__bank(irq)); + unsigned index = __bank(irq); + uint32_t mask = 1UL << (irq & 31); + unsigned long flags; + + local_irq_save(flags); + msm_irq_shadow_reg[index].int_select &= (!mask); + writel(msm_irq_shadow_reg[index].int_select, reg); + local_irq_restore(flags); +} + +void msm_fiq_select(int irq) +{ + if (irq < FIRST_SIRC_IRQ) + _msm_fiq_select(irq); + else if (irq < 
FIRST_GPIO_IRQ) + sirc_fiq_select(irq, true); + else + pr_err("unsupported fiq %d", irq); +} + +void msm_fiq_unselect(int irq) +{ + if (irq < FIRST_SIRC_IRQ) + _msm_fiq_unselect(irq); + else if (irq < FIRST_GPIO_IRQ) + sirc_fiq_select(irq, false); + else + pr_err("unsupported fiq %d", irq); +} + +/* set_fiq_handler originally from arch/arm/kernel/fiq.c */ +static void set_fiq_handler(void *start, unsigned int length) +{ +#if defined(CONFIG_CPU_USE_DOMAINS) + memcpy((void *)0xffff001c, start, length); +#else + memcpy(vectors_page + 0x1c, start, length); +#endif + flush_icache_range(0xffff001c, 0xffff001c + length); + if (!vectors_high()) + flush_icache_range(0x1c, 0x1c + length); +} + +extern unsigned char fiq_glue, fiq_glue_end; + +static void (*fiq_func)(void *data, void *regs, void *svc_sp); +static void *fiq_data; +static void *fiq_stack; + +void fiq_glue_setup(void *func, void *data, void *sp); + +int msm_fiq_set_handler(void (*func)(void *data, void *regs, void *svc_sp), + void *data) +{ + unsigned long flags; + int ret = -ENOMEM; + + if (!fiq_stack) + fiq_stack = kzalloc(THREAD_SIZE, GFP_KERNEL); + if (!fiq_stack) + return -ENOMEM; + + local_irq_save(flags); + if (fiq_func == 0) { + fiq_func = func; + fiq_data = data; + fiq_glue_setup(func, data, fiq_stack + THREAD_START_SP); + set_fiq_handler(&fiq_glue, (&fiq_glue_end - &fiq_glue)); + ret = 0; + } + local_irq_restore(flags); + return ret; +} + +void msm_fiq_exit_sleep(void) +{ + if (fiq_stack) + fiq_glue_setup(fiq_func, fiq_data, fiq_stack + THREAD_START_SP); } +#endif diff --git a/arch/arm/mach-msm/memory.c b/arch/arm/mach-msm/memory.c new file mode 100644 index 0000000000000..21a3db208fb93 --- /dev/null +++ b/arch/arm/mach-msm/memory.c @@ -0,0 +1,366 @@ +/* arch/arm/mach-msm/memory.c + * + * Copyright (C) 2007 Google, Inc. + * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(CONFIG_MSM_NPA_REMOTE) +#include "npa_remote.h" +#include +#include +#endif +#include +#include +#include +#include <../../mm/mm.h> + +int arch_io_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn, unsigned long size, pgprot_t prot) +{ + unsigned long pfn_addr = pfn << PAGE_SHIFT; + if ((pfn_addr >= 0x88000000) && (pfn_addr < 0xD0000000)) { + prot = pgprot_device(prot); + pr_debug("remapping device %lx\n", prot); + } + return remap_pfn_range(vma, addr, pfn, size, prot); +} + +void *strongly_ordered_page; +char strongly_ordered_mem[PAGE_SIZE*2-4]; + +void map_page_strongly_ordered(void) +{ +#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A) + long unsigned int phys; + struct map_desc map; + + if (strongly_ordered_page) + return; + + strongly_ordered_page = (void*)PFN_ALIGN((int)&strongly_ordered_mem); + phys = __pa(strongly_ordered_page); + + map.pfn = __phys_to_pfn(phys); + map.virtual = MSM_STRONGLY_ORDERED_PAGE; + map.length = PAGE_SIZE; + map.type = MT_DEVICE_STRONGLY_ORDERED; + create_mapping(&map); + + printk(KERN_ALERT "Initialized strongly ordered page successfully\n"); +#endif +} +EXPORT_SYMBOL(map_page_strongly_ordered); + +void write_to_strongly_ordered_memory(void) +{ +#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A) + if (!strongly_ordered_page) { + if (!in_interrupt()) + map_page_strongly_ordered(); + else { + printk(KERN_ALERT "Cannot map strongly ordered page in " + "Interrupt Context\n"); + /* capture it here before the allocation fails later */ + BUG(); + } + } + *(int *)MSM_STRONGLY_ORDERED_PAGE = 0; +#endif +} +EXPORT_SYMBOL(write_to_strongly_ordered_memory); + +void flush_axi_bus_buffer(void) +{ +#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A) + __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \ + : : "r" (0) : "memory"); + write_to_strongly_ordered_memory(); +#endif +} + +#define CACHE_LINE_SIZE 32 + +/* These cache related routines make the assumption that the associated + * physical memory is contiguous. They will operate on all (L1 + * and L2 if present) caches. 
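+ *
+ * The CP15 operations used are: c7,c14,1 (clean and invalidate a
+ * D-cache line by MVA), c7,c10,1 (clean a line), c7,c6,1 (invalidate a
+ * line), c7,c10,4 (data synchronization barrier / drain write buffer)
+ * and c7,c5,0 (invalidate the instruction cache); the outer_*()
+ * helpers cover the L2 when CONFIG_OUTER_CACHE is set.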
+ */ +void clean_and_invalidate_caches(unsigned long vstart, + unsigned long length, unsigned long pstart) +{ + unsigned long vaddr; + + for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE) + asm ("mcr p15, 0, %0, c7, c14, 1" : : "r" (vaddr)); +#ifdef CONFIG_OUTER_CACHE + outer_flush_range(pstart, pstart + length); +#endif + asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0)); + asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0)); + + flush_axi_bus_buffer(); +} + +void clean_caches(unsigned long vstart, + unsigned long length, unsigned long pstart) +{ + unsigned long vaddr; + + for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE) + asm ("mcr p15, 0, %0, c7, c10, 1" : : "r" (vaddr)); +#ifdef CONFIG_OUTER_CACHE + outer_clean_range(pstart, pstart + length); +#endif + asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0)); + asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0)); + + flush_axi_bus_buffer(); +} + +void invalidate_caches(unsigned long vstart, + unsigned long length, unsigned long pstart) +{ + unsigned long vaddr; + + for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE) + asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (vaddr)); +#ifdef CONFIG_OUTER_CACHE + outer_inv_range(pstart, pstart + length); +#endif + asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0)); + asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0)); + + flush_axi_bus_buffer(); +} + +void *alloc_bootmem_aligned(unsigned long size, unsigned long alignment) +{ + void *unused_addr = NULL; + unsigned long addr, tmp_size, unused_size; + + /* Allocate maximum size needed, see where it ends up. + * Then free it -- in this path there are no other allocators + * so we can depend on getting the same address back + * when we allocate a smaller piece that is aligned + * at the end (if necessary) and the piece we really want, + * then free the unused first piece. 
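+ *
+ * Worked example (addresses purely illustrative): for size = 1M and
+ * alignment = 1M, if the throw-away allocation comes back at
+ * 0x00234000, unused_size is 0xcc000; allocating and later freeing
+ * that gap pushes the real allocation up to 0x00300000, which is
+ * 1M aligned.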
+ */ + + tmp_size = size + alignment - PAGE_SIZE; + addr = (unsigned long)alloc_bootmem(tmp_size); + free_bootmem(__pa(addr), tmp_size); + + unused_size = alignment - (addr % alignment); + if (unused_size) + unused_addr = alloc_bootmem(unused_size); + + addr = (unsigned long)alloc_bootmem(size); + if (unused_size) + free_bootmem(__pa(unused_addr), unused_size); + + return (void *)addr; +} + +int (*change_memory_power)(u64, u64, int); + +int platform_physical_remove_pages(u64 start, u64 size) +{ + if (!change_memory_power) + return 0; + return change_memory_power(start, size, MEMORY_DEEP_POWERDOWN); +} + +int platform_physical_active_pages(u64 start, u64 size) +{ + if (!change_memory_power) + return 0; + return change_memory_power(start, size, MEMORY_ACTIVE); +} + +int platform_physical_low_power_pages(u64 start, u64 size) +{ + if (!change_memory_power) + return 0; + return change_memory_power(start, size, MEMORY_SELF_REFRESH); +} + +char *memtype_name[] = { + "SMI_KERNEL", + "SMI", + "EBI0", + "EBI1" +}; + +struct reserve_info *reserve_info; + +static unsigned long stable_size(struct membank *mb, + unsigned long unstable_limit) +{ + if (!unstable_limit || mb->start + mb->size <= unstable_limit) + return mb->size; + if (mb->start >= unstable_limit) + return 0; + return unstable_limit - mb->start; +} + +static void __init calculate_reserve_limits(void) +{ + int i; + struct membank *mb; + int memtype; + struct memtype_reserve *mt; + unsigned long size; + + for (i = 0, mb = &meminfo.bank[0]; i < meminfo.nr_banks; i++, mb++) { + memtype = reserve_info->paddr_to_memtype(mb->start); + if (memtype == MEMTYPE_NONE) { + pr_warning("unknown memory type for bank at %lx\n", + mb->start); + continue; + } + mt = &reserve_info->memtype_reserve_table[memtype]; + size = stable_size(mb, reserve_info->low_unstable_address); + mt->limit = max(mt->limit, size); + } +} + +static void __init adjust_reserve_sizes(void) +{ + int i; + struct memtype_reserve *mt; + + mt = &reserve_info->memtype_reserve_table[0]; + for (i = 0; i < MEMTYPE_MAX; i++, mt++) { + if (mt->flags & MEMTYPE_FLAGS_1M_ALIGN) + mt->size = (mt->size + SECTION_SIZE - 1) & SECTION_MASK; + if (mt->size > mt->limit) { + pr_warning("%lx size for %s too large, setting to %lx\n", + mt->size, memtype_name[i], mt->limit); + mt->size = mt->limit; + } + } +} + +static void __init reserve_memory_for_mempools(void) +{ + int i, memtype, membank_type; + struct memtype_reserve *mt; + struct membank *mb; + int ret; + unsigned long size; + + mt = &reserve_info->memtype_reserve_table[0]; + for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) { + if (mt->flags & MEMTYPE_FLAGS_FIXED || !mt->size) + continue; + + /* We know we will find a memory bank of the proper size + * as we have limited the size of the memory pool for + * each memory type to the size of the largest memory + * bank. Choose the memory bank with the highest physical + * address which is large enough, so that we will not + * take memory from the lowest memory bank which the kernel + * is in (and cause boot problems) and so that we might + * be able to steal memory that would otherwise become + * highmem. However, do not use unstable memory. 
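+	 *
+	 * The chosen region is carved off the top of the bank with
+	 * memblock_remove(), so it never reaches the page allocator and
+	 * is later handed to initialize_memory_pool() as a raw physical
+	 * range.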
+ */ + for (i = meminfo.nr_banks - 1; i >= 0; i--) { + mb = &meminfo.bank[i]; + membank_type = + reserve_info->paddr_to_memtype(mb->start); + if (memtype != membank_type) + continue; + size = stable_size(mb, + reserve_info->low_unstable_address); + if (size >= mt->size) { + mt->start = mb->start + size - mt->size; + ret = memblock_remove(mt->start, mt->size); + BUG_ON(ret); + break; + } + } + } +} + +static void __init initialize_mempools(void) +{ + struct mem_pool *mpool; + int memtype; + struct memtype_reserve *mt; + + mt = &reserve_info->memtype_reserve_table[0]; + for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) { + if (!mt->size) + continue; + mpool = initialize_memory_pool(mt->start, mt->size, memtype); + if (!mpool) + pr_warning("failed to create %s mempool\n", + memtype_name[memtype]); + } +} + +void __init msm_reserve(void) +{ + memory_pool_init(); + reserve_info->calculate_reserve_sizes(); + calculate_reserve_limits(); + adjust_reserve_sizes(); + reserve_memory_for_mempools(); + initialize_mempools(); +} + +static int get_ebi_memtype(void) +{ + /* on 7x30 and 8x55 "EBI1 kernel PMEM" is really on EBI0 */ + if (cpu_is_msm7x30() || cpu_is_msm8x55()) + return MEMTYPE_EBI0; + return MEMTYPE_EBI1; +} + +void *allocate_contiguous_ebi(unsigned long size, + unsigned long align, int cached) +{ + return allocate_contiguous_memory(size, get_ebi_memtype(), + align, cached); +} +EXPORT_SYMBOL(allocate_contiguous_ebi); + +unsigned long allocate_contiguous_ebi_nomap(unsigned long size, + unsigned long align) +{ + return _allocate_contiguous_memory_nomap(size, get_ebi_memtype(), + align, __builtin_return_address(0)); +} +EXPORT_SYMBOL(allocate_contiguous_ebi_nomap); + +unsigned int msm_ttbr0; + +void store_ttbr0(void) +{ + /* Store TTBR0 for post-mortem debugging purposes. */ + asm("mrc p15, 0, %0, c2, c0, 0\n" + : "=r" (msm_ttbr0)); +} diff --git a/arch/arm/mach-msm/msm_flashlight.c b/arch/arm/mach-msm/msm_flashlight.c new file mode 100644 index 0000000000000..4b6e2cce2b972 --- /dev/null +++ b/arch/arm/mach-msm/msm_flashlight.c @@ -0,0 +1,490 @@ +/* + * arch/arm/mach-msm/msm_flashlight.c - The flashlight driver + * Copyright (C) 2009 HTC Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +/*#include */ +#include +#include + +struct flashlight_struct { + struct led_classdev fl_lcdev; + struct early_suspend early_suspend_flashlight; + struct hrtimer timer; + struct wake_lock wake_lock; + spinlock_t spin_lock; + uint32_t gpio_torch; + uint32_t gpio_flash; + uint32_t gpio_flash_adj; + uint32_t flash_sw_timeout_ms; + enum flashlight_mode_flags mode_status; + unsigned long spinlock_flags; + unsigned flash_adj_gpio_status; + /* inactive: 0x0 + * active: 0x1 + * force disable flashlight function: 0x2 */ + uint8_t flash_adj_value; + uint8_t led_count; +}; + +/* disable it, we didn't need to adjust GPIO */ +/* #define FLASHLIGHT_ADJ_FUNC */ + +static struct flashlight_struct *this_fl_str; + +static void flashlight_hw_command(uint8_t addr, uint8_t data) +{ + uint8_t loop_i, loop_j; + const uint8_t fl_addr_to_rising_count[4] = { 17, 18, 19, 20 }; + uint8_t loop_tmp; + if (!this_fl_str->gpio_torch && !this_fl_str->gpio_torch) { + printk(KERN_ERR "%s: not setup GPIO??? torch: %d, flash: %d\n", + __func__, this_fl_str->gpio_torch, + this_fl_str->gpio_flash); + return; + } + for (loop_j = 0; loop_j < 2; loop_j++) { + if (!loop_j) + loop_tmp = fl_addr_to_rising_count[addr]; + else + loop_tmp = data; + for (loop_i = 0; loop_i < loop_tmp; loop_i++) { + gpio_direction_output(this_fl_str->gpio_torch, 0); + udelay(2); + gpio_direction_output(this_fl_str->gpio_torch, 1); + udelay(2); + } + udelay(500); + } +} + +static void flashlight_turn_off(void) +{ + gpio_direction_output(this_fl_str->gpio_flash, 0); + gpio_direction_output(this_fl_str->gpio_torch, 0); + this_fl_str->mode_status = FL_MODE_OFF; + this_fl_str->fl_lcdev.brightness = LED_OFF; + wake_unlock(&this_fl_str->wake_lock); +} + +static enum hrtimer_restart flashlight_hrtimer_func(struct hrtimer *timer) +{ + struct flashlight_struct *fl_str = container_of(timer, + struct flashlight_struct, timer); + wake_unlock(&fl_str->wake_lock); + spin_lock_irqsave(&fl_str->spin_lock, fl_str->spinlock_flags); + flashlight_turn_off(); + spin_unlock_irqrestore(&fl_str->spin_lock, fl_str->spinlock_flags); + printk(KERN_INFO "%s: turn off flash mode\n", __func__); + return HRTIMER_NORESTART; +} + +int flashlight_control(int mode) +{ + int ret = 0; + uint32_t flash_ns = ktime_to_ns(ktime_get()); + +#if 0 /* disable flash_adj_value check now */ + if (this_fl_str->flash_adj_value == 2) { + printk(KERN_WARNING "%s: force disable function!\n", __func__); + return -EIO; + } +#endif + spin_lock_irqsave(&this_fl_str->spin_lock, + this_fl_str->spinlock_flags); + if (this_fl_str->mode_status == FL_MODE_FLASH) { + hrtimer_cancel(&this_fl_str->timer); + wake_unlock(&this_fl_str->wake_lock); + flashlight_turn_off(); + } + switch (mode) { + case FL_MODE_OFF: + flashlight_turn_off(); + break; + case FL_MODE_TORCH: + flashlight_hw_command(3, 1); + flashlight_hw_command(0, 15); + flashlight_hw_command(2, 4); + this_fl_str->mode_status = FL_MODE_TORCH; + this_fl_str->fl_lcdev.brightness = LED_HALF; + break; + case FL_MODE_TORCH_LED_A: + flashlight_hw_command(3, 1); + flashlight_hw_command(0, 15); + flashlight_hw_command(2, 3); + this_fl_str->mode_status = FL_MODE_TORCH_LED_A; + this_fl_str->fl_lcdev.brightness = 1; + break; + case FL_MODE_TORCH_LED_B: + flashlight_hw_command(3, 1); + flashlight_hw_command(0, 15); + flashlight_hw_command(2, 2); + this_fl_str->mode_status = FL_MODE_TORCH_LED_B; + this_fl_str->fl_lcdev.brightness = 2; + break; + case FL_MODE_FLASH: + 
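+		/*
+		 * Full-power flash: drive the flash GPIO high and arm the
+		 * software timeout so flashlight_turn_off() runs after
+		 * flash_sw_timeout_ms even if userspace never clears the
+		 * mode; the wakelock prevents suspend until then.
+		 */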
flashlight_hw_command(2, 4); + gpio_direction_output(this_fl_str->gpio_flash, 1); + this_fl_str->mode_status = FL_MODE_FLASH; + this_fl_str->fl_lcdev.brightness = LED_FULL; + hrtimer_start(&this_fl_str->timer, + ktime_set(this_fl_str->flash_sw_timeout_ms / 1000, + (this_fl_str->flash_sw_timeout_ms % 1000) * + NSEC_PER_MSEC), HRTIMER_MODE_REL); + wake_lock(&this_fl_str->wake_lock); + break; + case FL_MODE_PRE_FLASH: + flashlight_hw_command(3, 1); + flashlight_hw_command(0, 9); + flashlight_hw_command(2, 4); + this_fl_str->mode_status = FL_MODE_PRE_FLASH; + this_fl_str->fl_lcdev.brightness = LED_HALF + 1; + break; + case FL_MODE_TORCH_LEVEL_1: + flashlight_hw_command(3, 8); + flashlight_hw_command(0, 15); + flashlight_hw_command(2, 4); + this_fl_str->mode_status = FL_MODE_TORCH_LEVEL_1; + this_fl_str->fl_lcdev.brightness = LED_HALF - 2; + break; + case FL_MODE_TORCH_LEVEL_2: + flashlight_hw_command(3, 4); + flashlight_hw_command(0, 15); + flashlight_hw_command(2, 4); + this_fl_str->mode_status = FL_MODE_TORCH_LEVEL_2; + this_fl_str->fl_lcdev.brightness = LED_HALF - 1; + break; + case FL_MODE_DEATH_RAY: + pr_info("%s: death ray\n", __func__); + hrtimer_cancel(&this_fl_str->timer); + gpio_direction_output(this_fl_str->gpio_flash, 0); + udelay(40); + gpio_direction_output(this_fl_str->gpio_flash, 1); + this_fl_str->mode_status = 0; + this_fl_str->fl_lcdev.brightness = 3; + wake_lock(&this_fl_str->wake_lock); + break; + default: + printk(KERN_ERR "%s: unknown flash_light flags: %d\n", + __func__, mode); + ret = -EINVAL; + break; + } + + printk(KERN_DEBUG "%s: mode: %d, %u\n", FLASHLIGHT_NAME, mode, + flash_ns/(1000*1000)); + + spin_unlock_irqrestore(&this_fl_str->spin_lock, + this_fl_str->spinlock_flags); + return ret; +} + +static void fl_lcdev_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct flashlight_struct *fl_str; + enum flashlight_mode_flags mode; + + fl_str = container_of(led_cdev, struct flashlight_struct, fl_lcdev); + if (brightness > 0 && brightness <= LED_HALF) { + /* Torch mode */ + if (brightness == (LED_HALF - 2)) + mode = FL_MODE_TORCH_LEVEL_1; + else if (brightness == (LED_HALF - 1)) + mode = FL_MODE_TORCH_LEVEL_2; + else if (brightness == 1 && fl_str->led_count) + mode = FL_MODE_TORCH_LED_A; + else if (brightness == 2 && fl_str->led_count) + mode = FL_MODE_TORCH_LED_B; + else if (brightness == 3) + mode = FL_MODE_DEATH_RAY; + else + mode = FL_MODE_TORCH; + } else if (brightness > LED_HALF && brightness <= LED_FULL) { + /* Flashlight mode */ + if (brightness == (LED_HALF + 1)) + mode = FL_MODE_PRE_FLASH; /* pre-flash mode */ + else + mode = FL_MODE_FLASH; + } else + /* off and else */ + mode = FL_MODE_OFF; + flashlight_control(mode); + + return; +} + +static void flashlight_early_suspend(struct early_suspend *handler) +{ + struct flashlight_struct *fl_str = container_of(handler, + struct flashlight_struct, early_suspend_flashlight); + if (fl_str != NULL && fl_str->mode_status) { + spin_lock_irqsave(&fl_str->spin_lock, fl_str->spinlock_flags); + flashlight_turn_off(); + spin_unlock_irqrestore(&fl_str->spin_lock, + fl_str->spinlock_flags); + } +} + +static void flashlight_late_resume(struct early_suspend *handler) +{ + /* + struct flashlight_struct *fl_str = container_of(handler, + struct flashlight_struct, early_suspend_flashlight); + */ +} + +static int flashlight_setup_gpio(struct flashlight_platform_data *flashlight, + struct flashlight_struct *fl_str) +{ + int ret = 0; + if (flashlight->gpio_init) + flashlight->gpio_init(); + if 
(flashlight->torch) { + ret = gpio_request(flashlight->torch, "fl_torch"); + if (ret < 0) { + printk(KERN_ERR "%s: gpio_request(torch) failed\n", + __func__); + return ret; + } + fl_str->gpio_torch = flashlight->torch; + } + + if (flashlight->flash) { + ret = gpio_request(flashlight->flash, "fl_flash"); + if (ret < 0) { + printk(KERN_ERR "%s: gpio_request(flash) failed\n", + __func__); + return ret; + } + fl_str->gpio_flash = flashlight->flash; + } + + if (flashlight->flash_adj) { + ret = gpio_request(flashlight->flash_adj, "fl_flash_adj"); + if (ret < 0) { + printk(KERN_ERR "%s: gpio_request(flash_adj) failed\n", + __func__); + return ret; + } + fl_str->gpio_flash_adj = flashlight->flash_adj; + gpio_set_value(fl_str->gpio_flash_adj, 0); + fl_str->flash_adj_gpio_status = 0; + printk(KERN_DEBUG "%s: enable flash_adj function\n", + FLASHLIGHT_NAME); + } + if (flashlight->flash_duration_ms) + fl_str->flash_sw_timeout_ms = flashlight->flash_duration_ms; + else /* load default value */ + fl_str->flash_sw_timeout_ms = 600; + return ret; +} + +static int flashlight_free_gpio(struct flashlight_platform_data *flashlight, + struct flashlight_struct *fl_str) +{ + int ret = 0; + if (fl_str->gpio_torch) { + gpio_free(flashlight->torch); + fl_str->gpio_torch = 0; + } + + if (fl_str->gpio_flash) { + gpio_free(flashlight->flash); + fl_str->gpio_flash = 0; + } + + if (fl_str->gpio_flash_adj) { + gpio_free(flashlight->flash_adj); + fl_str->gpio_flash_adj = 0; + } + + return ret; +} + +#ifdef FLASHLIGHT_ADJ_FUNC +static ssize_t show_flash_adj(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t length; + length = sprintf(buf, "%d\n", this_fl_str->flash_adj_value); + return length; +} + +static ssize_t store_flash_adj(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + static int tmp, adj_tmp; + if ((buf[0] == '0' || buf[0] == '1' || buf[0] == '2') + && buf[1] == '\n') { + spin_lock_irqsave(&this_fl_str->spin_lock, + this_fl_str->spinlock_flags); + tmp = buf[0] - 0x30; + if (tmp == this_fl_str->flash_adj_value) { + spin_unlock_irqrestore(&this_fl_str->spin_lock, + this_fl_str->spinlock_flags); + printk(KERN_NOTICE "%s: status is same(%d)\n", + __func__, this_fl_str->flash_adj_value); + return count; + } + adj_tmp = this_fl_str->gpio_flash_adj; + switch (tmp) { + case 2: + flashlight_turn_off(); + break; + case 1: + /* + if (this_fl_str->flash_adj_gpio_status) { + gpio_set_value(adj_tmp, 0); + this_fl_str->flash_adj_gpio_status = 0; + } + */ + break; + case 0: + /* + if (!this_fl_str->flash_adj_gpio_status) { + gpio_set_value(adj_tmp, 1); + this_fl_str->flash_adj_gpio_status = 1; + } + */ + break; + } + this_fl_str->flash_adj_value = tmp; + spin_unlock_irqrestore(&this_fl_str->spin_lock, + this_fl_str->spinlock_flags); + } + return count; +} + +static DEVICE_ATTR(flash_adj, 0666, show_flash_adj, store_flash_adj); +#endif + +static int flashlight_probe(struct platform_device *pdev) +{ + + struct flashlight_platform_data *flashlight = pdev->dev.platform_data; + struct flashlight_struct *fl_str; + int err = 0; + + fl_str = kzalloc(sizeof(struct flashlight_struct), GFP_KERNEL); + if (!fl_str) { + printk(KERN_ERR "%s: kzalloc fail !!!\n", __func__); + return -ENOMEM; + } + + err = flashlight_setup_gpio(flashlight, fl_str); + if (err < 0) { + printk(KERN_ERR "%s: setup GPIO fail !!!\n", __func__); + goto fail_free_mem; + } + spin_lock_init(&fl_str->spin_lock); + wake_lock_init(&fl_str->wake_lock, WAKE_LOCK_SUSPEND, pdev->name); + fl_str->fl_lcdev.name = 
pdev->name; + fl_str->fl_lcdev.brightness_set = fl_lcdev_brightness_set; + fl_str->fl_lcdev.brightness = 0; + err = led_classdev_register(&pdev->dev, &fl_str->fl_lcdev); + if (err < 0) { + printk(KERN_ERR "failed on led_classdev_register\n"); + goto fail_free_gpio; + } +#ifdef FLASHLIGHT_ADJ_FUNC + if (fl_str->gpio_flash_adj) { + printk(KERN_DEBUG "%s: flash_adj exist, create attr file\n", + __func__); + err = device_create_file(fl_str->fl_lcdev.dev, + &dev_attr_flash_adj); + if (err != 0) + printk(KERN_WARNING "dev_attr_flash_adj failed\n"); + } +#endif +#ifdef CONFIG_HAS_EARLYSUSPEND + fl_str->early_suspend_flashlight.suspend = flashlight_early_suspend; + fl_str->early_suspend_flashlight.resume = flashlight_late_resume; + register_early_suspend(&fl_str->early_suspend_flashlight); +#endif + hrtimer_init(&fl_str->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + fl_str->timer.function = flashlight_hrtimer_func; + fl_str->led_count = flashlight->led_count; + + this_fl_str = fl_str; + printk(KERN_INFO "%s: The Flashlight Driver is ready\n", __func__); + return 0; + +fail_free_gpio: + wake_lock_destroy(&fl_str->wake_lock); + flashlight_free_gpio(flashlight, fl_str); +fail_free_mem: + kfree(fl_str); + printk(KERN_ERR "%s: The Flashlight driver is Failure\n", __func__); + return err; +} + +static int flashlight_remove(struct platform_device *pdev) +{ + struct flashlight_platform_data *flashlight = pdev->dev.platform_data; + + flashlight_turn_off(); + hrtimer_cancel(&this_fl_str->timer); + unregister_early_suspend(&this_fl_str->early_suspend_flashlight); +#ifdef FLASHLIGHT_ADJ_FUNC + if (this_fl_str->gpio_flash_adj) { + device_remove_file(this_fl_str->fl_lcdev.dev, + &dev_attr_flash_adj); + } +#endif + led_classdev_unregister(&this_fl_str->fl_lcdev); + wake_lock_destroy(&this_fl_str->wake_lock); + flashlight_free_gpio(flashlight, this_fl_str); + + kfree(this_fl_str); + return 0; +} + +static struct platform_driver flashlight_driver = { + .probe = flashlight_probe, + .remove = flashlight_remove, + .driver = { + .name = FLASHLIGHT_NAME, + .owner = THIS_MODULE, + }, +}; + +static int __init flashlight_init(void) +{ + return platform_driver_register(&flashlight_driver); +} + +static void __exit flashlight_exit(void) +{ + platform_driver_unregister(&flashlight_driver); +} + +module_init(flashlight_init); +module_exit(flashlight_exit); + +MODULE_DESCRIPTION("flash light driver"); +MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-msm/msm_vibrator.c b/arch/arm/mach-msm/msm_vibrator.c new file mode 100644 index 0000000000000..8b8174145a141 --- /dev/null +++ b/arch/arm/mach-msm/msm_vibrator.c @@ -0,0 +1,134 @@ +/* include/asm/mach-msm/htc_pwrsink.h + * + * Copyright (C) 2008 HTC Corporation. + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include +#include +#include +#include +#include <../../../drivers/staging/android/timed_output.h> +#include + +#include + +#define PM_LIBPROG 0x30000061 +#if defined(CONFIG_ARCH_MSM7X30) +#define PM_LIBVERS 0x00030001 +#elif defined(CONFIG_MSM_LEGACY_7X00A_AMSS) +#define PM_LIBVERS 0xfb837d0b +#else +#define PM_LIBVERS MSM_RPC_VERS(1,1) +#endif + +#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM7X30) +#define HTC_PROCEDURE_SET_VIB_ON_OFF 22 +#else +#define HTC_PROCEDURE_SET_VIB_ON_OFF 21 +#endif +#define PMIC_VIBRATOR_LEVEL (3000) + +static struct work_struct vibrator_work; +static struct hrtimer vibe_timer; +static spinlock_t vibe_lock; +static int vibe_state; + +static void set_pmic_vibrator(int on) +{ + static struct msm_rpc_endpoint *vib_endpoint; + struct set_vib_on_off_req { + struct rpc_request_hdr hdr; + uint32_t data; + } req; + + if (!vib_endpoint) { + vib_endpoint = msm_rpc_connect(PM_LIBPROG, PM_LIBVERS, 0); + if (IS_ERR(vib_endpoint)) { + printk(KERN_ERR "init vib rpc failed!\n"); + vib_endpoint = 0; + return; + } + } + + if (on) + req.data = cpu_to_be32(PMIC_VIBRATOR_LEVEL); + else + req.data = cpu_to_be32(0); + + msm_rpc_call(vib_endpoint, HTC_PROCEDURE_SET_VIB_ON_OFF, &req, + sizeof(req), 5 * HZ); +} + +static void update_vibrator(struct work_struct *work) +{ + set_pmic_vibrator(vibe_state); +} + +static void vibrator_enable(struct timed_output_dev *dev, int value) +{ + unsigned long flags; + + spin_lock_irqsave(&vibe_lock, flags); + hrtimer_cancel(&vibe_timer); + + if (value == 0) + vibe_state = 0; + else { + value = (value > 15000 ? 15000 : value); + vibe_state = 1; + hrtimer_start(&vibe_timer, + ktime_set(value / 1000, (value % 1000) * 1000000), + HRTIMER_MODE_REL); + } + spin_unlock_irqrestore(&vibe_lock, flags); + + schedule_work(&vibrator_work); +} + +static int vibrator_get_time(struct timed_output_dev *dev) +{ + if (hrtimer_active(&vibe_timer)) { + ktime_t r = hrtimer_get_remaining(&vibe_timer); + return r.tv.sec * 1000 + r.tv.nsec / 1000000; + } else + return 0; +} + +static enum hrtimer_restart vibrator_timer_func(struct hrtimer *timer) +{ + vibe_state = 0; + schedule_work(&vibrator_work); + return HRTIMER_NORESTART; +} + +static struct timed_output_dev pmic_vibrator = { + .name = "vibrator", + .get_time = vibrator_get_time, + .enable = vibrator_enable, +}; + +void __init msm_init_pmic_vibrator(void) +{ + INIT_WORK(&vibrator_work, update_vibrator); + + spin_lock_init(&vibe_lock); + vibe_state = 0; + hrtimer_init(&vibe_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + vibe_timer.function = vibrator_timer_func; + + timed_output_dev_register(&pmic_vibrator); +} + +MODULE_DESCRIPTION("timed output pmic vibrator device"); +MODULE_LICENSE("GPL"); + diff --git a/arch/arm/mach-msm/nand_partitions.c b/arch/arm/mach-msm/nand_partitions.c new file mode 100644 index 0000000000000..4361e49fb4aa6 --- /dev/null +++ b/arch/arm/mach-msm/nand_partitions.c @@ -0,0 +1,128 @@ +/* arch/arm/mach-msm/nand_partitions.c + * + * Code to extract partition information from ATAG set up by the + * bootloader. + * + * Copyright (C) 2007 Google, Inc. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include + +#include +#include + +#include + +#include +#include + +#include + +#include + + +/* configuration tags specific to msm */ + +#define ATAG_MSM_PARTITION 0x4d534D70 /* MSMp */ + +struct msm_ptbl_entry +{ + char name[16]; + __u32 offset; + __u32 size; + __u32 flags; +}; + +#define MSM_MAX_PARTITIONS 8 + +static struct mtd_partition msm_nand_partitions[MSM_MAX_PARTITIONS]; +static char msm_nand_names[MSM_MAX_PARTITIONS * 16]; + +extern struct flash_platform_data msm_nand_data; + +static int __init parse_tag_msm_partition(const struct tag *tag) +{ + struct mtd_partition *ptn = msm_nand_partitions; + char *name = msm_nand_names; + struct msm_ptbl_entry *entry = (void *) &tag->u; + unsigned count, n; + unsigned have_kpanic = 0; + + count = (tag->hdr.size - 2) / + (sizeof(struct msm_ptbl_entry) / sizeof(__u32)); + + if (count > MSM_MAX_PARTITIONS) + count = MSM_MAX_PARTITIONS; + + for (n = 0; n < count; n++) { + memcpy(name, entry->name, 15); + name[15] = 0; + + if (!strcmp(name, "kpanic")) + have_kpanic = 1; + + ptn->name = name; + ptn->offset = entry->offset; + ptn->size = entry->size; + + name += 16; + entry++; + ptn++; + } + +#ifdef CONFIG_VIRTUAL_KPANIC_PARTITION + if (!have_kpanic) { + int i; + uint64_t kpanic_off = 0; + + if (count == MSM_MAX_PARTITIONS) { + printk("Cannot create virtual 'kpanic' partition\n"); + goto out; + } + + for (i = 0; i < count; i++) { + ptn = &msm_nand_partitions[i]; + if (!strcmp(ptn->name, CONFIG_VIRTUAL_KPANIC_SRC)) { + ptn->size -= CONFIG_VIRTUAL_KPANIC_PSIZE; + kpanic_off = ptn->offset + ptn->size; + break; + } + } + if (i == count) { + printk(KERN_ERR "Partition %s not found\n", + CONFIG_VIRTUAL_KPANIC_SRC); + goto out; + } + + ptn = &msm_nand_partitions[count]; + ptn->name ="kpanic"; + ptn->offset = kpanic_off; + ptn->size = CONFIG_VIRTUAL_KPANIC_PSIZE; + + printk("Virtual mtd partition '%s' created @%llx (%llu)\n", + ptn->name, ptn->offset, ptn->size); + + count++; + } +out: +#endif /* CONFIG_VIRTUAL_KPANIC_SRC */ + msm_nand_data.nr_parts = count; + msm_nand_data.parts = msm_nand_partitions; + + return 0; +} + +__tagtable(ATAG_MSM_PARTITION, parse_tag_msm_partition); diff --git a/arch/arm/mach-msm/perflock.c b/arch/arm/mach-msm/perflock.c new file mode 100644 index 0000000000000..574dffc850a98 --- /dev/null +++ b/arch/arm/mach-msm/perflock.c @@ -0,0 +1,480 @@ +/* arch/arm/mach-msm/perflock.c + * + * Copyright (C) 2008 HTC Corporation + * Author: Eiven Peng + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
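+ *
+ * perflock lets kernel clients pin the CPU to a performance level taken
+ * from the platform's perf_acpu_table: while at least one perf_lock is
+ * active, the cpufreq policy min and max are clamped to the highest locked
+ * speed, and the saved policy is restored (after a short delay) once the
+ * last lock is released.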
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "proc_comm.h" +#include "acpuclock.h" + +#define PERF_LOCK_INITIALIZED (1U << 0) +#define PERF_LOCK_ACTIVE (1U << 1) + +enum { + PERF_LOCK_DEBUG = 1U << 0, + PERF_EXPIRE_DEBUG = 1U << 1, + PERF_CPUFREQ_NOTIFY_DEBUG = 1U << 2, + PERF_CPUFREQ_LOCK_DEBUG = 1U << 3, + PERF_SCREEN_ON_POLICY_DEBUG = 1U << 4, +}; + +static LIST_HEAD(active_perf_locks); +static LIST_HEAD(inactive_perf_locks); +static DEFINE_SPINLOCK(list_lock); +static DEFINE_SPINLOCK(policy_update_lock); +static int initialized; +static unsigned int *perf_acpu_table; +static unsigned int table_size; +static unsigned int curr_lock_speed; +static struct cpufreq_policy *cpufreq_policy; + +#ifdef CONFIG_PERF_LOCK_DEBUG +static int debug_mask = PERF_LOCK_DEBUG | PERF_EXPIRE_DEBUG | + PERF_CPUFREQ_NOTIFY_DEBUG | PERF_CPUFREQ_LOCK_DEBUG; +#else +static int debug_mask = PERF_CPUFREQ_LOCK_DEBUG | PERF_SCREEN_ON_POLICY_DEBUG; +#endif +module_param_call(debug_mask, param_set_int, param_get_int, + &debug_mask, S_IWUSR | S_IRUGO); + +static unsigned int get_perflock_speed(void); +static void print_active_locks(void); + +#ifdef CONFIG_PERFLOCK_SCREEN_POLICY +/* Increase cpufreq minumum frequency when screen on. + Pull down to lowest speed when screen off. */ +static unsigned int screen_off_policy_req; +static unsigned int screen_on_policy_req; +static void perflock_early_suspend(struct early_suspend *handler) +{ + unsigned long irqflags; + + spin_lock_irqsave(&policy_update_lock, irqflags); + if (screen_on_policy_req) { + screen_on_policy_req--; + spin_unlock_irqrestore(&policy_update_lock, irqflags); + return; + } + screen_off_policy_req++; + spin_unlock_irqrestore(&policy_update_lock, irqflags); + + if (cpufreq_policy) + cpufreq_update_policy(cpufreq_policy->cpu); +} + +static void perflock_late_resume(struct early_suspend *handler) +{ + unsigned long irqflags; + +/* + * This workaround is for hero project + * May cause potential bug: + * Accidentally set cpu in high freq in screen off mode. + * senario: in screen off early suspended state, runs the following sequence: + * 1.perflock_late_resume():acpuclk_set_rate(high freq);screen_on_pilicy_req=1; + * 2.perflock_early_suspend():if(screen_on_policy_req) return; + * 3.perflock_notifier_call(): only set policy's min and max + */ +#ifdef CONFIG_MACH_HERO + /* Work around for display driver, + * need to increase cpu speed immediately. 
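+	 * Bump acpuclk directly to the higher of the current perf_lock
+	 * speed and CONFIG_PERFLOCK_SCREEN_ON_MIN here, rather than waiting
+	 * for the cpufreq policy update issued further down.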
+ */ + unsigned int lock_speed = get_perflock_speed() / 1000; + if (lock_speed > CONFIG_PERFLOCK_SCREEN_ON_MIN) + acpuclk_set_rate(lock_speed * 1000, 0); + else + acpuclk_set_rate(CONFIG_PERFLOCK_SCREEN_ON_MIN * 1000, 0); +#endif + + spin_lock_irqsave(&policy_update_lock, irqflags); + if (screen_off_policy_req) { + screen_off_policy_req--; + spin_unlock_irqrestore(&policy_update_lock, irqflags); + return; + } + screen_on_policy_req++; + spin_unlock_irqrestore(&policy_update_lock, irqflags); + + if (cpufreq_policy) + cpufreq_update_policy(cpufreq_policy->cpu); +} + +static struct early_suspend perflock_power_suspend = { + .suspend = perflock_early_suspend, + .resume = perflock_late_resume, + .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, +}; + +/* 7k projects need to raise up cpu freq before panel resume for stability */ +#if defined(CONFIG_HTC_ONMODE_CHARGING) && \ + (defined(CONFIG_ARCH_MSM7225) || \ + defined(CONFIG_ARCH_MSM7227) || \ + defined(CONFIG_ARCH_MSM7201A)) +static struct early_suspend perflock_onchg_suspend = { + .suspend = perflock_early_suspend, + .resume = perflock_late_resume, + .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, +}; +#endif + +static int __init perflock_screen_policy_init(void) +{ + register_early_suspend(&perflock_power_suspend); +/* 7k projects need to raise up cpu freq before panel resume for stability */ +#if defined(CONFIG_HTC_ONMODE_CHARGING) && \ + (defined(CONFIG_ARCH_MSM7225) || \ + defined(CONFIG_ARCH_MSM7227) || \ + defined(CONFIG_ARCH_MSM7201A)) + register_onchg_suspend(&perflock_onchg_suspend); +#endif + screen_on_policy_req++; + if (cpufreq_policy) + cpufreq_update_policy(cpufreq_policy->cpu); + + return 0; +} + +late_initcall(perflock_screen_policy_init); +#endif + +#if 0 +static unsigned int policy_min = CONFIG_MSM_CPU_FREQ_MIN; +static unsigned int policy_max = CONFIG_MSM_CPU_FREQ_MAX; +#else +static unsigned int policy_min; +static unsigned int policy_max; +#endif +static int param_set_cpu_min_max(const char *val, struct kernel_param *kp) +{ + int ret; + ret = param_set_int(val, kp); + if (cpufreq_policy) + cpufreq_update_policy(cpufreq_policy->cpu); + return ret; +} + +module_param_call(min_cpu_khz, param_set_cpu_min_max, param_get_int, + &policy_min, S_IWUSR | S_IRUGO); +module_param_call(max_cpu_khz, param_set_cpu_min_max, param_get_int, + &policy_max, S_IWUSR | S_IRUGO); + +static int perflock_notifier_call(struct notifier_block *self, + unsigned long event, void *data) +{ + struct cpufreq_policy *policy = data; + unsigned int lock_speed; + unsigned long irqflags; + + spin_lock_irqsave(&policy_update_lock, irqflags); + if (debug_mask & PERF_CPUFREQ_NOTIFY_DEBUG) + pr_info("%s: event=%ld, policy->min=%d, policy->max=%d", + __func__, event, policy->min, policy->max); + + if (event == CPUFREQ_START) + cpufreq_policy = policy; + else if (event == CPUFREQ_NOTIFY) { + /* Each time cpufreq_update_policy, + * min/max will reset, need to set it again. 
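+		 * Any pending screen-on/off policy request is consumed first,
+		 * then an active perf_lock speed overrides both min and max;
+		 * otherwise the saved policy_min/policy_max are re-applied.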
*/ +#ifdef CONFIG_PERFLOCK_SCREEN_POLICY + if (screen_on_policy_req) { + if (debug_mask & PERF_SCREEN_ON_POLICY_DEBUG) + pr_info("%s: screen_on_policy_req %d," + "policy_min %d\n", __func__, + screen_on_policy_req, + CONFIG_PERFLOCK_SCREEN_ON_MIN); + policy_min = CONFIG_PERFLOCK_SCREEN_ON_MIN; + policy_max = CONFIG_PERFLOCK_SCREEN_ON_MAX; + screen_on_policy_req--; + } else if (screen_off_policy_req) { + if (debug_mask & PERF_SCREEN_ON_POLICY_DEBUG) + pr_info("%s: screen_off_policy_req %d," + "policy_min %d\n", __func__, + screen_off_policy_req, + CONFIG_MSM_CPU_FREQ_MIN); + policy_min = CONFIG_PERFLOCK_SCREEN_OFF_MIN; + policy_max = CONFIG_PERFLOCK_SCREEN_OFF_MAX; + screen_off_policy_req--; + } +#endif + lock_speed = get_perflock_speed() / 1000; + if (lock_speed) { + policy->min = lock_speed; + policy->max = lock_speed; + if (debug_mask & PERF_CPUFREQ_LOCK_DEBUG) { + pr_info("%s: cpufreq lock speed %d\n", + __func__, lock_speed); + print_active_locks(); + } + } else { + policy->min = policy_min; + policy->max = policy_max; + if (debug_mask & PERF_CPUFREQ_LOCK_DEBUG) + pr_info("%s: cpufreq recover policy %d %d\n", + __func__, policy->min, policy->max); + } + curr_lock_speed = lock_speed; + } + spin_unlock_irqrestore(&policy_update_lock, irqflags); + + return 0; +} + +static struct notifier_block perflock_notifier = { + .notifier_call = perflock_notifier_call, +}; + +static unsigned int get_perflock_speed(void) +{ + unsigned long irqflags; + struct perf_lock *lock; + unsigned int perf_level = 0; + + /* Get the maxmimum perf level. */ + if (list_empty(&active_perf_locks)) + return 0; + + spin_lock_irqsave(&list_lock, irqflags); + list_for_each_entry(lock, &active_perf_locks, link) { + if (lock->level > perf_level) + perf_level = lock->level; + } + spin_unlock_irqrestore(&list_lock, irqflags); + + return perf_acpu_table[perf_level]; +} + +static void print_active_locks(void) +{ + unsigned long irqflags; + struct perf_lock *lock; + + spin_lock_irqsave(&list_lock, irqflags); + list_for_each_entry(lock, &active_perf_locks, link) { + pr_info("active perf lock '%s'\n", lock->name); + } + spin_unlock_irqrestore(&list_lock, irqflags); +} + +/** + * perf_lock_init - acquire a perf lock + * @lock: perf lock to acquire + * @level: performance level of @lock + * @name: the name of @lock + * + * Acquire @lock with @name and @level. (It doesn't activate the lock.) 
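+ *
+ * Initialization only places @lock on the inactive list; perf_lock() must
+ * be called to activate it.  A minimal usage sketch (hypothetical client
+ * code, the lock name is illustrative only):
+ *
+ *	static struct perf_lock my_lock;
+ *
+ *	perf_lock_init(&my_lock, PERF_LOCK_HIGHEST, "my-driver");
+ *	perf_lock(&my_lock);	-- clamp cpufreq to the table speed
+ *	perf_unlock(&my_lock);	-- release; policy restored after a delay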
+ */ +void perf_lock_init(struct perf_lock *lock, + unsigned int level, const char *name) +{ + unsigned long irqflags = 0; + + WARN_ON(!name); + WARN_ON(level >= PERF_LOCK_INVALID); + WARN_ON(lock->flags & PERF_LOCK_INITIALIZED); + + if ((!name) || (level >= PERF_LOCK_INVALID) || + (lock->flags & PERF_LOCK_INITIALIZED)) { + pr_err("%s: ERROR \"%s\" flags %x level %d\n", + __func__, name, lock->flags, level); + return; + } + lock->name = name; + lock->flags = PERF_LOCK_INITIALIZED; + lock->level = level; + + INIT_LIST_HEAD(&lock->link); + spin_lock_irqsave(&list_lock, irqflags); + list_add(&lock->link, &inactive_perf_locks); + spin_unlock_irqrestore(&list_lock, irqflags); +} +EXPORT_SYMBOL(perf_lock_init); + +/** + * perf_lock - activate a perf lock + * @lock: perf lock to activate + * + * Activate @lock.(Need to init_perf_lock before activate) + */ +void perf_lock(struct perf_lock *lock) +{ + unsigned long irqflags; + + WARN_ON(!initialized); + WARN_ON((lock->flags & PERF_LOCK_INITIALIZED) == 0); + WARN_ON(lock->flags & PERF_LOCK_ACTIVE); + + spin_lock_irqsave(&list_lock, irqflags); + if (debug_mask & PERF_LOCK_DEBUG) + pr_info("%s: '%s', flags %d level %d\n", + __func__, lock->name, lock->flags, lock->level); + if (lock->flags & PERF_LOCK_ACTIVE) { + pr_err("%s: over-locked\n", __func__); + return; + } + lock->flags |= PERF_LOCK_ACTIVE; + list_del(&lock->link); + list_add(&lock->link, &active_perf_locks); + spin_unlock_irqrestore(&list_lock, irqflags); + + /* Update cpufreq policy - scaling_min/scaling_max */ + if (cpufreq_policy && + (curr_lock_speed != (get_perflock_speed() / 1000))) + cpufreq_update_policy(cpufreq_policy->cpu); +} +EXPORT_SYMBOL(perf_lock); + +#define PERF_UNLOCK_DELAY (HZ) +static void do_expire_perf_locks(struct work_struct *work) +{ + if (debug_mask & PERF_EXPIRE_DEBUG) + pr_info("%s: timed out to unlock\n", __func__); + + if (cpufreq_policy && + (curr_lock_speed != (get_perflock_speed() / 1000))) { + if (debug_mask & PERF_EXPIRE_DEBUG) + pr_info("%s: update cpufreq policy\n", __func__); + cpufreq_update_policy(cpufreq_policy->cpu); + } +} +static DECLARE_DELAYED_WORK(work_expire_perf_locks, do_expire_perf_locks); + +/** + * perf_unlock - de-activate a perf lock + * @lock: perf lock to de-activate + * + * de-activate @lock. 
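+ * The lock is moved back to the inactive list; if the required speed has
+ * changed, the cpufreq policy is restored from a delayed work item
+ * (PERF_UNLOCK_DELAY) so that rapid lock/unlock cycles do not thrash the
+ * policy.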
+ */ +void perf_unlock(struct perf_lock *lock) +{ + unsigned long irqflags; + + WARN_ON(!initialized); + WARN_ON((lock->flags & PERF_LOCK_ACTIVE) == 0); + + spin_lock_irqsave(&list_lock, irqflags); + if (debug_mask & PERF_LOCK_DEBUG) + pr_info("%s: '%s', flags %d level %d\n", + __func__, lock->name, lock->flags, lock->level); + if (!(lock->flags & PERF_LOCK_ACTIVE)) { + pr_err("%s: under-locked\n", __func__); + return; + } + lock->flags &= ~PERF_LOCK_ACTIVE; + list_del(&lock->link); + list_add(&lock->link, &inactive_perf_locks); + spin_unlock_irqrestore(&list_lock, irqflags); + + /* Prevent lock/unlock quickly, add a timeout to release perf_lock */ + if (cpufreq_policy && + (curr_lock_speed != (get_perflock_speed() / 1000))) + schedule_delayed_work(&work_expire_perf_locks, + PERF_UNLOCK_DELAY); +} +EXPORT_SYMBOL(perf_unlock); + +/** + * is_perf_lock_active - query if a perf_lock is active or not + * @lock: target perf lock + * RETURN: 0: inactive; 1: active + * + * query if @lock is active or not + */ +inline int is_perf_lock_active(struct perf_lock *lock) +{ + return (lock->flags & PERF_LOCK_ACTIVE); +} +EXPORT_SYMBOL(is_perf_lock_active); + +/** + * is_perf_locked - query if there is any perf lock activates + * RETURN: 0: no perf lock activates 1: at least a perf lock activates + */ +int is_perf_locked(void) +{ + return (!list_empty(&active_perf_locks)); +} +EXPORT_SYMBOL(is_perf_locked); + + +#ifdef CONFIG_PERFLOCK_BOOT_LOCK +/* Stop cpufreq and lock cpu, shorten boot time. */ +#define BOOT_LOCK_TIMEOUT (60 * HZ) +static struct perf_lock boot_perf_lock; + +static void do_expire_boot_lock(struct work_struct *work) +{ + perf_unlock(&boot_perf_lock); + pr_info("Release 'boot-time' perf_lock\n"); +} +static DECLARE_DELAYED_WORK(work_expire_boot_lock, do_expire_boot_lock); +#endif + +static void perf_acpu_table_fixup(void) +{ + int i; + for (i = 0; i < table_size; ++i) { + if (perf_acpu_table[i] > policy_max * 1000) + perf_acpu_table[i] = policy_max * 1000; + else if (perf_acpu_table[i] < policy_min * 1000) + perf_acpu_table[i] = policy_min * 1000; + } +} + +void __init perflock_init(struct perflock_platform_data *pdata) +{ + struct cpufreq_policy policy; + struct cpufreq_frequency_table *table = + cpufreq_frequency_get_table(smp_processor_id()); + + BUG_ON(cpufreq_frequency_table_cpuinfo(&policy, table)); + policy_min = policy.cpuinfo.min_freq; + policy_max = policy.cpuinfo.max_freq; + + if (!pdata) + goto invalid_config; + + perf_acpu_table = pdata->perf_acpu_table; + table_size = pdata->table_size; + if (!perf_acpu_table || !table_size) + goto invalid_config; + if (table_size < PERF_LOCK_INVALID) + goto invalid_config; + + perf_acpu_table_fixup(); + cpufreq_register_notifier(&perflock_notifier, CPUFREQ_POLICY_NOTIFIER); + + initialized = 1; + +#ifdef CONFIG_PERFLOCK_BOOT_LOCK + /* Stop cpufreq and lock cpu, shorten boot time. */ + perf_lock_init(&boot_perf_lock, PERF_LOCK_HIGHEST, "boot-time"); + perf_lock(&boot_perf_lock); + schedule_delayed_work(&work_expire_boot_lock, BOOT_LOCK_TIMEOUT); + pr_info("Acquire 'boot-time' perf_lock\n"); +#endif + + return; + +invalid_config: + pr_err("%s: invalid configuration data, %p %d %d\n", __func__, + perf_acpu_table, table_size, PERF_LOCK_INVALID); +} diff --git a/arch/arm/mach-msm/pm.c b/arch/arm/mach-msm/pm.c new file mode 100644 index 0000000000000..f182233a09bda --- /dev/null +++ b/arch/arm/mach-msm/pm.c @@ -0,0 +1,887 @@ +/* arch/arm/mach-msm/pm.c + * + * MSM Power Management Routines + * + * Copyright (C) 2007 Google, Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_VFP +#include +#endif + +#include "smd_private.h" +#include "acpuclock.h" +#include "proc_comm.h" +#include "clock.h" +#ifdef CONFIG_HAS_WAKELOCK +#include +#endif + +#define RESET_VECTOR UL(0x00000000) + +enum { + MSM_PM_DEBUG_SUSPEND = 1U << 0, + MSM_PM_DEBUG_POWER_COLLAPSE = 1U << 1, + MSM_PM_DEBUG_STATE = 1U << 2, + MSM_PM_DEBUG_CLOCK = 1U << 3, + MSM_PM_DEBUG_RESET_VECTOR = 1U << 4, + MSM_PM_DEBUG_SMSM_STATE = 1U << 5, + MSM_PM_DEBUG_IDLE = 1U << 6, + MSM_PM_DEBUG_CLOCK_VOTE = 1U << 7 +}; +static int msm_pm_debug_mask = MSM_PM_DEBUG_CLOCK_VOTE; +module_param_named(debug_mask, msm_pm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); + +enum { + MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND, + MSM_PM_SLEEP_MODE_POWER_COLLAPSE, + MSM_PM_SLEEP_MODE_APPS_SLEEP, + MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT, + MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT, +}; +static int msm_pm_sleep_mode = CONFIG_MSM7X00A_SLEEP_MODE; +module_param_named(sleep_mode, msm_pm_sleep_mode, int, S_IRUGO | S_IWUSR | S_IWGRP); +static int msm_pm_idle_sleep_mode = CONFIG_MSM7X00A_IDLE_SLEEP_MODE; +module_param_named(idle_sleep_mode, msm_pm_idle_sleep_mode, int, S_IRUGO | S_IWUSR | S_IWGRP); +static int msm_pm_idle_sleep_min_time = CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME; +module_param_named(idle_sleep_min_time, msm_pm_idle_sleep_min_time, int, S_IRUGO | S_IWUSR | S_IWGRP); +static int msm_pm_idle_spin_time = CONFIG_MSM7X00A_IDLE_SPIN_TIME; +module_param_named(idle_spin_time, msm_pm_idle_spin_time, int, S_IRUGO | S_IWUSR | S_IWGRP); + +#if defined(CONFIG_ARCH_MSM7X30) +#define A11S_CLK_SLEEP_EN (MSM_GCC_BASE + 0x020) +#define A11S_PWRDOWN (MSM_ACC_BASE + 0x01c) +#define A11S_SECOP (MSM_TCSR_BASE + 0x038) +#else +#define A11S_CLK_SLEEP_EN (MSM_CSR_BASE + 0x11c) +#define A11S_PWRDOWN (MSM_CSR_BASE + 0x440) +#define A11S_STANDBY_CTL (MSM_CSR_BASE + 0x108) +#define A11RAMBACKBIAS (MSM_CSR_BASE + 0x508) +#endif + + +#define DEM_MASTER_BITS_PER_CPU 6 + +/* Power Master State Bits - Per CPU */ +#define DEM_MASTER_SMSM_RUN \ + (0x01UL << (DEM_MASTER_BITS_PER_CPU * SMSM_STATE_APPS)) +#define DEM_MASTER_SMSM_RSA \ + (0x02UL << (DEM_MASTER_BITS_PER_CPU * SMSM_STATE_APPS)) +#define DEM_MASTER_SMSM_PWRC_EARLY_EXIT \ + (0x04UL << (DEM_MASTER_BITS_PER_CPU * SMSM_STATE_APPS)) +#define DEM_MASTER_SMSM_SLEEP_EXIT \ + (0x08UL << (DEM_MASTER_BITS_PER_CPU * SMSM_STATE_APPS)) +#define DEM_MASTER_SMSM_READY \ + (0x10UL << (DEM_MASTER_BITS_PER_CPU * SMSM_STATE_APPS)) +#define DEM_MASTER_SMSM_SLEEP \ + (0x20UL << (DEM_MASTER_BITS_PER_CPU * SMSM_STATE_APPS)) + +/* Power Slave State Bits */ +#define DEM_SLAVE_SMSM_RUN (0x0001) +#define DEM_SLAVE_SMSM_PWRC (0x0002) +#define DEM_SLAVE_SMSM_PWRC_DELAY (0x0004) +#define DEM_SLAVE_SMSM_PWRC_EARLY_EXIT (0x0008) +#define DEM_SLAVE_SMSM_WFPI (0x0010) +#define DEM_SLAVE_SMSM_SLEEP (0x0020) +#define DEM_SLAVE_SMSM_SLEEP_EXIT (0x0040) +#define DEM_SLAVE_SMSM_MSGS_REDUCED (0x0080) +#define 
DEM_SLAVE_SMSM_RESET (0x0100) +#define DEM_SLAVE_SMSM_PWRC_SUSPEND (0x0200) + +#ifndef CONFIG_ARCH_MSM_SCORPION +#define PM_SMSM_WRITE_STATE SMSM_STATE_APPS +#define PM_SMSM_READ_STATE SMSM_STATE_MODEM + +#define PM_SMSM_WRITE_RUN SMSM_RUN +#define PM_SMSM_READ_RUN SMSM_RUN +#else +#define PM_SMSM_WRITE_STATE SMSM_STATE_APPS_DEM +#define PM_SMSM_READ_STATE SMSM_STATE_POWER_MASTER_DEM + +#define PM_SMSM_WRITE_RUN DEM_SLAVE_SMSM_RUN +#define PM_SMSM_READ_RUN DEM_MASTER_SMSM_RUN +#endif + +int msm_pm_collapse(void); +int msm_arch_idle(void); +void msm_pm_collapse_exit(void); + +int64_t msm_timer_enter_idle(void); +void msm_timer_exit_idle(int low_power); +int msm_irq_idle_sleep_allowed(void); +int msm_irq_pending(void); +int clks_print_running(void); +extern int board_mfg_mode(void); + +#ifdef CONFIG_AXI_SCREEN_POLICY +static int axi_rate; +static int sleep_axi_rate; +static struct clk *axi_clk; +#endif +static uint32_t *msm_pm_reset_vector; + +static uint32_t msm_pm_max_sleep_time; + +#ifdef CONFIG_MSM_IDLE_STATS +enum msm_pm_time_stats_id { + MSM_PM_STAT_REQUESTED_IDLE, + MSM_PM_STAT_IDLE_SPIN, + MSM_PM_STAT_IDLE_WFI, + MSM_PM_STAT_IDLE_SLEEP, + MSM_PM_STAT_IDLE_FAILED_SLEEP, + MSM_PM_STAT_NOT_IDLE, + MSM_PM_STAT_COUNT +}; + +static struct msm_pm_time_stats { + const char *name; + int bucket[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT]; + int64_t min_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT]; + int64_t max_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT]; + int count; + int64_t total_time; +} msm_pm_stats[MSM_PM_STAT_COUNT] = { + [MSM_PM_STAT_REQUESTED_IDLE].name = "idle-request", + [MSM_PM_STAT_IDLE_SPIN].name = "idle-spin", + [MSM_PM_STAT_IDLE_WFI].name = "idle-wfi", + [MSM_PM_STAT_IDLE_SLEEP].name = "idle-sleep", + [MSM_PM_STAT_IDLE_FAILED_SLEEP].name = "idle-failed-sleep", + [MSM_PM_STAT_NOT_IDLE].name = "not-idle", +}; + +static void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t) +{ + int i; + int64_t bt; + msm_pm_stats[id].total_time += t; + msm_pm_stats[id].count++; + bt = t; + do_div(bt, CONFIG_MSM_IDLE_STATS_FIRST_BUCKET); + if (bt < 1ULL << (CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT * + (CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1))) + i = DIV_ROUND_UP(fls((uint32_t)bt), + CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT); + else + i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1; + msm_pm_stats[id].bucket[i]++; + if (t < msm_pm_stats[id].min_time[i] || !msm_pm_stats[id].max_time[i]) + msm_pm_stats[id].min_time[i] = t; + if (t > msm_pm_stats[id].max_time[i]) + msm_pm_stats[id].max_time[i] = t; +} +#endif + +static int +msm_pm_wait_state(uint32_t wait_all_set, uint32_t wait_all_clear, + uint32_t wait_any_set, uint32_t wait_any_clear) +{ + int i; + uint32_t state; + + for (i = 0; i < 100000; i++) { + state = smsm_get_state(PM_SMSM_READ_STATE); + if (((wait_all_set || wait_all_clear) && + !(~state & wait_all_set) && !(state & wait_all_clear)) || + (state & wait_any_set) || (~state & wait_any_clear)) + return 0; + udelay(1); + } + pr_err("msm_pm_wait_state(%x, %x, %x, %x) failed %x\n", wait_all_set, + wait_all_clear, wait_any_set, wait_any_clear, state); + return -ETIMEDOUT; +} + +/* + * For speeding up boot time: + * During booting up, disable entering arch_idle() by disable_hlt() + * Enable it after booting up BOOT_LOCK_TIMEOUT sec. 
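+ *
+ * boot_lock_nohalt() picks the timeout from board_mfg_mode():
+ * normal/factory2/recovery boots use BOOT_LOCK_TIMEOUT_NORMAL, while
+ * charge/power_test/offmode_charge boots use BOOT_LOCK_TIMEOUT_SHORT.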
+ */ +#define BOOT_LOCK_TIMEOUT_NORMAL (60 * HZ) +#define BOOT_LOCK_TIMEOUT_SHORT (10 * HZ) +static void do_expire_boot_lock(struct work_struct *work) +{ + enable_hlt(); + pr_info("Release 'boot-time' no_halt_lock\n"); +} +static DECLARE_DELAYED_WORK(work_expire_boot_lock, do_expire_boot_lock); + +static void +msm_pm_enter_prep_hw(void) +{ +#if defined(CONFIG_ARCH_MSM7X30) + writel(1, A11S_PWRDOWN); + writel(4, A11S_SECOP); +#else +#if defined(CONFIG_ARCH_QSD8X50) + writel(0x1b, A11S_CLK_SLEEP_EN); +#else + writel(0x1f, A11S_CLK_SLEEP_EN); +#endif + writel(1, A11S_PWRDOWN); + writel(0, A11S_STANDBY_CTL); + +#if defined(CONFIG_ARCH_MSM_ARM11) + writel(0, A11RAMBACKBIAS); +#endif +#endif +} + +static void +msm_pm_exit_restore_hw(void) +{ +#if defined(CONFIG_ARCH_MSM7X30) + writel(0, A11S_SECOP); + writel(0, A11S_PWRDOWN); +#else + writel(0x00, A11S_CLK_SLEEP_EN); + writel(0, A11S_PWRDOWN); +#endif +} + +#ifdef CONFIG_MSM_FIQ_SUPPORT +void msm_fiq_exit_sleep(void); +#else +static inline void msm_fiq_exit_sleep(void) { } +#endif + +#ifdef CONFIG_HTC_POWER_COLLAPSE_MAGIC +/* Set magic number in SMEM for power collapse state */ +#define HTC_POWER_COLLAPSE_ADD (MSM_SHARED_RAM_BASE + 0x000F8000 + 0x000007F8) +#define HTC_POWER_COLLAPSE_MAGIC_NUM (HTC_POWER_COLLAPSE_ADD - 0x04) +unsigned int magic_num; +#endif + +static int msm_sleep(int sleep_mode, uint32_t sleep_delay, int from_idle) +{ + uint32_t saved_vector[2]; + int collapsed; + void msm_irq_enter_sleep1(bool arm9_wake, int from_idle); + int msm_irq_enter_sleep2(bool arm9_wake, int from_idle); + void msm_irq_exit_sleep1(void); + void msm_irq_exit_sleep2(void); + void msm_irq_exit_sleep3(void); + void msm_gpio_enter_sleep(int from_idle); + void msm_gpio_exit_sleep(void); + void smd_sleep_exit(void); + uint32_t enter_state; + uint32_t enter_wait_set = 0; + uint32_t enter_wait_clear = 0; + uint32_t exit_state; + uint32_t exit_wait_clear = 0; + uint32_t exit_wait_any_set = 0; + unsigned long pm_saved_acpu_clk_rate = 0; + int ret; + int rv = -EINTR; + bool invalid_inital_state = false; + + if (msm_pm_debug_mask & MSM_PM_DEBUG_SUSPEND) + printk(KERN_INFO "msm_sleep(): mode %d delay %u idle %d\n", + sleep_mode, sleep_delay, from_idle); + +#ifndef CONFIG_ARCH_MSM_SCORPION + switch (sleep_mode) { + case MSM_PM_SLEEP_MODE_POWER_COLLAPSE: + enter_state = SMSM_PWRC; + enter_wait_set = SMSM_RSA; + exit_state = SMSM_WFPI; + exit_wait_clear = SMSM_RSA; + break; + case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND: + enter_state = SMSM_PWRC_SUSPEND; + enter_wait_set = SMSM_RSA; + exit_state = SMSM_WFPI; + exit_wait_clear = SMSM_RSA; + break; + case MSM_PM_SLEEP_MODE_APPS_SLEEP: + enter_state = SMSM_SLEEP; + exit_state = SMSM_SLEEPEXIT; + exit_wait_any_set = SMSM_SLEEPEXIT; + break; + default: + enter_state = 0; + exit_state = 0; + } +#else + switch (sleep_mode) { + case MSM_PM_SLEEP_MODE_POWER_COLLAPSE: + enter_state = DEM_SLAVE_SMSM_PWRC; + enter_wait_set = DEM_MASTER_SMSM_RSA; + exit_state = DEM_SLAVE_SMSM_WFPI; + exit_wait_any_set = + DEM_MASTER_SMSM_RUN | DEM_MASTER_SMSM_PWRC_EARLY_EXIT; + break; + case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND: + enter_state = DEM_SLAVE_SMSM_PWRC_SUSPEND; + enter_wait_set = DEM_MASTER_SMSM_RSA; + exit_state = DEM_SLAVE_SMSM_WFPI; + exit_wait_any_set = + DEM_MASTER_SMSM_RUN | DEM_MASTER_SMSM_PWRC_EARLY_EXIT; + break; + case MSM_PM_SLEEP_MODE_APPS_SLEEP: + enter_state = DEM_SLAVE_SMSM_SLEEP; + enter_wait_set = DEM_MASTER_SMSM_SLEEP; + exit_state = DEM_SLAVE_SMSM_SLEEP_EXIT; + exit_wait_any_set = DEM_MASTER_SMSM_SLEEP_EXIT; + 
break; + default: + enter_state = 0; + exit_state = 0; + } +#endif + + clk_enter_sleep(from_idle); + msm_irq_enter_sleep1(!!enter_state, from_idle); + msm_gpio_enter_sleep(from_idle); + + if (enter_state) { + /* Make sure last sleep request did not end with a timeout */ + ret = msm_pm_wait_state(PM_SMSM_READ_RUN, 0, 0, 0); + if (ret) { + printk(KERN_ERR "msm_sleep(): invalid inital state\n"); + invalid_inital_state = true; + } + + if (sleep_delay == 0 && sleep_mode >= MSM_PM_SLEEP_MODE_APPS_SLEEP) + sleep_delay = 192000*5; /* APPS_SLEEP does not allow infinite timeout */ + ret = smsm_set_sleep_duration(sleep_delay); + if (ret) { + printk(KERN_ERR "msm_sleep(): smsm_set_sleep_duration %x failed\n", enter_state); + enter_state = 0; + exit_state = 0; + } + if ((!from_idle && (msm_pm_debug_mask & MSM_PM_DEBUG_CLOCK_VOTE)) || + (from_idle && (msm_pm_debug_mask & MSM_PM_DEBUG_IDLE))) + clks_print_running(); + + ret = smsm_change_state(PM_SMSM_WRITE_STATE, PM_SMSM_WRITE_RUN, enter_state); + if (ret) { + printk(KERN_ERR "msm_sleep(): smsm_change_state %x failed\n", enter_state); + enter_state = 0; + exit_state = 0; + } + ret = msm_pm_wait_state(enter_wait_set, enter_wait_clear, 0, 0); + if (ret || invalid_inital_state) { + printk(KERN_INFO "msm_sleep(): msm_pm_wait_state failed, %x\n", smsm_get_state(PM_SMSM_READ_STATE)); + goto enter_failed; + } + } + if (msm_irq_enter_sleep2(!!enter_state, from_idle)) + goto enter_failed; + + if (enter_state) { + msm_pm_enter_prep_hw(); + + if (msm_pm_debug_mask & MSM_PM_DEBUG_STATE) + printk(KERN_INFO "msm_sleep(): enter " + "A11S_CLK_SLEEP_EN %x, A11S_PWRDOWN %x, " + "smsm_get_state %x\n", readl(A11S_CLK_SLEEP_EN), + readl(A11S_PWRDOWN), smsm_get_state(PM_SMSM_READ_STATE)); + } + + if (sleep_mode <= MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT) { + pm_saved_acpu_clk_rate = acpuclk_power_collapse(from_idle); + if (msm_pm_debug_mask & MSM_PM_DEBUG_CLOCK) + printk(KERN_INFO "msm_sleep(): %ld enter power collapse" + "\n", pm_saved_acpu_clk_rate); + if (pm_saved_acpu_clk_rate == 0) + goto ramp_down_failed; + +#ifdef CONFIG_AXI_SCREEN_POLICY + /* Drop AXI request when the screen is on */ + if (axi_rate) + clk_set_rate(axi_clk, sleep_axi_rate); +#endif + } +#ifdef CONFIG_HTC_POWER_COLLAPSE_MAGIC + magic_num = 0xAAAA1111; + writel(magic_num, HTC_POWER_COLLAPSE_MAGIC_NUM); +#endif + if (sleep_mode < MSM_PM_SLEEP_MODE_APPS_SLEEP) { + if (msm_pm_debug_mask & MSM_PM_DEBUG_SMSM_STATE) + smsm_print_sleep_info(); + saved_vector[0] = msm_pm_reset_vector[0]; + saved_vector[1] = msm_pm_reset_vector[1]; + msm_pm_reset_vector[0] = 0xE51FF004; /* ldr pc, 4 */ + msm_pm_reset_vector[1] = virt_to_phys(msm_pm_collapse_exit); + if (msm_pm_debug_mask & MSM_PM_DEBUG_RESET_VECTOR) + printk(KERN_INFO "msm_sleep(): vector %x %x -> " + "%x %x\n", saved_vector[0], saved_vector[1], + msm_pm_reset_vector[0], msm_pm_reset_vector[1]); +#ifdef CONFIG_VFP + if (from_idle) + vfp_flush_context(); +#endif + + if (!from_idle) printk(KERN_INFO "[R] suspend end\n"); + /* reset idle sleep mode when suspend. 
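+		 * (this re-loads the Kconfig default into msm_pm_idle_sleep_mode,
+		 * so a value set at runtime through the idle_sleep_mode module
+		 * parameter does not persist across a suspend cycle)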
*/ + if (!from_idle) msm_pm_idle_sleep_mode = CONFIG_MSM7X00A_IDLE_SLEEP_MODE; + collapsed = msm_pm_collapse(); + if (!from_idle) printk(KERN_INFO "[R] resume start\n"); + msm_pm_reset_vector[0] = saved_vector[0]; + msm_pm_reset_vector[1] = saved_vector[1]; + if (collapsed) { +#ifdef CONFIG_VFP + if (from_idle) + vfp_reinit(); +#endif + cpu_init(); + __asm__("cpsie a"); + msm_fiq_exit_sleep(); + local_fiq_enable(); + rv = 0; + } + if (msm_pm_debug_mask & MSM_PM_DEBUG_POWER_COLLAPSE) + printk(KERN_INFO "msm_pm_collapse(): returned %d\n", + collapsed); + if (msm_pm_debug_mask & MSM_PM_DEBUG_SMSM_STATE) + smsm_print_sleep_info(); + } else { + msm_arch_idle(); + rv = 0; + } +#ifdef CONFIG_HTC_POWER_COLLAPSE_MAGIC + magic_num = 0xBBBB9999; + writel(magic_num, HTC_POWER_COLLAPSE_MAGIC_NUM); +#endif + if (sleep_mode <= MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT) { + if (msm_pm_debug_mask & MSM_PM_DEBUG_CLOCK) + printk(KERN_INFO "msm_sleep(): exit power collapse %ld" + "\n", pm_saved_acpu_clk_rate); +#if defined(CONFIG_ARCH_QSD8X50) + if (acpuclk_set_rate(pm_saved_acpu_clk_rate, 1) < 0) +#else + if (acpuclk_set_rate(pm_saved_acpu_clk_rate, + from_idle ? SETRATE_PC_IDLE : SETRATE_PC) < 0) +#endif + printk(KERN_ERR "msm_sleep(): clk_set_rate %ld " + "failed\n", pm_saved_acpu_clk_rate); + +#ifdef CONFIG_AXI_SCREEN_POLICY + /* Restore axi rate if needed */ + if (axi_rate) + clk_set_rate(axi_clk, axi_rate); +#endif + } + if (msm_pm_debug_mask & MSM_PM_DEBUG_STATE) + printk(KERN_INFO "msm_sleep(): exit A11S_CLK_SLEEP_EN %x, " + "A11S_PWRDOWN %x, smsm_get_state %x\n", + readl(A11S_CLK_SLEEP_EN), readl(A11S_PWRDOWN), + smsm_get_state(PM_SMSM_READ_STATE)); +ramp_down_failed: + msm_irq_exit_sleep1(); +enter_failed: + if (enter_state) { + msm_pm_exit_restore_hw(); + + smsm_change_state(PM_SMSM_WRITE_STATE, enter_state, exit_state); + msm_pm_wait_state(0, exit_wait_clear, exit_wait_any_set, 0); + if (msm_pm_debug_mask & MSM_PM_DEBUG_STATE) + printk(KERN_INFO "msm_sleep(): sleep exit " + "A11S_CLK_SLEEP_EN %x, A11S_PWRDOWN %x, " + "smsm_get_state %x\n", readl(A11S_CLK_SLEEP_EN), + readl(A11S_PWRDOWN), smsm_get_state(PM_SMSM_READ_STATE)); + if (msm_pm_debug_mask & MSM_PM_DEBUG_SMSM_STATE) + smsm_print_sleep_info(); + } + msm_irq_exit_sleep2(); + if (enter_state) { + smsm_change_state(PM_SMSM_WRITE_STATE, exit_state, PM_SMSM_WRITE_RUN); + msm_pm_wait_state(PM_SMSM_READ_RUN, 0, 0, 0); + if (msm_pm_debug_mask & MSM_PM_DEBUG_STATE) + printk(KERN_INFO "msm_sleep(): sleep exit " + "A11S_CLK_SLEEP_EN %x, A11S_PWRDOWN %x, " + "smsm_get_state %x\n", readl(A11S_CLK_SLEEP_EN), + readl(A11S_PWRDOWN), smsm_get_state(PM_SMSM_READ_STATE)); + } + msm_irq_exit_sleep3(); + msm_gpio_exit_sleep(); + smd_sleep_exit(); + clk_exit_sleep(); + return rv; +} + +static int msm_pm_idle_spin(void) +{ + int spin; + spin = msm_pm_idle_spin_time >> 10; + while (spin-- > 0) { + if (msm_irq_pending()) { + return -1; + } + udelay(1); + } + return 0; +} + +void arch_idle(void) +{ + int ret; + int64_t sleep_time; + int low_power = 0; +#ifdef CONFIG_MSM_IDLE_STATS + int64_t t1; + static int64_t t2; + int exit_stat; +#endif + int allow_sleep = + msm_pm_idle_sleep_mode < MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT && +#ifdef CONFIG_HAS_WAKELOCK + !has_wake_lock(WAKE_LOCK_IDLE) && +#endif + msm_irq_idle_sleep_allowed(); + if (msm_pm_reset_vector == NULL) + return; + + sleep_time = msm_timer_enter_idle(); +#ifdef CONFIG_MSM_IDLE_STATS + t1 = ktime_to_ns(ktime_get()); + msm_pm_add_stat(MSM_PM_STAT_NOT_IDLE, t1 - t2); + 
msm_pm_add_stat(MSM_PM_STAT_REQUESTED_IDLE, sleep_time); +#endif + if (msm_pm_debug_mask & MSM_PM_DEBUG_IDLE) + printk(KERN_INFO "arch_idle: sleep time %llu, allow_sleep %d\n", + sleep_time, allow_sleep); + if (sleep_time < msm_pm_idle_sleep_min_time || !allow_sleep) { + unsigned long saved_rate; + if (acpuclk_get_wfi_rate() && msm_pm_idle_spin() < 0) { +#ifdef CONFIG_MSM_IDLE_STATS + exit_stat = MSM_PM_STAT_IDLE_SPIN; +#endif + goto abort_idle; + } + saved_rate = acpuclk_wait_for_irq(); + + + if (saved_rate && msm_pm_debug_mask & MSM_PM_DEBUG_CLOCK) + printk(KERN_DEBUG "arch_idle: clk %ld -> swfi\n", + saved_rate); + + /* + * If there is a wfi speed specified and we failed to ramp, do not + * go into wfi. + */ + if (acpuclk_get_wfi_rate() && !saved_rate) + while (!msm_irq_pending()) + udelay(1); + else + msm_arch_idle(); + + if (msm_pm_debug_mask & MSM_PM_DEBUG_CLOCK) + printk(KERN_DEBUG "msm_sleep: clk swfi -> %ld\n", + saved_rate); +#if defined(CONFIG_ARCH_QSD8X50) + if (saved_rate && acpuclk_set_rate(saved_rate, 1) < 0) +#else + if (saved_rate + && acpuclk_set_rate(saved_rate, SETRATE_SWFI) < 0) +#endif + printk(KERN_ERR "msm_sleep(): clk_set_rate %ld " + "failed\n", saved_rate); +#ifdef CONFIG_MSM_IDLE_STATS + exit_stat = MSM_PM_STAT_IDLE_WFI; +#endif + } else { + if (msm_pm_idle_spin() < 0) { +#ifdef CONFIG_MSM_IDLE_STATS + exit_stat = MSM_PM_STAT_IDLE_SPIN; +#endif + goto abort_idle; + } + + low_power = 1; + do_div(sleep_time, NSEC_PER_SEC / 32768); + if (sleep_time > 0x6DDD000) { + printk("sleep_time too big %lld\n", sleep_time); + sleep_time = 0x6DDD000; + } + ret = msm_sleep(msm_pm_idle_sleep_mode, sleep_time, 1); +#ifdef CONFIG_MSM_IDLE_STATS + if (ret) + exit_stat = MSM_PM_STAT_IDLE_FAILED_SLEEP; + else + exit_stat = MSM_PM_STAT_IDLE_SLEEP; +#endif + } +abort_idle: + msm_timer_exit_idle(low_power); +#ifdef CONFIG_MSM_IDLE_STATS + t2 = ktime_to_ns(ktime_get()); + msm_pm_add_stat(exit_stat, t2 - t1); +#endif +} + +static int msm_pm_enter(suspend_state_t state) +{ + msm_sleep(msm_pm_sleep_mode, msm_pm_max_sleep_time, 0); + return 0; +} + +static struct platform_suspend_ops msm_pm_ops = { + .enter = msm_pm_enter, + .valid = suspend_valid_only_mem, +}; + +#if defined(CONFIG_ARCH_MSM7X00A) +static uint32_t restart_reason = 0x776655AA; +#else +static uint32_t restart_reason = 0; +#endif + +static void msm_pm_power_off(void) +{ + msm_proc_comm(PCOM_POWER_DOWN, 0, 0); + for (;;) ; +} + +static bool console_flushed; + +void msm_pm_flush_console(void) +{ + if (console_flushed) + return; + console_flushed = true; + + printk("\n"); + printk(KERN_EMERG "Restarting %s\n", linux_banner); + if (console_trylock()) { + console_unlock(); + return; + } + + mdelay(50); + + local_irq_disable(); + if (!console_trylock()) + printk(KERN_EMERG "msm_restart: Console was locked! Busting\n"); + else + printk(KERN_EMERG "msm_restart: Console was locked!\n"); + console_unlock(); +} + +static void msm_pm_restart(char str) +{ + msm_pm_flush_console(); + + /* If there's a hard reset hook and the restart_reason + * is the default, prefer that to the (slower) proc_comm + * reset command. 
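+	 *
+	 * restart_reason is set by the reboot notifier below: the
+	 * "bootloader", "recovery", "eraseflash", "oem-NN" and "force-hard"
+	 * commands each map to a distinct magic value, and any other command
+	 * falls back to 0x77665501.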
+ */ + if ((restart_reason == 0x776655AA) && msm_hw_reset_hook) { + msm_hw_reset_hook(); + } else { + msm_proc_comm(PCOM_RESET_CHIP, &restart_reason, 0); + } + for (;;) ; +} + +static int msm_reboot_call(struct notifier_block *this, unsigned long code, void *_cmd) +{ + if((code == SYS_RESTART) && _cmd) { + char *cmd = _cmd; + if (!strcmp(cmd, "bootloader")) { + restart_reason = 0x77665500; + } else if (!strcmp(cmd, "recovery")) { + restart_reason = 0x77665502; + } else if (!strcmp(cmd, "eraseflash")) { + restart_reason = 0x776655EF; + } else if (!strncmp(cmd, "oem-", 4)) { + unsigned code = simple_strtoul(cmd + 4, 0, 16) & 0xff; + restart_reason = 0x6f656d00 | code; + } else if (!strcmp(cmd, "force-hard")) { + restart_reason = 0x776655AA; + } else { + restart_reason = 0x77665501; + } + } + return NOTIFY_DONE; +} + +static struct notifier_block msm_reboot_notifier = +{ + .notifier_call = msm_reboot_call, +}; + +#ifdef CONFIG_MSM_IDLE_STATS +static int msm_pm_read_proc(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int len = 0; + int i, j; + char *p = page; + + for (i = 0; i < ARRAY_SIZE(msm_pm_stats); i++) { + int64_t bucket_time; + int64_t s; + uint32_t ns; + s = msm_pm_stats[i].total_time; + ns = do_div(s, NSEC_PER_SEC); + p += sprintf(p, + "%s:\n" + " count: %7d\n" + " total_time: %lld.%09u\n", + msm_pm_stats[i].name, + msm_pm_stats[i].count, + s, ns); + bucket_time = CONFIG_MSM_IDLE_STATS_FIRST_BUCKET; + for (j = 0; j < CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1; j++) { + s = bucket_time; + ns = do_div(s, NSEC_PER_SEC); + p += sprintf(p, " <%2lld.%09u: %7d (%lld-%lld)\n", + s, ns, msm_pm_stats[i].bucket[j], + msm_pm_stats[i].min_time[j], + msm_pm_stats[i].max_time[j]); + bucket_time <<= CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT; + } + p += sprintf(p, " >=%2lld.%09u: %7d (%lld-%lld)\n", + s, ns, msm_pm_stats[i].bucket[j], + msm_pm_stats[i].min_time[j], + msm_pm_stats[i].max_time[j]); + } + *start = page + off; + + len = p - page; + if (len > off) + len -= off; + else + len = 0; + + return len < count ? 
len : count; +} +#endif + +void msm_pm_set_max_sleep_time(int64_t max_sleep_time_ns) +{ + int64_t max_sleep_time_bs = max_sleep_time_ns; + + /* Convert from ns -> BS units */ + do_div(max_sleep_time_bs, NSEC_PER_SEC / 32768); + + if (max_sleep_time_bs > 0x6DDD000) + msm_pm_max_sleep_time = (uint32_t) 0x6DDD000; + else + msm_pm_max_sleep_time = (uint32_t) max_sleep_time_bs; + + if (msm_pm_debug_mask & MSM_PM_DEBUG_SUSPEND) + printk("%s: Requested %lldns (%lldbs), Giving %ubs\n", + __func__, max_sleep_time_ns, + max_sleep_time_bs, + msm_pm_max_sleep_time); +} +EXPORT_SYMBOL(msm_pm_set_max_sleep_time); + +#if defined(CONFIG_EARLYSUSPEND) && defined(CONFIG_ARCH_MSM_SCORPION) +#ifdef CONFIG_AXI_SCREEN_POLICY +/* axi 128 screen on, 61mhz screen off */ +static void axi_early_suspend(struct early_suspend *handler) { + axi_rate = 0; + clk_set_rate(axi_clk, axi_rate); +} + +static void axi_late_resume(struct early_suspend *handler) { + axi_rate = 128000000; + sleep_axi_rate = 120000000; + clk_set_rate(axi_clk, axi_rate); +} + +static struct early_suspend axi_screen_suspend = { + .suspend = axi_early_suspend, + .resume = axi_late_resume, +}; +#endif +#endif + +#ifdef CONFIG_AXI_SCREEN_POLICY +static void __init msm_pm_axi_init(void) +{ +#if defined(CONFIG_EARLYSUSPEND) && defined(CONFIG_ARCH_MSM_SCORPION) + axi_clk = clk_get(NULL, "ebi1_clk"); + if (IS_ERR(axi_clk)) { + int result = PTR_ERR(axi_clk); + pr_err("clk_get(ebi1_clk) returned %d\n", result); + return; + } + axi_rate = 128000000; + sleep_axi_rate = 120000000; + clk_set_rate(axi_clk, axi_rate); + register_early_suspend(&axi_screen_suspend); +#else + axi_rate = 0; +#endif +} +#endif + +static void __init boot_lock_nohalt(void) +{ + int nohalt_timeout; + + /* normal/factory2/recovery */ + switch (board_mfg_mode()) { + case 0: /* normal */ + case 1: /* factory2 */ + case 2: /* recovery */ + nohalt_timeout = BOOT_LOCK_TIMEOUT_NORMAL; + break; + case 3: /* charge */ + case 4: /* power_test */ + case 5: /* offmode_charge */ + default: + nohalt_timeout = BOOT_LOCK_TIMEOUT_SHORT; + break; + } + disable_hlt(); + schedule_delayed_work(&work_expire_boot_lock, nohalt_timeout); + pr_info("Acquire 'boot-time' no_halt_lock %ds\n", nohalt_timeout / HZ); +} + +static int __init msm_pm_init(void) +{ + pm_power_off = msm_pm_power_off; + arm_pm_restart = msm_pm_restart; + msm_pm_max_sleep_time = 0; +#ifdef CONFIG_AXI_SCREEN_POLICY + msm_pm_axi_init(); +#endif + register_reboot_notifier(&msm_reboot_notifier); + + msm_pm_reset_vector = ioremap(RESET_VECTOR, PAGE_SIZE); + if (msm_pm_reset_vector == NULL) { + printk(KERN_ERR "msm_pm_init: failed to map reset vector\n"); + return -ENODEV; + } + + suspend_set_ops(&msm_pm_ops); + +#ifdef CONFIG_MSM_IDLE_STATS + create_proc_read_entry("msm_pm_stats", S_IRUGO, + NULL, msm_pm_read_proc, NULL); +#endif + + boot_lock_nohalt(); + return 0; +} + +__initcall(msm_pm_init); diff --git a/arch/arm/mach-msm/pm.h b/arch/arm/mach-msm/pm.h new file mode 100644 index 0000000000000..159a7a610d752 --- /dev/null +++ b/arch/arm/mach-msm/pm.h @@ -0,0 +1,31 @@ +/* arch/arm/mach-msm/pm.h + * + * Copyright (C) 2007 Google, Inc. + * Author: San Mehat + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __ARCH_ARM_MACH_MSM_PM_H +#define __ARCH_ARM_MACH_MSM_PM_H + +#include + +#define A11S_CLK_SLEEP_EN_ADDR MSM_CSR_BASE + 0x11c + +#define CLK_SLEEP_EN_ARM11_CORE 0x01 +#define CLK_SLEEP_EN_ARM11_AHB 0x02 +#define CLK_SLEEP_EN_ID_BRIDGE 0x04 +#define CLK_SLEEP_EN_DMA_BRIDGE 0x08 +#define CLK_SLEEP_EN_PBUS 0x10 +#define CLK_SLEEP_EN_DEBUG_TIME 0x20 +#define CLK_SLEEP_EN_GP_TIMER 0x40 +#endif diff --git a/arch/arm/mach-msm/pmic.c b/arch/arm/mach-msm/pmic.c new file mode 100644 index 0000000000000..0256308b10b9d --- /dev/null +++ b/arch/arm/mach-msm/pmic.c @@ -0,0 +1,547 @@ +/* arch/arm/mach-msm/qdsp6/pmic.c + * + * Copyright (C) 2009 Google, Inc. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include + +#include "pmic.h" + +#include + +#define LIB_NULL_PROC 0 +#define LIB_RPC_GLUE_CODE_INFO_REMOTE_PROC 1 +#define LP_MODE_CONTROL_PROC 2 +#define VREG_SET_LEVEL_PROC 3 +#define VREG_PULL_DOWN_SWITCH_PROC 4 +#define SECURE_MPP_CONFIG_DIGITAL_OUTPUT_PROC 5 +#define SECURE_MPP_CONFIG_I_SINK_PROC 6 +#define RTC_START_PROC 7 +#define RTC_STOP_PROC 8 +#define RTC_GET_TIME_PROC 9 +#define RTC_ENABLE_ALARM_PROC 10 +#define RTC_DISABLE_ALARM_PROC 11 +#define RTC_GET_ALARM_TIME_PROC 12 +#define RTC_GET_ALARM_STATUS_PROC 13 +#define RTC_SET_TIME_ADJUST_PROC 14 +#define RTC_GET_TIME_ADJUST_PROC 15 +#define SET_LED_INTENSITY_PROC 16 +#define FLASH_LED_SET_CURRENT_PROC 17 +#define FLASH_LED_SET_MODE_PROC 18 +#define FLASH_LED_SET_POLARITY_PROC 19 +#define SPEAKER_CMD_PROC 20 +#define SET_SPEAKER_GAIN_PROC 21 +#define VIB_MOT_SET_VOLT_PROC 22 +#define VIB_MOT_SET_MODE_PROC 23 +#define VIB_MOT_SET_POLARITY_PROC 24 +#define VID_EN_PROC 25 +#define VID_IS_EN_PROC 26 +#define VID_LOAD_DETECT_EN_PROC 27 +#define MIC_EN_PROC 28 +#define MIC_IS_EN_PROC 29 +#define MIC_SET_VOLT_PROC 30 +#define MIC_GET_VOLT_PROC 31 +#define SPKR_EN_RIGHT_CHAN_PROC 32 +#define SPKR_IS_RIGHT_CHAN_EN_PROC 33 +#define SPKR_EN_LEFT_CHAN_PROC 34 +#define SPKR_IS_LEFT_CHAN_EN_PROC 35 +#define SET_SPKR_CONFIGURATION_PROC 36 +#define GET_SPKR_CONFIGURATION_PROC 37 +#define SPKR_GET_GAIN_PROC 38 +#define SPKR_IS_EN_PROC 39 +#define SPKR_EN_MUTE_PROC 40 +#define SPKR_IS_MUTE_EN_PROC 41 +#define SPKR_SET_DELAY_PROC 42 +#define SPKR_GET_DELAY_PROC 43 +#define SECURE_MPP_CONFIG_DIGITAL_INPUT_PROC 44 +#define SET_SPEAKER_DELAY_PROC 45 +#define SPEAKER_1K6_ZIN_ENABLE_PROC 46 +#define SPKR_SET_MUX_HPF_CORNER_FREQ_PROC 47 +#define SPKR_GET_MUX_HPF_CORNER_FREQ_PROC 48 +#define SPKR_IS_RIGHT_LEFT_CHAN_ADDED_PROC 49 +#define SPKR_EN_STEREO_PROC 50 +#define SPKR_IS_STEREO_EN_PROC 51 +#define SPKR_SELECT_USB_WITH_HPF_20HZ_PROC 52 +#define SPKR_IS_USB_WITH_HPF_20HZ_PROC 53 +#define SPKR_BYPASS_MUX_PROC 54 +#define SPKR_IS_MUX_BYPASSED_PROC 55 +#define SPKR_EN_HPF_PROC 56 +#define 
SPKR_IS_HPF_EN_PROC 57 +#define SPKR_EN_SINK_CURR_FROM_REF_VOLT_CIR_PROC 58 +#define SPKR_IS_SINK_CURR_FROM_REF_VOLT_CIR_EN_PROC 59 +#define SPKR_ADD_RIGHT_LEFT_CHAN_PROC 60 +#define SPKR_SET_GAIN_PROC 61 +#define SPKR_EN_PROC 62 +#define HSED_SET_PERIOD_PROC 63 +#define HSED_SET_HYSTERESIS_PROC 64 +#define HSED_SET_CURRENT_THRESHOLD_PROC 65 +#define HSED_ENABLE_PROC 66 + +/* rpc related */ +#define PMIC_RPC_TIMEOUT (5*HZ) + +#define PMIC_RPC_PROG 0x30000061 +#define PMIC_RPC_VER1 0x00010001 +#define PMIC_RPC_VER3 0x00030000 + +/* error bit flags defined by modem side */ +#define PM_ERR_FLAG__PAR1_OUT_OF_RANGE (0x0001) +#define PM_ERR_FLAG__PAR2_OUT_OF_RANGE (0x0002) +#define PM_ERR_FLAG__PAR3_OUT_OF_RANGE (0x0004) +#define PM_ERR_FLAG__PAR4_OUT_OF_RANGE (0x0008) +#define PM_ERR_FLAG__PAR5_OUT_OF_RANGE (0x0010) + +#define PM_ERR_FLAG__ALL_PARMS_OUT_OF_RANGE (0x001F) + +#define PM_ERR_FLAG__SBI_OPT_ERR (0x0080) +#define PM_ERR_FLAG__FEATURE_NOT_SUPPORTED (0x0100) + +#define PMIC_BUFF_SIZE 256 + +static DEFINE_MUTEX(pmic_mutex); +static struct msm_rpc_endpoint *pmic_ept; + + +static int modem_to_linux_err(uint err) +{ + if (err == 0) + return 0; + + if (err & PM_ERR_FLAG__ALL_PARMS_OUT_OF_RANGE) + return -EINVAL; + + if (err & PM_ERR_FLAG__SBI_OPT_ERR) + return -EIO; + + if (err & PM_ERR_FLAG__FEATURE_NOT_SUPPORTED) + return -ENOSYS; + + return -EPERM; +} + + +/* + * 1) network byte order + * 2) RPC request header(40 bytes) and RPC reply header (24 bytes) + * 3) each transaction consists of a request and reply + * 3) PROC (comamnd) layer has its own sub-protocol defined + * 4) sub-protocol can be grouped to follwoing 7 cases: + * a) set one argument, no get + * b) set two argument, no get + * c) set three argument, no get + * d) set a struct, no get + * e) set a argument followed by a struct, no get + * f) set a argument, get a argument + * g) no set, get either a argument or a struct + */ + +/* Returns number of reply bytes (minus reply header size) or + * negative value on error. 
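+ *
+ * The RPC endpoint is connected lazily on first use (trying PMIC_RPC_VER3
+ * first, then falling back to PMIC_RPC_VER1), and every call is serialized
+ * by pmic_mutex with a PMIC_RPC_TIMEOUT deadline.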
+ */ +static int pmic_rpc(int proc, void *msg, int msglen, void *rep, int replen) +{ + int r; + mutex_lock(&pmic_mutex); + + if (!pmic_ept) { + pmic_ept = msm_rpc_connect(PMIC_RPC_PROG, PMIC_RPC_VER3, 0); + if (IS_ERR(pmic_ept)) + pmic_ept = msm_rpc_connect(PMIC_RPC_PROG, PMIC_RPC_VER1, 0); + if (IS_ERR(pmic_ept)) { + pmic_ept = NULL; + pr_err("pmic: cannot connect to rpc server\n"); + r = -ENODEV; + goto done; + } + } + r = msm_rpc_call_reply(pmic_ept, proc, msg, msglen, + rep, replen, PMIC_RPC_TIMEOUT); + if (r >= 0) { + if (r < sizeof(struct rpc_reply_hdr)) { + r = -EIO; + goto done; + } + r -= sizeof(struct rpc_reply_hdr); + } +done: + mutex_unlock(&pmic_mutex); + return r; +} + +struct pmic_reply { + struct rpc_reply_hdr hdr; + uint32_t status; + uint32_t data; +}; + +/** + * pmic_rpc_set_only() - set arguments and no get + * @data0: first argumrnt + * @data1: second argument + * @data2: third argument + * @data3: fourth argument + * @num: number of argument + * @proc: command/request id + * + * This function covers case a, b, and c + */ +static int pmic_rpc_set_only(uint data0, uint data1, uint data2, uint data3, + int num, int proc) +{ + struct { + struct rpc_request_hdr hdr; + uint32_t data[4]; + } msg; + struct pmic_reply rep; + int r; + + if (num > 4) + return -EINVAL; + + msg.data[0] = cpu_to_be32(data0); + msg.data[1] = cpu_to_be32(data1); + msg.data[2] = cpu_to_be32(data2); + msg.data[3] = cpu_to_be32(data3); + + r = pmic_rpc(proc, &msg, + sizeof(struct rpc_request_hdr) + num * sizeof(uint32_t), + &rep, sizeof(rep)); + if (r < 0) + return r; + if (r < sizeof(uint32_t)) + return -EIO; + + return modem_to_linux_err(be32_to_cpu(rep.status)); +} + +/** + * pmic_rpc_set_struct() - set the whole struct + * @xflag: indicates an extra argument + * @xdata: the extra argument + * @*data: starting address of struct + * @size: size of struct + * @proc: command/request id + * + * This fucntion covers case d and e + */ +static int pmic_rpc_set_struct(int xflag, uint xdata, uint *data, uint size, + int proc) +{ + struct { + struct rpc_request_hdr hdr; + uint32_t data[32+2]; + } msg; + struct pmic_reply rep; + int n = 0; + + size = (size + 3) & (~3); + if (size > (32 * sizeof(uint32_t))) + return -EINVAL; + + if (xflag) + msg.data[n++] = cpu_to_be32(xdata); + + msg.data[n++] = cpu_to_be32(1); + while (size > 0) { + size -= 4; + msg.data[n++] = cpu_to_be32(*data++); + } + + n = pmic_rpc(proc, &msg, + sizeof(struct rpc_request_hdr) + n * sizeof(uint32_t), + &rep, sizeof(rep)); + if (n < 0) + return n; + if (n < sizeof(uint32_t)) + return -EIO; + + return modem_to_linux_err(be32_to_cpu(rep.status)); +} + +int pmic_lp_mode_control(enum switch_cmd cmd, enum vreg_lp_id id) +{ + return pmic_rpc_set_only(cmd, id, 0, 0, 2, LP_MODE_CONTROL_PROC); +} +EXPORT_SYMBOL(pmic_lp_mode_control); + +int pmic_secure_mpp_control_digital_output(enum mpp_which which, + enum mpp_dlogic_level level, + enum mpp_dlogic_out_ctrl out) +{ + return pmic_rpc_set_only(which, level, out, 0, 3, + SECURE_MPP_CONFIG_DIGITAL_OUTPUT_PROC); +} +EXPORT_SYMBOL(pmic_secure_mpp_control_digital_output); + +int pmic_secure_mpp_config_i_sink(enum mpp_which which, + enum mpp_i_sink_level level, + enum mpp_i_sink_switch onoff) +{ + return pmic_rpc_set_only(which, level, onoff, 0, 3, + SECURE_MPP_CONFIG_I_SINK_PROC); +} +EXPORT_SYMBOL(pmic_secure_mpp_config_i_sink); + +int pmic_secure_mpp_config_digital_input(enum mpp_which which, + enum mpp_dlogic_level level, + enum mpp_dlogic_in_dbus dbus) +{ + return pmic_rpc_set_only(which, level, dbus, 
0, 3, + SECURE_MPP_CONFIG_DIGITAL_INPUT_PROC); +} +EXPORT_SYMBOL(pmic_secure_mpp_config_digital_input); + +int pmic_rtc_start(struct rtc_time *time) +{ + return pmic_rpc_set_struct(0, 0, (uint *)time, sizeof(*time), + RTC_START_PROC); +} +EXPORT_SYMBOL(pmic_rtc_start); + +int pmic_rtc_stop(void) +{ + return pmic_rpc_set_only(0, 0, 0, 0, 0, RTC_STOP_PROC); +} +EXPORT_SYMBOL(pmic_rtc_stop); + +int pmic_rtc_enable_alarm(enum rtc_alarm alarm, + struct rtc_time *time) +{ + return pmic_rpc_set_struct(1, alarm, (uint *)time, sizeof(*time), + RTC_ENABLE_ALARM_PROC); +} +EXPORT_SYMBOL(pmic_rtc_enable_alarm); + +int pmic_rtc_disable_alarm(enum rtc_alarm alarm) +{ + return pmic_rpc_set_only(alarm, 0, 0, 0, 1, RTC_DISABLE_ALARM_PROC); +} +EXPORT_SYMBOL(pmic_rtc_disable_alarm); + +int pmic_rtc_set_time_adjust(uint adjust) +{ + return pmic_rpc_set_only(adjust, 0, 0, 0, 1, + RTC_SET_TIME_ADJUST_PROC); +} +EXPORT_SYMBOL(pmic_rtc_set_time_adjust); + +/* + * generic speaker + */ +int pmic_speaker_cmd(const enum spkr_cmd cmd) +{ + return pmic_rpc_set_only(cmd, 0, 0, 0, 1, SPEAKER_CMD_PROC); +} +EXPORT_SYMBOL(pmic_speaker_cmd); + +int pmic_set_spkr_configuration(struct spkr_config_mode *cfg) +{ + return pmic_rpc_set_struct(0, 0, (uint *)cfg, sizeof(*cfg), + SET_SPKR_CONFIGURATION_PROC); +} +EXPORT_SYMBOL(pmic_set_spkr_configuration); + +int pmic_spkr_en_right_chan(uint enable) +{ + return pmic_rpc_set_only(enable, 0, 0, 0, 1, SPKR_EN_RIGHT_CHAN_PROC); +} +EXPORT_SYMBOL(pmic_spkr_en_right_chan); + +int pmic_spkr_en_left_chan(uint enable) +{ + return pmic_rpc_set_only(enable, 0, 0, 0, 1, SPKR_EN_LEFT_CHAN_PROC); +} +EXPORT_SYMBOL(pmic_spkr_en_left_chan); + +int pmic_set_speaker_gain(enum spkr_gain gain) +{ + return pmic_rpc_set_only(gain, 0, 0, 0, 1, SET_SPEAKER_GAIN_PROC); +} +EXPORT_SYMBOL(pmic_set_speaker_gain); + +int pmic_set_speaker_delay(enum spkr_dly delay) +{ + return pmic_rpc_set_only(delay, 0, 0, 0, 1, SET_SPEAKER_DELAY_PROC); +} +EXPORT_SYMBOL(pmic_set_speaker_delay); + +int pmic_speaker_1k6_zin_enable(uint enable) +{ + return pmic_rpc_set_only(enable, 0, 0, 0, 1, + SPEAKER_1K6_ZIN_ENABLE_PROC); +} +EXPORT_SYMBOL(pmic_speaker_1k6_zin_enable); + +int pmic_spkr_set_mux_hpf_corner_freq(enum spkr_hpf_corner_freq freq) +{ + return pmic_rpc_set_only(freq, 0, 0, 0, 1, + SPKR_SET_MUX_HPF_CORNER_FREQ_PROC); +} +EXPORT_SYMBOL(pmic_spkr_set_mux_hpf_corner_freq); + +int pmic_spkr_select_usb_with_hpf_20hz(uint enable) +{ + return pmic_rpc_set_only(enable, 0, 0, 0, 1, + SPKR_SELECT_USB_WITH_HPF_20HZ_PROC); +} +EXPORT_SYMBOL(pmic_spkr_select_usb_with_hpf_20hz); + +int pmic_spkr_bypass_mux(uint enable) +{ + return pmic_rpc_set_only(enable, 0, 0, 0, 1, SPKR_BYPASS_MUX_PROC); +} +EXPORT_SYMBOL(pmic_spkr_bypass_mux); + +int pmic_spkr_en_hpf(uint enable) +{ + return pmic_rpc_set_only(enable, 0, 0, 0, 1, SPKR_EN_HPF_PROC); +} +EXPORT_SYMBOL(pmic_spkr_en_hpf); + +int pmic_spkr_en_sink_curr_from_ref_volt_cir(uint enable) +{ + return pmic_rpc_set_only(enable, 0, 0, 0, 1, + SPKR_EN_SINK_CURR_FROM_REF_VOLT_CIR_PROC); +} +EXPORT_SYMBOL(pmic_spkr_en_sink_curr_from_ref_volt_cir); + +/* + * speaker indexed by left_right + */ +int pmic_spkr_en(enum spkr_left_right left_right, uint enable) +{ + return pmic_rpc_set_only(left_right, enable, 0, 0, 2, SPKR_EN_PROC); +} +EXPORT_SYMBOL(pmic_spkr_en); + +int pmic_spkr_set_gain(enum spkr_left_right left_right, enum spkr_gain gain) +{ + return pmic_rpc_set_only(left_right, gain, 0, 0, 2, SPKR_SET_GAIN_PROC); +} +EXPORT_SYMBOL(pmic_spkr_set_gain); + +int pmic_spkr_set_delay(enum 
spkr_left_right left_right, enum spkr_dly delay) +{ + return pmic_rpc_set_only(left_right, delay, 0, 0, 2, + SPKR_SET_DELAY_PROC); +} +EXPORT_SYMBOL(pmic_spkr_set_delay); + +int pmic_spkr_en_mute(enum spkr_left_right left_right, uint enabled) +{ + return pmic_rpc_set_only(left_right, enabled, 0, 0, 2, + SPKR_EN_MUTE_PROC); +} +EXPORT_SYMBOL(pmic_spkr_en_mute); + +/* + * mic + */ +int pmic_mic_en(uint enable) +{ + return pmic_rpc_set_only(enable, 0, 0, 0, 1, MIC_EN_PROC); +} +EXPORT_SYMBOL(pmic_mic_en); + +int pmic_mic_set_volt(enum mic_volt vol) +{ + return pmic_rpc_set_only(vol, 0, 0, 0, 1, MIC_SET_VOLT_PROC); +} +EXPORT_SYMBOL(pmic_mic_set_volt); + +int pmic_vib_mot_set_volt(uint vol) +{ + return pmic_rpc_set_only(vol, 0, 0, 0, 1, VIB_MOT_SET_VOLT_PROC); +} +EXPORT_SYMBOL(pmic_vib_mot_set_volt); + +int pmic_vib_mot_set_mode(enum pm_vib_mot_mode mode) +{ + return pmic_rpc_set_only(mode, 0, 0, 0, 1, VIB_MOT_SET_MODE_PROC); +} +EXPORT_SYMBOL(pmic_vib_mot_set_mode); + +int pmic_vib_mot_set_polarity(enum pm_vib_mot_pol pol) +{ + return pmic_rpc_set_only(pol, 0, 0, 0, 1, VIB_MOT_SET_POLARITY_PROC); +} +EXPORT_SYMBOL(pmic_vib_mot_set_polarity); + +int pmic_vid_en(uint enable) +{ + return pmic_rpc_set_only(enable, 0, 0, 0, 1, VID_EN_PROC); +} +EXPORT_SYMBOL(pmic_vid_en); + +int pmic_vid_load_detect_en(uint enable) +{ + return pmic_rpc_set_only(enable, 0, 0, 0, 1, VID_LOAD_DETECT_EN_PROC); +} +EXPORT_SYMBOL(pmic_vid_load_detect_en); + +int pmic_set_led_intensity(enum ledtype type, int level) +{ + return pmic_rpc_set_only(type, level, 0, 0, 2, SET_LED_INTENSITY_PROC); +} +EXPORT_SYMBOL(pmic_set_led_intensity); + +int pmic_flash_led_set_current(const uint16_t milliamps) +{ + return pmic_rpc_set_only(milliamps, 0, 0, 0, 1, + FLASH_LED_SET_CURRENT_PROC); +} +EXPORT_SYMBOL(pmic_flash_led_set_current); + +int pmic_flash_led_set_mode(enum flash_led_mode mode) +{ + return pmic_rpc_set_only((int)mode, 0, 0, 0, 1, + FLASH_LED_SET_MODE_PROC); +} +EXPORT_SYMBOL(pmic_flash_led_set_mode); + +int pmic_flash_led_set_polarity(enum flash_led_pol pol) +{ + return pmic_rpc_set_only((int)pol, 0, 0, 0, 1, + FLASH_LED_SET_POLARITY_PROC); +} +EXPORT_SYMBOL(pmic_flash_led_set_polarity); + +int pmic_spkr_add_right_left_chan(uint enable) +{ + return pmic_rpc_set_only(enable, 0, 0, 0, 1, + SPKR_ADD_RIGHT_LEFT_CHAN_PROC); +} +EXPORT_SYMBOL(pmic_spkr_add_right_left_chan); + +int pmic_spkr_en_stereo(uint enable) +{ + return pmic_rpc_set_only(enable, 0, 0, 0, 1, SPKR_EN_STEREO_PROC); +} +EXPORT_SYMBOL(pmic_spkr_en_stereo); + +int pmic_hsed_enable( + enum hsed_controller controller, + enum hsed_enable enable_hsed +) +{ + return pmic_rpc_set_only(controller, enable_hsed, 0, 0, + 2, + HSED_ENABLE_PROC); +} +EXPORT_SYMBOL(pmic_hsed_enable); diff --git a/arch/arm/mach-msm/pmic.h b/arch/arm/mach-msm/pmic.h new file mode 100644 index 0000000000000..1778ec083163b --- /dev/null +++ b/arch/arm/mach-msm/pmic.h @@ -0,0 +1,327 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Code Aurora Forum nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef __ARCH_ARM_MACH_PMIC_H +#define __ARCH_ARM_MACH_PMIC_H + +#include "proc_comm.h" + +enum spkr_left_right { + LEFT_SPKR, + RIGHT_SPKR, +}; + +enum spkr_gain { + SPKR_GAIN_MINUS16DB, /* -16 db */ + SPKR_GAIN_MINUS12DB, /* -12 db */ + SPKR_GAIN_MINUS08DB, /* -08 db */ + SPKR_GAIN_MINUS04DB, /* -04 db */ + SPKR_GAIN_00DB, /* 00 db */ + SPKR_GAIN_PLUS04DB, /* +04 db */ + SPKR_GAIN_PLUS08DB, /* +08 db */ + SPKR_GAIN_PLUS12DB, /* +12 db */ +}; + +enum spkr_dly { + SPKR_DLY_10MS, /* ~10 ms delay */ + SPKR_DLY_100MS, /* ~100 ms delay */ +}; + +enum spkr_hpf_corner_freq { + SPKR_FREQ_1_39KHZ, /* 1.39 kHz */ + SPKR_FREQ_0_64KHZ, /* 0.64 kHz */ + SPKR_FREQ_0_86KHZ, /* 0.86 kHz */ + SPKR_FREQ_0_51KHZ, /* 0.51 kHz */ + SPKR_FREQ_1_06KHZ, /* 1.06 kHz */ + SPKR_FREQ_0_57KHZ, /* 0.57 kHz */ + SPKR_FREQ_0_73KHZ, /* 0.73 kHz */ + SPKR_FREQ_0_47KHZ, /* 0.47 kHz */ + SPKR_FREQ_1_20KHZ, /* 1.20 kHz */ + SPKR_FREQ_0_60KHZ, /* 0.60 kHz */ + SPKR_FREQ_0_76KHZ, /* 0.76 kHz */ + SPKR_FREQ_0_49KHZ, /* 0.49 kHz */ + SPKR_FREQ_0_95KHZ, /* 0.95 kHz */ + SPKR_FREQ_0_54KHZ, /* 0.54 kHz */ + SPKR_FREQ_0_68KHZ, /* 0.68 kHz */ + SPKR_FREQ_0_45KHZ, /* 0.45 kHz */ +}; + +/* Turn the speaker on or off and enables or disables mute.*/ +enum spkr_cmd { + SPKR_DISABLE, /* Enable Speaker */ + SPKR_ENABLE, /* Disable Speaker */ + SPKR_MUTE_OFF, /* turn speaker mute off, SOUND ON */ + SPKR_MUTE_ON, /* turn speaker mute on, SOUND OFF */ + SPKR_OFF, /* turn speaker OFF (speaker disable and mute on) */ + SPKR_ON, /* turn speaker ON (speaker enable and mute off) */ + SPKR_SET_FREQ_CMD, /* set speaker frequency */ + SPKR_GET_FREQ_CMD, /* get speaker frequency */ + SPKR_SET_GAIN_CMD, /* set speaker gain */ + SPKR_GET_GAIN_CMD, /* get speaker gain */ + SPKR_SET_DELAY_CMD, /* set speaker delay */ + SPKR_GET_DELAY_CMD, /* get speaker delay */ + SPKR_SET_PDM_MODE, + SPKR_SET_PWM_MODE, +}; + +struct spkr_config_mode { + uint32_t is_right_chan_en; + uint32_t is_left_chan_en; + uint32_t is_right_left_chan_added; + uint32_t is_stereo_en; + uint32_t is_usb_with_hpf_20hz; + uint32_t is_mux_bypassed; + uint32_t is_hpf_en; + uint32_t is_sink_curr_from_ref_volt_cir_en; +}; + +enum mic_volt { + MIC_VOLT_2_00V, /* 2.00 V */ + MIC_VOLT_1_93V, /* 1.93 V */ + MIC_VOLT_1_80V, /* 1.80 V */ + MIC_VOLT_1_73V, /* 1.73 V */ +}; + +enum ledtype { + LED_LCD, + LED_KEYPAD, +}; + +enum flash_led_mode { + FLASH_LED_MODE__MANUAL, + FLASH_LED_MODE__DBUS1, + FLASH_LED_MODE__DBUS2, + FLASH_LED_MODE__DBUS3, +}; + +enum flash_led_pol { + FLASH_LED_POL__ACTIVE_HIGH, + 
FLASH_LED_POL__ACTIVE_LOW, +}; + +enum switch_cmd { + OFF_CMD, + ON_CMD +}; + +enum vreg_lp_id { + PM_VREG_LP_MSMA_ID, + PM_VREG_LP_MSMP_ID, + PM_VREG_LP_MSME1_ID, + PM_VREG_LP_GP3_ID, + PM_VREG_LP_MSMC_ID, + PM_VREG_LP_MSME2_ID, + PM_VREG_LP_GP4_ID, + PM_VREG_LP_GP1_ID, + PM_VREG_LP_RFTX_ID, + PM_VREG_LP_RFRX1_ID, + PM_VREG_LP_RFRX2_ID, + PM_VREG_LP_WLAN_ID, + PM_VREG_LP_MMC_ID, + PM_VREG_LP_RUIM_ID, + PM_VREG_LP_MSMC0_ID, + PM_VREG_LP_GP2_ID, + PM_VREG_LP_GP5_ID, + PM_VREG_LP_GP6_ID, + PM_VREG_LP_MPLL_ID, + PM_VREG_LP_RFUBM_ID, + PM_VREG_LP_RFA_ID, + PM_VREG_LP_CDC2_ID, + PM_VREG_LP_RFTX2_ID, + PM_VREG_LP_USIM_ID, + PM_VREG_LP_USB2P6_ID, + PM_VREG_LP_TCXO_ID, + PM_VREG_LP_USB3P3_ID, + + PM_VREG_LP_MSME_ID = PM_VREG_LP_MSME1_ID, + /* backward compatible enums only */ + PM_VREG_LP_CAM_ID = PM_VREG_LP_GP1_ID, + PM_VREG_LP_MDDI_ID = PM_VREG_LP_GP2_ID, + PM_VREG_LP_RUIM2_ID = PM_VREG_LP_GP3_ID, + PM_VREG_LP_AUX_ID = PM_VREG_LP_GP4_ID, + PM_VREG_LP_AUX2_ID = PM_VREG_LP_GP5_ID, + PM_VREG_LP_BT_ID = PM_VREG_LP_GP6_ID, + PM_VREG_LP_MSMC_LDO_ID = PM_VREG_LP_MSMC_ID, + PM_VREG_LP_MSME1_LDO_ID = PM_VREG_LP_MSME1_ID, + PM_VREG_LP_MSME2_LDO_ID = PM_VREG_LP_MSME2_ID, + PM_VREG_LP_RFA1_ID = PM_VREG_LP_RFRX2_ID, + PM_VREG_LP_RFA2_ID = PM_VREG_LP_RFTX2_ID, + PM_VREG_LP_XO_ID = PM_VREG_LP_TCXO_ID +}; + +enum mpp_which { + PM_MPP_1, + PM_MPP_2, + PM_MPP_3, + PM_MPP_4, + PM_MPP_5, + PM_MPP_6, + PM_MPP_7, + PM_MPP_8, + PM_MPP_9, + PM_MPP_10, + PM_MPP_11, + PM_MPP_12, + PM_MPP_13, + PM_MPP_14, + PM_MPP_15, + PM_MPP_16, + PM_MPP_17, + PM_MPP_18, + PM_MPP_19, + PM_MPP_20, + PM_MPP_21, + PM_MPP_22, + + PM_NUM_MPP_HAN = PM_MPP_4 + 1, + PM_NUM_MPP_KIP = PM_MPP_4 + 1, + PM_NUM_MPP_EPIC = PM_MPP_4 + 1, + PM_NUM_MPP_PM7500 = PM_MPP_22 + 1, + PM_NUM_MPP_PM6650 = PM_MPP_12 + 1, + PM_NUM_MPP_PM6658 = PM_MPP_12 + 1, + PM_NUM_MPP_PANORAMIX = PM_MPP_2 + 1, + PM_NUM_MPP_PM6640 = PM_NUM_MPP_PANORAMIX, + PM_NUM_MPP_PM6620 = PM_NUM_MPP_PANORAMIX +}; + +enum mpp_dlogic_level { + PM_MPP__DLOGIC__LVL_MSME, + PM_MPP__DLOGIC__LVL_MSMP, + PM_MPP__DLOGIC__LVL_RUIM, + PM_MPP__DLOGIC__LVL_MMC, + PM_MPP__DLOGIC__LVL_VDD, +}; + +enum mpp_dlogic_in_dbus { + PM_MPP__DLOGIC_IN__DBUS_NONE, + PM_MPP__DLOGIC_IN__DBUS1, + PM_MPP__DLOGIC_IN__DBUS2, + PM_MPP__DLOGIC_IN__DBUS3, +}; + +enum mpp_dlogic_out_ctrl { + PM_MPP__DLOGIC_OUT__CTRL_LOW, + PM_MPP__DLOGIC_OUT__CTRL_HIGH, + PM_MPP__DLOGIC_OUT__CTRL_MPP, + PM_MPP__DLOGIC_OUT__CTRL_NOT_MPP, +}; + +enum mpp_i_sink_level { + PM_MPP__I_SINK__LEVEL_5mA, + PM_MPP__I_SINK__LEVEL_10mA, + PM_MPP__I_SINK__LEVEL_15mA, + PM_MPP__I_SINK__LEVEL_20mA, + PM_MPP__I_SINK__LEVEL_25mA, + PM_MPP__I_SINK__LEVEL_30mA, + PM_MPP__I_SINK__LEVEL_35mA, + PM_MPP__I_SINK__LEVEL_40mA, +}; + +enum mpp_i_sink_switch { + PM_MPP__I_SINK__SWITCH_DIS, + PM_MPP__I_SINK__SWITCH_ENA, + PM_MPP__I_SINK__SWITCH_ENA_IF_MPP_HIGH, + PM_MPP__I_SINK__SWITCH_ENA_IF_MPP_LOW, +}; + +enum pm_vib_mot_mode { + PM_VIB_MOT_MODE__MANUAL, + PM_VIB_MOT_MODE__DBUS1, + PM_VIB_MOT_MODE__DBUS2, + PM_VIB_MOT_MODE__DBUS3, +}; + +enum pm_vib_mot_pol { + PM_VIB_MOT_POL__ACTIVE_HIGH, + PM_VIB_MOT_POL__ACTIVE_LOW, +}; + +struct rtc_time { + uint sec; +}; + +enum rtc_alarm { + PM_RTC_ALARM_1, +}; + + +enum hsed_controller { + PM_HSED_CONTROLLER_0, + PM_HSED_CONTROLLER_1, + PM_HSED_CONTROLLER_2, +}; + +enum hsed_enable { + PM_HSED_ENABLE_OFF, + PM_HSED_ENABLE_TCXO, + PM_HSED_ENABLE_PWM_TCXO, + PM_HSED_ENABLE_ALWAYS, +}; + +int pmic_lp_mode_control(enum switch_cmd cmd, enum vreg_lp_id id); +int pmic_secure_mpp_control_digital_output(enum mpp_which which, + enum 
mpp_dlogic_level level, enum mpp_dlogic_out_ctrl out); +int pmic_secure_mpp_config_i_sink(enum mpp_which which, + enum mpp_i_sink_level level, enum mpp_i_sink_switch onoff); +int pmic_secure_mpp_config_digital_input(enum mpp_which which, + enum mpp_dlogic_level level, enum mpp_dlogic_in_dbus dbus); +int pmic_speaker_cmd(const enum spkr_cmd cmd); +int pmic_set_spkr_configuration(struct spkr_config_mode *cfg); +int pmic_spkr_en_right_chan(uint enable); +int pmic_spkr_en_left_chan(uint enable); +int pmic_spkr_en(enum spkr_left_right left_right, uint enabled); +int pmic_spkr_set_gain(enum spkr_left_right left_right, enum spkr_gain gain); +int pmic_set_speaker_gain(enum spkr_gain gain); +int pmic_set_speaker_delay(enum spkr_dly delay); +int pmic_speaker_1k6_zin_enable(uint enable); +int pmic_spkr_set_mux_hpf_corner_freq(enum spkr_hpf_corner_freq freq); +int pmic_spkr_select_usb_with_hpf_20hz(uint enable); +int pmic_spkr_bypass_mux(uint enable); +int pmic_spkr_en_hpf(uint enable); +int pmic_spkr_en_sink_curr_from_ref_volt_cir(uint enable); +int pmic_spkr_set_delay(enum spkr_left_right left_right, enum spkr_dly delay); +int pmic_spkr_en_mute(enum spkr_left_right left_right, uint enabled); +int pmic_mic_en(uint enable); +int pmic_mic_set_volt(enum mic_volt vol); +int pmic_set_led_intensity(enum ledtype type, int level); +int pmic_flash_led_set_current(uint16_t milliamps); +int pmic_flash_led_set_mode(enum flash_led_mode mode); +int pmic_flash_led_set_polarity(enum flash_led_pol pol); +int pmic_spkr_add_right_left_chan(uint enable); +int pmic_spkr_en_stereo(uint enable); +int pmic_vib_mot_set_volt(uint vol); +int pmic_vib_mot_set_mode(enum pm_vib_mot_mode mode); +int pmic_vib_mot_set_polarity(enum pm_vib_mot_pol pol); +int pmic_vid_en(uint enable); +int pmic_vid_load_detect_en(uint enable); +int pmic_hsed_enable( + enum hsed_controller controller, + enum hsed_enable enable +); + +#endif diff --git a/arch/arm/mach-msm/proc_engineerid.c b/arch/arm/mach-msm/proc_engineerid.c new file mode 100644 index 0000000000000..36367b5ff3478 --- /dev/null +++ b/arch/arm/mach-msm/proc_engineerid.c @@ -0,0 +1,72 @@ +/* arch/arm/mach-msm/proc_engineerid.c + * + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. +*/ + + +#include +#include +#include +#include +#include "devices.h" + +extern unsigned engineer_id; + +static int c_show(struct seq_file *m, void *v) +{ + seq_printf(m, "%u\n", engineer_id); + + return 0; +} + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + return *pos < 1 ? 
(void *)1 : NULL; +} + +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + ++*pos; + return NULL; +} + +static void c_stop(struct seq_file *m, void *v) +{ +} + +const struct seq_operations engineerid_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = c_show +}; + +extern const struct seq_operations engineerid_op; +static int engineerid_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &engineerid_op); +} + +static const struct file_operations proc_engineerid_operations = { + .open = engineerid_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int __init proc_engineerid_init(void) +{ + proc_create("engineerid", 0, NULL, &proc_engineerid_operations); + return 0; +} +module_init(proc_engineerid_init); + diff --git a/arch/arm/mach-msm/qdsp5/Makefile b/arch/arm/mach-msm/qdsp5/Makefile new file mode 100644 index 0000000000000..991d4a7e157f7 --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/Makefile @@ -0,0 +1,17 @@ +obj-y += adsp.o +ifeq ($(CONFIG_MSM_AMSS_VERSION_6350),y) +obj-y += adsp_info.o +obj-y += audio_evrc.o audio_qcelp.o audio_amrnb.o audio_aac.o +else +obj-y += adsp_6225.o +endif + +obj-y += adsp_driver.o +obj-y += adsp_video_verify_cmd.o +obj-y += adsp_videoenc_verify_cmd.o +obj-y += adsp_jpeg_verify_cmd.o adsp_jpeg_patch_event.o +obj-y += adsp_vfe_verify_cmd.o adsp_vfe_patch_event.o +obj-y += adsp_lpm_verify_cmd.o +obj-y += audio_out.o audio_in.o audio_mp3.o audmgr.o audpp.o +obj-y += snd.o + diff --git a/arch/arm/mach-msm/qdsp5/adsp.c b/arch/arm/mach-msm/qdsp5/adsp.c new file mode 100644 index 0000000000000..9dc8945b2cba5 --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/adsp.c @@ -0,0 +1,1183 @@ +/* arch/arm/mach-msm/qdsp5/adsp.c + * + * Register/Interrupt access for userspace aDSP library. + * + * Copyright (c) 2008 QUALCOMM Incorporated + * Copyright (C) 2008 Google, Inc. + * Author: Iliyan Malchev + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* TODO: + * - move shareable rpc code outside of adsp.c + * - general solution for virt->phys patchup + * - queue IDs should be relative to modules + * - disallow access to non-associated queues + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static struct wake_lock adsp_wake_lock; +static inline void prevent_suspend(void) +{ + wake_lock(&adsp_wake_lock); +} +static inline void allow_suspend(void) +{ + wake_unlock(&adsp_wake_lock); +} + +#include +#include +#include "adsp.h" + +#define INT_ADSP INT_ADSP_A9_A11 + +static struct adsp_info adsp_info; +static struct msm_rpc_endpoint *rpc_cb_server_client; +static struct msm_adsp_module *adsp_modules; +static int adsp_open_count; +static DEFINE_MUTEX(adsp_open_lock); + +/* protect interactions with the ADSP command/message queue */ +static spinlock_t adsp_cmd_lock; + +static uint32_t current_image = -1; + +void adsp_set_image(struct adsp_info *info, uint32_t image) +{ + current_image = image; +} + +/* + * Checks whether the module_id is available in the + * module_entries table.If module_id is available returns `0`. 
+ * If module_id is not available returns `-ENXIO`. + */ +#if CONFIG_MSM_AMSS_VERSION >= 6350 +static int32_t adsp_validate_module(uint32_t module_id) +{ + uint32_t *ptr; + uint32_t module_index; + uint32_t num_mod_entries; + + ptr = adsp_info.init_info_ptr->module_entries; + num_mod_entries = adsp_info.init_info_ptr->module_table_size; + + for (module_index = 0; module_index < num_mod_entries; module_index++) + if (module_id == ptr[module_index]) + return 0; + + return -ENXIO; +} +#else +static inline int32_t adsp_validate_module(uint32_t module_id) { return 0; } +#endif + +uint32_t adsp_get_module(struct adsp_info *info, uint32_t task) +{ + BUG_ON(current_image == -1UL); + return info->task_to_module[current_image][task]; +} + +uint32_t adsp_get_queue_offset(struct adsp_info *info, uint32_t queue_id) +{ + BUG_ON(current_image == -1UL); + return info->queue_offset[current_image][queue_id]; +} + +static int rpc_adsp_rtos_app_to_modem(uint32_t cmd, uint32_t module, + struct msm_adsp_module *adsp_module) +{ + int rc; + struct rpc_adsp_rtos_app_to_modem_args_t rpc_req; + struct rpc_reply_hdr *rpc_rsp; + + msm_rpc_setup_req(&rpc_req.hdr, + RPC_ADSP_RTOS_ATOM_PROG, + msm_rpc_get_vers(adsp_module->rpc_client), + RPC_ADSP_RTOS_APP_TO_MODEM_PROC); + + rpc_req.gotit = cpu_to_be32(1); + rpc_req.cmd = cpu_to_be32(cmd); + rpc_req.proc_id = cpu_to_be32(RPC_ADSP_RTOS_PROC_APPS); + rpc_req.module = cpu_to_be32(module); + rc = msm_rpc_write(adsp_module->rpc_client, &rpc_req, sizeof(rpc_req)); + if (rc < 0) { + pr_err("adsp: could not send RPC request: %d\n", rc); + return rc; + } + + rc = msm_rpc_read(adsp_module->rpc_client, + (void **)&rpc_rsp, -1, (5*HZ)); + if (rc < 0) { + pr_err("adsp: error receiving RPC reply: %d (%d)\n", + rc, -ERESTARTSYS); + return rc; + } + + if (be32_to_cpu(rpc_rsp->reply_stat) != RPCMSG_REPLYSTAT_ACCEPTED) { + pr_err("adsp: RPC call was denied!\n"); + kfree(rpc_rsp); + return -EPERM; + } + + if (be32_to_cpu(rpc_rsp->data.acc_hdr.accept_stat) != + RPC_ACCEPTSTAT_SUCCESS) { + pr_err("adsp error: RPC call was not successful (%d)\n", + be32_to_cpu(rpc_rsp->data.acc_hdr.accept_stat)); + kfree(rpc_rsp); + return -EINVAL; + } + + kfree(rpc_rsp); + return 0; +} + +#if CONFIG_MSM_AMSS_VERSION >= 6350 +static int get_module_index(uint32_t id) +{ + int mod_idx; + for (mod_idx = 0; mod_idx < adsp_info.module_count; mod_idx++) + if (adsp_info.module[mod_idx].id == id) + return mod_idx; + + return -ENXIO; +} +#endif + +static struct msm_adsp_module *find_adsp_module_by_id( + struct adsp_info *info, uint32_t id) +{ + if (id > info->max_module_id) { + return NULL; + } else { +#if CONFIG_MSM_AMSS_VERSION >= 6350 + id = get_module_index(id); + if (id < 0) + return NULL; +#endif + return info->id_to_module[id]; + } +} + +static struct msm_adsp_module *find_adsp_module_by_name( + struct adsp_info *info, const char *name) +{ + unsigned n; + for (n = 0; n < info->module_count; n++) + if (!strcmp(name, adsp_modules[n].name)) + return adsp_modules + n; + return NULL; +} + +static int adsp_rpc_init(struct msm_adsp_module *adsp_module) +{ + /* remove the original connect once compatible support is complete */ + adsp_module->rpc_client = msm_rpc_connect( + RPC_ADSP_RTOS_ATOM_PROG, + RPC_ADSP_RTOS_ATOM_VERS, + MSM_RPC_UNINTERRUPTIBLE | MSM_RPC_ENABLE_RECEIVE); + + if (IS_ERR(adsp_module->rpc_client)) { + int rc = PTR_ERR(adsp_module->rpc_client); + adsp_module->rpc_client = 0; + pr_err("adsp: could not open rpc client: %d\n", rc); + return rc; + } + + return 0; +} + +#if CONFIG_MSM_AMSS_VERSION >= 6350 
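+/*
+ * On AMSS >= 6350 the driver first asks the modem for its INIT_INFO
+ * (queue offsets, task-to-module and module tables) through the RPC
+ * exchange below; msm_adsp_get() waits for this data to arrive before
+ * any module can be opened.  Older AMSS builds rely on the static
+ * per-image tables instead (see adsp_6210.c / adsp_6220.c).
+ */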
+/* + * Send RPC_ADSP_RTOS_CMD_GET_INIT_INFO cmd to ARM9 and get + * queue offsets and module entries (init info) as part of the event. + */ +static void msm_get_init_info(void) +{ + int rc; + struct rpc_adsp_rtos_app_to_modem_args_t rpc_req; + + adsp_info.init_info_rpc_client = msm_rpc_connect( + RPC_ADSP_RTOS_ATOM_PROG, + RPC_ADSP_RTOS_ATOM_VERS, + MSM_RPC_UNINTERRUPTIBLE | MSM_RPC_ENABLE_RECEIVE); + if (IS_ERR(adsp_info.init_info_rpc_client)) { + rc = PTR_ERR(adsp_info.init_info_rpc_client); + adsp_info.init_info_rpc_client = 0; + pr_err("adsp: could not open rpc client: %d\n", rc); + return; + } + + msm_rpc_setup_req(&rpc_req.hdr, + RPC_ADSP_RTOS_ATOM_PROG, + msm_rpc_get_vers(adsp_info.init_info_rpc_client), + RPC_ADSP_RTOS_APP_TO_MODEM_PROC); + + rpc_req.gotit = cpu_to_be32(1); + rpc_req.cmd = cpu_to_be32(RPC_ADSP_RTOS_CMD_GET_INIT_INFO); + rpc_req.proc_id = cpu_to_be32(RPC_ADSP_RTOS_PROC_APPS); + rpc_req.module = 0; + + rc = msm_rpc_write(adsp_info.init_info_rpc_client, + &rpc_req, sizeof(rpc_req)); + if (rc < 0) + pr_err("adsp: could not send RPC request: %d\n", rc); +} +#endif + +int msm_adsp_get(const char *name, struct msm_adsp_module **out, + struct msm_adsp_ops *ops, void *driver_data) +{ + struct msm_adsp_module *module; + int rc = 0; + +#if CONFIG_MSM_AMSS_VERSION >= 6350 + static uint32_t init_info_cmd_sent; + if (!init_info_cmd_sent) { + msm_get_init_info(); + init_waitqueue_head(&adsp_info.init_info_wait); + rc = wait_event_timeout(adsp_info.init_info_wait, + adsp_info.init_info_state == ADSP_STATE_INIT_INFO, + 5 * HZ); + if (!rc) { + pr_info("adsp: INIT_INFO failed\n"); + return -ETIMEDOUT; + } + init_info_cmd_sent++; + } +#endif + + module = find_adsp_module_by_name(&adsp_info, name); + if (!module) + return -ENODEV; + + mutex_lock(&module->lock); + pr_info("adsp: opening module %s\n", module->name); + if (module->open_count++ == 0 && module->clk) + clk_enable(module->clk); + + mutex_lock(&adsp_open_lock); + if (adsp_open_count++ == 0) { + enable_irq(INT_ADSP); + prevent_suspend(); + } + mutex_unlock(&adsp_open_lock); + + if (module->ops) { + rc = -EBUSY; + goto done; + } + + rc = adsp_rpc_init(module); + if (rc) + goto done; + + module->ops = ops; + module->driver_data = driver_data; + *out = module; + rc = rpc_adsp_rtos_app_to_modem(RPC_ADSP_RTOS_CMD_REGISTER_APP, + module->id, module); + if (rc) { + module->ops = NULL; + module->driver_data = NULL; + *out = NULL; + pr_err("adsp: REGISTER_APP failed\n"); + goto done; + } + + pr_info("adsp: module %s has been registered\n", module->name); + +done: + mutex_lock(&adsp_open_lock); + if (rc && --adsp_open_count == 0) { + disable_irq(INT_ADSP); + allow_suspend(); + } + if (rc && --module->open_count == 0 && module->clk) + clk_disable(module->clk); + mutex_unlock(&adsp_open_lock); + mutex_unlock(&module->lock); + return rc; +} +EXPORT_SYMBOL(msm_adsp_get); + +static int msm_adsp_disable_locked(struct msm_adsp_module *module); + +void msm_adsp_put(struct msm_adsp_module *module) +{ + unsigned long flags; + + mutex_lock(&module->lock); + if (--module->open_count == 0 && module->clk) + clk_disable(module->clk); + if (module->ops) { + pr_info("adsp: closing module %s\n", module->name); + + /* lock to ensure a dsp event cannot be delivered + * during or after removal of the ops and driver_data + */ + spin_lock_irqsave(&adsp_cmd_lock, flags); + module->ops = NULL; + module->driver_data = NULL; + spin_unlock_irqrestore(&adsp_cmd_lock, flags); + + if (module->state != ADSP_STATE_DISABLED) { + pr_info("adsp: disabling module %s\n", 
module->name); + msm_adsp_disable_locked(module); + } + + msm_rpc_close(module->rpc_client); + module->rpc_client = 0; + if (--adsp_open_count == 0) { + disable_irq(INT_ADSP); + allow_suspend(); + pr_info("adsp: disable interrupt\n"); + } + } else { + pr_info("adsp: module %s is already closed\n", module->name); + } + mutex_unlock(&module->lock); +} +EXPORT_SYMBOL(msm_adsp_put); + +/* this should be common code with rpc_servers.c */ +static int rpc_send_accepted_void_reply(struct msm_rpc_endpoint *client, + uint32_t xid, uint32_t accept_status) +{ + int rc = 0; + uint8_t reply_buf[sizeof(struct rpc_reply_hdr)]; + struct rpc_reply_hdr *reply = (struct rpc_reply_hdr *)reply_buf; + + reply->xid = cpu_to_be32(xid); + reply->type = cpu_to_be32(1); /* reply */ + reply->reply_stat = cpu_to_be32(RPCMSG_REPLYSTAT_ACCEPTED); + + reply->data.acc_hdr.accept_stat = cpu_to_be32(accept_status); + reply->data.acc_hdr.verf_flavor = 0; + reply->data.acc_hdr.verf_length = 0; + + rc = msm_rpc_write(rpc_cb_server_client, reply_buf, sizeof(reply_buf)); + if (rc < 0) + pr_err("adsp: could not write RPC response: %d\n", rc); + return rc; +} + +int __msm_adsp_write(struct msm_adsp_module *module, unsigned dsp_queue_addr, + void *cmd_buf, size_t cmd_size) +{ + uint32_t ctrl_word; + uint32_t dsp_q_addr; + uint32_t dsp_addr; + uint32_t cmd_id = 0; + int cnt = 0; + int ret_status = 0; + unsigned long flags; + struct adsp_info *info = module->info; + + spin_lock_irqsave(&adsp_cmd_lock, flags); + + if (module->state != ADSP_STATE_ENABLED) { + spin_unlock_irqrestore(&adsp_cmd_lock, flags); + pr_err("adsp: module %s not enabled before write\n", + module->name); + return -ENODEV; + } + if (adsp_validate_module(module->id)) { + spin_unlock_irqrestore(&adsp_cmd_lock, flags); + pr_info("adsp: module id validation failed %s %d\n", + module->name, module->id); + return -ENXIO; + } + dsp_q_addr = adsp_get_queue_offset(info, dsp_queue_addr); + dsp_q_addr &= ADSP_RTOS_WRITE_CTRL_WORD_DSP_ADDR_M; + + /* Poll until the ADSP is ready to accept a command. + * Wait for 100us, return error if it's not responding. + * If this returns an error, we need to disable ALL modules and + * then retry. + */ + while (((ctrl_word = readl(info->write_ctrl)) & + ADSP_RTOS_WRITE_CTRL_WORD_READY_M) != + ADSP_RTOS_WRITE_CTRL_WORD_READY_V) { + if (cnt > 100) { + pr_err("adsp: timeout waiting for DSP write ready\n"); + ret_status = -EIO; + goto fail; + } + pr_warning("adsp: waiting for DSP write ready\n"); + udelay(1); + cnt++; + } + + /* Set the mutex bits */ + ctrl_word &= ~(ADSP_RTOS_WRITE_CTRL_WORD_MUTEX_M); + ctrl_word |= ADSP_RTOS_WRITE_CTRL_WORD_MUTEX_NAVAIL_V; + + /* Clear the command bits */ + ctrl_word &= ~(ADSP_RTOS_WRITE_CTRL_WORD_CMD_M); + + /* Set the queue address bits */ + ctrl_word &= ~(ADSP_RTOS_WRITE_CTRL_WORD_DSP_ADDR_M); + ctrl_word |= dsp_q_addr; + + writel(ctrl_word, info->write_ctrl); + + /* Generate an interrupt to the DSP. This notifies the DSP that + * we are about to send a command on this particular queue. The + * DSP will in response change its state. + */ + writel(1, info->send_irq); + + /* Poll until the adsp responds to the interrupt; this does not + * generate an interrupt from the adsp. This should happen within + * 5ms. 
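+	 * (The loop below busy-waits in 1 us steps and gives up with
+	 * -EIO after 5000 iterations, i.e. roughly 5 ms.)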
+ */ + cnt = 0; + while ((readl(info->write_ctrl) & + ADSP_RTOS_WRITE_CTRL_WORD_MUTEX_M) == + ADSP_RTOS_WRITE_CTRL_WORD_MUTEX_NAVAIL_V) { + if (cnt > 5000) { + pr_err("adsp: timeout waiting for adsp ack\n"); + ret_status = -EIO; + goto fail; + } + udelay(1); + cnt++; + } + + /* Read the ctrl word */ + ctrl_word = readl(info->write_ctrl); + + if ((ctrl_word & ADSP_RTOS_WRITE_CTRL_WORD_STATUS_M) != + ADSP_RTOS_WRITE_CTRL_WORD_NO_ERR_V) { + ret_status = -EAGAIN; + goto fail; + } + + /* Ctrl word status bits were 00, no error in the ctrl word */ + + /* Get the DSP buffer address */ + dsp_addr = (ctrl_word & ADSP_RTOS_WRITE_CTRL_WORD_DSP_ADDR_M) + + (uint32_t)MSM_AD5_BASE; + + if (dsp_addr < (uint32_t)(MSM_AD5_BASE + QDSP_RAMC_OFFSET)) { + uint16_t *buf_ptr = (uint16_t *) cmd_buf; + uint16_t *dsp_addr16 = (uint16_t *)dsp_addr; + cmd_size /= sizeof(uint16_t); + + /* Save the command ID */ + cmd_id = (uint32_t) buf_ptr[0]; + + /* Copy the command to DSP memory */ + cmd_size++; + while (--cmd_size) + *dsp_addr16++ = *buf_ptr++; + } else { + uint32_t *buf_ptr = (uint32_t *) cmd_buf; + uint32_t *dsp_addr32 = (uint32_t *)dsp_addr; + cmd_size /= sizeof(uint32_t); + + /* Save the command ID */ + cmd_id = buf_ptr[0]; + + cmd_size++; + while (--cmd_size) + *dsp_addr32++ = *buf_ptr++; + } + + /* Set the mutex bits */ + ctrl_word &= ~(ADSP_RTOS_WRITE_CTRL_WORD_MUTEX_M); + ctrl_word |= ADSP_RTOS_WRITE_CTRL_WORD_MUTEX_NAVAIL_V; + + /* Set the command bits to write done */ + ctrl_word &= ~(ADSP_RTOS_WRITE_CTRL_WORD_CMD_M); + ctrl_word |= ADSP_RTOS_WRITE_CTRL_WORD_CMD_WRITE_DONE_V; + + /* Set the queue address bits */ + ctrl_word &= ~(ADSP_RTOS_WRITE_CTRL_WORD_DSP_ADDR_M); + ctrl_word |= dsp_q_addr; + + writel(ctrl_word, info->write_ctrl); + + /* Generate an interrupt to the DSP. It does not respond with + * an interrupt, and we do not need to wait for it to + * acknowledge, because it will hold the mutex lock until it's + * ready to receive more commands again. 
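+	 * A subsequent __msm_adsp_write() will simply spin in the poll
+	 * loop at the top of this function until the DSP releases the
+	 * mutex bit again.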
+ */ + writel(1, info->send_irq); + + module->num_commands++; + +fail: + spin_unlock_irqrestore(&adsp_cmd_lock, flags); + return ret_status; +} +EXPORT_SYMBOL(msm_adsp_write); + +int msm_adsp_write(struct msm_adsp_module *module, unsigned dsp_queue_addr, + void *cmd_buf, size_t cmd_size) +{ + int rc, retries = 0; + do { + rc = __msm_adsp_write(module, dsp_queue_addr, cmd_buf, cmd_size); + if (rc == -EAGAIN) + udelay(10); + } while(rc == -EAGAIN && retries++ < 100); + if (retries > 50) + pr_warning("adsp: %s command took %d attempts: rc %d\n", + module->name, retries, rc); + return rc; +} + +#ifdef CONFIG_MSM_ADSP_REPORT_EVENTS +static void *modem_event_addr; +#if CONFIG_MSM_AMSS_VERSION >= 6350 +static void read_modem_event(void *buf, size_t size) +{ + uint32_t *dptr = buf; + struct rpc_adsp_rtos_modem_to_app_args_t *sptr; + struct adsp_rtos_mp_mtoa_type *pkt_ptr; + size_t len = size / 4; + + if (len < 3) { + pr_err("%s: invalid length %d\n", __func__, len); + return; + } + + sptr = modem_event_addr; + pkt_ptr = &sptr->mtoa_pkt.adsp_rtos_mp_mtoa_data.mp_mtoa_packet; + + dptr[0] = be32_to_cpu(sptr->mtoa_pkt.mp_mtoa_header.event); + dptr[1] = be32_to_cpu(pkt_ptr->module); + dptr[2] = be32_to_cpu(pkt_ptr->image); +} +#else +static void read_modem_event(void *buf, size_t size) +{ + uint32_t *dptr = buf; + struct rpc_adsp_rtos_modem_to_app_args_t *sptr = + modem_event_addr; + size_t len = size / 4; + if (len < 3) { + pr_err("%s: invalid length %d\n", __func__, len); + return; + } + dptr[0] = be32_to_cpu(sptr->event); + dptr[1] = be32_to_cpu(sptr->module); + dptr[2] = be32_to_cpu(sptr->image); +} +#endif /* CONFIG_MSM_AMSS_VERSION >= 6350 */ +#endif /* CONFIG_MSM_ADSP_REPORT_EVENTS */ + +static void handle_adsp_rtos_mtoa_app(struct rpc_request_hdr *req) +{ + struct rpc_adsp_rtos_modem_to_app_args_t *args = + (struct rpc_adsp_rtos_modem_to_app_args_t *)req; + uint32_t event; + uint32_t proc_id; + uint32_t module_id; + uint32_t image; + struct msm_adsp_module *module; +#if CONFIG_MSM_AMSS_VERSION >= 6350 + struct adsp_rtos_mp_mtoa_type *pkt_ptr = + &args->mtoa_pkt.adsp_rtos_mp_mtoa_data.mp_mtoa_packet; + + event = be32_to_cpu(args->mtoa_pkt.mp_mtoa_header.event); + proc_id = be32_to_cpu(args->mtoa_pkt.mp_mtoa_header.proc_id); + module_id = be32_to_cpu(pkt_ptr->module); + image = be32_to_cpu(pkt_ptr->image); + + if (be32_to_cpu(args->mtoa_pkt.desc_field) == RPC_ADSP_RTOS_INIT_INFO) { + struct queue_to_offset_type *qptr; + struct queue_to_offset_type *qtbl; + uint32_t *mptr; + uint32_t *mtbl; + uint32_t q_idx; + uint32_t num_entries; + uint32_t entries_per_image; + struct adsp_rtos_mp_mtoa_init_info_type *iptr; + struct adsp_rtos_mp_mtoa_init_info_type *sptr; + int32_t i_no, e_idx; + + pr_info("adsp:INIT_INFO Event\n"); + sptr = &args->mtoa_pkt.adsp_rtos_mp_mtoa_data. 
+ mp_mtoa_init_packet; + + iptr = adsp_info.init_info_ptr; + iptr->image_count = be32_to_cpu(sptr->image_count); + iptr->num_queue_offsets = be32_to_cpu(sptr->num_queue_offsets); + num_entries = iptr->num_queue_offsets; + qptr = &sptr->queue_offsets_tbl[0][0]; + for (i_no = 0; i_no < iptr->image_count; i_no++) { + qtbl = &iptr->queue_offsets_tbl[i_no][0]; + for (e_idx = 0; e_idx < num_entries; e_idx++) { + qtbl[e_idx].offset = be32_to_cpu(qptr->offset); + qtbl[e_idx].queue = be32_to_cpu(qptr->queue); + q_idx = be32_to_cpu(qptr->queue); + iptr->queue_offsets[i_no][q_idx] = + qtbl[e_idx].offset; + qptr++; + } + } + + num_entries = be32_to_cpu(sptr->num_task_module_entries); + iptr->num_task_module_entries = num_entries; + entries_per_image = num_entries / iptr->image_count; + mptr = &sptr->task_to_module_tbl[0][0]; + for (i_no = 0; i_no < iptr->image_count; i_no++) { + mtbl = &iptr->task_to_module_tbl[i_no][0]; + for (e_idx = 0; e_idx < entries_per_image; e_idx++) { + mtbl[e_idx] = be32_to_cpu(*mptr); + mptr++; + } + } + + iptr->module_table_size = be32_to_cpu(sptr->module_table_size); + mptr = &sptr->module_entries[0]; + for (i_no = 0; i_no < iptr->module_table_size; i_no++) + iptr->module_entries[i_no] = be32_to_cpu(mptr[i_no]); + adsp_info.init_info_state = ADSP_STATE_INIT_INFO; + rpc_send_accepted_void_reply(rpc_cb_server_client, req->xid, + RPC_ACCEPTSTAT_SUCCESS); + wake_up(&adsp_info.init_info_wait); + + return; + } +#else + event = be32_to_cpu(args->event); + proc_id = be32_to_cpu(args->proc_id); + module_id = be32_to_cpu(args->module); + image = be32_to_cpu(args->image); +#endif + + pr_info("adsp: rpc event=%d, proc_id=%d, module=%d, image=%d\n", + event, proc_id, module_id, image); + + module = find_adsp_module_by_id(&adsp_info, module_id); + if (!module) { + pr_err("adsp: module %d is not supported!\n", module_id); + rpc_send_accepted_void_reply(rpc_cb_server_client, req->xid, + RPC_ACCEPTSTAT_GARBAGE_ARGS); + return; + } + + mutex_lock(&module->lock); + switch (event) { + case RPC_ADSP_RTOS_MOD_READY: + pr_info("adsp: module %s: READY\n", module->name); + module->state = ADSP_STATE_ENABLED; + wake_up(&module->state_wait); + adsp_set_image(module->info, image); + break; + case RPC_ADSP_RTOS_MOD_DISABLE: + pr_info("adsp: module %s: DISABLED\n", module->name); + module->state = ADSP_STATE_DISABLED; + wake_up(&module->state_wait); + break; + case RPC_ADSP_RTOS_SERVICE_RESET: + pr_info("adsp: module %s: SERVICE_RESET\n", module->name); + module->state = ADSP_STATE_DISABLED; + wake_up(&module->state_wait); + break; + case RPC_ADSP_RTOS_CMD_SUCCESS: + pr_info("adsp: module %s: CMD_SUCCESS\n", module->name); + break; + case RPC_ADSP_RTOS_CMD_FAIL: + pr_info("adsp: module %s: CMD_FAIL\n", module->name); + break; +#if CONFIG_MSM_AMSS_VERSION >= 6350 + case RPC_ADSP_RTOS_DISABLE_FAIL: + pr_info("adsp: module %s: DISABLE_FAIL\n", module->name); + break; +#endif + default: + pr_info("adsp: unknown event %d\n", event); + rpc_send_accepted_void_reply(rpc_cb_server_client, req->xid, + RPC_ACCEPTSTAT_GARBAGE_ARGS); + mutex_unlock(&module->lock); + return; + } + rpc_send_accepted_void_reply(rpc_cb_server_client, req->xid, + RPC_ACCEPTSTAT_SUCCESS); + mutex_unlock(&module->lock); +#ifdef CONFIG_MSM_ADSP_REPORT_EVENTS + if (module->ops != NULL && module->ops->event != NULL) { + modem_event_addr = (uint32_t *)req; + module->ops->event(module->driver_data, EVENT_MSG_ID, + EVENT_LEN, read_modem_event); + } +#endif +} + +static int handle_adsp_rtos_mtoa(struct rpc_request_hdr *req) +{ + switch 
(req->procedure) { + case RPC_ADSP_RTOS_MTOA_NULL_PROC: + rpc_send_accepted_void_reply(rpc_cb_server_client, + req->xid, + RPC_ACCEPTSTAT_SUCCESS); + break; + case RPC_ADSP_RTOS_MODEM_TO_APP_PROC: + handle_adsp_rtos_mtoa_app(req); + break; + default: + pr_err("adsp: unknowned proc %d\n", req->procedure); + rpc_send_accepted_void_reply( + rpc_cb_server_client, req->xid, + RPC_ACCEPTSTAT_PROC_UNAVAIL); + break; + } + return 0; +} + +/* this should be common code with rpc_servers.c */ +static int adsp_rpc_thread(void *data) +{ + void *buffer; + struct rpc_request_hdr *req; + int rc; + + do { + rc = msm_rpc_read(rpc_cb_server_client, &buffer, -1, -1); + if (rc < 0) { + pr_err("adsp: could not read rpc: %d\n", rc); + break; + } + req = (struct rpc_request_hdr *)buffer; + + req->type = be32_to_cpu(req->type); + req->xid = be32_to_cpu(req->xid); + req->rpc_vers = be32_to_cpu(req->rpc_vers); + req->prog = be32_to_cpu(req->prog); + req->vers = be32_to_cpu(req->vers); + req->procedure = be32_to_cpu(req->procedure); + + if (req->type != 0) + goto bad_rpc; + if (req->rpc_vers != 2) + goto bad_rpc; + if (req->prog != RPC_ADSP_RTOS_MTOA_PROG) + goto bad_rpc; + if (req->vers != RPC_ADSP_RTOS_MTOA_VERS) + goto bad_rpc; + + handle_adsp_rtos_mtoa(req); + kfree(buffer); + continue; + +bad_rpc: + pr_err("adsp: bogus rpc from modem\n"); + kfree(buffer); + } while (1); + + do_exit(0); +} + +static size_t read_event_len; +static void *read_event_addr; + +static void read_event_16(void *buf, size_t size) +{ + uint16_t *dst = buf; + uint16_t *src = read_event_addr; + size_t len = size / 2; + if (len > read_event_len) + len = read_event_len; + else if (len < read_event_len) + pr_warning("%s: event bufer length too small (%d < %d)\n", + __func__, len, read_event_len); + while (len--) + *dst++ = *src++; +} + +static void read_event_32(void *buf, size_t size) +{ + uint32_t *dst = buf; + uint32_t *src = read_event_addr; + size_t len = size / 4; + if (len > read_event_len) + len = read_event_len; + else if (len < read_event_len) + pr_warning("%s: event bufer length too small (%d < %d)\n", + __func__, len, read_event_len); + while (len--) + *dst++ = *src++; +} + +static int adsp_rtos_read_ctrl_word_cmd_tast_to_h_v( + struct adsp_info *info, void *dsp_addr) +{ + struct msm_adsp_module *module; + unsigned rtos_task_id; + unsigned msg_id; + unsigned msg_length; + void (*func)(void *, size_t); + + if (dsp_addr >= (void *)(MSM_AD5_BASE + QDSP_RAMC_OFFSET)) { + uint32_t *dsp_addr32 = dsp_addr; + uint32_t tmp = *dsp_addr32++; + rtos_task_id = (tmp & ADSP_RTOS_READ_CTRL_WORD_TASK_ID_M) >> 8; + msg_id = (tmp & ADSP_RTOS_READ_CTRL_WORD_MSG_ID_M); + read_event_len = tmp >> 16; + read_event_addr = dsp_addr32; + msg_length = read_event_len * sizeof(uint32_t); + func = read_event_32; + } else { + uint16_t *dsp_addr16 = dsp_addr; + uint16_t tmp = *dsp_addr16++; + rtos_task_id = (tmp & ADSP_RTOS_READ_CTRL_WORD_TASK_ID_M) >> 8; + msg_id = tmp & ADSP_RTOS_READ_CTRL_WORD_MSG_ID_M; + read_event_len = *dsp_addr16++; + read_event_addr = dsp_addr16; + msg_length = read_event_len * sizeof(uint16_t); + func = read_event_16; + } + + if (rtos_task_id > info->max_task_id) { + pr_err("adsp: bogus task id %d\n", rtos_task_id); + return 0; + } + module = find_adsp_module_by_id(info, + adsp_get_module(info, rtos_task_id)); + + if (!module) { + pr_err("adsp: no module for task id %d\n", rtos_task_id); + return 0; + } + + module->num_events++; + + if (!module->ops) { + pr_err("adsp: module %s is not open\n", module->name); + return 0; + } + + 
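+	/*
+	 * Hand the message to the client's registered msm_adsp_ops->event
+	 * handler.  The handler receives the message id, the payload length
+	 * in bytes and a copy callback (read_event_16 or read_event_32,
+	 * depending on where the message sits in DSP memory); it is expected
+	 * to call that callback with a buffer of its own to pull the payload
+	 * out before returning, since adsp_get_event() hands the slot back
+	 * to the DSP as soon as we return.
+	 */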
module->ops->event(module->driver_data, msg_id, msg_length, func); + return 0; +} + +static int adsp_get_event(struct adsp_info *info) +{ + uint32_t ctrl_word; + uint32_t ready; + void *dsp_addr; + uint32_t cmd_type; + int cnt; + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&adsp_cmd_lock, flags); + + /* Whenever the DSP has a message, it updates this control word + * and generates an interrupt. When we receive the interrupt, we + * read this register to find out what ADSP task the command is + * comming from. + * + * The ADSP should *always* be ready on the first call, but the + * irq handler calls us in a loop (to handle back-to-back command + * processing), so we give the DSP some time to return to the + * ready state. The DSP will not issue another IRQ for events + * pending between the first IRQ and the event queue being drained, + * unfortunately. + */ + + for (cnt = 0; cnt < 10; cnt++) { + ctrl_word = readl(info->read_ctrl); + + if ((ctrl_word & ADSP_RTOS_READ_CTRL_WORD_FLAG_M) == + ADSP_RTOS_READ_CTRL_WORD_FLAG_UP_CONT_V) + goto ready; + + udelay(10); + } + pr_warning("adsp: not ready after 100uS\n"); + rc = -EBUSY; + goto done; + +ready: + /* Here we check to see if there are pending messages. If there are + * none, we siply return -EAGAIN to indicate that there are no more + * messages pending. + */ + ready = ctrl_word & ADSP_RTOS_READ_CTRL_WORD_READY_M; + if ((ready != ADSP_RTOS_READ_CTRL_WORD_READY_V) && + (ready != ADSP_RTOS_READ_CTRL_WORD_CONT_V)) { + rc = -EAGAIN; + goto done; + } + + /* DSP says that there are messages waiting for the host to read */ + + /* Get the Command Type */ + cmd_type = ctrl_word & ADSP_RTOS_READ_CTRL_WORD_CMD_TYPE_M; + + /* Get the DSP buffer address */ + dsp_addr = (void *)((ctrl_word & + ADSP_RTOS_READ_CTRL_WORD_DSP_ADDR_M) + + (uint32_t)MSM_AD5_BASE); + + /* We can only handle Task-to-Host messages */ + if (cmd_type != ADSP_RTOS_READ_CTRL_WORD_CMD_TASK_TO_H_V) { + pr_err("adsp: unknown dsp cmd_type %d\n", cmd_type); + rc = -EIO; + goto done; + } + + adsp_rtos_read_ctrl_word_cmd_tast_to_h_v(info, dsp_addr); + + ctrl_word = readl(info->read_ctrl); + ctrl_word &= ~ADSP_RTOS_READ_CTRL_WORD_READY_M; + + /* Write ctrl word to the DSP */ + writel(ctrl_word, info->read_ctrl); + + /* Generate an interrupt to the DSP */ + writel(1, info->send_irq); + +done: + spin_unlock_irqrestore(&adsp_cmd_lock, flags); + return rc; +} + +static irqreturn_t adsp_irq_handler(int irq, void *data) +{ + struct adsp_info *info = &adsp_info; + int cnt = 0; + for (cnt = 0; cnt < 10; cnt++) + if (adsp_get_event(info) < 0) + break; + if (cnt > info->event_backlog_max) + info->event_backlog_max = cnt; + info->events_received += cnt; + if (cnt == 10) + pr_err("adsp: too many (%d) events for single irq!\n", cnt); + return IRQ_HANDLED; +} + +int adsp_set_clkrate(struct msm_adsp_module *module, unsigned long clk_rate) +{ + if (module->clk && clk_rate) + return clk_set_rate(module->clk, clk_rate); + + return -EINVAL; +} + +int msm_adsp_enable(struct msm_adsp_module *module) +{ + int rc = 0; + + pr_info("msm_adsp_enable() '%s'state[%d] id[%d]\n", + module->name, module->state, module->id); + + mutex_lock(&module->lock); + switch (module->state) { + case ADSP_STATE_DISABLED: + rc = rpc_adsp_rtos_app_to_modem(RPC_ADSP_RTOS_CMD_ENABLE, + module->id, module); + if (rc) + break; + module->state = ADSP_STATE_ENABLING; + mutex_unlock(&module->lock); + rc = wait_event_timeout(module->state_wait, + module->state != ADSP_STATE_ENABLING, + 1 * HZ); + mutex_lock(&module->lock); + if 
(module->state == ADSP_STATE_ENABLED) { + rc = 0; + } else { + pr_err("adsp: module '%s' enable timed out\n", + module->name); + rc = -ETIMEDOUT; + } + break; + case ADSP_STATE_ENABLING: + pr_warning("adsp: module '%s' enable in progress\n", + module->name); + break; + case ADSP_STATE_ENABLED: + pr_warning("adsp: module '%s' already enabled\n", + module->name); + break; + case ADSP_STATE_DISABLING: + pr_err("adsp: module '%s' disable in progress\n", + module->name); + rc = -EBUSY; + break; + } + mutex_unlock(&module->lock); + return rc; +} +EXPORT_SYMBOL(msm_adsp_enable); + +static int msm_adsp_disable_locked(struct msm_adsp_module *module) +{ + int rc = 0; + + switch (module->state) { + case ADSP_STATE_DISABLED: + pr_warning("adsp: module '%s' already disabled\n", + module->name); + break; + case ADSP_STATE_ENABLING: + case ADSP_STATE_ENABLED: + rc = rpc_adsp_rtos_app_to_modem(RPC_ADSP_RTOS_CMD_DISABLE, + module->id, module); + module->state = ADSP_STATE_DISABLED; + } + return rc; +} + +int msm_adsp_disable(struct msm_adsp_module *module) +{ + int rc; + pr_info("msm_adsp_disable() '%s'\n", module->name); + mutex_lock(&module->lock); + rc = msm_adsp_disable_locked(module); + mutex_unlock(&module->lock); + return rc; +} +EXPORT_SYMBOL(msm_adsp_disable); + +static int msm_adsp_probe(struct platform_device *pdev) +{ + unsigned count; + int rc, i; + int max_module_id; + + pr_info("adsp: probe\n"); + + wake_lock_init(&adsp_wake_lock, WAKE_LOCK_SUSPEND, "adsp"); +#if CONFIG_MSM_AMSS_VERSION >= 6350 + adsp_info.init_info_ptr = kzalloc( + (sizeof(struct adsp_rtos_mp_mtoa_init_info_type)), GFP_KERNEL); + if (!adsp_info.init_info_ptr) + return -ENOMEM; +#endif + + rc = adsp_init_info(&adsp_info); + if (rc) + return rc; + adsp_info.send_irq += (uint32_t) MSM_AD5_BASE; + adsp_info.read_ctrl += (uint32_t) MSM_AD5_BASE; + adsp_info.write_ctrl += (uint32_t) MSM_AD5_BASE; + count = adsp_info.module_count; + +#if CONFIG_MSM_AMSS_VERSION >= 6350 + max_module_id = count; +#else + max_module_id = adsp_info.max_module_id + 1; +#endif + + adsp_modules = kzalloc( + sizeof(struct msm_adsp_module) * count + + sizeof(void *) * max_module_id, GFP_KERNEL); + if (!adsp_modules) + return -ENOMEM; + + adsp_info.id_to_module = (void *) (adsp_modules + count); + + spin_lock_init(&adsp_cmd_lock); + + rc = request_irq(INT_ADSP, adsp_irq_handler, IRQF_TRIGGER_RISING, + "adsp", 0); + if (rc < 0) + goto fail_request_irq; + disable_irq(INT_ADSP); + + rpc_cb_server_client = msm_rpc_open(); + if (IS_ERR(rpc_cb_server_client)) { + rpc_cb_server_client = NULL; + rc = PTR_ERR(rpc_cb_server_client); + pr_err("adsp: could not create rpc server (%d)\n", rc); + goto fail_rpc_open; + } + + rc = msm_rpc_register_server(rpc_cb_server_client, + RPC_ADSP_RTOS_MTOA_PROG, + RPC_ADSP_RTOS_MTOA_VERS); + if (rc) { + pr_err("adsp: could not register callback server (%d)\n", rc); + goto fail_rpc_register; + } + + /* start the kernel thread to process the callbacks */ + kthread_run(adsp_rpc_thread, NULL, "kadspd"); + + for (i = 0; i < count; i++) { + struct msm_adsp_module *mod = adsp_modules + i; + mutex_init(&mod->lock); + init_waitqueue_head(&mod->state_wait); + mod->info = &adsp_info; + mod->name = adsp_info.module[i].name; + mod->id = adsp_info.module[i].id; + if (adsp_info.module[i].clk_name) + mod->clk = clk_get(NULL, adsp_info.module[i].clk_name); + else + mod->clk = NULL; + if (mod->clk && adsp_info.module[i].clk_rate) + clk_set_rate(mod->clk, adsp_info.module[i].clk_rate); + mod->verify_cmd = adsp_info.module[i].verify_cmd; + 
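+		/*
+		 * Optional per-module hooks from the adsp_module_info table:
+		 * ->verify_cmd vets commands submitted through the userspace
+		 * interface before they are written to a DSP queue, and
+		 * ->patch_event lets a module rewrite its events before they
+		 * are delivered back (see the adsp_*_verify_cmd and
+		 * adsp_*_patch_event helpers declared in adsp.h).
+		 */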
mod->patch_event = adsp_info.module[i].patch_event; + INIT_HLIST_HEAD(&mod->pmem_regions); + mod->pdev.name = adsp_info.module[i].pdev_name; + mod->pdev.id = -1; +#if CONFIG_MSM_AMSS_VERSION >= 6350 + adsp_info.id_to_module[i] = mod; +#else + adsp_info.id_to_module[mod->id] = mod; +#endif + platform_device_register(&mod->pdev); + } + + msm_adsp_publish_cdevs(adsp_modules, count); + + return 0; + +fail_rpc_register: + msm_rpc_close(rpc_cb_server_client); + rpc_cb_server_client = NULL; +fail_rpc_open: + enable_irq(INT_ADSP); + free_irq(INT_ADSP, 0); +fail_request_irq: + kfree(adsp_modules); +#if CONFIG_MSM_AMSS_VERSION >= 6350 + kfree(adsp_info.init_info_ptr); +#endif + return rc; +} + +static struct platform_driver msm_adsp_driver = { + .probe = msm_adsp_probe, + .driver = { + .name = MSM_ADSP_DRIVER_NAME, + .owner = THIS_MODULE, + }, +}; + +static int __init adsp_init(void) +{ + return platform_driver_register(&msm_adsp_driver); +} + +device_initcall(adsp_init); diff --git a/arch/arm/mach-msm/qdsp5/adsp.h b/arch/arm/mach-msm/qdsp5/adsp.h new file mode 100644 index 0000000000000..0e5c9abd3da58 --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/adsp.h @@ -0,0 +1,369 @@ +/* arch/arm/mach-msm/qdsp5/adsp.h + * + * Copyright (c) 2008 QUALCOMM Incorporated + * Copyright (C) 2008 Google, Inc. + * Author: Iliyan Malchev + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _ARCH_ARM_MACH_MSM_ADSP_H +#define _ARCH_ARM_MACH_MSM_ADSP_H + +#include +#include +#include +#include + +int adsp_pmem_fixup(struct msm_adsp_module *module, void **addr, + unsigned long len); +int adsp_pmem_fixup_kvaddr(struct msm_adsp_module *module, void **addr, + unsigned long *kvaddr, unsigned long len); +int adsp_pmem_paddr_fixup(struct msm_adsp_module *module, void **addr); + +int adsp_vfe_verify_cmd(struct msm_adsp_module *module, + unsigned int queue_id, void *cmd_data, + size_t cmd_size); +int adsp_jpeg_verify_cmd(struct msm_adsp_module *module, + unsigned int queue_id, void *cmd_data, + size_t cmd_size); +int adsp_lpm_verify_cmd(struct msm_adsp_module *module, + unsigned int queue_id, void *cmd_data, + size_t cmd_size); +int adsp_video_verify_cmd(struct msm_adsp_module *module, + unsigned int queue_id, void *cmd_data, + size_t cmd_size); +int adsp_videoenc_verify_cmd(struct msm_adsp_module *module, + unsigned int queue_id, void *cmd_data, + size_t cmd_size); + + +struct adsp_event; + +int adsp_vfe_patch_event(struct msm_adsp_module *module, + struct adsp_event *event); + +int adsp_jpeg_patch_event(struct msm_adsp_module *module, + struct adsp_event *event); + + +struct adsp_module_info { + const char *name; + const char *pdev_name; + uint32_t id; + const char *clk_name; + unsigned long clk_rate; + int (*verify_cmd) (struct msm_adsp_module*, unsigned int, void *, + size_t); + int (*patch_event) (struct msm_adsp_module*, struct adsp_event *); +}; + +#define ADSP_EVENT_MAX_SIZE 496 +#define EVENT_LEN 12 +#define EVENT_MSG_ID ((uint16_t)~0) + +struct adsp_event { + struct list_head list; + uint32_t size; /* always in bytes */ + uint16_t msg_id; + uint16_t type; /* 0 for msgs (from aDSP), -1 for events (from ARM9) */ + int is16; /* always 0 (msg is 32-bit) when the event type is 1(ARM9) */ + union { + uint16_t msg16[ADSP_EVENT_MAX_SIZE / 2]; + uint32_t msg32[ADSP_EVENT_MAX_SIZE / 4]; + } data; +}; + +struct adsp_info { + uint32_t send_irq; + uint32_t read_ctrl; + uint32_t write_ctrl; + + uint32_t max_msg16_size; + uint32_t max_msg32_size; + + uint32_t max_task_id; + uint32_t max_module_id; + uint32_t max_queue_id; + uint32_t max_image_id; + + /* for each image id, a map of queue id to offset */ + uint32_t **queue_offset; + + /* for each image id, a map of task id to module id */ + uint32_t **task_to_module; + + /* for each module id, map of module id to module */ + struct msm_adsp_module **id_to_module; + + uint32_t module_count; + struct adsp_module_info *module; + + /* stats */ + uint32_t events_received; + uint32_t event_backlog_max; + +#if CONFIG_MSM_AMSS_VERSION >= 6350 + /* rpc_client for init_info */ + struct msm_rpc_endpoint *init_info_rpc_client; + struct adsp_rtos_mp_mtoa_init_info_type *init_info_ptr; + wait_queue_head_t init_info_wait; + unsigned init_info_state; +#endif +}; + +#define RPC_ADSP_RTOS_ATOM_PROG 0x3000000a +#define RPC_ADSP_RTOS_MTOA_PROG 0x3000000b +#define RPC_ADSP_RTOS_ATOM_NULL_PROC 0 +#define RPC_ADSP_RTOS_MTOA_NULL_PROC 0 +#define RPC_ADSP_RTOS_APP_TO_MODEM_PROC 2 +#define RPC_ADSP_RTOS_MODEM_TO_APP_PROC 2 + +#if CONFIG_MSM_AMSS_VERSION >= 6350 +#define RPC_ADSP_RTOS_ATOM_VERS MSM_RPC_VERS(1,0) +#define RPC_ADSP_RTOS_MTOA_VERS MSM_RPC_VERS(2,1) /* must be actual vers */ +#define MSM_ADSP_DRIVER_NAME "rs3000000a:00010000" +#elif (CONFIG_MSM_AMSS_VERSION == 6220) || (CONFIG_MSM_AMSS_VERSION == 6225) +#define RPC_ADSP_RTOS_ATOM_VERS MSM_RPC_VERS(0x71d1094b, 0) +#define RPC_ADSP_RTOS_MTOA_VERS MSM_RPC_VERS(0xee3a9966, 0) +#define 
MSM_ADSP_DRIVER_NAME "rs3000000a:71d1094b" +#elif CONFIG_MSM_AMSS_VERSION == 6210 +#define RPC_ADSP_RTOS_ATOM_VERS MSM_RPC_VERS(0x20f17fd3, 0) +#define RPC_ADSP_RTOS_MTOA_VERS MSM_RPC_VERS(0x75babbd6, 0) +#define MSM_ADSP_DRIVER_NAME "rs3000000a:20f17fd3" +#else +#error "Unknown AMSS version" +#endif + +enum rpc_adsp_rtos_proc_type { + RPC_ADSP_RTOS_PROC_NONE = 0, + RPC_ADSP_RTOS_PROC_MODEM = 1, + RPC_ADSP_RTOS_PROC_APPS = 2, +}; + +enum { + RPC_ADSP_RTOS_CMD_REGISTER_APP, + RPC_ADSP_RTOS_CMD_ENABLE, + RPC_ADSP_RTOS_CMD_DISABLE, + RPC_ADSP_RTOS_CMD_KERNEL_COMMAND, + RPC_ADSP_RTOS_CMD_16_COMMAND, + RPC_ADSP_RTOS_CMD_32_COMMAND, + RPC_ADSP_RTOS_CMD_DISABLE_EVENT_RSP, + RPC_ADSP_RTOS_CMD_REMOTE_EVENT, + RPC_ADSP_RTOS_CMD_SET_STATE, +#if CONFIG_MSM_AMSS_VERSION >= 6350 + RPC_ADSP_RTOS_CMD_REMOTE_INIT_INFO_EVENT, + RPC_ADSP_RTOS_CMD_GET_INIT_INFO, +#endif +}; + +enum rpc_adsp_rtos_mod_status_type { + RPC_ADSP_RTOS_MOD_READY, + RPC_ADSP_RTOS_MOD_DISABLE, + RPC_ADSP_RTOS_SERVICE_RESET, + RPC_ADSP_RTOS_CMD_FAIL, + RPC_ADSP_RTOS_CMD_SUCCESS, +#if CONFIG_MSM_AMSS_VERSION >= 6350 + RPC_ADSP_RTOS_INIT_INFO, + RPC_ADSP_RTOS_DISABLE_FAIL, +#endif +}; + +struct rpc_adsp_rtos_app_to_modem_args_t { + struct rpc_request_hdr hdr; + uint32_t gotit; /* if 1, the next elements are present */ + uint32_t cmd; /* e.g., RPC_ADSP_RTOS_CMD_REGISTER_APP */ + uint32_t proc_id; /* e.g., RPC_ADSP_RTOS_PROC_APPS */ + uint32_t module; /* e.g., QDSP_MODULE_AUDPPTASK */ +}; + +#if CONFIG_MSM_AMSS_VERSION >= 6350 +enum qdsp_image_type { + QDSP_IMAGE_COMBO, + QDSP_IMAGE_GAUDIO, + QDSP_IMAGE_QTV_LP, + QDSP_IMAGE_MAX, + /* DO NOT USE: Force this enum to be a 32bit type to improve speed */ + QDSP_IMAGE_32BIT_DUMMY = 0x10000 +}; + +struct adsp_rtos_mp_mtoa_header_type { + enum rpc_adsp_rtos_mod_status_type event; + enum rpc_adsp_rtos_proc_type proc_id; +}; + +/* ADSP RTOS MP Communications - Modem to APP's Event Info*/ +struct adsp_rtos_mp_mtoa_type { + uint32_t module; + uint32_t image; + uint32_t apps_okts; +}; + +/* ADSP RTOS MP Communications - Modem to APP's Init Info */ +#define IMG_MAX 8 +#define ENTRIES_MAX 64 + +struct queue_to_offset_type { + uint32_t queue; + uint32_t offset; +}; + +struct adsp_rtos_mp_mtoa_init_info_type { + uint32_t image_count; + uint32_t num_queue_offsets; + struct queue_to_offset_type queue_offsets_tbl[IMG_MAX][ENTRIES_MAX]; + uint32_t num_task_module_entries; + uint32_t task_to_module_tbl[IMG_MAX][ENTRIES_MAX]; + + uint32_t module_table_size; + uint32_t module_entries[ENTRIES_MAX]; + /* + * queue_offsets[] is to store only queue_offsets + */ + uint32_t queue_offsets[IMG_MAX][ENTRIES_MAX]; +}; + +struct adsp_rtos_mp_mtoa_s_type { + struct adsp_rtos_mp_mtoa_header_type mp_mtoa_header; + + uint32_t desc_field; + union { + struct adsp_rtos_mp_mtoa_init_info_type mp_mtoa_init_packet; + struct adsp_rtos_mp_mtoa_type mp_mtoa_packet; + } adsp_rtos_mp_mtoa_data; +}; + +struct rpc_adsp_rtos_modem_to_app_args_t { + struct rpc_request_hdr hdr; + uint32_t gotit; /* if 1, the next elements are present */ + struct adsp_rtos_mp_mtoa_s_type mtoa_pkt; +}; +#else +struct rpc_adsp_rtos_modem_to_app_args_t { + struct rpc_request_hdr hdr; + uint32_t gotit; /* if 1, the next elements are present */ + uint32_t event; /* e.g., RPC_ADSP_RTOS_CMD_REGISTER_APP */ + uint32_t proc_id; /* e.g., RPC_ADSP_RTOS_PROC_APPS */ + uint32_t module; /* e.g., QDSP_MODULE_AUDPPTASK */ + uint32_t image; /* RPC_QDSP_IMAGE_GAUDIO */ +}; +#endif /* CONFIG_MSM_AMSS_VERSION >= 6350 */ + +#define ADSP_STATE_DISABLED 0 +#define ADSP_STATE_ENABLING 
1 +#define ADSP_STATE_ENABLED 2 +#define ADSP_STATE_DISABLING 3 +#if CONFIG_MSM_AMSS_VERSION >= 6350 +#define ADSP_STATE_INIT_INFO 4 +#endif + +struct msm_adsp_module { + struct mutex lock; + const char *name; + unsigned id; + struct adsp_info *info; + + struct msm_rpc_endpoint *rpc_client; + struct msm_adsp_ops *ops; + void *driver_data; + + /* statistics */ + unsigned num_commands; + unsigned num_events; + + wait_queue_head_t state_wait; + unsigned state; + + struct platform_device pdev; + struct clk *clk; + int open_count; + + struct mutex pmem_regions_lock; + struct hlist_head pmem_regions; + int (*verify_cmd) (struct msm_adsp_module*, unsigned int, void *, + size_t); + int (*patch_event) (struct msm_adsp_module*, struct adsp_event *); +}; + +extern void msm_adsp_publish_cdevs(struct msm_adsp_module *, unsigned); +extern int adsp_init_info(struct adsp_info *info); + +/* Value to indicate that a queue is not defined for a particular image */ +#if CONFIG_MSM_AMSS_VERSION >= 6350 +#define QDSP_RTOS_NO_QUEUE 0xfffffffe +#else +#define QDSP_RTOS_NO_QUEUE 0xffffffff +#endif + +/* + * Constants used to communicate with the ADSP RTOS + */ +#define ADSP_RTOS_WRITE_CTRL_WORD_MUTEX_M 0x80000000U +#define ADSP_RTOS_WRITE_CTRL_WORD_MUTEX_NAVAIL_V 0x80000000U +#define ADSP_RTOS_WRITE_CTRL_WORD_MUTEX_AVAIL_V 0x00000000U + +#define ADSP_RTOS_WRITE_CTRL_WORD_CMD_M 0x70000000U +#define ADSP_RTOS_WRITE_CTRL_WORD_CMD_WRITE_REQ_V 0x00000000U +#define ADSP_RTOS_WRITE_CTRL_WORD_CMD_WRITE_DONE_V 0x10000000U +#define ADSP_RTOS_WRITE_CTRL_WORD_CMD_NO_CMD_V 0x70000000U + +#define ADSP_RTOS_WRITE_CTRL_WORD_STATUS_M 0x0E000000U +#define ADSP_RTOS_WRITE_CTRL_WORD_NO_ERR_V 0x00000000U +#define ADSP_RTOS_WRITE_CTRL_WORD_NO_FREE_BUF_V 0x02000000U + +#define ADSP_RTOS_WRITE_CTRL_WORD_KERNEL_FLG_M 0x01000000U +#define ADSP_RTOS_WRITE_CTRL_WORD_HTOD_MSG_WRITE_V 0x00000000U +#define ADSP_RTOS_WRITE_CTRL_WORD_HTOD_CMD_V 0x01000000U + +#define ADSP_RTOS_WRITE_CTRL_WORD_DSP_ADDR_M 0x00FFFFFFU +#define ADSP_RTOS_WRITE_CTRL_WORD_HTOD_CMD_ID_M 0x00FFFFFFU + +/* Combination of MUTEX and CMD bits to check if the DSP is busy */ +#define ADSP_RTOS_WRITE_CTRL_WORD_READY_M 0xF0000000U +#define ADSP_RTOS_WRITE_CTRL_WORD_READY_V 0x70000000U + +/* RTOS to Host processor command mask values */ +#define ADSP_RTOS_READ_CTRL_WORD_FLAG_M 0x80000000U +#define ADSP_RTOS_READ_CTRL_WORD_FLAG_UP_WAIT_V 0x00000000U +#define ADSP_RTOS_READ_CTRL_WORD_FLAG_UP_CONT_V 0x80000000U + +#define ADSP_RTOS_READ_CTRL_WORD_CMD_M 0x60000000U +#define ADSP_RTOS_READ_CTRL_WORD_READ_DONE_V 0x00000000U +#define ADSP_RTOS_READ_CTRL_WORD_READ_REQ_V 0x20000000U +#define ADSP_RTOS_READ_CTRL_WORD_NO_CMD_V 0x60000000U + +/* Combination of FLAG and COMMAND bits to check if MSG ready */ +#define ADSP_RTOS_READ_CTRL_WORD_READY_M 0xE0000000U +#define ADSP_RTOS_READ_CTRL_WORD_READY_V 0xA0000000U +#define ADSP_RTOS_READ_CTRL_WORD_CONT_V 0xC0000000U +#define ADSP_RTOS_READ_CTRL_WORD_DONE_V 0xE0000000U + +#define ADSP_RTOS_READ_CTRL_WORD_STATUS_M 0x18000000U +#define ADSP_RTOS_READ_CTRL_WORD_NO_ERR_V 0x00000000U + +#define ADSP_RTOS_READ_CTRL_WORD_IN_PROG_M 0x04000000U +#define ADSP_RTOS_READ_CTRL_WORD_NO_READ_IN_PROG_V 0x00000000U +#define ADSP_RTOS_READ_CTRL_WORD_READ_IN_PROG_V 0x04000000U + +#define ADSP_RTOS_READ_CTRL_WORD_CMD_TYPE_M 0x03000000U +#define ADSP_RTOS_READ_CTRL_WORD_CMD_TASK_TO_H_V 0x00000000U +#define ADSP_RTOS_READ_CTRL_WORD_CMD_KRNL_TO_H_V 0x01000000U +#define ADSP_RTOS_READ_CTRL_WORD_CMD_H_TO_KRNL_CFM_V 0x02000000U + +#define 
ADSP_RTOS_READ_CTRL_WORD_DSP_ADDR_M 0x00FFFFFFU + +#define ADSP_RTOS_READ_CTRL_WORD_MSG_ID_M 0x000000FFU +#define ADSP_RTOS_READ_CTRL_WORD_TASK_ID_M 0x0000FF00U + +/* Base address of DSP and DSP hardware registers */ +#define QDSP_RAMC_OFFSET 0x400000 + +#endif /* _ARCH_ARM_MACH_MSM_ADSP_H */ diff --git a/arch/arm/mach-msm/qdsp5/adsp_6210.c b/arch/arm/mach-msm/qdsp5/adsp_6210.c new file mode 100644 index 0000000000000..628c2477a1f8f --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/adsp_6210.c @@ -0,0 +1,283 @@ +/* arch/arm/mach-msm/qdsp5/adsp_6210.h + * + * Copyright (c) 2008 QUALCOMM Incorporated. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include "adsp.h" + +/* Firmware modules */ +typedef enum { + QDSP_MODULE_KERNEL, + QDSP_MODULE_AFETASK, + QDSP_MODULE_AUDPLAY0TASK, + QDSP_MODULE_AUDPLAY1TASK, + QDSP_MODULE_AUDPPTASK, + QDSP_MODULE_VIDEOTASK, + QDSP_MODULE_VIDEO_AAC_VOC, + QDSP_MODULE_PCM_DEC, + QDSP_MODULE_AUDIO_DEC_MP3, + QDSP_MODULE_AUDIO_DEC_AAC, + QDSP_MODULE_AUDIO_DEC_WMA, + QDSP_MODULE_HOSTPCM, + QDSP_MODULE_DTMF, + QDSP_MODULE_AUDRECTASK, + QDSP_MODULE_AUDPREPROCTASK, + QDSP_MODULE_SBC_ENC, + QDSP_MODULE_VOC, + QDSP_MODULE_VOC_PCM, + QDSP_MODULE_VOCENCTASK, + QDSP_MODULE_VOCDECTASK, + QDSP_MODULE_VOICEPROCTASK, + QDSP_MODULE_VIDEOENCTASK, + QDSP_MODULE_VFETASK, + QDSP_MODULE_WAV_ENC, + QDSP_MODULE_AACLC_ENC, + QDSP_MODULE_VIDEO_AMR, + QDSP_MODULE_VOC_AMR, + QDSP_MODULE_VOC_EVRC, + QDSP_MODULE_VOC_13K, + QDSP_MODULE_VOC_FGV, + QDSP_MODULE_DIAGTASK, + QDSP_MODULE_JPEGTASK, + QDSP_MODULE_LPMTASK, + QDSP_MODULE_QCAMTASK, + QDSP_MODULE_MODMATHTASK, + QDSP_MODULE_AUDPLAY2TASK, + QDSP_MODULE_AUDPLAY3TASK, + QDSP_MODULE_AUDPLAY4TASK, + QDSP_MODULE_GRAPHICSTASK, + QDSP_MODULE_MIDI, + QDSP_MODULE_GAUDIO, + QDSP_MODULE_VDEC_LP_MODE, + QDSP_MODULE_MAX, +} qdsp_module_type; + +#define QDSP_RTOS_MAX_TASK_ID 19U + +/* Table of modules indexed by task ID for the GAUDIO image */ +static qdsp_module_type qdsp_gaudio_task_to_module_table[] = { + QDSP_MODULE_KERNEL, + QDSP_MODULE_AFETASK, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_AUDPPTASK, + QDSP_MODULE_AUDPLAY0TASK, + QDSP_MODULE_AUDPLAY1TASK, + QDSP_MODULE_AUDPLAY2TASK, + QDSP_MODULE_AUDPLAY3TASK, + QDSP_MODULE_AUDPLAY4TASK, + QDSP_MODULE_MAX, + QDSP_MODULE_AUDRECTASK, + QDSP_MODULE_AUDPREPROCTASK, + QDSP_MODULE_MAX, + QDSP_MODULE_GRAPHICSTASK, + QDSP_MODULE_MAX +}; + +/* Queue offset table indexed by queue ID for the GAUDIO image */ +static uint32_t qdsp_gaudio_queue_offset_table[] = { + QDSP_RTOS_NO_QUEUE, /* QDSP_lpmCommandQueue */ + 0x3be, /* QDSP_mpuAfeQueue */ + 0x3ee, /* QDSP_mpuGraphicsCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_mpuModmathCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_mpuVDecCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_mpuVDecPktQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_mpuVEncCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_rxMpuDecCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_rxMpuDecPktQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_txMpuEncQueue */ + 0x3c2, /* QDSP_uPAudPPCmd1Queue */ + 0x3c6, /* QDSP_uPAudPPCmd2Queue */ + 
0x3ca, /* QDSP_uPAudPPCmd3Queue */ + 0x3da, /* QDSP_uPAudPlay0BitStreamCtrlQueue */ + 0x3de, /* QDSP_uPAudPlay1BitStreamCtrlQueue */ + 0x3e2, /* QDSP_uPAudPlay2BitStreamCtrlQueue */ + 0x3e6, /* QDSP_uPAudPlay3BitStreamCtrlQueue */ + 0x3ea, /* QDSP_uPAudPlay4BitStreamCtrlQueue */ + 0x3ce, /* QDSP_uPAudPreProcCmdQueue */ + 0x3d6, /* QDSP_uPAudRecBitStreamQueue */ + 0x3d2, /* QDSP_uPAudRecCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPJpegActionCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPJpegCfgCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPVocProcQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_vfeCommandQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_vfeCommandScaleQueue */ + QDSP_RTOS_NO_QUEUE /* QDSP_vfeCommandTableQueue */ +}; + +/* Table of modules indexed by task ID for the COMBO image */ +static qdsp_module_type qdsp_combo_task_to_module_table[] = { + QDSP_MODULE_KERNEL, + QDSP_MODULE_AFETASK, + QDSP_MODULE_VOCDECTASK, + QDSP_MODULE_VOCENCTASK, + QDSP_MODULE_VIDEOTASK, + QDSP_MODULE_VIDEOENCTASK, + QDSP_MODULE_VOICEPROCTASK, + QDSP_MODULE_VFETASK, + QDSP_MODULE_JPEGTASK, + QDSP_MODULE_AUDPPTASK, + QDSP_MODULE_AUDPLAY0TASK, + QDSP_MODULE_AUDPLAY1TASK, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_LPMTASK, + QDSP_MODULE_AUDRECTASK, + QDSP_MODULE_AUDPREPROCTASK, + QDSP_MODULE_MODMATHTASK, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX +}; + +/* Queue offset table indexed by queue ID for the COMBO image */ +static uint32_t qdsp_combo_queue_offset_table[] = { + 0x585, /* QDSP_lpmCommandQueue */ + 0x52d, /* QDSP_mpuAfeQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_mpuGraphicsCmdQueue */ + 0x541, /* QDSP_mpuModmathCmdQueue */ + 0x555, /* QDSP_mpuVDecCmdQueue */ + 0x559, /* QDSP_mpuVDecPktQueue */ + 0x551, /* QDSP_mpuVEncCmdQueue */ + 0x535, /* QDSP_rxMpuDecCmdQueue */ + 0x539, /* QDSP_rxMpuDecPktQueue */ + 0x53d, /* QDSP_txMpuEncQueue */ + 0x55d, /* QDSP_uPAudPPCmd1Queue */ + 0x561, /* QDSP_uPAudPPCmd2Queue */ + 0x565, /* QDSP_uPAudPPCmd3Queue */ + 0x575, /* QDSP_uPAudPlay0BitStreamCtrlQueue */ + 0x579, /* QDSP_uPAudPlay1BitStreamCtrlQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPAudPlay2BitStreamCtrlQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPAudPlay3BitStreamCtrlQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPAudPlay4BitStreamCtrlQueue */ + 0x569, /* QDSP_uPAudPreProcCmdQueue */ + 0x571, /* QDSP_uPAudRecBitStreamQueue */ + 0x56d, /* QDSP_uPAudRecCmdQueue */ + 0x581, /* QDSP_uPJpegActionCmdQueue */ + 0x57d, /* QDSP_uPJpegCfgCmdQueue */ + 0x531, /* QDSP_uPVocProcQueue */ + 0x545, /* QDSP_vfeCommandQueue */ + 0x54d, /* QDSP_vfeCommandScaleQueue */ + 0x549 /* QDSP_vfeCommandTableQueue */ +}; + +/* Table of modules indexed by task ID for the QTV_LP image */ +static qdsp_module_type qdsp_qtv_lp_task_to_module_table[] = { + QDSP_MODULE_KERNEL, + QDSP_MODULE_AFETASK, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_VIDEOTASK, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_AUDPPTASK, + QDSP_MODULE_AUDPLAY0TASK, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_AUDRECTASK, + QDSP_MODULE_AUDPREPROCTASK, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX +}; + +/* Queue offset table indexed by queue ID for the QTV_LP image */ +static uint32_t qdsp_qtv_lp_queue_offset_table[] = { + QDSP_RTOS_NO_QUEUE, /* QDSP_lpmCommandQueue */ + 0x40c, /* QDSP_mpuAfeQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_mpuGraphicsCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_mpuModmathCmdQueue */ + 0x410, /* QDSP_mpuVDecCmdQueue */ + 0x414, /* 
QDSP_mpuVDecPktQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_mpuVEncCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_rxMpuDecCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_rxMpuDecPktQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_txMpuEncQueue */ + 0x41c, /* QDSP_uPAudPPCmd1Queue */ + 0x420, /* QDSP_uPAudPPCmd2Queue */ + 0x424, /* QDSP_uPAudPPCmd3Queue */ + 0x430, /* QDSP_uPAudPlay0BitStreamCtrlQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPAudPlay1BitStreamCtrlQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPAudPlay2BitStreamCtrlQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPAudPlay3BitStreamCtrlQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPAudPlay4BitStreamCtrlQueue */ + 0x418, /* QDSP_uPAudPreProcCmdQueue */ + 0x42c, /* QDSP_uPAudRecBitStreamQueue */ + 0x428, /* QDSP_uPAudRecCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPJpegActionCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPJpegCfgCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPVocProcQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_vfeCommandQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_vfeCommandScaleQueue */ + QDSP_RTOS_NO_QUEUE /* QDSP_vfeCommandTableQueue */ +}; + +/* Tables to convert tasks to modules */ +static uint32_t *qdsp_task_to_module[] = { + qdsp_combo_task_to_module_table, + qdsp_gaudio_task_to_module_table, + qdsp_qtv_lp_task_to_module_table, +}; + +/* Tables to retrieve queue offsets */ +static uint32_t *qdsp_queue_offset_table[] = { + qdsp_combo_queue_offset_table, + qdsp_gaudio_queue_offset_table, + qdsp_qtv_lp_queue_offset_table, +}; + +#define QDSP_MODULE(n) \ + { .name = #n, .pdev_name = "adsp_" #n, .id = QDSP_MODULE_##n } + +static struct adsp_module_info module_info[] = { + QDSP_MODULE(AUDPPTASK), + QDSP_MODULE(AUDRECTASK), + QDSP_MODULE(AUDPREPROCTASK), + QDSP_MODULE(VFETASK), + QDSP_MODULE(QCAMTASK), + QDSP_MODULE(LPMTASK), + QDSP_MODULE(JPEGTASK), + QDSP_MODULE(VIDEOTASK), + QDSP_MODULE(VDEC_LP_MODE), +}; + +int adsp_init_info(struct adsp_info *info) +{ + info->send_irq = 0x00c00200; + info->read_ctrl = 0x00400038; + info->write_ctrl = 0x00400034; + + info->max_msg16_size = 193; + info->max_msg32_size = 8; + + info->max_task_id = 16; + info->max_module_id = QDSP_MODULE_MAX - 1; + info->max_queue_id = QDSP_QUEUE_MAX; + info->max_image_id = 2; + info->queue_offset = qdsp_queue_offset_table; + info->task_to_module = qdsp_task_to_module; + + info->module_count = ARRAY_SIZE(module_info); + info->module = module_info; + return 0; +} diff --git a/arch/arm/mach-msm/qdsp5/adsp_6220.c b/arch/arm/mach-msm/qdsp5/adsp_6220.c new file mode 100644 index 0000000000000..c4c5a55271582 --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/adsp_6220.c @@ -0,0 +1,284 @@ +/* arch/arm/mach-msm/qdsp5/adsp_6220.h + * + * Copyright (c) 2008 QUALCOMM Incorporated. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include "adsp.h" + +/* Firmware modules */ +typedef enum { + QDSP_MODULE_KERNEL, + QDSP_MODULE_AFETASK, + QDSP_MODULE_AUDPLAY0TASK, + QDSP_MODULE_AUDPLAY1TASK, + QDSP_MODULE_AUDPPTASK, + QDSP_MODULE_VIDEOTASK, + QDSP_MODULE_VIDEO_AAC_VOC, + QDSP_MODULE_PCM_DEC, + QDSP_MODULE_AUDIO_DEC_MP3, + QDSP_MODULE_AUDIO_DEC_AAC, + QDSP_MODULE_AUDIO_DEC_WMA, + QDSP_MODULE_HOSTPCM, + QDSP_MODULE_DTMF, + QDSP_MODULE_AUDRECTASK, + QDSP_MODULE_AUDPREPROCTASK, + QDSP_MODULE_SBC_ENC, + QDSP_MODULE_VOC, + QDSP_MODULE_VOC_PCM, + QDSP_MODULE_VOCENCTASK, + QDSP_MODULE_VOCDECTASK, + QDSP_MODULE_VOICEPROCTASK, + QDSP_MODULE_VIDEOENCTASK, + QDSP_MODULE_VFETASK, + QDSP_MODULE_WAV_ENC, + QDSP_MODULE_AACLC_ENC, + QDSP_MODULE_VIDEO_AMR, + QDSP_MODULE_VOC_AMR, + QDSP_MODULE_VOC_EVRC, + QDSP_MODULE_VOC_13K, + QDSP_MODULE_VOC_FGV, + QDSP_MODULE_DIAGTASK, + QDSP_MODULE_JPEGTASK, + QDSP_MODULE_LPMTASK, + QDSP_MODULE_QCAMTASK, + QDSP_MODULE_MODMATHTASK, + QDSP_MODULE_AUDPLAY2TASK, + QDSP_MODULE_AUDPLAY3TASK, + QDSP_MODULE_AUDPLAY4TASK, + QDSP_MODULE_GRAPHICSTASK, + QDSP_MODULE_MIDI, + QDSP_MODULE_GAUDIO, + QDSP_MODULE_VDEC_LP_MODE, + QDSP_MODULE_MAX, +} qdsp_module_type; + +#define QDSP_RTOS_MAX_TASK_ID 19U + +/* Table of modules indexed by task ID for the GAUDIO image */ +static qdsp_module_type qdsp_gaudio_task_to_module_table[] = { + QDSP_MODULE_KERNEL, + QDSP_MODULE_AFETASK, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_AUDPPTASK, + QDSP_MODULE_AUDPLAY0TASK, + QDSP_MODULE_AUDPLAY1TASK, + QDSP_MODULE_AUDPLAY2TASK, + QDSP_MODULE_AUDPLAY3TASK, + QDSP_MODULE_AUDPLAY4TASK, + QDSP_MODULE_MAX, + QDSP_MODULE_AUDRECTASK, + QDSP_MODULE_AUDPREPROCTASK, + QDSP_MODULE_MAX, + QDSP_MODULE_GRAPHICSTASK, + QDSP_MODULE_MAX +}; + +/* Queue offset table indexed by queue ID for the GAUDIO image */ +static uint32_t qdsp_gaudio_queue_offset_table[] = { + QDSP_RTOS_NO_QUEUE, /* QDSP_lpmCommandQueue */ + 0x3f0, /* QDSP_mpuAfeQueue */ + 0x420, /* QDSP_mpuGraphicsCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_mpuModmathCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_mpuVDecCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_mpuVDecPktQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_mpuVEncCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_rxMpuDecCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_rxMpuDecPktQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_txMpuEncQueue */ + 0x3f4, /* QDSP_uPAudPPCmd1Queue */ + 0x3f8, /* QDSP_uPAudPPCmd2Queue */ + 0x3fc, /* QDSP_uPAudPPCmd3Queue */ + 0x40c, /* QDSP_uPAudPlay0BitStreamCtrlQueue */ + 0x410, /* QDSP_uPAudPlay1BitStreamCtrlQueue */ + 0x414, /* QDSP_uPAudPlay2BitStreamCtrlQueue */ + 0x418, /* QDSP_uPAudPlay3BitStreamCtrlQueue */ + 0x41c, /* QDSP_uPAudPlay4BitStreamCtrlQueue */ + 0x400, /* QDSP_uPAudPreProcCmdQueue */ + 0x408, /* QDSP_uPAudRecBitStreamQueue */ + 0x404, /* QDSP_uPAudRecCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPJpegActionCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPJpegCfgCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPVocProcQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_vfeCommandQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_vfeCommandScaleQueue */ + QDSP_RTOS_NO_QUEUE /* QDSP_vfeCommandTableQueue */ +}; + +/* Table of modules indexed by task ID for the COMBO image */ +static qdsp_module_type qdsp_combo_task_to_module_table[] = { + QDSP_MODULE_KERNEL, + QDSP_MODULE_AFETASK, + QDSP_MODULE_VOCDECTASK, + QDSP_MODULE_VOCENCTASK, + QDSP_MODULE_VIDEOTASK, + QDSP_MODULE_VIDEOENCTASK, + QDSP_MODULE_VOICEPROCTASK, + QDSP_MODULE_VFETASK, + 
QDSP_MODULE_JPEGTASK, + QDSP_MODULE_AUDPPTASK, + QDSP_MODULE_AUDPLAY0TASK, + QDSP_MODULE_AUDPLAY1TASK, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_LPMTASK, + QDSP_MODULE_AUDRECTASK, + QDSP_MODULE_AUDPREPROCTASK, + QDSP_MODULE_MODMATHTASK, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX +}; + +/* Queue offset table indexed by queue ID for the COMBO image */ +static uint32_t qdsp_combo_queue_offset_table[] = { + 0x6f2, /* QDSP_lpmCommandQueue */ + 0x69e, /* QDSP_mpuAfeQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_mpuGraphicsCmdQueue */ + 0x6b2, /* QDSP_mpuModmathCmdQueue */ + 0x6c6, /* QDSP_mpuVDecCmdQueue */ + 0x6ca, /* QDSP_mpuVDecPktQueue */ + 0x6c2, /* QDSP_mpuVEncCmdQueue */ + 0x6a6, /* QDSP_rxMpuDecCmdQueue */ + 0x6aa, /* QDSP_rxMpuDecPktQueue */ + 0x6ae, /* QDSP_txMpuEncQueue */ + 0x6ce, /* QDSP_uPAudPPCmd1Queue */ + 0x6d2, /* QDSP_uPAudPPCmd2Queue */ + 0x6d6, /* QDSP_uPAudPPCmd3Queue */ + 0x6e6, /* QDSP_uPAudPlay0BitStreamCtrlQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPAudPlay1BitStreamCtrlQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPAudPlay2BitStreamCtrlQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPAudPlay3BitStreamCtrlQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPAudPlay4BitStreamCtrlQueue */ + 0x6da, /* QDSP_uPAudPreProcCmdQueue */ + 0x6e2, /* QDSP_uPAudRecBitStreamQueue */ + 0x6de, /* QDSP_uPAudRecCmdQueue */ + 0x6ee, /* QDSP_uPJpegActionCmdQueue */ + 0x6ea, /* QDSP_uPJpegCfgCmdQueue */ + 0x6a2, /* QDSP_uPVocProcQueue */ + 0x6b6, /* QDSP_vfeCommandQueue */ + 0x6be, /* QDSP_vfeCommandScaleQueue */ + 0x6ba /* QDSP_vfeCommandTableQueue */ +}; + +/* Table of modules indexed by task ID for the QTV_LP image */ +static qdsp_module_type qdsp_qtv_lp_task_to_module_table[] = { + QDSP_MODULE_KERNEL, + QDSP_MODULE_AFETASK, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_VIDEOTASK, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_AUDPPTASK, + QDSP_MODULE_AUDPLAY0TASK, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_AUDRECTASK, + QDSP_MODULE_AUDPREPROCTASK, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX +}; + +/* Queue offset table indexed by queue ID for the QTV_LP image */ +static uint32_t qdsp_qtv_lp_queue_offset_table[] = { + QDSP_RTOS_NO_QUEUE, /* QDSP_lpmCommandQueue */ + 0x430, /* QDSP_mpuAfeQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_mpuGraphicsCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_mpuModmathCmdQueue */ + 0x434, /* QDSP_mpuVDecCmdQueue */ + 0x438, /* QDSP_mpuVDecPktQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_mpuVEncCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_rxMpuDecCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_rxMpuDecPktQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_txMpuEncQueue */ + 0x440, /* QDSP_uPAudPPCmd1Queue */ + 0x444, /* QDSP_uPAudPPCmd2Queue */ + 0x448, /* QDSP_uPAudPPCmd3Queue */ + 0x454, /* QDSP_uPAudPlay0BitStreamCtrlQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPAudPlay1BitStreamCtrlQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPAudPlay2BitStreamCtrlQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPAudPlay3BitStreamCtrlQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPAudPlay4BitStreamCtrlQueue */ + 0x43c, /* QDSP_uPAudPreProcCmdQueue */ + 0x450, /* QDSP_uPAudRecBitStreamQueue */ + 0x44c, /* QDSP_uPAudRecCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPJpegActionCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPJpegCfgCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPVocProcQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_vfeCommandQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_vfeCommandScaleQueue */ + QDSP_RTOS_NO_QUEUE /* 
QDSP_vfeCommandTableQueue */ +}; + +/* Tables to convert tasks to modules */ +static qdsp_module_type *qdsp_task_to_module[] = { + qdsp_combo_task_to_module_table, + qdsp_gaudio_task_to_module_table, + qdsp_qtv_lp_task_to_module_table, +}; + +/* Tables to retrieve queue offsets */ +static uint32_t *qdsp_queue_offset_table[] = { + qdsp_combo_queue_offset_table, + qdsp_gaudio_queue_offset_table, + qdsp_qtv_lp_queue_offset_table, +}; + +#define QDSP_MODULE(n) \ + { .name = #n, .pdev_name = "adsp_" #n, .id = QDSP_MODULE_##n } + +static struct adsp_module_info module_info[] = { + QDSP_MODULE(AUDPLAY0TASK), + QDSP_MODULE(AUDPPTASK), + QDSP_MODULE(AUDPREPROCTASK), + QDSP_MODULE(AUDRECTASK), + QDSP_MODULE(VFETASK), + QDSP_MODULE(QCAMTASK), + QDSP_MODULE(LPMTASK), + QDSP_MODULE(JPEGTASK), + QDSP_MODULE(VIDEOTASK), + QDSP_MODULE(VDEC_LP_MODE), +}; + +int adsp_init_info(struct adsp_info *info) +{ + info->send_irq = 0x00c00200; + info->read_ctrl = 0x00400038; + info->write_ctrl = 0x00400034; + + info->max_msg16_size = 193; + info->max_msg32_size = 8; + + info->max_task_id = 16; + info->max_module_id = QDSP_MODULE_MAX - 1; + info->max_queue_id = QDSP_QUEUE_MAX; + info->max_image_id = 2; + info->queue_offset = qdsp_queue_offset_table; + info->task_to_module = qdsp_task_to_module; + + info->module_count = ARRAY_SIZE(module_info); + info->module = module_info; + return 0; +} diff --git a/arch/arm/mach-msm/qdsp5/adsp_6225.c b/arch/arm/mach-msm/qdsp5/adsp_6225.c new file mode 100644 index 0000000000000..5078afbb1a8c4 --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/adsp_6225.c @@ -0,0 +1,328 @@ +/* arch/arm/mach-msm/qdsp5/adsp_6225.h + * + * Copyright (c) 2008 QUALCOMM Incorporated. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include "adsp.h" + +/* Firmware modules */ +typedef enum { + QDSP_MODULE_KERNEL, + QDSP_MODULE_AFETASK, + QDSP_MODULE_AUDPLAY0TASK, + QDSP_MODULE_AUDPLAY1TASK, + QDSP_MODULE_AUDPPTASK, + QDSP_MODULE_VIDEOTASK, + QDSP_MODULE_VIDEO_AAC_VOC, + QDSP_MODULE_PCM_DEC, + QDSP_MODULE_AUDIO_DEC_MP3, + QDSP_MODULE_AUDIO_DEC_AAC, + QDSP_MODULE_AUDIO_DEC_WMA, + QDSP_MODULE_HOSTPCM, + QDSP_MODULE_DTMF, + QDSP_MODULE_AUDRECTASK, + QDSP_MODULE_AUDPREPROCTASK, + QDSP_MODULE_SBC_ENC, + QDSP_MODULE_VOC_UMTS, + QDSP_MODULE_VOC_CDMA, + QDSP_MODULE_VOC_PCM, + QDSP_MODULE_VOCENCTASK, + QDSP_MODULE_VOCDECTASK, + QDSP_MODULE_VOICEPROCTASK, + QDSP_MODULE_VIDEOENCTASK, + QDSP_MODULE_VFETASK, + QDSP_MODULE_WAV_ENC, + QDSP_MODULE_AACLC_ENC, + QDSP_MODULE_VIDEO_AMR, + QDSP_MODULE_VOC_AMR, + QDSP_MODULE_VOC_EVRC, + QDSP_MODULE_VOC_13K, + QDSP_MODULE_VOC_FGV, + QDSP_MODULE_DIAGTASK, + QDSP_MODULE_JPEGTASK, + QDSP_MODULE_LPMTASK, + QDSP_MODULE_QCAMTASK, + QDSP_MODULE_MODMATHTASK, + QDSP_MODULE_AUDPLAY2TASK, + QDSP_MODULE_AUDPLAY3TASK, + QDSP_MODULE_AUDPLAY4TASK, + QDSP_MODULE_GRAPHICSTASK, + QDSP_MODULE_MIDI, + QDSP_MODULE_GAUDIO, + QDSP_MODULE_VDEC_LP_MODE, + QDSP_MODULE_MAX, +} qdsp_module_type; + +#define QDSP_RTOS_MAX_TASK_ID 30U + +/* Table of modules indexed by task ID for the GAUDIO image */ +static qdsp_module_type qdsp_gaudio_task_to_module_table[] = { + QDSP_MODULE_KERNEL, + QDSP_MODULE_AFETASK, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_AUDPPTASK, + QDSP_MODULE_AUDPLAY0TASK, + QDSP_MODULE_AUDPLAY1TASK, + QDSP_MODULE_AUDPLAY2TASK, + QDSP_MODULE_AUDPLAY3TASK, + QDSP_MODULE_AUDPLAY4TASK, + QDSP_MODULE_MAX, + QDSP_MODULE_AUDRECTASK, + QDSP_MODULE_AUDPREPROCTASK, + QDSP_MODULE_MAX, + QDSP_MODULE_GRAPHICSTASK, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, +}; + +/* Queue offset table indexed by queue ID for the GAUDIO image */ +static uint32_t qdsp_gaudio_queue_offset_table[] = { + QDSP_RTOS_NO_QUEUE, /* QDSP_lpmCommandQueue */ + 0x3f0, /* QDSP_mpuAfeQueue */ + 0x420, /* QDSP_mpuGraphicsCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_mpuModmathCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_mpuVDecCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_mpuVDecPktQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_mpuVEncCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_rxMpuDecCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_rxMpuDecPktQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_txMpuEncQueue */ + 0x3f4, /* QDSP_uPAudPPCmd1Queue */ + 0x3f8, /* QDSP_uPAudPPCmd2Queue */ + 0x3fc, /* QDSP_uPAudPPCmd3Queue */ + 0x40c, /* QDSP_uPAudPlay0BitStreamCtrlQueue */ + 0x410, /* QDSP_uPAudPlay1BitStreamCtrlQueue */ + 0x414, /* QDSP_uPAudPlay2BitStreamCtrlQueue */ + 0x418, /* QDSP_uPAudPlay3BitStreamCtrlQueue */ + 0x41c, /* QDSP_uPAudPlay4BitStreamCtrlQueue */ + 0x400, /* QDSP_uPAudPreProcCmdQueue */ + 0x408, /* QDSP_uPAudRecBitStreamQueue */ + 0x404, /* QDSP_uPAudRecCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPJpegActionCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPJpegCfgCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPVocProcQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_vfeCommandQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_vfeCommandScaleQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_vfeCommandTableQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPDiagQueue */ +}; + +/* Table of modules indexed by task ID for the COMBO image */ 
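+/* Note (descriptive comment, not from the original patch): these tables are
+ * indexed by aDSP RTOS task ID; task IDs that have no corresponding firmware
+ * module in a given image are marked QDSP_MODULE_MAX. */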
+static qdsp_module_type qdsp_combo_task_to_module_table[] = { + QDSP_MODULE_KERNEL, + QDSP_MODULE_AFETASK, + QDSP_MODULE_VOCDECTASK, + QDSP_MODULE_VOCENCTASK, + QDSP_MODULE_VIDEOTASK, + QDSP_MODULE_VIDEOENCTASK, + QDSP_MODULE_VOICEPROCTASK, + QDSP_MODULE_VFETASK, + QDSP_MODULE_JPEGTASK, + QDSP_MODULE_AUDPPTASK, + QDSP_MODULE_AUDPLAY0TASK, + QDSP_MODULE_AUDPLAY1TASK, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_LPMTASK, + QDSP_MODULE_AUDRECTASK, + QDSP_MODULE_AUDPREPROCTASK, + QDSP_MODULE_MODMATHTASK, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_DIAGTASK, + QDSP_MODULE_MAX, +}; + +/* Queue offset table indexed by queue ID for the COMBO image */ +static uint32_t qdsp_combo_queue_offset_table[] = { + 0x714, /* QDSP_lpmCommandQueue */ + 0x6bc, /* QDSP_mpuAfeQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_mpuGraphicsCmdQueue */ + 0x6d0, /* QDSP_mpuModmathCmdQueue */ + 0x6e8, /* QDSP_mpuVDecCmdQueue */ + 0x6ec, /* QDSP_mpuVDecPktQueue */ + 0x6e4, /* QDSP_mpuVEncCmdQueue */ + 0x6c4, /* QDSP_rxMpuDecCmdQueue */ + 0x6c8, /* QDSP_rxMpuDecPktQueue */ + 0x6cc, /* QDSP_txMpuEncQueue */ + 0x6f0, /* QDSP_uPAudPPCmd1Queue */ + 0x6f4, /* QDSP_uPAudPPCmd2Queue */ + 0x6f8, /* QDSP_uPAudPPCmd3Queue */ + 0x708, /* QDSP_uPAudPlay0BitStreamCtrlQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPAudPlay1BitStreamCtrlQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPAudPlay2BitStreamCtrlQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPAudPlay3BitStreamCtrlQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPAudPlay4BitStreamCtrlQueue */ + 0x6fc, /* QDSP_uPAudPreProcCmdQueue */ + 0x704, /* QDSP_uPAudRecBitStreamQueue */ + 0x700, /* QDSP_uPAudRecCmdQueue */ + 0x710, /* QDSP_uPJpegActionCmdQueue */ + 0x70c, /* QDSP_uPJpegCfgCmdQueue */ + 0x6c0, /* QDSP_uPVocProcQueue */ + 0x6d8, /* QDSP_vfeCommandQueue */ + 0x6e0, /* QDSP_vfeCommandScaleQueue */ + 0x6dc, /* QDSP_vfeCommandTableQueue */ + 0x6d4, /* QDSP_uPDiagQueue */ +}; + +/* Table of modules indexed by task ID for the QTV_LP image */ +static qdsp_module_type qdsp_qtv_lp_task_to_module_table[] = { + QDSP_MODULE_KERNEL, + QDSP_MODULE_AFETASK, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_VIDEOTASK, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_AUDPPTASK, + QDSP_MODULE_AUDPLAY0TASK, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_AUDRECTASK, + QDSP_MODULE_AUDPREPROCTASK, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, + QDSP_MODULE_MAX, +}; + +/* Queue offset table indexed by queue ID for the QTV_LP image */ +static uint32_t qdsp_qtv_lp_queue_offset_table[] = { + QDSP_RTOS_NO_QUEUE, /* QDSP_lpmCommandQueue */ + 0x3fe, /* QDSP_mpuAfeQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_mpuGraphicsCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_mpuModmathCmdQueue */ + 0x402, /* QDSP_mpuVDecCmdQueue */ + 0x406, /* QDSP_mpuVDecPktQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_mpuVEncCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_rxMpuDecCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_rxMpuDecPktQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_txMpuEncQueue */ + 0x40e, /* QDSP_uPAudPPCmd1Queue */ + 0x412, /* QDSP_uPAudPPCmd2Queue */ + 0x416, /* 
QDSP_uPAudPPCmd3Queue */ + 0x422, /* QDSP_uPAudPlay0BitStreamCtrlQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPAudPlay1BitStreamCtrlQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPAudPlay2BitStreamCtrlQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPAudPlay3BitStreamCtrlQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPAudPlay4BitStreamCtrlQueue */ + 0x40a, /* QDSP_uPAudPreProcCmdQueue */ + 0x41e, /* QDSP_uPAudRecBitStreamQueue */ + 0x41a, /* QDSP_uPAudRecCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPJpegActionCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPJpegCfgCmdQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPVocProcQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_vfeCommandQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_vfeCommandScaleQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_vfeCommandTableQueue */ + QDSP_RTOS_NO_QUEUE, /* QDSP_uPDiagQueue */ +}; + +/* Tables to convert tasks to modules */ +static qdsp_module_type *qdsp_task_to_module[] = { + qdsp_combo_task_to_module_table, + qdsp_gaudio_task_to_module_table, + qdsp_qtv_lp_task_to_module_table, +}; + +/* Tables to retrieve queue offsets */ +static uint32_t *qdsp_queue_offset_table[] = { + qdsp_combo_queue_offset_table, + qdsp_gaudio_queue_offset_table, + qdsp_qtv_lp_queue_offset_table, +}; + +#define QDSP_MODULE(n, clkname, clkrate, verify_cmd_func, patch_event_func) \ + { .name = #n, .pdev_name = "adsp_" #n, .id = QDSP_MODULE_##n, \ + .clk_name = clkname, .clk_rate = clkrate, \ + .verify_cmd = verify_cmd_func, .patch_event = patch_event_func } + +static struct adsp_module_info module_info[] = { + QDSP_MODULE(AUDPLAY0TASK, NULL, 0, NULL, NULL), + QDSP_MODULE(AUDPPTASK, NULL, 0, NULL, NULL), + QDSP_MODULE(AUDRECTASK, NULL, 0, NULL, NULL), + QDSP_MODULE(AUDPREPROCTASK, NULL, 0, NULL, NULL), + QDSP_MODULE(VFETASK, "vfe_clk", 0, adsp_vfe_verify_cmd, + adsp_vfe_patch_event), + QDSP_MODULE(QCAMTASK, NULL, 0, NULL, NULL), + QDSP_MODULE(LPMTASK, NULL, 0, adsp_lpm_verify_cmd, NULL), + QDSP_MODULE(JPEGTASK, "vdc_clk", 0, adsp_jpeg_verify_cmd, + adsp_jpeg_patch_event), + QDSP_MODULE(VIDEOTASK, "vdc_clk", 96000000, + adsp_video_verify_cmd, NULL), + QDSP_MODULE(VDEC_LP_MODE, NULL, 0, NULL, NULL), + QDSP_MODULE(VIDEOENCTASK, "vdc_clk", 96000000, + adsp_videoenc_verify_cmd, NULL), +}; + +int adsp_init_info(struct adsp_info *info) +{ + info->send_irq = 0x00c00200; + info->read_ctrl = 0x00400038; + info->write_ctrl = 0x00400034; + + info->max_msg16_size = 193; + info->max_msg32_size = 8; + + info->max_task_id = 16; + info->max_module_id = QDSP_MODULE_MAX - 1; + info->max_queue_id = QDSP_QUEUE_MAX; + info->max_image_id = 2; + info->queue_offset = qdsp_queue_offset_table; + info->task_to_module = qdsp_task_to_module; + + info->module_count = ARRAY_SIZE(module_info); + info->module = module_info; + return 0; +} diff --git a/arch/arm/mach-msm/qdsp5/adsp_driver.c b/arch/arm/mach-msm/qdsp5/adsp_driver.c new file mode 100644 index 0000000000000..8197765aae1e8 --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/adsp_driver.c @@ -0,0 +1,642 @@ +/* arch/arm/mach-msm/qdsp5/adsp_driver.c + * + * Copyright (C) 2008 Google, Inc. + * Author: Iliyan Malchev + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "adsp.h" + +#include +#include + +struct adsp_pmem_region { + struct hlist_node list; + void *vaddr; + unsigned long paddr; + unsigned long kvaddr; + unsigned long len; + struct file *file; +}; + +struct adsp_device { + struct msm_adsp_module *module; + + spinlock_t event_queue_lock; + wait_queue_head_t event_wait; + struct list_head event_queue; + int abort; + + const char *name; + struct device *device; + struct cdev cdev; +}; + +static struct adsp_device *inode_to_device(struct inode *inode); + +#define __CONTAINS(r, v, l) ({ \ + typeof(r) __r = r; \ + typeof(v) __v = v; \ + typeof(v) __e = __v + l; \ + int res = __v >= __r->vaddr && \ + __e <= __r->vaddr + __r->len; \ + res; \ +}) + +#define CONTAINS(r1, r2) ({ \ + typeof(r2) __r2 = r2; \ + __CONTAINS(r1, __r2->vaddr, __r2->len); \ +}) + +#define IN_RANGE(r, v) ({ \ + typeof(r) __r = r; \ + typeof(v) __vv = v; \ + int res = ((__vv >= __r->vaddr) && \ + (__vv < (__r->vaddr + __r->len))); \ + res; \ +}) + +#define OVERLAPS(r1, r2) ({ \ + typeof(r1) __r1 = r1; \ + typeof(r2) __r2 = r2; \ + typeof(__r2->vaddr) __v = __r2->vaddr; \ + typeof(__v) __e = __v + __r2->len - 1; \ + int res = (IN_RANGE(__r1, __v) || IN_RANGE(__r1, __e)); \ + res; \ +}) + +static int adsp_pmem_check(struct msm_adsp_module *module, + void *vaddr, unsigned long len) +{ + struct adsp_pmem_region *region_elt; + struct hlist_node *node; + struct adsp_pmem_region t = { .vaddr = vaddr, .len = len }; + + hlist_for_each_entry(region_elt, node, &module->pmem_regions, list) { + if (CONTAINS(region_elt, &t) || CONTAINS(&t, region_elt) || + OVERLAPS(region_elt, &t)) { + printk(KERN_ERR "adsp: module %s:" + " region (vaddr %p len %ld)" + " clashes with registered region" + " (vaddr %p paddr %p len %ld)\n", + module->name, + vaddr, len, + region_elt->vaddr, + (void *)region_elt->paddr, + region_elt->len); + return -EINVAL; + } + } + + return 0; +} + +static int adsp_pmem_add(struct msm_adsp_module *module, + struct adsp_pmem_info *info) +{ + unsigned long paddr, kvaddr, len; + struct file *file; + struct adsp_pmem_region *region; + int rc = -EINVAL; + + mutex_lock(&module->pmem_regions_lock); + region = kmalloc(sizeof(*region), GFP_KERNEL); + if (!region) { + rc = -ENOMEM; + goto end; + } + INIT_HLIST_NODE(®ion->list); + if (get_pmem_file(info->fd, &paddr, &kvaddr, &len, &file)) { + kfree(region); + goto end; + } + + rc = adsp_pmem_check(module, info->vaddr, len); + if (rc < 0) { + put_pmem_file(file); + kfree(region); + goto end; + } + + region->vaddr = info->vaddr; + region->paddr = paddr; + region->kvaddr = kvaddr; + region->len = len; + region->file = file; + + hlist_add_head(®ion->list, &module->pmem_regions); +end: + mutex_unlock(&module->pmem_regions_lock); + return rc; +} + +static int adsp_pmem_lookup_vaddr(struct msm_adsp_module *module, void **addr, + unsigned long len, struct adsp_pmem_region **region) +{ + struct hlist_node *node; + void *vaddr = *addr; + struct adsp_pmem_region *region_elt; + + int match_count = 0; + + *region = NULL; + + /* returns physical address or zero */ + hlist_for_each_entry(region_elt, node, &module->pmem_regions, list) { + if (vaddr >= region_elt->vaddr && + vaddr < region_elt->vaddr + region_elt->len && + vaddr + len <= region_elt->vaddr + region_elt->len) { + /* offset since we could pass vaddr inside a registerd + * pmem buffer + */ + + match_count++; + if (!*region) + *region = region_elt; + } + } + + if (match_count > 1) { + printk(KERN_ERR 
"adsp: module %s: " + "multiple hits for vaddr %p, len %ld\n", + module->name, vaddr, len); + hlist_for_each_entry(region_elt, node, + &module->pmem_regions, list) { + if (vaddr >= region_elt->vaddr && + vaddr < region_elt->vaddr + region_elt->len && + vaddr + len <= region_elt->vaddr + region_elt->len) + printk(KERN_ERR "\t%p, %ld --> %p\n", + region_elt->vaddr, + region_elt->len, + (void *)region_elt->paddr); + } + } + + return *region ? 0 : -1; +} + +int adsp_pmem_fixup_kvaddr(struct msm_adsp_module *module, void **addr, + unsigned long *kvaddr, unsigned long len) +{ + struct adsp_pmem_region *region; + void *vaddr = *addr; + unsigned long *paddr = (unsigned long *)addr; + int ret; + + ret = adsp_pmem_lookup_vaddr(module, addr, len, ®ion); + if (ret) { + printk(KERN_ERR "adsp: not patching %s (paddr & kvaddr)," + " lookup (%p, %ld) failed\n", + module->name, vaddr, len); + return ret; + } + *paddr = region->paddr + (vaddr - region->vaddr); + *kvaddr = region->kvaddr + (vaddr - region->vaddr); + return 0; +} + +int adsp_pmem_fixup(struct msm_adsp_module *module, void **addr, + unsigned long len) +{ + struct adsp_pmem_region *region; + void *vaddr = *addr; + unsigned long *paddr = (unsigned long *)addr; + int ret; + + ret = adsp_pmem_lookup_vaddr(module, addr, len, ®ion); + if (ret) { + printk(KERN_ERR "adsp: not patching %s, lookup (%p, %ld) failed\n", + module->name, vaddr, len); + return ret; + } + + *paddr = region->paddr + (vaddr - region->vaddr); + return 0; +} + +static int adsp_verify_cmd(struct msm_adsp_module *module, + unsigned int queue_id, void *cmd_data, + size_t cmd_size) +{ + /* call the per module verifier */ + if (module->verify_cmd) + return module->verify_cmd(module, queue_id, cmd_data, + cmd_size); + else + printk(KERN_INFO "adsp: no packet verifying function " + "for task %s\n", module->name); + return 0; +} + +static long adsp_write_cmd(struct adsp_device *adev, void __user *arg) +{ + struct adsp_command_t cmd; + unsigned char buf[256]; + void *cmd_data; + long rc; + + if (copy_from_user(&cmd, (void __user *)arg, sizeof(cmd))) + return -EFAULT; + + if (cmd.len > 256) { + cmd_data = kmalloc(cmd.len, GFP_USER); + if (!cmd_data) + return -ENOMEM; + } else { + cmd_data = buf; + } + + if (copy_from_user(cmd_data, (void __user *)(cmd.data), cmd.len)) { + rc = -EFAULT; + goto end; + } + + mutex_lock(&adev->module->pmem_regions_lock); + if (adsp_verify_cmd(adev->module, cmd.queue, cmd_data, cmd.len)) { + printk(KERN_ERR "module %s: verify failed.\n", + adev->module->name); + rc = -EINVAL; + goto end; + } + rc = msm_adsp_write(adev->module, cmd.queue, cmd_data, cmd.len); +end: + mutex_unlock(&adev->module->pmem_regions_lock); + + if (cmd.len > 256) + kfree(cmd_data); + + return rc; +} + +static int adsp_events_pending(struct adsp_device *adev) +{ + unsigned long flags; + int yes; + spin_lock_irqsave(&adev->event_queue_lock, flags); + yes = !list_empty(&adev->event_queue); + spin_unlock_irqrestore(&adev->event_queue_lock, flags); + return yes || adev->abort; +} + +static int adsp_pmem_lookup_paddr(struct msm_adsp_module *module, void **addr, + struct adsp_pmem_region **region) +{ + struct hlist_node *node; + unsigned long paddr = (unsigned long)(*addr); + struct adsp_pmem_region *region_elt; + + hlist_for_each_entry(region_elt, node, &module->pmem_regions, list) { + if (paddr >= region_elt->paddr && + paddr < region_elt->paddr + region_elt->len) { + *region = region_elt; + return 0; + } + } + return -1; +} + +int adsp_pmem_paddr_fixup(struct msm_adsp_module *module, void 
**addr) +{ + struct adsp_pmem_region *region; + unsigned long paddr = (unsigned long)(*addr); + unsigned long *vaddr = (unsigned long *)addr; + int ret; + + ret = adsp_pmem_lookup_paddr(module, addr, ®ion); + if (ret) { + printk(KERN_ERR "adsp: not patching %s, paddr %p lookup failed\n", + module->name, vaddr); + return ret; + } + + *vaddr = (unsigned long)region->vaddr + (paddr - region->paddr); + return 0; +} + +static int adsp_patch_event(struct msm_adsp_module *module, + struct adsp_event *event) +{ + /* call the per-module msg verifier */ + if (module->patch_event) + return module->patch_event(module, event); + return 0; +} + +static long adsp_get_event(struct adsp_device *adev, void __user *arg) +{ + unsigned long flags; + struct adsp_event *data = NULL; + struct adsp_event_t evt; + int timeout; + long rc = 0; + + if (copy_from_user(&evt, arg, sizeof(struct adsp_event_t))) + return -EFAULT; + + timeout = (int)evt.timeout_ms; + + if (timeout > 0) { + rc = wait_event_interruptible_timeout( + adev->event_wait, adsp_events_pending(adev), + msecs_to_jiffies(timeout)); + if (rc == 0) + return -ETIMEDOUT; + } else { + rc = wait_event_interruptible( + adev->event_wait, adsp_events_pending(adev)); + } + if (rc < 0) + return rc; + + if (adev->abort) + return -ENODEV; + + spin_lock_irqsave(&adev->event_queue_lock, flags); + if (!list_empty(&adev->event_queue)) { + data = list_first_entry(&adev->event_queue, + struct adsp_event, list); + list_del(&data->list); + } + spin_unlock_irqrestore(&adev->event_queue_lock, flags); + + if (!data) + return -EAGAIN; + + /* DSP messages are type 0; they may contain physical addresses */ + if (data->type == 0) + adsp_patch_event(adev->module, data); + + /* map adsp_event --> adsp_event_t */ + if (evt.len < data->size) { + rc = -ETOOSMALL; + goto end; + } + if (data->msg_id != EVENT_MSG_ID) { + if (copy_to_user((void *)(evt.data), data->data.msg16, + data->size)) { + rc = -EFAULT; + goto end; + } + } else { + if (copy_to_user((void *)(evt.data), data->data.msg32, + data->size)) { + rc = -EFAULT; + goto end; + } + } + + evt.type = data->type; /* 0 --> from aDSP, 1 --> from ARM9 */ + evt.msg_id = data->msg_id; + evt.flags = data->is16; + evt.len = data->size; + if (copy_to_user(arg, &evt, sizeof(evt))) + rc = -EFAULT; +end: + kfree(data); + return rc; +} + +static long adsp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct adsp_device *adev = filp->private_data; + + switch (cmd) { + case ADSP_IOCTL_ENABLE: + return msm_adsp_enable(adev->module); + + case ADSP_IOCTL_DISABLE: + return msm_adsp_disable(adev->module); + + case ADSP_IOCTL_DISABLE_EVENT_RSP: + return 0; + + case ADSP_IOCTL_DISABLE_ACK: + pr_err("adsp: ADSP_IOCTL_DISABLE_ACK is not implemented.\n"); + break; + + case ADSP_IOCTL_WRITE_COMMAND: + return adsp_write_cmd(adev, (void __user *) arg); + + case ADSP_IOCTL_GET_EVENT: + return adsp_get_event(adev, (void __user *) arg); + + case ADSP_IOCTL_SET_CLKRATE: { +#if CONFIG_MSM_AMSS_VERSION==6350 + unsigned long clk_rate; + if (copy_from_user(&clk_rate, (void *) arg, sizeof(clk_rate))) + return -EFAULT; + return adsp_set_clkrate(adev->module, clk_rate); +#endif + } + + case ADSP_IOCTL_REGISTER_PMEM: { + struct adsp_pmem_info info; + if (copy_from_user(&info, (void *) arg, sizeof(info))) + return -EFAULT; + return adsp_pmem_add(adev->module, &info); + } + + case ADSP_IOCTL_ABORT_EVENT_READ: + adev->abort = 1; + wake_up(&adev->event_wait); + break; + + default: + break; + } + return -EINVAL; +} + +static int adsp_release(struct inode 
*inode, struct file *filp) +{ + struct adsp_device *adev = filp->private_data; + struct msm_adsp_module *module = adev->module; + struct hlist_node *node, *tmp; + struct adsp_pmem_region *region; + + pr_info("adsp_release() '%s'\n", adev->name); + + /* clear module before putting it to avoid race with open() */ + adev->module = NULL; + + mutex_lock(&module->pmem_regions_lock); + hlist_for_each_safe(node, tmp, &module->pmem_regions) { + region = hlist_entry(node, struct adsp_pmem_region, list); + hlist_del(node); + put_pmem_file(region->file); + kfree(region); + } + mutex_unlock(&module->pmem_regions_lock); + BUG_ON(!hlist_empty(&module->pmem_regions)); + + msm_adsp_put(module); + return 0; +} + +static void adsp_event(void *driver_data, unsigned id, size_t len, + void (*getevent)(void *ptr, size_t len)) +{ + struct adsp_device *adev = driver_data; + struct adsp_event *event; + unsigned long flags; + + if (len > ADSP_EVENT_MAX_SIZE) { + pr_err("adsp_event: event too large (%d bytes)\n", len); + return; + } + + event = kmalloc(sizeof(*event), GFP_ATOMIC); + if (!event) { + pr_err("adsp_event: cannot allocate buffer\n"); + return; + } + + if (id != EVENT_MSG_ID) { + event->type = 0; + event->is16 = 0; + event->msg_id = id; + event->size = len; + + getevent(event->data.msg16, len); + } else { + event->type = 1; + event->is16 = 1; + event->msg_id = id; + event->size = len; + getevent(event->data.msg32, len); + } + + spin_lock_irqsave(&adev->event_queue_lock, flags); + list_add_tail(&event->list, &adev->event_queue); + spin_unlock_irqrestore(&adev->event_queue_lock, flags); + wake_up(&adev->event_wait); +} + +static struct msm_adsp_ops adsp_ops = { + .event = adsp_event, +}; + +static int adsp_open(struct inode *inode, struct file *filp) +{ + struct adsp_device *adev; + int rc; + + rc = nonseekable_open(inode, filp); + if (rc < 0) + return rc; + + adev = inode_to_device(inode); + if (!adev) + return -ENODEV; + + pr_info("adsp_open() name = '%s'\n", adev->name); + + rc = msm_adsp_get(adev->name, &adev->module, &adsp_ops, adev); + if (rc) + return rc; + + pr_info("adsp_open() module '%s' adev %p\n", adev->name, adev); + filp->private_data = adev; + adev->abort = 0; + INIT_HLIST_HEAD(&adev->module->pmem_regions); + mutex_init(&adev->module->pmem_regions_lock); + + return 0; +} + +static unsigned adsp_device_count; +static struct adsp_device *adsp_devices; + +static struct adsp_device *inode_to_device(struct inode *inode) +{ + unsigned n = MINOR(inode->i_rdev); + if (n < adsp_device_count) { + if (adsp_devices[n].device) + return adsp_devices + n; + } + return NULL; +} + +static dev_t adsp_devno; +static struct class *adsp_class; + +static struct file_operations adsp_fops = { + .owner = THIS_MODULE, + .open = adsp_open, + .unlocked_ioctl = adsp_ioctl, + .release = adsp_release, +}; + +static void adsp_create(struct adsp_device *adev, const char *name, + struct device *parent, dev_t devt) +{ + struct device *dev; + int rc; + + dev = device_create(adsp_class, parent, devt, "%s", name); + if (IS_ERR(dev)) + return; + + init_waitqueue_head(&adev->event_wait); + INIT_LIST_HEAD(&adev->event_queue); + spin_lock_init(&adev->event_queue_lock); + + cdev_init(&adev->cdev, &adsp_fops); + adev->cdev.owner = THIS_MODULE; + + rc = cdev_add(&adev->cdev, devt, 1); + if (rc < 0) { + device_destroy(adsp_class, devt); + } else { + adev->device = dev; + adev->name = name; + } +} + +void msm_adsp_publish_cdevs(struct msm_adsp_module *modules, unsigned n) +{ + int rc; + + adsp_devices = kzalloc(sizeof(struct adsp_device) 
* n, GFP_KERNEL); + if (!adsp_devices) + return; + + adsp_class = class_create(THIS_MODULE, "adsp"); + if (IS_ERR(adsp_class)) + goto fail_create_class; + + rc = alloc_chrdev_region(&adsp_devno, 0, n, "adsp"); + if (rc < 0) + goto fail_alloc_region; + + adsp_device_count = n; + for (n = 0; n < adsp_device_count; n++) { + adsp_create(adsp_devices + n, + modules[n].name, &modules[n].pdev.dev, + MKDEV(MAJOR(adsp_devno), n)); + } + + return; + +fail_alloc_region: + class_unregister(adsp_class); +fail_create_class: + kfree(adsp_devices); +} diff --git a/arch/arm/mach-msm/qdsp5/adsp_info.c b/arch/arm/mach-msm/qdsp5/adsp_info.c new file mode 100644 index 0000000000000..b9c77d20b5c4a --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/adsp_info.c @@ -0,0 +1,121 @@ +/* arch/arm/mach-msm/adsp_info.c + * + * Copyright (c) 2008 QUALCOMM Incorporated. + * Copyright (c) 2008 QUALCOMM USA, INC. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include "adsp.h" + +/* Firmware modules */ +#define QDSP_MODULE_KERNEL 0x0106dd4e +#define QDSP_MODULE_AFETASK 0x0106dd6f +#define QDSP_MODULE_AUDPLAY0TASK 0x0106dd70 +#define QDSP_MODULE_AUDPLAY1TASK 0x0106dd71 +#define QDSP_MODULE_AUDPPTASK 0x0106dd72 +#define QDSP_MODULE_VIDEOTASK 0x0106dd73 +#define QDSP_MODULE_VIDEO_AAC_VOC 0x0106dd74 +#define QDSP_MODULE_PCM_DEC 0x0106dd75 +#define QDSP_MODULE_AUDIO_DEC_MP3 0x0106dd76 +#define QDSP_MODULE_AUDIO_DEC_AAC 0x0106dd77 +#define QDSP_MODULE_AUDIO_DEC_WMA 0x0106dd78 +#define QDSP_MODULE_HOSTPCM 0x0106dd79 +#define QDSP_MODULE_DTMF 0x0106dd7a +#define QDSP_MODULE_AUDRECTASK 0x0106dd7b +#define QDSP_MODULE_AUDPREPROCTASK 0x0106dd7c +#define QDSP_MODULE_SBC_ENC 0x0106dd7d +#define QDSP_MODULE_VOC_UMTS 0x0106dd9a +#define QDSP_MODULE_VOC_CDMA 0x0106dd98 +#define QDSP_MODULE_VOC_PCM 0x0106dd7f +#define QDSP_MODULE_VOCENCTASK 0x0106dd80 +#define QDSP_MODULE_VOCDECTASK 0x0106dd81 +#define QDSP_MODULE_VOICEPROCTASK 0x0106dd82 +#define QDSP_MODULE_VIDEOENCTASK 0x0106dd83 +#define QDSP_MODULE_VFETASK 0x0106dd84 +#define QDSP_MODULE_WAV_ENC 0x0106dd85 +#define QDSP_MODULE_AACLC_ENC 0x0106dd86 +#define QDSP_MODULE_VIDEO_AMR 0x0106dd87 +#define QDSP_MODULE_VOC_AMR 0x0106dd88 +#define QDSP_MODULE_VOC_EVRC 0x0106dd89 +#define QDSP_MODULE_VOC_13K 0x0106dd8a +#define QDSP_MODULE_VOC_FGV 0x0106dd8b +#define QDSP_MODULE_DIAGTASK 0x0106dd8c +#define QDSP_MODULE_JPEGTASK 0x0106dd8d +#define QDSP_MODULE_LPMTASK 0x0106dd8e +#define QDSP_MODULE_QCAMTASK 0x0106dd8f +#define QDSP_MODULE_MODMATHTASK 0x0106dd90 +#define QDSP_MODULE_AUDPLAY2TASK 0x0106dd91 +#define QDSP_MODULE_AUDPLAY3TASK 0x0106dd92 +#define QDSP_MODULE_AUDPLAY4TASK 0x0106dd93 +#define QDSP_MODULE_GRAPHICSTASK 0x0106dd94 +#define QDSP_MODULE_MIDI 0x0106dd95 +#define QDSP_MODULE_GAUDIO 0x0106dd96 +#define QDSP_MODULE_VDEC_LP_MODE 0x0106dd97 +#define QDSP_MODULE_MAX 0x7fffffff + + /* DO NOT USE: Force this enum to be a 32bit type to improve speed */ +#define QDSP_MODULE_32BIT_DUMMY 0x10000 + +static uint32_t *qdsp_task_to_module[IMG_MAX]; +static uint32_t *qdsp_queue_offset_table[IMG_MAX]; + +#define QDSP_MODULE(n, clkname, clkrate, verify_cmd_func, patch_event_func) 
\ + { .name = #n, .pdev_name = "adsp_" #n, .id = QDSP_MODULE_##n, \ + .clk_name = clkname, .clk_rate = clkrate, \ + .verify_cmd = verify_cmd_func, .patch_event = patch_event_func } + +static struct adsp_module_info module_info[] = { + QDSP_MODULE(AUDPLAY0TASK, NULL, 0, NULL, NULL), + QDSP_MODULE(AUDPPTASK, NULL, 0, NULL, NULL), + QDSP_MODULE(AUDRECTASK, NULL, 0, NULL, NULL), + QDSP_MODULE(AUDPREPROCTASK, NULL, 0, NULL, NULL), + QDSP_MODULE(VFETASK, "vfe_clk", 0, adsp_vfe_verify_cmd, + adsp_vfe_patch_event), + QDSP_MODULE(QCAMTASK, NULL, 0, NULL, NULL), + QDSP_MODULE(LPMTASK, NULL, 0, adsp_lpm_verify_cmd, NULL), + QDSP_MODULE(JPEGTASK, "vdc_clk", 96000000, adsp_jpeg_verify_cmd, + adsp_jpeg_patch_event), + QDSP_MODULE(VIDEOTASK, "vdc_clk", 96000000, + adsp_video_verify_cmd, NULL), + QDSP_MODULE(VDEC_LP_MODE, NULL, 0, NULL, NULL), + QDSP_MODULE(VIDEOENCTASK, "vdc_clk", 96000000, + adsp_videoenc_verify_cmd, NULL), +}; + +int adsp_init_info(struct adsp_info *info) +{ + uint32_t img_num; + + info->send_irq = 0x00c00200; + info->read_ctrl = 0x00400038; + info->write_ctrl = 0x00400034; + + info->max_msg16_size = 193; + info->max_msg32_size = 8; + for (img_num = 0; img_num < IMG_MAX; img_num++) + qdsp_queue_offset_table[img_num] = + &info->init_info_ptr->queue_offsets[img_num][0]; + + for (img_num = 0; img_num < IMG_MAX; img_num++) + qdsp_task_to_module[img_num] = + &info->init_info_ptr->task_to_module_tbl[img_num][0]; + info->max_task_id = 30; + info->max_module_id = QDSP_MODULE_MAX - 1; + info->max_queue_id = QDSP_MAX_NUM_QUEUES; + info->max_image_id = 2; + info->queue_offset = qdsp_queue_offset_table; + info->task_to_module = qdsp_task_to_module; + + info->module_count = ARRAY_SIZE(module_info); + info->module = module_info; + return 0; +} diff --git a/arch/arm/mach-msm/qdsp5/adsp_jpeg_patch_event.c b/arch/arm/mach-msm/qdsp5/adsp_jpeg_patch_event.c new file mode 100644 index 0000000000000..4f493edb6c946 --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/adsp_jpeg_patch_event.c @@ -0,0 +1,31 @@ +/* arch/arm/mach-msm/qdsp5/adsp_jpeg_patch_event.c + * + * Verification code for aDSP JPEG events. + * + * Copyright (c) 2008 QUALCOMM Incorporated + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include "adsp.h" + +int adsp_jpeg_patch_event(struct msm_adsp_module *module, + struct adsp_event *event) +{ + if (event->msg_id == JPEG_MSG_ENC_OP_PRODUCED) { + jpeg_msg_enc_op_produced *op = (jpeg_msg_enc_op_produced *)event->data.msg16; + return adsp_pmem_paddr_fixup(module, (void **)&op->op_buf_addr); + } + + return 0; +} diff --git a/arch/arm/mach-msm/qdsp5/adsp_jpeg_verify_cmd.c b/arch/arm/mach-msm/qdsp5/adsp_jpeg_verify_cmd.c new file mode 100644 index 0000000000000..b33eba25569c7 --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/adsp_jpeg_verify_cmd.c @@ -0,0 +1,182 @@ +/* arch/arm/mach-msm/qdsp5/adsp_jpeg_verify_cmd.c + * + * Verification code for aDSP JPEG packets from userspace. + * + * Copyright (c) 2008 QUALCOMM Incorporated + * Copyright (C) 2008 Google, Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include "adsp.h" + +static uint32_t dec_fmt; + +static inline void get_sizes(jpeg_cmd_enc_cfg *cmd, uint32_t *luma_size, + uint32_t *chroma_size) +{ + uint32_t fmt, luma_width, luma_height; + + fmt = cmd->process_cfg & JPEG_CMD_ENC_PROCESS_CFG_IP_DATA_FORMAT_M; + luma_width = (cmd->ip_size_cfg & JPEG_CMD_IP_SIZE_CFG_LUMA_WIDTH_M) + >> 16; + luma_height = cmd->frag_cfg & JPEG_CMD_FRAG_SIZE_LUMA_HEIGHT_M; + *luma_size = luma_width * luma_height; + if (fmt == JPEG_CMD_ENC_PROCESS_CFG_IP_DATA_FORMAT_H2V2) + *chroma_size = *luma_size/2; + else + *chroma_size = *luma_size; +} + +static inline int verify_jpeg_cmd_enc_cfg(struct msm_adsp_module *module, + void *cmd_data, size_t cmd_size) +{ + jpeg_cmd_enc_cfg *cmd = (jpeg_cmd_enc_cfg *)cmd_data; + uint32_t luma_size, chroma_size; + int i, num_frags; + + if (cmd_size != sizeof(jpeg_cmd_enc_cfg)) { + printk(KERN_ERR "adsp: module %s: JPEG ENC CFG invalid cmd_size %d\n", + module->name, cmd_size); + return -1; + } + + get_sizes(cmd, &luma_size, &chroma_size); + num_frags = (cmd->process_cfg >> 10) & 0xf; + num_frags = ((num_frags == 1) ? num_frags : num_frags * 2); + for (i = 0; i < num_frags; i += 2) { + if (adsp_pmem_fixup(module, (void **)(&cmd->frag_cfg_part[i]), luma_size) || + adsp_pmem_fixup(module, (void **)(&cmd->frag_cfg_part[i+1]), chroma_size)) + return -1; + } + + if (adsp_pmem_fixup(module, (void **)&cmd->op_buf_0_cfg_part1, + cmd->op_buf_0_cfg_part2) || + adsp_pmem_fixup(module, (void **)&cmd->op_buf_1_cfg_part1, + cmd->op_buf_1_cfg_part2)) + return -1; + return 0; +} + +static inline int verify_jpeg_cmd_dec_cfg(struct msm_adsp_module *module, + void *cmd_data, size_t cmd_size) +{ + jpeg_cmd_dec_cfg *cmd = (jpeg_cmd_dec_cfg *)cmd_data; + uint32_t div; + + if (cmd_size != sizeof(jpeg_cmd_dec_cfg)) { + printk(KERN_ERR "adsp: module %s: JPEG DEC CFG invalid cmd_size %d\n", + module->name, cmd_size); + return -1; + } + + if (adsp_pmem_fixup(module, (void **)&cmd->ip_stream_buf_cfg_part1, + cmd->ip_stream_buf_cfg_part2) || + adsp_pmem_fixup(module, (void **)&cmd->op_stream_buf_0_cfg_part1, + cmd->op_stream_buf_0_cfg_part2) || + adsp_pmem_fixup(module, (void **)&cmd->op_stream_buf_1_cfg_part1, + cmd->op_stream_buf_1_cfg_part2)) + return -1; + dec_fmt = cmd->op_data_format & + JPEG_CMD_DEC_OP_DATA_FORMAT_M; + div = (dec_fmt == JPEG_CMD_DEC_OP_DATA_FORMAT_H2V2) ? 
2 : 1; + if (adsp_pmem_fixup(module, (void **)&cmd->op_stream_buf_0_cfg_part3, + cmd->op_stream_buf_0_cfg_part2 / div) || + adsp_pmem_fixup(module, (void **)&cmd->op_stream_buf_1_cfg_part3, + cmd->op_stream_buf_1_cfg_part2 / div)) + return -1; + return 0; +} + +static int verify_jpeg_cfg_cmd(struct msm_adsp_module *module, + void *cmd_data, size_t cmd_size) +{ + uint32_t cmd_id = ((uint32_t *)cmd_data)[0]; + switch(cmd_id) { + case JPEG_CMD_ENC_CFG: + return verify_jpeg_cmd_enc_cfg(module, cmd_data, cmd_size); + case JPEG_CMD_DEC_CFG: + return verify_jpeg_cmd_dec_cfg(module, cmd_data, cmd_size); + default: + if (cmd_id > 1) { + printk(KERN_ERR "adsp: module %s: invalid JPEG CFG cmd_id %d\n", module->name, cmd_id); + return -1; + } + } + return 0; +} + +static int verify_jpeg_action_cmd(struct msm_adsp_module *module, + void *cmd_data, size_t cmd_size) +{ + uint32_t cmd_id = ((uint32_t *)cmd_data)[0]; + switch (cmd_id) { + case JPEG_CMD_ENC_OP_CONSUMED: + { + jpeg_cmd_enc_op_consumed *cmd = + (jpeg_cmd_enc_op_consumed *)cmd_data; + + if (cmd_size != sizeof(jpeg_cmd_enc_op_consumed)) { + printk(KERN_ERR "adsp: module %s: JPEG_CMD_ENC_OP_CONSUMED invalid size %d\n", + module->name, cmd_size); + return -1; + } + + if (adsp_pmem_fixup(module, (void **)&cmd->op_buf_addr, + cmd->op_buf_size)) + return -1; + } + break; + case JPEG_CMD_DEC_OP_CONSUMED: + { + uint32_t div; + jpeg_cmd_dec_op_consumed *cmd = + (jpeg_cmd_dec_op_consumed *)cmd_data; + + if (cmd_size != sizeof(jpeg_cmd_enc_op_consumed)) { + printk(KERN_ERR "adsp: module %s: JPEG_CMD_DEC_OP_CONSUMED invalid size %d\n", + module->name, cmd_size); + return -1; + } + + div = (dec_fmt == JPEG_CMD_DEC_OP_DATA_FORMAT_H2V2) ? 2 : 1; + if (adsp_pmem_fixup(module, (void **)&cmd->luma_op_buf_addr, + cmd->luma_op_buf_size) || + adsp_pmem_fixup(module, (void **)&cmd->chroma_op_buf_addr, + cmd->luma_op_buf_size / div)) + return -1; + } + break; + default: + if (cmd_id > 7) { + printk(KERN_ERR "adsp: module %s: invalid cmd_id %d\n", + module->name, cmd_id); + return -1; + } + } + return 0; +} + +int adsp_jpeg_verify_cmd(struct msm_adsp_module *module, + unsigned int queue_id, void *cmd_data, + size_t cmd_size) +{ + switch(queue_id) { + case QDSP_uPJpegCfgCmdQueue: + return verify_jpeg_cfg_cmd(module, cmd_data, cmd_size); + case QDSP_uPJpegActionCmdQueue: + return verify_jpeg_action_cmd(module, cmd_data, cmd_size); + default: + return -1; + } +} + diff --git a/arch/arm/mach-msm/qdsp5/adsp_lpm_verify_cmd.c b/arch/arm/mach-msm/qdsp5/adsp_lpm_verify_cmd.c new file mode 100644 index 0000000000000..1e23ef392700f --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/adsp_lpm_verify_cmd.c @@ -0,0 +1,65 @@ +/* arch/arm/mach-msm/qdsp5/adsp_lpm_verify_cmd.c + * + * Verificion code for aDSP LPM packets from userspace. + * + * Copyright (c) 2008 QUALCOMM Incorporated + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include "adsp.h" + +int adsp_lpm_verify_cmd(struct msm_adsp_module *module, + unsigned int queue_id, void *cmd_data, + size_t cmd_size) +{ + uint32_t cmd_id, col_height, input_row_incr, output_row_incr, + input_size, output_size; + uint32_t size_mask = 0x0fff; + lpm_cmd_start *cmd; + + if (queue_id != QDSP_lpmCommandQueue) { + printk(KERN_ERR "adsp: module %s: wrong queue id %d\n", + module->name, queue_id); + return -1; + } + + cmd = (lpm_cmd_start *)cmd_data; + cmd_id = cmd->cmd_id; + + if (cmd_id == LPM_CMD_START) { + if (cmd_size != sizeof(lpm_cmd_start)) { + printk(KERN_ERR "adsp: module %s: wrong size %d, expect %d\n", + module->name, cmd_size, sizeof(lpm_cmd_start)); + return -1; + } + col_height = cmd->ip_data_cfg_part1 & size_mask; + input_row_incr = cmd->ip_data_cfg_part2 & size_mask; + output_row_incr = cmd->op_data_cfg_part1 & size_mask; + input_size = col_height * input_row_incr; + output_size = col_height * output_row_incr; + if ((cmd->ip_data_cfg_part4 && adsp_pmem_fixup(module, + (void **)(&cmd->ip_data_cfg_part4), + input_size)) || + (cmd->op_data_cfg_part3 && adsp_pmem_fixup(module, + (void **)(&cmd->op_data_cfg_part3), + output_size))) + return -1; + } else if (cmd_id > 1) { + printk(KERN_ERR "adsp: module %s: invalid cmd_id %d\n", + module->name, cmd_id); + return -1; + } + return 0; +} + diff --git a/arch/arm/mach-msm/qdsp5/adsp_vfe_patch_event.c b/arch/arm/mach-msm/qdsp5/adsp_vfe_patch_event.c new file mode 100644 index 0000000000000..8f09ed237174b --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/adsp_vfe_patch_event.c @@ -0,0 +1,54 @@ +/* arch/arm/mach-msm/qdsp5/adsp_vfe_patch_event.c + * + * Verification code for aDSP VFE packets from userspace. + * + * Copyright (c) 2008 QUALCOMM Incorporated + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include "adsp.h" + +static int patch_op_event(struct msm_adsp_module *module, + struct adsp_event *event) +{ + vfe_msg_op1 *op = (vfe_msg_op1 *)event->data.msg16; + if (adsp_pmem_paddr_fixup(module, (void **)&op->op1_buf_y_addr) || + adsp_pmem_paddr_fixup(module, (void **)&op->op1_buf_cbcr_addr)) + return -1; + return 0; +} + +static int patch_af_wb_event(struct msm_adsp_module *module, + struct adsp_event *event) +{ + vfe_msg_stats_wb_exp *af = (vfe_msg_stats_wb_exp *)event->data.msg16; + return adsp_pmem_paddr_fixup(module, (void **)&af->wb_exp_stats_op_buf); +} + +int adsp_vfe_patch_event(struct msm_adsp_module *module, + struct adsp_event *event) +{ + switch(event->msg_id) { + case VFE_MSG_OP1: + case VFE_MSG_OP2: + return patch_op_event(module, event); + case VFE_MSG_STATS_AF: + case VFE_MSG_STATS_WB_EXP: + return patch_af_wb_event(module, event); + default: + break; + } + + return 0; +} diff --git a/arch/arm/mach-msm/qdsp5/adsp_vfe_verify_cmd.c b/arch/arm/mach-msm/qdsp5/adsp_vfe_verify_cmd.c new file mode 100644 index 0000000000000..d1f3fa8a635cd --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/adsp_vfe_verify_cmd.c @@ -0,0 +1,239 @@ +/* arch/arm/mach-msm/qdsp5/adsp_vfe_verify_cmd.c + * + * Verification code for aDSP VFE packets from userspace. + * + * Copyright (c) 2008 QUALCOMM Incorporated + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include "adsp.h" + +static uint32_t size1_y, size2_y, size1_cbcr, size2_cbcr; +static uint32_t af_size = 4228; +static uint32_t awb_size = 8196; + +static inline int verify_cmd_op_ack(struct msm_adsp_module *module, + void *cmd_data, size_t cmd_size) +{ + vfe_cmd_op1_ack *cmd = (vfe_cmd_op1_ack *)cmd_data; + void **addr_y = (void **)&cmd->op1_buf_y_addr; + void **addr_cbcr = (void **)(&cmd->op1_buf_cbcr_addr); + + if (cmd_size != sizeof(vfe_cmd_op1_ack)) + return -1; + if ((*addr_y && adsp_pmem_fixup(module, addr_y, size1_y)) || + (*addr_cbcr && adsp_pmem_fixup(module, addr_cbcr, size1_cbcr))) + return -1; + return 0; +} + +static inline int verify_cmd_stats_autofocus_cfg(struct msm_adsp_module *module, + void *cmd_data, size_t cmd_size) +{ + int i; + vfe_cmd_stats_autofocus_cfg *cmd = + (vfe_cmd_stats_autofocus_cfg *)cmd_data; + + if (cmd_size != sizeof(vfe_cmd_stats_autofocus_cfg)) + return -1; + + for (i = 0; i < 3; i++) { + void **addr = (void **)(&cmd->af_stats_op_buf[i]); + if (*addr && adsp_pmem_fixup(module, addr, af_size)) + return -1; + } + return 0; +} + +static inline int verify_cmd_stats_wb_exp_cfg(struct msm_adsp_module *module, + void *cmd_data, size_t cmd_size) +{ + vfe_cmd_stats_wb_exp_cfg *cmd = + (vfe_cmd_stats_wb_exp_cfg *)cmd_data; + int i; + + if (cmd_size != sizeof(vfe_cmd_stats_wb_exp_cfg)) + return -1; + + for (i = 0; i < 3; i++) { + void **addr = (void **)(&cmd->wb_exp_stats_op_buf[i]); + if (*addr && adsp_pmem_fixup(module, addr, awb_size)) + return -1; + } + return 0; +} + +static inline int verify_cmd_stats_af_ack(struct msm_adsp_module *module, + void *cmd_data, size_t cmd_size) +{ + vfe_cmd_stats_af_ack *cmd = (vfe_cmd_stats_af_ack *)cmd_data; + void **addr = (void **)&cmd->af_stats_op_buf; + + if (cmd_size != sizeof(vfe_cmd_stats_af_ack)) + return -1; + + if (*addr && adsp_pmem_fixup(module, addr, af_size)) + return -1; + return 0; +} + +static inline int verify_cmd_stats_wb_exp_ack(struct msm_adsp_module *module, + void *cmd_data, size_t cmd_size) +{ + vfe_cmd_stats_wb_exp_ack *cmd = + (vfe_cmd_stats_wb_exp_ack *)cmd_data; + void **addr = (void **)&cmd->wb_exp_stats_op_buf; + + if (cmd_size != sizeof(vfe_cmd_stats_wb_exp_ack)) + return -1; + + if (*addr && adsp_pmem_fixup(module, addr, awb_size)) + return -1; + return 0; +} + +static int verify_vfe_command(struct msm_adsp_module *module, + void *cmd_data, size_t cmd_size) +{ + uint32_t cmd_id = ((uint32_t *)cmd_data)[0]; + switch (cmd_id) { + case VFE_CMD_OP1_ACK: + return verify_cmd_op_ack(module, cmd_data, cmd_size); + case VFE_CMD_OP2_ACK: + return verify_cmd_op_ack(module, cmd_data, cmd_size); + case VFE_CMD_STATS_AUTOFOCUS_CFG: + return verify_cmd_stats_autofocus_cfg(module, cmd_data, + cmd_size); + case VFE_CMD_STATS_WB_EXP_CFG: + return verify_cmd_stats_wb_exp_cfg(module, cmd_data, cmd_size); + case VFE_CMD_STATS_AF_ACK: + return verify_cmd_stats_af_ack(module, cmd_data, cmd_size); + case VFE_CMD_STATS_WB_EXP_ACK: + return verify_cmd_stats_wb_exp_ack(module, cmd_data, cmd_size); + default: + if (cmd_id > 29) { + printk(KERN_ERR "adsp: module %s: invalid VFE command id %d\n", module->name, cmd_id); + return -1; + } + } + return 0; +} + +static int verify_vfe_command_scale(struct msm_adsp_module *module, + void *cmd_data, size_t cmd_size) +{ + uint32_t cmd_id = ((uint32_t *)cmd_data)[0]; + // FIXME: check the size + if (cmd_id > 1) { + printk(KERN_ERR "adsp: module %s: invalid VFE SCALE command id %d\n", module->name, cmd_id); + return -1; + } + return 0; +} + + +static uint32_t 
get_size(uint32_t hw) +{ + uint32_t height, width; + uint32_t height_mask = 0x3ffc; + uint32_t width_mask = 0x3ffc000; + + height = (hw & height_mask) >> 2; + width = (hw & width_mask) >> 14 ; + return height * width; +} + +static int verify_vfe_command_table(struct msm_adsp_module *module, + void *cmd_data, size_t cmd_size) +{ + uint32_t cmd_id = ((uint32_t *)cmd_data)[0]; + int i; + + switch (cmd_id) { + case VFE_CMD_AXI_IP_CFG: + { + vfe_cmd_axi_ip_cfg *cmd = (vfe_cmd_axi_ip_cfg *)cmd_data; + uint32_t size; + if (cmd_size != sizeof(vfe_cmd_axi_ip_cfg)) { + printk(KERN_ERR "adsp: module %s: invalid VFE TABLE (VFE_CMD_AXI_IP_CFG) command size %d\n", + module->name, cmd_size); + return -1; + } + size = get_size(cmd->ip_cfg_part2); + + for (i = 0; i < 8; i++) { + void **addr = (void **) + &cmd->ip_buf_addr[i]; + if (*addr && adsp_pmem_fixup(module, addr, size)) + return -1; + } + } + case VFE_CMD_AXI_OP_CFG: + { + vfe_cmd_axi_op_cfg *cmd = (vfe_cmd_axi_op_cfg *)cmd_data; + void **addr1_y, **addr2_y, **addr1_cbcr, **addr2_cbcr; + + if (cmd_size != sizeof(vfe_cmd_axi_op_cfg)) { + printk(KERN_ERR "adsp: module %s: invalid VFE TABLE (VFE_CMD_AXI_OP_CFG) command size %d\n", + module->name, cmd_size); + return -1; + } + size1_y = get_size(cmd->op1_y_cfg_part2); + size1_cbcr = get_size(cmd->op1_cbcr_cfg_part2); + size2_y = get_size(cmd->op2_y_cfg_part2); + size2_cbcr = get_size(cmd->op2_cbcr_cfg_part2); + for (i = 0; i < 8; i++) { + addr1_y = (void **)(&cmd->op1_buf1_addr[2*i]); + addr1_cbcr = (void **)(&cmd->op1_buf1_addr[2*i+1]); + addr2_y = (void **)(&cmd->op2_buf1_addr[2*i]); + addr2_cbcr = (void **)(&cmd->op2_buf1_addr[2*i+1]); +/* + printk("module %s: [%d] %p %p %p %p\n", + module->name, i, + *addr1_y, *addr1_cbcr, *addr2_y, *addr2_cbcr); +*/ + if ((*addr1_y && adsp_pmem_fixup(module, addr1_y, size1_y)) || + (*addr1_cbcr && adsp_pmem_fixup(module, addr1_cbcr, size1_cbcr)) || + (*addr2_y && adsp_pmem_fixup(module, addr2_y, size2_y)) || + (*addr2_cbcr && adsp_pmem_fixup(module, addr2_cbcr, size2_cbcr))) + return -1; + } + } + default: + if (cmd_id > 4) { + printk(KERN_ERR "adsp: module %s: invalid VFE TABLE command id %d\n", + module->name, cmd_id); + return -1; + } + } + return 0; +} + +int adsp_vfe_verify_cmd(struct msm_adsp_module *module, + unsigned int queue_id, void *cmd_data, + size_t cmd_size) +{ + switch (queue_id) { + case QDSP_vfeCommandQueue: + return verify_vfe_command(module, cmd_data, cmd_size); + case QDSP_vfeCommandScaleQueue: + return verify_vfe_command_scale(module, cmd_data, cmd_size); + case QDSP_vfeCommandTableQueue: + return verify_vfe_command_table(module, cmd_data, cmd_size); + default: + printk(KERN_ERR "adsp: module %s: unknown queue id %d\n", + module->name, queue_id); + return -1; + } +} diff --git a/arch/arm/mach-msm/qdsp5/adsp_video_verify_cmd.c b/arch/arm/mach-msm/qdsp5/adsp_video_verify_cmd.c new file mode 100644 index 0000000000000..fdad0551d237f --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/adsp_video_verify_cmd.c @@ -0,0 +1,163 @@ +/* arch/arm/mach-msm/qdsp5/adsp_video_verify_cmd.c + * + * Verificion code for aDSP VDEC packets from userspace. + * + * Copyright (c) 2008 QUALCOMM Incorporated + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
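/*
 * Illustrative sketch, not part of this patch.  The masks and shifts in
 * get_size() above imply that the AXI config word packs two 12-bit
 * fields: height in bits [13:2] and width in bits [25:14].  The helpers
 * below just show that layout round-tripping; the 640x480 numbers are
 * example values.
 */
#include <stdint.h>
#include <assert.h>

static uint32_t pack_axi_dim(uint32_t height, uint32_t width)
{
	return ((height & 0xfff) << 2) | ((width & 0xfff) << 14);
}

static uint32_t unpack_axi_size(uint32_t hw)
{
	uint32_t height = (hw & 0x3ffc) >> 2;
	uint32_t width = (hw & 0x3ffc000) >> 14;

	return height * width;
}

static void axi_dim_example(void)
{
	uint32_t hw = pack_axi_dim(480, 640);

	assert(unpack_axi_size(hw) == 480 * 640);
}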
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include + +#define ADSP_DEBUG_MSGS 0 +#if ADSP_DEBUG_MSGS +#define DLOG(fmt,args...) \ + do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \ + ##args); } \ + while (0) +#else +#define DLOG(x...) do {} while (0) +#endif + + +#include +#include "adsp.h" + +static inline void *high_low_short_to_ptr(unsigned short high, + unsigned short low) +{ + return (void *)((((unsigned long)high) << 16) | ((unsigned long)low)); +} + +static inline void ptr_to_high_low_short(void *ptr, unsigned short *high, + unsigned short *low) +{ + *high = (unsigned short)((((unsigned long)ptr) >> 16) & 0xffff); + *low = (unsigned short)((unsigned long)ptr & 0xffff); +} + +static int pmem_fixup_high_low(unsigned short *high, + unsigned short *low, + unsigned short size_high, + unsigned short size_low, + struct msm_adsp_module *module, + unsigned long *addr, unsigned long *size) +{ + void *phys_addr; + unsigned long phys_size; + unsigned long kvaddr; + + phys_addr = high_low_short_to_ptr(*high, *low); + phys_size = (unsigned long)high_low_short_to_ptr(size_high, size_low); + DLOG("virt %x %x\n", phys_addr, phys_size); + if (adsp_pmem_fixup_kvaddr(module, &phys_addr, &kvaddr, phys_size)) { + DLOG("ah%x al%x sh%x sl%x addr %x size %x\n", + *high, *low, size_high, size_low, phys_addr, phys_size); + return -1; + } + ptr_to_high_low_short(phys_addr, high, low); + DLOG("phys %x %x\n", phys_addr, phys_size); + if (addr) + *addr = kvaddr; + if (size) + *size = phys_size; + return 0; +} + +static int verify_vdec_pkt_cmd(struct msm_adsp_module *module, + void *cmd_data, size_t cmd_size) +{ + unsigned short cmd_id = ((unsigned short *)cmd_data)[0]; + viddec_cmd_subframe_pkt *pkt; + unsigned long subframe_pkt_addr; + unsigned long subframe_pkt_size; + viddec_cmd_frame_header_packet *frame_header_pkt; + int i, num_addr, skip; + unsigned short *frame_buffer_high, *frame_buffer_low; + unsigned long frame_buffer_size; + unsigned short frame_buffer_size_high, frame_buffer_size_low; + + DLOG("cmd_size %d cmd_id %d cmd_data %x\n", cmd_size, cmd_id, cmd_data); + if (cmd_id != VIDDEC_CMD_SUBFRAME_PKT) { + printk(KERN_INFO "adsp_video: unknown video packet %u\n", + cmd_id); + return 0; + } + if (cmd_size < sizeof(viddec_cmd_subframe_pkt)) + return -1; + + pkt = (viddec_cmd_subframe_pkt *)cmd_data; + + if (pmem_fixup_high_low(&(pkt->subframe_packet_high), + &(pkt->subframe_packet_low), + pkt->subframe_packet_size_high, + pkt->subframe_packet_size_low, + module, + &subframe_pkt_addr, + &subframe_pkt_size)) + return -1; + + /* deref those ptrs and check if they are a frame header packet */ + frame_header_pkt = (viddec_cmd_frame_header_packet *)subframe_pkt_addr; + + switch (frame_header_pkt->packet_id) { + case 0xB201: /* h.264 */ + num_addr = skip = 8; + break; + case 0x4D01: /* mpeg-4 and h.263 */ + num_addr = 3; + skip = 0; + break; + default: + return 0; + } + + frame_buffer_high = &frame_header_pkt->frame_buffer_0_high; + frame_buffer_low = &frame_header_pkt->frame_buffer_0_low; + frame_buffer_size = (frame_header_pkt->x_dimension * + frame_header_pkt->y_dimension * 3) / 2; + ptr_to_high_low_short((void *)frame_buffer_size, + &frame_buffer_size_high, + &frame_buffer_size_low); + for (i = 0; i < num_addr; i++) { + if (pmem_fixup_high_low(frame_buffer_high, 
frame_buffer_low, + frame_buffer_size_high, + frame_buffer_size_low, + module, + NULL, NULL)) + return -1; + frame_buffer_high += 2; + frame_buffer_low += 2; + } + /* Patch the output buffer. */ + frame_buffer_high += 2*skip; + frame_buffer_low += 2*skip; + if (pmem_fixup_high_low(frame_buffer_high, frame_buffer_low, + frame_buffer_size_high, + frame_buffer_size_low, module, NULL, NULL)) + return -1; + return 0; +} + +int adsp_video_verify_cmd(struct msm_adsp_module *module, + unsigned int queue_id, void *cmd_data, + size_t cmd_size) +{ + switch (queue_id) { + case QDSP_mpuVDecPktQueue: + DLOG("\n"); + return verify_vdec_pkt_cmd(module, cmd_data, cmd_size); + default: + printk(KERN_INFO "unknown video queue %u\n", queue_id); + return 0; + } +} + diff --git a/arch/arm/mach-msm/qdsp5/adsp_videoenc_verify_cmd.c b/arch/arm/mach-msm/qdsp5/adsp_videoenc_verify_cmd.c new file mode 100644 index 0000000000000..ee3744950523b --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/adsp_videoenc_verify_cmd.c @@ -0,0 +1,235 @@ +/* arch/arm/mach-msm/qdsp5/adsp_videoenc_verify_cmd.c + * + * Verification code for aDSP VENC packets from userspace. + * + * Copyright (c) 2008 QUALCOMM Incorporated + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include + +#define ADSP_DEBUG_MSGS 0 +#if ADSP_DEBUG_MSGS +#define DLOG(fmt,args...) \ + do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \ + ##args); } \ + while (0) +#else +#define DLOG(x...)
do {} while (0) +#endif + +#include +#include "adsp.h" + + +static unsigned short x_dimension, y_dimension; + +static inline void *high_low_short_to_ptr(unsigned short high, + unsigned short low) +{ + return (void *)((((unsigned long)high) << 16) | ((unsigned long)low)); +} + +static inline void ptr_to_high_low_short(void *ptr, unsigned short *high, + unsigned short *low) +{ + *high = (unsigned short)((((unsigned long)ptr) >> 16) & 0xffff); + *low = (unsigned short)((unsigned long)ptr & 0xffff); +} + +static int pmem_fixup_high_low(unsigned short *high, + unsigned short *low, + unsigned short size_high, + unsigned short size_low, + struct msm_adsp_module *module, + unsigned long *addr, unsigned long *size) +{ + void *phys_addr; + unsigned long phys_size; + unsigned long kvaddr; + + phys_addr = high_low_short_to_ptr(*high, *low); + phys_size = (unsigned long)high_low_short_to_ptr(size_high, size_low); + DLOG("virt %x %x\n", phys_addr, phys_size); + if (adsp_pmem_fixup_kvaddr(module, &phys_addr, &kvaddr, phys_size)) { + DLOG("ah%x al%x sh%x sl%x addr %x size %x\n", + *high, *low, size_high, size_low, phys_addr, phys_size); + return -1; + } + ptr_to_high_low_short(phys_addr, high, low); + DLOG("phys %x %x\n", phys_addr, phys_size); + if (addr) + *addr = kvaddr; + if (size) + *size = phys_size; + return 0; +} + +static int verify_venc_cmd(struct msm_adsp_module *module, + void *cmd_data, size_t cmd_size) +{ + unsigned short cmd_id = ((unsigned short *)cmd_data)[0]; + unsigned long frame_buf_size, luma_buf_size, chroma_buf_size; + unsigned short frame_buf_size_high, frame_buf_size_low; + unsigned short luma_buf_size_high, luma_buf_size_low; + unsigned short chroma_buf_size_high, chroma_buf_size_low; + videnc_cmd_cfg *config_cmd; + videnc_cmd_frame_start *frame_cmd; + videnc_cmd_dis *dis_cmd; + + DLOG("cmd_size %d cmd_id %d cmd_data %x\n", cmd_size, cmd_id, cmd_data); + switch (cmd_id) { + case VIDENC_CMD_ACTIVE: + if (cmd_size < sizeof(videnc_cmd_active)) + return -1; + break; + case VIDENC_CMD_IDLE: + if (cmd_size < sizeof(videnc_cmd_idle)) + return -1; + x_dimension = y_dimension = 0; + break; + case VIDENC_CMD_STATUS_QUERY: + if (cmd_size < sizeof(videnc_cmd_status_query)) + return -1; + break; + case VIDENC_CMD_RC_CFG: + if (cmd_size < sizeof(videnc_cmd_rc_cfg)) + return -1; + break; + case VIDENC_CMD_INTRA_REFRESH: + if (cmd_size < sizeof(videnc_cmd_intra_refresh)) + return -1; + break; + case VIDENC_CMD_DIGITAL_ZOOM: + if (cmd_size < sizeof(videnc_cmd_digital_zoom)) + return -1; + break; + case VIDENC_CMD_DIS_CFG: + if (cmd_size < sizeof(videnc_cmd_dis_cfg)) + return -1; + break; + case VIDENC_CMD_CFG: + if (cmd_size < sizeof(videnc_cmd_cfg)) + return -1; + config_cmd = (videnc_cmd_cfg *)cmd_data; + x_dimension = ((config_cmd->venc_frame_dim) & 0xFF00)>>8; + x_dimension = x_dimension*16; + y_dimension = (config_cmd->venc_frame_dim) & 0xFF; + y_dimension = y_dimension * 16; + break; + case VIDENC_CMD_FRAME_START: + if (cmd_size < sizeof(videnc_cmd_frame_start)) + return -1; + frame_cmd = (videnc_cmd_frame_start *)cmd_data; + luma_buf_size = x_dimension * y_dimension; + chroma_buf_size = luma_buf_size>>1; + frame_buf_size = luma_buf_size + chroma_buf_size; + ptr_to_high_low_short((void *)luma_buf_size, + &luma_buf_size_high, + &luma_buf_size_low); + ptr_to_high_low_short((void *)chroma_buf_size, + &chroma_buf_size_high, + &chroma_buf_size_low); + ptr_to_high_low_short((void *)frame_buf_size, + &frame_buf_size_high, + &frame_buf_size_low); + /* Address of raw Y data. 
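/*
 * Illustrative sketch, not part of this patch.  It restates two pieces
 * of arithmetic used in verify_venc_cmd() above: venc_frame_dim appears
 * to carry the geometry in 16-pixel units (x in bits [15:8], y in bits
 * [7:0]), and the resulting luma/chroma byte counts are split into
 * 16-bit high/low words the same way addresses are, since that is how
 * they sit in the DSP packet.  The 176x144 numbers are example values
 * only.
 */
#include <stdint.h>
#include <assert.h>

static void split_u32(uint32_t v, uint16_t *high, uint16_t *low)
{
	*high = (uint16_t)(v >> 16);
	*low = (uint16_t)(v & 0xffff);
}

static void venc_size_example(void)
{
	uint16_t frame_dim = 0x0b09;	/* 11 x 9 units of 16 pixels */
	uint32_t x = ((frame_dim & 0xff00) >> 8) * 16;	/* 176 */
	uint32_t y = (frame_dim & 0x00ff) * 16;		/* 144 */
	uint32_t luma = x * y;		/* one 8-bit Y plane          */
	uint32_t chroma = luma >> 1;	/* 4:2:0 CbCr planes together */
	uint16_t hi, lo;

	assert(x == 176 && y == 144);
	split_u32(luma, &hi, &lo);
	assert((((uint32_t)hi << 16) | lo) == luma);
	assert(luma + chroma == x * y * 3 / 2);
}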
*/ + if (pmem_fixup_high_low(&frame_cmd->input_luma_addr_high, + &frame_cmd->input_luma_addr_low, + luma_buf_size_high, + luma_buf_size_low, + module, + NULL, NULL)) + return -1; + /* Address of raw CbCr data */ + if (pmem_fixup_high_low(&frame_cmd->input_chroma_addr_high, + &frame_cmd->input_chroma_addr_low, + chroma_buf_size_high, + chroma_buf_size_low, + module, + NULL, NULL)) + return -1; + /* Reference VOP */ + if (pmem_fixup_high_low(&frame_cmd->ref_vop_buf_ptr_high, + &frame_cmd->ref_vop_buf_ptr_low, + frame_buf_size_high, + frame_buf_size_low, + module, + NULL, NULL)) + return -1; + /* Encoded Packet Address */ + if (pmem_fixup_high_low(&frame_cmd->enc_pkt_buf_ptr_high, + &frame_cmd->enc_pkt_buf_ptr_low, + frame_cmd->enc_pkt_buf_size_high, + frame_cmd->enc_pkt_buf_size_low, + module, + NULL, NULL)) + return -1; + /* Unfiltered VOP Buffer Address */ + if (pmem_fixup_high_low( + &frame_cmd->unfilt_recon_vop_buf_ptr_high, + &frame_cmd->unfilt_recon_vop_buf_ptr_low, + frame_buf_size_high, + frame_buf_size_low, + module, + NULL, NULL)) + return -1; + /* Filtered VOP Buffer Address */ + if (pmem_fixup_high_low(&frame_cmd->filt_recon_vop_buf_ptr_high, + &frame_cmd->filt_recon_vop_buf_ptr_low, + frame_buf_size_high, + frame_buf_size_low, + module, + NULL, NULL)) + return -1; + break; + case VIDENC_CMD_DIS: + if (cmd_size < sizeof(videnc_cmd_dis)) + return -1; + dis_cmd = (videnc_cmd_dis *)cmd_data; + luma_buf_size = x_dimension * y_dimension; + ptr_to_high_low_short((void *)luma_buf_size, + &luma_buf_size_high, + &luma_buf_size_low); + /* Prev VFE Luma Output Address */ + if (pmem_fixup_high_low(&dis_cmd->vfe_out_prev_luma_addr_high, + &dis_cmd->vfe_out_prev_luma_addr_low, + luma_buf_size_high, + luma_buf_size_low, + module, + NULL, NULL)) + return -1; + break; + default: + printk(KERN_INFO "adsp_video:unknown encoder video command %u\n", + cmd_id); + return 0; + } + + return 0; +} + + +int adsp_videoenc_verify_cmd(struct msm_adsp_module *module, + unsigned int queue_id, void *cmd_data, + size_t cmd_size) +{ + switch (queue_id) { + case QDSP_mpuVEncCmdQueue: + DLOG("\n"); + return verify_venc_cmd(module, cmd_data, cmd_size); + default: + printk(KERN_INFO "unknown video queue %u\n", queue_id); + return 0; + } +} + diff --git a/arch/arm/mach-msm/qdsp5/audio_aac.c b/arch/arm/mach-msm/qdsp5/audio_aac.c new file mode 100644 index 0000000000000..4232b9f248e9e --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/audio_aac.c @@ -0,0 +1,1051 @@ +/* arch/arm/mach-msm/qdsp5/audio_aac.c + * + * aac audio decoder device + * + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * Copyright (c) 2008-2009 QUALCOMM USA, INC. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include "audmgr.h" + +#include +#include +#include +#include +#include +#include + +/* for queue ids - should be relative to module number*/ +#include "adsp.h" + +#ifdef DEBUG +#define dprintk(format, arg...) \ +printk(KERN_DEBUG format, ## arg) +#else +#define dprintk(format, arg...) 
do {} while (0) +#endif + +#define BUFSZ 32768 +#define DMASZ (BUFSZ * 2) + +#define AUDPLAY_INVALID_READ_PTR_OFFSET 0xFFFF +#define AUDDEC_DEC_AAC 5 + +#define PCM_BUFSZ_MIN 9600 /* Hold one stereo AAC frame */ +#define PCM_BUF_MAX_COUNT 5 /* DSP only accepts 5 buffers at most + but support 2 buffers currently */ +#define ROUTING_MODE_FTRT 1 +#define ROUTING_MODE_RT 2 +/* Decoder status received from AUDPPTASK */ +#define AUDPP_DEC_STATUS_SLEEP 0 +#define AUDPP_DEC_STATUS_INIT 1 +#define AUDPP_DEC_STATUS_CFG 2 +#define AUDPP_DEC_STATUS_PLAY 3 + +struct buffer { + void *data; + unsigned size; + unsigned used; /* Input usage actual DSP produced PCM size */ + unsigned addr; +}; + +struct audio { + struct buffer out[2]; + + spinlock_t dsp_lock; + + uint8_t out_head; + uint8_t out_tail; + uint8_t out_needed; /* number of buffers the dsp is waiting for */ + + atomic_t out_bytes; + + struct mutex lock; + struct mutex write_lock; + wait_queue_head_t write_wait; + + /* Host PCM section */ + struct buffer in[PCM_BUF_MAX_COUNT]; + struct mutex read_lock; + wait_queue_head_t read_wait; /* Wait queue for read */ + char *read_data; /* pointer to reader buffer */ + dma_addr_t read_phys; /* physical address of reader buffer */ + uint8_t read_next; /* index to input buffers to be read next */ + uint8_t fill_next; /* index to buffer that DSP should be filling */ + uint8_t pcm_buf_count; /* number of pcm buffer allocated */ + /* ---- End of Host PCM section */ + + struct msm_adsp_module *audplay; + + /* configuration to use on next enable */ + uint32_t out_sample_rate; + uint32_t out_channel_mode; + struct msm_audio_aac_config aac_config; + struct audmgr audmgr; + + /* data allocated for various buffers */ + char *data; + dma_addr_t phys; + + int rflush; /* Read flush */ + int wflush; /* Write flush */ + int opened; + int enabled; + int running; + int stopped; /* set when stopped, cleared on flush */ + int pcm_feedback; + int buf_refresh; + + int reserved; /* A byte is being reserved */ + char rsv_byte; /* Handle odd length user data */ + + unsigned volume; + + uint16_t dec_id; + uint32_t read_ptr_offset; +}; + +static int auddec_dsp_config(struct audio *audio, int enable); +static void audpp_cmd_cfg_adec_params(struct audio *audio); +static void audpp_cmd_cfg_routing_mode(struct audio *audio); +static void audplay_send_data(struct audio *audio, unsigned needed); +static void audplay_config_hostpcm(struct audio *audio); +static void audplay_buffer_refresh(struct audio *audio); +static void audio_dsp_event(void *private, unsigned id, uint16_t *msg); + +/* must be called with audio->lock held */ +static int audio_enable(struct audio *audio) +{ + struct audmgr_config cfg; + int rc; + + dprintk("audio_enable()\n"); + + if (audio->enabled) + return 0; + + audio->out_tail = 0; + audio->out_needed = 0; + + cfg.tx_rate = RPC_AUD_DEF_SAMPLE_RATE_NONE; + cfg.rx_rate = RPC_AUD_DEF_SAMPLE_RATE_48000; + cfg.def_method = RPC_AUD_DEF_METHOD_PLAYBACK; + cfg.codec = RPC_AUD_DEF_CODEC_AAC; + cfg.snd_method = RPC_SND_METHOD_MIDI; + + rc = audmgr_enable(&audio->audmgr, &cfg); + if (rc < 0) + return rc; + + if (msm_adsp_enable(audio->audplay)) { + pr_err("audio: msm_adsp_enable(audplay) failed\n"); + audmgr_disable(&audio->audmgr); + return -ENODEV; + } + + if (audpp_enable(audio->dec_id, audio_dsp_event, audio)) { + pr_err("audio: audpp_enable() failed\n"); + msm_adsp_disable(audio->audplay); + audmgr_disable(&audio->audmgr); + return -ENODEV; + } + audio->enabled = 1; + return 0; +} + +/* must be called with audio->lock held 
*/ +static int audio_disable(struct audio *audio) +{ + dprintk("audio_disable()\n"); + if (audio->enabled) { + audio->enabled = 0; + auddec_dsp_config(audio, 0); + wake_up(&audio->write_wait); + wake_up(&audio->read_wait); + msm_adsp_disable(audio->audplay); + audpp_disable(audio->dec_id, audio); + audmgr_disable(&audio->audmgr); + audio->out_needed = 0; + } + return 0; +} + +/* ------------------- dsp --------------------- */ +static void audio_update_pcm_buf_entry(struct audio *audio, uint32_t *payload) +{ + uint8_t index; + unsigned long flags; + + if (audio->rflush) + return; + + spin_lock_irqsave(&audio->dsp_lock, flags); + for (index = 0; index < payload[1]; index++) { + if (audio->in[audio->fill_next].addr == + payload[2 + index * 2]) { + dprintk("audio_update_pcm_buf_entry: in[%d] ready\n", + audio->fill_next); + audio->in[audio->fill_next].used = + payload[3 + index * 2]; + if ((++audio->fill_next) == audio->pcm_buf_count) + audio->fill_next = 0; + + } else { + pr_err + ("audio_update_pcm_buf_entry: expected=%x ret=%x\n" + , audio->in[audio->fill_next].addr, + payload[1 + index * 2]); + break; + } + } + if (audio->in[audio->fill_next].used == 0) { + audplay_buffer_refresh(audio); + } else { + dprintk("audio_update_pcm_buf_entry: read cannot keep up\n"); + audio->buf_refresh = 1; + } + wake_up(&audio->read_wait); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + +} + +static void audplay_dsp_event(void *data, unsigned id, size_t len, + void (*getevent) (void *ptr, size_t len)) +{ + struct audio *audio = data; + uint32_t msg[28]; + getevent(msg, sizeof(msg)); + + dprintk("audplay_dsp_event: msg_id=%x\n", id); + + switch (id) { + case AUDPLAY_MSG_DEC_NEEDS_DATA: + audplay_send_data(audio, 1); + break; + + case AUDPLAY_MSG_BUFFER_UPDATE: + audio_update_pcm_buf_entry(audio, msg); + break; + + default: + pr_err("unexpected message from decoder \n"); + } +} + +static void audio_dsp_event(void *private, unsigned id, uint16_t *msg) +{ + struct audio *audio = private; + + switch (id) { + case AUDPP_MSG_STATUS_MSG:{ + unsigned status = msg[1]; + + switch (status) { + case AUDPP_DEC_STATUS_SLEEP: + dprintk("decoder status: sleep \n"); + break; + + case AUDPP_DEC_STATUS_INIT: + dprintk("decoder status: init \n"); + audpp_cmd_cfg_routing_mode(audio); + break; + + case AUDPP_DEC_STATUS_CFG: + dprintk("decoder status: cfg \n"); + break; + case AUDPP_DEC_STATUS_PLAY: + dprintk("decoder status: play \n"); + if (audio->pcm_feedback) { + audplay_config_hostpcm(audio); + audplay_buffer_refresh(audio); + } + break; + default: + pr_err("unknown decoder status \n"); + } + break; + } + case AUDPP_MSG_CFG_MSG: + if (msg[0] == AUDPP_MSG_ENA_ENA) { + dprintk("audio_dsp_event: CFG_MSG ENABLE\n"); + auddec_dsp_config(audio, 1); + audio->out_needed = 0; + audio->running = 1; + audpp_set_volume_and_pan(audio->dec_id, audio->volume, + 0); + audpp_avsync(audio->dec_id, 22050); + } else if (msg[0] == AUDPP_MSG_ENA_DIS) { + dprintk("audio_dsp_event: CFG_MSG DISABLE\n"); + audpp_avsync(audio->dec_id, 0); + audio->running = 0; + } else { + pr_err("audio_dsp_event: CFG_MSG %d?\n", msg[0]); + } + break; + case AUDPP_MSG_ROUTING_ACK: + dprintk("audio_dsp_event: ROUTING_ACK mode=%d\n", msg[1]); + audpp_cmd_cfg_adec_params(audio); + break; + + case AUDPP_MSG_FLUSH_ACK: + dprintk("%s: FLUSH_ACK\n", __func__); + audio->wflush = 0; + audio->rflush = 0; + if (audio->pcm_feedback) + audplay_buffer_refresh(audio); + break; + + default: + pr_err("audio_dsp_event: UNKNOWN (%d)\n", id); + } + +} + +struct msm_adsp_ops 
audplay_adsp_ops_aac = { + .event = audplay_dsp_event, +}; + +#define audplay_send_queue0(audio, cmd, len) \ + msm_adsp_write(audio->audplay, QDSP_uPAudPlay0BitStreamCtrlQueue, \ + cmd, len) + +static int auddec_dsp_config(struct audio *audio, int enable) +{ + audpp_cmd_cfg_dec_type cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cmd_id = AUDPP_CMD_CFG_DEC_TYPE; + if (enable) + cmd.dec0_cfg = AUDPP_CMD_UPDATDE_CFG_DEC | + AUDPP_CMD_ENA_DEC_V | AUDDEC_DEC_AAC; + else + cmd.dec0_cfg = AUDPP_CMD_UPDATDE_CFG_DEC | AUDPP_CMD_DIS_DEC_V; + + return audpp_send_queue1(&cmd, sizeof(cmd)); +} + +static void audpp_cmd_cfg_adec_params(struct audio *audio) +{ + audpp_cmd_cfg_adec_params_aac cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.common.cmd_id = AUDPP_CMD_CFG_ADEC_PARAMS; + cmd.common.length = AUDPP_CMD_CFG_ADEC_PARAMS_AAC_LEN; + cmd.common.dec_id = audio->dec_id; + cmd.common.input_sampling_frequency = audio->out_sample_rate; + cmd.format = audio->aac_config.format; + cmd.audio_object = audio->aac_config.audio_object; + cmd.ep_config = audio->aac_config.ep_config; + cmd.aac_section_data_resilience_flag = + audio->aac_config.aac_section_data_resilience_flag; + cmd.aac_scalefactor_data_resilience_flag = + audio->aac_config.aac_scalefactor_data_resilience_flag; + cmd.aac_spectral_data_resilience_flag = + audio->aac_config.aac_spectral_data_resilience_flag; + cmd.sbr_on_flag = audio->aac_config.sbr_on_flag; + cmd.sbr_ps_on_flag = audio->aac_config.sbr_ps_on_flag; + cmd.channel_configuration = audio->aac_config.channel_configuration; + + audpp_send_queue2(&cmd, sizeof(cmd)); +} + +static void audpp_cmd_cfg_routing_mode(struct audio *audio) +{ + struct audpp_cmd_routing_mode cmd; + dprintk("audpp_cmd_cfg_routing_mode()\n"); + memset(&cmd, 0, sizeof(cmd)); + cmd.cmd_id = AUDPP_CMD_ROUTING_MODE; + cmd.object_number = audio->dec_id; + if (audio->pcm_feedback) + cmd.routing_mode = ROUTING_MODE_FTRT; + else + cmd.routing_mode = ROUTING_MODE_RT; + + audpp_send_queue1(&cmd, sizeof(cmd)); +} + +static int audplay_dsp_send_data_avail(struct audio *audio, + unsigned idx, unsigned len) +{ + audplay_cmd_bitstream_data_avail cmd; + + cmd.cmd_id = AUDPLAY_CMD_BITSTREAM_DATA_AVAIL; + cmd.decoder_id = audio->dec_id; + cmd.buf_ptr = audio->out[idx].addr; + cmd.buf_size = len / 2; + cmd.partition_number = 0; + return audplay_send_queue0(audio, &cmd, sizeof(cmd)); +} + +static void audplay_buffer_refresh(struct audio *audio) +{ + struct audplay_cmd_buffer_refresh refresh_cmd; + + refresh_cmd.cmd_id = AUDPLAY_CMD_BUFFER_REFRESH; + refresh_cmd.num_buffers = 1; + refresh_cmd.buf0_address = audio->in[audio->fill_next].addr; + refresh_cmd.buf0_length = audio->in[audio->fill_next].size - + (audio->in[audio->fill_next].size % 1024); /* AAC frame size */ + refresh_cmd.buf_read_count = 0; + dprintk("audplay_buffer_fresh: buf0_addr=%x buf0_len=%d\n", + refresh_cmd.buf0_address, refresh_cmd.buf0_length); + (void)audplay_send_queue0(audio, &refresh_cmd, sizeof(refresh_cmd)); +} + +static void audplay_config_hostpcm(struct audio *audio) +{ + struct audplay_cmd_hpcm_buf_cfg cfg_cmd; + + dprintk("audplay_config_hostpcm()\n"); + cfg_cmd.cmd_id = AUDPLAY_CMD_HPCM_BUF_CFG; + cfg_cmd.max_buffers = audio->pcm_buf_count; + cfg_cmd.byte_swap = 0; + cfg_cmd.hostpcm_config = (0x8000) | (0x4000); + cfg_cmd.feedback_frequency = 1; + cfg_cmd.partition_number = 0; + (void)audplay_send_queue0(audio, &cfg_cmd, sizeof(cfg_cmd)); + +} + +static void audplay_send_data(struct audio *audio, unsigned needed) +{ + struct buffer *frame; + unsigned long flags; + 
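/*
 * Illustrative sketch, not part of this patch: a standalone model of the
 * two-buffer ping-pong that audplay_send_data(), continuing below,
 * implements.  "used" holds the byte count of a filled buffer, 0 means
 * empty, and the sentinel 0xffffffff marks the half currently owned by
 * the DSP.  The send() hook stands in for audplay_dsp_send_data_avail();
 * the spinlock and the write-flush special case are left out.
 */
#include <stdint.h>

#define IN_FLIGHT 0xffffffffu

struct pingpong {
	uint32_t used[2];	/* bytes queued in each half, or IN_FLIGHT */
	unsigned int tail;	/* half the DSP consumes next              */
	int dsp_wants_data;
	void (*send)(unsigned int idx, uint32_t len);
};

static void pingpong_kick(struct pingpong *pp, int dsp_requested)
{
	if (dsp_requested) {
		pp->dsp_wants_data = 1;
		/* the DSP asked again, so the in-flight half is done */
		if (pp->used[pp->tail] == IN_FLIGHT) {
			pp->used[pp->tail] = 0;	/* writer may refill it */
			pp->tail ^= 1;
		}
	}
	if (pp->dsp_wants_data && pp->used[pp->tail] &&
	    pp->used[pp->tail] != IN_FLIGHT && pp->send) {
		/* the driver asserts the IN_FLIGHT case away with BUG_ON() */
		pp->send(pp->tail, pp->used[pp->tail]);
		pp->used[pp->tail] = IN_FLIGHT;
		pp->dsp_wants_data = 0;
	}
}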
+ spin_lock_irqsave(&audio->dsp_lock, flags); + if (!audio->running) + goto done; + + if (needed && !audio->wflush) { + /* We were called from the callback because the DSP + * requested more data. Note that the DSP does want + * more data, and if a buffer was in-flight, mark it + * as available (since the DSP must now be done with + * it). + */ + audio->out_needed = 1; + frame = audio->out + audio->out_tail; + if (frame->used == 0xffffffff) { + dprintk("frame %d free\n", audio->out_tail); + frame->used = 0; + audio->out_tail ^= 1; + wake_up(&audio->write_wait); + } + } + + if (audio->out_needed) { + /* If the DSP currently wants data and we have a + * buffer available, we will send it and reset + * the needed flag. We'll mark the buffer as in-flight + * so that it won't be recycled until the next buffer + * is requested + */ + + frame = audio->out + audio->out_tail; + if (frame->used) { + BUG_ON(frame->used == 0xffffffff); +/* printk("frame %d busy\n", audio->out_tail); */ + audplay_dsp_send_data_avail(audio, audio->out_tail, + frame->used); + frame->used = 0xffffffff; + audio->out_needed = 0; + } + } + done: + spin_unlock_irqrestore(&audio->dsp_lock, flags); +} + +/* ------------------- device --------------------- */ + +static void audio_flush(struct audio *audio) +{ + audio->out[0].used = 0; + audio->out[1].used = 0; + audio->out_head = 0; + audio->out_tail = 0; + audio->reserved = 0; + audio->out_needed = 0; + atomic_set(&audio->out_bytes, 0); +} + +static void audio_flush_pcm_buf(struct audio *audio) +{ + uint8_t index; + + for (index = 0; index < PCM_BUF_MAX_COUNT; index++) + audio->in[index].used = 0; + audio->buf_refresh = 0; + audio->read_next = 0; + audio->fill_next = 0; +} + +static int audaac_validate_usr_config(struct msm_audio_aac_config *config) +{ + int ret_val = -1; + + if (config->format != AUDIO_AAC_FORMAT_ADTS && + config->format != AUDIO_AAC_FORMAT_RAW && + config->format != AUDIO_AAC_FORMAT_PSUEDO_RAW && + config->format != AUDIO_AAC_FORMAT_LOAS) + goto done; + + if (config->audio_object != AUDIO_AAC_OBJECT_LC && + config->audio_object != AUDIO_AAC_OBJECT_LTP && + config->audio_object != AUDIO_AAC_OBJECT_ERLC) + goto done; + + if (config->audio_object == AUDIO_AAC_OBJECT_ERLC) { + if (config->ep_config > 3) + goto done; + if (config->aac_scalefactor_data_resilience_flag != + AUDIO_AAC_SCA_DATA_RES_OFF && + config->aac_scalefactor_data_resilience_flag != + AUDIO_AAC_SCA_DATA_RES_ON) + goto done; + if (config->aac_section_data_resilience_flag != + AUDIO_AAC_SEC_DATA_RES_OFF && + config->aac_section_data_resilience_flag != + AUDIO_AAC_SEC_DATA_RES_ON) + goto done; + if (config->aac_spectral_data_resilience_flag != + AUDIO_AAC_SPEC_DATA_RES_OFF && + config->aac_spectral_data_resilience_flag != + AUDIO_AAC_SPEC_DATA_RES_ON) + goto done; + } else { + config->aac_section_data_resilience_flag = + AUDIO_AAC_SEC_DATA_RES_OFF; + config->aac_scalefactor_data_resilience_flag = + AUDIO_AAC_SCA_DATA_RES_OFF; + config->aac_spectral_data_resilience_flag = + AUDIO_AAC_SPEC_DATA_RES_OFF; + } + + if (config->sbr_on_flag != AUDIO_AAC_SBR_ON_FLAG_OFF && + config->sbr_on_flag != AUDIO_AAC_SBR_ON_FLAG_ON) + goto done; + + if (config->sbr_ps_on_flag != AUDIO_AAC_SBR_PS_ON_FLAG_OFF && + config->sbr_ps_on_flag != AUDIO_AAC_SBR_PS_ON_FLAG_ON) + goto done; + + if (config->dual_mono_mode > AUDIO_AAC_DUAL_MONO_PL_SR) + goto done; + + if (config->channel_configuration > 2) + goto done; + + ret_val = 0; + done: + return ret_val; +} + +static void audio_ioport_reset(struct audio *audio) +{ + /* Make 
sure read/write thread are free from + * sleep and knowing that system is not able + * to process io request at the moment + */ + wake_up(&audio->write_wait); + mutex_lock(&audio->write_lock); + audio_flush(audio); + mutex_unlock(&audio->write_lock); + wake_up(&audio->read_wait); + mutex_lock(&audio->read_lock); + audio_flush_pcm_buf(audio); + mutex_unlock(&audio->read_lock); +} + +static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct audio *audio = file->private_data; + int rc = 0; + + dprintk("audio_ioctl() cmd = %d\n", cmd); + + if (cmd == AUDIO_GET_STATS) { + struct msm_audio_stats stats; + stats.byte_count = audpp_avsync_byte_count(audio->dec_id); + stats.sample_count = audpp_avsync_sample_count(audio->dec_id); + if (copy_to_user((void *)arg, &stats, sizeof(stats))) + return -EFAULT; + return 0; + } + if (cmd == AUDIO_SET_VOLUME) { + unsigned long flags; + spin_lock_irqsave(&audio->dsp_lock, flags); + audio->volume = arg; + if (audio->running) + audpp_set_volume_and_pan(audio->dec_id, arg, 0); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + return 0; + } + mutex_lock(&audio->lock); + switch (cmd) { + case AUDIO_START: + rc = audio_enable(audio); + break; + case AUDIO_STOP: + rc = audio_disable(audio); + audio->stopped = 1; + audio_ioport_reset(audio); + audio->stopped = 0; + break; + case AUDIO_FLUSH: + dprintk("%s: AUDIO_FLUSH\n", __func__); + audio->rflush = 1; + audio->wflush = 1; + audio_ioport_reset(audio); + if (audio->running) + audpp_flush(audio->dec_id); + else { + audio->rflush = 0; + audio->wflush = 0; + } + break; + + case AUDIO_SET_CONFIG:{ + struct msm_audio_config config; + + if (copy_from_user + (&config, (void *)arg, sizeof(config))) { + rc = -EFAULT; + break; + } + + if (config.channel_count == 1) { + config.channel_count = + AUDPP_CMD_PCM_INTF_MONO_V; + } else if (config.channel_count == 2) { + config.channel_count = + AUDPP_CMD_PCM_INTF_STEREO_V; + } else { + rc = -EINVAL; + break; + } + + audio->out_sample_rate = config.sample_rate; + audio->out_channel_mode = config.channel_count; + rc = 0; + break; + } + case AUDIO_GET_CONFIG:{ + struct msm_audio_config config; + config.buffer_size = BUFSZ; + config.buffer_count = 2; + config.sample_rate = audio->out_sample_rate; + if (audio->out_channel_mode == + AUDPP_CMD_PCM_INTF_MONO_V) { + config.channel_count = 1; + } else { + config.channel_count = 2; + } + config.unused[0] = 0; + config.unused[1] = 0; + config.unused[2] = 0; + if (copy_to_user((void *)arg, &config, + sizeof(config))) + rc = -EFAULT; + else + rc = 0; + + break; + } + case AUDIO_GET_AAC_CONFIG:{ + if (copy_to_user((void *)arg, &audio->aac_config, + sizeof(audio->aac_config))) + rc = -EFAULT; + else + rc = 0; + break; + } + case AUDIO_SET_AAC_CONFIG:{ + struct msm_audio_aac_config usr_config; + + if (copy_from_user + (&usr_config, (void *)arg, + sizeof(usr_config))) { + rc = -EFAULT; + break; + } + + if (audaac_validate_usr_config(&usr_config) == 0) { + audio->aac_config = usr_config; + rc = 0; + } else + rc = -EINVAL; + + break; + } + case AUDIO_GET_PCM_CONFIG:{ + struct msm_audio_pcm_config config; + config.pcm_feedback = 0; + config.buffer_count = PCM_BUF_MAX_COUNT; + config.buffer_size = PCM_BUFSZ_MIN; + if (copy_to_user((void *)arg, &config, + sizeof(config))) + rc = -EFAULT; + else + rc = 0; + break; + } + case AUDIO_SET_PCM_CONFIG:{ + struct msm_audio_pcm_config config; + if (copy_from_user + (&config, (void *)arg, sizeof(config))) { + rc = -EFAULT; + break; + } + if ((config.buffer_count > 
PCM_BUF_MAX_COUNT) || + (config.buffer_count == 1)) + config.buffer_count = PCM_BUF_MAX_COUNT; + + if (config.buffer_size < PCM_BUFSZ_MIN) + config.buffer_size = PCM_BUFSZ_MIN; + + /* Check if pcm feedback is required */ + if ((config.pcm_feedback) && (!audio->read_data)) { + dprintk("ioctl: allocate PCM buffer %d\n", + config.buffer_count * + config.buffer_size); + audio->read_data = + dma_alloc_coherent(NULL, + config.buffer_size * + config.buffer_count, + &audio->read_phys, + GFP_KERNEL); + if (!audio->read_data) { + pr_err("audio_aac: buf alloc fail\n"); + rc = -1; + } else { + uint8_t index; + uint32_t offset = 0; + audio->pcm_feedback = 1; + audio->buf_refresh = 0; + audio->pcm_buf_count = + config.buffer_count; + audio->read_next = 0; + audio->fill_next = 0; + + for (index = 0; + index < config.buffer_count; + index++) { + audio->in[index].data = + audio->read_data + offset; + audio->in[index].addr = + audio->read_phys + offset; + audio->in[index].size = + config.buffer_size; + audio->in[index].used = 0; + offset += config.buffer_size; + } + rc = 0; + } + } else { + rc = 0; + } + break; + } + case AUDIO_PAUSE: + dprintk("%s: AUDIO_PAUSE %ld\n", __func__, arg); + rc = audpp_pause(audio->dec_id, (int) arg); + break; + default: + rc = -EINVAL; + } + mutex_unlock(&audio->lock); + return rc; +} + +static ssize_t audio_read(struct file *file, char __user *buf, size_t count, + loff_t *pos) +{ + struct audio *audio = file->private_data; + const char __user *start = buf; + int rc = 0; + + if (!audio->pcm_feedback) + return 0; /* PCM feedback is not enabled. Nothing to read */ + + mutex_lock(&audio->read_lock); + dprintk("audio_read() %d \n", count); + while (count > 0) { + rc = wait_event_interruptible(audio->read_wait, + (audio->in[audio->read_next]. + used > 0) || (audio->stopped) + || (audio->rflush)); + + if (rc < 0) + break; + + if (audio->stopped || audio->rflush) { + rc = -EBUSY; + break; + } + + if (count < audio->in[audio->read_next].used) { + /* Read must happen in frame boundary. 
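/*
 * Illustrative sketch, not part of this patch.  The AUDIO_SET_PCM_CONFIG
 * handler above makes a single dma_alloc_coherent() call of
 * buffer_size * buffer_count bytes and then carves it into per-buffer
 * {data, addr, size, used} entries at successive offsets; the loop below
 * is that bookkeeping in isolation, with the kernel and DMA details
 * deliberately left out.
 */
#include <stdint.h>

struct pcm_slice {
	void *data;		/* CPU-visible address of this buffer     */
	uint32_t addr;		/* device-visible address of the same     */
	unsigned int size;
	unsigned int used;	/* bytes the decoder has filled in so far */
};

static void carve_pcm_buffers(void *base_virt, uint32_t base_phys,
			      unsigned int buf_size, unsigned int buf_count,
			      struct pcm_slice *out)
{
	unsigned int i;
	uint32_t offset = 0;

	for (i = 0; i < buf_count; i++) {
		out[i].data = (char *)base_virt + offset;
		out[i].addr = base_phys + offset;
		out[i].size = buf_size;
		out[i].used = 0;
		offset += buf_size;
	}
}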
Since driver + does not know frame size, read count must be greater + or equal to size of PCM samples */ + dprintk("audio_read: no partial frame done reading\n"); + break; + } else { + dprintk("audio_read: read from in[%d]\n", + audio->read_next); + if (copy_to_user + (buf, audio->in[audio->read_next].data, + audio->in[audio->read_next].used)) { + pr_err("audio_read: invalid addr %x \n", + (unsigned int)buf); + rc = -EFAULT; + break; + } + count -= audio->in[audio->read_next].used; + buf += audio->in[audio->read_next].used; + audio->in[audio->read_next].used = 0; + if ((++audio->read_next) == audio->pcm_buf_count) + audio->read_next = 0; + if (audio->in[audio->read_next].used == 0) + break; /* No data ready at this moment + * Exit while loop to prevent + * output thread sleep too long + */ + } + } + + /* don't feed output buffer to HW decoder during flushing + * buffer refresh command will be sent once flush completes + * send buf refresh command here can confuse HW decoder + */ + if (audio->buf_refresh && !audio->rflush) { + audio->buf_refresh = 0; + dprintk("audio_read: kick start pcm feedback again\n"); + audplay_buffer_refresh(audio); + } + + mutex_unlock(&audio->read_lock); + + if (buf > start) + rc = buf - start; + + dprintk("audio_read: read %d bytes\n", rc); + return rc; +} + +static ssize_t audio_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + struct audio *audio = file->private_data; + const char __user *start = buf; + struct buffer *frame; + size_t xfer; + char *cpy_ptr; + int rc = 0; + unsigned dsize; + + mutex_lock(&audio->write_lock); + while (count > 0) { + frame = audio->out + audio->out_head; + cpy_ptr = frame->data; + dsize = 0; + rc = wait_event_interruptible(audio->write_wait, + (frame->used == 0) + || (audio->stopped) + || (audio->wflush)); + if (rc < 0) + break; + if (audio->stopped || audio->wflush) { + rc = -EBUSY; + break; + } + + if (audio->reserved) { + dprintk("%s: append reserved byte %x\n", + __func__, audio->rsv_byte); + *cpy_ptr = audio->rsv_byte; + xfer = (count > (frame->size - 1)) ? + frame->size - 1 : count; + cpy_ptr++; + dsize = 1; + audio->reserved = 0; + } else + xfer = (count > frame->size) ? 
frame->size : count; + + if (copy_from_user(cpy_ptr, buf, xfer)) { + rc = -EFAULT; + break; + } + + dsize += xfer; + if (dsize & 1) { + audio->rsv_byte = ((char *) frame->data)[dsize - 1]; + dprintk("%s: odd length buf reserve last byte %x\n", + __func__, audio->rsv_byte); + audio->reserved = 1; + dsize--; + } + count -= xfer; + buf += xfer; + + if (dsize > 0) { + audio->out_head ^= 1; + frame->used = dsize; + audplay_send_data(audio, 0); + } + } + mutex_unlock(&audio->write_lock); + if (buf > start) + return buf - start; + return rc; +} + +static int audio_release(struct inode *inode, struct file *file) +{ + struct audio *audio = file->private_data; + + dprintk("audio_release()\n"); + + mutex_lock(&audio->lock); + audio_disable(audio); + audio_flush(audio); + audio_flush_pcm_buf(audio); + msm_adsp_put(audio->audplay); + audio->audplay = NULL; + audio->opened = 0; + audio->reserved = 0; + dma_free_coherent(NULL, DMASZ, audio->data, audio->phys); + audio->data = NULL; + if (audio->read_data != NULL) { + dma_free_coherent(NULL, + audio->in[0].size * audio->pcm_buf_count, + audio->read_data, audio->read_phys); + audio->read_data = NULL; + } + audio->pcm_feedback = 0; + mutex_unlock(&audio->lock); + return 0; +} + +struct audio the_aac_audio; + +static int audio_open(struct inode *inode, struct file *file) +{ + struct audio *audio = &the_aac_audio; + int rc; + + mutex_lock(&audio->lock); + + if (audio->opened) { + pr_err("audio: busy\n"); + rc = -EBUSY; + goto done; + } + + if (!audio->data) { + audio->data = dma_alloc_coherent(NULL, DMASZ, + &audio->phys, GFP_KERNEL); + if (!audio->data) { + pr_err("audio: could not allocate DMA buffers\n"); + rc = -ENOMEM; + goto done; + } + } + + rc = audmgr_open(&audio->audmgr); + if (rc) + goto done; + + rc = msm_adsp_get("AUDPLAY0TASK", &audio->audplay, + &audplay_adsp_ops_aac, audio); + if (rc) { + pr_err("audio: failed to get audplay0 dsp module\n"); + goto done; + } + audio->out_sample_rate = 44100; + audio->out_channel_mode = AUDPP_CMD_PCM_INTF_STEREO_V; + audio->aac_config.format = AUDIO_AAC_FORMAT_ADTS; + audio->aac_config.audio_object = AUDIO_AAC_OBJECT_LC; + audio->aac_config.ep_config = 0; + audio->aac_config.aac_section_data_resilience_flag = + AUDIO_AAC_SEC_DATA_RES_OFF; + audio->aac_config.aac_scalefactor_data_resilience_flag = + AUDIO_AAC_SCA_DATA_RES_OFF; + audio->aac_config.aac_spectral_data_resilience_flag = + AUDIO_AAC_SPEC_DATA_RES_OFF; + audio->aac_config.sbr_on_flag = AUDIO_AAC_SBR_ON_FLAG_ON; + audio->aac_config.sbr_ps_on_flag = AUDIO_AAC_SBR_PS_ON_FLAG_ON; + audio->aac_config.dual_mono_mode = AUDIO_AAC_DUAL_MONO_PL_SR; + audio->aac_config.channel_configuration = 2; + audio->dec_id = 0; + + audio->out[0].data = audio->data + 0; + audio->out[0].addr = audio->phys + 0; + audio->out[0].size = BUFSZ; + + audio->out[1].data = audio->data + BUFSZ; + audio->out[1].addr = audio->phys + BUFSZ; + audio->out[1].size = BUFSZ; + + audio->volume = 0x2000; /* Q13 1.0 */ + + audio_flush(audio); + + file->private_data = audio; + audio->opened = 1; + rc = 0; +done: + mutex_unlock(&audio->lock); + return rc; +} + +static struct file_operations audio_aac_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_release, + .read = audio_read, + .write = audio_write, + .unlocked_ioctl = audio_ioctl, +}; + +struct miscdevice audio_aac_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_aac", + .fops = &audio_aac_fops, +}; + +static int __init audio_init(void) +{ + mutex_init(&the_aac_audio.lock); + mutex_init(&the_aac_audio.write_lock); 
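/*
 * Illustrative sketch, not part of this patch.  audio_open() above
 * programs the default volume as 0x2000, which the driver comments as
 * Q13 1.0: a fixed-point gain with 13 fractional bits, so unity is
 * 1 << 13.  The percentage helper below is only an example of producing
 * such values; AUDIO_SET_VOLUME itself passes the caller's raw value
 * straight to audpp_set_volume_and_pan().
 */
#include <stdint.h>
#include <assert.h>

#define Q13_UNITY (1u << 13)	/* 0x2000 */

static uint16_t percent_to_q13(unsigned int percent)
{
	return (uint16_t)((Q13_UNITY * percent) / 100);
}

static void q13_example(void)
{
	assert(percent_to_q13(100) == 0x2000);	/* unity gain */
	assert(percent_to_q13(50) == 0x1000);	/* half gain  */
	assert(percent_to_q13(0) == 0x0000);	/* mute       */
}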
+ mutex_init(&the_aac_audio.read_lock); + spin_lock_init(&the_aac_audio.dsp_lock); + init_waitqueue_head(&the_aac_audio.write_wait); + init_waitqueue_head(&the_aac_audio.read_wait); + the_aac_audio.read_data = NULL; + return misc_register(&audio_aac_misc); +} + +device_initcall(audio_init); diff --git a/arch/arm/mach-msm/qdsp5/audio_amrnb.c b/arch/arm/mach-msm/qdsp5/audio_amrnb.c new file mode 100644 index 0000000000000..63fe2d0f0d8d7 --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/audio_amrnb.c @@ -0,0 +1,872 @@ +/* linux/arch/arm/mach-msm/qdsp5/audio_amrnb.c + * + * amrnb audio decoder device + * + * Copyright (c) 2008 QUALCOMM USA, INC. + * + * Based on the mp3 native driver in arch/arm/mach-msm/qdsp5/audio_mp3.c + * + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * + * All source code in this file is licensed under the following license except + * where indicated. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * See the GNU General Public License for more details. + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can find it at http://www.fsf.org + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include "audmgr.h" + +#include +#include +#include +#include + +/* for queue ids - should be relative to module number*/ +#include "adsp.h" + +#define DEBUG +#ifdef DEBUG +#define dprintk(format, arg...) \ +printk(KERN_DEBUG format, ## arg) +#else +#define dprintk(format, arg...) 
do {} while (0) +#endif + +#define BUFSZ 1024 /* Hold minimum 700ms voice data */ +#define DMASZ (BUFSZ * 2) + +#define AUDPLAY_INVALID_READ_PTR_OFFSET 0xFFFF +#define AUDDEC_DEC_AMRNB 10 + +#define PCM_BUFSZ_MIN 1600 /* 100ms worth of data */ +#define AMRNB_DECODED_FRSZ 320 /* AMR-NB 20ms 8KHz mono PCM size */ +#define PCM_BUF_MAX_COUNT 5 /* DSP only accepts 5 buffers at most + but support 2 buffers currently */ +#define ROUTING_MODE_FTRT 1 +#define ROUTING_MODE_RT 2 +/* Decoder status received from AUDPPTASK */ +#define AUDPP_DEC_STATUS_SLEEP 0 +#define AUDPP_DEC_STATUS_INIT 1 +#define AUDPP_DEC_STATUS_CFG 2 +#define AUDPP_DEC_STATUS_PLAY 3 + +struct buffer { + void *data; + unsigned size; + unsigned used; /* Input usage actual DSP produced PCM size */ + unsigned addr; +}; + +struct audio { + struct buffer out[2]; + + spinlock_t dsp_lock; + + uint8_t out_head; + uint8_t out_tail; + uint8_t out_needed; /* number of buffers the dsp is waiting for */ + + atomic_t out_bytes; + + struct mutex lock; + struct mutex write_lock; + wait_queue_head_t write_wait; + + /* Host PCM section */ + struct buffer in[PCM_BUF_MAX_COUNT]; + struct mutex read_lock; + wait_queue_head_t read_wait; /* Wait queue for read */ + char *read_data; /* pointer to reader buffer */ + dma_addr_t read_phys; /* physical address of reader buffer */ + uint8_t read_next; /* index to input buffers to be read next */ + uint8_t fill_next; /* index to buffer that DSP should be filling */ + uint8_t pcm_buf_count; /* number of pcm buffer allocated */ + /* ---- End of Host PCM section */ + + struct msm_adsp_module *audplay; + + struct audmgr audmgr; + + /* data allocated for various buffers */ + char *data; + dma_addr_t phys; + + uint8_t opened:1; + uint8_t enabled:1; + uint8_t running:1; + uint8_t stopped:1; /* set when stopped, cleared on flush */ + uint8_t pcm_feedback:1; + uint8_t buf_refresh:1; + + unsigned volume; + + uint16_t dec_id; + uint32_t read_ptr_offset; +}; + +struct audpp_cmd_cfg_adec_params_amrnb { + audpp_cmd_cfg_adec_params_common common; + unsigned short stereo_cfg; +} __attribute__((packed)) ; + +static int auddec_dsp_config(struct audio *audio, int enable); +static void audpp_cmd_cfg_adec_params(struct audio *audio); +static void audpp_cmd_cfg_routing_mode(struct audio *audio); +static void audamrnb_send_data(struct audio *audio, unsigned needed); +static void audamrnb_config_hostpcm(struct audio *audio); +static void audamrnb_buffer_refresh(struct audio *audio); +static void audamrnb_dsp_event(void *private, unsigned id, uint16_t *msg); + +/* must be called with audio->lock held */ +static int audamrnb_enable(struct audio *audio) +{ + struct audmgr_config cfg; + int rc; + + dprintk("audamrnb_enable()\n"); + + if (audio->enabled) + return 0; + + audio->out_tail = 0; + audio->out_needed = 0; + + cfg.tx_rate = RPC_AUD_DEF_SAMPLE_RATE_NONE; + cfg.rx_rate = RPC_AUD_DEF_SAMPLE_RATE_48000; + cfg.def_method = RPC_AUD_DEF_METHOD_PLAYBACK; + cfg.codec = RPC_AUD_DEF_CODEC_AMR_NB; + cfg.snd_method = RPC_SND_METHOD_MIDI; + + rc = audmgr_enable(&audio->audmgr, &cfg); + if (rc < 0) + return rc; + + if (msm_adsp_enable(audio->audplay)) { + pr_err("audio: msm_adsp_enable(audplay) failed\n"); + audmgr_disable(&audio->audmgr); + return -ENODEV; + } + + if (audpp_enable(audio->dec_id, audamrnb_dsp_event, audio)) { + pr_err("audio: audpp_enable() failed\n"); + msm_adsp_disable(audio->audplay); + audmgr_disable(&audio->audmgr); + return -ENODEV; + } + audio->enabled = 1; + return 0; +} + +/* must be called with audio->lock held */ 
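/*
 * Illustrative sketch, not part of this patch.  audamrnb_enable() above
 * brings up three things in order (the audmgr session, the AUDPLAY0 ADSP
 * module, the AUDPP decoder) and releases whatever was already acquired
 * when a later step fails; audamrnb_disable() below tears the same three
 * down.  The skeleton restates that acquire/roll-back shape in the goto
 * style usual for kernel code; step_*() and undo_*() are stand-ins for
 * the real calls named in the comments, not driver functions.
 */
static int step_audmgr(void) { return 0; }	/* audmgr_enable()    */
static int step_adsp(void)   { return 0; }	/* msm_adsp_enable()  */
static int step_audpp(void)  { return 0; }	/* audpp_enable()     */
static void undo_adsp(void)  { }		/* msm_adsp_disable() */
static void undo_audmgr(void) { }		/* audmgr_disable()   */

static int enable_with_rollback(void)
{
	int rc;

	rc = step_audmgr();
	if (rc)
		return rc;
	rc = step_adsp();
	if (rc)
		goto err_audmgr;
	rc = step_audpp();
	if (rc)
		goto err_adsp;
	return 0;	/* all three up; the disable path reverses this */

err_adsp:
	undo_adsp();
err_audmgr:
	undo_audmgr();
	return rc;
}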
+static int audamrnb_disable(struct audio *audio) +{ + dprintk("audamrnb_disable()\n"); + if (audio->enabled) { + audio->enabled = 0; + auddec_dsp_config(audio, 0); + wake_up(&audio->write_wait); + wake_up(&audio->read_wait); + msm_adsp_disable(audio->audplay); + audpp_disable(audio->dec_id, audio); + audmgr_disable(&audio->audmgr); + audio->out_needed = 0; + } + return 0; +} + +/* ------------------- dsp --------------------- */ +static void audamrnb_update_pcm_buf_entry(struct audio *audio, + uint32_t *payload) +{ + uint8_t index; + unsigned long flags; + + spin_lock_irqsave(&audio->dsp_lock, flags); + for (index = 0; index < payload[1]; index++) { + if (audio->in[audio->fill_next].addr == + payload[2 + index * 2]) { + dprintk("audamrnb_update_pcm_buf_entry: in[%d] ready\n", + audio->fill_next); + audio->in[audio->fill_next].used = + payload[3 + index * 2]; + if ((++audio->fill_next) == audio->pcm_buf_count) + audio->fill_next = 0; + + } else { + pr_err + ("audamrnb_update_pcm_buf_entry: expected=%x ret=%x\n" + , audio->in[audio->fill_next].addr, + payload[1 + index * 2]); + break; + } + } + if (audio->in[audio->fill_next].used == 0) { + audamrnb_buffer_refresh(audio); + } else { + dprintk("audamrnb_update_pcm_buf_entry: read cannot keep up\n"); + audio->buf_refresh = 1; + } + + spin_unlock_irqrestore(&audio->dsp_lock, flags); + wake_up(&audio->read_wait); +} + +static void audplay_dsp_event(void *data, unsigned id, size_t len, + void (*getevent) (void *ptr, size_t len)) +{ + struct audio *audio = data; + uint32_t msg[28]; + getevent(msg, sizeof(msg)); + + dprintk("audplay_dsp_event: msg_id=%x\n", id); + + switch (id) { + case AUDPLAY_MSG_DEC_NEEDS_DATA: + audamrnb_send_data(audio, 1); + break; + + case AUDPLAY_MSG_BUFFER_UPDATE: + audamrnb_update_pcm_buf_entry(audio, msg); + break; + + default: + pr_err("unexpected message from decoder \n"); + } +} + +static void audamrnb_dsp_event(void *private, unsigned id, uint16_t *msg) +{ + struct audio *audio = private; + + switch (id) { + case AUDPP_MSG_STATUS_MSG:{ + unsigned status = msg[1]; + + switch (status) { + case AUDPP_DEC_STATUS_SLEEP: + dprintk("decoder status: sleep \n"); + break; + + case AUDPP_DEC_STATUS_INIT: + dprintk("decoder status: init \n"); + audpp_cmd_cfg_routing_mode(audio); + break; + + case AUDPP_DEC_STATUS_CFG: + dprintk("decoder status: cfg \n"); + break; + case AUDPP_DEC_STATUS_PLAY: + dprintk("decoder status: play \n"); + if (audio->pcm_feedback) { + audamrnb_config_hostpcm(audio); + audamrnb_buffer_refresh(audio); + } + break; + default: + pr_err("unknown decoder status \n"); + break; + } + break; + } + case AUDPP_MSG_CFG_MSG: + if (msg[0] == AUDPP_MSG_ENA_ENA) { + dprintk("audamrnb_dsp_event: CFG_MSG ENABLE\n"); + auddec_dsp_config(audio, 1); + audio->out_needed = 0; + audio->running = 1; + audpp_set_volume_and_pan(audio->dec_id, audio->volume, + 0); + audpp_avsync(audio->dec_id, 22050); + } else if (msg[0] == AUDPP_MSG_ENA_DIS) { + dprintk("audamrnb_dsp_event: CFG_MSG DISABLE\n"); + audpp_avsync(audio->dec_id, 0); + audio->running = 0; + } else { + pr_err("audamrnb_dsp_event: CFG_MSG %d?\n", msg[0]); + } + break; + case AUDPP_MSG_ROUTING_ACK: + dprintk("audamrnb_dsp_event: ROUTING_ACK mode=%d\n", msg[1]); + audpp_cmd_cfg_adec_params(audio); + break; + + default: + pr_err("audamrnb_dsp_event: UNKNOWN (%d)\n", id); + } + +} + +struct msm_adsp_ops audplay_adsp_ops_amrnb = { + .event = audplay_dsp_event, +}; + +#define audplay_send_queue0(audio, cmd, len) \ + msm_adsp_write(audio->audplay, 
QDSP_uPAudPlay0BitStreamCtrlQueue, \ + cmd, len) + +static int auddec_dsp_config(struct audio *audio, int enable) +{ + audpp_cmd_cfg_dec_type cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cmd_id = AUDPP_CMD_CFG_DEC_TYPE; + if (enable) + cmd.dec0_cfg = AUDPP_CMD_UPDATDE_CFG_DEC | + AUDPP_CMD_ENA_DEC_V | AUDDEC_DEC_AMRNB; + else + cmd.dec0_cfg = AUDPP_CMD_UPDATDE_CFG_DEC | AUDPP_CMD_DIS_DEC_V; + + return audpp_send_queue1(&cmd, sizeof(cmd)); +} + +static void audpp_cmd_cfg_adec_params(struct audio *audio) +{ + struct audpp_cmd_cfg_adec_params_amrnb cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.common.cmd_id = AUDPP_CMD_CFG_ADEC_PARAMS; + cmd.common.length = AUDPP_CMD_CFG_ADEC_PARAMS_V13K_LEN; + cmd.common.dec_id = audio->dec_id; + cmd.common.input_sampling_frequency = 8000; + cmd.stereo_cfg = AUDPP_CMD_PCM_INTF_MONO_V; + + audpp_send_queue2(&cmd, sizeof(cmd)); +} + +static void audpp_cmd_cfg_routing_mode(struct audio *audio) +{ + struct audpp_cmd_routing_mode cmd; + dprintk("audpp_cmd_cfg_routing_mode()\n"); + memset(&cmd, 0, sizeof(cmd)); + cmd.cmd_id = AUDPP_CMD_ROUTING_MODE; + cmd.object_number = audio->dec_id; + if (audio->pcm_feedback) + cmd.routing_mode = ROUTING_MODE_FTRT; + else + cmd.routing_mode = ROUTING_MODE_RT; + + audpp_send_queue1(&cmd, sizeof(cmd)); +} + +static int audplay_dsp_send_data_avail(struct audio *audio, + unsigned idx, unsigned len) +{ + audplay_cmd_bitstream_data_avail cmd; + + cmd.cmd_id = AUDPLAY_CMD_BITSTREAM_DATA_AVAIL; + cmd.decoder_id = audio->dec_id; + cmd.buf_ptr = audio->out[idx].addr; + cmd.buf_size = len / 2; + cmd.partition_number = 0; + return audplay_send_queue0(audio, &cmd, sizeof(cmd)); +} + +static void audamrnb_buffer_refresh(struct audio *audio) +{ + struct audplay_cmd_buffer_refresh refresh_cmd; + + refresh_cmd.cmd_id = AUDPLAY_CMD_BUFFER_REFRESH; + refresh_cmd.num_buffers = 1; + refresh_cmd.buf0_address = audio->in[audio->fill_next].addr; + refresh_cmd.buf0_length = audio->in[audio->fill_next].size - + (audio->in[audio->fill_next].size % AMRNB_DECODED_FRSZ); + refresh_cmd.buf_read_count = 0; + dprintk("audplay_buffer_fresh: buf0_addr=%x buf0_len=%d\n", + refresh_cmd.buf0_address, refresh_cmd.buf0_length); + (void)audplay_send_queue0(audio, &refresh_cmd, sizeof(refresh_cmd)); +} + +static void audamrnb_config_hostpcm(struct audio *audio) +{ + struct audplay_cmd_hpcm_buf_cfg cfg_cmd; + + dprintk("audamrnb_config_hostpcm()\n"); + cfg_cmd.cmd_id = AUDPLAY_CMD_HPCM_BUF_CFG; + cfg_cmd.max_buffers = audio->pcm_buf_count; + cfg_cmd.byte_swap = 0; + cfg_cmd.hostpcm_config = (0x8000) | (0x4000); + cfg_cmd.feedback_frequency = 1; + cfg_cmd.partition_number = 0; + (void)audplay_send_queue0(audio, &cfg_cmd, sizeof(cfg_cmd)); + +} + +static void audamrnb_send_data(struct audio *audio, unsigned needed) +{ + struct buffer *frame; + unsigned long flags; + + spin_lock_irqsave(&audio->dsp_lock, flags); + if (!audio->running) + goto done; + + if (needed) { + /* We were called from the callback because the DSP + * requested more data. Note that the DSP does want + * more data, and if a buffer was in-flight, mark it + * as available (since the DSP must now be done with + * it). + */ + audio->out_needed = 1; + frame = audio->out + audio->out_tail; + if (frame->used == 0xffffffff) { + frame->used = 0; + audio->out_tail ^= 1; + wake_up(&audio->write_wait); + } + } + + if (audio->out_needed) { + /* If the DSP currently wants data and we have a + * buffer available, we will send it and reset + * the needed flag. 
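/*
 * Illustrative sketch, not part of this patch.
 * audamrnb_buffer_refresh() above rounds the host-PCM buffer length down
 * to a whole number of decoded frames.  AMRNB_DECODED_FRSZ is 320 because
 * one 20 ms AMR-NB frame decodes to 8000 Hz * 0.020 s = 160 mono samples
 * of 16 bits, i.e. 320 bytes; the helper below isolates that rounding.
 */
#include <assert.h>

#define FRAME_BYTES (8000 / 50 * 2)	/* 160 samples * 2 bytes = 320 */

static unsigned int whole_frames_len(unsigned int buf_len)
{
	return buf_len - (buf_len % FRAME_BYTES);
}

static void frame_align_example(void)
{
	assert(FRAME_BYTES == 320);
	/* PCM_BUFSZ_MIN is 1600 bytes, i.e. exactly five decoded frames */
	assert(whole_frames_len(1600) == 1600);
	assert(whole_frames_len(1700) == 1600);
}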
We'll mark the buffer as in-flight + * so that it won't be recycled until the next buffer + * is requested + */ + + frame = audio->out + audio->out_tail; + if (frame->used) { + BUG_ON(frame->used == 0xffffffff); +/* printk("frame %d busy\n", audio->out_tail); */ + audplay_dsp_send_data_avail(audio, audio->out_tail, + frame->used); + frame->used = 0xffffffff; + audio->out_needed = 0; + } + } + done: + spin_unlock_irqrestore(&audio->dsp_lock, flags); +} + +/* ------------------- device --------------------- */ + +static void audamrnb_flush(struct audio *audio) +{ + audio->out[0].used = 0; + audio->out[1].used = 0; + audio->out_head = 0; + audio->out_tail = 0; + audio->stopped = 0; + atomic_set(&audio->out_bytes, 0); +} + +static void audamrnb_flush_pcm_buf(struct audio *audio) +{ + uint8_t index; + + for (index = 0; index < PCM_BUF_MAX_COUNT; index++) + audio->in[index].used = 0; + + audio->read_next = 0; + audio->fill_next = 0; +} + +static long audamrnb_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct audio *audio = file->private_data; + int rc = 0; + + dprintk("audamrnb_ioctl() cmd = %d\n", cmd); + + if (cmd == AUDIO_GET_STATS) { + struct msm_audio_stats stats; + stats.byte_count = audpp_avsync_byte_count(audio->dec_id); + stats.sample_count = audpp_avsync_sample_count(audio->dec_id); + if (copy_to_user((void *)arg, &stats, sizeof(stats))) + return -EFAULT; + return 0; + } + if (cmd == AUDIO_SET_VOLUME) { + unsigned long flags; + spin_lock_irqsave(&audio->dsp_lock, flags); + audio->volume = arg; + if (audio->running) + audpp_set_volume_and_pan(audio->dec_id, arg, 0); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + return 0; + } + mutex_lock(&audio->lock); + switch (cmd) { + case AUDIO_START: + rc = audamrnb_enable(audio); + break; + case AUDIO_STOP: + rc = audamrnb_disable(audio); + audio->stopped = 1; + break; + case AUDIO_FLUSH: + if (audio->stopped) { + /* Make sure we're stopped and we wake any threads + * that might be blocked holding the write_lock. + * While audio->stopped write threads will always + * exit immediately. 
+ */ + wake_up(&audio->write_wait); + mutex_lock(&audio->write_lock); + audamrnb_flush(audio); + mutex_unlock(&audio->write_lock); + wake_up(&audio->read_wait); + mutex_lock(&audio->read_lock); + audamrnb_flush_pcm_buf(audio); + mutex_unlock(&audio->read_lock); + break; + } + + case AUDIO_SET_CONFIG:{ + dprintk("AUDIO_SET_CONFIG not applicable \n"); + break; + } + case AUDIO_GET_CONFIG:{ + struct msm_audio_config config; + config.buffer_size = BUFSZ; + config.buffer_count = 2; + config.sample_rate = 8000; + config.channel_count = 1; + config.unused[0] = 0; + config.unused[1] = 0; + config.unused[2] = 0; + if (copy_to_user((void *)arg, &config, + sizeof(config))) + rc = -EFAULT; + else + rc = 0; + + break; + } + case AUDIO_GET_PCM_CONFIG:{ + struct msm_audio_pcm_config config; + config.pcm_feedback = 0; + config.buffer_count = PCM_BUF_MAX_COUNT; + config.buffer_size = PCM_BUFSZ_MIN; + if (copy_to_user((void *)arg, &config, + sizeof(config))) + rc = -EFAULT; + else + rc = 0; + break; + } + case AUDIO_SET_PCM_CONFIG:{ + struct msm_audio_pcm_config config; + if (copy_from_user + (&config, (void *)arg, sizeof(config))) { + rc = -EFAULT; + break; + } + if ((config.buffer_count > PCM_BUF_MAX_COUNT) || + (config.buffer_count == 1)) + config.buffer_count = PCM_BUF_MAX_COUNT; + + if (config.buffer_size < PCM_BUFSZ_MIN) + config.buffer_size = PCM_BUFSZ_MIN; + + /* Check if pcm feedback is required */ + if ((config.pcm_feedback) && (!audio->read_data)) { + dprintk("audamrnb_ioctl: allocate PCM buf %d\n", + config.buffer_count * + config.buffer_size); + audio->read_data = + dma_alloc_coherent(NULL, + config.buffer_size * + config.buffer_count, + &audio->read_phys, + GFP_KERNEL); + if (!audio->read_data) { + pr_err("audamrnb_ioctl: no mem for pcm buf\n"); + rc = -1; + } else { + uint8_t index; + uint32_t offset = 0; + audio->pcm_feedback = 1; + audio->buf_refresh = 0; + audio->pcm_buf_count = + config.buffer_count; + audio->read_next = 0; + audio->fill_next = 0; + + for (index = 0; + index < config.buffer_count; index++) { + audio->in[index].data = + audio->read_data + offset; + audio->in[index].addr = + audio->read_phys + offset; + audio->in[index].size = + config.buffer_size; + audio->in[index].used = 0; + offset += config.buffer_size; + } + rc = 0; + } + } else { + rc = 0; + } + break; + } + default: + rc = -EINVAL; + } + mutex_unlock(&audio->lock); + return rc; +} + +static ssize_t audamrnb_read(struct file *file, char __user *buf, size_t count, + loff_t *pos) +{ + struct audio *audio = file->private_data; + const char __user *start = buf; + int rc = 0; + + if (!audio->pcm_feedback) + return 0; /* PCM feedback is not enabled. Nothing to read */ + + mutex_lock(&audio->read_lock); + dprintk("audamrnb_read() %d \n", count); + while (count > 0) { + rc = wait_event_interruptible(audio->read_wait, + (audio->in[audio->read_next]. + used > 0) || (audio->stopped)); + + if (rc < 0) + break; + + if (audio->stopped) { + rc = -EBUSY; + break; + } + + if (count < audio->in[audio->read_next].used) { + /* Read must happen in frame boundary. 
Since driver does + * not know frame size, read count must be greater or + * equal to size of PCM samples + */ + dprintk("audamrnb_read:read stop - partial frame\n"); + break; + } else { + dprintk("audamrnb_read: read from in[%d]\n", + audio->read_next); + if (copy_to_user + (buf, audio->in[audio->read_next].data, + audio->in[audio->read_next].used)) { + pr_err("audamrnb_read: invalid addr %x \n", + (unsigned int)buf); + rc = -EFAULT; + break; + } + count -= audio->in[audio->read_next].used; + buf += audio->in[audio->read_next].used; + audio->in[audio->read_next].used = 0; + if ((++audio->read_next) == audio->pcm_buf_count) + audio->read_next = 0; + } + } + + if (audio->buf_refresh) { + audio->buf_refresh = 0; + dprintk("audamrnb_read: kick start pcm feedback again\n"); + audamrnb_buffer_refresh(audio); + } + + mutex_unlock(&audio->read_lock); + + if (buf > start) + rc = buf - start; + + dprintk("audamrnb_read: read %d bytes\n", rc); + return rc; +} + +static ssize_t audamrnb_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + struct audio *audio = file->private_data; + const char __user *start = buf; + struct buffer *frame; + size_t xfer; + int rc = 0; + + if (count & 1) + return -EINVAL; + dprintk("audamrnb_write() \n"); + mutex_lock(&audio->write_lock); + while (count > 0) { + frame = audio->out + audio->out_head; + rc = wait_event_interruptible(audio->write_wait, + (frame->used == 0) + || (audio->stopped)); + dprintk("audamrnb_write() buffer available\n"); + if (rc < 0) + break; + if (audio->stopped) { + rc = -EBUSY; + break; + } + xfer = (count > frame->size) ? frame->size : count; + if (copy_from_user(frame->data, buf, xfer)) { + rc = -EFAULT; + break; + } + + frame->used = xfer; + audio->out_head ^= 1; + count -= xfer; + buf += xfer; + + audamrnb_send_data(audio, 0); + + } + mutex_unlock(&audio->write_lock); + if (buf > start) + return buf - start; + return rc; +} + +static int audamrnb_release(struct inode *inode, struct file *file) +{ + struct audio *audio = file->private_data; + + dprintk("audamrnb_release()\n"); + + mutex_lock(&audio->lock); + audamrnb_disable(audio); + audamrnb_flush(audio); + audamrnb_flush_pcm_buf(audio); + msm_adsp_put(audio->audplay); + audio->audplay = NULL; + audio->opened = 0; + dma_free_coherent(NULL, DMASZ, audio->data, audio->phys); + audio->data = NULL; + if (audio->read_data != NULL) { + dma_free_coherent(NULL, + audio->in[0].size * audio->pcm_buf_count, + audio->read_data, audio->read_phys); + audio->read_data = NULL; + } + audio->pcm_feedback = 0; + mutex_unlock(&audio->lock); + return 0; +} + +static struct audio the_amrnb_audio; + +static int audamrnb_open(struct inode *inode, struct file *file) +{ + struct audio *audio = &the_amrnb_audio; + int rc; + + mutex_lock(&audio->lock); + + if (audio->opened) { + pr_err("audio: busy\n"); + rc = -EBUSY; + goto done; + } + + if (!audio->data) { + audio->data = dma_alloc_coherent(NULL, DMASZ, + &audio->phys, GFP_KERNEL); + if (!audio->data) { + pr_err("audio: could not allocate DMA buffers\n"); + rc = -ENOMEM; + goto done; + } + } + + rc = audmgr_open(&audio->audmgr); + if (rc) + goto done; + + rc = msm_adsp_get("AUDPLAY0TASK", &audio->audplay, + &audplay_adsp_ops_amrnb, audio); + if (rc) { + pr_err("audio: failed to get audplay0 dsp module\n"); + audmgr_disable(&audio->audmgr); + dma_free_coherent(NULL, DMASZ, audio->data, audio->phys); + audio->data = NULL; + goto done; + } + + audio->dec_id = 0; + + audio->out[0].data = audio->data + 0; + audio->out[0].addr = audio->phys + 
0; + audio->out[0].size = BUFSZ; + + audio->out[1].data = audio->data + BUFSZ; + audio->out[1].addr = audio->phys + BUFSZ; + audio->out[1].size = BUFSZ; + + audio->volume = 0x2000; /* Q13 1.0 */ + + audamrnb_flush(audio); + + file->private_data = audio; + audio->opened = 1; + rc = 0; +done: + mutex_unlock(&audio->lock); + return rc; +} + +static struct file_operations audio_amrnb_fops = { + .owner = THIS_MODULE, + .open = audamrnb_open, + .release = audamrnb_release, + .read = audamrnb_read, + .write = audamrnb_write, + .unlocked_ioctl = audamrnb_ioctl, +}; + +struct miscdevice audio_amrnb_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_amrnb", + .fops = &audio_amrnb_fops, +}; + +static int __init audamrnb_init(void) +{ + mutex_init(&the_amrnb_audio.lock); + mutex_init(&the_amrnb_audio.write_lock); + mutex_init(&the_amrnb_audio.read_lock); + spin_lock_init(&the_amrnb_audio.dsp_lock); + init_waitqueue_head(&the_amrnb_audio.write_wait); + init_waitqueue_head(&the_amrnb_audio.read_wait); + the_amrnb_audio.read_data = NULL; + return misc_register(&audio_amrnb_misc); +} + +static void __exit audamrnb_exit(void) +{ + misc_deregister(&audio_amrnb_misc); +} + +module_init(audamrnb_init); +module_exit(audamrnb_exit); + +MODULE_DESCRIPTION("MSM AMR-NB driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("QUALCOMM Inc"); diff --git a/arch/arm/mach-msm/qdsp5/audio_evrc.c b/arch/arm/mach-msm/qdsp5/audio_evrc.c new file mode 100644 index 0000000000000..8ee8d53ce5580 --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/audio_evrc.c @@ -0,0 +1,844 @@ +/* arch/arm/mach-msm/audio_evrc.c + * + * Copyright (c) 2008 QUALCOMM USA, INC. + * + * This code also borrows from audio_aac.c, which is + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * See the GNU General Public License for more details. + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can find it at http://www.fsf.org. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include "audmgr.h" + +#include +#include +#include +#include + +#include "adsp.h" + +#ifdef DEBUG +#define dprintk(format, arg...) \ + printk(KERN_DEBUG format, ## arg) +#else +#define dprintk(format, arg...) 
do {} while (0) +#endif + +/* Hold 30 packets of 24 bytes each*/ +#define BUFSZ 720 +#define DMASZ (BUFSZ * 2) + +#define AUDDEC_DEC_EVRC 12 + +#define PCM_BUFSZ_MIN 1600 /* 100ms worth of data */ +#define PCM_BUF_MAX_COUNT 5 +/* DSP only accepts 5 buffers at most + * but support 2 buffers currently + */ +#define EVRC_DECODED_FRSZ 320 /* EVRC 20ms 8KHz mono PCM size */ + +#define ROUTING_MODE_FTRT 1 +#define ROUTING_MODE_RT 2 +/* Decoder status received from AUDPPTASK */ +#define AUDPP_DEC_STATUS_SLEEP 0 +#define AUDPP_DEC_STATUS_INIT 1 +#define AUDPP_DEC_STATUS_CFG 2 +#define AUDPP_DEC_STATUS_PLAY 3 + +struct buffer { + void *data; + unsigned size; + unsigned used; /* Input usage actual DSP produced PCM size */ + unsigned addr; +}; + +struct audio { + struct buffer out[2]; + + spinlock_t dsp_lock; + + uint8_t out_head; + uint8_t out_tail; + uint8_t out_needed; /* number of buffers the dsp is waiting for */ + + atomic_t out_bytes; + + struct mutex lock; + struct mutex write_lock; + wait_queue_head_t write_wait; + + /* Host PCM section */ + struct buffer in[PCM_BUF_MAX_COUNT]; + struct mutex read_lock; + wait_queue_head_t read_wait; /* Wait queue for read */ + char *read_data; /* pointer to reader buffer */ + dma_addr_t read_phys; /* physical address of reader buffer */ + uint8_t read_next; /* index to input buffers to be read next */ + uint8_t fill_next; /* index to buffer that DSP should be filling */ + uint8_t pcm_buf_count; /* number of pcm buffer allocated */ + /* ---- End of Host PCM section */ + + struct msm_adsp_module *audplay; + struct audmgr audmgr; + + /* data allocated for various buffers */ + char *data; + dma_addr_t phys; + + uint8_t opened:1; + uint8_t enabled:1; + uint8_t running:1; + uint8_t stopped:1; /* set when stopped, cleared on flush */ + uint8_t pcm_feedback:1; + uint8_t buf_refresh:1; + + unsigned volume; + uint16_t dec_id; + uint32_t read_ptr_offset; +}; +static struct audio the_evrc_audio; + +static int auddec_dsp_config(struct audio *audio, int enable); +static void audpp_cmd_cfg_adec_params(struct audio *audio); +static void audpp_cmd_cfg_routing_mode(struct audio *audio); +static void audevrc_send_data(struct audio *audio, unsigned needed); +static void audevrc_dsp_event(void *private, unsigned id, uint16_t *msg); +static void audevrc_config_hostpcm(struct audio *audio); +static void audevrc_buffer_refresh(struct audio *audio); + +/* must be called with audio->lock held */ +static int audevrc_enable(struct audio *audio) +{ + struct audmgr_config cfg; + int rc; + + if (audio->enabled) + return 0; + + audio->out_tail = 0; + audio->out_needed = 0; + + cfg.tx_rate = RPC_AUD_DEF_SAMPLE_RATE_NONE; + cfg.rx_rate = RPC_AUD_DEF_SAMPLE_RATE_48000; + cfg.def_method = RPC_AUD_DEF_METHOD_PLAYBACK; + cfg.codec = RPC_AUD_DEF_CODEC_EVRC; + cfg.snd_method = RPC_SND_METHOD_MIDI; + + rc = audmgr_enable(&audio->audmgr, &cfg); + if (rc < 0) + return rc; + + if (msm_adsp_enable(audio->audplay)) { + pr_err("audio: msm_adsp_enable(audplay) failed\n"); + audmgr_disable(&audio->audmgr); + return -ENODEV; + } + + if (audpp_enable(audio->dec_id, audevrc_dsp_event, audio)) { + pr_err("audio: audpp_enable() failed\n"); + msm_adsp_disable(audio->audplay); + audmgr_disable(&audio->audmgr); + return -ENODEV; + } + audio->enabled = 1; + return 0; +} + +/* must be called with audio->lock held */ +static int audevrc_disable(struct audio *audio) +{ + if (audio->enabled) { + audio->enabled = 0; + auddec_dsp_config(audio, 0); + wake_up(&audio->write_wait); + wake_up(&audio->read_wait); + 
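+		/*
+		 * Readers and writers have been woken; now shut down the
+		 * decoder task, the postprocessor routing for this decoder
+		 * and the audio path.
+		 */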
msm_adsp_disable(audio->audplay); + audpp_disable(audio->dec_id, audio); + audmgr_disable(&audio->audmgr); + audio->out_needed = 0; + } + return 0; +} + +/* ------------------- dsp --------------------- */ + +static void audevrc_update_pcm_buf_entry(struct audio *audio, + uint32_t *payload) +{ + uint8_t index; + unsigned long flags; + + spin_lock_irqsave(&audio->dsp_lock, flags); + for (index = 0; index < payload[1]; index++) { + if (audio->in[audio->fill_next].addr + == payload[2 + index * 2]) { + dprintk("audevrc_update_pcm_buf_entry: in[%d] ready\n", + audio->fill_next); + audio->in[audio->fill_next].used = + payload[3 + index * 2]; + if ((++audio->fill_next) == audio->pcm_buf_count) + audio->fill_next = 0; + + } else { + pr_err + ("audevrc_update_pcm_buf_entry: expected=%x ret=%x\n", + audio->in[audio->fill_next].addr, + payload[1 + index * 2]); + break; + } + } + if (audio->in[audio->fill_next].used == 0) { + audevrc_buffer_refresh(audio); + } else { + dprintk("audevrc_update_pcm_buf_entry: read cannot keep up\n"); + audio->buf_refresh = 1; + } + + spin_unlock_irqrestore(&audio->dsp_lock, flags); + wake_up(&audio->read_wait); +} + +static void audplay_dsp_event(void *data, unsigned id, size_t len, + void (*getevent) (void *ptr, size_t len)) +{ + struct audio *audio = data; + uint32_t msg[28]; + getevent(msg, sizeof(msg)); + + dprintk("audplay_dsp_event: msg_id=%x\n", id); + switch (id) { + case AUDPLAY_MSG_DEC_NEEDS_DATA: + audevrc_send_data(audio, 1); + break; + case AUDPLAY_MSG_BUFFER_UPDATE: + dprintk("audevrc_update_pcm_buf_entry:======> \n"); + audevrc_update_pcm_buf_entry(audio, msg); + break; + default: + pr_err("unexpected message from decoder \n"); + } +} + +static void audevrc_dsp_event(void *private, unsigned id, uint16_t *msg) +{ + struct audio *audio = private; + + switch (id) { + case AUDPP_MSG_STATUS_MSG:{ + unsigned status = msg[1]; + + switch (status) { + case AUDPP_DEC_STATUS_SLEEP: + dprintk("decoder status: sleep \n"); + break; + + case AUDPP_DEC_STATUS_INIT: + dprintk("decoder status: init \n"); + audpp_cmd_cfg_routing_mode(audio); + break; + + case AUDPP_DEC_STATUS_CFG: + dprintk("decoder status: cfg \n"); + break; + case AUDPP_DEC_STATUS_PLAY: + dprintk("decoder status: play \n"); + if (audio->pcm_feedback) { + audevrc_config_hostpcm(audio); + audevrc_buffer_refresh(audio); + } + break; + default: + pr_err("unknown decoder status \n"); + } + break; + } + case AUDPP_MSG_CFG_MSG: + if (msg[0] == AUDPP_MSG_ENA_ENA) { + dprintk("audevrc_dsp_event: CFG_MSG ENABLE\n"); + auddec_dsp_config(audio, 1); + audio->out_needed = 0; + audio->running = 1; + audpp_set_volume_and_pan(audio->dec_id, audio->volume, + 0); + audpp_avsync(audio->dec_id, 22050); + } else if (msg[0] == AUDPP_MSG_ENA_DIS) { + dprintk("audevrc_dsp_event: CFG_MSG DISABLE\n"); + audpp_avsync(audio->dec_id, 0); + audio->running = 0; + } else { + pr_err("audevrc_dsp_event: CFG_MSG %d?\n", msg[0]); + } + break; + case AUDPP_MSG_ROUTING_ACK: + dprintk("audevrc_dsp_event: ROUTING_ACK\n"); + audpp_cmd_cfg_adec_params(audio); + break; + + default: + pr_err("audevrc_dsp_event: UNKNOWN (%d)\n", id); + } + +} + +struct msm_adsp_ops audplay_adsp_ops_evrc = { + .event = audplay_dsp_event, +}; + +#define audplay_send_queue0(audio, cmd, len) \ + msm_adsp_write(audio->audplay, QDSP_uPAudPlay0BitStreamCtrlQueue, \ + cmd, len) + +static int auddec_dsp_config(struct audio *audio, int enable) +{ + audpp_cmd_cfg_dec_type cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cmd_id = AUDPP_CMD_CFG_DEC_TYPE; + if (enable) + cmd.dec0_cfg 
= AUDPP_CMD_UPDATDE_CFG_DEC | + AUDPP_CMD_ENA_DEC_V | AUDDEC_DEC_EVRC; + else + cmd.dec0_cfg = AUDPP_CMD_UPDATDE_CFG_DEC | AUDPP_CMD_DIS_DEC_V; + + return audpp_send_queue1(&cmd, sizeof(cmd)); +} + +static void audpp_cmd_cfg_adec_params(struct audio *audio) +{ + struct audpp_cmd_cfg_adec_params_evrc cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.common.cmd_id = AUDPP_CMD_CFG_ADEC_PARAMS; + cmd.common.length = sizeof(cmd); + cmd.common.dec_id = audio->dec_id; + cmd.common.input_sampling_frequency = 8000; + cmd.stereo_cfg = AUDPP_CMD_PCM_INTF_MONO_V; + + audpp_send_queue2(&cmd, sizeof(cmd)); +} + +static void audpp_cmd_cfg_routing_mode(struct audio *audio) +{ + struct audpp_cmd_routing_mode cmd; + dprintk("audpp_cmd_cfg_routing_mode()\n"); + memset(&cmd, 0, sizeof(cmd)); + cmd.cmd_id = AUDPP_CMD_ROUTING_MODE; + cmd.object_number = audio->dec_id; + if (audio->pcm_feedback) + cmd.routing_mode = ROUTING_MODE_FTRT; + else + cmd.routing_mode = ROUTING_MODE_RT; + + audpp_send_queue1(&cmd, sizeof(cmd)); +} + +static int audplay_dsp_send_data_avail(struct audio *audio, + unsigned idx, unsigned len) +{ + audplay_cmd_bitstream_data_avail cmd; + + cmd.cmd_id = AUDPLAY_CMD_BITSTREAM_DATA_AVAIL; + cmd.decoder_id = audio->dec_id; + cmd.buf_ptr = audio->out[idx].addr; + cmd.buf_size = len / 2; + cmd.partition_number = 0; + return audplay_send_queue0(audio, &cmd, sizeof(cmd)); +} + +static void audevrc_buffer_refresh(struct audio *audio) +{ + struct audplay_cmd_buffer_refresh refresh_cmd; + + refresh_cmd.cmd_id = AUDPLAY_CMD_BUFFER_REFRESH; + refresh_cmd.num_buffers = 1; + refresh_cmd.buf0_address = audio->in[audio->fill_next].addr; + refresh_cmd.buf0_length = audio->in[audio->fill_next].size; + + refresh_cmd.buf_read_count = 0; + dprintk("audplay_buffer_fresh: buf0_addr=%x buf0_len=%d\n", + refresh_cmd.buf0_address, refresh_cmd.buf0_length); + audplay_send_queue0(audio, &refresh_cmd, sizeof(refresh_cmd)); +} + +static void audevrc_config_hostpcm(struct audio *audio) +{ + struct audplay_cmd_hpcm_buf_cfg cfg_cmd; + + dprintk("audevrc_config_hostpcm()\n"); + cfg_cmd.cmd_id = AUDPLAY_CMD_HPCM_BUF_CFG; + cfg_cmd.max_buffers = 1; + cfg_cmd.byte_swap = 0; + cfg_cmd.hostpcm_config = (0x8000) | (0x4000); + cfg_cmd.feedback_frequency = 1; + cfg_cmd.partition_number = 0; + audplay_send_queue0(audio, &cfg_cmd, sizeof(cfg_cmd)); + +} + +static void audevrc_send_data(struct audio *audio, unsigned needed) +{ + struct buffer *frame; + unsigned long flags; + + spin_lock_irqsave(&audio->dsp_lock, flags); + if (!audio->running) + goto done; + + if (needed) { + /* We were called from the callback because the DSP + * requested more data. Note that the DSP does want + * more data, and if a buffer was in-flight, mark it + * as available (since the DSP must now be done with + * it). + */ + audio->out_needed = 1; + frame = audio->out + audio->out_tail; + if (frame->used == 0xffffffff) { + dprintk("frame %d free\n", audio->out_tail); + frame->used = 0; + audio->out_tail ^= 1; + wake_up(&audio->write_wait); + } + } + + if (audio->out_needed) { + /* If the DSP currently wants data and we have a + * buffer available, we will send it and reset + * the needed flag. 
We'll mark the buffer as in-flight + * so that it won't be recycled until the next buffer + * is requested + */ + + frame = audio->out + audio->out_tail; + if (frame->used) { + BUG_ON(frame->used == 0xffffffff); + dprintk("frame %d busy\n", audio->out_tail); + audplay_dsp_send_data_avail(audio, audio->out_tail, + frame->used); + frame->used = 0xffffffff; + audio->out_needed = 0; + } + } +done: + spin_unlock_irqrestore(&audio->dsp_lock, flags); +} + +/* ------------------- device --------------------- */ + +static void audevrc_flush(struct audio *audio) +{ + audio->out[0].used = 0; + audio->out[1].used = 0; + audio->out_head = 0; + audio->out_tail = 0; + audio->stopped = 0; + atomic_set(&audio->out_bytes, 0); +} + +static void audevrc_flush_pcm_buf(struct audio *audio) +{ + uint8_t index; + + for (index = 0; index < PCM_BUF_MAX_COUNT; index++) + audio->in[index].used = 0; + + audio->read_next = 0; + audio->fill_next = 0; +} + +static long audevrc_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct audio *audio = file->private_data; + int rc = 0; + + dprintk("audevrc_ioctl() cmd = %d\n", cmd); + + if (cmd == AUDIO_GET_STATS) { + struct msm_audio_stats stats; + stats.byte_count = audpp_avsync_byte_count(audio->dec_id); + stats.sample_count = audpp_avsync_sample_count(audio->dec_id); + if (copy_to_user((void *)arg, &stats, sizeof(stats))) + return -EFAULT; + return 0; + } + if (cmd == AUDIO_SET_VOLUME) { + unsigned long flags; + spin_lock_irqsave(&audio->dsp_lock, flags); + audio->volume = arg; + if (audio->running) + audpp_set_volume_and_pan(audio->dec_id, arg, 0); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + return 0; + } + mutex_lock(&audio->lock); + switch (cmd) { + case AUDIO_START: + rc = audevrc_enable(audio); + break; + case AUDIO_STOP: + rc = audevrc_disable(audio); + audio->stopped = 1; + break; + case AUDIO_SET_CONFIG:{ + dprintk("AUDIO_SET_CONFIG not applicable \n"); + break; + } + case AUDIO_GET_CONFIG:{ + struct msm_audio_config config; + config.buffer_size = BUFSZ; + config.buffer_count = 2; + config.sample_rate = 8000; + config.channel_count = 1; + config.unused[0] = 0; + config.unused[1] = 0; + config.unused[2] = 0; + if (copy_to_user((void *)arg, &config, sizeof(config))) + rc = -EFAULT; + else + rc = 0; + break; + } + case AUDIO_GET_PCM_CONFIG:{ + struct msm_audio_pcm_config config; + config.pcm_feedback = 0; + config.buffer_count = PCM_BUF_MAX_COUNT; + config.buffer_size = PCM_BUFSZ_MIN; + if (copy_to_user((void *)arg, &config, sizeof(config))) + rc = -EFAULT; + else + rc = 0; + break; + } + case AUDIO_SET_PCM_CONFIG:{ + struct msm_audio_pcm_config config; + if (copy_from_user + (&config, (void *)arg, sizeof(config))) { + rc = -EFAULT; + break; + } + if ((config.buffer_count > PCM_BUF_MAX_COUNT) || + (config.buffer_count == 1)) + config.buffer_count = PCM_BUF_MAX_COUNT; + + if (config.buffer_size < PCM_BUFSZ_MIN) + config.buffer_size = PCM_BUFSZ_MIN; + + /* Check if pcm feedback is required */ + if ((config.pcm_feedback) && (!audio->read_data)) { + dprintk("audevrc_ioctl: allocate PCM buf %d\n", + config.buffer_count * + config.buffer_size); + audio->read_data = + dma_alloc_coherent(NULL, + config.buffer_size * + config.buffer_count, + &audio->read_phys, + GFP_KERNEL); + if (!audio->read_data) { + pr_err + ("audevrc_ioctl: no mem for pcm buf\n"); + rc = -1; + } else { + uint8_t index; + uint32_t offset = 0; + audio->pcm_feedback = 1; + audio->buf_refresh = 0; + audio->pcm_buf_count = + config.buffer_count; + audio->read_next = 0; + 
audio->fill_next = 0; + + for (index = 0; + index < config.buffer_count; + index++) { + audio->in[index].data = + audio->read_data + offset; + audio->in[index].addr = + audio->read_phys + offset; + audio->in[index].size = + config.buffer_size; + audio->in[index].used = 0; + offset += config.buffer_size; + } + rc = 0; + } + } else { + rc = 0; + } + break; + } + case AUDIO_PAUSE: + dprintk("%s: AUDIO_PAUSE %ld\n", __func__, arg); + rc = audpp_pause(audio->dec_id, (int) arg); + break; + default: + rc = -EINVAL; + } + mutex_unlock(&audio->lock); + return rc; +} + +static ssize_t audevrc_read(struct file *file, char __user *buf, size_t count, + loff_t *pos) +{ + struct audio *audio = file->private_data; + const char __user *start = buf; + int rc = 0; + if (!audio->pcm_feedback) { + return 0; + /* PCM feedback is not enabled. Nothing to read */ + } + mutex_lock(&audio->read_lock); + dprintk("audevrc_read() \n"); + while (count > 0) { + rc = wait_event_interruptible(audio->read_wait, + (audio->in[audio->read_next]. + used > 0) || (audio->stopped)); + dprintk("audevrc_read() wait terminated \n"); + if (rc < 0) + break; + if (audio->stopped) { + rc = -EBUSY; + break; + } + if (count < audio->in[audio->read_next].used) { + /* Read must happen in frame boundary. Since driver does + * not know frame size, read count must be greater or + * equal to size of PCM samples + */ + dprintk("audevrc_read:read stop - partial frame\n"); + break; + } else { + dprintk("audevrc_read: read from in[%d]\n", + audio->read_next); + if (copy_to_user + (buf, audio->in[audio->read_next].data, + audio->in[audio->read_next].used)) { + pr_err("audevrc_read: invalid addr %x \n", + (unsigned int)buf); + rc = -EFAULT; + break; + } + count -= audio->in[audio->read_next].used; + buf += audio->in[audio->read_next].used; + audio->in[audio->read_next].used = 0; + if ((++audio->read_next) == audio->pcm_buf_count) + audio->read_next = 0; + if (audio->in[audio->read_next].used == 0) + break; /* No data ready at this moment + * Exit while loop to prevent + * output thread sleep too long + */ + + } + } + if (audio->buf_refresh) { + audio->buf_refresh = 0; + dprintk("audevrc_read: kick start pcm feedback again\n"); + audevrc_buffer_refresh(audio); + } + mutex_unlock(&audio->read_lock); + if (buf > start) + rc = buf - start; + dprintk("audevrc_read: read %d bytes\n", rc); + return rc; +} + +static ssize_t audevrc_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + struct audio *audio = file->private_data; + const char __user *start = buf; + struct buffer *frame; + size_t xfer; + int rc = 0; + + if (count & 1) + return -EINVAL; + mutex_lock(&audio->write_lock); + dprintk("audevrc_write() \n"); + while (count > 0) { + frame = audio->out + audio->out_head; + rc = wait_event_interruptible(audio->write_wait, + (frame->used == 0) + || (audio->stopped)); + if (rc < 0) + break; + if (audio->stopped) { + rc = -EBUSY; + break; + } + xfer = (count > frame->size) ? 
frame->size : count; + if (copy_from_user(frame->data, buf, xfer)) { + rc = -EFAULT; + break; + } + + frame->used = xfer; + audio->out_head ^= 1; + count -= xfer; + buf += xfer; + + audevrc_send_data(audio, 0); + + } + mutex_unlock(&audio->write_lock); + if (buf > start) + return buf - start; + return rc; +} + +static int audevrc_release(struct inode *inode, struct file *file) +{ + struct audio *audio = file->private_data; + + dprintk("audevrc_release()\n"); + + mutex_lock(&audio->lock); + audevrc_disable(audio); + audevrc_flush(audio); + audevrc_flush_pcm_buf(audio); + msm_adsp_put(audio->audplay); + audio->audplay = NULL; + audio->opened = 0; + dma_free_coherent(NULL, DMASZ, audio->data, audio->phys); + audio->data = NULL; + if (audio->read_data != NULL) { + dma_free_coherent(NULL, + audio->in[0].size * audio->pcm_buf_count, + audio->read_data, audio->read_phys); + audio->read_data = NULL; + } + audio->pcm_feedback = 0; + mutex_unlock(&audio->lock); + return 0; +} + +static struct audio the_evrc_audio; + +static int audevrc_open(struct inode *inode, struct file *file) +{ + struct audio *audio = &the_evrc_audio; + int rc; + + if (audio->opened) { + pr_err("audio: busy\n"); + return -EBUSY; + } + + /* Acquire Lock */ + mutex_lock(&audio->lock); + + if (!audio->data) { + audio->data = dma_alloc_coherent(NULL, DMASZ, + &audio->phys, GFP_KERNEL); + if (!audio->data) { + pr_err("audio: could not allocate DMA buffers\n"); + rc = -ENOMEM; + goto dma_fail; + } + } + + rc = audmgr_open(&audio->audmgr); + if (rc) + goto audmgr_fail; + + rc = msm_adsp_get("AUDPLAY0TASK", &audio->audplay, + &audplay_adsp_ops_evrc, audio); + if (rc) { + pr_err("audio: failed to get audplay0 dsp module\n"); + goto adsp_fail; + } + + audio->dec_id = 0; + + audio->out[0].data = audio->data + 0; + audio->out[0].addr = audio->phys + 0; + audio->out[0].size = BUFSZ; + + audio->out[1].data = audio->data + BUFSZ; + audio->out[1].addr = audio->phys + BUFSZ; + audio->out[1].size = BUFSZ; + + audio->volume = 0x3FFF; + + audevrc_flush(audio); + + audio->opened = 1; + file->private_data = audio; + + mutex_unlock(&audio->lock); + return rc; + +adsp_fail: + audmgr_close(&audio->audmgr); +audmgr_fail: + dma_free_coherent(NULL, DMASZ, audio->data, audio->phys); +dma_fail: + mutex_unlock(&audio->lock); + return rc; +} + +static struct file_operations audio_evrc_fops = { + .owner = THIS_MODULE, + .open = audevrc_open, + .release = audevrc_release, + .read = audevrc_read, + .write = audevrc_write, + .unlocked_ioctl = audevrc_ioctl, +}; + +struct miscdevice audio_evrc_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_evrc", + .fops = &audio_evrc_fops, +}; + +static int __init audevrc_init(void) +{ + mutex_init(&the_evrc_audio.lock); + mutex_init(&the_evrc_audio.write_lock); + mutex_init(&the_evrc_audio.read_lock); + spin_lock_init(&the_evrc_audio.dsp_lock); + init_waitqueue_head(&the_evrc_audio.write_wait); + init_waitqueue_head(&the_evrc_audio.read_wait); + the_evrc_audio.read_data = NULL; + return misc_register(&audio_evrc_misc); +} + +static void __exit audevrc_exit(void) +{ + misc_deregister(&audio_evrc_misc); +} + +module_init(audevrc_init); +module_exit(audevrc_exit); + +MODULE_DESCRIPTION("MSM EVRC driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("QUALCOMM Inc"); diff --git a/arch/arm/mach-msm/qdsp5/audio_in.c b/arch/arm/mach-msm/qdsp5/audio_in.c new file mode 100644 index 0000000000000..2a67209e19263 --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/audio_in.c @@ -0,0 +1,967 @@ +/* arch/arm/mach-msm/qdsp5/audio_in.c + * + * pcm 
audio input device + * + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include +#include +#include +#include + +#include "audmgr.h" + +#include +#include +#include +#include + +/* for queue ids - should be relative to module number*/ +#include "adsp.h" + +/* FRAME_NUM must be a power of two */ +#define FRAME_NUM (8) +#define FRAME_SIZE (2052 * 2) +#define MONO_DATA_SIZE (2048) +#define STEREO_DATA_SIZE (MONO_DATA_SIZE * 2) +#define DMASZ (FRAME_SIZE * FRAME_NUM) + +#define AGC_PARAM_SIZE (20) +#define NS_PARAM_SIZE (6) +#define IIR_PARAM_SIZE (48) +#define DEBUG (0) + +#define AGC_ENABLE 0x0001 +#define NS_ENABLE 0x0002 +#define IIR_ENABLE 0x0004 + +struct tx_agc_config { + uint16_t agc_params[AGC_PARAM_SIZE]; +}; + +struct ns_config { + uint16_t ns_params[NS_PARAM_SIZE]; +}; + +struct tx_iir_filter { + uint16_t num_bands; + uint16_t iir_params[IIR_PARAM_SIZE]; +}; + +struct audpre_cmd_iir_config_type { + uint16_t cmd_id; + uint16_t active_flag; + uint16_t num_bands; + uint16_t iir_params[IIR_PARAM_SIZE]; +}; + +struct buffer { + void *data; + uint32_t size; + uint32_t read; + uint32_t addr; +}; + +struct audio_in { + struct buffer in[FRAME_NUM]; + + spinlock_t dsp_lock; + + atomic_t in_bytes; + + struct mutex lock; + struct mutex read_lock; + wait_queue_head_t wait; + + struct msm_adsp_module *audpre; + struct msm_adsp_module *audrec; + + /* configuration to use on next enable */ + uint32_t samp_rate; + uint32_t channel_mode; + uint32_t buffer_size; /* 2048 for mono, 4096 for stereo */ + uint32_t type; /* 0 for PCM ,1 for AAC */ + uint32_t dsp_cnt; + uint32_t in_head; /* next buffer dsp will write */ + uint32_t in_tail; /* next buffer read() will read */ + uint32_t in_count; /* number of buffers available to read() */ + + unsigned short samp_rate_index; + + struct audmgr audmgr; + + /* data allocated for various buffers */ + char *data; + dma_addr_t phys; + + int opened; + int enabled; + int running; + int stopped; /* set when stopped, cleared on flush */ + + /* audpre settings */ + int agc_enable; + struct tx_agc_config agc; + + int ns_enable; + struct ns_config ns; + + int iir_enable; + struct tx_iir_filter iir; +}; + +static int audio_in_dsp_enable(struct audio_in *audio, int enable); +static int audio_in_encoder_config(struct audio_in *audio); +static int audio_dsp_read_buffer(struct audio_in *audio, uint32_t read_cnt); +static void audio_flush(struct audio_in *audio); +static int audio_dsp_set_agc(struct audio_in *audio); +static int audio_dsp_set_ns(struct audio_in *audio); +static int audio_dsp_set_tx_iir(struct audio_in *audio); + +static unsigned convert_dsp_samp_index(unsigned index) +{ + switch (index) { + case 48000: return AUDREC_CMD_SAMP_RATE_INDX_48000; + case 44100: return AUDREC_CMD_SAMP_RATE_INDX_44100; + case 32000: return AUDREC_CMD_SAMP_RATE_INDX_32000; + case 24000: return AUDREC_CMD_SAMP_RATE_INDX_24000; + case 22050: return AUDREC_CMD_SAMP_RATE_INDX_22050; + case 16000: 
return AUDREC_CMD_SAMP_RATE_INDX_16000; + case 12000: return AUDREC_CMD_SAMP_RATE_INDX_12000; + case 11025: return AUDREC_CMD_SAMP_RATE_INDX_11025; + case 8000: return AUDREC_CMD_SAMP_RATE_INDX_8000; + default: return AUDREC_CMD_SAMP_RATE_INDX_11025; + } +} + +static unsigned convert_samp_rate(unsigned hz) +{ + switch (hz) { + case 48000: return RPC_AUD_DEF_SAMPLE_RATE_48000; + case 44100: return RPC_AUD_DEF_SAMPLE_RATE_44100; + case 32000: return RPC_AUD_DEF_SAMPLE_RATE_32000; + case 24000: return RPC_AUD_DEF_SAMPLE_RATE_24000; + case 22050: return RPC_AUD_DEF_SAMPLE_RATE_22050; + case 16000: return RPC_AUD_DEF_SAMPLE_RATE_16000; + case 12000: return RPC_AUD_DEF_SAMPLE_RATE_12000; + case 11025: return RPC_AUD_DEF_SAMPLE_RATE_11025; + case 8000: return RPC_AUD_DEF_SAMPLE_RATE_8000; + default: return RPC_AUD_DEF_SAMPLE_RATE_11025; + } +} + +static unsigned convert_samp_index(unsigned index) +{ + switch (index) { + case RPC_AUD_DEF_SAMPLE_RATE_48000: return 48000; + case RPC_AUD_DEF_SAMPLE_RATE_44100: return 44100; + case RPC_AUD_DEF_SAMPLE_RATE_32000: return 32000; + case RPC_AUD_DEF_SAMPLE_RATE_24000: return 24000; + case RPC_AUD_DEF_SAMPLE_RATE_22050: return 22050; + case RPC_AUD_DEF_SAMPLE_RATE_16000: return 16000; + case RPC_AUD_DEF_SAMPLE_RATE_12000: return 12000; + case RPC_AUD_DEF_SAMPLE_RATE_11025: return 11025; + case RPC_AUD_DEF_SAMPLE_RATE_8000: return 8000; + default: return 11025; + } +} + +/* must be called with audio->lock held */ +static int audio_in_enable(struct audio_in *audio) +{ + struct audmgr_config cfg; + int rc; + + if (audio->enabled) + return 0; + + cfg.tx_rate = audio->samp_rate; + cfg.rx_rate = RPC_AUD_DEF_SAMPLE_RATE_NONE; + cfg.def_method = RPC_AUD_DEF_METHOD_RECORD; + if (audio->type == AUDREC_CMD_TYPE_0_INDEX_WAV) + cfg.codec = RPC_AUD_DEF_CODEC_PCM; + else + cfg.codec = RPC_AUD_DEF_CODEC_AAC; + cfg.snd_method = RPC_SND_METHOD_MIDI; + + rc = audmgr_enable(&audio->audmgr, &cfg); + if (rc < 0) + return rc; + + if (msm_adsp_enable(audio->audpre)) { + pr_err("audrec: msm_adsp_enable(audpre) failed\n"); + return -ENODEV; + } + if (msm_adsp_enable(audio->audrec)) { + pr_err("audrec: msm_adsp_enable(audrec) failed\n"); + return -ENODEV; + } + + audio->enabled = 1; + audio_in_dsp_enable(audio, 1); + + return 0; +} + +/* must be called with audio->lock held */ +static int audio_in_disable(struct audio_in *audio) +{ + if (audio->enabled) { + audio->enabled = 0; + + audio_in_dsp_enable(audio, 0); + + wake_up(&audio->wait); + + msm_adsp_disable(audio->audrec); + msm_adsp_disable(audio->audpre); + audmgr_disable(&audio->audmgr); + } + return 0; +} + +/* ------------------- dsp --------------------- */ +static void audpre_dsp_event(void *data, unsigned id, size_t len, + void (*getevent)(void *ptr, size_t len)) +{ + uint16_t msg[6]; /* may be a 32-bit event, which we ignore */ + getevent(msg, sizeof(msg)); + + switch (id) { + case AUDPREPROC_MSG_CMD_CFG_DONE_MSG: + pr_info("audpre: type %d, status_flag %d\n", msg[0], msg[1]); + break; + case AUDPREPROC_MSG_ERROR_MSG_ID: + pr_info("audpre: err_index %d\n", msg[0]); + break; + default: + pr_err("audpre: unknown event %d\n", id); + } +} + +struct audio_frame { + uint16_t count_low; + uint16_t count_high; + uint16_t bytes; + uint16_t unknown; + unsigned char samples[]; +} __attribute__((packed)); + +static void audio_in_get_dsp_frames(struct audio_in *audio) +{ + struct audio_frame *frame; + uint32_t index; + unsigned long flags; + + index = audio->in_head; + + /* XXX check for bogus frame size? 
*/ + + frame = (void *) (((char *)audio->in[index].data) - sizeof(*frame)); + + spin_lock_irqsave(&audio->dsp_lock, flags); + audio->in[index].size = frame->bytes; + + audio->in_head = (audio->in_head + 1) & (FRAME_NUM - 1); + + /* If overflow, move the tail index foward. */ + if (audio->in_head == audio->in_tail) + audio->in_tail = (audio->in_tail + 1) & (FRAME_NUM - 1); + else + audio->in_count++; + + audio_dsp_read_buffer(audio, audio->dsp_cnt++); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + + wake_up(&audio->wait); +} + +static void audrec_dsp_event(void *data, unsigned id, size_t len, + void (*getevent)(void *ptr, size_t len)) +{ + struct audio_in *audio = data; + uint16_t msg[6]; /* may be a 32-bit event, which we ignore */ + getevent(msg, sizeof(msg)); + + switch (id) { + case AUDREC_MSG_CMD_CFG_DONE_MSG: + if (msg[0] & AUDREC_MSG_CFG_DONE_TYPE_0_UPDATE) { + if (msg[0] & AUDREC_MSG_CFG_DONE_TYPE_0_ENA) { + pr_info("audpre: CFG ENABLED\n"); + audio_dsp_set_agc(audio); + audio_dsp_set_ns(audio); + audio_dsp_set_tx_iir(audio); + audio_in_encoder_config(audio); + } else { + pr_info("audrec: CFG SLEEP\n"); + audio->running = 0; + } + } else { + pr_info("audrec: CMD_CFG_DONE %x\n", msg[0]); + } + break; + case AUDREC_MSG_CMD_AREC_PARAM_CFG_DONE_MSG: { + pr_info("audrec: PARAM CFG DONE\n"); + audio->running = 1; + break; + } + case AUDREC_MSG_FATAL_ERR_MSG: + pr_err("audrec: ERROR %x\n", msg[0]); + break; + case AUDREC_MSG_PACKET_READY_MSG: +/* REC_DBG("type %x, count %d", msg[0], (msg[1] | (msg[2] << 16))); */ + audio_in_get_dsp_frames(audio); + break; + default: + pr_err("audrec: unknown event %d\n", id); + } +} + +struct msm_adsp_ops audpre_adsp_ops = { + .event = audpre_dsp_event, +}; + +struct msm_adsp_ops audrec_adsp_ops = { + .event = audrec_dsp_event, +}; + + +#define audio_send_queue_pre(audio, cmd, len) \ + msm_adsp_write(audio->audpre, QDSP_uPAudPreProcCmdQueue, cmd, len) +#define audio_send_queue_recbs(audio, cmd, len) \ + msm_adsp_write(audio->audrec, QDSP_uPAudRecBitStreamQueue, cmd, len) +#define audio_send_queue_rec(audio, cmd, len) \ + msm_adsp_write(audio->audrec, \ + QDSP_uPAudRecCmdQueue, cmd, len) + +static int audio_dsp_set_agc(struct audio_in *audio) +{ + audpreproc_cmd_cfg_agc_params cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cmd_id = AUDPREPROC_CMD_CFG_AGC_PARAMS; + + if (audio->agc_enable) { + /* cmd.tx_agc_param_mask = 0xFE00 from sample code */ + cmd.tx_agc_param_mask = + (1 << AUDPREPROC_CMD_TX_AGC_PARAM_MASK_COMP_SLOPE) | + (1 << AUDPREPROC_CMD_TX_AGC_PARAM_MASK_COMP_TH) | + (1 << AUDPREPROC_CMD_TX_AGC_PARAM_MASK_EXP_SLOPE) | + (1 << AUDPREPROC_CMD_TX_AGC_PARAM_MASK_EXP_TH) | + (1 << AUDPREPROC_CMD_TX_AGC_PARAM_MASK_COMP_AIG_FLAG) | + (1 << AUDPREPROC_CMD_TX_AGC_PARAM_MASK_COMP_STATIC_GAIN) | + (1 << AUDPREPROC_CMD_TX_AGC_PARAM_MASK_TX_AGC_ENA_FLAG); + cmd.tx_agc_enable_flag = + AUDPREPROC_CMD_TX_AGC_ENA_FLAG_ENA; + memcpy(&cmd.static_gain, &audio->agc.agc_params[0], + sizeof(uint16_t) * 6); + /* cmd.param_mask = 0xFFF0 from sample code */ + cmd.param_mask = + (1 << AUDPREPROC_CMD_PARAM_MASK_RMS_TAY) | + (1 << AUDPREPROC_CMD_PARAM_MASK_RELEASEK) | + (1 << AUDPREPROC_CMD_PARAM_MASK_DELAY) | + (1 << AUDPREPROC_CMD_PARAM_MASK_ATTACKK) | + (1 << AUDPREPROC_CMD_PARAM_MASK_LEAKRATE_SLOW) | + (1 << AUDPREPROC_CMD_PARAM_MASK_LEAKRATE_FAST) | + (1 << AUDPREPROC_CMD_PARAM_MASK_AIG_RELEASEK) | + (1 << AUDPREPROC_CMD_PARAM_MASK_AIG_MIN) | + (1 << AUDPREPROC_CMD_PARAM_MASK_AIG_MAX) | + (1 << AUDPREPROC_CMD_PARAM_MASK_LEAK_UP) | + (1 << 
AUDPREPROC_CMD_PARAM_MASK_LEAK_DOWN) | + (1 << AUDPREPROC_CMD_PARAM_MASK_AIG_ATTACKK); + memcpy(&cmd.aig_attackk, &audio->agc.agc_params[6], + sizeof(uint16_t) * 14); + + } else { + cmd.tx_agc_param_mask = + (1 << AUDPREPROC_CMD_TX_AGC_PARAM_MASK_TX_AGC_ENA_FLAG); + cmd.tx_agc_enable_flag = + AUDPREPROC_CMD_TX_AGC_ENA_FLAG_DIS; + } +#if DEBUG + pr_info("cmd_id = 0x%04x\n", cmd.cmd_id); + pr_info("tx_agc_param_mask = 0x%04x\n", cmd.tx_agc_param_mask); + pr_info("tx_agc_enable_flag = 0x%04x\n", cmd.tx_agc_enable_flag); + pr_info("static_gain = 0x%04x\n", cmd.static_gain); + pr_info("adaptive_gain_flag = 0x%04x\n", cmd.adaptive_gain_flag); + pr_info("expander_th = 0x%04x\n", cmd.expander_th); + pr_info("expander_slope = 0x%04x\n", cmd.expander_slope); + pr_info("compressor_th = 0x%04x\n", cmd.compressor_th); + pr_info("compressor_slope = 0x%04x\n", cmd.compressor_slope); + pr_info("param_mask = 0x%04x\n", cmd.param_mask); + pr_info("aig_attackk = 0x%04x\n", cmd.aig_attackk); + pr_info("aig_leak_down = 0x%04x\n", cmd.aig_leak_down); + pr_info("aig_leak_up = 0x%04x\n", cmd.aig_leak_up); + pr_info("aig_max = 0x%04x\n", cmd.aig_max); + pr_info("aig_min = 0x%04x\n", cmd.aig_min); + pr_info("aig_releasek = 0x%04x\n", cmd.aig_releasek); + pr_info("aig_leakrate_fast = 0x%04x\n", cmd.aig_leakrate_fast); + pr_info("aig_leakrate_slow = 0x%04x\n", cmd.aig_leakrate_slow); + pr_info("attackk_msw = 0x%04x\n", cmd.attackk_msw); + pr_info("attackk_lsw = 0x%04x\n", cmd.attackk_lsw); + pr_info("delay = 0x%04x\n", cmd.delay); + pr_info("releasek_msw = 0x%04x\n", cmd.releasek_msw); + pr_info("releasek_lsw = 0x%04x\n", cmd.releasek_lsw); + pr_info("rms_tav = 0x%04x\n", cmd.rms_tav); +#endif + return audio_send_queue_pre(audio, &cmd, sizeof(cmd)); +} + +static int audio_dsp_set_ns(struct audio_in *audio) +{ + audpreproc_cmd_cfg_ns_params cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cmd_id = AUDPREPROC_CMD_CFG_NS_PARAMS; + + if (audio->ns_enable) { + /* cmd.ec_mode_new is fixed as 0x0064 when enable from sample code */ + cmd.ec_mode_new = + AUDPREPROC_CMD_EC_MODE_NEW_NS_ENA | + AUDPREPROC_CMD_EC_MODE_NEW_HB_ENA | + AUDPREPROC_CMD_EC_MODE_NEW_VA_ENA; + memcpy(&cmd.dens_gamma_n, &audio->ns.ns_params, + sizeof(audio->ns.ns_params)); + } else { + cmd.ec_mode_new = + AUDPREPROC_CMD_EC_MODE_NEW_NLMS_DIS | + AUDPREPROC_CMD_EC_MODE_NEW_DES_DIS | + AUDPREPROC_CMD_EC_MODE_NEW_NS_DIS | + AUDPREPROC_CMD_EC_MODE_NEW_CNI_DIS | + AUDPREPROC_CMD_EC_MODE_NEW_NLES_DIS | + AUDPREPROC_CMD_EC_MODE_NEW_HB_DIS | + AUDPREPROC_CMD_EC_MODE_NEW_VA_DIS | + AUDPREPROC_CMD_EC_MODE_NEW_PCD_DIS | + AUDPREPROC_CMD_EC_MODE_NEW_FEHI_DIS | + AUDPREPROC_CMD_EC_MODE_NEW_NEHI_DIS | + AUDPREPROC_CMD_EC_MODE_NEW_NLPP_DIS | + AUDPREPROC_CMD_EC_MODE_NEW_FNE_DIS | + AUDPREPROC_CMD_EC_MODE_NEW_PRENLMS_DIS; + } +#if DEBUG + pr_info("cmd_id = 0x%04x\n", cmd.cmd_id); + pr_info("ec_mode_new = 0x%04x\n", cmd.ec_mode_new); + pr_info("dens_gamma_n = 0x%04x\n", cmd.dens_gamma_n); + pr_info("dens_nfe_block_size = 0x%04x\n", cmd.dens_nfe_block_size); + pr_info("dens_limit_ns = 0x%04x\n", cmd.dens_limit_ns); + pr_info("dens_limit_ns_d = 0x%04x\n", cmd.dens_limit_ns_d); + pr_info("wb_gamma_e = 0x%04x\n", cmd.wb_gamma_e); + pr_info("wb_gamma_n = 0x%04x\n", cmd.wb_gamma_n); +#endif + return audio_send_queue_pre(audio, &cmd, sizeof(cmd)); +} + +static int audio_dsp_set_tx_iir(struct audio_in *audio) +{ + struct audpre_cmd_iir_config_type cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cmd_id = AUDPREPROC_CMD_CFG_IIR_TUNING_FILTER_PARAMS; + + if (audio->iir_enable) { + 
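+		/*
+		 * Hand the user-supplied band count and IIR coefficients to
+		 * the DSP along with the enable flag.
+		 */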
cmd.active_flag = AUDPREPROC_CMD_IIR_ACTIVE_FLAG_ENA; + cmd.num_bands = audio->iir.num_bands; + memcpy(&cmd.iir_params, &audio->iir.iir_params, + sizeof(audio->iir.iir_params)); + } else { + cmd.active_flag = AUDPREPROC_CMD_IIR_ACTIVE_FLAG_DIS; + } +#if DEBUG + pr_info("cmd_id = 0x%04x\n", cmd.cmd_id); + pr_info("active_flag = 0x%04x\n", cmd.active_flag); +#endif + return audio_send_queue_pre(audio, &cmd, sizeof(cmd)); +} + +static int audio_in_dsp_enable(struct audio_in *audio, int enable) +{ + audrec_cmd_cfg cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cmd_id = AUDREC_CMD_CFG; + cmd.type_0 = enable ? AUDREC_CMD_TYPE_0_ENA : AUDREC_CMD_TYPE_0_DIS; + cmd.type_0 |= (AUDREC_CMD_TYPE_0_UPDATE | audio->type); + cmd.type_1 = 0; + + return audio_send_queue_rec(audio, &cmd, sizeof(cmd)); +} + +static int audio_in_encoder_config(struct audio_in *audio) +{ + audrec_cmd_arec0param_cfg cmd; + uint16_t *data = (void *) audio->data; + unsigned n; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cmd_id = AUDREC_CMD_AREC0PARAM_CFG; + cmd.ptr_to_extpkt_buffer_msw = audio->phys >> 16; + cmd.ptr_to_extpkt_buffer_lsw = audio->phys; + cmd.buf_len = FRAME_NUM; /* Both WAV and AAC use 8 frames */ + cmd.samp_rate_index = audio->samp_rate_index; + cmd.stereo_mode = audio->channel_mode; /* 0 for mono, 1 for stereo */ + + /* FIXME have no idea why cmd.rec_quality is fixed + * as 0x1C00 from sample code + */ + cmd.rec_quality = 0x1C00; + + /* prepare buffer pointers: + * Mono: 1024 samples + 4 halfword header + * Stereo: 2048 samples + 4 halfword header + * AAC + * Mono/Stere: 768 + 4 halfword header + */ + for (n = 0; n < FRAME_NUM; n++) { + audio->in[n].data = data + 4; + if (audio->type == AUDREC_CMD_TYPE_0_INDEX_WAV) + data += (4 + (audio->channel_mode ? 2048 : 1024)); + else if (audio->type == AUDREC_CMD_TYPE_0_INDEX_AAC) + data += (4 + 768); + } + + return audio_send_queue_rec(audio, &cmd, sizeof(cmd)); +} + +static int audio_dsp_read_buffer(struct audio_in *audio, uint32_t read_cnt) +{ + audrec_cmd_packet_ext_ptr cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cmd_id = AUDREC_CMD_PACKET_EXT_PTR; + /* Both WAV and AAC use AUDREC_CMD_TYPE_0 */ + cmd.type = AUDREC_CMD_TYPE_0; + cmd.curr_rec_count_msw = read_cnt >> 16; + cmd.curr_rec_count_lsw = read_cnt; + + return audio_send_queue_recbs(audio, &cmd, sizeof(cmd)); +} + +/* ------------------- device --------------------- */ + +static void audio_enable_agc(struct audio_in *audio, int enable) +{ + if (audio->agc_enable != enable) { + audio->agc_enable = enable; + if (audio->running) + audio_dsp_set_agc(audio); + } +} + +static void audio_enable_ns(struct audio_in *audio, int enable) +{ + if (audio->ns_enable != enable) { + audio->ns_enable = enable; + if (audio->running) + audio_dsp_set_ns(audio); + } +} + +static void audio_enable_tx_iir(struct audio_in *audio, int enable) +{ + if (audio->iir_enable != enable) { + audio->iir_enable = enable; + if (audio->running) + audio_dsp_set_tx_iir(audio); + } +} + +static void audio_flush(struct audio_in *audio) +{ + int i; + + audio->dsp_cnt = 0; + audio->in_head = 0; + audio->in_tail = 0; + audio->in_count = 0; + for (i = 0; i < FRAME_NUM; i++) { + audio->in[i].size = 0; + audio->in[i].read = 0; + } +} + +static long audio_in_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct audio_in *audio = file->private_data; + int rc; + + if (cmd == AUDIO_GET_STATS) { + struct msm_audio_stats stats; + stats.byte_count = atomic_read(&audio->in_bytes); + if (copy_to_user((void *) arg, &stats, sizeof(stats))) + return 
-EFAULT; + return 0; + } + + mutex_lock(&audio->lock); + switch (cmd) { + case AUDIO_START: + rc = audio_in_enable(audio); + break; + case AUDIO_STOP: + rc = audio_in_disable(audio); + audio->stopped = 1; + break; + case AUDIO_FLUSH: + if (audio->stopped) { + /* Make sure we're stopped and we wake any threads + * that might be blocked holding the read_lock. + * While audio->stopped read threads will always + * exit immediately. + */ + wake_up(&audio->wait); + mutex_lock(&audio->read_lock); + audio_flush(audio); + mutex_unlock(&audio->read_lock); + } + case AUDIO_SET_CONFIG: { + struct msm_audio_config cfg; + if (copy_from_user(&cfg, (void *) arg, sizeof(cfg))) { + rc = -EFAULT; + break; + } + if (cfg.channel_count == 1) { + cfg.channel_count = AUDREC_CMD_STEREO_MODE_MONO; + } else if (cfg.channel_count == 2) { + cfg.channel_count = AUDREC_CMD_STEREO_MODE_STEREO; + } else { + rc = -EINVAL; + break; + } + + if (cfg.type == 0) { + cfg.type = AUDREC_CMD_TYPE_0_INDEX_WAV; + } else if (cfg.type == 1) { + cfg.type = AUDREC_CMD_TYPE_0_INDEX_AAC; + } else { + rc = -EINVAL; + break; + } + audio->samp_rate = convert_samp_rate(cfg.sample_rate); + audio->samp_rate_index = + convert_dsp_samp_index(cfg.sample_rate); + audio->channel_mode = cfg.channel_count; + audio->buffer_size = + audio->channel_mode ? STEREO_DATA_SIZE + : MONO_DATA_SIZE; + audio->type = cfg.type; + rc = 0; + break; + } + case AUDIO_GET_CONFIG: { + struct msm_audio_config cfg; + cfg.buffer_size = audio->buffer_size; + cfg.buffer_count = FRAME_NUM; + cfg.sample_rate = convert_samp_index(audio->samp_rate); + if (audio->channel_mode == AUDREC_CMD_STEREO_MODE_MONO) + cfg.channel_count = 1; + else + cfg.channel_count = 2; + if (audio->type == AUDREC_CMD_TYPE_0_INDEX_WAV) + cfg.type = 0; + else + cfg.type = 1; + cfg.unused[0] = 0; + cfg.unused[1] = 0; + cfg.unused[2] = 0; + if (copy_to_user((void *) arg, &cfg, sizeof(cfg))) + rc = -EFAULT; + else + rc = 0; + break; + } + default: + rc = -EINVAL; + } + mutex_unlock(&audio->lock); + return rc; +} + +static ssize_t audio_in_read(struct file *file, + char __user *buf, + size_t count, loff_t *pos) +{ + struct audio_in *audio = file->private_data; + unsigned long flags; + const char __user *start = buf; + void *data; + uint32_t index; + uint32_t size; + int rc = 0; + + mutex_lock(&audio->read_lock); + while (count > 0) { + rc = wait_event_interruptible( + audio->wait, (audio->in_count > 0) || audio->stopped); + if (rc < 0) + break; + + if (audio->stopped) { + rc = -EBUSY; + break; + } + + index = audio->in_tail; + data = (uint8_t *) audio->in[index].data; + size = audio->in[index].size; + if (count >= size) { + if (copy_to_user(buf, data, size)) { + rc = -EFAULT; + break; + } + spin_lock_irqsave(&audio->dsp_lock, flags); + if (index != audio->in_tail) { + /* overrun -- data is invalid and we need to retry */ + spin_unlock_irqrestore(&audio->dsp_lock, flags); + continue; + } + audio->in[index].size = 0; + audio->in_tail = (audio->in_tail + 1) & (FRAME_NUM - 1); + audio->in_count--; + spin_unlock_irqrestore(&audio->dsp_lock, flags); + count -= size; + buf += size; + if (audio->type == AUDREC_CMD_TYPE_0_INDEX_AAC) + break; + } else { + pr_err("audio_in: short read\n"); + break; + } + if (audio->type == AUDREC_CMD_TYPE_0_INDEX_AAC) + break; /* AAC only read one frame */ + } + mutex_unlock(&audio->read_lock); + + if (buf > start) + return buf - start; + + return rc; +} + +static ssize_t audio_in_write(struct file *file, + const char __user *buf, + size_t count, loff_t *pos) +{ + return -EINVAL; +} + 
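+/*
+ * Illustrative user-space usage of this capture device: a sketch inferred
+ * from the ioctls implemented above, not a documented ABI.  The node name
+ * "/dev/msm_pcm_in" is assumed from the misc device registered below;
+ * cfg, buf and consume() are placeholders.
+ *
+ *	fd = open("/dev/msm_pcm_in", O_RDONLY);
+ *	ioctl(fd, AUDIO_GET_CONFIG, &cfg);
+ *	cfg.sample_rate = 8000;
+ *	cfg.channel_count = 1;
+ *	ioctl(fd, AUDIO_SET_CONFIG, &cfg);
+ *	ioctl(fd, AUDIO_START, 0);
+ *	while (read(fd, buf, cfg.buffer_size) > 0)
+ *		consume(buf);
+ *	ioctl(fd, AUDIO_STOP, 0);
+ */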
+static int audio_in_release(struct inode *inode, struct file *file) +{ + struct audio_in *audio = file->private_data; + + mutex_lock(&audio->lock); + audio_in_disable(audio); + audio_flush(audio); + msm_adsp_put(audio->audrec); + msm_adsp_put(audio->audpre); + audio->audrec = NULL; + audio->audpre = NULL; + audio->opened = 0; + mutex_unlock(&audio->lock); + return 0; +} + +struct audio_in the_audio_in; + +static int audio_in_open(struct inode *inode, struct file *file) +{ + struct audio_in *audio = &the_audio_in; + int rc; + + mutex_lock(&audio->lock); + if (audio->opened) { + rc = -EBUSY; + goto done; + } + + /* Settings will be re-config at AUDIO_SET_CONFIG, + * but at least we need to have initial config + */ + audio->samp_rate = RPC_AUD_DEF_SAMPLE_RATE_11025; + audio->samp_rate_index = AUDREC_CMD_SAMP_RATE_INDX_11025; + audio->channel_mode = AUDREC_CMD_STEREO_MODE_MONO; + audio->buffer_size = MONO_DATA_SIZE; + audio->type = AUDREC_CMD_TYPE_0_INDEX_WAV; + + rc = audmgr_open(&audio->audmgr); + if (rc) + goto done; + rc = msm_adsp_get("AUDPREPROCTASK", &audio->audpre, + &audpre_adsp_ops, audio); + if (rc) + goto done; + rc = msm_adsp_get("AUDRECTASK", &audio->audrec, + &audrec_adsp_ops, audio); + if (rc) + goto done; + + audio->dsp_cnt = 0; + audio->stopped = 0; + + audio_flush(audio); + + file->private_data = audio; + audio->opened = 1; + rc = 0; +done: + mutex_unlock(&audio->lock); + return rc; +} + +static long audpre_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct audio_in *audio = file->private_data; + int rc = 0, enable; + uint16_t enable_mask; +#if DEBUG + int i; +#endif + + mutex_lock(&audio->lock); + switch (cmd) { + case AUDIO_ENABLE_AUDPRE: { + if (copy_from_user(&enable_mask, (void *) arg, + sizeof(enable_mask))) + goto out_fault; + + enable = (enable_mask & AGC_ENABLE) ? 1 : 0; + audio_enable_agc(audio, enable); + enable = (enable_mask & NS_ENABLE) ? 1 : 0; + audio_enable_ns(audio, enable); + enable = (enable_mask & IIR_ENABLE) ? 
1 : 0; + audio_enable_tx_iir(audio, enable); + break; + } + case AUDIO_SET_AGC: { + if (copy_from_user(&audio->agc, (void *) arg, + sizeof(audio->agc))) + goto out_fault; +#if DEBUG + pr_info("set agc\n"); + for (i = 0; i < AGC_PARAM_SIZE; i++) \ + pr_info("agc_params[%d] = 0x%04x\n", i, + audio->agc.agc_params[i]); +#endif + break; + } + case AUDIO_SET_NS: { + if (copy_from_user(&audio->ns, (void *) arg, + sizeof(audio->ns))) + goto out_fault; +#if DEBUG + pr_info("set ns\n"); + for (i = 0; i < NS_PARAM_SIZE; i++) \ + pr_info("ns_params[%d] = 0x%04x\n", + i, audio->ns.ns_params[i]); +#endif + break; + } + case AUDIO_SET_TX_IIR: { + if (copy_from_user(&audio->iir, (void *) arg, + sizeof(audio->iir))) + goto out_fault; +#if DEBUG + pr_info("set iir\n"); + pr_info("iir.num_bands = 0x%04x\n", audio->iir.num_bands); + for (i = 0; i < IIR_PARAM_SIZE; i++) \ + pr_info("iir_params[%d] = 0x%04x\n", + i, audio->iir.iir_params[i]); +#endif + break; + } + default: + rc = -EINVAL; + } + + goto out; + +out_fault: + rc = -EFAULT; +out: + mutex_unlock(&audio->lock); + return rc; +} + +static int audpre_open(struct inode *inode, struct file *file) +{ + struct audio_in *audio = &the_audio_in; + file->private_data = audio; + return 0; +} + +static struct file_operations audio_fops = { + .owner = THIS_MODULE, + .open = audio_in_open, + .release = audio_in_release, + .read = audio_in_read, + .write = audio_in_write, + .unlocked_ioctl = audio_in_ioctl, +}; + +static struct file_operations audpre_fops = { + .owner = THIS_MODULE, + .open = audpre_open, + .unlocked_ioctl = audpre_ioctl, +}; + +struct miscdevice audio_in_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_pcm_in", + .fops = &audio_fops, +}; + +struct miscdevice audpre_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_audpre", + .fops = &audpre_fops, +}; + +static int __init audio_in_init(void) +{ + int rc; + the_audio_in.data = dma_alloc_coherent(NULL, DMASZ, + &the_audio_in.phys, GFP_KERNEL); + if (!the_audio_in.data) { + printk(KERN_ERR "%s: Unable to allocate DMA buffer\n", + __func__); + return -ENOMEM; + } + + mutex_init(&the_audio_in.lock); + mutex_init(&the_audio_in.read_lock); + spin_lock_init(&the_audio_in.dsp_lock); + init_waitqueue_head(&the_audio_in.wait); + rc = misc_register(&audio_in_misc); + if (!rc) { + rc = misc_register(&audpre_misc); + if (rc < 0) + misc_deregister(&audio_in_misc); + } + return rc; +} + +device_initcall(audio_in_init); diff --git a/arch/arm/mach-msm/qdsp5/audio_mp3.c b/arch/arm/mach-msm/qdsp5/audio_mp3.c new file mode 100644 index 0000000000000..f09bdcbc50495 --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/audio_mp3.c @@ -0,0 +1,970 @@ +/* arch/arm/mach-msm/qdsp5/audio_mp3.c + * + * mp3 audio output device + * + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include + +#include "audmgr.h" + +#include +#include +#include +#include + +/* for queue ids - should be relative to module number*/ +#include "adsp.h" + +#ifdef DEBUG +#define dprintk(format, arg...) \ +printk(KERN_DEBUG format, ## arg) +#else +#define dprintk(format, arg...) do {} while (0) +#endif + +/* Size must be power of 2 */ +#define BUFSZ_MAX 32768 +#define BUFSZ_MIN 4096 +#define DMASZ_MAX (BUFSZ_MAX * 2) +#define DMASZ_MIN (BUFSZ_MIN * 2) + +#define AUDPLAY_INVALID_READ_PTR_OFFSET 0xFFFF +#define AUDDEC_DEC_MP3 2 + +#define PCM_BUFSZ_MIN 4800 /* Hold one stereo MP3 frame */ +#define PCM_BUF_MAX_COUNT 5 /* DSP only accepts 5 buffers at most + but support 2 buffers currently */ +#define ROUTING_MODE_FTRT 1 +#define ROUTING_MODE_RT 2 +/* Decoder status received from AUDPPTASK */ +#define AUDPP_DEC_STATUS_SLEEP 0 +#define AUDPP_DEC_STATUS_INIT 1 +#define AUDPP_DEC_STATUS_CFG 2 +#define AUDPP_DEC_STATUS_PLAY 3 + +struct buffer { + void *data; + unsigned size; + unsigned used; /* Input usage actual DSP produced PCM size */ + unsigned addr; +}; + +struct audio { + struct buffer out[2]; + + spinlock_t dsp_lock; + + uint8_t out_head; + uint8_t out_tail; + uint8_t out_needed; /* number of buffers the dsp is waiting for */ + unsigned out_dma_sz; + + atomic_t out_bytes; + + struct mutex lock; + struct mutex write_lock; + wait_queue_head_t write_wait; + + /* Host PCM section */ + struct buffer in[PCM_BUF_MAX_COUNT]; + struct mutex read_lock; + wait_queue_head_t read_wait; /* Wait queue for read */ + char *read_data; /* pointer to reader buffer */ + dma_addr_t read_phys; /* physical address of reader buffer */ + uint8_t read_next; /* index to input buffers to be read next */ + uint8_t fill_next; /* index to buffer that DSP should be filling */ + uint8_t pcm_buf_count; /* number of pcm buffer allocated */ + /* ---- End of Host PCM section */ + + struct msm_adsp_module *audplay; + + /* configuration to use on next enable */ + uint32_t out_sample_rate; + uint32_t out_channel_mode; + + struct audmgr audmgr; + + /* data allocated for various buffers */ + char *data; + dma_addr_t phys; + + int rflush; /* Read flush */ + int wflush; /* Write flush */ + int opened; + int enabled; + int running; + int stopped; /* set when stopped, cleared on flush */ + int pcm_feedback; + int buf_refresh; + + int reserved; /* A byte is being reserved */ + char rsv_byte; /* Handle odd length user data */ + + unsigned volume; + + uint16_t dec_id; + uint32_t read_ptr_offset; +}; + +static int auddec_dsp_config(struct audio *audio, int enable); +static void audpp_cmd_cfg_adec_params(struct audio *audio); +static void audpp_cmd_cfg_routing_mode(struct audio *audio); +static void audplay_send_data(struct audio *audio, unsigned needed); +static void audplay_config_hostpcm(struct audio *audio); +static void audplay_buffer_refresh(struct audio *audio); +static void audio_dsp_event(void *private, unsigned id, uint16_t *msg); + +/* must be called with audio->lock held */ +static int audio_enable(struct audio *audio) +{ + struct audmgr_config cfg; + int rc; + + pr_info("audio_enable()\n"); + + if (audio->enabled) + return 0; + + audio->out_tail = 0; + audio->out_needed = 0; + + cfg.tx_rate = RPC_AUD_DEF_SAMPLE_RATE_NONE; + cfg.rx_rate = RPC_AUD_DEF_SAMPLE_RATE_48000; + cfg.def_method = RPC_AUD_DEF_METHOD_PLAYBACK; + cfg.codec = RPC_AUD_DEF_CODEC_MP3; + cfg.snd_method = RPC_SND_METHOD_MIDI; + + rc = 
audmgr_enable(&audio->audmgr, &cfg); + if (rc < 0) + return rc; + + if (msm_adsp_enable(audio->audplay)) { + pr_err("audio: msm_adsp_enable(audplay) failed\n"); + audmgr_disable(&audio->audmgr); + return -ENODEV; + } + + if (audpp_enable(audio->dec_id, audio_dsp_event, audio)) { + pr_err("audio: audpp_enable() failed\n"); + msm_adsp_disable(audio->audplay); + audmgr_disable(&audio->audmgr); + return -ENODEV; + } + + audio->enabled = 1; + return 0; +} + +/* must be called with audio->lock held */ +static int audio_disable(struct audio *audio) +{ + pr_info("audio_disable()\n"); + if (audio->enabled) { + audio->enabled = 0; + auddec_dsp_config(audio, 0); + wake_up(&audio->write_wait); + wake_up(&audio->read_wait); + msm_adsp_disable(audio->audplay); + audpp_disable(audio->dec_id, audio); + audmgr_disable(&audio->audmgr); + audio->out_needed = 0; + } + return 0; +} + +/* ------------------- dsp --------------------- */ +static void audio_update_pcm_buf_entry(struct audio *audio, uint32_t *payload) +{ + uint8_t index; + unsigned long flags; + + if (audio->rflush) { + audio->buf_refresh = 1; + return; + } + spin_lock_irqsave(&audio->dsp_lock, flags); + for (index = 0; index < payload[1]; index++) { + if (audio->in[audio->fill_next].addr == + payload[2 + index * 2]) { + pr_info("audio_update_pcm_buf_entry: in[%d] ready\n", + audio->fill_next); + audio->in[audio->fill_next].used = + payload[3 + index * 2]; + if ((++audio->fill_next) == audio->pcm_buf_count) + audio->fill_next = 0; + + } else { + pr_err + ("audio_update_pcm_buf_entry: expected=%x ret=%x\n" + , audio->in[audio->fill_next].addr, + payload[1 + index * 2]); + break; + } + } + if (audio->in[audio->fill_next].used == 0) { + audplay_buffer_refresh(audio); + } else { + pr_info("audio_update_pcm_buf_entry: read cannot keep up\n"); + audio->buf_refresh = 1; + } + wake_up(&audio->read_wait); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + +} + +static void audplay_dsp_event(void *data, unsigned id, size_t len, + void (*getevent) (void *ptr, size_t len)) +{ + struct audio *audio = data; + uint32_t msg[28]; + getevent(msg, sizeof(msg)); + + dprintk("audplay_dsp_event: msg_id=%x\n", id); + + switch (id) { + case AUDPLAY_MSG_DEC_NEEDS_DATA: + audplay_send_data(audio, 1); + break; + + case AUDPLAY_MSG_BUFFER_UPDATE: + audio_update_pcm_buf_entry(audio, msg); + break; + + default: + pr_err("unexpected message from decoder \n"); + break; + } +} + +static void audio_dsp_event(void *private, unsigned id, uint16_t *msg) +{ + struct audio *audio = private; + + switch (id) { + case AUDPP_MSG_STATUS_MSG:{ + unsigned status = msg[1]; + + switch (status) { + case AUDPP_DEC_STATUS_SLEEP: + pr_info("decoder status: sleep \n"); + break; + + case AUDPP_DEC_STATUS_INIT: + pr_info("decoder status: init \n"); + audpp_cmd_cfg_routing_mode(audio); + break; + + case AUDPP_DEC_STATUS_CFG: + pr_info("decoder status: cfg \n"); + break; + case AUDPP_DEC_STATUS_PLAY: + pr_info("decoder status: play \n"); + if (audio->pcm_feedback) { + audplay_config_hostpcm(audio); + audplay_buffer_refresh(audio); + } + break; + default: + pr_err("unknown decoder status \n"); + break; + } + break; + } + case AUDPP_MSG_CFG_MSG: + if (msg[0] == AUDPP_MSG_ENA_ENA) { + pr_info("audio_dsp_event: CFG_MSG ENABLE\n"); + auddec_dsp_config(audio, 1); + audio->out_needed = 0; + audio->running = 1; + audpp_set_volume_and_pan(audio->dec_id, audio->volume, + 0); + audpp_avsync(audio->dec_id, 22050); + } else if (msg[0] == AUDPP_MSG_ENA_DIS) { + pr_info("audio_dsp_event: CFG_MSG DISABLE\n"); + 
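+			/* decoder is being shut down: stop A/V sync tracking and
+			 * clear 'running' so audplay_send_data() no longer feeds
+			 * buffers to the DSP
+			 */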
audpp_avsync(audio->dec_id, 0); + audio->running = 0; + } else { + pr_err("audio_dsp_event: CFG_MSG %d?\n", msg[0]); + } + break; + case AUDPP_MSG_ROUTING_ACK: + pr_info("audio_dsp_event: ROUTING_ACK mode=%d\n", msg[1]); + audpp_cmd_cfg_adec_params(audio); + break; + + case AUDPP_MSG_FLUSH_ACK: + dprintk("%s: FLUSH_ACK\n", __func__); + audio->wflush = 0; + audio->rflush = 0; + if (audio->pcm_feedback) + audplay_buffer_refresh(audio); + break; + + default: + pr_err("audio_dsp_event: UNKNOWN (%d)\n", id); + } + +} + + +struct msm_adsp_ops audplay_adsp_ops = { + .event = audplay_dsp_event, +}; + + +#define audplay_send_queue0(audio, cmd, len) \ + msm_adsp_write(audio->audplay, QDSP_uPAudPlay0BitStreamCtrlQueue, \ + cmd, len) + +static int auddec_dsp_config(struct audio *audio, int enable) +{ + audpp_cmd_cfg_dec_type cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cmd_id = AUDPP_CMD_CFG_DEC_TYPE; + if (enable) + cmd.dec0_cfg = AUDPP_CMD_UPDATDE_CFG_DEC | + AUDPP_CMD_ENA_DEC_V | + AUDDEC_DEC_MP3; + else + cmd.dec0_cfg = AUDPP_CMD_UPDATDE_CFG_DEC | + AUDPP_CMD_DIS_DEC_V; + + return audpp_send_queue1(&cmd, sizeof(cmd)); +} + +static void audpp_cmd_cfg_adec_params(struct audio *audio) +{ + audpp_cmd_cfg_adec_params_mp3 cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.common.cmd_id = AUDPP_CMD_CFG_ADEC_PARAMS; + cmd.common.length = AUDPP_CMD_CFG_ADEC_PARAMS_MP3_LEN; + cmd.common.dec_id = audio->dec_id; + cmd.common.input_sampling_frequency = audio->out_sample_rate; + + audpp_send_queue2(&cmd, sizeof(cmd)); +} + +static void audpp_cmd_cfg_routing_mode(struct audio *audio) +{ + struct audpp_cmd_routing_mode cmd; + pr_info("audpp_cmd_cfg_routing_mode()\n"); + memset(&cmd, 0, sizeof(cmd)); + cmd.cmd_id = AUDPP_CMD_ROUTING_MODE; + cmd.object_number = audio->dec_id; + if (audio->pcm_feedback) + cmd.routing_mode = ROUTING_MODE_FTRT; + else + cmd.routing_mode = ROUTING_MODE_RT; + + audpp_send_queue1(&cmd, sizeof(cmd)); +} + +static int audplay_dsp_send_data_avail(struct audio *audio, + unsigned idx, unsigned len) +{ + audplay_cmd_bitstream_data_avail cmd; + + cmd.cmd_id = AUDPLAY_CMD_BITSTREAM_DATA_AVAIL; + cmd.decoder_id = audio->dec_id; + cmd.buf_ptr = audio->out[idx].addr; + cmd.buf_size = len/2; + cmd.partition_number = 0; + return audplay_send_queue0(audio, &cmd, sizeof(cmd)); +} + +static void audplay_buffer_refresh(struct audio *audio) +{ + struct audplay_cmd_buffer_refresh refresh_cmd; + + refresh_cmd.cmd_id = AUDPLAY_CMD_BUFFER_REFRESH; + refresh_cmd.num_buffers = 1; + refresh_cmd.buf0_address = audio->in[audio->fill_next].addr; + refresh_cmd.buf0_length = audio->in[audio->fill_next].size - + (audio->in[audio->fill_next].size % 576); /* Mp3 frame size */ + refresh_cmd.buf_read_count = 0; + pr_info("audplay_buffer_fresh: buf0_addr=%x buf0_len=%d\n", + refresh_cmd.buf0_address, refresh_cmd.buf0_length); + (void)audplay_send_queue0(audio, &refresh_cmd, sizeof(refresh_cmd)); +} + +static void audplay_config_hostpcm(struct audio *audio) +{ + struct audplay_cmd_hpcm_buf_cfg cfg_cmd; + + pr_info("audplay_config_hostpcm()\n"); + cfg_cmd.cmd_id = AUDPLAY_CMD_HPCM_BUF_CFG; + cfg_cmd.max_buffers = 1; + cfg_cmd.byte_swap = 0; + cfg_cmd.hostpcm_config = (0x8000) | (0x4000); + cfg_cmd.feedback_frequency = 1; + cfg_cmd.partition_number = 0; + (void)audplay_send_queue0(audio, &cfg_cmd, sizeof(cfg_cmd)); + +} + +static void audplay_send_data(struct audio *audio, unsigned needed) +{ + struct buffer *frame; + unsigned long flags; + + spin_lock_irqsave(&audio->dsp_lock, flags); + if (!audio->running) + goto done; + + if 
(audio->wflush) { + audio->out_needed = 1; + goto done; + } + + if (needed && !audio->wflush) { + /* We were called from the callback because the DSP + * requested more data. Note that the DSP does want + * more data, and if a buffer was in-flight, mark it + * as available (since the DSP must now be done with + * it). + */ + audio->out_needed = 1; + frame = audio->out + audio->out_tail; + if (frame->used == 0xffffffff) { + dprintk("frame %d free\n", audio->out_tail); + frame->used = 0; + audio->out_tail ^= 1; + wake_up(&audio->write_wait); + } + } + + if (audio->out_needed) { + /* If the DSP currently wants data and we have a + * buffer available, we will send it and reset + * the needed flag. We'll mark the buffer as in-flight + * so that it won't be recycled until the next buffer + * is requested + */ + + frame = audio->out + audio->out_tail; + if (frame->used) { + BUG_ON(frame->used == 0xffffffff); + dprintk("frame %d busy\n", audio->out_tail); + audplay_dsp_send_data_avail(audio, audio->out_tail, + frame->used); + frame->used = 0xffffffff; + audio->out_needed = 0; + } + } +done: + spin_unlock_irqrestore(&audio->dsp_lock, flags); +} + +/* ------------------- device --------------------- */ + +static void audio_flush(struct audio *audio) +{ + audio->out[0].used = 0; + audio->out[1].used = 0; + audio->out_head = 0; + audio->out_tail = 0; + audio->reserved = 0; + atomic_set(&audio->out_bytes, 0); +} + +static void audio_flush_pcm_buf(struct audio *audio) +{ + uint8_t index; + + for (index = 0; index < PCM_BUF_MAX_COUNT; index++) + audio->in[index].used = 0; + + audio->read_next = 0; + audio->fill_next = 0; +} + +static void audio_ioport_reset(struct audio *audio) +{ + /* Make sure read/write thread are free from + * sleep and knowing that system is not able + * to process io request at the moment + */ + wake_up(&audio->write_wait); + mutex_lock(&audio->write_lock); + audio_flush(audio); + mutex_unlock(&audio->write_lock); + wake_up(&audio->read_wait); + mutex_lock(&audio->read_lock); + audio_flush_pcm_buf(audio); + mutex_unlock(&audio->read_lock); +} + +static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct audio *audio = file->private_data; + int rc = 0; + + pr_info("audio_ioctl() cmd = %d\n", cmd); + + if (cmd == AUDIO_GET_STATS) { + struct msm_audio_stats stats; + stats.byte_count = audpp_avsync_byte_count(audio->dec_id); + stats.sample_count = audpp_avsync_sample_count(audio->dec_id); + if (copy_to_user((void *) arg, &stats, sizeof(stats))) + return -EFAULT; + return 0; + } + if (cmd == AUDIO_SET_VOLUME) { + unsigned long flags; + spin_lock_irqsave(&audio->dsp_lock, flags); + audio->volume = arg; + if (audio->running) + audpp_set_volume_and_pan(audio->dec_id, arg, 0); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + return 0; + } + mutex_lock(&audio->lock); + switch (cmd) { + case AUDIO_START: + rc = audio_enable(audio); + break; + case AUDIO_STOP: + rc = audio_disable(audio); + audio->stopped = 1; + audio_ioport_reset(audio); + audio->stopped = 0; + break; + case AUDIO_FLUSH: + dprintk("%s: AUDIO_FLUSH\n", __func__); + audio->rflush = 1; + audio->wflush = 1; + audio_ioport_reset(audio); + audio->rflush = 0; + audio->wflush = 0; + + if (audio->buf_refresh) { + audio->buf_refresh = 0; + audplay_buffer_refresh(audio); + } + break; + + case AUDIO_SET_CONFIG: { + struct msm_audio_config config; + if (copy_from_user(&config, (void *) arg, sizeof(config))) { + rc = -EFAULT; + break; + } + if (config.channel_count == 1) { + config.channel_count = 
AUDPP_CMD_PCM_INTF_MONO_V; + } else if (config.channel_count == 2) { + config.channel_count = AUDPP_CMD_PCM_INTF_STEREO_V; + } else { + rc = -EINVAL; + break; + } + audio->out_sample_rate = config.sample_rate; + audio->out_channel_mode = config.channel_count; + rc = 0; + break; + } + case AUDIO_GET_CONFIG: { + struct msm_audio_config config; + config.buffer_size = (audio->out_dma_sz >> 1); + config.buffer_count = 2; + config.sample_rate = audio->out_sample_rate; + if (audio->out_channel_mode == AUDPP_CMD_PCM_INTF_MONO_V) { + config.channel_count = 1; + } else { + config.channel_count = 2; + } + config.unused[0] = 0; + config.unused[1] = 0; + config.unused[2] = 0; + if (copy_to_user((void *) arg, &config, sizeof(config))) { + rc = -EFAULT; + } else { + rc = 0; + } + break; + } + case AUDIO_GET_PCM_CONFIG:{ + struct msm_audio_pcm_config config; + config.pcm_feedback = 0; + config.buffer_count = PCM_BUF_MAX_COUNT; + config.buffer_size = PCM_BUFSZ_MIN; + if (copy_to_user((void *)arg, &config, + sizeof(config))) + rc = -EFAULT; + else + rc = 0; + break; + } + case AUDIO_SET_PCM_CONFIG:{ + struct msm_audio_pcm_config config; + if (copy_from_user + (&config, (void *)arg, sizeof(config))) { + rc = -EFAULT; + break; + } + if ((config.buffer_count > PCM_BUF_MAX_COUNT) || + (config.buffer_count == 1)) + config.buffer_count = PCM_BUF_MAX_COUNT; + + if (config.buffer_size < PCM_BUFSZ_MIN) + config.buffer_size = PCM_BUFSZ_MIN; + + /* Check if pcm feedback is required */ + if ((config.pcm_feedback) && (!audio->read_data)) { + pr_info("ioctl: allocate PCM buffer %d\n", + config.buffer_count * + config.buffer_size); + audio->read_data = + dma_alloc_coherent(NULL, + config.buffer_size * + config.buffer_count, + &audio->read_phys, + GFP_KERNEL); + if (!audio->read_data) { + pr_err("audio_mp3: malloc pcm \ + buf failed\n"); + rc = -1; + } else { + uint8_t index; + uint32_t offset = 0; + audio->pcm_feedback = 1; + audio->buf_refresh = 0; + audio->pcm_buf_count = + config.buffer_count; + audio->read_next = 0; + audio->fill_next = 0; + + for (index = 0; + index < config.buffer_count; + index++) { + audio->in[index].data = + audio->read_data + offset; + audio->in[index].addr = + audio->read_phys + offset; + audio->in[index].size = + config.buffer_size; + audio->in[index].used = 0; + offset += config.buffer_size; + } + rc = 0; + } + } else { + rc = 0; + } + break; + } + case AUDIO_PAUSE: + dprintk("%s: AUDIO_PAUSE %ld\n", __func__, arg); + rc = audpp_pause(audio->dec_id, (int) arg); + break; + default: + rc = -EINVAL; + } + mutex_unlock(&audio->lock); + return rc; +} + +static ssize_t audio_read(struct file *file, char __user *buf, size_t count, + loff_t *pos) +{ + struct audio *audio = file->private_data; + const char __user *start = buf; + int rc = 0; + + if (!audio->pcm_feedback) + return 0; /* PCM feedback disabled. Nothing to read */ + + mutex_lock(&audio->read_lock); + pr_info("audio_read() %d \n", count); + while (count > 0) { + rc = wait_event_interruptible(audio->read_wait, + (audio->in[audio->read_next]. + used > 0) || (audio->stopped) + || (audio->rflush)); + + if (rc < 0) + break; + + if (audio->stopped || audio->rflush) { + rc = -EBUSY; + break; + } + + if (count < audio->in[audio->read_next].used) { + /* Read must happen in frame boundary. 
Since + * driver does not know frame size, read count + * must be greater or equal + * to size of PCM samples + */ + pr_info("audio_read: no partial frame done reading\n"); + break; + } else { + pr_info("audio_read: read from in[%d]\n", + audio->read_next); + if (copy_to_user + (buf, audio->in[audio->read_next].data, + audio->in[audio->read_next].used)) { + pr_err("audio_read: invalid addr %x \n", + (unsigned int)buf); + rc = -EFAULT; + break; + } + count -= audio->in[audio->read_next].used; + buf += audio->in[audio->read_next].used; + audio->in[audio->read_next].used = 0; + if ((++audio->read_next) == audio->pcm_buf_count) + audio->read_next = 0; + if (audio->in[audio->read_next].used == 0) + break; /* No data ready at this moment + * Exit while loop to prevent + * output thread sleep too long + */ + } + } + + /* don't feed output buffer to HW decoder during flushing + * buffer refresh command will be sent once flush completes + * send buf refresh command here can confuse HW decoder + */ + if (audio->buf_refresh && !audio->rflush) { + audio->buf_refresh = 0; + pr_info("audio_read: kick start pcm feedback again\n"); + audplay_buffer_refresh(audio); + } + + mutex_unlock(&audio->read_lock); + + if (buf > start) + rc = buf - start; + + pr_info("audio_read: read %d bytes\n", rc); + return rc; +} + +static ssize_t audio_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + struct audio *audio = file->private_data; + const char __user *start = buf; + struct buffer *frame; + size_t xfer; + char *cpy_ptr; + int rc = 0; + unsigned dsize; + + mutex_lock(&audio->write_lock); + while (count > 0) { + frame = audio->out + audio->out_head; + cpy_ptr = frame->data; + dsize = 0; + rc = wait_event_interruptible(audio->write_wait, + (frame->used == 0) + || (audio->stopped) + || (audio->wflush)); + if (rc < 0) + break; + if (audio->stopped || audio->wflush) { + rc = -EBUSY; + break; + } + + if (audio->reserved) { + dprintk("%s: append reserved byte %x\n", + __func__, audio->rsv_byte); + *cpy_ptr = audio->rsv_byte; + xfer = (count > (frame->size - 1)) ? + frame->size - 1 : count; + cpy_ptr++; + dsize = 1; + audio->reserved = 0; + } else + xfer = (count > frame->size) ? 
frame->size : count; + + if (copy_from_user(cpy_ptr, buf, xfer)) { + rc = -EFAULT; + break; + } + + dsize += xfer; + if (dsize & 1) { + audio->rsv_byte = ((char *) frame->data)[dsize - 1]; + dprintk("%s: odd length buf reserve last byte %x\n", + __func__, audio->rsv_byte); + audio->reserved = 1; + dsize--; + } + count -= xfer; + buf += xfer; + + if (dsize > 0) { + audio->out_head ^= 1; + frame->used = dsize; + audplay_send_data(audio, 0); + } + } + mutex_unlock(&audio->write_lock); + if (buf > start) + return buf - start; + return rc; +} + +static int audio_release(struct inode *inode, struct file *file) +{ + struct audio *audio = file->private_data; + + dprintk("audio_release()\n"); + + mutex_lock(&audio->lock); + audio_disable(audio); + audio_flush(audio); + audio_flush_pcm_buf(audio); + msm_adsp_put(audio->audplay); + audio->audplay = NULL; + audio->opened = 0; + audio->reserved = 0; + dma_free_coherent(NULL, audio->out_dma_sz, audio->data, audio->phys); + audio->data = NULL; + if (audio->read_data != NULL) { + dma_free_coherent(NULL, + audio->in[0].size * audio->pcm_buf_count, + audio->read_data, audio->read_phys); + audio->read_data = NULL; + } + audio->pcm_feedback = 0; + mutex_unlock(&audio->lock); + return 0; +} + +struct audio the_mp3_audio; + +static int audio_open(struct inode *inode, struct file *file) +{ + struct audio *audio = &the_mp3_audio; + int rc; + unsigned pmem_sz; + + mutex_lock(&audio->lock); + + if (audio->opened) { + pr_err("audio: busy\n"); + rc = -EBUSY; + goto done; + } + + pmem_sz = DMASZ_MAX; + + while (pmem_sz >= DMASZ_MIN) { + audio->data = dma_alloc_coherent(NULL, pmem_sz, + &audio->phys, GFP_KERNEL); + if (audio->data) + break; + else if (pmem_sz == DMASZ_MIN) { + pr_err("audio: could not allocate DMA buffers\n"); + rc = -ENOMEM; + goto done; + } else + pmem_sz >>= 1; + } + + dprintk("%s: allocated %d bytes DMA buffer\n", __func__, pmem_sz); + + rc = audmgr_open(&audio->audmgr); + if (rc) { + dma_free_coherent(NULL, pmem_sz, + audio->data, audio->phys); + goto done; + } + + rc = msm_adsp_get("AUDPLAY0TASK", &audio->audplay, &audplay_adsp_ops, + audio); + if (rc) { + pr_err("audio: failed to get audplay0 dsp module\n"); + dma_free_coherent(NULL, pmem_sz, + audio->data, audio->phys); + audmgr_close(&audio->audmgr); + goto done; + } + + audio->out_dma_sz = pmem_sz; + pmem_sz >>= 1; /* Shift by 1 to get size of ping pong buffer */ + + audio->out_sample_rate = 44100; + audio->out_channel_mode = AUDPP_CMD_PCM_INTF_STEREO_V; + audio->dec_id = 0; + + audio->out[0].data = audio->data + 0; + audio->out[0].addr = audio->phys + 0; + audio->out[0].size = pmem_sz; + + audio->out[1].data = audio->data + pmem_sz; + audio->out[1].addr = audio->phys + pmem_sz; + audio->out[1].size = pmem_sz; + + audio->volume = 0x2000; /* equal to Q13 number 1.0 Unit Gain */ + + audio_flush(audio); + + file->private_data = audio; + audio->opened = 1; + rc = 0; +done: + mutex_unlock(&audio->lock); + return rc; +} + +static struct file_operations audio_mp3_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_release, + .read = audio_read, + .write = audio_write, + .unlocked_ioctl = audio_ioctl, +}; + +struct miscdevice audio_mp3_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_mp3", + .fops = &audio_mp3_fops, +}; + +static int __init audio_init(void) +{ + mutex_init(&the_mp3_audio.lock); + mutex_init(&the_mp3_audio.write_lock); + mutex_init(&the_mp3_audio.read_lock); + spin_lock_init(&the_mp3_audio.dsp_lock); + init_waitqueue_head(&the_mp3_audio.write_wait); + 
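+	/* read_wait backs the host-PCM (non-tunnel) read path in audio_read() */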
init_waitqueue_head(&the_mp3_audio.read_wait); + the_mp3_audio.read_data = NULL; + return misc_register(&audio_mp3_misc); +} + +device_initcall(audio_init); diff --git a/arch/arm/mach-msm/qdsp5/audio_out.c b/arch/arm/mach-msm/qdsp5/audio_out.c new file mode 100644 index 0000000000000..fcb1f139e62c4 --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/audio_out.c @@ -0,0 +1,850 @@ +/* arch/arm/mach-msm/qdsp5/audio_out.c + * + * pcm audio output device + * + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include "audmgr.h" + +#include +#include + +#include + +#include "evlog.h" + +#define LOG_AUDIO_EVENTS 1 +#define LOG_AUDIO_FAULTS 0 + +enum { + EV_NULL, + EV_OPEN, + EV_WRITE, + EV_RETURN, + EV_IOCTL, + EV_WRITE_WAIT, + EV_WAIT_EVENT, + EV_FILL_BUFFER, + EV_SEND_BUFFER, + EV_DSP_EVENT, + EV_ENABLE, +}; + +#if (LOG_AUDIO_EVENTS != 1) +static inline void LOG(unsigned id, unsigned arg) {} +#else +static const char *pcm_log_strings[] = { + "NULL", + "OPEN", + "WRITE", + "RETURN", + "IOCTL", + "WRITE_WAIT", + "WAIT_EVENT", + "FILL_BUFFER", + "SEND_BUFFER", + "DSP_EVENT", + "ENABLE", +}; + +DECLARE_LOG(pcm_log, 64, pcm_log_strings); + +static int __init _pcm_log_init(void) +{ + return ev_log_init(&pcm_log); +} +module_init(_pcm_log_init); + +#define LOG(id,arg) ev_log_write(&pcm_log, id, arg) +#endif + + + + + +#define BUFSZ (960 * 5) +#define DMASZ (BUFSZ * 2) + +#define AUDPP_CMD_CFG_OBJ_UPDATE 0x8000 +#define AUDPP_CMD_EQ_FLAG_DIS 0x0000 +#define AUDPP_CMD_EQ_FLAG_ENA -1 +#define AUDPP_CMD_IIR_FLAG_DIS 0x0000 +#define AUDPP_CMD_IIR_FLAG_ENA -1 + +#define AUDPP_CMD_IIR_TUNING_FILTER 1 +#define AUDPP_CMD_EQUALIZER 2 +#define AUDPP_CMD_ADRC 3 + +#define ADRC_ENABLE 0x0001 +#define EQ_ENABLE 0x0002 +#define IIR_ENABLE 0x0004 + +struct adrc_filter { + uint16_t compression_th; + uint16_t compression_slope; + uint16_t rms_time; + uint16_t attack_const_lsw; + uint16_t attack_const_msw; + uint16_t release_const_lsw; + uint16_t release_const_msw; + uint16_t adrc_system_delay; +}; + +struct eqalizer { + uint16_t num_bands; + uint16_t eq_params[132]; +}; + +struct rx_iir_filter { + uint16_t num_bands; + uint16_t iir_params[48]; +}; + +typedef struct { + audpp_cmd_cfg_object_params_common common; + uint16_t eq_flag; + uint16_t num_bands; + uint16_t eq_params[132]; +} audpp_cmd_cfg_object_params_eq; + +typedef struct { + audpp_cmd_cfg_object_params_common common; + uint16_t active_flag; + uint16_t num_bands; + uint16_t iir_params[48]; +} audpp_cmd_cfg_object_params_rx_iir; + +struct buffer { + void *data; + unsigned size; + unsigned used; + unsigned addr; +}; + +struct audio { + struct buffer out[2]; + + spinlock_t dsp_lock; + + uint8_t out_head; + uint8_t out_tail; + uint8_t out_needed; /* number of buffers the dsp is waiting for */ + + atomic_t out_bytes; + + struct mutex lock; + struct mutex write_lock; + wait_queue_head_t wait; + + /* configuration to use on next enable */ 
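+	/* set via AUDIO_SET_CONFIG and audio_open(), applied in audio_dsp_out_enable() */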
+ uint32_t out_sample_rate; + uint32_t out_channel_mode; + uint32_t out_weight; + uint32_t out_buffer_size; + + struct audmgr audmgr; + + /* data allocated for various buffers */ + char *data; + dma_addr_t phys; + + int opened; + int enabled; + int running; + int stopped; /* set when stopped, cleared on flush */ + unsigned volume; + + struct wake_lock wakelock; + struct wake_lock idlelock; + + int adrc_enable; + struct adrc_filter adrc; + + int eq_enable; + struct eqalizer eq; + + int rx_iir_enable; + struct rx_iir_filter iir; +}; + +static void audio_prevent_sleep(struct audio *audio) +{ + printk(KERN_INFO "++++++++++++++++++++++++++++++\n"); + wake_lock(&audio->wakelock); + wake_lock(&audio->idlelock); +} + +static void audio_allow_sleep(struct audio *audio) +{ + wake_unlock(&audio->wakelock); + wake_unlock(&audio->idlelock); + printk(KERN_INFO "------------------------------\n"); +} + +static int audio_dsp_out_enable(struct audio *audio, int yes); +static int audio_dsp_send_buffer(struct audio *audio, unsigned id, unsigned len); +static int audio_dsp_set_adrc(struct audio *audio); +static int audio_dsp_set_eq(struct audio *audio); +static int audio_dsp_set_rx_iir(struct audio *audio); + +static void audio_dsp_event(void *private, unsigned id, uint16_t *msg); + +/* must be called with audio->lock held */ +static int audio_enable(struct audio *audio) +{ + struct audmgr_config cfg; + int rc; + + pr_info("audio_enable()\n"); + + if (audio->enabled) + return 0; + + /* refuse to start if we're not ready */ + if (!audio->out[0].used || !audio->out[1].used) + return -EIO; + + /* we start buffers 0 and 1, so buffer 0 will be the + * next one the dsp will want + */ + audio->out_tail = 0; + audio->out_needed = 0; + + cfg.tx_rate = RPC_AUD_DEF_SAMPLE_RATE_NONE; + cfg.rx_rate = RPC_AUD_DEF_SAMPLE_RATE_48000; + cfg.def_method = RPC_AUD_DEF_METHOD_HOST_PCM; + cfg.codec = RPC_AUD_DEF_CODEC_PCM; + cfg.snd_method = RPC_SND_METHOD_MIDI; + + audio_prevent_sleep(audio); + rc = audmgr_enable(&audio->audmgr, &cfg); + if (rc < 0) { + audio_allow_sleep(audio); + return rc; + } + + if (audpp_enable(-1, audio_dsp_event, audio)) { + pr_err("audio: audpp_enable() failed\n"); + audmgr_disable(&audio->audmgr); + audio_allow_sleep(audio); + return -ENODEV; + } + + audio->enabled = 1; + htc_pwrsink_set(PWRSINK_AUDIO, 100); + return 0; +} + +/* must be called with audio->lock held */ +static int audio_disable(struct audio *audio) +{ + pr_info("audio_disable()\n"); + if (audio->enabled) { + audio->enabled = 0; + audio_dsp_out_enable(audio, 0); + + audpp_disable(-1, audio); + + wake_up(&audio->wait); + audmgr_disable(&audio->audmgr); + audio->out_needed = 0; + audio_allow_sleep(audio); + } + return 0; +} + +/* ------------------- dsp --------------------- */ +static void audio_dsp_event(void *private, unsigned id, uint16_t *msg) +{ + struct audio *audio = private; + struct buffer *frame; + unsigned long flags; + + LOG(EV_DSP_EVENT, id); + switch (id) { + case AUDPP_MSG_HOST_PCM_INTF_MSG: { + unsigned id = msg[2]; + unsigned idx = msg[3] - 1; + + /* pr_info("audio_dsp_event: HOST_PCM id %d idx %d\n", id, idx); */ + if (id != AUDPP_MSG_HOSTPCM_ID_ARM_RX) { + pr_err("bogus id\n"); + break; + } + if (idx > 1) { + pr_err("bogus buffer idx\n"); + break; + } + + spin_lock_irqsave(&audio->dsp_lock, flags); + if (audio->running) { + atomic_add(audio->out[idx].used, &audio->out_bytes); + audio->out[idx].used = 0; + + frame = audio->out + audio->out_tail; + if (frame->used) { + audio_dsp_send_buffer( + audio, audio->out_tail, 
frame->used); + audio->out_tail ^= 1; + } else { + audio->out_needed++; + } + wake_up(&audio->wait); + } + spin_unlock_irqrestore(&audio->dsp_lock, flags); + break; + } + case AUDPP_MSG_PCMDMAMISSED: + pr_info("audio_dsp_event: PCMDMAMISSED %d\n", msg[0]); + break; + case AUDPP_MSG_CFG_MSG: + if (msg[0] == AUDPP_MSG_ENA_ENA) { + LOG(EV_ENABLE, 1); + pr_info("audio_dsp_event: CFG_MSG ENABLE\n"); + audio->out_needed = 0; + audio->running = 1; + audpp_set_volume_and_pan(5, audio->volume, 0); + audio_dsp_set_adrc(audio); + audio_dsp_set_eq(audio); + audio_dsp_set_rx_iir(audio); + audio_dsp_out_enable(audio, 1); + } else if (msg[0] == AUDPP_MSG_ENA_DIS) { + LOG(EV_ENABLE, 0); + pr_info("audio_dsp_event: CFG_MSG DISABLE\n"); + audio->running = 0; + } else { + pr_err("audio_dsp_event: CFG_MSG %d?\n", msg[0]); + } + break; + default: + pr_err("audio_dsp_event: UNKNOWN (%d)\n", id); + } +} + +static int audio_dsp_out_enable(struct audio *audio, int yes) +{ + audpp_cmd_pcm_intf cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cmd_id = AUDPP_CMD_PCM_INTF_2; + cmd.object_num = AUDPP_CMD_PCM_INTF_OBJECT_NUM; + cmd.config = AUDPP_CMD_PCM_INTF_CONFIG_CMD_V; + cmd.intf_type = AUDPP_CMD_PCM_INTF_RX_ENA_ARMTODSP_V; + + if (yes) { + cmd.write_buf1LSW = audio->out[0].addr; + cmd.write_buf1MSW = audio->out[0].addr >> 16; + cmd.write_buf1_len = audio->out[0].size; + cmd.write_buf2LSW = audio->out[1].addr; + cmd.write_buf2MSW = audio->out[1].addr >> 16; + cmd.write_buf2_len = audio->out[1].size; + cmd.arm_to_rx_flag = AUDPP_CMD_PCM_INTF_ENA_V; + cmd.weight_decoder_to_rx = audio->out_weight; + cmd.weight_arm_to_rx = 1; + cmd.partition_number_arm_to_dsp = 0; + cmd.sample_rate = audio->out_sample_rate; + cmd.channel_mode = audio->out_channel_mode; + } + + return audpp_send_queue2(&cmd, sizeof(cmd)); +} + +static int audio_dsp_send_buffer(struct audio *audio, unsigned idx, unsigned len) +{ + audpp_cmd_pcm_intf_send_buffer cmd; + + cmd.cmd_id = AUDPP_CMD_PCM_INTF_2; + cmd.host_pcm_object = AUDPP_CMD_PCM_INTF_OBJECT_NUM; + cmd.config = AUDPP_CMD_PCM_INTF_BUFFER_CMD_V; + cmd.intf_type = AUDPP_CMD_PCM_INTF_RX_ENA_ARMTODSP_V; + cmd.dsp_to_arm_buf_id = 0; + cmd.arm_to_dsp_buf_id = idx + 1; + cmd.arm_to_dsp_buf_len = len; + + LOG(EV_SEND_BUFFER, idx); + return audpp_send_queue2(&cmd, sizeof(cmd)); +} + +static int audio_dsp_set_adrc(struct audio *audio) +{ + audpp_cmd_cfg_object_params_adrc cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.common.comman_cfg = AUDPP_CMD_CFG_OBJ_UPDATE; + cmd.common.command_type = AUDPP_CMD_ADRC; + + if (audio->adrc_enable) { + cmd.adrc_flag = AUDPP_CMD_ADRC_FLAG_ENA; + cmd.compression_th = audio->adrc.compression_th; + cmd.compression_slope = audio->adrc.compression_slope; + cmd.rms_time = audio->adrc.rms_time; + cmd.attack_const_lsw = audio->adrc.attack_const_lsw; + cmd.attack_const_msw = audio->adrc.attack_const_msw; + cmd.release_const_lsw = audio->adrc.release_const_lsw; + cmd.release_const_msw = audio->adrc.release_const_msw; + cmd.adrc_system_delay = audio->adrc.adrc_system_delay; + } else { + cmd.adrc_flag = AUDPP_CMD_ADRC_FLAG_DIS; + } + return audpp_send_queue3(&cmd, sizeof(cmd)); +} + +static int audio_dsp_set_eq(struct audio *audio) +{ + audpp_cmd_cfg_object_params_eq cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.common.comman_cfg = AUDPP_CMD_CFG_OBJ_UPDATE; + cmd.common.command_type = AUDPP_CMD_EQUALIZER; + + if (audio->eq_enable) { + cmd.eq_flag = AUDPP_CMD_EQ_FLAG_ENA; + cmd.num_bands = audio->eq.num_bands; + memcpy(&cmd.eq_params, audio->eq.eq_params, + sizeof(audio->eq.eq_params)); + 
} else { + cmd.eq_flag = AUDPP_CMD_EQ_FLAG_DIS; + } + return audpp_send_queue3(&cmd, sizeof(cmd)); +} + +static int audio_dsp_set_rx_iir(struct audio *audio) +{ + audpp_cmd_cfg_object_params_rx_iir cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.common.comman_cfg = AUDPP_CMD_CFG_OBJ_UPDATE; + cmd.common.command_type = AUDPP_CMD_IIR_TUNING_FILTER; + + if (audio->rx_iir_enable) { + cmd.active_flag = AUDPP_CMD_IIR_FLAG_ENA; + cmd.num_bands = audio->iir.num_bands; + memcpy(&cmd.iir_params, audio->iir.iir_params, + sizeof(audio->iir.iir_params)); + } else { + cmd.active_flag = AUDPP_CMD_IIR_FLAG_DIS; + } + + return audpp_send_queue3(&cmd, sizeof(cmd)); +} + +/* ------------------- device --------------------- */ + +static int audio_enable_adrc(struct audio *audio, int enable) +{ + if (audio->adrc_enable != enable) { + audio->adrc_enable = enable; + if (audio->running) + audio_dsp_set_adrc(audio); + } + return 0; +} + +static int audio_enable_eq(struct audio *audio, int enable) +{ + if (audio->eq_enable != enable) { + audio->eq_enable = enable; + if (audio->running) + audio_dsp_set_eq(audio); + } + return 0; +} + +static int audio_enable_rx_iir(struct audio *audio, int enable) +{ + if (audio->rx_iir_enable != enable) { + audio->rx_iir_enable = enable; + if (audio->running) + audio_dsp_set_rx_iir(audio); + } + return 0; +} + +static void audio_flush(struct audio *audio) +{ + audio->out[0].used = 0; + audio->out[1].used = 0; + audio->out_head = 0; + audio->out_tail = 0; + audio->stopped = 0; +} + +static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct audio *audio = file->private_data; + int rc; + + if (cmd == AUDIO_GET_STATS) { + struct msm_audio_stats stats; + stats.byte_count = atomic_read(&audio->out_bytes); + if (copy_to_user((void*) arg, &stats, sizeof(stats))) + return -EFAULT; + return 0; + } + if (cmd == AUDIO_SET_VOLUME) { + unsigned long flags; + spin_lock_irqsave(&audio->dsp_lock, flags); + audio->volume = arg; + if (audio->running) + audpp_set_volume_and_pan(6, arg, 0); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + } + + LOG(EV_IOCTL, cmd); + mutex_lock(&audio->lock); + switch (cmd) { + case AUDIO_START: + rc = audio_enable(audio); + break; + case AUDIO_STOP: + rc = audio_disable(audio); + audio->stopped = 1; + break; + case AUDIO_FLUSH: + if (audio->stopped) { + /* Make sure we're stopped and we wake any threads + * that might be blocked holding the write_lock. + * While audio->stopped write threads will always + * exit immediately. 
+ */ + wake_up(&audio->wait); + mutex_lock(&audio->write_lock); + audio_flush(audio); + mutex_unlock(&audio->write_lock); + } + case AUDIO_SET_CONFIG: { + struct msm_audio_config config; + if (copy_from_user(&config, (void*) arg, sizeof(config))) { + rc = -EFAULT; + break; + } + if (config.channel_count == 1) { + config.channel_count = AUDPP_CMD_PCM_INTF_MONO_V; + } else if (config.channel_count == 2) { + config.channel_count= AUDPP_CMD_PCM_INTF_STEREO_V; + } else { + rc = -EINVAL; + break; + } + audio->out_sample_rate = config.sample_rate; + audio->out_channel_mode = config.channel_count; + rc = 0; + break; + } + case AUDIO_GET_CONFIG: { + struct msm_audio_config config; + config.buffer_size = BUFSZ; + config.buffer_count = 2; + config.sample_rate = audio->out_sample_rate; + if (audio->out_channel_mode == AUDPP_CMD_PCM_INTF_MONO_V) { + config.channel_count = 1; + } else { + config.channel_count = 2; + } + config.unused[0] = 0; + config.unused[1] = 0; + config.unused[2] = 0; + if (copy_to_user((void*) arg, &config, sizeof(config))) { + rc = -EFAULT; + } else { + rc = 0; + } + break; + } + default: + rc = -EINVAL; + } + mutex_unlock(&audio->lock); + return rc; +} + +static ssize_t audio_read(struct file *file, char __user *buf, size_t count, loff_t *pos) +{ + return -EINVAL; +} + +static inline int rt_policy(int policy) +{ + if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR)) + return 1; + return 0; +} + +static inline int task_has_rt_policy(struct task_struct *p) +{ + return rt_policy(p->policy); +} + +static ssize_t audio_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + struct sched_param s = { .sched_priority = 1 }; + struct audio *audio = file->private_data; + unsigned long flags; + const char __user *start = buf; + struct buffer *frame; + size_t xfer; + int old_prio = current->rt_priority; + int old_policy = current->policy; + int cap_nice = cap_raised(current_cap(), CAP_SYS_NICE); + int rc = 0; + + LOG(EV_WRITE, count | (audio->running << 28) | (audio->stopped << 24)); + + /* just for this write, set us real-time */ + if (!task_has_rt_policy(current)) { + struct cred *new = prepare_creds(); + cap_raise(new->cap_effective, CAP_SYS_NICE); + commit_creds(new); + sched_setscheduler(current, SCHED_RR, &s); + } + + mutex_lock(&audio->write_lock); + while (count > 0) { + frame = audio->out + audio->out_head; + + LOG(EV_WAIT_EVENT, 0); + rc = wait_event_interruptible(audio->wait, + (frame->used == 0) || (audio->stopped)); + LOG(EV_WAIT_EVENT, 1); + + if (rc < 0) + break; + if (audio->stopped) { + rc = -EBUSY; + break; + } + xfer = count > frame->size ? 
frame->size : count; + if (copy_from_user(frame->data, buf, xfer)) { + rc = -EFAULT; + break; + } + frame->used = xfer; + audio->out_head ^= 1; + count -= xfer; + buf += xfer; + + spin_lock_irqsave(&audio->dsp_lock, flags); + LOG(EV_FILL_BUFFER, audio->out_head ^ 1); + frame = audio->out + audio->out_tail; + if (frame->used && audio->out_needed) { + audio_dsp_send_buffer(audio, audio->out_tail, frame->used); + audio->out_tail ^= 1; + audio->out_needed--; + } + spin_unlock_irqrestore(&audio->dsp_lock, flags); + } + + mutex_unlock(&audio->write_lock); + + /* restore scheduling policy and priority */ + if (!rt_policy(old_policy)) { + struct sched_param v = { .sched_priority = old_prio }; + sched_setscheduler(current, old_policy, &v); + if (likely(!cap_nice)) { + struct cred *new = prepare_creds(); + cap_lower(new->cap_effective, CAP_SYS_NICE); + commit_creds(new); + sched_setscheduler(current, SCHED_RR, &s); + } + } + + LOG(EV_RETURN,(buf > start) ? (buf - start) : rc); + if (buf > start) + return buf - start; + return rc; +} + +static int audio_release(struct inode *inode, struct file *file) +{ + struct audio *audio = file->private_data; + + LOG(EV_OPEN, 0); + mutex_lock(&audio->lock); + audio_disable(audio); + audio_flush(audio); + audio->opened = 0; + mutex_unlock(&audio->lock); + htc_pwrsink_set(PWRSINK_AUDIO, 0); + return 0; +} + +struct audio the_audio; + +static int audio_open(struct inode *inode, struct file *file) +{ + struct audio *audio = &the_audio; + int rc; + + mutex_lock(&audio->lock); + + if (audio->opened) { + pr_err("audio: busy\n"); + rc = -EBUSY; + goto done; + } + + if (!audio->data) { + audio->data = dma_alloc_coherent(NULL, DMASZ, + &audio->phys, GFP_KERNEL); + if (!audio->data) { + pr_err("audio: could not allocate DMA buffers\n"); + rc = -ENOMEM; + goto done; + } + } + + rc = audmgr_open(&audio->audmgr); + if (rc) + goto done; + + audio->out_buffer_size = BUFSZ; + audio->out_sample_rate = 44100; + audio->out_channel_mode = AUDPP_CMD_PCM_INTF_STEREO_V; + audio->out_weight = 100; + + audio->out[0].data = audio->data + 0; + audio->out[0].addr = audio->phys + 0; + audio->out[0].size = BUFSZ; + + audio->out[1].data = audio->data + BUFSZ; + audio->out[1].addr = audio->phys + BUFSZ; + audio->out[1].size = BUFSZ; + + audio->volume = 0x2000; + + audio_flush(audio); + + file->private_data = audio; + audio->opened = 1; + rc = 0; + LOG(EV_OPEN, 1); +done: + mutex_unlock(&audio->lock); + return rc; +} + +static long audpp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct audio *audio = file->private_data; + int rc = 0, enable; + uint16_t enable_mask; + + mutex_lock(&audio->lock); + switch (cmd) { + case AUDIO_ENABLE_AUDPP: + if (copy_from_user(&enable_mask, (void *) arg, sizeof(enable_mask))) + goto out_fault; + + enable = (enable_mask & ADRC_ENABLE)? 1 : 0; + audio_enable_adrc(audio, enable); + enable = (enable_mask & EQ_ENABLE)? 1 : 0; + audio_enable_eq(audio, enable); + enable = (enable_mask & IIR_ENABLE)? 
1 : 0; + audio_enable_rx_iir(audio, enable); + break; + + case AUDIO_SET_ADRC: + if (copy_from_user(&audio->adrc, (void*) arg, sizeof(audio->adrc))) + goto out_fault; + break; + + case AUDIO_SET_EQ: + if (copy_from_user(&audio->eq, (void*) arg, sizeof(audio->eq))) + goto out_fault; + break; + + case AUDIO_SET_RX_IIR: + if (copy_from_user(&audio->iir, (void*) arg, sizeof(audio->iir))) + goto out_fault; + break; + + default: + rc = -EINVAL; + } + + goto out; + + out_fault: + rc = -EFAULT; + out: + mutex_unlock(&audio->lock); + return rc; +} + +static int audpp_open(struct inode *inode, struct file *file) +{ + struct audio *audio = &the_audio; + + file->private_data = audio; + return 0; +} + +static struct file_operations audio_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_release, + .read = audio_read, + .write = audio_write, + .unlocked_ioctl = audio_ioctl, +}; + +static struct file_operations audpp_fops = { + .owner = THIS_MODULE, + .open = audpp_open, + .unlocked_ioctl = audpp_ioctl, +}; + +struct miscdevice audio_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_pcm_out", + .fops = &audio_fops, +}; + +struct miscdevice audpp_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_pcm_ctl", + .fops = &audpp_fops, +}; + +static int __init audio_init(void) +{ + mutex_init(&the_audio.lock); + mutex_init(&the_audio.write_lock); + spin_lock_init(&the_audio.dsp_lock); + init_waitqueue_head(&the_audio.wait); + wake_lock_init(&the_audio.wakelock, WAKE_LOCK_SUSPEND, "audio_pcm"); + wake_lock_init(&the_audio.idlelock, WAKE_LOCK_IDLE, "audio_pcm_idle"); + return (misc_register(&audio_misc) || misc_register(&audpp_misc)); +} + +device_initcall(audio_init); diff --git a/arch/arm/mach-msm/qdsp5/audio_qcelp.c b/arch/arm/mach-msm/qdsp5/audio_qcelp.c new file mode 100644 index 0000000000000..9571469e4c1b9 --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/audio_qcelp.c @@ -0,0 +1,855 @@ +/* arch/arm/mach-msm/qdsp5/audio_qcelp.c + * + * qcelp 13k audio decoder device + * + * Copyright (c) 2008 QUALCOMM USA, INC. + * + * This code is based in part on audio_mp3.c, which is + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * See the GNU General Public License for more details. + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can find it at http://www.fsf.org. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "audmgr.h" +/* for queue ids - should be relative to module number*/ +#include "adsp.h" + +#ifdef DEBUG +#define dprintk(format, arg...) \ +printk(KERN_DEBUG format, ## arg) +#else +#define dprintk(format, arg...) 
do {} while (0) +#endif + +#define BUFSZ 1080 /* QCELP 13K Hold 600ms packet data = 36 * 30 */ +#define BUF_COUNT 2 +#define DMASZ (BUFSZ * BUF_COUNT) + +#define PCM_BUFSZ_MIN 1600 /* 100ms worth of data */ +#define PCM_BUF_MAX_COUNT 5 + +#define AUDDEC_DEC_QCELP 9 + +#define ROUTING_MODE_FTRT 1 +#define ROUTING_MODE_RT 2 +/* Decoder status received from AUDPPTASK */ +#define AUDPP_DEC_STATUS_SLEEP 0 +#define AUDPP_DEC_STATUS_INIT 1 +#define AUDPP_DEC_STATUS_CFG 2 +#define AUDPP_DEC_STATUS_PLAY 3 + +struct buffer { + void *data; + unsigned size; + unsigned used; /* Input usage actual DSP produced PCM size */ + unsigned addr; +}; + +struct audio { + struct buffer out[BUF_COUNT]; + + spinlock_t dsp_lock; + + uint8_t out_head; + uint8_t out_tail; + uint8_t out_needed; /* number of buffers the dsp is waiting for */ + + struct mutex lock; + struct mutex write_lock; + wait_queue_head_t write_wait; + + /* Host PCM section - START */ + struct buffer in[PCM_BUF_MAX_COUNT]; + struct mutex read_lock; + wait_queue_head_t read_wait; /* Wait queue for read */ + char *read_data; /* pointer to reader buffer */ + dma_addr_t read_phys; /* physical address of reader buffer */ + uint8_t read_next; /* index to input buffers to be read next */ + uint8_t fill_next; /* index to buffer that DSP should be filling */ + uint8_t pcm_buf_count; /* number of pcm buffer allocated */ + /* Host PCM section - END */ + + struct msm_adsp_module *audplay; + + struct audmgr audmgr; + + /* data allocated for various buffers */ + char *data; + dma_addr_t phys; + + uint8_t opened:1; + uint8_t enabled:1; + uint8_t running:1; + uint8_t stopped:1; /* set when stopped, cleared on flush */ + uint8_t pcm_feedback:1; /* set when non-tunnel mode */ + uint8_t buf_refresh:1; + + unsigned volume; + + uint16_t dec_id; +}; + +static struct audio the_qcelp_audio; + +static int auddec_dsp_config(struct audio *audio, int enable); +static void audpp_cmd_cfg_adec_params(struct audio *audio); +static void audpp_cmd_cfg_routing_mode(struct audio *audio); +static void audqcelp_send_data(struct audio *audio, unsigned needed); +static void audqcelp_config_hostpcm(struct audio *audio); +static void audqcelp_buffer_refresh(struct audio *audio); +static void audqcelp_dsp_event(void *private, unsigned id, uint16_t *msg); + +/* must be called with audio->lock held */ +static int audqcelp_enable(struct audio *audio) +{ + struct audmgr_config cfg; + int rc; + + dprintk("audqcelp_enable()\n"); + + if (audio->enabled) + return 0; + + audio->out_tail = 0; + audio->out_needed = 0; + + cfg.tx_rate = RPC_AUD_DEF_SAMPLE_RATE_NONE; + cfg.rx_rate = RPC_AUD_DEF_SAMPLE_RATE_48000; + cfg.def_method = RPC_AUD_DEF_METHOD_PLAYBACK; + cfg.codec = RPC_AUD_DEF_CODEC_13K; + cfg.snd_method = RPC_SND_METHOD_MIDI; + + rc = audmgr_enable(&audio->audmgr, &cfg); + if (rc < 0) + return rc; + + if (msm_adsp_enable(audio->audplay)) { + pr_err("audio: msm_adsp_enable(audplay) failed\n"); + audmgr_disable(&audio->audmgr); + return -ENODEV; + } + + if (audpp_enable(audio->dec_id, audqcelp_dsp_event, audio)) { + pr_err("audio: audpp_enable() failed\n"); + msm_adsp_disable(audio->audplay); + audmgr_disable(&audio->audmgr); + return -ENODEV; + } + audio->enabled = 1; + return 0; +} + +/* must be called with audio->lock held */ +static int audqcelp_disable(struct audio *audio) +{ + dprintk("audqcelp_disable()\n"); + if (audio->enabled) { + audio->enabled = 0; + auddec_dsp_config(audio, 0); + wake_up(&audio->write_wait); + wake_up(&audio->read_wait); + msm_adsp_disable(audio->audplay); + 
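+		/* unregister this decoder from AUDPP and release the audmgr session */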
audpp_disable(audio->dec_id, audio); + audmgr_disable(&audio->audmgr); + audio->out_needed = 0; + } + return 0; +} + +/* ------------------- dsp --------------------- */ +static void audqcelp_update_pcm_buf_entry(struct audio *audio, + uint32_t *payload) +{ + uint8_t index; + unsigned long flags; + + spin_lock_irqsave(&audio->dsp_lock, flags); + for (index = 0; index < payload[1]; index++) { + if (audio->in[audio->fill_next].addr == + payload[2 + index * 2]) { + dprintk("audqcelp_update_pcm_buf_entry: in[%d] ready\n", + audio->fill_next); + audio->in[audio->fill_next].used = + payload[3 + index * 2]; + if ((++audio->fill_next) == audio->pcm_buf_count) + audio->fill_next = 0; + } else { + pr_err( + "audqcelp_update_pcm_buf_entry: expected=%x ret=%x\n", + audio->in[audio->fill_next].addr, + payload[1 + index * 2]); + break; + } + } + if (audio->in[audio->fill_next].used == 0) { + audqcelp_buffer_refresh(audio); + } else { + dprintk("audqcelp_update_pcm_buf_entry: read cannot keep up\n"); + audio->buf_refresh = 1; + } + + spin_unlock_irqrestore(&audio->dsp_lock, flags); + wake_up(&audio->read_wait); +} + +static void audplay_dsp_event(void *data, unsigned id, size_t len, + void (*getevent) (void *ptr, size_t len)) +{ + struct audio *audio = data; + uint32_t msg[28]; + getevent(msg, sizeof(msg)); + + dprintk("audplay_dsp_event: msg_id=%x\n", id); + + switch (id) { + case AUDPLAY_MSG_DEC_NEEDS_DATA: + audqcelp_send_data(audio, 1); + break; + + case AUDPLAY_MSG_BUFFER_UPDATE: + audqcelp_update_pcm_buf_entry(audio, msg); + break; + + default: + pr_err("unexpected message from decoder \n"); + } +} + +static void audqcelp_dsp_event(void *private, unsigned id, uint16_t *msg) +{ + struct audio *audio = private; + + switch (id) { + case AUDPP_MSG_STATUS_MSG:{ + unsigned status = msg[1]; + + switch (status) { + case AUDPP_DEC_STATUS_SLEEP: + dprintk("decoder status: sleep \n"); + break; + + case AUDPP_DEC_STATUS_INIT: + dprintk("decoder status: init \n"); + audpp_cmd_cfg_routing_mode(audio); + break; + + case AUDPP_DEC_STATUS_CFG: + dprintk("decoder status: cfg \n"); + break; + case AUDPP_DEC_STATUS_PLAY: + dprintk("decoder status: play \n"); + if (audio->pcm_feedback) { + audqcelp_config_hostpcm(audio); + audqcelp_buffer_refresh(audio); + } + break; + default: + pr_err("unknown decoder status \n"); + } + break; + } + case AUDPP_MSG_CFG_MSG: + if (msg[0] == AUDPP_MSG_ENA_ENA) { + dprintk("audqcelp_dsp_event: CFG_MSG ENABLE\n"); + auddec_dsp_config(audio, 1); + audio->out_needed = 0; + audio->running = 1; + audpp_set_volume_and_pan(audio->dec_id, audio->volume, + 0); + audpp_avsync(audio->dec_id, 22050); + } else if (msg[0] == AUDPP_MSG_ENA_DIS) { + dprintk("audqcelp_dsp_event: CFG_MSG DISABLE\n"); + audpp_avsync(audio->dec_id, 0); + audio->running = 0; + } else { + pr_err("audqcelp_dsp_event: CFG_MSG %d?\n", msg[0]); + } + break; + case AUDPP_MSG_ROUTING_ACK: + dprintk("audqcelp_dsp_event: ROUTING_ACK mode=%d\n", msg[1]); + audpp_cmd_cfg_adec_params(audio); + break; + default: + pr_err("audqcelp_dsp_event: UNKNOWN (%d)\n", id); + } + +} + +struct msm_adsp_ops audplay_adsp_ops_qcelp = { + .event = audplay_dsp_event, +}; + +#define audplay_send_queue0(audio, cmd, len) \ + msm_adsp_write(audio->audplay, QDSP_uPAudPlay0BitStreamCtrlQueue, \ + cmd, len) + +static int auddec_dsp_config(struct audio *audio, int enable) +{ + audpp_cmd_cfg_dec_type cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cmd_id = AUDPP_CMD_CFG_DEC_TYPE; + if (enable) + cmd.dec0_cfg = AUDPP_CMD_UPDATDE_CFG_DEC | + AUDPP_CMD_ENA_DEC_V | 
AUDDEC_DEC_QCELP; + else + cmd.dec0_cfg = AUDPP_CMD_UPDATDE_CFG_DEC | AUDPP_CMD_DIS_DEC_V; + + return audpp_send_queue1(&cmd, sizeof(cmd)); +} + +static void audpp_cmd_cfg_adec_params(struct audio *audio) +{ + struct audpp_cmd_cfg_adec_params_v13k cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.common.cmd_id = AUDPP_CMD_CFG_ADEC_PARAMS; + cmd.common.length = AUDPP_CMD_CFG_ADEC_PARAMS_V13K_LEN; + cmd.common.dec_id = audio->dec_id; + cmd.common.input_sampling_frequency = 8000; + cmd.stereo_cfg = AUDPP_CMD_PCM_INTF_MONO_V; + + audpp_send_queue2(&cmd, sizeof(cmd)); +} + +static void audpp_cmd_cfg_routing_mode(struct audio *audio) +{ + struct audpp_cmd_routing_mode cmd; + dprintk("audpp_cmd_cfg_routing_mode()\n"); + memset(&cmd, 0, sizeof(cmd)); + cmd.cmd_id = AUDPP_CMD_ROUTING_MODE; + cmd.object_number = audio->dec_id; + if (audio->pcm_feedback) + cmd.routing_mode = ROUTING_MODE_FTRT; + else + cmd.routing_mode = ROUTING_MODE_RT; + audpp_send_queue1(&cmd, sizeof(cmd)); +} + +static int audplay_dsp_send_data_avail(struct audio *audio, + unsigned idx, unsigned len) +{ + audplay_cmd_bitstream_data_avail cmd; + + cmd.cmd_id = AUDPLAY_CMD_BITSTREAM_DATA_AVAIL; + cmd.decoder_id = audio->dec_id; + cmd.buf_ptr = audio->out[idx].addr; + cmd.buf_size = len / 2; + cmd.partition_number = 0; + return audplay_send_queue0(audio, &cmd, sizeof(cmd)); +} + +static void audqcelp_buffer_refresh(struct audio *audio) +{ + struct audplay_cmd_buffer_refresh refresh_cmd; + + refresh_cmd.cmd_id = AUDPLAY_CMD_BUFFER_REFRESH; + refresh_cmd.num_buffers = 1; + refresh_cmd.buf0_address = audio->in[audio->fill_next].addr; + refresh_cmd.buf0_length = audio->in[audio->fill_next].size; + refresh_cmd.buf_read_count = 0; + dprintk("audplay_buffer_fresh: buf0_addr=%x buf0_len=%d\n", + refresh_cmd.buf0_address, refresh_cmd.buf0_length); + + (void)audplay_send_queue0(audio, &refresh_cmd, sizeof(refresh_cmd)); +} + +static void audqcelp_config_hostpcm(struct audio *audio) +{ + struct audplay_cmd_hpcm_buf_cfg cfg_cmd; + + dprintk("audqcelp_config_hostpcm()\n"); + cfg_cmd.cmd_id = AUDPLAY_CMD_HPCM_BUF_CFG; + cfg_cmd.max_buffers = audio->pcm_buf_count; + cfg_cmd.byte_swap = 0; + cfg_cmd.hostpcm_config = (0x8000) | (0x4000); + cfg_cmd.feedback_frequency = 1; + cfg_cmd.partition_number = 0; + + (void)audplay_send_queue0(audio, &cfg_cmd, sizeof(cfg_cmd)); +} + +static void audqcelp_send_data(struct audio *audio, unsigned needed) +{ + struct buffer *frame; + unsigned long flags; + + spin_lock_irqsave(&audio->dsp_lock, flags); + if (!audio->running) + goto done; + + if (needed) { + /* We were called from the callback because the DSP + * requested more data. Note that the DSP does want + * more data, and if a buffer was in-flight, mark it + * as available (since the DSP must now be done with + * it). + */ + audio->out_needed = 1; + frame = audio->out + audio->out_tail; + if (frame->used == 0xffffffff) { + dprintk("frame %d free\n", audio->out_tail); + frame->used = 0; + audio->out_tail ^= 1; + wake_up(&audio->write_wait); + } + } + + if (audio->out_needed) { + /* If the DSP currently wants data and we have a + * buffer available, we will send it and reset + * the needed flag. 
We'll mark the buffer as in-flight + * so that it won't be recycled until the next buffer + * is requested + */ + + frame = audio->out + audio->out_tail; + if (frame->used) { + BUG_ON(frame->used == 0xffffffff); + dprintk("frame %d busy\n", audio->out_tail); + audplay_dsp_send_data_avail(audio, audio->out_tail, + frame->used); + frame->used = 0xffffffff; + audio->out_needed = 0; + } + } + done: + spin_unlock_irqrestore(&audio->dsp_lock, flags); +} + +/* ------------------- device --------------------- */ + +static void audqcelp_flush(struct audio *audio) +{ + audio->out[0].used = 0; + audio->out[1].used = 0; + audio->out_head = 0; + audio->out_tail = 0; + audio->stopped = 0; +} + +static void audqcelp_flush_pcm_buf(struct audio *audio) +{ + uint8_t index; + + for (index = 0; index < PCM_BUF_MAX_COUNT; index++) + audio->in[index].used = 0; + + audio->read_next = 0; + audio->fill_next = 0; +} + +static long audqcelp_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct audio *audio = file->private_data; + int rc = 0; + + dprintk("audqcelp_ioctl() cmd = %d\n", cmd); + + if (cmd == AUDIO_GET_STATS) { + struct msm_audio_stats stats; + stats.byte_count = audpp_avsync_byte_count(audio->dec_id); + stats.sample_count = audpp_avsync_sample_count(audio->dec_id); + if (copy_to_user((void *)arg, &stats, sizeof(stats))) + return -EFAULT; + return 0; + } + if (cmd == AUDIO_SET_VOLUME) { + unsigned long flags; + spin_lock_irqsave(&audio->dsp_lock, flags); + audio->volume = arg; + if (audio->running) + audpp_set_volume_and_pan(audio->dec_id, arg, 0); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + return 0; + } + mutex_lock(&audio->lock); + switch (cmd) { + case AUDIO_START: + rc = audqcelp_enable(audio); + break; + case AUDIO_STOP: + rc = audqcelp_disable(audio); + audio->stopped = 1; + break; + case AUDIO_FLUSH: + if (audio->stopped) { + /* Make sure we're stopped and we wake any threads + * that might be blocked holding the write_lock. + * While audio->stopped write threads will always + * exit immediately. 
+ */ + wake_up(&audio->write_wait); + mutex_lock(&audio->write_lock); + audqcelp_flush(audio); + mutex_unlock(&audio->write_lock); + wake_up(&audio->read_wait); + mutex_lock(&audio->read_lock); + audqcelp_flush_pcm_buf(audio); + mutex_unlock(&audio->read_lock); + break; + } + break; + case AUDIO_SET_CONFIG: + dprintk("AUDIO_SET_CONFIG not applicable \n"); + break; + case AUDIO_GET_CONFIG:{ + struct msm_audio_config config; + config.buffer_size = BUFSZ; + config.buffer_count = BUF_COUNT; + config.sample_rate = 8000; + config.channel_count = 1; + config.unused[0] = 0; + config.unused[1] = 0; + config.unused[2] = 0; + if (copy_to_user((void *)arg, &config, + sizeof(config))) + rc = -EFAULT; + else + rc = 0; + + break; + } + case AUDIO_GET_PCM_CONFIG:{ + struct msm_audio_pcm_config config; + + config.pcm_feedback = 0; + config.buffer_count = PCM_BUF_MAX_COUNT; + config.buffer_size = PCM_BUFSZ_MIN; + if (copy_to_user((void *)arg, &config, + sizeof(config))) + rc = -EFAULT; + else + rc = 0; + break; + } + case AUDIO_SET_PCM_CONFIG:{ + struct msm_audio_pcm_config config; + + if (copy_from_user(&config, (void *)arg, + sizeof(config))) { + rc = -EFAULT; + break; + } + if ((config.buffer_count > PCM_BUF_MAX_COUNT) || + (config.buffer_count == 1)) + config.buffer_count = PCM_BUF_MAX_COUNT; + + if (config.buffer_size < PCM_BUFSZ_MIN) + config.buffer_size = PCM_BUFSZ_MIN; + + /* Check if pcm feedback is required */ + if ((config.pcm_feedback) && (!audio->read_data)) { + dprintk( + "audqcelp_ioctl: allocate PCM buf %d\n", + config.buffer_count * config.buffer_size); + audio->read_data = dma_alloc_coherent(NULL, + config.buffer_size * config.buffer_count, + &audio->read_phys, GFP_KERNEL); + if (!audio->read_data) { + pr_err( + "audqcelp_ioctl: no mem for pcm buf\n" + ); + rc = -ENOMEM; + } else { + uint8_t index; + uint32_t offset = 0; + + audio->pcm_feedback = 1; + audio->buf_refresh = 0; + audio->pcm_buf_count = + config.buffer_count; + audio->read_next = 0; + audio->fill_next = 0; + + for (index = 0; + index < config.buffer_count; index++) { + audio->in[index].data = + audio->read_data + offset; + audio->in[index].addr = + audio->read_phys + offset; + audio->in[index].size = + config.buffer_size; + audio->in[index].used = 0; + offset += config.buffer_size; + } + rc = 0; + } + } else { + rc = 0; + } + break; + } + case AUDIO_PAUSE: + dprintk("%s: AUDIO_PAUSE %ld\n", __func__, arg); + rc = audpp_pause(audio->dec_id, (int) arg); + break; + default: + rc = -EINVAL; + } + mutex_unlock(&audio->lock); + return rc; +} + +static ssize_t audqcelp_read(struct file *file, char __user *buf, size_t count, + loff_t *pos) +{ + struct audio *audio = file->private_data; + const char __user *start = buf; + int rc = 0; + + if (!audio->pcm_feedback) + return 0; /* PCM feedback is not enabled. Nothing to read */ + + mutex_lock(&audio->read_lock); + dprintk("audqcelp_read() %d \n", count); + while (count > 0) { + rc = wait_event_interruptible(audio->read_wait, + (audio->in[audio->read_next].used > 0) || + (audio->stopped)); + if (rc < 0) + break; + + if (audio->stopped) { + rc = -EBUSY; + break; + } + + if (count < audio->in[audio->read_next].used) { + /* Read must happen in frame boundary. 
Since driver does + not know frame size, read count must be greater or equal + to size of PCM samples */ + dprintk("audqcelp_read:read stop - partial frame\n"); + break; + } else { + dprintk("audqcelp_read: read from in[%d]\n", + audio->read_next); + if (copy_to_user(buf, + audio->in[audio->read_next].data, + audio->in[audio->read_next].used)) { + pr_err("audqcelp_read: invalid addr %x \n", + (unsigned int)buf); + rc = -EFAULT; + break; + } + count -= audio->in[audio->read_next].used; + buf += audio->in[audio->read_next].used; + audio->in[audio->read_next].used = 0; + if ((++audio->read_next) == audio->pcm_buf_count) + audio->read_next = 0; + } + } + + if (audio->buf_refresh) { + audio->buf_refresh = 0; + dprintk("audqcelp_read: kick start pcm feedback again\n"); + audqcelp_buffer_refresh(audio); + } + + mutex_unlock(&audio->read_lock); + + if (buf > start) + rc = buf - start; + + dprintk("audqcelp_read: read %d bytes\n", rc); + return rc; +} + +static ssize_t audqcelp_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + struct audio *audio = file->private_data; + const char __user *start = buf; + struct buffer *frame; + size_t xfer; + int rc = 0; + + if (count & 1) + return -EINVAL; + dprintk("audqcelp_write() \n"); + mutex_lock(&audio->write_lock); + while (count > 0) { + frame = audio->out + audio->out_head; + rc = wait_event_interruptible(audio->write_wait, + (frame->used == 0) + || (audio->stopped)); + dprintk("audqcelp_write() buffer available\n"); + if (rc < 0) + break; + if (audio->stopped) { + rc = -EBUSY; + break; + } + xfer = (count > frame->size) ? frame->size : count; + if (copy_from_user(frame->data, buf, xfer)) { + rc = -EFAULT; + break; + } + + frame->used = xfer; + audio->out_head ^= 1; + count -= xfer; + buf += xfer; + + audqcelp_send_data(audio, 0); + + } + mutex_unlock(&audio->write_lock); + if (buf > start) + return buf - start; + return rc; +} + +static int audqcelp_release(struct inode *inode, struct file *file) +{ + struct audio *audio = file->private_data; + + dprintk("audqcelp_release()\n"); + + mutex_lock(&audio->lock); + audqcelp_disable(audio); + audqcelp_flush(audio); + audqcelp_flush_pcm_buf(audio); + msm_adsp_put(audio->audplay); + audio->audplay = NULL; + audio->opened = 0; + if (audio->data) + dma_free_coherent(NULL, DMASZ, audio->data, audio->phys); + audio->data = NULL; + if (audio->read_data) { + dma_free_coherent(NULL, + audio->in[0].size * audio->pcm_buf_count, + audio->read_data, audio->read_phys); + audio->read_data = NULL; + } + audio->pcm_feedback = 0; + mutex_unlock(&audio->lock); + return 0; +} + +static int audqcelp_open(struct inode *inode, struct file *file) +{ + struct audio *audio = &the_qcelp_audio; + int rc; + + mutex_lock(&audio->lock); + + if (audio->opened) { + pr_err("audio: busy\n"); + rc = -EBUSY; + goto done; + } + + audio->data = dma_alloc_coherent(NULL, DMASZ, + &audio->phys, GFP_KERNEL); + if (!audio->data) { + pr_err("audio: could not allocate DMA buffers\n"); + rc = -ENOMEM; + goto done; + } + + rc = audmgr_open(&audio->audmgr); + if (rc) + goto err; + + rc = msm_adsp_get("AUDPLAY0TASK", &audio->audplay, + &audplay_adsp_ops_qcelp, audio); + if (rc) { + pr_err("audio: failed to get audplay0 dsp module\n"); + audmgr_close(&audio->audmgr); + goto err; + } + + audio->dec_id = 0; + + audio->out[0].data = audio->data + 0; + audio->out[0].addr = audio->phys + 0; + audio->out[0].size = BUFSZ; + + audio->out[1].data = audio->data + BUFSZ; + audio->out[1].addr = audio->phys + BUFSZ; + audio->out[1].size = 
BUFSZ; + + audio->volume = 0x2000; /* Q13 1.0 */ + + audqcelp_flush(audio); + + file->private_data = audio; + audio->opened = 1; + rc = 0; +done: + mutex_unlock(&audio->lock); + return rc; +err: + dma_free_coherent(NULL, DMASZ, audio->data, audio->phys); + mutex_unlock(&audio->lock); + return rc; +} + +static struct file_operations audio_qcelp_fops = { + .owner = THIS_MODULE, + .open = audqcelp_open, + .release = audqcelp_release, + .read = audqcelp_read, + .write = audqcelp_write, + .unlocked_ioctl = audqcelp_ioctl, +}; + +struct miscdevice audio_qcelp_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_qcelp", + .fops = &audio_qcelp_fops, +}; + +static int __init audqcelp_init(void) +{ + mutex_init(&the_qcelp_audio.lock); + mutex_init(&the_qcelp_audio.write_lock); + mutex_init(&the_qcelp_audio.read_lock); + spin_lock_init(&the_qcelp_audio.dsp_lock); + init_waitqueue_head(&the_qcelp_audio.write_wait); + init_waitqueue_head(&the_qcelp_audio.read_wait); + the_qcelp_audio.read_data = NULL; + return misc_register(&audio_qcelp_misc); +} + +static void __exit audqcelp_exit(void) +{ + misc_deregister(&audio_qcelp_misc); +} + +module_init(audqcelp_init); +module_exit(audqcelp_exit); + +MODULE_DESCRIPTION("MSM QCELP 13K driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("QUALCOMM"); diff --git a/arch/arm/mach-msm/qdsp5/audmgr.c b/arch/arm/mach-msm/qdsp5/audmgr.c new file mode 100644 index 0000000000000..b7d8158dab24e --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/audmgr.c @@ -0,0 +1,314 @@ +/* arch/arm/mach-msm/qdsp5/audmgr.c + * + * interface to "audmgr" service on the baseband cpu + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "audmgr.h" + +#define STATE_CLOSED 0 +#define STATE_DISABLED 1 +#define STATE_ENABLING 2 +#define STATE_ENABLED 3 +#define STATE_DISABLING 4 +#define STATE_ERROR 5 + +static void rpc_ack(struct msm_rpc_endpoint *ept, uint32_t xid) +{ + uint32_t rep[6]; + + rep[0] = cpu_to_be32(xid); + rep[1] = cpu_to_be32(1); + rep[2] = cpu_to_be32(RPCMSG_REPLYSTAT_ACCEPTED); + rep[3] = cpu_to_be32(RPC_ACCEPTSTAT_SUCCESS); + rep[4] = 0; + rep[5] = 0; + + msm_rpc_write(ept, rep, sizeof(rep)); +} + +static void process_audmgr_callback(struct audmgr *am, + struct rpc_audmgr_cb_func_ptr *args, + int len) +{ + if (len < (sizeof(uint32_t) * 3)) + return; + if (be32_to_cpu(args->set_to_one) != 1) + return; + + switch (be32_to_cpu(args->status)) { + case RPC_AUDMGR_STATUS_READY: + if (len < sizeof(uint32_t) * 4) + break; + am->handle = be32_to_cpu(args->u.handle); + pr_info("audmgr: rpc READY handle=0x%08x\n", am->handle); + break; + case RPC_AUDMGR_STATUS_CODEC_CONFIG: { + uint32_t volume; + if (len < sizeof(uint32_t) * 4) + break; + volume = be32_to_cpu(args->u.volume); + pr_info("audmgr: rpc CODEC_CONFIG volume=0x%08x\n", volume); + am->state = STATE_ENABLED; + wake_up(&am->wait); + break; + } + case RPC_AUDMGR_STATUS_PENDING: + pr_err("audmgr: PENDING?\n"); + break; + case RPC_AUDMGR_STATUS_SUSPEND: + pr_err("audmgr: SUSPEND?\n"); + break; + case RPC_AUDMGR_STATUS_FAILURE: + pr_err("audmgr: FAILURE\n"); + break; + case RPC_AUDMGR_STATUS_VOLUME_CHANGE: + pr_err("audmgr: VOLUME_CHANGE?\n"); + break; + case RPC_AUDMGR_STATUS_DISABLED: + pr_err("audmgr: DISABLED\n"); + am->state = STATE_DISABLED; + wake_up(&am->wait); + break; + case RPC_AUDMGR_STATUS_ERROR: + pr_err("audmgr: ERROR?\n"); + am->state = STATE_ERROR; + wake_up(&am->wait); + break; + default: + break; + } +} + +static void process_rpc_request(uint32_t proc, uint32_t xid, + void *data, int len, void *private) +{ + struct audmgr *am = private; + uint32_t *x = data; + + if (0) { + int n = len / 4; + pr_info("rpc_call proc %d:", proc); + while (n--) + printk(" %08x", be32_to_cpu(*x++)); + printk("\n"); + } + + if (proc == AUDMGR_CB_FUNC_PTR) + process_audmgr_callback(am, data, len); + else + pr_err("audmgr: unknown rpc proc %d\n", proc); + rpc_ack(am->ept, xid); +} + +#define RPC_TYPE_REQUEST 0 +#define RPC_TYPE_REPLY 1 + +#define RPC_VERSION 2 + +#define RPC_COMMON_HDR_SZ (sizeof(uint32_t) * 2) +#define RPC_REQUEST_HDR_SZ (sizeof(struct rpc_request_hdr)) +#define RPC_REPLY_HDR_SZ (sizeof(uint32_t) * 3) +#define RPC_REPLY_SZ (sizeof(uint32_t) * 6) + +static int audmgr_rpc_thread(void *data) +{ + struct audmgr *am = data; + struct rpc_request_hdr *hdr = NULL; + uint32_t type; + int len; + + pr_info("audmgr_rpc_thread() start\n"); + + while (!kthread_should_stop()) { + if (hdr) { + kfree(hdr); + hdr = NULL; + } + len = msm_rpc_read(am->ept, (void **) &hdr, -1, -1); + if (len < 0) { + pr_err("audmgr: rpc read failed (%d)\n", len); + break; + } + if (len < RPC_COMMON_HDR_SZ) + continue; + + type = be32_to_cpu(hdr->type); + if (type == RPC_TYPE_REPLY) { + struct rpc_reply_hdr *rep = (void *) hdr; + uint32_t status; + if (len < RPC_REPLY_HDR_SZ) + continue; + status = be32_to_cpu(rep->reply_stat); + if (status == RPCMSG_REPLYSTAT_ACCEPTED) { + status = be32_to_cpu(rep->data.acc_hdr.accept_stat); + pr_info("audmgr: rpc_reply status %d\n", status); + } else { + pr_info("audmgr: rpc_reply denied!\n"); + } + /* process reply */ + continue; + } + + if (len < 
RPC_REQUEST_HDR_SZ) + continue; + + process_rpc_request(be32_to_cpu(hdr->procedure), + be32_to_cpu(hdr->xid), + (void *) (hdr + 1), + len - sizeof(*hdr), + data); + } + pr_info("audmgr_rpc_thread() exit\n"); + if (hdr) { + kfree(hdr); + hdr = NULL; + } + am->task = NULL; + wake_up(&am->wait); + return 0; +} + +struct audmgr_enable_msg { + struct rpc_request_hdr hdr; + struct rpc_audmgr_enable_client_args args; +}; + +struct audmgr_disable_msg { + struct rpc_request_hdr hdr; + uint32_t handle; +}; + +int audmgr_open(struct audmgr *am) +{ + int rc; + + if (am->state != STATE_CLOSED) + return 0; + + am->ept = msm_rpc_connect(AUDMGR_PROG, + AUDMGR_VERS, + MSM_RPC_UNINTERRUPTIBLE | MSM_RPC_ENABLE_RECEIVE); + + init_waitqueue_head(&am->wait); + + if (IS_ERR(am->ept)) { + rc = PTR_ERR(am->ept); + am->ept = NULL; + pr_err("audmgr: failed to connect to audmgr svc\n"); + return rc; + } + + am->task = kthread_run(audmgr_rpc_thread, am, "audmgr_rpc"); + if (IS_ERR(am->task)) { + rc = PTR_ERR(am->task); + am->task = NULL; + msm_rpc_close(am->ept); + am->ept = NULL; + return rc; + } + + am->state = STATE_DISABLED; + return 0; +} +EXPORT_SYMBOL(audmgr_open); + +int audmgr_close(struct audmgr *am) +{ + return -EBUSY; +} +EXPORT_SYMBOL(audmgr_close); + +int audmgr_enable(struct audmgr *am, struct audmgr_config *cfg) +{ + struct audmgr_enable_msg msg; + int rc; + + if (am->state == STATE_ENABLED) + return 0; + + if (am->state == STATE_DISABLING) + pr_err("audmgr: state is DISABLING in enable?\n"); + am->state = STATE_ENABLING; + + msg.args.set_to_one = cpu_to_be32(1); + msg.args.tx_sample_rate = cpu_to_be32(cfg->tx_rate); + msg.args.rx_sample_rate = cpu_to_be32(cfg->rx_rate); + msg.args.def_method = cpu_to_be32(cfg->def_method); + msg.args.codec_type = cpu_to_be32(cfg->codec); + msg.args.snd_method = cpu_to_be32(cfg->snd_method); + msg.args.cb_func = cpu_to_be32(0x11111111); + msg.args.client_data = cpu_to_be32(0x11223344); + + msm_rpc_setup_req(&msg.hdr, AUDMGR_PROG, msm_rpc_get_vers(am->ept), + AUDMGR_ENABLE_CLIENT); + + rc = msm_rpc_write(am->ept, &msg, sizeof(msg)); + if (rc < 0) + return rc; + + rc = wait_event_timeout(am->wait, am->state != STATE_ENABLING, 15 * HZ); + if (rc == 0) { + pr_err("audmgr_enable: ARM9 did not reply to RPC am->state = %d\n", am->state); + BUG(); + } + if (am->state == STATE_ENABLED) + return 0; + + pr_err("audmgr: unexpected state %d while enabling?!\n", am->state); + return -ENODEV; +} +EXPORT_SYMBOL(audmgr_enable); + +int audmgr_disable(struct audmgr *am) +{ + struct audmgr_disable_msg msg; + int rc; + + if (am->state == STATE_DISABLED) + return 0; + + msm_rpc_setup_req(&msg.hdr, AUDMGR_PROG, msm_rpc_get_vers(am->ept), + AUDMGR_DISABLE_CLIENT); + msg.handle = cpu_to_be32(am->handle); + + am->state = STATE_DISABLING; + + rc = msm_rpc_write(am->ept, &msg, sizeof(msg)); + if (rc < 0) + return rc; + + rc = wait_event_timeout(am->wait, am->state != STATE_DISABLING, 15 * HZ); + if (rc == 0) { + pr_err("audmgr_disable: ARM9 did not reply to RPC am->state = %d\n", am->state); + BUG(); + } + + if (am->state == STATE_DISABLED) + return 0; + + pr_err("audmgr: unexpected state %d while disabling?!\n", am->state); + return -ENODEV; +} +EXPORT_SYMBOL(audmgr_disable); diff --git a/arch/arm/mach-msm/qdsp5/audmgr.h b/arch/arm/mach-msm/qdsp5/audmgr.h new file mode 100644 index 0000000000000..cd2d0c6d7d411 --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/audmgr.h @@ -0,0 +1,215 @@ +/* arch/arm/mach-msm/qdsp5/audmgr.h + * + * Copyright 2008 (c) QUALCOMM Incorporated. 
+ * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _ARCH_ARM_MACH_MSM_AUDMGR_H +#define _ARCH_ARM_MACH_MSM_AUDMGR_H + +#if CONFIG_MSM_AMSS_VERSION==6350 +#include "audmgr_new.h" +#else + +enum rpc_aud_def_sample_rate_type { + RPC_AUD_DEF_SAMPLE_RATE_NONE, + RPC_AUD_DEF_SAMPLE_RATE_8000, + RPC_AUD_DEF_SAMPLE_RATE_11025, + RPC_AUD_DEF_SAMPLE_RATE_12000, + RPC_AUD_DEF_SAMPLE_RATE_16000, + RPC_AUD_DEF_SAMPLE_RATE_22050, + RPC_AUD_DEF_SAMPLE_RATE_24000, + RPC_AUD_DEF_SAMPLE_RATE_32000, + RPC_AUD_DEF_SAMPLE_RATE_44100, + RPC_AUD_DEF_SAMPLE_RATE_48000, + RPC_AUD_DEF_SAMPLE_RATE_MAX, +}; + +enum rpc_aud_def_method_type { + RPC_AUD_DEF_METHOD_NONE, + RPC_AUD_DEF_METHOD_KEY_BEEP, + RPC_AUD_DEF_METHOD_PLAYBACK, + RPC_AUD_DEF_METHOD_VOICE, + RPC_AUD_DEF_METHOD_RECORD, + RPC_AUD_DEF_METHOD_HOST_PCM, + RPC_AUD_DEF_METHOD_MIDI_OUT, + RPC_AUD_DEF_METHOD_RECORD_SBC, + RPC_AUD_DEF_METHOD_DTMF_RINGER, + RPC_AUD_DEF_METHOD_MAX, +}; + +enum rpc_aud_def_codec_type { + RPC_AUD_DEF_CODEC_NONE, + RPC_AUD_DEF_CODEC_DTMF, + RPC_AUD_DEF_CODEC_MIDI, + RPC_AUD_DEF_CODEC_MP3, + RPC_AUD_DEF_CODEC_PCM, + RPC_AUD_DEF_CODEC_AAC, + RPC_AUD_DEF_CODEC_WMA, + RPC_AUD_DEF_CODEC_RA, + RPC_AUD_DEF_CODEC_ADPCM, + RPC_AUD_DEF_CODEC_GAUDIO, + RPC_AUD_DEF_CODEC_VOC_EVRC, + RPC_AUD_DEF_CODEC_VOC_13K, + RPC_AUD_DEF_CODEC_VOC_4GV_NB, + RPC_AUD_DEF_CODEC_VOC_AMR, + RPC_AUD_DEF_CODEC_VOC_EFR, + RPC_AUD_DEF_CODEC_VOC_FR, + RPC_AUD_DEF_CODEC_VOC_HR, + RPC_AUD_DEF_CODEC_VOC, + RPC_AUD_DEF_CODEC_SBC, + RPC_AUD_DEF_CODEC_VOC_PCM, + RPC_AUD_DEF_CODEC_AMR_WB, + RPC_AUD_DEF_CODEC_AMR_WB_PLUS, + RPC_AUD_DEF_CODEC_MAX, +}; + +enum rpc_snd_method_type { + RPC_SND_METHOD_VOICE = 0, + RPC_SND_METHOD_KEY_BEEP, + RPC_SND_METHOD_MESSAGE, + RPC_SND_METHOD_RING, + RPC_SND_METHOD_MIDI, + RPC_SND_METHOD_AUX, + RPC_SND_METHOD_MAX, +}; + +enum rpc_voc_codec_type { + RPC_VOC_CODEC_DEFAULT, + RPC_VOC_CODEC_ON_CHIP_0 = RPC_VOC_CODEC_DEFAULT, + RPC_VOC_CODEC_ON_CHIP_1, + RPC_VOC_CODEC_STEREO_HEADSET, + RPC_VOC_CODEC_ON_CHIP_AUX, + RPC_VOC_CODEC_BT_OFF_BOARD, + RPC_VOC_CODEC_BT_A2DP, + RPC_VOC_CODEC_OFF_BOARD, + RPC_VOC_CODEC_SDAC, + RPC_VOC_CODEC_RX_EXT_SDAC_TX_INTERNAL, + RPC_VOC_CODEC_IN_STEREO_SADC_OUT_MONO_HANDSET, + RPC_VOC_CODEC_IN_STEREO_SADC_OUT_STEREO_HEADSET, + RPC_VOC_CODEC_TX_INT_SADC_RX_EXT_AUXPCM, + RPC_VOC_CODEC_EXT_STEREO_SADC_OUT_MONO_HANDSET, + RPC_VOC_CODEC_EXT_STEREO_SADC_OUT_STEREO_HEADSET, + RPC_VOC_CODEC_TTY_ON_CHIP_1, + RPC_VOC_CODEC_TTY_OFF_BOARD, + RPC_VOC_CODEC_TTY_VCO, + RPC_VOC_CODEC_TTY_HCO, + RPC_VOC_CODEC_ON_CHIP_0_DUAL_MIC, + RPC_VOC_CODEC_MAX, + RPC_VOC_CODEC_NONE, +}; + +enum rpc_audmgr_status_type { + RPC_AUDMGR_STATUS_READY, + RPC_AUDMGR_STATUS_CODEC_CONFIG, + RPC_AUDMGR_STATUS_PENDING, + RPC_AUDMGR_STATUS_SUSPEND, + RPC_AUDMGR_STATUS_FAILURE, + RPC_AUDMGR_STATUS_VOLUME_CHANGE, + RPC_AUDMGR_STATUS_DISABLED, + RPC_AUDMGR_STATUS_ERROR, +}; + +struct rpc_audmgr_enable_client_args { + uint32_t set_to_one; + uint32_t tx_sample_rate; + uint32_t rx_sample_rate; + uint32_t def_method; + uint32_t codec_type; + uint32_t snd_method; + + uint32_t cb_func; + uint32_t client_data; 
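+ /* cb_func and client_data are filled in by audmgr_enable() before the request is sent to the remote audmgr service. */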
+}; + +#define AUDMGR_ENABLE_CLIENT 2 +#define AUDMGR_DISABLE_CLIENT 3 +#define AUDMGR_SUSPEND_EVENT_RSP 4 +#define AUDMGR_REGISTER_OPERATION_LISTENER 5 +#define AUDMGR_UNREGISTER_OPERATION_LISTENER 6 +#define AUDMGR_REGISTER_CODEC_LISTENER 7 +#define AUDMGR_GET_RX_SAMPLE_RATE 8 +#define AUDMGR_GET_TX_SAMPLE_RATE 9 +#define AUDMGR_SET_DEVICE_MODE 10 + +#if CONFIG_MSM_AMSS_VERSION < 6220 +#define AUDMGR_PROG_VERS "rs30000013:46255756" +#define AUDMGR_PROG 0x30000013 +#define AUDMGR_VERS 0x46255756 +#else +#define AUDMGR_PROG_VERS "rs30000013:e94e8f0c" +#define AUDMGR_PROG 0x30000013 +#define AUDMGR_VERS 0xe94e8f0c +#endif + +struct rpc_audmgr_cb_func_ptr { + uint32_t cb_id; + uint32_t set_to_one; + uint32_t status; + union { + uint32_t handle; + uint32_t volume; + + } u; +}; + +#define AUDMGR_CB_FUNC_PTR 1 +#define AUDMGR_OPR_LSTNR_CB_FUNC_PTR 2 +#define AUDMGR_CODEC_LSTR_FUNC_PTR 3 + +#if CONFIG_MSM_AMSS_VERSION < 6220 +#define AUDMGR_CB_PROG 0x31000013 +#define AUDMGR_CB_VERS 0x5fa922a9 +#else +#define AUDMGR_CB_PROG 0x31000013 +#define AUDMGR_CB_VERS 0x21570ba7 +#endif + +struct audmgr { + wait_queue_head_t wait; + uint32_t handle; + struct msm_rpc_endpoint *ept; + struct task_struct *task; + int state; +}; + +struct audmgr_config { + uint32_t tx_rate; + uint32_t rx_rate; + uint32_t def_method; + uint32_t codec; + uint32_t snd_method; +}; + +int audmgr_open(struct audmgr *am); +int audmgr_close(struct audmgr *am); +int audmgr_enable(struct audmgr *am, struct audmgr_config *cfg); +int audmgr_disable(struct audmgr *am); + +typedef void (*audpp_event_func)(void *private, unsigned id, uint16_t *msg); + +int audpp_enable(int id, audpp_event_func func, void *private); +void audpp_disable(int id, void *private); + +int audpp_send_queue1(void *cmd, unsigned len); +int audpp_send_queue2(void *cmd, unsigned len); +int audpp_send_queue3(void *cmd, unsigned len); + +int audpp_pause(unsigned id, int pause); +int audpp_set_volume_and_pan(unsigned id, unsigned volume, int pan); +void audpp_avsync(int id, unsigned rate); +unsigned audpp_avsync_sample_count(int id); +unsigned audpp_avsync_byte_count(int id); + +#endif +#endif diff --git a/arch/arm/mach-msm/qdsp5/audmgr_new.h b/arch/arm/mach-msm/qdsp5/audmgr_new.h new file mode 100644 index 0000000000000..43812424ffef7 --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/audmgr_new.h @@ -0,0 +1,213 @@ +/* arch/arm/mach-msm/qdsp5/audmgr.h + * + * Copyright 2008 (c) QUALCOMM Incorporated. + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _ARCH_ARM_MACH_MSM_AUDMGR_NEW_H +#define _ARCH_ARM_MACH_MSM_AUDMGR_NEW_H + +enum rpc_aud_def_sample_rate_type { + RPC_AUD_DEF_SAMPLE_RATE_NONE, + RPC_AUD_DEF_SAMPLE_RATE_8000, + RPC_AUD_DEF_SAMPLE_RATE_11025, + RPC_AUD_DEF_SAMPLE_RATE_12000, + RPC_AUD_DEF_SAMPLE_RATE_16000, + RPC_AUD_DEF_SAMPLE_RATE_22050, + RPC_AUD_DEF_SAMPLE_RATE_24000, + RPC_AUD_DEF_SAMPLE_RATE_32000, + RPC_AUD_DEF_SAMPLE_RATE_44100, + RPC_AUD_DEF_SAMPLE_RATE_48000, + RPC_AUD_DEF_SAMPLE_RATE_MAX, +}; + +enum rpc_aud_def_method_type { + RPC_AUD_DEF_METHOD_NONE, + RPC_AUD_DEF_METHOD_KEY_BEEP, + RPC_AUD_DEF_METHOD_PLAYBACK, + RPC_AUD_DEF_METHOD_VOICE, + RPC_AUD_DEF_METHOD_RECORD, + RPC_AUD_DEF_METHOD_HOST_PCM, + RPC_AUD_DEF_METHOD_MIDI_OUT, + RPC_AUD_DEF_METHOD_RECORD_SBC, + RPC_AUD_DEF_METHOD_DTMF_RINGER, + RPC_AUD_DEF_METHOD_MAX, +}; + +enum rpc_aud_def_codec_type { + RPC_AUD_DEF_CODEC_NONE, + RPC_AUD_DEF_CODEC_DTMF, + RPC_AUD_DEF_CODEC_MIDI, + RPC_AUD_DEF_CODEC_MP3, + RPC_AUD_DEF_CODEC_PCM, + RPC_AUD_DEF_CODEC_AAC, + RPC_AUD_DEF_CODEC_WMA, + RPC_AUD_DEF_CODEC_RA, + RPC_AUD_DEF_CODEC_ADPCM, + RPC_AUD_DEF_CODEC_GAUDIO, + RPC_AUD_DEF_CODEC_VOC_EVRC, + RPC_AUD_DEF_CODEC_VOC_13K, + RPC_AUD_DEF_CODEC_VOC_4GV_NB, + RPC_AUD_DEF_CODEC_VOC_AMR, + RPC_AUD_DEF_CODEC_VOC_EFR, + RPC_AUD_DEF_CODEC_VOC_FR, + RPC_AUD_DEF_CODEC_VOC_HR, + RPC_AUD_DEF_CODEC_VOC_CDMA, + RPC_AUD_DEF_CODEC_VOC_CDMA_WB, + RPC_AUD_DEF_CODEC_VOC_UMTS, + RPC_AUD_DEF_CODEC_VOC_UMTS_WB, + RPC_AUD_DEF_CODEC_SBC, + RPC_AUD_DEF_CODEC_VOC_PCM, + RPC_AUD_DEF_CODEC_AMR_WB, + RPC_AUD_DEF_CODEC_AMR_WB_PLUS, + RPC_AUD_DEF_CODEC_AAC_BSAC, + RPC_AUD_DEF_CODEC_MAX, + RPC_AUD_DEF_CODEC_AMR_NB, + RPC_AUD_DEF_CODEC_13K, + RPC_AUD_DEF_CODEC_EVRC, + RPC_AUD_DEF_CODEC_MAX_002, +}; + +enum rpc_snd_method_type { + RPC_SND_METHOD_VOICE = 0, + RPC_SND_METHOD_KEY_BEEP, + RPC_SND_METHOD_MESSAGE, + RPC_SND_METHOD_RING, + RPC_SND_METHOD_MIDI, + RPC_SND_METHOD_AUX, + RPC_SND_METHOD_MAX, +}; + +enum rpc_voc_codec_type { + RPC_VOC_CODEC_DEFAULT, + RPC_VOC_CODEC_ON_CHIP_0 = RPC_VOC_CODEC_DEFAULT, + RPC_VOC_CODEC_ON_CHIP_1, + RPC_VOC_CODEC_STEREO_HEADSET, + RPC_VOC_CODEC_ON_CHIP_AUX, + RPC_VOC_CODEC_BT_OFF_BOARD, + RPC_VOC_CODEC_BT_A2DP, + RPC_VOC_CODEC_OFF_BOARD, + RPC_VOC_CODEC_SDAC, + RPC_VOC_CODEC_RX_EXT_SDAC_TX_INTERNAL, + RPC_VOC_CODEC_IN_STEREO_SADC_OUT_MONO_HANDSET, + RPC_VOC_CODEC_IN_STEREO_SADC_OUT_STEREO_HEADSET, + RPC_VOC_CODEC_TX_INT_SADC_RX_EXT_AUXPCM, + RPC_VOC_CODEC_EXT_STEREO_SADC_OUT_MONO_HANDSET, + RPC_VOC_CODEC_EXT_STEREO_SADC_OUT_STEREO_HEADSET, + RPC_VOC_CODEC_TTY_ON_CHIP_1, + RPC_VOC_CODEC_TTY_OFF_BOARD, + RPC_VOC_CODEC_TTY_VCO, + RPC_VOC_CODEC_TTY_HCO, + RPC_VOC_CODEC_ON_CHIP_0_DUAL_MIC, + RPC_VOC_CODEC_MAX, + RPC_VOC_CODEC_NONE, +}; + +enum rpc_audmgr_status_type { + RPC_AUDMGR_STATUS_READY, + RPC_AUDMGR_STATUS_CODEC_CONFIG, + RPC_AUDMGR_STATUS_PENDING, + RPC_AUDMGR_STATUS_SUSPEND, + RPC_AUDMGR_STATUS_FAILURE, + RPC_AUDMGR_STATUS_VOLUME_CHANGE, + RPC_AUDMGR_STATUS_DISABLED, + RPC_AUDMGR_STATUS_ERROR, +}; + +struct rpc_audmgr_enable_client_args { + uint32_t set_to_one; + uint32_t tx_sample_rate; + uint32_t rx_sample_rate; + uint32_t def_method; + uint32_t codec_type; + uint32_t snd_method; + + uint32_t cb_func; + uint32_t client_data; +}; + +#define AUDMGR_ENABLE_CLIENT 2 +#define AUDMGR_DISABLE_CLIENT 3 +#define AUDMGR_SUSPEND_EVENT_RSP 4 +#define AUDMGR_REGISTER_OPERATION_LISTENER 5 +#define AUDMGR_UNREGISTER_OPERATION_LISTENER 6 +#define AUDMGR_REGISTER_CODEC_LISTENER 7 +#define AUDMGR_GET_RX_SAMPLE_RATE 8 +#define AUDMGR_GET_TX_SAMPLE_RATE 9 
+#define AUDMGR_SET_DEVICE_MODE 10 + +#define AUDMGR_PROG 0x30000013 +#define AUDMGR_VERS MSM_RPC_VERS(1,0) + +struct rpc_audmgr_cb_func_ptr { + uint32_t cb_id; + uint32_t status; /* Audmgr status */ + uint32_t set_to_one; /* Pointer status (1 = valid, 0 = invalid) */ + uint32_t disc; + /* disc = AUDMGR_STATUS_READY => data=handle + disc = AUDMGR_STATUS_CODEC_CONFIG => data = handle + disc = AUDMGR_STATUS_DISABLED => data =status_disabled + disc = AUDMGR_STATUS_VOLUME_CHANGE => data = volume-change */ + union { + uint32_t handle; + uint32_t volume; + uint32_t status_disabled; + uint32_t volume_change; + } u; +}; + +#define AUDMGR_CB_FUNC_PTR 1 +#define AUDMGR_OPR_LSTNR_CB_FUNC_PTR 2 +#define AUDMGR_CODEC_LSTR_FUNC_PTR 3 + +#define AUDMGR_CB_PROG 0x31000013 +#define AUDMGR_CB_VERS 0xf8e3e2d9 + +struct audmgr { + wait_queue_head_t wait; + uint32_t handle; + struct msm_rpc_endpoint *ept; + struct task_struct *task; + int state; +}; + +struct audmgr_config { + uint32_t tx_rate; + uint32_t rx_rate; + uint32_t def_method; + uint32_t codec; + uint32_t snd_method; +}; + +int audmgr_open(struct audmgr *am); +int audmgr_close(struct audmgr *am); +int audmgr_enable(struct audmgr *am, struct audmgr_config *cfg); +int audmgr_disable(struct audmgr *am); + +typedef void (*audpp_event_func)(void *private, unsigned id, uint16_t *msg); + +int audpp_enable(int id, audpp_event_func func, void *private); +void audpp_disable(int id, void *private); + +int audpp_send_queue1(void *cmd, unsigned len); +int audpp_send_queue2(void *cmd, unsigned len); +int audpp_send_queue3(void *cmd, unsigned len); + +int audpp_set_volume_and_pan(unsigned id, unsigned volume, int pan); +int audpp_pause(unsigned id, int pause); +int audpp_flush(unsigned id); +void audpp_avsync(int id, unsigned rate); +unsigned audpp_avsync_sample_count(int id); +unsigned audpp_avsync_byte_count(int id); + +#endif diff --git a/arch/arm/mach-msm/qdsp5/audpp.c b/arch/arm/mach-msm/qdsp5/audpp.c new file mode 100644 index 0000000000000..32c284716475e --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/audpp.c @@ -0,0 +1,429 @@ + +/* arch/arm/mach-msm/qdsp5/audpp.c + * + * common code to deal with the AUDPP dsp task (audio postproc) + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include + +#include +#include +#include + +#include "audmgr.h" + +#include +#include + +/* for queue ids - should be relative to module number*/ +#include "adsp.h" + +#include "evlog.h" + + +enum { + EV_NULL, + EV_ENABLE, + EV_DISABLE, + EV_EVENT, + EV_DATA, +}; + +static const char *dsp_log_strings[] = { + "NULL", + "ENABLE", + "DISABLE", + "EVENT", + "DATA", +}; + +DECLARE_LOG(dsp_log, 64, dsp_log_strings); + +static int __init _dsp_log_init(void) +{ + return ev_log_init(&dsp_log); +} +module_init(_dsp_log_init); +#define LOG(id,arg) ev_log_write(&dsp_log, id, arg) + +static DEFINE_MUTEX(audpp_lock); + +#define CH_COUNT 5 +#define AUDPP_CLNT_MAX_COUNT 6 +#define AUDPP_AVSYNC_INFO_SIZE 7 + +struct audpp_state { + struct msm_adsp_module *mod; + audpp_event_func func[AUDPP_CLNT_MAX_COUNT]; + void *private[AUDPP_CLNT_MAX_COUNT]; + struct mutex *lock; + unsigned open_count; + unsigned enabled; + + /* which channels are actually enabled */ + unsigned avsync_mask; + + /* flags, 48 bits sample/bytes counter per channel */ + uint16_t avsync[CH_COUNT * AUDPP_CLNT_MAX_COUNT + 1]; +}; + +struct audpp_state the_audpp_state = { + .lock = &audpp_lock, +}; + +int audpp_send_queue1(void *cmd, unsigned len) +{ + return msm_adsp_write(the_audpp_state.mod, + QDSP_uPAudPPCmd1Queue, cmd, len); +} +EXPORT_SYMBOL(audpp_send_queue1); + +int audpp_send_queue2(void *cmd, unsigned len) +{ + return msm_adsp_write(the_audpp_state.mod, + QDSP_uPAudPPCmd2Queue, cmd, len); +} +EXPORT_SYMBOL(audpp_send_queue2); + +int audpp_send_queue3(void *cmd, unsigned len) +{ + return msm_adsp_write(the_audpp_state.mod, + QDSP_uPAudPPCmd3Queue, cmd, len); +} +EXPORT_SYMBOL(audpp_send_queue3); + +static int audpp_dsp_config(int enable) +{ + audpp_cmd_cfg cmd; + + cmd.cmd_id = AUDPP_CMD_CFG; + cmd.cfg = enable ? AUDPP_CMD_CFG_ENABLE : AUDPP_CMD_CFG_SLEEP; + + return audpp_send_queue1(&cmd, sizeof(cmd)); +} + +static void audpp_broadcast(struct audpp_state *audpp, unsigned id, + uint16_t *msg) +{ + unsigned n; + for (n = 0; n < AUDPP_CLNT_MAX_COUNT; n++) { + if (audpp->func[n]) + audpp->func[n] (audpp->private[n], id, msg); + } +} + +static void audpp_notify_clnt(struct audpp_state *audpp, unsigned clnt_id, + unsigned id, uint16_t *msg) +{ + if (clnt_id < AUDPP_CLNT_MAX_COUNT && audpp->func[clnt_id]) + audpp->func[clnt_id] (audpp->private[clnt_id], id, msg); +} + +static void audpp_dsp_event(void *data, unsigned id, size_t len, + void (*getevent)(void *ptr, size_t len)) +{ + struct audpp_state *audpp = data; + uint16_t msg[8]; + + if (id == AUDPP_MSG_AVSYNC_MSG) { + getevent(audpp->avsync, sizeof(audpp->avsync)); + + /* mask off any channels we're not watching to avoid + * cases where we might get one last update after + * disabling avsync and end up in an odd state when + * we next read... 
+ */ + audpp->avsync[0] &= audpp->avsync_mask; + return; + } + + getevent(msg, sizeof(msg)); + + LOG(EV_EVENT, (id << 16) | msg[0]); + LOG(EV_DATA, (msg[1] << 16) | msg[2]); + + switch (id) { + case AUDPP_MSG_STATUS_MSG:{ + unsigned cid = msg[0]; + pr_info("audpp: status %d %d %d\n", cid, msg[1], + msg[2]); + if ((cid < 5) && audpp->func[cid]) + audpp->func[cid] (audpp->private[cid], id, msg); + break; + } + case AUDPP_MSG_HOST_PCM_INTF_MSG: + if (audpp->func[5]) + audpp->func[5] (audpp->private[5], id, msg); + break; + case AUDPP_MSG_PCMDMAMISSED: + pr_err("audpp: DMA missed obj=%x\n", msg[0]); + break; + case AUDPP_MSG_CFG_MSG: + if (msg[0] == AUDPP_MSG_ENA_ENA) { + pr_info("audpp: ENABLE\n"); + audpp->enabled = 1; + audpp_broadcast(audpp, id, msg); + } else if (msg[0] == AUDPP_MSG_ENA_DIS) { + pr_info("audpp: DISABLE\n"); + audpp->enabled = 0; + audpp_broadcast(audpp, id, msg); + } else { + pr_err("audpp: invalid config msg %d\n", msg[0]); + } + break; + case AUDPP_MSG_ROUTING_ACK: + audpp_broadcast(audpp, id, msg); + break; + case AUDPP_MSG_FLUSH_ACK: + audpp_notify_clnt(audpp, msg[0], id, msg); + break; + default: + pr_info("audpp: unhandled msg id %x\n", id); + } +} + +static struct msm_adsp_ops adsp_ops = { + .event = audpp_dsp_event, +}; + +static void audpp_fake_event(struct audpp_state *audpp, int id, + unsigned event, unsigned arg) +{ + uint16_t msg[1]; + msg[0] = arg; + audpp->func[id] (audpp->private[id], event, msg); +} + +int audpp_enable(int id, audpp_event_func func, void *private) +{ + struct audpp_state *audpp = &the_audpp_state; + int res = 0; + + if (id < -1 || id > 4) + return -EINVAL; + + if (id == -1) + id = 5; + + mutex_lock(audpp->lock); + if (audpp->func[id]) { + res = -EBUSY; + goto out; + } + + audpp->func[id] = func; + audpp->private[id] = private; + + LOG(EV_ENABLE, 1); + if (audpp->open_count++ == 0) { + pr_info("audpp: enable\n"); + res = msm_adsp_get("AUDPPTASK", &audpp->mod, &adsp_ops, audpp); + if (res < 0) { + pr_err("audpp: cannot open AUDPPTASK\n"); + audpp->open_count = 0; + audpp->func[id] = NULL; + audpp->private[id] = NULL; + goto out; + } + LOG(EV_ENABLE, 2); + msm_adsp_enable(audpp->mod); + audpp_dsp_config(1); + } else { + unsigned long flags; + local_irq_save(flags); + if (audpp->enabled) + audpp_fake_event(audpp, id, + AUDPP_MSG_CFG_MSG, AUDPP_MSG_ENA_ENA); + local_irq_restore(flags); + } + + res = 0; +out: + mutex_unlock(audpp->lock); + return res; +} +EXPORT_SYMBOL(audpp_enable); + +void audpp_disable(int id, void *private) +{ + struct audpp_state *audpp = &the_audpp_state; + unsigned long flags; + + if (id < -1 || id > 4) + return; + + if (id == -1) + id = 5; + + mutex_lock(audpp->lock); + LOG(EV_DISABLE, 1); + if (!audpp->func[id]) + goto out; + if (audpp->private[id] != private) + goto out; + + local_irq_save(flags); + audpp_fake_event(audpp, id, AUDPP_MSG_CFG_MSG, AUDPP_MSG_ENA_DIS); + audpp->func[id] = NULL; + audpp->private[id] = NULL; + local_irq_restore(flags); + + if (--audpp->open_count == 0) { + pr_info("audpp: disable\n"); + LOG(EV_DISABLE, 2); + audpp_dsp_config(0); + msm_adsp_disable(audpp->mod); + msm_adsp_put(audpp->mod); + audpp->mod = NULL; + } +out: + mutex_unlock(audpp->lock); +} +EXPORT_SYMBOL(audpp_disable); + +#define BAD_ID(id) ((id < 0) || (id >= CH_COUNT)) + +void audpp_avsync(int id, unsigned rate) +{ + unsigned long flags; + audpp_cmd_avsync cmd; + + if (BAD_ID(id)) + return; + + local_irq_save(flags); + if (rate) + the_audpp_state.avsync_mask |= (1 << id); + else + the_audpp_state.avsync_mask &= (~(1 << id)); + 
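/* Drop any stale per-channel flag bits so counters for channels no longer being watched read back as zero. */ +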
the_audpp_state.avsync[0] &= the_audpp_state.avsync_mask; + local_irq_restore(flags); + + cmd.cmd_id = AUDPP_CMD_AVSYNC; + cmd.object_number = id; + cmd.interrupt_interval_lsw = rate; + cmd.interrupt_interval_msw = rate >> 16; + audpp_send_queue1(&cmd, sizeof(cmd)); +} +EXPORT_SYMBOL(audpp_avsync); + +unsigned audpp_avsync_sample_count(int id) +{ + uint16_t *avsync = the_audpp_state.avsync; + unsigned val; + unsigned long flags; + unsigned mask; + + if (BAD_ID(id)) + return 0; + + mask = 1 << id; + id = id * AUDPP_AVSYNC_INFO_SIZE + 2; + local_irq_save(flags); + if (avsync[0] & mask) + val = (avsync[id] << 16) | avsync[id + 1]; + else + val = 0; + local_irq_restore(flags); + + return val; +} +EXPORT_SYMBOL(audpp_avsync_sample_count); + +unsigned audpp_avsync_byte_count(int id) +{ + uint16_t *avsync = the_audpp_state.avsync; + unsigned val; + unsigned long flags; + unsigned mask; + + if (BAD_ID(id)) + return 0; + + mask = 1 << id; + id = id * AUDPP_AVSYNC_INFO_SIZE + 5; + local_irq_save(flags); + if (avsync[0] & mask) + val = (avsync[id] << 16) | avsync[id + 1]; + else + val = 0; + local_irq_restore(flags); + + return val; +} +EXPORT_SYMBOL(audpp_avsync_byte_count); + +#define AUDPP_CMD_CFG_OBJ_UPDATE 0x8000 +#define AUDPP_CMD_VOLUME_PAN 0 + +int audpp_set_volume_and_pan(unsigned id, unsigned volume, int pan) +{ + /* cmd, obj_cfg[7], cmd_type, volume, pan */ + uint16_t cmd[11]; + + if (id > 6) + return -EINVAL; + + memset(cmd, 0, sizeof(cmd)); + cmd[0] = AUDPP_CMD_CFG_OBJECT_PARAMS; + cmd[1 + id] = AUDPP_CMD_CFG_OBJ_UPDATE; + cmd[8] = AUDPP_CMD_VOLUME_PAN; + cmd[9] = volume; + cmd[10] = pan; + + return audpp_send_queue3(cmd, sizeof(cmd)); +} +EXPORT_SYMBOL(audpp_set_volume_and_pan); + +int audpp_pause(unsigned id, int pause) +{ + /* pause 1 = pause 0 = resume */ + u16 pause_cmd[AUDPP_CMD_DEC_CTRL_LEN / sizeof(unsigned short)]; + + if (id >= CH_COUNT) + return -EINVAL; + + memset(pause_cmd, 0, sizeof(pause_cmd)); + + pause_cmd[0] = AUDPP_CMD_DEC_CTRL; + if (pause == 1) + pause_cmd[1 + id] = AUDPP_CMD_UPDATE_V | AUDPP_CMD_PAUSE_V; + else if (pause == 0) + pause_cmd[1 + id] = AUDPP_CMD_UPDATE_V | AUDPP_CMD_RESUME_V; + else + return -EINVAL; + + return audpp_send_queue1(pause_cmd, sizeof(pause_cmd)); +} +EXPORT_SYMBOL(audpp_pause); + +int audpp_flush(unsigned id) +{ + u16 flush_cmd[AUDPP_CMD_DEC_CTRL_LEN / sizeof(unsigned short)]; + + if (id >= CH_COUNT) + return -EINVAL; + + memset(flush_cmd, 0, sizeof(flush_cmd)); + + flush_cmd[0] = AUDPP_CMD_DEC_CTRL; + flush_cmd[1 + id] = AUDPP_CMD_UPDATE_V | AUDPP_CMD_FLUSH_V; + + return audpp_send_queue1(flush_cmd, sizeof(flush_cmd)); +} +EXPORT_SYMBOL(audpp_flush); diff --git a/arch/arm/mach-msm/qdsp5/evlog.h b/arch/arm/mach-msm/qdsp5/evlog.h new file mode 100644 index 0000000000000..5c0edf1e9feef --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/evlog.h @@ -0,0 +1,133 @@ +/* arch/arm/mach-msm/qdsp5/evlog.h + * + * simple event log debugging facility + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include + +#define EV_LOG_ENTRY_NAME(n) n##_entry + +#define DECLARE_LOG(_name, _size, _str) \ +static struct ev_entry EV_LOG_ENTRY_NAME(_name)[_size]; \ +static struct ev_log _name = { \ + .name = #_name, \ + .strings = _str, \ + .num_strings = ARRAY_SIZE(_str), \ + .entry = EV_LOG_ENTRY_NAME(_name), \ + .max = ARRAY_SIZE(EV_LOG_ENTRY_NAME(_name)), \ +} + +struct ev_entry { + ktime_t when; + uint32_t id; + uint32_t arg; +}; + +struct ev_log { + struct ev_entry *entry; + unsigned max; + unsigned next; + unsigned fault; + const char **strings; + unsigned num_strings; + const char *name; +}; + +static char ev_buf[4096]; + +static ssize_t ev_log_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct ev_log *log = file->private_data; + struct ev_entry *entry; + unsigned long flags; + int size = 0; + unsigned n, id, max; + ktime_t now, t; + + max = log->max; + now = ktime_get(); + local_irq_save(flags); + n = (log->next - 1) & (max - 1); + entry = log->entry; + while (n != log->next) { + t = ktime_sub(now, entry[n].when); + id = entry[n].id; + if (id) { + const char *str; + if (id < log->num_strings) + str = log->strings[id]; + else + str = "UNKNOWN"; + size += scnprintf(ev_buf + size, 4096 - size, + "%8d.%03d %08x %s\n", + t.tv.sec, t.tv.nsec / 1000000, + entry[n].arg, str); + } + n = (n - 1) & (max - 1); + } + log->fault = 0; + local_irq_restore(flags); + return simple_read_from_buffer(buf, count, ppos, ev_buf, size); +} + +static void ev_log_write(struct ev_log *log, unsigned id, unsigned arg) +{ + struct ev_entry *entry; + unsigned long flags; + local_irq_save(flags); + + if (log->fault) { + if (log->fault == 1) + goto done; + log->fault--; + } + + entry = log->entry + log->next; + entry->when = ktime_get(); + entry->id = id; + entry->arg = arg; + log->next = (log->next + 1) & (log->max - 1); +done: + local_irq_restore(flags); +} + +static void ev_log_freeze(struct ev_log *log, unsigned count) +{ + unsigned long flags; + local_irq_save(flags); + log->fault = count; + local_irq_restore(flags); +} + +static int ev_log_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static const struct file_operations ev_log_ops = { + .read = ev_log_read, + .open = ev_log_open, +}; + +static int ev_log_init(struct ev_log *log) +{ + debugfs_create_file(log->name, 0444, 0, log, &ev_log_ops); + return 0; +} + diff --git a/arch/arm/mach-msm/qdsp5/snd.c b/arch/arm/mach-msm/qdsp5/snd.c new file mode 100644 index 0000000000000..037d7ffb7e670 --- /dev/null +++ b/arch/arm/mach-msm/qdsp5/snd.c @@ -0,0 +1,279 @@ +/* arch/arm/mach-msm/qdsp5/snd.c + * + * interface to "snd" service on the baseband cpu + * + * Copyright (C) 2008 HTC Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +struct snd_ctxt { + struct mutex lock; + int opened; + struct msm_rpc_endpoint *ept; + struct msm_snd_endpoints *snd_epts; +}; + +static struct snd_ctxt the_snd; + +#define RPC_SND_PROG 0x30000002 +#define RPC_SND_CB_PROG 0x31000002 +#if CONFIG_MSM_AMSS_VERSION == 6210 +#define RPC_SND_VERS 0x94756085 /* 2490720389 */ +#elif (CONFIG_MSM_AMSS_VERSION == 6220) || \ + (CONFIG_MSM_AMSS_VERSION == 6225) +#define RPC_SND_VERS 0xaa2b1a44 /* 2854951492 */ +#elif CONFIG_MSM_AMSS_VERSION == 6350 +#define RPC_SND_VERS MSM_RPC_VERS(1,0) +#endif + +#define SND_SET_DEVICE_PROC 2 +#define SND_SET_VOLUME_PROC 3 + +struct rpc_snd_set_device_args { + uint32_t device; + uint32_t ear_mute; + uint32_t mic_mute; + + uint32_t cb_func; + uint32_t client_data; +}; + +struct rpc_snd_set_volume_args { + uint32_t device; + uint32_t method; + uint32_t volume; + + uint32_t cb_func; + uint32_t client_data; +}; + +struct snd_set_device_msg { + struct rpc_request_hdr hdr; + struct rpc_snd_set_device_args args; +}; + +struct snd_set_volume_msg { + struct rpc_request_hdr hdr; + struct rpc_snd_set_volume_args args; +}; + +struct snd_endpoint *get_snd_endpoints(int *size); + +static inline int check_mute(int mute) +{ + return (mute == SND_MUTE_MUTED || + mute == SND_MUTE_UNMUTED) ? 0 : -EINVAL; +} + +static int get_endpoint(struct snd_ctxt *snd, unsigned long arg) +{ + int rc = 0, index; + struct msm_snd_endpoint ept; + + if (copy_from_user(&ept, (void __user *)arg, sizeof(ept))) { + pr_err("snd_ioctl get endpoint: invalid read pointer.\n"); + return -EFAULT; + } + + index = ept.id; + if (index < 0 || index >= snd->snd_epts->num) { + pr_err("snd_ioctl get endpoint: invalid index!\n"); + return -EINVAL; + } + + ept.id = snd->snd_epts->endpoints[index].id; + strncpy(ept.name, + snd->snd_epts->endpoints[index].name, + sizeof(ept.name)); + + if (copy_to_user((void __user *)arg, &ept, sizeof(ept))) { + pr_err("snd_ioctl get endpoint: invalid write pointer.\n"); + rc = -EFAULT; + } + + return rc; +} + +static long snd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct snd_set_device_msg dmsg; + struct snd_set_volume_msg vmsg; + struct msm_snd_device_config dev; + struct msm_snd_volume_config vol; + struct snd_ctxt *snd = file->private_data; + int rc = 0; + + mutex_lock(&snd->lock); + switch (cmd) { + case SND_SET_DEVICE: + if (copy_from_user(&dev, (void __user *) arg, sizeof(dev))) { + pr_err("snd_ioctl set device: invalid pointer.\n"); + rc = -EFAULT; + break; + } + + dmsg.args.device = cpu_to_be32(dev.device); + dmsg.args.ear_mute = cpu_to_be32(dev.ear_mute); + dmsg.args.mic_mute = cpu_to_be32(dev.mic_mute); + if (check_mute(dev.ear_mute) < 0 || + check_mute(dev.mic_mute) < 0) { + pr_err("snd_ioctl set device: invalid mute status.\n"); + rc = -EINVAL; + break; + } + dmsg.args.cb_func = -1; + dmsg.args.client_data = 0; + + pr_info("snd_set_device %d %d %d\n", dev.device, + dev.ear_mute, dev.mic_mute); + + rc = msm_rpc_call(snd->ept, + SND_SET_DEVICE_PROC, + &dmsg, sizeof(dmsg), 5 * HZ); + break; + + case SND_SET_VOLUME: + if (copy_from_user(&vol, (void __user *) arg, sizeof(vol))) { + pr_err("snd_ioctl set volume: invalid pointer.\n"); + rc = -EFAULT; + break; + } + + vmsg.args.device = cpu_to_be32(vol.device); + vmsg.args.method = cpu_to_be32(vol.method); + if (vol.method != SND_METHOD_VOICE) { + pr_err("snd_ioctl set volume: invalid method.\n"); + rc = -EINVAL; + break; + } + + 
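/* Method is valid; marshal the remaining volume arguments big-endian, as in the SND_SET_DEVICE path above. */ +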
vmsg.args.volume = cpu_to_be32(vol.volume); + vmsg.args.cb_func = -1; + vmsg.args.client_data = 0; + + pr_info("snd_set_volume %d %d %d\n", vol.device, + vol.method, vol.volume); + + rc = msm_rpc_call(snd->ept, + SND_SET_VOLUME_PROC, + &vmsg, sizeof(vmsg), 5 * HZ); + break; + + case SND_GET_NUM_ENDPOINTS: + if (copy_to_user((void __user *)arg, + &snd->snd_epts->num, sizeof(unsigned))) { + pr_err("snd_ioctl get endpoint: invalid pointer.\n"); + rc = -EFAULT; + } + break; + + case SND_GET_ENDPOINT: + rc = get_endpoint(snd, arg); + break; + + default: + pr_err("snd_ioctl unknown command.\n"); + rc = -EINVAL; + break; + } + mutex_unlock(&snd->lock); + + return rc; +} + +static int snd_release(struct inode *inode, struct file *file) +{ + struct snd_ctxt *snd = file->private_data; + + mutex_lock(&snd->lock); + snd->opened = 0; + mutex_unlock(&snd->lock); + return 0; +} + +static int snd_open(struct inode *inode, struct file *file) +{ + struct snd_ctxt *snd = &the_snd; + int rc = 0; + + mutex_lock(&snd->lock); + if (snd->opened == 0) { + if (snd->ept == NULL) { + snd->ept = msm_rpc_connect(RPC_SND_PROG, RPC_SND_VERS, + MSM_RPC_UNINTERRUPTIBLE); + if (IS_ERR(snd->ept)) { + rc = PTR_ERR(snd->ept); + snd->ept = NULL; + pr_err("snd: failed to connect snd svc\n"); + goto err; + } + } + file->private_data = snd; + snd->opened = 1; + } else { + pr_err("snd already opened.\n"); + rc = -EBUSY; + } + +err: + mutex_unlock(&snd->lock); + return rc; +} + +static struct file_operations snd_fops = { + .owner = THIS_MODULE, + .open = snd_open, + .release = snd_release, + .unlocked_ioctl = snd_ioctl, +}; + +struct miscdevice snd_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_snd", + .fops = &snd_fops, +}; + +static int snd_probe(struct platform_device *pdev) +{ + struct snd_ctxt *snd = &the_snd; + mutex_init(&snd->lock); + snd->snd_epts = (struct msm_snd_endpoints *)pdev->dev.platform_data; + return misc_register(&snd_misc); +} + +static struct platform_driver snd_plat_driver = { + .probe = snd_probe, + .driver = { + .name = "msm_snd", + .owner = THIS_MODULE, + }, +}; + +static int __init snd_init(void) +{ + return platform_driver_register(&snd_plat_driver); +} + +module_init(snd_init); diff --git a/arch/arm/mach-msm/qdsp5v2/Makefile b/arch/arm/mach-msm/qdsp5v2/Makefile new file mode 100644 index 0000000000000..4fb4ddaadc685 --- /dev/null +++ b/arch/arm/mach-msm/qdsp5v2/Makefile @@ -0,0 +1,6 @@ +obj-y += adsp.o +obj-y += adsp_audio.o +obj-y += audio_glue.o +obj-y += audio_out.o +obj-y += marimba.o +obj-y += voice.o diff --git a/arch/arm/mach-msm/qdsp5v2/adsp.c b/arch/arm/mach-msm/qdsp5v2/adsp.c new file mode 100644 index 0000000000000..18886e321bc1c --- /dev/null +++ b/arch/arm/mach-msm/qdsp5v2/adsp.c @@ -0,0 +1,701 @@ +/* arch/arm/mach-msm/qdsp5v2/adsp.c + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "../dal.h" + +#include "adsp.h" +#include "adsp_private.h" +#include "adsp_audio.h" + +struct msm_adsp_queue { + const char *name; + uint32_t offset; + uint32_t max_size; + uint32_t flags; +}; + +struct msm_adsp_module { + msm_adsp_callback func; + void *cookie; + + wait_queue_head_t wait; + struct msm_adsp *adsp; + uint32_t id; + + unsigned active; + + const char *name; + struct msm_adsp_module *next; + struct msm_adsp_queue queue[ADSP_QUEUES_MAX]; +}; + +struct msm_adsp { + /* DSP "registers" */ + void *read_ctrl; + void *write_ctrl; + void *send_irq; + void *base; + + /* DAL client handle for DSP control service */ + struct dal_client *client; + + spinlock_t callback_lock; + spinlock_t write_lock; + spinlock_t event_lock; + + wait_queue_head_t callback_wq; + + /* list of all existing dsp modules */ + struct msm_adsp_module *all_modules; + + /* map from dsp rtos task IDs to modules */ + struct msm_adsp_module *task_to_module[ADSP_TASKS_MAX]; + + /* used during initialization */ + struct adsp_module_info tmpmodule; + +}; + +static struct msm_adsp the_adsp; + +static struct msm_adsp_module *id_to_module(struct msm_adsp *adsp, unsigned id) +{ + struct msm_adsp_module *module; + + for (module = adsp->all_modules; module; module = module->next) + if (module->id == id) + return module; + return NULL; +} + +int msm_adsp_get(const char *name, struct msm_adsp_module **module, + msm_adsp_callback func, void *cookie) +{ + struct msm_adsp *adsp = &the_adsp; + unsigned long flags; + int ret = -ENODEV; + struct msm_adsp_module *m; + + for (m = adsp->all_modules; m; m = m->next) { + if (!strcmp(m->name, name)) { + spin_lock_irqsave(&m->adsp->callback_lock, flags); + if (m->func == 0) { + m->func = func; + m->cookie = cookie; + *module = m; + ret = 0; + } else { + ret = -EBUSY; + } + spin_unlock_irqrestore(&m->adsp->callback_lock, flags); + break; + } + } + return ret; +} + +void msm_adsp_put(struct msm_adsp_module *m) +{ + unsigned long flags; + + spin_lock_irqsave(&m->adsp->callback_lock, flags); + m->func = 0; + m->cookie = 0; + spin_unlock_irqrestore(&m->adsp->callback_lock, flags); +} + + +int msm_adsp_lookup_queue(struct msm_adsp_module *module, const char *name) +{ + int n; + for (n = 0; n < ADSP_QUEUES_MAX; n++) { + if (!module->queue[n].name) + break; + if (!strcmp(name, module->queue[n].name)) + return n; + } + return -ENODEV; +} + +static int msm_adsp_command(struct msm_adsp_module *module, unsigned cmd_id) +{ + struct adsp_dal_cmd cmd; + int ret; + + cmd.cmd = cmd_id; + cmd.proc_id = ADSP_PROC_APPS; + cmd.module = module->id; + cmd.cookie = 0; + + ret = dal_call_f5(module->adsp->client, ADSP_DAL_COMMAND, + &cmd, sizeof(cmd)); + if (ret) + return -EIO; + + return 0; +} + +int msm_adsp_enable(struct msm_adsp_module *module) +{ + int ret; + /* XXX interlock? */ + + ret = msm_adsp_command(module, ADSP_CMD_ENABLE); + if (ret < 0) { + pr_err("msm_adsp_enable: error enabling %s %d\n", + module->name, ret); + return -EIO; + } + ret = wait_event_timeout(module->adsp->callback_wq, + module->active, 5 * HZ); + if (!ret) { + pr_err("msm_adsp_enable: timeout enabling %s\n", + module->name); + return -ETIMEDOUT; + } + + printk("msm_adsp_enable: %s enabled.\n", module->name); + return 0; +} + +int msm_adsp_disable(struct msm_adsp_module *module) +{ + /* XXX interlock? 
*/ + return msm_adsp_command(module, ADSP_CMD_DISABLE); +} + +int msm_adsp_write(struct msm_adsp_module *module, unsigned queue_idx, + void *cmd_buf, size_t cmd_size) +{ + struct msm_adsp *adsp; + uint32_t val; + uint32_t dsp_q_addr; + uint32_t dsp_addr; + uint32_t cmd_id = 0; + int cnt = 0; + int ret = 0; + unsigned long flags; + + if (!module || !cmd_size || (queue_idx >= ADSP_QUEUES_MAX)) + return -EINVAL; + + if (module->queue[queue_idx].name == NULL) + return -EINVAL; + + adsp = module->adsp; + + spin_lock_irqsave(&adsp->write_lock, flags); + +#if 0 + if (module->state != ADSP_STATE_ENABLED) { + ret = -ENODEV; + goto done; + } +#endif + + dsp_q_addr = module->queue[queue_idx].offset; + dsp_q_addr &= ADSP_WRITE_CTRL_DSP_ADDR_M; + + /* Poll until the ADSP is ready to accept a command. + * Wait for 100us, return error if it's not responding. + * If this returns an error, we need to disable ALL modules and + * then retry. + */ + while (((val = readl(adsp->write_ctrl)) & + ADSP_WRITE_CTRL_READY_M) != + ADSP_WRITE_CTRL_READY_V) { + if (cnt > 50) { + pr_err("timeout waiting for DSP write ready\n"); + ret = -EIO; + goto done; + } + udelay(2); + cnt++; + } + + /* Set the mutex bits */ + val &= ~(ADSP_WRITE_CTRL_MUTEX_M); + val |= ADSP_WRITE_CTRL_MUTEX_NAVAIL_V; + + /* Clear the command bits */ + val &= ~(ADSP_WRITE_CTRL_CMD_M); + + /* Set the queue address bits */ + val &= ~(ADSP_WRITE_CTRL_DSP_ADDR_M); + val |= dsp_q_addr; + + writel(val, adsp->write_ctrl); + + /* Generate an interrupt to the DSP. This notifies the DSP that + * we are about to send a command on this particular queue. The + * DSP will in response change its state. + */ + writel(1, adsp->send_irq); + + /* Poll until the adsp responds to the interrupt; this does not + * generate an interrupt from the adsp. This should happen within + * 5ms. + */ + cnt = 0; + while ((readl(adsp->write_ctrl) & + ADSP_WRITE_CTRL_MUTEX_M) == + ADSP_WRITE_CTRL_MUTEX_NAVAIL_V) { + if (cnt > 2500) { + pr_err("timeout waiting for adsp ack\n"); + ret = -EIO; + goto done; + } + udelay(2); + cnt++; + } + + /* Read the ctrl word */ + val = readl(adsp->write_ctrl); + + if ((val & ADSP_WRITE_CTRL_STATUS_M) != + ADSP_WRITE_CTRL_NO_ERR_V) { + ret = -EIO; + pr_err("failed to write queue %x, retry\n", dsp_q_addr); + goto done; + } + + /* No error */ + /* Get the DSP buffer address */ + dsp_addr = (val & ADSP_WRITE_CTRL_DSP_ADDR_M) + + (uint32_t)MSM_AD5_BASE; + + if (dsp_addr < (uint32_t)(MSM_AD5_BASE + QDSP_RAMC_OFFSET)) { + uint16_t *buf_ptr = (uint16_t *) cmd_buf; + uint16_t *dsp_addr16 = (uint16_t *)dsp_addr; + cmd_size /= sizeof(uint16_t); + + /* Save the command ID */ + cmd_id = (uint32_t) buf_ptr[0]; + + /* Copy the command to DSP memory */ + cmd_size++; + while (--cmd_size) + *dsp_addr16++ = *buf_ptr++; + } else { + uint32_t *buf_ptr = (uint32_t *) cmd_buf; + uint32_t *dsp_addr32 = (uint32_t *)dsp_addr; + cmd_size /= sizeof(uint32_t); + + /* Save the command ID */ + cmd_id = buf_ptr[0]; + + cmd_size++; + while (--cmd_size) + *dsp_addr32++ = *buf_ptr++; + } + + /* Set the mutex bits */ + val &= ~(ADSP_WRITE_CTRL_MUTEX_M); + val |= ADSP_WRITE_CTRL_MUTEX_NAVAIL_V; + + /* Set the command bits to write done */ + val &= ~(ADSP_WRITE_CTRL_CMD_M); + val |= ADSP_WRITE_CTRL_CMD_WRITE_DONE_V; + + /* Set the queue address bits */ + val &= ~(ADSP_WRITE_CTRL_DSP_ADDR_M); + val |= dsp_q_addr; + + writel(val, adsp->write_ctrl); + + /* Generate an interrupt to the DSP. 
It does not respond with + * an interrupt, and we do not need to wait for it to + * acknowledge, because it will hold the mutex lock until it's + * ready to receive more commands again. + */ + writel(1, adsp->send_irq); + +// module->num_commands++; + +done: + spin_unlock_irqrestore(&adsp->write_lock, flags); + return ret; +} + +static int adsp_read_task_to_host(struct msm_adsp *adsp, void *dsp_addr) +{ + struct msm_adsp_module *module; + unsigned task_id; + unsigned msg_id; + unsigned msg_length; + unsigned n; + unsigned tmp; + union { + u32 data32[16]; + u16 data16[32]; + } u; + + if (dsp_addr >= (void *)(MSM_AD5_BASE + QDSP_RAMC_OFFSET)) { + uint32_t *dsp_addr32 = dsp_addr; + tmp = *dsp_addr32++; + task_id = (tmp & ADSP_READ_CTRL_TASK_ID_M) >> 8; + msg_id = (tmp & ADSP_READ_CTRL_MSG_ID_M); + tmp >>= 16; + if (tmp > 16) { + pr_err("adsp: message too large (%d x 32)\n", tmp); + tmp = 16; + } + msg_length = tmp * sizeof(uint32_t); + for (n = 0; n < tmp; n++) + u.data32[n] = *dsp_addr32++; + } else { + uint16_t *dsp_addr16 = dsp_addr; + tmp = *dsp_addr16++; + task_id = (tmp & ADSP_READ_CTRL_TASK_ID_M) >> 8; + msg_id = tmp & ADSP_READ_CTRL_MSG_ID_M; + tmp = *dsp_addr16++; + if (tmp > 32) { + pr_err("adsp: message too large (%d x 16)\n", tmp); + tmp = 32; + } + msg_length = tmp * sizeof(uint16_t); + for (n = 0; n < tmp; n++) + u.data16[n] = *dsp_addr16++; + } + +#if 0 + pr_info("ADSP EVENT TASK %d MSG %d SIZE %d\n", + task_id, msg_id, msg_length); +#endif + if (task_id >= ADSP_TASKS_MAX) { + pr_err("adsp: bogus task id %d\n", task_id); + return 0; + } + module = adsp->task_to_module[task_id]; + + if (!module) { + pr_err("adsp: no module for task id %d\n", task_id); + return 0; + } + + if (!module->func) { + pr_err("module %s is not open\n", module->name); + return 0; + } + + module->func(msg_id, u.data32, msg_length, module->cookie); + return 0; +} + +static int adsp_get_event(struct msm_adsp *adsp) +{ + uint32_t val; + uint32_t ready; + void *dsp_addr; + uint32_t cmd_type; + int cnt; + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&adsp->event_lock, flags); + + /* Whenever the DSP has a message, it updates this control word + * and generates an interrupt. When we receive the interrupt, we + * read this register to find out what ADSP task the command is + * coming from. + * + * The ADSP should *always* be ready on the first call, but the + * irq handler calls us in a loop (to handle back-to-back command + * processing), so we give the DSP some time to return to the + * ready state. The DSP will not issue another IRQ for events + * pending between the first IRQ and the event queue being drained, + * unfortunately. + */ + + for (cnt = 0; cnt < 50; cnt++) { + val = readl(adsp->read_ctrl); + + if ((val & ADSP_READ_CTRL_FLAG_M) == + ADSP_READ_CTRL_FLAG_UP_CONT_V) + goto ready; + + udelay(2); + } + pr_err("adsp_get_event: not ready after 100us\n"); + rc = -EBUSY; + goto done; + +ready: + /* Here we check to see if there are pending messages. If there are + * none, we simply return -EAGAIN to indicate that there are no more + * messages pending. 
+ */ + ready = val & ADSP_READ_CTRL_READY_M; + if ((ready != ADSP_READ_CTRL_READY_V) && + (ready != ADSP_READ_CTRL_CONT_V)) { + rc = -EAGAIN; + goto done; + } + + /* DSP says that there are messages waiting for the host to read */ + + /* Get the Command Type */ + cmd_type = val & ADSP_READ_CTRL_CMD_TYPE_M; + + /* Get the DSP buffer address */ + dsp_addr = (void *)((val & + ADSP_READ_CTRL_DSP_ADDR_M) + + (uint32_t)MSM_AD5_BASE); + + /* We can only handle Task-to-Host messages */ + if (cmd_type != ADSP_READ_CTRL_CMD_TASK_TO_H_V) { + rc = -EIO; + goto done; + } + + adsp_read_task_to_host(adsp, dsp_addr); + + val = readl(adsp->read_ctrl); + val &= ~ADSP_READ_CTRL_READY_M; + + /* Write ctrl word to the DSP */ + writel(val, adsp->read_ctrl); + + /* Generate an interrupt to the DSP */ + writel(1, adsp->send_irq); + +done: + spin_unlock_irqrestore(&adsp->event_lock, flags); + return rc; +} + +static irqreturn_t adsp_irq_handler(int irq, void *data) +{ + struct msm_adsp *adsp = &the_adsp; + int count = 0; + for (count = 0; count < 15; count++) + if (adsp_get_event(adsp) < 0) + break; +#if 0 + if (count > adsp->event_backlog_max) + adsp->event_backlog_max = count; + adsp->events_received += count; +#endif + if (count == 15) + pr_err("too many (%d) events for single irq!\n", count); + return IRQ_HANDLED; +} + +static void adsp_dal_callback(void *data, int len, void *cookie) +{ + struct msm_adsp *adsp = cookie; + struct adsp_dal_event *e = data; + struct msm_adsp_module *m; +#if 0 + pr_info("adsp: h %08x c %08x l %08x\n", + e->evt_handle, e->evt_cookie, e->evt_length); + pr_info(" : e %08x v %08x p %08x\n", + e->event, e->version, e->proc_id); + pr_info(" : m %08x i %08x a %08x\n", + e->u.info.module, e->u.info.image, e->u.info.apps_okts); +#endif + + switch (e->event) { + case ADSP_EVT_INIT_INFO: + memcpy(&adsp->tmpmodule, &e->u.module, + sizeof(adsp->tmpmodule)); + break; + case ADSP_EVT_MOD_READY: + m = id_to_module(adsp, e->u.info.module); + if (m) { + pr_info("adsp: %s READY\n", m->name); + m->active = 1; + } + break; + case ADSP_EVT_MOD_DISABLE: + /* does not actually happen in adsp5v2 */ + m = id_to_module(adsp, e->u.info.module); + if (m) + pr_info("adsp: %s DISABLED\n", m->name); + break; + case ADSP_EVT_DISABLE_FAIL: + m = id_to_module(adsp, e->u.info.module); + if (m) + pr_info("adsp: %s DISABLE FAILED\n", m->name); + break; + default: + pr_err("adsp_dal_callback: unknown event %d\n", e->event); + } + wake_up(&adsp->callback_wq); +} + +static void adsp_add_module(struct msm_adsp *adsp, struct adsp_module_info *mi) +{ + struct msm_adsp_module *module; + int n; + + if (mi->task_id >= ADSP_TASKS_MAX) { + pr_err("adsp: module '%s' task id %d is invalid\n", + mi->name, mi->task_id); + return; + } + if (mi->q_cnt > ADSP_QUEUES_MAX) { + pr_err("adsp: module '%s' q_cnt %d is invalid\n", + mi->name, mi->q_cnt); + return; + } + + module = kzalloc(sizeof(*module), GFP_KERNEL); + if (!module) + return; + + module->name = kstrdup(mi->name, GFP_KERNEL); + if (!module->name) + goto fail_module_name; + + for (n = 0; n < mi->q_cnt; n++) { + struct msm_adsp_queue *queue = module->queue + n; + queue->name = kstrdup(mi->queue[n].name, GFP_KERNEL); + if (!queue->name) + goto fail_queue_name; + queue->offset = mi->queue[n].offset; + queue->max_size = mi->queue[n].max_size; + queue->flags = mi->queue[n].flag; + } + + init_waitqueue_head(&module->wait); + module->id = mi->uuid; + module->adsp = adsp; + + module->next = adsp->all_modules; + adsp->all_modules = module; + + adsp->task_to_module[mi->task_id] = module; 
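+ /* The module is now reachable by name via the all_modules list and by DSP task id via task_to_module[], which adsp_read_task_to_host() uses to route incoming task-to-host messages to the module's registered callback. */ 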
+#if 0 + pr_info("adsp: module '%s' id 0x%x task %d\n", + module->name, module->id, mi->task_id); + for (n = 0; (n < ADSP_TASKS_MAX) && module->queue[n].name; n++) + pr_info(" queue '%s' off 0x%x size %d flags %x", + module->queue[n].name, module->queue[n].offset, + module->queue[n].max_size, module->queue[n].flags); +#endif + return; + +fail_queue_name: + for (n = 0; n < mi->q_cnt; n++) + if (module->queue[n].name) + kfree(module->queue[n].name); +fail_module_name: + kfree(module); +} + +static int adsp_probe(struct platform_device *pdev) { + struct msm_adsp *adsp = &the_adsp; + struct adsp_dal_cmd cmd; + int ret, n; + + pr_info("*** adsp_probe() ***\n"); + + adsp->base = MSM_AD5_BASE; + adsp->read_ctrl = adsp->base + ADSP_READ_CTRL_OFFSET; + adsp->write_ctrl = adsp->base + ADSP_WRITE_CTRL_OFFSET; + adsp->send_irq = adsp->base + ADSP_SEND_IRQ_OFFSET; + + adsp->client = dal_attach(ADSP_DAL_DEVICE, ADSP_DAL_PORT, + adsp_dal_callback, adsp); + if (!adsp->client) { + pr_err("adsp_probe: cannot attach to dal device\n"); + return -ENODEV; + } + + cmd.cmd = ADSP_CMD_GET_INIT_INFO; + cmd.proc_id = ADSP_PROC_APPS; + cmd.module = 0; + cmd.cookie = 0; + + for (n = 0; n < 64; n++) { + adsp->tmpmodule.uuid = 0xffffffff; + ret = dal_call_f5(adsp->client, ADSP_DAL_COMMAND, + &cmd, sizeof(cmd)); + if (ret) { + pr_err("adsp_probe() get info dal call failed\n"); + break; + } + ret = wait_event_timeout(adsp->callback_wq, + (adsp->tmpmodule.uuid != 0xffffffff), + 5*HZ); + if (ret == 0) { + pr_err("adsp_probe() timed out getting module info\n"); + break; + } + if (adsp->tmpmodule.uuid == 0x7fffffff) + break; + if (adsp->tmpmodule.task_id == 0xffff) + continue; +// adsp_print_module(&adsp->tmpmodule); + adsp_add_module(adsp, &adsp->tmpmodule); + } + + ret = request_irq(INT_AD5A_MPROC_APPS_0, adsp_irq_handler, + IRQF_TRIGGER_RISING, "adsp", 0); + if (ret < 0) + return ret; + + pr_info("*** adsp_probe() done ***\n"); + + pr_info("audio: codec init...\n"); + msm_codec_init(); + + pr_info("audio: adsp init...\n"); + adsp_audio_init(); + + pr_info("audio: voice init...\n"); + msm_voice_init(); + + return 0; +} + +static struct platform_driver adsp_driver = { + .probe = adsp_probe, + .driver = { + .name = "SMD_DAL00", + .owner = THIS_MODULE, + }, +}; + +extern int msm_codec_init(void); + +static int __init adsp_init(void) +{ + struct msm_adsp *adsp = &the_adsp; + + pr_info("*** adsp_init() ***\n"); + + init_waitqueue_head(&adsp->callback_wq); + spin_lock_init(&adsp->callback_lock); + spin_lock_init(&adsp->write_lock); + spin_lock_init(&adsp->event_lock); + + return platform_driver_register(&adsp_driver); +} + +module_init(adsp_init); diff --git a/arch/arm/mach-msm/qdsp5v2/adsp.h b/arch/arm/mach-msm/qdsp5v2/adsp.h new file mode 100644 index 0000000000000..c37588f936a23 --- /dev/null +++ b/arch/arm/mach-msm/qdsp5v2/adsp.h @@ -0,0 +1,42 @@ +/* arch/arm/mach-msm/qdsp5v2/adsp.h + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _MSM_ADSP_5V2_H_ +#define _MSM_ADSP_5V2_H_ + +struct msm_adsp_module; + +typedef void (*msm_adsp_callback)(unsigned id, void *event, + size_t len, void *cookie); + + +int msm_adsp_get(const char *name, struct msm_adsp_module **module, + msm_adsp_callback callback, void *cookie); + +void msm_adsp_put(struct msm_adsp_module *module); + +/* find queue index for a named module command queue */ +int msm_adsp_lookup_queue(struct msm_adsp_module *module, const char *name); + +int msm_adsp_enable(struct msm_adsp_module *module); +int msm_adsp_disable(struct msm_adsp_module *module); + +/* write is safe to call from atomic context. All other msm_adsp_* + * calls may block. + */ +int msm_adsp_write(struct msm_adsp_module *module, unsigned queue_idx, + void *data, size_t len); + +#endif diff --git a/arch/arm/mach-msm/qdsp5v2/adsp_audio.c b/arch/arm/mach-msm/qdsp5v2/adsp_audio.c new file mode 100644 index 0000000000000..80dde0a1ee4fa --- /dev/null +++ b/arch/arm/mach-msm/qdsp5v2/adsp_audio.c @@ -0,0 +1,460 @@ +/* arch/arm/mach-msm/qdsp5v2/adsp_audio.c + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include + +#include "adsp.h" +#include "adsp_module_afe.h" +#include "adsp_module_audpp.h" +#include "adsp_module_audplay.h" + +#include "adsp_audio.h" + + +#define AUDDEC_DEC_PCM 0 + +/* Decoder status received from AUDPPTASK */ +#define STATUS_SLEEP 0 +#define STATUS_INIT 1 +#define STATUS_CONFIG 2 +#define STATUS_PLAY 3 + + +#define MAX_AUDPLAY_TASKS 5 + +struct audplay { + struct msm_adsp_module *module; + wait_queue_head_t wait; + int q1; + int active; + int id; + int status; + struct audpp *audpp; + + void (*callback)(void *cookie); + void *cookie; +}; + +struct audpp { + struct msm_adsp_module *module; + wait_queue_head_t wait; + struct mutex lock; + int q1, q2, q3; + unsigned count; + struct audplay audplay[MAX_AUDPLAY_TASKS]; +}; + +struct afe_info { + struct msm_adsp_module *module; + wait_queue_head_t wait; + struct mutex lock; + unsigned count; + u8 active[AFE_DEVICE_ID_MAX + 1]; +}; + +struct afe_info the_afe_info; +static struct audpp the_audpp; + + +static void afe_callback(unsigned id, void *event, size_t len, void *cookie) +{ + struct afe_info *afe = cookie; + struct afe_msg_codec_config_ack *msg = event; + + printk("afe_callback id=%d len=%d\n", id, len); + + if (id != AFE_MSG_CODEC_CONFIG_ACK) + return; + + if (msg->device_id > AFE_DEVICE_ID_MAX) + return; + + if (msg->device_activity == AFE_MSG_CODEC_CONFIG_ENABLED) + afe->active[msg->device_id] = 1; + else + afe->active[msg->device_id] = 0; + + wake_up(&afe->wait); +} + +int afe_enable(unsigned device, unsigned rate, unsigned channels) +{ + struct afe_info *afe = &the_afe_info; + struct afe_cmd_codec_config cmd; + int ret = 0; + + /* rate must be one of the following: + * 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000 + */ + cmd.cmd_id = AFE_CMD_CODEC_CONFIG_CMD; + cmd.device_id = device; + cmd.activity = 1; + cmd.sample_rate = rate / 1000; + cmd.channel_mode = channels; + cmd.volume = AFE_VOLUME_UNITY; + cmd.reserved = 
0; + + mutex_lock(&afe->lock); + + if (!afe->module) { + ret = msm_adsp_get("AFE", &afe->module, afe_callback, afe); + if (ret) + goto done; + } + + if (afe->active[device]) { + pr_err("afe_enable: device %d already enabled\n", device); + ret = -EBUSY; + goto done; + } + + if (++afe->count == 1) { + pr_info("AFE ENABLE!\n"); + ret = msm_adsp_enable(afe->module); + if (ret < 0) { + pr_err("afe_enable: cannot enable module\n"); + afe->count--; + goto done; + } + } + + ret = msm_adsp_write(afe->module, 0, &cmd, sizeof(cmd)); + if (ret < 0) { + printk("afe_enable: command write failed\n"); + goto done; + } + + ret = wait_event_timeout(afe->wait, afe->active[device], 5 * HZ); + if (!ret) { + pr_err("afe_enable: command timeout\n"); + ret = -EIO; + } else { + printk("afe_enable: device %d active\n", cmd.device_id); + } +done: + mutex_unlock(&afe->lock); + return ret; +} + +int afe_disable(unsigned device) +{ + struct afe_info *afe = &the_afe_info; + struct afe_cmd_codec_config cmd; + int ret = 0; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cmd_id = AFE_CMD_CODEC_CONFIG_CMD; + cmd.device_id = device; + cmd.activity = 0; + + mutex_lock(&afe->lock); + + if (!afe->active[device]) { + pr_err("afe_disable: device %d already disabled\n", device); + goto done; + } + + ret = msm_adsp_write(afe->module, 0, &cmd, sizeof(cmd)); + if (ret < 0) { + printk("afe_disable: command write failed\n"); + goto done; + } + + ret = wait_event_timeout(afe->wait, !afe->active[device], 5 * HZ); + if (!ret) { + pr_err("afe_disable: command timeout\n"); + ret = -EIO; + } else { + printk("afe_disable: device %d inactive\n", cmd.device_id); + if (--afe->count == 0) { + pr_info("AFE DISABLE!\n"); + msm_adsp_disable(afe->module); + } + } +done: + mutex_unlock(&afe->lock); + return ret; +} + +static void audpp_callback(unsigned id, void *event, size_t len, void *cookie) +{ + struct audpp *audpp = cookie; + + if (id == AUDPP_MSG_STATUS_MSG) { + struct audpp_msg_status_msg *msg = event; + pr_info("audpp STATUS id=%d status=%d reason=%d\n", + msg->dec_id, msg->status, msg->reason); + if (msg->dec_id < MAX_AUDPLAY_TASKS) { + audpp->audplay[msg->dec_id].status = msg->status; + wake_up(&audpp->audplay[msg->dec_id].wait); + } + + } else { + pr_info("audpp cb %d %d\n", id, len); + } +} + +static int audpp_get(struct audpp *audpp) +{ + int ret = 0; + + if (++audpp->count > 1) + return 0; + + ret = msm_adsp_get("AUDPP", &audpp->module, audpp_callback, audpp); + if (ret < 0) { + pr_err("audpp_get: could not get AUDPP\n"); + goto fail_get_module; + } + + audpp->q1 = msm_adsp_lookup_queue(audpp->module, "AudPPCmd1"); + audpp->q2 = msm_adsp_lookup_queue(audpp->module, "AudPPCmd2"); + audpp->q3 = msm_adsp_lookup_queue(audpp->module, "AudPPCmd3"); + if ((audpp->q1 < 0) || (audpp->q2 < 0) || (audpp->q3 < 0)) { + pr_err("audpp_get: could not get queues\n"); + ret = -ENODEV; + goto fail_enable_module; + } + + ret = msm_adsp_enable(audpp->module); + if (ret < 0) + goto fail_enable_module; + + return 0; + +fail_enable_module: + msm_adsp_put(audpp->module); + audpp->module = NULL; +fail_get_module: + audpp->count--; + return ret; +} + +static void audpp_put(struct audpp *audpp) +{ + if (--audpp->count > 0) + return; + + msm_adsp_disable(audpp->module); + msm_adsp_put(audpp->module); + audpp->module = NULL; +} + + +static void audplay_callback(unsigned id, void *event, size_t len, void *cookie) +{ + struct audplay *audplay = cookie; + if (id == AUDPLAY_MSG_DEC_NEEDS_DATA) { +#if 0 + struct audplay_msg_dec_needs_data *msg = event; + pr_info("audplay 
NEEDDATA id=%d off=%d sz=%d %d %d %d %d\n", + msg->dec_id, msg->adecDataReadPtrOffset, + msg->adecDataBufSize, msg->bitstream_free_len, + msg->bitstream_write_ptr, msg->bitstream_buf_start, + msg->bitstream_buf_len); +#endif + audplay->callback(audplay->cookie); + } else { + pr_info("audplay cb %d %d\n", id, len); + } +} + +struct audplay *audplay_get(void (*cb)(void *cookie), void *cookie) +{ + struct audpp *audpp = &the_audpp; + struct audplay *audplay = NULL; + char buf[32]; + unsigned n; + int ret; + + mutex_lock(&audpp->lock); + + for (n = 0; n < MAX_AUDPLAY_TASKS; n++) + if (audpp->audplay[n].active == 0) break; + + if (n == MAX_AUDPLAY_TASKS) + goto done; + + if (audpp_get(audpp)) + goto done; + + audplay = audpp->audplay + n; + sprintf(buf, "AUDPLAY%d", n); + ret = msm_adsp_get(buf, &audplay->module, audplay_callback, audplay); + if (ret < 0) + goto fail_audplay_get; + + sprintf(buf, "AudPlay%dBitStreamCtrl", n); + audplay->q1 = msm_adsp_lookup_queue(audplay->module, buf); + if (audplay->q1 < 0) + goto fail_audplay_enable; + + ret = msm_adsp_enable(audplay->module); + if (ret < 0) + goto fail_audplay_enable; + + audplay->active = 1; + audplay->callback = cb; + audplay->cookie = cookie; + goto done; + +fail_audplay_enable: + msm_adsp_put(audplay->module); + audplay->module = NULL; + audplay->callback = NULL; +fail_audplay_get: + audplay = NULL; + audpp_put(audpp); +done: + mutex_unlock(&audpp->lock); + return audplay; +} + +void audplay_put(struct audplay *audplay) +{ + mutex_lock(&audplay->audpp->lock); + audplay->active = 0; + msm_adsp_disable(audplay->module); + msm_adsp_put(audplay->module); + audplay->module = NULL; + audplay->callback = NULL; + audpp_put(audplay->audpp); + mutex_unlock(&audplay->audpp->lock); +} + +static inline void audplay_send_q1(struct audplay *audplay, void *cmd, int len) +{ + msm_adsp_write(audplay->module, audplay->q1, cmd, len); +} + +static inline void audpp_send_q1(struct audpp *audpp, void *cmd, int len) +{ + msm_adsp_write(audpp->module, audpp->q1, cmd, len); +} + +static inline void audpp_send_q2(struct audpp *audpp, void *cmd, int len) +{ + msm_adsp_write(audpp->module, audpp->q2, cmd, len); +} + +static inline void audpp_send_q3(struct audpp *audpp, void *cmd, int len) +{ + msm_adsp_write(audpp->module, audpp->q3, cmd, len); +} + + +void audplay_config_pcm(struct audplay *audplay, + unsigned rate, unsigned width, unsigned channels) +{ + struct audpp_cmd_cfg_adec_params_wav cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.common.cmd_id = AUDPP_CMD_CFG_ADEC_PARAMS; + cmd.common.length = AUDPP_CMD_CFG_ADEC_PARAMS_WAV_LEN >> 1; + cmd.common.dec_id = audplay->id; + cmd.common.input_sampling_frequency = rate; + cmd.stereo_cfg = channels; + cmd.pcm_width = 1; + cmd.sign = 0; /* really? 
*/ + audpp_send_q2(audplay->audpp, &cmd, sizeof(cmd)); /* sizeof(cmd)?!*/ +} + +void audplay_dsp_config(struct audplay *audplay, int enable) +{ + struct audpp_cmd_cfg_dec_type cmd; + int next; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cmd_id = AUDPP_CMD_CFG_DEC_TYPE; + cmd.dec_cfg = AUDPP_CMD_UPDATDE_CFG_DEC; + if (enable) { + cmd.dec_cfg |= AUDPP_CMD_ENA_DEC_V | AUDDEC_DEC_PCM; + next = STATUS_INIT; + } else { + cmd.dec_cfg |= AUDPP_CMD_DIS_DEC_V; + next = STATUS_SLEEP; + } + cmd.dm_mode = 0; + cmd.stream_id = audplay->id; + + mutex_lock(&audplay->audpp->lock); + audpp_send_q1(audplay->audpp, &cmd, sizeof(cmd)); + wait_event_timeout(audplay->wait, audplay->status == next, 5 * HZ); + mutex_unlock(&audplay->audpp->lock); +} + +void audplay_send_data(struct audplay *audplay, unsigned phys, unsigned len) +{ + struct audplay_cmd_bitstream_data_avail cmd; + + cmd.cmd_id = AUDPLAY_CMD_BITSTREAM_DATA_AVAIL; + cmd.decoder_id = audplay->id; + cmd.buf_ptr = phys; + cmd.buf_size = len/2; + cmd.partition_number = 0; + + mutex_lock(&audplay->audpp->lock); + audplay_send_q1(audplay, &cmd, sizeof(cmd)); + wait_event_timeout(audplay->wait, audplay->status == STATUS_PLAY, 5 * HZ); + mutex_unlock(&audplay->audpp->lock); +} + +void audplay_mix_select(struct audplay *audplay, unsigned mix) +{ + struct audpp_cmd_cfg_dev_mixer_params cmd; + memset(&cmd, 0, sizeof(cmd)); + cmd.cmd_id = AUDPP_CMD_CFG_DEV_MIXER; + cmd.stream_id = audplay->id; + cmd.mixer_cmd = mix; + audpp_send_q1(audplay->audpp, &cmd, sizeof(cmd)); +} + +void audplay_volume_pan(struct audplay *audplay, unsigned volume, unsigned pan) +{ +#define AUDPP_CMD_VOLUME_PAN 0 +#define AUDPP_CMD_CFG_OBJ_UPDATE 0x8000 + uint16_t cmd[7]; + cmd[0] = AUDPP_CMD_CFG_OBJECT_PARAMS; + cmd[1] = AUDPP_CMD_POPP_STREAM; + cmd[2] = audplay->id; + cmd[3] = AUDPP_CMD_CFG_OBJ_UPDATE; + cmd[4] = AUDPP_CMD_VOLUME_PAN; + cmd[5] = volume; + cmd[6] = pan; + audpp_send_q3(audplay->audpp, cmd, sizeof(cmd)); +} + + + +void adsp_audio_init(void) +{ + struct afe_info *afe = &the_afe_info; + struct audpp *audpp = &the_audpp; + int n; + + mutex_init(&audpp->lock); + init_waitqueue_head(&audpp->wait); + for (n = 0; n < MAX_AUDPLAY_TASKS; n++) { + struct audplay *audplay = audpp->audplay + n; + audplay->id = n; + audplay->audpp = audpp; + init_waitqueue_head(&audplay->wait); + } + + mutex_init(&afe->lock); + init_waitqueue_head(&afe->wait); +} diff --git a/arch/arm/mach-msm/qdsp5v2/adsp_audio.h b/arch/arm/mach-msm/qdsp5v2/adsp_audio.h new file mode 100644 index 0000000000000..2de216a4a23ee --- /dev/null +++ b/arch/arm/mach-msm/qdsp5v2/adsp_audio.h @@ -0,0 +1,47 @@ +/* arch/arm/mach-msm/qdsp5v2/adsp_audio.h + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _MSM_ADSP_AUDIO_H_ +#define _MSM_ADSP_AUDIO_H_ + +struct audplay; +struct audpp; + +struct audplay *audplay_get(void (*cb)(void *cookie), void *cookie); +void audplay_put(struct audplay *audplay); + +void audplay_send_data(struct audplay *audplay, unsigned phys, unsigned len); + +void audplay_dsp_config(struct audplay *audplay, int enable); +void audplay_config_pcm(struct audplay *audplay, + unsigned rate, unsigned width, unsigned channels); + + +void audplay_mix_select(struct audplay *audplay, unsigned mix); +void audplay_volume_pan(struct audplay *audplay, unsigned volume, unsigned pan); + + +int afe_enable(unsigned device, unsigned rate, unsigned channels); +int afe_disable(unsigned device); + + +void adsp_audio_init(void); +int msm_voice_init(void); +int msm_codec_init(void); + +int audio_route_path(const char *path); +int codec_route_path(const char *path); + +#endif diff --git a/arch/arm/mach-msm/qdsp5v2/adsp_module_afe.h b/arch/arm/mach-msm/qdsp5v2/adsp_module_afe.h new file mode 100644 index 0000000000000..eaa6139c64cb9 --- /dev/null +++ b/arch/arm/mach-msm/qdsp5v2/adsp_module_afe.h @@ -0,0 +1,80 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef __ADSP_MODULE_AFE_H +#define __ADSP_MODULE_AFE_H + +#define AFE_DEVICE_MI2S_CODEC_RX 1 /* internal codec rx path */ +#define AFE_DEVICE_MI2S_CODEC_TX 2 /* internal codec tx path */ +#define AFE_DEVICE_AUX_CODEC_RX 3 /* external codec rx path */ +#define AFE_DEVICE_AUX_CODEC_TX 4 /* external codec tx path */ +#define AFE_DEVICE_MI2S_HDMI_RX 5 /* HDMI/FM block rx path */ +#define AFE_DEVICE_MI2S_HDMI_TX 6 /* HDMI/FM block tx path */ +#define AFE_DEVICE_ID_MAX 7 + +#define AFE_VOLUME_UNITY 0x4000 /* Q14 format */ + +#define AFE_CMD_CODEC_CONFIG_CMD 0x1 +#define AFE_CMD_CODEC_CONFIG_LEN sizeof(struct afe_cmd_codec_config) + +struct afe_cmd_codec_config{ + uint16_t cmd_id; + uint16_t device_id; + uint16_t activity; + uint16_t sample_rate; + uint16_t channel_mode; + uint16_t volume; + uint16_t reserved; +} __attribute__ ((packed)); + +#define AFE_CMD_AUX_CODEC_CONFIG_CMD 0x3 +#define AFE_CMD_AUX_CODEC_CONFIG_LEN sizeof(struct afe_cmd_aux_codec_config) + +struct afe_cmd_aux_codec_config{ + uint16_t cmd_id; + uint16_t dma_path_ctl; + uint16_t pcm_ctl; + uint16_t eight_khz_int_mode; + uint16_t aux_codec_intf_ctl; + uint16_t data_format_padding_info; +} __attribute__ ((packed)); + +#define AFE_MSG_CODEC_CONFIG_ACK 0x0001 +#define AFE_MSG_CODEC_CONFIG_ACK_LEN \ + sizeof(struct afe_msg_codec_config_ack) + +#define AFE_MSG_CODEC_CONFIG_ENABLED 0x1 +#define AFE_MSG_CODEC_CONFIG_DISABLED 0xFFFF + +struct afe_msg_codec_config_ack { + uint16_t device_id; + uint16_t device_activity; +} __attribute__((packed)); + + +#endif diff --git a/arch/arm/mach-msm/qdsp5v2/adsp_module_audplay.h b/arch/arm/mach-msm/qdsp5v2/adsp_module_audplay.h new file mode 100644 index 0000000000000..7a67dedc4b2bd --- /dev/null +++ b/arch/arm/mach-msm/qdsp5v2/adsp_module_audplay.h @@ -0,0 +1,184 @@ +/* + * Copyright (c) 1992-2009, Code Aurora Forum. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __ADSP_MODULE_AUDPLAY +#define __ADSP_MODULE_AUDPLAY + +#define AUDPLAY_CMD_BITSTREAM_DATA_AVAIL 0x0000 +#define AUDPLAY_CMD_BITSTREAM_DATA_AVAIL_LEN \ + sizeof(struct audplay_cmd_bitstream_data_avail) + +/* Type specification of dec_data_avail message sent to AUDPLAYTASK +*/ +struct audplay_cmd_bitstream_data_avail{ + /*command ID*/ + unsigned int cmd_id; + + /* Decoder ID for which message is being sent */ + unsigned int decoder_id; + + /* Start address of data in ARM global memory */ + unsigned int buf_ptr; + + /* Number of 16-bit words of bit-stream data contiguously + * available at the above-mentioned address + */ + unsigned int buf_size; + + /* Partition number used by audPlayTask to communicate with DSP's RTOS + * kernel + */ + unsigned int partition_number; + +} __attribute__((packed)); + +#define AUDPLAY_CMD_CHANNEL_INFO 0x0001 +#define AUDPLAY_CMD_CHANNEL_INFO_LEN \ + sizeof(struct audplay_cmd_channel_info) + +struct audplay_cmd_channel_select { + unsigned int cmd_id; + unsigned int stream_id; + unsigned int channel_select; +} __attribute__((packed)); + +struct audplay_cmd_threshold_update { + unsigned int cmd_id; + unsigned int threshold_update; + unsigned int threshold_value; +} __attribute__((packed)); + +union audplay_cmd_channel_info { + struct audplay_cmd_channel_select ch_select; + struct audplay_cmd_threshold_update thr_update; +}; + +#define AUDPLAY_CMD_HPCM_BUF_CFG 0x0003 +#define AUDPLAY_CMD_HPCM_BUF_CFG_LEN \ + sizeof(struct audplay_cmd_hpcm_buf_cfg) + +struct audplay_cmd_hpcm_buf_cfg { + unsigned int cmd_id; + unsigned int hostpcm_config; + unsigned int feedback_frequency; + unsigned int byte_swap; + unsigned int max_buffers; + unsigned int partition_number; +} __attribute__((packed)); + +#define AUDPLAY_CMD_BUFFER_REFRESH 0x0004 +#define AUDPLAY_CMD_BUFFER_REFRESH_LEN \ + sizeof(struct audplay_cmd_buffer_update) + +struct audplay_cmd_buffer_refresh { + unsigned int cmd_id; + unsigned int num_buffers; + unsigned int buf_read_count; + unsigned int buf0_address; + unsigned int buf0_length; + unsigned int buf1_address; + unsigned int buf1_length; +} __attribute__((packed)); + +#define AUDPLAY_CMD_BITSTREAM_DATA_AVAIL_NT2 0x0005 +#define AUDPLAY_CMD_BITSTREAM_DATA_AVAIL_NT2_LEN \ + sizeof(struct audplay_cmd_bitstream_data_avail_nt2) + +/* Type specification of dec_data_avail message sent to AUDPLAYTASK + * for NT2 */ +struct audplay_cmd_bitstream_data_avail_nt2 { + /*command ID*/ + unsigned int cmd_id; + + /* Decoder ID for which message is being sent */ + unsigned int decoder_id; + + /* Start address of data in ARM global memory */ + unsigned int buf_ptr; + + /* Number of 16-bit words of bit-stream data contiguously + * available at the above-mentioned address + */ + unsigned int buf_size; + + /* Partition number used by audPlayTask to communicate with DSP's RTOS + * kernel + */ + unsigned int partition_number; + + /* bitstream write pointer */ + unsigned int dspBitstreamWritePtr; + +} __attribute__((packed)); + +#define AUDPLAY_CMD_OUTPORT_FLUSH 0x0006 + +struct audplay_cmd_outport_flush { + unsigned int cmd_id; +} __attribute__((packed)); + + +/* messages from dsp to apps */ + +#define AUDPLAY_MSG_DEC_NEEDS_DATA 0x0001 +#define AUDPLAY_MSG_DEC_NEEDS_DATA_MSG_LEN \ + sizeof(audplay_msg_dec_needs_data) + +struct audplay_msg_dec_needs_data { + /* reserved*/ + unsigned int dec_id; + + /*The read pointer offset of external memory till which bitstream + has been dmed in*/ + unsigned int adecDataReadPtrOffset; + + /*The buffer size of external memory. 
*/ + unsigned int adecDataBufSize; + + unsigned int bitstream_free_len; + unsigned int bitstream_write_ptr; + unsigned int bitstream_buf_start; + unsigned int bitstream_buf_len; +} __attribute__((packed)); + +#define AUDPLAY_UP_STREAM_INFO 0x0003 +#define AUDPLAY_UP_STREAM_INFO_LEN \ + sizeof(struct audplay_msg_stream_info) + +struct audplay_msg_stream_info { + unsigned int decoder_id; + unsigned int channel_info; + unsigned int sample_freq; + unsigned int bitstream_info; + unsigned int bit_rate; +} __attribute__((packed)); + +#define AUDPLAY_MSG_BUFFER_UPDATE 0x0004 +#define AUDPLAY_MSG_BUFFER_UPDATE_LEN \ + sizeof(struct audplay_msg_buffer_update) + +struct audplay_msg_buffer_update { + unsigned int buffer_write_count; + unsigned int num_of_buffer; + unsigned int buf0_address; + unsigned int buf0_length; + unsigned int buf1_address; + unsigned int buf1_length; +} __attribute__((packed)); + +#define AUDPLAY_UP_OUTPORT_FLUSH_ACK 0x0005 + +#define ADSP_MESSAGE_ID 0xFFFF + +#endif diff --git a/arch/arm/mach-msm/qdsp5v2/adsp_module_audpp.h b/arch/arm/mach-msm/qdsp5v2/adsp_module_audpp.h new file mode 100644 index 0000000000000..8f278cf69f5f7 --- /dev/null +++ b/arch/arm/mach-msm/qdsp5v2/adsp_module_audpp.h @@ -0,0 +1,1250 @@ +/* + * Copyright (c) 1992-2009, Code Aurora Forum. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __ADSP_MODULE_AUDPP +#define __ADSP_MODULE_AUDPP + +/* + * ARM to AUDPPTASK Commands + * + * ARM uses three command queues to communicate with AUDPPTASK + * 1)uPAudPPCmd1Queue : Used for more frequent and shorter length commands + * Location : MEMA + * Buffer Size : 6 words + * No of buffers in a queue : 20 for gaming audio and 5 for other images + * 2)uPAudPPCmd2Queue : Used for commands which are not much lengthier + * Location : MEMA + * Buffer Size : 23 + * No of buffers in a queue : 2 + * 3)uPAudOOCmd3Queue : Used for lengthier and more frequent commands + * Location : MEMA + * Buffer Size : 145 + * No of buffers in a queue : 3 + */ + +/* + * Commands Related to uPAudPPCmd1Queue + */ + +/* + * Command Structure to enable or disable the active decoders + */ + +#define AUDPP_CMD_CFG_DEC_TYPE 0x0001 +#define AUDPP_CMD_CFG_DEC_TYPE_LEN sizeof(struct audpp_cmd_cfg_dec_type) + +/* Enable the decoder */ +#define AUDPP_CMD_DEC_TYPE_M 0x000F + +#define AUDPP_CMD_ENA_DEC_V 0x4000 +#define AUDPP_CMD_DIS_DEC_V 0x0000 +#define AUDPP_CMD_DEC_STATE_M 0x4000 + +#define AUDPP_CMD_UPDATDE_CFG_DEC 0x8000 +#define AUDPP_CMD_DONT_UPDATE_CFG_DEC 0x0000 + + +/* Type specification of cmd_cfg_dec */ + +struct audpp_cmd_cfg_dec_type { + unsigned short cmd_id; + unsigned short stream_id; + unsigned short dec_cfg; + unsigned short dm_mode; +} __attribute__((packed)); + +/* + * Command Structure to Pause , Resume and flushes the selected audio decoders + */ + +#define AUDPP_CMD_DEC_CTRL 0x0002 +#define AUDPP_CMD_DEC_CTRL_LEN sizeof(struct audpp_cmd_dec_ctrl) + +/* Decoder control commands for pause, resume and flush */ +#define AUDPP_CMD_FLUSH_V 0x2000 + +#define AUDPP_CMD_PAUSE_V 0x4000 +#define AUDPP_CMD_RESUME_V 0x0000 + +#define AUDPP_CMD_UPDATE_V 0x8000 +#define AUDPP_CMD_IGNORE_V 0x0000 + + +/* Type Spec for decoder control command*/ + +struct audpp_cmd_dec_ctrl{ + unsigned short cmd_id; + unsigned short stream_id; + unsigned short dec_ctrl; +} __attribute__((packed)); + +/* + * Command Structure to Configure the AVSync FeedBack Mechanism + */ + +#define AUDPP_CMD_AVSYNC 0x0003 +#define AUDPP_CMD_AVSYNC_LEN sizeof(struct audpp_cmd_avsync) + +struct audpp_cmd_avsync{ + unsigned short cmd_id; + unsigned short stream_id; + unsigned short interrupt_interval; + unsigned short sample_counter_dlsw; + unsigned short sample_counter_dmsw; + unsigned short sample_counter_msw; + unsigned short byte_counter_dlsw; + unsigned short byte_counter_dmsw; + unsigned short byte_counter_msw; +} __attribute__((packed)); + +/* + * Command Structure to enable or disable(sleep) the AUDPPTASK + */ + +#define AUDPP_CMD_CFG 0x0004 +#define AUDPP_CMD_CFG_LEN sizeof(struct audpp_cmd_cfg) + +#define AUDPP_CMD_CFG_SLEEP 0x0000 +#define AUDPP_CMD_CFG_ENABLE 0xFFFF + +struct audpp_cmd_cfg { + unsigned short cmd_id; + unsigned short cfg; +} __attribute__((packed)); + +/* + * Command Structure to Inject or drop the specified no of samples + */ + +#define AUDPP_CMD_ADJUST_SAMP 0x0005 +#define AUDPP_CMD_ADJUST_SAMP_LEN sizeof(struct audpp_cmd_adjust_samp) + +#define AUDPP_CMD_SAMP_DROP -1 +#define AUDPP_CMD_SAMP_INSERT 0x0001 + +#define AUDPP_CMD_NUM_SAMPLES 0x0001 + +struct audpp_cmd_adjust_samp { + unsigned short cmd_id; + unsigned short object_no; + signed short sample_insert_or_drop; + unsigned short num_samples; +} __attribute__((packed)); + +/* + * Command Structure to Configure AVSync Feedback Mechanism + */ + +#define AUDPP_CMD_ROUTING_MODE 0x0007 +#define AUDPP_CMD_ROUTING_MODE_LEN \ +sizeof(struct 
audpp_cmd_routing_mode) + +struct audpp_cmd_routing_mode { + unsigned short cmd_id; + unsigned short object_number; + unsigned short routing_mode; +} __attribute__((packed)); + +/* + * Commands Related to uPAudPPCmd2Queue + */ + +/* + * Command Structure to configure Per decoder Parameters (Common) + */ + +#define AUDPP_CMD_CFG_ADEC_PARAMS 0x0000 +#define AUDPP_CMD_CFG_ADEC_PARAMS_COMMON_LEN \ + sizeof(struct audpp_cmd_cfg_adec_params_common) + +#define AUDPP_CMD_STATUS_MSG_FLAG_ENA_FCM 0x4000 +#define AUDPP_CMD_STATUS_MSG_FLAG_DIS_FCM 0x0000 + +#define AUDPP_CMD_STATUS_MSG_FLAG_ENA_DCM 0x8000 +#define AUDPP_CMD_STATUS_MSG_FLAG_DIS_DCM 0x0000 + +/* Sampling frequency*/ +#define AUDPP_CMD_SAMP_RATE_96000 0x0000 +#define AUDPP_CMD_SAMP_RATE_88200 0x0001 +#define AUDPP_CMD_SAMP_RATE_64000 0x0002 +#define AUDPP_CMD_SAMP_RATE_48000 0x0003 +#define AUDPP_CMD_SAMP_RATE_44100 0x0004 +#define AUDPP_CMD_SAMP_RATE_32000 0x0005 +#define AUDPP_CMD_SAMP_RATE_24000 0x0006 +#define AUDPP_CMD_SAMP_RATE_22050 0x0007 +#define AUDPP_CMD_SAMP_RATE_16000 0x0008 +#define AUDPP_CMD_SAMP_RATE_12000 0x0009 +#define AUDPP_CMD_SAMP_RATE_11025 0x000A +#define AUDPP_CMD_SAMP_RATE_8000 0x000B + + +/* + * Type specification of cmd_adec_cfg sent to all decoder + */ + +struct audpp_cmd_cfg_adec_params_common { + unsigned short cmd_id; + unsigned short dec_id; + unsigned short length; + unsigned short reserved; + unsigned short input_sampling_frequency; +} __attribute__((packed)); + +/* + * Command Structure to configure Per decoder Parameters (Wav) + */ + +#define AUDPP_CMD_CFG_ADEC_PARAMS_WAV_LEN \ + sizeof(struct audpp_cmd_cfg_adec_params_wav) + + +#define AUDPP_CMD_WAV_STEREO_CFG_MONO 0x0001 +#define AUDPP_CMD_WAV_STEREO_CFG_STEREO 0x0002 + +#define AUDPP_CMD_WAV_PCM_WIDTH_8 0x0000 +#define AUDPP_CMD_WAV_PCM_WIDTH_16 0x0001 +#define AUDPP_CMD_WAV_PCM_WIDTH_24 0x0002 + +struct audpp_cmd_cfg_adec_params_wav { + struct audpp_cmd_cfg_adec_params_common common; + unsigned short stereo_cfg; + unsigned short pcm_width; + unsigned short sign; +} __attribute__((packed)); + +/* + * Command Structure for CMD_CFG_DEV_MIXER + */ + +#define AUDPP_CMD_CFG_DEV_MIXER_PARAMS_LEN \ + sizeof(struct audpp_cmd_cfg_dev_mixer_params) + +#define AUDPP_CMD_CFG_DEV_MIXER 0x0008 + +#define AUDPP_CMD_CFG_DEV_MIXER_DEV_NONE 0x0000 +#define AUDPP_CMD_CFG_DEV_MIXER_DEV_0 0x0001 +#define AUDPP_CMD_CFG_DEV_MIXER_DEV_1 0x0002 +#define AUDPP_CMD_CFG_DEV_MIXER_DEV_2 0x0004 +#define AUDPP_CMD_CFG_DEV_MIXER_DEV_3 0x0008 +#define AUDPP_CMD_CFG_DEV_MIXER_DEV_4 0x0010 + +struct audpp_cmd_cfg_dev_mixer_params { + unsigned short cmd_id; + unsigned short stream_id; + unsigned short mixer_cmd; +} __attribute__((packed)); + + +/* + * Command Structure to configure Per decoder Parameters (ADPCM) + */ + +#define AUDPP_CMD_CFG_ADEC_PARAMS_ADPCM_LEN \ + sizeof(struct audpp_cmd_cfg_adec_params_adpcm) + + +#define AUDPP_CMD_ADPCM_STEREO_CFG_MONO 0x0001 +#define AUDPP_CMD_ADPCM_STEREO_CFG_STEREO 0x0002 + +struct audpp_cmd_cfg_adec_params_adpcm { + struct audpp_cmd_cfg_adec_params_common common; + unsigned short stereo_cfg; + unsigned short block_size; +} __attribute__((packed)); + +/* + * Command Structure to configure Per decoder Parameters (WMA) + */ + +#define AUDPP_CMD_CFG_ADEC_PARAMS_WMA_LEN \ + sizeof(struct audpp_cmd_cfg_adec_params_wma) + +struct audpp_cmd_cfg_adec_params_wma { + struct audpp_cmd_cfg_adec_params_common common; + unsigned short armdatareqthr; + unsigned short channelsdecoded; + unsigned short wmabytespersec; + unsigned short wmasamplingfreq; + 
unsigned short wmaencoderopts; +} __attribute__((packed)); + + +/* + * Command Structure to configure Per decoder Parameters (MP3) + */ + +#define AUDPP_CMD_CFG_ADEC_PARAMS_MP3_LEN \ + sizeof(struct audpp_cmd_cfg_adec_params_mp3) + +struct audpp_cmd_cfg_adec_params_mp3 { + struct audpp_cmd_cfg_adec_params_common common; +} __attribute__((packed)); + + +/* + * Command Structure to configure Per decoder Parameters (AAC) + */ + +#define AUDPP_CMD_CFG_ADEC_PARAMS_AAC_LEN \ + sizeof(struct audpp_cmd_cfg_adec_params_aac) + + +#define AUDPP_CMD_AAC_FORMAT_ADTS -1 +#define AUDPP_CMD_AAC_FORMAT_RAW 0x0000 +#define AUDPP_CMD_AAC_FORMAT_PSUEDO_RAW 0x0001 +#define AUDPP_CMD_AAC_FORMAT_LOAS 0x0002 + +#define AUDPP_CMD_AAC_AUDIO_OBJECT_LC 0x0002 +#define AUDPP_CMD_AAC_AUDIO_OBJECT_LTP 0x0004 +#define AUDPP_CMD_AAC_AUDIO_OBJECT_ERLC 0x0011 + +#define AUDPP_CMD_AAC_SBR_ON_FLAG_ON 0x0001 +#define AUDPP_CMD_AAC_SBR_ON_FLAG_OFF 0x0000 + +#define AUDPP_CMD_AAC_SBR_PS_ON_FLAG_ON 0x0001 +#define AUDPP_CMD_AAC_SBR_PS_ON_FLAG_OFF 0x0000 + +struct audpp_cmd_cfg_adec_params_aac { + struct audpp_cmd_cfg_adec_params_common common; + signed short format; + unsigned short audio_object; + unsigned short ep_config; + unsigned short aac_section_data_resilience_flag; + unsigned short aac_scalefactor_data_resilience_flag; + unsigned short aac_spectral_data_resilience_flag; + unsigned short sbr_on_flag; + unsigned short sbr_ps_on_flag; + unsigned short channel_configuration; +} __attribute__((packed)); + +/* + * Command Structure to configure Per decoder Parameters (V13K) + */ + +#define AUDPP_CMD_CFG_ADEC_PARAMS_V13K_LEN \ + sizeof(struct audpp_cmd_cfg_adec_params_v13k) + + +#define AUDPP_CMD_STEREO_CFG_MONO 0x0001 +#define AUDPP_CMD_STEREO_CFG_STEREO 0x0002 + +struct audpp_cmd_cfg_adec_params_v13k { + struct audpp_cmd_cfg_adec_params_common common; + unsigned short stereo_cfg; +} __attribute__((packed)); + +#define AUDPP_CMD_CFG_ADEC_PARAMS_EVRC_LEN \ + sizeof(struct audpp_cmd_cfg_adec_params_evrc) + +struct audpp_cmd_cfg_adec_params_evrc { + struct audpp_cmd_cfg_adec_params_common common; + unsigned short stereo_cfg; +} __attribute__ ((packed)); + +/* + * Command Structure to configure Per decoder Parameters (AMRWB) + */ + +#define AUDPP_CMD_CFG_ADEC_PARAMS_AMRWB_LEN \ + sizeof(struct audpp_cmd_cfg_adec_params_amrwb) + +struct audpp_cmd_cfg_adec_params_amrwb { + struct audpp_cmd_cfg_adec_params_common common; + unsigned short stereo_cfg; +} __attribute__((packed)); + +/* + * Command Structure to configure Per decoder Parameters (WMAPRO) + */ + +#define AUDPP_CMD_CFG_ADEC_PARAMS_WMAPRO_LEN \ + sizeof(struct audpp_cmd_cfg_adec_params_wmapro) + +struct audpp_cmd_cfg_adec_params_wmapro { + struct audpp_cmd_cfg_adec_params_common common; + unsigned short armdatareqthr; + uint8_t validbitspersample; + uint8_t numchannels; + unsigned short formattag; + unsigned short samplingrate; + unsigned short avgbytespersecond; + unsigned short asfpacketlength; + unsigned short channelmask; + unsigned short encodeopt; + unsigned short advancedencodeopt; + uint32_t advancedencodeopt2; +} __attribute__((packed)); + +/* + * Command Structure to configure the HOST PCM interface + */ + +#define AUDPP_CMD_PCM_INTF 0x0001 +#define AUDPP_CMD_PCM_INTF_2 0x0002 +#define AUDPP_CMD_PCM_INTF_LEN sizeof(struct audpp_cmd_pcm_intf) + +#define AUDPP_CMD_PCM_INTF_MONO_V 0x0001 +#define AUDPP_CMD_PCM_INTF_STEREO_V 0x0002 + +/* These two values differentiate the two types of commands that could be issued + * Interface configuration command and Buffer update 
command */ + +#define AUDPP_CMD_PCM_INTF_CONFIG_CMD_V 0x0000 +#define AUDPP_CMD_PCM_INTF_BUFFER_CMD_V -1 + +#define AUDPP_CMD_PCM_INTF_RX_ENA_M 0x000F +#define AUDPP_CMD_PCM_INTF_RX_ENA_ARMTODSP_V 0x0008 +#define AUDPP_CMD_PCM_INTF_RX_ENA_DSPTOARM_V 0x0004 + +/* These flags control the enabling and disabling of the interface together + * with host interface bit mask. */ + +#define AUDPP_CMD_PCM_INTF_ENA_V -1 +#define AUDPP_CMD_PCM_INTF_DIS_V 0x0000 + + +#define AUDPP_CMD_PCM_INTF_FULL_DUPLEX 0x0 +#define AUDPP_CMD_PCM_INTF_HALF_DUPLEX_TODSP 0x1 + + +#define AUDPP_CMD_PCM_INTF_OBJECT_NUM 0x5 +#define AUDPP_CMD_PCM_INTF_COMMON_OBJECT_NUM 0x6 + +struct audpp_cmd_pcm_intf { + unsigned short cmd_id; + unsigned short stream; + unsigned short stream_id; + signed short config; + unsigned short intf_type; + + /* DSP -> ARM Configuration */ + unsigned short read_buf1LSW; + unsigned short read_buf1MSW; + unsigned short read_buf1_len; + + unsigned short read_buf2LSW; + unsigned short read_buf2MSW; + unsigned short read_buf2_len; + /* 0:HOST_PCM_INTF disable + ** 0xFFFF: HOST_PCM_INTF enable + */ + signed short dsp_to_arm_flag; + unsigned short partition_number; + + /* ARM -> DSP Configuration */ + unsigned short write_buf1LSW; + unsigned short write_buf1MSW; + unsigned short write_buf1_len; + + unsigned short write_buf2LSW; + unsigned short write_buf2MSW; + unsigned short write_buf2_len; + + /* 0:HOST_PCM_INTF disable + ** 0xFFFF: HOST_PCM_INTF enable + */ + signed short arm_to_rx_flag; + unsigned short weight_decoder_to_rx; + unsigned short weight_arm_to_rx; + + unsigned short partition_number_arm_to_dsp; + unsigned short sample_rate; + unsigned short channel_mode; +} __attribute__((packed)); + +/* + ** BUFFER UPDATE COMMAND + */ +#define AUDPP_CMD_PCM_INTF_SEND_BUF_PARAMS_LEN \ + sizeof(struct audpp_cmd_pcm_intf_send_buffer) + +struct audpp_cmd_pcm_intf_send_buffer { + unsigned short cmd_id; + unsigned short stream; + unsigned short stream_id; + /* set config = 0xFFFF for configuration*/ + signed short config; + unsigned short intf_type; + unsigned short dsp_to_arm_buf_id; + unsigned short arm_to_dsp_buf_id; + unsigned short arm_to_dsp_buf_len; +} __attribute__((packed)); + + +/* + * Commands Related to uPAudPPCmd3Queue + */ + +/* + * Command Structure to configure post processing params (Commmon) + */ + +#define AUDPP_CMD_CFG_OBJECT_PARAMS 0x0000 +#define AUDPP_CMD_CFG_OBJECT_PARAMS_COMMON_LEN \ + sizeof(struct audpp_cmd_cfg_object_params_common) + +#define AUDPP_CMD_OBJ0_UPDATE 0x8000 +#define AUDPP_CMD_OBJ0_DONT_UPDATE 0x0000 + + +#define AUDPP_CMD_OBJ2_UPDATE 0x8000 +#define AUDPP_CMD_OBJ2_DONT_UPDATE 0x0000 + +#define AUDPP_CMD_OBJ3_UPDATE 0x8000 +#define AUDPP_CMD_OBJ3_DONT_UPDATE 0x0000 + +#define AUDPP_CMD_OBJ4_UPDATE 0x8000 +#define AUDPP_CMD_OBJ4_DONT_UPDATE 0x0000 + +#define AUDPP_CMD_HPCM_UPDATE 0x8000 +#define AUDPP_CMD_HPCM_DONT_UPDATE 0x0000 + +#define AUDPP_CMD_COMMON_CFG_UPDATE 0x8000 +#define AUDPP_CMD_COMMON_CFG_DONT_UPDATE 0x0000 + +#define AUDPP_CMD_POPP_STREAM 0xFFFF +#define AUDPP_CMD_COPP_STREAM 0x0000 + +struct audpp_cmd_cfg_object_params_common{ + unsigned short cmd_id; + unsigned short stream; + unsigned short stream_id; + unsigned short obj_cfg; + unsigned short command_type; +} __attribute__((packed)); + +/* + * Command Structure to configure post processing params (Volume) + */ + +#define AUDPP_CMD_CFG_OBJECT_PARAMS_VOLUME_LEN \ + sizeof(struct audpp_cmd_cfg_object_params_volume) + +struct audpp_cmd_cfg_object_params_volume { + struct audpp_cmd_cfg_object_params_common 
common; + unsigned short volume; + unsigned short pan; +} __attribute__((packed)); + +/* + * Command Structure to configure post processing params (PCM Filter) + */ + +struct numerator { + unsigned short numerator_b0_filter_lsw; + unsigned short numerator_b0_filter_msw; + unsigned short numerator_b1_filter_lsw; + unsigned short numerator_b1_filter_msw; + unsigned short numerator_b2_filter_lsw; + unsigned short numerator_b2_filter_msw; +} __attribute__((packed)); + +struct denominator { + unsigned short denominator_a0_filter_lsw; + unsigned short denominator_a0_filter_msw; + unsigned short denominator_a1_filter_lsw; + unsigned short denominator_a1_filter_msw; +} __attribute__((packed)); + +struct shift_factor { + unsigned short shift_factor_0; +} __attribute__((packed)); + +struct pan { + unsigned short pan_filter_0; +} __attribute__((packed)); + +struct filter_1 { + struct numerator numerator_filter; + struct denominator denominator_filter; + struct shift_factor shift_factor_filter; + struct pan pan_filter; +} __attribute__((packed)); + +struct filter_2 { + struct numerator numerator_filter[2]; + struct denominator denominator_filter[2]; + struct shift_factor shift_factor_filter[2]; + struct pan pan_filter[2]; +} __attribute__((packed)); + +struct filter_3 { + struct numerator numerator_filter[3]; + struct denominator denominator_filter[3]; + struct shift_factor shift_factor_filter[3]; + struct pan pan_filter[3]; +} __attribute__((packed)); + +struct filter_4 { + struct numerator numerator_filter[4]; + struct denominator denominator_filter[4]; + struct shift_factor shift_factor_filter[4]; + struct pan pan_filter[4]; +} __attribute__((packed)); + +#define AUDPP_CMD_CFG_OBJECT_PARAMS_PCM_LEN \ + sizeof(struct audpp_cmd_cfg_object_params_pcm) + + +struct audpp_cmd_cfg_object_params_pcm { + struct audpp_cmd_cfg_object_params_common common; + signed short active_flag; + unsigned short num_bands; + union { + struct filter_1 filter_1_params; + struct filter_2 filter_2_params; + struct filter_3 filter_3_params; + struct filter_4 filter_4_params; + } __attribute__((packed)) params_filter; +} __attribute__((packed)); + + +/* + * Command Structure to configure post processing parameters (equalizer) + */ + +#define AUDPP_CMD_CFG_OBJECT_PARAMS_EQALIZER_LEN \ + sizeof(struct audpp_cmd_cfg_object_params_eqalizer) + +struct eq_numerator { + unsigned short numerator_coeff_0_lsw; + unsigned short numerator_coeff_0_msw; + unsigned short numerator_coeff_1_lsw; + unsigned short numerator_coeff_1_msw; + unsigned short numerator_coeff_2_lsw; + unsigned short numerator_coeff_2_msw; +} __attribute__((packed)); + +struct eq_denominator { + unsigned short denominator_coeff_0_lsw; + unsigned short denominator_coeff_0_msw; + unsigned short denominator_coeff_1_lsw; + unsigned short denominator_coeff_1_msw; +} __attribute__((packed)); + +struct eq_shiftfactor { + unsigned short shift_factor; +} __attribute__((packed)); + +struct eq_coeff_1 { + struct eq_numerator numerator; + struct eq_denominator denominator; + struct eq_shiftfactor shiftfactor; +} __attribute__((packed)); + +struct eq_coeff_2 { + struct eq_numerator numerator[2]; + struct eq_denominator denominator[2]; + struct eq_shiftfactor shiftfactor[2]; +} __attribute__((packed)); + +struct eq_coeff_3 { + struct eq_numerator numerator[3]; + struct eq_denominator denominator[3]; + struct eq_shiftfactor shiftfactor[3]; +} __attribute__((packed)); + +struct eq_coeff_4 { + struct eq_numerator numerator[4]; + struct eq_denominator denominator[4]; + struct 
eq_shiftfactor shiftfactor[4]; +} __attribute__((packed)); + +struct eq_coeff_5 { + struct eq_numerator numerator[5]; + struct eq_denominator denominator[5]; + struct eq_shiftfactor shiftfactor[5]; +} __attribute__((packed)); + +struct eq_coeff_6 { + struct eq_numerator numerator[6]; + struct eq_denominator denominator[6]; + struct eq_shiftfactor shiftfactor[6]; +} __attribute__((packed)); + +struct eq_coeff_7 { + struct eq_numerator numerator[7]; + struct eq_denominator denominator[7]; + struct eq_shiftfactor shiftfactor[7]; +} __attribute__((packed)); + +struct eq_coeff_8 { + struct eq_numerator numerator[8]; + struct eq_denominator denominator[8]; + struct eq_shiftfactor shiftfactor[8]; +} __attribute__((packed)); + +struct eq_coeff_9 { + struct eq_numerator numerator[9]; + struct eq_denominator denominator[9]; + struct eq_shiftfactor shiftfactor[9]; +} __attribute__((packed)); + +struct eq_coeff_10 { + struct eq_numerator numerator[10]; + struct eq_denominator denominator[10]; + struct eq_shiftfactor shiftfactor[10]; +} __attribute__((packed)); + +struct eq_coeff_11 { + struct eq_numerator numerator[11]; + struct eq_denominator denominator[11]; + struct eq_shiftfactor shiftfactor[11]; +} __attribute__((packed)); + +struct eq_coeff_12 { + struct eq_numerator numerator[12]; + struct eq_denominator denominator[12]; + struct eq_shiftfactor shiftfactor[12]; +} __attribute__((packed)); + + +struct audpp_cmd_cfg_object_params_eqalizer { + struct audpp_cmd_cfg_object_params_common common; + signed short eq_flag; + unsigned short num_bands; + union { + struct eq_coeff_1 eq_coeffs_1; + struct eq_coeff_2 eq_coeffs_2; + struct eq_coeff_3 eq_coeffs_3; + struct eq_coeff_4 eq_coeffs_4; + struct eq_coeff_5 eq_coeffs_5; + struct eq_coeff_6 eq_coeffs_6; + struct eq_coeff_7 eq_coeffs_7; + struct eq_coeff_8 eq_coeffs_8; + struct eq_coeff_9 eq_coeffs_9; + struct eq_coeff_10 eq_coeffs_10; + struct eq_coeff_11 eq_coeffs_11; + struct eq_coeff_12 eq_coeffs_12; + } __attribute__((packed)) eq_coeff; +} __attribute__((packed)); + +/* + * Command Structure to configure post processing parameters (ADRC) + */ + +#define AUDPP_CMD_CFG_OBJECT_PARAMS_ADRC_LEN \ + sizeof(struct audpp_cmd_cfg_object_params_adrc) + + +#define AUDPP_CMD_ADRC_FLAG_DIS 0x0000 +#define AUDPP_CMD_ADRC_FLAG_ENA -1 + +struct audpp_cmd_cfg_object_params_adrc { + struct audpp_cmd_cfg_object_params_common common; + signed short adrc_flag; + unsigned short compression_th; + unsigned short compression_slope; + unsigned short rms_time; + unsigned short attack_const_lsw; + unsigned short attack_const_msw; + unsigned short release_const_lsw; + unsigned short release_const_msw; + unsigned short adrc_delay; +}; + +/* + * Command Structure to configure post processing parameters (MB - ADRC) + */ + +#define AUDPP_MAX_MBADRC_BANDS 5 + +struct adrc_config { + uint16_t subband_enable; + uint16_t adrc_sub_mute; + uint16_t rms_time; + uint16_t compression_th; + uint16_t compression_slope; + uint16_t attack_const_lsw; + uint16_t attack_const_msw; + uint16_t release_const_lsw; + uint16_t release_const_msw; + uint16_t makeup_gain; +}; + +struct audpp_cmd_cfg_object_params_mbadrc { + struct audpp_cmd_cfg_object_params_common common; + uint16_t enable; + uint16_t num_bands; + uint16_t down_samp_level; + uint16_t adrc_delay; + uint16_t ext_buf_size; + uint16_t ext_partition; + uint16_t ext_buf_msw; + uint16_t ext_buf_lsw; + struct adrc_config adrc_band[AUDPP_MAX_MBADRC_BANDS]; +} __attribute__((packed)); + +/* + * Command Structure to configure post processing 
parameters(Spectrum Analizer) + */ + +#define AUDPP_CMD_CFG_OBJECT_PARAMS_SPECTRAM_LEN \ + sizeof(struct audpp_cmd_cfg_object_params_spectram) + + +struct audpp_cmd_cfg_object_params_spectram { + struct audpp_cmd_cfg_object_params_common common; + unsigned short sample_interval; + unsigned short num_coeff; +} __attribute__((packed)); + +/* + * Command Structure to configure post processing parameters (QConcert) + */ + +#define AUDPP_CMD_CFG_OBJECT_PARAMS_QCONCERT_LEN \ + sizeof(struct audpp_cmd_cfg_object_params_qconcert) + + +#define AUDPP_CMD_QCON_ENA_FLAG_ENA -1 +#define AUDPP_CMD_QCON_ENA_FLAG_DIS 0x0000 + +#define AUDPP_CMD_QCON_OP_MODE_HEADPHONE -1 +#define AUDPP_CMD_QCON_OP_MODE_SPEAKER_FRONT 0x0000 +#define AUDPP_CMD_QCON_OP_MODE_SPEAKER_SIDE 0x0001 +#define AUDPP_CMD_QCON_OP_MODE_SPEAKER_DESKTOP 0x0002 + +#define AUDPP_CMD_QCON_GAIN_UNIT 0x7FFF +#define AUDPP_CMD_QCON_GAIN_SIX_DB 0x4027 + + +#define AUDPP_CMD_QCON_EXPANSION_MAX 0x7FFF + + +struct audpp_cmd_cfg_object_params_qconcert { + struct audpp_cmd_cfg_object_params_common common; + signed short enable_flag; + signed short op_mode; + signed short gain; + signed short expansion; + signed short delay; + unsigned short stages_per_mode; + unsigned short reverb_enable; + unsigned short decay_msw; + unsigned short decay_lsw; + unsigned short decay_time_ratio_msw; + unsigned short decay_time_ratio_lsw; + unsigned short reflection_delay_time; + unsigned short late_reverb_gain; + unsigned short late_reverb_delay; + unsigned short delay_buff_size_msw; + unsigned short delay_buff_size_lsw; + unsigned short partition_num; + unsigned short delay_buff_start_msw; + unsigned short delay_buff_start_lsw; +} __attribute__((packed)); + +/* + * Command Structure to configure post processing parameters (Side Chain) + */ + +#define AUDPP_CMD_CFG_OBJECT_PARAMS_SIDECHAIN_LEN \ + sizeof(struct audpp_cmd_cfg_object_params_sidechain) + + +#define AUDPP_CMD_SIDECHAIN_ACTIVE_FLAG_DIS 0x0000 +#define AUDPP_CMD_SIDECHAIN_ACTIVE_FLAG_ENA -1 + +struct audpp_cmd_cfg_object_params_sidechain { + struct audpp_cmd_cfg_object_params_common common; + signed short active_flag; + unsigned short num_bands; + union { + struct filter_1 filter_1_params; + struct filter_2 filter_2_params; + struct filter_3 filter_3_params; + struct filter_4 filter_4_params; + } __attribute__((packed)) params_filter; +} __attribute__((packed)); + + +/* + * Command Structure to configure post processing parameters (QAFX) + */ + +#define AUDPP_CMD_CFG_OBJECT_PARAMS_QAFX_LEN \ + sizeof(struct audpp_cmd_cfg_object_params_qafx) + +#define AUDPP_CMD_QAFX_ENA_DISA 0x0000 +#define AUDPP_CMD_QAFX_ENA_ENA_CFG -1 +#define AUDPP_CMD_QAFX_ENA_DIS_CFG 0x0001 + +#define AUDPP_CMD_QAFX_CMD_TYPE_ENV 0x0100 +#define AUDPP_CMD_QAFX_CMD_TYPE_OBJ 0x0010 +#define AUDPP_CMD_QAFX_CMD_TYPE_QUERY 0x1000 + +#define AUDPP_CMD_QAFX_CMDS_ENV_OP_MODE 0x0100 +#define AUDPP_CMD_QAFX_CMDS_ENV_LIS_POS 0x0101 +#define AUDPP_CMD_QAFX_CMDS_ENV_LIS_ORI 0x0102 +#define AUDPP_CMD_QAFX_CMDS_ENV_LIS_VEL 0X0103 +#define AUDPP_CMD_QAFX_CMDS_ENV_ENV_RES 0x0107 + +#define AUDPP_CMD_QAFX_CMDS_OBJ_SAMP_FREQ 0x0010 +#define AUDPP_CMD_QAFX_CMDS_OBJ_VOL 0x0011 +#define AUDPP_CMD_QAFX_CMDS_OBJ_DIST 0x0012 +#define AUDPP_CMD_QAFX_CMDS_OBJ_POS 0x0013 +#define AUDPP_CMD_QAFX_CMDS_OBJ_VEL 0x0014 + + +struct audpp_cmd_cfg_object_params_qafx { + struct audpp_cmd_cfg_object_params_common common; + signed short enable; + unsigned short command_type; + unsigned short num_commands; + unsigned short commands; +} __attribute__((packed)); + +/* + * 
Command Structure to enable , disable or configure the reverberation effect + * (REVERB) (Common) + */ + +#define AUDPP_CMD_REVERB_CONFIG 0x0001 +#define AUDPP_CMD_REVERB_CONFIG_COMMON_LEN \ + sizeof(struct audpp_cmd_reverb_config_common) + +#define AUDPP_CMD_ENA_ENA 0xFFFF +#define AUDPP_CMD_ENA_DIS 0x0000 +#define AUDPP_CMD_ENA_CFG 0x0001 + +#define AUDPP_CMD_CMD_TYPE_ENV 0x0104 +#define AUDPP_CMD_CMD_TYPE_OBJ 0x0015 +#define AUDPP_CMD_CMD_TYPE_QUERY 0x1000 + + +struct audpp_cmd_reverb_config_common { + unsigned short cmd_id; + unsigned short enable; + unsigned short cmd_type; +} __attribute__((packed)); + +/* + * Command Structure to enable , disable or configure the reverberation effect + * (ENV-0x0104) + */ + +#define AUDPP_CMD_REVERB_CONFIG_ENV_104_LEN \ + sizeof(struct audpp_cmd_reverb_config_env_104) + +struct audpp_cmd_reverb_config_env_104 { + struct audpp_cmd_reverb_config_common common; + unsigned short env_gain; + unsigned short decay_msw; + unsigned short decay_lsw; + unsigned short decay_timeratio_msw; + unsigned short decay_timeratio_lsw; + unsigned short delay_time; + unsigned short reverb_gain; + unsigned short reverb_delay; +} __attribute__((packed)); + +/* + * Command Structure to enable , disable or configure the reverberation effect + * (ENV-0x0015) + */ + +#define AUDPP_CMD_REVERB_CONFIG_ENV_15_LEN \ + sizeof(struct audpp_cmd_reverb_config_env_15) + +struct audpp_cmd_reverb_config_env_15 { + struct audpp_cmd_reverb_config_common common; + unsigned short object_num; + unsigned short absolute_gain; +} __attribute__((packed)); + + +/* messages from dsp to apps */ + + +/* + * AUDPPTASK uses audPPuPRlist to send messages to the ARM + * Location : MEMA + * Buffer Size : 45 + * No of Buffers in a queue : 5 for gaming audio and 1 for other images + */ + +/* + * MSG to Informs the ARM os Success/Failure of bringing up the decoder + */ + +#define AUDPP_MSG_STATUS_MSG 0x0001 +#define AUDPP_MSG_STATUS_MSG_LEN \ + sizeof(struct audpp_msg_status_msg) + +#define AUDPP_MSG_STATUS_SLEEP 0x0000 +#define AUDPP_MSG_STATUS_INIT 0x0001 +#define AUDPP_MSG_STATUS_CFG 0x0002 +#define AUDPP_MSG_STATUS_PLAY 0x0003 + +#define AUDPP_MSG_REASON_NONE 0x0000 +#define AUDPP_MSG_REASON_MEM 0x0001 +#define AUDPP_MSG_REASON_NODECODER 0x0002 + +struct audpp_msg_status_msg { + unsigned short dec_id; + unsigned short status; + unsigned short reason; +} __attribute__((packed)); + +/* + * MSG to communicate the spectrum analyzer output bands to the ARM + */ +#define AUDPP_MSG_SPA_BANDS 0x0002 +#define AUDPP_MSG_SPA_BANDS_LEN \ + sizeof(struct audpp_msg_spa_bands) + +struct audpp_msg_spa_bands { + unsigned short current_object; + unsigned short spa_band_1; + unsigned short spa_band_2; + unsigned short spa_band_3; + unsigned short spa_band_4; + unsigned short spa_band_5; + unsigned short spa_band_6; + unsigned short spa_band_7; + unsigned short spa_band_8; + unsigned short spa_band_9; + unsigned short spa_band_10; + unsigned short spa_band_11; + unsigned short spa_band_12; + unsigned short spa_band_13; + unsigned short spa_band_14; + unsigned short spa_band_15; + unsigned short spa_band_16; + unsigned short spa_band_17; + unsigned short spa_band_18; + unsigned short spa_band_19; + unsigned short spa_band_20; + unsigned short spa_band_21; + unsigned short spa_band_22; + unsigned short spa_band_23; + unsigned short spa_band_24; + unsigned short spa_band_25; + unsigned short spa_band_26; + unsigned short spa_band_27; + unsigned short spa_band_28; + unsigned short spa_band_29; + unsigned short spa_band_30; + 
unsigned short spa_band_31; + unsigned short spa_band_32; +} __attribute__((packed)); + +/* + * MSG to communicate the PCM I/O buffer status to ARM + */ +#define AUDPP_MSG_HOST_PCM_INTF_MSG 0x0003 +#define AUDPP_MSG_HOST_PCM_INTF_MSG_LEN \ + sizeof(struct audpp_msg_host_pcm_intf_msg) + +#define AUDPP_MSG_HOSTPCM_ID_TX_ARM 0x0000 +#define AUDPP_MSG_HOSTPCM_ID_ARM_TX 0x0001 +#define AUDPP_MSG_HOSTPCM_ID_RX_ARM 0x0002 +#define AUDPP_MSG_HOSTPCM_ID_ARM_RX 0x0003 + +#define AUDPP_MSG_SAMP_FREQ_INDX_96000 0x0000 +#define AUDPP_MSG_SAMP_FREQ_INDX_88200 0x0001 +#define AUDPP_MSG_SAMP_FREQ_INDX_64000 0x0002 +#define AUDPP_MSG_SAMP_FREQ_INDX_48000 0x0003 +#define AUDPP_MSG_SAMP_FREQ_INDX_44100 0x0004 +#define AUDPP_MSG_SAMP_FREQ_INDX_32000 0x0005 +#define AUDPP_MSG_SAMP_FREQ_INDX_24000 0x0006 +#define AUDPP_MSG_SAMP_FREQ_INDX_22050 0x0007 +#define AUDPP_MSG_SAMP_FREQ_INDX_16000 0x0008 +#define AUDPP_MSG_SAMP_FREQ_INDX_12000 0x0009 +#define AUDPP_MSG_SAMP_FREQ_INDX_11025 0x000A +#define AUDPP_MSG_SAMP_FREQ_INDX_8000 0x000B + +#define AUDPP_MSG_CHANNEL_MODE_MONO 0x0001 +#define AUDPP_MSG_CHANNEL_MODE_STEREO 0x0002 + +struct audpp_msg_host_pcm_intf_msg { + unsigned short obj_num; + unsigned short numbers_of_samples; + unsigned short host_pcm_id; + unsigned short buf_indx; + unsigned short samp_freq_indx; + unsigned short channel_mode; +} __attribute__((packed)); + + +/* + * MSG to communicate 3D position of the source and listener , source volume + * source rolloff, source orientation + */ + +#define AUDPP_MSG_QAFX_POS 0x0004 +#define AUDPP_MSG_QAFX_POS_LEN \ + sizeof(struct audpp_msg_qafx_pos) + +struct audpp_msg_qafx_pos { + unsigned short current_object; + unsigned short x_pos_lis_msw; + unsigned short x_pos_lis_lsw; + unsigned short y_pos_lis_msw; + unsigned short y_pos_lis_lsw; + unsigned short z_pos_lis_msw; + unsigned short z_pos_lis_lsw; + unsigned short x_fwd_msw; + unsigned short x_fwd_lsw; + unsigned short y_fwd_msw; + unsigned short y_fwd_lsw; + unsigned short z_fwd_msw; + unsigned short z_fwd_lsw; + unsigned short x_up_msw; + unsigned short x_up_lsw; + unsigned short y_up_msw; + unsigned short y_up_lsw; + unsigned short z_up_msw; + unsigned short z_up_lsw; + unsigned short x_vel_lis_msw; + unsigned short x_vel_lis_lsw; + unsigned short y_vel_lis_msw; + unsigned short y_vel_lis_lsw; + unsigned short z_vel_lis_msw; + unsigned short z_vel_lis_lsw; + unsigned short threed_enable_flag; + unsigned short volume; + unsigned short x_pos_source_msw; + unsigned short x_pos_source_lsw; + unsigned short y_pos_source_msw; + unsigned short y_pos_source_lsw; + unsigned short z_pos_source_msw; + unsigned short z_pos_source_lsw; + unsigned short max_dist_0_msw; + unsigned short max_dist_0_lsw; + unsigned short min_dist_0_msw; + unsigned short min_dist_0_lsw; + unsigned short roll_off_factor; + unsigned short mute_after_max_flag; + unsigned short x_vel_source_msw; + unsigned short x_vel_source_lsw; + unsigned short y_vel_source_msw; + unsigned short y_vel_source_lsw; + unsigned short z_vel_source_msw; + unsigned short z_vel_source_lsw; +} __attribute__((packed)); + +/* + * MSG to provide AVSYNC feedback from DSP to ARM + */ + +#define AUDPP_MSG_AVSYNC_MSG 0x0005 +#define AUDPP_MSG_AVSYNC_MSG_LEN \ + sizeof(struct audpp_msg_avsync_msg) + +struct audpp_msg_avsync_msg { + unsigned short active_flag; + unsigned short num_samples_counter0_HSW; + unsigned short num_samples_counter0_MSW; + unsigned short num_samples_counter0_LSW; + unsigned short num_bytes_counter0_HSW; + unsigned short num_bytes_counter0_MSW; + 
unsigned short num_bytes_counter0_LSW; + unsigned short samp_freq_obj_0; + unsigned short samp_freq_obj_1; + unsigned short samp_freq_obj_2; + unsigned short samp_freq_obj_3; + unsigned short samp_freq_obj_4; + unsigned short samp_freq_obj_5; + unsigned short samp_freq_obj_6; + unsigned short samp_freq_obj_7; + unsigned short samp_freq_obj_8; + unsigned short samp_freq_obj_9; + unsigned short samp_freq_obj_10; + unsigned short samp_freq_obj_11; + unsigned short samp_freq_obj_12; + unsigned short samp_freq_obj_13; + unsigned short samp_freq_obj_14; + unsigned short samp_freq_obj_15; + unsigned short num_samples_counter4_HSW; + unsigned short num_samples_counter4_MSW; + unsigned short num_samples_counter4_LSW; + unsigned short num_bytes_counter4_HSW; + unsigned short num_bytes_counter4_MSW; + unsigned short num_bytes_counter4_LSW; +} __attribute__((packed)); + +/* + * MSG to provide PCM DMA Missed feedback from the DSP to ARM + */ + +#define AUDPP_MSG_PCMDMAMISSED 0x0006 +#define AUDPP_MSG_PCMDMAMISSED_LEN \ + sizeof(struct audpp_msg_pcmdmamissed); + +struct audpp_msg_pcmdmamissed { + /* + ** Bit 0 0 = PCM DMA not missed for object 0 + ** 1 = PCM DMA missed for object0 + ** Bit 1 0 = PCM DMA not missed for object 1 + ** 1 = PCM DMA missed for object1 + ** Bit 2 0 = PCM DMA not missed for object 2 + ** 1 = PCM DMA missed for object2 + ** Bit 3 0 = PCM DMA not missed for object 3 + ** 1 = PCM DMA missed for object3 + ** Bit 4 0 = PCM DMA not missed for object 4 + ** 1 = PCM DMA missed for object4 + */ + unsigned short pcmdmamissed; +} __attribute__((packed)); + +/* + * MSG to AUDPP enable or disable feedback form DSP to ARM + */ + +#define AUDPP_MSG_CFG_MSG 0x0007 +#define AUDPP_MSG_CFG_MSG_LEN \ + sizeof(struct audpp_msg_cfg_msg) + +#define AUDPP_MSG_ENA_ENA 0xFFFF +#define AUDPP_MSG_ENA_DIS 0x0000 + +struct audpp_msg_cfg_msg { + /* Enabled - 0xffff + ** Disabled - 0 + */ + unsigned short enabled; +} __attribute__((packed)); + +/* + * MSG to communicate the reverb per object volume + */ + +#define AUDPP_MSG_QREVERB_VOLUME 0x0008 +#define AUDPP_MSG_QREVERB_VOLUME_LEN \ + sizeof(struct audpp_msg_qreverb_volume) + + +struct audpp_msg_qreverb_volume { + unsigned short obj_0_gain; + unsigned short obj_1_gain; + unsigned short obj_2_gain; + unsigned short obj_3_gain; + unsigned short obj_4_gain; + unsigned short hpcm_obj_volume; +} __attribute__((packed)); + +#define AUDPP_MSG_ROUTING_ACK 0x0009 +#define AUDPP_MSG_ROUTING_ACK_LEN \ + sizeof(struct audpp_msg_routing_ack) + +struct audpp_msg_routing_ack { + unsigned short dec_id; + unsigned short routing_mode; +} __attribute__((packed)); + +#define AUDPP_MSG_FLUSH_ACK 0x000A + +#endif diff --git a/arch/arm/mach-msm/qdsp5v2/adsp_private.h b/arch/arm/mach-msm/qdsp5v2/adsp_private.h new file mode 100644 index 0000000000000..badfb0daa5fc9 --- /dev/null +++ b/arch/arm/mach-msm/qdsp5v2/adsp_private.h @@ -0,0 +1,163 @@ +/* arch/arm/mach-msm/qdsp5v2/adsp_private.h + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _MSM_ADSP_5V2_PRIVATE_H_ +#define _MSM_ADSP_5V2_PRIVATE_H_ + +/* adsp rtos / hardware memory map */ + +#define QDSP_RAMC_OFFSET 0x00400000 +#define ADSP_READ_CTRL_OFFSET 0x00400038 +#define ADSP_WRITE_CTRL_OFFSET 0x00400034 +#define ADSP_SEND_IRQ_OFFSET 0x00c00200 + + +/* adsp rtos hardware / shared memory interface */ + +#define ADSP_WRITE_CTRL_MUTEX_M 0x80000000U +#define ADSP_WRITE_CTRL_MUTEX_NAVAIL_V 0x80000000U +#define ADSP_WRITE_CTRL_MUTEX_AVAIL_V 0x00000000U + +#define ADSP_WRITE_CTRL_CMD_M 0x70000000U +#define ADSP_WRITE_CTRL_CMD_WRITE_REQ_V 0x00000000U +#define ADSP_WRITE_CTRL_CMD_WRITE_DONE_V 0x10000000U +#define ADSP_WRITE_CTRL_CMD_NO_CMD_V 0x70000000U + +#define ADSP_WRITE_CTRL_STATUS_M 0x0E000000U +#define ADSP_WRITE_CTRL_NO_ERR_V 0x00000000U +#define ADSP_WRITE_CTRL_NO_FREE_BUF_V 0x02000000U + +#define ADSP_WRITE_CTRL_DSP_ADDR_M 0x00FFFFFFU + +#define ADSP_WRITE_CTRL_HTOD_CMD_ID_M 0x00FFFFFFU + +/* Combination of MUTEX and CMD bits to check if the DSP is busy */ +#define ADSP_WRITE_CTRL_READY_M 0xF0000000U +#define ADSP_WRITE_CTRL_READY_V 0x70000000U + +/* RTOS to Host processor command mask values */ +#define ADSP_READ_CTRL_FLAG_M 0x80000000U +#define ADSP_READ_CTRL_FLAG_UP_WAIT_V 0x00000000U +#define ADSP_READ_CTRL_FLAG_UP_CONT_V 0x80000000U + +#define ADSP_READ_CTRL_CMD_M 0x60000000U +#define ADSP_READ_CTRL_READ_DONE_V 0x00000000U +#define ADSP_READ_CTRL_READ_REQ_V 0x20000000U +#define ADSP_READ_CTRL_NO_CMD_V 0x60000000U + +/* Combination of FLAG and COMMAND bits to check if MSG ready */ +#define ADSP_READ_CTRL_READY_M 0xE0000000U +#define ADSP_READ_CTRL_READY_V 0xA0000000U +#define ADSP_READ_CTRL_CONT_V 0xC0000000U +#define ADSP_READ_CTRL_DONE_V 0xE0000000U + +#define ADSP_READ_CTRL_STATUS_M 0x18000000U +#define ADSP_READ_CTRL_NO_ERR_V 0x00000000U + +#define ADSP_READ_CTRL_IN_PROG_M 0x04000000U +#define ADSP_READ_CTRL_NO_READ_IN_PROG_V 0x00000000U +#define ADSP_READ_CTRL_READ_IN_PROG_V 0x04000000U + +#define ADSP_READ_CTRL_CMD_TYPE_M 0x03000000U +#define ADSP_READ_CTRL_CMD_TASK_TO_H_V 0x00000000U + +#define ADSP_READ_CTRL_DSP_ADDR_M 0x00FFFFFFU + +#define ADSP_READ_CTRL_MSG_ID_M 0x000000FFU +#define ADSP_READ_CTRL_TASK_ID_M 0x0000FF00U + + +/* modem adsp management DAL service interface */ + +#define ADSP_DAL_DEVICE 0x0200009A +#define ADSP_DAL_PORT "SMD_DAL00" +#define ADSP_DAL_COMMAND (DAL_OP_FIRST_DEVICE_API | 0x80000000) + +struct adsp_dal_cmd { + uint32_t cmd; + uint32_t proc_id; + uint32_t module; + void *cookie; +}; + +#define ADSP_PROC_NONE 0 +#define ADSP_PROC_MODEM 1 +#define ADSP_PROC_APPS 2 + +#define ADSP_CMD_ENABLE 1 +#define ADSP_CMD_DISABLE 2 +#define ADSP_CMD_DISABLE_EVENT_RSP 6 +#define ADSP_CMD_GET_INIT_INFO 11 + +#define ADSP_EVT_MOD_READY 0 +#define ADSP_EVT_MOD_DISABLE 1 +#define ADSP_EVT_INIT_INFO 6 +#define ADSP_EVT_DISABLE_FAIL 7 + +#define ADSP_TASKS_MAX 64 +#define ADSP_QUEUES_MAX 4 + +#define MODULE_NAME_MAX 32 +#define QUEUE_NAME_MAX 32 + +#define ADSP_QUEUE_FLAG_16BIT 0 +#define ADSP_QUEUE_FLAG_32BIT 1 + +struct adsp_queue_info { + uint8_t name[QUEUE_NAME_MAX]; + uint32_t offset; /* Queue Offset in DSP memory */ + uint16_t idx; /* Global queue identifier */ + uint16_t max_size; /* Max allowed size in bytes for a queue */ + uint16_t flag; /* queue is 32bit Vs 16 bits */ + uint16_t rvd1; + uint32_t rvd2; +}; + +struct adsp_module_info +{ + uint8_t name[MODULE_NAME_MAX]; + uint32_t uuid; + uint16_t task_id; + uint16_t q_cnt; + struct adsp_queue_info queue[ADSP_QUEUES_MAX]; + uint32_t rvd1; + uint32_t rvd2; +}; + +struct 
adsp_evt_info { + uint32_t module; + uint32_t image; + uint32_t apps_okts; /* wtf is an okts? */ +}; + +struct adsp_dal_event { + /* DAL common event header */ + uint32_t evt_handle; + uint32_t evt_cookie; + uint32_t evt_length; + + /* ADSP event header */ + uint32_t event; + uint32_t version; + uint32_t proc_id; + + /* payload */ + union { + struct adsp_module_info module; + struct adsp_evt_info info; + } u; +}; + +#endif diff --git a/arch/arm/mach-msm/qdsp5v2/audio_glue.c b/arch/arm/mach-msm/qdsp5v2/audio_glue.c new file mode 100644 index 0000000000000..5364b788bd002 --- /dev/null +++ b/arch/arm/mach-msm/qdsp5v2/audio_glue.c @@ -0,0 +1,619 @@ +/* arch/arm/mach-msm/qdsp5v2/audio_glue.c + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include + +#include "adsp_audio.h" +#include "adsp_module_afe.h" + +#include "../pmic.h" + +/* the audio codec control consists of + * - mi2s transports (x3) + * - lpa (low power audio) frontend for mi2s tx + * - various related clocks + */ +struct msm_codec { + void *tx_base; + void *rx_base; + void *lpa_base; + + struct clk *rx_mclk; + struct clk *rx_sclk; + struct clk *tx_mclk; + struct clk *tx_sclk; + + struct clk *lpa_codec_clk; + struct clk *lpa_core_clk; + struct clk *lpa_pclk; + + struct clk *adsp_clk; +}; + +#define LPA_MAX_BUF_SIZE 0x30000 + +#define LPA_CONTROL 0x00000000 +#define LPA_CODEC 0x00000004 +#define LPA_HLB_MIN_ADDR 0x00000008 +#define LPA_HLB_MAX_ADDR 0x0000000C +#define LPA_HLB_WPTR 0x00000010 +#define LPA_HLB_VOLUME_CONTROL 0x00000014 +#define LPA_LLB_MIN_ADDR 0x00000018 +#define LPA_LLB_MAX_ADDR 0x0000001C +#define LPA_SB_MIN_ADDR 0x00000020 +#define LPA_SB_MAX_ADDR 0x00000024 +#define LPA_INTR_ENABLE 0x00000028 +#define LPA_INTR_STATUS 0x0000002C +#define LPA_WMARK_ASSIGN 0x00000030 +#define LPA_WMARK_0_LLB 0x00000034 +#define LPA_WMARK_1_LLB 0x00000038 +#define LPA_WMARK_2_LLB 0x0000003C +#define LPA_WMARK_3_LLB 0x00000040 +#define LPA_WMARK_HLB 0x00000044 +#define LPA_WMARK_SB 0x00000048 +#define LPA_RDPTR_LLB 0x0000004C +#define LPA_RDPTR_HLB 0x00000050 +#define LPA_WRPTR_SB 0x00000054 +#define LPA_UTC_CONFIG 0x00000058 +#define LPA_UTC_INTR_LOW 0x0000005C +#define LPA_UTC_INTR_HIGH 0x00000060 +#define LPA_UTC_LOW 0x00000064 +#define LPA_UTC_HIGH 0x00000068 +#define LPA_MISR 0x0000006C +#define LPA_STATUS 0x00000070 +#define LPA_ACK 0x00000074 +#define LPA_MEMORY_CONTROL 0x00000078 +#define LPA_MEMORY_STATUS 0x0000007C +#define LPA_MEMORY_TIME_CONTROL 0x00000080 +#define LPA_ACC_LV 0x00000084 +#define LPA_ACC_HV 0x0000008c +#define LPA_RESETS 0x00000090 +#define LPA_TESTBUS 0x00000094 + +#define LPA_AICTL 0x00000100 + +/* OBUF_CODEC */ +#define LPA_CODEC_LOAD 0x200000 +#define LPA_CODEC_INTF_EN 0x100000 +#define LPA_CODEC_CFG_MASK 0x0FC07F + +#define LPA_SAMPLE_RATE_8KHZ 0x000000 +#define LPA_SAMPLE_RATE_11P025KHZ 0x010000 +#define LPA_SAMPLE_RATE_16KHZ 0x020000 +#define LPA_SAMPLE_RATE_22P05KHZ 0x030000 +#define LPA_SAMPLE_RATE_32KHZ 0x040000 +#define LPA_SAMPLE_RATE_44P1KHZ 0x050000 +#define LPA_SAMPLE_RATE_48KHZ 0x060000 
+#define LPA_SAMPLE_RATE_64KHZ 0x070000 +#define LPA_SAMPLE_RATE_96KHZ 0x080000 + +#define LPA_BITS_PER_CHAN_16BITS 0x000000 +#define LPA_BITS_PER_CHAN_24BITS 0x004000 +#define LPA_BITS_PER_CHAN_32BITS 0x008000 +#define LPA_BITS_PER_CHAN_RESERVED 0x00C000 + +#define LPA_INTF_SDAC 0x000010 +#define LPA_INTF_MI2S 0x000020 +#define LPA_INTF_WB_CODEC 0x000030 + +/* WB_CODEC & SDAC can only support 16bit mono/stereo. + * MI2S can bit format and number of channel + */ +#define LPA_NUM_CHAN_MONO 0x000000 +#define LPA_NUM_CHAN_STEREO 0x000001 +#define LPA_NUM_CHAN_5P1 0x000002 +#define LPA_NUM_CHAN_7P1 0x000003 +#define LPA_NUM_CHAN_4_CHANNEL 0x000004 + +/* OBUF_CONTROL */ +#define LPA_CONTROL_TEST_EN 0x100 +#define LPA_CONTROL_LLB_CLR_CMD 0x080 +#define LPA_CONTROL_SB_SAT_EN 0x040 +#define LPA_CONTROL_LLB_SAT_EN 0x020 +#define LPA_CONTROL_LLB_ACC_EN 0x008 +#define LPA_CONTROL_HLB_EN 0x004 +#define LPA_CONTROL_LLB_EN 0x002 +#define LPA_CONTROL_SB_EN 0x001 + +/* OBUF_RESET definition */ +#define LPA_RESETS_MISR 0x1 +#define LPA_RESETS_OVERALL 0x2 + +/* OBUF_STATUS definition */ +#define LPA_STATUS_RESET_DONE 0x80000 +#define LPA_STATUS_LLB_CLR 0x40000 + +/* OBUF_HLB_MIN_ADDR definition */ +#define LPA_HLB_MIN_ADDR_LOAD 0x40000 +#define LPA_HLB_MIN_ADDR_SEG_MASK 0x3e000 + +/* OBUF_HLB_MAX_ADDR definition */ +#define LPA_HLB_MAX_ADDR_SEG_MASK 0x3fff8 + +/* OBUF_LLB_MIN_ADDR definition */ +#define LPA_LLB_MIN_ADDR_LOAD 0x40000 +#define LPA_LLB_MIN_ADDR_SEG_BMSK 0x3e000 + +/* OBUF_LLB_MAX_ADDR definition */ +#define LPA_LLB_MAX_ADDR_SEG_MASK 0x3ff8 +#define LPA_LLB_MAX_ADDR_SEG_SHFT 0x3 + +/* OBUF_SB_MIN_ADDR definition */ +#define LPA_SB_MIN_ADDR_LOAD 0x4000 +#define LPA_SB_MIN_ADDR_SEG_BMSK 0x3e00 + +/* OBUF_SB_MAX_ADDR definition */ +#define LPA_SB_MAX_ADDR_SEG_BMSK 0x3ff8 + +/* OBUF_MEMORY_CONTROL definition */ +#define LPA_MEM_CTL_PWRUP 0xfff + +/* OBUF_INTR_ENABLE definition */ +#define LPA_INTR_EN 0x3 + +/* OBUF_WMARK_ASSIGN definition */ +#define LPA_WMARK_ASSIGN_BMSK 0xF +#define LPA_WMARK_ASSIGN_DONE 0xF + +/* OBUF_WMARK_n_LLB definition */ +#define LPA_WMARK_n_LLB_ADDR(n) (0x00000034 + 0x4 * (n)) +#define LPA_WMARK_CTRL_MASK 0x0c0000 +#define LPA_WMARK_CTRL_SHFT 0x12 +#define LPA_WMARK_MAP_MASK 0xf00000 +#define LPA_WMARK_MAP_SHFT 0x14 + +#define LPA_WMARK_CTL_DISABLED 0x0 +#define LPA_WMARK_CTL_NON_BLOCK 0x1 +#define LPA_WMARK_CTL_ZERO_INSERT 0x2 +#define LPA_WMARK_CTL_RESERVED 0x3 + +/* OBUF_UTC_CONFIG definition */ +#define LPA_UTC_CONFIG_MAP_MASK 0xf0 +#define LPA_UTC_CONFIG_MAP_SHFT 0x4 +#define LPA_UTC_CONFIG_EN 0x1 +#define LPA_UTC_CONFIG_NO_INTR 0xF + +/* OBUF_ACK definition */ +#define LPA_ACK_RESET_DONE 0x80000 + + +#define LPA_BUF_ID_HLB 0 /* HLB buffer */ +#define LPA_BUF_ID_LLB 1 /* LLB buffer */ +#define LPA_BUF_ID_SB 2 /* SB buffer */ +#define LPA_BUF_ID_UTC 3 + + +/* from board file in qct tree */ + +#define LPA_HLB_SIZE 0x2BFF8 + +#define LPA_ID_DSP 0 +#define LPA_ID_APP 2 + +#if 0 +#define CFG_LLB_MIN_ADDR 0x0000 +#define CFG_LLB_MAX_ADDR 0x3ff8 +#define CFG_SB_MIN_ADDR 0 +#define CFG_SB_MAX_ADDR 0 +#else +#define CFG_LLB_MIN_ADDR 0x0000 +#define CFG_LLB_MAX_ADDR 0x37f8 +#define CFG_SB_MIN_ADDR 0x3800 +#define CFG_SB_MAX_ADDR 0x3ff8 +#endif + +#define CFG_HLB_MIN_ADDR 0x00000 +#define CFG_HLB_MAX_ADDR 0x2BFF8 + +/* 7x30 MI2S Registers */ + +/* MI2S Registers are named from the MI2S block's point of view: + * - TX = transmit from SoC to external codec + * - RX = receive from external codec to SoC + */ +#define MI2S_RESET 0x00 +#define MI2S_MODE 0x04 +#define MI2S_TX_MODE 
0x08 +#define MI2S_RX_MODE 0x0C + +#define MI2S_RESET_RESET 1 + +#define MI2S_MODE_MASTER 0x1000 +#define MI2S_MODE_16BIT 0x0100 +#define MI2S_MODE_24BIT 0x0200 +#define MI2S_MODE_32BIT 0x0300 +#define MI2S_MODE_EN_3 0x0080 +#define MI2S_MODE_EN_2 0x0040 +#define MI2S_MODE_EN_1 0x0020 +#define MI2S_MODE_EN_0 0x0010 +#define MI2S_MODE_TX_3 0x0008 +#define MI2S_MODE_TX_2 0x0004 +#define MI2S_MODE_TX_1 0x0002 +#define MI2S_MODE_TX_0 0x0001 + +#define MI2S_TX_MODE_2CH 0x0000 +#define MI2S_TX_MODE_4CH 0x0008 +#define MI2S_TX_MODE_6CH 0x0010 +#define MI2S_TX_MODE_8CH 0x0018 +#define MI2S_TX_MODE_STEREO 0x0004 +#define MI2S_TX_MODE_MONO_PACK 0x0002 /* 2 mono samples packed together */ +#define MI2S_TX_MODE_DMA_SYNC 0x0001 /* sync dma ack clocks */ + +#define MI2S_RX_MODE_2CH 0x0000 +#define MI2S_RX_MODE_4CH 0x0008 +#define MI2S_RX_MODE_6CH 0x0010 +#define MI2S_RX_MODE_8CH 0x0018 +#define MI2S_RX_MODE_STEREO 0x0004 +#define MI2S_RX_MODE_MONO_PACK 0x0002 /* 2 mono samples packed together */ +#define MI2S_RX_MODE_DMA_SYNC 0x0001 /* sync dma ack clocks */ + +static int mi2s_set_output(struct msm_codec *mc, + unsigned channels, unsigned bitdepth) +{ + unsigned mode = 0; + unsigned tx_mode = 0; + + if (channels != 2 || bitdepth != 16) + return -EINVAL; + + /* TODO: support non stereo-16 (does the DSP even do that?) */ + + mode |= MI2S_MODE_MASTER; + mode |= MI2S_MODE_16BIT; + mode |= MI2S_MODE_EN_0; + mode |= MI2S_MODE_TX_0; + + tx_mode |= MI2S_TX_MODE_STEREO; + tx_mode |= MI2S_TX_MODE_2CH; + tx_mode |= MI2S_RX_MODE_DMA_SYNC; + + writel(1, mc->tx_base + MI2S_RESET); + writel(mode, mc->tx_base + MI2S_MODE); + writel(tx_mode, mc->tx_base + MI2S_TX_MODE); + writel(0, mc->tx_base + MI2S_RESET); + + return 0; +} + +static int mi2s_set_input(struct msm_codec *mc, + unsigned channels, unsigned bitdepth) +{ + unsigned mode = 0; + unsigned rx_mode = 0; + + if (channels != 2 || bitdepth != 16) + return -EINVAL; + + /* TODO: support non stereo-16 */ + /* TODO: packed mono mode? */ + + mode |= MI2S_MODE_MASTER; + mode |= MI2S_MODE_16BIT; + mode |= MI2S_MODE_EN_0; + + rx_mode |= MI2S_RX_MODE_STEREO; + rx_mode |= MI2S_RX_MODE_2CH; + rx_mode |= MI2S_RX_MODE_DMA_SYNC; + + writel(1, mc->rx_base + MI2S_RESET); + writel(mode, mc->rx_base + MI2S_MODE); + writel(rx_mode, mc->rx_base + MI2S_RX_MODE); + writel(0, mc->rx_base + MI2S_RESET); + + return 0; +} + +void lpa_enable(struct msm_codec *mc) +{ + unsigned val; + + /* for "hardware reasons" we must ensure the + * adsp clock is on during this reset sequence. 
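+ * The handshake that follows: the codec interface is first parked by
+ * writing LPA_CODEC_LOAD, LPA_RESETS_MISR | LPA_RESETS_OVERALL are
+ * asserted, we busy-wait for LPA_STATUS_RESET_DONE and acknowledge it
+ * through LPA_ACK, and only then is the adsp clock dropped again.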
+ */ + clk_enable(mc->adsp_clk); + + /* disable codec */ + writel(LPA_CODEC_LOAD, mc->lpa_base + LPA_CODEC); + + writel(LPA_RESETS_MISR | LPA_RESETS_OVERALL, + mc->lpa_base + LPA_RESETS); + + while (!(readl(mc->lpa_base + LPA_STATUS) & LPA_STATUS_RESET_DONE)) + ; + + writel(LPA_ACK_RESET_DONE, mc->lpa_base + LPA_ACK); + + clk_disable(mc->adsp_clk); + + /* configure memory buffers */ + writel(CFG_LLB_MIN_ADDR | LPA_LLB_MIN_ADDR_LOAD, + mc->lpa_base + LPA_LLB_MIN_ADDR); + writel(CFG_LLB_MAX_ADDR, mc->lpa_base + LPA_LLB_MAX_ADDR); + + writel(CFG_SB_MIN_ADDR | LPA_SB_MIN_ADDR_LOAD, + mc->lpa_base + LPA_SB_MIN_ADDR); + writel(CFG_SB_MAX_ADDR, mc->lpa_base + LPA_SB_MAX_ADDR); + + writel(CFG_HLB_MIN_ADDR | LPA_HLB_MIN_ADDR_LOAD, + mc->lpa_base + LPA_HLB_MIN_ADDR); + writel(CFG_HLB_MAX_ADDR, mc->lpa_base + LPA_HLB_MAX_ADDR); + + writel(LPA_MEM_CTL_PWRUP, mc->lpa_base + LPA_MEMORY_CONTROL); + + + while (readl(mc->lpa_base + LPA_WMARK_ASSIGN) != LPA_WMARK_ASSIGN_DONE) + ; + + /* setup watermark ownership */ + writel(LPA_ID_DSP << LPA_WMARK_MAP_SHFT, + mc->lpa_base + LPA_WMARK_0_LLB); + writel(LPA_ID_DSP << LPA_WMARK_MAP_SHFT, + mc->lpa_base + LPA_WMARK_1_LLB); + writel(LPA_ID_APP << LPA_WMARK_MAP_SHFT, + mc->lpa_base + LPA_WMARK_2_LLB); + writel(LPA_ID_APP << LPA_WMARK_MAP_SHFT, + mc->lpa_base + LPA_WMARK_3_LLB); + writel(LPA_ID_DSP << LPA_WMARK_MAP_SHFT, + mc->lpa_base + LPA_WMARK_HLB); + writel(LPA_ID_DSP << LPA_WMARK_MAP_SHFT, + mc->lpa_base + LPA_WMARK_SB); + writel(0, mc->lpa_base + LPA_UTC_CONFIG); + + + val = readl(mc->lpa_base + LPA_CONTROL); + val |= LPA_CONTROL_LLB_EN; + val |= LPA_CONTROL_LLB_SAT_EN; + val |= LPA_CONTROL_SB_EN; + val |= LPA_CONTROL_SB_SAT_EN; + writel(val, mc->lpa_base + LPA_CONTROL); + + writel(1 << LPA_ID_DSP, mc->lpa_base + LPA_INTR_ENABLE); +} + +void lpa_start(struct msm_codec *mc) +{ + unsigned val, codec; + + codec = LPA_CODEC_LOAD; + codec |= LPA_NUM_CHAN_STEREO; + codec |= LPA_SAMPLE_RATE_48KHZ; + codec |= LPA_BITS_PER_CHAN_16BITS; + codec |= LPA_INTF_WB_CODEC; + writel(codec, mc->lpa_base + LPA_CODEC); + + /* clear LLB */ + val = readl(mc->lpa_base + LPA_CONTROL); + writel(val | LPA_CONTROL_LLB_CLR_CMD, mc->lpa_base + LPA_CONTROL); + + while (!(readl(mc->lpa_base + LPA_STATUS) & LPA_STATUS_LLB_CLR)) + udelay(100); + + /* enable codec */ + codec |= LPA_CODEC_INTF_EN; + writel(codec, mc->lpa_base + LPA_CODEC); +} + +void lpa_disable(struct msm_codec *mc) +{ + writel(LPA_CODEC_LOAD, mc->lpa_base + LPA_CODEC); +} + +int msm_codec_output_enable(struct msm_codec *mc) +{ + unsigned rate, val; + + pr_info("msm_codec_output_enable()\n"); + + /* yes rx clks for tx codec -- the clocks + * are named from the opposite POV of the + * codec for some reason... 
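+ * (Worked out as an aside, not stated in the code: the rate request
+ * below is 48000 Hz * 16 bits * 2 channels * 8 = 12.288 MHz, i.e. a
+ * 256*fs master clock -- the factor of 8 is presumably the MCLK to
+ * bit-clock ratio, which also lines up with the "_48k_256_" naming of
+ * the adie register tables.)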
+ */ + + + /* bitrate * bits * channels * 8 */ + rate = 48000 * 16 * 2 * 8; + clk_set_rate(mc->rx_mclk, rate); + + clk_enable(mc->rx_mclk); + clk_enable(mc->rx_sclk); + + clk_enable(mc->lpa_pclk); + clk_enable(mc->lpa_codec_clk); + clk_enable(mc->lpa_core_clk); + /* LPA init */ + + lpa_enable(mc); + + /* interconnect reg -> LPA */ + val = readl(mc->lpa_base + LPA_AICTL); + writel(val | 4, mc->lpa_base + LPA_AICTL); + + /* fire up mi2s transport */ + mi2s_set_output(mc, 2, 16); + + lpa_start(mc); + + /* AFE enable */ + + /* ADIE enable */ + + /* AMP enable */ + + return 0; +} + +int msm_codec_output_disable(struct msm_codec *mc) +{ + pr_info("msm_codec_output_disable()\n"); + /* AMP disable */ + /* ADIE disable */ + /* AFE disable */ + /* LPA disable */ + + clk_disable(mc->lpa_core_clk); + clk_disable(mc->lpa_codec_clk); + clk_disable(mc->lpa_pclk); + + clk_disable(mc->rx_sclk); + clk_disable(mc->rx_mclk); + + return 0; +} + + +int msm_codec_input_enable(struct msm_codec *mc) +{ + unsigned rate; + + pr_info("msm_codec_input_enable()\n"); + + /* yes tx clks for rx codec -- the clocks + * are named from the opposite POV of the + * codec for some reason... + */ + + + /* bitrate * bits * channels * 8 */ + rate = 48000 * 16 * 2 * 8; + clk_set_rate(mc->tx_mclk, rate); + + clk_enable(mc->tx_mclk); + clk_enable(mc->tx_sclk); + + /* fire up mi2s transport */ + mi2s_set_input(mc, 2, 16); + + return 0; +} + +int msm_codec_input_disable(struct msm_codec *mc) +{ + pr_info("msm_codec_input_disable()\n"); + + clk_disable(mc->tx_sclk); + clk_disable(mc->tx_mclk); + + return 0; +} + + +static struct msm_codec the_msm_codec; + +int msm_codec_output(int enable) +{ + struct msm_codec *mc = &the_msm_codec; + if (enable) + return msm_codec_output_enable(mc); + else + return msm_codec_output_disable(mc); +} + +int msm_codec_input(int enable) +{ + struct msm_codec *mc = &the_msm_codec; + if (enable) + return msm_codec_input_enable(mc); + else + return msm_codec_input_disable(mc); +} + +/* 7x30 memory map */ + +#define PHYS_ADDR_LPA 0xA5000000 +#define PHYS_SIZE_LPA 0x00000800 + +#define PHYS_ADDR_MI2S_HDMI 0xAC900000 +#define PHYS_ADDR_MI2S_CODEC_RX 0xAC940040 +#define PHYS_ADDR_MI2S_CODEC_TX 0xAC980080 +#define PHYS_SIZE_MI2S 0x00000040 + +int msm_codec_init(void) +{ + struct msm_codec *mc = &the_msm_codec; + + printk("msm_codec_init()\n"); + + mc->rx_mclk = clk_get(NULL, "mi2s_codec_rx_mclk"); + if (IS_ERR(mc->rx_mclk)) + return -ENODEV; + mc->rx_sclk = clk_get(NULL, "mi2s_codec_rx_sclk"); + if (IS_ERR(mc->rx_sclk)) + return -ENODEV; + mc->tx_mclk = clk_get(NULL, "mi2s_codec_tx_mclk"); + if (IS_ERR(mc->tx_mclk)) + return -ENODEV; + mc->tx_sclk = clk_get(NULL, "mi2s_codec_tx_sclk"); + if (IS_ERR(mc->tx_sclk)) + return -ENODEV; + mc->lpa_codec_clk = clk_get(NULL, "lpa_codec_clk"); + if (IS_ERR(mc->lpa_codec_clk)) + return -ENODEV; + mc->lpa_core_clk = clk_get(NULL, "lpa_core_clk"); + if (IS_ERR(mc->lpa_core_clk)) + return -ENODEV; + mc->lpa_pclk = clk_get(NULL, "lpa_pclk"); + if (IS_ERR(mc->lpa_pclk)) + return -ENODEV; + mc->adsp_clk = clk_get(NULL, "adsp_clk"); + if (IS_ERR(mc->adsp_clk)) + return -ENODEV; + + mc->lpa_base = ioremap(PHYS_ADDR_LPA, PHYS_SIZE_LPA); + if (!mc->lpa_base) + return -ENODEV; + mc->rx_base = ioremap(PHYS_ADDR_MI2S_CODEC_RX, PHYS_SIZE_MI2S); + if (!mc->rx_base) + return -ENODEV; + mc->tx_base = ioremap(PHYS_ADDR_MI2S_CODEC_TX, PHYS_SIZE_MI2S); + if (!mc->tx_base) + return -ENODEV; + + return 0; +} + +int audio_route_path(const char *path) +{ + static int need_init = 1; + + 
pr_info("audio_route_path: %s\n", path ? path : "default"); + + if (need_init) { + pr_info("audio_route_path: enable mi2s and lpa\n"); + + msm_codec_output(1); + afe_enable(AFE_DEVICE_MI2S_CODEC_RX, 48000, 2); + + msm_codec_input(1); + afe_enable(AFE_DEVICE_MI2S_CODEC_TX, 48000, 2); + + /* should be only for handset codec when required... */ + pmic_hsed_enable(PM_HSED_CONTROLLER_0, PM_HSED_ENABLE_PWM_TCXO); + + need_init = 0; + + /* make sure something sane happens if nobody has configured + * an audio route and the first user requests the default + * route + */ + if (path == NULL) + return codec_route_path("speaker"); + } + + return codec_route_path(path); +} diff --git a/arch/arm/mach-msm/qdsp5v2/audio_out.c b/arch/arm/mach-msm/qdsp5v2/audio_out.c new file mode 100644 index 0000000000000..86fc61902145d --- /dev/null +++ b/arch/arm/mach-msm/qdsp5v2/audio_out.c @@ -0,0 +1,254 @@ +/* arch/arm/mach-msm/qdsp5v2/audio_out.c + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include "adsp.h" +#include "adsp_audio.h" + +#include "adsp_module_afe.h" + + +void adie_enable(void); + +struct audio_buffer { + dma_addr_t phys; + void *data; + uint32_t size; + uint32_t used; +}; + +struct audio { + struct audio_buffer buf[2]; + + int cpu_buf; + int dsp_buf; + int running; + int session; + + wait_queue_head_t wait; + struct audplay *audplay; + void *data; + dma_addr_t phys; +}; + +static void audio_send_data(void *cookie) +{ + struct audio *audio = cookie; + struct audio_buffer *ab = audio->buf + audio->dsp_buf; + + if (ab->used) { + ab->used = 0; + audio->dsp_buf ^= 1; + wake_up(&audio->wait); + } +} + + +static int audio_open(struct inode *inode, struct file *file) +{ + int ret; + struct audio *audio; + +#if 0 + static int need_init = 1; + if (need_init) { + msm_codec_output(1); + afe_enable(AFE_DEVICE_MI2S_CODEC_RX, 48000, 2); + adie_enable(); + + msm_codec_input(1); + afe_enable(AFE_DEVICE_MI2S_CODEC_TX, 48000, 2); + need_init = 0; + } + +#if 0 + msleep(5000); + afe_disable(AFE_DEVICE_MI2S_CODEC_RX); + msm_codec_output(0); + + msleep(5000); + msm_codec_output(1); + afe_enable(AFE_DEVICE_MI2S_CODEC_RX, 48000, 2); + adie_enable(); +#endif +#endif + + ret = audio_route_path(NULL); + if (ret < 0) + return ret; + + audio = kzalloc(sizeof(*audio), GFP_KERNEL); + if (!audio) + return -ENOMEM; + + audio->data = dma_alloc_coherent(NULL, 8192, &audio->phys, GFP_KERNEL); + if (!audio->data) { + pr_err("audio: could not allocate DMA buffers\n"); + kfree(audio); + return -ENOMEM; + } + + init_waitqueue_head(&audio->wait); + + audio->buf[0].phys = audio->phys; + audio->buf[0].data = audio->data; + audio->buf[0].size = 4096; + audio->buf[0].used = 0; + audio->buf[1].phys = audio->phys + 4096; + audio->buf[1].data = audio->data + 4096; + audio->buf[1].size = 4096; + audio->buf[1].used = 0; + + audio->audplay = audplay_get(audio_send_data, audio); + if (!audio->audplay) { + kfree(audio); + return -ENODEV; + } + + audplay_dsp_config(audio->audplay, 1); + 
+ audplay_config_pcm(audio->audplay, 44100, 16, 2); + + audplay_mix_select(audio->audplay, 1); + audplay_volume_pan(audio->audplay, 0x2000, 0); + + file->private_data = audio; + return 0; +} + +static ssize_t audio_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + struct audio *audio = file->private_data; + struct audio_buffer *ab; + const char __user *start = buf; + int xfer; + + while (count > 0) { + ab = audio->buf + audio->cpu_buf; + + if (ab->used) + if (!wait_event_timeout(audio->wait, + (ab->used == 0), 5*HZ)) { + pr_err("audio_write: timeout. dsp dead?\n"); + return -EIO; + } + + xfer = count; + if (xfer > ab->size) + xfer = ab->size; + + if (copy_from_user(ab->data, buf, xfer)) + return -EFAULT; + + buf += xfer; + count -= xfer; + + ab->used = xfer; + audplay_send_data(audio->audplay, ab->phys, ab->used); + audio->cpu_buf ^= 1; + } + + return buf - start; +} + +static int audio_release(struct inode *inode, struct file *file) +{ + struct audio *audio = file->private_data; + pr_info("audio_release()\n"); + audplay_dsp_config(audio->audplay, 0); + audplay_put(audio->audplay); + kfree(audio); +#if 0 + afe_disable(AFE_DEVICE_MI2S_CODEC_RX); + msm_codec_output(0); +#endif + return 0; +} + +static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct audio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_SET_VOLUME: { + int vol; + if (copy_from_user(&vol, (void*) arg, sizeof(vol))) { + rc = -EFAULT; + break; + } + pr_info("audio_out: volume %d\n", vol); + break; + } + case AUDIO_GET_STATS: + case AUDIO_START: + case AUDIO_STOP: + case AUDIO_FLUSH: + case AUDIO_SET_CONFIG: + /* implement me! */ + break; + case AUDIO_GET_CONFIG: { + struct msm_audio_config config; + config.buffer_size = 4096; + config.buffer_count = 2; + config.sample_rate = 44100; + config.channel_count = 2; + config.unused[0] = 0; + config.unused[1] = 0; + config.unused[2] = 0; + if (copy_to_user((void*) arg, &config, sizeof(config))) { + rc = -EFAULT; + } + break; + } + } + return rc; +} + + +static const struct file_operations audio_out_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_release, + .write = audio_write, + .unlocked_ioctl = audio_ioctl, +}; + +struct miscdevice audio_out_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_pcm_out", + .fops = &audio_out_fops, +}; + +static int __init audio_init(void) +{ + adsp_audio_init(); + return misc_register(&audio_out_misc); +} + +device_initcall(audio_init); diff --git a/arch/arm/mach-msm/qdsp5v2/marimba.c b/arch/arm/mach-msm/qdsp5v2/marimba.c new file mode 100644 index 0000000000000..1173862089a29 --- /dev/null +++ b/arch/arm/mach-msm/qdsp5v2/marimba.c @@ -0,0 +1,406 @@ +/* arch/arm/mach-msm/qdsp5v2/marimba.c + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +struct codec_reg { + unsigned char addr, mask, val; +}; + +static struct codec_reg init_tx[] = { + { 0x04, 0xc0, 0x8C }, + { 0x0D, 0xFF, 0x00 }, + { 0x0E, 0xFF, 0x00 }, + { 0x0F, 0xFF, 0x00 }, + { 0x10, 0xF8, 0x68 }, + { 0x11, 0xFE, 0x00 }, + { 0x12, 0xFE, 0x00 }, + { 0x13, 0xFF, 0x58 }, + { 0x14, 0xFF, 0x00 }, + { 0x15, 0xFE, 0x00 }, + { 0x16, 0xFF, 0x00 }, + { 0x1A, 0xFF, 0x00 }, + { 0x80, 0x01, 0x00 }, + { 0x82, 0x7F, 0x18 }, + { 0x83, 0x1C, 0x00 }, + { 0x86, 0xFF, 0xAC }, + { 0x87, 0xFF, 0xAC }, + { 0x89, 0xFF, 0xFF }, + { 0x8A, 0xF0, 0x30 }, + + { 0xFF, 0x00, 0x00 }, +}; + +static struct codec_reg init_rx[] = { + { 0x23, 0xF8, 0x00 }, + { 0x24, 0x6F, 0x00 }, + { 0x25, 0x7F, 0x00 }, + { 0x26, 0xFC, 0x00 }, + { 0x28, 0xFE, 0x00 }, + { 0x29, 0xFE, 0x00 }, + { 0x33, 0xFF, 0x00 }, + { 0x34, 0xFF, 0x00 }, + { 0x35, 0xFC, 0x00 }, + { 0x36, 0xFE, 0x00 }, + { 0x37, 0xFE, 0x00 }, + { 0x38, 0xFE, 0x00 }, + { 0x39, 0xF0, 0x00 }, + { 0x3A, 0xFF, 0x0A }, + { 0x3B, 0xFC, 0xAC }, + { 0x3C, 0xFC, 0xAC }, + { 0x3D, 0xFF, 0x55 }, + { 0x3E, 0xFF, 0x55 }, + { 0x3F, 0xCF, 0x00 }, + { 0x40, 0x3F, 0x00 }, + { 0x41, 0x3F, 0x00 }, + { 0x42, 0xFF, 0x00 }, + { 0x43, 0xF7, 0x00 }, + { 0x43, 0xF7, 0x00 }, + { 0x43, 0xF7, 0x00 }, + { 0x43, 0xF7, 0x00 }, + { 0x44, 0xF7, 0x00 }, + { 0x45, 0xFF, 0x00 }, + { 0x46, 0xFF, 0x00 }, + { 0x47, 0xF7, 0x00 }, + { 0x48, 0xF7, 0x00 }, + { 0x49, 0xFF, 0x00 }, + { 0x4A, 0xFF, 0x00 }, + { 0x80, 0x02, 0x00 }, + { 0x81, 0xFF, 0x4C }, + { 0x83, 0x23, 0x00 }, + { 0x84, 0xFF, 0xAC }, + { 0x85, 0xFF, 0xAC }, + { 0x88, 0xFF, 0xFF }, + { 0x8A, 0x0F, 0x03 }, + { 0x8B, 0xFF, 0xAC }, + { 0x8C, 0x03, 0x01 }, + { 0x8D, 0xFF, 0x00 }, + { 0x8E, 0xFF, 0x00 }, + +/* lb regs */ + { 0x2B, 0x8F, 0x02 }, + { 0x2C, 0x8F, 0x02 }, + + { 0xFF, 0x00, 0x00 }, +}; + +static struct codec_reg init_handset_rx_48k_256_mono[] = { + { 0x80, 0x02, 0x02 }, + { 0x80, 0x02, 0x00 }, + + { 0x24, 0x6F, 0x44 }, + { 0x04, 0xFF, 0x8C }, + { 0x81, 0xFF, 0x4e }, + { 0x25, 0x0F, 0x0b }, + { 0x26, 0xfc, 0xfc }, + { 0x36, 0xc0, 0x80 }, + { 0x3A, 0xFF, 0x2B }, + { 0x23, 0xff, 0x20 }, + { 0x3d, 0xFF, 0x55 }, + { 0x83, 0x21, 0x21 }, + { 0x33, 0x80, 0x80 }, + + { 0xFF, 0x00, 10 }, + + { 0x33, 0x40, 0x40 }, + { 0x84, 0xff, 0x00 }, + { 0x8A, 0x05, 0x04 }, + + { 0xFF, 0x00, 0x00 }, +}; + +static struct codec_reg init_handset_tx_48k_256_mono[] = { + { 0x80, 0x01, 0x01 }, + { 0x80, 0x01, 0x00 }, + { 0x8A, 0x30, 0x30 }, + { 0x11, 0xfc, 0xfc }, + { 0x13, 0xfc, 0x58 }, + { 0x14, 0xff, 0x65 }, + { 0x15, 0xff, 0x64 }, + { 0x82, 0xff, 0x5A }, + { 0x10, 0xFF, 0x68 }, + + { 0x0D, 0xF0, 0xd0 }, + + { 0xFF, 0x00, 3 }, + + { 0x83, 0x14, 0x14 }, + { 0x8b, 0xff, 0xE6 }, + { 0x8c, 0x03, 0x02 }, + { 0x86, 0xff, 0xFA }, + { 0x8A, 0x50, 0x40 }, + + { 0xFF, 0x00, 0x00 }, +}; + +static struct codec_reg init_speaker_rx_48k_256_stereo[] = { + { 0x80, 0x02, 0x02 }, + { 0x80, 0x02, 0x00 }, + + { 0x24, 0x6F, 0x64 }, + { 0x25, 0x0F, 0x0B }, + { 0x26, 0xfc, 0xfc }, + { 0x37, 0xe6, 0x80 }, + { 0x3A, 0xFF, 0x2B }, + { 0x3d, 0xFF, 0x55 }, + { 0x83, 0x23, 0x23 }, + { 0x23, 0xff, 0x20 }, + { 0x33, 0x8a, 0x8a }, + { 0x33, 0x05, 0x05 }, + + { 0xFF, 0x00, 30 }, + + { 0x84, 0xff, 0x03 }, + { 0x85, 0xff, 0x03 }, + { 0x8A, 0x0f, 0x0c }, + + { 0xFF, 0x00, 0x00 }, +}; + + + +#include +#include +#include +#include +#include + +#include + +static struct vreg *vreg_marimba1; +static struct vreg *vreg_marimba2; +static struct vreg *vreg_marimba3; + +static int marimba_vreg_init(void) +{ + vreg_marimba1 = vreg_get(NULL, "s2"); + if (IS_ERR(vreg_marimba1)) + return 
PTR_ERR(vreg_marimba1);
+	vreg_marimba2 = vreg_get(NULL, "gp16");
+	if (IS_ERR(vreg_marimba2))
+		return PTR_ERR(vreg_marimba2);
+	/* codec vreg */
+	vreg_marimba3 = vreg_get(NULL, "s4");
+	if (IS_ERR(vreg_marimba3))
+		return PTR_ERR(vreg_marimba3);
+	return 0;
+}
+
+static void marimba_vreg_enable(void)
+{
+	vreg_enable(vreg_marimba1);
+	vreg_enable(vreg_marimba2);
+	vreg_enable(vreg_marimba3);
+}
+
+#define MARIMBA_ADDR_MARIMBA	0x0C
+#define MARIMBA_ADDR_FM		0x2A
+#define MARIMBA_ADDR_CDC	0x77
+#define MARIMBA_ADDR_QMEMBIST	0X66
+
+#define MARIMBA_REG_ID_FM	0x01
+#define MARIMBA_REG_ID_CDC	0x02
+#define MARIMBA_REG_ID_QMEMBIST	0x03
+#define MARIMBA_REG_ID_TSADC	0x04
+
+static int marimba_raw_write(struct i2c_client *client,
+			     u8 addr, u8 reg, u8 value)
+{
+	struct i2c_msg msg;
+	u8 data[2];
+	int ret;
+
+	msg.addr = addr;
+	msg.flags = 0;
+	msg.len = 2;
+	msg.buf = data;
+	data[0] = reg;
+	data[1] = value;
+
+	ret = i2c_transfer(client->adapter, &msg, 1);
+
+	if (ret != 1)
+		pr_err("marimba_write: fail %d\n", ret);
+
+	return ret;
+}
+
+static int marimba_raw_read(struct i2c_client *client, u8 addr, u8 reg)
+{
+	struct i2c_msg msg[2];
+	u8 value;
+	int ret;
+
+	msg[0].addr = addr;
+	msg[0].flags = 0;
+	msg[0].len = 1;
+	msg[0].buf = &reg;
+
+	msg[1].addr = addr;
+	msg[1].flags = I2C_M_RD;
+	msg[1].len = 1;
+	msg[1].buf = &value;
+
+	ret = i2c_transfer(client->adapter, msg, 2);
+
+	if (ret != 2)
+		pr_err("marimba_read: fail %d\n", ret);
+
+	if (ret == 2)
+		return value;
+	return ret;
+}
+
+static u8 marimba_shadow[256];
+
+static int marimba_write(struct i2c_client *client, u8 reg, u8 value)
+{
+	marimba_shadow[reg] = value;
+	return marimba_raw_write(client, client->addr, reg, value);
+}
+
+static int marimba_write_mask(struct i2c_client *client, u8 reg, u8 mask, u8 value)
+{
+	value = (marimba_shadow[reg] & (~mask)) | (value & mask);
+	marimba_shadow[reg] = value;
+	return marimba_raw_write(client, client->addr, reg, value);
+}
+
+static int marimba_read(struct i2c_client *client, u8 reg)
+{
+	return marimba_raw_read(client, client->addr, reg);
+}
+
+
+static struct i2c_client *marimba_client;
+
+void adie_load(struct i2c_client *client, struct codec_reg *regs)
+{
+	int n;
+	for (n = 0;; n++) {
+		if (regs[n].addr == 0xff) {
+			if (regs[n].val == 0)
+				return;
+			msleep(regs[n].val);
+			continue;
+		}
+		marimba_write_mask(client, regs[n].addr,
+				   regs[n].mask, regs[n].val);
+	}
+}
+
+void adie_enable(void)
+{
+	static int need_init = 1;
+	struct i2c_client *client = marimba_client;
+
+	if (need_init) {
+		marimba_vreg_enable();
+
+		marimba_write(client, 0xff, 0x08); /* bring up codec */
+		marimba_write(client, 0xff, 0x0a); /* GDFS_EN_FEW=1 */
+		marimba_write(client, 0xff, 0x0e); /* GDFS_EN_REST=1 */
+		marimba_write(client, 0xff, 0x07); /* RESET_N=1 */
+		marimba_write(client, 0xff, 0x17); /* clock enable */
+		marimba_write(client, 0x03, 0x04); /* enable band gap */
+		marimba_write(client, 0x8F, 0x44); /* dither delay select, dmic gain bypass */
+
+		msleep(100);
+
+		adie_load(client, init_tx);
+		adie_load(client, init_rx);
+
+		need_init = 0;
+	}
+}
+
+int codec_route_path(const char *path)
+{
+	struct i2c_client *client = marimba_client;
+
+	pr_info("codec_route_path: %s\n", path ?
path : "default"); + + if (!path) + return 0; + + if (!client) { + pr_err("codec_route_path: codec does not exist?!\n"); + return -ENODEV; + } + + if (!strcmp(path,"speaker")) { + adie_enable(); + adie_load(client, init_speaker_rx_48k_256_stereo); + return 0; + } + + if (!strcmp(path, "handset")) { + adie_enable(); + adie_load(client, init_handset_tx_48k_256_mono); + adie_load(client, init_handset_rx_48k_256_mono); + return 0; + } + + return -ENODEV; +} + +static int marimba_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int ret; + + marimba_client = client; + + printk("*** marimba probe %p '%s' @ 0x%x ** *\n", + client, client->name, client->addr); + + /* 0x10 -> MARIMBA_MODE ?! */ + marimba_raw_write(client, MARIMBA_ADDR_MARIMBA, 0x00, 0x10); + + /* program address into marimba master device */ + ret = marimba_raw_write(client, MARIMBA_ADDR_MARIMBA, + MARIMBA_REG_ID_CDC, client->addr); + + if (ret != 1) { + pr_err("marimba_probe() cannot set address\n"); + return ret; + } + + return 0; +} + + +static const struct i2c_device_id marimba_id[] = { + { "marimba-codec", 0 }, + { } +}; + +static struct i2c_driver marimba_driver = { + .probe = marimba_probe, + .id_table = marimba_id, + .driver = { + .name = "marimba", + }, +}; + + +static int marimba_init(void) +{ + int ret; + ret = marimba_vreg_init(); + if (ret) + return ret; + return i2c_add_driver(&marimba_driver); +} + +module_init(marimba_init); diff --git a/arch/arm/mach-msm/qdsp5v2/voice.c b/arch/arm/mach-msm/qdsp5v2/voice.c new file mode 100644 index 0000000000000..3c4e45c7a3f10 --- /dev/null +++ b/arch/arm/mach-msm/qdsp5v2/voice.c @@ -0,0 +1,228 @@ +/* arch/arm/mach-msm/qdsp5v2/voice.c + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include + +#include "../dal.h" + +#include "adsp_audio.h" + +#define VOICE_DAL_DEVICE 0x02000075 +#define VOICE_DAL_PORT "SMD_DAL00" + +/* Commands sent to Modem */ +#define CMD_VOICE_INIT 0x1 +#define CMD_ACQUIRE_DONE 0x2 +#define CMD_RELEASE_DONE 0x3 +#define CMD_DEVICE_INFO 0x4 +#define CMD_DEVICE_CHANGE 0x6 + +/* EVENTS received from MODEM */ +#define EVENT_ACQUIRE_START 0x51 +#define EVENT_RELEASE_START 0x52 +#define EVENT_CHANGE_START 0x54 +#define EVENT_NETWORK_RECONFIG 0x53 + +#define NETWORK_CDMA 0 +#define NETWORK_GSM 1 +#define NETWORK_WCDMA 2 +#define NETWORK_WCDMA_WB 3 + +#define VOICE_DALRPC_CMD DAL_OP_FIRST_DEVICE_API + +struct voice_header { + uint32_t id; + uint32_t data_len; +}; + +struct voice_init { + struct voice_header hdr; + void *cb_handle; +}; + +struct voice_device { + struct voice_header hdr; + uint32_t rx_device; + uint32_t tx_device; + uint32_t rx_volume; + uint32_t rx_mute; + uint32_t tx_mute; + uint32_t rx_sample; + uint32_t tx_sample; +}; + +struct voice_network { + struct voice_header hdr; + uint32_t network_info; +}; + +struct voice_event { + /* common DAL event header */ + uint32_t evt_handle; + uint32_t evt_cookie; + uint32_t evt_length; + + /* voice event header */ + uint32_t id; + uint32_t length; + + union { + uint32_t network; + } u; +}; + +struct msm_voice { + struct dal_client *client; + + uint32_t next; +}; + +static struct msm_voice the_voice; + +static int voice_cmd_acquire_done(struct msm_voice *voice) +{ + struct voice_header cmd; + int rc; + + cmd.id = CMD_ACQUIRE_DONE; + cmd.data_len = 0; + + return dal_call_f5(voice->client, VOICE_DALRPC_CMD, + &cmd, sizeof(cmd)); +} + +static void voice_work_func(struct work_struct *work) +{ + struct msm_voice *voice = &the_voice; + struct voice_device cmd; + int rc; + + pr_info("voice: doing work...\n"); + + switch (voice->next) { + case EVENT_ACQUIRE_START: + audio_route_path("handset"); + + cmd.hdr.id = CMD_DEVICE_INFO; + cmd.hdr.data_len = sizeof(cmd) - sizeof(cmd.hdr); + cmd.rx_device = 1; + cmd.tx_device = 2; + cmd.rx_volume = -500; /* millibels */ + cmd.tx_mute = 0; + cmd.rx_mute = 0; + cmd.rx_sample = 48000 / 1000; + cmd.tx_sample = 48000 / 1000; + + rc = dal_call_f5(voice->client, + VOICE_DALRPC_CMD, + &cmd, sizeof(cmd)); + if (rc < 0) { + pr_err("voice: device info failed\n"); + } + + rc = voice_cmd_acquire_done(voice); + break; + + case EVENT_RELEASE_START: + audio_route_path("speaker"); + break; + } +} + +static DECLARE_WORK(voice_work, voice_work_func); + +static void voice_dal_callback(void *data, int len, void *cookie) +{ + struct msm_voice *voice = cookie; + struct voice_event *evt = data; + + voice->next = evt->id; + + switch (evt->id) { + case EVENT_ACQUIRE_START: + pr_info("voice: ACQUIRE_START (net %d)\n", + evt->u.network); + break; + case EVENT_RELEASE_START: + pr_info("voice: RELEASE_START\n"); + + break; + case EVENT_CHANGE_START: + pr_info("voice: CHANGE_START\n"); + break; + case EVENT_NETWORK_RECONFIG: + pr_info("voice: NETWORK_RECONFIG\n"); + break; + }; + + schedule_work(&voice_work); +} + +static int voice_open(struct inode *inode, struct file *file) +{ + return 0; +} + +static ssize_t voice_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + return count; +} + +static const struct file_operations voice_fops = { + .owner = THIS_MODULE, + .open = voice_open, + .write = voice_write, +}; + +struct miscdevice voice_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "voice", + .fops = &voice_fops, +}; + +int 
msm_voice_init(void) +{ + struct msm_voice *voice = &the_voice; + struct voice_init cmd; + int rc; + + pr_info("voice: init()\n"); + + voice->client = dal_attach(VOICE_DAL_DEVICE, + VOICE_DAL_PORT, + voice_dal_callback, + voice); + + if (!voice->client) { + pr_err("voice: cannot attach to service\n"); + return -ENODEV; + } + + cmd.hdr.id = CMD_VOICE_INIT; + cmd.hdr.data_len = sizeof(cmd) - sizeof(cmd.hdr); + cmd.cb_handle = NULL; + rc = dal_call_f5(voice->client, VOICE_DALRPC_CMD, &cmd, sizeof(cmd)); + + if (rc < 0) { + pr_err("voice: init failed\n"); + return -ENODEV; + } + + return misc_register(&voice_misc); +} diff --git a/arch/arm/mach-msm/qdsp6/Makefile b/arch/arm/mach-msm/qdsp6/Makefile new file mode 100644 index 0000000000000..05cb351e7b45e --- /dev/null +++ b/arch/arm/mach-msm/qdsp6/Makefile @@ -0,0 +1,9 @@ +obj-y += q6audio.o +obj-y += pcm_out.o +obj-y += pcm_in.o +obj-y += mp3.o +obj-y += routing.o +obj-y += audio_ctl.o +obj-y += msm_q6vdec.o +obj-y += msm_q6venc.o +obj-y += dsp_debug.o \ No newline at end of file diff --git a/arch/arm/mach-msm/qdsp6/analog_audio.c b/arch/arm/mach-msm/qdsp6/analog_audio.c new file mode 100644 index 0000000000000..3ec80d4e64859 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6/analog_audio.c @@ -0,0 +1,67 @@ + +#include +#include "../pmic.h" +#include + +#define GPIO_HEADSET_AMP 157 + +void analog_init(void) +{ + /* stereo pmic init */ + pmic_spkr_set_gain(LEFT_SPKR, SPKR_GAIN_PLUS12DB); + pmic_spkr_set_gain(RIGHT_SPKR, SPKR_GAIN_PLUS12DB); + pmic_mic_set_volt(MIC_VOLT_1_80V); + + gpio_direction_output(GPIO_HEADSET_AMP, 1); + gpio_set_value(GPIO_HEADSET_AMP, 0); +} + +void analog_headset_enable(int en) +{ + /* enable audio amp */ + gpio_set_value(GPIO_HEADSET_AMP, !!en); +} + +void analog_speaker_enable(int en) +{ + struct spkr_config_mode scm; + memset(&scm, 0, sizeof(scm)); + + if (en) { + scm.is_right_chan_en = 1; + scm.is_left_chan_en = 1; + scm.is_stereo_en = 1; + scm.is_hpf_en = 1; + pmic_spkr_en_mute(LEFT_SPKR, 0); + pmic_spkr_en_mute(RIGHT_SPKR, 0); + pmic_set_spkr_configuration(&scm); + pmic_spkr_en(LEFT_SPKR, 1); + pmic_spkr_en(RIGHT_SPKR, 1); + + /* unmute */ + pmic_spkr_en_mute(LEFT_SPKR, 1); + pmic_spkr_en_mute(RIGHT_SPKR, 1); + } else { + pmic_spkr_en_mute(LEFT_SPKR, 0); + pmic_spkr_en_mute(RIGHT_SPKR, 0); + + pmic_spkr_en(LEFT_SPKR, 0); + pmic_spkr_en(RIGHT_SPKR, 0); + + pmic_set_spkr_configuration(&scm); + } +} + +static struct q6audio_analog_ops ops = { + .init = analog_init, + .speaker_enable = analog_speaker_enable, + .headset_enable = analog_headset_enable, +}; + +static int __init init(void) +{ + q6audio_register_analog_ops(&ops); + return 0; +} + +device_initcall(init); diff --git a/arch/arm/mach-msm/qdsp6/audio_ctl.c b/arch/arm/mach-msm/qdsp6/audio_ctl.c new file mode 100644 index 0000000000000..23d9eabbdd138 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6/audio_ctl.c @@ -0,0 +1,220 @@ +/* arch/arm/mach-msm/qdsp6/audio_ctrl.c + * + * Copyright (C) 2009 Google, Inc. + * Copyright (C) 2009 HTC Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include + +#include + +#define BUFSZ (0) + +static DEFINE_MUTEX(voice_lock); +static DEFINE_MUTEX(fm_lock); +static int voice_started; +static int fm_started; + +static struct audio_client *voc_tx_clnt; +static struct audio_client *voc_rx_clnt; +static struct audio_client *fm_clnt; + +static int q6_voice_start(uint32_t rx_acdb_id, uint32_t tx_acdb_id) +{ + int rc = 0; + + mutex_lock(&voice_lock); + + if (voice_started) { + pr_err("voice: busy\n"); + rc = -EBUSY; + goto done; + } + + voc_rx_clnt = q6voice_open(AUDIO_FLAG_WRITE, rx_acdb_id); + if (!voc_rx_clnt) { + pr_err("voice: open voice rx failed.\n"); + rc = -ENOMEM; + goto done; + } + + voc_tx_clnt = q6voice_open(AUDIO_FLAG_READ, tx_acdb_id); + if (!voc_tx_clnt) { + pr_err("voice: open voice tx failed.\n"); + q6voice_close(voc_rx_clnt); + rc = -ENOMEM; + } + + voice_started = 1; +done: + mutex_unlock(&voice_lock); + return rc; +} + +static int q6_voice_stop(void) +{ + mutex_lock(&voice_lock); + if (voice_started) { + q6voice_close(voc_tx_clnt); + q6voice_close(voc_rx_clnt); + voice_started = 0; + } + mutex_unlock(&voice_lock); + return 0; +} + +int q6_fm_start(void) +{ + int rc = 0; + + mutex_lock(&fm_lock); + + if (fm_started) { + pr_err("fm: busy\n"); + rc = -EBUSY; + goto done; + } + + fm_clnt = q6fm_open(); + if (!fm_clnt) { + pr_err("fm: open failed.\n"); + rc = -ENOMEM; + goto done; + } + + fm_started = 1; +done: + mutex_unlock(&fm_lock); + return rc; +} + +int q6_fm_stop(void) +{ + mutex_lock(&fm_lock); + if (fm_started) { + q6fm_close(fm_clnt); + fm_started = 0; + } + mutex_unlock(&fm_lock); + return 0; +} + +static int q6_open(struct inode *inode, struct file *file) +{ + return 0; +} + +static long q6_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int rc; + uint32_t n; + uint32_t id[2]; + char filename[64]; + + switch (cmd) { + case AUDIO_SWITCH_DEVICE: + rc = copy_from_user(&id, (void *)arg, sizeof(id)); + if (rc) { + pr_err("%s: bad user address\n", __func__); + rc = -EFAULT; + } else + rc = q6audio_do_routing(id[0], id[1]); + break; + case AUDIO_SET_VOLUME: + rc = copy_from_user(&n, (void *)arg, sizeof(n)); + if (rc) { + pr_err("%s: bad user address\n", __func__); + rc = -EFAULT; + } else + rc = q6audio_set_rx_volume(n); + break; + case AUDIO_SET_MUTE: + rc = copy_from_user(&n, (void *)arg, sizeof(n)); + if (rc) { + pr_err("%s: bad user address\n", __func__); + rc = -EFAULT; + } else + rc = q6audio_set_tx_mute(n); + break; + case AUDIO_UPDATE_ACDB: + rc = copy_from_user(&id, (void *)arg, sizeof(id)); + if (rc) { + pr_err("%s: bad user address\n", __func__); + rc = -EFAULT; + } else + rc = q6audio_update_acdb(id[0], id[1]); + break; + case AUDIO_START_VOICE: + if (arg == 0) + id[0] = id[1] = 0; + else if (copy_from_user(&id, (void *)arg, sizeof(id))) { + pr_info("voice: copy acdb_id from user failed\n"); + rc = -EFAULT; + break; + } + rc = q6_voice_start(id[0], id[1]); + break; + case AUDIO_STOP_VOICE: + rc = q6_voice_stop(); + break; + case AUDIO_START_FM: + rc = q6_fm_start(); + break; + case AUDIO_STOP_FM: + rc = q6_fm_stop(); + break; + case AUDIO_REINIT_ACDB: + rc = copy_from_user(&filename, (void *)arg, sizeof(filename)); + if (rc) { + pr_err("%s: bad user address\n", __func__); + rc = -EFAULT; + } else + rc = q6audio_reinit_acdb(filename); + break; + default: + pr_info("%s: unknown %d\n", __func__, cmd); + rc = -EINVAL; + } + + return rc; +} + + +static int q6_release(struct inode *inode, struct file *file) +{ + return 0; +} + +static struct 
file_operations q6_dev_fops = { + .owner = THIS_MODULE, + .open = q6_open, + .unlocked_ioctl = q6_ioctl, + .release = q6_release, +}; + +struct miscdevice q6_control_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_audio_ctl", + .fops = &q6_dev_fops, +}; + + +static int __init q6_audio_ctl_init(void) { + return misc_register(&q6_control_device); +} + +device_initcall(q6_audio_ctl_init); diff --git a/arch/arm/mach-msm/qdsp6/dal_acdb.h b/arch/arm/mach-msm/qdsp6/dal_acdb.h new file mode 100644 index 0000000000000..0e95b3b8a37a9 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6/dal_acdb.h @@ -0,0 +1,84 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Code Aurora Forum nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#define ACDB_DAL_DEVICE 0x02000069 +#define ACDB_DAL_PORT "SMD_DAL_AM_AUD" + +#define ACDB_OP_IOCTL DAL_OP_FIRST_DEVICE_API + +/* ioctls */ +#define ACDB_GET_DEVICE 0x0108bb92 +#define ACDB_SET_DEVICE 0x0108bb93 +#define ACDB_GET_STREAM 0x0108bb95 +#define ACDB_SET_STREAM 0x0108bb96 +#define ACDB_GET_DEVICE_TABLE 0x0108bb97 +#define ACDB_GET_STREAM_TABLE 0x0108bb98 + +#define ACDB_RES_SUCCESS 0 +#define ACDB_RES_FAILURE -1 +#define ACDB_RES_BADPARM -2 +#define ACDB_RES_BADSTATE -3 + +struct acdb_cmd_device { + uint32_t size; + + uint32_t command_id; + uint32_t device_id; + uint32_t network_id; + uint32_t sample_rate_id; + uint32_t interface_id; + uint32_t algorithm_block_id; + + /* physical page aligned buffer */ + uint32_t total_bytes; + uint32_t unmapped_buf; +} __attribute__((packed)); + +struct acdb_cmd_device_table { + uint32_t size; + + uint32_t command_id; + uint32_t device_id; + uint32_t network_id; + uint32_t sample_rate_id; + + /* physical page aligned buffer */ + uint32_t total_bytes; + uint32_t unmapped_buf; + + uint32_t res_size; +} __attribute__((packed)); + +struct acdb_result { + uint32_t dal_status; + uint32_t size; + + uint32_t unmapped_buf; + uint32_t used_bytes; + uint32_t result; +} __attribute__((packed)); diff --git a/arch/arm/mach-msm/qdsp6/dal_adie.h b/arch/arm/mach-msm/qdsp6/dal_adie.h new file mode 100644 index 0000000000000..99e3c63f5cda9 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6/dal_adie.h @@ -0,0 +1,108 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Code Aurora Forum nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef _MACH_MSM_QDSP6_ADIE_ +#define _MACH_MSM_QDSP6_ADIE_ + +#include "../dal.h" + +#define ADIE_DAL_DEVICE 0x02000029 +#define ADIE_DAL_PORT "SMD_DAL_AM_AUD" + +enum { + ADIE_OP_GET_NUM_PATHS = DAL_OP_FIRST_DEVICE_API, + ADIE_OP_GET_ALL_PATH_IDS, + ADIE_OP_SET_PATH, + ADIE_OP_GET_NUM_PATH_FREQUENCY_PLANS, + ADIE_OP_GET_PATH_FREQUENCY_PLANS, + ADIE_OP_SET_PATH_FREQUENCY_PLAN, + ADIE_OP_PROCEED_TO_STAGE, + ADIE_OP_MUTE_PATH +}; + +/* Path IDs for normal operation. 
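+ * (Presumably these IDs are what gets passed with ADIE_OP_SET_PATH over
+ * the ADIE DAL port, and the ADIE_STAGE_* values further down pair with
+ * ADIE_OP_PROCEED_TO_STAGE -- an inference, not something this header
+ * states explicitly.)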
*/ +#define ADIE_PATH_HANDSET_TX 0x010740f6 +#define ADIE_PATH_HANDSET_RX 0x010740f7 +#define ADIE_PATH_HEADSET_MONO_TX 0x010740f8 +#define ADIE_PATH_HEADSET_STEREO_TX 0x010740f9 +#define ADIE_PATH_HEADSET_MONO_RX 0x010740fa +#define ADIE_PATH_HEADSET_STEREO_RX 0x010740fb +#define ADIE_PATH_SPEAKER_TX 0x010740fc +#define ADIE_PATH_SPEAKER_RX 0x010740fd +#define ADIE_PATH_SPEAKER_STEREO_RX 0x01074101 + +/* Path IDs used for TTY */ +#define ADIE_PATH_TTY_HEADSET_TX 0x010740fe +#define ADIE_PATH_TTY_HEADSET_RX 0x010740ff + +/* Path IDs used by Factory Test Mode. */ +#define ADIE_PATH_FTM_MIC1_TX 0x01074108 +#define ADIE_PATH_FTM_MIC2_TX 0x01074107 +#define ADIE_PATH_FTM_HPH_L_RX 0x01074106 +#define ADIE_PATH_FTM_HPH_R_RX 0x01074104 +#define ADIE_PATH_FTM_EAR_RX 0x01074103 +#define ADIE_PATH_FTM_SPKR_RX 0x01074102 + +/* Path IDs for Loopback */ +/* Path IDs used for Line in -> AuxPGA -> Line Out Stereo Mode*/ +#define ADIE_PATH_AUXPGA_LINEOUT_STEREO_LB 0x01074100 +/* Line in -> AuxPGA -> LineOut Mono */ +#define ADIE_PATH_AUXPGA_LINEOUT_MONO_LB 0x01073d82 +/* Line in -> AuxPGA -> Stereo Headphone */ +#define ADIE_PATH_AUXPGA_HDPH_STEREO_LB 0x01074109 +/* Line in -> AuxPGA -> Mono Headphone */ +#define ADIE_PATH_AUXPGA_HDPH_MONO_LB 0x01073d85 +/* Line in -> AuxPGA -> Earpiece */ +#define ADIE_PATH_AUXPGA_EAP_LB 0x01073d81 +/* Line in -> AuxPGA -> AuxOut */ +#define ADIE_PATH_AUXPGA_AUXOUT_LB 0x01073d86 + +/* Concurrency Profiles */ +#define ADIE_PATH_SPKR_STEREO_HDPH_MONO_RX 0x01073d83 +#define ADIE_PATH_SPKR_MONO_HDPH_MONO_RX 0x01073d84 +#define ADIE_PATH_SPKR_MONO_HDPH_STEREO_RX 0x01073d88 +#define ADIE_PATH_SPKR_STEREO_HDPH_STEREO_RX 0x01073d89 + +/* stages */ +#define ADIE_STAGE_PATH_OFF 0x0050 +#define ADIE_STAGE_DIGITAL_READY 0x0100 +#define ADIE_STAGE_DIGITAL_ANALOG_READY 0x1000 +#define ADIE_STAGE_ANALOG_OFF 0x0750 +#define ADIE_STAGE_DIGITAL_OFF 0x0600 + +/* path types */ +#define ADIE_PATH_RX 0 +#define ADIE_PATH_TX 1 +#define ADIE_PATH_LOOPBACK 2 + +/* mute states */ +#define ADIE_MUTE_OFF 0 +#define ADIE_MUTE_ON 1 + + +#endif diff --git a/arch/arm/mach-msm/qdsp6/dal_audio.h b/arch/arm/mach-msm/qdsp6/dal_audio.h new file mode 100644 index 0000000000000..b1ad07db4a38f --- /dev/null +++ b/arch/arm/mach-msm/qdsp6/dal_audio.h @@ -0,0 +1,565 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Code Aurora Forum nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
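/*
 * Illustrative sketch: selecting one of the dal_adie.h paths above over the
 * DAL connection.  Only the opcode, path-id and stage constants come from
 * the header; the assumption that ADIE_OP_SET_PATH takes the path id as a
 * single u32 argument (dal_call_f0 style) is a guess based on the other DAL
 * clients in this patch.
 */
static int example_adie_select_handset_rx(struct dal_client *adie)
{
	/* pick the path; the caller would then walk it through the
	 * ADIE_STAGE_* states with ADIE_OP_PROCEED_TO_STAGE */
	return dal_call_f0(adie, ADIE_OP_SET_PATH, ADIE_PATH_HANDSET_RX);
}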
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef __DAL_AUDIO_H__ +#define __DAL_AUDIO_H__ + +#include "dal_audio_format.h" + +#define AUDIO_DAL_DEVICE 0x02000028 +#define AUDIO_DAL_PORT "DSP_DAL_AQ_AUD" + +enum { + AUDIO_OP_CONTROL = DAL_OP_FIRST_DEVICE_API, + AUDIO_OP_DATA, + AUDIO_OP_INIT, +}; + +/* ---- common audio structures ---- */ + +/* This flag, if set, indicates that the beginning of the data in the*/ +/* buffer is a synchronization point or key frame, meaning no data */ +/* before it in the stream is required in order to render the stream */ +/* from this point onward. */ +#define ADSP_AUDIO_BUFFER_FLAG_SYNC_POINT 0x01 + +/* This flag, if set, indicates that the buffer object is using valid */ +/* physical address used to store the media data */ +#define ADSP_AUDIO_BUFFER_FLAG_PHYS_ADDR 0x04 + +/* This flag, if set, indicates that a media start timestamp has been */ +/* set for a buffer. */ +#define ADSP_AUDIO_BUFFER_FLAG_START_SET 0x08 + +/* This flag, if set, indicates that a media stop timestamp has been set */ +/* for a buffer. */ +#define ADSP_AUDIO_BUFFER_FLAG_STOP_SET 0x10 + +/* This flag, if set, indicates that a preroll timestamp has been set */ +/* for a buffer. */ +#define ADSP_AUDIO_BUFFER_FLAG_PREROLL_SET 0x20 + +/* This flag, if set, indicates that the data in the buffer is a fragment of */ +/* a larger block of data, and will be continued by the data in the next */ +/* buffer to be delivered. */ +#define ADSP_AUDIO_BUFFER_FLAG_CONTINUATION 0x40 + +struct adsp_audio_buffer { + u32 addr; /* Physical Address of buffer */ + u32 max_size; /* Maximum size of buffer */ + u32 actual_size; /* Actual size of valid data in the buffer */ + u32 offset; /* Offset to the first valid byte */ + u32 flags; /* ADSP_AUDIO_BUFFER_FLAGs that has been set */ + s64 start; /* Start timestamp, if any */ + s64 stop; /* Stop timestamp, if any */ + s64 preroll; /* Preroll timestamp, if any */ +} __attribute__ ((packed)); + + + +/* ---- audio commands ---- */ + +/* Command/event response types */ +#define ADSP_AUDIO_RESPONSE_COMMAND 0 +#define ADSP_AUDIO_RESPONSE_ASYNC 1 + +struct adsp_command_hdr { + u32 size; /* sizeof(cmd) - sizeof(u32) */ + + u32 dst; + u32 src; + + u32 opcode; + u32 response_type; + u32 seq_number; + + u32 context; /* opaque to DSP */ + u32 data; + + u32 padding; +} __attribute__ ((packed)); + + +#define AUDIO_DOMAIN_APP 0 +#define AUDIO_DOMAIN_MODEM 1 +#define AUDIO_DOMAIN_DSP 2 + +#define AUDIO_SERVICE_AUDIO 0 +#define AUDIO_SERVICE_VIDEO 1 /* really? 
*/ + +/* adsp audio addresses are (byte order) domain, service, major, minor */ +//#define AUDIO_ADDR(maj,min) ( (((maj) & 0xff) << 16) | (((min) & 0xff) << 24) | (1) ) + +#define AUDIO_ADDR(maj,min,dom) ( (((min) & 0xff) << 24) | (((maj) & 0xff) << 16) | ((AUDIO_SERVICE_AUDIO) << 8) | (dom) ) + + +/* AAC Encoder modes */ +#define ADSP_AUDIO_ENC_AAC_LC_ONLY_MODE 0 +#define ADSP_AUDIO_ENC_AAC_PLUS_MODE 1 +#define ADSP_AUDIO_ENC_ENHANCED_AAC_PLUS_MODE 2 + +struct adsp_audio_aac_enc_cfg { + u32 bit_rate; /* bits per second */ + u32 encoder_mode; /* ADSP_AUDIO_ENC_* */ +} __attribute__ ((packed)); + +#define ADSP_AUDIO_ENC_SBC_ALLOCATION_METHOD_LOUNDNESS 0 +#define ADSP_AUDIO_ENC_SBC_ALLOCATION_METHOD_SNR 1 + +#define ADSP_AUDIO_ENC_SBC_CHANNEL_MODE_MONO 1 +#define ADSP_AUDIO_ENC_SBC_CHANNEL_MODE_STEREO 2 +#define ADSP_AUDIO_ENC_SBC_CHANNEL_MODE_DUAL 8 +#define ADSP_AUDIO_ENC_SBC_CHANNEL_MODE_JOINT_STEREO 9 + +struct adsp_audio_sbc_encoder_cfg { + u32 num_subbands; + u32 block_len; + u32 channel_mode; + u32 allocation_method; + u32 bit_rate; +} __attribute__ ((packed)); + +/* AMR NB encoder modes */ +#define ADSP_AUDIO_AMR_MR475 0 +#define ADSP_AUDIO_AMR_MR515 1 +#define ADSP_AUDIO_AMR_MMR59 2 +#define ADSP_AUDIO_AMR_MMR67 3 +#define ADSP_AUDIO_AMR_MMR74 4 +#define ADSP_AUDIO_AMR_MMR795 5 +#define ADSP_AUDIO_AMR_MMR102 6 +#define ADSP_AUDIO_AMR_MMR122 7 + +/* The following are valid AMR NB DTX modes */ +#define ADSP_AUDIO_AMR_DTX_MODE_OFF 0 +#define ADSP_AUDIO_AMR_DTX_MODE_ON_VAD1 1 +#define ADSP_AUDIO_AMR_DTX_MODE_ON_VAD2 2 +#define ADSP_AUDIO_AMR_DTX_MODE_ON_AUTO 3 + +/* AMR Encoder configuration */ +struct adsp_audio_amr_enc_cfg { + u32 mode; /* ADSP_AUDIO_AMR_MR* */ + u32 dtx_mode; /* ADSP_AUDIO_AMR_DTX_MODE* */ + u32 enable; /* 1 = enable, 0 = disable */ +} __attribute__ ((packed)); + +struct adsp_audio_qcelp13k_enc_cfg { + u16 min_rate; + u16 max_rate; +} __attribute__ ((packed)); + +struct adsp_audio_evrc_enc_cfg { + u16 min_rate; + u16 max_rate; +} __attribute__ ((packed)); + +union adsp_audio_codec_config { + struct adsp_audio_amr_enc_cfg amr; + struct adsp_audio_aac_enc_cfg aac; + struct adsp_audio_qcelp13k_enc_cfg qcelp13k; + struct adsp_audio_evrc_enc_cfg evrc; + struct adsp_audio_sbc_encoder_cfg sbc; +} __attribute__ ((packed)); + + +/* This is the default value. */ +#define ADSP_AUDIO_OPEN_STREAM_MODE_NONE 0x0000 + +/* This bit, if set, indicates that the AVSync mode is activated. */ +#define ADSP_AUDIO_OPEN_STREAM_MODE_AVSYNC 0x0001 + +/* This bit, if set, indicates that the Sample Rate/Channel Mode */ +/* Change Notification mode is activated. 
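/*
 * Illustrative note on AUDIO_ADDR(): it packs an adsp audio address as
 * minor | major | service | domain from most- to least-significant byte,
 * which is the little-endian "domain, service, major, minor" byte order
 * described above.  Reading major/minor as session/stream numbers below is
 * an assumption, not something this header states.
 */
static inline u32 example_audio_dst(u32 session, u32 stream)
{
	/* e.g. AUDIO_ADDR(2, 0, AUDIO_DOMAIN_DSP) == 0x00020002 */
	return AUDIO_ADDR(session, stream, AUDIO_DOMAIN_DSP);
}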
*/ +#define ADSP_AUDIO_OPEN_STREAM_MODE_SR_CM_NOTIFY 0x0002 + +/* This bit, if set, indicates that the sync clock is enabled */ +#define ADSP_AUDIO_OPEN_STREAM_MODE_ENABLE_SYNC_CLOCK 0x0004 + +struct adsp_open_command { + struct adsp_command_hdr hdr; + + u32 device; + u32 endpoint; /* address */ + + u32 stream_context; + u32 mode; + + u32 buf_max_size; + + union adsp_audio_format format; + union adsp_audio_codec_config config; +} __attribute__ ((packed)); + + +/* --- audio control and stream session ioctls ---- */ + +/* Opcode to open a device stream session to capture audio */ +#define ADSP_AUDIO_IOCTL_CMD_OPEN_READ 0x0108dd79 + +/* Opcode to open a device stream session to render audio */ +#define ADSP_AUDIO_IOCTL_CMD_OPEN_WRITE 0x0108dd7a + +/* Opcode to open a device session, must open a device */ +#define ADSP_AUDIO_IOCTL_CMD_OPEN_DEVICE 0x0108dd7b + +/* Close an existing stream or device */ +#define ADSP_AUDIO_IOCTL_CMD_CLOSE 0x0108d8bc + + + +/* A device switch requires three IOCTL */ +/* commands in the following sequence: PREPARE, STANDBY, COMMIT */ + +/* adsp_audio_device_switch_command structure is needed for */ +/* DEVICE_SWITCH_PREPARE */ + +/* Device switch protocol step #1. Pause old device and */ +/* generate silence for the old device. */ +#define ADSP_AUDIO_IOCTL_CMD_DEVICE_SWITCH_PREPARE 0x010815c4 + +/* Device switch protocol step #2. Release old device, */ +/* create new device and generate silence for the new device. */ + +/* When client receives ack for this IOCTL, the client can */ +/* start sending IOCTL commands to configure, calibrate and */ +/* change filter settings on the new device. */ +#define ADSP_AUDIO_IOCTL_CMD_DEVICE_SWITCH_STANDBY 0x010815c5 + +/* Device switch protocol step #3. Start normal operations on new device */ +#define ADSP_AUDIO_IOCTL_CMD_DEVICE_SWITCH_COMMIT 0x01075ee7 + +struct adsp_device_switch_command { + struct adsp_command_hdr hdr; + u32 old_device; + u32 new_device; + u8 device_class; /* 0 = i.rx, 1 = i.tx, 2 = e.rx, 3 = e.tx */ + u8 device_type; /* 0 = rx, 1 = tx, 2 = both */ +} __attribute__ ((packed)); + + + +/* --- audio control session ioctls ---- */ + +#define ADSP_PATH_RX 0 +#define ADSP_PATH_TX 1 +#define ADSP_PATH_BOTH 2 + +/* These commands will affect a logical device and all its associated */ +/* streams. */ + + +/* Set device volume. */ +#define ADSP_AUDIO_IOCTL_CMD_SET_DEVICE_VOL 0x0107605c + +struct adsp_set_dev_volume_command { + struct adsp_command_hdr hdr; + u32 device_id; + u32 path; /* 0 = rx, 1 = tx, 2 = both */ + s32 volume; +} __attribute__ ((packed)); + +/* Set Device stereo volume. This command has data payload, */ +/* struct adsp_audio_set_dev_stereo_volume_command. */ +#define ADSP_AUDIO_IOCTL_SET_DEVICE_STEREO_VOL 0x0108df3e + +/* Set L, R cross channel gain for a Device. This command has */ +/* data payload, struct adsp_audio_set_dev_x_chan_gain_command. */ +#define ADSP_AUDIO_IOCTL_SET_DEVICE_XCHAN_GAIN 0x0108df40 + +/* Set device mute state. */ +#define ADSP_AUDIO_IOCTL_CMD_SET_DEVICE_MUTE 0x0107605f + +struct adsp_set_dev_mute_command { + struct adsp_command_hdr hdr; + u32 device_id; + u32 path; /* 0 = rx, 1 = tx, 2 = both */ + u32 mute; /* 1 = mute */ +} __attribute__ ((packed)); + +/* Configure Equalizer for a device. */ +/* This command has payload struct adsp_audio_set_dev_equalizer_command. */ +#define ADSP_AUDIO_IOCTL_CMD_SET_DEVICE_EQ_CONFIG 0x0108b10e + +/* Set configuration data for an algorithm aspect of a device. */ +/* This command has payload struct adsp_audio_set_dev_cfg_command. 
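/*
 * Illustrative sketch of the PREPARE -> STANDBY -> COMMIT device switch
 * sequence documented above.  send_audio_control() is hypothetical -- it
 * stands in for however the control client delivers a packet over the
 * AUDIO_DAL_PORT connection; only the opcodes, field meanings and ordering
 * come from this header.
 */
int send_audio_control(struct adsp_command_hdr *hdr);	/* hypothetical transport */

static int example_device_switch(u32 old_device, u32 new_device)
{
	struct adsp_device_switch_command cmd;
	int rc;

	memset(&cmd, 0, sizeof(cmd));
	cmd.hdr.size = sizeof(cmd) - sizeof(u32);
	cmd.old_device = old_device;
	cmd.new_device = new_device;
	cmd.device_class = 0;			/* 0 = internal rx, per the field comment */
	cmd.device_type = 0;			/* 0 = rx */

	cmd.hdr.opcode = ADSP_AUDIO_IOCTL_CMD_DEVICE_SWITCH_PREPARE;
	rc = send_audio_control(&cmd.hdr);
	if (rc)
		return rc;

	cmd.hdr.opcode = ADSP_AUDIO_IOCTL_CMD_DEVICE_SWITCH_STANDBY;
	rc = send_audio_control(&cmd.hdr);
	if (rc)
		return rc;
	/* after the STANDBY ack the new device may be configured/calibrated */

	cmd.hdr.opcode = ADSP_AUDIO_IOCTL_CMD_DEVICE_SWITCH_COMMIT;
	return send_audio_control(&cmd.hdr);
}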
*/ +#define ADSP_AUDIO_IOCTL_SET_DEVICE_CONFIG 0x0108b6cb + +struct adsp_set_dev_cfg_command { + struct adsp_command_hdr hdr; + u32 device_id; + u32 block_id; + u32 interface_id; + u32 phys_addr; + u32 phys_size; + u32 phys_used; +} __attribute__ ((packed)); + +/* Set configuration data for all interfaces of a device. */ +#define ADSP_AUDIO_IOCTL_SET_DEVICE_CONFIG_TABLE 0x0108b6bf + +struct adsp_set_dev_cfg_table_command { + struct adsp_command_hdr hdr; + u32 device_id; + u32 phys_addr; + u32 phys_size; + u32 phys_used; +} __attribute__ ((packed)); + +/* ---- audio stream data commands ---- */ + +#define ADSP_AUDIO_IOCTL_CMD_DATA_TX 0x0108dd7f +#define ADSP_AUDIO_IOCTL_CMD_DATA_RX 0x0108dd80 + +struct adsp_buffer_command { + struct adsp_command_hdr hdr; + struct adsp_audio_buffer buffer; +} __attribute__ ((packed)); + + + +/* ---- audio stream ioctls (only affect a single stream in a session) ---- */ + +/* Stop stream for audio device. */ +#define ADSP_AUDIO_IOCTL_CMD_STREAM_STOP 0x01075c54 + +/* End of stream reached. Client will not send any more data. */ +#define ADSP_AUDIO_IOCTL_CMD_STREAM_EOS 0x0108b150 + +/* Do sample slipping/stuffing on AAC outputs. The payload of */ +/* this command is struct adsp_audio_slip_sample_command. */ +#define ADSP_AUDIO_IOCTL_CMD_STREAM_SLIPSAMPLE 0x0108d40e + +/* Set stream volume. */ +/* This command has data payload, struct adsp_audio_set_volume_command. */ +#define ADSP_AUDIO_IOCTL_CMD_SET_STREAM_VOL 0x0108c0de + +/* Set stream stereo volume. This command has data payload, */ +/* struct adsp_audio_set_stereo_volume_command. */ +#define ADSP_AUDIO_IOCTL_SET_STREAM_STEREO_VOL 0x0108dd7c + +/* Set L, R cross channel gain for a Stream. This command has */ +/* data payload, struct adsp_audio_set_x_chan_gain_command. */ +#define ADSP_AUDIO_IOCTL_SET_STREAM_XCHAN_GAIN 0x0108dd7d + +/* Set stream mute state. */ +/* This command has data payload, struct adsp_audio_set_stream_mute. */ +#define ADSP_AUDIO_IOCTL_CMD_SET_STREAM_MUTE 0x0108c0df + +/* Reconfigure bit rate information. This command has data */ +/* payload, struct adsp_audio_set_bit_rate_command */ +#define ADSP_AUDIO_IOCTL_SET_STREAM_BITRATE 0x0108ccf1 + +/* Set Channel Mapping. This command has data payload, struct */ +/* This command has data payload struct adsp_audio_set_channel_map_command. */ +#define ADSP_AUDIO_IOCTL_SET_STREAM_CHANNELMAP 0x0108d32a + +/* Enable/disable AACPlus SBR. */ +/* This command has data payload struct adsp_audio_set_sbr_command */ +#define ADSP_AUDIO_IOCTL_SET_STREAM_SBR 0x0108d416 + +/* Enable/disable WMA Pro Chex and Fex. This command has data payload */ +/* struct adsp_audio_stream_set_wma_command. */ +#define ADSP_AUDIO_IOCTL_SET_STREAM_WMAPRO 0x0108d417 + + +/* ---- audio session ioctls (affect all streams in a session) --- */ + +/* Start stream for audio device. */ +#define ADSP_AUDIO_IOCTL_CMD_SESSION_START 0x010815c6 + +/* Stop all stream(s) for audio session as indicated by major id. */ +#define ADSP_AUDIO_IOCTL_CMD_SESSION_STOP 0x0108dd7e + +/* Pause the data flow for a session as indicated by major id. */ +#define ADSP_AUDIO_IOCTL_CMD_SESSION_PAUSE 0x01075ee8 + +/* Resume the data flow for a session as indicated by major id. */ +#define ADSP_AUDIO_IOCTL_CMD_SESSION_RESUME 0x01075ee9 + +/* Drop any unprocessed data buffers for a session as indicated by major id. 
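/*
 * Illustrative helper: filling an adsp_buffer_command for the DATA_TX/RX
 * opcodes above.  Whether TX or RX maps to capture or render is not stated
 * in this header, so the opcode is left to the caller; the flag and size
 * conventions follow the definitions earlier in the file.  Delivery of the
 * packet is not shown.
 */
static void example_fill_data_buffer(struct adsp_buffer_command *cmd,
				     u32 opcode, u32 phys, u32 bytes)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.size = sizeof(*cmd) - sizeof(u32);
	cmd->hdr.opcode = opcode;		/* ADSP_AUDIO_IOCTL_CMD_DATA_TX or _RX */

	cmd->buffer.addr = phys;		/* physical address of the data */
	cmd->buffer.max_size = bytes;
	cmd->buffer.actual_size = bytes;
	cmd->buffer.offset = 0;			/* first valid byte */
	cmd->buffer.flags = ADSP_AUDIO_BUFFER_FLAG_PHYS_ADDR;
}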
*/ +#define ADSP_AUDIO_IOCTL_CMD_SESSION_FLUSH 0x01075eea + +/* Start Stream DTMF tone */ +#define ADSP_AUDIO_IOCTL_CMD_SESSION_DTMF_START 0x0108c0dd + +/* Stop Stream DTMF tone */ +#define ADSP_AUDIO_IOCTL_CMD_SESSION_DTMF_STOP 0x01087554 + +/* Set Session volume. */ +/* This command has data payload, struct adsp_audio_set_volume_command. */ +#define ADSP_AUDIO_IOCTL_SET_SESSION_VOL 0x0108d8bd + +/* Set session stereo volume. This command has data payload, */ +/* struct adsp_audio_set_stereo_volume_command. */ +#define ADSP_AUDIO_IOCTL_SET_SESSION_STEREO_VOL 0x0108df3d + +/* Set L, R cross channel gain for a session. This command has */ +/* data payload, struct adsp_audio_set_x_chan_gain_command. */ +#define ADSP_AUDIO_IOCTL_SET_SESSION_XCHAN_GAIN 0x0108df3f + +/* Set Session mute state. */ +/* This command has data payload, struct adsp_audio_set_mute_command. */ +#define ADSP_AUDIO_IOCTL_SET_SESSION_MUTE 0x0108d8be + +/* Configure Equalizer for a stream. */ +/* This command has payload struct adsp_audio_set_equalizer_command. */ +#define ADSP_AUDIO_IOCTL_SET_SESSION_EQ_CONFIG 0x0108c0e0 + +/* Set Audio Video sync information. */ +/* This command has data payload, struct adsp_audio_set_av_sync_command. */ +#define ADSP_AUDIO_IOCTL_SET_SESSION_AVSYNC 0x0108d1e2 + +/* Get Audio Media Session time. */ +/* This command returns the audioTime in adsp_audio_unsigned64_event */ +#define ADSP_AUDIO_IOCTL_CMD_GET_AUDIO_TIME 0x0108c26c + + +/* these command structures are used for both STREAM and SESSION ioctls */ + +struct adsp_set_volume_command { + struct adsp_command_hdr hdr; + s32 volume; +} __attribute__ ((packed)); + +struct adsp_set_mute_command { + struct adsp_command_hdr hdr; + u32 mute; /* 1 == mute */ +} __attribute__ ((packed)); + + + +/* ---- audio events ---- */ + +/* All IOCTL commands generate an event with the IOCTL opcode as the */ +/* event id after the IOCTL command has been executed. */ + +/* This event is generated after a media stream session is opened. */ +#define ADSP_AUDIO_EVT_STATUS_OPEN 0x0108c0d6 + +/* This event is generated after a media stream session is closed. */ +#define ADSP_AUDIO_EVT_STATUS_CLOSE 0x0108c0d7 + +/* Asyncronous buffer consumption. This event is generated after a */ +/* recived buffer is consumed during rendering or filled during */ +/* capture opeartion. */ +#define ADSP_AUDIO_EVT_STATUS_BUF_DONE 0x0108c0d8 + +/* This event is generated when rendering operation is starving for */ +/* data. In order to avoid audio loss at the end of a plauback, the */ +/* client should wait for this event before issuing the close command. */ +#define ADSP_AUDIO_EVT_STATUS_BUF_UNDERRUN 0x0108c0d9 + +/* This event is generated during capture operation when there are no */ +/* buffers available to copy the captured audio data */ +#define ADSP_AUDIO_EVT_STATUS_BUF_OVERFLOW 0x0108c0da + +/* This asynchronous event is generated as a result of an input */ +/* sample rate change and/or channel mode change detected by the */ +/* decoder. The event payload data is an array of 2 uint32 */ +/* values containing the sample rate in Hz and channel mode. 
*/ +#define ADSP_AUDIO_EVT_SR_CM_CHANGE 0x0108d329 + +struct adsp_event_hdr { + u32 evt_handle; /* DAL common header */ + u32 evt_cookie; + u32 evt_length; + + u32 src; /* "source" audio address */ + u32 dst; /* "destination" audio address */ + + u32 event_id; + u32 response_type; + u32 seq_number; + + u32 context; /* opaque to DSP */ + u32 data; + + u32 status; +} __attribute__ ((packed)); + +struct adsp_buffer_event { + struct adsp_event_hdr hdr; + struct adsp_audio_buffer buffer; +} __attribute__ ((packed)); + + +/* ---- audio device IDs ---- */ + +/* Device direction Rx/Tx flag */ +#define ADSP_AUDIO_RX_DEVICE 0x00 +#define ADSP_AUDIO_TX_DEVICE 0x01 + +/* Default RX or TX device */ +#define ADSP_AUDIO_DEVICE_ID_DEFAULT 0x1081679 + +/* Source (TX) devices */ +#define ADSP_AUDIO_DEVICE_ID_HANDSET_MIC 0x107ac8d +#define ADSP_AUDIO_DEVICE_ID_HEADSET_MIC 0x1081510 +#define ADSP_AUDIO_DEVICE_ID_SPKR_PHONE_MIC 0x1081512 +#define ADSP_AUDIO_DEVICE_ID_BT_SCO_MIC 0x1081518 +#define ADSP_AUDIO_DEVICE_ID_TTY_HEADSET_MIC 0x108151b +#define ADSP_AUDIO_DEVICE_ID_I2S_MIC 0x1089bf3 + +/* Special loopback pseudo device to be paired with an RX device */ +/* with usage ADSP_AUDIO_DEVICE_USAGE_MIXED_PCM_LOOPBACK */ +#define ADSP_AUDIO_DEVICE_ID_MIXED_PCM_LOOPBACK_TX 0x1089bf2 + +/* Sink (RX) devices */ +#define ADSP_AUDIO_DEVICE_ID_HANDSET_SPKR 0x107ac88 +#define ADSP_AUDIO_DEVICE_ID_HEADSET_SPKR_MONO 0x1081511 +#define ADSP_AUDIO_DEVICE_ID_HEADSET_SPKR_STEREO 0x107ac8a +#define ADSP_AUDIO_DEVICE_ID_SPKR_PHONE_MONO 0x1081513 +#define ADSP_AUDIO_DEVICE_ID_SPKR_PHONE_MONO_W_MONO_HEADSET 0x108c508 +#define ADSP_AUDIO_DEVICE_ID_SPKR_PHONE_MONO_W_STEREO_HEADSET 0x108c894 +#define ADSP_AUDIO_DEVICE_ID_SPKR_PHONE_STEREO 0x1081514 +#define ADSP_AUDIO_DEVICE_ID_SPKR_PHONE_STEREO_W_MONO_HEADSET 0x108c895 +#define ADSP_AUDIO_DEVICE_ID_SPKR_PHONE_STEREO_W_STEREO_HEADSET 0x108c509 +#define ADSP_AUDIO_DEVICE_ID_BT_SCO_SPKR 0x1081519 +#define ADSP_AUDIO_DEVICE_ID_TTY_HEADSET_SPKR 0x108151c +#define ADSP_AUDIO_DEVICE_ID_I2S_SPKR 0x1089bf4 +#define ADSP_AUDIO_DEVICE_ID_NULL_SINK 0x108e512 + +/* BT A2DP playback device. */ +/* This device must be paired with */ +/* ADSP_AUDIO_DEVICE_ID_MIXED_PCM_LOOPBACK_TX using */ +/* ADSP_AUDIO_DEVICE_USAGE_MIXED_PCM_LOOPBACK mode */ +#define ADSP_AUDIO_DEVICE_ID_BT_A2DP_SPKR 0x108151a + +/* Voice Destination identifier - specifically used for */ +/* controlling Voice module from the Device Control Session */ +#define ADSP_AUDIO_DEVICE_ID_VOICE 0x0108df3c + +/* Audio device usage types. */ +/* This is a bit mask to determine which topology to use in the */ +/* device session */ +#define ADSP_AUDIO_DEVICE_CONTEXT_VOICE 0x01 +#define ADSP_AUDIO_DEVICE_CONTEXT_PLAYBACK 0x02 +#define ADSP_AUDIO_DEVICE_CONTEXT_MIXED_RECORD 0x10 +#define ADSP_AUDIO_DEVICE_CONTEXT_RECORD 0x20 +#define ADSP_AUDIO_DEVICE_CONTEXT_PCM_LOOPBACK 0x40 + +#endif diff --git a/arch/arm/mach-msm/qdsp6/dal_audio_format.h b/arch/arm/mach-msm/qdsp6/dal_audio_format.h new file mode 100644 index 0000000000000..cdb2e1ab98812 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6/dal_audio_format.h @@ -0,0 +1,285 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
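/*
 * Illustrative sketch of dispatching an incoming event with the
 * adsp_event_hdr / adsp_buffer_event layouts above.  How the event bytes
 * arrive (for instance via a DAL callback) is not shown and is an
 * assumption; only the event ids and structures come from this header.
 */
static void example_handle_audio_event(void *data)
{
	struct adsp_event_hdr *hdr = data;

	switch (hdr->event_id) {
	case ADSP_AUDIO_EVT_STATUS_BUF_DONE: {
		struct adsp_buffer_event *evt = data;
		/* evt->buffer.addr has been consumed (render) or filled (capture) */
		(void) evt;
		break;
	}
	case ADSP_AUDIO_EVT_STATUS_BUF_UNDERRUN:
		/* renderer is starving; per the comment above, wait for this
		 * event before issuing ADSP_AUDIO_IOCTL_CMD_CLOSE at end of
		 * playback */
		break;
	default:
		break;
	}
}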
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Code Aurora nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef __ADSP_AUDIO_MEDIA_FORMAT_H +#define __ADSP_AUDIO_MEDIA_FORMAT_H + + + +/* Supported audio media formats */ + +/* format block in shmem */ +#define ADSP_AUDIO_FORMAT_SHAREDMEMORY 0x01091a78 +/* adsp_audio_format_raw_pcm type */ +#define ADSP_AUDIO_FORMAT_PCM 0x0103d2fd +/* adsp_audio_format_raw_pcm type */ +#define ADSP_AUDIO_FORMAT_DTMF 0x01087725 +/* adsp_audio_format_adpcm type */ +#define ADSP_AUDIO_FORMAT_ADPCM 0x0103d2ff +/* Yamaha PCM format */ +#define ADSP_AUDIO_FORMAT_YADPCM 0x0108dc07 +/* ISO/IEC 11172 */ +#define ADSP_AUDIO_FORMAT_MP3 0x0103d308 +/* ISO/IEC 14496 */ +#define ADSP_AUDIO_FORMAT_MPEG4_AAC 0x010422f1 +/* AMR-NB audio in FS format */ +#define ADSP_AUDIO_FORMAT_AMRNB_FS 0x0105c16c +/* AMR-WB audio in FS format */ +#define ADSP_AUDIO_FORMAT_AMRWB_FS 0x0105c16e +/* QCELP 13k, IS733 */ +#define ADSP_AUDIO_FORMAT_V13K_FS 0x01080b8a +/* EVRC 8k, IS127 */ +#define ADSP_AUDIO_FORMAT_EVRC_FS 0x01080b89 +/* EVRC-B 8k, 4GV */ +#define ADSP_AUDIO_FORMAT_EVRCB_FS 0x0108f2a3 +/* MIDI command stream */ +#define ADSP_AUDIO_FORMAT_MIDI 0x0103d300 +/* A2DP SBC stream */ +#define ADSP_AUDIO_FORMAT_SBC 0x0108c4d8 +/* Version 10 Professional */ +#define ADSP_AUDIO_FORMAT_WMA_V10PRO 0x0108aa92 +/* Version 9 Starndard */ +#define ADSP_AUDIO_FORMAT_WMA_V9 0x0108d430 +/* AMR WideBand Plus */ +#define ADSP_AUDIO_FORMAT_AMR_WB_PLUS 0x0108f3da +/* AC3 Decoder */ +#define ADSP_AUDIO_FORMAT_AC3_DECODER 0x0108d5f9 + + +/* Not yet supported audio media formats */ + + + +/* ISO/IEC 13818 */ +#define ADSP_AUDIO_FORMAT_MPEG2_AAC 0x0103d309 +/* 3GPP TS 26.101 Sec 4.0 */ +#define ADSP_AUDIO_FORMAT_AMRNB_IF1 0x0103d305 +/* 3GPP TS 26.101 Annex A */ +#define ADSP_AUDIO_FORMAT_AMRNB_IF2 0x01057b31 +/* 3GPP TS 26.201 */ +#define ADSP_AUDIO_FORMAT_AMRWB_IF1 0x0103d306 +/* 3GPP TS 26.201 */ +#define ADSP_AUDIO_FORMAT_AMRWB_IF2 0x0105c16d +/* G.711 */ +#define ADSP_AUDIO_FORMAT_G711 0x0106201d +/* QCELP 8k, IS96A */ +#define ADSP_AUDIO_FORMAT_V8K_FS 0x01081d29 +/* Version 1 codec */ +#define ADSP_AUDIO_FORMAT_WMA_V1 0x01055b2b +/* Version 2, 7 & 8 codec */ +#define ADSP_AUDIO_FORMAT_WMA_V8 0x01055b2c +/* Version 9 Professional codec */ +#define ADSP_AUDIO_FORMAT_WMA_V9PRO 0x01055b2d +/* Version 9 Voice codec */ +#define ADSP_AUDIO_FORMAT_WMA_SP1 0x01055b2e +/* Version 9 Lossless 
codec */ +#define ADSP_AUDIO_FORMAT_WMA_LOSSLESS 0x01055b2f +/* Real Media content, low-bitrate */ +#define ADSP_AUDIO_FORMAT_RA_SIPR 0x01042a0f +/* Real Media content */ +#define ADSP_AUDIO_FORMAT_RA_COOK 0x01042a0e + + +/* For all of the audio formats, unless specified otherwise, */ +/* the following apply: */ +/* Format block bits are arranged in bytes and words in little-endian */ +/* order, i.e., least-significant bit first and least-significant */ +/* byte first. */ + + + +/* AAC Format Block. */ + +/* AAC format block consist of a format identifier followed by */ +/* AudioSpecificConfig formatted according to ISO/IEC 14496-3 */ + +/* The following AAC format identifiers are supported */ +#define ADSP_AUDIO_AAC_ADTS 0x010619cf +#define ADSP_AUDIO_AAC_MPEG4_ADTS 0x010619d0 +#define ADSP_AUDIO_AAC_LOAS 0x010619d1 +#define ADSP_AUDIO_AAC_ADIF 0x010619d2 +#define ADSP_AUDIO_AAC_RAW 0x010619d3 +#define ADSP_AUDIO_AAC_FRAMED_RAW 0x0108c1fb + + +#define ADSP_AUDIO_COMPANDING_ALAW 0x10619cd +#define ADSP_AUDIO_COMPANDING_MLAW 0x10619ce + +/* Maxmum number of bytes allowed in a format block */ +#define ADSP_AUDIO_FORMAT_DATA_MAX 16 + + +struct adsp_audio_no_payload_format { + /* Media Format Code (must always be first element) */ + u32 format; + + /* no payload for this format type */ +} __attribute__ ((packed)); + + +/* For convenience, to be used as a standard format block */ +/* for various media types that don't need a unique format block */ +/* ie. PCM, DTMF, etc. */ +struct adsp_audio_standard_format { + /* Media Format Code (must always be first element) */ + u32 format; + + /* payload */ + u16 channels; + u16 bits_per_sample; + u32 sampling_rate; + u8 is_signed; + u8 is_interleaved; +} __attribute__ ((packed)); + + + +/* ADPCM format block */ +struct adsp_audio_adpcm_format { + /* Media Format Code (must always be first element) */ + u32 format; + + /* payload */ + u16 channels; + u16 bits_per_sample; + u32 sampling_rate; + u8 is_signed; + u8 is_interleaved; + u32 block_size; +} __attribute__ ((packed)); + + +/* MIDI format block */ +struct adsp_audio_midi_format { + /* Media Format Code (must always be first element) */ + u32 format; + + /* payload */ + u32 sampling_rate; + u16 channels; + u16 mode; +} __attribute__ ((packed)); + + +/* G711 format block */ +struct adsp_audio_g711_format { + /* Media Format Code (must always be first element) */ + u32 format; + + /* payload */ + u32 companding; +} __attribute__ ((packed)); + + +struct adsp_audio_wma_pro_format { + /* Media Format Code (must always be first element) */ + u32 format; + + /* payload */ + u16 format_tag; + u16 channels; + u32 samples_per_sec; + u32 avg_bytes_per_sec; + u16 block_align; + u16 valid_bits_per_sample; + u32 channel_mask; + u16 encode_opt; + u16 advanced_encode_opt; + u32 advanced_encode_opt2; + u32 drc_peak_reference; + u32 drc_peak_target; + u32 drc_average_reference; + u32 drc_average_target; +} __attribute__ ((packed)); + + +struct adsp_audio_amrwb_plus_format { + /* Media Format Code (must always be first element) */ + u32 format; + + /* payload */ + u32 size; + u32 version; + u32 channels; + u32 amr_band_mode; + u32 amr_dtx_mode; + u32 amr_frame_format; + u32 amr_isf_index; +} __attribute__ ((packed)); + + +/* Binary Byte Stream Format */ +/* Binary format type that defines a byte stream, */ +/* can be used to specify any format (ie. 
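/*
 * Illustrative example: a 48 kHz, stereo, 16-bit signed, interleaved PCM
 * format block built with adsp_audio_standard_format above.  The particular
 * rate and width are examples only; the field meanings are as documented.
 */
static void example_pcm_format(struct adsp_audio_standard_format *fmt)
{
	fmt->format = ADSP_AUDIO_FORMAT_PCM;
	fmt->channels = 2;
	fmt->bits_per_sample = 16;
	fmt->sampling_rate = 48000;
	fmt->is_signed = 1;
	fmt->is_interleaved = 1;
}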
AAC) */ +struct adsp_audio_binary_format { + /* Media Format Code (must always be first element) */ + u32 format; + + /* payload */ + /* number of bytes set in byte stream */ + u32 num_bytes; + /* Byte stream binary data */ + u8 data[ADSP_AUDIO_FORMAT_DATA_MAX]; +} __attribute__ ((packed)); + + +struct adsp_audio_shared_memory_format { + /* Media Format Code (must always be first element) */ + u32 format; + + /* Number of bytes in shared memory */ + u32 len; + /* Phyisical address to data in shared memory */ + u32 address; +} __attribute__ ((packed)); + + +/* Union of all format types */ +union adsp_audio_format { + /* Basic format block with no payload */ + struct adsp_audio_no_payload_format no_payload; + /* Generic format block PCM, DTMF */ + struct adsp_audio_standard_format standard; + /* ADPCM format block */ + struct adsp_audio_adpcm_format adpcm; + /* MIDI format block */ + struct adsp_audio_midi_format midi; + /* G711 format block */ + struct adsp_audio_g711_format g711; + /* WmaPro format block */ + struct adsp_audio_wma_pro_format wma_pro; + /* WmaPro format block */ + struct adsp_audio_amrwb_plus_format amrwb_plus; + /* binary (byte stream) format block, used for AAC */ + struct adsp_audio_binary_format binary; + /* format block in shared memory */ + struct adsp_audio_shared_memory_format shared_mem; +}; + +#endif + diff --git a/arch/arm/mach-msm/qdsp6/dsp_debug.c b/arch/arm/mach-msm/qdsp6/dsp_debug.c new file mode 100644 index 0000000000000..71c10ebef2b1b --- /dev/null +++ b/arch/arm/mach-msm/qdsp6/dsp_debug.c @@ -0,0 +1,175 @@ +/* arch/arm/mach-msm/qdsp6/dsp_dump.c + * + * Copyright (C) 2009 Google, Inc. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../proc_comm.h" + +static wait_queue_head_t dsp_wait; +static int dsp_has_crashed; +static int dsp_wait_count; + +static atomic_t dsp_crash_count = ATOMIC_INIT(0); + +void q6audio_dsp_not_responding(void) +{ + + if (atomic_add_return(1, &dsp_crash_count) != 1) { + pr_err("q6audio_dsp_not_responding() - parking additional crasher...\n"); + for (;;) + msleep(1000); + } + if (dsp_wait_count) { + dsp_has_crashed = 1; + wake_up(&dsp_wait); + + while (dsp_has_crashed != 2) + wait_event(dsp_wait, dsp_has_crashed == 2); + } else { + pr_err("q6audio_dsp_not_responding() - no waiter?\n"); + } + BUG(); +} + +static int dsp_open(struct inode *inode, struct file *file) +{ + return 0; +} + +static ssize_t dsp_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + char cmd[32]; + + if (count >= sizeof(cmd)) + return -EINVAL; + if (copy_from_user(cmd, buf, count)) + return -EFAULT; + cmd[count] = 0; + + if ((count > 1) && (cmd[count-1] == '\n')) + cmd[count-1] = 0; + + if (!strcmp(cmd, "wait-for-crash")) { + while (!dsp_has_crashed) { + int res; + dsp_wait_count++; + res = wait_event_interruptible(dsp_wait, dsp_has_crashed); + if (res < 0) { + dsp_wait_count--; + return res; + } + } +#if defined(CONFIG_MACH_MAHIMAHI) + /* assert DSP NMI */ + msm_proc_comm(PCOM_CUSTOMER_CMD1, 0, 0); + msleep(250); +#endif + } else if (!strcmp(cmd, "boom")) { + q6audio_dsp_not_responding(); + } else if (!strcmp(cmd, "continue-crash")) { + dsp_has_crashed = 2; + wake_up(&dsp_wait); + } else { + pr_err("unknown dsp_debug command: %s\n", cmd); + } + + return count; +} + +#define DSP_RAM_BASE 0x2E800000 +#define DSP_RAM_SIZE 0x01800000 + +static unsigned copy_ok_count; + +static ssize_t dsp_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) +{ + size_t actual = 0; + size_t mapsize = PAGE_SIZE; + unsigned addr; + void __iomem *ptr; + + if (*pos >= DSP_RAM_SIZE) + return 0; + + if (*pos & (PAGE_SIZE - 1)) + return -EINVAL; + + addr = (*pos + DSP_RAM_BASE); + + /* don't blow up if we're unaligned */ + if (addr & (PAGE_SIZE - 1)) + mapsize *= 2; + + while (count >= PAGE_SIZE) { + ptr = ioremap(addr, mapsize); + if (!ptr) { + pr_err("dsp: map error @ %x\n", addr); + return -EFAULT; + } + if (copy_to_user(buf, ptr, PAGE_SIZE)) { + iounmap(ptr); + pr_err("dsp: copy error @ %p\n", buf); + return -EFAULT; + } + copy_ok_count += PAGE_SIZE; + iounmap(ptr); + addr += PAGE_SIZE; + buf += PAGE_SIZE; + actual += PAGE_SIZE; + count -= PAGE_SIZE; + } + + *pos += actual; + return actual; +} + +static int dsp_release(struct inode *inode, struct file *file) +{ + return 0; +} + +static const struct file_operations dsp_fops = { + .owner = THIS_MODULE, + .open = dsp_open, + .read = dsp_read, + .write = dsp_write, + .release = dsp_release, +}; + +static struct miscdevice dsp_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "dsp_debug", + .fops = &dsp_fops, +}; + + +static int __init dsp_init(void) +{ + init_waitqueue_head(&dsp_wait); + return misc_register(&dsp_misc); +} + +device_initcall(dsp_init); diff --git a/arch/arm/mach-msm/qdsp6/mp3.c b/arch/arm/mach-msm/qdsp6/mp3.c new file mode 100644 index 0000000000000..759afb16182ba --- /dev/null +++ b/arch/arm/mach-msm/qdsp6/mp3.c @@ -0,0 +1,220 @@ +/* arch/arm/mach-msm/qdsp6/mp3.c + * + * Copyright (C) 2009 Google, Inc. 
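/*
 * Illustrative user-space sketch for the dsp_debug misc device above: a
 * monitor process writes "wait-for-crash", blocks until the DSP dies, dumps
 * DSP RAM page by page through read(), then writes "continue-crash" so the
 * crashed kernel thread proceeds to BUG().  The /dev path assumes the usual
 * miscdevice node naming; error handling is trimmed.
 */
#include <fcntl.h>
#include <unistd.h>

static void example_dsp_crash_dump(int dumpfd)
{
	char page[4096];		/* dsp_read() copies whole pages only */
	int fd = open("/dev/dsp_debug", O_RDWR);
	ssize_t n;

	write(fd, "wait-for-crash", 14);	/* blocks until the DSP crashes */

	while ((n = read(fd, page, sizeof(page))) > 0)
		write(dumpfd, page, n);		/* DSP_RAM_SIZE bytes from DSP_RAM_BASE */

	write(fd, "continue-crash", 14);
	close(fd);
}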
+ * Copyright (C) 2009 HTC Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#define BUFSZ (8192) +#define DMASZ (BUFSZ * 2) + +struct mp3 { + struct mutex lock; + struct audio_client *ac; + uint32_t sample_rate; + uint32_t channel_count; +}; + +static long mp3_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct mp3 *mp3 = file->private_data; + int rc = 0; + + if (cmd == AUDIO_GET_STATS) { + struct msm_audio_stats stats; + memset(&stats, 0, sizeof(stats)); + if (copy_to_user((void*) arg, &stats, sizeof(stats))) + return -EFAULT; + return 0; + } + + mutex_lock(&mp3->lock); + switch (cmd) { + case AUDIO_SET_VOLUME: { + int vol; + if (copy_from_user(&vol, (void*) arg, sizeof(vol))) { + rc = -EFAULT; + break; + } + rc = q6audio_set_stream_volume(mp3->ac, vol); + break; + } + case AUDIO_START: { + uint32_t acdb_id; + if (arg == 0) { + acdb_id = 0; + } else if (copy_from_user(&acdb_id, (void*) arg, sizeof(acdb_id))) { + pr_info("pcm_out: copy acdb_id from user failed\n"); + rc = -EFAULT; + break; + } + if (mp3->ac) { + rc = -EBUSY; + } else { + mp3->ac = q6audio_open_mp3(BUFSZ, + mp3->sample_rate, mp3->channel_count, acdb_id); + if (!mp3->ac) + rc = -ENOMEM; + } + break; + } + case AUDIO_STOP: + break; + case AUDIO_FLUSH: + break; + case AUDIO_SET_CONFIG: { + struct msm_audio_config config; + if (mp3->ac) { + rc = -EBUSY; + break; + } + if (copy_from_user(&config, (void*) arg, sizeof(config))) { + rc = -EFAULT; + break; + } + if (config.channel_count < 1 || config.channel_count > 2) { + rc = -EINVAL; + break; + } + mp3->sample_rate = config.sample_rate; + mp3->channel_count = config.channel_count; + break; + } + case AUDIO_GET_CONFIG: { + struct msm_audio_config config; + config.buffer_size = BUFSZ; + config.buffer_count = 2; + config.sample_rate = mp3->sample_rate; + config.channel_count = mp3->channel_count; + config.unused[0] = 0; + config.unused[1] = 0; + config.unused[2] = 0; + if (copy_to_user((void*) arg, &config, sizeof(config))) { + rc = -EFAULT; + } + break; + } + default: + rc = -EINVAL; + } + mutex_unlock(&mp3->lock); + return rc; +} + +static int mp3_open(struct inode *inode, struct file *file) +{ + int rc = 0; + + struct mp3 *mp3; + mp3 = kzalloc(sizeof(struct mp3), GFP_KERNEL); + + if (!mp3) + return -ENOMEM; + + mutex_init(&mp3->lock); + mp3->channel_count = 2; + mp3->sample_rate = 44100; + + file->private_data = mp3; + return rc; +} + +static ssize_t mp3_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + struct mp3 *mp3 = file->private_data; + struct audio_client *ac; + struct audio_buffer *ab; + const char __user *start = buf; + int xfer; + + if (!mp3->ac) + mp3_ioctl(file, AUDIO_START, 0); + + ac = mp3->ac; + if (!ac) + return -ENODEV; + + while (count > 0) { + ab = ac->buf + ac->cpu_buf; + + if (ab->used) + wait_event(ac->wait, (ab->used == 0)); + + xfer = count; + if (xfer > ab->size) + xfer = ab->size; + + if (copy_from_user(ab->data, buf, xfer)) + return -EFAULT; + + buf += xfer; + 
count -= xfer; + + ab->used = xfer; + q6audio_write(ac, ab); + ac->cpu_buf ^= 1; + } + + return buf - start; +} + +static int mp3_fsync(struct file *f, struct dentry *dentry, int datasync) +{ + struct mp3 *mp3 = f->private_data; + if (mp3->ac) + return q6audio_async(mp3->ac); + return -ENODEV; +} + +static int mp3_release(struct inode *inode, struct file *file) +{ + struct mp3 *mp3 = file->private_data; + if (mp3->ac) + q6audio_mp3_close(mp3->ac); + kfree(mp3); + return 0; +} + +static struct file_operations mp3_fops = { + .owner = THIS_MODULE, + .open = mp3_open, + .write = mp3_write, + .fsync = mp3_fsync, + .release = mp3_release, + .unlocked_ioctl = mp3_ioctl, +}; + +struct miscdevice mp3_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_mp3", + .fops = &mp3_fops, +}; + +static int __init mp3_init(void) { + return misc_register(&mp3_misc); +} + +device_initcall(mp3_init); diff --git a/arch/arm/mach-msm/qdsp6/msm_q6vdec.c b/arch/arm/mach-msm/qdsp6/msm_q6vdec.c new file mode 100644 index 0000000000000..d6106e0463b7d --- /dev/null +++ b/arch/arm/mach-msm/qdsp6/msm_q6vdec.c @@ -0,0 +1,990 @@ +/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Code Aurora Forum nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* +#define DEBUG_TRACE_VDEC +#define DEBUG +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "../dal.h" + +#define DALDEVICEID_VDEC_DEVICE 0x02000026 +#define DALDEVICEID_VDEC_PORTNAME "DSP_DAL_AQ_VID" + +#define VDEC_INTERFACE_VERSION 0x00020000 + +#define MAJOR_MASK 0xFFFF0000 +#define MINOR_MASK 0x0000FFFF + +#define VDEC_GET_MAJOR_VERSION(version) (((version)&MAJOR_MASK)>>16) + +#define VDEC_GET_MINOR_VERSION(version) ((version)&MINOR_MASK) + +#ifdef DEBUG_TRACE_VDEC +#define TRACE(fmt,x...) \ + do { pr_debug("%s:%d " fmt, __func__, __LINE__, ##x); } while (0) +#else +#define TRACE(fmt, x...) 
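/*
 * Illustrative user-space sketch for the msm_mp3 misc device above: set the
 * sample rate and channel count while the stream is still closed, then feed
 * MP3 data with write(); the first write opens the q6 stream if AUDIO_START
 * was not issued explicitly.  The <linux/msm_audio.h> path and the device
 * node name are assumptions; error handling is trimmed.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/msm_audio.h>

static void example_play_mp3(int srcfd)
{
	struct msm_audio_config cfg;
	char buf[8192];			/* matches BUFSZ above */
	ssize_t n;
	int fd = open("/dev/msm_mp3", O_WRONLY);

	ioctl(fd, AUDIO_GET_CONFIG, &cfg);
	cfg.sample_rate = 44100;
	cfg.channel_count = 2;
	ioctl(fd, AUDIO_SET_CONFIG, &cfg);	/* rejected with -EBUSY once the stream is open */

	while ((n = read(srcfd, buf, sizeof(buf))) > 0)
		write(fd, buf, n);

	close(fd);
}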
do { } while (0) +#endif + +#define MAX_SUPPORTED_INSTANCES 2 + +enum { + VDEC_DALRPC_INITIALIZE = DAL_OP_FIRST_DEVICE_API, + VDEC_DALRPC_SETBUFFERS, + VDEC_DALRPC_FREEBUFFERS, + VDEC_DALRPC_QUEUE, + VDEC_DALRPC_SIGEOFSTREAM, + VDEC_DALRPC_FLUSH, + VDEC_DALRPC_REUSEFRAMEBUFFER, + VDEC_DALRPC_GETDECATTRIBUTES, +}; + +enum { + VDEC_ASYNCMSG_DECODE_DONE = 0xdec0de00, + VDEC_ASYNCMSG_REUSE_FRAME, +}; + +struct vdec_init_cfg { + u32 decode_done_evt; + u32 reuse_frame_evt; + struct vdec_config cfg; +}; + +struct vdec_buffer_status { + u32 data; + u32 status; +}; + +#define VDEC_MSG_MAX 128 + +struct vdec_msg_list { + struct list_head list; + struct vdec_msg vdec_msg; +}; + +struct vdec_mem_info { + u32 buf_type; + u32 id; + unsigned long phys_addr; + unsigned long len; + struct file *file; +}; + +struct vdec_mem_list { + struct list_head list; + struct vdec_mem_info mem; +}; + +struct vdec_data { + struct dal_client *vdec_handle; + struct list_head vdec_msg_list_head; + struct list_head vdec_msg_list_free; + wait_queue_head_t vdec_msg_evt; + spinlock_t vdec_list_lock; + struct list_head vdec_mem_list_head; + spinlock_t vdec_mem_list_lock; + int mem_initialized; + int running; + int close_decode; +}; + +static struct class *driver_class; +static dev_t vdec_device_no; +static struct cdev vdec_cdev; +static int ref_cnt; +static DEFINE_MUTEX(vdec_ref_lock); + +static DEFINE_MUTEX(idlecount_lock); +static int idlecount; +static struct wake_lock wakelock; +static struct wake_lock idlelock; + +static void prevent_sleep(void) +{ + mutex_lock(&idlecount_lock); + if (++idlecount == 1) { + wake_lock(&idlelock); + wake_lock(&wakelock); + } + mutex_unlock(&idlecount_lock); +} + +static void allow_sleep(void) +{ + mutex_lock(&idlecount_lock); + if (--idlecount == 0) { + wake_unlock(&idlelock); + wake_unlock(&wakelock); + } + mutex_unlock(&idlecount_lock); +} + +static inline int vdec_check_version(u32 client, u32 server) +{ + int ret = -EINVAL; + if ((VDEC_GET_MAJOR_VERSION(client) == VDEC_GET_MAJOR_VERSION(server)) + && (VDEC_GET_MINOR_VERSION(client) <= + VDEC_GET_MINOR_VERSION(server))) + ret = 0; + return ret; +} + +static int vdec_get_msg(struct vdec_data *vd, void *msg) +{ + struct vdec_msg_list *l; + unsigned long flags; + int ret = 0; + + if (!vd->running) + return -EPERM; + + spin_lock_irqsave(&vd->vdec_list_lock, flags); + list_for_each_entry_reverse(l, &vd->vdec_msg_list_head, list) { + if (copy_to_user(msg, &l->vdec_msg, sizeof(struct vdec_msg))) + pr_err("vdec_get_msg failed to copy_to_user!\n"); + if (l->vdec_msg.id == VDEC_MSG_REUSEINPUTBUFFER) + TRACE("reuse_input_buffer %d\n", l->vdec_msg.buf_id); + else if (l->vdec_msg.id == VDEC_MSG_FRAMEDONE) + TRACE("frame_done (stat=%d)\n", + l->vdec_msg.vfr_info.status); + else + TRACE("unknown msg (msgid=%d)\n", l->vdec_msg.id); + list_del(&l->list); + list_add(&l->list, &vd->vdec_msg_list_free); + ret = 1; + break; + } + spin_unlock_irqrestore(&vd->vdec_list_lock, flags); + + if (vd->close_decode) + ret = 1; + + return ret; +} + +static void vdec_put_msg(struct vdec_data *vd, struct vdec_msg *msg) +{ + struct vdec_msg_list *l; + unsigned long flags; + int found = 0; + + spin_lock_irqsave(&vd->vdec_list_lock, flags); + list_for_each_entry(l, &vd->vdec_msg_list_free, list) { + memcpy(&l->vdec_msg, msg, sizeof(struct vdec_msg)); + list_del(&l->list); + list_add(&l->list, &vd->vdec_msg_list_head); + found = 1; + break; + } + spin_unlock_irqrestore(&vd->vdec_list_lock, flags); + + if (found) + wake_up(&vd->vdec_msg_evt); + else + pr_err("vdec_put_msg can't 
find free list!\n"); +} + +static struct vdec_mem_list *vdec_get_mem_from_list(struct vdec_data *vd, + u32 pmem_id, u32 buf_type) +{ + struct vdec_mem_list *l; + unsigned long flags; + int found = 0; + + spin_lock_irqsave(&vd->vdec_mem_list_lock, flags); + list_for_each_entry(l, &vd->vdec_mem_list_head, list) { + if (l->mem.buf_type == buf_type && l->mem.id == pmem_id) { + found = 1; + break; + } + } + spin_unlock_irqrestore(&vd->vdec_mem_list_lock, flags); + + if (found) + return l; + else + return NULL; + +} + +static int vdec_initialize(struct vdec_data *vd, void *argp) +{ + struct vdec_config_sps vdec_cfg_sps; + struct vdec_init_cfg vi_cfg; + struct vdec_buf_req vdec_buf_req; + struct u8 *header; + int ret = 0; + + ret = copy_from_user(&vdec_cfg_sps, + &((struct vdec_init *)argp)->sps_cfg, + sizeof(vdec_cfg_sps)); + + if (ret) { + pr_err("%s: copy_from_user failed\n", __func__); + return ret; + } + + vi_cfg.decode_done_evt = VDEC_ASYNCMSG_DECODE_DONE; + vi_cfg.reuse_frame_evt = VDEC_ASYNCMSG_REUSE_FRAME; + memcpy(&vi_cfg.cfg, &vdec_cfg_sps.cfg, sizeof(struct vdec_config)); + + header = kmalloc(vdec_cfg_sps.seq.len, GFP_KERNEL); + if (!header) { + pr_err("%s: kmalloc failed\n", __func__); + return -ENOMEM; + } + + ret = copy_from_user(header, + ((struct vdec_init *)argp)->sps_cfg.seq.header, + vdec_cfg_sps.seq.len); + + if (ret) { + pr_err("%s: copy_from_user failed\n", __func__); + kfree(header); + return ret; + } + + TRACE("vi_cfg: handle=%p fourcc=0x%x w=%d h=%d order=%d notify_en=%d " + "vc1_rb=%d h264_sd=%d h264_nls=%d pp_flag=%d fruc_en=%d\n", + vd->vdec_handle, vi_cfg.cfg.fourcc, vi_cfg.cfg.width, + vi_cfg.cfg.height, vi_cfg.cfg.order, vi_cfg.cfg.notify_enable, + vi_cfg.cfg.vc1_rowbase, vi_cfg.cfg.h264_startcode_detect, + vi_cfg.cfg.h264_nal_len_size, vi_cfg.cfg.postproc_flag, + vi_cfg.cfg.fruc_enable); + ret = dal_call_f13(vd->vdec_handle, VDEC_DALRPC_INITIALIZE, + &vi_cfg, sizeof(vi_cfg), + header, vdec_cfg_sps.seq.len, + &vdec_buf_req, sizeof(vdec_buf_req)); + + kfree(header); + + if (ret) + pr_err("%s: remote function failed (%d)\n", __func__, ret); + else + ret = copy_to_user(((struct vdec_init *)argp)->buf_req, + &vdec_buf_req, sizeof(vdec_buf_req)); + + vd->close_decode = 0; + return ret; +} + +static int vdec_setbuffers(struct vdec_data *vd, void *argp) +{ + struct vdec_buffer vmem; + struct vdec_mem_list *l; + unsigned long vstart; + unsigned long flags; + struct { + uint32_t size; + struct vdec_buf_info buf; + } rpc; + uint32_t res; + + int ret = 0; + + vd->mem_initialized = 0; + + ret = copy_from_user(&vmem, argp, sizeof(vmem)); + if (ret) { + pr_err("%s: copy_from_user failed\n", __func__); + return ret; + } + + l = kzalloc(sizeof(struct vdec_mem_list), GFP_KERNEL); + if (!l) { + pr_err("%s: kzalloc failed!\n", __func__); + return -ENOMEM; + } + + l->mem.id = vmem.pmem_id; + l->mem.buf_type = vmem.buf.buf_type; + + ret = get_pmem_file(l->mem.id, &l->mem.phys_addr, &vstart, + &l->mem.len, &l->mem.file); + if (ret) { + pr_err("%s: get_pmem_fd failed\n", __func__); + goto err_get_pmem_file; + } + + TRACE("pmem_id=%d (phys=0x%08lx len=0x%lx) buftype=%d num_buf=%d " + "islast=%d src_id=%d offset=0x%08x size=0x%x\n", + vmem.pmem_id, l->mem.phys_addr, l->mem.len, + vmem.buf.buf_type, vmem.buf.num_buf, vmem.buf.islast, + vmem.buf.region.src_id, vmem.buf.region.offset, + vmem.buf.region.size); + + /* input buffers */ + if ((vmem.buf.region.offset + vmem.buf.region.size) > l->mem.len) { + pr_err("%s: invalid input buffer offset!\n", __func__); + ret = -EINVAL; + goto 
err_bad_offset; + + } + vmem.buf.region.offset += l->mem.phys_addr; + + rpc.size = sizeof(vmem.buf); + memcpy(&rpc.buf, &vmem.buf, sizeof(struct vdec_buf_info)); + + + ret = dal_call(vd->vdec_handle, VDEC_DALRPC_SETBUFFERS, 5, + &rpc, sizeof(rpc), &res, sizeof(res)); + + if (ret < 4) { + pr_err("%s: remote function failed (%d)\n", __func__, ret); + ret = -EIO; + goto err_dal_call; + } + + spin_lock_irqsave(&vd->vdec_mem_list_lock, flags); + list_add(&l->list, &vd->vdec_mem_list_head); + spin_unlock_irqrestore(&vd->vdec_mem_list_lock, flags); + + vd->mem_initialized = 1; + return ret; + +err_dal_call: +err_bad_offset: + put_pmem_file(l->mem.file); +err_get_pmem_file: + kfree(l); + return ret; +} + +static int vdec_queue(struct vdec_data *vd, void *argp) +{ + struct { + uint32_t size; + struct vdec_input_buf_info buf_info; + uint32_t osize; + } rpc; + struct vdec_mem_list *l; + struct { + uint32_t result; + uint32_t size; + struct vdec_queue_status status; + } rpc_res; + + u32 pmem_id; + int ret = 0; + + if (!vd->mem_initialized) { + pr_err("%s: memory is not being initialized!\n", __func__); + return -EPERM; + } + + ret = copy_from_user(&rpc.buf_info, + &((struct vdec_input_buf *)argp)->buffer, + sizeof(rpc.buf_info)); + if (ret) { + pr_err("%s: copy_from_user failed\n", __func__); + return ret; + } + + ret = copy_from_user(&pmem_id, + &((struct vdec_input_buf *)argp)->pmem_id, + sizeof(u32)); + if (ret) { + pr_err("%s: copy_from_user failed\n", __func__); + return ret; + } + + l = vdec_get_mem_from_list(vd, pmem_id, VDEC_BUFFER_TYPE_INPUT); + + if (NULL == l) { + pr_err("%s: not able to find the buffer from list\n", __func__); + return -EPERM; + } + + if ((rpc.buf_info.size + rpc.buf_info.offset) >= l->mem.len) { + pr_err("%s: invalid queue buffer offset!\n", __func__); + return -EINVAL; + } + + rpc.buf_info.offset += l->mem.phys_addr; + rpc.size = sizeof(struct vdec_input_buf_info); + rpc.osize = sizeof(struct vdec_queue_status); + + /* complete the writes to the buffer */ + wmb(); + ret = dal_call(vd->vdec_handle, VDEC_DALRPC_QUEUE, 8, + &rpc, sizeof(rpc), &rpc_res, sizeof(rpc_res)); + if (ret < 4) { + pr_err("%s: remote function failed (%d)\n", __func__, ret); + ret = -EIO; + } + return ret; +} + +static int vdec_reuse_framebuffer(struct vdec_data *vd, void *argp) +{ + u32 buf_id; + int ret = 0; + + ret = copy_from_user(&buf_id, argp, sizeof(buf_id)); + if (ret) { + pr_err("%s: copy_from_user failed\n", __func__); + return ret; + } + + ret = dal_call_f0(vd->vdec_handle, VDEC_DALRPC_REUSEFRAMEBUFFER, + buf_id); + if (ret) + pr_err("%s: remote function failed (%d)\n", __func__, ret); + + return ret; +} + +static int vdec_flush(struct vdec_data *vd, void *argp) +{ + u32 flush_type; + int ret = 0; + + ret = copy_from_user(&flush_type, argp, sizeof(flush_type)); + if (ret) { + pr_err("%s: copy_from_user failed\n", __func__); + return ret; + } + + TRACE("flush_type=%d\n", flush_type); + ret = dal_call_f0(vd->vdec_handle, VDEC_DALRPC_FLUSH, flush_type); + if (ret) { + pr_err("%s: remote function failed (%d)\n", __func__, ret); + return ret; + } + + return ret; +} + +static int vdec_close(struct vdec_data *vd, void *argp) +{ + struct vdec_mem_list *l; + int ret = 0; + + pr_info("q6vdec_close()\n"); + vd->close_decode = 1; + wake_up(&vd->vdec_msg_evt); + ret = dal_call_f0(vd->vdec_handle, DAL_OP_CLOSE, 0); + if (ret) + pr_err("%s: failed to close daldevice (%d)\n", __func__, ret); + + if (vd->mem_initialized) { + list_for_each_entry(l, &vd->vdec_mem_list_head, list) + put_pmem_file(l->mem.file); 
+ } + + return ret; +} +static int vdec_getdecattributes(struct vdec_data *vd, void *argp) +{ + struct { + uint32_t status; + uint32_t size; + struct vdec_dec_attributes dec_attr; + } rpc; + uint32_t inp; + int ret = 0; + inp = sizeof(struct vdec_dec_attributes); + + ret = dal_call(vd->vdec_handle, VDEC_DALRPC_GETDECATTRIBUTES, 9, + &inp, sizeof(inp), &rpc, sizeof(rpc)); + if (ret < 4 || rpc.size != sizeof(struct vdec_dec_attributes)) { + pr_err("%s: remote function failed (%d)\n", __func__, ret); + ret = -EIO; + } else + ret = + copy_to_user(((struct vdec_dec_attributes *)argp), + &rpc.dec_attr, sizeof(rpc.dec_attr)); + return ret; +} + +static int vdec_freebuffers(struct vdec_data *vd, void *argp) +{ + struct vdec_buffer vmem; + struct vdec_mem_list *l; + struct { + uint32_t size; + struct vdec_buf_info buf; + } rpc; + uint32_t res; + + int ret = 0; + + if (!vd->mem_initialized) { + pr_err("%s: memory is not being initialized!\n", __func__); + return -EPERM; + } + + ret = copy_from_user(&vmem, argp, sizeof(vmem)); + if (ret) { + pr_err("%s: copy_from_user failed\n", __func__); + return ret; + } + + l = vdec_get_mem_from_list(vd, vmem.pmem_id, vmem.buf.buf_type); + + if (NULL == l) { + pr_err("%s: not able to find the buffer from list\n", __func__); + return -EPERM; + } + + /* input buffers */ + if ((vmem.buf.region.offset + vmem.buf.region.size) > l->mem.len) { + pr_err("%s: invalid input buffer offset!\n", __func__); + return -EINVAL; + + } + vmem.buf.region.offset += l->mem.phys_addr; + + rpc.size = sizeof(vmem.buf); + memcpy(&rpc.buf, &vmem.buf, sizeof(struct vdec_buf_info)); + + ret = dal_call(vd->vdec_handle, VDEC_DALRPC_FREEBUFFERS, 5, + &rpc, sizeof(rpc), &res, sizeof(res)); + if (ret < 4) { + pr_err("%s: remote function failed (%d)\n", __func__, ret); + } + + return ret; +} + +static int vdec_getversion(struct vdec_data *vd, void *argp) +{ + struct vdec_version ver_info; + int ret = 0; + + ver_info.major = VDEC_GET_MAJOR_VERSION(VDEC_INTERFACE_VERSION); + ver_info.minor = VDEC_GET_MINOR_VERSION(VDEC_INTERFACE_VERSION); + + ret = copy_to_user(((struct vdec_version *)argp), + &ver_info, sizeof(ver_info)); + + return ret; + +} + +static long vdec_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct vdec_data *vd = file->private_data; + void __user *argp = (void __user *)arg; + int ret = 0; + + if (!vd->running) + return -EPERM; + + switch (cmd) { + case VDEC_IOCTL_INITIALIZE: + ret = vdec_initialize(vd, argp); + break; + + case VDEC_IOCTL_SETBUFFERS: + ret = vdec_setbuffers(vd, argp); + break; + + case VDEC_IOCTL_QUEUE: + TRACE("VDEC_IOCTL_QUEUE (pid=%d tid=%d)\n", + current->group_leader->pid, current->pid); + ret = vdec_queue(vd, argp); + break; + + case VDEC_IOCTL_REUSEFRAMEBUFFER: + TRACE("VDEC_IOCTL_REUSEFRAMEBUFFER (pid=%d tid=%d)\n", + current->group_leader->pid, current->pid); + ret = vdec_reuse_framebuffer(vd, argp); + break; + + case VDEC_IOCTL_FLUSH: + ret = vdec_flush(vd, argp); + break; + + case VDEC_IOCTL_EOS: + TRACE("VDEC_IOCTL_EOS (pid=%d tid=%d)\n", + current->group_leader->pid, current->pid); + ret = dal_call_f0(vd->vdec_handle, VDEC_DALRPC_SIGEOFSTREAM, 0); + if (ret) + pr_err("%s: remote function failed (%d)\n", + __func__, ret); + break; + + case VDEC_IOCTL_GETMSG: + TRACE("VDEC_IOCTL_GETMSG (pid=%d tid=%d)\n", + current->group_leader->pid, current->pid); + wait_event_interruptible(vd->vdec_msg_evt, + vdec_get_msg(vd, argp)); + + if (vd->close_decode) + ret = -EINTR; + else + /* order the reads from the buffer */ + rmb(); + break; + + case 
VDEC_IOCTL_CLOSE: + ret = vdec_close(vd, argp); + break; + + case VDEC_IOCTL_GETDECATTRIBUTES: + TRACE("VDEC_IOCTL_GETDECATTRIBUTES (pid=%d tid=%d)\n", + current->group_leader->pid, current->pid); + ret = vdec_getdecattributes(vd, argp); + + if (ret) + pr_err("%s: remote function failed (%d)\n", + __func__, ret); + break; + + case VDEC_IOCTL_FREEBUFFERS: + TRACE("VDEC_IOCTL_FREEBUFFERS (pid=%d tid=%d)\n", + current->group_leader->pid, current->pid); + ret = vdec_freebuffers(vd, argp); + + if (ret) + pr_err("%s: remote function failed (%d)\n", + __func__, ret); + break; + case VDEC_IOCTL_GETVERSION: + TRACE("VDEC_IOCTL_GETVERSION (pid=%d tid=%d)\n", + current->group_leader->pid, current->pid); + ret = vdec_getversion(vd, argp); + + if (ret) + pr_err("%s: remote function failed (%d)\n", + __func__, ret); + break; + default: + pr_err("%s: invalid ioctl!\n", __func__); + ret = -EINVAL; + break; + } + + TRACE("ioctl done (pid=%d tid=%d)\n", + current->group_leader->pid, current->pid); + + return ret; +} + +static void vdec_dcdone_handler(struct vdec_data *vd, void *frame, + uint32_t frame_size) +{ + struct vdec_msg msg; + struct vdec_mem_list *l; + unsigned long flags; + int found = 0; + +/*if (frame_size != sizeof(struct vdec_frame_info)) {*/ + if (frame_size < sizeof(struct vdec_frame_info)) { + pr_warning("%s: msg size mismatch %d != %d\n", __func__, + frame_size, sizeof(struct vdec_frame_info)); + return; + } + + memcpy(&msg.vfr_info, (struct vdec_frame_info *)frame, + sizeof(struct vdec_frame_info)); + + if (msg.vfr_info.status == VDEC_FRAME_DECODE_OK) { + spin_lock_irqsave(&vd->vdec_mem_list_lock, flags); + list_for_each_entry(l, &vd->vdec_mem_list_head, list) { + if ((l->mem.buf_type == VDEC_BUFFER_TYPE_OUTPUT) && + (msg.vfr_info.offset >= l->mem.phys_addr) && + (msg.vfr_info.offset < + (l->mem.phys_addr + l->mem.len))) { + found = 1; + msg.vfr_info.offset -= l->mem.phys_addr; + msg.vfr_info.data2 = l->mem.id; + break; + } + } + spin_unlock_irqrestore(&vd->vdec_mem_list_lock, flags); + } + + if (found || (msg.vfr_info.status != VDEC_FRAME_DECODE_OK)) { + msg.id = VDEC_MSG_FRAMEDONE; + vdec_put_msg(vd, &msg); + } else { + pr_err("%s: invalid phys addr = 0x%x\n", + __func__, msg.vfr_info.offset); + } + +} + +static void vdec_reuseibuf_handler(struct vdec_data *vd, void *bufstat, + uint32_t bufstat_size) +{ + struct vdec_buffer_status *vdec_bufstat; + struct vdec_msg msg; + + /* TODO: how do we signal the client? 
If they are waiting on a + * message in an ioctl, they may block forever */ + if (bufstat_size != sizeof(struct vdec_buffer_status)) { + pr_warning("%s: msg size mismatch %d != %d\n", __func__, + bufstat_size, sizeof(struct vdec_buffer_status)); + return; + } + vdec_bufstat = (struct vdec_buffer_status *)bufstat; + msg.id = VDEC_MSG_REUSEINPUTBUFFER; + msg.buf_id = vdec_bufstat->data; + vdec_put_msg(vd, &msg); +} + +static void callback(void *data, int len, void *cookie) +{ + struct vdec_data *vd = (struct vdec_data *)cookie; + uint32_t *tmp = (uint32_t *) data; + + if (!vd->mem_initialized) { + pr_err("%s:memory not initialize but callback called!\n", + __func__); + return; + } + + TRACE("vdec_async: tmp=0x%08x 0x%08x 0x%08x\n", tmp[0], tmp[1], tmp[2]); + switch (tmp[0]) { + case VDEC_ASYNCMSG_DECODE_DONE: + vdec_dcdone_handler(vd, &tmp[3], tmp[2]); + break; + case VDEC_ASYNCMSG_REUSE_FRAME: + vdec_reuseibuf_handler(vd, &tmp[3], tmp[2]); + break; + default: + pr_err("%s: Unknown async message from DSP id=0x%08x sz=%u\n", + __func__, tmp[0], tmp[2]); + } +} +static int vdec_open(struct inode *inode, struct file *file) +{ + int ret; + int i; + struct vdec_msg_list *l; + struct vdec_data *vd; + struct dal_info version_info; + + pr_info("q6vdec_open()\n"); + mutex_lock(&vdec_ref_lock); + if (ref_cnt >= MAX_SUPPORTED_INSTANCES) { + pr_err("%s: Max allowed instances exceeded \n", __func__); + mutex_unlock(&vdec_ref_lock); + return -EBUSY; + } + ref_cnt++; + mutex_unlock(&vdec_ref_lock); + vd = kmalloc(sizeof(struct vdec_data), GFP_KERNEL); + if (!vd) { + pr_err("%s: kmalloc failed\n", __func__); + ret = -ENOMEM; + goto vdec_open_err_handle_vd; + } + file->private_data = vd; + + vd->mem_initialized = 0; + INIT_LIST_HEAD(&vd->vdec_msg_list_head); + INIT_LIST_HEAD(&vd->vdec_msg_list_free); + INIT_LIST_HEAD(&vd->vdec_mem_list_head); + init_waitqueue_head(&vd->vdec_msg_evt); + + spin_lock_init(&vd->vdec_list_lock); + spin_lock_init(&vd->vdec_mem_list_lock); + for (i = 0; i < VDEC_MSG_MAX; i++) { + l = kzalloc(sizeof(struct vdec_msg_list), GFP_KERNEL); + if (!l) { + pr_err("%s: kzalloc failed!\n", __func__); + ret = -ENOMEM; + goto vdec_open_err_handle_list; + } + list_add(&l->list, &vd->vdec_msg_list_free); + } + + vd->vdec_handle = dal_attach(DALDEVICEID_VDEC_DEVICE, + DALDEVICEID_VDEC_PORTNAME, + callback, vd); + + if (!vd->vdec_handle) { + pr_err("%s: failed to attach \n", __func__); + ret = -EIO; + goto vdec_open_err_handle_list; + } + ret = dal_call_f9(vd->vdec_handle, DAL_OP_INFO, + &version_info, sizeof(struct dal_info)); + + if (ret) { + pr_err("%s: failed to get version \n", __func__); + goto vdec_open_err_handle_version; + } + + TRACE("q6vdec_open() interface version 0x%x\n", version_info.version); + if (vdec_check_version(VDEC_INTERFACE_VERSION, + version_info.version)) { + pr_err("%s: driver version mismatch !\n", __func__); + goto vdec_open_err_handle_version; + } + + vd->running = 1; + prevent_sleep(); + return 0; +vdec_open_err_handle_version: + dal_detach(vd->vdec_handle); +vdec_open_err_handle_list: + { + struct vdec_msg_list *l, *n; + list_for_each_entry_safe(l, n, &vd->vdec_msg_list_free, list) { + list_del(&l->list); + kfree(l); + } + } +vdec_open_err_handle_vd: + mutex_lock(&vdec_ref_lock); + ref_cnt--; + mutex_unlock(&vdec_ref_lock); + kfree(vd); + return ret; +} + +static int vdec_release(struct inode *inode, struct file *file) +{ + int ret; + struct vdec_msg_list *l, *n; + struct vdec_mem_list *m, *k; + struct vdec_data *vd = file->private_data; + + vd->running = 0; + 
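+	/*
+	 * Teardown mirrors open/ioctl: clear 'running' so any further
+	 * ioctl is rejected with -EPERM, wake readers blocked in
+	 * VDEC_IOCTL_GETMSG, send the DSP a close if userspace never
+	 * issued VDEC_IOCTL_CLOSE, and only then detach from the DAL
+	 * channel and free the message and pmem bookkeeping lists.
+	 */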
wake_up_all(&vd->vdec_msg_evt); + + if (!vd->close_decode) + vdec_close(vd, NULL); + + ret = dal_detach(vd->vdec_handle); + if (ret) + printk(KERN_INFO "%s: failed to detach (%d)\n", __func__, ret); + + list_for_each_entry_safe(l, n, &vd->vdec_msg_list_free, list) { + list_del(&l->list); + kfree(l); + } + + list_for_each_entry_safe(l, n, &vd->vdec_msg_list_head, list) { + list_del(&l->list); + kfree(l); + } + + list_for_each_entry_safe(m, k, &vd->vdec_mem_list_head, list) { + list_del(&m->list); + kfree(m); + } + mutex_lock(&vdec_ref_lock); + BUG_ON(ref_cnt <= 0); + ref_cnt--; + mutex_unlock(&vdec_ref_lock); + kfree(vd); + allow_sleep(); + return 0; +} + +static const struct file_operations vdec_fops = { + .owner = THIS_MODULE, + .open = vdec_open, + .release = vdec_release, + .unlocked_ioctl = vdec_ioctl, +}; + +static int __init vdec_init(void) +{ + struct device *class_dev; + int rc = 0; + + wake_lock_init(&idlelock, WAKE_LOCK_IDLE, "vdec_idle"); + wake_lock_init(&wakelock, WAKE_LOCK_SUSPEND, "vdec_suspend"); + + rc = alloc_chrdev_region(&vdec_device_no, 0, 1, "vdec"); + if (rc < 0) { + pr_err("%s: alloc_chrdev_region failed %d\n", __func__, rc); + return rc; + } + + driver_class = class_create(THIS_MODULE, "vdec"); + if (IS_ERR(driver_class)) { + rc = -ENOMEM; + pr_err("%s: class_create failed %d\n", __func__, rc); + goto vdec_init_err_unregister_chrdev_region; + } + class_dev = device_create(driver_class, NULL, + vdec_device_no, NULL, "vdec"); + if (!class_dev) { + pr_err("%s: class_device_create failed %d\n", __func__, rc); + rc = -ENOMEM; + goto vdec_init_err_class_destroy; + } + + cdev_init(&vdec_cdev, &vdec_fops); + vdec_cdev.owner = THIS_MODULE; + rc = cdev_add(&vdec_cdev, MKDEV(MAJOR(vdec_device_no), 0), 1); + + if (rc < 0) { + pr_err("%s: cdev_add failed %d\n", __func__, rc); + goto vdec_init_err_class_device_destroy; + } + + return 0; + +vdec_init_err_class_device_destroy: + device_destroy(driver_class, vdec_device_no); +vdec_init_err_class_destroy: + class_destroy(driver_class); +vdec_init_err_unregister_chrdev_region: + unregister_chrdev_region(vdec_device_no, 1); + return rc; +} + +static void __exit vdec_exit(void) +{ + device_destroy(driver_class, vdec_device_no); + class_destroy(driver_class); + unregister_chrdev_region(vdec_device_no, 1); +} + +MODULE_DESCRIPTION("video decoder driver for QSD platform"); +MODULE_VERSION("2.00"); + +module_init(vdec_init); +module_exit(vdec_exit); diff --git a/arch/arm/mach-msm/qdsp6/msm_q6venc.c b/arch/arm/mach-msm/qdsp6/msm_q6venc.c new file mode 100644 index 0000000000000..7b889767476fa --- /dev/null +++ b/arch/arm/mach-msm/qdsp6/msm_q6venc.c @@ -0,0 +1,1261 @@ +/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Code Aurora Forum nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. 
+ * + * Alternatively, provided that this notice is retained in full, this software + * may be relicensed by the recipient under the terms of the GNU General Public + * License version 2 ("GPL") and only version 2, in which case the provisions of + * the GPL apply INSTEAD OF those given above. If the recipient relicenses the + * software under the GPL, then the identification text in the MODULE_LICENSE + * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a + * recipient changes the license terms to the GPL, subsequent recipients shall + * not relicense under alternate licensing terms, including the BSD or dual + * BSD/GPL terms. In addition, the following license statement immediately + * below and between the words START and END shall also then apply when this + * software is relicensed under the GPL: + * + * START + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 and only version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * END + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../dal.h" + +#define DALDEVICEID_VENC_DEVICE 0x0200002D +/*#define DALDEVICEID_VENC_PORTNAME "DAL_AQ_VID"*/ +#define DALDEVICEID_VENC_PORTNAME "DSP_DAL_AQ_VID" + +#define VENC_NAME "q6venc" +#define VENC_MSG_MAX 128 + +#define VENC_INTERFACE_VERSION 0x00020000 +#define MAJOR_MASK 0xFFFF0000 +#define MINOR_MASK 0x0000FFFF +#define VENC_GET_MAJOR_VERSION(version) ((version & MAJOR_MASK)>>16) +#define VENC_GET_MINOR_VERSION(version) (version & MINOR_MASK) + +uint32_t kpi_start[5]; +uint32_t kpi_end; +static uint32_t cnt = 0; + +enum { + VENC_BUFFER_TYPE_INPUT, + VENC_BUFFER_TYPE_OUTPUT, + VENC_BUFFER_TYPE_QDSP6, + VENC_BUFFER_TYPE_HDR +}; +enum { + VENC_DALRPC_GET_SYNTAX_HEADER = DAL_OP_FIRST_DEVICE_API, + VENC_DALRPC_UPDATE_INTRA_REFRESH, + VENC_DALRPC_UPDATE_FRAME_RATE, + VENC_DALRPC_UPDATE_BITRATE, + VENC_DALRPC_UPDATE_QP_RANGE, + VENC_DALRPC_UPDATE_INTRA_PERIOD, + VENC_DALRPC_REQUEST_IFRAME, + VENC_DALRPC_START, + VENC_DALRPC_STOP, + VENC_DALRPC_SUSPEND, + VENC_DALRPC_RESUME, + VENC_DALRPC_FLUSH, + VENC_DALRPC_QUEUE_INPUT, + VENC_DALRPC_QUEUE_OUTPUT +}; +struct venc_input_payload { + u32 data; +}; +struct venc_output_payload { + u32 size; + long long time_stamp; + u32 flags; + u32 data; + u32 client_data_from_input; +}; +union venc_payload { + struct venc_input_payload input_payload; + struct venc_output_payload output_payload; +}; +struct venc_msg_type { + u32 event; + u32 status; + union venc_payload payload; +}; +struct venc_input_buf { + struct venc_buf_type yuv_buf; + u32 data_size; + long long time_stamp; + u32 flags; + u32 dvs_offsetx; + u32 dvs_offsety; + u32 client_data; + u32 op_client_data; +}; +struct venc_output_buf { + struct venc_buf_type bit_stream_buf; + u32 client_data; +}; + +struct venc_msg_list { + struct list_head list; + struct venc_msg msg_data; +}; +struct venc_buf { + int fd; + u32 offset; + u32 size; + u32 btype; + unsigned long paddr; + struct file *file; +}; +struct venc_pmem_list { + struct list_head list; + struct venc_buf buf; +}; +struct venc_dev { + bool is_active; + bool pmem_freed; + enum venc_state_type state; + struct list_head venc_msg_list_head; + struct list_head venc_msg_list_free; + spinlock_t venc_msg_list_lock; + struct list_head venc_pmem_list_head; + spinlock_t venc_pmem_list_lock; + struct dal_client *q6_handle; + wait_queue_head_t venc_msg_evt; + struct device *class_devp; +}; + +#define DEBUG_VENC 0 +#if DEBUG_VENC +#define TRACE(fmt, x...) \ + do { pr_debug("%s:%d " fmt, __func__, __LINE__, ##x); } while (0) +#else +#define TRACE(fmt, x...) 
do { } while (0) +#endif + +static struct cdev cdev; +static dev_t venc_dev_num; +static struct class *venc_class; +static struct venc_dev *venc_device_p; +static int venc_ref; + +static DEFINE_MUTEX(idlecount_lock); +static int idlecount; +static struct wake_lock wakelock; +static struct wake_lock idlelock; + +static void prevent_sleep(void) +{ + mutex_lock(&idlecount_lock); + if (++idlecount == 1) { + wake_lock(&idlelock); + wake_lock(&wakelock); + } + mutex_unlock(&idlecount_lock); +} + +static void allow_sleep(void) +{ + mutex_lock(&idlecount_lock); + if (--idlecount == 0) { + wake_unlock(&idlelock); + wake_unlock(&wakelock); + } + mutex_unlock(&idlecount_lock); +} + +static inline int venc_check_version(u32 client, u32 server) +{ + int ret = -EINVAL; + + if ((VENC_GET_MAJOR_VERSION(client) == VENC_GET_MAJOR_VERSION(server)) + && (VENC_GET_MINOR_VERSION(client) <= + VENC_GET_MINOR_VERSION(server))) + ret = 0; + + return ret; +} + +static int venc_get_msg(struct venc_dev *dvenc, void *msg) +{ + struct venc_msg_list *l; + unsigned long flags; + int ret = 0; + struct venc_msg qdsp_msg; + + if (!dvenc->is_active) + return -EPERM; + spin_lock_irqsave(&dvenc->venc_msg_list_lock, flags); + list_for_each_entry_reverse(l, &dvenc->venc_msg_list_head, list) { + memcpy(&qdsp_msg, &l->msg_data, sizeof(struct venc_msg)); + list_del(&l->list); + list_add(&l->list, &dvenc->venc_msg_list_free); + ret = 1; + break; + } + spin_unlock_irqrestore(&dvenc->venc_msg_list_lock, flags); + if (copy_to_user(msg, &qdsp_msg, sizeof(struct venc_msg))) + pr_err("%s failed to copy_to_user\n", __func__); + return ret; +} + +static void venc_put_msg(struct venc_dev *dvenc, struct venc_msg *msg) +{ + struct venc_msg_list *l; + unsigned long flags; + int found = 0; + + spin_lock_irqsave(&dvenc->venc_msg_list_lock, flags); + list_for_each_entry(l, &dvenc->venc_msg_list_free, list) { + memcpy(&l->msg_data, msg, sizeof(struct venc_msg)); + list_del(&l->list); + list_add(&l->list, &dvenc->venc_msg_list_head); + found = 1; + break; + } + spin_unlock_irqrestore(&dvenc->venc_msg_list_lock, flags); + if (found) + wake_up(&dvenc->venc_msg_evt); + else + pr_err("%s: failed to find a free node\n", __func__); + +} + +static struct venc_pmem_list *venc_add_pmem_to_list(struct venc_dev *dvenc, + struct venc_pmem *mptr, + u32 btype) +{ + int ret = 0; + unsigned long flags; + unsigned long len; + unsigned long vaddr; + struct venc_pmem_list *plist = NULL; + + plist = kzalloc(sizeof(struct venc_pmem_list), GFP_KERNEL); + if (!plist) { + pr_err("%s: kzalloc failed\n", __func__); + return NULL; + } + + ret = get_pmem_file(mptr->fd, &(plist->buf.paddr), + &vaddr, &len, &(plist->buf.file)); + if (ret) { + pr_err("%s: get_pmem_file failed for fd=%d offset=%d\n", + __func__, mptr->fd, mptr->offset); + goto err_venc_add_pmem; + } else if (mptr->offset >= len) { + pr_err("%s: invalid offset (%d > %ld) for fd=%d\n", + __func__, mptr->offset, len, mptr->fd); + ret = -EINVAL; + goto err_venc_get_pmem; + } + + plist->buf.fd = mptr->fd; + plist->buf.paddr += mptr->offset; + plist->buf.size = mptr->size; + plist->buf.btype = btype; + plist->buf.offset = mptr->offset; + + spin_lock_irqsave(&dvenc->venc_pmem_list_lock, flags); + list_add(&plist->list, &dvenc->venc_pmem_list_head); + spin_unlock_irqrestore(&dvenc->venc_pmem_list_lock, flags); + return plist; + +err_venc_get_pmem: + put_pmem_file(plist->buf.file); +err_venc_add_pmem: + kfree(plist); + return NULL; +} + +static struct venc_pmem_list *venc_get_pmem_from_list( + struct venc_dev *dvenc, u32 
pmem_fd, + u32 offset, u32 btype) +{ + struct venc_pmem_list *plist; + unsigned long flags; + struct file *file; + int found = 0; + + file = fget(pmem_fd); + if (!file) { + pr_err("%s: invalid encoder buffer fd(%d)\n", __func__, + pmem_fd); + return NULL; + } + spin_lock_irqsave(&dvenc->venc_pmem_list_lock, flags); + list_for_each_entry(plist, &dvenc->venc_pmem_list_head, list) { + if (plist->buf.btype == btype && plist->buf.file == file && + plist->buf.offset == offset) { + found = 1; + break; + } + } + spin_unlock_irqrestore(&dvenc->venc_pmem_list_lock, flags); + fput(file); + if (found) + return plist; + + else + return NULL; +} + +static int venc_set_buffer(struct venc_dev *dvenc, void *argp, + u32 btype) +{ + struct venc_pmem pmem; + struct venc_pmem_list *plist; + int ret = 0; + + ret = copy_from_user(&pmem, argp, sizeof(pmem)); + if (ret) { + pr_err("%s: copy_from_user failed\n", __func__); + return ret; + } + plist = venc_add_pmem_to_list(dvenc, &pmem, btype); + if (plist == NULL) { + pr_err("%s: buffer add_to_pmem_list failed\n", + __func__); + return -EPERM; + } + return ret; +} + +static int venc_assign_q6_buffers(struct venc_dev *dvenc, + struct venc_buffers *pbufs, + struct venc_nonio_buf_config *pcfg) +{ + int ret = 0; + struct venc_pmem_list *plist; + + plist = venc_add_pmem_to_list(dvenc, &(pbufs->recon_buf[0]), + VENC_BUFFER_TYPE_QDSP6); + if (plist == NULL) { + pr_err("%s: recon_buf0 failed to add_to_pmem_list\n", + __func__); + return -EPERM; + } + pcfg->recon_buf1.region = pbufs->recon_buf[0].src; + pcfg->recon_buf1.phys = plist->buf.paddr; + pcfg->recon_buf1.size = plist->buf.size; + pcfg->recon_buf1.offset = 0; + + plist = venc_add_pmem_to_list(dvenc, &(pbufs->recon_buf[1]), + VENC_BUFFER_TYPE_QDSP6); + if (plist == NULL) { + pr_err("%s: recons_buf1 failed to add_to_pmem_list\n", + __func__); + return -EPERM; + } + pcfg->recon_buf2.region = pbufs->recon_buf[1].src; + pcfg->recon_buf2.phys = plist->buf.paddr; + pcfg->recon_buf2.size = plist->buf.size; + pcfg->recon_buf2.offset = 0; + + plist = venc_add_pmem_to_list(dvenc, &(pbufs->wb_buf), + VENC_BUFFER_TYPE_QDSP6); + if (plist == NULL) { + pr_err("%s: wb_buf failed to add_to_pmem_list\n", + __func__); + return -EPERM; + } + pcfg->wb_buf.region = pbufs->wb_buf.src; + pcfg->wb_buf.phys = plist->buf.paddr; + pcfg->wb_buf.size = plist->buf.size; + pcfg->wb_buf.offset = 0; + + plist = venc_add_pmem_to_list(dvenc, &(pbufs->cmd_buf), + VENC_BUFFER_TYPE_QDSP6); + if (plist == NULL) { + pr_err("%s: cmd_buf failed to add_to_pmem_list\n", + __func__); + return -EPERM; + } + pcfg->cmd_buf.region = pbufs->cmd_buf.src; + pcfg->cmd_buf.phys = plist->buf.paddr; + pcfg->cmd_buf.size = plist->buf.size; + pcfg->cmd_buf.offset = 0; + + plist = venc_add_pmem_to_list(dvenc, &(pbufs->vlc_buf), + VENC_BUFFER_TYPE_QDSP6); + if (plist == NULL) { + pr_err("%s: vlc_buf failed to add_to_pmem_list" + " failed\n", __func__); + return -EPERM; + } + pcfg->vlc_buf.region = pbufs->vlc_buf.src; + pcfg->vlc_buf.phys = plist->buf.paddr; + pcfg->vlc_buf.size = plist->buf.size; + pcfg->vlc_buf.offset = 0; + + return ret; +} + +static int venc_start(struct venc_dev *dvenc, void *argp) +{ + int ret = 0; + struct venc_q6_config q6_config; + struct venc_init_config vconfig; + + dvenc->state = VENC_STATE_START; + ret = copy_from_user(&vconfig, argp, sizeof(struct venc_init_config)); + if (ret) { + pr_err("%s: copy_from_user failed\n", __func__); + return ret; + } + memcpy(&q6_config, &(vconfig.q6_config), sizeof(q6_config)); + ret = venc_assign_q6_buffers(dvenc, 
&(vconfig.q6_bufs), + &(q6_config.buf_params)); + if (ret != 0) { + pr_err("%s: assign_q6_buffers failed\n", __func__); + return -EPERM; + } + + q6_config.callback_event = dvenc->q6_handle; + TRACE("%s: parameters: handle:%p, config:%p, callback:%p \n", __func__, + dvenc->q6_handle, &q6_config, q6_config.callback_event); + TRACE("%s: parameters:recon1:0x%x, recon2:0x%x," + " wb_buf:0x%x, cmd:0x%x, vlc:0x%x\n", __func__, + q6_config.buf_params.recon_buf1.phys, + q6_config.buf_params.recon_buf2.phys, + q6_config.buf_params.wb_buf.phys, + q6_config.buf_params.cmd_buf.phys, + q6_config.buf_params.vlc_buf.phys); + TRACE("%s: size of param:%d \n", __func__, sizeof(q6_config)); + ret = dal_call_f5(dvenc->q6_handle, VENC_DALRPC_START, &q6_config, + sizeof(q6_config)); + if (ret != 0) { + pr_err("%s: remote function failed (%d)\n", __func__, ret); + return ret; + } + return ret; +} + +static int venc_encode_frame(struct venc_dev *dvenc, void *argp) +{ + int ret = 0; + struct venc_pmem buf; + struct venc_input_buf q6_input; + struct venc_pmem_list *plist; + struct venc_buffer input; + + ret = copy_from_user(&input, argp, sizeof(struct venc_buffer)); + if (ret) { + pr_err("%s: copy_from_user failed\n", __func__); + return ret; + } + ret = copy_from_user(&buf, + ((struct venc_buffer *)argp)->ptr_buffer, + sizeof(struct venc_pmem)); + if (ret) { + pr_err("%s: copy_from_user failed\n", __func__); + return ret; + } + + plist = venc_get_pmem_from_list(dvenc, buf.fd, buf.offset, + VENC_BUFFER_TYPE_INPUT); + if (NULL == plist) { + plist = venc_add_pmem_to_list(dvenc, &buf, + VENC_BUFFER_TYPE_INPUT); + if (plist == NULL) { + pr_err("%s: buffer add_to_pmem_list failed\n", + __func__); + return -EPERM; + } + } + + q6_input.flags = 0; + if (input.flags & VENC_FLAG_EOS) + q6_input.flags |= 0x00000001; + q6_input.yuv_buf.region = 0; + q6_input.yuv_buf.phys = plist->buf.paddr; + q6_input.yuv_buf.size = plist->buf.size; + q6_input.yuv_buf.offset = 0; + q6_input.data_size = plist->buf.size; + q6_input.client_data = (u32)input.client_data; + q6_input.time_stamp = input.time_stamp; + q6_input.dvs_offsetx = 0; + q6_input.dvs_offsety = 0; + + +kpi_start[cnt] = ktime_to_ns(ktime_get()); +TRACE("kpi_start %d, %u \n", cnt, kpi_start[cnt]); +cnt++; + + TRACE("Pushing down input phys=0x%x fd= %d, client_data: 0x%x," + " time_stamp:%lld \n", q6_input.yuv_buf.phys, plist->buf.fd, + input.client_data, input.time_stamp); + ret = dal_call_f5(dvenc->q6_handle, VENC_DALRPC_QUEUE_INPUT, + &q6_input, sizeof(q6_input)); + + if (ret != 0) + pr_err("%s: Q6 queue_input failed (%d)\n", __func__, + (int)ret); + return ret; +} + +static int venc_fill_output(struct venc_dev *dvenc, void *argp) +{ + int ret = 0; + struct venc_pmem buf; + struct venc_output_buf q6_output; + struct venc_pmem_list *plist; + struct venc_buffer output; + + ret = copy_from_user(&output, argp, sizeof(struct venc_buffer)); + if (ret) { + pr_err("%s: copy_from_user failed\n", __func__); + return ret; + } + ret = copy_from_user(&buf, + ((struct venc_buffer *)argp)->ptr_buffer, + sizeof(struct venc_pmem)); + if (ret) { + pr_err("%s: copy_from_user failed\n", __func__); + return ret; + } + plist = venc_get_pmem_from_list(dvenc, buf.fd, buf.offset, + VENC_BUFFER_TYPE_OUTPUT); + if (NULL == plist) { + plist = venc_add_pmem_to_list(dvenc, &buf, + VENC_BUFFER_TYPE_OUTPUT); + if (NULL == plist) { + pr_err("%s: output buffer failed to add_to_pmem_list" + "\n", __func__); + return -EPERM; + } + } + q6_output.bit_stream_buf.region = 0; + q6_output.bit_stream_buf.phys = 
(u32)plist->buf.paddr; + q6_output.bit_stream_buf.size = plist->buf.size; + q6_output.bit_stream_buf.offset = 0; + q6_output.client_data = (u32)output.client_data; + ret = + dal_call_f5(dvenc->q6_handle, VENC_DALRPC_QUEUE_OUTPUT, &q6_output, + sizeof(q6_output)); + if (ret != 0) + pr_err("%s: remote function failed (%d)\n", __func__, ret); + return ret; +} + +static int venc_stop(struct venc_dev *dvenc) +{ + int ret = 0; + struct venc_msg msg; + + ret = dal_call_f0(dvenc->q6_handle, VENC_DALRPC_STOP, 1); + if (ret) { + pr_err("%s: remote runction failed (%d)\n", __func__, ret); + msg.msg_code = VENC_MSG_STOP; + msg.msg_data_size = 0; + msg.status_code = VENC_S_EFAIL; + venc_put_msg(dvenc, &msg); + } + return ret; +} + +static int venc_pause(struct venc_dev *dvenc) +{ + int ret = 0; + struct venc_msg msg; + + ret = dal_call_f0(dvenc->q6_handle, VENC_DALRPC_SUSPEND, 1); + if (ret) { + pr_err("%s: remote function failed (%d)\n", __func__, ret); + msg.msg_code = VENC_MSG_PAUSE; + msg.status_code = VENC_S_EFAIL; + msg.msg_data_size = 0; + venc_put_msg(dvenc, &msg); + } + return ret; +} + +static int venc_resume(struct venc_dev *dvenc) +{ + int ret = 0; + struct venc_msg msg; + + ret = dal_call_f0(dvenc->q6_handle, VENC_DALRPC_RESUME, 1); + if (ret) { + pr_err("%s: remote function failed (%d)\n", __func__, ret); + msg.msg_code = VENC_MSG_RESUME; + msg.msg_data_size = 0; + msg.status_code = VENC_S_EFAIL; + venc_put_msg(dvenc, &msg); + } + return ret; +} + +static int venc_flush(struct venc_dev *dvenc, void *argp) +{ + int ret = 0; + struct venc_msg msg; + union venc_msg_data smsg; + int status = VENC_S_SUCCESS; + struct venc_buffer_flush flush; + + if (copy_from_user(&flush, argp, sizeof(struct venc_buffer_flush))) + return -EFAULT; + if (flush.flush_mode == VENC_FLUSH_ALL) { + ret = dal_call_f0(dvenc->q6_handle, VENC_DALRPC_FLUSH, 1); + if (ret) + status = VENC_S_EFAIL; + } else + status = VENC_S_ENOTSUPP; + + if (status != VENC_S_SUCCESS) { + if ((flush.flush_mode == VENC_FLUSH_INPUT) || + (flush.flush_mode == VENC_FLUSH_ALL)) { + smsg.flush_ret.flush_mode = VENC_FLUSH_INPUT; + msg.msg_data = smsg; + msg.status_code = status; + msg.msg_code = VENC_MSG_FLUSH; + msg.msg_data_size = sizeof(union venc_msg_data); + venc_put_msg(dvenc, &msg); + } + if (flush.flush_mode == VENC_FLUSH_OUTPUT || + (flush.flush_mode == VENC_FLUSH_ALL)) { + smsg.flush_ret.flush_mode = VENC_FLUSH_OUTPUT; + msg.msg_data = smsg; + msg.status_code = status; + msg.msg_code = VENC_MSG_FLUSH; + msg.msg_data_size = sizeof(union venc_msg_data); + venc_put_msg(dvenc, &msg); + } + return -EIO; + } + return ret; +} + +static int venc_get_sequence_hdr(struct venc_dev *dvenc, void *argp) +{ + pr_err("%s not supported\n", __func__); + return -EIO; +} + +static int venc_set_qp_range(struct venc_dev *dvenc, void *argp) +{ + int ret = 0; + struct venc_qp_range qp; + + ret = copy_from_user(&qp, argp, sizeof(struct venc_qp_range)); + if (ret) { + pr_err("%s: copy_from_user failed\n", __func__); + return ret; + } + + if (dvenc->state == VENC_STATE_START || + dvenc->state == VENC_STATE_PAUSE) { + ret = + dal_call_f5(dvenc->q6_handle, VENC_DALRPC_UPDATE_QP_RANGE, + &qp, sizeof(struct venc_qp_range)); + if (ret) { + pr_err("%s: remote function failed (%d) \n", __func__, + ret); + return ret; + } + } + return ret; +} + +static int venc_set_intra_period(struct venc_dev *dvenc, void *argp) +{ + int ret = 0; + u32 pnum = 0; + + ret = copy_from_user(&pnum, argp, sizeof(int)); + if (ret) { + pr_err("%s: copy_from_user failed\n", __func__); + return 
ret; + } + if (dvenc->state == VENC_STATE_START || + dvenc->state == VENC_STATE_PAUSE) { + ret = dal_call_f0(dvenc->q6_handle, + VENC_DALRPC_UPDATE_INTRA_PERIOD, pnum); + if (ret) + pr_err("%s: remote function failed (%d)\n", __func__, + ret); + } + return ret; +} + +static int venc_set_intra_refresh(struct venc_dev *dvenc, void *argp) +{ + int ret = 0; + u32 mb_num = 0; + + ret = copy_from_user(&mb_num, argp, sizeof(int)); + if (ret) { + pr_err("%s: copy_from_user failed\n", __func__); + return ret; + } + if (dvenc->state == VENC_STATE_START || + dvenc->state == VENC_STATE_PAUSE) { + ret = dal_call_f0(dvenc->q6_handle, + VENC_DALRPC_UPDATE_INTRA_REFRESH, mb_num); + if (ret) + pr_err("%s: remote function failed (%d)\n", __func__, + ret); + } + return ret; +} + +static int venc_set_frame_rate(struct venc_dev *dvenc, void *argp) +{ + int ret = 0; + struct venc_frame_rate pdata; + ret = copy_from_user(&pdata, argp, sizeof(struct venc_frame_rate)); + if (ret) { + pr_err("%s: copy_from_user failed\n", __func__); + return ret; + } + if (dvenc->state == VENC_STATE_START || + dvenc->state == VENC_STATE_PAUSE) { + ret = dal_call_f5(dvenc->q6_handle, + VENC_DALRPC_UPDATE_FRAME_RATE, + (void *)&(pdata), + sizeof(struct venc_frame_rate)); + if (ret) + pr_err("%s: remote function failed (%d)\n", __func__, + ret); + } + return ret; +} + +static int venc_set_target_bitrate(struct venc_dev *dvenc, void *argp) +{ + int ret = 0; + u32 pdata = 0; + + ret = copy_from_user(&pdata, argp, sizeof(int)); + if (ret) { + pr_err("%s: copy_from_user failed\n", __func__); + return ret; + } + if (dvenc->state == VENC_STATE_START || + dvenc->state == VENC_STATE_PAUSE) { + ret = dal_call_f0(dvenc->q6_handle, + VENC_DALRPC_UPDATE_BITRATE, pdata); + if (ret) + pr_err("%s: remote function failed (%d)\n", __func__, + ret); + } + return ret; +} + +static int venc_request_iframe(struct venc_dev *dvenc) +{ + int ret = 0; + + if (dvenc->state != VENC_STATE_START) + return -EINVAL; + + ret = dal_call_f0(dvenc->q6_handle, VENC_DALRPC_REQUEST_IFRAME, 1); + if (ret) + pr_err("%s: remote function failed (%d)\n", __func__, ret); + return ret; +} + +static int venc_stop_read_msg(struct venc_dev *dvenc) +{ + struct venc_msg msg; + int ret = 0; + + msg.status_code = 0; + msg.msg_code = VENC_MSG_STOP_READING_MSG; + msg.msg_data_size = 0; + venc_put_msg(dvenc, &msg); + return ret; +} + +static int venc_q6_stop(struct venc_dev *dvenc) +{ + int ret = 0; + struct venc_pmem_list *plist; + unsigned long flags; + + wake_up(&dvenc->venc_msg_evt); + spin_lock_irqsave(&dvenc->venc_pmem_list_lock, flags); + if (!dvenc->pmem_freed) { + list_for_each_entry(plist, &dvenc->venc_pmem_list_head, list) + put_pmem_file(plist->buf.file); + dvenc->pmem_freed = 1; + } + spin_unlock_irqrestore(&dvenc->venc_pmem_list_lock, flags); + + dvenc->state = VENC_STATE_STOP; + return ret; +} + +static int venc_translate_error(enum venc_status_code q6_status) +{ + int ret = 0; + + switch (q6_status) { + case VENC_STATUS_SUCCESS: + ret = VENC_S_SUCCESS; + break; + case VENC_STATUS_ERROR: + ret = VENC_S_EFAIL; + break; + case VENC_STATUS_INVALID_STATE: + ret = VENC_S_EINVALSTATE; + break; + case VENC_STATUS_FLUSHING: + ret = VENC_S_EFLUSHED; + break; + case VENC_STATUS_INVALID_PARAM: + ret = VENC_S_EBADPARAM; + break; + case VENC_STATUS_CMD_QUEUE_FULL: + ret = VENC_S_ECMDQFULL; + break; + case VENC_STATUS_CRITICAL: + ret = VENC_S_EFATAL; + break; + case VENC_STATUS_INSUFFICIENT_RESOURCES: + ret = VENC_S_ENOHWRES; + break; + case VENC_STATUS_TIMEOUT: + ret = VENC_S_ETIMEOUT; 
+ break; + } + if (q6_status != VENC_STATUS_SUCCESS) + pr_err("%s: Q6 failed (%d)", __func__, (int)q6_status); + return ret; +} + +static void venc_q6_callback(void *data, int len, void *cookie) +{ + int status = 0; + struct venc_dev *dvenc = (struct venc_dev *)cookie; + struct venc_msg_type *q6_msg = NULL; + struct venc_msg msg, msg1; + union venc_msg_data smsg1, smsg2; + unsigned long msg_code; + struct venc_input_payload *pload1; + struct venc_output_payload *pload2; + uint32_t *tmp = (uint32_t *) data; + + if (dvenc == NULL) { + pr_err("%s: empty driver parameter\n", __func__); + return; + } + if (tmp[2] == sizeof(struct venc_msg_type)) { + q6_msg = (struct venc_msg_type *)&tmp[3]; + } else { + pr_err("%s: callback with empty message (%d, %d)\n", + __func__, tmp[2], sizeof(struct venc_msg_type)); + return; + } + msg.msg_data_size = 0; + status = venc_translate_error(q6_msg->status); + switch ((enum venc_event_type_enum)q6_msg->event) { + case VENC_EVENT_START_STATUS: + dvenc->state = VENC_STATE_START; + msg_code = VENC_MSG_START; + break; + case VENC_EVENT_STOP_STATUS: + venc_q6_stop(dvenc); + msg_code = VENC_MSG_STOP; + break; + case VENC_EVENT_SUSPEND_STATUS: + dvenc->state = VENC_STATE_PAUSE; + msg_code = VENC_MSG_PAUSE; + break; + case VENC_EVENT_RESUME_STATUS: + dvenc->state = VENC_STATE_START; + msg_code = VENC_MSG_RESUME; + break; + case VENC_EVENT_FLUSH_STATUS: + smsg1.flush_ret.flush_mode = VENC_FLUSH_INPUT; + msg1.status_code = status; + msg1.msg_code = VENC_MSG_FLUSH; + msg1.msg_data = smsg1; + msg1.msg_data_size = sizeof(union venc_msg_data); + venc_put_msg(dvenc, &msg1); + smsg2.flush_ret.flush_mode = VENC_FLUSH_OUTPUT; + msg_code = VENC_MSG_FLUSH; + msg.msg_data = smsg2; + msg.msg_data_size = sizeof(union venc_msg_data); + break; + case VENC_EVENT_RELEASE_INPUT: + +kpi_end = ktime_to_ns(ktime_get()); +TRACE("KPI : encode a frame, %u ms\n", (kpi_end - kpi_start[0])/(1000*1000)); +if (cnt > 0) { + int i = 0; + for (i = 0; i < cnt; i++) + kpi_start[i] = kpi_start[i+1]; +} +cnt--; + pload1 = &((q6_msg->payload).input_payload); + TRACE("Release_input: data: 0x%x \n", pload1->data); + if (pload1 != NULL) { + msg.msg_data.buf.client_data = pload1->data; + msg_code = VENC_MSG_INPUT_BUFFER_DONE; + msg.msg_data_size = sizeof(union venc_msg_data); + } + break; + case VENC_EVENT_DELIVER_OUTPUT: + pload2 = &((q6_msg->payload).output_payload); + smsg1.buf.flags = 0; + if (pload2->flags & VENC_FLAG_SYNC_FRAME) + smsg1.buf.flags |= VENC_FLAG_SYNC_FRAME; + if (pload2->flags & VENC_FLAG_CODEC_CONFIG) + smsg1.buf.flags |= VENC_FLAG_CODEC_CONFIG; + if (pload2->flags & VENC_FLAG_END_OF_FRAME) + smsg1.buf.flags |= VENC_FLAG_END_OF_FRAME; + if (pload2->flags & VENC_FLAG_EOS) + smsg1.buf.flags |= VENC_FLAG_EOS; + smsg1.buf.len = pload2->size; + smsg1.buf.offset = 0; + smsg1.buf.time_stamp = pload2->time_stamp; + smsg1.buf.client_data = pload2->data; + msg_code = VENC_MSG_OUTPUT_BUFFER_DONE; + msg.msg_data = smsg1; + msg.msg_data_size = sizeof(union venc_msg_data); + break; + default: + pr_err("%s: invalid response from Q6 (%d)\n", __func__, + (int)q6_msg->event); + return; + } + msg.status_code = status; + msg.msg_code = msg_code; + venc_put_msg(dvenc, &msg); + return; +} + +static int venc_get_version(struct venc_dev *dvenc, void *argp) +{ + struct venc_version ver_info; + int ret = 0; + + ver_info.major = VENC_GET_MAJOR_VERSION(VENC_INTERFACE_VERSION); + ver_info.minor = VENC_GET_MINOR_VERSION(VENC_INTERFACE_VERSION); + + ret = copy_to_user(((struct venc_version *)argp), + &ver_info, 
sizeof(ver_info)); + if (ret) + pr_err("%s failed to copy_to_user\n", __func__); + + return ret; + +} + +static long q6venc_ioctl(struct file *file, u32 cmd, + unsigned long arg) +{ + long ret = 0; + void __user *argp = (void __user *)arg; + struct venc_dev *dvenc = file->private_data; + + if (!dvenc || !dvenc->is_active) + return -EPERM; + + switch (cmd) { + case VENC_IOCTL_SET_INPUT_BUFFER: + ret = venc_set_buffer(dvenc, argp, VENC_BUFFER_TYPE_INPUT); + break; + case VENC_IOCTL_SET_OUTPUT_BUFFER: + ret = venc_set_buffer(dvenc, argp, VENC_BUFFER_TYPE_OUTPUT); + break; + case VENC_IOCTL_GET_SEQUENCE_HDR: + ret = venc_get_sequence_hdr(dvenc, argp); + break; + case VENC_IOCTL_SET_QP_RANGE: + ret = venc_set_qp_range(dvenc, argp); + break; + case VENC_IOCTL_SET_INTRA_PERIOD: + ret = venc_set_intra_period(dvenc, argp); + break; + case VENC_IOCTL_SET_INTRA_REFRESH: + ret = venc_set_intra_refresh(dvenc, argp); + break; + case VENC_IOCTL_SET_FRAME_RATE: + ret = venc_set_frame_rate(dvenc, argp); + break; + case VENC_IOCTL_SET_TARGET_BITRATE: + ret = venc_set_target_bitrate(dvenc, argp); + break; + case VENC_IOCTL_CMD_REQUEST_IFRAME: + if (dvenc->state == VENC_STATE_START) + ret = venc_request_iframe(dvenc); + break; + case VENC_IOCTL_CMD_START: + ret = venc_start(dvenc, argp); + break; + case VENC_IOCTL_CMD_STOP: + ret = venc_stop(dvenc); + break; + case VENC_IOCTL_CMD_PAUSE: + ret = venc_pause(dvenc); + break; + case VENC_IOCTL_CMD_RESUME: + ret = venc_resume(dvenc); + break; + case VENC_IOCTL_CMD_ENCODE_FRAME: + ret = venc_encode_frame(dvenc, argp); + break; + case VENC_IOCTL_CMD_FILL_OUTPUT_BUFFER: + ret = venc_fill_output(dvenc, argp); + break; + case VENC_IOCTL_CMD_FLUSH: + ret = venc_flush(dvenc, argp); + break; + case VENC_IOCTL_CMD_READ_NEXT_MSG: + wait_event_interruptible(dvenc->venc_msg_evt, + venc_get_msg(dvenc, argp)); + break; + case VENC_IOCTL_CMD_STOP_READ_MSG: + ret = venc_stop_read_msg(dvenc); + break; + case VENC_IOCTL_GET_VERSION: + ret = venc_get_version(dvenc, argp); + break; + default: + pr_err("%s: invalid ioctl code (%d)\n", __func__, cmd); + ret = -ENOTTY; + break; + } + return ret; +} + +static int q6venc_open(struct inode *inode, struct file *file) +{ + int i; + int ret = 0; + struct venc_dev *dvenc; + struct venc_msg_list *plist; + struct dal_info version_info; + + dvenc = kzalloc(sizeof(struct venc_dev), GFP_KERNEL); + if (!dvenc) { + pr_err("%s: unable to allocate memory for struct venc_dev\n", + __func__); + return -ENOMEM; + } + file->private_data = dvenc; + INIT_LIST_HEAD(&dvenc->venc_msg_list_head); + INIT_LIST_HEAD(&dvenc->venc_msg_list_free); + INIT_LIST_HEAD(&dvenc->venc_pmem_list_head); + init_waitqueue_head(&dvenc->venc_msg_evt); + spin_lock_init(&dvenc->venc_msg_list_lock); + spin_lock_init(&dvenc->venc_pmem_list_lock); + venc_ref++; + for (i = 0; i < VENC_MSG_MAX; i++) { + plist = kzalloc(sizeof(struct venc_msg_list), GFP_KERNEL); + if (!plist) { + pr_err("%s: kzalloc failed\n", __func__); + ret = -ENOMEM; + goto err_venc_create_msg_list; + } + list_add(&plist->list, &dvenc->venc_msg_list_free); + } + dvenc->q6_handle = + dal_attach(DALDEVICEID_VENC_DEVICE, DALDEVICEID_VENC_PORTNAME, + venc_q6_callback, (void *)dvenc); + if (!(dvenc->q6_handle)) { + pr_err("%s: daldevice_attach failed (%d)\n", __func__, ret); + goto err_venc_dal_attach; + } + ret = dal_call_f9(dvenc->q6_handle, DAL_OP_INFO, &version_info, + sizeof(struct dal_info)); + if (ret) { + pr_err("%s: failed to get version\n", __func__); + goto err_venc_dal_open; + } + + 
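+	/*
+	 * Interface versions are 16.16 fixed fields: VENC_INTERFACE_VERSION
+	 * 0x00020000 is major 2, minor 0.  venc_check_version() treats the
+	 * versions as compatible when the majors match and the client minor
+	 * is not newer than the DSP's.  The check below is currently
+	 * compiled out ("#if 0"), so only the two version numbers are
+	 * logged.
+	 */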
pr_info("VENC_INTERFACE_VERSION %X, version_info.version %X\n", + VENC_INTERFACE_VERSION, version_info.version); +#if 0 + if (venc_check_version(VENC_INTERFACE_VERSION, version_info.version)) { + pr_err("%s: driver version mismatch\n", __func__); + goto err_venc_dal_open; + } +#endif + ret = dal_call_f0(dvenc->q6_handle, DAL_OP_OPEN, 1); + if (ret) { + pr_err("%s: dal_call_open failed (%d)\n", __func__, ret); + goto err_venc_dal_open; + } + dvenc->state = VENC_STATE_STOP; + dvenc->is_active = 1; + prevent_sleep(); + return ret; +err_venc_dal_open: + dal_detach(dvenc->q6_handle); +err_venc_dal_attach: + list_for_each_entry(plist, &dvenc->venc_msg_list_free, list) { + list_del(&plist->list); + kfree(plist); + } +err_venc_create_msg_list: + kfree(dvenc); + venc_ref--; + return ret; +} + +static int q6venc_release(struct inode *inode, struct file *file) +{ + int ret = 0; + struct venc_msg_list *l, *n; + struct venc_pmem_list *plist, *m; + struct venc_dev *dvenc; + unsigned long flags; + + venc_ref--; + dvenc = file->private_data; + dvenc->is_active = 0; + wake_up_all(&dvenc->venc_msg_evt); + dal_call_f0(dvenc->q6_handle, VENC_DALRPC_STOP, 1); + dal_call_f0(dvenc->q6_handle, DAL_OP_CLOSE, 1); + dal_detach(dvenc->q6_handle); + list_for_each_entry_safe(l, n, &dvenc->venc_msg_list_free, list) { + list_del(&l->list); + kfree(l); + } + list_for_each_entry_safe(l, n, &dvenc->venc_msg_list_head, list) { + list_del(&l->list); + kfree(l); + } + spin_lock_irqsave(&dvenc->venc_pmem_list_lock, flags); + if (!dvenc->pmem_freed) { + list_for_each_entry(plist, &dvenc->venc_pmem_list_head, list) + put_pmem_file(plist->buf.file); + dvenc->pmem_freed = 1; + } + spin_unlock_irqrestore(&dvenc->venc_pmem_list_lock, flags); + + list_for_each_entry_safe(plist, m, &dvenc->venc_pmem_list_head, list) { + list_del(&plist->list); + kfree(plist); + } + kfree(dvenc); + allow_sleep(); + return ret; +} + +const struct file_operations q6venc_fops = { + .owner = THIS_MODULE, + .open = q6venc_open, + .release = q6venc_release, + .unlocked_ioctl = q6venc_ioctl, +}; + +static int __init q6venc_init(void) +{ + int ret = 0; + + wake_lock_init(&idlelock, WAKE_LOCK_IDLE, "venc_idle"); + wake_lock_init(&wakelock, WAKE_LOCK_SUSPEND, "venc_suspend"); + + venc_device_p = kzalloc(sizeof(struct venc_dev), GFP_KERNEL); + if (!venc_device_p) { + pr_err("%s: unable to allocate memory for venc_device_p\n", + __func__); + return -ENOMEM; + } + ret = alloc_chrdev_region(&venc_dev_num, 0, 1, VENC_NAME); + if (ret < 0) { + pr_err("%s: alloc_chrdev_region failed (%d)\n", __func__, + ret); + return ret; + } + venc_class = class_create(THIS_MODULE, VENC_NAME); + if (IS_ERR(venc_class)) { + ret = PTR_ERR(venc_class); + pr_err("%s: failed to create venc_class (%d)\n", + __func__, ret); + goto err_venc_class_create; + } + venc_device_p->class_devp = + device_create(venc_class, NULL, venc_dev_num, NULL, + VENC_NAME); + if (IS_ERR(venc_device_p->class_devp)) { + ret = PTR_ERR(venc_device_p->class_devp); + pr_err("%s: failed to create class_device (%d)\n", __func__, + ret); + goto err_venc_class_device_create; + } + cdev_init(&cdev, &q6venc_fops); + cdev.owner = THIS_MODULE; + ret = cdev_add(&cdev, venc_dev_num, 1); + if (ret < 0) { + pr_err("%s: cdev_add failed (%d)\n", __func__, ret); + goto err_venc_cdev_add; + } + init_waitqueue_head(&venc_device_p->venc_msg_evt); + return ret; + +err_venc_cdev_add: + device_destroy(venc_class, venc_dev_num); +err_venc_class_device_create: + class_destroy(venc_class); +err_venc_class_create: + 
unregister_chrdev_region(venc_dev_num, 1); + return ret; +} + +static void __exit q6venc_exit(void) +{ + cdev_del(&(cdev)); + device_destroy(venc_class, venc_dev_num); + class_destroy(venc_class); + unregister_chrdev_region(venc_dev_num, 1); +} + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Video encoder driver for QDSP6"); +MODULE_VERSION("2.0"); +module_init(q6venc_init); +module_exit(q6venc_exit); diff --git a/arch/arm/mach-msm/qdsp6/pcm_in.c b/arch/arm/mach-msm/qdsp6/pcm_in.c new file mode 100644 index 0000000000000..501ac682e73b9 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6/pcm_in.c @@ -0,0 +1,244 @@ +/* arch/arm/mach-msm/qdsp6/pcm_in.c + * + * Copyright (C) 2009 Google, Inc. + * Copyright (C) 2009 HTC Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + + +struct msm_voicerec_mode { + uint32_t rec_mode; +}; + + +#define AUDIO_SET_INCALL _IOW(AUDIO_IOCTL_MAGIC, 19, struct msm_voicerec_mode) +#define AUDIO_FLAG_INCALL_MIXED 2 + +struct pcm { + struct audio_client *ac; + uint32_t sample_rate; + uint32_t channel_count; + uint32_t buffer_size; + uint32_t rec_mode; +}; + +#define BUFSZ (256) + +void audio_client_dump(struct audio_client *ac); + +static long q6_in_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct pcm *pcm = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_SET_VOLUME: + break; + case AUDIO_GET_STATS: { + struct msm_audio_stats stats; + memset(&stats, 0, sizeof(stats)); + if (copy_to_user((void*) arg, &stats, sizeof(stats))) + return -EFAULT; + return 0; + } + case AUDIO_START: { + uint32_t acdb_id; + rc = 0; + + if (arg == 0) { + acdb_id = 0; + } else if (copy_from_user(&acdb_id, (void*) arg, sizeof(acdb_id))) { + rc = -EFAULT; + break; + } + + if (pcm->ac) { + rc = -EBUSY; + } else { + pcm->ac = q6audio_open_pcm(pcm->buffer_size, + pcm->sample_rate, pcm->channel_count, + pcm->rec_mode, acdb_id); + if (!pcm->ac) + rc = -ENOMEM; + } + break; + } + case AUDIO_STOP: + break; + case AUDIO_FLUSH: + break; + case AUDIO_SET_CONFIG: { + struct msm_audio_config config; + if (copy_from_user(&config, (void*) arg, sizeof(config))) { + rc = -EFAULT; + break; + } + if (!config.channel_count || config.channel_count > 2) { + rc = -EINVAL; + break; + } + if (config.sample_rate < 8000 || config.sample_rate > 48000) { + rc = -EINVAL; + break; + } + if (config.buffer_size < 128 || config.buffer_size > 8192) { + rc = -EINVAL; + break; + } + + pcm->sample_rate = config.sample_rate; + pcm->channel_count = config.channel_count; + pcm->buffer_size = config.buffer_size; + break; + } + case AUDIO_SET_INCALL: { + struct msm_voicerec_mode voicerec_mode; + if (copy_from_user(&voicerec_mode, (void *)arg, + sizeof(struct msm_voicerec_mode))) + return -EFAULT; + if (voicerec_mode.rec_mode != AUDIO_FLAG_READ && + voicerec_mode.rec_mode != AUDIO_FLAG_INCALL_MIXED) { + pcm->rec_mode = AUDIO_FLAG_READ; + pr_err("invalid rec_mode\n"); + rc = -EINVAL; + } else + pcm->rec_mode = voicerec_mode.rec_mode; + break; + } + case 
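+	/*
+	 * Illustrative capture sequence (a sketch, not part of this patch;
+	 * it assumes the misc device registered at the bottom of this file
+	 * shows up as /dev/msm_pcm_in):
+	 *
+	 *   fd = open("/dev/msm_pcm_in", O_RDONLY);
+	 *   ioctl(fd, AUDIO_GET_CONFIG, &cfg);   // defaults: 8 kHz, mono, 256 B
+	 *   cfg.sample_rate = 16000;
+	 *   ioctl(fd, AUDIO_SET_CONFIG, &cfg);
+	 *   ioctl(fd, AUDIO_START, 0);           // arg 0 selects acdb_id 0
+	 *   read(fd, buf, cfg.buffer_size);      // blocks until the DSP returns a buffer
+	 */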
AUDIO_GET_CONFIG: { + struct msm_audio_config config; + config.buffer_size = pcm->buffer_size; + config.buffer_count = 2; + config.sample_rate = pcm->sample_rate; + config.channel_count = pcm->channel_count; + config.unused[0] = 0; + config.unused[1] = 0; + config.unused[2] = 0; + if (copy_to_user((void*) arg, &config, sizeof(config))) { + rc = -EFAULT; + } + break; + } + default: + rc = -EINVAL; + } + return rc; +} + +static int q6_in_open(struct inode *inode, struct file *file) +{ + struct pcm *pcm; + + pr_info("pcm_in: open\n"); + pcm = kzalloc(sizeof(struct pcm), GFP_KERNEL); + + if (!pcm) + return -ENOMEM; + + pcm->channel_count = 1; + pcm->sample_rate = 8000; + pcm->buffer_size = BUFSZ; + pcm->rec_mode = AUDIO_FLAG_READ; + file->private_data = pcm; + return 0; +} + +static ssize_t q6_in_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) +{ + struct pcm *pcm = file->private_data; + struct audio_client *ac; + struct audio_buffer *ab; + const char __user *start = buf; + int xfer; + int res; + + ac = pcm->ac; + if (!ac) { + res = -ENODEV; + goto fail; + } + while (count > 0) { + ab = ac->buf + ac->cpu_buf; + + if (ab->used) + if (!wait_event_timeout(ac->wait, (ab->used == 0), 5*HZ)) { + audio_client_dump(ac); + pr_err("pcm_read: timeout. dsp dead?\n"); + q6audio_dsp_not_responding(); + } + + xfer = count; + if (xfer > ab->size) + xfer = ab->size; + + if (copy_to_user(buf, ab->data, xfer)) { + res = -EFAULT; + goto fail; + } + + buf += xfer; + count -= xfer; + + ab->used = 1; + q6audio_read(ac, ab); + ac->cpu_buf ^= 1; + } +fail: + res = buf - start; + return res; +} + +static int q6_in_release(struct inode *inode, struct file *file) +{ + + int rc = 0; + struct pcm *pcm = file->private_data; + if (pcm->ac) + rc = q6audio_close(pcm->ac); + kfree(pcm); + pr_info("pcm_out: release\n"); + return rc; +} + +static struct file_operations q6_in_fops = { + .owner = THIS_MODULE, + .open = q6_in_open, + .read = q6_in_read, + .release = q6_in_release, + .unlocked_ioctl = q6_in_ioctl, +}; + +struct miscdevice q6_in_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_pcm_in", + .fops = &q6_in_fops, +}; + +static int __init q6_in_init(void) { + return misc_register(&q6_in_misc); +} + +device_initcall(q6_in_init); diff --git a/arch/arm/mach-msm/qdsp6/pcm_out.c b/arch/arm/mach-msm/qdsp6/pcm_out.c new file mode 100644 index 0000000000000..6d041d8c686ae --- /dev/null +++ b/arch/arm/mach-msm/qdsp6/pcm_out.c @@ -0,0 +1,235 @@ +/* arch/arm/mach-msm/qdsp6/pcm_out.c + * + * Copyright (C) 2009 Google, Inc. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +void audio_client_dump(struct audio_client *ac); + +#define BUFSZ (3072) + +struct pcm { + struct mutex lock; + struct audio_client *ac; + uint32_t sample_rate; + uint32_t channel_count; + size_t buffer_size; +}; + +static long pcm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct pcm *pcm = file->private_data; + int rc = 0; + + if (cmd == AUDIO_GET_STATS) { + struct msm_audio_stats stats; + memset(&stats, 0, sizeof(stats)); + if (copy_to_user((void*) arg, &stats, sizeof(stats))) + return -EFAULT; + return 0; + } + + mutex_lock(&pcm->lock); + switch (cmd) { + case AUDIO_SET_VOLUME: { + int vol; + if (!pcm->ac) { + pr_err("%s: cannot set volume before AUDIO_START!\n", + __func__); + rc = -EINVAL; + break; + } + if (copy_from_user(&vol, (void*) arg, sizeof(vol))) { + rc = -EFAULT; + break; + } + rc = q6audio_set_stream_volume(pcm->ac, vol); + break; + } + case AUDIO_START: { + uint32_t acdb_id; + if (arg == 0) { + acdb_id = 0; + } else if (copy_from_user(&acdb_id, (void*) arg, sizeof(acdb_id))) { + pr_info("pcm_out: copy acdb_id from user failed\n"); + rc = -EFAULT; + break; + } + if (pcm->ac) { + rc = -EBUSY; + } else { + pcm->ac = q6audio_open_pcm(pcm->buffer_size, pcm->sample_rate, + pcm->channel_count, + AUDIO_FLAG_WRITE, acdb_id); + if (!pcm->ac) + rc = -ENOMEM; + } + break; + } + case AUDIO_STOP: + break; + case AUDIO_FLUSH: + break; + case AUDIO_SET_CONFIG: { + struct msm_audio_config config; + if (pcm->ac) { + rc = -EBUSY; + break; + } + if (copy_from_user(&config, (void*) arg, sizeof(config))) { + rc = -EFAULT; + break; + } + if (config.channel_count < 1 || config.channel_count > 2) { + rc = -EINVAL; + break; + } + if (config.sample_rate < 8000 || config.sample_rate > 48000) { + rc = -EINVAL; + break; + } + if (config.buffer_size < 128 || config.buffer_size > 8192) { + rc = -EINVAL; + break; + } + pcm->sample_rate = config.sample_rate; + pcm->channel_count = config.channel_count; + pcm->buffer_size = config.buffer_size; + break; + } + case AUDIO_GET_CONFIG: { + struct msm_audio_config config; + config.buffer_size = pcm->buffer_size; + config.buffer_count = 2; + config.sample_rate = pcm->sample_rate; + config.channel_count = pcm->channel_count; + config.unused[0] = 0; + config.unused[1] = 0; + config.unused[2] = 0; + if (copy_to_user((void*) arg, &config, sizeof(config))) { + rc = -EFAULT; + } + break; + } + default: + rc = -EINVAL; + } + mutex_unlock(&pcm->lock); + return rc; +} + +static int pcm_open(struct inode *inode, struct file *file) +{ + struct pcm *pcm; + + pr_info("pcm_out: open\n"); + pcm = kzalloc(sizeof(struct pcm), GFP_KERNEL); + + if (!pcm) + return -ENOMEM; + + mutex_init(&pcm->lock); + pcm->channel_count = 2; + pcm->sample_rate = 44100; + pcm->buffer_size = BUFSZ; + + file->private_data = pcm; + return 0; +} + +static ssize_t pcm_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + struct pcm *pcm = file->private_data; + struct audio_client *ac; + struct audio_buffer *ab; + const char __user *start = buf; + int xfer; + + if (!pcm->ac) + pcm_ioctl(file, AUDIO_START, 0); + + ac = pcm->ac; + if (!ac) + return -ENODEV; + + while (count > 0) { + ab = ac->buf + ac->cpu_buf; + + if (ab->used) + if (!wait_event_timeout(ac->wait, (ab->used == 0), 5*HZ)) { + audio_client_dump(ac); + pr_err("pcm_write: timeout. 
dsp dead?\n"); + q6audio_dsp_not_responding(); + } + + xfer = count; + if (xfer > ab->size) + xfer = ab->size; + + if (copy_from_user(ab->data, buf, xfer)) + return -EFAULT; + + buf += xfer; + count -= xfer; + + ab->used = xfer; + q6audio_write(ac, ab); + ac->cpu_buf ^= 1; + } + + return buf - start; +} + +static int pcm_release(struct inode *inode, struct file *file) +{ + struct pcm *pcm = file->private_data; + if (pcm->ac) + q6audio_close(pcm->ac); + kfree(pcm); + pr_info("pcm_out: release\n"); + return 0; +} + +static struct file_operations pcm_fops = { + .owner = THIS_MODULE, + .open = pcm_open, + .write = pcm_write, + .release = pcm_release, + .unlocked_ioctl = pcm_ioctl, +}; + +struct miscdevice pcm_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_pcm_out", + .fops = &pcm_fops, +}; + +static int __init pcm_init(void) { + return misc_register(&pcm_misc); +} + +device_initcall(pcm_init); diff --git a/arch/arm/mach-msm/qdsp6/q6audio.c b/arch/arm/mach-msm/qdsp6/q6audio.c new file mode 100644 index 0000000000000..5882953a44366 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6/q6audio.c @@ -0,0 +1,1713 @@ +/* arch/arm/mach-msm/qdsp6/q6audio.c + * + * Copyright (C) 2009 Google, Inc. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "../dal.h" +#include "dal_audio.h" +#include "dal_audio_format.h" +#include "dal_acdb.h" +#include "dal_adie.h" +#include +#include + +#include + +#include "q6audio_devices.h" + +#if 0 +#define TRACE(x...) pr_info("Q6: "x) +#else +#define TRACE(x...) 
do{}while(0) +#endif + +static struct q6_hw_info q6_audio_hw[Q6_HW_COUNT] = { + [Q6_HW_HANDSET] = { + .min_gain = -1500, + .max_gain = 1100, + }, + [Q6_HW_HEADSET] = { + .min_gain = -1700, + .max_gain = 900, + }, + [Q6_HW_SPEAKER] = { + .min_gain = -2000, + .max_gain = 600, + }, + [Q6_HW_TTY] = { + .min_gain = -1500, + .max_gain = 1100, + }, + [Q6_HW_BT_SCO] = { + .min_gain = -2000, + .max_gain = 600, + }, + [Q6_HW_BT_A2DP] = { + .min_gain = -2000, + .max_gain = 600, + }, +}; + +static struct wake_lock wakelock; +static struct wake_lock idlelock; +static int idlecount; +static DEFINE_MUTEX(idlecount_lock); + +void audio_prevent_sleep(void) +{ + mutex_lock(&idlecount_lock); + if (++idlecount == 1) { + wake_lock(&wakelock); + wake_lock(&idlelock); + } + mutex_unlock(&idlecount_lock); +} + +void audio_allow_sleep(void) +{ + mutex_lock(&idlecount_lock); + if (--idlecount == 0) { + wake_unlock(&idlelock); + wake_unlock(&wakelock); + } + mutex_unlock(&idlecount_lock); +} + +static struct clk *icodec_rx_clk; +static struct clk *icodec_tx_clk; +static struct clk *ecodec_clk; +static struct clk *sdac_clk; + +static struct q6audio_analog_ops default_analog_ops; +static struct q6audio_analog_ops *analog_ops = &default_analog_ops; +static uint32_t tx_clk_freq = 8000; +static int tx_mute_status = 0; +static int rx_vol_level = 100; +static char acdb_file[64] = "default.acdb"; +static uint32_t tx_acdb = 0; +static uint32_t rx_acdb = 0; + +void q6audio_register_analog_ops(struct q6audio_analog_ops *ops) +{ + analog_ops = ops; +} + +void q6audio_set_acdb_file(char* filename) +{ + if (filename) + strncpy(acdb_file, filename, sizeof(acdb_file)-1); +} + +static struct q6_device_info *q6_lookup_device(uint32_t device_id) +{ + struct q6_device_info *di = q6_audio_devices; + for (;;) { + if (di->id == device_id) + return di; + if (di->id == 0) { + pr_err("q6_lookup_device: bogus id 0x%08x\n", + device_id); + return di; + } + di++; + } +} + +static uint32_t q6_device_to_codec(uint32_t device_id) +{ + struct q6_device_info *di = q6_lookup_device(device_id); + return di->codec; +} + +static uint32_t q6_device_to_dir(uint32_t device_id) +{ + struct q6_device_info *di = q6_lookup_device(device_id); + return di->dir; +} + +static uint32_t q6_device_to_cad_id(uint32_t device_id) +{ + struct q6_device_info *di = q6_lookup_device(device_id); + return di->cad_id; +} + +static uint32_t q6_device_to_path(uint32_t device_id) +{ + struct q6_device_info *di = q6_lookup_device(device_id); + return di->path; +} + +static uint32_t q6_device_to_rate(uint32_t device_id) +{ + struct q6_device_info *di = q6_lookup_device(device_id); + return di->rate; +} + +int q6_device_volume(uint32_t device_id, int level) +{ + struct q6_device_info *di = q6_lookup_device(device_id); + if (analog_ops->get_rx_vol) + return analog_ops->get_rx_vol(di->hw, level); + else { + struct q6_hw_info *hw; + hw = &q6_audio_hw[di->hw]; + return hw->min_gain + ((hw->max_gain - hw->min_gain) * level) / 100; + } +} + +static inline int adie_open(struct dal_client *client) +{ + return dal_call_f0(client, DAL_OP_OPEN, 0); +} + +static inline int adie_close(struct dal_client *client) +{ + return dal_call_f0(client, DAL_OP_CLOSE, 0); +} + +static inline int adie_set_path(struct dal_client *client, + uint32_t id, uint32_t path_type) +{ + return dal_call_f1(client, ADIE_OP_SET_PATH, id, path_type); +} + +static inline int adie_set_path_freq_plan(struct dal_client *client, + uint32_t path_type, uint32_t plan) +{ + return dal_call_f1(client, 
ADIE_OP_SET_PATH_FREQUENCY_PLAN, + path_type, plan); +} + +static inline int adie_proceed_to_stage(struct dal_client *client, + uint32_t path_type, uint32_t stage) +{ + return dal_call_f1(client, ADIE_OP_PROCEED_TO_STAGE, + path_type, stage); +} + +static inline int adie_mute_path(struct dal_client *client, + uint32_t path_type, uint32_t mute_state) +{ + return dal_call_f1(client, ADIE_OP_MUTE_PATH, path_type, mute_state); +} + +static int adie_refcount; + +static struct dal_client *adie; +static struct dal_client *adsp; +static struct dal_client *acdb; + +static int adie_enable(void) +{ + adie_refcount++; + if (adie_refcount == 1) + adie_open(adie); + return 0; +} + +static int adie_disable(void) +{ + adie_refcount--; + if (adie_refcount == 0) + adie_close(adie); + return 0; +} + +/* 4k DMA scratch page used for exchanging acdb device config tables + * and stream format descriptions with the DSP. + */ +static void *audio_data; +static dma_addr_t audio_phys; + +#define SESSION_MIN 0 +#define SESSION_MAX 64 + +static DEFINE_MUTEX(session_lock); +static DEFINE_MUTEX(audio_lock); + +static struct audio_client *session[SESSION_MAX]; + +static int session_alloc(struct audio_client *ac) +{ + int n; + + mutex_lock(&session_lock); + for (n = SESSION_MIN; n < SESSION_MAX; n++) { + if (!session[n]) { + session[n] = ac; + mutex_unlock(&session_lock); + return n; + } + } + mutex_unlock(&session_lock); + return -ENOMEM; +} + +static void session_free(int n, struct audio_client *ac) +{ + mutex_lock(&session_lock); + if (session[n] == ac) + session[n] = 0; + mutex_unlock(&session_lock); +} + +static void audio_client_free(struct audio_client *ac) +{ + session_free(ac->session, ac); + + if (ac->buf[0].data) + dma_free_coherent(NULL, ac->buf[0].size, + ac->buf[0].data, ac->buf[0].phys); + if (ac->buf[1].data) + dma_free_coherent(NULL, ac->buf[1].size, + ac->buf[1].data, ac->buf[1].phys); + kfree(ac); +} + +static struct audio_client *audio_client_alloc(unsigned bufsz) +{ + struct audio_client *ac; + int n; + + ac = kzalloc(sizeof(*ac), GFP_KERNEL); + if (!ac) + return 0; + + n = session_alloc(ac); + if (n < 0) + goto fail_session; + ac->session = n; + + if (bufsz > 0) { + ac->buf[0].data = dma_alloc_coherent(NULL, bufsz, + &ac->buf[0].phys, GFP_KERNEL); + if (!ac->buf[0].data) + goto fail; + ac->buf[1].data = dma_alloc_coherent(NULL, bufsz, + &ac->buf[1].phys, GFP_KERNEL); + if (!ac->buf[1].data) + goto fail; + + ac->buf[0].size = bufsz; + ac->buf[1].size = bufsz; + } + + init_waitqueue_head(&ac->wait); + ac->client = adsp; + + return ac; + +fail: + session_free(n, ac); +fail_session: + audio_client_free(ac); + return 0; +} + +void audio_client_dump(struct audio_client *ac) +{ + dal_trace_dump(ac->client); +} + +static int audio_ioctl(struct audio_client *ac, void *ptr, uint32_t len) +{ + struct adsp_command_hdr *hdr = ptr; + uint32_t tmp; + int r; + + hdr->size = len - sizeof(u32); + hdr->dst = AUDIO_ADDR(ac->session, 0, AUDIO_DOMAIN_DSP); + hdr->src = AUDIO_ADDR(ac->session, 0, AUDIO_DOMAIN_MODEM); + hdr->context = ac->session; + ac->cb_status = -EBUSY; + r = dal_call(ac->client, AUDIO_OP_CONTROL, 5, ptr, len, &tmp, sizeof(tmp)); + if (r != 4) + return -EIO; + if (!wait_event_timeout(ac->wait, (ac->cb_status != -EBUSY), 5*HZ)) { + dal_trace_dump(ac->client); + pr_err("audio_ioctl: timeout. 
dsp dead?\n"); + q6audio_dsp_not_responding(); + } + return ac->cb_status; +} + +static int audio_command(struct audio_client *ac, uint32_t cmd) +{ + struct adsp_command_hdr rpc; + memset(&rpc, 0, sizeof(rpc)); + rpc.opcode = cmd; + return audio_ioctl(ac, &rpc, sizeof(rpc)); +} + +static int audio_open_control(struct audio_client *ac) +{ + struct adsp_open_command rpc; + + memset(&rpc, 0, sizeof(rpc)); + rpc.hdr.opcode = ADSP_AUDIO_IOCTL_CMD_OPEN_DEVICE; + return audio_ioctl(ac, &rpc, sizeof(rpc)); +} + +static int audio_out_open(struct audio_client *ac, uint32_t bufsz, + uint32_t rate, uint32_t channels) +{ + struct adsp_open_command rpc; + + memset(&rpc, 0, sizeof(rpc)); + + rpc.format.standard.format = ADSP_AUDIO_FORMAT_PCM; + rpc.format.standard.channels = channels; + rpc.format.standard.bits_per_sample = 16; + rpc.format.standard.sampling_rate = rate; + rpc.format.standard.is_signed = 1; + rpc.format.standard.is_interleaved = 1; + + rpc.hdr.opcode = ADSP_AUDIO_IOCTL_CMD_OPEN_WRITE; + rpc.device = ADSP_AUDIO_DEVICE_ID_DEFAULT; + rpc.stream_context = ADSP_AUDIO_DEVICE_CONTEXT_PLAYBACK; + rpc.buf_max_size = bufsz; + + TRACE("open out %p\n", ac); + return audio_ioctl(ac, &rpc, sizeof(rpc)); +} + +#if 0 +static int audio_in_open(struct audio_client *ac, uint32_t bufsz, + uint32_t rate, uint32_t channels) +{ + struct adsp_open_command rpc; + + memset(&rpc, 0, sizeof(rpc)); + + rpc.format.standard.format = ADSP_AUDIO_FORMAT_PCM; + rpc.format.standard.channels = channels; + rpc.format.standard.bits_per_sample = 16; + rpc.format.standard.sampling_rate = rate; + rpc.format.standard.is_signed = 1; + rpc.format.standard.is_interleaved = 1; + + rpc.hdr.opcode = ADSP_AUDIO_IOCTL_CMD_OPEN_READ; + rpc.device = ADSP_AUDIO_DEVICE_ID_DEFAULT; + rpc.stream_context = ADSP_AUDIO_DEVICE_CONTEXT_RECORD; + rpc.buf_max_size = bufsz; + + TRACE("%p: open in\n", ac); + return audio_ioctl(ac, &rpc, sizeof(rpc)); +} +#else +static int audio_in_open(struct audio_client *ac, uint32_t bufsz, + uint32_t flags, uint32_t rate, uint32_t channels) +{ + struct adsp_open_command rpc; + + memset(&rpc, 0, sizeof(rpc)); + + rpc.format.standard.format = ADSP_AUDIO_FORMAT_PCM; + rpc.format.standard.channels = channels; + rpc.format.standard.bits_per_sample = 16; + rpc.format.standard.sampling_rate = rate; + rpc.format.standard.is_signed = 1; + rpc.format.standard.is_interleaved = 1; + + rpc.hdr.opcode = ADSP_AUDIO_IOCTL_CMD_OPEN_READ; + rpc.device = ADSP_AUDIO_DEVICE_ID_DEFAULT; + if (flags == AUDIO_FLAG_READ) + rpc.stream_context = ADSP_AUDIO_DEVICE_CONTEXT_RECORD; + else + rpc.stream_context = ADSP_AUDIO_DEVICE_CONTEXT_MIXED_RECORD; + + rpc.buf_max_size = bufsz; + + TRACE("%p: open in\n", ac); + return audio_ioctl(ac, &rpc, sizeof(rpc)); +} +#endif + +static int audio_mp3_open(struct audio_client *ac, uint32_t bufsz, + uint32_t rate, uint32_t channels) +{ + struct adsp_open_command rpc; + + memset(&rpc, 0, sizeof(rpc)); + + rpc.format.standard.format = ADSP_AUDIO_FORMAT_MP3; + rpc.format.standard.channels = channels; + rpc.format.standard.bits_per_sample = 16; + rpc.format.standard.sampling_rate = rate; + rpc.format.standard.is_signed = 1; + rpc.format.standard.is_interleaved = 0; + + rpc.hdr.opcode = ADSP_AUDIO_IOCTL_CMD_OPEN_WRITE; + rpc.device = ADSP_AUDIO_DEVICE_ID_DEFAULT; + rpc.stream_context = ADSP_AUDIO_DEVICE_CONTEXT_PLAYBACK; + rpc.buf_max_size = bufsz; + + return audio_ioctl(ac, &rpc, sizeof(rpc)); +} + +static int audio_close(struct audio_client *ac) +{ + TRACE("%p: close\n", ac); + audio_command(ac, 
ADSP_AUDIO_IOCTL_CMD_STREAM_STOP); + audio_command(ac, ADSP_AUDIO_IOCTL_CMD_CLOSE); + return 0; +} + +static int audio_set_table(struct audio_client *ac, + uint32_t device_id, int size) +{ + struct adsp_set_dev_cfg_table_command rpc; + + memset(&rpc, 0, sizeof(rpc)); + rpc.hdr.opcode = ADSP_AUDIO_IOCTL_SET_DEVICE_CONFIG_TABLE; + if (q6_device_to_dir(device_id) == Q6_TX) + rpc.hdr.data = tx_clk_freq; + rpc.device_id = device_id; + rpc.phys_addr = audio_phys; + rpc.phys_size = size; + rpc.phys_used = size; + + TRACE("control: set table %x\n", device_id); + return audio_ioctl(ac, &rpc, sizeof(rpc)); +} + +int q6audio_read(struct audio_client *ac, struct audio_buffer *ab) +{ + struct adsp_buffer_command rpc; + uint32_t res; + int r; + + memset(&rpc, 0, sizeof(rpc)); + rpc.hdr.size = sizeof(rpc) - sizeof(u32); + rpc.hdr.dst = AUDIO_ADDR(ac->session, 0, AUDIO_DOMAIN_DSP); + rpc.hdr.src = AUDIO_ADDR(ac->session, 0, AUDIO_DOMAIN_MODEM); + rpc.hdr.context = ac->session; + rpc.hdr.opcode = ADSP_AUDIO_IOCTL_CMD_DATA_TX; + rpc.buffer.addr = ab->phys; + rpc.buffer.max_size = ab->size; + rpc.buffer.actual_size = ab->used; + + TRACE("%p: read\n", ac); + r = dal_call(ac->client, AUDIO_OP_DATA, 5, &rpc, sizeof(rpc), + &res, sizeof(res)); + return 0; +} + +int q6audio_write(struct audio_client *ac, struct audio_buffer *ab) +{ + struct adsp_buffer_command rpc; + uint32_t res; + int r; + + memset(&rpc, 0, sizeof(rpc)); + rpc.hdr.size = sizeof(rpc) - sizeof(u32); + rpc.hdr.dst = AUDIO_ADDR(ac->session, 0, AUDIO_DOMAIN_DSP); + rpc.hdr.src = AUDIO_ADDR(ac->session, 0, AUDIO_DOMAIN_MODEM); + rpc.hdr.context = ac->session; + rpc.hdr.opcode = ADSP_AUDIO_IOCTL_CMD_DATA_RX; + rpc.buffer.addr = ab->phys; + rpc.buffer.max_size = ab->size; + rpc.buffer.actual_size = ab->used; + + TRACE("%p: write\n", ac); + r = dal_call(ac->client, AUDIO_OP_DATA, 5, &rpc, sizeof(rpc), + &res, sizeof(res)); + return 0; +} + +static int audio_rx_volume(struct audio_client *ac, uint32_t dev_id, int32_t volume) +{ + struct adsp_set_dev_volume_command rpc; + + memset(&rpc, 0, sizeof(rpc)); + rpc.hdr.opcode = ADSP_AUDIO_IOCTL_CMD_SET_DEVICE_VOL; + rpc.device_id = dev_id; + rpc.path = ADSP_PATH_RX; + rpc.volume = volume; + return audio_ioctl(ac, &rpc, sizeof(rpc)); +} + +static int audio_rx_mute(struct audio_client *ac, uint32_t dev_id, int mute) +{ + struct adsp_set_dev_mute_command rpc; + + memset(&rpc, 0, sizeof(rpc)); + rpc.hdr.opcode = ADSP_AUDIO_IOCTL_CMD_SET_DEVICE_MUTE; + rpc.device_id = dev_id; + rpc.path = ADSP_PATH_RX; + rpc.mute = !!mute; + return audio_ioctl(ac, &rpc, sizeof(rpc)); +} + +static int audio_tx_volume(struct audio_client *ac, uint32_t dev_id, int32_t volume) +{ + struct adsp_set_dev_volume_command rpc; + + memset(&rpc, 0, sizeof(rpc)); + rpc.hdr.opcode = ADSP_AUDIO_IOCTL_CMD_SET_DEVICE_VOL; + rpc.device_id = dev_id; + rpc.path = ADSP_PATH_TX; + rpc.volume = volume; + return audio_ioctl(ac, &rpc, sizeof(rpc)); +} + +static int audio_tx_mute(struct audio_client *ac, uint32_t dev_id, int mute) +{ + struct adsp_set_dev_mute_command rpc; + + memset(&rpc, 0, sizeof(rpc)); + rpc.hdr.opcode = ADSP_AUDIO_IOCTL_CMD_SET_DEVICE_MUTE; + rpc.device_id = dev_id; + rpc.path = ADSP_PATH_TX; + rpc.mute = !!mute; + return audio_ioctl(ac, &rpc, sizeof(rpc)); +} + +static int audio_stream_volume(struct audio_client *ac, int volume) +{ + struct adsp_set_volume_command rpc; + int rc; + + memset(&rpc, 0, sizeof(rpc)); + rpc.hdr.opcode = ADSP_AUDIO_IOCTL_CMD_SET_STREAM_VOL; + rpc.volume = volume; + rc = audio_ioctl(ac, &rpc, sizeof(rpc)); + 
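+	/* rc carries the DSP callback status for SET_STREAM_VOL; the only caller shown, q6audio_set_stream_volume(), bounds vol to [-4000, 1200] before calling in. */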
return rc; +} + +static int audio_stream_mute(struct audio_client *ac, int mute) +{ + struct adsp_set_mute_command rpc; + int rc; + + memset(&rpc, 0, sizeof(rpc)); + rpc.hdr.opcode = ADSP_AUDIO_IOCTL_CMD_SET_STREAM_MUTE; + rpc.mute = mute; + rc = audio_ioctl(ac, &rpc, sizeof(rpc)); + return rc; +} + +static void callback(void *data, int len, void *cookie) +{ + struct adsp_event_hdr *e = data; + struct audio_client *ac; + + + if (e->context >= SESSION_MAX) { + pr_err("audio callback: bogus session %d\n", + e->context); + return; + } + ac = session[e->context]; + if (!ac) { + pr_err("audio callback: unknown session %d\n", + e->context); + return; + } + + if (e->event_id == ADSP_AUDIO_IOCTL_CMD_STREAM_EOS) { + TRACE("%p: CB stream eos\n", ac); + if (e->status) + pr_err("playback status %d\n", e->status); + if (ac->cb_status == -EBUSY) { + ac->cb_status = e->status; + wake_up(&ac->wait); + } + return; + } + + if (e->event_id == ADSP_AUDIO_EVT_STATUS_BUF_DONE) { + TRACE("%p: CB done (%d)\n", ac, e->status); + if (e->status) + pr_err("buffer status %d\n", e->status); + ac->buf[ac->dsp_buf].used = 0; + ac->dsp_buf ^= 1; + wake_up(&ac->wait); + return; + } + + TRACE("%p: CB %08x status %d\n", ac, e->event_id, e->status); + if (e->status) + pr_warning("audio_cb: s=%d e=%08x status=%d\n", + e->context, e->event_id, e->status); + if (ac->cb_status == -EBUSY) { + ac->cb_status = e->status; + wake_up(&ac->wait); + } +} + +static void audio_init(struct dal_client *client) +{ + u32 tmp[3]; + + tmp[0] = 2 * sizeof(u32); + tmp[1] = 1; + tmp[2] = 0; + dal_call(client, AUDIO_OP_INIT, 5, tmp, sizeof(tmp), + tmp, sizeof(u32)); +} + +static struct audio_client *ac_control; + +static int q6audio_init(void) +{ + struct audio_client *ac = 0; + int res; + + mutex_lock(&audio_lock); + if (ac_control) { + res = 0; + goto done; + } + + pr_info("audio: init: codecs\n"); + icodec_rx_clk = clk_get(0, "icodec_rx_clk"); + icodec_tx_clk = clk_get(0, "icodec_tx_clk"); + ecodec_clk = clk_get(0, "ecodec_clk"); + sdac_clk = clk_get(0, "sdac_clk"); + audio_data = dma_alloc_coherent(NULL, 4096, &audio_phys, GFP_KERNEL); + + adsp = dal_attach(AUDIO_DAL_DEVICE, AUDIO_DAL_PORT, + callback, 0); + if (!adsp) { + pr_err("audio_init: cannot attach to adsp\n"); + res = -ENODEV; + goto done; + } + pr_info("audio: init: INIT\n"); + audio_init(adsp); + dal_trace(adsp); + + ac = audio_client_alloc(0); + if (!ac) { + pr_err("audio_init: cannot allocate client\n"); + res = -ENOMEM; + goto done; + } + + pr_info("audio: init: OPEN control\n"); + if (audio_open_control(ac)) { + pr_err("audio_init: cannot open control channel\n"); + res = -ENODEV; + goto done; + } + + pr_info("audio: init: attach ACDB\n"); + acdb = dal_attach(ACDB_DAL_DEVICE, ACDB_DAL_PORT, 0, 0); + if (!acdb) { + pr_err("audio_init: cannot attach to acdb channel\n"); + res = -ENODEV; + goto done; + } + + pr_info("audio: init: attach ADIE\n"); + adie = dal_attach(ADIE_DAL_DEVICE, ADIE_DAL_PORT, 0, 0); + if (!adie) { + pr_err("audio_init: cannot attach to adie\n"); + res = -ENODEV; + goto done; + } + if (analog_ops->init) + analog_ops->init(); + + res = 0; + ac_control = ac; + + wake_lock_init(&idlelock, WAKE_LOCK_IDLE, "audio_pcm_idle"); + wake_lock_init(&wakelock, WAKE_LOCK_SUSPEND, "audio_pcm_suspend"); +done: + if ((res < 0) && ac) + audio_client_free(ac); + mutex_unlock(&audio_lock); + + return res; +} + +struct audio_config_data { + uint32_t device_id; + uint32_t sample_rate; + uint32_t offset; + uint32_t length; +}; + +struct audio_config_database { + uint8_t magic[8]; + 
uint32_t entry_count; + uint32_t unused; + struct audio_config_data entry[0]; +}; + +void *acdb_data; +const struct firmware *acdb_fw; +extern struct miscdevice q6_control_device; + +static int acdb_init(char *filename) +{ + const struct audio_config_database *db; + const struct firmware *fw; + int n; + + pr_info("acdb: load '%s'\n", filename); + if (request_firmware(&fw, filename, q6_control_device.this_device) < 0) { + pr_err("acdb: load 'default.acdb' failed...\n"); + return -ENODEV; + } + db = (void*) fw->data; + + if (fw->size < sizeof(struct audio_config_database)) { + pr_err("acdb: undersized database\n"); + goto fail; + } + if (strcmp(db->magic, "ACDB1.0")) { + pr_err("acdb: invalid magic\n"); + goto fail; + } + if (db->entry_count > 1024) { + pr_err("acdb: too many entries\n"); + goto fail; + } + if (fw->size < (sizeof(struct audio_config_database) + + db->entry_count * sizeof(struct audio_config_data))) { + pr_err("acdb: undersized TOC\n"); + goto fail; + } + for (n = 0; n < db->entry_count; n++) { + if (db->entry[n].length > 4096) { + pr_err("acdb: entry %d too large (%d)\n", + n, db->entry[n].length); + goto fail; + } + if ((db->entry[n].offset + db->entry[n].length) > fw->size) { + pr_err("acdb: entry %d outside of data\n", n); + goto fail; + } + } + if (acdb_data) + release_firmware(acdb_fw); + acdb_data = (void*) fw->data; + acdb_fw = fw; + return 0; +fail: + release_firmware(fw); + return -ENODEV; +} + +static int acdb_get_config_table(uint32_t device_id, uint32_t sample_rate) +{ + struct audio_config_database *db; + int n, res; + + if (q6audio_init()) + return 0; + + if (!acdb_data) { + res = acdb_init(acdb_file); + if (res) + return res; + } + + db = acdb_data; + for (n = 0; n < db->entry_count; n++) { + if (db->entry[n].device_id != device_id) + continue; + if (db->entry[n].sample_rate != sample_rate) + continue; + break; + } + + if (n == db->entry_count) { + pr_err("acdb: no entry for device %d, rate %d.\n", + device_id, sample_rate); + return 0; + } + + pr_info("acdb: %d bytes for device %d, rate %d.\n", + db->entry[n].length, device_id, sample_rate); + + memcpy(audio_data, acdb_data + db->entry[n].offset, db->entry[n].length); + return db->entry[n].length; +} + +static uint32_t audio_rx_path_id = ADIE_PATH_HANDSET_RX; +static uint32_t audio_rx_device_id = ADSP_AUDIO_DEVICE_ID_HANDSET_SPKR; +static uint32_t audio_rx_device_group = -1; +static uint32_t audio_tx_path_id = ADIE_PATH_HANDSET_TX; +static uint32_t audio_tx_device_id = ADSP_AUDIO_DEVICE_ID_HANDSET_MIC; +static uint32_t audio_tx_device_group = -1; + +static int qdsp6_devchg_notify(struct audio_client *ac, + uint32_t dev_type, uint32_t dev_id) +{ + struct adsp_device_switch_command rpc; + + if (dev_type != ADSP_AUDIO_RX_DEVICE && + dev_type != ADSP_AUDIO_TX_DEVICE) + return -EINVAL; + + memset(&rpc, 0, sizeof(rpc)); + rpc.hdr.opcode = ADSP_AUDIO_IOCTL_CMD_DEVICE_SWITCH_PREPARE; + if (dev_type == ADSP_AUDIO_RX_DEVICE) { + rpc.old_device = audio_rx_device_id; + rpc.new_device = dev_id; + } else { + rpc.old_device = audio_tx_device_id; + rpc.new_device = dev_id; + } + rpc.device_class = 0; + rpc.device_type = dev_type; + return audio_ioctl(ac, &rpc, sizeof(rpc)); +} + +static int qdsp6_standby(struct audio_client *ac) +{ + return audio_command(ac, ADSP_AUDIO_IOCTL_CMD_DEVICE_SWITCH_STANDBY); +} + +static int qdsp6_start(struct audio_client *ac) +{ + return audio_command(ac, ADSP_AUDIO_IOCTL_CMD_DEVICE_SWITCH_COMMIT); +} + +static void audio_rx_analog_enable(int en) +{ + switch (audio_rx_device_id) { + case 
ADSP_AUDIO_DEVICE_ID_HEADSET_SPKR_MONO: + case ADSP_AUDIO_DEVICE_ID_HEADSET_SPKR_STEREO: + case ADSP_AUDIO_DEVICE_ID_TTY_HEADSET_SPKR: + if (analog_ops->headset_enable) + analog_ops->headset_enable(en); + break; + case ADSP_AUDIO_DEVICE_ID_SPKR_PHONE_MONO_W_MONO_HEADSET: + case ADSP_AUDIO_DEVICE_ID_SPKR_PHONE_MONO_W_STEREO_HEADSET: + case ADSP_AUDIO_DEVICE_ID_SPKR_PHONE_STEREO_W_MONO_HEADSET: + case ADSP_AUDIO_DEVICE_ID_SPKR_PHONE_STEREO_W_STEREO_HEADSET: + if (analog_ops->headset_enable) + analog_ops->headset_enable(en); + if (analog_ops->speaker_enable) + analog_ops->speaker_enable(en); + break; + case ADSP_AUDIO_DEVICE_ID_SPKR_PHONE_MONO: + case ADSP_AUDIO_DEVICE_ID_SPKR_PHONE_STEREO: + if (analog_ops->speaker_enable) + analog_ops->speaker_enable(en); + break; + case ADSP_AUDIO_DEVICE_ID_BT_SCO_SPKR: + if (analog_ops->bt_sco_enable) + analog_ops->bt_sco_enable(en); + break; + case ADSP_AUDIO_DEVICE_ID_HANDSET_SPKR: + if (analog_ops->receiver_enable) + analog_ops->receiver_enable(en); + break; + case ADSP_AUDIO_DEVICE_ID_I2S_SPKR: + if (analog_ops->i2s_enable) + analog_ops->i2s_enable(en); + break; + } +} + +static void audio_tx_analog_enable(int en) +{ + switch (audio_tx_device_id) { + case ADSP_AUDIO_DEVICE_ID_HANDSET_MIC: + case ADSP_AUDIO_DEVICE_ID_SPKR_PHONE_MIC: + if (analog_ops->int_mic_enable) + analog_ops->int_mic_enable(en); + break; + case ADSP_AUDIO_DEVICE_ID_HEADSET_MIC: + case ADSP_AUDIO_DEVICE_ID_TTY_HEADSET_MIC: + if (analog_ops->ext_mic_enable) + analog_ops->ext_mic_enable(en); + break; + case ADSP_AUDIO_DEVICE_ID_BT_SCO_MIC: + if (analog_ops->bt_sco_enable) + analog_ops->bt_sco_enable(en); + break; + } +} + +static int audio_update_acdb(uint32_t adev, uint32_t acdb_id) +{ + uint32_t sample_rate; + int sz = -1; + + sample_rate = q6_device_to_rate(adev); + + if (q6_device_to_dir(adev) == Q6_RX) + rx_acdb = acdb_id; + else + tx_acdb = acdb_id; + + if (acdb_id != 0) + sz = acdb_get_config_table(acdb_id, sample_rate); + + if (sz <= 0) { + acdb_id = q6_device_to_cad_id(adev); + sz = acdb_get_config_table(acdb_id, sample_rate); + if (sz <= 0) + return -EINVAL; + } + + audio_set_table(ac_control, adev, sz); + return 0; +} + +static void _audio_rx_path_enable(int reconf, uint32_t acdb_id) +{ + adie_enable(); + adie_set_path(adie, audio_rx_path_id, ADIE_PATH_RX); + adie_set_path_freq_plan(adie, ADIE_PATH_RX, 48000); + + adie_proceed_to_stage(adie, ADIE_PATH_RX, ADIE_STAGE_DIGITAL_READY); + adie_proceed_to_stage(adie, ADIE_PATH_RX, ADIE_STAGE_DIGITAL_ANALOG_READY); + + if (!reconf) + qdsp6_devchg_notify(ac_control, ADSP_AUDIO_RX_DEVICE, audio_rx_device_id); + audio_update_acdb(audio_rx_device_id, acdb_id); + qdsp6_standby(ac_control); + qdsp6_start(ac_control); + + audio_rx_analog_enable(1); +} + +static void _audio_tx_path_enable(int reconf, uint32_t acdb_id) +{ + audio_tx_analog_enable(1); + + adie_enable(); + adie_set_path(adie, audio_tx_path_id, ADIE_PATH_TX); + + if (tx_clk_freq > 8000) + adie_set_path_freq_plan(adie, ADIE_PATH_TX, 48000); + else + adie_set_path_freq_plan(adie, ADIE_PATH_TX, 8000); + + adie_proceed_to_stage(adie, ADIE_PATH_TX, ADIE_STAGE_DIGITAL_READY); + adie_proceed_to_stage(adie, ADIE_PATH_TX, ADIE_STAGE_DIGITAL_ANALOG_READY); + + if (!reconf) + qdsp6_devchg_notify(ac_control, ADSP_AUDIO_TX_DEVICE, audio_tx_device_id); + audio_update_acdb(audio_tx_device_id, acdb_id); + qdsp6_standby(ac_control); + qdsp6_start(ac_control); + + audio_tx_mute(ac_control, audio_tx_device_id, tx_mute_status); +} + +static void _audio_rx_path_disable(void) +{ + 
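+	/* Reverse of _audio_rx_path_enable(): drop the analog outputs first, then walk the ADIE RX path back through ANALOG_OFF and DIGITAL_OFF before releasing the ADIE client refcount. */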
audio_rx_analog_enable(0); + + adie_proceed_to_stage(adie, ADIE_PATH_RX, ADIE_STAGE_ANALOG_OFF); + adie_proceed_to_stage(adie, ADIE_PATH_RX, ADIE_STAGE_DIGITAL_OFF); + adie_disable(); +} + +static void _audio_tx_path_disable(void) +{ + audio_tx_analog_enable(0); + + adie_proceed_to_stage(adie, ADIE_PATH_TX, ADIE_STAGE_ANALOG_OFF); + adie_proceed_to_stage(adie, ADIE_PATH_TX, ADIE_STAGE_DIGITAL_OFF); + adie_disable(); +} + +static int icodec_rx_clk_refcount; +static int icodec_tx_clk_refcount; +static int ecodec_clk_refcount; +static int sdac_clk_refcount; + +static void _audio_rx_clk_enable(void) +{ + uint32_t device_group = q6_device_to_codec(audio_rx_device_id); + + switch(device_group) { + case Q6_ICODEC_RX: + icodec_rx_clk_refcount++; + if (icodec_rx_clk_refcount == 1) { + clk_set_rate(icodec_rx_clk, 12288000); + clk_enable(icodec_rx_clk); + } + break; + case Q6_ECODEC_RX: + ecodec_clk_refcount++; + if (ecodec_clk_refcount == 1) { + clk_set_rate(ecodec_clk, 2048000); + clk_enable(ecodec_clk); + } + break; + case Q6_SDAC_RX: + sdac_clk_refcount++; + if (sdac_clk_refcount == 1) { + clk_set_rate(sdac_clk, 12288000); + clk_enable(sdac_clk); + } + break; + default: + return; + } + audio_rx_device_group = device_group; +} + +static void _audio_tx_clk_enable(void) +{ + uint32_t device_group = q6_device_to_codec(audio_tx_device_id); + + switch (device_group) { + case Q6_ICODEC_TX: + icodec_tx_clk_refcount++; + if (icodec_tx_clk_refcount == 1) { + clk_set_rate(icodec_tx_clk, tx_clk_freq * 256); + clk_enable(icodec_tx_clk); + } + break; + case Q6_ECODEC_TX: + ecodec_clk_refcount++; + if (ecodec_clk_refcount == 1) { + clk_set_rate(ecodec_clk, 2048000); + clk_enable(ecodec_clk); + } + break; + case Q6_SDAC_TX: + /* TODO: In QCT BSP, clk rate was set to 20480000 */ + sdac_clk_refcount++; + if (sdac_clk_refcount == 1) { + clk_set_rate(sdac_clk, 12288000); + clk_enable(sdac_clk); + } + break; + default: + return; + } + audio_tx_device_group = device_group; +} + +static void _audio_rx_clk_disable(void) +{ + switch (audio_rx_device_group) { + case Q6_ICODEC_RX: + icodec_rx_clk_refcount--; + if (icodec_rx_clk_refcount == 0) { + clk_disable(icodec_rx_clk); + audio_rx_device_group = -1; + } + break; + case Q6_ECODEC_RX: + ecodec_clk_refcount--; + if (ecodec_clk_refcount == 0) { + clk_disable(ecodec_clk); + audio_rx_device_group = -1; + } + break; + case Q6_SDAC_RX: + sdac_clk_refcount--; + if (sdac_clk_refcount == 0) { + clk_disable(sdac_clk); + audio_rx_device_group = -1; + } + break; + default: + pr_err("audiolib: invalid rx device group %d\n", + audio_rx_device_group); + break; + } +} + +static void _audio_tx_clk_disable(void) +{ + switch (audio_tx_device_group) { + case Q6_ICODEC_TX: + icodec_tx_clk_refcount--; + if (icodec_tx_clk_refcount == 0) { + clk_disable(icodec_tx_clk); + audio_tx_device_group = -1; + } + break; + case Q6_ECODEC_TX: + ecodec_clk_refcount--; + if (ecodec_clk_refcount == 0) { + clk_disable(ecodec_clk); + audio_tx_device_group = -1; + } + break; + case Q6_SDAC_TX: + sdac_clk_refcount--; + if (sdac_clk_refcount == 0) { + clk_disable(sdac_clk); + audio_tx_device_group = -1; + } + break; + default: + pr_err("audiolib: invalid tx device group %d\n", + audio_tx_device_group); + break; + } +} + +static void _audio_rx_clk_reinit(uint32_t rx_device) +{ + uint32_t device_group = q6_device_to_codec(rx_device); + + if (device_group != audio_rx_device_group) + _audio_rx_clk_disable(); + + audio_rx_device_id = rx_device; + audio_rx_path_id = q6_device_to_path(rx_device); + + if (device_group != 
audio_rx_device_group) + _audio_rx_clk_enable(); + +} + +static void _audio_tx_clk_reinit(uint32_t tx_device) +{ + uint32_t device_group = q6_device_to_codec(tx_device); + + if (device_group != audio_tx_device_group) + _audio_tx_clk_disable(); + + audio_tx_device_id = tx_device; + audio_tx_path_id = q6_device_to_path(tx_device); + + if (device_group != audio_tx_device_group) + _audio_tx_clk_enable(); +} + +static DEFINE_MUTEX(audio_path_lock); +static int audio_rx_path_refcount; +static int audio_tx_path_refcount; + +static int audio_rx_path_enable(int en, uint32_t acdb_id) +{ + mutex_lock(&audio_path_lock); + if (en) { + audio_rx_path_refcount++; + if (audio_rx_path_refcount == 1) { + _audio_rx_clk_enable(); + _audio_rx_path_enable(0, acdb_id); + } + } else { + audio_rx_path_refcount--; + if (audio_rx_path_refcount == 0) { + _audio_rx_path_disable(); + _audio_rx_clk_disable(); + } + } + mutex_unlock(&audio_path_lock); + return 0; +} + +static int audio_tx_path_enable(int en, uint32_t acdb_id) +{ + mutex_lock(&audio_path_lock); + if (en) { + audio_tx_path_refcount++; + if (audio_tx_path_refcount == 1) { + _audio_tx_clk_enable(); + _audio_tx_path_enable(0, acdb_id); + } + } else { + audio_tx_path_refcount--; + if (audio_tx_path_refcount == 0) { + _audio_tx_path_disable(); + _audio_tx_clk_disable(); + } + } + mutex_unlock(&audio_path_lock); + return 0; +} + +int q6audio_reinit_acdb(char* filename) { + int res; + + if (q6audio_init()) + return 0; + + mutex_lock(&audio_path_lock); + if (strlen(filename) < 0 || !strcmp(filename, acdb_file)) { + res = -EINVAL; + goto done; + } + res = acdb_init(filename); + if (!res) + strcpy(acdb_file, filename); +done: + mutex_unlock(&audio_path_lock); + return res; + +} + +int q6audio_update_acdb(uint32_t id_src, uint32_t id_dst) +{ + int res; + + if (q6audio_init()) + return 0; + + mutex_lock(&audio_path_lock); + + if (q6_device_to_dir(id_dst) == Q6_RX) + qdsp6_devchg_notify(ac_control, ADSP_AUDIO_RX_DEVICE, id_dst); + else + qdsp6_devchg_notify(ac_control, ADSP_AUDIO_TX_DEVICE, id_dst); + res = audio_update_acdb(id_dst, id_src); + if (res) + goto done; + + qdsp6_standby(ac_control); + qdsp6_start(ac_control); +done: + mutex_unlock(&audio_path_lock); + return res; +} + +int q6audio_set_tx_mute(int mute) +{ + uint32_t adev; + + if (q6audio_init()) + return 0; + + mutex_lock(&audio_path_lock); + + if (mute == tx_mute_status) { + mutex_unlock(&audio_path_lock); + return 0; + } + + adev = audio_tx_device_id; + audio_tx_mute(ac_control, adev, mute); + tx_mute_status = mute; + mutex_unlock(&audio_path_lock); + return 0; +} + +int q6audio_set_stream_volume(struct audio_client *ac, int vol) +{ + if (vol > 1200 || vol < -4000) { + pr_err("unsupported volume level %d\n", vol); + return -EINVAL; + } + mutex_lock(&audio_path_lock); + audio_stream_mute(ac, 0); + audio_stream_volume(ac, vol); + mutex_unlock(&audio_path_lock); + return 0; +} + +int q6audio_set_rx_volume(int level) +{ + uint32_t adev; + int vol; + + if (q6audio_init()) + return 0; + + if (level < 0 || level > 100) + return -EINVAL; + + mutex_lock(&audio_path_lock); + adev = ADSP_AUDIO_DEVICE_ID_VOICE; + vol = q6_device_volume(audio_rx_device_id, level); + audio_rx_mute(ac_control, adev, 0); + audio_rx_volume(ac_control, adev, vol); + rx_vol_level = level; + mutex_unlock(&audio_path_lock); + return 0; +} + +static void do_rx_routing(uint32_t device_id, uint32_t acdb_id) +{ + if (device_id == audio_rx_device_id) { + if (acdb_id != rx_acdb) { + qdsp6_devchg_notify(ac_control, ADSP_AUDIO_RX_DEVICE, device_id); 
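+			/* Same RX device, new calibration: push the new ACDB table, then cycle the device through standby/commit so the DSP picks it up. */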
+ audio_update_acdb(device_id, acdb_id); + qdsp6_standby(ac_control); + qdsp6_start(ac_control); + } + return; + } + + if (audio_rx_path_refcount > 0) { + qdsp6_devchg_notify(ac_control, ADSP_AUDIO_RX_DEVICE, device_id); + _audio_rx_path_disable(); + _audio_rx_clk_reinit(device_id); + _audio_rx_path_enable(1, acdb_id); + } else { + qdsp6_devchg_notify(ac_control, ADSP_AUDIO_RX_DEVICE, + device_id); + audio_update_acdb(device_id, acdb_id); + qdsp6_standby(ac_control); + qdsp6_start(ac_control); + audio_rx_device_id = device_id; + audio_rx_path_id = q6_device_to_path(device_id); + } +} + +static void do_tx_routing(uint32_t device_id, uint32_t acdb_id) +{ + if (device_id == audio_tx_device_id) { + if (acdb_id != tx_acdb) { + qdsp6_devchg_notify(ac_control, ADSP_AUDIO_TX_DEVICE, + device_id); + audio_update_acdb(device_id, acdb_id); + qdsp6_standby(ac_control); + qdsp6_start(ac_control); + } + return; + } + + if (audio_tx_path_refcount > 0) { + qdsp6_devchg_notify(ac_control, ADSP_AUDIO_TX_DEVICE, device_id); + _audio_tx_path_disable(); + _audio_tx_clk_reinit(device_id); + _audio_tx_path_enable(1, acdb_id); + } else { + qdsp6_devchg_notify(ac_control, ADSP_AUDIO_TX_DEVICE, + device_id); + audio_update_acdb(device_id, acdb_id); + qdsp6_standby(ac_control); + qdsp6_start(ac_control); + audio_tx_device_id = device_id; + audio_tx_path_id = q6_device_to_path(device_id); + } +} + +int q6audio_do_routing(uint32_t device_id, uint32_t acdb_id) +{ + if (q6audio_init()) + return 0; + + mutex_lock(&audio_path_lock); + + switch(q6_device_to_dir(device_id)) { + case Q6_RX: + do_rx_routing(device_id, acdb_id); + break; + case Q6_TX: + do_tx_routing(device_id, acdb_id); + break; + } + + mutex_unlock(&audio_path_lock); + return 0; +} + +int q6audio_set_route(const char *name) +{ + uint32_t route; + if (!strcmp(name, "speaker")) { + route = ADIE_PATH_SPEAKER_STEREO_RX; + } else if (!strcmp(name, "headphones")) { + route = ADIE_PATH_HEADSET_STEREO_RX; + } else if (!strcmp(name, "handset")) { + route = ADIE_PATH_HANDSET_RX; + } else { + return -EINVAL; + } + + mutex_lock(&audio_path_lock); + if (route == audio_rx_path_id) + goto done; + + audio_rx_path_id = route; + + if (audio_rx_path_refcount > 0) { + _audio_rx_path_disable(); + _audio_rx_path_enable(1, 0); + } + if (audio_tx_path_refcount > 0) { + _audio_tx_path_disable(); + _audio_tx_path_enable(1, 0); + } +done: + mutex_unlock(&audio_path_lock); + return 0; +} + +static void adie_rx_path_enable(uint32_t acdb_id) +{ + adie_enable(); + adie_set_path(adie, audio_rx_path_id, ADIE_PATH_RX); + adie_set_path_freq_plan(adie, ADIE_PATH_RX, 48000); + + adie_proceed_to_stage(adie, ADIE_PATH_RX, + ADIE_STAGE_DIGITAL_READY); + adie_proceed_to_stage(adie, ADIE_PATH_RX, + ADIE_STAGE_DIGITAL_ANALOG_READY); +} + +static void q6_rx_path_enable(int reconf, uint32_t acdb_id) +{ + audio_update_acdb(audio_rx_device_id, acdb_id); + if (!reconf) + qdsp6_devchg_notify(ac_control, ADSP_AUDIO_RX_DEVICE, audio_rx_device_id); + qdsp6_standby(ac_control); + qdsp6_start(ac_control); +} + + + +struct audio_client *q6audio_open_pcm(uint32_t bufsz, uint32_t rate, + uint32_t channels, uint32_t flags, uint32_t acdb_id) +{ + int rc, retry = 5; + struct audio_client *ac; + + if (q6audio_init()) + return 0; + + ac = audio_client_alloc(bufsz); + if (!ac) + return 0; + + ac->flags = flags; + + mutex_lock(&audio_path_lock); +#if 0 + if (ac->flags & AUDIO_FLAG_WRITE) { + audio_rx_path_refcount++; + if (audio_rx_path_refcount == 1) { + _audio_rx_clk_enable(); + audio_update_acdb(audio_rx_device_id, 
acdb_id); + qdsp6_devchg_notify(ac_control, ADSP_AUDIO_RX_DEVICE, audio_rx_device_id); + qdsp6_standby(ac_control); + qdsp6_start(ac_control); + } + } else { + /* TODO: consider concurrency with voice call */ + tx_clk_freq = rate; + audio_tx_path_refcount++; + if (audio_tx_path_refcount == 1) { + _audio_tx_clk_enable(); + _audio_tx_path_enable(0, acdb_id); + } + } + + for (retry = 5;;retry--) { + if (ac->flags & AUDIO_FLAG_WRITE) + rc = audio_out_open(ac, bufsz, rate, channels); + else + rc = audio_in_open(ac, bufsz, rate, channels); + if (rc == 0) + break; + if (retry == 0) + q6audio_dsp_not_responding(); + pr_err("q6audio: open pcm error %d, retrying\n", rc); + msleep(1); + } + + if (ac->flags & AUDIO_FLAG_WRITE) { + if (audio_rx_path_refcount == 1) { + adie_enable(); + adie_set_path(adie, audio_rx_path_id, ADIE_PATH_RX); + adie_set_path_freq_plan(adie, ADIE_PATH_RX, 48000); + + adie_proceed_to_stage(adie, ADIE_PATH_RX, ADIE_STAGE_DIGITAL_READY); + adie_proceed_to_stage(adie, ADIE_PATH_RX, ADIE_STAGE_DIGITAL_ANALOG_READY); + + audio_rx_analog_enable(1); + } + } +#else + if (ac->flags & AUDIO_FLAG_WRITE) { + audio_rx_path_refcount++; + if (audio_rx_path_refcount == 1) { + _audio_rx_clk_enable(); + q6_rx_path_enable(0, acdb_id); + adie_rx_path_enable(acdb_id); + } + } else { + /* TODO: consider concurrency with voice call */ + if (audio_tx_path_refcount > 0) { + tx_clk_freq = 8000; + } else { + tx_clk_freq = rate; + } + audio_tx_path_refcount++; + if (audio_tx_path_refcount == 1) { + tx_clk_freq = rate; + _audio_tx_clk_enable(); + _audio_tx_path_enable(0, acdb_id); + } + } + + for (retry = 5;;retry--) { + if (ac->flags & AUDIO_FLAG_WRITE) + rc = audio_out_open(ac, bufsz, rate, channels); + else + rc = audio_in_open(ac, bufsz, flags, rate, channels); + if (rc == 0) + break; + if (retry == 0) + BUG(); + pr_err("q6audio: open pcm error %d, retrying\n", rc); + msleep(1); + } + + if (ac->flags & AUDIO_FLAG_WRITE) { + if (audio_rx_path_refcount == 1) + audio_rx_analog_enable(1); + } +#endif + mutex_unlock(&audio_path_lock); + + for (retry = 5;;retry--) { + rc = audio_command(ac, ADSP_AUDIO_IOCTL_CMD_SESSION_START); + if (rc == 0) + break; + if (retry == 0) + q6audio_dsp_not_responding(); + pr_err("q6audio: stream start error %d, retrying\n", rc); + } + + if (!(ac->flags & AUDIO_FLAG_WRITE)) { + ac->buf[0].used = 1; + ac->buf[1].used = 1; + q6audio_read(ac, &ac->buf[0]); + q6audio_read(ac, &ac->buf[1]); + } + + audio_prevent_sleep(); + return ac; +} + +int q6audio_close(struct audio_client *ac) +{ + audio_close(ac); + if (ac->flags & AUDIO_FLAG_WRITE) + audio_rx_path_enable(0, 0); + else + audio_tx_path_enable(0, 0); + + audio_client_free(ac); + audio_allow_sleep(); + return 0; +} + +struct audio_client *q6voice_open(uint32_t flags, uint32_t acdb_id) +{ + struct audio_client *ac; + + if (q6audio_init()) + return 0; + + ac = audio_client_alloc(0); + if (!ac) + return 0; + + ac->flags = flags; + if (ac->flags & AUDIO_FLAG_WRITE) + audio_rx_path_enable(1, acdb_id); + else { + tx_clk_freq = 8000; + audio_tx_path_enable(1, acdb_id); + } + + return ac; +} + +int q6voice_close(struct audio_client *ac) +{ + if (ac->flags & AUDIO_FLAG_WRITE) + audio_rx_path_enable(0, 0); + else + audio_tx_path_enable(0, 0); + + audio_client_free(ac); + return 0; +} + +struct audio_client *q6audio_open_mp3(uint32_t bufsz, uint32_t rate, + uint32_t channels, uint32_t acdb_id) +{ + struct audio_client *ac; + + printk("q6audio_open_mp3()\n"); + + if (q6audio_init()) + return 0; + + ac = audio_client_alloc(bufsz); + if (!ac) + 
return 0; + + ac->flags = AUDIO_FLAG_WRITE; + audio_rx_path_enable(1, acdb_id); + + audio_mp3_open(ac, bufsz, rate, channels); + audio_command(ac, ADSP_AUDIO_IOCTL_CMD_SESSION_START); + + return ac; +} + +int q6audio_mp3_close(struct audio_client *ac) +{ + audio_close(ac); + audio_rx_path_enable(0, 0); + audio_client_free(ac); + return 0; +} + +int q6audio_async(struct audio_client *ac) +{ + struct adsp_command_hdr rpc; + memset(&rpc, 0, sizeof(rpc)); + rpc.opcode = ADSP_AUDIO_IOCTL_CMD_STREAM_EOS; + rpc.response_type = ADSP_AUDIO_RESPONSE_ASYNC; + return audio_ioctl(ac, &rpc, sizeof(rpc)); +} + +struct audio_client *q6fm_open(void) +{ + struct audio_client *ac; + + if (q6audio_init()) + return 0; + +/* if (audio_rx_device_id != ADSP_AUDIO_DEVICE_ID_HEADSET_SPKR_STEREO && + audio_rx_device_id != ADSP_AUDIO_DEVICE_ID_SPKR_PHONE_MONO) + return 0; +*/ + ac = audio_client_alloc(0); + if (!ac) + return 0; + + ac->flags = AUDIO_FLAG_WRITE; + audio_rx_path_enable(1, 0); + enable_aux_loopback(1); + + return ac; +} + +int q6fm_close(struct audio_client *ac) +{ + audio_rx_path_enable(0, 0); + enable_aux_loopback(0); + audio_client_free(ac); + return 0; +} + diff --git a/arch/arm/mach-msm/qdsp6/q6audio_devices.h b/arch/arm/mach-msm/qdsp6/q6audio_devices.h new file mode 100644 index 0000000000000..d4d30b57c3554 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6/q6audio_devices.h @@ -0,0 +1,265 @@ +/* arch/arm/mach-msm/qdsp6/q6audio_devices.h + * + * Copyright (C) 2009 Google, Inc. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +struct q6_device_info { + uint32_t id; + uint32_t cad_id; + uint32_t path; + uint32_t rate; + uint8_t dir; + uint8_t codec; + uint8_t hw; +}; + +#define Q6_ICODEC_RX 0 +#define Q6_ICODEC_TX 1 +#define Q6_ECODEC_RX 2 +#define Q6_ECODEC_TX 3 +#define Q6_SDAC_RX 6 +#define Q6_SDAC_TX 7 +#define Q6_CODEC_NONE 255 + +#define Q6_TX 1 +#define Q6_RX 2 +#define Q6_TX_RX 3 + +#define CAD_HW_DEVICE_ID_HANDSET_MIC 0x01 +#define CAD_HW_DEVICE_ID_HANDSET_SPKR 0x02 +#define CAD_HW_DEVICE_ID_HEADSET_MIC 0x03 +#define CAD_HW_DEVICE_ID_HEADSET_SPKR_MONO 0x04 +#define CAD_HW_DEVICE_ID_HEADSET_SPKR_STEREO 0x05 +#define CAD_HW_DEVICE_ID_SPKR_PHONE_MIC 0x06 +#define CAD_HW_DEVICE_ID_SPKR_PHONE_MONO 0x07 +#define CAD_HW_DEVICE_ID_SPKR_PHONE_STEREO 0x08 +#define CAD_HW_DEVICE_ID_BT_SCO_MIC 0x09 +#define CAD_HW_DEVICE_ID_BT_SCO_SPKR 0x0A +#define CAD_HW_DEVICE_ID_BT_A2DP_SPKR 0x0B +#define CAD_HW_DEVICE_ID_TTY_HEADSET_MIC 0x0C +#define CAD_HW_DEVICE_ID_TTY_HEADSET_SPKR 0x0D + +#define CAD_HW_DEVICE_ID_DEFAULT_TX 0x0E +#define CAD_HW_DEVICE_ID_DEFAULT_RX 0x0F + +/* Logical Device to indicate A2DP routing */ +#define CAD_HW_DEVICE_ID_BT_A2DP_TX 0x10 +#define CAD_HW_DEVICE_ID_HEADSET_MONO_PLUS_SPKR_MONO_RX 0x11 +#define CAD_HW_DEVICE_ID_HEADSET_MONO_PLUS_SPKR_STEREO_RX 0x12 +#define CAD_HW_DEVICE_ID_HEADSET_STEREO_PLUS_SPKR_MONO_RX 0x13 +#define CAD_HW_DEVICE_ID_HEADSET_STEREO_PLUS_SPKR_STEREO_RX 0x14 + +#define CAD_HW_DEVICE_ID_VOICE 0x15 + +#define CAD_HW_DEVICE_ID_I2S_RX 0x20 +#define CAD_HW_DEVICE_ID_I2S_TX 0x21 + +/* AUXPGA */ +#define CAD_HW_DEVICE_ID_HEADSET_SPKR_STEREO_LB 0x22 +#define CAD_HW_DEVICE_ID_HEADSET_SPKR_MONO_LB 0x23 +#define CAD_HW_DEVICE_ID_SPEAKER_SPKR_STEREO_LB 0x24 +#define CAD_HW_DEVICE_ID_SPEAKER_SPKR_MONO_LB 0x25 + +#define CAD_HW_DEVICE_ID_NULL_RX 0x2A + +#define CAD_HW_DEVICE_ID_MAX_NUM 0x2F + +#define CAD_HW_DEVICE_ID_INVALID 0xFF + +#define CAD_RX_DEVICE 0x00 +#define CAD_TX_DEVICE 0x01 + +static struct q6_device_info q6_audio_devices[] = { + { + .id = ADSP_AUDIO_DEVICE_ID_HANDSET_SPKR, + .cad_id = CAD_HW_DEVICE_ID_HANDSET_SPKR, + .path = ADIE_PATH_HANDSET_RX, + .rate = 48000, + .dir = Q6_RX, + .codec = Q6_ICODEC_RX, + .hw = Q6_HW_HANDSET, + }, + { + .id = ADSP_AUDIO_DEVICE_ID_HEADSET_SPKR_MONO, + .cad_id = CAD_HW_DEVICE_ID_HEADSET_SPKR_MONO, + .path = ADIE_PATH_HEADSET_MONO_RX, + .rate = 48000, + .dir = Q6_RX, + .codec = Q6_ICODEC_RX, + .hw = Q6_HW_HEADSET, + }, + { + .id = ADSP_AUDIO_DEVICE_ID_HEADSET_SPKR_STEREO, + .cad_id = CAD_HW_DEVICE_ID_HEADSET_SPKR_STEREO, + .path = ADIE_PATH_HEADSET_STEREO_RX, + .rate = 48000, + .dir = Q6_RX, + .codec = Q6_ICODEC_RX, + .hw = Q6_HW_HEADSET, + }, + { + .id = ADSP_AUDIO_DEVICE_ID_SPKR_PHONE_MONO, + .cad_id = CAD_HW_DEVICE_ID_SPKR_PHONE_MONO, + .path = ADIE_PATH_SPEAKER_RX, + .rate = 48000, + .dir = Q6_RX, + .codec = Q6_ICODEC_RX, + .hw = Q6_HW_SPEAKER, + }, + { + .id = ADSP_AUDIO_DEVICE_ID_SPKR_PHONE_STEREO, + .cad_id = CAD_HW_DEVICE_ID_SPKR_PHONE_STEREO, + .path = ADIE_PATH_SPEAKER_STEREO_RX, + .rate = 48000, + .dir = Q6_RX, + .codec = Q6_ICODEC_RX, + .hw = Q6_HW_SPEAKER, + }, + { + .id = ADSP_AUDIO_DEVICE_ID_SPKR_PHONE_MONO_W_MONO_HEADSET, + .cad_id = CAD_HW_DEVICE_ID_HEADSET_MONO_PLUS_SPKR_MONO_RX, + .path = ADIE_PATH_SPKR_MONO_HDPH_MONO_RX, + .rate = 48000, + .dir = Q6_RX, + .codec = Q6_ICODEC_RX, + .hw = Q6_HW_SPEAKER, + }, + { + .id = ADSP_AUDIO_DEVICE_ID_SPKR_PHONE_MONO_W_STEREO_HEADSET, + .cad_id = CAD_HW_DEVICE_ID_HEADSET_STEREO_PLUS_SPKR_MONO_RX, + .path = ADIE_PATH_SPKR_MONO_HDPH_STEREO_RX, + .rate = 48000, + .dir = Q6_RX, 
+ .codec = Q6_ICODEC_RX, + .hw = Q6_HW_SPEAKER, + }, + { + .id = ADSP_AUDIO_DEVICE_ID_SPKR_PHONE_STEREO_W_MONO_HEADSET, + .cad_id = CAD_HW_DEVICE_ID_HEADSET_MONO_PLUS_SPKR_STEREO_RX, + .path = ADIE_PATH_SPKR_STEREO_HDPH_MONO_RX, + .rate = 48000, + .dir = Q6_RX, + .codec = Q6_ICODEC_RX, + .hw = Q6_HW_SPEAKER, + }, + { + .id = ADSP_AUDIO_DEVICE_ID_SPKR_PHONE_STEREO_W_STEREO_HEADSET, + .cad_id = CAD_HW_DEVICE_ID_HEADSET_STEREO_PLUS_SPKR_STEREO_RX, + .path = ADIE_PATH_SPKR_STEREO_HDPH_STEREO_RX, + .rate = 48000, + .dir = Q6_RX, + .codec = Q6_ICODEC_RX, + .hw = Q6_HW_SPEAKER, + }, + { + .id = ADSP_AUDIO_DEVICE_ID_TTY_HEADSET_SPKR, + .cad_id = CAD_HW_DEVICE_ID_TTY_HEADSET_SPKR, + .path = ADIE_PATH_TTY_HEADSET_RX, + .rate = 48000, + .dir = Q6_RX, + .codec = Q6_ICODEC_RX, + .hw = Q6_HW_TTY, + }, + { + .id = ADSP_AUDIO_DEVICE_ID_HANDSET_MIC, + .cad_id = CAD_HW_DEVICE_ID_HANDSET_MIC, + .path = ADIE_PATH_HANDSET_TX, + .rate = 8000, + .dir = Q6_TX, + .codec = Q6_ICODEC_TX, + .hw = Q6_HW_HANDSET, + }, + { + .id = ADSP_AUDIO_DEVICE_ID_HEADSET_MIC, + .cad_id = CAD_HW_DEVICE_ID_HEADSET_MIC, + .path = ADIE_PATH_HEADSET_MONO_TX, + .rate = 8000, + .dir = Q6_TX, + .codec = Q6_ICODEC_TX, + .hw = Q6_HW_HEADSET, + }, + { + .id = ADSP_AUDIO_DEVICE_ID_SPKR_PHONE_MIC, + .cad_id = CAD_HW_DEVICE_ID_SPKR_PHONE_MIC, + .path = ADIE_PATH_SPEAKER_TX, + .rate = 8000, + .dir = Q6_TX, + .codec = Q6_ICODEC_TX, + .hw = Q6_HW_SPEAKER, + }, + { + .id = ADSP_AUDIO_DEVICE_ID_TTY_HEADSET_MIC, + .cad_id = CAD_HW_DEVICE_ID_TTY_HEADSET_MIC, + .path = ADIE_PATH_TTY_HEADSET_TX, + .rate = 8000, + .dir = Q6_TX, + .codec = Q6_ICODEC_TX, + .hw = Q6_HW_HEADSET, + }, + { + .id = ADSP_AUDIO_DEVICE_ID_BT_SCO_SPKR, + .cad_id = CAD_HW_DEVICE_ID_BT_SCO_SPKR, + .path = 0, /* XXX */ + .rate = 48000, + .dir = Q6_RX, + .codec = Q6_ECODEC_RX, + .hw = Q6_HW_BT_SCO, + }, + { + .id = ADSP_AUDIO_DEVICE_ID_BT_A2DP_SPKR, + .cad_id = CAD_HW_DEVICE_ID_BT_A2DP_SPKR, + .path = 0, /* XXX */ + .rate = 48000, + .dir = Q6_RX, + .codec = Q6_ECODEC_RX, + .hw = Q6_HW_BT_A2DP, + }, + { + .id = ADSP_AUDIO_DEVICE_ID_BT_SCO_MIC, + .cad_id = CAD_HW_DEVICE_ID_BT_SCO_MIC, + .path = 0, /* XXX */ + .rate = 8000, + .dir = Q6_TX, + .codec = Q6_ECODEC_TX, + .hw = Q6_HW_BT_SCO, + }, + { + .id = ADSP_AUDIO_DEVICE_ID_I2S_SPKR, + .cad_id = CAD_HW_DEVICE_ID_I2S_RX, + .path = 0, /* XXX */ + .rate = 48000, + .dir = Q6_RX, + .codec = Q6_SDAC_RX, + .hw = Q6_HW_SPEAKER, + }, + { + .id = ADSP_AUDIO_DEVICE_ID_I2S_MIC, + .cad_id = CAD_HW_DEVICE_ID_I2S_TX, + .path = 0, /* XXX */ + .rate = 16000, + .dir = Q6_TX, + .codec = Q6_SDAC_TX, + .hw = Q6_HW_SPEAKER, + }, + { + .id = 0, + .cad_id = 0, + .path = 0, + .rate = 8000, + .dir = 0, + .codec = Q6_CODEC_NONE, + .hw = 0, + }, +}; + diff --git a/arch/arm/mach-msm/qdsp6/routing.c b/arch/arm/mach-msm/qdsp6/routing.c new file mode 100644 index 0000000000000..a851896401290 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6/routing.c @@ -0,0 +1,71 @@ +/* arch/arm/mach-msm/qdsp6/routing.c + * + * Copyright (C) 2009 Google, Inc. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include + +extern int q6audio_set_route(const char *name); + +static int q6_open(struct inode *inode, struct file *file) +{ + return 0; +} + +static ssize_t q6_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + char cmd[32]; + + if (count >= sizeof(cmd)) + return -EINVAL; + if (copy_from_user(cmd, buf, count)) + return -EFAULT; + cmd[count] = 0; + + if ((count > 1) && (cmd[count-1] == '\n')) + cmd[count-1] = 0; + + q6audio_set_route(cmd); + + return count; +} + +static int q6_release(struct inode *inode, struct file *file) +{ + return 0; +} + +static struct file_operations q6_fops = { + .owner = THIS_MODULE, + .open = q6_open, + .write = q6_write, + .release = q6_release, +}; + +static struct miscdevice q6_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_audio_route", + .fops = &q6_fops, +}; + + +static int __init q6_init(void) { + return misc_register(&q6_misc); +} + +device_initcall(q6_init); diff --git a/arch/arm/mach-msm/remote_spinlock.c b/arch/arm/mach-msm/remote_spinlock.c new file mode 100644 index 0000000000000..75f6140ca0ccd --- /dev/null +++ b/arch/arm/mach-msm/remote_spinlock.c @@ -0,0 +1,226 @@ +/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +#include +#include +#include +#include + +#include +#include "smd_private.h" + +#define SMEM_SPINLOCK_COUNT 8 +#define SMEM_SPINLOCK_ARRAY_SIZE (SMEM_SPINLOCK_COUNT * sizeof(uint32_t)) + +struct raw_remote_spinlock { + union { + volatile u32 lock; + struct { + volatile u8 self_lock; + volatile u8 other_lock; + volatile u8 next_yield; + u8 pad; + } dek; + }; +}; + +static inline void __raw_remote_ex_spin_lock(struct raw_remote_spinlock *lock) +{ + unsigned long tmp; + + asm volatile ( + "1: ldrex %0, [%1]\n" + " teq %0, #0\n" + " strexeq %0, %2, [%1]\n" + " teqeq %0, #0\n" + " bne 1b" + : "=&r" (tmp) + : "r" (&lock->lock), "r" (1) + : "cc"); + + smp_mb(); +} + +static inline void __raw_remote_ex_spin_unlock(struct raw_remote_spinlock *lock) +{ + smp_mb(); + + asm volatile ( + " str %1, [%0]\n" + : + : "r" (&lock->lock), "r" (0) + : "cc"); +} + +static inline void __raw_remote_swp_spin_lock(struct raw_remote_spinlock *lock) +{ + unsigned long tmp; + + asm volatile ( + "1: swp %0, %2, [%1]\n" + " teq %0, #0\n" + " bne 1b" + : "=&r" (tmp) + : "r" (&lock->lock), "r" (1) + : "cc"); + + smp_mb(); +} + +static inline void __raw_remote_swp_spin_unlock(struct raw_remote_spinlock *lock) +{ + smp_mb(); + + asm volatile ( + " str %1, [%0]" + : + : "r" (&lock->lock), "r" (0) + : "cc"); +} + +#define DEK_LOCK_REQUEST 1 +#define DEK_LOCK_YIELD (!DEK_LOCK_REQUEST) +#define DEK_YIELD_TURN_SELF 0 +static void __raw_remote_dek_spin_lock(struct raw_remote_spinlock *lock) +{ + lock->dek.self_lock = DEK_LOCK_REQUEST; + + while (lock->dek.other_lock) { + + if (lock->dek.next_yield == DEK_YIELD_TURN_SELF) + lock->dek.self_lock = DEK_LOCK_YIELD; + + while (lock->dek.other_lock) + ; + + lock->dek.self_lock = DEK_LOCK_REQUEST; + } + lock->dek.next_yield = DEK_YIELD_TURN_SELF; + + smp_mb(); +} + +static void __raw_remote_dek_spin_unlock(struct raw_remote_spinlock *lock) +{ + smp_mb(); + + lock->dek.self_lock = DEK_LOCK_YIELD; +} + +#if defined(CONFIG_MSM_REMOTE_SPINLOCK_DEKKERS) +/* Use Dekker's algorithm when LDREX/STREX and SWP are unavailable for + * shared memory */ +#define _raw_remote_spin_lock(lock) __raw_remote_dek_spin_lock(lock) +#define _raw_remote_spin_unlock(lock) __raw_remote_dek_spin_unlock(lock) +#elif defined(CONFIG_MSM_REMOTE_SPINLOCK_SWP) +/* Use SWP-based locks when LDREX/STREX are unavailable for shared memory. 
*/ +#define _raw_remote_spin_lock(lock) __raw_remote_swp_spin_lock(lock) +#define _raw_remote_spin_unlock(lock) __raw_remote_swp_spin_unlock(lock) +#else +/* Use LDREX/STREX for shared memory locking, when available */ +#define _raw_remote_spin_lock(lock) __raw_remote_ex_spin_lock(lock) +#define _raw_remote_spin_unlock(lock) __raw_remote_ex_spin_unlock(lock) +#endif + +void _remote_spin_lock(remote_spinlock_t *lock) +{ + _raw_remote_spin_lock(lock->remote); +} +EXPORT_SYMBOL(_remote_spin_lock); + +void _remote_spin_unlock(remote_spinlock_t *lock) +{ + _raw_remote_spin_unlock(lock->remote); +} +EXPORT_SYMBOL(_remote_spin_unlock); + +static int remote_spin_lock_smem_init(remote_spinlock_t *lock, int id) +{ + void *start; + + if (id >= SMEM_SPINLOCK_COUNT) + return -EINVAL; + + start = smem_alloc(SMEM_SPINLOCK_ARRAY, SMEM_SPINLOCK_ARRAY_SIZE); + if (start == NULL) + return -ENXIO; + + lock->remote = + (struct raw_remote_spinlock *)(start + id * sizeof(uint32_t)); + return 0; +} + +#define DAL_CHUNK_NAME_LENGTH 12 +struct dal_chunk_header { + uint32_t size; + char name[DAL_CHUNK_NAME_LENGTH]; + uint32_t lock; + uint32_t reserved; + uint32_t type; + uint32_t version; +}; + +static int remote_spin_lock_dal_init(remote_spinlock_t *lock, const char *name) +{ + unsigned long start; + unsigned long end; + unsigned size; + struct dal_chunk_header *cur_hdr; + + if (!name) + return -EINVAL; + + start = (unsigned long)smem_item(SMEM_DAL_AREA, &size); + if (!start) + return -ENXIO; + + end = start + size; + + /* Find first chunk header */ + cur_hdr = (struct dal_chunk_header *)ALIGN(start, 4096); + lock->remote = NULL; + while (((unsigned long)(cur_hdr + 1) <= end) && (cur_hdr->size != 0)) { + if (!strncmp(cur_hdr->name, name, DAL_CHUNK_NAME_LENGTH)) { + lock->remote = + (struct raw_remote_spinlock *)&cur_hdr->lock; + return 0; + } + cur_hdr = (void *)cur_hdr + cur_hdr->size; + } + + pr_err("%s: DAL remote spin lock '%s' not found.\n", __func__, name); + return -EINVAL; +} + +int _remote_spin_lock_init(remote_spinlock_t *lock, const char *name) +{ + BUG_ON(name == NULL); + + /* remote spinlocks can be one of two formats: + * D: + * S: + */ + if (!strncmp(name, "D:", 2)) { + return remote_spin_lock_dal_init(lock, &name[2]); + } else if (!strncmp(name, "S:", 2)) { + BUG_ON(name[3] != '\0'); + return remote_spin_lock_smem_init(lock, (uint8_t)(name[2]-'0')); + } + + return -EINVAL; +} +EXPORT_SYMBOL(_remote_spin_lock_init); diff --git a/arch/arm/mach-msm/rpc_server_dog_keepalive.c b/arch/arm/mach-msm/rpc_server_dog_keepalive.c new file mode 100644 index 0000000000000..908f1e1bae255 --- /dev/null +++ b/arch/arm/mach-msm/rpc_server_dog_keepalive.c @@ -0,0 +1,71 @@ +/* arch/arm/mach-msm/rpc_server_dog_keepalive.c + * + * Copyright (C) 2007 Google, Inc. + * Author: Iliyan Malchev + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include + +/* dog_keepalive server definitions */ + +#define DOG_KEEPALIVE_PROG 0x30000015 +#if CONFIG_MSM_AMSS_VERSION==6210 +#define DOG_KEEPALIVE_VERS 0 +#define RPC_DOG_KEEPALIVE_BEACON 1 +#elif (CONFIG_MSM_AMSS_VERSION==6220) || (CONFIG_MSM_AMSS_VERSION==6225) +#define DOG_KEEPALIVE_VERS 0x731fa727 +#define RPC_DOG_KEEPALIVE_BEACON 2 +#elif CONFIG_MSM_AMSS_VERSION==6350 +#define DOG_KEEPALIVE_VERS 0x00010000 +#define RPC_DOG_KEEPALIVE_BEACON 2 +#elif CONFIG_MSM_AMSS_VERSION==3200 +#define DOG_KEEPALIVE_VERS 0x731fa727 /* 1931454247 */ +#define RPC_DOG_KEEPALIVE_BEACON 2 +#else +#error "Unsupported AMSS version" +#endif +#define RPC_DOG_KEEPALIVE_NULL 0 + + +/* TODO: Remove server registration with _VERS when modem is upated with _COMP*/ + +static int handle_rpc_call(struct msm_rpc_server *server, + struct rpc_request_hdr *req, unsigned len) +{ + switch (req->procedure) { + case RPC_DOG_KEEPALIVE_NULL: + return 0; + case RPC_DOG_KEEPALIVE_BEACON: + printk(KERN_INFO "DOG KEEPALIVE PING\n"); + return 0; + default: + return -ENODEV; + } +} + +static struct msm_rpc_server rpc_server = { + .prog = DOG_KEEPALIVE_PROG, + .vers = DOG_KEEPALIVE_VERS, + .rpc_call = handle_rpc_call, +}; + +static int __init rpc_server_init(void) +{ + /* Dual server registration to support backwards compatibility vers */ + return msm_rpc_create_server(&rpc_server); +} + + +module_init(rpc_server_init); diff --git a/arch/arm/mach-msm/rpc_server_time_remote.c b/arch/arm/mach-msm/rpc_server_time_remote.c new file mode 100644 index 0000000000000..c738090797e67 --- /dev/null +++ b/arch/arm/mach-msm/rpc_server_time_remote.c @@ -0,0 +1,79 @@ +/* arch/arm/mach-msm/rpc_server_time_remote.c + * + * Copyright (C) 2007 Google, Inc. + * Author: Iliyan Malchev + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include + +/* time_remote_mtoa server definitions. 
*/ + +#define TIME_REMOTE_MTOA_PROG 0x3000005d +#if CONFIG_MSM_AMSS_VERSION==6210 +#define TIME_REMOTE_MTOA_VERS 0 +#elif (CONFIG_MSM_AMSS_VERSION==6220) || (CONFIG_MSM_AMSS_VERSION==6225) +#define TIME_REMOTE_MTOA_VERS 0x9202a8e4 +#elif CONFIG_MSM_AMSS_VERSION==6350 +#define TIME_REMOTE_MTOA_VERS 0x00010000 +#elif CONFIG_MSM_AMSS_VERSION==3200 +#define TIME_REMOTE_MTOA_VERS 0x9202a8e4 /* 2449647844 */ +#else +#error "Unknown AMSS version" +#endif +#define RPC_TIME_REMOTE_MTOA_NULL 0 +#define RPC_TIME_TOD_SET_APPS_BASES 2 + +struct rpc_time_tod_set_apps_bases_args { + uint32_t tick; + uint64_t stamp; +}; + +static int handle_rpc_call(struct msm_rpc_server *server, + struct rpc_request_hdr *req, unsigned len) +{ + switch (req->procedure) { + case RPC_TIME_REMOTE_MTOA_NULL: + return 0; + + case RPC_TIME_TOD_SET_APPS_BASES: { + struct rpc_time_tod_set_apps_bases_args *args; + args = (struct rpc_time_tod_set_apps_bases_args *)(req + 1); + args->tick = be32_to_cpu(args->tick); + args->stamp = be64_to_cpu(args->stamp); + printk(KERN_INFO "RPC_TIME_TOD_SET_APPS_BASES:\n" + "\ttick = %d\n" + "\tstamp = %lld\n", + args->tick, args->stamp); + return 0; + } + default: + return -ENODEV; + } +} + +static struct msm_rpc_server rpc_server = { + .prog = TIME_REMOTE_MTOA_PROG, + .vers = TIME_REMOTE_MTOA_VERS, + .rpc_call = handle_rpc_call, +}; + +static int __init rpc_server_init(void) +{ + /* Dual server registration to support backwards compatibility vers */ + return msm_rpc_create_server(&rpc_server); +} + + +module_init(rpc_server_init); diff --git a/arch/arm/mach-msm/sirc.c b/arch/arm/mach-msm/sirc.c index 11b54c7aeb09a..06688a3ffdd74 100644 --- a/arch/arm/mach-msm/sirc.c +++ b/arch/arm/mach-msm/sirc.c @@ -1,25 +1,27 @@ -/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved. +/* linux/arch/arm/mach-msm/irq.c * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. + * Copyright (c) 2009 QUALCOMM Incorporated. + * Copyright (C) 2009 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - * */ #include #include #include #include +#include +#include + +#include "sirc.h" static unsigned int int_enable; static unsigned int wake_enable; @@ -37,9 +39,13 @@ static struct sirc_cascade_regs sirc_reg_table[] = { { .int_status = SPSS_SIRC_IRQ_STATUS, .cascade_irq = INT_SIRC_0, + .cascade_fiq = INT_SIRC_1, } }; +static unsigned int save_type; +static unsigned int save_polarity; + /* Mask off the given interrupt. Keep the int_enable mask in sync with the enable reg, so it can be restored after power collapse. 
*/ static void sirc_irq_mask(struct irq_data *d) @@ -116,6 +122,24 @@ static int sirc_irq_set_type(struct irq_data *d, unsigned int flow_type) return 0; } +#if defined(CONFIG_MSM_FIQ_SUPPORT) +void sirc_fiq_select(int irq, bool enable) +{ + uint32_t mask = 1 << (irq - FIRST_SIRC_IRQ); + uint32_t val; + unsigned long flags; + + local_irq_save(flags); + val = readl(SPSS_SIRC_INT_SELECT); + if (enable) + val |= mask; + else + val &= ~mask; + writel(val, SPSS_SIRC_INT_SELECT); + local_irq_restore(flags); +} +#endif + /* Finds the pending interrupt on the passed cascade irq and redrives it */ static void sirc_irq_handler(unsigned int irq, struct irq_desc *desc) { @@ -141,6 +165,22 @@ static void sirc_irq_handler(unsigned int irq, struct irq_desc *desc) desc->irq_data.chip->irq_ack(&desc->irq_data); } +void msm_sirc_enter_sleep(void) +{ + save_type = readl(sirc_regs.int_type); + save_polarity = readl(sirc_regs.int_polarity); + writel(wake_enable, sirc_regs.int_enable); + return; +} + +void msm_sirc_exit_sleep(void) +{ + writel(save_type, sirc_regs.int_type); + writel(save_polarity, sirc_regs.int_polarity); + writel(int_enable, sirc_regs.int_enable); + return; +} + static struct irq_chip sirc_irq_chip = { .name = "sirc", .irq_ack = sirc_irq_ack, @@ -157,7 +197,7 @@ void __init msm_init_sirc(void) int_enable = 0; wake_enable = 0; - for (i = FIRST_SIRC_IRQ; i < LAST_SIRC_IRQ; i++) { + for (i = FIRST_SIRC_IRQ; i < FIRST_SIRC_IRQ + NR_SIRC_IRQS; i++) { set_irq_chip(i, &sirc_irq_chip); set_irq_handler(i, handle_edge_irq); set_irq_flags(i, IRQF_VALID); @@ -167,6 +207,10 @@ void __init msm_init_sirc(void) set_irq_chained_handler(sirc_reg_table[i].cascade_irq, sirc_irq_handler); set_irq_wake(sirc_reg_table[i].cascade_irq, 1); +#if defined(CONFIG_MSM_FIQ_SUPPORT) + msm_fiq_select(sirc_reg_table[i].cascade_fiq); + msm_fiq_enable(sirc_reg_table[i].cascade_fiq); +#endif } return; } diff --git a/arch/arm/mach-msm/sirc.h b/arch/arm/mach-msm/sirc.h new file mode 100644 index 0000000000000..8e1399f028b2d --- /dev/null +++ b/arch/arm/mach-msm/sirc.h @@ -0,0 +1,27 @@ +/* arch/arm/mach-msm/pm.h + * + * Copyright (C) 2009 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _ARCH_ARM_MACH_MSM_SIRC_H +#define _ARCH_ARM_MACH_MSM_SIRC_H + +#ifdef CONFIG_ARCH_QSD8X50 +void sirc_fiq_select(int irq, bool enable); +void __init msm_init_sirc(void); +#else +static inline void sirc_fiq_select(int irq, bool enable) {} +static inline void __init msm_init_sirc(void) {} +#endif + +#endif diff --git a/arch/arm/mach-msm/smd.c b/arch/arm/mach-msm/smd.c index 657be73297db9..18c1d5b61655f 100644 --- a/arch/arm/mach-msm/smd.c +++ b/arch/arm/mach-msm/smd.c @@ -95,11 +95,14 @@ static void smd_diag(void) } } +void msm_pm_flush_console(void); + /* call when SMSM_RESET flag is set in the A9's smsm_state */ static void handle_modem_crash(void) { pr_err("ARM9 has CRASHED\n"); smd_diag(); + msm_pm_flush_console(); /* hard reboot if possible */ if (msm_hw_reset_hook) diff --git a/arch/arm/mach-msm/smd_debug.c b/arch/arm/mach-msm/smd_debug.c index 8736afff82f3e..69a9cd9b6e3ba 100644 --- a/arch/arm/mach-msm/smd_debug.c +++ b/arch/arm/mach-msm/smd_debug.c @@ -154,20 +154,31 @@ static int debug_read_version(char *buf, int max) return sprintf(buf, "%d.%d\n", version >> 16, version & 0xffff); } +struct smem_msm_id { + uint32_t format; + uint32_t msm_id; + uint32_t msm_ver; + char build_id[32]; +}; + static int debug_read_build_id(char *buf, int max) { unsigned size; void *data; + struct smem_msm_id *msm_id; - data = smem_item(SMEM_HW_SW_BUILD_ID, &size); - if (!data) + msm_id = smem_item(SMEM_HW_SW_BUILD_ID, &size); + if (!msm_id || (size < sizeof(struct smem_msm_id))) return 0; if (size >= max) size = max; - memcpy(buf, data, size); - return size; + return scnprintf(buf, size, "fmt=%d id=%d vers=%d.%d build_id='%s'\n", + msm_id->format,msm_id->msm_id, + (msm_id->msm_ver >> 16) & 0xffff, + msm_id->msm_ver & 0xffff, + msm_id->build_id); } static int debug_read_alloc_tbl(char *buf, int max) diff --git a/arch/arm/mach-msm/smd_qmi.c b/arch/arm/mach-msm/smd_qmi.c new file mode 100644 index 0000000000000..50411df6179c9 --- /dev/null +++ b/arch/arm/mach-msm/smd_qmi.c @@ -0,0 +1,860 @@ +/* arch/arm/mach-msm/smd_qmi.c + * + * QMI Control Driver -- Manages network data connections. + * + * Copyright (C) 2007 Google, Inc. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define QMI_CTL 0x00 +#define QMI_WDS 0x01 +#define QMI_DMS 0x02 +#define QMI_NAS 0x03 + +#define QMI_RESULT_SUCCESS 0x0000 +#define QMI_RESULT_FAILURE 0x0001 + +struct qmi_msg { + unsigned char service; + unsigned char client_id; + unsigned short txn_id; + unsigned short type; + unsigned short size; + unsigned char *tlv; +}; + +#define qmi_ctl_client_id 0 + +#define STATE_OFFLINE 0 +#define STATE_QUERYING 1 +#define STATE_ONLINE 2 + +struct qmi_ctxt { + struct miscdevice misc; + + struct mutex lock; + + unsigned char ctl_txn_id; + unsigned char wds_client_id; + unsigned short wds_txn_id; + + unsigned wds_busy; + unsigned wds_handle; + unsigned state_dirty; + unsigned state; + + unsigned char addr[4]; + unsigned char mask[4]; + unsigned char gateway[4]; + unsigned char dns1[4]; + unsigned char dns2[4]; + + smd_channel_t *ch; + const char *ch_name; + struct wake_lock wake_lock; + + struct work_struct open_work; + struct work_struct read_work; +}; + +static struct qmi_ctxt *qmi_minor_to_ctxt(unsigned n); + +static void qmi_read_work(struct work_struct *ws); +static void qmi_open_work(struct work_struct *work); + +void qmi_ctxt_init(struct qmi_ctxt *ctxt, unsigned n) +{ + mutex_init(&ctxt->lock); + INIT_WORK(&ctxt->read_work, qmi_read_work); + INIT_WORK(&ctxt->open_work, qmi_open_work); + wake_lock_init(&ctxt->wake_lock, WAKE_LOCK_SUSPEND, ctxt->misc.name); + ctxt->ctl_txn_id = 1; + ctxt->wds_txn_id = 1; + ctxt->wds_busy = 1; + ctxt->state = STATE_OFFLINE; + +} + +static struct workqueue_struct *qmi_wq; + +static int verbose = 0; + +/* anyone waiting for a state change waits here */ +static DECLARE_WAIT_QUEUE_HEAD(qmi_wait_queue); + + +static void qmi_dump_msg(struct qmi_msg *msg, const char *prefix) +{ + unsigned sz, n; + unsigned char *x; + + if (!verbose) + return; + + printk(KERN_INFO + "qmi: %s: svc=%02x cid=%02x tid=%04x type=%04x size=%04x\n", + prefix, msg->service, msg->client_id, + msg->txn_id, msg->type, msg->size); + + x = msg->tlv; + sz = msg->size; + + while (sz >= 3) { + sz -= 3; + + n = x[1] | (x[2] << 8); + if (n > sz) + break; + + printk(KERN_INFO "qmi: %s: tlv: %02x %04x { ", + prefix, x[0], n); + x += 3; + sz -= n; + while (n-- > 0) + printk("%02x ", *x++); + printk("}\n"); + } +} + +int qmi_add_tlv(struct qmi_msg *msg, + unsigned type, unsigned size, const void *data) +{ + unsigned char *x = msg->tlv + msg->size; + + x[0] = type; + x[1] = size; + x[2] = size >> 8; + + memcpy(x + 3, data, size); + + msg->size += (size + 3); + + return 0; +} + +/* Extract a tagged item from a qmi message buffer, +** taking care not to overrun the buffer. 
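+** Each tagged item (TLV) is a 1-byte type, a 2-byte little-endian
+** length, and 'length' bytes of value -- the same layout produced
+** by qmi_add_tlv() above.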
+*/ +static int qmi_get_tlv(struct qmi_msg *msg, + unsigned type, unsigned size, void *data) +{ + unsigned char *x = msg->tlv; + unsigned len = msg->size; + unsigned n; + + while (len >= 3) { + len -= 3; + + /* size of this item */ + n = x[1] | (x[2] << 8); + if (n > len) + break; + + if (x[0] == type) { + if (n != size) + return -1; + memcpy(data, x + 3, size); + return 0; + } + + x += (n + 3); + len -= n; + } + + return -1; +} + +static unsigned qmi_get_status(struct qmi_msg *msg, unsigned *error) +{ + unsigned short status[2]; + if (qmi_get_tlv(msg, 0x02, sizeof(status), status)) { + *error = 0; + return QMI_RESULT_FAILURE; + } else { + *error = status[1]; + return status[0]; + } +} + +/* 0x01 */ +#define QMUX_HEADER 13 + +/* should be >= HEADER + FOOTER */ +#define QMUX_OVERHEAD 16 + +static int qmi_send(struct qmi_ctxt *ctxt, struct qmi_msg *msg) +{ + unsigned char *data; + unsigned hlen; + unsigned len; + int r; + + qmi_dump_msg(msg, "send"); + + if (msg->service == QMI_CTL) { + hlen = QMUX_HEADER - 1; + } else { + hlen = QMUX_HEADER; + } + + /* QMUX length is total header + total payload - IFC selector */ + len = hlen + msg->size - 1; + if (len > 0xffff) + return -1; + + data = msg->tlv - hlen; + + /* prepend encap and qmux header */ + *data++ = 0x01; /* ifc selector */ + + /* qmux header */ + *data++ = len; + *data++ = len >> 8; + *data++ = 0x00; /* flags: client */ + *data++ = msg->service; + *data++ = msg->client_id; + + /* qmi header */ + *data++ = 0x00; /* flags: send */ + *data++ = msg->txn_id; + if (msg->service != QMI_CTL) + *data++ = msg->txn_id >> 8; + + *data++ = msg->type; + *data++ = msg->type >> 8; + *data++ = msg->size; + *data++ = msg->size >> 8; + + /* len + 1 takes the interface selector into account */ + r = smd_write(ctxt->ch, msg->tlv - hlen, len + 1); + + if (r != len) { + return -1; + } else { + return 0; + } +} + +static void qmi_process_ctl_msg(struct qmi_ctxt *ctxt, struct qmi_msg *msg) +{ + unsigned err; + if (msg->type == 0x0022) { + unsigned char n[2]; + if (qmi_get_status(msg, &err)) + return; + if (qmi_get_tlv(msg, 0x01, sizeof(n), n)) + return; + if (n[0] == QMI_WDS) { + printk(KERN_INFO + "qmi: ctl: wds use client_id 0x%02x\n", n[1]); + ctxt->wds_client_id = n[1]; + ctxt->wds_busy = 0; + } + } +} + +static int qmi_network_get_profile(struct qmi_ctxt *ctxt); + +static void swapaddr(unsigned char *src, unsigned char *dst) +{ + dst[0] = src[3]; + dst[1] = src[2]; + dst[2] = src[1]; + dst[3] = src[0]; +} + +static unsigned char zero[4]; +static void qmi_read_runtime_profile(struct qmi_ctxt *ctxt, struct qmi_msg *msg) +{ + unsigned char tmp[4]; + unsigned r; + + r = qmi_get_tlv(msg, 0x1e, 4, tmp); + swapaddr(r ? zero : tmp, ctxt->addr); + r = qmi_get_tlv(msg, 0x21, 4, tmp); + swapaddr(r ? zero : tmp, ctxt->mask); + r = qmi_get_tlv(msg, 0x20, 4, tmp); + swapaddr(r ? zero : tmp, ctxt->gateway); + r = qmi_get_tlv(msg, 0x15, 4, tmp); + swapaddr(r ? zero : tmp, ctxt->dns1); + r = qmi_get_tlv(msg, 0x16, 4, tmp); + swapaddr(r ? 
zero : tmp, ctxt->dns2); +} + +static void qmi_process_unicast_wds_msg(struct qmi_ctxt *ctxt, + struct qmi_msg *msg) +{ + unsigned err; + switch (msg->type) { + case 0x0021: + if (qmi_get_status(msg, &err)) { + printk(KERN_ERR + "qmi: wds: network stop failed (%04x)\n", err); + } else { + printk(KERN_INFO + "qmi: wds: network stopped\n"); + ctxt->state = STATE_OFFLINE; + ctxt->state_dirty = 1; + } + break; + case 0x0020: + if (qmi_get_status(msg, &err)) { + printk(KERN_ERR + "qmi: wds: network start failed (%04x)\n", err); + } else if (qmi_get_tlv(msg, 0x01, sizeof(ctxt->wds_handle), &ctxt->wds_handle)) { + printk(KERN_INFO + "qmi: wds no handle?\n"); + } else { + printk(KERN_INFO + "qmi: wds: got handle 0x%08x\n", + ctxt->wds_handle); + } + break; + case 0x002D: + printk("qmi: got network profile\n"); + if (ctxt->state == STATE_QUERYING) { + qmi_read_runtime_profile(ctxt, msg); + ctxt->state = STATE_ONLINE; + ctxt->state_dirty = 1; + } + break; + default: + printk(KERN_ERR "qmi: unknown msg type 0x%04x\n", msg->type); + } + ctxt->wds_busy = 0; +} + +static void qmi_process_broadcast_wds_msg(struct qmi_ctxt *ctxt, + struct qmi_msg *msg) +{ + if (msg->type == 0x0022) { + unsigned char n[2]; + if (qmi_get_tlv(msg, 0x01, sizeof(n), n)) + return; + switch (n[0]) { + case 1: + printk(KERN_INFO "qmi: wds: DISCONNECTED\n"); + ctxt->state = STATE_OFFLINE; + ctxt->state_dirty = 1; + break; + case 2: + printk(KERN_INFO "qmi: wds: CONNECTED\n"); + ctxt->state = STATE_QUERYING; + ctxt->state_dirty = 1; + qmi_network_get_profile(ctxt); + break; + case 3: + printk(KERN_INFO "qmi: wds: SUSPENDED\n"); + ctxt->state = STATE_OFFLINE; + ctxt->state_dirty = 1; + } + } else { + printk(KERN_ERR "qmi: unknown bcast msg type 0x%04x\n", msg->type); + } +} + +static void qmi_process_wds_msg(struct qmi_ctxt *ctxt, + struct qmi_msg *msg) +{ + printk("wds: %04x @ %02x\n", msg->type, msg->client_id); + if (msg->client_id == ctxt->wds_client_id) { + qmi_process_unicast_wds_msg(ctxt, msg); + } else if (msg->client_id == 0xff) { + qmi_process_broadcast_wds_msg(ctxt, msg); + } else { + printk(KERN_ERR + "qmi_process_wds_msg client id 0x%02x unknown\n", + msg->client_id); + } +} + +static void qmi_process_qmux(struct qmi_ctxt *ctxt, + unsigned char *buf, unsigned sz) +{ + struct qmi_msg msg; + + /* require a full header */ + if (sz < 5) + return; + + /* require a size that matches the buffer size */ + if (sz != (buf[0] | (buf[1] << 8))) + return; + + /* only messages from a service (bit7=1) are allowed */ + if (buf[2] != 0x80) + return; + + msg.service = buf[3]; + msg.client_id = buf[4]; + + /* annoyingly, CTL messages have a shorter TID */ + if (buf[3] == 0) { + if (sz < 7) + return; + msg.txn_id = buf[6]; + buf += 7; + sz -= 7; + } else { + if (sz < 8) + return; + msg.txn_id = buf[6] | (buf[7] << 8); + buf += 8; + sz -= 8; + } + + /* no type and size!? 
*/ + if (sz < 4) + return; + sz -= 4; + + msg.type = buf[0] | (buf[1] << 8); + msg.size = buf[2] | (buf[3] << 8); + msg.tlv = buf + 4; + + if (sz != msg.size) + return; + + qmi_dump_msg(&msg, "recv"); + + mutex_lock(&ctxt->lock); + switch (msg.service) { + case QMI_CTL: + qmi_process_ctl_msg(ctxt, &msg); + break; + case QMI_WDS: + qmi_process_wds_msg(ctxt, &msg); + break; + default: + printk(KERN_ERR "qmi: msg from unknown svc 0x%02x\n", + msg.service); + break; + } + mutex_unlock(&ctxt->lock); + + wake_up(&qmi_wait_queue); +} + +#define QMI_MAX_PACKET (256 + QMUX_OVERHEAD) + +static void qmi_read_work(struct work_struct *ws) +{ + struct qmi_ctxt *ctxt = container_of(ws, struct qmi_ctxt, read_work); + struct smd_channel *ch = ctxt->ch; + unsigned char buf[QMI_MAX_PACKET]; + int sz; + + for (;;) { + sz = smd_cur_packet_size(ch); + if (sz == 0) + break; + if (sz < smd_read_avail(ch)) + break; + if (sz > QMI_MAX_PACKET) { + smd_read(ch, 0, sz); + continue; + } + if (smd_read(ch, buf, sz) != sz) { + printk(KERN_ERR "qmi: not enough data?!\n"); + continue; + } + + /* interface selector must be 1 */ + if (buf[0] != 0x01) + continue; + + qmi_process_qmux(ctxt, buf + 1, sz - 1); + } +} + +static int qmi_request_wds_cid(struct qmi_ctxt *ctxt); + +static void qmi_open_work(struct work_struct *ws) +{ + struct qmi_ctxt *ctxt = container_of(ws, struct qmi_ctxt, open_work); + mutex_lock(&ctxt->lock); + qmi_request_wds_cid(ctxt); + mutex_unlock(&ctxt->lock); +} + +static void qmi_notify(void *priv, unsigned event) +{ + struct qmi_ctxt *ctxt = priv; + + switch (event) { + case SMD_EVENT_DATA: { + int sz; + sz = smd_cur_packet_size(ctxt->ch); + if ((sz > 0) && (sz <= smd_read_avail(ctxt->ch))) { + wake_lock_timeout(&ctxt->wake_lock, HZ / 2); + queue_work(qmi_wq, &ctxt->read_work); + } + break; + } + case SMD_EVENT_OPEN: + printk(KERN_INFO "qmi: smd opened\n"); + queue_work(qmi_wq, &ctxt->open_work); + break; + case SMD_EVENT_CLOSE: + printk(KERN_INFO "qmi: smd closed\n"); + break; + } +} + +static int qmi_request_wds_cid(struct qmi_ctxt *ctxt) +{ + unsigned char data[64 + QMUX_OVERHEAD]; + struct qmi_msg msg; + unsigned char n; + + msg.service = QMI_CTL; + msg.client_id = qmi_ctl_client_id; + msg.txn_id = ctxt->ctl_txn_id; + msg.type = 0x0022; + msg.size = 0; + msg.tlv = data + QMUX_HEADER; + + ctxt->ctl_txn_id += 2; + + n = QMI_WDS; + qmi_add_tlv(&msg, 0x01, 0x01, &n); + + return qmi_send(ctxt, &msg); +} + +static int qmi_network_get_profile(struct qmi_ctxt *ctxt) +{ + unsigned char data[96 + QMUX_OVERHEAD]; + struct qmi_msg msg; + + msg.service = QMI_WDS; + msg.client_id = ctxt->wds_client_id; + msg.txn_id = ctxt->wds_txn_id; + msg.type = 0x002D; + msg.size = 0; + msg.tlv = data + QMUX_HEADER; + + ctxt->wds_txn_id += 2; + + return qmi_send(ctxt, &msg); +} + +static int qmi_network_up(struct qmi_ctxt *ctxt, char *apn) +{ + unsigned char data[96 + QMUX_OVERHEAD]; + struct qmi_msg msg; + char *auth_type; + char *user; + char *pass; + + for (user = apn; *user; user++) { + if (*user == ' ') { + *user++ = 0; + break; + } + } + for (pass = user; *pass; pass++) { + if (*pass == ' ') { + *pass++ = 0; + break; + } + } + + for (auth_type = pass; *auth_type; auth_type++) { + if (*auth_type == ' ') { + *auth_type++ = 0; + break; + } + } + + msg.service = QMI_WDS; + msg.client_id = ctxt->wds_client_id; + msg.txn_id = ctxt->wds_txn_id; + msg.type = 0x0020; + msg.size = 0; + msg.tlv = data + QMUX_HEADER; + + ctxt->wds_txn_id += 2; + + qmi_add_tlv(&msg, 0x14, strlen(apn), apn); + if (*auth_type) + qmi_add_tlv(&msg, 0x16, 
strlen(auth_type), auth_type); + if (*user) { + if (!*auth_type) { + unsigned char x; + x = 3; + qmi_add_tlv(&msg, 0x16, 1, &x); + } + qmi_add_tlv(&msg, 0x17, strlen(user), user); + if (*pass) + qmi_add_tlv(&msg, 0x18, strlen(pass), pass); + } + return qmi_send(ctxt, &msg); +} + +static int qmi_network_down(struct qmi_ctxt *ctxt) +{ + unsigned char data[16 + QMUX_OVERHEAD]; + struct qmi_msg msg; + + msg.service = QMI_WDS; + msg.client_id = ctxt->wds_client_id; + msg.txn_id = ctxt->wds_txn_id; + msg.type = 0x0021; + msg.size = 0; + msg.tlv = data + QMUX_HEADER; + + ctxt->wds_txn_id += 2; + + qmi_add_tlv(&msg, 0x01, sizeof(ctxt->wds_handle), &ctxt->wds_handle); + + return qmi_send(ctxt, &msg); +} + +static int qmi_print_state(struct qmi_ctxt *ctxt, char *buf, int max) +{ + int i; + char *statename; + + if (ctxt->state == STATE_ONLINE) { + statename = "up"; + } else if (ctxt->state == STATE_OFFLINE) { + statename = "down"; + } else { + statename = "busy"; + } + + i = scnprintf(buf, max, "STATE=%s\n", statename); + i += scnprintf(buf + i, max - i, "CID=%d\n",ctxt->wds_client_id); + + if (ctxt->state != STATE_ONLINE){ + return i; + } + + i += scnprintf(buf + i, max - i, "ADDR=%d.%d.%d.%d\n", + ctxt->addr[0], ctxt->addr[1], ctxt->addr[2], ctxt->addr[3]); + i += scnprintf(buf + i, max - i, "MASK=%d.%d.%d.%d\n", + ctxt->mask[0], ctxt->mask[1], ctxt->mask[2], ctxt->mask[3]); + i += scnprintf(buf + i, max - i, "GATEWAY=%d.%d.%d.%d\n", + ctxt->gateway[0], ctxt->gateway[1], ctxt->gateway[2], + ctxt->gateway[3]); + i += scnprintf(buf + i, max - i, "DNS1=%d.%d.%d.%d\n", + ctxt->dns1[0], ctxt->dns1[1], ctxt->dns1[2], ctxt->dns1[3]); + i += scnprintf(buf + i, max - i, "DNS2=%d.%d.%d.%d\n", + ctxt->dns2[0], ctxt->dns2[1], ctxt->dns2[2], ctxt->dns2[3]); + + return i; +} + +static ssize_t qmi_read(struct file *fp, char __user *buf, + size_t count, loff_t *pos) +{ + struct qmi_ctxt *ctxt = fp->private_data; + char msg[256]; + int len; + int r; + + mutex_lock(&ctxt->lock); + for (;;) { + if (ctxt->state_dirty) { + ctxt->state_dirty = 0; + len = qmi_print_state(ctxt, msg, 256); + break; + } + mutex_unlock(&ctxt->lock); + r = wait_event_interruptible(qmi_wait_queue, ctxt->state_dirty); + if (r < 0) + return r; + mutex_lock(&ctxt->lock); + } + mutex_unlock(&ctxt->lock); + + if (len > count) + len = count; + + if (copy_to_user(buf, msg, len)) + return -EFAULT; + + return len; +} + + +static ssize_t qmi_write(struct file *fp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct qmi_ctxt *ctxt = fp->private_data; + unsigned char cmd[64]; + int len; + int r; + + if (count < 1) + return 0; + + len = count > 63 ? 
63 : count; + + if (copy_from_user(cmd, buf, len)) + return -EFAULT; + + cmd[len] = 0; + + /* lazy */ + if (cmd[len-1] == '\n') { + cmd[len-1] = 0; + len--; + } + + if (!strncmp(cmd, "verbose", 7)) { + verbose = 1; + } else if (!strncmp(cmd, "terse", 5)) { + verbose = 0; + } else if (!strncmp(cmd, "poll", 4)) { + ctxt->state_dirty = 1; + wake_up(&qmi_wait_queue); + } else if (!strncmp(cmd, "down", 4)) { +retry_down: + mutex_lock(&ctxt->lock); + if (ctxt->wds_busy) { + mutex_unlock(&ctxt->lock); + r = wait_event_interruptible(qmi_wait_queue, !ctxt->wds_busy); + if (r < 0) + return r; + goto retry_down; + } + ctxt->wds_busy = 1; + qmi_network_down(ctxt); + mutex_unlock(&ctxt->lock); + } else if (!strncmp(cmd, "up:", 3)) { +retry_up: + mutex_lock(&ctxt->lock); + if (ctxt->wds_busy) { + mutex_unlock(&ctxt->lock); + r = wait_event_interruptible(qmi_wait_queue, !ctxt->wds_busy); + if (r < 0) + return r; + goto retry_up; + } + ctxt->wds_busy = 1; + qmi_network_up(ctxt, cmd+3); + mutex_unlock(&ctxt->lock); + } else { + return -EINVAL; + } + + return count; +} + +static int qmi_open(struct inode *ip, struct file *fp) +{ + struct qmi_ctxt *ctxt = qmi_minor_to_ctxt(MINOR(ip->i_rdev)); + int r = 0; + + if (!ctxt) { + printk(KERN_ERR "unknown qmi misc %d\n", MINOR(ip->i_rdev)); + return -ENODEV; + } + + fp->private_data = ctxt; + + mutex_lock(&ctxt->lock); + if (ctxt->ch == 0) + r = smd_open(ctxt->ch_name, &ctxt->ch, ctxt, qmi_notify); + if (r == 0) + wake_up(&qmi_wait_queue); + mutex_unlock(&ctxt->lock); + + return r; +} + +static int qmi_release(struct inode *ip, struct file *fp) +{ + return 0; +} + +static struct file_operations qmi_fops = { + .owner = THIS_MODULE, + .read = qmi_read, + .write = qmi_write, + .open = qmi_open, + .release = qmi_release, +}; + +static struct qmi_ctxt qmi_device0 = { + .ch_name = "SMD_DATA5_CNTL", + .misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "qmi0", + .fops = &qmi_fops, + } +}; +static struct qmi_ctxt qmi_device1 = { + .ch_name = "SMD_DATA6_CNTL", + .misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "qmi1", + .fops = &qmi_fops, + } +}; +static struct qmi_ctxt qmi_device2 = { + .ch_name = "SMD_DATA7_CNTL", + .misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "qmi2", + .fops = &qmi_fops, + } +}; + +static struct qmi_ctxt *qmi_minor_to_ctxt(unsigned n) +{ + if (n == qmi_device0.misc.minor) + return &qmi_device0; + if (n == qmi_device1.misc.minor) + return &qmi_device1; + if (n == qmi_device2.misc.minor) + return &qmi_device2; + return 0; +} + +static int __init qmi_init(void) +{ + int ret; + + qmi_wq = create_singlethread_workqueue("qmi"); + if (qmi_wq == 0) + return -ENOMEM; + + qmi_ctxt_init(&qmi_device0, 0); + qmi_ctxt_init(&qmi_device1, 1); + qmi_ctxt_init(&qmi_device2, 2); + + ret = misc_register(&qmi_device0.misc); + if (ret == 0) + ret = misc_register(&qmi_device1.misc); + if (ret == 0) + ret = misc_register(&qmi_device2.misc); + return ret; +} + +module_init(qmi_init); diff --git a/arch/arm/mach-msm/smd_rpcrouter.c b/arch/arm/mach-msm/smd_rpcrouter.c new file mode 100644 index 0000000000000..73b8b38502217 --- /dev/null +++ b/arch/arm/mach-msm/smd_rpcrouter.c @@ -0,0 +1,1346 @@ +/* arch/arm/mach-msm/smd_rpcrouter.c + * + * Copyright (C) 2007 Google, Inc. + * Copyright (c) 2007-2009 QUALCOMM Incorporated. + * Author: San Mehat + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* TODO: handle cases where smd_write() will tempfail due to full fifo */ +/* TODO: thread priority? schedule a work to bump it? */ +/* TODO: maybe make server_list_lock a mutex */ +/* TODO: pool fragments to avoid kmalloc/kfree churn */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include "smd_rpcrouter.h" + +#define TRACE_R2R_MSG 0 +#define TRACE_R2R_RAW 0 +#define TRACE_RPC_MSG 0 +#define TRACE_NOTIFY_MSG 0 + +#define MSM_RPCROUTER_DEBUG 0 +#define MSM_RPCROUTER_DEBUG_PKT 0 +#define MSM_RPCROUTER_R2R_DEBUG 0 +#define DUMP_ALL_RECEIVED_HEADERS 0 + +#define DIAG(x...) printk("[RR] ERROR " x) + +#if MSM_RPCROUTER_DEBUG +#define D(x...) printk(x) +#else +#define D(x...) do {} while (0) +#endif + +#if TRACE_R2R_MSG +#define RR(x...) printk("[RR] "x) +#else +#define RR(x...) do {} while (0) +#endif + +#if TRACE_RPC_MSG +#define IO(x...) printk("[RPC] "x) +#else +#define IO(x...) do {} while (0) +#endif + +#if TRACE_NOTIFY_MSG +#define NTFY(x...) printk(KERN_ERR "[NOTIFY] "x) +#else +#define NTFY(x...) do {} while (0) +#endif + +static LIST_HEAD(local_endpoints); +static LIST_HEAD(remote_endpoints); + +static LIST_HEAD(server_list); + +static smd_channel_t *smd_channel; +static int initialized; +static wait_queue_head_t newserver_wait; +static wait_queue_head_t smd_wait; +static int smd_wait_count; /* odd while waiting */ + +static DEFINE_SPINLOCK(local_endpoints_lock); +static DEFINE_SPINLOCK(remote_endpoints_lock); +static DEFINE_SPINLOCK(server_list_lock); +static DEFINE_SPINLOCK(smd_lock); + +static struct workqueue_struct *rpcrouter_workqueue; +static struct wake_lock rpcrouter_wake_lock; +static int rpcrouter_need_len; + +static atomic_t next_xid = ATOMIC_INIT(1); +static atomic_t next_mid = ATOMIC_INIT(0); + +static void do_read_data(struct work_struct *work); +static void do_create_pdevs(struct work_struct *work); +static void do_create_rpcrouter_pdev(struct work_struct *work); + +static DECLARE_WORK(work_read_data, do_read_data); +static DECLARE_WORK(work_create_pdevs, do_create_pdevs); +static DECLARE_WORK(work_create_rpcrouter_pdev, do_create_rpcrouter_pdev); +static atomic_t rpcrouter_pdev_created = ATOMIC_INIT(0); + +#define RR_STATE_IDLE 0 +#define RR_STATE_HEADER 1 +#define RR_STATE_BODY 2 +#define RR_STATE_ERROR 3 + +struct rr_context { + struct rr_packet *pkt; + uint8_t *ptr; + uint32_t state; /* current assembly state */ + uint32_t count; /* bytes needed in this state */ +}; + +struct rr_context the_rr_context; + +static struct platform_device rpcrouter_pdev = { + .name = "oncrpc_router", + .id = -1, +}; + + +static int rpcrouter_send_control_msg(union rr_control_msg *msg) +{ + struct rr_header hdr; + unsigned long flags; + int need; + + if (!(msg->cmd == RPCROUTER_CTRL_CMD_HELLO) && !initialized) { + printk(KERN_ERR "rpcrouter_send_control_msg(): Warning, " + "router not initialized\n"); + return -EINVAL; + } + + hdr.version = RPCROUTER_VERSION; + hdr.type = msg->cmd; + hdr.src_pid = RPCROUTER_PID_LOCAL; + hdr.src_cid = RPCROUTER_ROUTER_ADDRESS; + hdr.confirm_rx = 0; + hdr.size = sizeof(*msg); + hdr.dst_pid = 0; + hdr.dst_cid = 
RPCROUTER_ROUTER_ADDRESS; + + /* TODO: what if channel is full? */ + + need = sizeof(hdr) + hdr.size; + spin_lock_irqsave(&smd_lock, flags); + while (smd_write_avail(smd_channel) < need) { + spin_unlock_irqrestore(&smd_lock, flags); + msleep(250); + spin_lock_irqsave(&smd_lock, flags); + } + smd_write(smd_channel, &hdr, sizeof(hdr)); + smd_write(smd_channel, msg, hdr.size); + spin_unlock_irqrestore(&smd_lock, flags); + return 0; +} + +static struct rr_server *rpcrouter_create_server(uint32_t pid, + uint32_t cid, + uint32_t prog, + uint32_t ver) +{ + struct rr_server *server; + unsigned long flags; + int rc; + + server = kmalloc(sizeof(struct rr_server), GFP_KERNEL); + if (!server) + return ERR_PTR(-ENOMEM); + + memset(server, 0, sizeof(struct rr_server)); + server->pid = pid; + server->cid = cid; + server->prog = prog; + server->vers = ver; + + spin_lock_irqsave(&server_list_lock, flags); + list_add_tail(&server->list, &server_list); + spin_unlock_irqrestore(&server_list_lock, flags); + + if (pid == RPCROUTER_PID_REMOTE) { + rc = msm_rpcrouter_create_server_cdev(server); + if (rc < 0) + goto out_fail; + } + return server; +out_fail: + spin_lock_irqsave(&server_list_lock, flags); + list_del(&server->list); + spin_unlock_irqrestore(&server_list_lock, flags); + kfree(server); + return ERR_PTR(rc); +} + +static void rpcrouter_destroy_server(struct rr_server *server) +{ + unsigned long flags; + + spin_lock_irqsave(&server_list_lock, flags); + list_del(&server->list); + spin_unlock_irqrestore(&server_list_lock, flags); + device_destroy(msm_rpcrouter_class, server->device_number); + kfree(server); +} + +static struct rr_server *rpcrouter_lookup_server(uint32_t prog, uint32_t ver) +{ + struct rr_server *server; + unsigned long flags; + + spin_lock_irqsave(&server_list_lock, flags); + list_for_each_entry(server, &server_list, list) { + if (server->prog == prog + && server->vers == ver) { + spin_unlock_irqrestore(&server_list_lock, flags); + return server; + } + } + spin_unlock_irqrestore(&server_list_lock, flags); + return NULL; +} + +static struct rr_server *rpcrouter_lookup_server_by_dev(dev_t dev) +{ + struct rr_server *server; + unsigned long flags; + + spin_lock_irqsave(&server_list_lock, flags); + list_for_each_entry(server, &server_list, list) { + if (server->device_number == dev) { + spin_unlock_irqrestore(&server_list_lock, flags); + return server; + } + } + spin_unlock_irqrestore(&server_list_lock, flags); + return NULL; +} + +struct msm_rpc_endpoint *msm_rpcrouter_create_local_endpoint(dev_t dev) +{ + struct msm_rpc_endpoint *ept; + unsigned long flags; + int i; + + ept = kmalloc(sizeof(struct msm_rpc_endpoint), GFP_KERNEL); + if (!ept) + return NULL; + memset(ept, 0, sizeof(struct msm_rpc_endpoint)); + + /* mark no reply outstanding */ + ept->next_rroute = 0; + for (i = 0; i < MAX_REPLY_ROUTE; i++) + ept->rroute[i].pid = 0xffffffff; + + ept->cid = (uint32_t) ept; + ept->pid = RPCROUTER_PID_LOCAL; + ept->dev = dev; + + if ((dev != msm_rpcrouter_devno) && (dev != MKDEV(0, 0))) { + struct rr_server *srv; + /* + * This is a userspace client which opened + * a program/ver devicenode. Bind the client + * to that destination + */ + srv = rpcrouter_lookup_server_by_dev(dev); + /* TODO: bug? really? 
*/ + BUG_ON(!srv); + + ept->dst_pid = srv->pid; + ept->dst_cid = srv->cid; + ept->dst_prog = cpu_to_be32(srv->prog); + ept->dst_vers = cpu_to_be32(srv->vers); + ept->flags |= MSM_RPC_ENABLE_RECEIVE; + + D("Creating local ept %p @ %08x:%08x\n", ept, srv->prog, srv->vers); + } else { + /* mark not connected */ + ept->dst_pid = 0xffffffff; + D("Creating a master local ept %p\n", ept); + } + + init_waitqueue_head(&ept->wait_q); + INIT_LIST_HEAD(&ept->read_q); + spin_lock_init(&ept->read_q_lock); + wake_lock_init(&ept->read_q_wake_lock, WAKE_LOCK_SUSPEND, "rpc_read"); + INIT_LIST_HEAD(&ept->incomplete); + + spin_lock_irqsave(&local_endpoints_lock, flags); + list_add_tail(&ept->list, &local_endpoints); + spin_unlock_irqrestore(&local_endpoints_lock, flags); + return ept; +} + +int msm_rpcrouter_destroy_local_endpoint(struct msm_rpc_endpoint *ept) +{ + int rc; + union rr_control_msg msg; + + msg.cmd = RPCROUTER_CTRL_CMD_REMOVE_CLIENT; + msg.cli.pid = ept->pid; + msg.cli.cid = ept->cid; + + RR("x REMOVE_CLIENT id=%d:%08x\n", ept->pid, ept->cid); + rc = rpcrouter_send_control_msg(&msg); + if (rc < 0) + return rc; + + wake_lock_destroy(&ept->read_q_wake_lock); + list_del(&ept->list); + kfree(ept); + return 0; +} + +static int rpcrouter_create_remote_endpoint(uint32_t cid) +{ + struct rr_remote_endpoint *new_c; + unsigned long flags; + + new_c = kmalloc(sizeof(struct rr_remote_endpoint), GFP_KERNEL); + if (!new_c) + return -ENOMEM; + memset(new_c, 0, sizeof(struct rr_remote_endpoint)); + + new_c->cid = cid; + new_c->pid = RPCROUTER_PID_REMOTE; + init_waitqueue_head(&new_c->quota_wait); + spin_lock_init(&new_c->quota_lock); + + spin_lock_irqsave(&remote_endpoints_lock, flags); + list_add_tail(&new_c->list, &remote_endpoints); + spin_unlock_irqrestore(&remote_endpoints_lock, flags); + return 0; +} + +static struct msm_rpc_endpoint *rpcrouter_lookup_local_endpoint(uint32_t cid) +{ + struct msm_rpc_endpoint *ept; + unsigned long flags; + + spin_lock_irqsave(&local_endpoints_lock, flags); + list_for_each_entry(ept, &local_endpoints, list) { + if (ept->cid == cid) { + spin_unlock_irqrestore(&local_endpoints_lock, flags); + return ept; + } + } + spin_unlock_irqrestore(&local_endpoints_lock, flags); + return NULL; +} + +static struct rr_remote_endpoint *rpcrouter_lookup_remote_endpoint(uint32_t cid) +{ + struct rr_remote_endpoint *ept; + unsigned long flags; + + spin_lock_irqsave(&remote_endpoints_lock, flags); + list_for_each_entry(ept, &remote_endpoints, list) { + if (ept->cid == cid) { + spin_unlock_irqrestore(&remote_endpoints_lock, flags); + return ept; + } + } + spin_unlock_irqrestore(&remote_endpoints_lock, flags); + return NULL; +} + +static int process_control_msg(union rr_control_msg *msg, int len) +{ + union rr_control_msg ctl; + struct rr_server *server; + struct rr_remote_endpoint *r_ept; + int rc = 0; + unsigned long flags; + + if (len != sizeof(*msg)) { + printk(KERN_ERR "rpcrouter: r2r msg size %d != %d\n", + len, sizeof(*msg)); + return -EINVAL; + } + + switch (msg->cmd) { + case RPCROUTER_CTRL_CMD_HELLO: + RR("o HELLO\n"); + + RR("x HELLO\n"); + memset(&ctl, 0, sizeof(ctl)); + ctl.cmd = RPCROUTER_CTRL_CMD_HELLO; + rpcrouter_send_control_msg(&ctl); + + initialized = 1; + + /* Send list of servers one at a time */ + ctl.cmd = RPCROUTER_CTRL_CMD_NEW_SERVER; + + /* TODO: long time to hold a spinlock... 
*/ + spin_lock_irqsave(&server_list_lock, flags); + list_for_each_entry(server, &server_list, list) { + ctl.srv.pid = server->pid; + ctl.srv.cid = server->cid; + ctl.srv.prog = server->prog; + ctl.srv.vers = server->vers; + + RR("x NEW_SERVER id=%d:%08x prog=%08x:%08x\n", + server->pid, server->cid, + server->prog, server->vers); + + rpcrouter_send_control_msg(&ctl); + } + spin_unlock_irqrestore(&server_list_lock, flags); + + queue_work(rpcrouter_workqueue, &work_create_rpcrouter_pdev); + break; + + case RPCROUTER_CTRL_CMD_RESUME_TX: + RR("o RESUME_TX id=%d:%08x\n", msg->cli.pid, msg->cli.cid); + + r_ept = rpcrouter_lookup_remote_endpoint(msg->cli.cid); + if (!r_ept) { + printk(KERN_ERR + "rpcrouter: Unable to resume client\n"); + break; + } + spin_lock_irqsave(&r_ept->quota_lock, flags); + r_ept->tx_quota_cntr = 0; + spin_unlock_irqrestore(&r_ept->quota_lock, flags); + wake_up(&r_ept->quota_wait); + break; + + case RPCROUTER_CTRL_CMD_NEW_SERVER: + RR("o NEW_SERVER id=%d:%08x prog=%08x:%08x\n", + msg->srv.pid, msg->srv.cid, msg->srv.prog, msg->srv.vers); + + server = rpcrouter_lookup_server(msg->srv.prog, msg->srv.vers); + + if (!server) { + server = rpcrouter_create_server( + msg->srv.pid, msg->srv.cid, + msg->srv.prog, msg->srv.vers); + if (!server) + return -ENOMEM; + /* + * XXX: Verify that its okay to add the + * client to our remote client list + * if we get a NEW_SERVER notification + */ + if (!rpcrouter_lookup_remote_endpoint(msg->srv.cid)) { + rc = rpcrouter_create_remote_endpoint( + msg->srv.cid); + if (rc < 0) + printk(KERN_ERR + "rpcrouter:Client create" + "error (%d)\n", rc); + } + schedule_work(&work_create_pdevs); + wake_up(&newserver_wait); + } else { + if ((server->pid == msg->srv.pid) && + (server->cid == msg->srv.cid)) { + printk(KERN_ERR "rpcrouter: Duplicate svr\n"); + } else { + server->pid = msg->srv.pid; + server->cid = msg->srv.cid; + } + } + break; + + case RPCROUTER_CTRL_CMD_REMOVE_SERVER: + RR("o REMOVE_SERVER prog=%08x:%d\n", + msg->srv.prog, msg->srv.vers); + server = rpcrouter_lookup_server(msg->srv.prog, msg->srv.vers); + if (server) + rpcrouter_destroy_server(server); + break; + + case RPCROUTER_CTRL_CMD_REMOVE_CLIENT: + RR("o REMOVE_CLIENT id=%d:%08x\n", msg->cli.pid, msg->cli.cid); + if (msg->cli.pid != RPCROUTER_PID_REMOTE) { + printk(KERN_ERR + "rpcrouter: Denying remote removal of " + "local client\n"); + break; + } + r_ept = rpcrouter_lookup_remote_endpoint(msg->cli.cid); + if (r_ept) { + spin_lock_irqsave(&remote_endpoints_lock, flags); + list_del(&r_ept->list); + spin_unlock_irqrestore(&remote_endpoints_lock, flags); + kfree(r_ept); + } + + /* Notify local clients of this event */ + printk(KERN_ERR "rpcrouter: LOCAL NOTIFICATION NOT IMP\n"); + rc = -ENOSYS; + + break; + default: + RR("o UNKNOWN(%08x)\n", msg->cmd); + rc = -ENOSYS; + } + + return rc; +} + +static void do_create_rpcrouter_pdev(struct work_struct *work) +{ + if (atomic_cmpxchg(&rpcrouter_pdev_created, 0, 1) == 0) + platform_device_register(&rpcrouter_pdev); +} + +static void do_create_pdevs(struct work_struct *work) +{ + unsigned long flags; + struct rr_server *server; + + /* TODO: race if destroyed while being registered */ + spin_lock_irqsave(&server_list_lock, flags); + list_for_each_entry(server, &server_list, list) { + if (server->pid == RPCROUTER_PID_REMOTE) { + if (server->pdev_name[0] == 0) { + spin_unlock_irqrestore(&server_list_lock, + flags); + msm_rpcrouter_create_server_pdev(server); + schedule_work(&work_create_pdevs); + return; + } + } + } + 
spin_unlock_irqrestore(&server_list_lock, flags); +} + +static void rpcrouter_smdnotify(void *_dev, unsigned event) +{ + if (event != SMD_EVENT_DATA) + return; + + if (smd_read_avail(smd_channel) >= rpcrouter_need_len) + wake_lock(&rpcrouter_wake_lock); + wake_up(&smd_wait); +} + +static void *rr_malloc(unsigned sz) +{ + void *ptr = kmalloc(sz, GFP_KERNEL); + if (ptr) + return ptr; + + printk(KERN_ERR "rpcrouter: kmalloc of %d failed, retrying...\n", sz); + do { + ptr = kmalloc(sz, GFP_KERNEL); + } while (!ptr); + + return ptr; +} + +/* TODO: deal with channel teardown / restore */ +static int rr_read(void *data, int len) +{ + int rc; + unsigned long flags; +// printk("rr_read() %d\n", len); + for(;;) { + spin_lock_irqsave(&smd_lock, flags); + if (smd_read_avail(smd_channel) >= len) { + rc = smd_read(smd_channel, data, len); + spin_unlock_irqrestore(&smd_lock, flags); + if (rc == len) + return 0; + else + return -EIO; + } + rpcrouter_need_len = len; + wake_unlock(&rpcrouter_wake_lock); + spin_unlock_irqrestore(&smd_lock, flags); + +// printk("rr_read: waiting (%d)\n", len); + smd_wait_count++; + wake_up(&smd_wait); + wait_event(smd_wait, smd_read_avail(smd_channel) >= len); + smd_wait_count++; + } + return 0; +} + +static uint32_t r2r_buf[RPCROUTER_MSGSIZE_MAX]; + +static void do_read_data(struct work_struct *work) +{ + struct rr_header hdr; + struct rr_packet *pkt; + struct rr_fragment *frag; + struct msm_rpc_endpoint *ept; + uint32_t pm, mid; + unsigned long flags; + + if (rr_read(&hdr, sizeof(hdr))) + goto fail_io; + +#if TRACE_R2R_RAW + RR("- ver=%d type=%d src=%d:%08x crx=%d siz=%d dst=%d:%08x\n", + hdr.version, hdr.type, hdr.src_pid, hdr.src_cid, + hdr.confirm_rx, hdr.size, hdr.dst_pid, hdr.dst_cid); +#endif + + if (hdr.version != RPCROUTER_VERSION) { + DIAG("version %d != %d\n", hdr.version, RPCROUTER_VERSION); + goto fail_data; + } + if (hdr.size > RPCROUTER_MSGSIZE_MAX) { + DIAG("msg size %d > max %d\n", hdr.size, RPCROUTER_MSGSIZE_MAX); + goto fail_data; + } + + if (hdr.dst_cid == RPCROUTER_ROUTER_ADDRESS) { + if (rr_read(r2r_buf, hdr.size)) + goto fail_io; + process_control_msg((void*) r2r_buf, hdr.size); + goto done; + } + + if (hdr.size < sizeof(pm)) { + DIAG("runt packet (no pacmark)\n"); + goto fail_data; + } + if (rr_read(&pm, sizeof(pm))) + goto fail_io; + + hdr.size -= sizeof(pm); + + frag = rr_malloc(sizeof(*frag)); + frag->next = NULL; + frag->length = hdr.size; + if (rr_read(frag->data, hdr.size)) { + kfree(frag); + goto fail_io; + } + + ept = rpcrouter_lookup_local_endpoint(hdr.dst_cid); + if (!ept) { + DIAG("no local ept for cid %08x\n", hdr.dst_cid); + kfree(frag); + goto done; + } + + /* See if there is already a partial packet that matches our mid + * and if so, append this fragment to that packet. + */ + mid = PACMARK_MID(pm); + list_for_each_entry(pkt, &ept->incomplete, list) { + if (pkt->mid == mid) { + pkt->last->next = frag; + pkt->last = frag; + pkt->length += frag->length; + if (PACMARK_LAST(pm)) { + list_del(&pkt->list); + goto packet_complete; + } + goto done; + } + } + /* This mid is new -- create a packet for it, and put it on + * the incomplete list if this fragment is not a last fragment, + * otherwise put it on the read queue. 
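+	 * Note that a completed packet is only queued for endpoints
+	 * with MSM_RPC_ENABLE_RECEIVE set; otherwise it is dropped
+	 * with a warning (see packet_complete below).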
+ */ + pkt = rr_malloc(sizeof(struct rr_packet)); + pkt->first = frag; + pkt->last = frag; + memcpy(&pkt->hdr, &hdr, sizeof(hdr)); + pkt->mid = mid; + pkt->length = frag->length; + if (!PACMARK_LAST(pm)) { + list_add_tail(&pkt->list, &ept->incomplete); + goto done; + } + +packet_complete: + spin_lock_irqsave(&ept->read_q_lock, flags); + if (ept->flags & MSM_RPC_ENABLE_RECEIVE) { + wake_lock(&ept->read_q_wake_lock); + list_add_tail(&pkt->list, &ept->read_q); + wake_up(&ept->wait_q); + } else { + pr_warning("smd_rpcrouter: Unexpected incoming data on %08x:%08x\n", + be32_to_cpu(ept->dst_prog), + be32_to_cpu(ept->dst_vers)); + } + spin_unlock_irqrestore(&ept->read_q_lock, flags); +done: + + if (hdr.confirm_rx) { + union rr_control_msg msg; + + msg.cmd = RPCROUTER_CTRL_CMD_RESUME_TX; + msg.cli.pid = hdr.dst_pid; + msg.cli.cid = hdr.dst_cid; + + RR("x RESUME_TX id=%d:%08x\n", msg.cli.pid, msg.cli.cid); + rpcrouter_send_control_msg(&msg); + } + + queue_work(rpcrouter_workqueue, &work_read_data); + return; + +fail_io: +fail_data: + printk(KERN_ERR "rpc_router has died\n"); + wake_unlock(&rpcrouter_wake_lock); +} + +void msm_rpc_setup_req(struct rpc_request_hdr *hdr, uint32_t prog, + uint32_t vers, uint32_t proc) +{ + memset(hdr, 0, sizeof(struct rpc_request_hdr)); + hdr->xid = cpu_to_be32(atomic_add_return(1, &next_xid)); + hdr->rpc_vers = cpu_to_be32(2); + hdr->prog = cpu_to_be32(prog); + hdr->vers = cpu_to_be32(vers); + hdr->procedure = cpu_to_be32(proc); +} + +struct msm_rpc_endpoint *msm_rpc_open(void) +{ + struct msm_rpc_endpoint *ept; + + ept = msm_rpcrouter_create_local_endpoint(MKDEV(0, 0)); + if (ept == NULL) + return ERR_PTR(-ENOMEM); + + return ept; +} + +int msm_rpc_close(struct msm_rpc_endpoint *ept) +{ + return msm_rpcrouter_destroy_local_endpoint(ept); +} +EXPORT_SYMBOL(msm_rpc_close); + +static int msm_rpc_write_pkt(struct msm_rpc_endpoint *ept, + struct rr_remote_endpoint *r_ept, + struct rr_header *hdr, + uint32_t pacmark, + void *buffer, int count) +{ + DEFINE_WAIT(__wait); + unsigned long flags; + int needed; + + for (;;) { + prepare_to_wait(&r_ept->quota_wait, &__wait, + TASK_INTERRUPTIBLE); + spin_lock_irqsave(&r_ept->quota_lock, flags); + if (r_ept->tx_quota_cntr < RPCROUTER_DEFAULT_RX_QUOTA) + break; + if (signal_pending(current) && + (!(ept->flags & MSM_RPC_UNINTERRUPTIBLE))) + break; + spin_unlock_irqrestore(&r_ept->quota_lock, flags); + schedule(); + } + finish_wait(&r_ept->quota_wait, &__wait); + + if (signal_pending(current) && + (!(ept->flags & MSM_RPC_UNINTERRUPTIBLE))) { + spin_unlock_irqrestore(&r_ept->quota_lock, flags); + return -ERESTARTSYS; + } + r_ept->tx_quota_cntr++; + if (r_ept->tx_quota_cntr == RPCROUTER_DEFAULT_RX_QUOTA) + hdr->confirm_rx = 1; + + spin_unlock_irqrestore(&r_ept->quota_lock, flags); + + spin_lock_irqsave(&smd_lock, flags); + + needed = sizeof(*hdr) + hdr->size; + while (smd_write_avail(smd_channel) < needed) { + spin_unlock_irqrestore(&smd_lock, flags); + msleep(250); + spin_lock_irqsave(&smd_lock, flags); + } + + /* TODO: deal with full fifo */ + smd_write(smd_channel, hdr, sizeof(*hdr)); + smd_write(smd_channel, &pacmark, sizeof(pacmark)); + smd_write(smd_channel, buffer, count); + + spin_unlock_irqrestore(&smd_lock, flags); + + return 0; +} + +int msm_rpc_write(struct msm_rpc_endpoint *ept, void *buffer, int count) +{ + struct rr_header hdr; + uint32_t pacmark; + uint32_t mid; + struct rpc_request_hdr *rq = buffer; + struct rr_remote_endpoint *r_ept; + int ret; + int total; + + /* snoop the RPC packet and enforce permissions */ + + /* 
has to have at least the xid and type fields */ + if (count < (sizeof(uint32_t) * 2)) { + printk(KERN_ERR "rr_write: rejecting runt packet\n"); + return -EINVAL; + } + + if (rq->type == 0) { + /* RPC CALL */ + if (count < (sizeof(uint32_t) * 6)) { + printk(KERN_ERR + "rr_write: rejecting runt call packet\n"); + return -EINVAL; + } + if (ept->dst_pid == 0xffffffff) { + printk(KERN_ERR "rr_write: not connected\n"); + return -ENOTCONN; + } + +#if !defined(CONFIG_MSM_LEGACY_7X00A_AMSS) + if ((ept->dst_prog != rq->prog) || + !msm_rpc_is_compatible_version( + be32_to_cpu(ept->dst_vers), + be32_to_cpu(rq->vers))) { +#else + if (ept->dst_prog != rq->prog || ept->dst_vers != rq->vers) { +#endif + printk(KERN_ERR + "rr_write: cannot write to %08x:%d " + "(bound to %08x:%d)\n", + be32_to_cpu(rq->prog), be32_to_cpu(rq->vers), + be32_to_cpu(ept->dst_prog), + be32_to_cpu(ept->dst_vers)); + return -EINVAL; + } + hdr.dst_pid = ept->dst_pid; + hdr.dst_cid = ept->dst_cid; + IO("CALL on ept %p to %08x:%08x @ %d:%08x (%d bytes) (xid %x proc %x)\n", + ept, + be32_to_cpu(rq->prog), be32_to_cpu(rq->vers), + ept->dst_pid, ept->dst_cid, count, + be32_to_cpu(rq->xid), be32_to_cpu(rq->procedure)); + } else { + /* RPC REPLY */ + /* TODO: locking */ + for (ret = 0; ret < MAX_REPLY_ROUTE; ret++) + if (ept->rroute[ret].xid == rq->xid) { + if (ept->rroute[ret].pid == 0xffffffff) + continue; + hdr.dst_pid = ept->rroute[ret].pid; + hdr.dst_cid = ept->rroute[ret].cid; + /* consume this reply */ + ept->rroute[ret].pid = 0xffffffff; + goto found_rroute; + } + + printk(KERN_ERR "rr_write: rejecting packet w/ bad xid\n"); + return -EINVAL; + +found_rroute: + IO("REPLY on ept %p to xid=%d @ %d:%08x (%d bytes)\n", + ept, + be32_to_cpu(rq->xid), hdr.dst_pid, hdr.dst_cid, count); + } + + r_ept = rpcrouter_lookup_remote_endpoint(hdr.dst_cid); + + if (!r_ept) { + printk(KERN_ERR + "msm_rpc_write(): No route to ept " + "[PID %x CID %x]\n", hdr.dst_pid, hdr.dst_cid); + return -EHOSTUNREACH; + } + + /* Create routing header */ + hdr.type = RPCROUTER_CTRL_CMD_DATA; + hdr.version = RPCROUTER_VERSION; + hdr.src_pid = ept->pid; + hdr.src_cid = ept->cid; + + total = count; + + mid = atomic_add_return(1, &next_mid) & 0xFF; + + while (count > 0) { + unsigned xfer; + + if (count > RPCROUTER_DATASIZE_MAX) + xfer = RPCROUTER_DATASIZE_MAX; + else + xfer = count; + + hdr.confirm_rx = 0; + hdr.size = xfer + sizeof(uint32_t); + + /* total == count -> must be first packet + * xfer == count -> must be last packet + */ + pacmark = PACMARK(xfer, mid, (total == count), (xfer == count)); + + ret = msm_rpc_write_pkt(ept, r_ept, &hdr, pacmark, buffer, xfer); + if (ret < 0) + return ret; + + buffer += xfer; + count -= xfer; + } + + return total; +} +EXPORT_SYMBOL(msm_rpc_write); + +/* + * NOTE: It is the responsibility of the caller to kfree buffer + */ +int msm_rpc_read(struct msm_rpc_endpoint *ept, void **buffer, + unsigned user_len, long timeout) +{ + struct rr_fragment *frag, *next; + char *buf; + int rc; + + rc = __msm_rpc_read(ept, &frag, user_len, timeout); + if (rc <= 0) + return rc; + + /* single-fragment messages conveniently can be + * returned as-is (the buffer is at the front) + */ + if (frag->next == 0) { + *buffer = (void*) frag; + return rc; + } + + /* multi-fragment messages, we have to do it the + * hard way, which is rather disgusting right now + */ + buf = rr_malloc(rc); + *buffer = buf; + + while (frag != NULL) { + memcpy(buf, frag->data, frag->length); + next = frag->next; + buf += frag->length; + kfree(frag); + frag = next; + } + + 
return rc; +} + +int msm_rpc_call(struct msm_rpc_endpoint *ept, uint32_t proc, + void *_request, int request_size, + long timeout) +{ + return msm_rpc_call_reply(ept, proc, + _request, request_size, + NULL, 0, timeout); +} +EXPORT_SYMBOL(msm_rpc_call); + +int msm_rpc_call_reply(struct msm_rpc_endpoint *ept, uint32_t proc, + void *_request, int request_size, + void *_reply, int reply_size, + long timeout) +{ + struct rpc_request_hdr *req = _request; + struct rpc_reply_hdr *reply; + int rc; + + if (request_size < sizeof(*req)) + return -ETOOSMALL; + + if (ept->dst_pid == 0xffffffff) + return -ENOTCONN; + + /* We can't use msm_rpc_setup_req() here, because dst_prog and + * dst_vers here are already in BE. + */ + memset(req, 0, sizeof(*req)); + req->xid = cpu_to_be32(atomic_add_return(1, &next_xid)); + req->rpc_vers = cpu_to_be32(2); + req->prog = ept->dst_prog; + req->vers = ept->dst_vers; + req->procedure = cpu_to_be32(proc); + + /* Allow replys to be added to the queue */ + ept->flags |= MSM_RPC_ENABLE_RECEIVE; + + rc = msm_rpc_write(ept, req, request_size); + if (rc < 0) + goto error; + + for (;;) { + rc = msm_rpc_read(ept, (void*) &reply, -1, timeout); + if (rc < 0) + goto error; + if (rc < (3 * sizeof(uint32_t))) { + rc = -EIO; + break; + } + /* we should not get CALL packets -- ignore them */ + if (reply->type == 0) { + kfree(reply); + continue; + } + /* If an earlier call timed out, we could get the (no + * longer wanted) reply for it. Ignore replies that + * we don't expect. + */ + if (reply->xid != req->xid) { + kfree(reply); + continue; + } + if (reply->reply_stat != 0) { + rc = -EPERM; + break; + } + if (reply->data.acc_hdr.accept_stat != 0) { + rc = -EINVAL; + break; + } + if (_reply == NULL) { + rc = 0; + break; + } + if (rc > reply_size) { + rc = -ENOMEM; + } else { + memcpy(_reply, reply, rc); + } + break; + } + kfree(reply); +error: + ept->flags &= ~MSM_RPC_ENABLE_RECEIVE; + wake_unlock(&ept->read_q_wake_lock); + + return rc; +} +EXPORT_SYMBOL(msm_rpc_call_reply); + + +static inline int ept_packet_available(struct msm_rpc_endpoint *ept) +{ + unsigned long flags; + int ret; + spin_lock_irqsave(&ept->read_q_lock, flags); + ret = !list_empty(&ept->read_q); + spin_unlock_irqrestore(&ept->read_q_lock, flags); + return ret; +} + +int __msm_rpc_read(struct msm_rpc_endpoint *ept, + struct rr_fragment **frag_ret, + unsigned len, long timeout) +{ + struct rr_packet *pkt; + struct rpc_request_hdr *rq; + DEFINE_WAIT(__wait); + unsigned long flags; + int rc; + + IO("READ on ept %p\n", ept); + + if (ept->flags & MSM_RPC_UNINTERRUPTIBLE) { + if (timeout < 0) { + wait_event(ept->wait_q, ept_packet_available(ept)); + } else { + rc = wait_event_timeout( + ept->wait_q, ept_packet_available(ept), + timeout); + if (rc == 0) + return -ETIMEDOUT; + } + } else { + if (timeout < 0) { + rc = wait_event_interruptible( + ept->wait_q, ept_packet_available(ept)); + if (rc < 0) + return rc; + } else { + rc = wait_event_interruptible_timeout( + ept->wait_q, ept_packet_available(ept), + timeout); + if (rc == 0) + return -ETIMEDOUT; + } + } + + spin_lock_irqsave(&ept->read_q_lock, flags); + if (list_empty(&ept->read_q)) { + spin_unlock_irqrestore(&ept->read_q_lock, flags); + return -EAGAIN; + } + pkt = list_first_entry(&ept->read_q, struct rr_packet, list); + if (pkt->length > len) { + spin_unlock_irqrestore(&ept->read_q_lock, flags); + return -ETOOSMALL; + } + list_del(&pkt->list); + if (list_empty(&ept->read_q)) + wake_unlock(&ept->read_q_wake_lock); + spin_unlock_irqrestore(&ept->read_q_lock, flags); + + rc 
= pkt->length; + + *frag_ret = pkt->first; + rq = (void*) pkt->first->data; + if ((rc >= (sizeof(uint32_t) * 3)) && (rq->type == 0)) { + IO("READ on ept %p is a CALL on %08x:%08x proc %d xid %d\n", + ept, be32_to_cpu(rq->prog), be32_to_cpu(rq->vers), + be32_to_cpu(rq->procedure), + be32_to_cpu(rq->xid)); + /* RPC CALL */ + if (ept->rroute[ept->next_rroute].pid != 0xffffffff) { + printk(KERN_WARNING + "rr_read: lost previous reply xid...\n"); + } + /* TODO: locking? */ + ept->rroute[ept->next_rroute].pid = pkt->hdr.src_pid; + ept->rroute[ept->next_rroute].cid = pkt->hdr.src_cid; + ept->rroute[ept->next_rroute].xid = rq->xid; + ept->next_rroute = (ept->next_rroute + 1) & (MAX_REPLY_ROUTE - 1); + } +#if TRACE_RPC_MSG + else if ((rc >= (sizeof(uint32_t) * 3)) && (rq->type == 1)) + IO("READ on ept %p is a REPLY\n", ept); + else IO("READ on ept %p (%d bytes)\n", ept, rc); +#endif + + kfree(pkt); + return rc; +} + +#if !defined(CONFIG_MSM_LEGACY_7X00A_AMSS) +int msm_rpc_is_compatible_version(uint32_t server_version, + uint32_t client_version) +{ + if ((server_version & RPC_VERSION_MODE_MASK) != + (client_version & RPC_VERSION_MODE_MASK)) + return 0; + + if (server_version & RPC_VERSION_MODE_MASK) + return server_version == client_version; + + return ((server_version & RPC_VERSION_MAJOR_MASK) == + (client_version & RPC_VERSION_MAJOR_MASK)) && + ((server_version & RPC_VERSION_MINOR_MASK) >= + (client_version & RPC_VERSION_MINOR_MASK)); +} +EXPORT_SYMBOL(msm_rpc_is_compatible_version); + +static int msm_rpc_get_compatible_server(uint32_t prog, + uint32_t ver, + uint32_t *found_vers) +{ + struct rr_server *server; + unsigned long flags; + if (found_vers == NULL) + return 0; + + spin_lock_irqsave(&server_list_lock, flags); + list_for_each_entry(server, &server_list, list) { + if ((server->prog == prog) && + msm_rpc_is_compatible_version(server->vers, ver)) { + *found_vers = server->vers; + spin_unlock_irqrestore(&server_list_lock, flags); + return 0; + } + } + spin_unlock_irqrestore(&server_list_lock, flags); + return -1; +} +#endif + +struct msm_rpc_endpoint *msm_rpc_connect(uint32_t prog, uint32_t vers, unsigned flags) +{ + struct msm_rpc_endpoint *ept; + struct rr_server *server; + +#if !defined(CONFIG_MSM_LEGACY_7X00A_AMSS) + if (!(vers & RPC_VERSION_MODE_MASK)) { + uint32_t found_vers; + if (msm_rpc_get_compatible_server(prog, vers, &found_vers) < 0) + return ERR_PTR(-EHOSTUNREACH); + if (found_vers != vers) { + D("RPC using new version %08x:{%08x --> %08x}\n", + prog, vers, found_vers); + vers = found_vers; + } + } +#endif + + server = rpcrouter_lookup_server(prog, vers); + if (!server) + return ERR_PTR(-EHOSTUNREACH); + + ept = msm_rpc_open(); + if (IS_ERR(ept)) + return ept; + + ept->flags = flags; + ept->dst_pid = server->pid; + ept->dst_cid = server->cid; + ept->dst_prog = cpu_to_be32(prog); + ept->dst_vers = cpu_to_be32(vers); + + return ept; +} +EXPORT_SYMBOL(msm_rpc_connect); + +uint32_t msm_rpc_get_vers(struct msm_rpc_endpoint *ept) +{ + return be32_to_cpu(ept->dst_vers); +} +EXPORT_SYMBOL(msm_rpc_get_vers); + +/* TODO: permission check? 
*/ +int msm_rpc_register_server(struct msm_rpc_endpoint *ept, + uint32_t prog, uint32_t vers) +{ + int rc; + union rr_control_msg msg; + struct rr_server *server; + + server = rpcrouter_create_server(ept->pid, ept->cid, + prog, vers); + if (!server) + return -ENODEV; + + msg.srv.cmd = RPCROUTER_CTRL_CMD_NEW_SERVER; + msg.srv.pid = ept->pid; + msg.srv.cid = ept->cid; + msg.srv.prog = prog; + msg.srv.vers = vers; + + RR("x NEW_SERVER id=%d:%08x prog=%08x:%08x\n", + ept->pid, ept->cid, prog, vers); + + rc = rpcrouter_send_control_msg(&msg); + if (rc < 0) + return rc; + + ept->flags |= MSM_RPC_ENABLE_RECEIVE; + return 0; +} + +/* TODO: permission check -- disallow unreg of somebody else's server */ +int msm_rpc_unregister_server(struct msm_rpc_endpoint *ept, + uint32_t prog, uint32_t vers) +{ + struct rr_server *server; + server = rpcrouter_lookup_server(prog, vers); + + if (!server) + return -ENOENT; + + ept->flags &= ~MSM_RPC_ENABLE_RECEIVE; + wake_unlock(&ept->read_q_wake_lock); + rpcrouter_destroy_server(server); + return 0; +} + +static int msm_rpcrouter_probe(struct platform_device *pdev) +{ + int rc; + + /* Initialize what we need to start processing */ + INIT_LIST_HEAD(&local_endpoints); + INIT_LIST_HEAD(&remote_endpoints); + + init_waitqueue_head(&newserver_wait); + init_waitqueue_head(&smd_wait); + wake_lock_init(&rpcrouter_wake_lock, WAKE_LOCK_SUSPEND, "SMD_RPCCALL"); + + rpcrouter_workqueue = create_singlethread_workqueue("rpcrouter"); + if (!rpcrouter_workqueue) + return -ENOMEM; + + rc = msm_rpcrouter_init_devices(); + if (rc < 0) + goto fail_destroy_workqueue; + + /* Open up SMD channel 2 */ + initialized = 0; + rc = smd_open("SMD_RPCCALL", &smd_channel, NULL, rpcrouter_smdnotify); + if (rc < 0) + goto fail_remove_devices; + + queue_work(rpcrouter_workqueue, &work_read_data); + return 0; + + fail_remove_devices: + msm_rpcrouter_exit_devices(); + fail_destroy_workqueue: + destroy_workqueue(rpcrouter_workqueue); + return rc; +} + +static int msm_rpcrouter_suspend(struct platform_device *pdev, + pm_message_t state) +{ + /* Wait until the worker thread has waited at least once so that it + * gets a chance to release its wakelock. + */ + int wait_count = smd_wait_count; + if (!(smd_wait_count & 1)) + wait_event(smd_wait, smd_wait_count != wait_count); + return 0; +} + +static struct platform_driver msm_smd_channel2_driver = { + .probe = msm_rpcrouter_probe, + .driver = { + .name = "SMD_RPCCALL", + .owner = THIS_MODULE, + }, + .suspend = msm_rpcrouter_suspend, +}; + +static int __init rpcrouter_init(void) +{ + return platform_driver_register(&msm_smd_channel2_driver); +} + +module_init(rpcrouter_init); +MODULE_DESCRIPTION("MSM RPC Router"); +MODULE_AUTHOR("San Mehat "); +MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-msm/smd_rpcrouter.h b/arch/arm/mach-msm/smd_rpcrouter.h new file mode 100644 index 0000000000000..2bf541acac259 --- /dev/null +++ b/arch/arm/mach-msm/smd_rpcrouter.h @@ -0,0 +1,202 @@ +/** arch/arm/mach-msm/smd_rpcrouter.h + * + * Copyright (C) 2007 Google, Inc. + * Copyright (c) 2007-2008 QUALCOMM Incorporated. + * Author: San Mehat + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + */ + +#ifndef _ARCH_ARM_MACH_MSM_SMD_RPCROUTER_H +#define _ARCH_ARM_MACH_MSM_SMD_RPCROUTER_H + +#include +#include +#include +#include +#include + +#include +#include + +/* definitions for the R2R wire protcol */ + +#define RPCROUTER_VERSION 1 +#define RPCROUTER_PROCESSORS_MAX 4 +#define RPCROUTER_MSGSIZE_MAX 512 +#define RPCROUTER_DATASIZE_MAX 500 + +#define RPCROUTER_CLIENT_BCAST_ID 0xffffffff +#define RPCROUTER_ROUTER_ADDRESS 0xfffffffe + +#define RPCROUTER_PID_LOCAL 1 +#define RPCROUTER_PID_REMOTE 0 + +#define RPCROUTER_CTRL_CMD_DATA 1 +#define RPCROUTER_CTRL_CMD_HELLO 2 +#define RPCROUTER_CTRL_CMD_BYE 3 +#define RPCROUTER_CTRL_CMD_NEW_SERVER 4 +#define RPCROUTER_CTRL_CMD_REMOVE_SERVER 5 +#define RPCROUTER_CTRL_CMD_REMOVE_CLIENT 6 +#define RPCROUTER_CTRL_CMD_RESUME_TX 7 +#define RPCROUTER_CTRL_CMD_EXIT 8 + +#define RPCROUTER_DEFAULT_RX_QUOTA 5 + +union rr_control_msg { + uint32_t cmd; + struct { + uint32_t cmd; + uint32_t prog; + uint32_t vers; + uint32_t pid; + uint32_t cid; + } srv; + struct { + uint32_t cmd; + uint32_t pid; + uint32_t cid; + } cli; +}; + +struct rr_header { + uint32_t version; + uint32_t type; + uint32_t src_pid; + uint32_t src_cid; + uint32_t confirm_rx; + uint32_t size; + uint32_t dst_pid; + uint32_t dst_cid; +}; + +/* internals */ + +#define RPCROUTER_MAX_REMOTE_SERVERS 100 + +struct rr_fragment { + unsigned char data[RPCROUTER_MSGSIZE_MAX]; + uint32_t length; + struct rr_fragment *next; +}; + +struct rr_packet { + struct list_head list; + struct rr_fragment *first; + struct rr_fragment *last; + struct rr_header hdr; + uint32_t mid; + uint32_t length; +}; + +#define PACMARK_LAST(n) ((n) & 0x80000000) +#define PACMARK_MID(n) (((n) >> 16) & 0xFF) +#define PACMARK_LEN(n) ((n) & 0xFFFF) + +static inline uint32_t PACMARK(uint32_t len, uint32_t mid, uint32_t first, + uint32_t last) +{ + return (len & 0xFFFF) | + ((mid & 0xFF) << 16) | + ((!!first) << 30) | + ((!!last) << 31); +} + +struct rr_server { + struct list_head list; + + uint32_t pid; + uint32_t cid; + uint32_t prog; + uint32_t vers; + + dev_t device_number; + struct cdev cdev; + struct device *device; + struct rpcsvr_platform_device p_device; + char pdev_name[32]; +}; + +struct rr_remote_endpoint { + uint32_t pid; + uint32_t cid; + + int tx_quota_cntr; + spinlock_t quota_lock; + wait_queue_head_t quota_wait; + + struct list_head list; +}; + +struct msm_reply_route { + uint32_t xid; + uint32_t pid; + uint32_t cid; + uint32_t unused; +}; + +#define MAX_REPLY_ROUTE 4 + +struct msm_rpc_endpoint { + struct list_head list; + + /* incomplete packets waiting for assembly */ + struct list_head incomplete; + + /* complete packets waiting to be read */ + struct list_head read_q; + spinlock_t read_q_lock; + struct wake_lock read_q_wake_lock; + wait_queue_head_t wait_q; + unsigned flags; + + /* endpoint address */ + uint32_t pid; + uint32_t cid; + + /* bound remote address + * if not connected (dst_pid == 0xffffffff) RPC_CALL writes fail + * RPC_CALLs must be to the prog/vers below or they will fail + */ + uint32_t dst_pid; + uint32_t dst_cid; + uint32_t dst_prog; /* be32 */ + uint32_t dst_vers; /* be32 */ + + /* RPC_REPLY writes must be routed to the pid/cid of the + * RPC_CALL they are in reply to. Keep a cache of valid + * xid/pid/cid groups. pid 0xffffffff -> not valid. 
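+	 * Entries are recycled round-robin via next_rroute, so at most
+	 * MAX_REPLY_ROUTE calls can await replies before the oldest
+	 * cached route is overwritten.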
+ */ + unsigned next_rroute; + struct msm_reply_route rroute[MAX_REPLY_ROUTE]; + + /* device node if this endpoint is accessed via userspace */ + dev_t dev; +}; + +/* shared between smd_rpcrouter*.c */ + +int __msm_rpc_read(struct msm_rpc_endpoint *ept, + struct rr_fragment **frag, + unsigned len, long timeout); + +struct msm_rpc_endpoint *msm_rpcrouter_create_local_endpoint(dev_t dev); +int msm_rpcrouter_destroy_local_endpoint(struct msm_rpc_endpoint *ept); + +int msm_rpcrouter_create_server_cdev(struct rr_server *server); +int msm_rpcrouter_create_server_pdev(struct rr_server *server); + +int msm_rpcrouter_init_devices(void); +void msm_rpcrouter_exit_devices(void); + +extern dev_t msm_rpcrouter_devno; +extern struct class *msm_rpcrouter_class; +#endif diff --git a/arch/arm/mach-msm/smd_rpcrouter_device.c b/arch/arm/mach-msm/smd_rpcrouter_device.c new file mode 100644 index 0000000000000..1ae83b5aded6e --- /dev/null +++ b/arch/arm/mach-msm/smd_rpcrouter_device.c @@ -0,0 +1,377 @@ +/* arch/arm/mach-msm/smd_rpcrouter_device.c + * + * Copyright (C) 2007 Google, Inc. + * Copyright (c) 2007-2009 QUALCOMM Incorporated. + * Author: San Mehat + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "smd_rpcrouter.h" + +#define SAFETY_MEM_SIZE 65536 + +/* Next minor # available for a remote server */ +static int next_minor = 1; + +struct class *msm_rpcrouter_class; +dev_t msm_rpcrouter_devno; + +static struct cdev rpcrouter_cdev; +static struct device *rpcrouter_device; + +static int rpcrouter_open(struct inode *inode, struct file *filp) +{ + int rc; + struct msm_rpc_endpoint *ept; + + rc = nonseekable_open(inode, filp); + if (rc < 0) + return rc; + + ept = msm_rpcrouter_create_local_endpoint(inode->i_rdev); + if (!ept) + return -ENOMEM; + + filp->private_data = ept; + return 0; +} + +static int rpcrouter_release(struct inode *inode, struct file *filp) +{ + struct msm_rpc_endpoint *ept; + ept = (struct msm_rpc_endpoint *) filp->private_data; + + return msm_rpcrouter_destroy_local_endpoint(ept); +} + +static ssize_t rpcrouter_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + struct msm_rpc_endpoint *ept; + struct rr_fragment *frag, *next; + int rc; + + ept = (struct msm_rpc_endpoint *) filp->private_data; + + rc = __msm_rpc_read(ept, &frag, count, -1); + if (rc < 0) + return rc; + + count = rc; + + while (frag != NULL) { + if (copy_to_user(buf, frag->data, frag->length)) { + printk(KERN_ERR + "rpcrouter: could not copy all read data to user!\n"); + rc = -EFAULT; + } + buf += frag->length; + next = frag->next; + kfree(frag); + frag = next; + } + + return rc; +} + +static ssize_t rpcrouter_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct msm_rpc_endpoint *ept; + int rc = 0; + void *k_buffer; + + ept = (struct msm_rpc_endpoint *) filp->private_data; + + /* A check for safety, this seems non-standard */ + if (count > SAFETY_MEM_SIZE) 
+ return -EINVAL; + + k_buffer = kmalloc(count, GFP_KERNEL); + if (!k_buffer) + return -ENOMEM; + + if (copy_from_user(k_buffer, buf, count)) { + rc = -EFAULT; + goto write_out_free; + } + + rc = msm_rpc_write(ept, k_buffer, count); + if (rc < 0) + goto write_out_free; + + rc = count; +write_out_free: + kfree(k_buffer); + return rc; +} + +static unsigned int rpcrouter_poll(struct file *filp, + struct poll_table_struct *wait) +{ + struct msm_rpc_endpoint *ept; + unsigned mask = 0; + ept = (struct msm_rpc_endpoint *) filp->private_data; + + /* If there's data already in the read queue, return POLLIN. + * Else, wait for the requested amount of time, and check again. + */ + + if (!list_empty(&ept->read_q)) + mask |= POLLIN; + + if (!mask) { + poll_wait(filp, &ept->wait_q, wait); + if (!list_empty(&ept->read_q)) + mask |= POLLIN; + } + + return mask; +} + +static long rpcrouter_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct msm_rpc_endpoint *ept; + struct rpcrouter_ioctl_server_args server_args; + int rc = 0; + uint32_t n; + + ept = (struct msm_rpc_endpoint *) filp->private_data; + switch (cmd) { + + case RPC_ROUTER_IOCTL_GET_VERSION: + n = RPC_ROUTER_VERSION_V1; + rc = put_user(n, (unsigned int *) arg); + break; + + case RPC_ROUTER_IOCTL_GET_MTU: + /* the pacmark word reduces the actual payload + * possible per message + */ + n = RPCROUTER_MSGSIZE_MAX - sizeof(uint32_t); + rc = put_user(n, (unsigned int *) arg); + break; + + case RPC_ROUTER_IOCTL_REGISTER_SERVER: + rc = copy_from_user(&server_args, (void *) arg, + sizeof(server_args)); + if (rc < 0) + break; + msm_rpc_register_server(ept, + server_args.prog, + server_args.vers); + break; + + case RPC_ROUTER_IOCTL_UNREGISTER_SERVER: + rc = copy_from_user(&server_args, (void *) arg, + sizeof(server_args)); + if (rc < 0) + break; + + msm_rpc_unregister_server(ept, + server_args.prog, + server_args.vers); + break; + + case RPC_ROUTER_IOCTL_GET_MINOR_VERSION: + n = MSM_RPC_GET_MINOR(msm_rpc_get_vers(ept)); + rc = put_user(n, (unsigned int *)arg); + break; + + default: + rc = -EINVAL; + break; + } + + return rc; +} + +static struct file_operations rpcrouter_server_fops = { + .owner = THIS_MODULE, + .open = rpcrouter_open, + .release = rpcrouter_release, + .read = rpcrouter_read, + .write = rpcrouter_write, + .poll = rpcrouter_poll, + .unlocked_ioctl = rpcrouter_ioctl, +}; + +static struct file_operations rpcrouter_router_fops = { + .owner = THIS_MODULE, + .open = rpcrouter_open, + .release = rpcrouter_release, + .read = rpcrouter_read, + .write = rpcrouter_write, + .poll = rpcrouter_poll, + .unlocked_ioctl = rpcrouter_ioctl, +}; + +int msm_rpcrouter_create_server_cdev(struct rr_server *server) +{ + int rc; + uint32_t dev_vers; + + if (next_minor == RPCROUTER_MAX_REMOTE_SERVERS) { + printk(KERN_ERR + "rpcrouter: Minor numbers exhausted - Increase " + "RPCROUTER_MAX_REMOTE_SERVERS\n"); + return -ENOBUFS; + } + +#if !defined(CONFIG_MSM_LEGACY_7X00A_AMSS) + /* Servers with bit 31 set are remote msm servers with hashkey version. + * Servers with bit 31 not set are remote msm servers with + * backwards compatible version type in which case the minor number + * (lower 16 bits) is set to zero. 
+ *
+ */
+	if ((server->vers & RPC_VERSION_MODE_MASK))
+		dev_vers = server->vers;
+	else
+		dev_vers = server->vers & RPC_VERSION_MAJOR_MASK;
+#else
+	dev_vers = server->vers;
+#endif
+
+	server->device_number =
+		MKDEV(MAJOR(msm_rpcrouter_devno), next_minor++);
+
+	server->device =
+		device_create(msm_rpcrouter_class, rpcrouter_device,
+			      server->device_number, NULL, "%.8x:%.8x",
+			      server->prog, dev_vers);
+	if (IS_ERR(server->device)) {
+		printk(KERN_ERR
+		       "rpcrouter: Unable to create device (%ld)\n",
+		       PTR_ERR(server->device));
+		return PTR_ERR(server->device);
+	}
+
+	cdev_init(&server->cdev, &rpcrouter_server_fops);
+	server->cdev.owner = THIS_MODULE;
+
+	rc = cdev_add(&server->cdev, server->device_number, 1);
+	if (rc < 0) {
+		printk(KERN_ERR
+		       "rpcrouter: Unable to add chrdev (%d)\n", rc);
+		device_destroy(msm_rpcrouter_class, server->device_number);
+		return rc;
+	}
+	return 0;
+}
+
+/* For the backwards compatible version type (bit 31 cleared),
+ * clearing the minor number (lower 16 bits) in the device name
+ * is necessary for driver binding.
+ */
+int msm_rpcrouter_create_server_pdev(struct rr_server *server)
+{
+	sprintf(server->pdev_name, "rs%.8x:%.8x",
+		server->prog,
+#if !defined(CONFIG_MSM_LEGACY_7X00A_AMSS)
+		(server->vers & RPC_VERSION_MODE_MASK) ? server->vers :
+		(server->vers & RPC_VERSION_MAJOR_MASK));
+#else
+		server->vers);
+#endif
+
+	server->p_device.base.id = -1;
+	server->p_device.base.name = server->pdev_name;
+
+	server->p_device.prog = server->prog;
+	server->p_device.vers = server->vers;
+
+	platform_device_register(&server->p_device.base);
+	return 0;
+}
+
+int msm_rpcrouter_init_devices(void)
+{
+	int rc;
+	int major;
+
+	/* Create the device nodes */
+	msm_rpcrouter_class = class_create(THIS_MODULE, "oncrpc");
+	if (IS_ERR(msm_rpcrouter_class)) {
+		rc = -ENOMEM;
+		printk(KERN_ERR
+		       "rpcrouter: failed to create oncrpc class\n");
+		goto fail;
+	}
+
+	rc = alloc_chrdev_region(&msm_rpcrouter_devno, 0,
+				 RPCROUTER_MAX_REMOTE_SERVERS + 1,
+				 "oncrpc");
+	if (rc < 0) {
+		printk(KERN_ERR
+		       "rpcrouter: Failed to alloc chardev region (%d)\n", rc);
+		goto fail_destroy_class;
+	}
+
+	major = MAJOR(msm_rpcrouter_devno);
+	rpcrouter_device = device_create(msm_rpcrouter_class, NULL,
+					 msm_rpcrouter_devno, NULL, "%.8x:%d",
+					 0, 0);
+	if (IS_ERR(rpcrouter_device)) {
+		rc = -ENOMEM;
+		goto fail_unregister_cdev_region;
+	}
+
+	cdev_init(&rpcrouter_cdev, &rpcrouter_router_fops);
+	rpcrouter_cdev.owner = THIS_MODULE;
+
+	rc = cdev_add(&rpcrouter_cdev, msm_rpcrouter_devno, 1);
+	if (rc < 0)
+		goto fail_destroy_device;
+
+	return 0;
+
+fail_destroy_device:
+	device_destroy(msm_rpcrouter_class, msm_rpcrouter_devno);
+fail_unregister_cdev_region:
+	unregister_chrdev_region(msm_rpcrouter_devno,
+				 RPCROUTER_MAX_REMOTE_SERVERS + 1);
+fail_destroy_class:
+	class_destroy(msm_rpcrouter_class);
+fail:
+	return rc;
+}
+
+void msm_rpcrouter_exit_devices(void)
+{
+	cdev_del(&rpcrouter_cdev);
+	device_destroy(msm_rpcrouter_class, msm_rpcrouter_devno);
+	unregister_chrdev_region(msm_rpcrouter_devno,
+				 RPCROUTER_MAX_REMOTE_SERVERS + 1);
+	class_destroy(msm_rpcrouter_class);
+}
+
diff --git a/arch/arm/mach-msm/smd_rpcrouter_servers.c b/arch/arm/mach-msm/smd_rpcrouter_servers.c
new file mode 100644
index 0000000000000..839600221631a
--- /dev/null
+++ b/arch/arm/mach-msm/smd_rpcrouter_servers.c
@@ -0,0 +1,230 @@
+/* arch/arm/mach-msm/rpc_servers.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Iliyan Malchev + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include "smd_rpcrouter.h" + +static struct msm_rpc_endpoint *endpoint; + +#define FLAG_REGISTERED 0x0001 + +static LIST_HEAD(rpc_server_list); +static DEFINE_MUTEX(rpc_server_list_lock); +static int rpc_servers_active; +static struct wake_lock rpc_servers_wake_lock; + +static void rpc_server_register(struct msm_rpc_server *server) +{ + int rc; + rc = msm_rpc_register_server(endpoint, server->prog, server->vers); + if (rc < 0) + printk(KERN_ERR "[rpcserver] error registering %p @ %08x:%d\n", + server, server->prog, server->vers); +} + +static struct msm_rpc_server *rpc_server_find(uint32_t prog, uint32_t vers) +{ + struct msm_rpc_server *server; + + mutex_lock(&rpc_server_list_lock); + list_for_each_entry(server, &rpc_server_list, list) { + if ((server->prog == prog) && +#if !defined(CONFIG_MSM_LEGACY_7X00A_AMSS) + msm_rpc_is_compatible_version(server->vers, vers)) { +#else + server->vers == vers) { +#endif + mutex_unlock(&rpc_server_list_lock); + return server; + } + } + mutex_unlock(&rpc_server_list_lock); + return NULL; +} + +static void rpc_server_register_all(void) +{ + struct msm_rpc_server *server; + + mutex_lock(&rpc_server_list_lock); + list_for_each_entry(server, &rpc_server_list, list) { + if (!(server->flags & FLAG_REGISTERED)) { + rpc_server_register(server); + server->flags |= FLAG_REGISTERED; + } + } + mutex_unlock(&rpc_server_list_lock); +} + +int msm_rpc_create_server(struct msm_rpc_server *server) +{ + /* make sure we're in a sane state first */ + server->flags = 0; + INIT_LIST_HEAD(&server->list); + + mutex_lock(&rpc_server_list_lock); + list_add(&server->list, &rpc_server_list); + if (rpc_servers_active) { + rpc_server_register(server); + server->flags |= FLAG_REGISTERED; + } + mutex_unlock(&rpc_server_list_lock); + + return 0; +} + +static int rpc_send_accepted_void_reply(struct msm_rpc_endpoint *client, + uint32_t xid, uint32_t accept_status) +{ + int rc = 0; + uint8_t reply_buf[sizeof(struct rpc_reply_hdr)]; + struct rpc_reply_hdr *reply = (struct rpc_reply_hdr *)reply_buf; + + reply->xid = cpu_to_be32(xid); + reply->type = cpu_to_be32(1); /* reply */ + reply->reply_stat = cpu_to_be32(RPCMSG_REPLYSTAT_ACCEPTED); + + reply->data.acc_hdr.accept_stat = cpu_to_be32(accept_status); + reply->data.acc_hdr.verf_flavor = 0; + reply->data.acc_hdr.verf_length = 0; + + rc = msm_rpc_write(client, reply_buf, sizeof(reply_buf)); + if (rc < 0) + printk(KERN_ERR + "%s: could not write response: %d\n", + __FUNCTION__, rc); + + return rc; +} + +static int rpc_servers_thread(void *data) +{ + void *buffer; + struct rpc_request_hdr *req; + struct msm_rpc_server *server; + int rc; + + for (;;) { + wake_unlock(&rpc_servers_wake_lock); + rc = wait_event_interruptible(endpoint->wait_q, + !list_empty(&endpoint->read_q)); + wake_lock(&rpc_servers_wake_lock); + rc = msm_rpc_read(endpoint, &buffer, -1, -1); + if (rc < 0) { + 
printk(KERN_ERR "%s: could not read: %d\n", + __FUNCTION__, rc); + break; + } + req = (struct rpc_request_hdr *)buffer; + + req->type = be32_to_cpu(req->type); + req->xid = be32_to_cpu(req->xid); + req->rpc_vers = be32_to_cpu(req->rpc_vers); + req->prog = be32_to_cpu(req->prog); + req->vers = be32_to_cpu(req->vers); + req->procedure = be32_to_cpu(req->procedure); + + server = rpc_server_find(req->prog, req->vers); + + if (req->rpc_vers != 2) + continue; + if (req->type != 0) + continue; + if (!server) { + rpc_send_accepted_void_reply( + endpoint, req->xid, + RPC_ACCEPTSTAT_PROG_UNAVAIL); + continue; + } + + rc = server->rpc_call(server, req, rc); + + switch (rc) { + case 0: + rpc_send_accepted_void_reply( + endpoint, req->xid, + RPC_ACCEPTSTAT_SUCCESS); + break; + default: + rpc_send_accepted_void_reply( + endpoint, req->xid, + RPC_ACCEPTSTAT_PROG_UNAVAIL); + break; + } + + kfree(buffer); + } + + do_exit(0); +} + +static int rpcservers_probe(struct platform_device *pdev) +{ + struct task_struct *server_thread; + + endpoint = msm_rpc_open(); + if (IS_ERR(endpoint)) + return PTR_ERR(endpoint); + + /* we're online -- register any servers installed beforehand */ + rpc_servers_active = 1; + rpc_server_register_all(); + + /* start the kernel thread */ + server_thread = kthread_run(rpc_servers_thread, NULL, "krpcserversd"); + if (IS_ERR(server_thread)) + return PTR_ERR(server_thread); + + return 0; +} + +static struct platform_driver rpcservers_driver = { + .probe = rpcservers_probe, + .driver = { + .name = "oncrpc_router", + .owner = THIS_MODULE, + }, +}; + +static int __init rpc_servers_init(void) +{ + wake_lock_init(&rpc_servers_wake_lock, WAKE_LOCK_SUSPEND, "rpc_server"); + return platform_driver_register(&rpcservers_driver); +} + +module_init(rpc_servers_init); + +MODULE_DESCRIPTION("MSM RPC Servers"); +MODULE_AUTHOR("Iliyan Malchev "); +MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-msm/smd_tty.c b/arch/arm/mach-msm/smd_tty.c new file mode 100644 index 0000000000000..8715544dfdc16 --- /dev/null +++ b/arch/arm/mach-msm/smd_tty.c @@ -0,0 +1,229 @@ +/* arch/arm/mach-msm/smd_tty.c + * + * Copyright (C) 2007 Google, Inc. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#define MAX_SMD_TTYS 32 + +static DEFINE_MUTEX(smd_tty_lock); + +struct smd_tty_info { + smd_channel_t *ch; + struct tty_struct *tty; + struct wake_lock wake_lock; + int open_count; +}; + +static struct smd_tty_info smd_tty[MAX_SMD_TTYS]; + +static const struct smd_tty_channel_desc smd_default_tty_channels[] = { + { .id = 0, .name = "SMD_DS" }, + { .id = 27, .name = "SMD_GPSNMEA" }, +}; + +static const struct smd_tty_channel_desc *smd_tty_channels = + smd_default_tty_channels; +static int smd_tty_channels_len = ARRAY_SIZE(smd_default_tty_channels); + +int smd_set_channel_list(const struct smd_tty_channel_desc *channels, int len) +{ + smd_tty_channels = channels; + smd_tty_channels_len = len; + return 0; +} + +static void smd_tty_notify(void *priv, unsigned event) +{ + unsigned char *ptr; + int avail; + struct smd_tty_info *info = priv; + struct tty_struct *tty = info->tty; + + if (!tty) + return; + + if (event != SMD_EVENT_DATA) + return; + + for (;;) { + if (test_bit(TTY_THROTTLED, &tty->flags)) break; + avail = smd_read_avail(info->ch); + if (avail == 0) break; + + avail = tty_prepare_flip_string(tty, &ptr, avail); + + if (smd_read(info->ch, ptr, avail) != avail) { + /* shouldn't be possible since we're in interrupt + ** context here and nobody else could 'steal' our + ** characters. + */ + printk(KERN_ERR "OOPS - smd_tty_buffer mismatch?!"); + } + + wake_lock_timeout(&info->wake_lock, HZ / 2); + tty_flip_buffer_push(tty); + } + + /* XXX only when writable and necessary */ + tty_wakeup(tty); +} + +static int smd_tty_open(struct tty_struct *tty, struct file *f) +{ + int res = 0; + int n = tty->index; + struct smd_tty_info *info; + const char *name = NULL; + int i; + + for (i = 0; i < smd_tty_channels_len; i++) { + if (smd_tty_channels[i].id == n) { + name = smd_tty_channels[i].name; + break; + } + } + if (!name) + return -ENODEV; + + info = smd_tty + n; + + mutex_lock(&smd_tty_lock); + wake_lock_init(&info->wake_lock, WAKE_LOCK_SUSPEND, name); + tty->driver_data = info; + + if (info->open_count++ == 0) { + info->tty = tty; + if (info->ch) { + smd_kick(info->ch); + } else { + res = smd_open(name, &info->ch, info, smd_tty_notify); + } + } + mutex_unlock(&smd_tty_lock); + + return res; +} + +static void smd_tty_close(struct tty_struct *tty, struct file *f) +{ + struct smd_tty_info *info = tty->driver_data; + + if (info == 0) + return; + + mutex_lock(&smd_tty_lock); + if (--info->open_count == 0) { + info->tty = 0; + tty->driver_data = 0; + wake_lock_destroy(&info->wake_lock); + if (info->ch) { + smd_close(info->ch); + info->ch = 0; + } + } + mutex_unlock(&smd_tty_lock); +} + +static int smd_tty_write(struct tty_struct *tty, const unsigned char *buf, int len) +{ + struct smd_tty_info *info = tty->driver_data; + int avail; + + /* if we're writing to a packet channel we will + ** never be able to write more data than there + ** is currently space for + */ + avail = smd_write_avail(info->ch); + if (len > avail) + len = avail; + + return smd_write(info->ch, buf, len); +} + +static int smd_tty_write_room(struct tty_struct *tty) +{ + struct smd_tty_info *info = tty->driver_data; + return smd_write_avail(info->ch); +} + +static int smd_tty_chars_in_buffer(struct tty_struct *tty) +{ + struct smd_tty_info *info = tty->driver_data; + return smd_read_avail(info->ch); +} + +static void smd_tty_unthrottle(struct tty_struct *tty) +{ + struct smd_tty_info *info = tty->driver_data; + 
smd_kick(info->ch); +} + +static struct tty_operations smd_tty_ops = { + .open = smd_tty_open, + .close = smd_tty_close, + .write = smd_tty_write, + .write_room = smd_tty_write_room, + .chars_in_buffer = smd_tty_chars_in_buffer, + .unthrottle = smd_tty_unthrottle, +}; + +static struct tty_driver *smd_tty_driver; + +static int __init smd_tty_init(void) +{ + int ret, i; + + smd_tty_driver = alloc_tty_driver(MAX_SMD_TTYS); + if (smd_tty_driver == 0) + return -ENOMEM; + + smd_tty_driver->owner = THIS_MODULE; + smd_tty_driver->driver_name = "smd_tty_driver"; + smd_tty_driver->name = "smd"; + smd_tty_driver->major = 0; + smd_tty_driver->minor_start = 0; + smd_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; + smd_tty_driver->subtype = SERIAL_TYPE_NORMAL; + smd_tty_driver->init_termios = tty_std_termios; + smd_tty_driver->init_termios.c_iflag = 0; + smd_tty_driver->init_termios.c_oflag = 0; + smd_tty_driver->init_termios.c_cflag = B38400 | CS8 | CREAD; + smd_tty_driver->init_termios.c_lflag = 0; + smd_tty_driver->flags = TTY_DRIVER_RESET_TERMIOS | + TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; + tty_set_operations(smd_tty_driver, &smd_tty_ops); + + ret = tty_register_driver(smd_tty_driver); + if (ret) return ret; + + for (i = 0; i < smd_tty_channels_len; i++) + tty_register_device(smd_tty_driver, smd_tty_channels[i].id, 0); + + return 0; +} + +module_init(smd_tty_init); diff --git a/arch/arm/mach-msm/smem_log.c b/arch/arm/mach-msm/smem_log.c new file mode 100644 index 0000000000000..f569e1904bad5 --- /dev/null +++ b/arch/arm/mach-msm/smem_log.c @@ -0,0 +1,2032 @@ +/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Code Aurora Forum nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * Alternatively, provided that this notice is retained in full, this software + * may be relicensed by the recipient under the terms of the GNU General Public + * License version 2 ("GPL") and only version 2, in which case the provisions of + * the GPL apply INSTEAD OF those given above. If the recipient relicenses the + * software under the GPL, then the identification text in the MODULE_LICENSE + * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a + * recipient changes the license terms to the GPL, subsequent recipients shall + * not relicense under alternate licensing terms, including the BSD or dual + * BSD/GPL terms. In addition, the following license statement immediately + * below and between the words START and END shall also then apply when this + * software is relicensed under the GPL: + * + * START + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 and only version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * END + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ +/* + * Shared memory logging implementation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "smd_private.h" + +#define DEBUG +#undef DEBUG + +#ifdef DEBUG +#define D_DUMP_BUFFER(prestr, cnt, buf) \ +do { \ + int i; \ + printk(KERN_ERR "%s", prestr); \ + for (i = 0; i < cnt; i++) \ + printk(KERN_ERR "%.2x", buf[i]); \ + printk(KERN_ERR "\n"); \ +} while (0) +#else +#define D_DUMP_BUFFER(prestr, cnt, buf) +#endif + +#ifdef DEBUG +#define D(x...) printk(x) +#else +#define D(x...) 
do {} while (0) +#endif + +#define TIMESTAMP_ADDR (MSM_CSR_BASE + 0x04) + +struct smem_log_item { + uint32_t identifier; + uint32_t timetick; + uint32_t data1; + uint32_t data2; + uint32_t data3; +}; + +#define SMEM_LOG_NUM_ENTRIES 2000 +#define SMEM_LOG_EVENTS_SIZE (sizeof(struct smem_log_item) * \ + SMEM_LOG_NUM_ENTRIES) + +#define SMEM_LOG_NUM_STATIC_ENTRIES 150 +#define SMEM_STATIC_LOG_EVENTS_SIZE (sizeof(struct smem_log_item) * \ + SMEM_LOG_NUM_STATIC_ENTRIES) + +#define SMEM_LOG_NUM_POWER_ENTRIES 2000 +#define SMEM_POWER_LOG_EVENTS_SIZE (sizeof(struct smem_log_item) * \ + SMEM_LOG_NUM_POWER_ENTRIES) + +#if defined(CONFIG_ARCH_MSM7X30) +#define SMEM_SPINLOCK_SMEM_LOG "S:2" +#define SMEM_SPINLOCK_STATIC_LOG "S:5" +#else +#define SMEM_SPINLOCK_SMEM_LOG 2 +#define SMEM_SPINLOCK_STATIC_LOG 5 +#endif +/* POWER shares with SMEM_SPINLOCK_SMEM_LOG */ + +static remote_spinlock_t remote_spinlock; +static remote_spinlock_t remote_spinlock_static; + +struct smem_log_inst { + int which_log; + struct smem_log_item __iomem *events; + uint32_t __iomem *idx; + int num; + remote_spinlock_t *remote_spinlock; +}; + +enum smem_logs { + GEN = 0, + STA, + POW, + NUM +}; + +static struct smem_log_inst inst[NUM]; + +#if defined(CONFIG_DEBUG_FS) + +#define HSIZE 13 + +struct sym { + uint32_t val; + char *str; + struct hlist_node node; +}; + +struct sym id_syms[] = { + { SMEM_LOG_PROC_ID_MODEM, "MODM" }, + { SMEM_LOG_PROC_ID_Q6, "QDSP" }, + { SMEM_LOG_PROC_ID_APPS, "APPS" }, +}; + +struct sym base_syms[] = { + { SMEM_LOG_ONCRPC_EVENT_BASE, "ONCRPC" }, + { SMEM_LOG_SMEM_EVENT_BASE, "SMEM" }, + { SMEM_LOG_TMC_EVENT_BASE, "TMC" }, + { SMEM_LOG_TIMETICK_EVENT_BASE, "TIMETICK" }, + { SMEM_LOG_DEM_EVENT_BASE, "DEM" }, + { SMEM_LOG_ERROR_EVENT_BASE, "ERROR" }, + { SMEM_LOG_DCVS_EVENT_BASE, "DCVS" }, + { SMEM_LOG_SLEEP_EVENT_BASE, "SLEEP" }, + { SMEM_LOG_RPC_ROUTER_EVENT_BASE, "ROUTER" }, +}; + +struct sym event_syms[] = { +#if defined(CONFIG_MSM_N_WAY_SMSM) + { DEM_SMSM_ISR, "SMSM_ISR" }, + { DEM_STATE_CHANGE, "STATE_CHANGE" }, + { DEM_STATE_MACHINE_ENTER, "STATE_MACHINE_ENTER" }, + { DEM_ENTER_SLEEP, "ENTER_SLEEP" }, + { DEM_END_SLEEP, "END_SLEEP" }, + { DEM_SETUP_SLEEP, "SETUP_SLEEP" }, + { DEM_SETUP_POWER_COLLAPSE, "SETUP_POWER_COLLAPSE" }, + { DEM_SETUP_SUSPEND, "SETUP_SUSPEND" }, + { DEM_EARLY_EXIT, "EARLY_EXIT" }, + { DEM_WAKEUP_REASON, "WAKEUP_REASON" }, + { DEM_DETECT_WAKEUP, "DETECT_WAKEUP" }, + { DEM_DETECT_RESET, "DETECT_RESET" }, + { DEM_DETECT_SLEEPEXIT, "DETECT_SLEEPEXIT" }, + { DEM_DETECT_RUN, "DETECT_RUN" }, + { DEM_APPS_SWFI, "APPS_SWFI" }, + { DEM_SEND_WAKEUP, "SEND_WAKEUP" }, + { DEM_ASSERT_OKTS, "ASSERT_OKTS" }, + { DEM_NEGATE_OKTS, "NEGATE_OKTS" }, + { DEM_PROC_COMM_CMD, "PROC_COMM_CMD" }, + { DEM_REMOVE_PROC_PWR, "REMOVE_PROC_PWR" }, + { DEM_RESTORE_PROC_PWR, "RESTORE_PROC_PWR" }, + { DEM_SMI_CLK_DISABLED, "SMI_CLK_DISABLED" }, + { DEM_SMI_CLK_ENABLED, "SMI_CLK_ENABLED" }, + { DEM_MAO_INTS, "MAO_INTS" }, + { DEM_APPS_WAKEUP_INT, "APPS_WAKEUP_INT" }, + { DEM_PROC_WAKEUP, "PROC_WAKEUP" }, + { DEM_PROC_POWERUP, "PROC_POWERUP" }, + { DEM_TIMER_EXPIRED, "TIMER_EXPIRED" }, + { DEM_SEND_BATTERY_INFO, "SEND_BATTERY_INFO" }, + { DEM_REMOTE_PWR_CB, "REMOTE_PWR_CB" }, + { DEM_TIME_SYNC_START, "TIME_SYNC_START" }, + { DEM_TIME_SYNC_SEND_VALUE, "TIME_SYNC_SEND_VALUE" }, + { DEM_TIME_SYNC_DONE, "TIME_SYNC_DONE" }, + { DEM_TIME_SYNC_REQUEST, "TIME_SYNC_REQUEST" }, + { DEM_TIME_SYNC_POLL, "TIME_SYNC_POLL" }, + { DEM_TIME_SYNC_INIT, "TIME_SYNC_INIT" }, + { DEM_INIT, "INIT" }, +#else + + { DEM_NO_SLEEP, "NO_SLEEP" 
}, + { DEM_INSUF_TIME, "INSUF_TIME" }, + { DEMAPPS_ENTER_SLEEP, "APPS_ENTER_SLEEP" }, + { DEMAPPS_DETECT_WAKEUP, "APPS_DETECT_WAKEUP" }, + { DEMAPPS_END_APPS_TCXO, "APPS_END_APPS_TCXO" }, + { DEMAPPS_ENTER_SLEEPEXIT, "APPS_ENTER_SLEEPEXIT" }, + { DEMAPPS_END_APPS_SLEEP, "APPS_END_APPS_SLEEP" }, + { DEMAPPS_SETUP_APPS_PWRCLPS, "APPS_SETUP_APPS_PWRCLPS" }, + { DEMAPPS_PWRCLPS_EARLY_EXIT, "APPS_PWRCLPS_EARLY_EXIT" }, + { DEMMOD_SEND_WAKEUP, "MOD_SEND_WAKEUP" }, + { DEMMOD_NO_APPS_VOTE, "MOD_NO_APPS_VOTE" }, + { DEMMOD_NO_TCXO_SLEEP, "MOD_NO_TCXO_SLEEP" }, + { DEMMOD_BT_CLOCK, "MOD_BT_CLOCK" }, + { DEMMOD_UART_CLOCK, "MOD_UART_CLOCK" }, + { DEMMOD_OKTS, "MOD_OKTS" }, + { DEM_SLEEP_INFO, "SLEEP_INFO" }, + { DEMMOD_TCXO_END, "MOD_TCXO_END" }, + { DEMMOD_END_SLEEP_SIG, "MOD_END_SLEEP_SIG" }, + { DEMMOD_SETUP_APPSSLEEP, "MOD_SETUP_APPSSLEEP" }, + { DEMMOD_ENTER_TCXO, "MOD_ENTER_TCXO" }, + { DEMMOD_WAKE_APPS, "MOD_WAKE_APPS" }, + { DEMMOD_POWER_COLLAPSE_APPS, "MOD_POWER_COLLAPSE_APPS" }, + { DEMMOD_RESTORE_APPS_PWR, "MOD_RESTORE_APPS_PWR" }, + { DEMAPPS_ASSERT_OKTS, "APPS_ASSERT_OKTS" }, + { DEMAPPS_RESTART_START_TIMER, "APPS_RESTART_START_TIMER" }, + { DEMAPPS_ENTER_RUN, "APPS_ENTER_RUN" }, + { DEMMOD_MAO_INTS, "MOD_MAO_INTS" }, + { DEMMOD_POWERUP_APPS_CALLED, "MOD_POWERUP_APPS_CALLED" }, + { DEMMOD_PC_TIMER_EXPIRED, "MOD_PC_TIMER_EXPIRED" }, + { DEM_DETECT_SLEEPEXIT, "_DETECT_SLEEPEXIT" }, + { DEM_DETECT_RUN, "DETECT_RUN" }, + { DEM_SET_APPS_TIMER, "SET_APPS_TIMER" }, + { DEM_NEGATE_OKTS, "NEGATE_OKTS" }, + { DEMMOD_APPS_WAKEUP_INT, "MOD_APPS_WAKEUP_INT" }, + { DEMMOD_APPS_SWFI, "MOD_APPS_SWFI" }, + { DEM_SEND_BATTERY_INFO, "SEND_BATTERY_INFO" }, + { DEM_SMI_CLK_DISABLED, "SMI_CLK_DISABLED" }, + { DEM_SMI_CLK_ENABLED, "SMI_CLK_ENABLED" }, + { DEMAPPS_SETUP_APPS_SUSPEND, "APPS_SETUP_APPS_SUSPEND" }, + { DEM_RPC_EARLY_EXIT, "RPC_EARLY_EXIT" }, + { DEMAPPS_WAKEUP_REASON, "APPS_WAKEUP_REASON" }, + { DEM_INIT, "INIT" }, +#endif + { DEMMOD_UMTS_BASE, "MOD_UMTS_BASE" }, + { DEMMOD_GL1_GO_TO_SLEEP, "GL1_GO_TO_SLEEP" }, + { DEMMOD_GL1_SLEEP_START, "GL1_SLEEP_START" }, + { DEMMOD_GL1_AFTER_GSM_CLK_ON, "GL1_AFTER_GSM_CLK_ON" }, + { DEMMOD_GL1_BEFORE_RF_ON, "GL1_BEFORE_RF_ON" }, + { DEMMOD_GL1_AFTER_RF_ON, "GL1_AFTER_RF_ON" }, + { DEMMOD_GL1_FRAME_TICK, "GL1_FRAME_TICK" }, + { DEMMOD_GL1_WCDMA_START, "GL1_WCDMA_START" }, + { DEMMOD_GL1_WCDMA_ENDING, "GL1_WCDMA_ENDING" }, + { DEMMOD_UMTS_NOT_OKTS, "UMTS_NOT_OKTS" }, + { DEMMOD_UMTS_START_TCXO_SHUTDOWN, "UMTS_START_TCXO_SHUTDOWN" }, + { DEMMOD_UMTS_END_TCXO_SHUTDOWN, "UMTS_END_TCXO_SHUTDOWN" }, + { DEMMOD_UMTS_START_ARM_HALT, "UMTS_START_ARM_HALT" }, + { DEMMOD_UMTS_END_ARM_HALT, "UMTS_END_ARM_HALT" }, + { DEMMOD_UMTS_NEXT_WAKEUP_SCLK, "UMTS_NEXT_WAKEUP_SCLK" }, + { TIME_REMOTE_LOG_EVENT_START, "START" }, + { TIME_REMOTE_LOG_EVENT_GOTO_WAIT, + "GOTO_WAIT" }, + { TIME_REMOTE_LOG_EVENT_GOTO_INIT, + "GOTO_INIT" }, + { ERR_ERROR_FATAL, "ERR_ERROR_FATAL" }, + { ERR_ERROR_FATAL_TASK, "ERR_ERROR_FATAL_TASK" }, + { DCVSAPPS_LOG_IDLE, "DCVSAPPS_LOG_IDLE" }, + { DCVSAPPS_LOG_ERR, "DCVSAPPS_LOG_ERR" }, + { DCVSAPPS_LOG_CHG, "DCVSAPPS_LOG_CHG" }, + { DCVSAPPS_LOG_REG, "DCVSAPPS_LOG_REG" }, + { DCVSAPPS_LOG_DEREG, "DCVSAPPS_LOG_DEREG" }, + { SMEM_LOG_EVENT_CB, "CB" }, + { SMEM_LOG_EVENT_START, "START" }, + { SMEM_LOG_EVENT_INIT, "INIT" }, + { SMEM_LOG_EVENT_RUNNING, "RUNNING" }, + { SMEM_LOG_EVENT_STOP, "STOP" }, + { SMEM_LOG_EVENT_RESTART, "RESTART" }, + { SMEM_LOG_EVENT_SS, "SS" }, + { SMEM_LOG_EVENT_READ, "READ" }, + { SMEM_LOG_EVENT_WRITE, "WRITE" }, + { 
SMEM_LOG_EVENT_SIGS1, "SIGS1" }, + { SMEM_LOG_EVENT_SIGS2, "SIGS2" }, + { SMEM_LOG_EVENT_WRITE_DM, "WRITE_DM" }, + { SMEM_LOG_EVENT_READ_DM, "READ_DM" }, + { SMEM_LOG_EVENT_SKIP_DM, "SKIP_DM" }, + { SMEM_LOG_EVENT_STOP_DM, "STOP_DM" }, + { SMEM_LOG_EVENT_ISR, "ISR" }, + { SMEM_LOG_EVENT_TASK, "TASK" }, + { SMEM_LOG_EVENT_RS, "RS" }, + { ONCRPC_LOG_EVENT_SMD_WAIT, "SMD_WAIT" }, + { ONCRPC_LOG_EVENT_RPC_WAIT, "RPC_WAIT" }, + { ONCRPC_LOG_EVENT_RPC_BOTH_WAIT, "RPC_BOTH_WAIT" }, + { ONCRPC_LOG_EVENT_RPC_INIT, "RPC_INIT" }, + { ONCRPC_LOG_EVENT_RUNNING, "RUNNING" }, + { ONCRPC_LOG_EVENT_APIS_INITED, "APIS_INITED" }, + { ONCRPC_LOG_EVENT_AMSS_RESET, "AMSS_RESET" }, + { ONCRPC_LOG_EVENT_SMD_RESET, "SMD_RESET" }, + { ONCRPC_LOG_EVENT_ONCRPC_RESET, "ONCRPC_RESET" }, + { ONCRPC_LOG_EVENT_CB, "CB" }, + { ONCRPC_LOG_EVENT_STD_CALL, "STD_CALL" }, + { ONCRPC_LOG_EVENT_STD_REPLY, "STD_REPLY" }, + { ONCRPC_LOG_EVENT_STD_CALL_ASYNC, "STD_CALL_ASYNC" }, + { NO_SLEEP_OLD, "NO_SLEEP_OLD" }, + { INSUF_TIME, "INSUF_TIME" }, + { MOD_UART_CLOCK, "MOD_UART_CLOCK" }, + { SLEEP_INFO, "SLEEP_INFO" }, + { MOD_TCXO_END, "MOD_TCXO_END" }, + { MOD_ENTER_TCXO, "MOD_ENTER_TCXO" }, + { NO_SLEEP_NEW, "NO_SLEEP_NEW" }, + { RPC_ROUTER_LOG_EVENT_UNKNOWN, "UNKNOWN" }, + { RPC_ROUTER_LOG_EVENT_MSG_READ, "MSG_READ" }, + { RPC_ROUTER_LOG_EVENT_MSG_WRITTEN, "MSG_WRITTEN" }, + { RPC_ROUTER_LOG_EVENT_MSG_CFM_REQ, "MSG_CFM_REQ" }, + { RPC_ROUTER_LOG_EVENT_MSG_CFM_SNT, "MSG_CFM_SNT" }, + { RPC_ROUTER_LOG_EVENT_MID_READ, "MID_READ" }, + { RPC_ROUTER_LOG_EVENT_MID_WRITTEN, "MID_WRITTEN" }, + { RPC_ROUTER_LOG_EVENT_MID_CFM_REQ, "MID_CFM_REQ" }, + +}; + +struct sym oncrpc_syms[] = { + { 0x30000000, "CM" }, + { 0x30000001, "DB" }, + { 0x30000002, "SND" }, + { 0x30000003, "WMS" }, + { 0x30000004, "PDSM" }, + { 0x30000005, "MISC_MODEM_APIS" }, + { 0x30000006, "MISC_APPS_APIS" }, + { 0x30000007, "JOYST" }, + { 0x30000008, "VJOY" }, + { 0x30000009, "JOYSTC" }, + { 0x3000000a, "ADSPRTOSATOM" }, + { 0x3000000b, "ADSPRTOSMTOA" }, + { 0x3000000c, "I2C" }, + { 0x3000000d, "TIME_REMOTE" }, + { 0x3000000e, "NV" }, + { 0x3000000f, "CLKRGM_SEC" }, + { 0x30000010, "RDEVMAP" }, + { 0x30000011, "FS_RAPI" }, + { 0x30000012, "PBMLIB" }, + { 0x30000013, "AUDMGR" }, + { 0x30000014, "MVS" }, + { 0x30000015, "DOG_KEEPALIVE" }, + { 0x30000016, "GSDI_EXP" }, + { 0x30000017, "AUTH" }, + { 0x30000018, "NVRUIMI" }, + { 0x30000019, "MMGSDILIB" }, + { 0x3000001a, "CHARGER" }, + { 0x3000001b, "UIM" }, + { 0x3000001C, "ONCRPCTEST" }, + { 0x3000001d, "PDSM_ATL" }, + { 0x3000001e, "FS_XMOUNT" }, + { 0x3000001f, "SECUTIL " }, + { 0x30000020, "MCCMEID" }, + { 0x30000021, "PM_STROBE_FLASH" }, + { 0x30000022, "DS707_EXTIF" }, + { 0x30000023, "SMD BRIDGE_MODEM" }, + { 0x30000024, "SMD PORT_MGR" }, + { 0x30000025, "BUS_PERF" }, + { 0x30000026, "BUS_MON" }, + { 0x30000027, "MC" }, + { 0x30000028, "MCCAP" }, + { 0x30000029, "MCCDMA" }, + { 0x3000002a, "MCCDS" }, + { 0x3000002b, "MCCSCH" }, + { 0x3000002c, "MCCSRID" }, + { 0x3000002d, "SNM" }, + { 0x3000002e, "MCCSYOBJ" }, + { 0x3000002f, "DS707_APIS" }, + { 0x30000030, "DS_MP_SHIM_APPS_ASYNC" }, + { 0x30000031, "DSRLP_APIS" }, + { 0x30000032, "RLP_APIS" }, + { 0x30000033, "DS_MP_SHIM_MODEM" }, + { 0x30000034, "DSHDR_APIS" }, + { 0x30000035, "DSHDR_MDM_APIS" }, + { 0x30000036, "DS_MP_SHIM_APPS" }, + { 0x30000037, "HDRMC_APIS" }, + { 0x30000038, "SMD_BRIDGE_MTOA" }, + { 0x30000039, "SMD_BRIDGE_ATOM" }, + { 0x3000003a, "DPMAPP_OTG" }, + { 0x3000003b, "DIAG" }, + { 0x3000003c, "GSTK_EXP" }, + { 0x3000003d, "DSBC_MDM_APIS" }, + { 
0x3000003e, "HDRMRLP_MDM_APIS" }, + { 0x3000003f, "HDRMRLP_APPS_APIS" }, + { 0x30000040, "HDRMC_MRLP_APIS" }, + { 0x30000041, "PDCOMM_APP_API" }, + { 0x30000042, "DSAT_APIS" }, + { 0x30000043, "MISC_RF_APIS" }, + { 0x30000044, "CMIPAPP" }, + { 0x30000045, "DSMP_UMTS_MODEM_APIS" }, + { 0x30000046, "DSMP_UMTS_APPS_APIS" }, + { 0x30000047, "DSUCSDMPSHIM" }, + { 0x30000048, "TIME_REMOTE_ATOM" }, + { 0x3000004a, "SD" }, + { 0x3000004b, "MMOC" }, + { 0x3000004c, "WLAN_ADP_FTM" }, + { 0x3000004d, "WLAN_CP_CM" }, + { 0x3000004e, "FTM_WLAN" }, + { 0x3000004f, "SDCC_CPRM" }, + { 0x30000050, "CPRMINTERFACE" }, + { 0x30000051, "DATA_ON_MODEM_MTOA_APIS" }, + { 0x30000052, "DATA_ON_APPS_ATOM_APIS" }, + { 0x30000053, "MISC_MODEM_APIS_NONWINMOB" }, + { 0x30000054, "MISC_APPS_APIS_NONWINMOB" }, + { 0x30000055, "PMEM_REMOTE" }, + { 0x30000056, "TCXOMGR" }, + { 0x30000057, "DSUCSDAPPIF_APIS" }, + { 0x30000058, "BT" }, + { 0x30000059, "PD_COMMS_API" }, + { 0x3000005a, "PD_COMMS_CLIENT_API" }, + { 0x3000005b, "PDAPI" }, + { 0x3000005c, "LSA_SUPL_DSM" }, + { 0x3000005d, "TIME_REMOTE_MTOA" }, + { 0x3000005e, "FTM_BT" }, + { 0X3000005f, "DSUCSDAPPIF_APIS" }, + { 0X30000060, "PMAPP_GEN" }, + { 0X30000061, "PM_LIB" }, + { 0X30000062, "KEYPAD" }, + { 0X30000063, "HSU_APP_APIS" }, + { 0X30000064, "HSU_MDM_APIS" }, + { 0X30000065, "ADIE_ADC_REMOTE_ATOM " }, + { 0X30000066, "TLMM_REMOTE_ATOM" }, + { 0X30000067, "UI_CALLCTRL" }, + { 0X30000068, "UIUTILS" }, + { 0X30000069, "PRL" }, + { 0X3000006a, "HW" }, + { 0X3000006b, "OEM_RAPI" }, + { 0X3000006c, "WMSPM" }, + { 0X3000006d, "BTPF" }, + { 0X3000006e, "CLKRGM_SYNC_EVENT" }, + { 0X3000006f, "USB_APPS_RPC" }, + { 0X30000070, "USB_MODEM_RPC" }, + { 0X30000071, "ADC" }, + { 0X30000072, "CAMERAREMOTED" }, + { 0X30000073, "SECAPIREMOTED" }, + { 0X30000074, "DSATAPI" }, + { 0X30000075, "CLKCTL_RPC" }, + { 0X30000076, "BREWAPPCOORD" }, + { 0X30000077, "ALTENVSHELL" }, + { 0X30000078, "WLAN_TRP_UTILS" }, + { 0X30000079, "GPIO_RPC" }, + { 0X3000007a, "PING_RPC" }, + { 0X3000007b, "DSC_DCM_API" }, + { 0X3000007c, "L1_DS" }, + { 0X3000007d, "QCHATPK_APIS" }, + { 0X3000007e, "GPS_API" }, + { 0X3000007f, "OSS_RRCASN_REMOTE" }, + { 0X30000080, "PMAPP_OTG_REMOTE" }, + { 0X30000081, "PING_MDM_RPC" }, + { 0X30000082, "PING_KERNEL_RPC" }, + { 0X30000083, "TIMETICK" }, + { 0X30000084, "WM_BTHCI_FTM " }, + { 0X30000085, "WM_BT_PF" }, + { 0X30000086, "IPA_IPC_APIS" }, + { 0X30000087, "UKCC_IPC_APIS" }, + { 0X30000088, "CMIPSMS " }, + { 0X30000089, "VBATT_REMOTE" }, + { 0X3000008a, "MFPAL" }, + { 0X3000008b, "DSUMTSPDPREG" }, + { 0X3000fe00, "RESTART_DAEMON NUMBER 0" }, + { 0X3000fe01, "RESTART_DAEMON NUMBER 1" }, + { 0X3000feff, "RESTART_DAEMON NUMBER 255" }, + { 0X3000fffe, "BACKWARDS_COMPATIBILITY_IN_RPC_CLNT_LOOKUP" }, + { 0X3000ffff, "RPC_ROUTER_SERVER_PROGRAM" }, + { 0x31000000, "CM CB" }, + { 0x31000001, "DB CB" }, + { 0x31000002, "SND CB" }, + { 0x31000003, "WMS CB" }, + { 0x31000004, "PDSM CB" }, + { 0x31000005, "MISC_MODEM_APIS CB" }, + { 0x31000006, "MISC_APPS_APIS CB" }, + { 0x31000007, "JOYST CB" }, + { 0x31000008, "VJOY CB" }, + { 0x31000009, "JOYSTC CB" }, + { 0x3100000a, "ADSPRTOSATOM CB" }, + { 0x3100000b, "ADSPRTOSMTOA CB" }, + { 0x3100000c, "I2C CB" }, + { 0x3100000d, "TIME_REMOTE CB" }, + { 0x3100000e, "NV CB" }, + { 0x3100000f, "CLKRGM_SEC CB" }, + { 0x31000010, "RDEVMAP CB" }, + { 0x31000011, "FS_RAPI CB" }, + { 0x31000012, "PBMLIB CB" }, + { 0x31000013, "AUDMGR CB" }, + { 0x31000014, "MVS CB" }, + { 0x31000015, "DOG_KEEPALIVE CB" }, + { 0x31000016, "GSDI_EXP CB" }, + { 
0x31000017, "AUTH CB" }, + { 0x31000018, "NVRUIMI CB" }, + { 0x31000019, "MMGSDILIB CB" }, + { 0x3100001a, "CHARGER CB" }, + { 0x3100001b, "UIM CB" }, + { 0x3100001C, "ONCRPCTEST CB" }, + { 0x3100001d, "PDSM_ATL CB" }, + { 0x3100001e, "FS_XMOUNT CB" }, + { 0x3100001f, "SECUTIL CB" }, + { 0x31000020, "MCCMEID" }, + { 0x31000021, "PM_STROBE_FLASH CB" }, + { 0x31000022, "DS707_EXTIF CB" }, + { 0x31000023, "SMD BRIDGE_MODEM CB" }, + { 0x31000024, "SMD PORT_MGR CB" }, + { 0x31000025, "BUS_PERF CB" }, + { 0x31000026, "BUS_MON CB" }, + { 0x31000027, "MC CB" }, + { 0x31000028, "MCCAP CB" }, + { 0x31000029, "MCCDMA CB" }, + { 0x3100002a, "MCCDS CB" }, + { 0x3100002b, "MCCSCH CB" }, + { 0x3100002c, "MCCSRID CB" }, + { 0x3100002d, "SNM CB" }, + { 0x3100002e, "MCCSYOBJ CB" }, + { 0x3100002f, "DS707_APIS CB" }, + { 0x31000030, "DS_MP_SHIM_APPS_ASYNC CB" }, + { 0x31000031, "DSRLP_APIS CB" }, + { 0x31000032, "RLP_APIS CB" }, + { 0x31000033, "DS_MP_SHIM_MODEM CB" }, + { 0x31000034, "DSHDR_APIS CB" }, + { 0x31000035, "DSHDR_MDM_APIS CB" }, + { 0x31000036, "DS_MP_SHIM_APPS CB" }, + { 0x31000037, "HDRMC_APIS CB" }, + { 0x31000038, "SMD_BRIDGE_MTOA CB" }, + { 0x31000039, "SMD_BRIDGE_ATOM CB" }, + { 0x3100003a, "DPMAPP_OTG CB" }, + { 0x3100003b, "DIAG CB" }, + { 0x3100003c, "GSTK_EXP CB" }, + { 0x3100003d, "DSBC_MDM_APIS CB" }, + { 0x3100003e, "HDRMRLP_MDM_APIS CB" }, + { 0x3100003f, "HDRMRLP_APPS_APIS CB" }, + { 0x31000040, "HDRMC_MRLP_APIS CB" }, + { 0x31000041, "PDCOMM_APP_API CB" }, + { 0x31000042, "DSAT_APIS CB" }, + { 0x31000043, "MISC_RF_APIS CB" }, + { 0x31000044, "CMIPAPP CB" }, + { 0x31000045, "DSMP_UMTS_MODEM_APIS CB" }, + { 0x31000046, "DSMP_UMTS_APPS_APIS CB" }, + { 0x31000047, "DSUCSDMPSHIM CB" }, + { 0x31000048, "TIME_REMOTE_ATOM CB" }, + { 0x3100004a, "SD CB" }, + { 0x3100004b, "MMOC CB" }, + { 0x3100004c, "WLAN_ADP_FTM CB" }, + { 0x3100004d, "WLAN_CP_CM CB" }, + { 0x3100004e, "FTM_WLAN CB" }, + { 0x3100004f, "SDCC_CPRM CB" }, + { 0x31000050, "CPRMINTERFACE CB" }, + { 0x31000051, "DATA_ON_MODEM_MTOA_APIS CB" }, + { 0x31000052, "DATA_ON_APPS_ATOM_APIS CB" }, + { 0x31000053, "MISC_APIS_NONWINMOB CB" }, + { 0x31000054, "MISC_APPS_APIS_NONWINMOB CB" }, + { 0x31000055, "PMEM_REMOTE CB" }, + { 0x31000056, "TCXOMGR CB" }, + { 0x31000057, "DSUCSDAPPIF_APIS CB" }, + { 0x31000058, "BT CB" }, + { 0x31000059, "PD_COMMS_API CB" }, + { 0x3100005a, "PD_COMMS_CLIENT_API CB" }, + { 0x3100005b, "PDAPI CB" }, + { 0x3100005c, "LSA_SUPL_DSM CB" }, + { 0x3100005d, "TIME_REMOTE_MTOA CB" }, + { 0x3100005e, "FTM_BT CB" }, + { 0X3100005f, "DSUCSDAPPIF_APIS CB" }, + { 0X31000060, "PMAPP_GEN CB" }, + { 0X31000061, "PM_LIB CB" }, + { 0X31000062, "KEYPAD CB" }, + { 0X31000063, "HSU_APP_APIS CB" }, + { 0X31000064, "HSU_MDM_APIS CB" }, + { 0X31000065, "ADIE_ADC_REMOTE_ATOM CB" }, + { 0X31000066, "TLMM_REMOTE_ATOM CB" }, + { 0X31000067, "UI_CALLCTRL CB" }, + { 0X31000068, "UIUTILS CB" }, + { 0X31000069, "PRL CB" }, + { 0X3100006a, "HW CB" }, + { 0X3100006b, "OEM_RAPI CB" }, + { 0X3100006c, "WMSPM CB" }, + { 0X3100006d, "BTPF CB" }, + { 0X3100006e, "CLKRGM_SYNC_EVENT CB" }, + { 0X3100006f, "USB_APPS_RPC CB" }, + { 0X31000070, "USB_MODEM_RPC CB" }, + { 0X31000071, "ADC CB" }, + { 0X31000072, "CAMERAREMOTED CB" }, + { 0X31000073, "SECAPIREMOTED CB" }, + { 0X31000074, "DSATAPI CB" }, + { 0X31000075, "CLKCTL_RPC CB" }, + { 0X31000076, "BREWAPPCOORD CB" }, + { 0X31000077, "ALTENVSHELL CB" }, + { 0X31000078, "WLAN_TRP_UTILS CB" }, + { 0X31000079, "GPIO_RPC CB" }, + { 0X3100007a, "PING_RPC CB" }, + { 0X3100007b, "DSC_DCM_API CB" }, + 
{ 0X3100007c, "L1_DS CB" }, + { 0X3100007d, "QCHATPK_APIS CB" }, + { 0X3100007e, "GPS_API CB" }, + { 0X3100007f, "OSS_RRCASN_REMOTE CB" }, + { 0X31000080, "PMAPP_OTG_REMOTE CB" }, + { 0X31000081, "PING_MDM_RPC CB" }, + { 0X31000082, "PING_KERNEL_RPC CB" }, + { 0X31000083, "TIMETICK CB" }, + { 0X31000084, "WM_BTHCI_FTM CB" }, + { 0X31000085, "WM_BT_PF CB" }, + { 0X31000086, "IPA_IPC_APIS CB" }, + { 0X31000087, "UKCC_IPC_APIS CB" }, + { 0X31000088, "CMIPSMS CB" }, + { 0X31000089, "VBATT_REMOTE CB" }, + { 0X3100008a, "MFPAL CB" }, + { 0X3100008b, "DSUMTSPDPREG CB" }, + { 0X3100fe00, "RESTART_DAEMON NUMBER 0 CB" }, + { 0X3100fe01, "RESTART_DAEMON NUMBER 1 CB" }, + { 0X3100feff, "RESTART_DAEMON NUMBER 255 CB" }, + { 0X3100fffe, "BACKWARDS_COMPATIBILITY_IN_RPC_CLNT_LOOKUP CB" }, + { 0X3100ffff, "RPC_ROUTER_SERVER_PROGRAM CB" }, +}; + +struct sym wakeup_syms[] = { + { 0x00000040, "OTHER" }, + { 0x00000020, "RESET" }, + { 0x00000010, "ALARM" }, + { 0x00000008, "TIMER" }, + { 0x00000004, "GPIO" }, + { 0x00000002, "INT" }, + { 0x00000001, "RPC" }, + { 0x00000000, "NONE" }, +}; + +struct sym wakeup_int_syms[] = { + { 0, "MDDI_EXT" }, + { 1, "MDDI_PRI" }, + { 2, "MDDI_CLIENT"}, + { 3, "USB_OTG" }, + { 4, "I2CC" }, + { 5, "SDC1_0" }, + { 6, "SDC1_1" }, + { 7, "SDC2_0" }, + { 8, "SDC2_1" }, + { 9, "ADSP_A9A11" }, + { 10, "UART1" }, + { 11, "UART2" }, + { 12, "UART3" }, + { 13, "DP_RX_DATA" }, + { 14, "DP_RX_DATA2" }, + { 15, "DP_RX_DATA3" }, + { 16, "DM_UART" }, + { 17, "DM_DP_RX_DATA" }, + { 18, "KEYSENSE" }, + { 19, "HSSD" }, + { 20, "NAND_WR_ER_DONE" }, + { 21, "NAND_OP_DONE" }, + { 22, "TCHSCRN1" }, + { 23, "TCHSCRN2" }, + { 24, "TCHSCRN_SSBI" }, + { 25, "USB_HS" }, + { 26, "UART2_DM_RX" }, + { 27, "UART2_DM" }, + { 28, "SDC4_1" }, + { 29, "SDC4_0" }, + { 30, "SDC3_1" }, + { 31, "SDC3_0" }, +}; + +struct sym smsm_syms[] = { + { 0x80000000, "UN" }, + { 0x7F000000, "ERR" }, + { 0x00800000, "SMLP" }, + { 0x00400000, "ADWN" }, + { 0x00200000, "PWRS" }, + { 0x00100000, "DWLD" }, + { 0x00080000, "SRBT" }, + { 0x00040000, "SDWN" }, + { 0x00020000, "ARBT" }, + { 0x00010000, "REL" }, + { 0x00008000, "SLE" }, + { 0x00004000, "SLP" }, + { 0x00002000, "WFPI" }, + { 0x00001000, "EEX" }, + { 0x00000800, "TIN" }, + { 0x00000400, "TWT" }, + { 0x00000200, "PWRC" }, + { 0x00000100, "RUN" }, + { 0x00000080, "SA" }, + { 0x00000040, "RES" }, + { 0x00000020, "RIN" }, + { 0x00000010, "RWT" }, + { 0x00000008, "SIN" }, + { 0x00000004, "SWT" }, + { 0x00000002, "OE" }, + { 0x00000001, "I" }, +}; + +/* never reorder */ +struct sym voter_d2_syms[] = { + { 0x00000001, NULL }, + { 0x00000002, NULL }, + { 0x00000004, NULL }, + { 0x00000008, NULL }, + { 0x00000010, NULL }, + { 0x00000020, NULL }, + { 0x00000040, NULL }, + { 0x00000080, NULL }, + { 0x00000100, NULL }, + { 0x00000200, NULL }, + { 0x00000400, NULL }, + { 0x00000800, NULL }, + { 0x00001000, NULL }, + { 0x00002000, NULL }, + { 0x00004000, NULL }, + { 0x00008000, NULL }, + { 0x00010000, NULL }, + { 0x00020000, NULL }, + { 0x00040000, NULL }, + { 0x00080000, NULL }, + { 0x00100000, NULL }, + { 0x00200000, NULL }, + { 0x00400000, NULL }, + { 0x00800000, NULL }, + { 0x01000000, NULL }, + { 0x02000000, NULL }, + { 0x04000000, NULL }, + { 0x08000000, NULL }, + { 0x10000000, NULL }, + { 0x20000000, NULL }, + { 0x40000000, NULL }, + { 0x80000000, NULL }, +}; + +/* never reorder */ +struct sym voter_d3_syms[] = { + { 0x00000001, NULL }, + { 0x00000002, NULL }, + { 0x00000004, NULL }, + { 0x00000008, NULL }, + { 0x00000010, NULL }, + { 0x00000020, NULL }, + { 0x00000040, NULL }, 
+ { 0x00000080, NULL }, + { 0x00000100, NULL }, + { 0x00000200, NULL }, + { 0x00000400, NULL }, + { 0x00000800, NULL }, + { 0x00001000, NULL }, + { 0x00002000, NULL }, + { 0x00004000, NULL }, + { 0x00008000, NULL }, + { 0x00010000, NULL }, + { 0x00020000, NULL }, + { 0x00040000, NULL }, + { 0x00080000, NULL }, + { 0x00100000, NULL }, + { 0x00200000, NULL }, + { 0x00400000, NULL }, + { 0x00800000, NULL }, + { 0x01000000, NULL }, + { 0x02000000, NULL }, + { 0x04000000, NULL }, + { 0x08000000, NULL }, + { 0x10000000, NULL }, + { 0x20000000, NULL }, + { 0x40000000, NULL }, + { 0x80000000, NULL }, +}; + +struct sym dem_state_master_syms[] = { + { 0, "INIT" }, + { 1, "RUN" }, + { 2, "SLEEP_WAIT" }, + { 3, "SLEEP_CONFIRMED" }, + { 4, "SLEEP_EXIT" }, + { 5, "RSA" }, + { 6, "EARLY_EXIT" }, + { 7, "RSA_DELAYED" }, + { 8, "RSA_CHECK_INTS" }, + { 9, "RSA_CONFIRMED" }, + { 10, "RSA_WAKING" }, + { 11, "RSA_RESTORE" }, + { 12, "RESET" }, +}; + +struct sym dem_state_slave_syms[] = { + { 0, "INIT" }, + { 1, "RUN" }, + { 2, "SLEEP_WAIT" }, + { 3, "SLEEP_EXIT" }, + { 4, "SLEEP_RUN_PENDING" }, + { 5, "POWER_COLLAPSE" }, + { 6, "CHECK_INTERRUPTS" }, + { 7, "SWFI" }, + { 8, "WFPI" }, + { 9, "EARLY_EXIT" }, + { 10, "RESET_RECOVER" }, + { 11, "RESET_ACKNOWLEDGE" }, + { 12, "ERROR" }, +}; + +struct sym smsm_entry_type_syms[] = { + { 0, "SMSM_APPS_STATE" }, + { 1, "SMSM_MODEM_STATE" }, + { 2, "SMSM_Q6_STATE" }, + { 3, "SMSM_APPS_DEM" }, + { 4, "SMSM_MODEM_DEM" }, + { 5, "SMSM_Q6_DEM" }, + { 6, "SMSM_POWER_MASTER_DEM" }, + { 7, "SMSM_TIME_MASTER_DEM" }, +}; + +struct sym smsm_state_syms[] = { + { 0x00000001, "INIT" }, + { 0x00000002, "OSENTERED" }, + { 0x00000004, "SMDWAIT" }, + { 0x00000008, "SMDINIT" }, + { 0x00000010, "RPCWAIT" }, + { 0x00000020, "RPCINIT" }, + { 0x00000040, "RESET" }, + { 0x00000080, "RSA" }, + { 0x00000100, "RUN" }, + { 0x00000200, "PWRC" }, + { 0x00000400, "TIMEWAIT" }, + { 0x00000800, "TIMEINIT" }, + { 0x00001000, "PWRC_EARLY_EXIT" }, + { 0x00002000, "WFPI" }, + { 0x00004000, "SLEEP" }, + { 0x00008000, "SLEEPEXIT" }, + { 0x00010000, "OEMSBL_RELEASE" }, + { 0x00020000, "APPS_REBOOT" }, + { 0x00040000, "SYSTEM_POWER_DOWN" }, + { 0x00080000, "SYSTEM_REBOOT" }, + { 0x00100000, "SYSTEM_DOWNLOAD" }, + { 0x00200000, "PWRC_SUSPEND" }, + { 0x00400000, "APPS_SHUTDOWN" }, + { 0x00800000, "SMD_LOOPBACK" }, + { 0x01000000, "RUN_QUIET" }, + { 0x02000000, "MODEM_WAIT" }, + { 0x04000000, "MODEM_BREAK" }, + { 0x08000000, "MODEM_CONTINUE" }, + { 0x80000000, "UNKNOWN" }, +}; + +#define ID_SYM 0 +#define BASE_SYM 1 +#define EVENT_SYM 2 +#define ONCRPC_SYM 3 +#define WAKEUP_SYM 4 +#define WAKEUP_INT_SYM 5 +#define SMSM_SYM 6 +#define VOTER_D2_SYM 7 +#define VOTER_D3_SYM 8 +#define DEM_STATE_MASTER_SYM 9 +#define DEM_STATE_SLAVE_SYM 10 +#define SMSM_ENTRY_TYPE_SYM 11 +#define SMSM_STATE_SYM 12 + +static struct sym_tbl { + struct sym *data; + int size; + struct hlist_head hlist[HSIZE]; +} tbl[] = { + { id_syms, ARRAY_SIZE(id_syms) }, + { base_syms, ARRAY_SIZE(base_syms) }, + { event_syms, ARRAY_SIZE(event_syms) }, + { oncrpc_syms, ARRAY_SIZE(oncrpc_syms) }, + { wakeup_syms, ARRAY_SIZE(wakeup_syms) }, + { wakeup_int_syms, ARRAY_SIZE(wakeup_int_syms) }, + { smsm_syms, ARRAY_SIZE(smsm_syms) }, + { voter_d2_syms, ARRAY_SIZE(voter_d2_syms) }, + { voter_d3_syms, ARRAY_SIZE(voter_d3_syms) }, + { dem_state_master_syms, ARRAY_SIZE(dem_state_master_syms) }, + { dem_state_slave_syms, ARRAY_SIZE(dem_state_slave_syms) }, + { smsm_entry_type_syms, ARRAY_SIZE(smsm_entry_type_syms) }, + { smsm_state_syms, 
ARRAY_SIZE(smsm_state_syms) }, +}; + +static void find_voters(void) +{ + void *x, *next; + unsigned size; + int i = 0, j = 0; + + x = smem_item(SMEM_SLEEP_STATIC, &size); + next = x; + while (next && (next < (x + size)) && + ((i + j) < (ARRAY_SIZE(voter_d3_syms) + + ARRAY_SIZE(voter_d2_syms)))) { + + if (i < ARRAY_SIZE(voter_d3_syms)) { + voter_d3_syms[i].str = (char *) next; + i++; + } else if (i >= ARRAY_SIZE(voter_d3_syms) && + j < ARRAY_SIZE(voter_d2_syms)) { + voter_d2_syms[j].str = (char *) next; + j++; + } + + next += 9; + } +} + +#define hash(val) (val % HSIZE) + +static void init_syms(void) +{ + int i; + int j; + + for (i = 0; i < ARRAY_SIZE(tbl); ++i) + for (j = 0; j < HSIZE; ++j) + INIT_HLIST_HEAD(&tbl[i].hlist[j]); + + for (i = 0; i < ARRAY_SIZE(tbl); ++i) + for (j = 0; j < tbl[i].size; ++j) { + INIT_HLIST_NODE(&tbl[i].data[j].node); + hlist_add_head(&tbl[i].data[j].node, + &tbl[i].hlist[hash(tbl[i].data[j].val)]); + } +} + +static char *find_sym(uint32_t id, uint32_t val) +{ + struct hlist_node *n; + struct sym *s; + + hlist_for_each(n, &tbl[id].hlist[hash(val)]) { + s = hlist_entry(n, struct sym, node); + if (s->val == val) + return s->str; + } + + return 0; +} + +#else +static void init_syms(void) {} +#endif + +static inline unsigned int read_timestamp(void) +{ + unsigned int tick; + + do { + tick = readl(TIMESTAMP_ADDR); + } while (tick != (tick = readl(TIMESTAMP_ADDR))); + + return tick; +} + +static void smem_log_event_from_user(struct smem_log_inst *inst, + const char __user *buf, int size, int num) +{ + uint32_t idx; + uint32_t next_idx; + unsigned long flags; + uint32_t identifier = 0; + uint32_t timetick = 0; + int first = 1; + int ret; + + remote_spin_lock_irqsave(inst->remote_spinlock, flags); + + while (num--) { + idx = *inst->idx; + + if (idx < inst->num) { + ret = copy_from_user(&inst->events[idx], + buf, size); + if (ret) { + printk("ERROR %s:%i tried to write " + "%i got ret %i", + __func__, __LINE__, + size, size - ret); + goto out; + } + + if (first) { + identifier = + inst->events[idx]. 
+ identifier; + timetick = read_timestamp(); + first = 0; + } else { + identifier |= SMEM_LOG_CONT; + } + inst->events[idx].identifier = + identifier; + inst->events[idx].timetick = + timetick; + } + + next_idx = idx + 1; + if (next_idx >= inst->num) + next_idx = 0; + *inst->idx = next_idx; + + buf += sizeof(struct smem_log_item); + } + + out: + remote_spin_unlock_irqrestore(inst->remote_spinlock, flags); +} + +static void _smem_log_event( + struct smem_log_item __iomem *events, + uint32_t __iomem *_idx, + remote_spinlock_t *lock, + int num, + uint32_t id, uint32_t data1, uint32_t data2, + uint32_t data3) +{ + struct smem_log_item item; + uint32_t idx; + uint32_t next_idx; + unsigned long flags; + + item.timetick = read_timestamp(); + item.identifier = id; + item.data1 = data1; + item.data2 = data2; + item.data3 = data3; + + remote_spin_lock_irqsave(lock, flags); + + idx = *_idx; + + if (idx < num) { + memcpy(&events[idx], + &item, sizeof(item)); + } + + next_idx = idx + 1; + if (next_idx >= num) + next_idx = 0; + *_idx = next_idx; + + remote_spin_unlock_irqrestore(lock, flags); +} + +static void _smem_log_event6( + struct smem_log_item __iomem *events, + uint32_t __iomem *_idx, + remote_spinlock_t *lock, + int num, + uint32_t id, uint32_t data1, uint32_t data2, + uint32_t data3, uint32_t data4, uint32_t data5, + uint32_t data6) +{ + struct smem_log_item item[2]; + uint32_t idx; + uint32_t next_idx; + unsigned long flags; + + item[0].timetick = read_timestamp(); + item[0].identifier = id; + item[0].data1 = data1; + item[0].data2 = data2; + item[0].data3 = data3; + item[1].identifier = item[0].identifier; + item[1].timetick = item[0].timetick; + item[1].data1 = data4; + item[1].data2 = data5; + item[1].data3 = data6; + + remote_spin_lock_irqsave(lock, flags); + + idx = *_idx; + + if (idx < (num-1)) { + memcpy(&events[idx], + &item, sizeof(item)); + } + + next_idx = idx + 2; + if (next_idx >= num) + next_idx = 0; + *_idx = next_idx; + + remote_spin_unlock_irqrestore(lock, flags); +} + +void smem_log_event(uint32_t id, uint32_t data1, uint32_t data2, + uint32_t data3) +{ + _smem_log_event(inst[GEN].events, inst[GEN].idx, + inst[GEN].remote_spinlock, SMEM_LOG_NUM_ENTRIES, + id, data1, data2, data3); +} + +void smem_log_event6(uint32_t id, uint32_t data1, uint32_t data2, + uint32_t data3, uint32_t data4, uint32_t data5, + uint32_t data6) +{ + _smem_log_event6(inst[GEN].events, inst[GEN].idx, + inst[GEN].remote_spinlock, SMEM_LOG_NUM_ENTRIES, + id, data1, data2, data3, data4, data5, data6); +} + +void smem_log_event_to_static(uint32_t id, uint32_t data1, uint32_t data2, + uint32_t data3) +{ + _smem_log_event(inst[STA].events, inst[STA].idx, + inst[STA].remote_spinlock, SMEM_LOG_NUM_STATIC_ENTRIES, + id, data1, data2, data3); +} + +void smem_log_event6_to_static(uint32_t id, uint32_t data1, uint32_t data2, + uint32_t data3, uint32_t data4, uint32_t data5, + uint32_t data6) +{ + _smem_log_event6(inst[STA].events, inst[STA].idx, + inst[STA].remote_spinlock, SMEM_LOG_NUM_STATIC_ENTRIES, + id, data1, data2, data3, data4, data5, data6); +} + +static int _smem_log_init(void) +{ + inst[GEN].which_log = GEN; + inst[GEN].events = + (struct smem_log_item *)smem_alloc(SMEM_SMEM_LOG_EVENTS, + SMEM_LOG_EVENTS_SIZE); + inst[GEN].idx = (uint32_t *)smem_alloc(SMEM_SMEM_LOG_IDX, + sizeof(uint32_t)); + if (!inst[GEN].events || !inst[GEN].idx) { + pr_err("%s: no log or log_idx allocated, " + "smem_log disabled\n", __func__); + } + inst[GEN].num = SMEM_LOG_NUM_ENTRIES; + inst[GEN].remote_spinlock = &remote_spinlock; 
+ + inst[STA].which_log = STA; + inst[STA].events = + (struct smem_log_item *) + smem_alloc(SMEM_SMEM_STATIC_LOG_EVENTS, + SMEM_STATIC_LOG_EVENTS_SIZE); + inst[STA].idx = (uint32_t *)smem_alloc(SMEM_SMEM_STATIC_LOG_IDX, + sizeof(uint32_t)); + if (!inst[STA].events || !inst[STA].idx) { + pr_err("%s: no static log or log_idx " + "allocated, smem_log disabled\n", __func__); + } + inst[STA].num = SMEM_LOG_NUM_STATIC_ENTRIES; + inst[STA].remote_spinlock = &remote_spinlock_static; + + inst[POW].which_log = POW; +#ifdef CONFIG_MSM_N_WAY_SMD + + inst[POW].events = + (struct smem_log_item *) + smem_alloc(SMEM_SMEM_LOG_POWER_EVENTS, + SMEM_POWER_LOG_EVENTS_SIZE); + inst[POW].idx = (uint32_t *)smem_alloc(SMEM_SMEM_LOG_POWER_IDX, + sizeof(uint32_t)); +#else + inst[POW].events = NULL; + inst[POW].idx = NULL; +#endif + if (!inst[POW].events || !inst[POW].idx) { + pr_err("%s: no power log or log_idx " + "allocated, smem_log disabled\n", __func__); + } + inst[POW].num = SMEM_LOG_NUM_POWER_ENTRIES; + inst[POW].remote_spinlock = &remote_spinlock; + + remote_spin_lock_init(&remote_spinlock, + SMEM_SPINLOCK_SMEM_LOG); + remote_spin_lock_init(&remote_spinlock_static, + SMEM_SPINLOCK_STATIC_LOG); + + init_syms(); + + return 0; +} + +static ssize_t smem_log_read_bin(struct file *fp, char __user *buf, + size_t count, loff_t *pos) +{ + int idx; + int orig_idx; + unsigned long flags; + int ret; + int tot_bytes = 0; + struct smem_log_inst *inst; + + inst = fp->private_data; + + remote_spin_lock_irqsave(inst->remote_spinlock, flags); + + orig_idx = *inst->idx; + idx = orig_idx; + + while (1) { + idx--; + if (idx < 0) + idx = inst->num - 1; + if (idx == orig_idx) { + ret = tot_bytes; + break; + } + + if ((tot_bytes + sizeof(struct smem_log_item)) > count) { + ret = tot_bytes; + break; + } + + ret = copy_to_user(buf, &inst[GEN].events[idx], + sizeof(struct smem_log_item)); + if (ret) { + ret = -EIO; + break; + } + + tot_bytes += sizeof(struct smem_log_item); + + buf += sizeof(struct smem_log_item); + } + + remote_spin_unlock_irqrestore(inst->remote_spinlock, flags); + + return ret; +} + +static ssize_t smem_log_read(struct file *fp, char __user *buf, + size_t count, loff_t *pos) +{ + char loc_buf[128]; + int i; + int idx; + int orig_idx; + unsigned long flags; + int ret; + int tot_bytes = 0; + struct smem_log_inst *inst; + + inst = fp->private_data; + + remote_spin_lock_irqsave(inst->remote_spinlock, flags); + + orig_idx = *inst->idx; + idx = orig_idx; + + while (1) { + idx--; + if (idx < 0) + idx = inst->num - 1; + if (idx == orig_idx) { + ret = tot_bytes; + break; + } + + i = scnprintf(loc_buf, 128, + "0x%x 0x%x 0x%x 0x%x 0x%x\n", + inst->events[idx].identifier, + inst->events[idx].timetick, + inst->events[idx].data1, + inst->events[idx].data2, + inst->events[idx].data3); + if (i == 0) { + ret = -EIO; + break; + } + + if ((tot_bytes + i) > count) { + ret = tot_bytes; + break; + } + + tot_bytes += i; + + ret = copy_to_user(buf, loc_buf, i); + if (ret) { + ret = -EIO; + break; + } + + buf += i; + } + + remote_spin_unlock_irqrestore(inst->remote_spinlock, flags); + + return ret; +} + +static ssize_t smem_log_write_bin(struct file *fp, const char __user *buf, + size_t count, loff_t *pos) +{ + if (count < sizeof(struct smem_log_item)) + return -EINVAL; + + smem_log_event_from_user(fp->private_data, buf, + sizeof(struct smem_log_item), + count / sizeof(struct smem_log_item)); + + return count; +} + +static ssize_t smem_log_write(struct file *fp, const char __user *buf, + size_t count, loff_t *pos) +{ + int ret; + const 
char delimiters[] = " ,;"; + char locbuf[256] = {0}; + uint32_t val[10]; + int vals = 0; + char *token; + char *running; + struct smem_log_inst *inst; + unsigned long res; + + inst = fp->private_data; + + if (count < 0) { + printk(KERN_ERR "ERROR: %s passed neg count = %i\n", + __func__, count); + return -EINVAL; + } + + count = count > 255 ? 255 : count; + + locbuf[count] = '\0'; + + ret = copy_from_user(locbuf, buf, count); + if (ret != 0) { + printk(KERN_ERR "ERROR: %s could not copy %i bytes\n", + __func__, ret); + return -EINVAL; + } + + D(KERN_ERR "%s: ", __func__); + D_DUMP_BUFFER("We got", len, locbuf); + + running = locbuf; + + token = strsep(&running, delimiters); + while (token && vals < ARRAY_SIZE(val)) { + if (*token != '\0') { + D(KERN_ERR "%s: ", __func__); + D_DUMP_BUFFER("", strlen(token), token); + ret = strict_strtoul(token, 0, &res); + if (ret) { + printk(KERN_ERR "ERROR: %s:%i got bad char " + "at strict_strtoul\n", + __func__, __LINE__-4); + return -EINVAL; + } + val[vals++] = res; + } + token = strsep(&running, delimiters); + } + + if (vals > 5) { + if (inst->which_log == GEN) + smem_log_event6(val[0], val[2], val[3], val[4], + val[7], val[8], val[9]); + else if (inst->which_log == STA) + smem_log_event6_to_static(val[0], + val[2], val[3], val[4], + val[7], val[8], val[9]); + else + return -1; + } else { + if (inst->which_log == GEN) + smem_log_event(val[0], val[2], val[3], val[4]); + else if (inst->which_log == STA) + smem_log_event_to_static(val[0], + val[2], val[3], val[4]); + else + return -1; + } + + return count; +} + +static int smem_log_open(struct inode *ip, struct file *fp) +{ + fp->private_data = &inst[GEN]; + + return 0; +} + + +static int smem_log_release(struct inode *ip, struct file *fp) +{ + return 0; +} + +static long smem_log_ioctl(struct file *fp, unsigned int cmd, unsigned long arg); + +static const struct file_operations smem_log_fops = { + .owner = THIS_MODULE, + .read = smem_log_read, + .write = smem_log_write, + .open = smem_log_open, + .release = smem_log_release, + .unlocked_ioctl = smem_log_ioctl, +}; + +static const struct file_operations smem_log_bin_fops = { + .owner = THIS_MODULE, + .read = smem_log_read_bin, + .write = smem_log_write_bin, + .open = smem_log_open, + .release = smem_log_release, + .unlocked_ioctl = smem_log_ioctl, +}; + +static long smem_log_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) +{ + struct smem_log_inst *inst; + + inst = fp->private_data; + + switch (cmd) { + default: + return -ENOTTY; + + case SMIOC_SETMODE: + if (arg == SMIOC_TEXT) { + D("%s set text mode\n", __func__); + fp->f_op = &smem_log_fops; + } else if (arg == SMIOC_BINARY) { + D("%s set bin mode\n", __func__); + fp->f_op = &smem_log_bin_fops; + } else { + return -EINVAL; + } + break; + case SMIOC_SETLOG: + if (arg == SMIOC_LOG) + fp->private_data = &inst[GEN]; + else if (arg == SMIOC_STATIC_LOG) + fp->private_data = &inst[STA]; + else + return -EINVAL; + break; + } + + return 0; +} + +static struct miscdevice smem_log_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "smem_log", + .fops = &smem_log_fops, +}; + +#if defined(CONFIG_DEBUG_FS) + +static int _debug_dump(int log, char *buf, int max) +{ + unsigned int idx; + int orig_idx; + unsigned long flags; + int i = 0; + + if (!inst[log].events) + return 0; + + remote_spin_lock_irqsave(inst[log].remote_spinlock, flags); + + orig_idx = *inst[log].idx; + idx = orig_idx; + + while (1) { + idx++; + if (idx > inst[log].num - 1) + idx = 0; + if (idx == orig_idx) + break; + + if (idx < 
inst[log].num) { + if (!inst[log].events[idx].identifier) + continue; + + i += scnprintf(buf + i, max - i, + "%08x %08x %08x %08x %08x\n", + inst[log].events[idx].identifier, + inst[log].events[idx].timetick, + inst[log].events[idx].data1, + inst[log].events[idx].data2, + inst[log].events[idx].data3); + } + } + + remote_spin_unlock_irqrestore(inst[log].remote_spinlock, flags); + + return i; +} + +static int _debug_dump_sym(int log, char *buf, int max) +{ + unsigned int idx; + int orig_idx; + unsigned long flags; + int i = 0; + + char *proc; + char *sub; + char *id; + char *sym = NULL; + + uint32_t data[3]; + + uint32_t proc_val = 0; + uint32_t sub_val = 0; + uint32_t id_val = 0; + uint32_t id_only_val = 0; + uint32_t data1 = 0; + uint32_t data2 = 0; + uint32_t data3 = 0; + + int k; + + if (!inst[log].events) + return 0; + + find_voters(); /* need to call each time in case voters come and go */ + + i += scnprintf(buf + i, max - i, "Voters:\n"); + for (k = 0; k < ARRAY_SIZE(voter_d3_syms); ++k) + if (voter_d3_syms[k].str) + i += scnprintf(buf + i, max - i, "%s ", + voter_d3_syms[k].str); + for (k = 0; k < ARRAY_SIZE(voter_d2_syms); ++k) + if (voter_d2_syms[k].str) + i += scnprintf(buf + i, max - i, "%s ", + voter_d2_syms[k].str); + i += scnprintf(buf + i, max - i, "\n"); + + remote_spin_lock_irqsave(inst[log].remote_spinlock, flags); + + orig_idx = *inst[log].idx; + idx = orig_idx; + + while (1) { + idx++; + if (idx > inst[log].num - 1) + idx = 0; + if (idx == orig_idx) { + i += scnprintf(buf + i, max - i, "\n"); + break; + } + if (idx < inst[log].num) { + if (!inst[log].events[idx].identifier) + continue; + + proc_val = PROC & inst[log].events[idx].identifier; + sub_val = SUB & inst[log].events[idx].identifier; + id_val = (SUB | ID) & inst[log].events[idx].identifier; + id_only_val = ID & inst[log].events[idx].identifier; + data1 = inst[log].events[idx].data1; + data2 = inst[log].events[idx].data2; + data3 = inst[log].events[idx].data3; + + if (!(proc_val & SMEM_LOG_CONT)) { + i += scnprintf(buf + i, max - i, "\n"); + + proc = find_sym(ID_SYM, proc_val); + + if (proc) + i += scnprintf(buf + i, max - i, + "%4s: ", + proc); + else + i += scnprintf(buf + i, max - i, + "%04x: ", + PROC & + inst[log].events[idx]. 
+ identifier); + + i += scnprintf(buf + i, max - i, + "%10u ", + inst[log].events[idx].timetick); + + sub = find_sym(BASE_SYM, sub_val); + + if (sub) + i += scnprintf(buf + i, max - i, + "%9s: ", + sub); + else + i += scnprintf(buf + i, max - i, + "%08x: ", + sub_val); + + id = find_sym(EVENT_SYM, id_val); + + if (id) + i += scnprintf(buf + i, max - i, + "%11s: ", + id); + else + i += scnprintf(buf + i, max - i, + "%08x: ", + id_only_val); + } + + if ((proc_val & SMEM_LOG_CONT) && + (id_val == ONCRPC_LOG_EVENT_STD_CALL || + id_val == ONCRPC_LOG_EVENT_STD_REPLY)) { + data[0] = data1; + data[1] = data2; + data[2] = data3; + i += scnprintf(buf + i, max - i, + " %.16s", + (char *) data); + } else if (proc_val & SMEM_LOG_CONT) { + i += scnprintf(buf + i, max - i, + " %08x %08x %08x", + data1, + data2, + data3); + } else if (id_val == ONCRPC_LOG_EVENT_STD_CALL) { + sym = find_sym(ONCRPC_SYM, data2); + + if (sym) + i += scnprintf(buf + i, max - i, + "xid:%4i %8s proc:%3i", + data1, + sym, + data3); + else + i += scnprintf(buf + i, max - i, + "xid:%4i %08x proc:%3i", + data1, + data2, + data3); +#if defined(CONFIG_MSM_N_WAY_SMSM) + } else if (id_val == DEM_STATE_CHANGE) { + if (data1 == 1) { + i += scnprintf(buf + i, + max - i, + "MASTER: "); + sym = find_sym(DEM_STATE_MASTER_SYM, + data2); + } else if (data1 == 0) { + i += scnprintf(buf + i, + max - i, + " SLAVE: "); + sym = find_sym(DEM_STATE_SLAVE_SYM, + data2); + } else { + i += scnprintf(buf + i, + max - i, + "%x: ", + data1); + sym = NULL; + } + if (sym) + i += scnprintf(buf + i, + max - i, + "from:%s ", + sym); + else + i += scnprintf(buf + i, + max - i, + "from:0x%x ", + data2); + + if (data1 == 1) + sym = find_sym(DEM_STATE_MASTER_SYM, + data3); + else if (data1 == 0) + sym = find_sym(DEM_STATE_SLAVE_SYM, + data3); + else + sym = NULL; + if (sym) + i += scnprintf(buf + i, + max - i, + "to:%s ", + sym); + else + i += scnprintf(buf + i, + max - i, + "to:0x%x ", + data3); + + } else if (id_val == DEM_STATE_MACHINE_ENTER) { + i += scnprintf(buf + i, + max - i, + "swfi:%i timer:%i manexit:%i", + data1, data2, data3); + + } else if (id_val == DEM_TIME_SYNC_REQUEST || + id_val == DEM_TIME_SYNC_POLL || + id_val == DEM_TIME_SYNC_INIT) { + sym = find_sym(SMSM_ENTRY_TYPE_SYM, + data1); + if (sym) + i += scnprintf(buf + i, + max - i, + "hostid:%s", + sym); + else + i += scnprintf(buf + i, + max - i, + "hostid:%x", + data1); + + } else if (id_val == DEM_TIME_SYNC_START || + id_val == DEM_TIME_SYNC_SEND_VALUE) { + unsigned mask = 0x1; + unsigned tmp = 0; + if (id_val == DEM_TIME_SYNC_START) + i += scnprintf(buf + i, + max - i, + "req:"); + else + i += scnprintf(buf + i, + max - i, + "pol:"); + while (mask) { + if (mask & data1) { + sym = find_sym( + SMSM_ENTRY_TYPE_SYM, + tmp); + if (sym) + i += scnprintf(buf + i, + max - i, + "%s ", + sym); + else + i += scnprintf(buf + i, + max - i, + "%i ", + tmp); + } + mask <<= 1; + tmp++; + } + if (id_val == DEM_TIME_SYNC_SEND_VALUE) + i += scnprintf(buf + i, + max - i, + "tick:%x", + data2); + } else if (id_val == DEM_SMSM_ISR) { + unsigned vals[] = {data2, data3}; + unsigned j; + unsigned mask; + unsigned tmp; + unsigned once; + sym = find_sym(SMSM_ENTRY_TYPE_SYM, + data1); + if (sym) + i += scnprintf(buf + i, + max - i, + "%s ", + sym); + else + i += scnprintf(buf + i, + max - i, + "%x ", + data1); + + for (j = 0; j < ARRAY_SIZE(vals); ++j) { + i += scnprintf(buf + i, max - i, "["); + mask = 0x80000000; + once = 0; + while (mask) { + tmp = vals[j] & mask; + mask >>= 1; + if (!tmp) + continue; + sym = 
find_sym(SMSM_STATE_SYM, + tmp); + + if (once) + i += scnprintf(buf + i, + max - i, + " "); + if (sym) + i += scnprintf(buf + i, + max - i, + "%s", + sym); + else + i += scnprintf(buf + i, + max - i, + "0x%x", + tmp); + once = 1; + } + i += scnprintf(buf + i, max - i, "] "); + } +#else + } else if (id_val == DEMAPPS_WAKEUP_REASON) { + unsigned mask = 0x80000000; + unsigned tmp = 0; + while (mask) { + tmp = data1 & mask; + mask >>= 1; + if (!tmp) + continue; + sym = find_sym(WAKEUP_SYM, tmp); + if (sym) + i += scnprintf(buf + i, + max - i, + "%s ", + sym); + else + i += scnprintf(buf + i, + max - i, + "%08x ", + tmp); + } + i += scnprintf(buf + i, max - i, + "%08x %08x", + data2, + data3); + } else if (id_val == DEMMOD_APPS_WAKEUP_INT) { + sym = find_sym(WAKEUP_INT_SYM, data1); + + if (sym) + i += scnprintf(buf + i, max - i, + "%s %08x %08x", + sym, + data2, + data3); + else + i += scnprintf(buf + i, max - i, + "%08x %08x %08x", + data1, + data2, + data3); + } else if (id_val == DEM_NO_SLEEP || + id_val == NO_SLEEP_NEW) { + unsigned vals[] = {data3, data2}; + unsigned j; + unsigned mask; + unsigned tmp; + unsigned once; + i += scnprintf(buf + i, max - i, "%08x ", + data1); + i += scnprintf(buf + i, max - i, "["); + once = 0; + for (j = 0; j < ARRAY_SIZE(vals); ++j) { + mask = 0x00000001; + while (mask) { + tmp = vals[j] & mask; + mask <<= 1; + if (!tmp) + continue; + if (j == 0) + sym = find_sym( + VOTER_D3_SYM, + tmp); + else + sym = find_sym( + VOTER_D2_SYM, + tmp); + + if (once) + i += scnprintf(buf + i, + max - i, + " "); + if (sym) + i += scnprintf(buf + i, + max - i, + "%s", + sym); + else + i += scnprintf(buf + i, + max - i, + "%08x", + tmp); + once = 1; + } + } + i += scnprintf(buf + i, max - i, "] "); +#endif + } else if (id_val == SMEM_LOG_EVENT_CB) { + unsigned vals[] = {data2, data3}; + unsigned j; + unsigned mask; + unsigned tmp; + unsigned once; + i += scnprintf(buf + i, max - i, "%08x ", + data1); + for (j = 0; j < ARRAY_SIZE(vals); ++j) { + i += scnprintf(buf + i, max - i, "["); + mask = 0x80000000; + once = 0; + while (mask) { + tmp = vals[j] & mask; + mask >>= 1; + if (!tmp) + continue; + sym = find_sym(SMSM_SYM, tmp); + + if (once) + i += scnprintf(buf + i, + max - i, + " "); + if (sym) + i += scnprintf(buf + i, + max - i, + "%s", + sym); + else + i += scnprintf(buf + i, + max - i, + "%08x", + tmp); + once = 1; + } + i += scnprintf(buf + i, max - i, "] "); + } + } else { + i += scnprintf(buf + i, max - i, + "%08x %08x %08x", + data1, + data2, + data3); + } + } + } + + remote_spin_unlock_irqrestore(inst[log].remote_spinlock, flags); + + return i; +} + +static int debug_dump(char *buf, int max) +{ + return _debug_dump(GEN, buf, max); +} + +static int debug_dump_sym(char *buf, int max) +{ + return _debug_dump_sym(GEN, buf, max); +} + +static int debug_dump_static(char *buf, int max) +{ + return _debug_dump(STA, buf, max); +} + +static int debug_dump_static_sym(char *buf, int max) +{ + return _debug_dump_sym(STA, buf, max); +} + +static int debug_dump_power(char *buf, int max) +{ + return _debug_dump(POW, buf, max); +} + +static int debug_dump_power_sym(char *buf, int max) +{ + return _debug_dump_sym(POW, buf, max); +} + +#define SMEM_LOG_ITEM_PRINT_SIZE 160 + +#define EVENTS_PRINT_SIZE \ +(SMEM_LOG_ITEM_PRINT_SIZE * SMEM_LOG_NUM_ENTRIES) + +static char debug_buffer[EVENTS_PRINT_SIZE]; + +static ssize_t debug_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + int (*fill)(char *buf, int max) = file->private_data; + int bsize = fill(debug_buffer, 
EVENTS_PRINT_SIZE); + return simple_read_from_buffer(buf, count, ppos, debug_buffer, + bsize); +} + +static int debug_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static const struct file_operations debug_ops = { + .read = debug_read, + .open = debug_open, +}; + +static void debug_create(const char *name, mode_t mode, + struct dentry *dent, + int (*fill)(char *buf, int max)) +{ + debugfs_create_file(name, mode, dent, fill, &debug_ops); +} + +static void smem_log_debugfs_init(void) +{ + struct dentry *dent; + + dent = debugfs_create_dir("smem_log", 0); + if (IS_ERR(dent)) + return; + + debug_create("dump", 0444, dent, debug_dump); + debug_create("dump_sym", 0444, dent, debug_dump_sym); + debug_create("dump_static", 0444, dent, debug_dump_static); + debug_create("dump_static_sym", 0444, dent, debug_dump_static_sym); + debug_create("dump_power", 0444, dent, debug_dump_power); + debug_create("dump_power_sym", 0444, dent, debug_dump_power_sym); +} +#else +static void smem_log_debugfs_init(void) {} +#endif + +static int __init smem_log_init(void) +{ + int ret; + + ret = _smem_log_init(); + if (ret < 0) + return ret; + + smem_log_debugfs_init(); + + return misc_register(&smem_log_dev); +} + + +module_init(smem_log_init); + +MODULE_DESCRIPTION("smem log"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c new file mode 100644 index 0000000000000..c4bc76c09968a --- /dev/null +++ b/arch/arm/mach-msm/socinfo.c @@ -0,0 +1,763 @@ +/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +/* + * SOC Info Routines + * + */ + +#include +#include +#include +#include + +#include "smd_private.h" + +#define BUILD_ID_LENGTH 32 + +enum { + HW_PLATFORM_UNKNOWN = 0, + HW_PLATFORM_SURF = 1, + HW_PLATFORM_FFA = 2, + HW_PLATFORM_FLUID = 3, + HW_PLATFORM_SVLTE_FFA = 4, + HW_PLATFORM_SVLTE_SURF = 5, + HW_PLATFORM_LIQUID = 9, + /* Dragonboard platform id is assigned as 10 in CDT */ + HW_PLATFORM_DRAGON = 10, + HW_PLATFORM_INVALID +}; + +const char *hw_platform[] = { + [HW_PLATFORM_UNKNOWN] = "Unknown", + [HW_PLATFORM_SURF] = "Surf", + [HW_PLATFORM_FFA] = "FFA", + [HW_PLATFORM_FLUID] = "Fluid", + [HW_PLATFORM_LIQUID] = "Liquid", + [HW_PLATFORM_SVLTE_FFA] = "SVLTE_FFA", + [HW_PLATFORM_SVLTE_SURF] = "SLVTE_SURF", + [HW_PLATFORM_DRAGON] = "Dragon" +}; + +enum { + ACCESSORY_CHIP_UNKNOWN = 0, + ACCESSORY_CHIP_CHARM = 58, +}; + +enum { + PLATFORM_SUBTYPE_UNKNOWN = 0x0, + PLATFORM_SUBTYPE_CHARM = 0x1, + PLATFORM_SUBTYPE_STRANGE = 0x2, + PLATFORM_SUBTYPE_STRANGE_2A = 0x3, + PLATFORM_SUBTYPE_INVALID, +}; + +const char *hw_platform_subtype[] = { + [PLATFORM_SUBTYPE_UNKNOWN] = "Unknown", + [PLATFORM_SUBTYPE_CHARM] = "charm", + [PLATFORM_SUBTYPE_STRANGE] = "strange", + [PLATFORM_SUBTYPE_STRANGE_2A] = "strange_2a," +}; + +/* Used to parse shared memory. Must match the modem. 
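+ * The format field indicates which of the versioned fields below are valid; each socinfo_vN struct embeds the previous version and only appends new fields.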
*/ +struct socinfo_v1 { + uint32_t format; + uint32_t id; + uint32_t version; + char build_id[BUILD_ID_LENGTH]; +}; + +struct socinfo_v2 { + struct socinfo_v1 v1; + + /* only valid when format==2 */ + uint32_t raw_id; + uint32_t raw_version; +}; + +struct socinfo_v3 { + struct socinfo_v2 v2; + + /* only valid when format==3 */ + uint32_t hw_platform; +}; + +struct socinfo_v4 { + struct socinfo_v3 v3; + + /* only valid when format==4 */ + uint32_t platform_version; +}; + +struct socinfo_v5 { + struct socinfo_v4 v4; + + /* only valid when format==5 */ + uint32_t accessory_chip; +}; + +struct socinfo_v6 { + struct socinfo_v5 v5; + + /* only valid when format==6 */ + uint32_t hw_platform_subtype; +}; + +static union { + struct socinfo_v1 v1; + struct socinfo_v2 v2; + struct socinfo_v3 v3; + struct socinfo_v4 v4; + struct socinfo_v5 v5; + struct socinfo_v6 v6; +} *socinfo; + +static enum msm_cpu cpu_of_id[] = { + + /* 7x01 IDs */ + [1] = MSM_CPU_7X01, + [16] = MSM_CPU_7X01, + [17] = MSM_CPU_7X01, + [18] = MSM_CPU_7X01, + [19] = MSM_CPU_7X01, + [23] = MSM_CPU_7X01, + [25] = MSM_CPU_7X01, + [26] = MSM_CPU_7X01, + [32] = MSM_CPU_7X01, + [33] = MSM_CPU_7X01, + [34] = MSM_CPU_7X01, + [35] = MSM_CPU_7X01, + + /* 7x25 IDs */ + [20] = MSM_CPU_7X25, + [21] = MSM_CPU_7X25, /* 7225 */ + [24] = MSM_CPU_7X25, /* 7525 */ + [27] = MSM_CPU_7X25, /* 7625 */ + [39] = MSM_CPU_7X25, + [40] = MSM_CPU_7X25, + [41] = MSM_CPU_7X25, + [42] = MSM_CPU_7X25, + [62] = MSM_CPU_7X25, /* 7625-1 */ + [63] = MSM_CPU_7X25, /* 7225-1 */ + [66] = MSM_CPU_7X25, /* 7225-2 */ + + + /* 7x27 IDs */ + [43] = MSM_CPU_7X27, + [44] = MSM_CPU_7X27, + [61] = MSM_CPU_7X27, + [67] = MSM_CPU_7X27, /* 7227-1 */ + [68] = MSM_CPU_7X27, /* 7627-1 */ + [69] = MSM_CPU_7X27, /* 7627-2 */ + + + /* 8x50 IDs */ + [30] = MSM_CPU_8X50, + [36] = MSM_CPU_8X50, + [37] = MSM_CPU_8X50, + [38] = MSM_CPU_8X50, + + /* 7x30 IDs */ + [59] = MSM_CPU_7X30, + [60] = MSM_CPU_7X30, + + /* 8x55 IDs */ + [74] = MSM_CPU_8X55, + [75] = MSM_CPU_8X55, + [85] = MSM_CPU_8X55, + + /* 8x60 IDs */ + [70] = MSM_CPU_8X60, + [71] = MSM_CPU_8X60, + [86] = MSM_CPU_8X60, + + /* 8960 IDs */ + [87] = MSM_CPU_8960, + + /* 7x25A IDs */ + [88] = MSM_CPU_7X25A, + [89] = MSM_CPU_7X25A, + [96] = MSM_CPU_7X25A, + + /* 7x27A IDs */ + [90] = MSM_CPU_7X27A, + [91] = MSM_CPU_7X27A, + [92] = MSM_CPU_7X27A, + [97] = MSM_CPU_7X27A, + + /* FSM9xxx ID */ + [94] = FSM_CPU_9XXX, + [95] = FSM_CPU_9XXX, + + /* 7x25AA ID */ + [98] = MSM_CPU_7X25AA, + [99] = MSM_CPU_7X25AA, + [100] = MSM_CPU_7X25AA, + + /* 7x27AA ID */ + [101] = MSM_CPU_7X27AA, + [102] = MSM_CPU_7X27AA, + [103] = MSM_CPU_7X27AA, + + /* 9x15 ID */ + [104] = MSM_CPU_9615, + [105] = MSM_CPU_9615, + + /* 8064 IDs */ + [109] = MSM_CPU_8064, + + /* 8930 IDs */ + [116] = MSM_CPU_8930, + + /* 8660A ID */ + [122] = MSM_CPU_8960, + + /* 8260A ID */ + [123] = MSM_CPU_8960, + + /* 8060A ID */ + [124] = MSM_CPU_8960, + + /* Copper IDs */ + [126] = MSM_CPU_COPPER, + + /* Uninitialized IDs are not known to run Linux. + MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are + considered as unknown CPU. */ +}; + +static enum msm_cpu cur_cpu; + +static struct socinfo_v1 dummy_socinfo = { + .format = 1, + .version = 1, + .build_id = "Dummy socinfo placeholder" +}; + +uint32_t socinfo_get_id(void) +{ + return (socinfo) ? socinfo->v1.id : 0; +} +EXPORT_SYMBOL_GPL(socinfo_get_id); + +uint32_t socinfo_get_version(void) +{ + return (socinfo) ? socinfo->v1.version : 0; +} + +char *socinfo_get_build_id(void) +{ + return (socinfo) ? 
socinfo->v1.build_id : NULL; +} + +uint32_t socinfo_get_raw_id(void) +{ + return socinfo ? + (socinfo->v1.format >= 2 ? socinfo->v2.raw_id : 0) + : 0; +} + +uint32_t socinfo_get_raw_version(void) +{ + return socinfo ? + (socinfo->v1.format >= 2 ? socinfo->v2.raw_version : 0) + : 0; +} + +uint32_t socinfo_get_platform_type(void) +{ + return socinfo ? + (socinfo->v1.format >= 3 ? socinfo->v3.hw_platform : 0) + : 0; +} + + +uint32_t socinfo_get_platform_version(void) +{ + return socinfo ? + (socinfo->v1.format >= 4 ? socinfo->v4.platform_version : 0) + : 0; +} + +/* This information is directly encoded by the machine id */ +/* Thus no external callers rely on this information at the moment */ +static uint32_t socinfo_get_accessory_chip(void) +{ + return socinfo ? + (socinfo->v1.format >= 5 ? socinfo->v5.accessory_chip : 0) + : 0; +} + +uint32_t socinfo_get_platform_subtype(void) +{ + return socinfo ? + (socinfo->v1.format >= 6 ? socinfo->v6.hw_platform_subtype : 0) + : 0; +} + +enum msm_cpu socinfo_get_msm_cpu(void) +{ + return cur_cpu; +} +EXPORT_SYMBOL_GPL(socinfo_get_msm_cpu); + +static ssize_t +socinfo_show_id(struct sys_device *dev, + struct sysdev_attribute *attr, + char *buf) +{ + if (!socinfo) { + pr_err("%s: No socinfo found!\n", __func__); + return 0; + } + + return snprintf(buf, PAGE_SIZE, "%u\n", socinfo_get_id()); +} + +static ssize_t +socinfo_show_version(struct sys_device *dev, + struct sysdev_attribute *attr, + char *buf) +{ + uint32_t version; + + if (!socinfo) { + pr_err("%s: No socinfo found!\n", __func__); + return 0; + } + + version = socinfo_get_version(); + return snprintf(buf, PAGE_SIZE, "%u.%u\n", + SOCINFO_VERSION_MAJOR(version), + SOCINFO_VERSION_MINOR(version)); +} + +static ssize_t +socinfo_show_build_id(struct sys_device *dev, + struct sysdev_attribute *attr, + char *buf) +{ + if (!socinfo) { + pr_err("%s: No socinfo found!\n", __func__); + return 0; + } + + return snprintf(buf, PAGE_SIZE, "%-.32s\n", socinfo_get_build_id()); +} + +static ssize_t +socinfo_show_raw_id(struct sys_device *dev, + struct sysdev_attribute *attr, + char *buf) +{ + if (!socinfo) { + pr_err("%s: No socinfo found!\n", __func__); + return 0; + } + if (socinfo->v1.format < 2) { + pr_err("%s: Raw ID not available!\n", __func__); + return 0; + } + + return snprintf(buf, PAGE_SIZE, "%u\n", socinfo_get_raw_id()); +} + +static ssize_t +socinfo_show_raw_version(struct sys_device *dev, + struct sysdev_attribute *attr, + char *buf) +{ + if (!socinfo) { + pr_err("%s: No socinfo found!\n", __func__); + return 0; + } + if (socinfo->v1.format < 2) { + pr_err("%s: Raw version not available!\n", __func__); + return 0; + } + + return snprintf(buf, PAGE_SIZE, "%u\n", socinfo_get_raw_version()); +} + +static ssize_t +socinfo_show_platform_type(struct sys_device *dev, + struct sysdev_attribute *attr, + char *buf) +{ + uint32_t hw_type; + + if (!socinfo) { + pr_err("%s: No socinfo found!\n", __func__); + return 0; + } + if (socinfo->v1.format < 3) { + pr_err("%s: platform type not available!\n", __func__); + return 0; + } + + hw_type = socinfo_get_platform_type(); + if (hw_type >= HW_PLATFORM_INVALID) { + pr_err("%s: Invalid hardware platform type found\n", + __func__); + hw_type = HW_PLATFORM_UNKNOWN; + } + + return snprintf(buf, PAGE_SIZE, "%-.32s\n", hw_platform[hw_type]); +} + +static ssize_t +socinfo_show_platform_version(struct sys_device *dev, + struct sysdev_attribute *attr, + char *buf) +{ + + if (!socinfo) { + pr_err("%s: No socinfo found!\n", __func__); + return 0; + } + if (socinfo->v1.format < 4) 
{ + pr_err("%s: platform version not available!\n", __func__); + return 0; + } + + return snprintf(buf, PAGE_SIZE, "%u\n", + socinfo_get_platform_version()); +} + +static ssize_t +socinfo_show_accessory_chip(struct sys_device *dev, + struct sysdev_attribute *attr, + char *buf) +{ + if (!socinfo) { + pr_err("%s: No socinfo found!\n", __func__); + return 0; + } + if (socinfo->v1.format < 5) { + pr_err("%s: accessory chip not available!\n", __func__); + return 0; + } + + return snprintf(buf, PAGE_SIZE, "%u\n", + socinfo_get_accessory_chip()); +} + +static ssize_t +socinfo_show_platform_subtype(struct sys_device *dev, + struct sysdev_attribute *attr, + char *buf) +{ + uint32_t hw_subtype; + if (!socinfo) { + pr_err("%s: No socinfo found!\n", __func__); + return 0; + } + if (socinfo->v1.format < 6) { + pr_err("%s: platform subtype not available!\n", __func__); + return 0; + } + + hw_subtype = socinfo_get_platform_subtype(); + if (hw_subtype >= PLATFORM_SUBTYPE_INVALID) { + pr_err("%s: Invalid hardware platform sub type found\n", + __func__); + hw_subtype = PLATFORM_SUBTYPE_UNKNOWN; + } + return snprintf(buf, PAGE_SIZE, "%-.32s\n", + hw_platform_subtype[hw_subtype]); +} + +static struct sysdev_attribute socinfo_v1_files[] = { + _SYSDEV_ATTR(id, 0444, socinfo_show_id, NULL), + _SYSDEV_ATTR(version, 0444, socinfo_show_version, NULL), + _SYSDEV_ATTR(build_id, 0444, socinfo_show_build_id, NULL), +}; + +static struct sysdev_attribute socinfo_v2_files[] = { + _SYSDEV_ATTR(raw_id, 0444, socinfo_show_raw_id, NULL), + _SYSDEV_ATTR(raw_version, 0444, socinfo_show_raw_version, NULL), +}; + +static struct sysdev_attribute socinfo_v3_files[] = { + _SYSDEV_ATTR(hw_platform, 0444, socinfo_show_platform_type, NULL), +}; + +static struct sysdev_attribute socinfo_v4_files[] = { + _SYSDEV_ATTR(platform_version, 0444, + socinfo_show_platform_version, NULL), +}; + +static struct sysdev_attribute socinfo_v5_files[] = { + _SYSDEV_ATTR(accessory_chip, 0444, + socinfo_show_accessory_chip, NULL), +}; + +static struct sysdev_attribute socinfo_v6_files[] = { + _SYSDEV_ATTR(platform_subtype, 0444, + socinfo_show_platform_subtype, NULL), +}; + +static struct sysdev_class soc_sysdev_class = { + .name = "soc", +}; + +static struct sys_device soc_sys_device = { + .id = 0, + .cls = &soc_sysdev_class, +}; + +static int __init socinfo_create_files(struct sys_device *dev, + struct sysdev_attribute files[], + int size) +{ + int i; + for (i = 0; i < size; i++) { + int err = sysdev_create_file(dev, &files[i]); + if (err) { + pr_err("%s: sysdev_create_file(%s)=%d\n", + __func__, files[i].attr.name, err); + return err; + } + } + return 0; +} + +static int __init socinfo_init_sysdev(void) +{ + int err; + + if (!socinfo) { + pr_err("%s: No socinfo found!\n", __func__); + return -ENODEV; + } + + err = sysdev_class_register(&soc_sysdev_class); + if (err) { + pr_err("%s: sysdev_class_register fail (%d)\n", + __func__, err); + return err; + } + err = sysdev_register(&soc_sys_device); + if (err) { + pr_err("%s: sysdev_register fail (%d)\n", + __func__, err); + return err; + } + socinfo_create_files(&soc_sys_device, socinfo_v1_files, + ARRAY_SIZE(socinfo_v1_files)); + if (socinfo->v1.format < 2) + return err; + socinfo_create_files(&soc_sys_device, socinfo_v2_files, + ARRAY_SIZE(socinfo_v2_files)); + + if (socinfo->v1.format < 3) + return err; + + socinfo_create_files(&soc_sys_device, socinfo_v3_files, + ARRAY_SIZE(socinfo_v3_files)); + + if (socinfo->v1.format < 4) + return err; + + socinfo_create_files(&soc_sys_device, socinfo_v4_files, + 
ARRAY_SIZE(socinfo_v4_files)); + + if (socinfo->v1.format < 5) + return err; + + socinfo_create_files(&soc_sys_device, socinfo_v5_files, + ARRAY_SIZE(socinfo_v5_files)); + + if (socinfo->v1.format < 6) + return err; + + return socinfo_create_files(&soc_sys_device, socinfo_v6_files, + ARRAY_SIZE(socinfo_v6_files)); + +} + +arch_initcall(socinfo_init_sysdev); + +void *setup_dummy_socinfo(void) +{ + if (machine_is_msm8960_rumi3() || machine_is_msm8960_sim() || + machine_is_msm8960_cdp()) + dummy_socinfo.id = 87; + else if (machine_is_apq8064_rumi3() || machine_is_apq8064_sim()) + dummy_socinfo.id = 109; + else if (machine_is_msm9615_mtp() || machine_is_msm9615_cdp()) + dummy_socinfo.id = 104; + else if (early_machine_is_copper()) + dummy_socinfo.id = 126; + return (void *) &dummy_socinfo; +} + +int __init socinfo_init(void) +{ + socinfo = smem_alloc(SMEM_HW_SW_BUILD_ID, sizeof(struct socinfo_v6)); + + if (!socinfo) + socinfo = smem_alloc(SMEM_HW_SW_BUILD_ID, + sizeof(struct socinfo_v5)); + + if (!socinfo) + socinfo = smem_alloc(SMEM_HW_SW_BUILD_ID, + sizeof(struct socinfo_v4)); + + if (!socinfo) + socinfo = smem_alloc(SMEM_HW_SW_BUILD_ID, + sizeof(struct socinfo_v3)); + + if (!socinfo) + socinfo = smem_alloc(SMEM_HW_SW_BUILD_ID, + sizeof(struct socinfo_v2)); + + if (!socinfo) + socinfo = smem_alloc(SMEM_HW_SW_BUILD_ID, + sizeof(struct socinfo_v1)); + + if (!socinfo) { + pr_warn("%s: Can't find SMEM_HW_SW_BUILD_ID; falling back on " + "dummy values.\n", __func__); + socinfo = setup_dummy_socinfo(); + } + + WARN(!socinfo_get_id(), "Unknown SOC ID!\n"); + WARN(socinfo_get_id() >= ARRAY_SIZE(cpu_of_id), + "New IDs added! ID => CPU mapping might need an update.\n"); + + if (socinfo->v1.id < ARRAY_SIZE(cpu_of_id)) + cur_cpu = cpu_of_id[socinfo->v1.id]; + + switch (socinfo->v1.format) { + case 1: + pr_info("%s: v%u, id=%u, ver=%u.%u\n", + __func__, socinfo->v1.format, socinfo->v1.id, + SOCINFO_VERSION_MAJOR(socinfo->v1.version), + SOCINFO_VERSION_MINOR(socinfo->v1.version)); + break; + case 2: + pr_info("%s: v%u, id=%u, ver=%u.%u, " + "raw_id=%u, raw_ver=%u\n", + __func__, socinfo->v1.format, socinfo->v1.id, + SOCINFO_VERSION_MAJOR(socinfo->v1.version), + SOCINFO_VERSION_MINOR(socinfo->v1.version), + socinfo->v2.raw_id, socinfo->v2.raw_version); + break; + case 3: + pr_info("%s: v%u, id=%u, ver=%u.%u, " + "raw_id=%u, raw_ver=%u, hw_plat=%u\n", + __func__, socinfo->v1.format, socinfo->v1.id, + SOCINFO_VERSION_MAJOR(socinfo->v1.version), + SOCINFO_VERSION_MINOR(socinfo->v1.version), + socinfo->v2.raw_id, socinfo->v2.raw_version, + socinfo->v3.hw_platform); + break; + case 4: + pr_info("%s: v%u, id=%u, ver=%u.%u, " + "raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n", + __func__, socinfo->v1.format, socinfo->v1.id, + SOCINFO_VERSION_MAJOR(socinfo->v1.version), + SOCINFO_VERSION_MINOR(socinfo->v1.version), + socinfo->v2.raw_id, socinfo->v2.raw_version, + socinfo->v3.hw_platform, socinfo->v4.platform_version); + break; + case 5: + pr_info("%s: v%u, id=%u, ver=%u.%u, " + "raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n" + " accessory_chip=%u\n", __func__, socinfo->v1.format, + socinfo->v1.id, + SOCINFO_VERSION_MAJOR(socinfo->v1.version), + SOCINFO_VERSION_MINOR(socinfo->v1.version), + socinfo->v2.raw_id, socinfo->v2.raw_version, + socinfo->v3.hw_platform, socinfo->v4.platform_version, + socinfo->v5.accessory_chip); + break; + case 6: + pr_info("%s: v%u, id=%u, ver=%u.%u, " + "raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n" + " accessory_chip=%u hw_plat_subtype=%u\n", __func__, + 
socinfo->v1.format, + socinfo->v1.id, + SOCINFO_VERSION_MAJOR(socinfo->v1.version), + SOCINFO_VERSION_MINOR(socinfo->v1.version), + socinfo->v2.raw_id, socinfo->v2.raw_version, + socinfo->v3.hw_platform, socinfo->v4.platform_version, + socinfo->v5.accessory_chip, + socinfo->v6.hw_platform_subtype); + break; + default: + pr_err("%s: Unknown format found\n", __func__); + break; + } + + return 0; +} + +const int get_core_count(void) +{ + if (!(read_cpuid_mpidr() & BIT(31))) + return 1; + + if (read_cpuid_mpidr() & BIT(30) && + !machine_is_msm8960_sim() && + !machine_is_apq8064_sim()) + return 1; + + /* 1 + the PART[1:0] field of MIDR */ + return ((read_cpuid_id() >> 4) & 3) + 1; +} + +const int read_msm_cpu_type(void) +{ + if (machine_is_msm8960_sim() || machine_is_msm8960_rumi3()) + return MSM_CPU_8960; + + if (socinfo_get_msm_cpu() != MSM_CPU_UNKNOWN) + return socinfo_get_msm_cpu(); + + switch (read_cpuid_id()) { + case 0x510F02D0: + case 0x510F02D2: + case 0x510F02D4: + return MSM_CPU_8X60; + + case 0x510F04D0: + case 0x510F04D1: + case 0x510F04D2: + case 0x511F04D0: + case 0x512F04D0: + return MSM_CPU_8960; + + case 0x51404D11: /* We can't get here unless we are in bringup */ + return MSM_CPU_8930; + + case 0x510F06F0: + return MSM_CPU_8064; + + default: + return MSM_CPU_UNKNOWN; + }; +} + +const int cpu_is_krait_v1(void) +{ + switch (read_cpuid_id()) { + case 0x510F04D0: + case 0x510F04D1: + case 0x510F04D2: + return 1; + + default: + return 0; + }; +} diff --git a/arch/arm/mach-msm/ssbi.c b/arch/arm/mach-msm/ssbi.c new file mode 100644 index 0000000000000..b9cefa29ba02e --- /dev/null +++ b/arch/arm/mach-msm/ssbi.c @@ -0,0 +1,304 @@ +/* arch/arm/mach-msm/ssbi.c + * + * Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * Copyright (c) 2010, Google Inc. + * + * Original authors: Code Aurura Forum + * + * Author: Dima Zavin + * - Largely rewritten from original to not be an i2c driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +/* SSBI 2.0 controller registers */ +#define SSBI2_CTL 0x0000 +#define SSBI2_RESET 0x0004 +#define SSBI2_CMD 0x0008 +#define SSBI2_RD 0x0010 +#define SSBI2_STATUS 0x0014 +#define SSBI2_PRIORITIES 0x0018 +#define SSBI2_MODE2 0x001C + +/* SSBI_CMD fields */ +#define SSBI_CMD_SEND_TERM_SYM (1 << 27) +#define SSBI_CMD_WAKEUP_SLAVE (1 << 26) +#define SSBI_CMD_USE_ENABLE (1 << 25) +#define SSBI_CMD_RDWRN (1 << 24) + +/* SSBI_STATUS fields */ +#define SSBI_STATUS_DATA_IN (1 << 4) +#define SSBI_STATUS_RD_CLOBBERED (1 << 3) +#define SSBI_STATUS_RD_READY (1 << 2) +#define SSBI_STATUS_READY (1 << 1) +#define SSBI_STATUS_MCHN_BUSY (1 << 0) + +/* SSBI_RD fields */ +#define SSBI_RD_USE_ENABLE (1 << 25) +#define SSBI_RD_RDWRN (1 << 24) + +/* SSBI_MODE2 fields */ +#define SSBI_MODE2_SSBI2_MODE (1 << 0) + +#define SSBI_TIMEOUT_US 100 + +struct msm_ssbi { + struct device *dev; + struct device *slave; + void __iomem *base; + remote_spinlock_t rspin_lock; +}; + +#define to_msm_ssbi(dev) platform_get_drvdata(to_platform_device(dev)) + +static inline u32 ssbi_readl(struct msm_ssbi *ssbi, unsigned long reg) +{ + return readl(ssbi->base + reg); +} + +static inline void ssbi_writel(struct msm_ssbi *ssbi, u32 val, + unsigned long reg) +{ + writel(val, ssbi->base + reg); +} + +//poll_for_device_ready === SSBI_STATUS_READY +//poll_for_transfer_completed === SSBI_STATUS_MCHN_BUSY +//poll_for_read_completed === SSBI_STATUS_RD_READY +static int ssbi_wait_mask(struct msm_ssbi *ssbi, u32 set_mask, u32 clr_mask) +{ + u32 timeout = SSBI_TIMEOUT_US; + u32 val; + + while (timeout--) { + val = ssbi_readl(ssbi, SSBI2_STATUS); + if (((val & set_mask) == set_mask) && ((val & clr_mask) == 0)) + return 0; + udelay(1); + } + + dev_err(ssbi->dev, "%s: timeout (status %x set_mask %x clr_mask %x)\n", + __func__, ssbi_readl(ssbi, SSBI2_STATUS), set_mask, clr_mask); + return -ETIMEDOUT; +} + +int msm_ssbi_read(struct device *dev, u16 addr, u8 *buf, int len) +{ + struct msm_ssbi *ssbi = to_msm_ssbi(dev); + unsigned long flags; + u32 read_cmd = SSBI_CMD_RDWRN | ((addr & 0xff) << 16); + u32 mode2; + int ret = 0; + + BUG_ON(ssbi->dev != dev); + + remote_spin_lock_irqsave(&ssbi->rspin_lock, flags); + + mode2 = ssbi_readl(ssbi, SSBI2_MODE2); + if (mode2 & SSBI_MODE2_SSBI2_MODE) { + mode2 = (mode2 & 0xf) | (((addr >> 8) & 0x7f) << 4); + ssbi_writel(ssbi, mode2, SSBI2_MODE2); + } + + while (len) { + ret = ssbi_wait_mask(ssbi, SSBI_STATUS_READY, 0); + if (ret) + goto err; + + ssbi_writel(ssbi, read_cmd, SSBI2_CMD); + ret = ssbi_wait_mask(ssbi, SSBI_STATUS_RD_READY, 0); + if (ret) + goto err; + *buf++ = ssbi_readl(ssbi, SSBI2_RD) & 0xff; + len--; + } + +err: + remote_spin_unlock_irqrestore(&ssbi->rspin_lock, flags); + return ret; +} +EXPORT_SYMBOL(msm_ssbi_read); + +int msm_ssbi_write(struct device *dev, u16 addr, u8 *buf, int len) +{ + struct msm_ssbi *ssbi = to_msm_ssbi(dev); + unsigned long flags; + u32 mode2; + int ret = 0; + + BUG_ON(ssbi->dev != dev); + + remote_spin_lock_irqsave(&ssbi->rspin_lock, flags); + + mode2 = readl(ssbi->base + SSBI2_MODE2); + if (mode2 & SSBI_MODE2_SSBI2_MODE) { + mode2 = (mode2 & 0xf) | (((addr >> 8) & 0x7f) << 4); + ssbi_writel(ssbi, mode2, SSBI2_MODE2); + } + + while (len) { + ret = ssbi_wait_mask(ssbi, SSBI_STATUS_READY, 0); + if (ret) + goto err; + + ssbi_writel(ssbi, ((addr & 0xff) << 16) | *buf, SSBI2_CMD); + ret = ssbi_wait_mask(ssbi, 0, SSBI_STATUS_MCHN_BUSY); + if (ret) + goto err; + buf++; + len--; + } + +err: + 
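+	/* release the remote spinlock shared with remote processors before returning the transfer status */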
remote_spin_unlock_irqrestore(&ssbi->rspin_lock, flags); + return ret; +} +EXPORT_SYMBOL(msm_ssbi_write); + +static int __init msm_ssbi_add_slave(struct msm_ssbi *ssbi, + struct msm_ssbi_slave_info *slave) +{ + struct platform_device *slave_pdev; + struct resource slave_irq_res; + int ret; + + if (ssbi->slave) { + pr_err("%s: slave already attached??\n", __func__); + return -EBUSY; + } + + slave_pdev = platform_device_alloc(slave->name, -1); + if (!slave_pdev) { + pr_err("%s: cannot allocate pdev for slave '%s'", __func__, + slave->name); + ret = -ENOMEM; + goto err; + } + + slave_pdev->dev.parent = ssbi->dev; + slave_pdev->dev.platform_data = slave->platform_data; + + memset(&slave_irq_res, 0, sizeof(struct resource)); + slave_irq_res.start = slave->irq; + slave_irq_res.end = slave->irq; + slave_irq_res.flags = IORESOURCE_IRQ; + ret = platform_device_add_resources(slave_pdev, &slave_irq_res, 1); + if (ret) { + pr_err("%s: can't add irq resource for '%s'\n", __func__, + slave->name); + goto err; + } + + ret = platform_device_add(slave_pdev); + if (ret) { + pr_err("%s: cannot add slave platform device for '%s'\n", + __func__, slave->name); + goto err; + } + + ssbi->slave = &slave_pdev->dev; + return 0; + +err: + if (slave_pdev) + platform_device_put(slave_pdev); + return ret; +} + +static int __init msm_ssbi_probe(struct platform_device *pdev) +{ + struct msm_ssbi_platform_data *pdata = pdev->dev.platform_data; + struct resource *mem_res; + struct msm_ssbi *ssbi; + int ret = 0; + + if (!pdata) { + pr_err("%s: missing platform data\n", __func__); + return -EINVAL; + } + + ssbi = kzalloc(sizeof(struct msm_ssbi), GFP_KERNEL); + if (!ssbi) { + pr_err("%s: cannot allocate ssbi_data\n", __func__); + return -ENOMEM; + } + + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem_res) { + pr_err("%s: missing mem resource\n", __func__); + ret = -EINVAL; + goto err_get_mem_res; + } + + ssbi->base = ioremap(mem_res->start, resource_size(mem_res)); + if (!ssbi->base) { + pr_err("%s: ioremap of 0x%p failed\n", __func__, + (void *)mem_res->start); + ret = -EINVAL; + goto err_ioremap; + } + ssbi->dev = &pdev->dev; + platform_set_drvdata(pdev, ssbi); + + ret = remote_spin_lock_init(&ssbi->rspin_lock, pdata->rspinlock_name); + if (ret) { + pr_err("%s: cannot init remote spinlock '%s'\n", __func__, + pdata->rspinlock_name); + goto err_remote_spinlock_init; + } + + ret = msm_ssbi_add_slave(ssbi, &pdata->slave); + if (ret) + goto err_ssbi_add_slave; + + pr_info("msm_ssbi: io=%08x rsl='%s'\n", mem_res->start, + pdata->rspinlock_name); + + return 0; + +err_remote_spinlock_init: + platform_set_drvdata(pdev, NULL); +err_ssbi_add_slave: + iounmap(ssbi->base); +err_ioremap: +err_get_mem_res: + kfree(ssbi); + return ret; +} + +static struct platform_driver msm_ssbi_driver = { + .probe = msm_ssbi_probe, + .driver = { + .name = "msm_ssbi", + .owner = THIS_MODULE, + }, +}; + +static int __init msm_ssbi_init(void) +{ + return platform_driver_register(&msm_ssbi_driver); +} + +postcore_initcall(msm_ssbi_init); diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c index c105d28b53e38..ea8bcb2197a84 100644 --- a/arch/arm/mach-msm/timer.c +++ b/arch/arm/mach-msm/timer.c @@ -25,6 +25,16 @@ #include #include +#include "smd_private.h" + +enum { + MSM_TIMER_DEBUG_SYNC_STATE = 1U << 0, + MSM_TIMER_DEBUG_SYNC_UPDATE = 1U << 1, + MSM_TIMER_DEBUG_SYNC = 1U << 2, +}; +static int msm_timer_debug_mask; +module_param_named(debug_mask, msm_timer_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); + #ifndef 
MSM_DGT_BASE #define MSM_DGT_BASE (MSM_GPT_BASE + 0x10) #endif @@ -47,19 +57,6 @@ enum { #define GPT_HZ 32768 -enum timer_location { - LOCAL_TIMER = 0, - GLOBAL_TIMER = 1, -}; - -#ifdef MSM_TMR0_BASE -#define MSM_TMR_GLOBAL (MSM_TMR0_BASE - MSM_TMR_BASE) -#else -#define MSM_TMR_GLOBAL 0 -#endif - -#define MSM_GLOBAL_TIMER MSM_CLOCK_DGT - #if defined(CONFIG_ARCH_QSD8X50) #define DGT_HZ (19200000 / 4) /* 19.2 MHz / 4 by default */ #define MSM_DGT_SHIFT (0) @@ -71,6 +68,12 @@ enum timer_location { #define MSM_DGT_SHIFT (5) #endif +enum { + MSM_CLOCK_FLAGS_UNSTABLE_COUNT = 1U << 0, + MSM_CLOCK_FLAGS_ODD_MATCH_WRITE = 1U << 1, + MSM_CLOCK_FLAGS_DELAYED_WRITE_POST = 1U << 2, +}; + struct msm_clock { struct clock_event_device clockevent; struct clocksource clocksource; @@ -78,81 +81,478 @@ struct msm_clock { void __iomem *regbase; uint32_t freq; uint32_t shift; - void __iomem *global_counter; - void __iomem *local_counter; + uint32_t flags; + uint32_t write_delay; + uint32_t last_set; + uint32_t offset; + uint32_t alarm_vtime; + uint32_t smem_offset; + uint32_t smem_in_sync; + cycle_t stopped_tick; + int stopped; }; - enum { MSM_CLOCK_GPT, MSM_CLOCK_DGT, - NR_TIMERS, }; - - static struct msm_clock msm_clocks[]; -static struct clock_event_device *local_clock_event; +static struct msm_clock *msm_active_clock; +static DEFINE_SPINLOCK(msm_fast_timer_lock); +static int msm_fast_timer_enabled; static irqreturn_t msm_timer_interrupt(int irq, void *dev_id) { struct clock_event_device *evt = dev_id; - if (smp_processor_id() != 0) - evt = local_clock_event; - if (evt->event_handler == NULL) - return IRQ_HANDLED; - evt->event_handler(evt); + if (evt->event_handler) + evt->event_handler(evt); return IRQ_HANDLED; } -static cycle_t msm_read_timer_count(struct clocksource *cs) +static uint32_t msm_read_timer_count(struct msm_clock *clock) { - struct msm_clock *clk = container_of(cs, struct msm_clock, clocksource); + uint32_t t1, t2; + int loop_count = 0; + + t1 = readl(clock->regbase + TIMER_COUNT_VAL); + if (!(clock->flags & MSM_CLOCK_FLAGS_UNSTABLE_COUNT)) + return t1; + while (1) { + t2 = readl(clock->regbase + TIMER_COUNT_VAL); + if (t1 == t2) + return t1; + if (loop_count++ > 10) { + printk(KERN_ERR "msm_read_timer_count timer %s did not" + "stabilize %u != %u\n", clock->clockevent.name, + t2, t1); + return t2; + } + t1 = t2; + } +} - return readl(clk->global_counter); +static cycle_t msm_gpt_read(struct clocksource *cs) +{ + struct msm_clock *clock = &msm_clocks[MSM_CLOCK_GPT]; + if (clock->stopped) + return clock->stopped_tick; + else + return msm_read_timer_count(clock) + clock->offset; } -static struct msm_clock *clockevent_to_clock(struct clock_event_device *evt) +static cycle_t msm_dgt_read(struct clocksource *cs) { -#ifdef CONFIG_SMP - int i; - for (i = 0; i < NR_TIMERS; i++) - if (evt == &(msm_clocks[i].clockevent)) - return &msm_clocks[i]; - return &msm_clocks[MSM_GLOBAL_TIMER]; -#else - return container_of(evt, struct msm_clock, clockevent); -#endif + struct msm_clock *clock = &msm_clocks[MSM_CLOCK_DGT]; + if (clock->stopped) + return clock->stopped_tick; + return (msm_read_timer_count(clock) + clock->offset) >> MSM_DGT_SHIFT; } static int msm_timer_set_next_event(unsigned long cycles, struct clock_event_device *evt) { - struct msm_clock *clock = clockevent_to_clock(evt); - uint32_t now = readl(clock->local_counter); - uint32_t alarm = now + (cycles << clock->shift); - + int i; + struct msm_clock *clock; + uint32_t now; + uint32_t alarm; + int late; + + clock = container_of(evt, struct msm_clock, 
clockevent); + now = msm_read_timer_count(clock); + alarm = now + (cycles << clock->shift); + if (clock->flags & MSM_CLOCK_FLAGS_ODD_MATCH_WRITE) + while (now == clock->last_set) + now = msm_read_timer_count(clock); writel(alarm, clock->regbase + TIMER_MATCH_VAL); + if (clock->flags & MSM_CLOCK_FLAGS_DELAYED_WRITE_POST) { + /* read the counter four extra times to make sure write posts + before reading the time */ + for (i = 0; i < 4; i++) + readl(clock->regbase + TIMER_COUNT_VAL); + } + now = msm_read_timer_count(clock); + clock->last_set = now; + clock->alarm_vtime = alarm + clock->offset; + late = now - alarm; + if (late >= (int)(-clock->write_delay << clock->shift) && late < DGT_HZ*5) { + static int print_limit = 10; + if (print_limit > 0) { + print_limit--; + printk(KERN_NOTICE "msm_timer_set_next_event(%lu) " + "clock %s, alarm already expired, now %x, " + "alarm %x, late %d%s\n", + cycles, clock->clockevent.name, now, alarm, late, + print_limit ? "" : " stop printing"); + } + return -ETIME; + } return 0; } static void msm_timer_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { - struct msm_clock *clock = clockevent_to_clock(evt); + struct msm_clock *clock; + unsigned long irq_flags; + + clock = container_of(evt, struct msm_clock, clockevent); + local_irq_save(irq_flags); switch (mode) { case CLOCK_EVT_MODE_RESUME: case CLOCK_EVT_MODE_PERIODIC: break; case CLOCK_EVT_MODE_ONESHOT: + clock->stopped = 0; + clock->offset = -msm_read_timer_count(clock) + clock->stopped_tick; + msm_active_clock = clock; writel(TIMER_ENABLE_EN, clock->regbase + TIMER_ENABLE); break; case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_SHUTDOWN: + msm_active_clock = NULL; + clock->smem_in_sync = 0; + clock->stopped = 1; + clock->stopped_tick = (msm_read_timer_count(clock) + + clock->offset) >> clock->shift; writel(0, clock->regbase + TIMER_ENABLE); break; } + local_irq_restore(irq_flags); } +static inline int check_timeout(struct msm_clock *clock, uint32_t timeout) +{ + return (int32_t)(msm_read_timer_count(clock) - timeout) <= 0; +} + +#ifndef CONFIG_ARCH_MSM_SCORPION + +static uint32_t msm_timer_sync_smem_clock(int exit_sleep) +{ + struct msm_clock *clock = &msm_clocks[MSM_CLOCK_GPT]; + uint32_t *smem_clock; + uint32_t smem_clock_val; + uint32_t timeout; + uint32_t entry_time; + uint32_t timeout_delta; + uint32_t last_state; + uint32_t state; + uint32_t new_offset; + + smem_clock = smem_alloc(SMEM_SMEM_SLOW_CLOCK_VALUE, + sizeof(uint32_t)); + + if (smem_clock == NULL) { + printk(KERN_ERR "no smem clock\n"); + return 0; + } + + if (!exit_sleep && clock->smem_in_sync) + return 0; + + timeout_delta = (clock->freq >> (7 - clock->shift)); /* 7.8ms */ + + last_state = state = smsm_get_state(SMSM_STATE_MODEM); + if (*smem_clock) { + printk(KERN_INFO "get_smem_clock: invalid start state %x " + "clock %u\n", state, *smem_clock); + smsm_change_state(SMSM_STATE_APPS, SMSM_TIMEWAIT, SMSM_TIMEINIT); + entry_time = msm_read_timer_count(clock); + timeout = entry_time + timeout_delta; + while (*smem_clock != 0 && check_timeout(clock, timeout)) + ; + if (*smem_clock) { + printk(KERN_INFO "get_smem_clock: timeout still " + "invalid state %x clock %u in %d ticks\n", + state, *smem_clock, + msm_read_timer_count(clock) - entry_time); + return 0; + } + } + entry_time = msm_read_timer_count(clock); + timeout = entry_time + timeout_delta; + smsm_change_state(SMSM_STATE_APPS, SMSM_TIMEINIT, SMSM_TIMEWAIT); + do { + smem_clock_val = *smem_clock; + state = smsm_get_state(SMSM_STATE_MODEM); + if (state != last_state) { 
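+			/* log each modem state change seen while polling for the shared slow-clock value */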
+ last_state = state; + if (msm_timer_debug_mask & MSM_TIMER_DEBUG_SYNC_STATE) + pr_info("get_smem_clock: state %x clock %u\n", + state, smem_clock_val); + } + } while (smem_clock_val == 0 && check_timeout(clock, timeout)); + if (smem_clock_val) { + new_offset = smem_clock_val - msm_read_timer_count(clock); + if (clock->offset + clock->smem_offset != new_offset) { + if (exit_sleep) + clock->offset = new_offset - clock->smem_offset; + else + clock->smem_offset = new_offset - clock->offset; + clock->smem_in_sync = 1; + if (msm_timer_debug_mask & MSM_TIMER_DEBUG_SYNC_UPDATE) + printk(KERN_INFO "get_smem_clock: state %x " + "clock %u new offset %u+%u\n", + state, smem_clock_val, + clock->offset, clock->smem_offset); + } else if (msm_timer_debug_mask & MSM_TIMER_DEBUG_SYNC) { + printk(KERN_INFO "get_smem_clock: state %x " + "clock %u offset %u+%u\n", + state, smem_clock_val, + clock->offset, clock->smem_offset); + } + } else { + printk(KERN_INFO "get_smem_clock: timeout state %x clock %u " + "in %d ticks\n", state, *smem_clock, + msm_read_timer_count(clock) - entry_time); + } + smsm_change_state(SMSM_STATE_APPS, SMSM_TIMEWAIT, SMSM_TIMEINIT); + entry_time = msm_read_timer_count(clock); + timeout = entry_time + timeout_delta; + while (*smem_clock != 0 && check_timeout(clock, timeout)) { + uint32_t astate = smsm_get_state(SMSM_STATE_APPS); + if ((astate & SMSM_TIMEWAIT) || !(astate & SMSM_TIMEINIT)) { + if (msm_timer_debug_mask & MSM_TIMER_DEBUG_SYNC_STATE) + pr_info("get_smem_clock: modem overwrote " + "apps state %x\n", astate); + smsm_change_state(SMSM_STATE_APPS, + SMSM_TIMEWAIT, SMSM_TIMEINIT); + } + } + if (*smem_clock) + printk(KERN_INFO "get_smem_clock: exit timeout state %x " + "clock %u in %d ticks\n", state, *smem_clock, + msm_read_timer_count(clock) - entry_time); + return smem_clock_val; +} + +#else + +/* Time Master State Bits */ +#define DEM_TIME_MASTER_TIME_PENDING_APPS BIT(0) + +/* Time Slave State Bits */ +#define DEM_TIME_SLAVE_TIME_REQUEST 0x0400 +#define DEM_TIME_SLAVE_TIME_POLL 0x0800 +#define DEM_TIME_SLAVE_TIME_INIT 0x1000 + +static uint32_t msm_timer_sync_smem_clock(int exit_sleep) +{ + struct msm_clock *clock = &msm_clocks[MSM_CLOCK_GPT]; + uint32_t *smem_clock; + uint32_t smem_clock_val; + uint32_t bad_clock = 0; + uint32_t timeout; + uint32_t entry_time; + uint32_t timeout_delta; + uint32_t last_state; + uint32_t state; + uint32_t new_offset; + + smem_clock = smem_alloc(SMEM_SMEM_SLOW_CLOCK_VALUE, + sizeof(uint32_t)); + + if (smem_clock == NULL) { + printk(KERN_ERR "no smem clock\n"); + return 0; + } + + if (!exit_sleep && clock->smem_in_sync) + return 0; + + timeout_delta = (clock->freq >> (7 - clock->shift)); /* 7.8ms */ + + entry_time = msm_read_timer_count(clock); + last_state = state = smsm_get_state(SMSM_STATE_TIME_MASTER_DEM); + timeout = entry_time + timeout_delta; + while ((smsm_get_state(SMSM_STATE_TIME_MASTER_DEM) + & DEM_TIME_MASTER_TIME_PENDING_APPS) + && check_timeout(clock, timeout)) + ; + if ((smsm_get_state(SMSM_STATE_TIME_MASTER_DEM) & + DEM_TIME_MASTER_TIME_PENDING_APPS)) { + printk(KERN_INFO "get_smem_clock: invalid start state %x " + "clock %u in %d ticks\n", + state, *smem_clock, + msm_read_timer_count(clock) - entry_time); + bad_clock = *smem_clock; + } + entry_time = msm_read_timer_count(clock); + timeout = entry_time + timeout_delta; + smsm_change_state(SMSM_STATE_APPS_DEM, + DEM_TIME_SLAVE_TIME_INIT, DEM_TIME_SLAVE_TIME_REQUEST); + while (!(smsm_get_state(SMSM_STATE_TIME_MASTER_DEM) + & DEM_TIME_MASTER_TIME_PENDING_APPS) + && 
check_timeout(clock, timeout)) + ; + if (!(smsm_get_state(SMSM_STATE_TIME_MASTER_DEM) & + DEM_TIME_MASTER_TIME_PENDING_APPS)) { + printk(KERN_INFO "get_smem_clock: invalid start state %x " + "clock %u in %d ticks\n", + state, *smem_clock, + msm_read_timer_count(clock) - entry_time); + bad_clock = *smem_clock; + } + smsm_change_state(SMSM_STATE_APPS_DEM, + DEM_TIME_SLAVE_TIME_REQUEST, DEM_TIME_SLAVE_TIME_POLL); + do { + smem_clock_val = *smem_clock; + state = smsm_get_state(SMSM_STATE_TIME_MASTER_DEM); + if (state != last_state) { + last_state = state; + if (msm_timer_debug_mask & MSM_TIMER_DEBUG_SYNC_STATE) + pr_info("get_smem_clock: state %x clock %u\n", + state, smem_clock_val); + } + } while ((!smem_clock_val || smem_clock_val == bad_clock) + && check_timeout(clock, timeout)); + if (smem_clock_val && smem_clock_val != bad_clock) { + new_offset = smem_clock_val - msm_read_timer_count(clock); + if (clock->offset + clock->smem_offset != new_offset) { + if (exit_sleep) + clock->offset = new_offset - clock->smem_offset; + else + clock->smem_offset = new_offset - clock->offset; + clock->smem_in_sync = 1; + if (msm_timer_debug_mask & MSM_TIMER_DEBUG_SYNC_UPDATE) + printk(KERN_INFO "get_smem_clock: state %x " + "clock %u new offset %u+%u\n", + state, smem_clock_val, + clock->offset, clock->smem_offset); + } else if (msm_timer_debug_mask & MSM_TIMER_DEBUG_SYNC) { + printk(KERN_INFO "get_smem_clock: state %x " + "clock %u offset %u+%u\n", + state, smem_clock_val, + clock->offset, clock->smem_offset); + } + } else { + printk(KERN_INFO "get_smem_clock: timeout state %x clock %u " + "in %d ticks\n", state, *smem_clock, + msm_read_timer_count(clock) - entry_time); + } + smsm_change_state(SMSM_STATE_APPS_DEM, + DEM_TIME_SLAVE_TIME_POLL, DEM_TIME_SLAVE_TIME_INIT); +#if 1 /* debug */ + entry_time = msm_read_timer_count(clock); + timeout = entry_time + timeout_delta; + while ((smsm_get_state(SMSM_STATE_TIME_MASTER_DEM) + & DEM_TIME_MASTER_TIME_PENDING_APPS) + && check_timeout(clock, timeout)) + ; + if (smsm_get_state(SMSM_STATE_TIME_MASTER_DEM) & + DEM_TIME_MASTER_TIME_PENDING_APPS) + printk(KERN_INFO "get_smem_clock: exit timeout state %x " + "clock %u in %d ticks\n", state, *smem_clock, + msm_read_timer_count(clock) - entry_time); +#endif + return smem_clock_val; +} + +#endif + +static void msm_timer_reactivate_alarm(struct msm_clock *clock) +{ + long alarm_delta = clock->alarm_vtime - clock->offset - + msm_read_timer_count(clock); + if (alarm_delta < (long)clock->write_delay + 4) + alarm_delta = clock->write_delay + 4; + while (msm_timer_set_next_event(alarm_delta, &clock->clockevent)) + ; +} + +int64_t msm_timer_enter_idle(void) +{ + struct msm_clock *clock = msm_active_clock; + uint32_t alarm; + uint32_t count; + int32_t delta; + + if (clock != &msm_clocks[MSM_CLOCK_GPT] || msm_fast_timer_enabled) + return 0; + + msm_timer_sync_smem_clock(0); + + count = msm_read_timer_count(clock); + if (clock->stopped++ == 0) + clock->stopped_tick = (count + clock->offset) >> clock->shift; + alarm = clock->alarm_vtime - clock->offset; + delta = alarm - count; + if (delta <= -(int32_t)((clock->freq << clock->shift) >> 10)) { + /* timer should have triggered 1ms ago */ + printk(KERN_ERR "msm_timer_enter_idle: timer late %d, " + "reprogram it\n", delta); + msm_timer_reactivate_alarm(clock); + } + if (delta <= 0) + return 0; + return clocksource_cyc2ns((alarm - count) >> clock->shift, + clock->clocksource.mult, clock->clocksource.shift); +} + +void msm_timer_exit_idle(int low_power) +{ + struct msm_clock *clock = 
msm_active_clock; + uint32_t smem_clock; + + if (clock != &msm_clocks[MSM_CLOCK_GPT]) + return; + + if (low_power) { + if (!(readl(clock->regbase + TIMER_ENABLE) & TIMER_ENABLE_EN)) { + writel(TIMER_ENABLE_EN, clock->regbase + TIMER_ENABLE); + smem_clock = msm_timer_sync_smem_clock(1); + } + msm_timer_reactivate_alarm(clock); + } + clock->stopped--; +} + +unsigned long long sched_clock(void) +{ + static cycle_t saved_ticks; + static int saved_ticks_valid; + static unsigned long long base; + static unsigned long long last_result; + + unsigned long irq_flags; + static cycle_t last_ticks; + cycle_t ticks; + static unsigned long long result; + struct clocksource *cs; + struct msm_clock *clock = msm_active_clock; + + local_irq_save(irq_flags); + if (clock) { + cs = &clock->clocksource; + + last_ticks = saved_ticks; + saved_ticks = ticks = cs->read(cs); + if (!saved_ticks_valid) { + saved_ticks_valid = 1; + last_ticks = ticks; + base -= clocksource_cyc2ns(ticks, cs->mult, cs->shift); + } + if (ticks < last_ticks) { + base += clocksource_cyc2ns(cs->mask, + cs->mult, cs->shift); + base += clocksource_cyc2ns(1, cs->mult, cs->shift); + } + last_result = result = + clocksource_cyc2ns(ticks, cs->mult, cs->shift) + base; + } else { + base = result = last_result; + saved_ticks_valid = 0; + } + local_irq_restore(irq_flags); + return result; +} + +#ifdef CONFIG_MSM7X00A_USE_GP_TIMER + #define DG_TIMER_RATING 100 +#else + #define DG_TIMER_RATING 300 +#endif + static struct msm_clock msm_clocks[] = { [MSM_CLOCK_GPT] = { .clockevent = { @@ -166,42 +566,51 @@ static struct msm_clock msm_clocks[] = { .clocksource = { .name = "gp_timer", .rating = 200, - .read = msm_read_timer_count, + .read = msm_gpt_read, .mask = CLOCKSOURCE_MASK(32), + .shift = 17, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }, .irq = { .name = "gp_timer", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_TRIGGER_RISING, + .flags = IRQF_DISABLED | IRQF_TIMER | + IRQF_TRIGGER_RISING, .handler = msm_timer_interrupt, .dev_id = &msm_clocks[0].clockevent, .irq = INT_GP_TIMER_EXP }, .regbase = MSM_GPT_BASE, .freq = GPT_HZ, - .local_counter = MSM_GPT_BASE + TIMER_COUNT_VAL, - .global_counter = MSM_GPT_BASE + TIMER_COUNT_VAL + - MSM_TMR_GLOBAL, + .flags = +#ifdef CONFIG_ARCH_MSM_ARM11 + MSM_CLOCK_FLAGS_UNSTABLE_COUNT | + MSM_CLOCK_FLAGS_ODD_MATCH_WRITE | + MSM_CLOCK_FLAGS_DELAYED_WRITE_POST | +#endif + 0, + .write_delay = 9, }, [MSM_CLOCK_DGT] = { .clockevent = { .name = "dg_timer", .features = CLOCK_EVT_FEAT_ONESHOT, .shift = 32 + MSM_DGT_SHIFT, - .rating = 300, + .rating = DG_TIMER_RATING, .set_next_event = msm_timer_set_next_event, .set_mode = msm_timer_set_mode, }, .clocksource = { .name = "dg_timer", - .rating = 300, - .read = msm_read_timer_count, - .mask = CLOCKSOURCE_MASK((32 - MSM_DGT_SHIFT)), + .rating = DG_TIMER_RATING, + .read = msm_dgt_read, + .mask = CLOCKSOURCE_MASK((32-MSM_DGT_SHIFT)), + .shift = 24 - MSM_DGT_SHIFT, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }, .irq = { .name = "dg_timer", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_TRIGGER_RISING, + .flags = IRQF_DISABLED | IRQF_TIMER | + IRQF_TRIGGER_RISING, .handler = msm_timer_interrupt, .dev_id = &msm_clocks[1].clockevent, .irq = INT_DEBUG_TIMER_EXP @@ -209,18 +618,75 @@ static struct msm_clock msm_clocks[] = { .regbase = MSM_DGT_BASE, .freq = DGT_HZ >> MSM_DGT_SHIFT, .shift = MSM_DGT_SHIFT, - .local_counter = MSM_DGT_BASE + TIMER_COUNT_VAL, - .global_counter = MSM_DGT_BASE + TIMER_COUNT_VAL + - MSM_TMR_GLOBAL, + .write_delay = 2, } }; +/** + * msm_enable_fast_timer - Enable fast timer + * + * 
Prevents low power idle, but the caller must call msm_disable_fast_timer + * before suspend completes. + * Reference counted. + */ +void msm_enable_fast_timer(void) +{ + u32 max; + unsigned long irq_flags; + struct msm_clock *clock = &msm_clocks[MSM_CLOCK_DGT]; + + spin_lock_irqsave(&msm_fast_timer_lock, irq_flags); + if (msm_fast_timer_enabled++) + goto done; + if (msm_active_clock == &msm_clocks[MSM_CLOCK_DGT]) { + pr_warning("msm_enable_fast_timer: timer already in use, " + "returned time will jump when hardware timer wraps\n"); + goto done; + } + max = (clock->clockevent.mult >> (clock->clockevent.shift - 32)) - 1; + writel(max, clock->regbase + TIMER_MATCH_VAL); + writel(TIMER_ENABLE_EN | TIMER_ENABLE_CLR_ON_MATCH_EN, + clock->regbase + TIMER_ENABLE); +done: + spin_unlock_irqrestore(&msm_fast_timer_lock, irq_flags); +} + +/** + * msm_disable_fast_timer - Disable fast timer + */ +void msm_disable_fast_timer(void) +{ + unsigned long irq_flags; + struct msm_clock *clock = &msm_clocks[MSM_CLOCK_DGT]; + + spin_lock_irqsave(&msm_fast_timer_lock, irq_flags); + if (!WARN(!msm_fast_timer_enabled, "msm_disable_fast_timer underflow") + && !--msm_fast_timer_enabled + && msm_active_clock != &msm_clocks[MSM_CLOCK_DGT]) + writel(0, clock->regbase + TIMER_ENABLE); + spin_unlock_irqrestore(&msm_fast_timer_lock, irq_flags); +} + +/** + * msm_read_fast_timer - Read fast timer + * + * Returns a 32-bit nanosecond time value. + */ +u32 msm_read_fast_timer(void) +{ + cycle_t ticks; + struct msm_clock *clock = &msm_clocks[MSM_CLOCK_DGT]; + ticks = msm_read_timer_count(clock) >> MSM_DGT_SHIFT; + return clocksource_cyc2ns(ticks, clock->clocksource.mult, + clock->clocksource.shift); +} + static void __init msm_timer_init(void) { int i; int res; -#ifdef CONFIG_ARCH_MSM_SCORPIONMP +#ifdef CONFIG_ARCH_MSM8X60 writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL); #endif @@ -231,16 +697,19 @@ static void __init msm_timer_init(void) writel(0, clock->regbase + TIMER_ENABLE); writel(0, clock->regbase + TIMER_CLEAR); writel(~0, clock->regbase + TIMER_MATCH_VAL); + while (msm_read_timer_count(clock)) ; /* wait for clock to clear */ ce->mult = div_sc(clock->freq, NSEC_PER_SEC, ce->shift); /* allow at least 10 seconds to notice that the timer wrapped */ ce->max_delta_ns = clockevent_delta2ns(0xf0000000 >> clock->shift, ce); - /* 4 gets rounded down to 3 */ - ce->min_delta_ns = clockevent_delta2ns(4, ce); + /* ticks gets rounded down by one */ + ce->min_delta_ns = + clockevent_delta2ns(clock->write_delay + 4, ce); ce->cpumask = cpumask_of(0); - res = clocksource_register_hz(cs, clock->freq); + cs->mult = clocksource_hz2mult(clock->freq, cs->shift); + res = clocksource_register(cs); if (res) printk(KERN_ERR "msm_timer_init: clocksource_register " "failed for %s\n", cs->name); @@ -254,48 +723,6 @@ static void __init msm_timer_init(void) } } -#ifdef CONFIG_SMP -void __cpuinit local_timer_setup(struct clock_event_device *evt) -{ - struct msm_clock *clock = &msm_clocks[MSM_GLOBAL_TIMER]; - - /* Use existing clock_event for cpu 0 */ - if (!smp_processor_id()) - return; - - writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL); - - if (!local_clock_event) { - writel(0, clock->regbase + TIMER_ENABLE); - writel(0, clock->regbase + TIMER_CLEAR); - writel(~0, clock->regbase + TIMER_MATCH_VAL); - } - evt->irq = clock->irq.irq; - evt->name = "local_timer"; - evt->features = CLOCK_EVT_FEAT_ONESHOT; - evt->rating = clock->clockevent.rating; - evt->set_mode = msm_timer_set_mode; - evt->set_next_event = msm_timer_set_next_event; - evt->shift 
= clock->clockevent.shift; - evt->mult = div_sc(clock->freq, NSEC_PER_SEC, evt->shift); - evt->max_delta_ns = - clockevent_delta2ns(0xf0000000 >> clock->shift, evt); - evt->min_delta_ns = clockevent_delta2ns(4, evt); - - local_clock_event = evt; - - gic_enable_ppi(clock->irq.irq); - - clockevents_register_device(evt); -} - -inline int local_timer_ack(void) -{ - return 1; -} - -#endif - struct sys_timer msm_timer = { .init = msm_timer_init }; diff --git a/arch/arm/mach-pxa/include/mach/entry-macro.S b/arch/arm/mach-pxa/include/mach/entry-macro.S index a73bc86a3c263..260c0c17692a0 100644 --- a/arch/arm/mach-pxa/include/mach/entry-macro.S +++ b/arch/arm/mach-pxa/include/mach/entry-macro.S @@ -7,45 +7,9 @@ * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ -#include -#include .macro disable_fiq .endm - .macro get_irqnr_preamble, base, tmp - .endm - .macro arch_ret_to_user, tmp1, tmp2 .endm - - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - mrc p15, 0, \tmp, c0, c0, 0 @ CPUID - mov \tmp, \tmp, lsr #13 - and \tmp, \tmp, #0x7 @ Core G - cmp \tmp, #1 - bhi 1002f - - @ Core Generation 1 (PXA25x) - mov \base, #io_p2v(0x40000000) @ IIR Ctl = 0x40d00000 - add \base, \base, #0x00d00000 - ldr \irqstat, [\base, #0] @ ICIP - ldr \irqnr, [\base, #4] @ ICMR - - ands \irqnr, \irqstat, \irqnr - beq 1001f - rsb \irqstat, \irqnr, #0 - and \irqstat, \irqstat, \irqnr - clz \irqnr, \irqstat - rsb \irqnr, \irqnr, #(31 + PXA_IRQ(0)) - b 1001f -1002: - @ Core Generation 2 (PXA27x) or Core Generation 3 (PXA3xx) - mrc p6, 0, \irqstat, c5, c0, 0 @ ICHP - tst \irqstat, #0x80000000 - beq 1001f - bic \irqstat, \irqstat, #0x80000000 - mov \irqnr, \irqstat, lsr #16 - add \irqnr, \irqnr, #(PXA_IRQ(0)) -1001: - .endm diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c index f62bb4c793bdd..7c3fb071ddd69 100644 --- a/arch/arm/mach-s3c2440/mach-mini2440.c +++ b/arch/arm/mach-s3c2440/mach-mini2440.c @@ -506,6 +506,11 @@ static struct i2c_board_info mini2440_i2c_devs[] __initdata = { }, }; +static struct platform_device uda1340_codec = { + .name = "uda134x-codec", + .id = -1, +}; + static struct platform_device *mini2440_devices[] __initdata = { &s3c_device_ohci, &s3c_device_wdt, @@ -521,7 +526,9 @@ static struct platform_device *mini2440_devices[] __initdata = { &s3c_device_nand, &s3c_device_sdi, &s3c_device_iis, + &uda1340_codec, &mini2440_audio, + &samsung_asoc_dma, }; static void __init mini2440_map_io(void) diff --git a/arch/arm/mach-s5pv210/cpufreq.c b/arch/arm/mach-s5pv210/cpufreq.c index a6f22920a2c20..22046e2f53c2a 100644 --- a/arch/arm/mach-s5pv210/cpufreq.c +++ b/arch/arm/mach-s5pv210/cpufreq.c @@ -390,8 +390,7 @@ static int s5pv210_target(struct cpufreq_policy *policy, } #ifdef CONFIG_PM -static int s5pv210_cpufreq_suspend(struct cpufreq_policy *policy, - pm_message_t pmsg) +static int s5pv210_cpufreq_suspend(struct cpufreq_policy *policy) { return 0; } diff --git a/arch/arm/mach-s5pv310/cpufreq.c b/arch/arm/mach-s5pv310/cpufreq.c index b04cbc731128f..7c08ad7d88872 100644 --- a/arch/arm/mach-s5pv310/cpufreq.c +++ b/arch/arm/mach-s5pv310/cpufreq.c @@ -458,8 +458,7 @@ static int s5pv310_target(struct cpufreq_policy *policy, } #ifdef CONFIG_PM -static int s5pv310_cpufreq_suspend(struct cpufreq_policy *policy, - pm_message_t pmsg) +static int s5pv310_cpufreq_suspend(struct cpufreq_policy *policy) { return 0; } diff --git a/arch/arm/mach-shmobile/include/mach/entry-macro.S 
b/arch/arm/mach-shmobile/include/mach/entry-macro.S index d791f10eeac7e..2a57b2964ee91 100644 --- a/arch/arm/mach-shmobile/include/mach/entry-macro.S +++ b/arch/arm/mach-shmobile/include/mach/entry-macro.S @@ -18,17 +18,5 @@ .macro disable_fiq .endm - .macro get_irqnr_preamble, base, tmp - .endm - - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp - .endm - - .macro test_for_ipi, irqnr, irqstat, base, tmp - .endm - - .macro test_for_ltirq, irqnr, irqstat, base, tmp - .endm - .macro arch_ret_to_user, tmp1, tmp2 .endm diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index e4509bae8fc48..74d1895a7452b 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -578,6 +578,9 @@ config CPU_TLB_V6 config CPU_TLB_V7 bool +config EMULATE_DOMAIN_MANAGER_V7 + bool + config VERIFY_PERMISSION_FAULT bool endif @@ -730,6 +733,19 @@ config CPU_DCACHE_SIZE If your SoC is configured to have a different size, define the value here with proper conditions. +config CPU_CACHE_ERR_REPORT + bool "Report errors in the L1 and L2 caches" + depends on ARCH_MSM_SCORPION + default n + help + The Scorpion processor supports reporting L2 errors, L1 icache parity + errors, and L1 dcache parity errors as imprecise external aborts. If + this option is not enabled these errors will go unreported and data + corruption will occur. + + Say Y here to have errors in the L1 and L2 caches reported as + imprecise data aborts. + config CPU_DCACHE_WRITETHROUGH bool "Force write through D-cache" depends on (CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_FA526) && !CPU_DCACHE_DISABLE @@ -813,7 +829,8 @@ config CACHE_L2X0 depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \ REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || \ ARCH_NOMADIK || ARCH_OMAP4 || ARCH_S5PV310 || ARCH_TEGRA || \ - ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || ARCH_SHMOBILE + ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || ARCH_SHMOBILE || \ + ARCH_MSM7X27 || ARCH_MSM9615 default y select OUTER_CACHE select OUTER_CACHE_SYNC @@ -876,3 +893,30 @@ config ARCH_HAS_BARRIERS help This option allows the use of custom mandatory barriers included via the mach/barriers.h file. + +config VCM_MM + bool + +config VCM + bool "Virtual Contiguous Memory (VCM) Layer" + depends on MMU + select GENERIC_ALLOCATOR + select VCM_MM + default n + help + Virtual Contiguous Memory layer. This is the layer that is intended to + replace PMEM. + + If you don't know what this is, say N here. + +config STRICT_MEMORY_RWX + bool "restrict kernel memory permissions as much as possible" + default n + help + If this is set, kernel text will be made RX, kernel data and stack + RW, rodata R (otherwise all of the kernel 1-to-1 mapping is + made RWX). + The tradeoff is that several sections are padded to + 1M boundaries (because their permissions are different and + splitting the 1M pages into 4K ones causes TLB performance + problems), wasting memory. 
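To make the STRICT_MEMORY_RWX help text above concrete, here is a minimal sketch of the mapping split that option implies, assuming a lowmem bank that contains the kernel image. It mirrors the map_lowmem() hunk in arch/arm/mm/mmu.c later in this patch; the function name map_kernel_rwx_sketch() is invented for illustration only, while the MT_MEMORY_R/RX/RW types and create_mapping() helper are the ones added elsewhere in this series.

/*
 * Minimal sketch (not part of the patch): split a lowmem bank that
 * contains the kernel image into per-permission section mappings.
 */
static void __init map_kernel_rwx_sketch(phys_addr_t start, phys_addr_t end)
{
        struct map_desc map;

        /* RAM below the kernel text keeps the default MT_MEMORY type */
        map.pfn = __phys_to_pfn(start);
        map.virtual = __phys_to_virt(start);
        map.length = (unsigned long)_text - map.virtual;
        map.type = MT_MEMORY;
        create_mapping(&map);

        /* kernel text: readable and executable, never writable */
        map.pfn = __phys_to_pfn(__pa(_text));
        map.virtual = (unsigned long)_text;
        map.length = __start_rodata - _text;
        map.type = MT_MEMORY_RX;
        create_mapping(&map);

        /* rodata: read-only, non-executable */
        map.pfn = __phys_to_pfn(__pa(__start_rodata));
        map.virtual = (unsigned long)__start_rodata;
        map.length = _sdata - __start_rodata;
        map.type = MT_MEMORY_R;
        create_mapping(&map);

        /* data, stacks and the rest of the bank: read-write, non-executable */
        map.pfn = __phys_to_pfn(__pa(_sdata));
        map.virtual = (unsigned long)_sdata;
        map.length = __phys_to_virt(end) - (unsigned long)_sdata;
        map.type = MT_MEMORY_RW;
        create_mapping(&map);
}

Because each of these regions needs different section attributes, their boundaries end up aligned to 1MB sections, which is the padding cost the help text mentions.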
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 00d74a04af3af..47b8c289104a8 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -91,6 +91,7 @@ obj-$(CONFIG_CPU_MOHAWK) += proc-mohawk.o obj-$(CONFIG_CPU_FEROCEON) += proc-feroceon.o obj-$(CONFIG_CPU_V6) += proc-v6.o obj-$(CONFIG_CPU_V7) += proc-v7.o +obj-$(CONFIG_EMULATE_DOMAIN_MANAGER_V7) += emulate_domain_manager-v7.o AFLAGS_proc-v6.o :=-Wa,-march=armv6 AFLAGS_proc-v7.o :=-Wa,-march=armv7-a @@ -99,3 +100,5 @@ obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o obj-$(CONFIG_CACHE_TAUROS2) += cache-tauros2.o +obj-$(CONFIG_VCM) += vcm.o vcm_alloc.o +obj-$(CONFIG_VCM_MM) += vcm_mm.o diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S index 1fa6f71470de5..ad953fe4ef50d 100644 --- a/arch/arm/mm/cache-fa.S +++ b/arch/arm/mm/cache-fa.S @@ -168,7 +168,7 @@ ENTRY(fa_flush_kern_dcache_area) * - start - virtual start address * - end - virtual end address */ -fa_dma_inv_range: +ENTRY(fa_dma_inv_range) tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D entry @@ -191,7 +191,7 @@ fa_dma_inv_range: * - start - virtual start address * - end - virtual end address */ -fa_dma_clean_range: +ENTRY(fa_dma_clean_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -253,5 +253,7 @@ ENTRY(fa_cache_fns) .long fa_flush_kern_dcache_area .long fa_dma_map_area .long fa_dma_unmap_area + .long fa_dma_inv_range + .long fa_dma_clean_range .long fa_dma_flush_range .size fa_cache_fns, . - fa_cache_fns diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index f2ce38e085d21..becaf13db37f6 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -2,6 +2,7 @@ * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support * * Copyright (C) 2007 ARM Limited + * Copyright (c) 2009, 2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -26,6 +27,8 @@ #define CACHE_LINE_SIZE 32 static void __iomem *l2x0_base; +static uint32_t aux_ctrl_save; +static uint32_t data_latency_ctrl; static DEFINE_SPINLOCK(l2x0_lock); static uint32_t l2x0_way_mask; /* Bitmask of active ways */ static uint32_t l2x0_size; @@ -73,18 +76,24 @@ static inline void l2x0_inv_line(unsigned long addr) writel_relaxed(addr, base + L2X0_INV_LINE_PA); } -#ifdef CONFIG_PL310_ERRATA_588369 -static void debug_writel(unsigned long val) -{ - extern void omap_smc1(u32 fn, u32 arg); +#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) - /* - * Texas Instrument secure monitor api to modify the - * PL310 Debug Control Register. 
- */ - omap_smc1(0x100, val); +#define debug_writel(val) outer_cache.set_debug(val) + +static void l2x0_set_debug(unsigned long val) +{ + writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL); +} +#else +/* Optimised out for non-errata case */ +static inline void debug_writel(unsigned long val) +{ } +#define l2x0_set_debug NULL +#endif + +#ifdef CONFIG_PL310_ERRATA_588369 static inline void l2x0_flush_line(unsigned long addr) { void __iomem *base = l2x0_base; @@ -97,11 +106,6 @@ static inline void l2x0_flush_line(unsigned long addr) } #else -/* Optimised out for non-errata case */ -static inline void debug_writel(unsigned long val) -{ -} - static inline void l2x0_flush_line(unsigned long addr) { void __iomem *base = l2x0_base; @@ -110,13 +114,9 @@ static inline void l2x0_flush_line(unsigned long addr) } #endif -static void l2x0_cache_sync(void) +void l2x0_cache_sync(void) { - unsigned long flags; - - spin_lock_irqsave(&l2x0_lock, flags); cache_sync(); - spin_unlock_irqrestore(&l2x0_lock, flags); } static void l2x0_flush_all(void) @@ -125,9 +125,11 @@ static void l2x0_flush_all(void) /* clean all ways */ spin_lock_irqsave(&l2x0_lock, flags); + debug_writel(0x03); writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY); cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask); cache_sync(); + debug_writel(0x00); spin_unlock_irqrestore(&l2x0_lock, flags); } @@ -196,6 +198,27 @@ static void l2x0_inv_range(unsigned long start, unsigned long end) spin_unlock_irqrestore(&l2x0_lock, flags); } +static void l2x0_inv_range_atomic(unsigned long start, unsigned long end) +{ + unsigned long addr; + + if (start & (CACHE_LINE_SIZE - 1)) { + start &= ~(CACHE_LINE_SIZE - 1); + writel_relaxed(start, l2x0_base + L2X0_CLEAN_INV_LINE_PA); + start += CACHE_LINE_SIZE; + } + + if (end & (CACHE_LINE_SIZE - 1)) { + end &= ~(CACHE_LINE_SIZE - 1); + writel_relaxed(end, l2x0_base + L2X0_CLEAN_INV_LINE_PA); + } + + for (addr = start; addr < end; addr += CACHE_LINE_SIZE) + writel_relaxed(addr, l2x0_base + L2X0_INV_LINE_PA); + + mb(); +} + static void l2x0_clean_range(unsigned long start, unsigned long end) { void __iomem *base = l2x0_base; @@ -226,6 +249,17 @@ static void l2x0_clean_range(unsigned long start, unsigned long end) spin_unlock_irqrestore(&l2x0_lock, flags); } +static void l2x0_clean_range_atomic(unsigned long start, unsigned long end) +{ + unsigned long addr; + + start &= ~(CACHE_LINE_SIZE - 1); + for (addr = start; addr < end; addr += CACHE_LINE_SIZE) + writel_relaxed(addr, l2x0_base + L2X0_CLEAN_LINE_PA); + + mb(); +} + static void l2x0_flush_range(unsigned long start, unsigned long end) { void __iomem *base = l2x0_base; @@ -258,6 +292,17 @@ static void l2x0_flush_range(unsigned long start, unsigned long end) spin_unlock_irqrestore(&l2x0_lock, flags); } +void l2x0_flush_range_atomic(unsigned long start, unsigned long end) +{ + unsigned long addr; + + start &= ~(CACHE_LINE_SIZE - 1); + for (addr = start; addr < end; addr += CACHE_LINE_SIZE) + writel_relaxed(addr, l2x0_base + L2X0_CLEAN_INV_LINE_PA); + + mb(); +} + static void l2x0_disable(void) { unsigned long flags; @@ -269,15 +314,19 @@ static void l2x0_disable(void) void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) { - __u32 aux; + __u32 aux, bits; __u32 cache_id; __u32 way_size = 0; int ways; const char *type; l2x0_base = base; - cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); + + bits = readl_relaxed(l2x0_base + L2X0_CTRL); + bits &= ~0x01; /* clear bit 0 */ + writel_relaxed(bits, l2x0_base + L2X0_CTRL); + aux = 
readl_relaxed(l2x0_base + L2X0_AUX_CTRL); aux &= aux_mask; @@ -302,7 +351,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) type = "L2x0 series"; break; } - + writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL); l2x0_way_mask = (1 << ways) - 1; /* @@ -312,31 +361,79 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) way_size = 1 << (way_size + 3); l2x0_size = ways * way_size * SZ_1K; - /* - * Check if l2x0 controller is already enabled. - * If you are booting from non-secure mode - * accessing the below registers will fault. - */ - if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { - - /* l2x0 controller is disabled */ - writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL); + l2x0_inv_all(); - l2x0_inv_all(); + /* enable L2X0 */ + bits = readl_relaxed(l2x0_base + L2X0_CTRL); + bits |= 0x01; /* set bit 0 */ + writel_relaxed(bits, l2x0_base + L2X0_CTRL); - /* enable L2X0 */ - writel_relaxed(1, l2x0_base + L2X0_CTRL); + switch (cache_id & L2X0_CACHE_ID_PART_MASK) { + case L2X0_CACHE_ID_PART_L220: + outer_cache.inv_range = l2x0_inv_range; + outer_cache.clean_range = l2x0_clean_range; + outer_cache.flush_range = l2x0_flush_range; + printk(KERN_INFO "L220 cache controller enabled\n"); + break; + case L2X0_CACHE_ID_PART_L310: + outer_cache.inv_range = l2x0_inv_range; + outer_cache.clean_range = l2x0_clean_range; + outer_cache.flush_range = l2x0_flush_range; + printk(KERN_INFO "L310 cache controller enabled\n"); + break; + case L2X0_CACHE_ID_PART_L210: + default: + outer_cache.inv_range = l2x0_inv_range_atomic; + outer_cache.clean_range = l2x0_clean_range_atomic; + outer_cache.flush_range = l2x0_flush_range_atomic; + printk(KERN_INFO "L210 cache controller enabled\n"); + break; } - outer_cache.inv_range = l2x0_inv_range; - outer_cache.clean_range = l2x0_clean_range; - outer_cache.flush_range = l2x0_flush_range; outer_cache.sync = l2x0_cache_sync; + outer_cache.flush_all = l2x0_flush_all; outer_cache.inv_all = l2x0_inv_all; outer_cache.disable = l2x0_disable; + outer_cache.set_debug = l2x0_set_debug; + mb(); printk(KERN_INFO "%s cache controller enabled\n", type); printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n", ways, cache_id, aux, l2x0_size); } + +void l2x0_suspend(void) +{ + /* Save aux control register value */ + aux_ctrl_save = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); + data_latency_ctrl = readl_relaxed(l2x0_base + L2X0_DATA_LATENCY_CTRL); + /* Flush all cache */ + l2x0_flush_all(); + /* Disable the cache */ + writel_relaxed(0, l2x0_base + L2X0_CTRL); + + /* Memory barrier */ + dmb(); +} + +void l2x0_resume(int collapsed) +{ + if (collapsed) { + /* Disable the cache */ + writel_relaxed(0, l2x0_base + L2X0_CTRL); + + /* Restore aux control register value */ + writel_relaxed(aux_ctrl_save, l2x0_base + L2X0_AUX_CTRL); + writel_relaxed(data_latency_ctrl, l2x0_base + + L2X0_DATA_LATENCY_CTRL); + + /* Invalidate the cache */ + l2x0_inv_all(); + } + + /* Enable the cache */ + writel_relaxed(1, l2x0_base + L2X0_CTRL); + + mb(); +} diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S index 2e2bc406a18d6..64f739eaa4c67 100644 --- a/arch/arm/mm/cache-v3.S +++ b/arch/arm/mm/cache-v3.S @@ -92,6 +92,20 @@ ENTRY(v3_coherent_user_range) ENTRY(v3_flush_kern_dcache_area) /* FALLTHROUGH */ +/* + * dma_inv_range(start, end) + * + * Invalidate (discard) the specified virtual address range. + * May not write back any entries. If 'start' or 'end' + * are not cache line aligned, those lines must be written + * back. 
+ * + * - start - virtual start address + * - end - virtual end address + */ +ENTRY(v3_dma_inv_range) + /* FALLTHROUGH */ + /* * dma_flush_range(start, end) * @@ -103,6 +117,17 @@ ENTRY(v3_flush_kern_dcache_area) ENTRY(v3_dma_flush_range) mov r0, #0 mcr p15, 0, r0, c7, c0, 0 @ flush ID cache + /* FALLTHROUGH */ + +/* + * dma_clean_range(start, end) + * + * Clean (write back) the specified virtual address range. + * + * - start - virtual start address + * - end - virtual end address + */ +ENTRY(v3_dma_clean_range) mov pc, lr /* @@ -113,7 +138,7 @@ ENTRY(v3_dma_flush_range) */ ENTRY(v3_dma_unmap_area) teq r2, #DMA_TO_DEVICE - bne v3_dma_flush_range + bne v3_dma_inv_range /* FALLTHROUGH */ /* @@ -140,5 +165,7 @@ ENTRY(v3_cache_fns) .long v3_flush_kern_dcache_area .long v3_dma_map_area .long v3_dma_unmap_area + .long v3_dma_inv_range + .long v3_dma_clean_range .long v3_dma_flush_range .size v3_cache_fns, . - v3_cache_fns diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S index a8fefb523f194..7824cf6e14a38 100644 --- a/arch/arm/mm/cache-v4.S +++ b/arch/arm/mm/cache-v4.S @@ -102,6 +102,20 @@ ENTRY(v4_coherent_user_range) ENTRY(v4_flush_kern_dcache_area) /* FALLTHROUGH */ +/* + * dma_inv_range(start, end) + * + * Invalidate (discard) the specified virtual address range. + * May not write back any entries. If 'start' or 'end' + * are not cache line aligned, those lines must be written + * back. + * + * - start - virtual start address + * - end - virtual end address + */ +ENTRY(v4_dma_inv_range) + /* FALLTHROUGH */ + /* * dma_flush_range(start, end) * @@ -115,6 +129,17 @@ ENTRY(v4_dma_flush_range) mov r0, #0 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache #endif + /* FALLTHROUGH */ + +/* + * dma_clean_range(start, end) + * + * Clean (write back) the specified virtual address range. + * + * - start - virtual start address + * - end - virtual end address + */ +ENTRY(v4_dma_clean_range) mov pc, lr /* @@ -125,7 +150,7 @@ ENTRY(v4_dma_flush_range) */ ENTRY(v4_dma_unmap_area) teq r2, #DMA_TO_DEVICE - bne v4_dma_flush_range + bne v4_dma_inv_range /* FALLTHROUGH */ /* @@ -152,5 +177,7 @@ ENTRY(v4_cache_fns) .long v4_flush_kern_dcache_area .long v4_dma_map_area .long v4_dma_unmap_area + .long v4_dma_inv_range + .long v4_dma_clean_range .long v4_dma_flush_range .size v4_cache_fns, . - v4_cache_fns diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S index d3644db467b74..57b8a95b8a323 100644 --- a/arch/arm/mm/cache-v4wb.S +++ b/arch/arm/mm/cache-v4wb.S @@ -184,7 +184,7 @@ ENTRY(v4wb_coherent_user_range) * - start - virtual start address * - end - virtual end address */ -v4wb_dma_inv_range: +ENTRY(v4wb_dma_inv_range) tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -205,7 +205,7 @@ v4wb_dma_inv_range: * - start - virtual start address * - end - virtual end address */ -v4wb_dma_clean_range: +ENTRY(v4wb_dma_clean_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -264,5 +264,7 @@ ENTRY(v4wb_cache_fns) .long v4wb_flush_kern_dcache_area .long v4wb_dma_map_area .long v4wb_dma_unmap_area + .long v4wb_dma_inv_range + .long v4wb_dma_clean_range .long v4wb_dma_flush_range .size v4wb_cache_fns, . 
- v4wb_cache_fns diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S index 49c2b66cf3dd6..fe9038dc004a8 100644 --- a/arch/arm/mm/cache-v4wt.S +++ b/arch/arm/mm/cache-v4wt.S @@ -153,12 +153,23 @@ ENTRY(v4wt_flush_kern_dcache_area) * - start - virtual start address * - end - virtual end address */ -v4wt_dma_inv_range: +ENTRY(v4wt_dma_inv_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b + /* FALLTHROUGH */ + +/* + * dma_clean_range(start, end) + * + * Clean the specified virtual address range. + * + * - start - virtual start address + * - end - virtual end address + */ +ENTRY(v4wt_dma_clean_range) mov pc, lr /* @@ -208,5 +219,7 @@ ENTRY(v4wt_cache_fns) .long v4wt_flush_kern_dcache_area .long v4wt_dma_map_area .long v4wt_dma_unmap_area + .long v4wt_dma_inv_range + .long v4wt_dma_clean_range .long v4wt_dma_flush_range .size v4wt_cache_fns, . - v4wt_cache_fns diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index 7a4e0aea8ea18..f6851f13b4d40 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S @@ -202,7 +202,7 @@ ENTRY(v6_flush_kern_dcache_area) * - start - virtual start address of region * - end - virtual end address of region */ -v6_dma_inv_range: +ENTRY(v6_dma_inv_range) #ifdef CONFIG_DMA_CACHE_RWFO ldrb r2, [r0] @ read for ownership strb r2, [r0] @ write for ownership @@ -247,7 +247,7 @@ v6_dma_inv_range: * - start - virtual start address of region * - end - virtual end address of region */ -v6_dma_clean_range: +ENTRY(v6_dma_clean_range) bic r0, r0, #D_CACHE_LINE_SIZE - 1 1: #ifdef CONFIG_DMA_CACHE_RWFO @@ -357,5 +357,7 @@ ENTRY(v6_cache_fns) .long v6_flush_kern_dcache_area .long v6_dma_map_area .long v6_dma_unmap_area + .long v6_dma_inv_range + .long v6_dma_clean_range .long v6_dma_flush_range .size v6_cache_fns, . - v6_cache_fns diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index 6136e68ce953d..4eba077dc4dd1 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -240,7 +240,7 @@ ENDPROC(v7_flush_kern_dcache_area) * - start - virtual start address of region * - end - virtual end address of region */ -v7_dma_inv_range: +ENTRY(v7_dma_inv_range) dcache_line_size r2, r3 sub r3, r2, #1 tst r0, r3 @@ -264,7 +264,7 @@ ENDPROC(v7_dma_inv_range) * - start - virtual start address of region * - end - virtual end address of region */ -v7_dma_clean_range: +ENTRY(v7_dma_clean_range) dcache_line_size r2, r3 sub r3, r2, #1 bic r0, r0, r3 @@ -334,5 +334,7 @@ ENTRY(v7_cache_fns) .long v7_flush_kern_dcache_area .long v7_dma_map_area .long v7_dma_unmap_area + .long v7_dma_inv_range + .long v7_dma_clean_range .long v7_dma_flush_range .size v7_cache_fns, . - v7_cache_fns diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 4771dba614481..75fd74e8f88df 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -420,18 +420,22 @@ EXPORT_SYMBOL(dma_free_coherent); void ___dma_single_cpu_to_dev(const void *kaddr, size_t size, enum dma_data_direction dir) { +#ifdef CONFIG_OUTER_CACHE unsigned long paddr; BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1)); +#endif dmac_map_area(kaddr, size, dir); +#ifdef CONFIG_OUTER_CACHE paddr = __pa(kaddr); if (dir == DMA_FROM_DEVICE) { outer_inv_range(paddr, paddr + size); } else { outer_clean_range(paddr, paddr + size); } +#endif /* FIXME: non-speculating: flush on bidirectional mappings? 
*/ } EXPORT_SYMBOL(___dma_single_cpu_to_dev); @@ -439,6 +443,7 @@ EXPORT_SYMBOL(___dma_single_cpu_to_dev); void ___dma_single_dev_to_cpu(const void *kaddr, size_t size, enum dma_data_direction dir) { +#ifdef CONFIG_OUTER_CACHE BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1)); /* FIXME: non-speculating: not required */ @@ -447,7 +452,7 @@ void ___dma_single_dev_to_cpu(const void *kaddr, size_t size, unsigned long paddr = __pa(kaddr); outer_inv_range(paddr, paddr + size); } - +#endif dmac_unmap_area(kaddr, size, dir); } EXPORT_SYMBOL(___dma_single_dev_to_cpu); diff --git a/arch/arm/mm/emulate_domain_manager-v7.c b/arch/arm/mm/emulate_domain_manager-v7.c new file mode 100644 index 0000000000000..3797e211a4d8a --- /dev/null +++ b/arch/arm/mm/emulate_domain_manager-v7.c @@ -0,0 +1,345 @@ +/* + * Basic implementation of a SW emulation of the domain manager feature in + * ARM architecture. Assumes single processor ARMv7 chipset. + * + * Requires hooks to be alerted to any runtime changes of dacr or MMU context. + * + * Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include + +#define DOMAIN_MANAGER_BITS (0xAAAAAAAA) + +#define DFSR_DOMAIN(dfsr) ((dfsr >> 4) & (16-1)) + +#define FSR_PERMISSION_FAULT(fsr) ((fsr & 0x40D) == 0x00D) +#define FSR_PERMISSION_SECT(fsr) ((fsr & 0x40F) == 0x00D) + +/* ARMv7 MMU HW Macros. Not conveniently defined elsewhere */ +#define MMU_TTB_ADDRESS(x) ((u32 *)(((u32)(x)) & ~((1 << 14) - 1))) +#define MMU_PMD_INDEX(addr) (((u32)addr) >> SECTION_SHIFT) +#define MMU_TABLE_ADDRESS(x) ((u32 *)((x) & ~((1 << 10) - 1))) +#define MMU_TABLE_INDEX(x) ((((u32)x) >> 12) & (256 - 1)) + +/* Convenience Macros */ +#define PMD_IS_VALID(x) (PMD_IS_TABLE(x) || PMD_IS_SECTION(x)) +#define PMD_IS_TABLE(x) ((x & PMD_TYPE_MASK) == PMD_TYPE_TABLE) +#define PMD_IS_SECTION(x) ((x & PMD_TYPE_MASK) == PMD_TYPE_SECT) +#define PMD_IS_SUPERSECTION(x) \ + (PMD_IS_SECTION(x) && ((x & PMD_SECT_SUPER) == PMD_SECT_SUPER)) + +#define PMD_GET_DOMAIN(x) \ + (PMD_IS_TABLE(x) || \ + (PMD_IS_SECTION(x) && !PMD_IS_SUPERSECTION(x)) ? \ + 0 : (x >> 5) & (16-1)) + +#define PTE_IS_LARGE(x) ((x & PTE_TYPE_MASK) == PTE_TYPE_LARGE) + + +/* Only DOMAIN_MMU_ENTRIES will be granted access simultaneously */ +#define DOMAIN_MMU_ENTRIES (8) + +#define LRU_INC(lru) ((lru + 1) >= DOMAIN_MMU_ENTRIES ? 0 : lru + 1) + + +static DEFINE_SPINLOCK(edm_lock); + +static u32 edm_manager_bits; + +struct domain_entry_save { + u32 *mmu_entry; + u32 *addr; + u32 value; + u16 sect; + u16 size; +}; + +static struct domain_entry_save edm_save[DOMAIN_MMU_ENTRIES]; + +static u32 edm_lru; + + +/* + * Return virtual address of pmd (level 1) entry for addr + * + * This routine walks the ARMv7 page tables in HW. 
+ */ +static inline u32 *__get_pmd_v7(u32 *addr) +{ + u32 *ttb; + + __asm__ __volatile__( + "mrc p15, 0, %0, c2, c0, 0 @ ttbr0\n\t" + : "=r" (ttb) + : + ); + + return __va(MMU_TTB_ADDRESS(ttb) + MMU_PMD_INDEX(addr)); +} + +/* + * Return virtual address of pte (level 2) entry for addr + * + * This routine walks the ARMv7 page tables in HW. + */ +static inline u32 *__get_pte_v7(u32 *addr) +{ + u32 *pmd = __get_pmd_v7(addr); + u32 *table_pa = pmd && PMD_IS_TABLE(*pmd) ? + MMU_TABLE_ADDRESS(*pmd) : 0; + u32 *entry = table_pa ? __va(table_pa[MMU_TABLE_INDEX(addr)]) : 0; + + return entry; +} + +/* + * Invalidate the TLB for a given address for the current context + * + * After manipulating access permissions, TLB invalidation changes are + * observed + */ +static inline void __tlb_invalidate(u32 *addr) +{ + __asm__ __volatile__( + "mrc p15, 0, %%r2, c13, c0, 1 @ contextidr\n\t" + "and %%r2, %%r2, #0xff @ asid\n\t" + "mov %%r3, %0, lsr #12 @ mva[31:12]\n\t" + "orr %%r2, %%r2, %%r3, lsl #12 @ tlb mva and asid\n\t" + "mcr p15, 0, %%r2, c8, c7, 1 @ utlbimva\n\t" + "isb" + : + : "r" (addr) + : "r2", "r3" + ); +} + +/* + * Set HW MMU entry and do required synchronization operations. + */ +static inline void __set_entry(u32 *entry, u32 *addr, u32 value, int size) +{ + int i; + + if (!entry) + return; + + entry = (u32 *)((u32) entry & ~(size * sizeof(u32) - 1)); + + for (i = 0; i < size; i++) + entry[i] = value; + + __asm__ __volatile__( + "mcr p15, 0, %0, c7, c10, 1 @ flush entry\n\t" + "dsb\n\t" + "isb\n\t" + : + : "r" (entry) + ); + __tlb_invalidate(addr); +} + +/* + * Return the number of duplicate entries associated with entry value. + * Supersections and Large page table entries are replicated 16x. + */ +static inline int __entry_size(int sect, int value) +{ + u32 size; + + if (sect) + size = PMD_IS_SUPERSECTION(value) ? 16 : 1; + else + size = PTE_IS_LARGE(value) ? 16 : 1; + + return size; +} + +/* + * Change entry permissions to emulate domain manager access + */ +static inline int __manager_perm(int sect, int value) +{ + u32 edm_value; + + if (sect) { + edm_value = (value & ~(PMD_SECT_APX | PMD_SECT_XN)) | + (PMD_SECT_AP_READ | PMD_SECT_AP_WRITE); + } else { + edm_value = (value & ~(PTE_EXT_APX | PTE_EXT_XN)) | + (PTE_EXT_AP1 | PTE_EXT_AP0); + } + return edm_value; +} + +/* + * Restore original HW MMU entry. Cancels domain manager access + */ +static inline void __restore_entry(int index) +{ + struct domain_entry_save *entry = &edm_save[index]; + u32 edm_value; + + if (!entry->mmu_entry) + return; + + edm_value = __manager_perm(entry->sect, entry->value); + + if (*entry->mmu_entry == edm_value) + __set_entry(entry->mmu_entry, entry->addr, + entry->value, entry->size); + + entry->mmu_entry = 0; +} + +/* + * Modify HW MMU entry to grant domain manager access for a given MMU entry. + * This adds full read, write, and exec access permissions. + */ +static inline void __set_manager(int sect, u32 *addr) +{ + u32 *entry = sect ? __get_pmd_v7(addr) : __get_pte_v7(addr); + u32 value; + u32 edm_value; + u16 size; + + if (!entry) + return; + + value = *entry; + + size = __entry_size(sect, value); + edm_value = __manager_perm(sect, value); + + __set_entry(entry, addr, edm_value, size); + + __restore_entry(edm_lru); + + edm_save[edm_lru].mmu_entry = entry; + edm_save[edm_lru].addr = addr; + edm_save[edm_lru].value = value; + edm_save[edm_lru].sect = sect; + edm_save[edm_lru].size = size; + + edm_lru = LRU_INC(edm_lru); +} + +/* + * Restore original HW MMU entries. 
+ * + * entry - MVA for HW MMU entry + */ +static inline void __restore(void) +{ + if (unlikely(edm_manager_bits)) { + u32 i; + + for (i = 0; i < DOMAIN_MMU_ENTRIES; i++) + __restore_entry(i); + } +} + +/* + * Common abort handler code + * + * If domain manager was actually set, permission fault would not happen. + * Open access permissions to emulate. Save original settings to restore + * later. Return 1 to pretend fault did not happen. + */ +static int __emulate_domain_manager_abort(u32 fsr, u32 far, int dabort) +{ + if (unlikely(FSR_PERMISSION_FAULT(fsr) && edm_manager_bits)) { + int domain = dabort ? DFSR_DOMAIN(fsr) : PMD_GET_DOMAIN(far); + if (edm_manager_bits & domain_val(domain, DOMAIN_MANAGER)) { + unsigned long flags; + + spin_lock_irqsave(&edm_lock, flags); + + __set_manager(FSR_PERMISSION_SECT(fsr), (u32 *) far); + + spin_unlock_irqrestore(&edm_lock, flags); + return 1; + } + } + return 0; +} + +/* + * Change domain setting. + * + * Lock and restore original contents. Extract and save manager bits. Set + * DACR, excluding manager bits. + */ +void emulate_domain_manager_set(u32 domain) +{ + unsigned long flags; + + spin_lock_irqsave(&edm_lock, flags); + + if (edm_manager_bits != (domain & DOMAIN_MANAGER_BITS)) { + __restore(); + edm_manager_bits = domain & DOMAIN_MANAGER_BITS; + } + + __asm__ __volatile__( + "mcr p15, 0, %0, c3, c0, 0 @ set domain\n\t" + "isb" + : + : "r" (domain & ~DOMAIN_MANAGER_BITS) + ); + + spin_unlock_irqrestore(&edm_lock, flags); +} +EXPORT_SYMBOL_GPL(emulate_domain_manager_set); + +/* + * Switch thread context. Restore original contents. + */ +void emulate_domain_manager_switch_mm(unsigned long pgd_phys, + struct mm_struct *mm, + void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *)) +{ + unsigned long flags; + + spin_lock_irqsave(&edm_lock, flags); + + __restore(); + + /* Call underlying kernel handler */ + switch_mm(pgd_phys, mm); + + spin_unlock_irqrestore(&edm_lock, flags); +} +EXPORT_SYMBOL_GPL(emulate_domain_manager_switch_mm); + +/* + * Kernel data_abort hook + */ +int emulate_domain_manager_data_abort(u32 dfsr, u32 dfar) +{ + return __emulate_domain_manager_abort(dfsr, dfar, 1); +} +EXPORT_SYMBOL_GPL(emulate_domain_manager_data_abort); + +/* + * Kernel prefetch_abort hook + */ +int emulate_domain_manager_prefetch_abort(u32 ifsr, u32 ifar) +{ + return __emulate_domain_manager_abort(ifsr, ifar, 0); +} +EXPORT_SYMBOL_GPL(emulate_domain_manager_prefetch_abort); + diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index f10f9bac22069..c86c6a59f67ae 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -23,6 +23,15 @@ #include #include #include +#include +#if defined(CONFIG_ARCH_MSM_SCORPION) && !defined(CONFIG_MSM_SMP) +#include +#include +#endif + +#ifdef CONFIG_EMULATE_DOMAIN_MANAGER_V7 +#include +#endif /* CONFIG_EMULATE_DOMAIN_MANAGER_V7 */ #include "fault.h" @@ -461,6 +470,49 @@ do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs) return 1; } +#if defined(CONFIG_ARCH_MSM_SCORPION) && !defined(CONFIG_MSM_SMP) +#define __str(x) #x +#define MRC(x, v1, v2, v4, v5, v6) do { \ + unsigned int __##x; \ + asm("mrc " __str(v1) ", " __str(v2) ", %0, " __str(v4) ", " \ + __str(v5) ", " __str(v6) "\n" \ + : "=r" (__##x)); \ + pr_info("%s: %s = 0x%.8x\n", __func__, #x, __##x); \ +} while(0) + +#define MSM_TCSR_SPARE2 (MSM_TCSR_BASE + 0x60) + +#endif + +static int +do_imprecise_ext(unsigned long addr, unsigned int fsr, struct pt_regs *regs) +{ +#if defined(CONFIG_ARCH_MSM_SCORPION) && !defined(CONFIG_MSM_SMP) + MRC(ADFSR, p15, 
0, c5, c1, 0); + MRC(DFSR, p15, 0, c5, c0, 0); + MRC(ACTLR, p15, 0, c1, c0, 1); + MRC(EFSR, p15, 7, c15, c0, 1); + MRC(L2SR, p15, 3, c15, c1, 0); + MRC(L2CR0, p15, 3, c15, c0, 1); + MRC(L2CPUESR, p15, 3, c15, c1, 1); + MRC(L2CPUCR, p15, 3, c15, c0, 2); + MRC(SPESR, p15, 1, c9, c7, 0); + MRC(SPCR, p15, 0, c9, c7, 0); + MRC(DMACHSR, p15, 1, c11, c0, 0); + MRC(DMACHESR, p15, 1, c11, c0, 1); + MRC(DMACHCR, p15, 0, c11, c0, 2); + + /* clear out EFSR and ADFSR after fault */ + asm volatile ("mcr p15, 7, %0, c15, c0, 1\n\t" + "mcr p15, 0, %0, c5, c1, 0" + : : "r" (0)); +#endif +#if defined(CONFIG_ARCH_MSM_SCORPION) && !defined(CONFIG_MSM_SMP) + pr_info("%s: TCSR_SPARE2 = 0x%.8x\n", __func__, readl(MSM_TCSR_SPARE2)); +#endif + return 1; +} + static struct fsr_info { int (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs); int sig; @@ -498,7 +550,7 @@ static struct fsr_info { { do_bad, SIGBUS, 0, "unknown 19" }, { do_bad, SIGBUS, 0, "lock abort" }, /* xscale */ { do_bad, SIGBUS, 0, "unknown 21" }, - { do_bad, SIGBUS, BUS_OBJERR, "imprecise external abort" }, /* xscale */ + { do_imprecise_ext, SIGBUS, BUS_OBJERR, "imprecise external abort" }, /* xscale */ { do_bad, SIGBUS, 0, "unknown 23" }, { do_bad, SIGBUS, 0, "dcache parity error" }, /* xscale */ { do_bad, SIGBUS, 0, "unknown 25" }, @@ -523,6 +575,75 @@ hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *) fsr_info[nr].name = name; } +#ifdef CONFIG_MSM_KRAIT_TBB_ABORT_HANDLER +static int krait_tbb_fixup(unsigned int fsr, struct pt_regs *regs) +{ + int base_cond, cond = 0; + unsigned int p1, cpsr_z, cpsr_c, cpsr_n, cpsr_v; + + if ((read_cpuid_id() & 0xFFFFFFFC) != 0x510F04D0) + return 0; + + if (!thumb_mode(regs)) + return 0; + + /* If ITSTATE is 0, return quickly */ + if ((regs->ARM_cpsr & PSR_IT_MASK) == 0) + return 0; + + cpsr_n = (regs->ARM_cpsr & PSR_N_BIT) ? 1 : 0; + cpsr_z = (regs->ARM_cpsr & PSR_Z_BIT) ? 1 : 0; + cpsr_c = (regs->ARM_cpsr & PSR_C_BIT) ? 1 : 0; + cpsr_v = (regs->ARM_cpsr & PSR_V_BIT) ? 1 : 0; + + p1 = (regs->ARM_cpsr & BIT(12)) ? 1 : 0; + + base_cond = (regs->ARM_cpsr >> 13) & 0x07; + + switch (base_cond) { + case 0x0: /* equal */ + cond = cpsr_z; + break; + + case 0x1: /* carry set */ + cond = cpsr_c; + break; + + case 0x2: /* minus / negative */ + cond = cpsr_n; + break; + + case 0x3: /* overflow */ + cond = cpsr_v; + break; + + case 0x4: /* unsigned higher */ + cond = (cpsr_c == 1) && (cpsr_z == 0); + break; + + case 0x5: /* signed greater / equal */ + cond = (cpsr_n == cpsr_v); + break; + + case 0x6: /* signed greater */ + cond = (cpsr_z == 0) && (cpsr_n == cpsr_v); + break; + + case 0x7: /* always */ + cond = 1; + break; + }; + + if (cond == p1) { + pr_debug("Conditional abort fixup, PC=%08x, base=%d, cond=%d\n", + (unsigned int) regs->ARM_pc, base_cond, cond); + regs->ARM_pc += 2; + return 1; + } + return 0; +} +#endif + /* * Dispatch a data abort to the relevant handler. 
*/ @@ -532,6 +653,16 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) const struct fsr_info *inf = fsr_info + fsr_fs(fsr); struct siginfo info; +#ifdef CONFIG_EMULATE_DOMAIN_MANAGER_V7 + if (emulate_domain_manager_data_abort(fsr, addr)) + return; +#endif + +#ifdef CONFIG_MSM_KRAIT_TBB_ABORT_HANDLER + if (krait_tbb_fixup(fsr, regs)) + return; +#endif + if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs)) return; @@ -600,6 +731,11 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr); struct siginfo info; +#ifdef CONFIG_EMULATE_DOMAIN_MANAGER_V7 + if (emulate_domain_manager_prefetch_abort(ifsr, addr)) + return; +#endif + if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs)) return; diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index cddd684364dab..2951de78baef1 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -201,6 +201,29 @@ static void __init arm_bootmem_init(unsigned long start_pfn, } } +#ifdef CONFIG_ARCH_POPULATES_NODE_MAP +static void __init arm_bootmem_free_apnm(unsigned long max_low, + unsigned long max_high) +{ + unsigned long max_zone_pfns[MAX_NR_ZONES]; + struct memblock_region *reg; + + memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); + + max_zone_pfns[0] = max_low; +#ifdef CONFIG_HIGHMEM + max_zone_pfns[ZONE_HIGHMEM] = max_high; +#endif + for_each_memblock(memory, reg) { + unsigned long start = memblock_region_memory_base_pfn(reg); + unsigned long end = memblock_region_memory_end_pfn(reg); + + add_active_range(0, start, end); + } + free_area_init_nodes(max_zone_pfns); +} + +#else static void __init arm_bootmem_free(unsigned long min, unsigned long max_low, unsigned long max_high) { @@ -251,6 +274,7 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low, free_area_init_node(0, zone_size, min, zhole_size); } +#endif #ifndef CONFIG_SPARSEMEM int pfn_valid(unsigned long pfn) @@ -280,6 +304,13 @@ static int __init meminfo_cmp(const void *_a, const void *_b) return cmp < 0 ? -1 : cmp > 0 ? 1 : 0; } +#ifdef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0 +unsigned long membank0_size; +EXPORT_SYMBOL(membank0_size); +unsigned long membank1_start; +EXPORT_SYMBOL(membank1_start); +#endif + void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc) { int i; @@ -290,6 +321,11 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc) for (i = 0; i < mi->nr_banks; i++) memblock_add(mi->bank[i].start, mi->bank[i].size); +#ifdef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0 + membank0_size = meminfo.bank[0].size; + membank1_start = meminfo.bank[1].start; +#endif + /* Register the kernel text, kernel data and initrd with memblock. 
*/ #ifdef CONFIG_XIP_KERNEL memblock_reserve(__pa(_sdata), _end - _sdata); @@ -322,6 +358,28 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc) memblock_dump_all(); } +#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE +int _early_pfn_valid(unsigned long pfn) +{ + struct meminfo *mi = &meminfo; + unsigned int left = 0, right = mi->nr_banks; + + do { + unsigned int mid = (right + left) / 2; + struct membank *bank = &mi->bank[mid]; + + if (pfn < bank_pfn_start(bank)) + right = mid; + else if (pfn >= bank_pfn_end(bank)) + left = mid + 1; + else + return 1; + } while (left < right); + return 0; +} +EXPORT_SYMBOL(_early_pfn_valid); +#endif + void __init bootmem_init(void) { unsigned long min, max_low, max_high; @@ -343,12 +401,16 @@ void __init bootmem_init(void) */ sparse_init(); +#ifdef CONFIG_ARCH_POPULATES_NODE_MAP + arm_bootmem_free_apnm(max_low, max_high); +#else /* * Now free the memory - free_area_init_node needs * the sparse mem_map arrays initialized by sparse_init() * for memmap_init_zone(), otherwise all PFNs are invalid. */ arm_bootmem_free(min, max_low, max_high); +#endif high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1; @@ -392,7 +454,7 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn) * Convert start_pfn/end_pfn to a struct page pointer. */ start_pg = pfn_to_page(start_pfn - 1) + 1; - end_pg = pfn_to_page(end_pfn); + end_pg = pfn_to_page(end_pfn - 1) + 1; /* * Convert to physical addresses, and @@ -410,7 +472,10 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn) } /* - * The mem_map array can get very big. Free the unused area of the memory map. + * The mem_map array can get very big. Free as much of the unused portion of + * the mem_map that we are allowed to. The page migration code moves pages + * in blocks that are rounded per the MAX_ORDER_NR_PAGES definition, so we + * can't free mem_map entries that may be dereferenced in this manner. */ static void __init free_unused_memmap(struct meminfo *mi) { @@ -424,8 +489,17 @@ static void __init free_unused_memmap(struct meminfo *mi) for_each_bank(i, mi) { struct membank *bank = &mi->bank[i]; - bank_start = bank_pfn_start(bank); + bank_start = round_down(bank_pfn_start(bank), + MAX_ORDER_NR_PAGES); +#ifdef CONFIG_SPARSEMEM + /* + * Take care not to free memmap entries that don't exist + * due to SPARSEMEM sections which aren't present. + */ + bank_start = min(bank_start, + ALIGN(prev_bank_end, PAGES_PER_SECTION)); +#endif /* * If we had a previous bank, and there is a space * between the current bank and the previous, free it. @@ -433,13 +507,15 @@ static void __init free_unused_memmap(struct meminfo *mi) if (prev_bank_end && prev_bank_end < bank_start) free_memmap(prev_bank_end, bank_start); - /* - * Align up here since the VM subsystem insists that the - * memmap entries are valid from the bank end aligned to - * MAX_ORDER_NR_PAGES. 
- */ - prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES); + prev_bank_end = round_up(bank_pfn_end(bank), + MAX_ORDER_NR_PAGES); } + +#ifdef CONFIG_SPARSEMEM + if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION)) + free_memmap(prev_bank_end, + ALIGN(prev_bank_end, PAGES_PER_SECTION)); +#endif } static void __init free_highpages(void) @@ -542,7 +618,14 @@ void __init mem_init(void) else if (!page_count(page)) free_pages++; page++; +#ifdef CONFIG_SPARSEMEM + pfn1++; + if (!(pfn1 % PAGES_PER_SECTION)) + page = pfn_to_page(pfn1); + } while (pfn1 < pfn2); +#else } while (page < end); +#endif } /* @@ -659,6 +742,33 @@ void free_initmem(void) "init"); } +#ifdef CONFIG_MEMORY_HOTPLUG +int arch_add_memory(int nid, u64 start, u64 size) +{ + struct pglist_data *pgdata = NODE_DATA(nid); + struct zone *zone = pgdata->node_zones + ZONE_MOVABLE; + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; + + return __add_pages(nid, zone, start_pfn, nr_pages); +} + +int arch_physical_active_memory(u64 start, u64 size) +{ + return platform_physical_active_pages(start, size); +} + +int arch_physical_remove_memory(u64 start, u64 size) +{ + return platform_physical_remove_pages(start, size); +} + +int arch_physical_low_power_memory(u64 start, u64 size) +{ + return platform_physical_low_power_pages(start, size); +} +#endif + #ifdef CONFIG_BLK_DEV_INITRD static int keep_initrd; diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index ab506272b2d3e..17e7b0b57e49f 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -204,8 +204,12 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, /* * Don't allow RAM to be mapped - this causes problems with ARMv6+ */ - if (WARN_ON(pfn_valid(pfn))) - return NULL; + if (pfn_valid(pfn)) { + printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory. This leads\n" + KERN_WARNING "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n" + KERN_WARNING "will fail in the next kernel release. 
Please fix your driver.\n"); + WARN_ON(1); + } type = get_mem_type(mtype); if (!type) diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 36960df5fb762..229200dbd7ebd 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -15,12 +15,7 @@ static inline pmd_t *pmd_off_k(unsigned long virt) return pmd_off(pgd_offset_k(virt), virt); } -struct mem_type { - pteval_t prot_pte; - unsigned int prot_l1; - unsigned int prot_sect; - unsigned int domain; -}; +struct mem_type; const struct mem_type *get_mem_type(unsigned int type); @@ -28,5 +23,8 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page #endif +struct map_desc; + void __init bootmem_init(void); void arm_mm_memblock_reserve(void); +void __init create_mapping(struct map_desc *md); diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 3c67e92f7d592..55def0a790d5e 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -214,6 +214,12 @@ static struct mem_type mem_types[] = { .prot_sect = PROT_SECT_DEVICE | PMD_SECT_WB, .domain = DOMAIN_IO, }, + [MT_DEVICE_STRONGLY_ORDERED] = { /* Guaranteed strongly ordered */ + .prot_pte = PROT_PTE_DEVICE, + .prot_l1 = PMD_TYPE_TABLE, + .prot_sect = PROT_SECT_DEVICE | PMD_SECT_UNCACHED, + .domain = DOMAIN_IO, + }, [MT_DEVICE_WC] = { /* ioremap_wc */ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC, .prot_l1 = PMD_TYPE_TABLE, @@ -252,6 +258,18 @@ static struct mem_type mem_types[] = { .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, .domain = DOMAIN_KERNEL, }, + [MT_MEMORY_R] = { + .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, + .domain = DOMAIN_KERNEL, + }, + [MT_MEMORY_RW] = { + .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_XN, + .domain = DOMAIN_KERNEL, + }, + [MT_MEMORY_RX] = { + .prot_sect = PMD_TYPE_SECT, + .domain = DOMAIN_KERNEL, + }, [MT_ROM] = { .prot_sect = PMD_TYPE_SECT, .domain = DOMAIN_KERNEL, @@ -355,6 +373,8 @@ static void __init build_mem_type_table(void) mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN; mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN; mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN; + mem_types[MT_DEVICE_STRONGLY_ORDERED].prot_sect |= + PMD_SECT_XN; } if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) { /* @@ -428,6 +448,8 @@ static void __init build_mem_type_table(void) * from SVC mode and no access from userspace. 
*/ mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; + mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; + mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; @@ -446,6 +468,9 @@ static void __init build_mem_type_table(void) mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; + mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_S; + mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S; + mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S; mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; } } @@ -485,6 +510,9 @@ static void __init build_mem_type_table(void) mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd; mem_types[MT_MEMORY].prot_pte |= kern_pgprot; mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask; + mem_types[MT_MEMORY_R].prot_sect |= ecc_mask | cp->pmd; + mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd; + mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd; mem_types[MT_ROM].prot_sect |= cp->pmd; switch (cp->pmd) { @@ -650,7 +678,7 @@ static void __init create_36bit_mapping(struct map_desc *md, * offsets, and we take full advantage of sections and * supersections. */ -static void __init create_mapping(struct map_desc *md) +void __init create_mapping(struct map_desc *md) { unsigned long phys, addr, length, end; const struct mem_type *type; @@ -714,7 +742,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr) create_mapping(io_desc + i); } -static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M); +static void * __initdata vmalloc_min = (void *)(VMALLOC_END - CONFIG_VMALLOC_RESERVE); /* * vmalloc=size forces the vmalloc area to be exactly 'size' @@ -749,7 +777,12 @@ static phys_addr_t lowmem_limit __initdata = 0; static void __init sanity_check_meminfo(void) { int i, j, highmem = 0; +#if (defined CONFIG_HIGHMEM) && (defined CONFIG_FIX_MOVABLE_ZONE) + void *v_movable_start = __va(movable_reserved_start); + if (vmalloc_min > v_movable_start) + vmalloc_min = v_movable_start - SECTION_SIZE; +#endif lowmem_limit = __pa(vmalloc_min - 1) + 1; memblock_set_current_limit(lowmem_limit); @@ -1011,8 +1044,39 @@ static void __init map_lowmem(void) map.pfn = __phys_to_pfn(start); map.virtual = __phys_to_virt(start); +#ifdef CONFIG_STRICT_MEMORY_RWX + if (start <= __pa(_text) && __pa(_text) < end) { + map.length = (unsigned long)_text - map.virtual; + map.type = MT_MEMORY; + + create_mapping(&map); + + map.pfn = __phys_to_pfn(__pa(_text)); + map.virtual = (unsigned long)_text; + map.length = __start_rodata - _text; + map.type = MT_MEMORY_RX; + + create_mapping(&map); + + map.pfn = __phys_to_pfn(__pa(__start_rodata)); + map.virtual = (unsigned long)__start_rodata; + map.length = _sdata - __start_rodata; + map.type = MT_MEMORY_R; + + create_mapping(&map); + + map.pfn = __phys_to_pfn(__pa(_sdata)); + map.virtual = (unsigned long)_sdata; + map.length = __phys_to_virt(end) - (unsigned int)_sdata; + map.type = MT_MEMORY_RW; + } else { + map.length = end - start; + map.type = MT_MEMORY_RW; + } +#else map.length = end - start; map.type = MT_MEMORY; +#endif create_mapping(&map); } @@ -1042,4 +1106,16 @@ void __init paging_init(struct machine_desc *mdesc) empty_zero_page = virt_to_page(zero_page); __flush_dcache_page(NULL, empty_zero_page); + +#if defined(CONFIG_ARCH_MSM7X27) + /* + * ensure that 
the strongly ordered page is mapped before the + * first call to write_to_strongly_ordered_memory. This page + * is necessary for the msm 7x27 due to hardware quirks. The + * map call is made here to ensure the bootmem call is made + * in the right window (after initialization, before full + * allocators are initialized) + */ + map_page_strongly_ordered(); +#endif } diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index bcf748d9f4e25..41ab64752ff3c 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S @@ -275,7 +275,7 @@ ENTRY(arm1020_flush_kern_dcache_area) * * (same as v4wb) */ -arm1020_dma_inv_range: +ENTRY(arm1020_dma_inv_range) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 @@ -305,7 +305,7 @@ arm1020_dma_inv_range: * * (same as v4wb) */ -arm1020_dma_clean_range: +ENTRY(arm1020_dma_clean_range) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 @@ -374,6 +374,8 @@ ENTRY(arm1020_cache_fns) .long arm1020_flush_kern_dcache_area .long arm1020_dma_map_area .long arm1020_dma_unmap_area + .long arm1020_dma_inv_range + .long arm1020_dma_clean_range .long arm1020_dma_flush_range .align 5 diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index ab7ec26657eaf..d22f3260c8944 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S @@ -268,7 +268,7 @@ ENTRY(arm1020e_flush_kern_dcache_area) * * (same as v4wb) */ -arm1020e_dma_inv_range: +ENTRY(arm1020e_dma_inv_range) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 @@ -294,7 +294,7 @@ arm1020e_dma_inv_range: * * (same as v4wb) */ -arm1020e_dma_clean_range: +ENTRY(arm1020e_dma_clean_range) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 @@ -360,6 +360,8 @@ ENTRY(arm1020e_cache_fns) .long arm1020e_flush_kern_dcache_area .long arm1020e_dma_map_area .long arm1020e_dma_unmap_area + .long arm1020e_dma_inv_range + .long arm1020e_dma_clean_range .long arm1020e_dma_flush_range .align 5 diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index 831c5e54e22f0..d31ce0256f746 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S @@ -257,7 +257,7 @@ ENTRY(arm1022_flush_kern_dcache_area) * * (same as v4wb) */ -arm1022_dma_inv_range: +ENTRY(arm1022_dma_inv_range) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 @@ -283,7 +283,7 @@ arm1022_dma_inv_range: * * (same as v4wb) */ -arm1022_dma_clean_range: +ENTRY(arm1022_dma_clean_range) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 @@ -349,6 +349,8 @@ ENTRY(arm1022_cache_fns) .long arm1022_flush_kern_dcache_area .long arm1022_dma_map_area .long arm1022_dma_unmap_area + .long arm1022_dma_inv_range + .long arm1022_dma_clean_range .long arm1022_dma_flush_range .align 5 diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index e3f7e9a166bfc..14c25f58e6c30 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S @@ -251,7 +251,7 @@ ENTRY(arm1026_flush_kern_dcache_area) * * (same as v4wb) */ -arm1026_dma_inv_range: +ENTRY(arm1026_dma_inv_range) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 @@ -277,7 +277,7 @@ arm1026_dma_inv_range: * * (same as v4wb) */ -arm1026_dma_clean_range: +ENTRY(arm1026_dma_clean_range) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 @@ -343,6 +343,8 @@ ENTRY(arm1026_cache_fns) .long arm1026_flush_kern_dcache_area .long 
arm1026_dma_map_area .long arm1026_dma_unmap_area + .long arm1026_dma_inv_range + .long arm1026_dma_clean_range .long arm1026_dma_flush_range .align 5 diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 6109f278a9045..35bd31ddd6285 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -242,7 +242,7 @@ ENTRY(arm920_flush_kern_dcache_area) * * (same as v4wb) */ -arm920_dma_inv_range: +ENTRY(arm920_dma_inv_range) tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -265,7 +265,7 @@ arm920_dma_inv_range: * * (same as v4wb) */ -arm920_dma_clean_range: +ENTRY(arm920_dma_clean_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -325,6 +325,8 @@ ENTRY(arm920_cache_fns) .long arm920_flush_kern_dcache_area .long arm920_dma_map_area .long arm920_dma_unmap_area + .long arm920_dma_inv_range + .long arm920_dma_clean_range .long arm920_dma_flush_range #endif diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index bb2f0f46a5e6c..ee6c4daac23b2 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S @@ -244,7 +244,7 @@ ENTRY(arm922_flush_kern_dcache_area) * * (same as v4wb) */ -arm922_dma_inv_range: +ENTRY(arm922_dma_inv_range) tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -267,7 +267,7 @@ arm922_dma_inv_range: * * (same as v4wb) */ -arm922_dma_clean_range: +ENTRY(arm922_dma_clean_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -327,6 +327,8 @@ ENTRY(arm922_cache_fns) .long arm922_flush_kern_dcache_area .long arm922_dma_map_area .long arm922_dma_unmap_area + .long arm922_dma_inv_range + .long arm922_dma_clean_range .long arm922_dma_flush_range #endif diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index c13e01accfe2e..ba147cff4685e 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S @@ -290,7 +290,7 @@ ENTRY(arm925_flush_kern_dcache_area) * * (same as v4wb) */ -arm925_dma_inv_range: +ENTRY(arm925_dma_inv_range) #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -315,7 +315,7 @@ arm925_dma_inv_range: * * (same as v4wb) */ -arm925_dma_clean_range: +ENTRY(arm925_dma_clean_range) #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry @@ -382,6 +382,8 @@ ENTRY(arm925_cache_fns) .long arm925_flush_kern_dcache_area .long arm925_dma_map_area .long arm925_dma_unmap_area + .long arm925_dma_inv_range + .long arm925_dma_clean_range .long arm925_dma_flush_range ENTRY(cpu_arm925_dcache_clean_area) diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 42eb4315740b1..41c9b5dd573d3 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -253,7 +253,7 @@ ENTRY(arm926_flush_kern_dcache_area) * * (same as v4wb) */ -arm926_dma_inv_range: +ENTRY(arm926_dma_inv_range) #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -278,7 +278,7 @@ arm926_dma_inv_range: * * (same as v4wb) */ -arm926_dma_clean_range: +ENTRY(arm926_dma_clean_range) #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry @@ -345,6 +345,8 @@ ENTRY(arm926_cache_fns) .long arm926_flush_kern_dcache_area .long 
arm926_dma_map_area .long arm926_dma_unmap_area + .long arm926_dma_inv_range + .long arm926_dma_clean_range .long arm926_dma_flush_range ENTRY(cpu_arm926_dcache_clean_area) diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index 7b11cdb9935ff..937f978ac85e8 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S @@ -178,7 +178,7 @@ ENTRY(arm940_flush_kern_dcache_area) * - start - virtual start address * - end - virtual end address */ -arm940_dma_inv_range: +ENTRY(arm940_dma_inv_range) mov ip, #0 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries @@ -199,7 +199,7 @@ arm940_dma_inv_range: * - start - virtual start address * - end - virtual end address */ -arm940_dma_clean_range: +ENTRY(arm940_dma_clean_range) ENTRY(cpu_arm940_dcache_clean_area) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH @@ -274,6 +274,8 @@ ENTRY(arm940_cache_fns) .long arm940_flush_kern_dcache_area .long arm940_dma_map_area .long arm940_dma_unmap_area + .long arm940_dma_inv_range + .long arm940_dma_clean_range .long arm940_dma_flush_range __CPUINIT diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index 1a5bbf0803427..76be43b359e1d 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S @@ -222,7 +222,7 @@ ENTRY(arm946_flush_kern_dcache_area) * - end - virtual end address * (same as arm926) */ -arm946_dma_inv_range: +ENTRY(arm946_dma_inv_range) #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -247,7 +247,7 @@ arm946_dma_inv_range: * * (same as arm926) */ -arm946_dma_clean_range: +ENTRY(arm946_dma_clean_range) #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry @@ -316,6 +316,8 @@ ENTRY(arm946_cache_fns) .long arm946_flush_kern_dcache_area .long arm946_dma_map_area .long arm946_dma_unmap_area + .long arm946_dma_inv_range + .long arm946_dma_clean_range .long arm946_dma_flush_range diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index b4597edbff97f..0fde230c457f4 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S @@ -280,7 +280,7 @@ ENTRY(feroceon_range_flush_kern_dcache_area) * (same as v4wb) */ .align 5 -feroceon_dma_inv_range: +ENTRY(feroceon_dma_inv_range) tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -294,7 +294,7 @@ feroceon_dma_inv_range: mov pc, lr .align 5 -feroceon_range_dma_inv_range: +ENTRY(feroceon_range_dma_inv_range) mrs r2, cpsr tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -320,7 +320,7 @@ feroceon_range_dma_inv_range: * (same as v4wb) */ .align 5 -feroceon_dma_clean_range: +ENTRY(feroceon_dma_clean_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -330,7 +330,7 @@ feroceon_dma_clean_range: mov pc, lr .align 5 -feroceon_range_dma_clean_range: +ENTRY(feroceon_range_dma_clean_range) mrs r2, cpsr cmp r1, r0 subne r1, r1, #1 @ top address is inclusive @@ -421,6 +421,8 @@ ENTRY(feroceon_cache_fns) .long feroceon_flush_kern_dcache_area .long feroceon_dma_map_area .long feroceon_dma_unmap_area + .long feroceon_dma_inv_range + .long feroceon_dma_clean_range .long feroceon_dma_flush_range ENTRY(feroceon_range_cache_fns) @@ -433,6 +435,8 @@ ENTRY(feroceon_range_cache_fns) .long feroceon_range_flush_kern_dcache_area .long feroceon_range_dma_map_area .long 
feroceon_dma_unmap_area + .long feroceon_range_dma_inv_range + .long feroceon_range_dma_clean_range .long feroceon_range_dma_flush_range .align 5 diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 4458ee6aa7133..0b94767e64bc3 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S @@ -214,7 +214,7 @@ ENTRY(mohawk_flush_kern_dcache_area) * * (same as v4wb) */ -mohawk_dma_inv_range: +ENTRY(mohawk_dma_inv_range) tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1 @@ -237,7 +237,7 @@ mohawk_dma_inv_range: * * (same as v4wb) */ -mohawk_dma_clean_range: +ENTRY(mohawk_dma_clean_range) bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -297,6 +297,8 @@ ENTRY(mohawk_cache_fns) .long mohawk_flush_kern_dcache_area .long mohawk_dma_map_area .long mohawk_dma_unmap_area + .long mohawk_dma_inv_range + .long mohawk_dma_clean_range .long mohawk_dma_flush_range ENTRY(cpu_mohawk_dcache_clean_area) diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 8e3356239136a..cb25277f1f810 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -61,7 +62,14 @@ ENDPROC(cpu_v7_proc_fin) */ .align 5 ENTRY(cpu_v7_reset) - mov pc, r0 + mrc p15, 0, r1, c1, c0, 0 @ ctrl register + bic r1, r1, #0x0001 @ ...............m + mcr p15, 0, r1, c1, c0, 0 @ Turn off MMU + mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D,flush TLB + mcr p15, 0, ip, c7, c5, 6 @ flush BTC + dsb + isb + mov pc,r0 ENDPROC(cpu_v7_reset) /* @@ -101,6 +109,11 @@ ENDPROC(cpu_v7_dcache_clean_area) */ ENTRY(cpu_v7_switch_mm) #ifdef CONFIG_MMU +#ifdef CONFIG_EMULATE_DOMAIN_MANAGER_V7 + ldr r2, =cpu_v7_switch_mm_private + b emulate_domain_manager_switch_mm +cpu_v7_switch_mm_private: +#endif mov r2, #0 ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) @@ -189,9 +202,8 @@ cpu_v7_name: * - cache type register is implemented */ __v7_ca9mp_setup: -#ifdef CONFIG_SMP - ALT_SMP(mrc p15, 0, r0, c1, c0, 1) - ALT_UP(mov r0, #(1 << 6)) @ fake it for UP +#if defined(CONFIG_SMP) + mrc p15, 0, r0, c1, c0, 1 tst r0, #(1 << 6) @ SMP/nAMP mode enabled? 
orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting @@ -282,6 +294,35 @@ __v7_setup: ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP) ALT_UP(orr r4, r4, #TTB_FLAGS_UP) mcr p15, 0, r4, c2, c0, 1 @ load TTB1 +#ifndef CONFIG_EMULATE_DOMAIN_MANAGER_V7 + mov r10, #0x1f @ domains 0, 1 = manager + mcr p15, 0, r10, c3, c0, 0 @ load domain access register +#endif +#if defined(CONFIG_ARCH_MSM_SCORPION) && !defined(CONFIG_MSM_SMP) + mov r0, #0x33 + mcr p15, 3, r0, c15, c0, 3 @ set L2CR1 +#endif +#if defined (CONFIG_ARCH_MSM_SCORPION) + mrc p15, 0, r0, c1, c0, 1 @ read ACTLR +#ifdef CONFIG_CPU_CACHE_ERR_REPORT + orr r0, r0, #0x37 @ turn on L1/L2 error reporting +#else + bic r0, r0, #0x37 +#endif +#if defined (CONFIG_ARCH_MSM_SCORPIONMP) + orr r0, r0, #0x1 << 24 @ optimal setting for Scorpion MP +#endif +#ifndef CONFIG_ARCH_MSM_KRAIT + mcr p15, 0, r0, c1, c0, 1 @ write ACTLR +#endif +#endif + +#if defined (CONFIG_ARCH_MSM_SCORPIONMP) + mrc p15, 3, r0, c15, c0, 2 @ optimal setting for Scorpion MP + orr r0, r0, #0x1 << 21 + mcr p15, 3, r0, c15, c0, 2 +#endif + /* * Memory region attributes with SCTLR.TRE=1 * @@ -311,7 +352,11 @@ __v7_setup: * NOS = PRRR[24+n] = 1 - not outer shareable */ ldr r5, =0xff0a81a8 @ PRRR +#ifdef CONFIG_MSM_SMP + ldr r6, =0x40e080e0 @ NMRR +#else ldr r6, =0x40e040e0 @ NMRR +#endif mcr p15, 0, r5, c10, c2, 0 @ write PRRR mcr p15, 0, r6, c10, c2, 1 @ write NMRR #endif diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index ec26355cb7c25..f1bd758a63d07 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -264,7 +264,7 @@ ENTRY(xsc3_flush_kern_dcache_area) * - start - virtual start address * - end - virtual end address */ -xsc3_dma_inv_range: +ENTRY(xsc3_dma_inv_range) tst r0, #CACHELINESIZE - 1 bic r0, r0, #CACHELINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean L1 D line @@ -285,7 +285,7 @@ xsc3_dma_inv_range: * - start - virtual start address * - end - virtual end address */ -xsc3_dma_clean_range: +ENTRY(xsc3_dma_clean_range) bic r0, r0, #CACHELINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line add r0, r0, #CACHELINESIZE @@ -345,6 +345,8 @@ ENTRY(xsc3_cache_fns) .long xsc3_flush_kern_dcache_area .long xsc3_dma_map_area .long xsc3_dma_unmap_area + .long xsc3_dma_inv_range + .long xsc3_dma_clean_range .long xsc3_dma_flush_range ENTRY(cpu_xsc3_dcache_clean_area) diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 5a37c5e45c411..456b9f6e80601 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -322,7 +322,7 @@ ENTRY(xscale_flush_kern_dcache_area) * - start - virtual start address * - end - virtual end address */ -xscale_dma_inv_range: +ENTRY(xscale_dma_inv_range) tst r0, #CACHELINESIZE - 1 bic r0, r0, #CACHELINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -343,7 +343,7 @@ xscale_dma_inv_range: * - start - virtual start address * - end - virtual end address */ -xscale_dma_clean_range: +ENTRY(xscale_dma_clean_range) bic r0, r0, #CACHELINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHELINESIZE @@ -417,6 +417,8 @@ ENTRY(xscale_cache_fns) .long xscale_flush_kern_dcache_area .long xscale_dma_map_area .long xscale_dma_unmap_area + .long xscale_dma_inv_range + .long xscale_dma_clean_range .long xscale_dma_flush_range /* diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S index 53cd5b4546731..8338c6e37d612 100644 --- a/arch/arm/mm/tlb-v7.S +++ b/arch/arm/mm/tlb-v7.S @@ -38,11 +38,19 @@ ENTRY(v7wbi_flush_user_tlb_range) dsb 
mov r0, r0, lsr #PAGE_SHIFT @ align address mov r1, r1, lsr #PAGE_SHIFT +#ifdef CONFIG_ARCH_MSM8X60 + mov r0, r0, lsl #PAGE_SHIFT +#else asid r3, r3 @ mask ASID orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA +#endif mov r1, r1, lsl #PAGE_SHIFT 1: +#ifdef CONFIG_ARCH_MSM8X60 + ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA (shareable) +#else ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable) +#endif ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA add r0, r0, #PAGE_SZ @@ -70,7 +78,11 @@ ENTRY(v7wbi_flush_kern_tlb_range) mov r0, r0, lsl #PAGE_SHIFT mov r1, r1, lsl #PAGE_SHIFT 1: +#ifdef CONFIG_ARCH_MSM8X60 + ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA (shareable) +#else ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable) +#endif ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA add r0, r0, #PAGE_SZ cmp r0, r1 diff --git a/arch/arm/mm/vcm.c b/arch/arm/mm/vcm.c new file mode 100644 index 0000000000000..5c52a9cd00e35 --- /dev/null +++ b/arch/arm/mm/vcm.c @@ -0,0 +1,1830 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +/* alloc_vm_area */ +#include +#include +#include + +#include +#include + +#define ONE_TO_ONE_CHK 1 + +#define vcm_err(a, ...) \ + pr_err("ERROR %s %i " a, __func__, __LINE__, ##__VA_ARGS__) + +static unsigned int smmu_map_sizes[4] = {SZ_16M, SZ_1M, SZ_64K, SZ_4K}; + +static phys_addr_t *bootmem_cont; +static int cont_sz; +static struct vcm *cont_vcm_id; +static struct phys_chunk *cont_phys_chunk; + +DEFINE_SPINLOCK(vcmlock); + +/* Leaving this in for now to keep compatibility of the API. */ +/* This will disappear. 
*/ +phys_addr_t vcm_get_dev_addr(struct res *res) +{ + if (!res) { + vcm_err("NULL RES"); + return -EINVAL; + } + return res->dev_addr; +} + +static int vcm_no_res(struct vcm *vcm) +{ + if (!vcm) { + vcm_err("NULL vcm\n"); + goto fail; + } + + return list_empty(&vcm->res_head); +fail: + return -EINVAL; +} + +static int vcm_no_assoc(struct vcm *vcm) +{ + if (!vcm) { + vcm_err("NULL vcm\n"); + goto fail; + } + + return list_empty(&vcm->assoc_head); +fail: + return -EINVAL; +} + +static int vcm_all_activated(struct vcm *vcm) +{ + struct avcm *avcm; + + if (!vcm) { + vcm_err("NULL vcm\n"); + goto fail; + } + + list_for_each_entry(avcm, &vcm->assoc_head, assoc_elm) + if (!avcm->is_active) + return 0; + + return 1; +fail: + return -EINVAL; +} + +static void vcm_destroy_common(struct vcm *vcm) +{ + if (!vcm) { + vcm_err("NULL vcm\n"); + return; + } + + memset(vcm, 0, sizeof(*vcm)); + kfree(vcm); +} + +static struct vcm *vcm_create_common(void) +{ + struct vcm *vcm = 0; + + vcm = kzalloc(sizeof(*vcm), GFP_KERNEL); + if (!vcm) { + vcm_err("kzalloc(%i, GFP_KERNEL) ret 0\n", + sizeof(*vcm)); + goto fail; + } + + INIT_LIST_HEAD(&vcm->res_head); + INIT_LIST_HEAD(&vcm->assoc_head); + + return vcm; + +fail: + return NULL; +} + + +static int vcm_create_pool(struct vcm *vcm, unsigned long start_addr, + size_t len) +{ + int ret = 0; + + if (!vcm) { + vcm_err("NULL vcm\n"); + goto fail; + } + + vcm->start_addr = start_addr; + vcm->len = len; + + vcm->pool = gen_pool_create(PAGE_SHIFT, -1); + if (!vcm->pool) { + vcm_err("gen_pool_create(%x, -1) ret 0\n", PAGE_SHIFT); + ret = -EINVAL; + goto fail; + } + + ret = gen_pool_add(vcm->pool, start_addr, len, -1); + if (ret) { + vcm_err("gen_pool_add(%p, %p, %i, -1) ret %i\n", vcm->pool, + (void *) start_addr, len, ret); + goto fail; + } + + vcm->domain = iommu_domain_alloc(); + if (!vcm->domain) { + vcm_err("Could not allocate domain\n"); + ret = -ENOMEM; + goto fail; + } + +fail: + if (ret && vcm->pool) + gen_pool_destroy(vcm->pool); + + return ret; +} + + +static struct vcm *vcm_create_flagged(int flag, unsigned long start_addr, + size_t len) +{ + int ret = 0; + struct vcm *vcm = 0; + + vcm = vcm_create_common(); + if (!vcm) { + vcm_err("NULL vcm\n"); + goto fail; + } + + /* special one-to-one mapping case */ + if ((flag & ONE_TO_ONE_CHK) && + bootmem_cont && + start_addr == (size_t) bootmem_cont && + len == cont_sz) { + vcm->type = VCM_ONE_TO_ONE; + } else { + ret = vcm_create_pool(vcm, start_addr, len); + vcm->type = VCM_DEVICE; + } + + if (ret) { + vcm_err("vcm_create_pool(%p, %p, %i) ret %i\n", vcm, + (void *) start_addr, len, ret); + goto fail2; + } + + return vcm; + +fail2: + vcm_destroy_common(vcm); +fail: + return NULL; +} + +struct vcm *vcm_create(unsigned long start_addr, size_t len) +{ + unsigned long flags; + struct vcm *vcm; + + spin_lock_irqsave(&vcmlock, flags); + vcm = vcm_create_flagged(ONE_TO_ONE_CHK, start_addr, len); + spin_unlock_irqrestore(&vcmlock, flags); + return vcm; +} + + +static int ext_vcm_id_valid(size_t ext_vcm_id) +{ + return ((ext_vcm_id == VCM_PREBUILT_KERNEL) || + (ext_vcm_id == VCM_PREBUILT_USER)); +} + + +struct vcm *vcm_create_from_prebuilt(size_t ext_vcm_id) +{ + unsigned long flags; + struct vcm *vcm = 0; + + spin_lock_irqsave(&vcmlock, flags); + + if (!ext_vcm_id_valid(ext_vcm_id)) { + vcm_err("ext_vcm_id_valid(%i) ret 0\n", ext_vcm_id); + goto fail; + } + + vcm = vcm_create_common(); + if (!vcm) { + vcm_err("NULL vcm\n"); + goto fail; + } + + if (ext_vcm_id == VCM_PREBUILT_KERNEL) + vcm->type = VCM_EXT_KERNEL; + else if 
(ext_vcm_id == VCM_PREBUILT_USER) + vcm->type = VCM_EXT_USER; + else { + vcm_err("UNREACHABLE ext_vcm_id is illegal\n"); + goto fail_free; + } + + /* TODO: set kernel and userspace start_addr and len, if this + * makes sense */ + + spin_unlock_irqrestore(&vcmlock, flags); + return vcm; + +fail_free: + vcm_destroy_common(vcm); +fail: + spin_unlock_irqrestore(&vcmlock, flags); + return NULL; +} + + +struct vcm *vcm_clone(struct vcm *vcm) +{ + return 0; +} + + +/* No lock needed, vcm->start_addr is never updated after creation */ +size_t vcm_get_start_addr(struct vcm *vcm) +{ + if (!vcm) { + vcm_err("NULL vcm\n"); + return 1; + } + + return vcm->start_addr; +} + + +/* No lock needed, vcm->len is never updated after creation */ +size_t vcm_get_len(struct vcm *vcm) +{ + if (!vcm) { + vcm_err("NULL vcm\n"); + return 0; + } + + return vcm->len; +} + + +static int vcm_free_common_rule(struct vcm *vcm) +{ + int ret; + + if (!vcm) { + vcm_err("NULL vcm\n"); + goto fail; + } + + ret = vcm_no_res(vcm); + if (!ret) { + vcm_err("vcm_no_res(%p) ret 0\n", vcm); + goto fail_busy; + } + + if (ret == -EINVAL) { + vcm_err("vcm_no_res(%p) ret -EINVAL\n", vcm); + goto fail; + } + + ret = vcm_no_assoc(vcm); + if (!ret) { + vcm_err("vcm_no_assoc(%p) ret 0\n", vcm); + goto fail_busy; + } + + if (ret == -EINVAL) { + vcm_err("vcm_no_assoc(%p) ret -EINVAL\n", vcm); + goto fail; + } + + return 0; + +fail_busy: + return -EBUSY; +fail: + return -EINVAL; +} + + +static int vcm_free_pool_rule(struct vcm *vcm) +{ + if (!vcm) { + vcm_err("NULL vcm\n"); + goto fail; + } + + /* A vcm always has a valid pool, don't free the vcm because + what we got is probably invalid. + */ + if (!vcm->pool) { + vcm_err("NULL vcm->pool\n"); + goto fail; + } + + return 0; + +fail: + return -EINVAL; +} + + +static void vcm_free_common(struct vcm *vcm) +{ + memset(vcm, 0, sizeof(*vcm)); + + kfree(vcm); +} + + +static int vcm_free_pool(struct vcm *vcm) +{ + if (!vcm) { + vcm_err("NULL vcm\n"); + goto fail; + } + + gen_pool_destroy(vcm->pool); + + return 0; + +fail: + return -EINVAL; +} + + +static int __vcm_free(struct vcm *vcm) +{ + int ret; + + if (!vcm) { + vcm_err("NULL vcm\n"); + goto fail; + } + + ret = vcm_free_common_rule(vcm); + if (ret != 0) { + vcm_err("vcm_free_common_rule(%p) ret %i\n", vcm, ret); + goto fail; + } + + if (vcm->type == VCM_DEVICE) { + ret = vcm_free_pool_rule(vcm); + if (ret != 0) { + vcm_err("vcm_free_pool_rule(%p) ret %i\n", + (void *) vcm, ret); + goto fail; + } + if (vcm->domain) + iommu_domain_free(vcm->domain); + + vcm->domain = NULL; + ret = vcm_free_pool(vcm); + if (ret != 0) { + vcm_err("vcm_free_pool(%p) ret %i", (void *) vcm, ret); + goto fail; + } + } + + vcm_free_common(vcm); + + return 0; + +fail: + return -EINVAL; +} + +int vcm_free(struct vcm *vcm) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&vcmlock, flags); + ret = __vcm_free(vcm); + spin_unlock_irqrestore(&vcmlock, flags); + + return ret; +} + + +static struct res *__vcm_reserve(struct vcm *vcm, size_t len, u32 attr) +{ + struct res *res = NULL; + int align_attr = 0, i = 0; + + if (!vcm) { + vcm_err("NULL vcm\n"); + goto fail; + } + + if (len == 0) { + vcm_err("len is 0\n"); + goto fail; + } + + res = kzalloc(sizeof(*res), GFP_KERNEL); + if (!res) { + vcm_err("kzalloc(%i, GFP_KERNEL) ret 0", sizeof(*res)); + goto fail; + } + + align_attr = (attr >> VCM_ALIGN_SHIFT) & VCM_ALIGN_MASK; + + if (align_attr >= 32) { + vcm_err("Invalid alignment attribute: %d\n", align_attr); + goto fail2; + } + + INIT_LIST_HEAD(&res->res_elm); + res->vcm = 
vcm; + res->len = len; + res->attr = attr; + res->alignment_req = smmu_map_sizes[ARRAY_SIZE(smmu_map_sizes) - 1]; + + if (align_attr == 0) { + for (i = 0; i < ARRAY_SIZE(smmu_map_sizes); i++) + if (len / smmu_map_sizes[i]) { + res->alignment_req = smmu_map_sizes[i]; + break; + } + } else + res->alignment_req = 1 << align_attr; + + res->aligned_len = res->alignment_req + len; + + switch (vcm->type) { + case VCM_DEVICE: + /* should always be not zero */ + if (!vcm->pool) { + vcm_err("NULL vcm->pool\n"); + goto fail2; + } + + res->ptr = gen_pool_alloc(vcm->pool, res->aligned_len); + if (!res->ptr) { + vcm_err("gen_pool_alloc(%p, %i) ret 0\n", + vcm->pool, res->aligned_len); + goto fail2; + } + + /* Calculate alignment... this will all change anyway */ + res->dev_addr = res->ptr + + (res->alignment_req - + (res->ptr & (res->alignment_req - 1))); + + break; + case VCM_EXT_KERNEL: + res->vm_area = alloc_vm_area(res->aligned_len); + res->mapped = 0; /* be explicit */ + if (!res->vm_area) { + vcm_err("NULL res->vm_area\n"); + goto fail2; + } + + res->dev_addr = (size_t) res->vm_area->addr + + (res->alignment_req - + ((size_t) res->vm_area->addr & + (res->alignment_req - 1))); + + break; + case VCM_ONE_TO_ONE: + break; + default: + vcm_err("%i is an invalid vcm->type\n", vcm->type); + goto fail2; + } + + list_add_tail(&res->res_elm, &vcm->res_head); + + return res; + +fail2: + kfree(res); +fail: + return 0; +} + + +struct res *vcm_reserve(struct vcm *vcm, size_t len, u32 attr) +{ + unsigned long flags; + struct res *res; + + spin_lock_irqsave(&vcmlock, flags); + res = __vcm_reserve(vcm, len, attr); + spin_unlock_irqrestore(&vcmlock, flags); + + return res; +} + + +struct res *vcm_reserve_at(enum memtarget_t memtarget, struct vcm *vcm, + size_t len, u32 attr) +{ + return 0; +} + + +static int __vcm_unreserve(struct res *res) +{ + struct vcm *vcm; + + if (!res) { + vcm_err("NULL res\n"); + goto fail; + } + + if (!res->vcm) { + vcm_err("NULL res->vcm\n"); + goto fail; + } + + vcm = res->vcm; + if (!vcm) { + vcm_err("NULL vcm\n"); + goto fail; + } + + switch (vcm->type) { + case VCM_DEVICE: + if (!res->vcm->pool) { + vcm_err("NULL (res->vcm))->pool\n"); + goto fail; + } + + /* res->ptr could be zero, this isn't an error */ + gen_pool_free(res->vcm->pool, res->ptr, + res->aligned_len); + break; + case VCM_EXT_KERNEL: + if (res->mapped) { + vcm_err("res->mapped is true\n"); + goto fail; + } + + /* This may take a little explaining. + * In the kernel vunmap will free res->vm_area + * so if we've called it then we shouldn't call + * free_vm_area(). If we've called it we set + * res->vm_area to 0. 
+ */ + if (res->vm_area) { + free_vm_area(res->vm_area); + res->vm_area = 0; + } + + break; + case VCM_ONE_TO_ONE: + break; + default: + vcm_err("%i is an invalid vcm->type\n", vcm->type); + goto fail; + } + + list_del(&res->res_elm); + + /* be extra careful by clearing the memory before freeing it */ + memset(res, 0, sizeof(*res)); + + kfree(res); + + return 0; + +fail: + return -EINVAL; +} + + +int vcm_unreserve(struct res *res) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&vcmlock, flags); + ret = __vcm_unreserve(res); + spin_unlock_irqrestore(&vcmlock, flags); + + return ret; +} + + +/* No lock needed, res->len is never updated after creation */ +size_t vcm_get_res_len(struct res *res) +{ + if (!res) { + vcm_err("res is 0\n"); + return 0; + } + + return res->len; +} + + +int vcm_set_res_attr(struct res *res, u32 attr) +{ + return 0; +} + + +u32 vcm_get_res_attr(struct res *res) +{ + return 0; +} + + +size_t vcm_get_num_res(struct vcm *vcm) +{ + return 0; +} + + +struct res *vcm_get_next_res(struct vcm *vcm, struct res *res) +{ + return 0; +} + + +size_t vcm_res_copy(struct res *to, size_t to_off, struct res *from, size_t + from_off, size_t len) +{ + return 0; +} + + +size_t vcm_get_min_page_size(void) +{ + return PAGE_SIZE; +} + + +static int vcm_to_smmu_attr(u32 attr) +{ + int smmu_attr = 0; + + switch (attr & VCM_CACHE_POLICY) { + case VCM_NOTCACHED: + smmu_attr = VCM_DEV_ATTR_NONCACHED; + break; + case VCM_WB_WA: + smmu_attr = VCM_DEV_ATTR_CACHED_WB_WA; + smmu_attr |= VCM_DEV_ATTR_SH; + break; + case VCM_WB_NWA: + smmu_attr = VCM_DEV_ATTR_CACHED_WB_NWA; + smmu_attr |= VCM_DEV_ATTR_SH; + break; + case VCM_WT: + smmu_attr = VCM_DEV_ATTR_CACHED_WT; + smmu_attr |= VCM_DEV_ATTR_SH; + break; + default: + return -EINVAL; + } + + return smmu_attr; +} + + +static int vcm_process_chunk(struct iommu_domain *domain, phys_addr_t pa, + unsigned long va, size_t len, u32 attr, int map) +{ + int ret, i, map_order; + unsigned long map_len = smmu_map_sizes[ARRAY_SIZE(smmu_map_sizes) - 1]; + + for (i = 0; i < ARRAY_SIZE(smmu_map_sizes); i++) { + if (IS_ALIGNED(va, smmu_map_sizes[i]) && len >= + smmu_map_sizes[i]) { + map_len = smmu_map_sizes[i]; + break; + } + } + +#ifdef VCM_PERF_DEBUG + if (va & (len - 1)) + pr_warning("Warning! Suboptimal VCM mapping alignment " + "va = %p, len = %p. Expect TLB performance " + "degradation.\n", (void *) va, (void *) len); +#endif + + map_order = get_order(map_len); + + while (len) { + if (va & (SZ_4K - 1)) { + vcm_err("Tried to map w/ align < 4k! va = %08lx\n", va); + goto fail; + } + + if (map_len > len) { + vcm_err("map_len = %lu, len = %d, trying to overmap\n", + map_len, len); + goto fail; + } + + if (map) + ret = iommu_map(domain, va, pa, map_order, attr); + else + ret = iommu_unmap(domain, va, map_order); + + if (ret) { + vcm_err("iommu_map/unmap(%p, %p, %p, 0x%x, 0x%x) ret %i" + "map = %d", (void *) domain, (void *) pa, + (void *) va, (int) map_len, attr, ret, map); + goto fail; + } + + va += map_len; + pa += map_len; + len -= map_len; + } + + return 0; +fail: + return -EINVAL; +} + +/* TBD if you vcm_back again what happens? 
*/ +int vcm_back(struct res *res, struct physmem *physmem) +{ + unsigned long flags; + struct vcm *vcm; + struct phys_chunk *chunk; + size_t va = 0; + int ret; + int attr; + + spin_lock_irqsave(&vcmlock, flags); + + if (!res) { + vcm_err("NULL res\n"); + goto fail; + } + + vcm = res->vcm; + if (!vcm) { + vcm_err("NULL vcm\n"); + goto fail; + } + + switch (vcm->type) { + case VCM_DEVICE: + case VCM_EXT_KERNEL: /* hack part 1 */ + attr = vcm_to_smmu_attr(res->attr); + if (attr == -1) { + vcm_err("Bad SMMU attr\n"); + goto fail; + } + break; + default: + attr = 0; + break; + } + + if (!physmem) { + vcm_err("NULL physmem\n"); + goto fail; + } + + if (res->len == 0) { + vcm_err("res->len is 0\n"); + goto fail; + } + + if (physmem->len == 0) { + vcm_err("physmem->len is 0\n"); + goto fail; + } + + if (res->len != physmem->len) { + vcm_err("res->len (%i) != physmem->len (%i)\n", + res->len, physmem->len); + goto fail; + } + + if (physmem->is_cont) { + if (physmem->res == 0) { + vcm_err("cont physmem->res is 0"); + goto fail; + } + } else { + /* fail if no physmem */ + if (list_empty(&physmem->alloc_head.allocated)) { + vcm_err("no allocated phys memory"); + goto fail; + } + } + + ret = vcm_no_assoc(res->vcm); + if (ret == 1) { + vcm_err("can't back an unassociated VCM\n"); + goto fail; + } + + if (ret == -1) { + vcm_err("vcm_no_assoc() ret -1\n"); + goto fail; + } + + ret = vcm_all_activated(res->vcm); + if (ret == 0) { + vcm_err("can't back, not all associations are activated\n"); + goto fail_eagain; + } + + if (ret == -1) { + vcm_err("vcm_all_activated() ret -1\n"); + goto fail; + } + + va = res->dev_addr; + + list_for_each_entry(chunk, &physmem->alloc_head.allocated, + allocated) { + struct vcm *vcm = res->vcm; + size_t chunk_size = chunk->size; + + if (chunk_size <= 0) { + vcm_err("Bad chunk size: %d\n", chunk_size); + goto fail; + } + + switch (vcm->type) { + case VCM_DEVICE: + { + /* map all */ + ret = vcm_process_chunk(vcm->domain, chunk->pa, + va, chunk_size, attr, 1); + if (ret != 0) { + vcm_err("vcm_process_chunk(%p, %p, %p," + " 0x%x, 0x%x)" + " ret %i", + vcm->domain, + (void *) chunk->pa, + (void *) va, + (int) chunk_size, attr, ret); + goto fail; + } + break; + } + + case VCM_EXT_KERNEL: + { + unsigned int pages_in_chunk = chunk_size / PAGE_SIZE; + unsigned long loc_va = va; + unsigned long loc_pa = chunk->pa; + + const struct mem_type *mtype; + + /* TODO: get this based on MEMTYPE */ + mtype = get_mem_type(MT_DEVICE); + if (!mtype) { + vcm_err("mtype is 0\n"); + goto fail; + } + + /* TODO: Map with the same chunk size */ + while (pages_in_chunk--) { + ret = ioremap_page(loc_va, + loc_pa, + mtype); + if (ret != 0) { + vcm_err("ioremap_page(%p, %p, %p) ret" + " %i", (void *) loc_va, + (void *) loc_pa, + (void *) mtype, ret); + goto fail; + /* TODO handle weird + inter-map case */ + } + + /* hack part 2 */ + /* we're changing the PT entry behind + * linux's back + */ + ret = cpu_set_attr(loc_va, PAGE_SIZE, attr); + if (ret != 0) { + vcm_err("cpu_set_attr(%p, %lu, %x)" + "ret %i\n", + (void *) loc_va, PAGE_SIZE, + attr, ret); + goto fail; + /* TODO handle weird + inter-map case */ + } + + res->mapped = 1; + + loc_va += PAGE_SIZE; + loc_pa += PAGE_SIZE; + } + + flush_cache_vmap(va, loc_va); + break; + } + case VCM_ONE_TO_ONE: + va = chunk->pa; + break; + default: + /* this should never happen */ + goto fail; + } + + va += chunk_size; + /* also add res to the allocated chunk list of refs */ + } + + /* note the reservation */ + res->physmem = physmem; + + spin_unlock_irqrestore(&vcmlock,
flags); + return 0; +fail_eagain: + spin_unlock_irqrestore(&vcmlock, flags); + return -EAGAIN; +fail: + spin_unlock_irqrestore(&vcmlock, flags); + return -EINVAL; +} + + +int vcm_unback(struct res *res) +{ + unsigned long flags; + struct vcm *vcm; + struct physmem *physmem; + int ret; + + spin_lock_irqsave(&vcmlock, flags); + + if (!res) + goto fail; + + vcm = res->vcm; + if (!vcm) { + vcm_err("NULL vcm\n"); + goto fail; + } + + if (!res->physmem) { + vcm_err("can't unback a non-backed reservation\n"); + goto fail; + } + + physmem = res->physmem; + if (!physmem) { + vcm_err("physmem is NULL\n"); + goto fail; + } + + if (list_empty(&physmem->alloc_head.allocated)) { + vcm_err("physmem allocation is empty\n"); + goto fail; + } + + ret = vcm_no_assoc(res->vcm); + if (ret == 1) { + vcm_err("can't unback an unassociated reservation\n"); + goto fail; + } + + if (ret == -1) { + vcm_err("vcm_no_assoc(%p) ret -1\n", (void *) res->vcm); + goto fail; + } + + ret = vcm_all_activated(res->vcm); + if (ret == 0) { + vcm_err("can't unback, not all associations are active\n"); + goto fail_eagain; + } + + if (ret == -1) { + vcm_err("vcm_all_activated(%p) ret -1\n", (void *) res->vcm); + goto fail; + } + + + switch (vcm->type) { + case VCM_EXT_KERNEL: + if (!res->mapped) { + vcm_err("can't unback an unmapped VCM_EXT_KERNEL" + " VCM\n"); + goto fail; + } + + /* vunmap free's vm_area */ + vunmap(res->vm_area->addr); + res->vm_area = 0; + + res->mapped = 0; + break; + + case VCM_DEVICE: + { + struct phys_chunk *chunk; + size_t va = res->dev_addr; + + list_for_each_entry(chunk, &physmem->alloc_head.allocated, + allocated) { + struct vcm *vcm = res->vcm; + size_t chunk_size = chunk->size; + + ret = vcm_process_chunk(vcm->domain, 0, va, + chunk_size, 0, 0); + if (ret != 0) { + vcm_err("vcm_unback_chunk(%p, %p, 0x%x)" + " ret %i", + (void *) vcm->domain, + (void *) va, + (int) chunk_size, ret); + goto fail; + /* TODO handle weird inter-unmap state*/ + } + + va += chunk_size; + /* may do a light unback, depending on the requested + * functionality + */ + } + break; + } + + case VCM_ONE_TO_ONE: + break; + default: + /* this should never happen */ + goto fail; + } + + /* clear the reservation */ + res->physmem = 0; + + spin_unlock_irqrestore(&vcmlock, flags); + return 0; +fail_eagain: + spin_unlock_irqrestore(&vcmlock, flags); + return -EAGAIN; +fail: + spin_unlock_irqrestore(&vcmlock, flags); + return -EINVAL; +} + + +enum memtarget_t vcm_get_memtype_of_res(struct res *res) +{ + return VCM_INVALID; +} + +static int vcm_free_max_munch_cont(struct phys_chunk *head) +{ + struct phys_chunk *chunk, *tmp; + + if (!head) + return -EINVAL; + + list_for_each_entry_safe(chunk, tmp, &head->allocated, + allocated) { + list_del_init(&chunk->allocated); + } + + return 0; +} + +static int vcm_alloc_max_munch_cont(size_t start_addr, size_t len, + struct phys_chunk *head) +{ + /* this function should always succeed, since it + parallels a VCM */ + + int i, j; + + if (!head) { + vcm_err("head is NULL in continuous map.\n"); + goto fail; + } + + if (start_addr < (int) bootmem_cont) { + vcm_err("phys start addr (%p) < base (%p)\n", + (void *) start_addr, (void *) bootmem_cont); + goto fail; + } + + if ((start_addr + len) >= ((size_t) bootmem_cont + cont_sz)) { + vcm_err("requested region (%p + %i) > " + " available region (%p + %i)", + (void *) start_addr, (int) len, + (void *) bootmem_cont, cont_sz); + goto fail; + } + + i = (start_addr - (size_t) bootmem_cont)/SZ_4K; + + for (j = 0; j < ARRAY_SIZE(smmu_map_sizes); ++j) { + while
(len/smmu_map_sizes[j]) { + if (!list_empty(&cont_phys_chunk[i].allocated)) { + vcm_err("chunk %i ( addr %p) already mapped\n", + i, (void *) (start_addr + + (i*smmu_map_sizes[j]))); + goto fail_free; + } + list_add_tail(&cont_phys_chunk[i].allocated, + &head->allocated); + cont_phys_chunk[i].size = smmu_map_sizes[j]; + + len -= smmu_map_sizes[j]; + i += smmu_map_sizes[j]/SZ_4K; + } + } + + if (len % SZ_4K) { + if (!list_empty(&cont_phys_chunk[i].allocated)) { + vcm_err("chunk %i (addr %p) already mapped\n", + i, (void *) (start_addr + (i*SZ_4K))); + goto fail_free; + } + len -= SZ_4K; + list_add_tail(&cont_phys_chunk[i].allocated, + &head->allocated); + + i++; + } + + return i; + +fail_free: + { + struct phys_chunk *chunk, *tmp; + /* just remove from list, if we're double alloc'ing + we don't want to stamp on the other guy */ + list_for_each_entry_safe(chunk, tmp, &head->allocated, + allocated) { + list_del(&chunk->allocated); + } + } +fail: + return 0; +} + +struct physmem *vcm_phys_alloc(enum memtype_t memtype, size_t len, u32 attr) +{ + unsigned long flags; + int ret; + struct physmem *physmem = NULL; + int blocks_allocated; + + spin_lock_irqsave(&vcmlock, flags); + + physmem = kzalloc(sizeof(*physmem), GFP_KERNEL); + if (!physmem) { + vcm_err("physmem is NULL\n"); + goto fail; + } + + physmem->memtype = memtype; + physmem->len = len; + physmem->attr = attr; + + INIT_LIST_HEAD(&physmem->alloc_head.allocated); + + if (attr & VCM_PHYS_CONT) { + if (!cont_vcm_id) { + vcm_err("cont_vcm_id is NULL\n"); + goto fail2; + } + + physmem->is_cont = 1; + + /* TODO: get attributes */ + physmem->res = __vcm_reserve(cont_vcm_id, len, 0); + if (physmem->res == 0) { + vcm_err("contiguous space allocation failed\n"); + goto fail2; + } + + /* if we're here we know we have memory, create + the shadow physmem links*/ + blocks_allocated = + vcm_alloc_max_munch_cont( + physmem->res->dev_addr, + len, + &physmem->alloc_head); + + if (blocks_allocated == 0) { + vcm_err("shadow physmem allocation failed\n"); + goto fail3; + } + } else { + blocks_allocated = vcm_alloc_max_munch(len, memtype, + &physmem->alloc_head); + if (blocks_allocated == 0) { + vcm_err("physical allocation failed:" + " vcm_alloc_max_munch(%i, %p) ret 0\n", + len, &physmem->alloc_head); + goto fail2; + } + } + + spin_unlock_irqrestore(&vcmlock, flags); + return physmem; + +fail3: + ret = __vcm_unreserve(physmem->res); + if (ret != 0) { + vcm_err("vcm_unreserve(%p) ret %i during cleanup", + (void *) physmem->res, ret); + spin_unlock_irqrestore(&vcmlock, flags); + return 0; + } +fail2: + kfree(physmem); +fail: + spin_unlock_irqrestore(&vcmlock, flags); + return 0; +} + + +int vcm_phys_free(struct physmem *physmem) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&vcmlock, flags); + + if (!physmem) { + vcm_err("physmem is NULL\n"); + goto fail; + } + + if (physmem->is_cont) { + if (physmem->res == 0) { + vcm_err("contiguous reservation is NULL\n"); + goto fail; + } + + ret = vcm_free_max_munch_cont(&physmem->alloc_head); + if (ret != 0) { + vcm_err("failed to free physical blocks:" + " vcm_free_max_munch_cont(%p) ret %i\n", + (void *) &physmem->alloc_head, ret); + goto fail; + } + + ret = __vcm_unreserve(physmem->res); + if (ret != 0) { + vcm_err("failed to free virtual blocks:" + " vcm_unreserve(%p) ret %i\n", + (void *) physmem->res, ret); + goto fail; + } + + } else { + + ret = vcm_alloc_free_blocks(physmem->memtype, + &physmem->alloc_head); + if (ret != 0) { + vcm_err("failed to free physical blocks:" + " vcm_alloc_free_blocks(%p) 
ret %i\n", + (void *) &physmem->alloc_head, ret); + goto fail; + } + } + + memset(physmem, 0, sizeof(*physmem)); + + kfree(physmem); + + spin_unlock_irqrestore(&vcmlock, flags); + return 0; + +fail: + spin_unlock_irqrestore(&vcmlock, flags); + return -EINVAL; +} + + +struct avcm *vcm_assoc(struct vcm *vcm, struct device *dev, u32 attr) +{ + unsigned long flags; + struct avcm *avcm = NULL; + + spin_lock_irqsave(&vcmlock, flags); + + if (!vcm) { + vcm_err("vcm is NULL\n"); + goto fail; + } + + if (!dev) { + vcm_err("dev_id is NULL\n"); + goto fail; + } + + if (vcm->type == VCM_EXT_KERNEL && !list_empty(&vcm->assoc_head)) { + vcm_err("only one device may be assocoated with a" + " VCM_EXT_KERNEL\n"); + goto fail; + } + + avcm = kzalloc(sizeof(*avcm), GFP_KERNEL); + if (!avcm) { + vcm_err("kzalloc(%i, GFP_KERNEL) ret NULL\n", sizeof(*avcm)); + goto fail; + } + + avcm->dev = dev; + + avcm->vcm = vcm; + avcm->attr = attr; + avcm->is_active = 0; + + INIT_LIST_HEAD(&avcm->assoc_elm); + list_add(&avcm->assoc_elm, &vcm->assoc_head); + + spin_unlock_irqrestore(&vcmlock, flags); + return avcm; + +fail: + spin_unlock_irqrestore(&vcmlock, flags); + return 0; +} + + +int vcm_deassoc(struct avcm *avcm) +{ + unsigned long flags; + + spin_lock_irqsave(&vcmlock, flags); + + if (!avcm) { + vcm_err("avcm is NULL\n"); + goto fail; + } + + if (list_empty(&avcm->assoc_elm)) { + vcm_err("nothing to deassociate\n"); + goto fail; + } + + if (avcm->is_active) { + vcm_err("association still activated\n"); + goto fail_busy; + } + + list_del(&avcm->assoc_elm); + + memset(avcm, 0, sizeof(*avcm)); + + kfree(avcm); + spin_unlock_irqrestore(&vcmlock, flags); + return 0; +fail_busy: + spin_unlock_irqrestore(&vcmlock, flags); + return -EBUSY; +fail: + spin_unlock_irqrestore(&vcmlock, flags); + return -EINVAL; +} + + +int vcm_set_assoc_attr(struct avcm *avcm, u32 attr) +{ + return 0; +} + + +u32 vcm_get_assoc_attr(struct avcm *avcm) +{ + return 0; +} + + +int vcm_activate(struct avcm *avcm) +{ + unsigned long flags; + struct vcm *vcm; + + spin_lock_irqsave(&vcmlock, flags); + + if (!avcm) { + vcm_err("avcm is NULL\n"); + goto fail; + } + + vcm = avcm->vcm; + if (!vcm) { + vcm_err("NULL vcm\n"); + goto fail; + } + + if (!avcm->dev) { + vcm_err("cannot activate without a device\n"); + goto fail_nodev; + } + + if (avcm->is_active) { + vcm_err("double activate\n"); + goto fail_busy; + } + + if (vcm->type == VCM_DEVICE) { +#ifdef CONFIG_SMMU + int ret; + ret = iommu_attach_device(vcm->domain, avcm->dev); + if (ret != 0) { + dev_err(avcm->dev, "failed to attach to domain\n"); + goto fail_dev; + } +#else + vcm_err("No SMMU support - cannot activate/deactivate\n"); + goto fail_nodev; +#endif + } + + avcm->is_active = 1; + spin_unlock_irqrestore(&vcmlock, flags); + return 0; + +#ifdef CONFIG_SMMU +fail_dev: + spin_unlock_irqrestore(&vcmlock, flags); + return -ENODEV; +#endif +fail_busy: + spin_unlock_irqrestore(&vcmlock, flags); + return -EBUSY; +fail_nodev: + spin_unlock_irqrestore(&vcmlock, flags); + return -ENODEV; +fail: + spin_unlock_irqrestore(&vcmlock, flags); + return -EINVAL; +} + + +int vcm_deactivate(struct avcm *avcm) +{ + unsigned long flags; + struct vcm *vcm; + + spin_lock_irqsave(&vcmlock, flags); + + if (!avcm) + goto fail; + + vcm = avcm->vcm; + if (!vcm) { + vcm_err("NULL vcm\n"); + goto fail; + } + + if (!avcm->dev) { + vcm_err("cannot deactivate without a device\n"); + goto fail; + } + + if (!avcm->is_active) { + vcm_err("double deactivate\n"); + goto fail_nobusy; + } + + if (vcm->type == VCM_DEVICE) { +#ifdef 
CONFIG_SMMU + /* TODO, pmem check */ + iommu_detach_device(vcm->domain, avcm->dev); +#else + vcm_err("No SMMU support - cannot activate/deactivate\n"); + goto fail; +#endif + } + + avcm->is_active = 0; + spin_unlock_irqrestore(&vcmlock, flags); + return 0; +fail_nobusy: + spin_unlock_irqrestore(&vcmlock, flags); + return -ENOENT; +fail: + spin_unlock_irqrestore(&vcmlock, flags); + return -EINVAL; +} + +struct bound *vcm_create_bound(struct vcm *vcm, size_t len) +{ + return 0; +} + + +int vcm_free_bound(struct bound *bound) +{ + return -EINVAL; +} + + +struct res *vcm_reserve_from_bound(struct bound *bound, size_t len, + u32 attr) +{ + return 0; +} + + +size_t vcm_get_bound_start_addr(struct bound *bound) +{ + return 0; +} + + +size_t vcm_get_bound_len(struct bound *bound) +{ + return 0; +} + + +struct physmem *vcm_map_phys_addr(phys_addr_t phys, size_t len) +{ + return 0; +} + + +size_t vcm_get_next_phys_addr(struct physmem *physmem, phys_addr_t phys, + size_t *len) +{ + return 0; +} + + +struct res *vcm_get_res(unsigned long dev_addr, struct vcm *vcm) +{ + return 0; +} + + +size_t vcm_translate(struct device *src_dev, struct vcm *src_vcm, + struct vcm *dst_vcm) +{ + return 0; +} + + +size_t vcm_get_phys_num_res(phys_addr_t phys) +{ + return 0; +} + + +struct res *vcm_get_next_phys_res(phys_addr_t phys, struct res *res, + size_t *len) +{ + return 0; +} + + +phys_addr_t vcm_get_pgtbl_pa(struct vcm *vcm) +{ + return 0; +} + + +/* No lock needed, smmu_translate has its own lock */ +phys_addr_t vcm_dev_addr_to_phys_addr(struct vcm *vcm, unsigned long dev_addr) +{ + if (!vcm) + return -EINVAL; +#ifdef CONFIG_SMMU + return iommu_iova_to_phys(vcm->domain, dev_addr); +#else + vcm_err("No support for SMMU - manual translation not supported\n"); + return -ENODEV; +#endif +} + + +/* No lock needed, bootmem_cont never changes after */ +phys_addr_t vcm_get_cont_memtype_pa(enum memtype_t memtype) +{ + if (memtype != VCM_MEMTYPE_0) { + vcm_err("memtype != VCM_MEMTYPE_0\n"); + goto fail; + } + + if (!bootmem_cont) { + vcm_err("bootmem_cont 0\n"); + goto fail; + } + + return (size_t) bootmem_cont; +fail: + return 0; +} + + +/* No lock needed, constant */ +size_t vcm_get_cont_memtype_len(enum memtype_t memtype) +{ + if (memtype != VCM_MEMTYPE_0) { + vcm_err("memtype != VCM_MEMTYPE_0\n"); + return 0; + } + + return cont_sz; +} + +int vcm_hook(struct device *dev, vcm_handler handler, void *data) +{ +#ifdef CONFIG_SMMU + vcm_err("No interrupts in IOMMU API\n"); + return -ENODEV; +#else + vcm_err("No support for SMMU - interrupts not supported\n"); + return -ENODEV; +#endif +} + + +size_t vcm_hw_ver(size_t dev) +{ + return 0; +} + + +static int vcm_cont_phys_chunk_init(void) +{ + int i; + int cont_pa; + + if (!cont_phys_chunk) { + vcm_err("cont_phys_chunk 0\n"); + goto fail; + } + + if (!bootmem_cont) { + vcm_err("bootmem_cont 0\n"); + goto fail; + } + + cont_pa = (size_t) bootmem_cont; + + for (i = 0; i < cont_sz/PAGE_SIZE; ++i) { + cont_phys_chunk[i].pa = cont_pa; cont_pa += PAGE_SIZE; + cont_phys_chunk[i].size = SZ_4K; + /* Not part of an allocator-managed pool */ + cont_phys_chunk[i].pool_idx = -1; + INIT_LIST_HEAD(&cont_phys_chunk[i].allocated); + } + + return 0; + +fail: + return -EINVAL; +} + +int vcm_sys_init(struct physmem_region *mem, int n_regions, + struct vcm_memtype_map *mt_map, int n_mt, + void *cont_pa, unsigned int cont_len) +{ + int ret; + printk(KERN_INFO "VCM Initialization\n"); + bootmem_cont = cont_pa; + cont_sz = cont_len; + + if (!bootmem_cont) { + vcm_err("bootmem_cont is 0\n"); + ret = 
-1; + goto fail; + } + + ret = vcm_setup_tex_classes(); + if (ret != 0) { + printk(KERN_INFO "Could not determine TEX attribute mapping\n"); + ret = -1; + goto fail; + } + + + ret = vcm_alloc_init(mem, n_regions, mt_map, n_mt); + + if (ret != 0) { + vcm_err("vcm_alloc_init() ret %i\n", ret); + ret = -1; + goto fail; + } + + cont_phys_chunk = kzalloc(sizeof(*cont_phys_chunk)*(cont_sz/PAGE_SIZE), + GFP_KERNEL); + if (!cont_phys_chunk) { + vcm_err("kzalloc(%lu, GFP_KERNEL) ret 0", + sizeof(*cont_phys_chunk)*(cont_sz/PAGE_SIZE)); + goto fail_free; + } + + /* the address and size will hit our special case unless we + pass an override */ + cont_vcm_id = vcm_create_flagged(0, (size_t)bootmem_cont, cont_sz); + if (cont_vcm_id == 0) { + vcm_err("vcm_create_flagged(0, %p, %i) ret 0\n", + bootmem_cont, cont_sz); + ret = -1; + goto fail_free2; + } + + ret = vcm_cont_phys_chunk_init(); + if (ret != 0) { + vcm_err("vcm_cont_phys_chunk_init() ret %i\n", ret); + goto fail_free3; + } + + printk(KERN_INFO "VCM Initialization OK\n"); + return 0; + +fail_free3: + ret = __vcm_free(cont_vcm_id); + if (ret != 0) { + vcm_err("vcm_free(%p) ret %i during failure path\n", + (void *) cont_vcm_id, ret); + return ret; + } + +fail_free2: + kfree(cont_phys_chunk); + cont_phys_chunk = 0; + +fail_free: + ret = vcm_alloc_destroy(); + if (ret != 0) + vcm_err("vcm_alloc_destroy() ret %i during failure path\n", + ret); + + ret = -EINVAL; +fail: + return ret; +} + + +int vcm_sys_destroy(void) +{ + int ret = 0; + + if (!cont_phys_chunk) { + vcm_err("cont_phys_chunk is 0\n"); + return -ENODEV; + } + + if (!cont_vcm_id) { + vcm_err("cont_vcm_id is 0\n"); + return -ENODEV; + } + + ret = __vcm_free(cont_vcm_id); + if (ret != 0) { + vcm_err("vcm_free(%p) ret %i\n", (void *) cont_vcm_id, ret); + return -ENODEV; + } + + cont_vcm_id = 0; + + kfree(cont_phys_chunk); + cont_phys_chunk = 0; + + ret = vcm_alloc_destroy(); + if (ret != 0) { + vcm_err("vcm_alloc_destroy() ret %i\n", ret); + return ret; + } + + return ret; +} + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Zach Pfeffer "); diff --git a/arch/arm/mm/vcm_alloc.c b/arch/arm/mm/vcm_alloc.c new file mode 100644 index 0000000000000..5f3c024757d1a --- /dev/null +++ b/arch/arm/mm/vcm_alloc.c @@ -0,0 +1,557 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include + +int basicalloc_init; + +#define vcm_alloc_err(a, ...) 
\ + pr_err("ERROR %s %i " a, __func__, __LINE__, ##__VA_ARGS__) + +struct phys_chunk_head { + struct list_head head; + int num; +}; + +struct phys_pool { + int size; + int chunk_size; + struct phys_chunk_head head; +}; + +static int vcm_num_phys_pools; +static int vcm_num_memtypes; +static struct phys_pool *vcm_phys_pool; +static struct vcm_memtype_map *memtype_map; + +static int num_pools(enum memtype_t memtype) +{ + if (memtype >= vcm_num_memtypes) { + vcm_alloc_err("Bad memtype: %d\n", memtype); + return -EINVAL; + } + return memtype_map[memtype].num_pools; +} + +static int pool_chunk_size(enum memtype_t memtype, int prio_idx) +{ + int pool_idx; + if (memtype >= vcm_num_memtypes) { + vcm_alloc_err("Bad memtype: %d\n", memtype); + return -EINVAL; + } + + if (prio_idx >= num_pools(memtype)) { + vcm_alloc_err("Bad prio index: %d, max=%d, mt=%d\n", prio_idx, + num_pools(memtype), memtype); + return -EINVAL; + } + + pool_idx = memtype_map[memtype].pool_id[prio_idx]; + return vcm_phys_pool[pool_idx].chunk_size; +} + +int vcm_alloc_pool_idx_to_size(int pool_idx) +{ + if (pool_idx >= vcm_num_phys_pools) { + vcm_alloc_err("Bad pool index: %d\n, max=%d\n", pool_idx, + vcm_num_phys_pools); + return -EINVAL; + } + return vcm_phys_pool[pool_idx].chunk_size; +} + +static struct phys_chunk_head *get_chunk_list(enum memtype_t memtype, + int prio_idx) +{ + unsigned int pool_idx; + + if (memtype >= vcm_num_memtypes) { + vcm_alloc_err("Bad memtype: %d\n", memtype); + return NULL; + } + + if (prio_idx >= num_pools(memtype)) { + vcm_alloc_err("bad chunk size: mt=%d, prioidx=%d, np=%d\n", + memtype, prio_idx, num_pools(memtype)); + BUG(); + return NULL; + } + + if (!vcm_phys_pool) { + vcm_alloc_err("phys_pool is null\n"); + return NULL; + } + + /* We don't have a "pool count" anywhere but this is coming + * strictly from data in a board file + */ + pool_idx = memtype_map[memtype].pool_id[prio_idx]; + + return &vcm_phys_pool[pool_idx].head; +} + +static int is_allocated(struct list_head *allocated) +{ + /* This should not happen under normal conditions */ + if (!allocated) { + vcm_alloc_err("no allocated\n"); + return 0; + } + + if (!basicalloc_init) { + vcm_alloc_err("no basicalloc_init\n"); + return 0; + } + return !list_empty(allocated); +} + +static int count_allocated_size(enum memtype_t memtype, int idx) +{ + int cnt = 0; + struct phys_chunk *chunk, *tmp; + struct phys_chunk_head *pch; + + if (!basicalloc_init) { + vcm_alloc_err("no basicalloc_init\n"); + return 0; + } + + pch = get_chunk_list(memtype, idx); + if (!pch) { + vcm_alloc_err("null pch\n"); + return -EINVAL; + } + + list_for_each_entry_safe(chunk, tmp, &pch->head, list) { + if (is_allocated(&chunk->allocated)) + cnt++; + } + + return cnt; +} + + +int vcm_alloc_get_mem_size(void) +{ + if (!vcm_phys_pool) { + vcm_alloc_err("No physical pool set up!\n"); + return -ENODEV; + } + return vcm_phys_pool[0].size; +} +EXPORT_SYMBOL(vcm_alloc_get_mem_size); + +void vcm_alloc_print_list(enum memtype_t memtype, int just_allocated) +{ + int i; + struct phys_chunk *chunk, *tmp; + struct phys_chunk_head *pch; + + if (!basicalloc_init) { + vcm_alloc_err("no basicalloc_init\n"); + return; + } + + for (i = 0; i < num_pools(memtype); ++i) { + pch = get_chunk_list(memtype, i); + + if (!pch) { + vcm_alloc_err("pch is null\n"); + return; + } + + if (list_empty(&pch->head)) + continue; + + list_for_each_entry_safe(chunk, tmp, &pch->head, list) { + if (just_allocated && !is_allocated(&chunk->allocated)) + continue; + + printk(KERN_INFO "pa = %#x, size = %#x\n", + 
chunk->pa, vcm_phys_pool[chunk->pool_idx].chunk_size); + } + } +} +EXPORT_SYMBOL(vcm_alloc_print_list); + +int vcm_alloc_blocks_avail(enum memtype_t memtype, int idx) +{ + struct phys_chunk_head *pch; + if (!basicalloc_init) { + vcm_alloc_err("no basicalloc_init\n"); + return 0; + } + pch = get_chunk_list(memtype, idx); + + if (!pch) { + vcm_alloc_err("pch is null\n"); + return 0; + } + return pch->num; +} +EXPORT_SYMBOL(vcm_alloc_blocks_avail); + + +int vcm_alloc_get_num_chunks(enum memtype_t memtype) +{ + return num_pools(memtype); +} +EXPORT_SYMBOL(vcm_alloc_get_num_chunks); + + +int vcm_alloc_all_blocks_avail(enum memtarget_t memtype) +{ + int i; + int cnt = 0; + + if (!basicalloc_init) { + vcm_alloc_err("no basicalloc_init\n"); + return 0; + } + + for (i = 0; i < num_pools(memtype); ++i) + cnt += vcm_alloc_blocks_avail(memtype, i); + return cnt; +} +EXPORT_SYMBOL(vcm_alloc_all_blocks_avail); + + +int vcm_alloc_count_allocated(enum memtype_t memtype) +{ + int i; + int cnt = 0; + + if (!basicalloc_init) { + vcm_alloc_err("no basicalloc_init\n"); + return 0; + } + + for (i = 0; i < num_pools(memtype); ++i) + cnt += count_allocated_size(memtype, i); + return cnt; +} +EXPORT_SYMBOL(vcm_alloc_count_allocated); + +int vcm_alloc_destroy(void) +{ + int i, mt; + struct phys_chunk *chunk, *tmp; + + if (!basicalloc_init) { + vcm_alloc_err("no basicalloc_init\n"); + return -ENODEV; + } + + /* can't destroy a space that has allocations */ + for (mt = 0; mt < vcm_num_memtypes; mt++) + if (vcm_alloc_count_allocated(mt)) { + vcm_alloc_err("allocations still present\n"); + return -EBUSY; + } + + for (i = 0; i < vcm_num_phys_pools; i++) { + struct phys_chunk_head *pch = &vcm_phys_pool[i].head; + + if (list_empty(&pch->head)) + continue; + list_for_each_entry_safe(chunk, tmp, &pch->head, list) { + list_del(&chunk->list); + memset(chunk, 0, sizeof(*chunk)); + kfree(chunk); + } + vcm_phys_pool[i].head.num = 0; + } + + kfree(vcm_phys_pool); + kfree(memtype_map); + + vcm_phys_pool = NULL; + memtype_map = NULL; + basicalloc_init = 0; + vcm_num_phys_pools = 0; + return 0; +} +EXPORT_SYMBOL(vcm_alloc_destroy); + + +int vcm_alloc_init(struct physmem_region *mem, int n_regions, + struct vcm_memtype_map *mt_map, int n_mt) +{ + int i = 0, j = 0, r = 0, num_chunks; + struct phys_chunk *chunk; + struct phys_chunk_head *pch = NULL; + unsigned long pa; + + /* no double inits */ + if (basicalloc_init) { + vcm_alloc_err("double basicalloc_init\n"); + BUG(); + goto fail; + } + memtype_map = kzalloc(sizeof(*mt_map) * n_mt, GFP_KERNEL); + if (!memtype_map) { + vcm_alloc_err("Could not copy memtype map\n"); + goto fail; + } + memcpy(memtype_map, mt_map, sizeof(*mt_map) * n_mt); + + vcm_phys_pool = kzalloc(sizeof(*vcm_phys_pool) * n_regions, GFP_KERNEL); + vcm_num_phys_pools = n_regions; + vcm_num_memtypes = n_mt; + + if (!vcm_phys_pool) { + vcm_alloc_err("Could not allocate physical pool structure\n"); + goto fail; + } + + /* separate out to ensure good cleanup */ + for (i = 0; i < n_regions; i++) { + pch = &vcm_phys_pool[i].head; + INIT_LIST_HEAD(&pch->head); + pch->num = 0; + } + + for (r = 0; r < n_regions; r++) { + pa = mem[r].addr; + vcm_phys_pool[r].size = mem[r].size; + vcm_phys_pool[r].chunk_size = mem[r].chunk_size; + pch = &vcm_phys_pool[r].head; + + num_chunks = mem[r].size / mem[r].chunk_size; + + printk(KERN_INFO "VCM Init: region %d, chunk size=%d, " + "num=%d, pa=%p\n", r, mem[r].chunk_size, num_chunks, + (void *)pa); + + for (j = 0; j < num_chunks; ++j) { + chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); + if 
(!chunk) { + vcm_alloc_err("null chunk\n"); + goto fail; + } + chunk->pa = pa; + chunk->size = mem[r].chunk_size; + pa += mem[r].chunk_size; + chunk->pool_idx = r; + INIT_LIST_HEAD(&chunk->allocated); + list_add_tail(&chunk->list, &pch->head); + pch->num++; + } + } + + basicalloc_init = 1; + return 0; +fail: + vcm_alloc_destroy(); + return -EINVAL; +} +EXPORT_SYMBOL(vcm_alloc_init); + + +int vcm_alloc_free_blocks(enum memtype_t memtype, struct phys_chunk *alloc_head) +{ + struct phys_chunk *chunk, *tmp; + struct phys_chunk_head *pch = NULL; + + if (!basicalloc_init) { + vcm_alloc_err("no basicalloc_init\n"); + goto fail; + } + + if (!alloc_head) { + vcm_alloc_err("no alloc_head\n"); + goto fail; + } + + list_for_each_entry_safe(chunk, tmp, &alloc_head->allocated, + allocated) { + list_del_init(&chunk->allocated); + pch = &vcm_phys_pool[chunk->pool_idx].head; + + if (!pch) { + vcm_alloc_err("null pch\n"); + goto fail; + } + pch->num++; + } + + return 0; +fail: + return -ENODEV; +} +EXPORT_SYMBOL(vcm_alloc_free_blocks); + + +int vcm_alloc_num_blocks(int num, enum memtype_t memtype, int idx, + struct phys_chunk *alloc_head) +{ + struct phys_chunk *chunk; + struct phys_chunk_head *pch = NULL; + int num_allocated = 0; + + if (!basicalloc_init) { + vcm_alloc_err("no basicalloc_init\n"); + goto fail; + } + + if (!alloc_head) { + vcm_alloc_err("no alloc_head\n"); + goto fail; + } + + pch = get_chunk_list(memtype, idx); + + if (!pch) { + vcm_alloc_err("null pch\n"); + goto fail; + } + if (list_empty(&pch->head)) { + vcm_alloc_err("list is empty\n"); + goto fail; + } + + if (vcm_alloc_blocks_avail(memtype, idx) < num) { + vcm_alloc_err("not enough blocks? num=%d\n", num); + goto fail; + } + + list_for_each_entry(chunk, &pch->head, list) { + if (num_allocated == num) + break; + if (is_allocated(&chunk->allocated)) + continue; + + list_add_tail(&chunk->allocated, &alloc_head->allocated); + pch->num--; + num_allocated++; + } + return num_allocated; +fail: + return 0; +} +EXPORT_SYMBOL(vcm_alloc_num_blocks); + + +int vcm_alloc_max_munch(int len, enum memtype_t memtype, + struct phys_chunk *alloc_head) +{ + int i; + + int blocks_req = 0; + int block_residual = 0; + int blocks_allocated = 0; + int cur_chunk_size = 0; + int ba = 0; + + if (!basicalloc_init) { + vcm_alloc_err("basicalloc_init is 0\n"); + goto fail; + } + + if (!alloc_head) { + vcm_alloc_err("alloc_head is NULL\n"); + goto fail; + } + + if (num_pools(memtype) <= 0) { + vcm_alloc_err("Memtype %d has improper mempool configuration\n", + memtype); + goto fail; + } + + for (i = 0; i < num_pools(memtype); ++i) { + cur_chunk_size = pool_chunk_size(memtype, i); + if (cur_chunk_size <= 0) { + vcm_alloc_err("Bad chunk size: %d\n", cur_chunk_size); + goto fail; + } + + blocks_req = len / cur_chunk_size; + block_residual = len % cur_chunk_size; + + len = block_residual; /* len left */ + if (blocks_req) { + int blocks_available = 0; + int blocks_diff = 0; + int bytes_diff = 0; + + blocks_available = vcm_alloc_blocks_avail(memtype, i); + if (blocks_available < blocks_req) { + blocks_diff = + (blocks_req - blocks_available); + bytes_diff = + blocks_diff * cur_chunk_size; + + /* add back in the rest */ + len += bytes_diff; + } else { + /* got all the blocks I need */ + blocks_available = + (blocks_available > blocks_req) + ? 
blocks_req : blocks_available; + } + + ba = vcm_alloc_num_blocks(blocks_available, memtype, i, + alloc_head); + + if (ba != blocks_available) { + vcm_alloc_err("blocks allocated (%i) !=" + " blocks_available (%i):" + " chunk size = %#x," + " alloc_head = %p\n", + ba, blocks_available, + i, (void *) alloc_head); + goto fail; + } + blocks_allocated += blocks_available; + } + } + + if (len) { + int blocks_available = 0; + int last_sz = num_pools(memtype) - 1; + blocks_available = vcm_alloc_blocks_avail(memtype, last_sz); + + if (blocks_available > 0) { + ba = vcm_alloc_num_blocks(1, memtype, last_sz, + alloc_head); + if (ba != 1) { + vcm_alloc_err("blocks allocated (%i) !=" + " blocks_available (%i):" + " chunk size = %#x," + " alloc_head = %p\n", + ba, 1, + last_sz, + (void *) alloc_head); + goto fail; + } + blocks_allocated += 1; + } else { + vcm_alloc_err("blocks_available (%#x) <= 1\n", + blocks_available); + goto fail; + } + } + + return blocks_allocated; +fail: + vcm_alloc_free_blocks(memtype, alloc_head); + return 0; +} +EXPORT_SYMBOL(vcm_alloc_max_munch); diff --git a/arch/arm/mm/vcm_mm.c b/arch/arm/mm/vcm_mm.c new file mode 100644 index 0000000000000..dee51fab8a49e --- /dev/null +++ b/arch/arm/mm/vcm_mm.c @@ -0,0 +1,253 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* Architecture-specific VCM functions */ + +#include +#include + +#include +#include + +#define MRC(reg, processor, op1, crn, crm, op2) \ +__asm__ __volatile__ ( \ +" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 " \n" \ +: "=r" (reg)) + +#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0) +#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1) + + +/* Local type attributes (not the same as VCM) */ +#define ARM_MT_NORMAL 2 +#define ARM_MT_STRONGLYORDERED 0 +#define ARM_MT_DEVICE 1 + +#define ARM_CP_NONCACHED 0 +#define ARM_CP_WB_WA 1 +#define ARM_CP_WB_NWA 3 +#define ARM_CP_WT_NWA 2 + +#define smmu_err(a, ...) \ + pr_err("ERROR %s %i " a, __func__, __LINE__, ##__VA_ARGS__) + +#define FL_OFFSET(va) (((va) & 0xFFF00000) >> 20) +#define SL_OFFSET(va) (((va) & 0xFF000) >> 12) + +int vcm_driver_tex_class[4]; + +static int find_tex_class(int icp, int ocp, int mt, int nos) +{ + int i = 0; + unsigned int prrr = 0; + unsigned int nmrr = 0; + int c_icp, c_ocp, c_mt, c_nos; + + RCP15_PRRR(prrr); + RCP15_NMRR(nmrr); + + /* There are only 8 classes on this architecture */ + /* If they add more classes, registers will VASTLY change */ + for (i = 0; i < 8; i++) { + c_nos = prrr & (1 << (i + 24)) ? 1 : 0; + c_mt = (prrr & (3 << (i * 2))) >> (i * 2); + c_icp = (nmrr & (3 << (i * 2))) >> (i * 2); + c_ocp = (nmrr & (3 << (i * 2 + 16))) >> (i * 2 + 16); + + if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos) + return i; + } + smmu_err("Could not find TEX class for ICP=%d, OCP=%d, MT=%d, NOS=%d\n", + icp, ocp, mt, nos); + + /* In reality, we may want to remove this panic. 
Some classes just */ + /* will not be available, and will fail in smmu_set_attr */ + panic("SMMU: Could not determine TEX attribute mapping.\n"); + return -1; +} + + +int vcm_setup_tex_classes(void) +{ + unsigned int cpu_prrr; + unsigned int cpu_nmrr; + + if (!(get_cr() & CR_TRE)) /* No TRE? */ + panic("TEX remap not enabled, but the SMMU driver needs it!\n"); + + RCP15_PRRR(cpu_prrr); + RCP15_NMRR(cpu_nmrr); + + vcm_driver_tex_class[VCM_DEV_ATTR_NONCACHED] = + find_tex_class(ARM_CP_NONCACHED, ARM_CP_NONCACHED, + ARM_MT_NORMAL, 1); + + vcm_driver_tex_class[VCM_DEV_ATTR_CACHED_WB_WA] = + find_tex_class(ARM_CP_WB_WA, ARM_CP_WB_WA, + ARM_MT_NORMAL, 1); + + vcm_driver_tex_class[VCM_DEV_ATTR_CACHED_WB_NWA] = + find_tex_class(ARM_CP_WB_NWA, ARM_CP_WB_NWA, + ARM_MT_NORMAL, 1); + + vcm_driver_tex_class[VCM_DEV_ATTR_CACHED_WT] = + find_tex_class(ARM_CP_WT_NWA, ARM_CP_WT_NWA, + ARM_MT_NORMAL, 1); +#ifdef DEBUG_TEX + printk(KERN_INFO "VCM driver debug: Using TEX classes: %d %d %d %d\n", + vcm_driver_tex_class[VCM_DEV_ATTR_NONCACHED], + vcm_driver_tex_class[VCM_DEV_ATTR_CACHED_WB_WA], + vcm_driver_tex_class[VCM_DEV_ATTR_CACHED_WB_NWA], + vcm_driver_tex_class[VCM_DEV_ATTR_CACHED_WT]); +#endif + return 0; +} + + +int set_arm7_pte_attr(unsigned long pt_base, unsigned long va, + unsigned long len, unsigned int attr) +{ + unsigned long *fl_table = NULL; + unsigned long *fl_pte = NULL; + unsigned long fl_offset = 0; + unsigned long *sl_table = NULL; + unsigned long *sl_pte = NULL; + unsigned long sl_offset = 0; + int i; + int sh = 0; + int class = 0; + + /* Alignment */ + if (va & (len-1)) { + smmu_err("misaligned va: %p\n", (void *) va); + goto fail; + } + if (attr > 7) { + smmu_err("bad attribute: %d\n", attr); + goto fail; + } + + sh = (attr & VCM_DEV_ATTR_SH) ? 1 : 0; + class = vcm_driver_tex_class[attr & 0x03]; + + if (class > 7 || class < 0) { /* Bad class */ + smmu_err("bad tex class: %d\n", class); + goto fail; + } + + if (len != SZ_16M && len != SZ_1M && + len != SZ_64K && len != SZ_4K) { + smmu_err("bad size: %lu\n", len); + goto fail; + } + + fl_table = (unsigned long *) pt_base; + + if (!fl_table) { + smmu_err("null page table\n"); + goto fail; + } + + fl_offset = FL_OFFSET(va); /* Upper 12 bits */ + fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */ + + if (*fl_pte == 0) { /* Nothing there! */ + smmu_err("first level pte is 0\n"); + goto fail; + } + + /* Supersection attributes */ + if (len == SZ_16M) { + for (i = 0; i < 16; i++) { + /* Clear the old bits */ + *(fl_pte+i) &= ~(PMD_SECT_S | PMD_SECT_CACHEABLE | + PMD_SECT_BUFFERABLE | PMD_SECT_TEX(1)); + + /* Assign new class and S bit */ + *(fl_pte+i) |= sh ? PMD_SECT_S : 0; + *(fl_pte+i) |= class & 0x01 ? PMD_SECT_BUFFERABLE : 0; + *(fl_pte+i) |= class & 0x02 ? PMD_SECT_CACHEABLE : 0; + *(fl_pte+i) |= class & 0x04 ? PMD_SECT_TEX(1) : 0; + } + } else if (len == SZ_1M) { + + /* Clear the old bits */ + *(fl_pte) &= ~(PMD_SECT_S | PMD_SECT_CACHEABLE | + PMD_SECT_BUFFERABLE | PMD_SECT_TEX(1)); + + /* Assign new class and S bit */ + *(fl_pte) |= sh ? PMD_SECT_S : 0; + *(fl_pte) |= class & 0x01 ? PMD_SECT_BUFFERABLE : 0; + *(fl_pte) |= class & 0x02 ? PMD_SECT_CACHEABLE : 0; + *(fl_pte) |= class & 0x04 ? 
PMD_SECT_TEX(1) : 0; + } + + sl_table = (unsigned long *) __va(((*fl_pte) & 0xFFFFFC00)); + sl_offset = SL_OFFSET(va); + sl_pte = sl_table + sl_offset; + + if (len == SZ_64K) { + for (i = 0; i < 16; i++) { + /* Clear the old bits */ + *(sl_pte+i) &= ~(PTE_EXT_SHARED | PTE_CACHEABLE | + PTE_BUFFERABLE | PTE_EXT_TEX(1)); + + /* Assign new class and S bit */ + *(sl_pte+i) |= sh ? PTE_EXT_SHARED : 0; + *(sl_pte+i) |= class & 0x01 ? PTE_BUFFERABLE : 0; + *(sl_pte+i) |= class & 0x02 ? PTE_CACHEABLE : 0; + *(sl_pte+i) |= class & 0x04 ? PTE_EXT_TEX(1) : 0; + } + } else if (len == SZ_4K) { + /* Clear the old bits */ + *(sl_pte) &= ~(PTE_EXT_SHARED | PTE_CACHEABLE | + PTE_BUFFERABLE | PTE_EXT_TEX(1)); + + /* Assign new class and S bit */ + *(sl_pte) |= sh ? PTE_EXT_SHARED : 0; + *(sl_pte) |= class & 0x01 ? PTE_BUFFERABLE : 0; + *(sl_pte) |= class & 0x02 ? PTE_CACHEABLE : 0; + *(sl_pte) |= class & 0x04 ? PTE_EXT_TEX(1) : 0; + } + + + mb(); + return 0; +fail: + return 1; +} + + +int cpu_set_attr(unsigned long va, unsigned long len, unsigned int attr) +{ + int ret; + pgd_t *pgd = init_mm.pgd; + + if (!pgd) { + smmu_err("null pgd\n"); + goto fail; + } + + ret = set_arm7_pte_attr((unsigned long)pgd, va, len, attr); + + if (ret != 0) { + smmu_err("could not set attribute: \ + pgd=%p, va=%p, len=%lu, attr=%d\n", + (void *) pgd, (void *) va, len, attr); + goto fail; + } + dmb(); + flush_tlb_all(); + return 0; +fail: + return -1; +} diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c index c074e66ad224e..afd7afba96ec5 100644 --- a/arch/arm/oprofile/common.c +++ b/arch/arm/oprofile/common.c @@ -40,6 +40,12 @@ char *op_name_from_perf_id(void) return "arm/armv7"; case ARM_PERF_PMU_ID_CA9: return "arm/armv7-ca9"; + case ARM_PERF_PMU_ID_SCORPION: + return "arm/armv7-scorpion"; + case ARM_PERF_PMU_ID_SCORPIONMP: + return "arm/armv7-scorpionmp"; + case ARM_PERF_PMU_ID_KRAIT: + return "arm/armv7-krait"; default: return NULL; } diff --git a/arch/arm/perfmon/Makefile b/arch/arm/perfmon/Makefile new file mode 100644 index 0000000000000..716e0873d3903 --- /dev/null +++ b/arch/arm/perfmon/Makefile @@ -0,0 +1,6 @@ +obj-$(CONFIG_KSAPI) += ksapi.o + +# Object file lists. +obj-y += perf-function-hooks.o +ksapi-y += perf-v7.o per.o per-process-perf.o per-axi.o +ksapi-$(CONFIG_ARCH_MSM8X60) += perf-smp.o diff --git a/arch/arm/perfmon/cp15_registers.h b/arch/arm/perfmon/cp15_registers.h new file mode 100644 index 0000000000000..1adfceedea20d --- /dev/null +++ b/arch/arm/perfmon/cp15_registers.h @@ -0,0 +1,109 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Code Aurora nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* +cp15_registers.h + +DESCRIPTION: define macros for reading and writing to the cp registers +for the ARMv7 + +REV/DATE: Fri Mar 18 15:54:32 EST 2005 +*/ + +#ifndef __cp15_registers__ +#define __cp15_registers__ + +#include "mcrmrc.h" + +#define WCP15_SDER(reg) MCR15(reg, 0, c1, c1, 1) +/* +* Performance Monitor Registers +*/ +#define WCP15_PMACTLR(reg) MCR15(reg, 0, c9, c15, 5) +#define WCP15_PMCCNTCR(reg) MCR15(reg, 0, c9, c15, 2) +#define WCP15_PMCCNTR(reg) MCR15(reg, 0, c9, c13, 0) +#define WCP15_PMCCNTSR(reg) MCR15(reg, 0, c9, c13, 3) +#define WCP15_PMCNTENCLR(reg) MCR15(reg, 0, c9, c12, 2) +#define WCP15_PMCNTENSET(reg) MCR15(reg, 0, c9, c12, 1) +#define WCP15_PMCR(reg) MCR15(reg, 0, c9, c12, 0) +#define WCP15_PMINTENCLR(reg) MCR15(reg, 0, c9, c14, 2) +#define WCP15_PMINTENSET(reg) MCR15(reg, 0, c9, c14, 1) +#define WCP15_PMOVSR(reg) MCR15(reg, 0, c9, c12, 3) +#define WCP15_PMRLDR(reg) MCR15(reg, 0, c9, c15, 4) +#define WCP15_PMSELR(reg) MCR15(reg, 0, c9, c12, 5) +#define WCP15_PMSWINC(reg) MCR15(reg, 0, c9, c12, 4) +#define WCP15_PMUSERENR(reg) MCR15(reg, 0, c9, c14, 0) +#define WCP15_PMXEVCNTCR(reg) MCR15(reg, 0, c9, c15, 0) +#define WCP15_PMXEVCNTR(reg) MCR15(reg, 0, c9, c13, 2) +#define WCP15_PMXEVCNTSR(reg) MCR15(reg, 0, c9, c15, 1) +#define WCP15_PMXEVTYPER(reg) MCR15(reg, 0, c9, c13, 1) +#define WCP15_LPM0EVTYPER(reg) MCR15(reg, 0, c15, c0, 0) +#define WCP15_LPM1EVTYPER(reg) MCR15(reg, 1, c15, c0, 0) +#define WCP15_LPM2EVTYPER(reg) MCR15(reg, 2, c15, c0, 0) +#define WCP15_LPM3EVTYPER(reg) MCR15(reg, 3, c15, c0, 0) +#define WCP15_L2LPMEVTYPER(reg) MCR15(reg, 3, c15, c2, 0) +#define WCP15_VLPMEVTYPER(reg) MCR15(reg, 7, c11, c0, 0) +#define WCP15_L2VR3F1(reg) MCR15(reg, 3, c15, c15, 1) + +/* +* READ the registers +*/ +#define RCP15_SDER(reg) MRC15(reg, 0, c1, c1, 1) +/* +* Performance Monitor Registers +*/ +#define RCP15_PMACTLR(reg) MRC15(reg, 0, c9, c15, 5) +#define RCP15_PMCCNTCR(reg) MRC15(reg, 0, c9, c15, 2) +#define RCP15_PMCCNTR(reg) MRC15(reg, 0, c9, c13, 0) +#define RCP15_PMCCNTSR(reg) MRC15(reg, 0, c9, c13, 3) +#define RCP15_PMCNTENCLR(reg) MRC15(reg, 0, c9, c12, 2) +#define RCP15_PMCNTENSET(reg) MRC15(reg, 0, c9, c12, 1) +#define RCP15_PMCR(reg) MRC15(reg, 0, c9, c12, 0) +#define RCP15_PMINTENCLR(reg) MRC15(reg, 0, c9, c14, 2) +#define RCP15_PMINTENSET(reg) MRC15(reg, 0, c9, c14, 1) +#define RCP15_PMOVSR(reg) MRC15(reg, 0, c9, c12, 3) +#define RCP15_PMRLDR(reg) MRC15(reg, 0, c9, c15, 4) +#define RCP15_PMSELR(reg) MRC15(reg, 0, c9, c12, 5) +#define RCP15_PMSWINC(reg) MRC15(reg, 0, c9, c12, 4) +#define RCP15_PMUSERENR(reg) MRC15(reg, 0, c9, c14, 0) +#define RCP15_PMXEVCNTCR(reg) MRC15(reg, 0, c9, c15, 0) +#define RCP15_PMXEVCNTR(reg) MRC15(reg, 0, c9, 
c13, 2) +#define RCP15_PMXEVCNTSR(reg) MRC15(reg, 0, c9, c15, 1) +#define RCP15_PMXEVTYPER(reg) MRC15(reg, 0, c9, c13, 1) +#define RCP15_LPM0EVTYPER(reg) MRC15(reg, 0, c15, c0, 0) +#define RCP15_LPM1EVTYPER(reg) MRC15(reg, 1, c15, c0, 0) +#define RCP15_LPM2EVTYPER(reg) MRC15(reg, 2, c15, c0, 0) +#define RCP15_LPM3EVTYPER(reg) MRC15(reg, 3, c15, c0, 0) +#define RCP15_L2LPMEVTYPER(reg) MRC15(reg, 3, c15, c2, 0) +#define RCP15_VLPMEVTYPER(reg) MRC15(reg, 7, c11, c0, 0) +#define RCP15_CONTEXTIDR(reg) MRC15(reg, 0, c13, c0, 1) +#define RCP15_L2CR0(reg) MRC15(reg, 3, c15, c0, 1) +#define RCP15_L2VR3F1(reg) MRC15(reg, 3, c15, c15, 1) + +#endif + diff --git a/arch/arm/perfmon/l2_cp15_registers.h b/arch/arm/perfmon/l2_cp15_registers.h new file mode 100644 index 0000000000000..e53aeaff36bc7 --- /dev/null +++ b/arch/arm/perfmon/l2_cp15_registers.h @@ -0,0 +1,103 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Code Aurora nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +/* +cp15_registers.h + +DESCRIPTION: define macros for reading and writing to the cp registers +for the ARMv7 + +REV/DATE: Fri Mar 18 15:54:32 EST 2005 +*/ + +#ifndef __l2_cp15_registers__ +#define __l2_cp15_registers__ + +#include "mcrmrc.h" + +#define WCP15_SDER(reg) MCR15(reg, 0, c1, c1, 1) +/* +* Performance Monitor Registers +*/ +#define WCP15_L2MPCR(reg) MCR15(reg, 3, c15, c0, 4) +#define WCP15_L2PMCCNTCR(reg) MCR15(reg, 3, c15, c4, 4) +#define WCP15_L2PMCCNTR(reg) MCR15(reg, 3, c15, c4, 5) +#define WCP15_L2PMCCNTSR(reg) MCR15(reg, 3, c15, c4, 6) +#define WCP15_L2PMCNTENCLR(reg) MCR15(reg, 3, c15, c4, 2) +#define WCP15_L2PMCNTENSET(reg) MCR15(reg, 3, c15, c4, 3) +#define WCP15_L2PMCR(reg) MCR15(reg, 3, c15, c4, 0) +#define WCP15_L2PMINTENCLR(reg) MCR15(reg, 3, c15, c5, 0) +#define WCP15_L2PMINTENSET(reg) MCR15(reg, 3, c15, c5, 1) +#define WCP15_L2PMOVSR(reg) MCR15(reg, 3, c15, c4, 1) +#define WCP15_L2PMRLDR(reg) MCR15(reg, 3, c15, c4, 7) +#define WCP15_L2PMSELR(reg) MCR15(reg, 3, c15, c6, 0) +#define WCP15_L2PMXEVCNTCR(reg) MCR15(reg, 3, c15, c6, 4) +#define WCP15_L2PMXEVCNTR(reg) MCR15(reg, 3, c15, c6, 5) +#define WCP15_L2PMXEVCNTSR(reg) MCR15(reg, 3, c15, c6, 6) +#define WCP15_L2PMXEVTYPER(reg) MCR15(reg, 3, c15, c6, 7) +#define WCP15_L2PMXEVFILTER(reg) MCR15(reg, 3, c15, c6, 3) +#define WCP15_L2PMEVTYPER0(reg) MCR15(reg, 3, c15, c7, 0) +#define WCP15_L2PMEVTYPER1(reg) MCR15(reg, 3, c15, c7, 1) +#define WCP15_L2PMEVTYPER2(reg) MCR15(reg, 3, c15, c7, 2) +#define WCP15_L2PMEVTYPER3(reg) MCR15(reg, 3, c15, c7, 3) +#define WCP15_L2PMEVTYPER4(reg) MCR15(reg, 3, c15, c7, 4) +#define WCP15_L2VR3F1(reg) MCR15(reg, 3, c15, c15, 1) + +/* +* READ the registers +*/ +#define RCP15_SDER(reg) MRC15(reg, 0, c1, c1, 1) +/* +* Performance Monitor Registers +*/ +#define RCP15_L2MPCR(reg) MRC15(reg, 3, c15, c0, 4) +#define RCP15_L2PMCCNTCR(reg) MRC15(reg, 3, c15, c4, 4) +#define RCP15_L2PMCCNTR(reg) MRC15(reg, 3, c15, c4, 5) +#define RCP15_L2PMCCNTSR(reg) MRC15(reg, 3, c15, c4, 6) +#define RCP15_L2PMCNTENCLR(reg) MRC15(reg, 3, c15, c4, 2) +#define RCP15_L2PMCNTENSET(reg) MRC15(reg, 3, c15, c4, 3) +#define RCP15_L2PMCR(reg) MRC15(reg, 3, c15, c4, 0) +#define RCP15_L2PMINTENCLR(reg) MRC15(reg, 3, c15, c5, 0) +#define RCP15_L2PMINTENSET(reg) MRC15(reg, 3, c15, c5, 1) +#define RCP15_L2PMOVSR(reg) MRC15(reg, 3, c15, c4, 1) +#define RCP15_L2PMRLDR(reg) MRC15(reg, 3, c15, c4, 7) +#define RCP15_L2PMSELR(reg) MRC15(reg, 3, c15, c6, 0) +#define RCP15_L2PMXEVCNTCR(reg) MRC15(reg, 3, c15, c6, 4) +#define RCP15_L2PMXEVCNTR(reg) MRC15(reg, 3, c15, c6, 5) +#define RCP15_L2PMXEVCNTSR(reg) MRC15(reg, 3, c15, c6, 6) +#define RCP15_L2PMXEVTYPER(reg) MRC15(reg, 3, c15, c6, 7) +#define RCP15_L2PMXEVFILTER(reg) MRC15(reg, 3, c15, c6, 3) +#define RCP15_L2PMEVTYPER0(reg) MRC15(reg, 3, c15, c7, 0) +#define RCP15_L2PMEVTYPER1(reg) MRC15(reg, 3, c15, c7, 1) +#define RCP15_L2PMEVTYPER2(reg) MRC15(reg, 3, c15, c7, 2) +#define RCP15_L2PMEVTYPER3(reg) MRC15(reg, 3, c15, c7, 3) +#define RCP15_L2PMEVTYPER4(reg) MRC15(reg, 3, c15, c7, 4) +#define RCP15_L2VR3F1(reg) MRC15(reg, 3, c15, c15, 1) + +#endif + diff --git a/arch/arm/perfmon/mcrmrc.h b/arch/arm/perfmon/mcrmrc.h new file mode 100644 index 0000000000000..c161af06e41df --- /dev/null +++ b/arch/arm/perfmon/mcrmrc.h @@ -0,0 +1,101 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Code Aurora nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* +mrcmcr.h + +DESCRIPTION: Convenience macros for access the cp registers in the arm. + +REV/DATE: Fri Mar 18 16:34:44 EST 2005 +*/ + +#ifndef __mrcmcr__h_ +#define __mrcmcr__h_ + +/* +* Define some convenience macros to acccess the cp registers from c code +* Lots of macro trickery here. +* +* Takes the same format as the asm instructions and unfortunatly you cannot +* use variables to select the crn, crn or op fields... +* +* For those unfamiliar with the # and string stuff. +* # creates a string from the value and any two strings that are beside +* are concatenated...thus these create one big asm string for the +* inline asm code. +* +* When compiled these compile to single asm instructions (fast) but +* without all the hassel of __asm__ __volatile__ (...) =r +* +* Format is: +* +* unsigned long reg; // destination variable +* MRC(reg, p15, 0, c1, c0, 0 ); +* +* MRC read control register +* MCR control register write +*/ + +/* +* Some assembly macros so we can use the same macros as in the C version. +* Turns the ASM code a little C-ish but keeps the code consistent and in +* one location... +*/ +#ifdef __ASSEMBLY__ + + +#define MRC(reg, processor, op1, crn, crm, op2) \ +(mrc processor , op1 , reg, crn , crm , op2) + +#define MCR(reg, processor, op1, crn, crm, op2) \ +(mcr processor , op1 , reg, crn , crm , op2) + +/* +* C version of the macros. 
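
As a usage sketch (an illustration, not part of this header): a driver built
on these wrappers reads and writes a CP15 register through the MRC15/MCR15
layer, for example the PMU control register exposed as RCP15_PMCR() and
WCP15_PMCR() in cp15_registers.h earlier in this patch. Bit 0 of PMCR is the
architectural counter-enable bit.

    unsigned long pmcr;

    RCP15_PMCR(pmcr);   /* expands to: mrc p15, 0, %0, c9, c12, 0 */
    pmcr |= 0x1;        /* set PMCR.E to enable the counters */
    WCP15_PMCR(pmcr);   /* expands to: mcr p15, 0, %0, c9, c12, 0 */
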
+*/ +#else + +#define MRC(reg, processor, op1, crn, crm, op2) \ +__asm__ __volatile__ ( \ +" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \ +: "=r" (reg)) + +#define MCR(reg, processor, op1, crn, crm, op2) \ +__asm__ __volatile__ ( \ +" mcr " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \ +: : "r" (reg)) +#endif + + +/* +* Easy access convenience function to read CP15 registers from c code +*/ +#define MRC15(reg, op1, crn, crm, op2) MRC(reg, p15, op1, crn, crm, op2) +#define MCR15(reg, op1, crn, crm, op2) MCR(reg, p15, op1, crn, crm, op2) + +#endif diff --git a/arch/arm/perfmon/per-axi.c b/arch/arm/perfmon/per-axi.c new file mode 100644 index 0000000000000..48309bed82e5b --- /dev/null +++ b/arch/arm/perfmon/per-axi.c @@ -0,0 +1,759 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* +per-axi +DESCRIPTION +Functions related to AXI bus performance counter manipulations. +*/ + +#include +#include +#include +#include +#include +#include "asm/uaccess.h" +#include "per-axi.h" +#include "perf.h" + +/* +Definitions for AXI register addresses, macros to set and get register values +*/ +#define AXI_BASE_SIZE 0x00004000 +#define AXI_REG_BASE (AXI_BASE + 0x00000000) +#define AXI_REG_BASE_PHYS 0xa8200000 + +#define __inpdw(port) ioread32(port) +#define in_dword_masked(addr, mask) (__inpdw(addr) & (mask)) +#define __outpdw(port, val) (iowrite32((uint32_t) (val), port)) +#define out_dword(addr, val) __outpdw(addr, val) + +#define HWIO_AXI_MONITOR_CYCLE_COUNT_UPPER_REG_ADDR \ + (AXI_REG_BASE + 0x00003434) +#define HWIO_AXI_MONITOR_CYCLE_COUNT_UPPER_REG_RMSK 0xffff +#define HWIO_AXI_MONITOR_CYCLE_COUNT_UPPER_REG_IN \ + in_dword_masked(HWIO_AXI_MONITOR_CYCLE_COUNT_UPPER_REG_ADDR, \ + HWIO_AXI_MONITOR_CYCLE_COUNT_UPPER_REG_RMSK) + +#define HWIO_AXI_MONITOR_CYCLE_COUNT_LOWER_REG_ADDR (AXI_REG_BASE + 0x00003438) +#define HWIO_AXI_MONITOR_CYCLE_COUNT_LOWER_REG_RMSK 0xffff +#define HWIO_AXI_MONITOR_CYCLE_COUNT_LOWER_REG_IN \ + in_dword_masked(HWIO_AXI_MONITOR_CYCLE_COUNT_LOWER_REG_ADDR, \ + HWIO_AXI_MONITOR_CYCLE_COUNT_LOWER_REG_RMSK) + +#define HWIO_AXI_MONITOR_SELECTION_REG0_ADDR (AXI_REG_BASE + 0x00003428) +#define HWIO_AXI_MONITOR_SELECTION_REG1_ADDR (AXI_REG_BASE + 0x0000342c) +#define HWIO_AXI_MONITOR_TENURE_SELECTION_REG_ADDR (AXI_REG_BASE + 0x00003430) +#define HWIO_AXI_MONITOR_SELECTION_REG0_ETC_BMSK 0x4000 +#define HWIO_AXI_MONITOR_SELECTION_REG0_ECC_BMSK 0x2000 +#define HWIO_AXI_MONITOR_SELECTION_REG0_EEC1_BMSK 0x800 +#define HWIO_AXI_MONITOR_SELECTION_REG0_EEC0_BMSK 0x200 +#define HWIO_AXI_MONITOR_CYCLE_COUNT_UPPER_REG_OUT(v) \ + out_dword(HWIO_AXI_MONITOR_CYCLE_COUNT_UPPER_REG_ADDR, v) +#define HWIO_AXI_MONITOR_CYCLE_COUNT_LOWER_REG_OUT(v) \ + out_dword(HWIO_AXI_MONITOR_CYCLE_COUNT_LOWER_REG_ADDR, v) +#define HWIO_AXI_MONITOR_SELECTION_REG0_OUT(v) \ + out_dword(HWIO_AXI_MONITOR_SELECTION_REG0_ADDR, v) +#define HWIO_AXI_MONITOR_SELECTION_REG1_OUT(v) \ + out_dword(HWIO_AXI_MONITOR_SELECTION_REG1_ADDR, v) +#define HWIO_AXI_MONITOR_TENURE_SELECTION_REG_OUT(v) \ + 
out_dword(HWIO_AXI_MONITOR_TENURE_SELECTION_REG_ADDR, v) +#define HWIO_AXI_MONITOR_SELECTION_REG0_RMSK 0xffff +#define HWIO_AXI_MONITOR_SELECTION_REG0_IN \ + in_dword_masked(HWIO_AXI_MONITOR_SELECTION_REG0_ADDR, \ + HWIO_AXI_MONITOR_SELECTION_REG0_RMSK) + +#define HWIO_AXI_CONFIGURATION_REG_ADDR (AXI_REG_BASE + 0x00000008) +#define HWIO_AXI_CONFIGURATION_REG_OUT(v) \ + out_dword(HWIO_AXI_CONFIGURATION_REG_ADDR, v) +#define HWIO_AXI_CONFIGURATION_REG_PPDM_BMSK 0x0 +#define HWIO_AXI_CONFIGURATION_REG_DISABLE 0x2 +#define AXI_EVTSEL_ENABLE_MASK 0x6a00 +#define AXI_EVTSEL_DISABLE_MASK 0x95ff +#define AXI_EVTSEL_RESET_MASK 0xfe40 + +#define HWIO_AXI_MONITOR_EVENT_LOWER_REG0_ADDR (AXI_REG_BASE + 0x00003450) +#define HWIO_AXI_MONITOR_EVENT_LOWER_REG0_RMSK 0xffff +#define HWIO_AXI_MONITOR_EVENT_LOWER_REG0_SHFT 0 +#define HWIO_AXI_MONITOR_EVENT_LOWER_REG0_IN \ + in_dword_masked(HWIO_AXI_MONITOR_EVENT_LOWER_REG0_ADDR, \ + HWIO_AXI_MONITOR_EVENT_LOWER_REG0_RMSK) +#define HWIO_AXI_MONITOR_EVENT_UPPER_REG0_ADDR (AXI_REG_BASE + 0x00003454) +#define HWIO_AXI_MONITOR_EVENT_UPPER_REG0_RMSK 0xffff +#define HWIO_AXI_MONITOR_EVENT_UPPER_REG0_SHFT 0 +#define HWIO_AXI_MONITOR_EVENT_UPPER_REG0_IN \ + in_dword_masked(HWIO_AXI_MONITOR_EVENT_UPPER_REG0_ADDR, \ + HWIO_AXI_MONITOR_EVENT_UPPER_REG0_RMSK) + +#define HWIO_AXI_MONITOR_EVENT_LOWER_REG1_ADDR (AXI_REG_BASE + 0x00003458) +#define HWIO_AXI_MONITOR_EVENT_LOWER_REG1_RMSK 0xffff +#define HWIO_AXI_MONITOR_EVENT_LOWER_REG1_SHFT 0 +#define HWIO_AXI_MONITOR_EVENT_LOWER_REG1_IN \ + in_dword_masked(HWIO_AXI_MONITOR_EVENT_LOWER_REG1_ADDR, \ + HWIO_AXI_MONITOR_EVENT_LOWER_REG1_RMSK) +#define HWIO_AXI_MONITOR_EVENT_UPPER_REG1_ADDR (AXI_REG_BASE + 0x0000345c) +#define HWIO_AXI_MONITOR_EVENT_UPPER_REG1_RMSK 0xffff +#define HWIO_AXI_MONITOR_EVENT_UPPER_REG1_SHFT 0 +#define HWIO_AXI_MONITOR_EVENT_UPPER_REG1_IN \ + in_dword_masked(HWIO_AXI_MONITOR_EVENT_UPPER_REG1_ADDR, \ + HWIO_AXI_MONITOR_EVENT_UPPER_REG1_RMSK) + +#define HWIO_AXI_MONITOR_TENURE_LOWER_REG_ADDR (AXI_REG_BASE + 0x00003448) +#define HWIO_AXI_MONITOR_TENURE_LOWER_REG_RMSK 0xffff +#define HWIO_AXI_MONITOR_TENURE_LOWER_REG_SHFT 0 +#define HWIO_AXI_MONITOR_TENURE_LOWER_REG_IN \ + in_dword_masked(HWIO_AXI_MONITOR_TENURE_LOWER_REG_ADDR, \ + HWIO_AXI_MONITOR_TENURE_LOWER_REG_RMSK) +#define HWIO_AXI_MONITOR_TENURE_UPPER_REG_ADDR (AXI_REG_BASE + 0x00003444) +#define HWIO_AXI_MONITOR_TENURE_UPPER_REG_RMSK 0xffff +#define HWIO_AXI_MONITOR_TENURE_UPPER_REG_SHFT 0 +#define HWIO_AXI_MONITOR_TENURE_UPPER_REG_IN \ + in_dword_masked(HWIO_AXI_MONITOR_TENURE_UPPER_REG_ADDR, \ + HWIO_AXI_MONITOR_TENURE_UPPER_REG_RMSK) + +#define HWIO_AXI_MONITOR_MIN_REG_ADDR (AXI_REG_BASE + 0x0000343c) +#define HWIO_AXI_MONITOR_MIN_REG_RMSK 0xffff +#define HWIO_AXI_MONITOR_MIN_REG_SHFT 0 +#define HWIO_AXI_MONITOR_MIN_REG_IN \ + in_dword_masked(HWIO_AXI_MONITOR_MIN_REG_ADDR, \ + HWIO_AXI_MONITOR_MIN_REG_RMSK) +#define HWIO_AXI_MONITOR_MAX_REG_ADDR (AXI_REG_BASE + 0x00003440) +#define HWIO_AXI_MONITOR_MAX_REG_RMSK 0xffff +#define HWIO_AXI_MONITOR_MAX_REG_SHFT 0 +#define HWIO_AXI_MONITOR_MAX_REG_IN \ + in_dword_masked(HWIO_AXI_MONITOR_MAX_REG_ADDR, \ + HWIO_AXI_MONITOR_MAX_REG_RMSK) +#define HWIO_AXI_MONITOR_LAST_TENURE_REG_ADDR (AXI_REG_BASE + 0x0000344c) +#define HWIO_AXI_MONITOR_LAST_TENURE_REG_RMSK 0xffff +#define HWIO_AXI_MONITOR_LAST_TENURE_REG_SHFT 0 +#define HWIO_AXI_MONITOR_LAST_TENURE_REG_IN \ + in_dword_masked(HWIO_AXI_MONITOR_LAST_TENURE_REG_ADDR, \ + HWIO_AXI_MONITOR_LAST_TENURE_REG_RMSK) +#define 
HWIO_AXI_MONITOR_TENURE_UPPER_REG_OUT(v) \ + out_dword(HWIO_AXI_MONITOR_TENURE_UPPER_REG_ADDR, v) +#define HWIO_AXI_MONITOR_TENURE_LOWER_REG_OUT(v) \ + out_dword(HWIO_AXI_MONITOR_TENURE_LOWER_REG_ADDR, v) + +#define HWIO_AXI_RESET_ALL 0x9400 +#define HWIO_AXI_ENABLE_ALL_NOCYCLES 0x4a00 +#define HWIO_AXI_DISABLE_ALL 0xb500 +uint32_t AXI_BASE; + +unsigned int is_first = 1; +struct perf_mon_axi_data pm_axi_info; +struct perf_mon_axi_cnts axi_cnts; + +/* +FUNCTION get_axi_sel_reg0 + +DESCRIPTION + Retrieve the value of AXI_SEL_REG0 + +DEPENDENCIES + +RETURN VALUE + AXI_SEL_REG0 +SIDE EFFECTS +*/ +unsigned long get_axi_sel_reg0(void) +{ + return pm_axi_info.sel_reg0; +} + +/* +FUNCTION get_axi_sel_reg1 + +DESCRIPTION + Retrieve the value of AXI_SEL_REG1 + +DEPENDENCIES + +RETURN VALUE + AXI_SEL_REG1 +SIDE EFFECTS +*/ +unsigned long get_axi_sel_reg1(void) +{ + return pm_axi_info.sel_reg1; +} + +/* +FUNCTION get_axi_ten_sel_reg + +DESCRIPTION + Retrieve the value of AXI_TEN_REG + +DEPENDENCIES + +RETURN VALUE + AXI_TEN_REG +SIDE EFFECTS +*/ +unsigned long get_axi_ten_sel_reg(void) +{ + return pm_axi_info.ten_sel_reg; +} + +/* +FUNCTION get_axi_valid + +DESCRIPTION + Retrieve the value of AXI valid bit + +DEPENDENCIES + +RETURN VALUE + AXI Valid bit +SIDE EFFECTS +*/ +unsigned long get_axi_valid(void) +{ + return pm_axi_info.valid; +} + +/* +FUNCTION get_axi_enable + +DESCRIPTION + Retrieve the value of AXI enable bit + +DEPENDENCIES + +RETURN VALUE + AXI enable bit +SIDE EFFECTS +*/ +unsigned long get_axi_enable(void) +{ + return pm_axi_info.enable; +} + +/* +FUNCTION get_axi_clear + +DESCRIPTION + Retrieve the value of AXI clear bit + +DEPENDENCIES + +RETURN VALUE + AXI clear bit +SIDE EFFECTS +*/ +unsigned long get_axi_clear(void) +{ + return pm_axi_info.clear; +} + +/* +FUNCTION pm_axi_cnts_write + +DESCRIPTION + Write handler for the /proc axi results directory. + +DEPENDENCIES + +RETURN VALUE + Number of characters to output. + +SIDE EFFECTS +*/ +int pm_axi_cnts_write(struct file *file, const char *buff, + unsigned long cnt, void *data) +{ + char *newbuf; + struct PerfMonAxiCnts *p = + (struct PerfMonAxiCnts *)data; + + if (p == 0) + return cnt; + /* + * Alloc the user data in kernel space. and then copy user to kernel + */ + newbuf = kmalloc(cnt + 1, GFP_KERNEL); + if (0 == newbuf) + return cnt; + if (copy_from_user(newbuf, buff, cnt) != 0) { + printk(KERN_INFO "%s copy_from_user failed\n", __func__); + return cnt; + } + return cnt; +} + +/* +FUNCTION pm_axi_update_cnts + +DESCRIPTION + Read the current AXI counter values. Check for overflows and + adjust the values stored accordingly. + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +void pm_axi_update_cnts(void) +{ + if (is_first) { + pm_axi_start(); + } else { + if (pm_axi_info.valid == 1) { + pm_axi_info.valid = 0; + pm_axi_update(); + } else { + pm_axi_enable(); + } + } + is_first = 0; + axi_cnts.cycles += pm_get_axi_cycle_count(); + axi_cnts.cnt0 += pm_get_axi_evt0_count(); + axi_cnts.cnt1 += pm_get_axi_evt1_count(); + axi_cnts.tenure_total += pm_get_axi_ten_total_count(); + + axi_cnts.tenure_min = pm_get_axi_ten_min_count(); + axi_cnts.tenure_max = pm_get_axi_ten_max_count(); + axi_cnts.tenure_last = pm_get_axi_ten_last_count(); + + pm_axi_start(); +} + +/* +FUNCTION pm_axi_clear_cnts + +DESCRIPTION + Clear the locally stored AXI counter values. + Also clear the AXI counter registers. 
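
A minimal userspace model of the arithmetic behind pm_axi_update_cnts()
above and the pm_get_axi_*_count() helpers it calls: each hardware count is
exposed as 16-bit upper/lower register halves that are glued into one 32-bit
sample, and the cycle count is reported as 0xfffffffe minus the current
value because pm_axi_start() seeds the cycle counter with 0xfffffffe. The
register values below are made-up samples; the driver reads the real ones
through the HWIO_AXI_MONITOR_*_IN macros.

    #include <stdint.h>
    #include <stdio.h>

    /* Same shift-and-add that the HWIO upper/lower reads perform. */
    static uint32_t axi_sample(uint16_t upper, uint16_t lower)
    {
        return ((uint32_t)upper << 16) + lower;
    }

    int main(void)
    {
        uint64_t cnt0 = 0, cycles = 0;

        /* Hypothetical register reads for one sampling period. */
        uint16_t evt_hi = 0x0001, evt_lo = 0x2345;
        uint16_t cyc_hi = 0xfff0, cyc_lo = 0x1234;

        cnt0   += axi_sample(evt_hi, evt_lo);
        cycles += 0xfffffffeUL - axi_sample(cyc_hi, cyc_lo);

        printf("cnt0=%llu cycles=%llu\n",
               (unsigned long long)cnt0, (unsigned long long)cycles);
        return 0;
    }
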
+ +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +void pm_axi_clear_cnts(void) +{ + axi_cnts.cycles = 0; + axi_cnts.cnt0 = 0; + axi_cnts.cnt1 = 0; + axi_cnts.tenure_total = 0; + axi_cnts.tenure_min = 0; + axi_cnts.tenure_max = 0; + axi_cnts.tenure_last = 0; + pm_axi_start(); +} + +/* +FUNCTION pm_axi_read_decimal + +DESCRIPTION + Read handler for the /proc axi results directory in decimal format. + +DEPENDENCIES + +RETURN VALUE + Number of characters to output. + +SIDE EFFECTS +*/ +int pm_axi_read_decimal(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct perf_mon_axi_cnts *p = (struct perf_mon_axi_cnts *)data; + + return sprintf(page, "cnt0:%llu cnt1:%llu tenure:%llu ten_max:%llu \ + ten_min:%llu ten_last:%llu cycles:%llu\n", + p->cnt0, + p->cnt1, + p->tenure_total, + p->tenure_max, + p->tenure_min, + p->tenure_last, + p->cycles); +} + +/* +FUNCTION pm_axi_read_hex + +DESCRIPTION + Read handler for the /proc axi results directory in hex format. + +DEPENDENCIES + +RETURN VALUE + Number of characters to output. + +SIDE EFFECTS +*/ +int pm_axi_read_hex(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct perf_mon_axi_cnts *p = (struct perf_mon_axi_cnts *)data; + + return sprintf(page, "cnt0:%llx cnt1:%llx tenure:%llx ten_max:%llx \ + ten_min:%llx ten_last:%llx cycles:%llx\n", + p->cnt0, + p->cnt1, + p->tenure_total, + p->tenure_max, + p->tenure_min, + p->tenure_last, + p->cycles); + +} + +/* +FUNCTION pm_axi_set_proc_entry + +DESCRIPTION + Create a generic entry for the /proc axi settings directory. + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +void pm_axi_set_proc_entry(char *name, unsigned long *var, + struct proc_dir_entry *d, int hex) +{ + struct proc_dir_entry *pe; + pe = create_proc_entry(name, 0777, d); + if (0 == pe) + return; + if (hex) { + pe->read_proc = per_process_read; + pe->write_proc = per_process_write_hex; + } else { + pe->read_proc = per_process_read_decimal; + pe->write_proc = per_process_write_dec; + } + pe->data = (void *)var; +} + +/* +FUNCTION pm_axi_get_cnt_proc_entry + +DESCRIPTION + Create a generic entry for the /proc axi results directory. + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +void pm_axi_get_cnt_proc_entry(char *name, struct perf_mon_axi_cnts *var, + struct proc_dir_entry *d, int hex) +{ + struct proc_dir_entry *pe; + pe = create_proc_entry(name, 0777, d); + if (0 == pe) + return; + if (hex) { + pe->read_proc = pm_axi_read_hex; + pe->write_proc = pm_axi_cnts_write; + } else { + pe->read_proc = pm_axi_read_decimal; + pe->write_proc = pm_axi_cnts_write; + } + pe->data = (void *)var; +} + +/* +FUNCTION pm_axi_clear_tenure + +DESCRIPTION + Clear AXI tenure cntr manually. Temporary solution till hardware bug + is fixed + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +void pm_axi_clear_tenure(void) +{ + HWIO_AXI_MONITOR_TENURE_UPPER_REG_OUT(0x0); + HWIO_AXI_MONITOR_TENURE_LOWER_REG_OUT(0x0); +} + +/* +FUNCTION pm_axi_init + +DESCRIPTION + Map AXI region to virtual memory. + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +void pm_axi_init() +{ + /*Map the AXI regs*/ + #ifdef CONFIG_ARCH_QSD8X50 + { + /*Map the AXI regs*/ + AXI_BASE = (uint32_t)ioremap(AXI_REG_BASE_PHYS, AXI_BASE_SIZE); + if (!AXI_BASE) + printk(KERN_ERR "Mem map failed\n"); + } + #else + { + AXI_BASE = (uint32_t)kmalloc(AXI_BASE_SIZE, GFP_KERNEL); + } + #endif + +} + +/* +FUNCTION pm_axi_start + +DESCRIPTION + Set event0, event1 and tenure registers based on the /proc entries. 
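
pm_axi_set_proc_entry() and pm_axi_get_cnt_proc_entry() above rely on the
legacy procfs interface used throughout this patch, in which the struct
proc_dir_entry returned by create_proc_entry() is wired up through its
read_proc, write_proc and data fields. A stripped-down sketch of that
pattern follows; the entry name and the exported variable are hypothetical
and only serve to show the wiring.

    #include <linux/module.h>
    #include <linux/proc_fs.h>

    static unsigned long example_tunable;       /* hypothetical variable */
    static struct proc_dir_entry *example_pe;

    static int example_read(char *page, char **start, off_t off,
                            int count, int *eof, void *data)
    {
        /* 'data' is whatever was stored in example_pe->data below. */
        unsigned long *v = data;
        return sprintf(page, "%lx", *v);
    }

    static int __init example_init(void)
    {
        example_pe = create_proc_entry("example_tunable", 0644, NULL);
        if (!example_pe)
            return -ENOMEM;
        example_pe->read_proc = example_read;
        example_pe->data = &example_tunable;
        return 0;
    }

    static void __exit example_exit(void)
    {
        remove_proc_entry("example_tunable", NULL);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");
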
+ Set cycle cntr to fffffffe to start counters. + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +void +pm_axi_start() +{ + unsigned long sel_reg0, sel_reg1, ten_sel_reg; + sel_reg0 = get_axi_sel_reg0(); + sel_reg1 = get_axi_sel_reg1(); + ten_sel_reg = get_axi_ten_sel_reg(); + HWIO_AXI_CONFIGURATION_REG_OUT(HWIO_AXI_CONFIGURATION_REG_PPDM_BMSK); + /*Set AXI Cycle Counter to enable AXI Monitors*/ + HWIO_AXI_MONITOR_CYCLE_COUNT_UPPER_REG_OUT(0xffff); + HWIO_AXI_MONITOR_CYCLE_COUNT_LOWER_REG_OUT(0xfffe); + /*Set master/slave*/ + HWIO_AXI_MONITOR_SELECTION_REG1_OUT(sel_reg1); + HWIO_AXI_MONITOR_SELECTION_REG0_OUT(HWIO_AXI_RESET_ALL); + HWIO_AXI_MONITOR_SELECTION_REG0_OUT(HWIO_AXI_ENABLE_ALL_NOCYCLES); + HWIO_AXI_MONITOR_SELECTION_REG0_OUT(HWIO_AXI_MONITOR_SELECTION_REG0_IN + | sel_reg0); + HWIO_AXI_MONITOR_SELECTION_REG0_OUT(HWIO_AXI_MONITOR_SELECTION_REG0_IN + | HWIO_AXI_MONITOR_SELECTION_REG0_ECC_BMSK); + HWIO_AXI_CONFIGURATION_REG_OUT(HWIO_AXI_CONFIGURATION_REG_PPDM_BMSK); +} + +/* +FUNCTION pm_axi_update + +DESCRIPTION + Set event0, event1 and tenure registers based on the /proc entries. + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +void +pm_axi_update() +{ + HWIO_AXI_CONFIGURATION_REG_OUT(HWIO_AXI_CONFIGURATION_REG_PPDM_BMSK); + HWIO_AXI_MONITOR_SELECTION_REG0_OUT(HWIO_AXI_MONITOR_SELECTION_REG0_IN + | HWIO_AXI_RESET_ALL); + HWIO_AXI_MONITOR_SELECTION_REG0_OUT(HWIO_AXI_MONITOR_SELECTION_REG0_IN + & HWIO_AXI_DISABLE_ALL); + pm_axi_start(); +} + +/* +FUNCTION pm_axi_disable + +DESCRIPTION + Disable all cntrs. + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +void +pm_axi_disable(void) +{ + unsigned long sel_reg0; + /*Disable cntrs*/ + sel_reg0 = get_axi_sel_reg0(); + HWIO_AXI_MONITOR_SELECTION_REG0_OUT(sel_reg0 & AXI_EVTSEL_DISABLE_MASK); + /*Disable clk*/ + HWIO_AXI_CONFIGURATION_REG_OUT(HWIO_AXI_CONFIGURATION_REG_DISABLE); +} + +/* +FUNCTION pm_axi_enable + +DESCRIPTION + Enable all cntrs. 
+ +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +void +pm_axi_enable(void) +{ + unsigned long sel_reg0; + /*Enable cntrs*/ + sel_reg0 = get_axi_sel_reg0(); + HWIO_AXI_MONITOR_SELECTION_REG0_OUT(sel_reg0 | 0x6a00); + /*Enable clk*/ + HWIO_AXI_CONFIGURATION_REG_OUT(HWIO_AXI_CONFIGURATION_REG_PPDM_BMSK); +} + +/* +FUNCTION pm_axi_disable_cnts + +DESCRIPTION + Read cycle cntr value + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +unsigned long +pm_get_axi_cycle_count(void) +{ + if (HWIO_AXI_MONITOR_CYCLE_COUNT_UPPER_REG_IN == 0x0 && + HWIO_AXI_MONITOR_CYCLE_COUNT_LOWER_REG_IN == 0x0) { + /*Set AXI Cycle Counter to enable AXI Monitors*/ + HWIO_AXI_MONITOR_CYCLE_COUNT_UPPER_REG_OUT(0xffff); + HWIO_AXI_MONITOR_CYCLE_COUNT_LOWER_REG_OUT(0xfffe); + } + return 0xfffffffe - ((HWIO_AXI_MONITOR_CYCLE_COUNT_UPPER_REG_IN << 16) + + HWIO_AXI_MONITOR_CYCLE_COUNT_LOWER_REG_IN); +} + +/* +FUNCTION pm_get_axi_evt0_count + +DESCRIPTION + Read Event0 cntr value + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +unsigned long +pm_get_axi_evt0_count(void) +{ + return (HWIO_AXI_MONITOR_EVENT_UPPER_REG0_IN << 16) + + HWIO_AXI_MONITOR_EVENT_LOWER_REG0_IN; +} + +/* +FUNCTION pm_get_axi_evt1_count + +DESCRIPTION + Read Event1 cntr value + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +unsigned long +pm_get_axi_evt1_count(void) +{ + return (HWIO_AXI_MONITOR_EVENT_UPPER_REG1_IN << 16) + + HWIO_AXI_MONITOR_EVENT_LOWER_REG1_IN; +} + +/* +FUNCTION pm_get_axi_ten_min_count + +DESCRIPTION + Read min tenure cntr value + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +unsigned long +pm_get_axi_ten_min_count(void) +{ + return HWIO_AXI_MONITOR_MIN_REG_IN; +} + +/* +FUNCTION pm_get_axi_ten_max_count + +DESCRIPTION + Read max tenure cntr value + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +unsigned long +pm_get_axi_ten_max_count(void) +{ + return HWIO_AXI_MONITOR_MAX_REG_IN; +} + +/* +FUNCTION pm_get_axi_ten_total_count + +DESCRIPTION + Read total tenure cntr value + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +unsigned long +pm_get_axi_ten_total_count(void) +{ + return (HWIO_AXI_MONITOR_TENURE_UPPER_REG_IN << 16) + + HWIO_AXI_MONITOR_TENURE_LOWER_REG_IN; +} + +/* +FUNCTION pm_get_axi_ten_last_count + +DESCRIPTION + Read last tenure cntr value + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +unsigned long +pm_get_axi_ten_last_count(void) +{ + return HWIO_AXI_MONITOR_LAST_TENURE_REG_IN; +} diff --git a/arch/arm/perfmon/per-axi.h b/arch/arm/perfmon/per-axi.h new file mode 100644 index 0000000000000..185bb154a44c7 --- /dev/null +++ b/arch/arm/perfmon/per-axi.h @@ -0,0 +1,91 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Code Aurora nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* +*per-axi +*DESCRIPTION +*Header File for Functions related to AXI bus performance counter manipulations. +*/ + +#ifndef __PER_AXI_H__ +#define __PER_AXI_H__ +unsigned long pm_get_axi_cycle_count(void); +unsigned long pm_get_axi_evt0_count(void); +unsigned long pm_get_axi_evt1_count(void); +unsigned long pm_get_axi_evt2_count(void); +unsigned long pm_get_axi_ten_min_count(void); +unsigned long pm_get_axi_ten_max_count(void); +unsigned long pm_get_axi_ten_total_count(void); +unsigned long pm_get_axi_ten_last_count(void); + +unsigned long get_axi_sel_reg0(void); +unsigned long get_axi_sel_seg1(void); +unsigned long get_axi_ten_sel_reg(void); +unsigned long get_axi_valid(void); +unsigned long get_axi_enable(void); +unsigned long get_axi_clear(void); + +void pm_axi_clear_cnts(void); +void pm_axi_update_cnts(void); + +void pm_axi_init(void); +void pm_axi_start(void); +void pm_axi_update(void); +void pm_axi_disable(void); +void pm_axi_enable(void); + +struct perf_mon_axi_cnts{ + unsigned long long cycles; + unsigned long long cnt0; + unsigned long long cnt1; + unsigned long long tenure_total; + unsigned long long tenure_min; + unsigned long long tenure_max; + unsigned long long tenure_last; +}; + +struct perf_mon_axi_data{ + struct proc_dir_entry *proc; + unsigned long enable; + unsigned long clear; + unsigned long valid; + unsigned long sel_reg0; + unsigned long sel_reg1; + unsigned long ten_sel_reg; + unsigned long refresh; +}; + +extern struct perf_mon_axi_data pm_axi_info; +extern struct perf_mon_axi_cnts axi_cnts; + +void pm_axi_set_proc_entry(char *name, unsigned long *var, + struct proc_dir_entry *d, int hex); +void pm_axi_get_cnt_proc_entry(char *name, struct perf_mon_axi_cnts *var, + struct proc_dir_entry *d, int hex); + +#endif diff --git a/arch/arm/perfmon/per-process-perf.c b/arch/arm/perfmon/per-process-perf.c new file mode 100644 index 0000000000000..c8bebd842d192 --- /dev/null +++ b/arch/arm/perfmon/per-process-perf.c @@ -0,0 +1,1251 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. +* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. +*/ + +/* +per-process_perf +DESCRIPTION +Capture the processor performances registers when the process context +switches. 
The /proc file system is used to control and access the results +of the performance counters. + +Each time a process is context switched, the performance counters for +the Snoop Control Unit and the standard ARM counters are set according +to the values stored for that process. + +The events to capture per process are set in the /proc/ppPerf/settings +directory. + +EXTERNALIZED FUNCTIONS + +INITIALIZATION AND SEQUENCING REQUIREMENTS +Detail how to initialize and use this service. The sequencing aspect +is only needed if the order of operations is important. +*/ + +/* +INCLUDE FILES FOR MODULE +*/ +#include +#include +#include +#include +#include +#include "linux/proc_fs.h" +#include "linux/kernel_stat.h" +#include +#include "asm/uaccess.h" +#include "cp15_registers.h" +#include "l2_cp15_registers.h" +#include +#include "per-axi.h" +#include "perf.h" + +#define DEBUG_SWAPIO +#ifdef DEBUG_SWAPIO +#define MR_SIZE 1024 +#define PM_PP_ERR -1 +struct mark_data_s { + long c; + long cpu; + unsigned long pid_old; + unsigned long pid_new; +}; + +struct mark_data_s markRay[MR_SIZE] __attribute__((aligned(16))); +int mrcnt; + +DEFINE_SPINLOCK(_mark_lock); + +static inline void MARKPIDS(char a, int opid, int npid) +{ + int cpu = smp_processor_id(); + + if (opid == 0) + return; + spin_lock(&_mark_lock); + if (++mrcnt >= MR_SIZE) + mrcnt = 0; + spin_unlock(&_mark_lock); + + markRay[mrcnt].pid_old = opid; + markRay[mrcnt].pid_new = npid; + markRay[mrcnt].cpu = cpu; + markRay[mrcnt].c = a; +} +static inline void MARK(char a) { MARKPIDS(a, 0xFFFF, 0xFFFF); } +static inline void MARKPID(char a, int pid) { MARKPIDS(a, pid, 0xFFFF); } + +#else +#define MARK(a) +#define MARKPID(a, b) +#define MARKPIDS(a, b, c) + +#endif /* DEBUG_SWAPIO */ + +/* +DEFINITIONS AND DECLARATIONS FOR MODULE + +This section contains definitions for constants, macros, types, variables +and other items needed by this module. +*/ + +/* +Constant / Define Declarations +*/ + +#define PERF_MON_PROCESS_NUM 0x400 +#define PERF_MON_PROCESS_MASK (PERF_MON_PROCESS_NUM-1) +#define PP_MAX_PROC_ENTRIES 32 + +/* + * The entry is locked and is not to be replaced. + */ +#define PERF_ENTRY_LOCKED (1<<0) +#define PERF_NOT_FIRST_TIME (1<<1) +#define PERF_EXITED (1<<2) +#define PERF_AUTOLOCK (1<<3) + +#define IS_LOCKED(p) (p->flags & PERF_ENTRY_LOCKED) + +#define PERF_NUM_MONITORS 4 + +#define L1_EVENTS_0 0 +#define L1_EVENTS_1 1 +#define L2_EVENTS_0 2 +#define L2_EVENTS_1 3 + +#define PM_CYCLE_OVERFLOW_MASK 0x80000000 +#define L2_PM_CYCLE_OVERFLOW_MASK 0x80000000 + +#define PM_START_ALL() do {\ + if (pm_global) \ + pmStartAll();\ + } while (0); +#define PM_STOP_ALL() do {\ + if (pm_global)\ + pmStopAll();\ + } while (0); +#define PM_RESET_ALL() do {\ + if (pm_global)\ + pmResetAll();\ + } while (0); + +/* + * Accessors for SMP based variables. + */ +#define _SWAPS(p) ((p)->cnts[smp_processor_id()].swaps) +#define _CYCLES(p) ((p)->cnts[smp_processor_id()].cycles) +#define _COUNTS(p, i) ((p)->cnts[smp_processor_id()].counts[i]) +#define _L2COUNTS(p, i) ((p)->cnts[smp_processor_id()].l2_counts[i]) +#define _L2CYCLES(p) ((p)->cnts[smp_processor_id()].l2_cycles) + +/* + Type Declarations +*/ + +/* + * Counts are on a per core basis. 
+ */ +struct pm_counters_s { + unsigned long long cycles; + unsigned long long l2_cycles; + unsigned long long counts[PERF_NUM_MONITORS]; + unsigned long long l2_counts[PERF_NUM_MONITORS]; + unsigned long swaps; +}; + +struct per_process_perf_mon_type{ + struct pm_counters_s cnts[NR_CPUS]; + unsigned long control; + unsigned long index[PERF_NUM_MONITORS]; + unsigned long l2_index[PERF_NUM_MONITORS]; + unsigned long pid; + struct proc_dir_entry *proc; + struct proc_dir_entry *l2_proc; + unsigned short flags; + unsigned short running_cpu; + char *pidName; + unsigned long lpm0evtyper; + unsigned long lpm1evtyper; + unsigned long lpm2evtyper; + unsigned long l2lpmevtyper; + unsigned long vlpmevtyper; + unsigned long l2pmevtyper0; + unsigned long l2pmevtyper1; + unsigned long l2pmevtyper2; + unsigned long l2pmevtyper3; + unsigned long l2pmevtyper4; +}; + +unsigned long last_in_pid[NR_CPUS]; +unsigned long fake_swap_out[NR_CPUS] = {0}; + +/* + Local Object Definitions +*/ +struct per_process_perf_mon_type perf_mons[PERF_MON_PROCESS_NUM]; +struct proc_dir_entry *proc_dir; +struct proc_dir_entry *settings_dir; +struct proc_dir_entry *values_dir; +struct proc_dir_entry *axi_dir; +struct proc_dir_entry *l2_dir; +struct proc_dir_entry *axi_settings_dir; +struct proc_dir_entry *axi_results_dir; +struct proc_dir_entry *l2_results_dir; + +unsigned long pp_enabled; +unsigned long pp_settings_valid = -1; +unsigned long pp_auto_lock; +unsigned long pp_set_pid; +signed long pp_clear_pid = -1; +unsigned long per_proc_event[PERF_NUM_MONITORS]; +unsigned long l2_per_proc_event[PERF_NUM_MONITORS]; +unsigned long dbg_flags; +unsigned long pp_lpm0evtyper; +unsigned long pp_lpm1evtyper; +unsigned long pp_lpm2evtyper; +unsigned long pp_l2lpmevtyper; +unsigned long pp_vlpmevtyper; +unsigned long pm_stop_for_interrupts; +unsigned long pm_global; /* track all, not process based */ +unsigned long pm_global_enable; +unsigned long pm_remove_pid; + +unsigned long pp_l2pmevtyper0; +unsigned long pp_l2pmevtyper1; +unsigned long pp_l2pmevtyper2; +unsigned long pp_l2pmevtyper3; +unsigned long pp_l2pmevtyper4; + +unsigned long pp_proc_entry_index; +char *per_process_proc_names[PP_MAX_PROC_ENTRIES]; + +unsigned int axi_swaps; +#define MAX_AXI_SWAPS 10 +int first_switch = 1; +/* + Forward Declarations +*/ + +/* +Function Definitions +*/ + +/* +FUNCTION per_process_find + +DESCRIPTION + Find the per process information based on the process id (pid) passed. + This is a simple mask based on the number of entries stored in the + static array + +DEPENDENCIES + +RETURN VALUE + Pointer to the per process data +SIDE EFFECTS + +*/ +struct per_process_perf_mon_type *per_process_find(unsigned long pid) +{ + return &perf_mons[pid & PERF_MON_PROCESS_MASK]; +} + +/* +FUNCTION per_process_get_name + +DESCRIPTION + Retreive the name of the performance counter based on the table and + index passed. We have two different sets of performance counters so + different table need to be used. + +DEPENDENCIES + +RETURN VALUE + Pointer to char string with the name of the event or "BAD" + Never returns NULL or a bad pointer. + +SIDE EFFECTS +*/ +char *per_process_get_name(unsigned long index) +{ + return pm_find_event_name(index); +} + +/* +FUNCTION per_process_results_read + +DESCRIPTION + Print out the formatted results from the process id read. Event names + and counts are printed. 
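
per_process_find() above locates a process's slot with a simple mask rather
than a full hash, so two pids that differ by a multiple of
PERF_MON_PROCESS_NUM share an entry; this is why the switch path re-checks
p->pid against the pid it looked up before trusting the entry. A small
standalone illustration:

    #include <stdio.h>

    #define PERF_MON_PROCESS_NUM  0x400
    #define PERF_MON_PROCESS_MASK (PERF_MON_PROCESS_NUM - 1)

    /* Same index computation as per_process_find(). */
    static unsigned long pid_slot(unsigned long pid)
    {
        return pid & PERF_MON_PROCESS_MASK;
    }

    int main(void)
    {
        printf("pid 1234 -> slot %lu\n", pid_slot(1234));
        /* pid 2258 = 1234 + 0x400, so it collides with pid 1234. */
        printf("pid 2258 -> slot %lu\n", pid_slot(2258));
        return 0;
    }
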
+ +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +int per_process_results_read(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct per_process_perf_mon_type *p = + (struct per_process_perf_mon_type *)data; + struct pm_counters_s cnts; + int i, j; + + /* + * Total across all CPUS + */ + memset(&cnts, 0, sizeof(cnts)); + for (i = 0; i < num_possible_cpus(); i++) { + cnts.swaps += p->cnts[i].swaps; + cnts.cycles += p->cnts[i].cycles; + for (j = 0; j < PERF_NUM_MONITORS; j++) + cnts.counts[j] += p->cnts[i].counts[j]; + } + + /* + * Display as single results of the totals calculated above. + * Do we want to display or have option to display individula cores? + */ + return sprintf(page, "pid:%lu one:%s:%llu two:%s:%llu three:%s:%llu \ + four:%s:%llu cycles:%llu swaps:%lu\n", + p->pid, + per_process_get_name(p->index[0]), cnts.counts[0], + per_process_get_name(p->index[1]), cnts.counts[1], + per_process_get_name(p->index[2]), cnts.counts[2], + per_process_get_name(p->index[3]), cnts.counts[3], + cnts.cycles, cnts.swaps); +} + +int per_process_l2_results_read(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + struct per_process_perf_mon_type *p = + (struct per_process_perf_mon_type *)data; + struct pm_counters_s cnts; + int i, j; + + /* + * Total across all CPUS + */ + memset(&cnts, 0, sizeof(cnts)); + for (i = 0; i < num_possible_cpus(); i++) { + cnts.l2_cycles += p->cnts[i].l2_cycles; + for (j = 0; j < PERF_NUM_MONITORS; j++) + cnts.l2_counts[j] += p->cnts[i].l2_counts[j]; + } + + /* + * Display as single results of the totals calculated above. + * Do we want to display or have option to display individula cores? + */ + return sprintf(page, "pid:%lu l2_one:%s:%llu l2_two:%s:%llu \ + l2_three:%s:%llu \ + l2_four:%s:%llu l2_cycles:%llu\n", + p->pid, + per_process_get_name(p->l2_index[0]), cnts.l2_counts[0], + per_process_get_name(p->l2_index[1]), cnts.l2_counts[1], + per_process_get_name(p->l2_index[2]), cnts.l2_counts[2], + per_process_get_name(p->l2_index[3]), cnts.l2_counts[3], + cnts.l2_cycles); +} + +/* +FUNCTION per_process_results_write + +DESCRIPTION + Allow some control over the results. If the user forgets to autolock or + wants to unlock the results so they will be deleted, then this is + where it is processed. + + For example, to unlock process 23 + echo "unlock" > 23 + +DEPENDENCIES + +RETURN VALUE + Number of characters used (all of them!) + +SIDE EFFECTS +*/ +int per_process_results_write(struct file *file, const char *buff, + unsigned long cnt, void *data) +{ + char *newbuf; + struct per_process_perf_mon_type *p = + (struct per_process_perf_mon_type *)data; + + if (p == 0) + return cnt; + /* + * Alloc the user data in kernel space. and then copy user to kernel + */ + newbuf = kmalloc(cnt + 1, GFP_KERNEL); + if (0 == newbuf) + return cnt; + if (copy_from_user(newbuf, buff, cnt) != 0) { + printk(KERN_INFO "%s copy_from_user failed\n", __func__); + return cnt; + } + + if (0 == strcmp("lock", newbuf)) + p->flags |= PERF_ENTRY_LOCKED; + else if (0 == strcmp("unlock", newbuf)) + p->flags &= ~PERF_ENTRY_LOCKED; + else if (0 == strcmp("auto", newbuf)) + p->flags |= PERF_AUTOLOCK; + else if (0 == strcmp("autoun", newbuf)) + p->flags &= ~PERF_AUTOLOCK; + + return cnt; +} + +/* +FUNCTION perProcessCreateResults + +DESCRIPTION + Create the results /proc file if the system parameters allow it... 
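
per_process_results_write() above copies the user's command into a kernel
buffer before comparing it against "lock", "unlock", "auto" and "autoun".
A self-contained sketch of that copy-and-compare step is shown below; the
explicit NUL termination and the kfree() calls are part of the idiom, since
copy_from_user() does not terminate the buffer. PERF_ENTRY_LOCKED is the
flag defined earlier in this file, and the function name is hypothetical.

    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/uaccess.h>

    static int example_cmd_write(const char __user *buff, unsigned long cnt,
                                 unsigned short *flags)
    {
        char *newbuf = kmalloc(cnt + 1, GFP_KERNEL);

        if (!newbuf)
            return -ENOMEM;
        if (copy_from_user(newbuf, buff, cnt)) {
            kfree(newbuf);
            return -EFAULT;
        }
        newbuf[cnt] = '\0';     /* copy_from_user() does not do this */

        if (!strcmp("lock", newbuf))
            *flags |= PERF_ENTRY_LOCKED;
        else if (!strcmp("unlock", newbuf))
            *flags &= ~PERF_ENTRY_LOCKED;

        kfree(newbuf);
        return cnt;
    }
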
+DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +void per_process_create_results_proc(struct per_process_perf_mon_type *p) +{ + + if (0 == p->pidName) + p->pidName = kmalloc(12, GFP_KERNEL); + if (0 == p->pidName) + return; + sprintf(p->pidName, "%ld", p->pid); + + if (0 == p->proc) { + p->proc = create_proc_entry(p->pidName, 0777, values_dir); + if (0 == p->proc) + return; + } else { + p->proc->name = p->pidName; + } + + p->proc->read_proc = per_process_results_read; + p->proc->write_proc = per_process_results_write; + p->proc->data = (void *)p; +} + +void per_process_create_l2_results_proc(struct per_process_perf_mon_type *p) +{ + + if (0 == p->pidName) + p->pidName = kmalloc(12, GFP_KERNEL); + if (0 == p->pidName) + return; + sprintf(p->pidName, "%ld", p->pid); + + if (0 == p->l2_proc) { + p->l2_proc = create_proc_entry(p->pidName, 0777, + l2_results_dir); + if (0 == p->l2_proc) + return; + } else { + p->l2_proc->name = p->pidName; + } + + p->l2_proc->read_proc = per_process_l2_results_read; + p->l2_proc->write_proc = per_process_results_write; + p->l2_proc->data = (void *)p; +} +/* +FUNCTION per_process_swap_out + +DESCRIPTION + Store the counters from the process that is about to swap out. We take + the old counts and add them to the current counts in the perf registers. + Before the new process is swapped in, the counters are reset. + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +typedef void (*vfun)(void *); +void per_process_swap_out(struct per_process_perf_mon_type *data) +{ + int i; + unsigned long overflow; +#ifdef CONFIG_ARCH_MSM8X60 + unsigned long l2_overflow; +#endif + struct per_process_perf_mon_type *p = data; + + MARKPIDS('O', p->pid, 0); + RCP15_PMOVSR(overflow); +#ifdef CONFIG_ARCH_MSM8X60 + RCP15_L2PMOVSR(l2_overflow); +#endif + + if (!pp_enabled) + return; + + /* + * The kernel for some reason (2.6.32.9) starts a process context on + * one core and ends on another. So the swap in and swap out can be + * on different cores. If this happens, we need to stop the + * counters and collect the data on the core that started the counters + * ....otherwise we receive invalid data. So we mark the the core with + * the process as deferred. The next time a process is swapped on + * the core that the process was running on, the counters will be + * updated. + */ + if ((smp_processor_id() != p->running_cpu) && (p->pid != 0)) { + fake_swap_out[p->running_cpu] = 1; + return; + } + + _SWAPS(p)++; + _CYCLES(p) += pm_get_cycle_count(); + + if (overflow & PM_CYCLE_OVERFLOW_MASK) + _CYCLES(p) += 0xFFFFFFFF; + + for (i = 0; i < PERF_NUM_MONITORS; i++) { + _COUNTS(p, i) += pm_get_count(i); + if (overflow & (1 << i)) + _COUNTS(p, i) += 0xFFFFFFFF; + } + +#ifdef CONFIG_ARCH_MSM8X60 + _L2CYCLES(p) += l2_pm_get_cycle_count(); + if (l2_overflow & L2_PM_CYCLE_OVERFLOW_MASK) + _L2CYCLES(p) += 0xFFFFFFFF; + for (i = 0; i < PERF_NUM_MONITORS; i++) { + _L2COUNTS(p, i) += l2_pm_get_count(i); + if (l2_overflow & (1 << i)) + _L2COUNTS(p, i) += 0xFFFFFFFF; + } +#endif +} + +/* +FUNCTION per_process_remove_manual + +DESCRIPTION + Remove an entry from the results directory if the flags allow this. + When not enbled or the entry is locked, the values/results will + not be removed. 
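
per_process_swap_out() above folds the 32-bit cycle and event counts into
64-bit per-process totals and credits an extra 0xFFFFFFFF whenever the
PMOVSR overflow flags show that a counter wrapped while the process was
running. A userspace model of that accumulation, using made-up register
samples:

    #include <stdint.h>
    #include <stdio.h>

    #define PM_CYCLE_OVERFLOW_MASK 0x80000000UL

    int main(void)
    {
        uint64_t cycles_total = 0;

        /* Hypothetical samples taken at swap-out time. */
        uint32_t pmccntr = 0x00001000;                  /* cycle counter */
        uint32_t pmovsr  = PM_CYCLE_OVERFLOW_MASK;      /* overflow flags */

        cycles_total += pmccntr;
        if (pmovsr & PM_CYCLE_OVERFLOW_MASK)
            cycles_total += 0xFFFFFFFFULL;              /* counter wrapped */

        printf("accumulated cycles = %llu\n",
               (unsigned long long)cycles_total);
        return 0;
    }
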
+ +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +void per_process_remove_manual(unsigned long pid) +{ + struct per_process_perf_mon_type *p = per_process_find(pid); + + /* + * Check all of the flags to see if we can remove this one + * Then mark as not used + */ + if (0 == p) + return; + p->pid = (0xFFFFFFFF); + + /* + * Remove the proc entry. + */ + if (p->proc) + remove_proc_entry(p->pidName, values_dir); + if (p->l2_proc) + remove_proc_entry(p->pidName, l2_results_dir); + kfree(p->pidName); + + /* + * Clear them out...and ensure the pid is invalid + */ + memset(p, 0, sizeof *p); + p->pid = 0xFFFFFFFF; + pm_remove_pid = -1; +} + +/* +* Remove called when a process exits... +*/ +void _per_process_remove(unsigned long pid) {} + +/* +FUNCTION per_process_initialize + +DESCRIPTION +Initialize performance collection information for a new process. + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +May create a new proc entry +*/ +void per_process_initialize(struct per_process_perf_mon_type *p, + unsigned long pid) +{ + int i; + + /* + * See if this is the pid we are interested in... + */ + if (pp_settings_valid == -1) + return; + if ((pp_set_pid != pid) && (pp_set_pid != 0)) + return; + + /* + * Clear out the statistics table then insert this pid + * We want to keep the proc entry and the name + */ + p->pid = pid; + + /* + * Create a proc entry for this pid, then get the current event types and + * store in data struct so when the process is switched in we can track + * it. + */ + if (p->proc == 0) { + per_process_create_results_proc(p); +#ifdef CONFIG_ARCH_MSM8X60 + per_process_create_l2_results_proc(p); +#endif + } + _CYCLES(p) = 0; + _L2CYCLES(p) = 0; + _SWAPS(p) = 0; + /* + * Set the per process data struct, but not the monitors until later... + * Init only happens with the user sets the SetPID variable to this pid + * so we can load new values. + */ + for (i = 0; i < PERF_NUM_MONITORS; i++) { + p->index[i] = per_proc_event[i]; +#ifdef CONFIG_ARCH_MSM8X60 + p->l2_index[i] = l2_per_proc_event[i]; +#endif + _COUNTS(p, i) = 0; + _L2COUNTS(p, i) = 0; + } + p->lpm0evtyper = pp_lpm0evtyper; + p->lpm1evtyper = pp_lpm1evtyper; + p->lpm2evtyper = pp_lpm2evtyper; + p->l2lpmevtyper = pp_l2lpmevtyper; + p->vlpmevtyper = pp_vlpmevtyper; + +#ifdef CONFIG_ARCH_MSM8X60 + p->l2pmevtyper0 = pp_l2pmevtyper0; + p->l2pmevtyper1 = pp_l2pmevtyper1; + p->l2pmevtyper2 = pp_l2pmevtyper2; + p->l2pmevtyper3 = pp_l2pmevtyper3; + p->l2pmevtyper4 = pp_l2pmevtyper4; +#endif + + /* + * Reset pid and settings value + */ + pp_set_pid = -1; + pp_settings_valid = -1; +} + +/* +FUNCTION per_process_swap_in + +DESCRIPTION + Called when a context switch is about to start this PID. + We check to see if this process has an entry or not and create one + if not locked... + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +void per_process_swap_in(struct per_process_perf_mon_type *p_new, + unsigned long pid) +{ + int i; + + MARKPIDS('I', p_new->pid, 0); + /* + * If the set proc variable == the current pid then init a new + * entry... + */ + if (pp_set_pid == pid) + per_process_initialize(p_new, pid); + + p_new->running_cpu = smp_processor_id(); + last_in_pid[smp_processor_id()] = pid; + + /* + * setup the monitors for this process. 
+ */ + for (i = 0; i < PERF_NUM_MONITORS; i++) { + pm_set_event(i, p_new->index[i]); +#ifdef CONFIG_ARCH_MSM8X60 + l2_pm_set_event(i, p_new->l2_index[i]); +#endif + } + pm_set_local_iu(p_new->lpm0evtyper); + pm_set_local_xu(p_new->lpm1evtyper); + pm_set_local_su(p_new->lpm2evtyper); + pm_set_local_l2(p_new->l2lpmevtyper); + +#ifdef CONFIG_ARCH_MSM8X60 + pm_set_local_bu(p_new->l2pmevtyper0); + pm_set_local_cb(p_new->l2pmevtyper1); + pm_set_local_mp(p_new->l2pmevtyper2); + pm_set_local_sp(p_new->l2pmevtyper3); + pm_set_local_scu(p_new->l2pmevtyper4); +#endif +} + +/* +FUNCTION perProcessSwitch + +DESCRIPTION + Called during context switch. Updates the counts on the process about to + be swapped out and brings in the counters for the process about to be + swapped in. + + All is dependant on the enabled and lock flags. + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ + +DEFINE_SPINLOCK(pm_lock); +void _per_process_switch(unsigned long old_pid, unsigned long new_pid) +{ + struct per_process_perf_mon_type *p_old, *p_new; + + if (pm_global_enable == 0) + return; + + spin_lock(&pm_lock); + + pm_stop_all(); +#ifdef CONFIG_ARCH_MSM8X60 + l2_pm_stop_all(); +#endif + + /* + * We detected that the process was swapped in on one core and out on + * a different core. This does not allow us to stop and stop counters + * properly so we need to defer processing. This checks to see if there + * is any defered processing necessary. And does it... */ + if (fake_swap_out[smp_processor_id()] != 0) { + fake_swap_out[smp_processor_id()] = 0; + p_old = per_process_find(last_in_pid[smp_processor_id()]); + last_in_pid[smp_processor_id()] = 0; + if (p_old != 0) + per_process_swap_out(p_old); + } + + /* + * Clear the data collected so far for this process? + */ + if (pp_clear_pid != -1) { + struct per_process_perf_mon_type *p_clear = + per_process_find(pp_clear_pid); + if (p_clear) { + memset(p_clear->cnts, 0, + sizeof(struct pm_counters_s)*num_possible_cpus()); + printk(KERN_INFO "Clear Per Processor Stats for \ + PID:%ld\n", pp_clear_pid); + pp_clear_pid = -1; + } + } + /* + * Always collect for 0, it collects for all. + */ + if (pp_enabled) { + if (first_switch == 1) { + per_process_initialize(&perf_mons[0], 0); + first_switch = 0; + } + if (pm_global) { + per_process_swap_out(&perf_mons[0]); + per_process_swap_in(&perf_mons[0], 0); + } else { + p_old = per_process_find(old_pid); + p_new = per_process_find(new_pid); + + + /* + * save the old counts to the old data struct, if the + * returned ptr is NULL or the process id passed is not + * the same as the process id in the data struct then + * don't update the data. + */ + if ((p_old) && (p_old->pid == old_pid) && + (p_old->pid != 0)) { + per_process_swap_out(p_old); + } + + /* + * Setup the counters for the new process + */ + if (pp_set_pid == new_pid) + per_process_initialize(p_new, new_pid); + if ((p_new->pid == new_pid) && (new_pid != 0)) + per_process_swap_in(p_new, new_pid); + } + pm_reset_all(); +#ifdef CONFIG_ARCH_MSM8X60 + l2_pm_reset_all(); +#endif +#ifdef CONFIG_ARCH_QSD8X50 + axi_swaps++; + if (axi_swaps%pm_axi_info.refresh == 0) { + if (pm_axi_info.clear == 1) { + pm_axi_clear_cnts(); + pm_axi_info.clear = 0; + } + if (pm_axi_info.enable == 0) + pm_axi_disable(); + else + pm_axi_update_cnts(); + axi_swaps = 0; + } +#endif + } + pm_start_all(); +#ifdef CONFIG_ARCH_MSM8X60 + l2_pm_start_all(); +#endif + + spin_unlock(&pm_lock); +} + +/* +FUNCTION pmInterruptIn + +DESCRIPTION + Called when an interrupt is being processed. 
If the pmStopForInterrutps + flag is non zero then we disable the counting of performance monitors. + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +static int pm_interrupt_nesting_count; +static unsigned long pm_cycle_in, pm_cycle_out; +void _perf_mon_interrupt_in(void) +{ + if (pm_global_enable == 0) + return; + if (pm_stop_for_interrupts == 0) + return; + pm_interrupt_nesting_count++; /* Atomic */ + pm_stop_all(); + pm_cycle_in = pm_get_cycle_count(); +} + +/* +FUNCTION perfMonInterruptOut + +DESCRIPTION + Reenable performance monitor counting whn the nest count goes to zero + provided the counting has been stoped + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +void _perf_mon_interrupt_out(void) +{ + if (pm_global_enable == 0) + return; + if (pm_stop_for_interrupts == 0) + return; + --pm_interrupt_nesting_count; /* Atomic?? */ + + if (pm_interrupt_nesting_count <= 0) { + pm_cycle_out = pm_get_cycle_count(); + if (pm_cycle_in != pm_cycle_out) + printk(KERN_INFO "pmIn!=pmOut in:%lx out:%lx\n", + pm_cycle_in, pm_cycle_out); + if (pp_enabled) { + pm_start_all(); +#ifdef CONFIG_ARCH_MSM8X60 + l2_pm_start_all(); +#endif + } + pm_interrupt_nesting_count = 0; + } +} + +void per_process_do_global(unsigned long g) +{ + pm_global = g; + + if (pm_global == 1) { + pm_stop_all(); +#ifdef CONFIG_ARCH_MSM8X60 + l2_pm_stop_all(); +#endif + pm_reset_all(); +#ifdef CONFIG_ARCH_MSM8X60 + l2_pm_reset_all(); +#endif + pp_set_pid = 0; + per_process_swap_in(&perf_mons[0], 0); + pm_start_all(); +#ifdef CONFIG_ARCH_MSM8X60 + l2_pm_start_all(); +#endif + } else { + pm_stop_all(); +#ifdef CONFIG_ARCH_MSM8X60 + l2_pm_stop_all(); +#endif + } +} + + +/* +FUNCTION per_process_write + +DESCRIPTION + Generic routine to handle any of the settings /proc directory writes. + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +int per_process_write(struct file *file, const char *buff, + unsigned long cnt, void *data, const char *fmt) +{ + char *newbuf; + unsigned long *d = (unsigned long *)data; + + /* + * Alloc the user data in kernel space. and then copy user to kernel + */ + newbuf = kmalloc(cnt + 1, GFP_KERNEL); + if (0 == newbuf) + return PM_PP_ERR; + if (copy_from_user(newbuf, buff, cnt) != 0) { + printk(KERN_INFO "%s copy_from_user failed\n", __func__); + return cnt; + } + sscanf(newbuf, fmt, d); + kfree(newbuf); + + /* + * If this is a remove command then do it now... + */ + if (d == &pm_remove_pid) + per_process_remove_manual(*d); + if (d == &pm_global) + per_process_do_global(*d); + return cnt; +} + +int per_process_write_dec(struct file *file, const char *buff, + unsigned long cnt, void *data) +{ + return per_process_write(file, buff, cnt, data, "%ld"); +} + +int per_process_write_hex(struct file *file, const char *buff, + unsigned long cnt, void *data) +{ + return per_process_write(file, buff, cnt, data, "%lx"); +} + +/* +FUNCTION per_process_read + +DESCRIPTION + Generic read handler for the /proc settings directory. + +DEPENDENCIES + +RETURN VALUE + Number of characters to output. + +SIDE EFFECTS +*/ +int per_process_read(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + unsigned long *d = (unsigned long *)data; + return sprintf(page, "%lx", *d); +} + +int per_process_read_decimal(char *page, char **start, off_t off, int count, + int *eof, void *data) +{ + unsigned long *d = (unsigned long *)data; + return sprintf(page, "%ld", *d); +} + +/* +FUNCTION per_process_proc_entry + +DESCRIPTION + Create a generic entry for the /proc settings directory. 
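+
+ For example, the call used later in per_process_perf_init(),
+
+     per_process_proc_entry("enable", &pp_enabled, settings_dir, 1);
+
+ ties /proc/ppPerf/settings/enable to pp_enabled through the generic
+ handlers above: per_process_read() formats the value as "%lx" and
+ per_process_write_hex() parses writes the same way.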
+ +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +void per_process_proc_entry(char *name, unsigned long *var, + struct proc_dir_entry *d, int hex) +{ + struct proc_dir_entry *pe; + + pe = create_proc_entry(name, 0777, d); + if (0 == pe) + return; + if (hex) { + pe->read_proc = per_process_read; + pe->write_proc = per_process_write_hex; + } else { + pe->read_proc = per_process_read_decimal; + pe->write_proc = per_process_write_dec; + } + pe->data = (void *)var; + + if (pp_proc_entry_index >= PP_MAX_PROC_ENTRIES) { + printk(KERN_INFO "PERF: proc entry overflow,\ + memleak on module unload occured"); + return; + } + per_process_proc_names[pp_proc_entry_index++] = name; +} + +static int perfmon_notifier(struct notifier_block *self, unsigned long cmd, + void *v) +{ + static int old_pid = -1; + struct thread_info *thread = v; + int current_pid; + + if (cmd != THREAD_NOTIFY_SWITCH) + return old_pid; + + current_pid = thread->task->pid; + if (old_pid != -1) + _per_process_switch(old_pid, current_pid); + old_pid = current_pid; + return old_pid; +} + +static struct notifier_block perfmon_notifier_block = { + .notifier_call = perfmon_notifier, +}; + +/* +FUNCTION per_process_perf_init + +DESCRIPTION + Initialze the per process performance monitor variables and /proc space. + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +int per_process_perf_init(void) +{ +#ifdef CONFIG_ARCH_MSM8X60 + smp_call_function_single(0, (void *)pm_initialize, (void *)NULL, 1); + smp_call_function_single(1, (void *)pm_initialize, (void *)NULL, 1); + l2_pm_initialize(); +#else + pm_initialize(); +#endif + pm_axi_init(); + pm_axi_clear_cnts(); + proc_dir = proc_mkdir("ppPerf", NULL); + values_dir = proc_mkdir("results", proc_dir); + settings_dir = proc_mkdir("settings", proc_dir); + per_process_proc_entry("enable", &pp_enabled, settings_dir, 1); + per_process_proc_entry("valid", &pp_settings_valid, settings_dir, 1); + per_process_proc_entry("setPID", &pp_set_pid, settings_dir, 0); + per_process_proc_entry("clearPID", &pp_clear_pid, settings_dir, 0); + per_process_proc_entry("event0", &per_proc_event[0], settings_dir, 1); + per_process_proc_entry("event1", &per_proc_event[1], settings_dir, 1); + per_process_proc_entry("event2", &per_proc_event[2], settings_dir, 1); + per_process_proc_entry("event3", &per_proc_event[3], settings_dir, 1); + per_process_proc_entry("l2_event0", &l2_per_proc_event[0], settings_dir, + 1); + per_process_proc_entry("l2_event1", &l2_per_proc_event[1], settings_dir, + 1); + per_process_proc_entry("l2_event2", &l2_per_proc_event[2], settings_dir, + 1); + per_process_proc_entry("l2_event3", &l2_per_proc_event[3], settings_dir, + 1); + per_process_proc_entry("debug", &dbg_flags, settings_dir, 1); + per_process_proc_entry("autolock", &pp_auto_lock, settings_dir, 1); + per_process_proc_entry("lpm0evtyper", &pp_lpm0evtyper, settings_dir, 1); + per_process_proc_entry("lpm1evtyper", &pp_lpm1evtyper, settings_dir, 1); + per_process_proc_entry("lpm2evtyper", &pp_lpm2evtyper, settings_dir, 1); + per_process_proc_entry("l2lpmevtyper", &pp_l2lpmevtyper, settings_dir, + 1); + per_process_proc_entry("vlpmevtyper", &pp_vlpmevtyper, settings_dir, 1); + per_process_proc_entry("l2pmevtyper0", &pp_l2pmevtyper0, settings_dir, + 1); + per_process_proc_entry("l2pmevtyper1", &pp_l2pmevtyper1, settings_dir, + 1); + per_process_proc_entry("l2pmevtyper2", &pp_l2pmevtyper2, settings_dir, + 1); + per_process_proc_entry("l2pmevtyper3", &pp_l2pmevtyper3, settings_dir, + 1); + per_process_proc_entry("l2pmevtyper4", 
&pp_l2pmevtyper4, settings_dir, + 1); + per_process_proc_entry("stopForInterrupts", &pm_stop_for_interrupts, + settings_dir, 1); + per_process_proc_entry("global", &pm_global, settings_dir, 1); + per_process_proc_entry("globalEnable", &pm_global_enable, settings_dir, + 1); + per_process_proc_entry("removePID", &pm_remove_pid, settings_dir, 0); + + axi_dir = proc_mkdir("axi", proc_dir); + axi_settings_dir = proc_mkdir("settings", axi_dir); + axi_results_dir = proc_mkdir("results", axi_dir); + pm_axi_set_proc_entry("axi_enable", &pm_axi_info.enable, + axi_settings_dir, 1); + pm_axi_set_proc_entry("axi_clear", &pm_axi_info.clear, axi_settings_dir, + 0); + pm_axi_set_proc_entry("axi_valid", &pm_axi_info.valid, axi_settings_dir, + 1); + pm_axi_set_proc_entry("axi_sel_reg0", &pm_axi_info.sel_reg0, + axi_settings_dir, 1); + pm_axi_set_proc_entry("axi_sel_reg1", &pm_axi_info.sel_reg1, + axi_settings_dir, 1); + pm_axi_set_proc_entry("axi_ten_sel", &pm_axi_info.ten_sel_reg, + axi_settings_dir, 1); + pm_axi_set_proc_entry("axi_refresh", &pm_axi_info.refresh, + axi_settings_dir, 1); + pm_axi_get_cnt_proc_entry("axi_cnts", &axi_cnts, axi_results_dir, 0); + l2_dir = proc_mkdir("l2", proc_dir); + l2_results_dir = proc_mkdir("results", l2_dir); + + memset(perf_mons, 0, sizeof(perf_mons)); + per_process_create_results_proc(&perf_mons[0]); + per_process_create_l2_results_proc(&perf_mons[0]); + thread_register_notifier(&perfmon_notifier_block); + /* + * Set the function pointers so the module can be activated. + */ + pp_interrupt_out_ptr = _perf_mon_interrupt_out; + pp_interrupt_in_ptr = _perf_mon_interrupt_in; + pp_process_remove_ptr = _per_process_remove; + pp_loaded = 1; + pm_axi_info.refresh = 1; + +#ifdef CONFIG_ARCH_MSM8X60 + smp_call_function_single(0, (void *)pm_reset_all, (void *)NULL, 1); + smp_call_function_single(1, (void *)pm_reset_all, (void *)NULL, 1); + smp_call_function_single(0, (void *)l2_pm_reset_all, (void *)NULL, 1); + smp_call_function_single(1, (void *)l2_pm_reset_all, (void *)NULL, 1); +#else + pm_reset_all(); +#endif + + return 0; +} + +/* +FUNCTION per_process_perf_exit + +DESCRIPTION + Module exit functionm, clean up, renmove proc entries + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS + No more per process +*/ +void per_process_perf_exit(void) +{ + unsigned long i; + /* + * Sert the function pointers to 0 so the functions will no longer + * be invoked + */ + pp_loaded = 0; + pp_interrupt_out_ptr = 0; + pp_interrupt_in_ptr = 0; + pp_process_remove_ptr = 0; + /* + * Remove the results + */ + for (i = 0; i < PERF_MON_PROCESS_NUM; i++) + per_process_remove_manual(perf_mons[i].pid); + /* + * Remove the proc entries in the settings dir + */ + i = 0; + for (i = 0; i < pp_proc_entry_index; i++) + remove_proc_entry(per_process_proc_names[i], settings_dir); + + /*remove proc axi files*/ + remove_proc_entry("axi_enable", axi_settings_dir); + remove_proc_entry("axi_valid", axi_settings_dir); + remove_proc_entry("axi_refresh", axi_settings_dir); + remove_proc_entry("axi_clear", axi_settings_dir); + remove_proc_entry("axi_sel_reg0", axi_settings_dir); + remove_proc_entry("axi_sel_reg1", axi_settings_dir); + remove_proc_entry("axi_ten_sel", axi_settings_dir); + remove_proc_entry("axi_cnts", axi_results_dir); + /* + * Remove the directories + */ + remove_proc_entry("results", l2_dir); + remove_proc_entry("l2", proc_dir); + remove_proc_entry("results", proc_dir); + remove_proc_entry("settings", proc_dir); + remove_proc_entry("results", axi_dir); + remove_proc_entry("settings", axi_dir); + 
remove_proc_entry("axi", proc_dir); + remove_proc_entry("ppPerf", NULL); + pm_free_irq(); +#ifdef CONFIG_ARCH_MSM8X60 + l2_pm_free_irq(); +#endif + thread_unregister_notifier(&perfmon_notifier_block); +#ifdef CONFIG_ARCH_MSM8X60 + smp_call_function_single(0, (void *)pm_deinitialize, (void *)NULL, 1); + smp_call_function_single(1, (void *)pm_deinitialize, (void *)NULL, 1); + l2_pm_deinitialize(); +#else + pm_deinitialize(); +#endif +} diff --git a/arch/arm/perfmon/per.c b/arch/arm/perfmon/per.c new file mode 100644 index 0000000000000..4222844f12147 --- /dev/null +++ b/arch/arm/perfmon/per.c @@ -0,0 +1,59 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* +per.c + +DESCRIPTION: Performance count interface for linux via proc in the T32 +command file style +*/ + +#include +#include +#include +#include +#include +#include "linux/proc_fs.h" +#include "linux/kernel_stat.h" +#include "asm/uaccess.h" +#include "cp15_registers.h" +#include "perf.h" + +#define PM_PER_ERR -1 +/* +FUNCTION perf_if_proc_init + +DESCRIPTION Initialize the proc interface for thje performance data. +*/ +static __init int per_init(void) +{ + + if (atomic_read(&pm_op_lock) == 1) { + printk(KERN_INFO "Can not load KSAPI, monitors are in use\n"); + return PM_PER_ERR; + } + atomic_set(&pm_op_lock, 1); + per_process_perf_init(); + printk(KERN_INFO "ksapi init\n"); + return 0; +} + +static void __exit per_exit(void) +{ + per_process_perf_exit(); + printk(KERN_INFO "ksapi exit\n"); + atomic_set(&pm_op_lock, 0); +} + +MODULE_LICENSE("GPL v2"); +module_init(per_init); +module_exit(per_exit); diff --git a/arch/arm/perfmon/perf-function-hooks.c b/arch/arm/perfmon/perf-function-hooks.c new file mode 100644 index 0000000000000..aacc353741c96 --- /dev/null +++ b/arch/arm/perfmon/perf-function-hooks.c @@ -0,0 +1,81 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* +* perf-function-hooks.c +* DESCRIPTION +* Hooks for ksapi.ko +*/ + +#include +#include +#include +#include +#include +#include +#include "linux/proc_fs.h" +#include "linux/kernel_stat.h" +#include "asm/uaccess.h" +#include +#include "cp15_registers.h" +#include +#include "perf.h" + +/* +* Function Pointers for when the module is installed... +* Should we use a single "ready" variable for the testing +* in the functions below, will be safer when module is removed +* testing for a locked variable... 
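+*
+* Illustrative call pattern (a sketch; the actual call sites are in the
+* interrupt entry and exit paths outside this file).  The wrappers below
+* are always safe to call and only dispatch through these pointers once
+* ksapi.ko has installed them and set pp_loaded:
+*
+*     perf_mon_interrupt_in();
+*     ... service the interrupt ...
+*     perf_mon_interrupt_out();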
+*/ +VPVF pp_interrupt_out_ptr; +VPVF pp_interrupt_in_ptr; +VPULF pp_process_remove_ptr; +unsigned int pp_loaded; +EXPORT_SYMBOL(pp_loaded); +atomic_t pm_op_lock; +EXPORT_SYMBOL(pm_op_lock); + +/* +FUNCTION VARIOUS + +DESCRIPTION +Hooks to callinto the module functions after they are loaded. The +above pointers will be set and then these functions are ready to be +called. + +DEPENDENCIES +THe per preocess performance monitor needs to be loaded ... + +RETURN VALUE + +SIDE EFFECTS +*/ +void perf_mon_interrupt_out(void) +{ + if (pp_loaded) + (*pp_interrupt_out_ptr)(); +} +EXPORT_SYMBOL(pp_interrupt_out_ptr); + +void perf_mon_interrupt_in(void) +{ + if (pp_loaded) + (*pp_interrupt_in_ptr)(); +} +EXPORT_SYMBOL(pp_interrupt_in_ptr); + +void per_process_remove(unsigned long pid) +{ + if (pp_loaded) + (*pp_process_remove_ptr)(pid); +} +EXPORT_SYMBOL(pp_process_remove_ptr); diff --git a/arch/arm/perfmon/perf-smp.c b/arch/arm/perfmon/perf-smp.c new file mode 100644 index 0000000000000..5417fc7f6a5d3 --- /dev/null +++ b/arch/arm/perfmon/perf-smp.c @@ -0,0 +1,751 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* +perf-smp.c +DESCRIPTION +Manipulation, initialization of the ARMV7 Performance counter register. + + +EXTERNALIZED FUNCTIONS + +INITIALIZATION AND SEQUENCING REQUIREMENTS +*/ + +/* +INCLUDE FILES FOR MODULE +*/ +#include +#include +#include +#include +#include + +#include +#include +#include "l2_cp15_registers.h" + +/* +DEFINITIONS AND DECLARATIONS FOR MODULE + +This section contains definitions for constants, macros, types, variables +and other items needed by this module. 
+*/
+
+/*
+ Constant / Define Declarations
+*/
+
+#define PM_NUM_COUNTERS 4
+#define L2_PM_ERR -1
+
+/*------------------------------------------------------------------------
+ * Global control bits
+------------------------------------------------------------------------*/
+#define PM_L2_GLOBAL_ENABLE (1<<0)
+#define PM_L2_EVENT_RESET (1<<1)
+#define PM_L2_CYCLE_RESET (1<<2)
+#define PM_L2_CLKDIV (1<<3)
+#define PM_L2_GLOBAL_TRACE (1<<4)
+#define PM_L2_DISABLE_PROHIBIT (1<<5)
+
+/*---------------------------------------------------------------------------
+ * Enable and clear bits for each event/trigger
+----------------------------------------------------------------------------*/
+#define PM_L2EV0_ENABLE (1<<0)
+#define PM_L2EV1_ENABLE (1<<1)
+#define PM_L2EV2_ENABLE (1<<2)
+#define PM_L2EV3_ENABLE (1<<3)
+#define PM_L2_COUNT_ENABLE (1<<31)
+#define PM_L2_ALL_ENABLE (0x8000000F)
+
+
+/*-----------------------------------------------------------------------------
+ * Overflow actions
+------------------------------------------------------------------------------*/
+#define PM_L2_OVERFLOW_NOACTION (0)
+#define PM_L2_OVERFLOW_HALT (1)
+#define PM_L2_OVERFLOW_STOP (2)
+#define PM_L2_OVERFLOW_SKIP (3)
+
+/*
+ * Shifts for each trigger type
+ */
+#define PM_STOP_SHIFT 24
+#define PM_RELOAD_SHIFT 22
+#define PM_RESUME_SHIFT 20
+#define PM_SUSPEND_SHIFT 18
+#define PM_START_SHIFT 16
+#define PM_STOPALL_SHIFT 15
+#define PM_STOPCOND_SHIFT 12
+#define PM_RELOADCOND_SHIFT 9
+#define PM_RESUMECOND_SHIFT 6
+#define PM_SUSPENDCOND_SHIFT 3
+#define PM_STARTCOND_SHIFT 0
+
+
+/*---------------------------------------------------------------------------
+External control register. What to do when various events happen.
+Triggering events, etc.
+----------------------------------------------------------------------------*/
+#define PM_EXTTR0 0
+#define PM_EXTTR1 1
+#define PM_EXTTR2 2
+#define PM_EXTTR3 3
+
+#define PM_COND_NO_STOP 0
+#define PM_COND_STOP_CNTOVRFLW 1
+#define PM_COND_STOP_EXTERNAL 4
+#define PM_COND_STOP_TRACE 5
+#define PM_COND_STOP_EVOVRFLW 6
+#define PM_COND_STOP_EVTYPER 7
+
+/*--------------------------------------------------------------------------
+Protect against concurrent access. There is an index register that is
+used to select the appropriate bank of registers. If multiple processes
+are writing it at different times we could have a mess...
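+
+A minimal sketch of real protection (an assumption, not something this
+patch implements; l2_pmsel_lock is a hypothetical name) would wrap each
+select/access pair in a dedicated spinlock instead of leaving
+PM_LOCK()/PM_UNLOCK() empty:
+
+    static DEFINE_SPINLOCK(l2_pmsel_lock);
+    #define PM_LOCK()   spin_lock(&l2_pmsel_lock)
+    #define PM_UNLOCK() spin_unlock(&l2_pmsel_lock)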
+---------------------------------------------------------------------------*/ +#define PM_LOCK() +#define PM_UNLOCK() +#define PRINT printk + +/*-------------------------------------------------------------------------- +The Event definitions +--------------------------------------------------------------------------*/ +#define L2PM_EVT_PM0_EVT0 0x00 +#define L2PM_EVT_PM0_EVT1 0x01 +#define L2PM_EVT_PM0_EVT2 0x02 +#define L2PM_EVT_PM0_EVT3 0x03 +#define L2PM_EVT_PM1_EVT0 0x04 +#define L2PM_EVT_PM1_EVT1 0x05 +#define L2PM_EVT_PM1_EVT2 0x06 +#define L2PM_EVT_PM1_EVT3 0x07 +#define L2PM_EVT_PM2_EVT0 0x08 +#define L2PM_EVT_PM2_EVT1 0x09 +#define L2PM_EVT_PM2_EVT2 0x0a +#define L2PM_EVT_PM2_EVT3 0x0b +#define L2PM_EVT_PM3_EVT0 0x0c +#define L2PM_EVT_PM3_EVT1 0x0d +#define L2PM_EVT_PM3_EVT2 0x0e +#define L2PM_EVT_PM3_EVT3 0x0f +#define L2PM_EVT_PM4_EVT0 0x10 +#define L2PM_EVT_PM4_EVT1 0x11 +#define L2PM_EVT_PM4_EVT2 0x12 +#define L2PM_EVT_PM4_EVT3 0x13 + +/* +Type Declarations +*/ + +/* +Local Object Definitions +*/ + +unsigned long l2_pm_cycle_overflow_count; +unsigned long l2_pm_overflow_count[PM_NUM_COUNTERS]; + +/*--------------------------------------------------------------------------- +Max number of events read from the config registers +---------------------------------------------------------------------------*/ +static int pm_l2_max_events; + +static int irqid; + +/* +Function Definitions +*/ + +/* +FUNCTION l2_pm_group_stop + +DESCRIPTION Stop a group of the performance monitors. Event monitor 0 is bit +0, event monitor 1 bit 1, etc. The cycle count can also be disable with +bit 31. Macros are provided for all of the indexes including an ALL. + +DEPENDENCIES + +RETURN VALUE +None + +SIDE EFFECTS +Stops the performance monitoring for the index passed. +*/ +void pm_l2_group_stop(unsigned long mask) +{ + WCP15_L2PMCNTENCLR(mask); +} + +/* +FUNCTION l2_pm_group_start + +DESCRIPTION Start a group of the performance monitors. Event monitor 0 is bit +0, event monitor 1 bit 1, etc. The cycle count can also be enabled with +bit 31. Macros are provided for all of the indexes including an ALL. + +DEPENDENCIES + +RETURN VALUE +None + +SIDE EFFECTS +Starts the performance monitoring for the index passed. +*/ +void pm_l2_group_start(unsigned long mask) +{ + WCP15_L2PMCNTENSET(mask); +} + +/* +FUNCTION l2_pm_get_overflow + +DESCRIPTION Return the overflow condition for the index passed. + +DEPENDENCIES + +RETURN VALUE +0 no overflow +!0 (anything else) overflow; + +SIDE EFFECTS +*/ +unsigned long l2_pm_get_overflow(int index) +{ + unsigned long overflow = 0; + +/* +* Range check +*/ + if (index > pm_l2_max_events) + return L2_PM_ERR; + RCP15_L2PMOVSR(overflow); + + return overflow & (1< pm_l2_max_events) + return; + WCP15_L2PMCNTENCLR(1< pm_l2_max_events) + return; + WCP15_L2PMCNTENSET(1< pm_l2_max_events) + return L2_PM_ERR; + +/* +* Lock, select the index and read the count...unlock +*/ + PM_LOCK(); + WCP15_L2PMSELR(index); + WCP15_L2PMXEVCNTR(new_value); + PM_UNLOCK(); + return reg; +} + +int l2_pm_reset_count(int index) +{ + return l2_pm_set_count(index, 0); +} + +/* +FUNCTION l2_pm_get_count + +DESCRIPTION Return the number of events that have happened for the index +passed. 
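+
+A minimal end-to-end sketch (kernel context, illustration only, using just
+helpers defined in this file):
+
+    unsigned long count;
+
+    l2_pm_set_event(0, L2PM_EVT_PM0_EVT0);
+    pm_l2_group_start(PM_L2EV0_ENABLE | PM_L2_COUNT_ENABLE);
+    ... run the workload of interest ...
+    pm_l2_group_stop(PM_L2EV0_ENABLE | PM_L2_COUNT_ENABLE);
+    count = l2_pm_get_count(0);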
+ +DEPENDENCIES + +RETURN VALUE +-1 if the index is out of range +The number of events if inrange + +SIDE EFFECTS +*/ +unsigned long l2_pm_get_count(int index) +{ + unsigned long reg = 0; + +/* +* Range check +*/ + if (index > pm_l2_max_events) + return L2_PM_ERR; + +/* +* Lock, select the index and read the count...unlock +*/ + PM_LOCK(); + WCP15_L2PMSELR(index); + RCP15_L2PMXEVCNTR(reg); + PM_UNLOCK(); + return reg; +} + +unsigned long get_filter_code(unsigned long event) +{ + if (event == 0x0 || event == 0x4 || event == 0x08 + || event == 0x0c || event == 0x10) + return 0x0001003f; + else if (event == 0x1 || event == 0x5 || event == 0x09 + || event == 0x0d || event == 0x11) + return 0x0002003f; + else if (event == 0x2 || event == 0x6 || event == 0x0a + || event == 0x0e || event == 0x12) + return 0x0004003f; + else if (event == 0x3 || event == 0x7 || event == 0x0b + || event == 0x0f || event == 0x13) + return 0x0008003f; + else + return 0; +} + +int l2_pm_set_event(int index, unsigned long event) +{ + unsigned long reg = 0; + + /* + * Range check + */ + if (index > pm_l2_max_events) + return L2_PM_ERR; + + /* + * Lock, select the index and read the count...unlock + */ + PM_LOCK(); + WCP15_L2PMSELR(index); + WCP15_L2PMXEVTYPER(event); + /* WCP15_L2PMXEVFILTER(get_filter_code(event)); */ + WCP15_L2PMXEVFILTER(0x000f003f); + PM_UNLOCK(); + return reg; +} + +/* +FUNCTION pm_set_local_bu + +DESCRIPTION Set the local BU triggers. Note that the MSB determines if + these are enabled or not. + +DEPENDENCIES + +RETURN VALUE + NONE + +SIDE EFFECTS +*/ +void pm_set_local_bu(unsigned long value) +{ + WCP15_L2PMEVTYPER0(value); +} + +/* +FUNCTION pm_set_local_cb + +DESCRIPTION Set the local CB triggers. Note that the MSB determines if + these are enabled or not. + +DEPENDENCIES + +RETURN VALUE + NONE + +SIDE EFFECTS +*/ +void pm_set_local_cb(unsigned long value) +{ + WCP15_L2PMEVTYPER1(value); +} + +/* +FUNCTION pm_set_local_mp + +DESCRIPTION Set the local MP triggers. Note that the MSB determines if + these are enabled or not. + +DEPENDENCIES + +RETURN VALUE + NONE + +SIDE EFFECTS +*/ +void pm_set_local_mp(unsigned long value) +{ + WCP15_L2PMEVTYPER2(value); +} + +/* +FUNCTION pm_set_local_sp + +DESCRIPTION Set the local SP triggers. Note that the MSB determines if + these are enabled or not. + +DEPENDENCIES + +RETURN VALUE + NONE + +SIDE EFFECTS +*/ +void pm_set_local_sp(unsigned long value) +{ + WCP15_L2PMEVTYPER3(value); +} + +/* +FUNCTION pm_set_local_scu + +DESCRIPTION Set the local SCU triggers. Note that the MSB determines if + these are enabled or not. 
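+
+For example (illustration only; the low bits select the SCU events and are
+hardware specific, so scu_event_bits is just a placeholder), setting bit 31
+turns the programmed events on:
+
+    pm_set_local_scu((1UL << 31) | scu_event_bits);
+
+In the per-process path this value comes from the l2pmevtyper4 settings
+entry and is applied in per_process_swap_in().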
+ +DEPENDENCIES + +RETURN VALUE + NONE + +SIDE EFFECTS +*/ +void pm_set_local_scu(unsigned long value) +{ + WCP15_L2PMEVTYPER4(value); +} + +/* +FUNCTION l2_pm_isr + +DESCRIPTION: + Performance Monitor interrupt service routine to capture overflows + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +static irqreturn_t l2_pm_isr(int irq, void *d) +{ + int i; + + for (i = 0; i < PM_NUM_COUNTERS; i++) { + if (l2_pm_get_overflow(i)) { + l2_pm_overflow_count[i]++; + l2_pm_reset_overflow(i); + } + } + + if (l2_pm_get_cycle_overflow()) { + l2_pm_cycle_overflow_count++; + l2_pm_reset_cycle_overflow(); + } + + return IRQ_HANDLED; +} + + +void l2_pm_stop_all(void) +{ + WCP15_L2PMCNTENCLR(0xFFFFFFFF); +} + +void l2_pm_reset_all(void) +{ + WCP15_L2PMCR(0xF); + WCP15_L2PMOVSR(PM_L2_ALL_ENABLE); /* overflow clear */ +} + +void l2_pm_start_all(void) +{ + WCP15_L2PMCNTENSET(PM_L2_ALL_ENABLE); +} + +/* +FUNCTION l2_pm_initialize + +DESCRIPTION Initialize the performanca monitoring for the v7 processor. + Ensures the cycle count is running and the event counters are enabled. + +DEPENDENCIES + +RETURN VALUE + NONE + +SIDE EFFECTS +*/ +void l2_pm_initialize(void) +{ + unsigned long reg = 0; + unsigned char imp; + unsigned char id; + unsigned char num; + unsigned long enables = 0; + static int initialized; + + if (initialized) + return; + initialized = 1; + + irqid = SC_SICL2PERFMONIRPTREQ; + RCP15_L2PMCR(reg); + imp = (reg>>24) & 0xFF; + id = (reg>>16) & 0xFF; + pm_l2_max_events = num = (reg>>11) & 0xFF; + PRINT("V7 MP L2SCU Performance Monitor Capabilities\n"); + PRINT(" Implementor %c(%d)\n", imp, imp); + PRINT(" Id %d %x\n", id, id); + PRINT(" Num Events %d %x\n", num, num); + PRINT("\nCycle counter enabled by default...\n"); + + /* + * Global enable, ensure the global enable is set so all + * subsequent actions take effect. Also resets the counts + */ + RCP15_L2PMCR(enables); + WCP15_L2PMCR(enables | PM_L2_GLOBAL_ENABLE | PM_L2_EVENT_RESET | + PM_L2_CYCLE_RESET | PM_L2_CLKDIV); + + /* + * Enable access from user space + */ + + /* + * Install interrupt handler and the enable the interrupts + */ + l2_pm_reset_cycle_overflow(); + l2_pm_reset_overflow(0); + l2_pm_reset_overflow(1); + l2_pm_reset_overflow(2); + l2_pm_reset_overflow(3); + l2_pm_reset_overflow(4); + + if (0 != request_irq(irqid, l2_pm_isr, 0, "l2perfmon", 0)) + printk(KERN_ERR "%s:%d request_irq returned error\n", + __FILE__, __LINE__); + WCP15_L2PMINTENSET(PM_L2_ALL_ENABLE); + /* + * Enable the cycle counter. Default, count 1:1 no divisor. + */ + l2_pm_enable_cycle_counter(); + +} + +void l2_pm_free_irq(void) +{ + free_irq(irqid, 0); +} + +void l2_pm_deinitialize(void) +{ + unsigned long enables = 0; + RCP15_L2PMCR(enables); + WCP15_L2PMCR(enables & ~PM_L2_GLOBAL_ENABLE); +} + diff --git a/arch/arm/perfmon/perf-v7.c b/arch/arm/perfmon/perf-v7.c new file mode 100644 index 0000000000000..614eedc92181a --- /dev/null +++ b/arch/arm/perfmon/perf-v7.c @@ -0,0 +1,1009 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +/* +perf-v7.c +DESCRIPTION +Manipulation, initialization of the ARMV7 Performance counter register. + + +EXTERNALIZED FUNCTIONS + +INITIALIZATION AND SEQUENCING REQUIREMENTS +*/ + +/* +INCLUDE FILES FOR MODULE +*/ +#include +#include +#include +#include +#include + +#include +#include +#include "cp15_registers.h" + +/* +DEFINITIONS AND DECLARATIONS FOR MODULE + +This section contains definitions for constants, macros, types, variables +and other items needed by this module. +*/ + +/* + Constant / Define Declarations +*/ + +#define PM_NUM_COUNTERS 4 +#define PM_V7_ERR -1 + +/*------------------------------------------------------------------------ + * Global control bits +------------------------------------------------------------------------*/ +#define PM_GLOBAL_ENABLE (1<<0) +#define PM_EVENT_RESET (1<<1) +#define PM_CYCLE_RESET (1<<2) +#define PM_CLKDIV (1<<3) +#define PM_GLOBAL_TRACE (1<<4) +#define PM_DISABLE_PROHIBIT (1<<5) + +/*--------------------------------------------------------------------------- + * Enable and clear bits for each event/trigger +----------------------------------------------------------------------------*/ +#define PM_EV0_ENABLE (1<<0) +#define PM_EV1_ENABLE (1<<1) +#define PM_EV2_ENABLE (1<<2) +#define PM_EV3_ENABLE (1<<3) +#define PM_COUNT_ENABLE (1<<31) +#define PM_ALL_ENABLE (0x8000000F) + + +/*----------------------------------------------------------------------------- + * Overflow actions +------------------------------------------------------------------------------*/ +#define PM_OVERFLOW_NOACTION (0) +#define PM_OVERFLOW_HALT (1) +#define PM_OVERFLOW_STOP (2) +#define PM_OVERFLOW_SKIP (3) + +/* + * Shifts for each trigger type + */ +#define PM_STOP_SHIFT 24 +#define PM_RELOAD_SHIFT 22 +#define PM_RESUME_SHIFT 20 +#define PM_SUSPEND_SHIFT 18 +#define PM_START_SHIFT 16 +#define PM_STOPALL_SHIFT 15 +#define PM_STOPCOND_SHIFT 12 +#define PM_RELOADCOND_SHIFT 9 +#define PM_RESUMECOND_SHIFT 6 +#define PM_SUSPENDCOND_SHIFT 3 +#define PM_STARTCOND_SHIFT 0 + + +/*--------------------------------------------------------------------------- +External control register. What todo when various events happen. +Triggering events, etc. +----------------------------------------------------------------------------*/ +#define PM_EXTTR0 0 +#define PM_EXTTR1 1 +#define PM_EXTTR2 2 +#define PM_EXTTR3 3 + +#define PM_COND_NO_STOP 0 +#define PM_COND_STOP_CNTOVRFLW 1 +#define PM_COND_STOP_EXTERNAL 4 +#define PM_COND_STOP_TRACE 5 +#define PM_COND_STOP_EVOVRFLW 6 +#define PM_COND_STOP_EVTYPER 7 + +/*-------------------------------------------------------------------------- +Protect against concurrent access. There is an index register that is +used to select the appropriate bank of registers. If multiple processes +are writting this at different times we could have a mess... 
+---------------------------------------------------------------------------*/ +#define PM_LOCK() +#define PM_UNLOCK() +#define PRINT printk + +/*-------------------------------------------------------------------------- +The Event definitions +--------------------------------------------------------------------------*/ +#define PM_EVT_SW_INCREMENT 0 +#define PM_EVT_L1_I_MISS 1 +#define PM_EVT_ITLB_MISS 2 +#define PM_EVT_L1_D_MISS 3 +#define PM_EVT_L1_D_ACCESS 4 +#define PM_EVT_DTLB_MISS 5 +#define PM_EVT_DATA_READ 6 +#define PM_EVT_DATA_WRITE 7 +#define PM_EVT_INSTRUCTION 8 +#define PM_EVT_EXCEPTIONS 9 +#define PM_EVT_EXCEPTION_RET 10 +#define PM_EVT_CTX_CHANGE 11 +#define PM_EVT_PC_CHANGE 12 +#define PM_EVT_BRANCH 13 +#define PM_EVT_RETURN 14 +#define PM_EVT_UNALIGNED 15 +#define PM_EVT_BRANCH_MISS 16 +#define PM_EVT_EXTERNAL0 0x40 +#define PM_EVT_EXTERNAL1 0x41 +#define PM_EVT_EXTERNAL2 0x42 +#define PM_EVT_EXTERNAL3 0x43 +#define PM_EVT_TRACE0 0x44 +#define PM_EVT_TRACE1 0x45 +#define PM_EVT_TRACE2 0x46 +#define PM_EVT_TRACE3 0x47 +#define PM_EVT_PM0 0x48 +#define PM_EVT_PM1 0x49 +#define PM_EVT_PM2 0x4a +#define PM_EVT_PM3 0x4b +#define PM_EVT_LPM0_EVT0 0x4c +#define PM_EVT_LPM0_EVT1 0x4d +#define PM_EVT_LPM0_EVT2 0x4e +#define PM_EVT_LPM0_EVT3 0x4f +#define PM_EVT_LPM1_EVT0 0x50 +#define PM_EVT_LPM1_EVT1 0x51 +#define PM_EVT_LPM1_EVT2 0x52 +#define PM_EVT_LPM1_EVT3 0x53 +#define PM_EVT_LPM2_EVT0 0x54 +#define PM_EVT_LPM2_EVT1 0x55 +#define PM_EVT_LPM2_EVT2 0x56 +#define PM_EVT_LPM2_EVT3 0x57 +#define PM_EVT_L2_EVT0 0x58 +#define PM_EVT_L2_EVT1 0x59 +#define PM_EVT_L2_EVT2 0x5a +#define PM_EVT_L2_EVT3 0x5b +#define PM_EVT_VLP_EVT0 0x5c +#define PM_EVT_VLP_EVT1 0x5d +#define PM_EVT_VLP_EVT2 0x5e +#define PM_EVT_VLP_EVT3 0x5f + +/* +Type Declarations +*/ + +/*-------------------------------------------------------------------------- +A performance monitor trigger setup/initialization structure. Contains +all of the fields necessary to setup a complex trigger with the internal +performance monitor. 
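+
+A minimal simple-trigger sketch (illustration only; fields not shown are
+left zero).  The structure is consumed by pm_event_init() further down and
+the counter is then armed with pm_group_start():
+
+    struct pm_trigger_s t = { 0 };
+
+    t.index = 0;
+    t.event_type = PM_EVT_L1_D_MISS;
+    t.overflow_action = PM_OVERFLOW_NOACTION;
+    pm_event_init(&t);
+    pm_group_start(PM_EV0_ENABLE | PM_COUNT_ENABLE);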
+---------------------------------------------------------------------------*/ +struct pm_trigger_s { + int index; + int event_type; + bool interrupt; + bool overflow_enable; + bool event_export; + unsigned char overflow_action; + unsigned char stop_index; + unsigned char reload_index; + unsigned char resume_index; + unsigned char suspend_index; + unsigned char start_index; + bool overflow_stop; + unsigned char stop_condition; + unsigned char reload_condition; + unsigned char resume_condition; + unsigned char suspend_condition; + unsigned char start_condition; +}; + +/* +* Name and index place holder so we can display the event +*/ +struct pm_name_s { + unsigned long index; + char *name; +}; + +/* +Local Object Definitions +*/ + +unsigned long pm_cycle_overflow_count; +unsigned long pm_overflow_count[PM_NUM_COUNTERS]; + +/*--------------------------------------------------------------------------- +Max number of events read from the config registers +---------------------------------------------------------------------------*/ +static int pm_max_events; + +/*-------------------------------------------------------------------------- +Storage area for each of the triggers +*---------------------------------------------------------------------------*/ +static struct pm_trigger_s pm_triggers[4]; + +/*-------------------------------------------------------------------------- +Names and indexes of the events +--------------------------------------------------------------------------*/ +static struct pm_name_s pm_names[] = { + { PM_EVT_SW_INCREMENT, "SW Increment"}, + { PM_EVT_L1_I_MISS, "L1 I MISS"}, + { PM_EVT_ITLB_MISS, "L1 ITLB MISS"}, + { PM_EVT_L1_D_MISS, "L1 D MISS"}, + { PM_EVT_L1_D_ACCESS, "L1 D ACCESS"}, + { PM_EVT_DTLB_MISS, "DTLB MISS"}, + { PM_EVT_DATA_READ, "DATA READ"}, + { PM_EVT_DATA_WRITE, "DATA WRITE"}, + { PM_EVT_INSTRUCTION, "INSTRUCTIONS"}, + { PM_EVT_EXCEPTIONS, "EXCEPTIONS"}, + { PM_EVT_EXCEPTION_RET, "EXCEPTION RETURN"}, + { PM_EVT_CTX_CHANGE, "CTX CHANGE"}, + { PM_EVT_PC_CHANGE, "PC CHANGE"}, + { PM_EVT_BRANCH, "BRANCH"}, + { PM_EVT_RETURN, "RETURN"}, + { PM_EVT_UNALIGNED, "UNALIGNED"}, + { PM_EVT_BRANCH_MISS, "BRANCH MISS"}, + { PM_EVT_EXTERNAL0, "EXTERNAL 0"}, + { PM_EVT_EXTERNAL1, "EXTERNAL 1"}, + { PM_EVT_EXTERNAL2, "EXTERNAL 2"}, + { PM_EVT_EXTERNAL3, "EXTERNAL 3"}, + { PM_EVT_TRACE0, "TRACE 0"}, + { PM_EVT_TRACE1, "TRACE 1"}, + { PM_EVT_TRACE2, "TRACE 2"}, + { PM_EVT_TRACE3, "TRACE 3"}, + { PM_EVT_PM0, "PM0"}, + { PM_EVT_PM1, "PM1"}, + { PM_EVT_PM2, "PM2"}, + { PM_EVT_PM3, "PM3"}, + { PM_EVT_LPM0_EVT0, "LPM0 E0"}, + { PM_EVT_LPM0_EVT1, "LPM0 E1"}, + { PM_EVT_LPM0_EVT2 , "LPM0 E2"}, + { PM_EVT_LPM0_EVT3, "LPM0 E3"}, + { PM_EVT_LPM1_EVT0, "LPM1 E0"}, + { PM_EVT_LPM1_EVT1, "LPM1 E1"}, + { PM_EVT_LPM1_EVT2, "LPM1 E2"}, + { PM_EVT_LPM1_EVT3, "LPM1 E3"}, + { PM_EVT_LPM2_EVT0, "LPM2 E0"}, + { PM_EVT_LPM2_EVT1 , "LPM2 E1"}, + { PM_EVT_LPM2_EVT2, "LPM2 E2"}, + { PM_EVT_LPM2_EVT3, "LPM2 E3"}, + { PM_EVT_L2_EVT0 , "L2 E0"}, + { PM_EVT_L2_EVT1, "L2 E1"}, + { PM_EVT_L2_EVT2, "L2 E2"}, + { PM_EVT_L2_EVT3 , "L2 E3"}, + { PM_EVT_VLP_EVT0 , "VLP E0"}, + { PM_EVT_VLP_EVT1, "VLP E1"}, + { PM_EVT_VLP_EVT2, "VLP E2"}, + { PM_EVT_VLP_EVT3, "VLP E3"}, +}; + +static int irqid; + +/* +Function Definitions +*/ + +/* +FUNCTION pm_find_event_name + +DESCRIPTION Find the name associated with the event index passed and return +the pointer. + +DEPENDENCIES + +RETURN VALUE +Pointer to text string containing the name of the event or pointer to +an error string. 
Either way access to the returned string will not +cause an access error. + +SIDE EFFECTS +*/ +char *pm_find_event_name(unsigned long index) +{ + unsigned long i = 0; + + while (pm_names[i].index != -1) { + if (pm_names[i].index == index) + return pm_names[i].name; + i++; + } + return "BAD INDEX"; +} + +/* +FUNCTION pm_group_stop + +DESCRIPTION Stop a group of the performance monitors. Event monitor 0 is bit +0, event monitor 1 bit 1, etc. The cycle count can also be disabled with +bit 31. Macros are provided for all of the indexes including an ALL. + +DEPENDENCIES + +RETURN VALUE +None + +SIDE EFFECTS +Stops the performance monitoring for the index passed. +*/ +void pm_group_stop(unsigned long mask) +{ + WCP15_PMCNTENCLR(mask); +} + +/* +FUNCTION pm_group_start + +DESCRIPTION Start a group of the performance monitors. Event monitor 0 is bit +0, event monitor 1 bit 1, etc. The cycle count can also be enabled with +bit 31. Macros are provided for all of the indexes including an ALL. + +DEPENDENCIES + +RETURN VALUE +None + +SIDE EFFECTS +Starts the performance monitoring for the index passed. +*/ +void pm_group_start(unsigned long mask) +{ + WCP15_PMCNTENSET(mask); +} + +/* +FUNCTION pm_cycle_overflow_action + +DESCRIPTION Action to take for an overflow of the cycle counter. + +DEPENDENCIES + +RETURN VALUE +None + +SIDE EFFECTS +Modify the state actions for overflow +*/ +void pm_cycle_overflow_action(int action) +{ + unsigned long reg = 0; + + if ((action > PM_OVERFLOW_SKIP) || (action < 0)) + return; + + RCP15_PMACTLR(reg); + reg &= ~(1<<30); /*clear it*/ + WCP15_PMACTLR(reg | (action<<30)); +} + +/* +FUNCTION pm_get_overflow + +DESCRIPTION Return the overflow condition for the index passed. + +DEPENDENCIES + +RETURN VALUE +0 no overflow +!0 (anything else) overflow; + +SIDE EFFECTS +*/ +unsigned long pm_get_overflow(int index) +{ + unsigned long overflow = 0; + +/* +* Range check +*/ + if (index > pm_max_events) + return PM_V7_ERR; + RCP15_PMOVSR(overflow); + + return overflow & (1< pm_max_events) + return; + WCP15_PMCNTENCLR(1< pm_max_events) + return; + WCP15_PMCNTENSET(1< pm_max_events) + return PM_V7_ERR; + +/* +* Lock, select the index and read the count...unlock +*/ + PM_LOCK(); + WCP15_PMSELR(index); + WCP15_PMXEVCNTR(new_value); + PM_UNLOCK(); + return reg; +} + +int pm_reset_count(int index) +{ + return pm_set_count(index, 0); +} + +/* +FUNCTION pm_get_count + +DESCRIPTION Return the number of events that have happened for the index +passed. + +DEPENDENCIES + +RETURN VALUE +-1 if the index is out of range +The number of events if inrange + +SIDE EFFECTS +*/ +unsigned long pm_get_count(int index) +{ + unsigned long reg = 0; + +/* +* Range check +*/ + if (index > pm_max_events) + return PM_V7_ERR; + +/* +* Lock, select the index and read the count...unlock +*/ + PM_LOCK(); + WCP15_PMSELR(index); + RCP15_PMXEVCNTR(reg); + PM_UNLOCK(); + return reg; +} + +/* +FUNCTION pm_show_event_info + +DESCRIPTION Display (print) the information about the event at the index +passed. Shows the index, name and count if a valid index is passed. If +the index is not valid, then nothing is displayed. 
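+
+Illustration only, continuing the pm_trigger_s sketch above the structure
+definition: once a trigger has been configured with pm_event_init(), a
+typical measurement is
+
+    pm_group_start(PM_EV0_ENABLE | PM_COUNT_ENABLE);
+    ... run the code being measured ...
+    pm_group_stop(PM_EV0_ENABLE | PM_COUNT_ENABLE);
+    pm_show_event_info(0);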
+ +DEPENDENCIES + +RETURN VALUE +None + +SIDE EFFECTS +*/ +void pm_show_event_info(unsigned long index) +{ + unsigned long count; + unsigned long event_type; + + if (index > pm_max_events) + return; + if (pm_triggers[index].index > pm_max_events) + return; + + count = pm_get_count(index); + event_type = pm_triggers[index].event_type; + + PRINT("Event %ld Trigger %s(%ld) count:%ld\n", index, + pm_find_event_name(event_type), event_type, count); +} + +/* +FUNCTION pm_event_init + +DESCRIPTION Given the struct pm_trigger_s info passed, configure the event. +This can be a complex trigger or a simple trigger. Any old values in the +event are lost. + +DEPENDENCIES + +RETURN VALUE +status + +SIDE EFFECTS +stops and clears the event at the index passed. +*/ +int pm_event_init(struct pm_trigger_s *data) +{ + unsigned long trigger; + unsigned long actlr = 0; + + if (0 == data) + return PM_V7_ERR; + if (data->index > pm_max_events) + return PM_V7_ERR; + + /* + * Setup the trigger based ont he passed values + */ + trigger = ((data->overflow_enable&1)<<31) | + ((data->event_export&1)<<30) | + ((data->stop_index&3)<reload_index&3)<resume_index&3)<suspend_index&3)<start_index&3)<overflow_stop&1)<stop_condition&7)<reload_condition&7)<resume_condition&7)<suspend_condition&7)<start_condition&7)<index); + + /* + * Lock, select the bank, set the trigger event and the event type + * then unlock. + */ + PM_LOCK(); + RCP15_PMACTLR(actlr); + actlr &= ~(3<<(data->index<<1)); + WCP15_PMACTLR(actlr | ((data->overflow_action&3) << (data->index<<1))); + WCP15_PMSELR(data->index); + WCP15_PMXEVTYPER(data->event_type); + WCP15_PMXEVCNTCR(trigger); + PM_UNLOCK(); + + /* + * Make a copy of the trigger so we know what it is when/if it triggers. + */ + memcpy(&pm_triggers[data->index], data, sizeof(*data)); + + /* + * We do not re-enable this here so events can be started together with + * pm_group_start() that way an accurate measure can be taken... + */ + + return 0; +} + +int pm_set_event(int index, unsigned long event) +{ + unsigned long reg = 0; + + /* + * Range check + */ + if (index > pm_max_events) + return PM_V7_ERR; + + /* + * Lock, select the index and read the count...unlock + */ + PM_LOCK(); + WCP15_PMSELR(index); + WCP15_PMXEVTYPER(event); + PM_UNLOCK(); + return reg; +} + +/* +FUNCTION pm_set_local_iu + +DESCRIPTION Set the local IU triggers. Note that the MSB determines if + these are enabled or not. + +DEPENDENCIES + +RETURN VALUE + NONE + +SIDE EFFECTS +*/ +void pm_set_local_iu(unsigned long value) +{ + WCP15_LPM0EVTYPER(value); +} + +/* +FUNCTION pm_set_local_iu + +DESCRIPTION Set the local IU triggers. Note that the MSB determines if + these are enabled or not. + +DEPENDENCIES + +RETURN VALUE + NONE + +SIDE EFFECTS +*/ +void pm_set_local_xu(unsigned long value) +{ + WCP15_LPM1EVTYPER(value); +} + +/* +FUNCTION pm_set_local_su + +DESCRIPTION Set the local SU triggers. Note that the MSB determines if + these are enabled or not. + +DEPENDENCIES + +RETURN VALUE + NONE + +SIDE EFFECTS +*/ +void pm_set_local_su(unsigned long value) +{ + WCP15_LPM2EVTYPER(value); +} + +/* +FUNCTION pm_set_local_l2 + +DESCRIPTION Set the local L2 triggers. Note that the MSB determines if + these are enabled or not. + +DEPENDENCIES + +RETURN VALUE + NONE + +SIDE EFFECTS +*/ +void pm_set_local_l2(unsigned long value) +{ + WCP15_L2LPMEVTYPER(value); +} + +/* +FUNCTION pm_set_local_vu + +DESCRIPTION Set the local VU triggers. Note that the MSB determines if + these are enabled or not. 
+ +DEPENDENCIES + +RETURN VALUE + NONE + +SIDE EFFECTS +*/ +void pm_set_local_vu(unsigned long value) +{ + WCP15_VLPMEVTYPER(value); +} + +/* +FUNCTION pm_isr + +DESCRIPTION: + Performance Monitor interrupt service routine to capture overflows + +DEPENDENCIES + +RETURN VALUE + +SIDE EFFECTS +*/ +static irqreturn_t pm_isr(int irq, void *d) +{ + int i; + + for (i = 0; i < PM_NUM_COUNTERS; i++) { + if (pm_get_overflow(i)) { + pm_overflow_count[i]++; + pm_reset_overflow(i); + } + } + + if (pm_get_cycle_overflow()) { + pm_cycle_overflow_count++; + pm_reset_cycle_overflow(); + } + + return IRQ_HANDLED; +} + + +void pm_stop_all(void) +{ + WCP15_PMCNTENCLR(0xFFFFFFFF); +} + +void pm_reset_all(void) +{ + WCP15_PMCR(0xF); + WCP15_PMOVSR(PM_ALL_ENABLE); /* overflow clear */ +} + +void pm_start_all(void) +{ + WCP15_PMCNTENSET(PM_ALL_ENABLE); +} + +/* +FUNCTION pm_initialize + +DESCRIPTION Initialize the performanca monitoring for the v7 processor. + Ensures the cycle count is running and the event counters are enabled. + +DEPENDENCIES + +RETURN VALUE + NONE + +SIDE EFFECTS +*/ +void pm_initialize(void) +{ + unsigned long reg = 0; + unsigned char imp; + unsigned char id; + unsigned char num; + unsigned long enables = 0; + static int initialized; + + if (initialized) + return; + initialized = 1; + + irqid = INT_ARMQC_PERFMON; + RCP15_PMCR(reg); + imp = (reg>>24) & 0xFF; + id = (reg>>16) & 0xFF; + pm_max_events = num = (reg>>11) & 0xFF; + PRINT("V7Performance Monitor Capabilities\n"); + PRINT(" Implementor %c(%d)\n", imp, imp); + PRINT(" Id %d %x\n", id, id); + PRINT(" Num Events %d %x\n", num, num); + PRINT("\nCycle counter enabled by default...\n"); + + /* + * Global enable, ensure the global enable is set so all + * subsequent actions take effect. Also resets the counts + */ + RCP15_PMCR(enables); + WCP15_PMCR(enables | PM_GLOBAL_ENABLE | PM_EVENT_RESET | + PM_CYCLE_RESET | PM_CLKDIV); + + /* + * Enable access from user space + */ + WCP15_PMUSERENR(1); + WCP15_PMACTLR(1); + + /* + * Install interrupt handler and the enable the interrupts + */ + pm_reset_cycle_overflow(); + pm_reset_overflow(0); + pm_reset_overflow(1); + pm_reset_overflow(2); + pm_reset_overflow(3); + + if (0 != request_irq(irqid, pm_isr, 0, "perfmon", 0)) + printk(KERN_ERR "%s:%d request_irq returned error\n", + __FILE__, __LINE__); + WCP15_PMINTENSET(PM_ALL_ENABLE); + /* + * Enable the cycle counter. Default, count 1:1 no divisor. + */ + pm_enable_cycle_counter(); + +} + +void pm_free_irq(void) +{ + free_irq(irqid, 0); +} + +void pm_deinitialize(void) +{ + unsigned long enables = 0; + RCP15_PMCR(enables); + WCP15_PMCR(enables & ~PM_GLOBAL_ENABLE); +} diff --git a/arch/arm/perfmon/perf.h b/arch/arm/perfmon/perf.h new file mode 100644 index 0000000000000..21ee719b2164a --- /dev/null +++ b/arch/arm/perfmon/perf.h @@ -0,0 +1,101 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Code Aurora nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* +perf.h + +DESCRIPTION: Reads and writes the performance monitoring registers in the ARM +by using the MRC and MCR instructions. +*/ +#ifndef PERF_H +#define PERF_H +extern unsigned long perf_get_cycles(void); +extern void perf_set_count1(unsigned long val); +extern void perf_set_count0(unsigned long val); +extern unsigned long perf_get_count1(void); +extern unsigned long perf_get_count0(void); +extern unsigned long perf_get_ctrl(void); +extern void perf_set_ctrl(void); +extern void perf_set_ctrl_with(unsigned long v); +extern void perf_enable_counting(void); +extern void perf_disable_counting(void); +extern void perf_set_divider(int d); +extern unsigned long perf_get_overflow(void); +extern void perf_clear_overflow(unsigned long bit); +extern void perf_export_event(unsigned long bit); +extern void perf_reset_counts(void); +extern int perf_set_event(unsigned long index, unsigned long val); +extern unsigned long perf_get_count(unsigned long index); +extern void perf_set_cycles(unsigned long c); + +extern void pm_stop_all(void); +extern void l2_pm_stop_all(void); +extern void pm_start_all(void); +extern void l2_pm_start_all(void); +extern void pm_reset_all(void); +extern void l2_pm_reset_all(void); +extern void pm_set_event(unsigned long monitorIndex, unsigned long eventIndex); +extern void l2_pm_set_event(unsigned long monitorIndex, + unsigned long eventIndex); +extern unsigned long pm_get_count(unsigned long monitorIndex); +extern unsigned long l2_pm_get_count(unsigned long monitorIndex); +extern unsigned long pm_get_cycle_count(void); +extern unsigned long l2_pm_get_cycle_count(void); +extern char *pm_find_event_name(unsigned long index); +extern void pm_set_local_iu(unsigned long events); +extern void pm_set_local_xu(unsigned long events); +extern void pm_set_local_su(unsigned long events); +extern void pm_set_local_l2(unsigned long events); +extern void pm_set_local_vu(unsigned long events); +extern void pm_set_local_bu(unsigned long events); +extern void pm_set_local_cb(unsigned long events); +extern void pm_set_local_mp(unsigned long events); +extern void pm_set_local_sp(unsigned long events); +extern void pm_set_local_scu(unsigned long events); +extern void pm_initialize(void); +extern void pm_deinitialize(void); +extern void l2_pm_initialize(void); +extern void l2_pm_deinitialize(void); +extern void pm_free_irq(void); +extern void l2_pm_free_irq(void); + +extern int per_process_perf_init(void); +extern void per_process_perf_exit(void); +int per_process_read(char *page, 
char **start, off_t off, int count, + int *eof, void *data); +int per_process_write_hex(struct file *file, const char *buff, + unsigned long cnt, void *data); +int per_process_read_decimal(char *page, char **start, off_t off, int count, + int *eof, void *data); +int per_process_write_dec(struct file *file, const char *buff, + unsigned long cnt, void *data); +void perfmon_register_callback(void); +void _per_process_switch(unsigned long oldPid, unsigned long newPid); +extern unsigned int pp_loaded; +extern atomic_t pm_op_lock; +#endif /*PERF_H*/ diff --git a/arch/arm/plat-s3c24xx/cpu-freq.c b/arch/arm/plat-s3c24xx/cpu-freq.c index 25a8fc7f512e6..eea75ff81d15a 100644 --- a/arch/arm/plat-s3c24xx/cpu-freq.c +++ b/arch/arm/plat-s3c24xx/cpu-freq.c @@ -433,7 +433,7 @@ static int s3c_cpufreq_verify(struct cpufreq_policy *policy) static struct cpufreq_frequency_table suspend_pll; static unsigned int suspend_freq; -static int s3c_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg) +static int s3c_cpufreq_suspend(struct cpufreq_policy *policy) { suspend_pll.frequency = clk_get_rate(_clk_mpll); suspend_pll.index = __raw_readl(S3C2410_MPLLCON); diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types index 9d6feaabbe7d1..b1fbb4ff02364 100644 --- a/arch/arm/tools/mach-types +++ b/arch/arm/tools/mach-types @@ -3316,3 +3316,22 @@ rover_g8 MACH_ROVER_G8 ROVER_G8 3335 t5388p MACH_T5388P T5388P 3336 dingo MACH_DINGO DINGO 3337 goflexhome MACH_GOFLEXHOME GOFLEXHOME 3338 +msm7x27a_ffa MACH_MSM7X27A_FFA MSM7X27A_FFA 3351 +msm7x27a_surf MACH_MSM7X27A_SURF MSM7X27A_SURF 3352 +msm7x27a_rumi3 MACH_MSM7X27A_RUMI3 MSM7X27A_RUMI3 3353 +msm8960_cdp MACH_MSM8960_CDP MSM8960_CDP 3396 +msm8960_mtp MACH_MSM8960_MTP MSM8960_MTP 3397 +msm8960_fluid MACH_MSM8960_FLUID MSM8960_FLUID 3398 +msm8960_apq MACH_MSM8960_APQ MSM8960_APQ 3399 +msm8960_liquid MACH_MSM8960_LIQUID MSM8960_LIQUID 3535 +msm8x60_dragon MACH_MSM8X60_DRAGON MSM8X60_DRAGON 3586 +apq8064_sim MACH_APQ8064_SIM APQ8064_SIM 3572 +msm9615_cdp MACH_MSM9615_CDP MSM9615_CDP 3675 +apq8064_rumi3 MACH_APQ8064_RUMI3 APQ8064_RUMI3 3679 +msm9615_mtp MACH_MSM9615_MTP MSM9615_MTP 3681 +msm8930_cdp MACH_MSM8930_CDP MSM8930_CDP 3727 +msm8930_mtp MACH_MSM8930_MTP MSM8930_MTP 3728 +msm8930_fluid MACH_MSM8930_FLUID MSM8930_FLUID 3729 +msm7627a_qrd1 MACH_MSM7627A_QRD1 MSM7627A_QRD1 3756 +msm7625a_ffa MACH_MSM7625A_FFA MSM7625A_FFA 3771 +msm7625a_surf MACH_MSM7625A_SURF MSM7625A_SURF 3772 diff --git a/arch/arm/vfp/Makefile b/arch/arm/vfp/Makefile index 39f6d8e1af730..e8cede3368e03 100644 --- a/arch/arm/vfp/Makefile +++ b/arch/arm/vfp/Makefile @@ -7,7 +7,7 @@ # EXTRA_CFLAGS := -DDEBUG # EXTRA_AFLAGS := -DDEBUG -KBUILD_AFLAGS :=$(KBUILD_AFLAGS:-msoft-float=-Wa,-mfpu=softvfp+vfp) +KBUILD_AFLAGS :=$(KBUILD_AFLAGS:-msoft-float=-Wa,-mfpu=neon) LDFLAGS +=--no-warn-mismatch obj-y += vfp.o diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 0797cb528b463..e23016fa41e3c 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -377,10 +377,58 @@ static void vfp_enable(void *unused) set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11)); } +int vfp_flush_context(void) +{ + unsigned long flags; + struct thread_info *ti; + u32 fpexc; + u32 cpu; + int saved = 0; + + local_irq_save(flags); + + ti = current_thread_info(); + fpexc = fmrx(FPEXC); + cpu = ti->cpu; + +#ifdef CONFIG_SMP + /* On SMP, if VFP is enabled, save the old state */ + if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) { + last_VFP_context[cpu]->hard.cpu = cpu; +#else + /* If 
there is a VFP context we must save it. */ + if (last_VFP_context[cpu]) { + /* Enable VFP so we can save the old state. */ + fmxr(FPEXC, fpexc | FPEXC_EN); + isb(); +#endif + vfp_save_state(last_VFP_context[cpu], fpexc); + + /* disable, just in case */ + fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); + saved = 1; + } + last_VFP_context[cpu] = NULL; + + local_irq_restore(flags); + + return saved; +} + +void vfp_reinit(void) +{ + /* ensure we have access to the vfp */ + vfp_enable(NULL); + + /* and disable it to ensure the next usage restores the state */ + fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); +} + + #ifdef CONFIG_PM -#include +#include -static int vfp_pm_suspend(struct sys_device *dev, pm_message_t state) +static int vfp_pm_suspend(void) { struct thread_info *ti = current_thread_info(); u32 fpexc = fmrx(FPEXC); @@ -400,34 +448,25 @@ static int vfp_pm_suspend(struct sys_device *dev, pm_message_t state) return 0; } -static int vfp_pm_resume(struct sys_device *dev) +static void vfp_pm_resume(void) { /* ensure we have access to the vfp */ vfp_enable(NULL); /* and disable it to ensure the next usage restores the state */ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); - - return 0; } -static struct sysdev_class vfp_pm_sysclass = { - .name = "vfp", +static struct syscore_ops vfp_pm_syscore_ops = { .suspend = vfp_pm_suspend, .resume = vfp_pm_resume, }; -static struct sys_device vfp_pm_sysdev = { - .cls = &vfp_pm_sysclass, -}; - static void vfp_pm_init(void) { - sysdev_class_register(&vfp_pm_sysclass); - sysdev_register(&vfp_pm_sysdev); + register_syscore_ops(&vfp_pm_syscore_ops); } - #else static inline void vfp_pm_init(void) { } #endif /* CONFIG_PM */ diff --git a/arch/blackfin/mach-common/dpmc.c b/arch/blackfin/mach-common/dpmc.c index 02c7efd1bcf48..a76071a4b2cf1 100644 --- a/arch/blackfin/mach-common/dpmc.c +++ b/arch/blackfin/mach-common/dpmc.c @@ -19,9 +19,6 @@ #define DRIVER_NAME "bfin dpmc" -#define dprintk(msg...) \ - cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, DRIVER_NAME, msg) - struct bfin_dpmc_platform_data *pdata; /** diff --git a/arch/cris/arch-v10/kernel/irq.c b/arch/cris/arch-v10/kernel/irq.c index 7328a7cf7449b..3a5f7ddad962e 100644 --- a/arch/cris/arch-v10/kernel/irq.c +++ b/arch/cris/arch-v10/kernel/irq.c @@ -20,6 +20,9 @@ #define crisv10_mask_irq(irq_nr) (*R_VECT_MASK_CLR = 1 << (irq_nr)); #define crisv10_unmask_irq(irq_nr) (*R_VECT_MASK_SET = 1 << (irq_nr)); +extern void kgdb_init(void); +extern void breakpoint(void); + /* don't use set_int_vector, it bypasses the linux interrupt handlers. it is * global just so that the kernel gdb can use it. 
*/ diff --git a/arch/frv/include/asm/futex.h b/arch/frv/include/asm/futex.h index 08b3d1da35839..4bea27f50a7ab 100644 --- a/arch/frv/include/asm/futex.h +++ b/arch/frv/include/asm/futex.h @@ -7,10 +7,11 @@ #include #include -extern int futex_atomic_op_inuser(int encoded_op, int __user *uaddr); +extern int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr); static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { return -ENOSYS; } diff --git a/arch/frv/kernel/futex.c b/arch/frv/kernel/futex.c index 14f64b054c7eb..d155ca9e5098c 100644 --- a/arch/frv/kernel/futex.c +++ b/arch/frv/kernel/futex.c @@ -18,7 +18,7 @@ * the various futex operations; MMU fault checking is ignored under no-MMU * conditions */ -static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, int *_oldval) +static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr, int *_oldval) { int oldval, ret; @@ -50,7 +50,7 @@ static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, int *_o return ret; } -static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, int *_oldval) +static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr, int *_oldval) { int oldval, ret; @@ -83,7 +83,7 @@ static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, int *_o return ret; } -static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, int *_oldval) +static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr, int *_oldval) { int oldval, ret; @@ -116,7 +116,7 @@ static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, int *_ol return ret; } -static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, int *_oldval) +static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr, int *_oldval) { int oldval, ret; @@ -149,7 +149,7 @@ static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, int *_o return ret; } -static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, int *_oldval) +static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, int *_oldval) { int oldval, ret; @@ -186,7 +186,7 @@ static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, int *_o /* * do the futex operations */ -int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) +int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -197,7 +197,7 @@ int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h index c7f0f062239cd..8428525ddb225 100644 --- a/arch/ia64/include/asm/futex.h +++ b/arch/ia64/include/asm/futex.h @@ -46,7 +46,7 @@ do { \ } while (0) static inline int -futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -56,7 +56,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) + if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -100,23 +100,26 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; { - register unsigned long r8 __asm ("r8"); + register unsigned long r8 __asm ("r8") = 0; + unsigned long prev; __asm__ __volatile__( " mf;; \n" " mov ar.ccv=%3;; \n" "[1:] cmpxchg4.acq %0=[%1],%2,ar.ccv \n" " .xdata4 \"__ex_table\", 1b-., 2f-. \n" "[2:]" - : "=r" (r8) + : "=r" (prev) : "r" (uaddr), "r" (newval), "rO" ((long) (unsigned) oldval) : "memory"); + *uval = prev; return r8; } } diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c index 22f61526a8e1d..f09b174244d5b 100644 --- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c +++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c @@ -23,8 +23,6 @@ #include #include -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg) - MODULE_AUTHOR("Venkatesh Pallipadi"); MODULE_DESCRIPTION("ACPI Processor P-States Driver"); MODULE_LICENSE("GPL"); @@ -47,12 +45,12 @@ processor_set_pstate ( { s64 retval; - dprintk("processor_set_pstate\n"); + pr_debug("processor_set_pstate\n"); retval = ia64_pal_set_pstate((u64)value); if (retval) { - dprintk("Failed to set freq to 0x%x, with error 0x%lx\n", + pr_debug("Failed to set freq to 0x%x, with error 0x%lx\n", value, retval); return -ENODEV; } @@ -67,14 +65,14 @@ processor_get_pstate ( u64 pstate_index = 0; s64 retval; - dprintk("processor_get_pstate\n"); + pr_debug("processor_get_pstate\n"); retval = ia64_pal_get_pstate(&pstate_index, PAL_GET_PSTATE_TYPE_INSTANT); *value = (u32) pstate_index; if (retval) - dprintk("Failed to get current freq with " + pr_debug("Failed to get current freq with " "error 0x%lx, idx 0x%x\n", retval, *value); return (int)retval; @@ -90,7 +88,7 @@ extract_clock ( { unsigned long i; - dprintk("extract_clock\n"); + pr_debug("extract_clock\n"); for (i = 0; i < data->acpi_data.state_count; i++) { if (value == data->acpi_data.states[i].status) @@ -110,7 +108,7 @@ processor_get_freq ( cpumask_t saved_mask; unsigned long clock_freq; - dprintk("processor_get_freq\n"); + pr_debug("processor_get_freq\n"); saved_mask = current->cpus_allowed; set_cpus_allowed_ptr(current, cpumask_of(cpu)); @@ -148,7 +146,7 @@ processor_set_freq ( cpumask_t saved_mask; int retval; - dprintk("processor_set_freq\n"); + pr_debug("processor_set_freq\n"); saved_mask = current->cpus_allowed; set_cpus_allowed_ptr(current, cpumask_of(cpu)); @@ -159,16 +157,16 @@ processor_set_freq ( if (state == data->acpi_data.state) { if (unlikely(data->resume)) { - dprintk("Called after resume, resetting to P%d\n", state); + pr_debug("Called after resume, resetting to P%d\n", state); data->resume = 0; } else { - dprintk("Already at target state (P%d)\n", state); + pr_debug("Already at target state (P%d)\n", state); retval = 0; goto migrate_end; } } - dprintk("Transitioning from P%d to P%d\n", + pr_debug("Transitioning from P%d to P%d\n", data->acpi_data.state, state); /* cpufreq frequency struct */ @@ -186,7 +184,7 @@ processor_set_freq ( value = (u32) data->acpi_data.states[state].control; - dprintk("Transitioning to state: 0x%08x\n", value); + pr_debug("Transitioning to state: 0x%08x\n", value); ret = 
processor_set_pstate(value); if (ret) { @@ -219,7 +217,7 @@ acpi_cpufreq_get ( { struct cpufreq_acpi_io *data = acpi_io_data[cpu]; - dprintk("acpi_cpufreq_get\n"); + pr_debug("acpi_cpufreq_get\n"); return processor_get_freq(data, cpu); } @@ -235,7 +233,7 @@ acpi_cpufreq_target ( unsigned int next_state = 0; unsigned int result = 0; - dprintk("acpi_cpufreq_setpolicy\n"); + pr_debug("acpi_cpufreq_setpolicy\n"); result = cpufreq_frequency_table_target(policy, data->freq_table, target_freq, relation, &next_state); @@ -255,7 +253,7 @@ acpi_cpufreq_verify ( unsigned int result = 0; struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; - dprintk("acpi_cpufreq_verify\n"); + pr_debug("acpi_cpufreq_verify\n"); result = cpufreq_frequency_table_verify(policy, data->freq_table); @@ -273,7 +271,7 @@ acpi_cpufreq_cpu_init ( struct cpufreq_acpi_io *data; unsigned int result = 0; - dprintk("acpi_cpufreq_cpu_init\n"); + pr_debug("acpi_cpufreq_cpu_init\n"); data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); if (!data) @@ -288,7 +286,7 @@ acpi_cpufreq_cpu_init ( /* capability check */ if (data->acpi_data.state_count <= 1) { - dprintk("No P-States\n"); + pr_debug("No P-States\n"); result = -ENODEV; goto err_unreg; } @@ -297,7 +295,7 @@ acpi_cpufreq_cpu_init ( ACPI_ADR_SPACE_FIXED_HARDWARE) || (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { - dprintk("Unsupported address space [%d, %d]\n", + pr_debug("Unsupported address space [%d, %d]\n", (u32) (data->acpi_data.control_register.space_id), (u32) (data->acpi_data.status_register.space_id)); result = -ENODEV; @@ -348,7 +346,7 @@ acpi_cpufreq_cpu_init ( "activated.\n", cpu); for (i = 0; i < data->acpi_data.state_count; i++) - dprintk(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n", + pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n", (i == data->acpi_data.state?'*':' '), i, (u32) data->acpi_data.states[i].core_frequency, (u32) data->acpi_data.states[i].power, @@ -383,7 +381,7 @@ acpi_cpufreq_cpu_exit ( { struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; - dprintk("acpi_cpufreq_cpu_exit\n"); + pr_debug("acpi_cpufreq_cpu_exit\n"); if (data) { cpufreq_frequency_table_put_attr(policy->cpu); @@ -418,7 +416,7 @@ static struct cpufreq_driver acpi_cpufreq_driver = { static int __init acpi_cpufreq_init (void) { - dprintk("acpi_cpufreq_init\n"); + pr_debug("acpi_cpufreq_init\n"); return cpufreq_register_driver(&acpi_cpufreq_driver); } @@ -427,7 +425,7 @@ acpi_cpufreq_init (void) static void __exit acpi_cpufreq_exit (void) { - dprintk("acpi_cpufreq_exit\n"); + pr_debug("acpi_cpufreq_exit\n"); cpufreq_unregister_driver(&acpi_cpufreq_driver); return; diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 1753f6a30d55e..ff14ab26c508b 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -1859,7 +1859,8 @@ ia64_mca_cpu_init(void *cpu_data) data = mca_bootmem(); first_time = 0; } else - data = __get_free_pages(GFP_KERNEL, get_order(sz)); + data = (void *)__get_free_pages(GFP_KERNEL, + get_order(sz)); if (!data) panic("Could not allocate MCA memory for cpu %d\n", cpu); diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c index 4d4536e3b6f3d..9c271be9919aa 100644 --- a/arch/ia64/sn/pci/tioca_provider.c +++ b/arch/ia64/sn/pci/tioca_provider.c @@ -509,7 +509,7 @@ tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) * use the GART mapped mode. 
*/ static u64 -tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags) +tioca_dma_map(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags) { u64 mapaddr; diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index 02b7a03e42268..8b3db1c587fca 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c @@ -300,6 +300,8 @@ void __init paging_init(void) zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT; free_area_init_node(i, zones_size, m68k_memory[i].addr >> PAGE_SHIFT, NULL); + if (node_present_pages(i)) + node_set_state(i, N_NORMAL_MEMORY); } } diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h index ad3fd61b2fe7e..b0526d2716fa7 100644 --- a/arch/microblaze/include/asm/futex.h +++ b/arch/microblaze/include/asm/futex.h @@ -29,7 +29,7 @@ }) static inline int -futex_atomic_op_inuser(int encoded_op, int __user *uaddr) +futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -39,7 +39,7 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -94,31 +94,34 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { - int prev, cmp; + int ret = 0, cmp; + u32 prev; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; - __asm__ __volatile__ ("1: lwx %0, %2, r0; \ - cmp %1, %0, %3; \ - beqi %1, 3f; \ - 2: swx %4, %2, r0; \ - addic %1, r0, 0; \ - bnei %1, 1b; \ + __asm__ __volatile__ ("1: lwx %1, %3, r0; \ + cmp %2, %1, %4; \ + beqi %2, 3f; \ + 2: swx %5, %3, r0; \ + addic %2, r0, 0; \ + bnei %2, 1b; \ 3: \ .section .fixup,\"ax\"; \ 4: brid 3b; \ - addik %0, r0, %5; \ + addik %0, r0, %6; \ .previous; \ .section __ex_table,\"a\"; \ .word 1b,4b,2b,4b; \ .previous;" \ - : "=&r" (prev), "=&r"(cmp) \ + : "+r" (ret), "=&r" (prev), "=&r"(cmp) \ : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)); - return prev; + *uval = prev; + return ret; } #endif /* __KERNEL__ */ diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index d840f4a2d3c92..5bb95a11880d2 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -120,16 +120,16 @@ static inline unsigned long __must_check __clear_user(void __user *to, { /* normal memset with two words to __ex_table */ __asm__ __volatile__ ( \ - "1: sb r0, %2, r0;" \ + "1: sb r0, %1, r0;" \ " addik %0, %0, -1;" \ " bneid %0, 1b;" \ - " addik %2, %2, 1;" \ + " addik %1, %1, 1;" \ "2: " \ __EX_TABLE_SECTION \ ".word 1b,2b;" \ ".previous;" \ - : "=r"(n) \ - : "0"(n), "r"(to) + : "=r"(n), "=r"(to) \ + : "0"(n), "1"(to) ); return n; } diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h index b9cce90346cfc..6ebf1734b411b 100644 --- a/arch/mips/include/asm/futex.h +++ b/arch/mips/include/asm/futex.h @@ -75,7 +75,7 @@ } static inline int -futex_atomic_op_inuser(int encoded_op, int __user *uaddr) +futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -85,7 +85,7 @@ 
futex_atomic_op_inuser(int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -132,11 +132,13 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { - int retval; + int ret = 0; + u32 val; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; if (cpu_has_llsc && R10000_LLSC_WAR) { @@ -145,25 +147,25 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) " .set push \n" " .set noat \n" " .set mips3 \n" - "1: ll %0, %2 \n" - " bne %0, %z3, 3f \n" + "1: ll %1, %3 \n" + " bne %1, %z4, 3f \n" " .set mips0 \n" - " move $1, %z4 \n" + " move $1, %z5 \n" " .set mips3 \n" - "2: sc $1, %1 \n" + "2: sc $1, %2 \n" " beqzl $1, 1b \n" __WEAK_LLSC_MB "3: \n" " .set pop \n" " .section .fixup,\"ax\" \n" - "4: li %0, %5 \n" + "4: li %0, %6 \n" " j 3b \n" " .previous \n" " .section __ex_table,\"a\" \n" " "__UA_ADDR "\t1b, 4b \n" " "__UA_ADDR "\t2b, 4b \n" " .previous \n" - : "=&r" (retval), "=R" (*uaddr) + : "+r" (ret), "=&r" (val), "=R" (*uaddr) : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) : "memory"); } else if (cpu_has_llsc) { @@ -172,31 +174,32 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) " .set push \n" " .set noat \n" " .set mips3 \n" - "1: ll %0, %2 \n" - " bne %0, %z3, 3f \n" + "1: ll %1, %3 \n" + " bne %1, %z4, 3f \n" " .set mips0 \n" - " move $1, %z4 \n" + " move $1, %z5 \n" " .set mips3 \n" - "2: sc $1, %1 \n" + "2: sc $1, %2 \n" " beqz $1, 1b \n" __WEAK_LLSC_MB "3: \n" " .set pop \n" " .section .fixup,\"ax\" \n" - "4: li %0, %5 \n" + "4: li %0, %6 \n" " j 3b \n" " .previous \n" " .section __ex_table,\"a\" \n" " "__UA_ADDR "\t1b, 4b \n" " "__UA_ADDR "\t2b, 4b \n" " .previous \n" - : "=&r" (retval), "=R" (*uaddr) + : "+r" (ret), "=&r" (val), "=R" (*uaddr) : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) : "memory"); } else return -ENOSYS; - return retval; + *uval = val; + return ret; } #endif diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index f81955934aebf..26fd1146dda6a 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -259,10 +259,10 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) #define ATOMIC64_INIT(i) ((atomic64_t) { (i) }) -static __inline__ int +static __inline__ s64 __atomic64_add_return(s64 i, atomic64_t *v) { - int ret; + s64 ret; unsigned long flags; _atomic_spin_lock_irqsave(v, flags); diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h index 0c705c3a55efc..67a33cc27ef27 100644 --- a/arch/parisc/include/asm/futex.h +++ b/arch/parisc/include/asm/futex.h @@ -8,7 +8,7 @@ #include static inline int -futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -18,7 +18,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) + if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -51,10 +51,10 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) /* Non-atomic version */ static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { - int err = 0; - int uval; + u32 val; /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is * our gateway page, and causes no end of trouble... @@ -62,15 +62,15 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) if (segment_eq(KERNEL_DS, get_fs()) && !uaddr) return -EFAULT; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; - err = get_user(uval, uaddr); - if (err) return -EFAULT; - if (uval == oldval) - err = put_user(newval, uaddr); - if (err) return -EFAULT; - return uval; + if (get_user(val, uaddr)) + return -EFAULT; + if (val == oldval && put_user(newval, uaddr)) + return -EFAULT; + *uval = val; + return 0; } #endif /*__KERNEL__*/ diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h index 3eb82c2a5ec33..9af5fab2befc5 100644 --- a/arch/parisc/include/asm/unistd.h +++ b/arch/parisc/include/asm/unistd.h @@ -814,8 +814,11 @@ #define __NR_recvmmsg (__NR_Linux + 319) #define __NR_accept4 (__NR_Linux + 320) #define __NR_prlimit64 (__NR_Linux + 321) +#define __NR_fanotify_init (__NR_Linux + 322) +#define __NR_fanotify_mark (__NR_Linux + 323) +#define __NR_clock_adjtime (__NR_Linux + 324) -#define __NR_Linux_syscalls (__NR_prlimit64 + 1) +#define __NR_Linux_syscalls (__NR_clock_adjtime + 1) #define __IGNORE_select /* newselect */ diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index d7d94b845dc2c..3948f1dd455aa 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -108,7 +108,7 @@ int cpu_check_affinity(unsigned int irq, const struct cpumask *dest) int cpu_dest; /* timer and ipi have to always be received on all CPUs */ - if (CHECK_IRQ_PER_CPU(irq)) { + if (CHECK_IRQ_PER_CPU(irq_to_desc(irq)->status)) { /* Bad linux design decision. 
The mask has already * been set; we must reset it */ cpumask_setall(irq_desc[irq].affinity); diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c index 88a0ad14a9c99..dc9a624623233 100644 --- a/arch/parisc/kernel/sys_parisc32.c +++ b/arch/parisc/kernel/sys_parisc32.c @@ -228,3 +228,11 @@ asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo, return sys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo, ((loff_t)lenhi << 32) | lenlo); } + +asmlinkage long compat_sys_fanotify_mark(int fan_fd, int flags, u32 mask_hi, + u32 mask_lo, int fd, + const char __user *pathname) +{ + return sys_fanotify_mark(fan_fd, flags, ((u64)mask_hi << 32) | mask_lo, + fd, pathname); +} diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 74867dfdabe57..759323b2469b7 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -420,6 +420,9 @@ ENTRY_COMP(recvmmsg) ENTRY_SAME(accept4) /* 320 */ ENTRY_SAME(prlimit64) + ENTRY_SAME(fanotify_init) + ENTRY_COMP(fanotify_mark) + ENTRY_COMP(clock_adjtime) /* Nothing yet */ diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index f4f4d700833af..7fd8aadd8a8ee 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -266,8 +266,10 @@ static void __init setup_bootmem(void) } memset(pfnnid_map, 0xff, sizeof(pfnnid_map)); - for (i = 0; i < npmem_ranges; i++) + for (i = 0; i < npmem_ranges; i++) { + node_set_state(i, N_NORMAL_MEMORY); node_set_online(i); + } #endif /* diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h index 7c589ef81fb0e..c94e4a3fe2ef3 100644 --- a/arch/powerpc/include/asm/futex.h +++ b/arch/powerpc/include/asm/futex.h @@ -30,7 +30,7 @@ : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \ : "cr0", "memory") -static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -40,7 +40,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -82,35 +82,38 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { - int prev; + int ret = 0; + u32 prev; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; __asm__ __volatile__ ( PPC_RELEASE_BARRIER -"1: lwarx %0,0,%2 # futex_atomic_cmpxchg_inatomic\n\ - cmpw 0,%0,%3\n\ +"1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\ + cmpw 0,%1,%4\n\ bne- 3f\n" - PPC405_ERR77(0,%2) -"2: stwcx. %4,0,%2\n\ + PPC405_ERR77(0,%3) +"2: stwcx. 
%5,0,%3\n\ bne- 1b\n" PPC_ACQUIRE_BARRIER "3: .section .fixup,\"ax\"\n\ -4: li %0,%5\n\ +4: li %0,%6\n\ b 3b\n\ .previous\n\ .section __ex_table,\"a\"\n\ .align 3\n\ " PPC_LONG "1b,4b,2b,4b\n\ .previous" \ - : "=&r" (prev), "+m" (*uaddr) + : "+r" (ret), "=&r" (prev), "+m" (*uaddr) : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT) : "cc", "memory"); - return prev; + *uval = prev; + return ret; } #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 125fc1ad665d0..7626fa78e1f8b 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -880,6 +880,7 @@ #define PV_970 0x0039 #define PV_POWER5 0x003A #define PV_POWER5p 0x003B +#define PV_POWER7 0x003F #define PV_970FX 0x003C #define PV_630 0x0040 #define PV_630p 0x0041 diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c index 3d569e2aff18e..8ee4c7c739f89 100644 --- a/arch/powerpc/kernel/crash.c +++ b/arch/powerpc/kernel/crash.c @@ -170,7 +170,7 @@ static void crash_kexec_wait_realmode(int cpu) int i; msecs = 10000; - for (i=0; i < NR_CPUS && msecs > 0; i++) { + for (i=0; i < nr_cpu_ids && msecs > 0; i++) { if (i == cpu) continue; diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 206a321a71d35..e89df59cdc5a5 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -462,7 +462,8 @@ _GLOBAL(disable_kernel_fp) * wait for the flag to change, indicating this kernel is going away but * the slave code for the next one is at addresses 0 to 100. * - * This is used by all slaves. + * This is used by all slaves, even those that did not find a matching + * paca in the secondary startup code. * * Physical (hardware) cpu id should be in r3. */ @@ -471,10 +472,6 @@ _GLOBAL(kexec_wait) 1: mflr r5 addi r5,r5,kexec_flag-1b - li r4,KEXEC_STATE_REAL_MODE - stb r4,PACAKEXECSTATE(r13) - SYNC - 99: HMT_LOW #ifdef CONFIG_KEXEC /* use no memory without kexec */ lwz r4,0(r5) @@ -499,11 +496,17 @@ kexec_flag: * * get phys id from paca * switch to real mode + * mark the paca as no longer used * join other cpus in kexec_wait(phys_id) */ _GLOBAL(kexec_smp_wait) lhz r3,PACAHWCPUID(r13) bl real_mode + + li r4,KEXEC_STATE_REAL_MODE + stb r4,PACAKEXECSTATE(r13) + SYNC + b .kexec_wait /* diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index f4adf89d76141..10f0aadee95b9 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -203,7 +203,7 @@ void __init free_unused_pacas(void) { int new_size; - new_size = PAGE_ALIGN(sizeof(struct paca_struct) * num_possible_cpus()); + new_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids); if (new_size >= paca_size) return; diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index ab6f6beadb572..26e56e346912e 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -398,6 +398,25 @@ static int check_excludes(struct perf_event **ctrs, unsigned int cflags[], return 0; } +static u64 check_and_compute_delta(u64 prev, u64 val) +{ + u64 delta = (val - prev) & 0xfffffffful; + + /* + * POWER7 can roll back counter values, if the new value is smaller + * than the previous value it will cause the delta and the counter to + * have bogus values unless we rolled a counter over. If a counter is + * rolled back, it will be smaller, but within 256, which is the maximum + * number of events to rollback at once. If we detect a rollback + * return 0. 
This can lead to a small lack of precision in the + * counters. + */ + if (prev > val && (prev - val) < 256) + delta = 0; + + return delta; +} + static void power_pmu_read(struct perf_event *event) { s64 val, delta, prev; @@ -416,10 +435,11 @@ static void power_pmu_read(struct perf_event *event) prev = local64_read(&event->hw.prev_count); barrier(); val = read_pmc(event->hw.idx); + delta = check_and_compute_delta(prev, val); + if (!delta) + return; } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); - /* The counters are only 32 bits wide */ - delta = (val - prev) & 0xfffffffful; local64_add(delta, &event->count); local64_sub(delta, &event->hw.period_left); } @@ -449,8 +469,9 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw, val = (event->hw.idx == 5) ? pmc5 : pmc6; prev = local64_read(&event->hw.prev_count); event->hw.idx = 0; - delta = (val - prev) & 0xfffffffful; - local64_add(delta, &event->count); + delta = check_and_compute_delta(prev, val); + if (delta) + local64_add(delta, &event->count); } } @@ -458,14 +479,16 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw, unsigned long pmc5, unsigned long pmc6) { struct perf_event *event; - u64 val; + u64 val, prev; int i; for (i = 0; i < cpuhw->n_limited; ++i) { event = cpuhw->limited_counter[i]; event->hw.idx = cpuhw->limited_hwidx[i]; val = (event->hw.idx == 5) ? pmc5 : pmc6; - local64_set(&event->hw.prev_count, val); + prev = local64_read(&event->hw.prev_count); + if (check_and_compute_delta(prev, val)) + local64_set(&event->hw.prev_count, val); perf_event_update_userpage(event); } } @@ -1197,7 +1220,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, /* we don't have to worry about interrupts here */ prev = local64_read(&event->hw.prev_count); - delta = (val - prev) & 0xfffffffful; + delta = check_and_compute_delta(prev, val); local64_add(delta, &event->count); /* @@ -1269,6 +1292,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. 
+ */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1361,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } } diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 9065369982911..85012abc779e7 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -924,12 +924,16 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, if (data && !(data & DABR_TRANSLATION)) return -EIO; #ifdef CONFIG_HAVE_HW_BREAKPOINT + if (ptrace_get_breakpoints(task) < 0) + return -ESRCH; + bp = thread->ptrace_bps[0]; if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) { if (bp) { unregister_hw_breakpoint(bp); thread->ptrace_bps[0] = NULL; } + ptrace_put_breakpoints(task); return 0; } if (bp) { @@ -939,9 +943,12 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, (DABR_DATA_WRITE | DABR_DATA_READ), &attr.bp_type); ret = modify_user_hw_breakpoint(bp, &attr); - if (ret) + if (ret) { + ptrace_put_breakpoints(task); return ret; + } thread->ptrace_bps[0] = bp; + ptrace_put_breakpoints(task); thread->dabr = data; return 0; } @@ -956,9 +963,12 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, ptrace_triggered, task); if (IS_ERR(bp)) { thread->ptrace_bps[0] = NULL; + ptrace_put_breakpoints(task); return PTR_ERR(bp); } + ptrace_put_breakpoints(task); + #endif /* CONFIG_HAVE_HW_BREAKPOINT */ /* Move contents to the DABR register */ diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 9d4882a466471..21f30cb68077f 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -509,6 +509,9 @@ void __init smp_setup_cpu_maps(void) */ cpu_init_thread_core_maps(nthreads); + /* Now that possible cpus are set, set nr_cpu_ids for later use */ + nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1; + free_unused_pacas(); } #endif /* CONFIG_SMP */ diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 09d31dbf43f99..02d54e1892d75 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -229,6 +229,9 @@ static u64 scan_dispatch_log(u64 stop_tb) u64 stolen = 0; u64 dtb; + if (!dtl) + return 0; + if (i == vpa->dtl_idx) return 0; while (i < vpa->dtl_idx) { @@ -356,7 +359,7 @@ void account_system_vtime(struct task_struct *tsk) } get_paca()->user_time_scaled += user_scaled; - if (in_irq() || idle_task(smp_processor_id()) != tsk) { + if (in_interrupt() || idle_task(smp_processor_id()) != tsk) { account_system_time(tsk, 0, delta, sys_scaled); if (stolen) account_steal_time(stolen); diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c index 80774092db77f..93636ca48d3d5 100644 --- a/arch/powerpc/oprofile/op_model_power4.c +++ b/arch/powerpc/oprofile/op_model_power4.c @@ -261,6 +261,28 @@ static int get_kernel(unsigned long pc, unsigned long mmcra) return is_kernel; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. 
In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + static void power4_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr) { @@ -281,7 +303,7 @@ static void power4_handle_interrupt(struct pt_regs *regs, for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) { val = classic_ctr_read(i); - if (val < 0) { + if (pmc_overflow(val)) { if (oprofile_running && ctr[i].enabled) { oprofile_add_ext_sample(pc, regs, i, is_kernel); classic_ctr_write(i, reset_value[i]); diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 0b04662849320..f2a024f37c690 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -63,11 +63,6 @@ static struct task_struct *spusched_task; static struct timer_list spusched_timer; static struct timer_list spuloadavg_timer; -/* - * Priority of a normal, non-rt, non-niced'd process (aka nice level 0). - */ -#define NORMAL_PRIO 120 - /* * Frequency of the spu scheduler tick. By default we do one SPU scheduler * tick for every 10 CPU scheduler ticks. diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c index 415ca6d6b2739..04af5f48b4eb1 100644 --- a/arch/powerpc/platforms/powermac/cpufreq_32.c +++ b/arch/powerpc/platforms/powermac/cpufreq_32.c @@ -429,7 +429,7 @@ static u32 read_gpio(struct device_node *np) return offset; } -static int pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg) +static int pmac_cpufreq_suspend(struct cpufreq_policy *policy) { /* Ok, this could be made a bit smarter, but let's be robust for now. We * always force a speed change to high speed before sleep, to make sure diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h index 5c5d02de49e9a..81cf36b691f1d 100644 --- a/arch/s390/include/asm/futex.h +++ b/arch/s390/include/asm/futex.h @@ -7,7 +7,7 @@ #include #include -static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -18,7 +18,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -39,13 +39,13 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) return ret; } -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, - int oldval, int newval) +static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) + if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; - return uaccess.futex_atomic_cmpxchg(uaddr, oldval, newval); + return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval); } #endif /* __KERNEL__ */ diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index d6b1ed0ec52b3..2d9ea11f919ad 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -83,8 +83,8 @@ struct uaccess_ops { size_t (*clear_user)(size_t, void __user *); size_t (*strnlen_user)(size_t, const char __user *); size_t (*strncpy_from_user)(size_t, const char __user *, char *); - int (*futex_atomic_op)(int op, int __user *, int oparg, int *old); - int (*futex_atomic_cmpxchg)(int __user *, int old, int new); + int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old); + int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new); }; extern struct uaccess_ops uaccess; diff --git a/arch/s390/kvm/sie64a.S b/arch/s390/kvm/sie64a.S index 7e9d30d567b0a..ab0e041ac54cf 100644 --- a/arch/s390/kvm/sie64a.S +++ b/arch/s390/kvm/sie64a.S @@ -48,10 +48,10 @@ sie_irq_handler: tm __TI_flags+7(%r2),_TIF_EXIT_SIE jz 0f larl %r2,sie_exit # work pending, leave sie - stg %r2,__LC_RETURN_PSW+8 + stg %r2,SPI_PSW+8(0,%r15) br %r14 0: larl %r2,sie_reenter # re-enter with guest id - stg %r2,__LC_RETURN_PSW+8 + stg %r2,SPI_PSW+8(0,%r15) 1: br %r14 /* diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h index 126011df14f1e..1d2536cb630bc 100644 --- a/arch/s390/lib/uaccess.h +++ b/arch/s390/lib/uaccess.h @@ -12,12 +12,12 @@ extern size_t copy_from_user_std(size_t, const void __user *, void *); extern size_t copy_to_user_std(size_t, void __user *, const void *); extern size_t strnlen_user_std(size_t, const char __user *); extern size_t strncpy_from_user_std(size_t, const char __user *, char *); -extern int futex_atomic_cmpxchg_std(int __user *, int, int); -extern int futex_atomic_op_std(int, int __user *, int, int *); +extern int futex_atomic_cmpxchg_std(u32 *, u32 __user *, u32, u32); +extern int futex_atomic_op_std(int, u32 __user *, int, int *); extern size_t copy_from_user_pt(size_t, const void __user *, void *); extern size_t copy_to_user_pt(size_t, void __user *, const void *); -extern int futex_atomic_op_pt(int, int __user *, int, int *); -extern int futex_atomic_cmpxchg_pt(int __user *, int, int); +extern int futex_atomic_op_pt(int, u32 __user *, int, int *); +extern int futex_atomic_cmpxchg_pt(u32 *, u32 __user *, u32, u32); #endif /* __ARCH_S390_LIB_UACCESS_H */ diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index 404f2de296dca..74833831417fc 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c @@ -302,7 +302,7 @@ static size_t copy_in_user_pt(size_t n, void __user *to, : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ "m" (*uaddr) : "cc" ); -static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) +static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old) { int oldval = 0, newval, ret; @@ -335,7 +335,7 @@ static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) return ret; } -int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) +int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old) { int ret; @@ -354,26 +354,29 @@ int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) return ret; } -static int __futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) +static int 
__futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { int ret; asm volatile("0: cs %1,%4,0(%5)\n" - "1: lr %0,%1\n" + "1: la %0,0\n" "2:\n" EX_TABLE(0b,2b) EX_TABLE(1b,2b) : "=d" (ret), "+d" (oldval), "=m" (*uaddr) : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) : "cc", "memory" ); + *uval = oldval; return ret; } -int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) +int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { int ret; if (segment_eq(get_fs(), KERNEL_DS)) - return __futex_atomic_cmpxchg_pt(uaddr, oldval, newval); + return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval); spin_lock(¤t->mm->page_table_lock); uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); if (!uaddr) { @@ -382,7 +385,7 @@ int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) } get_page(virt_to_page(uaddr)); spin_unlock(¤t->mm->page_table_lock); - ret = __futex_atomic_cmpxchg_pt(uaddr, oldval, newval); + ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval); put_page(virt_to_page(uaddr)); return ret; } diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c index a6c4f7ed24a49..bb1a7eed42ce4 100644 --- a/arch/s390/lib/uaccess_std.c +++ b/arch/s390/lib/uaccess_std.c @@ -255,7 +255,7 @@ size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst) : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ "m" (*uaddr) : "cc"); -int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old) +int futex_atomic_op_std(int op, u32 __user *uaddr, int oparg, int *old) { int oldval = 0, newval, ret; @@ -287,19 +287,21 @@ int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old) return ret; } -int futex_atomic_cmpxchg_std(int __user *uaddr, int oldval, int newval) +int futex_atomic_cmpxchg_std(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { int ret; asm volatile( " sacf 256\n" "0: cs %1,%4,0(%5)\n" - "1: lr %0,%1\n" + "1: la %0,0\n" "2: sacf 0\n" EX_TABLE(0b,2b) EX_TABLE(1b,2b) : "=d" (ret), "+d" (oldval), "=m" (*uaddr) : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) : "cc", "memory" ); + *uval = oldval; return ret; } diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 2c57806c0858e..0f900c811cb6a 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -558,9 +558,9 @@ static void pfault_interrupt(unsigned int ext_int_code, * Get the token (= address of the task structure of the affected task). 
*/ #ifdef CONFIG_64BIT - tsk = *(struct task_struct **) param64; + tsk = (struct task_struct *) param64; #else - tsk = *(struct task_struct **) param32; + tsk = (struct task_struct *) param32; #endif if (subcode & 0x0080) { diff --git a/arch/sh/include/asm/futex-irq.h b/arch/sh/include/asm/futex-irq.h index a9f16a7f9aeaf..6cb9f193a95ea 100644 --- a/arch/sh/include/asm/futex-irq.h +++ b/arch/sh/include/asm/futex-irq.h @@ -3,7 +3,7 @@ #include -static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, +static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr, int *oldval) { unsigned long flags; @@ -20,7 +20,7 @@ static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, return ret; } -static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, +static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr, int *oldval) { unsigned long flags; @@ -37,7 +37,7 @@ static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, return ret; } -static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, +static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr, int *oldval) { unsigned long flags; @@ -54,7 +54,7 @@ static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, return ret; } -static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, +static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr, int *oldval) { unsigned long flags; @@ -71,7 +71,7 @@ static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, return ret; } -static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, +static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, int *oldval) { unsigned long flags; @@ -88,11 +88,13 @@ static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, return ret; } -static inline int atomic_futex_op_cmpxchg_inatomic(int __user *uaddr, - int oldval, int newval) +static inline int atomic_futex_op_cmpxchg_inatomic(u32 *uval, + u32 __user *uaddr, + u32 oldval, u32 newval) { unsigned long flags; - int ret, prev = 0; + int ret; + u32 prev = 0; local_irq_save(flags); @@ -102,10 +104,8 @@ static inline int atomic_futex_op_cmpxchg_inatomic(int __user *uaddr, local_irq_restore(flags); - if (ret) - return ret; - - return prev; + *uval = prev; + return ret; } #endif /* __ASM_SH_FUTEX_IRQ_H */ diff --git a/arch/sh/include/asm/futex.h b/arch/sh/include/asm/futex.h index 68256ec5fa35b..7be39a646fbd0 100644 --- a/arch/sh/include/asm/futex.h +++ b/arch/sh/include/asm/futex.h @@ -10,7 +10,7 @@ /* XXX: UP variants, fix for SH-4A and SMP.. 
*/ #include -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -21,7 +21,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -65,12 +65,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; - return atomic_futex_op_cmpxchg_inatomic(uaddr, oldval, newval); + return atomic_futex_op_cmpxchg_inatomic(uval, uaddr, oldval, newval); } #endif /* __KERNEL__ */ diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile index d49c2135fd480..ae95935d93cde 100644 --- a/arch/sh/kernel/cpu/Makefile +++ b/arch/sh/kernel/cpu/Makefile @@ -17,7 +17,5 @@ obj-$(CONFIG_ARCH_SHMOBILE) += shmobile/ obj-$(CONFIG_SH_ADC) += adc.o obj-$(CONFIG_SH_CLK_CPG_LEGACY) += clock-cpg.o -obj-$(CONFIG_SH_FPU) += fpu.o -obj-$(CONFIG_SH_FPU_EMU) += fpu.o -obj-y += irq/ init.o clock.o hwblk.o proc.o +obj-y += irq/ init.o clock.o fpu.o hwblk.o proc.o diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index 90a15d29feebd..2130ca674e9bd 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c @@ -101,6 +101,8 @@ static int set_single_step(struct task_struct *tsk, unsigned long addr) attr = bp->attr; attr.bp_addr = addr; + /* reenable breakpoint */ + attr.disabled = false; err = modify_user_hw_breakpoint(bp, &attr); if (unlikely(err)) return err; @@ -392,6 +394,9 @@ long arch_ptrace(struct task_struct *child, long request, tmp = 0; } else { unsigned long index; + ret = init_fpu(child); + if (ret) + break; index = addr - offsetof(struct user, fpu); tmp = ((unsigned long *)child->thread.xstate) [index >> 2]; @@ -423,6 +428,9 @@ long arch_ptrace(struct task_struct *child, long request, else if (addr >= offsetof(struct user, fpu) && addr < offsetof(struct user, u_fpvalid)) { unsigned long index; + ret = init_fpu(child); + if (ret) + break; index = addr - offsetof(struct user, fpu); set_stopped_child_used_math(child); ((unsigned long *)child->thread.xstate) diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c index 4436eacddb153..c8f97649f354b 100644 --- a/arch/sh/kernel/ptrace_64.c +++ b/arch/sh/kernel/ptrace_64.c @@ -403,6 +403,9 @@ long arch_ptrace(struct task_struct *child, long request, else if ((addr >= offsetof(struct user, fpu)) && (addr < offsetof(struct user, u_fpvalid))) { unsigned long index; + ret = init_fpu(child); + if (ret) + break; index = addr - offsetof(struct user, fpu); tmp = get_fpu_long(child, index); } else if (addr == offsetof(struct user, u_fpvalid)) { @@ -442,6 +445,9 @@ long arch_ptrace(struct task_struct *child, long request, else if ((addr >= offsetof(struct user, fpu)) && (addr < offsetof(struct user, u_fpvalid))) { unsigned long index; + ret = init_fpu(child); + if (ret) + break; index = addr - offsetof(struct user, fpu); ret = put_fpu_long(child, index, data); } diff --git a/arch/sparc/include/asm/futex_64.h 
b/arch/sparc/include/asm/futex_64.h index 47f95839dc695..444e7bea23bcb 100644 --- a/arch/sparc/include/asm/futex_64.h +++ b/arch/sparc/include/asm/futex_64.h @@ -30,7 +30,7 @@ : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \ : "memory") -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -38,7 +38,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) int cmparg = (encoded_op << 20) >> 20; int oldval = 0, ret, tem; - if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))) + if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))) return -EFAULT; if (unlikely((((unsigned long) uaddr) & 0x3UL))) return -EINVAL; @@ -85,26 +85,30 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { + int ret = 0; + __asm__ __volatile__( - "\n1: casa [%3] %%asi, %2, %0\n" + "\n1: casa [%4] %%asi, %3, %1\n" "2:\n" " .section .fixup,#alloc,#execinstr\n" " .align 4\n" "3: sethi %%hi(2b), %0\n" " jmpl %0 + %%lo(2b), %%g0\n" - " mov %4, %0\n" + " mov %5, %0\n" " .previous\n" " .section __ex_table,\"a\"\n" " .align 4\n" " .word 1b, 3b\n" " .previous\n" - : "=r" (newval) - : "0" (newval), "r" (oldval), "r" (uaddr), "i" (-EFAULT) + : "+r" (ret), "=r" (newval) + : "1" (newval), "r" (oldval), "r" (uaddr), "i" (-EFAULT) : "memory"); - return newval; + *uval = newval; + return ret; } #endif /* !(_SPARC64_FUTEX_H) */ diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S index 1504df8ddf70a..906ee3e24cc96 100644 --- a/arch/sparc/kernel/entry.S +++ b/arch/sparc/kernel/entry.S @@ -1283,7 +1283,7 @@ linux_syscall_trace: .globl ret_from_fork ret_from_fork: call schedule_tail - mov %g3, %o0 + ld [%g3 + TI_TASK], %o0 b ret_sys_call ld [%sp + STACKFRAME_SZ + PT_I0], %o0 diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 72509d0e34be2..6f01e8c83197e 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -333,13 +333,10 @@ static void dma_4u_free_coherent(struct device *dev, size_t size, void *cpu, dma_addr_t dvma) { struct iommu *iommu; - iopte_t *iopte; unsigned long flags, order, npages; npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; iommu = dev->archdata.iommu; - iopte = iommu->page_table + - ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT); spin_lock_irqsave(&iommu->lock, flags); diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c index df39a0f0d27af..732b0bce6001c 100644 --- a/arch/sparc/kernel/ldc.c +++ b/arch/sparc/kernel/ldc.c @@ -790,16 +790,20 @@ static void send_events(struct ldc_channel *lp, unsigned int event_mask) static irqreturn_t ldc_rx(int irq, void *dev_id) { struct ldc_channel *lp = dev_id; - unsigned long orig_state, hv_err, flags; + unsigned long orig_state, flags; unsigned int event_mask; spin_lock_irqsave(&lp->lock, flags); orig_state = lp->chan_state; - hv_err = sun4v_ldc_rx_get_state(lp->id, - &lp->rx_head, - &lp->rx_tail, - &lp->chan_state); + + /* We should probably check for hypervisor errors here and + * reset the LDC channel if we get one. 
+ */ + sun4v_ldc_rx_get_state(lp->id, + &lp->rx_head, + &lp->rx_tail, + &lp->chan_state); ldcdbg(RX, "RX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n", orig_state, lp->chan_state, lp->rx_head, lp->rx_tail); @@ -904,16 +908,20 @@ static irqreturn_t ldc_rx(int irq, void *dev_id) static irqreturn_t ldc_tx(int irq, void *dev_id) { struct ldc_channel *lp = dev_id; - unsigned long flags, hv_err, orig_state; + unsigned long flags, orig_state; unsigned int event_mask = 0; spin_lock_irqsave(&lp->lock, flags); orig_state = lp->chan_state; - hv_err = sun4v_ldc_tx_get_state(lp->id, - &lp->tx_head, - &lp->tx_tail, - &lp->chan_state); + + /* We should probably check for hypervisor errors here and + * reset the LDC channel if we get one. + */ + sun4v_ldc_tx_get_state(lp->id, + &lp->tx_head, + &lp->tx_tail, + &lp->chan_state); ldcdbg(TX, " TX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n", orig_state, lp->chan_state, lp->tx_head, lp->tx_tail); diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 4137579d9adcd..f255382b02b86 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -675,6 +675,7 @@ static void __devinit pci_bus_register_of_sysfs(struct pci_bus *bus) * humanoid. */ err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr); + (void) err; } list_for_each_entry(child_bus, &bus->children, node) pci_bus_register_of_sysfs(child_bus); diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c index 6c7a33af3ba62..6e3874b644880 100644 --- a/arch/sparc/kernel/pci_common.c +++ b/arch/sparc/kernel/pci_common.c @@ -295,14 +295,17 @@ static int sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, unsigned int bus = bus_dev->number; unsigned int device = PCI_SLOT(devfn); unsigned int func = PCI_FUNC(devfn); - unsigned long ret; if (config_out_of_range(pbm, bus, devfn, where)) { /* Do nothing. */ } else { - ret = pci_sun4v_config_put(devhandle, - HV_PCI_DEVICE_BUILD(bus, device, func), - where, size, value); + /* We don't check for hypervisor errors here, but perhaps + * we should and influence our return value depending upon + * what kind of error is thrown. 
+ */ + pci_sun4v_config_put(devhandle, + HV_PCI_DEVICE_BUILD(bus, device, func), + where, size, value); } return PCIBIOS_SUCCESSFUL; } diff --git a/arch/sparc/kernel/pci_fire.c b/arch/sparc/kernel/pci_fire.c index efb896d687540..75dfeb60ef6eb 100644 --- a/arch/sparc/kernel/pci_fire.c +++ b/arch/sparc/kernel/pci_fire.c @@ -214,11 +214,9 @@ static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid, static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi) { - unsigned long msiqid; u64 val; val = upa_readq(pbm->pbm_regs + MSI_MAP(msi)); - msiqid = (val & MSI_MAP_EQNUM); val &= ~MSI_MAP_VALID; diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c index 445a47a2fb3dd..4620eb76aef4c 100644 --- a/arch/sparc/kernel/pci_schizo.c +++ b/arch/sparc/kernel/pci_schizo.c @@ -1313,7 +1313,7 @@ static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm, const struct linux_prom64_registers *regs; struct device_node *dp = op->dev.of_node; const char *chipset_name; - int is_pbm_a, err; + int err; switch (chip_type) { case PBM_CHIP_TYPE_TOMATILLO: @@ -1343,8 +1343,6 @@ static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm, */ regs = of_get_property(dp, "reg", NULL); - is_pbm_a = ((regs[0].phys_addr & 0x00700000) == 0x00600000); - pbm->next = pci_pbm_root; pci_pbm_root = pbm; diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 743344aa6d8a6..859abfd789373 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -580,7 +580,7 @@ static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm) { static const u32 vdma_default[] = { 0x80000000, 0x80000000 }; struct iommu *iommu = pbm->iommu; - unsigned long num_tsb_entries, sz, tsbsize; + unsigned long num_tsb_entries, sz; u32 dma_mask, dma_offset; const u32 *vdma; @@ -596,7 +596,6 @@ static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm) dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL); num_tsb_entries = vdma[1] / IO_PAGE_SIZE; - tsbsize = num_tsb_entries * sizeof(iopte_t); dma_offset = vdma[0]; diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index 7c2ced612b8f5..8ac23e6600804 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c @@ -81,7 +81,7 @@ static void n2_pcr_write(u64 val) unsigned long ret; ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val); - if (val != HV_EOK) + if (ret != HV_EOK) write_pcr(val); } diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c index 9ccc812bc09e6..96ee50a806613 100644 --- a/arch/sparc/kernel/ptrace_64.c +++ b/arch/sparc/kernel/ptrace_64.c @@ -1086,6 +1086,7 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs) asmlinkage void syscall_trace_leave(struct pt_regs *regs) { +#ifdef CONFIG_AUDITSYSCALL if (unlikely(current->audit_context)) { unsigned long tstate = regs->tstate; int result = AUDITSC_SUCCESS; @@ -1095,7 +1096,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs) audit_syscall_exit(result, regs->u_regs[UREG_I0]); } - +#endif if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) trace_sys_exit(regs, regs->u_regs[UREG_G1]); diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 555a76d1f4a18..3e94a8c232388 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -189,7 +189,7 @@ static inline long get_delta (long *rt, long *master) void smp_synchronize_tick_client(void) { long i, delta, adj, adjust_latency = 0, done = 0; - unsigned long flags, rt, master_time_stamp, bound; + 
unsigned long flags, rt, master_time_stamp; #if DEBUG_TICK_SYNC struct { long rt; /* roundtrip time */ @@ -208,10 +208,8 @@ void smp_synchronize_tick_client(void) { for (i = 0; i < NUM_ROUNDS; i++) { delta = get_delta(&rt, &master_time_stamp); - if (delta == 0) { + if (delta == 0) done = 1; /* let's lock on to this... */ - bound = rt; - } if (!done) { if (i > 0) { @@ -933,13 +931,12 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu) void flush_dcache_page_all(struct mm_struct *mm, struct page *page) { void *pg_addr; - int this_cpu; u64 data0; if (tlb_type == hypervisor) return; - this_cpu = get_cpu(); + preempt_disable(); #ifdef CONFIG_DEBUG_DCFLUSH atomic_inc(&dcpage_flushes); @@ -964,7 +961,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page) } __local_flush_dcache_page(page); - put_cpu(); + preempt_enable(); } void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index 1e9770936c3b8..1ed547bd850f8 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -2152,7 +2152,7 @@ static void user_instruction_dump(unsigned int __user *pc) void show_stack(struct task_struct *tsk, unsigned long *_ksp) { - unsigned long fp, thread_base, ksp; + unsigned long fp, ksp; struct thread_info *tp; int count = 0; #ifdef CONFIG_FUNCTION_GRAPH_TRACER @@ -2173,7 +2173,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) flushw_all(); fp = ksp + STACK_BIAS; - thread_base = (unsigned long) tp; printk("Call Trace:\n"); do { diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S index be183fe41443f..1c8d33228b2a6 100644 --- a/arch/sparc/kernel/una_asm_64.S +++ b/arch/sparc/kernel/una_asm_64.S @@ -127,7 +127,7 @@ do_int_load: wr %o5, 0x0, %asi retl mov 0, %o0 - .size __do_int_load, .-__do_int_load + .size do_int_load, .-do_int_load .section __ex_table,"a" .word 4b, __retl_efault diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index 5b836f5aea90b..b10ac4d62378a 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -240,11 +240,10 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write, * only copy the information from the master page table, * nothing more. */ + code = SEGV_MAPERR; if (!ARCH_SUN4C && address >= TASK_SIZE) goto vmalloc_fault; - code = SEGV_MAPERR; - /* * If we're in an interrupt or have no user * context, we must not take the fault.. 
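
The ldc_rx()/ldc_tx() and sun4v_write_pci_cfg() hunks above deliberately drop the hypervisor return value and only leave a comment about the error handling that is still missing. A minimal sketch of what that deferred check could look like follows; it assumes nothing beyond the HV_EOK success code already used elsewhere in this series, and ldc_channel_reset() is a hypothetical stand-in for whatever reset path the driver would actually take.

/*
 * Sketch only, not part of the patch: check the sun4v return code the
 * way the TODO comment in ldc_tx() suggests.  HV_EOK is the existing
 * success code; ldc_channel_reset() is a hypothetical helper.
 */
static int ldc_refresh_tx_state(struct ldc_channel *lp)
{
	unsigned long hv_err;

	hv_err = sun4v_ldc_tx_get_state(lp->id,
					&lp->tx_head,
					&lp->tx_tail,
					&lp->chan_state);
	if (hv_err != HV_EOK) {
		/* Assumption: any HV error leaves the channel unusable. */
		ldc_channel_reset(lp);
		return -EIO;
	}
	return 0;
}

The same pattern would apply to sun4v_write_pci_cfg(), where the hypervisor status could be mapped onto a PCIBIOS_* error code instead of unconditionally returning PCIBIOS_SUCCESSFUL.
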
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h index fe0d10dcae57a..d03ec124a598b 100644 --- a/arch/tile/include/asm/futex.h +++ b/arch/tile/include/asm/futex.h @@ -29,16 +29,16 @@ #include #include -extern struct __get_user futex_set(int __user *v, int i); -extern struct __get_user futex_add(int __user *v, int n); -extern struct __get_user futex_or(int __user *v, int n); -extern struct __get_user futex_andn(int __user *v, int n); -extern struct __get_user futex_cmpxchg(int __user *v, int o, int n); +extern struct __get_user futex_set(u32 __user *v, int i); +extern struct __get_user futex_add(u32 __user *v, int n); +extern struct __get_user futex_or(u32 __user *v, int n); +extern struct __get_user futex_andn(u32 __user *v, int n); +extern struct __get_user futex_cmpxchg(u32 __user *v, int o, int n); #ifndef __tilegx__ -extern struct __get_user futex_xor(int __user *v, int n); +extern struct __get_user futex_xor(u32 __user *v, int n); #else -static inline struct __get_user futex_xor(int __user *uaddr, int n) +static inline struct __get_user futex_xor(u32 __user *uaddr, int n) { struct __get_user asm_ret = __get_user_4(uaddr); if (!asm_ret.err) { @@ -53,7 +53,7 @@ static inline struct __get_user futex_xor(int __user *uaddr, int n) } #endif -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -65,7 +65,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -119,16 +119,17 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) return ret; } -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, - int newval) +static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { struct __get_user asm_ret; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; asm_ret = futex_cmpxchg(uaddr, oldval, newval); - return asm_ret.err ? asm_ret.err : asm_ret.val; + *uval = asm_ret.val; + return asm_ret.err; } #ifndef __tilegx__ diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile index 804b28dd0328a..b1da91c1b200d 100644 --- a/arch/um/sys-i386/Makefile +++ b/arch/um/sys-i386/Makefile @@ -4,7 +4,7 @@ obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \ ptrace_user.o setjmp.o signal.o stub.o stub_segv.o syscalls.o sysrq.o \ - sys_call_table.o tls.o + sys_call_table.o tls.o atomic64_cx8_32.o obj-$(CONFIG_BINFMT_ELF) += elfcore.o diff --git a/arch/um/sys-i386/atomic64_cx8_32.S b/arch/um/sys-i386/atomic64_cx8_32.S new file mode 100644 index 0000000000000..1e901d3d4a956 --- /dev/null +++ b/arch/um/sys-i386/atomic64_cx8_32.S @@ -0,0 +1,225 @@ +/* + * atomic64_t for 586+ + * + * Copied from arch/x86/lib/atomic64_cx8_32.S + * + * Copyright © 2010 Luca Barbieri + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + */ + +#include +#include +#include + +.macro SAVE reg + pushl_cfi %\reg + CFI_REL_OFFSET \reg, 0 +.endm + +.macro RESTORE reg + popl_cfi %\reg + CFI_RESTORE \reg +.endm + +.macro read64 reg + movl %ebx, %eax + movl %ecx, %edx +/* we need LOCK_PREFIX since otherwise cmpxchg8b always does the write */ + LOCK_PREFIX + cmpxchg8b (\reg) +.endm + +ENTRY(atomic64_read_cx8) + CFI_STARTPROC + + read64 %ecx + ret + CFI_ENDPROC +ENDPROC(atomic64_read_cx8) + +ENTRY(atomic64_set_cx8) + CFI_STARTPROC + +1: +/* we don't need LOCK_PREFIX since aligned 64-bit writes + * are atomic on 586 and newer */ + cmpxchg8b (%esi) + jne 1b + + ret + CFI_ENDPROC +ENDPROC(atomic64_set_cx8) + +ENTRY(atomic64_xchg_cx8) + CFI_STARTPROC + + movl %ebx, %eax + movl %ecx, %edx +1: + LOCK_PREFIX + cmpxchg8b (%esi) + jne 1b + + ret + CFI_ENDPROC +ENDPROC(atomic64_xchg_cx8) + +.macro addsub_return func ins insc +ENTRY(atomic64_\func\()_return_cx8) + CFI_STARTPROC + SAVE ebp + SAVE ebx + SAVE esi + SAVE edi + + movl %eax, %esi + movl %edx, %edi + movl %ecx, %ebp + + read64 %ebp +1: + movl %eax, %ebx + movl %edx, %ecx + \ins\()l %esi, %ebx + \insc\()l %edi, %ecx + LOCK_PREFIX + cmpxchg8b (%ebp) + jne 1b + +10: + movl %ebx, %eax + movl %ecx, %edx + RESTORE edi + RESTORE esi + RESTORE ebx + RESTORE ebp + ret + CFI_ENDPROC +ENDPROC(atomic64_\func\()_return_cx8) +.endm + +addsub_return add add adc +addsub_return sub sub sbb + +.macro incdec_return func ins insc +ENTRY(atomic64_\func\()_return_cx8) + CFI_STARTPROC + SAVE ebx + + read64 %esi +1: + movl %eax, %ebx + movl %edx, %ecx + \ins\()l $1, %ebx + \insc\()l $0, %ecx + LOCK_PREFIX + cmpxchg8b (%esi) + jne 1b + +10: + movl %ebx, %eax + movl %ecx, %edx + RESTORE ebx + ret + CFI_ENDPROC +ENDPROC(atomic64_\func\()_return_cx8) +.endm + +incdec_return inc add adc +incdec_return dec sub sbb + +ENTRY(atomic64_dec_if_positive_cx8) + CFI_STARTPROC + SAVE ebx + + read64 %esi +1: + movl %eax, %ebx + movl %edx, %ecx + subl $1, %ebx + sbb $0, %ecx + js 2f + LOCK_PREFIX + cmpxchg8b (%esi) + jne 1b + +2: + movl %ebx, %eax + movl %ecx, %edx + RESTORE ebx + ret + CFI_ENDPROC +ENDPROC(atomic64_dec_if_positive_cx8) + +ENTRY(atomic64_add_unless_cx8) + CFI_STARTPROC + SAVE ebp + SAVE ebx +/* these just push these two parameters on the stack */ + SAVE edi + SAVE esi + + movl %ecx, %ebp + movl %eax, %esi + movl %edx, %edi + + read64 %ebp +1: + cmpl %eax, 0(%esp) + je 4f +2: + movl %eax, %ebx + movl %edx, %ecx + addl %esi, %ebx + adcl %edi, %ecx + LOCK_PREFIX + cmpxchg8b (%ebp) + jne 1b + + movl $1, %eax +3: + addl $8, %esp + CFI_ADJUST_CFA_OFFSET -8 + RESTORE ebx + RESTORE ebp + ret +4: + cmpl %edx, 4(%esp) + jne 2b + xorl %eax, %eax + jmp 3b + CFI_ENDPROC +ENDPROC(atomic64_add_unless_cx8) + +ENTRY(atomic64_inc_not_zero_cx8) + CFI_STARTPROC + SAVE ebx + + read64 %esi +1: + testl %eax, %eax + je 4f +2: + movl %eax, %ebx + movl %edx, %ecx + addl $1, %ebx + adcl $0, %ecx + LOCK_PREFIX + cmpxchg8b (%esi) + jne 1b + + movl $1, %eax +3: + RESTORE ebx + ret +4: + testl %edx, %edx + jne 2b + jmp 3b + CFI_ENDPROC +ENDPROC(atomic64_inc_not_zero_cx8) diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 8fe2a4966b7af..4292df78c9da0 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -1612,6 +1612,7 @@ _zero_cipher_left_encrypt: movdqa SHUF_MASK(%rip), %xmm10 PSHUFB_XMM %xmm10, %xmm0 + ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn) sub $16, %r11 add %r13, %r11 @@ -1634,7 +1635,9 @@ _zero_cipher_left_encrypt: # GHASH computation for the 
last <16 byte block sub %r13, %r11 add $16, %r11 - PSHUFB_XMM %xmm10, %xmm1 + + movdqa SHUF_MASK(%rip), %xmm10 + PSHUFB_XMM %xmm10, %xmm0 # shuffle xmm0 back to output as ciphertext diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index e1e60c7d5813c..b375b2a7a14f8 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -828,9 +828,15 @@ static int rfc4106_init(struct crypto_tfm *tfm) struct cryptd_aead *cryptd_tfm; struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *) PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN); + struct crypto_aead *cryptd_child; + struct aesni_rfc4106_gcm_ctx *child_ctx; cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0); if (IS_ERR(cryptd_tfm)) return PTR_ERR(cryptd_tfm); + + cryptd_child = cryptd_aead_child(cryptd_tfm); + child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child); + memcpy(child_ctx, ctx, sizeof(*ctx)); ctx->cryptd_tfm = cryptd_tfm; tfm->crt_aead.reqsize = sizeof(struct aead_request) + crypto_aead_reqsize(&cryptd_tfm->base); @@ -925,6 +931,9 @@ static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key, int ret = 0; struct crypto_tfm *tfm = crypto_aead_tfm(parent); struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent); + struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); + struct aesni_rfc4106_gcm_ctx *child_ctx = + aesni_rfc4106_gcm_ctx_get(cryptd_child); u8 *new_key_mem = NULL; if (key_len < 4) { @@ -968,6 +977,7 @@ static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key, goto exit; } ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); + memcpy(child_ctx, ctx, sizeof(*ctx)); exit: kfree(new_key_mem); return ret; @@ -999,7 +1009,6 @@ static int rfc4106_encrypt(struct aead_request *req) int ret; struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); - struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); if (!irq_fpu_usable()) { struct aead_request *cryptd_req = @@ -1008,6 +1017,7 @@ static int rfc4106_encrypt(struct aead_request *req) aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); return crypto_aead_encrypt(cryptd_req); } else { + struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); kernel_fpu_begin(); ret = cryptd_child->base.crt_aead.encrypt(req); kernel_fpu_end(); @@ -1020,7 +1030,6 @@ static int rfc4106_decrypt(struct aead_request *req) int ret; struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); - struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); if (!irq_fpu_usable()) { struct aead_request *cryptd_req = @@ -1029,6 +1038,7 @@ static int rfc4106_decrypt(struct aead_request *req) aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); return crypto_aead_decrypt(cryptd_req); } else { + struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm); kernel_fpu_begin(); ret = cryptd_child->base.crt_aead.decrypt(req); kernel_fpu_end(); diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h index 47a30ff8e5178..8ac7695c15f7e 100644 --- a/arch/x86/include/asm/apicdef.h +++ b/arch/x86/include/asm/apicdef.h @@ -78,6 +78,7 @@ #define APIC_DEST_LOGICAL 0x00800 #define APIC_DEST_PHYSICAL 0x00000 #define APIC_DM_FIXED 0x00000 +#define APIC_DM_FIXED_MASK 0x00700 #define APIC_DM_LOWEST 0x00100 #define APIC_DM_SMI 0x00200 #define APIC_DM_REMRD 0x00300 diff --git 
a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 220e2ea08e80b..3b98f78ad63ff 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -125,7 +125,7 @@ #define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */ #define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */ #define X86_FEATURE_F16C (4*32+29) /* 16-bit fp conversions */ -#define X86_FEATURE_RDRND (4*32+30) /* The RDRAND instruction */ +#define X86_FEATURE_RDRAND (4*32+30) /* The RDRAND instruction */ #define X86_FEATURE_HYPERVISOR (4*32+31) /* Running on a hypervisor */ /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h index 1f11ce44e956d..d09bb03653f02 100644 --- a/arch/x86/include/asm/futex.h +++ b/arch/x86/include/asm/futex.h @@ -37,7 +37,7 @@ "+m" (*uaddr), "=&r" (tem) \ : "r" (oparg), "i" (-EFAULT), "1" (0)) -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -48,7 +48,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP) @@ -109,9 +109,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) return ret; } -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, - int newval) +static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { + int ret = 0; #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP) /* Real i386 machines have no cmpxchg instruction */ @@ -119,21 +120,22 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, return -ENOSYS; #endif - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n" + asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" "2:\t.section .fixup, \"ax\"\n" - "3:\tmov %2, %0\n" + "3:\tmov %3, %0\n" "\tjmp 2b\n" "\t.previous\n" _ASM_EXTABLE(1b, 3b) - : "=a" (oldval), "+m" (*uaddr) - : "i" (-EFAULT), "r" (newval), "0" (oldval) + : "+r" (ret), "=a" (oldval), "+m" (*uaddr) + : "i" (-EFAULT), "r" (newval), "1" (oldval) : "memory" ); - return oldval; + *uval = oldval; + return ret; } #endif diff --git a/arch/x86/include/asm/gart.h b/arch/x86/include/asm/gart.h index 43085bfc99c30..3e7349f91afa4 100644 --- a/arch/x86/include/asm/gart.h +++ b/arch/x86/include/asm/gart.h @@ -66,7 +66,7 @@ static inline void gart_set_size_and_enable(struct pci_dev *dev, u32 order) * Don't enable translation but enable GART IO and CPU accesses. * Also, set DISTLBWALKPRB since GART tables memory is UC. */ - ctl = DISTLBWALKPRB | order << 1; + ctl = order << 1; pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl); } @@ -83,7 +83,7 @@ static inline void enable_gart_translation(struct pci_dev *dev, u64 addr) /* Enable GART translation for this hammer. 
*/ pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl); - ctl |= GARTEN; + ctl |= GARTEN | DISTLBWALKPRB; ctl &= ~(DISGARTCPU | DISGARTIO); pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl); } diff --git a/arch/x86/include/asm/idle.h b/arch/x86/include/asm/idle.h index 38d87379e2705..192e5a02a8c98 100644 --- a/arch/x86/include/asm/idle.h +++ b/arch/x86/include/asm/idle.h @@ -1,13 +1,6 @@ #ifndef _ASM_X86_IDLE_H #define _ASM_X86_IDLE_H -#define IDLE_START 1 -#define IDLE_END 2 - -struct notifier_block; -void idle_notifier_register(struct notifier_block *n); -void idle_notifier_unregister(struct notifier_block *n); - #ifdef CONFIG_X86_64 void enter_idle(void); void exit_idle(void); diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 43a18c77676d9..99b402c6a9158 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -92,11 +92,15 @@ #define MSR_IA32_MC0_ADDR 0x00000402 #define MSR_IA32_MC0_MISC 0x00000403 +#define MSR_AMD64_MC0_MASK 0xc0010044 + #define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) #define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x)) #define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x)) #define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x)) +#define MSR_AMD64_MCx_MASK(x) (MSR_AMD64_MC0_MASK + (x)) + /* These are consecutive and not in the normal 4er MCE bank block */ #define MSR_IA32_MC0_CTL2 0x00000280 #define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x)) diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 94b979d1b58db..effff47a3c828 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -69,8 +69,6 @@ static inline void native_pmd_clear(pmd_t *pmd) static inline void pud_clear(pud_t *pudp) { - unsigned long pgd; - set_pud(pudp, __pud(0)); /* @@ -79,13 +77,10 @@ static inline void pud_clear(pud_t *pudp) * section 8.1: in PAE mode we explicitly have to flush the * TLB via cr3 if the top-level pgd is changed... * - * Make sure the pud entry we're updating is within the - * current pgd to avoid unnecessary TLB flushes. + * Currently all places where pud_clear() is called either have + * flush_tlb_mm() followed or don't need TLB flush (x86_64 code or + * pud_clear_bad()), so we don't need TLB flush here. */ - pgd = read_cr3(); - if (__pa(pudp) >= pgd && __pa(pudp) < - (pgd + sizeof(pgd_t)*PTRS_PER_PGD)) - write_cr3(pgd); } #ifdef CONFIG_SMP diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index abd3e0ea762ac..99f0ad753f32c 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -42,7 +42,7 @@ * Returns 0 if the range is valid, nonzero otherwise. * * This is equivalent to the following test: - * (u33)addr + (u33)size >= (u33)current->addr_limit.seg (u65 for x86_64) + * (u33)addr + (u33)size > (u33)current->addr_limit.seg (u65 for x86_64) * * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry... */ diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h index 3e094af443c39..130f1eeee5fed 100644 --- a/arch/x86/include/asm/uv/uv_bau.h +++ b/arch/x86/include/asm/uv/uv_bau.h @@ -94,6 +94,8 @@ /* after this # consecutive successes, bump up the throttle if it was lowered */ #define COMPLETE_THRESHOLD 5 +#define UV_LB_SUBNODEID 0x10 + /* * number of entries in the destination side payload queue */ @@ -124,7 +126,7 @@ * The distribution specification (32 bytes) is interpreted as a 256-bit * distribution vector. 
Adjacent bits correspond to consecutive even numbered * nodeIDs. The result of adding the index of a given bit to the 15-bit - * 'base_dest_nodeid' field of the header corresponds to the + * 'base_dest_nasid' field of the header corresponds to the * destination nodeID associated with that specified bit. */ struct bau_target_uvhubmask { @@ -176,7 +178,7 @@ struct bau_msg_payload { struct bau_msg_header { unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ /* bits 5:0 */ - unsigned int base_dest_nodeid:15; /* nasid of the */ + unsigned int base_dest_nasid:15; /* nasid of the */ /* bits 20:6 */ /* first bit in uvhub map */ unsigned int command:8; /* message type */ /* bits 28:21 */ @@ -378,6 +380,10 @@ struct ptc_stats { unsigned long d_rcanceled; /* number of messages canceled by resets */ }; +struct hub_and_pnode { + short uvhub; + short pnode; +}; /* * one per-cpu; to locate the software tables */ @@ -399,10 +405,12 @@ struct bau_control { int baudisabled; int set_bau_off; short cpu; + short osnode; short uvhub_cpu; short uvhub; short cpus_in_socket; short cpus_in_uvhub; + short partition_base_pnode; unsigned short message_number; unsigned short uvhub_quiesce; short socket_acknowledge_count[DEST_Q_SIZE]; @@ -422,15 +430,16 @@ struct bau_control { int congested_period; cycles_t period_time; long period_requests; + struct hub_and_pnode *target_hub_and_pnode; }; static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp) { return constant_test_bit(uvhub, &dstp->bits[0]); } -static inline void bau_uvhub_set(int uvhub, struct bau_target_uvhubmask *dstp) +static inline void bau_uvhub_set(int pnode, struct bau_target_uvhubmask *dstp) { - __set_bit(uvhub, &dstp->bits[0]); + __set_bit(pnode, &dstp->bits[0]); } static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp, int nbits) diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 7038b95d363f2..4db35544de738 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -620,7 +620,12 @@ static int __kprobes stop_machine_text_poke(void *data) flush_icache_range((unsigned long)p->addr, (unsigned long)p->addr + p->len); } - + /* + * Intel Archiecture Software Developer's Manual section 7.1.3 specifies + * that a core serializing instruction such as "cpuid" should be + * executed on _each_ core before the new instruction is made visible. + */ + sync_core(); return 0; } diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 5955a7800a966..f6a1c2395b51f 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c @@ -500,7 +500,7 @@ int __init gart_iommu_hole_init(void) * Don't enable translation yet but enable GART IO and CPU * accesses and set DISTLBWALKPRB since GART table memory is UC. 
*/ - u32 ctl = DISTLBWALKPRB | aper_order << 1; + u32 ctl = aper_order << 1; bus = amd_nb_bus_dev_ranges[i].bus; dev_base = amd_nb_bus_dev_ranges[i].dev_base; diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index ca9e2a3545a9b..e43777835f6de 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -615,14 +615,14 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void) struct IO_APIC_route_entry **ioapic_entries; ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics, - GFP_KERNEL); + GFP_ATOMIC); if (!ioapic_entries) return 0; for (apic = 0; apic < nr_ioapics; apic++) { ioapic_entries[apic] = kzalloc(sizeof(struct IO_APIC_route_entry) * - nr_ioapic_registers[apic], GFP_KERNEL); + nr_ioapic_registers[apic], GFP_ATOMIC); if (!ioapic_entries[apic]) goto nomem; } diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 0e4f24c2a746f..3bfa022359659 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -66,7 +66,7 @@ * 1.5: Fix segment register reloading (in case of bad segments saved * across BIOS call). * Stephen Rothwell - * 1.6: Cope with complier/assembler differences. + * 1.6: Cope with compiler/assembler differences. * Only try to turn off the first display device. * Fix OOPS at power off with no APM BIOS by Jan Echternach * @@ -227,6 +227,8 @@ #include #include #include +#include +#include #include #include @@ -975,20 +977,10 @@ static void apm_cpu_idle(void) static void apm_power_off(void) { - unsigned char po_bios_call[] = { - 0xb8, 0x00, 0x10, /* movw $0x1000,ax */ - 0x8e, 0xd0, /* movw ax,ss */ - 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */ - 0xb8, 0x07, 0x53, /* movw $0x5307,ax */ - 0xbb, 0x01, 0x00, /* movw $0x0001,bx */ - 0xb9, 0x03, 0x00, /* movw $0x0003,cx */ - 0xcd, 0x15 /* int $0x15 */ - }; - /* Some bioses don't like being called from CPU != 0 */ if (apm_info.realmode_power_off) { set_cpus_allowed_ptr(current, cpumask_of(0)); - machine_real_restart(po_bios_call, sizeof(po_bios_call)); + machine_real_restart(MRR_APM); } else { (void)set_system_power_state(APM_STATE_OFF); } @@ -1246,7 +1238,7 @@ static int suspend(int vetoable) dpm_suspend_noirq(PMSG_SUSPEND); local_irq_disable(); - sysdev_suspend(PMSG_SUSPEND); + syscore_suspend(); local_irq_enable(); @@ -1264,7 +1256,7 @@ static int suspend(int vetoable) apm_error("suspend", err); err = (err == APM_SUCCESS) ? 
0 : -EIO; - sysdev_resume(); + syscore_resume(); local_irq_enable(); dpm_resume_noirq(PMSG_RESUME); @@ -1288,7 +1280,7 @@ static void standby(void) dpm_suspend_noirq(PMSG_SUSPEND); local_irq_disable(); - sysdev_suspend(PMSG_SUSPEND); + syscore_suspend(); local_irq_enable(); err = set_system_power_state(APM_STATE_STANDBY); @@ -1296,7 +1288,7 @@ static void standby(void) apm_error("standby", err); local_irq_disable(); - sysdev_resume(); + syscore_resume(); local_irq_enable(); dpm_resume_noirq(PMSG_RESUME); @@ -2331,12 +2323,11 @@ static int __init apm_init(void) apm_info.disabled = 1; return -ENODEV; } - if (pm_flags & PM_ACPI) { + if (!acpi_disabled) { printk(KERN_NOTICE "apm: overridden by ACPI.\n"); apm_info.disabled = 1; return -ENODEV; } - pm_flags |= PM_APM; /* * Set up the long jump entry point to the APM BIOS, which is called @@ -2428,7 +2419,6 @@ static void __exit apm_exit(void) kthread_stop(kapmd_task); kapmd_task = NULL; } - pm_flags &= ~PM_APM; } module_init(apm_init); diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 7c7bedb83c5a4..f7159604e50ad 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -594,6 +594,35 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) } } #endif + + /* + * Family 0x12 and above processors have APIC timer + * running in deep C states. + */ + if (c->x86 > 0x11) + set_cpu_cap(c, X86_FEATURE_ARAT); + + /* + * Disable GART TLB Walk Errors on Fam10h. We do this here + * because this is always needed when GART is enabled, even in a + * kernel which has no MCE support built in. + */ + if (c->x86 == 0x10) { + /* + * BIOS should disable GartTlbWlk Errors themself. If + * it doesn't do it here as suggested by the BKDG. + * + * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012 + */ + u64 mask; + int err; + + err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask); + if (err == 0) { + mask |= (1 << 10); + checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask); + } + } } #ifdef CONFIG_X86_32 @@ -658,7 +687,7 @@ cpu_dev_register(amd_cpu_dev); */ const int amd_erratum_400[] = - AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), + AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0x0f, 0x4, 0x2, 0xff, 0xf), AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); EXPORT_SYMBOL_GPL(amd_erratum_400); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 1d59834396bdc..a9c7d7d7733a5 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -565,8 +565,7 @@ void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); - if (eax > 0) - c->x86_capability[9] = ebx; + c->x86_capability[9] = ebx; } /* AMD-defined flags: level 0x80000001 */ diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index a2baafb2fe6d3..4e04e12743887 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -47,9 +47,6 @@ #include #include "mperf.h" -#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ - "acpi-cpufreq", msg) - MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); MODULE_DESCRIPTION("ACPI Processor P-States Driver"); MODULE_LICENSE("GPL"); @@ -233,7 +230,7 @@ static u32 get_cur_val(const struct cpumask *mask) cmd.mask = mask; drv_read(&cmd); - dprintk("get_cur_val = %u\n", cmd.val); + pr_debug("get_cur_val = %u\n", cmd.val); return cmd.val; } @@ -244,7 +241,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) unsigned int freq; unsigned int cached_freq; - dprintk("get_cur_freq_on_cpu (%d)\n", cpu); + pr_debug("get_cur_freq_on_cpu (%d)\n", cpu); if (unlikely(data == NULL || data->acpi_data == NULL || data->freq_table == NULL)) { @@ -261,7 +258,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) data->resume = 1; } - dprintk("cur freq = %u\n", freq); + pr_debug("cur freq = %u\n", freq); return freq; } @@ -293,7 +290,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, unsigned int i; int result = 0; - dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); + pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); if (unlikely(data == NULL || data->acpi_data == NULL || data->freq_table == NULL)) { @@ -313,11 +310,11 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, next_perf_state = data->freq_table[next_state].index; if (perf->state == next_perf_state) { if (unlikely(data->resume)) { - dprintk("Called after resume, resetting to P%d\n", + pr_debug("Called after resume, resetting to P%d\n", next_perf_state); data->resume = 0; } else { - dprintk("Already at target state (P%d)\n", + pr_debug("Already at target state (P%d)\n", next_perf_state); goto out; } @@ -357,7 +354,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, if (acpi_pstate_strict) { if (!check_freqs(cmd.mask, freqs.new, data)) { - dprintk("acpi_cpufreq_target failed (%d)\n", + pr_debug("acpi_cpufreq_target failed (%d)\n", policy->cpu); result = -EAGAIN; goto out; @@ -378,7 +375,7 @@ static int acpi_cpufreq_verify(struct cpufreq_policy *policy) { struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); - dprintk("acpi_cpufreq_verify\n"); + pr_debug("acpi_cpufreq_verify\n"); return cpufreq_frequency_table_verify(policy, data->freq_table); } @@ -433,11 +430,11 @@ static void free_acpi_perf_data(void) static int __init acpi_cpufreq_early_init(void) { unsigned int i; - dprintk("acpi_cpufreq_early_init\n"); + pr_debug("acpi_cpufreq_early_init\n"); acpi_perf_data = alloc_percpu(struct acpi_processor_performance); if (!acpi_perf_data) { - dprintk("Memory allocation error for acpi_perf_data.\n"); + pr_debug("Memory allocation error for acpi_perf_data.\n"); return -ENOMEM; } for_each_possible_cpu(i) { @@ -519,7 +516,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) static int blacklisted; #endif - dprintk("acpi_cpufreq_cpu_init\n"); + pr_debug("acpi_cpufreq_cpu_init\n"); #ifdef CONFIG_SMP if (blacklisted) @@ -566,7 +563,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) /* capability check */ if (perf->state_count <= 1) { - dprintk("No P-States\n"); + pr_debug("No P-States\n"); result = -ENODEV; goto err_unreg; } @@ -578,11 +575,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) switch (perf->control_register.space_id) { case ACPI_ADR_SPACE_SYSTEM_IO: - dprintk("SYSTEM IO addr space\n"); + pr_debug("SYSTEM IO addr space\n"); data->cpu_feature = SYSTEM_IO_CAPABLE; break; case ACPI_ADR_SPACE_FIXED_HARDWARE: - dprintk("HARDWARE 
addr space\n"); + pr_debug("HARDWARE addr space\n"); if (!check_est_cpu(cpu)) { result = -ENODEV; goto err_unreg; @@ -590,7 +587,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE; break; default: - dprintk("Unknown addr space %d\n", + pr_debug("Unknown addr space %d\n", (u32) (perf->control_register.space_id)); result = -ENODEV; goto err_unreg; @@ -661,9 +658,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) if (cpu_has(c, X86_FEATURE_APERFMPERF)) acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf; - dprintk("CPU%u - ACPI performance management activated.\n", cpu); + pr_debug("CPU%u - ACPI performance management activated.\n", cpu); for (i = 0; i < perf->state_count; i++) - dprintk(" %cP%d: %d MHz, %d mW, %d uS\n", + pr_debug(" %cP%d: %d MHz, %d mW, %d uS\n", (i == perf->state ? '*' : ' '), i, (u32) perf->states[i].core_frequency, (u32) perf->states[i].power, @@ -694,7 +691,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) { struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); - dprintk("acpi_cpufreq_cpu_exit\n"); + pr_debug("acpi_cpufreq_cpu_exit\n"); if (data) { cpufreq_frequency_table_put_attr(policy->cpu); @@ -712,7 +709,7 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy) { struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); - dprintk("acpi_cpufreq_resume\n"); + pr_debug("acpi_cpufreq_resume\n"); data->resume = 1; @@ -743,7 +740,7 @@ static int __init acpi_cpufreq_init(void) if (acpi_disabled) return 0; - dprintk("acpi_cpufreq_init\n"); + pr_debug("acpi_cpufreq_init\n"); ret = acpi_cpufreq_early_init(); if (ret) @@ -758,7 +755,7 @@ static int __init acpi_cpufreq_init(void) static void __exit acpi_cpufreq_exit(void) { - dprintk("acpi_cpufreq_exit\n"); + pr_debug("acpi_cpufreq_exit\n"); cpufreq_unregister_driver(&acpi_cpufreq_driver); diff --git a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c index 141abebc4516a..7bac808804f3a 100644 --- a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c +++ b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c @@ -57,8 +57,6 @@ MODULE_PARM_DESC(min_fsb, "Minimum FSB to use, if not defined: current FSB - 50"); #define PFX "cpufreq-nforce2: " -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ - "cpufreq-nforce2", msg) /** * nforce2_calc_fsb - calculate FSB @@ -270,7 +268,7 @@ static int nforce2_target(struct cpufreq_policy *policy, if (freqs.old == freqs.new) return 0; - dprintk("Old CPU frequency %d kHz, new %d kHz\n", + pr_debug("Old CPU frequency %d kHz, new %d kHz\n", freqs.old, freqs.new); cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); @@ -282,7 +280,7 @@ static int nforce2_target(struct cpufreq_policy *policy, printk(KERN_ERR PFX "Changing FSB to %d failed\n", target_fsb); else - dprintk("Changed FSB successfully to %d\n", + pr_debug("Changed FSB successfully to %d\n", target_fsb); /* Enable IRQs */ diff --git a/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c b/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c index 32974cf84232f..ffe1f2c92ed3f 100644 --- a/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c +++ b/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c @@ -142,9 +142,6 @@ module_param(max_duration, int, 0444); #define POLICY_MIN_DIV 20 -#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ - "gx-suspmod", msg) - /** * we can detect a core multipiler from dir0_lsb * from GX1 datasheet p.56, @@ -191,7 +188,7 @@ static __init struct pci_dev *gx_detect_chipset(void) /* check if CPU is a MediaGX or a Geode. */ if ((boot_cpu_data.x86_vendor != X86_VENDOR_NSC) && (boot_cpu_data.x86_vendor != X86_VENDOR_CYRIX)) { - dprintk("error: no MediaGX/Geode processor found!\n"); + pr_debug("error: no MediaGX/Geode processor found!\n"); return NULL; } @@ -201,7 +198,7 @@ static __init struct pci_dev *gx_detect_chipset(void) return gx_pci; } - dprintk("error: no supported chipset found!\n"); + pr_debug("error: no supported chipset found!\n"); return NULL; } @@ -305,14 +302,14 @@ static void gx_set_cpuspeed(unsigned int khz) break; default: local_irq_restore(flags); - dprintk("fatal: try to set unknown chipset.\n"); + pr_debug("fatal: try to set unknown chipset.\n"); return; } } else { suscfg = gx_params->pci_suscfg & ~(SUSMOD); gx_params->off_duration = 0; gx_params->on_duration = 0; - dprintk("suspend modulation disabled: cpu runs 100%% speed.\n"); + pr_debug("suspend modulation disabled: cpu runs 100%% speed.\n"); } gx_write_byte(PCI_MODOFF, gx_params->off_duration); @@ -327,9 +324,9 @@ static void gx_set_cpuspeed(unsigned int khz) cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - dprintk("suspend modulation w/ duration of ON:%d us, OFF:%d us\n", + pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n", gx_params->on_duration * 32, gx_params->off_duration * 32); - dprintk("suspend modulation w/ clock speed: %d kHz.\n", freqs.new); + pr_debug("suspend modulation w/ clock speed: %d kHz.\n", freqs.new); } /**************************************************************** @@ -428,8 +425,8 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) stock_freq = maxfreq; curfreq = gx_get_cpuspeed(0); - dprintk("cpu max frequency is %d.\n", maxfreq); - dprintk("cpu current frequency is %dkHz.\n", curfreq); + pr_debug("cpu max frequency is %d.\n", maxfreq); + pr_debug("cpu current frequency is %dkHz.\n", curfreq); /* setup basic struct for cpufreq API */ policy->cpu = 0; @@ -475,7 +472,7 @@ static int __init cpufreq_gx_init(void) if (max_duration > 0xff) max_duration = 0xff; - dprintk("geode suspend modulation available.\n"); + pr_debug("geode suspend modulation available.\n"); params = kzalloc(sizeof(struct gxfreq_params), GFP_KERNEL); if (params == NULL) diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c index 03162dac6271d..7ef3b1b439ba9 100644 --- a/arch/x86/kernel/cpu/cpufreq/longhaul.c +++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c @@ -77,9 +77,6 @@ static int scale_voltage; static int disable_acpi_c3; static int revid_errata; -#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ - "longhaul", msg) - /* Clock ratios multiplied by 10 */ static int mults[32]; @@ -87,7 +84,6 @@ static int eblcr[32]; static int longhaul_version; static struct cpufreq_frequency_table *longhaul_table; -#ifdef CONFIG_CPU_FREQ_DEBUG static char speedbuffer[8]; static char *print_speed(int speed) @@ -106,7 +102,6 @@ static char *print_speed(int speed) return speedbuffer; } -#endif static unsigned int calc_speed(int mult) @@ -275,7 +270,7 @@ static void longhaul_setstate(unsigned int table_index) cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - dprintk("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", + pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", fsb, mult/10, mult%10, print_speed(speed/1000)); retry_loop: preempt_disable(); @@ -460,12 +455,12 @@ static int __cpuinit longhaul_get_ranges(void) break; } - dprintk("MinMult:%d.%dx MaxMult:%d.%dx\n", + pr_debug("MinMult:%d.%dx MaxMult:%d.%dx\n", minmult/10, minmult%10, maxmult/10, maxmult%10); highest_speed = calc_speed(maxmult); lowest_speed = calc_speed(minmult); - dprintk("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb, + pr_debug("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb, print_speed(lowest_speed/1000), print_speed(highest_speed/1000)); diff --git a/arch/x86/kernel/cpu/cpufreq/longrun.c b/arch/x86/kernel/cpu/cpufreq/longrun.c index d9f51367666b0..34ea359b370eb 100644 --- a/arch/x86/kernel/cpu/cpufreq/longrun.c +++ b/arch/x86/kernel/cpu/cpufreq/longrun.c @@ -15,9 +15,6 @@ #include #include -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ - "longrun", msg) - static struct cpufreq_driver longrun_driver; /** @@ -40,14 +37,14 @@ static void __cpuinit longrun_get_policy(struct cpufreq_policy *policy) u32 msr_lo, msr_hi; rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi); - dprintk("longrun flags are %x - %x\n", msr_lo, msr_hi); + pr_debug("longrun flags are %x - %x\n", msr_lo, msr_hi); if (msr_lo & 0x01) policy->policy = CPUFREQ_POLICY_PERFORMANCE; else policy->policy = CPUFREQ_POLICY_POWERSAVE; rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); - dprintk("longrun ctrl is %x - %x\n", msr_lo, msr_hi); + pr_debug("longrun ctrl is %x - %x\n", msr_lo, msr_hi); msr_lo &= 0x0000007F; msr_hi &= 0x0000007F; @@ -150,7 +147,7 @@ static unsigned int longrun_get(unsigned int cpu) return 0; cpuid(0x80860007, &eax, &ebx, &ecx, &edx); - dprintk("cpuid eax is %u\n", eax); + pr_debug("cpuid eax is %u\n", eax); return eax * 1000; } @@ -196,7 +193,7 @@ static int __cpuinit longrun_determine_freqs(unsigned int *low_freq, rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi); *high_freq = msr_lo * 1000; /* to kHz */ - dprintk("longrun table interface told %u - %u kHz\n", + pr_debug("longrun table interface told %u - %u kHz\n", *low_freq, *high_freq); if (*low_freq > *high_freq) @@ -207,7 +204,7 @@ static int __cpuinit longrun_determine_freqs(unsigned int *low_freq, /* set the upper border to the value determined during TSC init */ *high_freq = (cpu_khz / 1000); *high_freq = *high_freq * 1000; - dprintk("high frequency is %u kHz\n", *high_freq); + pr_debug("high frequency is %u kHz\n", *high_freq); /* get current borders */ rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi); @@ -233,7 +230,7 @@ static int __cpuinit longrun_determine_freqs(unsigned int *low_freq, /* restore values */ wrmsr(MSR_TMTA_LONGRUN_CTRL, save_lo, save_hi); } - dprintk("percentage is %u %%, freq is %u MHz\n", ecx, eax); + pr_debug("percentage is %u %%, freq is %u MHz\n", ecx, eax); /* performance_pctg = (current_freq - 
low_freq)/(high_freq - low_freq) * eqals @@ -249,7 +246,7 @@ static int __cpuinit longrun_determine_freqs(unsigned int *low_freq, edx = ((eax - ebx) * 100) / (100 - ecx); *low_freq = edx * 1000; /* back to kHz */ - dprintk("low frequency is %u kHz\n", *low_freq); + pr_debug("low frequency is %u kHz\n", *low_freq); if (*low_freq > *high_freq) *low_freq = *high_freq; diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index 52c93648e492f..6be3e0760c26f 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c @@ -35,8 +35,6 @@ #include "speedstep-lib.h" #define PFX "p4-clockmod: " -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ - "p4-clockmod", msg) /* * Duty Cycle (3bits), note DC_DISABLE is not specified in @@ -66,7 +64,7 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate) rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h); if (l & 0x01) - dprintk("CPU#%d currently thermal throttled\n", cpu); + pr_debug("CPU#%d currently thermal throttled\n", cpu); if (has_N44_O17_errata[cpu] && (newstate == DC_25PT || newstate == DC_DFLT)) @@ -74,10 +72,10 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate) rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h); if (newstate == DC_DISABLE) { - dprintk("CPU#%d disabling modulation\n", cpu); + pr_debug("CPU#%d disabling modulation\n", cpu); wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h); } else { - dprintk("CPU#%d setting duty cycle to %d%%\n", + pr_debug("CPU#%d setting duty cycle to %d%%\n", cpu, ((125 * newstate) / 10)); /* bits 63 - 5 : reserved * bit 4 : enable/disable @@ -217,7 +215,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) case 0x0f11: case 0x0f12: has_N44_O17_errata[policy->cpu] = 1; - dprintk("has errata -- disabling low frequencies\n"); + pr_debug("has errata -- disabling low frequencies\n"); } if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4D && diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c index 4a5a42b842adf..f34ac52b9b8d7 100644 --- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c @@ -39,7 +39,7 @@ #include -#define PCC_VERSION "1.00.00" +#define PCC_VERSION "1.10.00" #define POLL_LOOPS 300 #define CMD_COMPLETE 0x1 @@ -48,9 +48,6 @@ #define BUF_SZ 4 -#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ - "pcc-cpufreq", msg) - struct pcc_register_resource { u8 descriptor; u16 length; @@ -102,7 +99,7 @@ static struct acpi_generic_address doorbell; static u64 doorbell_preserve; static u64 doorbell_write; -static u8 OSC_UUID[16] = {0x63, 0x9B, 0x2C, 0x9F, 0x70, 0x91, 0x49, 0x1f, +static u8 OSC_UUID[16] = {0x9F, 0x2C, 0x9B, 0x63, 0x91, 0x70, 0x1f, 0x49, 0xBB, 0x4F, 0xA5, 0x98, 0x2F, 0xA1, 0xB5, 0x46}; struct pcc_cpu { @@ -152,7 +149,7 @@ static unsigned int pcc_get_freq(unsigned int cpu) spin_lock(&pcc_lock); - dprintk("get: get_freq for CPU %d\n", cpu); + pr_debug("get: get_freq for CPU %d\n", cpu); pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); input_buffer = 0x1; @@ -170,7 +167,7 @@ static unsigned int pcc_get_freq(unsigned int cpu) status = ioread16(&pcch_hdr->status); if (status != CMD_COMPLETE) { - dprintk("get: FAILED: for CPU %d, status is %d\n", + pr_debug("get: FAILED: for CPU %d, status is %d\n", cpu, status); goto cmd_incomplete; } @@ -178,14 +175,14 @@ static unsigned int pcc_get_freq(unsigned int cpu) curr_freq = (((ioread32(&pcch_hdr->nominal) * (output_buffer & 0xff)) / 100) * 1000); - dprintk("get: SUCCESS: (virtual) output_offset for cpu %d is " - "0x%x, contains a value of: 0x%x. Speed is: %d MHz\n", + pr_debug("get: SUCCESS: (virtual) output_offset for cpu %d is " + "0x%p, contains a value of: 0x%x. Speed is: %d MHz\n", cpu, (pcch_virt_addr + pcc_cpu_data->output_offset), output_buffer, curr_freq); freq_limit = (output_buffer >> 8) & 0xff; if (freq_limit != 0xff) { - dprintk("get: frequency for cpu %d is being temporarily" + pr_debug("get: frequency for cpu %d is being temporarily" " capped at %d\n", cpu, curr_freq); } @@ -212,8 +209,8 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy, cpu = policy->cpu; pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); - dprintk("target: CPU %d should go to target freq: %d " - "(virtual) input_offset is 0x%x\n", + pr_debug("target: CPU %d should go to target freq: %d " + "(virtual) input_offset is 0x%p\n", cpu, target_freq, (pcch_virt_addr + pcc_cpu_data->input_offset)); @@ -234,14 +231,14 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy, status = ioread16(&pcch_hdr->status); if (status != CMD_COMPLETE) { - dprintk("target: FAILED for cpu %d, with status: 0x%x\n", + pr_debug("target: FAILED for cpu %d, with status: 0x%x\n", cpu, status); goto cmd_incomplete; } iowrite16(0, &pcch_hdr->status); cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - dprintk("target: was SUCCESSFUL for cpu %d\n", cpu); + pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu); spin_unlock(&pcc_lock); return 0; @@ -293,7 +290,7 @@ static int pcc_get_offset(int cpu) memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ); memset_io((pcch_virt_addr + pcc_cpu_data->output_offset), 0, BUF_SZ); - dprintk("pcc_get_offset: for CPU %d: pcc_cpu_data " + pr_debug("pcc_get_offset: for CPU %d: pcc_cpu_data " "input_offset: 0x%x, pcc_cpu_data output_offset: 0x%x\n", cpu, pcc_cpu_data->input_offset, pcc_cpu_data->output_offset); out_free: @@ -412,7 +409,7 @@ static int __init pcc_cpufreq_probe(void) if (ACPI_SUCCESS(status)) { ret = pcc_cpufreq_do_osc(&osc_handle); if (ret) - dprintk("probe: _OSC evaluation did not succeed\n"); + pr_debug("probe: _OSC evaluation did not succeed\n"); /* Firmware's use of _OSC is optional */ ret = 0; } @@ -435,7 +432,7 @@ static int __init pcc_cpufreq_probe(void) mem_resource = (struct pcc_memory_resource *)member->buffer.pointer; - dprintk("probe: mem_resource 
descriptor: 0x%x," + pr_debug("probe: mem_resource descriptor: 0x%x," " length: %d, space_id: %d, resource_usage: %d," " type_specific: %d, granularity: 0x%llx," " minimum: 0x%llx, maximum: 0x%llx," @@ -455,13 +452,13 @@ static int __init pcc_cpufreq_probe(void) pcch_virt_addr = ioremap_nocache(mem_resource->minimum, mem_resource->address_length); if (pcch_virt_addr == NULL) { - dprintk("probe: could not map shared mem region\n"); + pr_debug("probe: could not map shared mem region\n"); goto out_free; } pcch_hdr = pcch_virt_addr; - dprintk("probe: PCCH header (virtual) addr: 0x%p\n", pcch_hdr); - dprintk("probe: PCCH header is at physical address: 0x%llx," + pr_debug("probe: PCCH header (virtual) addr: 0x%p\n", pcch_hdr); + pr_debug("probe: PCCH header is at physical address: 0x%llx," " signature: 0x%x, length: %d bytes, major: %d, minor: %d," " supported features: 0x%x, command field: 0x%x," " status field: 0x%x, nominal latency: %d us\n", @@ -471,7 +468,7 @@ static int __init pcc_cpufreq_probe(void) ioread16(&pcch_hdr->command), ioread16(&pcch_hdr->status), ioread32(&pcch_hdr->latency)); - dprintk("probe: min time between commands: %d us," + pr_debug("probe: min time between commands: %d us," " max time between commands: %d us," " nominal CPU frequency: %d MHz," " minimum CPU frequency: %d MHz," @@ -496,7 +493,7 @@ static int __init pcc_cpufreq_probe(void) doorbell.access_width = 64; doorbell.address = reg_resource->address; - dprintk("probe: doorbell: space_id is %d, bit_width is %d, " + pr_debug("probe: doorbell: space_id is %d, bit_width is %d, " "bit_offset is %d, access_width is %d, address is 0x%llx\n", doorbell.space_id, doorbell.bit_width, doorbell.bit_offset, doorbell.access_width, reg_resource->address); @@ -517,7 +514,7 @@ static int __init pcc_cpufreq_probe(void) doorbell_write = member->integer.value; - dprintk("probe: doorbell_preserve: 0x%llx," + pr_debug("probe: doorbell_preserve: 0x%llx," " doorbell_write: 0x%llx\n", doorbell_preserve, doorbell_write); @@ -552,7 +549,7 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy) result = pcc_get_offset(cpu); if (result) { - dprintk("init: PCCP evaluation failed\n"); + pr_debug("init: PCCP evaluation failed\n"); goto out; } @@ -563,12 +560,12 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy) policy->cur = pcc_get_freq(cpu); if (!policy->cur) { - dprintk("init: Unable to get current CPU frequency\n"); + pr_debug("init: Unable to get current CPU frequency\n"); result = -EINVAL; goto out; } - dprintk("init: policy->max is %d, policy->min is %d\n", + pr_debug("init: policy->max is %d, policy->min is %d\n", policy->max, policy->min); out: return result; @@ -599,7 +596,7 @@ static int __init pcc_cpufreq_init(void) ret = pcc_cpufreq_probe(); if (ret) { - dprintk("pcc_cpufreq_init: PCCH evaluation failed\n"); + pr_debug("pcc_cpufreq_init: PCCH evaluation failed\n"); return ret; } diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c index 4a45fd6e41ba9..d71d9f3723590 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c @@ -68,7 +68,6 @@ union powernow_acpi_control_t { }; #endif -#ifdef CONFIG_CPU_FREQ_DEBUG /* divide by 1000 to get VCore voltage in V. 
*/ static const int mobile_vid_table[32] = { 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650, @@ -76,7 +75,6 @@ static const int mobile_vid_table[32] = { 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100, 1075, 1050, 1025, 1000, 975, 950, 925, 0, }; -#endif /* divide by 10 to get FID. */ static const int fid_codes[32] = { @@ -103,9 +101,6 @@ static unsigned int fsb; static unsigned int latency; static char have_a0; -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ - "powernow-k7", msg) - static int check_fsb(unsigned int fsbspeed) { int delta; @@ -209,7 +204,7 @@ static int get_ranges(unsigned char *pst) vid = *pst++; powernow_table[j].index |= (vid << 8); /* upper 8 bits */ - dprintk(" FID: 0x%x (%d.%dx [%dMHz]) " + pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) " "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, fid_codes[fid] % 10, speed/1000, vid, mobile_vid_table[vid]/1000, @@ -367,7 +362,7 @@ static int powernow_acpi_init(void) unsigned int speed, speed_mhz; pc.val = (unsigned long) state->control; - dprintk("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n", + pr_debug("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n", i, (u32) state->core_frequency, (u32) state->power, @@ -401,7 +396,7 @@ static int powernow_acpi_init(void) invalidate_entry(i); } - dprintk(" FID: 0x%x (%d.%dx [%dMHz]) " + pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) " "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, fid_codes[fid] % 10, speed_mhz, vid, mobile_vid_table[vid]/1000, @@ -409,7 +404,7 @@ static int powernow_acpi_init(void) if (state->core_frequency != speed_mhz) { state->core_frequency = speed_mhz; - dprintk(" Corrected ACPI frequency to %d\n", + pr_debug(" Corrected ACPI frequency to %d\n", speed_mhz); } @@ -453,8 +448,8 @@ static int powernow_acpi_init(void) static void print_pst_entry(struct pst_s *pst, unsigned int j) { - dprintk("PST:%d (@%p)\n", j, pst); - dprintk(" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n", + pr_debug("PST:%d (@%p)\n", j, pst); + pr_debug(" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n", pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid); } @@ -474,20 +469,20 @@ static int powernow_decode_bios(int maxfid, int startvid) p = phys_to_virt(i); if (memcmp(p, "AMDK7PNOW!", 10) == 0) { - dprintk("Found PSB header at %p\n", p); + pr_debug("Found PSB header at %p\n", p); psb = (struct psb_s *) p; - dprintk("Table version: 0x%x\n", psb->tableversion); + pr_debug("Table version: 0x%x\n", psb->tableversion); if (psb->tableversion != 0x12) { printk(KERN_INFO PFX "Sorry, only v1.2 tables" " supported right now\n"); return -ENODEV; } - dprintk("Flags: 0x%x\n", psb->flags); + pr_debug("Flags: 0x%x\n", psb->flags); if ((psb->flags & 1) == 0) - dprintk("Mobile voltage regulator\n"); + pr_debug("Mobile voltage regulator\n"); else - dprintk("Desktop voltage regulator\n"); + pr_debug("Desktop voltage regulator\n"); latency = psb->settlingtime; if (latency < 100) { @@ -497,9 +492,9 @@ static int powernow_decode_bios(int maxfid, int startvid) "Correcting.\n", latency); latency = 100; } - dprintk("Settling Time: %d microseconds.\n", + pr_debug("Settling Time: %d microseconds.\n", psb->settlingtime); - dprintk("Has %d PST tables. (Only dumping ones " + pr_debug("Has %d PST tables. 
(Only dumping ones " "relevant to this CPU).\n", psb->numpst); @@ -650,7 +645,7 @@ static int __cpuinit powernow_cpu_init(struct cpufreq_policy *policy) printk(KERN_WARNING PFX "can not determine bus frequency\n"); return -EINVAL; } - dprintk("FSB: %3dMHz\n", fsb/1000); + pr_debug("FSB: %3dMHz\n", fsb/1000); if (dmi_check_system(powernow_dmi_table) || acpi_force) { printk(KERN_INFO PFX "PSB/PST known to be broken. " diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index c567dec854f69..19db92af94986 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -139,7 +139,7 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data) } do { if (i++ > 10000) { - dprintk("detected change pending stuck\n"); + pr_debug("detected change pending stuck\n"); return 1; } rdmsr(MSR_FIDVID_STATUS, lo, hi); @@ -176,7 +176,7 @@ static void fidvid_msr_init(void) fid = lo & MSR_S_LO_CURRENT_FID; lo = fid | (vid << MSR_C_LO_VID_SHIFT); hi = MSR_C_HI_STP_GNT_BENIGN; - dprintk("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi); + pr_debug("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi); wrmsr(MSR_FIDVID_CTL, lo, hi); } @@ -196,7 +196,7 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid) lo |= (data->currvid << MSR_C_LO_VID_SHIFT); lo |= MSR_C_LO_INIT_FID_VID; - dprintk("writing fid 0x%x, lo 0x%x, hi 0x%x\n", + pr_debug("writing fid 0x%x, lo 0x%x, hi 0x%x\n", fid, lo, data->plllock * PLL_LOCK_CONVERSION); do { @@ -244,7 +244,7 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid) lo |= (vid << MSR_C_LO_VID_SHIFT); lo |= MSR_C_LO_INIT_FID_VID; - dprintk("writing vid 0x%x, lo 0x%x, hi 0x%x\n", + pr_debug("writing vid 0x%x, lo 0x%x, hi 0x%x\n", vid, lo, STOP_GRANT_5NS); do { @@ -325,7 +325,7 @@ static int transition_fid_vid(struct powernow_k8_data *data, return 1; } - dprintk("transitioned (cpu%d): new fid 0x%x, vid 0x%x\n", + pr_debug("transitioned (cpu%d): new fid 0x%x, vid 0x%x\n", smp_processor_id(), data->currfid, data->currvid); return 0; @@ -339,7 +339,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 savefid = data->currfid; u32 maxvid, lo, rvomult = 1; - dprintk("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, " + pr_debug("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, " "reqvid 0x%x, rvo 0x%x\n", smp_processor_id(), data->currfid, data->currvid, reqvid, data->rvo); @@ -349,12 +349,12 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, rvosteps *= rvomult; rdmsr(MSR_FIDVID_STATUS, lo, maxvid); maxvid = 0x1f & (maxvid >> 16); - dprintk("ph1 maxvid=0x%x\n", maxvid); + pr_debug("ph1 maxvid=0x%x\n", maxvid); if (reqvid < maxvid) /* lower numbers are higher voltages */ reqvid = maxvid; while (data->currvid > reqvid) { - dprintk("ph1: curr 0x%x, req vid 0x%x\n", + pr_debug("ph1: curr 0x%x, req vid 0x%x\n", data->currvid, reqvid); if (decrease_vid_code_by_step(data, reqvid, data->vidmvs)) return 1; @@ -365,7 +365,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, if (data->currvid == maxvid) { rvosteps = 0; } else { - dprintk("ph1: changing vid for rvo, req 0x%x\n", + pr_debug("ph1: changing vid for rvo, req 0x%x\n", data->currvid - 1); if (decrease_vid_code_by_step(data, data->currvid-1, 1)) return 1; @@ -382,7 +382,7 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data, return 1; } - dprintk("ph1 complete, currfid 0x%x, currvid 0x%x\n", + 
pr_debug("ph1 complete, currfid 0x%x, currvid 0x%x\n", data->currfid, data->currvid); return 0; @@ -400,7 +400,7 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) return 0; } - dprintk("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, " + pr_debug("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, " "reqfid 0x%x\n", smp_processor_id(), data->currfid, data->currvid, reqfid); @@ -457,7 +457,7 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) return 1; } - dprintk("ph2 complete, currfid 0x%x, currvid 0x%x\n", + pr_debug("ph2 complete, currfid 0x%x, currvid 0x%x\n", data->currfid, data->currvid); return 0; @@ -470,7 +470,7 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 savefid = data->currfid; u32 savereqvid = reqvid; - dprintk("ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n", + pr_debug("ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n", smp_processor_id(), data->currfid, data->currvid); @@ -498,17 +498,17 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, return 1; if (savereqvid != data->currvid) { - dprintk("ph3 failed, currvid 0x%x\n", data->currvid); + pr_debug("ph3 failed, currvid 0x%x\n", data->currvid); return 1; } if (savefid != data->currfid) { - dprintk("ph3 failed, currfid changed 0x%x\n", + pr_debug("ph3 failed, currfid changed 0x%x\n", data->currfid); return 1; } - dprintk("ph3 complete, currfid 0x%x, currvid 0x%x\n", + pr_debug("ph3 complete, currfid 0x%x, currvid 0x%x\n", data->currfid, data->currvid); return 0; @@ -708,7 +708,7 @@ static int fill_powernow_table(struct powernow_k8_data *data, return -EIO; } - dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid); + pr_debug("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid); data->powernow_table = powernow_table; if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu) print_basics(data); @@ -718,7 +718,7 @@ static int fill_powernow_table(struct powernow_k8_data *data, (pst[j].vid == data->currvid)) return 0; - dprintk("currfid/vid do not match PST, ignoring\n"); + pr_debug("currfid/vid do not match PST, ignoring\n"); return 0; } @@ -740,36 +740,36 @@ static int find_psb_table(struct powernow_k8_data *data) if (memcmp(psb, PSB_ID_STRING, PSB_ID_STRING_LEN) != 0) continue; - dprintk("found PSB header at 0x%p\n", psb); + pr_debug("found PSB header at 0x%p\n", psb); - dprintk("table vers: 0x%x\n", psb->tableversion); + pr_debug("table vers: 0x%x\n", psb->tableversion); if (psb->tableversion != PSB_VERSION_1_4) { printk(KERN_ERR FW_BUG PFX "PSB table is not v1.4\n"); return -ENODEV; } - dprintk("flags: 0x%x\n", psb->flags1); + pr_debug("flags: 0x%x\n", psb->flags1); if (psb->flags1) { printk(KERN_ERR FW_BUG PFX "unknown flags\n"); return -ENODEV; } data->vstable = psb->vstable; - dprintk("voltage stabilization time: %d(*20us)\n", + pr_debug("voltage stabilization time: %d(*20us)\n", data->vstable); - dprintk("flags2: 0x%x\n", psb->flags2); + pr_debug("flags2: 0x%x\n", psb->flags2); data->rvo = psb->flags2 & 3; data->irt = ((psb->flags2) >> 2) & 3; mvs = ((psb->flags2) >> 4) & 3; data->vidmvs = 1 << mvs; data->batps = ((psb->flags2) >> 6) & 3; - dprintk("ramp voltage offset: %d\n", data->rvo); - dprintk("isochronous relief time: %d\n", data->irt); - dprintk("maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs); + pr_debug("ramp voltage offset: %d\n", data->rvo); + pr_debug("isochronous relief time: %d\n", data->irt); + pr_debug("maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs); - 
dprintk("numpst: 0x%x\n", psb->num_tables); + pr_debug("numpst: 0x%x\n", psb->num_tables); cpst = psb->num_tables; if ((psb->cpuid == 0x00000fc0) || (psb->cpuid == 0x00000fe0)) { @@ -784,13 +784,13 @@ static int find_psb_table(struct powernow_k8_data *data) } data->plllock = psb->plllocktime; - dprintk("plllocktime: 0x%x (units 1us)\n", psb->plllocktime); - dprintk("maxfid: 0x%x\n", psb->maxfid); - dprintk("maxvid: 0x%x\n", psb->maxvid); + pr_debug("plllocktime: 0x%x (units 1us)\n", psb->plllocktime); + pr_debug("maxfid: 0x%x\n", psb->maxfid); + pr_debug("maxvid: 0x%x\n", psb->maxvid); maxvid = psb->maxvid; data->numps = psb->numps; - dprintk("numpstates: 0x%x\n", data->numps); + pr_debug("numpstates: 0x%x\n", data->numps); return fill_powernow_table(data, (struct pst_s *)(psb+1), maxvid); } @@ -835,13 +835,13 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) u64 control, status; if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { - dprintk("register performance failed: bad ACPI data\n"); + pr_debug("register performance failed: bad ACPI data\n"); return -EIO; } /* verify the data contained in the ACPI structures */ if (data->acpi_data.state_count <= 1) { - dprintk("No ACPI P-States\n"); + pr_debug("No ACPI P-States\n"); goto err_out; } @@ -850,7 +850,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) if ((control != ACPI_ADR_SPACE_FIXED_HARDWARE) || (status != ACPI_ADR_SPACE_FIXED_HARDWARE)) { - dprintk("Invalid control/status registers (%x - %x)\n", + pr_debug("Invalid control/status registers (%llx - %llx)\n", control, status); goto err_out; } @@ -859,7 +859,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) * (data->acpi_data.state_count + 1)), GFP_KERNEL); if (!powernow_table) { - dprintk("powernow_table memory alloc failure\n"); + pr_debug("powernow_table memory alloc failure\n"); goto err_out; } @@ -929,7 +929,7 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, } rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi); if (!(hi & HW_PSTATE_VALID_MASK)) { - dprintk("invalid pstate %d, ignoring\n", index); + pr_debug("invalid pstate %d, ignoring\n", index); invalidate_entry(powernow_table, i); continue; } @@ -969,7 +969,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, vid = (control >> VID_SHIFT) & VID_MASK; } - dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); + pr_debug(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); index = fid | (vid<<8); powernow_table[i].index = index; @@ -979,7 +979,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, /* verify frequency is OK */ if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) { - dprintk("invalid freq %u kHz, ignoring\n", freq); + pr_debug("invalid freq %u kHz, ignoring\n", freq); invalidate_entry(powernow_table, i); continue; } @@ -987,7 +987,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, /* verify voltage is OK - * BIOSs are using "off" to indicate invalid */ if (vid == VID_OFF) { - dprintk("invalid vid %u, ignoring\n", vid); + pr_debug("invalid vid %u, ignoring\n", vid); invalidate_entry(powernow_table, i); continue; } @@ -1048,7 +1048,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, int res, i; struct cpufreq_freqs freqs; - dprintk("cpu %d transition to index %u\n", smp_processor_id(), index); + pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index); /* 
fid/vid correctness check for k8 */ /* fid are the lower 8 bits of the index we stored into @@ -1058,18 +1058,18 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, fid = data->powernow_table[index].index & 0xFF; vid = (data->powernow_table[index].index & 0xFF00) >> 8; - dprintk("table matched fid 0x%x, giving vid 0x%x\n", fid, vid); + pr_debug("table matched fid 0x%x, giving vid 0x%x\n", fid, vid); if (query_current_values_with_pending_wait(data)) return 1; if ((data->currvid == vid) && (data->currfid == fid)) { - dprintk("target matches current values (fid 0x%x, vid 0x%x)\n", + pr_debug("target matches current values (fid 0x%x, vid 0x%x)\n", fid, vid); return 0; } - dprintk("cpu %d, changing to fid 0x%x, vid 0x%x\n", + pr_debug("cpu %d, changing to fid 0x%x, vid 0x%x\n", smp_processor_id(), fid, vid); freqs.old = find_khz_freq_from_fid(data->currfid); freqs.new = find_khz_freq_from_fid(fid); @@ -1097,7 +1097,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, int res, i; struct cpufreq_freqs freqs; - dprintk("cpu %d transition to index %u\n", smp_processor_id(), index); + pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index); /* get MSR index for hardware pstate transition */ pstate = index & HW_PSTATE_MASK; @@ -1157,14 +1157,14 @@ static int powernowk8_target(struct cpufreq_policy *pol, goto err_out; } - dprintk("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n", + pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n", pol->cpu, targfreq, pol->min, pol->max, relation); if (query_current_values_with_pending_wait(data)) goto err_out; if (cpu_family != CPU_HW_PSTATE) { - dprintk("targ: curr fid 0x%x, vid 0x%x\n", + pr_debug("targ: curr fid 0x%x, vid 0x%x\n", data->currfid, data->currvid); if ((checkvid != data->currvid) || @@ -1320,7 +1320,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) data->currpstate); else pol->cur = find_khz_freq_from_fid(data->currfid); - dprintk("policy current frequency %d kHz\n", pol->cur); + pr_debug("policy current frequency %d kHz\n", pol->cur); /* min/max the cpu is capable of */ if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) { @@ -1338,10 +1338,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu); if (cpu_family == CPU_HW_PSTATE) - dprintk("cpu_init done, current pstate 0x%x\n", + pr_debug("cpu_init done, current pstate 0x%x\n", data->currpstate); else - dprintk("cpu_init done, current fid 0x%x, vid 0x%x\n", + pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n", data->currfid, data->currvid); per_cpu(powernow_data, pol->cpu) = data; @@ -1587,7 +1587,7 @@ static int __cpuinit powernowk8_init(void) /* driver entry point for term */ static void __exit powernowk8_exit(void) { - dprintk("exit\n"); + pr_debug("exit\n"); if (boot_cpu_has(X86_FEATURE_CPB)) { msrs_free(msrs); diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h index df3529b1c02d7..3744d26cdc2b3 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h @@ -211,8 +211,6 @@ struct pst_s { u8 vid; }; -#define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "powernow-k8", msg) - static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid, u32 regfid); static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid); diff --git a/arch/x86/kernel/cpu/cpufreq/sc520_freq.c b/arch/x86/kernel/cpu/cpufreq/sc520_freq.c index 435a996a613a6..1e205e6b1727e 100644 --- a/arch/x86/kernel/cpu/cpufreq/sc520_freq.c +++ b/arch/x86/kernel/cpu/cpufreq/sc520_freq.c @@ -29,8 +29,6 @@ static __u8 __iomem *cpuctl; -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ - "sc520_freq", msg) #define PFX "sc520_freq: " static struct cpufreq_frequency_table sc520_freq_table[] = { @@ -66,7 +64,7 @@ static void sc520_freq_set_cpu_state(unsigned int state) cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - dprintk("attempting to set frequency to %i kHz\n", + pr_debug("attempting to set frequency to %i kHz\n", sc520_freq_table[state].frequency); local_irq_disable(); @@ -161,7 +159,7 @@ static int __init sc520_freq_init(void) /* Test if we have the right hardware */ if (c->x86_vendor != X86_VENDOR_AMD || c->x86 != 4 || c->x86_model != 9) { - dprintk("no Elan SC520 processor found!\n"); + pr_debug("no Elan SC520 processor found!\n"); return -ENODEV; } cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1); diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index 9b1ff37de46ae..6ea3455def216 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c @@ -29,9 +29,6 @@ #define PFX "speedstep-centrino: " #define MAINTAINER "cpufreq@vger.kernel.org" -#define dprintk(msg...) \ - cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg) - #define INTEL_MSR_RANGE (0xffff) struct cpu_id @@ -244,7 +241,7 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy) if (model->cpu_id == NULL) { /* No match at all */ - dprintk("no support for CPU model \"%s\": " + pr_debug("no support for CPU model \"%s\": " "send /proc/cpuinfo to " MAINTAINER "\n", cpu->x86_model_id); return -ENOENT; @@ -252,15 +249,15 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy) if (model->op_points == NULL) { /* Matched a non-match */ - dprintk("no table support for CPU model \"%s\"\n", + pr_debug("no table support for CPU model \"%s\"\n", cpu->x86_model_id); - dprintk("try using the acpi-cpufreq driver\n"); + pr_debug("try using the acpi-cpufreq driver\n"); return -ENOENT; } per_cpu(centrino_model, policy->cpu) = model; - dprintk("found \"%s\": max frequency: %dkHz\n", + pr_debug("found \"%s\": max frequency: %dkHz\n", model->model_name, model->max_freq); return 0; @@ -369,7 +366,7 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i]; if (!per_cpu(centrino_cpu, policy->cpu)) { - dprintk("found unsupported CPU with " + pr_debug("found unsupported CPU with " "Enhanced SpeedStep: send /proc/cpuinfo to " MAINTAINER "\n"); return -ENODEV; @@ -385,7 +382,7 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP; - dprintk("trying to enable Enhanced SpeedStep (%x)\n", l); + pr_debug("trying to enable Enhanced SpeedStep (%x)\n", l); wrmsr(MSR_IA32_MISC_ENABLE, l, h); /* check to see if it stuck */ @@ -402,7 +399,7 @@ static int centrino_cpu_init(struct cpufreq_policy *policy) /* 10uS transition 
latency */ policy->cur = freq; - dprintk("centrino_cpu_init: cur=%dkHz\n", policy->cur); + pr_debug("centrino_cpu_init: cur=%dkHz\n", policy->cur); ret = cpufreq_frequency_table_cpuinfo(policy, per_cpu(centrino_model, policy->cpu)->op_points); @@ -498,7 +495,7 @@ static int centrino_target (struct cpufreq_policy *policy, good_cpu = j; if (good_cpu >= nr_cpu_ids) { - dprintk("couldn't limit to CPUs in this domain\n"); + pr_debug("couldn't limit to CPUs in this domain\n"); retval = -EAGAIN; if (first_cpu) { /* We haven't started the transition yet. */ @@ -512,7 +509,7 @@ static int centrino_target (struct cpufreq_policy *policy, if (first_cpu) { rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h); if (msr == (oldmsr & 0xffff)) { - dprintk("no change needed - msr was and needs " + pr_debug("no change needed - msr was and needs " "to be %x\n", oldmsr); retval = 0; goto out; @@ -521,7 +518,7 @@ static int centrino_target (struct cpufreq_policy *policy, freqs.old = extract_clock(oldmsr, cpu, 0); freqs.new = extract_clock(msr, cpu, 0); - dprintk("target=%dkHz old=%d new=%d msr=%04x\n", + pr_debug("target=%dkHz old=%d new=%d msr=%04x\n", target_freq, freqs.old, freqs.new, msr); for_each_cpu(k, policy->cpus) { diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c index 561758e951802..a748ce782fee7 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c @@ -53,10 +53,6 @@ static struct cpufreq_frequency_table speedstep_freqs[] = { }; -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ - "speedstep-ich", msg) - - /** * speedstep_find_register - read the PMBASE address * @@ -80,7 +76,7 @@ static int speedstep_find_register(void) return -ENODEV; } - dprintk("pmbase is 0x%x\n", pmbase); + pr_debug("pmbase is 0x%x\n", pmbase); return 0; } @@ -106,13 +102,13 @@ static void speedstep_set_state(unsigned int state) /* read state */ value = inb(pmbase + 0x50); - dprintk("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); + pr_debug("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); /* write new state */ value &= 0xFE; value |= state; - dprintk("writing 0x%x to pmbase 0x%x + 0x50\n", value, pmbase); + pr_debug("writing 0x%x to pmbase 0x%x + 0x50\n", value, pmbase); /* Disable bus master arbitration */ pm2_blk = inb(pmbase + 0x20); @@ -132,10 +128,10 @@ static void speedstep_set_state(unsigned int state) /* Enable IRQs */ local_irq_restore(flags); - dprintk("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); + pr_debug("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); if (state == (value & 0x1)) - dprintk("change to %u MHz succeeded\n", + pr_debug("change to %u MHz succeeded\n", speedstep_get_frequency(speedstep_processor) / 1000); else printk(KERN_ERR "cpufreq: change failed - I/O error\n"); @@ -165,7 +161,7 @@ static int speedstep_activate(void) pci_read_config_word(speedstep_chipset_dev, 0x00A0, &value); if (!(value & 0x08)) { value |= 0x08; - dprintk("activating SpeedStep (TM) registers\n"); + pr_debug("activating SpeedStep (TM) registers\n"); pci_write_config_word(speedstep_chipset_dev, 0x00A0, value); } @@ -218,7 +214,7 @@ static unsigned int speedstep_detect_chipset(void) return 2; /* 2-M */ if (hostbridge->revision < 5) { - dprintk("hostbridge does not support speedstep\n"); + pr_debug("hostbridge does not support speedstep\n"); speedstep_chipset_dev = NULL; pci_dev_put(hostbridge); return 0; @@ -246,7 +242,7 @@ static unsigned int 
speedstep_get(unsigned int cpu) if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0) BUG(); - dprintk("detected %u kHz as current frequency\n", speed); + pr_debug("detected %u kHz as current frequency\n", speed); return speed; } @@ -276,7 +272,7 @@ static int speedstep_target(struct cpufreq_policy *policy, freqs.new = speedstep_freqs[newstate].frequency; freqs.cpu = policy->cpu; - dprintk("transiting from %u to %u kHz\n", freqs.old, freqs.new); + pr_debug("transiting from %u to %u kHz\n", freqs.old, freqs.new); /* no transition necessary */ if (freqs.old == freqs.new) @@ -351,7 +347,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) if (!speed) return -EIO; - dprintk("currently at %s speed setting - %i MHz\n", + pr_debug("currently at %s speed setting - %i MHz\n", (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? "low" : "high", (speed / 1000)); @@ -405,14 +401,14 @@ static int __init speedstep_init(void) /* detect processor */ speedstep_processor = speedstep_detect_processor(); if (!speedstep_processor) { - dprintk("Intel(R) SpeedStep(TM) capable processor " + pr_debug("Intel(R) SpeedStep(TM) capable processor " "not found\n"); return -ENODEV; } /* detect chipset */ if (!speedstep_detect_chipset()) { - dprintk("Intel(R) SpeedStep(TM) for this chipset not " + pr_debug("Intel(R) SpeedStep(TM) for this chipset not " "(yet) available.\n"); return -ENODEV; } diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c index a94ec6be69fa5..8af2d2fd9d511 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c @@ -18,9 +18,6 @@ #include #include "speedstep-lib.h" -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ - "speedstep-lib", msg) - #define PFX "speedstep-lib: " #ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK @@ -75,7 +72,7 @@ static unsigned int pentium3_get_frequency(enum speedstep_processor processor) /* read MSR 0x2a - we only need the low 32 bits */ rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); - dprintk("P3 - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); + pr_debug("P3 - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); msr_tmp = msr_lo; /* decode the FSB */ @@ -89,7 +86,7 @@ static unsigned int pentium3_get_frequency(enum speedstep_processor processor) /* decode the multiplier */ if (processor == SPEEDSTEP_CPU_PIII_C_EARLY) { - dprintk("workaround for early PIIIs\n"); + pr_debug("workaround for early PIIIs\n"); msr_lo &= 0x03c00000; } else msr_lo &= 0x0bc00000; @@ -100,7 +97,7 @@ static unsigned int pentium3_get_frequency(enum speedstep_processor processor) j++; } - dprintk("speed is %u\n", + pr_debug("speed is %u\n", (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100)); return msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100; @@ -112,7 +109,7 @@ static unsigned int pentiumM_get_frequency(void) u32 msr_lo, msr_tmp; rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); - dprintk("PM - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); + pr_debug("PM - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); /* see table B-2 of 24547212.pdf */ if (msr_lo & 0x00040000) { @@ -122,7 +119,7 @@ static unsigned int pentiumM_get_frequency(void) } msr_tmp = (msr_lo >> 22) & 0x1f; - dprintk("bits 22-26 are 0x%x, speed is %u\n", + pr_debug("bits 22-26 are 0x%x, speed is %u\n", msr_tmp, (msr_tmp * 100 * 1000)); return msr_tmp * 100 * 1000; @@ -160,11 +157,11 @@ static unsigned int pentium_core_get_frequency(void) } 
rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); - dprintk("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", + pr_debug("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); msr_tmp = (msr_lo >> 22) & 0x1f; - dprintk("bits 22-26 are 0x%x, speed is %u\n", + pr_debug("bits 22-26 are 0x%x, speed is %u\n", msr_tmp, (msr_tmp * fsb)); ret = (msr_tmp * fsb); @@ -190,7 +187,7 @@ static unsigned int pentium4_get_frequency(void) rdmsr(0x2c, msr_lo, msr_hi); - dprintk("P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi); + pr_debug("P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi); /* decode the FSB: see IA-32 Intel (C) Architecture Software * Developer's Manual, Volume 3: System Prgramming Guide, @@ -217,7 +214,7 @@ static unsigned int pentium4_get_frequency(void) /* Multiplier. */ mult = msr_lo >> 24; - dprintk("P4 - FSB %u kHz; Multiplier %u; Speed %u kHz\n", + pr_debug("P4 - FSB %u kHz; Multiplier %u; Speed %u kHz\n", fsb, mult, (fsb * mult)); ret = (fsb * mult); @@ -257,7 +254,7 @@ unsigned int speedstep_detect_processor(void) struct cpuinfo_x86 *c = &cpu_data(0); u32 ebx, msr_lo, msr_hi; - dprintk("x86: %x, model: %x\n", c->x86, c->x86_model); + pr_debug("x86: %x, model: %x\n", c->x86, c->x86_model); if ((c->x86_vendor != X86_VENDOR_INTEL) || ((c->x86 != 6) && (c->x86 != 0xF))) @@ -272,7 +269,7 @@ unsigned int speedstep_detect_processor(void) ebx = cpuid_ebx(0x00000001); ebx &= 0x000000FF; - dprintk("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask); + pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask); switch (c->x86_mask) { case 4: @@ -327,7 +324,7 @@ unsigned int speedstep_detect_processor(void) /* cpuid_ebx(1) is 0x04 for desktop PIII, * 0x06 for mobile PIII-M */ ebx = cpuid_ebx(0x00000001); - dprintk("ebx is %x\n", ebx); + pr_debug("ebx is %x\n", ebx); ebx &= 0x000000FF; @@ -344,7 +341,7 @@ unsigned int speedstep_detect_processor(void) /* all mobile PIII Coppermines have FSB 100 MHz * ==> sort out a few desktop PIIIs. */ rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_hi); - dprintk("Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n", + pr_debug("Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n", msr_lo, msr_hi); msr_lo &= 0x00c0000; if (msr_lo != 0x0080000) @@ -357,12 +354,12 @@ unsigned int speedstep_detect_processor(void) * bit 56 or 57 is set */ rdmsr(MSR_IA32_PLATFORM_ID, msr_lo, msr_hi); - dprintk("Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n", + pr_debug("Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n", msr_lo, msr_hi); if ((msr_hi & (1<<18)) && (relaxed_check ? 
1 : (msr_hi & (3<<24)))) { if (c->x86_mask == 0x01) { - dprintk("early PIII version\n"); + pr_debug("early PIII version\n"); return SPEEDSTEP_CPU_PIII_C_EARLY; } else return SPEEDSTEP_CPU_PIII_C; @@ -393,14 +390,14 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, if ((!processor) || (!low_speed) || (!high_speed) || (!set_state)) return -EINVAL; - dprintk("trying to determine both speeds\n"); + pr_debug("trying to determine both speeds\n"); /* get current speed */ prev_speed = speedstep_get_frequency(processor); if (!prev_speed) return -EIO; - dprintk("previous speed is %u\n", prev_speed); + pr_debug("previous speed is %u\n", prev_speed); local_irq_save(flags); @@ -412,7 +409,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, goto out; } - dprintk("low speed is %u\n", *low_speed); + pr_debug("low speed is %u\n", *low_speed); /* start latency measurement */ if (transition_latency) @@ -431,7 +428,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, goto out; } - dprintk("high speed is %u\n", *high_speed); + pr_debug("high speed is %u\n", *high_speed); if (*low_speed == *high_speed) { ret = -ENODEV; @@ -445,7 +442,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, if (transition_latency) { *transition_latency = (tv2.tv_sec - tv1.tv_sec) * USEC_PER_SEC + tv2.tv_usec - tv1.tv_usec; - dprintk("transition latency is %u uSec\n", *transition_latency); + pr_debug("transition latency is %u uSec\n", *transition_latency); /* convert uSec to nSec and add 20% for safety reasons */ *transition_latency *= 1200; diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c b/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c index 8abd869baabfb..c76ead3490bf7 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c @@ -55,9 +55,6 @@ static struct cpufreq_frequency_table speedstep_freqs[] = { * of DMA activity going on? */ #define SMI_TRIES 5 -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ - "speedstep-smi", msg) - /** * speedstep_smi_ownership */ @@ -70,7 +67,7 @@ static int speedstep_smi_ownership(void) command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); magic = virt_to_phys(magic_data); - dprintk("trying to obtain ownership with command %x at port %x\n", + pr_debug("trying to obtain ownership with command %x at port %x\n", command, smi_port); __asm__ __volatile__( @@ -85,7 +82,7 @@ static int speedstep_smi_ownership(void) : "memory" ); - dprintk("result is %x\n", result); + pr_debug("result is %x\n", result); return result; } @@ -106,13 +103,13 @@ static int speedstep_smi_get_freqs(unsigned int *low, unsigned int *high) u32 function = GET_SPEEDSTEP_FREQS; if (!(ist_info.event & 0xFFFF)) { - dprintk("bug #1422 -- can't read freqs from BIOS\n"); + pr_debug("bug #1422 -- can't read freqs from BIOS\n"); return -ENODEV; } command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); - dprintk("trying to determine frequencies with command %x at port %x\n", + pr_debug("trying to determine frequencies with command %x at port %x\n", command, smi_port); __asm__ __volatile__( @@ -129,7 +126,7 @@ static int speedstep_smi_get_freqs(unsigned int *low, unsigned int *high) "d" (smi_port), "S" (0), "D" (0) ); - dprintk("result %x, low_freq %u, high_freq %u\n", + pr_debug("result %x, low_freq %u, high_freq %u\n", result, low_mhz, high_mhz); /* abort if results are obviously incorrect... 
*/ @@ -154,7 +151,7 @@ static int speedstep_get_state(void) command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); - dprintk("trying to determine current setting with command %x " + pr_debug("trying to determine current setting with command %x " "at port %x\n", command, smi_port); __asm__ __volatile__( @@ -168,7 +165,7 @@ static int speedstep_get_state(void) "d" (smi_port), "S" (0), "D" (0) ); - dprintk("state is %x, result is %x\n", state, result); + pr_debug("state is %x, result is %x\n", state, result); return state & 1; } @@ -194,13 +191,13 @@ static void speedstep_set_state(unsigned int state) command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); - dprintk("trying to set frequency to state %u " + pr_debug("trying to set frequency to state %u " "with command %x at port %x\n", state, command, smi_port); do { if (retry) { - dprintk("retry %u, previous result %u, waiting...\n", + pr_debug("retry %u, previous result %u, waiting...\n", retry, result); mdelay(retry * 50); } @@ -221,7 +218,7 @@ static void speedstep_set_state(unsigned int state) local_irq_restore(flags); if (new_state == state) - dprintk("change to %u MHz succeeded after %u tries " + pr_debug("change to %u MHz succeeded after %u tries " "with result %u\n", (speedstep_freqs[new_state].frequency / 1000), retry, result); @@ -292,7 +289,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) result = speedstep_smi_ownership(); if (result) { - dprintk("fails in aquiring ownership of a SMI interface.\n"); + pr_debug("fails in acquiring ownership of a SMI interface.\n"); return -EINVAL; } @@ -304,7 +301,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) if (result) { /* fall back to speedstep_lib.c dection mechanism: * try both states out */ - dprintk("could not detect low and high frequencies " + pr_debug("could not detect low and high frequencies " "by SMI call.\n"); result = speedstep_get_freqs(speedstep_processor, low, high, @@ -312,18 +309,18 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) &speedstep_set_state); if (result) { - dprintk("could not detect two different speeds" + pr_debug("could not detect two different speeds" " -- aborting.\n"); return result; } else - dprintk("workaround worked.\n"); + pr_debug("workaround worked.\n"); } /* get current speed setting */ state = speedstep_get_state(); speed = speedstep_freqs[state].frequency; - dprintk("currently at %s speed setting - %i MHz\n", + pr_debug("currently at %s speed setting - %i MHz\n", (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? 
"low" : "high", (speed / 1000)); @@ -360,7 +357,7 @@ static int speedstep_resume(struct cpufreq_policy *policy) int result = speedstep_smi_ownership(); if (result) - dprintk("fails in re-aquiring ownership of a SMI interface.\n"); + pr_debug("fails in re-acquiring ownership of a SMI interface.\n"); return result; } @@ -403,12 +400,12 @@ static int __init speedstep_init(void) } if (!speedstep_processor) { - dprintk("No supported Intel CPU detected.\n"); + pr_debug("No supported Intel CPU detected.\n"); return -ENODEV; } - dprintk("signature:0x%.8lx, command:0x%.8lx, " - "event:0x%.8lx, perf_level:0x%.8lx.\n", + pr_debug("signature:0x%.8ulx, command:0x%.8ulx, " + "event:0x%.8ulx, perf_level:0x%.8ulx.\n", ist_info.signature, ist_info.command, ist_info.event, ist_info.perf_level); diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 5bf2fac52aca7..ca46a3a595599 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -509,6 +509,7 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu, out_free: if (b) { kobject_put(&b->kobj); + list_del(&b->miscj); kfree(b); } return err; diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 6f8c5e9da97f0..0f034460260d5 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -446,18 +446,20 @@ void intel_init_thermal(struct cpuinfo_x86 *c) */ rdmsr(MSR_IA32_MISC_ENABLE, l, h); + h = lvtthmr_init; /* * The initial value of thermal LVT entries on all APs always reads * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI * sequence to them and LVT registers are reset to 0s except for * the mask bits which are set to 1s when APs receive INIT IPI. - * Always restore the value that BIOS has programmed on AP based on - * BSP's info we saved since BIOS is always setting the same value - * for all threads/cores + * If BIOS takes over the thermal interrupt and sets its interrupt + * delivery mode to SMI (not fixed), it restores the value that the + * BIOS has programmed on AP based on BSP's info we saved since BIOS + * is always setting the same value for all threads/cores. */ - apic_write(APIC_LVTTHMR, lvtthmr_init); + if ((h & APIC_DM_FIXED_MASK) != APIC_DM_FIXED) + apic_write(APIC_LVTTHMR, lvtthmr_init); - h = lvtthmr_init; if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { printk(KERN_DEBUG diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index bebabec5b448d..151787e382c77 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c @@ -292,14 +292,24 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ /* * HACK! - * We use this same function to initialize the mtrrs on boot. - * The state of the boot cpu's mtrrs has been saved, and we want - * to replicate across all the APs. - * If we're doing that @reg is set to something special... + * + * We use this same function to initialize the mtrrs during boot, + * resume, runtime cpu online and on an explicit request to set a + * specific MTRR. + * + * During boot or suspend, the state of the boot cpu's mtrrs has been + * saved, and we want to replicate that across all the cpus that come + * online (either at the end of boot or resume or during a runtime cpu + * online). If we're doing that, @reg is set to something special and on + * this cpu we still do mtrr_if->set_all(). 
During boot/resume, this + is unnecessary if at this point we are still on the cpu that started + the boot/resume sequence. But there is no guarantee that we are still + on the same cpu. So we do mtrr_if->set_all() on this cpu as well to be + sure that we are in sync with everyone else. */ if (reg != ~0U) mtrr_if->set(reg, base, size, type); - else if (!mtrr_aps_delayed_init) + else mtrr_if->set_all(); /* Wait for the others */ diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 294f26da0c0ce..0b5e2b546566f 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -847,15 +847,21 @@ static int __init parse_memopt(char *p) if (!p) return -EINVAL; -#ifdef CONFIG_X86_32 if (!strcmp(p, "nopentium")) { +#ifdef CONFIG_X86_32 setup_clear_cpu_cap(X86_FEATURE_PSE); return 0; - } +#else + printk(KERN_WARNING "mem=nopentium ignored! (only supported on x86_32)\n"); + return -EINVAL; #endif + } userdef = 1; mem_size = memparse(p, &p); + /* don't remove all of memory when handling "mem={invalid}" param */ + if (mem_size == 0) + return -EINVAL; e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1); return 0; } diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 9efbdcc56425b..3755ef4943905 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -159,7 +159,12 @@ static void __init ati_bugs_contd(int num, int slot, int func) if (rev >= 0x40) acpi_fix_pin2_polarity = 1; - if (rev > 0x13) + /* + * SB600: revisions 0x11, 0x12, 0x13, 0x14, ... + * SB700: revisions 0x39, 0x3a, ... + * SB800: revisions 0x40, 0x41, ... + */ + if (rev >= 0x39) return; if (acpi_use_timer_override) diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index c8b4efad7ebb0..9ca3b0e343e5d 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -1413,7 +1413,7 @@ ENTRY(async_page_fault) CFI_ADJUST_CFA_OFFSET 4 jmp error_code CFI_ENDPROC -END(apf_page_fault) +END(async_page_fault) #endif /* diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index aed1ffbeb0c9b..bbd5c80cb09b0 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -1248,7 +1248,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) decl PER_CPU_VAR(irq_count) jmp error_exit CFI_ENDPROC -END(do_hypervisor_callback) +END(xen_do_hypervisor_callback) /* * Hypervisor uses this for application faults while it executes. 
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index c01ffa5b9b87e..197a46ff5148b 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c @@ -81,6 +81,9 @@ static u32 gart_unmapped_entry; #define AGPEXTERN #endif +/* GART can only remap to physical addresses < 1TB */ +#define GART_MAX_PHYS_ADDR (1ULL << 40) + /* backdoor interface to AGP driver */ AGPEXTERN int agp_memory_reserved; AGPEXTERN __u32 *agp_gatt_table; @@ -212,9 +215,13 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, size_t size, int dir, unsigned long align_mask) { unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE); - unsigned long iommu_page = alloc_iommu(dev, npages, align_mask); + unsigned long iommu_page; int i; + if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR)) + return bad_dma_addr; + + iommu_page = alloc_iommu(dev, npages, align_mask); if (iommu_page == -1) { if (!nonforced_iommu(dev, phys_mem, size)) return phys_mem; diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index bd387e8f73b47..080ea55522140 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -56,31 +56,17 @@ asmlinkage extern void ret_from_fork(void); DEFINE_PER_CPU(unsigned long, old_rsp); static DEFINE_PER_CPU(unsigned char, is_idle); -static ATOMIC_NOTIFIER_HEAD(idle_notifier); - -void idle_notifier_register(struct notifier_block *n) -{ - atomic_notifier_chain_register(&idle_notifier, n); -} -EXPORT_SYMBOL_GPL(idle_notifier_register); - -void idle_notifier_unregister(struct notifier_block *n) -{ - atomic_notifier_chain_unregister(&idle_notifier, n); -} -EXPORT_SYMBOL_GPL(idle_notifier_unregister); - void enter_idle(void) { percpu_write(is_idle, 1); - atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL); + idle_notifier_call_chain(IDLE_START); } static void __exit_idle(void) { if (x86_test_and_clear_bit_percpu(0, is_idle) == 0) return; - atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL); + idle_notifier_call_chain(IDLE_END); } /* Called from interrupts to signify idle end */ diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 45892dc4b72a3..f65e5b521dbd4 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -608,6 +608,9 @@ static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data) unsigned len, type; struct perf_event *bp; + if (ptrace_get_breakpoints(tsk) < 0) + return -ESRCH; + data &= ~DR_CONTROL_RESERVED; old_dr7 = ptrace_get_dr7(thread->ptrace_bps); restore: @@ -655,6 +658,9 @@ static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data) } goto restore; } + + ptrace_put_breakpoints(tsk); + return ((orig_ret < 0) ? 
orig_ret : rc); } @@ -668,10 +674,17 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) if (n < HBP_NUM) { struct perf_event *bp; + + if (ptrace_get_breakpoints(tsk) < 0) + return -ESRCH; + bp = thread->ptrace_bps[n]; if (!bp) - return 0; - val = bp->hw.info.address; + val = 0; + else + val = bp->hw.info.address; + + ptrace_put_breakpoints(tsk); } else if (n == 6) { val = thread->debugreg6; } else if (n == 7) { @@ -686,6 +699,10 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, struct perf_event *bp; struct thread_struct *t = &tsk->thread; struct perf_event_attr attr; + int err = 0; + + if (ptrace_get_breakpoints(tsk) < 0) + return -ESRCH; if (!t->ptrace_bps[nr]) { ptrace_breakpoint_init(&attr); @@ -709,24 +726,23 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, * writing for the user. And anyway this is the previous * behaviour. */ - if (IS_ERR(bp)) - return PTR_ERR(bp); + if (IS_ERR(bp)) { + err = PTR_ERR(bp); + goto put; + } t->ptrace_bps[nr] = bp; } else { - int err; - bp = t->ptrace_bps[nr]; attr = bp->attr; attr.bp_addr = addr; err = modify_user_hw_breakpoint(bp, &attr); - if (err) - return err; } - - return 0; +put: + ptrace_put_breakpoints(tsk); + return err; } /* diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index f02b8edc3d449..1bf7b28bed230 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -3575,10 +3575,11 @@ static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm, return kvm_mmu_prepare_zap_page(kvm, page, invalid_list); } -static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) +static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc) { struct kvm *kvm; struct kvm *kvm_freed = NULL; + int nr_to_scan = sc->nr_to_scan; if (nr_to_scan == 0) goto out; diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 63fec1531e89b..d8a15a17d7671 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -135,6 +135,8 @@ struct vcpu_svm { u32 *msrpm; + ulong nmi_iret_rip; + struct nested_state nested; bool nmi_singlestep; @@ -2653,6 +2655,7 @@ static int iret_interception(struct vcpu_svm *svm) ++svm->vcpu.stat.nmi_window_exits; clr_intercept(svm, INTERCEPT_IRET); svm->vcpu.arch.hflags |= HF_IRET_MASK; + svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu); return 1; } @@ -3474,7 +3477,12 @@ static void svm_complete_interrupts(struct vcpu_svm *svm) svm->int3_injected = 0; - if (svm->vcpu.arch.hflags & HF_IRET_MASK) { + /* + * If we've made progress since setting HF_IRET_MASK, we've + * executed an IRET and can allow NMI injection. 
+ */ + if ((svm->vcpu.arch.hflags & HF_IRET_MASK) + && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) { svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK); kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); } diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index a460158b5ac5b..cfd3ca4ac4a74 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -72,7 +72,7 @@ ENTRY(_copy_to_user) addq %rdx,%rcx jc bad_to_user cmpq TI_addr_limit(%rax),%rcx - jae bad_to_user + ja bad_to_user ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string CFI_ENDPROC ENDPROC(_copy_to_user) @@ -85,7 +85,7 @@ ENTRY(_copy_from_user) addq %rdx,%rcx jc bad_from_user cmpq TI_addr_limit(%rax),%rcx - jae bad_from_user + ja bad_from_user ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string CFI_ENDPROC ENDPROC(_copy_from_user) diff --git a/arch/x86/lib/semaphore_32.S b/arch/x86/lib/semaphore_32.S index 648fe47417823..f35eec78a68e5 100644 --- a/arch/x86/lib/semaphore_32.S +++ b/arch/x86/lib/semaphore_32.S @@ -36,7 +36,7 @@ */ #ifdef CONFIG_SMP ENTRY(__write_lock_failed) - CFI_STARTPROC simple + CFI_STARTPROC FRAME 2: LOCK_PREFIX addl $ RW_LOCK_BIAS,(%eax) diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 0113d19c8aa60..8573b83a63d03 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -168,8 +168,7 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) * section 8.1: in PAE mode we explicitly have to flush the * TLB via cr3 if the top-level pgd is changed... */ - if (mm == current->active_mm) - write_cr3(read_cr3()); + flush_tlb_mm(mm); } #else /* !CONFIG_X86_PAE */ diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index c3b8e24f2b16f..9fd8a567fe1e4 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -316,16 +316,23 @@ static void op_amd_stop_ibs(void) wrmsrl(MSR_AMD64_IBSOPCTL, 0); } -static inline int eilvt_is_available(int offset) +static inline int get_eilvt(int offset) { - /* check if we may assign a vector */ return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1); } +static inline int put_eilvt(int offset) +{ + return !setup_APIC_eilvt(offset, 0, 0, 1); +} + static inline int ibs_eilvt_valid(void) { int offset; u64 val; + int valid = 0; + + preempt_disable(); rdmsrl(MSR_AMD64_IBSCTL, val); offset = val & IBSCTL_LVT_OFFSET_MASK; @@ -333,16 +340,20 @@ static inline int ibs_eilvt_valid(void) if (!(val & IBSCTL_LVT_OFFSET_VALID)) { pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n", smp_processor_id(), offset, MSR_AMD64_IBSCTL, val); - return 0; + goto out; } - if (!eilvt_is_available(offset)) { + if (!get_eilvt(offset)) { pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n", smp_processor_id(), offset, MSR_AMD64_IBSCTL, val); - return 0; + goto out; } - return 1; + valid = 1; +out: + preempt_enable(); + + return valid; } static inline int get_ibs_offset(void) @@ -600,67 +611,69 @@ static int setup_ibs_ctl(int ibs_eilvt_off) static int force_ibs_eilvt_setup(void) { - int i; + int offset; int ret; - /* find the next free available EILVT entry */ - for (i = 1; i < 4; i++) { - if (!eilvt_is_available(i)) - continue; - ret = setup_ibs_ctl(i); - if (ret) - return ret; - pr_err(FW_BUG "using offset %d for IBS interrupts\n", i); - return 0; + /* + * find the next free available EILVT entry, skip offset 0, + * pin search to this cpu + */ + 
preempt_disable(); + for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) { + if (get_eilvt(offset)) + break; } + preempt_enable(); - printk(KERN_DEBUG "No EILVT entry available\n"); - - return -EBUSY; -} - -static int __init_ibs_nmi(void) -{ - int ret; - - if (ibs_eilvt_valid()) - return 0; + if (offset == APIC_EILVT_NR_MAX) { + printk(KERN_DEBUG "No EILVT entry available\n"); + return -EBUSY; + } - ret = force_ibs_eilvt_setup(); + ret = setup_ibs_ctl(offset); if (ret) - return ret; + goto out; - if (!ibs_eilvt_valid()) - return -EFAULT; + if (!ibs_eilvt_valid()) { + ret = -EFAULT; + goto out; + } + pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset); pr_err(FW_BUG "workaround enabled for IBS LVT offset\n"); return 0; +out: + preempt_disable(); + put_eilvt(offset); + preempt_enable(); + return ret; } /* * check and reserve APIC extended interrupt LVT offset for IBS if * available - * - * init_ibs() preforms implicitly cpu-local operations, so pin this - * thread to its current CPU */ static void init_ibs(void) { - preempt_disable(); - ibs_caps = get_ibs_caps(); + if (!ibs_caps) + return; + + if (ibs_eilvt_valid()) goto out; - if (__init_ibs_nmi() < 0) - ibs_caps = 0; - else - printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps); + if (!force_ibs_eilvt_setup()) + goto out; + + /* Failed to setup ibs */ + ibs_caps = 0; + return; out: - preempt_enable(); + printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps); } static int (*create_arch_files)(struct super_block *sb, struct dentry *root); diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c index 9fadec074142b..98ab13058f892 100644 --- a/arch/x86/oprofile/op_model_p4.c +++ b/arch/x86/oprofile/op_model_p4.c @@ -50,7 +50,7 @@ static inline void setup_num_counters(void) #endif } -static int inline addr_increment(void) +static inline int addr_increment(void) { #ifdef CONFIG_SMP return smp_num_siblings == 2 ? 2 : 1; diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index a7b38d35c29a1..3796f99d7bf0c 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -698,16 +698,17 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long va, unsigned int cpu) { - int tcpu; - int uvhub; int locals = 0; int remotes = 0; int hubs = 0; + int tcpu; + int tpnode; struct bau_desc *bau_desc; struct cpumask *flush_mask; struct ptc_stats *stat; struct bau_control *bcp; struct bau_control *tbcp; + struct hub_and_pnode *hpp; /* kernel was booted 'nobau' */ if (nobau) @@ -749,11 +750,18 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu; bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); - /* cpu statistics */ for_each_cpu(tcpu, flush_mask) { - uvhub = uv_cpu_to_blade_id(tcpu); - bau_uvhub_set(uvhub, &bau_desc->distribution); - if (uvhub == bcp->uvhub) + /* + * The distribution vector is a bit map of pnodes, relative + * to the partition base pnode (and the partition base nasid + * in the header). + * Translate cpu to pnode and hub using an array stored + * in local memory. 
+ */ + hpp = &bcp->socket_master->target_hub_and_pnode[tcpu]; + tpnode = hpp->pnode - bcp->partition_base_pnode; + bau_uvhub_set(tpnode, &bau_desc->distribution); + if (hpp->uvhub == bcp->uvhub) locals++; else remotes++; @@ -854,7 +862,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs) * an interrupt, but causes an error message to be returned to * the sender. */ -static void uv_enable_timeouts(void) +static void __init uv_enable_timeouts(void) { int uvhub; int nuvhubs; @@ -1325,10 +1333,10 @@ static int __init uv_ptc_init(void) } /* - * initialize the sending side's sending buffers + * Initialize the sending side's sending buffers. */ static void -uv_activation_descriptor_init(int node, int pnode) +uv_activation_descriptor_init(int node, int pnode, int base_pnode) { int i; int cpu; @@ -1351,11 +1359,11 @@ uv_activation_descriptor_init(int node, int pnode) n = pa >> uv_nshift; m = pa & uv_mmask; + /* the 14-bit pnode */ uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, (n << UV_DESC_BASE_PNODE_SHIFT | m)); - /* - * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each + * Initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each * cpu even though we only use the first one; one descriptor can * describe a broadcast to 256 uv hubs. */ @@ -1364,12 +1372,13 @@ uv_activation_descriptor_init(int node, int pnode) memset(bd2, 0, sizeof(struct bau_desc)); bd2->header.sw_ack_flag = 1; /* - * base_dest_nodeid is the nasid of the first uvhub - * in the partition. The bit map will indicate uvhub numbers, - * which are 0-N in a partition. Pnodes are unique system-wide. + * The base_dest_nasid set in the message header is the nasid + * of the first uvhub in the partition. The bit map will + * indicate destination pnode numbers relative to that base. + * They may not be consecutive if nasid striding is being used. */ - bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode); - bd2->header.dest_subnodeid = 0x10; /* the LB */ + bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode); + bd2->header.dest_subnodeid = UV_LB_SUBNODEID; bd2->header.command = UV_NET_ENDPOINT_INTD; bd2->header.int_both = 1; /* @@ -1441,7 +1450,7 @@ uv_payload_queue_init(int node, int pnode) /* * Initialization of each UV hub's structures */ -static void __init uv_init_uvhub(int uvhub, int vector) +static void __init uv_init_uvhub(int uvhub, int vector, int base_pnode) { int node; int pnode; @@ -1449,11 +1458,11 @@ static void __init uv_init_uvhub(int uvhub, int vector) node = uvhub_to_first_node(uvhub); pnode = uv_blade_to_pnode(uvhub); - uv_activation_descriptor_init(node, pnode); + uv_activation_descriptor_init(node, pnode, base_pnode); uv_payload_queue_init(node, pnode); /* - * the below initialization can't be in firmware because the - * messaging IRQ will be determined by the OS + * The below initialization can't be in firmware because the + * messaging IRQ will be determined by the OS. 
*/ apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits; uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, @@ -1490,10 +1499,11 @@ calculate_destination_timeout(void) /* * initialize the bau_control structure for each cpu */ -static int __init uv_init_per_cpu(int nuvhubs) +static int __init uv_init_per_cpu(int nuvhubs, int base_part_pnode) { int i; int cpu; + int tcpu; int pnode; int uvhub; int have_hmaster; @@ -1527,6 +1537,15 @@ static int __init uv_init_per_cpu(int nuvhubs) bcp = &per_cpu(bau_control, cpu); memset(bcp, 0, sizeof(struct bau_control)); pnode = uv_cpu_hub_info(cpu)->pnode; + if ((pnode - base_part_pnode) >= UV_DISTRIBUTION_SIZE) { + printk(KERN_EMERG + "cpu %d pnode %d-%d beyond %d; BAU disabled\n", + cpu, pnode, base_part_pnode, + UV_DISTRIBUTION_SIZE); + return 1; + } + bcp->osnode = cpu_to_node(cpu); + bcp->partition_base_pnode = uv_partition_base_pnode; uvhub = uv_cpu_hub_info(cpu)->numa_blade_id; *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8)); bdp = &uvhub_descs[uvhub]; @@ -1535,7 +1554,7 @@ static int __init uv_init_per_cpu(int nuvhubs) bdp->pnode = pnode; /* kludge: 'assuming' one node per socket, and assuming that disabling a socket just leaves a gap in node numbers */ - socket = (cpu_to_node(cpu) & 1); + socket = bcp->osnode & 1; bdp->socket_mask |= (1 << socket); sdp = &bdp->socket[socket]; sdp->cpu_number[sdp->num_cpus] = cpu; @@ -1584,6 +1603,20 @@ static int __init uv_init_per_cpu(int nuvhubs) nextsocket: socket++; socket_mask = (socket_mask >> 1); + /* each socket gets a local array of pnodes/hubs */ + bcp = smaster; + bcp->target_hub_and_pnode = kmalloc_node( + sizeof(struct hub_and_pnode) * + num_possible_cpus(), GFP_KERNEL, bcp->osnode); + memset(bcp->target_hub_and_pnode, 0, + sizeof(struct hub_and_pnode) * + num_possible_cpus()); + for_each_present_cpu(tcpu) { + bcp->target_hub_and_pnode[tcpu].pnode = + uv_cpu_hub_info(tcpu)->pnode; + bcp->target_hub_and_pnode[tcpu].uvhub = + uv_cpu_hub_info(tcpu)->numa_blade_id; + } } } kfree(uvhub_descs); @@ -1636,21 +1669,22 @@ static int __init uv_bau_init(void) spin_lock_init(&disable_lock); congested_cycles = microsec_2_cycles(congested_response_us); - if (uv_init_per_cpu(nuvhubs)) { - nobau = 1; - return 0; - } - uv_partition_base_pnode = 0x7fffffff; - for (uvhub = 0; uvhub < nuvhubs; uvhub++) + for (uvhub = 0; uvhub < nuvhubs; uvhub++) { if (uv_blade_nr_possible_cpus(uvhub) && (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode)) uv_partition_base_pnode = uv_blade_to_pnode(uvhub); + } + + if (uv_init_per_cpu(nuvhubs, uv_partition_base_pnode)) { + nobau = 1; + return 0; + } vector = UV_BAU_MESSAGE; for_each_possible_blade(uvhub) if (uv_blade_nr_possible_cpus(uvhub)) - uv_init_uvhub(uvhub, vector); + uv_init_uvhub(uvhub, vector, uv_partition_base_pnode); uv_enable_timeouts(); alloc_intr_gate(vector, uv_bau_message_intr1); diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index f6089421147a0..d835bc2a6a988 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1138,7 +1138,7 @@ static void drop_other_mm_ref(void *info) active_mm = percpu_read(cpu_tlbstate.active_mm); - if (active_mm == mm) + if (active_mm == mm && percpu_read(cpu_tlbstate.state) != TLBSTATE_OK) leave_mm(smp_processor_id()); /* If this cpu still has a stale cr3 reference, then make sure @@ -1651,9 +1651,6 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) { pte_t pte; - if (pfn > max_pfn_mapped) - max_pfn_mapped = pfn; - if (!pte_none(pte_page[pteidx])) 
continue; @@ -1711,6 +1708,12 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, pud_t *l3; pmd_t *l2; + /* max_pfn_mapped is the last pfn mapped in the initial memory + * mappings. Considering that on Xen after the kernel mappings we + * have the mappings of some pages that don't exist in pfn space, we + * set max_pfn_mapped to the last real pfn mapped. */ + max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); + /* Zap identity mapping */ init_level4_pgt[0] = __pgd(0); @@ -1815,9 +1818,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, initial_kernel_pmd = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); - max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) + - xen_start_info->nr_pt_frames * PAGE_SIZE + - 512*1024); + max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched index 3199b76f795de..59054520e5615 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched @@ -43,6 +43,28 @@ config CFQ_GROUP_IOSCHED ---help--- Enable group IO scheduling in CFQ. +config IOSCHED_BFQ + tristate "BFQ I/O scheduler" + depends on EXPERIMENTAL + default n + ---help--- + The BFQ I/O scheduler tries to distribute bandwidth among + all processes according to their weights. + It aims at distributing the bandwidth as desired, independently of + the disk parameters and with any workload. It also tries to + guarantee low latency to interactive and soft real-time + applications. If compiled built-in (saying Y here), BFQ can + be configured to support hierarchical scheduling. + +config CGROUP_BFQIO + bool "BFQ hierarchical scheduling support" + depends on CGROUPS && IOSCHED_BFQ=y + default n + ---help--- + Enable hierarchical scheduling in BFQ, using the cgroups + filesystem interface. The name of the subsystem will be + bfqio. + choice prompt "Default I/O scheduler" default DEFAULT_CFQ @@ -56,6 +78,9 @@ choice config DEFAULT_CFQ bool "CFQ" if IOSCHED_CFQ=y + config DEFAULT_BFQ + bool "BFQ" if IOSCHED_BFQ=y + config DEFAULT_NOOP bool "No-op" @@ -65,6 +90,7 @@ config DEFAULT_IOSCHED string default "deadline" if DEFAULT_DEADLINE default "cfq" if DEFAULT_CFQ + default "bfq" if DEFAULT_BFQ default "noop" if DEFAULT_NOOP endmenu diff --git a/block/Makefile b/block/Makefile index 0fec4b3fab511..22d86087f7f56 100644 --- a/block/Makefile +++ b/block/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o +obj-$(CONFIG_IOSCHED_BFQ) += bfq-iosched.o obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c new file mode 100644 index 0000000000000..cc7be6ba4034d --- /dev/null +++ b/block/bfq-cgroup.c @@ -0,0 +1,769 @@ +/* + * BFQ: CGROUPS support. + * + * Based on ideas and code from CFQ: + * Copyright (C) 2003 Jens Axboe + * + * Copyright (C) 2008 Fabio Checconi + * Paolo Valente + * + * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ file. 
+ */ + +#ifdef CONFIG_CGROUP_BFQIO +static struct bfqio_cgroup bfqio_root_cgroup = { + .weight = BFQ_DEFAULT_GRP_WEIGHT, + .ioprio = BFQ_DEFAULT_GRP_IOPRIO, + .ioprio_class = BFQ_DEFAULT_GRP_CLASS, +}; + +static inline void bfq_init_entity(struct bfq_entity *entity, + struct bfq_group *bfqg) +{ + entity->weight = entity->new_weight; + entity->orig_weight = entity->new_weight; + entity->ioprio = entity->new_ioprio; + entity->ioprio_class = entity->new_ioprio_class; + entity->parent = bfqg->my_entity; + entity->sched_data = &bfqg->sched_data; +} + +static struct bfqio_cgroup *cgroup_to_bfqio(struct cgroup *cgroup) +{ + return container_of(cgroup_subsys_state(cgroup, bfqio_subsys_id), + struct bfqio_cgroup, css); +} + +/* + * Search the bfq_group for bfqd into the hash table (by now only a list) + * of bgrp. Must be called under rcu_read_lock(). + */ +static struct bfq_group *bfqio_lookup_group(struct bfqio_cgroup *bgrp, + struct bfq_data *bfqd) +{ + struct bfq_group *bfqg; + struct hlist_node *n; + void *key; + + hlist_for_each_entry_rcu(bfqg, n, &bgrp->group_data, group_node) { + key = rcu_dereference(bfqg->bfqd); + if (key == bfqd) + return bfqg; + } + + return NULL; +} + +static inline void bfq_group_init_entity(struct bfqio_cgroup *bgrp, + struct bfq_group *bfqg) +{ + struct bfq_entity *entity = &bfqg->entity; + + entity->weight = entity->new_weight = bgrp->weight; + entity->orig_weight = entity->new_weight; + entity->ioprio = entity->new_ioprio = bgrp->ioprio; + entity->ioprio_class = entity->new_ioprio_class = bgrp->ioprio_class; + entity->ioprio_changed = 1; + entity->my_sched_data = &bfqg->sched_data; +} + +static inline void bfq_group_set_parent(struct bfq_group *bfqg, + struct bfq_group *parent) +{ + struct bfq_entity *entity; + + BUG_ON(parent == NULL); + BUG_ON(bfqg == NULL); + + entity = &bfqg->entity; + entity->parent = parent->my_entity; + entity->sched_data = &parent->sched_data; +} + +/** + * bfq_group_chain_alloc - allocate a chain of groups. + * @bfqd: queue descriptor. + * @cgroup: the leaf cgroup this chain starts from. + * + * Allocate a chain of groups starting from the one belonging to + * @cgroup up to the root cgroup. Stop if a cgroup on the chain + * to the root has already an allocated group on @bfqd. + */ +static struct bfq_group *bfq_group_chain_alloc(struct bfq_data *bfqd, + struct cgroup *cgroup) +{ + struct bfqio_cgroup *bgrp; + struct bfq_group *bfqg, *prev = NULL, *leaf = NULL; + + for (; cgroup != NULL; cgroup = cgroup->parent) { + bgrp = cgroup_to_bfqio(cgroup); + + bfqg = bfqio_lookup_group(bgrp, bfqd); + if (bfqg != NULL) { + /* + * All the cgroups in the path from there to the + * root must have a bfq_group for bfqd, so we don't + * need any more allocations. + */ + break; + } + + bfqg = kzalloc(sizeof(*bfqg), GFP_ATOMIC); + if (bfqg == NULL) + goto cleanup; + + bfq_group_init_entity(bgrp, bfqg); + bfqg->my_entity = &bfqg->entity; + + if (leaf == NULL) { + leaf = bfqg; + prev = leaf; + } else { + bfq_group_set_parent(prev, bfqg); + /* + * Build a list of allocated nodes using the bfqd + * filed, that is still unused and will be initialized + * only after the node will be connected. + */ + prev->bfqd = bfqg; + prev = bfqg; + } + } + + return leaf; + +cleanup: + while (leaf != NULL) { + prev = leaf; + leaf = leaf->bfqd; + kfree(prev); + } + + return NULL; +} + +/** + * bfq_group_chain_link - link an allocatd group chain to a cgroup hierarchy. + * @bfqd: the queue descriptor. + * @cgroup: the leaf cgroup to start from. 
+ * @leaf: the leaf group (to be associated to @cgroup). + * + * Try to link a chain of groups to a cgroup hierarchy, connecting the + * nodes bottom-up, so we can be sure that when we find a cgroup in the + * hierarchy that already as a group associated to @bfqd all the nodes + * in the path to the root cgroup have one too. + * + * On locking: the queue lock protects the hierarchy (there is a hierarchy + * per device) while the bfqio_cgroup lock protects the list of groups + * belonging to the same cgroup. + */ +static void bfq_group_chain_link(struct bfq_data *bfqd, struct cgroup *cgroup, + struct bfq_group *leaf) +{ + struct bfqio_cgroup *bgrp; + struct bfq_group *bfqg, *next, *prev = NULL; + unsigned long flags; + + assert_spin_locked(bfqd->queue->queue_lock); + + for (; cgroup != NULL && leaf != NULL; cgroup = cgroup->parent) { + bgrp = cgroup_to_bfqio(cgroup); + next = leaf->bfqd; + + bfqg = bfqio_lookup_group(bgrp, bfqd); + BUG_ON(bfqg != NULL); + + spin_lock_irqsave(&bgrp->lock, flags); + + rcu_assign_pointer(leaf->bfqd, bfqd); + hlist_add_head_rcu(&leaf->group_node, &bgrp->group_data); + hlist_add_head(&leaf->bfqd_node, &bfqd->group_list); + + spin_unlock_irqrestore(&bgrp->lock, flags); + + prev = leaf; + leaf = next; + } + + BUG_ON(cgroup == NULL && leaf != NULL); + if (cgroup != NULL && prev != NULL) { + bgrp = cgroup_to_bfqio(cgroup); + bfqg = bfqio_lookup_group(bgrp, bfqd); + bfq_group_set_parent(prev, bfqg); + } +} + +/** + * bfq_find_alloc_group - return the group associated to @bfqd in @cgroup. + * @bfqd: queue descriptor. + * @cgroup: cgroup being searched for. + * + * Return a group associated to @bfqd in @cgroup, allocating one if + * necessary. When a group is returned all the cgroups in the path + * to the root have a group associated to @bfqd. + * + * If the allocation fails, return the root group: this breaks guarantees + * but is a safe fallbak. If this loss becames a problem it can be + * mitigated using the equivalent weight (given by the product of the + * weights of the groups in the path from @group to the root) in the + * root scheduler. + * + * We allocate all the missing nodes in the path from the leaf cgroup + * to the root and we connect the nodes only after all the allocations + * have been successful. + */ +static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd, + struct cgroup *cgroup) +{ + struct bfqio_cgroup *bgrp = cgroup_to_bfqio(cgroup); + struct bfq_group *bfqg; + + bfqg = bfqio_lookup_group(bgrp, bfqd); + if (bfqg != NULL) + return bfqg; + + bfqg = bfq_group_chain_alloc(bfqd, cgroup); + if (bfqg != NULL) + bfq_group_chain_link(bfqd, cgroup, bfqg); + else + bfqg = bfqd->root_group; + + return bfqg; +} + +/** + * bfq_bfqq_move - migrate @bfqq to @bfqg. + * @bfqd: queue descriptor. + * @bfqq: the queue to move. + * @entity: @bfqq's entity. + * @bfqg: the group to move to. + * + * Move @bfqq to @bfqg, deactivating it from its old group and reactivating + * it on the new one. Avoid putting the entity on the old group idle tree. + * + * Must be called under the queue lock; the cgroup owning @bfqg must + * not disappear (by now this just means that we are called under + * rcu_read_lock()). 
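+ *
+ * If the queue is busy it is first detached from its old group: removed
+ * from the busy list when it has no queued requests left, deactivated
+ * otherwise. It is then re-parented to @bfqg and, if it still has
+ * requests, re-activated there.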
+ */ +static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, + struct bfq_entity *entity, struct bfq_group *bfqg) +{ + int busy, resume; + + busy = bfq_bfqq_busy(bfqq); + resume = !RB_EMPTY_ROOT(&bfqq->sort_list); + + BUG_ON(resume && !entity->on_st); + BUG_ON(busy && !resume && entity->on_st && bfqq != bfqd->active_queue); + + if (busy) { + BUG_ON(atomic_read(&bfqq->ref) < 2); + + if (!resume) + bfq_del_bfqq_busy(bfqd, bfqq, 0); + else + bfq_deactivate_bfqq(bfqd, bfqq, 0); + } + + /* + * Here we use a reference to bfqg. We don't need a refcounter + * as the cgroup reference will not be dropped, so that its + * destroy() callback will not be invoked. + */ + entity->parent = bfqg->my_entity; + entity->sched_data = &bfqg->sched_data; + + if (busy && resume) + bfq_activate_bfqq(bfqd, bfqq); +} + +/** + * __bfq_cic_change_cgroup - move @cic to @cgroup. + * @bfqd: the queue descriptor. + * @cic: the cic to move. + * @cgroup: the cgroup to move to. + * + * Move cic to cgroup, assuming that bfqd->queue is locked; the caller + * has to make sure that the reference to cgroup is valid across the call. + * + * NOTE: an alternative approach might have been to store the current + * cgroup in bfqq and getting a reference to it, reducing the lookup + * time here, at the price of slightly more complex code. + */ +static struct bfq_group *__bfq_cic_change_cgroup(struct bfq_data *bfqd, + struct cfq_io_context *cic, + struct cgroup *cgroup) +{ + struct bfq_queue *async_bfqq = cic_to_bfqq(cic, 0); + struct bfq_queue *sync_bfqq = cic_to_bfqq(cic, 1); + struct bfq_entity *entity; + struct bfq_group *bfqg; + struct bfqio_cgroup *bgrp; + + bgrp = cgroup_to_bfqio(cgroup); + + bfqg = bfq_find_alloc_group(bfqd, cgroup); + if (async_bfqq != NULL) { + entity = &async_bfqq->entity; + + if (entity->sched_data != &bfqg->sched_data) { + cic_set_bfqq(cic, NULL, 0); + bfq_log_bfqq(bfqd, async_bfqq, + "cic_change_group: %p %d", + async_bfqq, async_bfqq->ref); + bfq_put_queue(async_bfqq); + } + } + + if (sync_bfqq != NULL) { + entity = &sync_bfqq->entity; + if (entity->sched_data != &bfqg->sched_data) + bfq_bfqq_move(bfqd, sync_bfqq, entity, bfqg); + } + + return bfqg; +} + +/** + * bfq_cic_change_cgroup - move @cic to @cgroup. + * @cic: the cic being migrated. + * @cgroup: the destination cgroup. + * + * When the task owning @cic is moved to @cgroup, @cic is immediately + * moved into its new parent group. + */ +static void bfq_cic_change_cgroup(struct cfq_io_context *cic, + struct cgroup *cgroup) +{ + struct bfq_data *bfqd; + unsigned long uninitialized_var(flags); + + bfqd = bfq_get_bfqd_locked(&cic->key, &flags); + if (bfqd != NULL) { + __bfq_cic_change_cgroup(bfqd, cic, cgroup); + bfq_put_bfqd_unlock(bfqd, &flags); + } +} + +/** + * bfq_cic_update_cgroup - update the cgroup of @cic. + * @cic: the @cic to update. + * + * Make sure that @cic is enqueued in the cgroup of the current task. + * We need this in addition to moving cics during the cgroup attach + * phase because the task owning @cic could be at its first disk + * access or we may end up in the root cgroup as the result of a + * memory allocation failure and here we try to move to the right + * group. + * + * Must be called under the queue lock. It is safe to use the returned + * value even after the rcu_read_unlock() as the migration/destruction + * paths act under the queue lock too. 
IOW it is impossible to race with + * group migration/destruction and end up with an invalid group as: + * a) here cgroup has not yet been destroyed, nor its destroy callback + * has started execution, as current holds a reference to it, + * b) if it is destroyed after rcu_read_unlock() [after current is + * migrated to a different cgroup] its attach() callback will have + * taken care of remove all the references to the old cgroup data. + */ +static struct bfq_group *bfq_cic_update_cgroup(struct cfq_io_context *cic) +{ + struct bfq_data *bfqd = cic->key; + struct bfq_group *bfqg; + struct cgroup *cgroup; + + BUG_ON(bfqd == NULL); + + rcu_read_lock(); + cgroup = task_cgroup(current, bfqio_subsys_id); + bfqg = __bfq_cic_change_cgroup(bfqd, cic, cgroup); + rcu_read_unlock(); + + return bfqg; +} + +/** + * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st. + * @st: the service tree being flushed. + */ +static inline void bfq_flush_idle_tree(struct bfq_service_tree *st) +{ + struct bfq_entity *entity = st->first_idle; + + for (; entity != NULL; entity = st->first_idle) + __bfq_deactivate_entity(entity, 0); +} + +/** + * bfq_destroy_group - destroy @bfqg. + * @bgrp: the bfqio_cgroup containing @bfqg. + * @bfqg: the group being destroyed. + * + * Destroy @bfqg, making sure that it is not referenced from its parent. + */ +static void bfq_destroy_group(struct bfqio_cgroup *bgrp, struct bfq_group *bfqg) +{ + struct bfq_data *bfqd; + struct bfq_service_tree *st; + struct bfq_entity *entity = bfqg->my_entity; + unsigned long uninitialized_var(flags); + int i; + + hlist_del(&bfqg->group_node); + + /* + * We may race with device destruction, take extra care when + * dereferencing bfqg->bfqd. + */ + bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags); + if (bfqd != NULL) { + hlist_del(&bfqg->bfqd_node); + __bfq_deactivate_entity(entity, 0); + bfq_put_async_queues(bfqd, bfqg); + bfq_put_bfqd_unlock(bfqd, &flags); + } + + for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) { + st = bfqg->sched_data.service_tree + i; + + /* + * The idle tree may still contain bfq_queues belonging + * to exited task because they never migrated to a different + * cgroup from the one being destroyed now. Noone else + * can access them so it's safe to act without any lock. + */ + bfq_flush_idle_tree(st); + + BUG_ON(!RB_EMPTY_ROOT(&st->active)); + BUG_ON(!RB_EMPTY_ROOT(&st->idle)); + } + BUG_ON(bfqg->sched_data.next_active != NULL); + BUG_ON(bfqg->sched_data.active_entity != NULL); + BUG_ON(entity->tree != NULL); + + /* + * No need to defer the kfree() to the end of the RCU grace + * period: we are called from the destroy() callback of our + * cgroup, so we can be sure that noone is a) still using + * this cgroup or b) doing lookups in it. + */ + kfree(bfqg); +} + +/** + * bfq_disconnect_groups - diconnect @bfqd from all its groups. + * @bfqd: the device descriptor being exited. + * + * When the device exits we just make sure that no lookup can return + * the now unused group structures. They will be deallocated on cgroup + * destruction. + */ +static void bfq_disconnect_groups(struct bfq_data *bfqd) +{ + struct hlist_node *pos, *n; + struct bfq_group *bfqg; + + bfq_log(bfqd, "disconnect_groups beginning") ; + hlist_for_each_entry_safe(bfqg, pos, n, &bfqd->group_list, bfqd_node) { + hlist_del(&bfqg->bfqd_node); + + __bfq_deactivate_entity(bfqg->my_entity, 0); + + /* + * Don't remove from the group hash, just set an + * invalid key. 
No lookups can race with the + * assignment as bfqd is being destroyed; this + * implies also that new elements cannot be added + * to the list. + */ + rcu_assign_pointer(bfqg->bfqd, NULL); + + bfq_log(bfqd, "disconnect_groups: put async for group %p", + bfqg) ; + bfq_put_async_queues(bfqd, bfqg); + } +} + +static inline void bfq_free_root_group(struct bfq_data *bfqd) +{ + struct bfqio_cgroup *bgrp = &bfqio_root_cgroup; + struct bfq_group *bfqg = bfqd->root_group; + + bfq_put_async_queues(bfqd, bfqg); + + spin_lock_irq(&bgrp->lock); + hlist_del_rcu(&bfqg->group_node); + spin_unlock_irq(&bgrp->lock); + + /* + * No need to synchronize_rcu() here: since the device is gone + * there cannot be any read-side access to its root_group. + */ + kfree(bfqg); +} + +static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node) +{ + struct bfq_group *bfqg; + struct bfqio_cgroup *bgrp; + int i; + + bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node); + if (bfqg == NULL) + return NULL; + + bfqg->entity.parent = NULL; + for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) + bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; + + bgrp = &bfqio_root_cgroup; + spin_lock_irq(&bgrp->lock); + rcu_assign_pointer(bfqg->bfqd, bfqd); + hlist_add_head_rcu(&bfqg->group_node, &bgrp->group_data); + spin_unlock_irq(&bgrp->lock); + + return bfqg; +} + +#define SHOW_FUNCTION(__VAR) \ +static u64 bfqio_cgroup_##__VAR##_read(struct cgroup *cgroup, \ + struct cftype *cftype) \ +{ \ + struct bfqio_cgroup *bgrp; \ + u64 ret; \ + \ + if (!cgroup_lock_live_group(cgroup)) \ + return -ENODEV; \ + \ + bgrp = cgroup_to_bfqio(cgroup); \ + spin_lock_irq(&bgrp->lock); \ + ret = bgrp->__VAR; \ + spin_unlock_irq(&bgrp->lock); \ + \ + cgroup_unlock(); \ + \ + return ret; \ +} + +SHOW_FUNCTION(weight); +SHOW_FUNCTION(ioprio); +SHOW_FUNCTION(ioprio_class); +#undef SHOW_FUNCTION + +#define STORE_FUNCTION(__VAR, __MIN, __MAX) \ +static int bfqio_cgroup_##__VAR##_write(struct cgroup *cgroup, \ + struct cftype *cftype, \ + u64 val) \ +{ \ + struct bfqio_cgroup *bgrp; \ + struct bfq_group *bfqg; \ + struct hlist_node *n; \ + \ + if (val < (__MIN) || val > (__MAX)) \ + return -EINVAL; \ + \ + if (!cgroup_lock_live_group(cgroup)) \ + return -ENODEV; \ + \ + bgrp = cgroup_to_bfqio(cgroup); \ + \ + spin_lock_irq(&bgrp->lock); \ + bgrp->__VAR = (unsigned short)val; \ + hlist_for_each_entry(bfqg, n, &bgrp->group_data, group_node) { \ + bfqg->entity.new_##__VAR = (unsigned short)val; \ + smp_wmb(); \ + bfqg->entity.ioprio_changed = 1; \ + } \ + spin_unlock_irq(&bgrp->lock); \ + \ + cgroup_unlock(); \ + \ + return 0; \ +} + +STORE_FUNCTION(weight, BFQ_MIN_WEIGHT, BFQ_MAX_WEIGHT); +STORE_FUNCTION(ioprio, 0, IOPRIO_BE_NR - 1); +STORE_FUNCTION(ioprio_class, IOPRIO_CLASS_RT, IOPRIO_CLASS_IDLE); +#undef STORE_FUNCTION + +static struct cftype bfqio_files[] = { + { + .name = "weight", + .read_u64 = bfqio_cgroup_weight_read, + .write_u64 = bfqio_cgroup_weight_write, + }, + { + .name = "ioprio", + .read_u64 = bfqio_cgroup_ioprio_read, + .write_u64 = bfqio_cgroup_ioprio_write, + }, + { + .name = "ioprio_class", + .read_u64 = bfqio_cgroup_ioprio_class_read, + .write_u64 = bfqio_cgroup_ioprio_class_write, + }, +}; + +static int bfqio_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup) +{ + return cgroup_add_files(cgroup, subsys, bfqio_files, + ARRAY_SIZE(bfqio_files)); +} + +static struct cgroup_subsys_state *bfqio_create(struct cgroup_subsys *subsys, + struct cgroup *cgroup) +{ + struct bfqio_cgroup *bgrp; + + if (cgroup->parent 
!= NULL) { + bgrp = kzalloc(sizeof(*bgrp), GFP_KERNEL); + if (bgrp == NULL) + return ERR_PTR(-ENOMEM); + } else + bgrp = &bfqio_root_cgroup; + + spin_lock_init(&bgrp->lock); + INIT_HLIST_HEAD(&bgrp->group_data); + bgrp->ioprio = BFQ_DEFAULT_GRP_IOPRIO; + bgrp->ioprio_class = BFQ_DEFAULT_GRP_CLASS; + + return &bgrp->css; +} + +/* + * We cannot support shared io contexts, as we have no mean to support + * two tasks with the same ioc in two different groups without major rework + * of the main cic/bfqq data structures. By now we allow a task to change + * its cgroup only if it's the only owner of its ioc; the drawback of this + * behavior is that a group containing a task that forked using CLONE_IO + * will not be destroyed until the tasks sharing the ioc die. + */ +static int bfqio_can_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup, + struct task_struct *tsk, bool threadgroup) +{ + struct io_context *ioc; + int ret = 0; + + /* task_lock() is needed to avoid races with exit_io_context() */ + task_lock(tsk); + ioc = tsk->io_context; + if (ioc != NULL && atomic_read(&ioc->nr_tasks) > 1) + /* + * ioc == NULL means that the task is either too young or + * exiting: if it has still no ioc the ioc can't be shared, + * if the task is exiting the attach will fail anyway, no + * matter what we return here. + */ + ret = -EINVAL; + task_unlock(tsk); + + return ret; +} + +static void bfqio_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup, + struct cgroup *prev, struct task_struct *tsk, + bool threadgroup) +{ + struct io_context *ioc; + struct cfq_io_context *cic; + struct hlist_node *n; + + task_lock(tsk); + ioc = tsk->io_context; + if (ioc != NULL) { + BUG_ON(atomic_long_read(&ioc->refcount) == 0); + atomic_long_inc(&ioc->refcount); + } + task_unlock(tsk); + + if (ioc == NULL) + return; + + rcu_read_lock(); + hlist_for_each_entry_rcu(cic, n, &ioc->bfq_cic_list, cic_list) + bfq_cic_change_cgroup(cic, cgroup); + rcu_read_unlock(); + + put_io_context(ioc); +} + +static void bfqio_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup) +{ + struct bfqio_cgroup *bgrp = cgroup_to_bfqio(cgroup); + struct hlist_node *n, *tmp; + struct bfq_group *bfqg; + + /* + * Since we are destroying the cgroup, there are no more tasks + * referencing it, and all the RCU grace periods that may have + * referenced it are ended (as the destruction of the parent + * cgroup is RCU-safe); bgrp->group_data will not be accessed by + * anything else and we don't need any synchronization. 
+ */ + hlist_for_each_entry_safe(bfqg, n, tmp, &bgrp->group_data, group_node) + bfq_destroy_group(bgrp, bfqg); + + BUG_ON(!hlist_empty(&bgrp->group_data)); + + kfree(bgrp); +} + +struct cgroup_subsys bfqio_subsys = { + .name = "bfqio", + .create = bfqio_create, + .can_attach = bfqio_can_attach, + .attach = bfqio_attach, + .destroy = bfqio_destroy, + .populate = bfqio_populate, + .subsys_id = bfqio_subsys_id, +}; +#else +static inline void bfq_init_entity(struct bfq_entity *entity, + struct bfq_group *bfqg) +{ + entity->weight = entity->new_weight; + entity->orig_weight = entity->new_weight; + entity->ioprio = entity->new_ioprio; + entity->ioprio_class = entity->new_ioprio_class; + entity->sched_data = &bfqg->sched_data; +} + +static inline struct bfq_group * +bfq_cic_update_cgroup(struct cfq_io_context *cic) +{ + struct bfq_data *bfqd = cic->key; + return bfqd->root_group; +} + +static inline void bfq_bfqq_move(struct bfq_data *bfqd, + struct bfq_queue *bfqq, + struct bfq_entity *entity, + struct bfq_group *bfqg) +{ +} + +static inline void bfq_disconnect_groups(struct bfq_data *bfqd) +{ + bfq_put_async_queues(bfqd, bfqd->root_group); +} + +static inline void bfq_free_root_group(struct bfq_data *bfqd) +{ + kfree(bfqd->root_group); +} + +static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node) +{ + struct bfq_group *bfqg; + int i; + + bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node); + if (bfqg == NULL) + return NULL; + + for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) + bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; + + return bfqg; +} +#endif diff --git a/block/bfq-ioc.c b/block/bfq-ioc.c new file mode 100644 index 0000000000000..01f8313320eec --- /dev/null +++ b/block/bfq-ioc.c @@ -0,0 +1,380 @@ +/* + * BFQ: I/O context handling. + * + * Based on ideas and code from CFQ: + * Copyright (C) 2003 Jens Axboe + * + * Copyright (C) 2008 Fabio Checconi + * Paolo Valente + */ + +/** + * bfq_cic_free_rcu - deferred cic freeing. + * @head: RCU head of the cic to free. + * + * Free the cic containing @head and, if it was the last one and + * the module is exiting wake up anyone waiting for its deallocation + * (see bfq_exit()). + */ +static void bfq_cic_free_rcu(struct rcu_head *head) +{ + struct cfq_io_context *cic; + + cic = container_of(head, struct cfq_io_context, rcu_head); + + kmem_cache_free(bfq_ioc_pool, cic); + elv_ioc_count_dec(bfq_ioc_count); + + if (bfq_ioc_gone != NULL) { + spin_lock(&bfq_ioc_gone_lock); + if (bfq_ioc_gone != NULL && + !elv_ioc_count_read(bfq_ioc_count)) { + complete(bfq_ioc_gone); + bfq_ioc_gone = NULL; + } + spin_unlock(&bfq_ioc_gone_lock); + } +} + +static void bfq_cic_free(struct cfq_io_context *cic) +{ + call_rcu(&cic->rcu_head, bfq_cic_free_rcu); +} + +/** + * cic_free_func - disconnect a cic ready to be freed. + * @ioc: the io_context @cic belongs to. + * @cic: the cic to be freed. + * + * Remove @cic from the @ioc radix tree hash and from its cic list, + * deferring the deallocation of @cic to the end of the current RCU + * grace period. This assumes that __bfq_exit_single_io_context() + * has already been called for @cic. 
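+ *
+ * The radix tree slot is recovered from the dead key itself: the value
+ * set by bfqd_dead_key() carries the old cic_index shifted by
+ * CIC_DEAD_INDEX_SHIFT (with CIC_DEAD_KEY set), hence the
+ * dead_key >> CIC_DEAD_INDEX_SHIFT used for the deletion below.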
+ */ +static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic) +{ + unsigned long flags; + unsigned long dead_key = (unsigned long) cic->key; + + BUG_ON(!(dead_key & CIC_DEAD_KEY)); + + spin_lock_irqsave(&ioc->lock, flags); + radix_tree_delete(&ioc->bfq_radix_root, + dead_key >> CIC_DEAD_INDEX_SHIFT); + hlist_del_init_rcu(&cic->cic_list); + spin_unlock_irqrestore(&ioc->lock, flags); + + bfq_cic_free(cic); +} + +static void bfq_free_io_context(struct io_context *ioc) +{ + /* + * ioc->refcount is zero here, or we are called from elv_unregister(), + * so no more cic's are allowed to be linked into this ioc. So it + * should be ok to iterate over the known list, we will see all cic's + * since no new ones are added. + */ + call_for_each_cic(ioc, cic_free_func); +} + +/** + * __bfq_exit_single_io_context - deassociate @cic from any running task. + * @bfqd: bfq_data on which @cic is valid. + * @cic: the cic being exited. + * + * Whenever no more tasks are using @cic or @bfqd is deallocated we + * need to invalidate its entry in the radix tree hash table and to + * release the queues it refers to. + * + * Called under the queue lock. + */ +static void __bfq_exit_single_io_context(struct bfq_data *bfqd, + struct cfq_io_context *cic) +{ + struct io_context *ioc = cic->ioc; + + list_del_init(&cic->queue_list); + + /* + * Make sure dead mark is seen for dead queues + */ + smp_wmb(); + rcu_assign_pointer(cic->key, bfqd_dead_key(bfqd)); + + /* + * No write-side locking as no task is using @ioc (they're exited + * or bfqd is being deallocated. + */ + rcu_read_lock(); + if (rcu_dereference(ioc->ioc_data) == cic) { + rcu_read_unlock(); + spin_lock(&ioc->lock); + rcu_assign_pointer(ioc->ioc_data, NULL); + spin_unlock(&ioc->lock); + } else + rcu_read_unlock(); + + if (cic->cfqq[BLK_RW_ASYNC] != NULL) { + bfq_exit_bfqq(bfqd, cic->cfqq[BLK_RW_ASYNC]); + cic->cfqq[BLK_RW_ASYNC] = NULL; + } + + if (cic->cfqq[BLK_RW_SYNC] != NULL) { + bfq_exit_bfqq(bfqd, cic->cfqq[BLK_RW_SYNC]); + cic->cfqq[BLK_RW_SYNC] = NULL; + } +} + +/** + * bfq_exit_single_io_context - deassociate @cic from @ioc (unlocked version). + * @ioc: the io_context @cic belongs to. + * @cic: the cic being exited. + * + * Take the queue lock and call __bfq_exit_single_io_context() to do the + * rest of the work. We take care of possible races with bfq_exit_queue() + * using bfq_get_bfqd_locked() (and abusing a little bit the RCU mechanism). + */ +static void bfq_exit_single_io_context(struct io_context *ioc, + struct cfq_io_context *cic) +{ + struct bfq_data *bfqd; + unsigned long uninitialized_var(flags); + + bfqd = bfq_get_bfqd_locked(&cic->key, &flags); + if (bfqd != NULL) { + __bfq_exit_single_io_context(bfqd, cic); + bfq_put_bfqd_unlock(bfqd, &flags); + } +} + +/** + * bfq_exit_io_context - deassociate @ioc from all cics it owns. + * @ioc: the @ioc being exited. + * + * No more processes are using @ioc we need to clean up and put the + * internal structures we have that belongs to that process. Loop + * through all its cics, locking their queues and exiting them. 
+ */ +static void bfq_exit_io_context(struct io_context *ioc) +{ + call_for_each_cic(ioc, bfq_exit_single_io_context); +} + +static struct cfq_io_context *bfq_alloc_io_context(struct bfq_data *bfqd, + gfp_t gfp_mask) +{ + struct cfq_io_context *cic; + + cic = kmem_cache_alloc_node(bfq_ioc_pool, gfp_mask | __GFP_ZERO, + bfqd->queue->node); + if (cic != NULL) { + cic->last_end_request = jiffies; + INIT_LIST_HEAD(&cic->queue_list); + INIT_HLIST_NODE(&cic->cic_list); + cic->dtor = bfq_free_io_context; + cic->exit = bfq_exit_io_context; + elv_ioc_count_inc(bfq_ioc_count); + } + + return cic; +} + +/** + * bfq_drop_dead_cic - free an exited cic. + * @bfqd: bfq data for the device in use. + * @ioc: io_context owning @cic. + * @cic: the @cic to free. + * + * We drop cfq io contexts lazily, so we may find a dead one. + */ +static void bfq_drop_dead_cic(struct bfq_data *bfqd, struct io_context *ioc, + struct cfq_io_context *cic) +{ + unsigned long flags; + + WARN_ON(!list_empty(&cic->queue_list)); + BUG_ON(cic->key != bfqd_dead_key(bfqd)); + + spin_lock_irqsave(&ioc->lock, flags); + + BUG_ON(ioc->ioc_data == cic); + + /* + * With shared I/O contexts two lookups may race and drop the + * same cic more than one time: RCU guarantees that the storage + * will not be freed too early, here we make sure that we do + * not try to remove the cic from the hashing structures multiple + * times. + */ + if (!hlist_unhashed(&cic->cic_list)) { + radix_tree_delete(&ioc->bfq_radix_root, bfqd->cic_index); + hlist_del_init_rcu(&cic->cic_list); + bfq_cic_free(cic); + } + + spin_unlock_irqrestore(&ioc->lock, flags); +} + +/** + * bfq_cic_lookup - search into @ioc a cic associated to @bfqd. + * @bfqd: the lookup key. + * @ioc: the io_context of the process doing I/O. + * + * If @ioc already has a cic associated to @bfqd return it, return %NULL + * otherwise. + */ +static struct cfq_io_context *bfq_cic_lookup(struct bfq_data *bfqd, + struct io_context *ioc) +{ + struct cfq_io_context *cic; + unsigned long flags; + void *k; + + if (unlikely(ioc == NULL)) + return NULL; + + rcu_read_lock(); + + /* We maintain a last-hit cache, to avoid browsing over the tree. */ + cic = rcu_dereference(ioc->ioc_data); + if (cic != NULL) { + k = rcu_dereference(cic->key); + if (k == bfqd) + goto out; + } + + do { + cic = radix_tree_lookup(&ioc->bfq_radix_root, + bfqd->cic_index); + if (cic == NULL) + goto out; + + k = rcu_dereference(cic->key); + if (unlikely(k != bfqd)) { + rcu_read_unlock(); + bfq_drop_dead_cic(bfqd, ioc, cic); + rcu_read_lock(); + continue; + } + + spin_lock_irqsave(&ioc->lock, flags); + rcu_assign_pointer(ioc->ioc_data, cic); + spin_unlock_irqrestore(&ioc->lock, flags); + break; + } while (1); + +out: + rcu_read_unlock(); + + return cic; +} + +/** + * bfq_cic_link - add @cic to @ioc. + * @bfqd: bfq_data @cic refers to. + * @ioc: io_context @cic belongs to. + * @cic: the cic to link. + * @gfp_mask: the mask to use for radix tree preallocations. + * + * Add @cic to @ioc, using @bfqd as the search key. This enables us to + * lookup the process specific cfq io context when entered from the block + * layer. Also adds @cic to a per-bfqd list, used when this queue is + * removed. + */ +static int bfq_cic_link(struct bfq_data *bfqd, struct io_context *ioc, + struct cfq_io_context *cic, gfp_t gfp_mask) +{ + unsigned long flags; + int ret; + + ret = radix_tree_preload(gfp_mask); + if (ret == 0) { + cic->ioc = ioc; + + /* No write-side locking, cic is not published yet. 
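+		 * The key is set here, before the cic is made reachable
+		 * through the radix tree and the cic list further below.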
*/ + rcu_assign_pointer(cic->key, bfqd); + + spin_lock_irqsave(&ioc->lock, flags); + ret = radix_tree_insert(&ioc->bfq_radix_root, + bfqd->cic_index, cic); + if (ret == 0) + hlist_add_head_rcu(&cic->cic_list, &ioc->bfq_cic_list); + spin_unlock_irqrestore(&ioc->lock, flags); + + radix_tree_preload_end(); + + if (ret == 0) { + spin_lock_irqsave(bfqd->queue->queue_lock, flags); + list_add(&cic->queue_list, &bfqd->cic_list); + spin_unlock_irqrestore(bfqd->queue->queue_lock, flags); + } + } + + if (ret != 0) + printk(KERN_ERR "bfq: cic link failed!\n"); + + return ret; +} + +/** + * bfq_ioc_set_ioprio - signal a priority change to the cics belonging to @ioc. + * @ioc: the io_context changing its priority. + */ +static inline void bfq_ioc_set_ioprio(struct io_context *ioc) +{ + call_for_each_cic(ioc, bfq_changed_ioprio); +} + +/** + * bfq_get_io_context - return the @cic associated to @bfqd in @ioc. + * @bfqd: the search key. + * @gfp_mask: the mask to use for cic allocation. + * + * Setup general io context and cfq io context. There can be several cfq + * io contexts per general io context, if this process is doing io to more + * than one device managed by cfq. + */ +static struct cfq_io_context *bfq_get_io_context(struct bfq_data *bfqd, + gfp_t gfp_mask) +{ + struct io_context *ioc = NULL; + struct cfq_io_context *cic; + + might_sleep_if(gfp_mask & __GFP_WAIT); + + ioc = get_io_context(gfp_mask, bfqd->queue->node); + if (ioc == NULL) + return NULL; + + /* Lookup for an existing cic. */ + cic = bfq_cic_lookup(bfqd, ioc); + if (cic != NULL) + goto out; + + /* Alloc one if needed. */ + cic = bfq_alloc_io_context(bfqd, gfp_mask); + if (cic == NULL) + goto err; + + /* Link it into the ioc's radix tree and cic list. */ + if (bfq_cic_link(bfqd, ioc, cic, gfp_mask) != 0) + goto err_free; + +out: + /* + * test_and_clear_bit() implies a memory barrier, paired with + * the wmb() in fs/ioprio.c, so the value seen for ioprio is the + * new one. + */ + if (unlikely(test_and_clear_bit(IOC_BFQ_IOPRIO_CHANGED, + ioc->ioprio_changed))) + bfq_ioc_set_ioprio(ioc); + + return cic; +err_free: + bfq_cic_free(cic); +err: + put_io_context(ioc); + return NULL; +} diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c new file mode 100644 index 0000000000000..bc805ff326312 --- /dev/null +++ b/block/bfq-iosched.c @@ -0,0 +1,2974 @@ +/* + * BFQ, or Budget Fair Queueing, disk scheduler. + * + * Based on ideas and code from CFQ: + * Copyright (C) 2003 Jens Axboe + * + * Copyright (C) 2008 Fabio Checconi + * Paolo Valente + * + * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ file. + * + * BFQ is a proportional share disk scheduling algorithm based on the + * slice-by-slice service scheme of CFQ. But BFQ assigns budgets, + * measured in number of sectors, to tasks instead of time slices. + * The disk is not granted to the active task for a given time slice, + * but until it has exahusted its assigned budget. This change from + * the time to the service domain allows BFQ to distribute the disk + * bandwidth among tasks as desired, without any distortion due to + * ZBR, workload fluctuations or other factors. BFQ uses an ad hoc + * internal scheduler, called B-WF2Q+, to schedule tasks according to + * their budgets. Thanks to this accurate scheduler, BFQ can afford + * to assign high budgets to disk-bound non-seeky tasks (to boost the + * throughput), and yet guarantee low latencies to interactive and + * soft real-time applications. 
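+ *
+ * As a rough illustration (example figures only): with the default
+ * 16384-sector (8 MiB) maximum budget, a sequential reader streaming at
+ * 100 MiB/s occupies the disk for about 80 ms per budget, whereas a seeky
+ * task achieving 5 MiB/s would need roughly 1.6 s; the budget timeout
+ * used below caps such cases so that seeky tasks cannot degrade the
+ * aggregate throughput.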
+ * + * BFQ has been introduced in [1], where the interested reader can + * find an accurate description of the algorithm, the bandwidth + * distribution and latency guarantees it provides, plus formal proofs + * of all the properties. With respect to the algorithm presented in + * the paper, this implementation adds several little heuristics, and + * a hierarchical extension, based on H-WF2Q+. + * + * B-WF2Q+ is based on WF2Q+, that is described in [2], together with + * H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N) + * complexity derives from the one introduced with EEVDF in [3]. + * + * [1] P. Valente and F. Checconi, ``High Throughput Disk Scheduling + * with Deterministic Guarantees on Bandwidth Distribution,'', + * IEEE Transactions on Computer, May 2010. + * + * http://algo.ing.unimo.it/people/paolo/disk_sched/bfq-techreport.pdf + * + * [2] Jon C.R. Bennett and H. Zhang, ``Hierarchical Packet Fair Queueing + * Algorithms,'' IEEE/ACM Transactions on Networking, 5(5):675-689, + * Oct 1997. + * + * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz + * + * [3] I. Stoica and H. Abdel-Wahab, ``Earliest Eligible Virtual Deadline + * First: A Flexible and Accurate Mechanism for Proportional Share + * Resource Allocation,'' technical report. + * + * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "bfq.h" + +/* Max number of dispatches in one round of service. */ +static const int bfq_quantum = 4; + +/* Expiration time of sync (0) and async (1) requests, in jiffies. */ +static const int bfq_fifo_expire[2] = { HZ / 4, HZ / 8 }; + +/* Maximum backwards seek, in KiB. */ +static const int bfq_back_max = 16 * 1024; + +/* Penalty of a backwards seek, in number of sectors. */ +static const int bfq_back_penalty = 2; + +/* Idling period duration, in jiffies. */ +static int bfq_slice_idle = HZ / 125; + +/* Default maximum budget values, in sectors and number of requests. */ +static const int bfq_default_max_budget = 16 * 1024; +static const int bfq_max_budget_async_rq = 4; + +/* + * Async to sync throughput distribution is controlled as follows: + * when an async request is served, the entity is charged the number + * of sectors of the request, multipled by the factor below + */ +static const int bfq_async_charge_factor = 10; + +/* Default timeout values, in jiffies, approximating CFQ defaults. */ +static const int bfq_timeout_sync = HZ / 8; +static int bfq_timeout_async = HZ / 25; + +struct kmem_cache *bfq_pool; +struct kmem_cache *bfq_ioc_pool; + +static DEFINE_PER_CPU(unsigned long, bfq_ioc_count); +static struct completion *bfq_ioc_gone; +static DEFINE_SPINLOCK(bfq_ioc_gone_lock); + +static DEFINE_SPINLOCK(cic_index_lock); +static DEFINE_IDA(cic_index_ida); + +/* Below this threshold (in ms), we consider thinktime immediate. */ +#define BFQ_MIN_TT 2 + +/* hw_tag detection: parallel requests threshold and min samples needed. */ +#define BFQ_HW_QUEUE_THRESHOLD 4 +#define BFQ_HW_QUEUE_SAMPLES 32 + +#define BFQQ_SEEK_THR (sector_t)(8 * 1024) +#define BFQQ_SEEKY(bfqq) ((bfqq)->seek_mean > BFQQ_SEEK_THR) + +/* Min samples used for peak rate estimation (for autotuning). */ +#define BFQ_PEAK_RATE_SAMPLES 32 + +/* Shift used for peak rate fixed precision calculations. 
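+ * (The peak rate is kept in fixed point, scaled by 2^BFQ_RATE_SHIFT;
+ * see bfq_update_peak_rate().)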
*/ +#define BFQ_RATE_SHIFT 16 + +#define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \ + { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 }) + +#define RQ_CIC(rq) \ + ((struct cfq_io_context *) (rq)->elevator_private) +#define RQ_BFQQ(rq) ((rq)->elevator_private2) + +#include "bfq-ioc.c" +#include "bfq-sched.c" +#include "bfq-cgroup.c" + +#define bfq_class_idle(bfqq) ((bfqq)->entity.ioprio_class ==\ + IOPRIO_CLASS_IDLE) +#define bfq_class_rt(bfqq) ((bfqq)->entity.ioprio_class ==\ + IOPRIO_CLASS_RT) + +#define bfq_sample_valid(samples) ((samples) > 80) + +/* + * We regard a request as SYNC, if either it's a read or has the SYNC bit + * set (in which case it could also be a direct WRITE). + */ +static inline int bfq_bio_sync(struct bio *bio) +{ + if (bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC)) + return 1; + + return 0; +} + +/* + * Scheduler run of queue, if there are requests pending and no one in the + * driver that will restart queueing. + */ +static inline void bfq_schedule_dispatch(struct bfq_data *bfqd) +{ + if (bfqd->queued != 0) { + bfq_log(bfqd, "schedule dispatch"); + kblockd_schedule_work(bfqd->queue, &bfqd->unplug_work); + } +} + +static inline int bfq_queue_empty(struct request_queue *q) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + + return bfqd->queued == 0; +} + +/* + * Lifted from AS - choose which of rq1 and rq2 that is best served now. + * We choose the request that is closesr to the head right now. Distance + * behind the head is penalized and only allowed to a certain extent. + */ +static struct request *bfq_choose_req(struct bfq_data *bfqd, + struct request *rq1, + struct request *rq2, + sector_t last) +{ + sector_t s1, s2, d1 = 0, d2 = 0; + unsigned long back_max; +#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */ +#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */ + unsigned wrap = 0; /* bit mask: requests behind the disk head? */ + + if (rq1 == NULL || rq1 == rq2) + return rq2; + if (rq2 == NULL) + return rq1; + + if (rq_is_sync(rq1) && !rq_is_sync(rq2)) + return rq1; + else if (rq_is_sync(rq2) && !rq_is_sync(rq1)) + return rq2; + if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META)) + return rq1; + else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META)) + return rq2; + + s1 = blk_rq_pos(rq1); + s2 = blk_rq_pos(rq2); + + /* + * By definition, 1KiB is 2 sectors. + */ + back_max = bfqd->bfq_back_max * 2; + + /* + * Strict one way elevator _except_ in the case where we allow + * short backward seeks which are biased as twice the cost of a + * similar forward seek. + */ + if (s1 >= last) + d1 = s1 - last; + else if (s1 + back_max >= last) + d1 = (last - s1) * bfqd->bfq_back_penalty; + else + wrap |= BFQ_RQ1_WRAP; + + if (s2 >= last) + d2 = s2 - last; + else if (s2 + back_max >= last) + d2 = (last - s2) * bfqd->bfq_back_penalty; + else + wrap |= BFQ_RQ2_WRAP; + + /* Found required data */ + + /* + * By doing switch() on the bit mask "wrap" we avoid having to + * check two variables for all permutations: --> faster! + */ + switch (wrap) { + case 0: /* common case for CFQ: rq1 and rq2 not wrapped */ + if (d1 < d2) + return rq1; + else if (d2 < d1) + return rq2; + else { + if (s1 >= s2) + return rq1; + else + return rq2; + } + + case BFQ_RQ2_WRAP: + return rq1; + case BFQ_RQ1_WRAP: + return rq2; + case (BFQ_RQ1_WRAP|BFQ_RQ2_WRAP): /* both rqs wrapped */ + default: + /* + * Since both rqs are wrapped, + * start with the one that's further behind head + * (--> only *one* back seek required), + * since back seek takes more time than forward. 
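+		 * Here "further behind" simply means the smaller starting
+		 * sector, which is what the comparison below picks.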
+ */ + if (s1 <= s2) + return rq1; + else + return rq2; + } +} + +static struct bfq_queue * +bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root, + sector_t sector, struct rb_node **ret_parent, + struct rb_node ***rb_link) +{ + struct rb_node **p, *parent; + struct bfq_queue *bfqq = NULL; + + parent = NULL; + p = &root->rb_node; + while (*p) { + struct rb_node **n; + + parent = *p; + bfqq = rb_entry(parent, struct bfq_queue, pos_node); + + /* + * Sort strictly based on sector. Smallest to the left, + * largest to the right. + */ + if (sector > blk_rq_pos(bfqq->next_rq)) + n = &(*p)->rb_right; + else if (sector < blk_rq_pos(bfqq->next_rq)) + n = &(*p)->rb_left; + else + break; + p = n; + bfqq = NULL; + } + + *ret_parent = parent; + if (rb_link) + *rb_link = p; + + bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %lu", sector, + bfqq != NULL ? bfqq->pid : 0); + + return bfqq; +} + +static void bfq_rq_pos_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq) +{ + struct rb_node **p, *parent; + struct bfq_queue *__bfqq; + + if (bfqq->pos_root != NULL) { + rb_erase(&bfqq->pos_node, bfqq->pos_root); + bfqq->pos_root = NULL; + } + + if (bfq_class_idle(bfqq)) + return; + if (!bfqq->next_rq) + return; + + bfqq->pos_root = &bfqd->rq_pos_tree; + __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root, + blk_rq_pos(bfqq->next_rq), &parent, &p); + if (__bfqq == NULL) { + rb_link_node(&bfqq->pos_node, parent, p); + rb_insert_color(&bfqq->pos_node, bfqq->pos_root); + } else + bfqq->pos_root = NULL; +} + +static struct request *bfq_find_next_rq(struct bfq_data *bfqd, + struct bfq_queue *bfqq, + struct request *last) +{ + struct rb_node *rbnext = rb_next(&last->rb_node); + struct rb_node *rbprev = rb_prev(&last->rb_node); + struct request *next = NULL, *prev = NULL; + + BUG_ON(RB_EMPTY_NODE(&last->rb_node)); + + if (rbprev != NULL) + prev = rb_entry_rq(rbprev); + + if (rbnext != NULL) + next = rb_entry_rq(rbnext); + else { + rbnext = rb_first(&bfqq->sort_list); + if (rbnext && rbnext != &last->rb_node) + next = rb_entry_rq(rbnext); + } + + return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last)); +} + +static void bfq_del_rq_rb(struct request *rq) +{ + struct bfq_queue *bfqq = RQ_BFQQ(rq); + struct bfq_data *bfqd = bfqq->bfqd; + const int sync = rq_is_sync(rq); + + BUG_ON(bfqq->queued[sync] == 0); + bfqq->queued[sync]--; + bfqd->queued--; + + elv_rb_del(&bfqq->sort_list, rq); + + if (RB_EMPTY_ROOT(&bfqq->sort_list)) { + if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->active_queue) + bfq_del_bfqq_busy(bfqd, bfqq, 1); + /* + * Remove queue from request-position tree as it is empty. + */ + if (bfqq->pos_root != NULL) { + rb_erase(&bfqq->pos_node, bfqq->pos_root); + bfqq->pos_root = NULL; + } + } +} + +/* see the definition of bfq_async_charge_factor for details */ +static inline unsigned long bfq_serv_to_charge(struct request *rq, + struct bfq_queue *bfqq) +{ + return blk_rq_sectors(rq) * + (1 + ((!bfq_bfqq_sync(bfqq)) * (bfqq->raising_coeff == 1) * + bfq_async_charge_factor)); +} + +/** + * bfq_updated_next_req - update the queue after a new next_rq selection. + * @bfqd: the device data the queue belongs to. + * @bfqq: the queue to update. + * + * If the first request of a queue changes we make sure that the queue + * has enough budget to serve at least its first request (if the + * request has grown). We do this because if the queue has not enough + * budget for its first request, it has to go through two dispatch + * rounds to actually get it dispatched. 
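+ *
+ * Example (illustrative figures only): if max_budget is 16384 sectors but
+ * bfq_serv_to_charge() for the new head request evaluates to 20480, the
+ * entity budget is raised to 20480 so that the request fits in a single
+ * budget.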
+ */ +static void bfq_updated_next_req(struct bfq_data *bfqd, + struct bfq_queue *bfqq) +{ + struct bfq_entity *entity = &bfqq->entity; + struct bfq_service_tree *st = bfq_entity_service_tree(entity); + struct request *next_rq = bfqq->next_rq; + unsigned long new_budget; + + if (next_rq == NULL) + return; + + if (bfqq == bfqd->active_queue) + /* + * In order not to break guarantees, budgets cannot be + * changed after an entity has been selected. + */ + return; + + BUG_ON(entity->tree != &st->active); + BUG_ON(entity == entity->sched_data->active_entity); + + new_budget = max_t(unsigned long, bfqq->max_budget, + bfq_serv_to_charge(next_rq, bfqq)); + entity->budget = new_budget; + bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu", new_budget); + bfq_activate_bfqq(bfqd, bfqq); +} + +static void bfq_add_rq_rb(struct request *rq) +{ + struct bfq_queue *bfqq = RQ_BFQQ(rq); + struct bfq_entity *entity = &bfqq->entity; + struct bfq_data *bfqd = bfqq->bfqd; + struct request *__alias, *next_rq, *prev; + unsigned long old_raising_coeff = bfqq->raising_coeff; + int idle_for_long_time = bfqq->budget_timeout + + bfqd->bfq_raising_min_idle_time < jiffies; + + bfq_log_bfqq(bfqd, bfqq, "add_rq_rb %d", rq_is_sync(rq)); + bfqq->queued[rq_is_sync(rq)]++; + bfqd->queued++; + + /* + * Looks a little odd, but the first insert might return an alias, + * if that happens, put the alias on the dispatch list. + */ + while ((__alias = elv_rb_add(&bfqq->sort_list, rq)) != NULL) + bfq_dispatch_insert(bfqd->queue, __alias); + + /* + * Check if this request is a better next-serve candidate. + */ + prev = bfqq->next_rq; + next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position); + BUG_ON(next_rq == NULL); + bfqq->next_rq = next_rq; + + /* + * Adjust priority tree position, if next_rq changes. + */ + if (prev != bfqq->next_rq) + bfq_rq_pos_tree_add(bfqd, bfqq); + + if (!bfq_bfqq_busy(bfqq)) { + int soft_rt = bfqd->bfq_raising_max_softrt_rate > 0 && + bfqq->soft_rt_next_start < jiffies; + entity->budget = max_t(unsigned long, bfqq->max_budget, + bfq_serv_to_charge(next_rq, bfqq)); + + if (! bfqd->low_latency) + goto add_bfqq_busy; + + /* + * If the queue is not being boosted and has been idle + * for enough time, start a weight-raising period + */ + if(old_raising_coeff == 1 && (idle_for_long_time || soft_rt)) { + bfqq->raising_coeff = bfqd->bfq_raising_coeff; + bfqq->raising_cur_max_time = idle_for_long_time ? 
+ bfqd->bfq_raising_max_time : + bfqd->bfq_raising_rt_max_time; + bfq_log_bfqq(bfqd, bfqq, + "wrais starting at %llu msec," + "rais_max_time %u", + bfqq->last_rais_start_finish, + jiffies_to_msecs(bfqq-> + raising_cur_max_time)); + } else if (old_raising_coeff > 1) { + if (idle_for_long_time) + bfqq->raising_cur_max_time = + bfqd->bfq_raising_max_time; + else if (bfqq->raising_cur_max_time == + bfqd->bfq_raising_rt_max_time && + !soft_rt) { + bfqq->raising_coeff = 1; + bfq_log_bfqq(bfqd, bfqq, + "wrais ending at %llu msec," + "rais_max_time %u", + bfqq->last_rais_start_finish, + jiffies_to_msecs(bfqq-> + raising_cur_max_time)); + } + } + if (old_raising_coeff != bfqq->raising_coeff) + entity->ioprio_changed = 1; +add_bfqq_busy: + bfq_add_bfqq_busy(bfqd, bfqq); + } else { + if(bfqd->low_latency && old_raising_coeff == 1 && + !rq_is_sync(rq) && + bfqq->last_rais_start_finish + + bfqd->bfq_raising_min_idle_time < jiffies) { + bfqq->raising_coeff = bfqd->bfq_raising_coeff; + + entity->ioprio_changed = 1; + bfq_log_bfqq(bfqd, bfqq, + "non-idle wrais starting at %llu msec," + "rais_max_time %u", + bfqq->last_rais_start_finish, + jiffies_to_msecs(bfqq-> + raising_cur_max_time)); + } + bfq_updated_next_req(bfqd, bfqq); + } + + if(bfqd->low_latency && + (old_raising_coeff == 1 || bfqq->raising_coeff == 1 || + idle_for_long_time)) + bfqq->last_rais_start_finish = jiffies; +} + +static void bfq_reposition_rq_rb(struct bfq_queue *bfqq, struct request *rq) +{ + elv_rb_del(&bfqq->sort_list, rq); + bfqq->queued[rq_is_sync(rq)]--; + bfqq->bfqd->queued--; + bfq_add_rq_rb(rq); +} + +static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd, + struct bio *bio) +{ + struct task_struct *tsk = current; + struct cfq_io_context *cic; + struct bfq_queue *bfqq; + + cic = bfq_cic_lookup(bfqd, tsk->io_context); + if (cic == NULL) + return NULL; + + bfqq = cic_to_bfqq(cic, bfq_bio_sync(bio)); + if (bfqq != NULL) { + sector_t sector = bio->bi_sector + bio_sectors(bio); + + return elv_rb_find(&bfqq->sort_list, sector); + } + + return NULL; +} + +static void bfq_activate_request(struct request_queue *q, struct request *rq) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + + bfqd->rq_in_driver++; + bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); + bfq_log(bfqd, "activate_request: new bfqd->last_position %llu", + bfqd->last_position); +} + +static void bfq_deactivate_request(struct request_queue *q, struct request *rq) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + + WARN_ON(bfqd->rq_in_driver == 0); + bfqd->rq_in_driver--; +} + +static void bfq_remove_request(struct request *rq) +{ + struct bfq_queue *bfqq = RQ_BFQQ(rq); + struct bfq_data *bfqd = bfqq->bfqd; + + if (bfqq->next_rq == rq) { + bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq); + bfq_updated_next_req(bfqd, bfqq); + } + + list_del_init(&rq->queuelist); + bfq_del_rq_rb(rq); + + if (rq->cmd_flags & REQ_META) { + WARN_ON(bfqq->meta_pending == 0); + bfqq->meta_pending--; + } +} + +static int bfq_merge(struct request_queue *q, struct request **req, + struct bio *bio) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + struct request *__rq; + + __rq = bfq_find_rq_fmerge(bfqd, bio); + if (__rq != NULL && elv_rq_merge_ok(__rq, bio)) { + *req = __rq; + return ELEVATOR_FRONT_MERGE; + } + + return ELEVATOR_NO_MERGE; +} + +static void bfq_merged_request(struct request_queue *q, struct request *req, + int type) +{ + if (type == ELEVATOR_FRONT_MERGE) { + struct bfq_queue *bfqq = RQ_BFQQ(req); + + bfq_reposition_rq_rb(bfqq, req); + 
} +} + +static void bfq_merged_requests(struct request_queue *q, struct request *rq, + struct request *next) +{ + struct bfq_queue *bfqq = RQ_BFQQ(rq); + + /* + * Reposition in fifo if next is older than rq. + */ + if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && + time_before(rq_fifo_time(next), rq_fifo_time(rq))) { + list_move(&rq->queuelist, &next->queuelist); + rq_set_fifo_time(rq, rq_fifo_time(next)); + } + + if (bfqq->next_rq == next) + bfqq->next_rq = rq; + + bfq_remove_request(next); +} + +static int bfq_allow_merge(struct request_queue *q, struct request *rq, + struct bio *bio) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + struct cfq_io_context *cic; + struct bfq_queue *bfqq; + + /* Disallow merge of a sync bio into an async request. */ + if (bfq_bio_sync(bio) && !rq_is_sync(rq)) + return 0; + + /* + * Lookup the bfqq that this bio will be queued with. Allow + * merge only if rq is queued there. + */ + cic = bfq_cic_lookup(bfqd, current->io_context); + if (cic == NULL) + return 0; + + bfqq = cic_to_bfqq(cic, bfq_bio_sync(bio)); + return bfqq == RQ_BFQQ(rq); +} + +static void __bfq_set_active_queue(struct bfq_data *bfqd, + struct bfq_queue *bfqq) +{ + if (bfqq != NULL) { + bfq_mark_bfqq_must_alloc(bfqq); + bfq_mark_bfqq_budget_new(bfqq); + bfq_clear_bfqq_fifo_expire(bfqq); + + bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8; + + bfq_log_bfqq(bfqd, bfqq, "set_active_queue, cur-budget = %lu", + bfqq->entity.budget); + } + + bfqd->active_queue = bfqq; +} + +/* + * Get and set a new active queue for service. + */ +static struct bfq_queue *bfq_set_active_queue(struct bfq_data *bfqd, + struct bfq_queue *bfqq) +{ + if (!bfqq) + bfqq = bfq_get_next_queue(bfqd); + else + bfq_get_next_queue_forced(bfqd, bfqq); + + __bfq_set_active_queue(bfqd, bfqq); + return bfqq; +} + +static inline sector_t bfq_dist_from_last(struct bfq_data *bfqd, + struct request *rq) +{ + if (blk_rq_pos(rq) >= bfqd->last_position) + return blk_rq_pos(rq) - bfqd->last_position; + else + return bfqd->last_position - blk_rq_pos(rq); +} + +/* + * Return true if bfqq has no request pending and rq is close enough to + * bfqd->last_position, or if rq is closer to bfqd->last_position than + * bfqq->next_rq + */ +static inline int bfq_rq_close(struct bfq_data *bfqd, struct bfq_queue *bfqq, + struct request *rq) +{ + sector_t sdist = bfqq->seek_mean; + + if (!bfq_sample_valid(bfqq->seek_samples)) + sdist = BFQQ_SEEK_THR; + + /* If seek_mean is large, using it as close criteria is meaningless */ + if (sdist > BFQQ_SEEK_THR) + sdist = BFQQ_SEEK_THR; + + return bfq_dist_from_last(bfqd, rq) <= sdist; +} + +static struct bfq_queue *bfqq_close(struct bfq_data *bfqd, + struct bfq_queue *cur_bfqq) +{ + struct rb_root *root = &bfqd->rq_pos_tree; + struct rb_node *parent, *node; + struct bfq_queue *__bfqq; + sector_t sector = bfqd->last_position; + + if (RB_EMPTY_ROOT(root)) + return NULL; + + /* + * First, if we find a request starting at the end of the last + * request, choose it. + */ + __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL); + if (__bfqq != NULL) + return __bfqq; + + /* + * If the exact sector wasn't found, the parent of the NULL leaf + * will contain the closest sector (rq_pos_tree sorted by next_request + * position). 
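+ * That candidate is tried first; if it is not close enough (see
+ * bfq_rq_close()), its in-order neighbour on the other side of the
+ * sector is tried as well.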
+ */ + __bfqq = rb_entry(parent, struct bfq_queue, pos_node); + if (bfq_rq_close(bfqd, cur_bfqq, __bfqq->next_rq)) + return __bfqq; + + if (blk_rq_pos(__bfqq->next_rq) < sector) + node = rb_next(&__bfqq->pos_node); + else + node = rb_prev(&__bfqq->pos_node); + if (node == NULL) + return NULL; + + __bfqq = rb_entry(node, struct bfq_queue, pos_node); + if (bfq_rq_close(bfqd, cur_bfqq, __bfqq->next_rq)) + return __bfqq; + + return NULL; +} + +/* + * bfqd - obvious + * cur_bfqq - passed in so that we don't decide that the current queue + * is closely cooperating with itself. + * + * We are assuming that cur_bfqq has dispatched at least one request, + * and that bfqd->last_position reflects a position on the disk associated + * with the I/O issued by cur_bfqq. + */ +static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd, + struct bfq_queue *cur_bfqq) +{ + struct bfq_queue *bfqq; + + if (bfq_class_idle(cur_bfqq)) + return NULL; + if (!bfq_bfqq_sync(cur_bfqq)) + return NULL; + if (BFQQ_SEEKY(cur_bfqq)) + return NULL; + + /* If device has only one backlogged bfq_queue, don't search. */ + if (bfqd->busy_queues == 1) + return NULL; + + /* + * We should notice if some of the queues are cooperating, e.g. + * working closely on the same area of the disk. In that case, + * we can group them together and don't waste time idling. + */ + bfqq = bfqq_close(bfqd, cur_bfqq); + if (bfqq == NULL || bfqq == cur_bfqq) + return NULL; + + /* + * Do not merge queues from different bfq_groups. + */ + if (bfqq->entity.parent != cur_bfqq->entity.parent) + return NULL; + + /* + * It only makes sense to merge sync queues. + */ + if (!bfq_bfqq_sync(bfqq)) + return NULL; + if (BFQQ_SEEKY(bfqq)) + return NULL; + + /* + * Do not merge queues of different priority classes. + */ + if (bfq_class_rt(bfqq) != bfq_class_rt(cur_bfqq)) + return NULL; + + return bfqq; +} + +/* + * If enough samples have been computed, return the current max budget + * stored in bfqd, which is dynamically updated according to the + * estimated disk peak rate; otherwise return the default max budget + */ +static inline unsigned long bfq_max_budget(struct bfq_data *bfqd) +{ + return bfqd->budgets_assigned < 194 ? bfq_default_max_budget : + bfqd->bfq_max_budget; +} + +/* + * Return min budget, which is a fraction of the current or default + * max budget (trying with 1/32) + */ +static inline unsigned long bfq_min_budget(struct bfq_data *bfqd) +{ + return bfqd->budgets_assigned < 194 ? bfq_default_max_budget / 32 : + bfqd->bfq_max_budget / 32; +} + +static void bfq_arm_slice_timer(struct bfq_data *bfqd) +{ + struct bfq_queue *bfqq = bfqd->active_queue; + struct cfq_io_context *cic; + unsigned long sl; + + WARN_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); + + /* Idling is disabled, either manually or by past process history. */ + if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_idle_window(bfqq)) + return; + + /* Tasks have exited, don't wait. */ + cic = bfqd->active_cic; + if (cic == NULL || atomic_read(&cic->ioc->nr_tasks) == 0) + return; + + bfq_mark_bfqq_wait_request(bfqq); + + /* + * We don't want to idle for seeks, but we do want to allow + * fair distribution of slice time for a process doing back-to-back + * seeks. So allow a little bit of time for him to submit a new rq. + * + * To prevent processes with (partly) seeky workloads from + * being too ill-treated, grant them a small fraction of the + * assigned budget before reducing the waiting time to + * BFQ_MIN_TT. This happened to help reduce latency. 
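+ *
+ * Weight-raised queues, on the other hand, are granted a longer idle
+ * window: the base slice is tripled below.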
+ */ + sl = bfqd->bfq_slice_idle; + if (bfq_sample_valid(bfqq->seek_samples) && BFQQ_SEEKY(bfqq) && + bfqq->entity.service > bfq_max_budget(bfqd) / 8 && + bfqq->raising_coeff == 1) + sl = min(sl, msecs_to_jiffies(BFQ_MIN_TT)); + else if (bfqq->raising_coeff > 1) + sl = sl * 3; + bfqd->last_idling_start = ktime_get(); + mod_timer(&bfqd->idle_slice_timer, jiffies + sl); + bfq_log(bfqd, "arm idle: %lu/%lu ms", + jiffies_to_msecs(sl), jiffies_to_msecs(bfqd->bfq_slice_idle)); +} + +/* + * Set the maximum time for the active queue to consume its + * budget. This prevents seeky processes from lowering the disk + * throughput (always guaranteed with a time slice scheme as in CFQ). + */ +static void bfq_set_budget_timeout(struct bfq_data *bfqd) +{ + struct bfq_queue *bfqq = bfqd->active_queue; + unsigned int timeout_coeff = + bfqq->raising_cur_max_time == bfqd->bfq_raising_rt_max_time ? + 1 : (bfqq->entity.weight / bfqq->entity.orig_weight); + + bfqd->last_budget_start = ktime_get(); + + bfq_clear_bfqq_budget_new(bfqq); + bfqq->budget_timeout = jiffies + + bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] * timeout_coeff; + + bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u", + jiffies_to_msecs(bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] * + timeout_coeff)); +} + +/* + * Move request from internal lists to the request queue dispatch list. + */ +static void bfq_dispatch_insert(struct request_queue *q, struct request *rq) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + struct bfq_queue *bfqq = RQ_BFQQ(rq); + + bfq_remove_request(rq); + bfqq->dispatched++; + elv_dispatch_sort(q, rq); + + if (bfq_bfqq_sync(bfqq)) + bfqd->sync_flight++; +} + +/* + * Return expired entry, or NULL to just start from scratch in rbtree. + */ +static struct request *bfq_check_fifo(struct bfq_queue *bfqq) +{ + struct request *rq = NULL; + + if (bfq_bfqq_fifo_expire(bfqq)) + return NULL; + + bfq_mark_bfqq_fifo_expire(bfqq); + + if (list_empty(&bfqq->fifo)) + return NULL; + + rq = rq_entry_fifo(bfqq->fifo.next); + + if (time_before(jiffies, rq_fifo_time(rq))) + return NULL; + + return rq; +} + +/* + * Must be called with the queue_lock held. + */ +static int bfqq_process_refs(struct bfq_queue *bfqq) +{ + int process_refs, io_refs; + + io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE]; + process_refs = atomic_read(&bfqq->ref) - io_refs; + BUG_ON(process_refs < 0); + return process_refs; +} + +static void bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) +{ + int process_refs, new_process_refs; + struct bfq_queue *__bfqq; + + /* + * If there are no process references on the new_bfqq, then it is + * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain + * may have dropped their last reference (not just their last process + * reference). + */ + if (!bfqq_process_refs(new_bfqq)) + return; + + /* Avoid a circular list and skip interim queue merges. */ + while ((__bfqq = new_bfqq->new_bfqq)) { + if (__bfqq == bfqq) + return; + new_bfqq = __bfqq; + } + + process_refs = bfqq_process_refs(bfqq); + new_process_refs = bfqq_process_refs(new_bfqq); + /* + * If the process for the bfqq has gone away, there is no + * sense in merging the queues. + */ + if (process_refs == 0 || new_process_refs == 0) + return; + + /* + * Merge in the direction of the lesser amount of work. 
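+	 * That is, the queue with fewer process references is redirected
+	 * (via new_bfqq) to the one with more, and the target's refcount
+	 * absorbs the source's process references.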
+ */ + if (new_process_refs >= process_refs) { + bfqq->new_bfqq = new_bfqq; + atomic_add(process_refs, &new_bfqq->ref); + } else { + new_bfqq->new_bfqq = bfqq; + atomic_add(new_process_refs, &bfqq->ref); + } + bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d", + new_bfqq->pid); +} + +static inline unsigned long bfq_bfqq_budget_left(struct bfq_queue *bfqq) +{ + struct bfq_entity *entity = &bfqq->entity; + return entity->budget - entity->service; +} + +static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) +{ + BUG_ON(bfqq != bfqd->active_queue); + + __bfq_bfqd_reset_active(bfqd); + + if (RB_EMPTY_ROOT(&bfqq->sort_list)) { + bfq_del_bfqq_busy(bfqd, bfqq, 1); + /* + * overloading budget_timeout field to store when + * the queue remains with no backlog, used by + * the weight-raising mechanism + */ + bfqq->budget_timeout = jiffies ; + } + else { + bfq_activate_bfqq(bfqd, bfqq); + /* + * Resort priority tree of potential close cooperators. + */ + bfq_rq_pos_tree_add(bfqd, bfqq); + } + + /* + * If this bfqq is shared between multiple processes, check + * to make sure that those processes are still issuing I/Os + * within the mean seek distance. If not, it may be time to + * break the queues apart again. + */ + if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq)) + bfq_mark_bfqq_split_coop(bfqq); +} + +/** + * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior. + * @bfqd: device data. + * @bfqq: queue to update. + * @reason: reason for expiration. + * + * Handle the feedback on @bfqq budget. See the body for detailed + * comments. + */ +static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, + struct bfq_queue *bfqq, + enum bfqq_expiration reason) +{ + struct request *next_rq; + unsigned long budget, min_budget; + + budget = bfqq->max_budget; + min_budget = bfq_min_budget(bfqd); + + BUG_ON(bfqq != bfqd->active_queue); + + bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %lu, budg left %lu", + bfqq->entity.budget, bfq_bfqq_budget_left(bfqq)); + bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %lu, min budg %lu", + budget, bfq_min_budget(bfqd)); + bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d", + bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->active_queue)); + + if (bfq_bfqq_sync(bfqq)) { + switch (reason) { + /* + * Caveat: in all the following cases we trade latency + * for throughput. + */ + case BFQ_BFQQ_TOO_IDLE: + /* + * This is the only case where we may reduce + * the budget: if there is no requets of the + * process still waiting for completion, then + * we assume (tentatively) that the timer has + * expired because the batch of requests of + * the process could have been served with a + * smaller budget. Hence, betting that + * process will behave in the same way when it + * becomes backlogged again, we reduce its + * next budget. As long as we guess right, + * this budget cut reduces the latency + * experienced by the process. + * + * However, if there are still outstanding + * requests, then the process may have not yet + * issued its next request just because it is + * still waiting for the completion of some of + * the still oustanding ones. So in this + * subcase we do not reduce its budget, on the + * contrary we increase it to possibly boost + * the throughput, as discussed in the + * comments to the BUDGET_TIMEOUT case. 
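+			 *
+			 * Numerically, as implemented right below: with
+			 * requests still in flight the budget is doubled
+			 * (capped at bfqd->bfq_max_budget); with no
+			 * requests in flight it is reduced by
+			 * 4 * min_budget, but never below min_budget.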
+ */ + if (bfqq->dispatched > 0) /* still oustanding reqs */ + budget = min(budget * 2, bfqd->bfq_max_budget); + else { + if (budget > 5 * min_budget) + budget -= 4 * min_budget; + else + budget = min_budget; + } + break; + case BFQ_BFQQ_BUDGET_TIMEOUT: + /* + * We double the budget here because: 1) it + * gives the chance to boost the throughput if + * this is not a seeky process (which may have + * bumped into this timeout because of, e.g., + * ZBR), 2) together with charge_full_budget + * it helps give seeky processes higher + * timestamps, and hence be served less + * frequently. + */ + budget = min(budget * 2, bfqd->bfq_max_budget); + break; + case BFQ_BFQQ_BUDGET_EXHAUSTED: + /* + * The process still has backlog, and did not + * let either the budget timeout or the disk + * idling timeout expire. Hence it is not + * seeky, has a short thinktime and may be + * happy with a higher budget too. So + * definitely increase the budget of this good + * candidate to boost the disk throughput. + */ + budget = min(budget * 4, bfqd->bfq_max_budget); + break; + case BFQ_BFQQ_NO_MORE_REQUESTS: + /* + * Leave the budget unchanged. + */ + default: + return; + } + } else /* async queue */ + /* async queues get always the maximum possible budget + * (their ability to dispatch is limited by + * @bfqd->bfq_max_budget_async_rq). + */ + budget = bfqd->bfq_max_budget; + + bfqq->max_budget = budget; + + if (bfqd->budgets_assigned >= 194 && bfqd->bfq_user_max_budget == 0 && + bfqq->max_budget > bfqd->bfq_max_budget) + bfqq->max_budget = bfqd->bfq_max_budget; + + /* + * Make sure that we have enough budget for the next request. + * Since the finish time of the bfqq must be kept in sync with + * the budget, be sure to call __bfq_bfqq_expire() after the + * update. + */ + next_rq = bfqq->next_rq; + if (next_rq != NULL) + bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget, + bfq_serv_to_charge(next_rq, bfqq)); + else + bfqq->entity.budget = bfqq->max_budget; + + bfq_log_bfqq(bfqd, bfqq, "head sect: %lu, new budget %lu", + next_rq != NULL ? blk_rq_sectors(next_rq) : 0, + bfqq->entity.budget); +} + +static unsigned long bfq_calc_max_budget(u64 peak_rate, u64 timeout) +{ + unsigned long max_budget; + + /* + * The max_budget calculated when autotuning is equal to the + * amount of sectors transfered in timeout_sync at the + * estimated peak rate. + */ + max_budget = (unsigned long)(peak_rate * 1000 * + timeout >> BFQ_RATE_SHIFT); + + return max_budget; +} + +/* + * In addition to updating the peak rate, checks whether the process + * is "slow", and returns 1 if so. This slow flag is used, in addition + * to the budget timeout, to reduce the amount of service provided to + * seeky processes, and hence reduce their chances to lower the + * throughput. See the code for more details. + */ +static int bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq, + int compensate, enum bfqq_expiration reason) +{ + u64 bw, usecs, expected, timeout; + ktime_t delta; + int update = 0; + + if (!bfq_bfqq_sync(bfqq) || bfq_bfqq_budget_new(bfqq)) + return 0; + + delta = compensate ? bfqd->last_idling_start : ktime_get(); + delta = ktime_sub(delta, bfqd->last_budget_start); + usecs = ktime_to_us(delta); + + /* Don't trust short/unrealistic values. */ + if (usecs < 100 || usecs >= LONG_MAX) + return 0; + + /* + * Calculate the bandwidth for the last slice. We use a 64 bit + * value to store the peak rate, in sectors per usec in fixed + * point math. 
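+	 * Concretely, bw = (service << BFQ_RATE_SHIFT) / usecs, i.e. the
+	 * sectors served in the last slice, scaled by 2^BFQ_RATE_SHIFT
+	 * and divided by the slice duration in usecs.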
We do so to have enough precision in the estimate + * and to avoid overflows. + */ + bw = (u64)bfqq->entity.service << BFQ_RATE_SHIFT; + do_div(bw, (unsigned long)usecs); + + timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]); + + /* + * Use only long (> 20ms) intervals to filter out spikes for + * the peak rate estimation. + */ + if (usecs > 20000) { + if (bw > bfqd->peak_rate || + (!BFQQ_SEEKY(bfqq) && + reason == BFQ_BFQQ_BUDGET_TIMEOUT)) { + bfq_log(bfqd, "measured bw =%llu", bw); + /* + * To smooth oscillations use a low-pass filter with + * alpha=7/8, i.e., + * new_rate = (7/8) * old_rate + (1/8) * bw + */ + do_div(bw, 8); + bfqd->peak_rate *= 7; + do_div(bfqd->peak_rate, 8); + bfqd->peak_rate += bw; + update = 1; + bfq_log(bfqd, "new peak_rate=%llu", bfqd->peak_rate); + } + + update |= bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES - 1; + + if (bfqd->peak_rate_samples < BFQ_PEAK_RATE_SAMPLES) + bfqd->peak_rate_samples++; + + if (bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES && + update && bfqd->bfq_user_max_budget == 0) { + bfqd->bfq_max_budget = + bfq_calc_max_budget(bfqd->peak_rate, timeout); + bfq_log(bfqd, "new max_budget=%lu", + bfqd->bfq_max_budget); + } + } + + /* + * If the process has been served for a too short time + * interval to let its possible sequential accesses prevail on + * the initial seek time needed to move the disk head on the + * first sector it requested, then give the process a chance + * and for the moment return false. + */ + if (bfqq->entity.budget <= bfq_max_budget(bfqd) / 8) + return 0; + + /* + * A process is considered ``slow'' (i.e., seeky, so that we + * cannot treat it fairly in the service domain, as it would + * slow down too much the other processes) if, when a slice + * ends for whatever reason, it has received service at a + * rate that would not be high enough to complete the budget + * before the budget timeout expiration. + */ + expected = bw * 1000 * timeout >> BFQ_RATE_SHIFT; + + /* + * Caveat: processes doing IO in the slower disk zones will + * tend to be slow(er) even if not seeky. And the estimated + * peak rate will actually be an average over the disk + * surface. Hence, to not be too harsh with unlucky processes, + * we keep a budget/3 margin of safety before declaring a + * process slow. + */ + return expected > (4 * bfqq->entity.budget) / 3; +} + +/** + * bfq_bfqq_expire - expire a queue. + * @bfqd: device owning the queue. + * @bfqq: the queue to expire. + * @compensate: if true, compensate for the time spent idling. + * @reason: the reason causing the expiration. + * + * + * If the process associated to the queue is slow (i.e., seeky), or in + * case of budget timeout, or, finally, if it is async, we + * artificially charge it an entire budget (independently of the + * actual service it received). As a consequence, the queue will get + * higher timestamps than the correct ones upon reactivation, and + * hence it will be rescheduled as if it had received more service + * than what it actually received. In the end, this class of processes + * will receive less service in proportion to how slowly they consume + * their budgets (and hence how seriously they tend to lower the + * throughput). + * + * In contrast, when a queue expires because it has been idling for + * too much or because it exhausted its budget, we do not touch the + * amount of service it has received. 
Hence when the queue will be + * reactivated and its timestamps updated, the latter will be in sync + * with the actual service received by the queue until expiration. + * + * Charging a full budget to the first type of queues and the exact + * service to the others has the effect of using the WF2Q+ policy to + * schedule the former on a timeslice basis, without violating the + * service domain guarantees of the latter. + */ +static void bfq_bfqq_expire(struct bfq_data *bfqd, + struct bfq_queue *bfqq, + int compensate, + enum bfqq_expiration reason) +{ + int slow; + BUG_ON(bfqq != bfqd->active_queue); + + /* Update disk peak rate for autotuning and check whether the + * process is slow (see bfq_update_peak_rate). + */ + slow = bfq_update_peak_rate(bfqd, bfqq, compensate, reason); + + /* + * As above explained, 'punish' slow (i.e., seeky), timed-out + * and async queues, to favor sequential sync workloads. + * + * Processes doing IO in the slower disk zones will tend to be + * slow(er) even if not seeky. Hence, since the estimated peak + * rate is actually an average over the disk surface, these + * processes may timeout just for bad luck. To avoid punishing + * them we do not charge a full budget to a process that + * succeeded in consuming at least 2/3 of its budget. + */ + if (slow || (reason == BFQ_BFQQ_BUDGET_TIMEOUT && + bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)) + bfq_bfqq_charge_full_budget(bfqq); + + if (bfqd->low_latency && bfqq->raising_coeff == 1) + bfqq->last_rais_start_finish = jiffies; + + if (bfqd->low_latency && bfqd->bfq_raising_max_softrt_rate > 0) { + if(reason != BFQ_BFQQ_BUDGET_TIMEOUT) + bfqq->soft_rt_next_start = + jiffies + + HZ * bfqq->entity.service / + bfqd->bfq_raising_max_softrt_rate; + else + bfqq->soft_rt_next_start = -1; /* infinity */ + } + bfq_log_bfqq(bfqd, bfqq, + "expire (%d, slow %d, num_disp %d, idle_win %d)", reason, slow, + bfqq->dispatched, bfq_bfqq_idle_window(bfqq)); + + /* Increase, decrease or leave budget unchanged according to reason */ + __bfq_bfqq_recalc_budget(bfqd, bfqq, reason); + __bfq_bfqq_expire(bfqd, bfqq); +} + +/* + * Budget timeout is not implemented through a dedicated timer, but + * just checked on request arrivals and completions, as well as on + * idle timer expirations. + */ +static int bfq_bfqq_budget_timeout(struct bfq_queue *bfqq) +{ + if (bfq_bfqq_budget_new(bfqq)) + return 0; + + if (time_before(jiffies, bfqq->budget_timeout)) + return 0; + + return 1; +} + +/* + * If we expire a queue that is waiting for the arrival of a new + * request, we may prevent the fictitious timestamp backshifting that + * allows the guarantees of the queue to be preserved (see [1] for + * this tricky aspect). Hence we return true only if this condition + * does not hold, or if the queue is slow enough to deserve only to be + * kicked off for preserving a high throughput. +*/ +static inline int bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq) +{ + bfq_log_bfqq(bfqq->bfqd, bfqq, + "may_budget_timeout: wr %d left %d timeout %d", + bfq_bfqq_wait_request(bfqq), + bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3, + bfq_bfqq_budget_timeout(bfqq)); + + return (!bfq_bfqq_wait_request(bfqq) || + bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3) + && + bfq_bfqq_budget_timeout(bfqq); +} + +/* + * Select a queue for service. If we have a current active queue, + * check whether to continue servicing it, or retrieve and set a new one. 
+ */ +static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) +{ + struct bfq_queue *bfqq, *new_bfqq = NULL; + struct request *next_rq; + enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT; + + bfqq = bfqd->active_queue; + if (bfqq == NULL) + goto new_queue; + + bfq_log_bfqq(bfqd, bfqq, "select_queue: already active queue"); + + /* + * If another queue has a request waiting within our mean seek + * distance, let it run. The expire code will check for close + * cooperators and put the close queue at the front of the + * service tree. If possible, merge the expiring queue with the + * new bfqq. + */ + new_bfqq = bfq_close_cooperator(bfqd, bfqq); + if (new_bfqq != NULL && bfqq->new_bfqq == NULL) + bfq_setup_merge(bfqq, new_bfqq); + + if (bfq_may_expire_for_budg_timeout(bfqq)) + goto expire; + + next_rq = bfqq->next_rq; + /* + * If bfqq has requests queued and it has enough budget left to + * serve them, keep the queue, otherwise expire it. + */ + if (next_rq != NULL) { + if (bfq_serv_to_charge(next_rq, bfqq) > + bfq_bfqq_budget_left(bfqq)) { + reason = BFQ_BFQQ_BUDGET_EXHAUSTED; + goto expire; + } else { + /* + * The idle timer may be pending because we may not + * disable disk idling even when a new request arrives + */ + if (timer_pending(&bfqd->idle_slice_timer)) { + /* + * If we get here: 1) at least a new request + * has arrived but we have not disabled the + * timer because the request was too small, + * 2) then the block layer has unplugged the + * device, causing the dispatch to be invoked. + * + * Since the device is unplugged, now the + * requests are probably large enough to + * provide a reasonable throughput. + * So we disable idling. + */ + bfq_clear_bfqq_wait_request(bfqq); + del_timer(&bfqd->idle_slice_timer); + } + if (new_bfqq == NULL) + goto keep_queue; + else + goto expire; + } + } + + /* + * No requests pending. If there is no cooperator, and the active + * queue still has requests in flight or is idling for a new request, + * then keep it. + */ + if (new_bfqq == NULL && (timer_pending(&bfqd->idle_slice_timer) || + (bfqq->dispatched != 0 && bfq_bfqq_idle_window(bfqq)))) { + bfqq = NULL; + goto keep_queue; + } else if (new_bfqq != NULL && timer_pending(&bfqd->idle_slice_timer)) { + /* + * Expiring the queue because there is a close cooperator, + * cancel timer. + */ + bfq_clear_bfqq_wait_request(bfqq); + del_timer(&bfqd->idle_slice_timer); + } + + reason = BFQ_BFQQ_NO_MORE_REQUESTS; +expire: + bfq_bfqq_expire(bfqd, bfqq, 0, reason); +new_queue: + bfqq = bfq_set_active_queue(bfqd, new_bfqq); + bfq_log(bfqd, "select_queue: new queue %lu returned", + bfqq != NULL ? 
bfqq->pid : 0); +keep_queue: + return bfqq; +} + +static void update_raising_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) +{ + if (bfqq->raising_coeff > 1) { /* queue is being boosted */ + struct bfq_entity *entity = &bfqq->entity; + + bfq_log_bfqq(bfqd, bfqq, + "raising period dur %u/%u msec, " + "old raising coeff %u, w %d(%d)", + jiffies_to_msecs(jiffies - + bfqq->last_rais_start_finish), + jiffies_to_msecs(bfqq->raising_cur_max_time), + bfqq->raising_coeff, + bfqq->entity.weight, bfqq->entity.orig_weight); + + BUG_ON(bfqq != bfqd->active_queue && entity->weight != + entity->orig_weight * bfqq->raising_coeff); + if(entity->ioprio_changed) + bfq_log_bfqq(bfqd, bfqq, + "WARN: pending prio change"); + /* + * If too much time has elapsed from the beginning + * of this weight-raising period and process is not soft + * real-time, stop it + */ + if (jiffies - bfqq->last_rais_start_finish > + bfqq->raising_cur_max_time) { + int soft_rt = bfqd->bfq_raising_max_softrt_rate > 0 && + bfqq->soft_rt_next_start < jiffies; + + bfqq->last_rais_start_finish = jiffies; + if (soft_rt) + bfqq->raising_cur_max_time = + bfqd->bfq_raising_rt_max_time; + else { + bfqq->raising_coeff = 1; + entity->ioprio_changed = 1; + __bfq_entity_update_weight_prio( + bfq_entity_service_tree(entity), + entity); + } + } + } +} + + +/* + * Dispatch one request from bfqq, moving it to the request queue + * dispatch list. + */ +static int bfq_dispatch_request(struct bfq_data *bfqd, + struct bfq_queue *bfqq) +{ + int dispatched = 0; + struct request *rq; + unsigned long service_to_charge; + + BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list)); + + /* Follow expired path, else get first next available. */ + rq = bfq_check_fifo(bfqq); + if (rq == NULL) + rq = bfqq->next_rq; + service_to_charge = bfq_serv_to_charge(rq, bfqq); + + if (service_to_charge > bfq_bfqq_budget_left(bfqq)) { + /* + * This may happen if the next rq is chosen + * in fifo order instead of sector order. + * The budget is properly dimensioned + * to be always sufficient to serve the next request + * only if it is chosen in sector order. The reason is + * that it would be quite inefficient and little useful + * to always make sure that the budget is large enough + * to serve even the possible next rq in fifo order. + * In fact, requests are seldom served in fifo order. + * + * Expire the queue for budget exhaustion, and + * make sure that the next act_budget is enough + * to serve the next request, even if it comes + * from the fifo expired path. + */ + bfqq->next_rq = rq; + /* + * Since this dispatch is failed, make sure that + * a new one will be performed + */ + if (!bfqd->rq_in_driver) + bfq_schedule_dispatch(bfqd); + goto expire; + } + + /* Finally, insert request into driver dispatch list. 
*/ + bfq_bfqq_served(bfqq, service_to_charge); + bfq_dispatch_insert(bfqd->queue, rq); + + update_raising_data(bfqd, bfqq); + + bfq_log_bfqq(bfqd, bfqq, "dispatched %u sec req (%llu), " + "budg left %lu", + blk_rq_sectors(rq), + (long long unsigned)blk_rq_pos(rq), + bfq_bfqq_budget_left(bfqq)); + + dispatched++; + + if (bfqd->active_cic == NULL) { + atomic_long_inc(&RQ_CIC(rq)->ioc->refcount); + bfqd->active_cic = RQ_CIC(rq); + } + + if (bfqd->busy_queues > 1 && ((!bfq_bfqq_sync(bfqq) && + dispatched >= bfqd->bfq_max_budget_async_rq) || + bfq_class_idle(bfqq))) + goto expire; + + return dispatched; + +expire: + bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_EXHAUSTED); + return dispatched; +} + +static int __bfq_forced_dispatch_bfqq(struct bfq_queue *bfqq) +{ + int dispatched = 0; + + while (bfqq->next_rq != NULL) { + bfq_dispatch_insert(bfqq->bfqd->queue, bfqq->next_rq); + dispatched++; + } + + BUG_ON(!list_empty(&bfqq->fifo)); + return dispatched; +} + +/* + * Drain our current requests. Used for barriers and when switching + * io schedulers on-the-fly. + */ +static int bfq_forced_dispatch(struct bfq_data *bfqd) +{ + struct bfq_queue *bfqq, *n; + struct bfq_service_tree *st; + int dispatched = 0; + + bfqq = bfqd->active_queue; + if (bfqq != NULL) + __bfq_bfqq_expire(bfqd, bfqq); + + /* + * Loop through classes, and be careful to leave the scheduler + * in a consistent state, as feedback mechanisms and vtime + * updates cannot be disabled during the process. + */ + list_for_each_entry_safe(bfqq, n, &bfqd->active_list, bfqq_list) { + st = bfq_entity_service_tree(&bfqq->entity); + + dispatched += __bfq_forced_dispatch_bfqq(bfqq); + bfqq->max_budget = bfq_max_budget(bfqd); + + bfq_forget_idle(st); + } + + BUG_ON(bfqd->busy_queues != 0); + + return dispatched; +} + +static int bfq_dispatch_requests(struct request_queue *q, int force) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + struct bfq_queue *bfqq; + int max_dispatch; + + bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues); + if (bfqd->busy_queues == 0) + return 0; + + if (unlikely(force)) + return bfq_forced_dispatch(bfqd); + + if((bfqq = bfq_select_queue(bfqd)) == NULL) + return 0; + + max_dispatch = bfqd->bfq_quantum; + if (bfq_class_idle(bfqq)) + max_dispatch = 1; + + if (!bfq_bfqq_sync(bfqq)) + max_dispatch = bfqd->bfq_max_budget_async_rq; + + if (bfqq->dispatched >= max_dispatch) { + if (bfqd->busy_queues > 1) + return 0; + if (bfqq->dispatched >= 4 * max_dispatch) + return 0; + } + + if (bfqd->sync_flight != 0 && !bfq_bfqq_sync(bfqq)) + return 0; + + bfq_clear_bfqq_wait_request(bfqq); + BUG_ON(timer_pending(&bfqd->idle_slice_timer)); + + if (! bfq_dispatch_request(bfqd, bfqq)) + return 0; + + bfq_log_bfqq(bfqd, bfqq, "dispatched one request of %d" + "(max_disp %d)", bfqq->pid, max_dispatch); + + return 1; +} + +/* + * Task holds one reference to the queue, dropped when task exits. Each rq + * in-flight on this queue also holds a reference, dropped when rq is freed. + * + * Queue lock must be held here. 
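+ *
+ * This reference scheme is also what allows bfqq_process_refs() above
+ * to recover the number of process owners as atomic_read(&bfqq->ref)
+ * minus the requests currently allocated on the queue.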
+ */ +static void bfq_put_queue(struct bfq_queue *bfqq) +{ + struct bfq_data *bfqd = bfqq->bfqd; + + BUG_ON(atomic_read(&bfqq->ref) <= 0); + + bfq_log_bfqq(bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref); + if (!atomic_dec_and_test(&bfqq->ref)) + return; + + BUG_ON(rb_first(&bfqq->sort_list) != NULL); + BUG_ON(bfqq->allocated[READ] + bfqq->allocated[WRITE] != 0); + BUG_ON(bfqq->entity.tree != NULL); + BUG_ON(bfq_bfqq_busy(bfqq)); + BUG_ON(bfqd->active_queue == bfqq); + + bfq_log_bfqq(bfqd, bfqq, "put_queue: %p freed", bfqq); + + kmem_cache_free(bfq_pool, bfqq); +} + +static void bfq_put_cooperator(struct bfq_queue *bfqq) +{ + struct bfq_queue *__bfqq, *next; + + /* + * If this queue was scheduled to merge with another queue, be + * sure to drop the reference taken on that queue (and others in + * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs. + */ + __bfqq = bfqq->new_bfqq; + while (__bfqq) { + if (__bfqq == bfqq) { + WARN(1, "bfqq->new_bfqq loop detected.\n"); + break; + } + next = __bfqq->new_bfqq; + bfq_put_queue(__bfqq); + __bfqq = next; + } +} + +static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) +{ + if (bfqq == bfqd->active_queue) { + __bfq_bfqq_expire(bfqd, bfqq); + bfq_schedule_dispatch(bfqd); + } + + bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref); + + bfq_put_cooperator(bfqq); + + bfq_put_queue(bfqq); +} + +/* + * Update the entity prio values; note that the new values will not + * be used until the next (re)activation. + */ +static void bfq_init_prio_data(struct bfq_queue *bfqq, struct io_context *ioc) +{ + struct task_struct *tsk = current; + int ioprio_class; + + if (!bfq_bfqq_prio_changed(bfqq)) + return; + + ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio); + switch (ioprio_class) { + default: + printk(KERN_ERR "bfq: bad prio %x\n", ioprio_class); + case IOPRIO_CLASS_NONE: + /* + * No prio set, inherit CPU scheduling settings. + */ + bfqq->entity.new_ioprio = task_nice_ioprio(tsk); + bfqq->entity.new_ioprio_class = task_nice_ioclass(tsk); + break; + case IOPRIO_CLASS_RT: + bfqq->entity.new_ioprio = task_ioprio(ioc); + bfqq->entity.new_ioprio_class = IOPRIO_CLASS_RT; + break; + case IOPRIO_CLASS_BE: + bfqq->entity.new_ioprio = task_ioprio(ioc); + bfqq->entity.new_ioprio_class = IOPRIO_CLASS_BE; + break; + case IOPRIO_CLASS_IDLE: + bfqq->entity.new_ioprio_class = IOPRIO_CLASS_IDLE; + bfqq->entity.new_ioprio = 7; + bfq_clear_bfqq_idle_window(bfqq); + break; + } + + bfqq->entity.ioprio_changed = 1; + + /* + * Keep track of original prio settings in case we have to temporarily + * elevate the priority of this queue. 
+ */ + bfqq->org_ioprio = bfqq->entity.new_ioprio; + bfqq->org_ioprio_class = bfqq->entity.new_ioprio_class; + bfq_clear_bfqq_prio_changed(bfqq); +} + +static void bfq_changed_ioprio(struct io_context *ioc, + struct cfq_io_context *cic) +{ + struct bfq_data *bfqd; + struct bfq_queue *bfqq, *new_bfqq; + struct bfq_group *bfqg; + unsigned long uninitialized_var(flags); + + bfqd = bfq_get_bfqd_locked(&cic->key, &flags); + if (unlikely(bfqd == NULL)) + return; + + bfqq = cic->cfqq[BLK_RW_ASYNC]; + if (bfqq != NULL) { + bfqg = container_of(bfqq->entity.sched_data, struct bfq_group, + sched_data); + new_bfqq = bfq_get_queue(bfqd, bfqg, BLK_RW_ASYNC, cic->ioc, + GFP_ATOMIC); + if (new_bfqq != NULL) { + cic->cfqq[BLK_RW_ASYNC] = new_bfqq; + bfq_log_bfqq(bfqd, bfqq, + "changed_ioprio: bfqq %p %d", + bfqq, bfqq->ref); + bfq_put_queue(bfqq); + } + } + + bfqq = cic->cfqq[BLK_RW_SYNC]; + if (bfqq != NULL) + bfq_mark_bfqq_prio_changed(bfqq); + + bfq_put_bfqd_unlock(bfqd, &flags); +} + +static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, + pid_t pid, int is_sync) +{ + RB_CLEAR_NODE(&bfqq->entity.rb_node); + INIT_LIST_HEAD(&bfqq->fifo); + + atomic_set(&bfqq->ref, 0); + bfqq->bfqd = bfqd; + + bfq_mark_bfqq_prio_changed(bfqq); + + if (is_sync) { + if (!bfq_class_idle(bfqq)) + bfq_mark_bfqq_idle_window(bfqq); + bfq_mark_bfqq_sync(bfqq); + } + + /* Tentative initial value to trade off between thr and lat */ + bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3; + bfqq->pid = pid; + + bfqq->raising_coeff = 1; + bfqq->last_rais_start_finish = 0; + bfqq->soft_rt_next_start = -1; +} + +static struct bfq_queue *bfq_find_alloc_queue(struct bfq_data *bfqd, + struct bfq_group *bfqg, + int is_sync, + struct io_context *ioc, + gfp_t gfp_mask) +{ + struct bfq_queue *bfqq, *new_bfqq = NULL; + struct cfq_io_context *cic; + +retry: + cic = bfq_cic_lookup(bfqd, ioc); + /* cic always exists here */ + bfqq = cic_to_bfqq(cic, is_sync); + + /* + * Always try a new alloc if we fall back to the OOM bfqq + * originally, since it should just be a temporary situation. 
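+	 *
+	 * The unlock/alloc/relock sequence below is needed because a
+	 * __GFP_WAIT allocation may sleep, which is not allowed under
+	 * queue_lock; after re-acquiring the lock we jump back to the
+	 * "retry" label and repeat the cic lookup, since the queue may
+	 * have been created by someone else while the lock was dropped
+	 * (in that case the extra allocation is freed at the end).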
+ */ + if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) { + bfqq = NULL; + if (new_bfqq != NULL) { + bfqq = new_bfqq; + new_bfqq = NULL; + } else if (gfp_mask & __GFP_WAIT) { + spin_unlock_irq(bfqd->queue->queue_lock); + new_bfqq = kmem_cache_alloc_node(bfq_pool, + gfp_mask | __GFP_ZERO, + bfqd->queue->node); + spin_lock_irq(bfqd->queue->queue_lock); + if (new_bfqq != NULL) + goto retry; + } else { + bfqq = kmem_cache_alloc_node(bfq_pool, + gfp_mask | __GFP_ZERO, + bfqd->queue->node); + } + + if (bfqq != NULL) { + bfq_init_bfqq(bfqd, bfqq, current->pid, is_sync); + bfq_log_bfqq(bfqd, bfqq, "allocated"); + } else { + bfqq = &bfqd->oom_bfqq; + bfq_log_bfqq(bfqd, bfqq, "using oom bfqq"); + } + + bfq_init_prio_data(bfqq, ioc); + bfq_init_entity(&bfqq->entity, bfqg); + } + + if (new_bfqq != NULL) + kmem_cache_free(bfq_pool, new_bfqq); + + return bfqq; +} + +static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd, + struct bfq_group *bfqg, + int ioprio_class, int ioprio) +{ + switch (ioprio_class) { + case IOPRIO_CLASS_RT: + return &bfqg->async_bfqq[0][ioprio]; + case IOPRIO_CLASS_BE: + return &bfqg->async_bfqq[1][ioprio]; + case IOPRIO_CLASS_IDLE: + return &bfqg->async_idle_bfqq; + default: + BUG(); + } +} + +static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, + struct bfq_group *bfqg, int is_sync, + struct io_context *ioc, gfp_t gfp_mask) +{ + const int ioprio = task_ioprio(ioc); + const int ioprio_class = task_ioprio_class(ioc); + struct bfq_queue **async_bfqq = NULL; + struct bfq_queue *bfqq = NULL; + + if (!is_sync) { + async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class, + ioprio); + bfqq = *async_bfqq; + } + + if (bfqq == NULL) + bfqq = bfq_find_alloc_queue(bfqd, bfqg, is_sync, ioc, gfp_mask); + + /* + * Pin the queue now that it's allocated, scheduler exit will prune it. + */ + if (!is_sync && *async_bfqq == NULL) { + atomic_inc(&bfqq->ref); + bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d", + bfqq, bfqq->ref); + *async_bfqq = bfqq; + } + + atomic_inc(&bfqq->ref); + bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref); + return bfqq; +} + +static void bfq_update_io_thinktime(struct bfq_data *bfqd, + struct cfq_io_context *cic) +{ + unsigned long elapsed = jiffies - cic->last_end_request; + unsigned long ttime = min(elapsed, 2UL * bfqd->bfq_slice_idle); + + cic->ttime_samples = (7*cic->ttime_samples + 256) / 8; + cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8; + cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples; +} + +static void bfq_update_io_seektime(struct bfq_data *bfqd, + struct bfq_queue *bfqq, + struct request *rq) +{ + sector_t sdist; + u64 total; + + if (bfqq->last_request_pos < blk_rq_pos(rq)) + sdist = blk_rq_pos(rq) - bfqq->last_request_pos; + else + sdist = bfqq->last_request_pos - blk_rq_pos(rq); + + /* + * Don't allow the seek distance to get too large from the + * odd fragment, pagein, etc. 
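+	 *
+	 * The statistics below implement an exponential moving average
+	 * with weight 1/8 for the new sample, kept in fixed point with a
+	 * scale factor of 256; once enough samples have been collected,
+	 *
+	 *   seek_mean = seek_total / seek_samples
+	 *             ~ 7/8 * old_seek_mean + 1/8 * sdist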
+ */ + if (bfqq->seek_samples == 0) /* first request, not really a seek */ + sdist = 0; + else if (bfqq->seek_samples <= 60) /* second & third seek */ + sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*1024); + else + sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*64); + + bfqq->seek_samples = (7*bfqq->seek_samples + 256) / 8; + bfqq->seek_total = (7*bfqq->seek_total + (u64)256*sdist) / 8; + total = bfqq->seek_total + (bfqq->seek_samples/2); + do_div(total, bfqq->seek_samples); + if (bfq_bfqq_coop(bfqq)) { + /* + * If the mean seektime increases for a (non-seeky) shared + * queue, some cooperator is likely to be idling too much. + * On the contrary, if it decreases, some cooperator has + * probably waked up. + * + */ + if ((sector_t)total < bfqq->seek_mean) + bfq_mark_bfqq_some_coop_idle(bfqq) ; + else if ((sector_t)total > bfqq->seek_mean) + bfq_clear_bfqq_some_coop_idle(bfqq) ; + } + bfqq->seek_mean = (sector_t)total; + + bfq_log_bfqq(bfqd, bfqq, "dist=%llu mean=%llu", (u64)sdist, + (u64)bfqq->seek_mean); +} + +/* + * Disable idle window if the process thinks too long or seeks so much that + * it doesn't matter. + */ +static void bfq_update_idle_window(struct bfq_data *bfqd, + struct bfq_queue *bfqq, + struct cfq_io_context *cic) +{ + int enable_idle; + + /* Don't idle for async or idle io prio class. */ + if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq)) + return; + + enable_idle = bfq_bfqq_idle_window(bfqq); + + if (atomic_read(&cic->ioc->nr_tasks) == 0 || + bfqd->bfq_slice_idle == 0 || + (bfqd->hw_tag && BFQQ_SEEKY(bfqq) && + bfqq->raising_coeff == 1)) + enable_idle = 0; + else if (bfq_sample_valid(cic->ttime_samples)) { + if (cic->ttime_mean > bfqd->bfq_slice_idle && + bfqq->raising_coeff == 1) + enable_idle = 0; + else + enable_idle = 1; + } + bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d", + enable_idle); + + if (enable_idle) + bfq_mark_bfqq_idle_window(bfqq); + else + bfq_clear_bfqq_idle_window(bfqq); +} + +/* + * Called when a new fs request (rq) is added to bfqq. Check if there's + * something we should do about it. + */ +static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, + struct request *rq) +{ + struct cfq_io_context *cic = RQ_CIC(rq); + + if (rq->cmd_flags & REQ_META) + bfqq->meta_pending++; + + bfq_update_io_thinktime(bfqd, cic); + bfq_update_io_seektime(bfqd, bfqq, rq); + if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 || + !BFQQ_SEEKY(bfqq)) + bfq_update_idle_window(bfqd, bfqq, cic); + + bfq_log_bfqq(bfqd, bfqq, + "rq_enqueued: idle_window=%d (seeky %d, mean %llu)", + bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq), + bfqq->seek_mean); + + bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); + + if (bfqq == bfqd->active_queue) { + /* + * If there is just this request queued and the request + * is small, just make sure the queue is plugged and exit. + * In this way, if the disk is being idled to wait for a new + * request from the active queue, we avoid unplugging the + * device for this request. + * + * By doing so, we spare the disk to be committed + * to serve just a small request. On the contrary, we wait for + * the block layer to decide when to unplug the device: + * hopefully, new requests will be merged to this + * one quickly, then the device will be unplugged + * and larger requests will be dispatched. 
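+		 *
+		 * "Small" here means fewer than 32 sectors, i.e. less
+		 * than 16KB with 512-byte sectors, as checked right below.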
+ */ + if (bfqq->queued[rq_is_sync(rq)] == 1 && + blk_rq_sectors(rq) < 32) { + blk_plug_device(bfqd->queue); + return; + } + if (bfq_bfqq_wait_request(bfqq)) { + /* + * If we are waiting for a request for this queue, let + * it rip immediately and flag that we must not expire + * this queue just now. + */ + bfq_clear_bfqq_wait_request(bfqq); + del_timer(&bfqd->idle_slice_timer); + /* + * Here we can safely expire the queue, in + * case of budget timeout, without wasting + * guarantees + */ + if (bfq_bfqq_budget_timeout(bfqq)) + bfq_bfqq_expire(bfqd, bfqq, 0, + BFQ_BFQQ_BUDGET_TIMEOUT); + __blk_run_queue(bfqd->queue, false); + } + } +} + +static void bfq_insert_request(struct request_queue *q, struct request *rq) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + struct bfq_queue *bfqq = RQ_BFQQ(rq); + + assert_spin_locked(bfqd->queue->queue_lock); + bfq_init_prio_data(bfqq, RQ_CIC(rq)->ioc); + + bfq_add_rq_rb(rq); + + rq_set_fifo_time(rq, jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)]); + list_add_tail(&rq->queuelist, &bfqq->fifo); + + bfq_rq_enqueued(bfqd, bfqq, rq); +} + +static void bfq_update_hw_tag(struct bfq_data *bfqd) +{ + bfqd->max_rq_in_driver = max(bfqd->max_rq_in_driver, + bfqd->rq_in_driver); + + /* + * This sample is valid if the number of outstanding requests + * is large enough to allow a queueing behavior. Note that the + * sum is not exact, as it's not taking into account deactivated + * requests. + */ + if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD) + return; + + if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES) + return; + + bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD; + bfqd->max_rq_in_driver = 0; + bfqd->hw_tag_samples = 0; +} + +static void bfq_completed_request(struct request_queue *q, struct request *rq) +{ + struct bfq_queue *bfqq = RQ_BFQQ(rq); + struct bfq_data *bfqd = bfqq->bfqd; + const int sync = rq_is_sync(rq); + + bfq_log_bfqq(bfqd, bfqq, "completed %lu sects req (%d)", + blk_rq_sectors(rq), sync); + + bfq_update_hw_tag(bfqd); + + WARN_ON(!bfqd->rq_in_driver); + WARN_ON(!bfqq->dispatched); + bfqd->rq_in_driver--; + bfqq->dispatched--; + + if (bfq_bfqq_sync(bfqq)) + bfqd->sync_flight--; + + if (sync) + RQ_CIC(rq)->last_end_request = jiffies; + + /* + * If this is the active queue, check if it needs to be expired, + * or if we want to idle in case it has no pending requests. + */ + if (bfqd->active_queue == bfqq) { + if (bfq_bfqq_budget_new(bfqq)) + bfq_set_budget_timeout(bfqd); + + /* Idling is disabled also for cooperation issues: + * 1) there is a close cooperator for the queue, or + * 2) the queue is shared and some cooperator is likely + * to be idle (in this case, by not arming the idle timer, + * we try to slow down the queue, to prevent the zones + * of the disk accessed by the active cooperators to become + * too distant from the zone that will be accessed by the + * currently idle cooperators) + */ + if (bfq_may_expire_for_budg_timeout(bfqq)) + bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_TIMEOUT); + else if (sync && + (bfqd->rq_in_driver == 0 || + bfqq->raising_coeff > 1) + && RB_EMPTY_ROOT(&bfqq->sort_list) + && !bfq_close_cooperator(bfqd, bfqq) + && (!bfq_bfqq_coop(bfqq) || + !bfq_bfqq_some_coop_idle(bfqq))) + bfq_arm_slice_timer(bfqd); + } + + if (!bfqd->rq_in_driver) + bfq_schedule_dispatch(bfqd); +} + +/* + * We temporarily boost lower priority queues if they are holding fs exclusive + * resources. They are boosted to normal prio (CLASS_BE/4). 
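+ *
+ * has_fs_excl() is true while the current task holds a filesystem-
+ * exclusive resource; once the resource is released, the else branch
+ * below restores the original class and priority saved in
+ * org_ioprio_class and org_ioprio.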
+ */ +static void bfq_prio_boost(struct bfq_queue *bfqq) +{ + if (has_fs_excl()) { + /* + * Boost idle prio on transactions that would lock out other + * users of the filesystem + */ + if (bfq_class_idle(bfqq)) + bfqq->entity.new_ioprio_class = IOPRIO_CLASS_BE; + if (bfqq->entity.new_ioprio > IOPRIO_NORM) + bfqq->entity.new_ioprio = IOPRIO_NORM; + } else { + /* + * Unboost the queue (if needed) + */ + bfqq->entity.new_ioprio_class = bfqq->org_ioprio_class; + bfqq->entity.new_ioprio = bfqq->org_ioprio; + } +} + +static inline int __bfq_may_queue(struct bfq_queue *bfqq) +{ + if (bfq_bfqq_wait_request(bfqq) && bfq_bfqq_must_alloc(bfqq)) { + bfq_clear_bfqq_must_alloc(bfqq); + return ELV_MQUEUE_MUST; + } + + return ELV_MQUEUE_MAY; +} + +static int bfq_may_queue(struct request_queue *q, int rw) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + struct task_struct *tsk = current; + struct cfq_io_context *cic; + struct bfq_queue *bfqq; + + /* + * Don't force setup of a queue from here, as a call to may_queue + * does not necessarily imply that a request actually will be queued. + * So just lookup a possibly existing queue, or return 'may queue' + * if that fails. + */ + cic = bfq_cic_lookup(bfqd, tsk->io_context); + if (cic == NULL) + return ELV_MQUEUE_MAY; + + bfqq = cic_to_bfqq(cic, rw_is_sync(rw)); + if (bfqq != NULL) { + bfq_init_prio_data(bfqq, cic->ioc); + bfq_prio_boost(bfqq); + + return __bfq_may_queue(bfqq); + } + + return ELV_MQUEUE_MAY; +} + +/* + * Queue lock held here. + */ +static void bfq_put_request(struct request *rq) +{ + struct bfq_queue *bfqq = RQ_BFQQ(rq); + + if (bfqq != NULL) { + const int rw = rq_data_dir(rq); + + BUG_ON(!bfqq->allocated[rw]); + bfqq->allocated[rw]--; + + put_io_context(RQ_CIC(rq)->ioc); + + rq->elevator_private = NULL; + rq->elevator_private2 = NULL; + + bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d", + bfqq, bfqq->ref); + bfq_put_queue(bfqq); + } +} + +static struct bfq_queue * +bfq_merge_bfqqs(struct bfq_data *bfqd, struct cfq_io_context *cic, + struct bfq_queue *bfqq) +{ + bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu", + bfqq->new_bfqq->pid); + cic_set_bfqq(cic, bfqq->new_bfqq, 1); + bfq_mark_bfqq_coop(bfqq->new_bfqq); + bfq_put_queue(bfqq); + return cic_to_bfqq(cic, 1); +} + +/* + * Returns NULL if a new bfqq should be allocated, or the old bfqq if this + * was the last process referring to said bfqq. + */ +static struct bfq_queue * +bfq_split_bfqq(struct cfq_io_context *cic, struct bfq_queue *bfqq) +{ + bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue"); + if (bfqq_process_refs(bfqq) == 1) { + bfqq->pid = current->pid; + bfq_clear_bfqq_some_coop_idle(bfqq); + bfq_clear_bfqq_coop(bfqq); + bfq_clear_bfqq_split_coop(bfqq); + return bfqq; + } + + cic_set_bfqq(cic, NULL, 1); + + bfq_put_cooperator(bfqq); + + bfq_put_queue(bfqq); + return NULL; +} + +/* + * Allocate bfq data structures associated with this request. 
+ */ +static int bfq_set_request(struct request_queue *q, struct request *rq, + gfp_t gfp_mask) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + struct cfq_io_context *cic; + const int rw = rq_data_dir(rq); + const int is_sync = rq_is_sync(rq); + struct bfq_queue *bfqq; + struct bfq_group *bfqg; + unsigned long flags; + + might_sleep_if(gfp_mask & __GFP_WAIT); + + cic = bfq_get_io_context(bfqd, gfp_mask); + + spin_lock_irqsave(q->queue_lock, flags); + + if (cic == NULL) + goto queue_fail; + + bfqg = bfq_cic_update_cgroup(cic); + +new_queue: + bfqq = cic_to_bfqq(cic, is_sync); + if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) { + bfqq = bfq_get_queue(bfqd, bfqg, is_sync, cic->ioc, gfp_mask); + cic_set_bfqq(cic, bfqq, is_sync); + } else { + /* + * If the queue was seeky for too long, break it apart. + */ + if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) { + bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq"); + bfqq = bfq_split_bfqq(cic, bfqq); + if (!bfqq) + goto new_queue; + } + + /* + * Check to see if this queue is scheduled to merge with + * another closely cooperating queue. The merging of queues + * happens here as it must be done in process context. + * The reference on new_bfqq was taken in merge_bfqqs. + */ + if (bfqq->new_bfqq != NULL) + bfqq = bfq_merge_bfqqs(bfqd, cic, bfqq); + } + + bfqq->allocated[rw]++; + atomic_inc(&bfqq->ref); + bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq, bfqq->ref); + + spin_unlock_irqrestore(q->queue_lock, flags); + + rq->elevator_private = cic; + rq->elevator_private2 = bfqq; + + return 0; + +queue_fail: + if (cic != NULL) + put_io_context(cic->ioc); + + bfq_schedule_dispatch(bfqd); + spin_unlock_irqrestore(q->queue_lock, flags); + + return 1; +} + +static void bfq_kick_queue(struct work_struct *work) +{ + struct bfq_data *bfqd = + container_of(work, struct bfq_data, unplug_work); + struct request_queue *q = bfqd->queue; + + spin_lock_irq(q->queue_lock); + __blk_run_queue(q, false); + spin_unlock_irq(q->queue_lock); +} + +/* + * Handler of the expiration of the timer running if the active_queue + * is idling inside its time slice. + */ +static void bfq_idle_slice_timer(unsigned long data) +{ + struct bfq_data *bfqd = (struct bfq_data *)data; + struct bfq_queue *bfqq; + unsigned long flags; + enum bfqq_expiration reason; + + spin_lock_irqsave(bfqd->queue->queue_lock, flags); + + bfqq = bfqd->active_queue; + /* + * Theoretical race here: active_queue can be NULL or different + * from the queue that was idling if the timer handler spins on + * the queue_lock and a new request arrives for the current + * queue and there is a full dispatch cycle that changes the + * active_queue. This can hardly happen, but in the worst case + * we just expire a queue too early. 
+ */ + if (bfqq != NULL) { + bfq_log_bfqq(bfqd, bfqq, "slice_timer expired"); + if (bfq_bfqq_budget_timeout(bfqq)) + /* + * Also here the queue can be safely expired + * for budget timeout without wasting + * guarantees + */ + reason = BFQ_BFQQ_BUDGET_TIMEOUT; + else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0) + /* + * The queue may not be empty upon timer expiration, + * because we may not disable the timer when the first + * request of the active queue arrives during + * disk idling + */ + reason = BFQ_BFQQ_TOO_IDLE; + else + goto schedule_dispatch; + + bfq_bfqq_expire(bfqd, bfqq, 1, reason); + } + +schedule_dispatch: + bfq_schedule_dispatch(bfqd); + + spin_unlock_irqrestore(bfqd->queue->queue_lock, flags); +} + +static void bfq_shutdown_timer_wq(struct bfq_data *bfqd) +{ + del_timer_sync(&bfqd->idle_slice_timer); + cancel_work_sync(&bfqd->unplug_work); +} + +static inline void __bfq_put_async_bfqq(struct bfq_data *bfqd, + struct bfq_queue **bfqq_ptr) +{ + struct bfq_group *root_group = bfqd->root_group; + struct bfq_queue *bfqq = *bfqq_ptr; + + bfq_log(bfqd, "put_async_bfqq: %p", bfqq); + if (bfqq != NULL) { + bfq_bfqq_move(bfqd, bfqq, &bfqq->entity, root_group); + bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d", + bfqq, bfqq->ref); + bfq_put_queue(bfqq); + *bfqq_ptr = NULL; + } +} + +/* + * Release all the bfqg references to its async queues. If we are + * deallocating the group these queues may still contain requests, so + * we reparent them to the root cgroup (i.e., the only one that will + * exist for sure untill all the requests on a device are gone). + */ +static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg) +{ + int i, j; + + for (i = 0; i < 2; i++) + for (j = 0; j < IOPRIO_BE_NR; j++) + __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]); + + __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq); +} + +static void bfq_exit_queue(struct elevator_queue *e) +{ + struct bfq_data *bfqd = e->elevator_data; + struct request_queue *q = bfqd->queue; + struct bfq_queue *bfqq, *n; + struct cfq_io_context *cic; + + bfq_shutdown_timer_wq(bfqd); + + spin_lock_irq(q->queue_lock); + + while (!list_empty(&bfqd->cic_list)) { + cic = list_entry(bfqd->cic_list.next, struct cfq_io_context, + queue_list); + __bfq_exit_single_io_context(bfqd, cic); + } + + BUG_ON(bfqd->active_queue != NULL); + list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) + bfq_deactivate_bfqq(bfqd, bfqq, 0); + + bfq_disconnect_groups(bfqd); + spin_unlock_irq(q->queue_lock); + + bfq_shutdown_timer_wq(bfqd); + + spin_lock(&cic_index_lock); + ida_remove(&cic_index_ida, bfqd->cic_index); + spin_unlock(&cic_index_lock); + + /* Wait for cic->key accessors to exit their grace periods. 
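+	 * synchronize_rcu() returns only after all RCU read-side critical
+	 * sections that may still be dereferencing the old cic->key have
+	 * completed, so bfqd can be freed safely afterwards.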
*/ + synchronize_rcu(); + + BUG_ON(timer_pending(&bfqd->idle_slice_timer)); + + bfq_free_root_group(bfqd); + kfree(bfqd); +} + +static int bfq_alloc_cic_index(void) +{ + int index, error; + + do { + if (!ida_pre_get(&cic_index_ida, GFP_KERNEL)) + return -ENOMEM; + + spin_lock(&cic_index_lock); + error = ida_get_new(&cic_index_ida, &index); + spin_unlock(&cic_index_lock); + if (error && error != -EAGAIN) + return error; + } while (error); + + return index; +} + +static void *bfq_init_queue(struct request_queue *q) +{ + struct bfq_group *bfqg; + struct bfq_data *bfqd; + int i; + + i = bfq_alloc_cic_index(); + if (i < 0) + return NULL; + + bfqd = kmalloc_node(sizeof(*bfqd), GFP_KERNEL | __GFP_ZERO, q->node); + if (bfqd == NULL) + return NULL; + + bfqd->cic_index = i; + + /* + * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues. + * Grab a permanent reference to it, so that the normal code flow + * will not attempt to free it. + */ + bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, 1, 0); + atomic_inc(&bfqd->oom_bfqq.ref); + + INIT_LIST_HEAD(&bfqd->cic_list); + + bfqd->queue = q; + + bfqg = bfq_alloc_root_group(bfqd, q->node); + if (bfqg == NULL) { + kfree(bfqd); + return NULL; + } + + bfqd->root_group = bfqg; + + init_timer(&bfqd->idle_slice_timer); + bfqd->idle_slice_timer.function = bfq_idle_slice_timer; + bfqd->idle_slice_timer.data = (unsigned long)bfqd; + + bfqd->rq_pos_tree = RB_ROOT; + + INIT_WORK(&bfqd->unplug_work, bfq_kick_queue); + + INIT_LIST_HEAD(&bfqd->active_list); + INIT_LIST_HEAD(&bfqd->idle_list); + + bfqd->hw_tag = 1; + + bfqd->bfq_max_budget = bfq_default_max_budget; + + bfqd->bfq_quantum = bfq_quantum; + bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0]; + bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1]; + bfqd->bfq_back_max = bfq_back_max; + bfqd->bfq_back_penalty = bfq_back_penalty; + bfqd->bfq_slice_idle = bfq_slice_idle; + bfqd->bfq_class_idle_last_service = 0; + bfqd->bfq_max_budget_async_rq = bfq_max_budget_async_rq; + bfqd->bfq_timeout[BLK_RW_ASYNC] = bfq_timeout_async; + bfqd->bfq_timeout[BLK_RW_SYNC] = bfq_timeout_sync; + + bfqd->low_latency = true; + + bfqd->bfq_raising_coeff = 20; + bfqd->bfq_raising_rt_max_time = msecs_to_jiffies(300); + bfqd->bfq_raising_max_time = msecs_to_jiffies(7500); + bfqd->bfq_raising_min_idle_time = msecs_to_jiffies(2000); + bfqd->bfq_raising_max_softrt_rate = 7000; + + return bfqd; +} + +static void bfq_slab_kill(void) +{ + if (bfq_pool != NULL) + kmem_cache_destroy(bfq_pool); + if (bfq_ioc_pool != NULL) + kmem_cache_destroy(bfq_ioc_pool); +} + +static int __init bfq_slab_setup(void) +{ + bfq_pool = KMEM_CACHE(bfq_queue, 0); + if (bfq_pool == NULL) + goto fail; + + bfq_ioc_pool = kmem_cache_create("bfq_io_context", + sizeof(struct cfq_io_context), + __alignof__(struct cfq_io_context), + 0, NULL); + if (bfq_ioc_pool == NULL) + goto fail; + + return 0; +fail: + bfq_slab_kill(); + return -ENOMEM; +} + +static ssize_t bfq_var_show(unsigned int var, char *page) +{ + return sprintf(page, "%d\n", var); +} + +static ssize_t bfq_var_store(unsigned long *var, const char *page, size_t count) +{ + unsigned long new_val; + int ret = strict_strtoul(page, 10, &new_val); + + if (ret == 0) + *var = new_val; + + return count; +} + +static ssize_t bfq_weights_show(struct elevator_queue *e, char *page) +{ + struct bfq_queue *bfqq; + struct bfq_data *bfqd = e->elevator_data; + ssize_t num_char = 0; + + num_char += sprintf(page + num_char, "Active:\n"); + list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) { + num_char += sprintf(page + num_char, + 
"pid%d: weight %hu, dur %d/%u\n", + bfqq->pid, + bfqq->entity.weight, + jiffies_to_msecs(jiffies - + bfqq->last_rais_start_finish), + jiffies_to_msecs(bfqq->raising_cur_max_time)); + } + num_char += sprintf(page + num_char, "Idle:\n"); + list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) { + num_char += sprintf(page + num_char, + "pid%d: weight %hu, dur %d/%u\n", + bfqq->pid, + bfqq->entity.weight, + jiffies_to_msecs(jiffies - + bfqq->last_rais_start_finish), + jiffies_to_msecs(bfqq->raising_cur_max_time)); + } + return num_char; +} + +#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ +{ \ + struct bfq_data *bfqd = e->elevator_data; \ + unsigned int __data = __VAR; \ + if (__CONV) \ + __data = jiffies_to_msecs(__data); \ + return bfq_var_show(__data, (page)); \ +} +SHOW_FUNCTION(bfq_quantum_show, bfqd->bfq_quantum, 0); +SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 1); +SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 1); +SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0); +SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0); +SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 1); +SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0); +SHOW_FUNCTION(bfq_max_budget_async_rq_show, bfqd->bfq_max_budget_async_rq, 0); +SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout[BLK_RW_SYNC], 1); +SHOW_FUNCTION(bfq_timeout_async_show, bfqd->bfq_timeout[BLK_RW_ASYNC], 1); +SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0); +SHOW_FUNCTION(bfq_raising_coeff_show, bfqd->bfq_raising_coeff, 0); +SHOW_FUNCTION(bfq_raising_max_time_show, bfqd->bfq_raising_max_time, 1); +SHOW_FUNCTION(bfq_raising_rt_max_time_show, bfqd->bfq_raising_rt_max_time, 1); +SHOW_FUNCTION(bfq_raising_min_idle_time_show, bfqd->bfq_raising_min_idle_time, + 1); +SHOW_FUNCTION(bfq_raising_max_softrt_rate_show, + bfqd->bfq_raising_max_softrt_rate, 0); +#undef SHOW_FUNCTION + +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ +static ssize_t \ +__FUNC(struct elevator_queue *e, const char *page, size_t count) \ +{ \ + struct bfq_data *bfqd = e->elevator_data; \ + unsigned long __data; \ + int ret = bfq_var_store(&__data, (page), count); \ + if (__data < (MIN)) \ + __data = (MIN); \ + else if (__data > (MAX)) \ + __data = (MAX); \ + if (__CONV) \ + *(__PTR) = msecs_to_jiffies(__data); \ + else \ + *(__PTR) = __data; \ + return ret; \ +} +STORE_FUNCTION(bfq_quantum_store, &bfqd->bfq_quantum, 1, INT_MAX, 0); +STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1, + INT_MAX, 1); +STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1, + INT_MAX, 1); +STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0); +STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1, + INT_MAX, 0); +STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 1); +STORE_FUNCTION(bfq_max_budget_async_rq_store, &bfqd->bfq_max_budget_async_rq, + 1, INT_MAX, 0); +STORE_FUNCTION(bfq_timeout_async_store, &bfqd->bfq_timeout[BLK_RW_ASYNC], 0, + INT_MAX, 1); +STORE_FUNCTION(bfq_raising_coeff_store, &bfqd->bfq_raising_coeff, 1, + INT_MAX, 0); +STORE_FUNCTION(bfq_raising_max_time_store, &bfqd->bfq_raising_max_time, 0, + INT_MAX, 1); +STORE_FUNCTION(bfq_raising_rt_max_time_store, &bfqd->bfq_raising_rt_max_time, 0, + INT_MAX, 1); +STORE_FUNCTION(bfq_raising_min_idle_time_store, + &bfqd->bfq_raising_min_idle_time, 0, INT_MAX, 1); 
+STORE_FUNCTION(bfq_raising_max_softrt_rate_store, + &bfqd->bfq_raising_max_softrt_rate, 0, INT_MAX, 0); +#undef STORE_FUNCTION + +/* do nothing for the moment */ +static ssize_t bfq_weights_store(struct elevator_queue *e, + const char *page, size_t count) +{ + return count; +} + +static inline unsigned long bfq_estimated_max_budget(struct bfq_data *bfqd) +{ + u64 timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]); + + if (bfqd->peak_rate_samples >= BFQ_PEAK_RATE_SAMPLES) + return bfq_calc_max_budget(bfqd->peak_rate, timeout); + else + return bfq_default_max_budget; +} + +static ssize_t bfq_max_budget_store(struct elevator_queue *e, + const char *page, size_t count) +{ + struct bfq_data *bfqd = e->elevator_data; + unsigned long __data; + int ret = bfq_var_store(&__data, (page), count); + + if (__data == 0) + bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd); + else { + if (__data > INT_MAX) + __data = INT_MAX; + bfqd->bfq_max_budget = __data; + } + + bfqd->bfq_user_max_budget = __data; + + return ret; +} + +static ssize_t bfq_timeout_sync_store(struct elevator_queue *e, + const char *page, size_t count) +{ + struct bfq_data *bfqd = e->elevator_data; + unsigned long __data; + int ret = bfq_var_store(&__data, (page), count); + + if (__data < 1) + __data = 1; + else if (__data > INT_MAX) + __data = INT_MAX; + + bfqd->bfq_timeout[BLK_RW_SYNC] = msecs_to_jiffies(__data); + if (bfqd->bfq_user_max_budget == 0) + bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd); + + return ret; +} + +static ssize_t bfq_low_latency_store(struct elevator_queue *e, + const char *page, size_t count) +{ + struct bfq_data *bfqd = e->elevator_data; + unsigned long __data; + int ret = bfq_var_store(&__data, (page), count); + + if (__data > 1) + __data = 1; + bfqd->low_latency = __data; + + return ret; +} + +#define BFQ_ATTR(name) \ + __ATTR(name, S_IRUGO|S_IWUSR, bfq_##name##_show, bfq_##name##_store) + +static struct elv_fs_entry bfq_attrs[] = { + BFQ_ATTR(quantum), + BFQ_ATTR(fifo_expire_sync), + BFQ_ATTR(fifo_expire_async), + BFQ_ATTR(back_seek_max), + BFQ_ATTR(back_seek_penalty), + BFQ_ATTR(slice_idle), + BFQ_ATTR(max_budget), + BFQ_ATTR(max_budget_async_rq), + BFQ_ATTR(timeout_sync), + BFQ_ATTR(timeout_async), + BFQ_ATTR(low_latency), + BFQ_ATTR(raising_coeff), + BFQ_ATTR(raising_max_time), + BFQ_ATTR(raising_rt_max_time), + BFQ_ATTR(raising_min_idle_time), + BFQ_ATTR(raising_max_softrt_rate), + BFQ_ATTR(weights), + __ATTR_NULL +}; + +static struct elevator_type iosched_bfq = { + .ops = { + .elevator_merge_fn = bfq_merge, + .elevator_merged_fn = bfq_merged_request, + .elevator_merge_req_fn = bfq_merged_requests, + .elevator_allow_merge_fn = bfq_allow_merge, + .elevator_dispatch_fn = bfq_dispatch_requests, + .elevator_add_req_fn = bfq_insert_request, + .elevator_activate_req_fn = bfq_activate_request, + .elevator_deactivate_req_fn = bfq_deactivate_request, + .elevator_queue_empty_fn = bfq_queue_empty, + .elevator_completed_req_fn = bfq_completed_request, + .elevator_former_req_fn = elv_rb_former_request, + .elevator_latter_req_fn = elv_rb_latter_request, + .elevator_set_req_fn = bfq_set_request, + .elevator_put_req_fn = bfq_put_request, + .elevator_may_queue_fn = bfq_may_queue, + .elevator_init_fn = bfq_init_queue, + .elevator_exit_fn = bfq_exit_queue, + .trim = bfq_free_io_context, + }, + .elevator_attrs = bfq_attrs, + .elevator_name = "bfq", + .elevator_owner = THIS_MODULE, +}; + +static int __init bfq_init(void) +{ + /* + * Can be 0 on HZ < 1000 setups. 
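+	 *
+	 * (Both values are expressed in jiffies, so on a coarse-tick
+	 * configuration the conversion from the default durations can
+	 * round down to zero; zero would disable idling and make the
+	 * async budget timeout immediate, hence the clamps to at least
+	 * one jiffy below.)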
+ */ + if (bfq_slice_idle == 0) + bfq_slice_idle = 1; + + if (bfq_timeout_async == 0) + bfq_timeout_async = 1; + + if (bfq_slab_setup()) + return -ENOMEM; + + elv_register(&iosched_bfq); + + return 0; +} + +static void __exit bfq_exit(void) +{ + DECLARE_COMPLETION_ONSTACK(all_gone); + elv_unregister(&iosched_bfq); + bfq_ioc_gone = &all_gone; + /* bfq_ioc_gone's update must be visible before reading bfq_ioc_count */ + smp_wmb(); + if (elv_ioc_count_read(bfq_ioc_count) != 0) + wait_for_completion(&all_gone); + ida_destroy(&cic_index_ida); + bfq_slab_kill(); +} + +module_init(bfq_init); +module_exit(bfq_exit); + +MODULE_AUTHOR("Fabio Checconi, Paolo Valente"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Budget Fair Queueing IO scheduler"); diff --git a/block/bfq-sched.c b/block/bfq-sched.c new file mode 100644 index 0000000000000..ea26d05fe1f03 --- /dev/null +++ b/block/bfq-sched.c @@ -0,0 +1,1037 @@ +/* + * BFQ: Hierarchical B-WF2Q+ scheduler. + * + * Based on ideas and code from CFQ: + * Copyright (C) 2003 Jens Axboe + * + * Copyright (C) 2008 Fabio Checconi + * Paolo Valente + */ + +#ifdef CONFIG_CGROUP_BFQIO +#define for_each_entity(entity) \ + for (; entity != NULL; entity = entity->parent) + +#define for_each_entity_safe(entity, parent) \ + for (; entity && ({ parent = entity->parent; 1; }); entity = parent) + +static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, + int extract, + struct bfq_data *bfqd); + +static int bfq_update_next_active(struct bfq_sched_data *sd) +{ + struct bfq_group *bfqg; + struct bfq_entity *entity, *next_active; + + if (sd->active_entity != NULL) + /* will update/requeue at the end of service */ + return 0; + + /* + * NOTE: this can be improved in many ways, such as returning + * 1 (and thus propagating upwards the update) only when the + * budget changes, or caching the bfqq that will be scheduled + * next from this subtree. By now we worry more about + * correctness than about performance... + */ + next_active = bfq_lookup_next_entity(sd, 0, NULL); + sd->next_active = next_active; + + if (next_active != NULL) { + bfqg = container_of(sd, struct bfq_group, sched_data); + entity = bfqg->my_entity; + if (entity != NULL) + entity->budget = next_active->budget; + } + + return 1; +} + +static inline void bfq_check_next_active(struct bfq_sched_data *sd, + struct bfq_entity *entity) +{ + BUG_ON(sd->next_active != entity); +} +#else +#define for_each_entity(entity) \ + for (; entity != NULL; entity = NULL) + +#define for_each_entity_safe(entity, parent) \ + for (parent = NULL; entity != NULL; entity = parent) + +static inline int bfq_update_next_active(struct bfq_sched_data *sd) +{ + return 0; +} + +static inline void bfq_check_next_active(struct bfq_sched_data *sd, + struct bfq_entity *entity) +{ +} +#endif + +/* + * Shift for timestamp calculations. This actually limits the maximum + * service allowed in one timestamp delta (small shift values increase it), + * the maximum total weight that can be used for the queues in the system + * (big shift values increase it), and the period of virtual time wraparounds. + */ +#define WFQ_SERVICE_SHIFT 22 + +/** + * bfq_gt - compare two timestamps. + * @a: first ts. + * @b: second ts. + * + * Return @a > @b, dealing with wrapping correctly. 
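+ *
+ * For example, if @a has just wrapped around while @b has not
+ * (say a = 3 and b = ULLONG_MAX - 4), then a - b equals 8 as an
+ * unsigned value, which is positive once reinterpreted as s64,
+ * so @a is still correctly reported as the later timestamp.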
+ */ +static inline int bfq_gt(u64 a, u64 b) +{ + return (s64)(a - b) > 0; +} + +static inline struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity) +{ + struct bfq_queue *bfqq = NULL; + + BUG_ON(entity == NULL); + + if (entity->my_sched_data == NULL) + bfqq = container_of(entity, struct bfq_queue, entity); + + return bfqq; +} + + +/** + * bfq_delta - map service into the virtual time domain. + * @service: amount of service. + * @weight: scale factor (weight of an entity or weight sum). + */ +static inline u64 bfq_delta(unsigned long service, + unsigned long weight) +{ + u64 d = (u64)service << WFQ_SERVICE_SHIFT; + + do_div(d, weight); + return d; +} + +/** + * bfq_calc_finish - assign the finish time to an entity. + * @entity: the entity to act upon. + * @service: the service to be charged to the entity. + */ +static inline void bfq_calc_finish(struct bfq_entity *entity, + unsigned long service) +{ + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + + BUG_ON(entity->weight == 0); + + entity->finish = entity->start + + bfq_delta(service, entity->weight); + + if (bfqq != NULL) { + bfq_log_bfqq(bfqq->bfqd, bfqq, + "calc_finish: serv %lu, w %lu", + service, entity->weight); + bfq_log_bfqq(bfqq->bfqd, bfqq, + "calc_finish: start %llu, finish %llu, delta %llu", + entity->start, entity->finish, + bfq_delta(service, entity->weight)); + } +} + +/** + * bfq_entity_of - get an entity from a node. + * @node: the node field of the entity. + * + * Convert a node pointer to the relative entity. This is used only + * to simplify the logic of some functions and not as the generic + * conversion mechanism because, e.g., in the tree walking functions, + * the check for a %NULL value would be redundant. + */ +static inline struct bfq_entity *bfq_entity_of(struct rb_node *node) +{ + struct bfq_entity *entity = NULL; + + if (node != NULL) + entity = rb_entry(node, struct bfq_entity, rb_node); + + return entity; +} + +/** + * bfq_extract - remove an entity from a tree. + * @root: the tree root. + * @entity: the entity to remove. + */ +static inline void bfq_extract(struct rb_root *root, + struct bfq_entity *entity) +{ + BUG_ON(entity->tree != root); + + entity->tree = NULL; + rb_erase(&entity->rb_node, root); +} + +/** + * bfq_idle_extract - extract an entity from the idle tree. + * @st: the service tree of the owning @entity. + * @entity: the entity being removed. + */ +static void bfq_idle_extract(struct bfq_service_tree *st, + struct bfq_entity *entity) +{ + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + struct rb_node *next; + + BUG_ON(entity->tree != &st->idle); + + if (entity == st->first_idle) { + next = rb_next(&entity->rb_node); + st->first_idle = bfq_entity_of(next); + } + + if (entity == st->last_idle) { + next = rb_prev(&entity->rb_node); + st->last_idle = bfq_entity_of(next); + } + + bfq_extract(&st->idle, entity); + + if (bfqq != NULL) + list_del(&bfqq->bfqq_list); +} + +/** + * bfq_insert - generic tree insertion. + * @root: tree root. + * @entity: entity to insert. + * + * This is used for the idle and the active tree, since they are both + * ordered by finish time. 
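+ * Entities with equal finish times are placed in the right subtree of
+ * the node they tie with, so an in-order visit returns them in
+ * insertion order.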
+ */ +static void bfq_insert(struct rb_root *root, struct bfq_entity *entity) +{ + struct bfq_entity *entry; + struct rb_node **node = &root->rb_node; + struct rb_node *parent = NULL; + + BUG_ON(entity->tree != NULL); + + while (*node != NULL) { + parent = *node; + entry = rb_entry(parent, struct bfq_entity, rb_node); + + if (bfq_gt(entry->finish, entity->finish)) + node = &parent->rb_left; + else + node = &parent->rb_right; + } + + rb_link_node(&entity->rb_node, parent, node); + rb_insert_color(&entity->rb_node, root); + + entity->tree = root; +} + +/** + * bfq_update_min - update the min_start field of a entity. + * @entity: the entity to update. + * @node: one of its children. + * + * This function is called when @entity may store an invalid value for + * min_start due to updates to the active tree. The function assumes + * that the subtree rooted at @node (which may be its left or its right + * child) has a valid min_start value. + */ +static inline void bfq_update_min(struct bfq_entity *entity, + struct rb_node *node) +{ + struct bfq_entity *child; + + if (node != NULL) { + child = rb_entry(node, struct bfq_entity, rb_node); + if (bfq_gt(entity->min_start, child->min_start)) + entity->min_start = child->min_start; + } +} + +/** + * bfq_update_active_node - recalculate min_start. + * @node: the node to update. + * + * @node may have changed position or one of its children may have moved, + * this function updates its min_start value. The left and right subtrees + * are assumed to hold a correct min_start value. + */ +static inline void bfq_update_active_node(struct rb_node *node) +{ + struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node); + + entity->min_start = entity->start; + bfq_update_min(entity, node->rb_right); + bfq_update_min(entity, node->rb_left); +} + +/** + * bfq_update_active_tree - update min_start for the whole active tree. + * @node: the starting node. + * + * @node must be the deepest modified node after an update. This function + * updates its min_start using the values held by its children, assuming + * that they did not change, and then updates all the nodes that may have + * changed in the path to the root. The only nodes that may have changed + * are the ones in the path or their siblings. + */ +static void bfq_update_active_tree(struct rb_node *node) +{ + struct rb_node *parent; + +up: + bfq_update_active_node(node); + + parent = rb_parent(node); + if (parent == NULL) + return; + + if (node == parent->rb_left && parent->rb_right != NULL) + bfq_update_active_node(parent->rb_right); + else if (parent->rb_left != NULL) + bfq_update_active_node(parent->rb_left); + + node = parent; + goto up; +} + +/** + * bfq_active_insert - insert an entity in the active tree of its group/device. + * @st: the service tree of the entity. + * @entity: the entity being inserted. + * + * The active tree is ordered by finish time, but an extra key is kept + * per each node, containing the minimum value for the start times of + * its children (and the node itself), so it's possible to search for + * the eligible node with the lowest finish time in logarithmic time. 
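+ * The extra key is kept consistent by bfq_update_active_tree(), which
+ * is invoked below on the deepest node the insertion may have modified.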
+ */ +static void bfq_active_insert(struct bfq_service_tree *st, + struct bfq_entity *entity) +{ + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + struct rb_node *node = &entity->rb_node; + + bfq_insert(&st->active, entity); + + if (node->rb_left != NULL) + node = node->rb_left; + else if (node->rb_right != NULL) + node = node->rb_right; + + bfq_update_active_tree(node); + + if (bfqq != NULL) + list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list); +} + +/** + * bfq_ioprio_to_weight - calc a weight from an ioprio. + * @ioprio: the ioprio value to convert. + */ +static unsigned short bfq_ioprio_to_weight(int ioprio) +{ + WARN_ON(ioprio < 0 || ioprio >= IOPRIO_BE_NR); + return IOPRIO_BE_NR - ioprio; +} + +/** + * bfq_weight_to_ioprio - calc an ioprio from a weight. + * @weight: the weight value to convert. + * + * To preserve as mush as possible the old only-ioprio user interface, + * 0 is used as an escape ioprio value for weights (numerically) equal or + * larger than IOPRIO_BE_NR + */ +static unsigned short bfq_weight_to_ioprio(int weight) +{ + WARN_ON(weight < BFQ_MIN_WEIGHT || weight > BFQ_MAX_WEIGHT); + return IOPRIO_BE_NR - weight < 0 ? 0 : IOPRIO_BE_NR - weight; +} + +static inline void bfq_get_entity(struct bfq_entity *entity) +{ + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + struct bfq_sched_data *sd; + + if (bfqq != NULL) { + sd = entity->sched_data; + atomic_inc(&bfqq->ref); + bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d", + bfqq, bfqq->ref); + } +} + +/** + * bfq_find_deepest - find the deepest node that an extraction can modify. + * @node: the node being removed. + * + * Do the first step of an extraction in an rb tree, looking for the + * node that will replace @node, and returning the deepest node that + * the following modifications to the tree can touch. If @node is the + * last node in the tree return %NULL. + */ +static struct rb_node *bfq_find_deepest(struct rb_node *node) +{ + struct rb_node *deepest; + + if (node->rb_right == NULL && node->rb_left == NULL) + deepest = rb_parent(node); + else if (node->rb_right == NULL) + deepest = node->rb_left; + else if (node->rb_left == NULL) + deepest = node->rb_right; + else { + deepest = rb_next(node); + if (deepest->rb_right != NULL) + deepest = deepest->rb_right; + else if (rb_parent(deepest) != node) + deepest = rb_parent(deepest); + } + + return deepest; +} + +/** + * bfq_active_extract - remove an entity from the active tree. + * @st: the service_tree containing the tree. + * @entity: the entity being removed. + */ +static void bfq_active_extract(struct bfq_service_tree *st, + struct bfq_entity *entity) +{ + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + struct rb_node *node; + + node = bfq_find_deepest(&entity->rb_node); + bfq_extract(&st->active, entity); + + if (node != NULL) + bfq_update_active_tree(node); + + if (bfqq != NULL) + list_del(&bfqq->bfqq_list); +} + +/** + * bfq_idle_insert - insert an entity into the idle tree. + * @st: the service tree containing the tree. + * @entity: the entity to insert. 
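+ *
+ * Besides inserting @entity into the idle tree, refresh the cached
+ * first_idle/last_idle pointers used by bfq_forget_idle() to decide
+ * when idle entities can be released.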
+ */ +static void bfq_idle_insert(struct bfq_service_tree *st, + struct bfq_entity *entity) +{ + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + struct bfq_entity *first_idle = st->first_idle; + struct bfq_entity *last_idle = st->last_idle; + + if (first_idle == NULL || bfq_gt(first_idle->finish, entity->finish)) + st->first_idle = entity; + if (last_idle == NULL || bfq_gt(entity->finish, last_idle->finish)) + st->last_idle = entity; + + bfq_insert(&st->idle, entity); + + if (bfqq != NULL) + list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list); +} + +/** + * bfq_forget_entity - remove an entity from the wfq trees. + * @st: the service tree. + * @entity: the entity being removed. + * + * Update the device status and forget everything about @entity, putting + * the device reference to it, if it is a queue. Entities belonging to + * groups are not refcounted. + */ +static void bfq_forget_entity(struct bfq_service_tree *st, + struct bfq_entity *entity) +{ + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + struct bfq_sched_data *sd; + + BUG_ON(!entity->on_st); + + entity->on_st = 0; + st->wsum -= entity->weight; + if (bfqq != NULL) { + sd = entity->sched_data; + bfq_log_bfqq(bfqq->bfqd, bfqq, "forget_entity: %p %d", + bfqq, bfqq->ref); + bfq_put_queue(bfqq); + } +} + +/** + * bfq_put_idle_entity - release the idle tree ref of an entity. + * @st: service tree for the entity. + * @entity: the entity being released. + */ +static void bfq_put_idle_entity(struct bfq_service_tree *st, + struct bfq_entity *entity) +{ + bfq_idle_extract(st, entity); + bfq_forget_entity(st, entity); +} + +/** + * bfq_forget_idle - update the idle tree if necessary. + * @st: the service tree to act upon. + * + * To preserve the global O(log N) complexity we only remove one entry here; + * as the idle tree will not grow indefinitely this can be done safely. + */ +static void bfq_forget_idle(struct bfq_service_tree *st) +{ + struct bfq_entity *first_idle = st->first_idle; + struct bfq_entity *last_idle = st->last_idle; + + if (RB_EMPTY_ROOT(&st->active) && last_idle != NULL && + !bfq_gt(last_idle->finish, st->vtime)) { + /* + * Forget the whole idle tree, increasing the vtime past + * the last finish time of idle entities. + */ + st->vtime = last_idle->finish; + } + + if (first_idle != NULL && !bfq_gt(first_idle->finish, st->vtime)) + bfq_put_idle_entity(st, first_idle); +} + +static struct bfq_service_tree * +__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, + struct bfq_entity *entity) +{ + struct bfq_service_tree *new_st = old_st; + + if (entity->ioprio_changed) { + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + + BUG_ON(old_st->wsum < entity->weight); + old_st->wsum -= entity->weight; + + if (entity->new_weight != entity->orig_weight) { + entity->orig_weight = entity->new_weight; + entity->ioprio = + bfq_weight_to_ioprio(entity->orig_weight); + } else if (entity->new_ioprio != entity->ioprio) { + entity->ioprio = entity->new_ioprio; + entity->orig_weight = + bfq_ioprio_to_weight(entity->ioprio); + } else + entity->new_weight = entity->orig_weight = + bfq_ioprio_to_weight(entity->ioprio); + + entity->ioprio_class = entity->new_ioprio_class; + entity->ioprio_changed = 0; + + /* + * NOTE: here we may be changing the weight too early, + * this will cause unfairness. The correct approach + * would have required additional complexity to defer + * weight changes to the proper time instants (i.e., + * when entity->finish <= old_st->vtime). 
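+		 * Note also that, for queues, the effective weight computed
+		 * below is the original weight multiplied by the current
+		 * raising_coeff, so weight raising goes through this same path.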
+ */ + new_st = bfq_entity_service_tree(entity); + entity->weight = entity->orig_weight * + (bfqq != NULL ? bfqq->raising_coeff : 1); + new_st->wsum += entity->weight; + + if (new_st != old_st) + entity->start = new_st->vtime; + } + + return new_st; +} + +/** + * bfq_bfqq_served - update the scheduler status after selection for service. + * @bfqq: the queue being served. + * @served: bytes to transfer. + * + * NOTE: this can be optimized, as the timestamps of upper level entities + * are synchronized every time a new bfqq is selected for service. By now, + * we keep it to better check consistency. + */ +static void bfq_bfqq_served(struct bfq_queue *bfqq, unsigned long served) +{ + struct bfq_entity *entity = &bfqq->entity; + struct bfq_service_tree *st; + + for_each_entity(entity) { + st = bfq_entity_service_tree(entity); + + entity->service += served; + WARN_ON_ONCE(entity->service > entity->budget); + BUG_ON(st->wsum == 0); + + st->vtime += bfq_delta(served, st->wsum); + bfq_forget_idle(st); + } + bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %lu secs", served); +} + +/** + * bfq_bfqq_charge_full_budget - set the service to the entity budget. + * @bfqq: the queue that needs a service update. + * + * When it's not possible to be fair in the service domain, because + * a queue is not consuming its budget fast enough (the meaning of + * fast depends on the timeout parameter), we charge it a full + * budget. In this way we should obtain a sort of time-domain + * fairness among all the seeky/slow queues. + */ +static inline void bfq_bfqq_charge_full_budget(struct bfq_queue *bfqq) +{ + struct bfq_entity *entity = &bfqq->entity; + + bfq_log_bfqq(bfqq->bfqd, bfqq, "charge_full_budget"); + + bfq_bfqq_served(bfqq, entity->budget - entity->service); +} + +/** + * __bfq_activate_entity - activate an entity. + * @entity: the entity being activated. + * + * Called whenever an entity is activated, i.e., it is not active and one + * of its children receives a new request, or has to be reactivated due to + * budget exhaustion. It uses the current budget of the entity (and the + * service received if @entity is active) of the queue to calculate its + * timestamps. + */ +static void __bfq_activate_entity(struct bfq_entity *entity) +{ + struct bfq_sched_data *sd = entity->sched_data; + struct bfq_service_tree *st = bfq_entity_service_tree(entity); + + if (entity == sd->active_entity) { + BUG_ON(entity->tree != NULL); + /* + * If we are requeueing the current entity we have + * to take care of not charging to it service it has + * not received. + */ + bfq_calc_finish(entity, entity->service); + entity->start = entity->finish; + sd->active_entity = NULL; + } else if (entity->tree == &st->active) { + /* + * Requeueing an entity due to a change of some + * next_active entity below it. We reuse the old + * start time. + */ + bfq_active_extract(st, entity); + } else if (entity->tree == &st->idle) { + /* + * Must be on the idle tree, bfq_idle_extract() will + * check for that. + */ + bfq_idle_extract(st, entity); + entity->start = bfq_gt(st->vtime, entity->finish) ? + st->vtime : entity->finish; + } else { + /* + * The finish time of the entity may be invalid, and + * it is in the past for sure, otherwise the queue + * would have been on the idle tree. 
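+		 * Start the entity from the current vtime, add its weight to
+		 * the tree it is joining and take a reference to the queue,
+		 * if any, since it is entering a service tree.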
+ */ + entity->start = st->vtime; + st->wsum += entity->weight; + bfq_get_entity(entity); + + BUG_ON(entity->on_st); + entity->on_st = 1; + } + + st = __bfq_entity_update_weight_prio(st, entity); + bfq_calc_finish(entity, entity->budget); + bfq_active_insert(st, entity); +} + +/** + * bfq_activate_entity - activate an entity and its ancestors if necessary. + * @entity: the entity to activate. + * + * Activate @entity and all the entities on the path from it to the root. + */ +static void bfq_activate_entity(struct bfq_entity *entity) +{ + struct bfq_sched_data *sd; + + for_each_entity(entity) { + __bfq_activate_entity(entity); + + sd = entity->sched_data; + if (!bfq_update_next_active(sd)) + /* + * No need to propagate the activation to the + * upper entities, as they will be updated when + * the active entity is rescheduled. + */ + break; + } +} + +/** + * __bfq_deactivate_entity - deactivate an entity from its service tree. + * @entity: the entity to deactivate. + * @requeue: if false, the entity will not be put into the idle tree. + * + * Deactivate an entity, independently from its previous state. If the + * entity was not on a service tree just return, otherwise if it is on + * any scheduler tree, extract it from that tree, and if necessary + * and if the caller did not specify @requeue, put it on the idle tree. + * + * Return %1 if the caller should update the entity hierarchy, i.e., + * if the entity was under service or if it was the next_active for + * its sched_data; return %0 otherwise. + */ +static int __bfq_deactivate_entity(struct bfq_entity *entity, int requeue) +{ + struct bfq_sched_data *sd = entity->sched_data; + struct bfq_service_tree *st = bfq_entity_service_tree(entity); + int was_active = entity == sd->active_entity; + int ret = 0; + + if (!entity->on_st) + return 0; + + BUG_ON(was_active && entity->tree != NULL); + + if (was_active) { + bfq_calc_finish(entity, entity->service); + sd->active_entity = NULL; + } else if (entity->tree == &st->active) + bfq_active_extract(st, entity); + else if (entity->tree == &st->idle) + bfq_idle_extract(st, entity); + else if (entity->tree != NULL) + BUG(); + + if (was_active || sd->next_active == entity) + ret = bfq_update_next_active(sd); + + if (!requeue || !bfq_gt(entity->finish, st->vtime)) + bfq_forget_entity(st, entity); + else + bfq_idle_insert(st, entity); + + BUG_ON(sd->active_entity == entity); + BUG_ON(sd->next_active == entity); + + return ret; +} + +/** + * bfq_deactivate_entity - deactivate an entity. + * @entity: the entity to deactivate. + * @requeue: true if the entity can be put on the idle tree + */ +static void bfq_deactivate_entity(struct bfq_entity *entity, int requeue) +{ + struct bfq_sched_data *sd; + struct bfq_entity *parent; + + for_each_entity_safe(entity, parent) { + sd = entity->sched_data; + + if (!__bfq_deactivate_entity(entity, requeue)) + /* + * The parent entity is still backlogged, and + * we don't need to update it as it is still + * under service. + */ + break; + + if (sd->next_active != NULL) + /* + * The parent entity is still backlogged and + * the budgets on the path towards the root + * need to be updated. + */ + goto update; + + /* + * If we reach there the parent is no more backlogged and + * we want to propagate the dequeue upwards. 
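+		 * From here on requeue is forced to one, so the ancestors
+		 * deactivated below are parked on their idle trees if their
+		 * finish times are still in the future.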
+ */ + requeue = 1; + } + + return; + +update: + entity = parent; + for_each_entity(entity) { + __bfq_activate_entity(entity); + + sd = entity->sched_data; + if (!bfq_update_next_active(sd)) + break; + } +} + +/** + * bfq_update_vtime - update vtime if necessary. + * @st: the service tree to act upon. + * + * If necessary update the service tree vtime to have at least one + * eligible entity, skipping to its start time. Assumes that the + * active tree of the device is not empty. + * + * NOTE: this hierarchical implementation updates vtimes quite often, + * we may end up with reactivated tasks getting timestamps after a + * vtime skip done because we needed a ->first_active entity on some + * intermediate node. + */ +static void bfq_update_vtime(struct bfq_service_tree *st) +{ + struct bfq_entity *entry; + struct rb_node *node = st->active.rb_node; + + entry = rb_entry(node, struct bfq_entity, rb_node); + if (bfq_gt(entry->min_start, st->vtime)) { + st->vtime = entry->min_start; + bfq_forget_idle(st); + } +} + +/** + * bfq_first_active - find the eligible entity with the smallest finish time + * @st: the service tree to select from. + * + * This function searches the first schedulable entity, starting from the + * root of the tree and going on the left every time on this side there is + * a subtree with at least one eligible (start >= vtime) entity. The path + * on the right is followed only if a) the left subtree contains no eligible + * entities and b) no eligible entity has been found yet. + */ +static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st) +{ + struct bfq_entity *entry, *first = NULL; + struct rb_node *node = st->active.rb_node; + + while (node != NULL) { + entry = rb_entry(node, struct bfq_entity, rb_node); +left: + if (!bfq_gt(entry->start, st->vtime)) + first = entry; + + BUG_ON(bfq_gt(entry->min_start, st->vtime)); + + if (node->rb_left != NULL) { + entry = rb_entry(node->rb_left, + struct bfq_entity, rb_node); + if (!bfq_gt(entry->min_start, st->vtime)) { + node = node->rb_left; + goto left; + } + } + if (first != NULL) + break; + node = node->rb_right; + } + + BUG_ON(first == NULL && !RB_EMPTY_ROOT(&st->active)); + return first; +} + +/** + * __bfq_lookup_next_entity - return the first eligible entity in @st. + * @st: the service tree. + * + * Update the virtual time in @st and return the first eligible entity + * it contains. + */ +static struct bfq_entity *__bfq_lookup_next_entity(struct bfq_service_tree *st) +{ + struct bfq_entity *entity; + + if (RB_EMPTY_ROOT(&st->active)) + return NULL; + + bfq_update_vtime(st); + entity = bfq_first_active_entity(st); + BUG_ON(bfq_gt(entity->start, st->vtime)); + + return entity; +} + +/** + * bfq_lookup_next_entity - return the first eligible entity in @sd. + * @sd: the sched_data. + * @extract: if true the returned entity will be also extracted from @sd. + * + * NOTE: since we cache the next_active entity at each level of the + * hierarchy, the complexity of the lookup can be decreased with + * absolutely no effort just returning the cached next_active value; + * we prefer to do full lookups to test the consistency of * the data + * structures. 
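+ *
+ * To avoid starving the idle class, at most once every
+ * BFQ_CL_IDLE_TIMEOUT jiffies the lookup below serves a pending
+ * idle-class entity before the higher classes.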
+ */ +static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, + int extract, + struct bfq_data *bfqd) +{ + struct bfq_service_tree *st = sd->service_tree; + struct bfq_entity *entity; + int i=0; + + BUG_ON(sd->active_entity != NULL); + + if (bfqd != NULL && + jiffies - bfqd->bfq_class_idle_last_service > BFQ_CL_IDLE_TIMEOUT) { + entity = __bfq_lookup_next_entity(st + BFQ_IOPRIO_CLASSES - 1); + if (entity != NULL) { + i = BFQ_IOPRIO_CLASSES - 1; + bfqd->bfq_class_idle_last_service = jiffies; + sd->next_active = entity; + } + } + for (; i < BFQ_IOPRIO_CLASSES; i++) { + entity = __bfq_lookup_next_entity(st + i); + if (entity != NULL) { + if (extract) { + bfq_check_next_active(sd, entity); + bfq_active_extract(st + i, entity); + sd->active_entity = entity; + sd->next_active = NULL; + } + break; + } + } + + return entity; +} + +/* + * Get next queue for service. + */ +static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) +{ + struct bfq_entity *entity = NULL; + struct bfq_sched_data *sd; + struct bfq_queue *bfqq; + + BUG_ON(bfqd->active_queue != NULL); + + if (bfqd->busy_queues == 0) + return NULL; + + sd = &bfqd->root_group->sched_data; + for (; sd != NULL; sd = entity->my_sched_data) { + entity = bfq_lookup_next_entity(sd, 1, bfqd); + BUG_ON(entity == NULL); + entity->service = 0; + } + + bfqq = bfq_entity_to_bfqq(entity); + BUG_ON(bfqq == NULL); + + return bfqq; +} + +/* + * Forced extraction of the given queue. + */ +static void bfq_get_next_queue_forced(struct bfq_data *bfqd, + struct bfq_queue *bfqq) +{ + struct bfq_entity *entity; + struct bfq_sched_data *sd; + + BUG_ON(bfqd->active_queue != NULL); + + entity = &bfqq->entity; + /* + * Bubble up extraction/update from the leaf to the root. + */ + for_each_entity(entity) { + sd = entity->sched_data; + bfq_update_vtime(bfq_entity_service_tree(entity)); + bfq_active_extract(bfq_entity_service_tree(entity), entity); + sd->active_entity = entity; + sd->next_active = NULL; + entity->service = 0; + } + + return; +} + +static void __bfq_bfqd_reset_active(struct bfq_data *bfqd) +{ + if (bfqd->active_cic != NULL) { + put_io_context(bfqd->active_cic->ioc); + bfqd->active_cic = NULL; + } + + bfqd->active_queue = NULL; + del_timer(&bfqd->idle_slice_timer); +} + +static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, + int requeue) +{ + struct bfq_entity *entity = &bfqq->entity; + + if (bfqq == bfqd->active_queue) + __bfq_bfqd_reset_active(bfqd); + + bfq_deactivate_entity(entity, requeue); +} + +static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) +{ + struct bfq_entity *entity = &bfqq->entity; + + bfq_activate_entity(entity); +} + +/* + * Called when the bfqq no longer has requests pending, remove it from + * the service tree. + */ +static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq, + int requeue) +{ + BUG_ON(!bfq_bfqq_busy(bfqq)); + BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); + + bfq_log_bfqq(bfqd, bfqq, "del from busy"); + + bfq_clear_bfqq_busy(bfqq); + + BUG_ON(bfqd->busy_queues == 0); + bfqd->busy_queues--; + + bfq_deactivate_bfqq(bfqd, bfqq, requeue); +} + +/* + * Called when an inactive queue receives a new request. 
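+ * The queue is activated in the scheduler, marked busy and accounted
+ * in busy_queues; this is the counterpart of bfq_del_bfqq_busy().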
+ */ +static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq) +{ + BUG_ON(bfq_bfqq_busy(bfqq)); + BUG_ON(bfqq == bfqd->active_queue); + + bfq_log_bfqq(bfqd, bfqq, "add to busy"); + + bfq_activate_bfqq(bfqd, bfqq); + + bfq_mark_bfqq_busy(bfqq); + bfqd->busy_queues++; +} diff --git a/block/bfq.h b/block/bfq.h new file mode 100644 index 0000000000000..0e8ac689af0a2 --- /dev/null +++ b/block/bfq.h @@ -0,0 +1,587 @@ +/* + * BFQ-v3r2 for 2.6.38: data structures and common functions prototypes. + * + * Based on ideas and code from CFQ: + * Copyright (C) 2003 Jens Axboe + * + * Copyright (C) 2008 Fabio Checconi + * Paolo Valente + */ + +#ifndef _BFQ_H +#define _BFQ_H + +#include +#include +#include +#include + +#define BFQ_IOPRIO_CLASSES 3 +#define BFQ_CL_IDLE_TIMEOUT HZ/5 + +#define BFQ_MIN_WEIGHT 1 +#define BFQ_MAX_WEIGHT 1000 + +#define BFQ_DEFAULT_GRP_WEIGHT 10 +#define BFQ_DEFAULT_GRP_IOPRIO 0 +#define BFQ_DEFAULT_GRP_CLASS IOPRIO_CLASS_BE + +struct bfq_entity; + +/** + * struct bfq_service_tree - per ioprio_class service tree. + * @active: tree for active entities (i.e., those backlogged). + * @idle: tree for idle entities (i.e., those not backlogged, with V <= F_i). + * @first_idle: idle entity with minimum F_i. + * @last_idle: idle entity with maximum F_i. + * @vtime: scheduler virtual time. + * @wsum: scheduler weight sum; active and idle entities contribute to it. + * + * Each service tree represents a B-WF2Q+ scheduler on its own. Each + * ioprio_class has its own independent scheduler, and so its own + * bfq_service_tree. All the fields are protected by the queue lock + * of the containing bfqd. + */ +struct bfq_service_tree { + struct rb_root active; + struct rb_root idle; + + struct bfq_entity *first_idle; + struct bfq_entity *last_idle; + + u64 vtime; + unsigned long wsum; +}; + +/** + * struct bfq_sched_data - multi-class scheduler. + * @active_entity: entity under service. + * @next_active: head-of-the-line entity in the scheduler. + * @service_tree: array of service trees, one per ioprio_class. + * + * bfq_sched_data is the basic scheduler queue. It supports three + * ioprio_classes, and can be used either as a toplevel queue or as + * an intermediate queue on a hierarchical setup. + * @next_active points to the active entity of the sched_data service + * trees that will be scheduled next. + * + * The supported ioprio_classes are the same as in CFQ, in descending + * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE. + * Requests from higher priority queues are served before all the + * requests from lower priority queues; among requests of the same + * queue requests are served according to B-WF2Q+. + * All the fields are protected by the queue lock of the containing bfqd. + */ +struct bfq_sched_data { + struct bfq_entity *active_entity; + struct bfq_entity *next_active; + struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES]; +}; + +/** + * struct bfq_entity - schedulable entity. + * @rb_node: service_tree member. + * @on_st: flag, true if the entity is on a tree (either the active or + * the idle one of its service_tree). + * @finish: B-WF2Q+ finish timestamp (aka F_i). + * @start: B-WF2Q+ start timestamp (aka S_i). + * @tree: tree the entity is enqueued into; %NULL if not on a tree. + * @min_start: minimum start time of the (active) subtree rooted at + * this entity; used for O(log N) lookups into active trees. + * @service: service received during the last round of service. 
+ * @budget: budget used to calculate F_i; F_i = S_i + @budget / @weight. + * @weight: weight of the queue + * @parent: parent entity, for hierarchical scheduling. + * @my_sched_data: for non-leaf nodes in the cgroup hierarchy, the + * associated scheduler queue, %NULL on leaf nodes. + * @sched_data: the scheduler queue this entity belongs to. + * @ioprio: the ioprio in use. + * @new_weight: when a weight change is requested, the new weight value. + * @orig_weight: original weight, used to implement weight boosting + * @new_ioprio: when an ioprio change is requested, the new ioprio value. + * @ioprio_class: the ioprio_class in use. + * @new_ioprio_class: when an ioprio_class change is requested, the new + * ioprio_class value. + * @ioprio_changed: flag, true when the user requested a weight, ioprio or + * ioprio_class change. + * + * A bfq_entity is used to represent either a bfq_queue (leaf node in the + * cgroup hierarchy) or a bfq_group into the upper level scheduler. Each + * entity belongs to the sched_data of the parent group in the cgroup + * hierarchy. Non-leaf entities have also their own sched_data, stored + * in @my_sched_data. + * + * Each entity stores independently its priority values; this would + * allow different weights on different devices, but this + * functionality is not exported to userspace by now. Priorities and + * weights are updated lazily, first storing the new values into the + * new_* fields, then setting the @ioprio_changed flag. As soon as + * there is a transition in the entity state that allows the priority + * update to take place the effective and the requested priority + * values are synchronized. + * + * Unless cgroups are used, the weight value is calculated from the + * ioprio to export the same interface as CFQ. When dealing with + * ``well-behaved'' queues (i.e., queues that do not spend too much + * time to consume their budget and have true sequential behavior, and + * when there are no external factors breaking anticipation) the + * relative weights at each level of the cgroups hierarchy should be + * guaranteed. All the fields are protected by the queue lock of the + * containing bfqd. + */ +struct bfq_entity { + struct rb_node rb_node; + + int on_st; + + u64 finish; + u64 start; + + struct rb_root *tree; + + u64 min_start; + + unsigned long service, budget; + unsigned short weight, new_weight; + unsigned short orig_weight; + + struct bfq_entity *parent; + + struct bfq_sched_data *my_sched_data; + struct bfq_sched_data *sched_data; + + unsigned short ioprio, new_ioprio; + unsigned short ioprio_class, new_ioprio_class; + + int ioprio_changed; +}; + +struct bfq_group; + +/** + * struct bfq_queue - leaf schedulable entity. + * @ref: reference counter. + * @bfqd: parent bfq_data. + * @new_bfqq: shared bfq_queue if queue is cooperating with + * one or more other queues. + * @pos_node: request-position tree member (see bfq_data's @rq_pos_tree). + * @pos_root: request-position tree root (see bfq_data's @rq_pos_tree). + * @sort_list: sorted list of pending requests. + * @next_rq: if fifo isn't expired, next request to serve. + * @queued: nr of requests queued in @sort_list. + * @allocated: currently allocated requests. + * @meta_pending: pending metadata requests. + * @fifo: fifo list of requests in sort_list. + * @entity: entity representing this queue in the scheduler. + * @max_budget: maximum budget allowed from the feedback mechanism. + * @budget_timeout: budget expiration (in jiffies). 
+ * @dispatched: number of requests on the dispatch list or inside driver. + * @org_ioprio: saved ioprio during boosted periods. + * @org_ioprio_class: saved ioprio_class during boosted periods. + * @flags: status flags. + * @bfqq_list: node for active/idle bfqq list inside our bfqd. + * @seek_samples: number of seeks sampled + * @seek_total: sum of the distances of the seeks sampled + * @seek_mean: mean seek distance + * @last_request_pos: position of the last request enqueued + * @pid: pid of the process owning the queue, used for logging purposes. + * @last_rais_start_time: last (idle -> weight-raised) transition attempt + * @raising_cur_max_time: current max raising time for this queue + * + * A bfq_queue is a leaf request queue; it can be associated to an io_context + * or more (if it is an async one). @cgroup holds a reference to the + * cgroup, to be sure that it does not disappear while a bfqq still + * references it (mostly to avoid races between request issuing and task + * migration followed by cgroup distruction). + * All the fields are protected by the queue lock of the containing bfqd. + */ +struct bfq_queue { + atomic_t ref; + struct bfq_data *bfqd; + + /* fields for cooperating queues handling */ + struct bfq_queue *new_bfqq; + struct rb_node pos_node; + struct rb_root *pos_root; + + struct rb_root sort_list; + struct request *next_rq; + int queued[2]; + int allocated[2]; + int meta_pending; + struct list_head fifo; + + struct bfq_entity entity; + + unsigned long max_budget; + unsigned long budget_timeout; + + int dispatched; + + unsigned short org_ioprio; + unsigned short org_ioprio_class; + + unsigned int flags; + + struct list_head bfqq_list; + + unsigned int seek_samples; + u64 seek_total; + sector_t seek_mean; + sector_t last_request_pos; + + pid_t pid; + + /* weight-raising fields */ + unsigned int raising_cur_max_time; + u64 last_rais_start_finish, soft_rt_next_start; + unsigned int raising_coeff; +}; + +/** + * struct bfq_data - per device data structure. + * @queue: request queue for the managed device. + * @root_group: root bfq_group for the device. + * @rq_pos_tree: rbtree sorted by next_request position, + * used when determining if two or more queues + * have interleaving requests (see bfq_close_cooperator). + * @busy_queues: number of bfq_queues containing requests (including the + * queue under service, even if it is idling). + * @queued: number of queued requests. + * @rq_in_driver: number of requests dispatched and waiting for completion. + * @sync_flight: number of sync requests in the driver. + * @max_rq_in_driver: max number of reqs in driver in the last @hw_tag_samples + * completed requests . + * @hw_tag_samples: nr of samples used to calculate hw_tag. + * @hw_tag: flag set to one if the driver is showing a queueing behavior. + * @budgets_assigned: number of budgets assigned. + * @idle_slice_timer: timer set when idling for the next sequential request + * from the queue under service. + * @unplug_work: delayed work to restart dispatching on the request queue. + * @active_queue: bfq_queue under service. + * @active_cic: cfq_io_context (cic) associated with the @active_queue. + * @last_position: on-disk position of the last served request. + * @last_budget_start: beginning of the last budget. + * @last_idling_start: beginning of the last idle slice. + * @peak_rate: peak transfer rate observed for a budget. + * @peak_rate_samples: number of samples used to calculate @peak_rate. 
+ * @bfq_max_budget: maximum budget allotted to a bfq_queue before rescheduling. + * @cic_index: use small consequent indexes as radix tree keys to reduce depth + * @cic_list: list of all the cics active on the bfq_data device. + * @group_list: list of all the bfq_groups active on the device. + * @active_list: list of all the bfq_queues active on the device. + * @idle_list: list of all the bfq_queues idle on the device. + * @bfq_quantum: max number of requests dispatched per dispatch round. + * @bfq_fifo_expire: timeout for async/sync requests; when it expires + * requests are served in fifo order. + * @bfq_back_penalty: weight of backward seeks wrt forward ones. + * @bfq_back_max: maximum allowed backward seek. + * @bfq_slice_idle: maximum idling time. + * @bfq_user_max_budget: user-configured max budget value (0 for auto-tuning). + * @bfq_max_budget_async_rq: maximum budget (in nr of requests) allotted to + * async queues. + * @bfq_timeout: timeout for bfq_queues to consume their budget; used to + * to prevent seeky queues to impose long latencies to well + * behaved ones (this also implies that seeky queues cannot + * receive guarantees in the service domain; after a timeout + * they are charged for the whole allocated budget, to try + * to preserve a behavior reasonably fair among them, but + * without service-domain guarantees). + * @bfq_raising_coeff: Maximum factor by which the weight of a boosted + * queue is multiplied + * @bfq_raising_max_time: maximum duration of a weight-raising period (jiffies) + * @bfq_raising_rt_max_time: maximum duration for soft real-time processes + * @bfq_raising_min_idle_time: minimum idle period after which weight-raising + * may be reactivated for a queue (in jiffies) + * @bfq_raising_max_softrt_rate: max service-rate for a soft real-time queue, + * sectors per seconds + * @oom_bfqq: fallback dummy bfqq for extreme OOM conditions + * + * All the fields are protected by the @queue lock. 
+ */ +struct bfq_data { + struct request_queue *queue; + + struct bfq_group *root_group; + + struct rb_root rq_pos_tree; + + int busy_queues; + int queued; + int rq_in_driver; + int sync_flight; + + int max_rq_in_driver; + int hw_tag_samples; + int hw_tag; + + int budgets_assigned; + + struct timer_list idle_slice_timer; + struct work_struct unplug_work; + + struct bfq_queue *active_queue; + struct cfq_io_context *active_cic; + + sector_t last_position; + + ktime_t last_budget_start; + ktime_t last_idling_start; + int peak_rate_samples; + u64 peak_rate; + unsigned long bfq_max_budget; + + unsigned int cic_index; + struct list_head cic_list; + struct hlist_head group_list; + struct list_head active_list; + struct list_head idle_list; + + unsigned int bfq_quantum; + unsigned int bfq_fifo_expire[2]; + unsigned int bfq_back_penalty; + unsigned int bfq_back_max; + unsigned int bfq_slice_idle; + u64 bfq_class_idle_last_service; + + unsigned int bfq_user_max_budget; + unsigned int bfq_max_budget_async_rq; + unsigned int bfq_timeout[2]; + + bool low_latency; + + /* parameters of the low_latency heuristics */ + unsigned int bfq_raising_coeff; + unsigned int bfq_raising_max_time; + unsigned int bfq_raising_rt_max_time; + unsigned int bfq_raising_min_idle_time; + unsigned int bfq_raising_max_softrt_rate; + + struct bfq_queue oom_bfqq; +}; + +enum bfqq_state_flags { + BFQ_BFQQ_FLAG_busy = 0, /* has requests or is under service */ + BFQ_BFQQ_FLAG_wait_request, /* waiting for a request */ + BFQ_BFQQ_FLAG_must_alloc, /* must be allowed rq alloc */ + BFQ_BFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */ + BFQ_BFQQ_FLAG_idle_window, /* slice idling enabled */ + BFQ_BFQQ_FLAG_prio_changed, /* task priority has changed */ + BFQ_BFQQ_FLAG_sync, /* synchronous queue */ + BFQ_BFQQ_FLAG_budget_new, /* no completion with this budget */ + BFQ_BFQQ_FLAG_coop, /* bfqq is shared */ + BFQ_BFQQ_FLAG_split_coop, /* shared bfqq will be splitted */ + BFQ_BFQQ_FLAG_some_coop_idle, /* some cooperator is inactive */ +}; + +#define BFQ_BFQQ_FNS(name) \ +static inline void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \ +{ \ + (bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_##name); \ +} \ +static inline void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \ +{ \ + (bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_##name); \ +} \ +static inline int bfq_bfqq_##name(const struct bfq_queue *bfqq) \ +{ \ + return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_##name)) != 0; \ +} + +BFQ_BFQQ_FNS(busy); +BFQ_BFQQ_FNS(wait_request); +BFQ_BFQQ_FNS(must_alloc); +BFQ_BFQQ_FNS(fifo_expire); +BFQ_BFQQ_FNS(idle_window); +BFQ_BFQQ_FNS(prio_changed); +BFQ_BFQQ_FNS(sync); +BFQ_BFQQ_FNS(budget_new); +BFQ_BFQQ_FNS(coop); +BFQ_BFQQ_FNS(split_coop); +BFQ_BFQQ_FNS(some_coop_idle); +#undef BFQ_BFQQ_FNS + +/* Logging facilities. */ +#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \ + blk_add_trace_msg((bfqd)->queue, "bfq%d " fmt, (bfqq)->pid, ##args) + +#define bfq_log(bfqd, fmt, args...) \ + blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args) + +/* Expiration reasons. */ +enum bfqq_expiration { + BFQ_BFQQ_TOO_IDLE = 0, /* queue has been idling for too long */ + BFQ_BFQQ_BUDGET_TIMEOUT, /* budget took too long to be used */ + BFQ_BFQQ_BUDGET_EXHAUSTED, /* budget consumed */ + BFQ_BFQQ_NO_MORE_REQUESTS, /* the queue has no more requests */ +}; + +#ifdef CONFIG_CGROUP_BFQIO +/** + * struct bfq_group - per (device, cgroup) data structure. + * @entity: schedulable entity to insert into the parent group sched_data. 
+ * @sched_data: own sched_data, to contain child entities (they may be + * both bfq_queues and bfq_groups). + * @group_node: node to be inserted into the bfqio_cgroup->group_data + * list of the containing cgroup's bfqio_cgroup. + * @bfqd_node: node to be inserted into the @bfqd->group_list list + * of the groups active on the same device; used for cleanup. + * @bfqd: the bfq_data for the device this group acts upon. + * @async_bfqq: array of async queues for all the tasks belonging to + * the group, one queue per ioprio value per ioprio_class, + * except for the idle class that has only one queue. + * @async_idle_bfqq: async queue for the idle class (ioprio is ignored). + * @my_entity: pointer to @entity, %NULL for the toplevel group; used + * to avoid too many special cases during group creation/migration. + * + * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup + * there is a set of bfq_groups, each one collecting the lower-level + * entities belonging to the group that are acting on the same device. + * + * Locking works as follows: + * o @group_node is protected by the bfqio_cgroup lock, and is accessed + * via RCU from its readers. + * o @bfqd is protected by the queue lock, RCU is used to access it + * from the readers. + * o All the other fields are protected by the @bfqd queue lock. + */ +struct bfq_group { + struct bfq_entity entity; + struct bfq_sched_data sched_data; + + struct hlist_node group_node; + struct hlist_node bfqd_node; + + void *bfqd; + + struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR]; + struct bfq_queue *async_idle_bfqq; + + struct bfq_entity *my_entity; +}; + +/** + * struct bfqio_cgroup - bfq cgroup data structure. + * @css: subsystem state for bfq in the containing cgroup. + * @weight: cgroup weight. + * @ioprio: cgroup ioprio. + * @ioprio_class: cgroup ioprio_class. + * @lock: spinlock that protects @ioprio, @ioprio_class and @group_data. + * @group_data: list containing the bfq_group belonging to this cgroup. + * + * @group_data is accessed using RCU, with @lock protecting the updates, + * @ioprio and @ioprio_class are protected by @lock. 
+ */ +struct bfqio_cgroup { + struct cgroup_subsys_state css; + + unsigned short weight, ioprio, ioprio_class; + + spinlock_t lock; + struct hlist_head group_data; +}; +#else +struct bfq_group { + struct bfq_sched_data sched_data; + + struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR]; + struct bfq_queue *async_idle_bfqq; +}; +#endif + +static inline struct bfq_service_tree * +bfq_entity_service_tree(struct bfq_entity *entity) +{ + struct bfq_sched_data *sched_data = entity->sched_data; + unsigned int idx = entity->ioprio_class - 1; + + BUG_ON(idx >= BFQ_IOPRIO_CLASSES); + BUG_ON(sched_data == NULL); + + return sched_data->service_tree + idx; +} + +static inline struct bfq_queue *cic_to_bfqq(struct cfq_io_context *cic, + int is_sync) +{ + return cic->cfqq[!!is_sync]; +} + +static inline void cic_set_bfqq(struct cfq_io_context *cic, + struct bfq_queue *bfqq, int is_sync) +{ + cic->cfqq[!!is_sync] = bfqq; +} + +static inline void call_for_each_cic(struct io_context *ioc, + void (*func)(struct io_context *, + struct cfq_io_context *)) +{ + struct cfq_io_context *cic; + struct hlist_node *n; + + rcu_read_lock(); + hlist_for_each_entry_rcu(cic, n, &ioc->bfq_cic_list, cic_list) + func(ioc, cic); + rcu_read_unlock(); +} + +#define CIC_DEAD_KEY 1ul +#define CIC_DEAD_INDEX_SHIFT 1 + +static inline void *bfqd_dead_key(struct bfq_data *bfqd) +{ + return (void *)(bfqd->cic_index << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY); +} + +/** + * bfq_get_bfqd_locked - get a lock to a bfqd using a RCU protected pointer. + * @ptr: a pointer to a bfqd. + * @flags: storage for the flags to be saved. + * + * This function allows cic->key and bfqg->bfqd to be protected by the + * queue lock of the bfqd they reference; the pointer is dereferenced + * under RCU, so the storage for bfqd is assured to be safe as long + * as the RCU read side critical section does not end. After the + * bfqd->queue->queue_lock is taken the pointer is rechecked, to be + * sure that no other writer accessed it. If we raced with a writer, + * the function returns NULL, with the queue unlocked, otherwise it + * returns the dereferenced pointer, with the queue locked. 
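+ *
+ * On success the caller is expected to drop the lock with
+ * bfq_put_bfqd_unlock(), passing back the same @flags storage.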
+ */ +static inline struct bfq_data *bfq_get_bfqd_locked(void **ptr, + unsigned long *flags) +{ + struct bfq_data *bfqd; + + rcu_read_lock(); + bfqd = rcu_dereference(*(struct bfq_data **)ptr); + + if (bfqd != NULL && !((unsigned long) bfqd & CIC_DEAD_KEY)) { + spin_lock_irqsave(bfqd->queue->queue_lock, *flags); + if (*ptr == bfqd) + goto out; + spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags); + } + + bfqd = NULL; +out: + rcu_read_unlock(); + return bfqd; +} + +static inline void bfq_put_bfqd_unlock(struct bfq_data *bfqd, + unsigned long *flags) +{ + spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags); +} + +static void bfq_changed_ioprio(struct io_context *ioc, + struct cfq_io_context *cic); +static void bfq_put_queue(struct bfq_queue *bfqq); +static void bfq_dispatch_insert(struct request_queue *q, struct request *rq); +static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, + struct bfq_group *bfqg, int is_sync, + struct io_context *ioc, gfp_t gfp_mask); +static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg); +static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq); +#endif diff --git a/block/blk-ioc.c b/block/blk-ioc.c index b791022beef31..ced5f8c9fd631 100644 --- a/block/blk-ioc.c +++ b/block/blk-ioc.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include /* for max_pfn/max_low_pfn */ #include @@ -16,13 +17,12 @@ */ static struct kmem_cache *iocontext_cachep; -static void cfq_dtor(struct io_context *ioc) +static void hlist_sched_dtor(struct io_context *ioc, struct hlist_head *list) { - if (!hlist_empty(&ioc->cic_list)) { + if (!hlist_empty(list)) { struct cfq_io_context *cic; - cic = list_entry(ioc->cic_list.first, struct cfq_io_context, - cic_list); + cic = list_entry(list->first, struct cfq_io_context, cic_list); cic->dtor(ioc); } } @@ -40,7 +40,9 @@ int put_io_context(struct io_context *ioc) if (atomic_long_dec_and_test(&ioc->refcount)) { rcu_read_lock(); - cfq_dtor(ioc); + + hlist_sched_dtor(ioc, &ioc->cic_list); + hlist_sched_dtor(ioc, &ioc->bfq_cic_list); rcu_read_unlock(); kmem_cache_free(iocontext_cachep, ioc); @@ -50,15 +52,14 @@ int put_io_context(struct io_context *ioc) } EXPORT_SYMBOL(put_io_context); -static void cfq_exit(struct io_context *ioc) +static void hlist_sched_exit(struct io_context *ioc, struct hlist_head *list) { rcu_read_lock(); - if (!hlist_empty(&ioc->cic_list)) { + if (!hlist_empty(list)) { struct cfq_io_context *cic; - cic = list_entry(ioc->cic_list.first, struct cfq_io_context, - cic_list); + cic = list_entry(list->first, struct cfq_io_context, cic_list); cic->exit(ioc); } rcu_read_unlock(); @@ -74,9 +75,10 @@ void exit_io_context(struct task_struct *task) task->io_context = NULL; task_unlock(task); - if (atomic_dec_and_test(&ioc->nr_tasks)) - cfq_exit(ioc); - + if (atomic_dec_and_test(&ioc->nr_tasks)) { + hlist_sched_exit(ioc, &ioc->cic_list); + hlist_sched_exit(ioc, &ioc->bfq_cic_list); + } put_io_context(ioc); } @@ -89,12 +91,14 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node) atomic_long_set(&ret->refcount, 1); atomic_set(&ret->nr_tasks, 1); spin_lock_init(&ret->lock); - ret->ioprio_changed = 0; + bitmap_zero(ret->ioprio_changed, IOC_IOPRIO_CHANGED_BITS); ret->ioprio = 0; ret->last_waited = 0; /* doesn't matter... 
*/ ret->nr_batch_requests = 0; /* because this is 0 */ INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH); INIT_HLIST_HEAD(&ret->cic_list); + INIT_RADIX_TREE(&ret->bfq_radix_root, GFP_ATOMIC | __GFP_HIGH); + INIT_HLIST_HEAD(&ret->bfq_cic_list); ret->ioc_data = NULL; } diff --git a/block/blk-settings.c b/block/blk-settings.c index 36c8c1f2af180..0aef26e6dc2f4 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -120,7 +120,7 @@ void blk_set_default_limits(struct queue_limits *lim) lim->discard_granularity = 0; lim->discard_alignment = 0; lim->discard_misaligned = 0; - lim->discard_zeroes_data = -1; + lim->discard_zeroes_data = 1; lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT); lim->alignment_offset = 0; @@ -174,6 +174,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) blk_set_default_limits(&q->limits); blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS); + q->limits.discard_zeroes_data = 0; /* * If the caller didn't supply a lock, fall back to our embedded diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 41fb69150b4d3..629070309f0d3 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -152,7 +152,8 @@ static ssize_t queue_discard_granularity_show(struct request_queue *q, char *pag static ssize_t queue_discard_max_show(struct request_queue *q, char *page) { - return queue_var_show(q->limits.max_discard_sectors << 9, page); + return sprintf(page, "%llu\n", + (unsigned long long)q->limits.max_discard_sectors << 9); } static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page) @@ -511,8 +512,10 @@ int blk_register_queue(struct gendisk *disk) return ret; ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue"); - if (ret < 0) + if (ret < 0) { + blk_trace_remove_sysfs(dev); return ret; + } kobject_uevent(&q->kobj, KOBJ_ADD); diff --git a/block/blk.h b/block/blk.h index 2db8f32838e73..e03adf82761a9 100644 --- a/block/blk.h +++ b/block/blk.h @@ -68,7 +68,8 @@ static inline struct request *__elv_next_request(struct request_queue *q) return rq; } - if (!q->elevator->ops->elevator_dispatch_fn(q, 0)) + if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags) || + !q->elevator->ops->elevator_dispatch_fn(q, 0)) return NULL; } } diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index ea83a4f0c27df..7b2163e141870 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -2840,7 +2840,6 @@ static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic) static void cfq_ioc_set_ioprio(struct io_context *ioc) { call_for_each_cic(ioc, changed_ioprio); - ioc->ioprio_changed = 0; } static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq, @@ -3124,8 +3123,13 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) goto err_free; out: - smp_read_barrier_depends(); - if (unlikely(ioc->ioprio_changed)) + /* + * test_and_clear_bit() implies a memory barrier, paired with + * the wmb() in fs/ioprio.c, so the value seen for ioprio is the + * new one. + */ + if (unlikely(test_and_clear_bit(IOC_CFQ_IOPRIO_CHANGED, + ioc->ioprio_changed))) cfq_ioc_set_ioprio(ioc); #ifdef CONFIG_CFQ_GROUP_IOSCHED diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c index b547cbca7b23a..05d1872b74a1a 100644 --- a/block/deadline-iosched.c +++ b/block/deadline-iosched.c @@ -17,10 +17,10 @@ /* * See Documentation/block/deadline-iosched.txt */ -static const int read_expire = HZ / 2; /* max time before a read is submitted. 
*/ +static const int read_expire = HZ / 4; /* max time before a read is submitted. */ static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */ -static const int writes_starved = 2; /* max times reads can starve a write */ -static const int fifo_batch = 16; /* # of sequential requests treated as one +static const int writes_starved = 4; /* max times reads can starve a write */ +static const int fifo_batch = 1; /* # of sequential requests treated as one by the above parameters. For throughput. */ struct deadline_data { diff --git a/block/genhd.c b/block/genhd.c index e70544beb83e2..9e8522a1d1060 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1741,7 +1741,7 @@ static void disk_add_events(struct gendisk *disk) { struct disk_events *ev; - if (!disk->fops->check_events || !(disk->events | disk->async_events)) + if (!disk->fops->check_events) return; ev = kzalloc(sizeof(*ev), GFP_KERNEL); diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index ac1a599f51476..fcc13ac0aa187 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -33,6 +33,7 @@ #include #include #include +#include #ifdef CONFIG_ACPI_PROCFS_POWER #include @@ -102,6 +103,7 @@ struct acpi_battery { struct mutex lock; struct power_supply bat; struct acpi_device *device; + struct notifier_block pm_nb; unsigned long update_time; int rate_now; int capacity_now; @@ -940,6 +942,21 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event) power_supply_changed(&battery->bat); } +static int battery_notify(struct notifier_block *nb, + unsigned long mode, void *_unused) +{ + struct acpi_battery *battery = container_of(nb, struct acpi_battery, + pm_nb); + switch (mode) { + case PM_POST_SUSPEND: + sysfs_remove_battery(battery); + sysfs_add_battery(battery); + break; + } + + return 0; +} + static int acpi_battery_add(struct acpi_device *device) { int result = 0; @@ -972,6 +989,10 @@ static int acpi_battery_add(struct acpi_device *device) #endif kfree(battery); } + + battery->pm_nb.notifier_call = battery_notify; + register_pm_notifier(&battery->pm_nb); + return result; } @@ -982,6 +1003,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type) if (!device || !acpi_driver_data(device)) return -EINVAL; battery = acpi_driver_data(device); + unregister_pm_notifier(&battery->pm_nb); #ifdef CONFIG_ACPI_PROCFS_POWER acpi_battery_remove_fs(device); #endif diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 85249395623ba..c7358dd68b31c 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -564,7 +564,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) /* Indicate support for various _OSC capabilities. */ if (pci_ext_cfg_avail(root->bus->self)) flags |= OSC_EXT_PCI_CONFIG_SUPPORT; - if (pcie_aspm_enabled()) + if (pcie_aspm_support_enabled()) flags |= OSC_ACTIVE_STATE_PWR_SUPPORT | OSC_CLOCK_PWR_CAPABILITY_SUPPORT; if (pci_msi_enabled()) diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 3a73a93596e88..85b32376dad7e 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -49,10 +49,6 @@ ACPI_MODULE_NAME("processor_perflib"); static DEFINE_MUTEX(performance_mutex); -/* Use cpufreq debug layer for _PPC changes. */ -#define cpufreq_printk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \ - "cpufreq-core", msg) - /* * _PPC support is implemented as a CPUfreq policy notifier: * This means each time a CPUfreq driver registered also with @@ -145,7 +141,7 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr) return -ENODEV; } - cpufreq_printk("CPU %d: _PPC is %d - frequency %s limited\n", pr->id, + pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id, (int)ppc, ppc ? "" : "not"); pr->performance_platform_limit = (int)ppc; diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index b99e624946074..8eee69faf235b 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -944,6 +944,10 @@ static int acpi_bus_get_flags(struct acpi_device *device) if (ACPI_SUCCESS(status)) device->flags.lockable = 1; + /* Power resources cannot be power manageable. */ + if (device->device_type == ACPI_BUS_TYPE_POWER) + return 0; + /* Presence of _PS0|_PR0 indicates 'power manageable' */ status = acpi_get_handle(device->handle, "_PS0", &temp); if (ACPI_FAILURE(status)) diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index b8d96ce37fc99..54c096b1a71cb 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -150,7 +150,7 @@ static const struct ata_port_info ahci_port_info[] = { { AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ), - .flags = AHCI_FLAG_COMMON, + .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_ops, @@ -260,6 +260,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */ { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */ { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */ + { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */ { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */ /* JMicron 360/1/3/5/6, match class to avoid IDE function */ @@ -383,6 +384,8 @@ static const struct pci_device_id ahci_pci_tbl[] = { .class = PCI_CLASS_STORAGE_SATA_AHCI, .class_mask = 0xffffff, .driver_data = board_ahci_yes_fbs }, /* 88se9128 */ + { PCI_DEVICE(0x1b4b, 0x9125), + .driver_data = board_ahci_yes_fbs }, /* 88se9125 */ /* Promise */ { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 26d452339e98f..8498eb5cd413b 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -1897,7 +1897,17 @@ static void ahci_pmp_attach(struct ata_port *ap) ahci_enable_fbs(ap); pp->intr_mask |= PORT_IRQ_BAD_PMP; - writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); + + /* + * We must not change the port interrupt mask register if the + * port is marked frozen, the value in pp->intr_mask will be + * restored later when the port is thawed. + * + * Note that during initialization, the port is marked as + * frozen since the irq handler is not yet registered. 
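+	 * ahci_pmp_detach() below applies the same rule when it clears
+	 * PORT_IRQ_BAD_PMP from the mask.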
+ */ + if (!(ap->pflags & ATA_PFLAG_FROZEN)) + writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); } static void ahci_pmp_detach(struct ata_port *ap) @@ -1913,7 +1923,10 @@ static void ahci_pmp_detach(struct ata_port *ap) writel(cmd, port_mmio + PORT_CMD); pp->intr_mask &= ~PORT_IRQ_BAD_PMP; - writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); + + /* see comment above in ahci_pmp_attach() */ + if (!(ap->pflags & ATA_PFLAG_FROZEN)) + writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); } int ahci_port_resume(struct ata_port *ap) diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index d4e52e2148593..4ccce0f371a36 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -5479,8 +5479,8 @@ struct ata_port *ata_port_alloc(struct ata_host *host) ap = kzalloc(sizeof(*ap), GFP_KERNEL); if (!ap) return NULL; - - ap->pflags |= ATA_PFLAG_INITIALIZING; + + ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN; ap->lock = &host->lock; ap->print_id = -1; ap->host = host; diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 17a637877d031..09329a1eea62c 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -1618,7 +1618,7 @@ static void ata_eh_analyze_serror(struct ata_link *link) * host links. For disabled PMP links, only N bit is * considered as X bit is left at 1 for link plugging. */ - if (link->lpm_policy != ATA_LPM_MAX_POWER) + if (link->lpm_policy > ATA_LPM_MAX_POWER) hotplug_mask = 0; /* hotplug doesn't work w/ LPM */ else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; @@ -3276,6 +3276,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, struct ata_eh_context *ehc = &link->eh_context; struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; enum ata_lpm_policy old_policy = link->lpm_policy; + bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM; unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; unsigned int err_mask; int rc; @@ -3292,7 +3293,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, */ ata_for_each_dev(dev, link, ENABLED) { bool hipm = ata_id_has_hipm(dev->id); - bool dipm = ata_id_has_dipm(dev->id); + bool dipm = ata_id_has_dipm(dev->id) && !no_dipm; /* find the first enabled and LPM enabled devices */ if (!link_dev) @@ -3349,7 +3350,8 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, /* host config updated, enable DIPM if transitioning to MIN_POWER */ ata_for_each_dev(dev, link, ENABLED) { - if (policy == ATA_LPM_MIN_POWER && ata_id_has_dipm(dev->id)) { + if (policy == ATA_LPM_MIN_POWER && !no_dipm && + ata_id_has_dipm(dev->id)) { err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, SATA_DIPM); if (err_mask && err_mask != AC_ERR_DEV) { diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c index 905ff76d3cbbc..635a759cca607 100644 --- a/drivers/ata/pata_cmd64x.c +++ b/drivers/ata/pata_cmd64x.c @@ -41,6 +41,9 @@ enum { CFR = 0x50, CFR_INTR_CH0 = 0x04, + CNTRL = 0x51, + CNTRL_CH0 = 0x04, + CNTRL_CH1 = 0x08, CMDTIM = 0x52, ARTTIM0 = 0x53, DRWTIM0 = 0x54, @@ -328,9 +331,19 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) .port_ops = &cmd648_port_ops } }; - const struct ata_port_info *ppi[] = { &cmd_info[id->driver_data], NULL }; - u8 mrdmode; + const struct ata_port_info *ppi[] = { + &cmd_info[id->driver_data], + &cmd_info[id->driver_data], + NULL + }; + u8 mrdmode, reg; int rc; + struct pci_dev *bridge = 
pdev->bus->self; + /* mobility split bridges don't report enabled ports correctly */ + int port_ok = !(bridge && bridge->vendor == + PCI_VENDOR_ID_MOBILITY_ELECTRONICS); + /* all (with exceptions below) apart from 643 have CNTRL_CH0 bit */ + int cntrl_ch0_ok = (id->driver_data != 0); rc = pcim_enable_device(pdev); if (rc) @@ -341,11 +354,18 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) if (pdev->device == PCI_DEVICE_ID_CMD_646) { /* Does UDMA work ? */ - if (pdev->revision > 4) + if (pdev->revision > 4) { ppi[0] = &cmd_info[2]; + ppi[1] = &cmd_info[2]; + } /* Early rev with other problems ? */ - else if (pdev->revision == 1) + else if (pdev->revision == 1) { ppi[0] = &cmd_info[3]; + ppi[1] = &cmd_info[3]; + } + /* revs 1,2 have no CNTRL_CH0 */ + if (pdev->revision < 3) + cntrl_ch0_ok = 0; } pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64); @@ -354,6 +374,20 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) mrdmode |= 0x02; /* Memory read line enable */ pci_write_config_byte(pdev, MRDMODE, mrdmode); + /* check for enabled ports */ + pci_read_config_byte(pdev, CNTRL, ®); + if (!port_ok) + dev_printk(KERN_NOTICE, &pdev->dev, "Mobility Bridge detected, ignoring CNTRL port enable/disable\n"); + if (port_ok && cntrl_ch0_ok && !(reg & CNTRL_CH0)) { + dev_printk(KERN_NOTICE, &pdev->dev, "Primary port is disabled\n"); + ppi[0] = &ata_dummy_port_info; + + } + if (port_ok && !(reg & CNTRL_CH1)) { + dev_printk(KERN_NOTICE, &pdev->dev, "Secondary port is disabled\n"); + ppi[1] = &ata_dummy_port_info; + } + /* Force PIO 0 here.. */ /* PPC specific fixup copied from old driver */ diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index 25ef1a4556e62..b836e11a8a34e 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -165,7 +165,6 @@ static uint32_t fpga_tx(struct solos_card *); static irqreturn_t solos_irq(int irq, void *dev_id); static struct atm_vcc* find_vcc(struct atm_dev *dev, short vpi, int vci); static int list_vccs(int vci); -static void release_vccs(struct atm_dev *dev); static int atm_init(struct solos_card *, struct device *); static void atm_remove(struct solos_card *); static int send_command(struct solos_card *card, int dev, const char *buf, size_t size); @@ -384,7 +383,6 @@ static int process_status(struct solos_card *card, int port, struct sk_buff *skb /* Anything but 'Showtime' is down */ if (strcmp(state_str, "Showtime")) { atm_dev_signal_change(card->atmdev[port], ATM_PHY_SIG_LOST); - release_vccs(card->atmdev[port]); dev_info(&card->dev->dev, "Port %d: %s\n", port, state_str); return 0; } @@ -697,7 +695,7 @@ void solos_bh(unsigned long card_arg) size); } if (atmdebug) { - dev_info(&card->dev->dev, "Received: device %d\n", port); + dev_info(&card->dev->dev, "Received: port %d\n", port); dev_info(&card->dev->dev, "size: %d VPI: %d VCI: %d\n", size, le16_to_cpu(header->vpi), le16_to_cpu(header->vci)); @@ -830,28 +828,6 @@ static int list_vccs(int vci) return num_found; } -static void release_vccs(struct atm_dev *dev) -{ - int i; - - write_lock_irq(&vcc_sklist_lock); - for (i = 0; i < VCC_HTABLE_SIZE; i++) { - struct hlist_head *head = &vcc_hash[i]; - struct hlist_node *node, *tmp; - struct sock *s; - struct atm_vcc *vcc; - - sk_for_each_safe(s, node, tmp, head) { - vcc = atm_sk(s); - if (vcc->dev == dev) { - vcc_release_async(vcc, -EPIPE); - sk_del_node_init(s); - } - } - } - write_unlock_irq(&vcc_sklist_lock); -} - static int popen(struct atm_vcc *vcc) { @@ -1018,8 +994,15 @@ static 
uint32_t fpga_tx(struct solos_card *card) /* Clean up and free oldskb now it's gone */ if (atmdebug) { + struct pkt_hdr *header = (void *)oldskb->data; + int size = le16_to_cpu(header->size); + + skb_pull(oldskb, sizeof(*header)); dev_info(&card->dev->dev, "Transmitted: port %d\n", port); + dev_info(&card->dev->dev, "size: %d VPI: %d VCI: %d\n", + size, le16_to_cpu(header->vpi), + le16_to_cpu(header->vci)); print_buffer(oldskb); } @@ -1262,7 +1245,7 @@ static int atm_init(struct solos_card *card, struct device *parent) card->atmdev[i]->ci_range.vci_bits = 16; card->atmdev[i]->dev_data = card; card->atmdev[i]->phy_data = (void *)(unsigned long)i; - atm_dev_signal_change(card->atmdev[i], ATM_PHY_SIG_UNKNOWN); + atm_dev_signal_change(card->atmdev[i], ATM_PHY_SIG_FOUND); skb = alloc_skb(sizeof(*header), GFP_ATOMIC); if (!skb) { diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index d57e8d0fb8235..4e20415918cda 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -168,4 +168,44 @@ config SYS_HYPERVISOR bool default n +config GENLOCK + bool "Enable a generic cross-process locking mechanism" + depends on ANON_INODES + help + Enable a generic cross-process locking API to provide protection + for shared memory objects such as graphics buffers. + +config GENLOCK_MISCDEVICE + bool "Enable a misc-device for userspace to access the genlock engine" + depends on GENLOCK + help + Create a miscdevice for the purposes of allowing userspace to create + and interact with locks created using genlock. + +config SYNC + bool "Synchronization framework" + default n + select ANON_INODES + help + This option enables the framework for synchronization between multiple + drivers. Sync implementations can take advantage of hardware + synchronization built into devices like GPUs. + +config SW_SYNC + bool "Software synchronization objects" + default n + depends on SYNC + help + A sync object driver that uses a 32bit counter to coordinate + syncrhronization. Useful when there is no hardware primitive backing + the synchronization. + +config SW_SYNC_USER + bool "Userspace API for SW_SYNC" + default n + depends on SW_SYNC + help + Provides a user space API to the sw sync object. + *WARNING* improper use of this can result in deadlocking kernel + drivers from userspace. endmenu diff --git a/drivers/base/Makefile b/drivers/base/Makefile index 5f51c3b4451e2..b930083bb1656 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -1,6 +1,6 @@ # Makefile for the Linux device tree -obj-y := core.o sys.o bus.o dd.o \ +obj-y := core.o sys.o bus.o dd.o syscore.o \ driver.o class.o platform.o \ cpu.o firmware.o init.o map.o devres.o \ attribute_container.o transport_class.o @@ -8,6 +8,7 @@ obj-$(CONFIG_DEVTMPFS) += devtmpfs.o obj-y += power/ obj-$(CONFIG_HAS_DMA) += dma-mapping.o obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o +obj-$(CONFIG_GENLOCK) += genlock.o obj-$(CONFIG_ISA) += isa.o obj-$(CONFIG_FW_LOADER) += firmware_class.o obj-$(CONFIG_NUMA) += node.o @@ -19,5 +20,8 @@ obj-$(CONFIG_MODULES) += module.o endif obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o +obj-$(CONFIG_SYNC) += sync.o +obj-$(CONFIG_SW_SYNC) += sw_sync.o + ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG diff --git a/drivers/base/base.h b/drivers/base/base.h index 19f49e41ce5de..a34dca0ad0417 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -111,8 +111,6 @@ static inline int driver_match_device(struct device_driver *drv, return drv->bus->match ? 
drv->bus->match(dev, drv) : 1; } -extern void sysdev_shutdown(void); - extern char *make_class_name(const char *name, struct kobject *kobj); extern int devres_release_all(struct device *dev); diff --git a/drivers/base/genlock.c b/drivers/base/genlock.c new file mode 100644 index 0000000000000..5e1d7af5e5295 --- /dev/null +++ b/drivers/base/genlock.c @@ -0,0 +1,819 @@ +/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Lock states - can either be unlocked, held as an exclusive write lock or a + * shared read lock + */ + +#define _UNLOCKED 0 +#define _RDLOCK GENLOCK_RDLOCK +#define _WRLOCK GENLOCK_WRLOCK + +#define GENLOCK_LOG_ERR(fmt, args...) \ +pr_err("genlock: %s: " fmt, __func__, ##args) + +struct genlock { + struct list_head active; /* List of handles holding lock */ + spinlock_t lock; /* Spinlock to protect the lock internals */ + wait_queue_head_t queue; /* Holding pen for processes pending lock */ + struct file *file; /* File structure for exported lock */ + int state; /* Current state of the lock */ + struct kref refcount; +}; + +struct genlock_handle { + struct genlock *lock; /* Lock currently attached to the handle */ + struct list_head entry; /* List node for attaching to a lock */ + struct file *file; /* File structure associated with handle */ + int active; /* Number of times the active lock has been + taken */ +}; + +/* + * Create a spinlock to protect against a race condition when a lock gets + * released while another process tries to attach it + */ + +static DEFINE_SPINLOCK(genlock_file_lock); + +static void genlock_destroy(struct kref *kref) +{ + struct genlock *lock = container_of(kref, struct genlock, + refcount); + + /* + * Clear the private data for the file descriptor in case the fd is + * still active after the lock gets released + */ + + spin_lock(&genlock_file_lock); + if (lock->file) + lock->file->private_data = NULL; + spin_unlock(&genlock_file_lock); + + kfree(lock); +} + +/* + * Release the genlock object. 
Called when all the references to + * the genlock file descriptor are released + */ + +static int genlock_release(struct inode *inodep, struct file *file) +{ + struct genlock *lock = file->private_data; + /* + * Clear the refrence back to this file structure to avoid + * somehow reusing the lock after the file has been destroyed + */ + + if (lock) + lock->file = NULL; + + return 0; +} + +static const struct file_operations genlock_fops = { + .release = genlock_release, +}; + +/** + * genlock_create_lock - Create a new lock + * @handle - genlock handle to attach the lock to + * + * Returns: a pointer to the genlock + */ + +struct genlock *genlock_create_lock(struct genlock_handle *handle) +{ + struct genlock *lock; + + if (IS_ERR_OR_NULL(handle)) { + GENLOCK_LOG_ERR("Invalid handle\n"); + return ERR_PTR(-EINVAL); + } + + if (handle->lock != NULL) { + GENLOCK_LOG_ERR("Handle already has a lock attached\n"); + return ERR_PTR(-EINVAL); + } + + lock = kzalloc(sizeof(*lock), GFP_KERNEL); + if (lock == NULL) { + GENLOCK_LOG_ERR("Unable to allocate memory for a lock\n"); + return ERR_PTR(-ENOMEM); + } + + INIT_LIST_HEAD(&lock->active); + init_waitqueue_head(&lock->queue); + spin_lock_init(&lock->lock); + + lock->state = _UNLOCKED; + + /* + * Create an anonyonmous inode for the object that can exported to + * other processes + */ + + lock->file = anon_inode_getfile("genlock", &genlock_fops, + lock, O_RDWR); + + /* Attach the new lock to the handle */ + handle->lock = lock; + kref_init(&lock->refcount); + + return lock; +} +EXPORT_SYMBOL(genlock_create_lock); + +/* + * Get a file descriptor reference to a lock suitable for sharing with + * other processes + */ + +static int genlock_get_fd(struct genlock *lock) +{ + int ret; + + if (!lock->file) { + GENLOCK_LOG_ERR("No file attached to the lock\n"); + return -EINVAL; + } + + ret = get_unused_fd_flags(0); + if (ret < 0) + return ret; + fd_install(ret, lock->file); + return ret; +} + +/** + * genlock_attach_lock - Attach an existing lock to a handle + * @handle - Pointer to a genlock handle to attach the lock to + * @fd - file descriptor for the exported lock + * + * Returns: A pointer to the attached lock structure + */ + +struct genlock *genlock_attach_lock(struct genlock_handle *handle, int fd) +{ + struct file *file; + struct genlock *lock; + + if (IS_ERR_OR_NULL(handle)) { + GENLOCK_LOG_ERR("Invalid handle\n"); + return ERR_PTR(-EINVAL); + } + + if (handle->lock != NULL) { + GENLOCK_LOG_ERR("Handle already has a lock attached\n"); + return ERR_PTR(-EINVAL); + } + + file = fget(fd); + if (file == NULL) { + GENLOCK_LOG_ERR("Bad file descriptor\n"); + return ERR_PTR(-EBADF); + } + + /* + * take a spinlock to avoid a race condition if the lock is + * released and then attached + */ + + spin_lock(&genlock_file_lock); + lock = file->private_data; + spin_unlock(&genlock_file_lock); + + fput(file); + + if (lock == NULL) { + GENLOCK_LOG_ERR("File descriptor is invalid\n"); + return ERR_PTR(-EINVAL); + } + + handle->lock = lock; + kref_get(&lock->refcount); + + return lock; +} +EXPORT_SYMBOL(genlock_attach_lock); + +/* Helper function that returns 1 if the specified handle holds the lock */ + +static int handle_has_lock(struct genlock *lock, struct genlock_handle *handle) +{ + struct genlock_handle *h; + + list_for_each_entry(h, &lock->active, entry) { + if (h == handle) + return 1; + } + + return 0; +} + +/* If the lock just became available, signal the next entity waiting for it */ + +static void _genlock_signal(struct genlock *lock) +{ + if 
(list_empty(&lock->active)) { + /* If the list is empty, then the lock is free */ + lock->state = _UNLOCKED; + /* Wake up the first process sitting in the queue */ + wake_up(&lock->queue); + } +} + +/* Attempt to release the handle's ownership of the lock */ + +static int _genlock_unlock(struct genlock *lock, struct genlock_handle *handle) +{ + int ret = -EINVAL; + unsigned long irqflags; + + spin_lock_irqsave(&lock->lock, irqflags); + + if (lock->state == _UNLOCKED) { + GENLOCK_LOG_ERR("Trying to unlock an unlocked handle\n"); + goto done; + } + + /* Make sure this handle is an owner of the lock */ + if (!handle_has_lock(lock, handle)) { + GENLOCK_LOG_ERR("handle does not have lock attached to it\n"); + goto done; + } + /* If the handle holds no more references to the lock then + release it (maybe) */ + + if (--handle->active == 0) { + list_del(&handle->entry); + _genlock_signal(lock); + } + + ret = 0; + +done: + spin_unlock_irqrestore(&lock->lock, irqflags); + return ret; +} + +/* Attempt to acquire the lock for the handle */ + +static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle, + int op, int flags, uint32_t timeout) +{ + unsigned long irqflags; + int ret = 0; + unsigned long ticks = msecs_to_jiffies(timeout); + + spin_lock_irqsave(&lock->lock, irqflags); + + /* Sanity check - no blocking locks in a debug context. Even if it + * succeed to not block, the mere idea is too dangerous to continue + */ + + if (in_interrupt() && !(flags & GENLOCK_NOBLOCK)) + BUG(); + + /* Fast path - the lock is unlocked, so go do the needful */ + + if (lock->state == _UNLOCKED) + goto dolock; + + if (handle_has_lock(lock, handle)) { + + /* + * If the handle already holds the lock and the lock type is + * a read lock then just increment the active pointer. This + * allows the handle to do recursive read locks. Recursive + * write locks are not allowed in order to support + * synchronization within a process using a single gralloc + * handle. + */ + + if (lock->state == _RDLOCK && op == _RDLOCK) { + handle->active++; + goto done; + } + + /* + * If the handle holds a write lock then the owner can switch + * to a read lock if they want. Do the transition atomically + * then wake up any pending waiters in case they want a read + * lock too. In order to support synchronization within a + * process the caller must explicity request to convert the + * lock type with the GENLOCK_WRITE_TO_READ flag. + */ + + if (flags & GENLOCK_WRITE_TO_READ) { + if (lock->state == _WRLOCK && op == _RDLOCK) { + lock->state = _RDLOCK; + wake_up(&lock->queue); + goto done; + } else { + GENLOCK_LOG_ERR("Invalid state to convert" + "write to read\n"); + ret = -EINVAL; + goto done; + } + } + } else { + + /* + * Check to ensure the caller has not attempted to convert a + * write to a read without holding the lock. 
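+ * A GENLOCK_WRITE_TO_READ request from a handle that is not an owner is
+ * rejected here rather than being treated as an ordinary read lock.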
+ */ + + if (flags & GENLOCK_WRITE_TO_READ) { + GENLOCK_LOG_ERR("Handle must have lock to convert" + "write to read\n"); + ret = -EINVAL; + goto done; + } + + /* + * If we request a read and the lock is held by a read, then go + * ahead and share the lock + */ + + if (op == GENLOCK_RDLOCK && lock->state == _RDLOCK) + goto dolock; + } + + /* Treat timeout 0 just like a NOBLOCK flag and return if the + lock cannot be aquired without blocking */ + + if (flags & GENLOCK_NOBLOCK || timeout == 0) { + ret = -EAGAIN; + goto done; + } + + /* + * Wait while the lock remains in an incompatible state + * state op wait + * ------------------- + * unlocked n/a no + * read read no + * read write yes + * write n/a yes + */ + + while ((lock->state == _RDLOCK && op == _WRLOCK) || + lock->state == _WRLOCK) { + signed long elapsed; + + spin_unlock_irqrestore(&lock->lock, irqflags); + + elapsed = wait_event_interruptible_timeout(lock->queue, + lock->state == _UNLOCKED || + (lock->state == _RDLOCK && op == _RDLOCK), + ticks); + + spin_lock_irqsave(&lock->lock, irqflags); + + if (elapsed <= 0) { + ret = (elapsed < 0) ? elapsed : -ETIMEDOUT; + goto done; + } + + ticks = (unsigned long) elapsed; + } + +dolock: + /* We can now get the lock, add ourselves to the list of owners */ + + list_add_tail(&handle->entry, &lock->active); + lock->state = op; + handle->active++; + +done: + spin_unlock_irqrestore(&lock->lock, irqflags); + return ret; + +} + +/** + * genlock_lock - Acquire or release a lock (depreciated) + * @handle - pointer to the genlock handle that is requesting the lock + * @op - the operation to perform (RDLOCK, WRLOCK, UNLOCK) + * @flags - flags to control the operation + * @timeout - optional timeout to wait for the lock to come free + * + * Returns: 0 on success or error code on failure + */ + +int genlock_lock(struct genlock_handle *handle, int op, int flags, + uint32_t timeout) +{ + struct genlock *lock; + unsigned long irqflags; + + int ret = 0; + + if (IS_ERR_OR_NULL(handle)) { + GENLOCK_LOG_ERR("Invalid handle\n"); + return -EINVAL; + } + + lock = handle->lock; + + if (lock == NULL) { + GENLOCK_LOG_ERR("Handle does not have a lock attached\n"); + return -EINVAL; + } + + switch (op) { + case GENLOCK_UNLOCK: + ret = _genlock_unlock(lock, handle); + break; + case GENLOCK_RDLOCK: + spin_lock_irqsave(&lock->lock, irqflags); + if (handle_has_lock(lock, handle)) { + /* request the WRITE_TO_READ flag for compatibility */ + flags |= GENLOCK_WRITE_TO_READ; + } + spin_unlock_irqrestore(&lock->lock, irqflags); + /* fall through to take lock */ + case GENLOCK_WRLOCK: + ret = _genlock_lock(lock, handle, op, flags, timeout); + break; + default: + GENLOCK_LOG_ERR("Invalid lock operation\n"); + ret = -EINVAL; + break; + } + + return ret; +} +EXPORT_SYMBOL(genlock_lock); + +/** + * genlock_dreadlock - Acquire or release a lock + * @handle - pointer to the genlock handle that is requesting the lock + * @op - the operation to perform (RDLOCK, WRLOCK, UNLOCK) + * @flags - flags to control the operation + * @timeout - optional timeout to wait for the lock to come free + * + * Returns: 0 on success or error code on failure + */ + +int genlock_dreadlock(struct genlock_handle *handle, int op, int flags, + uint32_t timeout) +{ + struct genlock *lock; + + int ret = 0; + + if (IS_ERR_OR_NULL(handle)) { + GENLOCK_LOG_ERR("Invalid handle\n"); + return -EINVAL; + } + + lock = handle->lock; + + if (lock == NULL) { + GENLOCK_LOG_ERR("Handle does not have a lock attached\n"); + return -EINVAL; + } + + switch (op) { + case 
GENLOCK_UNLOCK: + ret = _genlock_unlock(lock, handle); + break; + case GENLOCK_RDLOCK: + case GENLOCK_WRLOCK: + ret = _genlock_lock(lock, handle, op, flags, timeout); + break; + default: + GENLOCK_LOG_ERR("Invalid lock operation\n"); + ret = -EINVAL; + break; + } + + return ret; +} +EXPORT_SYMBOL(genlock_dreadlock); + +/** + * genlock_wait - Wait for the lock to be released + * @handle - pointer to the genlock handle that is waiting for the lock + * @timeout - optional timeout to wait for the lock to get released + */ + +int genlock_wait(struct genlock_handle *handle, uint32_t timeout) +{ + struct genlock *lock; + unsigned long irqflags; + int ret = 0; + unsigned long ticks = msecs_to_jiffies(timeout); + + if (IS_ERR_OR_NULL(handle)) { + GENLOCK_LOG_ERR("Invalid handle\n"); + return -EINVAL; + } + + lock = handle->lock; + + if (lock == NULL) { + GENLOCK_LOG_ERR("Handle does not have a lock attached\n"); + return -EINVAL; + } + + spin_lock_irqsave(&lock->lock, irqflags); + + /* + * if timeout is 0 and the lock is already unlocked, then success + * otherwise return -EAGAIN + */ + + if (timeout == 0) { + ret = (lock->state == _UNLOCKED) ? 0 : -EAGAIN; + goto done; + } + + while (lock->state != _UNLOCKED) { + signed long elapsed; + + spin_unlock_irqrestore(&lock->lock, irqflags); + + elapsed = wait_event_interruptible_timeout(lock->queue, + lock->state == _UNLOCKED, ticks); + + spin_lock_irqsave(&lock->lock, irqflags); + + if (elapsed <= 0) { + ret = (elapsed < 0) ? elapsed : -ETIMEDOUT; + break; + } + + ticks = (unsigned long) elapsed; + } + +done: + spin_unlock_irqrestore(&lock->lock, irqflags); + return ret; +} + +static void genlock_release_lock(struct genlock_handle *handle) +{ + unsigned long flags; + + if (handle == NULL || handle->lock == NULL) + return; + + spin_lock_irqsave(&handle->lock->lock, flags); + + /* If the handle is holding the lock, then force it closed */ + + if (handle_has_lock(handle->lock, handle)) { + list_del(&handle->entry); + _genlock_signal(handle->lock); + } + spin_unlock_irqrestore(&handle->lock->lock, flags); + + kref_put(&handle->lock->refcount, genlock_destroy); + handle->lock = NULL; + handle->active = 0; +} + +/* + * Release function called when all references to a handle are released + */ + +static int genlock_handle_release(struct inode *inodep, struct file *file) +{ + struct genlock_handle *handle = file->private_data; + + genlock_release_lock(handle); + kfree(handle); + + return 0; +} + +static const struct file_operations genlock_handle_fops = { + .release = genlock_handle_release +}; + +/* + * Allocate a new genlock handle + */ + +static struct genlock_handle *_genlock_get_handle(void) +{ + struct genlock_handle *handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (handle == NULL) { + GENLOCK_LOG_ERR("Unable to allocate memory for the handle\n"); + return ERR_PTR(-ENOMEM); + } + + return handle; +} + +/** + * genlock_get_handle - Create a new genlock handle + * + * Returns: A pointer to a new genlock handle + */ + +struct genlock_handle *genlock_get_handle(void) +{ + struct genlock_handle *handle = _genlock_get_handle(); + if (IS_ERR(handle)) + return handle; + + handle->file = anon_inode_getfile("genlock-handle", + &genlock_handle_fops, handle, O_RDWR); + + return handle; +} +EXPORT_SYMBOL(genlock_get_handle); + +/** + * genlock_put_handle - release a reference to a genlock handle + * @handle - A pointer to the handle to release + */ + +void genlock_put_handle(struct genlock_handle *handle) +{ + if (handle) + fput(handle->file); +} 
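The exported genlock calls above are enough for a kernel client to create, take and release a lock without going through the miscdevice. The sketch below is illustrative only and is not part of the patch; example_genlock_use() is a hypothetical caller, and the linux/err.h and linux/genlock.h include paths are assumptions.

#include <linux/err.h>
#include <linux/genlock.h>	/* assumed header exposing the API above */

static int example_genlock_use(void)
{
	struct genlock_handle *handle;
	struct genlock *lock;
	int ret;

	handle = genlock_get_handle();
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	lock = genlock_create_lock(handle);
	if (IS_ERR(lock)) {
		genlock_put_handle(handle);
		return PTR_ERR(lock);
	}

	/* take an exclusive write lock, waiting up to 100ms for it */
	ret = genlock_dreadlock(handle, GENLOCK_WRLOCK, 0, 100);
	if (ret == 0)
		ret = genlock_dreadlock(handle, GENLOCK_UNLOCK, 0, 0);

	/* dropping the handle also drops our reference to the lock */
	genlock_put_handle(handle);
	return ret;
}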
+EXPORT_SYMBOL(genlock_put_handle); + +/** + * genlock_get_handle_fd - Get a handle reference from a file descriptor + * @fd - The file descriptor for a genlock handle + */ + +struct genlock_handle *genlock_get_handle_fd(int fd) +{ + struct file *file = fget(fd); + + if (file == NULL) + return ERR_PTR(-EINVAL); + + return file->private_data; +} +EXPORT_SYMBOL(genlock_get_handle_fd); + +#ifdef CONFIG_GENLOCK_MISCDEVICE + +static long genlock_dev_ioctl(struct file *filep, unsigned int cmd, + unsigned long arg) +{ + struct genlock_lock param; + struct genlock_handle *handle = filep->private_data; + struct genlock *lock; + int ret; + + if (IS_ERR_OR_NULL(handle)) + return -EINVAL; + + switch (cmd) { + case GENLOCK_IOC_NEW: { + lock = genlock_create_lock(handle); + if (IS_ERR(lock)) + return PTR_ERR(lock); + + return 0; + } + case GENLOCK_IOC_EXPORT: { + if (handle->lock == NULL) { + GENLOCK_LOG_ERR("Handle does not have a lock" + "attached\n"); + return -EINVAL; + } + + ret = genlock_get_fd(handle->lock); + if (ret < 0) + return ret; + + param.fd = ret; + + if (copy_to_user((void __user *) arg, ¶m, + sizeof(param))) + return -EFAULT; + + return 0; + } + case GENLOCK_IOC_ATTACH: { + if (copy_from_user(¶m, (void __user *) arg, + sizeof(param))) + return -EFAULT; + + lock = genlock_attach_lock(handle, param.fd); + if (IS_ERR(lock)) + return PTR_ERR(lock); + + return 0; + } + case GENLOCK_IOC_LOCK: { + if (copy_from_user(¶m, (void __user *) arg, + sizeof(param))) + return -EFAULT; + + return genlock_lock(handle, param.op, param.flags, + param.timeout); + } + case GENLOCK_IOC_DREADLOCK: { + if (copy_from_user(¶m, (void __user *) arg, + sizeof(param))) + return -EFAULT; + + return genlock_dreadlock(handle, param.op, param.flags, + param.timeout); + } + case GENLOCK_IOC_WAIT: { + if (copy_from_user(¶m, (void __user *) arg, + sizeof(param))) + return -EFAULT; + + return genlock_wait(handle, param.timeout); + } + case GENLOCK_IOC_RELEASE: { + /* + * Return error - this ioctl has been deprecated. 
+ * Locks should only be released when the handle is + * destroyed + */ + GENLOCK_LOG_ERR("Deprecated RELEASE ioctl called\n"); + return -EINVAL; + } + default: + GENLOCK_LOG_ERR("Invalid ioctl\n"); + return -EINVAL; + } +} + +static int genlock_dev_release(struct inode *inodep, struct file *file) +{ + struct genlock_handle *handle = file->private_data; + + genlock_release_lock(handle); + kfree(handle); + + return 0; +} + +static int genlock_dev_open(struct inode *inodep, struct file *file) +{ + struct genlock_handle *handle = _genlock_get_handle(); + if (IS_ERR(handle)) + return PTR_ERR(handle); + + handle->file = file; + file->private_data = handle; + return 0; +} + +static const struct file_operations genlock_dev_fops = { + .open = genlock_dev_open, + .release = genlock_dev_release, + .unlocked_ioctl = genlock_dev_ioctl, +}; + +static struct miscdevice genlock_dev; + +static int genlock_dev_init(void) +{ + genlock_dev.minor = MISC_DYNAMIC_MINOR; + genlock_dev.name = "genlock"; + genlock_dev.fops = &genlock_dev_fops; + genlock_dev.parent = NULL; + + return misc_register(&genlock_dev); +} + +static void genlock_dev_close(void) +{ + misc_deregister(&genlock_dev); +} + +module_init(genlock_dev_init); +module_exit(genlock_dev_close); + +#endif diff --git a/drivers/base/iommu.c b/drivers/base/iommu.c index 6e6b6a11b3ced..1377e854e05ba 100644 --- a/drivers/base/iommu.c +++ b/drivers/base/iommu.c @@ -22,6 +22,7 @@ #include #include #include +#include static struct iommu_ops *iommu_ops; @@ -39,7 +40,7 @@ bool iommu_found(void) } EXPORT_SYMBOL_GPL(iommu_found); -struct iommu_domain *iommu_domain_alloc(void) +struct iommu_domain *iommu_domain_alloc(int flags) { struct iommu_domain *domain; int ret; @@ -48,7 +49,7 @@ struct iommu_domain *iommu_domain_alloc(void) if (!domain) return NULL; - ret = iommu_ops->domain_init(domain); + ret = iommu_ops->domain_init(domain, flags); if (ret) goto out_free; @@ -122,3 +123,21 @@ int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order) return iommu_ops->unmap(domain, iova, gfp_order); } EXPORT_SYMBOL_GPL(iommu_unmap); + +int iommu_map_range(struct iommu_domain *domain, unsigned int iova, + struct scatterlist *sg, unsigned int len, int prot) +{ + BUG_ON(iova & (~PAGE_MASK)); + + return iommu_ops->map_range(domain, iova, sg, len, prot); +} +EXPORT_SYMBOL_GPL(iommu_map_range); + +int iommu_unmap_range(struct iommu_domain *domain, unsigned int iova, + unsigned int len) +{ + BUG_ON(iova & (~PAGE_MASK)); + + return iommu_ops->unmap_range(domain, iova, len); +} +EXPORT_SYMBOL_GPL(iommu_unmap_range); diff --git a/drivers/base/memory.c b/drivers/base/memory.c index cafeaaf0428fc..4969f2f5dc329 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -54,6 +54,9 @@ static const struct kset_uevent_ops memory_uevent_ops = { static BLOCKING_NOTIFIER_HEAD(memory_chain); +unsigned long movable_reserved_start, movable_reserved_size; +unsigned long low_power_memory_start, low_power_memory_size; + int register_memory_notifier(struct notifier_block *nb) { return blocking_notifier_chain_register(&memory_chain, nb); @@ -326,6 +329,64 @@ static int block_size_init(void) &attr_block_size_bytes.attr); } +static ssize_t +print_movable_size(struct class *class, struct class_attribute *attr, char *buf) +{ + return sprintf(buf, "%lx\n", movable_reserved_size); +} + +static CLASS_ATTR(movable_size_bytes, 0444, print_movable_size, NULL); + +static int movable_size_init(void) +{ + return sysfs_create_file(&memory_sysdev_class.kset.kobj, + 
&class_attr_movable_size_bytes.attr); +} + +static ssize_t +print_movable_start(struct class *class, struct class_attribute *attr, char *buf) +{ + return sprintf(buf, "%lx\n", movable_reserved_start); +} + +static CLASS_ATTR(movable_start_bytes, 0444, print_movable_start, NULL); + +static int movable_start_init(void) +{ + return sysfs_create_file(&memory_sysdev_class.kset.kobj, + &class_attr_movable_start_bytes.attr); +} + +static ssize_t +print_low_power_memory_size(struct class *class, struct class_attribute *attr, char *buf) +{ + return sprintf(buf, "%lx\n", low_power_memory_size); +} + +static CLASS_ATTR(low_power_memory_size_bytes, 0444, + print_low_power_memory_size, NULL); + +static int low_power_memory_size_init(void) +{ + return sysfs_create_file(&memory_sysdev_class.kset.kobj, + &class_attr_low_power_memory_size_bytes.attr); +} + +static ssize_t +print_low_power_memory_start(struct class *class, struct class_attribute *attr, char *buf) +{ + return sprintf(buf, "%lx\n", low_power_memory_start); +} + +static CLASS_ATTR(low_power_memory_start_bytes, 0444, + print_low_power_memory_start, NULL); + +static int low_power_memory_start_init(void) +{ + return sysfs_create_file(&memory_sysdev_class.kset.kobj, + &class_attr_low_power_memory_start_bytes.attr); +} + /* * Some architectures will have custom drivers to do this, and * will not need to do it from userspace. The fake hot-add code @@ -427,6 +488,96 @@ static inline int memory_fail_init(void) } #endif +#ifdef CONFIG_ARCH_MEMORY_REMOVE +static ssize_t +memory_remove_store(struct class *class, struct class_attribute *attr, + const char *buf, size_t count) +{ + u64 phys_addr; + int ret; + + phys_addr = simple_strtoull(buf, NULL, 0); + + ret = physical_remove_memory(phys_addr, + PAGES_PER_SECTION << PAGE_SHIFT); + + if (ret) + count = ret; + + return count; +} +static CLASS_ATTR(remove, S_IWUSR, NULL, memory_remove_store); + +static int memory_remove_init(void) +{ + return sysfs_create_file(&memory_sysdev_class.kset.kobj, + &class_attr_remove.attr); +} + +static ssize_t +memory_active_store(struct class *class, struct class_attribute *attr, + const char *buf, size_t count) +{ + u64 phys_addr; + int ret; + + phys_addr = simple_strtoull(buf, NULL, 0); + + ret = physical_active_memory(phys_addr, + PAGES_PER_SECTION << PAGE_SHIFT); + + if (ret) + count = ret; + + return count; +} +static CLASS_ATTR(active, S_IWUSR, NULL, memory_active_store); + +static int memory_active_init(void) +{ + return sysfs_create_file(&memory_sysdev_class.kset.kobj, + &class_attr_active.attr); +} + +static ssize_t +memory_low_power_store(struct class *class, struct class_attribute *attr, + const char *buf, size_t count) +{ + u64 phys_addr; + int ret; + + phys_addr = simple_strtoull(buf, NULL, 0); + + ret = physical_low_power_memory(phys_addr, + PAGES_PER_SECTION << PAGE_SHIFT); + + if (ret) + count = ret; + + return count; +} +static CLASS_ATTR(low_power, S_IWUSR, NULL, memory_low_power_store); + +static int memory_low_power_init(void) +{ + return sysfs_create_file(&memory_sysdev_class.kset.kobj, + &class_attr_low_power.attr); +} +#else +static inline int memory_remove_init(void) +{ + return 0; +} +static inline int memory_active_init(void) +{ + return 0; +} +static inline int memory_low_power_init(void) +{ + return 0; +} +#endif + /* * Note that phys_device is optional. 
It is here to allow for * differentiation between which *physical* devices each @@ -583,11 +734,32 @@ int __init memory_dev_init(void) if (!ret) ret = err; err = memory_fail_init(); + if (!ret) + ret = err; + err = memory_remove_init(); + if (!ret) + ret = err; + err = memory_active_init(); + if (!ret) + ret = err; + err = memory_low_power_init(); if (!ret) ret = err; err = block_size_init(); if (!ret) ret = err; + err = movable_size_init(); + if (!ret) + ret = err; + err = movable_start_init(); + if (!ret) + ret = err; + err = low_power_memory_size_init(); + if (!ret) + ret = err; + err = low_power_memory_start_init(); + if (!ret) + ret = err; out: if (ret) printk(KERN_ERR "%s() failed: %d\n", __func__, ret); diff --git a/drivers/base/sw_sync.c b/drivers/base/sw_sync.c new file mode 100644 index 0000000000000..935769c754279 --- /dev/null +++ b/drivers/base/sw_sync.c @@ -0,0 +1,259 @@ +/* + * drivers/base/sw_sync.c + * + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int sw_sync_cmp(u32 a, u32 b) +{ + if (a == b) + return 0; + + return ((s32)a - (s32)b) < 0 ? -1 : 1; +} + +struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value) +{ + struct sw_sync_pt *pt; + + pt = (struct sw_sync_pt *) + sync_pt_create(&obj->obj, sizeof(struct sw_sync_pt)); + + pt->value = value; + + return (struct sync_pt *)pt; +} +EXPORT_SYMBOL(sw_sync_pt_create); + +static struct sync_pt *sw_sync_pt_dup(struct sync_pt *sync_pt) +{ + struct sw_sync_pt *pt = (struct sw_sync_pt *) sync_pt; + struct sw_sync_timeline *obj = + (struct sw_sync_timeline *)sync_pt->parent; + + return (struct sync_pt *) sw_sync_pt_create(obj, pt->value); +} + +static int sw_sync_pt_has_signaled(struct sync_pt *sync_pt) +{ + struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; + struct sw_sync_timeline *obj = + (struct sw_sync_timeline *)sync_pt->parent; + + return sw_sync_cmp(obj->value, pt->value) >= 0; +} + +static int sw_sync_pt_compare(struct sync_pt *a, struct sync_pt *b) +{ + struct sw_sync_pt *pt_a = (struct sw_sync_pt *)a; + struct sw_sync_pt *pt_b = (struct sw_sync_pt *)b; + + return sw_sync_cmp(pt_a->value, pt_b->value); +} + +static void sw_sync_print_obj(struct seq_file *s, + struct sync_timeline *sync_timeline) +{ + struct sw_sync_timeline *obj = (struct sw_sync_timeline *)sync_timeline; + + seq_printf(s, "%d", obj->value); +} + +static void sw_sync_print_pt(struct seq_file *s, struct sync_pt *sync_pt) +{ + struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; + struct sw_sync_timeline *obj = + (struct sw_sync_timeline *)sync_pt->parent; + + seq_printf(s, "%d / %d", pt->value, obj->value); +} + +static int sw_sync_fill_driver_data(struct sync_pt *sync_pt, + void *data, int size) +{ + struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; + + if (size < sizeof(pt->value)) + return -ENOMEM; + + memcpy(data, &pt->value, sizeof(pt->value)); + + return sizeof(pt->value); +} + +struct sync_timeline_ops sw_sync_timeline_ops = { + .driver_name = "sw_sync", + .dup = 
sw_sync_pt_dup, + .has_signaled = sw_sync_pt_has_signaled, + .compare = sw_sync_pt_compare, + .print_obj = sw_sync_print_obj, + .print_pt = sw_sync_print_pt, + .fill_driver_data = sw_sync_fill_driver_data, +}; + + +struct sw_sync_timeline *sw_sync_timeline_create(const char *name) +{ + struct sw_sync_timeline *obj = (struct sw_sync_timeline *) + sync_timeline_create(&sw_sync_timeline_ops, + sizeof(struct sw_sync_timeline), + name); + + return obj; +} +EXPORT_SYMBOL(sw_sync_timeline_create); + +void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc) +{ + obj->value += inc; + + sync_timeline_signal(&obj->obj); +} +EXPORT_SYMBOL(sw_sync_timeline_inc); + +#ifdef CONFIG_SW_SYNC_USER +/* *WARNING* + * + * improper use of this can result in deadlocking kernel drivers from userspace. + */ + +/* opening sw_sync create a new sync obj */ +int sw_sync_open(struct inode *inode, struct file *file) +{ + struct sw_sync_timeline *obj; + char task_comm[TASK_COMM_LEN]; + + get_task_comm(task_comm, current); + + obj = sw_sync_timeline_create(task_comm); + if (obj == NULL) + return -ENOMEM; + + file->private_data = obj; + + return 0; +} + +int sw_sync_release(struct inode *inode, struct file *file) +{ + struct sw_sync_timeline *obj = file->private_data; + sync_timeline_destroy(&obj->obj); + return 0; +} + +long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj, unsigned long arg) +{ + int fd = get_unused_fd(); + int err; + struct sync_pt *pt; + struct sync_fence *fence; + struct sw_sync_create_fence_data data; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + + pt = sw_sync_pt_create(obj, data.value); + if (pt == NULL) { + err = -ENOMEM; + goto err; + } + + data.name[sizeof(data.name) - 1] = '\0'; + fence = sync_fence_create(data.name, pt); + if (fence == NULL) { + sync_pt_free(pt); + err = -ENOMEM; + goto err; + } + + data.fence = fd; + if (copy_to_user((void __user *)arg, &data, sizeof(data))) { + sync_fence_put(fence); + err = -EFAULT; + goto err; + } + + sync_fence_install(fence, fd); + + return 0; + +err: + put_unused_fd(fd); + return err; +} + +long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg) +{ + u32 value; + + if (copy_from_user(&value, (void __user *)arg, sizeof(value))) + return -EFAULT; + + sw_sync_timeline_inc(obj, value); + + return 0; +} + +long sw_sync_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct sw_sync_timeline *obj = file->private_data; + + switch (cmd) { + case SW_SYNC_IOC_CREATE_FENCE: + return sw_sync_ioctl_create_fence(obj, arg); + + case SW_SYNC_IOC_INC: + return sw_sync_ioctl_inc(obj, arg); + + default: + return -ENOTTY; + } +} + +static const struct file_operations sw_sync_fops = { + .owner = THIS_MODULE, + .open = sw_sync_open, + .release = sw_sync_release, + .unlocked_ioctl = sw_sync_ioctl, +}; + +static struct miscdevice sw_sync_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "sw_sync", + .fops = &sw_sync_fops, +}; + +int __init sw_sync_device_init(void) +{ + return misc_register(&sw_sync_dev); +} + +void __exit sw_sync_device_remove(void) +{ + misc_deregister(&sw_sync_dev); +} + +module_init(sw_sync_device_init); +module_exit(sw_sync_device_remove); + +#endif /* CONFIG_SW_SYNC_USER */ diff --git a/drivers/base/sync.c b/drivers/base/sync.c new file mode 100755 index 0000000000000..809d02b21e089 --- /dev/null +++ b/drivers/base/sync.c @@ -0,0 +1,1009 @@ +/* + * drivers/base/sync.c + * + * Copyright (C) 2012 Google, Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define CREATE_TRACE_POINTS +#include + +static void sync_fence_signal_pt(struct sync_pt *pt); +static int _sync_pt_has_signaled(struct sync_pt *pt); +static void sync_fence_free(struct kref *kref); +static void sync_dump(void); + +static LIST_HEAD(sync_timeline_list_head); +static DEFINE_SPINLOCK(sync_timeline_list_lock); + +static LIST_HEAD(sync_fence_list_head); +static DEFINE_SPINLOCK(sync_fence_list_lock); + +struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, + int size, const char *name) +{ + struct sync_timeline *obj; + unsigned long flags; + + if (size < sizeof(struct sync_timeline)) + return NULL; + + obj = kzalloc(size, GFP_KERNEL); + if (obj == NULL) + return NULL; + + kref_init(&obj->kref); + obj->ops = ops; + strlcpy(obj->name, name, sizeof(obj->name)); + + INIT_LIST_HEAD(&obj->child_list_head); + spin_lock_init(&obj->child_list_lock); + + INIT_LIST_HEAD(&obj->active_list_head); + spin_lock_init(&obj->active_list_lock); + + spin_lock_irqsave(&sync_timeline_list_lock, flags); + list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head); + spin_unlock_irqrestore(&sync_timeline_list_lock, flags); + + return obj; +} +EXPORT_SYMBOL(sync_timeline_create); + +static void sync_timeline_free(struct kref *kref) +{ + struct sync_timeline *obj = + container_of(kref, struct sync_timeline, kref); + unsigned long flags; + + if (obj->ops->release_obj) + obj->ops->release_obj(obj); + + spin_lock_irqsave(&sync_timeline_list_lock, flags); + list_del(&obj->sync_timeline_list); + spin_unlock_irqrestore(&sync_timeline_list_lock, flags); + + kfree(obj); +} + +void sync_timeline_destroy(struct sync_timeline *obj) +{ + obj->destroyed = true; + + /* + * If this is not the last reference, signal any children + * that their parent is going away. 
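+ * Any point that has not signaled by then will report -ENOENT from
+ * _sync_pt_has_signaled().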
+ */ + + if (!kref_put(&obj->kref, sync_timeline_free)) + sync_timeline_signal(obj); +} +EXPORT_SYMBOL(sync_timeline_destroy); + +static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt) +{ + unsigned long flags; + + pt->parent = obj; + + spin_lock_irqsave(&obj->child_list_lock, flags); + list_add_tail(&pt->child_list, &obj->child_list_head); + spin_unlock_irqrestore(&obj->child_list_lock, flags); +} + +static void sync_timeline_remove_pt(struct sync_pt *pt) +{ + struct sync_timeline *obj = pt->parent; + unsigned long flags; + + spin_lock_irqsave(&obj->active_list_lock, flags); + if (!list_empty(&pt->active_list)) + list_del_init(&pt->active_list); + spin_unlock_irqrestore(&obj->active_list_lock, flags); + + spin_lock_irqsave(&obj->child_list_lock, flags); + if (!list_empty(&pt->child_list)) { + list_del_init(&pt->child_list); + } + spin_unlock_irqrestore(&obj->child_list_lock, flags); +} + +void sync_timeline_signal(struct sync_timeline *obj) +{ + unsigned long flags; + LIST_HEAD(signaled_pts); + struct list_head *pos, *n; + + trace_sync_timeline(obj); + + spin_lock_irqsave(&obj->active_list_lock, flags); + + list_for_each_safe(pos, n, &obj->active_list_head) { + struct sync_pt *pt = + container_of(pos, struct sync_pt, active_list); + + if (_sync_pt_has_signaled(pt)) { + list_del_init(pos); + list_add(&pt->signaled_list, &signaled_pts); + kref_get(&pt->fence->kref); + } + } + + spin_unlock_irqrestore(&obj->active_list_lock, flags); + + list_for_each_safe(pos, n, &signaled_pts) { + struct sync_pt *pt = + container_of(pos, struct sync_pt, signaled_list); + + list_del_init(pos); + sync_fence_signal_pt(pt); + kref_put(&pt->fence->kref, sync_fence_free); + } +} +EXPORT_SYMBOL(sync_timeline_signal); + +struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size) +{ + struct sync_pt *pt; + + if (size < sizeof(struct sync_pt)) + return NULL; + + pt = kzalloc(size, GFP_KERNEL); + if (pt == NULL) + return NULL; + + INIT_LIST_HEAD(&pt->active_list); + kref_get(&parent->kref); + sync_timeline_add_pt(parent, pt); + + return pt; +} +EXPORT_SYMBOL(sync_pt_create); + +void sync_pt_free(struct sync_pt *pt) +{ + if (pt->parent->ops->free_pt) + pt->parent->ops->free_pt(pt); + + sync_timeline_remove_pt(pt); + + kref_put(&pt->parent->kref, sync_timeline_free); + + kfree(pt); +} +EXPORT_SYMBOL(sync_pt_free); + +/* call with pt->parent->active_list_lock held */ +static int _sync_pt_has_signaled(struct sync_pt *pt) +{ + int old_status = pt->status; + + if (!pt->status) + pt->status = pt->parent->ops->has_signaled(pt); + + if (!pt->status && pt->parent->destroyed) + pt->status = -ENOENT; + + if (pt->status != old_status) + pt->timestamp = ktime_get(); + + return pt->status; +} + +static struct sync_pt *sync_pt_dup(struct sync_pt *pt) +{ + return pt->parent->ops->dup(pt); +} + +/* Adds a sync pt to the active queue. 
Called when added to a fence */ +static void sync_pt_activate(struct sync_pt *pt) +{ + struct sync_timeline *obj = pt->parent; + unsigned long flags; + int err; + + spin_lock_irqsave(&obj->active_list_lock, flags); + + err = _sync_pt_has_signaled(pt); + if (err != 0) + goto out; + + list_add_tail(&pt->active_list, &obj->active_list_head); + +out: + spin_unlock_irqrestore(&obj->active_list_lock, flags); +} + +static int sync_fence_release(struct inode *inode, struct file *file); +static unsigned int sync_fence_poll(struct file *file, poll_table *wait); +static long sync_fence_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); + + +static const struct file_operations sync_fence_fops = { + .release = sync_fence_release, + .poll = sync_fence_poll, + .unlocked_ioctl = sync_fence_ioctl, +}; + +static struct sync_fence *sync_fence_alloc(const char *name) +{ + struct sync_fence *fence; + unsigned long flags; + + fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL); + if (fence == NULL) + return NULL; + + fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops, + fence, 0); + if (fence->file == NULL) + goto err; + + kref_init(&fence->kref); + strlcpy(fence->name, name, sizeof(fence->name)); + + INIT_LIST_HEAD(&fence->pt_list_head); + INIT_LIST_HEAD(&fence->waiter_list_head); + spin_lock_init(&fence->waiter_list_lock); + + init_waitqueue_head(&fence->wq); + + spin_lock_irqsave(&sync_fence_list_lock, flags); + list_add_tail(&fence->sync_fence_list, &sync_fence_list_head); + spin_unlock_irqrestore(&sync_fence_list_lock, flags); + + return fence; + +err: + kfree(fence); + return NULL; +} + +/* TODO: implement a create which takes more that one sync_pt */ +struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt) +{ + struct sync_fence *fence; + + if (pt->fence) + return NULL; + + fence = sync_fence_alloc(name); + if (fence == NULL) + return NULL; + + pt->fence = fence; + list_add(&pt->pt_list, &fence->pt_list_head); + sync_pt_activate(pt); + + /* + * signal the fence in case pt was activated before + * sync_pt_activate(pt) was called + */ + sync_fence_signal_pt(pt); + + return fence; +} +EXPORT_SYMBOL(sync_fence_create); + +static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src) +{ + struct list_head *pos; + + list_for_each(pos, &src->pt_list_head) { + struct sync_pt *orig_pt = + container_of(pos, struct sync_pt, pt_list); + struct sync_pt *new_pt = sync_pt_dup(orig_pt); + + if (new_pt == NULL) + return -ENOMEM; + + new_pt->fence = dst; + list_add(&new_pt->pt_list, &dst->pt_list_head); + sync_pt_activate(new_pt); + } + + return 0; +} + +static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src) +{ + struct list_head *src_pos, *dst_pos, *n; + + list_for_each(src_pos, &src->pt_list_head) { + struct sync_pt *src_pt = + container_of(src_pos, struct sync_pt, pt_list); + bool collapsed = false; + + list_for_each_safe(dst_pos, n, &dst->pt_list_head) { + struct sync_pt *dst_pt = + container_of(dst_pos, struct sync_pt, pt_list); + /* collapse two sync_pts on the same timeline + * to a single sync_pt that will signal at + * the later of the two + */ + if (dst_pt->parent == src_pt->parent) { + if (dst_pt->parent->ops->compare(dst_pt, src_pt) == -1) { + struct sync_pt *new_pt = + sync_pt_dup(src_pt); + if (new_pt == NULL) + return -ENOMEM; + + new_pt->fence = dst; + list_replace(&dst_pt->pt_list, + &new_pt->pt_list); + sync_pt_activate(new_pt); + sync_pt_free(dst_pt); + } + collapsed = true; + break; + } + } + + if (!collapsed) { 
+ struct sync_pt *new_pt = sync_pt_dup(src_pt); + + if (new_pt == NULL) + return -ENOMEM; + + new_pt->fence = dst; + list_add(&new_pt->pt_list, &dst->pt_list_head); + sync_pt_activate(new_pt); + } + } + + return 0; +} + +static void sync_fence_detach_pts(struct sync_fence *fence) +{ + struct list_head *pos, *n; + + list_for_each_safe(pos, n, &fence->pt_list_head) { + struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); + sync_timeline_remove_pt(pt); + } +} + +static void sync_fence_free_pts(struct sync_fence *fence) +{ + struct list_head *pos, *n; + + list_for_each_safe(pos, n, &fence->pt_list_head) { + struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); + sync_pt_free(pt); + } +} + +struct sync_fence *sync_fence_fdget(int fd) +{ + struct file *file = fget(fd); + + if (file == NULL) + return NULL; + + if (file->f_op != &sync_fence_fops) + goto err; + + return file->private_data; + +err: + fput(file); + return NULL; +} +EXPORT_SYMBOL(sync_fence_fdget); + +void sync_fence_put(struct sync_fence *fence) +{ + fput(fence->file); +} +EXPORT_SYMBOL(sync_fence_put); + +void sync_fence_install(struct sync_fence *fence, int fd) +{ + fd_install(fd, fence->file); +} +EXPORT_SYMBOL(sync_fence_install); + +static int sync_fence_get_status(struct sync_fence *fence) +{ + struct list_head *pos; + int status = 1; + + list_for_each(pos, &fence->pt_list_head) { + struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); + int pt_status = pt->status; + + if (pt_status < 0) { + status = pt_status; + break; + } else if (status == 1) { + status = pt_status; + } + } + + return status; +} + +struct sync_fence *sync_fence_merge(const char *name, + struct sync_fence *a, struct sync_fence *b) +{ + struct sync_fence *fence; + int err; + + fence = sync_fence_alloc(name); + if (fence == NULL) + return NULL; + + err = sync_fence_copy_pts(fence, a); + if (err < 0) + goto err; + + err = sync_fence_merge_pts(fence, b); + if (err < 0) + goto err; + + /* + * signal the fence in case one of it's pts were activated before + * they were activated + */ + sync_fence_signal_pt(list_first_entry(&fence->pt_list_head, + struct sync_pt, + pt_list)); + + return fence; +err: + sync_fence_free_pts(fence); + kfree(fence); + return NULL; +} +EXPORT_SYMBOL(sync_fence_merge); + +static void sync_fence_signal_pt(struct sync_pt *pt) +{ + LIST_HEAD(signaled_waiters); + struct sync_fence *fence = pt->fence; + struct list_head *pos; + struct list_head *n; + unsigned long flags; + int status; + + status = sync_fence_get_status(fence); + + spin_lock_irqsave(&fence->waiter_list_lock, flags); + /* + * this should protect against two threads racing on the signaled + * false -> true transition + */ + if (status && !fence->status) { + list_for_each_safe(pos, n, &fence->waiter_list_head) + list_move(pos, &signaled_waiters); + + fence->status = status; + } else { + status = 0; + } + spin_unlock_irqrestore(&fence->waiter_list_lock, flags); + + if (status) { + list_for_each_safe(pos, n, &signaled_waiters) { + struct sync_fence_waiter *waiter = + container_of(pos, struct sync_fence_waiter, + waiter_list); + + list_del(pos); + waiter->callback(fence, waiter); + } + wake_up(&fence->wq); + } +} + +int sync_fence_wait_async(struct sync_fence *fence, + struct sync_fence_waiter *waiter) +{ + unsigned long flags; + int err = 0; + + spin_lock_irqsave(&fence->waiter_list_lock, flags); + + if (fence->status) { + err = fence->status; + goto out; + } + + list_add_tail(&waiter->waiter_list, &fence->waiter_list_head); +out: + 
spin_unlock_irqrestore(&fence->waiter_list_lock, flags); + + return err; +} +EXPORT_SYMBOL(sync_fence_wait_async); + +int sync_fence_cancel_async(struct sync_fence *fence, + struct sync_fence_waiter *waiter) +{ + struct list_head *pos; + struct list_head *n; + unsigned long flags; + int ret = -ENOENT; + + spin_lock_irqsave(&fence->waiter_list_lock, flags); + /* + * Make sure waiter is still in waiter_list because it is possible for + * the waiter to be removed from the list while the callback is still + * pending. + */ + list_for_each_safe(pos, n, &fence->waiter_list_head) { + struct sync_fence_waiter *list_waiter = + container_of(pos, struct sync_fence_waiter, + waiter_list); + if (list_waiter == waiter) { + list_del(pos); + ret = 0; + break; + } + } + spin_unlock_irqrestore(&fence->waiter_list_lock, flags); + return ret; +} +EXPORT_SYMBOL(sync_fence_cancel_async); + +static bool sync_fence_check(struct sync_fence *fence) +{ + /* + * Make sure that reads to fence->status are ordered with the + * wait queue event triggering + */ + smp_rmb(); + return fence->status != 0; +} + +int sync_fence_wait(struct sync_fence *fence, long timeout) +{ + int err = 0; + struct sync_pt *pt; + + trace_sync_wait(fence, 1); + list_for_each_entry(pt, &fence->pt_list_head, pt_list) + trace_sync_pt(pt); + + if (timeout > 0) { + timeout = msecs_to_jiffies(timeout); + err = wait_event_interruptible_timeout(fence->wq, + sync_fence_check(fence), + timeout); + } else if (timeout < 0) { + err = wait_event_interruptible(fence->wq, + sync_fence_check(fence)); + } + trace_sync_wait(fence, 0); + + if (err < 0) + return err; + + if (fence->status < 0) { + pr_info("fence error %d on [%p]\n", fence->status, fence); + sync_dump(); + return fence->status; + } + + if (fence->status == 0) { + pr_info("fence timeout on [%p] after %dms\n", fence, + jiffies_to_msecs(timeout)); + sync_dump(); + return -ETIME; + } + + return 0; +} +EXPORT_SYMBOL(sync_fence_wait); + +static void sync_fence_free(struct kref *kref) +{ + struct sync_fence *fence = container_of(kref, struct sync_fence, kref); + + sync_fence_free_pts(fence); + + kfree(fence); +} + +static int sync_fence_release(struct inode *inode, struct file *file) +{ + struct sync_fence *fence = file->private_data; + unsigned long flags; + + /* + * We need to remove all ways to access this fence before droping + * our ref. + * + * start with its membership in the global fence list + */ + spin_lock_irqsave(&sync_fence_list_lock, flags); + list_del(&fence->sync_fence_list); + spin_unlock_irqrestore(&sync_fence_list_lock, flags); + + /* + * remove its pts from their parents so that sync_timeline_signal() + * can't reference the fence. 
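+ * The points themselves are freed later, by sync_fence_free(), once the
+ * final fence reference is dropped.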
+ */ + sync_fence_detach_pts(fence); + + kref_put(&fence->kref, sync_fence_free); + + return 0; +} + +static unsigned int sync_fence_poll(struct file *file, poll_table *wait) +{ + struct sync_fence *fence = file->private_data; + + poll_wait(file, &fence->wq, wait); + + /* + * Make sure that reads to fence->status are ordered with the + * wait queue event triggering + */ + smp_rmb(); + + if (fence->status == 1) + return POLLIN; + else if (fence->status < 0) + return POLLERR; + else + return 0; +} + +static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg) +{ + __s32 value; + + if (copy_from_user(&value, (void __user *)arg, sizeof(value))) + return -EFAULT; + + return sync_fence_wait(fence, value); +} + +static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg) +{ + int fd = get_unused_fd(); + int err; + struct sync_fence *fence2, *fence3; + struct sync_merge_data data; + + if (fd < 0) + return fd; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) { + err = -EFAULT; + goto err_put_fd; + } + + fence2 = sync_fence_fdget(data.fd2); + if (fence2 == NULL) { + err = -ENOENT; + goto err_put_fd; + } + + data.name[sizeof(data.name) - 1] = '\0'; + fence3 = sync_fence_merge(data.name, fence, fence2); + if (fence3 == NULL) { + err = -ENOMEM; + goto err_put_fence2; + } + + data.fence = fd; + if (copy_to_user((void __user *)arg, &data, sizeof(data))) { + err = -EFAULT; + goto err_put_fence3; + } + + sync_fence_install(fence3, fd); + sync_fence_put(fence2); + return 0; + +err_put_fence3: + sync_fence_put(fence3); + +err_put_fence2: + sync_fence_put(fence2); + +err_put_fd: + put_unused_fd(fd); + return err; +} + +static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size) +{ + struct sync_pt_info *info = data; + int ret; + + if (size < sizeof(struct sync_pt_info)) + return -ENOMEM; + + info->len = sizeof(struct sync_pt_info); + + if (pt->parent->ops->fill_driver_data) { + ret = pt->parent->ops->fill_driver_data(pt, info->driver_data, + size - sizeof(*info)); + if (ret < 0) + return ret; + + info->len += ret; + } + + strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name)); + strlcpy(info->driver_name, pt->parent->ops->driver_name, + sizeof(info->driver_name)); + info->status = pt->status; + info->timestamp_ns = ktime_to_ns(pt->timestamp); + + return info->len; +} + +static long sync_fence_ioctl_fence_info(struct sync_fence *fence, + unsigned long arg) +{ + struct sync_fence_info_data *data; + struct list_head *pos; + __u32 size; + __u32 len = 0; + int ret; + + if (copy_from_user(&size, (void __user *)arg, sizeof(size))) + return -EFAULT; + + if (size < sizeof(struct sync_fence_info_data)) + return -EINVAL; + + if (size > 4096) + size = 4096; + + data = kzalloc(size, GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + strlcpy(data->name, fence->name, sizeof(data->name)); + data->status = fence->status; + len = sizeof(struct sync_fence_info_data); + + list_for_each(pos, &fence->pt_list_head) { + struct sync_pt *pt = + container_of(pos, struct sync_pt, pt_list); + + ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len); + + if (ret < 0) + goto out; + + len += ret; + } + + data->len = len; + + if (copy_to_user((void __user *)arg, data, len)) + ret = -EFAULT; + else + ret = 0; + +out: + kfree(data); + + return ret; +} + +static long sync_fence_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct sync_fence *fence = file->private_data; + switch (cmd) { + case SYNC_IOC_WAIT: + return 
sync_fence_ioctl_wait(fence, arg); + + case SYNC_IOC_MERGE: + return sync_fence_ioctl_merge(fence, arg); + + case SYNC_IOC_FENCE_INFO: + return sync_fence_ioctl_fence_info(fence, arg); + + default: + return -ENOTTY; + } +} + +#ifdef CONFIG_DEBUG_FS +static const char *sync_status_str(int status) +{ + if (status > 0) + return "signaled"; + else if (status == 0) + return "active"; + else + return "error"; +} + +static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence) +{ + int status = pt->status; + seq_printf(s, " %s%spt %s", + fence ? pt->parent->name : "", + fence ? "_" : "", + sync_status_str(status)); + if (pt->status) { + struct timeval tv = ktime_to_timeval(pt->timestamp); + seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec); + } + + if (pt->parent->ops->timeline_value_str && + pt->parent->ops->pt_value_str) { + char value[64]; + pt->parent->ops->pt_value_str(pt, value, sizeof(value)); + seq_printf(s, ": %s", value); + if (fence) { + pt->parent->ops->timeline_value_str(pt->parent, value, + sizeof(value)); + seq_printf(s, " / %s", value); + } + } else if (pt->parent->ops->print_pt) { + seq_printf(s, ": "); + pt->parent->ops->print_pt(s, pt); + } + + seq_printf(s, "\n"); +} + +static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj) +{ + struct list_head *pos; + unsigned long flags; + + seq_printf(s, "%s %s", obj->name, obj->ops->driver_name); + + if (obj->ops->timeline_value_str) { + char value[64]; + obj->ops->timeline_value_str(obj, value, sizeof(value)); + seq_printf(s, ": %s", value); + } else if (obj->ops->print_obj) { + seq_printf(s, ": "); + obj->ops->print_obj(s, obj); + } + + seq_printf(s, "\n"); + + spin_lock_irqsave(&obj->child_list_lock, flags); + list_for_each(pos, &obj->child_list_head) { + struct sync_pt *pt = + container_of(pos, struct sync_pt, child_list); + sync_print_pt(s, pt, false); + } + spin_unlock_irqrestore(&obj->child_list_lock, flags); +} + +static void sync_print_fence(struct seq_file *s, struct sync_fence *fence) +{ + struct list_head *pos; + unsigned long flags; + + seq_printf(s, "[%p] %s: %s\n", fence, fence->name, + sync_status_str(fence->status)); + + list_for_each(pos, &fence->pt_list_head) { + struct sync_pt *pt = + container_of(pos, struct sync_pt, pt_list); + sync_print_pt(s, pt, true); + } + + spin_lock_irqsave(&fence->waiter_list_lock, flags); + list_for_each(pos, &fence->waiter_list_head) { + struct sync_fence_waiter *waiter = + container_of(pos, struct sync_fence_waiter, + waiter_list); + + seq_printf(s, "waiter %pF\n", waiter->callback); + } + spin_unlock_irqrestore(&fence->waiter_list_lock, flags); +} + +static int sync_debugfs_show(struct seq_file *s, void *unused) +{ + unsigned long flags; + struct list_head *pos; + + seq_printf(s, "objs:\n--------------\n"); + + spin_lock_irqsave(&sync_timeline_list_lock, flags); + list_for_each(pos, &sync_timeline_list_head) { + struct sync_timeline *obj = + container_of(pos, struct sync_timeline, + sync_timeline_list); + + sync_print_obj(s, obj); + seq_printf(s, "\n"); + } + spin_unlock_irqrestore(&sync_timeline_list_lock, flags); + + seq_printf(s, "fences:\n--------------\n"); + + spin_lock_irqsave(&sync_fence_list_lock, flags); + list_for_each(pos, &sync_fence_list_head) { + struct sync_fence *fence = + container_of(pos, struct sync_fence, sync_fence_list); + + sync_print_fence(s, fence); + seq_printf(s, "\n"); + } + spin_unlock_irqrestore(&sync_fence_list_lock, flags); + return 0; +} + +static int sync_debugfs_open(struct inode *inode, struct file *file) +{ + return 
single_open(file, sync_debugfs_show, inode->i_private); +} + +static const struct file_operations sync_debugfs_fops = { + .open = sync_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static __init int sync_debugfs_init(void) +{ + debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops); + return 0; +} +late_initcall(sync_debugfs_init); + +#define DUMP_CHUNK 256 +static char sync_dump_buf[64 * 1024]; +void sync_dump(void) +{ + struct seq_file s = { + .buf = sync_dump_buf, + .size = sizeof(sync_dump_buf) - 1, + }; + int i; + + sync_debugfs_show(&s, NULL); + + for (i = 0; i < s.count; i += DUMP_CHUNK) { + if ((s.count - i) > DUMP_CHUNK) { + char c = s.buf[i + DUMP_CHUNK]; + s.buf[i + DUMP_CHUNK] = 0; + pr_cont("%s", s.buf + i); + s.buf[i + DUMP_CHUNK] = c; + } else { + s.buf[s.count] = 0; + pr_cont("%s", s.buf + i); + } + } +} +#else +static void sync_dump(void) +{ +} +#endif diff --git a/drivers/base/sys.c b/drivers/base/sys.c index 1667aaf4fde68..9dff77bfe1e34 100644 --- a/drivers/base/sys.c +++ b/drivers/base/sys.c @@ -166,8 +166,38 @@ EXPORT_SYMBOL_GPL(sysdev_class_unregister); static DEFINE_MUTEX(sysdev_drivers_lock); +/* + * @dev != NULL means that we're unwinding because some drv->add() + * failed for some reason. You need to grab sysdev_drivers_lock before + * calling this. + */ +static void __sysdev_driver_remove(struct sysdev_class *cls, + struct sysdev_driver *drv, + struct sys_device *from_dev) +{ + struct sys_device *dev = from_dev; + + list_del_init(&drv->entry); + if (!cls) + return; + + if (!drv->remove) + goto kset_put; + + if (dev) + list_for_each_entry_continue_reverse(dev, &cls->kset.list, + kobj.entry) + drv->remove(dev); + else + list_for_each_entry(dev, &cls->kset.list, kobj.entry) + drv->remove(dev); + +kset_put: + kset_put(&cls->kset); +} + /** - * sysdev_driver_register - Register auxillary driver + * sysdev_driver_register - Register auxiliary driver * @cls: Device class driver belongs to. * @drv: Driver. * @@ -175,14 +205,14 @@ static DEFINE_MUTEX(sysdev_drivers_lock); * called on each operation on devices of that class. The refcount * of @cls is incremented. */ - int sysdev_driver_register(struct sysdev_class *cls, struct sysdev_driver *drv) { + struct sys_device *dev = NULL; int err = 0; if (!cls) { - WARN(1, KERN_WARNING "sysdev: invalid class passed to " - "sysdev_driver_register!\n"); + WARN(1, KERN_WARNING "sysdev: invalid class passed to %s!\n", + __func__); return -EINVAL; } @@ -198,21 +228,29 @@ int sysdev_driver_register(struct sysdev_class *cls, struct sysdev_driver *drv) /* If devices of this class already exist, tell the driver */ if (drv->add) { - struct sys_device *dev; - list_for_each_entry(dev, &cls->kset.list, kobj.entry) - drv->add(dev); + list_for_each_entry(dev, &cls->kset.list, kobj.entry) { + err = drv->add(dev); + if (err) + goto unwind; + } } } else { err = -EINVAL; WARN(1, KERN_ERR "%s: invalid device class\n", __func__); } + + goto unlock; + +unwind: + __sysdev_driver_remove(cls, drv, dev); + +unlock: mutex_unlock(&sysdev_drivers_lock); return err; } - /** - * sysdev_driver_unregister - Remove an auxillary driver. + * sysdev_driver_unregister - Remove an auxiliary driver. * @cls: Class driver belongs to. * @drv: Driver. 
*/ @@ -220,23 +258,12 @@ void sysdev_driver_unregister(struct sysdev_class *cls, struct sysdev_driver *drv) { mutex_lock(&sysdev_drivers_lock); - list_del_init(&drv->entry); - if (cls) { - if (drv->remove) { - struct sys_device *dev; - list_for_each_entry(dev, &cls->kset.list, kobj.entry) - drv->remove(dev); - } - kset_put(&cls->kset); - } + __sysdev_driver_remove(cls, drv, NULL); mutex_unlock(&sysdev_drivers_lock); } - EXPORT_SYMBOL_GPL(sysdev_driver_register); EXPORT_SYMBOL_GPL(sysdev_driver_unregister); - - /** * sysdev_register - add a system device to the tree * @sysdev: device in question @@ -275,7 +302,7 @@ int sysdev_register(struct sys_device *sysdev) * code that should have called us. */ - /* Notify class auxillary drivers */ + /* Notify class auxiliary drivers */ list_for_each_entry(drv, &cls->drivers, entry) { if (drv->add) drv->add(sysdev); @@ -301,202 +328,8 @@ void sysdev_unregister(struct sys_device *sysdev) kobject_put(&sysdev->kobj); } - - -/** - * sysdev_shutdown - Shut down all system devices. - * - * Loop over each class of system devices, and the devices in each - * of those classes. For each device, we call the shutdown method for - * each driver registered for the device - the auxillaries, - * and the class driver. - * - * Note: The list is iterated in reverse order, so that we shut down - * child devices before we shut down their parents. The list ordering - * is guaranteed by virtue of the fact that child devices are registered - * after their parents. - */ -void sysdev_shutdown(void) -{ - struct sysdev_class *cls; - - pr_debug("Shutting Down System Devices\n"); - - mutex_lock(&sysdev_drivers_lock); - list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) { - struct sys_device *sysdev; - - pr_debug("Shutting down type '%s':\n", - kobject_name(&cls->kset.kobj)); - - list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) { - struct sysdev_driver *drv; - pr_debug(" %s\n", kobject_name(&sysdev->kobj)); - - /* Call auxillary drivers first */ - list_for_each_entry(drv, &cls->drivers, entry) { - if (drv->shutdown) - drv->shutdown(sysdev); - } - - /* Now call the generic one */ - if (cls->shutdown) - cls->shutdown(sysdev); - } - } - mutex_unlock(&sysdev_drivers_lock); -} - -static void __sysdev_resume(struct sys_device *dev) -{ - struct sysdev_class *cls = dev->cls; - struct sysdev_driver *drv; - - /* First, call the class-specific one */ - if (cls->resume) - cls->resume(dev); - WARN_ONCE(!irqs_disabled(), - "Interrupts enabled after %pF\n", cls->resume); - - /* Call auxillary drivers next. */ - list_for_each_entry(drv, &cls->drivers, entry) { - if (drv->resume) - drv->resume(dev); - WARN_ONCE(!irqs_disabled(), - "Interrupts enabled after %pF\n", drv->resume); - } -} - -/** - * sysdev_suspend - Suspend all system devices. - * @state: Power state to enter. - * - * We perform an almost identical operation as sysdev_shutdown() - * above, though calling ->suspend() instead. Interrupts are disabled - * when this called. Devices are responsible for both saving state and - * quiescing or powering down the device. - * - * This is only called by the device PM core, so we let them handle - * all synchronization. 
- */ -int sysdev_suspend(pm_message_t state) -{ - struct sysdev_class *cls; - struct sys_device *sysdev, *err_dev; - struct sysdev_driver *drv, *err_drv; - int ret; - - pr_debug("Checking wake-up interrupts\n"); - - /* Return error code if there are any wake-up interrupts pending */ - ret = check_wakeup_irqs(); - if (ret) - return ret; - - WARN_ONCE(!irqs_disabled(), - "Interrupts enabled while suspending system devices\n"); - - pr_debug("Suspending System Devices\n"); - - list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) { - pr_debug("Suspending type '%s':\n", - kobject_name(&cls->kset.kobj)); - - list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) { - pr_debug(" %s\n", kobject_name(&sysdev->kobj)); - - /* Call auxillary drivers first */ - list_for_each_entry(drv, &cls->drivers, entry) { - if (drv->suspend) { - ret = drv->suspend(sysdev, state); - if (ret) - goto aux_driver; - } - WARN_ONCE(!irqs_disabled(), - "Interrupts enabled after %pF\n", - drv->suspend); - } - - /* Now call the generic one */ - if (cls->suspend) { - ret = cls->suspend(sysdev, state); - if (ret) - goto cls_driver; - WARN_ONCE(!irqs_disabled(), - "Interrupts enabled after %pF\n", - cls->suspend); - } - } - } - return 0; - /* resume current sysdev */ -cls_driver: - drv = NULL; - printk(KERN_ERR "Class suspend failed for %s: %d\n", - kobject_name(&sysdev->kobj), ret); - -aux_driver: - if (drv) - printk(KERN_ERR "Class driver suspend failed for %s: %d\n", - kobject_name(&sysdev->kobj), ret); - list_for_each_entry(err_drv, &cls->drivers, entry) { - if (err_drv == drv) - break; - if (err_drv->resume) - err_drv->resume(sysdev); - } - - /* resume other sysdevs in current class */ - list_for_each_entry(err_dev, &cls->kset.list, kobj.entry) { - if (err_dev == sysdev) - break; - pr_debug(" %s\n", kobject_name(&err_dev->kobj)); - __sysdev_resume(err_dev); - } - - /* resume other classes */ - list_for_each_entry_continue(cls, &system_kset->list, kset.kobj.entry) { - list_for_each_entry(err_dev, &cls->kset.list, kobj.entry) { - pr_debug(" %s\n", kobject_name(&err_dev->kobj)); - __sysdev_resume(err_dev); - } - } - return ret; -} -EXPORT_SYMBOL_GPL(sysdev_suspend); - -/** - * sysdev_resume - Bring system devices back to life. - * - * Similar to sysdev_suspend(), but we iterate the list forwards - * to guarantee that parent devices are resumed before their children. - * - * Note: Interrupts are disabled when called. 
- */ -int sysdev_resume(void) -{ - struct sysdev_class *cls; - - WARN_ONCE(!irqs_disabled(), - "Interrupts enabled while resuming system devices\n"); - - pr_debug("Resuming System Devices\n"); - - list_for_each_entry(cls, &system_kset->list, kset.kobj.entry) { - struct sys_device *sysdev; - - pr_debug("Resuming type '%s':\n", - kobject_name(&cls->kset.kobj)); - - list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) { - pr_debug(" %s\n", kobject_name(&sysdev->kobj)); - - __sysdev_resume(sysdev); - } - } - return 0; -} -EXPORT_SYMBOL_GPL(sysdev_resume); +EXPORT_SYMBOL_GPL(sysdev_register); +EXPORT_SYMBOL_GPL(sysdev_unregister); int __init system_bus_init(void) { @@ -506,9 +339,6 @@ int __init system_bus_init(void) return 0; } -EXPORT_SYMBOL_GPL(sysdev_register); -EXPORT_SYMBOL_GPL(sysdev_unregister); - #define to_ext_attr(x) container_of(x, struct sysdev_ext_attribute, attr) ssize_t sysdev_store_ulong(struct sys_device *sysdev, diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c new file mode 100644 index 0000000000000..e8d11b6630eeb --- /dev/null +++ b/drivers/base/syscore.c @@ -0,0 +1,127 @@ +/* + * syscore.c - Execution of system core operations. + * + * Copyright (C) 2011 Rafael J. Wysocki , Novell Inc. + * + * This file is released under the GPLv2. + */ + +#include +#include +#include +#include + +static LIST_HEAD(syscore_ops_list); +static DEFINE_MUTEX(syscore_ops_lock); + +/** + * register_syscore_ops - Register a set of system core operations. + * @ops: System core operations to register. + */ +void register_syscore_ops(struct syscore_ops *ops) +{ + mutex_lock(&syscore_ops_lock); + list_add_tail(&ops->node, &syscore_ops_list); + mutex_unlock(&syscore_ops_lock); +} +EXPORT_SYMBOL_GPL(register_syscore_ops); + +/** + * unregister_syscore_ops - Unregister a set of system core operations. + * @ops: System core operations to unregister. + */ +void unregister_syscore_ops(struct syscore_ops *ops) +{ + mutex_lock(&syscore_ops_lock); + list_del(&ops->node); + mutex_unlock(&syscore_ops_lock); +} +EXPORT_SYMBOL_GPL(unregister_syscore_ops); + +#ifdef CONFIG_PM_SLEEP +/** + * syscore_suspend - Execute all the registered system core suspend callbacks. + * + * This function is executed with one CPU on-line and disabled interrupts. + */ +int syscore_suspend(void) +{ + struct syscore_ops *ops; + int ret = 0; + + pr_debug("Checking wakeup interrupts\n"); + + /* Return error code if there are any wakeup interrupts pending. */ + ret = check_wakeup_irqs(); + if (ret) + return ret; + + WARN_ONCE(!irqs_disabled(), + "Interrupts enabled before system core suspend.\n"); + + list_for_each_entry_reverse(ops, &syscore_ops_list, node) + if (ops->suspend) { + if (initcall_debug) + pr_info("PM: Calling %pF\n", ops->suspend); + ret = ops->suspend(); + if (ret) + goto err_out; + WARN_ONCE(!irqs_disabled(), + "Interrupts enabled after %pF\n", ops->suspend); + } + + return 0; + + err_out: + pr_err("PM: System core suspend callback %pF failed.\n", ops->suspend); + + list_for_each_entry_continue(ops, &syscore_ops_list, node) + if (ops->resume) + ops->resume(); + + return ret; +} +EXPORT_SYMBOL_GPL(syscore_suspend); + +/** + * syscore_resume - Execute all the registered system core resume callbacks. + * + * This function is executed with one CPU on-line and disabled interrupts. 
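+ *
+ * Resume callbacks are invoked in registration order, i.e. the reverse of
+ * the order in which syscore_suspend() runs the suspend callbacks.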
+ */ +void syscore_resume(void) +{ + struct syscore_ops *ops; + + WARN_ONCE(!irqs_disabled(), + "Interrupts enabled before system core resume.\n"); + + list_for_each_entry(ops, &syscore_ops_list, node) + if (ops->resume) { + if (initcall_debug) + pr_info("PM: Calling %pF\n", ops->resume); + ops->resume(); + WARN_ONCE(!irqs_disabled(), + "Interrupts enabled after %pF\n", ops->resume); + } +} +EXPORT_SYMBOL_GPL(syscore_resume); +#endif /* CONFIG_PM_SLEEP */ + +/** + * syscore_shutdown - Execute all the registered system core shutdown callbacks. + */ +void syscore_shutdown(void) +{ + struct syscore_ops *ops; + + mutex_lock(&syscore_ops_lock); + + list_for_each_entry_reverse(ops, &syscore_ops_list, node) + if (ops->shutdown) { + if (initcall_debug) + pr_info("PM: Calling %pF\n", ops->shutdown); + ops->shutdown(); + } + + mutex_unlock(&syscore_ops_lock); +} diff --git a/drivers/block/brd.c b/drivers/block/brd.c index b7f51e4594f86..c94bc48d99df7 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -552,7 +552,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data) struct kobject *kobj; mutex_lock(&brd_devices_mutex); - brd = brd_init_one(dev & MINORMASK); + brd = brd_init_one(MINOR(dev) >> part_shift); kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM); mutex_unlock(&brd_devices_mutex); @@ -585,15 +585,18 @@ static int __init brd_init(void) if (max_part > 0) part_shift = fls(max_part); + if ((1UL << part_shift) > DISK_MAX_PARTS) + return -EINVAL; + if (rd_nr > 1UL << (MINORBITS - part_shift)) return -EINVAL; if (rd_nr) { nr = rd_nr; - range = rd_nr; + range = rd_nr << part_shift; } else { nr = CONFIG_BLK_DEV_RAM_COUNT; - range = 1UL << (MINORBITS - part_shift); + range = 1UL << MINORBITS; } if (register_blkdev(RAMDISK_MAJOR, "ramdisk")) @@ -632,7 +635,7 @@ static void __exit brd_exit(void) unsigned long range; struct brd_device *brd, *next; - range = rd_nr ? rd_nr : 1UL << (MINORBITS - part_shift); + range = rd_nr ? rd_nr << part_shift : 1UL << MINORBITS; list_for_each_entry_safe(brd, next, &brd_devices, brd_list) brd_del_one(brd); diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h index 579f749184930..554bbd907d144 100644 --- a/drivers/block/cciss.h +++ b/drivers/block/cciss.h @@ -222,6 +222,7 @@ static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c) h->ctlr, c->busaddr); #endif /* CCISS_DEBUG */ writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); + readl(h->vaddr + SA5_REQUEST_PORT_OFFSET); h->commands_outstanding++; if ( h->commands_outstanding > h->max_outstanding) h->max_outstanding = h->commands_outstanding; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index dbf31ec9114db..f49e43fd73d71 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1674,7 +1674,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data) struct kobject *kobj; mutex_lock(&loop_devices_mutex); - lo = loop_init_one(dev & MINORMASK); + lo = loop_init_one(MINOR(dev) >> part_shift); kobj = lo ? 
get_disk(lo->lo_disk) : ERR_PTR(-ENOMEM); mutex_unlock(&loop_devices_mutex); @@ -1707,15 +1707,18 @@ static int __init loop_init(void) if (max_part > 0) part_shift = fls(max_part); + if ((1UL << part_shift) > DISK_MAX_PARTS) + return -EINVAL; + if (max_loop > 1UL << (MINORBITS - part_shift)) return -EINVAL; if (max_loop) { nr = max_loop; - range = max_loop; + range = max_loop << part_shift; } else { nr = 8; - range = 1UL << (MINORBITS - part_shift); + range = 1UL << MINORBITS; } if (register_blkdev(LOOP_MAJOR, "loop")) @@ -1754,7 +1757,7 @@ static void __exit loop_exit(void) unsigned long range; struct loop_device *lo, *next; - range = max_loop ? max_loop : 1UL << (MINORBITS - part_shift); + range = max_loop ? max_loop << part_shift : 1UL << MINORBITS; list_for_each_entry_safe(lo, next, &loop_devices, lo_list) loop_del_one(lo); diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 62cec6afd7adf..a0aabd904a512 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -172,7 +172,8 @@ module_param_array(drive3, int, NULL, 0); static int pcd_open(struct cdrom_device_info *cdi, int purpose); static void pcd_release(struct cdrom_device_info *cdi); static int pcd_drive_status(struct cdrom_device_info *cdi, int slot_nr); -static int pcd_media_changed(struct cdrom_device_info *cdi, int slot_nr); +static unsigned int pcd_check_events(struct cdrom_device_info *cdi, + unsigned int clearing, int slot_nr); static int pcd_tray_move(struct cdrom_device_info *cdi, int position); static int pcd_lock_door(struct cdrom_device_info *cdi, int lock); static int pcd_drive_reset(struct cdrom_device_info *cdi); @@ -257,10 +258,11 @@ static int pcd_block_ioctl(struct block_device *bdev, fmode_t mode, return ret; } -static int pcd_block_media_changed(struct gendisk *disk) +static unsigned int pcd_block_check_events(struct gendisk *disk, + unsigned int clearing) { struct pcd_unit *cd = disk->private_data; - return cdrom_media_changed(&cd->info); + return cdrom_check_events(&cd->info, clearing); } static const struct block_device_operations pcd_bdops = { @@ -268,14 +270,14 @@ static const struct block_device_operations pcd_bdops = { .open = pcd_block_open, .release = pcd_block_release, .ioctl = pcd_block_ioctl, - .media_changed = pcd_block_media_changed, + .check_events = pcd_block_check_events, }; static struct cdrom_device_ops pcd_dops = { .open = pcd_open, .release = pcd_release, .drive_status = pcd_drive_status, - .media_changed = pcd_media_changed, + .check_events = pcd_check_events, .tray_move = pcd_tray_move, .lock_door = pcd_lock_door, .get_mcn = pcd_get_mcn, @@ -318,6 +320,8 @@ static void pcd_init_units(void) disk->first_minor = unit; strcpy(disk->disk_name, cd->name); /* umm... */ disk->fops = &pcd_bdops; + disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE; + disk->events = DISK_EVENT_MEDIA_CHANGE; } } @@ -502,13 +506,14 @@ static int pcd_packet(struct cdrom_device_info *cdi, struct packet_command *cgc) #define DBMSG(msg) ((verbose>1)?(msg):NULL) -static int pcd_media_changed(struct cdrom_device_info *cdi, int slot_nr) +static unsigned int pcd_check_events(struct cdrom_device_info *cdi, + unsigned int clearing, int slot_nr) { struct pcd_unit *cd = cdi->handle; int res = cd->changed; if (res) cd->changed = 0; - return res; + return res ? 
DISK_EVENT_MEDIA_CHANGE : 0; } static int pcd_lock_door(struct cdrom_device_info *cdi, int lock) diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index c0ee1558b9bba..21dfdb7768695 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c @@ -794,7 +794,7 @@ static int pd_release(struct gendisk *p, fmode_t mode) return 0; } -static int pd_check_media(struct gendisk *p) +static unsigned int pd_check_events(struct gendisk *p, unsigned int clearing) { struct pd_unit *disk = p->private_data; int r; @@ -803,7 +803,7 @@ static int pd_check_media(struct gendisk *p) pd_special_command(disk, pd_media_check); r = disk->changed; disk->changed = 0; - return r; + return r ? DISK_EVENT_MEDIA_CHANGE : 0; } static int pd_revalidate(struct gendisk *p) @@ -822,7 +822,7 @@ static const struct block_device_operations pd_fops = { .release = pd_release, .ioctl = pd_ioctl, .getgeo = pd_getgeo, - .media_changed = pd_check_media, + .check_events = pd_check_events, .revalidate_disk= pd_revalidate }; @@ -837,6 +837,7 @@ static void pd_probe_drive(struct pd_unit *disk) p->fops = &pd_fops; p->major = major; p->first_minor = (disk - pd) << PD_BITS; + p->events = DISK_EVENT_MEDIA_CHANGE; disk->gd = p; p->private_data = disk; p->queue = pd_queue; diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index 635f25dd9e108..7adeb1edbf43f 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c @@ -243,7 +243,8 @@ static struct pf_unit units[PF_UNITS]; static int pf_identify(struct pf_unit *pf); static void pf_lock(struct pf_unit *pf, int func); static void pf_eject(struct pf_unit *pf); -static int pf_check_media(struct gendisk *disk); +static unsigned int pf_check_events(struct gendisk *disk, + unsigned int clearing); static char pf_scratch[512]; /* scratch block buffer */ @@ -270,7 +271,7 @@ static const struct block_device_operations pf_fops = { .release = pf_release, .ioctl = pf_ioctl, .getgeo = pf_getgeo, - .media_changed = pf_check_media, + .check_events = pf_check_events, }; static void __init pf_init_units(void) @@ -293,6 +294,7 @@ static void __init pf_init_units(void) disk->first_minor = unit; strcpy(disk->disk_name, pf->name); disk->fops = &pf_fops; + disk->events = DISK_EVENT_MEDIA_CHANGE; if (!(*drives[unit])[D_PRT]) pf_drive_count++; } @@ -377,9 +379,9 @@ static int pf_release(struct gendisk *disk, fmode_t mode) } -static int pf_check_media(struct gendisk *disk) +static unsigned int pf_check_events(struct gendisk *disk, unsigned int clearing) { - return 1; + return DISK_EVENT_MEDIA_CHANGE; } static inline int status_reg(struct pf_unit *pf) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 700a3840fddc2..8fc368e8ef1e6 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -71,6 +71,9 @@ static struct usb_device_id btusb_table[] = { /* Apple MacBookAir3,1, MacBookAir3,2 */ { USB_DEVICE(0x05ac, 0x821b) }, + /* Apple MacBookPro8,2 */ + { USB_DEVICE(0x05ac, 0x821a) }, + /* AVM BlueFRITZ! 
USB v2.0 */ { USB_DEVICE(0x057c, 0x3800) }, @@ -430,7 +433,7 @@ static void btusb_isoc_complete(struct urb *urb) } } -static void inline __fill_isoc_descriptor(struct urb *urb, int len, int mtu) +static inline void __fill_isoc_descriptor(struct urb *urb, int len, int mtu) { int i, offset = 0; @@ -781,7 +784,7 @@ static void btusb_notify(struct hci_dev *hdev, unsigned int evt) } } -static int inline __set_isoc_interface(struct hci_dev *hdev, int altsetting) +static inline int __set_isoc_interface(struct hci_dev *hdev, int altsetting) { struct btusb_data *data = hdev->driver_data; struct usb_interface *intf = data->isoc; diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c index 38595e782d02e..bd2954b851954 100644 --- a/drivers/bluetooth/hci_ll.c +++ b/drivers/bluetooth/hci_ll.c @@ -45,6 +45,7 @@ #include #include #include +#include #include #include @@ -86,6 +87,28 @@ struct ll_struct { struct sk_buff_head tx_wait_q; /* HCILL wait queue */ }; +#ifdef CONFIG_SERIAL_MSM_HS +void msm_hs_request_clock_off(struct uart_port *uport); +void msm_hs_request_clock_on(struct uart_port *uport); + +static void __ll_msm_serial_clock_on(struct tty_struct *tty) { + struct uart_state *state = tty->driver_data; + struct uart_port *port = state->uart_port; + + msm_hs_request_clock_on(port); +} + +static void __ll_msm_serial_clock_request_off(struct tty_struct *tty) { + struct uart_state *state = tty->driver_data; + struct uart_port *port = state->uart_port; + + msm_hs_request_clock_off(port); +} +#else +static inline void __ll_msm_serial_clock_on(struct tty_struct *tty) {} +static inline void __ll_msm_serial_clock_request_off(struct tty_struct *tty) {} +#endif + /* * Builds and sends an HCILL command packet. * These are very simple packets with only 1 cmd byte @@ -217,6 +240,10 @@ static void ll_device_want_to_wakeup(struct hci_uart *hu) BT_DBG("dual wake-up-indication"); /* deliberate fall-through - do not add break */ case HCILL_ASLEEP: + /* Make sure clock is on - we may have turned clock off since + * receiving the wake up indicator + */ + __ll_msm_serial_clock_on(hu->tty); /* acknowledge device wake up */ if (send_hcill_cmd(HCILL_WAKE_UP_ACK, hu) < 0) { BT_ERR("cannot acknowledge device wake up"); @@ -270,6 +297,11 @@ static void ll_device_want_to_sleep(struct hci_uart *hu) /* actually send the sleep ack packet */ hci_uart_tx_wakeup(hu); + + spin_lock_irqsave(&ll->hcill_lock, flags); + if (ll->hcill_state == HCILL_ASLEEP) + __ll_msm_serial_clock_request_off(hu->tty); + spin_unlock_irqrestore(&ll->hcill_lock, flags); } /* @@ -321,6 +353,7 @@ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb) break; case HCILL_ASLEEP: BT_DBG("device asleep, waking up and queueing packet"); + __ll_msm_serial_clock_on(hu->tty); /* save packet for later */ skb_queue_tail(&ll->tx_wait_q, skb); /* awake device */ diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index e2c48a7eccffe..5ade78a01c4dc 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -986,6 +986,9 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t cdinfo(CD_OPEN, "entering cdrom_open\n"); + /* open is event synchronization point, check events first */ + check_disk_change(bdev); + /* if this was a O_NONBLOCK open and we should honor the flags, * do a quick open without drive/disc integrity checks. 
*/ cdi->use_count++; @@ -1012,9 +1015,6 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t cdinfo(CD_OPEN, "Use count for \"/dev/%s\" now %d\n", cdi->name, cdi->use_count); - /* Do this on open. Don't wait for mount, because they might - not be mounting, but opening with O_NONBLOCK */ - check_disk_change(bdev); return 0; err_release: if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) { diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index 64a21461c408f..b2b034fea34e6 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -395,10 +395,12 @@ static int gdrom_drivestatus(struct cdrom_device_info *cd_info, int ignore) return CDS_NO_INFO; } -static int gdrom_mediachanged(struct cdrom_device_info *cd_info, int ignore) +static unsigned int gdrom_check_events(struct cdrom_device_info *cd_info, + unsigned int clearing, int ignore) { /* check the sense key */ - return (__raw_readb(GDROM_ERROR_REG) & 0xF0) == 0x60; + return (__raw_readb(GDROM_ERROR_REG) & 0xF0) == 0x60 ? + DISK_EVENT_MEDIA_CHANGE : 0; } /* reset the G1 bus */ @@ -483,7 +485,7 @@ static struct cdrom_device_ops gdrom_ops = { .open = gdrom_open, .release = gdrom_release, .drive_status = gdrom_drivestatus, - .media_changed = gdrom_mediachanged, + .check_events = gdrom_check_events, .get_last_session = gdrom_get_last_session, .reset = gdrom_hardreset, .audio_ioctl = gdrom_audio_ioctl, @@ -509,9 +511,10 @@ static int gdrom_bdops_release(struct gendisk *disk, fmode_t mode) return 0; } -static int gdrom_bdops_mediachanged(struct gendisk *disk) +static unsigned int gdrom_bdops_check_events(struct gendisk *disk, + unsigned int clearing) { - return cdrom_media_changed(gd.cd_info); + return cdrom_check_events(gd.cd_info, clearing); } static int gdrom_bdops_ioctl(struct block_device *bdev, fmode_t mode, @@ -530,7 +533,7 @@ static const struct block_device_operations gdrom_bdops = { .owner = THIS_MODULE, .open = gdrom_bdops_open, .release = gdrom_bdops_release, - .media_changed = gdrom_bdops_mediachanged, + .check_events = gdrom_bdops_check_events, .ioctl = gdrom_bdops_ioctl, }; @@ -800,6 +803,7 @@ static int __devinit probe_gdrom(struct platform_device *devptr) goto probe_fail_cdrom_register; } gd.disk->fops = &gdrom_bdops; + gd.disk->events = DISK_EVENT_MEDIA_CHANGE; /* latch on to the interrupt */ err = gdrom_set_interrupt_handlers(); if (err) diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c index be73a9b493a69..ae15a4ddaa9b0 100644 --- a/drivers/cdrom/viocd.c +++ b/drivers/cdrom/viocd.c @@ -186,10 +186,11 @@ static int viocd_blk_ioctl(struct block_device *bdev, fmode_t mode, return ret; } -static int viocd_blk_media_changed(struct gendisk *disk) +static unsigned int viocd_blk_check_events(struct gendisk *disk, + unsigned int clearing) { struct disk_info *di = disk->private_data; - return cdrom_media_changed(&di->viocd_info); + return cdrom_check_events(&di->viocd_info, clearing); } static const struct block_device_operations viocd_fops = { @@ -197,7 +198,7 @@ static const struct block_device_operations viocd_fops = { .open = viocd_blk_open, .release = viocd_blk_release, .ioctl = viocd_blk_ioctl, - .media_changed = viocd_blk_media_changed, + .check_events = viocd_blk_check_events, }; static int viocd_open(struct cdrom_device_info *cdi, int purpose) @@ -320,7 +321,8 @@ static void do_viocd_request(struct request_queue *q) } } -static int viocd_media_changed(struct cdrom_device_info *cdi, int disc_nr) +static unsigned int viocd_check_events(struct cdrom_device_info *cdi, + unsigned 
int clearing, int disc_nr) { struct viocd_waitevent we; HvLpEvent_Rc hvrc; @@ -340,7 +342,7 @@ static int viocd_media_changed(struct cdrom_device_info *cdi, int disc_nr) if (hvrc != 0) { pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n", (int)hvrc); - return -EIO; + return 0; } wait_for_completion(&we.com); @@ -354,7 +356,7 @@ static int viocd_media_changed(struct cdrom_device_info *cdi, int disc_nr) return 0; } - return we.changed; + return we.changed ? DISK_EVENT_MEDIA_CHANGE : 0; } static int viocd_lock_door(struct cdrom_device_info *cdi, int locking) @@ -550,7 +552,7 @@ static int viocd_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, static struct cdrom_device_ops viocd_dops = { .open = viocd_open, .release = viocd_release, - .media_changed = viocd_media_changed, + .check_events = viocd_check_events, .lock_door = viocd_lock_door, .generic_packet = viocd_packet, .audio_ioctl = viocd_audio_ioctl, @@ -623,7 +625,9 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id) blk_queue_max_hw_sectors(q, 4096 / 512); gendisk->queue = q; gendisk->fops = &viocd_fops; - gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE; + gendisk->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE | + GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE; + gendisk->events = DISK_EVENT_MEDIA_CHANGE; set_capacity(gendisk, 0); gendisk->private_data = d; d->viocd_disk = gendisk; diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c index 012cba0d6d965..b072648dc3f64 100644 --- a/drivers/char/agp/generic.c +++ b/drivers/char/agp/generic.c @@ -115,6 +115,9 @@ static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages) struct agp_memory *new; unsigned long alloc_size = num_agp_pages*sizeof(struct page *); + if (INT_MAX/sizeof(struct page *) < num_agp_pages) + return NULL; + new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL); if (new == NULL) return NULL; @@ -234,11 +237,14 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge, int scratch_pages; struct agp_memory *new; size_t i; + int cur_memory; if (!bridge) return NULL; - if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp) + cur_memory = atomic_read(&bridge->current_memory_agp); + if ((cur_memory + page_count > bridge->max_memory_agp) || + (cur_memory + page_count < page_count)) return NULL; if (type >= AGP_USER_TYPES) { @@ -1089,8 +1095,8 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type) return -EINVAL; } - /* AK: could wrap */ - if ((pg_start + mem->page_count) > num_entries) + if (((pg_start + mem->page_count) > num_entries) || + ((pg_start + mem->page_count) < pg_start)) return -EINVAL; j = pg_start; @@ -1124,7 +1130,7 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type) { size_t i; struct agp_bridge_data *bridge; - int mask_type; + int mask_type, num_entries; bridge = mem->bridge; if (!bridge) @@ -1136,6 +1142,11 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type) if (type != mem->type) return -EINVAL; + num_entries = agp_num_entries(); + if (((pg_start + mem->page_count) > num_entries) || + ((pg_start + mem->page_count) < pg_start)) + return -EINVAL; + mask_type = bridge->driver->agp_type_to_mask_type(bridge, type); if (mask_type != 0) { /* The generic routines know nothing of memory types */ diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c index d72433f2d310d..ee017166545eb 100644 --- a/drivers/char/i8k.c +++ b/drivers/char/i8k.c @@ -139,8 +139,8 @@ static int 
i8k_smm(struct smm_regs *regs) "movl %%edi,20(%%rax)\n\t" "popq %%rdx\n\t" "movl %%edx,0(%%rax)\n\t" - "lahf\n\t" - "shrl $8,%%eax\n\t" + "pushfq\n\t" + "popq %%rax\n\t" "andl $1,%%eax\n" :"=a"(rc) : "a"(regs) diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 238d5fa66a36b..103a68cf590b1 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -329,7 +329,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma) vma->vm_ops = &mmap_mem_ops; /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */ - if (remap_pfn_range(vma, + if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size, diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c index e6d75627c6c81..ecd0082502efe 100644 --- a/drivers/char/mmtimer.c +++ b/drivers/char/mmtimer.c @@ -487,7 +487,7 @@ static int sgi_clock_get(clockid_t clockid, struct timespec *tp) return 0; }; -static int sgi_clock_set(clockid_t clockid, struct timespec *tp) +static int sgi_clock_set(const clockid_t clockid, const struct timespec *tp) { u64 nsec; diff --git a/drivers/char/random.c b/drivers/char/random.c index 72a4fcb177450..11ed4b73597ed 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -263,14 +263,14 @@ * The minimum number of bits of entropy before we wake up a read on * /dev/random. Should be enough to do a significant reseed. */ -static int random_read_wakeup_thresh = 64; +static int random_read_wakeup_thresh = 256; /* * If the entropy count falls under this number of bits, then we * should wake up processes which are selecting or polling on write * access to /dev/random. */ -static int random_write_wakeup_thresh = 128; +static int random_write_wakeup_thresh = 512; /* * When the input pool goes over trickle_thresh, start dropping most @@ -997,57 +997,7 @@ void rand_initialize_disk(struct gendisk *disk) static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { - ssize_t n, retval = 0, count = 0; - - if (nbytes == 0) - return 0; - - while (nbytes > 0) { - n = nbytes; - if (n > SEC_XFER_SIZE) - n = SEC_XFER_SIZE; - - DEBUG_ENT("reading %d bits\n", n*8); - - n = extract_entropy_user(&blocking_pool, buf, n); - - DEBUG_ENT("read got %d bits (%d still needed)\n", - n*8, (nbytes-n)*8); - - if (n == 0) { - if (file->f_flags & O_NONBLOCK) { - retval = -EAGAIN; - break; - } - - DEBUG_ENT("sleeping?\n"); - - wait_event_interruptible(random_read_wait, - input_pool.entropy_count >= - random_read_wakeup_thresh); - - DEBUG_ENT("awake\n"); - - if (signal_pending(current)) { - retval = -ERESTARTSYS; - break; - } - - continue; - } - - if (n < 0) { - retval = n; - break; - } - count += n; - buf += n; - nbytes -= n; - break; /* This break makes the device work */ - /* like a named pipe */ - } - - return (count ? 
count : retval); + return extract_entropy_user(&nonblocking_pool, buf, nbytes); } static ssize_t diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index 1f46f1cd9225c..7beb0e25f1e1e 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c @@ -980,7 +980,7 @@ int tpm_open(struct inode *inode, struct file *file) return -EBUSY; } - chip->data_buffer = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL); + chip->data_buffer = kzalloc(TPM_BUFSIZE, GFP_KERNEL); if (chip->data_buffer == NULL) { clear_bit(0, &chip->is_open); put_device(chip->dev); diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 84b164d1eb2b1..838568a7dbf56 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1280,18 +1280,7 @@ static void unplug_port(struct port *port) spin_lock_irq(&pdrvdata_lock); list_del(&port->cons.list); spin_unlock_irq(&pdrvdata_lock); -#if 0 - /* - * hvc_remove() not called as removing one hvc port - * results in other hvc ports getting frozen. - * - * Once this is resolved in hvc, this functionality - * will be enabled. Till that is done, the -EPIPE - * return from get_chars() above will help - * hvc_console.c to clean up on ports we remove here. - */ hvc_remove(port->cons.hvc); -#endif } /* Remove unused data this port might have received. */ diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index d384bcaabff9d..4391df9a8c19f 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -18,19 +18,6 @@ if CPU_FREQ config CPU_FREQ_TABLE tristate -config CPU_FREQ_DEBUG - bool "Enable CPUfreq debugging" - help - Say Y here to enable CPUfreq subsystem (including drivers) - debugging. You will need to activate it via the kernel - command line by passing - cpufreq.debug= - - To get , add - 1 to activate CPUfreq core debugging, - 2 to activate CPUfreq drivers debugging, and - 4 to activate CPUfreq governor debugging - config CPU_FREQ_STAT tristate "CPU frequency translation statistics" select CPU_FREQ_TABLE @@ -120,6 +107,12 @@ config CPU_FREQ_DEFAULT_GOV_INTERACTIVE loading your cpufreq low-level hardware driver, using the 'interactive' governor for latency-sensitive workloads. +config CPU_FREQ_DEFAULT_GOV_SMARTASS2 + bool "smartass2" + select CPU_FREQ_GOV_SMARTASS2 + help + Use the CPUFreq governor 'smartassV2' as default. + endchoice config CPU_FREQ_GOV_PERFORMANCE @@ -183,6 +176,24 @@ config CPU_FREQ_GOV_INTERACTIVE 'interactive' - This driver adds a dynamic cpufreq policy governor designed for latency-sensitive workloads. + This governor attempts to reduce the latency of clock + increases so that the system is more responsive to + interactive workloads. + + To compile this driver as a module, choose M here: the + module will be called cpufreq_interactive. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + +config CPU_FREQ_GOV_SMARTASS2 + tristate "'smartassV2' cpufreq governor" + depends on CPU_FREQ + help + 'smartassV2' - a "smart" governor + If in doubt, say N. 
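[Editorial note] The Kconfig entries above only declare the 'interactive' and 'smartassV2' governors; their implementations (built as cpufreq_interactive.o and cpufreq_smartass2.o per the Makefile change below) are not shown in this hunk. For orientation, here is a minimal, illustrative sketch of how a governor of this kernel generation plugs into the cpufreq core. It is not part of the patch: the governor name "sketch" and the function names are placeholders, and the callback events (CPUFREQ_GOV_START/STOP/LIMITS) are the standard ones assumed from the cpufreq API of this era.

/* Illustrative only: minimal cpufreq governor registration skeleton. */
#include <linux/cpufreq.h>
#include <linux/module.h>

static int sketch_governor(struct cpufreq_policy *policy, unsigned int event)
{
        switch (event) {
        case CPUFREQ_GOV_START:
                /* set up sampling (timer/workqueue) for policy->cpu */
                break;
        case CPUFREQ_GOV_STOP:
                /* tear down whatever START created */
                break;
        case CPUFREQ_GOV_LIMITS:
                /* keep the current frequency inside the new policy limits */
                if (policy->cur > policy->max)
                        __cpufreq_driver_target(policy, policy->max,
                                                CPUFREQ_RELATION_H);
                else if (policy->cur < policy->min)
                        __cpufreq_driver_target(policy, policy->min,
                                                CPUFREQ_RELATION_L);
                break;
        }
        return 0;
}

static struct cpufreq_governor cpufreq_gov_sketch = {
        .name           = "sketch",
        .governor       = sketch_governor,
        .owner          = THIS_MODULE,
};

static int __init cpufreq_gov_sketch_init(void)
{
        return cpufreq_register_governor(&cpufreq_gov_sketch);
}

static void __exit cpufreq_gov_sketch_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_sketch);
}

module_init(cpufreq_gov_sketch_init);
module_exit(cpufreq_gov_sketch_exit);
MODULE_LICENSE("GPL");

A real governor such as 'interactive' or 'smartassV2' would additionally track per-CPU load in the START path and derive target frequencies from it; only the registration boilerplate common to all governors is shown here.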
+ config CPU_FREQ_GOV_CONSERVATIVE tristate "'conservative' cpufreq governor" depends on CPU_FREQ diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 30629f7dc747c..35aa00903236f 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o +obj-$(CONFIG_CPU_FREQ_GOV_SMARTASS2) += cpufreq_smartass2.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 5cb4d09919d67..09fb29a7911e3 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -28,12 +28,11 @@ #include #include #include +#include +#include #include -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \ - "cpufreq-core", msg) - /** * The "cpufreq driver" - the arch- or hardware-dependent low * level driver of CPUFreq support, and its spinlock. This lock @@ -43,7 +42,11 @@ static struct cpufreq_driver *cpufreq_driver; static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data); #ifdef CONFIG_HOTPLUG_CPU /* This one keeps track of the previously set governor of a removed CPU */ -static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor); +struct cpufreq_cpu_save_data { + char gov[CPUFREQ_NAME_LEN]; + unsigned int max, min; +}; +static DEFINE_PER_CPU(struct cpufreq_cpu_save_data, cpufreq_policy_save); #endif static DEFINE_SPINLOCK(cpufreq_driver_lock); @@ -70,7 +73,7 @@ static DEFINE_PER_CPU(int, cpufreq_policy_cpu); static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem); #define lock_policy_rwsem(mode, cpu) \ -static int lock_policy_rwsem_##mode \ +int lock_policy_rwsem_##mode \ (int cpu) \ { \ int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \ @@ -95,7 +98,7 @@ static void unlock_policy_rwsem_read(int cpu) up_read(&per_cpu(cpu_policy_rwsem, policy_cpu)); } -static void unlock_policy_rwsem_write(int cpu) +void unlock_policy_rwsem_write(int cpu) { int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); BUG_ON(policy_cpu == -1); @@ -131,7 +134,7 @@ pure_initcall(init_cpufreq_transition_notifier_list); static LIST_HEAD(cpufreq_governor_list); static DEFINE_MUTEX(cpufreq_governor_mutex); -struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) +static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, int sysfs) { struct cpufreq_policy *data; unsigned long flags; @@ -155,7 +158,7 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) if (!data) goto err_out_put_module; - if (!kobject_get(&data->kobj)) + if (!sysfs && !kobject_get(&data->kobj)) goto err_out_put_module; spin_unlock_irqrestore(&cpufreq_driver_lock, flags); @@ -168,103 +171,35 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) err_out: return NULL; } -EXPORT_SYMBOL_GPL(cpufreq_cpu_get); - -void cpufreq_cpu_put(struct cpufreq_policy *data) +struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) { - kobject_put(&data->kobj); - module_put(cpufreq_driver->owner); + return __cpufreq_cpu_get(cpu, 0); } -EXPORT_SYMBOL_GPL(cpufreq_cpu_put); - - -/********************************************************************* - * UNIFIED DEBUG HELPERS * - *********************************************************************/ -#ifdef CONFIG_CPU_FREQ_DEBUG - -/* what part(s) of the CPUfreq subsystem are debugged? 
*/ -static unsigned int debug; - -/* is the debug output ratelimit'ed using printk_ratelimit? User can - * set or modify this value. - */ -static unsigned int debug_ratelimit = 1; - -/* is the printk_ratelimit'ing enabled? It's enabled after a successful - * loading of a cpufreq driver, temporarily disabled when a new policy - * is set, and disabled upon cpufreq driver removal - */ -static unsigned int disable_ratelimit = 1; -static DEFINE_SPINLOCK(disable_ratelimit_lock); +EXPORT_SYMBOL_GPL(cpufreq_cpu_get); -static void cpufreq_debug_enable_ratelimit(void) +static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu) { - unsigned long flags; - - spin_lock_irqsave(&disable_ratelimit_lock, flags); - if (disable_ratelimit) - disable_ratelimit--; - spin_unlock_irqrestore(&disable_ratelimit_lock, flags); + return __cpufreq_cpu_get(cpu, 1); } -static void cpufreq_debug_disable_ratelimit(void) +static void __cpufreq_cpu_put(struct cpufreq_policy *data, int sysfs) { - unsigned long flags; - - spin_lock_irqsave(&disable_ratelimit_lock, flags); - disable_ratelimit++; - spin_unlock_irqrestore(&disable_ratelimit_lock, flags); + if (!sysfs) + kobject_put(&data->kobj); + module_put(cpufreq_driver->owner); } -void cpufreq_debug_printk(unsigned int type, const char *prefix, - const char *fmt, ...) +void cpufreq_cpu_put(struct cpufreq_policy *data) { - char s[256]; - va_list args; - unsigned int len; - unsigned long flags; - - WARN_ON(!prefix); - if (type & debug) { - spin_lock_irqsave(&disable_ratelimit_lock, flags); - if (!disable_ratelimit && debug_ratelimit - && !printk_ratelimit()) { - spin_unlock_irqrestore(&disable_ratelimit_lock, flags); - return; - } - spin_unlock_irqrestore(&disable_ratelimit_lock, flags); - - len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix); - - va_start(args, fmt); - len += vsnprintf(&s[len], (256 - len), fmt, args); - va_end(args); - - printk(s); - - WARN_ON(len < 5); - } + __cpufreq_cpu_put(data, 0); } -EXPORT_SYMBOL(cpufreq_debug_printk); - - -module_param(debug, uint, 0644); -MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core," - " 2 to debug drivers, and 4 to debug governors."); - -module_param(debug_ratelimit, uint, 0644); -MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:" - " set to 0 to disable ratelimiting."); - -#else /* !CONFIG_CPU_FREQ_DEBUG */ - -static inline void cpufreq_debug_enable_ratelimit(void) { return; } -static inline void cpufreq_debug_disable_ratelimit(void) { return; } - -#endif /* CONFIG_CPU_FREQ_DEBUG */ +EXPORT_SYMBOL_GPL(cpufreq_cpu_put); +static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data) +{ + __cpufreq_cpu_put(data, 1); +} /********************************************************************* * EXTERNALLY AFFECTING FREQUENCY CHANGES * @@ -290,7 +225,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) if (!l_p_j_ref_freq) { l_p_j_ref = loops_per_jiffy; l_p_j_ref_freq = ci->old; - dprintk("saving %lu as reference value for loops_per_jiffy; " + pr_debug("saving %lu as reference value for loops_per_jiffy; " "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq); } if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) || @@ -298,7 +233,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) { loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new); - dprintk("scaling loops_per_jiffy to %lu " + pr_debug("scaling loops_per_jiffy to %lu " "for frequency %u kHz\n", loops_per_jiffy, ci->new); } } @@ -325,7 
+260,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) BUG_ON(irqs_disabled()); freqs->flags = cpufreq_driver->flags; - dprintk("notification %u of frequency transition to %u kHz\n", + pr_debug("notification %u of frequency transition to %u kHz\n", state, freqs->new); policy = per_cpu(cpufreq_cpu_data, freqs->cpu); @@ -339,7 +274,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { if ((policy) && (policy->cpu == freqs->cpu) && (policy->cur) && (policy->cur != freqs->old)) { - dprintk("Warning: CPU frequency is" + pr_debug("Warning: CPU frequency is" " %u, cpufreq assumed %u kHz.\n", freqs->old, policy->cur); freqs->old = policy->cur; @@ -352,20 +287,37 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) case CPUFREQ_POSTCHANGE: adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); - dprintk("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new, + pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new, (unsigned long)freqs->cpu); trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu); trace_cpu_frequency(freqs->new, freqs->cpu); srcu_notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_POSTCHANGE, freqs); - if (likely(policy) && likely(policy->cpu == freqs->cpu)) + if (likely(policy) && likely(policy->cpu == freqs->cpu)) { policy->cur = freqs->new; + sysfs_notify(&policy->kobj, NULL, "scaling_cur_freq"); + } break; } } EXPORT_SYMBOL_GPL(cpufreq_notify_transition); +/** + * cpufreq_notify_utilization - notify CPU userspace about CPU utilization + * change + * + * This function is called everytime the CPU load is evaluated by the + * ondemand governor. It notifies userspace of cpu load changes via sysfs. 
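+ *
+ * The "cpu_utilization" sysfs notification is only generated once the
+ * reported utilization reaches MIN_CPU_UTIL_NOTIFY.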
+ */ +void cpufreq_notify_utilization(struct cpufreq_policy *policy, + unsigned int util) +{ + if (policy) + policy->util = util; + if (policy->util >= MIN_CPU_UTIL_NOTIFY) + sysfs_notify(&policy->kobj, NULL, "cpu_utilization"); +} /********************************************************************* * SYSFS INTERFACE * @@ -410,21 +362,14 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, t = __find_governor(str_governor); if (t == NULL) { - char *name = kasprintf(GFP_KERNEL, "cpufreq_%s", - str_governor); - - if (name) { - int ret; - - mutex_unlock(&cpufreq_governor_mutex); - ret = request_module("%s", name); - mutex_lock(&cpufreq_governor_mutex); + int ret; - if (ret == 0) - t = __find_governor(str_governor); - } + mutex_unlock(&cpufreq_governor_mutex); + ret = request_module("cpufreq_%s", str_governor); + mutex_lock(&cpufreq_governor_mutex); - kfree(name); + if (ret == 0) + t = __find_governor(str_governor); } if (t != NULL) { @@ -460,6 +405,7 @@ show_one(cpuinfo_transition_latency, cpuinfo.transition_latency); show_one(scaling_min_freq, min); show_one(scaling_max_freq, max); show_one(scaling_cur_freq, cur); +show_one(cpu_utilization, util); static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy); @@ -529,6 +475,9 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy, unsigned int ret = -EINVAL; char str_governor[16]; struct cpufreq_policy new_policy; + char *envp[3]; + char buf1[64]; + char buf2[64]; ret = cpufreq_get_policy(&new_policy, policy->cpu); if (ret) @@ -549,6 +498,15 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy, policy->user_policy.policy = policy->policy; policy->user_policy.governor = policy->governor; + sysfs_notify(&policy->kobj, NULL, "scaling_governor"); + + snprintf(buf1, sizeof(buf1), "GOV=%s", policy->governor->name); + snprintf(buf2, sizeof(buf2), "CPU=%u", policy->cpu); + envp[0] = buf1; + envp[1] = buf2; + envp[2] = NULL; + kobject_uevent_env(cpufreq_global_kobject, KOBJ_ADD, envp); + if (ret) return ret; else @@ -674,6 +632,7 @@ cpufreq_freq_attr_ro(scaling_cur_freq); cpufreq_freq_attr_ro(bios_limit); cpufreq_freq_attr_ro(related_cpus); cpufreq_freq_attr_ro(affected_cpus); +cpufreq_freq_attr_ro(cpu_utilization); cpufreq_freq_attr_rw(scaling_min_freq); cpufreq_freq_attr_rw(scaling_max_freq); cpufreq_freq_attr_rw(scaling_governor); @@ -686,6 +645,7 @@ static struct attribute *default_attrs[] = { &scaling_min_freq.attr, &scaling_max_freq.attr, &affected_cpus.attr, + &cpu_utilization.attr, &related_cpus.attr, &scaling_governor.attr, &scaling_driver.attr, @@ -705,7 +665,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) struct cpufreq_policy *policy = to_policy(kobj); struct freq_attr *fattr = to_attr(attr); ssize_t ret = -EINVAL; - policy = cpufreq_cpu_get(policy->cpu); + policy = cpufreq_cpu_get_sysfs(policy->cpu); if (!policy) goto no_policy; @@ -719,7 +679,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) unlock_policy_rwsem_read(policy->cpu); fail: - cpufreq_cpu_put(policy); + cpufreq_cpu_put_sysfs(policy); no_policy: return ret; } @@ -730,7 +690,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr, struct cpufreq_policy *policy = to_policy(kobj); struct freq_attr *fattr = to_attr(attr); ssize_t ret = -EINVAL; - policy = cpufreq_cpu_get(policy->cpu); + policy = cpufreq_cpu_get_sysfs(policy->cpu); if (!policy) goto no_policy; @@ -744,7 +704,7 @@ static ssize_t store(struct 
kobject *kobj, struct attribute *attr, unlock_policy_rwsem_write(policy->cpu); fail: - cpufreq_cpu_put(policy); + cpufreq_cpu_put_sysfs(policy); no_policy: return ret; } @@ -752,7 +712,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr, static void cpufreq_sysfs_release(struct kobject *kobj) { struct cpufreq_policy *policy = to_policy(kobj); - dprintk("last reference is dropped\n"); + pr_debug("last reference is dropped\n"); complete(&policy->kobj_unregister); } @@ -784,12 +744,22 @@ static int cpufreq_add_dev_policy(unsigned int cpu, #ifdef CONFIG_HOTPLUG_CPU struct cpufreq_governor *gov; - gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu)); + gov = __find_governor(per_cpu(cpufreq_policy_save, cpu).gov); if (gov) { policy->governor = gov; - dprintk("Restoring governor %s for cpu %d\n", + pr_debug("Restoring governor %s for cpu %d\n", policy->governor->name, cpu); } + if (per_cpu(cpufreq_policy_save, cpu).min) { + policy->min = per_cpu(cpufreq_policy_save, cpu).min; + policy->user_policy.min = policy->min; + } + if (per_cpu(cpufreq_policy_save, cpu).max) { + policy->max = per_cpu(cpufreq_policy_save, cpu).max; + policy->user_policy.max = policy->max; + } + pr_debug("Restoring CPU%d min %d and max %d\n", + cpu, policy->min, policy->max); #endif for_each_cpu(j, policy->cpus) { @@ -823,7 +793,7 @@ static int cpufreq_add_dev_policy(unsigned int cpu, per_cpu(cpufreq_cpu_data, cpu) = managed_policy; spin_unlock_irqrestore(&cpufreq_driver_lock, flags); - dprintk("CPU already managed, adding link\n"); + pr_debug("CPU already managed, adding link\n"); ret = sysfs_create_link(&sys_dev->kobj, &managed_policy->kobj, "cpufreq"); @@ -864,7 +834,7 @@ static int cpufreq_add_dev_symlink(unsigned int cpu, if (!cpu_online(j)) continue; - dprintk("CPU %u already managed, adding link\n", j); + pr_debug("CPU %u already managed, adding link\n", j); managed_policy = cpufreq_cpu_get(cpu); cpu_sys_dev = get_cpu_sysdev(j); ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj, @@ -940,7 +910,7 @@ static int cpufreq_add_dev_interface(unsigned int cpu, policy->user_policy.governor = policy->governor; if (ret) { - dprintk("setting policy failed\n"); + pr_debug("setting policy failed\n"); if (cpufreq_driver->exit) cpufreq_driver->exit(policy); } @@ -976,8 +946,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) if (cpu_is_offline(cpu)) return 0; - cpufreq_debug_disable_ratelimit(); - dprintk("adding CPU %u\n", cpu); + pr_debug("adding CPU %u\n", cpu); #ifdef CONFIG_SMP /* check whether a different CPU already registered this @@ -985,7 +954,6 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) policy = cpufreq_cpu_get(cpu); if (unlikely(policy)) { cpufreq_cpu_put(policy); - cpufreq_debug_enable_ratelimit(); return 0; } #endif @@ -1036,7 +1004,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) */ ret = cpufreq_driver->init(policy); if (ret) { - dprintk("initialization failed\n"); + pr_debug("initialization failed\n"); goto err_unlock_policy; } policy->user_policy.min = policy->min; @@ -1062,8 +1030,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) kobject_uevent(&policy->kobj, KOBJ_ADD); module_put(cpufreq_driver->owner); - dprintk("initialization complete\n"); - cpufreq_debug_enable_ratelimit(); + pr_debug("initialization complete\n"); return 0; @@ -1087,7 +1054,6 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) nomem_out: module_put(cpufreq_driver->owner); module_out: - cpufreq_debug_enable_ratelimit(); return ret; } @@ -1111,15 +1077,13 @@ static 
int __cpufreq_remove_dev(struct sys_device *sys_dev) unsigned int j; #endif - cpufreq_debug_disable_ratelimit(); - dprintk("unregistering CPU %u\n", cpu); + pr_debug("unregistering CPU %u\n", cpu); spin_lock_irqsave(&cpufreq_driver_lock, flags); data = per_cpu(cpufreq_cpu_data, cpu); if (!data) { spin_unlock_irqrestore(&cpufreq_driver_lock, flags); - cpufreq_debug_enable_ratelimit(); unlock_policy_rwsem_write(cpu); return -EINVAL; } @@ -1131,12 +1095,11 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) * only need to unlink, put and exit */ if (unlikely(cpu != data->cpu)) { - dprintk("removing link\n"); + pr_debug("removing link\n"); cpumask_clear_cpu(cpu, data->cpus); spin_unlock_irqrestore(&cpufreq_driver_lock, flags); kobj = &sys_dev->kobj; cpufreq_cpu_put(data); - cpufreq_debug_enable_ratelimit(); unlock_policy_rwsem_write(cpu); sysfs_remove_link(kobj, "cpufreq"); return 0; @@ -1146,8 +1109,12 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) #ifdef CONFIG_SMP #ifdef CONFIG_HOTPLUG_CPU - strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name, + strncpy(per_cpu(cpufreq_policy_save, cpu).gov, data->governor->name, CPUFREQ_NAME_LEN); + per_cpu(cpufreq_policy_save, cpu).min = data->min; + per_cpu(cpufreq_policy_save, cpu).max = data->max; + pr_debug("Saving CPU%d policy min %d and max %d\n", + cpu, data->min, data->max); #endif /* if we have other CPUs still registered, we need to unlink them, @@ -1169,10 +1136,14 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) for_each_cpu(j, data->cpus) { if (j == cpu) continue; - dprintk("removing link for cpu %u\n", j); + pr_debug("removing link for cpu %u\n", j); #ifdef CONFIG_HOTPLUG_CPU - strncpy(per_cpu(cpufreq_cpu_governor, j), + strncpy(per_cpu(cpufreq_policy_save, j).gov, data->governor->name, CPUFREQ_NAME_LEN); + per_cpu(cpufreq_policy_save, j).min = data->min; + per_cpu(cpufreq_policy_save, j).max = data->max; + pr_debug("Saving CPU%d policy min %d and max %d\n", + j, data->min, data->max); #endif cpu_sys_dev = get_cpu_sysdev(j); kobj = &cpu_sys_dev->kobj; @@ -1198,21 +1169,35 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) * not referenced anymore by anybody before we proceed with * unloading. 
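
The remove path above stashes the user-selected governor name and frequency limits in a per-CPU cpufreq_policy_save slot so that cpufreq_add_dev_policy() (earlier hunk) can restore them when the CPU is plugged back in. The definition of that slot is not visible in this hunk; a plausible shape, stated only as an assumption, is:

/* Assumed layout of the per-CPU save area referenced above; the actual
 * definition lives elsewhere in this patch. */
struct cpufreq_policy_save {
	char gov[CPUFREQ_NAME_LEN];	/* last governor, e.g. "interactive" */
	unsigned int min;		/* saved user min (kHz), 0 = unset */
	unsigned int max;		/* saved user max (kHz), 0 = unset */
};
static DEFINE_PER_CPU(struct cpufreq_policy_save, cpufreq_policy_save);
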
*/ - dprintk("waiting for dropping of refcount\n"); + pr_debug("waiting for dropping of refcount\n"); wait_for_completion(cmp); - dprintk("wait complete\n"); + pr_debug("wait complete\n"); lock_policy_rwsem_write(cpu); if (cpufreq_driver->exit) cpufreq_driver->exit(data); unlock_policy_rwsem_write(cpu); +#ifdef CONFIG_HOTPLUG_CPU + /* when the CPU which is the parent of the kobj is hotplugged + * offline, check for siblings, and create cpufreq sysfs interface + * and symlinks + */ + if (unlikely(cpumask_weight(data->cpus) > 1)) { + /* first sibling now owns the new sysfs dir */ + cpumask_clear_cpu(cpu, data->cpus); + cpufreq_add_dev(get_cpu_sysdev(cpumask_first(data->cpus))); + + /* finally remove our own symlink */ + lock_policy_rwsem_write(cpu); + __cpufreq_remove_dev(sys_dev); + } +#endif + free_cpumask_var(data->related_cpus); free_cpumask_var(data->cpus); kfree(data); - per_cpu(cpufreq_cpu_data, cpu) = NULL; - cpufreq_debug_enable_ratelimit(); return 0; } @@ -1238,7 +1223,7 @@ static void handle_update(struct work_struct *work) struct cpufreq_policy *policy = container_of(work, struct cpufreq_policy, update); unsigned int cpu = policy->cpu; - dprintk("handle_update for cpu %u called\n", cpu); + pr_debug("handle_update for cpu %u called\n", cpu); cpufreq_update_policy(cpu); } @@ -1256,7 +1241,7 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, { struct cpufreq_freqs freqs; - dprintk("Warning: CPU frequency out of sync: cpufreq and timing " + pr_debug("Warning: CPU frequency out of sync: cpufreq and timing " "core thinks of %u, is %u kHz.\n", old_freq, new_freq); freqs.cpu = cpu; @@ -1340,50 +1325,45 @@ unsigned int cpufreq_get(unsigned int cpu) } EXPORT_SYMBOL(cpufreq_get); +static struct sysdev_driver cpufreq_sysdev_driver = { + .add = cpufreq_add_dev, + .remove = cpufreq_remove_dev, +}; + /** - * cpufreq_suspend - let the low level driver prepare for suspend + * cpufreq_bp_suspend - Prepare the boot CPU for system suspend. + * + * This function is only executed for the boot processor. The other CPUs + * have been put offline by means of CPU hotplug. */ - -static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg) +static int cpufreq_bp_suspend(void) { int ret = 0; - int cpu = sysdev->id; + int cpu = smp_processor_id(); struct cpufreq_policy *cpu_policy; - dprintk("suspending cpu %u\n", cpu); - - if (!cpu_online(cpu)) - return 0; - - /* we may be lax here as interrupts are off. Nonetheless - * we need to grab the correct cpu policy, as to check - * whether we really run on this CPU. - */ + pr_debug("suspending cpu %u\n", cpu); + /* If there's no policy for the boot CPU, we have nothing to do. */ cpu_policy = cpufreq_cpu_get(cpu); if (!cpu_policy) - return -EINVAL; - - /* only handle each CPU group once */ - if (unlikely(cpu_policy->cpu != cpu)) - goto out; + return 0; if (cpufreq_driver->suspend) { - ret = cpufreq_driver->suspend(cpu_policy, pmsg); + ret = cpufreq_driver->suspend(cpu_policy); if (ret) printk(KERN_ERR "cpufreq: suspend failed in ->suspend " "step on CPU %u\n", cpu_policy->cpu); } -out: cpufreq_cpu_put(cpu_policy); return ret; } /** - * cpufreq_resume - restore proper CPU frequency handling after resume + * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU. * * 1.) resume CPUfreq hardware support (cpufreq_driver->resume()) * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are @@ -1391,31 +1371,23 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg) * what we believe it to be. 
This is a bit later than when it * should be, but nonethteless it's better than calling * cpufreq_driver->get() here which might re-enable interrupts... + * + * This function is only executed for the boot CPU. The other CPUs have not + * been turned on yet. */ -static int cpufreq_resume(struct sys_device *sysdev) +static void cpufreq_bp_resume(void) { int ret = 0; - int cpu = sysdev->id; + int cpu = smp_processor_id(); struct cpufreq_policy *cpu_policy; - dprintk("resuming cpu %u\n", cpu); - - if (!cpu_online(cpu)) - return 0; - - /* we may be lax here as interrupts are off. Nonetheless - * we need to grab the correct cpu policy, as to check - * whether we really run on this CPU. - */ + pr_debug("resuming cpu %u\n", cpu); + /* If there's no policy for the boot CPU, we have nothing to do. */ cpu_policy = cpufreq_cpu_get(cpu); if (!cpu_policy) - return -EINVAL; - - /* only handle each CPU group once */ - if (unlikely(cpu_policy->cpu != cpu)) - goto fail; + return; if (cpufreq_driver->resume) { ret = cpufreq_driver->resume(cpu_policy); @@ -1430,14 +1402,11 @@ static int cpufreq_resume(struct sys_device *sysdev) fail: cpufreq_cpu_put(cpu_policy); - return ret; } -static struct sysdev_driver cpufreq_sysdev_driver = { - .add = cpufreq_add_dev, - .remove = cpufreq_remove_dev, - .suspend = cpufreq_suspend, - .resume = cpufreq_resume, +static struct syscore_ops cpufreq_syscore_ops = { + .suspend = cpufreq_bp_suspend, + .resume = cpufreq_bp_resume, }; @@ -1525,10 +1494,16 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, { int retval = -EINVAL; - dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu, + pr_debug("target for CPU %u: %u kHz, relation %u\n", policy->cpu, target_freq, relation); if (cpu_online(policy->cpu) && cpufreq_driver->target) retval = cpufreq_driver->target(policy, target_freq, relation); + if (likely(retval != -EINVAL)) { + if (target_freq == policy->max) + cpu_nonscaling(policy->cpu); + else + cpu_scaling(policy->cpu); + } return retval; } @@ -1611,7 +1586,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, if (!try_module_get(policy->governor->owner)) return -EINVAL; - dprintk("__cpufreq_governor for CPU %u, event %u\n", + pr_debug("__cpufreq_governor for CPU %u, event %u\n", policy->cpu, event); ret = policy->governor->governor(policy, event); @@ -1660,8 +1635,11 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor) for_each_present_cpu(cpu) { if (cpu_online(cpu)) continue; - if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name)) - strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0"); + if (!strcmp(per_cpu(cpufreq_policy_save, cpu).gov, + governor->name)) + strcpy(per_cpu(cpufreq_policy_save, cpu).gov, "\0"); + per_cpu(cpufreq_policy_save, cpu).min = 0; + per_cpu(cpufreq_policy_save, cpu).max = 0; } #endif @@ -1712,8 +1690,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, { int ret = 0; - cpufreq_debug_disable_ratelimit(); - dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, + pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, policy->min, policy->max); memcpy(&policy->cpuinfo, &data->cpuinfo, @@ -1750,19 +1727,19 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, data->min = policy->min; data->max = policy->max; - dprintk("new min and max freqs are %u - %u kHz\n", + pr_debug("new min and max freqs are %u - %u kHz\n", data->min, data->max); if (cpufreq_driver->setpolicy) { data->policy = policy->policy; - dprintk("setting range\n"); + pr_debug("setting 
range\n"); ret = cpufreq_driver->setpolicy(policy); } else { if (policy->governor != data->governor) { /* save old, working values */ struct cpufreq_governor *old_gov = data->governor; - dprintk("governor switch\n"); + pr_debug("governor switch\n"); /* end old governor */ if (data->governor) @@ -1772,7 +1749,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, data->governor = policy->governor; if (__cpufreq_governor(data, CPUFREQ_GOV_START)) { /* new governor failed, so re-start old one */ - dprintk("starting governor %s failed\n", + pr_debug("starting governor %s failed\n", data->governor->name); if (old_gov) { data->governor = old_gov; @@ -1784,12 +1761,11 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, } /* might be a policy change, too, so fall through */ } - dprintk("governor: change or update limits\n"); + pr_debug("governor: change or update limits\n"); __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); } error_out: - cpufreq_debug_enable_ratelimit(); return ret; } @@ -1797,7 +1773,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, * cpufreq_update_policy - re-evaluate an existing cpufreq policy * @cpu: CPU which shall be re-evaluated * - * Usefull for policy notifiers which have different necessities + * Useful for policy notifiers which have different necessities * at different times. */ int cpufreq_update_policy(unsigned int cpu) @@ -1816,7 +1792,7 @@ int cpufreq_update_policy(unsigned int cpu) goto fail; } - dprintk("updating policy for CPU %u\n", cpu); + pr_debug("updating policy for CPU %u\n", cpu); memcpy(&policy, data, sizeof(struct cpufreq_policy)); policy.min = data->user_policy.min; policy.max = data->user_policy.max; @@ -1828,7 +1804,7 @@ int cpufreq_update_policy(unsigned int cpu) if (cpufreq_driver->get) { policy.cur = cpufreq_driver->get(cpu); if (!data->cur) { - dprintk("Driver did not initialize current freq"); + pr_debug("Driver did not initialize current freq"); data->cur = policy.cur; } else { if (data->cur != policy.cur) @@ -1904,7 +1880,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) ((!driver_data->setpolicy) && (!driver_data->target))) return -EINVAL; - dprintk("trying to register driver %s\n", driver_data->name); + pr_debug("trying to register driver %s\n", driver_data->name); if (driver_data->setpolicy) driver_data->flags |= CPUFREQ_CONST_LOOPS; @@ -1935,15 +1911,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) /* if all ->init() calls failed, unregister */ if (ret) { - dprintk("no CPU initialized for driver %s\n", + pr_debug("no CPU initialized for driver %s\n", driver_data->name); goto err_sysdev_unreg; } } register_hotcpu_notifier(&cpufreq_cpu_notifier); - dprintk("driver %s up and running\n", driver_data->name); - cpufreq_debug_enable_ratelimit(); + pr_debug("driver %s up and running\n", driver_data->name); return 0; err_sysdev_unreg: @@ -1970,14 +1945,10 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver) { unsigned long flags; - cpufreq_debug_disable_ratelimit(); - - if (!cpufreq_driver || (driver != cpufreq_driver)) { - cpufreq_debug_enable_ratelimit(); + if (!cpufreq_driver || (driver != cpufreq_driver)) return -EINVAL; - } - dprintk("unregistering driver %s\n", driver->name); + pr_debug("unregistering driver %s\n", driver->name); sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver); unregister_hotcpu_notifier(&cpufreq_cpu_notifier); @@ -2002,6 +1973,7 @@ static int __init cpufreq_core_init(void) cpufreq_global_kobject = 
kobject_create_and_add("cpufreq", &cpu_sysdev_class.kset.kobj); BUG_ON(!cpufreq_global_kobject); + register_syscore_ops(&cpufreq_syscore_ops); return 0; } diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 526bfbf696112..33b56e5c5c14a 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -76,13 +76,10 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info); static unsigned int dbs_enable; /* number of CPUs using this policy */ /* - * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on - * different CPUs. It protects dbs_enable in governor start/stop. + * dbs_mutex protects dbs_enable in governor start/stop. */ static DEFINE_MUTEX(dbs_mutex); -static struct workqueue_struct *kconservative_wq; - static struct dbs_tuners { unsigned int sampling_rate; unsigned int sampling_down_factor; @@ -118,7 +115,7 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, if (wall) *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); - return (cputime64_t)jiffies_to_usecs(idle_time);; + return (cputime64_t)jiffies_to_usecs(idle_time); } static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) @@ -164,21 +161,12 @@ static struct notifier_block dbs_cpufreq_notifier_block = { }; /************************** sysfs interface ************************/ -static ssize_t show_sampling_rate_max(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max " - "sysfs file is deprecated - used by: %s\n", current->comm); - return sprintf(buf, "%u\n", -1U); -} - static ssize_t show_sampling_rate_min(struct kobject *kobj, struct attribute *attr, char *buf) { return sprintf(buf, "%u\n", min_sampling_rate); } -define_one_global_ro(sampling_rate_max); define_one_global_ro(sampling_rate_min); /* cpufreq_conservative Governor Tunables */ @@ -195,33 +183,6 @@ show_one(down_threshold, down_threshold); show_one(ignore_nice_load, ignore_nice); show_one(freq_step, freq_step); -/*** delete after deprecation time ***/ -#define DEPRECATION_MSG(file_name) \ - printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \ - "interface is deprecated - " #file_name "\n"); - -#define show_one_old(file_name) \ -static ssize_t show_##file_name##_old \ -(struct cpufreq_policy *unused, char *buf) \ -{ \ - printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \ - "interface is deprecated - " #file_name "\n"); \ - return show_##file_name(NULL, NULL, buf); \ -} -show_one_old(sampling_rate); -show_one_old(sampling_down_factor); -show_one_old(up_threshold); -show_one_old(down_threshold); -show_one_old(ignore_nice_load); -show_one_old(freq_step); -show_one_old(sampling_rate_min); -show_one_old(sampling_rate_max); - -cpufreq_freq_attr_ro_old(sampling_rate_min); -cpufreq_freq_attr_ro_old(sampling_rate_max); - -/*** delete after deprecation time ***/ - static ssize_t store_sampling_down_factor(struct kobject *a, struct attribute *b, const char *buf, size_t count) @@ -233,10 +194,7 @@ static ssize_t store_sampling_down_factor(struct kobject *a, if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) return -EINVAL; - mutex_lock(&dbs_mutex); dbs_tuners_ins.sampling_down_factor = input; - mutex_unlock(&dbs_mutex); - return count; } @@ -250,10 +208,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, if (ret != 1) return -EINVAL; - mutex_lock(&dbs_mutex); dbs_tuners_ins.sampling_rate = max(input, 
min_sampling_rate); - mutex_unlock(&dbs_mutex); - return count; } @@ -264,16 +219,11 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, int ret; ret = sscanf(buf, "%u", &input); - mutex_lock(&dbs_mutex); if (ret != 1 || input > 100 || - input <= dbs_tuners_ins.down_threshold) { - mutex_unlock(&dbs_mutex); + input <= dbs_tuners_ins.down_threshold) return -EINVAL; - } dbs_tuners_ins.up_threshold = input; - mutex_unlock(&dbs_mutex); - return count; } @@ -284,17 +234,12 @@ static ssize_t store_down_threshold(struct kobject *a, struct attribute *b, int ret; ret = sscanf(buf, "%u", &input); - mutex_lock(&dbs_mutex); /* cannot be lower than 11 otherwise freq will not fall */ if (ret != 1 || input < 11 || input > 100 || - input >= dbs_tuners_ins.up_threshold) { - mutex_unlock(&dbs_mutex); + input >= dbs_tuners_ins.up_threshold) return -EINVAL; - } dbs_tuners_ins.down_threshold = input; - mutex_unlock(&dbs_mutex); - return count; } @@ -313,11 +258,9 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, if (input > 1) input = 1; - mutex_lock(&dbs_mutex); - if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ - mutex_unlock(&dbs_mutex); + if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */ return count; - } + dbs_tuners_ins.ignore_nice = input; /* we need to re-evaluate prev_cpu_idle */ @@ -329,8 +272,6 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, if (dbs_tuners_ins.ignore_nice) dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; } - mutex_unlock(&dbs_mutex); - return count; } @@ -349,10 +290,7 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b, /* no need to test here if freq_step is zero as the user might actually * want this, they would be crazy though :) */ - mutex_lock(&dbs_mutex); dbs_tuners_ins.freq_step = input; - mutex_unlock(&dbs_mutex); - return count; } @@ -364,7 +302,6 @@ define_one_global_rw(ignore_nice_load); define_one_global_rw(freq_step); static struct attribute *dbs_attributes[] = { - &sampling_rate_max.attr, &sampling_rate_min.attr, &sampling_rate.attr, &sampling_down_factor.attr, @@ -380,49 +317,6 @@ static struct attribute_group dbs_attr_group = { .name = "conservative", }; -/*** delete after deprecation time ***/ - -#define write_one_old(file_name) \ -static ssize_t store_##file_name##_old \ -(struct cpufreq_policy *unused, const char *buf, size_t count) \ -{ \ - printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \ - "interface is deprecated - " #file_name "\n"); \ - return store_##file_name(NULL, NULL, buf, count); \ -} -write_one_old(sampling_rate); -write_one_old(sampling_down_factor); -write_one_old(up_threshold); -write_one_old(down_threshold); -write_one_old(ignore_nice_load); -write_one_old(freq_step); - -cpufreq_freq_attr_rw_old(sampling_rate); -cpufreq_freq_attr_rw_old(sampling_down_factor); -cpufreq_freq_attr_rw_old(up_threshold); -cpufreq_freq_attr_rw_old(down_threshold); -cpufreq_freq_attr_rw_old(ignore_nice_load); -cpufreq_freq_attr_rw_old(freq_step); - -static struct attribute *dbs_attributes_old[] = { - &sampling_rate_max_old.attr, - &sampling_rate_min_old.attr, - &sampling_rate_old.attr, - &sampling_down_factor_old.attr, - &up_threshold_old.attr, - &down_threshold_old.attr, - &ignore_nice_load_old.attr, - &freq_step_old.attr, - NULL -}; - -static struct attribute_group dbs_attr_group_old = { - .attrs = dbs_attributes_old, - .name = "conservative", -}; - -/*** delete after deprecation time ***/ - /************************** 
sysfs end ************************/ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) @@ -560,7 +454,7 @@ static void do_dbs_timer(struct work_struct *work) dbs_check_cpu(dbs_info); - queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay); + schedule_delayed_work_on(cpu, &dbs_info->work, delay); mutex_unlock(&dbs_info->timer_mutex); } @@ -572,8 +466,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) dbs_info->enable = 1; INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); - queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work, - delay); + schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); } static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) @@ -599,12 +492,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, mutex_lock(&dbs_mutex); - rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - for_each_cpu(j, policy->cpus) { struct cpu_dbs_info_s *j_dbs_info; j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); @@ -667,7 +554,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, dbs_timer_exit(this_dbs_info); mutex_lock(&dbs_mutex); - sysfs_remove_group(&policy->kobj, &dbs_attr_group_old); dbs_enable--; mutex_destroy(&this_dbs_info->timer_mutex); @@ -716,25 +602,12 @@ struct cpufreq_governor cpufreq_gov_conservative = { static int __init cpufreq_gov_dbs_init(void) { - int err; - - kconservative_wq = create_workqueue("kconservative"); - if (!kconservative_wq) { - printk(KERN_ERR "Creation of kconservative failed\n"); - return -EFAULT; - } - - err = cpufreq_register_governor(&cpufreq_gov_conservative); - if (err) - destroy_workqueue(kconservative_wq); - - return err; + return cpufreq_register_governor(&cpufreq_gov_conservative); } static void __exit cpufreq_gov_dbs_exit(void) { cpufreq_unregister_governor(&cpufreq_gov_conservative); - destroy_workqueue(kconservative_wq); } diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c index 81783286cad2f..224626869b8a0 100644 --- a/drivers/cpufreq/cpufreq_interactive.c +++ b/drivers/cpufreq/cpufreq_interactive.c @@ -19,288 +19,426 @@ #include #include #include -#include +#include +#include +#include #include #include +#include #include #include #include - +#include +#include #include -static void (*pm_idle_old)(void); -static atomic_t active_count = ATOMIC_INIT(0); +#define CREATE_TRACE_POINTS +#include + +static int active_count; struct cpufreq_interactive_cpuinfo { struct timer_list cpu_timer; - int timer_idlecancel; + struct timer_list cpu_slack_timer; + spinlock_t load_lock; /* protects the next 4 fields */ u64 time_in_idle; - u64 idle_exit_time; - u64 timer_run_time; - int idling; - u64 freq_change_time; - u64 freq_change_time_in_idle; + u64 time_in_idle_timestamp; + u64 cputime_speedadj; + u64 cputime_speedadj_timestamp; struct cpufreq_policy *policy; struct cpufreq_frequency_table *freq_table; unsigned int target_freq; + unsigned int floor_freq; + u64 floor_validate_time; + u64 hispeed_validate_time; + struct rw_semaphore enable_sem; int governor_enabled; }; static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo); -/* Workqueues handle frequency scaling */ -static struct task_struct *up_task; -static struct workqueue_struct *down_wq; -static struct work_struct freq_scale_down_work; -static cpumask_t up_cpumask; -static spinlock_t up_cpumask_lock; -static cpumask_t down_cpumask; -static spinlock_t down_cpumask_lock; 
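
The per-CPU cpuinfo structure above pairs governor_enabled with an enable_sem rw-semaphore; every asynchronous path in the rewritten interactive governor (the CPU timer, the idle notifier, the frequency-change notifier and the speedchange task) takes the semaphore for reading and backs out if the governor has been stopped, as the later hunks show. That guard reduces to a pattern like the following hypothetical helper, which is not in the patch and is shown only to summarize the open-coded checks:

/* Hypothetical helper equivalent to the open-coded checks used below:
 * returns true with enable_sem held for reading, false otherwise. */
static bool interactive_cpu_active(struct cpufreq_interactive_cpuinfo *pcpu)
{
	if (!down_read_trylock(&pcpu->enable_sem))
		return false;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return false;
	}
	return true;	/* caller must up_read(&pcpu->enable_sem) when done */
}
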
+/* realtime thread handles frequency scaling */ +static struct task_struct *speedchange_task; +static cpumask_t speedchange_cpumask; +static spinlock_t speedchange_cpumask_lock; +static struct mutex gov_lock; + +/* Hi speed to bump to from lo speed when load burst (default max) */ +#define DEFAULT_HISPEED_FREQ 729600 +static unsigned int hispeed_freq = DEFAULT_HISPEED_FREQ; -/* Go to max speed when CPU load at or above this value. */ -#define DEFAULT_GO_MAXSPEED_LOAD 85 -static unsigned long go_maxspeed_load; +/* Go to hi speed when CPU load at or above this value. */ +#define DEFAULT_GO_HISPEED_LOAD 50 +static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD; + +/* Target load. Lower values result in higher CPU speeds. */ +#define DEFAULT_TARGET_LOAD 90 +static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD}; +static spinlock_t target_loads_lock; +static unsigned int *target_loads = default_target_loads; +static int ntarget_loads = ARRAY_SIZE(default_target_loads); /* * The minimum amount of time to spend at a frequency before we can ramp down. */ -#define DEFAULT_MIN_SAMPLE_TIME 80000; -static unsigned long min_sample_time; +#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC) +static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME; -#define DEBUG 0 -#define BUFSZ 128 +/* + * The sample rate of the timer used to increase frequency + */ +#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC) +static unsigned long timer_rate = DEFAULT_TIMER_RATE; -#if DEBUG -#include +/* + * Wait this long before raising speed above hispeed, by default a single + * timer interval. + */ +#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE +static unsigned int default_above_hispeed_delay[] = { + DEFAULT_ABOVE_HISPEED_DELAY }; +static spinlock_t above_hispeed_delay_lock; +static unsigned int *above_hispeed_delay = default_above_hispeed_delay; +static int nabove_hispeed_delay = ARRAY_SIZE(default_above_hispeed_delay); + +/* Non-zero means indefinite speed boost active */ +static int boost_val; +/* Duration of a boot pulse in usecs */ +static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME; +/* End time of boost pulse in ktime converted to usecs */ +static u64 boostpulse_endtime; -struct dbgln { - int cpu; - unsigned long jiffy; - unsigned long run; - char buf[BUFSZ]; -}; +/* + * Max additional time to wait in idle, beyond timer_rate, at speeds above + * minimum before wakeup to reduce speed, or -1 if unnecessary. + */ +#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE) +static int timer_slack_val = DEFAULT_TIMER_SLACK; -#define NDBGLNS 256 +#define DEFAULT_IO_IS_BUSY 1 +static bool io_is_busy = DEFAULT_IO_IS_BUSY; -static struct dbgln dbgbuf[NDBGLNS]; -static int dbgbufs; -static int dbgbufe; -static struct proc_dir_entry *dbg_proc; -static spinlock_t dbgpr_lock; +static int cpufreq_governor_interactive(struct cpufreq_policy *policy, + unsigned int event); -static u64 up_request_time; -static unsigned int up_max_latency; +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE +static +#endif +struct cpufreq_governor cpufreq_gov_interactive = { + .name = "interactive", + .governor = cpufreq_governor_interactive, + .max_transition_latency = 10000000, + .owner = THIS_MODULE, +}; -static void dbgpr(char *fmt, ...) 
+static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, + cputime64_t *wall) { - va_list args; - int n; - unsigned long flags; + cputime64_t idle_time; + cputime64_t cur_wall_time; + cputime64_t busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, + kstat_cpu(cpu).cpustat.system); - spin_lock_irqsave(&dbgpr_lock, flags); - n = dbgbufe; - va_start(args, fmt); - vsnprintf(dbgbuf[n].buf, BUFSZ, fmt, args); - va_end(args); - dbgbuf[n].cpu = smp_processor_id(); - dbgbuf[n].run = nr_running(); - dbgbuf[n].jiffy = jiffies; + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice); - if (++dbgbufe >= NDBGLNS) - dbgbufe = 0; + idle_time = cputime64_sub(cur_wall_time, busy_time); + if (wall) + *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); - if (dbgbufe == dbgbufs) - if (++dbgbufs >= NDBGLNS) - dbgbufs = 0; + return (cputime64_t)jiffies_to_usecs(idle_time); +} + +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) +{ + u64 idle_time = get_cpu_idle_time_us(cpu, wall); - spin_unlock_irqrestore(&dbgpr_lock, flags); + if (idle_time == -1ULL) + idle_time = get_cpu_idle_time_jiffy(cpu, wall); + else if (!io_is_busy) + idle_time += get_cpu_iowait_time_us(cpu, wall); + + return idle_time; } -static void dbgdump(void) +static void cpufreq_interactive_timer_resched( + struct cpufreq_interactive_cpuinfo *pcpu) { - int i, j; + unsigned long expires = jiffies + usecs_to_jiffies(timer_rate); unsigned long flags; - static struct dbgln prbuf[NDBGLNS]; - - spin_lock_irqsave(&dbgpr_lock, flags); - i = dbgbufs; - j = dbgbufe; - memcpy(prbuf, dbgbuf, sizeof(dbgbuf)); - dbgbufs = 0; - dbgbufe = 0; - spin_unlock_irqrestore(&dbgpr_lock, flags); - - while (i != j) - { - printk("%lu %d %lu %s", - prbuf[i].jiffy, prbuf[i].cpu, prbuf[i].run, - prbuf[i].buf); - if (++i == NDBGLNS) - i = 0; + + mod_timer_pinned(&pcpu->cpu_timer, expires); + if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) { + expires += usecs_to_jiffies(timer_slack_val); + mod_timer_pinned(&pcpu->cpu_slack_timer, expires); } + + spin_lock_irqsave(&pcpu->load_lock, flags); + pcpu->time_in_idle = + get_cpu_idle_time(smp_processor_id(), + &pcpu->time_in_idle_timestamp); + pcpu->cputime_speedadj = 0; + pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp; + spin_unlock_irqrestore(&pcpu->load_lock, flags); } -static int dbg_proc_read(char *buffer, char **start, off_t offset, - int count, int *peof, void *dat) +static unsigned int freq_to_above_hispeed_delay(unsigned int freq) { - printk("max up_task latency=%uus\n", up_max_latency); - dbgdump(); - *peof = 1; - return 0; + int i; + unsigned int ret; + unsigned long flags; + + spin_lock_irqsave(&above_hispeed_delay_lock, flags); + + for (i = 0; i < nabove_hispeed_delay - 1 && + freq >= above_hispeed_delay[i+1]; i += 2) + ; + + ret = above_hispeed_delay[i]; + spin_unlock_irqrestore(&above_hispeed_delay_lock, flags); + return ret; } +static unsigned int freq_to_targetload(unsigned int freq) +{ + int i; + unsigned int ret; + unsigned long flags; -#else -#define dbgpr(...) 
do {} while (0) -#endif + spin_lock_irqsave(&target_loads_lock, flags); -static int cpufreq_governor_interactive(struct cpufreq_policy *policy, - unsigned int event); + for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2) + ; -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE -static -#endif -struct cpufreq_governor cpufreq_gov_interactive = { - .name = "interactive", - .governor = cpufreq_governor_interactive, - .max_transition_latency = 10000000, - .owner = THIS_MODULE, -}; + ret = target_loads[i]; + spin_unlock_irqrestore(&target_loads_lock, flags); + return ret; +} -static void cpufreq_interactive_timer(unsigned long data) +/* + * If increasing frequencies never map to a lower target load then + * choose_freq() will find the minimum frequency that does not exceed its + * target load given the current load. + */ + +static unsigned int choose_freq( + struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq) { + unsigned int freq = pcpu->policy->cur; + unsigned int prevfreq, freqmin, freqmax; + unsigned int tl; + int index; + + freqmin = 0; + freqmax = UINT_MAX; + + do { + prevfreq = freq; + tl = freq_to_targetload(freq); + + /* + * Find the lowest frequency where the computed load is less + * than or equal to the target load. + */ + + cpufreq_frequency_table_target( + pcpu->policy, pcpu->freq_table, loadadjfreq / tl, + CPUFREQ_RELATION_L, &index); + freq = pcpu->freq_table[index].frequency; + + if (freq > prevfreq) { + /* The previous frequency is too low. */ + freqmin = prevfreq; + + if (freq >= freqmax) { + /* + * Find the highest frequency that is less + * than freqmax. + */ + cpufreq_frequency_table_target( + pcpu->policy, pcpu->freq_table, + freqmax - 1, CPUFREQ_RELATION_H, + &index); + freq = pcpu->freq_table[index].frequency; + + if (freq == freqmin) { + /* + * The first frequency below freqmax + * has already been found to be too + * low. freqmax is the lowest speed + * we found that is fast enough. + */ + freq = freqmax; + break; + } + } + } else if (freq < prevfreq) { + /* The previous frequency is high enough. */ + freqmax = prevfreq; + + if (freq <= freqmin) { + /* + * Find the lowest frequency that is higher + * than freqmin. + */ + cpufreq_frequency_table_target( + pcpu->policy, pcpu->freq_table, + freqmin + 1, CPUFREQ_RELATION_L, + &index); + freq = pcpu->freq_table[index].frequency; + + /* + * If freqmax is the first frequency above + * freqmin then we have already found that + * this speed is fast enough. + */ + if (freq == freqmax) + break; + } + } + + /* If same frequency chosen as previous then done. 
*/ + } while (freq != prevfreq); + + return freq; +} + +static u64 update_load(int cpu) +{ + struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu); + u64 now; + u64 now_idle; unsigned int delta_idle; unsigned int delta_time; + u64 active_time; + + now_idle = get_cpu_idle_time(cpu, &now); + delta_idle = (unsigned int) cputime64_sub(now_idle, pcpu->time_in_idle); + delta_time = (unsigned int) cputime64_sub(now, pcpu->time_in_idle_timestamp); + active_time = delta_time - delta_idle; + pcpu->cputime_speedadj += active_time * pcpu->policy->cur; + + pcpu->time_in_idle = now_idle; + pcpu->time_in_idle_timestamp = now; + return now; +} + +static void cpufreq_interactive_timer(unsigned long data) +{ + u64 now; + unsigned int delta_time; + u64 cputime_speedadj; int cpu_load; - int load_since_change; - u64 time_in_idle; - u64 idle_exit_time; struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, data); - u64 now_idle; unsigned int new_freq; + unsigned int loadadjfreq; unsigned int index; unsigned long flags; + bool boosted; - smp_rmb(); - + if (!down_read_trylock(&pcpu->enable_sem)) + return; if (!pcpu->governor_enabled) goto exit; - /* - * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time, - * this lets idle exit know the current idle time sample has - * been processed, and idle exit can generate a new sample and - * re-arm the timer. This prevents a concurrent idle - * exit on that CPU from writing a new set of info at the same time - * the timer function runs (the timer function can't use that info - * until more time passes). - */ - time_in_idle = pcpu->time_in_idle; - idle_exit_time = pcpu->idle_exit_time; - now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time); - smp_wmb(); - - /* If we raced with cancelling a timer, skip. */ - if (!idle_exit_time) { - dbgpr("timer %d: no valid idle exit sample\n", (int) data); - goto exit; - } - -#if DEBUG - if ((int) jiffies - (int) pcpu->cpu_timer.expires >= 10) - dbgpr("timer %d: late by %d ticks\n", - (int) data, jiffies - pcpu->cpu_timer.expires); -#endif - - delta_idle = (unsigned int) cputime64_sub(now_idle, time_in_idle); - delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time, - idle_exit_time); + spin_lock_irqsave(&pcpu->load_lock, flags); + now = update_load(data); + delta_time = (unsigned int) cputime64_sub(now, pcpu->cputime_speedadj_timestamp); + cputime_speedadj = pcpu->cputime_speedadj; + spin_unlock_irqrestore(&pcpu->load_lock, flags); - /* - * If timer ran less than 1ms after short-term sample started, retry. 
- */ - if (delta_time < 1000) { - dbgpr("timer %d: time delta %u too short exit=%llu now=%llu\n", (int) data, - delta_time, idle_exit_time, pcpu->timer_run_time); + if (WARN_ON_ONCE(!delta_time)) goto rearm; - } - if (delta_idle > delta_time) - cpu_load = 0; - else - cpu_load = 100 * (delta_time - delta_idle) / delta_time; + do_div(cputime_speedadj, delta_time); + loadadjfreq = (unsigned int)cputime_speedadj * 100; + cpu_load = loadadjfreq / pcpu->target_freq; + boosted = boost_val || now < boostpulse_endtime; - delta_idle = (unsigned int) cputime64_sub(now_idle, - pcpu->freq_change_time_in_idle); - delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time, - pcpu->freq_change_time); + if (cpu_load >= go_hispeed_load || boosted) { + if (pcpu->target_freq < hispeed_freq) { + new_freq = hispeed_freq; + } else { + new_freq = choose_freq(pcpu, loadadjfreq); - if (delta_idle > delta_time) - load_since_change = 0; - else - load_since_change = - 100 * (delta_time - delta_idle) / delta_time; + if (new_freq < hispeed_freq) + new_freq = hispeed_freq; + } + } else { + new_freq = choose_freq(pcpu, loadadjfreq); + } - /* - * Choose greater of short-term load (since last idle timer - * started or timer function re-armed itself) or long-term load - * (since last frequency change). - */ - if (load_since_change > cpu_load) - cpu_load = load_since_change; + if (pcpu->target_freq >= hispeed_freq && + new_freq > pcpu->target_freq && + now - pcpu->hispeed_validate_time < + freq_to_above_hispeed_delay(pcpu->target_freq)) { + trace_cpufreq_interactive_notyet( + data, cpu_load, pcpu->target_freq, + pcpu->policy->cur, new_freq); + goto rearm; + } - if (cpu_load >= go_maxspeed_load) - new_freq = pcpu->policy->max; - else - new_freq = pcpu->policy->max * cpu_load / 100; + pcpu->hispeed_validate_time = now; if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table, - new_freq, CPUFREQ_RELATION_H, + new_freq, CPUFREQ_RELATION_L, &index)) { - dbgpr("timer %d: cpufreq_frequency_table_target error\n", (int) data); + pr_warn_once("timer %d: cpufreq_frequency_table_target error\n", + (int) data); goto rearm; } new_freq = pcpu->freq_table[index].frequency; - if (pcpu->target_freq == new_freq) - { - dbgpr("timer %d: load=%d, already at %d\n", (int) data, cpu_load, new_freq); - goto rearm_if_notmax; - } - /* - * Do not scale down unless we have been at this frequency for the - * minimum sample time. + * Do not scale below floor_freq unless we have been at or above the + * floor frequency for the minimum sample time since last validated. */ - if (new_freq < pcpu->target_freq) { - if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time) < - min_sample_time) { - dbgpr("timer %d: load=%d cur=%d tgt=%d not yet\n", (int) data, cpu_load, pcpu->target_freq, new_freq); + if (new_freq < pcpu->floor_freq) { + if (cputime64_sub(now, pcpu->floor_validate_time) + < min_sample_time) { + trace_cpufreq_interactive_notyet( + data, cpu_load, pcpu->target_freq, + pcpu->policy->cur, new_freq); goto rearm; } } - dbgpr("timer %d: load=%d cur=%d tgt=%d queue\n", (int) data, cpu_load, pcpu->target_freq, new_freq); + /* + * Update the timestamp for checking whether speed has been held at + * or above the selected frequency for a minimum of min_sample_time, + * if not boosted to hispeed_freq. If boosted to hispeed_freq then we + * allow the speed to drop as soon as the boostpulse duration expires + * (or the indefinite boost is turned off). 
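
To make the load computation above concrete, here is a worked example with illustrative numbers. Suppose the CPU was busy for 15,000 us of a 20,000 us sampling window while running at 1,000,000 kHz, and the governor's current target is also 1,000,000 kHz:

  cputime_speedadj = 15,000 us * 1,000,000 kHz = 1.5e10
  do_div(cputime_speedadj, delta_time)  ->  1.5e10 / 20,000 = 750,000
  loadadjfreq = 750,000 * 100            = 75,000,000
  cpu_load    = 75,000,000 / 1,000,000   = 75

Since 75 is at or above the default go_hispeed_load of 50, a CPU still below hispeed_freq jumps straight to it; at or above hispeed_freq (or for lighter loads) choose_freq() is consulted instead, e.g. with the default uniform target load of 90 it asks the frequency table for the lowest frequency at or above 75,000,000 / 90, roughly 833,000 kHz.
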
+ */ - if (new_freq < pcpu->target_freq) { - pcpu->target_freq = new_freq; - spin_lock_irqsave(&down_cpumask_lock, flags); - cpumask_set_cpu(data, &down_cpumask); - spin_unlock_irqrestore(&down_cpumask_lock, flags); - queue_work(down_wq, &freq_scale_down_work); - } else { - pcpu->target_freq = new_freq; -#if DEBUG - up_request_time = ktime_to_us(ktime_get()); -#endif - spin_lock_irqsave(&up_cpumask_lock, flags); - cpumask_set_cpu(data, &up_cpumask); - spin_unlock_irqrestore(&up_cpumask_lock, flags); - wake_up_process(up_task); + if (!boosted || new_freq > hispeed_freq) { + pcpu->floor_freq = new_freq; + pcpu->floor_validate_time = now; + } + + if (pcpu->target_freq == new_freq) { + trace_cpufreq_interactive_already( + data, cpu_load, pcpu->target_freq, + pcpu->policy->cur, new_freq); + goto rearm_if_notmax; } + trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq, + pcpu->policy->cur, new_freq); + + pcpu->target_freq = new_freq; + spin_lock_irqsave(&speedchange_cpumask_lock, flags); + cpumask_set_cpu(data, &speedchange_cpumask); + spin_unlock_irqrestore(&speedchange_cpumask_lock, flags); + wake_up_process(speedchange_task); + rearm_if_notmax: /* * Already set max speed and don't see a need to change that, @@ -310,50 +448,30 @@ static void cpufreq_interactive_timer(unsigned long data) goto exit; rearm: - if (!timer_pending(&pcpu->cpu_timer)) { - /* - * If already at min: if that CPU is idle, don't set timer. - * Else cancel the timer if that CPU goes idle. We don't - * need to re-evaluate speed until the next idle exit. - */ - if (pcpu->target_freq == pcpu->policy->min) { - smp_rmb(); - - if (pcpu->idling) { - dbgpr("timer %d: cpu idle, don't re-arm\n", (int) data); - goto exit; - } - - pcpu->timer_idlecancel = 1; - } - - pcpu->time_in_idle = get_cpu_idle_time_us( - data, &pcpu->idle_exit_time); - mod_timer(&pcpu->cpu_timer, jiffies + 2); - dbgpr("timer %d: set timer for %lu exit=%llu\n", (int) data, pcpu->cpu_timer.expires, pcpu->idle_exit_time); - } + if (!timer_pending(&pcpu->cpu_timer)) + cpufreq_interactive_timer_resched(pcpu); exit: + up_read(&pcpu->enable_sem); return; } -static void cpufreq_interactive_idle(void) +static void cpufreq_interactive_idle_start(void) { struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, smp_processor_id()); int pending; + if (!down_read_trylock(&pcpu->enable_sem)) + return; if (!pcpu->governor_enabled) { - pm_idle_old(); + up_read(&pcpu->enable_sem); return; } - pcpu->idling = 1; - smp_wmb(); pending = timer_pending(&pcpu->cpu_timer); if (pcpu->target_freq != pcpu->policy->min) { -#ifdef CONFIG_SMP /* * Entering idle while not at lowest speed. On some * platforms this can hold the other CPU(s) at that speed @@ -362,185 +480,345 @@ static void cpufreq_interactive_idle(void) * min indefinitely. This should probably be a quirk of * the CPUFreq driver. */ - if (!pending) { - pcpu->time_in_idle = get_cpu_idle_time_us( - smp_processor_id(), &pcpu->idle_exit_time); - pcpu->timer_idlecancel = 0; - mod_timer(&pcpu->cpu_timer, jiffies + 2); - dbgpr("idle: enter at %d, set timer for %lu exit=%llu\n", - pcpu->target_freq, pcpu->cpu_timer.expires, - pcpu->idle_exit_time); - } -#endif - } else { - /* - * If at min speed and entering idle after load has - * already been evaluated, and a timer has been set just in - * case the CPU suddenly goes busy, cancel that timer. The - * CPU didn't go busy; we'll recheck things upon idle exit. 
- */ - if (pending && pcpu->timer_idlecancel) { - dbgpr("idle: cancel timer for %lu\n", pcpu->cpu_timer.expires); - del_timer(&pcpu->cpu_timer); - /* - * Ensure last timer run time is after current idle - * sample start time, so next idle exit will always - * start a new idle sampling period. - */ - pcpu->idle_exit_time = 0; - pcpu->timer_idlecancel = 0; - } + if (!pending) + cpufreq_interactive_timer_resched(pcpu); } - pm_idle_old(); - pcpu->idling = 0; - smp_wmb(); + up_read(&pcpu->enable_sem); +} - /* - * Arm the timer for 1-2 ticks later if not already, and if the timer - * function has already processed the previous load sampling - * interval. (If the timer is not pending but has not processed - * the previous interval, it is probably racing with us on another - * CPU. Let it compute load based on the previous sample and then - * re-arm the timer for another interval when it's done, rather - * than updating the interval start time to be "now", which doesn't - * give the timer function enough time to make a decision on this - * run.) - */ - if (timer_pending(&pcpu->cpu_timer) == 0 && - pcpu->timer_run_time >= pcpu->idle_exit_time && - pcpu->governor_enabled) { - pcpu->time_in_idle = - get_cpu_idle_time_us(smp_processor_id(), - &pcpu->idle_exit_time); - pcpu->timer_idlecancel = 0; - mod_timer(&pcpu->cpu_timer, jiffies + 2); - dbgpr("idle: exit, set timer for %lu exit=%llu\n", pcpu->cpu_timer.expires, pcpu->idle_exit_time); -#if DEBUG - } else if (timer_pending(&pcpu->cpu_timer) == 0 && - pcpu->timer_run_time < pcpu->idle_exit_time) { - dbgpr("idle: timer not run yet: exit=%llu tmrrun=%llu\n", - pcpu->idle_exit_time, pcpu->timer_run_time); -#endif +static void cpufreq_interactive_idle_end(void) +{ + struct cpufreq_interactive_cpuinfo *pcpu = + &per_cpu(cpuinfo, smp_processor_id()); + + if (!down_read_trylock(&pcpu->enable_sem)) + return; + if (!pcpu->governor_enabled) { + up_read(&pcpu->enable_sem); + return; } + /* Arm the timer for 1-2 ticks later if not already. 
*/ + if (!timer_pending(&pcpu->cpu_timer)) { + cpufreq_interactive_timer_resched(pcpu); + } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) { + del_timer(&pcpu->cpu_timer); + del_timer(&pcpu->cpu_slack_timer); + cpufreq_interactive_timer(smp_processor_id()); + } + + up_read(&pcpu->enable_sem); } -static int cpufreq_interactive_up_task(void *data) +static int cpufreq_interactive_speedchange_task(void *data) { unsigned int cpu; cpumask_t tmp_mask; unsigned long flags; struct cpufreq_interactive_cpuinfo *pcpu; -#if DEBUG - u64 now; - u64 then; - unsigned int lat; -#endif - while (1) { set_current_state(TASK_INTERRUPTIBLE); - spin_lock_irqsave(&up_cpumask_lock, flags); + spin_lock_irqsave(&speedchange_cpumask_lock, flags); - if (cpumask_empty(&up_cpumask)) { - spin_unlock_irqrestore(&up_cpumask_lock, flags); + if (cpumask_empty(&speedchange_cpumask)) { + spin_unlock_irqrestore(&speedchange_cpumask_lock, + flags); schedule(); if (kthread_should_stop()) break; - spin_lock_irqsave(&up_cpumask_lock, flags); + spin_lock_irqsave(&speedchange_cpumask_lock, flags); } set_current_state(TASK_RUNNING); - -#if DEBUG - then = up_request_time; - now = ktime_to_us(ktime_get()); - - if (now > then) { - lat = ktime_to_us(ktime_get()) - then; - - if (lat > up_max_latency) - up_max_latency = lat; - } -#endif - - tmp_mask = up_cpumask; - cpumask_clear(&up_cpumask); - spin_unlock_irqrestore(&up_cpumask_lock, flags); + tmp_mask = speedchange_cpumask; + cpumask_clear(&speedchange_cpumask); + spin_unlock_irqrestore(&speedchange_cpumask_lock, flags); for_each_cpu(cpu, &tmp_mask) { - pcpu = &per_cpu(cpuinfo, cpu); + unsigned int j; + unsigned int max_freq = 0; - if (nr_running() == 1) { - dbgpr("up %d: tgt=%d nothing else running\n", cpu, - pcpu->target_freq); + pcpu = &per_cpu(cpuinfo, cpu); + if (!down_read_trylock(&pcpu->enable_sem)) + continue; + if (!pcpu->governor_enabled) { + up_read(&pcpu->enable_sem); + continue; } - smp_rmb(); + for_each_cpu(j, pcpu->policy->cpus) { + struct cpufreq_interactive_cpuinfo *pjcpu = + &per_cpu(cpuinfo, j); - if (!pcpu->governor_enabled) - continue; + if (pjcpu->target_freq > max_freq) + max_freq = pjcpu->target_freq; + } - __cpufreq_driver_target(pcpu->policy, - pcpu->target_freq, - CPUFREQ_RELATION_H); - pcpu->freq_change_time_in_idle = - get_cpu_idle_time_us(cpu, - &pcpu->freq_change_time); - dbgpr("up %d: set tgt=%d (actual=%d)\n", cpu, pcpu->target_freq, pcpu->policy->cur); + if (max_freq != pcpu->policy->cur) + __cpufreq_driver_target(pcpu->policy, + max_freq, + CPUFREQ_RELATION_H); + trace_cpufreq_interactive_setspeed(cpu, + pcpu->target_freq, + pcpu->policy->cur); + + up_read(&pcpu->enable_sem); } } return 0; } -static void cpufreq_interactive_freq_down(struct work_struct *work) +static void cpufreq_interactive_boost(void) { - unsigned int cpu; - cpumask_t tmp_mask; + int i; + int anyboost = 0; unsigned long flags; struct cpufreq_interactive_cpuinfo *pcpu; - spin_lock_irqsave(&down_cpumask_lock, flags); - tmp_mask = down_cpumask; - cpumask_clear(&down_cpumask); - spin_unlock_irqrestore(&down_cpumask_lock, flags); + spin_lock_irqsave(&speedchange_cpumask_lock, flags); + + for_each_online_cpu(i) { + pcpu = &per_cpu(cpuinfo, i); + + if (pcpu->target_freq < hispeed_freq) { + pcpu->target_freq = hispeed_freq; + cpumask_set_cpu(i, &speedchange_cpumask); + pcpu->hispeed_validate_time = + ktime_to_us(ktime_get()); + anyboost = 1; + } + + /* + * Set floor freq and (re)start timer for when last + * validated. 
+ */ + + pcpu->floor_freq = hispeed_freq; + pcpu->floor_validate_time = ktime_to_us(ktime_get()); + } + + spin_unlock_irqrestore(&speedchange_cpumask_lock, flags); + + if (anyboost) + wake_up_process(speedchange_task); +} + +static int cpufreq_interactive_notifier( + struct notifier_block *nb, unsigned long val, void *data) +{ + struct cpufreq_freqs *freq = data; + struct cpufreq_interactive_cpuinfo *pcpu; + int cpu; + unsigned long flags; + + if (val == CPUFREQ_POSTCHANGE) { + pcpu = &per_cpu(cpuinfo, freq->cpu); + if (!down_read_trylock(&pcpu->enable_sem)) + return 0; + if (!pcpu->governor_enabled) { + up_read(&pcpu->enable_sem); + return 0; + } + + for_each_cpu(cpu, pcpu->policy->cpus) { + struct cpufreq_interactive_cpuinfo *pjcpu = + &per_cpu(cpuinfo, cpu); + spin_lock_irqsave(&pjcpu->load_lock, flags); + update_load(cpu); + spin_unlock_irqrestore(&pjcpu->load_lock, flags); + } + + up_read(&pcpu->enable_sem); + } + return 0; +} - for_each_cpu(cpu, &tmp_mask) { - pcpu = &per_cpu(cpuinfo, cpu); +static struct notifier_block cpufreq_notifier_block = { + .notifier_call = cpufreq_interactive_notifier, +}; - smp_rmb(); +static unsigned int *get_tokenized_data(const char *buf, int *num_tokens) +{ + const char *cp; + int i; + int ntokens = 1; + unsigned int *tokenized_data; + int err = -EINVAL; + + cp = buf; + while ((cp = strpbrk(cp + 1, " :"))) + ntokens++; + + if (!(ntokens & 0x1)) + goto err; + + tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL); + if (!tokenized_data) { + err = -ENOMEM; + goto err; + } - if (!pcpu->governor_enabled) - continue; + cp = buf; + i = 0; + while (i < ntokens) { + if (sscanf(cp, "%u", &tokenized_data[i++]) != 1) + goto err_kfree; - __cpufreq_driver_target(pcpu->policy, - pcpu->target_freq, - CPUFREQ_RELATION_H); - pcpu->freq_change_time_in_idle = - get_cpu_idle_time_us(cpu, - &pcpu->freq_change_time); - dbgpr("down %d: set tgt=%d (actual=%d)\n", cpu, pcpu->target_freq, pcpu->policy->cur); + cp = strpbrk(cp, " :"); + if (!cp) + break; + cp++; } + + if (i != ntokens) + goto err_kfree; + + *num_tokens = ntokens; + return tokenized_data; + +err_kfree: + kfree(tokenized_data); +err: + return ERR_PTR(err); } -static ssize_t show_go_maxspeed_load(struct kobject *kobj, +static ssize_t show_target_loads( + struct kobject *kobj, struct attribute *attr, char *buf) +{ + int i; + ssize_t ret = 0; + unsigned long flags; + + spin_lock_irqsave(&target_loads_lock, flags); + + for (i = 0; i < ntarget_loads; i++) + ret += sprintf(buf + ret, "%u%s", target_loads[i], + i & 0x1 ? 
":" : " "); + + ret += sprintf(buf + ret, "\n"); + spin_unlock_irqrestore(&target_loads_lock, flags); + return ret; +} + +static ssize_t store_target_loads( + struct kobject *kobj, struct attribute *attr, const char *buf, + size_t count) +{ + int ntokens; + unsigned int *new_target_loads = NULL; + unsigned long flags; + + new_target_loads = get_tokenized_data(buf, &ntokens); + if (IS_ERR(new_target_loads)) + return PTR_RET(new_target_loads); + + spin_lock_irqsave(&target_loads_lock, flags); + if (target_loads != default_target_loads) + kfree(target_loads); + target_loads = new_target_loads; + ntarget_loads = ntokens; + spin_unlock_irqrestore(&target_loads_lock, flags); + return count; +} + +static struct global_attr target_loads_attr = + __ATTR(target_loads, S_IRUGO | S_IWUSR, + show_target_loads, store_target_loads); + +static ssize_t show_above_hispeed_delay( + struct kobject *kobj, struct attribute *attr, char *buf) +{ + int i; + ssize_t ret = 0; + unsigned long flags; + + spin_lock_irqsave(&above_hispeed_delay_lock, flags); + + for (i = 0; i < nabove_hispeed_delay; i++) + ret += sprintf(buf + ret, "%u%s", above_hispeed_delay[i], + i & 0x1 ? ":" : " "); + + ret += sprintf(buf + ret, "\n"); + spin_unlock_irqrestore(&above_hispeed_delay_lock, flags); + return ret; +} + +static ssize_t store_above_hispeed_delay( + struct kobject *kobj, struct attribute *attr, const char *buf, + size_t count) +{ + int ntokens; + unsigned int *new_above_hispeed_delay = NULL; + unsigned long flags; + + new_above_hispeed_delay = get_tokenized_data(buf, &ntokens); + if (IS_ERR(new_above_hispeed_delay)) + return PTR_RET(new_above_hispeed_delay); + + spin_lock_irqsave(&above_hispeed_delay_lock, flags); + if (above_hispeed_delay != default_above_hispeed_delay) + kfree(above_hispeed_delay); + above_hispeed_delay = new_above_hispeed_delay; + nabove_hispeed_delay = ntokens; + spin_unlock_irqrestore(&above_hispeed_delay_lock, flags); + return count; + +} + +static struct global_attr above_hispeed_delay_attr = + __ATTR(above_hispeed_delay, S_IRUGO | S_IWUSR, + show_above_hispeed_delay, store_above_hispeed_delay); + +static ssize_t show_hispeed_freq(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", hispeed_freq); +} + +static ssize_t store_hispeed_freq(struct kobject *kobj, + struct attribute *attr, const char *buf, + size_t count) +{ + int ret; + long unsigned int val; + + ret = strict_strtoul(buf, 0, &val); + if (ret < 0) + return ret; + hispeed_freq = val; + return count; +} + +static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644, + show_hispeed_freq, store_hispeed_freq); + + +static ssize_t show_go_hispeed_load(struct kobject *kobj, struct attribute *attr, char *buf) { - return sprintf(buf, "%lu\n", go_maxspeed_load); + return sprintf(buf, "%lu\n", go_hispeed_load); } -static ssize_t store_go_maxspeed_load(struct kobject *kobj, +static ssize_t store_go_hispeed_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { - return strict_strtoul(buf, 0, &go_maxspeed_load); + int ret; + unsigned long val; + + ret = strict_strtoul(buf, 0, &val); + if (ret < 0) + return ret; + go_hispeed_load = val; + return count; } -static struct global_attr go_maxspeed_load_attr = __ATTR(go_maxspeed_load, 0644, - show_go_maxspeed_load, store_go_maxspeed_load); +static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644, + show_go_hispeed_load, store_go_hispeed_load); static ssize_t show_min_sample_time(struct kobject *kobj, struct 
attribute *attr, char *buf) @@ -551,15 +829,170 @@ static ssize_t show_min_sample_time(struct kobject *kobj, static ssize_t store_min_sample_time(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { - return strict_strtoul(buf, 0, &min_sample_time); + int ret; + unsigned long val; + + ret = strict_strtoul(buf, 0, &val); + if (ret < 0) + return ret; + min_sample_time = val; + return count; } static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644, show_min_sample_time, store_min_sample_time); +static ssize_t show_timer_rate(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", timer_rate); +} + +static ssize_t store_timer_rate(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + int ret; + unsigned long val; + + ret = strict_strtoul(buf, 0, &val); + if (ret < 0) + return ret; + timer_rate = val; + return count; +} + +static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644, + show_timer_rate, store_timer_rate); + +static ssize_t show_timer_slack( + struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", timer_slack_val); +} + +static ssize_t store_timer_slack( + struct kobject *kobj, struct attribute *attr, const char *buf, + size_t count) +{ + int ret; + unsigned long val; + + ret = kstrtol(buf, 10, &val); + if (ret < 0) + return ret; + + timer_slack_val = val; + return count; +} + +define_one_global_rw(timer_slack); + +static ssize_t show_boost(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + return sprintf(buf, "%d\n", boost_val); +} + +static ssize_t store_boost(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + int ret; + unsigned long val; + + ret = kstrtoul(buf, 0, &val); + if (ret < 0) + return ret; + + boost_val = val; + + if (boost_val) { + trace_cpufreq_interactive_boost("on"); + cpufreq_interactive_boost(); + } else { + trace_cpufreq_interactive_unboost("off"); + } + + return count; +} + +define_one_global_rw(boost); + +static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + int ret; + unsigned long val; + + ret = kstrtoul(buf, 0, &val); + if (ret < 0) + return ret; + + boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val; + trace_cpufreq_interactive_boost("pulse"); + cpufreq_interactive_boost(); + return count; +} + +static struct global_attr boostpulse = + __ATTR(boostpulse, 0200, NULL, store_boostpulse); + +static ssize_t show_boostpulse_duration( + struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", boostpulse_duration_val); +} + +static ssize_t store_boostpulse_duration( + struct kobject *kobj, struct attribute *attr, const char *buf, + size_t count) +{ + int ret; + unsigned long val; + + ret = kstrtoul(buf, 0, &val); + if (ret < 0) + return ret; + + boostpulse_duration_val = val; + return count; +} + +define_one_global_rw(boostpulse_duration); + +static ssize_t show_io_is_busy(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", io_is_busy); +} + +static ssize_t store_io_is_busy(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + int ret; + unsigned long val; + + ret = kstrtoul(buf, 0, &val); + if (ret < 0) + return ret; + io_is_busy = val; + return count; +} + +static struct global_attr io_is_busy_attr = __ATTR(io_is_busy, 0644, + show_io_is_busy, store_io_is_busy); + static 
struct attribute *interactive_attributes[] = { - &go_maxspeed_load_attr.attr, + &target_loads_attr.attr, + &above_hispeed_delay_attr.attr, + &hispeed_freq_attr.attr, + &go_hispeed_load_attr.attr, &min_sample_time_attr.attr, + &timer_rate_attr.attr, + &timer_slack.attr, + &boost.attr, + &boostpulse.attr, + &boostpulse_duration.attr, + &io_is_busy_attr.attr, NULL, }; @@ -568,125 +1001,169 @@ static struct attribute_group interactive_attr_group = { .name = "interactive", }; -static int cpufreq_governor_interactive(struct cpufreq_policy *new_policy, +static int cpufreq_interactive_idle_notifier(struct notifier_block *nb, + unsigned long val, + void *data) +{ + switch (val) { + case IDLE_START: + cpufreq_interactive_idle_start(); + break; + case IDLE_END: + cpufreq_interactive_idle_end(); + break; + } + + return 0; +} + +static struct notifier_block cpufreq_interactive_idle_nb = { + .notifier_call = cpufreq_interactive_idle_notifier, +}; + +static int cpufreq_governor_interactive(struct cpufreq_policy *policy, unsigned int event) { int rc; - struct cpufreq_interactive_cpuinfo *pcpu = - &per_cpu(cpuinfo, new_policy->cpu); + unsigned int j; + struct cpufreq_interactive_cpuinfo *pcpu; + struct cpufreq_frequency_table *freq_table; switch (event) { case CPUFREQ_GOV_START: - if (!cpu_online(new_policy->cpu)) + if (!cpu_online(policy->cpu)) return -EINVAL; - pcpu->policy = new_policy; - pcpu->freq_table = cpufreq_frequency_get_table(new_policy->cpu); - pcpu->target_freq = new_policy->cur; - pcpu->freq_change_time_in_idle = - get_cpu_idle_time_us(new_policy->cpu, - &pcpu->freq_change_time); - pcpu->governor_enabled = 1; - smp_wmb(); + mutex_lock(&gov_lock); + + freq_table = + cpufreq_frequency_get_table(policy->cpu); + if (!hispeed_freq) + hispeed_freq = policy->max; + + for_each_cpu(j, policy->cpus) { + unsigned long expires; + + pcpu = &per_cpu(cpuinfo, j); + pcpu->policy = policy; + pcpu->target_freq = policy->cur; + pcpu->freq_table = freq_table; + pcpu->floor_freq = pcpu->target_freq; + pcpu->floor_validate_time = + ktime_to_us(ktime_get()); + pcpu->hispeed_validate_time = + pcpu->floor_validate_time; + down_write(&pcpu->enable_sem); + expires = jiffies + usecs_to_jiffies(timer_rate); + pcpu->cpu_timer.expires = expires; + add_timer_on(&pcpu->cpu_timer, j); + if (timer_slack_val >= 0) { + expires += usecs_to_jiffies(timer_slack_val); + pcpu->cpu_slack_timer.expires = expires; + add_timer_on(&pcpu->cpu_slack_timer, j); + } + pcpu->governor_enabled = 1; + up_write(&pcpu->enable_sem); + } + /* * Do not register the idle hook and create sysfs * entries if we have already done so. */ - if (atomic_inc_return(&active_count) > 1) + if (++active_count > 1) { + mutex_unlock(&gov_lock); return 0; + } rc = sysfs_create_group(cpufreq_global_kobject, &interactive_attr_group); - if (rc) + if (rc) { + mutex_unlock(&gov_lock); return rc; + } - pm_idle_old = pm_idle; - pm_idle = cpufreq_interactive_idle; + idle_notifier_register(&cpufreq_interactive_idle_nb); + cpufreq_register_notifier( + &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); + mutex_unlock(&gov_lock); break; case CPUFREQ_GOV_STOP: - pcpu->governor_enabled = 0; - smp_wmb(); - del_timer_sync(&pcpu->cpu_timer); - flush_work(&freq_scale_down_work); - /* - * Reset idle exit time since we may cancel the timer - * before it can run after the last idle exit time, - * to avoid tripping the check in idle exit for a timer - * that is trying to run. 
- */ - pcpu->idle_exit_time = 0; + mutex_lock(&gov_lock); + for_each_cpu(j, policy->cpus) { + pcpu = &per_cpu(cpuinfo, j); + down_write(&pcpu->enable_sem); + pcpu->governor_enabled = 0; + del_timer_sync(&pcpu->cpu_timer); + del_timer_sync(&pcpu->cpu_slack_timer); + up_write(&pcpu->enable_sem); + } - if (atomic_dec_return(&active_count) > 0) + if (--active_count > 0) { + mutex_unlock(&gov_lock); return 0; + } + cpufreq_unregister_notifier( + &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); + idle_notifier_unregister(&cpufreq_interactive_idle_nb); sysfs_remove_group(cpufreq_global_kobject, &interactive_attr_group); + mutex_unlock(&gov_lock); - pm_idle = pm_idle_old; break; case CPUFREQ_GOV_LIMITS: - if (new_policy->max < new_policy->cur) - __cpufreq_driver_target(new_policy, - new_policy->max, CPUFREQ_RELATION_H); - else if (new_policy->min > new_policy->cur) - __cpufreq_driver_target(new_policy, - new_policy->min, CPUFREQ_RELATION_L); + if (policy->max < policy->cur) + __cpufreq_driver_target(policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > policy->cur) + __cpufreq_driver_target(policy, + policy->min, CPUFREQ_RELATION_L); break; } return 0; } +static void cpufreq_interactive_nop_timer(unsigned long data) +{ +} + static int __init cpufreq_interactive_init(void) { unsigned int i; struct cpufreq_interactive_cpuinfo *pcpu; struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; - go_maxspeed_load = DEFAULT_GO_MAXSPEED_LOAD; - min_sample_time = DEFAULT_MIN_SAMPLE_TIME; - /* Initalize per-cpu timers */ for_each_possible_cpu(i) { pcpu = &per_cpu(cpuinfo, i); - init_timer(&pcpu->cpu_timer); + init_timer_deferrable(&pcpu->cpu_timer); pcpu->cpu_timer.function = cpufreq_interactive_timer; pcpu->cpu_timer.data = i; + init_timer(&pcpu->cpu_slack_timer); + pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer; + spin_lock_init(&pcpu->load_lock); + init_rwsem(&pcpu->enable_sem); } - up_task = kthread_create(cpufreq_interactive_up_task, NULL, - "kinteractiveup"); - if (IS_ERR(up_task)) - return PTR_ERR(up_task); - - sched_setscheduler_nocheck(up_task, SCHED_FIFO, ¶m); - get_task_struct(up_task); - - /* No rescuer thread, bind to CPU queuing the work for possibly - warm cache (probably doesn't matter much). */ - down_wq = alloc_workqueue("knteractive_down", 0, 1); + spin_lock_init(&target_loads_lock); + spin_lock_init(&speedchange_cpumask_lock); + spin_lock_init(&above_hispeed_delay_lock); + mutex_init(&gov_lock); + speedchange_task = + kthread_create(cpufreq_interactive_speedchange_task, NULL, + "cfinteractive"); + if (IS_ERR(speedchange_task)) + return PTR_ERR(speedchange_task); - if (! 
down_wq) - goto err_freeuptask; + sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, ¶m); + get_task_struct(speedchange_task); - INIT_WORK(&freq_scale_down_work, - cpufreq_interactive_freq_down); - - spin_lock_init(&up_cpumask_lock); - spin_lock_init(&down_cpumask_lock); - -#if DEBUG - spin_lock_init(&dbgpr_lock); - dbg_proc = create_proc_entry("igov", S_IWUSR | S_IRUGO, NULL); - dbg_proc->read_proc = dbg_proc_read; -#endif + /* NB: wake up so the thread does not look hung to the freezer */ + wake_up_process(speedchange_task); return cpufreq_register_governor(&cpufreq_gov_interactive); - -err_freeuptask: - put_task_struct(up_task); - return -ENOMEM; } #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE @@ -698,9 +1175,8 @@ module_init(cpufreq_interactive_init); static void __exit cpufreq_interactive_exit(void) { cpufreq_unregister_governor(&cpufreq_gov_interactive); - kthread_stop(up_task); - put_task_struct(up_task); - destroy_workqueue(down_wq); + kthread_stop(speedchange_task); + put_task_struct(speedchange_task); } module_exit(cpufreq_interactive_exit); diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index c631f27a3dcc7..9d236fe4e2eb5 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -37,6 +37,10 @@ #define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) #define MIN_FREQUENCY_UP_THRESHOLD (11) #define MAX_FREQUENCY_UP_THRESHOLD (100) +#define DEFAULT_FREQ_BOOST_TIME (500000) +#define MAX_FREQ_BOOST_TIME (5000000) + +u64 freq_boosted_time; /* * The polling frequency of this governor depends on the capability of @@ -99,13 +103,10 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); static unsigned int dbs_enable; /* number of CPUs using this policy */ /* - * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on - * different CPUs. It protects dbs_enable in governor start/stop. + * dbs_mutex protects dbs_enable in governor start/stop. 
*/ static DEFINE_MUTEX(dbs_mutex); -static struct workqueue_struct *kondemand_wq; - static struct dbs_tuners { unsigned int sampling_rate; unsigned int up_threshold; @@ -114,12 +115,17 @@ static struct dbs_tuners { unsigned int sampling_down_factor; unsigned int powersave_bias; unsigned int io_is_busy; + unsigned int boosted; + unsigned int freq_boost_time; + unsigned int boostfreq; } dbs_tuners_ins = { .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, .ignore_nice = 0, .powersave_bias = 0, + .freq_boost_time = DEFAULT_FREQ_BOOST_TIME, + .boostfreq = CONFIG_MSM_CPU_FREQ_MAX, }; static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, @@ -237,21 +243,12 @@ static void ondemand_powersave_bias_init(void) /************************** sysfs interface ************************/ -static ssize_t show_sampling_rate_max(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - printk_once(KERN_INFO "CPUFREQ: ondemand sampling_rate_max " - "sysfs file is deprecated - used by: %s\n", current->comm); - return sprintf(buf, "%u\n", -1U); -} - static ssize_t show_sampling_rate_min(struct kobject *kobj, struct attribute *attr, char *buf) { return sprintf(buf, "%u\n", min_sampling_rate); } -define_one_global_ro(sampling_rate_max); define_one_global_ro(sampling_rate_min); /* cpufreq_ondemand Governor Tunables */ @@ -267,32 +264,9 @@ show_one(up_threshold, up_threshold); show_one(sampling_down_factor, sampling_down_factor); show_one(ignore_nice_load, ignore_nice); show_one(powersave_bias, powersave_bias); - -/*** delete after deprecation time ***/ - -#define DEPRECATION_MSG(file_name) \ - printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \ - "interface is deprecated - " #file_name "\n"); - -#define show_one_old(file_name) \ -static ssize_t show_##file_name##_old \ -(struct cpufreq_policy *unused, char *buf) \ -{ \ - printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \ - "interface is deprecated - " #file_name "\n"); \ - return show_##file_name(NULL, NULL, buf); \ -} -show_one_old(sampling_rate); -show_one_old(up_threshold); -show_one_old(ignore_nice_load); -show_one_old(powersave_bias); -show_one_old(sampling_rate_min); -show_one_old(sampling_rate_max); - -cpufreq_freq_attr_ro_old(sampling_rate_min); -cpufreq_freq_attr_ro_old(sampling_rate_max); - -/*** delete after deprecation time ***/ +show_one(boostpulse, boosted); +show_one(boosttime, freq_boost_time); +show_one(boostfreq, boostfreq); static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, const char *buf, size_t count) @@ -302,11 +276,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, ret = sscanf(buf, "%u", &input); if (ret != 1) return -EINVAL; - - mutex_lock(&dbs_mutex); dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); - mutex_unlock(&dbs_mutex); - return count; } @@ -319,11 +289,7 @@ static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, ret = sscanf(buf, "%u", &input); if (ret != 1) return -EINVAL; - - mutex_lock(&dbs_mutex); dbs_tuners_ins.io_is_busy = !!input; - mutex_unlock(&dbs_mutex); - return count; } @@ -338,11 +304,7 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, input < MIN_FREQUENCY_UP_THRESHOLD) { return -EINVAL; } - - mutex_lock(&dbs_mutex); dbs_tuners_ins.up_threshold = input; - mutex_unlock(&dbs_mutex); - return count; } @@ -355,7 +317,6 @@ static ssize_t store_sampling_down_factor(struct kobject *a, if 
(ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) return -EINVAL; - mutex_lock(&dbs_mutex); dbs_tuners_ins.sampling_down_factor = input; /* Reset down sampling multiplier in case it was active */ @@ -364,8 +325,6 @@ static ssize_t store_sampling_down_factor(struct kobject *a, dbs_info = &per_cpu(od_cpu_dbs_info, j); dbs_info->rate_mult = 1; } - mutex_unlock(&dbs_mutex); - return count; } @@ -384,9 +343,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, if (input > 1) input = 1; - mutex_lock(&dbs_mutex); if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ - mutex_unlock(&dbs_mutex); return count; } dbs_tuners_ins.ignore_nice = input; @@ -401,8 +358,6 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; } - mutex_unlock(&dbs_mutex); - return count; } @@ -419,11 +374,41 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, if (input > 1000) input = 1000; - mutex_lock(&dbs_mutex); dbs_tuners_ins.powersave_bias = input; ondemand_powersave_bias_init(); - mutex_unlock(&dbs_mutex); + return count; +} + + +static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + int ret; + unsigned int input; + + ret = sscanf(buf, "%u", &input); + if (ret < 0) + return ret; + + if (input > 1 && input <= MAX_FREQ_BOOST_TIME) + dbs_tuners_ins.freq_boost_time = input; + else + dbs_tuners_ins.freq_boost_time = DEFAULT_FREQ_BOOST_TIME; + + dbs_tuners_ins.boosted = 1; + freq_boosted_time = ktime_to_us(ktime_get()); + return count; +} +static ssize_t store_boostfreq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.boostfreq = input; return count; } @@ -433,9 +418,10 @@ define_one_global_rw(up_threshold); define_one_global_rw(sampling_down_factor); define_one_global_rw(ignore_nice_load); define_one_global_rw(powersave_bias); +define_one_global_rw(boostpulse); +define_one_global_rw(boostfreq); static struct attribute *dbs_attributes[] = { - &sampling_rate_max.attr, &sampling_rate_min.attr, &sampling_rate.attr, &up_threshold.attr, @@ -443,6 +429,8 @@ static struct attribute *dbs_attributes[] = { &ignore_nice_load.attr, &powersave_bias.attr, &io_is_busy.attr, + &boostpulse.attr, + &boostfreq.attr, NULL }; @@ -451,43 +439,6 @@ static struct attribute_group dbs_attr_group = { .name = "ondemand", }; -/*** delete after deprecation time ***/ - -#define write_one_old(file_name) \ -static ssize_t store_##file_name##_old \ -(struct cpufreq_policy *unused, const char *buf, size_t count) \ -{ \ - printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \ - "interface is deprecated - " #file_name "\n"); \ - return store_##file_name(NULL, NULL, buf, count); \ -} -write_one_old(sampling_rate); -write_one_old(up_threshold); -write_one_old(ignore_nice_load); -write_one_old(powersave_bias); - -cpufreq_freq_attr_rw_old(sampling_rate); -cpufreq_freq_attr_rw_old(up_threshold); -cpufreq_freq_attr_rw_old(ignore_nice_load); -cpufreq_freq_attr_rw_old(powersave_bias); - -static struct attribute *dbs_attributes_old[] = { - &sampling_rate_max_old.attr, - &sampling_rate_min_old.attr, - &sampling_rate_old.attr, - &up_threshold_old.attr, - &ignore_nice_load_old.attr, - &powersave_bias_old.attr, - NULL -}; - -static struct attribute_group dbs_attr_group_old = { - .attrs = dbs_attributes_old, - 
.name = "ondemand", -}; - -/*** delete after deprecation time ***/ - /************************** sysfs end ************************/ static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) @@ -507,10 +458,21 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) struct cpufreq_policy *policy; unsigned int j; + unsigned int boostfreq; this_dbs_info->freq_lo = 0; policy = this_dbs_info->cur_policy; - + /* Only core0 controls the boost */ + if (dbs_tuners_ins.boosted && policy->cpu == 0) { + if (ktime_to_us(ktime_get()) - freq_boosted_time >= + dbs_tuners_ins.freq_boost_time) { + dbs_tuners_ins.boosted = 0; + } + } + if (dbs_tuners_ins.boostfreq != 0) + boostfreq = dbs_tuners_ins.boostfreq; + else + boostfreq = policy->max; /* * Every sampling_rate, we check, if current idle time is less * than 20% (default), then we try to increase frequency @@ -601,6 +563,13 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) return; } + /* check for frequency boost */ + if (dbs_tuners_ins.boosted && policy->cur < boostfreq) { + dbs_freq_increase(policy, boostfreq); + dbs_tuners_ins.boostfreq = policy->cur; + return; + } + /* Check for frequency decrease */ /* if we cannot reduce the frequency anymore, break out early */ if (policy->cur == policy->min) @@ -619,6 +588,10 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential); + if (dbs_tuners_ins.boosted && + freq_next < boostfreq) { + freq_next = boostfreq; + } /* No longer fully busy, reset rate_mult */ this_dbs_info->rate_mult = 1; @@ -644,12 +617,7 @@ static void do_dbs_timer(struct work_struct *work) unsigned int cpu = dbs_info->cpu; int sample_type = dbs_info->sample_type; - /* We want all CPUs to do sampling nearly on same jiffy */ - int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate - * dbs_info->rate_mult); - - if (num_online_cpus() > 1) - delay -= jiffies % delay; + int delay; mutex_lock(&dbs_info->timer_mutex); @@ -662,12 +630,22 @@ static void do_dbs_timer(struct work_struct *work) /* Setup timer for SUB_SAMPLE */ dbs_info->sample_type = DBS_SUB_SAMPLE; delay = dbs_info->freq_hi_jiffies; + } else { + /* We want all CPUs to do sampling nearly on + * same jiffy + */ + delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate + * dbs_info->rate_mult); + + if (num_online_cpus() > 1) + delay -= jiffies % delay; } } else { __cpufreq_driver_target(dbs_info->cur_policy, dbs_info->freq_lo, CPUFREQ_RELATION_H); + delay = dbs_info->freq_lo_jiffies; } - queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay); + schedule_delayed_work_on(cpu, &dbs_info->work, delay); mutex_unlock(&dbs_info->timer_mutex); } @@ -681,8 +659,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) dbs_info->sample_type = DBS_NORMAL_SAMPLE; INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); - queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work, - delay); + schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); } static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) @@ -730,12 +707,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, mutex_lock(&dbs_mutex); - rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - dbs_enable++; for_each_cpu(j, policy->cpus) { struct cpu_dbs_info_s *j_dbs_info; @@ -788,7 +759,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, dbs_timer_exit(this_dbs_info); 
mutex_lock(&dbs_mutex); - sysfs_remove_group(&policy->kobj, &dbs_attr_group_old); mutex_destroy(&this_dbs_info->timer_mutex); dbs_enable--; mutex_unlock(&dbs_mutex); @@ -814,7 +784,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, static int __init cpufreq_gov_dbs_init(void) { - int err; cputime64_t wall; u64 idle_time; int cpu = get_cpu(); @@ -838,22 +807,12 @@ static int __init cpufreq_gov_dbs_init(void) MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); } - kondemand_wq = create_workqueue("kondemand"); - if (!kondemand_wq) { - printk(KERN_ERR "Creation of kondemand failed\n"); - return -EFAULT; - } - err = cpufreq_register_governor(&cpufreq_gov_ondemand); - if (err) - destroy_workqueue(kondemand_wq); - - return err; + return cpufreq_register_governor(&cpufreq_gov_ondemand); } static void __exit cpufreq_gov_dbs_exit(void) { cpufreq_unregister_governor(&cpufreq_gov_ondemand); - destroy_workqueue(kondemand_wq); } diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c index 7e2e515087f89..f13a8a9af6a13 100644 --- a/drivers/cpufreq/cpufreq_performance.c +++ b/drivers/cpufreq/cpufreq_performance.c @@ -15,9 +15,6 @@ #include #include -#define dprintk(msg...) \ - cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "performance", msg) - static int cpufreq_governor_performance(struct cpufreq_policy *policy, unsigned int event) @@ -25,7 +22,7 @@ static int cpufreq_governor_performance(struct cpufreq_policy *policy, switch (event) { case CPUFREQ_GOV_START: case CPUFREQ_GOV_LIMITS: - dprintk("setting to %u kHz because of event %u\n", + pr_debug("setting to %u kHz because of event %u\n", policy->max, event); __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c index e6db5faf3eb11..4c2eb512f2bc3 100644 --- a/drivers/cpufreq/cpufreq_powersave.c +++ b/drivers/cpufreq/cpufreq_powersave.c @@ -15,16 +15,13 @@ #include #include -#define dprintk(msg...) \ - cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "powersave", msg) - static int cpufreq_governor_powersave(struct cpufreq_policy *policy, unsigned int event) { switch (event) { case CPUFREQ_GOV_START: case CPUFREQ_GOV_LIMITS: - dprintk("setting to %u kHz because of event %u\n", + pr_debug("setting to %u kHz because of event %u\n", policy->min, event); __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); diff --git a/drivers/cpufreq/cpufreq_smartass2.c b/drivers/cpufreq/cpufreq_smartass2.c new file mode 100644 index 0000000000000..36480b8957421 --- /dev/null +++ b/drivers/cpufreq/cpufreq_smartass2.c @@ -0,0 +1,868 @@ +/* + * drivers/cpufreq/cpufreq_smartass2.c + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * Author: Erasmux
+ *
+ * Based on the interactive governor by Mike Chan (mike@android.com),
+ * which was adapted to the 2.6.29 kernel by Nadlabak (pavel@doshaska.net)
+ *
+ * SMP support based on mod by faux123
+ *
+ * For a general overview of smartassV2 see the relevant part in
+ * Documentation/cpu-freq/governors.txt
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+/******************** Tunable parameters: ********************/
+
+/*
+ * The "ideal" frequency to use when awake. The governor will ramp up faster
+ * towards the ideal frequency and slower after it has passed it. Similarly,
+ * lowering the frequency towards the ideal frequency is faster than below it.
+ */
+#define DEFAULT_AWAKE_IDEAL_FREQ 768000
+static unsigned int awake_ideal_freq;
+
+/*
+ * The "ideal" frequency to use when suspended.
+ * When set to 0, the governor will not track the suspended state (meaning
+ * that practically when sleep_ideal_freq==0 the awake_ideal_freq is used
+ * also when suspended).
+ */
+#define DEFAULT_SLEEP_IDEAL_FREQ 245000
+static unsigned int sleep_ideal_freq;
+
+/*
+ * Frequency delta when ramping up above the ideal frequency.
+ * Zero disables the step and causes a jump straight to the max frequency.
+ * When below the ideal frequency we always ramp up to the ideal freq.
+ */
+#define DEFAULT_RAMP_UP_STEP 256000
+static unsigned int ramp_up_step;
+
+/*
+ * Frequency delta when ramping down below the ideal frequency.
+ * Zero disables the step and the ramp down is calculated from the load heuristic.
+ * When above the ideal frequency we always ramp down to the ideal freq.
+ */
+#define DEFAULT_RAMP_DOWN_STEP 256000
+static unsigned int ramp_down_step;
+
+/*
+ * CPU freq will be increased if measured load > max_cpu_load.
+ */
+#define DEFAULT_MAX_CPU_LOAD 50
+static unsigned long max_cpu_load;
+
+/*
+ * CPU freq will be decreased if measured load < min_cpu_load.
+ */
+#define DEFAULT_MIN_CPU_LOAD 25
+static unsigned long min_cpu_load;
+
+/*
+ * The minimum amount of time to spend at a frequency before we can ramp up.
+ * Notice we ignore this when we are below the ideal frequency.
+ */
+#define DEFAULT_UP_RATE_US 48000
+static unsigned long up_rate_us;
+
+/*
+ * The minimum amount of time to spend at a frequency before we can ramp down.
+ * Notice we ignore this when we are above the ideal frequency.
+ */
+#define DEFAULT_DOWN_RATE_US 99000
+static unsigned long down_rate_us;
+
+/*
+ * The frequency to set when waking up from sleep.
+ * When sleep_ideal_freq=0 this will have no effect.
+ */
+#define DEFAULT_SLEEP_WAKEUP_FREQ 99999999
+static unsigned int sleep_wakeup_freq;
+
+/*
+ * Sampling rate, I highly recommend leaving it at 2.
+ */ +#define DEFAULT_SAMPLE_RATE_JIFFIES 2 +static unsigned int sample_rate_jiffies; + + +/*************** End of tunables ***************/ + + +static void (*pm_idle_old)(void); +static atomic_t active_count = ATOMIC_INIT(0); + +struct smartass_info_s { + struct cpufreq_policy *cur_policy; + struct cpufreq_frequency_table *freq_table; + struct timer_list timer; + u64 time_in_idle; + u64 idle_exit_time; + u64 freq_change_time; + u64 freq_change_time_in_idle; + int cur_cpu_load; + int old_freq; + int ramp_dir; + unsigned int enable; + int ideal_speed; +}; +static DEFINE_PER_CPU(struct smartass_info_s, smartass_info); + +/* Workqueues handle frequency scaling */ +static struct workqueue_struct *up_wq; +static struct workqueue_struct *down_wq; +static struct work_struct freq_scale_work; + +static cpumask_t work_cpumask; +static spinlock_t cpumask_lock; + +static unsigned int suspended; + +#define dprintk(flag,msg...) do { \ + if (debug_mask & flag) printk(KERN_DEBUG msg); \ + } while (0) + +enum { + SMARTASS_DEBUG_JUMPS=1, + SMARTASS_DEBUG_LOAD=2, + SMARTASS_DEBUG_ALG=4 +}; + +/* + * Combination of the above debug flags. + */ +static unsigned long debug_mask; + +static int cpufreq_governor_smartass(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2 +static +#endif +struct cpufreq_governor cpufreq_gov_smartass2 = { + .name = "smartassV2", + .governor = cpufreq_governor_smartass, + .max_transition_latency = 9000000, + .owner = THIS_MODULE, +}; + +inline static void smartass_update_min_max(struct smartass_info_s *this_smartass, struct cpufreq_policy *policy, int suspend) { + if (suspend) { + this_smartass->ideal_speed = // sleep_ideal_freq; but make sure it obeys the policy min/max + policy->max > sleep_ideal_freq ? + (sleep_ideal_freq > policy->min ? sleep_ideal_freq : policy->min) : policy->max; + } else { + this_smartass->ideal_speed = // awake_ideal_freq; but make sure it obeys the policy min/max + policy->min < awake_ideal_freq ? + (awake_ideal_freq < policy->max ? 
awake_ideal_freq : policy->max) : policy->min; + } +} + +inline static void smartass_update_min_max_allcpus(void) { + unsigned int i; + for_each_online_cpu(i) { + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, i); + if (this_smartass->enable) + smartass_update_min_max(this_smartass,this_smartass->cur_policy,suspended); + } +} + +inline static unsigned int validate_freq(struct cpufreq_policy *policy, int freq) { + if (freq > (int)policy->max) + return policy->max; + if (freq < (int)policy->min) + return policy->min; + return freq; +} + +inline static void reset_timer(unsigned long cpu, struct smartass_info_s *this_smartass) { + this_smartass->time_in_idle = get_cpu_idle_time_us(cpu, &this_smartass->idle_exit_time); + mod_timer(&this_smartass->timer, jiffies + sample_rate_jiffies); +} + +inline static void work_cpumask_set(unsigned long cpu) { + unsigned long flags; + spin_lock_irqsave(&cpumask_lock, flags); + cpumask_set_cpu(cpu, &work_cpumask); + spin_unlock_irqrestore(&cpumask_lock, flags); +} + +inline static int work_cpumask_test_and_clear(unsigned long cpu) { + unsigned long flags; + int res = 0; + spin_lock_irqsave(&cpumask_lock, flags); + res = cpumask_test_and_clear_cpu(cpu, &work_cpumask); + spin_unlock_irqrestore(&cpumask_lock, flags); + return res; +} + +inline static int target_freq(struct cpufreq_policy *policy, struct smartass_info_s *this_smartass, + int new_freq, int old_freq, int prefered_relation) { + int index, target; + struct cpufreq_frequency_table *table = this_smartass->freq_table; + + if (new_freq == old_freq) + return 0; + new_freq = validate_freq(policy,new_freq); + if (new_freq == old_freq) + return 0; + + if (table && + !cpufreq_frequency_table_target(policy,table,new_freq,prefered_relation,&index)) + { + target = table[index].frequency; + if (target == old_freq) { + // if for example we are ramping up to *at most* current + ramp_up_step + // but there is no such frequency higher than the current, try also + // to ramp up to *at least* current + ramp_up_step. + if (new_freq > old_freq && prefered_relation==CPUFREQ_RELATION_H + && !cpufreq_frequency_table_target(policy,table,new_freq, + CPUFREQ_RELATION_L,&index)) + target = table[index].frequency; + // simlarly for ramping down: + else if (new_freq < old_freq && prefered_relation==CPUFREQ_RELATION_L + && !cpufreq_frequency_table_target(policy,table,new_freq, + CPUFREQ_RELATION_H,&index)) + target = table[index].frequency; + } + + if (target == old_freq) { + // We should not get here: + // If we got here we tried to change to a validated new_freq which is different + // from old_freq, so there is no reason for us to remain at same frequency. 
+ printk(KERN_WARNING "Smartass: frequency change failed: %d to %d => %d\n", + old_freq,new_freq,target); + return 0; + } + } + else target = new_freq; + + __cpufreq_driver_target(policy, target, prefered_relation); + + dprintk(SMARTASS_DEBUG_JUMPS,"SmartassQ: jumping from %d to %d => %d (%d)\n", + old_freq,new_freq,target,policy->cur); + + return target; +} + +static void cpufreq_smartass_timer(unsigned long cpu) +{ + u64 delta_idle; + u64 delta_time; + int cpu_load; + int old_freq; + u64 update_time; + u64 now_idle; + int queued_work = 0; + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu); + struct cpufreq_policy *policy = this_smartass->cur_policy; + + now_idle = get_cpu_idle_time_us(cpu, &update_time); + old_freq = policy->cur; + + if (this_smartass->idle_exit_time == 0 || update_time == this_smartass->idle_exit_time) + return; + + delta_idle = cputime64_sub(now_idle, this_smartass->time_in_idle); + delta_time = cputime64_sub(update_time, this_smartass->idle_exit_time); + + // If timer ran less than 1ms after short-term sample started, retry. + if (delta_time < 1000) { + if (!timer_pending(&this_smartass->timer)) + reset_timer(cpu,this_smartass); + return; + } + + if (delta_idle > delta_time) + cpu_load = 0; + else + cpu_load = 100 * (unsigned int)(delta_time - delta_idle) / (unsigned int)delta_time; + + dprintk(SMARTASS_DEBUG_LOAD,"smartassT @ %d: load %d (delta_time %llu)\n", + old_freq,cpu_load,delta_time); + + this_smartass->cur_cpu_load = cpu_load; + this_smartass->old_freq = old_freq; + + // Scale up if load is above max or if there where no idle cycles since coming out of idle, + // additionally, if we are at or above the ideal_speed, verify we have been at this frequency + // for at least up_rate_us: + if (cpu_load > max_cpu_load || delta_idle == 0) + { + if (old_freq < policy->max && + (old_freq < this_smartass->ideal_speed || delta_idle == 0 || + cputime64_sub(update_time, this_smartass->freq_change_time) >= up_rate_us)) + { + dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp up: load %d (delta_idle %llu)\n", + old_freq,cpu_load,delta_idle); + this_smartass->ramp_dir = 1; + work_cpumask_set(cpu); + queue_work(up_wq, &freq_scale_work); + queued_work = 1; + } + else this_smartass->ramp_dir = 0; + } + // Similarly for scale down: load should be below min and if we are at or below ideal + // frequency we require that we have been at this frequency for at least down_rate_us: + else if (cpu_load < min_cpu_load && old_freq > policy->min && + (old_freq > this_smartass->ideal_speed || + cputime64_sub(update_time, this_smartass->freq_change_time) >= down_rate_us)) + { + dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp down: load %d (delta_idle %llu)\n", + old_freq,cpu_load,delta_idle); + this_smartass->ramp_dir = -1; + work_cpumask_set(cpu); + queue_work(down_wq, &freq_scale_work); + queued_work = 1; + } + else this_smartass->ramp_dir = 0; + + // To avoid unnecessary load when the CPU is already at high load, we don't + // reset ourselves if we are at max speed. If and when there are idle cycles, + // the idle loop will activate the timer. + // Additionally, if we queued some work, the work task will reset the timer + // after it has done its adjustments. 
+ if (!queued_work && old_freq < policy->max) + reset_timer(cpu,this_smartass); +} + +static void cpufreq_idle(void) +{ + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); + struct cpufreq_policy *policy = this_smartass->cur_policy; + + if (!this_smartass->enable) { + pm_idle_old(); + return; + } + + if (policy->cur == policy->min && timer_pending(&this_smartass->timer)) + del_timer(&this_smartass->timer); + + pm_idle_old(); + + if (!timer_pending(&this_smartass->timer)) + reset_timer(smp_processor_id(), this_smartass); +} + +/* We use the same work function to sale up and down */ +static void cpufreq_smartass_freq_change_time_work(struct work_struct *work) +{ + unsigned int cpu; + int new_freq; + int old_freq; + int ramp_dir; + struct smartass_info_s *this_smartass; + struct cpufreq_policy *policy; + unsigned int relation = CPUFREQ_RELATION_L; + for_each_possible_cpu(cpu) { + this_smartass = &per_cpu(smartass_info, cpu); + if (!work_cpumask_test_and_clear(cpu)) + continue; + + ramp_dir = this_smartass->ramp_dir; + this_smartass->ramp_dir = 0; + + old_freq = this_smartass->old_freq; + policy = this_smartass->cur_policy; + + if (old_freq != policy->cur) { + // frequency was changed by someone else? + printk(KERN_WARNING "Smartass: frequency changed by 3rd party: %d to %d\n", + old_freq,policy->cur); + new_freq = old_freq; + } + else if (ramp_dir > 0 && nr_running() > 1) { + // ramp up logic: + if (old_freq < this_smartass->ideal_speed) + new_freq = this_smartass->ideal_speed; + else if (ramp_up_step) { + new_freq = old_freq + ramp_up_step; + relation = CPUFREQ_RELATION_H; + } + else { + new_freq = policy->max; + relation = CPUFREQ_RELATION_H; + } + dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp up: ramp_dir=%d ideal=%d\n", + old_freq,ramp_dir,this_smartass->ideal_speed); + } + else if (ramp_dir < 0) { + // ramp down logic: + if (old_freq > this_smartass->ideal_speed) { + new_freq = this_smartass->ideal_speed; + relation = CPUFREQ_RELATION_H; + } + else if (ramp_down_step) + new_freq = old_freq - ramp_down_step; + else { + // Load heuristics: Adjust new_freq such that, assuming a linear + // scaling of load vs. frequency, the load in the new frequency + // will be max_cpu_load: + new_freq = old_freq * this_smartass->cur_cpu_load / max_cpu_load; + if (new_freq > old_freq) // min_cpu_load > max_cpu_load ?! + new_freq = old_freq -1; + } + dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp down: ramp_dir=%d ideal=%d\n", + old_freq,ramp_dir,this_smartass->ideal_speed); + } + else { // ramp_dir==0 ?! Could the timer change its mind about a queued ramp up/down + // before the work task gets to run? 
+ // This may also happen if we refused to ramp up because the nr_running()==1 + new_freq = old_freq; + dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d nothing: ramp_dir=%d nr_running=%lu\n", + old_freq,ramp_dir,nr_running()); + } + + // do actual ramp up (returns 0, if frequency change failed): + new_freq = target_freq(policy,this_smartass,new_freq,old_freq,relation); + if (new_freq) + this_smartass->freq_change_time_in_idle = + get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time); + + // reset timer: + if (new_freq < policy->max) + reset_timer(cpu,this_smartass); + // if we are maxed out, it is pointless to use the timer + // (idle cycles wake up the timer when the timer comes) + else if (timer_pending(&this_smartass->timer)) + del_timer(&this_smartass->timer); + } +} + +static ssize_t show_debug_mask(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", debug_mask); +} + +static ssize_t store_debug_mask(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0) + debug_mask = input; + return res; +} + +static ssize_t show_up_rate_us(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", up_rate_us); +} + +static ssize_t store_up_rate_us(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0 && input <= 100000000) + up_rate_us = input; + return res; +} + +static ssize_t show_down_rate_us(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", down_rate_us); +} + +static ssize_t store_down_rate_us(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0 && input <= 100000000) + down_rate_us = input; + return res; +} + +static ssize_t show_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", sleep_ideal_freq); +} + +static ssize_t store_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) { + sleep_ideal_freq = input; + if (suspended) + smartass_update_min_max_allcpus(); + } + return res; +} + +static ssize_t show_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", sleep_wakeup_freq); +} + +static ssize_t store_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + sleep_wakeup_freq = input; + return res; +} + +static ssize_t show_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", awake_ideal_freq); +} + +static ssize_t store_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) { + awake_ideal_freq = input; + if (!suspended) + smartass_update_min_max_allcpus(); + } + return res; +} + +static ssize_t show_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return 
sprintf(buf, "%u\n", sample_rate_jiffies); +} + +static ssize_t store_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input <= 1000) + sample_rate_jiffies = input; + return res; +} + +static ssize_t show_ramp_up_step(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", ramp_up_step); +} + +static ssize_t store_ramp_up_step(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + ramp_up_step = input; + return res; +} + +static ssize_t show_ramp_down_step(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", ramp_down_step); +} + +static ssize_t store_ramp_down_step(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + ramp_down_step = input; + return res; +} + +static ssize_t show_max_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", max_cpu_load); +} + +static ssize_t store_max_cpu_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input <= 100) + max_cpu_load = input; + return res; +} + +static ssize_t show_min_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", min_cpu_load); +} + +static ssize_t store_min_cpu_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input < 100) + min_cpu_load = input; + return res; +} + +#define define_global_rw_attr(_name) \ +static struct global_attr _name##_attr = \ + __ATTR(_name, 0644, show_##_name, store_##_name) + +define_global_rw_attr(debug_mask); +define_global_rw_attr(up_rate_us); +define_global_rw_attr(down_rate_us); +define_global_rw_attr(sleep_ideal_freq); +define_global_rw_attr(sleep_wakeup_freq); +define_global_rw_attr(awake_ideal_freq); +define_global_rw_attr(sample_rate_jiffies); +define_global_rw_attr(ramp_up_step); +define_global_rw_attr(ramp_down_step); +define_global_rw_attr(max_cpu_load); +define_global_rw_attr(min_cpu_load); + +static struct attribute * smartass_attributes[] = { + &debug_mask_attr.attr, + &up_rate_us_attr.attr, + &down_rate_us_attr.attr, + &sleep_ideal_freq_attr.attr, + &sleep_wakeup_freq_attr.attr, + &awake_ideal_freq_attr.attr, + &sample_rate_jiffies_attr.attr, + &ramp_up_step_attr.attr, + &ramp_down_step_attr.attr, + &max_cpu_load_attr.attr, + &min_cpu_load_attr.attr, + NULL, +}; + +static struct attribute_group smartass_attr_group = { + .attrs = smartass_attributes, + .name = "smartass", +}; + +static int cpufreq_governor_smartass(struct cpufreq_policy *new_policy, + unsigned int event) +{ + unsigned int cpu = new_policy->cpu; + int rc; + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!new_policy->cur)) + return -EINVAL; + + this_smartass->cur_policy = new_policy; + + this_smartass->enable = 1; + + 
smartass_update_min_max(this_smartass,new_policy,suspended); + + this_smartass->freq_table = cpufreq_frequency_get_table(cpu); + if (!this_smartass->freq_table) + printk(KERN_WARNING "Smartass: no frequency table for cpu %d?!\n",cpu); + + smp_wmb(); + + // Do not register the idle hook and create sysfs + // entries if we have already done so. + if (atomic_inc_return(&active_count) <= 1) { + rc = sysfs_create_group(cpufreq_global_kobject, + &smartass_attr_group); + if (rc) + return rc; + + pm_idle_old = pm_idle; + pm_idle = cpufreq_idle; + } + + if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer)) + reset_timer(cpu,this_smartass); + + break; + + case CPUFREQ_GOV_LIMITS: + smartass_update_min_max(this_smartass,new_policy,suspended); + + if (this_smartass->cur_policy->cur > new_policy->max) { + dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new max freq: %d\n",new_policy->max); + __cpufreq_driver_target(this_smartass->cur_policy, + new_policy->max, CPUFREQ_RELATION_H); + } + else if (this_smartass->cur_policy->cur < new_policy->min) { + dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new min freq: %d\n",new_policy->min); + __cpufreq_driver_target(this_smartass->cur_policy, + new_policy->min, CPUFREQ_RELATION_L); + } + + if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer)) + reset_timer(cpu,this_smartass); + + break; + + case CPUFREQ_GOV_STOP: + this_smartass->enable = 0; + smp_wmb(); + del_timer(&this_smartass->timer); + flush_work(&freq_scale_work); + this_smartass->idle_exit_time = 0; + + if (atomic_dec_return(&active_count) <= 1) { + sysfs_remove_group(cpufreq_global_kobject, + &smartass_attr_group); + pm_idle = pm_idle_old; + } + break; + } + + return 0; +} + +static void smartass_suspend(int cpu, int suspend) +{ + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); + struct cpufreq_policy *policy = this_smartass->cur_policy; + unsigned int new_freq; + + if (!this_smartass->enable) + return; + + smartass_update_min_max(this_smartass,policy,suspend); + if (!suspend) { // resume at max speed: + new_freq = validate_freq(policy,sleep_wakeup_freq); + + dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: awaking at %d\n",new_freq); + + __cpufreq_driver_target(policy, new_freq, + CPUFREQ_RELATION_L); + } else { + // to avoid wakeup issues with quick sleep/wakeup don't change actual frequency when entering sleep + // to allow some time to settle down. Instead we just reset our statistics (and reset the timer). + // Eventually, the timer will adjust the frequency if necessary. 
+ + this_smartass->freq_change_time_in_idle = + get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time); + + dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: suspending at %d\n",policy->cur); + } + + reset_timer(smp_processor_id(),this_smartass); +} + +static void smartass_early_suspend(struct early_suspend *handler) { + int i; + if (suspended || sleep_ideal_freq==0) // disable behavior for sleep_ideal_freq==0 + return; + suspended = 1; + for_each_online_cpu(i) + smartass_suspend(i,1); +} + +static void smartass_late_resume(struct early_suspend *handler) { + int i; + if (!suspended) // already not suspended so nothing to do + return; + suspended = 0; + for_each_online_cpu(i) + smartass_suspend(i,0); +} + +static struct early_suspend smartass_power_suspend = { + .suspend = smartass_early_suspend, + .resume = smartass_late_resume, +#ifdef CONFIG_MACH_HERO + .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, +#endif +}; + +static int __init cpufreq_smartass_init(void) +{ + unsigned int i; + struct smartass_info_s *this_smartass; + debug_mask = 0; + up_rate_us = DEFAULT_UP_RATE_US; + down_rate_us = DEFAULT_DOWN_RATE_US; + sleep_ideal_freq = DEFAULT_SLEEP_IDEAL_FREQ; + sleep_wakeup_freq = DEFAULT_SLEEP_WAKEUP_FREQ; + awake_ideal_freq = DEFAULT_AWAKE_IDEAL_FREQ; + sample_rate_jiffies = DEFAULT_SAMPLE_RATE_JIFFIES; + ramp_up_step = DEFAULT_RAMP_UP_STEP; + ramp_down_step = DEFAULT_RAMP_DOWN_STEP; + max_cpu_load = DEFAULT_MAX_CPU_LOAD; + min_cpu_load = DEFAULT_MIN_CPU_LOAD; + + spin_lock_init(&cpumask_lock); + + suspended = 0; + + /* Initalize per-cpu data: */ + for_each_possible_cpu(i) { + this_smartass = &per_cpu(smartass_info, i); + this_smartass->enable = 0; + this_smartass->cur_policy = 0; + this_smartass->ramp_dir = 0; + this_smartass->time_in_idle = 0; + this_smartass->idle_exit_time = 0; + this_smartass->freq_change_time = 0; + this_smartass->freq_change_time_in_idle = 0; + this_smartass->cur_cpu_load = 0; + // intialize timer: + init_timer_deferrable(&this_smartass->timer); + this_smartass->timer.function = cpufreq_smartass_timer; + this_smartass->timer.data = i; + work_cpumask_test_and_clear(i); + } + + // Scale up is high priority + up_wq = alloc_workqueue("ksmartass_up", WQ_HIGHPRI, 1); + down_wq = alloc_workqueue("ksmartass_down", 0, 1); + if (!up_wq || !down_wq) + return -ENOMEM; + + INIT_WORK(&freq_scale_work, cpufreq_smartass_freq_change_time_work); + + register_early_suspend(&smartass_power_suspend); + + return cpufreq_register_governor(&cpufreq_gov_smartass2); +} + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2 +fs_initcall(cpufreq_smartass_init); +#else +module_init(cpufreq_smartass_init); +#endif + +static void __exit cpufreq_smartass_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_smartass2); + destroy_workqueue(up_wq); + destroy_workqueue(down_wq); +} + +module_exit(cpufreq_smartass_exit); + +MODULE_AUTHOR ("Erasmux"); +MODULE_DESCRIPTION ("'cpufreq_smartass2' - A smart cpufreq governor"); +MODULE_LICENSE ("GPL"); diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index ca3f24c296a03..1957eee7549ee 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c @@ -59,7 +59,7 @@ static int cpufreq_stats_update(unsigned int cpu) cur_time = get_jiffies_64(); spin_lock(&cpufreq_stats_lock); stat = per_cpu(cpufreq_stats_table, cpu); - if (stat->time_in_state) + if (stat->time_in_state && stat->last_index >= 0) stat->time_in_state[stat->last_index] = cputime64_add(stat->time_in_state[stat->last_index], cputime_sub(cur_time, 
stat->last_time)); @@ -159,23 +159,33 @@ static struct attribute_group stats_attr_group = { static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq) { int index; - for (index = 0; index < stat->max_state; index++) - if (stat->freq_table[index] == freq) - return index; - return -1; + for (index = 0; index < stat->state_num; index++) + if (stat->freq_table[index] > freq) + break; + return index - 1; /* below lowest freq in table: return -1 */ } +/* should be called late in the CPU removal sequence so that the stats + * memory is still available in case someone tries to use it. + */ static void cpufreq_stats_free_table(unsigned int cpu) { struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu); - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); - if (policy && policy->cpu == cpu) - sysfs_remove_group(&policy->kobj, &stats_attr_group); if (stat) { kfree(stat->time_in_state); kfree(stat); } per_cpu(cpufreq_stats_table, cpu) = NULL; +} + +/* must be called early in the CPU removal sequence (before + * cpufreq_remove_dev) so that policy is still valid. + */ +static void cpufreq_stats_free_sysfs(unsigned int cpu) +{ + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); + if (policy && policy->cpu == cpu) + sysfs_remove_group(&policy->kobj, &stats_attr_group); if (policy) cpufreq_cpu_put(policy); } @@ -183,7 +193,7 @@ static void cpufreq_stats_free_table(unsigned int cpu) static int cpufreq_stats_create_table(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table) { - unsigned int i, j, count = 0, ret = 0; + unsigned int i, j, k, l, count = 0, ret = 0; struct cpufreq_stats *stat; struct cpufreq_policy *data; unsigned int alloc_size; @@ -235,8 +245,16 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy, unsigned int freq = table[i].frequency; if (freq == CPUFREQ_ENTRY_INVALID) continue; - if (freq_table_get_index(stat, freq) == -1) - stat->freq_table[j++] = freq; + + /* Insert in sorted stat->freq_table */ + for (k = 0; k < j && stat->freq_table[k] < freq; k++) + ; + if (stat->freq_table[k] == freq) + continue; + for (l = j; l > k; l--) + stat->freq_table[l] = stat->freq_table[l - 1]; + stat->freq_table[k] = freq; + j++; } stat->state_num = j; spin_lock(&cpufreq_stats_lock); @@ -289,16 +307,15 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb, new_index = freq_table_get_index(stat, freq->new); cpufreq_stats_update(freq->cpu); - if (old_index == new_index) - return 0; - if (old_index == -1 || new_index == -1) + if (old_index == new_index) return 0; spin_lock(&cpufreq_stats_lock); stat->last_index = new_index; #ifdef CONFIG_CPU_FREQ_STAT_DETAILS - stat->trans_table[old_index * stat->max_state + new_index]++; + if (old_index >= 0 && new_index >= 0) + stat->trans_table[old_index * stat->max_state + new_index]++; #endif stat->total_trans++; spin_unlock(&cpufreq_stats_lock); @@ -339,6 +356,10 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb, break; case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: + cpufreq_stats_free_sysfs(cpu); + break; + case CPU_DEAD: + case CPU_DEAD_FROZEN: cpufreq_stats_free_table(cpu); break; case CPU_DOWN_FAILED: @@ -349,8 +370,8 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb, return NOTIFY_OK; } -static struct notifier_block cpufreq_stat_cpu_notifier __refdata = -{ +/* priority=1 so this will get called before cpufreq_remove_dev */ +static struct notifier_block cpufreq_stat_cpu_notifier __refdata = { .notifier_call = 
cpufreq_stat_cpu_callback, .priority = 1, }; @@ -399,6 +420,7 @@ static void __exit cpufreq_stats_exit(void) unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier); for_each_online_cpu(cpu) { cpufreq_stats_free_table(cpu); + cpufreq_stats_free_sysfs(cpu); } } diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c index 66d2d1d6c80f1..f231015904c0a 100644 --- a/drivers/cpufreq/cpufreq_userspace.c +++ b/drivers/cpufreq/cpufreq_userspace.c @@ -37,9 +37,6 @@ static DEFINE_PER_CPU(unsigned int, cpu_is_managed); static DEFINE_MUTEX(userspace_mutex); static int cpus_using_userspace_governor; -#define dprintk(msg...) \ - cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg) - /* keep track of frequency transitions */ static int userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val, @@ -50,7 +47,7 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val, if (!per_cpu(cpu_is_managed, freq->cpu)) return 0; - dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n", + pr_debug("saving cpu_cur_freq of cpu %u to be %u kHz\n", freq->cpu, freq->new); per_cpu(cpu_cur_freq, freq->cpu) = freq->new; @@ -73,7 +70,7 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq) { int ret = -EINVAL; - dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); + pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); mutex_lock(&userspace_mutex); if (!per_cpu(cpu_is_managed, policy->cpu)) @@ -134,7 +131,7 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy, per_cpu(cpu_max_freq, cpu) = policy->max; per_cpu(cpu_cur_freq, cpu) = policy->cur; per_cpu(cpu_set_freq, cpu) = policy->cur; - dprintk("managing cpu %u started " + pr_debug("managing cpu %u started " "(%u - %u kHz, currently %u kHz)\n", cpu, per_cpu(cpu_min_freq, cpu), @@ -156,12 +153,12 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy, per_cpu(cpu_min_freq, cpu) = 0; per_cpu(cpu_max_freq, cpu) = 0; per_cpu(cpu_set_freq, cpu) = 0; - dprintk("managing cpu %u stopped\n", cpu); + pr_debug("managing cpu %u stopped\n", cpu); mutex_unlock(&userspace_mutex); break; case CPUFREQ_GOV_LIMITS: mutex_lock(&userspace_mutex); - dprintk("limit event for cpu %u: %u - %u kHz, " + pr_debug("limit event for cpu %u: %u - %u kHz, " "currently %u kHz, last set to %u kHz\n", cpu, policy->min, policy->max, per_cpu(cpu_cur_freq, cpu), diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index 05432216e2246..90431cb92804b 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c @@ -14,9 +14,6 @@ #include #include -#define dprintk(msg...) 
\ - cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "freq-table", msg) - /********************************************************************* * FREQUENCY TABLE HELPERS * *********************************************************************/ @@ -31,11 +28,11 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { unsigned int freq = table[i].frequency; if (freq == CPUFREQ_ENTRY_INVALID) { - dprintk("table entry %u is invalid, skipping\n", i); + pr_debug("table entry %u is invalid, skipping\n", i); continue; } - dprintk("table entry %u: %u kHz, %u index\n", + pr_debug("table entry %u: %u kHz, %u index\n", i, freq, table[i].index); if (freq < min_freq) min_freq = freq; @@ -61,7 +58,7 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, unsigned int i; unsigned int count = 0; - dprintk("request for verification of policy (%u - %u kHz) for cpu %u\n", + pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n", policy->min, policy->max, policy->cpu); if (!cpu_online(policy->cpu)) @@ -86,7 +83,7 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq); - dprintk("verification lead to (%u - %u kHz) for cpu %u\n", + pr_debug("verification lead to (%u - %u kHz) for cpu %u\n", policy->min, policy->max, policy->cpu); return 0; @@ -110,7 +107,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, }; unsigned int i; - dprintk("request for target %u kHz (relation: %u) for cpu %u\n", + pr_debug("request for target %u kHz (relation: %u) for cpu %u\n", target_freq, relation, policy->cpu); switch (relation) { @@ -167,7 +164,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, } else *index = optimal.index; - dprintk("target is %u (%u kHz, %u)\n", *index, table[*index].frequency, + pr_debug("target is %u (%u kHz, %u)\n", *index, table[*index].frequency, table[*index].index); return 0; @@ -216,14 +213,14 @@ EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs); void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, unsigned int cpu) { - dprintk("setting show_table for cpu %u to %p\n", cpu, table); + pr_debug("setting show_table for cpu %u to %p\n", cpu, table); per_cpu(cpufreq_show_table, cpu) = table; } EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr); void cpufreq_frequency_table_put_attr(unsigned int cpu) { - dprintk("clearing show_table for cpu %u\n", cpu); + pr_debug("clearing show_table for cpu %u\n", cpu); per_cpu(cpufreq_show_table, cpu) = NULL; } EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr); diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index f508690eb9585..c47f3d09c1eeb 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -237,6 +237,7 @@ static int menu_select(struct cpuidle_device *dev) unsigned int power_usage = -1; int i; int multiplier; + struct timespec t; if (data->needs_update) { menu_update(dev); @@ -251,8 +252,9 @@ static int menu_select(struct cpuidle_device *dev) return 0; /* determine the expected residency time, round up */ + t = ktime_to_timespec(tick_nohz_get_sleep_length()); data->expected_us = - DIV_ROUND_UP((u32)ktime_to_ns(tick_nohz_get_sleep_length()), 1000); + t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC; data->bucket = which_bucket(data->expected_us); diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 
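/*
 * Illustrative sketch of the arithmetic the menu-governor hunk above
 * adopts: casting ktime_to_ns() to u32 truncates for sleeps longer than
 * roughly 4.29 seconds, so the expected residency is now derived from a
 * timespec instead.  In isolation (hypothetical helper name, the usual
 * <linux/ktime.h> and <linux/time.h> definitions assumed):
 */
static unsigned int sleep_length_to_us(ktime_t sleep_length)
{
	struct timespec t = ktime_to_timespec(sleep_length);

	/* whole seconds plus the sub-second remainder, in microseconds */
	return t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;
}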
0310ffaec9df0..be7917ec40c9f 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -300,7 +300,7 @@ static struct kobj_type ktype_state_cpuidle = { .release = cpuidle_state_sysfs_release, }; -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i) +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i) { kobject_put(&device->kobjs[i]->kobj); wait_for_completion(&device->kobjs[i]->kobj_unregister); diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 23e03554f0d3a..7e0e66037e025 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -2765,7 +2765,7 @@ static int __init amd64_edac_init(void) mcis = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL); ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL); if (!(mcis && ecc_stngs)) - goto err_ret; + goto err_free; msrs = msrs_alloc(); if (!msrs) diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c index 05523b504271f..76d1f576cdc87 100644 --- a/drivers/edac/i7300_edac.c +++ b/drivers/edac/i7300_edac.c @@ -162,7 +162,7 @@ static struct edac_pci_ctl_info *i7300_pci; #define AMBPRESENT_0 0x64 #define AMBPRESENT_1 0x66 -const static u16 mtr_regs[MAX_SLOTS] = { +static const u16 mtr_regs[MAX_SLOTS] = { 0x80, 0x84, 0x88, 0x8c, 0x82, 0x86, 0x8a, 0x8e }; diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index bd3c61b6dd8d5..6a788c3070eab 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -2163,7 +2163,6 @@ static int ohci_set_config_rom(struct fw_card *card, { struct fw_ohci *ohci; unsigned long flags; - int ret = -EBUSY; __be32 *next_config_rom; dma_addr_t uninitialized_var(next_config_rom_bus); @@ -2204,22 +2203,37 @@ static int ohci_set_config_rom(struct fw_card *card, spin_lock_irqsave(&ohci->lock, flags); + /* + * If there is not an already pending config_rom update, + * push our new allocation into the ohci->next_config_rom + * and then mark the local variable as null so that we + * won't deallocate the new buffer. + * + * OTOH, if there is a pending config_rom update, just + * use that buffer with the new config_rom data, and + * let this routine free the unused DMA allocation. + */ + if (ohci->next_config_rom == NULL) { ohci->next_config_rom = next_config_rom; ohci->next_config_rom_bus = next_config_rom_bus; + next_config_rom = NULL; + } - copy_config_rom(ohci->next_config_rom, config_rom, length); + copy_config_rom(ohci->next_config_rom, config_rom, length); - ohci->next_header = config_rom[0]; - ohci->next_config_rom[0] = 0; + ohci->next_header = config_rom[0]; + ohci->next_config_rom[0] = 0; - reg_write(ohci, OHCI1394_ConfigROMmap, - ohci->next_config_rom_bus); - ret = 0; - } + reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); spin_unlock_irqrestore(&ohci->lock, flags); + /* If we didn't use the DMA allocation, delete it. */ + if (next_config_rom != NULL) + dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, + next_config_rom, next_config_rom_bus); + /* * Now initiate a bus reset to have the changes take * effect. We clean up the old config rom memory and DMA @@ -2227,13 +2241,10 @@ static int ohci_set_config_rom(struct fw_card *card, * controller could need to access it before the bus reset * takes effect. 
*/ - if (ret == 0) - fw_schedule_bus_reset(&ohci->card, true, true); - else - dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, - next_config_rom, next_config_rom_bus); - return ret; + fw_schedule_bus_reset(&ohci->card, true, true); + + return 0; } static void ohci_send_request(struct fw_card *card, struct fw_packet *packet) diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c index 69ad529d92fbb..ea5ac2dc12337 100644 --- a/drivers/firmware/dcdbas.c +++ b/drivers/firmware/dcdbas.c @@ -268,8 +268,10 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd) } /* generate SMI */ + /* inb to force posted write through and make SMI happen now */ asm volatile ( - "outb %b0,%w1" + "outb %b0,%w1\n" + "inb %w1" : /* no output args */ : "a" (smi_cmd->command_code), "d" (smi_cmd->command_address), diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile index cc9277885dd07..01cef64bf4d29 100644 --- a/drivers/gpu/Makefile +++ b/drivers/gpu/Makefile @@ -1 +1,2 @@ -obj-y += drm/ vga/ stub/ +obj-y += drm/ vga/ stub/ ion/ +obj-$(CONFIG_MSM_KGSL) += msm/ diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 0902d44600394..4b4b5455b00ff 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -24,6 +24,7 @@ config DRM_KMS_HELPER depends on DRM select FB select FRAMEBUFFER_CONSOLE if !EXPERT + select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE help FB and CRTC helpers for KMS drivers. diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 654faa803dcbc..6a5371b8fe9af 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -1073,6 +1073,9 @@ int drm_mode_getresources(struct drm_device *dev, void *data, uint32_t __user *encoder_id; struct drm_mode_group *mode_group; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); /* @@ -1244,6 +1247,9 @@ int drm_mode_getcrtc(struct drm_device *dev, struct drm_mode_object *obj; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, crtc_resp->crtc_id, @@ -1312,6 +1318,9 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, uint64_t __user *prop_values; uint32_t __user *encoder_ptr; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo)); DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id); @@ -1431,6 +1440,9 @@ int drm_mode_getencoder(struct drm_device *dev, void *data, struct drm_encoder *encoder; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, enc_resp->encoder_id, DRM_MODE_OBJECT_ENCODER); @@ -1486,6 +1498,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, int ret = 0; int i; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, crtc_req->crtc_id, DRM_MODE_OBJECT_CRTC); @@ -1603,6 +1618,9 @@ int drm_mode_cursor_ioctl(struct drm_device *dev, struct drm_crtc *crtc; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + if (!req->flags) { DRM_ERROR("no operation set\n"); return -EINVAL; @@ -1667,6 +1685,9 @@ int drm_mode_addfb(struct drm_device *dev, struct drm_framebuffer *fb; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + if ((config->min_width > r->width) || (r->width > 
config->max_width)) { DRM_ERROR("mode new framebuffer width not within limits\n"); return -EINVAL; @@ -1724,6 +1745,9 @@ int drm_mode_rmfb(struct drm_device *dev, int ret = 0; int found = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB); /* TODO check that we realy get a framebuffer back. */ @@ -1780,6 +1804,9 @@ int drm_mode_getfb(struct drm_device *dev, struct drm_framebuffer *fb; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); if (!obj) { @@ -1813,6 +1840,9 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev, int num_clips; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); if (!obj) { @@ -1996,6 +2026,9 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev, struct drm_mode_modeinfo *umode = &mode_cmd->mode; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); @@ -2042,6 +2075,9 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev, struct drm_mode_modeinfo *umode = &mode_cmd->mode; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); @@ -2211,6 +2247,9 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev, uint64_t __user *values_ptr; uint32_t __user *blob_length_ptr; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY); if (!obj) { @@ -2333,6 +2372,9 @@ int drm_mode_getblob_ioctl(struct drm_device *dev, int ret = 0; void *blob_ptr; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB); if (!obj) { @@ -2393,6 +2435,9 @@ int drm_mode_connector_property_set_ioctl(struct drm_device *dev, int ret = -EINVAL; int i; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR); @@ -2509,6 +2554,9 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev, int size; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { @@ -2560,6 +2608,9 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev, int size; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index ea1c4b019ebf9..c3c78eefc538d 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -498,11 +498,12 @@ EXPORT_SYMBOL(drm_gem_vm_open); void drm_gem_vm_close(struct vm_area_struct *vma) { struct drm_gem_object *obj = vma->vm_private_data; + struct drm_device *dev = obj->dev; - 
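/*
 * The drm_gem_vm_close() change just above caches obj->dev in a local
 * before the unreference, because dropping what may be the last
 * reference can free the object and the following mutex_unlock() must
 * not reach through the freed pointer.  A self-contained sketch of the
 * same pattern, with hypothetical types:
 */
struct foo_device { struct mutex struct_mutex; };
struct foo { struct kref ref; struct foo_device *dev; };

static void foo_release(struct kref *kref)
{
	kfree(container_of(kref, struct foo, ref));
}

static void foo_vm_close(struct foo *obj)
{
	struct foo_device *dev = obj->dev;	/* cache before possible free */

	mutex_lock(&dev->struct_mutex);
	kref_put(&obj->ref, foo_release);	/* may free obj */
	mutex_unlock(&dev->struct_mutex);	/* the cached dev is still valid */
}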
mutex_lock(&obj->dev->struct_mutex); + mutex_lock(&dev->struct_mutex); drm_vm_close_locked(vma); drm_gem_object_unreference(obj); - mutex_unlock(&obj->dev->struct_mutex); + mutex_unlock(&dev->struct_mutex); } EXPORT_SYMBOL(drm_gem_vm_close); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 28d1d3c24d65e..1d4afbc3c8fa2 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -942,11 +942,34 @@ EXPORT_SYMBOL(drm_vblank_put); void drm_vblank_off(struct drm_device *dev, int crtc) { + struct drm_pending_vblank_event *e, *t; + struct timeval now; unsigned long irqflags; + unsigned int seq; spin_lock_irqsave(&dev->vbl_lock, irqflags); vblank_disable_and_save(dev, crtc); DRM_WAKEUP(&dev->vbl_queue[crtc]); + + /* Send any queued vblank events, lest the natives grow disquiet */ + seq = drm_vblank_count_and_time(dev, crtc, &now); + list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { + if (e->pipe != crtc) + continue; + DRM_DEBUG("Sending premature vblank event on disable: \ + wanted %d, current %d\n", + e->event.sequence, seq); + + e->event.sequence = seq; + e->event.tv_sec = now.tv_sec; + e->event.tv_usec = now.tv_usec; + drm_vblank_put(dev, e->pipe); + list_move_tail(&e->base.link, &e->base.file_priv->event_list); + wake_up_interruptible(&e->base.file_priv->event_wait); + trace_drm_vblank_event_delivered(e->base.pid, e->pipe, + e->event.sequence); + } + spin_unlock_irqrestore(&dev->vbl_lock, irqflags); } EXPORT_SYMBOL(drm_vblank_off); diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 85da4c40694cc..2eee8e016b385 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -158,8 +158,15 @@ static ssize_t status_show(struct device *device, { struct drm_connector *connector = to_drm_connector(device); enum drm_connector_status status; + int ret; + + ret = mutex_lock_interruptible(&connector->dev->mode_config.mutex); + if (ret) + return ret; status = connector->funcs->detect(connector, true); + mutex_unlock(&connector->dev->mode_config.mutex); + return snprintf(buf, PAGE_SIZE, "%s\n", drm_get_connector_status_name(status)); } diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 36e66cc5225eb..382ae273519b0 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -56,9 +56,7 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj); static int i915_gem_inactive_shrink(struct shrinker *shrinker, - int nr_to_scan, - gfp_t gfp_mask); - + struct shrink_control *sc); /* some bookkeeping */ static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, @@ -1749,8 +1747,10 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) return; spin_lock(&file_priv->mm.lock); - list_del(&request->client_list); - request->file_priv = NULL; + if (request->file_priv) { + list_del(&request->client_list); + request->file_priv = NULL; + } spin_unlock(&file_priv->mm.lock); } @@ -4062,9 +4062,7 @@ i915_gpu_is_active(struct drm_device *dev) } static int -i915_gem_inactive_shrink(struct shrinker *shrinker, - int nr_to_scan, - gfp_t gfp_mask) +i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) { struct drm_i915_private *dev_priv = container_of(shrinker, @@ -4072,6 +4070,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, mm.inactive_shrinker); struct drm_device *dev = dev_priv->dev; struct drm_i915_gem_object *obj, *next; + int nr_to_scan 
= sc->nr_to_scan; int cnt; if (!mutex_trylock(&dev->struct_mutex)) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 50ab1614571c7..ded73a6007a8f 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -388,6 +388,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, uint32_t __iomem *reloc_entry; void __iomem *reloc_page; + /* We can't wait for rendering with pagefaults disabled */ + if (obj->active && in_atomic()) + return -EFAULT; + ret = i915_gem_object_set_to_gtt_domain(obj, 1); if (ret) return ret; @@ -461,15 +465,24 @@ i915_gem_execbuffer_relocate(struct drm_device *dev, struct list_head *objects) { struct drm_i915_gem_object *obj; - int ret; - + int ret = 0; + + /* This is the fast path and we cannot handle a pagefault whilst + * holding the struct mutex lest the user pass in the relocations + * contained within a mmaped bo. For in such a case we, the page + * fault handler would call i915_gem_fault() and we would try to + * acquire the struct mutex again. Obviously this is bad and so + * lockdep complains vehemently. + */ + pagefault_disable(); list_for_each_entry(obj, objects, exec_list) { ret = i915_gem_execbuffer_relocate_object(obj, eb); if (ret) - return ret; + break; } + pagefault_enable(); - return 0; + return ret; } static int diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 8a9e08bf1cf74..2347bc16d7fbe 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1377,7 +1377,12 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) else i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE); + + /* maintain vblank delivery even in deep C-states */ + if (dev_priv->info->gen == 3) + I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + return 0; } @@ -1390,6 +1395,10 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + if (dev_priv->info->gen == 3) + I915_WRITE(INSTPM, + INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS); + if (HAS_PCH_SPLIT(dev)) ironlake_disable_display_irq(dev_priv, (pipe == 0) ? DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 2abe240dae583..12c547a5ca13e 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -405,9 +405,12 @@ #define I915_ERROR_INSTRUCTION (1<<0) #define INSTPM 0x020c0 #define INSTPM_SELF_EN (1<<12) /* 915GM only */ +#define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts + will not assert AGPBUSY# and will only + be delivered when out of C3. 
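/*
 * Sketch of the shrinker interface that the i915 hunk above (and the
 * ttm page-pool hunk later in this patch) migrate to: nr_to_scan and
 * the gfp mask now arrive packed in a struct shrink_control.  A
 * hypothetical minimal user, following the usual convention of
 * reporting the freeable count when nr_to_scan is zero:
 */
static atomic_t example_cached_objects = ATOMIC_INIT(0);

static int example_shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
	if (sc->nr_to_scan) {
		/* free up to sc->nr_to_scan cached objects here,
		 * honouring sc->gfp_mask ... */
	}
	/* always report how many freeable objects remain */
	return atomic_read(&example_cached_objects);
}

static struct shrinker example_shrinker = {
	.shrink = example_shrink,
	.seeks = DEFAULT_SEEKS,
};
/* paired with register_shrinker()/unregister_shrinker() at init/exit */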
*/ #define ACTHD 0x020c8 #define FW_BLC 0x020d8 -#define FW_BLC2 0x020dc +#define FW_BLC2 0x020dc #define FW_BLC_SELF 0x020e0 /* 915+ only */ #define FW_BLC_SELF_EN_MASK (1<<31) #define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 49fb54fd9a187..841558bc91cc2 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5630,36 +5630,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, return ret; } -static void intel_crtc_reset(struct drm_crtc *crtc) -{ - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - - /* Reset flags back to the 'unknown' status so that they - * will be correctly set on the initial modeset. - */ - intel_crtc->dpms_mode = -1; -} - -static struct drm_crtc_helper_funcs intel_helper_funcs = { - .dpms = intel_crtc_dpms, - .mode_fixup = intel_crtc_mode_fixup, - .mode_set = intel_crtc_mode_set, - .mode_set_base = intel_pipe_set_base, - .mode_set_base_atomic = intel_pipe_set_base_atomic, - .load_lut = intel_crtc_load_lut, - .disable = intel_crtc_disable, -}; - -static const struct drm_crtc_funcs intel_crtc_funcs = { - .reset = intel_crtc_reset, - .cursor_set = intel_crtc_cursor_set, - .cursor_move = intel_crtc_cursor_move, - .gamma_set = intel_crtc_gamma_set, - .set_config = drm_crtc_helper_set_config, - .destroy = intel_crtc_destroy, - .page_flip = intel_crtc_page_flip, -}; - static void intel_sanitize_modesetting(struct drm_device *dev, int pipe, int plane) { @@ -5710,6 +5680,42 @@ static void intel_sanitize_modesetting(struct drm_device *dev, } } +static void intel_crtc_reset(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + /* Reset flags back to the 'unknown' status so that they + * will be correctly set on the initial modeset. + */ + intel_crtc->dpms_mode = -1; + + /* We need to fix up any BIOS configuration that conflicts with + * our expectations. 
+ */ + intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); +} + +static struct drm_crtc_helper_funcs intel_helper_funcs = { + .dpms = intel_crtc_dpms, + .mode_fixup = intel_crtc_mode_fixup, + .mode_set = intel_crtc_mode_set, + .mode_set_base = intel_pipe_set_base, + .mode_set_base_atomic = intel_pipe_set_base_atomic, + .load_lut = intel_crtc_load_lut, + .disable = intel_crtc_disable, +}; + +static const struct drm_crtc_funcs intel_crtc_funcs = { + .reset = intel_crtc_reset, + .cursor_set = intel_crtc_cursor_set, + .cursor_move = intel_crtc_cursor_move, + .gamma_set = intel_crtc_gamma_set, + .set_config = drm_crtc_helper_set_config, + .destroy = intel_crtc_destroy, + .page_flip = intel_crtc_page_flip, +}; + static void intel_crtc_init(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -5759,8 +5765,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer, (unsigned long)intel_crtc); - - intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); } int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, @@ -6001,8 +6005,10 @@ intel_user_framebuffer_create(struct drm_device *dev, return ERR_PTR(-ENOENT); intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); - if (!intel_fb) + if (!intel_fb) { + drm_gem_object_unreference_unlocked(&obj->base); return ERR_PTR(-ENOMEM); + } ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); if (ret) { diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 51cb4e36997f7..8f3a02bc1cf5f 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1455,7 +1455,8 @@ intel_dp_link_down(struct intel_dp *intel_dp) if (!HAS_PCH_CPT(dev) && I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { - struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); + struct drm_crtc *crtc = intel_dp->base.base.crtc; + /* Hardware workaround: leaving our transcoder select * set to transcoder B while it's off will prevent the * corresponding HDMI output on transcoder A. @@ -1470,7 +1471,19 @@ intel_dp_link_down(struct intel_dp *intel_dp) /* Changes to enable or select take place the vblank * after being written. */ - intel_wait_for_vblank(dev, intel_crtc->pipe); + if (crtc == NULL) { + /* We can arrive here never having been attached + * to a CRTC, for instance, due to inheriting + * random state from the BIOS. + * + * If the pipe is not running, play safe and + * wait for the clocks to stabilise before + * continuing. + */ + POSTING_READ(intel_dp->output_reg); + msleep(50); + } else + intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); } I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index bcdba7bd5cfaf..b902192c4647c 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -540,6 +540,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, struct drm_device *dev = dev_priv->dev; struct drm_connector *connector = dev_priv->int_lvds_connector; + if (dev->switch_power_state != DRM_SWITCH_POWER_ON) + return NOTIFY_OK; + /* * check and update the status of LVDS connector after receiving * the LID nofication event. 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 445f27efe677f..bd087df02b8e5 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -684,12 +684,37 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) static bool bsd_ring_get_irq(struct intel_ring_buffer *ring) { - return ring_get_irq(ring, GT_BSD_USER_INTERRUPT); + struct drm_device *dev = ring->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + + if (!dev->irq_enabled) + return false; + + spin_lock(&ring->irq_lock); + if (ring->irq_refcount++ == 0) { + if (IS_G4X(dev)) + i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT); + else + ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT); + } + spin_unlock(&ring->irq_lock); + + return true; } static void bsd_ring_put_irq(struct intel_ring_buffer *ring) { - ring_put_irq(ring, GT_BSD_USER_INTERRUPT); + struct drm_device *dev = ring->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + + spin_lock(&ring->irq_lock); + if (--ring->irq_refcount == 0) { + if (IS_G4X(dev)) + i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT); + else + ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT); + } + spin_unlock(&ring->irq_lock); } static int diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index fe4a53a50b833..65edb227ae811 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1380,7 +1380,9 @@ intel_tv_detect(struct drm_connector *connector, bool force) if (type < 0) return connector_status_disconnected; + intel_tv->type = type; intel_tv_find_better_format(connector); + return connector_status_connected; } diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 60769d2f9a668..7826be0653e19 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -181,13 +181,13 @@ nouveau_fbcon_sync(struct fb_info *info) OUT_RING (chan, 0); } - nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff); + nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3, 0xffffffff); FIRE_RING(chan); mutex_unlock(&chan->mutex); ret = -EBUSY; for (i = 0; i < 100000; i++) { - if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy + 3)) { + if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3)) { ret = 0; break; } diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index 258fa5e7a2d9a..7bd7456890974 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -32,6 +32,7 @@ #include "atom.h" #include "atom-names.h" #include "atom-bits.h" +#include "radeon.h" #define ATOM_COND_ABOVE 0 #define ATOM_COND_ABOVEOREQUAL 1 @@ -101,7 +102,9 @@ static void debug_print_spaces(int n) static uint32_t atom_iio_execute(struct atom_context *ctx, int base, uint32_t index, uint32_t data) { + struct radeon_device *rdev = ctx->card->dev->dev_private; uint32_t temp = 0xCDCDCDCD; + while (1) switch (CU8(base)) { case ATOM_IIO_NOP: @@ -112,7 +115,8 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, base += 3; break; case ATOM_IIO_WRITE: - (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1)); + if (rdev->family == CHIP_RV515) + (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1)); ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp); base += 3; break; @@ -131,7 +135,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, case ATOM_IIO_MOVE_INDEX: temp &= 
~((0xFFFFFFFF >> (32 - CU8(base + 1))) << - CU8(base + 2)); + CU8(base + 3)); temp |= ((index >> CU8(base + 2)) & (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + @@ -141,7 +145,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, case ATOM_IIO_MOVE_DATA: temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << - CU8(base + 2)); + CU8(base + 3)); temp |= ((data >> CU8(base + 2)) & (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + @@ -151,7 +155,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, case ATOM_IIO_MOVE_ATTR: temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << - CU8(base + 2)); + CU8(base + 3)); temp |= ((ctx-> io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 - diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index a4e5e53e0a627..bede31c6571bd 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -61,8 +61,8 @@ static void atombios_overscan_setup(struct drm_crtc *crtc, args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2); args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2); } else if (a2 > a1) { - args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); - args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); + args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); + args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); } break; case RMX_FULL: @@ -531,6 +531,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; else pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; + + if (rdev->family < CHIP_RV770) + pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; } else { pll->flags |= RADEON_PLL_LEGACY; @@ -559,7 +562,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { if (ss_enabled) { if (ss->refdiv) { - pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; pll->flags |= RADEON_PLL_USE_REF_DIV; pll->reference_div = ss->refdiv; if (ASIC_IS_AVIVO(rdev)) @@ -957,7 +959,11 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode /* adjust pixel clock as needed */ adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); - if (ASIC_IS_AVIVO(rdev)) + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) + /* TV seems to prefer the legacy algo on some boards */ + radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, + &ref_div, &post_div); + else if (ASIC_IS_AVIVO(rdev)) radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, &ref_div, &post_div); else diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 6140ea1de45a6..5d6774ab77284 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -869,9 +869,15 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev) SYSTEM_ACCESS_MODE_NOT_IN_SYS | SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU | EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5); - WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); - WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); - WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); + if (rdev->flags & RADEON_IS_IGP) { + WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp); + WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp); + 
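/*
 * Sketch of the bit-field move that the radeon atom.c ATOM_IIO_MOVE_*
 * fix above is about: the field width comes from byte base+1, the
 * source shift from base+2 and the destination shift from base+3, so
 * the clear mask and the insert must both use the destination shift.
 * In isolation (hypothetical helper, width assumed to be 1..32):
 */
static u32 iio_move_bits(u32 temp, u32 src, u8 width, u8 src_shift, u8 dst_shift)
{
	u32 mask = 0xFFFFFFFF >> (32 - width);

	temp &= ~(mask << dst_shift);				/* clear destination field */
	temp |= ((src >> src_shift) & mask) << dst_shift;	/* insert source bits */
	return temp;
}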
WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp); + } else { + WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); + WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); + WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); + } WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); @@ -1579,7 +1585,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) u32 sq_stack_resource_mgmt_2; u32 sq_stack_resource_mgmt_3; u32 vgt_cache_invalidation; - u32 hdp_host_path_cntl; + u32 hdp_host_path_cntl, tmp; int i, j, num_shader_engines, ps_thread_count; switch (rdev->family) { @@ -2139,6 +2145,10 @@ static void evergreen_gpu_init(struct radeon_device *rdev) for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4) WREG32(i, 0); + tmp = RREG32(HDP_MISC_CNTL); + tmp |= HDP_FLUSH_INVALIDATE_CACHE; + WREG32(HDP_MISC_CNTL, tmp); + hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); @@ -2930,11 +2940,6 @@ static int evergreen_startup(struct radeon_device *rdev) rdev->asic->copy = NULL; dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); } - /* XXX: ontario has problems blitting to gart at the moment */ - if (rdev->family == CHIP_PALM) { - rdev->asic->copy = NULL; - radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); - } /* allocate wb buffer */ r = radeon_wb_init(rdev); diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index eb4acf4528ff4..621d61c3cfb9c 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -64,6 +64,8 @@ #define GB_BACKEND_MAP 0x98FC #define DMIF_ADDR_CONFIG 0xBD4 #define HDP_ADDR_CONFIG 0x2F48 +#define HDP_MISC_CNTL 0x2F4C +#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0) #define CC_SYS_RB_BACKEND_DISABLE 0x3F88 #define GC_USER_RB_BACKEND_DISABLE 0x9B7C @@ -221,6 +223,11 @@ #define MC_VM_MD_L1_TLB0_CNTL 0x2654 #define MC_VM_MD_L1_TLB1_CNTL 0x2658 #define MC_VM_MD_L1_TLB2_CNTL 0x265C + +#define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C +#define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660 +#define FUS_MC_VM_MD_L1_TLB2_CNTL 0x2664 + #define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C #define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038 #define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034 diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 793c5e6026ad7..04152b7c0dd6b 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -782,6 +782,7 @@ static struct radeon_asic evergreen_asic = { .hpd_fini = &evergreen_hpd_fini, .hpd_sense = &evergreen_hpd_sense, .hpd_set_polarity = &evergreen_hpd_set_polarity, + .ioctl_wait_idle = r600_ioctl_wait_idle, .gui_idle = &r600_gui_idle, .pm_misc = &evergreen_pm_misc, .pm_prepare = &evergreen_pm_prepare, @@ -828,6 +829,7 @@ static struct radeon_asic sumo_asic = { .hpd_fini = &evergreen_hpd_fini, .hpd_sense = &evergreen_hpd_sense, .hpd_set_polarity = &evergreen_hpd_set_polarity, + .ioctl_wait_idle = r600_ioctl_wait_idle, .gui_idle = &r600_gui_idle, .pm_misc = &evergreen_pm_misc, .pm_prepare = &evergreen_pm_prepare, @@ -874,6 +876,8 @@ static struct radeon_asic btc_asic = { .hpd_fini = &evergreen_hpd_fini, .hpd_sense = &evergreen_hpd_sense, .hpd_set_polarity = &evergreen_hpd_set_polarity, + .ioctl_wait_idle = r600_ioctl_wait_idle, + .ioctl_wait_idle = r600_ioctl_wait_idle, .gui_idle = &r600_gui_idle, .pm_misc = &evergreen_pm_misc, .pm_prepare = &evergreen_pm_prepare, diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c 
index 02d5c415f4993..ad11a057a9f25 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -431,7 +431,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, } } - /* Acer laptop (Acer TravelMate 5730G) has an HDMI port + /* Acer laptop (Acer TravelMate 5730/5730G) has an HDMI port * on the laptop and a DVI port on the docking station and * both share the same encoder, hpd pin, and ddc line. * So while the bios table is technically correct, @@ -440,7 +440,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, * with different crtcs which isn't possible on the hardware * side and leaves no crtcs for LVDS or VGA. */ - if ((dev->pdev->device == 0x95c4) && + if (((dev->pdev->device == 0x95c4) || (dev->pdev->device == 0x9591)) && (dev->pdev->subsystem_vendor == 0x1025) && (dev->pdev->subsystem_device == 0x013c)) { if ((*connector_type == DRM_MODE_CONNECTOR_DVII) && @@ -675,7 +675,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) ATOM_ENCODER_CAP_RECORD *cap_record; u16 caps = 0; - while (record->ucRecordType > 0 && + while (record->ucRecordSize > 0 && + record->ucRecordType > 0 && record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { switch (record->ucRecordType) { case ATOM_ENCODER_CAP_RECORD_TYPE: @@ -720,7 +721,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) break; } - while (record->ucRecordType > 0 && + while (record->ucRecordSize > 0 && + record->ucRecordType > 0 && record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { switch (record->ucRecordType) { case ATOM_I2C_RECORD_TYPE: @@ -782,10 +784,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) ATOM_HPD_INT_RECORD *hpd_record; ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; - while (record->ucRecordType > 0 - && record-> - ucRecordType <= - ATOM_MAX_OBJECT_RECORD_NUMBER) { + while (record->ucRecordSize > 0 && + record->ucRecordType > 0 && + record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { switch (record->ucRecordType) { case ATOM_I2C_RECORD_TYPE: i2c_record = @@ -1573,9 +1574,17 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; bool bad_record = false; - u8 *record = (u8 *)(mode_info->atom_context->bios + - data_offset + - le16_to_cpu(lvds_info->info.usModePatchTableOffset)); + u8 *record; + + if ((frev == 1) && (crev < 2)) + /* absolute */ + record = (u8 *)(mode_info->atom_context->bios + + le16_to_cpu(lvds_info->info.usModePatchTableOffset)); + else + /* relative */ + record = (u8 *)(mode_info->atom_context->bios + + data_offset + + le16_to_cpu(lvds_info->info.usModePatchTableOffset)); while (*record != ATOM_RECORD_END_TYPE) { switch (*record) { case LCD_MODE_PATCH_RECORD_MODE_TYPE: @@ -1598,9 +1607,10 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0], fake_edid_record->ucFakeEDIDLength); - if (drm_edid_is_valid(edid)) + if (drm_edid_is_valid(edid)) { rdev->mode_info.bios_hardcoded_edid = edid; - else + rdev->mode_info.bios_hardcoded_edid_size = edid_size; + } else kfree(edid); } } diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index cf7c8d5b4ec24..cf602e2d0718e 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -448,7 +448,7 @@ static uint16_t 
combios_get_table_offset(struct drm_device *dev, bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) { - int edid_info; + int edid_info, size; struct edid *edid; unsigned char *raw; edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE); @@ -456,11 +456,12 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) return false; raw = rdev->bios + edid_info; - edid = kmalloc(EDID_LENGTH * (raw[0x7e] + 1), GFP_KERNEL); + size = EDID_LENGTH * (raw[0x7e] + 1); + edid = kmalloc(size, GFP_KERNEL); if (edid == NULL) return false; - memcpy((unsigned char *)edid, raw, EDID_LENGTH * (raw[0x7e] + 1)); + memcpy((unsigned char *)edid, raw, size); if (!drm_edid_is_valid(edid)) { kfree(edid); @@ -468,6 +469,7 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) } rdev->mode_info.bios_hardcoded_edid = edid; + rdev->mode_info.bios_hardcoded_edid_size = size; return true; } @@ -475,8 +477,17 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) struct edid * radeon_bios_get_hardcoded_edid(struct radeon_device *rdev) { - if (rdev->mode_info.bios_hardcoded_edid) - return rdev->mode_info.bios_hardcoded_edid; + struct edid *edid; + + if (rdev->mode_info.bios_hardcoded_edid) { + edid = kmalloc(rdev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL); + if (edid) { + memcpy((unsigned char *)edid, + (unsigned char *)rdev->mode_info.bios_hardcoded_edid, + rdev->mode_info.bios_hardcoded_edid_size); + return edid; + } + } return NULL; } diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 22b7e3dc0eca4..d83338b11cdb0 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -629,6 +629,8 @@ static int radeon_vga_mode_valid(struct drm_connector *connector, static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector, bool force) { + struct drm_device *dev = connector->dev; + struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder; struct drm_encoder_helper_funcs *encoder_funcs; @@ -679,6 +681,17 @@ radeon_vga_detect(struct drm_connector *connector, bool force) if (ret == connector_status_connected) ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true); + + /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the + * vbios to deal with KVMs. If we have one and are not able to detect a monitor + * by other means, assume the CRT is connected and use that EDID. + */ + if ((!rdev->is_atom_bios) && + (ret == connector_status_disconnected) && + rdev->mode_info.bios_hardcoded_edid_size) { + ret = connector_status_connected; + } + radeon_connector_update_scratch_regs(connector, ret); return ret; } @@ -790,6 +803,8 @@ static int radeon_dvi_get_modes(struct drm_connector *connector) static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector, bool force) { + struct drm_device *dev = connector->dev; + struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder = NULL; struct drm_encoder_helper_funcs *encoder_funcs; @@ -829,8 +844,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) * you don't really know what's connected to which port as both are digital. 
*/ if (radeon_connector->shared_ddc && (ret == connector_status_connected)) { - struct drm_device *dev = connector->dev; - struct radeon_device *rdev = dev->dev_private; struct drm_connector *list_connector; struct radeon_connector *list_radeon_connector; list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) { @@ -895,6 +908,19 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true); } + /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the + * vbios to deal with KVMs. If we have one and are not able to detect a monitor + * by other means, assume the DFP is connected and use that EDID. In most + * cases the DVI port is actually a virtual KVM port connected to the service + * processor. + */ + if ((!rdev->is_atom_bios) && + (ret == connector_status_disconnected) && + rdev->mode_info.bios_hardcoded_edid_size) { + radeon_connector->use_digital = true; + ret = connector_status_connected; + } + out: /* updated in get modes as well since we need to know if it's analog or digital */ radeon_connector_update_scratch_regs(connector, ret); diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 171b0b2e3a644..2b0ee623cd319 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -80,7 +80,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev) scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; else scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; - seq = rdev->wb.wb[scratch_index/4]; + seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]); } else seq = RREG32(rdev->fence_drv.scratch_reg); if (seq != rdev->fence_drv.last_seq) { diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 8387d32caaa76..a5f463bbc4702 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -205,6 +205,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) /* return clock value in KHz */ value = rdev->clock.spll.reference_freq * 10; break; + case RADEON_INFO_FUSION_GART_WORKING: + value = 1; + break; default: DRM_DEBUG_KMS("Invalid request %d\n", info->request); return -EINVAL; diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index a670caaee29e5..8c134db007d9b 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -239,6 +239,7 @@ struct radeon_mode_info { struct drm_property *underscan_vborder_property; /* hardcoded DFP edid from BIOS */ struct edid *bios_hardcoded_edid; + int bios_hardcoded_edid_size; /* pointer to fbdev info structure */ struct radeon_fbdev *rfbdev; diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 06e79822a2bff..d6edfeb6f3411 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -248,7 +248,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev) void radeon_ring_free_size(struct radeon_device *rdev) { if (rdev->wb.enabled) - rdev->cp.rptr = rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]; + rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]); else { if (rdev->family >= CHIP_R600) rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 
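/*
 * Sketch of the writeback access pattern the radeon_fence and
 * radeon_ring hunks above correct: the GPU writes those slots in
 * little-endian, so the CPU-side read has to go through le32_to_cpu()
 * to also be correct on big-endian hosts.  Hypothetical helper:
 */
static u32 read_wb_slot(const __le32 *wb, unsigned int byte_offset)
{
	/* slots are addressed in bytes but stored as 32-bit LE words */
	return le32_to_cpu(wb[byte_offset / 4]);
}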
b1e02fffd3ccd..7f730f8262034 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -394,12 +394,14 @@ static int ttm_pool_get_num_unused_pages(void) /** * Callback for mm to request pool to reduce number of page held. */ -static int ttm_pool_mm_shrink(struct shrinker *shrink, int shrink_pages, gfp_t gfp_mask) +static int ttm_pool_mm_shrink(struct shrinker *shrink, + struct shrink_control *sc) { static atomic_t start_pool = ATOMIC_INIT(0); unsigned i; unsigned pool_offset = atomic_add_return(1, &start_pool); struct ttm_page_pool *pool; + int shrink_pages = sc->nr_to_scan; pool_offset = pool_offset % NUM_POOLS; /* select start pool in round robin fashion */ diff --git a/drivers/gpu/ion/Kconfig b/drivers/gpu/ion/Kconfig new file mode 100644 index 0000000000000..f4affe413f5c4 --- /dev/null +++ b/drivers/gpu/ion/Kconfig @@ -0,0 +1,17 @@ +menuconfig ION + tristate "Ion Memory Manager" + select GENERIC_ALLOCATOR + help + Chose this option to enable the ION Memory Manager. + +config ION_TEGRA + tristate "Ion for Tegra" + depends on ARCH_TEGRA && ION + help + Choose this option if you wish to use ion on an nVidia Tegra. + +config ION_MSM + tristate "Ion for MSM" + depends on ARCH_MSM && ION + help + Choose this option if you wish to use ion on an MSM target. diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile new file mode 100644 index 0000000000000..c9e8a944052e9 --- /dev/null +++ b/drivers/gpu/ion/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_ION) += ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o ion_iommu_heap.o ion_cp_heap.o +obj-$(CONFIG_ION_TEGRA) += tegra/ +obj-$(CONFIG_ION_MSM) += msm/ diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c new file mode 100644 index 0000000000000..060c71fc5fbdf --- /dev/null +++ b/drivers/gpu/ion/ion.c @@ -0,0 +1,1775 @@ +/* + * drivers/gpu/ion/ion.c + * + * Copyright (C) 2011 Google, Inc. + * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "ion_priv.h" +#define DEBUG + +/** + * struct ion_device - the metadata of the ion device node + * @dev: the actual misc device + * @buffers: an rb tree of all the existing buffers + * @lock: lock protecting the buffers & heaps trees + * @heaps: list of all the heaps in the system + * @user_clients: list of all the clients created from userspace + */ +struct ion_device { + struct miscdevice dev; + struct rb_root buffers; + struct mutex lock; + struct rb_root heaps; + long (*custom_ioctl) (struct ion_client *client, unsigned int cmd, + unsigned long arg); + struct rb_root user_clients; + struct rb_root kernel_clients; + struct dentry *debug_root; +}; + +/** + * struct ion_client - a process/hw block local address space + * @ref: for reference counting the client + * @node: node in the tree of all clients + * @dev: backpointer to ion device + * @handles: an rb tree of all the handles in this client + * @lock: lock protecting the tree of handles + * @heap_mask: mask of all supported heaps + * @name: used for debugging + * @task: used for debugging + * + * A client represents a list of buffers this client may access. + * The mutex stored here is used to protect both handles tree + * as well as the handles themselves, and should be held while modifying either. + */ +struct ion_client { + struct kref ref; + struct rb_node node; + struct ion_device *dev; + struct rb_root handles; + struct mutex lock; + unsigned int heap_mask; + char *name; + struct task_struct *task; + pid_t pid; + struct dentry *debug_root; +}; + +/** + * ion_handle - a client local reference to a buffer + * @ref: reference count + * @client: back pointer to the client the buffer resides in + * @buffer: pointer to the buffer + * @node: node in the client's handle rbtree + * @kmap_cnt: count of times this client has mapped to kernel + * @dmap_cnt: count of times this client has mapped for dma + * @usermap_cnt: count of times this client has mapped for userspace + * + * Modifications to node, map_cnt or mapping should be protected by the + * lock in the client. Other fields are never changed after initialization. 
+ */ +struct ion_handle { + struct kref ref; + struct ion_client *client; + struct ion_buffer *buffer; + struct rb_node node; + unsigned int kmap_cnt; + unsigned int dmap_cnt; + unsigned int usermap_cnt; + unsigned int iommu_map_cnt; +}; + +static void ion_iommu_release(struct kref *kref); + +/* this function should only be called while dev->lock is held */ +static void ion_buffer_add(struct ion_device *dev, + struct ion_buffer *buffer) +{ + struct rb_node **p = &dev->buffers.rb_node; + struct rb_node *parent = NULL; + struct ion_buffer *entry; + + while (*p) { + parent = *p; + entry = rb_entry(parent, struct ion_buffer, node); + + if (buffer < entry) { + p = &(*p)->rb_left; + } else if (buffer > entry) { + p = &(*p)->rb_right; + } else { + pr_err("%s: buffer already found.", __func__); + BUG(); + } + } + + rb_link_node(&buffer->node, parent, p); + rb_insert_color(&buffer->node, &dev->buffers); +} + +static void ion_iommu_add(struct ion_buffer *buffer, + struct ion_iommu_map *iommu) +{ + struct rb_node **p = &buffer->iommu_maps.rb_node; + struct rb_node *parent = NULL; + struct ion_iommu_map *entry; + + while (*p) { + parent = *p; + entry = rb_entry(parent, struct ion_iommu_map, node); + + if (iommu->key < entry->key) { + p = &(*p)->rb_left; + } else if (iommu->key > entry->key) { + p = &(*p)->rb_right; + } else { + pr_err("%s: buffer %p already has mapping for domain %d" + " and partition %d\n", __func__, + buffer, + iommu_map_domain(iommu), + iommu_map_partition(iommu)); + BUG(); + } + } + + rb_link_node(&iommu->node, parent, p); + rb_insert_color(&iommu->node, &buffer->iommu_maps); + +} + +static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer, + unsigned int domain_no, + unsigned int partition_no) +{ + struct rb_node **p = &buffer->iommu_maps.rb_node; + struct rb_node *parent = NULL; + struct ion_iommu_map *entry; + uint64_t key = domain_no; + key = key << 32 | partition_no; + + while (*p) { + parent = *p; + entry = rb_entry(parent, struct ion_iommu_map, node); + + if (key < entry->key) + p = &(*p)->rb_left; + else if (key > entry->key) + p = &(*p)->rb_right; + else + return entry; + } + + return NULL; +} + +/* this function should only be called while dev->lock is held */ +static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, + struct ion_device *dev, + unsigned long len, + unsigned long align, + unsigned long flags) +{ + struct ion_buffer *buffer; + int ret; + + buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL); + if (!buffer) + return ERR_PTR(-ENOMEM); + + buffer->heap = heap; + kref_init(&buffer->ref); + + ret = heap->ops->allocate(heap, buffer, len, align, flags); + if (ret) { + kfree(buffer); + return ERR_PTR(ret); + } + buffer->dev = dev; + buffer->size = len; + buffer->flags = flags; + mutex_init(&buffer->lock); + ion_buffer_add(dev, buffer); + return buffer; +} + +/** + * Check for delayed IOMMU unmapping. Also unmap any outstanding + * mappings which would otherwise have been leaked. 
+ */ +static void ion_iommu_delayed_unmap(struct ion_buffer *buffer) +{ + struct ion_iommu_map *iommu_map; + struct rb_node *node; + const struct rb_root *rb = &(buffer->iommu_maps); + unsigned long ref_count; + unsigned int delayed_unmap; + + mutex_lock(&buffer->lock); + + while ((node = rb_first(rb)) != 0) { + iommu_map = rb_entry(node, struct ion_iommu_map, node); + ref_count = atomic_read(&iommu_map->ref.refcount); + delayed_unmap = iommu_map->flags & ION_IOMMU_UNMAP_DELAYED; + + if ((delayed_unmap && ref_count > 1) || !delayed_unmap) { + pr_err("%s: Virtual memory address leak in domain %u, partition %u\n", + __func__, iommu_map->domain_info[DI_DOMAIN_NUM], + iommu_map->domain_info[DI_PARTITION_NUM]); + } + /* set ref count to 1 to force release */ + kref_init(&iommu_map->ref); + kref_put(&iommu_map->ref, ion_iommu_release); + } + + mutex_unlock(&buffer->lock); +} + +static void ion_buffer_destroy(struct kref *kref) +{ + struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref); + struct ion_device *dev = buffer->dev; + + ion_iommu_delayed_unmap(buffer); + buffer->heap->ops->free(buffer); + mutex_lock(&dev->lock); + rb_erase(&buffer->node, &dev->buffers); + mutex_unlock(&dev->lock); + kfree(buffer); +} + +static void ion_buffer_get(struct ion_buffer *buffer) +{ + kref_get(&buffer->ref); +} + +static int ion_buffer_put(struct ion_buffer *buffer) +{ + return kref_put(&buffer->ref, ion_buffer_destroy); +} + +static struct ion_handle *ion_handle_create(struct ion_client *client, + struct ion_buffer *buffer) +{ + struct ion_handle *handle; + + handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL); + if (!handle) + return ERR_PTR(-ENOMEM); + kref_init(&handle->ref); + rb_init_node(&handle->node); + handle->client = client; + ion_buffer_get(buffer); + handle->buffer = buffer; + + return handle; +} + +/* Client lock must be locked when calling */ +static void ion_handle_destroy(struct kref *kref) +{ + struct ion_handle *handle = container_of(kref, struct ion_handle, ref); + /* XXX Can a handle be destroyed while it's map count is non-zero?: + if (handle->map_cnt) unmap + */ + WARN_ON(handle->kmap_cnt || handle->dmap_cnt || handle->usermap_cnt); + ion_buffer_put(handle->buffer); + if (!RB_EMPTY_NODE(&handle->node)) + rb_erase(&handle->node, &handle->client->handles); + kfree(handle); +} + +struct ion_buffer *ion_handle_buffer(struct ion_handle *handle) +{ + return handle->buffer; +} + +static void ion_handle_get(struct ion_handle *handle) +{ + kref_get(&handle->ref); +} + +static int ion_handle_put(struct ion_handle *handle) +{ + return kref_put(&handle->ref, ion_handle_destroy); +} + +static struct ion_handle *ion_handle_lookup(struct ion_client *client, + struct ion_buffer *buffer) +{ + struct rb_node *n; + + for (n = rb_first(&client->handles); n; n = rb_next(n)) { + struct ion_handle *handle = rb_entry(n, struct ion_handle, + node); + if (handle->buffer == buffer) + return handle; + } + return NULL; +} + +static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle) +{ + struct rb_node *n = client->handles.rb_node; + + while (n) { + struct ion_handle *handle_node = rb_entry(n, struct ion_handle, + node); + if (handle < handle_node) + n = n->rb_left; + else if (handle > handle_node) + n = n->rb_right; + else + return true; + } + return false; +} + +static void ion_handle_add(struct ion_client *client, struct ion_handle *handle) +{ + struct rb_node **p = &client->handles.rb_node; + struct rb_node *parent = NULL; + struct ion_handle *entry; + + while 
(*p) { + parent = *p; + entry = rb_entry(parent, struct ion_handle, node); + + if (handle < entry) + p = &(*p)->rb_left; + else if (handle > entry) + p = &(*p)->rb_right; + else + WARN(1, "%s: buffer already found.", __func__); + } + + rb_link_node(&handle->node, parent, p); + rb_insert_color(&handle->node, &client->handles); +} + +struct ion_handle *ion_alloc(struct ion_client *client, size_t len, + size_t align, unsigned int flags) +{ + struct rb_node *n; + struct ion_handle *handle; + struct ion_device *dev = client->dev; + struct ion_buffer *buffer = NULL; + unsigned long secure_allocation = flags & ION_SECURE; + const unsigned int MAX_DBG_STR_LEN = 64; + char dbg_str[MAX_DBG_STR_LEN]; + unsigned int dbg_str_idx = 0; + + dbg_str[0] = '\0'; + + /* + * traverse the list of heaps available in this system in priority + * order. If the heap type is supported by the client, and matches the + * request of the caller allocate from it. Repeat until allocate has + * succeeded or all heaps have been tried + */ + mutex_lock(&dev->lock); + for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) { + struct ion_heap *heap = rb_entry(n, struct ion_heap, node); + /* if the client doesn't support this heap type */ + if (!((1 << heap->type) & client->heap_mask)) + continue; + /* if the caller didn't specify this heap type */ + if (!((1 << heap->id) & flags)) + continue; + /* Do not allow un-secure heap if secure is specified */ + if (secure_allocation && (heap->type != ION_HEAP_TYPE_CP)) + continue; + buffer = ion_buffer_create(heap, dev, len, align, flags); + if (!IS_ERR_OR_NULL(buffer)) + break; + if (dbg_str_idx < MAX_DBG_STR_LEN) { + unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1; + int ret_value = snprintf(&dbg_str[dbg_str_idx], + len_left, "%s ", heap->name); + if (ret_value >= len_left) { + /* overflow */ + dbg_str[MAX_DBG_STR_LEN-1] = '\0'; + dbg_str_idx = MAX_DBG_STR_LEN; + } else if (ret_value >= 0) { + dbg_str_idx += ret_value; + } else { + /* error */ + dbg_str[MAX_DBG_STR_LEN-1] = '\0'; + } + } + } + mutex_unlock(&dev->lock); + + if (IS_ERR_OR_NULL(buffer)) { + pr_debug("ION is unable to allocate 0x%x bytes (alignment: " + "0x%x) from heap(s) %sfor client %s with heap " + "mask 0x%x\n", + len, align, dbg_str, client->name, client->heap_mask); + return ERR_PTR(PTR_ERR(buffer)); + } + + handle = ion_handle_create(client, buffer); + + if (IS_ERR_OR_NULL(handle)) + goto end; + + /* + * ion_buffer_create will create a buffer with a ref_cnt of 1, + * and ion_handle_create will take a second reference, drop one here + */ + ion_buffer_put(buffer); + + mutex_lock(&client->lock); + ion_handle_add(client, handle); + mutex_unlock(&client->lock); + return handle; + +end: + ion_buffer_put(buffer); + return handle; +} +EXPORT_SYMBOL(ion_alloc); + +void ion_free(struct ion_client *client, struct ion_handle *handle) +{ + bool valid_handle; + + BUG_ON(client != handle->client); + + mutex_lock(&client->lock); + valid_handle = ion_handle_validate(client, handle); + if (!valid_handle) { + mutex_unlock(&client->lock); + WARN("%s: invalid handle passed to free.\n", __func__); + return; + } + ion_handle_put(handle); + mutex_unlock(&client->lock); +} +EXPORT_SYMBOL(ion_free); + +static void ion_client_get(struct ion_client *client); +static int ion_client_put(struct ion_client *client); + +static bool _ion_map(int *buffer_cnt, int *handle_cnt) +{ + bool map; + + BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0); + + if (*buffer_cnt) + map = false; + else + map = true; + if (*handle_cnt == 0) + (*buffer_cnt)++; 
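+	/*
+	 * The buffer count above is only bumped on a handle's first map; the
+	 * per-handle count is bumped on every call.  "map" is true only when
+	 * the buffer had no mappings at all yet, i.e. the caller still has to
+	 * create the real mapping.
+	 */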
+ (*handle_cnt)++; + return map; +} + +static bool _ion_unmap(int *buffer_cnt, int *handle_cnt) +{ + BUG_ON(*handle_cnt == 0); + (*handle_cnt)--; + if (*handle_cnt != 0) + return false; + BUG_ON(*buffer_cnt == 0); + (*buffer_cnt)--; + if (*buffer_cnt == 0) + return true; + return false; +} + +int ion_phys(struct ion_client *client, struct ion_handle *handle, + ion_phys_addr_t *addr, size_t *len) +{ + struct ion_buffer *buffer; + int ret; + + mutex_lock(&client->lock); + if (!ion_handle_validate(client, handle)) { + mutex_unlock(&client->lock); + return -EINVAL; + } + + buffer = handle->buffer; + + if (!buffer->heap->ops->phys) { + pr_err("%s: ion_phys is not implemented by this heap.\n", + __func__); + mutex_unlock(&client->lock); + return -ENODEV; + } + mutex_unlock(&client->lock); + ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len); + return ret; +} +EXPORT_SYMBOL(ion_phys); + +void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle, + unsigned long flags) +{ + struct ion_buffer *buffer; + void *vaddr; + + mutex_lock(&client->lock); + if (!ion_handle_validate(client, handle)) { + pr_err("%s: invalid handle passed to map_kernel.\n", + __func__); + mutex_unlock(&client->lock); + return ERR_PTR(-EINVAL); + } + + buffer = handle->buffer; + mutex_lock(&buffer->lock); + + if (!handle->buffer->heap->ops->map_kernel) { + pr_err("%s: map_kernel is not implemented by this heap.\n", + __func__); + mutex_unlock(&buffer->lock); + mutex_unlock(&client->lock); + return ERR_PTR(-ENODEV); + } + + if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) { + vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer, + flags); + if (IS_ERR_OR_NULL(vaddr)) + _ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt); + buffer->vaddr = vaddr; + } else { + vaddr = buffer->vaddr; + } + + mutex_unlock(&buffer->lock); + mutex_unlock(&client->lock); + return vaddr; +} +EXPORT_SYMBOL(ion_map_kernel); + +static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer, + int domain_num, int partition_num, unsigned long align, + unsigned long iova_length, unsigned long flags, + unsigned long *iova) +{ + struct ion_iommu_map *data; + int ret; + + data = kmalloc(sizeof(*data), GFP_ATOMIC); + + if (!data) + return ERR_PTR(-ENOMEM); + + data->buffer = buffer; + iommu_map_domain(data) = domain_num; + iommu_map_partition(data) = partition_num; + + ret = buffer->heap->ops->map_iommu(buffer, data, + domain_num, + partition_num, + align, + iova_length, + flags); + + if (ret) + goto out; + + kref_init(&data->ref); + *iova = data->iova_addr; + + ion_iommu_add(buffer, data); + + return data; + +out: + kfree(data); + return ERR_PTR(ret); +} + +int ion_map_iommu(struct ion_client *client, struct ion_handle *handle, + int domain_num, int partition_num, unsigned long align, + unsigned long iova_length, unsigned long *iova, + unsigned long *buffer_size, + unsigned long flags, unsigned long iommu_flags) +{ + struct ion_buffer *buffer; + struct ion_iommu_map *iommu_map; + int ret = 0; + + if (ION_IS_CACHED(flags)) { + pr_err("%s: Cannot map iommu as cached.\n", __func__); + return -EINVAL; + } + + mutex_lock(&client->lock); + if (!ion_handle_validate(client, handle)) { + pr_err("%s: invalid handle passed to map_kernel.\n", + __func__); + mutex_unlock(&client->lock); + return -EINVAL; + } + + buffer = handle->buffer; + mutex_lock(&buffer->lock); + + if (!handle->buffer->heap->ops->map_iommu) { + pr_err("%s: map_iommu is not implemented by this heap.\n", + __func__); + ret = -ENODEV; + goto out; + } + + /* + * If 
clients don't want a custom iova length, just use whatever + * the buffer size is + */ + if (!iova_length) + iova_length = buffer->size; + + if (buffer->size > iova_length) { + pr_debug("%s: iova length %lx is not at least buffer size" + " %x\n", __func__, iova_length, buffer->size); + ret = -EINVAL; + goto out; + } + + if (buffer->size & ~PAGE_MASK) { + pr_debug("%s: buffer size %x is not aligned to %lx", __func__, + buffer->size, PAGE_SIZE); + ret = -EINVAL; + goto out; + } + + if (iova_length & ~PAGE_MASK) { + pr_debug("%s: iova_length %lx is not aligned to %lx", __func__, + iova_length, PAGE_SIZE); + ret = -EINVAL; + goto out; + } + + iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num); + _ion_map(&buffer->iommu_map_cnt, &handle->iommu_map_cnt); + if (!iommu_map) { + iommu_map = __ion_iommu_map(buffer, domain_num, partition_num, + align, iova_length, flags, iova); + if (IS_ERR_OR_NULL(iommu_map)) { + _ion_unmap(&buffer->iommu_map_cnt, + &handle->iommu_map_cnt); + } else { + iommu_map->flags = iommu_flags; + + if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED) + kref_get(&iommu_map->ref); + } + } else { + if (iommu_map->flags != iommu_flags) { + pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n", + __func__, handle, + iommu_map->flags, iommu_flags); + _ion_unmap(&buffer->iommu_map_cnt, + &handle->iommu_map_cnt); + ret = -EINVAL; + } else if (iommu_map->mapped_size != iova_length) { + pr_err("%s: handle %p is already mapped with length" + " %x, trying to map with length %lx\n", + __func__, handle, iommu_map->mapped_size, + iova_length); + _ion_unmap(&buffer->iommu_map_cnt, + &handle->iommu_map_cnt); + ret = -EINVAL; + } else { + kref_get(&iommu_map->ref); + *iova = iommu_map->iova_addr; + } + } + *buffer_size = buffer->size; +out: + mutex_unlock(&buffer->lock); + mutex_unlock(&client->lock); + return ret; +} +EXPORT_SYMBOL(ion_map_iommu); + +static void ion_iommu_release(struct kref *kref) +{ + struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map, + ref); + struct ion_buffer *buffer = map->buffer; + + rb_erase(&map->node, &buffer->iommu_maps); + buffer->heap->ops->unmap_iommu(map); + kfree(map); +} + +void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle, + int domain_num, int partition_num) +{ + struct ion_iommu_map *iommu_map; + struct ion_buffer *buffer; + + mutex_lock(&client->lock); + buffer = handle->buffer; + + mutex_lock(&buffer->lock); + + iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num); + + if (!iommu_map) { + WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__, + domain_num, partition_num, buffer); + goto out; + } + + _ion_unmap(&buffer->iommu_map_cnt, &handle->iommu_map_cnt); + kref_put(&iommu_map->ref, ion_iommu_release); + +out: + mutex_unlock(&buffer->lock); + + mutex_unlock(&client->lock); + +} +EXPORT_SYMBOL(ion_unmap_iommu); + +struct scatterlist *ion_map_dma(struct ion_client *client, + struct ion_handle *handle, + unsigned long flags) +{ + struct ion_buffer *buffer; + struct scatterlist *sglist; + + mutex_lock(&client->lock); + if (!ion_handle_validate(client, handle)) { + pr_err("%s: invalid handle passed to map_dma.\n", + __func__); + mutex_unlock(&client->lock); + return ERR_PTR(-EINVAL); + } + buffer = handle->buffer; + mutex_lock(&buffer->lock); + + if (!handle->buffer->heap->ops->map_dma) { + pr_err("%s: map_kernel is not implemented by this heap.\n", + __func__); + mutex_unlock(&buffer->lock); + mutex_unlock(&client->lock); + return ERR_PTR(-ENODEV); + } 
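+	/*
+	 * Only ask the heap for an sglist on the buffer's first DMA mapping;
+	 * subsequent mappings reuse the cached buffer->sglist.
+	 */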
+ + if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) { + sglist = buffer->heap->ops->map_dma(buffer->heap, buffer); + if (IS_ERR_OR_NULL(sglist)) + _ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt); + buffer->sglist = sglist; + } else { + sglist = buffer->sglist; + } + + mutex_unlock(&buffer->lock); + mutex_unlock(&client->lock); + return sglist; +} +EXPORT_SYMBOL(ion_map_dma); + +void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle) +{ + struct ion_buffer *buffer; + + mutex_lock(&client->lock); + buffer = handle->buffer; + mutex_lock(&buffer->lock); + if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) { + buffer->heap->ops->unmap_kernel(buffer->heap, buffer); + buffer->vaddr = NULL; + } + mutex_unlock(&buffer->lock); + mutex_unlock(&client->lock); +} +EXPORT_SYMBOL(ion_unmap_kernel); + +void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle) +{ + struct ion_buffer *buffer; + + mutex_lock(&client->lock); + buffer = handle->buffer; + mutex_lock(&buffer->lock); + if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) { + buffer->heap->ops->unmap_dma(buffer->heap, buffer); + buffer->sglist = NULL; + } + mutex_unlock(&buffer->lock); + mutex_unlock(&client->lock); +} +EXPORT_SYMBOL(ion_unmap_dma); + +struct ion_buffer *ion_share(struct ion_client *client, + struct ion_handle *handle) +{ + bool valid_handle; + + mutex_lock(&client->lock); + valid_handle = ion_handle_validate(client, handle); + mutex_unlock(&client->lock); + if (!valid_handle) { + WARN("%s: invalid handle passed to share.\n", __func__); + return ERR_PTR(-EINVAL); + } + + /* do not take an extra reference here, the burden is on the caller + * to make sure the buffer doesn't go away while it's passing it + * to another client -- ion_free should not be called on this handle + * until the buffer has been imported into the other client + */ + return handle->buffer; +} +EXPORT_SYMBOL(ion_share); + +struct ion_handle *ion_import(struct ion_client *client, + struct ion_buffer *buffer) +{ + struct ion_handle *handle = NULL; + + mutex_lock(&client->lock); + /* if a handle exists for this buffer just take a reference to it */ + handle = ion_handle_lookup(client, buffer); + if (!IS_ERR_OR_NULL(handle)) { + ion_handle_get(handle); + goto end; + } + handle = ion_handle_create(client, buffer); + if (IS_ERR_OR_NULL(handle)) + goto end; + ion_handle_add(client, handle); +end: + mutex_unlock(&client->lock); + return handle; +} +EXPORT_SYMBOL(ion_import); + +int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle, + void *uaddr, unsigned long offset, unsigned long len, + unsigned int cmd) +{ + struct ion_buffer *buffer; + int ret = -EINVAL; + + mutex_lock(&client->lock); + if (!ion_handle_validate(client, handle)) { + pr_err("%s: invalid handle passed to do_cache_op.\n", + __func__); + mutex_unlock(&client->lock); + return -EINVAL; + } + buffer = handle->buffer; + mutex_lock(&buffer->lock); + + if (!ION_IS_CACHED(buffer->flags)) { + ret = 0; + goto out; + } + + if (!handle->buffer->heap->ops->cache_op) { + pr_err("%s: cache_op is not implemented by this heap.\n", + __func__); + ret = -ENODEV; + goto out; + } + + + ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr, + offset, len, cmd); + +out: + mutex_unlock(&buffer->lock); + mutex_unlock(&client->lock); + return ret; + +} + +static const struct file_operations ion_share_fops; + +struct ion_handle *ion_import_fd(struct ion_client *client, int fd) +{ + struct file *file = fget(fd); + struct ion_handle *handle; + + if (!file) { + 
pr_err("%s: imported fd not found in file table.\n", __func__); + return ERR_PTR(-EINVAL); + } + if (file->f_op != &ion_share_fops) { + pr_err("%s: imported file %s is not a shared ion" + " file.", __func__, file->f_dentry->d_name.name); + handle = ERR_PTR(-EINVAL); + goto end; + } + handle = ion_import(client, file->private_data); +end: + fput(file); + return handle; +} +EXPORT_SYMBOL(ion_import_fd); + +static int ion_debug_client_show(struct seq_file *s, void *unused) +{ + struct ion_client *client = s->private; + struct rb_node *n; + + seq_printf(s, "%16.16s: %16.16s : %16.16s : %16.16s\n", "heap_name", + "size_in_bytes", "handle refcount", "buffer"); + mutex_lock(&client->lock); + for (n = rb_first(&client->handles); n; n = rb_next(n)) { + struct ion_handle *handle = rb_entry(n, struct ion_handle, + node); + + seq_printf(s, "%16.16s: %16x : %16d : %16p\n", + handle->buffer->heap->name, + handle->buffer->size, + atomic_read(&handle->ref.refcount), + handle->buffer); + } + + seq_printf(s, "%16.16s %d\n", "client refcount:", + atomic_read(&client->ref.refcount)); + mutex_unlock(&client->lock); + + return 0; +} + +static int ion_debug_client_open(struct inode *inode, struct file *file) +{ + return single_open(file, ion_debug_client_show, inode->i_private); +} + +static const struct file_operations debug_client_fops = { + .open = ion_debug_client_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct ion_client *ion_client_lookup(struct ion_device *dev, + struct task_struct *task) +{ + struct rb_node *n = dev->user_clients.rb_node; + struct ion_client *client; + + mutex_lock(&dev->lock); + while (n) { + client = rb_entry(n, struct ion_client, node); + if (task == client->task) { + ion_client_get(client); + mutex_unlock(&dev->lock); + return client; + } else if (task < client->task) { + n = n->rb_left; + } else if (task > client->task) { + n = n->rb_right; + } + } + mutex_unlock(&dev->lock); + return NULL; +} + +struct ion_client *ion_client_create(struct ion_device *dev, + unsigned int heap_mask, + const char *name) +{ + struct ion_client *client; + struct task_struct *task; + struct rb_node **p; + struct rb_node *parent = NULL; + struct ion_client *entry; + pid_t pid; + unsigned int name_len = strnlen(name, 64); + + get_task_struct(current->group_leader); + task_lock(current->group_leader); + pid = task_pid_nr(current->group_leader); + /* don't bother to store task struct for kernel threads, + they can't be killed anyway */ + if (current->group_leader->flags & PF_KTHREAD) { + put_task_struct(current->group_leader); + task = NULL; + } else { + task = current->group_leader; + } + task_unlock(current->group_leader); + + /* if this isn't a kernel thread, see if a client already + exists */ + if (task) { + client = ion_client_lookup(dev, task); + if (!IS_ERR_OR_NULL(client)) { + put_task_struct(current->group_leader); + return client; + } + } + + client = kzalloc(sizeof(struct ion_client), GFP_KERNEL); + if (!client) { + put_task_struct(current->group_leader); + return ERR_PTR(-ENOMEM); + } + + client->dev = dev; + client->handles = RB_ROOT; + mutex_init(&client->lock); + + client->name = kzalloc(name_len+1, GFP_KERNEL); + if (!client->name) { + put_task_struct(current->group_leader); + kfree(client); + return ERR_PTR(-ENOMEM); + } else { + strlcpy(client->name, name, name_len+1); + } + + client->heap_mask = heap_mask; + client->task = task; + client->pid = pid; + kref_init(&client->ref); + + mutex_lock(&dev->lock); + if (task) { + p = 
&dev->user_clients.rb_node; + while (*p) { + parent = *p; + entry = rb_entry(parent, struct ion_client, node); + + if (task < entry->task) + p = &(*p)->rb_left; + else if (task > entry->task) + p = &(*p)->rb_right; + } + rb_link_node(&client->node, parent, p); + rb_insert_color(&client->node, &dev->user_clients); + } else { + p = &dev->kernel_clients.rb_node; + while (*p) { + parent = *p; + entry = rb_entry(parent, struct ion_client, node); + + if (client < entry) + p = &(*p)->rb_left; + else if (client > entry) + p = &(*p)->rb_right; + } + rb_link_node(&client->node, parent, p); + rb_insert_color(&client->node, &dev->kernel_clients); + } + + + client->debug_root = debugfs_create_file(name, 0664, + dev->debug_root, client, + &debug_client_fops); + mutex_unlock(&dev->lock); + + return client; +} + +static void _ion_client_destroy(struct kref *kref) +{ + struct ion_client *client = container_of(kref, struct ion_client, ref); + struct ion_device *dev = client->dev; + struct rb_node *n; + + pr_debug("%s: %d\n", __func__, __LINE__); + while ((n = rb_first(&client->handles))) { + struct ion_handle *handle = rb_entry(n, struct ion_handle, + node); + ion_handle_destroy(&handle->ref); + } + mutex_lock(&dev->lock); + if (client->task) { + rb_erase(&client->node, &dev->user_clients); + put_task_struct(client->task); + } else { + rb_erase(&client->node, &dev->kernel_clients); + } + debugfs_remove_recursive(client->debug_root); + mutex_unlock(&dev->lock); + + kfree(client->name); + kfree(client); +} + +static void ion_client_get(struct ion_client *client) +{ + kref_get(&client->ref); +} + +static int ion_client_put(struct ion_client *client) +{ + return kref_put(&client->ref, _ion_client_destroy); +} + +void ion_client_destroy(struct ion_client *client) +{ + if (client) + ion_client_put(client); +} +EXPORT_SYMBOL(ion_client_destroy); + +int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle, + unsigned long *flags) +{ + struct ion_buffer *buffer; + + mutex_lock(&client->lock); + if (!ion_handle_validate(client, handle)) { + pr_err("%s: invalid handle passed to %s.\n", + __func__, __func__); + mutex_unlock(&client->lock); + return -EINVAL; + } + buffer = handle->buffer; + mutex_lock(&buffer->lock); + /* + * Make sure we only return FLAGS. buffer->flags also holds + * the heap_mask, so we need to make sure we're only looking + * at the supported Ion flags. 
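+ * Only the cached and secure bits are reported back to the caller.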
+ */ + *flags = buffer->flags & (ION_FLAG_CACHED | ION_SECURE); + mutex_unlock(&buffer->lock); + mutex_unlock(&client->lock); + + return 0; +} +EXPORT_SYMBOL(ion_handle_get_flags); + +int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle, + unsigned long *size) +{ + struct ion_buffer *buffer; + + mutex_lock(&client->lock); + if (!ion_handle_validate(client, handle)) { + pr_err("%s: invalid handle passed to %s.\n", + __func__, __func__); + mutex_unlock(&client->lock); + return -EINVAL; + } + buffer = handle->buffer; + mutex_lock(&buffer->lock); + *size = buffer->size; + mutex_unlock(&buffer->lock); + mutex_unlock(&client->lock); + + return 0; +} +EXPORT_SYMBOL(ion_handle_get_size); + +static int ion_share_release(struct inode *inode, struct file* file) +{ + struct ion_buffer *buffer = file->private_data; + + pr_debug("%s: %d\n", __func__, __LINE__); + /* drop the reference to the buffer -- this prevents the + buffer from going away because the client holding it exited + while it was being passed */ + ion_buffer_put(buffer); + return 0; +} + +static void ion_vma_open(struct vm_area_struct *vma) +{ + + struct ion_buffer *buffer = vma->vm_file->private_data; + struct ion_handle *handle = vma->vm_private_data; + struct ion_client *client; + + pr_debug("%s: %d\n", __func__, __LINE__); + /* check that the client still exists and take a reference so + it can't go away until this vma is closed */ + client = ion_client_lookup(buffer->dev, current->group_leader); + if (IS_ERR_OR_NULL(client)) { + vma->vm_private_data = NULL; + return; + } + ion_handle_get(handle); + mutex_lock(&buffer->lock); + buffer->umap_cnt++; + mutex_unlock(&buffer->lock); + pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n", + __func__, __LINE__, + atomic_read(&client->ref.refcount), + atomic_read(&handle->ref.refcount), + atomic_read(&buffer->ref.refcount)); +} + +static void ion_vma_close(struct vm_area_struct *vma) +{ + struct ion_handle *handle = vma->vm_private_data; + struct ion_buffer *buffer = vma->vm_file->private_data; + struct ion_client *client; + + pr_debug("%s: %d\n", __func__, __LINE__); + /* this indicates the client is gone, nothing to do here */ + if (!handle) + return; + client = handle->client; + mutex_lock(&buffer->lock); + buffer->umap_cnt--; + mutex_unlock(&buffer->lock); + + if (buffer->heap->ops->unmap_user) + buffer->heap->ops->unmap_user(buffer->heap, buffer); + + + pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n", + __func__, __LINE__, + atomic_read(&client->ref.refcount), + atomic_read(&handle->ref.refcount), + atomic_read(&buffer->ref.refcount)); + mutex_lock(&client->lock); + ion_handle_put(handle); + mutex_unlock(&client->lock); + ion_client_put(client); + pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n", + __func__, __LINE__, + atomic_read(&client->ref.refcount), + atomic_read(&handle->ref.refcount), + atomic_read(&buffer->ref.refcount)); +} + +static struct vm_operations_struct ion_vm_ops = { + .open = ion_vma_open, + .close = ion_vma_close, +}; + +static int ion_share_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct ion_buffer *buffer = file->private_data; + unsigned long size = vma->vm_end - vma->vm_start; + struct ion_client *client; + struct ion_handle *handle; + int ret; + unsigned long flags = buffer->flags; + + pr_debug("%s: %d\n", __func__, __LINE__); + /* make sure the client still exists, it's possible for the client to + have gone away but the map/share fd still to be around, take + a reference to it so it can't 
go away while this mapping exists */ + client = ion_client_lookup(buffer->dev, current->group_leader); + if (IS_ERR_OR_NULL(client)) { + pr_err("%s: trying to mmap an ion handle in a process with no " + "ion client\n", __func__); + return -EINVAL; + } + + if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) > + buffer->size)) { + pr_err("%s: trying to map larger area than handle has available" + "\n", __func__); + ret = -EINVAL; + goto err; + } + + /* find the handle and take a reference to it */ + handle = ion_import(client, buffer); + if (IS_ERR_OR_NULL(handle)) { + ret = -EINVAL; + goto err; + } + + if (!handle->buffer->heap->ops->map_user) { + pr_err("%s: this heap does not define a method for mapping " + "to userspace\n", __func__); + ret = -EINVAL; + goto err1; + } + + mutex_lock(&buffer->lock); + + /* now map it to userspace */ + ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma, + flags); + + buffer->umap_cnt++; + if (ret) { + pr_err("%s: failure mapping buffer to userspace\n", + __func__); + goto err2; + } + mutex_unlock(&buffer->lock); + + vma->vm_ops = &ion_vm_ops; + /* move the handle into the vm_private_data so we can access it from + vma_open/close */ + vma->vm_private_data = handle; + pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n", + __func__, __LINE__, + atomic_read(&client->ref.refcount), + atomic_read(&handle->ref.refcount), + atomic_read(&buffer->ref.refcount)); + return 0; + +err2: + buffer->umap_cnt--; + mutex_unlock(&buffer->lock); + /* drop the reference to the handle */ +err1: + mutex_lock(&client->lock); + ion_handle_put(handle); + mutex_unlock(&client->lock); +err: + /* drop the reference to the client */ + ion_client_put(client); + return ret; +} + +static const struct file_operations ion_share_fops = { + .owner = THIS_MODULE, + .release = ion_share_release, + .mmap = ion_share_mmap, +}; + +static int ion_ioctl_share(struct file *parent, struct ion_client *client, + struct ion_handle *handle) +{ + int fd = get_unused_fd(); + struct file *file; + + if (fd < 0) + return -ENFILE; + + file = anon_inode_getfile("ion_share_fd", &ion_share_fops, + handle->buffer, O_RDWR); + if (IS_ERR_OR_NULL(file)) + goto err; + + ion_buffer_get(handle->buffer); + fd_install(fd, file); + + return fd; + +err: + put_unused_fd(fd); + return -ENFILE; +} + +static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct ion_client *client = filp->private_data; + + switch (cmd) { + case ION_IOC_ALLOC: + { + struct ion_allocation_data data; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + data.flags |= data.heap_mask; + data.handle = ion_alloc(client, data.len, data.align, + data.flags); + + if (IS_ERR_OR_NULL(data.handle)) + return -ENOMEM; + + if (copy_to_user((void __user *)arg, &data, sizeof(data))) + return -EFAULT; + break; + } + case ION_IOC_FREE: + { + struct ion_handle_data data; + bool valid; + + if (copy_from_user(&data, (void __user *)arg, + sizeof(struct ion_handle_data))) + return -EFAULT; + mutex_lock(&client->lock); + valid = ion_handle_validate(client, data.handle); + mutex_unlock(&client->lock); + if (!valid) + return -EINVAL; + ion_free(client, data.handle); + break; + } + case ION_IOC_MAP: + case ION_IOC_SHARE: + { + struct ion_fd_data data; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + mutex_lock(&client->lock); + if (!ion_handle_validate(client, data.handle)) { + pr_err("%s: invalid handle passed to share ioctl.\n", + __func__); + 
mutex_unlock(&client->lock); + return -EINVAL; + } + data.fd = ion_ioctl_share(filp, client, data.handle); + mutex_unlock(&client->lock); + if (copy_to_user((void __user *)arg, &data, sizeof(data))) + return -EFAULT; + break; + } + case ION_IOC_IMPORT: + { + struct ion_fd_data data; + if (copy_from_user(&data, (void __user *)arg, + sizeof(struct ion_fd_data))) + return -EFAULT; + + data.handle = ion_import_fd(client, data.fd); + if (IS_ERR(data.handle)) + data.handle = NULL; + if (copy_to_user((void __user *)arg, &data, + sizeof(struct ion_fd_data))) + return -EFAULT; + break; + } + case ION_IOC_CUSTOM: + { + struct ion_device *dev = client->dev; + struct ion_custom_data data; + + if (!dev->custom_ioctl) + return -ENOTTY; + if (copy_from_user(&data, (void __user *)arg, + sizeof(struct ion_custom_data))) + return -EFAULT; + return dev->custom_ioctl(client, data.cmd, data.arg); + } + case ION_IOC_CLEAN_CACHES: + case ION_IOC_INV_CACHES: + case ION_IOC_CLEAN_INV_CACHES: + case ION_IOC_GET_FLAGS: + return client->dev->custom_ioctl(client, cmd, arg); + default: + return -ENOTTY; + } + return 0; +} + +static int ion_release(struct inode *inode, struct file *file) +{ + struct ion_client *client = file->private_data; + + pr_debug("%s: %d\n", __func__, __LINE__); + ion_client_put(client); + return 0; +} + +static int ion_open(struct inode *inode, struct file *file) +{ + struct miscdevice *miscdev = file->private_data; + struct ion_device *dev = container_of(miscdev, struct ion_device, dev); + struct ion_client *client; + char debug_name[64]; + + pr_debug("%s: %d\n", __func__, __LINE__); + snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader)); + client = ion_client_create(dev, -1, debug_name); + if (IS_ERR_OR_NULL(client)) + return PTR_ERR(client); + file->private_data = client; + + return 0; +} + +static const struct file_operations ion_fops = { + .owner = THIS_MODULE, + .open = ion_open, + .release = ion_release, + .unlocked_ioctl = ion_ioctl, +}; + +static size_t ion_debug_heap_total(struct ion_client *client, + enum ion_heap_ids id) +{ + size_t size = 0; + struct rb_node *n; + + mutex_lock(&client->lock); + for (n = rb_first(&client->handles); n; n = rb_next(n)) { + struct ion_handle *handle = rb_entry(n, + struct ion_handle, + node); + if (handle->buffer->heap->id == id) + size += handle->buffer->size; + } + mutex_unlock(&client->lock); + return size; +} + +static int ion_debug_heap_show(struct seq_file *s, void *unused) +{ + struct ion_heap *heap = s->private; + struct ion_device *dev = heap->dev; + struct rb_node *n; + + seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size"); + for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) { + struct ion_client *client = rb_entry(n, struct ion_client, + node); + char task_comm[TASK_COMM_LEN]; + size_t size = ion_debug_heap_total(client, heap->id); + if (!size) + continue; + + get_task_comm(task_comm, client->task); + seq_printf(s, "%16.s %16u %16x\n", task_comm, client->pid, + size); + } + + for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) { + struct ion_client *client = rb_entry(n, struct ion_client, + node); + size_t size = ion_debug_heap_total(client, heap->id); + if (!size) + continue; + seq_printf(s, "%16.s %16u %16x\n", client->name, client->pid, + size); + } + if (heap->ops->print_debug) + heap->ops->print_debug(heap, s); + return 0; +} + +static int ion_debug_heap_open(struct inode *inode, struct file *file) +{ + return single_open(file, ion_debug_heap_show, inode->i_private); +} + +static const struct 
file_operations debug_heap_fops = { + .open = ion_debug_heap_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) +{ + struct rb_node **p = &dev->heaps.rb_node; + struct rb_node *parent = NULL; + struct ion_heap *entry; + + heap->dev = dev; + mutex_lock(&dev->lock); + while (*p) { + parent = *p; + entry = rb_entry(parent, struct ion_heap, node); + + if (heap->id < entry->id) { + p = &(*p)->rb_left; + } else if (heap->id > entry->id ) { + p = &(*p)->rb_right; + } else { + pr_err("%s: can not insert multiple heaps with " + "id %d\n", __func__, heap->id); + goto end; + } + } + + rb_link_node(&heap->node, parent, p); + rb_insert_color(&heap->node, &dev->heaps); + debugfs_create_file(heap->name, 0664, dev->debug_root, heap, + &debug_heap_fops); +end: + mutex_unlock(&dev->lock); +} + +int ion_secure_heap(struct ion_device *dev, int heap_id) +{ + struct rb_node *n; + int ret_val = 0; + + /* + * traverse the list of heaps available in this system + * and find the heap that is specified. + */ + mutex_lock(&dev->lock); + for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) { + struct ion_heap *heap = rb_entry(n, struct ion_heap, node); + if (heap->type != ION_HEAP_TYPE_CP) + continue; + if (ION_HEAP(heap->id) != heap_id) + continue; + if (heap->ops->secure_heap) + ret_val = heap->ops->secure_heap(heap); + else + ret_val = -EINVAL; + break; + } + mutex_unlock(&dev->lock); + return ret_val; +} + +int ion_unsecure_heap(struct ion_device *dev, int heap_id) +{ + struct rb_node *n; + int ret_val = 0; + + /* + * traverse the list of heaps available in this system + * and find the heap that is specified. + */ + mutex_lock(&dev->lock); + for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) { + struct ion_heap *heap = rb_entry(n, struct ion_heap, node); + if (heap->type != ION_HEAP_TYPE_CP) + continue; + if (ION_HEAP(heap->id) != heap_id) + continue; + if (heap->ops->secure_heap) + ret_val = heap->ops->unsecure_heap(heap); + else + ret_val = -EINVAL; + break; + } + mutex_unlock(&dev->lock); + return ret_val; +} + +static int ion_debug_leak_show(struct seq_file *s, void *unused) +{ + struct ion_device *dev = s->private; + struct rb_node *n; + struct rb_node *n2; + + /* mark all buffers as 1 */ + seq_printf(s, "%16.s %16.s %16.s %16.s\n", "buffer", "heap", "size", + "ref cnt"); + mutex_lock(&dev->lock); + for (n = rb_first(&dev->buffers); n; n = rb_next(n)) { + struct ion_buffer *buf = rb_entry(n, struct ion_buffer, + node); + + buf->marked = 1; + } + + /* now see which buffers we can access */ + for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) { + struct ion_client *client = rb_entry(n, struct ion_client, + node); + + mutex_lock(&client->lock); + for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) { + struct ion_handle *handle = rb_entry(n2, + struct ion_handle, node); + + handle->buffer->marked = 0; + + } + mutex_unlock(&client->lock); + + } + + for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) { + struct ion_client *client = rb_entry(n, struct ion_client, + node); + + mutex_lock(&client->lock); + for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) { + struct ion_handle *handle = rb_entry(n2, + struct ion_handle, node); + + handle->buffer->marked = 0; + + } + mutex_unlock(&client->lock); + + } + /* And anyone still marked as a 1 means a leaked handle somewhere */ + for (n = rb_first(&dev->buffers); n; n = rb_next(n)) { + struct ion_buffer *buf = rb_entry(n, 
struct ion_buffer, + node); + + if (buf->marked == 1) + seq_printf(s, "%16.x %16.s %16.x %16.d\n", + (int)buf, buf->heap->name, buf->size, + atomic_read(&buf->ref.refcount)); + } + mutex_unlock(&dev->lock); + return 0; +} + +static int ion_debug_leak_open(struct inode *inode, struct file *file) +{ + return single_open(file, ion_debug_leak_show, inode->i_private); +} + +static const struct file_operations debug_leak_fops = { + .open = ion_debug_leak_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + + + +struct ion_device *ion_device_create(long (*custom_ioctl) + (struct ion_client *client, + unsigned int cmd, + unsigned long arg)) +{ + struct ion_device *idev; + int ret; + + idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL); + if (!idev) + return ERR_PTR(-ENOMEM); + + idev->dev.minor = MISC_DYNAMIC_MINOR; + idev->dev.name = "ion"; + idev->dev.fops = &ion_fops; + idev->dev.parent = NULL; + ret = misc_register(&idev->dev); + if (ret) { + pr_err("ion: failed to register misc device.\n"); + return ERR_PTR(ret); + } + + idev->debug_root = debugfs_create_dir("ion", NULL); + if (IS_ERR_OR_NULL(idev->debug_root)) + pr_err("ion: failed to create debug files.\n"); + + idev->custom_ioctl = custom_ioctl; + idev->buffers = RB_ROOT; + mutex_init(&idev->lock); + idev->heaps = RB_ROOT; + idev->user_clients = RB_ROOT; + idev->kernel_clients = RB_ROOT; + debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev, + &debug_leak_fops); + return idev; +} + +void ion_device_destroy(struct ion_device *dev) +{ + misc_deregister(&dev->dev); + /* XXX need to free the heaps and clients ? */ + kfree(dev); +} diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c new file mode 100644 index 0000000000000..cc283d53c38ff --- /dev/null +++ b/drivers/gpu/ion/ion_carveout_heap.c @@ -0,0 +1,446 @@ +/* + * drivers/gpu/ion/ion_carveout_heap.c + * + * Copyright (C) 2011 Google, Inc. + * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ion_priv.h" + +#include +#include +#include + +struct ion_carveout_heap { + struct ion_heap heap; + struct gen_pool *pool; + ion_phys_addr_t base; + unsigned long allocated_bytes; + unsigned long total_size; + int (*request_region)(void *); + int (*release_region)(void *); + atomic_t map_count; + void *bus_id; + unsigned int has_outer_cache; +}; + +ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, + unsigned long size, + unsigned long align) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + unsigned long offset = gen_pool_alloc_aligned(carveout_heap->pool, + size, ilog2(align)); + + if (!offset) { + if ((carveout_heap->total_size - + carveout_heap->allocated_bytes) > size) + pr_debug("%s: heap %s has enough memory (%lx) but" + " the allocation of size %lx still failed." 
+ " Memory is probably fragmented.", + __func__, heap->name, + carveout_heap->total_size - + carveout_heap->allocated_bytes, size); + return ION_CARVEOUT_ALLOCATE_FAIL; + } + + carveout_heap->allocated_bytes += size; + return offset; +} + +void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, + unsigned long size) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + + if (addr == ION_CARVEOUT_ALLOCATE_FAIL) + return; + gen_pool_free(carveout_heap->pool, addr, size); + carveout_heap->allocated_bytes -= size; +} + +static int ion_carveout_heap_phys(struct ion_heap *heap, + struct ion_buffer *buffer, + ion_phys_addr_t *addr, size_t *len) +{ + *addr = buffer->priv_phys; + *len = buffer->size; + return 0; +} + +static int ion_carveout_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long size, unsigned long align, + unsigned long flags) +{ + buffer->priv_phys = ion_carveout_allocate(heap, size, align); + return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0; +} + +static void ion_carveout_heap_free(struct ion_buffer *buffer) +{ + struct ion_heap *heap = buffer->heap; + + ion_carveout_free(heap, buffer->priv_phys, buffer->size); + buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL; +} + +struct scatterlist *ion_carveout_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct scatterlist *sglist; + + sglist = vmalloc(sizeof(struct scatterlist)); + if (!sglist) + return ERR_PTR(-ENOMEM); + + sg_init_table(sglist, 1); + sglist->length = buffer->size; + sglist->offset = 0; + sglist->dma_address = buffer->priv_phys; + + return sglist; +} + +void ion_carveout_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + if (buffer->sglist) + vfree(buffer->sglist); +} + +static int ion_carveout_request_region(struct ion_carveout_heap *carveout_heap) +{ + int ret_value = 0; + if (atomic_inc_return(&carveout_heap->map_count) == 1) { + if (carveout_heap->request_region) { + ret_value = carveout_heap->request_region( + carveout_heap->bus_id); + if (ret_value) { + pr_err("Unable to request SMI region"); + atomic_dec(&carveout_heap->map_count); + } + } + } + return ret_value; +} + +static int ion_carveout_release_region(struct ion_carveout_heap *carveout_heap) +{ + int ret_value = 0; + if (atomic_dec_and_test(&carveout_heap->map_count)) { + if (carveout_heap->release_region) { + ret_value = carveout_heap->release_region( + carveout_heap->bus_id); + if (ret_value) + pr_err("Unable to release SMI region"); + } + } + return ret_value; +} + +void *ion_carveout_heap_map_kernel(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long flags) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + void *ret_value; + + if (ion_carveout_request_region(carveout_heap)) + return NULL; + + if (ION_IS_CACHED(flags)) + ret_value = ioremap_cached(buffer->priv_phys, buffer->size); + else + ret_value = ioremap(buffer->priv_phys, buffer->size); + + if (!ret_value) + ion_carveout_release_region(carveout_heap); + return ret_value; +} + +void ion_carveout_heap_unmap_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + + __arch_iounmap(buffer->vaddr); + buffer->vaddr = NULL; + + ion_carveout_release_region(carveout_heap); + return; +} + +int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, + struct 
vm_area_struct *vma, unsigned long flags) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + int ret_value = 0; + + if (ion_carveout_request_region(carveout_heap)) + return -EINVAL; + + if (!ION_IS_CACHED(flags)) + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + + ret_value = remap_pfn_range(vma, vma->vm_start, + __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); + + if (ret_value) + ion_carveout_release_region(carveout_heap); + return ret_value; +} + +void ion_carveout_heap_unmap_user(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + ion_carveout_release_region(carveout_heap); +} + +int ion_carveout_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer, + void *vaddr, unsigned int offset, unsigned int length, + unsigned int cmd) +{ + void (*outer_cache_op)(phys_addr_t, phys_addr_t); + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + + switch (cmd) { + case ION_IOC_CLEAN_CACHES: + dmac_clean_range(vaddr, vaddr + length); + outer_cache_op = outer_clean_range; + break; + case ION_IOC_INV_CACHES: + dmac_inv_range(vaddr, vaddr + length); + outer_cache_op = outer_inv_range; + break; + case ION_IOC_CLEAN_INV_CACHES: + dmac_flush_range(vaddr, vaddr + length); + outer_cache_op = outer_flush_range; + break; + default: + return -EINVAL; + } + + if (carveout_heap->has_outer_cache) { + unsigned long pstart = buffer->priv_phys + offset; + outer_cache_op(pstart, pstart + length); + } + return 0; +} + +static int ion_carveout_print_debug(struct ion_heap *heap, struct seq_file *s) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + + seq_printf(s, "total bytes currently allocated: %lx\n", + carveout_heap->allocated_bytes); + seq_printf(s, "total heap size: %lx\n", carveout_heap->total_size); + + return 0; +} + +int ion_carveout_heap_map_iommu(struct ion_buffer *buffer, + struct ion_iommu_map *data, + unsigned int domain_num, + unsigned int partition_num, + unsigned long align, + unsigned long iova_length, + unsigned long flags) +{ + struct iommu_domain *domain; + int ret = 0; + unsigned long extra; + struct scatterlist *sglist = 0; + int prot = IOMMU_WRITE | IOMMU_READ; + prot |= ION_IS_CACHED(flags) ? 
IOMMU_CACHE : 0; + + data->mapped_size = iova_length; + + if (!msm_use_iommu()) { + data->iova_addr = buffer->priv_phys; + return 0; + } + + extra = iova_length - buffer->size; + + data->iova_addr = msm_allocate_iova_address(domain_num, partition_num, + data->mapped_size, align); + + if (!data->iova_addr) { + ret = -ENOMEM; + goto out; + } + + domain = msm_get_iommu_domain(domain_num); + + if (!domain) { + ret = -ENOMEM; + goto out1; + } + + sglist = vmalloc(sizeof(*sglist)); + if (!sglist) + goto out1; + + sg_init_table(sglist, 1); + sglist->length = buffer->size; + sglist->offset = 0; + sglist->dma_address = buffer->priv_phys; + + ret = iommu_map_range(domain, data->iova_addr, sglist, + buffer->size, prot); + if (ret) { + pr_err("%s: could not map %lx in domain %p\n", + __func__, data->iova_addr, domain); + goto out1; + } + + if (extra) { + unsigned long extra_iova_addr = data->iova_addr + buffer->size; + ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, + SZ_4K, prot); + if (ret) + goto out2; + } + vfree(sglist); + return ret; + +out2: + iommu_unmap_range(domain, data->iova_addr, buffer->size); +out1: + vfree(sglist); + msm_free_iova_address(data->iova_addr, domain_num, partition_num, + data->mapped_size); + +out: + + return ret; +} + +void ion_carveout_heap_unmap_iommu(struct ion_iommu_map *data) +{ + unsigned int domain_num; + unsigned int partition_num; + struct iommu_domain *domain; + + if (!msm_use_iommu()) + return; + + domain_num = iommu_map_domain(data); + partition_num = iommu_map_partition(data); + + domain = msm_get_iommu_domain(domain_num); + + if (!domain) { + WARN(1, "Could not get domain %d. Corruption?\n", domain_num); + return; + } + + iommu_unmap_range(domain, data->iova_addr, data->mapped_size); + msm_free_iova_address(data->iova_addr, domain_num, partition_num, + data->mapped_size); + + return; +} + +static struct ion_heap_ops carveout_heap_ops = { + .allocate = ion_carveout_heap_allocate, + .free = ion_carveout_heap_free, + .phys = ion_carveout_heap_phys, + .map_user = ion_carveout_heap_map_user, + .map_kernel = ion_carveout_heap_map_kernel, + .unmap_user = ion_carveout_heap_unmap_user, + .unmap_kernel = ion_carveout_heap_unmap_kernel, + .map_dma = ion_carveout_heap_map_dma, + .unmap_dma = ion_carveout_heap_unmap_dma, + .cache_op = ion_carveout_cache_ops, + .print_debug = ion_carveout_print_debug, + .map_iommu = ion_carveout_heap_map_iommu, + .unmap_iommu = ion_carveout_heap_unmap_iommu, +}; + +struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data) +{ + struct ion_carveout_heap *carveout_heap; + int ret; + + carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL); + if (!carveout_heap) + return ERR_PTR(-ENOMEM); + + carveout_heap->pool = gen_pool_create(12, -1); + if (!carveout_heap->pool) { + kfree(carveout_heap); + return ERR_PTR(-ENOMEM); + } + carveout_heap->base = heap_data->base; + ret = gen_pool_add(carveout_heap->pool, carveout_heap->base, + heap_data->size, -1); + if (ret < 0) { + gen_pool_destroy(carveout_heap->pool); + kfree(carveout_heap); + return ERR_PTR(-EINVAL); + } + carveout_heap->heap.ops = &carveout_heap_ops; + carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT; + carveout_heap->allocated_bytes = 0; + carveout_heap->total_size = heap_data->size; + carveout_heap->has_outer_cache = heap_data->has_outer_cache; + + if (heap_data->extra_data) { + struct ion_co_heap_pdata *extra_data = + heap_data->extra_data; + + if (extra_data->setup_region) + carveout_heap->bus_id = extra_data->setup_region(); + if 
(extra_data->request_region) + carveout_heap->request_region = + extra_data->request_region; + if (extra_data->release_region) + carveout_heap->release_region = + extra_data->release_region; + } + return &carveout_heap->heap; +} + +void ion_carveout_heap_destroy(struct ion_heap *heap) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + + gen_pool_destroy(carveout_heap->pool); + kfree(carveout_heap); + carveout_heap = NULL; +} diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c new file mode 100644 index 0000000000000..ced6fe5fda954 --- /dev/null +++ b/drivers/gpu/ion/ion_cp_heap.c @@ -0,0 +1,978 @@ +/* + * drivers/gpu/ion/ion_cp_heap.c + * + * Copyright (C) 2011 Google, Inc. + * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include "ion_priv.h" + +#include +#include + +/** + * struct ion_cp_heap - container for the heap and shared heap data + + * @heap: the heap information structure + * @pool: memory pool to allocate from. + * @base: the base address of the memory pool. + * @permission_type: Identifier for the memory used by SCM for protecting + * and unprotecting memory. + * @secure_base: Base address used when securing a heap that is shared. + * @secure_size: Size used when securing a heap that is shared. + * @lock: mutex to protect shared access. + * @heap_protected: Indicates whether heap has been protected or not. + * @allocated_bytes: the total number of allocated bytes from the pool. + * @total_size: the total size of the memory pool. + * @request_region: function pointer to call when first mapping of memory + * occurs. + * @release_region: function pointer to call when last mapping of memory + * unmapped. + * @bus_id: token used with request/release region. + * @kmap_cached_count: the total number of times this heap has been mapped in + * kernel space (cached). + * @kmap_uncached_count:the total number of times this heap has been mapped in + * kernel space (un-cached). + * @umap_count: the total number of times this heap has been mapped in + * user space. + * @iommu_iova: saved iova when mapping full heap at once. + * @iommu_partition: partition used to map full heap. + * @reusable: indicates if the memory should be reused via fmem. + * @reserved_vrange: reserved virtual address range for use with fmem + * @iommu_map_all: Indicates whether we should map whole heap into IOMMU. + * @iommu_2x_map_domain: Indicates the domain to use for overmapping. + * @has_outer_cache: set to 1 if outer cache is used, 0 otherwise. 
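+ * @protect_cnt:	count of outstanding protect requests; the heap is only
+ *			unprotected again once this drops back to zero.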
+*/ +struct ion_cp_heap { + struct ion_heap heap; + struct gen_pool *pool; + ion_phys_addr_t base; + unsigned int permission_type; + ion_phys_addr_t secure_base; + size_t secure_size; + struct mutex lock; + unsigned int heap_protected; + unsigned long allocated_bytes; + unsigned long total_size; + int (*request_region)(void *); + int (*release_region)(void *); + void *bus_id; + unsigned long kmap_cached_count; + unsigned long kmap_uncached_count; + unsigned long umap_count; + unsigned long iommu_iova[MAX_DOMAINS]; + unsigned long iommu_partition[MAX_DOMAINS]; + int reusable; + void *reserved_vrange; + int iommu_map_all; + int iommu_2x_map_domain; + unsigned int has_outer_cache; + atomic_t protect_cnt; +}; + +enum { + HEAP_NOT_PROTECTED = 0, + HEAP_PROTECTED = 1, +}; + +static int ion_cp_protect_mem(unsigned int phy_base, unsigned int size, + unsigned int permission_type); + +static int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size, + unsigned int permission_type); + +/** + * Get the total number of kernel mappings. + * Must be called with heap->lock locked. + */ +static unsigned long ion_cp_get_total_kmap_count( + const struct ion_cp_heap *cp_heap) +{ + return cp_heap->kmap_cached_count + cp_heap->kmap_uncached_count; +} + +/** + * Protects memory if heap is unsecured heap. Also ensures that we are in + * the correct FMEM state if this heap is a reusable heap. + * Must be called with heap->lock locked. + */ +static int ion_cp_protect(struct ion_heap *heap) +{ + struct ion_cp_heap *cp_heap = + container_of(heap, struct ion_cp_heap, heap); + int ret_value = 0; + + if (atomic_inc_return(&cp_heap->protect_cnt) == 1) { + /* Make sure we are in C state when the heap is protected. */ + if (cp_heap->reusable && !cp_heap->allocated_bytes) { + ret_value = fmem_set_state(FMEM_C_STATE); + if (ret_value) + goto out; + } + + ret_value = ion_cp_protect_mem(cp_heap->secure_base, + cp_heap->secure_size, cp_heap->permission_type); + if (ret_value) { + pr_err("Failed to protect memory for heap %s - " + "error code: %d\n", heap->name, ret_value); + + if (cp_heap->reusable && !cp_heap->allocated_bytes) { + if (fmem_set_state(FMEM_T_STATE) != 0) + pr_err("%s: unable to transition heap to T-state\n", + __func__); + } + atomic_dec(&cp_heap->protect_cnt); + } else { + cp_heap->heap_protected = HEAP_PROTECTED; + pr_debug("Protected heap %s @ 0x%lx\n", + heap->name, cp_heap->base); + } + } +out: + pr_debug("%s: protect count is %d\n", __func__, + atomic_read(&cp_heap->protect_cnt)); + BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0); + return ret_value; +} + +/** + * Unprotects memory if heap is secure heap. Also ensures that we are in + * the correct FMEM state if this heap is a reusable heap. + * Must be called with heap->lock locked. 
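+ * Unprotection only happens once every ion_cp_protect() call has been
+ * balanced, for example (assuming each protect succeeds):
+ *
+ *	ion_cp_protect(heap);	protect_cnt 0 -> 1, memory protected
+ *	ion_cp_protect(heap);	protect_cnt 1 -> 2
+ *	ion_cp_unprotect(heap);	protect_cnt 2 -> 1, still protected
+ *	ion_cp_unprotect(heap);	protect_cnt 1 -> 0, memory unprotected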
+ */ +static void ion_cp_unprotect(struct ion_heap *heap) +{ + struct ion_cp_heap *cp_heap = + container_of(heap, struct ion_cp_heap, heap); + + if (atomic_dec_and_test(&cp_heap->protect_cnt)) { + int error_code = ion_cp_unprotect_mem( + cp_heap->secure_base, cp_heap->secure_size, + cp_heap->permission_type); + if (error_code) { + pr_err("Failed to un-protect memory for heap %s - " + "error code: %d\n", heap->name, error_code); + } else { + cp_heap->heap_protected = HEAP_NOT_PROTECTED; + pr_debug("Un-protected heap %s @ 0x%x\n", heap->name, + (unsigned int) cp_heap->base); + + if (cp_heap->reusable && !cp_heap->allocated_bytes) { + if (fmem_set_state(FMEM_T_STATE) != 0) + pr_err("%s: unable to transition heap to T-state", + __func__); + } + } + } + pr_debug("%s: protect count is %d\n", __func__, + atomic_read(&cp_heap->protect_cnt)); + BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0); +} + +ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap, + unsigned long size, + unsigned long align, + unsigned long flags) +{ + unsigned long offset; + unsigned long secure_allocation = flags & ION_SECURE; + + struct ion_cp_heap *cp_heap = + container_of(heap, struct ion_cp_heap, heap); + + mutex_lock(&cp_heap->lock); + if (!secure_allocation && cp_heap->heap_protected == HEAP_PROTECTED) { + mutex_unlock(&cp_heap->lock); + pr_err("ION cannot allocate un-secure memory from protected" + " heap %s\n", heap->name); + return ION_CP_ALLOCATE_FAIL; + } + + if (secure_allocation && + (cp_heap->umap_count > 0 || cp_heap->kmap_cached_count > 0)) { + mutex_unlock(&cp_heap->lock); + pr_err("ION cannot allocate secure memory from heap with " + "outstanding mappings: User space: %lu, kernel space " + "(cached): %lu\n", cp_heap->umap_count, + cp_heap->kmap_cached_count); + return ION_CP_ALLOCATE_FAIL; + } + + /* + * if this is the first reusable allocation, transition + * the heap + */ + if (cp_heap->reusable && !cp_heap->allocated_bytes) { + if (fmem_set_state(FMEM_C_STATE) != 0) { + mutex_unlock(&cp_heap->lock); + return ION_RESERVED_ALLOCATE_FAIL; + } + } + + cp_heap->allocated_bytes += size; + mutex_unlock(&cp_heap->lock); + + offset = gen_pool_alloc_aligned(cp_heap->pool, + size, ilog2(align)); + + if (!offset) { + mutex_lock(&cp_heap->lock); + cp_heap->allocated_bytes -= size; + if ((cp_heap->total_size - + cp_heap->allocated_bytes) >= size) + pr_debug("%s: heap %s has enough memory (%lx) but" + " the allocation of size %lx still failed." 
+ " Memory is probably fragmented.\n", + __func__, heap->name, + cp_heap->total_size - + cp_heap->allocated_bytes, size); + + if (cp_heap->reusable && !cp_heap->allocated_bytes && + cp_heap->heap_protected == HEAP_NOT_PROTECTED) { + if (fmem_set_state(FMEM_T_STATE) != 0) + pr_err("%s: unable to transition heap to T-state\n", + __func__); + } + mutex_unlock(&cp_heap->lock); + + return ION_CP_ALLOCATE_FAIL; + } + + return offset; +} + +static void iommu_unmap_all(unsigned long domain_num, + struct ion_cp_heap *cp_heap) +{ + unsigned long left_to_unmap = cp_heap->total_size; + unsigned long order = get_order(SZ_64K); + unsigned long page_size = SZ_64K; + + struct iommu_domain *domain = msm_get_iommu_domain(domain_num); + if (domain) { + unsigned long temp_iova = cp_heap->iommu_iova[domain_num]; + + while (left_to_unmap) { + iommu_unmap(domain, temp_iova, order); + temp_iova += page_size; + left_to_unmap -= page_size; + } + if (domain_num == cp_heap->iommu_2x_map_domain) + msm_iommu_unmap_extra(domain, temp_iova, + cp_heap->total_size, SZ_64K); + } else { + pr_err("Unable to get IOMMU domain %lu\n", domain_num); + } +} + +void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr, + unsigned long size) +{ + struct ion_cp_heap *cp_heap = + container_of(heap, struct ion_cp_heap, heap); + + if (addr == ION_CP_ALLOCATE_FAIL) + return; + gen_pool_free(cp_heap->pool, addr, size); + + mutex_lock(&cp_heap->lock); + cp_heap->allocated_bytes -= size; + + if (cp_heap->reusable && !cp_heap->allocated_bytes && + cp_heap->heap_protected == HEAP_NOT_PROTECTED) { + if (fmem_set_state(FMEM_T_STATE) != 0) + pr_err("%s: unable to transition heap to T-state\n", + __func__); + } + + /* Unmap everything if we previously mapped the whole heap at once. */ + if (!cp_heap->allocated_bytes) { + unsigned int i; + for (i = 0; i < MAX_DOMAINS; ++i) { + if (cp_heap->iommu_iova[i]) { + unsigned long vaddr_len = cp_heap->total_size; + + if (i == cp_heap->iommu_2x_map_domain) + vaddr_len <<= 1; + iommu_unmap_all(i, cp_heap); + + msm_free_iova_address(cp_heap->iommu_iova[i], i, + cp_heap->iommu_partition[i], + vaddr_len); + } + cp_heap->iommu_iova[i] = 0; + cp_heap->iommu_partition[i] = 0; + } + } + mutex_unlock(&cp_heap->lock); +} + +static int ion_cp_heap_phys(struct ion_heap *heap, + struct ion_buffer *buffer, + ion_phys_addr_t *addr, size_t *len) +{ + *addr = buffer->priv_phys; + *len = buffer->size; + return 0; +} + +static int ion_cp_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long size, unsigned long align, + unsigned long flags) +{ + buffer->priv_phys = ion_cp_allocate(heap, size, align, flags); + return buffer->priv_phys == ION_CP_ALLOCATE_FAIL ? 
-ENOMEM : 0; +} + +static void ion_cp_heap_free(struct ion_buffer *buffer) +{ + struct ion_heap *heap = buffer->heap; + + ion_cp_free(heap, buffer->priv_phys, buffer->size); + buffer->priv_phys = ION_CP_ALLOCATE_FAIL; +} + +struct scatterlist *ion_cp_heap_create_sglist(struct ion_buffer *buffer) +{ + struct scatterlist *sglist; + + sglist = vmalloc(sizeof(*sglist)); + if (!sglist) + return ERR_PTR(-ENOMEM); + + sg_init_table(sglist, 1); + sglist->length = buffer->size; + sglist->offset = 0; + sglist->dma_address = buffer->priv_phys; + + return sglist; +} + +struct scatterlist *ion_cp_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return ion_cp_heap_create_sglist(buffer); +} + +void ion_cp_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + if (buffer->sglist) + vfree(buffer->sglist); +} + +/** + * Call request region for SMI memory if this is the first mapping. + */ +static int ion_cp_request_region(struct ion_cp_heap *cp_heap) +{ + int ret_value = 0; + if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0) + if (cp_heap->request_region) + ret_value = cp_heap->request_region(cp_heap->bus_id); + return ret_value; +} + +/** + * Call release region for SMI memory if this is the last un-mapping. + */ +static int ion_cp_release_region(struct ion_cp_heap *cp_heap) +{ + int ret_value = 0; + if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0) + if (cp_heap->release_region) + ret_value = cp_heap->release_region(cp_heap->bus_id); + return ret_value; +} + +void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base, + void *virt_base, unsigned long flags) +{ + int ret; + unsigned int offset = buffer->priv_phys - phys_base; + unsigned long start = ((unsigned long)virt_base) + offset; + const struct mem_type *type = ION_IS_CACHED(flags) ?
+ get_mem_type(MT_DEVICE_CACHED) : + get_mem_type(MT_DEVICE); + + if (phys_base > buffer->priv_phys) + return NULL; + + + ret = ioremap_page_range(start, start + buffer->size, + buffer->priv_phys, __pgprot(type->prot_pte)); + + if (!ret) + return (void *)start; + else + return NULL; +} + +void *ion_cp_heap_map_kernel(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long flags) +{ + struct ion_cp_heap *cp_heap = + container_of(heap, struct ion_cp_heap, heap); + void *ret_value = NULL; + + mutex_lock(&cp_heap->lock); + if ((cp_heap->heap_protected == HEAP_NOT_PROTECTED) || + ((cp_heap->heap_protected == HEAP_PROTECTED) && + !ION_IS_CACHED(flags))) { + + if (ion_cp_request_region(cp_heap)) { + mutex_unlock(&cp_heap->lock); + return NULL; + } + + if (cp_heap->reusable) { + ret_value = ion_map_fmem_buffer(buffer, cp_heap->base, + cp_heap->reserved_vrange, flags); + + } else { + if (ION_IS_CACHED(flags)) + ret_value = ioremap_cached(buffer->priv_phys, + buffer->size); + else + ret_value = ioremap(buffer->priv_phys, + buffer->size); + } + + if (!ret_value) { + ion_cp_release_region(cp_heap); + } else { + if (ION_IS_CACHED(buffer->flags)) + ++cp_heap->kmap_cached_count; + else + ++cp_heap->kmap_uncached_count; + } + } + mutex_unlock(&cp_heap->lock); + return ret_value; +} + +void ion_cp_heap_unmap_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct ion_cp_heap *cp_heap = + container_of(heap, struct ion_cp_heap, heap); + + if (cp_heap->reusable) + unmap_kernel_range((unsigned long)buffer->vaddr, buffer->size); + else + __arch_iounmap(buffer->vaddr); + + buffer->vaddr = NULL; + + mutex_lock(&cp_heap->lock); + if (ION_IS_CACHED(buffer->flags)) + --cp_heap->kmap_cached_count; + else + --cp_heap->kmap_uncached_count; + ion_cp_release_region(cp_heap); + mutex_unlock(&cp_heap->lock); + + return; +} + +int ion_cp_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, + struct vm_area_struct *vma, unsigned long flags) +{ + int ret_value = -EAGAIN; + struct ion_cp_heap *cp_heap = + container_of(heap, struct ion_cp_heap, heap); + + mutex_lock(&cp_heap->lock); + if (cp_heap->heap_protected == HEAP_NOT_PROTECTED) { + if (ion_cp_request_region(cp_heap)) { + mutex_unlock(&cp_heap->lock); + return -EINVAL; + } + + if (!ION_IS_CACHED(flags)) + vma->vm_page_prot = pgprot_writecombine( + vma->vm_page_prot); + + ret_value = remap_pfn_range(vma, vma->vm_start, + __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); + + if (ret_value) + ion_cp_release_region(cp_heap); + else + ++cp_heap->umap_count; + } + mutex_unlock(&cp_heap->lock); + return ret_value; +} + +void ion_cp_heap_unmap_user(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct ion_cp_heap *cp_heap = + container_of(heap, struct ion_cp_heap, heap); + + mutex_lock(&cp_heap->lock); + --cp_heap->umap_count; + ion_cp_release_region(cp_heap); + mutex_unlock(&cp_heap->lock); +} + +int ion_cp_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer, + void *vaddr, unsigned int offset, unsigned int length, + unsigned int cmd) +{ + void (*outer_cache_op)(phys_addr_t, phys_addr_t); + struct ion_cp_heap *cp_heap = + container_of(heap, struct ion_cp_heap, heap); + + switch (cmd) { + case ION_IOC_CLEAN_CACHES: + dmac_clean_range(vaddr, vaddr + length); + outer_cache_op = outer_clean_range; + break; + case ION_IOC_INV_CACHES: + dmac_inv_range(vaddr, vaddr + length); + outer_cache_op = outer_inv_range; + break; + case ION_IOC_CLEAN_INV_CACHES: + dmac_flush_range(vaddr, 
vaddr + length); + outer_cache_op = outer_flush_range; + break; + default: + return -EINVAL; + } + + if (cp_heap->has_outer_cache) { + unsigned long pstart = buffer->priv_phys + offset; + outer_cache_op(pstart, pstart + length); + } + return 0; +} + +static int ion_cp_print_debug(struct ion_heap *heap, struct seq_file *s) +{ + unsigned long total_alloc; + unsigned long total_size; + unsigned long umap_count; + unsigned long kmap_count; + unsigned long heap_protected; + struct ion_cp_heap *cp_heap = + container_of(heap, struct ion_cp_heap, heap); + + mutex_lock(&cp_heap->lock); + total_alloc = cp_heap->allocated_bytes; + total_size = cp_heap->total_size; + umap_count = cp_heap->umap_count; + kmap_count = ion_cp_get_total_kmap_count(cp_heap); + heap_protected = cp_heap->heap_protected == HEAP_PROTECTED; + mutex_unlock(&cp_heap->lock); + + seq_printf(s, "total bytes currently allocated: %lx\n", total_alloc); + seq_printf(s, "total heap size: %lx\n", total_size); + seq_printf(s, "umapping count: %lx\n", umap_count); + seq_printf(s, "kmapping count: %lx\n", kmap_count); + seq_printf(s, "heap protected: %s\n", heap_protected ? "Yes" : "No"); + seq_printf(s, "reusable: %s\n", cp_heap->reusable ? "Yes" : "No"); + + return 0; +} + +int ion_cp_secure_heap(struct ion_heap *heap) +{ + int ret_value; + struct ion_cp_heap *cp_heap = + container_of(heap, struct ion_cp_heap, heap); + mutex_lock(&cp_heap->lock); + if (cp_heap->umap_count == 0 && cp_heap->kmap_cached_count == 0) { + ret_value = ion_cp_protect(heap); + } else { + pr_err("ION cannot secure heap with outstanding mappings: " + "User space: %lu, kernel space (cached): %lu\n", + cp_heap->umap_count, cp_heap->kmap_cached_count); + ret_value = -EINVAL; + } + + mutex_unlock(&cp_heap->lock); + return ret_value; +} + +int ion_cp_unsecure_heap(struct ion_heap *heap) +{ + int ret_value = 0; + struct ion_cp_heap *cp_heap = + container_of(heap, struct ion_cp_heap, heap); + mutex_lock(&cp_heap->lock); + ion_cp_unprotect(heap); + mutex_unlock(&cp_heap->lock); + return ret_value; +} + +static int iommu_map_all(unsigned long domain_num, struct ion_cp_heap *cp_heap, + int partition, unsigned long prot) +{ + unsigned long left_to_map = cp_heap->total_size; + unsigned long order = get_order(SZ_64K); + unsigned long page_size = SZ_64K; + int ret_value = 0; + unsigned long virt_addr_len = cp_heap->total_size; + struct iommu_domain *domain = msm_get_iommu_domain(domain_num); + + /* If we are mapping into the video domain we need to map twice the + * size of the heap to account for prefetch issue in video core. 
+ */ + if (domain_num == cp_heap->iommu_2x_map_domain) + virt_addr_len <<= 1; + + if (cp_heap->total_size & (SZ_64K-1)) { + pr_err("Heap size is not aligned to 64K, cannot map into IOMMU\n"); + ret_value = -EINVAL; + } + if (cp_heap->base & (SZ_64K-1)) { + pr_err("Heap physical address is not aligned to 64K, cannot map into IOMMU\n"); + ret_value = -EINVAL; + } + if (!ret_value && domain) { + unsigned long temp_phys = cp_heap->base; + unsigned long temp_iova = + msm_allocate_iova_address(domain_num, partition, + virt_addr_len, SZ_64K); + if (!temp_iova) { + pr_err("%s: could not allocate iova from domain %lu, partition %d\n", + __func__, domain_num, partition); + ret_value = -ENOMEM; + goto out; + } + cp_heap->iommu_iova[domain_num] = temp_iova; + + while (left_to_map) { + int ret = iommu_map(domain, temp_iova, temp_phys, + order, prot); + if (ret) { + pr_err("%s: could not map %lx in domain %p, error: %d\n", + __func__, temp_iova, domain, ret); + ret_value = -EAGAIN; + goto free_iova; + } + temp_iova += page_size; + temp_phys += page_size; + left_to_map -= page_size; + } + if (domain_num == cp_heap->iommu_2x_map_domain) + ret_value = msm_iommu_map_extra(domain, temp_iova, + cp_heap->total_size, + SZ_64K, prot); + if (ret_value) + goto free_iova; + } else { + pr_err("Unable to get IOMMU domain %lu\n", domain_num); + ret_value = -ENOMEM; + } + goto out; + +free_iova: + msm_free_iova_address(cp_heap->iommu_iova[domain_num], domain_num, + partition, virt_addr_len); +out: + return ret_value; +} + +static int ion_cp_heap_map_iommu(struct ion_buffer *buffer, + struct ion_iommu_map *data, + unsigned int domain_num, + unsigned int partition_num, + unsigned long align, + unsigned long iova_length, + unsigned long flags) +{ + struct iommu_domain *domain; + int ret = 0; + unsigned long extra; + struct scatterlist *sglist = 0; + struct ion_cp_heap *cp_heap = + container_of(buffer->heap, struct ion_cp_heap, heap); + int prot = IOMMU_WRITE | IOMMU_READ; + prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0; + + data->mapped_size = iova_length; + + if (!msm_use_iommu()) { + data->iova_addr = buffer->priv_phys; + return 0; + } + + if (cp_heap->iommu_iova[domain_num]) { + /* Already mapped. */ + unsigned long offset = buffer->priv_phys - cp_heap->base; + data->iova_addr = cp_heap->iommu_iova[domain_num] + offset; + return 0; + } else if (cp_heap->iommu_map_all) { + ret = iommu_map_all(domain_num, cp_heap, partition_num, prot); + if (!ret) { + unsigned long offset = + buffer->priv_phys - cp_heap->base; + data->iova_addr = + cp_heap->iommu_iova[domain_num] + offset; + cp_heap->iommu_partition[domain_num] = partition_num; + /* + clear delayed map flag so that we don't interfere + with this feature (we are already delaying). 
+ */ + data->flags &= ~ION_IOMMU_UNMAP_DELAYED; + return 0; + } else { + cp_heap->iommu_iova[domain_num] = 0; + cp_heap->iommu_partition[domain_num] = 0; + return ret; + } + } + + extra = iova_length - buffer->size; + + data->iova_addr = msm_allocate_iova_address(domain_num, partition_num, + data->mapped_size, align); + + if (!data->iova_addr) { + ret = -ENOMEM; + goto out; + } + + domain = msm_get_iommu_domain(domain_num); + + if (!domain) { + ret = -ENOMEM; + goto out1; + } + + sglist = ion_cp_heap_create_sglist(buffer); + if (IS_ERR_OR_NULL(sglist)) { + ret = -ENOMEM; + goto out1; + } + ret = iommu_map_range(domain, data->iova_addr, sglist, + buffer->size, prot); + if (ret) { + pr_err("%s: could not map %lx in domain %p\n", + __func__, data->iova_addr, domain); + goto out1; + } + + if (extra) { + unsigned long extra_iova_addr = data->iova_addr + buffer->size; + ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, + SZ_4K, prot); + if (ret) + goto out2; + } + vfree(sglist); + return ret; + +out2: + iommu_unmap_range(domain, data->iova_addr, buffer->size); +out1: + if (!IS_ERR_OR_NULL(sglist)) + vfree(sglist); + msm_free_iova_address(data->iova_addr, domain_num, partition_num, + data->mapped_size); +out: + return ret; +} + +static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data) +{ + unsigned int domain_num; + unsigned int partition_num; + struct iommu_domain *domain; + struct ion_cp_heap *cp_heap = + container_of(data->buffer->heap, struct ion_cp_heap, heap); + + if (!msm_use_iommu()) + return; + + + domain_num = iommu_map_domain(data); + + /* If we are mapping everything we'll wait to unmap until everything + is freed. */ + if (cp_heap->iommu_iova[domain_num]) + return; + + partition_num = iommu_map_partition(data); + + domain = msm_get_iommu_domain(domain_num); + + if (!domain) { + WARN(1, "Could not get domain %d. 
Corruption?\n", domain_num); + return; + } + + iommu_unmap_range(domain, data->iova_addr, data->mapped_size); + msm_free_iova_address(data->iova_addr, domain_num, partition_num, + data->mapped_size); + + return; +} + +static struct ion_heap_ops cp_heap_ops = { + .allocate = ion_cp_heap_allocate, + .free = ion_cp_heap_free, + .phys = ion_cp_heap_phys, + .map_user = ion_cp_heap_map_user, + .unmap_user = ion_cp_heap_unmap_user, + .map_kernel = ion_cp_heap_map_kernel, + .unmap_kernel = ion_cp_heap_unmap_kernel, + .map_dma = ion_cp_heap_map_dma, + .unmap_dma = ion_cp_heap_unmap_dma, + .cache_op = ion_cp_cache_ops, + .print_debug = ion_cp_print_debug, + .secure_heap = ion_cp_secure_heap, + .unsecure_heap = ion_cp_unsecure_heap, + .map_iommu = ion_cp_heap_map_iommu, + .unmap_iommu = ion_cp_heap_unmap_iommu, +}; + +struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data) +{ + struct ion_cp_heap *cp_heap; + int ret; + + cp_heap = kzalloc(sizeof(*cp_heap), GFP_KERNEL); + if (!cp_heap) + return ERR_PTR(-ENOMEM); + + mutex_init(&cp_heap->lock); + + cp_heap->pool = gen_pool_create(12, -1); + if (!cp_heap->pool) + goto free_heap; + + cp_heap->base = heap_data->base; + ret = gen_pool_add(cp_heap->pool, cp_heap->base, heap_data->size, -1); + if (ret < 0) + goto destroy_pool; + + cp_heap->allocated_bytes = 0; + cp_heap->umap_count = 0; + cp_heap->kmap_cached_count = 0; + cp_heap->kmap_uncached_count = 0; + cp_heap->total_size = heap_data->size; + cp_heap->heap.ops = &cp_heap_ops; + cp_heap->heap.type = ION_HEAP_TYPE_CP; + cp_heap->heap_protected = HEAP_NOT_PROTECTED; + cp_heap->secure_base = cp_heap->base; + cp_heap->secure_size = heap_data->size; + cp_heap->has_outer_cache = heap_data->has_outer_cache; + atomic_set(&cp_heap->protect_cnt, 0); + if (heap_data->extra_data) { + struct ion_cp_heap_pdata *extra_data = + heap_data->extra_data; + cp_heap->reusable = extra_data->reusable; + cp_heap->reserved_vrange = extra_data->virt_addr; + cp_heap->permission_type = extra_data->permission_type; + if (extra_data->secure_size) { + cp_heap->secure_base = extra_data->secure_base; + cp_heap->secure_size = extra_data->secure_size; + } + if (extra_data->setup_region) + cp_heap->bus_id = extra_data->setup_region(); + if (extra_data->request_region) + cp_heap->request_region = extra_data->request_region; + if (extra_data->release_region) + cp_heap->release_region = extra_data->release_region; + cp_heap->iommu_map_all = + extra_data->iommu_map_all; + cp_heap->iommu_2x_map_domain = + extra_data->iommu_2x_map_domain; + + } + + return &cp_heap->heap; + +destroy_pool: + gen_pool_destroy(cp_heap->pool); + +free_heap: + kfree(cp_heap); + + return ERR_PTR(-ENOMEM); +} + +void ion_cp_heap_destroy(struct ion_heap *heap) +{ + struct ion_cp_heap *cp_heap = + container_of(heap, struct ion_cp_heap, heap); + + gen_pool_destroy(cp_heap->pool); + kfree(cp_heap); + cp_heap = NULL; +} + + +/* SCM related code for locking down memory for content protection */ + +#define SCM_CP_LOCK_CMD_ID 0x1 +#define SCM_CP_PROTECT 0x1 +#define SCM_CP_UNPROTECT 0x0 + +struct cp_lock_msg { + unsigned int start; + unsigned int end; + unsigned int permission_type; + unsigned char lock; +} __attribute__ ((__packed__)); + + +static int ion_cp_protect_mem(unsigned int phy_base, unsigned int size, + unsigned int permission_type) +{ + struct cp_lock_msg cmd; + cmd.start = phy_base; + cmd.end = phy_base + size; + cmd.permission_type = permission_type; + cmd.lock = SCM_CP_PROTECT; + + return scm_call(SCM_SVC_CP, SCM_CP_LOCK_CMD_ID, + &cmd, 
sizeof(cmd), NULL, 0); +} + +static int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size, + unsigned int permission_type) +{ + struct cp_lock_msg cmd; + cmd.start = phy_base; + cmd.end = phy_base + size; + cmd.permission_type = permission_type; + cmd.lock = SCM_CP_UNPROTECT; + + return scm_call(SCM_SVC_CP, SCM_CP_LOCK_CMD_ID, + &cmd, sizeof(cmd), NULL, 0); +} diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c new file mode 100644 index 0000000000000..f6f5bf3a52d81 --- /dev/null +++ b/drivers/gpu/ion/ion_heap.c @@ -0,0 +1,85 @@ +/* + * drivers/gpu/ion/ion_heap.c + * + * Copyright (C) 2011 Google, Inc. + * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include "ion_priv.h" + +struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) +{ + struct ion_heap *heap = NULL; + + switch (heap_data->type) { + case ION_HEAP_TYPE_SYSTEM_CONTIG: + heap = ion_system_contig_heap_create(heap_data); + break; + case ION_HEAP_TYPE_SYSTEM: + heap = ion_system_heap_create(heap_data); + break; + case ION_HEAP_TYPE_CARVEOUT: + heap = ion_carveout_heap_create(heap_data); + break; + case ION_HEAP_TYPE_IOMMU: + heap = ion_iommu_heap_create(heap_data); + break; + case ION_HEAP_TYPE_CP: + heap = ion_cp_heap_create(heap_data); + break; + default: + pr_err("%s: Invalid heap type %d\n", __func__, + heap_data->type); + return ERR_PTR(-EINVAL); + } + + if (IS_ERR_OR_NULL(heap)) { + pr_err("%s: error creating heap %s type %d base %lu size %u\n", + __func__, heap_data->name, heap_data->type, + heap_data->base, heap_data->size); + return ERR_PTR(-EINVAL); + } + + heap->name = heap_data->name; + heap->id = heap_data->id; + return heap; +} + +void ion_heap_destroy(struct ion_heap *heap) +{ + if (!heap) + return; + + switch (heap->type) { + case ION_HEAP_TYPE_SYSTEM_CONTIG: + ion_system_contig_heap_destroy(heap); + break; + case ION_HEAP_TYPE_SYSTEM: + ion_system_heap_destroy(heap); + break; + case ION_HEAP_TYPE_CARVEOUT: + ion_carveout_heap_destroy(heap); + break; + case ION_HEAP_TYPE_IOMMU: + ion_iommu_heap_destroy(heap); + break; + case ION_HEAP_TYPE_CP: + ion_cp_heap_destroy(heap); + break; + default: + pr_err("%s: Invalid heap type %d\n", __func__, + heap->type); + } +} diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c new file mode 100644 index 0000000000000..7c656976d6a8a --- /dev/null +++ b/drivers/gpu/ion/ion_iommu_heap.c @@ -0,0 +1,350 @@ +/* + * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ion_priv.h" + +#include +#include +#include +#include + +struct ion_iommu_heap { + struct ion_heap heap; + unsigned int has_outer_cache; +}; + +struct ion_iommu_priv_data { + struct page **pages; + int nrpages; + unsigned long size; + struct scatterlist *iommu_sglist; +}; + +static int ion_iommu_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long size, unsigned long align, + unsigned long flags) +{ + int ret, i; + struct ion_iommu_priv_data *data = NULL; + + if (msm_use_iommu()) { + data = kmalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->size = PFN_ALIGN(size); + data->nrpages = data->size >> PAGE_SHIFT; + data->pages = kzalloc(sizeof(struct page *)*data->nrpages, + GFP_KERNEL); + if (!data->pages) { + ret = -ENOMEM; + goto err1; + } + data->iommu_sglist = vmalloc(sizeof(*data->iommu_sglist) * + data->nrpages); + if (!data->iommu_sglist) { + ret = -ENOMEM; + goto err1; + } + + sg_init_table(data->iommu_sglist, data->nrpages); + + for (i = 0; i < data->nrpages; i++) { + data->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!data->pages[i]) + goto err2; + + sg_set_page(&data->iommu_sglist[i], data->pages[i], + PAGE_SIZE, 0); + } + + + buffer->priv_virt = data; + return 0; + + } else { + return -ENOMEM; + } + + +err2: + vfree(data->iommu_sglist); + data->iommu_sglist = NULL; + + for (i = 0; i < data->nrpages; i++) { + if (data->pages[i]) + __free_page(data->pages[i]); + } + kfree(data->pages); +err1: + kfree(data); + return ret; +} + +static void ion_iommu_heap_free(struct ion_buffer *buffer) +{ + struct ion_iommu_priv_data *data = buffer->priv_virt; + int i; + + if (!data) + return; + + for (i = 0; i < data->nrpages; i++) + __free_page(data->pages[i]); + + vfree(data->iommu_sglist); + data->iommu_sglist = NULL; + + kfree(data->pages); + kfree(data); +} + +void *ion_iommu_heap_map_kernel(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long flags) +{ + struct ion_iommu_priv_data *data = buffer->priv_virt; + pgprot_t page_prot = PAGE_KERNEL; + + if (!data) + return NULL; + + if (!ION_IS_CACHED(flags)) + page_prot = pgprot_noncached(page_prot); + + buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot); + + return buffer->vaddr; +} + +void ion_iommu_heap_unmap_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + if (!buffer->vaddr) + return; + + vunmap(buffer->vaddr); + buffer->vaddr = NULL; +} + +int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, + struct vm_area_struct *vma, unsigned long flags) +{ + struct ion_iommu_priv_data *data = buffer->priv_virt; + int i; + unsigned long curr_addr; + if (!data) + return -EINVAL; + + if (!ION_IS_CACHED(flags)) + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + + curr_addr = vma->vm_start; + for (i = 0; i < data->nrpages && curr_addr < vma->vm_end; i++) { + if (vm_insert_page(vma, curr_addr, data->pages[i])) { + /* + * This will fail the mmap which will + * clean up the vma space properly. 
+ */ + return -EINVAL; + } + curr_addr += PAGE_SIZE; + } + return 0; +} + +int ion_iommu_heap_map_iommu(struct ion_buffer *buffer, + struct ion_iommu_map *data, + unsigned int domain_num, + unsigned int partition_num, + unsigned long align, + unsigned long iova_length, + unsigned long flags) +{ + struct iommu_domain *domain; + int ret = 0; + unsigned long extra; + struct ion_iommu_priv_data *buffer_data = buffer->priv_virt; + int prot = IOMMU_WRITE | IOMMU_READ; + prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0; + + BUG_ON(!msm_use_iommu()); + + data->mapped_size = iova_length; + extra = iova_length - buffer->size; + + data->iova_addr = msm_allocate_iova_address(domain_num, partition_num, + data->mapped_size, align); + + if (!data->iova_addr) { + ret = -ENOMEM; + goto out; + } + + domain = msm_get_iommu_domain(domain_num); + + if (!domain) { + ret = -ENOMEM; + goto out1; + } + + ret = iommu_map_range(domain, data->iova_addr, + buffer_data->iommu_sglist, buffer->size, prot); + if (ret) { + pr_err("%s: could not map %lx in domain %p\n", + __func__, data->iova_addr, domain); + goto out1; + } + + if (extra) { + unsigned long extra_iova_addr = data->iova_addr + buffer->size; + ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K, + prot); + if (ret) + goto out2; + } + return ret; + +out2: + iommu_unmap_range(domain, data->iova_addr, buffer->size); +out1: + msm_free_iova_address(data->iova_addr, domain_num, partition_num, + buffer->size); + +out: + + return ret; +} + +void ion_iommu_heap_unmap_iommu(struct ion_iommu_map *data) +{ + unsigned int domain_num; + unsigned int partition_num; + struct iommu_domain *domain; + + BUG_ON(!msm_use_iommu()); + + domain_num = iommu_map_domain(data); + partition_num = iommu_map_partition(data); + + domain = msm_get_iommu_domain(domain_num); + + if (!domain) { + WARN(1, "Could not get domain %d. 
Corruption?\n", domain_num); + return; + } + + iommu_unmap_range(domain, data->iova_addr, data->mapped_size); + msm_free_iova_address(data->iova_addr, domain_num, partition_num, + data->mapped_size); + + return; +} + +static int ion_iommu_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer, + void *vaddr, unsigned int offset, unsigned int length, + unsigned int cmd) +{ + void (*outer_cache_op)(phys_addr_t, phys_addr_t); + struct ion_iommu_heap *iommu_heap = + container_of(heap, struct ion_iommu_heap, heap); + + switch (cmd) { + case ION_IOC_CLEAN_CACHES: + dmac_clean_range(vaddr, vaddr + length); + outer_cache_op = outer_clean_range; + break; + case ION_IOC_INV_CACHES: + dmac_inv_range(vaddr, vaddr + length); + outer_cache_op = outer_inv_range; + break; + case ION_IOC_CLEAN_INV_CACHES: + dmac_flush_range(vaddr, vaddr + length); + outer_cache_op = outer_flush_range; + break; + default: + return -EINVAL; + } + + if (iommu_heap->has_outer_cache) { + unsigned long pstart; + unsigned int i; + struct ion_iommu_priv_data *data = buffer->priv_virt; + if (!data) + return -ENOMEM; + + for (i = 0; i < data->nrpages; ++i) { + pstart = page_to_phys(data->pages[i]); + outer_cache_op(pstart, pstart + PAGE_SIZE); + } + } + return 0; +} + +static struct scatterlist *ion_iommu_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct ion_iommu_priv_data *data = buffer->priv_virt; + return data->iommu_sglist; +} + +static void ion_iommu_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ +} + +static struct ion_heap_ops iommu_heap_ops = { + .allocate = ion_iommu_heap_allocate, + .free = ion_iommu_heap_free, + .map_user = ion_iommu_heap_map_user, + .map_kernel = ion_iommu_heap_map_kernel, + .unmap_kernel = ion_iommu_heap_unmap_kernel, + .map_iommu = ion_iommu_heap_map_iommu, + .unmap_iommu = ion_iommu_heap_unmap_iommu, + .cache_op = ion_iommu_cache_ops, + .map_dma = ion_iommu_heap_map_dma, + .unmap_dma = ion_iommu_heap_unmap_dma, +}; + +struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data) +{ + struct ion_iommu_heap *iommu_heap; + + iommu_heap = kzalloc(sizeof(struct ion_iommu_heap), GFP_KERNEL); + if (!iommu_heap) + return ERR_PTR(-ENOMEM); + + iommu_heap->heap.ops = &iommu_heap_ops; + iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU; + iommu_heap->has_outer_cache = heap_data->has_outer_cache; + + return &iommu_heap->heap; +} + +void ion_iommu_heap_destroy(struct ion_heap *heap) +{ + struct ion_iommu_heap *iommu_heap = + container_of(heap, struct ion_iommu_heap, heap); + + kfree(iommu_heap); + iommu_heap = NULL; +} diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h new file mode 100644 index 0000000000000..17d4e5b67a7a4 --- /dev/null +++ b/drivers/gpu/ion/ion_priv.h @@ -0,0 +1,301 @@ +/* + * drivers/gpu/ion/ion_priv.h + * + * Copyright (C) 2011 Google, Inc. + * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _ION_PRIV_H +#define _ION_PRIV_H + +#include +#include +#include +#include +#include +#include + +struct ion_mapping; + +struct ion_dma_mapping { + struct kref ref; + struct scatterlist *sglist; +}; + +struct ion_kernel_mapping { + struct kref ref; + void *vaddr; +}; + +enum { + DI_PARTITION_NUM = 0, + DI_DOMAIN_NUM = 1, + DI_MAX, +}; + +/** + * struct ion_iommu_map - represents a mapping of an ion buffer to an iommu + * @iova_addr - iommu virtual address + * @node - rb node to exist in the buffer's tree of iommu mappings + * @domain_info - contains the partition number and domain number + * domain_info[1] = domain number + * domain_info[0] = partition number + * @ref - for reference counting this mapping + * @mapped_size - size of the iova space mapped + * (may not be the same as the buffer size) + * @flags - iommu domain/partition specific flags. + * + * Represents a mapping of one ion buffer to a particular iommu domain + * and address range. There may exist other mappings of this buffer in + * different domains or address ranges. All mappings will have the same + * cacheability and security. + */ +struct ion_iommu_map { + unsigned long iova_addr; + struct rb_node node; + union { + int domain_info[DI_MAX]; + uint64_t key; + }; + struct ion_buffer *buffer; + struct kref ref; + int mapped_size; + unsigned long flags; +}; + +struct ion_buffer *ion_handle_buffer(struct ion_handle *handle); + +/** + * struct ion_buffer - metadata for a particular buffer + * @ref: reference count + * @node: node in the ion_device buffers tree + * @dev: back pointer to the ion_device + * @heap: back pointer to the heap the buffer came from + * @flags: buffer specific flags + * @size: size of the buffer + * @priv_virt: private data to the buffer representable as + * a void * + * @priv_phys: private data to the buffer representable as + * an ion_phys_addr_t (and someday a phys_addr_t) + * @lock: protects the buffer's cnt fields + * @kmap_cnt: number of times the buffer is mapped to the kernel + * @vaddr: the kernel mapping if kmap_cnt is not zero + * @dmap_cnt: number of times the buffer is mapped for dma + * @sglist: the scatterlist for the buffer if dmap_cnt is not zero +*/ +struct ion_buffer { + struct kref ref; + struct rb_node node; + struct ion_device *dev; + struct ion_heap *heap; + unsigned long flags; + size_t size; + union { + void *priv_virt; + ion_phys_addr_t priv_phys; + }; + struct mutex lock; + int kmap_cnt; + void *vaddr; + int dmap_cnt; + struct scatterlist *sglist; + int umap_cnt; + unsigned int iommu_map_cnt; + struct rb_root iommu_maps; + int marked; +}; + +/** + * struct ion_heap_ops - ops to operate on a given heap + * @allocate: allocate memory + * @free: free memory + * @phys: get physical address of a buffer (only defined on + * physically contiguous heaps) + * @map_dma: map the memory for dma to a scatterlist + * @unmap_dma: unmap the memory for dma + * @map_kernel: map memory to the kernel + * @unmap_kernel: unmap memory from the kernel + * @map_user: map memory to userspace + * @unmap_user: unmap memory from userspace + */ +struct ion_heap_ops { + int (*allocate) (struct ion_heap *heap, + struct ion_buffer *buffer, unsigned long len, + unsigned long align, unsigned long flags); + void (*free) (struct ion_buffer *buffer); + int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer, + ion_phys_addr_t *addr, size_t *len); + struct scatterlist *(*map_dma) (struct ion_heap *heap, + struct ion_buffer *buffer); + void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer); + void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer, + unsigned long flags); + void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer); + int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer, + struct vm_area_struct *vma, unsigned long flags); + void (*unmap_user) (struct ion_heap *mapper, struct ion_buffer *buffer); + int (*cache_op)(struct ion_heap *heap, struct ion_buffer *buffer, + void *vaddr, unsigned int offset, + unsigned int length, unsigned int cmd); + int (*map_iommu)(struct ion_buffer *buffer, + struct ion_iommu_map *map_data, + unsigned int domain_num, + unsigned int partition_num, + unsigned long align, + unsigned long iova_length, + unsigned long flags); + void (*unmap_iommu)(struct ion_iommu_map *data); + int (*print_debug)(struct ion_heap *heap, struct seq_file *s); + int (*secure_heap)(struct ion_heap *heap); + int (*unsecure_heap)(struct ion_heap *heap); +}; + +/** + * struct ion_heap - represents a heap in the system + * @node: rb node to put the heap on the device's tree of heaps + * @dev: back pointer to the ion_device + * @type: type of heap + * @ops: ops struct as above + * @id: id of heap, also indicates priority of this heap when + * allocating. These are specified by platform data and + * MUST be unique + * @name: used for debugging + * + * Represents a pool of memory from which buffers can be made. In some + * systems the only heap is regular system memory allocated via vmalloc. + * On others, some blocks might require large physically contiguous buffers + * that are allocated from a specially reserved heap. + */ +struct ion_heap { + struct rb_node node; + struct ion_device *dev; + enum ion_heap_type type; + struct ion_heap_ops *ops; + int id; + const char *name; +}; + + + +#define iommu_map_domain(__m) ((__m)->domain_info[1]) +#define iommu_map_partition(__m) ((__m)->domain_info[0]) + +/** + * ion_device_create - allocates and returns an ion device + * @custom_ioctl: arch specific ioctl function if applicable + * + * returns a valid device or an ERR_PTR-encoded error on failure + */ +struct ion_device *ion_device_create(long (*custom_ioctl) + (struct ion_client *client, + unsigned int cmd, + unsigned long arg)); + +/** + * ion_device_destroy - free a device and its resources + * @dev: the device + */ +void ion_device_destroy(struct ion_device *dev); + +/** + * ion_device_add_heap - adds a heap to the ion device + * @dev: the device + * @heap: the heap to add + */ +void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap); + +/** + * functions for creating and destroying the built-in ion heaps. + * architectures can add their own custom architecture-specific + * heaps as appropriate.
+ */ + +struct ion_heap *ion_heap_create(struct ion_platform_heap *); +void ion_heap_destroy(struct ion_heap *); + +struct ion_heap *ion_system_heap_create(struct ion_platform_heap *); +void ion_system_heap_destroy(struct ion_heap *); + +struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *); +void ion_system_contig_heap_destroy(struct ion_heap *); + +struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *); +void ion_carveout_heap_destroy(struct ion_heap *); + +struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *); +void ion_iommu_heap_destroy(struct ion_heap *); + +struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *); +void ion_cp_heap_destroy(struct ion_heap *); + +struct ion_heap *ion_reusable_heap_create(struct ion_platform_heap *); +void ion_reusable_heap_destroy(struct ion_heap *); + +/** + * kernel api to allocate/free from carveout -- used when carveout is + * used to back an architecture specific custom heap + */ +ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size, + unsigned long align); +void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, + unsigned long size); + + +struct ion_heap *msm_get_contiguous_heap(void); +/** + * The carveout/cp heap returns physical addresses, since 0 may be a valid + * physical address, this is used to indicate allocation failed + */ +#define ION_CARVEOUT_ALLOCATE_FAIL -1 +#define ION_CP_ALLOCATE_FAIL -1 + +/** + * The reserved heap returns physical addresses, since 0 may be a valid + * physical address, this is used to indicate allocation failed + */ +#define ION_RESERVED_ALLOCATE_FAIL -1 + +/** + * ion_map_fmem_buffer - map fmem allocated memory into the kernel + * @buffer - buffer to map + * @phys_base - physical base of the heap + * @virt_base - virtual base of the heap + * @flags - flags for the heap + * + * Map fmem allocated memory into the kernel address space. This + * is designed to be used by other heaps that need fmem behavior. + * The virtual range must be pre-allocated. + */ +void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base, + void *virt_base, unsigned long flags); + +/** + * ion_do_cache_op - do cache operations. + * + * @client - pointer to ION client. + * @handle - pointer to buffer handle. + * @uaddr - virtual address to operate on. + * @offset - offset from physical address. + * @len - Length of data to do cache operation on. + * @cmd - Cache operation to perform: + * ION_IOC_CLEAN_CACHES + * ION_IOC_INV_CACHES + * ION_IOC_CLEAN_INV_CACHES + * + * Returns 0 on success + */ +int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle, + void *uaddr, unsigned long offset, unsigned long len, + unsigned int cmd); + +#endif /* _ION_PRIV_H */ diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c new file mode 100644 index 0000000000000..6b98eb2c8a5ed --- /dev/null +++ b/drivers/gpu/ion/ion_system_heap.c @@ -0,0 +1,558 @@ +/* + * drivers/gpu/ion/ion_system_heap.c + * + * Copyright (C) 2011 Google, Inc. + * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ion_priv.h" +#include +#include + +static atomic_t system_heap_allocated; +static atomic_t system_contig_heap_allocated; +static unsigned int system_heap_has_outer_cache; +static unsigned int system_heap_contig_has_outer_cache; + +static int ion_system_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long size, unsigned long align, + unsigned long flags) +{ + buffer->priv_virt = vmalloc_user(size); + if (!buffer->priv_virt) + return -ENOMEM; + + atomic_add(size, &system_heap_allocated); + return 0; +} + +void ion_system_heap_free(struct ion_buffer *buffer) +{ + vfree(buffer->priv_virt); + atomic_sub(buffer->size, &system_heap_allocated); +} + +struct scatterlist *ion_system_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct scatterlist *sglist; + struct page *page; + int i; + int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; + void *vaddr = buffer->priv_virt; + + sglist = vmalloc(npages * sizeof(struct scatterlist)); + if (!sglist) + return ERR_PTR(-ENOMEM); + memset(sglist, 0, npages * sizeof(struct scatterlist)); + sg_init_table(sglist, npages); + for (i = 0; i < npages; i++) { + page = vmalloc_to_page(vaddr); + if (!page) + goto end; + sg_set_page(&sglist[i], page, PAGE_SIZE, 0); + vaddr += PAGE_SIZE; + } + /* XXX do cache maintenance for dma? */ + return sglist; +end: + vfree(sglist); + return NULL; +} + +void ion_system_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + /* XXX undo cache maintenance for dma? */ + if (buffer->sglist) + vfree(buffer->sglist); +} + +void *ion_system_heap_map_kernel(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long flags) +{ + if (ION_IS_CACHED(flags)) + return buffer->priv_virt; + else { + pr_err("%s: cannot map system heap uncached\n", __func__); + return ERR_PTR(-EINVAL); + } +} + +void ion_system_heap_unmap_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ +} + +void ion_system_heap_unmap_iommu(struct ion_iommu_map *data) +{ + unsigned int domain_num; + unsigned int partition_num; + struct iommu_domain *domain; + + if (!msm_use_iommu()) + return; + + domain_num = iommu_map_domain(data); + partition_num = iommu_map_partition(data); + + domain = msm_get_iommu_domain(domain_num); + + if (!domain) { + WARN(1, "Could not get domain %d. 
Corruption?\n", domain_num); + return; + } + + iommu_unmap_range(domain, data->iova_addr, data->mapped_size); + msm_free_iova_address(data->iova_addr, domain_num, partition_num, + data->mapped_size); + + return; +} + +int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, + struct vm_area_struct *vma, unsigned long flags) +{ + if (ION_IS_CACHED(flags)) + return remap_vmalloc_range(vma, buffer->priv_virt, + vma->vm_pgoff); + else { + pr_err("%s: cannot map system heap uncached\n", __func__); + return -EINVAL; + } +} + +int ion_system_heap_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer, + void *vaddr, unsigned int offset, unsigned int length, + unsigned int cmd) +{ + void (*outer_cache_op)(phys_addr_t, phys_addr_t); + + switch (cmd) { + case ION_IOC_CLEAN_CACHES: + dmac_clean_range(vaddr, vaddr + length); + outer_cache_op = outer_clean_range; + break; + case ION_IOC_INV_CACHES: + dmac_inv_range(vaddr, vaddr + length); + outer_cache_op = outer_inv_range; + break; + case ION_IOC_CLEAN_INV_CACHES: + dmac_flush_range(vaddr, vaddr + length); + outer_cache_op = outer_flush_range; + break; + default: + return -EINVAL; + } + + if (system_heap_has_outer_cache) { + unsigned long pstart; + void *vend; + void *vtemp; + unsigned long ln = 0; + vend = buffer->priv_virt + buffer->size; + vtemp = buffer->priv_virt + offset; + + if ((vtemp+length) > vend) { + pr_err("Trying to flush outside of mapped range.\n"); + pr_err("End of mapped range: %p, trying to flush to " + "address %p\n", vend, vtemp+length); + WARN(1, "%s: called with heap name %s, buffer size 0x%x, " + "vaddr 0x%p, offset 0x%x, length: 0x%x\n", + __func__, heap->name, buffer->size, vaddr, + offset, length); + return -EINVAL; + } + + for (; ln < length && vtemp < vend; + vtemp += PAGE_SIZE, ln += PAGE_SIZE) { + struct page *page = vmalloc_to_page(vtemp); + if (!page) { + WARN(1, "Could not find page for virt. address %p\n", + vtemp); + return -EINVAL; + } + pstart = page_to_phys(page); + /* + * If page -> phys is returning NULL, something + * has really gone wrong... + */ + if (!pstart) { + WARN(1, "Could not translate %p to physical address\n", + vtemp); + return -EINVAL; + } + + outer_cache_op(pstart, pstart + PAGE_SIZE); + } + } + return 0; +} + +static int ion_system_print_debug(struct ion_heap *heap, struct seq_file *s) +{ + seq_printf(s, "total bytes currently allocated: %lx\n", + (unsigned long) atomic_read(&system_heap_allocated)); + + return 0; +} + +int ion_system_heap_map_iommu(struct ion_buffer *buffer, + struct ion_iommu_map *data, + unsigned int domain_num, + unsigned int partition_num, + unsigned long align, + unsigned long iova_length, + unsigned long flags) +{ + int ret = 0, i; + struct iommu_domain *domain; + unsigned long extra; + unsigned long extra_iova_addr; + struct page *page; + int npages = buffer->size >> PAGE_SHIFT; + void *vaddr = buffer->priv_virt; + struct scatterlist *sglist = 0; + int prot = IOMMU_WRITE | IOMMU_READ; + prot |= ION_IS_CACHED(flags) ? 
IOMMU_CACHE : 0; + + if (!ION_IS_CACHED(flags)) + return -EINVAL; + + if (!msm_use_iommu()) + return -EINVAL; + + data->mapped_size = iova_length; + extra = iova_length - buffer->size; + + data->iova_addr = msm_allocate_iova_address(domain_num, partition_num, + data->mapped_size, align); + + if (!data->iova_addr) { + ret = -ENOMEM; + goto out; + } + + domain = msm_get_iommu_domain(domain_num); + + if (!domain) { + ret = -ENOMEM; + goto out1; + } + + + sglist = vmalloc(sizeof(*sglist) * npages); + if (!sglist) { + ret = -ENOMEM; + goto out1; + } + + sg_init_table(sglist, npages); + for (i = 0; i < npages; i++) { + page = vmalloc_to_page(vaddr); + if (!page) + goto out1; + sg_set_page(&sglist[i], page, PAGE_SIZE, 0); + vaddr += PAGE_SIZE; + } + + ret = iommu_map_range(domain, data->iova_addr, sglist, + buffer->size, prot); + + if (ret) { + pr_err("%s: could not map %lx in domain %p\n", + __func__, data->iova_addr, domain); + goto out1; + } + + extra_iova_addr = data->iova_addr + buffer->size; + if (extra) { + ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K, + prot); + if (ret) + goto out2; + } + vfree(sglist); + return ret; + +out2: + iommu_unmap_range(domain, data->iova_addr, buffer->size); +out1: + vfree(sglist); + msm_free_iova_address(data->iova_addr, domain_num, partition_num, + data->mapped_size); +out: + return ret; +} + +static struct ion_heap_ops vmalloc_ops = { + .allocate = ion_system_heap_allocate, + .free = ion_system_heap_free, + .map_dma = ion_system_heap_map_dma, + .unmap_dma = ion_system_heap_unmap_dma, + .map_kernel = ion_system_heap_map_kernel, + .unmap_kernel = ion_system_heap_unmap_kernel, + .map_user = ion_system_heap_map_user, + .cache_op = ion_system_heap_cache_ops, + .print_debug = ion_system_print_debug, + .map_iommu = ion_system_heap_map_iommu, + .unmap_iommu = ion_system_heap_unmap_iommu, +}; + +struct ion_heap *ion_system_heap_create(struct ion_platform_heap *pheap) +{ + struct ion_heap *heap; + + heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); + if (!heap) + return ERR_PTR(-ENOMEM); + heap->ops = &vmalloc_ops; + heap->type = ION_HEAP_TYPE_SYSTEM; + system_heap_has_outer_cache = pheap->has_outer_cache; + return heap; +} + +void ion_system_heap_destroy(struct ion_heap *heap) +{ + kfree(heap); +} + +static int ion_system_contig_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long len, + unsigned long align, + unsigned long flags) +{ + buffer->priv_virt = kzalloc(len, GFP_KERNEL); + if (!buffer->priv_virt) + return -ENOMEM; + atomic_add(len, &system_contig_heap_allocated); + return 0; +} + +void ion_system_contig_heap_free(struct ion_buffer *buffer) +{ + kfree(buffer->priv_virt); + atomic_sub(buffer->size, &system_contig_heap_allocated); +} + +static int ion_system_contig_heap_phys(struct ion_heap *heap, + struct ion_buffer *buffer, + ion_phys_addr_t *addr, size_t *len) +{ + *addr = virt_to_phys(buffer->priv_virt); + *len = buffer->size; + return 0; +} + +struct scatterlist *ion_system_contig_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct scatterlist *sglist; + + sglist = vmalloc(sizeof(struct scatterlist)); + if (!sglist) + return ERR_PTR(-ENOMEM); + sg_init_table(sglist, 1); + sg_set_page(sglist, virt_to_page(buffer->priv_virt), buffer->size, 0); + return sglist; +} + +int ion_system_contig_heap_map_user(struct ion_heap *heap, + struct ion_buffer *buffer, + struct vm_area_struct *vma, + unsigned long flags) +{ + unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt)); + + if 
(ION_IS_CACHED(flags)) + return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); + else { + pr_err("%s: cannot map system heap uncached\n", __func__); + return -EINVAL; + } +} + +int ion_system_contig_heap_cache_ops(struct ion_heap *heap, + struct ion_buffer *buffer, void *vaddr, + unsigned int offset, unsigned int length, + unsigned int cmd) +{ + void (*outer_cache_op)(phys_addr_t, phys_addr_t); + + switch (cmd) { + case ION_IOC_CLEAN_CACHES: + dmac_clean_range(vaddr, vaddr + length); + outer_cache_op = outer_clean_range; + break; + case ION_IOC_INV_CACHES: + dmac_inv_range(vaddr, vaddr + length); + outer_cache_op = outer_inv_range; + break; + case ION_IOC_CLEAN_INV_CACHES: + dmac_flush_range(vaddr, vaddr + length); + outer_cache_op = outer_flush_range; + break; + default: + return -EINVAL; + } + + if (system_heap_contig_has_outer_cache) { + unsigned long pstart; + + pstart = virt_to_phys(buffer->priv_virt) + offset; + if (!pstart) { + WARN(1, "Could not do virt to phys translation on %p\n", + buffer->priv_virt); + return -EINVAL; + } + + outer_cache_op(pstart, pstart + PAGE_SIZE); + } + + return 0; +} + +static int ion_system_contig_print_debug(struct ion_heap *heap, + struct seq_file *s) +{ + seq_printf(s, "total bytes currently allocated: %lx\n", + (unsigned long) atomic_read(&system_contig_heap_allocated)); + + return 0; +} + +int ion_system_contig_heap_map_iommu(struct ion_buffer *buffer, + struct ion_iommu_map *data, + unsigned int domain_num, + unsigned int partition_num, + unsigned long align, + unsigned long iova_length, + unsigned long flags) +{ + int ret = 0; + struct iommu_domain *domain; + unsigned long extra; + struct scatterlist *sglist = 0; + struct page *page = 0; + int prot = IOMMU_WRITE | IOMMU_READ; + prot |= ION_IS_CACHED(flags) ? 
IOMMU_CACHE : 0; + + if (!ION_IS_CACHED(flags)) + return -EINVAL; + + if (!msm_use_iommu()) { + data->iova_addr = virt_to_phys(buffer->vaddr); + return 0; + } + + data->mapped_size = iova_length; + extra = iova_length - buffer->size; + + data->iova_addr = msm_allocate_iova_address(domain_num, partition_num, + data->mapped_size, align); + + if (!data->iova_addr) { + ret = -ENOMEM; + goto out; + } + + domain = msm_get_iommu_domain(domain_num); + + if (!domain) { + ret = -ENOMEM; + goto out1; + } + page = virt_to_page(buffer->vaddr); + + sglist = vmalloc(sizeof(*sglist)); + if (!sglist) + goto out1; + + sg_init_table(sglist, 1); + sg_set_page(sglist, page, buffer->size, 0); + + ret = iommu_map_range(domain, data->iova_addr, sglist, + buffer->size, prot); + if (ret) { + pr_err("%s: could not map %lx in domain %p\n", + __func__, data->iova_addr, domain); + goto out1; + } + + if (extra) { + unsigned long extra_iova_addr = data->iova_addr + buffer->size; + ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K, + prot); + if (ret) + goto out2; + } + vfree(sglist); + return ret; +out2: + iommu_unmap_range(domain, data->iova_addr, buffer->size); + +out1: + vfree(sglist); + msm_free_iova_address(data->iova_addr, domain_num, partition_num, + data->mapped_size); +out: + return ret; +} + +static struct ion_heap_ops kmalloc_ops = { + .allocate = ion_system_contig_heap_allocate, + .free = ion_system_contig_heap_free, + .phys = ion_system_contig_heap_phys, + .map_dma = ion_system_contig_heap_map_dma, + .unmap_dma = ion_system_heap_unmap_dma, + .map_kernel = ion_system_heap_map_kernel, + .unmap_kernel = ion_system_heap_unmap_kernel, + .map_user = ion_system_contig_heap_map_user, + .cache_op = ion_system_contig_heap_cache_ops, + .print_debug = ion_system_contig_print_debug, + .map_iommu = ion_system_contig_heap_map_iommu, + .unmap_iommu = ion_system_heap_unmap_iommu, +}; + +struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *pheap) +{ + struct ion_heap *heap; + + heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); + if (!heap) + return ERR_PTR(-ENOMEM); + heap->ops = &kmalloc_ops; + heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG; + system_heap_contig_has_outer_cache = pheap->has_outer_cache; + return heap; +} + +void ion_system_contig_heap_destroy(struct ion_heap *heap) +{ + kfree(heap); +} + diff --git a/drivers/gpu/ion/ion_system_mapper.c b/drivers/gpu/ion/ion_system_mapper.c new file mode 100644 index 0000000000000..692458e07b5e8 --- /dev/null +++ b/drivers/gpu/ion/ion_system_mapper.c @@ -0,0 +1,114 @@ +/* + * drivers/gpu/ion/ion_system_mapper.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include "ion_priv.h" +/* + * This mapper is valid for any heap that allocates memory that already has + * a kernel mapping, this includes vmalloc'd memory, kmalloc'd memory, + * pages obtained via io_remap, etc. 
+ */ +static void *ion_kernel_mapper_map(struct ion_mapper *mapper, + struct ion_buffer *buffer, + struct ion_mapping **mapping) +{ + if (!((1 << buffer->heap->type) & mapper->heap_mask)) { + pr_err("%s: attempting to map an unsupported heap\n", __func__); + return ERR_PTR(-EINVAL); + } + /* XXX REVISIT ME!!! */ + *((unsigned long *)mapping) = (unsigned long)buffer->priv; + return buffer->priv; +} + +static void ion_kernel_mapper_unmap(struct ion_mapper *mapper, + struct ion_buffer *buffer, + struct ion_mapping *mapping) +{ + if (!((1 << buffer->heap->type) & mapper->heap_mask)) + pr_err("%s: attempting to unmap an unsupported heap\n", + __func__); +} + +static void *ion_kernel_mapper_map_kernel(struct ion_mapper *mapper, + struct ion_buffer *buffer, + struct ion_mapping *mapping) +{ + if (!((1 << buffer->heap->type) & mapper->heap_mask)) { + pr_err("%s: attempting to unmap an unsupported heap\n", + __func__); + return ERR_PTR(-EINVAL); + } + return buffer->priv; +} + +static int ion_kernel_mapper_map_user(struct ion_mapper *mapper, + struct ion_buffer *buffer, + struct vm_area_struct *vma, + struct ion_mapping *mapping) +{ + int ret; + + switch (buffer->heap->type) { + case ION_HEAP_KMALLOC: + { + unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv)); + ret = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); + break; + } + case ION_HEAP_VMALLOC: + ret = remap_vmalloc_range(vma, buffer->priv, vma->vm_pgoff); + break; + default: + pr_err("%s: attempting to map unsupported heap to userspace\n", + __func__); + return -EINVAL; + } + + return ret; +} + +static struct ion_mapper_ops ops = { + .map = ion_kernel_mapper_map, + .map_kernel = ion_kernel_mapper_map_kernel, + .map_user = ion_kernel_mapper_map_user, + .unmap = ion_kernel_mapper_unmap, +}; + +struct ion_mapper *ion_system_mapper_create(void) +{ + struct ion_mapper *mapper; + mapper = kzalloc(sizeof(struct ion_mapper), GFP_KERNEL); + if (!mapper) + return ERR_PTR(-ENOMEM); + mapper->type = ION_SYSTEM_MAPPER; + mapper->ops = &ops; + mapper->heap_mask = (1 << ION_HEAP_VMALLOC) | (1 << ION_HEAP_KMALLOC); + return mapper; +} + +void ion_system_mapper_destroy(struct ion_mapper *mapper) +{ + kfree(mapper); +} + diff --git a/drivers/gpu/ion/msm/Makefile b/drivers/gpu/ion/msm/Makefile new file mode 100644 index 0000000000000..bedd8d22779e1 --- /dev/null +++ b/drivers/gpu/ion/msm/Makefile @@ -0,0 +1 @@ +obj-y += msm_ion.o diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c new file mode 100644 index 0000000000000..0a496ddf56e1d --- /dev/null +++ b/drivers/gpu/ion/msm/msm_ion.c @@ -0,0 +1,404 @@ +/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../ion_priv.h" + +static struct ion_device *idev; +static int num_heaps; +static struct ion_heap **heaps; + +struct ion_client *msm_ion_client_create(unsigned int heap_mask, + const char *name) +{ + return ion_client_create(idev, heap_mask, name); +} +EXPORT_SYMBOL(msm_ion_client_create); + +int msm_ion_secure_heap(int heap_id) +{ + return ion_secure_heap(idev, heap_id); +} +EXPORT_SYMBOL(msm_ion_secure_heap); + +int msm_ion_unsecure_heap(int heap_id) +{ + return ion_unsecure_heap(idev, heap_id); +} +EXPORT_SYMBOL(msm_ion_unsecure_heap); + +int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle, + void *vaddr, unsigned long len, unsigned int cmd) +{ + return ion_do_cache_op(client, handle, vaddr, 0, len, cmd); +} +EXPORT_SYMBOL(msm_ion_do_cache_op); + +static unsigned long msm_ion_get_base(unsigned long size, int memory_type, + unsigned int align) +{ + switch (memory_type) { + case ION_EBI_TYPE: + return allocate_contiguous_ebi_nomap(size, align); + break; + case ION_SMI_TYPE: + return allocate_contiguous_memory_nomap(size, MEMTYPE_SMI, + align); + break; + default: + pr_err("%s: Unknown memory type %d\n", __func__, memory_type); + return 0; + } +} + +static struct ion_platform_heap *find_heap(const struct ion_platform_heap + heap_data[], + unsigned int nr_heaps, + int heap_id) +{ + unsigned int i; + for (i = 0; i < nr_heaps; ++i) { + const struct ion_platform_heap *heap = &heap_data[i]; + if (heap->id == heap_id) + return (struct ion_platform_heap *) heap; + } + return 0; +} + +static void ion_set_base_address(struct ion_platform_heap *heap, + struct ion_platform_heap *shared_heap, + struct ion_co_heap_pdata *co_heap_data, + struct ion_cp_heap_pdata *cp_data) +{ + if (cp_data->reusable) { + const struct fmem_data *fmem_info = fmem_get_info(); + + if (!fmem_info) { + pr_err("fmem info pointer NULL!\n"); + BUG(); + } + + heap->base = fmem_info->phys - fmem_info->reserved_size_low; + cp_data->virt_addr = fmem_info->virt; + pr_info("ION heap %s using FMEM\n", shared_heap->name); + } else { + heap->base = msm_ion_get_base(heap->size + shared_heap->size, + shared_heap->memory_type, + co_heap_data->align); + } + if (heap->base) { + shared_heap->base = heap->base + heap->size; + cp_data->secure_base = heap->base; + cp_data->secure_size = heap->size + shared_heap->size; + } else { + pr_err("%s: could not get memory for heap %s (id %x)\n", + __func__, heap->name, heap->id); + } +} + +static void allocate_co_memory(struct ion_platform_heap *heap, + struct ion_platform_heap heap_data[], + unsigned int nr_heaps) +{ + struct ion_co_heap_pdata *co_heap_data = + (struct ion_co_heap_pdata *) heap->extra_data; + + if (co_heap_data->adjacent_mem_id != INVALID_HEAP_ID) { + struct ion_platform_heap *shared_heap = + find_heap(heap_data, nr_heaps, + co_heap_data->adjacent_mem_id); + if (shared_heap) { + struct ion_cp_heap_pdata *cp_data = + (struct ion_cp_heap_pdata *) shared_heap->extra_data; + if (cp_data->fixed_position == FIXED_MIDDLE) { + const struct fmem_data *fmem_info = + fmem_get_info(); + + if (!fmem_info) { + pr_err("fmem info pointer NULL!\n"); + BUG(); + } + + cp_data->virt_addr = fmem_info->virt; + cp_data->secure_base = heap->base; + cp_data->secure_size = + heap->size + shared_heap->size; + } else if (!heap->base) { + ion_set_base_address(heap, shared_heap, + co_heap_data, cp_data); + } + } + } +} + +/* Fixup heaps in board 
file to support two heaps being adjacent to each other. + * A flag (adjacent_mem_id) in the platform data tells us that the heap phy + * memory location must be adjacent to the specified heap. We do this by + * carving out memory for both heaps and then splitting up the memory to the + * two heaps. The heap specifying the "adjacent_mem_id" get the base of the + * memory while heap specified in "adjacent_mem_id" get base+size as its + * base address. + * Note: Modifies platform data and allocates memory. + */ +static void msm_ion_heap_fixup(struct ion_platform_heap heap_data[], + unsigned int nr_heaps) +{ + unsigned int i; + + for (i = 0; i < nr_heaps; i++) { + struct ion_platform_heap *heap = &heap_data[i]; + if (heap->type == ION_HEAP_TYPE_CARVEOUT) { + if (heap->extra_data) + allocate_co_memory(heap, heap_data, nr_heaps); + } + } +} + +static void msm_ion_allocate(struct ion_platform_heap *heap) +{ + + if (!heap->base && heap->extra_data) { + unsigned int align = 0; + switch (heap->type) { + case ION_HEAP_TYPE_CARVEOUT: + align = + ((struct ion_co_heap_pdata *) heap->extra_data)->align; + break; + case ION_HEAP_TYPE_CP: + { + struct ion_cp_heap_pdata *data = + (struct ion_cp_heap_pdata *) + heap->extra_data; + if (data->reusable) { + const struct fmem_data *fmem_info = + fmem_get_info(); + heap->base = fmem_info->phys; + data->virt_addr = fmem_info->virt; + pr_info("ION heap %s using FMEM\n", heap->name); + } else if (data->mem_is_fmem) { + const struct fmem_data *fmem_info = + fmem_get_info(); + heap->base = fmem_info->phys + fmem_info->size; + } + align = data->align; + break; + } + default: + break; + } + if (align && !heap->base) { + heap->base = msm_ion_get_base(heap->size, + heap->memory_type, + align); + if (!heap->base) + pr_err("%s: could not get memory for heap %s " + "(id %x)\n", __func__, heap->name, heap->id); + } + } +} + +static int check_vaddr_bounds(unsigned long start, unsigned long end) +{ + struct mm_struct *mm = current->active_mm; + struct vm_area_struct *vma; + int ret = 1; + + if (end < start) + goto out; + + down_read(&mm->mmap_sem); + vma = find_vma(mm, start); + if (vma && vma->vm_start < end) { + if (start < vma->vm_start) + goto out_up; + if (end > vma->vm_end) + goto out_up; + ret = 0; + } + +out_up: + up_read(&mm->mmap_sem); +out: + return ret; +} + +static long msm_ion_custom_ioctl(struct ion_client *client, + unsigned int cmd, + unsigned long arg) +{ + switch (cmd) { + case ION_IOC_CLEAN_CACHES: + case ION_IOC_INV_CACHES: + case ION_IOC_CLEAN_INV_CACHES: + { + struct ion_flush_data data; + unsigned long start, end; + struct ion_handle *handle = NULL; + int ret; + + if (copy_from_user(&data, (void __user *)arg, + sizeof(struct ion_flush_data))) + return -EFAULT; + + start = (unsigned long) data.vaddr; + end = (unsigned long) data.vaddr + data.length; + + if (check_vaddr_bounds(start, end)) { + pr_err("%s: virtual address %p is out of bounds\n", + __func__, data.vaddr); + return -EINVAL; + } + + if (!data.handle) { + handle = ion_import_fd(client, data.fd); + if (IS_ERR_OR_NULL(handle)) { + pr_info("%s: Could not import handle: %d\n", + __func__, (int)handle); + return -EINVAL; + } + } + + ret = ion_do_cache_op(client, + data.handle ? 
data.handle : handle, + data.vaddr, data.offset, data.length, + cmd); + + if (!data.handle) + ion_free(client, handle); + + if (ret < 0) + return ret; + + break; + + } + case ION_IOC_GET_FLAGS: + { + struct ion_flag_data data; + int ret; + if (copy_from_user(&data, (void __user *)arg, + sizeof(struct ion_flag_data))) + return -EFAULT; + + ret = ion_handle_get_flags(client, data.handle, &data.flags); + if (ret < 0) + return ret; + if (copy_to_user((void __user *)arg, &data, + sizeof(struct ion_flag_data))) + return -EFAULT; + break; + } + default: + return -ENOTTY; + } + return 0; +} + +static int msm_ion_probe(struct platform_device *pdev) +{ + struct ion_platform_data *pdata = pdev->dev.platform_data; + int err; + int i; + + num_heaps = pdata->nr; + + heaps = kcalloc(pdata->nr, sizeof(struct ion_heap *), GFP_KERNEL); + + if (!heaps) { + err = -ENOMEM; + goto out; + } + + idev = ion_device_create(msm_ion_custom_ioctl); + if (IS_ERR_OR_NULL(idev)) { + err = PTR_ERR(idev); + goto freeheaps; + } + + msm_ion_heap_fixup(pdata->heaps, num_heaps); + + /* create the heaps as specified in the board file */ + for (i = 0; i < num_heaps; i++) { + struct ion_platform_heap *heap_data = &pdata->heaps[i]; + msm_ion_allocate(heap_data); + + heap_data->has_outer_cache = pdata->has_outer_cache; + heaps[i] = ion_heap_create(heap_data); + if (IS_ERR_OR_NULL(heaps[i])) { + heaps[i] = 0; + continue; + } else { + if (heap_data->size) + pr_info("ION heap %s created at %lx " + "with size %x\n", heap_data->name, + heap_data->base, + heap_data->size); + else + pr_info("ION heap %s created\n", + heap_data->name); + } + + ion_device_add_heap(idev, heaps[i]); + } + platform_set_drvdata(pdev, idev); + return 0; + +freeheaps: + kfree(heaps); +out: + return err; +} + +static int msm_ion_remove(struct platform_device *pdev) +{ + struct ion_device *idev = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < num_heaps; i++) + ion_heap_destroy(heaps[i]); + + ion_device_destroy(idev); + kfree(heaps); + return 0; +} + +static struct platform_driver msm_ion_driver = { + .probe = msm_ion_probe, + .remove = msm_ion_remove, + .driver = { .name = "ion-msm" } +}; + +static int __init msm_ion_init(void) +{ + return platform_driver_register(&msm_ion_driver); +} + +static void __exit msm_ion_exit(void) +{ + platform_driver_unregister(&msm_ion_driver); +} + +subsys_initcall(msm_ion_init); +module_exit(msm_ion_exit); + diff --git a/drivers/gpu/ion/tegra/Makefile b/drivers/gpu/ion/tegra/Makefile new file mode 100644 index 0000000000000..11cd003fb08f0 --- /dev/null +++ b/drivers/gpu/ion/tegra/Makefile @@ -0,0 +1 @@ +obj-y += tegra_ion.o diff --git a/drivers/gpu/ion/tegra/tegra_ion.c b/drivers/gpu/ion/tegra/tegra_ion.c new file mode 100644 index 0000000000000..7af6e168ff4cf --- /dev/null +++ b/drivers/gpu/ion/tegra/tegra_ion.c @@ -0,0 +1,96 @@ +/* + * drivers/gpu/tegra/tegra_ion.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include "../ion_priv.h" + +struct ion_device *idev; +struct ion_mapper *tegra_user_mapper; +int num_heaps; +struct ion_heap **heaps; + +int tegra_ion_probe(struct platform_device *pdev) +{ + struct ion_platform_data *pdata = pdev->dev.platform_data; + int err; + int i; + + num_heaps = pdata->nr; + + heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL); + + idev = ion_device_create(NULL); + if (IS_ERR_OR_NULL(idev)) { + kfree(heaps); + return PTR_ERR(idev); + } + + /* create the heaps as specified in the board file */ + for (i = 0; i < num_heaps; i++) { + struct ion_platform_heap *heap_data = &pdata->heaps[i]; + + heaps[i] = ion_heap_create(heap_data); + if (IS_ERR_OR_NULL(heaps[i])) { + err = PTR_ERR(heaps[i]); + goto err; + } + ion_device_add_heap(idev, heaps[i]); + } + platform_set_drvdata(pdev, idev); + return 0; +err: + for (i = 0; i < num_heaps; i++) { + if (heaps[i]) + ion_heap_destroy(heaps[i]); + } + kfree(heaps); + return err; +} + +int tegra_ion_remove(struct platform_device *pdev) +{ + struct ion_device *idev = platform_get_drvdata(pdev); + int i; + + ion_device_destroy(idev); + for (i = 0; i < num_heaps; i++) + ion_heap_destroy(heaps[i]); + kfree(heaps); + return 0; +} + +static struct platform_driver ion_driver = { + .probe = tegra_ion_probe, + .remove = tegra_ion_remove, + .driver = { .name = "ion-tegra" } +}; + +static int __init ion_init(void) +{ + return platform_driver_register(&ion_driver); +} + +static void __exit ion_exit(void) +{ + platform_driver_unregister(&ion_driver); +} + +module_init(ion_init); +module_exit(ion_exit); + diff --git a/drivers/gpu/msm/Kconfig b/drivers/gpu/msm/Kconfig new file mode 100644 index 0000000000000..ba63fbcbbb408 --- /dev/null +++ b/drivers/gpu/msm/Kconfig @@ -0,0 +1,98 @@ +config MSM_KGSL + tristate "MSM 3D Graphics driver" + default n + depends on ARCH_MSM && !ARCH_MSM7X00A && !ARCH_MSM7X25 + select GENERIC_ALLOCATOR + select FW_LOADER + ---help--- + 3D graphics driver. Required to use hardware accelerated + OpenGL ES 2.0 and 1.1. + +config MSM_KGSL_CFF_DUMP + bool "Enable KGSL Common File Format (CFF) Dump Feature [Use with caution]" + default n + depends on MSM_KGSL + select RELAY + ---help--- + This is an analysis and diagnostic feature only, and should only be + turned on during KGSL GPU diagnostics and will slow down the KGSL + performance sigificantly, hence *do not use in production builds*. + When enabled, CFF Dump is on at boot. It can be turned off at runtime + via 'echo 0 > /d/kgsl/cff_dump'. The log can be captured via + /d/kgsl-cff/cpu[0|1]. + +config MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP + bool "When selected will disable KGSL CFF Dump for context switches" + default n + depends on MSM_KGSL_CFF_DUMP + ---help--- + Dumping all the memory for every context switch can produce quite + huge log files, to reduce this, turn this feature on. + +config MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL + bool "Disable human readable CP_STAT fields in post-mortem dump" + default n + depends on MSM_KGSL + ---help--- + For a more compact kernel log the human readable output of + CP_STAT can be turned off with this option. + +config MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP + bool "Disable dumping current IB1 and IB2 in post-mortem dump" + default n + depends on MSM_KGSL + ---help--- + For a more compact kernel log the IB1 and IB2 embedded dump + can be turned off with this option. Some IB dumps take up + so much space that vital other information gets cut from the + post-mortem dump. 
+ +config MSM_KGSL_PSTMRTMDMP_RB_HEX + bool "Use hex version for ring-buffer in post-mortem dump" + default n + depends on MSM_KGSL + ---help--- + Use hex version for the ring-buffer in the post-mortem dump, instead + of the human readable version. + +config MSM_KGSL_2D + tristate "MSM 2D graphics driver. Required for OpenVG" + default y + depends on MSM_KGSL && !ARCH_MSM7X27 && !ARCH_MSM7X27A && !(ARCH_QSD8X50 && !MSM_SOC_REV_A) + +config MSM_KGSL_DRM + bool "Build a DRM interface for the MSM_KGSL driver" + depends on MSM_KGSL && DRM + +config KGSL_PER_PROCESS_PAGE_TABLE + bool "Enable Per Process page tables for the KGSL driver" + default n + depends on !MSM_KGSL_DRM + ---help--- + The MMU will use per process pagetables when enabled. + +config MSM_KGSL_PAGE_TABLE_SIZE + hex "Size of pagetables" + default 0xFFF0000 + ---help--- + Sets the pagetable size used by the MMU. The max value + is 0xFFF0000 or (256M - 64K). + +config MSM_KGSL_PAGE_TABLE_COUNT + int "Minimum of concurrent pagetables to support" + default 8 + depends on KGSL_PER_PROCESS_PAGE_TABLE + ---help--- + Specify the number of pagetables to allocate at init time + This is the number of concurrent processes that are guaranteed to + to run at any time. Additional processes can be created dynamically + assuming there is enough contiguous memory to allocate the pagetable. + +config MSM_KGSL_MMU_PAGE_FAULT + bool "Force the GPU MMU to page fault for unmapped regions" + default y + +config MSM_KGSL_DISABLE_SHADOW_WRITES + bool "Disable register shadow writes for context switches" + default n + depends on MSM_KGSL diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile new file mode 100644 index 0000000000000..65774c34ae19e --- /dev/null +++ b/drivers/gpu/msm/Makefile @@ -0,0 +1,43 @@ +ccflags-y := -Iinclude/drm -Idrivers/gpu/msm + +msm_kgsl_core-y = \ + kgsl.o \ + kgsl_trace.o \ + kgsl_sharedmem.o \ + kgsl_pwrctrl.o \ + kgsl_pwrscale.o \ + kgsl_mmu.o \ + kgsl_gpummu.o \ + kgsl_iommu.o \ + kgsl_snapshot.o + +msm_kgsl_core-$(CONFIG_DEBUG_FS) += kgsl_debugfs.o +msm_kgsl_core-$(CONFIG_MSM_KGSL_CFF_DUMP) += kgsl_cffdump.o +msm_kgsl_core-$(CONFIG_MSM_KGSL_DRM) += kgsl_drm.o +msm_kgsl_core-$(CONFIG_MSM_SCM) += kgsl_pwrscale_trustzone.o +msm_kgsl_core-$(CONFIG_MSM_SLEEP_STATS_DEVICE) += kgsl_pwrscale_idlestats.o +msm_kgsl_core-$(CONFIG_SYNC) += kgsl_sync.o + +msm_adreno-y += \ + adreno_ringbuffer.o \ + adreno_drawctxt.o \ + adreno_postmortem.o \ + adreno_snapshot.o \ + adreno_a2xx.o \ + adreno_a2xx_trace.o \ + adreno_a2xx_snapshot.o \ + adreno.o + +msm_adreno-$(CONFIG_DEBUG_FS) += adreno_debugfs.o + +msm_z180-y += \ + z180.o \ + z180_trace.o + +msm_kgsl_core-objs = $(msm_kgsl_core-y) +msm_adreno-objs = $(msm_adreno-y) +msm_z180-objs = $(msm_z180-y) + +obj-$(CONFIG_MSM_KGSL) += msm_kgsl_core.o +obj-$(CONFIG_MSM_KGSL) += msm_adreno.o +obj-$(CONFIG_MSM_KGSL_2D) += msm_z180.o diff --git a/drivers/gpu/msm/a2xx_reg.h b/drivers/gpu/msm/a2xx_reg.h new file mode 100644 index 0000000000000..5a811d5fafd6e --- /dev/null +++ b/drivers/gpu/msm/a2xx_reg.h @@ -0,0 +1,452 @@ +/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __A200_REG_H +#define __A200_REG_H + +enum VGT_EVENT_TYPE { + VS_DEALLOC = 0, + PS_DEALLOC = 1, + VS_DONE_TS = 2, + PS_DONE_TS = 3, + CACHE_FLUSH_TS = 4, + CONTEXT_DONE = 5, + CACHE_FLUSH = 6, + VIZQUERY_START = 7, + VIZQUERY_END = 8, + SC_WAIT_WC = 9, + RST_PIX_CNT = 13, + RST_VTX_CNT = 14, + TILE_FLUSH = 15, + CACHE_FLUSH_AND_INV_TS_EVENT = 20, + ZPASS_DONE = 21, + CACHE_FLUSH_AND_INV_EVENT = 22, + PERFCOUNTER_START = 23, + PERFCOUNTER_STOP = 24, + VS_FETCH_DONE = 27, + FACENESS_FLUSH = 28, +}; + +enum COLORFORMATX { + COLORX_4_4_4_4 = 0, + COLORX_1_5_5_5 = 1, + COLORX_5_6_5 = 2, + COLORX_8 = 3, + COLORX_8_8 = 4, + COLORX_8_8_8_8 = 5, + COLORX_S8_8_8_8 = 6, + COLORX_16_FLOAT = 7, + COLORX_16_16_FLOAT = 8, + COLORX_16_16_16_16_FLOAT = 9, + COLORX_32_FLOAT = 10, + COLORX_32_32_FLOAT = 11, + COLORX_32_32_32_32_FLOAT = 12, + COLORX_2_3_3 = 13, + COLORX_8_8_8 = 14, +}; + +enum SURFACEFORMAT { + FMT_1_REVERSE = 0, + FMT_1 = 1, + FMT_8 = 2, + FMT_1_5_5_5 = 3, + FMT_5_6_5 = 4, + FMT_6_5_5 = 5, + FMT_8_8_8_8 = 6, + FMT_2_10_10_10 = 7, + FMT_8_A = 8, + FMT_8_B = 9, + FMT_8_8 = 10, + FMT_Cr_Y1_Cb_Y0 = 11, + FMT_Y1_Cr_Y0_Cb = 12, + FMT_5_5_5_1 = 13, + FMT_8_8_8_8_A = 14, + FMT_4_4_4_4 = 15, + FMT_10_11_11 = 16, + FMT_11_11_10 = 17, + FMT_DXT1 = 18, + FMT_DXT2_3 = 19, + FMT_DXT4_5 = 20, + FMT_24_8 = 22, + FMT_24_8_FLOAT = 23, + FMT_16 = 24, + FMT_16_16 = 25, + FMT_16_16_16_16 = 26, + FMT_16_EXPAND = 27, + FMT_16_16_EXPAND = 28, + FMT_16_16_16_16_EXPAND = 29, + FMT_16_FLOAT = 30, + FMT_16_16_FLOAT = 31, + FMT_16_16_16_16_FLOAT = 32, + FMT_32 = 33, + FMT_32_32 = 34, + FMT_32_32_32_32 = 35, + FMT_32_FLOAT = 36, + FMT_32_32_FLOAT = 37, + FMT_32_32_32_32_FLOAT = 38, + FMT_32_AS_8 = 39, + FMT_32_AS_8_8 = 40, + FMT_16_MPEG = 41, + FMT_16_16_MPEG = 42, + FMT_8_INTERLACED = 43, + FMT_32_AS_8_INTERLACED = 44, + FMT_32_AS_8_8_INTERLACED = 45, + FMT_16_INTERLACED = 46, + FMT_16_MPEG_INTERLACED = 47, + FMT_16_16_MPEG_INTERLACED = 48, + FMT_DXN = 49, + FMT_8_8_8_8_AS_16_16_16_16 = 50, + FMT_DXT1_AS_16_16_16_16 = 51, + FMT_DXT2_3_AS_16_16_16_16 = 52, + FMT_DXT4_5_AS_16_16_16_16 = 53, + FMT_2_10_10_10_AS_16_16_16_16 = 54, + FMT_10_11_11_AS_16_16_16_16 = 55, + FMT_11_11_10_AS_16_16_16_16 = 56, + FMT_32_32_32_FLOAT = 57, + FMT_DXT3A = 58, + FMT_DXT5A = 59, + FMT_CTX1 = 60, + FMT_DXT3A_AS_1_1_1_1 = 61 +}; + +#define REG_PERF_MODE_CNT 0x0 +#define REG_PERF_STATE_RESET 0x0 +#define REG_PERF_STATE_ENABLE 0x1 +#define REG_PERF_STATE_FREEZE 0x2 + +#define RB_EDRAM_INFO_EDRAM_SIZE_SIZE 4 +#define RB_EDRAM_INFO_EDRAM_MAPPING_MODE_SIZE 2 +#define RB_EDRAM_INFO_UNUSED0_SIZE 8 +#define RB_EDRAM_INFO_EDRAM_RANGE_SIZE 18 + +struct rb_edram_info_t { + unsigned int edram_size:RB_EDRAM_INFO_EDRAM_SIZE_SIZE; + unsigned int edram_mapping_mode:RB_EDRAM_INFO_EDRAM_MAPPING_MODE_SIZE; + unsigned int unused0:RB_EDRAM_INFO_UNUSED0_SIZE; + unsigned int edram_range:RB_EDRAM_INFO_EDRAM_RANGE_SIZE; +}; + +union reg_rb_edram_info { + unsigned int val; + struct rb_edram_info_t f; +}; + +#define RBBM_READ_ERROR_UNUSED0_SIZE 2 +#define RBBM_READ_ERROR_READ_ADDRESS_SIZE 15 +#define RBBM_READ_ERROR_UNUSED1_SIZE 13 +#define RBBM_READ_ERROR_READ_REQUESTER_SIZE 1 +#define RBBM_READ_ERROR_READ_ERROR_SIZE 1 + +struct rbbm_read_error_t { + unsigned int unused0:RBBM_READ_ERROR_UNUSED0_SIZE; + 
unsigned int read_address:RBBM_READ_ERROR_READ_ADDRESS_SIZE; + unsigned int unused1:RBBM_READ_ERROR_UNUSED1_SIZE; + unsigned int read_requester:RBBM_READ_ERROR_READ_REQUESTER_SIZE; + unsigned int read_error:RBBM_READ_ERROR_READ_ERROR_SIZE; +}; + +union rbbm_read_error_u { + unsigned int val:32; + struct rbbm_read_error_t f; +}; + +#define CP_RB_CNTL_RB_BUFSZ_SIZE 6 +#define CP_RB_CNTL_UNUSED0_SIZE 2 +#define CP_RB_CNTL_RB_BLKSZ_SIZE 6 +#define CP_RB_CNTL_UNUSED1_SIZE 2 +#define CP_RB_CNTL_BUF_SWAP_SIZE 2 +#define CP_RB_CNTL_UNUSED2_SIZE 2 +#define CP_RB_CNTL_RB_POLL_EN_SIZE 1 +#define CP_RB_CNTL_UNUSED3_SIZE 6 +#define CP_RB_CNTL_RB_NO_UPDATE_SIZE 1 +#define CP_RB_CNTL_UNUSED4_SIZE 3 +#define CP_RB_CNTL_RB_RPTR_WR_ENA_SIZE 1 + +struct cp_rb_cntl_t { + unsigned int rb_bufsz:CP_RB_CNTL_RB_BUFSZ_SIZE; + unsigned int unused0:CP_RB_CNTL_UNUSED0_SIZE; + unsigned int rb_blksz:CP_RB_CNTL_RB_BLKSZ_SIZE; + unsigned int unused1:CP_RB_CNTL_UNUSED1_SIZE; + unsigned int buf_swap:CP_RB_CNTL_BUF_SWAP_SIZE; + unsigned int unused2:CP_RB_CNTL_UNUSED2_SIZE; + unsigned int rb_poll_en:CP_RB_CNTL_RB_POLL_EN_SIZE; + unsigned int unused3:CP_RB_CNTL_UNUSED3_SIZE; + unsigned int rb_no_update:CP_RB_CNTL_RB_NO_UPDATE_SIZE; + unsigned int unused4:CP_RB_CNTL_UNUSED4_SIZE; + unsigned int rb_rptr_wr_ena:CP_RB_CNTL_RB_RPTR_WR_ENA_SIZE; +}; + +union reg_cp_rb_cntl { + unsigned int val:32; + struct cp_rb_cntl_t f; +}; + +#define RB_COLOR_INFO__COLOR_FORMAT_MASK 0x0000000fL +#define RB_COPY_DEST_INFO__COPY_DEST_FORMAT__SHIFT 0x00000004 + + +#define SQ_INT_CNTL__PS_WATCHDOG_MASK 0x00000001L +#define SQ_INT_CNTL__VS_WATCHDOG_MASK 0x00000002L + +#define RBBM_INT_CNTL__RDERR_INT_MASK 0x00000001L +#define RBBM_INT_CNTL__DISPLAY_UPDATE_INT_MASK 0x00000002L +#define RBBM_INT_CNTL__GUI_IDLE_INT_MASK 0x00080000L + +#define RBBM_STATUS__CMDFIFO_AVAIL_MASK 0x0000001fL +#define RBBM_STATUS__TC_BUSY_MASK 0x00000020L +#define RBBM_STATUS__HIRQ_PENDING_MASK 0x00000100L +#define RBBM_STATUS__CPRQ_PENDING_MASK 0x00000200L +#define RBBM_STATUS__CFRQ_PENDING_MASK 0x00000400L +#define RBBM_STATUS__PFRQ_PENDING_MASK 0x00000800L +#define RBBM_STATUS__VGT_BUSY_NO_DMA_MASK 0x00001000L +#define RBBM_STATUS__RBBM_WU_BUSY_MASK 0x00004000L +#define RBBM_STATUS__CP_NRT_BUSY_MASK 0x00010000L +#define RBBM_STATUS__MH_BUSY_MASK 0x00040000L +#define RBBM_STATUS__MH_COHERENCY_BUSY_MASK 0x00080000L +#define RBBM_STATUS__SX_BUSY_MASK 0x00200000L +#define RBBM_STATUS__TPC_BUSY_MASK 0x00400000L +#define RBBM_STATUS__SC_CNTX_BUSY_MASK 0x01000000L +#define RBBM_STATUS__PA_BUSY_MASK 0x02000000L +#define RBBM_STATUS__VGT_BUSY_MASK 0x04000000L +#define RBBM_STATUS__SQ_CNTX17_BUSY_MASK 0x08000000L +#define RBBM_STATUS__SQ_CNTX0_BUSY_MASK 0x10000000L +#define RBBM_STATUS__RB_CNTX_BUSY_MASK 0x40000000L +#define RBBM_STATUS__GUI_ACTIVE_MASK 0x80000000L + +#define CP_INT_CNTL__SW_INT_MASK 0x00080000L +#define CP_INT_CNTL__T0_PACKET_IN_IB_MASK 0x00800000L +#define CP_INT_CNTL__OPCODE_ERROR_MASK 0x01000000L +#define CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK 0x02000000L +#define CP_INT_CNTL__RESERVED_BIT_ERROR_MASK 0x04000000L +#define CP_INT_CNTL__IB_ERROR_MASK 0x08000000L +#define CP_INT_CNTL__IB2_INT_MASK 0x20000000L +#define CP_INT_CNTL__IB1_INT_MASK 0x40000000L +#define CP_INT_CNTL__RB_INT_MASK 0x80000000L + +#define MASTER_INT_SIGNAL__MH_INT_STAT 0x00000020L +#define MASTER_INT_SIGNAL__SQ_INT_STAT 0x04000000L +#define MASTER_INT_SIGNAL__CP_INT_STAT 0x40000000L +#define MASTER_INT_SIGNAL__RBBM_INT_STAT 0x80000000L + +#define RB_EDRAM_INFO__EDRAM_SIZE_MASK 0x0000000fL 
+#define RB_EDRAM_INFO__EDRAM_RANGE_MASK 0xffffc000L + +#define MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT 0x00000006 +#define MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT 0x00000007 +#define MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT 0x00000008 +#define MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT 0x00000009 +#define MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT 0x0000000a +#define MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT 0x0000000d +#define MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT 0x0000000e +#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT 0x0000000f +#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT 0x00000010 +#define MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT 0x00000016 +#define MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT 0x00000017 +#define MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT 0x00000018 +#define MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT 0x00000019 +#define MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT 0x0000001a + +#define CP_RB_CNTL__RB_BUFSZ__SHIFT 0x00000000 +#define CP_RB_CNTL__RB_BLKSZ__SHIFT 0x00000008 +#define CP_RB_CNTL__RB_POLL_EN__SHIFT 0x00000014 +#define CP_RB_CNTL__RB_NO_UPDATE__SHIFT 0x0000001b + +#define RB_COLOR_INFO__COLOR_FORMAT__SHIFT 0x00000000 +#define RB_EDRAM_INFO__EDRAM_MAPPING_MODE__SHIFT 0x00000004 +#define RB_EDRAM_INFO__EDRAM_RANGE__SHIFT 0x0000000e + +#define REG_CP_CSQ_IB1_STAT 0x01FE +#define REG_CP_CSQ_IB2_STAT 0x01FF +#define REG_CP_CSQ_RB_STAT 0x01FD +#define REG_CP_DEBUG 0x01FC +#define REG_CP_IB1_BASE 0x0458 +#define REG_CP_IB1_BUFSZ 0x0459 +#define REG_CP_IB2_BASE 0x045A +#define REG_CP_IB2_BUFSZ 0x045B +#define REG_CP_INT_ACK 0x01F4 +#define REG_CP_INT_CNTL 0x01F2 +#define REG_CP_INT_STATUS 0x01F3 +#define REG_CP_ME_CNTL 0x01F6 +#define REG_CP_ME_RAM_DATA 0x01FA +#define REG_CP_ME_RAM_WADDR 0x01F8 +#define REG_CP_ME_STATUS 0x01F7 +#define REG_CP_PFP_UCODE_ADDR 0x00C0 +#define REG_CP_PFP_UCODE_DATA 0x00C1 +#define REG_CP_QUEUE_THRESHOLDS 0x01D5 +#define REG_CP_RB_BASE 0x01C0 +#define REG_CP_RB_CNTL 0x01C1 +#define REG_CP_RB_RPTR 0x01C4 +#define REG_CP_RB_RPTR_ADDR 0x01C3 +#define REG_CP_RB_RPTR_WR 0x01C7 +#define REG_CP_RB_WPTR 0x01C5 +#define REG_CP_RB_WPTR_BASE 0x01C8 +#define REG_CP_RB_WPTR_DELAY 0x01C6 +#define REG_CP_STAT 0x047F +#define REG_CP_STATE_DEBUG_DATA 0x01ED +#define REG_CP_STATE_DEBUG_INDEX 0x01EC +#define REG_CP_ST_BASE 0x044D +#define REG_CP_ST_BUFSZ 0x044E + +#define REG_CP_PERFMON_CNTL 0x0444 +#define REG_CP_PERFCOUNTER_SELECT 0x0445 +#define REG_CP_PERFCOUNTER_LO 0x0446 +#define REG_CP_PERFCOUNTER_HI 0x0447 + +#define REG_RBBM_PERFCOUNTER1_SELECT 0x0395 +#define REG_RBBM_PERFCOUNTER1_HI 0x0398 +#define REG_RBBM_PERFCOUNTER1_LO 0x0397 + +#define REG_MASTER_INT_SIGNAL 0x03B7 + +#define REG_PA_CL_VPORT_XSCALE 0x210F +#define REG_PA_CL_VPORT_ZOFFSET 0x2114 +#define REG_PA_CL_VPORT_ZSCALE 0x2113 +#define REG_PA_CL_CLIP_CNTL 0x2204 +#define REG_PA_CL_VTE_CNTL 0x2206 +#define REG_PA_SC_AA_MASK 0x2312 +#define REG_PA_SC_LINE_CNTL 0x2300 +#define REG_PA_SC_SCREEN_SCISSOR_BR 0x200F +#define REG_PA_SC_SCREEN_SCISSOR_TL 0x200E +#define REG_PA_SC_VIZ_QUERY 0x2293 +#define REG_PA_SC_VIZ_QUERY_STATUS 0x0C44 +#define REG_PA_SC_WINDOW_OFFSET 0x2080 +#define REG_PA_SC_WINDOW_SCISSOR_BR 0x2082 +#define REG_PA_SC_WINDOW_SCISSOR_TL 0x2081 +#define REG_PA_SU_FACE_DATA 0x0C86 +#define REG_PA_SU_POINT_SIZE 0x2280 +#define REG_PA_SU_LINE_CNTL 0x2282 +#define REG_PA_SU_POLY_OFFSET_BACK_OFFSET 0x2383 +#define REG_PA_SU_POLY_OFFSET_FRONT_SCALE 0x2380 +#define REG_PA_SU_SC_MODE_CNTL 0x2205 + +#define REG_PC_INDEX_OFFSET 0x2102 + +#define REG_RBBM_CNTL 
0x003B +#define REG_RBBM_INT_ACK 0x03B6 +#define REG_RBBM_INT_CNTL 0x03B4 +#define REG_RBBM_INT_STATUS 0x03B5 +#define REG_RBBM_PATCH_RELEASE 0x0001 +#define REG_RBBM_PERIPHID1 0x03F9 +#define REG_RBBM_PERIPHID2 0x03FA +#define REG_RBBM_DEBUG 0x039B +#define REG_RBBM_DEBUG_OUT 0x03A0 +#define REG_RBBM_DEBUG_CNTL 0x03A1 +#define REG_RBBM_PM_OVERRIDE1 0x039C +#define REG_RBBM_PM_OVERRIDE2 0x039D +#define REG_RBBM_READ_ERROR 0x03B3 +#define REG_RBBM_SOFT_RESET 0x003C +#define REG_RBBM_STATUS 0x05D0 + +#define REG_RB_COLORCONTROL 0x2202 +#define REG_RB_COLOR_DEST_MASK 0x2326 +#define REG_RB_COLOR_MASK 0x2104 +#define REG_RB_COPY_CONTROL 0x2318 +#define REG_RB_DEPTHCONTROL 0x2200 +#define REG_RB_EDRAM_INFO 0x0F02 +#define REG_RB_MODECONTROL 0x2208 +#define REG_RB_SURFACE_INFO 0x2000 +#define REG_RB_SAMPLE_POS 0x220a + +#define REG_SCRATCH_ADDR 0x01DD +#define REG_SCRATCH_REG0 0x0578 +#define REG_SCRATCH_REG2 0x057A +#define REG_SCRATCH_UMSK 0x01DC + +#define REG_SQ_CF_BOOLEANS 0x4900 +#define REG_SQ_CF_LOOP 0x4908 +#define REG_SQ_GPR_MANAGEMENT 0x0D00 +#define REG_SQ_FLOW_CONTROL 0x0D01 +#define REG_SQ_INST_STORE_MANAGMENT 0x0D02 +#define REG_SQ_INT_ACK 0x0D36 +#define REG_SQ_INT_CNTL 0x0D34 +#define REG_SQ_INT_STATUS 0x0D35 +#define REG_SQ_PROGRAM_CNTL 0x2180 +#define REG_SQ_PS_PROGRAM 0x21F6 +#define REG_SQ_VS_PROGRAM 0x21F7 +#define REG_SQ_WRAPPING_0 0x2183 +#define REG_SQ_WRAPPING_1 0x2184 + +#define REG_VGT_ENHANCE 0x2294 +#define REG_VGT_INDX_OFFSET 0x2102 +#define REG_VGT_MAX_VTX_INDX 0x2100 +#define REG_VGT_MIN_VTX_INDX 0x2101 + +#define REG_TP0_CHICKEN 0x0E1E +#define REG_TC_CNTL_STATUS 0x0E00 +#define REG_PA_SC_AA_CONFIG 0x2301 +#define REG_VGT_VERTEX_REUSE_BLOCK_CNTL 0x2316 +#define REG_SQ_INTERPOLATOR_CNTL 0x2182 +#define REG_RB_DEPTH_INFO 0x2002 +#define REG_COHER_DEST_BASE_0 0x2006 +#define REG_RB_FOG_COLOR 0x2109 +#define REG_RB_STENCILREFMASK_BF 0x210C +#define REG_PA_SC_LINE_STIPPLE 0x2283 +#define REG_SQ_PS_CONST 0x2308 +#define REG_RB_DEPTH_CLEAR 0x231D +#define REG_RB_SAMPLE_COUNT_CTL 0x2324 +#define REG_SQ_CONSTANT_0 0x4000 +#define REG_SQ_FETCH_0 0x4800 + +#define REG_COHER_BASE_PM4 0xA2A +#define REG_COHER_STATUS_PM4 0xA2B +#define REG_COHER_SIZE_PM4 0xA29 + +/*registers added in adreno220*/ +#define REG_A220_PC_INDX_OFFSET REG_VGT_INDX_OFFSET +#define REG_A220_PC_VERTEX_REUSE_BLOCK_CNTL REG_VGT_VERTEX_REUSE_BLOCK_CNTL +#define REG_A220_PC_MAX_VTX_INDX REG_VGT_MAX_VTX_INDX +#define REG_A220_RB_LRZ_VSC_CONTROL 0x2209 +#define REG_A220_GRAS_CONTROL 0x2210 +#define REG_A220_VSC_BIN_SIZE 0x0C01 +#define REG_A220_VSC_PIPE_DATA_LENGTH_7 0x0C1D + +/*registers added in adreno225*/ +#define REG_A225_RB_COLOR_INFO3 0x2005 +#define REG_A225_PC_MULTI_PRIM_IB_RESET_INDX 0x2103 +#define REG_A225_GRAS_UCP0X 0x2340 +#define REG_A225_GRAS_UCP5W 0x2357 +#define REG_A225_GRAS_UCP_ENABLED 0x2360 + +/* Debug registers used by snapshot */ +#define REG_PA_SU_DEBUG_CNTL 0x0C80 +#define REG_PA_SU_DEBUG_DATA 0x0C81 +#define REG_RB_DEBUG_CNTL 0x0F26 +#define REG_RB_DEBUG_DATA 0x0F27 +#define REG_PC_DEBUG_CNTL 0x0C38 +#define REG_PC_DEBUG_DATA 0x0C39 +#define REG_GRAS_DEBUG_CNTL 0x0C80 +#define REG_GRAS_DEBUG_DATA 0x0C81 +#define REG_SQ_DEBUG_MISC 0x0D05 +#define REG_SQ_DEBUG_INPUT_FSM 0x0DAE +#define REG_SQ_DEBUG_CONST_MGR_FSM 0x0DAF +#define REG_SQ_DEBUG_EXP_ALLOC 0x0DB3 +#define REG_SQ_DEBUG_FSM_ALU_0 0x0DB1 +#define REG_SQ_DEBUG_FSM_ALU_1 0x0DB2 +#define REG_SQ_DEBUG_PTR_BUFF 0x0DB4 +#define REG_SQ_DEBUG_GPR_VTX 0x0DB5 +#define REG_SQ_DEBUG_GPR_PIX 0x0DB6 +#define REG_SQ_DEBUG_TB_STATUS_SEL 
0x0DB7 +#define REG_SQ_DEBUG_VTX_TB_0 0x0DB8 +#define REG_SQ_DEBUG_VTX_TB_1 0x0DB9 +#define REG_SQ_DEBUG_VTX_TB_STATE_MEM 0x0DBB +#define REG_SQ_DEBUG_TP_FSM 0x0DB0 +#define REG_SQ_DEBUG_VTX_TB_STATUS_REG 0x0DBA +#define REG_SQ_DEBUG_PIX_TB_0 0x0DBC +#define REG_SQ_DEBUG_PIX_TB_STATUS_REG_0 0x0DBD +#define REG_SQ_DEBUG_PIX_TB_STATUS_REG_1 0x0DBE +#define REG_SQ_DEBUG_PIX_TB_STATUS_REG_2 0x0DBF +#define REG_SQ_DEBUG_PIX_TB_STATUS_REG_3 0x0DC0 +#define REG_SQ_DEBUG_PIX_TB_STATE_MEM 0x0DC1 +#define REG_SQ_DEBUG_MISC_0 0x2309 +#define REG_SQ_DEBUG_MISC_1 0x230A + +#endif /* __A200_REG_H */ diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c new file mode 100644 index 0000000000000..eb6f47cea4af9 --- /dev/null +++ b/drivers/gpu/msm/adreno.c @@ -0,0 +1,1537 @@ +/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include + +#include + +#include "kgsl.h" +#include "kgsl_pwrscale.h" +#include "kgsl_cffdump.h" +#include "kgsl_sharedmem.h" + +#include "adreno.h" +#include "adreno_pm4types.h" +#include "adreno_debugfs.h" +#include "adreno_postmortem.h" + +#include "a2xx_reg.h" +#include "kgsl_mmu.h" + +#define DRIVER_VERSION_MAJOR 3 +#define DRIVER_VERSION_MINOR 1 + +/* Adreno MH arbiter config*/ +#define ADRENO_CFG_MHARB \ + (0x10 \ + | (0 << MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT) \ + | (1 << MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT) \ + | (1 << MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT) \ + | (0 << MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT) \ + | (1 << MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT) \ + | (1 << MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT) \ + | (1 << MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT) \ + | (0 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT) \ + | (0x8 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT) \ + | (1 << MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT) \ + | (1 << MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT) \ + | (1 << MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT) \ + | (1 << MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT) \ + | (1 << MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT)) + +#define ADRENO_MMU_CONFIG \ + (0x01 \ + | (MMU_CONFIG << MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT) \ + | (MMU_CONFIG << MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT) \ + | (MMU_CONFIG << MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT) \ + | (MMU_CONFIG << MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT) \ + | (MMU_CONFIG << MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT) \ + | (MMU_CONFIG << MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT) \ + | (MMU_CONFIG << MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT) \ + | (MMU_CONFIG << MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT) \ + | (MMU_CONFIG << MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT) \ + | (MMU_CONFIG << MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT) \ + | (MMU_CONFIG << MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT)) + +static const struct kgsl_functable adreno_functable; + +static struct adreno_device device_3d0 = { + .dev = { + .name = DEVICE_3D0_NAME, + .id = KGSL_DEVICE_3D0, + .ver_major = DRIVER_VERSION_MAJOR, + .ver_minor = 
DRIVER_VERSION_MINOR, + .mh = { + .mharb = ADRENO_CFG_MHARB, + /* Remove 1k boundary check in z470 to avoid a GPU + * hang. Notice that this solution won't work if + * both EBI and SMI are used + */ + .mh_intf_cfg1 = 0x00032f07, + /* turn off memory protection unit by setting + acceptable physical address range to include + all pages. */ + .mpu_base = 0x00000000, + .mpu_range = 0xFFFFF000, + }, + .mmu = { + .config = ADRENO_MMU_CONFIG, + }, + .pwrctrl = { + .regulator_name = "fs_gfx3d", + .irq_name = KGSL_3D0_IRQ, + }, + .mutex = __MUTEX_INITIALIZER(device_3d0.dev.mutex), + .state = KGSL_STATE_INIT, + .active_cnt = 0, + .iomemname = KGSL_3D0_REG_MEMORY, + .ftbl = &adreno_functable, +#ifdef CONFIG_HAS_EARLYSUSPEND + .display_off = { + .level = EARLY_SUSPEND_LEVEL_STOP_DRAWING, + .suspend = kgsl_early_suspend_driver, + .resume = kgsl_late_resume_driver, + }, +#endif + }, + .gmemspace = { + .gpu_base = 0, + .sizebytes = SZ_256K, + }, + .pfp_fw = NULL, + .pm4_fw = NULL, + .wait_timeout = 10000, /* in milliseconds */ + .ib_check_level = 0, +}; + + +/* + * This is the master list of all GPU cores that are supported by this + * driver. + */ + +#define ANY_ID (~0) + +static const struct { + enum adreno_gpurev gpurev; + unsigned int core, major, minor, patchid; + const char *pm4fw; + const char *pfpfw; + struct adreno_gpudev *gpudev; + unsigned int istore_size; + unsigned int pix_shader_start; +} adreno_gpulist[] = { + { ADRENO_REV_A200, 0, 2, ANY_ID, ANY_ID, + "yamato_pm4.fw", "yamato_pfp.fw", &adreno_a2xx_gpudev, + 512, 384}, + { ADRENO_REV_A205, 0, 1, 0, ANY_ID, + "yamato_pm4.fw", "yamato_pfp.fw", &adreno_a2xx_gpudev, + 512, 384}, + { ADRENO_REV_A220, 2, 1, ANY_ID, ANY_ID, + "leia_pm4_470.fw", "leia_pfp_470.fw", &adreno_a2xx_gpudev, + 512, 384}, + /* + * patchlevel 5 (8960v2) needs special pm4 firmware to work around + * a hardware problem. 
+ */ + { ADRENO_REV_A225, 2, 2, 0, 5, + "a225p5_pm4.fw", "a225_pfp.fw", &adreno_a2xx_gpudev, + 1536, 768 }, + { ADRENO_REV_A225, 2, 2, 0, 6, + "a225_pm4.fw", "a225_pfp.fw", &adreno_a2xx_gpudev, + 1536, 768 }, + { ADRENO_REV_A225, 2, 2, ANY_ID, ANY_ID, + "a225_pm4.fw", "a225_pfp.fw", &adreno_a2xx_gpudev, + 1536, 768 }, +}; + +static void adreno_gmeminit(struct adreno_device *adreno_dev) +{ + struct kgsl_device *device = &adreno_dev->dev; + union reg_rb_edram_info rb_edram_info; + unsigned int gmem_size; + unsigned int edram_value = 0; + + /* make sure edram range is aligned to size */ + BUG_ON(adreno_dev->gmemspace.gpu_base & + (adreno_dev->gmemspace.sizebytes - 1)); + + /* get edram_size value equivalent */ + gmem_size = (adreno_dev->gmemspace.sizebytes >> 14); + while (gmem_size >>= 1) + edram_value++; + + rb_edram_info.val = 0; + + rb_edram_info.f.edram_size = edram_value; + rb_edram_info.f.edram_mapping_mode = 0; /* EDRAM_MAP_UPPER */ + + /* must be aligned to size */ + rb_edram_info.f.edram_range = (adreno_dev->gmemspace.gpu_base >> 14); + + adreno_regwrite(device, REG_RB_EDRAM_INFO, rb_edram_info.val); +} + +static irqreturn_t adreno_isr(int irq, void *data) +{ + irqreturn_t result; + struct kgsl_device *device = data; + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + + result = adreno_dev->gpudev->irq_handler(adreno_dev); + + if (device->requested_state == KGSL_STATE_NONE) { + if (device->pwrctrl.nap_allowed == true) { + kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP); + queue_work(device->work_queue, &device->idle_check_ws); + } else if (device->pwrscale.policy != NULL) { + queue_work(device->work_queue, &device->idle_check_ws); + } + } + + /* Reset the time-out in our idle timer */ + mod_timer_pending(&device->idle_timer, + jiffies + device->pwrctrl.interval_timeout); + return result; +} + +static void adreno_cleanup_pt(struct kgsl_device *device, + struct kgsl_pagetable *pagetable) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer; + + kgsl_mmu_unmap(pagetable, &rb->buffer_desc); + + kgsl_mmu_unmap(pagetable, &rb->memptrs_desc); + + kgsl_mmu_unmap(pagetable, &device->memstore); + + kgsl_mmu_unmap(pagetable, &device->mmu.setstate_memory); +} + +static int adreno_setup_pt(struct kgsl_device *device, + struct kgsl_pagetable *pagetable) +{ + int result = 0; + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer; + + result = kgsl_mmu_map_global(pagetable, &rb->buffer_desc, + GSL_PT_PAGE_RV); + if (result) + goto error; + + result = kgsl_mmu_map_global(pagetable, &rb->memptrs_desc, + GSL_PT_PAGE_RV | GSL_PT_PAGE_WV); + if (result) + goto unmap_buffer_desc; + + result = kgsl_mmu_map_global(pagetable, &device->memstore, + GSL_PT_PAGE_RV | GSL_PT_PAGE_WV); + if (result) + goto unmap_memptrs_desc; + + result = kgsl_mmu_map_global(pagetable, &device->mmu.setstate_memory, + GSL_PT_PAGE_RV | GSL_PT_PAGE_WV); + if (result) + goto unmap_memstore_desc; + + return result; + +unmap_memstore_desc: + kgsl_mmu_unmap(pagetable, &device->memstore); + +unmap_memptrs_desc: + kgsl_mmu_unmap(pagetable, &rb->memptrs_desc); + +unmap_buffer_desc: + kgsl_mmu_unmap(pagetable, &rb->buffer_desc); + +error: + return result; +} + +static void adreno_setstate(struct kgsl_device *device, + unsigned int context_id, + uint32_t flags) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + unsigned int link[32]; + unsigned int *cmds = &link[0]; + int sizedwords = 0; 
+ unsigned int mh_mmu_invalidate = 0x00000003; /*invalidate all and tc */ + struct kgsl_context *context; + struct adreno_context *adreno_ctx = NULL; + + /* + * If possible, then set the state via the command stream to avoid + * a CPU idle. Otherwise, use the default setstate which uses register + * writes For CFF dump we must idle and use the registers so that it is + * easier to filter out the mmu accesses from the dump + */ + if (!kgsl_cff_dump_enable && adreno_dev->drawctxt_active) { + context = idr_find(&device->context_idr, context_id); + adreno_ctx = context->devctxt; + + if (flags & KGSL_MMUFLAGS_PTUPDATE) { + /* wait for graphics pipe to be idle */ + *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1); + *cmds++ = 0x00000000; + + /* set page table base */ + *cmds++ = cp_type0_packet(MH_MMU_PT_BASE, 1); + *cmds++ = kgsl_pt_get_base_addr( + device->mmu.hwpagetable); + sizedwords += 4; + } + + if (flags & KGSL_MMUFLAGS_TLBFLUSH) { + if (!(flags & KGSL_MMUFLAGS_PTUPDATE)) { + *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, + 1); + *cmds++ = 0x00000000; + sizedwords += 2; + } + *cmds++ = cp_type0_packet(MH_MMU_INVALIDATE, 1); + *cmds++ = mh_mmu_invalidate; + sizedwords += 2; + } + + if (flags & KGSL_MMUFLAGS_PTUPDATE && + adreno_is_a20x(adreno_dev)) { + /* HW workaround: to resolve MMU page fault interrupts + * caused by the VGT.It prevents the CP PFP from filling + * the VGT DMA request fifo too early,thereby ensuring + * that the VGT will not fetch vertex/bin data until + * after the page table base register has been updated. + * + * Two null DRAW_INDX_BIN packets are inserted right + * after the page table base update, followed by a + * wait for idle. The null packets will fill up the + * VGT DMA request fifo and prevent any further + * vertex/bin updates from occurring until the wait + * has finished. 
*/ + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); + *cmds++ = (0x4 << 16) | + (REG_PA_SU_SC_MODE_CNTL - 0x2000); + *cmds++ = 0; /* disable faceness generation */ + *cmds++ = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1); + *cmds++ = device->mmu.setstate_memory.gpuaddr; + *cmds++ = cp_type3_packet(CP_DRAW_INDX_BIN, 6); + *cmds++ = 0; /* viz query info */ + *cmds++ = 0x0003C004; /* draw indicator */ + *cmds++ = 0; /* bin base */ + *cmds++ = 3; /* bin size */ + *cmds++ = + device->mmu.setstate_memory.gpuaddr; /* dma base */ + *cmds++ = 6; /* dma size */ + *cmds++ = cp_type3_packet(CP_DRAW_INDX_BIN, 6); + *cmds++ = 0; /* viz query info */ + *cmds++ = 0x0003C004; /* draw indicator */ + *cmds++ = 0; /* bin base */ + *cmds++ = 3; /* bin size */ + /* dma base */ + *cmds++ = device->mmu.setstate_memory.gpuaddr; + *cmds++ = 6; /* dma size */ + *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1); + *cmds++ = 0x00000000; + sizedwords += 21; + } + + + if (flags & (KGSL_MMUFLAGS_PTUPDATE | KGSL_MMUFLAGS_TLBFLUSH)) { + *cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1); + *cmds++ = 0x7fff; /* invalidate all base pointers */ + sizedwords += 2; + } + + adreno_ringbuffer_issuecmds(device, adreno_ctx, + KGSL_CMD_FLAGS_PMODE, + &link[0], sizedwords); + } else { + kgsl_mmu_device_setstate(device, flags); + } +} + +static unsigned int +adreno_getchipid(struct kgsl_device *device) +{ + unsigned int chipid = 0; + unsigned int coreid, majorid, minorid, patchid, revid; + uint32_t soc_platform_version = socinfo_get_version(); + + adreno_regread(device, REG_RBBM_PERIPHID1, &coreid); + adreno_regread(device, REG_RBBM_PERIPHID2, &majorid); + adreno_regread(device, REG_RBBM_PATCH_RELEASE, &revid); + + /* + * adreno 22x gpus are indicated by coreid 2, + * but REG_RBBM_PERIPHID1 always contains 0 for this field + */ + if (cpu_is_msm8960() || cpu_is_msm8x60() || cpu_is_msm8930()) + chipid = 2 << 24; + else + chipid = (coreid & 0xF) << 24; + + chipid |= ((majorid >> 4) & 0xF) << 16; + + minorid = ((revid >> 0) & 0xFF); + + patchid = ((revid >> 16) & 0xFF); + + /* 8x50 returns 0 for patch release, but it should be 1 */ + /* 8960v3 returns 5 for patch release, but it should be 6 */ + if (cpu_is_qsd8x50()) + patchid = 1; + else if (cpu_is_msm8960() && + SOCINFO_VERSION_MAJOR(soc_platform_version) == 3) + patchid = 6; + + chipid |= (minorid << 8) | patchid; + + return chipid; +} + +static inline bool _rev_match(unsigned int id, unsigned int entry) +{ + return (entry == ANY_ID || entry == id); +} + +static void +adreno_identify_gpu(struct adreno_device *adreno_dev) +{ + unsigned int i, core, major, minor, patchid; + + adreno_dev->chip_id = adreno_getchipid(&adreno_dev->dev); + + core = (adreno_dev->chip_id >> 24) & 0xff; + major = (adreno_dev->chip_id >> 16) & 0xff; + minor = (adreno_dev->chip_id >> 8) & 0xff; + patchid = (adreno_dev->chip_id & 0xff); + + for (i = 0; i < ARRAY_SIZE(adreno_gpulist); i++) { + if (core == adreno_gpulist[i].core && + _rev_match(major, adreno_gpulist[i].major) && + _rev_match(minor, adreno_gpulist[i].minor) && + _rev_match(patchid, adreno_gpulist[i].patchid)) + break; + } + + if (i == ARRAY_SIZE(adreno_gpulist)) { + adreno_dev->gpurev = ADRENO_REV_UNKNOWN; + return; + } + + adreno_dev->gpurev = adreno_gpulist[i].gpurev; + adreno_dev->gpudev = adreno_gpulist[i].gpudev; + adreno_dev->pfp_fwfile = adreno_gpulist[i].pfpfw; + adreno_dev->pm4_fwfile = adreno_gpulist[i].pm4fw; + adreno_dev->istore_size = adreno_gpulist[i].istore_size; + adreno_dev->pix_shader_start = adreno_gpulist[i].pix_shader_start; +} + 
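For reference, the decode-and-match step above can be exercised in isolation. The sketch below is a standalone illustration only (the chip ID value and the single table entry are made-up examples, not the driver's table): it shows how a packed 32-bit chip_id splits into core/major/minor/patchid fields and how ANY_ID acts as a wildcard in the comparison.

    /*
     * Standalone illustration of the chip ID decode and wildcard match
     * performed by adreno_getchipid()/adreno_identify_gpu() above.
     * The chip ID value and the table entry here are examples only.
     */
    #include <stdio.h>

    #define ANY_ID (~0u)

    static int rev_match(unsigned int id, unsigned int entry)
    {
            return entry == ANY_ID || entry == id;
    }

    int main(void)
    {
            unsigned int chipid  = 0x02020005;      /* example value */
            unsigned int core    = (chipid >> 24) & 0xff;
            unsigned int major   = (chipid >> 16) & 0xff;
            unsigned int minor   = (chipid >> 8) & 0xff;
            unsigned int patchid = chipid & 0xff;

            /* one entry shaped like adreno_gpulist[]: an A22x catch-all */
            struct {
                    unsigned int core, major, minor, patchid;
            } entry = { 2, 2, ANY_ID, ANY_ID };

            if (core == entry.core &&
                rev_match(major, entry.major) &&
                rev_match(minor, entry.minor) &&
                rev_match(patchid, entry.patchid))
                    printf("core %u major %u minor %u patch %u matched\n",
                           core, major, minor, patchid);
            return 0;
    }

The real table is walked in order and stops at the first match, so exact entries (for example the patchlevel-5 firmware workaround) are tried before the ANY_ID catch-all.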
+static int __devinit +adreno_probe(struct platform_device *pdev) +{ + struct kgsl_device *device; + struct adreno_device *adreno_dev; + int status = -EINVAL; + + device = (struct kgsl_device *)pdev->id_entry->driver_data; + adreno_dev = ADRENO_DEVICE(device); + device->parentdev = &pdev->dev; + + init_completion(&device->recovery_gate); + + status = adreno_ringbuffer_init(device); + if (status != 0) + goto error; + + status = kgsl_device_platform_probe(device, adreno_isr); + if (status) + goto error_close_rb; + + adreno_debugfs_init(device); + + kgsl_pwrscale_init(device); + kgsl_pwrscale_attach_policy(device, ADRENO_DEFAULT_PWRSCALE_POLICY); + + INIT_WORK(&device->print_fault_ib, adreno_print_fault_ib_work); + + device->flags &= ~KGSL_FLAGS_SOFT_RESET; + return 0; + +error_close_rb: + adreno_ringbuffer_close(&adreno_dev->ringbuffer); +error: + device->parentdev = NULL; + return status; +} + +static int __devexit adreno_remove(struct platform_device *pdev) +{ + struct kgsl_device *device; + struct adreno_device *adreno_dev; + + device = (struct kgsl_device *)pdev->id_entry->driver_data; + adreno_dev = ADRENO_DEVICE(device); + + kgsl_pwrscale_detach_policy(device); + kgsl_pwrscale_close(device); + + adreno_ringbuffer_close(&adreno_dev->ringbuffer); + kgsl_device_platform_remove(device); + + return 0; +} + +static int adreno_start(struct kgsl_device *device, unsigned int init_ram) +{ + int status = -EINVAL; + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + int init_reftimestamp = 0x7fffffff; + + if (KGSL_STATE_DUMP_AND_RECOVER != device->state) + kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT); + + /* Power up the device */ + kgsl_pwrctrl_enable(device); + + /* Identify the specific GPU */ + adreno_identify_gpu(adreno_dev); + + if (adreno_dev->gpurev == ADRENO_REV_UNKNOWN) { + KGSL_DRV_ERR(device, "Unknown chip ID %x\n", + adreno_dev->chip_id); + goto error_clk_off; + } + + if (adreno_is_a20x(adreno_dev)) { + /* + * the MH_CLNT_INTF_CTRL_CONFIG registers aren't present + * on older gpus + */ + device->mh.mh_intf_cfg1 = 0; + device->mh.mh_intf_cfg2 = 0; + } + + kgsl_mh_start(device); + + if (kgsl_mmu_start(device)) + goto error_clk_off; + + /*We need to make sure all blocks are powered up and clocked before + *issuing a soft reset. The overrides will then be turned off (set to 0) + */ + adreno_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0xfffffffe); + adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0xffffffff); + + /* Only reset CP block if all blocks have previously been reset */ + if (!(device->flags & KGSL_FLAGS_SOFT_RESET) || + !adreno_is_a22x(adreno_dev)) { + adreno_regwrite(device, REG_RBBM_SOFT_RESET, 0xFFFFFFFF); + device->flags |= KGSL_FLAGS_SOFT_RESET; + } else + adreno_regwrite(device, REG_RBBM_SOFT_RESET, 0x00000001); + + /* The core is in an indeterminate state until the reset completes + * after 30ms. 
+ */ + msleep(30); + + adreno_regwrite(device, REG_RBBM_SOFT_RESET, 0x00000000); + + adreno_regwrite(device, REG_RBBM_CNTL, 0x00004442); + + if (adreno_is_a225(adreno_dev)) { + /* Enable large instruction store for A225 */ + adreno_regwrite(device, REG_SQ_FLOW_CONTROL, 0x18000000); + } + + adreno_regwrite(device, REG_SQ_VS_PROGRAM, 0x00000000); + adreno_regwrite(device, REG_SQ_PS_PROGRAM, 0x00000000); + + if (cpu_is_msm8960() || cpu_is_msm8930()) + adreno_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0x200); + else + adreno_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0); + + if (!adreno_is_a22x(adreno_dev)) + adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0); + else + adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0x80); + + kgsl_sharedmem_set(&device->memstore, 0, 0, device->memstore.size); + + kgsl_sharedmem_writel(&device->memstore, + KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts), + init_reftimestamp); + + adreno_regwrite(device, REG_RBBM_DEBUG, 0x00080000); + + /* Make sure interrupts are disabled */ + + adreno_regwrite(device, REG_RBBM_INT_CNTL, 0); + adreno_regwrite(device, REG_CP_INT_CNTL, 0); + adreno_regwrite(device, REG_SQ_INT_CNTL, 0); + + if (adreno_is_a22x(adreno_dev)) + adreno_dev->gmemspace.sizebytes = SZ_512K; + else + adreno_dev->gmemspace.sizebytes = SZ_256K; + adreno_gmeminit(adreno_dev); + + kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON); + device->ftbl->irqctrl(device, 1); + + status = adreno_ringbuffer_start(&adreno_dev->ringbuffer, init_ram); + + if (status == 0) { + /* While recovery is on we do not want timer to + * fire and attempt to change any device state */ + if (KGSL_STATE_DUMP_AND_RECOVER != device->state) + mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT); + return 0; + } + + kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF); + kgsl_mmu_stop(device); +error_clk_off: + kgsl_pwrctrl_disable(device); + + return status; +} + +static int adreno_stop(struct kgsl_device *device) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + + adreno_dev->drawctxt_active = NULL; + + adreno_ringbuffer_stop(&adreno_dev->ringbuffer); + + kgsl_mmu_stop(device); + + device->ftbl->irqctrl(device, 0); + kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF); + del_timer_sync(&device->idle_timer); + + /* Power down the device */ + kgsl_pwrctrl_disable(device); + + return 0; +} + +static void adreno_mark_context_status(struct kgsl_device *device, + int recovery_status) +{ + struct kgsl_context *context; + int next = 0; + /* + * Set the reset status of all contexts to + * INNOCENT_CONTEXT_RESET_EXT except for the bad context + * since thats the guilty party, if recovery failed then + * mark all as guilty + */ + while ((context = idr_get_next(&device->context_idr, &next))) { + struct adreno_context *adreno_context = context->devctxt; + if (recovery_status) { + context->reset_status = + KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT; + adreno_context->flags |= CTXT_FLAGS_GPU_HANG; + } else if (KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT != + context->reset_status) { + if (adreno_context->flags & (CTXT_FLAGS_GPU_HANG || + CTXT_FLAGS_GPU_HANG_RECOVERED)) + context->reset_status = + KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT; + else + context->reset_status = + KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT; + } + next = next + 1; + } +} + +static int +adreno_recover_hang(struct kgsl_device *device, + struct adreno_recovery_data *rec_data) +{ + int ret; + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer; + unsigned int timestamp; + struct kgsl_context *context; + 
struct adreno_context *adreno_context; + + KGSL_DRV_ERR(device, + "Starting recovery from 3D GPU hang. Recovery parameters: IB1: 0x%X, " + "Bad context_id: %u, global_eop: 0x%x\n", rec_data->ib1, + rec_data->context_id, rec_data->global_eop); + + context = idr_find(&device->context_idr, rec_data->context_id); + if (context == NULL) { + KGSL_DRV_ERR(device, "Last context unknown id:%d\n", + rec_data->context_id); + rec_data->context_id = 0; + } else { + adreno_context = context->devctxt; + adreno_context->flags |= CTXT_FLAGS_GPU_HANG; + } + /* Extract valid contents from rb which can still be executed after + * hang */ + ret = adreno_ringbuffer_extract(rb, rec_data); + if (ret) + goto done; + + timestamp = rb->timestamp; + KGSL_DRV_ERR(device, "Last issued global timestamp: %x\n", timestamp); + + /* Make sure memory is synchronized before restarting the GPU */ + mb(); + + /* restart device */ + ret = adreno_stop(device); + if (ret) + goto done; + ret = adreno_start(device, true); + if (ret) + goto done; + KGSL_DRV_ERR(device, "Device has been restarted after hang\n"); + + /* Restore valid commands in ringbuffer */ + adreno_ringbuffer_restore(rb, rec_data->rb_buffer, rec_data->rb_size); + rb->timestamp = timestamp; + + /* wait for idle */ + ret = adreno_idle(device, KGSL_TIMEOUT_DEFAULT); +done: + kgsl_sharedmem_writel(&device->memstore, + KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp), + rb->timestamp); + adreno_mark_context_status(device, ret); + return ret; +} + +static void adreno_destroy_recovery_data(struct adreno_recovery_data *rec_data) +{ + vfree(rec_data->rb_buffer); + vfree(rec_data->bad_rb_buffer); +} + +static int adreno_setup_recovery_data(struct kgsl_device *device, + struct adreno_recovery_data *rec_data) +{ + int ret = 0; + unsigned int ib1_sz, ib2_sz; + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer; + + memset(rec_data, 0, sizeof(*rec_data)); + + adreno_regread(device, REG_CP_IB1_BUFSZ, &ib1_sz); + adreno_regread(device, REG_CP_IB2_BUFSZ, &ib2_sz); + if (ib1_sz || ib2_sz) + adreno_regread(device, REG_CP_IB1_BASE, &rec_data->ib1); + + kgsl_sharedmem_readl(&device->memstore, &rec_data->context_id, + KGSL_DEVICE_MEMSTORE_OFFSET(current_context)); + + kgsl_sharedmem_readl(&device->memstore, + &rec_data->global_eop, + KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)); + + rec_data->rb_buffer = vmalloc(rb->buffer_desc.size); + if (!rec_data->rb_buffer) { + KGSL_MEM_ERR(device, "vmalloc(%d) failed\n", + rb->buffer_desc.size); + return -ENOMEM; + } + + rec_data->bad_rb_buffer = vmalloc(rb->buffer_desc.size); + if (!rec_data->bad_rb_buffer) { + KGSL_MEM_ERR(device, "vmalloc(%d) failed\n", + rb->buffer_desc.size); + ret = -ENOMEM; + goto done; + } + +done: + if (ret) { + vfree(rec_data->rb_buffer); + vfree(rec_data->bad_rb_buffer); + } + return ret; +} + +int adreno_dump_and_recover(struct kgsl_device *device) +{ + int result = -ETIMEDOUT; + struct adreno_recovery_data rec_data; + + if (device->state == KGSL_STATE_HUNG) + goto done; + if (device->state == KGSL_STATE_DUMP_AND_RECOVER) { + mutex_unlock(&device->mutex); + wait_for_completion(&device->recovery_gate); + mutex_lock(&device->mutex); + if (device->state != KGSL_STATE_HUNG) + result = 0; + } else { + kgsl_pwrctrl_set_state(device, KGSL_STATE_DUMP_AND_RECOVER); + INIT_COMPLETION(device->recovery_gate); + /* Detected a hang */ + + /* Get the recovery data as soon as hang is detected */ + result = adreno_setup_recovery_data(device, &rec_data); + /* + * Trigger an automatic 
dump of the state to + * the console + */ + adreno_postmortem_dump(device, 0); + + /* + * Make a GPU snapshot. For now, do it after the PM dump so we + * can at least be sure the PM dump will work as it always has + */ + kgsl_device_snapshot(device, 1); + + result = adreno_recover_hang(device, &rec_data); + adreno_destroy_recovery_data(&rec_data); + if (result) { + kgsl_pwrctrl_set_state(device, KGSL_STATE_HUNG); + } else { + kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE); + mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT); + } + complete_all(&device->recovery_gate); + } +done: + return result; +} +EXPORT_SYMBOL(adreno_dump_and_recover); + +static int adreno_getproperty(struct kgsl_device *device, + enum kgsl_property_type type, + void *value, + unsigned int sizebytes) +{ + int status = -EINVAL; + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + + switch (type) { + case KGSL_PROP_DEVICE_INFO: + { + struct kgsl_devinfo devinfo; + + if (sizebytes != sizeof(devinfo)) { + status = -EINVAL; + break; + } + + memset(&devinfo, 0, sizeof(devinfo)); + devinfo.device_id = device->id+1; + devinfo.chip_id = adreno_dev->chip_id; + devinfo.mmu_enabled = kgsl_mmu_enabled(); + devinfo.gpu_id = adreno_dev->gpurev; + devinfo.gmem_gpubaseaddr = adreno_dev->gmemspace. + gpu_base; + devinfo.gmem_sizebytes = adreno_dev->gmemspace. + sizebytes; + + if (copy_to_user(value, &devinfo, sizeof(devinfo)) != + 0) { + status = -EFAULT; + break; + } + status = 0; + } + break; + case KGSL_PROP_DEVICE_SHADOW: + { + struct kgsl_shadowprop shadowprop; + + if (sizebytes != sizeof(shadowprop)) { + status = -EINVAL; + break; + } + memset(&shadowprop, 0, sizeof(shadowprop)); + if (device->memstore.hostptr) { + /*NOTE: with mmu enabled, gpuaddr doesn't mean + * anything to mmap(). + */ + shadowprop.gpuaddr = device->memstore.physaddr; + shadowprop.size = device->memstore.size; + /* GSL needs this to be set, even if it + appears to be meaningless */ + shadowprop.flags = KGSL_FLAGS_INITIALIZED; + } + if (copy_to_user(value, &shadowprop, + sizeof(shadowprop))) { + status = -EFAULT; + break; + } + status = 0; + } + break; + case KGSL_PROP_MMU_ENABLE: + { + int mmu_prop = kgsl_mmu_enabled(); + + if (sizebytes != sizeof(int)) { + status = -EINVAL; + break; + } + if (copy_to_user(value, &mmu_prop, sizeof(mmu_prop))) { + status = -EFAULT; + break; + } + status = 0; + } + break; + case KGSL_PROP_INTERRUPT_WAITS: + { + int int_waits = 1; + if (sizebytes != sizeof(int)) { + status = -EINVAL; + break; + } + if (copy_to_user(value, &int_waits, sizeof(int))) { + status = -EFAULT; + break; + } + status = 0; + } + break; + default: + status = -EINVAL; + } + + return status; +} + +static inline void adreno_poke(struct kgsl_device *device) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + adreno_regwrite(device, REG_CP_RB_WPTR, adreno_dev->ringbuffer.wptr); +} + +/* Caller must hold the device mutex. 
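+ * adreno_idle() first waits for the CP to drain the ring buffer, periodically
+ * poking the wptr register while rptr catches up (with the default 10000 ms
+ * wait_timeout the ring is re-poked after 100 ms and then roughly every
+ * 2475 ms), then polls RBBM_STATUS for the idle value. If either wait times
+ * out it attempts dump-and-recover before returning -ETIMEDOUT.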
*/ +int adreno_idle(struct kgsl_device *device, unsigned int timeout) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer; + unsigned int rbbm_status; + unsigned long wait_timeout = + msecs_to_jiffies(adreno_dev->wait_timeout); + unsigned long wait_time; + unsigned long wait_time_part; + unsigned int msecs; + unsigned int msecs_first; + unsigned int msecs_part; + + kgsl_cffdump_regpoll(device->id, REG_RBBM_STATUS << 2, + 0x00000000, 0x80000000); + /* first, wait until the CP has consumed all the commands in + * the ring buffer + */ +retry: + if (rb->flags & KGSL_FLAGS_STARTED) { + msecs = adreno_dev->wait_timeout; + msecs_first = (msecs <= 100) ? ((msecs + 4) / 5) : 100; + msecs_part = (msecs - msecs_first + 3) / 4; + wait_time = jiffies + wait_timeout; + wait_time_part = jiffies + msecs_to_jiffies(msecs_first); + adreno_poke(device); + do { + if (time_after(jiffies, wait_time_part)) { + adreno_poke(device); + wait_time_part = jiffies + + msecs_to_jiffies(msecs_part); + } + GSL_RB_GET_READPTR(rb, &rb->rptr); + if (time_after(jiffies, wait_time)) { + KGSL_DRV_ERR(device, "rptr: %x, wptr: %x\n", + rb->rptr, rb->wptr); + goto err; + } + } while (rb->rptr != rb->wptr); + } + + /* now, wait for the GPU to finish its operations */ + wait_time = jiffies + wait_timeout; + while (time_before(jiffies, wait_time)) { + adreno_regread(device, REG_RBBM_STATUS, &rbbm_status); + if (rbbm_status == 0x110) + return 0; + } + +err: + KGSL_DRV_ERR(device, "spun too long waiting for RB to idle\n"); + if (KGSL_STATE_DUMP_AND_RECOVER != device->state && + !adreno_dump_and_recover(device)) { + wait_time = jiffies + wait_timeout; + goto retry; + } + return -ETIMEDOUT; +} + +static unsigned int adreno_isidle(struct kgsl_device *device) +{ + int status = false; + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer; + unsigned int rbbm_status; + + WARN_ON(device->state == KGSL_STATE_INIT); + /* If the device isn't active, don't force it on. */ + if (device->state == KGSL_STATE_ACTIVE) { + /* Is the ring buffer is empty? */ + GSL_RB_GET_READPTR(rb, &rb->rptr); + if (!device->active_cnt && (rb->rptr == rb->wptr)) { + /* Is the core idle? */ + if (adreno_dev->gpudev->irq_pending(adreno_dev) == 0) { + adreno_regread(device, REG_RBBM_STATUS, + &rbbm_status); + if (rbbm_status == 0x110) + status = true; + } + } + } else { + status = true; + } + return status; +} + +/* Caller must hold the device mutex. 
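+ * adreno_suspend_context() switches away from the active draw context to the
+ * NULL context and then waits for the GPU to idle.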
*/ +static int adreno_suspend_context(struct kgsl_device *device) +{ + int status = 0; + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + + /* switch to NULL ctxt */ + if (adreno_dev->drawctxt_active != NULL) { + adreno_drawctxt_switch(adreno_dev, NULL, 0); + status = adreno_idle(device, KGSL_TIMEOUT_DEFAULT); + } + + return status; +} + +struct kgsl_memdesc *adreno_find_region(struct kgsl_device *device, + unsigned int pt_base, + unsigned int gpuaddr, + unsigned int size) +{ + struct kgsl_memdesc *result = NULL; + struct kgsl_mem_entry *entry; + struct kgsl_process_private *priv; + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct adreno_ringbuffer *ringbuffer = &adreno_dev->ringbuffer; + struct kgsl_context *context; + int next = 0; + + if (kgsl_gpuaddr_in_memdesc(&ringbuffer->buffer_desc, gpuaddr, size)) + return &ringbuffer->buffer_desc; + + if (kgsl_gpuaddr_in_memdesc(&ringbuffer->memptrs_desc, gpuaddr, size)) + return &ringbuffer->memptrs_desc; + + if (kgsl_gpuaddr_in_memdesc(&device->memstore, gpuaddr, size)) + return &device->memstore; + + mutex_lock(&kgsl_driver.process_mutex); + list_for_each_entry(priv, &kgsl_driver.process_list, list) { + if (!kgsl_mmu_pt_equal(priv->pagetable, pt_base)) + continue; + spin_lock(&priv->mem_lock); + entry = kgsl_sharedmem_find_region(priv, gpuaddr, size); + if (entry) { + result = &entry->memdesc; + spin_unlock(&priv->mem_lock); + mutex_unlock(&kgsl_driver.process_mutex); + return result; + } + spin_unlock(&priv->mem_lock); + } + mutex_unlock(&kgsl_driver.process_mutex); + + while (1) { + struct adreno_context *adreno_context = NULL; + context = idr_get_next(&device->context_idr, &next); + if (context == NULL) + break; + + adreno_context = (struct adreno_context *)context->devctxt; + + if (kgsl_mmu_pt_equal(adreno_context->pagetable, pt_base)) { + struct kgsl_memdesc *desc; + + desc = &adreno_context->gpustate; + if (kgsl_gpuaddr_in_memdesc(desc, gpuaddr, size)) { + result = desc; + return result; + } + + desc = &adreno_context->context_gmem_shadow.gmemshadow; + if (kgsl_gpuaddr_in_memdesc(desc, gpuaddr, size)) { + result = desc; + return result; + } + } + next = next + 1; + } + + return NULL; + +} + +uint8_t *adreno_convertaddr(struct kgsl_device *device, unsigned int pt_base, + unsigned int gpuaddr, unsigned int size) +{ + struct kgsl_memdesc *memdesc; + + memdesc = adreno_find_region(device, pt_base, gpuaddr, size); + + return memdesc ? kgsl_gpuaddr_to_vaddr(memdesc, gpuaddr) : NULL; +} + +void adreno_regread(struct kgsl_device *device, unsigned int offsetwords, + unsigned int *value) +{ + unsigned int *reg; + BUG_ON(offsetwords*sizeof(uint32_t) >= device->regspace.sizebytes); + reg = (unsigned int *)(device->regspace.mmio_virt_base + + (offsetwords << 2)); + + if (!in_interrupt()) + kgsl_pre_hwaccess(device); + + /*ensure this read finishes before the next one. + * i.e. act like normal readl() */ + *value = __raw_readl(reg); + rmb(); +} + +void adreno_regwrite(struct kgsl_device *device, unsigned int offsetwords, + unsigned int value) +{ + unsigned int *reg; + + BUG_ON(offsetwords*sizeof(uint32_t) >= device->regspace.sizebytes); + + if (!in_interrupt()) + kgsl_pre_hwaccess(device); + + kgsl_cffdump_regwrite(device->id, offsetwords << 2, value); + reg = (unsigned int *)(device->regspace.mmio_virt_base + + (offsetwords << 2)); + + /*ensure previous writes post before this one, + * i.e. 
act like normal writel() */ + wmb(); + __raw_writel(value, reg); +} + +static int adreno_next_event(struct kgsl_device *device, + struct kgsl_event *event) +{ + int status; + unsigned int ref_ts, enableflag; + + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + + status = kgsl_check_timestamp(device, event->timestamp); + if (!status) { + kgsl_sharedmem_readl(&device->memstore, &enableflag, + KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)); + mb(); + + if (enableflag) { + kgsl_sharedmem_readl(&device->memstore, &ref_ts, + KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts)); + mb(); + if (timestamp_cmp(ref_ts, event->timestamp) >= 0) { + kgsl_sharedmem_writel(&device->memstore, + KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts), + event->timestamp); + wmb(); + } + } else { + unsigned int cmds[2]; + kgsl_sharedmem_writel(&device->memstore, + KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts), + event->timestamp); + enableflag = 1; + kgsl_sharedmem_writel(&device->memstore, + KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable), + enableflag); + wmb(); + /* submit a dummy packet so that even if all + * commands upto timestamp get executed we will still + * get an interrupt */ + cmds[0] = cp_type3_packet(CP_NOP, 1); + cmds[1] = 0; + + adreno_ringbuffer_issuecmds(device, + adreno_dev->drawctxt_active, + KGSL_CMD_FLAGS_NONE, &cmds[0], 2); + } + } + return status; +} + +static int kgsl_check_interrupt_timestamp(struct kgsl_device *device, + unsigned int timestamp) +{ + int status; + unsigned int ref_ts, enableflag; + + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + + status = kgsl_check_timestamp(device, timestamp); + if (!status) { + mutex_lock(&device->mutex); + kgsl_sharedmem_readl(&device->memstore, &enableflag, + KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)); + mb(); + + if (enableflag) { + kgsl_sharedmem_readl(&device->memstore, &ref_ts, + KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts)); + mb(); + if (timestamp_cmp(ref_ts, timestamp) >= 0) { + kgsl_sharedmem_writel(&device->memstore, + KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts), + timestamp); + wmb(); + } + } else { + unsigned int cmds[2]; + kgsl_sharedmem_writel(&device->memstore, + KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts), + timestamp); + enableflag = 1; + kgsl_sharedmem_writel(&device->memstore, + KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable), + enableflag); + wmb(); + /* submit a dummy packet so that even if all + * commands upto timestamp get executed we will still + * get an interrupt */ + cmds[0] = cp_type3_packet(CP_NOP, 1); + cmds[1] = 0; + + adreno_ringbuffer_issuecmds(device, + adreno_dev->drawctxt_active, + KGSL_CMD_FLAGS_NONE, &cmds[0], 2); + } + mutex_unlock(&device->mutex); + } + + return status; +} + +/* + wait_event_interruptible_timeout checks for the exit condition before + placing a process in wait q. For conditional interrupts we expect the + process to already be in its wait q when its exit condition checking + function is called. 
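/*
 * Illustrative sketch, not part of the patch, of the memstore handshake the
 * two helpers above maintain: ref_wait_ts always tracks the oldest timestamp
 * any waiter cares about (so a single CP interrupt covers every waiter) and
 * ts_cmp_enable arms the comparison; when no waiter is registered yet, the
 * real code additionally sets ts_cmp_enable and submits a CP_NOP so an
 * interrupt is guaranteed. The helper name is hypothetical.
 */
static void example_track_oldest_waiter(struct kgsl_device *device,
					unsigned int wanted_ts,
					unsigned int ref_ts,
					unsigned int enabled)
{
	if (!enabled || timestamp_cmp(ref_ts, wanted_ts) >= 0) {
		/* nobody waiting yet, or this waiter is older: lower ref */
		kgsl_sharedmem_writel(&device->memstore,
			KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts), wanted_ts);
		wmb();
	}
}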
+*/ +#define kgsl_wait_event_interruptible_timeout(wq, condition, timeout, io)\ +({ \ + long __ret = timeout; \ + if (io) \ + __wait_io_event_interruptible_timeout(wq, condition, __ret);\ + else \ + __wait_event_interruptible_timeout(wq, condition, __ret);\ + __ret; \ +}) + +/* MUST be called with the device mutex held */ +static int adreno_waittimestamp(struct kgsl_device *device, + unsigned int timestamp, + unsigned int msecs) +{ + long status = 0; + uint io = 1; + static uint io_cnt; + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct kgsl_pwrctrl *pwr = &device->pwrctrl; + int retries; + unsigned int msecs_first; + unsigned int msecs_part; + + /* Don't wait forever, set a max value for now */ + if (msecs == -1) + msecs = adreno_dev->wait_timeout; + + if (timestamp_cmp(timestamp, adreno_dev->ringbuffer.timestamp) > 0) { + KGSL_DRV_ERR(device, "Cannot wait for invalid ts: %x, " + "rb->timestamp: %x\n", + timestamp, adreno_dev->ringbuffer.timestamp); + status = -EINVAL; + goto done; + } + + /* Keep the first timeout as 100msecs before rewriting + * the WPTR. Less visible impact if the WPTR has not + * been updated properly. + */ + msecs_first = (msecs <= 100) ? ((msecs + 4) / 5) : 100; + msecs_part = (msecs - msecs_first + 3) / 4; + for (retries = 0; retries < 5; retries++) { + if (kgsl_check_timestamp(device, timestamp)) { + /* if the timestamp happens while we're not + * waiting, there's a chance that an interrupt + * will not be generated and thus the timestamp + * work needs to be queued. + */ + queue_work(device->work_queue, &device->ts_expired_ws); + status = 0; + goto done; + } + adreno_poke(device); + io_cnt = (io_cnt + 1) % 100; + if (io_cnt < + pwr->pwrlevels[pwr->active_pwrlevel].io_fraction) + io = 0; + mutex_unlock(&device->mutex); + /* We need to make sure that the process is + * placed in wait-q before its condition is called + */ + status = kgsl_wait_event_interruptible_timeout( + device->wait_queue, + kgsl_check_interrupt_timestamp(device, + timestamp), + msecs_to_jiffies(retries ? 
+					msecs_part : msecs_first), io);
+		mutex_lock(&device->mutex);
+
+		if (status > 0) {
+			/*completed before the wait finished */
+			status = 0;
+			goto done;
+		} else if (status < 0) {
+			/*an error occurred*/
+			goto done;
+		}
+		/*this wait timed out*/
+	}
+
+	/* Check if timestamp has retired here because we may have hit
+	 * recovery which can take some time and cause waiting threads
+	 * to timeout
+	 */
+	if (kgsl_check_timestamp(device, timestamp))
+		goto done;
+
+	status = -ETIMEDOUT;
+	KGSL_DRV_ERR(device,
+		"Device hang detected while waiting for timestamp: %x,"
+		"last submitted(rb->timestamp): %x, wptr: %x\n",
+		timestamp, adreno_dev->ringbuffer.timestamp,
+		adreno_dev->ringbuffer.wptr);
+	if (!adreno_dump_and_recover(device)) {
+		/* The timestamp that this process wanted
+		 * to wait on may be invalid or expired now
+		 * after successful recovery */
+		status = 0;
+	}
+done:
+	return (int)status;
+}
+
+static unsigned int adreno_readtimestamp(struct kgsl_device *device,
+				enum kgsl_timestamp_type type)
+{
+	unsigned int timestamp = 0;
+
+	if (type == KGSL_TIMESTAMP_CONSUMED)
+		adreno_regread(device, REG_CP_TIMESTAMP, &timestamp);
+	else if (type == KGSL_TIMESTAMP_RETIRED)
+		kgsl_sharedmem_readl(&device->memstore, &timestamp,
+			KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp));
+	rmb();
+
+	return timestamp;
+}
+
+static long adreno_ioctl(struct kgsl_device_private *dev_priv,
+			      unsigned int cmd, void *data)
+{
+	int result = 0;
+	struct kgsl_drawctxt_set_bin_base_offset *binbase;
+	struct kgsl_context *context;
+
+	switch (cmd) {
+	case IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET:
+		binbase = data;
+
+		context = kgsl_find_context(dev_priv, binbase->drawctxt_id);
+		if (context) {
+			adreno_drawctxt_set_bin_base_offset(
+				dev_priv->device, context, binbase->offset);
+		} else {
+			result = -EINVAL;
+			KGSL_DRV_ERR(dev_priv->device,
+				"invalid drawctxt drawctxt_id %d "
+				"device_id=%d\n",
+				binbase->drawctxt_id, dev_priv->device->id);
+		}
+		break;
+
+	default:
+		KGSL_DRV_INFO(dev_priv->device,
+			"invalid ioctl code %08x\n", cmd);
+		result = -EINVAL;
+		break;
+	}
+	return result;
+
+}
+
+static inline s64 adreno_ticks_to_us(u32 ticks, u32 gpu_freq)
+{
+	gpu_freq /= 2000000;
+	return ticks / gpu_freq;
+}
+
+static void adreno_power_stats(struct kgsl_device *device,
+				struct kgsl_power_stats *stats)
+{
+	unsigned int reg;
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+
+	/* In order to calculate idle you have to have run the algorithm *
+	 * at least once to get a start time. */
+	if (pwr->time != 0) {
+		s64 tmp;
+		/* Stop the performance monitor and read the current *
+		 * busy cycles. */
+		adreno_regwrite(device,
+			REG_CP_PERFMON_CNTL,
+			REG_PERF_MODE_CNT |
+			REG_PERF_STATE_FREEZE);
+		adreno_regread(device, REG_RBBM_PERFCOUNTER1_LO, &reg);
+		tmp = ktime_to_us(ktime_get());
+		stats->total_time = tmp - pwr->time;
+		pwr->time = tmp;
+		stats->busy_time = adreno_ticks_to_us(reg, device->pwrctrl.
+				pwrlevels[device->pwrctrl.active_pwrlevel].
+				gpu_freq);
+
+		adreno_regwrite(device,
+			REG_CP_PERFMON_CNTL,
+			REG_PERF_MODE_CNT |
+			REG_PERF_STATE_RESET);
+	} else {
+		stats->total_time = 0;
+		stats->busy_time = 0;
+		pwr->time = ktime_to_us(ktime_get());
+	}
+
+	/* re-enable the performance monitors */
+	adreno_regread(device, REG_RBBM_PM_OVERRIDE2, &reg);
+	adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, (reg | 0x40));
+	adreno_regwrite(device, REG_RBBM_PERFCOUNTER1_SELECT, 0x1);
+	adreno_regwrite(device,
+		REG_CP_PERFMON_CNTL,
+		REG_PERF_MODE_CNT | REG_PERF_STATE_ENABLE);
+}
+
+void adreno_irqctrl(struct kgsl_device *device, int state)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	adreno_dev->gpudev->irq_control(adreno_dev, state);
+}
+
+static unsigned int adreno_gpuid(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	/* Standard KGSL gpuid format:
+	 * top word is 0x0002 for 2D or 0x0003 for 3D
+	 * Bottom word is core specific identifier
+	 */
+
+	return (0x0003 << 16) | ((int) adreno_dev->gpurev);
+}
+
+static const struct kgsl_functable adreno_functable = {
+	/* Mandatory functions */
+	.regread = adreno_regread,
+	.regwrite = adreno_regwrite,
+	.idle = adreno_idle,
+	.isidle = adreno_isidle,
+	.suspend_context = adreno_suspend_context,
+	.start = adreno_start,
+	.stop = adreno_stop,
+	.getproperty = adreno_getproperty,
+	.waittimestamp = adreno_waittimestamp,
+	.readtimestamp = adreno_readtimestamp,
+	.issueibcmds = adreno_ringbuffer_issueibcmds,
+	.ioctl = adreno_ioctl,
+	.setup_pt = adreno_setup_pt,
+	.cleanup_pt = adreno_cleanup_pt,
+	.power_stats = adreno_power_stats,
+	.irqctrl = adreno_irqctrl,
+	.gpuid = adreno_gpuid,
+	.snapshot = adreno_snapshot,
+	/* Optional functions */
+	.setstate = adreno_setstate,
+	.drawctxt_create = adreno_drawctxt_create,
+	.drawctxt_destroy = adreno_drawctxt_destroy,
+	.next_event = adreno_next_event,
+};
+
+static struct platform_device_id adreno_id_table[] = {
+	{ DEVICE_3D0_NAME, (kernel_ulong_t)&device_3d0.dev, },
+	{ },
+};
+MODULE_DEVICE_TABLE(platform, adreno_id_table);
+
+static struct platform_driver adreno_platform_driver = {
+	.probe = adreno_probe,
+	.remove = __devexit_p(adreno_remove),
+	.suspend = kgsl_suspend_driver,
+	.resume = kgsl_resume_driver,
+	.id_table = adreno_id_table,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = DEVICE_3D_NAME,
+		.pm = &kgsl_pm_ops,
+	}
+};
+
+static int __init kgsl_3d_init(void)
+{
+	return platform_driver_register(&adreno_platform_driver);
+}
+
+static void __exit kgsl_3d_exit(void)
+{
+	platform_driver_unregister(&adreno_platform_driver);
+}
+
+module_init(kgsl_3d_init);
+module_exit(kgsl_3d_exit);
+
+MODULE_DESCRIPTION("3D Graphics driver");
+MODULE_VERSION("1.2");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:kgsl_3d");
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
new file mode 100644
index 0000000000000..0694ebd5cf4f5
--- /dev/null
+++ b/drivers/gpu/msm/adreno.h
@@ -0,0 +1,208 @@
+/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + */ +#ifndef __ADRENO_H +#define __ADRENO_H + +#include "kgsl_device.h" +#include "adreno_drawctxt.h" +#include "adreno_ringbuffer.h" + +#define DEVICE_3D_NAME "kgsl-3d" +#define DEVICE_3D0_NAME "kgsl-3d0" + +#define ADRENO_DEVICE(device) \ + KGSL_CONTAINER_OF(device, struct adreno_device, dev) + +/* Flags to control command packet settings */ +#define KGSL_CMD_FLAGS_NONE 0x00000000 +#define KGSL_CMD_FLAGS_PMODE 0x00000001 +#define KGSL_CMD_FLAGS_NO_TS_CMP 0x00000002 + +/* Command identifiers */ +#define KGSL_CONTEXT_TO_MEM_IDENTIFIER 0xDEADBEEF +#define KGSL_CMD_IDENTIFIER 0xFEEDFACE +#define KGSL_START_OF_IB_IDENTIFIER 0x2EADEABE +#define KGSL_END_OF_IB_IDENTIFIER 0x2ABEDEAD + +#ifdef CONFIG_MSM_SCM +#define ADRENO_DEFAULT_PWRSCALE_POLICY (&kgsl_pwrscale_policy_tz) +#else +#define ADRENO_DEFAULT_PWRSCALE_POLICY NULL +#endif + +/* + * constants for the size of shader instructions + */ +#define ADRENO_ISTORE_BYTES 12 +#define ADRENO_ISTORE_WORDS 3 +#define ADRENO_ISTORE_START 0x5000 + +#define ADRENO_NUM_CTX_SWITCH_ALLOWED_BEFORE_DRAW 50 + +enum adreno_gpurev { + ADRENO_REV_UNKNOWN = 0, + ADRENO_REV_A200 = 200, + ADRENO_REV_A205 = 205, + ADRENO_REV_A220 = 220, + ADRENO_REV_A225 = 225, +}; + +struct adreno_gpudev; + +struct adreno_device { + struct kgsl_device dev; /* Must be first field in this struct */ + unsigned int chip_id; + enum adreno_gpurev gpurev; + struct kgsl_memregion gmemspace; + struct adreno_context *drawctxt_active; + const char *pfp_fwfile; + unsigned int *pfp_fw; + size_t pfp_fw_size; + const char *pm4_fwfile; + unsigned int *pm4_fw; + size_t pm4_fw_size; + struct adreno_ringbuffer ringbuffer; + unsigned int mharb; + struct adreno_gpudev *gpudev; + unsigned int wait_timeout; + unsigned int istore_size; + unsigned int pix_shader_start; + unsigned int ib_check_level; +}; + +struct adreno_gpudev { + /* keeps track of when we need to execute the draw workaround code */ + int ctx_switches_since_last_draw; + int (*ctxt_create)(struct adreno_device *, struct adreno_context *); + void (*ctxt_save)(struct adreno_device *, struct adreno_context *); + void (*ctxt_restore)(struct adreno_device *, struct adreno_context *); + void (*ctxt_draw_workaround)(struct adreno_device *, struct adreno_context *); + irqreturn_t (*irq_handler)(struct adreno_device *); + void (*irq_control)(struct adreno_device *, int); + unsigned int (*irq_pending)(struct adreno_device *); + void * (*snapshot)(struct adreno_device *, void *, int *, int); +}; + +/* + * struct adreno_recovery_data - Structure that contains all information to + * perform gpu recovery from hangs + * @ib1 - IB1 that the GPU was executing when hang happened + * @context_id - Context which caused the hang + * @global_eop - eoptimestamp at time of hang + * @rb_buffer - Buffer that holds the commands from good contexts + * @rb_size - Number of valid dwords in rb_buffer + * @bad_rb_buffer - Buffer that holds commands from the hanging context + * bad_rb_size - Number of valid dwords in bad_rb_buffer + * @last_valid_ctx_id - The last context from which commands were placed in + * ringbuffer before the GPU hung + */ +struct adreno_recovery_data { + unsigned int ib1; + unsigned int context_id; + unsigned int global_eop; + unsigned int *rb_buffer; + unsigned int rb_size; + unsigned int *bad_rb_buffer; + unsigned int bad_rb_size; + unsigned int last_valid_ctx_id; +}; + +extern struct adreno_gpudev adreno_a2xx_gpudev; + +/* A2XX register sets defined in adreno_a2xx.c */ +extern const unsigned int a200_registers[]; +extern const unsigned int 
a220_registers[];
+extern const unsigned int a200_registers_count;
+extern const unsigned int a220_registers_count;
+
+int adreno_idle(struct kgsl_device *device, unsigned int timeout);
+void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
+				unsigned int *value);
+void adreno_regwrite(struct kgsl_device *device, unsigned int offsetwords,
+				unsigned int value);
+
+struct kgsl_memdesc *adreno_find_region(struct kgsl_device *device,
+						unsigned int pt_base,
+						unsigned int gpuaddr,
+						unsigned int size);
+
+uint8_t *adreno_convertaddr(struct kgsl_device *device,
+	unsigned int pt_base, unsigned int gpuaddr, unsigned int size);
+
+void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain,
+		int hang);
+
+int adreno_dump_and_recover(struct kgsl_device *device);
+
+static inline int adreno_is_a200(struct adreno_device *adreno_dev)
+{
+	return (adreno_dev->gpurev == ADRENO_REV_A200);
+}
+
+static inline int adreno_is_a205(struct adreno_device *adreno_dev)
+{
+	return (adreno_dev->gpurev == ADRENO_REV_A205);
+}
+
+static inline int adreno_is_a20x(struct adreno_device *adreno_dev)
+{
+	return (adreno_dev->gpurev == ADRENO_REV_A200 ||
+		adreno_dev->gpurev == ADRENO_REV_A205);
+}
+
+static inline int adreno_is_a220(struct adreno_device *adreno_dev)
+{
+	return (adreno_dev->gpurev == ADRENO_REV_A220);
+}
+
+static inline int adreno_is_a225(struct adreno_device *adreno_dev)
+{
+	return (adreno_dev->gpurev == ADRENO_REV_A225);
+}
+
+static inline int adreno_is_a22x(struct adreno_device *adreno_dev)
+{
+	return (adreno_dev->gpurev == ADRENO_REV_A220 ||
+		adreno_dev->gpurev == ADRENO_REV_A225);
+}
+
+static inline int adreno_is_a2xx(struct adreno_device *adreno_dev)
+{
+	return (adreno_dev->gpurev <= ADRENO_REV_A225);
+}
+
+/**
+ * adreno_encode_istore_size - encode istore size in CP format
+ * @adreno_dev - The 3D device.
+ *
+ * Encode the istore size into the format expected by the
+ * CP_SET_SHADER_BASES and CP_ME_INIT commands:
+ * bits 31:29 - istore size as encoded by this function
+ * bits 27:16 - vertex shader start offset in instructions
+ * bits 11:0 - pixel shader start offset in instructions.
+ */
+static inline int adreno_encode_istore_size(struct adreno_device *adreno_dev)
+{
+	unsigned int size;
+	/* in a225 the CP microcode multiplies the encoded
+	 * value by 3 while decoding.
+	 */
+	if (adreno_is_a225(adreno_dev))
+		size = adreno_dev->istore_size/3;
+	else
+		size = adreno_dev->istore_size;
+
+	return (ilog2(size) - 5) << 29;
+}
+
+#endif /*__ADRENO_H */
diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c
new file mode 100644
index 0000000000000..53822609974cd
--- /dev/null
+++ b/drivers/gpu/msm/adreno_a2xx.c
@@ -0,0 +1,1794 @@
+/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "kgsl.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_cffdump.h"
+#include "adreno.h"
+#include "adreno_a2xx_trace.h"
+
+/*
+ * These are the registers that are dumped with GPU snapshot
+ * and postmortem.
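/*
 * Worked example, not part of the patch: with an istore_size of 0x1000
 * instructions on a non-A225 core, adreno_encode_istore_size() above yields
 * (ilog2(0x1000) - 5) << 29 = (12 - 5) << 29 = 0xE0000000; on A225 the size
 * is divided by 3 first because the CP microcode multiplies it back while
 * decoding.
 */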
The lists are dword offset pairs in the + * form of {start offset, end offset} inclusive. + */ + +/* A200, A205 */ +const unsigned int a200_registers[] = { + 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044, + 0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9, + 0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7, + 0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5, + 0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444, + 0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B, + 0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0, + 0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614, + 0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A43, 0x0A45, 0x0A45, + 0x0A4E, 0x0A4F, 0x0C2C, 0x0C2C, 0x0C30, 0x0C30, 0x0C38, 0x0C3C, + 0x0C40, 0x0C40, 0x0C44, 0x0C44, 0x0C80, 0x0C86, 0x0C88, 0x0C94, + 0x0C99, 0x0C9A, 0x0CA4, 0x0CA5, 0x0D00, 0x0D03, 0x0D06, 0x0D06, + 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4, + 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E, + 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7, + 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x0F0C, 0x0F0C, 0x0F0E, 0x0F12, + 0x0F26, 0x0F2A, 0x0F2C, 0x0F2C, 0x2000, 0x2002, 0x2006, 0x200F, + 0x2080, 0x2082, 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184, + 0x21F5, 0x21F7, 0x2200, 0x2208, 0x2280, 0x2283, 0x2293, 0x2294, + 0x2300, 0x2308, 0x2312, 0x2312, 0x2316, 0x231D, 0x2324, 0x2326, + 0x2380, 0x2383, 0x2400, 0x2402, 0x2406, 0x240F, 0x2480, 0x2482, + 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7, + 0x2600, 0x2608, 0x2680, 0x2683, 0x2693, 0x2694, 0x2700, 0x2708, + 0x2712, 0x2712, 0x2716, 0x271D, 0x2724, 0x2726, 0x2780, 0x2783, + 0x4000, 0x4003, 0x4800, 0x4805, 0x4900, 0x4900, 0x4908, 0x4908, +}; + +/* A220, A225 */ +const unsigned int a220_registers[] = { + 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044, + 0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9, + 0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7, + 0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5, + 0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444, + 0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B, + 0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0, + 0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614, + 0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A40, 0x0A42, 0x0A43, + 0x0A45, 0x0A45, 0x0A4E, 0x0A4F, 0x0C30, 0x0C30, 0x0C38, 0x0C39, + 0x0C3C, 0x0C3C, 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03, + 0x0D05, 0x0D06, 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, + 0x0DC8, 0x0DD4, 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, + 0x0E17, 0x0E1E, 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, + 0x0ED4, 0x0ED7, 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x2002, + 0x2006, 0x200F, 0x2080, 0x2082, 0x2100, 0x2102, 0x2104, 0x2109, + 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7, 0x2200, 0x2202, + 0x2204, 0x2204, 0x2208, 0x2208, 0x2280, 0x2282, 0x2294, 0x2294, + 0x2300, 0x2308, 0x2309, 0x230A, 0x2312, 0x2312, 0x2316, 0x2316, + 0x2318, 0x231D, 0x2324, 0x2326, 0x2380, 0x2383, 0x2400, 0x2402, + 0x2406, 0x240F, 0x2480, 0x2482, 0x2500, 0x2502, 0x2504, 0x2509, + 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7, 0x2600, 0x2602, + 0x2604, 0x2606, 0x2608, 0x2608, 0x2680, 0x2682, 0x2694, 0x2694, + 0x2700, 0x2708, 0x2712, 0x2712, 0x2716, 0x2716, 0x2718, 0x271D, + 0x2724, 0x2726, 0x2780, 0x2783, 0x4000, 0x4003, 0x4800, 0x4805, + 0x4900, 0x4900, 0x4908, 0x4908, 
+}; + +const unsigned int a200_registers_count = ARRAY_SIZE(a200_registers) / 2; +const unsigned int a220_registers_count = ARRAY_SIZE(a220_registers) / 2; + +/* + * + * Memory Map for Register, Constant & Instruction Shadow, and Command Buffers + * (34.5KB) + * + * +---------------------+------------+-------------+---+---------------------+ + * | ALU Constant Shadow | Reg Shadow | C&V Buffers |Tex| Shader Instr Shadow | + * +---------------------+------------+-------------+---+---------------------+ + * ________________________________/ \____________________ + * / | + * +--------------+-----------+------+-----------+------------------------+ + * | Restore Regs | Save Regs | Quad | Gmem Save | Gmem Restore | unused | + * +--------------+-----------+------+-----------+------------------------+ + * + * 8K - ALU Constant Shadow (8K aligned) + * 4K - H/W Register Shadow (8K aligned) + * 4K - Command and Vertex Buffers + * - Indirect command buffer : Const/Reg restore + * - includes Loop & Bool const shadows + * - Indirect command buffer : Const/Reg save + * - Quad vertices & texture coordinates + * - Indirect command buffer : Gmem save + * - Indirect command buffer : Gmem restore + * - Unused (padding to 8KB boundary) + * <1K - Texture Constant Shadow (768 bytes) (8K aligned) + * 18K - Shader Instruction Shadow + * - 6K vertex (32 byte aligned) + * - 6K pixel (32 byte aligned) + * - 6K shared (32 byte aligned) + * + * Note: Reading constants into a shadow, one at a time using REG_TO_MEM, takes + * 3 DWORDS per DWORD transfered, plus 1 DWORD for the shadow, for a total of + * 16 bytes per constant. If the texture constants were transfered this way, + * the Command & Vertex Buffers section would extend past the 16K boundary. + * By moving the texture constant shadow area to start at 16KB boundary, we + * only require approximately 40 bytes more memory, but are able to use the + * LOAD_CONSTANT_CONTEXT shadowing feature for the textures, speeding up + * context switching. + * + * [Using LOAD_CONSTANT_CONTEXT shadowing feature for the Loop and/or Bool + * constants would require an additional 8KB each, for alignment.] 
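/*
 * Worked example, not part of the patch, of the layout above using the
 * constants defined just below (shadow writes enabled, i.e. the smaller
 * command buffer):
 *
 *   REG_OFFSET    = 0x2000                       after the 8 KB ALU shadow
 *   CMD_OFFSET    = 0x2000 + 0x1000 = 0x3000     after the 4 KB reg shadow
 *   TEX_OFFSET    = 0x3000 + 0x3000 = 0x6000     after the C&V buffers
 *   SHADER_OFFSET = (0x6000 + 768 + 32) & ~31 = 0x6320
 *
 * and each context allocates SHADER_OFFSET plus three instruction shadows of
 * istore_size * ADRENO_ISTORE_BYTES bytes apiece.
 */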
+ * + */ + +/* Constants */ + +#define ALU_CONSTANTS 2048 /* DWORDS */ +#define NUM_REGISTERS 1024 /* DWORDS */ +#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES +#define CMD_BUFFER_LEN 9216 /* DWORDS */ +#else +#define CMD_BUFFER_LEN 3072 /* DWORDS */ +#endif +#define TEX_CONSTANTS (32*6) /* DWORDS */ +#define BOOL_CONSTANTS 8 /* DWORDS */ +#define LOOP_CONSTANTS 56 /* DWORDS */ + +/* LOAD_CONSTANT_CONTEXT shadow size */ +#define LCC_SHADOW_SIZE 0x2000 /* 8KB */ + +#define ALU_SHADOW_SIZE LCC_SHADOW_SIZE /* 8KB */ +#define REG_SHADOW_SIZE 0x1000 /* 4KB */ +#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES +#define CMD_BUFFER_SIZE 0x9000 /* 36KB */ +#else +#define CMD_BUFFER_SIZE 0x3000 /* 12KB */ +#endif +#define TEX_SHADOW_SIZE (TEX_CONSTANTS*4) /* 768 bytes */ + +#define REG_OFFSET LCC_SHADOW_SIZE +#define CMD_OFFSET (REG_OFFSET + REG_SHADOW_SIZE) +#define TEX_OFFSET (CMD_OFFSET + CMD_BUFFER_SIZE) +#define SHADER_OFFSET ((TEX_OFFSET + TEX_SHADOW_SIZE + 32) & ~31) + +static inline int _shader_shadow_size(struct adreno_device *adreno_dev) +{ + return adreno_dev->istore_size*ADRENO_ISTORE_BYTES; +} + +static inline int _context_size(struct adreno_device *adreno_dev) +{ + return SHADER_OFFSET + 3*_shader_shadow_size(adreno_dev); +} + +/* A scratchpad used to build commands during context create */ + +static struct tmp_ctx { + unsigned int *start; /* Command & Vertex buffer start */ + unsigned int *cmd; /* Next available dword in C&V buffer */ + + /* address of buffers, needed when creating IB1 command buffers. */ + uint32_t bool_shadow; /* bool constants */ + uint32_t loop_shadow; /* loop constants */ + + uint32_t shader_shared; /* shared shader instruction shadow */ + uint32_t shader_vertex; /* vertex shader instruction shadow */ + uint32_t shader_pixel; /* pixel shader instruction shadow */ + + /* Addresses in command buffer where separately handled registers + * are saved + */ + uint32_t reg_values[33]; + uint32_t chicken_restore; + + uint32_t gmem_base; /* Base gpu address of GMEM */ + +} tmp_ctx; + +/* context save (gmem -> sys) */ + +/* pre-compiled vertex shader program +* +* attribute vec4 P; +* void main(void) +* { +* gl_Position = P; +* } +*/ +#define GMEM2SYS_VTX_PGM_LEN 0x12 + +static unsigned int gmem2sys_vtx_pgm[GMEM2SYS_VTX_PGM_LEN] = { + 0x00011003, 0x00001000, 0xc2000000, + 0x00001004, 0x00001000, 0xc4000000, + 0x00001005, 0x00002000, 0x00000000, + 0x1cb81000, 0x00398a88, 0x00000003, + 0x140f803e, 0x00000000, 0xe2010100, + 0x14000000, 0x00000000, 0xe2000000 +}; + +/* pre-compiled fragment shader program +* +* precision highp float; +* uniform vec4 clear_color; +* void main(void) +* { +* gl_FragColor = clear_color; +* } +*/ + +#define GMEM2SYS_FRAG_PGM_LEN 0x0c + +static unsigned int gmem2sys_frag_pgm[GMEM2SYS_FRAG_PGM_LEN] = { + 0x00000000, 0x1002c400, 0x10000000, + 0x00001003, 0x00002000, 0x00000000, + 0x140f8000, 0x00000000, 0x22000000, + 0x14000000, 0x00000000, 0xe2000000 +}; + +/* context restore (sys -> gmem) */ +/* pre-compiled vertex shader program +* +* attribute vec4 position; +* attribute vec4 texcoord; +* varying vec4 texcoord0; +* void main() +* { +* gl_Position = position; +* texcoord0 = texcoord; +* } +*/ + +#define SYS2GMEM_VTX_PGM_LEN 0x18 + +static unsigned int sys2gmem_vtx_pgm[SYS2GMEM_VTX_PGM_LEN] = { + 0x00052003, 0x00001000, 0xc2000000, 0x00001005, + 0x00001000, 0xc4000000, 0x00001006, 0x10071000, + 0x20000000, 0x18981000, 0x0039ba88, 0x00000003, + 0x12982000, 0x40257b08, 0x00000002, 0x140f803e, + 0x00000000, 0xe2010100, 0x140f8000, 0x00000000, + 0xe2020200, 
0x14000000, 0x00000000, 0xe2000000 +}; + +/* pre-compiled fragment shader program +* +* precision mediump float; +* uniform sampler2D tex0; +* varying vec4 texcoord0; +* void main() +* { +* gl_FragColor = texture2D(tex0, texcoord0.xy); +* } +*/ + +#define SYS2GMEM_FRAG_PGM_LEN 0x0f + +static unsigned int sys2gmem_frag_pgm[SYS2GMEM_FRAG_PGM_LEN] = { + 0x00011002, 0x00001000, 0xc4000000, 0x00001003, + 0x10041000, 0x20000000, 0x10000001, 0x1ffff688, + 0x00000002, 0x140f8000, 0x00000000, 0xe2000000, + 0x14000000, 0x00000000, 0xe2000000 +}; + +/* shader texture constants (sysmem -> gmem) */ +#define SYS2GMEM_TEX_CONST_LEN 6 + +static unsigned int sys2gmem_tex_const[SYS2GMEM_TEX_CONST_LEN] = { + /* Texture, FormatXYZW=Unsigned, ClampXYZ=Wrap/Repeat, + * RFMode=ZeroClamp-1, Dim=1:2d + */ + 0x00000002, /* Pitch = TBD */ + + /* Format=6:8888_WZYX, EndianSwap=0:None, ReqSize=0:256bit, DimHi=0, + * NearestClamp=1:OGL Mode + */ + 0x00000800, /* Address[31:12] = TBD */ + + /* Width, Height, EndianSwap=0:None */ + 0, /* Width & Height = TBD */ + + /* NumFormat=0:RF, DstSelXYZW=XYZW, ExpAdj=0, MagFilt=MinFilt=0:Point, + * Mip=2:BaseMap + */ + 0 << 1 | 1 << 4 | 2 << 7 | 3 << 10 | 2 << 23, + + /* VolMag=VolMin=0:Point, MinMipLvl=0, MaxMipLvl=1, LodBiasH=V=0, + * Dim3d=0 + */ + 0, + + /* BorderColor=0:ABGRBlack, ForceBC=0:diable, TriJuice=0, Aniso=0, + * Dim=1:2d, MipPacking=0 + */ + 1 << 9 /* Mip Address[31:12] = TBD */ +}; + +#define NUM_COLOR_FORMATS 13 + +static enum SURFACEFORMAT surface_format_table[NUM_COLOR_FORMATS] = { + FMT_4_4_4_4, /* COLORX_4_4_4_4 */ + FMT_1_5_5_5, /* COLORX_1_5_5_5 */ + FMT_5_6_5, /* COLORX_5_6_5 */ + FMT_8, /* COLORX_8 */ + FMT_8_8, /* COLORX_8_8 */ + FMT_8_8_8_8, /* COLORX_8_8_8_8 */ + FMT_8_8_8_8, /* COLORX_S8_8_8_8 */ + FMT_16_FLOAT, /* COLORX_16_FLOAT */ + FMT_16_16_FLOAT, /* COLORX_16_16_FLOAT */ + FMT_16_16_16_16_FLOAT, /* COLORX_16_16_16_16_FLOAT */ + FMT_32_FLOAT, /* COLORX_32_FLOAT */ + FMT_32_32_FLOAT, /* COLORX_32_32_FLOAT */ + FMT_32_32_32_32_FLOAT, /* COLORX_32_32_32_32_FLOAT */ +}; + +static unsigned int format2bytesperpixel[NUM_COLOR_FORMATS] = { + 2, /* COLORX_4_4_4_4 */ + 2, /* COLORX_1_5_5_5 */ + 2, /* COLORX_5_6_5 */ + 1, /* COLORX_8 */ + 2, /* COLORX_8_8 8*/ + 4, /* COLORX_8_8_8_8 */ + 4, /* COLORX_S8_8_8_8 */ + 2, /* COLORX_16_FLOAT */ + 4, /* COLORX_16_16_FLOAT */ + 8, /* COLORX_16_16_16_16_FLOAT */ + 4, /* COLORX_32_FLOAT */ + 8, /* COLORX_32_32_FLOAT */ + 16, /* COLORX_32_32_32_32_FLOAT */ +}; + +/* shader linkage info */ +#define SHADER_CONST_ADDR (11 * 6 + 3) + + +static unsigned int *program_shader(unsigned int *cmds, int vtxfrag, + unsigned int *shader_pgm, int dwords) +{ + /* load the patched vertex shader stream */ + *cmds++ = cp_type3_packet(CP_IM_LOAD_IMMEDIATE, 2 + dwords); + /* 0=vertex shader, 1=fragment shader */ + *cmds++ = vtxfrag; + /* instruction start & size (in 32-bit words) */ + *cmds++ = ((0 << 16) | dwords); + + memcpy(cmds, shader_pgm, dwords << 2); + cmds += dwords; + + return cmds; +} + +static unsigned int *reg_to_mem(unsigned int *cmds, uint32_t dst, + uint32_t src, int dwords) +{ + while (dwords-- > 0) { + *cmds++ = cp_type3_packet(CP_REG_TO_MEM, 2); + *cmds++ = src++; + *cmds++ = dst; + dst += 4; + } + + return cmds; +} + +#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES + +static void build_reg_to_mem_range(unsigned int start, unsigned int end, + unsigned int **cmd, + struct adreno_context *drawctxt) +{ + unsigned int i = start; + + for (i = start; i <= end; i++) { + *(*cmd)++ = cp_type3_packet(CP_REG_TO_MEM, 2); + *(*cmd)++ = 
i; + *(*cmd)++ = + ((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) + + (i - 0x2000) * 4; + } +} + +#endif + +/* chicken restore */ +static unsigned int *build_chicken_restore_cmds( + struct adreno_context *drawctxt) +{ + unsigned int *start = tmp_ctx.cmd; + unsigned int *cmds = start; + + *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1); + *cmds++ = 0; + + *cmds++ = cp_type0_packet(REG_TP0_CHICKEN, 1); + tmp_ctx.chicken_restore = virt2gpu(cmds, &drawctxt->gpustate); + *cmds++ = 0x00000000; + + /* create indirect buffer command for above command sequence */ + create_ib1(drawctxt, drawctxt->chicken_restore, start, cmds); + + return cmds; +} + +/****************************************************************************/ +/* context save */ +/****************************************************************************/ + +static const unsigned int register_ranges_a20x[] = { + REG_RB_SURFACE_INFO, REG_RB_DEPTH_INFO, + REG_COHER_DEST_BASE_0, REG_PA_SC_SCREEN_SCISSOR_BR, + REG_PA_SC_WINDOW_OFFSET, REG_PA_SC_WINDOW_SCISSOR_BR, + REG_RB_STENCILREFMASK_BF, REG_PA_CL_VPORT_ZOFFSET, + REG_SQ_PROGRAM_CNTL, REG_SQ_WRAPPING_1, + REG_PA_SC_LINE_CNTL, REG_SQ_PS_CONST, + REG_PA_SC_AA_MASK, REG_PA_SC_AA_MASK, + REG_RB_SAMPLE_COUNT_CTL, REG_RB_COLOR_DEST_MASK, + REG_PA_SU_POLY_OFFSET_FRONT_SCALE, REG_PA_SU_POLY_OFFSET_BACK_OFFSET, + REG_VGT_MAX_VTX_INDX, REG_RB_FOG_COLOR, + REG_RB_DEPTHCONTROL, REG_RB_MODECONTROL, + REG_PA_SU_POINT_SIZE, REG_PA_SC_LINE_STIPPLE, + REG_PA_SC_VIZ_QUERY, REG_PA_SC_VIZ_QUERY, + REG_VGT_VERTEX_REUSE_BLOCK_CNTL, REG_RB_DEPTH_CLEAR +}; + +static const unsigned int register_ranges_a220[] = { + REG_RB_SURFACE_INFO, REG_RB_DEPTH_INFO, + REG_COHER_DEST_BASE_0, REG_PA_SC_SCREEN_SCISSOR_BR, + REG_PA_SC_WINDOW_OFFSET, REG_PA_SC_WINDOW_SCISSOR_BR, + REG_RB_STENCILREFMASK_BF, REG_PA_CL_VPORT_ZOFFSET, + REG_SQ_PROGRAM_CNTL, REG_SQ_WRAPPING_1, + REG_PA_SC_LINE_CNTL, REG_SQ_PS_CONST, + REG_PA_SC_AA_MASK, REG_PA_SC_AA_MASK, + REG_RB_SAMPLE_COUNT_CTL, REG_RB_COLOR_DEST_MASK, + REG_PA_SU_POLY_OFFSET_FRONT_SCALE, REG_PA_SU_POLY_OFFSET_BACK_OFFSET, + REG_A220_PC_MAX_VTX_INDX, REG_A220_PC_INDX_OFFSET, + REG_RB_COLOR_MASK, REG_RB_FOG_COLOR, + REG_RB_DEPTHCONTROL, REG_RB_COLORCONTROL, + REG_PA_CL_CLIP_CNTL, REG_PA_CL_VTE_CNTL, + REG_RB_MODECONTROL, REG_RB_SAMPLE_POS, + REG_PA_SU_POINT_SIZE, REG_PA_SU_LINE_CNTL, + REG_A220_PC_VERTEX_REUSE_BLOCK_CNTL, + REG_A220_PC_VERTEX_REUSE_BLOCK_CNTL, + REG_RB_COPY_CONTROL, REG_RB_DEPTH_CLEAR +}; + +static const unsigned int register_ranges_a225[] = { + REG_RB_SURFACE_INFO, REG_A225_RB_COLOR_INFO3, + REG_COHER_DEST_BASE_0, REG_PA_SC_SCREEN_SCISSOR_BR, + REG_PA_SC_WINDOW_OFFSET, REG_PA_SC_WINDOW_SCISSOR_BR, + REG_RB_STENCILREFMASK_BF, REG_PA_CL_VPORT_ZOFFSET, + REG_SQ_PROGRAM_CNTL, REG_SQ_WRAPPING_1, + REG_PA_SC_LINE_CNTL, REG_SQ_PS_CONST, + REG_PA_SC_AA_MASK, REG_PA_SC_AA_MASK, + REG_RB_SAMPLE_COUNT_CTL, REG_RB_COLOR_DEST_MASK, + REG_PA_SU_POLY_OFFSET_FRONT_SCALE, REG_PA_SU_POLY_OFFSET_BACK_OFFSET, + REG_A220_PC_MAX_VTX_INDX, REG_A225_PC_MULTI_PRIM_IB_RESET_INDX, + REG_RB_COLOR_MASK, REG_RB_FOG_COLOR, + REG_RB_DEPTHCONTROL, REG_RB_COLORCONTROL, + REG_PA_CL_CLIP_CNTL, REG_PA_CL_VTE_CNTL, + REG_RB_MODECONTROL, REG_RB_SAMPLE_POS, + REG_PA_SU_POINT_SIZE, REG_PA_SU_LINE_CNTL, + REG_A220_PC_VERTEX_REUSE_BLOCK_CNTL, + REG_A220_PC_VERTEX_REUSE_BLOCK_CNTL, + REG_RB_COPY_CONTROL, REG_RB_DEPTH_CLEAR, + REG_A225_GRAS_UCP0X, REG_A225_GRAS_UCP5W, + REG_A225_GRAS_UCP_ENABLED, REG_A225_GRAS_UCP_ENABLED +}; + + +/* save h/w regs, alu constants, texture contants, etc. ... 
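/*
 * Illustrative sketch, not part of the patch: the address arithmetic used by
 * build_reg_to_mem_range() above. Context registers live at dword offsets
 * 0x2000 and up, and each gets a 4-byte slot in the 8 KB-aligned register
 * shadow, so e.g. dword offset 0x2104 lands at shadow byte 0x410. The helper
 * name is hypothetical.
 */
static uint32_t example_reg_shadow_addr(struct adreno_context *drawctxt,
					unsigned int reg_dword_offset)
{
	return ((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) +
		(reg_dword_offset - 0x2000) * 4;
}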
+* requires: bool_shadow_gpuaddr, loop_shadow_gpuaddr +*/ +static void build_regsave_cmds(struct adreno_device *adreno_dev, + struct adreno_context *drawctxt) +{ + unsigned int *start = tmp_ctx.cmd; + unsigned int *cmd = start; + + *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1); + *cmd++ = 0; + +#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES + /* Make sure the HW context has the correct register values + * before reading them. */ + *cmd++ = cp_type3_packet(CP_CONTEXT_UPDATE, 1); + *cmd++ = 0; + + { + unsigned int i = 0; + unsigned int reg_array_size = 0; + const unsigned int *ptr_register_ranges; + + /* Based on chip id choose the register ranges */ + if (adreno_is_a220(adreno_dev)) { + ptr_register_ranges = register_ranges_a220; + reg_array_size = ARRAY_SIZE(register_ranges_a220); + } else if (adreno_is_a225(adreno_dev)) { + ptr_register_ranges = register_ranges_a225; + reg_array_size = ARRAY_SIZE(register_ranges_a225); + } else { + ptr_register_ranges = register_ranges_a20x; + reg_array_size = ARRAY_SIZE(register_ranges_a20x); + } + + + /* Write HW registers into shadow */ + for (i = 0; i < (reg_array_size/2) ; i++) { + build_reg_to_mem_range(ptr_register_ranges[i*2], + ptr_register_ranges[i*2+1], + &cmd, drawctxt); + } + } + + /* Copy ALU constants */ + cmd = + reg_to_mem(cmd, (drawctxt->gpustate.gpuaddr) & 0xFFFFE000, + REG_SQ_CONSTANT_0, ALU_CONSTANTS); + + /* Copy Tex constants */ + cmd = + reg_to_mem(cmd, + (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000, + REG_SQ_FETCH_0, TEX_CONSTANTS); +#else + + /* Insert a wait for idle packet before reading the registers. + * This is to fix a hang/reset seen during stress testing. In this + * hang, CP encountered a timeout reading SQ's boolean constant + * register. There is logic in the HW that blocks reading of this + * register when the SQ block is not idle, which we believe is + * contributing to the hang.*/ + *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1); + *cmd++ = 0; + + /* H/w registers are already shadowed; just need to disable shadowing + * to prevent corruption. + */ + *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3); + *cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000; + *cmd++ = 4 << 16; /* regs, start=0 */ + *cmd++ = 0x0; /* count = 0 */ + + /* ALU constants are already shadowed; just need to disable shadowing + * to prevent corruption. + */ + *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3); + *cmd++ = drawctxt->gpustate.gpuaddr & 0xFFFFE000; + *cmd++ = 0 << 16; /* ALU, start=0 */ + *cmd++ = 0x0; /* count = 0 */ + + /* Tex constants are already shadowed; just need to disable shadowing + * to prevent corruption. 
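/*
 * Illustrative sketch, not part of the patch, of the 3-dword
 * CP_LOAD_CONSTANT_CONTEXT form used throughout this file: an 8 KB-aligned
 * shadow address, then (enable << 24) | (block << 16) | start, then a count.
 * The blocks used here are 0 = ALU, 1 = texture and 4 = registers, and a
 * count of 0 (as in the save path above) only switches shadowing on or off
 * according to the enable bit. The helper name is hypothetical.
 */
static unsigned int *example_load_constant_context(unsigned int *cmds,
		uint32_t shadow_gpuaddr, unsigned int enable,
		unsigned int block, unsigned int count)
{
	*cmds++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
	*cmds++ = shadow_gpuaddr;
	*cmds++ = (enable << 24) | (block << 16);	/* start = 0 */
	*cmds++ = count;
	return cmds;
}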
+ */ + *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3); + *cmd++ = (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000; + *cmd++ = 1 << 16; /* Tex, start=0 */ + *cmd++ = 0x0; /* count = 0 */ +#endif + + /* Need to handle some of the registers separately */ + *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2); + *cmd++ = REG_SQ_GPR_MANAGEMENT; + *cmd++ = tmp_ctx.reg_values[0]; + + *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2); + *cmd++ = REG_TP0_CHICKEN; + *cmd++ = tmp_ctx.reg_values[1]; + + if (adreno_is_a22x(adreno_dev)) { + unsigned int i; + unsigned int j = 2; + for (i = REG_A220_VSC_BIN_SIZE; i <= + REG_A220_VSC_PIPE_DATA_LENGTH_7; i++) { + *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2); + *cmd++ = i; + *cmd++ = tmp_ctx.reg_values[j]; + j++; + } + } + + /* Copy Boolean constants */ + cmd = reg_to_mem(cmd, tmp_ctx.bool_shadow, REG_SQ_CF_BOOLEANS, + BOOL_CONSTANTS); + + /* Copy Loop constants */ + cmd = reg_to_mem(cmd, tmp_ctx.loop_shadow, + REG_SQ_CF_LOOP, LOOP_CONSTANTS); + + /* create indirect buffer command for above command sequence */ + create_ib1(drawctxt, drawctxt->reg_save, start, cmd); + + tmp_ctx.cmd = cmd; +} + +/*copy colour, depth, & stencil buffers from graphics memory to system memory*/ +static unsigned int *build_gmem2sys_cmds(struct adreno_device *adreno_dev, + struct adreno_context *drawctxt, + struct gmem_shadow_t *shadow) +{ + unsigned int *cmds = shadow->gmem_save_commands; + unsigned int *start = cmds; + /* Calculate the new offset based on the adjusted base */ + unsigned int bytesperpixel = format2bytesperpixel[shadow->format]; + unsigned int addr = shadow->gmemshadow.gpuaddr; + unsigned int offset = (addr - (addr & 0xfffff000)) / bytesperpixel; + + if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE)) { + /* Store TP0_CHICKEN register */ + *cmds++ = cp_type3_packet(CP_REG_TO_MEM, 2); + *cmds++ = REG_TP0_CHICKEN; + + *cmds++ = tmp_ctx.chicken_restore; + + *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1); + *cmds++ = 0; + } + + /* Set TP0_CHICKEN to zero */ + *cmds++ = cp_type0_packet(REG_TP0_CHICKEN, 1); + *cmds++ = 0x00000000; + + /* Set PA_SC_AA_CONFIG to 0 */ + *cmds++ = cp_type0_packet(REG_PA_SC_AA_CONFIG, 1); + *cmds++ = 0x00000000; + + /* program shader */ + + /* load shader vtx constants ... 5 dwords */ + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 4); + *cmds++ = (0x1 << 16) | SHADER_CONST_ADDR; + *cmds++ = 0; + /* valid(?) 
vtx constant flag & addr */ + *cmds++ = shadow->quad_vertices.gpuaddr | 0x3; + /* limit = 12 dwords */ + *cmds++ = 0x00000030; + + /* Invalidate L2 cache to make sure vertices are updated */ + *cmds++ = cp_type0_packet(REG_TC_CNTL_STATUS, 1); + *cmds++ = 0x1; + + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 4); + *cmds++ = CP_REG(REG_VGT_MAX_VTX_INDX); + *cmds++ = 0x00ffffff; /* REG_VGT_MAX_VTX_INDX */ + *cmds++ = 0x0; /* REG_VGT_MIN_VTX_INDX */ + *cmds++ = 0x00000000; /* REG_VGT_INDX_OFFSET */ + + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); + *cmds++ = CP_REG(REG_PA_SC_AA_MASK); + *cmds++ = 0x0000ffff; /* REG_PA_SC_AA_MASK */ + + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); + *cmds++ = CP_REG(REG_RB_COLORCONTROL); + *cmds++ = 0x00000c20; + + /* Repartition shaders */ + *cmds++ = cp_type0_packet(REG_SQ_INST_STORE_MANAGMENT, 1); + *cmds++ = adreno_dev->pix_shader_start; + + /* Invalidate Vertex & Pixel instruction code address and sizes */ + *cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1); + *cmds++ = 0x00003F00; + + *cmds++ = cp_type3_packet(CP_SET_SHADER_BASES, 1); + *cmds++ = adreno_encode_istore_size(adreno_dev) + | adreno_dev->pix_shader_start; + + /* load the patched vertex shader stream */ + cmds = program_shader(cmds, 0, gmem2sys_vtx_pgm, GMEM2SYS_VTX_PGM_LEN); + + /* Load the patched fragment shader stream */ + cmds = + program_shader(cmds, 1, gmem2sys_frag_pgm, GMEM2SYS_FRAG_PGM_LEN); + + /* SQ_PROGRAM_CNTL / SQ_CONTEXT_MISC */ + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3); + *cmds++ = CP_REG(REG_SQ_PROGRAM_CNTL); + if (adreno_is_a22x(adreno_dev)) + *cmds++ = 0x10018001; + else + *cmds++ = 0x10010001; + *cmds++ = 0x00000008; + + /* resolve */ + + /* PA_CL_VTE_CNTL */ + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); + *cmds++ = CP_REG(REG_PA_CL_VTE_CNTL); + /* disable X/Y/Z transforms, X/Y/Z are premultiplied by W */ + *cmds++ = 0x00000b00; + + /* program surface info */ + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3); + *cmds++ = CP_REG(REG_RB_SURFACE_INFO); + *cmds++ = shadow->gmem_pitch; /* pitch, MSAA = 1 */ + + /* RB_COLOR_INFO Endian=none, Linear, Format=RGBA8888, Swap=0, + * Base=gmem_base + */ + /* gmem base assumed 4K aligned. 
*/ + BUG_ON(tmp_ctx.gmem_base & 0xFFF); + *cmds++ = + (shadow-> + format << RB_COLOR_INFO__COLOR_FORMAT__SHIFT) | tmp_ctx.gmem_base; + + /* disable Z */ + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); + *cmds++ = CP_REG(REG_RB_DEPTHCONTROL); + if (adreno_is_a22x(adreno_dev)) + *cmds++ = 0x08; + else + *cmds++ = 0; + + /* set REG_PA_SU_SC_MODE_CNTL + * Front_ptype = draw triangles + * Back_ptype = draw triangles + * Provoking vertex = last + */ + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); + *cmds++ = CP_REG(REG_PA_SU_SC_MODE_CNTL); + *cmds++ = 0x00080240; + + /* Use maximum scissor values -- quad vertices already have the + * correct bounds */ + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3); + *cmds++ = CP_REG(REG_PA_SC_SCREEN_SCISSOR_TL); + *cmds++ = (0 << 16) | 0; + *cmds++ = (0x1fff << 16) | (0x1fff); + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3); + *cmds++ = CP_REG(REG_PA_SC_WINDOW_SCISSOR_TL); + *cmds++ = (unsigned int)((1U << 31) | (0 << 16) | 0); + *cmds++ = (0x1fff << 16) | (0x1fff); + + /* load the viewport so that z scale = clear depth and + * z offset = 0.0f + */ + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3); + *cmds++ = CP_REG(REG_PA_CL_VPORT_ZSCALE); + *cmds++ = 0xbf800000; /* -1.0f */ + *cmds++ = 0x0; + + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); + *cmds++ = CP_REG(REG_RB_COLOR_MASK); + *cmds++ = 0x0000000f; /* R = G = B = 1:enabled */ + + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); + *cmds++ = CP_REG(REG_RB_COLOR_DEST_MASK); + *cmds++ = 0xffffffff; + + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3); + *cmds++ = CP_REG(REG_SQ_WRAPPING_0); + *cmds++ = 0x00000000; + *cmds++ = 0x00000000; + + /* load the stencil ref value + * $AAM - do this later + */ + + /* load the COPY state */ + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 6); + *cmds++ = CP_REG(REG_RB_COPY_CONTROL); + *cmds++ = 0; /* RB_COPY_CONTROL */ + *cmds++ = addr & 0xfffff000; /* RB_COPY_DEST_BASE */ + *cmds++ = shadow->pitch >> 5; /* RB_COPY_DEST_PITCH */ + + /* Endian=none, Linear, Format=RGBA8888,Swap=0,!Dither, + * MaskWrite:R=G=B=A=1 + */ + *cmds++ = 0x0003c008 | + (shadow->format << RB_COPY_DEST_INFO__COPY_DEST_FORMAT__SHIFT); + /* Make sure we stay in offsetx field. */ + BUG_ON(offset & 0xfffff000); + *cmds++ = offset; + + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); + *cmds++ = CP_REG(REG_RB_MODECONTROL); + *cmds++ = 0x6; /* EDRAM copy */ + + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); + *cmds++ = CP_REG(REG_PA_CL_CLIP_CNTL); + *cmds++ = 0x00010000; + + if (adreno_is_a22x(adreno_dev)) { + *cmds++ = cp_type3_packet(CP_SET_DRAW_INIT_FLAGS, 1); + *cmds++ = 0; + + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); + *cmds++ = CP_REG(REG_A220_RB_LRZ_VSC_CONTROL); + *cmds++ = 0x0000000; + + *cmds++ = cp_type3_packet(CP_DRAW_INDX, 3); + *cmds++ = 0; /* viz query info. */ + /* PrimType=RectList, SrcSel=AutoIndex, VisCullMode=Ignore*/ + *cmds++ = 0x00004088; + *cmds++ = 3; /* NumIndices=3 */ + } else { + /* queue the draw packet */ + *cmds++ = cp_type3_packet(CP_DRAW_INDX, 2); + *cmds++ = 0; /* viz query info. 
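/*
 * Worked example, not part of the patch, of the copy-destination fields
 * programmed above for a hypothetical 4 bytes/pixel shadow at gpuaddr
 * 0x10241678:
 *
 *   RB_COPY_DEST_BASE = 0x10241678 & 0xfffff000       = 0x10241000
 *   offsetx           = (0x10241678 - 0x10241000) / 4 = 0x19e pixels
 *
 * The BUG_ON(offset & 0xfffff000) above guarantees that the sub-page
 * remainder fits in the offsetx field of the copy destination info dword.
 */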
*/ + /* PrimType=RectList, NumIndices=3, SrcSel=AutoIndex */ + *cmds++ = 0x00030088; + } + + /* create indirect buffer command for above command sequence */ + create_ib1(drawctxt, shadow->gmem_save, start, cmds); + + return cmds; +} + +/* context restore */ + +/*copy colour, depth, & stencil buffers from system memory to graphics memory*/ +static unsigned int *build_sys2gmem_cmds(struct adreno_device *adreno_dev, + struct adreno_context *drawctxt, + struct gmem_shadow_t *shadow) +{ + unsigned int *cmds = shadow->gmem_restore_commands; + unsigned int *start = cmds; + + if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE)) { + /* Store TP0_CHICKEN register */ + *cmds++ = cp_type3_packet(CP_REG_TO_MEM, 2); + *cmds++ = REG_TP0_CHICKEN; + *cmds++ = tmp_ctx.chicken_restore; + + *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1); + *cmds++ = 0; + } + + /* Set TP0_CHICKEN to zero */ + *cmds++ = cp_type0_packet(REG_TP0_CHICKEN, 1); + *cmds++ = 0x00000000; + + /* Set PA_SC_AA_CONFIG to 0 */ + *cmds++ = cp_type0_packet(REG_PA_SC_AA_CONFIG, 1); + *cmds++ = 0x00000000; + /* shader constants */ + + /* vertex buffer constants */ + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 7); + + *cmds++ = (0x1 << 16) | (9 * 6); + /* valid(?) vtx constant flag & addr */ + *cmds++ = shadow->quad_vertices.gpuaddr | 0x3; + /* limit = 12 dwords */ + *cmds++ = 0x00000030; + /* valid(?) vtx constant flag & addr */ + *cmds++ = shadow->quad_texcoords.gpuaddr | 0x3; + /* limit = 8 dwords */ + *cmds++ = 0x00000020; + *cmds++ = 0; + *cmds++ = 0; + + /* Invalidate L2 cache to make sure vertices are updated */ + *cmds++ = cp_type0_packet(REG_TC_CNTL_STATUS, 1); + *cmds++ = 0x1; + + cmds = program_shader(cmds, 0, sys2gmem_vtx_pgm, SYS2GMEM_VTX_PGM_LEN); + + /* Repartition shaders */ + *cmds++ = cp_type0_packet(REG_SQ_INST_STORE_MANAGMENT, 1); + *cmds++ = adreno_dev->pix_shader_start; + + /* Invalidate Vertex & Pixel instruction code address and sizes */ + *cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1); + *cmds++ = 0x00000300; /* 0x100 = Vertex, 0x200 = Pixel */ + + *cmds++ = cp_type3_packet(CP_SET_SHADER_BASES, 1); + *cmds++ = adreno_encode_istore_size(adreno_dev) + | adreno_dev->pix_shader_start; + + /* Load the patched fragment shader stream */ + cmds = + program_shader(cmds, 1, sys2gmem_frag_pgm, SYS2GMEM_FRAG_PGM_LEN); + + /* SQ_PROGRAM_CNTL / SQ_CONTEXT_MISC */ + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3); + *cmds++ = CP_REG(REG_SQ_PROGRAM_CNTL); + *cmds++ = 0x10030002; + *cmds++ = 0x00000008; + + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); + *cmds++ = CP_REG(REG_PA_SC_AA_MASK); + *cmds++ = 0x0000ffff; /* REG_PA_SC_AA_MASK */ + + if (!adreno_is_a22x(adreno_dev)) { + /* PA_SC_VIZ_QUERY */ + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); + *cmds++ = CP_REG(REG_PA_SC_VIZ_QUERY); + *cmds++ = 0x0; /*REG_PA_SC_VIZ_QUERY */ + } + + /* RB_COLORCONTROL */ + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); + *cmds++ = CP_REG(REG_RB_COLORCONTROL); + *cmds++ = 0x00000c20; + + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 4); + *cmds++ = CP_REG(REG_VGT_MAX_VTX_INDX); + *cmds++ = 0x00ffffff; /* mmVGT_MAX_VTX_INDX */ + *cmds++ = 0x0; /* mmVGT_MIN_VTX_INDX */ + *cmds++ = 0x00000000; /* mmVGT_INDX_OFFSET */ + + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3); + *cmds++ = CP_REG(REG_VGT_VERTEX_REUSE_BLOCK_CNTL); + *cmds++ = 0x00000002; /* mmVGT_VERTEX_REUSE_BLOCK_CNTL */ + *cmds++ = 0x00000002; /* mmVGT_OUT_DEALLOC_CNTL */ + + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); + *cmds++ = CP_REG(REG_SQ_INTERPOLATOR_CNTL); + *cmds++ = 0xffffffff; /* 
mmSQ_INTERPOLATOR_CNTL */ + + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); + *cmds++ = CP_REG(REG_PA_SC_AA_CONFIG); + *cmds++ = 0x00000000; /* REG_PA_SC_AA_CONFIG */ + + /* set REG_PA_SU_SC_MODE_CNTL + * Front_ptype = draw triangles + * Back_ptype = draw triangles + * Provoking vertex = last + */ + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); + *cmds++ = CP_REG(REG_PA_SU_SC_MODE_CNTL); + *cmds++ = 0x00080240; + + /* texture constants */ + *cmds++ = + cp_type3_packet(CP_SET_CONSTANT, (SYS2GMEM_TEX_CONST_LEN + 1)); + *cmds++ = (0x1 << 16) | (0 * 6); + memcpy(cmds, sys2gmem_tex_const, SYS2GMEM_TEX_CONST_LEN << 2); + cmds[0] |= (shadow->pitch >> 5) << 22; + cmds[1] |= + shadow->gmemshadow.gpuaddr | surface_format_table[shadow->format]; + cmds[2] |= (shadow->width - 1) | (shadow->height - 1) << 13; + cmds += SYS2GMEM_TEX_CONST_LEN; + + /* program surface info */ + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3); + *cmds++ = CP_REG(REG_RB_SURFACE_INFO); + *cmds++ = shadow->gmem_pitch; /* pitch, MSAA = 1 */ + + /* RB_COLOR_INFO Endian=none, Linear, Format=RGBA8888, Swap=0, + * Base=gmem_base + */ + *cmds++ = + (shadow-> + format << RB_COLOR_INFO__COLOR_FORMAT__SHIFT) | tmp_ctx.gmem_base; + + /* RB_DEPTHCONTROL */ + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); + *cmds++ = CP_REG(REG_RB_DEPTHCONTROL); + + if (adreno_is_a22x(adreno_dev)) + *cmds++ = 8; /* disable Z */ + else + *cmds++ = 0; /* disable Z */ + + /* Use maximum scissor values -- quad vertices already + * have the correct bounds */ + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3); + *cmds++ = CP_REG(REG_PA_SC_SCREEN_SCISSOR_TL); + *cmds++ = (0 << 16) | 0; + *cmds++ = ((0x1fff) << 16) | 0x1fff; + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3); + *cmds++ = CP_REG(REG_PA_SC_WINDOW_SCISSOR_TL); + *cmds++ = (unsigned int)((1U << 31) | (0 << 16) | 0); + *cmds++ = ((0x1fff) << 16) | 0x1fff; + + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); + *cmds++ = CP_REG(REG_PA_CL_VTE_CNTL); + /* disable X/Y/Z transforms, X/Y/Z are premultiplied by W */ + *cmds++ = 0x00000b00; + + /*load the viewport so that z scale = clear depth and z offset = 0.0f */ + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3); + *cmds++ = CP_REG(REG_PA_CL_VPORT_ZSCALE); + *cmds++ = 0xbf800000; + *cmds++ = 0x0; + + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); + *cmds++ = CP_REG(REG_RB_COLOR_MASK); + *cmds++ = 0x0000000f; /* R = G = B = 1:enabled */ + + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); + *cmds++ = CP_REG(REG_RB_COLOR_DEST_MASK); + *cmds++ = 0xffffffff; + + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3); + *cmds++ = CP_REG(REG_SQ_WRAPPING_0); + *cmds++ = 0x00000000; + *cmds++ = 0x00000000; + + /* load the stencil ref value + * $AAM - do this later + */ + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); + *cmds++ = CP_REG(REG_RB_MODECONTROL); + /* draw pixels with color and depth/stencil component */ + *cmds++ = 0x4; + + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); + *cmds++ = CP_REG(REG_PA_CL_CLIP_CNTL); + *cmds++ = 0x00010000; + + if (adreno_is_a22x(adreno_dev)) { + *cmds++ = cp_type3_packet(CP_SET_DRAW_INIT_FLAGS, 1); + *cmds++ = 0; + + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); + *cmds++ = CP_REG(REG_A220_RB_LRZ_VSC_CONTROL); + *cmds++ = 0x0000000; + + *cmds++ = cp_type3_packet(CP_DRAW_INDX, 3); + *cmds++ = 0; /* viz query info. 
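/*
 * Worked example, not part of the patch, of the texture-constant patching in
 * build_sys2gmem_cmds() above: for a hypothetical 256x240 shadow, cmds[2]
 * becomes (256 - 1) | ((240 - 1) << 13) = 0xff | 0x1de000 = 0x1de0ff, i.e.
 * width minus one in the low bits and height minus one starting at bit 13.
 */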
*/ + /* PrimType=RectList, SrcSel=AutoIndex, VisCullMode=Ignore*/ + *cmds++ = 0x00004088; + *cmds++ = 3; /* NumIndices=3 */ + } else { + /* queue the draw packet */ + *cmds++ = cp_type3_packet(CP_DRAW_INDX, 2); + *cmds++ = 0; /* viz query info. */ + /* PrimType=RectList, NumIndices=3, SrcSel=AutoIndex */ + *cmds++ = 0x00030088; + } + + /* create indirect buffer command for above command sequence */ + create_ib1(drawctxt, shadow->gmem_restore, start, cmds); + + return cmds; +} + +static void build_regrestore_cmds(struct adreno_device *adreno_dev, + struct adreno_context *drawctxt) +{ + unsigned int *start = tmp_ctx.cmd; + unsigned int *cmd = start; + + unsigned int i = 0; + unsigned int reg_array_size = 0; + const unsigned int *ptr_register_ranges; + + *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1); + *cmd++ = 0; + + /* H/W Registers */ + /* deferred cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, ???); */ + cmd++; +#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES + /* Force mismatch */ + *cmd++ = ((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) | 1; +#else + *cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000; +#endif + + /* Based on chip id choose the registers ranges*/ + if (adreno_is_a220(adreno_dev)) { + ptr_register_ranges = register_ranges_a220; + reg_array_size = ARRAY_SIZE(register_ranges_a220); + } else if (adreno_is_a225(adreno_dev)) { + ptr_register_ranges = register_ranges_a225; + reg_array_size = ARRAY_SIZE(register_ranges_a225); + } else { + ptr_register_ranges = register_ranges_a20x; + reg_array_size = ARRAY_SIZE(register_ranges_a20x); + } + + + for (i = 0; i < (reg_array_size/2); i++) { + cmd = reg_range(cmd, ptr_register_ranges[i*2], + ptr_register_ranges[i*2+1]); + } + + /* Now we know how many register blocks we have, we can compute command + * length + */ + start[2] = + cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, (cmd - start) - 3); + /* Enable shadowing for the entire register block. */ +#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES + start[4] |= (0 << 24) | (4 << 16); /* Disable shadowing. 
*/ +#else + start[4] |= (1 << 24) | (4 << 16); +#endif + + /* Need to handle some of the registers separately */ + *cmd++ = cp_type0_packet(REG_SQ_GPR_MANAGEMENT, 1); + tmp_ctx.reg_values[0] = virt2gpu(cmd, &drawctxt->gpustate); + *cmd++ = 0x00040400; + + *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1); + *cmd++ = 0; + *cmd++ = cp_type0_packet(REG_TP0_CHICKEN, 1); + tmp_ctx.reg_values[1] = virt2gpu(cmd, &drawctxt->gpustate); + *cmd++ = 0x00000000; + + if (adreno_is_a22x(adreno_dev)) { + unsigned int i; + unsigned int j = 2; + for (i = REG_A220_VSC_BIN_SIZE; i <= + REG_A220_VSC_PIPE_DATA_LENGTH_7; i++) { + *cmd++ = cp_type0_packet(i, 1); + tmp_ctx.reg_values[j] = virt2gpu(cmd, + &drawctxt->gpustate); + *cmd++ = 0x00000000; + j++; + } + } + + /* ALU Constants */ + *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3); + *cmd++ = drawctxt->gpustate.gpuaddr & 0xFFFFE000; +#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES + *cmd++ = (0 << 24) | (0 << 16) | 0; /* Disable shadowing */ +#else + *cmd++ = (1 << 24) | (0 << 16) | 0; +#endif + *cmd++ = ALU_CONSTANTS; + + /* Texture Constants */ + *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3); + *cmd++ = (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000; +#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES + /* Disable shadowing */ + *cmd++ = (0 << 24) | (1 << 16) | 0; +#else + *cmd++ = (1 << 24) | (1 << 16) | 0; +#endif + *cmd++ = TEX_CONSTANTS; + + /* Boolean Constants */ + *cmd++ = cp_type3_packet(CP_SET_CONSTANT, 1 + BOOL_CONSTANTS); + *cmd++ = (2 << 16) | 0; + + /* the next BOOL_CONSTANT dwords is the shadow area for + * boolean constants. + */ + tmp_ctx.bool_shadow = virt2gpu(cmd, &drawctxt->gpustate); + cmd += BOOL_CONSTANTS; + + /* Loop Constants */ + *cmd++ = cp_type3_packet(CP_SET_CONSTANT, 1 + LOOP_CONSTANTS); + *cmd++ = (3 << 16) | 0; + + /* the next LOOP_CONSTANTS dwords is the shadow area for + * loop constants. + */ + tmp_ctx.loop_shadow = virt2gpu(cmd, &drawctxt->gpustate); + cmd += LOOP_CONSTANTS; + + /* create indirect buffer command for above command sequence */ + create_ib1(drawctxt, drawctxt->reg_restore, start, cmd); + + tmp_ctx.cmd = cmd; +} + +static void +build_shader_save_restore_cmds(struct adreno_device *adreno_dev, + struct adreno_context *drawctxt) +{ + unsigned int *cmd = tmp_ctx.cmd; + unsigned int *save, *restore, *fixup; + unsigned int *startSizeVtx, *startSizePix, *startSizeShared; + unsigned int *partition1; + unsigned int *shaderBases, *partition2; + + /* compute vertex, pixel and shared instruction shadow GPU addresses */ + tmp_ctx.shader_vertex = drawctxt->gpustate.gpuaddr + SHADER_OFFSET; + tmp_ctx.shader_pixel = tmp_ctx.shader_vertex + + _shader_shadow_size(adreno_dev); + tmp_ctx.shader_shared = tmp_ctx.shader_pixel + + _shader_shadow_size(adreno_dev); + + /* restore shader partitioning and instructions */ + + restore = cmd; /* start address */ + + /* Invalidate Vertex & Pixel instruction code address and sizes */ + *cmd++ = cp_type3_packet(CP_INVALIDATE_STATE, 1); + *cmd++ = 0x00000300; /* 0x100 = Vertex, 0x200 = Pixel */ + + /* Restore previous shader vertex & pixel instruction bases. */ + *cmd++ = cp_type3_packet(CP_SET_SHADER_BASES, 1); + shaderBases = cmd++; /* TBD #5: shader bases (from fixup) */ + + /* write the shader partition information to a scratch register */ + *cmd++ = cp_type0_packet(REG_SQ_INST_STORE_MANAGMENT, 1); + partition1 = cmd++; /* TBD #4a: partition info (from save) */ + + /* load vertex shader instructions from the shadow. 
*/ + *cmd++ = cp_type3_packet(CP_IM_LOAD, 2); + *cmd++ = tmp_ctx.shader_vertex + 0x0; /* 0x0 = Vertex */ + startSizeVtx = cmd++; /* TBD #1: start/size (from save) */ + + /* load pixel shader instructions from the shadow. */ + *cmd++ = cp_type3_packet(CP_IM_LOAD, 2); + *cmd++ = tmp_ctx.shader_pixel + 0x1; /* 0x1 = Pixel */ + startSizePix = cmd++; /* TBD #2: start/size (from save) */ + + /* load shared shader instructions from the shadow. */ + *cmd++ = cp_type3_packet(CP_IM_LOAD, 2); + *cmd++ = tmp_ctx.shader_shared + 0x2; /* 0x2 = Shared */ + startSizeShared = cmd++; /* TBD #3: start/size (from save) */ + + /* create indirect buffer command for above command sequence */ + create_ib1(drawctxt, drawctxt->shader_restore, restore, cmd); + + /* + * fixup SET_SHADER_BASES data + * + * since self-modifying PM4 code is being used here, a separate + * command buffer is used for this fixup operation, to ensure the + * commands are not read by the PM4 engine before the data fields + * have been written. + */ + + fixup = cmd; /* start address */ + + /* write the shader partition information to a scratch register */ + *cmd++ = cp_type0_packet(REG_SCRATCH_REG2, 1); + partition2 = cmd++; /* TBD #4b: partition info (from save) */ + + /* mask off unused bits, then OR with shader instruction memory size */ + *cmd++ = cp_type3_packet(CP_REG_RMW, 3); + *cmd++ = REG_SCRATCH_REG2; + /* AND off invalid bits. */ + *cmd++ = 0x0FFF0FFF; + /* OR in instruction memory size. */ + *cmd++ = adreno_encode_istore_size(adreno_dev); + + /* write the computed value to the SET_SHADER_BASES data field */ + *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2); + *cmd++ = REG_SCRATCH_REG2; + /* TBD #5: shader bases (to restore) */ + *cmd++ = virt2gpu(shaderBases, &drawctxt->gpustate); + + /* create indirect buffer command for above command sequence */ + create_ib1(drawctxt, drawctxt->shader_fixup, fixup, cmd); + + /* save shader partitioning and instructions */ + + save = cmd; /* start address */ + + *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1); + *cmd++ = 0; + + /* fetch the SQ_INST_STORE_MANAGMENT register value, + * store the value in the data fields of the SET_CONSTANT commands + * above. 
+ */ + *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2); + *cmd++ = REG_SQ_INST_STORE_MANAGMENT; + /* TBD #4a: partition info (to restore) */ + *cmd++ = virt2gpu(partition1, &drawctxt->gpustate); + *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2); + *cmd++ = REG_SQ_INST_STORE_MANAGMENT; + /* TBD #4b: partition info (to fixup) */ + *cmd++ = virt2gpu(partition2, &drawctxt->gpustate); + + + /* store the vertex shader instructions */ + *cmd++ = cp_type3_packet(CP_IM_STORE, 2); + *cmd++ = tmp_ctx.shader_vertex + 0x0; /* 0x0 = Vertex */ + /* TBD #1: start/size (to restore) */ + *cmd++ = virt2gpu(startSizeVtx, &drawctxt->gpustate); + + /* store the pixel shader instructions */ + *cmd++ = cp_type3_packet(CP_IM_STORE, 2); + *cmd++ = tmp_ctx.shader_pixel + 0x1; /* 0x1 = Pixel */ + /* TBD #2: start/size (to restore) */ + *cmd++ = virt2gpu(startSizePix, &drawctxt->gpustate); + + /* store the shared shader instructions if vertex base is nonzero */ + + *cmd++ = cp_type3_packet(CP_IM_STORE, 2); + *cmd++ = tmp_ctx.shader_shared + 0x2; /* 0x2 = Shared */ + /* TBD #3: start/size (to restore) */ + *cmd++ = virt2gpu(startSizeShared, &drawctxt->gpustate); + + + *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1); + *cmd++ = 0; + + /* create indirect buffer command for above command sequence */ + create_ib1(drawctxt, drawctxt->shader_save, save, cmd); + + tmp_ctx.cmd = cmd; +} + +/* create buffers for saving/restoring registers, constants, & GMEM */ +static int a2xx_create_gpustate_shadow(struct adreno_device *adreno_dev, + struct adreno_context *drawctxt) +{ + drawctxt->flags |= CTXT_FLAGS_STATE_SHADOW; + + /* build indirect command buffers to save & restore regs/constants */ + build_regrestore_cmds(adreno_dev, drawctxt); + build_regsave_cmds(adreno_dev, drawctxt); + + build_shader_save_restore_cmds(adreno_dev, drawctxt); + + return 0; +} + +/* create buffers for saving/restoring registers, constants, & GMEM */ +static int a2xx_create_gmem_shadow(struct adreno_device *adreno_dev, + struct adreno_context *drawctxt) +{ + int result; + + calc_gmemsize(&drawctxt->context_gmem_shadow, + adreno_dev->gmemspace.sizebytes); + tmp_ctx.gmem_base = adreno_dev->gmemspace.gpu_base; + + result = kgsl_allocate(&drawctxt->context_gmem_shadow.gmemshadow, + drawctxt->pagetable, drawctxt->context_gmem_shadow.size); + + if (result) + return result; + + /* we've allocated the shadow, when swapped out, GMEM must be saved. */ + drawctxt->flags |= CTXT_FLAGS_GMEM_SHADOW | CTXT_FLAGS_GMEM_SAVE; + + /* blank out gmem shadow. 
*/ + kgsl_sharedmem_set(&drawctxt->context_gmem_shadow.gmemshadow, 0, 0, + drawctxt->context_gmem_shadow.size); + + /* build quad vertex buffer */ + build_quad_vtxbuff(drawctxt, &drawctxt->context_gmem_shadow, + &tmp_ctx.cmd); + + /* build TP0_CHICKEN register restore command buffer */ + if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE)) + tmp_ctx.cmd = build_chicken_restore_cmds(drawctxt); + + /* build indirect command buffers to save & restore gmem */ + drawctxt->context_gmem_shadow.gmem_save_commands = tmp_ctx.cmd; + tmp_ctx.cmd = + build_gmem2sys_cmds(adreno_dev, drawctxt, + &drawctxt->context_gmem_shadow); + drawctxt->context_gmem_shadow.gmem_restore_commands = tmp_ctx.cmd; + tmp_ctx.cmd = + build_sys2gmem_cmds(adreno_dev, drawctxt, + &drawctxt->context_gmem_shadow); + + kgsl_cache_range_op(&drawctxt->context_gmem_shadow.gmemshadow, + KGSL_CACHE_OP_FLUSH); + + kgsl_cffdump_syncmem(NULL, + &drawctxt->context_gmem_shadow.gmemshadow, + drawctxt->context_gmem_shadow.gmemshadow.gpuaddr, + drawctxt->context_gmem_shadow.gmemshadow.size, false); + + return 0; +} + +static int a2xx_drawctxt_create(struct adreno_device *adreno_dev, + struct adreno_context *drawctxt) +{ + int ret; + + /* + * Allocate memory for the GPU state and the context commands. + * Despite the name, this is much more then just storage for + * the gpustate. This contains command space for gmem save + * and texture and vertex buffer storage too + */ + + ret = kgsl_allocate(&drawctxt->gpustate, + drawctxt->pagetable, _context_size(adreno_dev)); + + if (ret) + return ret; + + kgsl_sharedmem_set(&drawctxt->gpustate, 0, 0, + _context_size(adreno_dev)); + + tmp_ctx.cmd = tmp_ctx.start + = (unsigned int *)((char *)drawctxt->gpustate.hostptr + CMD_OFFSET); + + if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE)) { + ret = a2xx_create_gpustate_shadow(adreno_dev, drawctxt); + if (ret) + goto done; + + drawctxt->flags |= CTXT_FLAGS_SHADER_SAVE; + } + + if (!(drawctxt->flags & CTXT_FLAGS_NOGMEMALLOC)) { + ret = a2xx_create_gmem_shadow(adreno_dev, drawctxt); + if (ret) + goto done; + } + + /* Flush and sync the gpustate memory */ + + kgsl_cache_range_op(&drawctxt->gpustate, + KGSL_CACHE_OP_FLUSH); + + kgsl_cffdump_syncmem(NULL, &drawctxt->gpustate, + drawctxt->gpustate.gpuaddr, + drawctxt->gpustate.size, false); + +done: + if (ret) + kgsl_sharedmem_free(&drawctxt->gpustate); + + return ret; +} + +static void a2xx_drawctxt_draw_workaround(struct adreno_device *adreno_dev, + struct adreno_context *context) +{ + struct kgsl_device *device = &adreno_dev->dev; + unsigned int cmd[11]; + unsigned int *cmds = &cmd[0]; + + adreno_dev->gpudev->ctx_switches_since_last_draw++; + /* If there have been > than ADRENO_NUM_CTX_SWITCH_ALLOWED_BEFORE_DRAW + * calls to context switches w/o gmem being saved then we need to + * execute this workaround */ + if (adreno_dev->gpudev->ctx_switches_since_last_draw > + ADRENO_NUM_CTX_SWITCH_ALLOWED_BEFORE_DRAW) + adreno_dev->gpudev->ctx_switches_since_last_draw = 0; + else + return; + /* + * Issue an empty draw call to avoid possible hangs due to + * repeated idles without intervening draw calls. + * On adreno 225 the PC block has a cache that is only + * flushed on draw calls and repeated idles can make it + * overflow. The gmem save path contains draw calls so + * this workaround isn't needed there. 
+ */ + *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2); + *cmds++ = (0x4 << 16) | (REG_PA_SU_SC_MODE_CNTL - 0x2000); + *cmds++ = 0; + *cmds++ = cp_type3_packet(CP_DRAW_INDX, 5); + *cmds++ = 0; + *cmds++ = 1<<14; + *cmds++ = 0; + *cmds++ = device->mmu.setstate_memory.gpuaddr; + *cmds++ = 0; + *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1); + *cmds++ = 0x00000000; + + adreno_ringbuffer_issuecmds(device, context, + KGSL_CMD_FLAGS_PMODE, + &cmd[0], 11); +} + +static void a2xx_drawctxt_save(struct adreno_device *adreno_dev, + struct adreno_context *context) +{ + struct kgsl_device *device = &adreno_dev->dev; + + if (context == NULL) + return; + + if (context->flags & CTXT_FLAGS_GPU_HANG) + KGSL_CTXT_WARN(device, + "Current active context has caused gpu hang\n"); + + if (!(context->flags & CTXT_FLAGS_PREAMBLE)) { + + /* save registers and constants. */ + adreno_ringbuffer_issuecmds(device, context, + KGSL_CMD_FLAGS_NONE, + context->reg_save, 3); + + if (context->flags & CTXT_FLAGS_SHADER_SAVE) { + /* save shader partitioning and instructions. */ + adreno_ringbuffer_issuecmds(device, context, + KGSL_CMD_FLAGS_PMODE, + context->shader_save, 3); + + /* + * fixup shader partitioning parameter for + * SET_SHADER_BASES. + */ + adreno_ringbuffer_issuecmds(device, context, + KGSL_CMD_FLAGS_NONE, + context->shader_fixup, 3); + + context->flags |= CTXT_FLAGS_SHADER_RESTORE; + } + } + + if ((context->flags & CTXT_FLAGS_GMEM_SAVE) && + (context->flags & CTXT_FLAGS_GMEM_SHADOW)) { + /* save gmem. + * (note: changes shader. shader must already be saved.) + */ + adreno_ringbuffer_issuecmds(device, context, + KGSL_CMD_FLAGS_PMODE, + context->context_gmem_shadow.gmem_save, 3); + + /* Restore TP0_CHICKEN */ + if (!(context->flags & CTXT_FLAGS_PREAMBLE)) { + adreno_ringbuffer_issuecmds(device, context, + KGSL_CMD_FLAGS_NONE, + context->chicken_restore, 3); + } + adreno_dev->gpudev->ctx_switches_since_last_draw = 0; + + context->flags |= CTXT_FLAGS_GMEM_RESTORE; + } else if (adreno_is_a225(adreno_dev)) + a2xx_drawctxt_draw_workaround(adreno_dev, context); +} + +static void a2xx_drawctxt_restore(struct adreno_device *adreno_dev, + struct adreno_context *context) +{ + struct kgsl_device *device = &adreno_dev->dev; + unsigned int cmds[5]; + + if (context == NULL) { + /* No context - set the default pagetable and that's it */ + kgsl_mmu_setstate(device, device->mmu.defaultpagetable, + adreno_dev->drawctxt_active->id); + return; + } + + KGSL_CTXT_INFO(device, "context flags %08x\n", context->flags); + + cmds[0] = cp_nop_packet(1); + cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER; + cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2); + cmds[3] = device->memstore.gpuaddr + + KGSL_DEVICE_MEMSTORE_OFFSET(current_context); + cmds[4] = (unsigned int) context; + adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE, + cmds, 5); + kgsl_mmu_setstate(device, context->pagetable, context->id); + +#ifndef CONFIG_MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP + kgsl_cffdump_syncmem(NULL, &context->gpustate, + context->gpustate.gpuaddr, LCC_SHADOW_SIZE + + REG_SHADOW_SIZE + CMD_BUFFER_SIZE + TEX_SHADOW_SIZE, false); +#endif + + /* restore gmem. + * (note: changes shader. shader must not already be restored.) 
+ */ + if (context->flags & CTXT_FLAGS_GMEM_RESTORE) { + adreno_ringbuffer_issuecmds(device, context, + KGSL_CMD_FLAGS_PMODE, + context->context_gmem_shadow.gmem_restore, 3); + + if (!(context->flags & CTXT_FLAGS_PREAMBLE)) { + /* Restore TP0_CHICKEN */ + adreno_ringbuffer_issuecmds(device, context, + KGSL_CMD_FLAGS_NONE, + context->chicken_restore, 3); + } + + context->flags &= ~CTXT_FLAGS_GMEM_RESTORE; + } + + if (!(context->flags & CTXT_FLAGS_PREAMBLE)) { + + /* restore registers and constants. */ + adreno_ringbuffer_issuecmds(device, context, + KGSL_CMD_FLAGS_NONE, context->reg_restore, 3); + + /* restore shader instructions & partitioning. */ + if (context->flags & CTXT_FLAGS_SHADER_RESTORE) { + adreno_ringbuffer_issuecmds(device, context, + KGSL_CMD_FLAGS_NONE, + context->shader_restore, 3); + } + } + + if (adreno_is_a20x(adreno_dev)) { + cmds[0] = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1); + cmds[1] = context->bin_base_offset; + adreno_ringbuffer_issuecmds(device, context, + KGSL_CMD_FLAGS_NONE, cmds, 2); + } +} + +/* + * Interrupt management + * + * a2xx interrupt control is distributed among the various + * hardware components (RB, CP, MMU). The main interrupt + * tells us which component fired the interrupt, but one needs + * to go to the individual component to find out why. The + * following functions provide the broken out support for + * managing the interrupts + */ + +#define RBBM_INT_MASK RBBM_INT_CNTL__RDERR_INT_MASK + +#define CP_INT_MASK \ + (CP_INT_CNTL__T0_PACKET_IN_IB_MASK | \ + CP_INT_CNTL__OPCODE_ERROR_MASK | \ + CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK | \ + CP_INT_CNTL__RESERVED_BIT_ERROR_MASK | \ + CP_INT_CNTL__IB_ERROR_MASK | \ + CP_INT_CNTL__IB1_INT_MASK | \ + CP_INT_CNTL__RB_INT_MASK) + +#define VALID_STATUS_COUNT_MAX 10 + +static struct { + unsigned int mask; + const char *message; +} kgsl_cp_error_irqs[] = { + { CP_INT_CNTL__T0_PACKET_IN_IB_MASK, + "ringbuffer TO packet in IB interrupt" }, + { CP_INT_CNTL__OPCODE_ERROR_MASK, + "ringbuffer opcode error interrupt" }, + { CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK, + "ringbuffer protected mode error interrupt" }, + { CP_INT_CNTL__RESERVED_BIT_ERROR_MASK, + "ringbuffer reserved bit error interrupt" }, + { CP_INT_CNTL__IB_ERROR_MASK, + "ringbuffer IB error interrupt" }, +}; + +static void a2xx_cp_intrcallback(struct kgsl_device *device) +{ + unsigned int status = 0, num_reads = 0, master_status = 0; + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer; + int i; + + adreno_regread(device, REG_MASTER_INT_SIGNAL, &master_status); + while (!status && (num_reads < VALID_STATUS_COUNT_MAX) && + (master_status & MASTER_INT_SIGNAL__CP_INT_STAT)) { + adreno_regread(device, REG_CP_INT_STATUS, &status); + adreno_regread(device, REG_MASTER_INT_SIGNAL, + &master_status); + num_reads++; + } + if (num_reads > 1) + KGSL_DRV_WARN(device, + "Looped %d times to read REG_CP_INT_STATUS\n", + num_reads); + + trace_kgsl_a2xx_irq_status(device, master_status, status); + + if (!status) { + if (master_status & MASTER_INT_SIGNAL__CP_INT_STAT) { + /* This indicates that we could not read CP_INT_STAT. + * As a precaution just wake up processes so + * they can check their timestamps. 
Since we + * did not ack any interrupts, this interrupt will + * be generated again */ + KGSL_DRV_WARN(device, "Unable to read CP_INT_STATUS\n"); + wake_up_interruptible_all(&device->wait_queue); + } else + KGSL_DRV_WARN(device, "Spurious interrupt detected\n"); + return; + } + + for (i = 0; i < ARRAY_SIZE(kgsl_cp_error_irqs); i++) { + if (status & kgsl_cp_error_irqs[i].mask) { + KGSL_CMD_CRIT(rb->device, "%s\n", + kgsl_cp_error_irqs[i].message); + /* + * on fatal errors, turn off the interrupts to + * avoid storming. This has the side effect of + * forcing a PM dump when the timestamp times out + */ + + kgsl_pwrctrl_irq(rb->device, KGSL_PWRFLAGS_OFF); + } + } + + /* only ack bits we understand */ + status &= CP_INT_MASK; + adreno_regwrite(device, REG_CP_INT_ACK, status); + + if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) { + KGSL_CMD_WARN(rb->device, "ringbuffer ib1/rb interrupt\n"); + queue_work(device->work_queue, &device->ts_expired_ws); + wake_up_interruptible_all(&device->wait_queue); + atomic_notifier_call_chain(&(device->ts_notifier_list), + device->id, + NULL); + } +} + +static void a2xx_rbbm_intrcallback(struct kgsl_device *device) +{ + unsigned int status = 0; + unsigned int rderr = 0; + + adreno_regread(device, REG_RBBM_INT_STATUS, &status); + + if (status & RBBM_INT_CNTL__RDERR_INT_MASK) { + union rbbm_read_error_u rerr; + adreno_regread(device, REG_RBBM_READ_ERROR, &rderr); + rerr.val = rderr; + if (rerr.f.read_address == REG_CP_INT_STATUS && + rerr.f.read_error && + rerr.f.read_requester) + KGSL_DRV_WARN(device, + "rbbm read error interrupt: %08x\n", rderr); + else + KGSL_DRV_CRIT(device, + "rbbm read error interrupt: %08x\n", rderr); + } + + status &= RBBM_INT_MASK; + adreno_regwrite(device, REG_RBBM_INT_ACK, status); +} + +irqreturn_t a2xx_irq_handler(struct adreno_device *adreno_dev) +{ + struct kgsl_device *device = &adreno_dev->dev; + irqreturn_t result = IRQ_NONE; + unsigned int status; + + adreno_regread(device, REG_MASTER_INT_SIGNAL, &status); + + if (status & MASTER_INT_SIGNAL__MH_INT_STAT) { + kgsl_mh_intrcallback(device); + result = IRQ_HANDLED; + } + + if (status & MASTER_INT_SIGNAL__CP_INT_STAT) { + a2xx_cp_intrcallback(device); + result = IRQ_HANDLED; + } + + if (status & MASTER_INT_SIGNAL__RBBM_INT_STAT) { + a2xx_rbbm_intrcallback(device); + result = IRQ_HANDLED; + } + + return result; +} + +static void a2xx_irq_control(struct adreno_device *adreno_dev, int state) +{ + struct kgsl_device *device = &adreno_dev->dev; + + if (state) { + adreno_regwrite(device, REG_RBBM_INT_CNTL, RBBM_INT_MASK); + adreno_regwrite(device, REG_CP_INT_CNTL, CP_INT_MASK); + adreno_regwrite(device, MH_INTERRUPT_MASK, KGSL_MMU_INT_MASK); + } else { + adreno_regwrite(device, REG_RBBM_INT_CNTL, 0); + adreno_regwrite(device, REG_CP_INT_CNTL, 0); + adreno_regwrite(device, MH_INTERRUPT_MASK, 0); + } + + /* Force the writes to post before touching the IRQ line */ + wmb(); +} + +static unsigned int a2xx_irq_pending(struct adreno_device *adreno_dev) +{ + struct kgsl_device *device = &adreno_dev->dev; + unsigned int rbbm, cp, mh; + + adreno_regread(device, REG_RBBM_INT_CNTL, &rbbm); + adreno_regread(device, REG_CP_INT_CNTL, &cp); + adreno_regread(device, MH_INTERRUPT_MASK, &mh); + + return ((rbbm & RBBM_INT_MASK) || (cp & CP_INT_MASK) || + (mh & MH_INTERRUPT_MASK)) ? 
1 : 0; +} + +/* Defined in adreno_a2xx_snapshot.c */ +void *a2xx_snapshot(struct adreno_device *adreno_dev, void *snapshot, + int *remain, int hang); + +struct adreno_gpudev adreno_a2xx_gpudev = { + .ctxt_create = a2xx_drawctxt_create, + .ctxt_save = a2xx_drawctxt_save, + .ctxt_restore = a2xx_drawctxt_restore, + .ctxt_draw_workaround = a2xx_drawctxt_draw_workaround, + .irq_handler = a2xx_irq_handler, + .irq_control = a2xx_irq_control, + .irq_pending = a2xx_irq_pending, + .snapshot = a2xx_snapshot, +}; diff --git a/drivers/gpu/msm/adreno_a2xx_snapshot.c b/drivers/gpu/msm/adreno_a2xx_snapshot.c new file mode 100644 index 0000000000000..d87dc211954f3 --- /dev/null +++ b/drivers/gpu/msm/adreno_a2xx_snapshot.c @@ -0,0 +1,356 @@ +/* Copyright (c) 2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include "kgsl.h" +#include "adreno.h" +#include "kgsl_snapshot.h" + +#define DEBUG_SECTION_SZ(_dwords) (((_dwords) * sizeof(unsigned int)) \ + + sizeof(struct kgsl_snapshot_debug)) + +/* Dump the SX debug registers into a GPU snapshot debug section */ + +#define SXDEBUG_COUNT 0x1B + +static int a2xx_snapshot_sxdebug(struct kgsl_device *device, void *snapshot, + int remain, void *priv) +{ + struct kgsl_snapshot_debug *header = snapshot; + unsigned int *data = snapshot + sizeof(*header); + int i; + + if (remain < DEBUG_SECTION_SZ(SXDEBUG_COUNT)) { + SNAPSHOT_ERR_NOMEM(device, "SX DEBUG"); + return 0; + } + + header->type = SNAPSHOT_DEBUG_SX; + header->size = SXDEBUG_COUNT; + + for (i = 0; i < SXDEBUG_COUNT; i++) { + adreno_regwrite(device, REG_RBBM_DEBUG_CNTL, 0x1B00 | i); + adreno_regread(device, REG_RBBM_DEBUG_OUT, &data[i]); + } + + adreno_regwrite(device, REG_RBBM_DEBUG_CNTL, 0); + + return DEBUG_SECTION_SZ(SXDEBUG_COUNT); +} + +#define CPDEBUG_COUNT 0x20 + +static int a2xx_snapshot_cpdebug(struct kgsl_device *device, void *snapshot, + int remain, void *priv) +{ + struct kgsl_snapshot_debug *header = snapshot; + unsigned int *data = snapshot + sizeof(*header); + int i; + + if (remain < DEBUG_SECTION_SZ(CPDEBUG_COUNT)) { + SNAPSHOT_ERR_NOMEM(device, "CP DEBUG"); + return 0; + } + + header->type = SNAPSHOT_DEBUG_CP; + header->size = CPDEBUG_COUNT; + + for (i = 0; i < CPDEBUG_COUNT; i++) { + adreno_regwrite(device, REG_RBBM_DEBUG_CNTL, 0x1628); + adreno_regread(device, REG_RBBM_DEBUG_OUT, &data[i]); + } + + adreno_regwrite(device, REG_RBBM_DEBUG_CNTL, 0); + + return DEBUG_SECTION_SZ(CPDEBUG_COUNT); +} + +/* + * The contents of the SQ debug sections are dword pairs: + * [register offset]:[value] + * This macro writes both dwords for the given register + */ + +#define SQ_DEBUG_WRITE(_device, _reg, _data, _offset) \ + do { _data[(_offset)++] = (_reg); \ + adreno_regread(_device, (_reg), &_data[(_offset)++]); } while (0) + +#define SQ_DEBUG_BANK_SIZE 23 + +static int a2xx_snapshot_sqdebug(struct kgsl_device *device, void *snapshot, + int remain, void *priv) +{ + struct kgsl_snapshot_debug *header = snapshot; + unsigned int *data = snapshot + sizeof(*header); + int i, offset = 0; + int size = SQ_DEBUG_BANK_SIZE * 2 * 2; + + if (remain < 
DEBUG_SECTION_SZ(size)) { + SNAPSHOT_ERR_NOMEM(device, "SQ Debug"); + return 0; + } + + header->type = SNAPSHOT_DEBUG_SQ; + header->size = size; + + for (i = 0; i < 2; i++) { + SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_CONST_MGR_FSM+i*0x1000, + data, offset); + SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_EXP_ALLOC+i*0x1000, + data, offset); + SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_FSM_ALU_0+i*0x1000, + data, offset); + SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_FSM_ALU_1+i*0x1000, + data, offset); + SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_GPR_PIX+i*0x1000, + data, offset); + SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_GPR_VTX+i*0x1000, + data, offset); + SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_INPUT_FSM+i*0x1000, + data, offset); + SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_MISC+i*0x1000, + data, offset); + SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_MISC_0+i*0x1000, + data, offset); + SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_MISC_1+i*0x1000, + data, offset); + SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_0+i*0x1000, + data, offset); + SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATE_MEM+i*0x1000, + data, offset); + SQ_DEBUG_WRITE(device, + REG_SQ_DEBUG_PIX_TB_STATUS_REG_0+i*0x1000, + data, offset); + SQ_DEBUG_WRITE(device, + REG_SQ_DEBUG_PIX_TB_STATUS_REG_1+i*0x1000, + data, offset); + SQ_DEBUG_WRITE(device, + REG_SQ_DEBUG_PIX_TB_STATUS_REG_2+i*0x1000, + data, offset); + SQ_DEBUG_WRITE(device, + REG_SQ_DEBUG_PIX_TB_STATUS_REG_3+i*0x1000, + data, offset); + SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PTR_BUFF+i*0x1000, + data, offset); + SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_TB_STATUS_SEL+i*0x1000, + data, offset); + SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_TP_FSM+i*0x1000, + data, offset); + SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_VTX_TB_0+i*0x1000, + data, offset); + SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_VTX_TB_1+i*0x1000, + data, offset); + SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_VTX_TB_STATE_MEM+i*0x1000, + data, offset); + } + + return DEBUG_SECTION_SZ(size); +} + +#define SQ_DEBUG_THREAD_SIZE 7 + +static int a2xx_snapshot_sqthreaddebug(struct kgsl_device *device, + void *snapshot, int remain, void *priv) +{ + struct kgsl_snapshot_debug *header = snapshot; + unsigned int *data = snapshot + sizeof(*header); + int i, offset = 0; + int size = SQ_DEBUG_THREAD_SIZE * 2 * 16; + + if (remain < DEBUG_SECTION_SZ(size)) { + SNAPSHOT_ERR_NOMEM(device, "SQ THREAD DEBUG"); + return 0; + } + + header->type = SNAPSHOT_DEBUG_SQTHREAD; + header->size = size; + + for (i = 0; i < 16; i++) { + adreno_regwrite(device, REG_SQ_DEBUG_TB_STATUS_SEL, + i | (6<<4) | (i<<7) | (1<<11) | (1<<12) + | (i<<16) | (6<<20) | (i<<23)); + SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_VTX_TB_STATE_MEM, + data, offset); + SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_VTX_TB_STATUS_REG, + data, offset); + SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATE_MEM, + data, offset); + SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATUS_REG_0, + data, offset); + SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATUS_REG_1, + data, offset); + SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATUS_REG_2, + data, offset); + SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATUS_REG_3, + data, offset); + } + + return DEBUG_SECTION_SZ(size); +} + +#define MIUDEBUG_COUNT 0x10 + +static int a2xx_snapshot_miudebug(struct kgsl_device *device, void *snapshot, + int remain, void *priv) +{ + struct kgsl_snapshot_debug *header = snapshot; + unsigned int *data = snapshot + sizeof(*header); + int i; + + if (remain < DEBUG_SECTION_SZ(MIUDEBUG_COUNT)) { + SNAPSHOT_ERR_NOMEM(device, "MIU DEBUG"); + return 0; + } + + header->type = SNAPSHOT_DEBUG_MIU; + header->size = 
MIUDEBUG_COUNT; + + for (i = 0; i < MIUDEBUG_COUNT; i++) { + adreno_regwrite(device, REG_RBBM_DEBUG_CNTL, 0x1600 | i); + adreno_regread(device, REG_RBBM_DEBUG_OUT, &data[i]); + } + + adreno_regwrite(device, REG_RBBM_DEBUG_CNTL, 0); + + return DEBUG_SECTION_SZ(MIUDEBUG_COUNT); +} + +/* Helper function to snapshot a section of indexed registers */ + +static void *a2xx_snapshot_indexed_registers(struct kgsl_device *device, + void *snapshot, int *remain, + unsigned int index, unsigned int data, unsigned int start, + unsigned int count) +{ + struct kgsl_snapshot_indexed_registers iregs; + iregs.index = index; + iregs.data = data; + iregs.start = start; + iregs.count = count; + + return kgsl_snapshot_add_section(device, + KGSL_SNAPSHOT_SECTION_INDEXED_REGS, snapshot, + remain, kgsl_snapshot_dump_indexed_regs, &iregs); +} + +/* A2XX GPU snapshot function - this is where all of the A2XX specific + * bits and pieces are grabbed into the snapshot memory + */ + +void *a2xx_snapshot(struct adreno_device *adreno_dev, void *snapshot, + int *remain, int hang) +{ + struct kgsl_device *device = &adreno_dev->dev; + struct kgsl_snapshot_registers regs; + unsigned int pmoverride; + + /* Choose the register set to dump */ + + if (adreno_is_a20x(adreno_dev)) { + regs.regs = (unsigned int *) a200_registers; + regs.count = a200_registers_count; + } else { + regs.regs = (unsigned int *) a220_registers; + regs.count = a220_registers_count; + } + + /* Master set of (non debug) registers */ + snapshot = kgsl_snapshot_add_section(device, + KGSL_SNAPSHOT_SECTION_REGS, snapshot, remain, + kgsl_snapshot_dump_regs, ®s); + + /* CP_STATE_DEBUG indexed registers */ + snapshot = a2xx_snapshot_indexed_registers(device, snapshot, + remain, REG_CP_STATE_DEBUG_INDEX, + REG_CP_STATE_DEBUG_DATA, 0x0, 0x14); + + /* CP_ME indexed registers */ + snapshot = a2xx_snapshot_indexed_registers(device, snapshot, + remain, REG_CP_ME_CNTL, REG_CP_ME_STATUS, + 64, 44); + + /* + * Need to temporarily turn off clock gating for the debug bus to + * work + */ + + adreno_regread(device, REG_RBBM_PM_OVERRIDE2, &pmoverride); + adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0xFF); + + /* SX debug registers */ + snapshot = kgsl_snapshot_add_section(device, + KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain, + a2xx_snapshot_sxdebug, NULL); + + /* SU debug indexed registers (only for < 470) */ + if (!adreno_is_a22x(adreno_dev)) + snapshot = a2xx_snapshot_indexed_registers(device, snapshot, + remain, REG_PA_SU_DEBUG_CNTL, + REG_PA_SU_DEBUG_DATA, + 0, 0x1B); + + /* CP debug registers */ + snapshot = kgsl_snapshot_add_section(device, + KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain, + a2xx_snapshot_cpdebug, NULL); + + /* MH debug indexed registers */ + snapshot = a2xx_snapshot_indexed_registers(device, snapshot, + remain, MH_DEBUG_CTRL, MH_DEBUG_DATA, 0x0, 0x40); + + /* Leia only register sets */ + if (adreno_is_a22x(adreno_dev)) { + /* RB DEBUG indexed regisers */ + snapshot = a2xx_snapshot_indexed_registers(device, snapshot, + remain, REG_RB_DEBUG_CNTL, REG_RB_DEBUG_DATA, 0, 8); + + /* RB DEBUG indexed registers bank 2 */ + snapshot = a2xx_snapshot_indexed_registers(device, snapshot, + remain, REG_RB_DEBUG_CNTL, REG_RB_DEBUG_DATA + 0x1000, + 0, 8); + + /* PC_DEBUG indexed registers */ + snapshot = a2xx_snapshot_indexed_registers(device, snapshot, + remain, REG_PC_DEBUG_CNTL, REG_PC_DEBUG_DATA, 0, 8); + + /* GRAS_DEBUG indexed registers */ + snapshot = a2xx_snapshot_indexed_registers(device, snapshot, + remain, REG_GRAS_DEBUG_CNTL, REG_GRAS_DEBUG_DATA, 0, 4); + 
+ /* MIU debug registers */ + snapshot = kgsl_snapshot_add_section(device, + KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain, + a2xx_snapshot_miudebug, NULL); + + /* SQ DEBUG debug registers */ + snapshot = kgsl_snapshot_add_section(device, + KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain, + a2xx_snapshot_sqdebug, NULL); + + /* + * Reading SQ THREAD causes bad things to happen on a running + * system, so only read it if the GPU is already hung + */ + + if (hang) { + /* SQ THREAD debug registers */ + snapshot = kgsl_snapshot_add_section(device, + KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain, + a2xx_snapshot_sqthreaddebug, NULL); + } + } + + /* Reset the clock gating */ + adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, pmoverride); + + return snapshot; +} diff --git a/drivers/gpu/msm/adreno_a2xx_trace.c b/drivers/gpu/msm/adreno_a2xx_trace.c new file mode 100644 index 0000000000000..87c930b04389e --- /dev/null +++ b/drivers/gpu/msm/adreno_a2xx_trace.c @@ -0,0 +1,19 @@ +/* Copyright (c) 2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include "kgsl.h" +#include "adreno.h" + +/* Instantiate tracepoints */ +#define CREATE_TRACE_POINTS +#include "adreno_a2xx_trace.h" diff --git a/drivers/gpu/msm/adreno_a2xx_trace.h b/drivers/gpu/msm/adreno_a2xx_trace.h new file mode 100644 index 0000000000000..af355d680f0cd --- /dev/null +++ b/drivers/gpu/msm/adreno_a2xx_trace.h @@ -0,0 +1,78 @@ +/* Copyright (c) 2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#if !defined(_ADRENO_A2XX_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _ADRENO_A2XX_TRACE_H + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM kgsl +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE adreno_a2xx_trace + +#include + +struct kgsl_device; + +/* + * Tracepoint for a2xx irq. Includes status info + */ +TRACE_EVENT(kgsl_a2xx_irq_status, + + TP_PROTO(struct kgsl_device *device, unsigned int master_status, + unsigned int status), + + TP_ARGS(device, master_status, status), + + TP_STRUCT__entry( + __string(device_name, device->name) + __field(unsigned int, master_status) + __field(unsigned int, status) + ), + + TP_fast_assign( + __assign_str(device_name, device->name); + __entry->master_status = master_status; + __entry->status = status; + ), + + TP_printk( + "d_name=%s master=%s status=%s", + __get_str(device_name), + __entry->master_status ? 
__print_flags(__entry->master_status, + "|", + { MASTER_INT_SIGNAL__MH_INT_STAT, "MH" }, + { MASTER_INT_SIGNAL__SQ_INT_STAT, "SQ" }, + { MASTER_INT_SIGNAL__CP_INT_STAT, "CP" }, + { MASTER_INT_SIGNAL__RBBM_INT_STAT, "RBBM" }) : "None", + __entry->status ? __print_flags(__entry->status, "|", + { CP_INT_CNTL__SW_INT_MASK, "SW" }, + { CP_INT_CNTL__T0_PACKET_IN_IB_MASK, + "T0_PACKET_IN_IB" }, + { CP_INT_CNTL__OPCODE_ERROR_MASK, "OPCODE_ERROR" }, + { CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK, + "PROTECTED_MODE_ERROR" }, + { CP_INT_CNTL__RESERVED_BIT_ERROR_MASK, + "RESERVED_BIT_ERROR" }, + { CP_INT_CNTL__IB_ERROR_MASK, "IB_ERROR" }, + { CP_INT_CNTL__IB2_INT_MASK, "IB2" }, + { CP_INT_CNTL__IB1_INT_MASK, "IB1" }, + { CP_INT_CNTL__RB_INT_MASK, "RB" }) : "None" + ) +); + +#endif /* _ADRENO_A2XX_TRACE_H */ + +/* This part must be outside protection */ +#include diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c new file mode 100644 index 0000000000000..b9ec6c4d639a0 --- /dev/null +++ b/drivers/gpu/msm/adreno_debugfs.c @@ -0,0 +1,381 @@ +/* Copyright (c) 2002,2008-2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include + +#include "kgsl.h" +#include "adreno_postmortem.h" +#include "adreno.h" + +#include "a2xx_reg.h" + +unsigned int kgsl_cff_dump_enable; +int kgsl_pm_regs_enabled; +int adreno_ib_dump_on_pagef_enabled; + +static struct dentry *pm_d_debugfs; + +static int pm_dump_set(void *data, u64 val) +{ + struct kgsl_device *device = data; + + if (val) { + mutex_lock(&device->mutex); + adreno_postmortem_dump(device, 1); + mutex_unlock(&device->mutex); + } + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(pm_dump_fops, + NULL, + pm_dump_set, "%llu\n"); + +static int pm_regs_enabled_set(void *data, u64 val) +{ + kgsl_pm_regs_enabled = val ? 1 : 0; + return 0; +} + +static int pm_regs_enabled_get(void *data, u64 *val) +{ + *val = kgsl_pm_regs_enabled; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(pm_regs_enabled_fops, + pm_regs_enabled_get, + pm_regs_enabled_set, "%llu\n"); + +static int ib_dump_on_pagef_enabled_get(void *data, u64 *val) +{ + *val = adreno_ib_dump_on_pagef_enabled; + return 0; +} + +static int ib_dump_on_pagef_enabled_set(void *data, u64 val) +{ + adreno_ib_dump_on_pagef_enabled = val ? 
1 : 0; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(ib_dump_on_pagef_enabled_fops, + ib_dump_on_pagef_enabled_get, + ib_dump_on_pagef_enabled_set, "%llu\n"); + +static int kgsl_cff_dump_enable_set(void *data, u64 val) +{ +#ifdef CONFIG_MSM_KGSL_CFF_DUMP + kgsl_cff_dump_enable = (val != 0); + return 0; +#else + return -EINVAL; +#endif +} + +static int kgsl_cff_dump_enable_get(void *data, u64 *val) +{ + *val = kgsl_cff_dump_enable; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(kgsl_cff_dump_enable_fops, kgsl_cff_dump_enable_get, + kgsl_cff_dump_enable_set, "%llu\n"); + +static int kgsl_dbgfs_open(struct inode *inode, struct file *file) +{ + file->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE); + file->private_data = inode->i_private; + return 0; +} + +static int kgsl_dbgfs_release(struct inode *inode, struct file *file) +{ + return 0; +} + +static int kgsl_hex_dump(const char *prefix, int c, uint8_t *data, + int rowc, int linec, char __user *buff) +{ + int ss; + /* Prefix of 20 chars max, 32 bytes per row, in groups of four - that's + * 8 groups at 8 chars per group plus a space, plus new-line, plus + * ending character */ + char linebuf[20 + 64 + 1 + 1]; + + ss = snprintf(linebuf, sizeof(linebuf), prefix, c); + hex_dump_to_buffer(data, linec, rowc, 4, linebuf+ss, + sizeof(linebuf)-ss, 0); + strlcat(linebuf, "\n", sizeof(linebuf)); + linebuf[sizeof(linebuf)-1] = 0; + ss = strlen(linebuf); + if (copy_to_user(buff, linebuf, ss+1)) + return -EFAULT; + return ss; +} + +static int kgsl_regread_nolock(struct kgsl_device *device, + unsigned int offsetwords, unsigned int *value) +{ + unsigned int *reg; + + if (offsetwords*sizeof(uint32_t) >= device->regspace.sizebytes) { + KGSL_DRV_ERR(device, "invalid offset %d\n", offsetwords); + return -ERANGE; + } + + reg = (unsigned int *)(device->regspace.mmio_virt_base + + (offsetwords << 2)); + *value = __raw_readl(reg); + return 0; +} + +static ssize_t kgsl_istore_read( + struct file *file, + char __user *buff, + size_t buff_count, + loff_t *ppos) +{ + int i, count, remaining, pos = 0, tot = 0; + struct kgsl_device *device = file->private_data; + const int rowc = 8; + struct adreno_device *adreno_dev; + + if (!ppos || !device) + return 0; + + adreno_dev = ADRENO_DEVICE(device); + count = adreno_dev->istore_size * ADRENO_ISTORE_WORDS; + remaining = count; + for (i = 0; i < count; i += rowc) { + unsigned int vals[rowc]; + int j, ss; + int linec = min(remaining, rowc); + remaining -= rowc; + + if (pos >= *ppos) { + for (j = 0; j < linec; ++j) + kgsl_regread_nolock(device, + ADRENO_ISTORE_START + i + j, + vals + j); + } else + memset(vals, 0, sizeof(vals)); + + ss = kgsl_hex_dump("IS: %04x: ", i, (uint8_t *)vals, rowc*4, + linec*4, buff); + if (ss < 0) + return ss; + + if (pos >= *ppos) { + if (tot+ss >= buff_count) + return tot; + tot += ss; + buff += ss; + *ppos += ss; + } + pos += ss; + } + + return tot; +} + +static const struct file_operations kgsl_istore_fops = { + .open = kgsl_dbgfs_open, + .release = kgsl_dbgfs_release, + .read = kgsl_istore_read, + .llseek = default_llseek, +}; + +typedef void (*reg_read_init_t)(struct kgsl_device *device); +typedef void (*reg_read_fill_t)(struct kgsl_device *device, int i, + unsigned int *vals, int linec); +static ssize_t kgsl_reg_read(struct kgsl_device *device, int count, + reg_read_init_t reg_read_init, + reg_read_fill_t reg_read_fill, const char *prefix, char __user *buff, + loff_t *ppos) +{ + int i, remaining; + const int rowc = 8; + + if (!ppos || *ppos || !device) + return 0; + + mutex_lock(&device->mutex); + 
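/* caller-supplied setup (e.g. selecting the debug bus) runs under the device mutex */ +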
reg_read_init(device); + remaining = count; + for (i = 0; i < count; i += rowc) { + unsigned int vals[rowc]; + int ss; + int linec = min(remaining, rowc); + remaining -= rowc; + + reg_read_fill(device, i, vals, linec); + ss = kgsl_hex_dump(prefix, i, (uint8_t *)vals, rowc*4, linec*4, + buff); + if (ss < 0) { + mutex_unlock(&device->mutex); + return ss; + } + buff += ss; + *ppos += ss; + } + mutex_unlock(&device->mutex); + + return *ppos; +} + + +static void kgsl_sx_reg_read_init(struct kgsl_device *device) +{ + kgsl_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0xFF); + kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0); +} + +static void kgsl_sx_reg_read_fill(struct kgsl_device *device, int i, + unsigned int *vals, int linec) +{ + int j; + + for (j = 0; j < linec; ++j) { + kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0x1B00 | i); + kgsl_regread(device, REG_RBBM_DEBUG_OUT, vals+j); + } +} + +static ssize_t kgsl_sx_debug_read( + struct file *file, + char __user *buff, + size_t buff_count, + loff_t *ppos) +{ + struct kgsl_device *device = file->private_data; + return kgsl_reg_read(device, 0x1B, kgsl_sx_reg_read_init, + kgsl_sx_reg_read_fill, "SX: %02x: ", buff, ppos); +} + +static const struct file_operations kgsl_sx_debug_fops = { + .open = kgsl_dbgfs_open, + .release = kgsl_dbgfs_release, + .read = kgsl_sx_debug_read, +}; + +static void kgsl_cp_reg_read_init(struct kgsl_device *device) +{ + kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0); +} + +static void kgsl_cp_reg_read_fill(struct kgsl_device *device, int i, + unsigned int *vals, int linec) +{ + int j; + + for (j = 0; j < linec; ++j) { + kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0x1628); + kgsl_regread(device, REG_RBBM_DEBUG_OUT, vals+j); + msleep(100); + } +} + +static ssize_t kgsl_cp_debug_read( + struct file *file, + char __user *buff, + size_t buff_count, + loff_t *ppos) +{ + struct kgsl_device *device = file->private_data; + return kgsl_reg_read(device, 20, kgsl_cp_reg_read_init, + kgsl_cp_reg_read_fill, + "CP: %02x: ", buff, ppos); +} + +static const struct file_operations kgsl_cp_debug_fops = { + .open = kgsl_dbgfs_open, + .release = kgsl_dbgfs_release, + .read = kgsl_cp_debug_read, +}; + +static void kgsl_mh_reg_read_init(struct kgsl_device *device) +{ + kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0); +} + +static void kgsl_mh_reg_read_fill(struct kgsl_device *device, int i, + unsigned int *vals, int linec) +{ + int j; + + for (j = 0; j < linec; ++j) { + kgsl_regwrite(device, MH_DEBUG_CTRL, i+j); + kgsl_regread(device, MH_DEBUG_DATA, vals+j); + } +} + +static ssize_t kgsl_mh_debug_read( + struct file *file, + char __user *buff, + size_t buff_count, + loff_t *ppos) +{ + struct kgsl_device *device = file->private_data; + return kgsl_reg_read(device, 0x40, kgsl_mh_reg_read_init, + kgsl_mh_reg_read_fill, + "MH: %02x: ", buff, ppos); +} + +static const struct file_operations kgsl_mh_debug_fops = { + .open = kgsl_dbgfs_open, + .release = kgsl_dbgfs_release, + .read = kgsl_mh_debug_read, +}; + +void adreno_debugfs_init(struct kgsl_device *device) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + + if (!device->d_debugfs || IS_ERR(device->d_debugfs)) + return; + + debugfs_create_file("istore", 0400, device->d_debugfs, device, + &kgsl_istore_fops); + debugfs_create_file("sx_debug", 0400, device->d_debugfs, device, + &kgsl_sx_debug_fops); + debugfs_create_file("cp_debug", 0400, device->d_debugfs, device, + &kgsl_cp_debug_fops); + debugfs_create_file("mh_debug", 0400, device->d_debugfs, device, + &kgsl_mh_debug_fops); + 
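/* writable tuning knobs: CFF dump enable, wait timeout and IB check level */ +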
debugfs_create_file("cff_dump", 0644, device->d_debugfs, device, + &kgsl_cff_dump_enable_fops); + debugfs_create_u32("wait_timeout", 0644, device->d_debugfs, + &adreno_dev->wait_timeout); + debugfs_create_u32("ib_check", 0644, device->d_debugfs, + &adreno_dev->ib_check_level); + + /* Create post mortem control files */ + + pm_d_debugfs = debugfs_create_dir("postmortem", device->d_debugfs); + + if (IS_ERR(pm_d_debugfs)) + return; + + debugfs_create_file("dump", 0600, pm_d_debugfs, device, + &pm_dump_fops); + debugfs_create_file("regs_enabled", 0644, pm_d_debugfs, device, + &pm_regs_enabled_fops); + + debugfs_create_file("ib_dump_on_pagefault", 0644, device->d_debugfs, + device, &ib_dump_on_pagef_enabled_fops); +} diff --git a/drivers/gpu/msm/adreno_debugfs.h b/drivers/gpu/msm/adreno_debugfs.h new file mode 100644 index 0000000000000..667919c246928 --- /dev/null +++ b/drivers/gpu/msm/adreno_debugfs.h @@ -0,0 +1,47 @@ +/* Copyright (c) 2002,2008-2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __ADRENO_DEBUGFS_H +#define __ADRENO_DEBUGFS_H + +#ifdef CONFIG_DEBUG_FS + +int adreno_debugfs_init(struct kgsl_device *device); + +extern int kgsl_pm_regs_enabled; +extern int adreno_ib_dump_on_pagef_enabled; + +static inline int kgsl_pmregs_enabled(void) +{ + return kgsl_pm_regs_enabled; +} + +#else +static inline int adreno_debugfs_init(struct kgsl_device *device) +{ + return 0; +} + +static inline int kgsl_pmregs_enabled(void) +{ + /* If debugfs is turned off, then always print registers */ + return 1; +} + +#endif + +static inline int is_adreno_ib_dump_on_pagef_enabled(struct kgsl_device *device) +{ + return adreno_ib_dump_on_pagef_enabled; +} + +#endif /* __ADRENO_DEBUGFS_H */ diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c new file mode 100644 index 0000000000000..e425592a24aa1 --- /dev/null +++ b/drivers/gpu/msm/adreno_drawctxt.c @@ -0,0 +1,268 @@ +/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include + +#include "kgsl.h" +#include "kgsl_sharedmem.h" +#include "adreno.h" + +/* quad for copying GMEM to context shadow */ +#define QUAD_LEN 12 + +static unsigned int gmem_copy_quad[QUAD_LEN] = { + 0x00000000, 0x00000000, 0x3f800000, + 0x00000000, 0x00000000, 0x3f800000, + 0x00000000, 0x00000000, 0x3f800000, + 0x00000000, 0x00000000, 0x3f800000 +}; + +#define TEXCOORD_LEN 8 + +static unsigned int gmem_copy_texcoord[TEXCOORD_LEN] = { + 0x00000000, 0x3f800000, + 0x3f800000, 0x3f800000, + 0x00000000, 0x00000000, + 0x3f800000, 0x00000000 +}; + +/* + * Helper functions + * These are global helper functions used by the GPUs during context switch + */ + +/** + * uint2float - convert a uint to IEEE754 single precision float + * @ uintval - value to convert + */ + +unsigned int uint2float(unsigned int uintval) +{ + unsigned int exp, frac = 0; + + if (uintval == 0) + return 0; + + exp = ilog2(uintval); + + /* Calculate fraction */ + if (23 > exp) + frac = (uintval & (~(1 << exp))) << (23 - exp); + + /* Exp is biased by 127 and shifted 23 bits */ + exp = (exp + 127) << 23; + + return exp | frac; +} + +static void set_gmem_copy_quad(struct gmem_shadow_t *shadow) +{ + /* set vertex buffer values */ + gmem_copy_quad[1] = uint2float(shadow->height); + gmem_copy_quad[3] = uint2float(shadow->width); + gmem_copy_quad[4] = uint2float(shadow->height); + gmem_copy_quad[9] = uint2float(shadow->width); + + gmem_copy_quad[0] = 0; + gmem_copy_quad[6] = 0; + gmem_copy_quad[7] = 0; + gmem_copy_quad[10] = 0; + + memcpy(shadow->quad_vertices.hostptr, gmem_copy_quad, QUAD_LEN << 2); + + memcpy(shadow->quad_texcoords.hostptr, gmem_copy_texcoord, + TEXCOORD_LEN << 2); +} + +/** + * build_quad_vtxbuff - Create a quad for saving/restoring GMEM + * @ context - Pointer to the context being created + * @ shadow - Pointer to the GMEM shadow structure + * @ incmd - Pointer to pointer to the temporary command buffer + */ + +/* quad for saving/restoring gmem */ +void build_quad_vtxbuff(struct adreno_context *drawctxt, + struct gmem_shadow_t *shadow, unsigned int **incmd) +{ + unsigned int *cmd = *incmd; + + /* quad vertex buffer location (in GPU space) */ + shadow->quad_vertices.hostptr = cmd; + shadow->quad_vertices.gpuaddr = virt2gpu(cmd, &drawctxt->gpustate); + + cmd += QUAD_LEN; + + /* tex coord buffer location (in GPU space) */ + shadow->quad_texcoords.hostptr = cmd; + shadow->quad_texcoords.gpuaddr = virt2gpu(cmd, &drawctxt->gpustate); + + cmd += TEXCOORD_LEN; + + set_gmem_copy_quad(shadow); + *incmd = cmd; +} + +/** + * adreno_drawctxt_create - create a new adreno draw context + * @device - KGSL device to create the context on + * @pagetable - Pagetable for the context + * @context- Generic KGSL context structure + * @flags - flags for the context (passed from user space) + * + * Create a new draw context for the 3D core. Return 0 on success, + * or error code on failure. 
+ */ +int adreno_drawctxt_create(struct kgsl_device *device, + struct kgsl_pagetable *pagetable, + struct kgsl_context *context, uint32_t flags) +{ + struct adreno_context *drawctxt; + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + int ret; + + drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL); + + if (drawctxt == NULL) + return -ENOMEM; + + drawctxt->pagetable = pagetable; + drawctxt->bin_base_offset = 0; + drawctxt->id = context->id; + + if (flags & KGSL_CONTEXT_PREAMBLE) + drawctxt->flags |= CTXT_FLAGS_PREAMBLE; + + if (flags & KGSL_CONTEXT_NO_GMEM_ALLOC) + drawctxt->flags |= CTXT_FLAGS_NOGMEMALLOC; + + ret = adreno_dev->gpudev->ctxt_create(adreno_dev, drawctxt); + if (ret) + goto err; + + context->devctxt = drawctxt; + return 0; +err: + kfree(drawctxt); + return ret; +} + +/** + * adreno_drawctxt_destroy - destroy a draw context + * @device - KGSL device that owns the context + * @context- Generic KGSL context container for the context + * + * Destroy an existing context. Return 0 on success or error + * code on failure. + */ + +/* destroy a drawing context */ + +void adreno_drawctxt_destroy(struct kgsl_device *device, + struct kgsl_context *context) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct adreno_context *drawctxt; + + if (context == NULL || context->devctxt == NULL) + return; + + drawctxt = context->devctxt; + + /* deactivate context */ + if (adreno_dev->drawctxt_active == drawctxt) { + /* no need to save GMEM or shader, the context is + * being destroyed. + */ + drawctxt->flags &= ~(CTXT_FLAGS_GMEM_SAVE | + CTXT_FLAGS_SHADER_SAVE | + CTXT_FLAGS_GMEM_SHADOW | + CTXT_FLAGS_STATE_SHADOW); + + adreno_drawctxt_switch(adreno_dev, NULL, 0); + } + + adreno_idle(device, KGSL_TIMEOUT_DEFAULT); + + kgsl_sharedmem_free(&drawctxt->gpustate); + kgsl_sharedmem_free(&drawctxt->context_gmem_shadow.gmemshadow); + + kfree(drawctxt); + context->devctxt = NULL; +} + +/** + * adreno_drawctxt_set_bin_base_offset - set bin base offset for the context + * @device - KGSL device that owns the context + * @context- Generic KGSL context container for the context + * @offset - Offset to set + * + * Set the bin base offset for A2XX devices. Not valid for A3XX devices. + */ + +void adreno_drawctxt_set_bin_base_offset(struct kgsl_device *device, + struct kgsl_context *context, + unsigned int offset) +{ + struct adreno_context *drawctxt = context->devctxt; + + if (drawctxt) + drawctxt->bin_base_offset = offset; +} + +/** + * adreno_drawctxt_switch - switch the current draw context + * @adreno_dev - The 3D device that owns the context + * @drawctxt - the 3D context to switch to + * @flags - Flags to accompany the switch (from user space) + * + * Switch the current draw context + */ + +void adreno_drawctxt_switch(struct adreno_device *adreno_dev, + struct adreno_context *drawctxt, + unsigned int flags) +{ + struct kgsl_device *device = &adreno_dev->dev; + + if (drawctxt) { + if (flags & KGSL_CONTEXT_SAVE_GMEM) + /* Set the flag in context so that the save is done + * when this context is switched out. */ + drawctxt->flags |= CTXT_FLAGS_GMEM_SAVE; + else + /* Remove GMEM saving flag from the context */ + drawctxt->flags &= ~CTXT_FLAGS_GMEM_SAVE; + } + + /* already current? 
*/ + if (adreno_dev->drawctxt_active == drawctxt) { + if (adreno_dev->gpudev->ctxt_draw_workaround && + adreno_is_a225(adreno_dev)) + adreno_dev->gpudev->ctxt_draw_workaround( + adreno_dev, drawctxt); + return; + } + + KGSL_CTXT_INFO(device, "from %p to %p flags %d\n", + adreno_dev->drawctxt_active, drawctxt, flags); + + /* Save the old context */ + adreno_dev->gpudev->ctxt_save(adreno_dev, adreno_dev->drawctxt_active); + + /* Set the new context */ + adreno_dev->gpudev->ctxt_restore(adreno_dev, drawctxt); + adreno_dev->drawctxt_active = drawctxt; +} + + diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h new file mode 100644 index 0000000000000..91e49f245ced0 --- /dev/null +++ b/drivers/gpu/msm/adreno_drawctxt.h @@ -0,0 +1,162 @@ +/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __ADRENO_DRAWCTXT_H +#define __ADRENO_DRAWCTXT_H + +#include "adreno_pm4types.h" +#include "a2xx_reg.h" + +/* Flags */ + +#define CTXT_FLAGS_NOT_IN_USE 0x00000000 +#define CTXT_FLAGS_IN_USE 0x00000001 + +/* state shadow memory allocated */ +#define CTXT_FLAGS_STATE_SHADOW 0x00000010 + +/* gmem shadow memory allocated */ +#define CTXT_FLAGS_GMEM_SHADOW 0x00000100 +/* gmem must be copied to shadow */ +#define CTXT_FLAGS_GMEM_SAVE 0x00000200 +/* gmem can be restored from shadow */ +#define CTXT_FLAGS_GMEM_RESTORE 0x00000400 +/* preamble packed in cmdbuffer for context switching */ +#define CTXT_FLAGS_PREAMBLE 0x00000800 +/* shader must be copied to shadow */ +#define CTXT_FLAGS_SHADER_SAVE 0x00002000 +/* shader can be restored from shadow */ +#define CTXT_FLAGS_SHADER_RESTORE 0x00004000 +/* Context has caused a GPU hang */ +#define CTXT_FLAGS_GPU_HANG 0x00008000 +/* Specifies there is no need to save GMEM */ +#define CTXT_FLAGS_NOGMEMALLOC 0x00010000 +/* Trash state for context */ +#define CTXT_FLAGS_TRASHSTATE 0x00020000 +/* per context timestamps enabled */ +#define CTXT_FLAGS_PER_CONTEXT_TS 0x00040000 +/* Context has caused a GPU hang and recovered properly */ +#define CTXT_FLAGS_GPU_HANG_RECOVERED 0x00008000 + +struct kgsl_device; +struct adreno_device; +struct kgsl_device_private; +struct kgsl_context; + +/* draw context */ +struct gmem_shadow_t { + struct kgsl_memdesc gmemshadow; /* Shadow buffer address */ + + /* 256 KB GMEM surface = 4 bytes-per-pixel x 256 pixels/row x + * 256 rows. */ + /* width & height must be a multiples of 32, in case tiled textures + * are used. 
*/ + enum COLORFORMATX format; + unsigned int size; /* Size of surface used to store GMEM */ + unsigned int width; /* Width of surface used to store GMEM */ + unsigned int height; /* Height of surface used to store GMEM */ + unsigned int pitch; /* Pitch of surface used to store GMEM */ + unsigned int gmem_pitch; /* Pitch value used for GMEM */ + unsigned int *gmem_save_commands; + unsigned int *gmem_restore_commands; + unsigned int gmem_save[3]; + unsigned int gmem_restore[3]; + struct kgsl_memdesc quad_vertices; + struct kgsl_memdesc quad_texcoords; +}; + +struct adreno_context { + unsigned int id; + uint32_t flags; + struct kgsl_pagetable *pagetable; + struct kgsl_memdesc gpustate; + unsigned int reg_save[3]; + unsigned int reg_restore[3]; + unsigned int shader_save[3]; + unsigned int shader_fixup[3]; + unsigned int shader_restore[3]; + unsigned int chicken_restore[3]; + unsigned int bin_base_offset; + /* Information of the GMEM shadow that is created in context create */ + struct gmem_shadow_t context_gmem_shadow; +}; + +int adreno_drawctxt_create(struct kgsl_device *device, + struct kgsl_pagetable *pagetable, + struct kgsl_context *context, + uint32_t flags); + +void adreno_drawctxt_destroy(struct kgsl_device *device, + struct kgsl_context *context); + +void adreno_drawctxt_switch(struct adreno_device *adreno_dev, + struct adreno_context *drawctxt, + unsigned int flags); +void adreno_drawctxt_set_bin_base_offset(struct kgsl_device *device, + struct kgsl_context *context, + unsigned int offset); + +/* GPU context switch helper functions */ + +void build_quad_vtxbuff(struct adreno_context *drawctxt, + struct gmem_shadow_t *shadow, unsigned int **incmd); + +unsigned int uint2float(unsigned int); + +static inline unsigned int virt2gpu(unsigned int *cmd, + struct kgsl_memdesc *memdesc) +{ + return memdesc->gpuaddr + ((char *) cmd - (char *) memdesc->hostptr); +} + +static inline void create_ib1(struct adreno_context *drawctxt, + unsigned int *cmd, + unsigned int *start, + unsigned int *end) +{ + cmd[0] = CP_HDR_INDIRECT_BUFFER_PFD; + cmd[1] = virt2gpu(start, &drawctxt->gpustate); + cmd[2] = end - start; +} + + +static inline unsigned int *reg_range(unsigned int *cmd, unsigned int start, + unsigned int end) +{ + *cmd++ = CP_REG(start); /* h/w regs, start addr */ + *cmd++ = end - start + 1; /* count */ + return cmd; +} + +static inline void calc_gmemsize(struct gmem_shadow_t *shadow, int gmem_size) +{ + int w = 64, h = 64; + + shadow->format = COLORX_8_8_8_8; + + /* convert from bytes to 32-bit words */ + gmem_size = (gmem_size + 3) / 4; + + while ((w * h) < gmem_size) { + if (w < h) + w *= 2; + else + h *= 2; + } + + shadow->pitch = shadow->width = w; + shadow->height = h; + shadow->gmem_pitch = shadow->pitch; + shadow->size = shadow->pitch * shadow->height * 4; +} + +#endif /* __ADRENO_DRAWCTXT_H */ diff --git a/drivers/gpu/msm/adreno_pm4types.h b/drivers/gpu/msm/adreno_pm4types.h new file mode 100644 index 0000000000000..8f3da7497fdb0 --- /dev/null +++ b/drivers/gpu/msm/adreno_pm4types.h @@ -0,0 +1,220 @@ +/* Copyright (c) 2002,2007-2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + */ +#ifndef __ADRENO_PM4TYPES_H +#define __ADRENO_PM4TYPES_H + + +#define CP_PKT_MASK 0xc0000000 + +#define CP_TYPE0_PKT ((unsigned int)0 << 30) +#define CP_TYPE1_PKT ((unsigned int)1 << 30) +#define CP_TYPE2_PKT ((unsigned int)2 << 30) +#define CP_TYPE3_PKT ((unsigned int)3 << 30) + + +/* type3 packets */ +/* initialize CP's micro-engine */ +#define CP_ME_INIT 0x48 + +/* skip N 32-bit words to get to the next packet */ +#define CP_NOP 0x10 + +/* indirect buffer dispatch. same as IB, but init is pipelined */ +#define CP_INDIRECT_BUFFER_PFD 0x37 + +/* wait for the IDLE state of the engine */ +#define CP_WAIT_FOR_IDLE 0x26 + +/* wait until a register or memory location is a specific value */ +#define CP_WAIT_REG_MEM 0x3c + +/* wait until a register location is equal to a specific value */ +#define CP_WAIT_REG_EQ 0x52 + +/* wait until a register location is >= a specific value */ +#define CP_WAT_REG_GTE 0x53 + +/* wait until a read completes */ +#define CP_WAIT_UNTIL_READ 0x5c + +/* wait until all base/size writes from an IB_PFD packet have completed */ +#define CP_WAIT_IB_PFD_COMPLETE 0x5d + +/* register read/modify/write */ +#define CP_REG_RMW 0x21 + +/* reads register in chip and writes to memory */ +#define CP_REG_TO_MEM 0x3e + +/* write N 32-bit words to memory */ +#define CP_MEM_WRITE 0x3d + +/* write CP_PROG_COUNTER value to memory */ +#define CP_MEM_WRITE_CNTR 0x4f + +/* conditional execution of a sequence of packets */ +#define CP_COND_EXEC 0x44 + +/* conditional write to memory or register */ +#define CP_COND_WRITE 0x45 + +/* generate an event that creates a write to memory when completed */ +#define CP_EVENT_WRITE 0x46 + +/* generate a VS|PS_done event */ +#define CP_EVENT_WRITE_SHD 0x58 + +/* generate a cache flush done event */ +#define CP_EVENT_WRITE_CFL 0x59 + +/* generate a z_pass done event */ +#define CP_EVENT_WRITE_ZPD 0x5b + + +/* initiate fetch of index buffer and draw */ +#define CP_DRAW_INDX 0x22 + +/* draw using supplied indices in packet */ +#define CP_DRAW_INDX_2 0x36 + +/* initiate fetch of index buffer and binIDs and draw */ +#define CP_DRAW_INDX_BIN 0x34 + +/* initiate fetch of bin IDs and draw using supplied indices */ +#define CP_DRAW_INDX_2_BIN 0x35 + + +/* begin/end initiator for viz query extent processing */ +#define CP_VIZ_QUERY 0x23 + +/* fetch state sub-blocks and initiate shader code DMAs */ +#define CP_SET_STATE 0x25 + +/* load constant into chip and to memory */ +#define CP_SET_CONSTANT 0x2d + +/* load sequencer instruction memory (pointer-based) */ +#define CP_IM_LOAD 0x27 + +/* load sequencer instruction memory (code embedded in packet) */ +#define CP_IM_LOAD_IMMEDIATE 0x2b + +/* load constants from a location in memory */ +#define CP_LOAD_CONSTANT_CONTEXT 0x2e + +/* (A2x) sets binning configuration registers */ +#define CP_SET_BIN_DATA 0x2f + +/* selective invalidation of state pointers */ +#define CP_INVALIDATE_STATE 0x3b + + +/* dynamically changes shader instruction memory partition */ +#define CP_SET_SHADER_BASES 0x4A + +/* sets the 64-bit BIN_MASK register in the PFP */ +#define CP_SET_BIN_MASK 0x50 + +/* sets the 64-bit BIN_SELECT register in the PFP */ +#define CP_SET_BIN_SELECT 0x51 + + +/* updates the current context, if needed */ +#define CP_CONTEXT_UPDATE 0x5e + +/* generate interrupt from the command stream */ +#define CP_INTERRUPT 0x40 + + +/* copy sequencer instruction memory to system memory */ +#define CP_IM_STORE 0x2c + +/* + * for a20x + * program an offset that will 
added to the BIN_BASE value of + * the 3D_DRAW_INDX_BIN packet + */ +#define CP_SET_BIN_BASE_OFFSET 0x4B + +/* + * for a22x + * sets draw initiator flags register in PFP, gets bitwise-ORed into + * every draw initiator + */ +#define CP_SET_DRAW_INIT_FLAGS 0x4B + +#define CP_SET_PROTECTED_MODE 0x5f /* sets the register protection mode */ + +/* + * for a3xx + */ + +/* Conditionally load a IB based on a flag */ +#define CP_COND_INDIRECT_BUFFER_PFE 0x3A /* prefetch enabled */ +#define CP_COND_INDIRECT_BUFFER_PFD 0x32 /* prefetch disabled */ + +/* Load a buffer with pre-fetch enabled */ +#define CP_INDIRECT_BUFFER_PFE 0x3F + +/* packet header building macros */ +#define cp_type0_packet(regindx, cnt) \ + (CP_TYPE0_PKT | (((cnt)-1) << 16) | ((regindx) & 0x7FFF)) + +#define cp_type0_packet_for_sameregister(regindx, cnt) \ + ((CP_TYPE0_PKT | (((cnt)-1) << 16) | ((1 << 15) | \ + ((regindx) & 0x7FFF))) + +#define cp_type1_packet(reg0, reg1) \ + (CP_TYPE1_PKT | ((reg1) << 12) | (reg0)) + +#define cp_type3_packet(opcode, cnt) \ + (CP_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8)) + +#define cp_predicated_type3_packet(opcode, cnt) \ + (CP_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8) | 0x1) + +#define cp_nop_packet(cnt) \ + (CP_TYPE3_PKT | (((cnt)-1) << 16) | (CP_NOP << 8)) + +#define pkt_is_type0(pkt) (((pkt) & 0XC0000000) == CP_TYPE0_PKT) + +#define type0_pkt_size(pkt) ((((pkt) >> 16) & 0x3FFF) + 1) +#define type0_pkt_offset(pkt) ((pkt) & 0x7FFF) + +#define pkt_is_type3(pkt) (((pkt) & 0xC0000000) == CP_TYPE3_PKT) + +#define cp_type3_opcode(pkt) (((pkt) >> 8) & 0xFF) +#define type3_pkt_size(pkt) ((((pkt) >> 16) & 0x3FFF) + 1) + +/* packet headers */ +#define CP_HDR_ME_INIT cp_type3_packet(CP_ME_INIT, 18) +#define CP_HDR_INDIRECT_BUFFER_PFD cp_type3_packet(CP_INDIRECT_BUFFER_PFD, 2) +#define CP_HDR_INDIRECT_BUFFER_PFE cp_type3_packet(CP_INDIRECT_BUFFER_PFE, 2) + +/* dword base address of the GFX decode space */ +#define SUBBLOCK_OFFSET(reg) ((unsigned int)((reg) - (0x2000))) + +/* gmem command buffer length */ +#define CP_REG(reg) ((0x4 << 16) | (SUBBLOCK_OFFSET(reg))) + + +/* Return 1 if the command is an indirect buffer of any kind */ +static inline int adreno_cmd_is_ib(unsigned int cmd) +{ + return (cmd == cp_type3_packet(CP_INDIRECT_BUFFER_PFE, 2) || + cmd == cp_type3_packet(CP_INDIRECT_BUFFER_PFD, 2) || + cmd == cp_type3_packet(CP_COND_INDIRECT_BUFFER_PFE, 2) || + cmd == cp_type3_packet(CP_COND_INDIRECT_BUFFER_PFD, 2)); +} + +#endif /* __ADRENO_PM4TYPES_H */ diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c new file mode 100644 index 0000000000000..bdd65f2ddb4a7 --- /dev/null +++ b/drivers/gpu/msm/adreno_postmortem.c @@ -0,0 +1,771 @@ +/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include + +#include "kgsl.h" + +#include "adreno.h" +#include "adreno_pm4types.h" +#include "adreno_ringbuffer.h" +#include "adreno_postmortem.h" +#include "adreno_debugfs.h" +#include "kgsl_cffdump.h" +#include "kgsl_pwrctrl.h" + +#include "a2xx_reg.h" + +#define INVALID_RB_CMD 0xaaaaaaaa +#define NUM_DWORDS_OF_RINGBUFFER_HISTORY 100 + +struct pm_id_name { + uint32_t id; + char name[9]; +}; + +static const struct pm_id_name pm0_types[] = { + {REG_PA_SC_AA_CONFIG, "RPASCAAC"}, + {REG_RBBM_PM_OVERRIDE2, "RRBBPMO2"}, + {REG_SCRATCH_REG2, "RSCRTRG2"}, + {REG_SQ_GPR_MANAGEMENT, "RSQGPRMN"}, + {REG_SQ_INST_STORE_MANAGMENT, "RSQINSTS"}, + {REG_TC_CNTL_STATUS, "RTCCNTLS"}, + {REG_TP0_CHICKEN, "RTP0CHCK"}, + {REG_CP_TIMESTAMP, "CP_TM_ST"}, +}; + +static const struct pm_id_name pm3_types[] = { + {CP_COND_EXEC, "CND_EXEC"}, + {CP_CONTEXT_UPDATE, "CX__UPDT"}, + {CP_DRAW_INDX, "DRW_NDX_"}, + {CP_DRAW_INDX_BIN, "DRW_NDXB"}, + {CP_EVENT_WRITE, "EVENT_WT"}, + {CP_IM_LOAD, "IN__LOAD"}, + {CP_IM_LOAD_IMMEDIATE, "IM_LOADI"}, + {CP_IM_STORE, "IM_STORE"}, + {CP_INDIRECT_BUFFER_PFE, "IND_BUF_"}, + {CP_INDIRECT_BUFFER_PFD, "IND_BUFP"}, + {CP_INTERRUPT, "PM4_INTR"}, + {CP_INVALIDATE_STATE, "INV_STAT"}, + {CP_LOAD_CONSTANT_CONTEXT, "LD_CN_CX"}, + {CP_ME_INIT, "ME__INIT"}, + {CP_NOP, "PM4__NOP"}, + {CP_REG_RMW, "REG__RMW"}, + {CP_REG_TO_MEM, "REG2_MEM"}, + {CP_SET_BIN_BASE_OFFSET, "ST_BIN_O"}, + {CP_SET_CONSTANT, "ST_CONST"}, + {CP_SET_PROTECTED_MODE, "ST_PRT_M"}, + {CP_SET_SHADER_BASES, "ST_SHD_B"}, + {CP_WAIT_FOR_IDLE, "WAIT4IDL"}, +}; + +static uint32_t adreno_is_pm4_len(uint32_t word) +{ + if (word == INVALID_RB_CMD) + return 0; + + return (word >> 16) & 0x3FFF; +} + +static bool adreno_is_pm4_type(uint32_t word) +{ + int i; + + if (word == INVALID_RB_CMD) + return 1; + + if (adreno_is_pm4_len(word) > 16) + return 0; + + if ((word & (3<<30)) == CP_TYPE0_PKT) { + for (i = 0; i < ARRAY_SIZE(pm0_types); ++i) { + if ((word & 0x7FFF) == pm0_types[i].id) + return 1; + } + return 0; + } + if ((word & (3<<30)) == CP_TYPE3_PKT) { + for (i = 0; i < ARRAY_SIZE(pm3_types); ++i) { + if ((word & 0xFFFF) == (pm3_types[i].id << 8)) + return 1; + } + return 0; + } + return 0; +} + +static const char *adreno_pm4_name(uint32_t word) +{ + int i; + + if (word == INVALID_RB_CMD) + return "--------"; + + if ((word & (3<<30)) == CP_TYPE0_PKT) { + for (i = 0; i < ARRAY_SIZE(pm0_types); ++i) { + if ((word & 0x7FFF) == pm0_types[i].id) + return pm0_types[i].name; + } + return "????????"; + } + if ((word & (3<<30)) == CP_TYPE3_PKT) { + for (i = 0; i < ARRAY_SIZE(pm3_types); ++i) { + if ((word & 0xFFFF) == (pm3_types[i].id << 8)) + return pm3_types[i].name; + } + return "????????"; + } + return "????????"; +} + +static void adreno_dump_regs(struct kgsl_device *device, + const int *registers, int size) +{ + int range = 0, offset = 0; + + for (range = 0; range < size; range++) { + /* start and end are in dword offsets */ + int start = registers[range * 2]; + int end = registers[range * 2 + 1]; + + unsigned char linebuf[32 * 3 + 2 + 32 + 1]; + int linelen, i; + + for (offset = start; offset <= end; offset += linelen) { + unsigned int regvals[32/4]; + linelen = min(end+1-offset, 32/4); + + for (i = 0; i < linelen; ++i) + kgsl_regread(device, offset+i, regvals+i); + + hex_dump_to_buffer(regvals, linelen*4, 32, 4, + linebuf, sizeof(linebuf), 0); + KGSL_LOG_DUMP(device, + "REG: %5.5X: %s\n", offset, linebuf); + } + } +} + +void dump_ib(struct kgsl_device *device, char *buffId, uint32_t pt_base, + uint32_t base_offset, uint32_t 
ib_base, uint32_t ib_size, bool dump) +{ + uint8_t *base_addr = adreno_convertaddr(device, pt_base, + ib_base, ib_size*sizeof(uint32_t)); + + if (base_addr && dump) + print_hex_dump(KERN_ERR, buffId, DUMP_PREFIX_OFFSET, + 32, 4, base_addr, ib_size*4, 0); + else + KGSL_LOG_DUMP(device, "%s base:%8.8X ib_size:%d " + "offset:%5.5X%s\n", + buffId, ib_base, ib_size*4, base_offset, + base_addr ? "" : " [Invalid]"); +} + +void dump_ib1(struct kgsl_device *device, uint32_t pt_base, + uint32_t base_offset, + uint32_t ib1_base, uint32_t ib1_size, + struct ib_list *ib_list, bool dump) +{ + int i, j; + uint32_t value; + uint32_t *ib1_addr; + + dump_ib(device, "IB1:", pt_base, base_offset, ib1_base, + ib1_size, dump); + + /* fetch virtual address for given IB base */ + ib1_addr = (uint32_t *)adreno_convertaddr(device, pt_base, + ib1_base, ib1_size*sizeof(uint32_t)); + if (!ib1_addr) + return; + + for (i = 0; i+3 < ib1_size; ) { + value = ib1_addr[i++]; + if (adreno_cmd_is_ib(value)) { + uint32_t ib2_base = ib1_addr[i++]; + uint32_t ib2_size = ib1_addr[i++]; + + /* find previous match */ + for (j = 0; j < ib_list->count; ++j) + if (ib_list->sizes[j] == ib2_size + && ib_list->bases[j] == ib2_base) + break; + + if (j < ib_list->count || ib_list->count + >= IB_LIST_SIZE) + continue; + + /* store match */ + ib_list->sizes[ib_list->count] = ib2_size; + ib_list->bases[ib_list->count] = ib2_base; + ib_list->offsets[ib_list->count] = i<<2; + ++ib_list->count; + } + } +} + +static void adreno_dump_rb_buffer(const void *buf, size_t len, + char *linebuf, size_t linebuflen, int *argp) +{ + const u32 *ptr4 = buf; + const int ngroups = len; + int lx = 0, j; + bool nxsp = 1; + + for (j = 0; j < ngroups; j++) { + if (*argp < 0) { + lx += scnprintf(linebuf + lx, linebuflen - lx, " <"); + *argp = -*argp; + } else if (nxsp) + lx += scnprintf(linebuf + lx, linebuflen - lx, " "); + else + nxsp = 1; + if (!*argp && adreno_is_pm4_type(ptr4[j])) { + lx += scnprintf(linebuf + lx, linebuflen - lx, + "%s", adreno_pm4_name(ptr4[j])); + *argp = -(adreno_is_pm4_len(ptr4[j])+1); + } else { + lx += scnprintf(linebuf + lx, linebuflen - lx, + "%8.8X", ptr4[j]); + if (*argp > 1) + --*argp; + else if (*argp == 1) { + *argp = 0; + nxsp = 0; + lx += scnprintf(linebuf + lx, linebuflen - lx, + "> "); + } + } + } + linebuf[lx] = '\0'; +} + +static bool adreno_rb_use_hex(void) +{ +#ifdef CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX + return 1; +#else + return 0; +#endif +} + +static void adreno_dump_rb(struct kgsl_device *device, const void *buf, + size_t len, int start, int size) +{ + const uint32_t *ptr = buf; + int i, remaining, args = 0; + unsigned char linebuf[32 * 3 + 2 + 32 + 1]; + const int rowsize = 8; + + len >>= 2; + remaining = len; + for (i = 0; i < len; i += rowsize) { + int linelen = min(remaining, rowsize); + remaining -= rowsize; + + if (adreno_rb_use_hex()) + hex_dump_to_buffer(ptr+i, linelen*4, rowsize*4, 4, + linebuf, sizeof(linebuf), 0); + else + adreno_dump_rb_buffer(ptr+i, linelen, linebuf, + sizeof(linebuf), &args); + KGSL_LOG_DUMP(device, + "RB: %4.4X:%s\n", (start+i)%size, linebuf); + } +} + +static bool adreno_ib_dump_enabled(void) +{ +#ifdef CONFIG_MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP + return 0; +#else + return 1; +#endif +} + +struct log_field { + bool show; + const char *display; +}; + +static int adreno_dump_fields_line(struct kgsl_device *device, + const char *start, char *str, int slen, + const struct log_field **lines, + int num) +{ + const struct log_field *l = *lines; + int sptr, count = 0; + + sptr = snprintf(str, slen, 
"%s", start); + + for ( ; num && sptr < slen; num--, l++) { + int ilen = strlen(l->display); + + if (!l->show) + continue; + + if (count) + ilen += strlen(" | "); + + if (ilen > (slen - sptr)) + break; + + if (count++) + sptr += snprintf(str + sptr, slen - sptr, " | "); + + sptr += snprintf(str + sptr, slen - sptr, "%s", l->display); + } + + KGSL_LOG_DUMP(device, "%s\n", str); + + *lines = l; + return num; +} + +static void adreno_dump_fields(struct kgsl_device *device, + const char *start, const struct log_field *lines, + int num) +{ + char lb[90]; + const char *sstr = start; + + lb[sizeof(lb) - 1] = '\0'; + + while (num) { + int ret = adreno_dump_fields_line(device, sstr, lb, + sizeof(lb) - 1, &lines, num); + + if (ret == num) + break; + + num = ret; + sstr = " "; + } +} + +static int adreno_dump(struct kgsl_device *device) +{ + unsigned int r1, r2, r3, rbbm_status; + unsigned int cp_ib1_base, cp_ib1_bufsz, cp_stat; + unsigned int cp_ib2_base, cp_ib2_bufsz; + unsigned int pt_base, cur_pt_base; + unsigned int cp_rb_base, rb_count; + unsigned int cp_rb_wptr, cp_rb_rptr; + unsigned int i; + int result = 0; + uint32_t *rb_copy; + const uint32_t *rb_vaddr; + int num_item = 0; + int read_idx, write_idx; + unsigned int ts_processed; + + static struct ib_list ib_list; + + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + + mb(); + + kgsl_regread(device, REG_RBBM_STATUS, &rbbm_status); + kgsl_regread(device, REG_RBBM_PM_OVERRIDE1, &r2); + kgsl_regread(device, REG_RBBM_PM_OVERRIDE2, &r3); + KGSL_LOG_DUMP(device, "RBBM: STATUS = %08X | PM_OVERRIDE1 = %08X | " + "PM_OVERRIDE2 = %08X\n", rbbm_status, r2, r3); + + kgsl_regread(device, REG_RBBM_INT_CNTL, &r1); + kgsl_regread(device, REG_RBBM_INT_STATUS, &r2); + kgsl_regread(device, REG_RBBM_READ_ERROR, &r3); + KGSL_LOG_DUMP(device, " INT_CNTL = %08X | INT_STATUS = %08X | " + "READ_ERROR = %08X\n", r1, r2, r3); + + { + char cmdFifo[16]; + struct log_field lines[] = { + {rbbm_status & 0x001F, cmdFifo}, + {rbbm_status & BIT(5), "TC busy "}, + {rbbm_status & BIT(8), "HIRQ pending"}, + {rbbm_status & BIT(9), "CPRQ pending"}, + {rbbm_status & BIT(10), "CFRQ pending"}, + {rbbm_status & BIT(11), "PFRQ pending"}, + {rbbm_status & BIT(12), "VGT 0DMA bsy"}, + {rbbm_status & BIT(14), "RBBM WU busy"}, + {rbbm_status & BIT(16), "CP NRT busy "}, + {rbbm_status & BIT(18), "MH busy "}, + {rbbm_status & BIT(19), "MH chncy bsy"}, + {rbbm_status & BIT(21), "SX busy "}, + {rbbm_status & BIT(22), "TPC busy "}, + {rbbm_status & BIT(24), "SC CNTX busy"}, + {rbbm_status & BIT(25), "PA busy "}, + {rbbm_status & BIT(26), "VGT busy "}, + {rbbm_status & BIT(27), "SQ cntx1 bsy"}, + {rbbm_status & BIT(28), "SQ cntx0 bsy"}, + {rbbm_status & BIT(30), "RB busy "}, + {rbbm_status & BIT(31), "Grphs pp bsy"}, + }; + snprintf(cmdFifo, sizeof(cmdFifo), "CMD FIFO=%01X ", + rbbm_status & 0xf); + adreno_dump_fields(device, " STATUS=", lines, + ARRAY_SIZE(lines)); + } + + kgsl_regread(device, REG_CP_RB_BASE, &cp_rb_base); + kgsl_regread(device, REG_CP_RB_CNTL, &r2); + rb_count = 2 << (r2 & (BIT(6)-1)); + kgsl_regread(device, REG_CP_RB_RPTR_ADDR, &r3); + KGSL_LOG_DUMP(device, + "CP_RB: BASE = %08X | CNTL = %08X | RPTR_ADDR = %08X" + " | rb_count = %08X\n", cp_rb_base, r2, r3, rb_count); + { + struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer; + if (rb->sizedwords != rb_count) + rb_count = rb->sizedwords; + } + + kgsl_regread(device, REG_CP_RB_RPTR, &cp_rb_rptr); + kgsl_regread(device, REG_CP_RB_WPTR, &cp_rb_wptr); + kgsl_regread(device, REG_CP_RB_RPTR_WR, &r3); + 
KGSL_LOG_DUMP(device, + " RPTR = %08X | WPTR = %08X | RPTR_WR = %08X" + "\n", cp_rb_rptr, cp_rb_wptr, r3); + + kgsl_regread(device, REG_CP_IB1_BASE, &cp_ib1_base); + kgsl_regread(device, REG_CP_IB1_BUFSZ, &cp_ib1_bufsz); + KGSL_LOG_DUMP(device, + "CP_IB1: BASE = %08X | BUFSZ = %d\n", cp_ib1_base, + cp_ib1_bufsz); + + kgsl_regread(device, REG_CP_IB2_BASE, &cp_ib2_base); + kgsl_regread(device, REG_CP_IB2_BUFSZ, &cp_ib2_bufsz); + KGSL_LOG_DUMP(device, + "CP_IB2: BASE = %08X | BUFSZ = %d\n", cp_ib2_base, + cp_ib2_bufsz); + + kgsl_regread(device, REG_CP_INT_CNTL, &r1); + kgsl_regread(device, REG_CP_INT_STATUS, &r2); + KGSL_LOG_DUMP(device, "CP_INT: CNTL = %08X | STATUS = %08X\n", r1, r2); + + kgsl_regread(device, REG_CP_ME_CNTL, &r1); + kgsl_regread(device, REG_CP_ME_STATUS, &r2); + kgsl_regread(device, REG_MASTER_INT_SIGNAL, &r3); + KGSL_LOG_DUMP(device, + "CP_ME: CNTL = %08X | STATUS = %08X | MSTR_INT_SGNL = " + "%08X\n", r1, r2, r3); + + kgsl_regread(device, REG_CP_STAT, &cp_stat); + KGSL_LOG_DUMP(device, "CP_STAT = %08X\n", cp_stat); +#ifndef CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL + { + struct log_field lns[] = { + {cp_stat & BIT(0), "WR_BSY 0"}, + {cp_stat & BIT(1), "RD_RQ_BSY 1"}, + {cp_stat & BIT(2), "RD_RTN_BSY 2"}, + }; + adreno_dump_fields(device, " MIU=", lns, ARRAY_SIZE(lns)); + } + { + struct log_field lns[] = { + {cp_stat & BIT(5), "RING_BUSY 5"}, + {cp_stat & BIT(6), "NDRCTS_BSY 6"}, + {cp_stat & BIT(7), "NDRCT2_BSY 7"}, + {cp_stat & BIT(9), "ST_BUSY 9"}, + {cp_stat & BIT(10), "BUSY 10"}, + }; + adreno_dump_fields(device, " CSF=", lns, ARRAY_SIZE(lns)); + } + { + struct log_field lns[] = { + {cp_stat & BIT(11), "RNG_Q_BSY 11"}, + {cp_stat & BIT(12), "NDRCTS_Q_B12"}, + {cp_stat & BIT(13), "NDRCT2_Q_B13"}, + {cp_stat & BIT(16), "ST_QUEUE_B16"}, + {cp_stat & BIT(17), "PFP_BUSY 17"}, + }; + adreno_dump_fields(device, " RING=", lns, ARRAY_SIZE(lns)); + } + { + struct log_field lns[] = { + {cp_stat & BIT(3), "RBIU_BUSY 3"}, + {cp_stat & BIT(4), "RCIU_BUSY 4"}, + {cp_stat & BIT(18), "MQ_RG_BSY 18"}, + {cp_stat & BIT(19), "MQ_NDRS_BS19"}, + {cp_stat & BIT(20), "MQ_NDR2_BS20"}, + {cp_stat & BIT(21), "MIU_WC_STL21"}, + {cp_stat & BIT(22), "CP_NRT_BSY22"}, + {cp_stat & BIT(23), "3D_BUSY 23"}, + {cp_stat & BIT(26), "ME_BUSY 26"}, + {cp_stat & BIT(29), "ME_WC_BSY 29"}, + {cp_stat & BIT(30), "MIU_FF EM 30"}, + {cp_stat & BIT(31), "CP_BUSY 31"}, + }; + adreno_dump_fields(device, " CP_STT=", lns, ARRAY_SIZE(lns)); + } +#endif + + kgsl_regread(device, REG_SCRATCH_REG0, &r1); + KGSL_LOG_DUMP(device, "SCRATCH_REG0 = %08X\n", r1); + + kgsl_regread(device, REG_COHER_SIZE_PM4, &r1); + kgsl_regread(device, REG_COHER_BASE_PM4, &r2); + kgsl_regread(device, REG_COHER_STATUS_PM4, &r3); + KGSL_LOG_DUMP(device, + "COHER: SIZE_PM4 = %08X | BASE_PM4 = %08X | STATUS_PM4" + " = %08X\n", r1, r2, r3); + + kgsl_regread(device, MH_AXI_ERROR, &r1); + KGSL_LOG_DUMP(device, "MH: AXI_ERROR = %08X\n", r1); + + kgsl_regread(device, MH_MMU_PAGE_FAULT, &r1); + kgsl_regread(device, MH_MMU_CONFIG, &r2); + kgsl_regread(device, MH_MMU_MPU_BASE, &r3); + KGSL_LOG_DUMP(device, + "MH_MMU: PAGE_FAULT = %08X | CONFIG = %08X | MPU_BASE =" + " %08X\n", r1, r2, r3); + + kgsl_regread(device, MH_MMU_MPU_END, &r1); + kgsl_regread(device, MH_MMU_VA_RANGE, &r2); + pt_base = kgsl_mmu_get_current_ptbase(device); + KGSL_LOG_DUMP(device, + " MPU_END = %08X | VA_RANGE = %08X | PT_BASE =" + " %08X\n", r1, r2, pt_base); + cur_pt_base = pt_base; + + KGSL_LOG_DUMP(device, "PAGETABLE SIZE: %08X ", + kgsl_mmu_get_ptsize()); + + 
kgsl_regread(device, MH_MMU_TRAN_ERROR, &r1); + KGSL_LOG_DUMP(device, " TRAN_ERROR = %08X\n", r1); + + kgsl_regread(device, MH_INTERRUPT_MASK, &r1); + kgsl_regread(device, MH_INTERRUPT_STATUS, &r2); + KGSL_LOG_DUMP(device, + "MH_INTERRUPT: MASK = %08X | STATUS = %08X\n", r1, r2); + + ts_processed = device->ftbl->readtimestamp(device, + KGSL_TIMESTAMP_RETIRED); + KGSL_LOG_DUMP(device, "TIMESTM RTRD: %08X\n", ts_processed); + + num_item = adreno_ringbuffer_count(&adreno_dev->ringbuffer, + cp_rb_rptr); + if (num_item <= 0) + KGSL_LOG_POSTMORTEM_WRITE(device, "Ringbuffer is Empty.\n"); + + rb_copy = vmalloc(rb_count<<2); + if (!rb_copy) { + KGSL_LOG_POSTMORTEM_WRITE(device, + "vmalloc(%d) failed\n", rb_count << 2); + result = -ENOMEM; + goto end; + } + + KGSL_LOG_DUMP(device, "RB: rd_addr:%8.8x rb_size:%d num_item:%d\n", + cp_rb_base, rb_count<<2, num_item); + + if (adreno_dev->ringbuffer.buffer_desc.gpuaddr != cp_rb_base) + KGSL_LOG_POSTMORTEM_WRITE(device, + "rb address mismatch, should be 0x%08x\n", + adreno_dev->ringbuffer.buffer_desc.gpuaddr); + + rb_vaddr = adreno_dev->ringbuffer.buffer_desc.hostptr; + if (!rb_vaddr) { + KGSL_LOG_POSTMORTEM_WRITE(device, + "rb has no kernel mapping!\n"); + goto error_vfree; + } + + read_idx = (int)cp_rb_rptr - NUM_DWORDS_OF_RINGBUFFER_HISTORY; + if (read_idx < 0) + read_idx += rb_count; + write_idx = (int)cp_rb_wptr + 16; + if (write_idx > rb_count) + write_idx -= rb_count; + num_item += NUM_DWORDS_OF_RINGBUFFER_HISTORY+16; + if (num_item > rb_count) + num_item = rb_count; + if (write_idx >= read_idx) + memcpy(rb_copy, rb_vaddr+read_idx, num_item<<2); + else { + int part1_c = rb_count-read_idx; + memcpy(rb_copy, rb_vaddr+read_idx, part1_c<<2); + memcpy(rb_copy+part1_c, rb_vaddr, (num_item-part1_c)<<2); + } + + /* extract the latest ib commands from the buffer */ + ib_list.count = 0; + i = 0; + for (read_idx = 0; read_idx < num_item; ) { + uint32_t this_cmd = rb_copy[read_idx++]; + if (adreno_cmd_is_ib(this_cmd)) { + uint32_t ib_addr = rb_copy[read_idx++]; + uint32_t ib_size = rb_copy[read_idx++]; + dump_ib1(device, cur_pt_base, (read_idx-3)<<2, ib_addr, + ib_size, &ib_list, 0); + for (; i < ib_list.count; ++i) + dump_ib(device, "IB2:", cur_pt_base, + ib_list.offsets[i], + ib_list.bases[i], + ib_list.sizes[i], 0); + } else if (this_cmd == cp_type0_packet(MH_MMU_PT_BASE, 1)) { + + KGSL_LOG_DUMP(device, "Current pagetable: %x\t" + "pagetable base: %x\n", + kgsl_mmu_get_ptname_from_ptbase(cur_pt_base), + cur_pt_base); + + /* Set cur_pt_base to the new pagetable base */ + cur_pt_base = rb_copy[read_idx++]; + + KGSL_LOG_DUMP(device, "New pagetable: %x\t" + "pagetable base: %x\n", + kgsl_mmu_get_ptname_from_ptbase(cur_pt_base), + cur_pt_base); + } + } + + /* Restore cur_pt_base back to the pt_base of + the process in whose context the GPU hung */ + cur_pt_base = pt_base; + + read_idx = (int)cp_rb_rptr - NUM_DWORDS_OF_RINGBUFFER_HISTORY; + if (read_idx < 0) + read_idx += rb_count; + KGSL_LOG_DUMP(device, + "RB: addr=%8.8x window:%4.4x-%4.4x, start:%4.4x\n", + cp_rb_base, cp_rb_rptr, cp_rb_wptr, read_idx); + adreno_dump_rb(device, rb_copy, num_item<<2, read_idx, rb_count); + + if (adreno_ib_dump_enabled()) { + for (read_idx = NUM_DWORDS_OF_RINGBUFFER_HISTORY; + read_idx >= 0; --read_idx) { + uint32_t this_cmd = rb_copy[read_idx]; + if (adreno_cmd_is_ib(this_cmd)) { + uint32_t ib_addr = rb_copy[read_idx+1]; + uint32_t ib_size = rb_copy[read_idx+2]; + if (ib_size && cp_ib1_base == ib_addr) { + KGSL_LOG_DUMP(device, + "IB1: base:%8.8X " + "count:%d\n", ib_addr, 
ib_size); + dump_ib(device, "IB1: ", cur_pt_base, + read_idx<<2, ib_addr, ib_size, + 1); + } + } + } + for (i = 0; i < ib_list.count; ++i) { + uint32_t ib_size = ib_list.sizes[i]; + uint32_t ib_offset = ib_list.offsets[i]; + if (ib_size && cp_ib2_base == ib_list.bases[i]) { + KGSL_LOG_DUMP(device, + "IB2: base:%8.8X count:%d\n", + cp_ib2_base, ib_size); + dump_ib(device, "IB2: ", cur_pt_base, ib_offset, + ib_list.bases[i], ib_size, 1); + } + } + } + + /* Dump the registers if the user asked for it */ + + if (adreno_is_a20x(adreno_dev)) + adreno_dump_regs(device, a200_registers, + a200_registers_count); + else if (adreno_is_a22x(adreno_dev)) + adreno_dump_regs(device, a220_registers, + a220_registers_count); + +error_vfree: + vfree(rb_copy); +end: + return result; +} + +/** + * adreno_postmortem_dump - Dump the current GPU state + * @device - A pointer to the KGSL device to dump + * @manual - A flag that indicates if this was a manually triggered + * dump (from debugfs). If zero, then this is assumed to be a + * dump automaticlaly triggered from a hang +*/ + +int adreno_postmortem_dump(struct kgsl_device *device, int manual) +{ + bool saved_nap; + struct kgsl_pwrctrl *pwr = &device->pwrctrl; + + BUG_ON(device == NULL); + + kgsl_cffdump_hang(device->id); + + /* For a manual dump, make sure that the system is idle */ + + if (manual) { + if (device->active_cnt != 0) { + mutex_unlock(&device->mutex); + wait_for_completion(&device->suspend_gate); + mutex_lock(&device->mutex); + } + + if (device->state == KGSL_STATE_ACTIVE) + kgsl_idle(device, KGSL_TIMEOUT_DEFAULT); + + } + KGSL_LOG_DUMP(device, "POWER: FLAGS = %08lX | ACTIVE POWERLEVEL = %08X", + pwr->power_flags, pwr->active_pwrlevel); + + KGSL_LOG_DUMP(device, "POWER: INTERVAL TIMEOUT = %08X ", + pwr->interval_timeout); + + KGSL_LOG_DUMP(device, "GRP_CLK = %lu ", + kgsl_get_clkrate(pwr->grp_clks[0])); + + KGSL_LOG_DUMP(device, "BUS CLK = %lu ", + kgsl_get_clkrate(pwr->ebi1_clk)); + + /* Disable the idle timer so we don't get interrupted */ + del_timer_sync(&device->idle_timer); + mutex_unlock(&device->mutex); + flush_workqueue(device->work_queue); + mutex_lock(&device->mutex); + + /* Turn off napping to make sure we have the clocks full + attention through the following process */ + saved_nap = device->pwrctrl.nap_allowed; + device->pwrctrl.nap_allowed = false; + + /* Force on the clocks */ + kgsl_pwrctrl_wake(device); + + /* Disable the irq */ + kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF); + + adreno_dump(device); + + /* Restore nap mode */ + device->pwrctrl.nap_allowed = saved_nap; + + /* On a manual trigger, turn on the interrupts and put + the clocks to sleep. They will recover themselves + on the next event. For a hang, leave things as they + are until recovery kicks in. */ + + if (manual) { + kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON); + + /* try to go into a sleep mode until the next event */ + kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP); + kgsl_pwrctrl_sleep(device); + } + + KGSL_DRV_ERR(device, "Dump Finished\n"); + + return 0; +} diff --git a/drivers/gpu/msm/adreno_postmortem.h b/drivers/gpu/msm/adreno_postmortem.h new file mode 100644 index 0000000000000..68832e28c19b3 --- /dev/null +++ b/drivers/gpu/msm/adreno_postmortem.h @@ -0,0 +1,37 @@ +/* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __ADRENO_POSTMORTEM_H +#define __ADRENO_POSTMORTEM_H + +struct kgsl_device; + +#define IB_LIST_SIZE 64 +struct ib_list { + int count; + uint32_t bases[IB_LIST_SIZE]; + uint32_t sizes[IB_LIST_SIZE]; + uint32_t offsets[IB_LIST_SIZE]; +}; + +int adreno_postmortem_dump(struct kgsl_device *device, int manual); + +void dump_ib(struct kgsl_device *device, char *buffId, uint32_t pt_base, + uint32_t base_offset, uint32_t ib_base, uint32_t ib_size, bool dump); + +void dump_ib1(struct kgsl_device *device, uint32_t pt_base, + uint32_t base_offset, + uint32_t ib1_base, uint32_t ib1_size, + struct ib_list *ib_list, bool dump); + +#endif /* __ADRENO_POSTMORTEM_H */ diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c new file mode 100644 index 0000000000000..3de70166787c7 --- /dev/null +++ b/drivers/gpu/msm/adreno_ringbuffer.c @@ -0,0 +1,1293 @@ +/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include + +#include "kgsl.h" +#include "kgsl_sharedmem.h" +#include "kgsl_cffdump.h" + +#include "adreno.h" +#include "adreno_pm4types.h" +#include "adreno_ringbuffer.h" +#include "adreno_debugfs.h" +#include "adreno_postmortem.h" + +#include "a2xx_reg.h" + +#define GSL_RB_NOP_SIZEDWORDS 2 +/* protected mode error checking below register address 0x800 +* note: if CP_INTERRUPT packet is used then checking needs +* to change to below register address 0x7C8 +*/ +#define GSL_RB_PROTECTED_MODE_CONTROL 0x200001F2 + +/* Firmware file names + * Legacy names must remain but replacing macro names to + * match current kgsl model. + * a200 is yamato + * a220 is leia + */ +#define A200_PFP_FW "yamato_pfp.fw" +#define A200_PM4_FW "yamato_pm4.fw" +#define A220_PFP_470_FW "leia_pfp_470.fw" +#define A220_PM4_470_FW "leia_pm4_470.fw" +#define A225_PFP_FW "a225_pfp.fw" +#define A225_PM4_FW "a225_pm4.fw" + +static void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb) +{ + BUG_ON(rb->wptr == 0); + + /* Let the pwrscale policy know that new commands have + been submitted. */ + kgsl_pwrscale_busy(rb->device); + + /*synchronize memory before informing the hardware of the + *new commands. 
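+ *The barrier below publishes the command dwords written through the CPU mapping before the wptr register write, so the CP never fetches stale ringbuffer contents.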
+ */ + mb(); + + adreno_regwrite(rb->device, REG_CP_RB_WPTR, rb->wptr); +} + +static void +adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, unsigned int numcmds, + int wptr_ahead) +{ + int nopcount; + unsigned int freecmds; + unsigned int *cmds; + uint cmds_gpu; + struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device); + unsigned long wait_timeout = msecs_to_jiffies(adreno_dev->wait_timeout); + unsigned long wait_time; + + /* if wptr ahead, fill the remaining with NOPs */ + if (wptr_ahead) { + /* -1 for header */ + nopcount = rb->sizedwords - rb->wptr - 1; + + cmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr; + cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*rb->wptr; + + GSL_RB_WRITE(cmds, cmds_gpu, cp_nop_packet(nopcount)); + + /* Make sure that rptr is not 0 before submitting + * commands at the end of ringbuffer. We do not + * want the rptr and wptr to become equal when + * the ringbuffer is not empty */ + do { + GSL_RB_GET_READPTR(rb, &rb->rptr); + } while (!rb->rptr); + + rb->wptr++; + + adreno_ringbuffer_submit(rb); + + rb->wptr = 0; + } + + wait_time = jiffies + wait_timeout; + /* wait for space in ringbuffer */ + while (1) { + GSL_RB_GET_READPTR(rb, &rb->rptr); + + freecmds = rb->rptr - rb->wptr; + + if (freecmds == 0 || freecmds > numcmds) + break; + + if (time_after(jiffies, wait_time)) { + KGSL_DRV_ERR(rb->device, + "Timed out while waiting for freespace in ringbuffer " + "rptr: 0x%x, wptr: 0x%x\n", rb->rptr, rb->wptr); + if (!adreno_dump_and_recover(rb->device)) + wait_time = jiffies + wait_timeout; + else + /* GPU is hung and we cannot recover */ + BUG(); + } + } +} + +static unsigned int find_faulting_ib1_size(struct adreno_ringbuffer *rb, + unsigned int rptr, unsigned int ib1) +{ + unsigned int value; + unsigned int temp_rptr = rptr * sizeof(unsigned int); + unsigned int rb_size = rb->buffer_desc.size; + + do { + temp_rptr = adreno_ringbuffer_dec_wrapped(temp_rptr, rb_size); + kgsl_sharedmem_readl(&rb->buffer_desc, &value, temp_rptr); + + if (ib1 == value) { + temp_rptr = adreno_ringbuffer_dec_wrapped(temp_rptr, + rb_size); + kgsl_sharedmem_readl(&rb->buffer_desc, &value, + temp_rptr); + if (adreno_cmd_is_ib(value)) { + temp_rptr += 2 * sizeof(unsigned int); + kgsl_sharedmem_readl(&rb->buffer_desc, &value, + temp_rptr); + return value; + } else { + temp_rptr += sizeof(unsigned int); + } + } + } while (temp_rptr != rptr); + + return 0; +} + +static unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb, + unsigned int numcmds) +{ + unsigned int *ptr = NULL; + + BUG_ON(numcmds >= rb->sizedwords); + + GSL_RB_GET_READPTR(rb, &rb->rptr); + /* check for available space */ + if (rb->wptr >= rb->rptr) { + /* wptr ahead or equal to rptr */ + /* reserve dwords for nop packet */ + if ((rb->wptr + numcmds) > (rb->sizedwords - + GSL_RB_NOP_SIZEDWORDS)) + adreno_ringbuffer_waitspace(rb, numcmds, 1); + } else { + /* wptr behind rptr */ + if ((rb->wptr + numcmds) >= rb->rptr) + adreno_ringbuffer_waitspace(rb, numcmds, 0); + /* check for remaining space */ + /* reserve dwords for nop packet */ + if ((rb->wptr + numcmds) > (rb->sizedwords - + GSL_RB_NOP_SIZEDWORDS)) + adreno_ringbuffer_waitspace(rb, numcmds, 1); + } + + ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr; + rb->wptr += numcmds; + + return ptr; +} + +static int _load_firmware(struct kgsl_device *device, const char *fwfile, + void **data, int *len) +{ + const struct firmware *fw = NULL; + int ret; + + ret = request_firmware(&fw, fwfile, device->dev); + + if (ret) { + 
KGSL_DRV_ERR(device, "request_firmware(%s) failed: %d\n", + fwfile, ret); + return ret; + } + + *data = kmalloc(fw->size, GFP_KERNEL); + + if (*data) { + memcpy(*data, fw->data, fw->size); + *len = fw->size; + } else + KGSL_MEM_ERR(device, "kmalloc(%d) failed\n", fw->size); + + release_firmware(fw); + return (*data != NULL) ? 0 : -ENOMEM; +} + +static int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + int i, ret = 0; + + if (adreno_dev->pm4_fw == NULL) { + int len; + void *ptr; + + ret = _load_firmware(device, adreno_dev->pm4_fwfile, + &ptr, &len); + + if (ret) + goto err; + + /* PM4 size is 3 dword aligned plus 1 dword of version */ + if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) { + KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len); + ret = -EINVAL; + kfree(ptr); + goto err; + } + + adreno_dev->pm4_fw_size = len / sizeof(uint32_t); + adreno_dev->pm4_fw = ptr; + } + + KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n", + adreno_dev->pm4_fw[0]); + + adreno_regwrite(device, REG_CP_DEBUG, 0x02000000); + adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0); + for (i = 1; i < adreno_dev->pm4_fw_size; i++) + adreno_regwrite(device, REG_CP_ME_RAM_DATA, + adreno_dev->pm4_fw[i]); +err: + return ret; +} + +static int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + int i, ret = 0; + + if (adreno_dev->pfp_fw == NULL) { + int len; + void *ptr; + + ret = _load_firmware(device, adreno_dev->pfp_fwfile, + &ptr, &len); + if (ret) + goto err; + + /* PFP size shold be dword aligned */ + if (len % sizeof(uint32_t) != 0) { + KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len); + ret = -EINVAL; + kfree(ptr); + goto err; + } + + adreno_dev->pfp_fw_size = len / sizeof(uint32_t); + adreno_dev->pfp_fw = ptr; + } + + KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n", + adreno_dev->pfp_fw[0]); + + adreno_regwrite(device, REG_CP_PFP_UCODE_ADDR, 0); + for (i = 1; i < adreno_dev->pfp_fw_size; i++) + adreno_regwrite(device, REG_CP_PFP_UCODE_DATA, + adreno_dev->pfp_fw[i]); +err: + return ret; +} + +int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram) +{ + int status; + /*cp_rb_cntl_u cp_rb_cntl; */ + union reg_cp_rb_cntl cp_rb_cntl; + unsigned int *cmds, rb_cntl; + struct kgsl_device *device = rb->device; + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + uint cmds_gpu; + + if (rb->flags & KGSL_FLAGS_STARTED) + return 0; + + if (init_ram) { + rb->timestamp = 0; + GSL_RB_INIT_TIMESTAMP(rb); + } + + kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0, + sizeof(struct kgsl_rbmemptrs)); + + kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA, + (rb->sizedwords << 2)); + + adreno_regwrite(device, REG_CP_RB_WPTR_BASE, + (rb->memptrs_desc.gpuaddr + + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET)); + + /* setup WPTR delay */ + adreno_regwrite(device, REG_CP_RB_WPTR_DELAY, 0 /*0x70000010 */); + + /*setup REG_CP_RB_CNTL */ + adreno_regread(device, REG_CP_RB_CNTL, &rb_cntl); + cp_rb_cntl.val = rb_cntl; + + /* + * The size of the ringbuffer in the hardware is the log2 + * representation of the size in quadwords (sizedwords / 2) + */ + cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1); + + /* + * Specify the quadwords to read before updating mem RPTR. + * Like above, pass the log2 representation of the blocksize + * in quadwords. 
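+ * KGSL_RB_BLKSIZE is given in bytes, so the >> 3 below converts it to quadwords (8 bytes each) before taking ilog2.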
+ */ + cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3); + + cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN; /* WPTR polling */ + /* mem RPTR writebacks */ + cp_rb_cntl.f.rb_no_update = GSL_RB_CNTL_NO_UPDATE; + + adreno_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val); + + adreno_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr); + + adreno_regwrite(device, REG_CP_RB_RPTR_ADDR, + rb->memptrs_desc.gpuaddr + + GSL_RB_MEMPTRS_RPTR_OFFSET); + + /* explicitly clear all cp interrupts */ + adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF); + + /* setup scratch/timestamp */ + adreno_regwrite(device, REG_SCRATCH_ADDR, + device->memstore.gpuaddr + + KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp)); + + adreno_regwrite(device, REG_SCRATCH_UMSK, + GSL_RB_MEMPTRS_SCRATCH_MASK); + + /* update the eoptimestamp field with the last retired timestamp */ + kgsl_sharedmem_writel(&device->memstore, + KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp), + rb->timestamp); + + /* load the CP ucode */ + + status = adreno_ringbuffer_load_pm4_ucode(device); + if (status != 0) + return status; + + /* load the prefetch parser ucode */ + status = adreno_ringbuffer_load_pfp_ucode(device); + if (status != 0) + return status; + + adreno_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000C0804); + + rb->rptr = 0; + rb->wptr = 0; + + /* clear ME_HALT to start micro engine */ + adreno_regwrite(device, REG_CP_ME_CNTL, 0); + + /* ME_INIT */ + cmds = adreno_ringbuffer_allocspace(rb, 19); + cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-19); + + GSL_RB_WRITE(cmds, cmds_gpu, CP_HDR_ME_INIT); + /* All fields present (bits 9:0) */ + GSL_RB_WRITE(cmds, cmds_gpu, 0x000003ff); + /* Disable/Enable Real-Time Stream processing (present but ignored) */ + GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000); + /* Enable (2D <-> 3D) implicit synchronization (present but ignored) */ + GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000); + + GSL_RB_WRITE(cmds, cmds_gpu, + SUBBLOCK_OFFSET(REG_RB_SURFACE_INFO)); + GSL_RB_WRITE(cmds, cmds_gpu, + SUBBLOCK_OFFSET(REG_PA_SC_WINDOW_OFFSET)); + GSL_RB_WRITE(cmds, cmds_gpu, + SUBBLOCK_OFFSET(REG_VGT_MAX_VTX_INDX)); + GSL_RB_WRITE(cmds, cmds_gpu, + SUBBLOCK_OFFSET(REG_SQ_PROGRAM_CNTL)); + GSL_RB_WRITE(cmds, cmds_gpu, + SUBBLOCK_OFFSET(REG_RB_DEPTHCONTROL)); + GSL_RB_WRITE(cmds, cmds_gpu, + SUBBLOCK_OFFSET(REG_PA_SU_POINT_SIZE)); + GSL_RB_WRITE(cmds, cmds_gpu, + SUBBLOCK_OFFSET(REG_PA_SC_LINE_CNTL)); + GSL_RB_WRITE(cmds, cmds_gpu, + SUBBLOCK_OFFSET(REG_PA_SU_POLY_OFFSET_FRONT_SCALE)); + + /* Instruction memory size: */ + GSL_RB_WRITE(cmds, cmds_gpu, + (adreno_encode_istore_size(adreno_dev) + | adreno_dev->pix_shader_start)); + /* Maximum Contexts */ + GSL_RB_WRITE(cmds, cmds_gpu, 0x00000001); + /* Write Confirm Interval and The CP will wait the + * wait_interval * 16 clocks between polling */ + GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000); + + /* NQ and External Memory Swap */ + GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000); + /* Protected mode error checking */ + GSL_RB_WRITE(cmds, cmds_gpu, GSL_RB_PROTECTED_MODE_CONTROL); + /* Disable header dumping and Header dump address */ + GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000); + /* Header dump size */ + GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000); + + adreno_ringbuffer_submit(rb); + + /* idle device to validate ME INIT */ + status = adreno_idle(device, KGSL_TIMEOUT_DEFAULT); + + if (status == 0) + rb->flags |= KGSL_FLAGS_STARTED; + + return status; +} + +void adreno_ringbuffer_stop(struct adreno_ringbuffer *rb) +{ + if (rb->flags & KGSL_FLAGS_STARTED) { + /* ME_HALT */ + 
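/* 0x10000000 sets the ME_HALT bit; it is cleared again in adreno_ringbuffer_start */ +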
adreno_regwrite(rb->device, REG_CP_ME_CNTL, 0x10000000); + rb->flags &= ~KGSL_FLAGS_STARTED; + } +} + +int adreno_ringbuffer_init(struct kgsl_device *device) +{ + int status; + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer; + + rb->device = device; + /* + * It is silly to convert this to words and then back to bytes + * immediately below, but most of the rest of the code deals + * in words, so we might as well only do the math once + */ + rb->sizedwords = KGSL_RB_SIZE >> 2; + + /* allocate memory for ringbuffer */ + status = kgsl_allocate_contiguous(&rb->buffer_desc, + (rb->sizedwords << 2)); + + if (status != 0) { + adreno_ringbuffer_close(rb); + return status; + } + + /* allocate memory for polling and timestamps */ + /* This really only needs 4-byte alignment, but when the MMU is used + * it must be placed on a page boundary */ + status = kgsl_allocate_contiguous(&rb->memptrs_desc, + sizeof(struct kgsl_rbmemptrs)); + + if (status != 0) { + adreno_ringbuffer_close(rb); + return status; + } + + /* overlay structure on memptrs memory */ + rb->memptrs = (struct kgsl_rbmemptrs *) rb->memptrs_desc.hostptr; + + return 0; +} + +void adreno_ringbuffer_close(struct adreno_ringbuffer *rb) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device); + + kgsl_sharedmem_free(&rb->buffer_desc); + kgsl_sharedmem_free(&rb->memptrs_desc); + + kfree(adreno_dev->pfp_fw); + kfree(adreno_dev->pm4_fw); + + adreno_dev->pfp_fw = NULL; + adreno_dev->pm4_fw = NULL; + + memset(rb, 0, sizeof(struct adreno_ringbuffer)); +} + +static uint32_t +adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb, + struct adreno_context *context, + unsigned int flags, unsigned int *cmds, + int sizedwords) +{ + unsigned int *ringcmds; + unsigned int timestamp; + unsigned int total_sizedwords = sizedwords + 6; + unsigned int i; + unsigned int rcmd_gpu; + + /* reserve space to temporarily turn off protected mode + * error checking if needed + */ + total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0; + total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 10 : 0; + /* 2 dwords to store the start of command sequence */ + total_sizedwords += 2; + + ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords); + /* GPU may hang during space allocation, if that's the case the current + * context may have hung the GPU */ + if (context && context->flags & CTXT_FLAGS_GPU_HANG) { + KGSL_CTXT_WARN(rb->device, + "Context %p caused a gpu hang. 
Will not accept commands for context %d\n", + context, context->id); + return rb->timestamp; + } + + rcmd_gpu = rb->buffer_desc.gpuaddr + + sizeof(uint)*(rb->wptr-total_sizedwords); + + GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1)); + GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER); + + if (flags & KGSL_CMD_FLAGS_PMODE) { + /* disable protected mode error checking */ + GSL_RB_WRITE(ringcmds, rcmd_gpu, + cp_type3_packet(CP_SET_PROTECTED_MODE, 1)); + GSL_RB_WRITE(ringcmds, rcmd_gpu, 0); + } + + for (i = 0; i < sizedwords; i++) { + GSL_RB_WRITE(ringcmds, rcmd_gpu, *cmds); + cmds++; + } + + if (flags & KGSL_CMD_FLAGS_PMODE) { + /* re-enable protected mode error checking */ + GSL_RB_WRITE(ringcmds, rcmd_gpu, + cp_type3_packet(CP_SET_PROTECTED_MODE, 1)); + GSL_RB_WRITE(ringcmds, rcmd_gpu, 1); + } + + rb->timestamp++; + timestamp = rb->timestamp; + + /* start-of-pipeline and end-of-pipeline timestamps */ + GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type0_packet(REG_CP_TIMESTAMP, 1)); + GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp); + GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type3_packet(CP_EVENT_WRITE, 3)); + GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS); + GSL_RB_WRITE(ringcmds, rcmd_gpu, + (rb->device->memstore.gpuaddr + + KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp))); + GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp); + + if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) { + /* Conditional execution based on memory values */ + GSL_RB_WRITE(ringcmds, rcmd_gpu, + cp_type3_packet(CP_COND_EXEC, 4)); + GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr + + KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)) >> 2); + GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr + + KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts)) >> 2); + GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp); + /* # of conditional command DWORDs */ + GSL_RB_WRITE(ringcmds, rcmd_gpu, 5); + + /* Clear the ts_cmp_enable for the global timestamp*/ + GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type3_packet(CP_MEM_WRITE, 2)); + GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->device->memstore.gpuaddr + KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)); + GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x0); + + GSL_RB_WRITE(ringcmds, rcmd_gpu, + cp_type3_packet(CP_INTERRUPT, 1)); + GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK); + } + + adreno_ringbuffer_submit(rb); + + /* return timestamp of issued coREG_ands */ + return timestamp; +} + +void +adreno_ringbuffer_issuecmds(struct kgsl_device *device, + struct adreno_context *drawctxt, + unsigned int flags, + unsigned int *cmds, + int sizedwords) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer; + + if (device->state & KGSL_STATE_HUNG) + return; + + adreno_ringbuffer_addcmds(rb, drawctxt, flags, cmds, sizedwords); +} + +static bool _parse_ibs(struct kgsl_device_private *dev_priv, uint gpuaddr, + int sizedwords); + +static bool +_handle_type3(struct kgsl_device_private *dev_priv, uint *hostaddr) +{ + unsigned int opcode = cp_type3_opcode(*hostaddr); + switch (opcode) { + case CP_INDIRECT_BUFFER_PFD: + case CP_INDIRECT_BUFFER_PFE: + case CP_COND_INDIRECT_BUFFER_PFE: + case CP_COND_INDIRECT_BUFFER_PFD: + return _parse_ibs(dev_priv, hostaddr[1], hostaddr[2]); + case CP_NOP: + case CP_WAIT_FOR_IDLE: + case CP_WAIT_REG_MEM: + case CP_WAIT_REG_EQ: + case CP_WAT_REG_GTE: + case CP_WAIT_UNTIL_READ: + case CP_WAIT_IB_PFD_COMPLETE: + case CP_REG_RMW: + case CP_REG_TO_MEM: + case CP_MEM_WRITE: + case CP_MEM_WRITE_CNTR: + case CP_COND_EXEC: + case 
CP_COND_WRITE: + case CP_EVENT_WRITE: + case CP_EVENT_WRITE_SHD: + case CP_EVENT_WRITE_CFL: + case CP_EVENT_WRITE_ZPD: + case CP_DRAW_INDX: + case CP_DRAW_INDX_2: + case CP_DRAW_INDX_BIN: + case CP_DRAW_INDX_2_BIN: + case CP_VIZ_QUERY: + case CP_SET_STATE: + case CP_SET_CONSTANT: + case CP_IM_LOAD: + case CP_IM_LOAD_IMMEDIATE: + case CP_LOAD_CONSTANT_CONTEXT: + case CP_INVALIDATE_STATE: + case CP_SET_SHADER_BASES: + case CP_SET_BIN_MASK: + case CP_SET_BIN_SELECT: + case CP_SET_BIN_BASE_OFFSET: + case CP_SET_BIN_DATA: + case CP_CONTEXT_UPDATE: + case CP_INTERRUPT: + case CP_IM_STORE: + break; + /* these shouldn't come from userspace */ + case CP_ME_INIT: + case CP_SET_PROTECTED_MODE: + default: + KGSL_CMD_ERR(dev_priv->device, "bad CP opcode %0x\n", opcode); + return false; + break; + } + + return true; +} + +static bool +_handle_type0(struct kgsl_device_private *dev_priv, uint *hostaddr) +{ + unsigned int reg = type0_pkt_offset(*hostaddr); + unsigned int cnt = type0_pkt_size(*hostaddr); + if (reg < 0x0192 || (reg + cnt) >= 0x8000) { + KGSL_CMD_ERR(dev_priv->device, "bad type0 reg: 0x%0x cnt: %d\n", + reg, cnt); + return false; + } + return true; +} + +/* + * Traverse IBs and dump them to test vector. Detect swap by inspecting + * register writes, keeping note of the current state, and dump + * framebuffer config to test vector + */ +static bool _parse_ibs(struct kgsl_device_private *dev_priv, + uint gpuaddr, int sizedwords) +{ + static uint level; /* recursion level */ + bool ret = false; + uint *hostaddr, *hoststart; + int dwords_left = sizedwords; /* dwords left in the current command + buffer */ + struct kgsl_mem_entry *entry; + + spin_lock(&dev_priv->process_priv->mem_lock); + entry = kgsl_sharedmem_find_region(dev_priv->process_priv, + gpuaddr, sizedwords * sizeof(uint)); + spin_unlock(&dev_priv->process_priv->mem_lock); + if (entry == NULL) { + KGSL_CMD_ERR(dev_priv->device, + "no mapping for gpuaddr: 0x%08x\n", gpuaddr); + return false; + } + + hostaddr = (uint *)kgsl_gpuaddr_to_vaddr(&entry->memdesc, gpuaddr); + if (hostaddr == NULL) { + KGSL_CMD_ERR(dev_priv->device, + "no mapping for gpuaddr: 0x%08x\n", gpuaddr); + return false; + } + + hoststart = hostaddr; + + level++; + + KGSL_CMD_INFO(dev_priv->device, "ib: gpuaddr:0x%08x, wc:%d, hptr:%p\n", + gpuaddr, sizedwords, hostaddr); + + mb(); + while (dwords_left > 0) { + bool cur_ret = true; + int count = 0; /* dword count including packet header */ + + switch (*hostaddr >> 30) { + case 0x0: /* type-0 */ + count = (*hostaddr >> 16)+2; + cur_ret = _handle_type0(dev_priv, hostaddr); + break; + case 0x1: /* type-1 */ + count = 2; + break; + case 0x3: /* type-3 */ + count = ((*hostaddr >> 16) & 0x3fff) + 2; + cur_ret = _handle_type3(dev_priv, hostaddr); + break; + default: + KGSL_CMD_ERR(dev_priv->device, "unexpected type: " + "type:%d, word:0x%08x @ 0x%p, gpu:0x%08x\n", + *hostaddr >> 30, *hostaddr, hostaddr, + gpuaddr+4*(sizedwords-dwords_left)); + cur_ret = false; + count = dwords_left; + break; + } + + if (!cur_ret) { + KGSL_CMD_ERR(dev_priv->device, + "bad sub-type: #:%d/%d, v:0x%08x" + " @ 0x%p[gb:0x%08x], level:%d\n", + sizedwords-dwords_left, sizedwords, *hostaddr, + hostaddr, gpuaddr+4*(sizedwords-dwords_left), + level); + + if (ADRENO_DEVICE(dev_priv->device)->ib_check_level + >= 2) + print_hex_dump(KERN_ERR, + level == 1 ? 
"IB1:" : "IB2:", + DUMP_PREFIX_OFFSET, 32, 4, hoststart, + sizedwords*4, 0); + goto done; + } + + /* jump to next packet */ + dwords_left -= count; + hostaddr += count; + if (dwords_left < 0) { + KGSL_CMD_ERR(dev_priv->device, + "bad count: c:%d, #:%d/%d, " + "v:0x%08x @ 0x%p[gb:0x%08x], level:%d\n", + count, sizedwords-(dwords_left+count), + sizedwords, *(hostaddr-count), hostaddr-count, + gpuaddr+4*(sizedwords-(dwords_left+count)), + level); + if (ADRENO_DEVICE(dev_priv->device)->ib_check_level + >= 2) + print_hex_dump(KERN_ERR, + level == 1 ? "IB1:" : "IB2:", + DUMP_PREFIX_OFFSET, 32, 4, hoststart, + sizedwords*4, 0); + goto done; + } + } + + ret = true; +done: + if (!ret) + KGSL_DRV_ERR(dev_priv->device, + "parsing failed: gpuaddr:0x%08x, " + "host:0x%p, wc:%d\n", gpuaddr, hoststart, sizedwords); + + level--; + + return ret; +} + +int +adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv, + struct kgsl_context *context, + struct kgsl_ibdesc *ibdesc, + unsigned int numibs, + uint32_t *timestamp, + unsigned int flags) +{ + struct kgsl_device *device = dev_priv->device; + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + unsigned int *link; + unsigned int *cmds; + unsigned int i; + struct adreno_context *drawctxt; + unsigned int start_index = 0; + + if (device->state & KGSL_STATE_HUNG) + return -EBUSY; + if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED) || + context == NULL || ibdesc == 0 || numibs == 0) + return -EINVAL; + + drawctxt = context->devctxt; + + if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) { + KGSL_CTXT_WARN(device, "Context %p caused a gpu hang.." + " will not accept commands for this context\n", + drawctxt); + return -EDEADLK; + } + link = kzalloc(sizeof(unsigned int) * numibs * 3, GFP_KERNEL); + cmds = link; + if (!link) { + KGSL_MEM_ERR(device, "Failed to allocate memory for for command" + " submission, size %x\n", numibs * 3); + return -ENOMEM; + } + + /*When preamble is enabled, the preamble buffer with state restoration + commands are stored in the first node of the IB chain. 
We can skip that + if a context switch hasn't occured */ + + if (drawctxt->flags & CTXT_FLAGS_PREAMBLE && + adreno_dev->drawctxt_active == drawctxt) + start_index = 1; + + for (i = start_index; i < numibs; i++) { + if (unlikely(adreno_dev->ib_check_level >= 1 && + !_parse_ibs(dev_priv, ibdesc[i].gpuaddr, + ibdesc[i].sizedwords))) { + kfree(link); + return -EINVAL; + } + *cmds++ = CP_HDR_INDIRECT_BUFFER_PFD; + *cmds++ = ibdesc[i].gpuaddr; + *cmds++ = ibdesc[i].sizedwords; + } + + kgsl_setstate(device, context->id, + kgsl_mmu_pt_get_flags(device->mmu.hwpagetable, + device->id)); + + adreno_drawctxt_switch(adreno_dev, drawctxt, flags); + + *timestamp = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer, + drawctxt, 0, + &link[0], (cmds - link)); + + KGSL_CMD_INFO(device, "ctxt %d g %08x numibs %d ts %d\n", + context->id, (unsigned int)ibdesc, numibs, *timestamp); + + kfree(link); + +#ifdef CONFIG_MSM_KGSL_CFF_DUMP + /* + * insert wait for idle after every IB1 + * this is conservative but works reliably and is ok + * even for performance simulations + */ + adreno_idle(device, KGSL_TIMEOUT_DEFAULT); +#endif + + return 0; +} + +static int _find_start_of_cmd_seq(struct adreno_ringbuffer *rb, + unsigned int *ptr, + bool inc) +{ + int status = -EINVAL; + unsigned int val1; + unsigned int size = rb->buffer_desc.size; + unsigned int start_ptr = *ptr; + + while ((start_ptr / sizeof(unsigned int)) != rb->wptr) { + if (inc) + start_ptr = adreno_ringbuffer_inc_wrapped(start_ptr, + size); + else + start_ptr = adreno_ringbuffer_dec_wrapped(start_ptr, + size); + kgsl_sharedmem_readl(&rb->buffer_desc, &val1, start_ptr); + if (KGSL_CMD_IDENTIFIER == val1) { + if ((start_ptr / sizeof(unsigned int)) != rb->wptr) + start_ptr = adreno_ringbuffer_dec_wrapped( + start_ptr, size); + *ptr = start_ptr; + status = 0; + break; + } + } + return status; +} + +static int _find_cmd_seq_after_eop_ts(struct adreno_ringbuffer *rb, + unsigned int *rb_rptr, + unsigned int global_eop, + bool inc) +{ + int status = -EINVAL; + unsigned int temp_rb_rptr = *rb_rptr; + unsigned int size = rb->buffer_desc.size; + unsigned int val[3]; + int i = 0; + bool check = false; + + if (inc && temp_rb_rptr / sizeof(unsigned int) != rb->wptr) + return status; + + do { + /* when decrementing we need to decrement first and + * then read make sure we cover all the data */ + if (!inc) + temp_rb_rptr = adreno_ringbuffer_dec_wrapped( + temp_rb_rptr, size); + kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], + temp_rb_rptr); + + if (check && ((inc && val[i] == global_eop) || + (!inc && (val[i] == + cp_type3_packet(CP_MEM_WRITE, 2) || + val[i] == CACHE_FLUSH_TS)))) { + /* decrement i, i.e i = (i - 1 + 3) % 3 if + * we are going forward, else increment i */ + i = (i + 2) % 3; + if (val[i] == rb->device->memstore.gpuaddr + + KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)) { + int j = ((i + 2) % 3); + if ((inc && (val[j] == CACHE_FLUSH_TS || + val[j] == cp_type3_packet( + CP_MEM_WRITE, 2))) || + (!inc && val[j] == global_eop)) { + /* Found the global eop */ + status = 0; + break; + } + } + /* if no match found then increment i again + * since we decremented before matching */ + i = (i + 1) % 3; + } + if (inc) + temp_rb_rptr = adreno_ringbuffer_inc_wrapped( + temp_rb_rptr, size); + + i = (i + 1) % 3; + if (2 == i) + check = true; + } while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr); + /* temp_rb_rptr points to the command stream after global eop, + * move backward till the start of command sequence */ + if (!status) { + status = _find_start_of_cmd_seq(rb, 
&temp_rb_rptr, false); + if (!status) { + *rb_rptr = temp_rb_rptr; + KGSL_DRV_ERR(rb->device, + "Offset of cmd sequence after eop timestamp: 0x%x\n", + temp_rb_rptr / sizeof(unsigned int)); + } + } + return status; +} + +static int _find_hanging_ib_sequence(struct adreno_ringbuffer *rb, + unsigned int *rb_rptr, + unsigned int ib1) +{ + int status = -EINVAL; + unsigned int temp_rb_rptr = *rb_rptr; + unsigned int size = rb->buffer_desc.size; + unsigned int val[2]; + int i = 0; + bool check = false; + bool ctx_switch = false; + + while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) { + kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr); + + if (check && val[i] == ib1) { + /* decrement i, i.e i = (i - 1 + 2) % 2 */ + i = (i + 1) % 2; + if (adreno_cmd_is_ib(val[i])) { + /* go till start of command sequence */ + status = _find_start_of_cmd_seq(rb, + &temp_rb_rptr, false); + KGSL_DRV_ERR(rb->device, + "Found the hanging IB at offset 0x%x\n", + temp_rb_rptr / sizeof(unsigned int)); + break; + } + /* if no match the increment i since we decremented + * before checking */ + i = (i + 1) % 2; + } + /* Make sure you do not encounter a context switch twice, we can + * encounter it once for the bad context as the start of search + * can point to the context switch */ + if (val[i] == KGSL_CONTEXT_TO_MEM_IDENTIFIER) { + if (ctx_switch) { + KGSL_DRV_ERR(rb->device, + "Context switch encountered before bad " + "IB found\n"); + break; + } + ctx_switch = true; + } + i = (i + 1) % 2; + if (1 == i) + check = true; + temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr, + size); + } + if (!status) + *rb_rptr = temp_rb_rptr; + return status; +} + +static void _turn_preamble_on_for_ib_seq(struct adreno_ringbuffer *rb, + unsigned int rb_rptr) +{ + unsigned int temp_rb_rptr = rb_rptr; + unsigned int size = rb->buffer_desc.size; + unsigned int val[2]; + int i = 0; + bool check = false; + bool cmd_start = false; + + /* Go till the start of the ib sequence and turn on preamble */ + while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) { + kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr); + if (check && KGSL_START_OF_IB_IDENTIFIER == val[i]) { + /* decrement i */ + i = (i + 1) % 2; + if (val[i] == cp_nop_packet(4)) { + temp_rb_rptr = adreno_ringbuffer_dec_wrapped( + temp_rb_rptr, size); + kgsl_sharedmem_writel(&rb->buffer_desc, + temp_rb_rptr, cp_nop_packet(1)); + } + KGSL_DRV_ERR(rb->device, + "Turned preamble on at offset 0x%x\n", + temp_rb_rptr / 4); + break; + } + /* If you reach beginning of next command sequence then exit + * First command encountered is the current one so don't break + * on that. */ + if (KGSL_CMD_IDENTIFIER == val[i]) { + if (cmd_start) + break; + cmd_start = true; + } + + i = (i + 1) % 2; + if (1 == i) + check = true; + temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr, + size); + } +} + +static void _copy_valid_rb_content(struct adreno_ringbuffer *rb, + unsigned int rb_rptr, unsigned int *temp_rb_buffer, + int *rb_size, unsigned int *bad_rb_buffer, + int *bad_rb_size, + int *last_valid_ctx_id) +{ + unsigned int good_rb_idx = 0, cmd_start_idx = 0; + unsigned int val1 = 0; + struct kgsl_context *k_ctxt; + struct adreno_context *a_ctxt; + unsigned int bad_rb_idx = 0; + int copy_rb_contents = 0; + unsigned int temp_rb_rptr; + unsigned int size = rb->buffer_desc.size; + unsigned int good_cmd_start_idx = 0; + + /* Walk the rb from the context switch. Omit any commands + * for an invalid context. 
*/ + while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) { + kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr); + + if (KGSL_CMD_IDENTIFIER == val1) { + /* Start is the NOP dword that comes before + * KGSL_CMD_IDENTIFIER */ + cmd_start_idx = bad_rb_idx - 1; + if (copy_rb_contents) + good_cmd_start_idx = good_rb_idx - 1; + } + + /* check for context switch indicator */ + if (val1 == KGSL_CONTEXT_TO_MEM_IDENTIFIER) { + unsigned int temp_idx, val2; + /* increment by 3 to get to the context_id */ + temp_rb_rptr = rb_rptr + (3 * sizeof(unsigned int)) % + size; + kgsl_sharedmem_readl(&rb->buffer_desc, &val2, + temp_rb_rptr); + + /* if context switches to a context that did not cause + * hang then start saving the rb contents as those + * commands can be executed */ + k_ctxt = idr_find(&rb->device->context_idr, val2); + if (k_ctxt) { + a_ctxt = k_ctxt->devctxt; + + /* If we are changing to a good context and were not + * copying commands then copy over commands to the good + * context */ + if (!copy_rb_contents && ((k_ctxt && + !(a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) || + !k_ctxt)) { + for (temp_idx = cmd_start_idx; + temp_idx < bad_rb_idx; + temp_idx++) + temp_rb_buffer[good_rb_idx++] = + bad_rb_buffer[temp_idx]; + *last_valid_ctx_id = val2; + copy_rb_contents = 1; + } else if (copy_rb_contents && k_ctxt && + (a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) { + /* If we are changing to bad context then remove + * the dwords we copied for this sequence from + * the good buffer */ + good_rb_idx = good_cmd_start_idx; + copy_rb_contents = 0; + } + } + } + + if (copy_rb_contents) + temp_rb_buffer[good_rb_idx++] = val1; + /* Copy both good and bad commands for replay to the bad + * buffer */ + bad_rb_buffer[bad_rb_idx++] = val1; + + rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr, size); + } + *rb_size = good_rb_idx; + *bad_rb_size = bad_rb_idx; +} + +int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb, + struct adreno_recovery_data *rec_data) +{ + int status; + struct kgsl_device *device = rb->device; + unsigned int rb_rptr = rb->wptr * sizeof(unsigned int); + struct kgsl_context *context; + struct adreno_context *adreno_context; + + context = idr_find(&device->context_idr, rec_data->context_id); + + /* Look for the command stream that is right after the global eop */ + status = _find_cmd_seq_after_eop_ts(rb, &rb_rptr, + rec_data->global_eop + 1, false); + + if (status) + goto done; + + if (context) { + adreno_context = context->devctxt; + + if (adreno_context->flags & CTXT_FLAGS_PREAMBLE) { + if (rec_data->ib1) { + status = _find_hanging_ib_sequence(rb, &rb_rptr, + rec_data->ib1); + if (status) + goto copy_rb_contents; + } + _turn_preamble_on_for_ib_seq(rb, rb_rptr); + } + } + +copy_rb_contents: + _copy_valid_rb_content(rb, rb_rptr, rec_data->rb_buffer, + &rec_data->rb_size, + rec_data->bad_rb_buffer, + &rec_data->bad_rb_size, + &rec_data->last_valid_ctx_id); + /* If we failed to get the hanging IB sequence then we cannot execute + * commands from the bad context */ + if (status) { + rec_data->bad_rb_size = 0; + status = 0; + } +done: + return status; +} + +void +adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff, + int num_rb_contents) +{ + int i; + unsigned int *ringcmds; + unsigned int rcmd_gpu; + + if (!num_rb_contents) + return; + + if (num_rb_contents > (rb->buffer_desc.size - rb->wptr)) { + adreno_regwrite(rb->device, REG_CP_RB_RPTR, 0); + rb->rptr = 0; + BUG_ON(num_rb_contents > rb->buffer_desc.size); + } + ringcmds = (unsigned int *)rb->buffer_desc.hostptr + 
rb->wptr; + rcmd_gpu = rb->buffer_desc.gpuaddr + sizeof(unsigned int) * rb->wptr; + for (i = 0; i < num_rb_contents; i++) + GSL_RB_WRITE(ringcmds, rcmd_gpu, rb_buff[i]); + rb->wptr += num_rb_contents; + adreno_ringbuffer_submit(rb); +} + +void adreno_print_fault_ib_work(struct work_struct *work) +{ + struct kgsl_device *device = container_of(work, struct kgsl_device, + print_fault_ib); + mutex_lock(&device->mutex); + adreno_print_fault_ib(device); + mutex_unlock(&device->mutex); +} + +void adreno_print_fault_ib(struct kgsl_device *device) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct ib_list ib_list; + int i; + + unsigned int ib_sz; + + if (!device->page_fault_ptbase || + !is_adreno_ib_dump_on_pagef_enabled(device)) + goto done; + + ib_sz = find_faulting_ib1_size(&adreno_dev->ringbuffer, + device->page_fault_rptr, + device->page_fault_ib1); + if (!ib_sz) { + KGSL_DRV_ERR(device, "Could not find size of fault ib 0x%x\n", + device->page_fault_ib1); + goto done; + } + ib_list.count = 0; + + KGSL_DRV_ERR(device, "Page faulting IB1 0x%x, size 0x%x\n", + device->page_fault_ib1, ib_sz); + dump_ib1(device, device->page_fault_ptbase, 0, + device->page_fault_ib1, ib_sz, &ib_list, true); + + /* print ib2's in faulting ib1 */ + for (i = 0; i < ib_list.count; i++) { + KGSL_DRV_ERR(device, "IB2 0x%x, size 0x%x\n", + ib_list.bases[i], ib_list.sizes[i]); + dump_ib(device, "IB2:", device->page_fault_ptbase, + ib_list.offsets[i], ib_list.bases[i], ib_list.sizes[i], + true); + } + KGSL_DRV_ERR(device, "Finished printing fault IB\n"); +done: + device->page_fault_ptbase = 0; +} diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h new file mode 100644 index 0000000000000..b976600e90102 --- /dev/null +++ b/drivers/gpu/msm/adreno_ringbuffer.h @@ -0,0 +1,166 @@ +/* Copyright (c) 2002,2007-2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef __ADRENO_RINGBUFFER_H +#define __ADRENO_RINGBUFFER_H + +#define GSL_RB_USE_MEM_RPTR +#define GSL_RB_USE_MEM_TIMESTAMP +#define GSL_DEVICE_SHADOW_MEMSTORE_TO_USER + +/* + * Adreno ringbuffer sizes in bytes - these are converted to + * the appropriate log2 values in the code + */ + +#define KGSL_RB_SIZE (32 * 1024) +#define KGSL_RB_BLKSIZE 16 + +/* CP timestamp register */ +#define REG_CP_TIMESTAMP REG_SCRATCH_REG0 + + +struct kgsl_device; +struct kgsl_device_private; +struct adreno_recovery_data; + +#define GSL_RB_MEMPTRS_SCRATCH_COUNT 8 +struct kgsl_rbmemptrs { + int rptr; + int wptr_poll; +}; + +#define GSL_RB_MEMPTRS_RPTR_OFFSET \ + (offsetof(struct kgsl_rbmemptrs, rptr)) + +#define GSL_RB_MEMPTRS_WPTRPOLL_OFFSET \ + (offsetof(struct kgsl_rbmemptrs, wptr_poll)) + +struct adreno_ringbuffer { + struct kgsl_device *device; + uint32_t flags; + + struct kgsl_memdesc buffer_desc; + + struct kgsl_memdesc memptrs_desc; + struct kgsl_rbmemptrs *memptrs; + + /*ringbuffer size */ + unsigned int sizedwords; + + unsigned int wptr; /* write pointer offset in dwords from baseaddr */ + unsigned int rptr; /* read pointer offset in dwords from baseaddr */ + uint32_t timestamp; +}; + + +#define GSL_RB_WRITE(ring, gpuaddr, data) \ + do { \ + *ring = data; \ + wmb(); \ + kgsl_cffdump_setmem(gpuaddr, data, 4); \ + ring++; \ + gpuaddr += sizeof(uint); \ + } while (0) + +/* timestamp */ +#ifdef GSL_DEVICE_SHADOW_MEMSTORE_TO_USER +#define GSL_RB_USE_MEM_TIMESTAMP +#endif /* GSL_DEVICE_SHADOW_MEMSTORE_TO_USER */ + +#ifdef GSL_RB_USE_MEM_TIMESTAMP +/* enable timestamp (...scratch0) memory shadowing */ +#define GSL_RB_MEMPTRS_SCRATCH_MASK 0x1 +#define GSL_RB_INIT_TIMESTAMP(rb) + +#else +#define GSL_RB_MEMPTRS_SCRATCH_MASK 0x0 +#define GSL_RB_INIT_TIMESTAMP(rb) \ + adreno_regwrite((rb)->device->id, REG_CP_TIMESTAMP, 0) + +#endif /* GSL_RB_USE_MEMTIMESTAMP */ + +/* mem rptr */ +#ifdef GSL_RB_USE_MEM_RPTR +#define GSL_RB_CNTL_NO_UPDATE 0x0 /* enable */ +#define GSL_RB_GET_READPTR(rb, data) \ + do { \ + *(data) = rb->memptrs->rptr; \ + } while (0) +#else +#define GSL_RB_CNTL_NO_UPDATE 0x1 /* disable */ +#define GSL_RB_GET_READPTR(rb, data) \ + do { \ + adreno_regread((rb)->device->id, REG_CP_RB_RPTR, (data)); \ + } while (0) +#endif /* GSL_RB_USE_MEMRPTR */ + +#define GSL_RB_CNTL_POLL_EN 0x0 /* disable */ + +int adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv, + struct kgsl_context *context, + struct kgsl_ibdesc *ibdesc, + unsigned int numibs, + uint32_t *timestamp, + unsigned int flags); + +int adreno_ringbuffer_init(struct kgsl_device *device); + +int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, + unsigned int init_ram); + +void adreno_ringbuffer_stop(struct adreno_ringbuffer *rb); + +void adreno_ringbuffer_close(struct adreno_ringbuffer *rb); + +void adreno_ringbuffer_issuecmds(struct kgsl_device *device, + struct adreno_context *drawctxt, + unsigned int flags, + unsigned int *cmdaddr, + int sizedwords); + +void kgsl_cp_intrcallback(struct kgsl_device *device); + +int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb, + struct adreno_recovery_data *rec_data); + +void +adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff, + int num_rb_contents); + +void adreno_print_fault_ib_work(struct work_struct *work); + +void adreno_print_fault_ib(struct kgsl_device *device); + +static inline int adreno_ringbuffer_count(struct adreno_ringbuffer *rb, + unsigned int rptr) +{ + if (rb->wptr >= rptr) + return rb->wptr - rptr; + return rb->wptr + 
rb->sizedwords - rptr; +} + +/* Increment a value by 4 bytes with wrap-around based on size */ +static inline unsigned int adreno_ringbuffer_inc_wrapped(unsigned int val, + unsigned int size) +{ + return (val + sizeof(unsigned int)) % size; +} + +/* Decrement a value by 4 bytes with wrap-around based on size */ +static inline unsigned int adreno_ringbuffer_dec_wrapped(unsigned int val, + unsigned int size) +{ + return (val + size - sizeof(unsigned int)) % size; +} + +#endif /* __ADRENO_RINGBUFFER_H */ diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c new file mode 100644 index 0000000000000..26fa42908de30 --- /dev/null +++ b/drivers/gpu/msm/adreno_snapshot.c @@ -0,0 +1,429 @@ +/* Copyright (c) 2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "kgsl.h" +#include "kgsl_sharedmem.h" +#include "kgsl_snapshot.h" + +#include "adreno.h" +#include "adreno_pm4types.h" +#include "a2xx_reg.h" + +/* Number of dwords of ringbuffer history to record */ +#define NUM_DWORDS_OF_RINGBUFFER_HISTORY 100 + +/* Maintain a list of the objects we see during parsing */ + +#define SNAPSHOT_OBJ_BUFSIZE 64 + +#define SNAPSHOT_OBJ_TYPE_IB 0 + +static struct kgsl_snapshot_obj { + int type; + uint32_t gpuaddr; + uint32_t ptbase; + void *ptr; + int dwords; +} objbuf[SNAPSHOT_OBJ_BUFSIZE]; + +/* Pointer to the next open entry in the object list */ +static int objbufptr; + +/* Push a new buffer object onto the list */ +static void push_object(struct kgsl_device *device, int type, uint32_t ptbase, + uint32_t gpuaddr, int dwords) +{ + int index; + void *ptr; + + /* + * Sometimes IBs can be reused in the same dump. Because we parse from + * oldest to newest, if we come across an IB that has already been used, + * assume that it has been reused and update the list with the newest + * size. 
+ */ + + for (index = 0; index < objbufptr; index++) { + if (objbuf[index].gpuaddr == gpuaddr && + objbuf[index].ptbase == ptbase) { + objbuf[index].dwords = dwords; + return; + } + } + + if (objbufptr == SNAPSHOT_OBJ_BUFSIZE) { + KGSL_DRV_ERR(device, "snapshot: too many snapshot objects\n"); + return; + } + + /* + * adreno_convertaddr verifies that the IB size is valid - at least in + * the context of it being smaller then the allocated memory space + */ + ptr = adreno_convertaddr(device, ptbase, gpuaddr, dwords << 2); + + if (ptr == NULL) { + KGSL_DRV_ERR(device, + "snapshot: Can't find GPU address for %x\n", gpuaddr); + return; + } + + /* Put it on the list of things to parse */ + objbuf[objbufptr].type = type; + objbuf[objbufptr].gpuaddr = gpuaddr; + objbuf[objbufptr].ptbase = ptbase; + objbuf[objbufptr].dwords = dwords; + objbuf[objbufptr++].ptr = ptr; +} + +/* + * Return a 1 if the specified object is already on the list of buffers + * to be dumped + */ + +static int find_object(int type, unsigned int gpuaddr, unsigned int ptbase) +{ + int index; + + for (index = 0; index < objbufptr; index++) { + if (objbuf[index].gpuaddr == gpuaddr && + objbuf[index].ptbase == ptbase && + objbuf[index].type == type) + return 1; + } + + return 0; +} + +/* Snapshot the istore memory */ +static int snapshot_istore(struct kgsl_device *device, void *snapshot, + int remain, void *priv) +{ + struct kgsl_snapshot_istore *header = snapshot; + unsigned int *data = snapshot + sizeof(*header); + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + int count, i; + + count = adreno_dev->istore_size * ADRENO_ISTORE_WORDS; + + if (remain < (count * 4) + sizeof(*header)) { + KGSL_DRV_ERR(device, + "snapshot: Not enough memory for the istore section"); + return 0; + } + + header->count = adreno_dev->istore_size; + + for (i = 0; i < count; i++) + kgsl_regread(device, ADRENO_ISTORE_START + i, &data[i]); + + return (count * 4) + sizeof(*header); +} + +/* Snapshot the ringbuffer memory */ +static int snapshot_rb(struct kgsl_device *device, void *snapshot, + int remain, void *priv) +{ + struct kgsl_snapshot_rb *header = snapshot; + unsigned int *data = snapshot + sizeof(*header); + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer; + unsigned int rbbase, ptbase, rptr, *rbptr; + int start, stop, index; + int numitems, size; + int parse_ibs = 0, ib_parse_start; + + /* Get the GPU address of the ringbuffer */ + kgsl_regread(device, REG_CP_RB_BASE, &rbbase); + + /* Get the physical address of the MMU pagetable */ + ptbase = kgsl_mmu_get_current_ptbase(device); + + /* Get the current read pointers for the RB */ + kgsl_regread(device, REG_CP_RB_RPTR, &rptr); + + /* start the dump at the rptr minus some history */ + start = (int) rptr - NUM_DWORDS_OF_RINGBUFFER_HISTORY; + if (start < 0) + start += rb->sizedwords; + + /* + * Stop the dump at the point where the software last wrote. Don't use + * the hardware value here on the chance that it didn't get properly + * updated + */ + + stop = (int) rb->wptr + 16; + if (stop > rb->sizedwords) + stop -= rb->sizedwords; + + /* Set up the header for the section */ + + numitems = (stop > start) ? 
stop - start : + (rb->sizedwords - start) + stop; + + size = (numitems << 2); + + if (remain < size + sizeof(*header)) { + KGSL_DRV_ERR(device, + "snapshot: Not enough memory for the rb section"); + return 0; + } + + /* Write the sub-header for the section */ + header->start = start; + header->end = stop; + header->wptr = rb->wptr; + header->rbsize = rb->sizedwords; + header->count = numitems; + + /* + * We can only reliably dump IBs from the beginning of the context, + * and it turns out that for the vast majority of the time we really + * only care about the current context when it comes to diagnosing + * a hang. So, with an eye to limiting the buffer dumping to what is + * really useful find the beginning of the context and only dump + * IBs from that point + */ + + index = rptr; + ib_parse_start = start; + rbptr = rb->buffer_desc.hostptr; + + while (index != start) { + index--; + + if (index < 0) { + /* + * The marker we are looking for is 2 dwords long, so + * when wrapping, go back 2 from the end so we don't + * access out of range in the if statement below + */ + index = rb->sizedwords - 2; + + /* + * Account for the possibility that start might be at + * rb->sizedwords - 1 + */ + + if (start == rb->sizedwords - 1) + break; + } + + /* + * Look for a NOP packet with the context switch identifier in + * the second dword + */ + + if (rbptr[index] == cp_nop_packet(1) && + rbptr[index + 1] == KGSL_CONTEXT_TO_MEM_IDENTIFIER) { + ib_parse_start = index; + break; + } + } + + index = start; + + /* + * Loop through the RB, copying the data and looking for indirect + * buffers and MMU pagetable changes + */ + + while (index != rb->wptr) { + *data = rbptr[index]; + + /* Only parse IBs between the context start and the rptr */ + + if (index == ib_parse_start) + parse_ibs = 1; + + if (index == rptr) + parse_ibs = 0; + + if (parse_ibs && adreno_cmd_is_ib(rbptr[index])) + push_object(device, SNAPSHOT_OBJ_TYPE_IB, ptbase, + rbptr[index + 1], rbptr[index + 2]); + + index = index + 1; + + if (index == rb->sizedwords) + index = 0; + + data++; + } + + /* Dump 16 dwords past the wptr, but don't bother interpeting it */ + + while (index != stop) { + *data = rbptr[index]; + index = index + 1; + + if (index == rb->sizedwords) + index = 0; + + data++; + } + + /* Return the size of the section */ + return size + sizeof(*header); +} + +/* Snapshot the memory for an indirect buffer */ +static int snapshot_ib(struct kgsl_device *device, void *snapshot, + int remain, void *priv) +{ + struct kgsl_snapshot_ib *header = snapshot; + struct kgsl_snapshot_obj *obj = priv; + unsigned int *src = obj->ptr; + unsigned int *dst = snapshot + sizeof(*header); + int i; + + if (remain < (obj->dwords << 2) + sizeof(*header)) { + KGSL_DRV_ERR(device, + "snapshot: Not enough memory for the ib section"); + return 0; + } + + /* Write the sub-header for the section */ + header->gpuaddr = obj->gpuaddr; + header->ptbase = obj->ptbase; + header->size = obj->dwords; + + /* Write the contents of the ib */ + for (i = 0; i < obj->dwords; i++) { + *dst = *src; + /* If another IB is discovered, then push it on the list too */ + + if (adreno_cmd_is_ib(*src)) + push_object(device, SNAPSHOT_OBJ_TYPE_IB, obj->ptbase, + *(src + 1), *(src + 2)); + + src++; + dst++; + } + + return (obj->dwords << 2) + sizeof(*header); +} + +/* Dump another item on the current pending list */ +static void *dump_object(struct kgsl_device *device, int obj, void *snapshot, + int *remain) +{ + switch (objbuf[obj].type) { + case SNAPSHOT_OBJ_TYPE_IB: + snapshot = 
kgsl_snapshot_add_section(device, + KGSL_SNAPSHOT_SECTION_IB, snapshot, remain, + snapshot_ib, &objbuf[obj]); + break; + default: + KGSL_DRV_ERR(device, + "snapshot: Invalid snapshot object type: %d\n", + objbuf[obj].type); + break; + } + + return snapshot; +} + +/* adreno_snapshot - Snapshot the Adreno GPU state + * @device - KGSL device to snapshot + * @snapshot - Pointer to the start of memory to write into + * @remain - A pointer to how many bytes of memory are remaining in the snapshot + * @hang - set if this snapshot was automatically triggered by a GPU hang + * This is a hook function called by kgsl_snapshot to snapshot the + * Adreno specific information for the GPU snapshot. In turn, this function + * calls the GPU specific snapshot function to get core specific information. + */ + +void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain, + int hang) +{ + int i; + uint32_t ptbase, ibbase, ibsize; + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + + /* Reset the list of objects */ + objbufptr = 0; + + /* Get the physical address of the MMU pagetable */ + ptbase = kgsl_mmu_get_current_ptbase(device); + + /* Dump the ringbuffer */ + snapshot = kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_RB, + snapshot, remain, snapshot_rb, NULL); + + /* + * Make sure that the last IB1 that was being executed is dumped. + * Since this was the last IB1 that was processed, we should have + * already added it to the list during the ringbuffer parse but we + * want to be double plus sure. + */ + + kgsl_regread(device, REG_CP_IB1_BASE, &ibbase); + kgsl_regread(device, REG_CP_IB1_BUFSZ, &ibsize); + + /* + * The problem is that IB size from the register is the unprocessed size + * of the buffer not the original size, so if we didn't catch this + * buffer being directly used in the RB, then we might not be able to + * dump the whle thing. Print a warning message so we can try to + * figure how often this really happens. + */ + + if (!find_object(SNAPSHOT_OBJ_TYPE_IB, ibbase, ptbase) && ibsize) { + push_object(device, SNAPSHOT_OBJ_TYPE_IB, ptbase, + ibbase, ibsize); + KGSL_DRV_ERR(device, "CP_IB1_BASE not found in the ringbuffer. " + "Dumping %x dwords of the buffer.\n", ibsize); + } + + kgsl_regread(device, REG_CP_IB2_BASE, &ibbase); + kgsl_regread(device, REG_CP_IB2_BUFSZ, &ibsize); + + /* + * Add the last parsed IB2 to the list. The IB2 should be found as we + * parse the objects below, but we try to add it to the list first, so + * it too can be parsed. Don't print an error message in this case - if + * the IB2 is found during parsing, the list will be updated with the + * correct size. + */ + + if (!find_object(SNAPSHOT_OBJ_TYPE_IB, ibbase, ptbase) && ibsize) { + push_object(device, SNAPSHOT_OBJ_TYPE_IB, ptbase, + ibbase, ibsize); + } + + /* + * Go through the list of found objects and dump each one. 
As the IBs + * are parsed, more objects might be found, and objbufptr will increase + */ + for (i = 0; i < objbufptr; i++) + snapshot = dump_object(device, i, snapshot, remain); + + /* + * Only dump the istore on a hang - reading it on a running system + * has a non 0 chance of hanging the GPU + */ + + if (hang) { + snapshot = kgsl_snapshot_add_section(device, + KGSL_SNAPSHOT_SECTION_ISTORE, snapshot, remain, + snapshot_istore, NULL); + } + + /* Add GPU specific sections - registers mainly, but other stuff too */ + if (adreno_dev->gpudev->snapshot) + snapshot = adreno_dev->gpudev->snapshot(adreno_dev, snapshot, + remain, hang); + + return snapshot; +} diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c new file mode 100644 index 0000000000000..0414652d64398 --- /dev/null +++ b/drivers/gpu/msm/kgsl.c @@ -0,0 +1,2516 @@ +/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kgsl.h" +#include "kgsl_debugfs.h" +#include "kgsl_cffdump.h" +#include "kgsl_log.h" +#include "kgsl_sharedmem.h" +#include "kgsl_device.h" +#include "kgsl_trace.h" +#include "kgsl_sync.h" + +#undef MODULE_PARAM_PREFIX +#define MODULE_PARAM_PREFIX "kgsl." + +static int kgsl_pagetable_count = KGSL_PAGETABLE_COUNT; +static char *ksgl_mmu_type; +module_param_named(ptcount, kgsl_pagetable_count, int, 0); +MODULE_PARM_DESC(kgsl_pagetable_count, +"Minimum number of pagetables for KGSL to allocate at initialization time"); +module_param_named(mmutype, ksgl_mmu_type, charp, 0); +MODULE_PARM_DESC(ksgl_mmu_type, +"Type of MMU to be used for graphics. 
Valid values are 'iommu' or 'gpummu' or 'nommu'"); + +static struct ion_client *kgsl_ion_client; + +/** + * kgsl_add_event - Add a new timstamp event for the KGSL device + * @device - KGSL device for the new event + * @ts - the timestamp to trigger the event on + * @cb - callback function to call when the timestamp expires + * @priv - private data for the specific event type + * @owner - driver instance that owns this event + * + * @returns - 0 on success or error code on failure + */ + +int kgsl_add_event(struct kgsl_device *device, u32 ts, + void (*cb)(struct kgsl_device *, void *, u32), void *priv, + void *owner) +{ + struct kgsl_event *event; + struct list_head *n; + unsigned int cur = device->ftbl->readtimestamp(device, + KGSL_TIMESTAMP_RETIRED); + + if (cb == NULL) + return -EINVAL; + + /* Check to see if the requested timestamp has already fired */ + + if (timestamp_cmp(cur, ts) >= 0) { + cb(device, priv, cur); + return 0; + } + + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (event == NULL) + return -ENOMEM; + + event->timestamp = ts; + event->priv = priv; + event->func = cb; + event->owner = owner; + + /* Add the event in order to the list */ + + for (n = device->events.next ; n != &device->events; n = n->next) { + struct kgsl_event *e = + list_entry(n, struct kgsl_event, list); + + if (timestamp_cmp(e->timestamp, ts) > 0) { + list_add(&event->list, n->prev); + break; + } + } + + if (n == &device->events) + list_add_tail(&event->list, &device->events); + + queue_work(device->work_queue, &device->ts_expired_ws); + return 0; +} +EXPORT_SYMBOL(kgsl_add_event); + +/** + * kgsl_cancel_events - Cancel all events for a process + * @device - KGSL device for the events to cancel + * @owner - driver instance that owns the events to cancel + * + */ +void kgsl_cancel_events(struct kgsl_device *device, + void *owner) +{ + struct kgsl_event *event, *event_tmp; + unsigned int cur = device->ftbl->readtimestamp(device, + KGSL_TIMESTAMP_RETIRED); + + list_for_each_entry_safe(event, event_tmp, &device->events, list) { + if (event->owner != owner) + continue; + /* + * "cancel" the events by calling their callback. + * Currently, events are used for lock and memory + * management, so if the process is dying the right + * thing to do is release or free. 
+ */ + if (event->func) + event->func(device, event->priv, cur); + + list_del(&event->list); + kfree(event); + } +} +EXPORT_SYMBOL(kgsl_cancel_events); + +static inline struct kgsl_mem_entry * +kgsl_mem_entry_create(void) +{ + struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL); + + if (!entry) + KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*entry)); + else + kref_init(&entry->refcount); + + return entry; +} + +void +kgsl_mem_entry_destroy(struct kref *kref) +{ + struct kgsl_mem_entry *entry = container_of(kref, + struct kgsl_mem_entry, + refcount); + + entry->priv->stats[entry->memtype].cur -= entry->memdesc.size; + + if (entry->memtype != KGSL_MEM_ENTRY_KERNEL) + kgsl_driver.stats.mapped -= entry->memdesc.size; + + /* + * Ion takes care of freeing the sglist for us (how nice ) so + * unmap the dma before freeing the sharedmem so kgsl_sharedmem_free + * doesn't try to free it again + */ + + if (entry->memtype == KGSL_MEM_ENTRY_ION) { + ion_unmap_dma(kgsl_ion_client, entry->priv_data); + entry->memdesc.sg = NULL; + } + + kgsl_sharedmem_free(&entry->memdesc); + + switch (entry->memtype) { + case KGSL_MEM_ENTRY_PMEM: + case KGSL_MEM_ENTRY_ASHMEM: + if (entry->priv_data) + fput(entry->priv_data); + break; + case KGSL_MEM_ENTRY_ION: + ion_free(kgsl_ion_client, entry->priv_data); + break; + } + + kfree(entry); +} +EXPORT_SYMBOL(kgsl_mem_entry_destroy); + +static +void kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry, + struct kgsl_process_private *process) +{ + struct rb_node **node; + struct rb_node *parent = NULL; + + spin_lock(&process->mem_lock); + + node = &process->mem_rb.rb_node; + + while (*node) { + struct kgsl_mem_entry *cur; + + parent = *node; + cur = rb_entry(parent, struct kgsl_mem_entry, node); + + if (entry->memdesc.gpuaddr < cur->memdesc.gpuaddr) + node = &parent->rb_left; + else + node = &parent->rb_right; + } + + rb_link_node(&entry->node, parent, node); + rb_insert_color(&entry->node, &process->mem_rb); + + spin_unlock(&process->mem_lock); + + entry->priv = process; +} + +/* Allocate a new context id */ + +static struct kgsl_context * +kgsl_create_context(struct kgsl_device_private *dev_priv) +{ + struct kgsl_context *context; + int ret, id; + + context = kzalloc(sizeof(*context), GFP_KERNEL); + + if (context == NULL) + return NULL; + + while (1) { + if (idr_pre_get(&dev_priv->device->context_idr, + GFP_KERNEL) == 0) { + kfree(context); + return NULL; + } + + ret = idr_get_new(&dev_priv->device->context_idr, + context, &id); + + if (ret != -EAGAIN) + break; + } + + if (ret) { + kfree(context); + return NULL; + } + + context->id = id; + context->dev_priv = dev_priv; + + if (kgsl_sync_timeline_create(context)) { + idr_remove(&dev_priv->device->context_idr, id); + kfree(context); + return NULL; + } + + return context; +} + +static void +kgsl_destroy_context(struct kgsl_device_private *dev_priv, + struct kgsl_context *context) +{ + int id; + + if (context == NULL) + return; + + /* Fire a bug if the devctxt hasn't been freed */ + BUG_ON(context->devctxt); + + id = context->id; + kgsl_sync_timeline_destroy(context); + kfree(context); + + idr_remove(&dev_priv->device->context_idr, id); +} +static inline int _mark_next_event(struct kgsl_device *device, + struct list_head *head) +{ + struct kgsl_event *event; + + if (!list_empty(head)) { + event = list_first_entry(head, struct kgsl_event, list); + if (device->ftbl->next_event) + return device->ftbl->next_event(device, event); + } + return 0; +} + +static void kgsl_timestamp_expired(struct work_struct *work) 
+{ + struct kgsl_device *device = container_of(work, struct kgsl_device, + ts_expired_ws); + struct kgsl_event *event, *event_tmp; + uint32_t ts_processed; + + mutex_lock(&device->mutex); + + while (1) { + /* get current EOP timestamp */ + ts_processed = device->ftbl->readtimestamp(device, + KGSL_TIMESTAMP_RETIRED); + + /* Process expired events */ + list_for_each_entry_safe(event, event_tmp, &device->events, list) { + if (timestamp_cmp(ts_processed, event->timestamp) < 0) + break; + + if (event->func) + event->func(device, event->priv, ts_processed); + + list_del(&event->list); + kfree(event); + } + + /* + * Keep looping until we hit an event which has not + * passed and then we write a dummy interrupt. + * mark_next_event will return 1 for every event + * that has passed and return 0 for the event which has not + * passed yet. + */ + if (_mark_next_event(device, &device->events) == 0) + break; + } + + mutex_unlock(&device->mutex); +} + +static void kgsl_check_idle_locked(struct kgsl_device *device) +{ + if (device->pwrctrl.nap_allowed == true && + device->state == KGSL_STATE_ACTIVE && + device->requested_state == KGSL_STATE_NONE) { + kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP); + if (kgsl_pwrctrl_sleep(device) != 0) + mod_timer(&device->idle_timer, + jiffies + + device->pwrctrl.interval_timeout); + } +} + +static void kgsl_check_idle(struct kgsl_device *device) +{ + mutex_lock(&device->mutex); + kgsl_check_idle_locked(device); + mutex_unlock(&device->mutex); +} + +struct kgsl_device *kgsl_get_device(int dev_idx) +{ + int i; + struct kgsl_device *ret = NULL; + + mutex_lock(&kgsl_driver.devlock); + + for (i = 0; i < KGSL_DEVICE_MAX; i++) { + if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->id == dev_idx) { + ret = kgsl_driver.devp[i]; + break; + } + } + + mutex_unlock(&kgsl_driver.devlock); + return ret; +} +EXPORT_SYMBOL(kgsl_get_device); + +static struct kgsl_device *kgsl_get_minor(int minor) +{ + struct kgsl_device *ret = NULL; + + if (minor < 0 || minor >= KGSL_DEVICE_MAX) + return NULL; + + mutex_lock(&kgsl_driver.devlock); + ret = kgsl_driver.devp[minor]; + mutex_unlock(&kgsl_driver.devlock); + + return ret; +} + +int kgsl_register_ts_notifier(struct kgsl_device *device, + struct notifier_block *nb) +{ + BUG_ON(device == NULL); + return atomic_notifier_chain_register(&device->ts_notifier_list, + nb); +} +EXPORT_SYMBOL(kgsl_register_ts_notifier); + +int kgsl_unregister_ts_notifier(struct kgsl_device *device, + struct notifier_block *nb) +{ + BUG_ON(device == NULL); + return atomic_notifier_chain_unregister(&device->ts_notifier_list, + nb); +} +EXPORT_SYMBOL(kgsl_unregister_ts_notifier); + +int kgsl_check_timestamp(struct kgsl_device *device, unsigned int timestamp) +{ + unsigned int ts_processed; + + ts_processed = device->ftbl->readtimestamp(device, + KGSL_TIMESTAMP_RETIRED); + + return (timestamp_cmp(ts_processed, timestamp) >= 0); +} +EXPORT_SYMBOL(kgsl_check_timestamp); + +static int kgsl_suspend_device(struct kgsl_device *device, pm_message_t state) +{ + int status = -EINVAL; + unsigned int nap_allowed_saved; + struct kgsl_pwrscale_policy *policy_saved; + + if (!device) + return -EINVAL; + + KGSL_PWR_WARN(device, "suspend start\n"); + + mutex_lock(&device->mutex); + nap_allowed_saved = device->pwrctrl.nap_allowed; + device->pwrctrl.nap_allowed = false; + policy_saved = device->pwrscale.policy; + device->pwrscale.policy = NULL; + kgsl_pwrctrl_request_state(device, KGSL_STATE_SUSPEND); + /* Make sure no user process is waiting for a timestamp * + * before supending */ + if 
(device->active_cnt != 0) { + mutex_unlock(&device->mutex); + wait_for_completion(&device->suspend_gate); + mutex_lock(&device->mutex); + } + /* Don't let the timer wake us during suspended sleep. */ + del_timer_sync(&device->idle_timer); + switch (device->state) { + case KGSL_STATE_INIT: + break; + case KGSL_STATE_ACTIVE: + /* Wait for the device to become idle */ + device->ftbl->idle(device, KGSL_TIMEOUT_DEFAULT); + case KGSL_STATE_NAP: + case KGSL_STATE_SLEEP: + /* Get the completion ready to be waited upon. */ + INIT_COMPLETION(device->hwaccess_gate); + device->ftbl->suspend_context(device); + device->ftbl->stop(device); + if (device->idle_wakelock.name) + wake_unlock(&device->idle_wakelock); + pm_qos_update_request(&device->pm_qos_req_dma, + PM_QOS_DEFAULT_VALUE); + kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND); + break; + case KGSL_STATE_SLUMBER: + INIT_COMPLETION(device->hwaccess_gate); + kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND); + break; + default: + KGSL_PWR_ERR(device, "suspend fail, device %d\n", + device->id); + goto end; + } + kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE); + device->pwrctrl.nap_allowed = nap_allowed_saved; + device->pwrscale.policy = policy_saved; + status = 0; + +end: + mutex_unlock(&device->mutex); + KGSL_PWR_WARN(device, "suspend end\n"); + return status; +} + +static int kgsl_resume_device(struct kgsl_device *device) +{ + int status = -EINVAL; + + if (!device) + return -EINVAL; + + KGSL_PWR_WARN(device, "resume start\n"); + mutex_lock(&device->mutex); + if (device->state == KGSL_STATE_SUSPEND) { + kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER); + status = 0; + complete_all(&device->hwaccess_gate); + } + kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE); + + mutex_unlock(&device->mutex); + KGSL_PWR_WARN(device, "resume end\n"); + return status; +} + +static int kgsl_suspend(struct device *dev) +{ + + pm_message_t arg = {0}; + struct kgsl_device *device = dev_get_drvdata(dev); + return kgsl_suspend_device(device, arg); +} + +static int kgsl_resume(struct device *dev) +{ + struct kgsl_device *device = dev_get_drvdata(dev); + return kgsl_resume_device(device); +} + +static int kgsl_runtime_suspend(struct device *dev) +{ + return 0; +} + +static int kgsl_runtime_resume(struct device *dev) +{ + return 0; +} + +const struct dev_pm_ops kgsl_pm_ops = { + .suspend = kgsl_suspend, + .resume = kgsl_resume, + .runtime_suspend = kgsl_runtime_suspend, + .runtime_resume = kgsl_runtime_resume, +}; +EXPORT_SYMBOL(kgsl_pm_ops); + +void kgsl_early_suspend_driver(struct early_suspend *h) +{ + struct kgsl_device *device = container_of(h, + struct kgsl_device, display_off); + KGSL_PWR_WARN(device, "early suspend start\n"); + mutex_lock(&device->mutex); + kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER); + kgsl_pwrctrl_sleep(device); + mutex_unlock(&device->mutex); + KGSL_PWR_WARN(device, "early suspend end\n"); +} +EXPORT_SYMBOL(kgsl_early_suspend_driver); + +int kgsl_suspend_driver(struct platform_device *pdev, + pm_message_t state) +{ + struct kgsl_device *device = dev_get_drvdata(&pdev->dev); + return kgsl_suspend_device(device, state); +} +EXPORT_SYMBOL(kgsl_suspend_driver); + +int kgsl_resume_driver(struct platform_device *pdev) +{ + struct kgsl_device *device = dev_get_drvdata(&pdev->dev); + return kgsl_resume_device(device); +} +EXPORT_SYMBOL(kgsl_resume_driver); + +void kgsl_late_resume_driver(struct early_suspend *h) +{ + struct kgsl_device *device = container_of(h, + struct kgsl_device, display_off); + KGSL_PWR_WARN(device, "late resume 
start\n"); + mutex_lock(&device->mutex); + device->pwrctrl.restore_slumber = 0; + kgsl_pwrctrl_wake(device); + kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_TURBO); + mutex_unlock(&device->mutex); + kgsl_check_idle(device); + KGSL_PWR_WARN(device, "late resume end\n"); +} +EXPORT_SYMBOL(kgsl_late_resume_driver); + +/* file operations */ +static struct kgsl_process_private * +kgsl_get_process_private(struct kgsl_device_private *cur_dev_priv) +{ + struct kgsl_process_private *private; + + mutex_lock(&kgsl_driver.process_mutex); + list_for_each_entry(private, &kgsl_driver.process_list, list) { + if (private->pid == task_tgid_nr(current)) { + private->refcnt++; + goto out; + } + } + + /* no existing process private found for this dev_priv, create one */ + private = kzalloc(sizeof(struct kgsl_process_private), GFP_KERNEL); + if (private == NULL) { + KGSL_DRV_ERR(cur_dev_priv->device, "kzalloc(%d) failed\n", + sizeof(struct kgsl_process_private)); + goto out; + } + + spin_lock_init(&private->mem_lock); + private->refcnt = 1; + private->pid = task_tgid_nr(current); + private->mem_rb = RB_ROOT; + + if (kgsl_mmu_enabled()) + { + unsigned long pt_name; + + pt_name = task_tgid_nr(current); + private->pagetable = kgsl_mmu_getpagetable(pt_name); + if (private->pagetable == NULL) { + kfree(private); + private = NULL; + goto out; + } + } + + list_add(&private->list, &kgsl_driver.process_list); + + kgsl_process_init_sysfs(private); + +out: + mutex_unlock(&kgsl_driver.process_mutex); + return private; +} + +static void +kgsl_put_process_private(struct kgsl_device *device, + struct kgsl_process_private *private) +{ + struct kgsl_mem_entry *entry = NULL; + struct rb_node *node; + + if (!private) + return; + + mutex_lock(&kgsl_driver.process_mutex); + + if (--private->refcnt) + goto unlock; + + kgsl_process_uninit_sysfs(private); + + list_del(&private->list); + + for (node = rb_first(&private->mem_rb); node; ) { + entry = rb_entry(node, struct kgsl_mem_entry, node); + node = rb_next(&entry->node); + + rb_erase(&entry->node, &private->mem_rb); + kgsl_mem_entry_put(entry); + } + kgsl_mmu_putpagetable(private->pagetable); + kfree(private); +unlock: + mutex_unlock(&kgsl_driver.process_mutex); +} + +static int kgsl_release(struct inode *inodep, struct file *filep) +{ + int result = 0; + struct kgsl_device_private *dev_priv = filep->private_data; + struct kgsl_process_private *private = dev_priv->process_priv; + struct kgsl_device *device = dev_priv->device; + struct kgsl_context *context; + int next = 0; + + filep->private_data = NULL; + + mutex_lock(&device->mutex); + kgsl_check_suspended(device); + + while (1) { + context = idr_get_next(&device->context_idr, &next); + if (context == NULL) + break; + + if (context->dev_priv == dev_priv) { + device->ftbl->drawctxt_destroy(device, context); + kgsl_destroy_context(dev_priv, context); + } + + next = next + 1; + } + + device->open_count--; + if (device->open_count == 0) { + result = device->ftbl->stop(device); + kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT); + } + /* clean up any to-be-freed entries that belong to this + * process and this device + */ + kgsl_cancel_events(device, dev_priv); + + mutex_unlock(&device->mutex); + kfree(dev_priv); + + kgsl_put_process_private(device, private); + + pm_runtime_put(device->parentdev); + return result; +} + +static int kgsl_open(struct inode *inodep, struct file *filep) +{ + int result; + struct kgsl_device_private *dev_priv; + struct kgsl_device *device; + unsigned int minor = iminor(inodep); + + device = 
kgsl_get_minor(minor); + BUG_ON(device == NULL); + + if (filep->f_flags & O_EXCL) { + KGSL_DRV_ERR(device, "O_EXCL not allowed\n"); + return -EBUSY; + } + + result = pm_runtime_get_sync(device->parentdev); + if (result < 0) { + KGSL_DRV_ERR(device, + "Runtime PM: Unable to wake up the device, rc = %d\n", + result); + return result; + } + result = 0; + + dev_priv = kzalloc(sizeof(struct kgsl_device_private), GFP_KERNEL); + if (dev_priv == NULL) { + KGSL_DRV_ERR(device, "kzalloc failed(%d)\n", + sizeof(struct kgsl_device_private)); + result = -ENOMEM; + goto err_pmruntime; + } + + dev_priv->device = device; + filep->private_data = dev_priv; + + /* Get file (per process) private struct */ + dev_priv->process_priv = kgsl_get_process_private(dev_priv); + if (dev_priv->process_priv == NULL) { + result = -ENOMEM; + goto err_freedevpriv; + } + + mutex_lock(&device->mutex); + kgsl_check_suspended(device); + + if (device->open_count == 0) { + result = device->ftbl->start(device, true); + + if (result) { + mutex_unlock(&device->mutex); + goto err_putprocess; + } + kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE); + } + device->open_count++; + mutex_unlock(&device->mutex); + + KGSL_DRV_INFO(device, "Initialized %s: mmu=%s pagetable_count=%d\n", + device->name, kgsl_mmu_enabled() ? "on" : "off", + kgsl_pagetable_count); + + return result; + +err_putprocess: + kgsl_put_process_private(device, dev_priv->process_priv); +err_freedevpriv: + filep->private_data = NULL; + kfree(dev_priv); +err_pmruntime: + pm_runtime_put(device->parentdev); + return result; +} + +/*call with private->mem_lock locked */ +struct kgsl_mem_entry * +kgsl_sharedmem_find_region(struct kgsl_process_private *private, + unsigned int gpuaddr, size_t size) +{ + struct rb_node *node = private->mem_rb.rb_node; + + if (!kgsl_mmu_gpuaddr_in_range(gpuaddr)) + return NULL; + + while (node != NULL) { + struct kgsl_mem_entry *entry; + + entry = rb_entry(node, struct kgsl_mem_entry, node); + + + if (kgsl_gpuaddr_in_memdesc(&entry->memdesc, gpuaddr, size)) + return entry; + + if (gpuaddr < entry->memdesc.gpuaddr) + node = node->rb_left; + else if (gpuaddr >= + (entry->memdesc.gpuaddr + entry->memdesc.size)) + node = node->rb_right; + else { + return NULL; + } + } + + return NULL; +} +EXPORT_SYMBOL(kgsl_sharedmem_find_region); + +/*call with private->mem_lock locked */ +static inline struct kgsl_mem_entry * +kgsl_sharedmem_find(struct kgsl_process_private *private, unsigned int gpuaddr) +{ + return kgsl_sharedmem_find_region(private, gpuaddr, 1); +} + +/*call all ioctl sub functions with driver locked*/ +static long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data) +{ + int result = 0; + struct kgsl_device_getproperty *param = data; + + switch (param->type) { + case KGSL_PROP_VERSION: + { + struct kgsl_version version; + if (param->sizebytes != sizeof(version)) { + result = -EINVAL; + break; + } + + version.drv_major = KGSL_VERSION_MAJOR; + version.drv_minor = KGSL_VERSION_MINOR; + version.dev_major = dev_priv->device->ver_major; + version.dev_minor = dev_priv->device->ver_minor; + + if (copy_to_user(param->value, &version, sizeof(version))) + result = -EFAULT; + + break; + } + case KGSL_PROP_GPU_RESET_STAT: + { + /* Return reset status of given context and clear it */ + uint32_t id; + struct kgsl_context *context; + + if (param->sizebytes != sizeof(unsigned int)) { + result = -EINVAL; + break; + } + /* We expect the value passed in to contain the context id */ + if (copy_from_user(&id, 
param->value, + sizeof(unsigned int))) { + result = -EFAULT; + break; + } + context = kgsl_find_context(dev_priv, id); + if (!context) { + result = -EINVAL; + break; + } + /* + * Copy the reset status to value which also serves as + * the out parameter + */ + if (copy_to_user(param->value, &(context->reset_status), + sizeof(unsigned int))) { + result = -EFAULT; + break; + } + /* Clear reset status once its been queried */ + context->reset_status = KGSL_CTX_STAT_NO_ERROR; + break; + } + default: + result = dev_priv->device->ftbl->getproperty( + dev_priv->device, param->type, + param->value, param->sizebytes); + } + + + return result; +} + +static long kgsl_ioctl_device_waittimestamp(struct kgsl_device_private + *dev_priv, unsigned int cmd, + void *data) +{ + int result = 0; + struct kgsl_device_waittimestamp *param = data; + + /* Set the active count so that suspend doesn't do the + wrong thing */ + + dev_priv->device->active_cnt++; + + trace_kgsl_waittimestamp_entry(dev_priv->device, param); + + result = dev_priv->device->ftbl->waittimestamp(dev_priv->device, + param->timestamp, + param->timeout); + + trace_kgsl_waittimestamp_exit(dev_priv->device, result); + + /* Fire off any pending suspend operations that are in flight */ + + INIT_COMPLETION(dev_priv->device->suspend_gate); + dev_priv->device->active_cnt--; + complete(&dev_priv->device->suspend_gate); + + return result; +} + +static long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data) +{ + int result = 0; + struct kgsl_ringbuffer_issueibcmds *param = data; + struct kgsl_ibdesc *ibdesc; + struct kgsl_context *context; + +#ifdef CONFIG_MSM_KGSL_DRM + kgsl_gpu_mem_flush(DRM_KGSL_GEM_CACHE_OP_TO_DEV); +#endif + + context = kgsl_find_context(dev_priv, param->drawctxt_id); + if (context == NULL) { + result = -EINVAL; + KGSL_DRV_ERR(dev_priv->device, + "invalid drawctxt drawctxt_id %d\n", + param->drawctxt_id); + goto done; + } + + if (param->flags & KGSL_CONTEXT_SUBMIT_IB_LIST) { + KGSL_DRV_INFO(dev_priv->device, + "Using IB list mode for ib submission, numibs: %d\n", + param->numibs); + if (!param->numibs) { + KGSL_DRV_ERR(dev_priv->device, + "Invalid numibs as parameter: %d\n", + param->numibs); + result = -EINVAL; + goto done; + } + + ibdesc = kzalloc(sizeof(struct kgsl_ibdesc) * param->numibs, + GFP_KERNEL); + if (!ibdesc) { + KGSL_MEM_ERR(dev_priv->device, + "kzalloc(%d) failed\n", + sizeof(struct kgsl_ibdesc) * param->numibs); + result = -ENOMEM; + goto done; + } + + if (copy_from_user(ibdesc, (void *)param->ibdesc_addr, + sizeof(struct kgsl_ibdesc) * param->numibs)) { + result = -EFAULT; + KGSL_DRV_ERR(dev_priv->device, + "copy_from_user failed\n"); + goto free_ibdesc; + } + } else { + KGSL_DRV_INFO(dev_priv->device, + "Using single IB submission mode for ib submission\n"); + /* If user space driver is still using the old mode of + * submitting single ib then we need to support that as well */ + ibdesc = kzalloc(sizeof(struct kgsl_ibdesc), GFP_KERNEL); + if (!ibdesc) { + KGSL_MEM_ERR(dev_priv->device, + "kzalloc(%d) failed\n", + sizeof(struct kgsl_ibdesc)); + result = -ENOMEM; + goto done; + } + ibdesc[0].gpuaddr = param->ibdesc_addr; + ibdesc[0].sizedwords = param->numibs; + param->numibs = 1; + } + + result = dev_priv->device->ftbl->issueibcmds(dev_priv, + context, + ibdesc, + param->numibs, + ¶m->timestamp, + param->flags); + + trace_kgsl_issueibcmds(dev_priv->device, param, result); + +free_ibdesc: + kfree(ibdesc); +done: + +#ifdef CONFIG_MSM_KGSL_DRM + 
kgsl_gpu_mem_flush(DRM_KGSL_GEM_CACHE_OP_FROM_DEV); +#endif + + return result; +} + +static long kgsl_ioctl_cmdstream_readtimestamp(struct kgsl_device_private + *dev_priv, unsigned int cmd, + void *data) +{ + struct kgsl_cmdstream_readtimestamp *param = data; + + param->timestamp = + dev_priv->device->ftbl->readtimestamp(dev_priv->device, + param->type); + + trace_kgsl_readtimestamp(dev_priv->device, param); + + return 0; +} + +static void kgsl_freemem_event_cb(struct kgsl_device *device, + void *priv, u32 timestamp) +{ + struct kgsl_mem_entry *entry = priv; + spin_lock(&entry->priv->mem_lock); + rb_erase(&entry->node, &entry->priv->mem_rb); + spin_unlock(&entry->priv->mem_lock); + kgsl_mem_entry_put(entry); +} + +static long kgsl_ioctl_cmdstream_freememontimestamp(struct kgsl_device_private + *dev_priv, unsigned int cmd, + void *data) +{ + int result = 0; + struct kgsl_cmdstream_freememontimestamp *param = data; + struct kgsl_mem_entry *entry = NULL; + + spin_lock(&dev_priv->process_priv->mem_lock); + entry = kgsl_sharedmem_find(dev_priv->process_priv, param->gpuaddr); + spin_unlock(&dev_priv->process_priv->mem_lock); + + if (entry) { + result = kgsl_add_event(dev_priv->device, param->timestamp, + kgsl_freemem_event_cb, entry, dev_priv); + } else { + KGSL_DRV_ERR(dev_priv->device, + "invalid gpuaddr %08x\n", param->gpuaddr); + result = -EINVAL; + } + + return result; +} + +static long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data) +{ + int result = 0; + struct kgsl_drawctxt_create *param = data; + struct kgsl_context *context = NULL; + + context = kgsl_create_context(dev_priv); + + if (context == NULL) { + result = -ENOMEM; + goto done; + } + + if (dev_priv->device->ftbl->drawctxt_create) + result = dev_priv->device->ftbl->drawctxt_create( + dev_priv->device, dev_priv->process_priv->pagetable, + context, param->flags); + + param->drawctxt_id = context->id; + +done: + if (result && context) + kgsl_destroy_context(dev_priv, context); + + return result; +} + +static long kgsl_ioctl_drawctxt_destroy(struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data) +{ + int result = 0; + struct kgsl_drawctxt_destroy *param = data; + struct kgsl_context *context; + + context = kgsl_find_context(dev_priv, param->drawctxt_id); + + if (context == NULL) { + result = -EINVAL; + goto done; + } + + if (dev_priv->device->ftbl->drawctxt_destroy) + dev_priv->device->ftbl->drawctxt_destroy(dev_priv->device, + context); + + kgsl_destroy_context(dev_priv, context); + +done: + return result; +} + +static long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data) +{ + int result = 0; + struct kgsl_sharedmem_free *param = data; + struct kgsl_process_private *private = dev_priv->process_priv; + struct kgsl_mem_entry *entry = NULL; + + spin_lock(&private->mem_lock); + entry = kgsl_sharedmem_find(private, param->gpuaddr); + if (entry) + rb_erase(&entry->node, &private->mem_rb); + + spin_unlock(&private->mem_lock); + + if (entry) { + kgsl_mem_entry_put(entry); + } else { + KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr); + result = -EINVAL; + } + + return result; +} + +static struct vm_area_struct *kgsl_get_vma_from_start_addr(unsigned int addr) +{ + struct vm_area_struct *vma; + + down_read(¤t->mm->mmap_sem); + vma = find_vma(current->mm, addr); + up_read(¤t->mm->mmap_sem); + if (!vma) + KGSL_CORE_ERR("find_vma(%x) failed\n", addr); + + return vma; +} + +static long +kgsl_ioctl_sharedmem_from_vmalloc(struct 
kgsl_device_private *dev_priv, + unsigned int cmd, void *data) +{ + int result = 0, len = 0; + struct kgsl_process_private *private = dev_priv->process_priv; + struct kgsl_sharedmem_from_vmalloc *param = data; + struct kgsl_mem_entry *entry = NULL; + struct vm_area_struct *vma; + + if (!kgsl_mmu_enabled()) + return -ENODEV; + + if (!param->hostptr) { + KGSL_CORE_ERR("invalid hostptr %x\n", param->hostptr); + result = -EINVAL; + goto error; + } + + vma = kgsl_get_vma_from_start_addr(param->hostptr); + if (!vma) { + result = -EINVAL; + goto error; + } + + /* + * If the user specified a length, use it, otherwise try to + * infer the length if the vma region + */ + if (param->gpuaddr != 0) { + len = param->gpuaddr; + } else { + /* + * For this to work, we have to assume the VMA region is only + * for this single allocation. If it isn't, then bail out + */ + if (vma->vm_pgoff || (param->hostptr != vma->vm_start)) { + KGSL_CORE_ERR("VMA region does not match hostaddr\n"); + result = -EINVAL; + goto error; + } + + len = vma->vm_end - vma->vm_start; + } + + /* Make sure it fits */ + if (len == 0 || param->hostptr + len > vma->vm_end) { + KGSL_CORE_ERR("Invalid memory allocation length %d\n", len); + result = -EINVAL; + goto error; + } + + entry = kgsl_mem_entry_create(); + if (entry == NULL) { + result = -ENOMEM; + goto error; + } + + result = kgsl_sharedmem_vmalloc_user(&entry->memdesc, + private->pagetable, len, + param->flags); + if (result != 0) + goto error_free_entry; + + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + + result = kgsl_sharedmem_map_vma(vma, &entry->memdesc); + if (result) { + KGSL_CORE_ERR("kgsl_sharedmem_map_vma failed: %d\n", result); + goto error_free_vmalloc; + } + + param->gpuaddr = entry->memdesc.gpuaddr; + + entry->memtype = KGSL_MEM_ENTRY_KERNEL; + + kgsl_mem_entry_attach_process(entry, private); + + /* Process specific statistics */ + kgsl_process_add_stats(private, entry->memtype, len); + + kgsl_check_idle(dev_priv->device); + return 0; + +error_free_vmalloc: + kgsl_sharedmem_free(&entry->memdesc); + +error_free_entry: + kfree(entry); + +error: + kgsl_check_idle(dev_priv->device); + return result; +} + +static inline int _check_region(unsigned long start, unsigned long size, + uint64_t len) +{ + uint64_t end = ((uint64_t) start) + size; + return (end > len); +} + +static int kgsl_get_phys_file(int fd, unsigned long *start, unsigned long *len, + unsigned long *vstart, struct file **filep) +{ + struct file *fbfile; + int ret = 0; + dev_t rdev; + struct fb_info *info; + + *filep = NULL; +#ifdef CONFIG_ANDROID_PMEM + if (!get_pmem_file(fd, start, vstart, len, filep)) + return 0; +#endif + + fbfile = fget(fd); + if (fbfile == NULL) { + KGSL_CORE_ERR("fget_light failed\n"); + return -1; + } + + rdev = fbfile->f_dentry->d_inode->i_rdev; + info = MAJOR(rdev) == FB_MAJOR ? 
registered_fb[MINOR(rdev)] : NULL; + if (info) { + *start = info->fix.smem_start; + *len = info->fix.smem_len; + *vstart = (unsigned long)__va(info->fix.smem_start); + ret = 0; + } else { + KGSL_CORE_ERR("framebuffer minor %d not found\n", + MINOR(rdev)); + ret = -1; + } + + fput(fbfile); + + return ret; +} + +static int kgsl_setup_phys_file(struct kgsl_mem_entry *entry, + struct kgsl_pagetable *pagetable, + unsigned int fd, unsigned int offset, + size_t size) +{ + int ret; + unsigned long phys, virt, len; + struct file *filep; + + ret = kgsl_get_phys_file(fd, &phys, &len, &virt, &filep); + if (ret) + return ret; + + if (phys == 0) { + ret = -EINVAL; + goto err; + } + + if (offset >= len) { + ret = -EINVAL; + goto err; + } + + if (size == 0) + size = len; + + /* Adjust the size of the region to account for the offset */ + size += offset & ~PAGE_MASK; + + size = ALIGN(size, PAGE_SIZE); + + if (_check_region(offset & PAGE_MASK, size, len)) { + KGSL_CORE_ERR("Offset (%ld) + size (%d) is larger" + "than pmem region length %ld\n", + offset & PAGE_MASK, size, len); + ret = -EINVAL; + goto err; + + } + + entry->priv_data = filep; + + entry->memdesc.pagetable = pagetable; + entry->memdesc.size = size; + entry->memdesc.physaddr = phys + (offset & PAGE_MASK); + entry->memdesc.hostptr = (void *) (virt + (offset & PAGE_MASK)); + + ret = memdesc_sg_phys(&entry->memdesc, + phys + (offset & PAGE_MASK), size); + if (ret) + goto err; + + return 0; +err: +#ifdef CONFIG_ANDROID_PMEM + put_pmem_file(filep); +#endif + return ret; +} + +static int memdesc_sg_virt(struct kgsl_memdesc *memdesc, + void *addr, int size) +{ + int i; + int sglen = PAGE_ALIGN(size) / PAGE_SIZE; + unsigned long paddr = (unsigned long) addr; + + memdesc->sg = kgsl_sg_alloc(sglen); + + if (memdesc->sg == NULL) + return -ENOMEM; + + memdesc->sglen = sglen; + sg_init_table(memdesc->sg, sglen); + + spin_lock(¤t->mm->page_table_lock); + + for (i = 0; i < sglen; i++, paddr += PAGE_SIZE) { + struct page *page; + pmd_t *ppmd; + pte_t *ppte; + pgd_t *ppgd = pgd_offset(current->mm, paddr); + + if (pgd_none(*ppgd) || pgd_bad(*ppgd)) + goto err; + + ppmd = pmd_offset(ppgd, paddr); + if (pmd_none(*ppmd) || pmd_bad(*ppmd)) + goto err; + + ppte = pte_offset_map(ppmd, paddr); + if (ppte == NULL) + goto err; + + page = pfn_to_page(pte_pfn(*ppte)); + if (!page) + goto err; + + sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0); + pte_unmap(ppte); + } + + spin_unlock(¤t->mm->page_table_lock); + + return 0; + +err: + spin_unlock(¤t->mm->page_table_lock); + kgsl_sg_free(memdesc->sg, sglen); + memdesc->sg = NULL; + + return -EINVAL; +} + +static int kgsl_setup_hostptr(struct kgsl_mem_entry *entry, + struct kgsl_pagetable *pagetable, + void *hostptr, unsigned int offset, + size_t size) +{ + struct vm_area_struct *vma; + unsigned int len; + + down_read(¤t->mm->mmap_sem); + vma = find_vma(current->mm, (unsigned int) hostptr); + up_read(¤t->mm->mmap_sem); + + if (!vma) { + KGSL_CORE_ERR("find_vma(%p) failed\n", hostptr); + return -EINVAL; + } + + /* We don't necessarily start at vma->vm_start */ + len = vma->vm_end - (unsigned long) hostptr; + + if (offset >= len) + return -EINVAL; + + if (!KGSL_IS_PAGE_ALIGNED((unsigned long) hostptr) || + !KGSL_IS_PAGE_ALIGNED(len)) { + KGSL_CORE_ERR("user address len(%u)" + "and start(%p) must be page" + "aligned\n", len, hostptr); + return -EINVAL; + } + + if (size == 0) + size = len; + + /* Adjust the size of the region to account for the offset */ + size += offset & ~PAGE_MASK; + + size = ALIGN(size, PAGE_SIZE); + + if 
(_check_region(offset & PAGE_MASK, size, len)) { + KGSL_CORE_ERR("Offset (%ld) + size (%d) is larger" + "than region length %d\n", + offset & PAGE_MASK, size, len); + return -EINVAL; + } + + entry->memdesc.pagetable = pagetable; + entry->memdesc.size = size; + entry->memdesc.hostptr = hostptr + (offset & PAGE_MASK); + + return memdesc_sg_virt(&entry->memdesc, + hostptr + (offset & PAGE_MASK), size); +} + +#ifdef CONFIG_ASHMEM +static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry, + struct kgsl_pagetable *pagetable, + int fd, void *hostptr, size_t size) +{ + int ret; + struct vm_area_struct *vma; + struct file *filep, *vmfile; + unsigned long len; + unsigned int hostaddr = (unsigned int) hostptr; + + vma = kgsl_get_vma_from_start_addr(hostaddr); + if (vma == NULL) + return -EINVAL; + + if (vma->vm_pgoff || vma->vm_start != hostaddr) { + KGSL_CORE_ERR("Invalid vma region\n"); + return -EINVAL; + } + + len = vma->vm_end - vma->vm_start; + + if (size == 0) + size = len; + + if (size != len) { + KGSL_CORE_ERR("Invalid size %d for vma region %p\n", + size, hostptr); + return -EINVAL; + } + + ret = get_ashmem_file(fd, &filep, &vmfile, &len); + + if (ret) { + KGSL_CORE_ERR("get_ashmem_file failed\n"); + return ret; + } + + if (vmfile != vma->vm_file) { + KGSL_CORE_ERR("ashmem shmem file does not match vma\n"); + ret = -EINVAL; + goto err; + } + + entry->priv_data = filep; + entry->memdesc.pagetable = pagetable; + entry->memdesc.size = ALIGN(size, PAGE_SIZE); + entry->memdesc.hostptr = hostptr; + + ret = memdesc_sg_virt(&entry->memdesc, hostptr, size); + if (ret) + goto err; + + return 0; + +err: + put_ashmem_file(filep); + return ret; +} +#else +static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry, + struct kgsl_pagetable *pagetable, + int fd, void *hostptr, size_t size) +{ + return -EINVAL; +} +#endif + +static int kgsl_setup_ion(struct kgsl_mem_entry *entry, + struct kgsl_pagetable *pagetable, int fd) +{ + struct ion_handle *handle; + struct scatterlist *s; + unsigned long flags; + + if (kgsl_ion_client == NULL) { + kgsl_ion_client = msm_ion_client_create(UINT_MAX, KGSL_NAME); + if (kgsl_ion_client == NULL) + return -ENODEV; + } + + handle = ion_import_fd(kgsl_ion_client, fd); + if (IS_ERR_OR_NULL(handle)) + return PTR_ERR(handle); + + entry->memtype = KGSL_MEM_ENTRY_ION; + entry->priv_data = handle; + entry->memdesc.pagetable = pagetable; + entry->memdesc.size = 0; + + if (ion_handle_get_flags(kgsl_ion_client, handle, &flags)) + goto err; + + entry->memdesc.sg = ion_map_dma(kgsl_ion_client, handle, flags); + + if (IS_ERR_OR_NULL(entry->memdesc.sg)) + goto err; + + /* Calculate the size of the memdesc from the sglist */ + + entry->memdesc.sglen = 0; + + for (s = entry->memdesc.sg; s != NULL; s = sg_next(s)) { + entry->memdesc.size += s->length; + entry->memdesc.sglen++; + } + + return 0; +err: + ion_free(kgsl_ion_client, handle); + return -ENOMEM; +} + +static long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data) +{ + int result = -EINVAL; + struct kgsl_map_user_mem *param = data; + struct kgsl_mem_entry *entry = NULL; + struct kgsl_process_private *private = dev_priv->process_priv; + enum kgsl_user_mem_type memtype; + + entry = kgsl_mem_entry_create(); + + if (entry == NULL) + return -ENOMEM; + + if (_IOC_SIZE(cmd) == sizeof(struct kgsl_sharedmem_from_pmem)) + memtype = KGSL_USER_MEM_TYPE_PMEM; + else + memtype = param->memtype; + + switch (memtype) { + case KGSL_USER_MEM_TYPE_PMEM: + if (param->fd == 0 || param->len == 0) + break; + + 
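/* kgsl_setup_phys_file() resolves the fd to a contiguous physical region (a PMEM buffer or a framebuffer), applies the page-aligned offset and builds the memdesc's physical scatter-gather list. */ +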
result = kgsl_setup_phys_file(entry, private->pagetable, + param->fd, param->offset, + param->len); + entry->memtype = KGSL_MEM_ENTRY_PMEM; + break; + + case KGSL_USER_MEM_TYPE_ADDR: + if (!kgsl_mmu_enabled()) { + KGSL_DRV_ERR(dev_priv->device, + "Cannot map paged memory with the " + "MMU disabled\n"); + break; + } + + if (param->hostptr == 0) + break; + + result = kgsl_setup_hostptr(entry, private->pagetable, + (void *) param->hostptr, + param->offset, param->len); + entry->memtype = KGSL_MEM_ENTRY_USER; + break; + + case KGSL_USER_MEM_TYPE_ASHMEM: + if (!kgsl_mmu_enabled()) { + KGSL_DRV_ERR(dev_priv->device, + "Cannot map paged memory with the " + "MMU disabled\n"); + break; + } + + if (param->hostptr == 0) + break; + + result = kgsl_setup_ashmem(entry, private->pagetable, + param->fd, (void *) param->hostptr, + param->len); + + entry->memtype = KGSL_MEM_ENTRY_ASHMEM; + break; + case KGSL_USER_MEM_TYPE_ION: + result = kgsl_setup_ion(entry, private->pagetable, + param->fd); + break; + default: + KGSL_CORE_ERR("Invalid memory type: %x\n", memtype); + break; + } + + if (result) + goto error; + + result = kgsl_mmu_map(private->pagetable, + &entry->memdesc, + GSL_PT_PAGE_RV | GSL_PT_PAGE_WV); + + if (result) + goto error_put_file_ptr; + + /* Adjust the returned value for a non 4k aligned offset */ + param->gpuaddr = entry->memdesc.gpuaddr + (param->offset & ~PAGE_MASK); + + KGSL_STATS_ADD(param->len, kgsl_driver.stats.mapped, + kgsl_driver.stats.mapped_max); + + kgsl_process_add_stats(private, entry->memtype, param->len); + + kgsl_mem_entry_attach_process(entry, private); + + kgsl_check_idle(dev_priv->device); + return result; + +error_put_file_ptr: + switch (entry->memtype) { + case KGSL_MEM_ENTRY_PMEM: + case KGSL_MEM_ENTRY_ASHMEM: + if (entry->priv_data) + fput(entry->priv_data); + break; + case KGSL_MEM_ENTRY_ION: + ion_unmap_dma(kgsl_ion_client, entry->priv_data); + ion_free(kgsl_ion_client, entry->priv_data); + break; + default: + break; + } +error: + kfree(entry); + kgsl_check_idle(dev_priv->device); + return result; +} + +/*This function flushes a graphics memory allocation from CPU cache + *when caching is enabled with MMU*/ +static long +kgsl_ioctl_sharedmem_flush_cache(struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data) +{ + int result = 0; + struct kgsl_mem_entry *entry; + struct kgsl_sharedmem_free *param = data; + struct kgsl_process_private *private = dev_priv->process_priv; + + spin_lock(&private->mem_lock); + entry = kgsl_sharedmem_find(private, param->gpuaddr); + if (!entry) { + KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr); + result = -EINVAL; + goto done; + } + if (!entry->memdesc.hostptr) { + KGSL_CORE_ERR("invalid hostptr with gpuaddr %08x\n", + param->gpuaddr); + goto done; + } + + kgsl_cache_range_op(&entry->memdesc, KGSL_CACHE_OP_CLEAN); +done: + spin_unlock(&private->mem_lock); + return result; +} + +static long +kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data) +{ + struct kgsl_process_private *private = dev_priv->process_priv; + struct kgsl_gpumem_alloc *param = data; + struct kgsl_mem_entry *entry; + int result; + + entry = kgsl_mem_entry_create(); + if (entry == NULL) + return -ENOMEM; + + result = kgsl_allocate_user(&entry->memdesc, private->pagetable, + param->size, param->flags); + + if (result == 0) { + entry->memtype = KGSL_MEM_ENTRY_KERNEL; + kgsl_mem_entry_attach_process(entry, private); + param->gpuaddr = entry->memdesc.gpuaddr; + + kgsl_process_add_stats(private, entry->memtype, 
param->size); + } else + kfree(entry); + + kgsl_check_idle(dev_priv->device); + return result; +} +static long kgsl_ioctl_cff_syncmem(struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data) +{ + int result = 0; + struct kgsl_cff_syncmem *param = data; + struct kgsl_process_private *private = dev_priv->process_priv; + struct kgsl_mem_entry *entry = NULL; + + spin_lock(&private->mem_lock); + entry = kgsl_sharedmem_find_region(private, param->gpuaddr, param->len); + if (entry) + kgsl_cffdump_syncmem(dev_priv, &entry->memdesc, param->gpuaddr, + param->len, true); + else + result = -EINVAL; + spin_unlock(&private->mem_lock); + return result; +} + +static long kgsl_ioctl_cff_user_event(struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data) +{ + int result = 0; + struct kgsl_cff_user_event *param = data; + + kgsl_cffdump_user_event(param->cff_opcode, param->op1, param->op2, + param->op3, param->op4, param->op5); + + return result; +} + +#ifdef CONFIG_GENLOCK +struct kgsl_genlock_event_priv { + struct genlock_handle *handle; + struct genlock *lock; +}; + +/** + * kgsl_genlock_event_cb - Event callback for a genlock timestamp event + * @device - The KGSL device that expired the timestamp + * @priv - private data for the event + * @timestamp - the timestamp that triggered the event + * + * Release a genlock lock following the expiration of a timestamp + */ + +static void kgsl_genlock_event_cb(struct kgsl_device *device, + void *priv, u32 timestamp) +{ + struct kgsl_genlock_event_priv *ev = priv; + int ret; + + ret = genlock_lock(ev->handle, GENLOCK_UNLOCK, 0, 0); + if (ret) + KGSL_CORE_ERR("Error while unlocking genlock: %d\n", ret); + + genlock_put_handle(ev->handle); + + kfree(ev); +} + +/** + * kgsl_add_genlock-event - Create a new genlock event + * @device - KGSL device to create the event on + * @timestamp - Timestamp to trigger the event + * @data - User space buffer containing struct kgsl_genlock_event_priv + * @len - length of the userspace buffer + * @owner - driver instance that owns this event + * @returns 0 on success or error code on error + * + * Attack to a genlock handle and register an event to release the + * genlock lock when the timestamp expires + */ + +static int kgsl_add_genlock_event(struct kgsl_device *device, + u32 timestamp, void __user *data, int len, + struct kgsl_device_private *owner) +{ + struct kgsl_genlock_event_priv *event; + struct kgsl_timestamp_event_genlock priv; + int ret; + + if (len != sizeof(priv)) + return -EINVAL; + + if (copy_from_user(&priv, data, sizeof(priv))) + return -EFAULT; + + event = kzalloc(sizeof(*event), GFP_KERNEL); + + if (event == NULL) + return -ENOMEM; + + event->handle = genlock_get_handle_fd(priv.handle); + + if (IS_ERR(event->handle)) { + int ret = PTR_ERR(event->handle); + kfree(event); + return ret; + } + + ret = kgsl_add_event(device, timestamp, kgsl_genlock_event_cb, event, + owner); + if (ret) + kfree(event); + + return ret; +} +#else +static long kgsl_add_genlock_event(struct kgsl_device *device, + u32 timestamp, void __user *data, int len, + struct kgsl_device_private *owner) +{ + return -EINVAL; +} +#endif + +/** + * kgsl_ioctl_timestamp_event - Register a new timestamp event from userspace + * @dev_priv - pointer to the private device structure + * @cmd - the ioctl cmd passed from kgsl_ioctl + * @data - the user data buffer from kgsl_ioctl + * @returns 0 on success or error code on failure + */ + +static long kgsl_ioctl_timestamp_event(struct kgsl_device_private *dev_priv, + unsigned int cmd, void 
*data) +{ + struct kgsl_timestamp_event *param = data; + int ret; + + switch (param->type) { + case KGSL_TIMESTAMP_EVENT_GENLOCK: + ret = kgsl_add_genlock_event(dev_priv->device, + param->timestamp, param->priv, param->len, + dev_priv); + break; + case KGSL_TIMESTAMP_EVENT_FENCE: + ret = kgsl_add_fence_event(dev_priv->device, + param->context_id, param->timestamp, param->priv, + param->len, dev_priv); + break; + default: + ret = -EINVAL; + } + + return ret; +} + +typedef long (*kgsl_ioctl_func_t)(struct kgsl_device_private *, + unsigned int, void *); + +#define KGSL_IOCTL_FUNC(_cmd, _func, _lock) \ + [_IOC_NR(_cmd)] = { .cmd = _cmd, .func = _func, .lock = _lock } + +static const struct { + unsigned int cmd; + kgsl_ioctl_func_t func; + int lock; +} kgsl_ioctl_funcs[] = { + KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_GETPROPERTY, + kgsl_ioctl_device_getproperty, 1), + KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP, + kgsl_ioctl_device_waittimestamp, 1), + KGSL_IOCTL_FUNC(IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS, + kgsl_ioctl_rb_issueibcmds, 1), + KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP, + kgsl_ioctl_cmdstream_readtimestamp, 1), + KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP, + kgsl_ioctl_cmdstream_freememontimestamp, 1), + KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_CREATE, + kgsl_ioctl_drawctxt_create, 1), + KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_DESTROY, + kgsl_ioctl_drawctxt_destroy, 1), + KGSL_IOCTL_FUNC(IOCTL_KGSL_MAP_USER_MEM, + kgsl_ioctl_map_user_mem, 0), + KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FROM_PMEM, + kgsl_ioctl_map_user_mem, 0), + KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FREE, + kgsl_ioctl_sharedmem_free, 0), + KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC, + kgsl_ioctl_sharedmem_from_vmalloc, 0), + KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE, + kgsl_ioctl_sharedmem_flush_cache, 0), + KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC, + kgsl_ioctl_gpumem_alloc, 0), + KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_SYNCMEM, + kgsl_ioctl_cff_syncmem, 0), + KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_USER_EVENT, + kgsl_ioctl_cff_user_event, 0), + KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMESTAMP_EVENT, + kgsl_ioctl_timestamp_event, 1), +}; + +static long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) +{ + struct kgsl_device_private *dev_priv = filep->private_data; + unsigned int nr; + kgsl_ioctl_func_t func; + int lock, ret; + char ustack[64]; + void *uptr = NULL; + + BUG_ON(dev_priv == NULL); + + /* Workaround for an previously incorrectly defined ioctl code. 
+ This helps ensure binary compatability */ + + if (cmd == IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD) + cmd = IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP; + else if (cmd == IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD) + cmd = IOCTL_KGSL_CMDSTREAM_READTIMESTAMP; + else if (cmd == IOCTL_KGSL_TIMESTAMP_EVENT_OLD) + cmd = IOCTL_KGSL_TIMESTAMP_EVENT; + + nr = _IOC_NR(cmd); + + if (cmd & (IOC_IN | IOC_OUT)) { + if (_IOC_SIZE(cmd) < sizeof(ustack)) + uptr = ustack; + else { + uptr = kzalloc(_IOC_SIZE(cmd), GFP_KERNEL); + if (uptr == NULL) { + KGSL_MEM_ERR(dev_priv->device, + "kzalloc(%d) failed\n", _IOC_SIZE(cmd)); + ret = -ENOMEM; + goto done; + } + } + + if (cmd & IOC_IN) { + if (copy_from_user(uptr, (void __user *) arg, + _IOC_SIZE(cmd))) { + ret = -EFAULT; + goto done; + } + } else + memset(uptr, 0, _IOC_SIZE(cmd)); + } + + if (nr < ARRAY_SIZE(kgsl_ioctl_funcs) && + kgsl_ioctl_funcs[nr].func != NULL) { + + /* + * Make sure that nobody tried to send us a malformed ioctl code + * with a valid NR but bogus flags + */ + + if (kgsl_ioctl_funcs[nr].cmd != cmd) { + KGSL_DRV_ERR(dev_priv->device, + "Malformed ioctl code %08x\n", cmd); + ret = -ENOIOCTLCMD; + goto done; + } + + func = kgsl_ioctl_funcs[nr].func; + lock = kgsl_ioctl_funcs[nr].lock; + } else { + func = dev_priv->device->ftbl->ioctl; + if (!func) { + KGSL_DRV_INFO(dev_priv->device, + "invalid ioctl code %08x\n", cmd); + ret = -EINVAL; + goto done; + } + lock = 1; + } + + if (lock) { + mutex_lock(&dev_priv->device->mutex); + kgsl_check_suspended(dev_priv->device); + } + + ret = func(dev_priv, cmd, uptr); + + if (lock) { + kgsl_check_idle_locked(dev_priv->device); + mutex_unlock(&dev_priv->device->mutex); + } + + if (ret == 0 && (cmd & IOC_OUT)) { + if (copy_to_user((void __user *) arg, uptr, _IOC_SIZE(cmd))) + ret = -EFAULT; + } + +done: + if (_IOC_SIZE(cmd) >= sizeof(ustack)) + kfree(uptr); + + return ret; +} + +static int +kgsl_mmap_memstore(struct kgsl_device *device, struct vm_area_struct *vma) +{ + struct kgsl_memdesc *memdesc = &device->memstore; + int result; + unsigned int vma_size = vma->vm_end - vma->vm_start; + + /* The memstore can only be mapped as read only */ + + if (vma->vm_flags & VM_WRITE) + return -EPERM; + + if (memdesc->size != vma_size) { + KGSL_MEM_ERR(device, "memstore bad size: %d should be %d\n", + vma_size, memdesc->size); + return -EINVAL; + } + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + result = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma_size, vma->vm_page_prot); + if (result != 0) + KGSL_MEM_ERR(device, "remap_pfn_range failed: %d\n", + result); + + return result; +} + +/* + * kgsl_gpumem_vm_open is called whenever a vma region is copied or split. 
+ * Increase the refcount to make sure that the accounting stays correct + */ + +static void kgsl_gpumem_vm_open(struct vm_area_struct *vma) +{ + struct kgsl_mem_entry *entry = vma->vm_private_data; + kgsl_mem_entry_get(entry); +} + +static int +kgsl_gpumem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct kgsl_mem_entry *entry = vma->vm_private_data; + + if (!entry->memdesc.ops || !entry->memdesc.ops->vmfault) + return VM_FAULT_SIGBUS; + + return entry->memdesc.ops->vmfault(&entry->memdesc, vma, vmf); +} + +static void +kgsl_gpumem_vm_close(struct vm_area_struct *vma) +{ + struct kgsl_mem_entry *entry = vma->vm_private_data; + kgsl_mem_entry_put(entry); +} + +static struct vm_operations_struct kgsl_gpumem_vm_ops = { + .open = kgsl_gpumem_vm_open, + .fault = kgsl_gpumem_vm_fault, + .close = kgsl_gpumem_vm_close, +}; + +static int kgsl_mmap(struct file *file, struct vm_area_struct *vma) +{ + unsigned long vma_offset = vma->vm_pgoff << PAGE_SHIFT; + struct kgsl_device_private *dev_priv = file->private_data; + struct kgsl_process_private *private = dev_priv->process_priv; + struct kgsl_mem_entry *entry = NULL; + struct kgsl_device *device = dev_priv->device; + + /* Handle leagacy behavior for memstore */ + + if (vma_offset == device->memstore.physaddr) + return kgsl_mmap_memstore(device, vma); + + /* Find a chunk of GPU memory */ + + spin_lock(&private->mem_lock); + entry = kgsl_sharedmem_find(private, vma_offset); + + if (entry) + kgsl_mem_entry_get(entry); + + spin_unlock(&private->mem_lock); + + if (entry == NULL) + return -EINVAL; + + if (!entry->memdesc.ops || + !entry->memdesc.ops->vmflags || + !entry->memdesc.ops->vmfault) + return -EINVAL; + + vma->vm_flags |= entry->memdesc.ops->vmflags(&entry->memdesc); + + vma->vm_private_data = entry; + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + vma->vm_ops = &kgsl_gpumem_vm_ops; + vma->vm_file = file; + + return 0; +} + +static const struct file_operations kgsl_fops = { + .owner = THIS_MODULE, + .release = kgsl_release, + .open = kgsl_open, + .mmap = kgsl_mmap, + .unlocked_ioctl = kgsl_ioctl, +}; + +struct kgsl_driver kgsl_driver = { + .process_mutex = __MUTEX_INITIALIZER(kgsl_driver.process_mutex), + .ptlock = __SPIN_LOCK_UNLOCKED(kgsl_driver.ptlock), + .devlock = __MUTEX_INITIALIZER(kgsl_driver.devlock), +}; +EXPORT_SYMBOL(kgsl_driver); + +void kgsl_unregister_device(struct kgsl_device *device) +{ + int minor; + + mutex_lock(&kgsl_driver.devlock); + for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) { + if (device == kgsl_driver.devp[minor]) + break; + } + + mutex_unlock(&kgsl_driver.devlock); + + if (minor == KGSL_DEVICE_MAX) + return; + + kgsl_device_snapshot_close(device); + + kgsl_cffdump_close(device->id); + kgsl_pwrctrl_uninit_sysfs(device); + + wake_lock_destroy(&device->idle_wakelock); + pm_qos_remove_request(&device->pm_qos_req_dma); + + idr_destroy(&device->context_idr); + + if (device->memstore.hostptr) + kgsl_sharedmem_free(&device->memstore); + + kgsl_mmu_close(device); + + if (device->work_queue) { + destroy_workqueue(device->work_queue); + device->work_queue = NULL; + } + + device_destroy(kgsl_driver.class, + MKDEV(MAJOR(kgsl_driver.major), minor)); + + mutex_lock(&kgsl_driver.devlock); + kgsl_driver.devp[minor] = NULL; + mutex_unlock(&kgsl_driver.devlock); +} +EXPORT_SYMBOL(kgsl_unregister_device); + +int +kgsl_register_device(struct kgsl_device *device) +{ + int minor, ret; + dev_t dev; + + /* Find a minor for the device */ + + mutex_lock(&kgsl_driver.devlock); + for (minor = 0; minor < 
KGSL_DEVICE_MAX; minor++) { + if (kgsl_driver.devp[minor] == NULL) { + kgsl_driver.devp[minor] = device; + break; + } + } + + mutex_unlock(&kgsl_driver.devlock); + + if (minor == KGSL_DEVICE_MAX) { + KGSL_CORE_ERR("minor devices exhausted\n"); + return -ENODEV; + } + + /* Create the device */ + dev = MKDEV(MAJOR(kgsl_driver.major), minor); + device->dev = device_create(kgsl_driver.class, + device->parentdev, + dev, device, + device->name); + + if (IS_ERR(device->dev)) { + ret = PTR_ERR(device->dev); + KGSL_CORE_ERR("device_create(%s): %d\n", device->name, ret); + goto err_devlist; + } + + dev_set_drvdata(device->parentdev, device); + + /* Generic device initialization */ + init_waitqueue_head(&device->wait_queue); + + kgsl_cffdump_open(device->id); + + init_completion(&device->hwaccess_gate); + init_completion(&device->suspend_gate); + + ATOMIC_INIT_NOTIFIER_HEAD(&device->ts_notifier_list); + + setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device); + ret = kgsl_create_device_workqueue(device); + if (ret) + goto err_devlist; + + INIT_WORK(&device->idle_check_ws, kgsl_idle_check); + INIT_WORK(&device->ts_expired_ws, kgsl_timestamp_expired); + + INIT_LIST_HEAD(&device->events); + + ret = kgsl_mmu_init(device); + if (ret != 0) + goto err_dest_work_q; + + ret = kgsl_allocate_contiguous(&device->memstore, + sizeof(struct kgsl_devmemstore)); + + if (ret != 0) + goto err_close_mmu; + + wake_lock_init(&device->idle_wakelock, WAKE_LOCK_IDLE, device->name); + pm_qos_add_request(&device->pm_qos_req_dma, PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); + + idr_init(&device->context_idr); + + /* Initalize the snapshot engine */ + kgsl_device_snapshot_init(device); + + /* sysfs and debugfs initalization - failure here is non fatal */ + + /* Initialize logging */ + kgsl_device_debugfs_init(device); + + /* Initialize common sysfs entries */ + kgsl_pwrctrl_init_sysfs(device); + + return 0; + +err_close_mmu: + kgsl_mmu_close(device); +err_dest_work_q: + destroy_workqueue(device->work_queue); + device->work_queue = NULL; +err_devlist: + mutex_lock(&kgsl_driver.devlock); + kgsl_driver.devp[minor] = NULL; + mutex_unlock(&kgsl_driver.devlock); + + return ret; +} +EXPORT_SYMBOL(kgsl_register_device); + +int kgsl_device_platform_probe(struct kgsl_device *device, + irqreturn_t (*dev_isr) (int, void*)) +{ + int status = -EINVAL; + struct kgsl_memregion *regspace = NULL; + struct resource *res; + struct platform_device *pdev = + container_of(device->parentdev, struct platform_device, dev); + + pm_runtime_enable(device->parentdev); + + status = kgsl_pwrctrl_init(device); + if (status) + goto error; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + device->iomemname); + if (res == NULL) { + KGSL_DRV_ERR(device, "platform_get_resource_byname failed\n"); + status = -EINVAL; + goto error_pwrctrl_close; + } + if (res->start == 0 || resource_size(res) == 0) { + KGSL_DRV_ERR(device, "dev %d invalid regspace\n", device->id); + status = -EINVAL; + goto error_pwrctrl_close; + } + + regspace = &device->regspace; + regspace->mmio_phys_base = res->start; + regspace->sizebytes = resource_size(res); + + if (!request_mem_region(regspace->mmio_phys_base, + regspace->sizebytes, device->name)) { + KGSL_DRV_ERR(device, "request_mem_region failed\n"); + status = -ENODEV; + goto error_pwrctrl_close; + } + + regspace->mmio_virt_base = ioremap(regspace->mmio_phys_base, + regspace->sizebytes); + + if (regspace->mmio_virt_base == NULL) { + KGSL_DRV_ERR(device, "ioremap failed\n"); + status = -ENODEV; + goto 
error_release_mem; + } + + status = request_irq(device->pwrctrl.interrupt_num, dev_isr, + IRQF_TRIGGER_HIGH, device->name, device); + if (status) { + KGSL_DRV_ERR(device, "request_irq(%d) failed: %d\n", + device->pwrctrl.interrupt_num, status); + goto error_iounmap; + } + device->pwrctrl.have_irq = 1; + disable_irq(device->pwrctrl.interrupt_num); + + KGSL_DRV_INFO(device, + "dev_id %d regs phys 0x%08x size 0x%08x virt %p\n", + device->id, regspace->mmio_phys_base, + regspace->sizebytes, regspace->mmio_virt_base); + + + status = kgsl_register_device(device); + if (!status) + return status; + + free_irq(device->pwrctrl.interrupt_num, NULL); + device->pwrctrl.have_irq = 0; +error_iounmap: + iounmap(regspace->mmio_virt_base); + regspace->mmio_virt_base = NULL; +error_release_mem: + release_mem_region(regspace->mmio_phys_base, regspace->sizebytes); +error_pwrctrl_close: + kgsl_pwrctrl_close(device); +error: + return status; +} +EXPORT_SYMBOL(kgsl_device_platform_probe); + +void kgsl_device_platform_remove(struct kgsl_device *device) +{ + struct kgsl_memregion *regspace = &device->regspace; + + kgsl_unregister_device(device); + + if (regspace->mmio_virt_base != NULL) { + iounmap(regspace->mmio_virt_base); + regspace->mmio_virt_base = NULL; + release_mem_region(regspace->mmio_phys_base, + regspace->sizebytes); + } + kgsl_pwrctrl_close(device); + + pm_runtime_disable(device->parentdev); +} +EXPORT_SYMBOL(kgsl_device_platform_remove); + +static int __devinit +kgsl_ptdata_init(void) +{ + kgsl_driver.ptpool = kgsl_mmu_ptpool_init(kgsl_pagetable_count); + + if (!kgsl_driver.ptpool) + return -ENOMEM; + return 0; +} + +static void kgsl_core_exit(void) +{ + unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX); + + kgsl_mmu_ptpool_destroy(&kgsl_driver.ptpool); + kgsl_driver.ptpool = NULL; + + device_unregister(&kgsl_driver.virtdev); + + if (kgsl_driver.class) { + class_destroy(kgsl_driver.class); + kgsl_driver.class = NULL; + } + + kgsl_drm_exit(); + kgsl_cffdump_destroy(); + kgsl_core_debugfs_close(); + kgsl_sharedmem_uninit_sysfs(); +} + +static int __init kgsl_core_init(void) +{ + int result = 0; + /* alloc major and minor device numbers */ + result = alloc_chrdev_region(&kgsl_driver.major, 0, KGSL_DEVICE_MAX, + KGSL_NAME); + if (result < 0) { + KGSL_CORE_ERR("alloc_chrdev_region failed err = %d\n", result); + goto err; + } + + cdev_init(&kgsl_driver.cdev, &kgsl_fops); + kgsl_driver.cdev.owner = THIS_MODULE; + kgsl_driver.cdev.ops = &kgsl_fops; + result = cdev_add(&kgsl_driver.cdev, MKDEV(MAJOR(kgsl_driver.major), 0), + KGSL_DEVICE_MAX); + + if (result) { + KGSL_CORE_ERR("kgsl: cdev_add() failed, dev_num= %d," + " result= %d\n", kgsl_driver.major, result); + goto err; + } + + kgsl_driver.class = class_create(THIS_MODULE, KGSL_NAME); + + if (IS_ERR(kgsl_driver.class)) { + result = PTR_ERR(kgsl_driver.class); + KGSL_CORE_ERR("failed to create class %s", KGSL_NAME); + goto err; + } + + /* Make a virtual device for managing core related things + in sysfs */ + kgsl_driver.virtdev.class = kgsl_driver.class; + dev_set_name(&kgsl_driver.virtdev, "kgsl"); + result = device_register(&kgsl_driver.virtdev); + if (result) { + KGSL_CORE_ERR("driver_register failed\n"); + goto err; + } + + /* Make kobjects in the virtual device for storing statistics */ + + kgsl_driver.ptkobj = + kobject_create_and_add("pagetables", + &kgsl_driver.virtdev.kobj); + + kgsl_driver.prockobj = + kobject_create_and_add("proc", + &kgsl_driver.virtdev.kobj); + + kgsl_core_debugfs_init(); + + kgsl_sharedmem_init_sysfs(); + 
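/* Remaining core init: bring up CFF dumping, the global process and pagetable lists, the configured MMU type (plus the shared pagetable pool when the GPU MMU is selected) and the DRM layer. */ +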
kgsl_cffdump_init(); + + INIT_LIST_HEAD(&kgsl_driver.process_list); + + INIT_LIST_HEAD(&kgsl_driver.pagetable_list); + + kgsl_mmu_set_mmutype(ksgl_mmu_type); + + if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype()) { + result = kgsl_ptdata_init(); + if (result) + goto err; + } + + result = kgsl_drm_init(NULL); + + if (result) + goto err; + + return 0; + +err: + kgsl_core_exit(); + return result; +} + +module_init(kgsl_core_init); +module_exit(kgsl_core_exit); + +MODULE_AUTHOR("Qualcomm Innovation Center, Inc."); +MODULE_DESCRIPTION("MSM GPU driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h new file mode 100644 index 0000000000000..d0f7c67324283 --- /dev/null +++ b/drivers/gpu/msm/kgsl.h @@ -0,0 +1,252 @@ +/* Copyright (c) 2008-2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __KGSL_H +#define __KGSL_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define KGSL_NAME "kgsl" + +/* Timestamp window used to detect rollovers (half of integer range) */ +#define KGSL_TIMESTAMP_WINDOW 0x80000000 + +/*cache coherency ops */ +#define DRM_KGSL_GEM_CACHE_OP_TO_DEV 0x0001 +#define DRM_KGSL_GEM_CACHE_OP_FROM_DEV 0x0002 + +/* The size of each entry in a page table */ +#define KGSL_PAGETABLE_ENTRY_SIZE 4 + +/* Pagetable Virtual Address base */ +#define KGSL_PAGETABLE_BASE 0x66000000 + +/* Extra accounting entries needed in the pagetable */ +#define KGSL_PT_EXTRA_ENTRIES 16 + +#define KGSL_PAGETABLE_ENTRIES(_sz) (((_sz) >> PAGE_SHIFT) + \ + KGSL_PT_EXTRA_ENTRIES) + +#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE +#define KGSL_PAGETABLE_COUNT (CONFIG_MSM_KGSL_PAGE_TABLE_COUNT) +#else +#define KGSL_PAGETABLE_COUNT 1 +#endif + +/* Casting using container_of() for structures that kgsl owns. 
*/ +#define KGSL_CONTAINER_OF(ptr, type, member) \ + container_of(ptr, type, member) + +/* A macro for memory statistics - add the new size to the stat and if + the statisic is greater then _max, set _max +*/ + +#define KGSL_STATS_ADD(_size, _stat, _max) \ + do { _stat += (_size); if (_stat > _max) _max = _stat; } while (0) + +struct kgsl_device; + +struct kgsl_driver { + struct cdev cdev; + dev_t major; + struct class *class; + /* Virtual device for managing the core */ + struct device virtdev; + /* Kobjects for storing pagetable and process statistics */ + struct kobject *ptkobj; + struct kobject *prockobj; + struct kgsl_device *devp[KGSL_DEVICE_MAX]; + + /* Global lilst of open processes */ + struct list_head process_list; + /* Global list of pagetables */ + struct list_head pagetable_list; + /* Spinlock for accessing the pagetable list */ + spinlock_t ptlock; + /* Mutex for accessing the process list */ + struct mutex process_mutex; + + /* Mutex for protecting the device list */ + struct mutex devlock; + + void *ptpool; + + struct { + unsigned int vmalloc; + unsigned int vmalloc_max; + unsigned int coherent; + unsigned int coherent_max; + unsigned int mapped; + unsigned int mapped_max; + unsigned int histogram[16]; + } stats; +}; + +extern struct kgsl_driver kgsl_driver; + +struct kgsl_pagetable; +struct kgsl_memdesc; + +struct kgsl_memdesc_ops { + int (*vmflags)(struct kgsl_memdesc *); + int (*vmfault)(struct kgsl_memdesc *, struct vm_area_struct *, + struct vm_fault *); + void (*free)(struct kgsl_memdesc *memdesc); + int (*map_kernel_mem)(struct kgsl_memdesc *); +}; + +/* shared memory allocation */ +struct kgsl_memdesc { + struct kgsl_pagetable *pagetable; + void *hostptr; + unsigned int gpuaddr; + unsigned int physaddr; + unsigned int size; + unsigned int priv; + struct scatterlist *sg; + unsigned int sglen; + struct kgsl_memdesc_ops *ops; +}; + +/* List of different memory entry types */ + +#define KGSL_MEM_ENTRY_KERNEL 0 +#define KGSL_MEM_ENTRY_PMEM 1 +#define KGSL_MEM_ENTRY_ASHMEM 2 +#define KGSL_MEM_ENTRY_USER 3 +#define KGSL_MEM_ENTRY_ION 4 +#define KGSL_MEM_ENTRY_MAX 5 + +struct kgsl_mem_entry { + struct kref refcount; + struct kgsl_memdesc memdesc; + int memtype; + void *priv_data; + struct rb_node node; + uint32_t free_timestamp; + /* back pointer to private structure under whose context this + * allocation is made */ + struct kgsl_process_private *priv; +}; + +#ifdef CONFIG_MSM_KGSL_MMU_PAGE_FAULT +#define MMU_CONFIG 2 +#else +#define MMU_CONFIG 1 +#endif + +void kgsl_mem_entry_destroy(struct kref *kref); +struct kgsl_mem_entry *kgsl_sharedmem_find_region( + struct kgsl_process_private *private, unsigned int gpuaddr, + size_t size); + +extern const struct dev_pm_ops kgsl_pm_ops; + +struct early_suspend; +int kgsl_suspend_driver(struct platform_device *pdev, pm_message_t state); +int kgsl_resume_driver(struct platform_device *pdev); +void kgsl_early_suspend_driver(struct early_suspend *h); +void kgsl_late_resume_driver(struct early_suspend *h); + +#ifdef CONFIG_MSM_KGSL_DRM +extern int kgsl_drm_init(struct platform_device *dev); +extern void kgsl_drm_exit(void); +extern void kgsl_gpu_mem_flush(int op); +#else +static inline int kgsl_drm_init(struct platform_device *dev) +{ + return 0; +} + +static inline void kgsl_drm_exit(void) +{ +} +#endif + +static inline int kgsl_gpuaddr_in_memdesc(const struct kgsl_memdesc *memdesc, + unsigned int gpuaddr, unsigned int size) +{ + if (gpuaddr >= memdesc->gpuaddr && + ((gpuaddr + size) <= (memdesc->gpuaddr + memdesc->size))) { + return 
1; + } + return 0; +} + +static inline void *kgsl_memdesc_map(struct kgsl_memdesc *memdesc) +{ + if (memdesc->hostptr == NULL && memdesc->ops && + memdesc->ops->map_kernel_mem) + memdesc->ops->map_kernel_mem(memdesc); + + return memdesc->hostptr; +} + +static inline uint8_t *kgsl_gpuaddr_to_vaddr(struct kgsl_memdesc *memdesc, + unsigned int gpuaddr) +{ + void *hostptr = NULL; + + if ((gpuaddr >= memdesc->gpuaddr) && + (gpuaddr < (memdesc->gpuaddr + memdesc->size))) + hostptr = kgsl_memdesc_map(memdesc); + + return hostptr != NULL ? hostptr + (gpuaddr - memdesc->gpuaddr) : NULL; +} + +static inline int timestamp_cmp(unsigned int a, unsigned int b) +{ + /* check for equal */ + if (a == b) + return 0; + + /* check for greater-than for non-rollover case */ + if ((a > b) && (a - b < KGSL_TIMESTAMP_WINDOW)) + return 1; + + /* check for greater-than for rollover case + * note that <= is required to ensure that consistent + * results are returned for values whose difference is + * equal to the window size + */ + a += KGSL_TIMESTAMP_WINDOW; + b += KGSL_TIMESTAMP_WINDOW; + return ((a > b) && (a - b <= KGSL_TIMESTAMP_WINDOW)) ? 1 : -1; +} + +static inline void +kgsl_mem_entry_get(struct kgsl_mem_entry *entry) +{ + kref_get(&entry->refcount); +} + +static inline void +kgsl_mem_entry_put(struct kgsl_mem_entry *entry) +{ + kref_put(&entry->refcount, kgsl_mem_entry_destroy); +} +int kgsl_add_event(struct kgsl_device *device, u32 ts, + void (*cb)(struct kgsl_device *, void *, u32), void *priv, + void *owner); + +void kgsl_cancel_events(struct kgsl_device *device, + void *owner); + +#endif /* __KGSL_H */ diff --git a/drivers/gpu/msm/kgsl_cffdump.c b/drivers/gpu/msm/kgsl_cffdump.c new file mode 100644 index 0000000000000..e06c94d6b9fbf --- /dev/null +++ b/drivers/gpu/msm/kgsl_cffdump.c @@ -0,0 +1,591 @@ +/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +/* #define DEBUG */ +#define ALIGN_CPU + +#include +#include +#include +#include +#include +#include +#include + +#include "kgsl.h" +#include "kgsl_cffdump.h" +#include "kgsl_debugfs.h" +#include "kgsl_log.h" +#include "kgsl_sharedmem.h" +#include "adreno_pm4types.h" + +static struct rchan *chan; +static struct dentry *dir; +static int suspended; +static size_t dropped; +static size_t subbuf_size = 256*1024; +static size_t n_subbufs = 64; + +/* forward declarations */ +static void destroy_channel(void); +static struct rchan *create_channel(unsigned subbuf_size, unsigned n_subbufs); + +static spinlock_t cffdump_lock; +static ulong serial_nr; +static ulong total_bytes; +static ulong total_syncmem; +static long last_sec; + +#define MEMBUF_SIZE 64 + +#define CFF_OP_WRITE_REG 0x00000002 +struct cff_op_write_reg { + unsigned char op; + uint addr; + uint value; +} __packed; + +#define CFF_OP_POLL_REG 0x00000004 +struct cff_op_poll_reg { + unsigned char op; + uint addr; + uint value; + uint mask; +} __packed; + +#define CFF_OP_WAIT_IRQ 0x00000005 +struct cff_op_wait_irq { + unsigned char op; +} __packed; + +#define CFF_OP_RMW 0x0000000a + +#define CFF_OP_WRITE_MEM 0x0000000b +struct cff_op_write_mem { + unsigned char op; + uint addr; + uint value; +} __packed; + +#define CFF_OP_WRITE_MEMBUF 0x0000000c +struct cff_op_write_membuf { + unsigned char op; + uint addr; + ushort count; + uint buffer[MEMBUF_SIZE]; +} __packed; + +#define CFF_OP_MEMORY_BASE 0x0000000d +struct cff_op_memory_base { + unsigned char op; + uint base; + uint size; + uint gmemsize; +} __packed; + +#define CFF_OP_HANG 0x0000000e +struct cff_op_hang { + unsigned char op; +} __packed; + +#define CFF_OP_EOF 0xffffffff +struct cff_op_eof { + unsigned char op; +} __packed; + +#define CFF_OP_VERIFY_MEM_FILE 0x00000007 +#define CFF_OP_WRITE_SURFACE_PARAMS 0x00000011 +struct cff_op_user_event { + unsigned char op; + unsigned int op1; + unsigned int op2; + unsigned int op3; + unsigned int op4; + unsigned int op5; +} __packed; + + +static void b64_encodeblock(unsigned char in[3], unsigned char out[4], int len) +{ + static const char tob64[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmno" + "pqrstuvwxyz0123456789+/"; + + out[0] = tob64[in[0] >> 2]; + out[1] = tob64[((in[0] & 0x03) << 4) | ((in[1] & 0xf0) >> 4)]; + out[2] = (unsigned char) (len > 1 ? tob64[((in[1] & 0x0f) << 2) + | ((in[2] & 0xc0) >> 6)] : '='); + out[3] = (unsigned char) (len > 2 ? tob64[in[2] & 0x3f] : '='); +} + +static void b64_encode(const unsigned char *in_buf, int in_size, + unsigned char *out_buf, int out_bufsize, int *out_size) +{ + unsigned char in[3], out[4]; + int i, len; + + *out_size = 0; + while (in_size > 0) { + len = 0; + for (i = 0; i < 3; ++i) { + if (in_size-- > 0) { + in[i] = *in_buf++; + ++len; + } else + in[i] = 0; + } + if (len) { + b64_encodeblock(in, out, len); + if (out_bufsize < 4) { + pr_warn("kgsl: cffdump: %s: out of buffer\n", + __func__); + return; + } + for (i = 0; i < 4; ++i) + *out_buf++ = out[i]; + *out_size += 4; + out_bufsize -= 4; + } + } +} + +#define KLOG_TMPBUF_SIZE (1024) +static void klog_printk(const char *fmt, ...) 
+{ + /* per-cpu klog formatting temporary buffer */ + static char klog_buf[NR_CPUS][KLOG_TMPBUF_SIZE]; + + va_list args; + int len; + char *cbuf; + unsigned long flags; + + local_irq_save(flags); + cbuf = klog_buf[smp_processor_id()]; + va_start(args, fmt); + len = vsnprintf(cbuf, KLOG_TMPBUF_SIZE, fmt, args); + total_bytes += len; + va_end(args); + relay_write(chan, cbuf, len); + local_irq_restore(flags); +} + +static struct cff_op_write_membuf cff_op_write_membuf; +static void cffdump_membuf(int id, unsigned char *out_buf, int out_bufsize) +{ + void *data; + int len, out_size; + struct cff_op_write_mem cff_op_write_mem; + + uint addr = cff_op_write_membuf.addr + - sizeof(uint)*cff_op_write_membuf.count; + + if (!cff_op_write_membuf.count) { + pr_warn("kgsl: cffdump: membuf: count == 0, skipping"); + return; + } + + if (cff_op_write_membuf.count != 1) { + cff_op_write_membuf.op = CFF_OP_WRITE_MEMBUF; + cff_op_write_membuf.addr = addr; + len = sizeof(cff_op_write_membuf) - + sizeof(uint)*(MEMBUF_SIZE - cff_op_write_membuf.count); + data = &cff_op_write_membuf; + } else { + cff_op_write_mem.op = CFF_OP_WRITE_MEM; + cff_op_write_mem.addr = addr; + cff_op_write_mem.value = cff_op_write_membuf.buffer[0]; + data = &cff_op_write_mem; + len = sizeof(cff_op_write_mem); + } + b64_encode(data, len, out_buf, out_bufsize, &out_size); + out_buf[out_size] = 0; + klog_printk("%ld:%d;%s\n", ++serial_nr, id, out_buf); + cff_op_write_membuf.count = 0; + cff_op_write_membuf.addr = 0; +} + +static void cffdump_printline(int id, uint opcode, uint op1, uint op2, + uint op3, uint op4, uint op5) +{ + struct cff_op_write_reg cff_op_write_reg; + struct cff_op_poll_reg cff_op_poll_reg; + struct cff_op_wait_irq cff_op_wait_irq; + struct cff_op_memory_base cff_op_memory_base; + struct cff_op_hang cff_op_hang; + struct cff_op_eof cff_op_eof; + struct cff_op_user_event cff_op_user_event; + unsigned char out_buf[sizeof(cff_op_write_membuf)/3*4 + 16]; + void *data; + int len = 0, out_size; + long cur_secs; + + spin_lock(&cffdump_lock); + if (opcode == CFF_OP_WRITE_MEM) { + if ((cff_op_write_membuf.addr != op1 && + cff_op_write_membuf.count) + || (cff_op_write_membuf.count == MEMBUF_SIZE)) + cffdump_membuf(id, out_buf, sizeof(out_buf)); + + cff_op_write_membuf.buffer[cff_op_write_membuf.count++] = op2; + cff_op_write_membuf.addr = op1 + sizeof(uint); + spin_unlock(&cffdump_lock); + return; + } else if (cff_op_write_membuf.count) + cffdump_membuf(id, out_buf, sizeof(out_buf)); + spin_unlock(&cffdump_lock); + + switch (opcode) { + case CFF_OP_WRITE_REG: + cff_op_write_reg.op = opcode; + cff_op_write_reg.addr = op1; + cff_op_write_reg.value = op2; + data = &cff_op_write_reg; + len = sizeof(cff_op_write_reg); + break; + + case CFF_OP_POLL_REG: + cff_op_poll_reg.op = opcode; + cff_op_poll_reg.addr = op1; + cff_op_poll_reg.value = op2; + cff_op_poll_reg.mask = op3; + data = &cff_op_poll_reg; + len = sizeof(cff_op_poll_reg); + break; + + case CFF_OP_WAIT_IRQ: + cff_op_wait_irq.op = opcode; + data = &cff_op_wait_irq; + len = sizeof(cff_op_wait_irq); + break; + + case CFF_OP_MEMORY_BASE: + cff_op_memory_base.op = opcode; + cff_op_memory_base.base = op1; + cff_op_memory_base.size = op2; + cff_op_memory_base.gmemsize = op3; + data = &cff_op_memory_base; + len = sizeof(cff_op_memory_base); + break; + + case CFF_OP_HANG: + cff_op_hang.op = opcode; + data = &cff_op_hang; + len = sizeof(cff_op_hang); + break; + + case CFF_OP_EOF: + cff_op_eof.op = opcode; + data = &cff_op_eof; + len = sizeof(cff_op_eof); + break; + + case 
CFF_OP_WRITE_SURFACE_PARAMS: + case CFF_OP_VERIFY_MEM_FILE: + cff_op_user_event.op = opcode; + cff_op_user_event.op1 = op1; + cff_op_user_event.op2 = op2; + cff_op_user_event.op3 = op3; + cff_op_user_event.op4 = op4; + cff_op_user_event.op5 = op5; + data = &cff_op_user_event; + len = sizeof(cff_op_user_event); + break; + } + + if (len) { + b64_encode(data, len, out_buf, sizeof(out_buf), &out_size); + out_buf[out_size] = 0; + klog_printk("%ld:%d;%s\n", ++serial_nr, id, out_buf); + } else + pr_warn("kgsl: cffdump: unhandled opcode: %d\n", opcode); + + cur_secs = get_seconds(); + if ((cur_secs - last_sec) > 10 || (last_sec - cur_secs) > 10) { + pr_info("kgsl: cffdump: total [bytes:%lu kB, syncmem:%lu kB], " + "seq#: %lu\n", total_bytes/1024, total_syncmem/1024, + serial_nr); + last_sec = cur_secs; + } +} + +void kgsl_cffdump_init() +{ + struct dentry *debugfs_dir = kgsl_get_debugfs_dir(); + +#ifdef ALIGN_CPU + cpumask_t mask; + + cpumask_clear(&mask); + cpumask_set_cpu(0, &mask); + sched_setaffinity(0, &mask); +#endif + if (!debugfs_dir || IS_ERR(debugfs_dir)) { + KGSL_CORE_ERR("Debugfs directory is bad\n"); + return; + } + + kgsl_cff_dump_enable = 1; + + spin_lock_init(&cffdump_lock); + + dir = debugfs_create_dir("cff", debugfs_dir); + if (!dir) { + KGSL_CORE_ERR("debugfs_create_dir failed\n"); + return; + } + + chan = create_channel(subbuf_size, n_subbufs); +} + +void kgsl_cffdump_destroy() +{ + if (chan) + relay_flush(chan); + destroy_channel(); + if (dir) + debugfs_remove(dir); +} + +void kgsl_cffdump_open(enum kgsl_deviceid device_id) +{ + kgsl_cffdump_memory_base(device_id, KGSL_PAGETABLE_BASE, + kgsl_mmu_get_ptsize(), SZ_256K); +} + +void kgsl_cffdump_memory_base(enum kgsl_deviceid device_id, unsigned int base, + unsigned int range, unsigned gmemsize) +{ + cffdump_printline(device_id, CFF_OP_MEMORY_BASE, base, + range, gmemsize, 0, 0); +} + +void kgsl_cffdump_hang(enum kgsl_deviceid device_id) +{ + cffdump_printline(device_id, CFF_OP_HANG, 0, 0, 0, 0, 0); +} + +void kgsl_cffdump_close(enum kgsl_deviceid device_id) +{ + cffdump_printline(device_id, CFF_OP_EOF, 0, 0, 0, 0, 0); +} + +void kgsl_cffdump_user_event(unsigned int cff_opcode, unsigned int op1, + unsigned int op2, unsigned int op3, + unsigned int op4, unsigned int op5) +{ + cffdump_printline(-1, cff_opcode, op1, op2, op3, op4, op5); +} + +void kgsl_cffdump_syncmem(struct kgsl_device_private *dev_priv, + const struct kgsl_memdesc *memdesc, uint gpuaddr, uint sizebytes, + bool clean_cache) +{ + const void *src; + + if (!kgsl_cff_dump_enable) + return; + + total_syncmem += sizebytes; + + if (memdesc == NULL) { + struct kgsl_mem_entry *entry; + spin_lock(&dev_priv->process_priv->mem_lock); + entry = kgsl_sharedmem_find_region(dev_priv->process_priv, + gpuaddr, sizebytes); + spin_unlock(&dev_priv->process_priv->mem_lock); + if (entry == NULL) { + KGSL_CORE_ERR("did not find mapping " + "for gpuaddr: 0x%08x\n", gpuaddr); + return; + } + memdesc = &entry->memdesc; + } + src = (uint *)kgsl_gpuaddr_to_vaddr(memdesc, gpuaddr); + if (memdesc->hostptr == NULL) { + KGSL_CORE_ERR("no kernel mapping for " + "gpuaddr: 0x%08x, m->host: 0x%p, phys: 0x%08x\n", + gpuaddr, memdesc->hostptr, memdesc->physaddr); + return; + } + + if (clean_cache) { + /* Ensure that this memory region is not read from the + * cache but fetched fresh */ + + mb(); + + kgsl_cache_range_op((struct kgsl_memdesc *)memdesc, + KGSL_CACHE_OP_INV); + } + + while (sizebytes > 3) { + cffdump_printline(-1, CFF_OP_WRITE_MEM, gpuaddr, *(uint *)src, + 0, 0, 0); + gpuaddr += 4; + src 
+= 4; + sizebytes -= 4; + } + if (sizebytes > 0) + cffdump_printline(-1, CFF_OP_WRITE_MEM, gpuaddr, *(uint *)src, + 0, 0, 0); +} + +void kgsl_cffdump_setmem(uint addr, uint value, uint sizebytes) +{ + if (!kgsl_cff_dump_enable) + return; + + while (sizebytes > 3) { + /* Use 32bit memory writes as long as there's at least + * 4 bytes left */ + cffdump_printline(-1, CFF_OP_WRITE_MEM, addr, value, + 0, 0, 0); + addr += 4; + sizebytes -= 4; + } + if (sizebytes > 0) + cffdump_printline(-1, CFF_OP_WRITE_MEM, addr, value, + 0, 0, 0); +} + +void kgsl_cffdump_regwrite(enum kgsl_deviceid device_id, uint addr, + uint value) +{ + if (!kgsl_cff_dump_enable) + return; + + cffdump_printline(device_id, CFF_OP_WRITE_REG, addr, value, + 0, 0, 0); +} + +void kgsl_cffdump_regpoll(enum kgsl_deviceid device_id, uint addr, + uint value, uint mask) +{ + if (!kgsl_cff_dump_enable) + return; + + cffdump_printline(device_id, CFF_OP_POLL_REG, addr, value, + mask, 0, 0); +} + +void kgsl_cffdump_slavewrite(uint addr, uint value) +{ + if (!kgsl_cff_dump_enable) + return; + + cffdump_printline(-1, CFF_OP_WRITE_REG, addr, value, 0, 0, 0); +} + +int kgsl_cffdump_waitirq(void) +{ + if (!kgsl_cff_dump_enable) + return 0; + + cffdump_printline(-1, CFF_OP_WAIT_IRQ, 0, 0, 0, 0, 0); + + return 1; +} +EXPORT_SYMBOL(kgsl_cffdump_waitirq); + +static int subbuf_start_handler(struct rchan_buf *buf, + void *subbuf, void *prev_subbuf, uint prev_padding) +{ + pr_debug("kgsl: cffdump: subbuf_start_handler(subbuf=%p, prev_subbuf" + "=%p, prev_padding=%08x)\n", subbuf, prev_subbuf, prev_padding); + + if (relay_buf_full(buf)) { + if (!suspended) { + suspended = 1; + pr_warn("kgsl: cffdump: relay: cpu %d buffer full!!!\n", + smp_processor_id()); + } + dropped++; + return 0; + } else if (suspended) { + suspended = 0; + pr_warn("kgsl: cffdump: relay: cpu %d buffer no longer full.\n", + smp_processor_id()); + } + + subbuf_start_reserve(buf, 0); + return 1; +} + +static struct dentry *create_buf_file_handler(const char *filename, + struct dentry *parent, int mode, struct rchan_buf *buf, + int *is_global) +{ + return debugfs_create_file(filename, mode, parent, buf, + &relay_file_operations); +} + +/* + * file_remove() default callback. Removes relay file in debugfs. 
+ */ +static int remove_buf_file_handler(struct dentry *dentry) +{ + pr_info("kgsl: cffdump: %s()\n", __func__); + debugfs_remove(dentry); + return 0; +} + +/* + * relay callbacks + */ +static struct rchan_callbacks relay_callbacks = { + .subbuf_start = subbuf_start_handler, + .create_buf_file = create_buf_file_handler, + .remove_buf_file = remove_buf_file_handler, +}; + +/** + * create_channel - creates channel /debug/klog/cpuXXX + * + * Creates channel along with associated produced/consumed control files + * + * Returns channel on success, NULL otherwise + */ +static struct rchan *create_channel(unsigned subbuf_size, unsigned n_subbufs) +{ + struct rchan *chan; + + pr_info("kgsl: cffdump: relay: create_channel: subbuf_size %u, " + "n_subbufs %u, dir 0x%p\n", subbuf_size, n_subbufs, dir); + + chan = relay_open("cpu", dir, subbuf_size, + n_subbufs, &relay_callbacks, NULL); + if (!chan) { + KGSL_CORE_ERR("relay_open failed\n"); + return NULL; + } + + suspended = 0; + dropped = 0; + + return chan; +} + +/** + * destroy_channel - destroys channel /debug/kgsl/cff/cpuXXX + * + * Destroys channel along with associated produced/consumed control files + */ +static void destroy_channel(void) +{ + pr_info("kgsl: cffdump: relay: destroy_channel\n"); + if (chan) { + relay_close(chan); + chan = NULL; + } +} + diff --git a/drivers/gpu/msm/kgsl_cffdump.h b/drivers/gpu/msm/kgsl_cffdump.h new file mode 100644 index 0000000000000..2733cc3fab805 --- /dev/null +++ b/drivers/gpu/msm/kgsl_cffdump.h @@ -0,0 +1,69 @@ +/* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __KGSL_CFFDUMP_H +#define __KGSL_CFFDUMP_H + +#ifdef CONFIG_MSM_KGSL_CFF_DUMP + +#include + +#include "kgsl_device.h" + +void kgsl_cffdump_init(void); +void kgsl_cffdump_destroy(void); +void kgsl_cffdump_open(enum kgsl_deviceid device_id); +void kgsl_cffdump_close(enum kgsl_deviceid device_id); +void kgsl_cffdump_syncmem(struct kgsl_device_private *dev_priv, + const struct kgsl_memdesc *memdesc, uint physaddr, uint sizebytes, + bool clean_cache); +void kgsl_cffdump_setmem(uint addr, uint value, uint sizebytes); +void kgsl_cffdump_regwrite(enum kgsl_deviceid device_id, uint addr, + uint value); +void kgsl_cffdump_regpoll(enum kgsl_deviceid device_id, uint addr, + uint value, uint mask); +bool kgsl_cffdump_parse_ibs(struct kgsl_device_private *dev_priv, + const struct kgsl_memdesc *memdesc, uint gpuaddr, int sizedwords, + bool check_only); +void kgsl_cffdump_user_event(unsigned int cff_opcode, unsigned int op1, + unsigned int op2, unsigned int op3, + unsigned int op4, unsigned int op5); +static inline bool kgsl_cffdump_flags_no_memzero(void) { return true; } + +void kgsl_cffdump_memory_base(enum kgsl_deviceid device_id, unsigned int base, + unsigned int range, unsigned int gmemsize); + +void kgsl_cffdump_hang(enum kgsl_deviceid device_id); + +#else + +#define kgsl_cffdump_init() (void)0 +#define kgsl_cffdump_destroy() (void)0 +#define kgsl_cffdump_open(device_id) (void)0 +#define kgsl_cffdump_close(device_id) (void)0 +#define kgsl_cffdump_syncmem(dev_priv, memdesc, addr, sizebytes, clean_cache) \ + (void) 0 +#define kgsl_cffdump_setmem(addr, value, sizebytes) (void)0 +#define kgsl_cffdump_regwrite(device_id, addr, value) (void)0 +#define kgsl_cffdump_regpoll(device_id, addr, value, mask) (void)0 +#define kgsl_cffdump_parse_ibs(dev_priv, memdesc, gpuaddr, \ + sizedwords, check_only) true +#define kgsl_cffdump_flags_no_memzero() true +#define kgsl_cffdump_memory_base(base, range, gmemsize) (void)0 +#define kgsl_cffdump_hang(device_id) (void)0 +#define kgsl_cffdump_user_event(cff_opcode, op1, op2, op3, op4, op5) \ + (void)param + +#endif /* CONFIG_MSM_KGSL_CFF_DUMP */ + +#endif /* __KGSL_CFFDUMP_H */ diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c new file mode 100644 index 0000000000000..c6a275949e540 --- /dev/null +++ b/drivers/gpu/msm/kgsl_debugfs.c @@ -0,0 +1,87 @@ +/* Copyright (c) 2002,2008-2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include + +#include "kgsl.h" +#include "kgsl_device.h" + +/*default log levels is error for everything*/ +#define KGSL_LOG_LEVEL_DEFAULT 3 +#define KGSL_LOG_LEVEL_MAX 7 + +struct dentry *kgsl_debugfs_dir; + +static inline int kgsl_log_set(unsigned int *log_val, void *data, u64 val) +{ + *log_val = min((unsigned int)val, (unsigned int)KGSL_LOG_LEVEL_MAX); + return 0; +} + +#define KGSL_DEBUGFS_LOG(__log) \ +static int __log ## _set(void *data, u64 val) \ +{ \ + struct kgsl_device *device = data; \ + return kgsl_log_set(&device->__log, data, val); \ +} \ +static int __log ## _get(void *data, u64 *val) \ +{ \ + struct kgsl_device *device = data; \ + *val = device->__log; \ + return 0; \ +} \ +DEFINE_SIMPLE_ATTRIBUTE(__log ## _fops, \ +__log ## _get, __log ## _set, "%llu\n"); \ + +KGSL_DEBUGFS_LOG(drv_log); +KGSL_DEBUGFS_LOG(cmd_log); +KGSL_DEBUGFS_LOG(ctxt_log); +KGSL_DEBUGFS_LOG(mem_log); +KGSL_DEBUGFS_LOG(pwr_log); + +void kgsl_device_debugfs_init(struct kgsl_device *device) +{ + if (kgsl_debugfs_dir && !IS_ERR(kgsl_debugfs_dir)) + device->d_debugfs = debugfs_create_dir(device->name, + kgsl_debugfs_dir); + + if (!device->d_debugfs || IS_ERR(device->d_debugfs)) + return; + + device->cmd_log = KGSL_LOG_LEVEL_DEFAULT; + device->ctxt_log = KGSL_LOG_LEVEL_DEFAULT; + device->drv_log = KGSL_LOG_LEVEL_DEFAULT; + device->mem_log = KGSL_LOG_LEVEL_DEFAULT; + device->pwr_log = KGSL_LOG_LEVEL_DEFAULT; + + debugfs_create_file("log_level_cmd", 0644, device->d_debugfs, device, + &cmd_log_fops); + debugfs_create_file("log_level_ctxt", 0644, device->d_debugfs, device, + &ctxt_log_fops); + debugfs_create_file("log_level_drv", 0644, device->d_debugfs, device, + &drv_log_fops); + debugfs_create_file("log_level_mem", 0644, device->d_debugfs, device, + &mem_log_fops); + debugfs_create_file("log_level_pwr", 0644, device->d_debugfs, device, + &pwr_log_fops); +} + +void kgsl_core_debugfs_init(void) +{ + kgsl_debugfs_dir = debugfs_create_dir("kgsl", 0); +} + +void kgsl_core_debugfs_close(void) +{ + debugfs_remove_recursive(kgsl_debugfs_dir); +} diff --git a/drivers/gpu/msm/kgsl_debugfs.h b/drivers/gpu/msm/kgsl_debugfs.h new file mode 100644 index 0000000000000..b13e7fddecbdf --- /dev/null +++ b/drivers/gpu/msm/kgsl_debugfs.h @@ -0,0 +1,39 @@ +/* Copyright (c) 2002,2008-2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _KGSL_DEBUGFS_H +#define _KGSL_DEBUGFS_H + +struct kgsl_device; + +#ifdef CONFIG_DEBUG_FS +void kgsl_core_debugfs_init(void); +void kgsl_core_debugfs_close(void); + +void kgsl_device_debugfs_init(struct kgsl_device *device); + +extern struct dentry *kgsl_debugfs_dir; +static inline struct dentry *kgsl_get_debugfs_dir(void) +{ + return kgsl_debugfs_dir; +} + +#else +static inline void kgsl_core_debugfs_init(void) { } +static inline void kgsl_device_debugfs_init(struct kgsl_device *device) { } +static inline void kgsl_core_debugfs_close(void) { } +static inline struct dentry *kgsl_get_debugfs_dir(void) { return NULL; } + +#endif + +#endif diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h new file mode 100644 index 0000000000000..73e8f87e1947d --- /dev/null +++ b/drivers/gpu/msm/kgsl_device.h @@ -0,0 +1,362 @@ +/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __KGSL_DEVICE_H +#define __KGSL_DEVICE_H + +#include +#include +#include +#include + +#include "kgsl.h" +#include "kgsl_mmu.h" +#include "kgsl_pwrctrl.h" +#include "kgsl_log.h" +#include "kgsl_pwrscale.h" +#include + +#define KGSL_TIMEOUT_NONE 0 +#define KGSL_TIMEOUT_DEFAULT 0xFFFFFFFF + +#define FIRST_TIMEOUT (HZ / 2) + + +/* KGSL device state is initialized to INIT when platform_probe * + * sucessfully initialized the device. Once a device has been opened * + * (started) it becomes active. NAP implies that only low latency * + * resources (for now clocks on some platforms) are off. SLEEP implies * + * that the KGSL module believes a device is idle (has been inactive * + * past its timer) and all system resources are released. SUSPEND is * + * requested by the kernel and will be enforced upon all open devices. */ + +#define KGSL_STATE_NONE 0x00000000 +#define KGSL_STATE_INIT 0x00000001 +#define KGSL_STATE_ACTIVE 0x00000002 +#define KGSL_STATE_NAP 0x00000004 +#define KGSL_STATE_SLEEP 0x00000008 +#define KGSL_STATE_SUSPEND 0x00000010 +#define KGSL_STATE_HUNG 0x00000020 +#define KGSL_STATE_DUMP_AND_RECOVER 0x00000040 +#define KGSL_STATE_SLUMBER 0x00000080 + +#define KGSL_GRAPHICS_MEMORY_LOW_WATERMARK 0x1000000 + +#define KGSL_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK))) + +struct kgsl_device; +struct platform_device; +struct kgsl_device_private; +struct kgsl_context; +struct kgsl_power_stats; +struct kgsl_event; + +struct kgsl_functable { + /* Mandatory functions - these functions must be implemented + by the client device. The driver will not check for a NULL + pointer before calling the hook. 
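+	   For example, the kgsl_regread(), kgsl_regwrite(), kgsl_idle()
+	   and kgsl_gpuid() inline wrappers later in this header call
+	   device->ftbl->regread, ->regwrite, ->idle and ->gpuid directly,
+	   so a client device that leaves a mandatory hook NULL will fault
+	   on first use.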
+ */ + void (*regread) (struct kgsl_device *device, + unsigned int offsetwords, unsigned int *value); + void (*regwrite) (struct kgsl_device *device, + unsigned int offsetwords, unsigned int value); + int (*idle) (struct kgsl_device *device, unsigned int timeout); + unsigned int (*isidle) (struct kgsl_device *device); + int (*suspend_context) (struct kgsl_device *device); + int (*start) (struct kgsl_device *device, unsigned int init_ram); + int (*stop) (struct kgsl_device *device); + int (*getproperty) (struct kgsl_device *device, + enum kgsl_property_type type, void *value, + unsigned int sizebytes); + int (*waittimestamp) (struct kgsl_device *device, + unsigned int timestamp, unsigned int msecs); + unsigned int (*readtimestamp) (struct kgsl_device *device, + enum kgsl_timestamp_type type); + int (*issueibcmds) (struct kgsl_device_private *dev_priv, + struct kgsl_context *context, struct kgsl_ibdesc *ibdesc, + unsigned int sizedwords, uint32_t *timestamp, + unsigned int flags); + int (*setup_pt)(struct kgsl_device *device, + struct kgsl_pagetable *pagetable); + void (*cleanup_pt)(struct kgsl_device *device, + struct kgsl_pagetable *pagetable); + void (*power_stats)(struct kgsl_device *device, + struct kgsl_power_stats *stats); + void (*irqctrl)(struct kgsl_device *device, int state); + unsigned int (*gpuid)(struct kgsl_device *device); + void * (*snapshot)(struct kgsl_device *device, void *snapshot, + int *remain, int hang); + /* Optional functions - these functions are not mandatory. The + driver will check that the function pointer is not NULL before + calling the hook */ + void (*setstate) (struct kgsl_device *device, unsigned int context_id, + uint32_t flags); + int (*drawctxt_create) (struct kgsl_device *device, + struct kgsl_pagetable *pagetable, struct kgsl_context *context, + uint32_t flags); + void (*drawctxt_destroy) (struct kgsl_device *device, + struct kgsl_context *context); + long (*ioctl) (struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data); + int (*next_event)(struct kgsl_device *device, + struct kgsl_event *event); +}; + +struct kgsl_memregion { + unsigned char *mmio_virt_base; + unsigned int mmio_phys_base; + uint32_t gpu_base; + unsigned int sizebytes; +}; + +/* MH register values */ +struct kgsl_mh { + unsigned int mharb; + unsigned int mh_intf_cfg1; + unsigned int mh_intf_cfg2; + uint32_t mpu_base; + int mpu_range; +}; + +struct kgsl_event { + uint32_t timestamp; + void (*func)(struct kgsl_device *, void *, u32); + void *priv; + struct list_head list; + void *owner; +}; + + +struct kgsl_device { + struct device *dev; + const char *name; + unsigned int ver_major; + unsigned int ver_minor; + uint32_t flags; + enum kgsl_deviceid id; + struct kgsl_memregion regspace; + struct kgsl_memdesc memstore; + const char *iomemname; + + struct kgsl_mh mh; + struct kgsl_mmu mmu; + struct completion hwaccess_gate; + const struct kgsl_functable *ftbl; + struct work_struct idle_check_ws; + struct timer_list idle_timer; + struct kgsl_pwrctrl pwrctrl; + int open_count; + + struct atomic_notifier_head ts_notifier_list; + struct mutex mutex; + uint32_t state; + uint32_t requested_state; + + unsigned int active_cnt; + struct completion suspend_gate; + + wait_queue_head_t wait_queue; + struct workqueue_struct *work_queue; + struct device *parentdev; + struct completion recovery_gate; + struct dentry *d_debugfs; + struct idr context_idr; + struct early_suspend display_off; + + void *snapshot; /* Pointer to the snapshot memory region */ + int snapshot_maxsize; /* Max size of 
the snapshot region */ + int snapshot_size; /* Current size of the snapshot region */ + u32 snapshot_timestamp; /* Timestamp of the last valid snapshot */ + int snapshot_frozen; /* 1 if the snapshot output is frozen until + it gets read by the user. This avoids + losing the output on multiple hangs */ + struct kobject snapshot_kobj; + + /* Logging levels */ + int cmd_log; + int ctxt_log; + int drv_log; + int mem_log; + int pwr_log; + struct wake_lock idle_wakelock; + struct kgsl_pwrscale pwrscale; + struct kobject pwrscale_kobj; + struct pm_qos_request_list pm_qos_req_dma; + struct work_struct ts_expired_ws; + struct list_head events; + s64 on_time; + + /* page fault debugging parameters */ + struct work_struct print_fault_ib; + unsigned int page_fault_ptbase; + unsigned int page_fault_ib1; + unsigned int page_fault_rptr; +}; + +struct kgsl_context { + uint32_t id; + + /* Pointer to the owning device instance */ + struct kgsl_device_private *dev_priv; + + /* Pointer to the device specific context information */ + void *devctxt; + /* + * Status indicating whether a gpu reset occurred and whether this + * context was responsible for causing it + */ + unsigned int reset_status; + + /* + * Timeline used to create fences that can be signaled when a + * sync_pt timestamp expires. + */ + struct sync_timeline *timeline; +}; + +struct kgsl_process_private { + unsigned int refcnt; + pid_t pid; + spinlock_t mem_lock; + struct rb_root mem_rb; + struct kgsl_pagetable *pagetable; + struct list_head list; + struct kobject kobj; + + struct { + unsigned int cur; + unsigned int max; + } stats[KGSL_MEM_ENTRY_MAX]; +}; + +struct kgsl_device_private { + struct kgsl_device *device; + struct kgsl_process_private *process_priv; +}; + +struct kgsl_power_stats { + s64 total_time; + s64 busy_time; +}; + +struct kgsl_device *kgsl_get_device(int dev_idx); + +static inline void kgsl_process_add_stats(struct kgsl_process_private *priv, + unsigned int type, size_t size) +{ + priv->stats[type].cur += size; + if (priv->stats[type].max < priv->stats[type].cur) + priv->stats[type].max = priv->stats[type].cur; +} + +static inline void kgsl_regread(struct kgsl_device *device, + unsigned int offsetwords, + unsigned int *value) +{ + device->ftbl->regread(device, offsetwords, value); +} + +static inline void kgsl_regwrite(struct kgsl_device *device, + unsigned int offsetwords, + unsigned int value) +{ + device->ftbl->regwrite(device, offsetwords, value); +} + +static inline int kgsl_idle(struct kgsl_device *device, unsigned int timeout) +{ + return device->ftbl->idle(device, timeout); +} + +static inline unsigned int kgsl_gpuid(struct kgsl_device *device) +{ + return device->ftbl->gpuid(device); +} + +static inline int kgsl_create_device_sysfs_files(struct device *root, + const struct device_attribute **list) +{ + int ret = 0, i; + for (i = 0; list[i] != NULL; i++) + ret |= device_create_file(root, list[i]); + return ret; +} + +static inline void kgsl_remove_device_sysfs_files(struct device *root, + const struct device_attribute **list) +{ + int i; + for (i = 0; list[i] != NULL; i++) + device_remove_file(root, list[i]); +} + +static inline struct kgsl_mmu * +kgsl_get_mmu(struct kgsl_device *device) +{ + return (struct kgsl_mmu *) (device ? 
&device->mmu : NULL); +} + +static inline struct kgsl_device *kgsl_device_from_dev(struct device *dev) +{ + int i; + + for (i = 0; i < KGSL_DEVICE_MAX; i++) { + if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->dev == dev) + return kgsl_driver.devp[i]; + } + + return NULL; +} + +static inline int kgsl_create_device_workqueue(struct kgsl_device *device) +{ + device->work_queue = create_singlethread_workqueue(device->name); + if (!device->work_queue) { + KGSL_DRV_ERR(device, + "create_singlethread_workqueue(%s) failed\n", + device->name); + return -EINVAL; + } + return 0; +} + +static inline struct kgsl_context * +kgsl_find_context(struct kgsl_device_private *dev_priv, uint32_t id) +{ + struct kgsl_context *ctxt = + idr_find(&dev_priv->device->context_idr, id); + + /* Make sure that the context belongs to the current instance so + that other processes can't guess context IDs and mess things up */ + + return (ctxt && ctxt->dev_priv == dev_priv) ? ctxt : NULL; +} + +int kgsl_check_timestamp(struct kgsl_device *device, unsigned int timestamp); + +int kgsl_register_ts_notifier(struct kgsl_device *device, + struct notifier_block *nb); + +int kgsl_unregister_ts_notifier(struct kgsl_device *device, + struct notifier_block *nb); + +int kgsl_device_platform_probe(struct kgsl_device *device, + irqreturn_t (*dev_isr) (int, void*)); +void kgsl_device_platform_remove(struct kgsl_device *device); + +const char *kgsl_pwrstate_to_str(unsigned int state); + +int kgsl_device_snapshot_init(struct kgsl_device *device); +int kgsl_device_snapshot(struct kgsl_device *device, int hang); +void kgsl_device_snapshot_close(struct kgsl_device *device); + +#endif /* __KGSL_DEVICE_H */ diff --git a/drivers/gpu/msm/kgsl_drm.c b/drivers/gpu/msm/kgsl_drm.c new file mode 100644 index 0000000000000..58ec252dd16e1 --- /dev/null +++ b/drivers/gpu/msm/kgsl_drm.c @@ -0,0 +1,1664 @@ +/* Copyright (c) 2009-2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* Implements an interface between KGSL and the DRM subsystem. 
For now this + * is pretty simple, but it will take on more of the workload as time goes + * on + */ +#include "drmP.h" +#include "drm.h" +#include +#include + +#include "kgsl.h" +#include "kgsl_device.h" +#include "kgsl_drm.h" +#include "kgsl_mmu.h" +#include "kgsl_sharedmem.h" + +#define DRIVER_AUTHOR "Qualcomm" +#define DRIVER_NAME "kgsl" +#define DRIVER_DESC "KGSL DRM" +#define DRIVER_DATE "20100127" + +#define DRIVER_MAJOR 2 +#define DRIVER_MINOR 1 +#define DRIVER_PATCHLEVEL 1 + +#define DRM_KGSL_GEM_FLAG_MAPPED (1 << 0) + +#define ENTRY_EMPTY -1 +#define ENTRY_NEEDS_CLEANUP -2 + +#define DRM_KGSL_NUM_FENCE_ENTRIES (DRM_KGSL_HANDLE_WAIT_ENTRIES << 2) +#define DRM_KGSL_HANDLE_WAIT_ENTRIES 5 + +/* Returns true if the memory type is in PMEM */ + +#ifdef CONFIG_KERNEL_PMEM_SMI_REGION +#define TYPE_IS_PMEM(_t) \ + (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \ + ((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_SMI) || \ + ((_t) & DRM_KGSL_GEM_TYPE_PMEM)) +#else +#define TYPE_IS_PMEM(_t) \ + (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \ + ((_t) & (DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI))) +#endif + +/* Returns true if the memory type is regular */ + +#define TYPE_IS_MEM(_t) \ + (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM) || \ + ((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \ + ((_t) & DRM_KGSL_GEM_TYPE_MEM)) + +#define TYPE_IS_FD(_t) ((_t) & DRM_KGSL_GEM_TYPE_FD_MASK) + +/* Returns true if KMEM region is uncached */ + +#define IS_MEM_UNCACHED(_t) \ + ((_t == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \ + (_t == DRM_KGSL_GEM_TYPE_KMEM) || \ + (TYPE_IS_MEM(_t) && (_t & DRM_KGSL_GEM_CACHE_WCOMBINE))) + +struct drm_kgsl_gem_object_wait_list_entry { + struct list_head list; + int pid; + int in_use; + wait_queue_head_t process_wait_q; +}; + +struct drm_kgsl_gem_object_fence { + int32_t fence_id; + unsigned int num_buffers; + int ts_valid; + unsigned int timestamp; + int ts_device; + int lockpid; + struct list_head buffers_in_fence; +}; + +struct drm_kgsl_gem_object_fence_list_entry { + struct list_head list; + int in_use; + struct drm_gem_object *gem_obj; +}; + +static int32_t fence_id = 0x1; + +static struct drm_kgsl_gem_object_fence + gem_buf_fence[DRM_KGSL_NUM_FENCE_ENTRIES]; + +struct drm_kgsl_gem_object { + struct drm_gem_object *obj; + uint32_t type; + struct kgsl_memdesc memdesc; + struct kgsl_pagetable *pagetable; + uint64_t mmap_offset; + int bufcount; + int flags; + struct list_head list; + int active; + + struct { + uint32_t offset; + uint32_t gpuaddr; + } bufs[DRM_KGSL_GEM_MAX_BUFFERS]; + + int bound; + int lockpid; + /* Put these here to avoid allocing all the time */ + struct drm_kgsl_gem_object_wait_list_entry + wait_entries[DRM_KGSL_HANDLE_WAIT_ENTRIES]; + /* Each object can only appear in a single fence */ + struct drm_kgsl_gem_object_fence_list_entry + fence_entries[DRM_KGSL_NUM_FENCE_ENTRIES]; + + struct list_head wait_list; +}; + +/* This is a global list of all the memory currently mapped in the MMU */ +static struct list_head kgsl_mem_list; + +static void kgsl_gem_mem_flush(struct kgsl_memdesc *memdesc, int type, int op) +{ + int cacheop = 0; + + switch (op) { + case DRM_KGSL_GEM_CACHE_OP_TO_DEV: + if (type & (DRM_KGSL_GEM_CACHE_WBACK | + DRM_KGSL_GEM_CACHE_WBACKWA)) + cacheop = KGSL_CACHE_OP_CLEAN; + + break; + + case DRM_KGSL_GEM_CACHE_OP_FROM_DEV: + if (type & (DRM_KGSL_GEM_CACHE_WBACK | + DRM_KGSL_GEM_CACHE_WBACKWA | + DRM_KGSL_GEM_CACHE_WTHROUGH)) + cacheop = KGSL_CACHE_OP_INV; 
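+		/*
+		 * Note: purely write-combined or uncached buffer types do
+		 * not set cacheop in either case above, so the
+		 * kgsl_cache_range_op() call below presumably ends up as a
+		 * no-op for them.
+		 */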
+ } + + kgsl_cache_range_op(memdesc, cacheop); +} + +/* Flush all the memory mapped in the MMU */ + +void kgsl_gpu_mem_flush(int op) +{ + struct drm_kgsl_gem_object *entry; + + list_for_each_entry(entry, &kgsl_mem_list, list) { + kgsl_gem_mem_flush(&entry->memdesc, entry->type, op); + } + + /* Takes care of WT/WC case. + * More useful when we go barrierless + */ + dmb(); +} + +/* TODO: + * Add vsync wait */ + +static int kgsl_drm_load(struct drm_device *dev, unsigned long flags) +{ + return 0; +} + +static int kgsl_drm_unload(struct drm_device *dev) +{ + return 0; +} + +struct kgsl_drm_device_priv { + struct kgsl_device *device[KGSL_DEVICE_MAX]; + struct kgsl_device_private *devpriv[KGSL_DEVICE_MAX]; +}; + +static int kgsl_ts_notifier_cb(struct notifier_block *blk, + unsigned long code, void *_param); + +static struct notifier_block kgsl_ts_nb[KGSL_DEVICE_MAX]; + +static int kgsl_drm_firstopen(struct drm_device *dev) +{ + int i; + + for (i = 0; i < KGSL_DEVICE_MAX; i++) { + struct kgsl_device *device = kgsl_get_device(i); + + if (device == NULL) + continue; + + kgsl_ts_nb[i].notifier_call = kgsl_ts_notifier_cb; + kgsl_register_ts_notifier(device, &kgsl_ts_nb[i]); + } + + return 0; +} + +void kgsl_drm_lastclose(struct drm_device *dev) +{ + int i; + + for (i = 0; i < KGSL_DEVICE_MAX; i++) { + struct kgsl_device *device = kgsl_get_device(i); + if (device == NULL) + continue; + + kgsl_unregister_ts_notifier(device, &kgsl_ts_nb[i]); + } +} + +void kgsl_drm_preclose(struct drm_device *dev, struct drm_file *file_priv) +{ +} + +static int kgsl_drm_suspend(struct drm_device *dev, pm_message_t state) +{ + return 0; +} + +static int kgsl_drm_resume(struct drm_device *dev) +{ + return 0; +} + +static void +kgsl_gem_free_mmap_offset(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + struct drm_gem_mm *mm = dev->mm_private; + struct drm_kgsl_gem_object *priv = obj->driver_private; + struct drm_map_list *list; + + list = &obj->map_list; + drm_ht_remove_item(&mm->offset_hash, &list->hash); + if (list->file_offset_node) { + drm_mm_put_block(list->file_offset_node); + list->file_offset_node = NULL; + } + + kfree(list->map); + list->map = NULL; + + priv->mmap_offset = 0; +} + +static int +kgsl_gem_memory_allocated(struct drm_gem_object *obj) +{ + struct drm_kgsl_gem_object *priv = obj->driver_private; + return priv->memdesc.size ? 
1 : 0; +} + +static int +kgsl_gem_alloc_memory(struct drm_gem_object *obj) +{ + struct drm_kgsl_gem_object *priv = obj->driver_private; + int index; + + /* Return if the memory is already allocated */ + + if (kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type)) + return 0; + + if (TYPE_IS_PMEM(priv->type)) { + int type; + + if (priv->type == DRM_KGSL_GEM_TYPE_EBI || + priv->type & DRM_KGSL_GEM_PMEM_EBI) + type = PMEM_MEMTYPE_EBI1; + else + type = PMEM_MEMTYPE_SMI; + + priv->memdesc.physaddr = + pmem_kalloc(obj->size * priv->bufcount, + type | PMEM_ALIGNMENT_4K); + + if (IS_ERR((void *) priv->memdesc.physaddr)) { + DRM_ERROR("Unable to allocate PMEM memory\n"); + return -ENOMEM; + } + + priv->memdesc.size = obj->size * priv->bufcount; + + } else if (TYPE_IS_MEM(priv->type)) { + priv->memdesc.hostptr = + vmalloc_user(obj->size * priv->bufcount); + + if (priv->memdesc.hostptr == NULL) { + DRM_ERROR("Unable to allocate vmalloc memory\n"); + return -ENOMEM; + } + + priv->memdesc.size = obj->size * priv->bufcount; + priv->memdesc.ops = &kgsl_vmalloc_ops; + } else + return -EINVAL; + + for (index = 0; index < priv->bufcount; index++) + priv->bufs[index].offset = index * obj->size; + + + return 0; +} + +static void +kgsl_gem_unmap(struct drm_gem_object *obj) +{ + struct drm_kgsl_gem_object *priv = obj->driver_private; + + if (!priv->flags & DRM_KGSL_GEM_FLAG_MAPPED) + return; + + kgsl_mmu_unmap(priv->pagetable, &priv->memdesc); + + kgsl_mmu_putpagetable(priv->pagetable); + priv->pagetable = NULL; + + if ((priv->type == DRM_KGSL_GEM_TYPE_KMEM) || + (priv->type & DRM_KGSL_GEM_CACHE_MASK)) + list_del(&priv->list); + + priv->flags &= ~DRM_KGSL_GEM_FLAG_MAPPED; +} + +static void +kgsl_gem_free_memory(struct drm_gem_object *obj) +{ + struct drm_kgsl_gem_object *priv = obj->driver_private; + + if (!kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type)) + return; + + kgsl_gem_mem_flush(&priv->memdesc, priv->type, + DRM_KGSL_GEM_CACHE_OP_FROM_DEV); + + kgsl_gem_unmap(obj); + + if (TYPE_IS_PMEM(priv->type)) + pmem_kfree(priv->memdesc.physaddr); + + kgsl_sharedmem_free(&priv->memdesc); +} + +int +kgsl_gem_init_object(struct drm_gem_object *obj) +{ + struct drm_kgsl_gem_object *priv; + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (priv == NULL) { + DRM_ERROR("Unable to create GEM object\n"); + return -ENOMEM; + } + + obj->driver_private = priv; + priv->obj = obj; + + return 0; +} + +void +kgsl_gem_free_object(struct drm_gem_object *obj) +{ + kgsl_gem_free_memory(obj); + kgsl_gem_free_mmap_offset(obj); + drm_gem_object_release(obj); + kfree(obj->driver_private); +} + +static int +kgsl_gem_create_mmap_offset(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + struct drm_gem_mm *mm = dev->mm_private; + struct drm_kgsl_gem_object *priv = obj->driver_private; + struct drm_map_list *list; + int msize; + + list = &obj->map_list; + list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); + if (list->map == NULL) { + DRM_ERROR("Unable to allocate drm_map_list\n"); + return -ENOMEM; + } + + msize = obj->size * priv->bufcount; + + list->map->type = _DRM_GEM; + list->map->size = msize; + list->map->handle = obj; + + /* Allocate a mmap offset */ + list->file_offset_node = drm_mm_search_free(&mm->offset_manager, + msize / PAGE_SIZE, + 0, 0); + + if (!list->file_offset_node) { + DRM_ERROR("Failed to allocate offset for %d\n", obj->name); + kfree(list->map); + return -ENOMEM; + } + + list->file_offset_node = drm_mm_get_block(list->file_offset_node, + msize / PAGE_SIZE, 0); + + if 
(!list->file_offset_node) { + DRM_ERROR("Unable to create the file_offset_node\n"); + kfree(list->map); + return -ENOMEM; + } + + list->hash.key = list->file_offset_node->start; + if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) { + DRM_ERROR("Failed to add to map hash\n"); + drm_mm_put_block(list->file_offset_node); + kfree(list->map); + return -ENOMEM; + } + + priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT; + + return 0; +} + +int +kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start, + unsigned long *len) +{ + struct file *filp; + struct drm_device *dev; + struct drm_file *file_priv; + struct drm_gem_object *obj; + struct drm_kgsl_gem_object *priv; + int ret = 0; + + filp = fget(drm_fd); + if (unlikely(filp == NULL)) { + DRM_ERROR("Unable to ghet the DRM file descriptor\n"); + return -EINVAL; + } + file_priv = filp->private_data; + if (unlikely(file_priv == NULL)) { + DRM_ERROR("Unable to get the file private data\n"); + fput(filp); + return -EINVAL; + } + dev = file_priv->minor->dev; + if (unlikely(dev == NULL)) { + DRM_ERROR("Unable to get the minor device\n"); + fput(filp); + return -EINVAL; + } + + obj = drm_gem_object_lookup(dev, file_priv, handle); + if (unlikely(obj == NULL)) { + DRM_ERROR("Invalid GEM handle %x\n", handle); + fput(filp); + return -EBADF; + } + + mutex_lock(&dev->struct_mutex); + priv = obj->driver_private; + + /* We can only use the MDP for PMEM regions */ + + if (TYPE_IS_PMEM(priv->type)) { + *start = priv->memdesc.physaddr + + priv->bufs[priv->active].offset; + + *len = priv->memdesc.size; + + kgsl_gem_mem_flush(&priv->memdesc, + priv->type, DRM_KGSL_GEM_CACHE_OP_TO_DEV); + } else { + *start = 0; + *len = 0; + ret = -EINVAL; + } + + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + + fput(filp); + return ret; +} + +static int +kgsl_gem_init_obj(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_gem_object *obj, + int *handle) +{ + struct drm_kgsl_gem_object *priv; + int ret, i; + + mutex_lock(&dev->struct_mutex); + priv = obj->driver_private; + + memset(&priv->memdesc, 0, sizeof(priv->memdesc)); + priv->bufcount = 1; + priv->active = 0; + priv->bound = 0; + + /* To preserve backwards compatability, the default memory source + is EBI */ + + priv->type = DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI; + + ret = drm_gem_handle_create(file_priv, obj, handle); + + drm_gem_object_handle_unreference(obj); + INIT_LIST_HEAD(&priv->wait_list); + + for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) { + INIT_LIST_HEAD((struct list_head *) &priv->wait_entries[i]); + priv->wait_entries[i].pid = 0; + init_waitqueue_head(&priv->wait_entries[i].process_wait_q); + } + + for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) { + INIT_LIST_HEAD((struct list_head *) &priv->fence_entries[i]); + priv->fence_entries[i].in_use = 0; + priv->fence_entries[i].gem_obj = obj; + } + + mutex_unlock(&dev->struct_mutex); + return ret; +} + +int +kgsl_gem_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_kgsl_gem_create *create = data; + struct drm_gem_object *obj; + int ret, handle; + + /* Page align the size so we can allocate multiple buffers */ + create->size = ALIGN(create->size, 4096); + + obj = drm_gem_object_alloc(dev, create->size); + + if (obj == NULL) { + DRM_ERROR("Unable to allocate the GEM object\n"); + return -ENOMEM; + } + + ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle); + if (ret) + return ret; + + create->handle = handle; + return 0; +} + +int 
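+/*
+ * Wraps an already-registered framebuffer, passed in as a file descriptor,
+ * in a GEM object; descriptors that are not FB_MAJOR devices are rejected.
+ */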
+kgsl_gem_create_fd_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_kgsl_gem_create_fd *args = data; + struct file *file; + dev_t rdev; + struct fb_info *info; + struct drm_gem_object *obj; + struct drm_kgsl_gem_object *priv; + int ret, put_needed, handle; + + file = fget_light(args->fd, &put_needed); + + if (file == NULL) { + DRM_ERROR("Unable to get the file object\n"); + return -EBADF; + } + + rdev = file->f_dentry->d_inode->i_rdev; + + /* Only framebuffer objects are supported ATM */ + + if (MAJOR(rdev) != FB_MAJOR) { + DRM_ERROR("File descriptor is not a framebuffer\n"); + ret = -EBADF; + goto error_fput; + } + + info = registered_fb[MINOR(rdev)]; + + if (info == NULL) { + DRM_ERROR("Framebuffer minor %d is not registered\n", + MINOR(rdev)); + ret = -EBADF; + goto error_fput; + } + + obj = drm_gem_object_alloc(dev, info->fix.smem_len); + + if (obj == NULL) { + DRM_ERROR("Unable to allocate GEM object\n"); + ret = -ENOMEM; + goto error_fput; + } + + ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle); + + if (ret) + goto error_fput; + + mutex_lock(&dev->struct_mutex); + + priv = obj->driver_private; + priv->memdesc.physaddr = info->fix.smem_start; + priv->type = DRM_KGSL_GEM_TYPE_FD_FBMEM; + + mutex_unlock(&dev->struct_mutex); + args->handle = handle; + +error_fput: + fput_light(file, put_needed); + + return ret; +} + +int +kgsl_gem_setmemtype_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_kgsl_gem_memtype *args = data; + struct drm_gem_object *obj; + struct drm_kgsl_gem_object *priv; + int ret = 0; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + + if (obj == NULL) { + DRM_ERROR("Invalid GEM handle %x\n", args->handle); + return -EBADF; + } + + mutex_lock(&dev->struct_mutex); + priv = obj->driver_private; + + if (TYPE_IS_FD(priv->type)) + ret = -EINVAL; + else { + if (TYPE_IS_PMEM(args->type) || TYPE_IS_MEM(args->type)) + priv->type = args->type; + else + ret = -EINVAL; + } + + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +int +kgsl_gem_getmemtype_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_kgsl_gem_memtype *args = data; + struct drm_gem_object *obj; + struct drm_kgsl_gem_object *priv; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + + if (obj == NULL) { + DRM_ERROR("Invalid GEM handle %x\n", args->handle); + return -EBADF; + } + + mutex_lock(&dev->struct_mutex); + priv = obj->driver_private; + + args->type = priv->type; + + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +int +kgsl_gem_unbind_gpu_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_kgsl_gem_bind_gpu *args = data; + struct drm_gem_object *obj; + struct drm_kgsl_gem_object *priv; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + + if (obj == NULL) { + DRM_ERROR("Invalid GEM handle %x\n", args->handle); + return -EBADF; + } + + mutex_lock(&dev->struct_mutex); + priv = obj->driver_private; + + if (--priv->bound == 0) + kgsl_gem_unmap(obj); + + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + return 0; +} + +static int +kgsl_gem_map(struct drm_gem_object *obj) +{ + struct drm_kgsl_gem_object *priv = obj->driver_private; + int index; + int ret = -EINVAL; + + if (priv->flags & DRM_KGSL_GEM_FLAG_MAPPED) + return 0; + + /* Get the global page table */ + + if (priv->pagetable == NULL) { + priv->pagetable = 
kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT); + + if (priv->pagetable == NULL) { + DRM_ERROR("Unable to get the GPU MMU pagetable\n"); + return -EINVAL; + } + } + + priv->memdesc.pagetable = priv->pagetable; + + ret = kgsl_mmu_map(priv->pagetable, &priv->memdesc, + GSL_PT_PAGE_RV | GSL_PT_PAGE_WV); + + if (!ret) { + for (index = 0; index < priv->bufcount; index++) { + priv->bufs[index].gpuaddr = + priv->memdesc.gpuaddr + + priv->bufs[index].offset; + } + } + + /* Add cached memory to the list to be cached */ + + if (priv->type == DRM_KGSL_GEM_TYPE_KMEM || + priv->type & DRM_KGSL_GEM_CACHE_MASK) + list_add(&priv->list, &kgsl_mem_list); + + priv->flags |= DRM_KGSL_GEM_FLAG_MAPPED; + + return ret; +} + +int +kgsl_gem_bind_gpu_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_kgsl_gem_bind_gpu *args = data; + struct drm_gem_object *obj; + struct drm_kgsl_gem_object *priv; + int ret = 0; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + + if (obj == NULL) { + DRM_ERROR("Invalid GEM handle %x\n", args->handle); + return -EBADF; + } + + mutex_lock(&dev->struct_mutex); + priv = obj->driver_private; + + if (priv->bound++ == 0) { + + if (!kgsl_gem_memory_allocated(obj)) { + DRM_ERROR("Memory not allocated for this object\n"); + ret = -ENOMEM; + goto out; + } + + ret = kgsl_gem_map(obj); + + /* This is legacy behavior - use GET_BUFFERINFO instead */ + args->gpuptr = priv->bufs[0].gpuaddr; + } +out: + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + return ret; +} + +/* Allocate the memory and prepare it for CPU mapping */ + +int +kgsl_gem_alloc_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_kgsl_gem_alloc *args = data; + struct drm_gem_object *obj; + struct drm_kgsl_gem_object *priv; + int ret; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + + if (obj == NULL) { + DRM_ERROR("Invalid GEM handle %x\n", args->handle); + return -EBADF; + } + + mutex_lock(&dev->struct_mutex); + priv = obj->driver_private; + + ret = kgsl_gem_alloc_memory(obj); + + if (ret) { + DRM_ERROR("Unable to allocate object memory\n"); + } else if (!priv->mmap_offset) { + ret = kgsl_gem_create_mmap_offset(obj); + if (ret) + DRM_ERROR("Unable to create a mmap offset\n"); + } + + args->offset = priv->mmap_offset; + + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +int +kgsl_gem_mmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_kgsl_gem_mmap *args = data; + struct drm_gem_object *obj; + unsigned long addr; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + + if (obj == NULL) { + DRM_ERROR("Invalid GEM handle %x\n", args->handle); + return -EBADF; + } + + down_write(¤t->mm->mmap_sem); + + addr = do_mmap(obj->filp, 0, args->size, + PROT_READ | PROT_WRITE, MAP_SHARED, + args->offset); + + up_write(¤t->mm->mmap_sem); + + mutex_lock(&dev->struct_mutex); + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + + if (IS_ERR((void *) addr)) + return addr; + + args->hostptr = (uint32_t) addr; + return 0; +} + +/* This function is deprecated */ + +int +kgsl_gem_prep_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_kgsl_gem_prep *args = data; + struct drm_gem_object *obj; + struct drm_kgsl_gem_object *priv; + int ret; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + + if (obj == NULL) { + DRM_ERROR("Invalid GEM handle %x\n", args->handle); + return 
-EBADF; + } + + mutex_lock(&dev->struct_mutex); + priv = obj->driver_private; + + ret = kgsl_gem_alloc_memory(obj); + if (ret) { + DRM_ERROR("Unable to allocate object memory\n"); + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + return ret; + } + + if (priv->mmap_offset == 0) { + ret = kgsl_gem_create_mmap_offset(obj); + if (ret) { + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + return ret; + } + } + + args->offset = priv->mmap_offset; + args->phys = priv->memdesc.physaddr; + + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +int +kgsl_gem_get_bufinfo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_kgsl_gem_bufinfo *args = data; + struct drm_gem_object *obj; + struct drm_kgsl_gem_object *priv; + int ret = -EINVAL; + int index; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + + if (obj == NULL) { + DRM_ERROR("Invalid GEM handle %x\n", args->handle); + return -EBADF; + } + + mutex_lock(&dev->struct_mutex); + priv = obj->driver_private; + + if (!kgsl_gem_memory_allocated(obj)) { + DRM_ERROR("Memory not allocated for this object\n"); + goto out; + } + + for (index = 0; index < priv->bufcount; index++) { + args->offset[index] = priv->bufs[index].offset; + args->gpuaddr[index] = priv->bufs[index].gpuaddr; + } + + args->count = priv->bufcount; + args->active = priv->active; + + ret = 0; + +out: + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +int +kgsl_gem_set_bufcount_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_kgsl_gem_bufcount *args = data; + struct drm_gem_object *obj; + struct drm_kgsl_gem_object *priv; + int ret = -EINVAL; + + if (args->bufcount < 1 || args->bufcount > DRM_KGSL_GEM_MAX_BUFFERS) + return -EINVAL; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + + if (obj == NULL) { + DRM_ERROR("Invalid GEM handle %x\n", args->handle); + return -EBADF; + } + + mutex_lock(&dev->struct_mutex); + priv = obj->driver_private; + + /* It is too much math to worry about what happens if we are already + allocated, so just bail if we are */ + + if (kgsl_gem_memory_allocated(obj)) { + DRM_ERROR("Memory already allocated - cannot change" + "number of buffers\n"); + goto out; + } + + priv->bufcount = args->bufcount; + ret = 0; + +out: + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +int +kgsl_gem_set_active_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_kgsl_gem_active *args = data; + struct drm_gem_object *obj; + struct drm_kgsl_gem_object *priv; + int ret = -EINVAL; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + + if (obj == NULL) { + DRM_ERROR("Invalid GEM handle %x\n", args->handle); + return -EBADF; + } + + mutex_lock(&dev->struct_mutex); + priv = obj->driver_private; + + if (args->active < 0 || args->active >= priv->bufcount) { + DRM_ERROR("Invalid active buffer %d\n", args->active); + goto out; + } + + priv->active = args->active; + ret = 0; + +out: + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +int kgsl_gem_kmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct drm_gem_object *obj = vma->vm_private_data; + struct drm_device *dev = obj->dev; + struct drm_kgsl_gem_object *priv; + unsigned long offset; + struct page *page; + int i; + + mutex_lock(&dev->struct_mutex); + + priv = 
obj->driver_private; + + offset = (unsigned long) vmf->virtual_address - vma->vm_start; + i = offset >> PAGE_SHIFT; + page = sg_page(&(priv->memdesc.sg[i])); + + if (!page) { + mutex_unlock(&dev->struct_mutex); + return VM_FAULT_SIGBUS; + } + + get_page(page); + vmf->page = page; + + mutex_unlock(&dev->struct_mutex); + return 0; +} + +int kgsl_gem_phys_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct drm_gem_object *obj = vma->vm_private_data; + struct drm_device *dev = obj->dev; + struct drm_kgsl_gem_object *priv; + unsigned long offset, pfn; + int ret = 0; + + offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >> + PAGE_SHIFT; + + mutex_lock(&dev->struct_mutex); + + priv = obj->driver_private; + + pfn = (priv->memdesc.physaddr >> PAGE_SHIFT) + offset; + ret = vm_insert_pfn(vma, + (unsigned long) vmf->virtual_address, pfn); + mutex_unlock(&dev->struct_mutex); + + switch (ret) { + case -ENOMEM: + case -EAGAIN: + return VM_FAULT_OOM; + case -EFAULT: + return VM_FAULT_SIGBUS; + default: + return VM_FAULT_NOPAGE; + } +} + +static struct vm_operations_struct kgsl_gem_kmem_vm_ops = { + .fault = kgsl_gem_kmem_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static struct vm_operations_struct kgsl_gem_phys_vm_ops = { + .fault = kgsl_gem_phys_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +/* This is a clone of the standard drm_gem_mmap function modified to allow + us to properly map KMEM regions as well as the PMEM regions */ + +int msm_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->minor->dev; + struct drm_gem_mm *mm = dev->mm_private; + struct drm_local_map *map = NULL; + struct drm_gem_object *obj; + struct drm_hash_item *hash; + struct drm_kgsl_gem_object *gpriv; + int ret = 0; + + mutex_lock(&dev->struct_mutex); + + if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) { + mutex_unlock(&dev->struct_mutex); + return drm_mmap(filp, vma); + } + + map = drm_hash_entry(hash, struct drm_map_list, hash)->map; + if (!map || + ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) { + ret = -EPERM; + goto out_unlock; + } + + /* Check for valid size. */ + if (map->size < vma->vm_end - vma->vm_start) { + ret = -EINVAL; + goto out_unlock; + } + + obj = map->handle; + + gpriv = obj->driver_private; + + /* VM_PFNMAP is only for memory that doesn't use struct page + * in other words, not "normal" memory. If you try to use it + * with "normal" memory then the mappings don't get flushed. 
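+ * That is why TYPE_IS_MEM (vmalloc-backed) objects below get the
+ * fault-based kgsl_gem_kmem_vm_ops, while only the physically contiguous
+ * PMEM/FBMEM objects take the VM_PFNMAP path with kgsl_gem_phys_vm_ops.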
*/ + + if (TYPE_IS_MEM(gpriv->type)) { + vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND; + vma->vm_ops = &kgsl_gem_kmem_vm_ops; + } else { + vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | + VM_DONTEXPAND; + vma->vm_ops = &kgsl_gem_phys_vm_ops; + } + + vma->vm_private_data = map->handle; + + + /* Take care of requested caching policy */ + if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM || + gpriv->type & DRM_KGSL_GEM_CACHE_MASK) { + if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACKWA) + vma->vm_page_prot = + pgprot_writebackwacache(vma->vm_page_prot); + else if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACK) + vma->vm_page_prot = + pgprot_writebackcache(vma->vm_page_prot); + else if (gpriv->type & DRM_KGSL_GEM_CACHE_WTHROUGH) + vma->vm_page_prot = + pgprot_writethroughcache(vma->vm_page_prot); + else + vma->vm_page_prot = + pgprot_writecombine(vma->vm_page_prot); + } else { + if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) + vma->vm_page_prot = + pgprot_noncached(vma->vm_page_prot); + else + /* default pmem is WC */ + vma->vm_page_prot = + pgprot_writecombine(vma->vm_page_prot); + } + + /* flush out existing KMEM cached mappings if new ones are + * of uncached type */ + if (IS_MEM_UNCACHED(gpriv->type)) + kgsl_cache_range_op(&gpriv->memdesc, + KGSL_CACHE_OP_FLUSH); + + /* Add the other memory types here */ + + /* Take a ref for this mapping of the object, so that the fault + * handler can dereference the mmap offset's pointer to the object. + * This reference is cleaned up by the corresponding vm_close + * (which should happen whether the vma was created by this call, or + * by a vm_open due to mremap or partial unmap or whatever). + */ + drm_gem_object_reference(obj); + + vma->vm_file = filp; /* Needed for drm_vm_open() */ + drm_vm_open_locked(vma); + +out_unlock: + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +void +cleanup_fence(struct drm_kgsl_gem_object_fence *fence, int check_waiting) +{ + int j; + struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL; + struct drm_kgsl_gem_object *unlock_obj; + struct drm_gem_object *obj; + struct drm_kgsl_gem_object_wait_list_entry *lock_next; + + fence->ts_valid = 0; + fence->timestamp = -1; + fence->ts_device = -1; + + /* Walk the list of buffers in this fence and clean up the */ + /* references. 
Note that this can cause memory allocations */ + /* to be freed */ + for (j = fence->num_buffers; j > 0; j--) { + this_fence_entry = + (struct drm_kgsl_gem_object_fence_list_entry *) + fence->buffers_in_fence.prev; + + this_fence_entry->in_use = 0; + obj = this_fence_entry->gem_obj; + unlock_obj = obj->driver_private; + + /* Delete it from the list */ + + list_del(&this_fence_entry->list); + + /* we are unlocking - see if there are other pids waiting */ + if (check_waiting) { + if (!list_empty(&unlock_obj->wait_list)) { + lock_next = + (struct drm_kgsl_gem_object_wait_list_entry *) + unlock_obj->wait_list.prev; + + list_del((struct list_head *)&lock_next->list); + + unlock_obj->lockpid = 0; + wake_up_interruptible( + &lock_next->process_wait_q); + lock_next->pid = 0; + + } else { + /* List is empty so set pid to 0 */ + unlock_obj->lockpid = 0; + } + } + + drm_gem_object_unreference(obj); + } + /* here all the buffers in the fence are released */ + /* clear the fence entry */ + fence->fence_id = ENTRY_EMPTY; +} + +int +find_empty_fence(void) +{ + int i; + + for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) { + if (gem_buf_fence[i].fence_id == ENTRY_EMPTY) { + gem_buf_fence[i].fence_id = fence_id++; + gem_buf_fence[i].ts_valid = 0; + INIT_LIST_HEAD(&(gem_buf_fence[i].buffers_in_fence)); + if (fence_id == 0xFFFFFFF0) + fence_id = 1; + return i; + } else { + + /* Look for entries to be cleaned up */ + if (gem_buf_fence[i].fence_id == ENTRY_NEEDS_CLEANUP) + cleanup_fence(&gem_buf_fence[i], 0); + } + } + + return ENTRY_EMPTY; +} + +int +find_fence(int index) +{ + int i; + + for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) { + if (gem_buf_fence[i].fence_id == index) + return i; + } + + return ENTRY_EMPTY; +} + +void +wakeup_fence_entries(struct drm_kgsl_gem_object_fence *fence) +{ + struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL; + struct drm_kgsl_gem_object_wait_list_entry *lock_next; + struct drm_kgsl_gem_object *unlock_obj; + struct drm_gem_object *obj; + + /* TS has expired when we get here */ + fence->ts_valid = 0; + fence->timestamp = -1; + fence->ts_device = -1; + + list_for_each_entry(this_fence_entry, &fence->buffers_in_fence, list) { + obj = this_fence_entry->gem_obj; + unlock_obj = obj->driver_private; + + if (!list_empty(&unlock_obj->wait_list)) { + lock_next = + (struct drm_kgsl_gem_object_wait_list_entry *) + unlock_obj->wait_list.prev; + + /* Unblock the pid */ + lock_next->pid = 0; + + /* Delete it from the list */ + list_del((struct list_head *)&lock_next->list); + + unlock_obj->lockpid = 0; + wake_up_interruptible(&lock_next->process_wait_q); + + } else { + /* List is empty so set pid to 0 */ + unlock_obj->lockpid = 0; + } + } + fence->fence_id = ENTRY_NEEDS_CLEANUP; /* Mark it as needing cleanup */ +} + +static int kgsl_ts_notifier_cb(struct notifier_block *blk, + unsigned long code, void *_param) +{ + struct drm_kgsl_gem_object_fence *fence; + struct kgsl_device *device = kgsl_get_device(code); + int i; + + /* loop through the fences to see what things can be processed */ + + for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) { + fence = &gem_buf_fence[i]; + if (!fence->ts_valid || fence->ts_device != code) + continue; + + if (kgsl_check_timestamp(device, fence->timestamp)) + wakeup_fence_entries(fence); + } + + return 0; +} + +int +kgsl_gem_lock_handle_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + /* The purpose of this function is to lock a given set of handles. */ + /* The driver will maintain a list of locked handles. 
*/ + /* If a request comes in for a handle that's locked the thread will */ + /* block until it's no longer in use. */ + + struct drm_kgsl_gem_lock_handles *args = data; + struct drm_gem_object *obj; + struct drm_kgsl_gem_object *priv; + struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL; + struct drm_kgsl_gem_object_fence *fence; + struct drm_kgsl_gem_object_wait_list_entry *lock_item; + int i, j; + int result = 0; + uint32_t *lock_list; + uint32_t *work_list = NULL; + int32_t fence_index; + + /* copy in the data from user space */ + lock_list = kzalloc(sizeof(uint32_t) * args->num_handles, GFP_KERNEL); + if (!lock_list) { + DRM_ERROR("Unable allocate memory for lock list\n"); + result = -ENOMEM; + goto error; + } + + if (copy_from_user(lock_list, args->handle_list, + sizeof(uint32_t) * args->num_handles)) { + DRM_ERROR("Unable to copy the lock list from the user\n"); + result = -EFAULT; + goto free_handle_list; + } + + + work_list = lock_list; + mutex_lock(&dev->struct_mutex); + + /* build the fence for this group of handles */ + fence_index = find_empty_fence(); + if (fence_index == ENTRY_EMPTY) { + DRM_ERROR("Unable to find a empty fence\n"); + args->lock_id = 0xDEADBEEF; + result = -EFAULT; + goto out_unlock; + } + + fence = &gem_buf_fence[fence_index]; + gem_buf_fence[fence_index].num_buffers = args->num_handles; + args->lock_id = gem_buf_fence[fence_index].fence_id; + + for (j = args->num_handles; j > 0; j--, lock_list++) { + obj = drm_gem_object_lookup(dev, file_priv, *lock_list); + + if (obj == NULL) { + DRM_ERROR("Invalid GEM handle %x\n", *lock_list); + result = -EBADF; + goto out_unlock; + } + + priv = obj->driver_private; + this_fence_entry = NULL; + + /* get a fence entry to hook into the fence */ + for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) { + if (!priv->fence_entries[i].in_use) { + this_fence_entry = &priv->fence_entries[i]; + this_fence_entry->in_use = 1; + break; + } + } + + if (this_fence_entry == NULL) { + fence->num_buffers = 0; + fence->fence_id = ENTRY_EMPTY; + args->lock_id = 0xDEADBEAD; + result = -EFAULT; + drm_gem_object_unreference(obj); + goto out_unlock; + } + + /* We're trying to lock - add to a fence */ + list_add((struct list_head *)this_fence_entry, + &gem_buf_fence[fence_index].buffers_in_fence); + if (priv->lockpid) { + + if (priv->lockpid == args->pid) { + /* now that things are running async this */ + /* happens when an op isn't done */ + /* so it's already locked by the calling pid */ + continue; + } + + + /* if a pid already had it locked */ + /* create and add to wait list */ + for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) { + if (priv->wait_entries[i].in_use == 0) { + /* this one is empty */ + lock_item = &priv->wait_entries[i]; + lock_item->in_use = 1; + lock_item->pid = args->pid; + INIT_LIST_HEAD((struct list_head *) + &priv->wait_entries[i]); + break; + } + } + + if (i == DRM_KGSL_HANDLE_WAIT_ENTRIES) { + + result = -EFAULT; + drm_gem_object_unreference(obj); + goto out_unlock; + } + + list_add_tail((struct list_head *)&lock_item->list, + &priv->wait_list); + mutex_unlock(&dev->struct_mutex); + /* here we need to block */ + wait_event_interruptible_timeout( + priv->wait_entries[i].process_wait_q, + (priv->lockpid == 0), + msecs_to_jiffies(64)); + mutex_lock(&dev->struct_mutex); + lock_item->in_use = 0; + } + + /* Getting here means no one currently holds the lock */ + priv->lockpid = args->pid; + + args->lock_id = gem_buf_fence[fence_index].fence_id; + } + fence->lockpid = args->pid; + +out_unlock: + 
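+	/*
+	 * Common exit: drop struct_mutex, then free the handle list copied
+	 * in from user space. work_list still points at the start of that
+	 * allocation even though lock_list was advanced while walking the
+	 * handles above.
+	 */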
mutex_unlock(&dev->struct_mutex); + +free_handle_list: + kfree(work_list); + +error: + return result; +} + +int +kgsl_gem_unlock_handle_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_kgsl_gem_unlock_handles *args = data; + int result = 0; + int32_t fence_index; + + mutex_lock(&dev->struct_mutex); + fence_index = find_fence(args->lock_id); + if (fence_index == ENTRY_EMPTY) { + DRM_ERROR("Invalid lock ID: %x\n", args->lock_id); + result = -EFAULT; + goto out_unlock; + } + + cleanup_fence(&gem_buf_fence[fence_index], 1); + +out_unlock: + mutex_unlock(&dev->struct_mutex); + + return result; +} + + +int +kgsl_gem_unlock_on_ts_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_kgsl_gem_unlock_on_ts *args = data; + int result = 0; + int ts_done = 0; + int32_t fence_index, ts_device; + struct drm_kgsl_gem_object_fence *fence; + struct kgsl_device *device; + + if (args->type == DRM_KGSL_GEM_TS_3D) + ts_device = KGSL_DEVICE_3D0; + else if (args->type == DRM_KGSL_GEM_TS_2D) + ts_device = KGSL_DEVICE_2D0; + else { + result = -EINVAL; + goto error; + } + + device = kgsl_get_device(ts_device); + ts_done = kgsl_check_timestamp(device, args->timestamp); + + mutex_lock(&dev->struct_mutex); + + fence_index = find_fence(args->lock_id); + if (fence_index == ENTRY_EMPTY) { + DRM_ERROR("Invalid lock ID: %x\n", args->lock_id); + result = -EFAULT; + goto out_unlock; + } + + fence = &gem_buf_fence[fence_index]; + fence->ts_device = ts_device; + + if (!ts_done) + fence->ts_valid = 1; + else + cleanup_fence(fence, 1); + + +out_unlock: + mutex_unlock(&dev->struct_mutex); + +error: + return result; +} + +struct drm_ioctl_desc kgsl_drm_ioctls[] = { + DRM_IOCTL_DEF_DRV(KGSL_GEM_CREATE, kgsl_gem_create_ioctl, 0), + DRM_IOCTL_DEF_DRV(KGSL_GEM_PREP, kgsl_gem_prep_ioctl, 0), + DRM_IOCTL_DEF_DRV(KGSL_GEM_SETMEMTYPE, kgsl_gem_setmemtype_ioctl, 0), + DRM_IOCTL_DEF_DRV(KGSL_GEM_GETMEMTYPE, kgsl_gem_getmemtype_ioctl, 0), + DRM_IOCTL_DEF_DRV(KGSL_GEM_BIND_GPU, kgsl_gem_bind_gpu_ioctl, 0), + DRM_IOCTL_DEF_DRV(KGSL_GEM_UNBIND_GPU, kgsl_gem_unbind_gpu_ioctl, 0), + DRM_IOCTL_DEF_DRV(KGSL_GEM_ALLOC, kgsl_gem_alloc_ioctl, 0), + DRM_IOCTL_DEF_DRV(KGSL_GEM_MMAP, kgsl_gem_mmap_ioctl, 0), + DRM_IOCTL_DEF_DRV(KGSL_GEM_GET_BUFINFO, kgsl_gem_get_bufinfo_ioctl, 0), + DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_BUFCOUNT, + kgsl_gem_set_bufcount_ioctl, 0), + DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_ACTIVE, kgsl_gem_set_active_ioctl, 0), + DRM_IOCTL_DEF_DRV(KGSL_GEM_LOCK_HANDLE, + kgsl_gem_lock_handle_ioctl, 0), + DRM_IOCTL_DEF_DRV(KGSL_GEM_UNLOCK_HANDLE, + kgsl_gem_unlock_handle_ioctl, 0), + DRM_IOCTL_DEF_DRV(KGSL_GEM_UNLOCK_ON_TS, + kgsl_gem_unlock_on_ts_ioctl, 0), + DRM_IOCTL_DEF_DRV(KGSL_GEM_CREATE_FD, kgsl_gem_create_fd_ioctl, + DRM_MASTER), +}; + +static struct drm_driver driver = { + .driver_features = DRIVER_USE_PLATFORM_DEVICE | DRIVER_GEM, + .load = kgsl_drm_load, + .unload = kgsl_drm_unload, + .firstopen = kgsl_drm_firstopen, + .lastclose = kgsl_drm_lastclose, + .preclose = kgsl_drm_preclose, + .suspend = kgsl_drm_suspend, + .resume = kgsl_drm_resume, + .reclaim_buffers = drm_core_reclaim_buffers, + .gem_init_object = kgsl_gem_init_object, + .gem_free_object = kgsl_gem_free_object, + .ioctls = kgsl_drm_ioctls, + + .fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, + .mmap = msm_drm_gem_mmap, + .poll = drm_poll, + .fasync = drm_fasync, + }, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = 
DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, +}; + +int kgsl_drm_init(struct platform_device *dev) +{ + int i; + + driver.num_ioctls = DRM_ARRAY_SIZE(kgsl_drm_ioctls); + driver.platform_device = dev; + + INIT_LIST_HEAD(&kgsl_mem_list); + + for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) { + gem_buf_fence[i].num_buffers = 0; + gem_buf_fence[i].ts_valid = 0; + gem_buf_fence[i].fence_id = ENTRY_EMPTY; + } + + return drm_init(&driver); +} + +void kgsl_drm_exit(void) +{ + drm_exit(&driver); +} diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c new file mode 100644 index 0000000000000..d41254561f491 --- /dev/null +++ b/drivers/gpu/msm/kgsl_gpummu.c @@ -0,0 +1,802 @@ +/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include +#include + +#include "kgsl.h" +#include "kgsl_mmu.h" +#include "kgsl_device.h" +#include "kgsl_sharedmem.h" +#include "a2xx_reg.h" + +#define KGSL_PAGETABLE_SIZE \ + ALIGN(KGSL_PAGETABLE_ENTRIES(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE) * \ + KGSL_PAGETABLE_ENTRY_SIZE, PAGE_SIZE) + +static ssize_t +sysfs_show_ptpool_entries(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + struct kgsl_ptpool *pool = (struct kgsl_ptpool *) + kgsl_driver.ptpool; + return snprintf(buf, PAGE_SIZE, "%d\n", pool->entries); +} + +static ssize_t +sysfs_show_ptpool_min(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + struct kgsl_ptpool *pool = (struct kgsl_ptpool *) + kgsl_driver.ptpool; + return snprintf(buf, PAGE_SIZE, "%d\n", + pool->static_entries); +} + +static ssize_t +sysfs_show_ptpool_chunks(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + struct kgsl_ptpool *pool = (struct kgsl_ptpool *) + kgsl_driver.ptpool; + return snprintf(buf, PAGE_SIZE, "%d\n", pool->chunks); +} + +static ssize_t +sysfs_show_ptpool_ptsize(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + struct kgsl_ptpool *pool = (struct kgsl_ptpool *) + kgsl_driver.ptpool; + return snprintf(buf, PAGE_SIZE, "%d\n", pool->ptsize); +} + +static struct kobj_attribute attr_ptpool_entries = { + .attr = { .name = "ptpool_entries", .mode = 0444 }, + .show = sysfs_show_ptpool_entries, + .store = NULL, +}; + +static struct kobj_attribute attr_ptpool_min = { + .attr = { .name = "ptpool_min", .mode = 0444 }, + .show = sysfs_show_ptpool_min, + .store = NULL, +}; + +static struct kobj_attribute attr_ptpool_chunks = { + .attr = { .name = "ptpool_chunks", .mode = 0444 }, + .show = sysfs_show_ptpool_chunks, + .store = NULL, +}; + +static struct kobj_attribute attr_ptpool_ptsize = { + .attr = { .name = "ptpool_ptsize", .mode = 0444 }, + .show = sysfs_show_ptpool_ptsize, + .store = NULL, +}; + +static struct attribute *ptpool_attrs[] = { + &attr_ptpool_entries.attr, + &attr_ptpool_min.attr, + &attr_ptpool_chunks.attr, + &attr_ptpool_ptsize.attr, + NULL, +}; + +static struct attribute_group ptpool_attr_group = { + .attrs = ptpool_attrs, +}; + +static int +_kgsl_ptpool_add_entries(struct 
kgsl_ptpool *pool, int count, int dynamic) +{ + struct kgsl_ptpool_chunk *chunk; + size_t size = ALIGN(count * pool->ptsize, PAGE_SIZE); + + BUG_ON(count == 0); + + if (get_order(size) >= MAX_ORDER) { + KGSL_CORE_ERR("ptpool allocation is too big: %d\n", size); + return -EINVAL; + } + + chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); + if (chunk == NULL) { + KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*chunk)); + return -ENOMEM; + } + + chunk->size = size; + chunk->count = count; + chunk->dynamic = dynamic; + + chunk->data = dma_alloc_coherent(NULL, size, + &chunk->phys, GFP_KERNEL); + + if (chunk->data == NULL) { + KGSL_CORE_ERR("dma_alloc_coherent(%d) failed\n", size); + goto err; + } + + chunk->bitmap = kzalloc(BITS_TO_LONGS(count) * 4, GFP_KERNEL); + + if (chunk->bitmap == NULL) { + KGSL_CORE_ERR("kzalloc(%d) failed\n", + BITS_TO_LONGS(count) * 4); + goto err_dma; + } + + list_add_tail(&chunk->list, &pool->list); + + pool->chunks++; + pool->entries += count; + + if (!dynamic) + pool->static_entries += count; + + return 0; + +err_dma: + dma_free_coherent(NULL, chunk->size, chunk->data, chunk->phys); +err: + kfree(chunk); + return -ENOMEM; +} + +static void * +_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, unsigned int *physaddr) +{ + struct kgsl_ptpool_chunk *chunk; + + list_for_each_entry(chunk, &pool->list, list) { + int bit = find_first_zero_bit(chunk->bitmap, chunk->count); + + if (bit >= chunk->count) + continue; + + set_bit(bit, chunk->bitmap); + *physaddr = chunk->phys + (bit * pool->ptsize); + + return chunk->data + (bit * pool->ptsize); + } + + return NULL; +} + +/** + * kgsl_ptpool_add + * @pool: A pointer to a ptpool structure + * @entries: Number of entries to add + * + * Add static entries to the pagetable pool. + */ + +static int +kgsl_ptpool_add(struct kgsl_ptpool *pool, int count) +{ + int ret = 0; + BUG_ON(count == 0); + + mutex_lock(&pool->lock); + + /* Only 4MB can be allocated in one chunk, so larger allocations + need to be split into multiple sections */ + + while (count) { + int entries = ((count * pool->ptsize) > SZ_4M) ? + SZ_4M / pool->ptsize : count; + + /* Add the entries as static, i.e. they don't ever stand + a chance of being removed */ + + ret = _kgsl_ptpool_add_entries(pool, entries, 0); + if (ret) + break; + + count -= entries; + } + + mutex_unlock(&pool->lock); + return ret; +} + +/** + * kgsl_ptpool_alloc + * @pool: A pointer to a ptpool structure + * @addr: A pointer to store the physical address of the chunk + * + * Allocate a pagetable from the pool. 
Returns the virtual address + * of the pagetable, the physical address is returned in physaddr + */ + +static void *kgsl_ptpool_alloc(struct kgsl_ptpool *pool, + unsigned int *physaddr) +{ + void *addr = NULL; + int ret; + + mutex_lock(&pool->lock); + addr = _kgsl_ptpool_get_entry(pool, physaddr); + if (addr) + goto done; + + /* Add a chunk for 1 more pagetable and mark it as dynamic */ + ret = _kgsl_ptpool_add_entries(pool, 1, 1); + + if (ret) + goto done; + + addr = _kgsl_ptpool_get_entry(pool, physaddr); +done: + mutex_unlock(&pool->lock); + return addr; +} + +static inline void _kgsl_ptpool_rm_chunk(struct kgsl_ptpool_chunk *chunk) +{ + list_del(&chunk->list); + + if (chunk->data) + dma_free_coherent(NULL, chunk->size, chunk->data, + chunk->phys); + kfree(chunk->bitmap); + kfree(chunk); +} + +/** + * kgsl_ptpool_free + * @pool: A pointer to a ptpool structure + * @addr: A pointer to the virtual address to free + * + * Free a pagetable allocated from the pool + */ + +static void kgsl_ptpool_free(struct kgsl_ptpool *pool, void *addr) +{ + struct kgsl_ptpool_chunk *chunk, *tmp; + + if (pool == NULL || addr == NULL) + return; + + mutex_lock(&pool->lock); + list_for_each_entry_safe(chunk, tmp, &pool->list, list) { + if (addr >= chunk->data && + addr < chunk->data + chunk->size) { + int bit = ((unsigned long) (addr - chunk->data)) / + pool->ptsize; + + clear_bit(bit, chunk->bitmap); + memset(addr, 0, pool->ptsize); + + if (chunk->dynamic && + bitmap_empty(chunk->bitmap, chunk->count)) + _kgsl_ptpool_rm_chunk(chunk); + + break; + } + } + + mutex_unlock(&pool->lock); +} + +void kgsl_gpummu_ptpool_destroy(void *ptpool) +{ + struct kgsl_ptpool *pool = (struct kgsl_ptpool *)ptpool; + struct kgsl_ptpool_chunk *chunk, *tmp; + + if (pool == NULL) + return; + + mutex_lock(&pool->lock); + list_for_each_entry_safe(chunk, tmp, &pool->list, list) + _kgsl_ptpool_rm_chunk(chunk); + mutex_unlock(&pool->lock); + + kfree(pool); +} + +/** + * kgsl_ptpool_init + * @pool: A pointer to a ptpool structure to initialize + * @entries: The number of inital entries to add to the pool + * + * Initalize a pool and allocate an initial chunk of entries. 
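+ * Each entry is KGSL_PAGETABLE_SIZE bytes.  Entries added here are
+ * static and are never reclaimed; further pagetables are added
+ * dynamically by kgsl_ptpool_alloc() as needed.  Also registers the
+ * ptpool_* sysfs attribute group for pool statistics.  Returns the
+ * pool handle, or NULL on failure.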
+ */ +void *kgsl_gpummu_ptpool_init(int entries) +{ + int ptsize = KGSL_PAGETABLE_SIZE; + struct kgsl_ptpool *pool; + int ret = 0; + + pool = kzalloc(sizeof(struct kgsl_ptpool), GFP_KERNEL); + if (!pool) { + KGSL_CORE_ERR("Failed to allocate memory " + "for ptpool\n"); + return NULL; + } + + pool->ptsize = ptsize; + mutex_init(&pool->lock); + INIT_LIST_HEAD(&pool->list); + + if (entries) { + ret = kgsl_ptpool_add(pool, entries); + if (ret) + goto err_ptpool_remove; + } + + ret = sysfs_create_group(kgsl_driver.ptkobj, &ptpool_attr_group); + if (ret) { + KGSL_CORE_ERR("sysfs_create_group failed for ptpool " + "statistics: %d\n", ret); + goto err_ptpool_remove; + } + return (void *)pool; + +err_ptpool_remove: + kgsl_gpummu_ptpool_destroy(pool); + return NULL; +} + +int kgsl_gpummu_pt_equal(struct kgsl_pagetable *pt, + unsigned int pt_base) +{ + struct kgsl_gpummu_pt *gpummu_pt = pt->priv; + return pt && pt_base && (gpummu_pt->base.gpuaddr == pt_base); +} + +void kgsl_gpummu_destroy_pagetable(void *mmu_specific_pt) +{ + struct kgsl_gpummu_pt *gpummu_pt = (struct kgsl_gpummu_pt *) + mmu_specific_pt; + kgsl_ptpool_free((struct kgsl_ptpool *)kgsl_driver.ptpool, + gpummu_pt->base.hostptr); + + kgsl_driver.stats.coherent -= KGSL_PAGETABLE_SIZE; + + kfree(gpummu_pt->tlbflushfilter.base); + + kfree(gpummu_pt); +} + +static inline uint32_t +kgsl_pt_entry_get(unsigned int va_base, uint32_t va) +{ + return (va - va_base) >> PAGE_SHIFT; +} + +static inline void +kgsl_pt_map_set(struct kgsl_gpummu_pt *pt, uint32_t pte, uint32_t val) +{ + uint32_t *baseptr = (uint32_t *)pt->base.hostptr; + BUG_ON(pte*sizeof(uint32_t) >= pt->base.size); + baseptr[pte] = val; +} + +static inline uint32_t +kgsl_pt_map_get(struct kgsl_gpummu_pt *pt, uint32_t pte) +{ + uint32_t *baseptr = (uint32_t *)pt->base.hostptr; + BUG_ON(pte*sizeof(uint32_t) >= pt->base.size); + return baseptr[pte] & GSL_PT_PAGE_ADDR_MASK; +} + +static unsigned int kgsl_gpummu_pt_get_flags(struct kgsl_pagetable *pt, + enum kgsl_deviceid id) +{ + unsigned int result = 0; + struct kgsl_gpummu_pt *gpummu_pt; + + if (pt == NULL) + return 0; + gpummu_pt = pt->priv; + + spin_lock(&pt->lock); + if (gpummu_pt->tlb_flags & (1<tlb_flags &= ~(1<lock); + return result; +} + +static void kgsl_gpummu_pagefault(struct kgsl_device *device) +{ + unsigned int reg; + unsigned int ptbase; + + kgsl_regread(device, MH_MMU_PAGE_FAULT, ®); + kgsl_regread(device, MH_MMU_PT_BASE, &ptbase); + + if (KGSL_DEVICE_3D0 == device->id) { + unsigned int ib1; + unsigned int ib1_sz; + unsigned int ib2; + unsigned int ib2_sz; + unsigned int rptr; + kgsl_regread(device, REG_CP_IB1_BASE, &ib1); + kgsl_regread(device, REG_CP_IB1_BUFSZ, &ib1_sz); + kgsl_regread(device, REG_CP_IB2_BASE, &ib2); + kgsl_regread(device, REG_CP_IB2_BUFSZ, &ib2_sz); + kgsl_regread(device, REG_CP_RB_RPTR, &rptr); + + /* queue a work which will print the IB that caused the + * pagefault, if we are in recovery then no need to q + * work as the recovery routine will mess with the ringbuffer + * contents and then the information will become stale + * anyways */ + if (!device->page_fault_ptbase && + KGSL_STATE_DUMP_AND_RECOVER != device->state) { + device->page_fault_ptbase = ptbase; + device->page_fault_ib1 = ib1; + device->page_fault_rptr = rptr; + + queue_work(device->work_queue, + &device->print_fault_ib); + } + + KGSL_MEM_CRIT(device, + "mmu page fault: page=0x%lx pt=%d op=%s axi=%d " + "ptbase=0x%x IB1=0x%x IB1_SZ=0x%x " + "IB2=0x%x IB2_SZ=0x%x\n", + reg & ~(PAGE_SIZE - 1), + kgsl_mmu_get_ptname_from_ptbase(ptbase), + 
reg & 0x02 ? "WRITE" : "READ", (reg >> 4) & 0xF, + ptbase, ib1, ib1_sz, ib2, ib2_sz); + } +} + +static void *kgsl_gpummu_create_pagetable(void) +{ + struct kgsl_gpummu_pt *gpummu_pt; + + gpummu_pt = kzalloc(sizeof(struct kgsl_gpummu_pt), + GFP_KERNEL); + if (!gpummu_pt) + return NULL; + + gpummu_pt->tlb_flags = 0; + gpummu_pt->last_superpte = 0; + + gpummu_pt->tlbflushfilter.size = (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE / + (PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1; + gpummu_pt->tlbflushfilter.base = (unsigned int *) + kzalloc(gpummu_pt->tlbflushfilter.size, GFP_KERNEL); + if (!gpummu_pt->tlbflushfilter.base) { + KGSL_CORE_ERR("kzalloc(%d) failed\n", + gpummu_pt->tlbflushfilter.size); + goto err_free_gpummu; + } + GSL_TLBFLUSH_FILTER_RESET(); + + gpummu_pt->base.hostptr = kgsl_ptpool_alloc((struct kgsl_ptpool *) + kgsl_driver.ptpool, + &gpummu_pt->base.physaddr); + + if (gpummu_pt->base.hostptr == NULL) + goto err_flushfilter; + + /* ptpool allocations are from coherent memory, so update the + device statistics acordingly */ + + KGSL_STATS_ADD(KGSL_PAGETABLE_SIZE, kgsl_driver.stats.coherent, + kgsl_driver.stats.coherent_max); + + gpummu_pt->base.gpuaddr = gpummu_pt->base.physaddr; + gpummu_pt->base.size = KGSL_PAGETABLE_SIZE; + + return (void *)gpummu_pt; + +err_flushfilter: + kfree(gpummu_pt->tlbflushfilter.base); +err_free_gpummu: + kfree(gpummu_pt); + + return NULL; +} + +static void kgsl_gpummu_default_setstate(struct kgsl_device *device, + uint32_t flags) +{ + struct kgsl_gpummu_pt *gpummu_pt; + if (!kgsl_mmu_enabled()) + return; + + if (flags & KGSL_MMUFLAGS_PTUPDATE) { + kgsl_idle(device, KGSL_TIMEOUT_DEFAULT); + gpummu_pt = device->mmu.hwpagetable->priv; + kgsl_regwrite(device, MH_MMU_PT_BASE, + gpummu_pt->base.gpuaddr); + } + + if (flags & KGSL_MMUFLAGS_TLBFLUSH) { + /* Invalidate all and tc */ + kgsl_regwrite(device, MH_MMU_INVALIDATE, 0x00000003); + } +} + +static void kgsl_gpummu_setstate(struct kgsl_device *device, + struct kgsl_pagetable *pagetable, + unsigned int context_id) +{ + struct kgsl_mmu *mmu = &device->mmu; + struct kgsl_gpummu_pt *gpummu_pt; + + if (mmu->flags & KGSL_FLAGS_STARTED) { + /* page table not current, then setup mmu to use new + * specified page table + */ + if (mmu->hwpagetable != pagetable) { + mmu->hwpagetable = pagetable; + spin_lock(&mmu->hwpagetable->lock); + gpummu_pt = mmu->hwpagetable->priv; + gpummu_pt->tlb_flags &= ~(1<id); + spin_unlock(&mmu->hwpagetable->lock); + + /* call device specific set page table */ + kgsl_setstate(mmu->device, context_id, + KGSL_MMUFLAGS_TLBFLUSH | + KGSL_MMUFLAGS_PTUPDATE); + } + } +} + +static int kgsl_gpummu_init(struct kgsl_device *device) +{ + /* + * intialize device mmu + * + * call this with the global lock held + */ + int status = 0; + struct kgsl_mmu *mmu = &device->mmu; + + mmu->device = device; + + /* sub-client MMU lookups require address translation */ + if ((mmu->config & ~0x1) > 0) { + /*make sure virtual address range is a multiple of 64Kb */ + if (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE & ((1 << 16) - 1)) { + KGSL_CORE_ERR("Invalid pagetable size requested " + "for GPUMMU: %x\n", CONFIG_MSM_KGSL_PAGE_TABLE_SIZE); + return -EINVAL; + } + + /* allocate memory used for completing r/w operations that + * cannot be mapped by the MMU + */ + status = kgsl_allocate_contiguous(&mmu->setstate_memory, 64); + if (!status) + kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0, + mmu->setstate_memory.size); + } + + dev_info(device->dev, "|%s| MMU type set for device is GPUMMU\n", + __func__); + return status; +} + +static int 
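+/*
+ * Start the GPUMMU: program MH_MMU_CONFIG, idle the core, unmask the
+ * MH AXI-error and page-fault interrupts, point MH_MMU_TRAN_ERROR
+ * 32 bytes into setstate_memory (a 32-byte aligned scratch slot),
+ * load the global pagetable into MH_MMU_PT_BASE, program
+ * MH_MMU_VA_RANGE as (KGSL_PAGETABLE_BASE | size >> 16) and flush the
+ * TLB before setting KGSL_FLAGS_STARTED.
+ */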
kgsl_gpummu_start(struct kgsl_device *device) +{ + /* + * intialize device mmu + * + * call this with the global lock held + */ + + struct kgsl_mmu *mmu = &device->mmu; + struct kgsl_gpummu_pt *gpummu_pt; + + if (mmu->flags & KGSL_FLAGS_STARTED) + return 0; + + /* MMU not enabled */ + if ((mmu->config & 0x1) == 0) + return 0; + + /* setup MMU and sub-client behavior */ + kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config); + + /* idle device */ + kgsl_idle(device, KGSL_TIMEOUT_DEFAULT); + + /* enable axi interrupts */ + kgsl_regwrite(device, MH_INTERRUPT_MASK, + GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT); + + kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0, + mmu->setstate_memory.size); + + /* TRAN_ERROR needs a 32 byte (32 byte aligned) chunk of memory + * to complete transactions in case of an MMU fault. Note that + * we'll leave the bottom 32 bytes of the setstate_memory for other + * purposes (e.g. use it when dummy read cycles are needed + * for other blocks) */ + kgsl_regwrite(device, MH_MMU_TRAN_ERROR, + mmu->setstate_memory.physaddr + 32); + + if (mmu->defaultpagetable == NULL) + mmu->defaultpagetable = + kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT); + + /* Return error if the default pagetable doesn't exist */ + if (mmu->defaultpagetable == NULL) + return -ENOMEM; + + mmu->hwpagetable = mmu->defaultpagetable; + gpummu_pt = mmu->hwpagetable->priv; + kgsl_regwrite(device, MH_MMU_PT_BASE, + gpummu_pt->base.gpuaddr); + kgsl_regwrite(device, MH_MMU_VA_RANGE, + (KGSL_PAGETABLE_BASE | + (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE >> 16))); + kgsl_setstate(device, 0, KGSL_MMUFLAGS_TLBFLUSH); + mmu->flags |= KGSL_FLAGS_STARTED; + + return 0; +} + +static int +kgsl_gpummu_unmap(void *mmu_specific_pt, + struct kgsl_memdesc *memdesc) +{ + unsigned int numpages; + unsigned int pte, ptefirst, ptelast, superpte; + unsigned int range = memdesc->size; + struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt; + + /* All GPU addresses as assigned are page aligned, but some + functions purturb the gpuaddr with an offset, so apply the + mask here to make sure we have the right address */ + + unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK; + + numpages = (range >> PAGE_SHIFT); + if (range & (PAGE_SIZE - 1)) + numpages++; + + ptefirst = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, gpuaddr); + ptelast = ptefirst + numpages; + + superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE-1)); + GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE); + for (pte = ptefirst; pte < ptelast; pte++) { +#ifdef VERBOSE_DEBUG + /* check if PTE exists */ + if (!kgsl_pt_map_get(gpummu_pt, pte)) + KGSL_CORE_ERR("pt entry %x is already " + "unmapped for pagetable %p\n", pte, gpummu_pt); +#endif + kgsl_pt_map_set(gpummu_pt, pte, GSL_PT_PAGE_DIRTY); + superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1)); + if (pte == superpte) + GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / + GSL_PT_SUPER_PTE); + } + + /* Post all writes to the pagetable */ + wmb(); + + return 0; +} + +#define SUPERPTE_IS_DIRTY(_p) \ +(((_p) & (GSL_PT_SUPER_PTE - 1)) == 0 && \ +GSL_TLBFLUSH_FILTER_ISDIRTY((_p) / GSL_PT_SUPER_PTE)) + +static int +kgsl_gpummu_map(void *mmu_specific_pt, + struct kgsl_memdesc *memdesc, + unsigned int protflags) +{ + unsigned int pte; + struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt; + struct scatterlist *s; + int flushtlb = 0; + int i; + + pte = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, memdesc->gpuaddr); + + /* Flush the TLB if the first PTE isn't at the superpte boundary */ + if (pte & (GSL_PT_SUPER_PTE - 1)) + flushtlb = 1; + + 
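+ /*
+ * Walk the scatterlist and write one PTE per page: the physical page
+ * address in the upper bits with the protection flags (typically
+ * GSL_PT_PAGE_RV | GSL_PT_PAGE_WV) in the low bits.  The pagetable is
+ * only marked for a full TLB flush (tlb_flags = UINT_MAX) when the
+ * range touches a superPTE group that an earlier unmap marked dirty
+ * in the flush filter, or when it does not start and end on superPTE
+ * boundaries.
+ */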
for_each_sg(memdesc->sg, s, memdesc->sglen, i) { + unsigned int paddr = kgsl_get_sg_pa(s); + unsigned int j; + + /* Each sg entry might be multiple pages long */ + for (j = paddr; j < paddr + s->length; pte++, j += PAGE_SIZE) { + if (SUPERPTE_IS_DIRTY(pte)) + flushtlb = 1; + kgsl_pt_map_set(gpummu_pt, pte, j | protflags); + } + } + + /* Flush the TLB if the last PTE isn't at the superpte boundary */ + if ((pte + 1) & (GSL_PT_SUPER_PTE - 1)) + flushtlb = 1; + + wmb(); + + if (flushtlb) { + /*set all devices as needing flushing*/ + gpummu_pt->tlb_flags = UINT_MAX; + GSL_TLBFLUSH_FILTER_RESET(); + } + + return 0; +} + +static int kgsl_gpummu_stop(struct kgsl_device *device) +{ + struct kgsl_mmu *mmu = &device->mmu; + + kgsl_regwrite(device, MH_MMU_CONFIG, 0x00000000); + mmu->flags &= ~KGSL_FLAGS_STARTED; + + return 0; +} + +static int kgsl_gpummu_close(struct kgsl_device *device) +{ + /* + * close device mmu + * + * call this with the global lock held + */ + struct kgsl_mmu *mmu = &device->mmu; + + if (mmu->setstate_memory.gpuaddr) + kgsl_sharedmem_free(&mmu->setstate_memory); + + if (mmu->defaultpagetable) + kgsl_mmu_putpagetable(mmu->defaultpagetable); + + return 0; +} + +static unsigned int +kgsl_gpummu_get_current_ptbase(struct kgsl_device *device) +{ + unsigned int ptbase; + kgsl_regread(device, MH_MMU_PT_BASE, &ptbase); + return ptbase; +} + +struct kgsl_mmu_ops gpummu_ops = { + .mmu_init = kgsl_gpummu_init, + .mmu_close = kgsl_gpummu_close, + .mmu_start = kgsl_gpummu_start, + .mmu_stop = kgsl_gpummu_stop, + .mmu_setstate = kgsl_gpummu_setstate, + .mmu_device_setstate = kgsl_gpummu_default_setstate, + .mmu_pagefault = kgsl_gpummu_pagefault, + .mmu_get_current_ptbase = kgsl_gpummu_get_current_ptbase, +}; + +struct kgsl_mmu_pt_ops gpummu_pt_ops = { + .mmu_map = kgsl_gpummu_map, + .mmu_unmap = kgsl_gpummu_unmap, + .mmu_create_pagetable = kgsl_gpummu_create_pagetable, + .mmu_destroy_pagetable = kgsl_gpummu_destroy_pagetable, + .mmu_pt_equal = kgsl_gpummu_pt_equal, + .mmu_pt_get_flags = kgsl_gpummu_pt_get_flags, +}; diff --git a/drivers/gpu/msm/kgsl_gpummu.h b/drivers/gpu/msm/kgsl_gpummu.h new file mode 100644 index 0000000000000..fc46f3c9d9caf --- /dev/null +++ b/drivers/gpu/msm/kgsl_gpummu.h @@ -0,0 +1,84 @@ +/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __KGSL_GPUMMU_H +#define __KGSL_GPUMMU_H + +#define GSL_PT_PAGE_BITS_MASK 0x00000007 +#define GSL_PT_PAGE_ADDR_MASK PAGE_MASK + +#define GSL_MMU_INT_MASK \ + (MH_INTERRUPT_MASK__AXI_READ_ERROR | \ + MH_INTERRUPT_MASK__AXI_WRITE_ERROR) + +/* Macros to manage TLB flushing */ +#define GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS (sizeof(unsigned char) * 8) +#define GSL_TLBFLUSH_FILTER_GET(superpte) \ + (*((unsigned char *) \ + (((unsigned int)gpummu_pt->tlbflushfilter.base) \ + + (superpte / GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS)))) +#define GSL_TLBFLUSH_FILTER_SETDIRTY(superpte) \ + (GSL_TLBFLUSH_FILTER_GET((superpte)) |= 1 << \ + (superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS)) +#define GSL_TLBFLUSH_FILTER_ISDIRTY(superpte) \ + (GSL_TLBFLUSH_FILTER_GET((superpte)) & \ + (1 << (superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))) +#define GSL_TLBFLUSH_FILTER_RESET() memset(gpummu_pt->tlbflushfilter.base,\ + 0, gpummu_pt->tlbflushfilter.size) + +extern struct kgsl_mmu_ops gpummu_ops; +extern struct kgsl_mmu_pt_ops gpummu_pt_ops; + +struct kgsl_tlbflushfilter { + unsigned int *base; + unsigned int size; +}; + +struct kgsl_gpummu_pt { + struct kgsl_memdesc base; + unsigned int last_superpte; + unsigned int tlb_flags; + /* Maintain filter to manage tlb flushing */ + struct kgsl_tlbflushfilter tlbflushfilter; +}; + +struct kgsl_ptpool_chunk { + size_t size; + unsigned int count; + int dynamic; + + void *data; + unsigned int phys; + + unsigned long *bitmap; + struct list_head list; +}; + +struct kgsl_ptpool { + size_t ptsize; + struct mutex lock; + struct list_head list; + int entries; + int static_entries; + int chunks; +}; + +void *kgsl_gpummu_ptpool_init(int entries); +void kgsl_gpummu_ptpool_destroy(void *ptpool); + +static inline unsigned int kgsl_pt_get_base_addr(struct kgsl_pagetable *pt) +{ + struct kgsl_gpummu_pt *gpummu_pt = pt->priv; + return gpummu_pt->base.gpuaddr; +} +#endif /* __KGSL_GPUMMU_H */ diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c new file mode 100644 index 0000000000000..c471569ee88f3 --- /dev/null +++ b/drivers/gpu/msm/kgsl_iommu.c @@ -0,0 +1,334 @@ +/* Copyright (c) 2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kgsl.h" +#include "kgsl_device.h" +#include "kgsl_mmu.h" +#include "kgsl_sharedmem.h" + +struct kgsl_iommu { + struct device *iommu_user_dev; + int iommu_user_dev_attached; + struct device *iommu_priv_dev; + int iommu_priv_dev_attached; +}; + +static int kgsl_iommu_pt_equal(struct kgsl_pagetable *pt, + unsigned int pt_base) +{ + struct iommu_domain *domain = pt->priv; + return pt && pt_base && ((unsigned int)domain == pt_base); +} + +static void kgsl_iommu_destroy_pagetable(void *mmu_specific_pt) +{ + struct iommu_domain *domain = mmu_specific_pt; + if (domain) + iommu_domain_free(domain); +} + +void *kgsl_iommu_create_pagetable(void) +{ + struct iommu_domain *domain = iommu_domain_alloc(0); + if (!domain) + KGSL_CORE_ERR("Failed to create iommu domain\n"); + + return domain; +} + +static void kgsl_detach_pagetable_iommu_domain(struct kgsl_mmu *mmu) +{ + struct iommu_domain *domain; + struct kgsl_iommu *iommu = mmu->priv; + + BUG_ON(mmu->hwpagetable == NULL); + BUG_ON(mmu->hwpagetable->priv == NULL); + + domain = mmu->hwpagetable->priv; + + if (iommu->iommu_user_dev_attached) { + iommu_detach_device(domain, iommu->iommu_user_dev); + iommu->iommu_user_dev_attached = 0; + KGSL_MEM_INFO(mmu->device, + "iommu %p detached from user dev of MMU: %p\n", + domain, mmu); + } + if (iommu->iommu_priv_dev_attached) { + iommu_detach_device(domain, iommu->iommu_priv_dev); + iommu->iommu_priv_dev_attached = 0; + KGSL_MEM_INFO(mmu->device, + "iommu %p detached from priv dev of MMU: %p\n", + domain, mmu); + } +} + +static int kgsl_attach_pagetable_iommu_domain(struct kgsl_mmu *mmu) +{ + struct iommu_domain *domain; + int ret = 0; + struct kgsl_iommu *iommu = mmu->priv; + + BUG_ON(mmu->hwpagetable == NULL); + BUG_ON(mmu->hwpagetable->priv == NULL); + + domain = mmu->hwpagetable->priv; + + if (iommu->iommu_user_dev && !iommu->iommu_user_dev_attached) { + ret = iommu_attach_device(domain, iommu->iommu_user_dev); + if (ret) { + KGSL_MEM_ERR(mmu->device, + "Failed to attach device, err %d\n", ret); + goto done; + } + iommu->iommu_user_dev_attached = 1; + KGSL_MEM_INFO(mmu->device, + "iommu %p attached to user dev of MMU: %p\n", + domain, mmu); + } + if (iommu->iommu_priv_dev && !iommu->iommu_priv_dev_attached) { + ret = iommu_attach_device(domain, iommu->iommu_priv_dev); + if (ret) { + KGSL_MEM_ERR(mmu->device, + "Failed to attach device, err %d\n", ret); + iommu_detach_device(domain, iommu->iommu_user_dev); + iommu->iommu_user_dev_attached = 0; + goto done; + } + iommu->iommu_priv_dev_attached = 1; + KGSL_MEM_INFO(mmu->device, + "iommu %p attached to priv dev of MMU: %p\n", + domain, mmu); + } +done: + return ret; +} + +static int kgsl_get_iommu_ctxt(struct kgsl_iommu *iommu, + struct kgsl_device *device) +{ + int status = 0; + struct platform_device *pdev = + container_of(device->parentdev, struct platform_device, dev); + struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data; + if (pdata_dev->iommu_user_ctx_name) + iommu->iommu_user_dev = msm_iommu_get_ctx( + pdata_dev->iommu_user_ctx_name); + if (pdata_dev->iommu_priv_ctx_name) + iommu->iommu_priv_dev = msm_iommu_get_ctx( + pdata_dev->iommu_priv_ctx_name); + if (!iommu->iommu_user_dev) { + KGSL_CORE_ERR("Failed to get user iommu dev handle for " + "device %s\n", + pdata_dev->iommu_user_ctx_name); + status = -EINVAL; + } + return status; +} + +static void kgsl_iommu_setstate(struct kgsl_device *device, + struct kgsl_pagetable *pagetable, + 
unsigned int context_id) +{ + struct kgsl_mmu *mmu = &device->mmu; + + if (mmu->flags & KGSL_FLAGS_STARTED) { + /* page table not current, then setup mmu to use new + * specified page table + */ + if (mmu->hwpagetable != pagetable) { + kgsl_idle(device, KGSL_TIMEOUT_DEFAULT); + kgsl_detach_pagetable_iommu_domain(mmu); + mmu->hwpagetable = pagetable; + if (mmu->hwpagetable) + kgsl_attach_pagetable_iommu_domain(mmu); + } + } +} + +static int kgsl_iommu_init(struct kgsl_device *device) +{ + /* + * intialize device mmu + * + * call this with the global lock held + */ + int status = 0; + struct kgsl_mmu *mmu = &device->mmu; + struct kgsl_iommu *iommu; + + mmu->device = device; + + iommu = kzalloc(sizeof(struct kgsl_iommu), GFP_KERNEL); + if (!iommu) { + KGSL_CORE_ERR("kzalloc(%d) failed\n", + sizeof(struct kgsl_iommu)); + return -ENOMEM; + } + + iommu->iommu_priv_dev_attached = 0; + iommu->iommu_user_dev_attached = 0; + status = kgsl_get_iommu_ctxt(iommu, device); + if (status) { + kfree(iommu); + iommu = NULL; + } + mmu->priv = iommu; + + dev_info(device->dev, "|%s| MMU type set for device is IOMMU\n", + __func__); + return status; +} + +static int kgsl_iommu_start(struct kgsl_device *device) +{ + int status; + struct kgsl_mmu *mmu = &device->mmu; + + if (mmu->flags & KGSL_FLAGS_STARTED) + return 0; + + kgsl_regwrite(device, MH_MMU_CONFIG, 0x00000000); + if (mmu->defaultpagetable == NULL) + mmu->defaultpagetable = + kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT); + /* Return error if the default pagetable doesn't exist */ + if (mmu->defaultpagetable == NULL) + return -ENOMEM; + mmu->hwpagetable = mmu->defaultpagetable; + + status = kgsl_attach_pagetable_iommu_domain(mmu); + if (!status) + mmu->flags |= KGSL_FLAGS_STARTED; + + return status; +} + +static int +kgsl_iommu_unmap(void *mmu_specific_pt, + struct kgsl_memdesc *memdesc) +{ + int ret; + unsigned int range = memdesc->size; + struct iommu_domain *domain = (struct iommu_domain *) + mmu_specific_pt; + + /* All GPU addresses as assigned are page aligned, but some + functions purturb the gpuaddr with an offset, so apply the + mask here to make sure we have the right address */ + + unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK; + + if (range == 0 || gpuaddr == 0) + return 0; + + ret = iommu_unmap_range(domain, gpuaddr, range); + if (ret) + KGSL_CORE_ERR("iommu_unmap_range(%p, %x, %d) failed " + "with err: %d\n", domain, gpuaddr, + range, ret); + + return 0; +} + +static int +kgsl_iommu_map(void *mmu_specific_pt, + struct kgsl_memdesc *memdesc, + unsigned int protflags) +{ + int ret; + unsigned int iommu_virt_addr; + struct iommu_domain *domain = mmu_specific_pt; + + BUG_ON(NULL == domain); + + + iommu_virt_addr = memdesc->gpuaddr; + + ret = iommu_map_range(domain, iommu_virt_addr, memdesc->sg, + memdesc->size, (IOMMU_READ | IOMMU_WRITE)); + if (ret) { + KGSL_CORE_ERR("iommu_map_range(%p, %x, %p, %d, %d) " + "failed with err: %d\n", domain, + iommu_virt_addr, memdesc->sg, memdesc->size, + 0, ret); + return ret; + } + + return ret; +} + +static int kgsl_iommu_stop(struct kgsl_device *device) +{ + /* + * stop device mmu + * + * call this with the global lock held + */ + struct kgsl_mmu *mmu = &device->mmu; + + if (mmu->flags & KGSL_FLAGS_STARTED) { + /* detach iommu attachment */ + kgsl_detach_pagetable_iommu_domain(mmu); + + mmu->flags &= ~KGSL_FLAGS_STARTED; + } + + return 0; +} + +static int kgsl_iommu_close(struct kgsl_device *device) +{ + struct kgsl_mmu *mmu = &device->mmu; + if (mmu->defaultpagetable) + 
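+ /* drop the reference on the global pagetable taken in kgsl_iommu_start() */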
kgsl_mmu_putpagetable(mmu->defaultpagetable); + + return 0; +} + +static unsigned int +kgsl_iommu_get_current_ptbase(struct kgsl_device *device) +{ + /* Current base is always the hwpagetables domain as we + * do not use per process pagetables right not for iommu. + * This will change when we switch to per process pagetables. + */ + return (unsigned int)device->mmu.hwpagetable->priv; +} + +struct kgsl_mmu_ops iommu_ops = { + .mmu_init = kgsl_iommu_init, + .mmu_close = kgsl_iommu_close, + .mmu_start = kgsl_iommu_start, + .mmu_stop = kgsl_iommu_stop, + .mmu_setstate = kgsl_iommu_setstate, + .mmu_device_setstate = NULL, + .mmu_pagefault = NULL, + .mmu_get_current_ptbase = kgsl_iommu_get_current_ptbase, +}; + +struct kgsl_mmu_pt_ops iommu_pt_ops = { + .mmu_map = kgsl_iommu_map, + .mmu_unmap = kgsl_iommu_unmap, + .mmu_create_pagetable = kgsl_iommu_create_pagetable, + .mmu_destroy_pagetable = kgsl_iommu_destroy_pagetable, + .mmu_pt_equal = kgsl_iommu_pt_equal, + .mmu_pt_get_flags = NULL, +}; diff --git a/drivers/gpu/msm/kgsl_log.h b/drivers/gpu/msm/kgsl_log.h new file mode 100644 index 0000000000000..0cbdaff93b057 --- /dev/null +++ b/drivers/gpu/msm/kgsl_log.h @@ -0,0 +1,102 @@ +/* Copyright (c) 2002,2008-2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __KGSL_LOG_H +#define __KGSL_LOG_H + +extern unsigned int kgsl_cff_dump_enable; + +#define KGSL_LOG_INFO(dev, lvl, fmt, args...) \ + do { \ + if ((lvl) >= 6) \ + dev_info(dev, "|%s| " fmt, \ + __func__, ##args);\ + } while (0) + +#define KGSL_LOG_WARN(dev, lvl, fmt, args...) \ + do { \ + if ((lvl) >= 4) \ + dev_warn(dev, "|%s| " fmt, \ + __func__, ##args);\ + } while (0) + +#define KGSL_LOG_ERR(dev, lvl, fmt, args...) \ + do { \ + if ((lvl) >= 3) \ + dev_err(dev, "|%s| " fmt, \ + __func__, ##args);\ + } while (0) + +#define KGSL_LOG_CRIT(dev, lvl, fmt, args...) \ + do { \ + if ((lvl) >= 2) \ + dev_crit(dev, "|%s| " fmt, \ + __func__, ##args);\ + } while (0) + +#define KGSL_LOG_POSTMORTEM_WRITE(_dev, fmt, args...) \ + do { dev_crit(_dev->dev, fmt, ##args); } while (0) + +#define KGSL_LOG_DUMP(_dev, fmt, args...) dev_err(_dev->dev, fmt, ##args) + +#define KGSL_DRV_INFO(_dev, fmt, args...) \ +KGSL_LOG_INFO(_dev->dev, _dev->drv_log, fmt, ##args) +#define KGSL_DRV_WARN(_dev, fmt, args...) \ +KGSL_LOG_WARN(_dev->dev, _dev->drv_log, fmt, ##args) +#define KGSL_DRV_ERR(_dev, fmt, args...) \ +KGSL_LOG_ERR(_dev->dev, _dev->drv_log, fmt, ##args) +#define KGSL_DRV_CRIT(_dev, fmt, args...) \ +KGSL_LOG_CRIT(_dev->dev, _dev->drv_log, fmt, ##args) + +#define KGSL_CMD_INFO(_dev, fmt, args...) \ +KGSL_LOG_INFO(_dev->dev, _dev->cmd_log, fmt, ##args) +#define KGSL_CMD_WARN(_dev, fmt, args...) \ +KGSL_LOG_WARN(_dev->dev, _dev->cmd_log, fmt, ##args) +#define KGSL_CMD_ERR(_dev, fmt, args...) \ +KGSL_LOG_ERR(_dev->dev, _dev->cmd_log, fmt, ##args) +#define KGSL_CMD_CRIT(_dev, fmt, args...) \ +KGSL_LOG_CRIT(_dev->dev, _dev->cmd_log, fmt, ##args) + +#define KGSL_CTXT_INFO(_dev, fmt, args...) \ +KGSL_LOG_INFO(_dev->dev, _dev->ctxt_log, fmt, ##args) +#define KGSL_CTXT_WARN(_dev, fmt, args...) 
\ +KGSL_LOG_WARN(_dev->dev, _dev->ctxt_log, fmt, ##args) +#define KGSL_CTXT_ERR(_dev, fmt, args...) \ +KGSL_LOG_ERR(_dev->dev, _dev->ctxt_log, fmt, ##args) +#define KGSL_CTXT_CRIT(_dev, fmt, args...) \ +KGSL_LOG_CRIT(_dev->dev, _dev->ctxt_log, fmt, ##args) + +#define KGSL_MEM_INFO(_dev, fmt, args...) \ +KGSL_LOG_INFO(_dev->dev, _dev->mem_log, fmt, ##args) +#define KGSL_MEM_WARN(_dev, fmt, args...) \ +KGSL_LOG_WARN(_dev->dev, _dev->mem_log, fmt, ##args) +#define KGSL_MEM_ERR(_dev, fmt, args...) \ +KGSL_LOG_ERR(_dev->dev, _dev->mem_log, fmt, ##args) +#define KGSL_MEM_CRIT(_dev, fmt, args...) \ +KGSL_LOG_CRIT(_dev->dev, _dev->mem_log, fmt, ##args) + +#define KGSL_PWR_INFO(_dev, fmt, args...) \ +KGSL_LOG_INFO(_dev->dev, _dev->pwr_log, fmt, ##args) +#define KGSL_PWR_WARN(_dev, fmt, args...) \ +KGSL_LOG_WARN(_dev->dev, _dev->pwr_log, fmt, ##args) +#define KGSL_PWR_ERR(_dev, fmt, args...) \ +KGSL_LOG_ERR(_dev->dev, _dev->pwr_log, fmt, ##args) +#define KGSL_PWR_CRIT(_dev, fmt, args...) \ +KGSL_LOG_CRIT(_dev->dev, _dev->pwr_log, fmt, ##args) + +/* Core error messages - these are for core KGSL functions that have + no device associated with them (such as memory) */ + +#define KGSL_CORE_ERR(fmt, args...) \ +pr_err("kgsl: %s: " fmt, __func__, ##args) + +#endif /* __KGSL_LOG_H */ diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c new file mode 100644 index 0000000000000..cc5a9abf9dd5f --- /dev/null +++ b/drivers/gpu/msm/kgsl_mmu.c @@ -0,0 +1,745 @@ +/* Copyright (c) 2002,2007-2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include + +#include "kgsl.h" +#include "kgsl_mmu.h" +#include "kgsl_device.h" +#include "kgsl_sharedmem.h" +#include "adreno_postmortem.h" + +#define KGSL_MMU_ALIGN_SHIFT 13 +#define KGSL_MMU_ALIGN_MASK (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1)) + +static enum kgsl_mmutype kgsl_mmu_type; + +static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable); + +static int kgsl_cleanup_pt(struct kgsl_pagetable *pt) +{ + int i; + for (i = 0; i < KGSL_DEVICE_MAX; i++) { + struct kgsl_device *device = kgsl_driver.devp[i]; + if (device) + device->ftbl->cleanup_pt(device, pt); + } + return 0; +} + +static void kgsl_destroy_pagetable(struct kref *kref) +{ + struct kgsl_pagetable *pagetable = container_of(kref, + struct kgsl_pagetable, refcount); + unsigned long flags; + + spin_lock_irqsave(&kgsl_driver.ptlock, flags); + list_del(&pagetable->list); + spin_unlock_irqrestore(&kgsl_driver.ptlock, flags); + + pagetable_remove_sysfs_objects(pagetable); + + kgsl_cleanup_pt(pagetable); + + if (pagetable->pool) + gen_pool_destroy(pagetable->pool); + + pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv); + + kfree(pagetable); +} + +static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable) +{ + if (pagetable) + kref_put(&pagetable->refcount, kgsl_destroy_pagetable); +} + +static struct kgsl_pagetable * +kgsl_get_pagetable(unsigned long name) +{ + struct kgsl_pagetable *pt, *ret = NULL; + unsigned long flags; + + spin_lock_irqsave(&kgsl_driver.ptlock, flags); + list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) { + if (pt->name == name) { + ret = pt; + kref_get(&ret->refcount); + break; + } + } + + spin_unlock_irqrestore(&kgsl_driver.ptlock, flags); + return ret; +} + +static struct kgsl_pagetable * +_get_pt_from_kobj(struct kobject *kobj) +{ + unsigned long ptname; + + if (!kobj) + return NULL; + + if (sscanf(kobj->name, "%ld", &ptname) != 1) + return NULL; + + return kgsl_get_pagetable(ptname); +} + +static ssize_t +sysfs_show_entries(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + struct kgsl_pagetable *pt; + int ret = 0; + + pt = _get_pt_from_kobj(kobj); + + if (pt) + ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.entries); + + kgsl_put_pagetable(pt); + return ret; +} + +static ssize_t +sysfs_show_mapped(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + struct kgsl_pagetable *pt; + int ret = 0; + + pt = _get_pt_from_kobj(kobj); + + if (pt) + ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.mapped); + + kgsl_put_pagetable(pt); + return ret; +} + +static ssize_t +sysfs_show_va_range(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + struct kgsl_pagetable *pt; + int ret = 0; + + pt = _get_pt_from_kobj(kobj); + + if (pt) { + ret += snprintf(buf, PAGE_SIZE, "0x%x\n", + kgsl_mmu_get_ptsize()); + } + + kgsl_put_pagetable(pt); + return ret; +} + +static ssize_t +sysfs_show_max_mapped(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + struct kgsl_pagetable *pt; + int ret = 0; + + pt = _get_pt_from_kobj(kobj); + + if (pt) + ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_mapped); + + kgsl_put_pagetable(pt); + return ret; +} + +static ssize_t +sysfs_show_max_entries(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + struct kgsl_pagetable *pt; + int ret = 0; + + pt = _get_pt_from_kobj(kobj); + + if (pt) + ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_entries); + + kgsl_put_pagetable(pt); + return 
ret; +} + +static struct kobj_attribute attr_entries = { + .attr = { .name = "entries", .mode = 0444 }, + .show = sysfs_show_entries, + .store = NULL, +}; + +static struct kobj_attribute attr_mapped = { + .attr = { .name = "mapped", .mode = 0444 }, + .show = sysfs_show_mapped, + .store = NULL, +}; + +static struct kobj_attribute attr_va_range = { + .attr = { .name = "va_range", .mode = 0444 }, + .show = sysfs_show_va_range, + .store = NULL, +}; + +static struct kobj_attribute attr_max_mapped = { + .attr = { .name = "max_mapped", .mode = 0444 }, + .show = sysfs_show_max_mapped, + .store = NULL, +}; + +static struct kobj_attribute attr_max_entries = { + .attr = { .name = "max_entries", .mode = 0444 }, + .show = sysfs_show_max_entries, + .store = NULL, +}; + +static struct attribute *pagetable_attrs[] = { + &attr_entries.attr, + &attr_mapped.attr, + &attr_va_range.attr, + &attr_max_mapped.attr, + &attr_max_entries.attr, + NULL, +}; + +static struct attribute_group pagetable_attr_group = { + .attrs = pagetable_attrs, +}; + +static void +pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable) +{ + if (pagetable->kobj) + sysfs_remove_group(pagetable->kobj, + &pagetable_attr_group); + + kobject_put(pagetable->kobj); +} + +static int +pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable) +{ + char ptname[16]; + int ret = -ENOMEM; + + snprintf(ptname, sizeof(ptname), "%d", pagetable->name); + pagetable->kobj = kobject_create_and_add(ptname, + kgsl_driver.ptkobj); + if (pagetable->kobj == NULL) + goto err; + + ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group); + +err: + if (ret) { + if (pagetable->kobj) + kobject_put(pagetable->kobj); + + pagetable->kobj = NULL; + } + + return ret; +} + +unsigned int kgsl_mmu_get_ptsize(void) +{ + /* + * For IOMMU, we could do up to 4G virtual range if we wanted to, but + * it makes more sense to return a smaller range and leave the rest of + * the virtual range for future improvements + */ + + if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type) + return CONFIG_MSM_KGSL_PAGE_TABLE_SIZE; + else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) + return SZ_2G; + else + return 0; +} + +unsigned int kgsl_mmu_get_current_ptbase(struct kgsl_device *device) +{ + struct kgsl_mmu *mmu = &device->mmu; + if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) + return 0; + else + return mmu->mmu_ops->mmu_get_current_ptbase(device); +} +EXPORT_SYMBOL(kgsl_mmu_get_current_ptbase); + +int +kgsl_mmu_get_ptname_from_ptbase(unsigned int pt_base) +{ + struct kgsl_pagetable *pt; + int ptid = -1; + + spin_lock(&kgsl_driver.ptlock); + list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) { + if (pt->pt_ops->mmu_pt_equal(pt, pt_base)) { + ptid = (int) pt->name; + break; + } + } + spin_unlock(&kgsl_driver.ptlock); + + return ptid; +} +EXPORT_SYMBOL(kgsl_mmu_get_ptname_from_ptbase); + +void kgsl_mmu_setstate(struct kgsl_device *device, + struct kgsl_pagetable *pagetable, + unsigned int context_id) +{ + struct kgsl_mmu *mmu = &device->mmu; + + if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) + return; + else + mmu->mmu_ops->mmu_setstate(device, + pagetable, context_id); +} +EXPORT_SYMBOL(kgsl_mmu_setstate); + +int kgsl_mmu_init(struct kgsl_device *device) +{ + struct kgsl_mmu *mmu = &device->mmu; + + mmu->device = device; + + if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) { + dev_info(device->dev, "|%s| MMU type set for device is " + "NOMMU\n", __func__); + return 0; + } else if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type) + mmu->mmu_ops = &gpummu_ops; + else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) + 
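+ /*
+ * The per-device wrappers (kgsl_mmu_start/stop/setstate/...) dispatch
+ * through this ops table, while per-pagetable map/unmap go through the
+ * matching pt_ops chosen in kgsl_mmu_createpagetableobject(), so the
+ * GPUMMU/IOMMU choice is made exactly once per device here.
+ */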
mmu->mmu_ops = &iommu_ops; + + return mmu->mmu_ops->mmu_init(device); +} +EXPORT_SYMBOL(kgsl_mmu_init); + +int kgsl_mmu_start(struct kgsl_device *device) +{ + struct kgsl_mmu *mmu = &device->mmu; + + if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) { + kgsl_regwrite(device, MH_MMU_CONFIG, 0); + return 0; + } else { + return mmu->mmu_ops->mmu_start(device); + } +} +EXPORT_SYMBOL(kgsl_mmu_start); + +void kgsl_mh_intrcallback(struct kgsl_device *device) +{ + unsigned int status = 0; + unsigned int reg; + + kgsl_regread(device, MH_INTERRUPT_STATUS, &status); + kgsl_regread(device, MH_AXI_ERROR, ®); + + if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR) + KGSL_MEM_CRIT(device, "axi read error interrupt: %08x\n", reg); + if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR) + KGSL_MEM_CRIT(device, "axi write error interrupt: %08x\n", reg); + if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT) + device->mmu.mmu_ops->mmu_pagefault(device); + + status &= KGSL_MMU_INT_MASK; + kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status); +} +EXPORT_SYMBOL(kgsl_mh_intrcallback); + +static int kgsl_setup_pt(struct kgsl_pagetable *pt) +{ + int i = 0; + int status = 0; + + for (i = 0; i < KGSL_DEVICE_MAX; i++) { + struct kgsl_device *device = kgsl_driver.devp[i]; + if (device) { + status = device->ftbl->setup_pt(device, pt); + if (status) + goto error_pt; + } + } + return status; +error_pt: + while (i >= 0) { + struct kgsl_device *device = kgsl_driver.devp[i]; + if (device) + device->ftbl->cleanup_pt(device, pt); + i--; + } + return status; +} + +static struct kgsl_pagetable *kgsl_mmu_createpagetableobject( + unsigned int name) +{ + int status = 0; + struct kgsl_pagetable *pagetable = NULL; + unsigned long flags; + unsigned int ptsize; + + pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL); + if (pagetable == NULL) { + KGSL_CORE_ERR("kzalloc(%d) failed\n", + sizeof(struct kgsl_pagetable)); + return NULL; + } + + kref_init(&pagetable->refcount); + + spin_lock_init(&pagetable->lock); + + ptsize = kgsl_mmu_get_ptsize(); + + pagetable->name = name; + pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(ptsize); + + pagetable->pool = gen_pool_create(PAGE_SHIFT, -1); + if (pagetable->pool == NULL) { + KGSL_CORE_ERR("gen_pool_create(%d) failed\n", PAGE_SHIFT); + goto err_alloc; + } + + if (gen_pool_add(pagetable->pool, KGSL_PAGETABLE_BASE, + ptsize, -1)) { + KGSL_CORE_ERR("gen_pool_add failed\n"); + goto err_pool; + } + + if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type) + pagetable->pt_ops = &gpummu_pt_ops; + else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) + pagetable->pt_ops = &iommu_pt_ops; + + pagetable->priv = pagetable->pt_ops->mmu_create_pagetable(); + if (!pagetable->priv) + goto err_pool; + + status = kgsl_setup_pt(pagetable); + if (status) + goto err_mmu_create; + + spin_lock_irqsave(&kgsl_driver.ptlock, flags); + list_add(&pagetable->list, &kgsl_driver.pagetable_list); + spin_unlock_irqrestore(&kgsl_driver.ptlock, flags); + + /* Create the sysfs entries */ + pagetable_add_sysfs_objects(pagetable); + + return pagetable; + +err_mmu_create: + pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv); +err_pool: + gen_pool_destroy(pagetable->pool); +err_alloc: + kfree(pagetable); + + return NULL; +} + +struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name) +{ + struct kgsl_pagetable *pt; + + if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) + return (void *)(-1); + +#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE + if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) + name = KGSL_MMU_GLOBAL_PT; +#else + name = KGSL_MMU_GLOBAL_PT; +#endif + pt = 
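+ /* reuse an existing pagetable with this name if one exists (takes a
+ reference); otherwise it is created below */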
kgsl_get_pagetable(name); + + if (pt == NULL) + pt = kgsl_mmu_createpagetableobject(name); + + return pt; +} + +void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable) +{ + kgsl_put_pagetable(pagetable); +} +EXPORT_SYMBOL(kgsl_mmu_putpagetable); + +void kgsl_setstate(struct kgsl_device *device, unsigned int context_id, + uint32_t flags) +{ + struct kgsl_mmu *mmu = &device->mmu; + if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) + return; + else if (device->ftbl->setstate) + device->ftbl->setstate(device, context_id, flags); + else if (mmu->mmu_ops->mmu_device_setstate) + mmu->mmu_ops->mmu_device_setstate(device, flags); +} +EXPORT_SYMBOL(kgsl_setstate); + +void kgsl_mmu_device_setstate(struct kgsl_device *device, uint32_t flags) +{ + struct kgsl_mmu *mmu = &device->mmu; + if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) + return; + else if (mmu->mmu_ops->mmu_device_setstate) + mmu->mmu_ops->mmu_device_setstate(device, flags); +} +EXPORT_SYMBOL(kgsl_mmu_device_setstate); + +void kgsl_mh_start(struct kgsl_device *device) +{ + struct kgsl_mh *mh = &device->mh; + /* force mmu off to for now*/ + kgsl_regwrite(device, MH_MMU_CONFIG, 0); + kgsl_idle(device, KGSL_TIMEOUT_DEFAULT); + + /* define physical memory range accessible by the core */ + kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base); + kgsl_regwrite(device, MH_MMU_MPU_END, + mh->mpu_base + mh->mpu_range); + kgsl_regwrite(device, MH_ARBITER_CONFIG, mh->mharb); + + if (mh->mh_intf_cfg1 != 0) + kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG1, + mh->mh_intf_cfg1); + + if (mh->mh_intf_cfg2 != 0) + kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG2, + mh->mh_intf_cfg2); + + /* + * Interrupts are enabled on a per-device level when + * kgsl_pwrctrl_irq() is called + */ +} + +int +kgsl_mmu_map(struct kgsl_pagetable *pagetable, + struct kgsl_memdesc *memdesc, + unsigned int protflags) +{ + int ret; + + if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) { + memdesc->gpuaddr = memdesc->physaddr; + return 0; + } + memdesc->gpuaddr = gen_pool_alloc_aligned(pagetable->pool, + memdesc->size, KGSL_MMU_ALIGN_SHIFT); + + if (memdesc->gpuaddr == 0) { + KGSL_CORE_ERR("gen_pool_alloc(%d) failed\n", memdesc->size); + KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n", + pagetable->name, pagetable->stats.mapped, + pagetable->stats.entries); + return -ENOMEM; + } + + spin_lock(&pagetable->lock); + ret = pagetable->pt_ops->mmu_map(pagetable->priv, memdesc, protflags); + + if (ret) + goto err_free_gpuaddr; + + /* Keep track of the statistics for the sysfs files */ + + KGSL_STATS_ADD(1, pagetable->stats.entries, + pagetable->stats.max_entries); + + KGSL_STATS_ADD(memdesc->size, pagetable->stats.mapped, + pagetable->stats.max_mapped); + + spin_unlock(&pagetable->lock); + + return 0; + +err_free_gpuaddr: + spin_unlock(&pagetable->lock); + gen_pool_free(pagetable->pool, memdesc->gpuaddr, memdesc->size); + memdesc->gpuaddr = 0; + return ret; +} +EXPORT_SYMBOL(kgsl_mmu_map); + +int +kgsl_mmu_unmap(struct kgsl_pagetable *pagetable, + struct kgsl_memdesc *memdesc) +{ + if (memdesc->size == 0 || memdesc->gpuaddr == 0) + return 0; + + if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) { + memdesc->gpuaddr = 0; + return 0; + } + spin_lock(&pagetable->lock); + pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc); + /* Remove the statistics */ + pagetable->stats.entries--; + pagetable->stats.mapped -= memdesc->size; + + spin_unlock(&pagetable->lock); + + gen_pool_free(pagetable->pool, + memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK, + memdesc->size); + + return 0; +} +EXPORT_SYMBOL(kgsl_mmu_unmap); + +int 
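+/*
+ * Map a global buffer.  Global mappings must resolve to the same GPU
+ * address in every pagetable; if this pagetable hands back a different
+ * address than the one already recorded in the memdesc, the mapping is
+ * undone and the mismatch is logged.
+ */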
kgsl_mmu_map_global(struct kgsl_pagetable *pagetable, + struct kgsl_memdesc *memdesc, unsigned int protflags) +{ + int result = -EINVAL; + unsigned int gpuaddr = 0; + + if (memdesc == NULL) { + KGSL_CORE_ERR("invalid memdesc\n"); + goto error; + } + /* Not all global mappings are needed for all MMU types */ + if (!memdesc->size) + return 0; + + gpuaddr = memdesc->gpuaddr; + + result = kgsl_mmu_map(pagetable, memdesc, protflags); + if (result) + goto error; + + /*global mappings must have the same gpu address in all pagetables*/ + if (gpuaddr && gpuaddr != memdesc->gpuaddr) { + KGSL_CORE_ERR("pt %p addr mismatch phys 0x%08x" + "gpu 0x%0x 0x%08x", pagetable, memdesc->physaddr, + gpuaddr, memdesc->gpuaddr); + goto error_unmap; + } + return result; +error_unmap: + kgsl_mmu_unmap(pagetable, memdesc); +error: + return result; +} +EXPORT_SYMBOL(kgsl_mmu_map_global); + +int kgsl_mmu_stop(struct kgsl_device *device) +{ + struct kgsl_mmu *mmu = &device->mmu; + + if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) + return 0; + else + return mmu->mmu_ops->mmu_stop(device); +} +EXPORT_SYMBOL(kgsl_mmu_stop); + +int kgsl_mmu_close(struct kgsl_device *device) +{ + struct kgsl_mmu *mmu = &device->mmu; + + if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) + return 0; + else + return mmu->mmu_ops->mmu_close(device); +} +EXPORT_SYMBOL(kgsl_mmu_close); + +int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt, + enum kgsl_deviceid id) +{ + if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type) + return pt->pt_ops->mmu_pt_get_flags(pt, id); + else + return 0; +} +EXPORT_SYMBOL(kgsl_mmu_pt_get_flags); + +void kgsl_mmu_ptpool_destroy(void *ptpool) +{ + if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type) + kgsl_gpummu_ptpool_destroy(ptpool); + ptpool = 0; +} +EXPORT_SYMBOL(kgsl_mmu_ptpool_destroy); + +void *kgsl_mmu_ptpool_init(int entries) +{ + if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type) + return kgsl_gpummu_ptpool_init(entries); + else + return (void *)(-1); +} +EXPORT_SYMBOL(kgsl_mmu_ptpool_init); + +int kgsl_mmu_enabled(void) +{ + if (KGSL_MMU_TYPE_NONE != kgsl_mmu_type) + return 1; + else + return 0; +} +EXPORT_SYMBOL(kgsl_mmu_enabled); + +int kgsl_mmu_pt_equal(struct kgsl_pagetable *pt, + unsigned int pt_base) +{ + if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) + return true; + else + return pt->pt_ops->mmu_pt_equal(pt, pt_base); +} +EXPORT_SYMBOL(kgsl_mmu_pt_equal); + +enum kgsl_mmutype kgsl_mmu_get_mmutype(void) +{ + return kgsl_mmu_type; +} +EXPORT_SYMBOL(kgsl_mmu_get_mmutype); + +void kgsl_mmu_set_mmutype(char *mmutype) +{ + kgsl_mmu_type = iommu_found() ? KGSL_MMU_TYPE_IOMMU : KGSL_MMU_TYPE_GPU; + if (mmutype && !strncmp(mmutype, "gpummu", 6)) + kgsl_mmu_type = KGSL_MMU_TYPE_GPU; + if (iommu_found() && mmutype && !strncmp(mmutype, "iommu", 5)) + kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU; + if (mmutype && !strncmp(mmutype, "nommu", 5)) + kgsl_mmu_type = KGSL_MMU_TYPE_NONE; +} +EXPORT_SYMBOL(kgsl_mmu_set_mmutype); diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h new file mode 100644 index 0000000000000..07cd6d9d9e2d5 --- /dev/null +++ b/drivers/gpu/msm/kgsl_mmu.h @@ -0,0 +1,197 @@ +/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __KGSL_MMU_H +#define __KGSL_MMU_H + +#define KGSL_MMU_ALIGN_SHIFT 13 +#define KGSL_MMU_ALIGN_MASK (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1)) + +/* Identifier for the global page table */ +/* Per process page tables will probably pass in the thread group + as an identifier */ + +#define KGSL_MMU_GLOBAL_PT 0 + +struct kgsl_device; + +#define GSL_PT_SUPER_PTE 8 +#define GSL_PT_PAGE_WV 0x00000001 +#define GSL_PT_PAGE_RV 0x00000002 +#define GSL_PT_PAGE_DIRTY 0x00000004 + +/* MMU registers - the register locations for all cores are the + same. The method for getting to those locations differs between + 2D and 3D, but the 2D and 3D register functions do that magic + for us */ + +#define MH_MMU_CONFIG 0x0040 +#define MH_MMU_VA_RANGE 0x0041 +#define MH_MMU_PT_BASE 0x0042 +#define MH_MMU_PAGE_FAULT 0x0043 +#define MH_MMU_TRAN_ERROR 0x0044 +#define MH_MMU_INVALIDATE 0x0045 +#define MH_MMU_MPU_BASE 0x0046 +#define MH_MMU_MPU_END 0x0047 + +#define MH_INTERRUPT_MASK 0x0A42 +#define MH_INTERRUPT_STATUS 0x0A43 +#define MH_INTERRUPT_CLEAR 0x0A44 +#define MH_AXI_ERROR 0x0A45 +#define MH_ARBITER_CONFIG 0x0A40 +#define MH_DEBUG_CTRL 0x0A4E +#define MH_DEBUG_DATA 0x0A4F +#define MH_AXI_HALT_CONTROL 0x0A50 +#define MH_CLNT_INTF_CTRL_CONFIG1 0x0A54 +#define MH_CLNT_INTF_CTRL_CONFIG2 0x0A55 + +/* MH_MMU_CONFIG bit definitions */ + +#define MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT 0x00000004 +#define MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT 0x00000006 +#define MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT 0x00000008 +#define MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT 0x0000000a +#define MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT 0x0000000c +#define MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT 0x0000000e +#define MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT 0x00000010 +#define MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT 0x00000012 +#define MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT 0x00000014 +#define MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT 0x00000016 +#define MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT 0x00000018 + +/* MMU Flags */ +#define KGSL_MMUFLAGS_TLBFLUSH 0x10000000 +#define KGSL_MMUFLAGS_PTUPDATE 0x20000000 + +#define MH_INTERRUPT_MASK__AXI_READ_ERROR 0x00000001L +#define MH_INTERRUPT_MASK__AXI_WRITE_ERROR 0x00000002L +#define MH_INTERRUPT_MASK__MMU_PAGE_FAULT 0x00000004L + +#define KGSL_MMU_INT_MASK \ + (MH_INTERRUPT_MASK__AXI_READ_ERROR | \ + MH_INTERRUPT_MASK__AXI_WRITE_ERROR | \ + MH_INTERRUPT_MASK__MMU_PAGE_FAULT) + +enum kgsl_mmutype { + KGSL_MMU_TYPE_GPU = 0, + KGSL_MMU_TYPE_IOMMU, + KGSL_MMU_TYPE_NONE +}; + +struct kgsl_pagetable { + spinlock_t lock; + struct kref refcount; + unsigned int max_entries; + struct gen_pool *pool; + struct list_head list; + unsigned int name; + struct kobject *kobj; + + struct { + unsigned int entries; + unsigned int mapped; + unsigned int max_mapped; + unsigned int max_entries; + } stats; + const struct kgsl_mmu_pt_ops *pt_ops; + void *priv; +}; + +struct kgsl_mmu_ops { + int (*mmu_init) (struct kgsl_device *device); + int (*mmu_close) (struct kgsl_device *device); + int (*mmu_start) (struct kgsl_device *device); + int (*mmu_stop) (struct kgsl_device *device); + void (*mmu_setstate) (struct kgsl_device *device, + struct kgsl_pagetable *pagetable, + unsigned int context_id); + void (*mmu_device_setstate) (struct 
kgsl_device *device, + uint32_t flags); + void (*mmu_pagefault) (struct kgsl_device *device); + unsigned int (*mmu_get_current_ptbase) + (struct kgsl_device *device); +}; + +struct kgsl_mmu_pt_ops { + int (*mmu_map) (void *mmu_pt, + struct kgsl_memdesc *memdesc, + unsigned int protflags); + int (*mmu_unmap) (void *mmu_pt, + struct kgsl_memdesc *memdesc); + void *(*mmu_create_pagetable) (void); + void (*mmu_destroy_pagetable) (void *pt); + int (*mmu_pt_equal) (struct kgsl_pagetable *pt, + unsigned int pt_base); + unsigned int (*mmu_pt_get_flags) (struct kgsl_pagetable *pt, + enum kgsl_deviceid id); +}; + +struct kgsl_mmu { + unsigned int refcnt; + uint32_t flags; + struct kgsl_device *device; + unsigned int config; + struct kgsl_memdesc setstate_memory; + /* current page table object being used by device mmu */ + struct kgsl_pagetable *defaultpagetable; + struct kgsl_pagetable *hwpagetable; + const struct kgsl_mmu_ops *mmu_ops; + void *priv; +}; + +#include "kgsl_gpummu.h" + +extern struct kgsl_mmu_ops iommu_ops; +extern struct kgsl_mmu_pt_ops iommu_pt_ops; + +struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name); +void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable); +void kgsl_mh_start(struct kgsl_device *device); +void kgsl_mh_intrcallback(struct kgsl_device *device); +int kgsl_mmu_init(struct kgsl_device *device); +int kgsl_mmu_start(struct kgsl_device *device); +int kgsl_mmu_stop(struct kgsl_device *device); +int kgsl_mmu_close(struct kgsl_device *device); +int kgsl_mmu_map(struct kgsl_pagetable *pagetable, + struct kgsl_memdesc *memdesc, + unsigned int protflags); +int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable, + struct kgsl_memdesc *memdesc, unsigned int protflags); +int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable, + struct kgsl_memdesc *memdesc); +unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr); +void kgsl_setstate(struct kgsl_device *device, unsigned int context_id, + uint32_t flags); +void kgsl_mmu_device_setstate(struct kgsl_device *device, uint32_t flags); +void kgsl_mmu_setstate(struct kgsl_device *device, + struct kgsl_pagetable *pt, unsigned int context_id); +int kgsl_mmu_get_ptname_from_ptbase(unsigned int pt_base); +int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt, + enum kgsl_deviceid id); +void kgsl_mmu_ptpool_destroy(void *ptpool); +void *kgsl_mmu_ptpool_init(int entries); +int kgsl_mmu_enabled(void); +int kgsl_mmu_pt_equal(struct kgsl_pagetable *pt, + unsigned int pt_base); +void kgsl_mmu_set_mmutype(char *mmutype); +unsigned int kgsl_mmu_get_current_ptbase(struct kgsl_device *device); +enum kgsl_mmutype kgsl_mmu_get_mmutype(void); +unsigned int kgsl_mmu_get_ptsize(void); + +static inline int kgsl_mmu_gpuaddr_in_range(unsigned int gpuaddr) +{ + return ((gpuaddr >= KGSL_PAGETABLE_BASE) && + (gpuaddr < + (KGSL_PAGETABLE_BASE + CONFIG_MSM_KGSL_PAGE_TABLE_SIZE))); +} + +#endif /* __KGSL_MMU_H */ diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c new file mode 100644 index 0000000000000..7704227643285 --- /dev/null +++ b/drivers/gpu/msm/kgsl_pwrctrl.c @@ -0,0 +1,938 @@ +/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include + +#include "kgsl.h" +#include "kgsl_pwrscale.h" +#include "kgsl_device.h" +#include "kgsl_trace.h" + +#define KGSL_PWRFLAGS_POWER_ON 0 +#define KGSL_PWRFLAGS_CLK_ON 1 +#define KGSL_PWRFLAGS_AXI_ON 2 +#define KGSL_PWRFLAGS_IRQ_ON 3 + +#define GPU_SWFI_LATENCY 3 +#define UPDATE_BUSY_VAL 1000000 +#define UPDATE_BUSY 50 + +struct clk_pair { + const char *name; + uint map; +}; + +struct clk_pair clks[KGSL_MAX_CLKS] = { + { + .name = "src_clk", + .map = KGSL_CLK_SRC, + }, + { + .name = "core_clk", + .map = KGSL_CLK_CORE, + }, + { + .name = "iface_clk", + .map = KGSL_CLK_IFACE, + }, + { + .name = "mem_clk", + .map = KGSL_CLK_MEM, + }, + { + .name = "mem_iface_clk", + .map = KGSL_CLK_MEM_IFACE, + }, + { + .name = "grp_clk", + .map = KGSL_CLK_GRP, + }, + { + .name = "imem_clk", + .map = KGSL_CLK_IMEM, + }, +}; + +void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device, + unsigned int new_level) +{ + struct kgsl_pwrctrl *pwr = &device->pwrctrl; + if (new_level < (pwr->num_pwrlevels - 1) && + new_level >= pwr->thermal_pwrlevel && + new_level != pwr->active_pwrlevel) { + struct kgsl_pwrlevel *pwrlevel = &pwr->pwrlevels[new_level]; + pwr->active_pwrlevel = new_level; + if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) || + (device->state == KGSL_STATE_NAP)) { + /* + * On some platforms, instability is caused on + * changing clock freq when the core is busy. + * Idle the gpu core before changing the clock freq. + */ + if (pwr->idle_needed == true) + device->ftbl->idle(device, + KGSL_TIMEOUT_DEFAULT); + clk_set_rate(pwr->grp_clks[0], pwrlevel->gpu_freq); + } + if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) { + if (pwr->pcl) + msm_bus_scale_client_update_request(pwr->pcl, + pwrlevel->bus_freq); + else if (pwr->ebi1_clk) + clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq); + } + trace_kgsl_pwrlevel(device, pwr->active_pwrlevel, + pwrlevel->gpu_freq); + } +} +EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change); + +static int __gpuclk_store(int max, struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ int ret, i, delta = 5000000; + unsigned long val; + struct kgsl_device *device = kgsl_device_from_dev(dev); + struct kgsl_pwrctrl *pwr; + + if (device == NULL) + return 0; + pwr = &device->pwrctrl; + + ret = sscanf(buf, "%ld", &val); + if (ret != 1) + return count; + + mutex_lock(&device->mutex); + for (i = 0; i < pwr->num_pwrlevels; i++) { + if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) { + if (max) + pwr->thermal_pwrlevel = i; + break; + } + } + + if (i == pwr->num_pwrlevels) + goto done; + + /* + * If the current or requested clock speed is greater than the + * thermal limit, bump down immediately. 
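+ * The value written is a GPU frequency in Hz and is matched against
+ * the pwrlevel table with a 5 MHz tolerance.  Writes to max_gpuclk
+ * (max == 1) move the thermal ceiling; writes to gpuclk (max == 0)
+ * request that level directly.  Either way the active level is never
+ * left above the thermal ceiling.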
+ */ + + if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq > + pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq) + kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel); + else if (!max) + kgsl_pwrctrl_pwrlevel_change(device, i); + +done: + mutex_unlock(&device->mutex); + return count; +} + +static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + return __gpuclk_store(1, dev, attr, buf, count); +} + +static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct kgsl_device *device = kgsl_device_from_dev(dev); + struct kgsl_pwrctrl *pwr; + if (device == NULL) + return 0; + pwr = &device->pwrctrl; + return snprintf(buf, PAGE_SIZE, "%d\n", + pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq); +} + +static int kgsl_pwrctrl_gpuclk_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + return __gpuclk_store(0, dev, attr, buf, count); +} + +static int kgsl_pwrctrl_gpuclk_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct kgsl_device *device = kgsl_device_from_dev(dev); + struct kgsl_pwrctrl *pwr; + if (device == NULL) + return 0; + pwr = &device->pwrctrl; + return snprintf(buf, PAGE_SIZE, "%d\n", + pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq); +} + +static int kgsl_pwrctrl_pwrnap_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + char temp[20]; + unsigned long val; + struct kgsl_device *device = kgsl_device_from_dev(dev); + struct kgsl_pwrctrl *pwr; + int rc; + + if (device == NULL) + return 0; + pwr = &device->pwrctrl; + + snprintf(temp, sizeof(temp), "%.*s", + (int)min(count, sizeof(temp) - 1), buf); + rc = strict_strtoul(temp, 0, &val); + if (rc) + return rc; + + mutex_lock(&device->mutex); + + if (val == 1) + pwr->nap_allowed = true; + else if (val == 0) + pwr->nap_allowed = false; + + mutex_unlock(&device->mutex); + + return count; +} + +static int kgsl_pwrctrl_pwrnap_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct kgsl_device *device = kgsl_device_from_dev(dev); + if (device == NULL) + return 0; + return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed); +} + + +static int kgsl_pwrctrl_idle_timer_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + char temp[20]; + unsigned long val; + struct kgsl_device *device = kgsl_device_from_dev(dev); + struct kgsl_pwrctrl *pwr; + const long div = 1000/HZ; + static unsigned int org_interval_timeout = 1; + int rc; + + if (device == NULL) + return 0; + pwr = &device->pwrctrl; + + snprintf(temp, sizeof(temp), "%.*s", + (int)min(count, sizeof(temp) - 1), buf); + rc = strict_strtoul(temp, 0, &val); + if (rc) + return rc; + + if (org_interval_timeout == 1) + org_interval_timeout = pwr->interval_timeout; + + mutex_lock(&device->mutex); + + /* Let the timeout be requested in ms, but convert to jiffies. 
*/ + val /= div; + if (val >= org_interval_timeout) + pwr->interval_timeout = val; + + mutex_unlock(&device->mutex); + + return count; +} + +static int kgsl_pwrctrl_idle_timer_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct kgsl_device *device = kgsl_device_from_dev(dev); + if (device == NULL) + return 0; + return snprintf(buf, PAGE_SIZE, "%d\n", + device->pwrctrl.interval_timeout); +} + +static int kgsl_pwrctrl_gpubusy_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int ret; + struct kgsl_device *device = kgsl_device_from_dev(dev); + struct kgsl_busy *b = &device->pwrctrl.busy; + ret = snprintf(buf, 17, "%7d %7d\n", + b->on_time_old, b->time_old); + if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) { + b->on_time_old = 0; + b->time_old = 0; + } + return ret; +} + +DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store); +DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show, + kgsl_pwrctrl_max_gpuclk_store); +DEVICE_ATTR(pwrnap, 0664, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store); +DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show, + kgsl_pwrctrl_idle_timer_store); +DEVICE_ATTR(gpubusy, 0644, kgsl_pwrctrl_gpubusy_show, + NULL); + +static const struct device_attribute *pwrctrl_attr_list[] = { + &dev_attr_gpuclk, + &dev_attr_max_gpuclk, + &dev_attr_pwrnap, + &dev_attr_idle_timer, + &dev_attr_gpubusy, + NULL +}; + +int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device) +{ + return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list); +} + +void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device) +{ + kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list); +} + +/* Track the amount of time the gpu is on vs the total system time. * + * Regularly update the percentage of busy time displayed by sysfs. */ +static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time) +{ + struct kgsl_busy *b = &device->pwrctrl.busy; + int elapsed; + if (b->start.tv_sec == 0) + do_gettimeofday(&(b->start)); + do_gettimeofday(&(b->stop)); + elapsed = (b->stop.tv_sec - b->start.tv_sec) * 1000000; + elapsed += b->stop.tv_usec - b->start.tv_usec; + b->time += elapsed; + if (on_time) + b->on_time += elapsed; + /* Update the output regularly and reset the counters. */ + if ((b->time > UPDATE_BUSY_VAL) || + !test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) { + b->on_time_old = b->on_time; + b->time_old = b->time; + b->on_time = 0; + b->time = 0; + } + do_gettimeofday(&(b->start)); +} + +void kgsl_pwrctrl_clk(struct kgsl_device *device, int state, + int requested_state) +{ + struct kgsl_pwrctrl *pwr = &device->pwrctrl; + int i = 0; + if (state == KGSL_PWRFLAGS_OFF) { + if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON, + &pwr->power_flags)) { + trace_kgsl_clk(device, state); + for (i = KGSL_MAX_CLKS - 1; i > 0; i--) + if (pwr->grp_clks[i]) + clk_disable(pwr->grp_clks[i]); + if ((pwr->pwrlevels[0].gpu_freq > 0) && + (requested_state != KGSL_STATE_NAP)) + clk_set_rate(pwr->grp_clks[0], + pwr->pwrlevels[pwr->num_pwrlevels - 1]. + gpu_freq); + kgsl_pwrctrl_busy_time(device, true); + } + } else if (state == KGSL_PWRFLAGS_ON) { + if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON, + &pwr->power_flags)) { + trace_kgsl_clk(device, state); + if ((pwr->pwrlevels[0].gpu_freq > 0) && + (device->state != KGSL_STATE_NAP)) + clk_set_rate(pwr->grp_clks[0], + pwr->pwrlevels[pwr->active_pwrlevel]. 
+ gpu_freq); + + /* as last step, enable grp_clk + this is to let GPU interrupt to come */ + for (i = KGSL_MAX_CLKS - 1; i > 0; i--) + if (pwr->grp_clks[i]) + clk_enable(pwr->grp_clks[i]); + kgsl_pwrctrl_busy_time(device, false); + } + } +} + +void kgsl_pwrctrl_axi(struct kgsl_device *device, int state) +{ + struct kgsl_pwrctrl *pwr = &device->pwrctrl; + + if (state == KGSL_PWRFLAGS_OFF) { + if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON, + &pwr->power_flags)) { + trace_kgsl_bus(device, state); + if (pwr->ebi1_clk) { + clk_set_rate(pwr->ebi1_clk, 0); + clk_disable(pwr->ebi1_clk); + } + if (pwr->pcl) + msm_bus_scale_client_update_request(pwr->pcl, + 0); + } + } else if (state == KGSL_PWRFLAGS_ON) { + if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON, + &pwr->power_flags)) { + trace_kgsl_bus(device, state); + if (pwr->ebi1_clk) { + clk_enable(pwr->ebi1_clk); + clk_set_rate(pwr->ebi1_clk, + pwr->pwrlevels[pwr->active_pwrlevel]. + bus_freq); + } + if (pwr->pcl) + msm_bus_scale_client_update_request(pwr->pcl, + pwr->pwrlevels[pwr->active_pwrlevel]. + bus_freq); + } + } +} + +void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state) +{ + struct kgsl_pwrctrl *pwr = &device->pwrctrl; + + if (state == KGSL_PWRFLAGS_OFF) { + if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON, + &pwr->power_flags)) { + trace_kgsl_rail(device, state); + if (pwr->gpu_reg) + regulator_disable(pwr->gpu_reg); + } + } else if (state == KGSL_PWRFLAGS_ON) { + if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON, + &pwr->power_flags)) { + trace_kgsl_rail(device, state); + if (pwr->gpu_reg) + regulator_enable(pwr->gpu_reg); + } + } +} + +void kgsl_pwrctrl_irq(struct kgsl_device *device, int state) +{ + struct kgsl_pwrctrl *pwr = &device->pwrctrl; + + if (state == KGSL_PWRFLAGS_ON) { + if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON, + &pwr->power_flags)) { + trace_kgsl_irq(device, state); + enable_irq(pwr->interrupt_num); + } + } else if (state == KGSL_PWRFLAGS_OFF) { + if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON, + &pwr->power_flags)) { + trace_kgsl_irq(device, state); + if (in_interrupt()) + disable_irq_nosync(pwr->interrupt_num); + else + disable_irq(pwr->interrupt_num); + } + } +} +EXPORT_SYMBOL(kgsl_pwrctrl_irq); + +int kgsl_pwrctrl_init(struct kgsl_device *device) +{ + int i, result = 0; + struct clk *clk; + struct platform_device *pdev = + container_of(device->parentdev, struct platform_device, dev); + struct kgsl_pwrctrl *pwr = &device->pwrctrl; + struct kgsl_device_platform_data *pdata = pdev->dev.platform_data; + + /*acquire clocks */ + for (i = 0; i < KGSL_MAX_CLKS; i++) { + if (pdata->clk_map & clks[i].map) { + clk = clk_get(&pdev->dev, clks[i].name); + if (IS_ERR(clk)) + goto clk_err; + pwr->grp_clks[i] = clk; + } + } + /* Make sure we have a source clk for freq setting */ + if (pwr->grp_clks[0] == NULL) + pwr->grp_clks[0] = pwr->grp_clks[1]; + + /* put the AXI bus into asynchronous mode with the graphics cores */ + if (pdata->set_grp_async != NULL) + pdata->set_grp_async(); + + if (pdata->num_levels > KGSL_MAX_PWRLEVELS) { + KGSL_PWR_ERR(device, "invalid power level count: %d\n", + pdata->num_levels); + result = -EINVAL; + goto done; + } + pwr->num_pwrlevels = pdata->num_levels; + pwr->active_pwrlevel = pdata->init_level; + for (i = 0; i < pdata->num_levels; i++) { + pwr->pwrlevels[i].gpu_freq = + (pdata->pwrlevel[i].gpu_freq > 0) ? + clk_round_rate(pwr->grp_clks[0], + pdata->pwrlevel[i]. 
+ gpu_freq) : 0; + pwr->pwrlevels[i].bus_freq = + pdata->pwrlevel[i].bus_freq; + pwr->pwrlevels[i].io_fraction = + pdata->pwrlevel[i].io_fraction; + } + /* Do not set_rate for targets in sync with AXI */ + if (pwr->pwrlevels[0].gpu_freq > 0) + clk_set_rate(pwr->grp_clks[0], pwr-> + pwrlevels[pwr->num_pwrlevels - 1].gpu_freq); + + pwr->gpu_reg = regulator_get(NULL, pwr->regulator_name); + if (IS_ERR(pwr->gpu_reg)) + pwr->gpu_reg = NULL; + + pwr->power_flags = 0; + + pwr->nap_allowed = pdata->nap_allowed; + pwr->idle_needed = pdata->idle_needed; + pwr->interval_timeout = pdata->idle_timeout; + pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk"); + if (IS_ERR(pwr->ebi1_clk)) + pwr->ebi1_clk = NULL; + else + clk_set_rate(pwr->ebi1_clk, + pwr->pwrlevels[pwr->active_pwrlevel]. + bus_freq); + if (pdata->bus_scale_table != NULL) { + pwr->pcl = msm_bus_scale_register_client(pdata-> + bus_scale_table); + if (!pwr->pcl) { + KGSL_PWR_ERR(device, + "msm_bus_scale_register_client failed: " + "id %d table %p", device->id, + pdata->bus_scale_table); + result = -EINVAL; + goto done; + } + } + + /*acquire interrupt */ + pwr->interrupt_num = + platform_get_irq_byname(pdev, pwr->irq_name); + + if (pwr->interrupt_num <= 0) { + KGSL_PWR_ERR(device, "platform_get_irq_byname failed: %d\n", + pwr->interrupt_num); + result = -EINVAL; + goto done; + } + + register_early_suspend(&device->display_off); + return result; + +clk_err: + result = PTR_ERR(clk); + KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n", + clks[i].name, result); + +done: + return result; +} + +void kgsl_pwrctrl_close(struct kgsl_device *device) +{ + struct kgsl_pwrctrl *pwr = &device->pwrctrl; + int i; + + KGSL_PWR_INFO(device, "close device %d\n", device->id); + + unregister_early_suspend(&device->display_off); + + if (pwr->interrupt_num > 0) { + if (pwr->have_irq) { + free_irq(pwr->interrupt_num, NULL); + pwr->have_irq = 0; + } + pwr->interrupt_num = 0; + } + + clk_put(pwr->ebi1_clk); + + if (pwr->pcl) + msm_bus_scale_unregister_client(pwr->pcl); + + pwr->pcl = 0; + + if (pwr->gpu_reg) { + regulator_put(pwr->gpu_reg); + pwr->gpu_reg = NULL; + } + + for (i = 1; i < KGSL_MAX_CLKS; i++) + if (pwr->grp_clks[i]) { + clk_put(pwr->grp_clks[i]); + pwr->grp_clks[i] = NULL; + } + + pwr->grp_clks[0] = NULL; + pwr->power_flags = 0; +} + +void kgsl_idle_check(struct work_struct *work) +{ + struct kgsl_device *device = container_of(work, struct kgsl_device, + idle_check_ws); + WARN_ON(device == NULL); + if (device == NULL) + return; + + mutex_lock(&device->mutex); + if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) { + if ((device->requested_state != KGSL_STATE_SLEEP) && + (device->requested_state != KGSL_STATE_SLUMBER)) + kgsl_pwrscale_idle(device); + + if (kgsl_pwrctrl_sleep(device) != 0) { + mod_timer(&device->idle_timer, + jiffies + + device->pwrctrl.interval_timeout); + /* If the GPU has been too busy to sleep, make sure * + * that is acurately reflected in the % busy numbers. 
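kgsl_pwrctrl_init() above pulls everything it needs out of the board's kgsl_device_platform_data: the clock map, the power level table, the optional bus scale table and the idle tuning fields. The initializer below is a hedged sketch of what a board file might supply; only the field names that kgsl_pwrctrl_init() actually reads are taken from this patch, while the struct layout, the frequencies and the clock selection are illustrative assumptions.

/* Hypothetical board-file fragment; values are examples only. */
static struct kgsl_device_platform_data example_3d_pdata = {
        .pwrlevel = {
                { .gpu_freq = 266667000, .bus_freq = 3, .io_fraction = 0 },
                { .gpu_freq = 228571000, .bus_freq = 2, .io_fraction = 33 },
                { .gpu_freq = 200000000, .bus_freq = 1, .io_fraction = 100 },
                { .gpu_freq = 27000000,  .bus_freq = 0, .io_fraction = 0 },
        },
        .init_level    = 0,        /* start at the fastest level */
        .num_levels    = 4,        /* must not exceed KGSL_MAX_PWRLEVELS */
        .idle_timeout  = HZ / 5,   /* idle timer period, in jiffies */
        .nap_allowed   = true,
        .idle_needed   = false,
        /* bus_freq is a bus-scale vote index when a bus_scale_table is
         * registered, or an ebi1_clk rate otherwise */
        .clk_map       = KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE,
};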
*/ + device->pwrctrl.busy.no_nap_cnt++; + if (device->pwrctrl.busy.no_nap_cnt > UPDATE_BUSY) { + kgsl_pwrctrl_busy_time(device, true); + device->pwrctrl.busy.no_nap_cnt = 0; + } + } + } else if (device->state & (KGSL_STATE_HUNG | + KGSL_STATE_DUMP_AND_RECOVER)) { + kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE); + } + + mutex_unlock(&device->mutex); +} + +void kgsl_timer(unsigned long data) +{ + struct kgsl_device *device = (struct kgsl_device *) data; + + KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id); + if (device->requested_state != KGSL_STATE_SUSPEND) { + if (device->pwrctrl.restore_slumber) + kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER); + else + kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP); + /* Have work run in a non-interrupt context. */ + queue_work(device->work_queue, &device->idle_check_ws); + } +} + +void kgsl_pre_hwaccess(struct kgsl_device *device) +{ + BUG_ON(!mutex_is_locked(&device->mutex)); + switch (device->state) { + case KGSL_STATE_ACTIVE: + return; + case KGSL_STATE_NAP: + case KGSL_STATE_SLEEP: + case KGSL_STATE_SLUMBER: + kgsl_pwrctrl_wake(device); + break; + case KGSL_STATE_SUSPEND: + kgsl_check_suspended(device); + break; + case KGSL_STATE_INIT: + case KGSL_STATE_HUNG: + case KGSL_STATE_DUMP_AND_RECOVER: + if (test_bit(KGSL_PWRFLAGS_CLK_ON, + &device->pwrctrl.power_flags)) + break; + else + KGSL_PWR_ERR(device, + "hw access while clocks off from state %d\n", + device->state); + break; + default: + KGSL_PWR_ERR(device, "hw access while in unknown state %d\n", + device->state); + break; + } +} +EXPORT_SYMBOL(kgsl_pre_hwaccess); + +void kgsl_check_suspended(struct kgsl_device *device) +{ + if (device->requested_state == KGSL_STATE_SUSPEND || + device->state == KGSL_STATE_SUSPEND) { + mutex_unlock(&device->mutex); + wait_for_completion(&device->hwaccess_gate); + mutex_lock(&device->mutex); + } else if (device->state == KGSL_STATE_DUMP_AND_RECOVER) { + mutex_unlock(&device->mutex); + wait_for_completion(&device->recovery_gate); + mutex_lock(&device->mutex); + } else if (device->state == KGSL_STATE_SLUMBER) + kgsl_pwrctrl_wake(device); +} + +static int +_nap(struct kgsl_device *device) +{ + switch (device->state) { + case KGSL_STATE_ACTIVE: + if (!device->ftbl->isidle(device)) { + kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE); + return -EBUSY; + } + kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF); + kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP); + kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP); + if (device->idle_wakelock.name) + wake_unlock(&device->idle_wakelock); + case KGSL_STATE_NAP: + case KGSL_STATE_SLEEP: + case KGSL_STATE_SLUMBER: + break; + default: + kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE); + break; + } + return 0; +} + +static void +_sleep_accounting(struct kgsl_device *device) +{ + kgsl_pwrctrl_busy_time(device, false); + device->pwrctrl.busy.start.tv_sec = 0; + device->pwrctrl.time = 0; + kgsl_pwrscale_sleep(device); +} + +static int +_sleep(struct kgsl_device *device) +{ + struct kgsl_pwrctrl *pwr = &device->pwrctrl; + switch (device->state) { + case KGSL_STATE_ACTIVE: + if (!device->ftbl->isidle(device)) { + kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE); + return -EBUSY; + } + /* fall through */ + case KGSL_STATE_NAP: + kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF); + kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF); + if (pwr->pwrlevels[0].gpu_freq > 0) + clk_set_rate(pwr->grp_clks[0], + pwr->pwrlevels[pwr->num_pwrlevels - 1]. 
+ gpu_freq); + _sleep_accounting(device); + kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP); + kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP); + wake_unlock(&device->idle_wakelock); + pm_qos_update_request(&device->pm_qos_req_dma, + PM_QOS_DEFAULT_VALUE); + break; + case KGSL_STATE_SLEEP: + case KGSL_STATE_SLUMBER: + break; + default: + KGSL_PWR_WARN(device, "unhandled state %s\n", + kgsl_pwrstate_to_str(device->state)); + break; + } + return 0; +} + +static int +_slumber(struct kgsl_device *device) +{ + switch (device->state) { + case KGSL_STATE_ACTIVE: + if (!device->ftbl->isidle(device)) { + kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE); + device->pwrctrl.restore_slumber = true; + return -EBUSY; + } + /* fall through */ + case KGSL_STATE_NAP: + case KGSL_STATE_SLEEP: + del_timer_sync(&device->idle_timer); + kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_NOMINAL); + device->pwrctrl.restore_slumber = true; + device->ftbl->suspend_context(device); + device->ftbl->stop(device); + _sleep_accounting(device); + kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER); + if (device->idle_wakelock.name) + wake_unlock(&device->idle_wakelock); + pm_qos_update_request(&device->pm_qos_req_dma, + PM_QOS_DEFAULT_VALUE); + break; + case KGSL_STATE_SLUMBER: + break; + default: + KGSL_PWR_WARN(device, "unhandled state %s\n", + kgsl_pwrstate_to_str(device->state)); + break; + } + return 0; +} + +/******************************************************************/ +/* Caller must hold the device mutex. */ +int kgsl_pwrctrl_sleep(struct kgsl_device *device) +{ + int status = 0; + KGSL_PWR_INFO(device, "sleep device %d\n", device->id); + + /* Work through the legal state transitions */ + switch (device->requested_state) { + case KGSL_STATE_NAP: + status = _nap(device); + break; + case KGSL_STATE_SLEEP: + status = _sleep(device); + break; + case KGSL_STATE_SLUMBER: + status = _slumber(device); + break; + default: + KGSL_PWR_INFO(device, "bad state request 0x%x\n", + device->requested_state); + kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE); + status = -EINVAL; + break; + } + return status; +} +EXPORT_SYMBOL(kgsl_pwrctrl_sleep); + +/******************************************************************/ +/* Caller must hold the device mutex. 
*/ +void kgsl_pwrctrl_wake(struct kgsl_device *device) +{ + int status; + kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE); + switch (device->state) { + case KGSL_STATE_SLUMBER: + status = device->ftbl->start(device, 0); + if (status) { + kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE); + KGSL_DRV_ERR(device, "start failed %d\n", status); + break; + } + /* fall through */ + case KGSL_STATE_SLEEP: + kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON); + kgsl_pwrscale_wake(device); + /* fall through */ + case KGSL_STATE_NAP: + /* Turn on the core clocks */ + kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE); + /* Enable state before turning on irq */ + kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE); + kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON); + /* Re-enable HW access */ + mod_timer(&device->idle_timer, + jiffies + device->pwrctrl.interval_timeout); + wake_lock(&device->idle_wakelock); + if (device->pwrctrl.restore_slumber == false) + pm_qos_update_request(&device->pm_qos_req_dma, + GPU_SWFI_LATENCY); + case KGSL_STATE_ACTIVE: + break; + default: + KGSL_PWR_WARN(device, "unhandled state %s\n", + kgsl_pwrstate_to_str(device->state)); + kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE); + break; + } +} +EXPORT_SYMBOL(kgsl_pwrctrl_wake); + +void kgsl_pwrctrl_enable(struct kgsl_device *device) +{ + /* Order pwrrail/clk sequence based upon platform */ + kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON); + kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE); + kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON); +} +EXPORT_SYMBOL(kgsl_pwrctrl_enable); + +void kgsl_pwrctrl_disable(struct kgsl_device *device) +{ + /* Order pwrrail/clk sequence based upon platform */ + kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF); + kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP); + kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF); +} +EXPORT_SYMBOL(kgsl_pwrctrl_disable); + +void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state) +{ + trace_kgsl_pwr_set_state(device, state); + device->state = state; + device->requested_state = KGSL_STATE_NONE; +} +EXPORT_SYMBOL(kgsl_pwrctrl_set_state); + +void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state) +{ + if (state != KGSL_STATE_NONE && state != device->requested_state) + trace_kgsl_pwr_request_state(device, state); + device->requested_state = state; +} +EXPORT_SYMBOL(kgsl_pwrctrl_request_state); + +const char *kgsl_pwrstate_to_str(unsigned int state) +{ + switch (state) { + case KGSL_STATE_NONE: + return "NONE"; + case KGSL_STATE_INIT: + return "INIT"; + case KGSL_STATE_ACTIVE: + return "ACTIVE"; + case KGSL_STATE_NAP: + return "NAP"; + case KGSL_STATE_SLEEP: + return "SLEEP"; + case KGSL_STATE_SUSPEND: + return "SUSPEND"; + case KGSL_STATE_HUNG: + return "HUNG"; + case KGSL_STATE_DUMP_AND_RECOVER: + return "DNR"; + case KGSL_STATE_SLUMBER: + return "SLUMBER"; + default: + break; + } + return "UNKNOWN"; +} +EXPORT_SYMBOL(kgsl_pwrstate_to_str); + diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h new file mode 100644 index 0000000000000..e37b334a13b32 --- /dev/null +++ b/drivers/gpu/msm/kgsl_pwrctrl.h @@ -0,0 +1,83 @@ +/* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
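kgsl_pwrctrl_sleep() and kgsl_pwrctrl_wake() above only act on a state that was previously posted with kgsl_pwrctrl_request_state(), and both expect the caller to hold the device mutex. The sketch below shows that request/act pattern from a hypothetical caller; it is illustrative glue rather than code from this patch.

/* Hypothetical idle path: ask for NAP, let kgsl_pwrctrl_sleep() perform
 * the transition, and wake the core again before touching registers. */
static void example_idle_cycle(struct kgsl_device *device)
{
        mutex_lock(&device->mutex);

        kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
        if (kgsl_pwrctrl_sleep(device) != 0) {
                /* -EBUSY: the GPU was not idle yet; the request has
                 * already been reset to KGSL_STATE_NONE inside _nap(). */
        }

        /* ... later, before the next register access ... */
        kgsl_pre_hwaccess(device);   /* wakes the core from NAP/SLEEP/SLUMBER */

        mutex_unlock(&device->mutex);
}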
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __KGSL_PWRCTRL_H +#define __KGSL_PWRCTRL_H + +/***************************************************************************** +** power flags +*****************************************************************************/ +#define KGSL_PWRFLAGS_ON 1 +#define KGSL_PWRFLAGS_OFF 0 + +#define KGSL_PWRLEVEL_TURBO 0 +#define KGSL_PWRLEVEL_NOMINAL 1 + +#define KGSL_MAX_CLKS 7 + +struct platform_device; + +struct kgsl_busy { + struct timeval start; + struct timeval stop; + int on_time; + int time; + int on_time_old; + int time_old; + unsigned int no_nap_cnt; +}; + +struct kgsl_pwrctrl { + int interrupt_num; + int have_irq; + struct clk *ebi1_clk; + struct clk *grp_clks[KGSL_MAX_CLKS]; + unsigned long power_flags; + struct kgsl_pwrlevel pwrlevels[KGSL_MAX_PWRLEVELS]; + unsigned int active_pwrlevel; + int thermal_pwrlevel; + unsigned int num_pwrlevels; + unsigned int interval_timeout; + struct regulator *gpu_reg; + uint32_t pcl; + unsigned int nap_allowed; + unsigned int idle_needed; + const char *regulator_name; + const char *irq_name; + s64 time; + struct kgsl_busy busy; + unsigned int restore_slumber; +}; + +void kgsl_pwrctrl_irq(struct kgsl_device *device, int state); +int kgsl_pwrctrl_init(struct kgsl_device *device); +void kgsl_pwrctrl_close(struct kgsl_device *device); +void kgsl_timer(unsigned long data); +void kgsl_idle_check(struct work_struct *work); +void kgsl_pre_hwaccess(struct kgsl_device *device); +void kgsl_check_suspended(struct kgsl_device *device); +int kgsl_pwrctrl_sleep(struct kgsl_device *device); +void kgsl_pwrctrl_wake(struct kgsl_device *device); +void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device, + unsigned int level); +int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device); +void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device); +void kgsl_pwrctrl_enable(struct kgsl_device *device); +void kgsl_pwrctrl_disable(struct kgsl_device *device); +static inline unsigned long kgsl_get_clkrate(struct clk *clk) +{ + return (clk != NULL) ? clk_get_rate(clk) : 0; +} + +void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state); +void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state); +#endif /* __KGSL_PWRCTRL_H */ diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c new file mode 100644 index 0000000000000..f6f69b0b899f3 --- /dev/null +++ b/drivers/gpu/msm/kgsl_pwrscale.c @@ -0,0 +1,342 @@ +/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include + +#include "kgsl.h" +#include "kgsl_pwrscale.h" +#include "kgsl_device.h" + +struct kgsl_pwrscale_attribute { + struct attribute attr; + ssize_t (*show)(struct kgsl_device *device, char *buf); + ssize_t (*store)(struct kgsl_device *device, const char *buf, + size_t count); +}; + +#define to_pwrscale(k) container_of(k, struct kgsl_pwrscale, kobj) +#define pwrscale_to_device(p) container_of(p, struct kgsl_device, pwrscale) +#define to_device(k) container_of(k, struct kgsl_device, pwrscale_kobj) +#define to_pwrscale_attr(a) \ +container_of(a, struct kgsl_pwrscale_attribute, attr) +#define to_policy_attr(a) \ +container_of(a, struct kgsl_pwrscale_policy_attribute, attr) + +#define PWRSCALE_ATTR(_name, _mode, _show, _store) \ +struct kgsl_pwrscale_attribute pwrscale_attr_##_name = \ +__ATTR(_name, _mode, _show, _store) + +/* Master list of available policies */ + +static struct kgsl_pwrscale_policy *kgsl_pwrscale_policies[] = { +#ifdef CONFIG_MSM_SCM + &kgsl_pwrscale_policy_tz, +#endif +#ifdef CONFIG_MSM_SLEEP_STATS_DEVICE + &kgsl_pwrscale_policy_idlestats, +#endif + NULL +}; + +static ssize_t pwrscale_policy_store(struct kgsl_device *device, + const char *buf, size_t count) +{ + int i; + struct kgsl_pwrscale_policy *policy = NULL; + + /* The special keyword none allows the user to detach all + policies */ + if (!strncmp("none", buf, 4)) { + kgsl_pwrscale_detach_policy(device); + return count; + } + + for (i = 0; kgsl_pwrscale_policies[i]; i++) { + if (!strncmp(kgsl_pwrscale_policies[i]->name, buf, + strnlen(kgsl_pwrscale_policies[i]->name, + PAGE_SIZE))) { + policy = kgsl_pwrscale_policies[i]; + break; + } + } + + if (policy) + if (kgsl_pwrscale_attach_policy(device, policy)) + return -EIO; + + return count; +} + +static ssize_t pwrscale_policy_show(struct kgsl_device *device, char *buf) +{ + int ret; + + if (device->pwrscale.policy) + ret = snprintf(buf, PAGE_SIZE, "%s\n", + device->pwrscale.policy->name); + else + ret = snprintf(buf, PAGE_SIZE, "none\n"); + + return ret; +} + +PWRSCALE_ATTR(policy, 0664, pwrscale_policy_show, pwrscale_policy_store); + +static ssize_t pwrscale_avail_policies_show(struct kgsl_device *device, + char *buf) +{ + int i, ret = 0; + + for (i = 0; kgsl_pwrscale_policies[i]; i++) { + ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s ", + kgsl_pwrscale_policies[i]->name); + } + + ret += snprintf(buf + ret, PAGE_SIZE - ret, "none\n"); + return ret; +} +PWRSCALE_ATTR(avail_policies, 0444, pwrscale_avail_policies_show, NULL); + +static struct attribute *pwrscale_attrs[] = { + &pwrscale_attr_policy.attr, + &pwrscale_attr_avail_policies.attr, + NULL +}; + +static ssize_t policy_sysfs_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct kgsl_pwrscale *pwrscale = to_pwrscale(kobj); + struct kgsl_device *device = pwrscale_to_device(pwrscale); + struct kgsl_pwrscale_policy_attribute *pattr = to_policy_attr(attr); + ssize_t ret; + + if (pattr->show) + ret = pattr->show(device, pwrscale, buf); + else + ret = -EIO; + + return ret; +} + +static ssize_t policy_sysfs_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t count) +{ + struct kgsl_pwrscale *pwrscale = to_pwrscale(kobj); + struct kgsl_device *device = pwrscale_to_device(pwrscale); + struct kgsl_pwrscale_policy_attribute *pattr = to_policy_attr(attr); + ssize_t ret; + + if (pattr->store) + ret = pattr->store(device, pwrscale, buf, count); + else + ret = -EIO; + + return ret; +} + +static void policy_sysfs_release(struct kobject *kobj) +{ +} + +static 
ssize_t pwrscale_sysfs_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct kgsl_device *device = to_device(kobj); + struct kgsl_pwrscale_attribute *pattr = to_pwrscale_attr(attr); + ssize_t ret; + + if (pattr->show) + ret = pattr->show(device, buf); + else + ret = -EIO; + + return ret; +} + +static ssize_t pwrscale_sysfs_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t count) +{ + struct kgsl_device *device = to_device(kobj); + struct kgsl_pwrscale_attribute *pattr = to_pwrscale_attr(attr); + ssize_t ret; + + if (pattr->store) + ret = pattr->store(device, buf, count); + else + ret = -EIO; + + return ret; +} + +static void pwrscale_sysfs_release(struct kobject *kobj) +{ +} + +static const struct sysfs_ops policy_sysfs_ops = { + .show = policy_sysfs_show, + .store = policy_sysfs_store +}; + +static const struct sysfs_ops pwrscale_sysfs_ops = { + .show = pwrscale_sysfs_show, + .store = pwrscale_sysfs_store +}; + +static struct kobj_type ktype_pwrscale_policy = { + .sysfs_ops = &policy_sysfs_ops, + .default_attrs = NULL, + .release = policy_sysfs_release +}; + +static struct kobj_type ktype_pwrscale = { + .sysfs_ops = &pwrscale_sysfs_ops, + .default_attrs = pwrscale_attrs, + .release = pwrscale_sysfs_release +}; + +void kgsl_pwrscale_sleep(struct kgsl_device *device) +{ + if (device->pwrscale.policy && device->pwrscale.policy->sleep) + device->pwrscale.policy->sleep(device, &device->pwrscale); +} +EXPORT_SYMBOL(kgsl_pwrscale_sleep); + +void kgsl_pwrscale_wake(struct kgsl_device *device) +{ + if (device->pwrscale.policy && device->pwrscale.policy->wake) + device->pwrscale.policy->wake(device, &device->pwrscale); +} +EXPORT_SYMBOL(kgsl_pwrscale_wake); + +void kgsl_pwrscale_busy(struct kgsl_device *device) +{ + if (device->pwrscale.policy && device->pwrscale.policy->busy) + if (!device->pwrscale.gpu_busy) + device->pwrscale.policy->busy(device, + &device->pwrscale); + device->pwrscale.gpu_busy = 1; +} + +void kgsl_pwrscale_idle(struct kgsl_device *device) +{ + if (device->pwrscale.policy && device->pwrscale.policy->idle) + device->pwrscale.policy->idle(device, &device->pwrscale); + device->pwrscale.gpu_busy = 0; +} +EXPORT_SYMBOL(kgsl_pwrscale_idle); + +int kgsl_pwrscale_policy_add_files(struct kgsl_device *device, + struct kgsl_pwrscale *pwrscale, + struct attribute_group *attr_group) +{ + int ret; + + ret = kobject_add(&pwrscale->kobj, &device->pwrscale_kobj, + "%s", pwrscale->policy->name); + + if (ret) + return ret; + + ret = sysfs_create_group(&pwrscale->kobj, attr_group); + + if (ret) { + kobject_del(&pwrscale->kobj); + kobject_put(&pwrscale->kobj); + } + + return ret; +} + +void kgsl_pwrscale_policy_remove_files(struct kgsl_device *device, + struct kgsl_pwrscale *pwrscale, + struct attribute_group *attr_group) +{ + sysfs_remove_group(&pwrscale->kobj, attr_group); + kobject_del(&pwrscale->kobj); + kobject_put(&pwrscale->kobj); +} + +static void _kgsl_pwrscale_detach_policy(struct kgsl_device *device) +{ + if (device->pwrscale.policy != NULL) { + device->pwrscale.policy->close(device, &device->pwrscale); + kgsl_pwrctrl_pwrlevel_change(device, + device->pwrctrl.thermal_pwrlevel); + } + device->pwrscale.policy = NULL; +} + +void kgsl_pwrscale_detach_policy(struct kgsl_device *device) +{ + mutex_lock(&device->mutex); + _kgsl_pwrscale_detach_policy(device); + mutex_unlock(&device->mutex); +} +EXPORT_SYMBOL(kgsl_pwrscale_detach_policy); + +int kgsl_pwrscale_attach_policy(struct kgsl_device *device, + struct kgsl_pwrscale_policy *policy) +{ + 
int ret = 0; + + mutex_lock(&device->mutex); + + if (device->pwrscale.policy == policy) + goto done; + + if (device->pwrctrl.num_pwrlevels < 3) { + ret = -EINVAL; + goto done; + } + + if (device->pwrscale.policy != NULL) + _kgsl_pwrscale_detach_policy(device); + + device->pwrscale.policy = policy; + + if (policy) { + ret = device->pwrscale.policy->init(device, &device->pwrscale); + if (ret) + device->pwrscale.policy = NULL; + } + +done: + mutex_unlock(&device->mutex); + + return ret; +} +EXPORT_SYMBOL(kgsl_pwrscale_attach_policy); + +int kgsl_pwrscale_init(struct kgsl_device *device) +{ + int ret; + + ret = kobject_init_and_add(&device->pwrscale_kobj, &ktype_pwrscale, + &device->dev->kobj, "pwrscale"); + + if (ret) + return ret; + + kobject_init(&device->pwrscale.kobj, &ktype_pwrscale_policy); + return ret; +} +EXPORT_SYMBOL(kgsl_pwrscale_init); + +void kgsl_pwrscale_close(struct kgsl_device *device) +{ + kobject_put(&device->pwrscale_kobj); +} +EXPORT_SYMBOL(kgsl_pwrscale_close); diff --git a/drivers/gpu/msm/kgsl_pwrscale.h b/drivers/gpu/msm/kgsl_pwrscale.h new file mode 100644 index 0000000000000..512375e69b201 --- /dev/null +++ b/drivers/gpu/msm/kgsl_pwrscale.h @@ -0,0 +1,77 @@ +/* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __KGSL_PWRSCALE_H +#define __KGSL_PWRSCALE_H + +struct kgsl_pwrscale; + +struct kgsl_pwrscale_policy { + const char *name; + int (*init)(struct kgsl_device *device, + struct kgsl_pwrscale *pwrscale); + void (*close)(struct kgsl_device *device, + struct kgsl_pwrscale *pwrscale); + void (*idle)(struct kgsl_device *device, + struct kgsl_pwrscale *pwrscale); + void (*busy)(struct kgsl_device *device, + struct kgsl_pwrscale *pwrscale); + void (*sleep)(struct kgsl_device *device, + struct kgsl_pwrscale *pwrscale); + void (*wake)(struct kgsl_device *device, + struct kgsl_pwrscale *pwrscale); +}; + +struct kgsl_pwrscale { + struct kgsl_pwrscale_policy *policy; + struct kobject kobj; + void *priv; + int gpu_busy; +}; + +struct kgsl_pwrscale_policy_attribute { + struct attribute attr; + ssize_t (*show)(struct kgsl_device *device, + struct kgsl_pwrscale *pwrscale, char *buf); + ssize_t (*store)(struct kgsl_device *device, + struct kgsl_pwrscale *pwrscale, const char *buf, + size_t count); +}; + +#define PWRSCALE_POLICY_ATTR(_name, _mode, _show, _store) \ + struct kgsl_pwrscale_policy_attribute policy_attr_##_name = \ + __ATTR(_name, _mode, _show, _store) + +extern struct kgsl_pwrscale_policy kgsl_pwrscale_policy_tz; +extern struct kgsl_pwrscale_policy kgsl_pwrscale_policy_idlestats; + +int kgsl_pwrscale_init(struct kgsl_device *device); +void kgsl_pwrscale_close(struct kgsl_device *device); + +int kgsl_pwrscale_attach_policy(struct kgsl_device *device, + struct kgsl_pwrscale_policy *policy); +void kgsl_pwrscale_detach_policy(struct kgsl_device *device); + +void kgsl_pwrscale_idle(struct kgsl_device *device); +void kgsl_pwrscale_busy(struct kgsl_device *device); +void kgsl_pwrscale_sleep(struct kgsl_device *device); +void kgsl_pwrscale_wake(struct kgsl_device *device); + +int 
kgsl_pwrscale_policy_add_files(struct kgsl_device *device, + struct kgsl_pwrscale *pwrscale, + struct attribute_group *attr_group); + +void kgsl_pwrscale_policy_remove_files(struct kgsl_device *device, + struct kgsl_pwrscale *pwrscale, + struct attribute_group *attr_group); +#endif diff --git a/drivers/gpu/msm/kgsl_pwrscale_idlestats.c b/drivers/gpu/msm/kgsl_pwrscale_idlestats.c new file mode 100644 index 0000000000000..acf98c5045119 --- /dev/null +++ b/drivers/gpu/msm/kgsl_pwrscale_idlestats.c @@ -0,0 +1,222 @@ +/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "kgsl.h" +#include "kgsl_pwrscale.h" +#include "kgsl_device.h" + +#define MAX_CORES 4 +struct _cpu_info { + spinlock_t lock; + struct notifier_block cpu_nb; + u64 start[MAX_CORES]; + u64 end[MAX_CORES]; + int curr_freq[MAX_CORES]; + int max_freq[MAX_CORES]; +}; + +struct idlestats_priv { + char name[32]; + struct msm_idle_stats_device idledev; + struct kgsl_device *device; + struct msm_idle_pulse pulse; + struct _cpu_info cpu_info; +}; + +static int idlestats_cpufreq_notifier( + struct notifier_block *nb, + unsigned long val, void *data) +{ + struct _cpu_info *cpu = container_of(nb, + struct _cpu_info, cpu_nb); + struct cpufreq_freqs *freq = data; + + if (val != CPUFREQ_POSTCHANGE) + return 0; + + spin_lock(&cpu->lock); + if (freq->cpu < num_possible_cpus()) + cpu->curr_freq[freq->cpu] = freq->new / 1000; + spin_unlock(&cpu->lock); + + return 0; +} + +static void idlestats_get_sample(struct msm_idle_stats_device *idledev, + struct msm_idle_pulse *pulse) +{ + struct kgsl_power_stats stats; + struct idlestats_priv *priv = container_of(idledev, + struct idlestats_priv, idledev); + struct kgsl_device *device = priv->device; + struct kgsl_pwrctrl *pwr = &device->pwrctrl; + + mutex_lock(&device->mutex); + /* If the GPU is asleep, don't wake it up - assume that we + are idle */ + + if (device->state == KGSL_STATE_ACTIVE) { + device->ftbl->power_stats(device, &stats); + pulse->busy_start_time = pwr->time - stats.busy_time; + pulse->busy_interval = stats.busy_time; + } else { + pulse->busy_start_time = pwr->time; + pulse->busy_interval = 0; + } + pulse->wait_interval = 0; + mutex_unlock(&device->mutex); +} + +static void idlestats_busy(struct kgsl_device *device, + struct kgsl_pwrscale *pwrscale) +{ + struct idlestats_priv *priv = pwrscale->priv; + int i, busy, nr_cpu = 1; + + if (priv->pulse.busy_start_time != 0) { + priv->pulse.wait_interval = 0; + /* Calculate the total CPU busy time for this GPU pulse */ + for (i = 0; i < num_possible_cpus(); i++) { + spin_lock(&priv->cpu_info.lock); + if (cpu_online(i)) { + priv->cpu_info.end[i] = + (u64)ktime_to_us(ktime_get()) - + get_cpu_idle_time_us(i, NULL); + busy = priv->cpu_info.end[i] - + priv->cpu_info.start[i]; + /* Normalize the busy time by frequency */ + busy = priv->cpu_info.curr_freq[i] * + (busy / priv->cpu_info.max_freq[i]); + priv->pulse.wait_interval += busy; + nr_cpu++; + } + spin_unlock(&priv->cpu_info.lock); + 
} + priv->pulse.wait_interval /= nr_cpu; + msm_idle_stats_idle_end(&priv->idledev, &priv->pulse); + } + priv->pulse.busy_start_time = ktime_to_us(ktime_get()); +} + +static void idlestats_idle(struct kgsl_device *device, + struct kgsl_pwrscale *pwrscale) +{ + int i, nr_cpu; + struct kgsl_power_stats stats; + struct idlestats_priv *priv = pwrscale->priv; + + /* This is called from within a mutex protected function, so + no additional locking required */ + device->ftbl->power_stats(device, &stats); + + /* If total_time is zero, then we don't have + any interesting statistics to store */ + if (stats.total_time == 0) { + priv->pulse.busy_start_time = 0; + return; + } + + priv->pulse.busy_interval = stats.busy_time; + nr_cpu = num_possible_cpus(); + for (i = 0; i < nr_cpu; i++) + if (cpu_online(i)) + priv->cpu_info.start[i] = + (u64)ktime_to_us(ktime_get()) - + get_cpu_idle_time_us(i, NULL); + + msm_idle_stats_idle_start(&priv->idledev); +} + +static void idlestats_sleep(struct kgsl_device *device, + struct kgsl_pwrscale *pwrscale) +{ + struct idlestats_priv *priv = pwrscale->priv; + msm_idle_stats_update_event(&priv->idledev, + MSM_IDLE_STATS_EVENT_IDLE_TIMER_EXPIRED); +} + +static int idlestats_init(struct kgsl_device *device, + struct kgsl_pwrscale *pwrscale) +{ + struct idlestats_priv *priv; + struct cpufreq_policy cpu_policy; + int ret, i; + + priv = pwrscale->priv = kzalloc(sizeof(struct idlestats_priv), + GFP_KERNEL); + if (pwrscale->priv == NULL) + return -ENOMEM; + + snprintf(priv->name, sizeof(priv->name), "idle_stats_%s", + device->name); + + priv->device = device; + + priv->idledev.name = (const char *) priv->name; + priv->idledev.get_sample = idlestats_get_sample; + + spin_lock_init(&priv->cpu_info.lock); + priv->cpu_info.cpu_nb.notifier_call = + idlestats_cpufreq_notifier; + ret = cpufreq_register_notifier(&priv->cpu_info.cpu_nb, + CPUFREQ_TRANSITION_NOTIFIER); + if (ret) + goto err; + for (i = 0; i < num_possible_cpus(); i++) { + cpufreq_frequency_table_cpuinfo(&cpu_policy, + cpufreq_frequency_get_table(i)); + priv->cpu_info.max_freq[i] = cpu_policy.max / 1000; + priv->cpu_info.curr_freq[i] = cpu_policy.max / 1000; + } + ret = msm_idle_stats_register_device(&priv->idledev); +err: + if (ret) { + kfree(pwrscale->priv); + pwrscale->priv = NULL; + } + + return ret; +} + +static void idlestats_close(struct kgsl_device *device, + struct kgsl_pwrscale *pwrscale) +{ + struct idlestats_priv *priv = pwrscale->priv; + + if (pwrscale->priv == NULL) + return; + + cpufreq_unregister_notifier(&priv->cpu_info.cpu_nb, + CPUFREQ_TRANSITION_NOTIFIER); + msm_idle_stats_deregister_device(&priv->idledev); + + kfree(pwrscale->priv); + pwrscale->priv = NULL; +} + +struct kgsl_pwrscale_policy kgsl_pwrscale_policy_idlestats = { + .name = "idlestats", + .init = idlestats_init, + .idle = idlestats_idle, + .busy = idlestats_busy, + .sleep = idlestats_sleep, + .close = idlestats_close +}; diff --git a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c new file mode 100644 index 0000000000000..9bcb88914b974 --- /dev/null +++ b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c @@ -0,0 +1,213 @@ +/* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
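The idlestats policy above and the trustzone policy that follows both use the same recipe: fill in a struct kgsl_pwrscale_policy, allocate private state in init(), and optionally publish tunables with kgsl_pwrscale_policy_add_files(). The skeleton below is a hypothetical third policy written against the callbacks declared in kgsl_pwrscale.h; it is not part of this patch, and a real policy would also need an extern declaration plus an entry in the kgsl_pwrscale_policies[] master list in kgsl_pwrscale.c.

/* Hypothetical policy skeleton; "example" is not a policy in this patch. */
struct example_priv {
        unsigned int idle_count;
};

static int example_init(struct kgsl_device *device,
                        struct kgsl_pwrscale *pwrscale)
{
        pwrscale->priv = kzalloc(sizeof(struct example_priv), GFP_KERNEL);
        return pwrscale->priv ? 0 : -ENOMEM;
}

static void example_idle(struct kgsl_device *device,
                         struct kgsl_pwrscale *pwrscale)
{
        struct example_priv *priv = pwrscale->priv;

        /* Called with the device mutex held, so pwrlevel changes are safe.
         * Step down one level after a run of idle callbacks; the request
         * is ignored by kgsl_pwrctrl_pwrlevel_change() if the GPU is
         * already at the lowest usable level. */
        if (++priv->idle_count > 100) {
                kgsl_pwrctrl_pwrlevel_change(device,
                        device->pwrctrl.active_pwrlevel + 1);
                priv->idle_count = 0;
        }
}

static void example_close(struct kgsl_device *device,
                          struct kgsl_pwrscale *pwrscale)
{
        kfree(pwrscale->priv);
        pwrscale->priv = NULL;
}

struct kgsl_pwrscale_policy kgsl_pwrscale_policy_example = {
        .name  = "example",
        .init  = example_init,
        .idle  = example_idle,
        .close = example_close,
};

At runtime a policy is selected through the pwrscale kobject created by kgsl_pwrscale_init(): writing one of the names listed in avail_policies (or "none") to the policy file attaches or detaches it, and a policy's own attributes, such as the trustzone governor below, then appear in a subdirectory named after the policy.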
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include + +#include "kgsl.h" +#include "kgsl_pwrscale.h" +#include "kgsl_device.h" + +#define TZ_GOVERNOR_PERFORMANCE 0 +#define TZ_GOVERNOR_ONDEMAND 1 + +struct tz_priv { + int governor; + unsigned int no_switch_cnt; + unsigned int skip_cnt; +}; +spinlock_t tz_lock; + +#define SWITCH_OFF 200 +#define SWITCH_OFF_RESET_TH 40 +#define SKIP_COUNTER 500 +#define TZ_RESET_ID 0x3 +#define TZ_UPDATE_ID 0x4 + +#ifdef CONFIG_MSM_SCM +/* Trap into the TrustZone, and call funcs there. */ +static int __secure_tz_entry(u32 cmd, u32 val, u32 id) +{ + int ret; + spin_lock(&tz_lock); + __iowmb(); + ret = scm_call_atomic2(SCM_SVC_IO, cmd, val, id); + spin_unlock(&tz_lock); + return ret; +} +#else +static int __secure_tz_entry(u32 cmd, u32 val, u32 id) +{ + return 0; +} +#endif /* CONFIG_MSM_SCM */ + +static ssize_t tz_governor_show(struct kgsl_device *device, + struct kgsl_pwrscale *pwrscale, + char *buf) +{ + struct tz_priv *priv = pwrscale->priv; + int ret; + + if (priv->governor == TZ_GOVERNOR_ONDEMAND) + ret = snprintf(buf, 10, "ondemand\n"); + else + ret = snprintf(buf, 13, "performance\n"); + + return ret; +} + +static ssize_t tz_governor_store(struct kgsl_device *device, + struct kgsl_pwrscale *pwrscale, + const char *buf, size_t count) +{ + char str[20]; + struct tz_priv *priv = pwrscale->priv; + struct kgsl_pwrctrl *pwr = &device->pwrctrl; + int ret; + + ret = sscanf(buf, "%20s", str); + if (ret != 1) + return -EINVAL; + + mutex_lock(&device->mutex); + + if (!strncmp(str, "ondemand", 8)) + priv->governor = TZ_GOVERNOR_ONDEMAND; + else if (!strncmp(str, "performance", 11)) + priv->governor = TZ_GOVERNOR_PERFORMANCE; + + if (priv->governor == TZ_GOVERNOR_PERFORMANCE) + kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel); + + mutex_unlock(&device->mutex); + return count; +} + +PWRSCALE_POLICY_ATTR(governor, 0644, tz_governor_show, tz_governor_store); + +static struct attribute *tz_attrs[] = { + &policy_attr_governor.attr, + NULL +}; + +static struct attribute_group tz_attr_group = { + .attrs = tz_attrs, +}; + +static void tz_wake(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale) +{ + struct tz_priv *priv = pwrscale->priv; + if (device->state != KGSL_STATE_NAP && + priv->governor == TZ_GOVERNOR_ONDEMAND && + device->pwrctrl.restore_slumber == 0) + kgsl_pwrctrl_pwrlevel_change(device, + device->pwrctrl.thermal_pwrlevel); +} + +static void tz_idle(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale) +{ + struct kgsl_pwrctrl *pwr = &device->pwrctrl; + struct tz_priv *priv = pwrscale->priv; + struct kgsl_power_stats stats; + int val, idle; + + /* In "performance" mode the clock speed always stays + the same */ + + if (priv->governor == TZ_GOVERNOR_PERFORMANCE) + return; + + device->ftbl->power_stats(device, &stats); + if (stats.total_time == 0) + return; + + /* If the GPU has stayed in turbo mode for a while, * + * stop writing out values. 
*/ + if (pwr->active_pwrlevel == 0) { + if (priv->no_switch_cnt > SWITCH_OFF) { + priv->skip_cnt++; + if (priv->skip_cnt > SKIP_COUNTER) { + priv->no_switch_cnt -= SWITCH_OFF_RESET_TH; + priv->skip_cnt = 0; + } + return; + } + priv->no_switch_cnt++; + } else { + priv->no_switch_cnt = 0; + } + + idle = stats.total_time - stats.busy_time; + idle = (idle > 0) ? idle : 0; + val = __secure_tz_entry(TZ_UPDATE_ID, idle, device->id); + if (val) + kgsl_pwrctrl_pwrlevel_change(device, + pwr->active_pwrlevel + val); +} + +static void tz_busy(struct kgsl_device *device, + struct kgsl_pwrscale *pwrscale) +{ + device->on_time = ktime_to_us(ktime_get()); +} + +static void tz_sleep(struct kgsl_device *device, + struct kgsl_pwrscale *pwrscale) +{ + struct tz_priv *priv = pwrscale->priv; + + __secure_tz_entry(TZ_RESET_ID, 0, device->id); + priv->no_switch_cnt = 0; +} + +static int tz_init(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale) +{ + struct tz_priv *priv; + + /* Trustzone is only valid for some SOCs */ + if (!(cpu_is_msm8x60() || cpu_is_msm8960() || cpu_is_msm8930())) + return -EINVAL; + + priv = pwrscale->priv = kzalloc(sizeof(struct tz_priv), GFP_KERNEL); + if (pwrscale->priv == NULL) + return -ENOMEM; + + priv->governor = TZ_GOVERNOR_ONDEMAND; + spin_lock_init(&tz_lock); + kgsl_pwrscale_policy_add_files(device, pwrscale, &tz_attr_group); + + return 0; +} + +static void tz_close(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale) +{ + kgsl_pwrscale_policy_remove_files(device, pwrscale, &tz_attr_group); + kfree(pwrscale->priv); + pwrscale->priv = NULL; +} + +struct kgsl_pwrscale_policy kgsl_pwrscale_policy_tz = { + .name = "trustzone", + .init = tz_init, + .busy = tz_busy, + .idle = tz_idle, + .sleep = tz_sleep, + .wake = tz_wake, + .close = tz_close +}; +EXPORT_SYMBOL(kgsl_pwrscale_policy_tz); diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c new file mode 100644 index 0000000000000..67fff1802d46c --- /dev/null +++ b/drivers/gpu/msm/kgsl_sharedmem.c @@ -0,0 +1,756 @@ +/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include + +#include "kgsl.h" +#include "kgsl_sharedmem.h" +#include "kgsl_cffdump.h" +#include "kgsl_device.h" + +/* An attribute for showing per-process memory statistics */ +struct kgsl_mem_entry_attribute { + struct attribute attr; + int memtype; + ssize_t (*show)(struct kgsl_process_private *priv, + int type, char *buf); +}; + +#define to_mem_entry_attr(a) \ +container_of(a, struct kgsl_mem_entry_attribute, attr) + +#define __MEM_ENTRY_ATTR(_type, _name, _show) \ +{ \ + .attr = { .name = __stringify(_name), .mode = 0444 }, \ + .memtype = _type, \ + .show = _show, \ +} + +/* + * A structure to hold the attributes for a particular memory type. + * For each memory type in each process we store the current and maximum + * memory usage and display the counts in sysfs. 
This structure and + * the following macro allow us to simplify the definition for those + * adding new memory types + */ + +struct mem_entry_stats { + int memtype; + struct kgsl_mem_entry_attribute attr; + struct kgsl_mem_entry_attribute max_attr; +}; + + +#define MEM_ENTRY_STAT(_type, _name) \ +{ \ + .memtype = _type, \ + .attr = __MEM_ENTRY_ATTR(_type, _name, mem_entry_show), \ + .max_attr = __MEM_ENTRY_ATTR(_type, _name##_max, \ + mem_entry_max_show), \ +} + + +/** + * Given a kobj, find the process structure attached to it + */ + +static struct kgsl_process_private * +_get_priv_from_kobj(struct kobject *kobj) +{ + struct kgsl_process_private *private; + unsigned long name; + + if (!kobj) + return NULL; + + if (sscanf(kobj->name, "%ld", &name) != 1) + return NULL; + + list_for_each_entry(private, &kgsl_driver.process_list, list) { + if (private->pid == name) + return private; + } + + return NULL; +} + +/** + * Show the current amount of memory allocated for the given memtype + */ + +static ssize_t +mem_entry_show(struct kgsl_process_private *priv, int type, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", priv->stats[type].cur); +} + +/** + * Show the maximum memory allocated for the given memtype through the life of + * the process + */ + +static ssize_t +mem_entry_max_show(struct kgsl_process_private *priv, int type, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", priv->stats[type].max); +} + + +static void mem_entry_sysfs_release(struct kobject *kobj) +{ +} + +static ssize_t mem_entry_sysfs_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct kgsl_mem_entry_attribute *pattr = to_mem_entry_attr(attr); + struct kgsl_process_private *priv; + ssize_t ret; + + mutex_lock(&kgsl_driver.process_mutex); + priv = _get_priv_from_kobj(kobj); + + if (priv && pattr->show) + ret = pattr->show(priv, pattr->memtype, buf); + else + ret = -EIO; + + mutex_unlock(&kgsl_driver.process_mutex); + return ret; +} + +static const struct sysfs_ops mem_entry_sysfs_ops = { + .show = mem_entry_sysfs_show, +}; + +static struct kobj_type ktype_mem_entry = { + .sysfs_ops = &mem_entry_sysfs_ops, + .default_attrs = NULL, + .release = mem_entry_sysfs_release +}; + +static struct mem_entry_stats mem_stats[] = { + MEM_ENTRY_STAT(KGSL_MEM_ENTRY_KERNEL, kernel), +#ifdef CONFIG_ANDROID_PMEM + MEM_ENTRY_STAT(KGSL_MEM_ENTRY_PMEM, pmem), +#endif +#ifdef CONFIG_ASHMEM + MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ASHMEM, ashmem), +#endif + MEM_ENTRY_STAT(KGSL_MEM_ENTRY_USER, user), +#ifdef CONFIG_ION + MEM_ENTRY_STAT(KGSL_MEM_ENTRY_USER, ion), +#endif +}; + +void +kgsl_process_uninit_sysfs(struct kgsl_process_private *private) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mem_stats); i++) { + sysfs_remove_file(&private->kobj, &mem_stats[i].attr.attr); + sysfs_remove_file(&private->kobj, + &mem_stats[i].max_attr.attr); + } + + kobject_put(&private->kobj); +} + +void +kgsl_process_init_sysfs(struct kgsl_process_private *private) +{ + unsigned char name[16]; + int i, ret; + + snprintf(name, sizeof(name), "%d", private->pid); + + if (kobject_init_and_add(&private->kobj, &ktype_mem_entry, + kgsl_driver.prockobj, name)) + return; + + for (i = 0; i < ARRAY_SIZE(mem_stats); i++) { + /* We need to check the value of sysfs_create_file, but we + * don't really care if it passed or not */ + + ret = sysfs_create_file(&private->kobj, + &mem_stats[i].attr.attr); + ret = sysfs_create_file(&private->kobj, + &mem_stats[i].max_attr.attr); + } +} + +static int kgsl_drv_memstat_show(struct device *dev, + struct device_attribute 
*attr, + char *buf) +{ + unsigned int val = 0; + + if (!strncmp(attr->attr.name, "vmalloc", 7)) + val = kgsl_driver.stats.vmalloc; + else if (!strncmp(attr->attr.name, "vmalloc_max", 11)) + val = kgsl_driver.stats.vmalloc_max; + else if (!strncmp(attr->attr.name, "coherent", 8)) + val = kgsl_driver.stats.coherent; + else if (!strncmp(attr->attr.name, "coherent_max", 12)) + val = kgsl_driver.stats.coherent_max; + else if (!strncmp(attr->attr.name, "mapped", 6)) + val = kgsl_driver.stats.mapped; + else if (!strncmp(attr->attr.name, "mapped_max", 10)) + val = kgsl_driver.stats.mapped_max; + + return snprintf(buf, PAGE_SIZE, "%u\n", val); +} + +static int kgsl_drv_histogram_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int len = 0; + int i; + + for (i = 0; i < 16; i++) + len += snprintf(buf + len, PAGE_SIZE - len, "%d ", + kgsl_driver.stats.histogram[i]); + + len += snprintf(buf + len, PAGE_SIZE - len, "\n"); + return len; +} + +DEVICE_ATTR(vmalloc, 0444, kgsl_drv_memstat_show, NULL); +DEVICE_ATTR(vmalloc_max, 0444, kgsl_drv_memstat_show, NULL); +DEVICE_ATTR(coherent, 0444, kgsl_drv_memstat_show, NULL); +DEVICE_ATTR(coherent_max, 0444, kgsl_drv_memstat_show, NULL); +DEVICE_ATTR(mapped, 0444, kgsl_drv_memstat_show, NULL); +DEVICE_ATTR(mapped_max, 0444, kgsl_drv_memstat_show, NULL); +DEVICE_ATTR(histogram, 0444, kgsl_drv_histogram_show, NULL); + +static const struct device_attribute *drv_attr_list[] = { + &dev_attr_vmalloc, + &dev_attr_vmalloc_max, + &dev_attr_coherent, + &dev_attr_coherent_max, + &dev_attr_mapped, + &dev_attr_mapped_max, + &dev_attr_histogram, + NULL +}; + +void +kgsl_sharedmem_uninit_sysfs(void) +{ + kgsl_remove_device_sysfs_files(&kgsl_driver.virtdev, drv_attr_list); +} + +int +kgsl_sharedmem_init_sysfs(void) +{ + return kgsl_create_device_sysfs_files(&kgsl_driver.virtdev, + drv_attr_list); +} + +#ifdef CONFIG_OUTER_CACHE +static void _outer_cache_range_op(int op, unsigned long addr, size_t size) +{ + switch (op) { + case KGSL_CACHE_OP_FLUSH: + outer_flush_range(addr, addr + size); + break; + case KGSL_CACHE_OP_CLEAN: + outer_clean_range(addr, addr + size); + break; + case KGSL_CACHE_OP_INV: + outer_inv_range(addr, addr + size); + break; + } +} + +static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op) +{ + struct scatterlist *s; + int i; + + for_each_sg(sg, s, sglen, i) { + unsigned int paddr = kgsl_get_sg_pa(s); + _outer_cache_range_op(op, paddr, s->length); + } +} + +#else +static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op) +{ +} +#endif + +static int kgsl_vmalloc_vmfault(struct kgsl_memdesc *memdesc, + struct vm_area_struct *vma, + struct vm_fault *vmf) +{ + unsigned long offset; + struct page *page; + int i; + + offset = (unsigned long) vmf->virtual_address - vma->vm_start; + + i = offset >> PAGE_SHIFT; + page = sg_page(&memdesc->sg[i]); + if (page == NULL) + return VM_FAULT_SIGBUS; + + get_page(page); + + vmf->page = page; + return 0; +} + +static int kgsl_vmalloc_vmflags(struct kgsl_memdesc *memdesc) +{ + return VM_RESERVED | VM_DONTEXPAND; +} + +static void kgsl_vmalloc_free(struct kgsl_memdesc *memdesc) +{ + int i = 0; + struct scatterlist *sg; + kgsl_driver.stats.vmalloc -= memdesc->size; + if (memdesc->hostptr) + vunmap(memdesc->hostptr); + if (memdesc->sg) + for_each_sg(memdesc->sg, sg, memdesc->sglen, i) + __free_page(sg_page(sg)); +} + +static int kgsl_contiguous_vmflags(struct kgsl_memdesc *memdesc) +{ + return VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND; +} + +/* + * 
kgsl_vmalloc_map_kernel - Map the memory in memdesc to kernel address space + * + * @memdesc - The memory descriptor which contains information about the memory + * + * Return: 0 on success else error code + */ +static int kgsl_vmalloc_map_kernel(struct kgsl_memdesc *memdesc) +{ + if (!memdesc->hostptr) { + pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL); + struct page **pages = NULL; + struct scatterlist *sg; + int i; + /* create a list of pages to call vmap */ + pages = vmalloc(memdesc->sglen * sizeof(struct page *)); + if (!pages) { + KGSL_CORE_ERR("vmalloc(%d) failed\n", + memdesc->sglen * sizeof(struct page *)); + return -ENOMEM; + } + for_each_sg(memdesc->sg, sg, memdesc->sglen, i) + pages[i] = sg_page(sg); + memdesc->hostptr = vmap(pages, memdesc->sglen, + VM_IOREMAP, page_prot); + vfree(pages); + } + if (!memdesc->hostptr) + return -ENOMEM; + + return 0; +} + +static int kgsl_contiguous_vmfault(struct kgsl_memdesc *memdesc, + struct vm_area_struct *vma, + struct vm_fault *vmf) +{ + unsigned long offset, pfn; + int ret; + + offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >> + PAGE_SHIFT; + + pfn = (memdesc->physaddr >> PAGE_SHIFT) + offset; + ret = vm_insert_pfn(vma, (unsigned long) vmf->virtual_address, pfn); + + if (ret == -ENOMEM || ret == -EAGAIN) + return VM_FAULT_OOM; + else if (ret == -EFAULT) + return VM_FAULT_SIGBUS; + + return VM_FAULT_NOPAGE; +} + +static void kgsl_ebimem_free(struct kgsl_memdesc *memdesc) + +{ + kgsl_driver.stats.coherent -= memdesc->size; + if (memdesc->hostptr) + iounmap(memdesc->hostptr); + + free_contiguous_memory_by_paddr(memdesc->physaddr); +} + +static void kgsl_coherent_free(struct kgsl_memdesc *memdesc) +{ + kgsl_driver.stats.coherent -= memdesc->size; + dma_free_coherent(NULL, memdesc->size, + memdesc->hostptr, memdesc->physaddr); +} + +/* Global - also used by kgsl_drm.c */ +struct kgsl_memdesc_ops kgsl_vmalloc_ops = { + .free = kgsl_vmalloc_free, + .vmflags = kgsl_vmalloc_vmflags, + .vmfault = kgsl_vmalloc_vmfault, + .map_kernel_mem = kgsl_vmalloc_map_kernel, +}; +EXPORT_SYMBOL(kgsl_vmalloc_ops); + +static struct kgsl_memdesc_ops kgsl_ebimem_ops = { + .free = kgsl_ebimem_free, + .vmflags = kgsl_contiguous_vmflags, + .vmfault = kgsl_contiguous_vmfault, +}; + +static struct kgsl_memdesc_ops kgsl_coherent_ops = { + .free = kgsl_coherent_free, +}; + +void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op) +{ + void *addr = memdesc->hostptr; + int size = memdesc->size; + + switch (op) { + case KGSL_CACHE_OP_FLUSH: + dmac_flush_range(addr, addr + size); + break; + case KGSL_CACHE_OP_CLEAN: + dmac_clean_range(addr, addr + size); + break; + case KGSL_CACHE_OP_INV: + dmac_inv_range(addr, addr + size); + break; + } + + outer_cache_range_op_sg(memdesc->sg, memdesc->sglen, op); +} +EXPORT_SYMBOL(kgsl_cache_range_op); + +static int +_kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc, + struct kgsl_pagetable *pagetable, + size_t size, unsigned int protflags) +{ + int order, ret = 0; + int sglen = PAGE_ALIGN(size) / PAGE_SIZE; + int i; + + memdesc->size = size; + memdesc->pagetable = pagetable; + memdesc->priv = KGSL_MEMFLAGS_CACHED; + memdesc->ops = &kgsl_vmalloc_ops; + + memdesc->sg = kgsl_sg_alloc(sglen); + + if (memdesc->sg == NULL) { + ret = -ENOMEM; + goto done; + } + + kmemleak_not_leak(memdesc->sg); + + memdesc->sglen = sglen; + sg_init_table(memdesc->sg, sglen); + + for (i = 0; i < memdesc->sglen; i++) { + struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO | + __GFP_HIGHMEM); + if (!page) { + ret = -ENOMEM; + 
memdesc->sglen = i; + goto done; + } + flush_dcache_page(page); + sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0); + } + outer_cache_range_op_sg(memdesc->sg, memdesc->sglen, + KGSL_CACHE_OP_FLUSH); + + ret = kgsl_mmu_map(pagetable, memdesc, protflags); + + if (ret) + goto done; + + KGSL_STATS_ADD(size, kgsl_driver.stats.vmalloc, + kgsl_driver.stats.vmalloc_max); + + order = get_order(size); + + if (order < 16) + kgsl_driver.stats.histogram[order]++; + +done: + if (ret) + kgsl_sharedmem_free(memdesc); + + return ret; +} + +int +kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc, + struct kgsl_pagetable *pagetable, size_t size) +{ + int ret = 0; + BUG_ON(size == 0); + + size = ALIGN(size, PAGE_SIZE * 2); + + ret = _kgsl_sharedmem_vmalloc(memdesc, pagetable, size, + GSL_PT_PAGE_RV | GSL_PT_PAGE_WV); + if (!ret) + ret = kgsl_vmalloc_map_kernel(memdesc); + if (ret) + kgsl_sharedmem_free(memdesc); + return ret; +} +EXPORT_SYMBOL(kgsl_sharedmem_vmalloc); + +int +kgsl_sharedmem_vmalloc_user(struct kgsl_memdesc *memdesc, + struct kgsl_pagetable *pagetable, + size_t size, int flags) +{ + unsigned int protflags; + + BUG_ON(size == 0); + + protflags = GSL_PT_PAGE_RV; + if (!(flags & KGSL_MEMFLAGS_GPUREADONLY)) + protflags |= GSL_PT_PAGE_WV; + + return _kgsl_sharedmem_vmalloc(memdesc, pagetable, size, + protflags); +} +EXPORT_SYMBOL(kgsl_sharedmem_vmalloc_user); + +int +kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size) +{ + int result = 0; + + size = ALIGN(size, PAGE_SIZE); + + memdesc->size = size; + memdesc->ops = &kgsl_coherent_ops; + + memdesc->hostptr = dma_alloc_coherent(NULL, size, &memdesc->physaddr, + GFP_KERNEL); + if (memdesc->hostptr == NULL) { + KGSL_CORE_ERR("dma_alloc_coherent(%d) failed\n", size); + result = -ENOMEM; + goto err; + } + + result = memdesc_sg_phys(memdesc, memdesc->physaddr, size); + if (result) + goto err; + + /* Record statistics */ + + KGSL_STATS_ADD(size, kgsl_driver.stats.coherent, + kgsl_driver.stats.coherent_max); + +err: + if (result) + kgsl_sharedmem_free(memdesc); + + return result; +} +EXPORT_SYMBOL(kgsl_sharedmem_alloc_coherent); + +void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc) +{ + if (memdesc == NULL || memdesc->size == 0) + return; + + if (memdesc->gpuaddr) + kgsl_mmu_unmap(memdesc->pagetable, memdesc); + + if (memdesc->ops && memdesc->ops->free) + memdesc->ops->free(memdesc); + + kgsl_sg_free(memdesc->sg, memdesc->sglen); + + memset(memdesc, 0, sizeof(*memdesc)); +} +EXPORT_SYMBOL(kgsl_sharedmem_free); + +static int +_kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc, + struct kgsl_pagetable *pagetable, size_t size) +{ + int result = 0; + + memdesc->size = size; + memdesc->pagetable = pagetable; + memdesc->ops = &kgsl_ebimem_ops; + memdesc->physaddr = allocate_contiguous_ebi_nomap(size, SZ_8K); + + if (memdesc->physaddr == 0) { + KGSL_CORE_ERR("allocate_contiguous_ebi_nomap(%d) failed\n", + size); + return -ENOMEM; + } + + result = memdesc_sg_phys(memdesc, memdesc->physaddr, size); + + if (result) + goto err; + + result = kgsl_mmu_map(pagetable, memdesc, + GSL_PT_PAGE_RV | GSL_PT_PAGE_WV); + + if (result) + goto err; + + KGSL_STATS_ADD(size, kgsl_driver.stats.coherent, + kgsl_driver.stats.coherent_max); + +err: + if (result) + kgsl_sharedmem_free(memdesc); + + return result; +} + +int +kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc, + struct kgsl_pagetable *pagetable, + size_t size, int flags) +{ + size = ALIGN(size, PAGE_SIZE); + return _kgsl_sharedmem_ebimem(memdesc, pagetable, size); +} 
+EXPORT_SYMBOL(kgsl_sharedmem_ebimem_user); + +int +kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc, + struct kgsl_pagetable *pagetable, size_t size) +{ + int result; + size = ALIGN(size, 8192); + result = _kgsl_sharedmem_ebimem(memdesc, pagetable, size); + + if (result) + return result; + + memdesc->hostptr = ioremap(memdesc->physaddr, size); + + if (memdesc->hostptr == NULL) { + KGSL_CORE_ERR("ioremap failed\n"); + kgsl_sharedmem_free(memdesc); + return -ENOMEM; + } + + return 0; +} +EXPORT_SYMBOL(kgsl_sharedmem_ebimem); + +int +kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc, + uint32_t *dst, + unsigned int offsetbytes) +{ + uint32_t *src; + BUG_ON(memdesc == NULL || memdesc->hostptr == NULL || dst == NULL); + WARN_ON(offsetbytes % sizeof(uint32_t) != 0); + if (offsetbytes % sizeof(uint32_t) != 0) + return -EINVAL; + + WARN_ON(offsetbytes + sizeof(uint32_t) > memdesc->size); + if (offsetbytes + sizeof(uint32_t) > memdesc->size) + return -ERANGE; + src = (uint32_t *)(memdesc->hostptr + offsetbytes); + *dst = *src; + return 0; +} +EXPORT_SYMBOL(kgsl_sharedmem_readl); + +int +kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc, + unsigned int offsetbytes, + uint32_t src) +{ + uint32_t *dst; + BUG_ON(memdesc == NULL || memdesc->hostptr == NULL); + WARN_ON(offsetbytes % sizeof(uint32_t) != 0); + if (offsetbytes % sizeof(uint32_t) != 0) + return -EINVAL; + + WARN_ON(offsetbytes + sizeof(uint32_t) > memdesc->size); + if (offsetbytes + sizeof(uint32_t) > memdesc->size) + return -ERANGE; + kgsl_cffdump_setmem(memdesc->gpuaddr + offsetbytes, + src, sizeof(uint32_t)); + dst = (uint32_t *)(memdesc->hostptr + offsetbytes); + *dst = src; + return 0; +} +EXPORT_SYMBOL(kgsl_sharedmem_writel); + +int +kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc, unsigned int offsetbytes, + unsigned int value, unsigned int sizebytes) +{ + BUG_ON(memdesc == NULL || memdesc->hostptr == NULL); + BUG_ON(offsetbytes + sizebytes > memdesc->size); + + kgsl_cffdump_setmem(memdesc->gpuaddr + offsetbytes, value, + sizebytes); + memset(memdesc->hostptr + offsetbytes, value, sizebytes); + return 0; +} +EXPORT_SYMBOL(kgsl_sharedmem_set); + +/* + * kgsl_sharedmem_map_vma - Map a user vma to physical memory + * + * @vma - The user vma to map + * @memdesc - The memory descriptor which contains information about the + * physical memory + * + * Return: 0 on success else error code + */ +int +kgsl_sharedmem_map_vma(struct vm_area_struct *vma, + const struct kgsl_memdesc *memdesc) +{ + unsigned long addr = vma->vm_start; + unsigned long size = vma->vm_end - vma->vm_start; + int ret, i = 0; + + if (!memdesc->sg || (size != memdesc->size) || + (memdesc->sglen != (size / PAGE_SIZE))) + return -EINVAL; + + for (; addr < vma->vm_end; addr += PAGE_SIZE, i++) { + ret = vm_insert_page(vma, addr, sg_page(&memdesc->sg[i])); + if (ret) + return ret; + } + return 0; +} +EXPORT_SYMBOL(kgsl_sharedmem_map_vma); diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h new file mode 100644 index 0000000000000..0d4cbacb968d2 --- /dev/null +++ b/drivers/gpu/msm/kgsl_sharedmem.h @@ -0,0 +1,158 @@ +/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __KGSL_SHAREDMEM_H +#define __KGSL_SHAREDMEM_H + +#include +#include +#include +#include "kgsl_mmu.h" +#include +#include + +struct kgsl_device; +struct kgsl_process_private; + +#define KGSL_CACHE_OP_INV 0x01 +#define KGSL_CACHE_OP_FLUSH 0x02 +#define KGSL_CACHE_OP_CLEAN 0x03 + +/** Set if the memdesc describes cached memory */ +#define KGSL_MEMFLAGS_CACHED 0x00000001 + +extern struct kgsl_memdesc_ops kgsl_vmalloc_ops; + +int kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc, + struct kgsl_pagetable *pagetable, size_t size); + +int kgsl_sharedmem_vmalloc_user(struct kgsl_memdesc *memdesc, + struct kgsl_pagetable *pagetable, + size_t size, int flags); + +int kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size); + +int kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc, + struct kgsl_pagetable *pagetable, + size_t size, int flags); + +int kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc, + struct kgsl_pagetable *pagetable, + size_t size); + +void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc); + +int kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc, + uint32_t *dst, + unsigned int offsetbytes); + +int kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc, + unsigned int offsetbytes, + uint32_t src); + +int kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc, + unsigned int offsetbytes, unsigned int value, + unsigned int sizebytes); + +void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op); + +void kgsl_process_init_sysfs(struct kgsl_process_private *private); +void kgsl_process_uninit_sysfs(struct kgsl_process_private *private); + +int kgsl_sharedmem_init_sysfs(void); +void kgsl_sharedmem_uninit_sysfs(void); + +static inline unsigned int kgsl_get_sg_pa(struct scatterlist *sg) +{ + /* + * Try sg_dma_address first to support ion carveout + * regions which do not work with sg_phys(). + */ + unsigned int pa = sg_dma_address(sg); + if (pa == 0) + pa = sg_phys(sg); + return pa; +} + +int +kgsl_sharedmem_map_vma(struct vm_area_struct *vma, + const struct kgsl_memdesc *memdesc); + +/* + * For relatively small sglists, it is preferable to use kzalloc + * rather than going down the vmalloc rat hole. 
If the size of + * the sglist is < PAGE_SIZE use kzalloc otherwise fallback to + * vmalloc + */ + +static inline void *kgsl_sg_alloc(unsigned int sglen) +{ + if ((sglen * sizeof(struct scatterlist)) < PAGE_SIZE) + return kzalloc(sglen * sizeof(struct scatterlist), GFP_KERNEL); + else + return vmalloc(sglen * sizeof(struct scatterlist)); +} + +static inline void kgsl_sg_free(void *ptr, unsigned int sglen) +{ + if ((sglen * sizeof(struct scatterlist)) < PAGE_SIZE) + kfree(ptr); + else + vfree(ptr); +} + +static inline int +memdesc_sg_phys(struct kgsl_memdesc *memdesc, + unsigned int physaddr, unsigned int size) +{ + memdesc->sg = kgsl_sg_alloc(1); + + kmemleak_not_leak(memdesc->sg); + + memdesc->sglen = 1; + sg_init_table(memdesc->sg, 1); + memdesc->sg[0].length = size; + memdesc->sg[0].offset = 0; + memdesc->sg[0].dma_address = physaddr; + return 0; +} + +static inline int +kgsl_allocate(struct kgsl_memdesc *memdesc, + struct kgsl_pagetable *pagetable, size_t size) +{ + if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE) + return kgsl_sharedmem_ebimem(memdesc, pagetable, size); + return kgsl_sharedmem_vmalloc(memdesc, pagetable, size); +} + +static inline int +kgsl_allocate_user(struct kgsl_memdesc *memdesc, + struct kgsl_pagetable *pagetable, + size_t size, unsigned int flags) +{ + if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE) + return kgsl_sharedmem_ebimem_user(memdesc, pagetable, size, + flags); + return kgsl_sharedmem_vmalloc_user(memdesc, pagetable, size, flags); +} + +static inline int +kgsl_allocate_contiguous(struct kgsl_memdesc *memdesc, size_t size) +{ + int ret = kgsl_sharedmem_alloc_coherent(memdesc, size); + if (!ret && (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)) + memdesc->gpuaddr = memdesc->physaddr; + return ret; +} + +#endif /* __KGSL_SHAREDMEM_H */ diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c new file mode 100644 index 0000000000000..e1e704b9e9440 --- /dev/null +++ b/drivers/gpu/msm/kgsl_snapshot.c @@ -0,0 +1,489 @@ +/* Copyright (c) 2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include + +#include "kgsl.h" +#include "kgsl_log.h" +#include "kgsl_device.h" +#include "kgsl_sharedmem.h" +#include "kgsl_snapshot.h" + +/* idr_for_each function to count the number of contexts */ + +static int snapshot_context_count(int id, void *ptr, void *data) +{ + int *count = data; + *count = *count + 1; + + return 0; +} + +/* + * To simplify the iterator loop use a global pointer instead of trying + * to pass around double star references to the snapshot data + */ + +static void *_ctxtptr; + +static int snapshot_context_info(int id, void *ptr, void *data) +{ + struct kgsl_snapshot_linux_context *header = _ctxtptr; + struct kgsl_context *context = ptr; + struct kgsl_device *device = context->dev_priv->device; + + header->id = id; + + /* Future-proof for per-context timestamps - for now, just + * return the global timestamp for all contexts + */ + + header->timestamp_queued = -1; + header->timestamp_retired = device->ftbl->readtimestamp(device, + KGSL_TIMESTAMP_RETIRED); + + _ctxtptr += sizeof(struct kgsl_snapshot_linux_context); + + return 0; +} + +/* Snapshot the Linux specific information */ +static int snapshot_os(struct kgsl_device *device, + void *snapshot, int remain, void *priv) +{ + struct kgsl_snapshot_linux *header = snapshot; + struct kgsl_pwrctrl *pwr = &device->pwrctrl; + struct task_struct *task; + pid_t pid; + int hang = (int) priv; + int ctxtcount = 0; + int size = sizeof(*header); + + /* Figure out how many active contexts there are - these will + * be appended on the end of the structure */ + + idr_for_each(&device->context_idr, snapshot_context_count, &ctxtcount); + + size += ctxtcount * sizeof(struct kgsl_snapshot_linux_context); + + /* Make sure there is enough room for the data */ + if (remain < size) { + SNAPSHOT_ERR_NOMEM(device, "OS"); + return 0; + } + + memset(header, 0, sizeof(*header)); + + header->osid = KGSL_SNAPSHOT_OS_LINUX; + + header->state = hang ? 
SNAPSHOT_STATE_HUNG : SNAPSHOT_STATE_RUNNING;
+
+	/* Get the kernel build information */
+	strlcpy(header->release, utsname()->release, sizeof(header->release));
+	strlcpy(header->version, utsname()->version, sizeof(header->version));
+
+	/* Get the Unix time for the timestamp */
+	header->seconds = get_seconds();
+
+	/* Remember the power information */
+	header->power_flags = pwr->power_flags;
+	header->power_level = pwr->active_pwrlevel;
+	header->power_interval_timeout = pwr->interval_timeout;
+	header->grpclk = kgsl_get_clkrate(pwr->grp_clks[0]);
+	header->busclk = kgsl_get_clkrate(pwr->ebi1_clk);
+
+	/* Future proof for per-context timestamps */
+	header->current_context = -1;
+
+	/* Get the current PT base */
+	header->ptbase = kgsl_mmu_get_current_ptbase(device);
+	/* And the PID for the task leader */
+	pid = header->pid = kgsl_mmu_get_ptname_from_ptbase(header->ptbase);
+
+	task = find_task_by_vpid(pid);
+
+	if (task)
+		get_task_comm(header->comm, task);
+
+	header->ctxtcount = ctxtcount;
+
+	/* append information for each context */
+	_ctxtptr = snapshot + sizeof(*header);
+	idr_for_each(&device->context_idr, snapshot_context_info, NULL);
+
+	/* Return the size of the data segment */
+	return size;
+}
+/*
+ * kgsl_snapshot_dump_indexed_regs - helper function to dump indexed registers
+ * @device - the device to dump registers from
+ * @snapshot - pointer to the start of the region of memory for the snapshot
+ * @remain - the number of bytes remaining in the snapshot
+ * @priv - A pointer to the kgsl_snapshot_indexed_registers data
+ *
+ * Given an indexed register cmd/data pair and a count, dump each indexed
+ * register
+ */
+
+int kgsl_snapshot_dump_indexed_regs(struct kgsl_device *device,
+	void *snapshot, int remain, void *priv)
+{
+	struct kgsl_snapshot_indexed_registers *iregs = priv;
+	struct kgsl_snapshot_indexed_regs *header = snapshot;
+	unsigned int *data = snapshot + sizeof(*header);
+	int i;
+
+	if (remain < (iregs->count * 4) + sizeof(*header)) {
+		SNAPSHOT_ERR_NOMEM(device, "INDEXED REGS");
+		return 0;
+	}
+
+	header->index_reg = iregs->index;
+	header->data_reg = iregs->data;
+	header->count = iregs->count;
+	header->start = iregs->start;
+
+	for (i = 0; i < iregs->count; i++) {
+		kgsl_regwrite(device, iregs->index, iregs->start + i);
+		kgsl_regread(device, iregs->data, &data[i]);
+	}
+
+	return (iregs->count * 4) + sizeof(*header);
+}
+EXPORT_SYMBOL(kgsl_snapshot_dump_indexed_regs);
+
+/*
+ * kgsl_snapshot_dump_regs - helper function to dump device registers
+ * @device - the device to dump registers from
+ * @snapshot - pointer to the start of the region of memory for the snapshot
+ * @remain - the number of bytes remaining in the snapshot
+ * @priv - A pointer to the kgsl_snapshot_registers data
+ *
+ * Given an array of register range pairs (start,end [inclusive]), dump the
+ * registers into a snapshot register section. The snapshot region stores a
+ * pair of dwords for each register - the word address of the register, and
+ * the value.
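+ *
+ * For example (the offsets here are purely illustrative), passing
+ * regs = { 0x2000, 0x2003, 0x2180, 0x2181 } with count = 2 dumps the two
+ * inclusive ranges 0x2000-0x2003 and 0x2180-0x2181 and emits an
+ * (address, value) dword pair for each of the six registers covered.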
+ */
+int kgsl_snapshot_dump_regs(struct kgsl_device *device, void *snapshot,
+	int remain, void *priv)
+{
+	struct kgsl_snapshot_regs *header = snapshot;
+	struct kgsl_snapshot_registers *regs = priv;
+	unsigned int *data = snapshot + sizeof(*header);
+	int count = 0, i, j;
+
+	/* Figure out how many registers we are going to dump */
+
+	for (i = 0; i < regs->count; i++) {
+		int start = regs->regs[i * 2];
+		int end = regs->regs[i * 2 + 1];
+
+		count += (end - start + 1);
+	}
+
+	if (remain < (count * 8) + sizeof(*header)) {
+		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+		return 0;
+	}
+
+	for (i = 0; i < regs->count; i++) {
+		unsigned int start = regs->regs[i * 2];
+		unsigned int end = regs->regs[i * 2 + 1];
+
+		for (j = start; j <= end; j++) {
+			unsigned int val;
+
+			kgsl_regread(device, j, &val);
+			*data++ = j;
+			*data++ = val;
+		}
+	}
+
+	header->count = count;
+
+	/* Return the size of the section */
+	return (count * 8) + sizeof(*header);
+}
+EXPORT_SYMBOL(kgsl_snapshot_dump_regs);
+
+/*
+ * kgsl_device_snapshot - construct a device snapshot
+ * @device - device to snapshot
+ * @hang - set to 1 if the snapshot was triggered following a hang
+ *
+ * Given a device, construct a binary snapshot dump of the current device state
+ * and store it in the device snapshot memory.
+ */
+int kgsl_device_snapshot(struct kgsl_device *device, int hang)
+{
+	struct kgsl_snapshot_header *header = device->snapshot;
+	int remain = device->snapshot_maxsize - sizeof(*header);
+	void *snapshot;
+
+	/*
+	 * The first hang is always the one we are interested in. To
+	 * avoid a subsequent hang blowing away the first, the snapshot
+	 * is frozen until it is dumped via sysfs.
+	 *
+	 * Note that triggered snapshots are always taken regardless
+	 * of the state and never frozen.
+	 */
+
+	if (hang && device->snapshot_frozen == 1)
+		return 0;
+
+	if (device->snapshot == NULL) {
+		KGSL_DRV_ERR(device,
+			"snapshot: No snapshot memory available\n");
+		return -ENOMEM;
+	}
+
+	if (remain < sizeof(*header)) {
+		KGSL_DRV_ERR(device,
+			"snapshot: Not enough memory for the header\n");
+		return -ENOMEM;
+	}
+
+	header->magic = SNAPSHOT_MAGIC;
+
+	header->gpuid = kgsl_gpuid(device);
+
+	/* Get a pointer to the first section (right after the header) */
+	snapshot = ((void *) device->snapshot) + sizeof(*header);
+
+	/* Build the Linux specific header */
+	snapshot = kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_OS,
+		snapshot, &remain, snapshot_os, (void *) hang);
+
+	/* Get the device specific sections */
+	if (device->ftbl->snapshot)
+		snapshot = device->ftbl->snapshot(device, snapshot, &remain,
+			hang);
+
+	/* Add the empty end section to let the parser know we are done */
+	snapshot = kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_END,
+		snapshot, &remain, NULL, NULL);
+
+	device->snapshot_timestamp = get_seconds();
+	device->snapshot_size = (int) (snapshot - device->snapshot);
+
+	/* Freeze the snapshot on a hang until it gets read */
+	device->snapshot_frozen = (hang) ?
1 : 0; + + /* log buffer info to aid in ramdump recovery */ + KGSL_DRV_ERR(device, "snapshot created at va %p pa %lx size %d\n", + device->snapshot, __pa(device->snapshot), + device->snapshot_size); + if (hang) + sysfs_notify(&device->snapshot_kobj, NULL, "timestamp"); + return 0; +} +EXPORT_SYMBOL(kgsl_device_snapshot); + +/* An attribute for showing snapshot details */ +struct kgsl_snapshot_attribute { + struct attribute attr; + ssize_t (*show)(struct kgsl_device *device, char *buf); + ssize_t (*store)(struct kgsl_device *device, const char *buf, + size_t count); +}; + +#define to_snapshot_attr(a) \ +container_of(a, struct kgsl_snapshot_attribute, attr) + +#define kobj_to_device(a) \ +container_of(a, struct kgsl_device, snapshot_kobj) + +/* Dump the sysfs binary data to the user */ +static ssize_t snapshot_show(struct file *filep, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t off, + size_t count) +{ + struct kgsl_device *device = kobj_to_device(kobj); + + if (device == NULL) + return 0; + + /* Return nothing if we haven't taken a snapshot yet */ + if (device->snapshot_timestamp == 0) + return 0; + + /* Get the mutex to keep things from changing while we are dumping */ + mutex_lock(&device->mutex); + + /* + * Release the freeze on the snapshot the first time the buffer is read + */ + + device->snapshot_frozen = 0; + + if (off >= device->snapshot_size) { + count = 0; + goto exit; + } + + if (off + count > device->snapshot_size) + count = device->snapshot_size - off; + + memcpy(buf, device->snapshot + off, count); + +exit: + mutex_unlock(&device->mutex); + return count; +} + +/* Show the timestamp of the last collected snapshot */ +static ssize_t timestamp_show(struct kgsl_device *device, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%x\n", device->snapshot_timestamp); +} + +/* manually trigger a new snapshot to be collected */ +static ssize_t trigger_store(struct kgsl_device *device, const char *buf, + size_t count) +{ + if (device && count > 0) { + mutex_lock(&device->mutex); + kgsl_device_snapshot(device, 0); + mutex_unlock(&device->mutex); + } + + return count; +} + +static struct bin_attribute snapshot_attr = { + .attr.name = "dump", + .attr.mode = 0444, + .size = 0, + .read = snapshot_show +}; + +#define SNAPSHOT_ATTR(_name, _mode, _show, _store) \ +struct kgsl_snapshot_attribute attr_##_name = { \ + .attr = { .name = __stringify(_name), .mode = _mode }, \ + .show = _show, \ + .store = _store, \ +} + +SNAPSHOT_ATTR(trigger, 0600, NULL, trigger_store); +SNAPSHOT_ATTR(timestamp, 0444, timestamp_show, NULL); + +static void snapshot_sysfs_release(struct kobject *kobj) +{ +} + +static ssize_t snapshot_sysfs_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct kgsl_snapshot_attribute *pattr = to_snapshot_attr(attr); + struct kgsl_device *device = kobj_to_device(kobj); + ssize_t ret; + + if (device && pattr->show) + ret = pattr->show(device, buf); + else + ret = -EIO; + + return ret; +} + +static ssize_t snapshot_sysfs_store(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + struct kgsl_snapshot_attribute *pattr = to_snapshot_attr(attr); + struct kgsl_device *device = kobj_to_device(kobj); + ssize_t ret; + + if (device && pattr->store) + ret = pattr->store(device, buf, count); + else + ret = -EIO; + + return ret; +} + +static const struct sysfs_ops snapshot_sysfs_ops = { + .show = snapshot_sysfs_show, + .store = snapshot_sysfs_store, +}; + +static struct kobj_type ktype_snapshot = { + .sysfs_ops = 
&snapshot_sysfs_ops,
+	.default_attrs = NULL,
+	.release = snapshot_sysfs_release,
+};
+
+/* kgsl_device_snapshot_init - Add resources for the device GPU snapshot
+ * @device - The device to initialize
+ *
+ * Allocate memory for a GPU snapshot for the specified device,
+ * and create the sysfs files to manage it
+ */
+
+int kgsl_device_snapshot_init(struct kgsl_device *device)
+{
+	int ret;
+
+	if (device->snapshot == NULL)
+		device->snapshot = kzalloc(KGSL_SNAPSHOT_MEMSIZE, GFP_KERNEL);
+
+	if (device->snapshot == NULL)
+		return -ENOMEM;
+
+	device->snapshot_maxsize = KGSL_SNAPSHOT_MEMSIZE;
+	device->snapshot_timestamp = 0;
+
+	ret = kobject_init_and_add(&device->snapshot_kobj, &ktype_snapshot,
+		&device->dev->kobj, "snapshot");
+	if (ret)
+		goto done;
+
+	ret = sysfs_create_bin_file(&device->snapshot_kobj, &snapshot_attr);
+	if (ret)
+		goto done;
+
+	ret = sysfs_create_file(&device->snapshot_kobj, &attr_trigger.attr);
+	if (ret)
+		goto done;
+
+	ret = sysfs_create_file(&device->snapshot_kobj, &attr_timestamp.attr);
+
+done:
+	return ret;
+}
+EXPORT_SYMBOL(kgsl_device_snapshot_init);
+
+/* kgsl_device_snapshot_close - Take down snapshot memory for a device
+ * @device - Pointer to the kgsl_device
+ *
+ * Remove the sysfs files and free the memory allocated for the GPU
+ * snapshot
+ */
+
+void kgsl_device_snapshot_close(struct kgsl_device *device)
+{
+	sysfs_remove_bin_file(&device->snapshot_kobj, &snapshot_attr);
+	sysfs_remove_file(&device->snapshot_kobj, &attr_trigger.attr);
+	sysfs_remove_file(&device->snapshot_kobj, &attr_timestamp.attr);
+
+	kobject_put(&device->snapshot_kobj);
+
+	kfree(device->snapshot);
+
+	device->snapshot = NULL;
+	device->snapshot_maxsize = 0;
+	device->snapshot_timestamp = 0;
+}
+EXPORT_SYMBOL(kgsl_device_snapshot_close);
diff --git a/drivers/gpu/msm/kgsl_snapshot.h b/drivers/gpu/msm/kgsl_snapshot.h
new file mode 100644
index 0000000000000..86c2c84f0c3e3
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_snapshot.h
@@ -0,0 +1,259 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _KGSL_SNAPSHOT_H_
+#define _KGSL_SNAPSHOT_H_
+
+#include
+
+/* Snapshot header */
+
+#define SNAPSHOT_MAGIC 0x504D0001
+
+/* GPU ID scheme:
+ * [16:31] - core identifier (0x0002 for 2D or 0x0003 for 3D)
+ * [00:15] - GPU specific identifier
+ */
+
+struct kgsl_snapshot_header {
+	__u32 magic; /* Magic identifier */
+	__u32 gpuid; /* GPU ID - see above */
+} __packed;
+
+/* Section header */
+#define SNAPSHOT_SECTION_MAGIC 0xABCD
+
+struct kgsl_snapshot_section_header {
+	__u16 magic; /* Magic identifier */
+	__u16 id; /* Type of section */
+	__u32 size; /* Size of the section including this header */
+} __packed;
+
+/* Section identifiers */
+#define KGSL_SNAPSHOT_SECTION_OS 0x0101
+#define KGSL_SNAPSHOT_SECTION_REGS 0x0201
+#define KGSL_SNAPSHOT_SECTION_RB 0x0301
+#define KGSL_SNAPSHOT_SECTION_IB 0x0401
+#define KGSL_SNAPSHOT_SECTION_INDEXED_REGS 0x0501
+#define KGSL_SNAPSHOT_SECTION_ISTORE 0x0801
+#define KGSL_SNAPSHOT_SECTION_DEBUG 0x0901
+#define KGSL_SNAPSHOT_SECTION_END 0xFFFF
+
+/* OS sub-section header */
+#define KGSL_SNAPSHOT_OS_LINUX 0x0001
+
+/* Linux OS specific information */
+
+#define SNAPSHOT_STATE_HUNG 0
+#define SNAPSHOT_STATE_RUNNING 1
+
+struct kgsl_snapshot_linux {
+	int osid; /* subsection OS identifier */
+	int state; /* 1 if the thread is running, 0 for hung */
+	__u32 seconds; /* Unix timestamp for the snapshot */
+	__u32 power_flags; /* Current power flags */
+	__u32 power_level; /* Current power level */
+	__u32 power_interval_timeout; /* Power interval timeout */
+	__u32 grpclk; /* Current GP clock value */
+	__u32 busclk; /* Current busclk value */
+	__u32 ptbase; /* Current ptbase */
+	__u32 pid; /* PID of the process that owns the PT */
+	__u32 current_context; /* ID of the current context */
+	__u32 ctxtcount; /* Number of contexts appended to section */
+	unsigned char release[32]; /* kernel release */
+	unsigned char version[32]; /* kernel version */
+	unsigned char comm[16]; /* Name of the process that owns the PT */
+} __packed;
+
+/*
+ * This structure contains a record of an active context.
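+ * One entry is written for each context currently registered with the
+ * device (walked via the device's context idr).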
+ * These are appended one after another in the OS section below
+ * the header above
+ */
+
+struct kgsl_snapshot_linux_context {
+	__u32 id; /* The context ID */
+	__u32 timestamp_queued; /* The last queued timestamp */
+	__u32 timestamp_retired; /* The last timestamp retired by HW */
+};
+
+/* Ringbuffer sub-section header */
+struct kgsl_snapshot_rb {
+	int start; /* dword at the start of the dump */
+	int end; /* dword at the end of the dump */
+	int rbsize; /* Size (in dwords) of the ringbuffer */
+	int wptr; /* Current index of the CPU write pointer */
+	int rptr; /* Current index of the GPU read pointer */
+	int count; /* Number of dwords in the dump */
+} __packed;
+
+/* Indirect buffer sub-section header */
+struct kgsl_snapshot_ib {
+	__u32 gpuaddr; /* GPU address of the IB */
+	__u32 ptbase; /* Base for the pagetable the GPU address is valid in */
+	int size; /* Size of the IB */
+} __packed;
+
+/* Register sub-section header */
+struct kgsl_snapshot_regs {
+	__u32 count; /* Number of register pairs in the section */
+} __packed;
+
+/* Indexed register sub-section header */
+struct kgsl_snapshot_indexed_regs {
+	__u32 index_reg; /* Offset of the index register for this section */
+	__u32 data_reg; /* Offset of the data register for this section */
+	int start; /* Starting index */
+	int count; /* Number of dwords in the data */
+} __packed;
+
+/* Istore sub-section header */
+struct kgsl_snapshot_istore {
+	int count; /* Number of instructions in the istore */
+} __packed;
+
+/* Debug data sub-section header */
+
+#define SNAPSHOT_DEBUG_SX 1
+#define SNAPSHOT_DEBUG_CP 2
+#define SNAPSHOT_DEBUG_SQ 3
+#define SNAPSHOT_DEBUG_SQTHREAD 4
+#define SNAPSHOT_DEBUG_MIU 5
+
+struct kgsl_snapshot_debug {
+	int type; /* Type identifier for the attached data */
+	int size; /* Size of the section in bytes */
+} __packed;
+
+#ifdef __KERNEL__
+
+/* Allocate 512K for each device snapshot */
+#define KGSL_SNAPSHOT_MEMSIZE (512 * 1024)
+
+struct kgsl_device;
+/*
+ * A helper macro to print out "not enough memory" messages - this
+ * makes it easy to standardize the messages as well as cut down on
+ * the number of strings in the binary
+ */
+
+#define SNAPSHOT_ERR_NOMEM(_d, _s) \
+	KGSL_DRV_ERR((_d), \
+	"snapshot: not enough snapshot memory for section %s\n", (_s))
+
+/*
+ * kgsl_snapshot_add_section - Add a new section to the GPU snapshot
+ * @device - the KGSL device being snapshotted
+ * @id - the section id
+ * @snapshot - pointer to the memory for the snapshot
+ * @remain - pointer to the number of bytes left in the snapshot region
+ * @func - Function pointer to fill the section
+ * @priv - Priv pointer to pass to the function
+ *
+ * Set up a KGSL snapshot header by filling the memory with the callback
+ * function and adding the standard section header
+ */
+
+static inline void *kgsl_snapshot_add_section(struct kgsl_device *device,
+	u16 id, void *snapshot, int *remain,
+	int (*func)(struct kgsl_device *, void *, int, void *), void *priv)
+{
+	struct kgsl_snapshot_section_header *header = snapshot;
+	void *data = snapshot + sizeof(*header);
+	int ret = 0;
+
+	/*
+	 * Sanity check to make sure there is enough for the header. The
+	 * callback will check to make sure there is enough for the rest
+	 * of the data. If there isn't enough room then don't advance the
+	 * pointer.
+	 */
+
+	if (*remain < sizeof(*header))
+		return snapshot;
+
+	/* It is legal to have no function (i.e.
- make an empty section) */ + + if (func) { + ret = func(device, data, *remain, priv); + + /* + * If there wasn't enough room for the data then don't bother + * setting up the header. + */ + + if (ret == 0) + return snapshot; + } + + header->magic = SNAPSHOT_SECTION_MAGIC; + header->id = id; + header->size = ret + sizeof(*header); + + /* Decrement the room left in the snapshot region */ + *remain -= header->size; + /* Advance the pointer to the end of the next function */ + return snapshot + header->size; +} + +/* A common helper function to dump a range of registers. This will be used in + * the GPU specific devices like this: + * + * struct kgsl_snapshot_registers priv; + * priv.regs = registers_array;; + * priv.count = num_registers; + * + * kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS, snapshot, + * remain, kgsl_snapshot_dump_regs, &priv). + * + * Pass in an array of register range pairs in the form of: + * start reg, stop reg + * All the registers between start and stop inclusive will be dumped + */ + +struct kgsl_snapshot_registers { + unsigned int *regs; /* Pointer to the array of register ranges */ + int count; /* Number of entries in the array */ +}; + +int kgsl_snapshot_dump_regs(struct kgsl_device *device, void *snapshot, + int remain, void *priv); + +/* + * A common helper function to dump a set of indexed registers. Use it + * like this: + * + * struct kgsl_snapshot_indexed_registers priv; + * priv.index = REG_INDEX; + * priv.data = REG_DATA; + * priv.count = num_registers + * + * kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_INDEXED_REGS, + * snapshot, remain, kgsl_snapshot_dump_indexed_regs, &priv). + * + * The callback function will write an index from 0 to priv.count to + * the index register and read the data from the data register. + */ + +struct kgsl_snapshot_indexed_registers { + unsigned int index; /* Offset of the index register */ + unsigned int data; /* Offset of the data register */ + unsigned int start; /* Index to start with */ + unsigned int count; /* Number of values to read from the pair */ +}; + +int kgsl_snapshot_dump_indexed_regs(struct kgsl_device *device, + void *snapshot, int remain, void *priv); + +#endif +#endif diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c new file mode 100644 index 0000000000000..057ae30cc21d2 --- /dev/null +++ b/drivers/gpu/msm/kgsl_sync.c @@ -0,0 +1,212 @@ +/* Copyright (c) 2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include + +#include "kgsl_sync.h" + +struct sync_pt *kgsl_sync_pt_create(struct sync_timeline *timeline, + unsigned int timestamp) +{ + struct sync_pt *pt; + pt = sync_pt_create(timeline, (int) sizeof(struct kgsl_sync_pt)); + if (pt) { + struct kgsl_sync_pt *kpt = (struct kgsl_sync_pt *) pt; + kpt->timestamp = timestamp; + } + return pt; +} + +/* + * This should only be called on sync_pts which have been created but + * not added to a fence. 
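+ * Once a point has been handed to sync_fence_create() the fence owns it
+ * and will free it when the fence itself is released, so it must not be
+ * destroyed again here.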
+ */ +void kgsl_sync_pt_destroy(struct sync_pt *pt) +{ + sync_pt_free(pt); +} + +static struct sync_pt *kgsl_sync_pt_dup(struct sync_pt *pt) +{ + struct kgsl_sync_pt *kpt = (struct kgsl_sync_pt *) pt; + return kgsl_sync_pt_create(pt->parent, kpt->timestamp); +} + +static int kgsl_sync_pt_has_signaled(struct sync_pt *pt) +{ + struct kgsl_sync_pt *kpt = (struct kgsl_sync_pt *) pt; + struct kgsl_sync_timeline *ktimeline = + (struct kgsl_sync_timeline *) pt->parent; + unsigned int ts = kpt->timestamp; + unsigned int last_ts = ktimeline->last_timestamp; + if (timestamp_cmp(last_ts, ts) >= 0) { + /* signaled */ + return 1; + } + return 0; +} + +static int kgsl_sync_pt_compare(struct sync_pt *a, struct sync_pt *b) +{ + struct kgsl_sync_pt *kpt_a = (struct kgsl_sync_pt *) a; + struct kgsl_sync_pt *kpt_b = (struct kgsl_sync_pt *) b; + unsigned int ts_a = kpt_a->timestamp; + unsigned int ts_b = kpt_b->timestamp; + return timestamp_cmp(ts_a, ts_b); +} + +struct kgsl_fence_event_priv { + struct kgsl_context *context; +}; + +/** + * kgsl_fence_event_cb - Event callback for a fence timestamp event + * @device - The KGSL device that expired the timestamp + * @priv - private data for the event + * @context_id - the context id that goes with the timestamp + * @timestamp - the timestamp that triggered the event + * + * Signal a fence following the expiration of a timestamp + */ + +static inline void kgsl_fence_event_cb(struct kgsl_device *device, + void *priv, u32 timestamp) +{ + struct kgsl_fence_event_priv *ev = priv; + kgsl_sync_timeline_signal(ev->context->timeline, timestamp); + kfree(ev); +} + +/** + * kgsl_add_fence_event - Create a new fence event + * @device - KGSL device to create the event on + * @timestamp - Timestamp to trigger the event + * @data - Return fence fd stored in struct kgsl_timestamp_event_fence + * @len - length of the fence event + * @owner - driver instance that owns this event + * @returns 0 on success or error code on error + * + * Create a fence and register an event to signal the fence when + * the timestamp expires + */ + +int kgsl_add_fence_event(struct kgsl_device *device, + u32 context_id, u32 timestamp, void __user *data, int len, + struct kgsl_device_private *owner) +{ + struct kgsl_fence_event_priv *event; + struct kgsl_timestamp_event_fence priv; + struct kgsl_context *context; + struct sync_pt *pt; + struct sync_fence *fence = NULL; + int ret = -EINVAL; + + if (len != sizeof(priv)) + return -EINVAL; + + context = kgsl_find_context(owner, context_id); + if (context == NULL) + return -EINVAL; + + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (event == NULL) + return -ENOMEM; + event->context = context; + + pt = kgsl_sync_pt_create(context->timeline, timestamp); + if (pt == NULL) { + KGSL_DRV_ERR(device, "kgsl_sync_pt_create failed\n"); + ret = -ENOMEM; + goto fail_pt; + } + + fence = sync_fence_create("kgsl-fence", pt); + if (fence == NULL) { + /* only destroy pt when not added to fence */ + kgsl_sync_pt_destroy(pt); + KGSL_DRV_ERR(device, "sync_fence_create failed\n"); + ret = -ENOMEM; + goto fail_fence; + } + + priv.fence_fd = get_unused_fd_flags(0); + if (priv.fence_fd < 0) { + KGSL_DRV_ERR(device, "invalid fence fd\n"); + ret = -EINVAL; + goto fail_fd; + } + sync_fence_install(fence, priv.fence_fd); + + if (copy_to_user(data, &priv, sizeof(priv))) { + ret = -EFAULT; + goto fail_copy_fd; + } + + ret = kgsl_add_event(device, timestamp, + kgsl_fence_event_cb, event, owner); + if (ret) + goto fail_event; + + return 0; + +fail_event: +fail_copy_fd: + /* clean up 
sync_fence_install */ + sync_fence_put(fence); + put_unused_fd(priv.fence_fd); +fail_fd: + /* clean up sync_fence_create */ + sync_fence_put(fence); +fail_fence: +fail_pt: + kfree(event); + return ret; +} + +static const struct sync_timeline_ops kgsl_sync_timeline_ops = { + .driver_name = "kgsl-timeline", + .dup = kgsl_sync_pt_dup, + .has_signaled = kgsl_sync_pt_has_signaled, + .compare = kgsl_sync_pt_compare, +}; + +int kgsl_sync_timeline_create(struct kgsl_context *context) +{ + struct kgsl_sync_timeline *ktimeline; + + context->timeline = sync_timeline_create(&kgsl_sync_timeline_ops, + (int) sizeof(struct kgsl_sync_timeline), "kgsl-timeline"); + if (context->timeline == NULL) + return -EINVAL; + + ktimeline = (struct kgsl_sync_timeline *) context->timeline; + ktimeline->last_timestamp = 0; + + return 0; +} + +void kgsl_sync_timeline_signal(struct sync_timeline *timeline, + unsigned int timestamp) +{ + struct kgsl_sync_timeline *ktimeline = + (struct kgsl_sync_timeline *) timeline; + ktimeline->last_timestamp = timestamp; + sync_timeline_signal(timeline); +} + +void kgsl_sync_timeline_destroy(struct kgsl_context *context) +{ + sync_timeline_destroy(context->timeline); +} diff --git a/drivers/gpu/msm/kgsl_sync.h b/drivers/gpu/msm/kgsl_sync.h new file mode 100644 index 0000000000000..06b3ad0d89188 --- /dev/null +++ b/drivers/gpu/msm/kgsl_sync.h @@ -0,0 +1,75 @@ +/* Copyright (c) 2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef __KGSL_SYNC_H +#define __KGSL_SYNC_H + +#include +#include "kgsl_device.h" + +struct kgsl_sync_timeline { + struct sync_timeline timeline; + unsigned int last_timestamp; +}; + +struct kgsl_sync_pt { + struct sync_pt pt; + unsigned int timestamp; +}; + +#if defined(CONFIG_SYNC) +struct sync_pt *kgsl_sync_pt_create(struct sync_timeline *timeline, + unsigned int timestamp); +void kgsl_sync_pt_destroy(struct sync_pt *pt); +int kgsl_add_fence_event(struct kgsl_device *device, + u32 context_id, u32 timestamp, void __user *data, int len, + struct kgsl_device_private *owner); +int kgsl_sync_timeline_create(struct kgsl_context *context); +void kgsl_sync_timeline_signal(struct sync_timeline *timeline, + unsigned int timestamp); +void kgsl_sync_timeline_destroy(struct kgsl_context *context); +#else +static inline struct sync_pt +*kgsl_sync_pt_create(struct sync_timeline *timeline, unsigned int timestamp) +{ + return NULL; +} + +static inline void kgsl_sync_pt_destroy(struct sync_pt *pt) +{ +} + +static inline int kgsl_add_fence_event(struct kgsl_device *device, + u32 context_id, u32 timestamp, void __user *data, int len, + struct kgsl_device_private *owner) +{ + return -EINVAL; +} + +static int kgsl_sync_timeline_create(struct kgsl_context *context) +{ + context->timeline = NULL; + return 0; +} + +static inline void +kgsl_sync_timeline_signal(struct sync_timeline *timeline, + unsigned int timestamp) +{ +} + +static inline void kgsl_sync_timeline_destroy(struct kgsl_context *context) +{ +} +#endif + +#endif /* __KGSL_SYNC_H */ diff --git a/drivers/gpu/msm/kgsl_trace.c b/drivers/gpu/msm/kgsl_trace.c new file mode 100644 index 0000000000000..e432729fb3537 --- /dev/null +++ b/drivers/gpu/msm/kgsl_trace.c @@ -0,0 +1,19 @@ +/* Copyright (c) 2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include "kgsl.h" +#include "kgsl_device.h" + +/* Instantiate tracepoints */ +#define CREATE_TRACE_POINTS +#include "kgsl_trace.h" diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h new file mode 100644 index 0000000000000..0ebc276fb71dc --- /dev/null +++ b/drivers/gpu/msm/kgsl_trace.h @@ -0,0 +1,263 @@ +/* Copyright (c) 2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#if !defined(_KGSL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _KGSL_TRACE_H + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM kgsl +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
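+/*
+ * TRACE_INCLUDE_PATH (above) and TRACE_INCLUDE_FILE (below) tell the
+ * define_trace.h machinery, pulled in at the end of this header, to look
+ * for kgsl_trace.h in the driver's local directory rather than in the
+ * default include/trace/events location.
+ */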
+#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE kgsl_trace + +#include + +struct kgsl_device; +struct kgsl_ringbuffer_issueibcmds; +struct kgsl_device_waittimestamp; + +/* + * Tracepoint for kgsl issue ib commands + */ +TRACE_EVENT(kgsl_issueibcmds, + + TP_PROTO(struct kgsl_device *device, + struct kgsl_ringbuffer_issueibcmds *cmd, int result), + + TP_ARGS(device, cmd, result), + + TP_STRUCT__entry( + __string(device_name, device->name) + __field(unsigned int, drawctxt_id) + __field(unsigned int, ibdesc_addr) + __field(unsigned int, numibs) + __field(unsigned int, timestamp) + __field(unsigned int, flags) + __field(int, result) + ), + + TP_fast_assign( + __assign_str(device_name, device->name); + __entry->drawctxt_id = cmd->drawctxt_id; + __entry->ibdesc_addr = cmd->ibdesc_addr; + __entry->numibs = cmd->numibs; + __entry->timestamp = cmd->timestamp; + __entry->flags = cmd->flags; + __entry->result = result; + ), + + TP_printk( + "d_name=%s ctx=%u ib=%u numibs=%u timestamp=%u " + "flags=%u result=%d", + __get_str(device_name), + __entry->drawctxt_id, + __entry->ibdesc_addr, + __entry->numibs, + __entry->timestamp, + __entry->flags, + __entry->result + ) +); + +/* + * Tracepoint for kgsl readtimestamp + */ +TRACE_EVENT(kgsl_readtimestamp, + + TP_PROTO(struct kgsl_device *device, + struct kgsl_cmdstream_readtimestamp *cmd), + + TP_ARGS(device, cmd), + + TP_STRUCT__entry( + __string(device_name, device->name) + __field(unsigned int, type) + __field(unsigned int, timestamp) + ), + + TP_fast_assign( + __assign_str(device_name, device->name); + __entry->type = cmd->type; + __entry->timestamp = cmd->timestamp; + ), + + TP_printk( + "d_name=%s type=%u timestamp=%u", + __get_str(device_name), + __entry->type, + __entry->timestamp + ) +); + +/* + * Tracepoint for kgsl waittimestamp entry + */ +TRACE_EVENT(kgsl_waittimestamp_entry, + + TP_PROTO(struct kgsl_device *device, + struct kgsl_device_waittimestamp *cmd), + + TP_ARGS(device, cmd), + + TP_STRUCT__entry( + __string(device_name, device->name) + __field(unsigned int, timestamp) + __field(unsigned int, timeout) + ), + + TP_fast_assign( + __assign_str(device_name, device->name); + __entry->timestamp = cmd->timestamp; + __entry->timeout = cmd->timeout; + ), + + TP_printk( + "d_name=%s timestamp=%u timeout=%u", + __get_str(device_name), + __entry->timestamp, + __entry->timeout + ) +); + +/* + * Tracepoint for kgsl waittimestamp exit + */ +TRACE_EVENT(kgsl_waittimestamp_exit, + + TP_PROTO(struct kgsl_device *device, int result), + + TP_ARGS(device, result), + + TP_STRUCT__entry( + __string(device_name, device->name) + __field(int, result) + ), + + TP_fast_assign( + __assign_str(device_name, device->name); + __entry->result = result; + ), + + TP_printk( + "d_name=%s result=%d", + __get_str(device_name), + __entry->result + ) +); + +DECLARE_EVENT_CLASS(kgsl_pwr_template, + TP_PROTO(struct kgsl_device *device, int on), + + TP_ARGS(device, on), + + TP_STRUCT__entry( + __string(device_name, device->name) + __field(int, on) + ), + + TP_fast_assign( + __assign_str(device_name, device->name); + __entry->on = on; + ), + + TP_printk( + "d_name=%s %s", + __get_str(device_name), + __entry->on ? 
"on" : "off" + ) +); + +DEFINE_EVENT(kgsl_pwr_template, kgsl_clk, + TP_PROTO(struct kgsl_device *device, int on), + TP_ARGS(device, on) +); + +DEFINE_EVENT(kgsl_pwr_template, kgsl_irq, + TP_PROTO(struct kgsl_device *device, int on), + TP_ARGS(device, on) +); + +DEFINE_EVENT(kgsl_pwr_template, kgsl_bus, + TP_PROTO(struct kgsl_device *device, int on), + TP_ARGS(device, on) +); + +DEFINE_EVENT(kgsl_pwr_template, kgsl_rail, + TP_PROTO(struct kgsl_device *device, int on), + TP_ARGS(device, on) +); + +TRACE_EVENT(kgsl_pwrlevel, + + TP_PROTO(struct kgsl_device *device, unsigned int pwrlevel, + unsigned int freq), + + TP_ARGS(device, pwrlevel, freq), + + TP_STRUCT__entry( + __string(device_name, device->name) + __field(unsigned int, pwrlevel) + __field(unsigned int, freq) + ), + + TP_fast_assign( + __assign_str(device_name, device->name); + __entry->pwrlevel = pwrlevel; + __entry->freq = freq; + ), + + TP_printk( + "d_name=%s pwrlevel=%d freq=%d", + __get_str(device_name), + __entry->pwrlevel, + __entry->freq + ) +); + +DECLARE_EVENT_CLASS(kgsl_pwrstate_template, + TP_PROTO(struct kgsl_device *device, unsigned int state), + + TP_ARGS(device, state), + + TP_STRUCT__entry( + __string(device_name, device->name) + __field(unsigned int, state) + ), + + TP_fast_assign( + __assign_str(device_name, device->name); + __entry->state = state; + ), + + TP_printk( + "d_name=%s %s", + __get_str(device_name), + kgsl_pwrstate_to_str(__entry->state) + ) +); + +DEFINE_EVENT(kgsl_pwrstate_template, kgsl_pwr_set_state, + TP_PROTO(struct kgsl_device *device, unsigned int state), + TP_ARGS(device, state) +); + +DEFINE_EVENT(kgsl_pwrstate_template, kgsl_pwr_request_state, + TP_PROTO(struct kgsl_device *device, unsigned int state), + TP_ARGS(device, state) +); + +#endif /* _KGSL_TRACE_H */ + +/* This part must be outside protection */ +#include diff --git a/drivers/gpu/msm/z180.c b/drivers/gpu/msm/z180.c new file mode 100644 index 0000000000000..154a1f4e5bb47 --- /dev/null +++ b/drivers/gpu/msm/z180.c @@ -0,0 +1,973 @@ +/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include + +#include "kgsl.h" +#include "kgsl_cffdump.h" +#include "kgsl_sharedmem.h" + +#include "z180.h" +#include "z180_reg.h" +#include "z180_trace.h" + +#define DRIVER_VERSION_MAJOR 3 +#define DRIVER_VERSION_MINOR 1 + +#define Z180_DEVICE(device) \ + KGSL_CONTAINER_OF(device, struct z180_device, dev) + +#define GSL_VGC_INT_MASK \ + (REG_VGC_IRQSTATUS__MH_MASK | \ + REG_VGC_IRQSTATUS__G2D_MASK | \ + REG_VGC_IRQSTATUS__FIFO_MASK) + +#define VGV3_NEXTCMD_JUMP 0x01 + +#define VGV3_NEXTCMD_NEXTCMD_FSHIFT 12 +#define VGV3_NEXTCMD_NEXTCMD_FMASK 0x7 + +#define VGV3_CONTROL_MARKADD_FSHIFT 0 +#define VGV3_CONTROL_MARKADD_FMASK 0xfff + +#define Z180_PACKET_SIZE 15 +#define Z180_MARKER_SIZE 10 +#define Z180_CALL_CMD 0x1000 +#define Z180_MARKER_CMD 0x8000 +#define Z180_STREAM_END_CMD 0x9000 +#define Z180_STREAM_PACKET 0x7C000176 +#define Z180_STREAM_PACKET_CALL 0x7C000275 +#define Z180_PACKET_COUNT 8 +#define Z180_RB_SIZE (Z180_PACKET_SIZE*Z180_PACKET_COUNT \ + *sizeof(uint32_t)) + +#define NUMTEXUNITS 4 +#define TEXUNITREGCOUNT 25 +#define VG_REGCOUNT 0x39 + +#define PACKETSIZE_BEGIN 3 +#define PACKETSIZE_G2DCOLOR 2 +#define PACKETSIZE_TEXUNIT (TEXUNITREGCOUNT * 2) +#define PACKETSIZE_REG (VG_REGCOUNT * 2) +#define PACKETSIZE_STATE (PACKETSIZE_TEXUNIT * NUMTEXUNITS + \ + PACKETSIZE_REG + PACKETSIZE_BEGIN + \ + PACKETSIZE_G2DCOLOR) +#define PACKETSIZE_STATESTREAM (ALIGN((PACKETSIZE_STATE * \ + sizeof(unsigned int)), 32) / \ + sizeof(unsigned int)) + +#define Z180_INVALID_CONTEXT UINT_MAX + +/* z180 MH arbiter config*/ +#define Z180_CFG_MHARB \ + (0x10 \ + | (0 << MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT) \ + | (1 << MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT) \ + | (1 << MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT) \ + | (0 << MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT) \ + | (1 << MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT) \ + | (1 << MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT) \ + | (1 << MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT) \ + | (0 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT) \ + | (0x8 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT) \ + | (1 << MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT) \ + | (1 << MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT) \ + | (1 << MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT) \ + | (1 << MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT) \ + | (1 << MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT)) + +#define Z180_TIMESTAMP_EPSILON 20000 +#define Z180_IDLE_COUNT_MAX 1000000 + +enum z180_cmdwindow_type { + Z180_CMDWINDOW_2D = 0x00000000, + Z180_CMDWINDOW_MMU = 0x00000002, +}; + +#define Z180_CMDWINDOW_TARGET_MASK 0x000000FF +#define Z180_CMDWINDOW_ADDR_MASK 0x00FFFF00 +#define Z180_CMDWINDOW_TARGET_SHIFT 0 +#define Z180_CMDWINDOW_ADDR_SHIFT 8 + +static int z180_start(struct kgsl_device *device, unsigned int init_ram); +static int z180_stop(struct kgsl_device *device); +static int z180_wait(struct kgsl_device *device, + unsigned int timestamp, + unsigned int msecs); +static void z180_regread(struct kgsl_device *device, + unsigned int offsetwords, + unsigned int *value); +static void z180_regwrite(struct kgsl_device *device, + unsigned int offsetwords, + unsigned int value); +static void z180_cmdwindow_write(struct kgsl_device *device, + unsigned int addr, + unsigned int data); + +#define Z180_MMU_CONFIG \ + (0x01 \ + | (MMU_CONFIG << MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT) \ + | (MMU_CONFIG << MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT) \ + | (MMU_CONFIG << MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT) \ + | (MMU_CONFIG << MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT) \ + | 
(MMU_CONFIG << MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT) \ + | (MMU_CONFIG << MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT) \ + | (MMU_CONFIG << MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT) \ + | (MMU_CONFIG << MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT) \ + | (MMU_CONFIG << MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT) \ + | (MMU_CONFIG << MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT) \ + | (MMU_CONFIG << MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT)) + +static const struct kgsl_functable z180_functable; + +static struct z180_device device_2d0 = { + .dev = { + .name = DEVICE_2D0_NAME, + .id = KGSL_DEVICE_2D0, + .ver_major = DRIVER_VERSION_MAJOR, + .ver_minor = DRIVER_VERSION_MINOR, + .mh = { + .mharb = Z180_CFG_MHARB, + .mh_intf_cfg1 = 0x00032f07, + .mh_intf_cfg2 = 0x004b274f, + /* turn off memory protection unit by setting + acceptable physical address range to include + all pages. */ + .mpu_base = 0x00000000, + .mpu_range = 0xFFFFF000, + }, + .mmu = { + .config = Z180_MMU_CONFIG, + }, + .pwrctrl = { + .regulator_name = "fs_gfx2d0", + .irq_name = KGSL_2D0_IRQ, + }, + .mutex = __MUTEX_INITIALIZER(device_2d0.dev.mutex), + .state = KGSL_STATE_INIT, + .active_cnt = 0, + .iomemname = KGSL_2D0_REG_MEMORY, + .ftbl = &z180_functable, + }, +}; + +static struct z180_device device_2d1 = { + .dev = { + .name = DEVICE_2D1_NAME, + .id = KGSL_DEVICE_2D1, + .ver_major = DRIVER_VERSION_MAJOR, + .ver_minor = DRIVER_VERSION_MINOR, + .mh = { + .mharb = Z180_CFG_MHARB, + .mh_intf_cfg1 = 0x00032f07, + .mh_intf_cfg2 = 0x004b274f, + /* turn off memory protection unit by setting + acceptable physical address range to include + all pages. */ + .mpu_base = 0x00000000, + .mpu_range = 0xFFFFF000, + }, + .mmu = { + .config = Z180_MMU_CONFIG, + }, + .pwrctrl = { + .regulator_name = "fs_gfx2d1", + .irq_name = KGSL_2D1_IRQ, + }, + .mutex = __MUTEX_INITIALIZER(device_2d1.dev.mutex), + .state = KGSL_STATE_INIT, + .active_cnt = 0, + .iomemname = KGSL_2D1_REG_MEMORY, + .ftbl = &z180_functable, + }, +}; + +static irqreturn_t z180_isr(int irq, void *data) +{ + irqreturn_t result = IRQ_NONE; + unsigned int status; + struct kgsl_device *device = (struct kgsl_device *) data; + struct z180_device *z180_dev = Z180_DEVICE(device); + + z180_regread(device, ADDR_VGC_IRQSTATUS >> 2, &status); + + trace_kgsl_z180_irq_status(device, status); + + if (status & GSL_VGC_INT_MASK) { + z180_regwrite(device, + ADDR_VGC_IRQSTATUS >> 2, status & GSL_VGC_INT_MASK); + + result = IRQ_HANDLED; + + if (status & REG_VGC_IRQSTATUS__FIFO_MASK) + KGSL_DRV_ERR(device, "z180 fifo interrupt\n"); + if (status & REG_VGC_IRQSTATUS__MH_MASK) + kgsl_mh_intrcallback(device); + if (status & REG_VGC_IRQSTATUS__G2D_MASK) { + int count; + + z180_regread(device, + ADDR_VGC_IRQ_ACTIVE_CNT >> 2, + &count); + + count >>= 8; + count &= 255; + z180_dev->timestamp += count; + + queue_work(device->work_queue, &device->ts_expired_ws); + wake_up_interruptible(&device->wait_queue); + + atomic_notifier_call_chain( + &(device->ts_notifier_list), + device->id, NULL); + } + } + + if ((device->pwrctrl.nap_allowed == true) && + (device->requested_state == KGSL_STATE_NONE)) { + kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP); + queue_work(device->work_queue, &device->idle_check_ws); + } + mod_timer_pending(&device->idle_timer, + jiffies + device->pwrctrl.interval_timeout); + + return result; +} + +static void z180_cleanup_pt(struct kgsl_device *device, + struct kgsl_pagetable *pagetable) +{ + struct z180_device *z180_dev = Z180_DEVICE(device); + + kgsl_mmu_unmap(pagetable, 
&device->mmu.setstate_memory); + + kgsl_mmu_unmap(pagetable, &device->memstore); + + kgsl_mmu_unmap(pagetable, &z180_dev->ringbuffer.cmdbufdesc); +} + +static int z180_setup_pt(struct kgsl_device *device, + struct kgsl_pagetable *pagetable) +{ + int result = 0; + struct z180_device *z180_dev = Z180_DEVICE(device); + + result = kgsl_mmu_map_global(pagetable, &device->mmu.setstate_memory, + GSL_PT_PAGE_RV | GSL_PT_PAGE_WV); + + if (result) + goto error; + + result = kgsl_mmu_map_global(pagetable, &device->memstore, + GSL_PT_PAGE_RV | GSL_PT_PAGE_WV); + if (result) + goto error_unmap_dummy; + + result = kgsl_mmu_map_global(pagetable, + &z180_dev->ringbuffer.cmdbufdesc, + GSL_PT_PAGE_RV); + if (result) + goto error_unmap_memstore; + return result; + +error_unmap_dummy: + kgsl_mmu_unmap(pagetable, &device->mmu.setstate_memory); + +error_unmap_memstore: + kgsl_mmu_unmap(pagetable, &device->memstore); + +error: + return result; +} + +static inline unsigned int rb_offset(unsigned int index) +{ + return index*sizeof(unsigned int)*(Z180_PACKET_SIZE); +} + +static void addmarker(struct z180_ringbuffer *rb, unsigned int index) +{ + char *ptr = (char *)(rb->cmdbufdesc.hostptr); + unsigned int *p = (unsigned int *)(ptr + rb_offset(index)); + + *p++ = Z180_STREAM_PACKET; + *p++ = (Z180_MARKER_CMD | 5); + *p++ = ADDR_VGV3_LAST << 24; + *p++ = ADDR_VGV3_LAST << 24; + *p++ = ADDR_VGV3_LAST << 24; + *p++ = Z180_STREAM_PACKET; + *p++ = 5; + *p++ = ADDR_VGV3_LAST << 24; + *p++ = ADDR_VGV3_LAST << 24; + *p++ = ADDR_VGV3_LAST << 24; +} + +static void addcmd(struct z180_ringbuffer *rb, unsigned int index, + unsigned int cmd, unsigned int nextcnt) +{ + char * ptr = (char *)(rb->cmdbufdesc.hostptr); + unsigned int *p = (unsigned int *)(ptr + (rb_offset(index) + + (Z180_MARKER_SIZE * sizeof(unsigned int)))); + + *p++ = Z180_STREAM_PACKET_CALL; + *p++ = cmd; + *p++ = Z180_CALL_CMD | nextcnt; + *p++ = ADDR_VGV3_LAST << 24; + *p++ = ADDR_VGV3_LAST << 24; +} + +static void z180_cmdstream_start(struct kgsl_device *device) +{ + struct z180_device *z180_dev = Z180_DEVICE(device); + unsigned int cmd = VGV3_NEXTCMD_JUMP << VGV3_NEXTCMD_NEXTCMD_FSHIFT; + + z180_dev->timestamp = 0; + z180_dev->current_timestamp = 0; + + addmarker(&z180_dev->ringbuffer, 0); + + z180_cmdwindow_write(device, ADDR_VGV3_MODE, 4); + + z180_cmdwindow_write(device, ADDR_VGV3_NEXTADDR, + z180_dev->ringbuffer.cmdbufdesc.gpuaddr); + + z180_cmdwindow_write(device, ADDR_VGV3_NEXTCMD, cmd | 5); + + z180_cmdwindow_write(device, ADDR_VGV3_WRITEADDR, + device->memstore.gpuaddr); + + cmd = (int)(((1) & VGV3_CONTROL_MARKADD_FMASK) + << VGV3_CONTROL_MARKADD_FSHIFT); + + z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, cmd); + + z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, 0); +} + +static int room_in_rb(struct z180_device *device) +{ + int ts_diff; + + ts_diff = device->current_timestamp - device->timestamp; + + return ts_diff < Z180_PACKET_COUNT; +} + +static int z180_idle(struct kgsl_device *device, unsigned int timeout) +{ + int status = 0; + struct z180_device *z180_dev = Z180_DEVICE(device); + + if (timestamp_cmp(z180_dev->current_timestamp, + z180_dev->timestamp) > 0) + status = z180_wait(device, z180_dev->current_timestamp, + timeout); + + if (status) + KGSL_DRV_ERR(device, "z180_waittimestamp() timed out\n"); + + return status; +} + +int +z180_cmdstream_issueibcmds(struct kgsl_device_private *dev_priv, + struct kgsl_context *context, + struct kgsl_ibdesc *ibdesc, + unsigned int numibs, + uint32_t *timestamp, + unsigned int ctrl) +{ + long result = 0; + 
unsigned int ofs = PACKETSIZE_STATESTREAM * sizeof(unsigned int); + unsigned int cnt = 5; + unsigned int nextaddr = 0; + unsigned int index = 0; + unsigned int nextindex; + unsigned int nextcnt = Z180_STREAM_END_CMD | 5; + struct kgsl_mem_entry *entry = NULL; + unsigned int cmd; + struct kgsl_device *device = dev_priv->device; + struct kgsl_pagetable *pagetable = dev_priv->process_priv->pagetable; + struct z180_device *z180_dev = Z180_DEVICE(device); + unsigned int sizedwords; + + if (device->state & KGSL_STATE_HUNG) { + result = -EINVAL; + goto error; + } + if (numibs != 1) { + KGSL_DRV_ERR(device, "Invalid number of ibs: %d\n", numibs); + result = -EINVAL; + goto error; + } + cmd = ibdesc[0].gpuaddr; + sizedwords = ibdesc[0].sizedwords; + /* + * Get a kernel mapping to the IB for monkey patching. + * See the end of this function. + */ + entry = kgsl_sharedmem_find_region(dev_priv->process_priv, cmd, + sizedwords); + if (entry == NULL) { + KGSL_DRV_ERR(device, "Bad ibdesc: gpuaddr 0x%x size %d\n", + cmd, sizedwords); + result = -EINVAL; + goto error; + } + /* + * This will only map memory if it exists, otherwise it will reuse the + * mapping. And the 2d userspace reuses IBs so we likely won't create + * too many mappings. + */ + if (kgsl_gpuaddr_to_vaddr(&entry->memdesc, cmd) == NULL) { + KGSL_DRV_ERR(device, + "Cannot make kernel mapping for gpuaddr 0x%x\n", + cmd); + result = -EINVAL; + goto error; + } + + KGSL_CMD_INFO(device, "ctxt %d ibaddr 0x%08x sizedwords %d\n", + context->id, cmd, sizedwords); + /* context switch */ + if ((context->id != (int)z180_dev->ringbuffer.prevctx) || + (ctrl & KGSL_CONTEXT_CTX_SWITCH)) { + KGSL_CMD_INFO(device, "context switch %d -> %d\n", + context->id, z180_dev->ringbuffer.prevctx); + kgsl_mmu_setstate(device, pagetable, + 0); + cnt = PACKETSIZE_STATESTREAM; + ofs = 0; + } + kgsl_setstate(device, 0, kgsl_mmu_pt_get_flags(device->mmu.hwpagetable, + device->id)); + + result = wait_event_interruptible_timeout(device->wait_queue, + room_in_rb(z180_dev), + msecs_to_jiffies(KGSL_TIMEOUT_DEFAULT)); + if (result < 0) { + KGSL_CMD_ERR(device, "wait_event_interruptible_timeout " + "failed: %ld\n", result); + goto error; + } + result = 0; + + index = z180_dev->current_timestamp % Z180_PACKET_COUNT; + z180_dev->current_timestamp++; + nextindex = z180_dev->current_timestamp % Z180_PACKET_COUNT; + *timestamp = z180_dev->current_timestamp; + + z180_dev->ringbuffer.prevctx = context->id; + + addcmd(&z180_dev->ringbuffer, index, cmd + ofs, cnt); + kgsl_pwrscale_busy(device); + + /* Make sure the next ringbuffer entry has a marker */ + addmarker(&z180_dev->ringbuffer, nextindex); + + nextaddr = z180_dev->ringbuffer.cmdbufdesc.gpuaddr + + rb_offset(nextindex); + + /* monkey patch the IB so that it jumps back to the ringbuffer */ + kgsl_sharedmem_writel(&entry->memdesc, + ((sizedwords + 1) * sizeof(unsigned int)), + nextaddr); + kgsl_sharedmem_writel(&entry->memdesc, + ((sizedwords + 2) * sizeof(unsigned int)), + nextcnt); + + /* sync memory before activating the hardware for the new command*/ + mb(); + + cmd = (int)(((2) & VGV3_CONTROL_MARKADD_FMASK) + << VGV3_CONTROL_MARKADD_FSHIFT); + + z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, cmd); + z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, 0); +error: + return (int)result; +} + +static int z180_ringbuffer_init(struct kgsl_device *device) +{ + struct z180_device *z180_dev = Z180_DEVICE(device); + memset(&z180_dev->ringbuffer, 0, sizeof(struct z180_ringbuffer)); + z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT; + 
return kgsl_allocate_contiguous(&z180_dev->ringbuffer.cmdbufdesc, + Z180_RB_SIZE); +} + +static void z180_ringbuffer_close(struct kgsl_device *device) +{ + struct z180_device *z180_dev = Z180_DEVICE(device); + kgsl_sharedmem_free(&z180_dev->ringbuffer.cmdbufdesc); + memset(&z180_dev->ringbuffer, 0, sizeof(struct z180_ringbuffer)); +} + +static int __devinit z180_probe(struct platform_device *pdev) +{ + int status = -EINVAL; + struct kgsl_device *device = NULL; + struct z180_device *z180_dev; + + device = (struct kgsl_device *)pdev->id_entry->driver_data; + device->parentdev = &pdev->dev; + + z180_dev = Z180_DEVICE(device); + spin_lock_init(&z180_dev->cmdwin_lock); + + status = z180_ringbuffer_init(device); + if (status != 0) + goto error; + + status = kgsl_device_platform_probe(device, z180_isr); + if (status) + goto error_close_ringbuffer; + + kgsl_pwrscale_init(device); + kgsl_pwrscale_attach_policy(device, Z180_DEFAULT_PWRSCALE_POLICY); + + return status; + +error_close_ringbuffer: + z180_ringbuffer_close(device); +error: + device->parentdev = NULL; + return status; +} + +static int __devexit z180_remove(struct platform_device *pdev) +{ + struct kgsl_device *device = NULL; + + device = (struct kgsl_device *)pdev->id_entry->driver_data; + + kgsl_pwrscale_close(device); + kgsl_device_platform_remove(device); + + z180_ringbuffer_close(device); + + return 0; +} + +static int z180_start(struct kgsl_device *device, unsigned int init_ram) +{ + int status = 0; + + kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT); + + kgsl_pwrctrl_enable(device); + + /* Set interrupts to 0 to ensure a good state */ + z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 0x0); + + kgsl_mh_start(device); + + status = kgsl_mmu_start(device); + if (status) + goto error_clk_off; + + z180_cmdstream_start(device); + + mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT); + kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON); + device->ftbl->irqctrl(device, 1); + return 0; + +error_clk_off: + z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 0); + kgsl_pwrctrl_disable(device); + return status; +} + +static int z180_stop(struct kgsl_device *device) +{ + device->ftbl->irqctrl(device, 0); + z180_idle(device, KGSL_TIMEOUT_DEFAULT); + + del_timer_sync(&device->idle_timer); + + kgsl_mmu_stop(device); + + /* Disable the clocks before the power rail. 
*/ + kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF); + + kgsl_pwrctrl_disable(device); + + return 0; +} + +static int z180_getproperty(struct kgsl_device *device, + enum kgsl_property_type type, + void *value, + unsigned int sizebytes) +{ + int status = -EINVAL; + + switch (type) { + case KGSL_PROP_DEVICE_INFO: + { + struct kgsl_devinfo devinfo; + + if (sizebytes != sizeof(devinfo)) { + status = -EINVAL; + break; + } + + memset(&devinfo, 0, sizeof(devinfo)); + devinfo.device_id = device->id+1; + devinfo.chip_id = 0; + devinfo.mmu_enabled = kgsl_mmu_enabled(); + + if (copy_to_user(value, &devinfo, sizeof(devinfo)) != + 0) { + status = -EFAULT; + break; + } + status = 0; + } + break; + case KGSL_PROP_MMU_ENABLE: + { + int mmu_prop = kgsl_mmu_enabled(); + if (sizebytes != sizeof(int)) { + status = -EINVAL; + break; + } + if (copy_to_user(value, &mmu_prop, sizeof(mmu_prop))) { + status = -EFAULT; + break; + } + status = 0; + } + break; + + default: + KGSL_DRV_ERR(device, "invalid property: %d\n", type); + status = -EINVAL; + } + return status; +} + +static unsigned int z180_isidle(struct kgsl_device *device) +{ + struct z180_device *z180_dev = Z180_DEVICE(device); + + return (timestamp_cmp(z180_dev->timestamp, + z180_dev->current_timestamp) == 0) ? true : false; +} + +static int z180_suspend_context(struct kgsl_device *device) +{ + struct z180_device *z180_dev = Z180_DEVICE(device); + + z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT; + + return 0; +} + +/* Not all Z180 registers are directly accessible. + * The _z180_(read|write)_simple functions below handle the ones that are. + */ +static void _z180_regread_simple(struct kgsl_device *device, + unsigned int offsetwords, + unsigned int *value) +{ + unsigned int *reg; + + BUG_ON(offsetwords * sizeof(uint32_t) >= device->regspace.sizebytes); + + reg = (unsigned int *)(device->regspace.mmio_virt_base + + (offsetwords << 2)); + + /*ensure this read finishes before the next one. + * i.e. act like normal readl() */ + *value = __raw_readl(reg); + rmb(); + +} + +static void _z180_regwrite_simple(struct kgsl_device *device, + unsigned int offsetwords, + unsigned int value) +{ + unsigned int *reg; + + BUG_ON(offsetwords*sizeof(uint32_t) >= device->regspace.sizebytes); + + reg = (unsigned int *)(device->regspace.mmio_virt_base + + (offsetwords << 2)); + kgsl_cffdump_regwrite(device->id, offsetwords << 2, value); + /*ensure previous writes post before this one, + * i.e. act like normal writel() */ + wmb(); + __raw_writel(value, reg); +} + + +/* The MH registers must be accessed through via a 2 step write, (read|write) + * process. These registers may be accessed from interrupt context during + * the handling of MH or MMU error interrupts. Therefore a spin lock is used + * to ensure that the 2 step sequence is not interrupted. 
+ */ +static void _z180_regread_mmu(struct kgsl_device *device, + unsigned int offsetwords, + unsigned int *value) +{ + struct z180_device *z180_dev = Z180_DEVICE(device); + unsigned long flags; + + spin_lock_irqsave(&z180_dev->cmdwin_lock, flags); + _z180_regwrite_simple(device, (ADDR_VGC_MH_READ_ADDR >> 2), + offsetwords); + _z180_regread_simple(device, (ADDR_VGC_MH_DATA_ADDR >> 2), value); + spin_unlock_irqrestore(&z180_dev->cmdwin_lock, flags); +} + + +static void _z180_regwrite_mmu(struct kgsl_device *device, + unsigned int offsetwords, + unsigned int value) +{ + struct z180_device *z180_dev = Z180_DEVICE(device); + unsigned int cmdwinaddr; + unsigned long flags; + + cmdwinaddr = ((Z180_CMDWINDOW_MMU << Z180_CMDWINDOW_TARGET_SHIFT) & + Z180_CMDWINDOW_TARGET_MASK); + cmdwinaddr |= ((offsetwords << Z180_CMDWINDOW_ADDR_SHIFT) & + Z180_CMDWINDOW_ADDR_MASK); + + spin_lock_irqsave(&z180_dev->cmdwin_lock, flags); + _z180_regwrite_simple(device, ADDR_VGC_MMUCOMMANDSTREAM >> 2, + cmdwinaddr); + _z180_regwrite_simple(device, ADDR_VGC_MMUCOMMANDSTREAM >> 2, value); + spin_unlock_irqrestore(&z180_dev->cmdwin_lock, flags); +} + +/* the rest of the code doesn't want to think about if it is writing mmu + * registers or normal registers so handle it here + */ +static void z180_regread(struct kgsl_device *device, + unsigned int offsetwords, + unsigned int *value) +{ + if (!in_interrupt()) + kgsl_pre_hwaccess(device); + + if ((offsetwords >= MH_ARBITER_CONFIG && + offsetwords <= MH_AXI_HALT_CONTROL) || + (offsetwords >= MH_MMU_CONFIG && + offsetwords <= MH_MMU_MPU_END)) { + _z180_regread_mmu(device, offsetwords, value); + } else { + _z180_regread_simple(device, offsetwords, value); + } +} + +static void z180_regwrite(struct kgsl_device *device, + unsigned int offsetwords, + unsigned int value) +{ + if (!in_interrupt()) + kgsl_pre_hwaccess(device); + + if ((offsetwords >= MH_ARBITER_CONFIG && + offsetwords <= MH_CLNT_INTF_CTRL_CONFIG2) || + (offsetwords >= MH_MMU_CONFIG && + offsetwords <= MH_MMU_MPU_END)) { + _z180_regwrite_mmu(device, offsetwords, value); + } else { + _z180_regwrite_simple(device, offsetwords, value); + } +} + +static void z180_cmdwindow_write(struct kgsl_device *device, + unsigned int addr, unsigned int data) +{ + unsigned int cmdwinaddr; + + cmdwinaddr = ((Z180_CMDWINDOW_2D << Z180_CMDWINDOW_TARGET_SHIFT) & + Z180_CMDWINDOW_TARGET_MASK); + cmdwinaddr |= ((addr << Z180_CMDWINDOW_ADDR_SHIFT) & + Z180_CMDWINDOW_ADDR_MASK); + + z180_regwrite(device, ADDR_VGC_COMMANDSTREAM >> 2, cmdwinaddr); + z180_regwrite(device, ADDR_VGC_COMMANDSTREAM >> 2, data); +} + +static unsigned int z180_readtimestamp(struct kgsl_device *device, + enum kgsl_timestamp_type type) +{ + struct z180_device *z180_dev = Z180_DEVICE(device); + /* get current EOP timestamp */ + return z180_dev->timestamp; +} + +static int z180_waittimestamp(struct kgsl_device *device, + unsigned int timestamp, + unsigned int msecs) +{ + int status = -EINVAL; + + /* Don't wait forever, set a max (10 sec) value for now */ + if (msecs == -1) + msecs = 10 * MSEC_PER_SEC; + + mutex_unlock(&device->mutex); + status = z180_wait(device, timestamp, msecs); + mutex_lock(&device->mutex); + + return status; +} + +static int z180_wait(struct kgsl_device *device, + unsigned int timestamp, + unsigned int msecs) +{ + int status = -EINVAL; + long timeout = 0; + + timeout = wait_io_event_interruptible_timeout( + device->wait_queue, + kgsl_check_timestamp(device, timestamp), + msecs_to_jiffies(msecs)); + + if (timeout > 0) + status = 0; + else if (timeout 
== 0) { + status = -ETIMEDOUT; + kgsl_pwrctrl_set_state(device, KGSL_STATE_HUNG); + } else + status = timeout; + + return status; +} + +static void +z180_drawctxt_destroy(struct kgsl_device *device, + struct kgsl_context *context) +{ + struct z180_device *z180_dev = Z180_DEVICE(device); + + z180_idle(device, KGSL_TIMEOUT_DEFAULT); + + if (z180_dev->ringbuffer.prevctx == context->id) { + z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT; + device->mmu.hwpagetable = device->mmu.defaultpagetable; + kgsl_setstate(device, 0, + KGSL_MMUFLAGS_PTUPDATE); + } +} + +static void z180_power_stats(struct kgsl_device *device, + struct kgsl_power_stats *stats) +{ + struct kgsl_pwrctrl *pwr = &device->pwrctrl; + s64 tmp = ktime_to_us(ktime_get()); + + if (pwr->time == 0) { + pwr->time = tmp; + stats->total_time = 0; + stats->busy_time = 0; + } else { + stats->total_time = tmp - pwr->time; + pwr->time = tmp; + stats->busy_time = tmp - device->on_time; + device->on_time = tmp; + } +} + +static void z180_irqctrl(struct kgsl_device *device, int state) +{ + /* Control interrupts for Z180 and the Z180 MMU */ + + if (state) { + z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 3); + z180_regwrite(device, MH_INTERRUPT_MASK, KGSL_MMU_INT_MASK); + } else { + z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 0); + z180_regwrite(device, MH_INTERRUPT_MASK, 0); + } +} + +static unsigned int z180_gpuid(struct kgsl_device *device) +{ + /* Standard KGSL gpuid format: + * top word is 0x0002 for 2D or 0x0003 for 3D + * Bottom word is core specific identifer + */ + + return (0x0002 << 16) | 180; +} + +static const struct kgsl_functable z180_functable = { + /* Mandatory functions */ + .regread = z180_regread, + .regwrite = z180_regwrite, + .idle = z180_idle, + .isidle = z180_isidle, + .suspend_context = z180_suspend_context, + .start = z180_start, + .stop = z180_stop, + .getproperty = z180_getproperty, + .waittimestamp = z180_waittimestamp, + .readtimestamp = z180_readtimestamp, + .issueibcmds = z180_cmdstream_issueibcmds, + .setup_pt = z180_setup_pt, + .cleanup_pt = z180_cleanup_pt, + .power_stats = z180_power_stats, + .irqctrl = z180_irqctrl, + .gpuid = z180_gpuid, + /* Optional functions */ + .drawctxt_create = NULL, + .drawctxt_destroy = z180_drawctxt_destroy, + .ioctl = NULL, +}; + +static struct platform_device_id z180_id_table[] = { + { DEVICE_2D0_NAME, (kernel_ulong_t)&device_2d0.dev, }, + { DEVICE_2D1_NAME, (kernel_ulong_t)&device_2d1.dev, }, + { }, +}; +MODULE_DEVICE_TABLE(platform, z180_id_table); + +static struct platform_driver z180_platform_driver = { + .probe = z180_probe, + .remove = __devexit_p(z180_remove), + .suspend = kgsl_suspend_driver, + .resume = kgsl_resume_driver, + .id_table = z180_id_table, + .driver = { + .owner = THIS_MODULE, + .name = DEVICE_2D_NAME, + .pm = &kgsl_pm_ops, + } +}; + +static int __init kgsl_2d_init(void) +{ + return platform_driver_register(&z180_platform_driver); +} + +static void __exit kgsl_2d_exit(void) +{ + platform_driver_unregister(&z180_platform_driver); +} + +module_init(kgsl_2d_init); +module_exit(kgsl_2d_exit); + +MODULE_DESCRIPTION("2D Graphics driver"); +MODULE_VERSION("1.2"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:kgsl_2d"); diff --git a/drivers/gpu/msm/z180.h b/drivers/gpu/msm/z180.h new file mode 100644 index 0000000000000..b7ff8140eb0b8 --- /dev/null +++ b/drivers/gpu/msm/z180.h @@ -0,0 +1,37 @@ +/* Copyright (c) 2008-2011, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __Z180_H +#define __Z180_H + +#include "kgsl_device.h" + +#define DEVICE_2D_NAME "kgsl-2d" +#define DEVICE_2D0_NAME "kgsl-2d0" +#define DEVICE_2D1_NAME "kgsl-2d1" + +#define Z180_DEFAULT_PWRSCALE_POLICY NULL + +struct z180_ringbuffer { + unsigned int prevctx; + struct kgsl_memdesc cmdbufdesc; +}; + +struct z180_device { + struct kgsl_device dev; /* Must be first field in this struct */ + int current_timestamp; + int timestamp; + struct z180_ringbuffer ringbuffer; + spinlock_t cmdwin_lock; +}; + +#endif /* __Z180_H */ diff --git a/drivers/gpu/msm/z180_reg.h b/drivers/gpu/msm/z180_reg.h new file mode 100644 index 0000000000000..41a17ce610826 --- /dev/null +++ b/drivers/gpu/msm/z180_reg.h @@ -0,0 +1,49 @@ +/* Copyright (c) 2002,2007-2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __Z80_REG_H +#define __Z80_REG_H + +#define REG_VGC_IRQSTATUS__MH_MASK 0x00000001L +#define REG_VGC_IRQSTATUS__G2D_MASK 0x00000002L +#define REG_VGC_IRQSTATUS__FIFO_MASK 0x00000004L + +#define MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT 0x00000006 +#define MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT 0x00000007 +#define MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT 0x00000008 +#define MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT 0x00000009 +#define MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT 0x0000000a +#define MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT 0x0000000d +#define MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT 0x0000000e +#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT 0x0000000f +#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT 0x00000010 +#define MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT 0x00000016 +#define MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT 0x00000017 +#define MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT 0x00000018 +#define MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT 0x00000019 +#define MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT 0x0000001a + +#define ADDR_VGC_MH_READ_ADDR 0x0510 +#define ADDR_VGC_MH_DATA_ADDR 0x0518 +#define ADDR_VGC_COMMANDSTREAM 0x0000 +#define ADDR_VGC_IRQENABLE 0x0438 +#define ADDR_VGC_IRQSTATUS 0x0418 +#define ADDR_VGC_IRQ_ACTIVE_CNT 0x04E0 +#define ADDR_VGC_MMUCOMMANDSTREAM 0x03FC +#define ADDR_VGV3_CONTROL 0x0070 +#define ADDR_VGV3_LAST 0x007F +#define ADDR_VGV3_MODE 0x0071 +#define ADDR_VGV3_NEXTADDR 0x0075 +#define ADDR_VGV3_NEXTCMD 0x0076 +#define ADDR_VGV3_WRITEADDR 0x0072 + +#endif /* __Z180_REG_H */ diff --git a/drivers/gpu/msm/z180_trace.c b/drivers/gpu/msm/z180_trace.c new file mode 100644 index 0000000000000..9d971ee885c32 --- /dev/null +++ b/drivers/gpu/msm/z180_trace.c @@ -0,0 +1,20 @@ +/* Copyright (c) 2011, The Linux 
Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "kgsl.h"
+#include "z180.h"
+#include "z180_reg.h"
+
+/* Instantiate tracepoints */
+#define CREATE_TRACE_POINTS
+#include "z180_trace.h"
diff --git a/drivers/gpu/msm/z180_trace.h b/drivers/gpu/msm/z180_trace.h
new file mode 100644
index 0000000000000..4f65b9b2ad788
--- /dev/null
+++ b/drivers/gpu/msm/z180_trace.h
@@ -0,0 +1,60 @@
+/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#if !defined(_Z180_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _Z180_TRACE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kgsl
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE z180_trace
+
+#include <linux/tracepoint.h>
+
+struct kgsl_device;
+
+/*
+ * Tracepoint for z180 irq. Includes status info
+ */
+TRACE_EVENT(kgsl_z180_irq_status,
+
+	TP_PROTO(struct kgsl_device *device, unsigned int status),
+
+	TP_ARGS(device, status),
+
+	TP_STRUCT__entry(
+		__string(device_name, device->name)
+		__field(unsigned int, status)
+	),
+
+	TP_fast_assign(
+		__assign_str(device_name, device->name);
+		__entry->status = status;
+	),
+
+	TP_printk(
+		"d_name=%s status=%s",
+		__get_str(device_name),
+		__entry->status ? __print_flags(__entry->status, "|",
+			{ REG_VGC_IRQSTATUS__MH_MASK, "MH" },
+			{ REG_VGC_IRQSTATUS__G2D_MASK, "G2D" },
+			{ REG_VGC_IRQSTATUS__FIFO_MASK, "FIFO" }) : "None"
+	)
+);
+
+#endif /* _Z180_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 2560f01c1a632..6ed5e4fc200c3 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -50,6 +50,27 @@ config HIDRAW
 	  If unsure, say Y.
 
+config UHID
+	tristate "User-space I/O driver support for HID subsystem"
+	depends on HID
+	default n
+	---help---
+	Say Y here if you want to provide HID I/O drivers from user-space.
+	This lets you write I/O drivers in user-space and feed the data from
+	the device into the kernel. The kernel parses the HID reports, loads
+	the matching HID device driver or provides input devices on top of
+	your user-space device.
+
+	This driver is not meant for parsing HID reports in user-space or
+	for writing special HID drivers; use hidraw for that. Instead, it
+	lets you write the transport-layer driver in user-space, the way
+	USB-HID and Bluetooth-HID do in kernel-space.
+
+	If unsure, say N.
+
+	To compile this driver as a module, choose M here: the
+	module will be called uhid.
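As a rough illustration of the user-space side described in the help text above (this aside is not part of the patch): the sketch below creates a virtual HID mouse through the new uhid character device and injects a single input report. It assumes the UAPI header added elsewhere in this series is available as <linux/uhid.h> and that the misc device shows up as /dev/uhid; the report descriptor, device name and vendor/product IDs are placeholders.

/* Illustrative only -- not part of this patch. Header name, device node
 * and all IDs below are assumptions. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <linux/input.h>		/* BUS_USB */
#include <linux/uhid.h>

/* Placeholder report descriptor: a plain 3-button relative mouse. */
static unsigned char rdesc[] = {
	0x05, 0x01, 0x09, 0x02, 0xa1, 0x01,	/* Usage Page/Usage (Mouse), Collection (App) */
	0x09, 0x01, 0xa1, 0x00,			/* Usage (Pointer), Collection (Physical) */
	0x05, 0x09, 0x19, 0x01, 0x29, 0x03,	/* Buttons 1..3 */
	0x15, 0x00, 0x25, 0x01, 0x95, 0x03,	/* Logical 0..1, 3 fields... */
	0x75, 0x01, 0x81, 0x02,			/* ...1 bit each, Input (Data,Var,Abs) */
	0x95, 0x01, 0x75, 0x05, 0x81, 0x01,	/* 5 bits of padding */
	0x05, 0x01, 0x09, 0x30, 0x09, 0x31,	/* Generic Desktop: X, Y */
	0x15, 0x81, 0x25, 0x7f, 0x75, 0x08,	/* Logical -127..127, 8 bits... */
	0x95, 0x02, 0x81, 0x06,			/* ...2 fields, Input (Data,Var,Rel) */
	0xc0, 0xc0,				/* End Collection x2 */
};

int main(void)
{
	struct uhid_event ev;
	int fd = open("/dev/uhid", O_RDWR | O_CLOEXEC);

	if (fd < 0)
		return 1;

	/* UHID_CREATE: uhid_dev_create() copies rd_data from this user
	 * pointer, allocates a hid_device and calls hid_add_device(). */
	memset(&ev, 0, sizeof(ev));
	ev.type = UHID_CREATE;
	strcpy((char *)ev.u.create.name, "example-uhid-mouse");
	ev.u.create.rd_data = rdesc;
	ev.u.create.rd_size = sizeof(rdesc);
	ev.u.create.bus = BUS_USB;
	ev.u.create.vendor = 0x1234;		/* placeholder IDs */
	ev.u.create.product = 0x5678;
	if (write(fd, &ev, sizeof(ev)) < 0)
		return 1;

	/* UHID_INPUT: one raw report (no buttons, x += 5, y += 0); the
	 * kernel parses it against rdesc via hid_input_report(). */
	memset(&ev, 0, sizeof(ev));
	ev.type = UHID_INPUT;
	ev.u.input.data[0] = 0x00;
	ev.u.input.data[1] = 0x05;
	ev.u.input.data[2] = 0x00;
	ev.u.input.size = 3;
	write(fd, &ev, sizeof(ev));

	/* UHID_DESTROY: tear the device down again. */
	memset(&ev, 0, sizeof(ev));
	ev.type = UHID_DESTROY;
	write(fd, &ev, sizeof(ev));

	close(fd);
	return 0;
}

A real transport driver would keep the file descriptor open, read() the UHID_START/UHID_OPEN/UHID_OUTPUT events the kernel queues back, and forward outgoing reports to its device; that direction is implemented below in uhid.c by uhid_queue_event() and uhid_char_read().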
+ source "drivers/hid/usbhid/Kconfig" menu "Special HID drivers" @@ -68,9 +89,15 @@ config HID_A4TECH ---help--- Support for A4 tech X5 and WOP-35 / Trust 450L mice. -config HID_ACRUX_FF - tristate "ACRUX force feedback" +config HID_ACRUX + tristate "ACRUX game controller support" depends on USB_HID + ---help--- + Say Y here if you want to enable support for ACRUX game controllers. + +config HID_ACRUX_FF + tristate "ACRUX force feedback support" + depends on HID_ACRUX select INPUT_FF_MEMLESS ---help--- Say Y here if you want to enable force feedback support for ACRUX @@ -319,10 +346,10 @@ config HID_NTRIG Support for N-Trig touch screen. config HID_ORTEK - tristate "Ortek WKB-2000 wireless keyboard and mouse trackpad" + tristate "Ortek PKB-1700/WKB-2000 wireless keyboard and mouse trackpad" depends on USB_HID ---help--- - Support for Ortek WKB-2000 wireless keyboard + mouse trackpad. + Support for Ortek PKB-1700/WKB-2000 wireless keyboard + mouse trackpad. config HID_PANTHERLORD tristate "Pantherlord/GreenAsia game controller" diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index 6efc2a0370ad1..ec4e38d69587f 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -8,6 +8,7 @@ ifdef CONFIG_DEBUG_FS endif obj-$(CONFIG_HID) += hid.o +obj-$(CONFIG_UHID) += uhid.o hid-$(CONFIG_HIDRAW) += hidraw.o @@ -27,7 +28,7 @@ endif obj-$(CONFIG_HID_3M_PCT) += hid-3m-pct.o obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o -obj-$(CONFIG_HID_ACRUX_FF) += hid-axff.o +obj-$(CONFIG_HID_ACRUX) += hid-axff.o obj-$(CONFIG_HID_APPLE) += hid-apple.o obj-$(CONFIG_HID_BELKIN) += hid-belkin.o obj-$(CONFIG_HID_CANDO) += hid-cando.o diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 61aa712333927..b85744fe84647 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -481,6 +481,12 @@ static const struct hid_device_id apple_devices[] = { .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS), .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI), + .driver_data = APPLE_HAS_FN }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO), + .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI), .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO), diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c index e5b961d6ff220..b4554288de00b 100644 --- a/drivers/hid/hid-axff.c +++ b/drivers/hid/hid-axff.c @@ -33,6 +33,8 @@ #include #include "hid-ids.h" + +#ifdef CONFIG_HID_ACRUX_FF #include "usbhid/usbhid.h" struct axff_device { @@ -109,6 +111,12 @@ static int axff_init(struct hid_device *hid) kfree(axff); return error; } +#else +static inline int axff_init(struct hid_device *hid) +{ + return 0; +} +#endif static int ax_probe(struct hid_device *hdev, const struct hid_device_id *id) { @@ -139,9 +147,25 @@ static int ax_probe(struct hid_device *hdev, const struct hid_device_id *id) error); } + /* + * We need to start polling device right away, otherwise + * it will go into a coma. 
+ */ + error = hid_hw_open(hdev); + if (error) { + dev_err(&hdev->dev, "hw open failed\n"); + return error; + } + return 0; } +static void ax_remove(struct hid_device *hdev) +{ + hid_hw_close(hdev); + hid_hw_stop(hdev); +} + static const struct hid_device_id ax_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802), }, { } @@ -149,9 +173,10 @@ static const struct hid_device_id ax_devices[] = { MODULE_DEVICE_TABLE(hid, ax_devices); static struct hid_driver ax_driver = { - .name = "acrux", - .id_table = ax_devices, - .probe = ax_probe, + .name = "acrux", + .id_table = ax_devices, + .probe = ax_probe, + .remove = ax_remove, }; static int __init ax_init(void) diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index d678cf3d33d5e..9477b2a46f494 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1256,9 +1256,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) }, -#if defined(CONFIG_HID_ACRUX_FF) || defined(CONFIG_HID_ACRUX_FF_MODULE) { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) }, -#endif { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, @@ -1302,6 +1300,9 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, @@ -1400,6 +1401,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) }, { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) }, { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) }, @@ -1801,6 +1803,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, 
USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, { } diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 92a0d61a7379c..090bf488196e1 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -103,6 +103,9 @@ #define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI 0x0242 #define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO 0x0243 #define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS 0x0244 +#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 +#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 +#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b @@ -466,6 +469,7 @@ #define USB_DEVICE_ID_ONTRAK_ADU100 0x0064 #define USB_VENDOR_ID_ORTEK 0x05a4 +#define USB_DEVICE_ID_ORTEK_PKB1700 0x1700 #define USB_DEVICE_ID_ORTEK_WKB2000 0x2000 #define USB_VENDOR_ID_PANJIT 0x134c diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 7f552bfad32c0..ebcc02a1c1c94 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -290,14 +290,6 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel goto ignore; } - if (field->report_type == HID_FEATURE_REPORT) { - if (device->driver->feature_mapping) { - device->driver->feature_mapping(device, hidinput, field, - usage); - } - goto ignore; - } - if (device->driver->input_mapping) { int ret = device->driver->input_mapping(device, hidinput, field, usage, &bit, &max); @@ -835,6 +827,24 @@ static void hidinput_close(struct input_dev *dev) hid_hw_close(hid); } +static void report_features(struct hid_device *hid) +{ + struct hid_driver *drv = hid->driver; + struct hid_report_enum *rep_enum; + struct hid_report *rep; + int i, j; + + if (!drv->feature_mapping) + return; + + rep_enum = &hid->report_enum[HID_FEATURE_REPORT]; + list_for_each_entry(rep, &rep_enum->report_list, list) + for (i = 0; i < rep->maxfield; i++) + for (j = 0; j < rep->field[i]->maxusage; j++) + drv->feature_mapping(hid, rep->field[i], + rep->field[i]->usage + j); +} + /* * Register the input device; print a message. 
* Configure the input layer interface @@ -863,7 +873,9 @@ int hidinput_connect(struct hid_device *hid, unsigned int force) return -1; } - for (k = HID_INPUT_REPORT; k <= HID_FEATURE_REPORT; k++) { + report_features(hid); + + for (k = HID_INPUT_REPORT; k <= HID_OUTPUT_REPORT; k++) { if (k == HID_OUTPUT_REPORT && hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORTS) continue; diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 698e6459fd0b3..a3972bbfa3664 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -258,7 +258,7 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda input_report_abs(input, ABS_MT_TRACKING_ID, id); input_report_abs(input, ABS_MT_TOUCH_MAJOR, touch_major << 2); input_report_abs(input, ABS_MT_TOUCH_MINOR, touch_minor << 2); - input_report_abs(input, ABS_MT_ORIENTATION, orientation); + input_report_abs(input, ABS_MT_ORIENTATION, -orientation); input_report_abs(input, ABS_MT_POSITION_X, x); input_report_abs(input, ABS_MT_POSITION_Y, y); @@ -397,7 +397,7 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h input_set_abs_params(input, ABS_MT_TRACKING_ID, 0, 15, 0, 0); input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255, 4, 0); input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255, 4, 0); - input_set_abs_params(input, ABS_MT_ORIENTATION, -32, 31, 1, 0); + input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0); /* Note: Touch Y position from the device is inverted relative * to how pointer motion is reported (and relative to how USB @@ -418,6 +418,8 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h input_set_abs_params(input, ABS_MT_POSITION_Y, -2456, 2565, 4, 0); } + + input_set_events_per_packet(input, 60); } if (report_undeciphered) { @@ -499,9 +501,17 @@ static int magicmouse_probe(struct hid_device *hdev, } report->size = 6; + /* + * The device reponds with 'invalid report id' when feature + * report switching it into multitouch mode is sent to it. + * + * This results in -EIO from the _raw low-level transport callback, + * but there seems to be no other way of switching the mode. + * Thus the super-ugly hacky success check below. + */ ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature), HID_FEATURE_REPORT); - if (ret != sizeof(feature)) { + if (ret != -EIO) { hid_err(hdev, "unable to request touch data (%d)\n", ret); goto err_stop_hw; } diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 07d3183fdde50..2bbc9545f5ccd 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -122,7 +122,7 @@ struct mt_class mt_classes[] = { { } }; -static void mt_feature_mapping(struct hid_device *hdev, struct hid_input *hi, +static void mt_feature_mapping(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage) { if (usage->hid == HID_DG_INPUTMODE) { diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c index e90edfc63051b..ad6faa6c0ceac 100644 --- a/drivers/hid/hid-ortek.c +++ b/drivers/hid/hid-ortek.c @@ -1,5 +1,5 @@ /* - * HID driver for Ortek WKB-2000 (wireless keyboard + mouse trackpad). + * HID driver for Ortek PKB-1700/WKB-2000 (wireless keyboard + mouse trackpad). 
* Fixes LogicalMaximum error in USB report description, see * http://bugzilla.kernel.org/show_bug.cgi?id=14787 * @@ -30,6 +30,7 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc, } static const struct hid_device_id ortek_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, { } }; diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index 468e87b53ed26..8f06044a3e3e8 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -102,15 +102,14 @@ static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count, } /* the first byte is expected to be a report number */ -static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) +/* This function is to be called with the minors_lock mutex held */ +static ssize_t hidraw_send_report(struct file *file, const char __user *buffer, size_t count, unsigned char report_type) { unsigned int minor = iminor(file->f_path.dentry->d_inode); struct hid_device *dev; __u8 *buf; int ret = 0; - mutex_lock(&minors_lock); - if (!hidraw_table[minor]) { ret = -ENODEV; goto out; @@ -148,14 +147,92 @@ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t goto out_free; } - ret = dev->hid_output_raw_report(dev, buf, count, HID_OUTPUT_REPORT); + ret = dev->hid_output_raw_report(dev, buf, count, report_type); out_free: kfree(buf); out: + return ret; +} + +/* the first byte is expected to be a report number */ +static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) +{ + ssize_t ret; + mutex_lock(&minors_lock); + ret = hidraw_send_report(file, buffer, count, HID_OUTPUT_REPORT); mutex_unlock(&minors_lock); return ret; } + +/* This function performs a Get_Report transfer over the control endpoint + per section 7.2.1 of the HID specification, version 1.1. The first byte + of buffer is the report number to request, or 0x0 if the defice does not + use numbered reports. The report_type parameter can be HID_FEATURE_REPORT + or HID_INPUT_REPORT. This function is to be called with the minors_lock + mutex held. */ +static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t count, unsigned char report_type) +{ + unsigned int minor = iminor(file->f_path.dentry->d_inode); + struct hid_device *dev; + __u8 *buf; + int ret = 0, len; + unsigned char report_number; + + dev = hidraw_table[minor]->hid; + + if (!dev->hid_get_raw_report) { + ret = -ENODEV; + goto out; + } + + if (count > HID_MAX_BUFFER_SIZE) { + printk(KERN_WARNING "hidraw: pid %d passed too large report\n", + task_pid_nr(current)); + ret = -EINVAL; + goto out; + } + + if (count < 2) { + printk(KERN_WARNING "hidraw: pid %d passed too short report\n", + task_pid_nr(current)); + ret = -EINVAL; + goto out; + } + + buf = kmalloc(count * sizeof(__u8), GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto out; + } + + /* Read the first byte from the user. This is the report number, + which is passed to dev->hid_get_raw_report(). */ + if (copy_from_user(&report_number, buffer, 1)) { + ret = -EFAULT; + goto out_free; + } + + ret = dev->hid_get_raw_report(dev, report_number, buf, count, report_type); + + if (ret < 0) + goto out_free; + + len = (ret < count) ? 
ret : count; + + if (copy_to_user(buffer, buf, len)) { + ret = -EFAULT; + goto out_free; + } + + ret = len; + +out_free: + kfree(buf); +out: + return ret; +} + static unsigned int hidraw_poll(struct file *file, poll_table *wait) { struct hidraw_list *list = file->private_data; @@ -295,7 +372,24 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd, default: { struct hid_device *hid = dev->hid; - if (_IOC_TYPE(cmd) != 'H' || _IOC_DIR(cmd) != _IOC_READ) { + if (_IOC_TYPE(cmd) != 'H') { + ret = -EINVAL; + break; + } + + if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSFEATURE(0))) { + int len = _IOC_SIZE(cmd); + ret = hidraw_send_report(file, user_arg, len, HID_FEATURE_REPORT); + break; + } + if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGFEATURE(0))) { + int len = _IOC_SIZE(cmd); + ret = hidraw_get_report(file, user_arg, len, HID_FEATURE_REPORT); + break; + } + + /* Begin Read-only ioctls. */ + if (_IOC_DIR(cmd) != _IOC_READ) { ret = -EINVAL; break; } @@ -327,7 +421,7 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd, -EFAULT : len; break; } - } + } ret = -ENOTTY; } diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c new file mode 100644 index 0000000000000..714cd8cc9579b --- /dev/null +++ b/drivers/hid/uhid.c @@ -0,0 +1,572 @@ +/* + * User-space I/O driver support for HID subsystem + * Copyright (c) 2012 David Herrmann + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define UHID_NAME "uhid" +#define UHID_BUFSIZE 32 + +struct uhid_device { + struct mutex devlock; + bool running; + + __u8 *rd_data; + uint rd_size; + + struct hid_device *hid; + struct uhid_event input_buf; + + wait_queue_head_t waitq; + spinlock_t qlock; + __u8 head; + __u8 tail; + struct uhid_event *outq[UHID_BUFSIZE]; + + struct mutex report_lock; + wait_queue_head_t report_wait; + atomic_t report_done; + atomic_t report_id; + struct uhid_event report_buf; +}; + +static struct miscdevice uhid_misc; + +static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev) +{ + __u8 newhead; + + newhead = (uhid->head + 1) % UHID_BUFSIZE; + + if (newhead != uhid->tail) { + uhid->outq[uhid->head] = ev; + uhid->head = newhead; + wake_up_interruptible(&uhid->waitq); + } else { + hid_warn(uhid->hid, "Output queue is full\n"); + kfree(ev); + } +} + +static int uhid_queue_event(struct uhid_device *uhid, __u32 event) +{ + unsigned long flags; + struct uhid_event *ev; + + ev = kzalloc(sizeof(*ev), GFP_KERNEL); + if (!ev) + return -ENOMEM; + + ev->type = event; + + spin_lock_irqsave(&uhid->qlock, flags); + uhid_queue(uhid, ev); + spin_unlock_irqrestore(&uhid->qlock, flags); + + return 0; +} + +static int uhid_hid_start(struct hid_device *hid) +{ + struct uhid_device *uhid = hid->driver_data; + + return uhid_queue_event(uhid, UHID_START); +} + +static void uhid_hid_stop(struct hid_device *hid) +{ + struct uhid_device *uhid = hid->driver_data; + + hid->claimed = 0; + uhid_queue_event(uhid, UHID_STOP); +} + +static int uhid_hid_open(struct hid_device *hid) +{ + struct uhid_device *uhid = hid->driver_data; + + return uhid_queue_event(uhid, UHID_OPEN); +} + +static void uhid_hid_close(struct hid_device *hid) +{ + struct uhid_device *uhid = hid->driver_data; + + uhid_queue_event(uhid, 
UHID_CLOSE); +} + +static int uhid_hid_input(struct input_dev *input, unsigned int type, + unsigned int code, int value) +{ + struct hid_device *hid = input_get_drvdata(input); + struct uhid_device *uhid = hid->driver_data; + unsigned long flags; + struct uhid_event *ev; + + ev = kzalloc(sizeof(*ev), GFP_ATOMIC); + if (!ev) + return -ENOMEM; + + ev->type = UHID_OUTPUT_EV; + ev->u.output_ev.type = type; + ev->u.output_ev.code = code; + ev->u.output_ev.value = value; + + spin_lock_irqsave(&uhid->qlock, flags); + uhid_queue(uhid, ev); + spin_unlock_irqrestore(&uhid->qlock, flags); + + return 0; +} + +static int uhid_hid_parse(struct hid_device *hid) +{ + struct uhid_device *uhid = hid->driver_data; + + return hid_parse_report(hid, uhid->rd_data, uhid->rd_size); +} + +static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum, + __u8 *buf, size_t count, unsigned char rtype) +{ + struct uhid_device *uhid = hid->driver_data; + __u8 report_type; + struct uhid_event *ev; + unsigned long flags; + int ret; + size_t uninitialized_var(len); + struct uhid_feature_answer_req *req; + + if (!uhid->running) + return -EIO; + + switch (rtype) { + case HID_FEATURE_REPORT: + report_type = UHID_FEATURE_REPORT; + break; + case HID_OUTPUT_REPORT: + report_type = UHID_OUTPUT_REPORT; + break; + case HID_INPUT_REPORT: + report_type = UHID_INPUT_REPORT; + break; + default: + return -EINVAL; + } + + ret = mutex_lock_interruptible(&uhid->report_lock); + if (ret) + return ret; + + ev = kzalloc(sizeof(*ev), GFP_KERNEL); + if (!ev) { + ret = -ENOMEM; + goto unlock; + } + + spin_lock_irqsave(&uhid->qlock, flags); + ev->type = UHID_FEATURE; + ev->u.feature.id = atomic_inc_return(&uhid->report_id); + ev->u.feature.rnum = rnum; + ev->u.feature.rtype = report_type; + + atomic_set(&uhid->report_done, 0); + uhid_queue(uhid, ev); + spin_unlock_irqrestore(&uhid->qlock, flags); + + ret = wait_event_interruptible_timeout(uhid->report_wait, + atomic_read(&uhid->report_done), 5 * HZ); + + /* + * Make sure "uhid->running" is cleared on shutdown before + * "uhid->report_done" is set. + */ + smp_rmb(); + if (!ret || !uhid->running) { + ret = -EIO; + } else if (ret < 0) { + ret = -ERESTARTSYS; + } else { + spin_lock_irqsave(&uhid->qlock, flags); + req = &uhid->report_buf.u.feature_answer; + + if (req->err) { + ret = -EIO; + } else { + ret = 0; + len = min(count, + min_t(size_t, req->size, UHID_DATA_MAX)); + memcpy(buf, req->data, len); + } + + spin_unlock_irqrestore(&uhid->qlock, flags); + } + + atomic_set(&uhid->report_done, 1); + +unlock: + mutex_unlock(&uhid->report_lock); + return ret ? 
ret : len; +} + +static int uhid_hid_output_raw(struct hid_device *hid, __u8 *buf, size_t count, + unsigned char report_type) +{ + struct uhid_device *uhid = hid->driver_data; + __u8 rtype; + unsigned long flags; + struct uhid_event *ev; + + switch (report_type) { + case HID_FEATURE_REPORT: + rtype = UHID_FEATURE_REPORT; + break; + case HID_OUTPUT_REPORT: + rtype = UHID_OUTPUT_REPORT; + break; + default: + return -EINVAL; + } + + if (count < 1 || count > UHID_DATA_MAX) + return -EINVAL; + + ev = kzalloc(sizeof(*ev), GFP_KERNEL); + if (!ev) + return -ENOMEM; + + ev->type = UHID_OUTPUT; + ev->u.output.size = count; + ev->u.output.rtype = rtype; + memcpy(ev->u.output.data, buf, count); + + spin_lock_irqsave(&uhid->qlock, flags); + uhid_queue(uhid, ev); + spin_unlock_irqrestore(&uhid->qlock, flags); + + return count; +} + +static struct hid_ll_driver uhid_hid_driver = { + .start = uhid_hid_start, + .stop = uhid_hid_stop, + .open = uhid_hid_open, + .close = uhid_hid_close, + .hidinput_input_event = uhid_hid_input, + .parse = uhid_hid_parse, +}; + +static int uhid_dev_create(struct uhid_device *uhid, + const struct uhid_event *ev) +{ + struct hid_device *hid; + int ret; + + if (uhid->running) + return -EALREADY; + + uhid->rd_size = ev->u.create.rd_size; + if (uhid->rd_size <= 0 || uhid->rd_size > HID_MAX_DESCRIPTOR_SIZE) + return -EINVAL; + + uhid->rd_data = kmalloc(uhid->rd_size, GFP_KERNEL); + if (!uhid->rd_data) + return -ENOMEM; + + if (copy_from_user(uhid->rd_data, ev->u.create.rd_data, + uhid->rd_size)) { + ret = -EFAULT; + goto err_free; + } + + hid = hid_allocate_device(); + if (IS_ERR(hid)) { + ret = PTR_ERR(hid); + goto err_free; + } + + strncpy(hid->name, ev->u.create.name, 127); + hid->name[127] = 0; + strncpy(hid->phys, ev->u.create.phys, 63); + hid->phys[63] = 0; + strncpy(hid->uniq, ev->u.create.uniq, 63); + hid->uniq[63] = 0; + + hid->ll_driver = &uhid_hid_driver; + hid->hid_get_raw_report = uhid_hid_get_raw; + hid->hid_output_raw_report = uhid_hid_output_raw; + hid->bus = ev->u.create.bus; + hid->vendor = ev->u.create.vendor; + hid->product = ev->u.create.product; + hid->version = ev->u.create.version; + hid->country = ev->u.create.country; + hid->driver_data = uhid; + hid->dev.parent = uhid_misc.this_device; + + uhid->hid = hid; + uhid->running = true; + + ret = hid_add_device(hid); + if (ret) { + hid_err(hid, "Cannot register HID device\n"); + goto err_hid; + } + + return 0; + +err_hid: + hid_destroy_device(hid); + uhid->hid = NULL; + uhid->running = false; +err_free: + kfree(uhid->rd_data); + return ret; +} + +static int uhid_dev_destroy(struct uhid_device *uhid) +{ + if (!uhid->running) + return -EINVAL; + + /* clear "running" before setting "report_done" */ + uhid->running = false; + smp_wmb(); + atomic_set(&uhid->report_done, 1); + wake_up_interruptible(&uhid->report_wait); + + hid_destroy_device(uhid->hid); + kfree(uhid->rd_data); + + return 0; +} + +static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev) +{ + if (!uhid->running) + return -EINVAL; + + hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input.data, + min_t(size_t, ev->u.input.size, UHID_DATA_MAX), 0); + + return 0; +} + +static int uhid_dev_feature_answer(struct uhid_device *uhid, + struct uhid_event *ev) +{ + unsigned long flags; + + if (!uhid->running) + return -EINVAL; + + spin_lock_irqsave(&uhid->qlock, flags); + + /* id for old report; drop it silently */ + if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id) + goto unlock; + if (atomic_read(&uhid->report_done)) + goto 
unlock; + + memcpy(&uhid->report_buf, ev, sizeof(*ev)); + atomic_set(&uhid->report_done, 1); + wake_up_interruptible(&uhid->report_wait); + +unlock: + spin_unlock_irqrestore(&uhid->qlock, flags); + return 0; +} + +static int uhid_char_open(struct inode *inode, struct file *file) +{ + struct uhid_device *uhid; + + uhid = kzalloc(sizeof(*uhid), GFP_KERNEL); + if (!uhid) + return -ENOMEM; + + mutex_init(&uhid->devlock); + mutex_init(&uhid->report_lock); + spin_lock_init(&uhid->qlock); + init_waitqueue_head(&uhid->waitq); + init_waitqueue_head(&uhid->report_wait); + uhid->running = false; + atomic_set(&uhid->report_done, 1); + + file->private_data = uhid; + nonseekable_open(inode, file); + + return 0; +} + +static int uhid_char_release(struct inode *inode, struct file *file) +{ + struct uhid_device *uhid = file->private_data; + unsigned int i; + + uhid_dev_destroy(uhid); + + for (i = 0; i < UHID_BUFSIZE; ++i) + kfree(uhid->outq[i]); + + kfree(uhid); + + return 0; +} + +static ssize_t uhid_char_read(struct file *file, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct uhid_device *uhid = file->private_data; + int ret; + unsigned long flags; + size_t len; + + /* they need at least the "type" member of uhid_event */ + if (count < sizeof(__u32)) + return -EINVAL; + +try_again: + if (file->f_flags & O_NONBLOCK) { + if (uhid->head == uhid->tail) + return -EAGAIN; + } else { + ret = wait_event_interruptible(uhid->waitq, + uhid->head != uhid->tail); + if (ret) + return ret; + } + + ret = mutex_lock_interruptible(&uhid->devlock); + if (ret) + return ret; + + if (uhid->head == uhid->tail) { + mutex_unlock(&uhid->devlock); + goto try_again; + } else { + len = min(count, sizeof(**uhid->outq)); + if (copy_to_user(buffer, uhid->outq[uhid->tail], len)) { + ret = -EFAULT; + } else { + kfree(uhid->outq[uhid->tail]); + uhid->outq[uhid->tail] = NULL; + + spin_lock_irqsave(&uhid->qlock, flags); + uhid->tail = (uhid->tail + 1) % UHID_BUFSIZE; + spin_unlock_irqrestore(&uhid->qlock, flags); + } + } + + mutex_unlock(&uhid->devlock); + return ret ? ret : len; +} + +static ssize_t uhid_char_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct uhid_device *uhid = file->private_data; + int ret; + size_t len; + + /* we need at least the "type" member of uhid_event */ + if (count < sizeof(__u32)) + return -EINVAL; + + ret = mutex_lock_interruptible(&uhid->devlock); + if (ret) + return ret; + + memset(&uhid->input_buf, 0, sizeof(uhid->input_buf)); + len = min(count, sizeof(uhid->input_buf)); + if (copy_from_user(&uhid->input_buf, buffer, len)) { + ret = -EFAULT; + goto unlock; + } + + switch (uhid->input_buf.type) { + case UHID_CREATE: + ret = uhid_dev_create(uhid, &uhid->input_buf); + break; + case UHID_DESTROY: + ret = uhid_dev_destroy(uhid); + break; + case UHID_INPUT: + ret = uhid_dev_input(uhid, &uhid->input_buf); + break; + case UHID_FEATURE_ANSWER: + ret = uhid_dev_feature_answer(uhid, &uhid->input_buf); + break; + default: + ret = -EOPNOTSUPP; + } + +unlock: + mutex_unlock(&uhid->devlock); + + /* return "count" not "len" to not confuse the caller */ + return ret ? 
ret : count; +} + +static unsigned int uhid_char_poll(struct file *file, poll_table *wait) +{ + struct uhid_device *uhid = file->private_data; + + poll_wait(file, &uhid->waitq, wait); + + if (uhid->head != uhid->tail) + return POLLIN | POLLRDNORM; + + return 0; +} + +static const struct file_operations uhid_fops = { + .owner = THIS_MODULE, + .open = uhid_char_open, + .release = uhid_char_release, + .read = uhid_char_read, + .write = uhid_char_write, + .poll = uhid_char_poll, + .llseek = no_llseek, +}; + +static struct miscdevice uhid_misc = { + .fops = &uhid_fops, + .minor = MISC_DYNAMIC_MINOR, + .name = UHID_NAME, +}; + +static int __init uhid_init(void) +{ + return misc_register(&uhid_misc); +} + +static void __exit uhid_exit(void) +{ + misc_deregister(&uhid_misc); +} + +module_init(uhid_init); +module_exit(uhid_exit); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("David Herrmann "); +MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem"); diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index b336dd84036f4..38c261a40c744 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -799,6 +799,40 @@ static int hid_alloc_buffers(struct usb_device *dev, struct hid_device *hid) return 0; } +static int usbhid_get_raw_report(struct hid_device *hid, + unsigned char report_number, __u8 *buf, size_t count, + unsigned char report_type) +{ + struct usbhid_device *usbhid = hid->driver_data; + struct usb_device *dev = hid_to_usb_dev(hid); + struct usb_interface *intf = usbhid->intf; + struct usb_host_interface *interface = intf->cur_altsetting; + int skipped_report_id = 0; + int ret; + + /* Byte 0 is the report number. Report data starts at byte 1.*/ + buf[0] = report_number; + if (report_number == 0x0) { + /* Offset the return buffer by 1, so that the report ID + will remain in byte 0. 
*/ + buf++; + count--; + skipped_report_id = 1; + } + ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), + HID_REQ_GET_REPORT, + USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, + ((report_type + 1) << 8) | report_number, + interface->desc.bInterfaceNumber, buf, count, + USB_CTRL_SET_TIMEOUT); + + /* count also the report id */ + if (ret > 0 && skipped_report_id) + ret++; + + return ret; +} + static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t count, unsigned char report_type) { @@ -1139,6 +1173,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id * usb_set_intfdata(intf, hid); hid->ll_driver = &usb_hid_driver; + hid->hid_get_raw_report = usbhid_get_raw_report; hid->hid_output_raw_report = usbhid_output_raw_report; hid->ff_init = hid_pidff_init; #ifdef CONFIG_USB_HIDDEV diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c index bc6e2ab3a361d..82e3ef8f0996b 100644 --- a/drivers/hwmon/ibmaem.c +++ b/drivers/hwmon/ibmaem.c @@ -432,13 +432,15 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg, aem_send_message(ipmi); res = wait_for_completion_timeout(&ipmi->read_complete, IPMI_TIMEOUT); - if (!res) - return -ETIMEDOUT; + if (!res) { + res = -ETIMEDOUT; + goto out; + } if (ipmi->rx_result || ipmi->rx_msg_len != rs_size || memcmp(&rs_resp->id, &system_x_id, sizeof(system_x_id))) { - kfree(rs_resp); - return -ENOENT; + res = -ENOENT; + goto out; } switch (size) { @@ -463,8 +465,11 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg, break; } } + res = 0; - return 0; +out: + kfree(rs_resp); + return res; } /* Update AEM energy registers */ diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c index a610e7880fb3e..38a41d27f3da1 100644 --- a/drivers/hwmon/sht15.c +++ b/drivers/hwmon/sht15.c @@ -333,11 +333,11 @@ static inline int sht15_calc_humid(struct sht15_data *data) const int c1 = -4; const int c2 = 40500; /* x 10 ^ -6 */ - const int c3 = -2800; /* x10 ^ -9 */ + const int c3 = -28; /* x 10 ^ -7 */ RHlinear = c1*1000 + c2 * data->val_humid/1000 - + (data->val_humid * data->val_humid * c3)/1000000; + + (data->val_humid * data->val_humid * c3) / 10000; return (temp - 25000) * (10000 + 80 * data->val_humid) / 1000000 + RHlinear; } diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c index 38319a69bd0a9..d6d58684712bc 100644 --- a/drivers/i2c/algos/i2c-algo-bit.c +++ b/drivers/i2c/algos/i2c-algo-bit.c @@ -232,9 +232,17 @@ static int i2c_inb(struct i2c_adapter *i2c_adap) * Sanity check for the adapter hardware - check the reaction of * the bus lines only if it seems to be idle. 
*/ -static int test_bus(struct i2c_algo_bit_data *adap, char *name) +static int test_bus(struct i2c_adapter *i2c_adap) { - int scl, sda; + struct i2c_algo_bit_data *adap = i2c_adap->algo_data; + const char *name = i2c_adap->name; + int scl, sda, ret; + + if (adap->pre_xfer) { + ret = adap->pre_xfer(i2c_adap); + if (ret < 0) + return -ENODEV; + } if (adap->getscl == NULL) pr_info("%s: Testing SDA only, SCL is not readable\n", name); @@ -297,11 +305,19 @@ static int test_bus(struct i2c_algo_bit_data *adap, char *name) "while pulling SCL high!\n", name); goto bailout; } + + if (adap->post_xfer) + adap->post_xfer(i2c_adap); + pr_info("%s: Test OK\n", name); return 0; bailout: sdahi(adap); sclhi(adap); + + if (adap->post_xfer) + adap->post_xfer(i2c_adap); + return -ENODEV; } @@ -607,7 +623,7 @@ static int __i2c_bit_add_bus(struct i2c_adapter *adap, int ret; if (bit_test) { - ret = test_bus(bit_adap, adap->name); + ret = test_bus(adap); if (ret < 0) return -ENODEV; } diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 113505a6434ed..9e883c57f55e1 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -442,6 +442,22 @@ config I2C_MPC This driver can also be built as a module. If so, the module will be called i2c-mpc. +config I2C_MSM + tristate "MSM" + depends on I2C && ARCH_MSM + default y + help + If you say yes to this option, support will be included for the + built-in I2C interface on the MSM7X00A family processors. + +config I2C_QUP + tristate "I2C_QUP" + depends on I2C && ARCH_MSM7X30 + default y + help + If you say yes to this option, support will be included for the + built-in I2C interface on the MSM family processors. + config I2C_MV64XXX tristate "Marvell mv64xxx I2C Controller" depends on (MV64X60 || PLAT_ORION) && EXPERIMENTAL diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 9d2d0ec7fb232..060bb9292521f 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -42,6 +42,8 @@ obj-$(CONFIG_I2C_INTEL_MID) += i2c-intel-mid.o obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o obj-$(CONFIG_I2C_MPC) += i2c-mpc.o +obj-$(CONFIG_I2C_MSM) += i2c-msm.o +obj-$(CONFIG_I2C_QUP) += i2c-qup.o obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o obj-$(CONFIG_I2C_NOMADIK) += i2c-nomadik.o obj-$(CONFIG_I2C_NUC900) += i2c-nuc900.o diff --git a/drivers/i2c/busses/i2c-msm.c b/drivers/i2c/busses/i2c-msm.c new file mode 100644 index 0000000000000..1e2e493f44cc0 --- /dev/null +++ b/drivers/i2c/busses/i2c-msm.c @@ -0,0 +1,612 @@ +/* drivers/i2c/busses/i2c-msm.c + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEBUG 0 + +enum { + I2C_WRITE_DATA = 0x00, + I2C_CLK_CTL = 0x04, + I2C_STATUS = 0x08, + I2C_READ_DATA = 0x0c, + I2C_INTERFACE_SELECT = 0x10, + + I2C_WRITE_DATA_DATA_BYTE = 0xff, + I2C_WRITE_DATA_ADDR_BYTE = 1U << 8, + I2C_WRITE_DATA_LAST_BYTE = 1U << 9, + + I2C_CLK_CTL_FS_DIVIDER_VALUE = 0xff, + I2C_CLK_CTL_HS_DIVIDER_VALUE = 7U << 8, + + I2C_STATUS_WR_BUFFER_FULL = 1U << 0, + I2C_STATUS_RD_BUFFER_FULL = 1U << 1, + I2C_STATUS_BUS_ERROR = 1U << 2, + I2C_STATUS_PACKET_NACKED = 1U << 3, + I2C_STATUS_ARB_LOST = 1U << 4, + I2C_STATUS_INVALID_WRITE = 1U << 5, + I2C_STATUS_FAILED = 3U << 6, + I2C_STATUS_BUS_ACTIVE = 1U << 8, + I2C_STATUS_BUS_MASTER = 1U << 9, + I2C_STATUS_ERROR_MASK = 0xfc, + + I2C_INTERFACE_SELECT_INTF_SELECT = 1U << 0, + I2C_INTERFACE_SELECT_SCL = 1U << 8, + I2C_INTERFACE_SELECT_SDA = 1U << 9, +}; + +struct msm_i2c_dev { + struct device *dev; + void __iomem *base; /* virtual */ + int irq; + struct clk *clk; + struct i2c_adapter adapter; + + spinlock_t lock; + + struct i2c_msg *msg; + int rem; + int pos; + int cnt; + int ret; + bool need_flush; + int flush_cnt; + void *complete; + struct wake_lock wakelock; + bool is_suspended; +}; + +#if DEBUG +static void +dump_status(uint32_t status) +{ + printk("STATUS (0x%.8x): ", status); + if (status & I2C_STATUS_BUS_MASTER) + printk("MST "); + if (status & I2C_STATUS_BUS_ACTIVE) + printk("ACT "); + if (status & I2C_STATUS_INVALID_WRITE) + printk("INV_WR "); + if (status & I2C_STATUS_ARB_LOST) + printk("ARB_LST "); + if (status & I2C_STATUS_PACKET_NACKED) + printk("NAK "); + if (status & I2C_STATUS_BUS_ERROR) + printk("BUS_ERR "); + if (status & I2C_STATUS_RD_BUFFER_FULL) + printk("RD_FULL "); + if (status & I2C_STATUS_WR_BUFFER_FULL) + printk("WR_FULL "); + if (status & I2C_STATUS_FAILED) + printk("FAIL 0x%x", (status & I2C_STATUS_FAILED)); + printk("\n"); +} +#endif + +static void msm_i2c_write_delay(struct msm_i2c_dev *dev) +{ + /* If scl is still high we have >4us (for 100kbps) to write the data + * register before we risk hitting a bug where the controller releases + * scl to soon after driving sda low. Writing the data after the + * scheduled release time for scl also avoids the bug. + */ + if (readl(dev->base + I2C_INTERFACE_SELECT) & I2C_INTERFACE_SELECT_SCL) + return; + udelay(6); +} + +static bool msm_i2c_fill_write_buffer(struct msm_i2c_dev *dev) +{ + uint16_t val; + if (dev->pos < 0) { + val = I2C_WRITE_DATA_ADDR_BYTE | dev->msg->addr << 1; + if (dev->msg->flags & I2C_M_RD) + val |= 1; + if (dev->rem == 1 && dev->msg->len == 0) + val |= I2C_WRITE_DATA_LAST_BYTE; + msm_i2c_write_delay(dev); + writel(val, dev->base + I2C_WRITE_DATA); + dev->pos++; + return true; + } + + if (dev->msg->flags & I2C_M_RD) + return false; + + if (!dev->cnt) + return false; + + /* Ready to take a byte */ + val = dev->msg->buf[dev->pos]; + if (dev->cnt == 1 && dev->rem == 1) + val |= I2C_WRITE_DATA_LAST_BYTE; + + msm_i2c_write_delay(dev); + writel(val, dev->base + I2C_WRITE_DATA); + dev->pos++; + dev->cnt--; + return true; +} + +static void msm_i2c_read_buffer(struct msm_i2c_dev *dev) +{ + /* + * Theres something in the FIFO. + * Are we expecting data or flush crap? 
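/*
 * For reference (illustration only): the first entry that
 * msm_i2c_fill_write_buffer() above pushes into I2C_WRITE_DATA is the
 * address byte, tagged with ADDR_BYTE; the final byte of the transfer
 * additionally carries LAST_BYTE so the controller generates the stop
 * condition.
 */
static inline uint16_t ex_msm_i2c_addr_entry(const struct i2c_msg *msg,
					     bool last)
{
	uint16_t val = I2C_WRITE_DATA_ADDR_BYTE | (msg->addr << 1);

	if (msg->flags & I2C_M_RD)
		val |= 1;			/* R/W bit set: read */
	if (last)				/* zero-length message: stop right away */
		val |= I2C_WRITE_DATA_LAST_BYTE;
	return val;
}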
+ */ + if ((dev->msg->flags & I2C_M_RD) && dev->pos >= 0 && dev->cnt) { + switch (dev->cnt) { + case 1: + if (dev->pos != 0) + break; + dev->need_flush = true; + /* fall-trough */ + case 2: + writel(I2C_WRITE_DATA_LAST_BYTE, + dev->base + I2C_WRITE_DATA); + } + dev->msg->buf[dev->pos] = readl(dev->base + I2C_READ_DATA); + dev->cnt--; + dev->pos++; + } else { /* FLUSH */ + if (dev->flush_cnt & 1) { + /* + * Stop requests are sometimes ignored, but writing + * more than one request generates a write error. + */ + writel(I2C_WRITE_DATA_LAST_BYTE, + dev->base + I2C_WRITE_DATA); + } + readl(dev->base + I2C_READ_DATA); + if (dev->need_flush) + dev->need_flush = false; + else + dev->flush_cnt++; + } +} + +static void msm_i2c_interrupt_locked(struct msm_i2c_dev *dev) +{ + uint32_t status = readl(dev->base + I2C_STATUS); + bool not_done = true; + +#if DEBUG + dump_status(status); +#endif + if (!dev->msg) { + dev_err(dev->dev, + "IRQ but nothing to do!, status %x\n", status); + return; + } + if (status & I2C_STATUS_ERROR_MASK) + goto out_err; + + if (!(status & I2C_STATUS_WR_BUFFER_FULL)) + not_done = msm_i2c_fill_write_buffer(dev); + if (status & I2C_STATUS_RD_BUFFER_FULL) + msm_i2c_read_buffer(dev); + + if (dev->pos >= 0 && dev->cnt == 0) { + if (dev->rem > 1) { + dev->rem--; + dev->msg++; + dev->pos = -1; + dev->cnt = dev->msg->len; + } else if (!not_done && !dev->need_flush) + goto out_complete; + } + return; + +out_err: + dev_err(dev->dev, "error, status %x (%02X)\n", status, dev->msg->addr); + dev->ret = -EIO; +out_complete: + complete(dev->complete); +} + +static irqreturn_t +msm_i2c_interrupt(int irq, void *devid) +{ + struct msm_i2c_dev *dev = devid; + + spin_lock(&dev->lock); + msm_i2c_interrupt_locked(dev); + spin_unlock(&dev->lock); + + return IRQ_HANDLED; +} + +static int +msm_i2c_poll_notbusy(struct msm_i2c_dev *dev, int warn) +{ + uint32_t retries = 0; + + while (retries != 200) { + uint32_t status = readl(dev->base + I2C_STATUS); + + if (!(status & I2C_STATUS_BUS_ACTIVE)) { + if (retries && warn) + dev_warn(dev->dev, + "Warning bus was busy (%d)\n", retries); + return 0; + } + if (retries++ > 100) + usleep_range(100, 200); + } + dev_err(dev->dev, "Error waiting for notbusy (%d)\n", warn); + return -ETIMEDOUT; +} + +static int +msm_i2c_recover_bus_busy(struct msm_i2c_dev *dev) +{ + int i; + uint32_t status = readl(dev->base + I2C_STATUS); + int gpio_clk, gpio_dat; + bool gpio_clk_status = false; + + if (!(status & (I2C_STATUS_BUS_ACTIVE | I2C_STATUS_WR_BUFFER_FULL))) + return 0; + + msm_set_i2c_mux(true, &gpio_clk, &gpio_dat); + + if (status & I2C_STATUS_RD_BUFFER_FULL) { + dev_warn(dev->dev, "Read buffer full, status %x, intf %x\n", + status, readl(dev->base + I2C_INTERFACE_SELECT)); + writel(I2C_WRITE_DATA_LAST_BYTE, dev->base + I2C_WRITE_DATA); + readl(dev->base + I2C_READ_DATA); + } + else if (status & I2C_STATUS_BUS_MASTER) { + dev_warn(dev->dev, "Still the bus master, status %x, intf %x\n", + status, readl(dev->base + I2C_INTERFACE_SELECT)); + writel(I2C_WRITE_DATA_LAST_BYTE | 0xff, + dev->base + I2C_WRITE_DATA); + } + + gpio_request(gpio_clk, "gpio_clk"); + gpio_request(gpio_dat, "gpio_dat"); + + dev_warn(dev->dev, "i2c_scl: %d, i2c_sda: %d\n", + gpio_get_value(gpio_clk), gpio_get_value(gpio_dat)); + + for (i = 0; i < 9; i++) { + if (gpio_get_value(gpio_dat) && gpio_clk_status) + break; + gpio_direction_output(gpio_clk, 0); + udelay(5); + gpio_direction_output(gpio_dat, 0); + udelay(5); + gpio_direction_input(gpio_clk); + udelay(5); + if (!gpio_get_value(gpio_clk)) + 
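/*
 * The GPIO loop above is the standard I2C "unwedge" sequence: when a
 * slave is stuck driving SDA low mid-byte, pulsing SCL up to nine times
 * lets it clock out the rest of its byte and release the bus, after which
 * a stop can be issued.  Reduced to its core (sketch only; the real code
 * above also handles clock stretching and re-drives SDA each cycle):
 */
static int ex_i2c_unwedge(int gpio_scl, int gpio_sda)
{
	int i;

	for (i = 0; i < 9; i++) {
		if (gpio_get_value(gpio_sda))
			return 0;			/* SDA released */
		gpio_direction_output(gpio_scl, 0);	/* drive SCL low */
		udelay(5);
		gpio_direction_input(gpio_scl);		/* release SCL, let it float high */
		udelay(5);
	}
	return -EBUSY;
}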
usleep_range(20, 30); + if (!gpio_get_value(gpio_clk)) + msleep(10); + gpio_clk_status = gpio_get_value(gpio_clk); + gpio_direction_input(gpio_dat); + udelay(5); + } + msm_set_i2c_mux(false, NULL, NULL); + udelay(10); + + status = readl(dev->base + I2C_STATUS); + if (!(status & I2C_STATUS_BUS_ACTIVE)) { + dev_info(dev->dev, "Bus busy cleared after %d clock cycles, " + "status %x, intf %x\n", + i, status, readl(dev->base + I2C_INTERFACE_SELECT)); + return 0; + } + + dev_warn(dev->dev, "Bus still busy, status %x, intf %x\n", + status, readl(dev->base + I2C_INTERFACE_SELECT)); + return -EBUSY; +} + + +static int +msm_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) +{ + DECLARE_COMPLETION_ONSTACK(complete); + struct msm_i2c_dev *dev = i2c_get_adapdata(adap); + int ret; + long timeout; + unsigned long flags; + + if (WARN_ON(!num)) + return -EINVAL; + + /* + * If there is an i2c_xfer after driver has been suspended, + * grab wakelock to abort suspend. + */ + if (dev->is_suspended) + wake_lock(&dev->wakelock); + clk_enable(dev->clk); + enable_irq(dev->irq); + + ret = msm_i2c_poll_notbusy(dev, 1); + if (ret) { + dev_err(dev->dev, "Still busy in starting xfer(%02X)\n", msgs->addr); + ret = msm_i2c_recover_bus_busy(dev); + if (ret) + goto err; + } + + spin_lock_irqsave(&dev->lock, flags); + if (dev->flush_cnt) { + dev_warn(dev->dev, "%d unrequested bytes read\n", + dev->flush_cnt); + } + dev->msg = msgs; + dev->rem = num; + dev->pos = -1; + dev->ret = num; + dev->need_flush = false; + dev->flush_cnt = 0; + dev->cnt = msgs->len; + dev->complete = &complete; + + msm_i2c_interrupt_locked(dev); + spin_unlock_irqrestore(&dev->lock, flags); + + /* + * Now that we've setup the xfer, the ISR will transfer the data + * and wake us up with dev->err set if there was an error + */ + + timeout = wait_for_completion_timeout(&complete, HZ); + if (msm_i2c_poll_notbusy(dev, 0)) /* Read may not have stopped in time */ + dev_err(dev->dev, "Still busy after xfer completion (%02X)\n", + msgs->addr); + + spin_lock_irqsave(&dev->lock, flags); + if (dev->flush_cnt) { + dev_warn(dev->dev, "%d unrequested bytes read\n", + dev->flush_cnt); + } + ret = dev->ret; + dev->complete = NULL; + dev->msg = NULL; + dev->rem = 0; + dev->pos = 0; + dev->ret = 0; + dev->flush_cnt = 0; + dev->cnt = 0; + spin_unlock_irqrestore(&dev->lock, flags); + + if (!timeout) { + dev_err(dev->dev, "Transaction timed out\n"); + ret = -ETIMEDOUT; + } + + if (ret < 0) { + dev_err(dev->dev, "Error during data xfer (%d) @%02X\n", ret, msgs->addr); + msm_i2c_recover_bus_busy(dev); + } +err: + disable_irq(dev->irq); + clk_disable(dev->clk); + if (dev->is_suspended) + wake_unlock(&dev->wakelock); + + return ret; +} + +static u32 +msm_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); +} + +static const struct i2c_algorithm msm_i2c_algo = { + .master_xfer = msm_i2c_xfer, + .functionality = msm_i2c_func, +}; + +static int +msm_i2c_probe(struct platform_device *pdev) +{ + struct msm_i2c_dev *dev; + struct resource *mem, *irq, *ioarea; + int ret; + int fs_div; + int hs_div; + int i2c_clk; + int clk_ctl; + int target_clk; + struct clk *clk; + + printk(KERN_INFO "msm_i2c_probe\n"); + + /* NOTE: driver uses the static register mapping */ + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(&pdev->dev, "no mem resource?\n"); + return -ENODEV; + } + irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!irq) { + dev_err(&pdev->dev, "no irq resource?\n"); + 
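/*
 * What msm_i2c_xfer() above receives from a typical client: a register
 * read arrives as two messages at the same address, a one-byte write
 * followed by an I2C_M_RD message, submitted as one atomic call into the
 * adapter.  The client and register below are made up:
 */
static int ex_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
	struct i2c_msg msgs[] = {
		{
			.addr	= client->addr,
			.flags	= 0,
			.len	= 1,
			.buf	= &reg,
		},
		{
			.addr	= client->addr,
			.flags	= I2C_M_RD,
			.len	= 1,
			.buf	= val,
		},
	};
	int ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));

	if (ret < 0)
		return ret;
	return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
}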
return -ENODEV; + } + + ioarea = request_mem_region(mem->start, (mem->end - mem->start) + 1, + pdev->name); + if (!ioarea) { + dev_err(&pdev->dev, "I2C region already claimed\n"); + return -EBUSY; + } + clk = clk_get(&pdev->dev, "i2c_clk"); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "Could not get clock\n"); + ret = PTR_ERR(clk); + goto err_clk_get_failed; + } + + dev = kzalloc(sizeof(struct msm_i2c_dev), GFP_KERNEL); + if (!dev) { + ret = -ENOMEM; + goto err_alloc_dev_failed; + } + + dev->dev = &pdev->dev; + dev->irq = irq->start; + dev->clk = clk; + dev->base = ioremap(mem->start, (mem->end - mem->start) + 1); + if (!dev->base) { + ret = -ENOMEM; + goto err_ioremap_failed; + } + + spin_lock_init(&dev->lock); + wake_lock_init(&dev->wakelock, WAKE_LOCK_SUSPEND, "i2c"); + platform_set_drvdata(pdev, dev); + + msm_set_i2c_mux(false, NULL, NULL); + + clk_enable(clk); + + /* I2C_HS_CLK = I2C_CLK/(3*(HS_DIVIDER_VALUE+1) */ + /* I2C_FS_CLK = I2C_CLK/(2*(FS_DIVIDER_VALUE+3) */ + /* FS_DIVIDER_VALUE = ((I2C_CLK / I2C_FS_CLK) / 2) - 3 */ + i2c_clk = 19200000; /* input clock */ + target_clk = 100000; + /* target_clk = 200000; */ + fs_div = ((i2c_clk / target_clk) / 2) - 3; + hs_div = 3; + clk_ctl = ((hs_div & 0x7) << 8) | (fs_div & 0xff); + writel(clk_ctl, dev->base + I2C_CLK_CTL); + printk(KERN_INFO "msm_i2c_probe: clk_ctl %x, %d Hz\n", + clk_ctl, i2c_clk / (2 * ((clk_ctl & 0xff) + 3))); + clk_disable(clk); + + i2c_set_adapdata(&dev->adapter, dev); + dev->adapter.algo = &msm_i2c_algo; + strncpy(dev->adapter.name, + "MSM I2C adapter", + sizeof(dev->adapter.name)); + + dev->adapter.nr = pdev->id; + ret = i2c_add_numbered_adapter(&dev->adapter); + if (ret) { + dev_err(&pdev->dev, "i2c_add_adapter failed\n"); + goto err_i2c_add_adapter_failed; + } + + ret = request_irq(dev->irq, msm_i2c_interrupt, + IRQF_DISABLED | IRQF_TRIGGER_RISING, pdev->name, dev); + if (ret) { + dev_err(&pdev->dev, "request_irq failed\n"); + goto err_request_irq_failed; + } + disable_irq(dev->irq); + return 0; + +/* free_irq(dev->irq, dev); */ +err_request_irq_failed: + i2c_del_adapter(&dev->adapter); +err_i2c_add_adapter_failed: + iounmap(dev->base); +err_ioremap_failed: + kfree(dev); +err_alloc_dev_failed: + clk_put(clk); +err_clk_get_failed: + release_mem_region(mem->start, (mem->end - mem->start) + 1); + return ret; +} + +static int +msm_i2c_remove(struct platform_device *pdev) +{ + struct msm_i2c_dev *dev = platform_get_drvdata(pdev); + struct resource *mem; + + platform_set_drvdata(pdev, NULL); + enable_irq(dev->irq); + free_irq(dev->irq, dev); + i2c_del_adapter(&dev->adapter); + wake_lock_destroy(&dev->wakelock); + clk_put(dev->clk); + iounmap(dev->base); + kfree(dev); + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(mem->start, (mem->end - mem->start) + 1); + return 0; +} + +static int msm_i2c_suspend_noirq(struct device *device) +{ + struct platform_device *pdev = to_platform_device(device); + struct msm_i2c_dev *dev = platform_get_drvdata(pdev); + + /* Block to allow any i2c_xfers to finish */ + i2c_lock_adapter(&dev->adapter); + dev->is_suspended = true; + i2c_unlock_adapter(&dev->adapter); + return 0; +} + +static int msm_i2c_resume_noirq(struct device *device) { + struct platform_device *pdev = to_platform_device(device); + struct msm_i2c_dev *dev = platform_get_drvdata(pdev); + + /* Block to allow any i2c_xfers to finish */ + i2c_lock_adapter(&dev->adapter); + dev->is_suspended = false; + i2c_unlock_adapter(&dev->adapter); + return 0; +} + +static struct dev_pm_ops msm_i2c_pm_ops = { + 
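/*
 * Worked example of the divider setup in msm_i2c_probe() above, for the
 * 19.2 MHz input clock and the 100 kHz target used there:
 *
 *   fs_div  = (19200000 / 100000) / 2 - 3 = 93
 *   clk_ctl = (hs_div << 8) | fs_div = (3 << 8) | 93 = 0x35d
 *   actual  = 19200000 / (2 * (93 + 3)) = 100000 Hz
 *
 * so the probe banner should read "clk_ctl 35d, 100000 Hz".  Folded into
 * a helper (illustration only):
 */
static inline uint32_t ex_msm_i2c_clk_ctl(uint32_t i2c_clk, uint32_t fs_clk)
{
	uint32_t fs_div = (i2c_clk / fs_clk) / 2 - 3;
	uint32_t hs_div = 3;			/* fixed, as in the probe */

	return ((hs_div & 0x7) << 8) | (fs_div & 0xff);
}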
.suspend_noirq = msm_i2c_suspend_noirq, + .resume_noirq = msm_i2c_resume_noirq, +}; + +static struct platform_driver msm_i2c_driver = { + .probe = msm_i2c_probe, + .remove = msm_i2c_remove, + .driver = { + .name = "msm_i2c", + .owner = THIS_MODULE, + .pm = &msm_i2c_pm_ops, + }, +}; + +/* I2C may be needed to bring up other drivers */ +static int __init +msm_i2c_init_driver(void) +{ + return platform_driver_register(&msm_i2c_driver); +} +subsys_initcall(msm_i2c_init_driver); + +static void __exit msm_i2c_exit_driver(void) +{ + platform_driver_unregister(&msm_i2c_driver); +} +module_exit(msm_i2c_exit_driver); + diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c index 0eb1515541e72..2dbba163b1020 100644 --- a/drivers/i2c/busses/i2c-parport.c +++ b/drivers/i2c/busses/i2c-parport.c @@ -1,7 +1,7 @@ /* ------------------------------------------------------------------------ * * i2c-parport.c I2C bus over parallel port * * ------------------------------------------------------------------------ * - Copyright (C) 2003-2010 Jean Delvare + Copyright (C) 2003-2011 Jean Delvare Based on older i2c-philips-par.c driver Copyright (C) 1995-2000 Simon G. Vogl @@ -33,6 +33,8 @@ #include #include #include +#include +#include #include "i2c-parport.h" /* ----- Device list ------------------------------------------------------ */ @@ -43,10 +45,11 @@ struct i2c_par { struct i2c_algo_bit_data algo_data; struct i2c_smbus_alert_setup alert_data; struct i2c_client *ara; - struct i2c_par *next; + struct list_head node; }; -static struct i2c_par *adapter_list; +static LIST_HEAD(adapter_list); +static DEFINE_MUTEX(adapter_list_lock); /* ----- Low-level parallel port access ----------------------------------- */ @@ -228,8 +231,9 @@ static void i2c_parport_attach (struct parport *port) } /* Add the new adapter to the list */ - adapter->next = adapter_list; - adapter_list = adapter; + mutex_lock(&adapter_list_lock); + list_add_tail(&adapter->node, &adapter_list); + mutex_unlock(&adapter_list_lock); return; ERROR1: @@ -241,11 +245,11 @@ static void i2c_parport_attach (struct parport *port) static void i2c_parport_detach (struct parport *port) { - struct i2c_par *adapter, *prev; + struct i2c_par *adapter, *_n; /* Walk the list */ - for (prev = NULL, adapter = adapter_list; adapter; - prev = adapter, adapter = adapter->next) { + mutex_lock(&adapter_list_lock); + list_for_each_entry_safe(adapter, _n, &adapter_list, node) { if (adapter->pdev->port == port) { if (adapter->ara) { parport_disable_irq(port); @@ -259,14 +263,11 @@ static void i2c_parport_detach (struct parport *port) parport_release(adapter->pdev); parport_unregister_device(adapter->pdev); - if (prev) - prev->next = adapter->next; - else - adapter_list = adapter->next; + list_del(&adapter->node); kfree(adapter); - return; } } + mutex_unlock(&adapter_list_lock); } static struct parport_driver i2c_parport_driver = { diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c new file mode 100644 index 0000000000000..9983dd8956576 --- /dev/null +++ b/drivers/i2c/busses/i2c-qup.c @@ -0,0 +1,930 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
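/*
 * Aside on the i2c-parport change above: the open-coded singly linked
 * list becomes the standard kernel pattern, list_add_tail() under a
 * mutex on attach and list_for_each_entry_safe() plus list_del() on
 * detach, so detach no longer returns early and is safe against a
 * concurrent attach.  Minimal form of the pattern (types trimmed for
 * illustration):
 */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct ex_entry {
	struct list_head node;
	int id;
};

static LIST_HEAD(ex_list);
static DEFINE_MUTEX(ex_list_lock);

static void ex_attach(struct ex_entry *e)
{
	mutex_lock(&ex_list_lock);
	list_add_tail(&e->node, &ex_list);
	mutex_unlock(&ex_list_lock);
}

static void ex_detach(int id)
{
	struct ex_entry *e, *tmp;

	mutex_lock(&ex_list_lock);
	list_for_each_entry_safe(e, tmp, &ex_list, node) {
		if (e->id == id) {
			list_del(&e->node);
			kfree(e);
		}
	}
	mutex_unlock(&ex_list_lock);
}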
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Code Aurora Forum nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * Alternatively, provided that this notice is retained in full, this software + * may be relicensed by the recipient under the terms of the GNU General Public + * License version 2 ("GPL") and only version 2, in which case the provisions of + * the GPL apply INSTEAD OF those given above. If the recipient relicenses the + * software under the GPL, then the identification text in the MODULE_LICENSE + * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a + * recipient changes the license terms to the GPL, subsequent recipients shall + * not relicense under alternate licensing terms, including the BSD or dual + * BSD/GPL terms. In addition, the following license statement immediately + * below and between the words START and END shall also then apply when this + * software is relicensed under the GPL: + * + * START + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 and only version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * END + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +/* + * QUP driver for Qualcomm MSM platforms + * + */ + +/* #define DEBUG */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION("0.2"); +MODULE_ALIAS("platform:i2c_qup"); + +/* QUP Registers */ +enum { + QUP_CONFIG = 0x0, + QUP_STATE = 0x4, + QUP_IO_MODE = 0x8, + QUP_SW_RESET = 0xC, + QUP_OPERATIONAL = 0x18, + QUP_ERROR_FLAGS = 0x1C, + QUP_ERROR_FLAGS_EN = 0x20, + QUP_MX_READ_CNT = 0x208, + QUP_MX_INPUT_CNT = 0x200, + QUP_OUT_DEBUG = 0x108, + QUP_OUT_FIFO_CNT = 0x10C, + QUP_OUT_FIFO_BASE = 0x110, + QUP_IN_READ_CUR = 0x20C, + QUP_IN_DEBUG = 0x210, + QUP_IN_FIFO_CNT = 0x214, + QUP_IN_FIFO_BASE = 0x218, + QUP_I2C_CLK_CTL = 0x400, + QUP_I2C_STATUS = 0x404, +}; + +/* QUP States and reset values */ +enum { + QUP_RESET_STATE = 0, + QUP_RUN_STATE = 1U, + QUP_STATE_MASK = 3U, + QUP_PAUSE_STATE = 3U, + QUP_STATE_VALID = 1U << 2, + QUP_I2C_MAST_GEN = 1U << 4, + QUP_OPERATIONAL_RESET = 0xFF0, + QUP_I2C_STATUS_RESET = 0xFFFFFC, +}; + +/* I2C mini core related values */ +enum { + I2C_MINI_CORE = 2U << 8, + I2C_N_VAL = 0xF, + +}; + +/* Packing Unpacking words in FIFOs */ +enum { + QUP_UNPACK_EN = 1U << 14, + QUP_PACK_EN = 1U << 15, +}; + +/* QUP tags */ +enum { + QUP_OUT_NOP = 0, + QUP_OUT_START = 1U << 8, + QUP_OUT_DATA = 2U << 8, + QUP_OUT_STOP = 3U << 8, + QUP_OUT_REC = 4U << 8, + QUP_IN_DATA = 5U << 8, + QUP_IN_STOP = 6U << 8, + QUP_IN_NACK = 7U << 8, +}; + +/* Status, Error flags */ +enum { + I2C_STATUS_WR_BUFFER_FULL = 1U << 0, + I2C_STATUS_ERROR_MASK = 0xfc, + QUP_IN_NOT_EMPTY = 1U << 5, + QUP_STATUS_ERROR_MASK = 0x7F, + QUP_STATUS_ERROR_FLAGS = 0x7C, +}; + +struct qup_i2c_dev { + struct device *dev; + void __iomem *base; /* virtual */ + void __iomem *gsbi; /* virtual */ + int in_irq; + int out_irq; + int err_irq; + struct clk *clk; + struct clk *pclk; + struct i2c_adapter adapter; + + struct i2c_msg *msg; + int pos; + int cnt; + int err; + int mode; + int clk_ctl; + int out_fifo_sz; + int in_fifo_sz; + int out_blk_sz; + int in_blk_sz; + struct msm_qup_i2c_platform_data *pdata; + void *complete; +}; + +#ifdef DEBUG +static void +qup_print_status(struct qup_i2c_dev *dev) +{ + uint32_t val; + val = readl(dev->base+QUP_CONFIG); + dev_dbg(dev->dev, "Qup config is :0x%x\n", val); + val = readl(dev->base+QUP_STATE); + dev_dbg(dev->dev, "Qup state is :0x%x\n", val); + val = readl(dev->base+QUP_IO_MODE); + dev_dbg(dev->dev, "Qup mode is :0x%x\n", val); +} +#else +static inline void qup_print_status(struct qup_i2c_dev *dev) +{ +} +#endif + +static irqreturn_t +qup_i2c_interrupt(int irq, void *devid) +{ + struct qup_i2c_dev *dev = devid; + uint32_t status = readl(dev->base + QUP_I2C_STATUS); + uint32_t status1 = readl(dev->base + QUP_ERROR_FLAGS); + int err = 0; + + if (status & I2C_STATUS_ERROR_MASK) { + dev_err(dev->dev, "QUP: Got i2c error :0x%x\n", status); + err = -status; + goto intr_done; + } + + if (status1 & 0x7F) { + dev_err(dev->dev, "QUP: Got QUP error :0x%x\n", status1); + err = -status1; + goto intr_done; + } + + /* Ignore output buffer empty interrupt for READ transaction */ + if (dev->msg && dev->msg->flags == I2C_M_RD && irq == dev->out_irq) + return IRQ_HANDLED; + else if (!dev->msg) + return IRQ_HANDLED; + +intr_done: + dev_dbg(dev->dev, "QUP intr= %d, i2c status=0x%x, qup status = 0x%x\n", + irq, status, status1); + qup_print_status(dev); + dev->err = err; + complete(dev->complete); + return IRQ_HANDLED; +} + +static int +qup_i2c_poll_writeready(struct qup_i2c_dev *dev) +{ + 
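/*
 * With QUP_PACK_EN/QUP_UNPACK_EN set, every 32-bit word written to
 * QUP_OUT_FIFO_BASE carries two 16-bit tagged entries (tag in bits 15:8,
 * payload in bits 7:0).  For a word-aligned read, qup_issue_read() below
 * packs the start entry and the receive entry into a single word; data
 * bytes of a write go out as QUP_OUT_DATA entries with the final byte
 * tagged QUP_OUT_STOP.  Illustration only:
 */
static inline uint32_t ex_qup_read_word(uint8_t addr, uint8_t len)
{
	uint16_t start = QUP_OUT_START | (addr << 1) | 1;	/* address byte, R/W = read */
	uint16_t rec = QUP_OUT_REC | len;			/* receive 'len' bytes */

	return ((uint32_t)rec << 16) | start;
}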
uint32_t retries = 0; + + while (retries != 2000) { + uint32_t status = readl(dev->base + QUP_I2C_STATUS); + + if (!(status & I2C_STATUS_WR_BUFFER_FULL)) + return 0; + if (retries++ == 1000) + udelay(100); + } + qup_print_status(dev); + return -ETIMEDOUT; +} + +static int +qup_i2c_poll_state(struct qup_i2c_dev *dev, uint32_t state) +{ + uint32_t retries = 0; + + dev_dbg(dev->dev, "Polling Status for state:0x%x\n", state); + + while (retries != 2000) { + uint32_t status = readl(dev->base + QUP_STATE); + + if ((status & (QUP_STATE_VALID | state)) == + (QUP_STATE_VALID | state)) + return 0; + else if (retries++ == 1000) + udelay(100); + } + return -ETIMEDOUT; +} + +#ifdef DEBUG +static void qup_verify_fifo(struct qup_i2c_dev *dev, uint32_t val, + uint32_t addr, int rdwr) +{ + if (rdwr) + dev_dbg(dev->dev, "RD:Wrote 0x%x to out_ff:0x%x\n", val, addr); + else + dev_dbg(dev->dev, "WR:Wrote 0x%x to out_ff:0x%x\n", val, addr); +} +#else +static inline void qup_verify_fifo(struct qup_i2c_dev *dev, uint32_t val, + uint32_t addr, int rdwr) +{ +} +#endif + +static void +qup_issue_read(struct qup_i2c_dev *dev, struct i2c_msg *msg, int *idx, + uint32_t carry_over) +{ + uint16_t addr = (msg->addr << 1) | 1; + + /* QUP limit 256 bytes per read */ + if (*idx % 4) { + writel(carry_over | ((QUP_OUT_START | addr) << 16), + dev->base + QUP_OUT_FIFO_BASE);/* + (*idx-2)); */ + + qup_verify_fifo(dev, carry_over | + ((QUP_OUT_START | addr) << 16), (uint32_t)dev->base + + QUP_OUT_FIFO_BASE + (*idx - 2), 1); + writel((QUP_OUT_REC | dev->cnt), + dev->base + QUP_OUT_FIFO_BASE);/* + (*idx+2)); */ + + qup_verify_fifo(dev, (QUP_OUT_REC | dev->cnt), + (uint32_t)dev->base + QUP_OUT_FIFO_BASE + (*idx + 2), 1); + } else { + writel(((QUP_OUT_REC | dev->cnt) << 16) | QUP_OUT_START | addr, + dev->base + QUP_OUT_FIFO_BASE);/* + (*idx)); */ + + qup_verify_fifo(dev, QUP_OUT_REC << 16 | dev->cnt << 16 | + QUP_OUT_START | addr, + (uint32_t)dev->base + QUP_OUT_FIFO_BASE + (*idx), 1); + } + *idx += 4; +} + +static void +qup_issue_write(struct qup_i2c_dev *dev, struct i2c_msg *msg, int rem, + int *idx, uint32_t *carry_over) +{ + int entries = dev->cnt; + int i = 0; + uint32_t val = 0; + uint32_t last_entry = 0; + uint16_t addr = msg->addr << 1; + if (dev->pos == 0) + entries++; + + if (dev->pos == 0) { + if (*idx % 4) { + writel(*carry_over | ((QUP_OUT_START | addr) << 16), + dev->base + QUP_OUT_FIFO_BASE); + + qup_verify_fifo(dev, *carry_over | QUP_OUT_DATA << 16 | + addr << 16, (uint32_t)dev->base + + QUP_OUT_FIFO_BASE + (*idx) - 2, 0); + } else + val = QUP_OUT_START | addr; + *idx += 2; + i++; + } else if (*idx % 4) { + val = (QUP_OUT_NOP | 1); + i++; + } + + for (; i < (entries - 1); i++) { + if (*idx % 4) { + writel(val | ((QUP_OUT_DATA | + msg->buf[dev->pos]) << 16), + dev->base + QUP_OUT_FIFO_BASE); + + qup_verify_fifo(dev, val | QUP_OUT_DATA << 16 | + msg->buf[dev->pos] << 16, (uint32_t)dev->base + + QUP_OUT_FIFO_BASE + (*idx) - 2, 0); + } else + val = QUP_OUT_DATA | msg->buf[dev->pos]; + (*idx) += 2; + dev->pos++; + } + if (dev->pos < (dev->cnt - 1)) + last_entry = QUP_OUT_DATA; + else if (rem > 1) /* not last array entry */ + last_entry = QUP_OUT_DATA; + else + last_entry = QUP_OUT_STOP; + if ((*idx % 4) == 0) { + /* + * If read-start and read-command end up in different fifos, it + * may result in extra-byte being read due to extra-read cycle. + * Avoid that by inserting NOP as the last entry of fifo only + * if write command(s) leave 1 space in fifo. 
+ */ + if (rem > 1) { + struct i2c_msg *next = msg + 1; + if (next->addr == msg->addr && (next->flags | I2C_M_RD) + && *idx == ((dev->out_fifo_sz*2) - 4)) { + writel(((last_entry | msg->buf[dev->pos]) | + ((1 | QUP_OUT_NOP) << 16)), dev->base + + QUP_OUT_FIFO_BASE);/* + (*idx) - 2); */ + *idx += 2; + } else + *carry_over = (last_entry | msg->buf[dev->pos]); + } else { + writel((last_entry | msg->buf[dev->pos]), + dev->base + QUP_OUT_FIFO_BASE);/* + (*idx) - 2); */ + + qup_verify_fifo(dev, last_entry | msg->buf[dev->pos], + (uint32_t)dev->base + QUP_OUT_FIFO_BASE + + (*idx), 0); + } + } else { + writel(val | ((last_entry | msg->buf[dev->pos]) << 16), + dev->base + QUP_OUT_FIFO_BASE);/* + (*idx) - 2); */ + + qup_verify_fifo(dev, val | (last_entry << 16) | + (msg->buf[dev->pos] << 16), (uint32_t)dev->base + + QUP_OUT_FIFO_BASE + (*idx) - 2, 0); + } + + *idx += 2; + dev->pos++; + dev->cnt = msg->len - dev->pos; +} + +static int +qup_update_state(struct qup_i2c_dev *dev, uint32_t state) +{ + if (qup_i2c_poll_state(dev, 0) != 0) + return -EIO; + writel(state, dev->base + QUP_STATE); + if (qup_i2c_poll_state(dev, state) != 0) + return -EIO; + return 0; +} + +static int +qup_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) +{ + DECLARE_COMPLETION_ONSTACK(complete); + struct qup_i2c_dev *dev = i2c_get_adapdata(adap); + int ret; + int rem = num; + long timeout; + int err; + + /* Initialize QUP registers during first transfer */ + if (dev->clk_ctl == 0) { + int fs_div; + int hs_div; + int i2c_clk; + uint32_t fifo_reg; + writel(0x2 << 4, dev->gsbi); + + i2c_clk = 19200000; /* input clock */ + fs_div = ((i2c_clk / dev->pdata->clk_freq) / 2) - 3; + hs_div = 3; + dev->clk_ctl = ((hs_div & 0x7) << 8) | (fs_div & 0xff); + fifo_reg = readl(dev->base + QUP_IO_MODE); + if (fifo_reg & 0x3) + dev->out_blk_sz = (fifo_reg & 0x3) * 16; + else + dev->out_blk_sz = 16; + if (fifo_reg & 0x60) + dev->in_blk_sz = ((fifo_reg & 0x60) >> 5) * 16; + else + dev->in_blk_sz = 16; + /* + * The block/fifo size w.r.t. 
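/*
 * The QUP_IO_MODE decode in qup_i2c_xfer() in numbers: the output block
 * size is (fifo_reg & 0x3) * 16 bytes (16 if the field is zero), the FIFO
 * is that block size times 2 << ((fifo_reg & 0x1C) >> 2), and both are
 * halved because half of every block is tag bytes.  A hypothetical
 * fifo_reg of 0x0 therefore gives an 8-byte block and a 16-byte output
 * FIFO of real data.  Folded into one helper (illustration only):
 */
static void ex_qup_out_fifo_geometry(uint32_t fifo_reg, int *blk, int *fifo)
{
	*blk = (fifo_reg & 0x3) ? (fifo_reg & 0x3) * 16 : 16;
	*blk /= 2;				/* every data byte carries a tag byte */
	*fifo = *blk * (2 << ((fifo_reg & 0x1C) >> 2));
}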
'actual data' is 1/2 due to 'tag' + * associated with each byte written/received + */ + dev->out_blk_sz /= 2; + dev->in_blk_sz /= 2; + dev->out_fifo_sz = dev->out_blk_sz * + (2 << ((fifo_reg & 0x1C) >> 2)); + dev->in_fifo_sz = dev->in_blk_sz * + (2 << ((fifo_reg & 0x380) >> 7)); + dev_dbg(dev->dev, "QUP IN:bl:%d, ff:%d, OUT:bl:%d, ff:%d\n", + dev->in_blk_sz, dev->in_fifo_sz, + dev->out_blk_sz, dev->out_fifo_sz); + } + + enable_irq(dev->in_irq); + enable_irq(dev->out_irq); + enable_irq(dev->err_irq); + writel(QUP_RESET_STATE, dev->base + QUP_STATE); + ret = qup_i2c_poll_state(dev, QUP_RESET_STATE); + if (ret) { + dev_err(dev->dev, "QUP Busy:Trying to recover\n"); + goto out_err; + } + + /* Initialize QUP registers */ + writel(1, dev->base + QUP_SW_RESET); + writel(0, dev->base + QUP_CONFIG); + writel(QUP_OPERATIONAL_RESET, dev->base + QUP_OPERATIONAL); + writel(QUP_STATUS_ERROR_FLAGS, dev->base + QUP_ERROR_FLAGS_EN); + + writel(QUP_PACK_EN | QUP_UNPACK_EN, dev->base + QUP_IO_MODE); + writel(I2C_MINI_CORE | I2C_N_VAL, dev->base + QUP_CONFIG); + + /* Initialize I2C mini core registers */ + writel(0, dev->base + QUP_I2C_CLK_CTL); + writel(QUP_I2C_STATUS_RESET, dev->base + QUP_I2C_STATUS); + + dev->cnt = msgs->len; + dev->pos = 0; + dev->msg = msgs; + while (rem) { + bool filled = false; + + /* Wait for WR buffer not full */ + ret = qup_i2c_poll_writeready(dev); + if (ret) { + dev_err(dev->dev, + "Error waiting for write ready before addr\n"); + goto out_err; + } + + dev->err = 0; + dev->complete = &complete; + + if (qup_i2c_poll_state(dev, QUP_I2C_MAST_GEN) != 0) { + ret = -EIO; + goto out_err; + } + + qup_print_status(dev); + /* HW limits Read upto 256 bytes in 1 read without stop + * only FIFO mode supported right now, so read size of + * in_fifo supported in 1 read + */ + if (dev->msg->flags == I2C_M_RD) { + if (dev->cnt > dev->in_fifo_sz) { + dev_err(dev->dev, "No Block mode support\n"); + ret = -EPROTONOSUPPORT; + goto out_err; + } + writel(dev->cnt, dev->base + QUP_MX_READ_CNT); + } else { + if (dev->cnt > dev->out_fifo_sz) { + dev_err(dev->dev, "No Block mode support\n"); + ret = -EPROTONOSUPPORT; + goto out_err; + } else if (rem > 1) { + struct i2c_msg *next = msgs + 1; + if (next->addr == msgs->addr && + next->flags == I2C_M_RD) { + if (next->len > dev->in_fifo_sz) { + dev_err(dev->dev, + "No Block mode support\n"); + ret = -EPROTONOSUPPORT; + goto out_err; + } + writel(next->len, dev->base + + QUP_MX_READ_CNT); + } + } + } + + err = qup_update_state(dev, QUP_RUN_STATE); + if (err < 0) { + ret = err; + goto out_err; + } + + qup_print_status(dev); + writel(dev->clk_ctl, dev->base + QUP_I2C_CLK_CTL); + + do { + int idx = 0; + uint32_t carry_over = 0; + + /* Transition to PAUSE state only possible from RUN */ + err = qup_update_state(dev, QUP_PAUSE_STATE); + if (err < 0) { + ret = err; + goto out_err; + } + + qup_print_status(dev); + /* This operation is Write, check the next operation + * and decide mode + */ + while (filled == false) { + if (msgs->flags & I2C_M_RD) + qup_issue_read(dev, msgs, &idx, + carry_over); + else + qup_issue_write(dev, msgs, rem, &idx, + &carry_over); + if (idx >= dev->out_fifo_sz) + filled = true; + /* Start new message */ + if (filled == false) { + if (msgs->flags & I2C_M_RD) + filled = true; + else if (rem > 1) { + /* Only combine operations with + * same address + */ + struct i2c_msg *next = msgs + 1; + if (next->addr != msgs->addr) + filled = true; + else { + rem--; + msgs++; + dev->msg = msgs; + dev->pos = 0; + dev->cnt = msgs->len; + } + } else + filled = 
true; + } + } + err = qup_update_state(dev, QUP_RUN_STATE); + if (err < 0) { + ret = err; + goto out_err; + } + dev_dbg(dev->dev, "idx:%d, rem:%d, num:%d, mode:%d\n", + idx, rem, num, dev->mode); + + qup_print_status(dev); + timeout = wait_for_completion_timeout(&complete, + msecs_to_jiffies(dev->out_fifo_sz)); + if (!timeout) { + dev_err(dev->dev, "Transaction timed out\n"); + writel(1, dev->base + QUP_SW_RESET); + msleep(10); + ret = -ETIMEDOUT; + goto out_err; + } + if (dev->err) { + dev_err(dev->dev, + "Error during data xfer (%d)\n", + dev->err); + ret = dev->err; + goto out_err; + } + if (dev->msg->flags & I2C_M_RD) { + int i; + uint32_t dval = 0; + for (i = 0; dev->pos < dev->msg->len; i++, + dev->pos++) { + uint32_t rd_status = readl(dev->base + + QUP_OPERATIONAL); + if (i % 2 == 0) { + if ((rd_status & + QUP_IN_NOT_EMPTY) == 0) + break; + dval = readl(dev->base + + QUP_IN_FIFO_BASE); + dev->msg->buf[dev->pos] = + dval & 0xFF; + } else + dev->msg->buf[dev->pos] = + ((dval & 0xFF0000) >> + 16); + } + dev->cnt -= i; + } else + filled = false; /* refill output FIFO */ + } while (dev->cnt > 0); + if (dev->cnt == 0) { + rem--; + msgs++; + if (rem) { + dev->pos = 0; + dev->cnt = msgs->len; + dev->msg = msgs; + } + } + } + + ret = num; + out_err: + dev->complete = NULL; + dev->msg = NULL; + dev->pos = 0; + dev->err = 0; + dev->cnt = 0; + disable_irq(dev->err_irq); + disable_irq(dev->in_irq); + disable_irq(dev->out_irq); + return ret; +} + +static u32 +qup_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); +} + +static const struct i2c_algorithm qup_i2c_algo = { + .master_xfer = qup_i2c_xfer, + .functionality = qup_i2c_func, +}; + +static int __devinit +qup_i2c_probe(struct platform_device *pdev) +{ + struct qup_i2c_dev *dev; + struct resource *qup_mem, *gsbi_mem, *qup_io, *gsbi_io; + struct resource *in_irq, *out_irq, *err_irq; + struct clk *clk, *pclk; + int ret = 0; + struct msm_qup_i2c_platform_data *pdata; + + dev_dbg(&pdev->dev, "qup_i2c_probe\n"); + + pdata = pdev->dev.platform_data; + if (!pdata) { + dev_err(&pdev->dev, "platform data not initialized\n"); + return -ENOSYS; + } + qup_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "qup_phys_addr"); + if (!qup_mem) { + dev_err(&pdev->dev, "no qup mem resource?\n"); + return -ENODEV; + } + gsbi_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "gsbi_qup_i2c_addr"); + if (!gsbi_mem) { + dev_err(&pdev->dev, "no gsbi mem resource?\n"); + return -ENODEV; + } + + in_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, + "qup_in_intr"); + if (!in_irq) { + dev_err(&pdev->dev, "no input irq resource?\n"); + return -ENODEV; + } + out_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, + "qup_out_intr"); + if (!out_irq) { + dev_err(&pdev->dev, "no output irq resource?\n"); + return -ENODEV; + } + err_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, + "qup_err_intr"); + if (!err_irq) { + dev_err(&pdev->dev, "no error irq resource?\n"); + return -ENODEV; + } + + qup_io = request_mem_region(qup_mem->start, resource_size(qup_mem), + pdev->name); + if (!qup_io) { + dev_err(&pdev->dev, "QUP region already claimed\n"); + return -EBUSY; + } + gsbi_io = request_mem_region(gsbi_mem->start, resource_size(gsbi_mem), + pdev->name); + if (!gsbi_io) { + dev_err(&pdev->dev, "GSBI region already claimed\n"); + return -EBUSY; + } + + clk = clk_get(&pdev->dev, "qup_clk"); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "Could not get clock\n"); + ret = PTR_ERR(clk); + goto 
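/*
 * Input-FIFO layout behind the unpacking loop above: with QUP_UNPACK_EN
 * set, each 32-bit word read from QUP_IN_FIFO_BASE holds two tagged
 * entries, so the payload bytes sit in bits 7:0 and 23:16 while the tags
 * (QUP_IN_DATA, QUP_IN_STOP, ...) occupy bits 15:8 and 31:24 and are
 * simply discarded.  Illustration only:
 */
static inline void ex_qup_unpack_word(uint32_t dval, uint8_t *b0, uint8_t *b1)
{
	*b0 = dval & 0xff;		/* first byte of the pair */
	*b1 = (dval >> 16) & 0xff;	/* second byte of the pair */
}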
err_clk_get_failed; + } + + pclk = clk_get(&pdev->dev, "qup_pclk"); + if (IS_ERR(clk)) + pclk = NULL; + + if (!(pdata->msm_i2c_config_gpio)) { + dev_err(&pdev->dev, "config_gpio function not initialized\n"); + ret = -ENOSYS; + goto err_config_failed; + } + + /* We support frequencies upto FAST Mode(400KHz) */ + if (pdata->clk_freq <= 0 || + pdata->clk_freq > 400000) { + dev_err(&pdev->dev, "clock frequency not supported\n"); + ret = -EIO; + goto err_config_failed; + } + + dev = kzalloc(sizeof(struct qup_i2c_dev), GFP_KERNEL); + if (!dev) { + ret = -ENOMEM; + goto err_alloc_dev_failed; + } + + dev->dev = &pdev->dev; + dev->in_irq = in_irq->start; + dev->out_irq = out_irq->start; + dev->err_irq = err_irq->start; + dev->clk = clk; + dev->pclk = pclk; + dev->base = ioremap(qup_mem->start, resource_size(qup_mem)); + if (!dev->base) { + ret = -ENOMEM; + goto err_ioremap_failed; + } + + /* Configure GSBI block to use I2C functionality */ + dev->gsbi = ioremap(gsbi_mem->start, resource_size(gsbi_mem)); + if (!dev->gsbi) { + ret = -ENOMEM; + goto err_gsbi_failed; + } + + platform_set_drvdata(pdev, dev); + + clk_enable(clk); + if (pclk) + clk_enable(pclk); + dev->pdata = pdata; + dev->clk_ctl = 0; + + i2c_set_adapdata(&dev->adapter, dev); + dev->adapter.algo = &qup_i2c_algo; + strlcpy(dev->adapter.name, + "QUP I2C adapter", + sizeof(dev->adapter.name)); + + dev->adapter.nr = pdev->id; + ret = i2c_add_numbered_adapter(&dev->adapter); + if (ret) { + dev_err(&pdev->dev, "i2c_add_adapter failed\n"); + goto err_i2c_add_adapter_failed; + } + + ret = request_irq(dev->in_irq, qup_i2c_interrupt, + IRQF_TRIGGER_RISING, "qup_in_intr", dev); + if (ret) { + dev_err(&pdev->dev, "request_out_irq failed\n"); + goto err_request_irq_failed; + } + ret = request_irq(dev->out_irq, qup_i2c_interrupt, + IRQF_TRIGGER_RISING, "qup_out_intr", dev); + if (ret) { + dev_err(&pdev->dev, "request_in_irq failed\n"); + free_irq(dev->in_irq, dev); + goto err_request_irq_failed; + } + ret = request_irq(dev->err_irq, qup_i2c_interrupt, + IRQF_TRIGGER_RISING, "qup_err_intr", dev); + if (ret) { + dev_err(&pdev->dev, "request_err_irq failed\n"); + free_irq(dev->out_irq, dev); + free_irq(dev->in_irq, dev); + goto err_request_irq_failed; + } + disable_irq(dev->err_irq); + disable_irq(dev->in_irq); + disable_irq(dev->out_irq); + pdata->msm_i2c_config_gpio(dev->adapter.nr, 1); + + return 0; + +err_request_irq_failed: + i2c_del_adapter(&dev->adapter); +err_i2c_add_adapter_failed: + clk_disable(clk); + if (pclk) + clk_disable(pclk); + iounmap(dev->gsbi); +err_gsbi_failed: + iounmap(dev->base); +err_ioremap_failed: + kfree(dev); +err_alloc_dev_failed: +err_config_failed: + clk_put(clk); + if (pclk) + clk_put(pclk); +err_clk_get_failed: + release_mem_region(gsbi_mem->start, resource_size(gsbi_mem)); + release_mem_region(qup_mem->start, resource_size(qup_mem)); + return ret; +} + +static int __devexit +qup_i2c_remove(struct platform_device *pdev) +{ + struct qup_i2c_dev *dev = platform_get_drvdata(pdev); + struct resource *qup_mem, *gsbi_mem; + + platform_set_drvdata(pdev, NULL); + free_irq(dev->out_irq, dev); + free_irq(dev->in_irq, dev); + free_irq(dev->err_irq, dev); + i2c_del_adapter(&dev->adapter); + clk_disable(dev->clk); + clk_put(dev->clk); + if (dev->pclk) { + clk_disable(dev->pclk); + clk_put(dev->pclk); + } + iounmap(dev->gsbi); + iounmap(dev->base); + kfree(dev); + gsbi_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "gsbi_qup_i2c_addr"); + release_mem_region(gsbi_mem->start, resource_size(gsbi_mem)); + qup_mem = 
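/*
 * What qup_i2c_probe() above expects from the board file: five named
 * resources plus platform data carrying the bus speed and a GPIO config
 * hook.  The addresses, IRQ numbers and the ex_board_* helper below are
 * hypothetical (its signature is assumed from the probe's call site);
 * only the resource names and the msm_qup_i2c_platform_data fields the
 * probe looks up are taken from the code.
 */
extern void ex_board_qup_gpio_config(int adap_nr, int config_type);	/* hypothetical */

static struct resource ex_qup_resources[] = {
	{
		.name	= "qup_phys_addr",
		.start	= 0xA8301000,		/* hypothetical */
		.end	= 0xA8301FFF,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "gsbi_qup_i2c_addr",
		.start	= 0xA8300000,		/* hypothetical */
		.end	= 0xA830001F,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "qup_in_intr",
		.start	= 100,			/* hypothetical */
		.end	= 100,
		.flags	= IORESOURCE_IRQ,
	},
	{
		.name	= "qup_out_intr",
		.start	= 101,			/* hypothetical */
		.end	= 101,
		.flags	= IORESOURCE_IRQ,
	},
	{
		.name	= "qup_err_intr",
		.start	= 102,			/* hypothetical */
		.end	= 102,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct msm_qup_i2c_platform_data ex_qup_pdata = {
	.clk_freq		= 100000,	/* must be <= 400 kHz */
	.msm_i2c_config_gpio	= ex_board_qup_gpio_config,
};

static struct platform_device ex_qup_device = {
	.name		= "qup_i2c",
	.id		= 4,			/* becomes the adapter number */
	.num_resources	= ARRAY_SIZE(ex_qup_resources),
	.resource	= ex_qup_resources,
	.dev		= {
		.platform_data = &ex_qup_pdata,
	},
};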
platform_get_resource_byname(pdev, IORESOURCE_MEM, + "qup_phys_addr"); + release_mem_region(qup_mem->start, resource_size(qup_mem)); + return 0; +} + +#ifdef CONFIG_PM +static int qup_i2c_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct qup_i2c_dev *dev = platform_get_drvdata(pdev); + + clk_disable(dev->clk); + if (dev->pclk) + clk_disable(dev->pclk); + return 0; +} + +static int qup_i2c_resume(struct platform_device *pdev) +{ + struct qup_i2c_dev *dev = platform_get_drvdata(pdev); + + clk_enable(dev->clk); + if (dev->pclk) + clk_enable(dev->pclk); + return 0; +} +#else +#define qup_i2c_suspend NULL +#define qup_i2c_resume NULL +#endif /* CONFIG_PM */ + +static struct platform_driver qup_i2c_driver = { + .probe = qup_i2c_probe, + .remove = __devexit_p(qup_i2c_remove), + .suspend = qup_i2c_suspend, + .resume = qup_i2c_resume, + .driver = { + .name = "qup_i2c", + .owner = THIS_MODULE, + }, +}; + +/* QUP may be needed to bring up other drivers */ +static int __init +qup_i2c_init_driver(void) +{ + return platform_driver_register(&qup_i2c_driver); +} +subsys_initcall(qup_i2c_init_driver); + +static void __exit qup_i2c_exit_driver(void) +{ + platform_driver_unregister(&qup_i2c_driver); +} +module_exit(qup_i2c_exit_driver); + diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig new file mode 100644 index 0000000000000..f58e49a6d2cb0 --- /dev/null +++ b/drivers/i2c/chips/Kconfig @@ -0,0 +1,16 @@ +# +# Miscellaneous I2C chip drivers configuration +# +# *** DEPRECATED! Do not add new entries! See Makefile *** +# + +menu "Miscellaneous I2C Chip support" + +config SENSORS_MT9T013 + tristate "MT9T013 Camera Driver" + depends on I2C + default y + help + MT9T013 Camera Driver implemented by HTC. + +endmenu diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile new file mode 100644 index 0000000000000..23b7b7b2b7ea4 --- /dev/null +++ b/drivers/i2c/chips/Makefile @@ -0,0 +1,18 @@ +# +# Makefile for miscellaneous I2C chip drivers. +# +# Do not add new drivers to this directory! It is DEPRECATED. +# +# Device drivers are better grouped according to the functionality they +# implement rather than to the bus they are connected to. In particular: +# * Hardware monitoring chip drivers go to drivers/hwmon +# * RTC chip drivers go to drivers/rtc +# * I/O expander drivers go to drivers/gpio +# + +obj-$(CONFIG_SENSORS_MT9T013) += mt9t013.o + +ifeq ($(CONFIG_I2C_DEBUG_CHIP),y) +EXTRA_CFLAGS += -DDEBUG +endif + diff --git a/drivers/i2c/chips/mt9t013.c b/drivers/i2c/chips/mt9t013.c new file mode 100755 index 0000000000000..200a9f8b457ae --- /dev/null +++ b/drivers/i2c/chips/mt9t013.c @@ -0,0 +1,1351 @@ +/* + * Copyright (C) 2007-2008 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* define ioctls */ + + +#define ALLOW_USPACE_RW 0 + +static const uint32_t fps_divider = 1; + +#define AF_I2C_ID 0x18 /* actuator's slave address */ + +static struct i2c_client *pclient; + +/* we need this to set the clock rate */ +static struct clk *vfe_clk; + +/* camif clocks */ +static struct clk *vfe_mdc_clk; +static struct clk *mdc_clk; + +static int mdc_clk_enabled; +static int vfe_mdc_clk_enabled; +static int vfe_clk_enabled; +static int opened; +static int pclk_set; + +static const struct mt9t013_reg_pat mt9t013_reg_pattern = { .reg = { + { /* preview 2x2 binning 20fps, pclk MHz, MCLK 24MHz */ + 10, /*vt_pix_clk_div REG=0x0300*/ + /*update get_snapshot_fps if this change*/ + 1, /*vt_sys_clk_div REG=0x0302*/ + /*update get_snapshot_fps if this change*/ + 3, /*2, pre_pll_clk_div REG=0x0304*/ + /*update get_snapshot_fps if this change*/ + 80, /*40, pll_multiplier REG=0x0306*/ + /*60 for 30fps preview, 40 for 20fps preview*/ + 10, /*op_pix_clk_div REG=0x0308*/ + 1, /*op_sys_clk_div REG=0x030A*/ + 16, /*scale_m REG=0x0404*/ + 0x0111, /*row_speed REG=0x3016*/ + 8, /*x_addr_start REG=0x3004*/ + 2053, /*x_addr_end REG=0x3008*/ + 8, /*y_addr_start REG=0x3002*/ + 1541, /*y_addr_end REG=0x3006*/ + 0x046C, /*read_mode REG=0x3040*/ + 1024, /*x_output_size REG=0x034C*/ + 768, /*y_output_size REG=0x034E*/ + 3540, /*2616, line_length_pck REG=0x300C*/ + 861, /*916, frame_length_lines REG=0x300A*/ + 16, /*coarse_integration_time REG=0x3012*/ + 1461 /*fine_integration_time REG=0x3014*/ + }, + { /* snapshot */ + 10, /*vt_pix_clk_div REG=0x0300*/ + /*update get_snapshot_fps if this change*/ + 1, /*vt_sys_clk_div REG=0x0302*/ + /*update get_snapshot_fps if this change*/ + 3, /*2, pre_pll_clk_div REG=0x0304*/ + /*update get_snapshot_fps if this change*/ + 80, /*40, pll_multiplier REG=0x0306*/ + /*50 for 15fps snapshot, 40 for 10fps snapshot*/ + 10, /*op_pix_clk_div REG=0x0308*/ + 1, /*op_sys_clk_div REG=0x030A*/ + 16, /*scale_m REG=0x0404*/ + 0x0111, /*row_speed REG=0x3016*/ + 8, /*0, x_addr_start REG=0x3004*/ + 2063, /*2061, x_addr_end REG=0x3008*/ + 8, /*2, y_addr_start REG=0x3002*/ + 1551, /*1545, y_addr_end REG=0x3006*/ + 0x0024, /*read_mode REG=0x3040*/ + 2063, /*output_size REG=0x034C*/ + 1544, /*y_output_size REG=0x034E*/ + 4800, /*2952, line_length_pck REG=0x300C*/ + 1629, /*frame_length_lines REG=0x300A*/ + 16, /*coarse_integration_time REG=0x3012*/ + 733 /*fine_integration_time REG=0x3014*/ + } +}}; + +#define MT9T013_MU3M0VC_REG_MODEL_ID 0x0000 +#define MT9T013_MU3M0VC_MODEL_ID 0x2600 +#define REG_GROUPED_PARAMETER_HOLD 0x0104 +#define GROUPED_PARAMETER_HOLD 0x0100 +#define GROUPED_PARAMETER_UPDATE 0x0000 +#define REG_COARSE_INTEGRATION_TIME 0x3012 +#define REG_VT_PIX_CLK_DIV 0x0300 +#define REG_VT_SYS_CLK_DIV 0x0302 +#define REG_PRE_PLL_CLK_DIV 0x0304 +#define REG_PLL_MULTIPLIER 0x0306 +#define REG_OP_PIX_CLK_DIV 0x0308 +#define REG_OP_SYS_CLK_DIV 0x030A +#define REG_SCALE_M 0x0404 +#define REG_FRAME_LENGTH_LINES 0x300A +#define REG_LINE_LENGTH_PCK 0x300C +#define REG_X_ADDR_START 0x3004 +#define REG_Y_ADDR_START 0x3002 +#define REG_X_ADDR_END 0x3008 +#define REG_Y_ADDR_END 0x3006 +#define REG_X_OUTPUT_SIZE 0x034C +#define REG_Y_OUTPUT_SIZE 0x034E +#define REG_FINE_INTEGRATION_TIME 0x3014 +#define REG_ROW_SPEED 0x3016 +#define MT9T013_REG_RESET_REGISTER 
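/*
 * Rough sanity check of the preview PLL settings in the table above,
 * assuming the usual SMIA-style clock tree suggested by registers
 * 0x0300-0x030A and the 24 MHz MCLK programmed later via
 * msm_camio_clk_rate_set(24000000); the clock-tree layout is an
 * assumption, not something this patch states:
 *
 *   vt_pix_clk = 24 MHz / pre_pll_clk_div * pll_multiplier
 *                       / (vt_sys_clk_div * vt_pix_clk_div)
 *              = 24e6 / 3 * 80 / (1 * 10) = 64 MHz
 *
 *   frame rate = vt_pix_clk / (line_length_pck * frame_length_lines)
 *              = 64e6 / (3540 * 861) ~= 21 fps
 *
 * which is consistent with the "20fps preview" note in the table.
 */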
0x301A +#define MT9T013_RESET_REGISTER_PWON 0x10CC /*enable paralled and start streaming*/ +#define MT9T013_RESET_REGISTER_PWOFF 0x1008 //0x10C8 /*stop streaming*/ +#define REG_READ_MODE 0x3040 +#define REG_GLOBAL_GAIN 0x305E +#define REG_TEST_PATTERN_MODE 0x3070 + +static struct wake_lock mt9t013_wake_lock; + +static inline void init_suspend(void) +{ + wake_lock_init(&mt9t013_wake_lock, WAKE_LOCK_IDLE, "mt9t013"); +} + +static inline void deinit_suspend(void) +{ + wake_lock_destroy(&mt9t013_wake_lock); +} + +static inline void prevent_suspend(void) +{ + wake_lock(&mt9t013_wake_lock); +} + +static inline void allow_suspend(void) +{ + wake_unlock(&mt9t013_wake_lock); +} + +#define CLK_GET(clk) do { \ + if (!clk) { \ + clk = clk_get(NULL, #clk); \ + printk(KERN_INFO \ + "mt9t013: clk_get(%s): %p\n", #clk, clk); \ + } \ +} while(0) + +DECLARE_MUTEX(sem); + +static struct msm_camera_legacy_device_platform_data *cam; + +#define out_dword(addr, val) \ + (*((volatile unsigned long *)(addr)) = ((unsigned long)(val))) + +#define out_dword_masked_ns(io, mask, val, current_reg_content) \ + (void) out_dword(io, ((current_reg_content & (uint32_t)(~(mask))) | \ + ((uint32_t)((val) & (mask))))) + +#define __inpdw(port) (*((volatile uint32_t *) (port))) +#define in_dword_masked(addr, mask) (__inpdw(addr) & (uint32_t)mask ) + +#define HWIO_MDDI_CAMIF_CFG_ADDR MSM_MDC_BASE +#define HWIO_MDDI_CAMIF_CFG_RMSK 0x1fffff +#define HWIO_MDDI_CAMIF_CFG_IN \ + in_dword_masked(HWIO_MDDI_CAMIF_CFG_ADDR, HWIO_MDDI_CAMIF_CFG_RMSK) + +#define HWIO_MDDI_CAMIF_CFG_OUTM(m,v) \ + out_dword_masked_ns(HWIO_MDDI_CAMIF_CFG_ADDR,m,v,HWIO_MDDI_CAMIF_CFG_IN); +#define __msmhwio_outm(hwiosym, mask, val) HWIO_##hwiosym##_OUTM(mask, val) +#define HWIO_OUTM(hwiosym, mask, val) __msmhwio_outm(hwiosym, mask, val) + +#define HWIO_MDDI_CAMIF_CFG_CAM_SEL_BMSK 0x2 +#define HWIO_MDDI_CAMIF_CFG_CAM_PCLK_SRC_SEL_BMSK 0x60000 +#define HWIO_MDDI_CAMIF_CFG_CAM_PCLK_INVERT_BMSK 0x80000 +#define HWIO_MDDI_CAMIF_CFG_CAM_PAD_REG_SW_RESET_BMSK 0x100000 + +#define HWIO_MDDI_CAMIF_CFG_CAM_SEL_SHFT 0x1 +#define HWIO_MDDI_CAMIF_CFG_CAM_PCLK_SRC_SEL_SHFT 0x11 +#define HWIO_MDDI_CAMIF_CFG_CAM_PCLK_INVERT_SHFT 0x13 +#define HWIO_MDDI_CAMIF_CFG_CAM_PAD_REG_SW_RESET_SHFT 0x14 + +#define __msmhwio_shft(hwio_regsym, hwio_fldsym) HWIO_##hwio_regsym##_##hwio_fldsym##_SHFT +#define HWIO_SHFT(hwio_regsym, hwio_fldsym) __msmhwio_shft(hwio_regsym, hwio_fldsym) + +#define __msmhwio_fmsk(hwio_regsym, hwio_fldsym) HWIO_##hwio_regsym##_##hwio_fldsym##_BMSK +#define HWIO_FMSK(hwio_regsym, hwio_fldsym) __msmhwio_fmsk(hwio_regsym, hwio_fldsym) + +#define HWIO_APPS_RESET_ADDR (MSM_CLK_CTL_BASE + 0x00000210) +#define HWIO_APPS_RESET_RMSK 0x1fff +#define HWIO_APPS_RESET_VFE_BMSK 1 +#define HWIO_APPS_RESET_VFE_SHFT 0 +#define HWIO_APPS_RESET_IN in_dword_masked(HWIO_APPS_RESET_ADDR, HWIO_APPS_RESET_RMSK) +#define HWIO_APPS_RESET_OUTM(m,v) out_dword_masked_ns(HWIO_APPS_RESET_ADDR,m,v,HWIO_APPS_RESET_IN) + +struct mt9t013_data { + struct work_struct work; +}; + +static DECLARE_WAIT_QUEUE_HEAD(g_data_ready_wait_queue); + +static int mt9t013_i2c_sensor_init(struct mt9t013_init *init); +static int mt9t013_i2c_sensor_setting(unsigned long arg); +static int mt9t013_i2c_exposure_gain(uint32_t mode, uint16_t line, + uint16_t gain); +static int mt9t013_i2c_move_focus(uint16_t position); +static int mt9t013_i2c_set_default_focus(uint8_t step); +static int mt9t013_i2c_power_up(void); +static int mt9t013_i2c_power_down(void); +static int mt9t013_camif_pad_reg_reset(void); +static int 
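/*
 * The HWIO_OUTM() machinery above is a masked read-modify-write on a
 * memory-mapped register: read the register limited to its valid-bit
 * mask, clear the field, OR in the new value, write it back.  Expressed
 * with the macros defined above (illustration only):
 */
static inline void ex_hwio_outm(void *addr, uint32_t rmsk, uint32_t bmsk,
				uint32_t val)
{
	uint32_t cur = in_dword_masked(addr, rmsk);	/* current contents */

	out_dword(addr, (cur & ~bmsk) | (val & bmsk));	/* touch only 'bmsk' bits */
}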
mt9t013_lens_power(int on); + +int mt_i2c_lens_tx_data(unsigned char slave_addr, char* txData, int length) +{ + int rc; + struct i2c_msg msg[] = { + { + .addr = slave_addr, + .flags = 0, + .len = length, + .buf = txData, + }, + }; + +#if 0 + { + int i; + /* printk(KERN_INFO "mt_i2c_lens_tx_data: af i2c client addr = %x," + " register addr = 0x%02x%02x:\n", slave_addr, txData[0], txData[1]); + */ + for (i = 0; i < length - 2; i++) + printk(KERN_INFO "\tdata[%d]: 0x%02x\n", i, txData[i+2]); + } +#endif + + rc = i2c_transfer(pclient->adapter, msg, 1); + if (rc < 0) { + printk(KERN_ERR "mt_i2c_lens_tx_data: i2c_transfer error %d\n", rc); + return rc; + } + return 0; +} + +static int mt9t013_i2c_lens_write(unsigned char slave_addr, unsigned char u_addr, unsigned char u_data) +{ + unsigned char buf[2] = { u_addr, u_data }; + return mt_i2c_lens_tx_data(slave_addr, buf, sizeof(buf)); +} + +static int mt_i2c_rx_data(char* rxData, int length) +{ + int rc; + struct i2c_msg msgs[] = { + { + .addr = pclient->addr, + .flags = 0, + .len = 2, + .buf = rxData, + }, + { + .addr = pclient->addr, + .flags = I2C_M_RD, + .len = length, + .buf = rxData, + }, + }; + + rc = i2c_transfer(pclient->adapter, msgs, 2); + if (rc < 0) { + printk(KERN_ERR "mt9t013: mt_i2c_rx_data error %d\n", rc); + return rc; + } +#if 0 + else { + int i; + for (i = 0; i < length; i++) + printk(KERN_INFO "\tdata[%d]: 0x%02x\n", i, rxData[i]); + } +#endif + + return 0; +} + +int mt_i2c_tx_data(char* txData, int length) +{ + int rc; + + struct i2c_msg msg[] = { + { + .addr = pclient->addr, + .flags = 0, + .len = length, + .buf = txData, + }, + }; + + rc = i2c_transfer(pclient->adapter, msg, 1); + if (rc < 0) { + printk(KERN_ERR "mt9t013: mt_i2c_tx_data error %d\n", rc); + return rc; + } + return 0; +} + +static int mt9t013_i2c_write(unsigned short u_addr, unsigned short u_data) +{ + int rc; + unsigned char buf[4]; + + buf[0] = (u_addr & 0xFF00) >> 8; + buf[1] = u_addr & 0x00FF; + buf[2] = (u_data & 0xFF00) >> 8; + buf[3] = u_data & 0x00FF; + + rc = mt_i2c_tx_data(buf, sizeof(buf)); + if(rc < 0) + printk(KERN_ERR "mt9t013: txdata error %d add:0x%02x data:0x%02x\n", + rc, u_addr, u_data); + return rc; +} + +static int mt9t013_i2c_read(unsigned short u_addr, unsigned short *pu_data) +{ + int rc; + unsigned char buf[2]; + + buf[0] = (u_addr & 0xFF00)>>8; + buf[1] = (u_addr & 0x00FF); + rc = mt_i2c_rx_data(buf, 2); + if (!rc) + *pu_data = buf[0]<<8 | buf[1]; + else printk(KERN_ERR "mt9t013: i2c read failed\n"); + return rc; +} + +static int msm_camio_clk_enable (int clk_type) +{ + struct clk *clk = NULL; + int *enabled = NULL; + + switch (clk_type) { + case CAMIO_VFE_MDC_CLK: + CLK_GET(vfe_mdc_clk); + clk = vfe_mdc_clk; + enabled = &vfe_mdc_clk_enabled; + break; + case CAMIO_MDC_CLK: + CLK_GET(mdc_clk); + clk = mdc_clk; + enabled = &mdc_clk_enabled; + break; + default: + break; + } + + if (clk != NULL && !*enabled) { + int rc = clk_enable(clk); + *enabled = !rc; + return rc; + } + + return -EINVAL; +} + +static int msm_camio_clk_disable(int clk_type) +{ + int rc = 0; + struct clk *clk = NULL; + int *enabled = NULL; + + switch (clk_type) { + case CAMIO_VFE_MDC_CLK: + clk = vfe_mdc_clk; + enabled = &vfe_mdc_clk_enabled; + break; + case CAMIO_MDC_CLK: + clk = mdc_clk; + enabled = &mdc_clk_enabled; + break; + default: + rc = -1; + break; + } + + if (clk != NULL && *enabled) { + clk_disable(clk); + *enabled = 0; + return 0; + } + + return -EINVAL; +} + +static int msm_camio_vfe_clk_enable(void) +{ + CLK_GET(vfe_clk); + if (vfe_clk && 
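/*
 * Wire format used by mt9t013_i2c_write()/mt9t013_i2c_read() above: the
 * sensor takes 16-bit register addresses and 16-bit values, MSB first,
 * so a write is one 4-byte message and a read is a 2-byte address write
 * followed by an I2C_M_RD of 2 bytes reassembled as buf[0] << 8 | buf[1].
 * For example, asserting grouped-parameter hold puts
 * {0x01, 0x04, 0x01, 0x00} on the bus (illustration only):
 */
static int ex_mt9t013_hold_params(void)
{
	/* REG_GROUPED_PARAMETER_HOLD = 0x0104, GROUPED_PARAMETER_HOLD = 0x0100 */
	return mt9t013_i2c_write(REG_GROUPED_PARAMETER_HOLD,
				 GROUPED_PARAMETER_HOLD);
}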
!vfe_clk_enabled) { + vfe_clk_enabled = !clk_enable(vfe_clk); + printk(KERN_INFO "mt9t013: enable vfe_clk\n"); + } + return vfe_clk_enabled ? 0 : -EIO; +} + +static int msm_camio_clk_rate_set(int rate) +{ + int rc = msm_camio_vfe_clk_enable(); + if (!rc && vfe_clk_enabled) + rc = clk_set_rate(vfe_clk, rate); + return rc; +} + +static int clk_select(int internal) +{ + int rc = -EIO; + printk(KERN_INFO "mt9t013: clk select %d\n", internal); + CLK_GET(vfe_clk); + if (vfe_clk != NULL) { + extern int clk_set_flags(struct clk *clk, unsigned long flags); + rc = clk_set_flags(vfe_clk, 0x00000100 << internal); + if (!rc && internal) rc = msm_camio_vfe_clk_enable(); + } + return rc; +} + +static void mt9t013_sensor_init(void) +{ + int ret; + printk(KERN_INFO "mt9t013: init\n"); + if (!pclient) + return; + + /*pull hi reset*/ + printk(KERN_INFO "mt9t013: mt9t013_register_init\n"); + ret = gpio_request(cam->sensor_reset, "mt9t013"); + if (!ret) { + gpio_direction_output(cam->sensor_reset, 1); + printk(KERN_INFO "mt9t013: camera sensor_reset set as 1\n"); + } else + printk(KERN_ERR "mt9t013 error: request gpio %d failed: " + "%d\n", cam->sensor_reset, ret); + mdelay(2); + + /* pull down power down */ + ret = gpio_request(cam->sensor_pwd, "mt9t013"); + if (!ret || ret == -EBUSY) + gpio_direction_output(cam->sensor_pwd, 0); + else printk(KERN_ERR "mt913t013 error: request gpio %d failed: " + "%d\n", cam->sensor_pwd, ret); + gpio_free(cam->sensor_pwd); + + /* enable clk */ + msm_camio_clk_enable(CAMIO_VFE_MDC_CLK); + msm_camio_clk_enable(CAMIO_MDC_CLK); + + /* reset CAMIF */ + mt9t013_camif_pad_reg_reset(); + + /* set mclk */ + ret = msm_camio_clk_rate_set(24000000); + if(ret < 0) + printk(KERN_ERR "camio clk rate select error\n"); + mdelay(2); + + /* enable gpio */ + cam->config_gpio_on(); + /* delay 2 ms */ + mdelay(2); + + /* reset sensor sequency */ + gpio_direction_output(cam->sensor_reset, 0); + mdelay(2); + gpio_direction_output(cam->sensor_reset, 1); + gpio_free(cam->sensor_reset); + mdelay(2); + + printk(KERN_INFO "mt9t013: camera sensor init sequence done\n"); +} + +#define CLK_DISABLE_AND_PUT(clk) do { \ + if (clk) { \ + if (clk##_enabled) { \ + printk(KERN_INFO "mt9t013: disabling "#clk"\n");\ + clk_disable(clk); \ + clk##_enabled = 0; \ + } \ + printk(KERN_INFO \ + "mt9t013: clk_put(%s): %p\n", #clk, clk); \ + clk_put(clk); \ + clk = NULL; \ + } \ +} while(0) + +static void mt9t013_sensor_suspend(void) +{ + printk(KERN_INFO "mt9t013: camera sensor suspend sequence\n"); + if (!pclient) { + return; + } + /*disable clk*/ + msm_camio_clk_disable(CAMIO_VFE_MDC_CLK); + msm_camio_clk_disable(CAMIO_MDC_CLK); + CLK_DISABLE_AND_PUT(vfe_clk); /* this matches clk_select(1) */ + /* disable gpios */ + cam->config_gpio_off(); + printk(KERN_INFO "mt9t013: camera sensor suspend sequence done\n"); +} + +static int mt9t013_open(struct inode *ip, struct file *fp) +{ + int rc = -EBUSY; + down(&sem); + printk(KERN_INFO "mt9t013: open\n"); + if (!opened) { + printk(KERN_INFO "mt9t013: prevent collapse on idle\n"); + prevent_suspend(); + cam->config_gpio_on(); + opened = 1; + rc = 0; + } + up(&sem); + return rc; +} + +static int mt9t013_release(struct inode *ip, struct file *fp) +{ + int rc = -EBADF; + printk(KERN_INFO "mt9t013: release\n"); + down(&sem); + if (opened) { + printk(KERN_INFO "mt9t013: release clocks\n"); + + + /* mt9t013_i2c_power_down() should be called before closing MCLK */ + /* otherwise I2C_WRITE will always fail */ + mt9t013_i2c_power_down(); + + CLK_DISABLE_AND_PUT(mdc_clk); + 
CLK_DISABLE_AND_PUT(vfe_mdc_clk); + CLK_DISABLE_AND_PUT(vfe_clk); + mt9t013_lens_power(0); + + cam->config_gpio_off(); + + printk(KERN_INFO "mt9t013: allow collapse on idle\n"); + allow_suspend(); + rc = pclk_set = opened = 0; + } + up(&sem); + return rc; +} + +#undef CLK_DISABLE_AND_PUT + +#define CHECK() ({ \ + if (!mdc_clk_enabled || !vfe_mdc_clk_enabled) { \ + printk(KERN_ERR "mt9t013 error: one or more clocks" \ + " are NULL.\n"); \ + rc = -EIO; \ + } \ + !rc; }) + +static int mt9t013_camif_pad_reg_reset(void) +{ + int rc = clk_select(1); + if(rc < 0) { + printk(KERN_ERR "mt9t013 error switching to internal clock\n"); + return rc; + } + HWIO_OUTM (MDDI_CAMIF_CFG, + HWIO_FMSK (MDDI_CAMIF_CFG, CAM_SEL) | + HWIO_FMSK (MDDI_CAMIF_CFG, CAM_PCLK_SRC_SEL) | + HWIO_FMSK (MDDI_CAMIF_CFG, CAM_PCLK_INVERT), + 1 << HWIO_SHFT (MDDI_CAMIF_CFG, CAM_SEL) | + 3 << HWIO_SHFT (MDDI_CAMIF_CFG, CAM_PCLK_SRC_SEL) | + 0 << HWIO_SHFT (MDDI_CAMIF_CFG, CAM_PCLK_INVERT)); + msleep(10); + HWIO_OUTM (MDDI_CAMIF_CFG, + HWIO_FMSK (MDDI_CAMIF_CFG, CAM_PAD_REG_SW_RESET), + 1 << HWIO_SHFT (MDDI_CAMIF_CFG, + CAM_PAD_REG_SW_RESET)); + msleep(10); + HWIO_OUTM (MDDI_CAMIF_CFG, + HWIO_FMSK (MDDI_CAMIF_CFG, CAM_PAD_REG_SW_RESET), + 0 << HWIO_SHFT (MDDI_CAMIF_CFG, + CAM_PAD_REG_SW_RESET)); + msleep(10); + rc = clk_select(0); /* external */ + if(rc < 0) { + printk(KERN_ERR "mt9t013 error switching to external clock\n"); + return rc; + } + + return rc; +} + +#if ALLOW_USPACE_RW +#define COPY_FROM_USER(size) ({ \ + if (copy_from_user(rwbuf, argp, size)) rc = -EFAULT; \ + !rc; }) +#endif + +static long mt9t013_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + int rc = 0; + +#if ALLOW_USPACE_RW + unsigned short addr = 0; + unsigned short data = 0; + char rwbuf[4]; +#endif + + down(&sem); + + switch(cmd) { +#if ALLOW_USPACE_RW + case MT9T013_I2C_IOCTL_W: + if (/* CHECK() && */ COPY_FROM_USER(4)) { + addr = *((unsigned short *)rwbuf); + data = *((unsigned short *)(rwbuf+2)); + rc = mt9t013_i2c_write(addr, data); + } else + printk(KERN_ERR "mt9t013: write: err %d\n", rc); + break; + + case MT9T013_I2C_IOCTL_R: + if (/* CHECK() && */ COPY_FROM_USER(4)) { + addr = *((unsigned short*) rwbuf); + rc = mt9t013_i2c_read(addr, (unsigned short *)(rwbuf+2)); + if (!rc) { + if (copy_to_user(argp, rwbuf, 4)) { + printk(KERN_ERR "mt9t013: read: err " \ + "writeback -EFAULT\n"); + rc = -EFAULT; + } + } + } else + printk(KERN_ERR "mt9t013: read: err %d\n", rc); + break; + + case MT9T013_I2C_IOCTL_AF_W: + if (/* CHECK() && */ COPY_FROM_USER(3)) + rc = mt9t013_i2c_lens_write(*rwbuf, *(rwbuf + 1), *(rwbuf + 2)); + else + printk(KERN_ERR "mt9t013: af write: err %d\n", rc); + break; +#endif /* ALLOW_USPACE_RW */ + + case MT9T013_I2C_IOCTL_CAMIF_PAD_REG_RESET: + printk(KERN_INFO "mt9t013: CAMIF_PAD_REG_RESET\n"); + if (CHECK()) + rc = mt9t013_camif_pad_reg_reset(); + break; + + case MT9T013_I2C_IOCTL_CAMIF_PAD_REG_RESET_2: + printk(KERN_INFO "mt9t013: CAMIF_PAD_REG_RESET_2 (pclk_set %d)\n", + pclk_set); + if (!pclk_set) + rc = -EIO; + else if (CHECK()) { + HWIO_OUTM (MDDI_CAMIF_CFG, + HWIO_FMSK (MDDI_CAMIF_CFG, CAM_PAD_REG_SW_RESET), + 1 << HWIO_SHFT (MDDI_CAMIF_CFG, + CAM_PAD_REG_SW_RESET)); + msleep(10); + HWIO_OUTM (MDDI_CAMIF_CFG, + HWIO_FMSK (MDDI_CAMIF_CFG, CAM_PAD_REG_SW_RESET), + 0 << HWIO_SHFT (MDDI_CAMIF_CFG, + CAM_PAD_REG_SW_RESET)); + msleep(10); + } + break; + + case MT9T013_I2C_IOCTL_CAMIF_APPS_RESET: + printk(KERN_INFO "mt9t013: CAMIF_APPS_RESET\n"); + if (CHECK()) { + rc = 
clk_select(1);
+			if(rc < 0) {
+				printk(KERN_ERR "mt9t013 error switching to internal clock\n");
+				break;
+			}
+			HWIO_OUTM (APPS_RESET,
+				HWIO_FMSK(APPS_RESET,VFE),
+				1 << HWIO_SHFT(APPS_RESET,VFE));
+			udelay(10);
+			HWIO_OUTM (APPS_RESET,
+				HWIO_FMSK(APPS_RESET,VFE),
+				0 << HWIO_SHFT(APPS_RESET,VFE));
+			udelay(10);
+			rc = clk_select(0); /* external */
+			if(rc < 0) {
+				printk(KERN_ERR "mt9t013 error switching to external clock\n");
+				break;
+			}
+		}
+		break;
+
+	case CAMERA_LENS_POWER_ON:
+		rc = mt9t013_lens_power(1);
+		break;
+
+	case CAMERA_LENS_POWER_OFF:
+		rc = mt9t013_lens_power(0);
+		break;
+
+	case MT9T013_I2C_IOCTL_CLK_ENABLE:
+		printk(KERN_INFO "mt9t013: clk enable %ld\n", arg);
+		rc = msm_camio_clk_enable(arg);
+		break;
+
+	case MT9T013_I2C_IOCTL_CLK_DISABLE:
+		printk(KERN_INFO "mt9t013: clk disable %ld\n", arg);
+		rc = msm_camio_clk_disable(arg);
+		break;
+
+	case MT9T013_I2C_IOCTL_CLK_SELECT:
+		printk(KERN_INFO "mt9t013: clk select %ld\n", arg);
+		rc = clk_select(!!arg);
+		break;
+
+	case MT9T013_I2C_IOCTL_CLK_FREQ_PROG:
+		printk(KERN_INFO "mt9t013: clk rate select %ld\n", arg);
+		rc = msm_camio_clk_rate_set(arg);
+		break;
+
+	case MT9T013_I2C_IOCTL_GET_REGISTERS:
+		printk(KERN_INFO "mt9t013: get registers\n");
+		if (copy_to_user(argp, &mt9t013_reg_pattern.reg, sizeof(mt9t013_reg_pattern.reg)))
+			rc = -EFAULT;
+		break;
+
+	case MT9T013_I2C_IOCTL_SENSOR_SETTING:
+		printk(KERN_INFO "mt9t013: sensor setting 0x%lx\n", arg);
+		rc = mt9t013_i2c_sensor_setting(arg);
+		break;
+
+	case MT9T013_I2C_IOCTL_EXPOSURE_GAIN: {
+		struct mt9t013_exposure_gain exp;
+		if (copy_from_user(&exp, argp, sizeof(exp))) {
+			printk(KERN_ERR "mt9t013: (exposure gain) invalid user pointer\n");
+			rc = -EFAULT;
+			break;
+		}
+		rc = mt9t013_i2c_exposure_gain(exp.mode, exp.line, exp.gain);
+	}
+		break;
+
+	case MT9T013_I2C_IOCTL_MOVE_FOCUS:
+		printk(KERN_INFO "mt9t013: move focus %ld\n", arg);
+		rc = mt9t013_i2c_move_focus((uint16_t)arg);
+		break;
+
+	case MT9T013_I2C_IOCTL_SET_DEFAULT_FOCUS:
+		printk(KERN_INFO "mt9t013: set default focus %ld\n", arg);
+		rc = mt9t013_i2c_set_default_focus((uint8_t)arg);
+		break;
+
+	case MT9T013_I2C_IOCTL_POWER_DOWN:
+		rc = mt9t013_i2c_power_down();
+		break;
+
+	case MT9T013_I2C_IOCTL_INIT: {
+		struct mt9t013_init init;
+		printk(KERN_INFO "mt9t013: init\n");
+		if (copy_from_user(&init, argp, sizeof(init))) {
+			printk(KERN_ERR "mt9t013: (init) invalid user pointer\n");
+			rc = -EFAULT;
+			break;
+		}
+		rc = mt9t013_i2c_sensor_init(&init);
+		if (copy_to_user(argp, &init, sizeof(init)))
+			rc = -EFAULT;
+	}
+		break;
+
+	case CAMERA_CONFIGURE_GPIOS:
+	case CAMERA_UNCONFIGURE_GPIOS:
+		break;
+
+	default:
+		printk(KERN_INFO "mt9t013: unknown ioctl %d\n", cmd);
+		break;
+	}
+
+	up(&sem);
+
+	return rc;
+}
+
+#undef CHECK
+
+static int mt9t013_lens_power(int on)
+{
+	int rc;
+	printk(KERN_INFO "mt9t013: lens power %d\n", on);
+	rc = gpio_request(cam->vcm_pwd, "mt9t013");
+	if (!rc)
+		gpio_direction_output(cam->vcm_pwd, !on);
+	else printk(KERN_ERR "mt9t013 error: request gpio %d failed:"
+		" %d\n", cam->vcm_pwd, rc);
+	gpio_free(cam->vcm_pwd);
+	return rc;
+}
+
+#define I2C_WRITE(reg,data) if (mt9t013_i2c_write(reg, data) < 0) return -EIO
+#define MT9T013_MU3M0VC_RESET_DELAY_MSECS 66
+
+static int mt9t013_i2c_sensor_init(struct mt9t013_init *init)
+{
+	int rc;
+
+	/* RESET the sensor via I2C register */
+	I2C_WRITE(MT9T013_REG_RESET_REGISTER, 0x10cc & 0xfffe);
+	msleep(MT9T013_MU3M0VC_RESET_DELAY_MSECS);
+
+	if ((rc = mt9t013_i2c_read(MT9T013_MU3M0VC_REG_MODEL_ID, &init->chipid)) < 0) {
+
printk(KERN_ERR "mt9t013: could not read chip id: %d\n", rc); + return rc; + } + printk(KERN_INFO "mt9t013: chip id: %d\n", init->chipid); + + if (init->chipid != MT9T013_MU3M0VC_MODEL_ID) { + printk(KERN_INFO "mt9t013: chip id %d is invalid\n", + init->chipid); + return -EINVAL; + } + + I2C_WRITE(0x306E, 0x9080); + I2C_WRITE(0x301A, 0x10CC); + I2C_WRITE(0x3064, 0x0805); + msleep(MT9T013_MU3M0VC_RESET_DELAY_MSECS); + + if ((rc = mt9t013_i2c_sensor_setting(CAMSENSOR_REG_INIT | + ((init->preview ? 0 : 1) << 1))) < 0) { + printk(KERN_INFO "mt9t013: failed to configure the sensor\n"); + return rc; + } + + mt9t013_i2c_power_up(); + + return 0; +} + +static int mt9t013_mu3m0vc_set_lc(void) +{ + /* lens shading 85% TL84 */ + I2C_WRITE(0x360A, 0x0290); // P_RD_P0Q0 + I2C_WRITE(0x360C, 0xC92D); // P_RD_P0Q1 + I2C_WRITE(0x360E, 0x0771); // P_RD_P0Q2 + I2C_WRITE(0x3610, 0xE38C); // P_RD_P0Q3 + I2C_WRITE(0x3612, 0xD74F); // P_RD_P0Q4 + I2C_WRITE(0x364A, 0x168C); // P_RD_P1Q0 + I2C_WRITE(0x364C, 0xCACB); // P_RD_P1Q1 + I2C_WRITE(0x364E, 0x8C4C); // P_RD_P1Q2 + I2C_WRITE(0x3650, 0x0BEA); // P_RD_P1Q3 + I2C_WRITE(0x3652, 0xDC0F); // P_RD_P1Q4 + I2C_WRITE(0x368A, 0x70B0); // P_RD_P2Q0 + I2C_WRITE(0x368C, 0x200B); // P_RD_P2Q1 + I2C_WRITE(0x368E, 0x30B2); // P_RD_P2Q2 + I2C_WRITE(0x3690, 0xD04F); // P_RD_P2Q3 + I2C_WRITE(0x3692, 0xACF5); // P_RD_P2Q4 + I2C_WRITE(0x36CA, 0xF7C9); // P_RD_P3Q0 + I2C_WRITE(0x36CC, 0x2AED); // P_RD_P3Q1 + I2C_WRITE(0x36CE, 0xA652); // P_RD_P3Q2 + I2C_WRITE(0x36D0, 0x8192); // P_RD_P3Q3 + I2C_WRITE(0x36D2, 0x3A15); // P_RD_P3Q4 + I2C_WRITE(0x370A, 0xDA30); // P_RD_P4Q0 + I2C_WRITE(0x370C, 0x2E2F); // P_RD_P4Q1 + I2C_WRITE(0x370E, 0xBB56); // P_RD_P4Q2 + I2C_WRITE(0x3710, 0x8195); // P_RD_P4Q3 + I2C_WRITE(0x3712, 0x02F9); // P_RD_P4Q4 + I2C_WRITE(0x3600, 0x0230); // P_GR_P0Q0 + I2C_WRITE(0x3602, 0x58AD); // P_GR_P0Q1 + I2C_WRITE(0x3604, 0x18D1); // P_GR_P0Q2 + I2C_WRITE(0x3606, 0x260D); // P_GR_P0Q3 + I2C_WRITE(0x3608, 0xF530); // P_GR_P0Q4 + I2C_WRITE(0x3640, 0x17EB); // P_GR_P1Q0 + I2C_WRITE(0x3642, 0x3CAB); // P_GR_P1Q1 + I2C_WRITE(0x3644, 0x87CE); // P_GR_P1Q2 + I2C_WRITE(0x3646, 0xC02E); // P_GR_P1Q3 + I2C_WRITE(0x3648, 0xF48F); // P_GR_P1Q4 + I2C_WRITE(0x3680, 0x5350); // P_GR_P2Q0 + I2C_WRITE(0x3682, 0x7EAF); // P_GR_P2Q1 + I2C_WRITE(0x3684, 0x4312); // P_GR_P2Q2 + I2C_WRITE(0x3686, 0xC652); // P_GR_P2Q3 + I2C_WRITE(0x3688, 0xBC15); // P_GR_P2Q4 + I2C_WRITE(0x36C0, 0xB8AD); // P_GR_P3Q0 + I2C_WRITE(0x36C2, 0xBDCD); // P_GR_P3Q1 + I2C_WRITE(0x36C4, 0xE4B2); // P_GR_P3Q2 + I2C_WRITE(0x36C6, 0xB50F); // P_GR_P3Q3 + I2C_WRITE(0x36C8, 0x5B95); // P_GR_P3Q4 + I2C_WRITE(0x3700, 0xFC90); // P_GR_P4Q0 + I2C_WRITE(0x3702, 0x8C51); // P_GR_P4Q1 + I2C_WRITE(0x3704, 0xCED6); // P_GR_P4Q2 + I2C_WRITE(0x3706, 0xB594); // P_GR_P4Q3 + I2C_WRITE(0x3708, 0x0A39); // P_GR_P4Q4 + I2C_WRITE(0x3614, 0x0230); // P_BL_P0Q0 + I2C_WRITE(0x3616, 0x160D); // P_BL_P0Q1 + I2C_WRITE(0x3618, 0x08D1); // P_BL_P0Q2 + I2C_WRITE(0x361A, 0x98AB); // P_BL_P0Q3 + I2C_WRITE(0x361C, 0xEA50); // P_BL_P0Q4 + I2C_WRITE(0x3654, 0xB4EA); // P_BL_P1Q0 + I2C_WRITE(0x3656, 0xEA6C); // P_BL_P1Q1 + I2C_WRITE(0x3658, 0xFE08); // P_BL_P1Q2 + I2C_WRITE(0x365A, 0x2C6E); // P_BL_P1Q3 + I2C_WRITE(0x365C, 0xEB0E); // P_BL_P1Q4 + I2C_WRITE(0x3694, 0x6DF0); // P_BL_P2Q0 + I2C_WRITE(0x3696, 0x3ACF); // P_BL_P2Q1 + I2C_WRITE(0x3698, 0x3E0F); // P_BL_P2Q2 + I2C_WRITE(0x369A, 0xB2B1); // P_BL_P2Q3 + I2C_WRITE(0x369C, 0xC374); // P_BL_P2Q4 + I2C_WRITE(0x36D4, 0xF2AA); // P_BL_P3Q0 + I2C_WRITE(0x36D6, 0x8CCC); // P_BL_P3Q1 + 
I2C_WRITE(0x36D8, 0xDEF2); // P_BL_P3Q2 + I2C_WRITE(0x36DA, 0xFA11); // P_BL_P3Q3 + I2C_WRITE(0x36DC, 0x42F5); // P_BL_P3Q4 + I2C_WRITE(0x3714, 0xF4F1); // P_BL_P4Q0 + I2C_WRITE(0x3716, 0xF6F0); // P_BL_P4Q1 + I2C_WRITE(0x3718, 0x8FD6); // P_BL_P4Q2 + I2C_WRITE(0x371A, 0xEA14); // P_BL_P4Q3 + I2C_WRITE(0x371C, 0x6338); // P_BL_P4Q4 + I2C_WRITE(0x361E, 0x0350); // P_GB_P0Q0 + I2C_WRITE(0x3620, 0x91AE); // P_GB_P0Q1 + I2C_WRITE(0x3622, 0x0571); // P_GB_P0Q2 + I2C_WRITE(0x3624, 0x100D); // P_GB_P0Q3 + I2C_WRITE(0x3626, 0xCA70); // P_GB_P0Q4 + I2C_WRITE(0x365E, 0xE6CB); // P_GB_P1Q0 + I2C_WRITE(0x3660, 0x50ED); // P_GB_P1Q1 + I2C_WRITE(0x3662, 0x3DAE); // P_GB_P1Q2 + I2C_WRITE(0x3664, 0xAA4F); // P_GB_P1Q3 + I2C_WRITE(0x3666, 0xDC50); // P_GB_P1Q4 + I2C_WRITE(0x369E, 0x5470); // P_GB_P2Q0 + I2C_WRITE(0x36A0, 0x1F6E); // P_GB_P2Q1 + I2C_WRITE(0x36A2, 0x6671); // P_GB_P2Q2 + I2C_WRITE(0x36A4, 0xC010); // P_GB_P2Q3 + I2C_WRITE(0x36A6, 0x8DF5); // P_GB_P2Q4 + I2C_WRITE(0x36DE, 0x0B0C); // P_GB_P3Q0 + I2C_WRITE(0x36E0, 0x84CE); // P_GB_P3Q1 + I2C_WRITE(0x36E2, 0x8493); // P_GB_P3Q2 + I2C_WRITE(0x36E4, 0xA610); // P_GB_P3Q3 + I2C_WRITE(0x36E6, 0x50B5); // P_GB_P3Q4 + I2C_WRITE(0x371E, 0x9651); // P_GB_P4Q0 + I2C_WRITE(0x3720, 0x1EAB); // P_GB_P4Q1 + I2C_WRITE(0x3722, 0xAF76); // P_GB_P4Q2 + I2C_WRITE(0x3724, 0xE4F4); // P_GB_P4Q3 + I2C_WRITE(0x3726, 0x79F8); // P_GB_P4Q4 + I2C_WRITE(0x3782, 0x0410); // Original LC 2 // POLY_ORIGIN_C + I2C_WRITE(0x3784, 0x0320); // POLY_ORIGIN_R + I2C_WRITE(0x3780, 0x8000); // POLY_SC_ENABLE + + return 0; +} + +static int mt9t013_set_pclk(int rt, int div_adj) +{ + int rc; + if ((rc = mt9t013_i2c_power_down()) < 0) return rc; + I2C_WRITE(REG_VT_PIX_CLK_DIV, mt9t013_reg_pattern.reg[rt].vt_pix_clk_div); + I2C_WRITE(REG_VT_SYS_CLK_DIV, mt9t013_reg_pattern.reg[rt].vt_sys_clk_div); + I2C_WRITE(REG_PRE_PLL_CLK_DIV, mt9t013_reg_pattern.reg[rt].pre_pll_clk_div * div_adj); + I2C_WRITE(REG_PLL_MULTIPLIER, mt9t013_reg_pattern.reg[rt].pll_multiplier); + I2C_WRITE(REG_OP_PIX_CLK_DIV, mt9t013_reg_pattern.reg[rt].op_pix_clk_div); + I2C_WRITE(REG_OP_SYS_CLK_DIV, mt9t013_reg_pattern.reg[rt].op_sys_clk_div); + if ((rc = mt9t013_i2c_power_up()) < 0) return rc; + pclk_set = 1; + return 0; +} + +static int mt9t013_i2c_sensor_setting(unsigned long arg) +{ + uint32_t update = arg & 1; + uint32_t rt = (arg & 2) >> 1; + + if (rt > 1 || update > 1) { + printk(KERN_ERR "mt9t013: invalid values %d of rt or %d of update\n", + rt, update); + return -EINVAL; + } + + switch (update) { + case CAMSENSOR_REG_UPDATE_PERIODIC: { + uint16_t pclk_div_adj = arg >> 16; + + printk(KERN_INFO "CAMSENSOR_REG_UPDATE_PERIODIC (rt %d)\n", rt); + + if (!pclk_div_adj || pclk_div_adj > 2) { + printk(KERN_ERR "mt9t013: invalid value %d of pclk_div_adj\n", + pclk_div_adj); + return -EINVAL; + } + + if (mt9t013_set_pclk(rt, pclk_div_adj) < 0) + return -EIO; + + I2C_WRITE(REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_HOLD); + I2C_WRITE(REG_ROW_SPEED, mt9t013_reg_pattern.reg[rt].row_speed); + I2C_WRITE(REG_X_ADDR_START, mt9t013_reg_pattern.reg[rt].x_addr_start); + I2C_WRITE(REG_X_ADDR_END, mt9t013_reg_pattern.reg[rt].x_addr_end); + I2C_WRITE(REG_Y_ADDR_START, mt9t013_reg_pattern.reg[rt].y_addr_start); + I2C_WRITE(REG_Y_ADDR_END, mt9t013_reg_pattern.reg[rt].y_addr_end); + + if (machine_is_sapphire()) { + if (rt == 0) { + I2C_WRITE(REG_READ_MODE, 0x046F); + } else { + I2C_WRITE(REG_READ_MODE, 0x0027); + } + } else { + I2C_WRITE(REG_READ_MODE, + mt9t013_reg_pattern.reg[rt].read_mode); + } + + I2C_WRITE(REG_SCALE_M, 
mt9t013_reg_pattern.reg[rt].scale_m); + I2C_WRITE(REG_X_OUTPUT_SIZE, mt9t013_reg_pattern.reg[rt].x_output_size); + I2C_WRITE(REG_Y_OUTPUT_SIZE, mt9t013_reg_pattern.reg[rt].y_output_size); + I2C_WRITE(REG_LINE_LENGTH_PCK, mt9t013_reg_pattern.reg[rt].line_length_pck); + I2C_WRITE(REG_FRAME_LENGTH_LINES, (uint16_t) (mt9t013_reg_pattern.reg[rt].frame_length_lines * fps_divider)); + I2C_WRITE(REG_COARSE_INTEGRATION_TIME, mt9t013_reg_pattern.reg[rt].coarse_integration_time); + I2C_WRITE(REG_FINE_INTEGRATION_TIME, mt9t013_reg_pattern.reg[rt].fine_integration_time); + I2C_WRITE(REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_UPDATE); + } + break; + + case CAMSENSOR_REG_INIT: + printk(KERN_INFO "CAMSENSOR_REG_INIT (rt %d)\n", rt); + + if (mt9t013_set_pclk(rt, 1) < 0) return -EIO; + + I2C_WRITE(REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_HOLD); + + /* additional power saving mode ok around 38.2MHz */ + I2C_WRITE(0x3084, 0x2409); + I2C_WRITE(0x3092, 0x0A49); + I2C_WRITE(0x3094, 0x4949); + I2C_WRITE(0x3096, 0x4949); + + /* set preview or snapshot mode */ + I2C_WRITE(REG_ROW_SPEED, + mt9t013_reg_pattern.reg[rt].row_speed); + I2C_WRITE(REG_X_ADDR_START, + mt9t013_reg_pattern.reg[rt].x_addr_start); + I2C_WRITE(REG_X_ADDR_END, + mt9t013_reg_pattern.reg[rt].x_addr_end); + I2C_WRITE(REG_Y_ADDR_START, + mt9t013_reg_pattern.reg[rt].y_addr_start); + I2C_WRITE(REG_Y_ADDR_END, + mt9t013_reg_pattern.reg[rt].y_addr_end); + + if (machine_is_sapphire()) { + if (rt == 0) { + I2C_WRITE(REG_READ_MODE, 0x046F); + } else { + I2C_WRITE(REG_READ_MODE, 0x0027); + } + } else { + I2C_WRITE(REG_READ_MODE, + mt9t013_reg_pattern.reg[rt].read_mode); + } + + I2C_WRITE(REG_SCALE_M, mt9t013_reg_pattern.reg[rt].scale_m); + I2C_WRITE(REG_X_OUTPUT_SIZE, mt9t013_reg_pattern.reg[rt].x_output_size); + I2C_WRITE(REG_Y_OUTPUT_SIZE, mt9t013_reg_pattern.reg[rt].y_output_size); + I2C_WRITE(REG_LINE_LENGTH_PCK, mt9t013_reg_pattern.reg[rt].line_length_pck); + I2C_WRITE(REG_FRAME_LENGTH_LINES, mt9t013_reg_pattern.reg[rt].frame_length_lines); + I2C_WRITE(REG_COARSE_INTEGRATION_TIME, mt9t013_reg_pattern.reg[rt].coarse_integration_time); + I2C_WRITE(REG_FINE_INTEGRATION_TIME, mt9t013_reg_pattern.reg[rt].fine_integration_time); + + I2C_WRITE(REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_UPDATE); + + /* load lens shading */ + I2C_WRITE(REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_HOLD); + if(mt9t013_mu3m0vc_set_lc() < 0) return -EIO; + I2C_WRITE(REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_UPDATE); + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int mt9t013_i2c_exposure_gain(uint32_t mode, uint16_t line, + uint16_t gain) +{ + static const uint16_t max_legal_gain = 0x01FF; + + if (gain > max_legal_gain) gain = max_legal_gain; + + gain |= 0x200; /* set digital gain */ + + /*I2C_WRITE(REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_HOLD);*/ + I2C_WRITE(REG_GLOBAL_GAIN, gain); + I2C_WRITE(REG_COARSE_INTEGRATION_TIME, line); + /*I2C_WRITE(REG_GROUPED_PARAMETER_HOLD, GROUPED_PARAMETER_UPDATE);*/ + if (mode == 1) { + /* RESET REGISTER RESTART */ + I2C_WRITE(MT9T013_REG_RESET_REGISTER, 0x10cc|0x0002); + } + return 0; +} + +#define I2C_AF_WRITE(command, data) if (mt9t013_i2c_lens_write(AF_I2C_ID >> 1, command, data) < 0) return -EIO; + +static int mt9t013_i2c_move_focus(uint16_t position) +{ + uint8_t code_val_msb = (position >> 2) | ((position << 4) >> 6); + uint8_t code_val_lsb = (position & 0x03) << 6; + + I2C_AF_WRITE(code_val_msb, code_val_lsb); + return 0; +} + +static int mt9t013_i2c_set_default_focus(uint8_t step) +{ + 
I2C_AF_WRITE(0x01, step);
+	return 0;
+}
+
+static int powered;
+
+static int mt9t013_i2c_power_up(void)
+{
+	printk(KERN_INFO "mt9t013: power up\n");
+	if (powered) {
+		printk(KERN_INFO "mt9t013: already powered up\n");
+		return 0;
+	}
+	I2C_WRITE(MT9T013_REG_RESET_REGISTER, MT9T013_RESET_REGISTER_PWON);
+	mdelay(5);
+	powered = 1;
+	return 0;
+}
+
+static int mt9t013_i2c_power_down(void)
+{
+	int i = 0, try_more = 100;
+
+	printk(KERN_INFO "mt9t013: power down\n");
+	if (!powered) {
+		printk(KERN_INFO "mt9t013: already powered down\n");
+		return 0;
+	}
+
+	/* I2C_WRITE(MT9T013_REG_RESET_REGISTER, MT9T013_RESET_REGISTER_PWOFF); */
+	/* Modified by Horng for more tries while I2C write fail */
+	/* -------------------------------------------------------------------- */
+	while(mt9t013_i2c_write(MT9T013_REG_RESET_REGISTER, MT9T013_RESET_REGISTER_PWOFF) < 0)
+	{
+		if (i >= try_more)
+			return -EIO;
+		else {
+			i++;
+			printk(KERN_INFO "mt9t013: in mt9t013_i2c_power_down() call mt9t013_i2c_write() failed !!! (try %d times)\n", i);
+			mdelay(i+5);
+		}
+	}
+	/* -------------------------------------------------------------------- */
+	mdelay(5);
+	powered = pclk_set = 0;
+	return 0;
+}
+
+#undef I2C_WRITE
+#undef I2C_AF_WRITE
+
+static int mt9t013_init_client(struct i2c_client *client)
+{
+	/* Initialize the MT9T013 Chip */
+	init_waitqueue_head(&g_data_ready_wait_queue);
+	return 0;
+}
+
+static struct file_operations mt9t013_fops = {
+	.owner = THIS_MODULE,
+	.open = mt9t013_open,
+	.release = mt9t013_release,
+	.unlocked_ioctl = mt9t013_ioctl,
+};
+
+static struct miscdevice mt9t013_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "mt9t013",
+	.fops = &mt9t013_fops,
+};
+
+static const char *MT9T013Vendor = "micron";
+static const char *MT9T013NAME = "mt9t013";
+static const char *MT9T013Size = "3M";
+
+
+
+static ssize_t sensor_vendor_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+
+	sprintf(buf, "%s %s %s\n", MT9T013Vendor, MT9T013NAME, MT9T013Size);
+	ret = strlen(buf) + 1;
+
+	return ret;
+}
+
+static DEVICE_ATTR(sensor, 0444, sensor_vendor_show, NULL);
+
+
+static struct kobject *android_mt9t013 = NULL;
+
+static int mt9t013_sysfs_init(void)
+{
+	int ret;
+	printk(KERN_INFO "mt9t013: kobject create and add\n");
+	android_mt9t013 = kobject_create_and_add("android_camera", NULL);
+	if (android_mt9t013 == NULL) {
+		printk(KERN_INFO "mt9t013_sysfs_init: subsystem_register " \
+			"failed\n");
+		ret = -ENOMEM;
+		return ret;
+	}
+	printk(KERN_INFO "mt9t013: sysfs_create_file\n");
+	ret = sysfs_create_file(android_mt9t013, &dev_attr_sensor.attr);
+	if (ret) {
+		printk(KERN_INFO "mt9t013_sysfs_init: sysfs_create_file " \
+			"failed\n");
+		kobject_del(android_mt9t013);
+	}
+	return ret;
+}
+
+
+
+static int mt9t013_probe(
+	struct i2c_client *client, const struct i2c_device_id *id)
+{
+	struct mt9t013_data *mt;
+	int err = 0;
+	printk(KERN_INFO "mt9t013: probe\n");
+
+	if(!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+		goto exit_check_functionality_failed;
+
+	if(!(mt = kzalloc( sizeof(struct mt9t013_data), GFP_KERNEL))) {
+		err = -ENOMEM;
+		goto exit_alloc_data_failed;
+	}
+
+	i2c_set_clientdata(client, mt);
+	mt9t013_init_client(client);
+	pclient = client;
+	mt9t013_sensor_init();
+	mt9t013_sensor_suspend();
+
+	/* Register a misc device */
+	err = misc_register(&mt9t013_device);
+	if(err) {
+		printk(KERN_ERR "mt9t013_probe: misc_register failed\n");
+		goto exit_misc_device_register_failed;
+	}
+	init_suspend();
+	mt9t013_sysfs_init();
+	return 
0; + +exit_misc_device_register_failed: +exit_alloc_data_failed: +exit_check_functionality_failed: + + return err; +} + + +static int mt9t013_remove(struct i2c_client *client) +{ + struct mt9t013_data *mt = i2c_get_clientdata(client); + free_irq(client->irq, mt); + deinit_suspend(); + pclient = NULL; + misc_deregister(&mt9t013_device); + kfree(mt); + return 0; +} + +static const struct i2c_device_id mt9t013_id[] = { + { "mt9t013", 0 }, + { } +}; + +static struct i2c_driver mt9t013_driver = { + .probe = mt9t013_probe, + .remove = mt9t013_remove, + .id_table = mt9t013_id, + .driver = { + .name = "mt9t013", + }, +}; + +static int mt9t013_plat_probe(struct platform_device *pdev __attribute__((unused))) +{ + int rc = -EFAULT; + + if(pdev->dev.platform_data) + { + printk(KERN_INFO "pdev->dev.platform_data is not NULL\n"); + cam = pdev->dev.platform_data; + rc = i2c_add_driver(&mt9t013_driver); + } + return rc; +} + +static struct platform_driver mt9t013_plat_driver = { + .probe = mt9t013_plat_probe, + .driver = { + .name = "camera", + .owner = THIS_MODULE, + }, +}; + +static int __init mt9t013_init(void) +{ + return platform_driver_register(&mt9t013_plat_driver); +} + +module_init(mt9t013_init); + +MODULE_AUTHOR("Kidd Chen"); +MODULE_DESCRIPTION("MT9T013 Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 0c73fe39a236b..413e4ef60356e 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -1177,7 +1177,7 @@ static struct cdrom_device_ops ide_cdrom_dops = { .open = ide_cdrom_open_real, .release = ide_cdrom_release_real, .drive_status = ide_cdrom_drive_status, - .media_changed = ide_cdrom_check_media_change_real, + .check_events = ide_cdrom_check_events_real, .tray_move = ide_cdrom_tray_move, .lock_door = ide_cdrom_lock_door, .select_speed = ide_cdrom_select_speed, @@ -1702,10 +1702,11 @@ static int idecd_ioctl(struct block_device *bdev, fmode_t mode, } -static int idecd_media_changed(struct gendisk *disk) +static unsigned int idecd_check_events(struct gendisk *disk, + unsigned int clearing) { struct cdrom_info *info = ide_drv_g(disk, cdrom_info); - return cdrom_media_changed(&info->devinfo); + return cdrom_check_events(&info->devinfo, clearing); } static int idecd_revalidate_disk(struct gendisk *disk) @@ -1723,7 +1724,7 @@ static const struct block_device_operations idecd_ops = { .open = idecd_open, .release = idecd_release, .ioctl = idecd_ioctl, - .media_changed = idecd_media_changed, + .check_events = idecd_check_events, .revalidate_disk = idecd_revalidate_disk }; @@ -1789,7 +1790,8 @@ static int ide_cd_probe(ide_drive_t *drive) ide_cd_read_toc(drive, &sense); g->fops = &idecd_ops; - g->flags |= GENHD_FL_REMOVABLE; + g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE; + g->events = DISK_EVENT_MEDIA_CHANGE; add_disk(g); return 0; diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h index 93a3cf1b0f3f8..1efc936f5b667 100644 --- a/drivers/ide/ide-cd.h +++ b/drivers/ide/ide-cd.h @@ -111,7 +111,8 @@ int cdrom_check_status(ide_drive_t *, struct request_sense *); int ide_cdrom_open_real(struct cdrom_device_info *, int); void ide_cdrom_release_real(struct cdrom_device_info *); int ide_cdrom_drive_status(struct cdrom_device_info *, int); -int ide_cdrom_check_media_change_real(struct cdrom_device_info *, int); +unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *, + unsigned int clearing, int slot_nr); int ide_cdrom_tray_move(struct cdrom_device_info *, int); int ide_cdrom_lock_door(struct cdrom_device_info *, int); 
int ide_cdrom_select_speed(struct cdrom_device_info *, int); diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c index 766b3deeb23c7..2a6bc50e8a41e 100644 --- a/drivers/ide/ide-cd_ioctl.c +++ b/drivers/ide/ide-cd_ioctl.c @@ -79,8 +79,8 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr) return CDS_DRIVE_NOT_READY; } -int ide_cdrom_check_media_change_real(struct cdrom_device_info *cdi, - int slot_nr) +unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *cdi, + unsigned int clearing, int slot_nr) { ide_drive_t *drive = cdi->handle; int retval; @@ -89,9 +89,9 @@ int ide_cdrom_check_media_change_real(struct cdrom_device_info *cdi, (void) cdrom_check_status(drive, NULL); retval = (drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED) ? 1 : 0; drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED; - return retval; + return retval ? DISK_EVENT_MEDIA_CHANGE : 0; } else { - return -EINVAL; + return 0; } } diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c index 35c4b43585e34..c4ffd4888939a 100644 --- a/drivers/ide/ide-gd.c +++ b/drivers/ide/ide-gd.c @@ -285,11 +285,12 @@ static int ide_gd_getgeo(struct block_device *bdev, struct hd_geometry *geo) return 0; } -static int ide_gd_media_changed(struct gendisk *disk) +static unsigned int ide_gd_check_events(struct gendisk *disk, + unsigned int clearing) { struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj); ide_drive_t *drive = idkp->drive; - int ret; + bool ret; /* do not scan partitions twice if this is a removable device */ if (drive->dev_flags & IDE_DFLAG_ATTACH) { @@ -297,10 +298,10 @@ static int ide_gd_media_changed(struct gendisk *disk) return 0; } - ret = !!(drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED); + ret = drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED; drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED; - return ret; + return ret ? DISK_EVENT_MEDIA_CHANGE : 0; } static void ide_gd_unlock_native_capacity(struct gendisk *disk) @@ -318,7 +319,7 @@ static int ide_gd_revalidate_disk(struct gendisk *disk) struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj); ide_drive_t *drive = idkp->drive; - if (ide_gd_media_changed(disk)) + if (ide_gd_check_events(disk, 0)) drive->disk_ops->get_capacity(drive); set_capacity(disk, ide_gd_capacity(drive)); @@ -340,7 +341,7 @@ static const struct block_device_operations ide_gd_ops = { .release = ide_gd_release, .ioctl = ide_gd_ioctl, .getgeo = ide_gd_getgeo, - .media_changed = ide_gd_media_changed, + .check_events = ide_gd_check_events, .unlock_native_capacity = ide_gd_unlock_native_capacity, .revalidate_disk = ide_gd_revalidate_disk }; @@ -412,6 +413,7 @@ static int ide_gd_probe(ide_drive_t *drive) if (drive->dev_flags & IDE_DFLAG_REMOVABLE) g->flags = GENHD_FL_REMOVABLE; g->fops = &ide_gd_ops; + g->events = DISK_EVENT_MEDIA_CHANGE; add_disk(g); return 0; diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 64e0903091a86..1d9616be41922 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -2989,6 +2989,7 @@ static int cm_sidr_req_handler(struct cm_work *work) goto out; /* No match. 
*/ } atomic_inc(&cur_cm_id_priv->refcount); + atomic_inc(&cm_id_priv->refcount); spin_unlock_irq(&cm.lock); cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 6884da24fde1e..e450c5a877276 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1210,6 +1210,11 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) cm_id->context = conn_id; cm_id->cm_handler = cma_ib_handler; + /* + * Protect against the user destroying conn_id from another thread + * until we're done accessing it. + */ + atomic_inc(&conn_id->refcount); ret = conn_id->id.event_handler(&conn_id->id, &event); if (!ret) { /* @@ -1222,8 +1227,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); mutex_unlock(&lock); mutex_unlock(&conn_id->handler_mutex); + cma_deref_id(conn_id); goto out; } + cma_deref_id(conn_id); /* Destroy the CM ID by returning a non-zero value. */ conn_id->cm_id.ib = NULL; @@ -1425,17 +1432,25 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, event.param.conn.private_data_len = iw_event->private_data_len; event.param.conn.initiator_depth = attr.max_qp_init_rd_atom; event.param.conn.responder_resources = attr.max_qp_rd_atom; + + /* + * Protect against the user destroying conn_id from another thread + * until we're done accessing it. + */ + atomic_inc(&conn_id->refcount); ret = conn_id->id.event_handler(&conn_id->id, &event); if (ret) { /* User wants to destroy the CM ID */ conn_id->cm_id.iw = NULL; cma_exch(conn_id, CMA_DESTROYING); mutex_unlock(&conn_id->handler_mutex); + cma_deref_id(conn_id); rdma_destroy_id(&conn_id->id); goto out; } mutex_unlock(&conn_id->handler_mutex); + cma_deref_id(conn_id); out: if (dev) diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig index 47e8bd49d388d..17e0dea65b3b8 100644 --- a/drivers/input/Kconfig +++ b/drivers/input/Kconfig @@ -194,6 +194,8 @@ source "drivers/input/touchscreen/Kconfig" source "drivers/input/misc/Kconfig" +source "drivers/input/opticaljoystick/Kconfig" + endif menu "Hardware I/O ports" diff --git a/drivers/input/Makefile b/drivers/input/Makefile index 867badd4b47f7..8b4d6fca68acd 100644 --- a/drivers/input/Makefile +++ b/drivers/input/Makefile @@ -27,3 +27,5 @@ obj-$(CONFIG_INPUT_APMPOWER) += apm-power.o obj-$(CONFIG_INPUT_KEYRESET) += keyreset.o obj-$(CONFIG_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o + +obj-$(CONFIG_INPUT_OPTICALJOYSTICK) += opticaljoystick/ diff --git a/drivers/input/input.c b/drivers/input/input.c index 11905b6a30237..06e90a3618a10 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -352,8 +352,8 @@ void input_event(struct input_dev *dev, if (is_event_supported(type, dev->evbit, EV_MAX)) { spin_lock_irqsave(&dev->event_lock, flags); - add_input_randomness(type, code, value); input_handle_event(dev, type, code, value); + add_input_randomness(type, code, value); spin_unlock_irqrestore(&dev->event_lock, flags); } } diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index c7a92028f4509..c3a7a6711ffb4 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -362,6 +362,13 @@ config KEYBOARD_OPENCORES To compile this driver as a module, choose M here; the module will be called opencores-kbd. 
+config KEYBOARD_PM8058 + bool "Qualcomm PM8058 Matrix Keypad support" + depends on PM8058 + help + Say Y here to enable the driver for the keypad matrix interface + on the Qualcomm PM8058 power management I/C device. + config KEYBOARD_PXA27x tristate "PXA27x/PXA3xx keypad support" depends on PXA27x || PXA3xx || ARCH_MMP diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile index 468c627a28447..a085153e2522e 100644 --- a/drivers/input/keyboard/Makefile +++ b/drivers/input/keyboard/Makefile @@ -32,6 +32,7 @@ obj-$(CONFIG_KEYBOARD_NOMADIK) += nomadik-ske-keypad.o obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o obj-$(CONFIG_KEYBOARD_OMAP4) += omap4-keypad.o obj-$(CONFIG_KEYBOARD_OPENCORES) += opencores-kbd.o +obj-$(CONFIG_KEYBOARD_PM8058) += pm8058-keypad.o obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o obj-$(CONFIG_KEYBOARD_QT2160) += qt2160.o diff --git a/drivers/input/keyboard/pm8058-keypad.c b/drivers/input/keyboard/pm8058-keypad.c new file mode 100644 index 0000000000000..f1aefd42463ee --- /dev/null +++ b/drivers/input/keyboard/pm8058-keypad.c @@ -0,0 +1,417 @@ +/* drivers/input/keyboard/pm8058-keypad.c + * + * Copyright (C) 2010 Google, Inc. + * Copyright (C) 2009 Code Aurora Forum + * + * Author: Dima Zavin + * - Heavily based on the driver from the Code Aurora Forum. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + DEBUG_IRQ = 1U << 0, + DEBUG_KEYS = 1U << 1, +}; +static int debug_mask = 0; +module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); + +#define REG_KEYP_CTRL 0x148 +#define REG_KEYP_SCAN 0x149 +#define REG_KEYP_TEST 0x14a +#define REG_KEYP_NEW_DATA 0x14b +#define REG_KEYP_OLD_DATA 0x14c + +#define KP_SNS_MIN 5 +#define KP_SNS_MAX 8 +#define KP_DRV_MIN 5 +#define KP_DRV_MAX 18 + +#define KP_CLOCK_FREQ 32768 + +#define KPF_HAS_SYNC_READ 0x00000001 + +struct pm8058_keypad { + struct device *dev; + struct input_dev *input_dev; + int sense_irq; + int stuck_irq; + + int num_sns; + int num_drv; + const unsigned short *keymap; + + u8 key_state[KP_DRV_MAX]; + u8 stuck_state[KP_DRV_MAX]; + + u32 flags; +}; + +/* convenience wrapers */ +static inline int kp_writeb(struct pm8058_keypad *kp, u16 addr, u8 val) +{ + return pm8058_writeb(kp->dev->parent, addr, val); +} + +static inline int kp_readb(struct pm8058_keypad *kp, u16 addr, u8 *val) +{ + return pm8058_readb(kp->dev->parent, addr, val); +} + +static inline int kp_read_buf(struct pm8058_keypad *kp, u16 addr, + u8 *buf, int cnt) +{ + return pm8058_read_buf(kp->dev->parent, addr, buf, cnt); +} + +static int kp_hw_init(struct pm8058_keypad *kp, + struct pm8058_keypad_platform_data *pdata) +{ + int ret; + u8 val; + u8 drv_bits[] = { + 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, + }; + u8 sns_bits[] = { + 0, 0, 0, 0, 0, 0, 1, 2, 3, + }; + + val = ((drv_bits[pdata->num_drv] << 2) | + (sns_bits[pdata->num_sns] << 5)); + ret = kp_writeb(kp, REG_KEYP_CTRL, val); + if (ret) { + pr_err("%s: can't write kp ctrl\n", __func__); + goto out; + } + + val = ((((pdata->drv_hold_clks - 1) & 0x3) << 6) | + ((pdata->scan_delay_shift & 0x7) << 3) | + ((((pdata->debounce_ms / 5) & 0x3)) << 1)); + ret = kp_writeb(kp, REG_KEYP_SCAN, val); + if (ret) { + pr_err("%s: can't write kp scan\n", __func__); + goto out; + } + +out: + return ret; +} + +static int kp_get_scan_data(struct pm8058_keypad *kp, u8 *old, u8 *new) +{ + int ret; + u8 val; + + /* XXX: B0 only? */ + if (kp->flags & KPF_HAS_SYNC_READ) { + ret = kp_readb(kp, REG_KEYP_SCAN, &val); + if (ret) + goto err; + ret = kp_writeb(kp, REG_KEYP_SCAN, val | 1); + if (ret) + goto err; + /* 2 * 32KHz clocks */ + udelay((2 * USEC_PER_SEC / KP_CLOCK_FREQ) + 1); + } + + if (old) { + ret = kp_read_buf(kp, REG_KEYP_OLD_DATA, old, kp->num_drv); + if (ret) + goto done; + } + + ret = kp_read_buf(kp, REG_KEYP_NEW_DATA, new, kp->num_drv); + if (ret) + goto done; + +done: + if (kp->flags & KPF_HAS_SYNC_READ) { + /* 4 * 32KHz clocks */ + udelay((4 * USEC_PER_SEC / KP_CLOCK_FREQ) + 1); + ret = kp_readb(kp, REG_KEYP_SCAN, &val); + if (ret) + goto err; + ret = kp_writeb(kp, REG_KEYP_SCAN, val & (~0x1)); + if (ret) + goto err; + } + +err: + if (ret) + pr_err("%s: can't get scan data\n", __func__); + return ret; +} + +static int kp_process_scan_data(struct pm8058_keypad *kp, u8 *old, u8 *new) +{ + int drv; + int sns; + + for (drv = 0; drv < kp->num_drv; ++drv) { + unsigned long bits_changed = (new[drv] ^ old[drv]) & 0xff; + + for_each_set_bit(sns, &bits_changed, kp->num_sns) { + int key_idx = drv * kp->num_sns + sns; + unsigned int code = kp->keymap[key_idx] ?: KEY_UNKNOWN; + int down = !(new[drv] & (1 << sns)); + + if (debug_mask & DEBUG_KEYS) + pr_info("%s: key [%d:%d] %s\n", __func__, + drv, sns, down ? 
"down" : "up"); + input_event(kp->input_dev, EV_MSC, MSC_SCAN, key_idx); + input_report_key(kp->input_dev, code, down); + input_sync(kp->input_dev); + } + } + + return 0; +} + +/* + * NOTE: We are reading recent and old data registers blindly + * whenever key-stuck interrupt happens, because events counter doesn't + * get updated when this interrupt happens due to key stuck doesn't get + * considered as key state change. + * + * We are not using old data register contents after they are being read + * because it might report the key which was pressed before the key being stuck + * as stuck key because it's pressed status is stored in the old data + * register. + */ +static irqreturn_t kp_stuck_irq_handler(int irq, void *dev_id) +{ + struct pm8058_keypad *kp = dev_id; + u8 old[KP_DRV_MAX]; + u8 new[KP_DRV_MAX]; + int ret; + + if (debug_mask & DEBUG_IRQ) + pr_info("%s: key stuck!\n", __func__); + + ret = kp_get_scan_data(kp, old, new); + if (ret) { + pr_err("%s: couldn't get scan data\n", __func__); + goto out; + } + kp_process_scan_data(kp, kp->stuck_state, new); + +out: + return IRQ_HANDLED; +} + +static irqreturn_t kp_sense_irq_handler(int irq, void *dev_id) +{ + struct pm8058_keypad *kp = dev_id; + int ret; + u8 old[KP_DRV_MAX]; + u8 new[KP_DRV_MAX]; + u8 val; + + if (debug_mask & DEBUG_IRQ) + pr_info("%s: key event!!\n", __func__); + ret = kp_readb(kp, REG_KEYP_CTRL, &val); + if (ret) { + pr_err("%s: can't read events\n", __func__); + goto out; + } + + /* events counter is gray coded */ + switch(val & 0x3) { + case 0x1: + ret = kp_get_scan_data(kp, NULL, new); + if (ret) + goto out; + kp_process_scan_data(kp, kp->key_state, new); + memcpy(kp->key_state, new, sizeof(new)); + break; + + case 0x2: + pr_debug("%s: some key events were missed\n", __func__); + case 0x3: + ret = kp_get_scan_data(kp, old, new); + if (ret) + goto out; + /* first process scan data in relation to last known + * key state */ + kp_process_scan_data(kp, kp->key_state, old); + kp_process_scan_data(kp, old, new); + memcpy(kp->key_state, new, sizeof(new)); + break; + + case 0x0: + pr_warning("%s: interrupt without any events?!\n", __func__); + break; + } + +out: + if (ret) + pr_err("%s: couldn't get scan data\n", __func__); + + return IRQ_HANDLED; +} + +static int pm8058_keypad_probe(struct platform_device *pdev) +{ + struct pm8058_keypad_platform_data *pdata = pdev->dev.platform_data; + struct pm8058_keypad *kp; + int sense_irq; + int stuck_irq; + int ret; + int i; + u8 val; + + sense_irq = platform_get_irq_byname(pdev, "kp_sense"); + stuck_irq = platform_get_irq_byname(pdev, "kp_stuck"); + + if (!pdata || sense_irq < 0 || stuck_irq < 0) { + pr_err("%s: missing platform data/resources\n", __func__); + return -EINVAL; + } + + if (pdata->num_sns > KP_SNS_MAX || pdata->num_drv > KP_DRV_MAX || + (pdata->drv_hold_clks == 0) || !pdata->keymap) { + pr_err("%s: invalid plaform data\n", __func__); + return -EINVAL; + } + + kp = kzalloc(sizeof(*kp), GFP_KERNEL); + if (!kp) { + pr_err("%s: can't allocate memory for kp struct\n", __func__); + return -ENOMEM; + } + + platform_set_drvdata(pdev, kp); + kp->dev = &pdev->dev; + kp->num_sns = pdata->num_sns; + kp->num_drv = pdata->num_drv; + kp->keymap = pdata->keymap; + kp->sense_irq = sense_irq; + kp->stuck_irq = stuck_irq; + + memset(kp->key_state, 0xff, sizeof(kp->key_state)); + memset(kp->stuck_state, 0xff, sizeof(kp->stuck_state)); + + /* b0 and up have sync_read support */ + kp->flags = KPF_HAS_SYNC_READ; + + kp->input_dev = input_allocate_device(); + if (!kp->input_dev) { + ret 
= -ENOMEM; + pr_err("%s: Failed to allocate input device\n", __func__); + goto err_input_dev_alloc; + } + + kp->input_dev->name = pdata->name; + input_set_capability(kp->input_dev, EV_MSC, MSC_SCAN); + input_set_drvdata(kp->input_dev, kp); + + for (i = 0; i < kp->num_drv * kp->num_sns; ++i) { + unsigned short keycode = kp->keymap[i]; + BUG_ON(keycode && keycode > KEY_MAX); + if (keycode) + input_set_capability(kp->input_dev, EV_KEY, keycode); + } + + ret = input_register_device(kp->input_dev); + if (ret) { + pr_err("%s: can't register input device '%s'\n", __func__, + pdata->name); + goto err_input_dev_reg; + } + + ret = kp_hw_init(kp, pdata); + if (ret) { + pr_err("%s: can't initialize keypad hardware\n", __func__); + goto err_kp_hw_init; + } + + if (pdata->init) { + ret = pdata->init(kp->dev); + if (ret) { + pr_err("%s: can't call board's init\n", __func__); + goto err_pdata_init; + } + } + + ret = request_threaded_irq(kp->sense_irq, NULL, kp_sense_irq_handler, + IRQF_TRIGGER_RISING, "pm8058-keypad-sense", kp); + if (ret) { + pr_err("%s: can't request sense_irq\n", __func__); + goto err_req_sense_irq; + } + ret = request_threaded_irq(kp->stuck_irq, NULL, kp_stuck_irq_handler, + IRQF_TRIGGER_RISING, "pm8058-keypad-stuck", kp); + if (ret) { + pr_err("%s: can't request stuck\n", __func__); + goto err_req_stuck_irq; + } + + enable_irq_wake(kp->sense_irq); + + ret = kp_readb(kp, REG_KEYP_CTRL, &val); + if (ret) { + pr_err("%s: can't read kp ctrl\n", __func__); + goto err_read_kp_ctrl; + } + val |= 1 << 7; + ret = kp_writeb(kp, REG_KEYP_CTRL, val); + if (ret) { + pr_err("%s: can't enable kp\n", __func__); + goto err_kp_enable; + } + + pr_info("%s: %dx%d matrix keypad '%s' registered\n", __func__, + kp->num_drv, kp->num_sns, pdata->name); + + return 0; + +err_kp_enable: +err_read_kp_ctrl: + disable_irq_wake(kp->sense_irq); + free_irq(kp->stuck_irq, kp); +err_req_stuck_irq: + free_irq(kp->sense_irq, kp); +err_req_sense_irq: +err_pdata_init: +err_kp_hw_init: + input_unregister_device(kp->input_dev); +err_input_dev_reg: + input_free_device(kp->input_dev); +err_input_dev_alloc: + platform_set_drvdata(pdev, NULL); + kfree(kp); + return ret; +} + +static struct platform_driver pm8058_keypad_driver = { + .probe = pm8058_keypad_probe, + .driver = { + .name = "pm8058-keypad", + .owner = THIS_MODULE, + }, +}; + +static int __init pm8058_keypad_init(void) +{ + return platform_driver_register(&pm8058_keypad_driver); +} +device_initcall(pm8058_keypad_init); diff --git a/drivers/input/keyreset.c b/drivers/input/keyreset.c index 36208fe0baae6..54b6f982cfbe6 100644 --- a/drivers/input/keyreset.c +++ b/drivers/input/keyreset.c @@ -19,10 +19,10 @@ #include #include #include -#include #include +#include - +#define KEYRESET_DELAY 3*HZ struct keyreset_state { struct input_handler input_handler; unsigned long keybit[BITS_TO_LONGS(KEY_CNT)]; @@ -36,15 +36,16 @@ struct keyreset_state { int (*reset_fn)(void); }; -int restart_requested; +static int restart_requested; static void deferred_restart(struct work_struct *dummy) { + pr_info("keyreset::%s in\n", __func__); restart_requested = 2; sys_sync(); restart_requested = 3; kernel_restart(NULL); } -static DECLARE_WORK(restart_work, deferred_restart); +static DECLARE_DELAYED_WORK(restart_work, deferred_restart); static void keyreset_event(struct input_handle *handle, unsigned int type, unsigned int code, int value) @@ -88,6 +89,7 @@ static void keyreset_event(struct input_handle *handle, unsigned int type, state->restart_disabled = 1; if (restart_requested) panic("keyboard 
reset failed, %d", restart_requested); + if (state->reset_fn) { restart_requested = state->reset_fn(); } else { diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index d50c4c89df3ae..fb5ee47be7095 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -470,4 +470,21 @@ config INPUT_CMA3000_I2C To compile this driver as a module, choose M here: the module will be called cma3000_d0x_i2c. +config INPUT_CAPELLA_CM3602 + tristate "Capella CM3602 proximity and light sensor" + help + Say Y here to enable the Capella CM3602 Short Distance Proximity + Sensor with Ambient Light Sensor. + +config INPUT_CAPELLA_CM3602_HTC + tristate "Capella CM3602 proximity and light sensor (HTC)" + help + Say Y here to enable the Capella CM3602 Short Distance Proximity + Sensor with Ambient Light Sensor. + +config LIGHTSENSOR_MICROP + tristate "LIGHTSENSOR MICROP Driver" + help + HTC LIGHTSENSOR Microp support. + endif diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index 0968ae815c319..b659acac45e27 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -17,6 +17,8 @@ obj-$(CONFIG_INPUT_ATI_REMOTE) += ati_remote.o obj-$(CONFIG_INPUT_ATI_REMOTE2) += ati_remote2.o obj-$(CONFIG_INPUT_ATLAS_BTNS) += atlas_btns.o obj-$(CONFIG_INPUT_BFIN_ROTARY) += bfin_rotary.o +obj-$(CONFIG_INPUT_CAPELLA_CM3602) += capella_cm3602.o +obj-$(CONFIG_INPUT_CAPELLA_CM3602_HTC) += capella_cm3602_htc.o obj-$(CONFIG_INPUT_CM109) += cm109.o obj-$(CONFIG_INPUT_CMA3000) += cma3000_d0x.o obj-$(CONFIG_INPUT_CMA3000_I2C) += cma3000_d0x_i2c.o @@ -46,3 +48,8 @@ obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o obj-$(CONFIG_INPUT_WM831X_ON) += wm831x-on.o obj-$(CONFIG_INPUT_YEALINK) += yealink.o +ifdef CONFIG_MICROP_COMMON + obj-$(CONFIG_LIGHTSENSOR_MICROP) += cm3602_lightsensor_microp_htc.o +else + obj-$(CONFIG_LIGHTSENSOR_MICROP) += cm3602_lightsensor_microp.o +endif diff --git a/drivers/input/misc/capella_cm3602.c b/drivers/input/misc/capella_cm3602.c new file mode 100644 index 0000000000000..453a88c1bb2b0 --- /dev/null +++ b/drivers/input/misc/capella_cm3602.c @@ -0,0 +1,269 @@ +/* drivers/input/misc/capella_cm3602.c + * + * Copyright (C) 2009 Google, Inc. + * Author: Iliyan Malchev + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define D(x...) 
pr_info(x) + +static struct capella_cm3602_data { + struct input_dev *input_dev; + struct capella_cm3602_platform_data *pdata; + int enabled; +} the_data; + +static int misc_opened; + +static int capella_cm3602_report(struct capella_cm3602_data *data) +{ + int val = gpio_get_value(data->pdata->p_out); + if (val < 0) { + pr_err("%s: gpio_get_value error %d\n", __func__, val); + return val; + } + + D("proximity %d\n", val); + + /* 0 is close, 1 is far */ + input_report_abs(data->input_dev, ABS_DISTANCE, val); + input_sync(data->input_dev); + return val; +} + +static irqreturn_t capella_cm3602_irq_handler(int irq, void *data) +{ + struct capella_cm3602_data *ip = data; + int val = capella_cm3602_report(ip); + return IRQ_HANDLED; +} + +static int capella_cm3602_enable(struct capella_cm3602_data *data) +{ + D("%s\n", __func__); + if (data->enabled) { + D("%s: already enabled\n", __func__); + } else { + data->pdata->power(1); + data->enabled = 1; + capella_cm3602_report(data); + } + return 0; +} + +static int capella_cm3602_disable(struct capella_cm3602_data *data) +{ + D("%s\n", __func__); + if (data->enabled) { + data->pdata->power(0); + data->enabled = 0; + } else { + D("%s: already disabled\n", __func__); + } + return 0; +} + +static int capella_cm3602_setup(struct capella_cm3602_data *ip) +{ + int rc = -EIO; + struct capella_cm3602_platform_data *pdata = ip->pdata; + int irq = gpio_to_irq(pdata->p_out); + + D("%s\n", __func__); + + rc = gpio_request(pdata->p_out, "gpio_proximity_out"); + if (rc < 0) { + pr_err("%s: gpio %d request failed (%d)\n", + __func__, pdata->p_out, rc); + goto done; + } + + rc = gpio_direction_input(pdata->p_out); + if (rc < 0) { + pr_err("%s: failed to set gpio %d as input (%d)\n", + __func__, pdata->p_out, rc); + goto fail_free_p_out; + } + + rc = request_irq(irq, + capella_cm3602_irq_handler, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, + "capella_cm3602", + ip); + if (rc < 0) { + pr_err("%s: request_irq(%d) failed for gpio %d (%d)\n", + __func__, irq, + pdata->p_out, rc); + goto fail_free_p_out; + } + + rc = set_irq_wake(irq, 1); + if (rc < 0) { + pr_err("%s: failed to set irq %d as a wake interrupt\n", + __func__, irq); + goto fail_free_irq; + + } + + goto done; + +fail_free_irq: + free_irq(irq, 0); +fail_free_p_out: + gpio_free(pdata->p_out); +done: + return rc; +} + +static int capella_cm3602_open(struct inode *inode, struct file *file) +{ + D("%s\n", __func__); + if (misc_opened) + return -EBUSY; + misc_opened = 1; + return 0; +} + +static int capella_cm3602_release(struct inode *inode, struct file *file) +{ + D("%s\n", __func__); + misc_opened = 0; + return capella_cm3602_disable(&the_data); +} + +static long capella_cm3602_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int val; + D("%s cmd %d\n", __func__, _IOC_NR(cmd)); + switch (cmd) { + case CAPELLA_CM3602_IOCTL_ENABLE: + if (get_user(val, (unsigned long __user *)arg)) + return -EFAULT; + if (val) + return capella_cm3602_enable(&the_data); + else + return capella_cm3602_disable(&the_data); + break; + case CAPELLA_CM3602_IOCTL_GET_ENABLED: + return put_user(the_data.enabled, (unsigned long __user *)arg); + break; + default: + pr_err("%s: invalid cmd %d\n", __func__, _IOC_NR(cmd)); + return -EINVAL; + } +} + +static struct file_operations capella_cm3602_fops = { + .owner = THIS_MODULE, + .open = capella_cm3602_open, + .release = capella_cm3602_release, + .unlocked_ioctl = capella_cm3602_ioctl +}; + +struct miscdevice capella_cm3602_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = 
"cm3602", + .fops = &capella_cm3602_fops +}; + +static int capella_cm3602_probe(struct platform_device *pdev) +{ + int rc = -EIO; + struct input_dev *input_dev; + struct capella_cm3602_data *ip; + struct capella_cm3602_platform_data *pdata; + + D("%s: probe\n", __func__); + + pdata = pdev->dev.platform_data; + if (!pdata) { + pr_err("%s: missing pdata!\n", __func__); + goto done; + } + if (!pdata->power) { + pr_err("%s: incomplete pdata!\n", __func__); + goto done; + } + + ip = &the_data; + platform_set_drvdata(pdev, ip); + + D("%s: allocating input device\n", __func__); + input_dev = input_allocate_device(); + if (!input_dev) { + pr_err("%s: could not allocate input device\n", __func__); + rc = -ENOMEM; + goto done; + } + ip->input_dev = input_dev; + ip->pdata = pdata; + input_set_drvdata(input_dev, ip); + + input_dev->name = "proximity"; + + set_bit(EV_ABS, input_dev->evbit); + input_set_abs_params(input_dev, ABS_DISTANCE, 0, 1, 0, 0); + + D("%s: registering input device\n", __func__); + rc = input_register_device(input_dev); + if (rc < 0) { + pr_err("%s: could not register input device\n", __func__); + goto err_free_input_device; + } + + D("%s: registering misc device\n", __func__); + rc = misc_register(&capella_cm3602_misc); + if (rc < 0) { + pr_err("%s: could not register misc device\n", __func__); + goto err_unregister_input_device; + } + + rc = capella_cm3602_setup(ip); + if (!rc) + goto done; + + misc_deregister(&capella_cm3602_misc); +err_unregister_input_device: + input_unregister_device(input_dev); + goto done; +err_free_input_device: + input_free_device(input_dev); +done: + return rc; +} + +static struct platform_driver capella_cm3602_driver = { + .probe = capella_cm3602_probe, + .driver = { + .name = CAPELLA_CM3602, + .owner = THIS_MODULE + }, +}; + +static int __init capella_cm3602_init(void) +{ + return platform_driver_register(&capella_cm3602_driver); +} + +device_initcall(capella_cm3602_init); diff --git a/drivers/input/misc/capella_cm3602_htc.c b/drivers/input/misc/capella_cm3602_htc.c new file mode 100644 index 0000000000000..985bce83cc7ad --- /dev/null +++ b/drivers/input/misc/capella_cm3602_htc.c @@ -0,0 +1,352 @@ +/* drivers/input/misc/capella_cm3602.c + * + * Copyright (C) 2009 Google, Inc. + * Author: Iliyan Malchev + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define D(x...) pr_info(x) + +struct wake_lock proximity_wake_lock; + +static void ps_irq_do_work(struct work_struct *work); +static DECLARE_WORK(ps_irq_work, ps_irq_do_work); + +static struct capella_cm3602_data { + struct input_dev *input_dev; + struct capella_cm3602_platform_data *pdata; + struct workqueue_struct *ps_wq; + int enabled; +} the_data; + +static int misc_opened; + +static int capella_cm3602_report(struct capella_cm3602_data *data) +{ + int val = gpio_get_value(data->pdata->p_out); + int value1, value2; + int retry_limit = 10; + int irq = data->pdata->irq; + + do { + value1 = gpio_get_value(data->pdata->p_out); + set_irq_type(irq, value1 ? 
+ IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH); + value2 = gpio_get_value(data->pdata->p_out); + } while (value1 != value2 && retry_limit-- > 0); + + if (val < 0) { + pr_err("%s: gpio_get_value error %d\n", __func__, val); + return val; + } + + D("proximity %s\n", val ? "FAR" : "NEAR"); + + /* 0 is close, 1 is far */ + input_report_abs(data->input_dev, ABS_DISTANCE, val); + input_sync(data->input_dev); + + wake_lock_timeout(&proximity_wake_lock, 2*HZ); + + return val; +} + +static void ps_irq_do_work(struct work_struct *work) +{ + capella_cm3602_report(&the_data); + enable_irq(the_data.pdata->irq); +} + +static irqreturn_t capella_cm3602_irq_handler(int irq, void *data) +{ + disable_irq_nosync(irq); + + queue_work(the_data.ps_wq, &ps_irq_work); + + return IRQ_HANDLED; +} + +static int capella_cm3602_enable(struct capella_cm3602_data *data) +{ + int rc; + int irq = data->pdata->irq; + + D("%s\n", __func__); + if (data->enabled) { + D("%s: already enabled\n", __func__); + return 0; + } + + /* dummy report */ + input_report_abs(data->input_dev, ABS_DISTANCE, -1); + input_sync(data->input_dev); + + data->pdata->power(PS_PWR_ON, 1); + + rc = gpio_direction_output(data->pdata->p_en, 0); + + msleep(220); + + data->enabled = !rc; + if (!rc) + capella_cm3602_report(data); + + enable_irq(irq); + rc = set_irq_wake(irq, 1); + if (rc < 0) + pr_err("%s: failed to set irq %d as a wake interrupt\n", + __func__, irq); + + return rc; +} + +static int capella_cm3602_disable(struct capella_cm3602_data *data) +{ + int rc = -EIO; + int irq = data->pdata->irq; + + D("%s\n", __func__); + if (!data->enabled) { + D("%s: already disabled\n", __func__); + return 0; + } + disable_irq(irq); + rc = set_irq_wake(irq, 0); + if (rc < 0) + pr_err("%s: failed to set irq %d as a non-wake interrupt\n", + __func__, irq); + + rc = gpio_direction_output(data->pdata->p_en, 1); + + if (rc < 0) + return rc; + data->pdata->power(PS_PWR_ON, 0); + data->enabled = 0; + + input_event(data->input_dev, EV_SYN, SYN_CONFIG, 0); + return rc; +} + +static int capella_cm3602_setup(struct capella_cm3602_data *ip) +{ + int rc = -EIO; + struct capella_cm3602_platform_data *pdata = ip->pdata; + int irq = pdata->irq; + + D("%s\n", __func__); + + if (pdata->p_out == 0 || pdata->p_en == 0) { + pr_err("%s: gpio == 0!!\n", __func__); + rc = -1; + goto done; + } + + rc = gpio_request(pdata->p_out, "gpio_proximity_out"); + if (rc < 0) { + pr_err("%s: gpio %d request failed (%d)\n", + __func__, pdata->p_out, rc); + goto done; + } + + rc = gpio_request(pdata->p_en, "gpio_proximity_en"); + if (rc < 0) { + pr_err("%s: gpio %d request failed (%d)\n", + __func__, pdata->p_en, rc); + goto fail_free_p_out; + } + + rc = gpio_direction_input(pdata->p_out); + if (rc < 0) { + pr_err("%s: failed to set gpio %d as input (%d)\n", + __func__, pdata->p_out, rc); + goto fail_free_p_en; + } + + the_data.ps_wq = create_singlethread_workqueue("proximity_wq"); + if (!the_data.ps_wq) { + pr_err("%s: can't create workqueue\n", __func__); + rc = -ENOMEM; + goto fail_free_p_en; + } + + rc = request_irq(irq, + capella_cm3602_irq_handler, + IRQF_TRIGGER_LOW | IRQF_TRIGGER_HIGH, + "capella_cm3602", + ip); + if (rc < 0) { + pr_err("%s: request_irq(%d) failed for gpio %d (%d)\n", + __func__, irq, + pdata->p_out, rc); + goto fail_free_wq; + } + + goto done; + +fail_free_wq: + destroy_workqueue(the_data.ps_wq); +fail_free_p_en: + gpio_free(pdata->p_en); +fail_free_p_out: + gpio_free(pdata->p_out); +done: + return rc; +} + +static int capella_cm3602_open(struct inode *inode, struct file *file) 
+{ + D("%s\n", __func__); + if (misc_opened) + return -EBUSY; + misc_opened = 1; + return 0; +} + +static int capella_cm3602_release(struct inode *inode, struct file *file) +{ + D("%s\n", __func__); + misc_opened = 0; + return capella_cm3602_disable(&the_data); +} + +static long capella_cm3602_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int val; + D("%s cmd %d\n", __func__, _IOC_NR(cmd)); + switch (cmd) { + case CAPELLA_CM3602_IOCTL_ENABLE: + if (get_user(val, (unsigned long __user *)arg)) + return -EFAULT; + if (val) + return capella_cm3602_enable(&the_data); + else + return capella_cm3602_disable(&the_data); + break; + case CAPELLA_CM3602_IOCTL_GET_ENABLED: + return put_user(the_data.enabled, (unsigned long __user *)arg); + break; + default: + pr_err("%s: invalid cmd %d\n", __func__, _IOC_NR(cmd)); + return -EINVAL; + } +} + +static struct file_operations capella_cm3602_fops = { + .owner = THIS_MODULE, + .open = capella_cm3602_open, + .release = capella_cm3602_release, + .unlocked_ioctl = capella_cm3602_ioctl +}; + +struct miscdevice capella_cm3602_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "cm3602", + .fops = &capella_cm3602_fops +}; + +static int capella_cm3602_probe(struct platform_device *pdev) +{ + int rc = -EIO; + struct input_dev *input_dev; + struct capella_cm3602_data *ip; + struct capella_cm3602_platform_data *pdata; + + D("%s: probe\n", __func__); + + pdata = dev_get_platdata(&pdev->dev); + if (!pdata) { + pr_err("%s: missing pdata!\n", __func__); + goto done; + } + if (!pdata->power) { + pr_err("%s: incomplete pdata!\n", __func__); + goto done; + } + + ip = &the_data; + platform_set_drvdata(pdev, ip); + + /*D("%s: allocating input device\n", __func__);*/ + input_dev = input_allocate_device(); + if (!input_dev) { + pr_err("%s: could not allocate input device\n", __func__); + rc = -ENOMEM; + goto done; + } + ip->input_dev = input_dev; + ip->pdata = pdata; + input_set_drvdata(input_dev, ip); + + input_dev->name = "proximity"; + + set_bit(EV_ABS, input_dev->evbit); + input_set_abs_params(input_dev, ABS_DISTANCE, 0, 1, 0, 0); + + /*D("%s: registering input device\n", __func__);*/ + rc = input_register_device(input_dev); + if (rc < 0) { + pr_err("%s: could not register input device\n", __func__); + goto err_free_input_device; + } + + /*D("%s: registering misc device\n", __func__);*/ + rc = misc_register(&capella_cm3602_misc); + if (rc < 0) { + pr_err("%s: could not register misc device\n", __func__); + goto err_unregister_input_device; + } + + wake_lock_init(&proximity_wake_lock, WAKE_LOCK_SUSPEND, "proximity"); + + rc = capella_cm3602_setup(ip); + if (!rc) + goto done; + + misc_deregister(&capella_cm3602_misc); +err_unregister_input_device: + input_unregister_device(input_dev); +err_free_input_device: + input_free_device(input_dev); +done: + if (ip->pdata && ip->pdata->irq) + disable_irq(ip->pdata->irq); + return rc; +} + +static struct platform_driver capella_cm3602_driver = { + .probe = capella_cm3602_probe, + .driver = { + .name = CAPELLA_CM3602, + .owner = THIS_MODULE + }, +}; + +static int __init capella_cm3602_init(void) +{ + return platform_driver_register(&capella_cm3602_driver); +} + +device_initcall(capella_cm3602_init); diff --git a/drivers/input/misc/cm3602_lightsensor_microp.c b/drivers/input/misc/cm3602_lightsensor_microp.c new file mode 100644 index 0000000000000..ee6b9e86eb57d --- /dev/null +++ b/drivers/input/misc/cm3602_lightsensor_microp.c @@ -0,0 +1,640 @@ +/* + * + * Copyright (C) 2009 HTC, Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +struct microp_ls_info { + struct microp_function_config *ls_config; + struct input_dev *ls_input_dev; + struct early_suspend early_suspend; + struct i2c_client *client; + struct workqueue_struct *ls_wq; + + uint32_t als_func; + uint32_t als_kadc; + uint32_t als_gadc; + uint8_t als_calibrating; + int als_intr_enabled; + int is_suspend; + int old_intr_cmd; +}; + +struct microp_ls_info *ls_info; +static int ls_enable_flag; +static int ls_enable_num; +static uint32_t als_kadc; + +static void enable_intr_do_work(struct work_struct *w); +static DECLARE_DELAYED_WORK(enable_intr_work, enable_intr_do_work); + +static void lightsensor_do_work(struct work_struct *w); +static DECLARE_WORK(lightsensor_work, lightsensor_do_work); + +void set_ls_kvalue(struct microp_ls_info *li) +{ + if (!li) { + pr_err("%s: ls_info is empty\n", __func__); + return; + } + + printk(KERN_INFO "%s: ALS calibrated als_kadc=0x%x\n", + __func__, als_kadc); + if (als_kadc >> 16 == ALS_CALIBRATED) + li->als_kadc = als_kadc & 0xFFFF; + else { + li->als_kadc = 0; + printk(KERN_INFO "%s: no ALS calibrated\n", __func__); + } + + if (li->als_kadc && li->ls_config->golden_adc) { + li->als_kadc = (li->als_kadc > 0 && li->als_kadc < 0x400) ? + li->als_kadc : li->ls_config->golden_adc; + li->als_gadc = (li->ls_config->golden_adc > 0) + ? 
li->ls_config->golden_adc : li->als_kadc; + } else { + li->als_kadc = 1; + li->als_gadc = 1; + } + printk(KERN_INFO "%s: als_kadc=0x%x, als_gadc=0x%x\n", + __func__, li->als_kadc, li->als_gadc); +} + +static int upload_ls_table(struct microp_ls_info *li) +{ + uint8_t data[20]; + int i; + for (i = 0; i < 10; i++) { + if (li->ls_config->levels[i] < 0x3FF) { + data[i] = (uint8_t)(li->ls_config->levels[i] + * li->als_kadc / li->als_gadc >> 8); + data[i + 10] = (uint8_t)(li->ls_config->levels[i] + * li->als_kadc / li->als_gadc); + } else { + data[i] = (uint8_t)(li->ls_config->levels[i] >> 8); + data[i + 10] = (uint8_t)(li->ls_config->levels[i] & 0xFF); + } + printk("ls_table: data[%d] , data[%d] = %x, %x\n", i, i, data[i], data[i+10]); + } + + return microp_i2c_write(MICROP_I2C_WCMD_ADC_TABLE, data, 20); +} + +static int get_ls_adc_level(uint8_t *data) +{ + struct microp_ls_info *li = ls_info; + uint8_t i, adc_level = 0; + uint16_t adc_value = 0; + +/* From HTC + data[0] = 0x00; + data[1] = li->ls_config->channel; + if (microp_read_adc(data)) + return -1; + + adc_value = data[0]<<8 | data[1]; +*/ + /* new */ + if (microp_read_adc(li->ls_config->channel, &adc_value)) + return -1; + + if (adc_value > 0x3FF) { + printk(KERN_WARNING "%s: get wrong value: 0x%X\n", + __func__, adc_value); + return -1; + } else { + if (!li->als_calibrating) { + adc_value = adc_value * li->als_gadc / li->als_kadc; + if (adc_value > 0x3FF) + adc_value = 0x3FF; + data[0] = adc_value >> 8; + data[1] = adc_value & 0xFF; + } + for (i = 0; i < 10; i++) { + if (adc_value <= + li->ls_config->levels[i]) { + adc_level = i; +// if (li->ls_config->levels[i]) + break; + } + } + printk(KERN_DEBUG "ALS value: 0x%X, level: %d #\n", + adc_value, adc_level); + data[2] = adc_level; + } + + return 0; +} + +void report_lightsensor_data(void) +{ + uint8_t data[3]; + int ret; + struct microp_ls_info *li = ls_info; + + ret = get_ls_adc_level(data); + if (!ret) { + input_report_abs(li->ls_input_dev, + ABS_MISC, (int)data[2]); + input_sync(li->ls_input_dev); + } +} + +static int ls_microp_intr_enable(uint8_t enable) +{ + int ret; + uint8_t data[2]; + struct microp_ls_info *li = ls_info; + + if (li->old_intr_cmd) { + data[0] = 0; + if (enable) + data[1] = 1; + else + data[1] = 0; + + ret = microp_i2c_write(MICROP_I2C_WCMD_AUTO_BL_CTL, data, 2); + } else { + ret = microp_write_interrupt(li->client, + li->ls_config->int_pin, enable); + } + + return ret; +} + +static void enable_intr_do_work(struct work_struct *w) +{ + struct microp_ls_info *li = ls_info; + int ret; + + if (ls_enable_flag) { + ret = ls_microp_intr_enable(1); + if (ret < 0) + pr_err("%s error\n", __func__); + else { + li->als_intr_enabled = 1; + ls_enable_flag = 0; + input_report_abs(li->ls_input_dev, ABS_MISC, -1); + input_sync(li->ls_input_dev); + } + } + + report_lightsensor_data(); +} + +static void lightsensor_do_work(struct work_struct *w) +{ + /* Wait for Framework event polling ready */ + if (ls_enable_num == 0) { + ls_enable_num = 1; + msleep(300); + } + + report_lightsensor_data(); +} + +static irqreturn_t lightsensor_irq_handler(int irq, void *data) +{ + struct microp_ls_info *li = ls_info; + queue_work(li->ls_wq, &lightsensor_work); + + return IRQ_HANDLED; +} + +static int ls_power(int enable) +{ + struct microp_ls_info *li = ls_info; + + if (li->ls_config->ls_gpio_on) + gpio_set_value(li->ls_config->ls_gpio_on, enable ? 
0 : 1); + + if (li->ls_config->ls_power) + li->ls_config->ls_power(LS_PWR_ON, enable); + + return 0; +} + +static int lightsensor_enable(void) +{ + int ret; + struct microp_ls_info *li = ls_info; + + pr_info("%s\n", __func__); + + ls_enable_flag = 1; + if (li->is_suspend) { + li->als_intr_enabled = 1; + pr_err("%s: microp is suspended\n", __func__); + return 0; + } + if (!li->als_intr_enabled) { + ret = ls_microp_intr_enable(1); + if (ret < 0) + pr_err("%s: set auto light sensor fail\n", __func__); + else { + li->als_intr_enabled = 1; + /* report an invalid value first to ensure we trigger an event + * when adc_level is zero. + */ + input_report_abs(li->ls_input_dev, ABS_MISC, -1); + input_sync(li->ls_input_dev); + } + } + return 0; +} + +static int lightsensor_disable(void) +{ + /* update trigger data when done */ + struct microp_ls_info *li = ls_info; + int ret; + + pr_info("%s\n", __func__); + ls_enable_flag = 0; + if (li->is_suspend) { + li->als_intr_enabled = 0; + pr_err("%s: microp is suspended\n", __func__); + return 0; + } + + if (li->als_intr_enabled) { + ret = ls_microp_intr_enable(0); + if (ret < 0) + pr_err("%s: disable auto light sensor fail\n", + __func__); + else + li->als_intr_enabled = 0; + } + return 0; +} + +DEFINE_MUTEX(ls_i2c_api_lock); +static int lightsensor_opened; + +static int lightsensor_open(struct inode *inode, struct file *file) +{ + int rc = 0; + pr_debug("%s\n", __func__); + mutex_lock(&ls_i2c_api_lock); + if (lightsensor_opened) { + pr_err("%s: already opened\n", __func__); + rc = -EBUSY; + } + lightsensor_opened = 1; + mutex_unlock(&ls_i2c_api_lock); + return rc; +} + +static int lightsensor_release(struct inode *inode, struct file *file) +{ + pr_debug("%s\n", __func__); + mutex_lock(&ls_i2c_api_lock); + lightsensor_opened = 0; + mutex_unlock(&ls_i2c_api_lock); + return 0; +} + +static long lightsensor_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int rc, val; + struct microp_ls_info *li = ls_info; + mutex_lock(&ls_i2c_api_lock); + pr_debug("%s cmd %d\n", __func__, _IOC_NR(cmd)); + + switch (cmd) { + case LIGHTSENSOR_IOCTL_ENABLE: + if (get_user(val, (unsigned long __user *)arg)) { + rc = -EFAULT; + break; + } + pr_info("%s set value = %d\n", __func__, val); + rc = val ? 
lightsensor_enable() : lightsensor_disable(); + break; + case LIGHTSENSOR_IOCTL_GET_ENABLED: + val = li->als_intr_enabled; + pr_info("%s get enabled status: %d\n", __func__, val); + rc = put_user(val, (unsigned long __user *)arg); + break; + default: + pr_err("%s: invalid cmd %d\n", __func__, _IOC_NR(cmd)); + rc = -EINVAL; + } + + mutex_unlock(&ls_i2c_api_lock); + return rc; +} + +static struct file_operations lightsensor_fops = { + .owner = THIS_MODULE, + .open = lightsensor_open, + .release = lightsensor_release, + .unlocked_ioctl = lightsensor_ioctl +}; + +static struct miscdevice lightsensor_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "lightsensor", + .fops = &lightsensor_fops +}; + +static ssize_t ls_adc_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + + uint8_t data[3]; + int ret; + + ret = get_ls_adc_level(data); + + ret = sprintf(buf, + "ADC[0x%03X] => level %d\n", + (data[0] << 8 | data[1]), data[2]); + + return ret; +} + +static DEVICE_ATTR(ls_adc, 0644, ls_adc_show, NULL); + +static ssize_t ls_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + uint8_t data[2] = {0, 0}; + int ret; + + microp_i2c_read(MICROP_I2C_RCMD_SPI_BL_STATUS, data, 2); + ret = sprintf(buf, "Light sensor Auto = %d, SPI enable = %d\n", + data[0], data[1]); + + return ret; +} + +static ssize_t ls_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct microp_ls_info *li = ls_info; + uint8_t enable = 0; + int ls_auto; + int ret; + + ls_auto = -1; + sscanf(buf, "%d", &ls_auto); + + if (ls_auto != 0 && ls_auto != 1 && ls_auto != 147) + return -EINVAL; + + if (ls_auto) { + enable = 1; + li->als_calibrating = (ls_auto == 147) ? 1 : 0; + li->als_intr_enabled = 1; + } else { + enable = 0; + li->als_calibrating = 0; + li->als_intr_enabled = 0; + } + + ret = ls_microp_intr_enable(enable); + if (ret < 0) + pr_err("%s: ls intr enable fail\n", __func__); + + return count; +} + +static DEVICE_ATTR(ls_auto, 0644, ls_enable_show, ls_enable_store); + +static ssize_t ls_kadc_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct microp_ls_info *li = ls_info; + int ret; + + ret = sprintf(buf, "kadc = 0x%x, gadc = 0x%x, real kadc = 0x%x\n", + li->als_kadc, li->als_gadc, als_kadc); + + return ret; +} + +static ssize_t ls_kadc_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct microp_ls_info *li = ls_info; + int kadc_temp = 0; + + sscanf(buf, "%d", &kadc_temp); + if (kadc_temp <= 0 || li->ls_config->golden_adc <= 0) { + printk(KERN_ERR "%s: kadc_temp=0x%x, als_gadc=0x%x\n", + __func__, + kadc_temp, + li->ls_config->golden_adc); + return -EINVAL; + } + + li->als_kadc = kadc_temp; + li->als_gadc = li->ls_config->golden_adc; + printk(KERN_INFO "%s: als_kadc=0x%x, als_gadc=0x%x\n", + __func__, li->als_kadc, li->als_gadc); + + if (upload_ls_table(li) < 0) + printk(KERN_ERR "%s: upload ls table fail\n", __func__); + + return count; +} + +static DEVICE_ATTR(ls_kadc, 0644, ls_kadc_show, ls_kadc_store); + +#ifdef CONFIG_HAS_EARLYSUSPEND +static void light_sensor_suspend(struct early_suspend *h) +{ + struct microp_ls_info *li = ls_info; + int ret; + + li->is_suspend = 1; + cancel_delayed_work(&enable_intr_work); + if (li->als_intr_enabled) { + ret = ls_microp_intr_enable(0); + if (ret < 0) + pr_err("%s: disable auto light sensor fail\n", + __func__); + else + li->als_intr_enabled = 0; + } + ls_power(0); +} + +static void light_sensor_resume(struct 
early_suspend *h) +{ + struct microp_ls_info *li = ls_info; + + ls_power(1); + queue_delayed_work(li->ls_wq, &enable_intr_work, msecs_to_jiffies(800)); + li->is_suspend = 0; +} +#endif + +static int lightsensor_probe(struct platform_device *pdev) +{ + int ret, irq; + struct microp_ls_info *li; + struct lightsensor_platform_data *pdata = pdev->dev.platform_data; + + li = kzalloc(sizeof(struct microp_ls_info), GFP_KERNEL); + if (!li) + return -ENOMEM; + ls_info = li; + +/* From HTC + li->client = dev_get_drvdata(&pdev->dev); +*/ + /* new */ + li->client = get_microp_client(); + + if (!li->client) { + pr_err("%s: can't get microp i2c client\n", __func__); + return -1; + } + li->ls_input_dev = input_allocate_device(); + if (!li->ls_input_dev) { + pr_err("%s: could not allocate input device\n", __func__); + return -ENOMEM; + } + li->ls_input_dev->name = "lightsensor-level"; + set_bit(EV_ABS, li->ls_input_dev->evbit); + input_set_abs_params(li->ls_input_dev, ABS_MISC, 0, 9, 0, 0); + + ret = input_register_device(li->ls_input_dev); + if (ret < 0) { + pr_err("%s: can not register input device\n", + __func__); + return ret; + } + + ret = misc_register(&lightsensor_misc); + if (ret < 0) { + pr_err("%s: can not register misc device\n", + __func__); + return ret; + } + + li->ls_config = pdata->config; + irq = pdata->irq; + li->old_intr_cmd = pdata->old_intr_cmd; + ret = request_irq(irq, lightsensor_irq_handler, IRQF_TRIGGER_NONE, "lightsensor_microp", li); + if (ret < 0) { + pr_err("%s: request_irq(%d) failed for (%d)\n", + __func__, irq, ret); + return ret; + } + + set_ls_kvalue(li); + ret = upload_ls_table(li); + if (ret < 0) { + pr_err("%s: upload ls table fail\n", + __func__); + return ret; + } + + li->ls_wq = create_workqueue("ls_wq"); + if (li->ls_wq == NULL) + return -ENOMEM; + + if (li->ls_config->ls_gpio_on) { + ret = gpio_request(li->ls_config->ls_gpio_on, + "microp_i2c"); + if (ret < 0) { + pr_err("request gpio ls failed\n"); + return ret; + } + ret = gpio_direction_output(li->ls_config->ls_gpio_on, 0); + if (ret < 0) { + pr_err("gpio_direction_output ls failed\n"); + return ret; + } + } + ls_power(1); +#ifdef CONFIG_HAS_EARLYSUSPEND + li->early_suspend.level = + EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1; + li->early_suspend.suspend = light_sensor_suspend; + li->early_suspend.resume = light_sensor_resume; + register_early_suspend(&li->early_suspend); +#endif + ret = device_create_file(&li->client->dev, &dev_attr_ls_adc); + ret = device_create_file(&li->client->dev, &dev_attr_ls_auto); + ret = device_create_file(&li->client->dev, &dev_attr_ls_kadc); + + return 0; + +} + +static struct platform_driver lightsensor_driver = { + .probe = lightsensor_probe, + .driver = { .name = "lightsensor_microp", }, +}; + +static int __init parse_tag_als_kadc(const struct tag *tags) +{ + int found = 0; + struct tag *t = (struct tag *)tags; + + for (; t->hdr.size; t = tag_next(t)) { + if (t->hdr.tag == ATAG_ALS) { + found = 1; + break; + } + } + + if (found) + als_kadc = t->u.revision.rev; + pr_debug("%s: als_kadc = 0x%x\n", __func__, als_kadc); + return 0; +} +__tagtable(ATAG_ALS, parse_tag_als_kadc); + +static int __init light_sensor_init(void) +{ + return platform_driver_register(&lightsensor_driver); +} + +static void __exit light_sensor_exit(void) +{ + platform_driver_unregister(&lightsensor_driver); +} + +module_init(light_sensor_init); +module_exit(light_sensor_exit); + +MODULE_DESCRIPTION("HTC LIGHT SENSOR"); +MODULE_LICENSE("GPL"); diff --git a/drivers/input/misc/cm3602_lightsensor_microp_htc.c 
b/drivers/input/misc/cm3602_lightsensor_microp_htc.c new file mode 100644 index 0000000000000..236fa35070b5e --- /dev/null +++ b/drivers/input/misc/cm3602_lightsensor_microp_htc.c @@ -0,0 +1,608 @@ +/* + * + * Copyright (C) 2009 HTC, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct microp_ls_info { + struct microp_function_config *ls_config; + struct input_dev *ls_input_dev; + struct early_suspend early_suspend; + struct i2c_client *client; + struct workqueue_struct *ls_wq; + + uint32_t als_func; + uint32_t als_kadc; + uint32_t als_gadc; + uint8_t als_calibrating; + int als_intr_enabled; + int is_suspend; + int old_intr_cmd; +}; + +struct microp_ls_info *ls_info; +static int ls_enable_flag; +static int ls_enable_num; + +static void enable_intr_do_work(struct work_struct *w); +static DECLARE_DELAYED_WORK(enable_intr_work, enable_intr_do_work); + +static void lightsensor_do_work(struct work_struct *w); +static DECLARE_WORK(lightsensor_work, lightsensor_do_work); + +void set_ls_kvalue(struct microp_ls_info *li) +{ + + if (!li) { + pr_err("%s: ls_info is empty\n", __func__); + return; + } + + printk(KERN_INFO "%s: ALS calibrated als_kadc=0x%x\n", + __func__, als_kadc); + if (als_kadc >> 16 == ALS_CALIBRATED) + li->als_kadc = als_kadc & 0xFFFF; + else { + li->als_kadc = 0; + printk(KERN_INFO "%s: no ALS calibrated\n", __func__); + } + + if (li->als_kadc && li->ls_config->golden_adc > 0) { + li->als_kadc = (li->als_kadc > 0 && li->als_kadc < 0x400) ? 
+ li->als_kadc : li->ls_config->golden_adc; + li->als_gadc = li->ls_config->golden_adc; + } else { + li->als_kadc = 1; + li->als_gadc = 1; + } + printk(KERN_INFO "%s: als_kadc=0x%x, als_gadc=0x%x\n", + __func__, li->als_kadc, li->als_gadc); +} + +static int upload_ls_table(struct microp_ls_info *li) +{ + uint8_t data[20]; + int i; + for (i = 0; i < 10; i++) { + if (li->ls_config->levels[i] < 0x3FF) { + data[i] = (uint8_t)(li->ls_config->levels[i] + * li->als_kadc / li->als_gadc >> 8); + data[i + 10] = (uint8_t)(li->ls_config->levels[i] + * li->als_kadc / li->als_gadc); + } else { + data[i] = (uint8_t)(li->ls_config->levels[i] >> 8); + data[i + 10] = (uint8_t)(li->ls_config->levels[i] & 0xFF); + } + printk("ls_table: data[%d] , data[%d] = %x, %x\n", i, i, data[i], data[i+10]); + } + + return microp_i2c_write(MICROP_I2C_WCMD_ADC_TABLE, data, 20); +} + +static int get_ls_adc_level(uint8_t *data) +{ + struct microp_ls_info *li = ls_info; + uint8_t i, adc_level = 0; + uint16_t adc_value = 0; + + data[0] = 0x00; + data[1] = li->ls_config->channel; + if (microp_read_adc(data)) + return -1; + + adc_value = data[0]<<8 | data[1]; + if (adc_value > 0x3FF) { + printk(KERN_WARNING "%s: get wrong value: 0x%X\n", + __func__, adc_value); + return -1; + } else { + if (!li->als_calibrating) { + adc_value = adc_value * li->als_gadc / li->als_kadc; + if (adc_value > 0x3FF) + adc_value = 0x3FF; + data[0] = adc_value >> 8; + data[1] = adc_value & 0xFF; + } + for (i = 0; i < 10; i++) { + if (adc_value <= + li->ls_config->levels[i]) { + adc_level = i; + if (li->ls_config->levels[i]) + break; + } + } + printk(KERN_DEBUG "ALS value: 0x%X, level: %d #\n", + adc_value, adc_level); + data[2] = adc_level; + } + + return 0; +} + +void report_lightseneor_data(void) +{ + uint8_t data[3]; + int ret; + struct microp_ls_info *li = ls_info; + + ret = get_ls_adc_level(data); + if (!ret) { + input_report_abs(li->ls_input_dev, + ABS_MISC, (int)data[2]); + input_sync(li->ls_input_dev); + } +} + +static int ls_microp_intr_enable(uint8_t enable) +{ + + int ret; + uint8_t data[2]; + struct microp_ls_info *li = ls_info; + + if (li->old_intr_cmd) { + data[0] = 0; + if (enable) + data[1] = 1; + else + data[1] = 0; + + ret = microp_i2c_write(MICROP_I2C_WCMD_AUTO_BL_CTL, data, 2); + } else { + ret = microp_write_interrupt(li->client, + li->ls_config->int_pin, enable); + } + + return ret; +} + +static void enable_intr_do_work(struct work_struct *w) +{ + struct microp_ls_info *li = ls_info; + int ret; + + if (ls_enable_flag) { + ret = ls_microp_intr_enable(1); + if (ret < 0) + pr_err("%s error\n", __func__); + else { + li->als_intr_enabled = 1; + ls_enable_flag = 0; + input_report_abs(li->ls_input_dev, ABS_MISC, -1); + input_sync(li->ls_input_dev); + } + } + + report_lightseneor_data(); +} + +static void lightsensor_do_work(struct work_struct *w) +{ + /* Wait for Framework event polling ready */ + if (ls_enable_num == 0) { + ls_enable_num = 1; + msleep(300); + } + + report_lightseneor_data(); +} + +static irqreturn_t lightsensor_irq_handler(int irq, void *data) +{ + struct microp_ls_info *li = ls_info; + queue_work(li->ls_wq, &lightsensor_work); + + return IRQ_HANDLED; +} + +static int ls_power(int enable) +{ + struct microp_ls_info *li = ls_info; + + if (li->ls_config->ls_gpio_on) + gpio_set_value(li->ls_config->ls_gpio_on, enable ? 
0 : 1); + + if (li->ls_config->ls_power) + li->ls_config->ls_power(LS_PWR_ON, enable); + + return 0; +} + +static int lightsensor_enable(void) +{ + int ret; + struct microp_ls_info *li = ls_info; + + pr_info("%s\n", __func__); + + ls_enable_flag = 1; + if (li->is_suspend) { + li->als_intr_enabled = 1; + pr_err("%s: microp is suspended\n", __func__); + return 0; + } + if (!li->als_intr_enabled) { + ret = ls_microp_intr_enable(1); + if (ret < 0) + pr_err("%s: set auto light sensor fail\n", __func__); + else { + li->als_intr_enabled = 1; + /* report an invalid value first to ensure we trigger an event + * when adc_level is zero. + */ + input_report_abs(li->ls_input_dev, ABS_MISC, -1); + input_sync(li->ls_input_dev); + } + } + return 0; +} + +static int lightsensor_disable(void) +{ + /* update trigger data when done */ + struct microp_ls_info *li = ls_info; + int ret; + + pr_info("%s\n", __func__); + ls_enable_flag = 0; + if (li->is_suspend) { + li->als_intr_enabled = 0; + pr_err("%s: microp is suspended\n", __func__); + return 0; + } + + if (li->als_intr_enabled) { + ret = ls_microp_intr_enable(0); + if (ret < 0) + pr_err("%s: disable auto light sensor fail\n", + __func__); + else + li->als_intr_enabled = 0; + } + return 0; +} + +DEFINE_MUTEX(ls_i2c_api_lock); +static int lightsensor_opened; + +static int lightsensor_open(struct inode *inode, struct file *file) +{ + int rc = 0; + pr_debug("%s\n", __func__); + mutex_lock(&ls_i2c_api_lock); + if (lightsensor_opened) { + pr_err("%s: already opened\n", __func__); + rc = -EBUSY; + } + lightsensor_opened = 1; + mutex_unlock(&ls_i2c_api_lock); + return rc; +} + +static int lightsensor_release(struct inode *inode, struct file *file) +{ + pr_debug("%s\n", __func__); + mutex_lock(&ls_i2c_api_lock); + lightsensor_opened = 0; + mutex_unlock(&ls_i2c_api_lock); + return 0; +} + +static long lightsensor_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int rc, val; + struct microp_ls_info *li = ls_info; + mutex_lock(&ls_i2c_api_lock); + pr_debug("%s cmd %d\n", __func__, _IOC_NR(cmd)); + + switch (cmd) { + case LIGHTSENSOR_IOCTL_ENABLE: + if (get_user(val, (unsigned long __user *)arg)) { + rc = -EFAULT; + break; + } + pr_info("%s set value = %d\n", __func__, val); + rc = val ? 
lightsensor_enable() : lightsensor_disable(); + break; + case LIGHTSENSOR_IOCTL_GET_ENABLED: + val = li->als_intr_enabled; + pr_info("%s get enabled status: %d\n", __func__, val); + rc = put_user(val, (unsigned long __user *)arg); + break; + default: + pr_err("%s: invalid cmd %d\n", __func__, _IOC_NR(cmd)); + rc = -EINVAL; + } + + mutex_unlock(&ls_i2c_api_lock); + return rc; +} + +static struct file_operations lightsensor_fops = { + .owner = THIS_MODULE, + .open = lightsensor_open, + .release = lightsensor_release, + .unlocked_ioctl = lightsensor_ioctl +}; + +static struct miscdevice lightsensor_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "lightsensor", + .fops = &lightsensor_fops +}; + +static ssize_t ls_adc_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + + uint8_t data[3]; + int ret; + + ret = get_ls_adc_level(data); + + ret = sprintf(buf, + "ADC[0x%03X] => level %d\n", + (data[0] << 8 | data[1]), data[2]); + + return ret; +} + +static DEVICE_ATTR(ls_adc, 0666, ls_adc_show, NULL); + +static ssize_t ls_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + uint8_t data[2] = {0, 0}; + int ret; + + microp_i2c_read(MICROP_I2C_RCMD_SPI_BL_STATUS, data, 2); + ret = sprintf(buf, "Light sensor Auto = %d, SPI enable = %d\n", + data[0], data[1]); + + return ret; +} + +static ssize_t ls_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct microp_ls_info *li = ls_info; + uint8_t enable = 0; + int ls_auto; + int ret; + + ls_auto = -1; + sscanf(buf, "%d", &ls_auto); + + if (ls_auto != 0 && ls_auto != 1 && ls_auto != 147) + return -EINVAL; + + if (ls_auto) { + enable = 1; + li->als_calibrating = (ls_auto == 147) ? 1 : 0; + li->als_intr_enabled = 1; + } else { + enable = 0; + li->als_calibrating = 0; + li->als_intr_enabled = 0; + } + + ret = ls_microp_intr_enable(enable); + if (ret < 0) + pr_err("%s: ls intr enable fail\n", __func__); + + return count; +} + +static DEVICE_ATTR(ls_auto, 0666, ls_enable_show, ls_enable_store); + +static ssize_t ls_kadc_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct microp_ls_info *li = ls_info; + int ret; + + ret = sprintf(buf, "kadc = 0x%x, gadc = 0x%x, real kadc = 0x%x\n", + li->als_kadc, li->als_gadc, als_kadc); + + return ret; +} + +static ssize_t ls_kadc_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct microp_ls_info *li = ls_info; + int kadc_temp = 0; + + sscanf(buf, "%d", &kadc_temp); + if (kadc_temp <= 0 || li->ls_config->golden_adc <= 0) { + printk(KERN_ERR "%s: kadc_temp=0x%x, als_gadc=0x%x\n", + __func__, + kadc_temp, + li->ls_config->golden_adc); + return -EINVAL; + } + + li->als_kadc = kadc_temp; + li->als_gadc = li->ls_config->golden_adc; + printk(KERN_INFO "%s: als_kadc=0x%x, als_gadc=0x%x\n", + __func__, li->als_kadc, li->als_gadc); + + if (upload_ls_table(li) < 0) + printk(KERN_ERR "%s: upload ls table fail\n", __func__); + + return count; +} + +static DEVICE_ATTR(ls_kadc, 0666, ls_kadc_show, ls_kadc_store); + +#ifdef CONFIG_HAS_EARLYSUSPEND +static void light_sensor_suspend(struct early_suspend *h) +{ + struct microp_ls_info *li = ls_info; + int ret; + + li->is_suspend = 1; + cancel_delayed_work(&enable_intr_work); + if (li->als_intr_enabled) { + ret = ls_microp_intr_enable(0); + if (ret < 0) + pr_err("%s: disable auto light sensor fail\n", + __func__); + else + li->als_intr_enabled = 0; + } + ls_power(0); +} + +static void light_sensor_resume(struct 
early_suspend *h) +{ + struct microp_ls_info *li = ls_info; + + ls_power(1); + queue_delayed_work(li->ls_wq, &enable_intr_work, msecs_to_jiffies(800)); + li->is_suspend = 0; +} +#endif + +static int lightsensor_probe(struct platform_device *pdev) +{ + int ret, irq; + struct microp_ls_info *li; + struct lightsensor_platform_data *pdata = pdev->dev.platform_data; + + li = kzalloc(sizeof(struct microp_ls_info), GFP_KERNEL); + if (!li) + return -ENOMEM; + ls_info = li; + li->client = dev_get_drvdata(&pdev->dev); + + if (!li->client) { + pr_err("%s: can't get microp i2c client\n", __func__); + return -1; + } + li->ls_input_dev = input_allocate_device(); + if (!li->ls_input_dev) { + pr_err("%s: could not allocate input device\n", __func__); + return -ENOMEM; + } + li->ls_input_dev->name = "lightsensor-level"; + set_bit(EV_ABS, li->ls_input_dev->evbit); + input_set_abs_params(li->ls_input_dev, ABS_MISC, 0, 9, 0, 0); + + ret = input_register_device(li->ls_input_dev); + if (ret < 0) { + pr_err("%s: can not register input device\n", + __func__); + return ret; + } + + ret = misc_register(&lightsensor_misc); + if (ret < 0) { + pr_err("%s: can not register misc device\n", + __func__); + return ret; + } + + li->ls_config = pdata->config; + irq = pdata->irq; + li->old_intr_cmd = pdata->old_intr_cmd; + ret = request_irq(irq, lightsensor_irq_handler, IRQF_TRIGGER_NONE, "lightsensor_microp", li); + if (ret < 0) { + pr_err("%s: request_irq(%d) failed for (%d)\n", + __func__, irq, ret); + return ret; + } + + set_ls_kvalue(li); + ret = upload_ls_table(li); + if (ret < 0) { + pr_err("%s: upload ls table fail\n", + __func__); + return ret; + } + + li->ls_wq = create_workqueue("ls_wq"); + if (li->ls_wq == NULL) + return -ENOMEM; + + if (li->ls_config->ls_gpio_on) { + ret = gpio_request(li->ls_config->ls_gpio_on, + "microp_i2c"); + if (ret < 0) { + pr_err("request gpio ls failed\n"); + return ret; + } + ret = gpio_direction_output(li->ls_config->ls_gpio_on, 0); + if (ret < 0) { + pr_err("gpio_direction_output ls failed\n"); + return ret; + } + } + ls_power(1); +#ifdef CONFIG_HAS_EARLYSUSPEND + li->early_suspend.level = + EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1; + li->early_suspend.suspend = light_sensor_suspend; + li->early_suspend.resume = light_sensor_resume; + register_early_suspend(&li->early_suspend); +#endif + ret = device_create_file(&li->client->dev, &dev_attr_ls_adc); + ret = device_create_file(&li->client->dev, &dev_attr_ls_auto); + ret = device_create_file(&li->client->dev, &dev_attr_ls_kadc); + + return 0; + +} + +static struct platform_driver lightsensor_driver = { + .probe = lightsensor_probe, + .driver = { .name = "lightsensor_microp", }, +}; + +static int __init light_sensor_init(void) +{ + return platform_driver_register(&lightsensor_driver); +} + +static void __exit light_sensor_exit(void) +{ + platform_driver_unregister(&lightsensor_driver); +} + +module_init(light_sensor_init); +module_exit(light_sensor_exit); + +MODULE_DESCRIPTION("HTC LIGHT SENSOR"); +MODULE_LICENSE("GPL"); diff --git a/drivers/input/misc/gpio_input.c b/drivers/input/misc/gpio_input.c index 758df480600ba..23c81937897d3 100644 --- a/drivers/input/misc/gpio_input.c +++ b/drivers/input/misc/gpio_input.c @@ -21,6 +21,9 @@ #include #include #include +#ifdef CONFIG_OPTICALJOYSTICK_CRUCIAL +#include +#endif enum { DEBOUNCE_UNSTABLE = BIT(0), /* Got irq, while debouncing */ @@ -127,6 +130,14 @@ static enum hrtimer_restart gpio_event_input_timer_func(struct hrtimer *timer) pr_info("gpio_keys_scan_keys: key %x-%x, %d (%d) " "changed to 
%d\n", ds->info->type, key_entry->code, i, key_entry->gpio, pressed); +#ifdef CONFIG_OPTICALJOYSTICK_CRUCIAL + if (key_entry->code == BTN_MOUSE) { + pr_info("gpio_keys_scan_keys: OJ action key %x-%x, %d (%d) " + "changed to %d\n", ds->info->type, + key_entry->code, i, key_entry->gpio, pressed); + curcial_oj_send_key(BTN_MOUSE, pressed); + } else +#endif input_event(ds->input_devs->dev[key_entry->dev], ds->info->type, key_entry->code, pressed); } @@ -152,6 +163,35 @@ static enum hrtimer_restart gpio_event_input_timer_func(struct hrtimer *timer) return HRTIMER_NORESTART; } +#ifdef CONFIG_OPTICALJOYSTICK_CRUCIAL +void keypad_reprort_keycode(struct gpio_key_state *ks){ + struct gpio_input_state *ds = ks->ds; + int keymap_index = ks - ds->key_state; + const struct gpio_event_direct_entry *key_entry; + int pressed; + + key_entry = &ds->info->keymap[keymap_index]; + + pressed = gpio_get_value(key_entry->gpio) ^ + !(ds->info->flags & GPIOEDF_ACTIVE_HIGH); + if (ds->info->flags & GPIOEDF_PRINT_KEYS) + pr_info("keypad_reprort_keycode: key %x-%x, %d " + "(%d) changed to %d\n", + ds->info->type, key_entry->code, keymap_index, + key_entry->gpio, pressed); + + if (ds->info->info.oj_btn && key_entry->code == BTN_MOUSE){ + curcial_oj_send_key(BTN_MOUSE, pressed); + pr_info("keypad_reprort_keycode: OJ key %x-%x, %d " + "(%d) changed to %d\n", + ds->info->type, key_entry->code, keymap_index, + key_entry->gpio, pressed); + } else + input_event(ds->input_devs->dev[key_entry->dev], + ds->info->type, key_entry->code, pressed); +} +#endif + static irqreturn_t gpio_event_input_irq_handler(int irq, void *dev_id) { struct gpio_key_state *ks = dev_id; @@ -196,6 +236,9 @@ static irqreturn_t gpio_event_input_irq_handler(int irq, void *dev_id) key_entry->gpio, pressed); input_event(ds->input_devs->dev[key_entry->dev], ds->info->type, key_entry->code, pressed); +#ifdef CONFIG_OPTICALJOYSTICK_CRUCIAL + keypad_reprort_keycode(ks); +#endif } return IRQ_HANDLED; } diff --git a/drivers/input/misc/gpio_matrix.c b/drivers/input/misc/gpio_matrix.c index 227eb8fe3c09f..96276cd1270f5 100644 --- a/drivers/input/misc/gpio_matrix.c +++ b/drivers/input/misc/gpio_matrix.c @@ -21,6 +21,11 @@ #include #include +#ifdef CONFIG_OPTICALJOYSTICK_CRUCIAL +#include +#include +#endif + struct gpio_kp { struct gpio_event_input_devs *input_devs; struct gpio_event_matrix_info *keypad_info; @@ -111,6 +116,9 @@ static void report_key(struct gpio_kp *kp, int key_index, int out, int in) unsigned short keyentry = mi->keymap[key_index]; unsigned short keycode = keyentry & MATRIX_KEY_MASK; unsigned short dev = keyentry >> MATRIX_CODE_BITS; +#ifdef CONFIG_OPTICALJOYSTICK_CRUCIAL + static unsigned need_send_spec_key = 1; +#endif if (pressed != test_bit(keycode, kp->input_devs->dev[dev]->key)) { if (keycode == KEY_RESERVED) { @@ -125,9 +133,24 @@ static void report_key(struct gpio_kp *kp, int key_index, int out, int in) "changed to %d\n", keycode, out, in, mi->output_gpios[out], mi->input_gpios[in], pressed); +#ifdef CONFIG_OPTICALJOYSTICK_CRUCIAL + if (mi->info.oj_btn && keycode == BTN_MOUSE) + ; + else +#endif input_report_key(kp->input_devs->dev[dev], keycode, pressed); } } +#ifdef CONFIG_OPTICALJOYSTICK_CRUCIAL + if (mi->info.oj_btn && keycode == BTN_MOUSE) { + if (need_send_spec_key == pressed) { + curcial_oj_send_key(keycode, pressed); + need_send_spec_key = !pressed; + printk(KERN_INFO "%s: send OJ action key, pressed: %d\n", + __func__, need_send_spec_key); + } + } +#endif } static enum hrtimer_restart gpio_keypad_timer_func(struct hrtimer *timer) 
diff --git a/drivers/input/misc/keychord.c b/drivers/input/misc/keychord.c index ca23905f30467..5e5d78534f3d5 100644 --- a/drivers/input/misc/keychord.c +++ b/drivers/input/misc/keychord.c @@ -27,6 +27,8 @@ #define KEYCHORD_NAME "keychord" #define BUFFER_SIZE 16 +#define MATCH_COUNT 3 +#define KEYCHORD_TIMEOUT 5*HZ MODULE_AUTHOR("Mike Lockwood "); MODULE_DESCRIPTION("Key chord input driver"); @@ -60,8 +62,20 @@ struct keychord_device { unsigned char head; unsigned char tail; __u16 buff[BUFFER_SIZE]; + struct delayed_work keychord_work; + int8_t keychord_match_count; }; +static void keychord_timeout(struct work_struct *work) +{ + struct keychord_device *kdev = + container_of(work, struct keychord_device, keychord_work.work); + + pr_info("%s: match_count=%d\n", __func__, kdev->keychord_match_count); + if (kdev->keychord_match_count) + kdev->keychord_match_count = 0; +} + static int check_keychord(struct keychord_device *kdev, struct input_keychord *keychord) { @@ -75,8 +89,18 @@ static int check_keychord(struct keychord_device *kdev, return 0; } - /* we have a match */ - return 1; + if (kdev->keychord_match_count++ == 0) + schedule_delayed_work(&kdev->keychord_work, KEYCHORD_TIMEOUT); + + if (kdev->keychord_match_count == MATCH_COUNT) { + if (cancel_delayed_work(&kdev->keychord_work)) { + kdev->keychord_match_count = 0; + return 1; + } else + pr_info("%s: timeout already started\n", __func__); + } + + return 0; } static void keychord_event(struct input_handle *handle, unsigned int type, @@ -344,6 +368,7 @@ static int keychord_open(struct inode *inode, struct file *file) file->private_data = kdev; + INIT_DELAYED_WORK(&kdev->keychord_work, keychord_timeout); return 0; } diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index ee82851afe3ee..318531424848f 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c @@ -63,6 +63,10 @@ #define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI 0x0242 #define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO 0x0243 #define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS 0x0244 +/* Macbook8 (unibody, March 2011) */ +#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 +#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 +#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 #define BCM5974_DEVICE(prod) { \ .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ @@ -96,6 +100,10 @@ static const struct usb_device_id bcm5974_table[] = { BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS), + /* MacbookPro8 */ + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), /* Terminating entry */ {} }; @@ -274,6 +282,18 @@ static const struct bcm5974_config bcm5974_config_table[] = { { DIM_X, DIM_X / SN_COORD, -4616, 5112 }, { DIM_Y, DIM_Y / SN_COORD, -142, 5234 } }, + { + USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI, + USB_DEVICE_ID_APPLE_WELLSPRING5_ISO, + USB_DEVICE_ID_APPLE_WELLSPRING5_JIS, + HAS_INTEGRATED_BUTTON, + 0x84, sizeof(struct bt_data), + 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, + { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, + { DIM_X, DIM_X / SN_COORD, -4415, 5050 }, + { DIM_Y, DIM_Y / SN_COORD, -55, 6680 } + }, {} }; diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index aa186cf6c5145..e06e045bf907a 100644 --- 
a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -836,8 +836,8 @@ static const struct dmi_system_id __initconst toshiba_dmi_table[] = { }, }, - { } #endif + { } }; static bool broken_olpc_ec; @@ -851,8 +851,8 @@ static const struct dmi_system_id __initconst olpc_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "XO"), }, }, - { } #endif + { } }; void __init synaptics_module_init(void) diff --git a/drivers/input/opticaljoystick/Kconfig b/drivers/input/opticaljoystick/Kconfig new file mode 100644 index 0000000000000..cd9c812c0a37d --- /dev/null +++ b/drivers/input/opticaljoystick/Kconfig @@ -0,0 +1,34 @@ +# +# Touchscreen driver configuration +# +menuconfig INPUT_OPTICALJOYSTICK + bool "Opticaljoystick" + help + Say Y here, and a list of supported optical joystick will be displayed. + This option doesn't affect the kernel. + + If unsure, say Y. + +if INPUT_OPTICALJOYSTICK + +config OPTICALJOYSTICK_CRUCIAL + boolean + +choice + prompt "Interface" + +config OPTICALJOYSTICK_CRUCIAL_uP + boolean "Crucial Optical Joystick (microP)" +# depends on MICROP_COMMON + select OPTICALJOYSTICK_CRUCIAL + help + +config OPTICALJOYSTICK_CRUCIAL_SPI + boolean "Crucial Optical Joystick (SPI)" +# depends on SPI_CRUCIAL_OJ + select OPTICALJOYSTICK_CRUCIAL + help + +endchoice + +endif diff --git a/drivers/input/opticaljoystick/Makefile b/drivers/input/opticaljoystick/Makefile new file mode 100644 index 0000000000000..c91575771e4bd --- /dev/null +++ b/drivers/input/opticaljoystick/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the touchscreen drivers. +# + +# Each configuration option enables a list of files. +obj-$(CONFIG_OPTICALJOYSTICK_CRUCIAL_uP) += curcial.o +obj-$(CONFIG_OPTICALJOYSTICK_CRUCIAL_SPI) += curcial_spi.o diff --git a/drivers/input/opticaljoystick/curcial.c b/drivers/input/opticaljoystick/curcial.c new file mode 100644 index 0000000000000..7af4050602041 --- /dev/null +++ b/drivers/input/opticaljoystick/curcial.c @@ -0,0 +1,773 @@ +/* drivers/input/opticaljoystick/curcial.c + * + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * + * + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "curcial.h" + +#define OJ_POWERON 1 +#define OJ_POWEROFF 0 +#define CURCIAL_OJ_POWER 85 +#define BURST_DATA_SIZE 7 +#define OJ_DEVICE_ID 0x0D +#define OJ_REGISTER_WRITE 0x7B +#define OJ_REGISTER_REQUEST 0x7C +#define OJ_REGISTER_READ 0x7D +#define OJ_REGISTER_BURST_REQUEST 0x7E +#define OJ_REGISTER_BURST_READ 0x7F +#define OJ_REGISTER_OJ_POLLING 0xA8 +#define OJ_MOTION 0x02 +#define OJ_DELTA_Y 0x03 +#define OJ_DELTA_X 0x04 +#define OJ_SQUAL 0x05 +#define OJ_SHU_HIGH 0x06 +#define OJ_SHU_LOW 0x07 +#define OJ_OBSERVATION 0x2E +#define OJ_POLLING_ENABLE 1 +#define OJ_POLLING_DISABLE 0 +#define OJ_POLLING_INTERVAL 10 +#define OJ_POLLING_COUNT 10 + +#define DELTA_SUM_TIME 40 +#define DELTA_SUM_CP 0 +#define OJ_RETRY 5 +static const unsigned short keymap[] = { + KEY_RIGHT, + KEY_LEFT, + KEY_UP, + KEY_DOWN /*, + KEY_REPLY*/ +}; + +enum { + MOTION = 0, + Y, + X, + SQUAL, + SHUTTER_UPPER, + SHUTTER_LOWER, + MAXIMUM_PIXEL +}; +extern unsigned int system_rev; +static struct proc_dir_entry *oj_proc_entry; +static struct workqueue_struct *curcial_wq; +static struct curcial_oj_platform_data *my_oj; +static uint8_t polling_delay;/* use msleep*/ +static uint8_t interval; +static uint8_t debugflag; +static uint8_t ap_code; +static int16_t mSumDeltaX; +static int16_t mSumDeltaY; +static int8_t DeltaX[64]; +static int8_t DeltaY[64]; +static int16_t mDeltaX; +static int16_t mDeltaY; +static int8_t normal_th; +static int8_t xy_ratio; + +static atomic_t suspend_flag = ATOMIC_INIT(0); +static uint16_t index; + +static int __devinit curcial_oj_probe(struct platform_device *pdev); +static int __devexit curcial_oj_remove(struct platform_device *pdev); + +static struct platform_driver curcial_oj_device_driver = { + .probe = curcial_oj_probe, + .remove = __devexit_p(curcial_oj_remove), + .driver = { + .name = CURCIAL_OJ_NAME, + .owner = THIS_MODULE, + } +}; + +static uint8_t curcial_oj_register_read(uint8_t reg) +{ + uint8_t cmd[2]; + + cmd[0] = 0; + cmd[1] = reg; + microp_i2c_write(OJ_REGISTER_REQUEST, cmd, 2); + microp_i2c_read(OJ_REGISTER_READ, cmd, 2); + + return cmd[1]; +} + +static void curcial_oj_burst_read(uint8_t *data) +{ + uint8_t cmd[2]; + + cmd[0] = 0x01; + microp_i2c_write(OJ_REGISTER_BURST_REQUEST, cmd, 1); + microp_i2c_read(OJ_REGISTER_BURST_READ, data, BURST_DATA_SIZE); +} + +static void curcial_oj_polling_mode(uint8_t mode) +{ + uint8_t cmd[2]; + + cmd[0] = mode; + microp_i2c_write(OJ_REGISTER_OJ_POLLING, cmd, 1); +} + +static irqreturn_t curcial_oj_irq_handler(int irq, void *data) +{ + queue_work(curcial_wq, &my_oj->work); + return IRQ_HANDLED; +} + +static int curcial_oj_init(void) +{ + uint8_t data[BURST_DATA_SIZE]; + uint8_t id; + uint8_t version; + uint8_t i; + + microp_i2c_read(MICROP_I2C_RCMD_VERSION, data, 2); + version = my_oj->microp_version; + + + if (data[0] < version) { + printk("Microp firmware version:%d have to large than %d !\n\ + Stop OJ driver loading!\n", data[0], version); + return 0; + } + + if (!my_oj->oj_poweron(OJ_POWERON)) + return 0; + + mdelay(10); + microp_spi_vote_enable(SPI_OJ, 1); + + /*microp_i2c_read(0x24, data, 2);*/ + my_oj->oj_shutdown(0); + /* Write 0x5a to register 0x3a */ + data[0] = 0x3a; + data[1] = 0x5a; + microp_i2c_write(OJ_REGISTER_WRITE, data, 2); + mdelay(23); + + /* Read from register 0x02,0x03 and 0x04 one time regardless the state of the motion pin */ + curcial_oj_register_read(OJ_MOTION); 
+ curcial_oj_register_read(OJ_DELTA_Y); + curcial_oj_register_read(OJ_DELTA_X); + + for (i = 0;i < OJ_RETRY; i++ ) { + id = curcial_oj_register_read(0x00); + if (id == OJ_DEVICE_ID) { + printk(KERN_INFO"OpticalJoystick Device ID: %02x\n", OJ_DEVICE_ID); + id = curcial_oj_register_read(0x01); + printk(KERN_INFO"OJ Driver: Revision : %02x\n", id); + break; + } else { + printk("probe OpticalJoystick Device:retry =%d\n", i); + } + } + if (i == OJ_RETRY) { + printk("Can't probe OpticalJoystick Device: %02x!\n", id); + return 0; + } + + /* Write 0x10 to register 0x1C. This will activate burst mode. */ + data[0] = 0x1C; + data[1] = 0x10; + microp_i2c_write(OJ_REGISTER_WRITE, data, 2); + + curcial_oj_polling_mode(OJ_POLLING_ENABLE); + + return 1; +} +static OJKeyEvt_T OJ_ProcessNavi(int Ratio, int DeltaMin, int16_t SumDeltaX, int16_t SumDeltaY) +{ + OJKeyEvt_T tmpKey; + + if ((10*abs(SumDeltaY) > (Ratio*abs(SumDeltaX))) + && (abs(SumDeltaY) > DeltaMin)) { + if (SumDeltaY > 0) + tmpKey = OJ_KEY_UP; + else + tmpKey = OJ_KEY_DOWN; + } else if (abs(SumDeltaX) > DeltaMin) { + if (SumDeltaX > 0) + tmpKey = OJ_KEY_RIGHT; + else + tmpKey = OJ_KEY_LEFT; + } else + tmpKey = OJ_KEY_NONE; + + return tmpKey; +} + +static void curcial_oj_work_func(struct work_struct *work) +{ + struct curcial_oj_platform_data *oj = container_of(work, struct curcial_oj_platform_data, work); + OJData_T OJData; + uint16_t i, j; + uint8_t data[BURST_DATA_SIZE]; + uint32_t click_time = 0; + uint32_t delta_time = 0; + uint32_t entry_time = 0; + OJKeyEvt_T evtKey = OJ_KEY_NONE; + uint8_t x_count = 0; + uint8_t y_count = 0; + bool out = false; + uint8_t pxsum; + uint16_t sht; + int16_t x_sum; + int16_t y_sum; + + curcial_oj_polling_mode(OJ_POLLING_DISABLE); + + mDeltaX = 0; + mDeltaY = 0; + oj->interval = interval; + entry_time = jiffies_to_msecs(jiffies); + x_sum = 0; + y_sum = 0; + + do { + memset(data, 0x00, sizeof(data)); + out = false; + curcial_oj_burst_read(data); + OJData.squal = data[SQUAL]; + pxsum = curcial_oj_register_read(0x09); + sht = ((data[SHUTTER_UPPER] << 8)|data[SHUTTER_LOWER]); + if (debugflag) { + printk(KERN_INFO"OJ1:M=0x%02x Y=0x%02x X=0x%02x SQUAL=0x%02x " + "SHU_U=0x%02x SHU_L=0x%02x pxsum=%d sht=%d \n", data[MOTION], data[Y], data[X], + data[SQUAL], data[SHUTTER_UPPER], data[SHUTTER_LOWER], pxsum, sht); + } + if (ap_code) { + for (i = 1; i < oj->degree; i++) { + if (((oj->sht_tbl[i-1] < sht) && (sht <= oj->sht_tbl[i])) && (oj->pxsum_tbl[i] < pxsum)) { + if (debugflag) + printk("OJ:A.code_condition:%d\n", i); + out = true; + break; + } + } + if (!out) + goto exit; + } + oj->oj_adjust_xy(data, &mDeltaX, &mDeltaY); + + + DeltaX[index] = (int8_t)mDeltaX; + DeltaY[index] = (int8_t)mDeltaY; + /*printk(KERN_INFO"index=%d: DeltaX[] = %d DeltaY[] = %d \n",index, DeltaX[index] , DeltaY[index]);*/ + if (++index == 64) + index = 0; + + x_sum = x_sum + mDeltaX; + y_sum = y_sum + mDeltaY; + mSumDeltaX = mSumDeltaX + mDeltaX; + mSumDeltaY = mSumDeltaY + mDeltaY; + if (debugflag) + printk(KERN_INFO"check:OJ:mSumDeltaX = %d mSumDeltaY = %d \n", mSumDeltaX, mSumDeltaY); + + evtKey = OJ_ProcessNavi(xy_ratio, normal_th, x_sum, y_sum); + + if (evtKey != OJ_KEY_NONE) { + click_time = jiffies_to_msecs(jiffies); + if (debugflag) + printk(KERN_INFO"click_time=%x last_click_time=%x, %x\n", click_time, oj->last_click_time, click_time-oj->last_click_time); + + if (oj->last_click_time == 0) { + oj->last_click_time = entry_time - oj->interval; + oj->key = evtKey; + } + + delta_time = click_time - entry_time; + + 
/*printk(KERN_INFO"x_sum=%d y_sum=%d, delta time=%dms\n", x_sum, y_sum, delta_time);*/ + + if (click_time - oj->last_click_time < oj->interval) { + evtKey = OJ_KEY_NONE; + + if (debugflag) + printk(KERN_INFO"interval blocking < %d\n", oj->interval); + }else if (click_time - oj->last_click_time < 80 && evtKey != oj->key) { + evtKey = OJ_KEY_NONE; + printk(KERN_INFO"sudden key ignore \n"); + } + } + + x_count = oj->Xsteps[abs(x_sum) / normal_th]; + y_count = oj->Ysteps[abs(y_sum) / normal_th]; + if (evtKey == OJ_KEY_LEFT) { + for (j = 0; j < x_count; j++) { + input_report_rel(oj->input_dev, REL_X, -1); + input_sync(oj->input_dev); + } + if (debugflag) + printk(KERN_INFO"OJ:KEY_LEFT:%d\n", x_count); + + } else if (evtKey == OJ_KEY_RIGHT) { + for (j = 0; j < x_count; j++) { + input_report_rel(oj->input_dev, REL_X, 1); + input_sync(oj->input_dev); + } + if (debugflag) + printk(KERN_INFO"OJ:KEY_RIGHT:%d\n", x_count); + + } else if (evtKey == OJ_KEY_DOWN) { + for (j = 0; j < y_count; j++) { + input_report_rel(oj->input_dev, REL_Y, 1); + input_sync(oj->input_dev); + } + if (debugflag) + printk(KERN_INFO"OJ:KEY_DOWN:%d\n", y_count); + + } else if (evtKey == OJ_KEY_UP) { + for (j = 0; j < y_count; j++) { + input_report_rel(oj->input_dev, REL_Y, -1); + input_sync(oj->input_dev); + } + if (debugflag) + printk(KERN_INFO"OJ:KEY_UP:%d\n", y_count); + } + + if (evtKey != OJ_KEY_NONE) { + oj->key = evtKey; + oj->last_click_time = click_time; + x_sum = 0; + y_sum = 0; + /*goto exit;*/ + } + mDeltaX = 0; + mDeltaY = 0; + if (polling_delay) + msleep(polling_delay);/*hr_msleep(polling_delay);*/ + } while ((data[0] & 0x80) && (!atomic_read(&suspend_flag))); + + +exit: + + if (debugflag) + printk(KERN_INFO"%s:-\n", __func__); + if (!atomic_read(&suspend_flag)) + curcial_oj_polling_mode(OJ_POLLING_ENABLE); + else + curcial_oj_polling_mode(OJ_POLLING_DISABLE); +} + + +static ssize_t oj_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + + return sprintf(buf, + "interval=%d normal_th=%d system_rev=%d" + " debugflag=%d polling_delay=%d xy_ratio=%d ap_code=%d", + interval, normal_th, system_rev, debugflag, + polling_delay, xy_ratio, ap_code); + +} + +static ssize_t oj_ap_code_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + + ap_code = simple_strtoull(buf, NULL, 10); + + return count; +} +static ssize_t oj_interval_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + + interval = simple_strtoull(buf, NULL, 10); + + return count; +} + +static ssize_t oj_normal_th_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + + normal_th = simple_strtoull(buf, NULL, 10); + + return count; +} +static ssize_t oj_polling_delay_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + + polling_delay = simple_strtoull(buf, NULL, 10); + + return count; +} + +static ssize_t oj_xy_ratio_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + + xy_ratio = simple_strtoull(buf, NULL, 10); + + return count; +} +static ssize_t oj_debugflag_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + + debugflag = simple_strtoull(buf, NULL, 10); + + return count; +} + + +static ssize_t oj_xtable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + + char *buffer,*endptr; + int i; + buffer = (char *)buf; + + i= 
simple_strtoull(buffer, &endptr, 10); + buffer = endptr+1; + + if (i <= 30) + my_oj->Xsteps[i-1] = simple_strtoull(buffer, &endptr, 10); + + return count; +} +static ssize_t oj_ytable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + char *buffer,*endptr; + int i; + buffer = (char *)buf;; + + + i= simple_strtoull(buffer, &endptr, 10); + buffer = endptr+1; + + if (i <= 30) + my_oj->Ysteps[i-1] = simple_strtoull(buffer, &endptr, 10); + + return count; +} +static ssize_t oj_xtable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + char log[128]; + int i,p; + + for (i = 0, p = 0; i < 30 ; i++) { + p += sprintf(log+p, "%d,", my_oj->Xsteps[i]); + } + return sprintf(buf,"X_table:%s\n",log); + +} +static ssize_t oj_ytable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + char log[128]; + int i,p; + + for (i = 0, p = 0; i < 30 ; i++) { + p += sprintf(log+p, "%d,", my_oj->Ysteps[i]); + } + return sprintf(buf,"Y_table:%s\n",log); + +} +static ssize_t oj_deltax_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + char log[512]; + uint8_t i,p; + + for (i = 0, p = 0; i < 64 ; i++) { + if (i == 63) + p += sprintf(log+p, "%d", DeltaX[i]); + else + p += sprintf(log+p, "%d,", DeltaX[i]); + } + + return sprintf(buf,"%s\n", log); + +} +static ssize_t oj_deltay_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + char log[512]; + uint8_t i,p; + + for (i = 0, p = 0; i < 64 ; i++) { + if (i == 63) + p += sprintf(log+p, "%d", DeltaY[i]); + else + p += sprintf(log+p, "%d,", DeltaY[i]); + } + + return sprintf(buf,"%s\n", log); + +} +static ssize_t oj_SumDeltaX_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + + return sprintf(buf,"%d\n", mSumDeltaX); + +} +static ssize_t oj_SumDeltaY_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + + return sprintf(buf,"%d\n", mSumDeltaY); + +} +static ssize_t oj_reset_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + index = 0; + mSumDeltaX = 0; + mSumDeltaY = 0; + memset(DeltaX, 0x00, sizeof(DeltaX)); + memset(DeltaY, 0x00, sizeof(DeltaY)); + + return count; +} +static DEVICE_ATTR(reset, 0666, oj_show, oj_reset_store); +static DEVICE_ATTR(deltax, 0444, oj_deltax_show, NULL); +static DEVICE_ATTR(deltay, 0444, oj_deltay_show, NULL); +static DEVICE_ATTR(SumDeltaX, 0444, oj_SumDeltaX_show, NULL); +static DEVICE_ATTR(SumDeltaY, 0444, oj_SumDeltaY_show, NULL); +static DEVICE_ATTR(ap_code, 0644, oj_show, oj_ap_code_store); +static DEVICE_ATTR(interval, 0644, oj_show, oj_interval_store); +static DEVICE_ATTR(normal_th, 0644, oj_show, oj_normal_th_store); +static DEVICE_ATTR(polling_delay, 0644, oj_show, oj_polling_delay_store); +static DEVICE_ATTR(xy_ratio, 0644, oj_show, oj_xy_ratio_store); +static DEVICE_ATTR(debugflag, 0644, oj_show, oj_debugflag_store); +static DEVICE_ATTR(xtable, 0644, oj_xtable_show, oj_xtable_store); +static DEVICE_ATTR(ytable, 0644, oj_ytable_show, oj_ytable_store); + +#ifdef CONFIG_HAS_EARLYSUSPEND +static void curcial_oj_early_suspend(struct early_suspend *h) +{ + struct curcial_oj_platform_data *oj; + atomic_set(&suspend_flag, 1); + oj = container_of(h, struct curcial_oj_platform_data, early_suspend); + printk(KERN_ERR"%s: enter\n", __func__); + oj->oj_shutdown(1); + curcial_oj_polling_mode(OJ_POLLING_DISABLE); + if (oj->share_power == false) { + oj->oj_poweron(OJ_POWEROFF); + } + microp_spi_vote_enable(SPI_OJ, 0); + +} + +static 
void curcial_oj_late_resume(struct early_suspend *h) +{ + struct curcial_oj_platform_data *oj; + atomic_set(&suspend_flag, 0); + oj = container_of(h, struct curcial_oj_platform_data, early_suspend); + printk(KERN_ERR"%s: enter\n", __func__); + if (!curcial_oj_init()) + microp_spi_vote_enable(SPI_OJ, 0); +} +#endif + +static int __devinit curcial_oj_probe(struct platform_device *pdev) +{ + struct curcial_oj_platform_data *oj = pdev->dev.platform_data; + int err; + int i; + + err = -ENOMEM; + my_oj = oj; + + + INIT_WORK(&oj->work, curcial_oj_work_func); + + curcial_wq = create_singlethread_workqueue("curcial_wq"); + if (!curcial_wq) { + err = -ENOMEM; + goto fail; + } + + oj->input_dev = input_allocate_device(); + if (!oj->input_dev) { + printk(KERN_ERR "Unable to allocate device for OJ\n"); + err = -ENOMEM; + goto fail; + } + + oj->input_dev->name = "curcial-oj"; + + + oj->input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); + input_set_capability(oj->input_dev, EV_KEY, BTN_MOUSE); + oj->input_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y); + + + for(i = 0; i < ARRAY_SIZE(keymap); i++) + set_bit(keymap[i], oj->input_dev->keybit); + + err = input_register_device(oj->input_dev); + if (err) { + printk(KERN_ERR "Unable to register %s input device\n", oj->input_dev->name); + goto fail; + } + + if (!curcial_oj_init()) + goto fail; + + err = request_irq(my_oj->irq, curcial_oj_irq_handler, + IRQF_TRIGGER_NONE, CURCIAL_OJ_NAME, oj); + if (err < 0) { + err = -ENOMEM; + printk(KERN_ERR "request_irq failed\n"); + goto fail; + } + +#ifdef CONFIG_HAS_EARLYSUSPEND + oj->early_suspend.suspend = curcial_oj_early_suspend; + oj->early_suspend.resume = curcial_oj_late_resume; +/* oj->early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1;*/ + register_early_suspend(&oj->early_suspend); +#endif + err = device_create_file(&(pdev->dev), &dev_attr_reset); + err = device_create_file(&(pdev->dev), &dev_attr_deltax); + err = device_create_file(&(pdev->dev), &dev_attr_deltay); + err = device_create_file(&(pdev->dev), &dev_attr_SumDeltaX); + err = device_create_file(&(pdev->dev), &dev_attr_SumDeltaY); + err = device_create_file(&(pdev->dev), &dev_attr_ap_code); + err = device_create_file(&(pdev->dev), &dev_attr_interval); + err = device_create_file(&(pdev->dev), &dev_attr_normal_th); + err = device_create_file(&(pdev->dev), &dev_attr_polling_delay); + err = device_create_file(&(pdev->dev), &dev_attr_xy_ratio); + err = device_create_file(&(pdev->dev), &dev_attr_debugflag); + err = device_create_file(&(pdev->dev), &dev_attr_xtable); + err = device_create_file(&(pdev->dev), &dev_attr_ytable); + + normal_th = my_oj->normal_th; + xy_ratio = my_oj->xy_ratio; + interval = my_oj->interval; + polling_delay = my_oj->mdelay_time; + debugflag = my_oj->debugflag; + ap_code = my_oj->ap_code; + + printk(KERN_INFO "OJ: driver loaded\n"); + return 0; + +fail: + microp_spi_vote_enable(SPI_OJ, 0); + + if (oj->share_power == false) { + oj->oj_poweron(OJ_POWEROFF); + } + + if (oj->input_dev) { + input_free_device(oj->input_dev); + } + + if (curcial_wq) + destroy_workqueue(curcial_wq); + + if (oj_proc_entry) + remove_proc_entry("oj", NULL); + + return err; +} + +static int __devexit curcial_oj_remove(struct platform_device *pdev) +{ + struct curcial_oj_platform_data *oj = pdev->dev.platform_data; + +#ifdef CONFIG_HAS_EARLYSUSPEND + if (oj->early_suspend.suspend && oj->early_suspend.resume) + unregister_early_suspend(&oj->early_suspend); +#endif + if (oj->share_power == false) { + oj->oj_poweron(OJ_POWEROFF); + } + 
microp_spi_vote_enable(SPI_OJ, 0); + + if (oj->input_dev) { + input_unregister_device(oj->input_dev); + input_free_device(oj->input_dev); + } + + if (curcial_wq) + destroy_workqueue(curcial_wq); + + if (oj_proc_entry) + remove_proc_entry("oj", NULL); + + device_remove_file(&(pdev->dev), &dev_attr_reset); + device_remove_file(&(pdev->dev), &dev_attr_deltax); + device_remove_file(&(pdev->dev), &dev_attr_deltay); + device_remove_file(&(pdev->dev), &dev_attr_SumDeltaX); + device_remove_file(&(pdev->dev), &dev_attr_SumDeltaY); + device_remove_file(&(pdev->dev), &dev_attr_ap_code); + device_remove_file(&(pdev->dev), &dev_attr_interval); + device_remove_file(&(pdev->dev), &dev_attr_normal_th); + device_remove_file(&(pdev->dev), &dev_attr_polling_delay); + device_remove_file(&(pdev->dev), &dev_attr_xy_ratio); + device_remove_file(&(pdev->dev), &dev_attr_debugflag); + device_remove_file(&(pdev->dev), &dev_attr_xtable); + device_remove_file(&(pdev->dev), &dev_attr_ytable); + printk(KERN_INFO "OJ: driver unloaded\n"); + return 0; +} + +static int __init curcial_oj_module_init(void) +{ + return platform_driver_register(&curcial_oj_device_driver); +} + +static void __exit curcial_oj_module_exit(void) +{ + platform_driver_unregister(&curcial_oj_device_driver); +} + +module_init(curcial_oj_module_init); +module_exit(curcial_oj_module_exit); + +void curcial_oj_send_key(unsigned int code, int value) +{ + if ((my_oj != NULL) && (my_oj->input_dev != NULL)) + input_report_key(my_oj->input_dev, code, value); + else + printk(KERN_WARNING "%s: device not ready...\n", __func__); +} + +MODULE_DESCRIPTION("Crucial OpticalJoystick Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/input/opticaljoystick/curcial.h b/drivers/input/opticaljoystick/curcial.h new file mode 100644 index 0000000000000..c5f7edae8f533 --- /dev/null +++ b/drivers/input/opticaljoystick/curcial.h @@ -0,0 +1,62 @@ +/* drivers/input/opticaljoystick/curcial.h + * + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef _CURCIAL_H +#define _CURCIAL_H + +#include + +typedef enum { + OJ_KEY_RIGHT = KEY_RIGHT, + OJ_KEY_LEFT = KEY_LEFT, + OJ_KEY_UP = KEY_UP, + OJ_KEY_DOWN = KEY_DOWN, +/* OJ_KEY_CLICK = KEY_REPLY,*/ + OJ_KEY_NONE +}OJKeyEvt_T; + +typedef enum { + OJ_TOUCH_NONE_EVT = 0, + OJ_TOUCH_PRESS_EVT, + OJ_TOUCH_RELEASE_EVT, + OJ_TOUCH_CLICK_EVT +}OJTouchEvt_T; + +typedef struct { + int8_t deltaX; + int8_t deltaY; + int8_t shtHi; + int8_t shtLo; + uint8_t squal; + uint16_t key; +}OJData_T; + +enum { + OJ_QUEUE_01 = 0, + OJ_QUEUE_02, + OJ_QUEUE_03, + OJ_QUEUE_04, + OJ_QUEUE_05, + OJ_QUEUE_MAX +}; + +extern OJTouchEvt_T OJ_SoftClick_Event(OJData_T* OJData); +extern OJTouchEvt_T gTouchEvt; +extern uint8_t gSqRatio; +extern uint8_t gdeltamod; +extern uint8_t gPressBufCnt; +extern uint8_t softclick; +extern uint8_t gDeltaU; +extern uint8_t gDeltaL; +#endif diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig old mode 100644 new mode 100755 index 4210c3ed7cd49..a142ee9dfeff2 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -11,6 +11,19 @@ menuconfig INPUT_TOUCHSCREEN if INPUT_TOUCHSCREEN +config TOUCHSCREEN_ATMEL + tristate "Atmel i2c touchscreen" + depends on I2C + help + This enables support for Atmel over I2C based touchscreens. + +config TOUCHSCREEN_COMPATIBLE_REPORT + bool "Touchscreen compatible report" + depends on I2C + default n + help + This enables support for old report style in touchscreen driver + config TOUCHSCREEN_88PM860X tristate "Marvell 88PM860x touchscreen" depends on MFD_88PM860X @@ -124,6 +137,20 @@ config TOUCHSCREEN_CY8CTMG110 To compile this driver as a module, choose M here: the module will be called cy8ctmg110_ts. +config TOUCHSCREEN_CYTTSP_I2C + tristate "Cypress TTSP based touchscreens" + depends on I2C + default n + help + Say Y here if you have a Cypress TTSP based touchscreen. + TMA300 is a multi-touch screen which can report upto 10 + touches at a time. + + If unsure, say N. + + To compile this driver as a module, choose M here: the + module will be called cyttsp-i2c. + config TOUCHSCREEN_DA9034 tristate "Touchscreen support for Dialog Semiconductor DA9034" depends on PMIC_DA903X @@ -202,6 +229,10 @@ config TOUCHSCREEN_GUNZE To compile this driver as a module, choose M here: the module will be called gunze. +config TOUCHSCREEN_ELAN_I2C_8232 + tristate "Elan 8232 I2C touchscreen" + depends on I2C + config TOUCHSCREEN_ELO tristate "Elo serial touchscreens" select SERIO @@ -351,6 +382,15 @@ config TOUCHSCREEN_QT602240 To compile this driver as a module, choose M here: the module will be called qt602240_ts. +config TOUCHSCREEN_MSM + bool "Qualcomm MSM touchscreen controller" + depends on ARCH_MSM + default n + help + Say Y here if you have a 4-wire resistive touchscreen panel + connected to the TSSC touchscreen controller on a + Qualcomm MSM/QSD based SoC. + config TOUCHSCREEN_MIGOR tristate "Renesas MIGO-R touchscreen" depends on SH_MIGOR && I2C @@ -377,6 +417,14 @@ config TOUCHSCREEN_SYNAPTICS_I2C_RMI help This enables support for Synaptics RMI over I2C based touchscreens. 
+config TOUCHSCREEN_DUPLICATED_FILTER + bool "Touchscreen duplicated report filter" + depends on TOUCHSCREEN_SYNAPTICS_I2C_RMI + default y + help + This enables filter for duplicated report in touchscreen driver + This support will discard small movement report in touchscreen driver + config TOUCHSCREEN_TOUCHRIGHT tristate "Touchright serial touchscreen" select SERIO diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile old mode 100644 new mode 100755 index a13a1d48d367a..71f04007865cd --- a/drivers/input/touchscreen/Makefile +++ b/drivers/input/touchscreen/Makefile @@ -16,11 +16,13 @@ obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o obj-$(CONFIG_TOUCHSCREEN_BU21013) += bu21013_ts.o obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o +obj-$(CONFIG_TOUCHSCREEN_CYTTSP_I2C) += cyttsp-i2c.o obj-$(CONFIG_TOUCHSCREEN_DA9034) += da9034-ts.o obj-$(CONFIG_TOUCHSCREEN_DYNAPRO) += dynapro.o obj-$(CONFIG_TOUCHSCREEN_HAMPSHIRE) += hampshire.o obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o +obj-$(CONFIG_TOUCHSCREEN_ELAN_I2C_8232) += elan8232_i2c.o obj-$(CONFIG_TOUCHSCREEN_ELO) += elo.o obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o @@ -31,6 +33,7 @@ obj-$(CONFIG_TOUCHSCREEN_MCS5000) += mcs5000_ts.o obj-$(CONFIG_TOUCHSCREEN_MIGOR) += migor_ts.o obj-$(CONFIG_TOUCHSCREEN_MTOUCH) += mtouch.o obj-$(CONFIG_TOUCHSCREEN_MK712) += mk712.o +obj-$(CONFIG_TOUCHSCREEN_MSM) += msm_ts.o obj-$(CONFIG_TOUCHSCREEN_HP600) += hp680_ts_input.o obj-$(CONFIG_TOUCHSCREEN_HP7XX) += jornada720_ts.o obj-$(CONFIG_TOUCHSCREEN_HTCPEN) += htcpen.o @@ -58,3 +61,4 @@ obj-$(CONFIG_TOUCHSCREEN_WM97XX_MAINSTONE) += mainstone-wm97xx.o obj-$(CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE) += zylonite-wm97xx.o obj-$(CONFIG_TOUCHSCREEN_W90X900) += w90p910_ts.o obj-$(CONFIG_TOUCHSCREEN_TPS6507X) += tps6507x-ts.o +obj-$(CONFIG_TOUCHSCREEN_ATMEL) += atmel.o diff --git a/drivers/input/touchscreen/atmel.c b/drivers/input/touchscreen/atmel.c new file mode 100644 index 0000000000000..adb3183b31c8a --- /dev/null +++ b/drivers/input/touchscreen/atmel.c @@ -0,0 +1,1705 @@ +/* drivers/input/touchscreen/atmel.c - ATMEL Touch driver + * + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ATMEL_EN_SYSFS +#define ATMEL_I2C_RETRY_TIMES 10 + +/* config_setting */ +#define NONE 0 +#define CONNECTED 1 + +struct atmel_ts_data { + struct i2c_client *client; + struct input_dev *input_dev; + struct workqueue_struct *atmel_wq; + struct work_struct work; + int (*power) (int on); + struct early_suspend early_suspend; + struct info_id_t *id; + struct object_t *object_table; + uint8_t finger_count; + uint16_t abs_x_min; + uint16_t abs_x_max; + uint16_t abs_y_min; + uint16_t abs_y_max; + uint8_t abs_pressure_min; + uint8_t abs_pressure_max; + uint8_t abs_width_min; + uint8_t abs_width_max; + uint8_t first_pressed; + uint8_t debug_log_level; + struct atmel_finger_data finger_data[10]; + uint8_t finger_type; + uint8_t finger_support; + uint16_t finger_pressed; + uint8_t face_suppression; + uint8_t grip_suppression; + uint8_t noise_status[2]; + uint16_t *filter_level; + uint8_t calibration_confirm; + uint64_t timestamp; + struct atmel_config_data config_setting[2]; + int8_t noise_config[3]; + uint8_t status; + uint8_t GCAF_sample; + uint8_t *GCAF_level; + uint8_t noisethr; + uint8_t noisethr_config; + uint8_t diag_command; + uint8_t *ATCH_EXT; + int pre_data[3]; +#ifdef ATMEL_EN_SYSFS + struct device dev; +#endif + +}; + +static struct atmel_ts_data *private_ts; + +#ifdef CONFIG_HAS_EARLYSUSPEND +static void atmel_ts_early_suspend(struct early_suspend *h); +static void atmel_ts_late_resume(struct early_suspend *h); +#endif + +int i2c_atmel_read(struct i2c_client *client, uint16_t address, uint8_t *data, uint8_t length) +{ + int retry; + uint8_t addr[2]; + + struct i2c_msg msg[] = { + { + .addr = client->addr, + .flags = 0, + .len = 2, + .buf = addr, + }, + { + .addr = client->addr, + .flags = I2C_M_RD, + .len = length, + .buf = data, + } + }; + addr[0] = address & 0xFF; + addr[1] = (address >> 8) & 0xFF; + + for (retry = 0; retry < ATMEL_I2C_RETRY_TIMES; retry++) { + if (i2c_transfer(client->adapter, msg, 2) == 2) + break; + mdelay(10); + } + if (retry == ATMEL_I2C_RETRY_TIMES) { + printk(KERN_ERR "i2c_read_block retry over %d\n", + ATMEL_I2C_RETRY_TIMES); + return -EIO; + } + return 0; + +} + +int i2c_atmel_write(struct i2c_client *client, uint16_t address, uint8_t *data, uint8_t length) +{ + int retry, loop_i; + uint8_t buf[length + 2]; + + struct i2c_msg msg[] = { + { + .addr = client->addr, + .flags = 0, + .len = length + 2, + .buf = buf, + } + }; + + buf[0] = address & 0xFF; + buf[1] = (address >> 8) & 0xFF; + + for (loop_i = 0; loop_i < length; loop_i++) + buf[loop_i + 2] = data[loop_i]; + + for (retry = 0; retry < ATMEL_I2C_RETRY_TIMES; retry++) { + if (i2c_transfer(client->adapter, msg, 1) == 1) + break; + mdelay(10); + } + + if (retry == ATMEL_I2C_RETRY_TIMES) { + printk(KERN_ERR "i2c_write_block retry over %d\n", + ATMEL_I2C_RETRY_TIMES); + return -EIO; + } + return 0; + +} + +int i2c_atmel_write_byte_data(struct i2c_client *client, uint16_t address, uint8_t value) +{ + i2c_atmel_write(client, address, &value, 1); + return 0; +} + +uint16_t get_object_address(struct atmel_ts_data *ts, uint8_t object_type) +{ + uint8_t loop_i; + for (loop_i = 0; loop_i < ts->id->num_declared_objects; loop_i++) { + if (ts->object_table[loop_i].object_type == object_type) + return ts->object_table[loop_i].i2c_address; + } + return 0; +} +uint8_t get_object_size(struct atmel_ts_data *ts, uint8_t object_type) +{ + uint8_t 
loop_i; + for (loop_i = 0; loop_i < ts->id->num_declared_objects; loop_i++) { + if (ts->object_table[loop_i].object_type == object_type) + return ts->object_table[loop_i].size; + } + return 0; +} + +uint8_t get_rid(struct atmel_ts_data *ts, uint8_t object_type) +{ + uint8_t loop_i; + for (loop_i = 0; loop_i < ts->id->num_declared_objects; loop_i++) { + if (ts->object_table[loop_i].object_type == object_type) + return ts->object_table[loop_i].report_ids; + } + return 0; +} + +#ifdef ATMEL_EN_SYSFS +static ssize_t atmel_gpio_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret = 0; + struct atmel_ts_data *ts_data; + struct atmel_i2c_platform_data *pdata; + + ts_data = private_ts; + pdata = ts_data->client->dev.platform_data; + + ret = gpio_get_value(pdata->gpio_irq); + printk(KERN_DEBUG "GPIO_TP_INT_N=%d\n", pdata->gpio_irq); + sprintf(buf, "GPIO_TP_INT_N=%d\n", ret); + ret = strlen(buf) + 1; + return ret; +} +static DEVICE_ATTR(gpio, S_IRUGO, atmel_gpio_show, NULL); +static ssize_t atmel_vendor_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret = 0; + struct atmel_ts_data *ts_data; + ts_data = private_ts; + sprintf(buf, "%s_x%4.4X_x%4.4X\n", "ATMEL", + ts_data->id->family_id, ts_data->id->version); + ret = strlen(buf) + 1; + return ret; +} + +static DEVICE_ATTR(vendor, S_IRUGO, atmel_vendor_show, NULL); + +static uint16_t atmel_reg_addr; + +static ssize_t atmel_register_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret = 0; + uint8_t ptr[1] = { 0 }; + struct atmel_ts_data *ts_data; + ts_data = private_ts; + if (i2c_atmel_read(ts_data->client, atmel_reg_addr, ptr, 1) < 0) { + printk(KERN_WARNING "%s: read fail\n", __func__); + return ret; + } + ret += sprintf(buf, "addr: %d, data: %d\n", atmel_reg_addr, ptr[0]); + return ret; +} + +static ssize_t atmel_register_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int ret = 0; + struct atmel_ts_data *ts_data; + char buf_tmp[4], buf_zero[200]; + uint8_t write_da; + + ts_data = private_ts; + memset(buf_tmp, 0x0, sizeof(buf_tmp)); + if ((buf[0] == 'r' || buf[0] == 'w') && buf[1] == ':' && + (buf[5] == ':' || buf[5] == '\n')) { + memcpy(buf_tmp, buf + 2, 3); + atmel_reg_addr = simple_strtol(buf_tmp, NULL, 10); + printk(KERN_DEBUG "read addr: 0x%X\n", atmel_reg_addr); + if (!atmel_reg_addr) { + printk(KERN_WARNING "%s: string to number fail\n", + __func__); + return count; + } + printk(KERN_DEBUG "%s: set atmel_reg_addr is: %d\n", + __func__, atmel_reg_addr); + if (buf[0] == 'w' && buf[5] == ':' && buf[9] == '\n') { + memcpy(buf_tmp, buf + 6, 3); + write_da = simple_strtol(buf_tmp, NULL, 10); + printk(KERN_DEBUG "write addr: 0x%X, data: 0x%X\n", + atmel_reg_addr, write_da); + ret = i2c_atmel_write_byte_data(ts_data->client, + atmel_reg_addr, write_da); + if (ret < 0) { + printk(KERN_ERR "%s: write fail(%d)\n", + __func__, ret); + } + } + } + if ((buf[0] == '0') && (buf[1] == ':') && (buf[5] == ':')) { + memcpy(buf_tmp, buf + 2, 3); + atmel_reg_addr = simple_strtol(buf_tmp, NULL, 10); + memcpy(buf_tmp, buf + 6, 3); + memset(buf_zero, 0x0, sizeof(buf_zero)); + ret = i2c_atmel_write(ts_data->client, atmel_reg_addr, + buf_zero, simple_strtol(buf_tmp, NULL, 10) - atmel_reg_addr + 1); + if (buf[9] == 'r') { + i2c_atmel_write_byte_data(ts_data->client, + get_object_address(ts_data, GEN_COMMANDPROCESSOR_T6) + + T6_CFG_BACKUPNV, 0x55); + i2c_atmel_write_byte_data(ts_data->client, + get_object_address(ts_data, 
GEN_COMMANDPROCESSOR_T6) + + T6_CFG_RESET, 0x11); + } + } + + return count; +} + +static DEVICE_ATTR(register, (S_IWUSR|S_IRUGO), + atmel_register_show, atmel_register_store); + +static ssize_t atmel_regdump_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int count = 0, ret_t = 0; + struct atmel_ts_data *ts_data; + uint16_t loop_i; + uint8_t ptr[1] = { 0 }; + ts_data = private_ts; + if (ts_data->id->version >= 0x14) { + for (loop_i = 230; loop_i <= 425; loop_i++) { + ret_t = i2c_atmel_read(ts_data->client, loop_i, ptr, 1); + if (ret_t < 0) { + printk(KERN_WARNING "dump fail, addr: %d\n", + loop_i); + } + count += sprintf(buf + count, "addr[%3d]: %3d, ", + loop_i , *ptr); + if (((loop_i - 230) % 4) == 3) + count += sprintf(buf + count, "\n"); + } + count += sprintf(buf + count, "\n"); + } + return count; +} + +static ssize_t atmel_regdump_dump(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct atmel_ts_data *ts_data; + ts_data = private_ts; + if (buf[0] >= '0' && buf[0] <= '9' && buf[1] == '\n') + ts_data->debug_log_level = buf[0] - '0'; + + return count; + +} + +static DEVICE_ATTR(regdump, (S_IWUSR|S_IRUGO), + atmel_regdump_show, atmel_regdump_dump); + +static ssize_t atmel_debug_level_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct atmel_ts_data *ts_data; + size_t count = 0; + ts_data = private_ts; + + count += sprintf(buf, "%d\n", ts_data->debug_log_level); + + return count; +} + +static ssize_t atmel_debug_level_dump(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct atmel_ts_data *ts_data; + ts_data = private_ts; + if (buf[0] >= '0' && buf[0] <= '9' && buf[1] == '\n') + ts_data->debug_log_level = buf[0] - '0'; + + return count; +} + +static DEVICE_ATTR(debug_level, (S_IWUSR|S_IRUGO), + atmel_debug_level_show, atmel_debug_level_dump); + +static ssize_t atmel_diag_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct atmel_ts_data *ts_data; + size_t count = 0; + uint8_t data[T37_PAGE_SIZE]; + uint8_t loop_i, loop_j; + int16_t rawdata; + int x, y; + ts_data = private_ts; + memset(data, 0x0, sizeof(data)); + + if (ts_data->diag_command != T6_CFG_DIAG_CMD_DELTAS && + ts_data->diag_command != T6_CFG_DIAG_CMD_REF) + return count; + + i2c_atmel_write_byte_data(ts_data->client, + get_object_address(ts_data, GEN_COMMANDPROCESSOR_T6) + T6_CFG_DIAG, + ts_data->diag_command); + + x = T28_CFG_MODE0_X + ts_data->config_setting[NONE].config_T28[T28_CFG_MODE]; + y = T28_CFG_MODE0_Y - ts_data->config_setting[NONE].config_T28[T28_CFG_MODE]; + count += sprintf(buf, "Channel: %d * %d\n", x, y); + + for (loop_i = 0; loop_i < 4; loop_i++) { + for (loop_j = 0; + !(data[T37_MODE] == ts_data->diag_command && data[T37_PAGE] == loop_i) && loop_j < 10; loop_j++) { + msleep(5); + i2c_atmel_read(ts_data->client, + get_object_address(ts_data, DIAGNOSTIC_T37), data, 2); + } + if (loop_j == 10) + printk(KERN_ERR "%s: Diag data not ready\n", __func__); + + i2c_atmel_read(ts_data->client, + get_object_address(ts_data, DIAGNOSTIC_T37) + + T37_DATA, data, T37_PAGE_SIZE); + for (loop_j = 0; loop_j < T37_PAGE_SIZE - 1; loop_j += 2) { + if ((loop_i * 64 + loop_j / 2) >= (x * y)) { + count += sprintf(buf + count, "\n"); + return count; + } else { + rawdata = data[loop_j+1] << 8 | data[loop_j]; + count += sprintf(buf + count, "%5d", rawdata); + if (((loop_i * 64 + loop_j / 2) % y) == (y - 1)) + count += sprintf(buf + count, "\n"); + } + } + 
i2c_atmel_write_byte_data(ts_data->client,
+			get_object_address(ts_data, GEN_COMMANDPROCESSOR_T6) +
+			T6_CFG_DIAG, T6_CFG_DIAG_CMD_PAGEUP);
+
+	}
+
+	return count;
+}
+
+static ssize_t atmel_diag_dump(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct atmel_ts_data *ts_data;
+	ts_data = private_ts;
+	if (buf[0] == '1')
+		ts_data->diag_command = T6_CFG_DIAG_CMD_DELTAS;
+	if (buf[0] == '2')
+		ts_data->diag_command = T6_CFG_DIAG_CMD_REF;
+
+	return count;
+}
+
+static DEVICE_ATTR(diag, (S_IWUSR|S_IRUGO),
+	atmel_diag_show, atmel_diag_dump);
+
+static struct kobject *android_touch_kobj;
+
+static int atmel_touch_sysfs_init(void)
+{
+	int ret;
+	android_touch_kobj = kobject_create_and_add("android_touch", NULL);
+	if (android_touch_kobj == NULL) {
+		printk(KERN_ERR "%s: kobject_create_and_add failed\n", __func__);
+		ret = -ENOMEM;
+		return ret;
+	}
+	ret = sysfs_create_file(android_touch_kobj, &dev_attr_gpio.attr);
+	if (ret) {
+		printk(KERN_ERR "%s: sysfs_create_file failed\n", __func__);
+		return ret;
+	}
+	ret = sysfs_create_file(android_touch_kobj, &dev_attr_vendor.attr);
+	if (ret) {
+		printk(KERN_ERR "%s: sysfs_create_file failed\n", __func__);
+		return ret;
+	}
+	atmel_reg_addr = 0;
+	ret = sysfs_create_file(android_touch_kobj, &dev_attr_register.attr);
+	if (ret) {
+		printk(KERN_ERR "%s: sysfs_create_file failed\n", __func__);
+		return ret;
+	}
+	ret = sysfs_create_file(android_touch_kobj, &dev_attr_regdump.attr);
+	if (ret) {
+		printk(KERN_ERR "%s: sysfs_create_file failed\n", __func__);
+		return ret;
+	}
+	ret = sysfs_create_file(android_touch_kobj, &dev_attr_debug_level.attr);
+	if (ret) {
+		printk(KERN_ERR "%s: sysfs_create_file failed\n", __func__);
+		return ret;
+	}
+	ret = sysfs_create_file(android_touch_kobj, &dev_attr_diag.attr);
+	if (ret) {
+		printk(KERN_ERR "%s: sysfs_create_file failed\n", __func__);
+		return ret;
+	}
+	return 0;
+}
+
+static void atmel_touch_sysfs_deinit(void)
+{
+	/* remove every attribute created in atmel_touch_sysfs_init() */
+	sysfs_remove_file(android_touch_kobj, &dev_attr_diag.attr);
+	sysfs_remove_file(android_touch_kobj, &dev_attr_debug_level.attr);
+	sysfs_remove_file(android_touch_kobj, &dev_attr_regdump.attr);
+	sysfs_remove_file(android_touch_kobj, &dev_attr_register.attr);
+	sysfs_remove_file(android_touch_kobj, &dev_attr_vendor.attr);
+	sysfs_remove_file(android_touch_kobj, &dev_attr_gpio.attr);
+	kobject_put(android_touch_kobj);
+}
+
+#endif
+static int check_delta(struct atmel_ts_data *ts)
+{
+	uint8_t data[T37_DATA + T37_PAGE_SIZE];
+	uint8_t loop_i;
+	int16_t rawdata, count = 0;
+
+	memset(data, 0xFF, sizeof(data));
+	i2c_atmel_write_byte_data(ts->client,
+		get_object_address(ts, GEN_COMMANDPROCESSOR_T6) +
+		T6_CFG_DIAG, T6_CFG_DIAG_CMD_DELTAS);
+
+	for (loop_i = 0;
+		!(data[T37_MODE] == T6_CFG_DIAG_CMD_DELTAS && data[T37_PAGE] == T37_PAGE_NUM0) && loop_i < 10; loop_i++) {
+		msleep(5);
+		i2c_atmel_read(ts->client,
+			get_object_address(ts, DIAGNOSTIC_T37), data, 2);
+	}
+	if (loop_i == 10)
+		printk(KERN_ERR "%s: Diag data not ready\n", __func__);
+
+	i2c_atmel_read(ts->client,
+		get_object_address(ts, DIAGNOSTIC_T37),
+		data, T37_DATA + T37_PAGE_SIZE);
+	if (data[T37_MODE] == T6_CFG_DIAG_CMD_DELTAS &&
+		data[T37_PAGE] == T37_PAGE_NUM0) {
+		for (loop_i = T37_DATA;
+			loop_i < (T37_DATA + T37_PAGE_SIZE - 1); loop_i += 2) {
+			/* deltas are signed 16-bit values, low byte first */
+			rawdata = (int16_t)(data[loop_i + 1] << 8 | data[loop_i]);
+			if (abs(rawdata) > 50)
+				count++;
+		}
+		if (count > 32)
+			return 1;
+	}
+	return 0;
+}
+
+static void check_calibration(struct atmel_ts_data *ts)
+{
+	uint8_t data[T37_DATA + T37_TCH_FLAG_SIZE];
+	uint8_t loop_i, loop_j, x_limit = 0, check_mask, tch_ch = 0, atch_ch = 0;
+
+	memset(data, 0xFF, sizeof(data));
+
i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, GEN_COMMANDPROCESSOR_T6) + + T6_CFG_DIAG, T6_CFG_DIAG_CMD_TCH); + + for (loop_i = 0; + !(data[T37_MODE] == T6_CFG_DIAG_CMD_TCH && data[T37_PAGE] == T37_PAGE_NUM0) && loop_i < 10; loop_i++) { + msleep(5); + i2c_atmel_read(ts->client, + get_object_address(ts, DIAGNOSTIC_T37), data, 2); + } + + if (loop_i == 10) + printk(KERN_ERR "%s: Diag data not ready\n", __func__); + + i2c_atmel_read(ts->client, get_object_address(ts, DIAGNOSTIC_T37), data, + T37_DATA + T37_TCH_FLAG_SIZE); + if (data[T37_MODE] == T6_CFG_DIAG_CMD_TCH && + data[T37_PAGE] == T37_PAGE_NUM0) { + x_limit = T28_CFG_MODE0_X + ts->config_setting[NONE].config_T28[T28_CFG_MODE]; + x_limit = x_limit << 1; + if (x_limit <= 40) { + for (loop_i = 0; loop_i < x_limit; loop_i += 2) { + for (loop_j = 0; loop_j < BITS_PER_BYTE; loop_j++) { + check_mask = BIT_MASK(loop_j); + if (data[T37_DATA + T37_TCH_FLAG_IDX + loop_i] & + check_mask) + tch_ch++; + if (data[T37_DATA + T37_TCH_FLAG_IDX + loop_i + 1] & + check_mask) + tch_ch++; + if (data[T37_DATA + T37_ATCH_FLAG_IDX + loop_i] & + check_mask) + atch_ch++; + if (data[T37_DATA + T37_ATCH_FLAG_IDX + loop_i + 1] & + check_mask) + atch_ch++; + } + } + } + } + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, GEN_COMMANDPROCESSOR_T6) + + T6_CFG_DIAG, T6_CFG_DIAG_CMD_PAGEUP); + + if (tch_ch && (atch_ch == 0)) { + if (jiffies > (ts->timestamp + HZ/2) && (ts->calibration_confirm == 1)) { + ts->calibration_confirm = 2; + printk(KERN_INFO "%s: calibration confirm\n", __func__); + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, GEN_ACQUISITIONCONFIG_T8) + T8_CFG_ATCHCALST, + ts->config_setting[ts->status].config_T8[T8_CFG_ATCHCALST]); + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, GEN_ACQUISITIONCONFIG_T8) + + T8_CFG_ATCHCALSTHR, + ts->config_setting[ts->status].config_T8[T8_CFG_ATCHCALSTHR]); + } + if (ts->calibration_confirm < 2) + ts->calibration_confirm = 1; + ts->timestamp = jiffies; + } else if ((tch_ch - 25) <= atch_ch && (tch_ch || atch_ch)) { + ts->calibration_confirm = 0; + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, GEN_COMMANDPROCESSOR_T6) + + T6_CFG_CALIBRATE, 0x55); + } +} + +static void msg_process_finger_data(struct atmel_finger_data *fdata, uint8_t *data) +{ + fdata->x = data[T9_MSG_XPOSMSB] << 2 | data[T9_MSG_XYPOSLSB] >> 6; + fdata->y = data[T9_MSG_YPOSMSB] << 2 | (data[T9_MSG_XYPOSLSB] & 0x0C) >> 2; + fdata->w = data[T9_MSG_TCHAREA]; + fdata->z = data[T9_MSG_TCHAMPLITUDE]; +} + +static void msg_process_multitouch(struct atmel_ts_data *ts, uint8_t *data, uint8_t idx) +{ + if (ts->calibration_confirm < 2 && ts->id->version == 0x16) + check_calibration(ts); + + msg_process_finger_data(&ts->finger_data[idx], data); + if (data[T9_MSG_STATUS] & T9_MSG_STATUS_RELEASE) { + if (ts->grip_suppression & BIT(idx)) + ts->grip_suppression &= ~BIT(idx); + if (ts->finger_pressed & BIT(idx)) { + ts->finger_count--; + ts->finger_pressed &= ~BIT(idx); + if (!ts->first_pressed) { + if (!ts->finger_count) + ts->first_pressed = 1; + printk(KERN_INFO "E%d@%d,%d\n", + idx + 1, ts->finger_data[idx].x, ts->finger_data[idx].y); + } + if (ts->finger_count == 0 && ts->id->version >= 0x20) { + if (ts->pre_data[0] == 2) + ts->pre_data[0] = 0; + else if (idx == 0 && ts->pre_data[0] == 0 && ts->finger_data[0].y > 750 + && ((ts->finger_data[0].y - ts->pre_data[2]) > 135)) { + uint8_t ATCH_NOR[4] = {0, 1, 0, 0}; + i2c_atmel_write(ts->client, + get_object_address(ts, GEN_ACQUISITIONCONFIG_T8) + 
6, + ATCH_NOR, 4); + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, TOUCH_MULTITOUCHSCREEN_T9) + 7, + ts->config_setting[ts->status].config[0]); + if (ts->GCAF_sample < 32) + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, GEN_COMMANDPROCESSOR_T6) + + T6_CFG_CALIBRATE, 0x55); + ts->pre_data[0] = 1; + printk(KERN_INFO "%s: calibration confirm\n", __func__); + } + } + } + } else if ((data[T9_MSG_STATUS] & (T9_MSG_STATUS_DETECT|T9_MSG_STATUS_PRESS)) && + !(ts->finger_pressed & BIT(idx))) { + if (ts->filter_level[0]) { + if (ts->finger_data[idx].x < ts->filter_level[FL_XLOGRIPMIN] || + ts->finger_data[idx].x > ts->filter_level[FL_XHIGRIPMAX]) + ts->grip_suppression |= BIT(idx); + else if ((ts->finger_data[idx].x < ts->filter_level[FL_XLOGRIPMAX] || + ts->finger_data[idx].x > ts->filter_level[FL_XHIGRIPMIN]) && + (ts->grip_suppression & BIT(idx))) + ts->grip_suppression |= BIT(idx); + else if (ts->finger_data[idx].x > ts->filter_level[FL_XLOGRIPMAX] && + ts->finger_data[idx].x < ts->filter_level[FL_XHIGRIPMIN]) + ts->grip_suppression &= ~BIT(idx); + } + if (!(ts->grip_suppression & BIT(idx))) { + if (!ts->first_pressed) + printk(KERN_INFO "S%d@%d,%d\n", + idx + 1, ts->finger_data[idx].x, ts->finger_data[idx].y); + ts->finger_count++; + ts->finger_pressed |= BIT(idx); + if (ts->id->version >= 0x20) { + if (ts->pre_data[0] == 0 && idx == 0) { + ts->pre_data[1] = ts->finger_data[0].x; + ts->pre_data[2] = ts->finger_data[0].y; + } + if (ts->pre_data[0] == 0 && ts->finger_count > 1) + ts->pre_data[0] = 2; + } + } + } +} + +static void msg_process_multitouch_legacy(struct atmel_ts_data *ts, uint8_t *data, uint8_t idx) +{ + /* for issue debug only */ + if ((data[T9_MSG_STATUS] & (T9_MSG_STATUS_PRESS|T9_MSG_STATUS_RELEASE)) == + (T9_MSG_STATUS_PRESS|T9_MSG_STATUS_RELEASE)) + printk(KERN_INFO "x60 ISSUE happened: %x, %x, %x, %x, %x, %x, %x\n", + data[0], data[1], data[2], data[3], data[4], data[5], data[6]); + + msg_process_finger_data(&ts->finger_data[idx], data); + if ((data[T9_MSG_STATUS] & T9_MSG_STATUS_RELEASE) && + (ts->finger_pressed & BIT(idx))) { + ts->finger_count--; + ts->finger_pressed &= ~BIT(idx); + } else if ((data[T9_MSG_STATUS] & (T9_MSG_STATUS_DETECT|T9_MSG_STATUS_PRESS)) && + !(ts->finger_pressed & BIT(idx))) { + ts->finger_count++; + ts->finger_pressed |= BIT(idx); + } +} + +static void msg_process_noisesuppression(struct atmel_ts_data *ts, uint8_t *data) +{ + uint8_t loop_i; + + if (ts->status == CONNECTED && data[T22_MSG_GCAFDEPTH] >= ts->GCAF_sample) { + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, GEN_POWERCONFIG_T7) + + T7_CFG_IDLEACQINT, 0x08); + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, GEN_POWERCONFIG_T7) + + T7_CFG_ACTVACQINT, 0x08); + for (loop_i = 0; loop_i < 5; loop_i++) { + if (ts->GCAF_sample < ts->GCAF_level[loop_i]) { + ts->GCAF_sample = ts->GCAF_level[loop_i]; + break; + } + } + if (loop_i == 5) + ts->GCAF_sample += 24; + if (ts->GCAF_sample >= 63) { + ts->GCAF_sample = 63; + if (ts->noise_config[0]) { + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, TOUCH_MULTITOUCHSCREEN_T9) + + T9_CFG_TCHTHR, ts->noise_config[NC_TCHTHR]); + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, TOUCH_MULTITOUCHSCREEN_T9) + + T9_CFG_TCHDI, ts->noise_config[NC_TCHDI]); + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, PROCG_NOISESUPPRESSION_T22) + + T22_CFG_NOISETHR, ts->noise_config[NC_NOISETHR]); + } else { + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, 
PROCG_NOISESUPPRESSION_T22) + + T22_CFG_NOISETHR, ts->config_setting[CONNECTED].config[CB_NOISETHR]); + } + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, GEN_ACQUISITIONCONFIG_T8) + + T8_CFG_ATCHCALSTHR, 0x1); + } + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, SPT_CTECONFIG_T28) + + T28_CFG_ACTVGCAFDEPTH, ts->GCAF_sample); + } + if (data[T22_MSG_STATUS] & (T22_MSG_STATUS_FHERR|T22_MSG_STATUS_GCAFERR) && + ts->GCAF_sample == 63) { + ts->noisethr_config += 3; + if (ts->noisethr && ts->noisethr_config > ts->noisethr) + ts->noisethr_config = ts->noisethr; + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, PROCG_NOISESUPPRESSION_T22) + + T22_CFG_NOISETHR, ts->noisethr_config); + } +} + +static void compatible_input_report(struct input_dev *idev, + struct atmel_finger_data *fdata, uint8_t press, uint8_t last) +{ + if (!press) { + input_report_key(idev, BTN_TOUCH, 0); + input_report_abs(idev, ABS_MT_TOUCH_MAJOR, 0); + } + else { + input_report_key(idev, BTN_TOUCH, 1); + input_report_abs(idev, ABS_MT_TOUCH_MAJOR, fdata->z); + input_report_abs(idev, ABS_MT_WIDTH_MAJOR, fdata->w); + input_report_abs(idev, ABS_MT_POSITION_X, fdata->x); + input_report_abs(idev, ABS_MT_POSITION_Y, fdata->y); + input_report_abs(idev, ABS_MT_PRESSURE, fdata->z); + input_mt_sync(idev); + } +} + +#ifndef CONFIG_TOUCHSCREEN_COMPATIBLE_REPORT +static void htc_input_report(struct input_dev *idev, + struct atmel_finger_data *fdata, uint8_t press, uint8_t last) +{ + if (!press) { + input_report_key(idev, BTN_TOUCH, 0); + input_report_abs(idev, ABS_MT_AMPLITUDE, 0); + input_report_abs(idev, ABS_MT_PRESSURE, 0); + input_report_abs(idev, ABS_MT_POSITION, BIT(31)); + } else { + input_report_key(idev, BTN_TOUCH, 1); + input_report_abs(idev, ABS_MT_AMPLITUDE, fdata->z << 16 | fdata->w); + input_report_abs(idev, ABS_MT_POSITION, + (last ? 
BIT(31) : 0) | fdata->x << 16 | fdata->y); + input_report_abs(idev, ABS_MT_PRESSURE, fdata->z); + } +} +#endif + +static void multi_input_report(struct atmel_ts_data *ts) +{ + uint8_t loop_i, finger_report = 0; + + for (loop_i = 0; loop_i < ts->finger_support; loop_i++) { + if (ts->finger_pressed & BIT(loop_i)) { + if (ts->id->version >= 0x15) { +#ifdef CONFIG_TOUCHSCREEN_COMPATIBLE_REPORT + compatible_input_report(ts->input_dev, &ts->finger_data[loop_i], + 1, (ts->finger_count == ++finger_report)); +#else + htc_input_report(ts->input_dev, &ts->finger_data[loop_i], + 1, (ts->finger_count == ++finger_report)); +#endif + } else { + compatible_input_report(ts->input_dev, &ts->finger_data[loop_i], + 1, (ts->finger_count == ++finger_report)); + } + if (ts->debug_log_level & 0x2) + printk(KERN_INFO "Finger %d=> X:%d, Y:%d w:%d, z:%d, F:%d\n", + loop_i + 1, + ts->finger_data[loop_i].x, ts->finger_data[loop_i].y, + ts->finger_data[loop_i].w, ts->finger_data[loop_i].z, + ts->finger_count); + } + } +} + +static void atmel_ts_work_func(struct work_struct *work) +{ + int ret; + struct atmel_ts_data *ts = container_of(work, struct atmel_ts_data, work); + uint8_t data[7]; + uint8_t loop_i, loop_j, report_type, msg_byte_num = 7; + + memset(data, 0x0, sizeof(data)); + + ret = i2c_atmel_read(ts->client, get_object_address(ts, + GEN_MESSAGEPROCESSOR_T5), data, 7); + + if (ts->debug_log_level & 0x1) { + for (loop_i = 0; loop_i < 7; loop_i++) + printk("0x%2.2X ", data[loop_i]); + printk("\n"); + } + + if (ts->id->version >= 0x15) { + report_type = data[MSG_RID] - ts->finger_type; + if (report_type >= 0 && report_type < ts->finger_support) { + msg_process_multitouch(ts, data, report_type); + } else { + if (data[MSG_RID] == get_rid(ts, GEN_COMMANDPROCESSOR_T6)) { + printk(KERN_INFO "Touch Status: "); + msg_byte_num = 5; + } else if (data[MSG_RID] == get_rid(ts, PROCI_GRIPFACESUPPRESSION_T20)) { + if (ts->calibration_confirm < 2 && ts->id->version == 0x16) + check_calibration(ts); + ts->face_suppression = data[T20_MSG_STATUS]; + printk(KERN_INFO "Touch Face suppression %s: ", + ts->face_suppression ? 
"Active" : "Inactive"); + msg_byte_num = 2; + } else if (data[MSG_RID] == get_rid(ts, PROCG_NOISESUPPRESSION_T22)) { + if (data[T22_MSG_STATUS] == T22_MSG_STATUS_GCAFCHG) /* reduce message print */ + msg_byte_num = 0; + else { + printk(KERN_INFO "Touch Noise suppression: "); + msg_byte_num = 4; + msg_process_noisesuppression(ts, data); + } + } + if (data[MSG_RID] != 0xFF) { + for (loop_j = 0; loop_j < msg_byte_num; loop_j++) + printk("0x%2.2X ", data[loop_j]); + if (msg_byte_num) + printk("\n"); + } + } + if (!ts->finger_count || ts->face_suppression) { + ts->finger_pressed = 0; + ts->finger_count = 0; +#ifdef CONFIG_TOUCHSCREEN_COMPATIBLE_REPORT + compatible_input_report(ts->input_dev, NULL, 0, 1); +#else + htc_input_report(ts->input_dev, NULL, 0, 1); +#endif + if (ts->debug_log_level & 0x2) + printk(KERN_INFO "Finger leave\n"); + } else { + multi_input_report(ts); + } +#ifdef CONFIG_TOUCHSCREEN_COMPATIBLE_REPORT + input_sync(ts->input_dev); +#endif + } else { /*read one message one time */ + report_type = data[MSG_RID] - ts->finger_type; + if (report_type >= 0 && report_type < ts->finger_support) { + msg_process_multitouch_legacy(ts, data, report_type); + if (!ts->finger_count) { + ts->finger_pressed = 0; + compatible_input_report(ts->input_dev, NULL, 0, 1); + if (ts->debug_log_level & 0x2) + printk(KERN_INFO "Finger leave\n"); + } else { + multi_input_report(ts); + } + input_sync(ts->input_dev); + } else + printk(KERN_INFO"RAW data: %x, %x, %x, %x, %x, %x, %x\n", + data[0], data[1], data[2], data[3], data[4], data[5], data[6]); + } + enable_irq(ts->client->irq); +} + +static irqreturn_t atmel_ts_irq_handler(int irq, void *dev_id) +{ + struct atmel_ts_data *ts = dev_id; + + disable_irq_nosync(ts->client->irq); + queue_work(ts->atmel_wq, &ts->work); + return IRQ_HANDLED; +} + +static void cable_tp_status_handler_func(int connect_status) +{ + struct atmel_ts_data *ts; + + printk(KERN_INFO "Touch: cable change to %d\n", connect_status); + ts = private_ts; + if (connect_status != ts->status) { + ts->status = connect_status ? 
CONNECTED : NONE; + if (ts->config_setting[CONNECTED].config[0]) { + if (ts->status == CONNECTED && ts->id->version < 0x20) { + ts->calibration_confirm = 2; + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, GEN_ACQUISITIONCONFIG_T8) + + T8_CFG_ATCHCALST, + ts->config_setting[ts->status].config_T8[T8_CFG_ATCHCALST]); + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, GEN_ACQUISITIONCONFIG_T8) + + T8_CFG_ATCHCALSTHR , + ts->config_setting[ts->status].config_T8[T8_CFG_ATCHCALSTHR]); + } + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, TOUCH_MULTITOUCHSCREEN_T9) + + T9_CFG_TCHTHR, + ts->config_setting[ts->status].config[CB_TCHTHR]); + if (ts->status == NONE) { + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, PROCG_NOISESUPPRESSION_T22) + + T22_CFG_NOISETHR, + ts->config_setting[NONE].config[CB_NOISETHR]); + ts->noisethr_config = + ts->config_setting[CONNECTED].config[CB_NOISETHR]; + } + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, SPT_CTECONFIG_T28) + + T28_CFG_IDLEGCAFDEPTH, + ts->config_setting[ts->status].config[CB_IDLEGCAFDEPTH]); + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, SPT_CTECONFIG_T28) + + T28_CFG_ACTVGCAFDEPTH, + ts->config_setting[ts->status].config[CB_ACTVGCAFDEPTH]); + ts->GCAF_sample = + ts->config_setting[CONNECTED].config[CB_ACTVGCAFDEPTH]; + } else { + if (ts->config_setting[CONNECTED].config_T7 != NULL) + i2c_atmel_write(ts->client, + get_object_address(ts, GEN_POWERCONFIG_T7), + ts->config_setting[ts->status].config_T7, + get_object_size(ts, GEN_POWERCONFIG_T7)); + if (ts->config_setting[CONNECTED].config_T8 != NULL) + i2c_atmel_write(ts->client, + get_object_address(ts, GEN_ACQUISITIONCONFIG_T8), + ts->config_setting[CONNECTED].config_T8, + get_object_size(ts, GEN_ACQUISITIONCONFIG_T8)); + if (ts->config_setting[ts->status].config_T9 != NULL) + i2c_atmel_write(ts->client, + get_object_address(ts, TOUCH_MULTITOUCHSCREEN_T9), + ts->config_setting[ts->status].config_T9, + get_object_size(ts, TOUCH_MULTITOUCHSCREEN_T9)); + if (ts->config_setting[ts->status].config_T22 != NULL) { + i2c_atmel_write(ts->client, + get_object_address(ts, PROCG_NOISESUPPRESSION_T22), + ts->config_setting[ts->status].config_T22, + get_object_size(ts, PROCG_NOISESUPPRESSION_T22)); + ts->noisethr_config = + ts->config_setting[ts->status].config_T22[8]; + } + if (ts->config_setting[ts->status].config_T28 != NULL) { + i2c_atmel_write(ts->client, + get_object_address(ts, SPT_CTECONFIG_T28), + ts->config_setting[ts->status].config_T28, + get_object_size(ts, SPT_CTECONFIG_T28)); + ts->GCAF_sample = + ts->config_setting[ts->status].config_T28[T28_CFG_ACTVGCAFDEPTH]; + } + } + } +} + +static int read_object_table(struct atmel_ts_data *ts) +{ + uint8_t i, type_count = 0; + uint8_t data[6]; + memset(data, 0x0, sizeof(data)); + + ts->object_table = kzalloc(sizeof(struct object_t)*ts->id->num_declared_objects, GFP_KERNEL); + if (ts->object_table == NULL) { + printk(KERN_ERR "%s: allocate object_table failed\n", __func__); + return -ENOMEM; + } + + for (i = 0; i < ts->id->num_declared_objects; i++) { + i2c_atmel_read(ts->client, i * 6 + 0x07, data, 6); + ts->object_table[i].object_type = data[OBJ_TABLE_TYPE]; + ts->object_table[i].i2c_address = + data[OBJ_TABLE_LSB] | data[OBJ_TABLE_MSB] << 8; + ts->object_table[i].size = data[OBJ_TABLE_SIZE] + 1; + ts->object_table[i].instances = data[OBJ_TABLE_INSTANCES]; + ts->object_table[i].num_report_ids = data[OBJ_TABLE_RIDS]; + if (data[OBJ_TABLE_RIDS]) { + 
ts->object_table[i].report_ids = type_count + 1; + type_count += data[OBJ_TABLE_RIDS]; + } + if (data[OBJ_TABLE_TYPE] == TOUCH_MULTITOUCHSCREEN_T9) + ts->finger_type = ts->object_table[i].report_ids; + printk(KERN_INFO + "Type: %2.2X, Start: %4.4X, Size: %2X, Instance: %2X, RD#: %2X, %2X\n", + ts->object_table[i].object_type , ts->object_table[i].i2c_address, + ts->object_table[i].size, ts->object_table[i].instances, + ts->object_table[i].num_report_ids, ts->object_table[i].report_ids); + } + + return 0; +} + +static struct t_usb_status_notifier cable_status_handler = { + .name = "usb_tp_connected", + .func = cable_tp_status_handler_func, +}; + +static int atmel_ts_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct atmel_ts_data *ts; + struct atmel_i2c_platform_data *pdata; + int ret = 0, intr = 0; + uint8_t loop_i; + struct i2c_msg msg[2]; + uint8_t data[16]; + uint8_t CRC_check = 0; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + printk(KERN_ERR"%s: need I2C_FUNC_I2C\n", __func__); + ret = -ENODEV; + goto err_check_functionality_failed; + } + + ts = kzalloc(sizeof(struct atmel_ts_data), GFP_KERNEL); + if (ts == NULL) { + printk(KERN_ERR"%s: allocate atmel_ts_data failed\n", __func__); + ret = -ENOMEM; + goto err_alloc_data_failed; + } + + ts->atmel_wq = create_singlethread_workqueue("atmel_wq"); + if (!ts->atmel_wq) { + printk(KERN_ERR"%s: create workqueue failed\n", __func__); + ret = -ENOMEM; + goto err_cread_wq_failed; + } + + INIT_WORK(&ts->work, atmel_ts_work_func); + ts->client = client; + i2c_set_clientdata(client, ts); + pdata = client->dev.platform_data; + + if (pdata) { + ts->power = pdata->power; + intr = pdata->gpio_irq; + } + if (ts->power) { + ret = ts->power(1); + msleep(2); + if (ret < 0) { + printk(KERN_ERR "%s:power on failed\n", __func__); + goto err_power_failed; + } + } + + for (loop_i = 0; loop_i < 10; loop_i++) { + if (!gpio_get_value(intr)) + break; + msleep(10); + } + + if (loop_i == 10) + printk(KERN_ERR "No Messages\n"); + + /* read message*/ + msg[0].addr = ts->client->addr; + msg[0].flags = I2C_M_RD; + msg[0].len = 7; + msg[0].buf = data; + ret = i2c_transfer(client->adapter, msg, 1); + + if (ret < 0) { + printk(KERN_INFO "No Atmel chip inside\n"); + goto err_detect_failed; + } + printk(KERN_INFO + "Touch: 0x%2.2X 0x%2.2X 0x%2.2X 0x%2.2X 0x%2.2X 0x%2.2X 0x%2.2X\n", + data[0], data[1], data[2], data[3], data[4], data[5], data[6]); + + if (data[MSG_RID] == 0x01 && + (data[T6_MSG_STATUS] & (T6_MSG_STATUS_SIGERR|T6_MSG_STATUS_COMSERR))) { + printk(KERN_INFO "atmel_ts_probe(): init err: %x\n", data[1]); + goto err_detect_failed; + } else { + for (loop_i = 0; loop_i < 10; loop_i++) { + if (gpio_get_value(intr)) { + printk(KERN_INFO "Touch: No more message\n"); + break; + } + ret = i2c_transfer(client->adapter, msg, 1); + printk(KERN_INFO + "Touch: 0x%2.2X 0x%2.2X 0x%2.2X 0x%2.2X 0x%2.2X 0x%2.2X 0x%2.2X\n", + data[0], data[1], data[2], data[3], data[4], data[5], data[6]); + msleep(10); + } + } + + /* Read the info block data. 
*/ + ts->id = kzalloc(sizeof(struct info_id_t), GFP_KERNEL); + if (ts->id == NULL) { + printk(KERN_ERR"%s: allocate info_id_t failed\n", __func__); + goto err_alloc_failed; + } + ret = i2c_atmel_read(client, 0x00, data, 7); + + ts->id->family_id = data[INFO_BLK_FID]; + ts->id->variant_id = data[INFO_BLK_VID]; + if (ts->id->family_id == 0x80 && ts->id->variant_id == 0x10) + ts->id->version = data[INFO_BLK_VER] + 6; + else + ts->id->version = data[INFO_BLK_VER]; + ts->id->build = data[INFO_BLK_BUILD]; + ts->id->matrix_x_size = data[INFO_BLK_XSIZE]; + ts->id->matrix_y_size = data[INFO_BLK_YSIZE]; + ts->id->num_declared_objects = data[INFO_BLK_OBJS]; + + printk(KERN_INFO + "info block: 0x%2.2X 0x%2.2X 0x%2.2X 0x%2.2X 0x%2.2X 0x%2.2X 0x%2.2X\n", + ts->id->family_id, ts->id->variant_id, + ts->id->version, ts->id->build, + ts->id->matrix_x_size, ts->id->matrix_y_size, + ts->id->num_declared_objects); + + /* + Begin ffolkes multitouch selection + Purpose: Some hardware can't support more than 3 multitouch points, so revert back to 3 if we find a 004F touch sensor + BEGIN + */ + if(ts->id->family_id == 0x4F && pdata->config_T9[14] != 3) { + printk(KERN_INFO "%d point multitouch disabled due to possible hardware conflict, reverting to 3 point\n", pdata->config_T9[14]); + pdata->config_T9[14] = 3; + } else { + printk(KERN_INFO "%d point multitouch enabled\n", pdata->config_T9[14]); + } + /* + END + */ + + /* Read object table. */ + ret = read_object_table(ts); + if (ret < 0) + goto err_alloc_failed; + + if (pdata) { + while (pdata->version > ts->id->version) + pdata++; + if (pdata->source) { + i2c_atmel_write_byte_data(client, + get_object_address(ts, SPT_GPIOPWM_T19) + T19_CFG_CTRL, + T19_CFG_CTRL_ENABLE | + T19_CFG_CTRL_RPTEN | + T19_CFG_CTRL_FORCERPT); + for (loop_i = 0; loop_i < 10; loop_i++) { + if (!gpio_get_value(intr)) + break; + msleep(10); + } + if (loop_i == 10) + printk(KERN_ERR "No Messages when check source\n"); + for (loop_i = 0; loop_i < 100; loop_i++) { + i2c_atmel_read(ts->client, get_object_address(ts, + GEN_MESSAGEPROCESSOR_T5), data, 2); + if (data[MSG_RID] == get_rid(ts, SPT_GPIOPWM_T19)) { + while ((data[T19_MSG_STATUS] >> 3) != pdata->source) + pdata++; + break; + } + } + } + + ts->finger_support = pdata->config_T9[T9_CFG_NUMTOUCH]; + printk(KERN_INFO + "finger_type: %d, max finger: %d\n", + ts->finger_type, ts->finger_support); + + /* infoamtion block CRC check */ + if (pdata->object_crc[0]) { + ret = i2c_atmel_write_byte_data(client, + get_object_address(ts, GEN_COMMANDPROCESSOR_T6) + + T6_CFG_CALIBRATE, 0x55); + for (loop_i = 0; loop_i < 10; loop_i++) { + if (!gpio_get_value(intr)) { + ret = i2c_atmel_read(ts->client, get_object_address(ts, + GEN_MESSAGEPROCESSOR_T5), data, 5); + if (data[MSG_RID] == get_rid(ts, GEN_COMMANDPROCESSOR_T6)) + break; + } + msleep(10); + } + if (loop_i == 10) + printk(KERN_INFO "Touch: No checksum read\n"); + else { + for (loop_i = 0; loop_i < 3; loop_i++) { + if (pdata->object_crc[loop_i] != + data[T6_MSG_CHECKSUM + loop_i]) { + printk(KERN_ERR + "CRC Error: %x, %x\n", + pdata->object_crc[loop_i], + data[T6_MSG_CHECKSUM + loop_i]); + break; + } + } + if (loop_i == 3) { + printk(KERN_INFO "CRC passed: "); + for (loop_i = 0; loop_i < 3; loop_i++) + printk("0x%2.2X ", pdata->object_crc[loop_i]); + printk("\n"); + CRC_check = 1; + } + } + } + ts->abs_x_min = pdata->abs_x_min; + ts->abs_x_max = pdata->abs_x_max; + ts->abs_y_min = pdata->abs_y_min; + ts->abs_y_max = pdata->abs_y_max; + ts->abs_pressure_min = pdata->abs_pressure_min; + 
ts->abs_pressure_max = pdata->abs_pressure_max; + ts->abs_width_min = pdata->abs_width_min; + ts->abs_width_max = pdata->abs_width_max; + ts->GCAF_level = pdata->GCAF_level; + if (ts->id->version >= 0x20) + ts->ATCH_EXT = &pdata->config_T8[6]; + ts->filter_level = pdata->filter_level; + + if (usb_get_connect_type()) + ts->status = CONNECTED; + + ts->config_setting[NONE].config_T7 + = ts->config_setting[CONNECTED].config_T7 + = pdata->config_T7; + ts->config_setting[NONE].config_T8 + = ts->config_setting[CONNECTED].config_T8 + = pdata->config_T8; + ts->config_setting[NONE].config_T9 = pdata->config_T9; + ts->config_setting[NONE].config_T22 = pdata->config_T22; + ts->config_setting[NONE].config_T28 = pdata->config_T28; + + if (pdata->noise_config[0]) + for (loop_i = 0; loop_i < 3; loop_i++) + ts->noise_config[loop_i] = pdata->noise_config[loop_i]; + + if (pdata->cable_config[0]) { + ts->config_setting[NONE].config[CB_TCHTHR] = + pdata->config_T9[T9_CFG_TCHTHR]; + ts->config_setting[NONE].config[CB_NOISETHR] = + pdata->config_T22[T22_CFG_NOISETHR]; + ts->config_setting[NONE].config[CB_IDLEGCAFDEPTH] = + pdata->config_T28[T28_CFG_IDLEGCAFDEPTH]; + ts->config_setting[NONE].config[CB_ACTVGCAFDEPTH] = + pdata->config_T28[T28_CFG_ACTVGCAFDEPTH]; + for (loop_i = 0; loop_i < 4; loop_i++) + ts->config_setting[CONNECTED].config[loop_i] = + pdata->cable_config[loop_i]; + ts->GCAF_sample = + ts->config_setting[CONNECTED].config[CB_ACTVGCAFDEPTH]; + if (ts->id->version >= 0x20) + ts->noisethr = pdata->cable_config[CB_TCHTHR] - + pdata->config_T9[T9_CFG_TCHHYST]; + else + ts->noisethr = pdata->cable_config[CB_TCHTHR]; + ts->noisethr_config = + ts->config_setting[CONNECTED].config[CB_NOISETHR]; + } else { + if (pdata->cable_config_T7[0]) + ts->config_setting[CONNECTED].config_T7 = + pdata->cable_config_T7; + if (pdata->cable_config_T8[0]) + ts->config_setting[CONNECTED].config_T8 = + pdata->cable_config_T8; + if (pdata->cable_config_T9[0]) { + ts->config_setting[CONNECTED].config_T9 = + pdata->cable_config_T9; + ts->config_setting[CONNECTED].config_T22 = + pdata->cable_config_T22; + ts->config_setting[CONNECTED].config_T28 = + pdata->cable_config_T28; + ts->GCAF_sample = + ts->config_setting[CONNECTED].config_T28[T28_CFG_ACTVGCAFDEPTH]; + } + if (ts->status == CONNECTED) + ts->noisethr = (ts->id->version >= 0x20) ? + pdata->cable_config_T9[T9_CFG_TCHTHR] - pdata->cable_config_T9[T9_CFG_TCHHYST] : + pdata->cable_config_T9[T9_CFG_TCHTHR]; + else + ts->noisethr = (ts->id->version >= 0x20) ? 
+ pdata->config_T9[T9_CFG_TCHTHR] - pdata->config_T9[T9_CFG_TCHHYST] : + pdata->config_T9[T9_CFG_TCHTHR]; + ts->noisethr_config = pdata->cable_config_T22[T22_CFG_NOISETHR]; + + } + + if (!CRC_check) { + printk(KERN_INFO "Touch: Config reload\n"); + + i2c_atmel_write(ts->client, get_object_address(ts, SPT_CTECONFIG_T28), + pdata->config_T28, get_object_size(ts, SPT_CTECONFIG_T28)); + + ret = i2c_atmel_write_byte_data(client, + get_object_address(ts, GEN_COMMANDPROCESSOR_T6) + + T6_CFG_BACKUPNV, 0x55); + msleep(10); + + ret = i2c_atmel_write_byte_data(client, + get_object_address(ts, GEN_COMMANDPROCESSOR_T6) + + T6_CFG_RESET, 0x11); + msleep(100); + + i2c_atmel_write(ts->client, + get_object_address(ts, GEN_COMMANDPROCESSOR_T6), + pdata->config_T6, + get_object_size(ts, GEN_COMMANDPROCESSOR_T6)); + i2c_atmel_write(ts->client, + get_object_address(ts, GEN_POWERCONFIG_T7), + pdata->config_T7, + get_object_size(ts, GEN_POWERCONFIG_T7)); + i2c_atmel_write(ts->client, + get_object_address(ts, GEN_ACQUISITIONCONFIG_T8), + pdata->config_T8, + get_object_size(ts, GEN_ACQUISITIONCONFIG_T8)); + i2c_atmel_write(ts->client, + get_object_address(ts, TOUCH_MULTITOUCHSCREEN_T9), + pdata->config_T9, + get_object_size(ts, TOUCH_MULTITOUCHSCREEN_T9)); + i2c_atmel_write(ts->client, + get_object_address(ts, TOUCH_KEYARRAY_T15), + pdata->config_T15, + get_object_size(ts, TOUCH_KEYARRAY_T15)); + i2c_atmel_write(ts->client, + get_object_address(ts, SPT_GPIOPWM_T19), + pdata->config_T19, + get_object_size(ts, SPT_GPIOPWM_T19)); + i2c_atmel_write(ts->client, + get_object_address(ts, PROCI_GRIPFACESUPPRESSION_T20), + pdata->config_T20, + get_object_size(ts, PROCI_GRIPFACESUPPRESSION_T20)); + i2c_atmel_write(ts->client, + get_object_address(ts, PROCG_NOISESUPPRESSION_T22), + pdata->config_T22, + get_object_size(ts, PROCG_NOISESUPPRESSION_T22)); + i2c_atmel_write(ts->client, + get_object_address(ts, TOUCH_PROXIMITY_T23), + pdata->config_T23, + get_object_size(ts, TOUCH_PROXIMITY_T23)); + i2c_atmel_write(ts->client, + get_object_address(ts, PROCI_ONETOUCHGESTUREPROCESSOR_T24), + pdata->config_T24, + get_object_size(ts, PROCI_ONETOUCHGESTUREPROCESSOR_T24)); + i2c_atmel_write(ts->client, + get_object_address(ts, SPT_SELFTEST_T25), + pdata->config_T25, + get_object_size(ts, SPT_SELFTEST_T25)); + i2c_atmel_write(ts->client, + get_object_address(ts, PROCI_TWOTOUCHGESTUREPROCESSOR_T27), + pdata->config_T27, + get_object_size(ts, PROCI_TWOTOUCHGESTUREPROCESSOR_T27)); + i2c_atmel_write(ts->client, + get_object_address(ts, SPT_CTECONFIG_T28), + pdata->config_T28, + get_object_size(ts, SPT_CTECONFIG_T28)); + + ret = i2c_atmel_write_byte_data(client, + get_object_address(ts, GEN_COMMANDPROCESSOR_T6) + + T6_CFG_BACKUPNV, 0x55); + + for (loop_i = 0; loop_i < 10; loop_i++) { + if (!gpio_get_value(intr)) + break; + printk(KERN_INFO "Touch: wait for Message(%d)\n", loop_i + 1); + msleep(10); + } + + i2c_atmel_read(client, + get_object_address(ts, GEN_MESSAGEPROCESSOR_T5), data, 7); + printk(KERN_INFO + "Touch: 0x%2.2X 0x%2.2X 0x%2.2X 0x%2.2X 0x%2.2X 0x%2.2X 0x%2.2X\n", + data[0], data[1], data[2], data[3], data[4], data[5], data[6]); + + ret = i2c_atmel_write_byte_data(client, + get_object_address(ts, GEN_COMMANDPROCESSOR_T6) + + T6_CFG_RESET, 0x11); + msleep(100); + + } + + if (ts->status == CONNECTED) { + printk(KERN_INFO "Touch: set cable config\n"); + if (ts->config_setting[CONNECTED].config[0]) { + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, TOUCH_MULTITOUCHSCREEN_T9) + + T9_CFG_TCHTHR, + 
ts->config_setting[CONNECTED].config[CB_TCHTHR]); + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, SPT_CTECONFIG_T28) + + T28_CFG_IDLEGCAFDEPTH, + ts->config_setting[CONNECTED].config[CB_IDLEGCAFDEPTH]); + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, SPT_CTECONFIG_T28) + + T28_CFG_ACTVGCAFDEPTH, + ts->config_setting[CONNECTED].config[CB_ACTVGCAFDEPTH]); + } else { + if (ts->config_setting[CONNECTED].config_T7 != NULL) + i2c_atmel_write(ts->client, + get_object_address(ts, GEN_POWERCONFIG_T7), + ts->config_setting[CONNECTED].config_T7, + get_object_size(ts, GEN_POWERCONFIG_T7)); + if (ts->config_setting[CONNECTED].config_T8 != NULL) + i2c_atmel_write(ts->client, + get_object_address(ts, GEN_ACQUISITIONCONFIG_T8), + ts->config_setting[CONNECTED].config_T8, + get_object_size(ts, GEN_ACQUISITIONCONFIG_T8)); + if (ts->config_setting[CONNECTED].config_T9 != NULL) + i2c_atmel_write(ts->client, + get_object_address(ts, TOUCH_MULTITOUCHSCREEN_T9), + ts->config_setting[CONNECTED].config_T9, + get_object_size(ts, TOUCH_MULTITOUCHSCREEN_T9)); + if (ts->config_setting[CONNECTED].config_T22 != NULL) + i2c_atmel_write(ts->client, + get_object_address(ts, PROCG_NOISESUPPRESSION_T22), + ts->config_setting[CONNECTED].config_T22, + get_object_size(ts, PROCG_NOISESUPPRESSION_T22)); + if (ts->config_setting[CONNECTED].config_T28 != NULL) { + i2c_atmel_write(ts->client, + get_object_address(ts, SPT_CTECONFIG_T28), + ts->config_setting[CONNECTED].config_T28, + get_object_size(ts, SPT_CTECONFIG_T28)); + } + } + } + if (ts->id->version >= 0x20) { + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, TOUCH_MULTITOUCHSCREEN_T9) + T9_CFG_TCHTHR, + ts->config_setting[ts->status].config[0] + 10); + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, GEN_ACQUISITIONCONFIG_T8) + T8_CFG_ATCHCALSTHR, + ts->config_setting[ts->status].config[0] + 5); + } + } + ts->input_dev = input_allocate_device(); + if (ts->input_dev == NULL) { + ret = -ENOMEM; + dev_err(&client->dev, "Failed to allocate input device\n"); + goto err_input_dev_alloc_failed; + } + ts->input_dev->name = "atmel-touchscreen"; + set_bit(EV_SYN, ts->input_dev->evbit); + set_bit(EV_KEY, ts->input_dev->keybit); + set_bit(BTN_TOUCH, ts->input_dev->keybit); + set_bit(BTN_2, ts->input_dev->keybit); + set_bit(EV_ABS, ts->input_dev->evbit); + + input_set_abs_params(ts->input_dev, ABS_MT_POSITION_X, + ts->abs_x_min, ts->abs_x_max, 0, 0); + input_set_abs_params(ts->input_dev, ABS_MT_POSITION_Y, + ts->abs_y_min, ts->abs_y_max, 0, 0); + input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0); + input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0, 30, 0, 0); + input_set_abs_params(ts->input_dev, ABS_MT_PRESSURE, 0, 255, 0, 0); +#ifndef CONFIG_TOUCHSCREEN_COMPATIBLE_REPORT + input_set_abs_params(ts->input_dev, ABS_MT_AMPLITUDE, + 0, ((ts->abs_pressure_max << 16) | ts->abs_width_max), 0, 0); + input_set_abs_params(ts->input_dev, ABS_MT_POSITION, + 0, (BIT(31) | (ts->abs_x_max << 16) | ts->abs_y_max), 0, 0); + input_set_abs_params(ts->input_dev, ABS_MT_PRESSURE, 0, 255, 0, 0); +#endif + + + ret = input_register_device(ts->input_dev); + if (ret) { + dev_err(&client->dev, + "atmel_ts_probe: Unable to register %s input device\n", + ts->input_dev->name); + goto err_input_register_device_failed; + } + + ret = request_irq(client->irq, atmel_ts_irq_handler, IRQF_TRIGGER_LOW, + client->name, ts); + if (ret) + dev_err(&client->dev, "request_irq failed\n"); + +#ifdef CONFIG_HAS_EARLYSUSPEND + 
ts->early_suspend.level = EARLY_SUSPEND_LEVEL_STOP_DRAWING - 1; + ts->early_suspend.suspend = atmel_ts_early_suspend; + ts->early_suspend.resume = atmel_ts_late_resume; + register_early_suspend(&ts->early_suspend); +#endif + + private_ts = ts; +#ifdef ATMEL_EN_SYSFS + atmel_touch_sysfs_init(); +#endif + + dev_info(&client->dev, "Start touchscreen %s in interrupt mode\n", + ts->input_dev->name); + + usb_register_notifier(&cable_status_handler); + + return 0; + +err_input_register_device_failed: + input_free_device(ts->input_dev); + +err_input_dev_alloc_failed: +err_alloc_failed: +err_detect_failed: +err_power_failed: + destroy_workqueue(ts->atmel_wq); + +err_cread_wq_failed: + kfree(ts); + +err_alloc_data_failed: +err_check_functionality_failed: + + return ret; +} + +static int atmel_ts_remove(struct i2c_client *client) +{ + struct atmel_ts_data *ts = i2c_get_clientdata(client); + +#ifdef ATMEL_EN_SYSFS + atmel_touch_sysfs_deinit(); +#endif + + unregister_early_suspend(&ts->early_suspend); + free_irq(client->irq, ts); + + destroy_workqueue(ts->atmel_wq); + input_unregister_device(ts->input_dev); + kfree(ts); + + return 0; +} + +static int atmel_ts_suspend(struct i2c_client *client, pm_message_t mesg) +{ + int ret; + struct atmel_ts_data *ts = i2c_get_clientdata(client); + + printk(KERN_INFO "%s: enter\n", __func__); + + disable_irq(client->irq); + + ret = cancel_work_sync(&ts->work); + if (ret) + enable_irq(client->irq); + + ts->finger_pressed = 0; + ts->finger_count = 0; + ts->first_pressed = 0; + + if (ts->id->version >= 0x20) { + ts->pre_data[0] = 0; + i2c_atmel_write(ts->client, + get_object_address(ts, GEN_ACQUISITIONCONFIG_T8) + T8_CFG_ATCHCALST, + ts->ATCH_EXT, 4); + } + + i2c_atmel_write_byte_data(client, + get_object_address(ts, GEN_POWERCONFIG_T7) + T7_CFG_IDLEACQINT, 0x0); + i2c_atmel_write_byte_data(client, + get_object_address(ts, GEN_POWERCONFIG_T7) + T7_CFG_ACTVACQINT, 0x0); + + return 0; +} + +static int atmel_ts_resume(struct i2c_client *client) +{ + struct atmel_ts_data *ts = i2c_get_clientdata(client); + + if (ts->id->version >= 0x20) { + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, TOUCH_MULTITOUCHSCREEN_T9) + T9_CFG_TCHTHR, + ts->config_setting[ts->status].config[0] + 10); + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, GEN_ACQUISITIONCONFIG_T8) + T8_CFG_ATCHCALSTHR, + ts->config_setting[ts->status].config[0] + 5); + } + + i2c_atmel_write(ts->client, + get_object_address(ts, GEN_POWERCONFIG_T7), + ts->config_setting[ts->status].config_T7, + get_object_size(ts, GEN_POWERCONFIG_T7)); + if (ts->id->version == 0x16) { + if (ts->config_setting[CONNECTED].config[0] && ts->status && + !check_delta(ts)) { + ts->calibration_confirm = 2; + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, GEN_ACQUISITIONCONFIG_T8) + + T8_CFG_ATCHCALST, + ts->config_setting[ts->status].config_T8[T8_CFG_ATCHCALST]); + i2c_atmel_write_byte_data(ts->client, + get_object_address(ts, GEN_ACQUISITIONCONFIG_T8) + + T8_CFG_ATCHCALSTHR, + ts->config_setting[ts->status].config_T8[T8_CFG_ATCHCALSTHR]); + } else { + ts->calibration_confirm = 0; + msleep(1); + i2c_atmel_write_byte_data(client, + get_object_address(ts, GEN_COMMANDPROCESSOR_T6) + + T6_CFG_CALIBRATE, 0x55); + i2c_atmel_write_byte_data(client, + get_object_address(ts, GEN_ACQUISITIONCONFIG_T8) + + T8_CFG_ATCHCALST, 0x0); + i2c_atmel_write_byte_data(client, + get_object_address(ts, GEN_ACQUISITIONCONFIG_T8) + + T8_CFG_ATCHCALSTHR, 0x0); + } + } else { + msleep(1); + i2c_atmel_write_byte_data(client, + 
get_object_address(ts, GEN_COMMANDPROCESSOR_T6) + + T6_CFG_CALIBRATE, 0x55); + } + enable_irq(client->irq); + + return 0; +} + +#ifdef CONFIG_HAS_EARLYSUSPEND +static void atmel_ts_early_suspend(struct early_suspend *h) +{ + struct atmel_ts_data *ts; + ts = container_of(h, struct atmel_ts_data, early_suspend); + atmel_ts_suspend(ts->client, PMSG_SUSPEND); +} + +static void atmel_ts_late_resume(struct early_suspend *h) +{ + struct atmel_ts_data *ts; + ts = container_of(h, struct atmel_ts_data, early_suspend); + atmel_ts_resume(ts->client); +} +#endif + +static const struct i2c_device_id atml_ts_i2c_id[] = { + { ATMEL_QT602240_NAME, 0 }, + { } +}; + +static struct i2c_driver atmel_ts_driver = { + .id_table = atml_ts_i2c_id, + .probe = atmel_ts_probe, + .remove = atmel_ts_remove, +#ifndef CONFIG_HAS_EARLYSUSPEND + .suspend = atmel_ts_suspend, + .resume = atmel_ts_resume, +#endif + .driver = { + .name = ATMEL_QT602240_NAME, + }, +}; + +static int __devinit atmel_ts_init(void) +{ + printk(KERN_INFO "atmel_ts_init():\n"); + return i2c_add_driver(&atmel_ts_driver); +} + +static void __exit atmel_ts_exit(void) +{ + i2c_del_driver(&atmel_ts_driver); +} + +module_init(atmel_ts_init); +module_exit(atmel_ts_exit); + +MODULE_DESCRIPTION("ATMEL Touch driver"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/input/touchscreen/cyttsp-i2c.c b/drivers/input/touchscreen/cyttsp-i2c.c new file mode 100644 index 0000000000000..b5908e4cd9f5a --- /dev/null +++ b/drivers/input/touchscreen/cyttsp-i2c.c @@ -0,0 +1,2136 @@ +/* Source for: + * Cypress TrueTouch(TM) Standard Product I2C touchscreen driver. + * drivers/input/touchscreen/cyttsp-i2c.c + * + * Copyright (C) 2009, 2010 Cypress Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2, and only version 2, as published by the + * Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Cypress reserves the right to make changes without further notice + * to the materials described herein. Cypress does not assume any + * liability arising out of the application described herein. 
+ * + * Contact Cypress Semiconductor at www.cypress.com + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_HAS_EARLYSUSPEND +#include +#endif /* CONFIG_HAS_EARLYSUSPEND */ + +#define CY_DECLARE_GLOBALS + +#include + +uint32_t cyttsp_tsdebug1 = 0xff; +module_param_named(tsdebug1, cyttsp_tsdebug1, uint, 0664); + +/* CY TTSP I2C Driver private data */ +struct cyttsp { + struct i2c_client *client; + struct input_dev *input; + struct work_struct work; + struct timer_list timer; + struct mutex mutex; + char phys[32]; + struct cyttsp_platform_data *platform_data; + u8 num_prv_st_tch; + u16 act_trk[CY_NUM_TRK_ID]; + u16 prv_st_tch[CY_NUM_ST_TCH_ID]; + u16 prv_mt_tch[CY_NUM_MT_TCH_ID]; + u16 prv_mt_pos[CY_NUM_TRK_ID][2]; + atomic_t irq_enabled; + char cyttsp_fw_ver[10]; + bool cyttsp_update_fw; +#ifdef CONFIG_HAS_EARLYSUSPEND + struct early_suspend early_suspend; +#endif /* CONFIG_HAS_EARLYSUSPEND */ +}; +static u8 irq_cnt; /* comparison counter with register valuw */ +static u32 irq_cnt_total; /* total interrupts */ +static u32 irq_err_cnt; /* count number of touch interrupts with err */ +#define CY_IRQ_CNT_MASK 0x000000FF /* mapped for sizeof count in reg */ +#define CY_IRQ_CNT_REG 0x00 /* tt_undef[0]=reg 0x1B - Gen3 only */ + +#ifdef CONFIG_HAS_EARLYSUSPEND +static void cyttsp_early_suspend(struct early_suspend *handler); +static void cyttsp_late_resume(struct early_suspend *handler); +#endif /* CONFIG_HAS_EARLYSUSPEND */ + +static struct workqueue_struct *cyttsp_ts_wq; + + +/* **************************************************************************** + * Prototypes for static functions + * ************************************************************************** */ +static void cyttsp_xy_worker(struct work_struct *work); +static irqreturn_t cyttsp_irq(int irq, void *handle); +static int cyttsp_inlist(u16 prev_track[], + u8 cur_trk_id, u8 *prev_loc, u8 num_touches); +static int cyttsp_next_avail_inlist(u16 cur_trk[], + u8 *new_loc, u8 num_touches); +static int cyttsp_putbl(struct cyttsp *ts, int show, + int show_status, int show_version, int show_cid); +static int __devinit cyttsp_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int __devexit cyttsp_remove(struct i2c_client *client); +static int cyttsp_resume(struct device *dev); +static int cyttsp_suspend(struct device *dev); + +/* Static variables */ +static struct cyttsp_gen3_xydata_t g_xy_data; +static struct cyttsp_bootloader_data_t g_bl_data; +static struct cyttsp_sysinfo_data_t g_sysinfo_data; +static const struct i2c_device_id cyttsp_id[] = { + { CY_I2C_NAME, 0 }, { } +}; +static u8 bl_cmd[] = { + CY_BL_FILE0, CY_BL_CMD, CY_BL_EXIT, + CY_BL_KEY0, CY_BL_KEY1, CY_BL_KEY2, + CY_BL_KEY3, CY_BL_KEY4, CY_BL_KEY5, + CY_BL_KEY6, CY_BL_KEY7}; + +MODULE_DEVICE_TABLE(i2c, cyttsp_id); + +static const struct dev_pm_ops cyttsp_pm_ops = { + .suspend = cyttsp_suspend, + .resume = cyttsp_resume, +}; + +static struct i2c_driver cyttsp_driver = { + .driver = { + .name = CY_I2C_NAME, + .owner = THIS_MODULE, + .pm = &cyttsp_pm_ops, + }, + .probe = cyttsp_probe, + .remove = __devexit_p(cyttsp_remove), + .id_table = cyttsp_id, +}; + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard touchscreen driver"); +MODULE_AUTHOR("Cypress"); + +static ssize_t cyttsp_irq_status(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = container_of(dev, 
struct i2c_client, dev); + struct cyttsp *ts = i2c_get_clientdata(client); + return sprintf(buf, "%u\n", atomic_read(&ts->irq_enabled)); +} + +static ssize_t cyttsp_irq_enable(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct i2c_client *client = container_of(dev, struct i2c_client, dev); + struct cyttsp *ts = i2c_get_clientdata(client); + int err = 0; + unsigned long value; + + if (size > 2) + return -EINVAL; + + err = strict_strtoul(buf, 10, &value); + if (err != 0) + return err; + + switch (value) { + case 0: + if (atomic_cmpxchg(&ts->irq_enabled, 1, 0)) { + pr_info("touch irq disabled!\n"); + disable_irq_nosync(ts->client->irq); + } + err = size; + break; + case 1: + if (!atomic_cmpxchg(&ts->irq_enabled, 0, 1)) { + pr_info("touch irq enabled!\n"); + enable_irq(ts->client->irq); + } + err = size; + break; + default: + pr_info("cyttsp_irq_enable failed -> irq_enabled = %d\n", + atomic_read(&ts->irq_enabled)); + err = -EINVAL; + break; + } + + return err; +} + +static DEVICE_ATTR(irq_enable, 0777, cyttsp_irq_status, cyttsp_irq_enable); + +static ssize_t cyttsp_fw_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cyttsp *ts = dev_get_drvdata(dev); + return sprintf(buf, "%s\n", ts->cyttsp_fw_ver); +} + +static DEVICE_ATTR(cyttsp_fw_ver, 0777, cyttsp_fw_show, NULL); + +static ssize_t cyttsp_update_fw_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cyttsp *ts = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", ts->cyttsp_update_fw); +} + +static ssize_t cyttsp_update_fw_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct cyttsp *ts = dev_get_drvdata(dev); + const struct firmware *cyttsp_fw = NULL; + unsigned long val; + int i, data_len, rc; + const u8 *data = NULL; + + if (size > 2) + return -EINVAL; + + rc = strict_strtoul(buf, 10, &val); + if (rc != 0) + return rc; + + if ((ts->cyttsp_update_fw ^ val) && val) { + ts->cyttsp_update_fw = 1; + /* read fw file */ + if (request_firmware(&cyttsp_fw, "ttsp.fw", dev)) + pr_err("%s: ttsp.fw request failed\n", __func__); + else { + data = cyttsp_fw->data; + data_len = cyttsp_fw->size; + for (i = 0; i < data_len; i++) + pr_debug("%x ", data[i]); + } + ts->cyttsp_update_fw = 0; + } + return size; +} + +static DEVICE_ATTR(cyttsp_update_fw, 0777, cyttsp_update_fw_show, + cyttsp_update_fw_store); + +/* The cyttsp_xy_worker function reads the XY coordinates and sends them to + * the input layer. It is scheduled from the interrupt (or timer). 
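+ *
+ * Outline of the steps below (descriptive summary of the code that
+ * follows): read the Gen3 XY register block over I2C, retrying up to
+ * CY_NUM_RETRY times; service the optional handshake bit and the
+ * IRQ-count bookkeeping; recover the part if it has dropped back into
+ * bootloader mode; translate Gen2 reports into the Gen3 layout; then
+ * post single-touch and multi-touch events and finally re-enable the
+ * interrupt (or restart the polling timer) on exit.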
+ */ +void cyttsp_xy_worker(struct work_struct *work) +{ + struct cyttsp *ts = container_of(work, struct cyttsp, work); + u8 id, tilt, rev_x, rev_y; + u8 i, loc; + u8 prv_tch; /* number of previous touches */ + u8 cur_tch; /* number of current touches */ + u16 tmp_trk[CY_NUM_MT_TCH_ID]; + u16 snd_trk[CY_NUM_MT_TCH_ID]; + u16 cur_trk[CY_NUM_TRK_ID]; + u16 cur_st_tch[CY_NUM_ST_TCH_ID]; + u16 cur_mt_tch[CY_NUM_MT_TCH_ID]; + /* if NOT CY_USE_TRACKING_ID then + * only uses CY_NUM_MT_TCH_ID positions */ + u16 cur_mt_pos[CY_NUM_TRK_ID][2]; + /* if NOT CY_USE_TRACKING_ID then + * only uses CY_NUM_MT_TCH_ID positions */ + u8 cur_mt_z[CY_NUM_TRK_ID]; + u8 curr_tool_width; + u16 st_x1, st_y1; + u8 st_z1; + u16 st_x2, st_y2; + u8 st_z2; + s32 retval; + + cyttsp_xdebug("TTSP worker start 1:\n"); + + /* get event data from CYTTSP device */ + i = CY_NUM_RETRY; + do { + retval = i2c_smbus_read_i2c_block_data(ts->client, + CY_REG_BASE, + sizeof(struct cyttsp_gen3_xydata_t), (u8 *)&g_xy_data); + } while ((retval < CY_OK) && --i); + + if (retval < CY_OK) { + /* return immediately on + * failure to read device on the i2c bus */ + goto exit_xy_worker; + } + + cyttsp_xdebug("TTSP worker start 2:\n"); + + /* compare own irq counter with the device irq counter */ + if (ts->client->irq) { + u8 host_reg; + u8 cur_cnt; + if (ts->platform_data->use_hndshk) { + + host_reg = g_xy_data.hst_mode & CY_HNDSHK_BIT ? + g_xy_data.hst_mode & ~CY_HNDSHK_BIT : + g_xy_data.hst_mode | CY_HNDSHK_BIT; + retval = i2c_smbus_write_i2c_block_data(ts->client, + CY_REG_BASE, sizeof(host_reg), &host_reg); + } + cur_cnt = g_xy_data.tt_undef[CY_IRQ_CNT_REG]; + irq_cnt_total++; + irq_cnt++; + if (irq_cnt != cur_cnt) { + irq_err_cnt++; + cyttsp_debug("i_c_ER: dv=%d fw=%d hm=%02X t=%lu te=%lu\n", \ + irq_cnt, \ + cur_cnt, g_xy_data.hst_mode, \ + (unsigned long)irq_cnt_total, \ + (unsigned long)irq_err_cnt); + } else { + cyttsp_debug("i_c_ok: dv=%d fw=%d hm=%02X t=%lu te=%lu\n", \ + irq_cnt, \ + cur_cnt, g_xy_data.hst_mode, \ + (unsigned long)irq_cnt_total, \ + (unsigned long)irq_err_cnt); + } + irq_cnt = cur_cnt; + } + + /* Get the current num touches and return if there are no touches */ + if ((GET_BOOTLOADERMODE(g_xy_data.tt_mode) == 1) || + (GET_HSTMODE(g_xy_data.hst_mode) != CY_OK)) { + u8 host_reg, tries; + /* the TTSP device has suffered spurious reset or mode switch */ + cyttsp_debug( \ + "Spurious err opmode (tt_mode=%02X hst_mode=%02X)\n", \ + g_xy_data.tt_mode, g_xy_data.hst_mode); + cyttsp_debug("Reset TTSP Device; Terminating active tracks\n"); + /* terminate all active tracks */ + cur_tch = CY_NTCH; + /* reset TTSP part and take it back out of Bootloader mode */ + /* reset TTSP Device back to bootloader mode */ + host_reg = CY_SOFT_RESET_MODE; + retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE, + sizeof(host_reg), &host_reg); + /* wait for TTSP Device to complete reset back to bootloader */ + tries = 0; + do { + mdelay(1); + cyttsp_putbl(ts, 1, false, false, false); + } while (g_bl_data.bl_status != 0x10 && + g_bl_data.bl_status != 0x11 && + tries++ < 100); + retval = cyttsp_putbl(ts, 1, true, true, true); + /* switch back to operational mode */ + /* take TTSP device out of bootloader mode; + * switch back to TrueTouch operational mode */ + if (!(retval < CY_OK)) { + int tries; + retval = i2c_smbus_write_i2c_block_data(ts->client, + CY_REG_BASE, + sizeof(bl_cmd), bl_cmd); + /* wait for TTSP Device to complete + * switch to Operational mode */ + tries = 0; + do { + mdelay(100); + cyttsp_putbl(ts, 2, false, false, 
false); + } while (GET_BOOTLOADERMODE(g_bl_data.bl_status) && + tries++ < 100); + cyttsp_putbl(ts, 2, true, false, false); + } + goto exit_xy_worker; + } else { + cur_tch = GET_NUM_TOUCHES(g_xy_data.tt_stat); + if (IS_LARGE_AREA(g_xy_data.tt_stat)) { + /* terminate all active tracks */ + cur_tch = CY_NTCH; + cyttsp_debug("Large obj detect (tt_stat=0x%02X). Terminate act trks\n", \ + g_xy_data.tt_stat); + } else if (cur_tch > CY_NUM_MT_TCH_ID) { + /* if the number of fingers on the touch surface + * is more than the maximum then + * there will be no new track information + * even for the original touches. + * Therefore, terminate all active tracks. + */ + cur_tch = CY_NTCH; + cyttsp_debug("Num touch err (tt_stat=0x%02X). Terminate act trks\n", \ + g_xy_data.tt_stat); + } + } + + /* set tool size */ + curr_tool_width = CY_SMALL_TOOL_WIDTH; + + /* translate Gen2 interface data into comparable Gen3 data */ + if (ts->platform_data->gen == CY_GEN2) { + struct cyttsp_gen2_xydata_t *pxy_gen2_data; + pxy_gen2_data = (struct cyttsp_gen2_xydata_t *)(&g_xy_data); + + /* use test data? */ + cyttsp_testdat(&g_xy_data, &tt_gen2_testray, \ + sizeof(struct cyttsp_gen3_xydata_t)); + + if (pxy_gen2_data->evnt_idx == CY_GEN2_NOTOUCH) { + cur_tch = 0; + } else if (cur_tch == CY_GEN2_GHOST) { + cur_tch = 0; + } else if (cur_tch == CY_GEN2_2TOUCH) { + /* stuff artificial track ID1 and ID2 */ + g_xy_data.touch12_id = 0x12; + g_xy_data.z1 = CY_MAXZ; + g_xy_data.z2 = CY_MAXZ; + cur_tch--; /* 2 touches */ + } else if (cur_tch == CY_GEN2_1TOUCH) { + /* stuff artificial track ID1 and ID2 */ + g_xy_data.touch12_id = 0x12; + g_xy_data.z1 = CY_MAXZ; + g_xy_data.z2 = CY_NTCH; + if (pxy_gen2_data->evnt_idx == CY_GEN2_TOUCH2) { + /* push touch 2 data into touch1 + * (first finger up; second finger down) */ + /* stuff artificial track ID1 for touch2 info */ + g_xy_data.touch12_id = 0x20; + /* stuff touch 1 with touch 2 coordinate data */ + g_xy_data.x1 = g_xy_data.x2; + g_xy_data.y1 = g_xy_data.y2; + } + } else { + cur_tch = 0; + } + } else { + /* use test data? 
*/ + cyttsp_testdat(&g_xy_data, &tt_gen3_testray, \ + sizeof(struct cyttsp_gen3_xydata_t)); + } + + + + /* clear current active track ID array and count previous touches */ + for (id = 0, prv_tch = CY_NTCH; + id < CY_NUM_TRK_ID; id++) { + cur_trk[id] = CY_NTCH; + prv_tch += ts->act_trk[id]; + } + + /* send no events if no previous touches and no new touches */ + if ((prv_tch == CY_NTCH) && + ((cur_tch == CY_NTCH) || + (cur_tch > CY_NUM_MT_TCH_ID))) { + goto exit_xy_worker; + } + + cyttsp_debug("prev=%d curr=%d\n", prv_tch, cur_tch); + + for (id = 0; id < CY_NUM_ST_TCH_ID; id++) { + /* clear current single touches array */ + cur_st_tch[id] = CY_IGNR_TCH; + } + + /* clear single touch positions */ + st_x1 = CY_NTCH; + st_y1 = CY_NTCH; + st_z1 = CY_NTCH; + st_x2 = CY_NTCH; + st_y2 = CY_NTCH; + st_z2 = CY_NTCH; + + for (id = 0; id < CY_NUM_MT_TCH_ID; id++) { + /* clear current multi-touches array and + * multi-touch positions/z */ + cur_mt_tch[id] = CY_IGNR_TCH; + } + + if (ts->platform_data->use_trk_id) { + for (id = 0; id < CY_NUM_MT_TCH_ID; id++) { + cur_mt_pos[id][CY_XPOS] = 0; + cur_mt_pos[id][CY_YPOS] = 0; + cur_mt_z[id] = 0; + } + } else { + for (id = 0; id < CY_NUM_TRK_ID; id++) { + cur_mt_pos[id][CY_XPOS] = 0; + cur_mt_pos[id][CY_YPOS] = 0; + cur_mt_z[id] = 0; + } + } + + /* Determine if display is tilted */ + if (FLIP_DATA(ts->platform_data->flags)) + tilt = true; + else + tilt = false; + + /* Check for switch in origin */ + if (REVERSE_X(ts->platform_data->flags)) + rev_x = true; + else + rev_x = false; + + if (REVERSE_Y(ts->platform_data->flags)) + rev_y = true; + else + rev_y = false; + + if (cur_tch) { + struct cyttsp_gen2_xydata_t *pxy_gen2_data; + struct cyttsp_gen3_xydata_t *pxy_gen3_data; + switch (ts->platform_data->gen) { + case CY_GEN2: { + pxy_gen2_data = + (struct cyttsp_gen2_xydata_t *)(&g_xy_data); + cyttsp_xdebug("TTSP Gen2 report:\n"); + cyttsp_xdebug("%02X %02X %02X\n", \ + pxy_gen2_data->hst_mode, \ + pxy_gen2_data->tt_mode, \ + pxy_gen2_data->tt_stat); + cyttsp_xdebug("%04X %04X %02X %02X\n", \ + pxy_gen2_data->x1, \ + pxy_gen2_data->y1, \ + pxy_gen2_data->z1, \ + pxy_gen2_data->evnt_idx); + cyttsp_xdebug("%04X %04X %02X\n", \ + pxy_gen2_data->x2, \ + pxy_gen2_data->y2, \ + pxy_gen2_data->tt_undef1); + cyttsp_xdebug("%02X %02X %02X\n", \ + pxy_gen2_data->gest_cnt, \ + pxy_gen2_data->gest_id, \ + pxy_gen2_data->gest_set); + break; + } + case CY_GEN3: + default: { + pxy_gen3_data = + (struct cyttsp_gen3_xydata_t *)(&g_xy_data); + cyttsp_xdebug("TTSP Gen3 report:\n"); + cyttsp_xdebug("%02X %02X %02X\n", \ + pxy_gen3_data->hst_mode, + pxy_gen3_data->tt_mode, + pxy_gen3_data->tt_stat); + cyttsp_xdebug("%04X %04X %02X %02X", \ + pxy_gen3_data->x1, + pxy_gen3_data->y1, + pxy_gen3_data->z1, \ + pxy_gen3_data->touch12_id); + cyttsp_xdebug("%04X %04X %02X\n", \ + pxy_gen3_data->x2, \ + pxy_gen3_data->y2, \ + pxy_gen3_data->z2); + cyttsp_xdebug("%02X %02X %02X\n", \ + pxy_gen3_data->gest_cnt, \ + pxy_gen3_data->gest_id, \ + pxy_gen3_data->gest_set); + cyttsp_xdebug("%04X %04X %02X %02X\n", \ + pxy_gen3_data->x3, \ + pxy_gen3_data->y3, \ + pxy_gen3_data->z3, \ + pxy_gen3_data->touch34_id); + cyttsp_xdebug("%04X %04X %02X\n", \ + pxy_gen3_data->x4, \ + pxy_gen3_data->y4, \ + pxy_gen3_data->z4); + break; + } + } + } + + /* process the touches */ + switch (cur_tch) { + case 4: { + g_xy_data.x4 = be16_to_cpu(g_xy_data.x4); + g_xy_data.y4 = be16_to_cpu(g_xy_data.y4); + if (tilt) + FLIP_XY(g_xy_data.x4, g_xy_data.y4); + + if (rev_x) { + g_xy_data.x4 = INVERT_X(g_xy_data.x4, + 
ts->platform_data->panel_maxx); + if (g_xy_data.x4 < 0) + pr_debug("X value is negative. Please configure" + " maxx in platform data structure\n"); + } + if (rev_y) { + g_xy_data.y4 = INVERT_X(g_xy_data.y4, + ts->platform_data->panel_maxy); + if (g_xy_data.y4 < 0) + pr_debug("Y value is negative. Please configure" + " maxy in platform data structure\n"); + + } + id = GET_TOUCH4_ID(g_xy_data.touch34_id); + if (ts->platform_data->use_trk_id) { + cur_mt_pos[CY_MT_TCH4_IDX][CY_XPOS] = + g_xy_data.x4; + cur_mt_pos[CY_MT_TCH4_IDX][CY_YPOS] = + g_xy_data.y4; + cur_mt_z[CY_MT_TCH4_IDX] = g_xy_data.z4; + } else { + cur_mt_pos[id][CY_XPOS] = g_xy_data.x4; + cur_mt_pos[id][CY_YPOS] = g_xy_data.y4; + cur_mt_z[id] = g_xy_data.z4; + } + cur_mt_tch[CY_MT_TCH4_IDX] = id; + cur_trk[id] = CY_TCH; + if (ts->prv_st_tch[CY_ST_FNGR1_IDX] < + CY_NUM_TRK_ID) { + if (ts->prv_st_tch[CY_ST_FNGR1_IDX] == id) { + st_x1 = g_xy_data.x4; + st_y1 = g_xy_data.y4; + st_z1 = g_xy_data.z4; + cur_st_tch[CY_ST_FNGR1_IDX] = id; + } else if (ts->prv_st_tch[CY_ST_FNGR2_IDX] == id) { + st_x2 = g_xy_data.x4; + st_y2 = g_xy_data.y4; + st_z2 = g_xy_data.z4; + cur_st_tch[CY_ST_FNGR2_IDX] = id; + } + } + cyttsp_xdebug("4th XYZ:% 3d,% 3d,% 3d ID:% 2d\n\n", \ + g_xy_data.x4, g_xy_data.y4, g_xy_data.z4, \ + (g_xy_data.touch34_id & 0x0F)); + /* do not break */ + } + case 3: { + g_xy_data.x3 = be16_to_cpu(g_xy_data.x3); + g_xy_data.y3 = be16_to_cpu(g_xy_data.y3); + if (tilt) + FLIP_XY(g_xy_data.x3, g_xy_data.y3); + + if (rev_x) { + g_xy_data.x3 = INVERT_X(g_xy_data.x3, + ts->platform_data->panel_maxx); + if (g_xy_data.x3 < 0) + pr_debug("X value is negative. Please configure" + " maxx in platform data structure\n"); + + } + if (rev_y) { + g_xy_data.y3 = INVERT_X(g_xy_data.y3, + ts->platform_data->panel_maxy); + if (g_xy_data.y3 < 0) + pr_debug("Y value is negative. Please configure" + " maxy in platform data structure\n"); + + } + id = GET_TOUCH3_ID(g_xy_data.touch34_id); + if (ts->platform_data->use_trk_id) { + cur_mt_pos[CY_MT_TCH3_IDX][CY_XPOS] = + g_xy_data.x3; + cur_mt_pos[CY_MT_TCH3_IDX][CY_YPOS] = + g_xy_data.y3; + cur_mt_z[CY_MT_TCH3_IDX] = g_xy_data.z3; + } else { + cur_mt_pos[id][CY_XPOS] = g_xy_data.x3; + cur_mt_pos[id][CY_YPOS] = g_xy_data.y3; + cur_mt_z[id] = g_xy_data.z3; + } + cur_mt_tch[CY_MT_TCH3_IDX] = id; + cur_trk[id] = CY_TCH; + if (ts->prv_st_tch[CY_ST_FNGR1_IDX] < + CY_NUM_TRK_ID) { + if (ts->prv_st_tch[CY_ST_FNGR1_IDX] == id) { + st_x1 = g_xy_data.x3; + st_y1 = g_xy_data.y3; + st_z1 = g_xy_data.z3; + cur_st_tch[CY_ST_FNGR1_IDX] = id; + } else if (ts->prv_st_tch[CY_ST_FNGR2_IDX] == id) { + st_x2 = g_xy_data.x3; + st_y2 = g_xy_data.y3; + st_z2 = g_xy_data.z3; + cur_st_tch[CY_ST_FNGR2_IDX] = id; + } + } + cyttsp_xdebug("3rd XYZ:% 3d,% 3d,% 3d ID:% 2d\n", \ + g_xy_data.x3, g_xy_data.y3, g_xy_data.z3, \ + ((g_xy_data.touch34_id >> 4) & 0x0F)); + /* do not break */ + } + case 2: { + g_xy_data.x2 = be16_to_cpu(g_xy_data.x2); + g_xy_data.y2 = be16_to_cpu(g_xy_data.y2); + if (tilt) + FLIP_XY(g_xy_data.x2, g_xy_data.y2); + + if (rev_x) { + g_xy_data.x2 = INVERT_X(g_xy_data.x2, + ts->platform_data->panel_maxx); + if (g_xy_data.x2 < 0) + pr_debug("X value is negative. Please configure" + " maxx in platform data structure\n"); + } + if (rev_y) { + g_xy_data.y2 = INVERT_X(g_xy_data.y2, + ts->platform_data->panel_maxy); + if (g_xy_data.y2 < 0) + pr_debug("Y value is negative. 
Please configure" + " maxy in platform data structure\n"); + } + id = GET_TOUCH2_ID(g_xy_data.touch12_id); + if (ts->platform_data->use_trk_id) { + cur_mt_pos[CY_MT_TCH2_IDX][CY_XPOS] = + g_xy_data.x2; + cur_mt_pos[CY_MT_TCH2_IDX][CY_YPOS] = + g_xy_data.y2; + cur_mt_z[CY_MT_TCH2_IDX] = g_xy_data.z2; + } else { + cur_mt_pos[id][CY_XPOS] = g_xy_data.x2; + cur_mt_pos[id][CY_YPOS] = g_xy_data.y2; + cur_mt_z[id] = g_xy_data.z2; + } + cur_mt_tch[CY_MT_TCH2_IDX] = id; + cur_trk[id] = CY_TCH; + if (ts->prv_st_tch[CY_ST_FNGR1_IDX] < + CY_NUM_TRK_ID) { + if (ts->prv_st_tch[CY_ST_FNGR1_IDX] == id) { + st_x1 = g_xy_data.x2; + st_y1 = g_xy_data.y2; + st_z1 = g_xy_data.z2; + cur_st_tch[CY_ST_FNGR1_IDX] = id; + } else if (ts->prv_st_tch[CY_ST_FNGR2_IDX] == id) { + st_x2 = g_xy_data.x2; + st_y2 = g_xy_data.y2; + st_z2 = g_xy_data.z2; + cur_st_tch[CY_ST_FNGR2_IDX] = id; + } + } + cyttsp_xdebug("2nd XYZ:% 3d,% 3d,% 3d ID:% 2d\n", \ + g_xy_data.x2, g_xy_data.y2, g_xy_data.z2, \ + (g_xy_data.touch12_id & 0x0F)); + /* do not break */ + } + case 1: { + g_xy_data.x1 = be16_to_cpu(g_xy_data.x1); + g_xy_data.y1 = be16_to_cpu(g_xy_data.y1); + if (tilt) + FLIP_XY(g_xy_data.x1, g_xy_data.y1); + + if (rev_x) { + g_xy_data.x1 = INVERT_X(g_xy_data.x1, + ts->platform_data->panel_maxx); + if (g_xy_data.x1 < 0) + pr_debug("X value is negative. Please configure" + " maxx in platform data structure\n"); + } + if (rev_y) { + g_xy_data.y1 = INVERT_X(g_xy_data.y1, + ts->platform_data->panel_maxy); + if (g_xy_data.y1 < 0) + pr_debug("Y value is negative. Please configure" + " maxy in platform data structure"); + } + id = GET_TOUCH1_ID(g_xy_data.touch12_id); + if (ts->platform_data->use_trk_id) { + cur_mt_pos[CY_MT_TCH1_IDX][CY_XPOS] = + g_xy_data.x1; + cur_mt_pos[CY_MT_TCH1_IDX][CY_YPOS] = + g_xy_data.y1; + cur_mt_z[CY_MT_TCH1_IDX] = g_xy_data.z1; + } else { + cur_mt_pos[id][CY_XPOS] = g_xy_data.x1; + cur_mt_pos[id][CY_YPOS] = g_xy_data.y1; + cur_mt_z[id] = g_xy_data.z1; + } + cur_mt_tch[CY_MT_TCH1_IDX] = id; + cur_trk[id] = CY_TCH; + if (ts->prv_st_tch[CY_ST_FNGR1_IDX] < + CY_NUM_TRK_ID) { + if (ts->prv_st_tch[CY_ST_FNGR1_IDX] == id) { + st_x1 = g_xy_data.x1; + st_y1 = g_xy_data.y1; + st_z1 = g_xy_data.z1; + cur_st_tch[CY_ST_FNGR1_IDX] = id; + } else if (ts->prv_st_tch[CY_ST_FNGR2_IDX] == id) { + st_x2 = g_xy_data.x1; + st_y2 = g_xy_data.y1; + st_z2 = g_xy_data.z1; + cur_st_tch[CY_ST_FNGR2_IDX] = id; + } + } + cyttsp_xdebug("1st XYZ:% 3d,% 3d,% 3d ID:% 2d\n", \ + g_xy_data.x1, g_xy_data.y1, g_xy_data.z1, \ + ((g_xy_data.touch12_id >> 4) & 0x0F)); + break; + } + case 0: + default:{ + break; + } + } + + /* handle Single Touch signals */ + if (ts->platform_data->use_st) { + cyttsp_xdebug("ST STEP 0 - ST1 ID=%d ST2 ID=%d\n", \ + cur_st_tch[CY_ST_FNGR1_IDX], \ + cur_st_tch[CY_ST_FNGR2_IDX]); + if (cur_st_tch[CY_ST_FNGR1_IDX] > CY_NUM_TRK_ID) { + /* reassign finger 1 and 2 positions to new tracks */ + if (cur_tch > 0) { + /* reassign st finger1 */ + if (ts->platform_data->use_trk_id) { + id = CY_MT_TCH1_IDX; + cur_st_tch[CY_ST_FNGR1_IDX] = cur_mt_tch[id]; + } else { + id = GET_TOUCH1_ID(g_xy_data.touch12_id); + cur_st_tch[CY_ST_FNGR1_IDX] = id; + } + st_x1 = cur_mt_pos[id][CY_XPOS]; + st_y1 = cur_mt_pos[id][CY_YPOS]; + st_z1 = cur_mt_z[id]; + cyttsp_xdebug("ST STEP 1 - ST1 ID=%3d\n", \ + cur_st_tch[CY_ST_FNGR1_IDX]); + if ((cur_tch > 1) && + (cur_st_tch[CY_ST_FNGR2_IDX] > + CY_NUM_TRK_ID)) { + /* reassign st finger2 */ + if (cur_tch > 1) { + if (ts->platform_data->use_trk_id) { + id = CY_MT_TCH2_IDX; + cur_st_tch[CY_ST_FNGR2_IDX] = 
cur_mt_tch[id]; + } else { + id = GET_TOUCH2_ID(g_xy_data.touch12_id); + cur_st_tch[CY_ST_FNGR2_IDX] = id; + } + st_x2 = cur_mt_pos[id][CY_XPOS]; + st_y2 = cur_mt_pos[id][CY_YPOS]; + st_z2 = cur_mt_z[id]; + cyttsp_xdebug("ST STEP 2 - ST2 ID=%3d\n", \ + cur_st_tch[CY_ST_FNGR2_IDX]); + } + } + } + } else if (cur_st_tch[CY_ST_FNGR2_IDX] > CY_NUM_TRK_ID) { + if (cur_tch > 1) { + /* reassign st finger2 */ + if (ts->platform_data->use_trk_id) { + /* reassign st finger2 */ + id = CY_MT_TCH2_IDX; + cur_st_tch[CY_ST_FNGR2_IDX] = + cur_mt_tch[id]; + } else { + /* reassign st finger2 */ + id = GET_TOUCH2_ID(g_xy_data.touch12_id); + cur_st_tch[CY_ST_FNGR2_IDX] = id; + } + st_x2 = cur_mt_pos[id][CY_XPOS]; + st_y2 = cur_mt_pos[id][CY_YPOS]; + st_z2 = cur_mt_z[id]; + cyttsp_xdebug("ST STEP 3 - ST2 ID=%3d\n", \ + cur_st_tch[CY_ST_FNGR2_IDX]); + } + } + /* if the 1st touch is missing and there is a 2nd touch, + * then set the 1st touch to 2nd touch and terminate 2nd touch + */ + if ((cur_st_tch[CY_ST_FNGR1_IDX] > CY_NUM_TRK_ID) && + (cur_st_tch[CY_ST_FNGR2_IDX] < CY_NUM_TRK_ID)) { + st_x1 = st_x2; + st_y1 = st_y2; + st_z1 = st_z2; + cur_st_tch[CY_ST_FNGR1_IDX] = + cur_st_tch[CY_ST_FNGR2_IDX]; + cur_st_tch[CY_ST_FNGR2_IDX] = + CY_IGNR_TCH; + } + /* if the 2nd touch ends up equal to the 1st touch, + * then just report a single touch */ + if (cur_st_tch[CY_ST_FNGR1_IDX] == + cur_st_tch[CY_ST_FNGR2_IDX]) { + cur_st_tch[CY_ST_FNGR2_IDX] = + CY_IGNR_TCH; + } + /* set Single Touch current event signals */ + if (cur_st_tch[CY_ST_FNGR1_IDX] < CY_NUM_TRK_ID) { + input_report_abs(ts->input, + ABS_X, st_x1); + input_report_abs(ts->input, + ABS_Y, st_y1); + input_report_abs(ts->input, + ABS_PRESSURE, st_z1); + input_report_key(ts->input, + BTN_TOUCH, + CY_TCH); + input_report_abs(ts->input, + ABS_TOOL_WIDTH, + curr_tool_width); + cyttsp_debug("ST->F1:%3d X:%3d Y:%3d Z:%3d\n", \ + cur_st_tch[CY_ST_FNGR1_IDX], \ + st_x1, st_y1, st_z1); + if (cur_st_tch[CY_ST_FNGR2_IDX] < CY_NUM_TRK_ID) { + input_report_key(ts->input, BTN_2, CY_TCH); + input_report_abs(ts->input, ABS_HAT0X, st_x2); + input_report_abs(ts->input, ABS_HAT0Y, st_y2); + cyttsp_debug("ST->F2:%3d X:%3d Y:%3d Z:%3d\n", \ + cur_st_tch[CY_ST_FNGR2_IDX], + st_x2, st_y2, st_z2); + } else { + input_report_key(ts->input, + BTN_2, + CY_NTCH); + } + } else { + input_report_abs(ts->input, ABS_PRESSURE, CY_NTCH); + input_report_key(ts->input, BTN_TOUCH, CY_NTCH); + input_report_key(ts->input, BTN_2, CY_NTCH); + } + /* update platform data for the current single touch info */ + ts->prv_st_tch[CY_ST_FNGR1_IDX] = cur_st_tch[CY_ST_FNGR1_IDX]; + ts->prv_st_tch[CY_ST_FNGR2_IDX] = cur_st_tch[CY_ST_FNGR2_IDX]; + + } + + /* handle Multi-touch signals */ + if (ts->platform_data->use_mt) { + if (ts->platform_data->use_trk_id) { + /* terminate any previous touch where the track + * is missing from the current event */ + for (id = 0; id < CY_NUM_TRK_ID; id++) { + if ((ts->act_trk[id] != CY_NTCH) && + (cur_trk[id] == CY_NTCH)) { + input_report_abs(ts->input, + ABS_MT_TRACKING_ID, + id); + input_report_abs(ts->input, + ABS_MT_TOUCH_MAJOR, + CY_NTCH); + input_report_abs(ts->input, + ABS_MT_WIDTH_MAJOR, + curr_tool_width); + input_report_abs(ts->input, + ABS_MT_POSITION_X, + ts->prv_mt_pos[id][CY_XPOS]); + input_report_abs(ts->input, + ABS_MT_POSITION_Y, + ts->prv_mt_pos[id][CY_YPOS]); + CY_MT_SYNC(ts->input); + ts->act_trk[id] = CY_NTCH; + ts->prv_mt_pos[id][CY_XPOS] = 0; + ts->prv_mt_pos[id][CY_YPOS] = 0; + } + } + /* set Multi-Touch current event signals */ + for (id = 0; id < 
CY_NUM_MT_TCH_ID; id++) { + if (cur_mt_tch[id] < CY_NUM_TRK_ID) { + input_report_abs(ts->input, + ABS_MT_TRACKING_ID, + cur_mt_tch[id]); + input_report_abs(ts->input, + ABS_MT_TOUCH_MAJOR, + cur_mt_z[id]); + input_report_abs(ts->input, + ABS_MT_WIDTH_MAJOR, + curr_tool_width); + input_report_abs(ts->input, + ABS_MT_POSITION_X, + cur_mt_pos[id][CY_XPOS]); + input_report_abs(ts->input, + ABS_MT_POSITION_Y, + cur_mt_pos[id][CY_YPOS]); + CY_MT_SYNC(ts->input); + ts->act_trk[id] = CY_TCH; + ts->prv_mt_pos[id][CY_XPOS] = + cur_mt_pos[id][CY_XPOS]; + ts->prv_mt_pos[id][CY_YPOS] = + cur_mt_pos[id][CY_YPOS]; + } + } + } else { + /* set temporary track array elements to voids */ + for (id = 0; id < CY_NUM_MT_TCH_ID; id++) { + tmp_trk[id] = CY_IGNR_TCH; + snd_trk[id] = CY_IGNR_TCH; + } + + /* get what is currently active */ + for (i = 0, id = 0; + id < CY_NUM_TRK_ID && i < CY_NUM_MT_TCH_ID; + id++) { + if (cur_trk[id] == CY_TCH) { + /* only incr counter if track found */ + tmp_trk[i] = id; + i++; + } + } + cyttsp_xdebug("T1: t0=%d, t1=%d, t2=%d, t3=%d\n", \ + tmp_trk[0], tmp_trk[1], tmp_trk[2], \ + tmp_trk[3]); + cyttsp_xdebug("T1: p0=%d, p1=%d, p2=%d, p3=%d\n", \ + ts->prv_mt_tch[0], ts->prv_mt_tch[1], \ + ts->prv_mt_tch[2], ts->prv_mt_tch[3]); + + /* pack in still active previous touches */ + for (id = 0, prv_tch = 0; + id < CY_NUM_MT_TCH_ID; id++) { + if (tmp_trk[id] < CY_NUM_TRK_ID) { + if (cyttsp_inlist(ts->prv_mt_tch, + tmp_trk[id], &loc, + CY_NUM_MT_TCH_ID)) { + loc &= CY_NUM_MT_TCH_ID - 1; + snd_trk[loc] = tmp_trk[id]; + prv_tch++; + cyttsp_xdebug("inlist s[%d]=%d t[%d]=%d l=%d p=%d\n", \ + loc, snd_trk[loc], \ + id, tmp_trk[id], \ + loc, prv_tch); + } else { + cyttsp_xdebug("not inlist s[%d]=%d t[%d]=%d l=%d \n", \ + id, snd_trk[id], \ + id, tmp_trk[id], \ + loc); + } + } + } + cyttsp_xdebug("S1: s0=%d, s1=%d, s2=%d, s3=%d p=%d\n", \ + snd_trk[0], snd_trk[1], snd_trk[2], \ + snd_trk[3], prv_tch); + + /* pack in new touches */ + for (id = 0; id < CY_NUM_MT_TCH_ID; id++) { + if (tmp_trk[id] < CY_NUM_TRK_ID) { + if (!cyttsp_inlist(snd_trk, tmp_trk[id], &loc, CY_NUM_MT_TCH_ID)) { + cyttsp_xdebug("not inlist t[%d]=%d l=%d\n", \ + id, tmp_trk[id], loc); + if (cyttsp_next_avail_inlist(snd_trk, &loc, CY_NUM_MT_TCH_ID)) { + loc &= CY_NUM_MT_TCH_ID - 1; + snd_trk[loc] = tmp_trk[id]; + cyttsp_xdebug("put inlist s[%d]=%d t[%d]=%d\n", + loc, snd_trk[loc], id, tmp_trk[id]); + } + } else { + cyttsp_xdebug("is in list s[%d]=%d t[%d]=%d loc=%d\n", \ + id, snd_trk[id], id, tmp_trk[id], loc); + } + } + } + cyttsp_xdebug("S2: s0=%d, s1=%d, s2=%d, s3=%d\n", \ + snd_trk[0], snd_trk[1], + snd_trk[2], snd_trk[3]); + + /* sync motion event signals for each current touch */ + for (id = 0; id < CY_NUM_MT_TCH_ID; id++) { + /* z will either be 0 (NOTOUCH) or + * some pressure (TOUCH) */ + cyttsp_xdebug("MT0 prev[%d]=%d temp[%d]=%d send[%d]=%d\n", \ + id, ts->prv_mt_tch[id], \ + id, tmp_trk[id], \ + id, snd_trk[id]); + if (snd_trk[id] < CY_NUM_TRK_ID) { + input_report_abs(ts->input, + ABS_MT_TOUCH_MAJOR, + cur_mt_z[snd_trk[id]]); + input_report_abs(ts->input, + ABS_MT_WIDTH_MAJOR, + curr_tool_width); + input_report_abs(ts->input, + ABS_MT_POSITION_X, + cur_mt_pos[snd_trk[id]][CY_XPOS]); + input_report_abs(ts->input, + ABS_MT_POSITION_Y, + cur_mt_pos[snd_trk[id]][CY_YPOS]); + CY_MT_SYNC(ts->input); + cyttsp_debug("MT1->TID:%2d X:%3d Y:%3d Z:%3d touch-sent\n", \ + snd_trk[id], \ + cur_mt_pos[snd_trk[id]][CY_XPOS], \ + cur_mt_pos[snd_trk[id]][CY_YPOS], \ + cur_mt_z[snd_trk[id]]); + } else if (ts->prv_mt_tch[id] < 
CY_NUM_TRK_ID) { + /* void out this touch */ + input_report_abs(ts->input, + ABS_MT_TOUCH_MAJOR, + CY_NTCH); + input_report_abs(ts->input, + ABS_MT_WIDTH_MAJOR, + curr_tool_width); + input_report_abs(ts->input, + ABS_MT_POSITION_X, + ts->prv_mt_pos[ts->prv_mt_tch[id]][CY_XPOS]); + input_report_abs(ts->input, + ABS_MT_POSITION_Y, + ts->prv_mt_pos[ts->prv_mt_tch[id]][CY_YPOS]); + CY_MT_SYNC(ts->input); + cyttsp_debug("MT2->TID:%2d X:%3d Y:%3d Z:%3d lift off-sent\n", \ + ts->prv_mt_tch[id], \ + ts->prv_mt_pos[ts->prv_mt_tch[id]][CY_XPOS], \ + ts->prv_mt_pos[ts->prv_mt_tch[id]][CY_YPOS], \ + CY_NTCH); + } else { + /* do not stuff any signals for this + * previously and currently + * void touches */ + cyttsp_xdebug("MT3->send[%d]=%d - No touch - NOT sent\n", \ + id, snd_trk[id]); + } + } + + /* save current posted tracks to + * previous track memory */ + for (id = 0; id < CY_NUM_MT_TCH_ID; id++) { + ts->prv_mt_tch[id] = snd_trk[id]; + if (snd_trk[id] < CY_NUM_TRK_ID) { + ts->prv_mt_pos[snd_trk[id]][CY_XPOS] = + cur_mt_pos[snd_trk[id]][CY_XPOS]; + ts->prv_mt_pos[snd_trk[id]][CY_YPOS] = + cur_mt_pos[snd_trk[id]][CY_YPOS]; + cyttsp_xdebug("MT4->TID:%2d X:%3d Y:%3d Z:%3d save for previous\n", \ + snd_trk[id], \ + ts->prv_mt_pos[snd_trk[id]][CY_XPOS], \ + ts->prv_mt_pos[snd_trk[id]][CY_YPOS], \ + CY_NTCH); + } + } + for (id = 0; id < CY_NUM_TRK_ID; id++) + ts->act_trk[id] = CY_NTCH; + for (id = 0; id < CY_NUM_MT_TCH_ID; id++) { + if (snd_trk[id] < CY_NUM_TRK_ID) + ts->act_trk[snd_trk[id]] = CY_TCH; + } + } + } + + /* handle gestures */ + if (ts->platform_data->use_gestures) { + if (g_xy_data.gest_id) { + input_report_key(ts->input, + BTN_3, CY_TCH); + input_report_abs(ts->input, + ABS_HAT1X, g_xy_data.gest_id); + input_report_abs(ts->input, + ABS_HAT2Y, g_xy_data.gest_cnt); + } + } + + /* signal the view motion event */ + input_sync(ts->input); + + for (id = 0; id < CY_NUM_TRK_ID; id++) { + /* update platform data for the current MT information */ + ts->act_trk[id] = cur_trk[id]; + } + +exit_xy_worker: + if (cyttsp_disable_touch) { + /* Turn off the touch interrupts */ + cyttsp_debug("Not enabling touch\n"); + } else { + if (ts->client->irq == 0) { + /* restart event timer */ + mod_timer(&ts->timer, jiffies + TOUCHSCREEN_TIMEOUT); + } else { + /* re-enable the interrupt after processing */ + enable_irq(ts->client->irq); + } + } + return; +} + +static int cyttsp_inlist(u16 prev_track[], u8 cur_trk_id, + u8 *prev_loc, u8 num_touches) +{ + u8 id = 0; + + *prev_loc = CY_IGNR_TCH; + + cyttsp_xdebug("IN p[%d]=%d c=%d n=%d loc=%d\n", \ + id, prev_track[id], cur_trk_id, \ + num_touches, *prev_loc); + for (id = 0, *prev_loc = CY_IGNR_TCH; + (id < num_touches); id++) { + cyttsp_xdebug("p[%d]=%d c=%d n=%d loc=%d\n", \ + id, prev_track[id], cur_trk_id, \ + num_touches, *prev_loc); + if (prev_track[id] == cur_trk_id) { + *prev_loc = id; + break; + } + } + cyttsp_xdebug("OUT p[%d]=%d c=%d n=%d loc=%d\n", \ + id, prev_track[id], cur_trk_id, num_touches, *prev_loc); + + return ((*prev_loc < CY_NUM_TRK_ID) ? true : false); +} + +static int cyttsp_next_avail_inlist(u16 cur_trk[], + u8 *new_loc, u8 num_touches) +{ + u8 id; + + for (id = 0, *new_loc = CY_IGNR_TCH; + (id < num_touches); id++) { + if (cur_trk[id] > CY_NUM_TRK_ID) { + *new_loc = id; + break; + } + } + + return ((*new_loc < CY_NUM_TRK_ID) ? 
true : false); +} + +/* Timer function used as dummy interrupt driver */ +static void cyttsp_timer(unsigned long handle) +{ + struct cyttsp *ts = (struct cyttsp *) handle; + + cyttsp_xdebug("TTSP Device timer event\n"); + + /* schedule motion signal handling */ + queue_work(cyttsp_ts_wq, &ts->work); + + return; +} + + + +/* ************************************************************************ + * ISR function. This function is general, initialized in drivers init + * function + * ************************************************************************ */ +static irqreturn_t cyttsp_irq(int irq, void *handle) +{ + struct cyttsp *ts = (struct cyttsp *) handle; + + cyttsp_xdebug("%s: Got IRQ\n", CY_I2C_NAME); + + /* disable further interrupts until this interrupt is processed */ + disable_irq_nosync(ts->client->irq); + + /* schedule motion signal handling */ + queue_work(cyttsp_ts_wq, &ts->work); + return IRQ_HANDLED; +} + +/* ************************************************************************ + * Probe initialization functions + * ************************************************************************ */ +static int cyttsp_putbl(struct cyttsp *ts, int show, + int show_status, int show_version, int show_cid) +{ + int retval = CY_OK; + + int num_bytes = (show_status * 3) + (show_version * 6) + (show_cid * 3); + + if (show_cid) + num_bytes = sizeof(struct cyttsp_bootloader_data_t); + else if (show_version) + num_bytes = sizeof(struct cyttsp_bootloader_data_t) - 3; + else + num_bytes = sizeof(struct cyttsp_bootloader_data_t) - 9; + + if (show) { + retval = i2c_smbus_read_i2c_block_data(ts->client, + CY_REG_BASE, num_bytes, (u8 *)&g_bl_data); + if (show_status) { + cyttsp_debug("BL%d: f=%02X s=%02X err=%02X bl=%02X%02X bld=%02X%02X\n", \ + show, \ + g_bl_data.bl_file, \ + g_bl_data.bl_status, \ + g_bl_data.bl_error, \ + g_bl_data.blver_hi, g_bl_data.blver_lo, \ + g_bl_data.bld_blver_hi, g_bl_data.bld_blver_lo); + } + if (show_version) { + cyttsp_debug("BL%d: ttspver=0x%02X%02X appid=0x%02X%02X appver=0x%02X%02X\n", \ + show, \ + g_bl_data.ttspver_hi, g_bl_data.ttspver_lo, \ + g_bl_data.appid_hi, g_bl_data.appid_lo, \ + g_bl_data.appver_hi, g_bl_data.appver_lo); + } + if (show_cid) { + cyttsp_debug("BL%d: cid=0x%02X%02X%02X\n", \ + show, \ + g_bl_data.cid_0, \ + g_bl_data.cid_1, \ + g_bl_data.cid_2); + } + } + + return retval; +} + +#ifdef CY_INCLUDE_LOAD_FILE +#define CY_MAX_I2C_LEN 256 +#define CY_MAX_TRY 10 +#define CY_BL_PAGE_SIZE 16 +#define CY_BL_NUM_PAGES 5 +static int cyttsp_i2c_wr_blk_chunks(struct cyttsp *ts, u8 command, + u8 length, const u8 *values) +{ + int retval = CY_OK; + int block = 1; + + u8 dataray[CY_MAX_I2C_LEN]; + + /* first page already includes the bl page offset */ + retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE, + CY_BL_PAGE_SIZE+1, values); + values += CY_BL_PAGE_SIZE+1; + length -= CY_BL_PAGE_SIZE+1; + + /* rem blocks require bl page offset stuffing */ + while (length && + (block < CY_BL_NUM_PAGES) && + !(retval < CY_OK)) { + udelay(43*2); /* TRM * 2 */ + dataray[0] = CY_BL_PAGE_SIZE*block; + memcpy(&dataray[1], values, + length >= CY_BL_PAGE_SIZE ? + CY_BL_PAGE_SIZE : length); + retval = i2c_smbus_write_i2c_block_data(ts->client, + CY_REG_BASE, + length >= CY_BL_PAGE_SIZE ? + CY_BL_PAGE_SIZE + 1 : length+1, dataray); + values += CY_BL_PAGE_SIZE; + length = length >= CY_BL_PAGE_SIZE ? 
+ length - CY_BL_PAGE_SIZE : 0; + block++; + } + + return retval; +} + +static int cyttsp_bootload_app(struct cyttsp *ts) +{ + int retval = CY_OK; + int i, tries; + u8 host_reg; + + cyttsp_debug("load new firmware \n"); + /* reset TTSP Device back to bootloader mode */ + host_reg = CY_SOFT_RESET_MODE; + retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE, + sizeof(host_reg), &host_reg); + /* wait for TTSP Device to complete reset back to bootloader */ + tries = 0; + do { + mdelay(1); + cyttsp_putbl(ts, 3, false, false, false); + } while (g_bl_data.bl_status != 0x10 && + g_bl_data.bl_status != 0x11 && + tries++ < 100); + cyttsp_debug("load file - tver=0x%02X%02X a_id=0x%02X%02X aver=0x%02X%02X\n", \ + cyttsp_fw_tts_verh, cyttsp_fw_tts_verl, \ + cyttsp_fw_app_idh, cyttsp_fw_app_idl, \ + cyttsp_fw_app_verh, cyttsp_fw_app_verl); + + /* download new TTSP Application to the Bootloader */ + if (!(retval < CY_OK)) { + i = 0; + /* send bootload initiation command */ + if (cyttsp_fw[i].Command == CY_BL_INIT_LOAD) { + g_bl_data.bl_file = 0; + g_bl_data.bl_status = 0; + g_bl_data.bl_error = 0; + retval = i2c_smbus_write_i2c_block_data(ts->client, + CY_REG_BASE, + cyttsp_fw[i].Length, cyttsp_fw[i].Block); + /* delay to allow bl to get ready for block writes */ + i++; + tries = 0; + do { + mdelay(100); + cyttsp_putbl(ts, 4, false, false, false); + } while (g_bl_data.bl_status != 0x10 && + g_bl_data.bl_status != 0x11 && + tries++ < 100); + cyttsp_debug("wait init f=%02X, s=%02X, e=%02X t=%d\n", \ + g_bl_data.bl_file, g_bl_data.bl_status, \ + g_bl_data.bl_error, tries); + /* send bootload firmware load blocks */ + if (!(retval < CY_OK)) { + while (cyttsp_fw[i].Command == CY_BL_WRITE_BLK) { + retval = cyttsp_i2c_wr_blk_chunks(ts, + CY_REG_BASE, + cyttsp_fw[i].Length, + cyttsp_fw[i].Block); + cyttsp_xdebug("BL DNLD Rec=% 3d Len=% 3d Addr=%04X\n", \ + cyttsp_fw[i].Record, \ + cyttsp_fw[i].Length, \ + cyttsp_fw[i].Address); + i++; + if (retval < CY_OK) { + cyttsp_debug("BL fail Rec=%3d retval=%d\n", \ + cyttsp_fw[i-1].Record, \ + retval); + break; + } else { + tries = 0; + cyttsp_putbl(ts, 5, false, false, false); + while (!((g_bl_data.bl_status == 0x10) && + (g_bl_data.bl_error == 0x20)) && + !((g_bl_data.bl_status == 0x11) && + (g_bl_data.bl_error == 0x20)) && + (tries++ < 100)) { + mdelay(1); + cyttsp_putbl(ts, 5, false, false, false); + } + } + } + + if (!(retval < CY_OK)) { + while (i < cyttsp_fw_records) { + retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE, + cyttsp_fw[i].Length, + cyttsp_fw[i].Block); + i++; + tries = 0; + do { + mdelay(100); + cyttsp_putbl(ts, 6, true, false, false); + } while (g_bl_data.bl_status != 0x10 && + g_bl_data.bl_status != 0x11 && + tries++ < 100); + cyttsp_debug("wait term f=%02X, s=%02X, e=%02X t=%d\n", \ + g_bl_data.bl_file, \ + g_bl_data.bl_status, \ + g_bl_data.bl_error, \ + tries); + if (retval < CY_OK) + break; + } + } + } + } + } + + /* reset TTSP Device back to bootloader mode */ + host_reg = CY_SOFT_RESET_MODE; + retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE, + sizeof(host_reg), &host_reg); + /* wait for TTSP Device to complete reset back to bootloader */ + tries = 0; + do { + mdelay(1); + cyttsp_putbl(ts, 3, false, false, false); + } while (g_bl_data.bl_status != 0x10 && + g_bl_data.bl_status != 0x11 && + tries++ < 100); + + /* set arg2 to non-0 to activate */ + retval = cyttsp_putbl(ts, 8, true, true, true); + + return retval; +} +#else +static int cyttsp_bootload_app(struct cyttsp *ts) +{ + cyttsp_debug("no-load new 
firmware \n"); + return CY_OK; +} +#endif /* CY_INCLUDE_LOAD_FILE */ + + +static int cyttsp_power_on(struct cyttsp *ts) +{ + int retval = CY_OK; + u8 host_reg; + int tries; + + cyttsp_debug("Power up \n"); + + /* check if the TTSP device has a bootloader installed */ + host_reg = CY_SOFT_RESET_MODE; + retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE, + sizeof(host_reg), &host_reg); + tries = 0; + do { + mdelay(1); + + /* set arg2 to non-0 to activate */ + retval = cyttsp_putbl(ts, 1, true, true, true); + cyttsp_info("BL%d: f=%02X s=%02X err=%02X bl=%02X%02X bld=%02X%02X R=%d\n", \ + 101, \ + g_bl_data.bl_file, g_bl_data.bl_status, \ + g_bl_data.bl_error, \ + g_bl_data.blver_hi, g_bl_data.blver_lo, \ + g_bl_data.bld_blver_hi, g_bl_data.bld_blver_lo, + retval); + cyttsp_info("BL%d: tver=%02X%02X a_id=%02X%02X aver=%02X%02X\n", \ + 102, \ + g_bl_data.ttspver_hi, g_bl_data.ttspver_lo, \ + g_bl_data.appid_hi, g_bl_data.appid_lo, \ + g_bl_data.appver_hi, g_bl_data.appver_lo); + cyttsp_info("BL%d: c_id=%02X%02X%02X\n", \ + 103, \ + g_bl_data.cid_0, g_bl_data.cid_1, g_bl_data.cid_2); + } while (!(retval < CY_OK) && + !GET_BOOTLOADERMODE(g_bl_data.bl_status) && + !(g_bl_data.bl_file == CY_OP_MODE + CY_LOW_PWR_MODE) && + tries++ < 100); + + /* is bootloader missing? */ + if (!(retval < CY_OK)) { + cyttsp_xdebug("Ret=%d Check if bootloader is missing...\n", \ + retval); + if (!GET_BOOTLOADERMODE(g_bl_data.bl_status)) { + /* skip all bl and sys info and go to op mode */ + if (!(retval < CY_OK)) { + cyttsp_xdebug("Bl is missing (ret=%d)\n", \ + retval); + host_reg = CY_OP_MODE/* + CY_LOW_PWR_MODE*/; + retval = i2c_smbus_write_i2c_block_data(ts->client, CY_REG_BASE, + sizeof(host_reg), &host_reg); + /* wait for TTSP Device to complete switch to + * Operational mode */ + mdelay(1000); + goto bypass; + } + } + } + + + /* take TTSP out of bootloader mode; go to TrueTouch operational mode */ + if (!(retval < CY_OK)) { + cyttsp_xdebug1("exit bootloader; go operational\n"); + retval = i2c_smbus_write_i2c_block_data(ts->client, + CY_REG_BASE, sizeof(bl_cmd), bl_cmd); + tries = 0; + do { + mdelay(100); + cyttsp_putbl(ts, 4, true, false, false); + cyttsp_info("BL%d: f=%02X s=%02X err=%02X bl=%02X%02X bld=%02X%02X\n", \ + 104, \ + g_bl_data.bl_file, g_bl_data.bl_status, \ + g_bl_data.bl_error, \ + g_bl_data.blver_hi, g_bl_data.blver_lo, \ + g_bl_data.bld_blver_hi, g_bl_data.bld_blver_lo); + } while (GET_BOOTLOADERMODE(g_bl_data.bl_status) && + tries++ < 100); + } + + + + if (!(retval < CY_OK) && + cyttsp_app_load()) { + if (CY_DIFF(g_bl_data.ttspver_hi, cyttsp_tts_verh()) || + CY_DIFF(g_bl_data.ttspver_lo, cyttsp_tts_verl()) || + CY_DIFF(g_bl_data.appid_hi, cyttsp_app_idh()) || + CY_DIFF(g_bl_data.appid_lo, cyttsp_app_idl()) || + CY_DIFF(g_bl_data.appver_hi, cyttsp_app_verh()) || + CY_DIFF(g_bl_data.appver_lo, cyttsp_app_verl()) || + CY_DIFF(g_bl_data.cid_0, cyttsp_cid_0()) || + CY_DIFF(g_bl_data.cid_1, cyttsp_cid_1()) || + CY_DIFF(g_bl_data.cid_2, cyttsp_cid_2()) || + cyttsp_force_fw_load()) { + cyttsp_debug("blttsp=0x%02X%02X flttsp=0x%02X%02X force=%d\n", \ + g_bl_data.ttspver_hi, g_bl_data.ttspver_lo, \ + cyttsp_tts_verh(), cyttsp_tts_verl(), \ + cyttsp_force_fw_load()); + cyttsp_debug("blappid=0x%02X%02X flappid=0x%02X%02X\n", \ + g_bl_data.appid_hi, g_bl_data.appid_lo, \ + cyttsp_app_idh(), cyttsp_app_idl()); + cyttsp_debug("blappver=0x%02X%02X flappver=0x%02X%02X\n", \ + g_bl_data.appver_hi, g_bl_data.appver_lo, \ + cyttsp_app_verh(), cyttsp_app_verl()); + cyttsp_debug("blcid=0x%02X%02X%02X 
flcid=0x%02X%02X%02X\n", \ + g_bl_data.cid_0, \ + g_bl_data.cid_1, \ + g_bl_data.cid_2, \ + cyttsp_cid_0(), \ + cyttsp_cid_1(), \ + cyttsp_cid_2()); + /* enter bootloader to load new app into TTSP Device */ + retval = cyttsp_bootload_app(ts); + /* take TTSP device out of bootloader mode; + * switch back to TrueTouch operational mode */ + if (!(retval < CY_OK)) { + retval = i2c_smbus_write_i2c_block_data(ts->client, + CY_REG_BASE, + sizeof(bl_cmd), bl_cmd); + /* wait for TTSP Device to complete + * switch to Operational mode */ + tries = 0; + do { + mdelay(100); + cyttsp_putbl(ts, 9, false, false, false); + } while (GET_BOOTLOADERMODE(g_bl_data.bl_status) && + tries++ < 100); + cyttsp_putbl(ts, 9, true, false, false); + } + } + } + +bypass: + /* switch to System Information mode to read versions + * and set interval registers */ + if (!(retval < CY_OK)) { + cyttsp_debug("switch to sysinfo mode \n"); + host_reg = CY_SYSINFO_MODE; + retval = i2c_smbus_write_i2c_block_data(ts->client, + CY_REG_BASE, sizeof(host_reg), &host_reg); + /* wait for TTSP Device to complete switch to SysInfo mode */ + mdelay(100); + if (!(retval < CY_OK)) { + retval = i2c_smbus_read_i2c_block_data(ts->client, + CY_REG_BASE, + sizeof(struct cyttsp_sysinfo_data_t), + (u8 *)&g_sysinfo_data); + cyttsp_debug("SI2: hst_mode=0x%02X mfg_cmd=0x%02X mfg_stat=0x%02X\n", \ + g_sysinfo_data.hst_mode, \ + g_sysinfo_data.mfg_cmd, \ + g_sysinfo_data.mfg_stat); + cyttsp_debug("SI2: bl_ver=0x%02X%02X\n", \ + g_sysinfo_data.bl_verh, \ + g_sysinfo_data.bl_verl); + cyttsp_debug("SI2: sysinfo act_int=0x%02X tch_tmout=0x%02X lp_int=0x%02X\n", \ + g_sysinfo_data.act_intrvl, \ + g_sysinfo_data.tch_tmout, \ + g_sysinfo_data.lp_intrvl); + cyttsp_info("SI%d: tver=%02X%02X a_id=%02X%02X aver=%02X%02X\n", \ + 102, \ + g_sysinfo_data.tts_verh, \ + g_sysinfo_data.tts_verl, \ + g_sysinfo_data.app_idh, \ + g_sysinfo_data.app_idl, \ + g_sysinfo_data.app_verh, \ + g_sysinfo_data.app_verl); + cyttsp_info("SI%d: c_id=%02X%02X%02X\n", \ + 103, \ + g_sysinfo_data.cid[0], \ + g_sysinfo_data.cid[1], \ + g_sysinfo_data.cid[2]); + if (!(retval < CY_OK) && + (CY_DIFF(ts->platform_data->act_intrvl, + CY_ACT_INTRVL_DFLT) || + CY_DIFF(ts->platform_data->tch_tmout, + CY_TCH_TMOUT_DFLT) || + CY_DIFF(ts->platform_data->lp_intrvl, + CY_LP_INTRVL_DFLT))) { + if (!(retval < CY_OK)) { + u8 intrvl_ray[sizeof(ts->platform_data->act_intrvl) + + sizeof(ts->platform_data->tch_tmout) + + sizeof(ts->platform_data->lp_intrvl)]; + u8 i = 0; + + intrvl_ray[i++] = + ts->platform_data->act_intrvl; + intrvl_ray[i++] = + ts->platform_data->tch_tmout; + intrvl_ray[i++] = + ts->platform_data->lp_intrvl; + + cyttsp_debug("SI2: platinfo act_intrvl=0x%02X tch_tmout=0x%02X lp_intrvl=0x%02X\n", \ + ts->platform_data->act_intrvl, \ + ts->platform_data->tch_tmout, \ + ts->platform_data->lp_intrvl); + /* set intrvl registers */ + retval = i2c_smbus_write_i2c_block_data( + ts->client, + CY_REG_ACT_INTRVL, + sizeof(intrvl_ray), intrvl_ray); + mdelay(CY_DLY_SYSINFO); + } + } + } + /* switch back to Operational mode */ + cyttsp_debug("switch back to operational mode \n"); + if (!(retval < CY_OK)) { + host_reg = CY_OP_MODE/* + CY_LOW_PWR_MODE*/; + retval = i2c_smbus_write_i2c_block_data(ts->client, + CY_REG_BASE, + sizeof(host_reg), &host_reg); + /* wait for TTSP Device to complete + * switch to Operational mode */ + mdelay(100); + } + } + /* init gesture setup; + * this is required even if not using gestures + * in order to set the active distance */ + if (!(retval < CY_OK)) { + u8 gesture_setup; + 
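+		/* Note: the single byte written below is taken verbatim from
+		 * the platform data (gest_set) and is written to
+		 * CY_REG_GEST_SET; the mdelay(CY_DLY_DFLT) that follows gives
+		 * the part time to apply the new active-distance/gesture
+		 * configuration. */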
cyttsp_debug("init gesture setup \n"); + gesture_setup = ts->platform_data->gest_set; + retval = i2c_smbus_write_i2c_block_data(ts->client, + CY_REG_GEST_SET, + sizeof(gesture_setup), &gesture_setup); + mdelay(CY_DLY_DFLT); + } + + if (!(retval < CY_OK)) + ts->platform_data->power_state = CY_ACTIVE_STATE; + else + ts->platform_data->power_state = CY_IDLE_STATE; + + cyttsp_debug("Retval=%d Power state is %s\n", \ + retval, \ + ts->platform_data->power_state == CY_ACTIVE_STATE ? \ + "ACTIVE" : "IDLE"); + + return retval; +} + +/* cyttsp_initialize: Driver Initialization. This function takes + * care of the following tasks: + * 1. Create and register an input device with input layer + * 2. Take CYTTSP device out of bootloader mode; go operational + * 3. Start any timers/Work queues. */ +static int cyttsp_initialize(struct i2c_client *client, struct cyttsp *ts) +{ + struct input_dev *input_device; + int error = 0; + int retval = CY_OK; + u8 id; + + /* Create the input device and register it. */ + input_device = input_allocate_device(); + if (!input_device) { + error = -ENOMEM; + cyttsp_xdebug1("err input allocate device\n"); + goto error_free_device; + } + + if (!client) { + error = ~ENODEV; + cyttsp_xdebug1("err client is Null\n"); + goto error_free_device; + } + + if (!ts) { + error = ~ENODEV; + cyttsp_xdebug1("err context is Null\n"); + goto error_free_device; + } + + ts->input = input_device; + input_device->name = CY_I2C_NAME; + input_device->phys = ts->phys; + input_device->dev.parent = &client->dev; + + /* init the touch structures */ + ts->num_prv_st_tch = CY_NTCH; + for (id = 0; id < CY_NUM_TRK_ID; id++) { + ts->act_trk[id] = CY_NTCH; + ts->prv_mt_pos[id][CY_XPOS] = 0; + ts->prv_mt_pos[id][CY_YPOS] = 0; + } + + for (id = 0; id < CY_NUM_MT_TCH_ID; id++) + ts->prv_mt_tch[id] = CY_IGNR_TCH; + + for (id = 0; id < CY_NUM_ST_TCH_ID; id++) + ts->prv_st_tch[id] = CY_IGNR_TCH; + + set_bit(EV_SYN, input_device->evbit); + set_bit(EV_KEY, input_device->evbit); + set_bit(EV_ABS, input_device->evbit); + set_bit(BTN_TOUCH, input_device->keybit); + set_bit(BTN_2, input_device->keybit); + if (ts->platform_data->use_gestures) + set_bit(BTN_3, input_device->keybit); + + input_set_abs_params(input_device, ABS_X, ts->platform_data->disp_minx, + ts->platform_data->disp_maxx, 0, 0); + input_set_abs_params(input_device, ABS_Y, ts->platform_data->disp_miny, + ts->platform_data->disp_maxy, 0, 0); + input_set_abs_params(input_device, + ABS_TOOL_WIDTH, 0, CY_LARGE_TOOL_WIDTH, 0 , 0); + input_set_abs_params(input_device, + ABS_PRESSURE, 0, CY_MAXZ, 0, 0); + input_set_abs_params(input_device, + ABS_HAT0X, 0, ts->platform_data->panel_maxx, 0, 0); + input_set_abs_params(input_device, + ABS_HAT0Y, 0, ts->platform_data->panel_maxy, 0, 0); + if (ts->platform_data->use_gestures) { + input_set_abs_params(input_device, + ABS_HAT1X, 0, CY_MAXZ, 0, 0); + input_set_abs_params(input_device, + ABS_HAT1Y, 0, CY_MAXZ, 0, 0); + } + if (ts->platform_data->use_mt) { + input_set_abs_params(input_device, ABS_MT_POSITION_X, + ts->platform_data->disp_minx, + ts->platform_data->disp_maxx, 0, 0); + input_set_abs_params(input_device, ABS_MT_POSITION_Y, + ts->platform_data->disp_miny, + ts->platform_data->disp_maxy, 0, 0); + input_set_abs_params(input_device, + ABS_MT_TOUCH_MAJOR, 0, CY_MAXZ, 0, 0); + input_set_abs_params(input_device, + ABS_MT_WIDTH_MAJOR, 0, CY_LARGE_TOOL_WIDTH, 0, 0); + if (ts->platform_data->use_trk_id) { + input_set_abs_params(input_device, + ABS_MT_TRACKING_ID, 0, CY_NUM_TRK_ID, 0, 0); + } + } + + /* set dummy key to 
make driver work with virtual keys */
+	input_set_capability(input_device, EV_KEY, KEY_PROG1);
+
+	cyttsp_info("%s: Register input device\n", CY_I2C_NAME);
+	error = input_register_device(input_device);
+	if (error) {
+		cyttsp_alert("%s: Failed to register input device\n", \
+			CY_I2C_NAME);
+		retval = error;
+		goto error_free_device;
+	}
+
+	/* Prepare our worker structure prior to setting up the timer/ISR */
+	INIT_WORK(&ts->work, cyttsp_xy_worker);
+
+	/* Power on the chip and make sure that I/Os are set as specified
+	 * in the platform */
+	if (ts->platform_data->init)
+		retval = ts->platform_data->init(client);
+
+	if (!(retval < CY_OK))
+		retval = cyttsp_power_on(ts);
+
+	if (retval < 0)
+		goto error_unreg_device;
+
+	/* Timer or Interrupt setup */
+	if (ts->client->irq == 0) {
+		cyttsp_info("Setting up timer\n");
+		setup_timer(&ts->timer, cyttsp_timer, (unsigned long) ts);
+		mod_timer(&ts->timer, jiffies + TOUCHSCREEN_TIMEOUT);
+	} else {
+		cyttsp_info("Setting up interrupt\n");
+		/* request_irq() will also call enable_irq() */
+		error = request_irq(client->irq, cyttsp_irq,
+			IRQF_TRIGGER_FALLING,
+			client->dev.driver->name, ts);
+		if (error) {
+			cyttsp_alert("error: could not request irq\n");
+			retval = error;
+			goto error_free_irq;
+		}
+	}
+
+	irq_cnt = 0;
+	irq_cnt_total = 0;
+	irq_err_cnt = 0;
+
+	atomic_set(&ts->irq_enabled, 1);
+	retval = device_create_file(&ts->client->dev, &dev_attr_irq_enable);
+	if (retval < CY_OK) {
+		cyttsp_alert("File device creation failed: %d\n", retval);
+		retval = -ENODEV;
+		goto error_free_irq;
+	}
+
+	retval = device_create_file(&client->dev, &dev_attr_cyttsp_fw_ver);
+	if (retval) {
+		cyttsp_alert("sysfs entry for firmware version failed\n");
+		goto error_rm_dev_file_irq_en;
+	}
+
+	sprintf(ts->cyttsp_fw_ver, "%d.%d", g_bl_data.ttspver_hi,
+		g_bl_data.ttspver_lo);
+
+	retval = device_create_file(&client->dev, &dev_attr_cyttsp_update_fw);
+	if (retval) {
+		cyttsp_alert("sysfs entry for firmware update failed\n");
+		goto error_rm_dev_file_fw_ver;
+	}
+
+	cyttsp_info("%s: Successful registration\n", CY_I2C_NAME);
+	goto success;
+
+error_rm_dev_file_fw_ver:
+	device_remove_file(&client->dev, &dev_attr_cyttsp_fw_ver);
+error_rm_dev_file_irq_en:
+	device_remove_file(&client->dev, &dev_attr_irq_enable);
+error_free_irq:
+	cyttsp_alert("Error: Failed to register IRQ handler\n");
+	free_irq(client->irq, ts);
+error_unreg_device:
+	input_unregister_device(input_device);
+error_free_device:
+	if (input_device)
+		input_free_device(input_device);
+
+success:
+	return retval;
+}
+
+/* I2C driver probe function */
+static int __devinit cyttsp_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct cyttsp *ts;
+	int error;
+	int retval = CY_OK;
+
+	cyttsp_info("Start Probe 1.2\n");
+
+	/* allocate and clear memory */
+	ts = kzalloc(sizeof(struct cyttsp), GFP_KERNEL);
+	if (ts == NULL) {
+		cyttsp_xdebug1("err kzalloc for cyttsp\n");
+		retval = -ENOMEM;
+	}
+
+	/* Enable runtime PM ops, start in ACTIVE mode */
+	error = pm_runtime_set_active(&client->dev);
+	if (error < 0)
+		dev_dbg(&client->dev, "unable to set runtime pm state\n");
+	pm_runtime_enable(&client->dev);
+
+	if (!(retval < CY_OK)) {
+		/* register driver_data */
+		ts->client = client;
+		ts->platform_data = client->dev.platform_data;
+		i2c_set_clientdata(client, ts);
+
+		error = cyttsp_initialize(client, ts);
+		if (error) {
+			cyttsp_xdebug1("err cyttsp_initialize\n");
+			if (ts != NULL) {
+				/* deallocate memory */
+				kfree(ts);
+			}
+/*
+			i2c_del_driver(&cyttsp_driver);
+*/
+			retval = -ENODEV;
+		} else
+			cyttsp_openlog();
+	}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	if (!(retval < CY_OK)) {
+		ts->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+		ts->early_suspend.suspend = cyttsp_early_suspend;
+		ts->early_suspend.resume = cyttsp_late_resume;
+		register_early_suspend(&ts->early_suspend);
+	}
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+	device_init_wakeup(&client->dev, ts->platform_data->wakeup);
+
+	cyttsp_info("Start Probe %s\n", \
+		(retval < CY_OK) ? "FAIL" : "PASS");
+
+	return retval;
+}
+
+/* Function to manage power-on resume */
+static int cyttsp_resume(struct device *dev)
+{
+	struct cyttsp *ts = dev_get_drvdata(dev);
+	int retval = CY_OK;
+
+	cyttsp_debug("Wake Up\n");
+
+	if (device_may_wakeup(dev)) {
+		if (ts->client->irq)
+			disable_irq_wake(ts->client->irq);
+		return 0;
+	}
+
+	/* re-enable the interrupt prior to wake device */
+	if (ts->client->irq)
+		enable_irq(ts->client->irq);
+
+	if (ts->platform_data->use_sleep &&
+		(ts->platform_data->power_state != CY_ACTIVE_STATE)) {
+		if (ts->platform_data->resume)
+			retval = ts->platform_data->resume(ts->client);
+		if (!(retval < CY_OK)) {
+			/* take TTSP device out of bootloader mode;
+			 * switch back to TrueTouch operational mode */
+			if (!(retval < CY_OK)) {
+				int tries;
+				retval = i2c_smbus_write_i2c_block_data(ts->client,
+					CY_REG_BASE,
+					sizeof(bl_cmd), bl_cmd);
+				/* wait for TTSP Device to complete
+				 * switch to Operational mode */
+				tries = 0;
+				do {
+					mdelay(100);
+					cyttsp_putbl(ts, 16, false, false, false);
+				} while (GET_BOOTLOADERMODE(g_bl_data.bl_status) &&
+					tries++ < 100);
+				cyttsp_putbl(ts, 16, true, false, false);
+			}
+		}
+	}
+
+	if (!(retval < CY_OK) &&
+		(GET_HSTMODE(g_bl_data.bl_file) == CY_OK)) {
+		ts->platform_data->power_state = CY_ACTIVE_STATE;
+
+		/* re-enable the timer after resuming */
+		if (ts->client->irq == 0)
+			mod_timer(&ts->timer, jiffies + TOUCHSCREEN_TIMEOUT);
+	} else
+		retval = -ENODEV;
+
+	cyttsp_debug("Wake Up %s\n", \
+		(retval < CY_OK) ? "FAIL" : "PASS");
+
+	return retval;
+}
+
+
+/* Function to manage low power suspend */
+static int cyttsp_suspend(struct device *dev)
+{
+	struct cyttsp *ts = dev_get_drvdata(dev);
+	u8 sleep_mode = CY_OK;
+	int retval = CY_OK;
+
+	cyttsp_debug("Enter Sleep\n");
+
+	if (device_may_wakeup(dev)) {
+		if (ts->client->irq)
+			enable_irq_wake(ts->client->irq);
+		return 0;
+	}
+
+	/* disable worker */
+	if (ts->client->irq == 0)
+		del_timer(&ts->timer);
+	else
+		disable_irq_nosync(ts->client->irq);
+	retval = cancel_work_sync(&ts->work);
+
+	if (retval)
+		enable_irq(ts->client->irq);
+
+	if (!(retval < CY_OK)) {
+		if (ts->platform_data->use_sleep &&
+			(ts->platform_data->power_state == CY_ACTIVE_STATE)) {
+			if (ts->platform_data->use_sleep & CY_USE_DEEP_SLEEP_SEL)
+				sleep_mode = CY_DEEP_SLEEP_MODE;
+			else
+				sleep_mode = CY_LOW_PWR_MODE;
+
+			retval = i2c_smbus_write_i2c_block_data(ts->client,
+				CY_REG_BASE,
+				sizeof(sleep_mode), &sleep_mode);
+		}
+	}
+
+	if (!(retval < CY_OK)) {
+		if (sleep_mode == CY_DEEP_SLEEP_MODE)
+			ts->platform_data->power_state = CY_SLEEP_STATE;
+		else if (sleep_mode == CY_LOW_PWR_MODE)
+			ts->platform_data->power_state = CY_LOW_PWR_STATE;
+	}
+
+	cyttsp_debug("Sleep Power state is %s\n", \
+		(ts->platform_data->power_state == CY_ACTIVE_STATE) ? \
+		"ACTIVE" : \
+		((ts->platform_data->power_state == CY_SLEEP_STATE) ? \
+		"SLEEP" : "LOW POWER"));
+
+	return retval;
+}
+
+/* registered in driver struct */
+static int __devexit cyttsp_remove(struct i2c_client *client)
+{
+	struct cyttsp *ts;
+	int err;
+
+	cyttsp_alert("Unregister\n");
+
+	pm_runtime_set_suspended(&client->dev);
+	pm_runtime_disable(&client->dev);
+
+	device_init_wakeup(&client->dev, 0);
+	/* clientdata registered on probe */
+	ts = i2c_get_clientdata(client);
+	device_remove_file(&ts->client->dev, &dev_attr_irq_enable);
+	device_remove_file(&client->dev, &dev_attr_cyttsp_fw_ver);
+	device_remove_file(&client->dev, &dev_attr_cyttsp_update_fw);
+
+	/* Start cleaning up by removing any delayed work and the timer */
+	if (cancel_delayed_work((struct delayed_work *)&ts->work) < CY_OK)
+		cyttsp_alert("error: could not remove work from workqueue\n");
+
+	/* free up timer or irq */
+	if (ts->client->irq == 0) {
+		err = del_timer(&ts->timer);
+		if (err < CY_OK)
+			cyttsp_alert("error: failed to delete timer\n");
+	} else
+		free_irq(client->irq, ts);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	unregister_early_suspend(&ts->early_suspend);
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+	/* housekeeping */
+	if (ts != NULL)
+		kfree(ts);
+
+	cyttsp_alert("Leaving\n");
+
+	return 0;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void cyttsp_early_suspend(struct early_suspend *handler)
+{
+	struct cyttsp *ts;
+
+	ts = container_of(handler, struct cyttsp, early_suspend);
+	cyttsp_suspend(&ts->client->dev);
+}
+
+static void cyttsp_late_resume(struct early_suspend *handler)
+{
+	struct cyttsp *ts;
+
+	ts = container_of(handler, struct cyttsp, early_suspend);
+	cyttsp_resume(&ts->client->dev);
+}
+#endif /* CONFIG_HAS_EARLYSUSPEND */
+
+static int cyttsp_init(void)
+{
+	int ret;
+
+	cyttsp_info("Cypress TrueTouch(R) Standard Product\n");
+	cyttsp_info("I2C Touchscreen Driver (Built %s @ %s)\n", \
+		__DATE__, __TIME__);
+
+	cyttsp_ts_wq = create_singlethread_workqueue("cyttsp_ts_wq");
+	if (cyttsp_ts_wq == NULL) {
+		cyttsp_debug("No memory for cyttsp_ts_wq\n");
+		return -ENOMEM;
+	}
+
+	ret = i2c_add_driver(&cyttsp_driver);
+
+	return ret;
+}
+
+static void cyttsp_exit(void)
+{
+	if (cyttsp_ts_wq)
+		destroy_workqueue(cyttsp_ts_wq);
+	return i2c_del_driver(&cyttsp_driver);
+}
+
+module_init(cyttsp_init);
+module_exit(cyttsp_exit);
+MODULE_FIRMWARE("ttsp.fw");
+
diff --git a/drivers/input/touchscreen/cyttsp_fw.h b/drivers/input/touchscreen/cyttsp_fw.h
new file mode 100755
index 0000000000000..f14153e0dec80
--- /dev/null
+++ b/drivers/input/touchscreen/cyttsp_fw.h
@@ -0,0 +1,4307 @@
+/* Header file for:
+ * Cypress TrueTouch(TM) Standard Product touchscreen drivers.
+ * drivers/input/touchscreen/cyttsp_fw.h
+ *
+ * Copyright (C) 2009, 2010 Cypress Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Cypress reserves the right to make changes without further notice
+ * to the materials described herein.
Cypress does not assume any + * liability arising out of the application described herein. + * + * Contact Cypress Semiconductor at www.cypress.com + * + */ + +#define CYTTSP_BL_OST_LEN 1 +#define CYTTSP_BL_CMD_LEN 2 +#define CYTTSP_BL_KEY_LEN 8 +#define CYTTSP_LD_ADR_LEN 2 +#define CYTTSP_LD_DAT_LEN 64 +#define CYTTSP_LD_CHK_LEN 2 +#define CYTTSP_LD_BLK_LEN (CYTTSP_BL_OST_LEN + CYTTSP_BL_CMD_LEN + CYTTSP_BL_KEY_LEN + \ + CYTTSP_LD_ADR_LEN + CYTTSP_LD_DAT_LEN + CYTTSP_LD_CHK_LEN) + +typedef struct cyttsp_ld_blk_ray_t { + unsigned short Record; + unsigned short Length; + unsigned char Command; + unsigned short Address; + unsigned char Block[CYTTSP_LD_BLK_LEN]; +} cyttsp_ld_blk_ray, *pcyttsp_ld_blk_ray; + +cyttsp_ld_blk_ray cyttsp_fw[] = { + { + 0, + 11, + 0x38, + -1, + { + 0x00, 0xFF, 0x38, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 + } + }, + { + 1, + 79, + 0x39, + 0x002C, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x2C, 0x40, 0x7D, 0x0B, 0x68, 0x30, 0x30, 0x30, 0x30, 0x7E, 0x30, 0x30, 0x30, 0x7D, 0x10, 0x12, 0x7E, 0x7D, 0x10, 0x36, 0x7E, 0x7E, 0x30, 0x30, 0x30, 0x7E, 0x30, 0x30, 0x30, 0x7D, 0x1F, 0x2A, 0x7E, 0x7E, 0x30, 0x30, 0x30, 0x7E, 0x30, 0x30, 0x30, 0x7E, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x7E, 0x30, 0x30, 0x30, 0x7E, 0x30, 0x30, 0x30, 0x7D, 0x20, 0x70, 0x7E, 0x7E, 0x30, 0x30, 0x30, 0x5B, 0x36 + } + }, + { + 2, + 79, + 0x39, + 0x002D, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x2D, 0x7E, 0x30, 0x30, 0x30, 0x7E, 0x30, 0x30, 0x30, 0x7E, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x40, 0x43, 0xE6, 0x02, 0x40, 0x70, 0xCF, 0x71, 0x10, 0x62, 0xE3, 0x02, 0x70, 0xCF, 0x41, 0xFF, 0xEF, 0x50, 0x80, 0x4E, 0x5D, 0xD5, 0x08, 0x62, 0x44, 0x09 + } + }, + { + 3, + 79, + 0x39, + 0x002E, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x2E, 0xD5, 0x00, 0x55, 0xFA, 0x01, 0x40, 0x50, 0x06, 0x55, 0xF8, 0x3A, 0x7C, 0x00, 0x60, 0x40, 0x40, 0x70, 0xCF, 0x71, 0x10, 0x51, 0xFA, 0x60, 0xE8, 0x70, 0xCF, 0x18, 0x60, 0xD5, 0x55, 0xF8, 0x00, 0x55, 0xF9, 0x00, 0x70, 0xCF, 0x71, 0x10, 0x41, 0x9F, 0xFE, 0x70, 0xCF, 0x62, 0xE3, 0x38, 0x62, 0xD1, 0x0F, 0x50, 0x00, 0x4E, 0x62, 0xD3, 0x0F, 0x62, 0xD0, 0x00, 0x62, 0xD5, 0x00, 0x62, 0xD4, 0x35, 0xEC + } + }, + { + 4, + 79, + 0x39, + 0x002F, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x2F, 0x00, 0x71, 0xC0, 0x7C, 0x0F, 0x76, 0x62, 0xD0, 0x00, 0x50, 0x0F, 0x57, 0x74, 0x08, 0x28, 0x53, 0xE9, 0x18, 0x75, 0x09, 0x00, 0x28, 0x4B, 0x51, 0xE9, 0x80, 0x04, 0x75, 0x09, 0x00, 0x62, 0xE3, 0x00, 0x08, 0x28, 0x60, 0xD5, 0x74, 0xA0, 0x4B, 0x18, 0x75, 0x09, 0x00, 0x08, 0x28, 0x53, 0xE9, 0x18, 0x75, 0x09, 0x00, 0x08, 0x28, 0xA0, 0x1C, 0x53, 0xE8, 0x18, 0x75, 0x09, 0x00, 0x08, 0x28, 0xCD, 0x1D + } + }, + { + 5, + 79, + 0x39, + 0x0030, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x30, 0x3F, 0xE9, 0x47, 0xE9, 0xFF, 0xB0, 0x06, 0x5D, 0xD5, 0x74, 0x60, 0xD5, 0x18, 0x7A, 0xE8, 0xBF, 0xEB, 0x8F, 0xC9, 0x18, 0x75, 0x09, 0x00, 0x08, 0x28, 0x53, 0xE8, 0x50, 0x00, 0x3F, 0xE9, 0x47, 0xE9, 0xFF, 0xB0, 0x08, 0x5D, 0xD5, 0x74, 0x60, 0xD5, 0x50, 0x00, 0x7A, 0xE8, 0xBF, 0xEF, 0x18, 0x8F, 0xAA, 0x18, 0x70, 0xCF, 0x71, 0x10, 0x62, 0xEC, 0x10, 0x43, 0xE3, 0x00, 0x70, 0xCF, 0x62, 0x4D, 0x1E + } + }, + { + 6, + 79, + 0x39, + 0x0031, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 
0x06, 0x07, 0x00, 0x31, 0xE0, 0x00, 0x41, 0xFE, 0xE7, 0x43, 0xFE, 0x10, 0x70, 0xCF, 0x71, 0x10, 0x62, 0xE0, 0x53, 0x70, 0xCF, 0x62, 0xE2, 0x00, 0x7C, 0x3E, 0xD3, 0x8F, 0xFF, 0x7F, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0xE9, 0x57 + } + }, + { + 7, + 79, + 0x39, + 0x0032, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x32, 0x5D, 0x04, 0x73, 0x21, 0xA0, 0xBF, 0xFA, 0x5D, 0x04, 0x73, 0x21, 0xA0, 0xBF, 0xF3, 0x5D, 0x04, 0x73, 0x21, 0xA0, 0xBF, 0xEC, 0x50, 0x18, 0x49, 0x04, 0x20, 0xAF, 0xE5, 0x60, 0xFF, 0x49, 0xC9, 0x01, 0xB0, 0x1A, 0x41, 0xD6, 0xFE, 0x70, 0xCF, 0x71, 0x10, 0x41, 0x04, 0x5F, 0x70, 0xCF, 0x43, 0xD6, 0x01, 0x40, 0x70, 0xCF, 0x71, 0x10, 0x43, 0x04, 0xA0, 0x70, 0xCF, 0x7F, 0x30, 0x30, 0x30, 0x81, 0x88 + } + }, + { + 8, + 79, + 0x39, + 0x0033, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x33, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x87 + } + }, + { + 9, + 79, + 0x39, + 0x0034, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x34, 0x0A, 0x20, 0x20, 0x51, 0x55, 0x41, 0x4C, 0x43, 0x4F, 0x4D, 0x4D, 0x20, 0x56, 0x50, 0x30, 0x34, 0x33, 0x2D, 0x48, 0x32, 0x20, 0x54, 0x4D, 0x41, 0x33, 0x30, 0x30, 0x45, 0x20, 0x46, 0x69, 0x72, 0x6D, 0x77, 0x61, 0x72, 0x65, 0x20, 0x49, 0x64, 0x65, 0x6E, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x42, 0x6C, 0x6F, 0x63, 0x6B, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x56, 0x99, 0xBA + } + }, + { + 10, + 79, + 0x39, + 0x0035, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x35, 0x65, 0x72, 0x73, 0x69, 0x6F, 0x6E, 0x20, 0x30, 0x32, 0x2E, 0x30, 0x34, 0x2E, 0x30, 0x30, 0x20, 0x43, 0x6F, 0x6D, 0x70, 0x69, 0x6C, 0x65, 0x64, 0x20, 0x4A, 0x75, 0x6C, 0x20, 0x31, 0x34, 0x20, 0x32, 0x30, 0x31, 0x30, 0x20, 0x31, 0x32, 0x3A, 0x35, 0x33, 0x3A, 0x31, 0x33, 0x0A, 0x20, 0x20, 0x45, 0x6E, 0x64, 0x20, 0x6F, 0x66, 0x20, 0x49, 0x44, 0x20, 0x42, 0x6C, 0x6F, 0x63, 0x6B, 0x0A, 0x0D, 0xA3 + } + }, + { + 11, + 79, + 0x39, + 0x0036, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x36, 0x00, 0x03, 0x09, 0x10, 0x16, 0x06, 0x02, 0x02, 0x02, 0x01, 0xF4, 0x00, 0x0A, 0x01, 0xF4, 0x00, 0x0A, 0x01, 0xF4, 0x00, 0x0A, 0x14, 0x19, 0x19, 0x00, 0x32, 0x02, 0x14, 0x01, 0x01, 0xE0, 0x03, 0x98, 0x0C, 0x0C, 0x00, 0x10, 0x10, 0x08, 0x00, 0x04, 0x08, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x00, 0x04, 0x08, 0x00, 0x00, 0x08, 0x10, 0x10, 0x10, 0x10, 0x10, 0x01, 0x80, 0x10, 0x01, 0x80, 0x50, 0x2A + } + }, + { + 12, + 79, + 0x39, + 0x0037, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x37, 0x01, 0x40, 0x04, 0x02, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x02, 0x40, 0x08, 0x80, 0x20, 0x80, 0x08, 0x04, 0x02, 0x40, 0x20, 0x23, 0x04, 0x21, 0x20, 0x22, 0x00, 0x61, 0x00, 0xFD, 0x00, 0xA0, 0x00, 0xA1, 0x00, 0xA2, 0x00, 0xA3, 0x00, 0xA8, 0x00, 0xA7, 0x00, 0x7C, 0x00, 0x7A, 0x00, 0x7B, 0x00, 0x79, 0x00, 0xCA, 0x24, 0xD6, 0x04, 0xCF, 0x00, 0xC8, 0x00, 0xA9, 0x00, 0xB7, 0x00, 0xB0, 0xB3, 0xF1 + } + 
}, + { + 13, + 79, + 0x39, + 0x0038, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x38, 0xCA, 0xB1, 0x0B, 0xB2, 0x00, 0xB3, 0x33, 0xB4, 0x33, 0xB5, 0x80, 0xB6, 0x00, 0x6C, 0x00, 0x6D, 0x00, 0x6E, 0x00, 0x6F, 0x00, 0xE6, 0x00, 0xE9, 0x00, 0xEC, 0x00, 0xE8, 0x20, 0xEB, 0x00, 0xEE, 0x00, 0xE7, 0x00, 0xEA, 0x00, 0xED, 0x00, 0xFF, 0x23, 0x00, 0x20, 0x20, 0x21, 0x07, 0x22, 0x40, 0x76, 0x00, 0xAF, 0x00, 0xD1, 0x00, 0xA1, 0x00, 0xD3, 0x00, 0xA3, 0x00, 0xD0, 0x00, 0xA0, 0x00, 0x69, 0x5E + } + }, + { + 14, + 79, + 0x39, + 0x0039, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x39, 0xD2, 0x00, 0xA2, 0x00, 0xDC, 0x08, 0xE1, 0xFF, 0xE2, 0x01, 0xDF, 0xFF, 0xDE, 0x02, 0xDD, 0x00, 0x99, 0x00, 0x9C, 0x00, 0xD8, 0x00, 0xD9, 0x00, 0xDA, 0x00, 0xDB, 0x00, 0x9E, 0x00, 0xAC, 0x00, 0xFF, 0x70, 0xCF, 0x62, 0x00, 0x04, 0x70, 0xCF, 0x71, 0x10, 0x62, 0x00, 0xFF, 0x62, 0x01, 0xF6, 0x70, 0xCF, 0x62, 0x02, 0x00, 0x62, 0x01, 0x00, 0x62, 0x04, 0xAB, 0x70, 0xCF, 0x71, 0x10, 0x62, 0xF2, 0x71 + } + }, + { + 15, + 79, + 0x39, + 0x003A, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x3A, 0x04, 0xEF, 0x62, 0x05, 0xFC, 0x70, 0xCF, 0x62, 0x06, 0x00, 0x62, 0x05, 0x00, 0x62, 0x08, 0x04, 0x70, 0xCF, 0x71, 0x10, 0x62, 0x08, 0xFF, 0x62, 0x09, 0x8F, 0x70, 0xCF, 0x62, 0x0A, 0x00, 0x62, 0x09, 0x00, 0x62, 0x0C, 0x00, 0x70, 0xCF, 0x71, 0x10, 0x62, 0x0C, 0xFF, 0x62, 0x0D, 0xFF, 0x70, 0xCF, 0x62, 0x0E, 0x00, 0x62, 0x0D, 0x00, 0x62, 0x10, 0x00, 0x70, 0xCF, 0x71, 0x10, 0x62, 0x10, 0xD6, 0x3A + } + }, + { + 16, + 79, + 0x39, + 0x003B, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x3B, 0xFF, 0x62, 0x11, 0xEF, 0x70, 0xCF, 0x62, 0x12, 0x00, 0x62, 0x11, 0x00, 0x70, 0xCF, 0x7F, 0xC1, 0x00, 0xC2, 0x00, 0xC3, 0x00, 0x80, 0x00, 0x81, 0x00, 0x82, 0x00, 0x83, 0x00, 0xA8, 0x00, 0xA9, 0x00, 0xAA, 0x00, 0x84, 0x00, 0x85, 0x00, 0x86, 0x00, 0x87, 0x00, 0xAB, 0x00, 0xAC, 0x00, 0xAD, 0x00, 0x88, 0x00, 0x89, 0x00, 0x8A, 0x00, 0x8B, 0x00, 0xAE, 0x00, 0xAF, 0x00, 0xB0, 0x00, 0x8C, 0x65, 0x59 + } + }, + { + 17, + 79, + 0x39, + 0x003C, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x3C, 0x00, 0x8D, 0x00, 0x8E, 0x00, 0x8F, 0x00, 0xB1, 0x00, 0xB2, 0x00, 0xB3, 0x00, 0x90, 0x00, 0x91, 0x00, 0x92, 0x00, 0x93, 0x00, 0xB4, 0x00, 0xB5, 0x00, 0xB6, 0x00, 0x94, 0x00, 0x95, 0x00, 0x96, 0x00, 0x97, 0x00, 0xB7, 0x00, 0xB8, 0x00, 0xB9, 0x00, 0x98, 0x00, 0x99, 0x00, 0x9A, 0x00, 0x9B, 0x00, 0xBA, 0x00, 0xBB, 0x00, 0xBC, 0x00, 0x9C, 0x00, 0x9D, 0x00, 0x9E, 0x00, 0x9F, 0x00, 0xBD, 0x6D, 0x6A + } + }, + { + 18, + 79, + 0x39, + 0x003D, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x3D, 0x00, 0xBE, 0x00, 0xBF, 0x00, 0xA4, 0x00, 0xC0, 0x00, 0xFF, 0x11, 0x06, 0x12, 0x02, 0x13, 0x87, 0x14, 0x03, 0x1B, 0x30, 0x1C, 0x00, 0x19, 0x24, 0x1A, 0x30, 0x0A, 0x3C, 0x0B, 0x3C, 0xFF, 0x01, 0x02, 0x06, 0x00, 0x01, 0x02, 0x01, 0x00, 0x02, 0x00, 0x02, 0x01, 0x02, 0x00, 0x01, 0x01, 0x02, 0x00, 0x02, 0x01, 0x00, 0x73, 0x97, 0x55, 0xAE, 0x04, 0x55, 0xAF, 0xAB, 0x55, 0xB0, 0x04, 0x55, 0x6F, 0x6F + } + }, + { + 19, + 79, + 0x39, + 0x003E, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x3E, 0xB1, 0x00, 0x55, 0xB2, 0x00, 0x7C, 0x0F, 0x8C, 0x7C, 0x0E, 0x61, 0x7F, 0x10, 0x70, 0xCF, 0x50, 0x00, 0x08, 0x50, 0x0D, 0x57, 0xD5, 0x7C, 0x0F, 0xBF, 0x18, 0x50, 0x01, 0x08, 0x50, 0x0E, 0x57, 0x28, 0x7C, 0x0F, 0xBF, 0x18, 0x50, 0x02, 0x08, 0x50, 0x0E, 0x57, 0xCF, 0x7C, 0x0F, 0xBF, 0x18, 0x50, 0x03, 
0x08, 0x50, 0x0F, 0x57, 0x4A, 0x7C, 0x0F, 0xBF, 0x18, 0x70, 0xCF, 0x20, 0x7F, 0x38, 0x76, 0x7E + } + }, + { + 20, + 79, + 0x39, + 0x003F, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x3F, 0x02, 0x10, 0x08, 0x4F, 0x52, 0xF9, 0x64, 0x08, 0x64, 0x03, 0x00, 0x54, 0xFC, 0x18, 0x18, 0x20, 0x70, 0xCF, 0x62, 0xE3, 0x00, 0x10, 0x08, 0x28, 0x39, 0xFF, 0xA0, 0x30, 0x4F, 0x54, 0xFD, 0x52, 0xFC, 0x39, 0x00, 0xA0, 0x13, 0x11, 0x06, 0xE0, 0x01, 0x70, 0xCF, 0x71, 0x10, 0x80, 0x09, 0x70, 0xCF, 0x71, 0x20, 0x80, 0x03, 0x71, 0x30, 0x18, 0x20, 0x75, 0x09, 0x00, 0x10, 0x08, 0x28, 0x4F, 0x47, 0x21 + } + }, + { + 21, + 79, + 0x39, + 0x0040, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x40, 0x59, 0xFD, 0x61, 0x00, 0x18, 0x20, 0x75, 0x09, 0x00, 0x8F, 0xC6, 0x38, 0xFC, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x08, 0x10, 0x5D, 0xD0, 0x08, 0x5D, 0xD3, 0x08, 0x5D, 0xD4, 0x08, 0x5D, 0xD5, 0x08, 0x70, 0x3F, 0x71, 0x80, 0x62, 0xD0, 0x00, 0x18, 0x60, 0xD5, 0x18, 0x60, 0xD4, 0x18, 0x60, 0xD3, 0x18, 0x60, 0xD0, 0x20, 0x18, 0x7E, 0x08, 0x51, 0x54, 0x04, 0x01, 0x51, 0x53, 0x0C, 0x00, 0x51, 0xB4, 0xFC + } + }, + { + 22, + 79, + 0x39, + 0x0041, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x41, 0x54, 0x04, 0x03, 0x51, 0x53, 0x0C, 0x02, 0x51, 0x54, 0x04, 0x05, 0x51, 0x53, 0x0C, 0x04, 0x51, 0x54, 0x04, 0x07, 0x51, 0x53, 0x0C, 0x06, 0x18, 0x08, 0x51, 0x0B, 0x04, 0x0D, 0x51, 0x0A, 0x0C, 0x0C, 0x41, 0x23, 0xFE, 0x55, 0xBB, 0x00, 0x51, 0x55, 0x60, 0x21, 0x62, 0xDB, 0xFE, 0x43, 0x23, 0x01, 0x18, 0x7E, 0x05, 0x62, 0xD0, 0x00, 0x51, 0x42, 0x08, 0x26, 0x42, 0xEF, 0x7C, 0x19, 0x73, 0xD7, 0x43 + } + }, + { + 23, + 79, + 0x39, + 0x0042, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x42, 0x7C, 0x19, 0x73, 0x62, 0xD0, 0x00, 0x18, 0x53, 0x42, 0x70, 0xBF, 0x57, 0x98, 0x62, 0xD3, 0x05, 0x52, 0x00, 0x73, 0x54, 0x00, 0x62, 0xD3, 0x05, 0x54, 0x00, 0x79, 0xDF, 0xF1, 0x7C, 0x19, 0x64, 0x7C, 0x19, 0x64, 0x70, 0xBF, 0x57, 0x98, 0x62, 0xD3, 0x05, 0x52, 0x00, 0x62, 0xD3, 0x08, 0x54, 0x00, 0x62, 0xD3, 0x07, 0x56, 0x00, 0x00, 0x79, 0xDF, 0xEE, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x7F, 0x12, 0xBA + } + }, + { + 24, + 79, + 0x39, + 0x0043, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x43, 0x5C, 0x51, 0x41, 0xE0, 0x01, 0x80, 0x13, 0x80, 0x08, 0x80, 0x01, 0x5B, 0x9F, 0xF1, 0x80, 0x77, 0x62, 0xD3, 0x05, 0x51, 0x57, 0x54, 0x00, 0x80, 0x6E, 0x62, 0xD3, 0x05, 0x51, 0x57, 0x73, 0x53, 0x46, 0x47, 0x42, 0x07, 0xB0, 0x05, 0x54, 0x00, 0x80, 0x15, 0x47, 0x42, 0x04, 0xA0, 0x10, 0x62, 0xD3, 0x05, 0x3B, 0x00, 0xA0, 0x09, 0xC0, 0x04, 0x78, 0x80, 0x02, 0x74, 0x54, 0x00, 0x62, 0xD3, 0xA3, 0xDD + } + }, + { + 25, + 79, + 0x39, + 0x0044, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x44, 0x08, 0x13, 0x00, 0xD0, 0x0E, 0x62, 0xD3, 0x02, 0x56, 0x00, 0x00, 0x3C, 0x0E, 0x00, 0xB0, 0x37, 0x80, 0x16, 0x62, 0xD3, 0x02, 0x08, 0x11, 0x05, 0xD0, 0x03, 0x50, 0x00, 0x54, 0x00, 0x18, 0x3A, 0x15, 0xD0, 0x24, 0x51, 0x0E, 0xB0, 0x20, 0x62, 0xD3, 0x08, 0x52, 0x00, 0x53, 0x45, 0x51, 0x47, 0x12, 0x45, 0x1E, 0x45, 0x00, 0x62, 0xD3, 0x07, 0x03, 0x00, 0x0E, 0x45, 0x00, 0x54, 0x00, 0x51, 0x53, 0x3E + } + }, + { + 26, + 79, + 0x39, + 0x0045, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x45, 0x45, 0x62, 0xD3, 0x08, 0x54, 0x00, 0x7F, 0x7C, 0x21, 0xD2, 0x08, 0x18, 0x7F, 0x50, 0xFF, 0x3C, 0x10, 0x80, 0xC0, 0x11, 0x34, 0x12, 0x76, 0x12, 0x34, 0x11, 0x0E, 0x11, 0x00, 0x34, 0x10, 0x0E, 0x10, 
0x00, 0x53, 0x0F, 0x50, 0x00, 0x53, 0x48, 0x53, 0x49, 0x53, 0x4A, 0x53, 0x4B, 0x55, 0x46, 0x18, 0x65, 0x12, 0x6B, 0x11, 0x6B, 0x10, 0x6B, 0x4B, 0x6B, 0x4A, 0x6B, 0x49, 0x51, 0x4B, 0x1A, 0xFD, 0x93 + } + }, + { + 27, + 79, + 0x39, + 0x0046, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x46, 0x14, 0x51, 0x4A, 0x1A, 0x13, 0x51, 0x49, 0x19, 0x00, 0xC0, 0x0D, 0x53, 0x49, 0x51, 0x14, 0x14, 0x4B, 0x51, 0x13, 0x1C, 0x4A, 0x76, 0x12, 0x7A, 0x46, 0xBF, 0xD7, 0x50, 0xFF, 0x3C, 0x0F, 0x80, 0xC0, 0x11, 0x34, 0x12, 0x76, 0x12, 0x34, 0x11, 0x0E, 0x11, 0x00, 0x34, 0x10, 0x0E, 0x10, 0x00, 0x34, 0x0F, 0x7F, 0x50, 0x00, 0x53, 0x48, 0x53, 0x49, 0x53, 0x4A, 0x53, 0x4B, 0x51, 0x12, 0x04, 0xCE, 0x36 + } + }, + { + 28, + 79, + 0x39, + 0x0047, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x47, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x90, 0x54, 0x90, 0x52, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x90, 0x48, 0x90, 0x46, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x90, 0x3C, 0x90, 0x3A, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x90, 0x30, 0x90, 0x2E, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x90, 0x24, 0x90, 0x22, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0xA3, 0xE1 + } + }, + { + 29, + 79, + 0x39, + 0x0048, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x48, 0x48, 0x90, 0x18, 0x90, 0x16, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x90, 0x0C, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x90, 0x02, 0x90, 0x00, 0x70, 0xFB, 0x6E, 0x48, 0x6E, 0x49, 0x6E, 0x4A, 0x6E, 0x4B, 0x7F, 0x50, 0x00, 0x53, 0x48, 0x53, 0x49, 0x53, 0x4A, 0x53, 0x4B, 0x9F, 0xE9, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x9F, 0xDF, 0x51, 0x12, 0x04, 0x49, 0xC6, 0x28 + } + }, + { + 30, + 79, + 0x39, + 0x0049, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x9F, 0xD5, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x9F, 0xCB, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x9F, 0xC1, 0x9F, 0xBF, 0x9F, 0xBD, 0x9F, 0xBB, 0x9F, 0xB9, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x9F, 0xAF, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x9F, 0xA5, 0x9F, 0xA3, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x32, 0x01 + } + }, + { + 31, + 79, + 0x39, + 0x004A, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x4A, 0x9F, 0x99, 0x51, 0x12, 0x04, 0x49, 0x51, 0x11, 0x0C, 0x48, 0x9F, 0x8F, 0x9F, 0x8D, 0x8F, 0x8C, 0x70, 0xBF, 0x62, 0xD0, 0x00, 0x53, 0x44, 0x55, 0x0F, 0x80, 0x55, 0x10, 0x60, 0x55, 0x11, 0x00, 0x62, 0xD3, 0x02, 0x50, 0x10, 0x57, 0x98, 0x54, 0x00, 0x79, 0xDF, 0xFC, 0x62, 0xD3, 0x01, 0x51, 0x0F, 0x57, 0x1A, 0x54, 0xA0, 0x79, 0xDF, 0xFC, 0x55, 0x3D, 0x00, 0x7C, 0x17, 0x18, 0x55, 0x45, 0x6E, 0x7A + } + }, + { + 32, + 79, + 0x39, + 0x004B, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x4B, 0x00, 0x62, 0xD3, 0x01, 0x58, 0xA1, 0x51, 0x10, 0x54, 0xC5, 0x62, 0xD3, 0x01, 0x58, 0xA1, 0x51, 0x0F, 0x54, 0xA0, 0x55, 0x4A, 0x80, 0x52, 0xC5, 0x70, 0xCF, 0x71, 0x20, 0x60, 0xA5, 0x70, 0xCF, 0x62, 0xD3, 0x01, 0x58, 0xA1, 0x52, 0xA0, 0x60, 0xFD, 0x55, 0x4B, 0x10, 0x7C, 0x1B, 0x87, 0x51, 0xA0, 0x01, 0x00, 0x5C, 0x62, 0xD3, 0x02, 0x51, 0x45, 0x7C, 0x19, 0x8F, 0x43, 0xA4, 0x08, 0x47, 0x37, 0x0D + } + }, + { + 33, + 79, + 0x39, + 0x004C, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x4C, 0x9F, 0x01, 0xA0, 0x03, 0x71, 0x01, 0x70, 0xCF, 0x7C, 0x1C, 0x8E, 0x7C, 0x1C, 0xE3, 0x55, 0x56, 
0x00, 0x55, 0x57, 0xFF, 0x55, 0x48, 0x07, 0x62, 0xD3, 0x00, 0x58, 0x48, 0x3D, 0x70, 0x00, 0xA0, 0x2A, 0x52, 0x68, 0x08, 0x51, 0x48, 0x64, 0x5C, 0x52, 0x59, 0x20, 0x62, 0xD3, 0x02, 0x3A, 0x44, 0xD0, 0x06, 0x51, 0x4B, 0x73, 0x25, 0x00, 0x51, 0x4B, 0x67, 0x2D, 0x00, 0x52, 0x00, 0x3A, 0x56, 0x92, 0xC4 + } + }, + { + 34, + 79, + 0x39, + 0x004D, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x4D, 0xC0, 0x03, 0x53, 0x56, 0x3A, 0x57, 0xD0, 0x03, 0x53, 0x57, 0x7A, 0x48, 0xDF, 0xCA, 0x68, 0x4B, 0xDF, 0x9B, 0x51, 0x4A, 0xA0, 0x42, 0x47, 0x11, 0x01, 0xB0, 0x3D, 0x58, 0xA1, 0x62, 0xD3, 0x01, 0x51, 0x57, 0x02, 0x56, 0x39, 0x1F, 0xA0, 0x30, 0xD0, 0x06, 0x51, 0x4A, 0x73, 0x25, 0xA0, 0x51, 0x4A, 0x67, 0x21, 0x7F, 0x2D, 0xA0, 0x68, 0x4A, 0x26, 0x4A, 0x7F, 0x55, 0x48, 0x07, 0x62, 0xD3, 0xBE, 0x1D + } + }, + { + 35, + 79, + 0x39, + 0x004E, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x4E, 0x00, 0x58, 0x48, 0x3D, 0x70, 0x00, 0xA0, 0x0A, 0x52, 0x68, 0x5C, 0x62, 0xD3, 0x02, 0x56, 0x00, 0x10, 0x7A, 0x48, 0xDF, 0xEA, 0x8F, 0x4A, 0x47, 0x11, 0x02, 0xB0, 0x32, 0x3C, 0x56, 0x1F, 0xC0, 0x2D, 0x62, 0xD3, 0x01, 0x58, 0xA1, 0x3D, 0xC5, 0x40, 0xA0, 0x23, 0x17, 0xC5, 0x20, 0x55, 0x48, 0x07, 0x62, 0xD3, 0x00, 0x62, 0xD3, 0x00, 0x58, 0x48, 0x3D, 0x70, 0x00, 0xA0, 0x0A, 0x52, 0x68, 0xD7, 0x50 + } + }, + { + 36, + 79, + 0x39, + 0x004F, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x4F, 0x5C, 0x62, 0xD3, 0x02, 0x56, 0x00, 0x10, 0x7A, 0x48, 0xDF, 0xEA, 0x8E, 0xFE, 0x76, 0xA1, 0x51, 0x45, 0x7C, 0x1A, 0xE9, 0x76, 0x45, 0x3C, 0x45, 0x03, 0xCE, 0xE7, 0x7C, 0x17, 0x55, 0x76, 0x3D, 0x3C, 0x3D, 0x09, 0xCE, 0xD7, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x70, 0xBF, 0x62, 0xD0, 0x00, 0x55, 0x44, 0x99, 0x53, 0x0F, 0x5A, 0x10, 0x55, 0x11, 0x03, 0x8E, 0xA6, 0x70, 0xBF, 0x62, 0xD0, 0x00, 0xAE, 0xFF + } + }, + { + 37, + 79, + 0x39, + 0x0050, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x50, 0x55, 0x44, 0x99, 0x53, 0x0F, 0x55, 0x10, 0x60, 0x55, 0x11, 0x01, 0x8E, 0x94, 0x70, 0xBF, 0x62, 0xD0, 0x00, 0x55, 0x44, 0x99, 0x55, 0x0F, 0x80, 0x53, 0x10, 0x55, 0x11, 0x02, 0x8E, 0x82, 0x90, 0x11, 0x55, 0x38, 0x03, 0x51, 0x38, 0x90, 0x1A, 0x76, 0x38, 0x3C, 0x38, 0x0A, 0xCF, 0xF6, 0x90, 0x66, 0x7F, 0x62, 0xD5, 0x02, 0x62, 0xD0, 0x00, 0x55, 0x34, 0x99, 0x55, 0x0E, 0x00, 0x55, 0x33, 0x76, 0x90 + } + }, + { + 38, + 79, + 0x39, + 0x0051, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x51, 0x01, 0x7F, 0x70, 0xBF, 0x62, 0xD3, 0x02, 0x62, 0xD5, 0x02, 0x62, 0xD0, 0x00, 0x11, 0x02, 0x53, 0x45, 0x51, 0x33, 0x02, 0x4C, 0x53, 0x33, 0x53, 0x32, 0x55, 0x44, 0x01, 0x58, 0x32, 0x52, 0x00, 0x3A, 0x16, 0xC0, 0x24, 0x3B, 0x12, 0xC0, 0x20, 0x3B, 0x11, 0xC0, 0x1C, 0x3B, 0x10, 0xC0, 0x18, 0x3B, 0x01, 0xC0, 0x14, 0x78, 0x3B, 0xFF, 0xC0, 0x0F, 0x3B, 0xF0, 0xC0, 0x0B, 0x3B, 0xEF, 0xC0, 0x6C, 0x7D + } + }, + { + 39, + 79, + 0x39, + 0x0052, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x52, 0x07, 0x3B, 0xEE, 0xC0, 0x03, 0x91, 0x84, 0x76, 0x32, 0x76, 0x44, 0x51, 0x4C, 0x78, 0x3A, 0x44, 0xBF, 0xCB, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x70, 0xBF, 0x62, 0xD3, 0x02, 0x62, 0xD5, 0x02, 0x62, 0xD0, 0x00, 0x55, 0x32, 0x01, 0x55, 0x44, 0x01, 0x55, 0x45, 0x00, 0x58, 0x32, 0x52, 0x00, 0x3A, 0x16, 0xC0, 0x18, 0x3B, 0x12, 0xC0, 0x14, 0x3B, 0x11, 0xC0, 0x10, 0x3B, 0x10, 0xC0, 0x0C, 0x3B, 0x06, 0xB2 + } + }, + { + 40, + 79, + 0x39, + 0x0053, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 
0x53, 0x01, 0xC0, 0x08, 0x78, 0x3B, 0xFF, 0xC0, 0x03, 0x91, 0x41, 0x76, 0x32, 0x76, 0x44, 0x51, 0x4C, 0x78, 0x3A, 0x44, 0xBF, 0xD7, 0x55, 0x44, 0x01, 0x51, 0x4D, 0x78, 0x53, 0x45, 0x51, 0x4E, 0x12, 0x4C, 0x74, 0x74, 0x53, 0x32, 0x58, 0x32, 0x52, 0x00, 0x3A, 0x16, 0xC0, 0x18, 0x3B, 0x01, 0xC0, 0x14, 0x78, 0x3B, 0xFF, 0xC0, 0x0F, 0x3B, 0xF0, 0xC0, 0x0B, 0x3B, 0xEF, 0xC0, 0x07, 0x3B, 0xEE, 0xF2, 0x8B + } + }, + { + 41, + 79, + 0x39, + 0x0054, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x54, 0xC0, 0x03, 0x91, 0x07, 0x76, 0x32, 0x76, 0x44, 0x51, 0x4C, 0x78, 0x3A, 0x44, 0xBF, 0xD7, 0x51, 0x4C, 0x78, 0x53, 0x44, 0x55, 0x45, 0x01, 0x02, 0x4C, 0x53, 0x32, 0x58, 0x32, 0x52, 0x00, 0x3A, 0x16, 0xC0, 0x18, 0x3B, 0x11, 0xC0, 0x14, 0x3B, 0x10, 0xC0, 0x10, 0x78, 0x3B, 0xFF, 0xC0, 0x0B, 0x3B, 0xEF, 0xC0, 0x07, 0x3B, 0xEE, 0xC0, 0x03, 0x90, 0xD1, 0x51, 0x4C, 0x04, 0x32, 0x76, 0x45, 0x88, 0xB8 + } + }, + { + 42, + 79, + 0x39, + 0x0055, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x55, 0x51, 0x4D, 0x78, 0x3A, 0x45, 0xBF, 0xD5, 0x55, 0x44, 0x00, 0x55, 0x45, 0x01, 0x51, 0x4C, 0x53, 0x32, 0x58, 0x32, 0x52, 0x00, 0x3A, 0x16, 0xC0, 0x18, 0x3B, 0x12, 0xC0, 0x14, 0x3B, 0x11, 0xC0, 0x10, 0x3B, 0x01, 0xC0, 0x0C, 0x78, 0x3B, 0xF0, 0xC0, 0x07, 0x3B, 0xEF, 0xC0, 0x03, 0x90, 0x9B, 0x51, 0x4C, 0x04, 0x32, 0x76, 0x45, 0x51, 0x4D, 0x78, 0x3A, 0x45, 0xBF, 0xD5, 0x50, 0x00, 0x53, 0xA4, 0xF1 + } + }, + { + 43, + 79, + 0x39, + 0x0056, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x56, 0x44, 0x53, 0x45, 0x55, 0x32, 0x00, 0x5C, 0x52, 0x00, 0x3A, 0x16, 0xC0, 0x0F, 0x3B, 0x12, 0xC0, 0x0B, 0x3B, 0x11, 0xC0, 0x07, 0x3B, 0x01, 0xC0, 0x03, 0x90, 0x70, 0x55, 0x44, 0x00, 0x51, 0x4D, 0x78, 0x53, 0x45, 0x51, 0x4E, 0x12, 0x4C, 0x74, 0x53, 0x32, 0x5C, 0x52, 0x00, 0x3A, 0x16, 0xC0, 0x10, 0x3B, 0x01, 0xC0, 0x0C, 0x78, 0x3B, 0xF0, 0xC0, 0x07, 0x3B, 0xEF, 0xC0, 0x03, 0x90, 0x4B, 0x9F, 0xE8 + } + }, + { + 44, + 79, + 0x39, + 0x0057, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x57, 0x55, 0x45, 0x00, 0x51, 0x4C, 0x78, 0x53, 0x44, 0x53, 0x32, 0x5C, 0x52, 0x00, 0x3A, 0x16, 0xC0, 0x10, 0x3B, 0x11, 0xC0, 0x0C, 0x3B, 0x10, 0xC0, 0x08, 0x78, 0x3B, 0xFF, 0xC0, 0x03, 0x90, 0x2B, 0x51, 0x4D, 0x53, 0x45, 0x51, 0x4C, 0x53, 0x44, 0x51, 0x4E, 0x7A, 0x44, 0x7A, 0x45, 0x53, 0x32, 0x5C, 0x52, 0x00, 0x3A, 0x16, 0xC0, 0x10, 0x78, 0x3B, 0xFF, 0xC0, 0x0B, 0x3B, 0xEF, 0xC0, 0x07, 0x3B, 0x21 + } + }, + { + 45, + 79, + 0x39, + 0x0058, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x58, 0x3B, 0xEE, 0xC0, 0x03, 0x90, 0x05, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x51, 0x43, 0x01, 0x03, 0x3A, 0x0E, 0xC0, 0x0F, 0x51, 0x45, 0x3F, 0x34, 0x51, 0x44, 0x3F, 0x34, 0x51, 0x32, 0x3F, 0x34, 0x76, 0x0E, 0x7F, 0x84, 0x88, 0x8C, 0x90, 0x94, 0x98, 0x9C, 0x9C, 0x9C, 0x10, 0x80, 0x10, 0x80, 0x10, 0x80, 0x10, 0x20, 0x40, 0x62, 0xD0, 0x00, 0x55, 0x4C, 0x11, 0x55, 0x4D, 0x09, 0x55, 0x4E, 0x98, 0xAB, 0x02 + } + }, + { + 46, + 79, + 0x39, + 0x0059, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x59, 0x55, 0x4F, 0x03, 0x55, 0x50, 0x97, 0x55, 0x51, 0x01, 0x55, 0x52, 0xDF, 0x55, 0x15, 0x08, 0x55, 0x16, 0x08, 0x55, 0x17, 0x08, 0x55, 0x42, 0x1C, 0x55, 0x43, 0x04, 0x55, 0xA2, 0x00, 0x55, 0xA3, 0x00, 0x55, 0xA4, 0x48, 0x55, 0xA5, 0x04, 0x55, 0xA6, 0x08, 0x55, 0xA9, 0x01, 0x55, 0xA7, 0x0C, 0x55, 0xA8, 0x05, 0x55, 0x18, 0x04, 0x55, 0xAD, 0x02, 0x55, 0x40, 0x00, 0x55, 0x3F, 0x00, 0x51, 0xE1, 0x6F + } + }, + { + 47, + 
79, + 0x39, + 0x005A, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x5A, 0xA9, 0xA0, 0x08, 0x51, 0xA7, 0x58, 0xA8, 0x7C, 0x18, 0xF9, 0x70, 0xBF, 0x62, 0xD3, 0x01, 0x57, 0x3F, 0x50, 0x09, 0x28, 0x54, 0xA0, 0x79, 0xDF, 0xF9, 0x70, 0x3F, 0x71, 0xC0, 0x5D, 0xFC, 0x70, 0xCF, 0x71, 0x10, 0x62, 0x76, 0x07, 0x43, 0xE2, 0x08, 0x70, 0xCF, 0x71, 0x20, 0x62, 0xA4, 0x01, 0x62, 0xC0, 0x00, 0x39, 0x04, 0xD0, 0x04, 0x43, 0xC8, 0x04, 0x7C, 0x19, 0x41, 0x70, 0xCF, 0x71, 0x3B, 0x24 + } + }, + { + 48, + 79, + 0x39, + 0x005B, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x5B, 0x20, 0x43, 0x81, 0x0E, 0x43, 0x85, 0x0E, 0x43, 0x89, 0x0E, 0x43, 0x8D, 0x0E, 0x43, 0x91, 0x0E, 0x43, 0x95, 0x0E, 0x43, 0x99, 0x0E, 0x43, 0x9D, 0x0E, 0x70, 0xCF, 0x55, 0x9A, 0x07, 0x55, 0x9C, 0x02, 0x55, 0x9E, 0x06, 0x55, 0x9D, 0x00, 0x50, 0x48, 0x57, 0x00, 0x7C, 0x18, 0xB7, 0x71, 0x30, 0x62, 0x1B, 0x40, 0x70, 0xCF, 0x62, 0xA2, 0x10, 0x7C, 0x2A, 0xD8, 0x50, 0x04, 0x7C, 0x18, 0xE7, 0x6B, 0x85 + } + }, + { + 49, + 79, + 0x39, + 0x005C, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x5C, 0x70, 0xCF, 0x7C, 0x19, 0x4E, 0x7F, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x00, 0x03, 0x06, 0x09, 0x0C, 0x0F, 0x12, 0x15, 0x18, 0x62, 0xD0, 0x00, 0x70, 0xCF, 0x71, 0x10, 0x51, 0x3D, 0xF0, 0x60, 0x5C, 0x51, 0x3D, 0xF0, 0x75, 0x73, 0x53, 0x09, 0x5E, 0x00, 0x22, 0x09, 0x61, 0x00, 0x70, 0xCF, 0x71, 0x20, 0x51, 0x3D, 0xFE, 0xE9, 0x5C, 0x51, 0x3D, 0xFE, 0xED, 0x53, 0x09, 0xBF, 0x2E + } + }, + { + 50, + 79, + 0x39, + 0x005D, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x5D, 0x5E, 0x00, 0x2A, 0x09, 0x61, 0x00, 0x70, 0xCF, 0x51, 0x3D, 0xFF, 0xBA, 0x53, 0xA0, 0x51, 0x3D, 0xFF, 0xBD, 0x53, 0xA1, 0x7F, 0x70, 0xCF, 0x71, 0x20, 0x51, 0x3D, 0xFE, 0xC5, 0x5C, 0x51, 0x3D, 0xFE, 0xC9, 0x73, 0x53, 0x09, 0x5E, 0x00, 0x22, 0x09, 0x61, 0x00, 0x70, 0xCF, 0x71, 0x10, 0x51, 0x3D, 0xF0, 0x10, 0x5C, 0x51, 0x3D, 0xF0, 0x25, 0x53, 0x09, 0x5E, 0x00, 0x2A, 0x09, 0x61, 0x00, 0x4E, 0x4D + } + }, + { + 51, + 79, + 0x39, + 0x005E, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x5E, 0x70, 0xCF, 0x7F, 0x0C, 0x0C, 0x00, 0x10, 0x10, 0x08, 0x00, 0x04, 0x08, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x00, 0x04, 0x08, 0x00, 0x00, 0x08, 0x10, 0x10, 0x10, 0x10, 0x10, 0x01, 0x80, 0x10, 0x01, 0x80, 0x01, 0x40, 0x04, 0x02, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x02, 0x40, 0x08, 0x80, 0x20, 0x80, 0x08, 0x04, 0x02, 0x40, 0x20, 0x62, 0xD0, 0x00, 0x55, 0x9D, 0x00, 0x51, 0xA6, 0x91, 0x11, 0xD4 + } + }, + { + 52, + 79, + 0x39, + 0x005F, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x5F, 0x76, 0x51, 0xA4, 0x58, 0xA3, 0x7C, 0x18, 0xB7, 0x51, 0xA5, 0x7C, 0x18, 0xE7, 0x70, 0xCF, 0x71, 0x20, 0x50, 0x00, 0x60, 0x80, 0x60, 0x84, 0x60, 0x88, 0x60, 0x8C, 0x60, 0x90, 0x60, 0x94, 0x60, 0x98, 0x60, 0x9C, 0x60, 0x82, 0x60, 0x86, 0x60, 0x8A, 0x60, 0x8E, 0x60, 0x92, 0x60, 0x96, 0x60, 0x9A, 0x60, 0x9E, 0x60, 0xC0, 0x43, 0x81, 0x04, 0x43, 0x85, 0x04, 0x43, 0x89, 0x04, 0x43, 0x8D, 0x86, 0xBF + } + }, + { + 53, + 79, + 0x39, + 0x0060, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x60, 0x04, 0x43, 0x91, 0x04, 0x43, 0x95, 0x04, 0x43, 0x99, 0x04, 0x43, 0x9D, 0x04, 0x71, 0x30, 0x71, 0x30, 0x62, 0x1F, 0x00, 0x62, 0x1B, 0x70, 0x62, 0x13, 0x87, 0x70, 0xCF, 0x71, 0x10, 0x55, 0x09, 0x19, 0x51, 0x09, 0xFF, 0x5E, 0x5C, 0x51, 0x09, 0xFF, 0x73, 0x53, 0x45, 0x5E, 0x00, 0x2A, 0x45, 0x61, 0x00, 0x7A, 0x09, 
0xDF, 0xEC, 0x70, 0xCF, 0x41, 0xA2, 0x3F, 0x55, 0x40, 0x00, 0x55, 0x3F, 0xDC, 0x6C + } + }, + { + 54, + 79, + 0x39, + 0x0061, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x61, 0x00, 0x51, 0xA9, 0xA0, 0x08, 0x51, 0xA7, 0x58, 0xA8, 0x7C, 0x18, 0xF9, 0x7F, 0x41, 0xE0, 0xFB, 0x71, 0x30, 0x41, 0x1B, 0xBF, 0x70, 0xCF, 0x71, 0x20, 0x41, 0x81, 0xFD, 0x41, 0x85, 0xFD, 0x41, 0x89, 0xFD, 0x41, 0x8D, 0xFD, 0x41, 0x91, 0xFD, 0x41, 0x95, 0xFD, 0x41, 0x99, 0xFD, 0x41, 0x9D, 0xFD, 0x70, 0xCF, 0x41, 0xA2, 0xEF, 0x7C, 0x19, 0x5A, 0x70, 0xCF, 0x71, 0x10, 0x41, 0xE2, 0xF7, 0x90, 0xD5 + } + }, + { + 55, + 79, + 0x39, + 0x0062, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x62, 0x70, 0xCF, 0x7F, 0x7C, 0x19, 0x41, 0x70, 0xCF, 0x71, 0x10, 0x43, 0xE2, 0x08, 0x71, 0x30, 0x43, 0x1B, 0x40, 0x70, 0xCF, 0x71, 0x20, 0x43, 0x81, 0x02, 0x43, 0x85, 0x02, 0x43, 0x89, 0x02, 0x43, 0x8D, 0x02, 0x43, 0x91, 0x02, 0x43, 0x95, 0x02, 0x43, 0x99, 0x02, 0x43, 0x9D, 0x02, 0x70, 0xCF, 0x43, 0xA2, 0x10, 0x7C, 0x19, 0x4E, 0x7F, 0x62, 0xD0, 0x00, 0x53, 0x49, 0x5A, 0x48, 0x53, 0x9B, 0x24, 0xFE + } + }, + { + 56, + 79, + 0x39, + 0x0063, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x63, 0x5B, 0x21, 0x01, 0xA0, 0x06, 0x2E, 0x9E, 0x01, 0x80, 0x04, 0x26, 0x9E, 0xFE, 0x68, 0x48, 0x6E, 0x49, 0x51, 0x49, 0x78, 0x70, 0xCF, 0x71, 0x20, 0x60, 0xC9, 0x70, 0xCF, 0x7C, 0x2A, 0xBA, 0x7F, 0x00, 0x04, 0x0C, 0x1C, 0x3C, 0x7C, 0xFC, 0x62, 0xD0, 0x00, 0x53, 0x3E, 0x76, 0x3E, 0xFF, 0xF0, 0x26, 0x9C, 0x03, 0x2C, 0x9C, 0x7C, 0x2A, 0xBA, 0x7F, 0x62, 0xD0, 0x00, 0x53, 0xA7, 0x5A, 0xA8, 0xA6, 0x03 + } + }, + { + 57, + 79, + 0x39, + 0x0064, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x64, 0x90, 0x01, 0x7F, 0x62, 0xD0, 0x00, 0x08, 0x5A, 0x3E, 0x55, 0x40, 0xFF, 0x55, 0x3F, 0x01, 0x78, 0xA0, 0x0A, 0x06, 0x40, 0xFF, 0x0E, 0x3F, 0x01, 0x78, 0xBF, 0xF8, 0x51, 0x3E, 0x68, 0x3F, 0x6E, 0x40, 0x78, 0xDF, 0xFA, 0x16, 0x40, 0x7F, 0x1E, 0x3F, 0x00, 0x18, 0x78, 0x64, 0x64, 0x26, 0x9C, 0x03, 0x2C, 0x9C, 0x7C, 0x2A, 0xBA, 0x7F, 0x62, 0xD0, 0x00, 0x78, 0x53, 0x9A, 0x7C, 0x2A, 0xBA, 0x11, 0xDA + } + }, + { + 58, + 79, + 0x39, + 0x0065, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x65, 0x7F, 0x70, 0xCF, 0x71, 0x20, 0x62, 0xA7, 0x89, 0x62, 0xA7, 0x49, 0x70, 0xCF, 0x7F, 0x70, 0xCF, 0x71, 0x20, 0x49, 0xC8, 0x08, 0xAF, 0xFC, 0x70, 0xCF, 0x7F, 0x70, 0xCF, 0x71, 0x20, 0x62, 0xA7, 0x09, 0x70, 0xCF, 0x7F, 0x70, 0xBF, 0x62, 0xD0, 0x00, 0x55, 0x41, 0x00, 0x93, 0xA8, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x70, 0xBF, 0x62, 0xD0, 0x00, 0x55, 0x41, 0x02, 0x93, 0x99, 0x70, 0x3F, 0x71, 0xB9, 0x2B + } + }, + { + 59, + 79, + 0x39, + 0x0066, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x66, 0xC0, 0x7F, 0x57, 0x98, 0x50, 0x0A, 0x28, 0x21, 0xE0, 0xB0, 0x04, 0x79, 0xDF, 0xF7, 0x7F, 0x70, 0xCF, 0x71, 0x10, 0x64, 0xE0, 0x01, 0x80, 0x09, 0x80, 0x75, 0x80, 0xE1, 0x81, 0x3D, 0x81, 0x49, 0x41, 0x00, 0xFD, 0x41, 0x0C, 0xF7, 0x41, 0x0C, 0xBF, 0x41, 0x00, 0x7F, 0x41, 0x10, 0xF7, 0x41, 0x10, 0xBF, 0x70, 0xCF, 0x71, 0x20, 0x5D, 0xF7, 0x53, 0x9F, 0x70, 0xFE, 0x43, 0x80, 0x04, 0x52, 0xE2, 0x7E + } + }, + { + 60, + 79, + 0x39, + 0x0067, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x67, 0x06, 0x60, 0x83, 0x06, 0x68, 0x06, 0x2E, 0x70, 0x01, 0x43, 0x84, 0x08, 0x52, 0x03, 0x60, 0x87, 0x06, 0x69, 0x03, 0x2E, 0x71, 0x01, 0x43, 0x88, 0x04, 0x52, 0x00, 0x60, 0x8B, 0x06, 0x6A, 0x00, 0x2E, 0x72, 0x01, 
0x43, 0x8C, 0x02, 0x52, 0x09, 0x60, 0x8F, 0x06, 0x6B, 0x09, 0x2E, 0x73, 0x01, 0x43, 0x90, 0x01, 0x52, 0x0C, 0x60, 0x93, 0x06, 0x6C, 0x0C, 0x2E, 0x74, 0x01, 0x43, 0x94, 0x02, 0x8C, 0xD3 + } + }, + { + 61, + 79, + 0x39, + 0x0068, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x68, 0x52, 0x0F, 0x60, 0x97, 0x06, 0x6D, 0x0F, 0x2E, 0x75, 0x01, 0x43, 0xA3, 0x3F, 0x7F, 0x41, 0x04, 0xBF, 0x41, 0x0C, 0xFB, 0x41, 0x0C, 0xDF, 0x41, 0x00, 0xDF, 0x41, 0x10, 0xFB, 0x41, 0x10, 0xDF, 0x70, 0xCF, 0x71, 0x20, 0x5D, 0xF7, 0x53, 0x9F, 0x70, 0xFE, 0x43, 0x80, 0x02, 0x52, 0x07, 0x60, 0x83, 0x06, 0x68, 0x07, 0x2E, 0x70, 0x01, 0x43, 0x84, 0x04, 0x52, 0x04, 0x60, 0x87, 0x06, 0x69, 0x46, 0x48 + } + }, + { + 62, + 79, + 0x39, + 0x0069, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x69, 0x04, 0x2E, 0x71, 0x01, 0x43, 0x88, 0x02, 0x52, 0x01, 0x60, 0x8B, 0x06, 0x6A, 0x01, 0x2E, 0x72, 0x01, 0x43, 0x8C, 0x04, 0x52, 0x0A, 0x60, 0x8F, 0x06, 0x6B, 0x0A, 0x2E, 0x73, 0x01, 0x43, 0x90, 0x02, 0x52, 0x0D, 0x60, 0x93, 0x06, 0x6C, 0x0D, 0x2E, 0x74, 0x01, 0x43, 0x94, 0x04, 0x52, 0x10, 0x60, 0x97, 0x06, 0x6D, 0x10, 0x2E, 0x75, 0x01, 0x43, 0xA3, 0x3F, 0x7F, 0x41, 0x08, 0xF7, 0x41, 0xC5, 0x47 + } + }, + { + 63, + 79, + 0x39, + 0x006A, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x6A, 0x0C, 0xFD, 0x41, 0x0C, 0xEF, 0x41, 0x08, 0x7F, 0x41, 0x10, 0xFD, 0x70, 0xCF, 0x71, 0x20, 0x5D, 0xF7, 0x53, 0x9F, 0x70, 0xFE, 0x43, 0x80, 0x01, 0x52, 0x08, 0x60, 0x83, 0x06, 0x68, 0x08, 0x2E, 0x70, 0x01, 0x43, 0x84, 0x02, 0x52, 0x05, 0x60, 0x87, 0x06, 0x69, 0x05, 0x2E, 0x71, 0x01, 0x43, 0x88, 0x01, 0x52, 0x02, 0x60, 0x8B, 0x06, 0x6A, 0x02, 0x2E, 0x72, 0x01, 0x43, 0x8C, 0x08, 0x52, 0x57, 0x6C + } + }, + { + 64, + 79, + 0x39, + 0x006B, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x6B, 0x0B, 0x60, 0x8F, 0x06, 0x6B, 0x0B, 0x2E, 0x73, 0x01, 0x43, 0x90, 0x04, 0x52, 0x0E, 0x60, 0x93, 0x06, 0x6C, 0x0E, 0x2E, 0x74, 0x01, 0x43, 0xA3, 0x1F, 0x7F, 0x70, 0xCF, 0x71, 0x20, 0x5D, 0xF7, 0x53, 0x9F, 0x70, 0xFE, 0x43, 0xA3, 0x00, 0x7F, 0x7F, 0x70, 0xCF, 0x71, 0x10, 0x64, 0xE0, 0x01, 0x80, 0x09, 0x80, 0x34, 0x80, 0x5F, 0x80, 0x84, 0x80, 0x8B, 0x43, 0x00, 0x02, 0x43, 0x0C, 0x08, 0x1D, 0xF9 + } + }, + { + 65, + 79, + 0x39, + 0x006C, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x6C, 0x43, 0x0C, 0x40, 0x43, 0x00, 0x80, 0x43, 0x10, 0x08, 0x43, 0x10, 0x40, 0x70, 0xCF, 0x71, 0x20, 0x41, 0x80, 0xFB, 0x41, 0x84, 0xF7, 0x41, 0x88, 0xFB, 0x41, 0x8C, 0xFD, 0x41, 0x90, 0xFE, 0x41, 0x94, 0xFD, 0x62, 0xA3, 0x00, 0x80, 0x5E, 0x43, 0x04, 0x40, 0x43, 0x0C, 0x04, 0x43, 0x0C, 0x20, 0x43, 0x00, 0x20, 0x43, 0x10, 0x04, 0x43, 0x10, 0x20, 0x70, 0xCF, 0x71, 0x20, 0x41, 0x80, 0xFD, 0x9E, 0xFC + } + }, + { + 66, + 79, + 0x39, + 0x006D, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x6D, 0x41, 0x84, 0xFB, 0x41, 0x88, 0xFD, 0x41, 0x8C, 0xFB, 0x41, 0x90, 0xFD, 0x41, 0x94, 0xFB, 0x62, 0xA3, 0x00, 0x80, 0x31, 0x43, 0x08, 0x08, 0x43, 0x0C, 0x02, 0x43, 0x0C, 0x10, 0x43, 0x08, 0x80, 0x43, 0x10, 0x02, 0x70, 0xCF, 0x71, 0x20, 0x41, 0x80, 0xFE, 0x41, 0x84, 0xFD, 0x41, 0x88, 0xFE, 0x41, 0x8C, 0xF7, 0x41, 0x90, 0xFB, 0x62, 0xA3, 0x00, 0x80, 0x0A, 0x70, 0xCF, 0x71, 0x20, 0x62, 0x2E, 0x1D + } + }, + { + 67, + 79, + 0x39, + 0x006E, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x6E, 0xA3, 0x00, 0x80, 0x01, 0x70, 0xCF, 0x7F, 0x62, 0xD3, 0x00, 0x57, 0x07, 0x52, 0x70, 0x54, 0x80, 0x52, 0x68, 
0x54, 0x78, 0x51, 0xA0, 0x56, 0x70, 0x00, 0x54, 0x68, 0x79, 0xDF, 0xEF, 0x7F, 0x62, 0xD5, 0x00, 0x62, 0xD3, 0x00, 0x58, 0xA0, 0x55, 0x09, 0x88, 0x50, 0x0A, 0x28, 0x21, 0x1F, 0x3F, 0x09, 0x75, 0x3C, 0x09, 0x99, 0xCF, 0xF4, 0x7F, 0x62, 0xD0, 0x00, 0x55, 0xAA, 0x00, 0x55, 0xAC, 0xE5, 0x8C + } + }, + { + 68, + 79, + 0x39, + 0x006F, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x6F, 0x00, 0x57, 0x07, 0x62, 0xD3, 0x00, 0x5B, 0x3D, 0x80, 0x00, 0xA0, 0x5A, 0x10, 0x64, 0x5C, 0x52, 0x58, 0x53, 0x56, 0x52, 0x59, 0x53, 0x57, 0x51, 0x3E, 0x6E, 0x56, 0x6E, 0x57, 0x78, 0xDF, 0xFA, 0x51, 0x40, 0x14, 0x57, 0x51, 0x3F, 0x1C, 0x56, 0xA0, 0x0E, 0xD0, 0x06, 0x55, 0x57, 0x00, 0x80, 0x04, 0x55, 0x57, 0xFF, 0x55, 0x56, 0x00, 0x20, 0x10, 0x52, 0x78, 0x5C, 0x62, 0xD3, 0x02, 0x51, 0x6C, 0x9B + } + }, + { + 69, + 79, + 0x39, + 0x0070, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x70, 0x57, 0x73, 0x54, 0x00, 0x39, 0xDC, 0xD0, 0x05, 0x39, 0x23, 0xD0, 0x04, 0x55, 0xAB, 0x01, 0x62, 0xD3, 0x08, 0x13, 0x00, 0xC0, 0x07, 0x39, 0x0F, 0xD0, 0x0B, 0x80, 0x05, 0x39, 0xF1, 0xC0, 0x05, 0x04, 0xAA, 0x76, 0xAC, 0x20, 0x79, 0xDF, 0x9C, 0x51, 0xAC, 0x47, 0xAA, 0x80, 0xA0, 0x03, 0x76, 0xAA, 0x68, 0xAA, 0x39, 0x02, 0xC0, 0x18, 0x47, 0xAA, 0x80, 0xA0, 0x03, 0x76, 0xAA, 0x68, 0xAA, 0x67, 0x92 + } + }, + { + 70, + 79, + 0x39, + 0x0071, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x71, 0x67, 0x39, 0x02, 0xC0, 0x0A, 0x47, 0xAA, 0x80, 0xA0, 0x03, 0x76, 0xAA, 0x68, 0xAA, 0x57, 0x07, 0x62, 0xD3, 0x00, 0x3D, 0x80, 0x00, 0xA0, 0x33, 0x10, 0x52, 0x78, 0x5C, 0x62, 0xD3, 0x02, 0x52, 0x00, 0x53, 0x47, 0x47, 0x42, 0x10, 0xA0, 0x1B, 0x51, 0xAA, 0x15, 0x00, 0xD0, 0x0B, 0x47, 0xAA, 0x80, 0xB0, 0x0E, 0x56, 0x00, 0x00, 0x80, 0x09, 0x47, 0xAA, 0x80, 0xA0, 0x04, 0x56, 0x00, 0xFF, 0xE5, 0x8F + } + }, + { + 71, + 79, + 0x39, + 0x0072, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x72, 0x52, 0x00, 0x73, 0x53, 0x57, 0x5B, 0x7C, 0x10, 0xC0, 0x20, 0x79, 0xDF, 0xC4, 0x7F, 0x62, 0xD0, 0x00, 0x70, 0xCF, 0x71, 0x20, 0x49, 0xC4, 0x01, 0xAF, 0xFC, 0x41, 0xA4, 0xF7, 0x41, 0xC4, 0xFE, 0x5D, 0xA8, 0x53, 0x59, 0x5D, 0xA9, 0x53, 0x58, 0x5D, 0xAB, 0x53, 0x5B, 0x5D, 0xAC, 0x53, 0x5A, 0x5D, 0xAE, 0x53, 0x5D, 0x5D, 0xAF, 0x53, 0x5C, 0x5D, 0xB1, 0x53, 0x5F, 0x5D, 0xB2, 0x53, 0x5E, 0x2F, 0x24 + } + }, + { + 72, + 79, + 0x39, + 0x0073, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x73, 0x5D, 0xB4, 0x53, 0x61, 0x5D, 0xB5, 0x53, 0x60, 0x5D, 0xB7, 0x53, 0x63, 0x5D, 0xB8, 0x53, 0x62, 0x5D, 0xBA, 0x53, 0x65, 0x5D, 0xBB, 0x53, 0x64, 0x5D, 0xBD, 0x53, 0x67, 0x5D, 0xBE, 0x53, 0x66, 0x70, 0xCF, 0x7F, 0x62, 0xD3, 0x00, 0x57, 0x07, 0x5B, 0x3D, 0x70, 0x00, 0xA0, 0x25, 0x10, 0x64, 0x5C, 0x51, 0x3E, 0x6F, 0x58, 0x6F, 0x59, 0x78, 0xDF, 0xFA, 0x51, 0x40, 0x15, 0x59, 0x51, 0x3F, 0x50, 0x67 + } + }, + { + 73, + 79, + 0x39, + 0x0074, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x74, 0x1D, 0x58, 0xA0, 0x0E, 0xD0, 0x06, 0x56, 0x59, 0x00, 0x80, 0x04, 0x56, 0x59, 0xFF, 0x56, 0x58, 0x00, 0x20, 0x79, 0xDF, 0xD4, 0x7F, 0x55, 0x3D, 0x00, 0x55, 0x37, 0x01, 0x7C, 0x17, 0x18, 0x9E, 0x7E, 0x58, 0xA1, 0x62, 0xD3, 0x01, 0x52, 0xA0, 0x60, 0xFD, 0x52, 0xC5, 0x70, 0xCF, 0x71, 0x20, 0x60, 0xA5, 0x70, 0xCF, 0x9E, 0x51, 0x50, 0x00, 0x57, 0x88, 0x9C, 0x53, 0x43, 0xA4, 0x08, 0x47, 0x25, 0x12 + } + }, + { + 74, + 79, + 0x39, + 0x0075, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x75, 0x9F, 
0x01, 0xA0, 0x03, 0x71, 0x01, 0x70, 0xCF, 0x51, 0x3D, 0xA0, 0x05, 0x9E, 0x6A, 0x51, 0x3D, 0x9F, 0x3C, 0x50, 0x00, 0x9D, 0x93, 0x62, 0xD3, 0x01, 0x58, 0xA1, 0x52, 0xA1, 0x60, 0xFD, 0x52, 0xC6, 0x70, 0xCF, 0x71, 0x20, 0x60, 0xA5, 0x70, 0xCF, 0x9E, 0x1C, 0x50, 0x01, 0x57, 0x88, 0x9C, 0x1E, 0x43, 0xA4, 0x08, 0x47, 0x9F, 0x01, 0xA0, 0x03, 0x71, 0x01, 0x70, 0xCF, 0x9E, 0x39, 0x9F, 0x07, 0xD7 + } + }, + { + 75, + 79, + 0x39, + 0x0076, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x76, 0x0D, 0x50, 0x01, 0x9D, 0x64, 0x62, 0xD3, 0x01, 0x58, 0xA1, 0x52, 0xA2, 0x60, 0xFD, 0x52, 0xC7, 0x70, 0xCF, 0x71, 0x20, 0x60, 0xA5, 0x70, 0xCF, 0x9D, 0xED, 0x50, 0x02, 0x57, 0x88, 0x9B, 0xEF, 0x43, 0xA4, 0x08, 0x47, 0x9F, 0x01, 0xA0, 0x03, 0x71, 0x01, 0x70, 0xCF, 0x9E, 0x0A, 0x9E, 0xDE, 0x50, 0x02, 0x9D, 0x35, 0x7C, 0x17, 0x55, 0x76, 0x3D, 0x3C, 0x3D, 0x09, 0xCF, 0x5F, 0x62, 0xD3, 0x43, 0x50 + } + }, + { + 76, + 79, + 0x39, + 0x0077, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x77, 0x00, 0x9D, 0xC4, 0x9D, 0xF3, 0x7C, 0x14, 0x1F, 0x55, 0x37, 0x00, 0x7F, 0x43, 0xE0, 0x08, 0x7F, 0x41, 0xE0, 0xF7, 0x7F, 0x62, 0xE6, 0x04, 0x62, 0xD0, 0x00, 0x5A, 0x53, 0x53, 0x54, 0x10, 0x08, 0x51, 0x55, 0x08, 0x38, 0x03, 0x4F, 0x50, 0x00, 0x54, 0xFE, 0x54, 0xFD, 0x01, 0x08, 0x54, 0xFF, 0x48, 0xFC, 0x01, 0xA0, 0x09, 0x52, 0xFB, 0x05, 0xFE, 0x52, 0xFA, 0x0D, 0xFD, 0x6F, 0xFD, 0x6F, 0xCC, 0x63 + } + }, + { + 77, + 79, + 0x39, + 0x0078, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x78, 0xFE, 0x6F, 0xFC, 0x7B, 0xFF, 0xBF, 0xEA, 0x52, 0xFC, 0x60, 0xE8, 0x52, 0xFE, 0x60, 0xE7, 0x62, 0xE6, 0x00, 0x62, 0xE6, 0x01, 0x38, 0xFA, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x5D, 0xF7, 0x08, 0x70, 0xFE, 0x57, 0xF0, 0x50, 0x00, 0x62, 0xE6, 0x04, 0x62, 0xE8, 0x01, 0x62, 0xE7, 0x00, 0x62, 0xE6, 0x00, 0x62, 0xE6, 0x01, 0x62, 0xDA, 0xF7, 0x49, 0xDA, 0x08, 0xAF, 0xFC, 0x62, 0xDA, 0xF7, 0x08, 0xF1, 0xAE + } + }, + { + 78, + 79, + 0x39, + 0x0079, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x79, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x51, 0x00, 0x18, 0x49, 0xDA, 0x08, 0xB0, 0x04, 0x40, 0x80, 0x05, 0x74, 0x62, 0xDA, 0xF7, 0x79, 0xBF, 0xE0, 0x49, 0xDA, 0x08, 0xA0, 0x02, 0x74, 0x62, 0xE6, 0x04, 0x60, 0xE8, 0x62, 0xE7, 0x00, 0x62, 0xE6, 0x00, 0x62, 0xE6, 0x01, 0x62, 0xD0, 0x00, 0x53, 0x55, 0x55, 0x53, 0x00, 0x55, 0x54, 0x01, 0x7E, 0x55, 0x77 + } + }, + { + 79, + 79, + 0x39, + 0x007A, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x7A, 0x5D, 0xF7, 0x08, 0x70, 0xFE, 0x62, 0xD0, 0x00, 0x51, 0x05, 0x58, 0x04, 0x7E, 0x5D, 0xF7, 0x08, 0x70, 0xFE, 0x62, 0xD0, 0x00, 0x51, 0x01, 0x58, 0x00, 0x7E, 0x5D, 0xF7, 0x08, 0x70, 0xFE, 0x62, 0xD0, 0x00, 0x51, 0x03, 0x58, 0x02, 0x7E, 0x5D, 0xF7, 0x08, 0x70, 0xFE, 0x62, 0xD0, 0x00, 0x51, 0x07, 0x58, 0x06, 0x7E, 0x08, 0x08, 0x10, 0x4F, 0x5D, 0xF7, 0x54, 0xFD, 0x70, 0x3F, 0x71, 0xC0, 0x9C, 0x06 + } + }, + { + 80, + 79, + 0x39, + 0x007B, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x7B, 0x20, 0x18, 0x70, 0xFE, 0x62, 0xD0, 0x00, 0x53, 0x05, 0x5A, 0x04, 0x7E, 0x08, 0x08, 0x10, 0x4F, 0x5D, 0xF7, 0x54, 0xFD, 0x70, 0x3F, 0x71, 0xC0, 0x20, 0x18, 0x70, 0xFE, 0x62, 0xD0, 0x00, 0x53, 0x01, 0x5A, 0x00, 0x7E, 0x08, 0x08, 0x10, 0x4F, 0x5D, 0xF7, 0x54, 0xFD, 0x70, 0x3F, 0x71, 0xC0, 0x20, 0x18, 0x70, 0xFE, 0x62, 0xD0, 0x00, 0x53, 0x03, 0x5A, 0x02, 0x7E, 0x0E, 0x1E, 0x3D, 0x7A, 0xE3, 0x95 + } + }, + { + 81, + 79, + 0x39, 
+ 0x007C, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x7C, 0x07, 0x03, 0x00, 0x00, 0x07, 0x0E, 0x1E, 0x3D, 0x03, 0x01, 0x00, 0x00, 0x1E, 0x3D, 0x7A, 0xF6, 0x0E, 0x07, 0x01, 0x00, 0x58, 0x45, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x79, 0xDF, 0xF6, 0x7A, 0x44, 0xBF, 0xF0, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x08, 0x10, 0x70, 0x3F, 0x71, 0x80, 0x5D, 0xD3, 0x08, 0x5D, 0xD0, 0x08, 0x62, 0xD0, 0x00, 0x51, 0xB6, 0x60, 0xD3, 0x2E, 0xB3, 0x80, 0x48, 0x60 + } + }, + { + 82, + 79, + 0x39, + 0x007D, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x7D, 0x49, 0xD7, 0x08, 0xA0, 0x09, 0x26, 0xB3, 0xF0, 0x2E, 0xB3, 0x00, 0x80, 0x08, 0x49, 0xD7, 0x20, 0xA0, 0x03, 0x80, 0xA6, 0x51, 0xB3, 0x21, 0x0E, 0xE0, 0x01, 0x80, 0x11, 0x80, 0x67, 0x80, 0x79, 0x80, 0x47, 0x80, 0x96, 0x80, 0x94, 0x80, 0x92, 0x80, 0x90, 0x80, 0x97, 0x5D, 0xD8, 0x21, 0xFE, 0x39, 0x48, 0xA0, 0x06, 0x62, 0xD7, 0x00, 0x80, 0x8A, 0x49, 0xD8, 0x01, 0xB0, 0x0F, 0x55, 0xBA, 0x69, 0xA3 + } + }, + { + 83, + 79, + 0x39, + 0x007E, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x7E, 0x02, 0x26, 0xB3, 0xF0, 0x2E, 0xB3, 0x02, 0x62, 0xD7, 0x10, 0x80, 0x77, 0x55, 0xBA, 0x01, 0x26, 0xB3, 0xF0, 0x2E, 0xB3, 0x06, 0x5F, 0xB5, 0xB4, 0x51, 0xB7, 0x02, 0xB5, 0x5C, 0x52, 0x00, 0x60, 0xD8, 0x76, 0xB5, 0x62, 0xD7, 0x14, 0x80, 0x5B, 0x51, 0xB8, 0x78, 0x3A, 0xB5, 0xC0, 0x0F, 0x51, 0xB7, 0x02, 0xB5, 0x5C, 0x52, 0x00, 0x60, 0xD8, 0x76, 0xB5, 0x2E, 0xB3, 0x20, 0x60, 0xD8, 0x62, 0x18, 0x02 + } + }, + { + 84, + 79, + 0x39, + 0x007F, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x7F, 0xD7, 0x04, 0x80, 0x3F, 0x5D, 0xD8, 0x3A, 0xB8, 0xD0, 0x2B, 0xA0, 0x29, 0x53, 0xB5, 0x53, 0xB4, 0x26, 0xB3, 0xF0, 0x2E, 0xB3, 0x04, 0x80, 0x18, 0x51, 0xB9, 0x78, 0x3A, 0xB5, 0xC0, 0x16, 0x51, 0xB7, 0x02, 0xB5, 0x5C, 0x5D, 0xD8, 0x54, 0x00, 0x2E, 0xB3, 0x10, 0x76, 0xB5, 0x80, 0x01, 0x62, 0xD7, 0x10, 0x80, 0x0F, 0x62, 0xD7, 0x00, 0x80, 0x0A, 0x26, 0xB3, 0xF0, 0x2E, 0xB3, 0x00, 0x55, 0xFC, 0xCB + } + }, + { + 85, + 79, + 0x39, + 0x0080, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x80, 0xBA, 0x00, 0x18, 0x60, 0xD0, 0x18, 0x60, 0xD3, 0x20, 0x18, 0x7E, 0x62, 0xD0, 0x00, 0x70, 0xCF, 0x71, 0x10, 0x41, 0x04, 0x5F, 0x43, 0x05, 0xA0, 0x70, 0xCF, 0x26, 0xAF, 0x5F, 0x51, 0xAF, 0x60, 0x04, 0x55, 0xBA, 0x00, 0x90, 0x1F, 0x90, 0x24, 0x40, 0x40, 0x40, 0x40, 0x40, 0x50, 0x00, 0x53, 0xB4, 0x70, 0xCF, 0x71, 0x10, 0x43, 0x04, 0xA0, 0x43, 0x05, 0xA0, 0x70, 0xCF, 0x2E, 0xAF, 0xA0, 0xAC, 0x2C + } + }, + { + 86, + 79, + 0x39, + 0x0081, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x81, 0x51, 0xAF, 0x60, 0x04, 0x7F, 0x41, 0xE0, 0x7F, 0x43, 0xE0, 0x80, 0x7F, 0x43, 0xD6, 0x31, 0x7F, 0x41, 0xE0, 0x7F, 0x41, 0xD6, 0xFE, 0x7F, 0x62, 0xD0, 0x00, 0x4F, 0x52, 0xFD, 0x53, 0xB8, 0x52, 0xFC, 0x53, 0xB9, 0x52, 0xFB, 0x53, 0xB7, 0x52, 0xFA, 0x53, 0xB6, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x76, 0xBB, 0xD0, 0x04, 0x55, 0xBB, 0xFF, 0x7E, 0x43, 0xE1, 0x01, 0x7F, 0x41, 0xE1, 0xFE, 0x7F, 0xB7, 0x43 + } + }, + { + 87, + 79, + 0x39, + 0x0082, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x82, 0x43, 0x23, 0x01, 0x7F, 0x54, 0x00, 0x70, 0xFE, 0x41, 0x23, 0xFE, 0x18, 0x60, 0x22, 0x18, 0x60, 0x23, 0x18, 0x70, 0x3F, 0x71, 0xC0, 0x7E, 0x30, 0x62, 0xD0, 0x00, 0x53, 0xF8, 0x5D, 0xF7, 0x08, 0x21, 0xC0, 0xB0, 0x07, 0x56, 0x01, 0x00, 0x55, 0xF8, 0x00, 0x51, 0xF8, 0x70, 0x3F, 0x71, 0x80, 0x60, 0xD3, 0x55, 0xFD, 0x01, 0x3C, 
0xFD, 0x01, 0xB0, 0xAE, 0x70, 0xCF, 0x71, 0x10, 0x5D, 0xE0, 0xFE, 0xD2 + } + }, + { + 88, + 79, + 0x39, + 0x0083, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x83, 0x08, 0x21, 0xF8, 0x49, 0xFE, 0x08, 0xB0, 0x0A, 0x49, 0xFE, 0x10, 0xB0, 0x09, 0x29, 0x01, 0x80, 0x07, 0x29, 0x02, 0x80, 0x03, 0x29, 0x00, 0x60, 0xE0, 0x70, 0xCF, 0x80, 0x01, 0x65, 0xFD, 0x3C, 0xFD, 0x02, 0xB0, 0x84, 0x65, 0xFD, 0x70, 0xCF, 0x71, 0x10, 0x49, 0xE4, 0x08, 0xA0, 0x05, 0x70, 0xCF, 0x80, 0x20, 0x70, 0xCF, 0x52, 0x00, 0x53, 0xFA, 0x51, 0xFD, 0x39, 0x04, 0xB0, 0x69, 0x08, 0xF8, 0xC7 + } + }, + { + 89, + 79, + 0x39, + 0x0084, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x84, 0x10, 0x50, 0x03, 0x55, 0xF8, 0x3A, 0x7C, 0x00, 0x60, 0x20, 0x18, 0x53, 0xFD, 0x3C, 0xF8, 0x00, 0xA0, 0x09, 0x55, 0xFF, 0x00, 0x55, 0xFD, 0x10, 0x80, 0x37, 0x65, 0xFD, 0x52, 0x00, 0x53, 0xFA, 0x52, 0x02, 0x53, 0xFB, 0x52, 0x01, 0x60, 0xD4, 0x52, 0x05, 0x53, 0xFC, 0x55, 0xFE, 0x56, 0x51, 0xFD, 0x39, 0x08, 0xB0, 0x33, 0x08, 0x10, 0x50, 0x02, 0x55, 0xF8, 0x3A, 0x7C, 0x00, 0x60, 0x20, 0x70, 0xB8 + } + }, + { + 90, + 79, + 0x39, + 0x0085, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x85, 0x18, 0x53, 0xFD, 0x55, 0xFF, 0x01, 0x3C, 0xF8, 0x00, 0xA0, 0x04, 0x55, 0xFF, 0x00, 0x65, 0xFD, 0x3C, 0xFD, 0x10, 0xB0, 0x13, 0x18, 0x70, 0xCF, 0x71, 0x10, 0x60, 0xE0, 0x70, 0xCF, 0x65, 0xFD, 0x51, 0xFF, 0x3C, 0xFD, 0x20, 0xA0, 0x04, 0x30, 0x8F, 0xFE, 0x62, 0xD0, 0x00, 0x62, 0xD5, 0x00, 0x62, 0xD4, 0x00, 0x7E, 0x30, 0x30, 0x30, 0x51, 0xF8, 0x70, 0x3F, 0x71, 0x80, 0x60, 0xD3, 0x52, 0x35, 0x43 + } + }, + { + 91, + 79, + 0x39, + 0x0086, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x86, 0x02, 0x53, 0xFB, 0x52, 0x01, 0x60, 0xD5, 0x52, 0x03, 0x74, 0x53, 0xFD, 0x52, 0x04, 0x53, 0xFE, 0x50, 0x00, 0x6C, 0x00, 0x6A, 0x6C, 0x00, 0x6A, 0x6C, 0x00, 0x6A, 0x6C, 0x00, 0x6A, 0x6C, 0x00, 0x6A, 0x6C, 0x00, 0x6A, 0x6C, 0x00, 0x6A, 0x08, 0x52, 0x00, 0x5C, 0x18, 0x08, 0x28, 0x3F, 0xFB, 0x18, 0x75, 0xB0, 0x02, 0x74, 0x7A, 0xFE, 0xB0, 0x05, 0x7A, 0xFD, 0xA0, 0x0F, 0x3C, 0xFB, 0x00, 0x37, 0x48 + } + }, + { + 92, + 79, + 0x39, + 0x0087, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x87, 0xBF, 0xEB, 0x08, 0x5D, 0xD5, 0x74, 0x60, 0xD5, 0x18, 0x8F, 0xE2, 0x62, 0xD0, 0x00, 0x62, 0xD5, 0x00, 0x7E, 0x70, 0xBF, 0x62, 0xD0, 0x00, 0x47, 0x36, 0x40, 0xB0, 0x0F, 0x47, 0x36, 0x80, 0xA0, 0x0A, 0x26, 0x36, 0x3F, 0x51, 0x36, 0x3A, 0x0E, 0xA0, 0x01, 0x70, 0xBF, 0x51, 0x0E, 0xA1, 0x1A, 0x55, 0xBE, 0x00, 0x3C, 0x0E, 0x02, 0xC0, 0x04, 0x55, 0xBE, 0x01, 0x5F, 0x36, 0x0E, 0x62, 0xD4, 0xE5, 0xA5 + } + }, + { + 93, + 79, + 0x39, + 0x0088, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x88, 0x02, 0x62, 0xD5, 0x01, 0x55, 0x0E, 0x00, 0x55, 0x35, 0x99, 0x55, 0x34, 0xE0, 0x3E, 0x35, 0x53, 0x45, 0x3E, 0x35, 0x53, 0x44, 0x3E, 0x35, 0x53, 0x32, 0x3C, 0x45, 0x02, 0xC0, 0x94, 0x51, 0x4D, 0x11, 0x03, 0x3A, 0x45, 0xC0, 0x8C, 0x3C, 0x44, 0x02, 0xC0, 0x87, 0x51, 0x4C, 0x11, 0x03, 0x3A, 0x44, 0xC0, 0x7F, 0x62, 0xD3, 0x02, 0x58, 0x32, 0x52, 0xFE, 0x53, 0x23, 0x52, 0xFF, 0x53, 0x24, 0x10, 0xFC + } + }, + { + 94, + 79, + 0x39, + 0x0089, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x89, 0x52, 0x00, 0x53, 0x25, 0x52, 0x01, 0x53, 0x26, 0x52, 0x02, 0x53, 0x27, 0x5B, 0x12, 0x4C, 0x5C, 0x52, 0xFE, 0x53, 0x1E, 0x52, 0xFF, 0x53, 0x1F, 0x52, 0x00, 0x53, 0x20, 0x52, 0x01, 0x53, 0x21, 0x52, 0x02, 0x53, 0x22, 0x5B, 
0x12, 0x4C, 0x5C, 0x52, 0xFE, 0x53, 0x19, 0x52, 0xFF, 0x53, 0x1A, 0x52, 0x00, 0x53, 0x1B, 0x52, 0x01, 0x53, 0x1C, 0x52, 0x02, 0x53, 0x1D, 0x51, 0x32, 0x02, 0x4C, 0xF8, 0xCD + } + }, + { + 95, + 79, + 0x39, + 0x008A, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x8A, 0x5C, 0x52, 0xFE, 0x53, 0x28, 0x52, 0xFF, 0x53, 0x29, 0x52, 0x00, 0x53, 0x2A, 0x52, 0x01, 0x53, 0x2B, 0x52, 0x02, 0x53, 0x2C, 0x5B, 0x02, 0x4C, 0x5C, 0x52, 0xFE, 0x53, 0x2D, 0x52, 0xFF, 0x53, 0x2E, 0x52, 0x00, 0x53, 0x2F, 0x52, 0x01, 0x53, 0x30, 0x52, 0x02, 0x53, 0x31, 0x90, 0x62, 0x80, 0x44, 0x7C, 0x25, 0x70, 0x90, 0x5B, 0x55, 0xBD, 0x00, 0x51, 0x45, 0xA0, 0x18, 0x51, 0x4D, 0x78, 0xB8, 0x4E + } + }, + { + 96, + 79, + 0x39, + 0x008B, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x8B, 0x3A, 0x45, 0xA0, 0x11, 0x51, 0x44, 0xA0, 0x1A, 0x51, 0x4C, 0x78, 0x3A, 0x44, 0xA0, 0x13, 0x7C, 0x26, 0x0E, 0x80, 0x21, 0x51, 0x44, 0xA0, 0x17, 0x51, 0x4C, 0x78, 0x3A, 0x44, 0xA0, 0x10, 0x80, 0x11, 0x51, 0x45, 0xA0, 0x0A, 0x51, 0x4D, 0x78, 0x3A, 0x45, 0xA0, 0x03, 0x80, 0x04, 0x55, 0xBD, 0x01, 0x7C, 0x26, 0x94, 0x51, 0x0E, 0x3A, 0x43, 0xC0, 0x05, 0x50, 0xFF, 0x80, 0x0C, 0x7C, 0x23, 0x96, 0x0B + } + }, + { + 97, + 79, + 0x39, + 0x008C, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x8C, 0x48, 0x7A, 0x36, 0x51, 0x36, 0xBF, 0x07, 0x51, 0x0E, 0x55, 0x36, 0x00, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x55, 0x13, 0x00, 0x51, 0x1F, 0x02, 0x20, 0x0E, 0x13, 0x00, 0x02, 0x21, 0x0E, 0x13, 0x00, 0x02, 0x24, 0x0E, 0x13, 0x00, 0x02, 0x25, 0x0E, 0x13, 0x00, 0x02, 0x26, 0x0E, 0x13, 0x00, 0x02, 0x29, 0x0E, 0x13, 0x00, 0x02, 0x2A, 0x0E, 0x13, 0x00, 0x02, 0x2B, 0x0E, 0x13, 0x00, 0x3C, 0x13, 0xFB, 0xD6 + } + }, + { + 98, + 79, + 0x39, + 0x008D, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x8D, 0x00, 0xA0, 0x03, 0x50, 0xFF, 0x53, 0xBC, 0x7F, 0x3C, 0xBE, 0x01, 0xB0, 0x23, 0x50, 0x00, 0x53, 0x19, 0x53, 0x1A, 0x53, 0x1B, 0x53, 0x1C, 0x53, 0x1D, 0x53, 0x1E, 0x53, 0x22, 0x53, 0x23, 0x53, 0x27, 0x53, 0x28, 0x53, 0x2C, 0x53, 0x2D, 0x53, 0x2E, 0x53, 0x2F, 0x53, 0x30, 0x53, 0x31, 0x62, 0xD5, 0x01, 0x06, 0x34, 0x03, 0x50, 0x00, 0x53, 0x0F, 0x53, 0x10, 0x53, 0x12, 0x53, 0x13, 0x62, 0xD5, 0x8B + } + }, + { + 99, + 79, + 0x39, + 0x008E, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x8E, 0xD3, 0x00, 0x10, 0x51, 0x31, 0x57, 0x17, 0x03, 0x19, 0x0E, 0x13, 0x00, 0x79, 0xDF, 0xF9, 0x53, 0x14, 0x20, 0x51, 0xBC, 0x80, 0x08, 0x3C, 0x13, 0x00, 0xA0, 0x03, 0x50, 0xFF, 0x3F, 0x34, 0x51, 0x31, 0x02, 0x2C, 0x0E, 0x10, 0x00, 0x02, 0x27, 0x0E, 0x10, 0x00, 0x02, 0x22, 0x0E, 0x10, 0x00, 0x02, 0x1D, 0x0E, 0x10, 0x00, 0x12, 0x19, 0x1E, 0x10, 0x00, 0x12, 0x1E, 0x1E, 0x10, 0x00, 0x12, 0x8E, 0xFE + } + }, + { + 100, + 79, + 0x39, + 0x008F, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x8F, 0x23, 0x1E, 0x10, 0x00, 0x12, 0x28, 0x1E, 0x10, 0x00, 0x12, 0x2D, 0x1E, 0x10, 0x00, 0x64, 0x6B, 0x10, 0x02, 0x30, 0x0E, 0x10, 0x00, 0x02, 0x2B, 0x0E, 0x10, 0x00, 0x02, 0x26, 0x0E, 0x10, 0x00, 0x02, 0x21, 0x0E, 0x10, 0x00, 0x02, 0x1C, 0x0E, 0x10, 0x00, 0x12, 0x1A, 0x1E, 0x10, 0x00, 0x12, 0x1F, 0x1E, 0x10, 0x00, 0x12, 0x24, 0x1E, 0x10, 0x00, 0x12, 0x29, 0x1E, 0x10, 0x00, 0x12, 0x2E, 0x29, 0x35 + } + }, + { + 101, + 79, + 0x39, + 0x0090, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x90, 0x1E, 0x10, 0x00, 0x53, 0x11, 0x7C, 0x11, 0x4D, 0x51, 0x44, 0x06, 0x12, 0x80, 0x0C, 0x11, 0x0E, 0x10, 0x00, 0x47, 0x10, 
0x80, 0xA0, 0x0A, 0x55, 0x10, 0x00, 0x55, 0x11, 0x00, 0x55, 0x12, 0x00, 0x7C, 0x12, 0x26, 0x47, 0x42, 0x08, 0xA0, 0x36, 0x62, 0xD3, 0x01, 0x4D, 0x34, 0x51, 0x48, 0x3B, 0x00, 0xC0, 0x1E, 0xB0, 0x09, 0x51, 0x49, 0x3B, 0x01, 0xA0, 0x21, 0xC0, 0x14, 0x51, 0x48, 0x3A, 0x02, 0xE8 + } + }, + { + 102, + 79, + 0x39, + 0x0091, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x91, 0x4F, 0xB0, 0x07, 0x51, 0x49, 0x3A, 0x50, 0xA0, 0x13, 0x7A, 0x49, 0x1E, 0x48, 0x00, 0x80, 0x0C, 0x51, 0x48, 0x2A, 0x49, 0xA0, 0x06, 0x76, 0x49, 0x0E, 0x48, 0x00, 0x4D, 0x34, 0x51, 0x48, 0x3A, 0x4F, 0xC0, 0x0B, 0xB0, 0x13, 0x51, 0x49, 0x3A, 0x50, 0xC0, 0x03, 0xB0, 0x0B, 0x51, 0x48, 0x3F, 0x34, 0x51, 0x49, 0x3F, 0x34, 0x80, 0x09, 0x51, 0x4F, 0x3F, 0x34, 0x51, 0x50, 0x3F, 0x34, 0x50, 0x45, 0x6F + } + }, + { + 103, + 79, + 0x39, + 0x0092, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x92, 0x00, 0x53, 0x10, 0x53, 0x12, 0x51, 0x2D, 0x02, 0x2E, 0x0E, 0x10, 0x00, 0x02, 0x2F, 0x0E, 0x10, 0x00, 0x02, 0x30, 0x0E, 0x10, 0x00, 0x02, 0x31, 0x0E, 0x10, 0x00, 0x12, 0x19, 0x1E, 0x10, 0x00, 0x12, 0x1A, 0x1E, 0x10, 0x00, 0x12, 0x1B, 0x1E, 0x10, 0x00, 0x12, 0x1C, 0x1E, 0x10, 0x00, 0x12, 0x1D, 0x1E, 0x10, 0x00, 0x64, 0x6B, 0x10, 0x02, 0x28, 0x0E, 0x10, 0x00, 0x02, 0x29, 0x0E, 0x10, 0xBB, 0x5C + } + }, + { + 104, + 79, + 0x39, + 0x0093, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x93, 0x00, 0x02, 0x2A, 0x0E, 0x10, 0x00, 0x02, 0x2B, 0x0E, 0x10, 0x00, 0x02, 0x2C, 0x0E, 0x10, 0x00, 0x12, 0x1E, 0x1E, 0x10, 0x00, 0x12, 0x1F, 0x1E, 0x10, 0x00, 0x12, 0x20, 0x1E, 0x10, 0x00, 0x12, 0x21, 0x1E, 0x10, 0x00, 0x12, 0x22, 0x1E, 0x10, 0x00, 0x53, 0x11, 0x7C, 0x11, 0x4D, 0x51, 0x45, 0x06, 0x12, 0x80, 0x0C, 0x11, 0x0E, 0x10, 0x00, 0x47, 0x10, 0x80, 0xA0, 0x0A, 0x55, 0x10, 0x00, 0x4E, 0x83 + } + }, + { + 105, + 79, + 0x39, + 0x0094, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x94, 0x55, 0x11, 0x00, 0x55, 0x12, 0x00, 0x7C, 0x11, 0xB3, 0x47, 0x42, 0x08, 0xA0, 0x36, 0x62, 0xD3, 0x01, 0x4D, 0x34, 0x51, 0x48, 0x3B, 0x00, 0xC0, 0x1E, 0xB0, 0x09, 0x51, 0x49, 0x3B, 0x01, 0xA0, 0x21, 0xC0, 0x14, 0x51, 0x48, 0x3A, 0x51, 0xB0, 0x07, 0x51, 0x49, 0x3A, 0x52, 0xA0, 0x13, 0x7A, 0x49, 0x1E, 0x48, 0x00, 0x80, 0x0C, 0x51, 0x48, 0x2A, 0x49, 0xA0, 0x06, 0x76, 0x49, 0x0E, 0x48, 0x31, 0x4A + } + }, + { + 106, + 79, + 0x39, + 0x0095, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x95, 0x00, 0x4D, 0x34, 0x51, 0x48, 0x3A, 0x51, 0xC0, 0x0B, 0xB0, 0x13, 0x51, 0x49, 0x3A, 0x52, 0xC0, 0x03, 0xB0, 0x0B, 0x51, 0x48, 0x3F, 0x34, 0x51, 0x49, 0x3F, 0x34, 0x80, 0x09, 0x51, 0x51, 0x3F, 0x34, 0x51, 0x52, 0x3F, 0x34, 0x62, 0xD3, 0x02, 0x76, 0x0E, 0x51, 0x0E, 0x55, 0xBC, 0x00, 0x7F, 0x55, 0x12, 0x00, 0x5F, 0x11, 0x45, 0x06, 0x11, 0xFE, 0x5F, 0x10, 0x44, 0x06, 0x10, 0xFE, 0x51, 0x97, 0x17 + } + }, + { + 107, + 79, + 0x39, + 0x0096, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x96, 0x32, 0x08, 0x51, 0x4C, 0x14, 0x32, 0x14, 0x32, 0x16, 0x32, 0x02, 0x55, 0x0F, 0x06, 0x7A, 0x0F, 0x51, 0x0F, 0xA0, 0x74, 0x47, 0x11, 0x80, 0xB0, 0x44, 0x51, 0x4D, 0x78, 0x3A, 0x11, 0xC0, 0x3D, 0x55, 0x13, 0x06, 0x7A, 0x13, 0x51, 0x13, 0xA0, 0x4F, 0x47, 0x10, 0x80, 0xB0, 0x1E, 0x51, 0x4C, 0x78, 0x3A, 0x10, 0xC0, 0x17, 0x58, 0x32, 0x62, 0xD3, 0x02, 0x52, 0x00, 0x58, 0x12, 0x62, 0xD3, 0x19, 0x1C + } + }, + { + 108, + 79, + 0x39, + 0x0097, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x97, 0x00, 0x54, 
0x19, 0x76, 0x12, 0x76, 0x10, 0x76, 0x32, 0x8F, 0xD9, 0x58, 0x12, 0x62, 0xD3, 0x00, 0x56, 0x19, 0x00, 0x75, 0x5A, 0x12, 0x76, 0x10, 0x76, 0x32, 0x8F, 0xC8, 0x58, 0x12, 0x62, 0xD3, 0x00, 0x50, 0x00, 0x54, 0x19, 0x75, 0x54, 0x19, 0x75, 0x54, 0x19, 0x75, 0x54, 0x19, 0x75, 0x54, 0x19, 0x75, 0x5A, 0x12, 0x06, 0x32, 0x05, 0x76, 0x11, 0x5F, 0x10, 0x44, 0x06, 0x10, 0xFE, 0x51, 0xA0, 0x2B + } + }, + { + 109, + 79, + 0x39, + 0x0098, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x98, 0x4C, 0x11, 0x05, 0x04, 0x32, 0x8F, 0x88, 0x62, 0xD3, 0x02, 0x18, 0x53, 0x32, 0x7F, 0x62, 0xD3, 0x00, 0x3C, 0x45, 0x01, 0xB0, 0x1B, 0x57, 0x05, 0x52, 0x19, 0x6D, 0x6D, 0x6D, 0x21, 0x1F, 0x53, 0x0F, 0x6D, 0x21, 0x7F, 0x02, 0x0F, 0x54, 0x14, 0x75, 0x5B, 0x39, 0x0A, 0xBF, 0xEB, 0x80, 0x21, 0x51, 0x4D, 0x11, 0x02, 0x3A, 0x45, 0xB0, 0x19, 0x57, 0x0F, 0x52, 0x19, 0x6D, 0x6D, 0x6D, 0x21, 0x4A, 0x80 + } + }, + { + 110, + 79, + 0x39, + 0x0099, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x99, 0x1F, 0x53, 0x0F, 0x6D, 0x21, 0x7F, 0x02, 0x0F, 0x54, 0x1E, 0x75, 0x5B, 0x39, 0x14, 0xBF, 0xEB, 0x3C, 0x44, 0x01, 0xB0, 0x1D, 0x57, 0x01, 0x52, 0x19, 0x6D, 0x6D, 0x6D, 0x21, 0x1F, 0x53, 0x0F, 0x6D, 0x21, 0x7F, 0x02, 0x0F, 0x54, 0x18, 0x5B, 0x01, 0x05, 0x5C, 0x39, 0x15, 0xBF, 0xE9, 0x80, 0x23, 0x51, 0x4C, 0x11, 0x02, 0x3A, 0x44, 0xB0, 0x1B, 0x57, 0x03, 0x52, 0x19, 0x6D, 0x6D, 0x6D, 0xB7, 0x5B + } + }, + { + 111, + 79, + 0x39, + 0x009A, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x9A, 0x21, 0x1F, 0x53, 0x0F, 0x6D, 0x21, 0x7F, 0x02, 0x0F, 0x54, 0x1A, 0x5B, 0x01, 0x05, 0x5C, 0x39, 0x17, 0xBF, 0xE9, 0x7F, 0x62, 0xD3, 0x00, 0x51, 0x45, 0xB0, 0x94, 0x55, 0x19, 0x04, 0x55, 0x1A, 0x10, 0x55, 0x1B, 0x10, 0x55, 0x1C, 0x10, 0x55, 0x1D, 0x04, 0x51, 0xBD, 0xB0, 0x23, 0x51, 0x25, 0xA0, 0x0D, 0x51, 0x24, 0x5F, 0x12, 0x26, 0x5F, 0x14, 0x25, 0x92, 0x47, 0x53, 0x1B, 0x51, 0x2A, 0x43, 0x74 + } + }, + { + 112, + 79, + 0x39, + 0x009B, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x9B, 0xA0, 0x0F, 0x51, 0x29, 0x5F, 0x12, 0x2B, 0x5F, 0x14, 0x2A, 0x92, 0x37, 0x53, 0x1A, 0x53, 0x1C, 0x57, 0x04, 0x52, 0x23, 0x53, 0x4A, 0x52, 0x19, 0x53, 0x4B, 0x7C, 0x29, 0x2D, 0x50, 0x04, 0x6E, 0x4A, 0x6E, 0x4B, 0x78, 0xBF, 0xFA, 0x52, 0x28, 0x14, 0x4B, 0x1E, 0x4A, 0x00, 0x47, 0x4A, 0x80, 0xA0, 0x07, 0x55, 0x4A, 0x00, 0x55, 0x4B, 0x00, 0x47, 0x4A, 0x7F, 0xA0, 0x04, 0x55, 0x4B, 0xFF, 0xD0, 0x8F + } + }, + { + 113, + 79, + 0x39, + 0x009C, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x9C, 0x51, 0x4B, 0x3C, 0xAD, 0x02, 0xA0, 0x0D, 0x3C, 0xAD, 0x03, 0xA0, 0x03, 0x80, 0x10, 0x6D, 0x21, 0x7F, 0x80, 0x0B, 0x6D, 0x21, 0x7F, 0x53, 0x4B, 0x6D, 0x21, 0x7F, 0x02, 0x4B, 0x54, 0x1E, 0x6D, 0x6D, 0x6D, 0x21, 0x1F, 0x54, 0x19, 0x6D, 0x21, 0x7F, 0x05, 0x19, 0x79, 0xDF, 0xA5, 0x51, 0x4D, 0x11, 0x01, 0x3A, 0x45, 0xB0, 0x94, 0x55, 0x2D, 0x04, 0x55, 0x2E, 0x10, 0x55, 0x2F, 0x10, 0x55, 0xF3, 0xD6 + } + }, + { + 114, + 79, + 0x39, + 0x009D, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x9D, 0x30, 0x10, 0x55, 0x31, 0x04, 0x51, 0xBD, 0xB0, 0x23, 0x51, 0x25, 0xA0, 0x0D, 0x51, 0x24, 0x5F, 0x12, 0x26, 0x5F, 0x14, 0x25, 0x91, 0xAC, 0x53, 0x2F, 0x51, 0x20, 0xA0, 0x0F, 0x51, 0x1F, 0x5F, 0x12, 0x21, 0x5F, 0x14, 0x20, 0x91, 0x9C, 0x53, 0x2E, 0x53, 0x30, 0x57, 0x04, 0x52, 0x23, 0x53, 0x4A, 0x52, 0x2D, 0x53, 0x4B, 0x7C, 0x29, 0x2D, 0x50, 0x04, 0x6E, 0x4A, 0x6E, 0x4B, 0x78, 0xBF, 0x6F, 0xCF + } + }, + { + 115, + 79, + 0x39, 
+ 0x009E, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x9E, 0xFA, 0x52, 0x1E, 0x14, 0x4B, 0x1E, 0x4A, 0x00, 0x47, 0x4A, 0x80, 0xA0, 0x07, 0x55, 0x4A, 0x00, 0x55, 0x4B, 0x00, 0x47, 0x4A, 0x7F, 0xA0, 0x04, 0x55, 0x4B, 0xFF, 0x51, 0x4B, 0x3C, 0xAD, 0x02, 0xA0, 0x0D, 0x3C, 0xAD, 0x03, 0xA0, 0x03, 0x80, 0x10, 0x6D, 0x21, 0x7F, 0x80, 0x0B, 0x6D, 0x21, 0x7F, 0x53, 0x4B, 0x6D, 0x21, 0x7F, 0x02, 0x4B, 0x54, 0x28, 0x6D, 0x6D, 0x6D, 0x21, 0x1F, 0x54, 0xC2, 0x76 + } + }, + { + 116, + 79, + 0x39, + 0x009F, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x9F, 0x2D, 0x6D, 0x21, 0x7F, 0x05, 0x2D, 0x79, 0xDF, 0xA5, 0x3C, 0x44, 0x00, 0xB0, 0x97, 0x55, 0x19, 0x04, 0x55, 0x1E, 0x10, 0x55, 0x23, 0x10, 0x55, 0x28, 0x10, 0x55, 0x2D, 0x04, 0x51, 0xBD, 0xB0, 0x23, 0x51, 0x25, 0xA0, 0x0D, 0x51, 0x20, 0x5F, 0x12, 0x2A, 0x5F, 0x14, 0x25, 0x91, 0x14, 0x53, 0x23, 0x51, 0x26, 0xA0, 0x0F, 0x51, 0x21, 0x5F, 0x12, 0x2B, 0x5F, 0x14, 0x26, 0x91, 0x04, 0x53, 0x38, 0x63 + } + }, + { + 117, + 79, + 0x39, + 0x00A0, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xA0, 0x1E, 0x53, 0x28, 0x57, 0x14, 0x52, 0x1B, 0x53, 0x4A, 0x52, 0x19, 0x53, 0x4B, 0x7C, 0x29, 0x2D, 0x50, 0x04, 0x6E, 0x4A, 0x6E, 0x4B, 0x78, 0xBF, 0xFA, 0x52, 0x1C, 0x14, 0x4B, 0x1E, 0x4A, 0x00, 0x47, 0x4A, 0x80, 0xA0, 0x07, 0x55, 0x4A, 0x00, 0x55, 0x4B, 0x00, 0x47, 0x4A, 0x7F, 0xA0, 0x04, 0x55, 0x4B, 0xFF, 0x51, 0x4B, 0x3C, 0xAD, 0x02, 0xA0, 0x0D, 0x3C, 0xAD, 0x03, 0xA0, 0x03, 0x80, 0xA1, 0x36 + } + }, + { + 118, + 79, + 0x39, + 0x00A1, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xA1, 0x10, 0x6D, 0x21, 0x7F, 0x80, 0x0B, 0x6D, 0x21, 0x7F, 0x53, 0x4B, 0x6D, 0x21, 0x7F, 0x02, 0x4B, 0x54, 0x1A, 0x6D, 0x6D, 0x6D, 0x21, 0x1F, 0x54, 0x19, 0x6D, 0x21, 0x7F, 0x05, 0x19, 0x5B, 0x11, 0x05, 0x5C, 0xDF, 0xA2, 0x51, 0x4C, 0x11, 0x01, 0x3A, 0x44, 0xB0, 0x97, 0x55, 0x1D, 0x04, 0x55, 0x22, 0x10, 0x55, 0x27, 0x10, 0x55, 0x2C, 0x10, 0x55, 0x31, 0x04, 0x51, 0xBD, 0xB0, 0x23, 0x51, 0xD2, 0x99 + } + }, + { + 119, + 79, + 0x39, + 0x00A2, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xA2, 0x25, 0xA0, 0x0D, 0x51, 0x20, 0x5F, 0x12, 0x2A, 0x5F, 0x14, 0x25, 0x90, 0x76, 0x53, 0x27, 0x51, 0x24, 0xA0, 0x0F, 0x51, 0x1F, 0x5F, 0x12, 0x29, 0x5F, 0x14, 0x24, 0x90, 0x66, 0x53, 0x22, 0x53, 0x2C, 0x57, 0x14, 0x52, 0x1B, 0x53, 0x4A, 0x52, 0x1D, 0x53, 0x4B, 0x7C, 0x29, 0x2D, 0x50, 0x04, 0x6E, 0x4A, 0x6E, 0x4B, 0x78, 0xBF, 0xFA, 0x52, 0x1A, 0x14, 0x4B, 0x1E, 0x4A, 0x00, 0x47, 0x4A, 0xB3, 0x5C + } + }, + { + 120, + 79, + 0x39, + 0x00A3, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xA3, 0x80, 0xA0, 0x07, 0x55, 0x4A, 0x00, 0x55, 0x4B, 0x00, 0x47, 0x4A, 0x7F, 0xA0, 0x04, 0x55, 0x4B, 0xFF, 0x51, 0x4B, 0x3C, 0xAD, 0x02, 0xA0, 0x0D, 0x3C, 0xAD, 0x03, 0xA0, 0x03, 0x80, 0x10, 0x6D, 0x21, 0x7F, 0x80, 0x0B, 0x6D, 0x21, 0x7F, 0x53, 0x4B, 0x6D, 0x21, 0x7F, 0x02, 0x4B, 0x54, 0x1C, 0x6D, 0x6D, 0x6D, 0x21, 0x1F, 0x54, 0x1D, 0x6D, 0x21, 0x7F, 0x05, 0x1D, 0x5B, 0x11, 0x05, 0x5C, 0x0D, 0x11 + } + }, + { + 121, + 79, + 0x39, + 0x00A4, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xA4, 0xDF, 0xA2, 0x7F, 0x55, 0x11, 0x00, 0x04, 0x12, 0x0E, 0x11, 0x00, 0x65, 0x12, 0x6B, 0x11, 0x65, 0x12, 0x6B, 0x11, 0x65, 0x12, 0x6B, 0x11, 0x65, 0x12, 0x6B, 0x11, 0x55, 0x10, 0x00, 0x55, 0x13, 0x00, 0x7C, 0x11, 0x4D, 0x51, 0x12, 0x39, 0x10, 0xD0, 0x03, 0x50, 0x10, 0x7F, 0x12, 0x4B, 0x55, 0x10, 0x08, 0x47, 0x4B, 0x01, 
0xA0, 0x03, 0x02, 0x4A, 0x6D, 0x6E, 0x4B, 0x7A, 0x10, 0xBF, 0xF3, 0x1A, 0x2C + } + }, + { + 122, + 79, + 0x39, + 0x00A5, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xA5, 0x53, 0x4A, 0x7F, 0x62, 0xD0, 0x00, 0x3C, 0x0E, 0x02, 0xC0, 0x0E, 0x55, 0x36, 0x00, 0x90, 0x09, 0x47, 0x36, 0x40, 0xA0, 0x04, 0x7C, 0x14, 0x1F, 0x7F, 0x70, 0xBF, 0x62, 0xD4, 0x02, 0x62, 0xD3, 0x02, 0x50, 0x00, 0x53, 0x32, 0x53, 0x35, 0x53, 0x44, 0x53, 0x45, 0x55, 0x34, 0x99, 0x3E, 0x34, 0x53, 0x19, 0x3E, 0x34, 0x53, 0x1A, 0x3E, 0x34, 0x53, 0x1B, 0x76, 0x45, 0x51, 0x45, 0x3A, 0x0E, 0x9D, 0x33 + } + }, + { + 123, + 79, + 0x39, + 0x00A6, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xA6, 0xD1, 0x1F, 0x3E, 0x34, 0x53, 0x1C, 0x3E, 0x34, 0x53, 0x1D, 0x3E, 0x34, 0x53, 0x1E, 0x51, 0x19, 0x12, 0x1C, 0xD0, 0x03, 0x73, 0x74, 0x53, 0x1F, 0x51, 0x1A, 0x12, 0x1D, 0xD0, 0x03, 0x73, 0x74, 0x53, 0x20, 0x51, 0x1F, 0xA0, 0x07, 0x39, 0x02, 0xA0, 0x03, 0x80, 0x2D, 0x51, 0x20, 0xA0, 0x05, 0x39, 0x02, 0xB0, 0x25, 0x51, 0x19, 0x3A, 0x1C, 0xC0, 0x0B, 0xA0, 0x11, 0x51, 0x1B, 0x12, 0x4C, 0xEF, 0xD8 + } + }, + { + 124, + 79, + 0x39, + 0x00A7, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xA7, 0x53, 0x21, 0x80, 0x0C, 0x51, 0x1B, 0x02, 0x4C, 0x53, 0x21, 0x80, 0x04, 0x5F, 0x21, 0x1B, 0x51, 0x1A, 0x12, 0x1D, 0x67, 0x14, 0x21, 0x80, 0x8D, 0x3C, 0x1F, 0x02, 0xB0, 0x41, 0x3C, 0x20, 0x01, 0xB0, 0x3C, 0x51, 0x19, 0x3A, 0x1C, 0xC0, 0x0B, 0xA0, 0x11, 0x51, 0x1B, 0x12, 0x4C, 0x53, 0x21, 0x80, 0x0C, 0x51, 0x1B, 0x02, 0x4C, 0x53, 0x21, 0x80, 0x04, 0x5F, 0x21, 0x1B, 0x51, 0x1A, 0x3A, 0x1F, 0x39 + } + }, + { + 125, + 79, + 0x39, + 0x00A8, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xA8, 0x1D, 0xC0, 0x0E, 0x58, 0x21, 0x52, 0x00, 0x79, 0x3B, 0x00, 0xD0, 0x10, 0x7A, 0x21, 0x80, 0x0C, 0x58, 0x21, 0x52, 0x00, 0x75, 0x3B, 0x00, 0xD0, 0x03, 0x76, 0x21, 0x80, 0x48, 0x3C, 0x1F, 0x01, 0xB0, 0x41, 0x3C, 0x20, 0x02, 0xB0, 0x3C, 0x51, 0x1A, 0x3A, 0x1D, 0xC0, 0x08, 0x51, 0x1B, 0x78, 0x53, 0x21, 0x80, 0x06, 0x51, 0x1B, 0x74, 0x53, 0x21, 0x51, 0x19, 0x3A, 0x1C, 0xC0, 0x0B, 0xA0, 0x9C, 0x34 + } + }, + { + 126, + 79, + 0x39, + 0x00A9, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xA9, 0x11, 0x51, 0x21, 0x12, 0x4C, 0x53, 0x1F, 0x80, 0x0C, 0x51, 0x21, 0x02, 0x4C, 0x53, 0x1F, 0x80, 0x04, 0x55, 0x1F, 0x00, 0x58, 0x21, 0x52, 0x00, 0x58, 0x1F, 0x3B, 0x00, 0xD0, 0x03, 0x5A, 0x21, 0x80, 0x03, 0x8F, 0x17, 0x58, 0x1B, 0x52, 0x00, 0x58, 0x21, 0x13, 0x00, 0xCF, 0x0D, 0x3A, 0x18, 0xDF, 0x09, 0x58, 0x1E, 0x52, 0x00, 0x58, 0x21, 0x13, 0x00, 0xCE, 0xFF, 0x3A, 0x18, 0xDE, 0xFB, 0xB0, 0x5D + } + }, + { + 127, + 79, + 0x39, + 0x00AA, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xAA, 0x58, 0x1B, 0x52, 0x00, 0x58, 0x1E, 0x3B, 0x00, 0xD0, 0x0A, 0x52, 0x00, 0x01, 0x01, 0x55, 0x36, 0x40, 0x80, 0x06, 0x01, 0x01, 0x55, 0x36, 0x40, 0x58, 0x21, 0x54, 0x00, 0x76, 0x32, 0x8E, 0xDB, 0x76, 0x44, 0x5F, 0x45, 0x44, 0x06, 0x35, 0x03, 0x51, 0x35, 0x55, 0x34, 0x99, 0x04, 0x34, 0x51, 0x44, 0x3A, 0x0E, 0xCE, 0xBA, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x62, 0xD0, 0x00, 0x70, 0xCF, 0x71, 0x06, 0x0A + } + }, + { + 128, + 79, + 0x39, + 0x00AB, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xAB, 0x20, 0x51, 0x9A, 0x60, 0xA0, 0x51, 0x9C, 0x60, 0xA2, 0x51, 0x9B, 0x60, 0xA1, 0x51, 0x9E, 0x60, 0xC7, 0x51, 0x9D, 0x60, 0xA4, 0x70, 0xCF, 0x7F, 0x62, 0xD0, 0x00, 0x62, 0xD5, 0x00, 0x62, 0xD4, 0x00, 0x62, 0xD3, 
0x00, 0x55, 0xFA, 0x00, 0x50, 0x06, 0x55, 0xF8, 0x3A, 0x7C, 0x00, 0x60, 0x3C, 0xF8, 0x05, 0xB0, 0x12, 0x70, 0xCF, 0x71, 0x20, 0x62, 0xA6, 0x00, 0x71, 0x30, 0x62, 0x1B, 0x30, 0xAA, 0x53 + } + }, + { + 129, + 79, + 0x39, + 0x00AC, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xAC, 0x43, 0x1B, 0x40, 0x70, 0xCF, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x62, 0xD0, 0x03, 0x51, 0xE1, 0x54, 0x01, 0x51, 0xE0, 0x54, 0x00, 0x38, 0xFE, 0x20, 0x7F, 0x7F, 0x7F, 0x10, 0x4F, 0x20, 0x7F, 0x10, 0x4F, 0x20, 0x7F, 0x62, 0xD0, 0x00, 0x26, 0xAF, 0xFD, 0x7C, 0x72, 0x51, 0x26, 0xAE, 0xFB, 0x51, 0xAE, 0x60, 0x00, 0x7F, 0x10, 0x4F, 0x20, 0x7F, 0x10, 0x4F, 0x20, 0x7F, 0x10, 0x4F, 0x20, 0x7F, 0xEF, 0xDE + } + }, + { + 130, + 79, + 0x39, + 0x00AD, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xAD, 0x10, 0x4F, 0x20, 0x7F, 0x10, 0x4F, 0x20, 0x7F, 0x10, 0x4F, 0x20, 0x7F, 0x10, 0x4F, 0x62, 0xD0, 0x00, 0x51, 0xAF, 0x29, 0x02, 0x7C, 0x6F, 0x20, 0x62, 0xD0, 0x00, 0x51, 0xAE, 0x29, 0x04, 0x53, 0xAE, 0x51, 0xAE, 0x60, 0x00, 0x20, 0x7F, 0x7F, 0x7F, 0x08, 0x62, 0xD0, 0x00, 0x55, 0xFA, 0x00, 0x62, 0xD5, 0x00, 0x62, 0xD4, 0x00, 0x4F, 0x5B, 0x01, 0x03, 0x53, 0xF9, 0x55, 0xF8, 0x3A, 0x50, 0xE3, 0xC7 + } + }, + { + 131, + 79, + 0x39, + 0x00AE, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xAE, 0x06, 0x00, 0x20, 0x70, 0xBF, 0x62, 0xD3, 0x00, 0x52, 0xF8, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x62, 0xD0, 0x00, 0x55, 0xFA, 0x00, 0x62, 0xD5, 0x00, 0x62, 0xD4, 0x00, 0x4F, 0x5B, 0x01, 0x03, 0x53, 0xF9, 0x55, 0xF8, 0x3A, 0x50, 0x06, 0x00, 0x7F, 0x11, 0x04, 0x4B, 0xD0, 0x04, 0x78, 0xC0, 0x09, 0x3A, 0x80, 0x40, 0x79, 0x19, 0x00, 0xDF, 0xF9, 0x7F, 0x71, 0x40, 0xA0, 0x05, 0x70, 0xCF, 0x71, 0xD5, 0xAC + } + }, + { + 132, + 79, + 0x39, + 0x00AF, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xAF, 0x10, 0x5E, 0x00, 0x70, 0xCF, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x56, 0x00, 0x00, 0x80, 0x13, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0xC0, 0x7C, 0x6F, 0x54, 0x52, 0x00, 0x3F, 0xE8, 0x77, 0x00, 0x3D, 0x00, 0x04, 0xCF, 0xEA, 0x62, 0xD0, 0x04, 0x55, 0xB6, 0x00, 0x62, 0xD0, 0x04, 0x55, 0xB5, 0x00, 0x7C, 0x73, 0x74, 0x38, 0xFF, 0x20, 0x7F, 0x7F, 0x10, 0x4F, 0xBC, 0x7B + } + }, + { + 133, + 79, + 0x39, + 0x00B0, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xB0, 0x38, 0x01, 0x10, 0x7C, 0x11, 0x47, 0x62, 0xD0, 0x00, 0x20, 0x54, 0x00, 0x50, 0x0F, 0x08, 0x10, 0x7C, 0x2B, 0x38, 0x38, 0xFE, 0x52, 0x00, 0x62, 0xD0, 0x00, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x56, 0x01, 0x00, 0x9F, 0xD7, 0x62, 0xD0, 0x00, 0x54, 0x00, 0x52, 0x00, 0x08, 0x7C, 0x47, 0x34, 0x38, 0xFF, 0x52, 0x00, 0x08, 0x90, 0x46, 0x52, 0x00, 0x08, 0x62, 0xD0, 0x04, 0x51, 0x2E, 0x60 + } + }, + { + 134, + 79, + 0x39, + 0x00B1, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xB1, 0xB5, 0x08, 0x7C, 0x3A, 0x9B, 0x38, 0xFD, 0x62, 0xD0, 0x00, 0x54, 0x01, 0x5A, 0xE8, 0x06, 0xE8, 0x01, 0x50, 0x0F, 0x08, 0x51, 0xE8, 0x08, 0x52, 0x00, 0x08, 0x91, 0x47, 0x62, 0xD0, 0x00, 0x5A, 0xE8, 0x06, 0xE8, 0x01, 0x50, 0x0F, 0x08, 0x51, 0xE8, 0x08, 0x7C, 0x2B, 0x3C, 0x38, 0xFB, 0x52, 0x00, 0x62, 0xD0, 0x04, 0x53, 0xB5, 0x52, 0x01, 0x62, 0xD0, 0x00, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0xF0, 0xE5 + } + }, + { + 135, + 79, + 0x39, + 0x00B2, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xB2, 0x4F, 0x38, 0x06, 0x50, 0x04, 0x3B, 0xFC, 0xD0, 0x04, 0x56, 0xFC, 0x04, 0x56, 0x05, 0x00, 0x56, 0x04, 
0x00, 0x80, 0x67, 0x56, 0x02, 0xE0, 0x56, 0x01, 0x01, 0x56, 0x00, 0x00, 0x80, 0x23, 0x7C, 0x6F, 0x4C, 0x52, 0x01, 0x7C, 0x70, 0xCD, 0x7C, 0x6F, 0x44, 0x06, 0xE8, 0xC4, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x3B, 0x03, 0xB0, 0x03, 0x80, 0x0F, 0x07, 0x02, 0x08, 0x0F, 0x01, 0x00, 0x77, 0x09, 0x18 + } + }, + { + 136, + 79, + 0x39, + 0x00B3, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xB3, 0x00, 0x52, 0x00, 0x3B, 0xFC, 0xCF, 0xD9, 0x52, 0x00, 0x3B, 0xFC, 0xA0, 0x2C, 0x62, 0xD0, 0x00, 0x7C, 0x6F, 0x18, 0x06, 0xE8, 0xC0, 0x7C, 0x6F, 0x54, 0x52, 0x00, 0x3F, 0xE8, 0x7C, 0x6F, 0x18, 0x06, 0xE8, 0xC4, 0x7C, 0x6F, 0x54, 0x52, 0x03, 0x3F, 0xE8, 0x52, 0x02, 0x53, 0xE8, 0x52, 0x01, 0x60, 0xD5, 0x50, 0xFF, 0x3F, 0xE8, 0x77, 0x05, 0x77, 0x04, 0x62, 0xD0, 0x04, 0x52, 0x04, 0x3A, 0xDB, 0xBD + } + }, + { + 137, + 79, + 0x39, + 0x00B4, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xB4, 0xB6, 0xCF, 0x92, 0x52, 0x05, 0x62, 0xD0, 0x04, 0x53, 0xB6, 0x3D, 0x05, 0x04, 0xD0, 0x55, 0x56, 0x02, 0xE0, 0x56, 0x01, 0x01, 0x56, 0x00, 0x00, 0x80, 0x44, 0x7C, 0x6F, 0x4C, 0x52, 0x01, 0x7C, 0x70, 0xCD, 0x3D, 0x03, 0xFF, 0xA0, 0x2F, 0x62, 0xD0, 0x04, 0x51, 0xB6, 0x7C, 0x70, 0x0E, 0x06, 0xE8, 0xC0, 0x7C, 0x6F, 0x54, 0x52, 0x00, 0x7C, 0x72, 0xE6, 0x7C, 0x70, 0x0E, 0x06, 0xE8, 0xC4, 0x09, 0x1A + } + }, + { + 138, + 79, + 0x39, + 0x00B5, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xB5, 0x7C, 0x6F, 0x54, 0x52, 0x03, 0x7C, 0x72, 0xE6, 0x01, 0x01, 0x53, 0xB6, 0x62, 0xD0, 0x00, 0x39, 0x04, 0xC0, 0x03, 0x80, 0x0F, 0x07, 0x02, 0x08, 0x0F, 0x01, 0x00, 0x77, 0x00, 0x52, 0x00, 0x3B, 0xFC, 0xCF, 0xB8, 0x56, 0x04, 0x00, 0x80, 0x32, 0x62, 0xD0, 0x00, 0x7C, 0x6F, 0x44, 0x06, 0xE8, 0xC4, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x7C, 0x72, 0x31, 0x06, 0xE6, 0xC0, 0x0E, 0x76, 0xF5 + } + }, + { + 139, + 79, + 0x39, + 0x00B6, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xB6, 0xE7, 0x03, 0x51, 0xE7, 0x60, 0xD4, 0x3E, 0xE6, 0x7C, 0x6E, 0xB6, 0x7C, 0x70, 0x1E, 0x06, 0xE6, 0xE0, 0x0E, 0xE7, 0x01, 0x7C, 0x6D, 0xEA, 0x77, 0x04, 0x52, 0x04, 0x3B, 0x05, 0xCF, 0xCA, 0x38, 0xFA, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x3D, 0xFC, 0x00, 0xB0, 0x06, 0x7C, 0x73, 0x74, 0x80, 0x28, 0x90, 0x29, 0x54, 0x00, 0x3D, 0x00, 0x00, 0xA0, 0x1F, 0x62, 0xD0, 0x04, 0x3C, 0xB4, 0x00, 0xF5, 0xF4 + } + }, + { + 140, + 79, + 0x39, + 0x00B7, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xB7, 0xB0, 0x10, 0x62, 0xD0, 0x00, 0x52, 0xFB, 0x53, 0xE8, 0x52, 0xFA, 0x60, 0xD5, 0x50, 0x01, 0x3F, 0xE8, 0x52, 0x00, 0x62, 0xD0, 0x04, 0x53, 0xB4, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x7C, 0x6F, 0xC9, 0x80, 0x22, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0x00, 0x0E, 0xE9, 0x02, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x62, 0xD0, 0x00, 0x51, 0x16, 0x62, 0xD0, 0x00, 0x3A, 0xE9, 0x04, 0x13 + } + }, + { + 141, + 79, + 0x39, + 0x00B8, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xB8, 0xD0, 0x03, 0x77, 0x01, 0x77, 0x00, 0x3D, 0x00, 0x99, 0xD0, 0x07, 0x50, 0x28, 0x3B, 0x01, 0xDF, 0xD5, 0x50, 0x28, 0x3B, 0x01, 0xD0, 0x08, 0x62, 0xD0, 0x00, 0x50, 0x10, 0x80, 0x06, 0x62, 0xD0, 0x00, 0x50, 0x00, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x7C, 0x6F, 0x2F, 0xB0, 0x0A, 0x3D, 0xFC, 0x00, 0xA0, 0x4E, 0x91, 0x78, 0x80, 0x4A, 0x3D, 0x00, 0x10, 0xB0, 0x03, 0x80, 0x43, 0xFB, 0x02 + } + }, + { + 142, + 79, + 0x39, + 0x00B9, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 
0xB9, 0x3D, 0x00, 0x20, 0xB0, 0x2B, 0x62, 0xD0, 0x04, 0x51, 0xB5, 0x21, 0x0F, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x62, 0xD0, 0x04, 0x51, 0xB4, 0x21, 0xF0, 0x62, 0xD0, 0x00, 0x2A, 0xE9, 0x62, 0xD0, 0x03, 0x53, 0x9A, 0x51, 0x9A, 0x08, 0x50, 0x02, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x80, 0x14, 0x3D, 0x00, 0x30, 0xB0, 0x05, 0x90, 0x10, 0x80, 0x0B, 0x62, 0xD0, 0x03, 0x47, 0x99, 0x40, 0xA0, 0x03, 0xDB, 0xC3 + } + }, + { + 143, + 79, + 0x39, + 0x00BA, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xBA, 0x90, 0xE4, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x04, 0x62, 0xD0, 0x04, 0x51, 0xB5, 0x08, 0x50, 0x23, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x62, 0xD0, 0x04, 0x50, 0x04, 0x3A, 0xB5, 0xC0, 0xB7, 0x56, 0x03, 0x00, 0x80, 0xA9, 0x62, 0xD0, 0x00, 0x52, 0x03, 0x7C, 0x6D, 0x8A, 0x51, 0xE8, 0x01, 0xE0, 0x54, 0x02, 0x51, 0xE9, 0x09, 0x01, 0x54, 0x01, 0x52, 0x03, 0x64, 0x64, 0x64, 0x01, 0x8B, 0x24 + } + }, + { + 144, + 79, + 0x39, + 0x00BB, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xBB, 0x03, 0x54, 0x00, 0x7C, 0x6F, 0xF6, 0x08, 0x52, 0x00, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x52, 0x02, 0x01, 0x01, 0x53, 0xE8, 0x52, 0x01, 0x09, 0x00, 0x7C, 0x6D, 0xE3, 0x3E, 0xE8, 0x53, 0xE8, 0x51, 0xE9, 0x08, 0x51, 0xE8, 0x08, 0x52, 0x00, 0x01, 0x01, 0x08, 0x7C, 0x32, 0x52, 0x62, 0xD0, 0x00, 0x52, 0x02, 0x01, 0x03, 0x7C, 0x6E, 0xA3, 0x08, 0x52, 0x00, 0x01, 0x03, 0x2A, 0x63 + } + }, + { + 145, + 79, + 0x39, + 0x00BC, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xBC, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFB, 0x62, 0xD0, 0x00, 0x52, 0x02, 0x01, 0x04, 0x53, 0xE8, 0x52, 0x01, 0x09, 0x00, 0x7C, 0x6D, 0xE3, 0x3E, 0xE8, 0x53, 0xE8, 0x51, 0xE9, 0x08, 0x51, 0xE8, 0x08, 0x52, 0x00, 0x01, 0x04, 0x08, 0x7C, 0x32, 0x52, 0x62, 0xD0, 0x00, 0x52, 0x02, 0x01, 0x06, 0x53, 0xE8, 0x52, 0x01, 0x09, 0x00, 0x7C, 0x6D, 0xE3, 0x3E, 0xE8, 0x53, 0xE8, 0x51, 0xE9, 0x08, 0x51, 0x0A, 0x24 + } + }, + { + 146, + 79, + 0x39, + 0x00BD, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xBD, 0xE8, 0x08, 0x52, 0x00, 0x01, 0x06, 0x08, 0x7C, 0x32, 0x52, 0x38, 0xFA, 0x77, 0x03, 0x62, 0xD0, 0x04, 0x52, 0x03, 0x3A, 0xB5, 0xCF, 0x50, 0x50, 0x00, 0x08, 0x50, 0x25, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x38, 0xFC, 0x20, 0x7F, 0x62, 0xD0, 0x04, 0x51, 0xB5, 0x21, 0x0F, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x62, 0xD0, 0x04, 0x51, 0xB4, 0x21, 0xF0, 0x62, 0xD0, 0x00, 0x2A, 0xE9, 0x62, 0xD0, 0xCB, 0xA7 + } + }, + { + 147, + 79, + 0x39, + 0x00BE, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xBE, 0x03, 0x53, 0x9A, 0x51, 0x9A, 0x08, 0x50, 0x02, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x62, 0xD0, 0x01, 0x51, 0xE6, 0x08, 0x51, 0xE7, 0x08, 0x50, 0x03, 0x08, 0x7C, 0x32, 0x52, 0x62, 0xD0, 0x01, 0x51, 0xE4, 0x08, 0x51, 0xE5, 0x08, 0x50, 0x05, 0x08, 0x7C, 0x32, 0x52, 0x38, 0xFA, 0x7F, 0x10, 0x4F, 0x38, 0x07, 0x62, 0xD0, 0x04, 0x51, 0xB5, 0x21, 0x0F, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x62, 0xD0, 0xB2 + } + }, + { + 148, + 79, + 0x39, + 0x00BF, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xBF, 0xD0, 0x04, 0x51, 0xB4, 0x21, 0xF0, 0x62, 0xD0, 0x00, 0x2A, 0xE9, 0x62, 0xD0, 0x03, 0x53, 0x9A, 0x51, 0x9A, 0x08, 0x50, 0x02, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x56, 0x00, 0x00, 0x80, 0xCA, 0x56, 0x04, 0x00, 0x62, 0xD0, 0x04, 0x52, 0x00, 0x3A, 0xB5, 0xD0, 0x12, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0xC0, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x54, 0x04, 0x62, 0xD0, 0x00, 0x3F, 0x91 + } + }, + { + 
149, + 79, + 0x39, + 0x00C0, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xC0, 0x52, 0x04, 0x7C, 0x6D, 0x8A, 0x51, 0xE8, 0x01, 0xE0, 0x54, 0x02, 0x51, 0xE9, 0x09, 0x01, 0x54, 0x01, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0x81, 0x0E, 0xE9, 0x0D, 0x7C, 0x6F, 0x5C, 0x54, 0x03, 0x52, 0x02, 0x01, 0x06, 0x53, 0xE8, 0x52, 0x01, 0x09, 0x00, 0x7C, 0x6D, 0xE3, 0x3E, 0xE8, 0x53, 0xE8, 0x51, 0xE9, 0x08, 0x51, 0xE8, 0x08, 0x52, 0x03, 0x08, 0x7C, 0x32, 0x52, 0x38, 0xFD, 0x62, 0xD0, 0xD7, 0xC2 + } + }, + { + 150, + 79, + 0x39, + 0x00C1, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xC1, 0x00, 0x52, 0x02, 0x01, 0x04, 0x53, 0xE8, 0x52, 0x01, 0x09, 0x00, 0x7C, 0x6D, 0xE3, 0x3E, 0xE8, 0x53, 0xE8, 0x51, 0xE9, 0x08, 0x51, 0xE8, 0x08, 0x52, 0x03, 0x01, 0x02, 0x08, 0x7C, 0x32, 0x52, 0x62, 0xD0, 0x00, 0x52, 0x02, 0x01, 0x03, 0x7C, 0x6E, 0xA3, 0x08, 0x52, 0x03, 0x01, 0x04, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFB, 0x7C, 0x6F, 0x4C, 0x52, 0x01, 0x60, 0xD4, 0x3E, 0xE8, 0x54, 0x05, 0xAB, 0x6B + } + }, + { + 151, + 79, + 0x39, + 0x00C2, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xC2, 0x48, 0x00, 0x01, 0xA0, 0x18, 0x52, 0x05, 0x21, 0x0F, 0x53, 0xE9, 0x52, 0x06, 0x2A, 0xE9, 0x08, 0x52, 0x03, 0x11, 0x01, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x80, 0x0C, 0x52, 0x05, 0x62, 0xD0, 0x00, 0x64, 0x64, 0x64, 0x64, 0x54, 0x06, 0x77, 0x00, 0x3D, 0x00, 0x04, 0xCF, 0x33, 0x38, 0xF9, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x71, 0x10, 0x41, 0x04, 0x5F, 0x70, 0xCF, 0x62, 0xD0, 0x00, 0x4D, 0xB0 + } + }, + { + 152, + 79, + 0x39, + 0x00C3, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xC3, 0x51, 0xAF, 0x29, 0xA0, 0x7C, 0x6F, 0x20, 0x10, 0x7C, 0x20, 0x0B, 0x7C, 0x20, 0x50, 0x20, 0x10, 0x50, 0x01, 0x08, 0x50, 0x00, 0x08, 0x50, 0xA0, 0x08, 0x08, 0x7C, 0x20, 0x57, 0x38, 0xFC, 0x20, 0x62, 0xC8, 0x0B, 0x62, 0xCA, 0x24, 0x43, 0xD6, 0x01, 0x62, 0xCD, 0x00, 0x56, 0x00, 0x20, 0x80, 0x06, 0x62, 0xCF, 0x00, 0x7B, 0x00, 0x3D, 0x00, 0x00, 0xBF, 0xF7, 0x41, 0xD6, 0xFE, 0x38, 0xFF, 0x54, 0xBF + } + }, + { + 153, + 79, + 0x39, + 0x00C4, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xC4, 0x20, 0x7F, 0x10, 0x4F, 0x3D, 0xFC, 0x21, 0xD0, 0x0C, 0x41, 0xD6, 0xEF, 0x41, 0xE0, 0x7F, 0x62, 0xC8, 0x0B, 0x80, 0x0A, 0x62, 0xC8, 0x00, 0x43, 0xD6, 0x10, 0x43, 0xE0, 0x80, 0x20, 0x7F, 0x43, 0xD6, 0x01, 0x40, 0x62, 0xD0, 0x00, 0x51, 0xAF, 0x29, 0xA0, 0x7C, 0x6F, 0x20, 0x71, 0x10, 0x43, 0x04, 0xA0, 0x70, 0xCF, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x5D, 0xF7, 0x54, 0x00, 0x70, 0xFE, 0x7C, 0xDE, 0xD4 + } + }, + { + 154, + 79, + 0x39, + 0x00C5, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xC5, 0x70, 0xF8, 0xB0, 0x13, 0x7C, 0x73, 0x90, 0xBF, 0xFC, 0x71, 0x01, 0x40, 0x70, 0xFE, 0x62, 0xE3, 0x38, 0x41, 0xD6, 0xFE, 0x80, 0x06, 0x10, 0x7C, 0x33, 0x60, 0x20, 0x71, 0x10, 0x41, 0x04, 0x5F, 0x70, 0xCF, 0x62, 0xD0, 0x00, 0x51, 0xAF, 0x29, 0xA0, 0x7C, 0x6F, 0x20, 0x48, 0x00, 0x01, 0xA0, 0x03, 0x71, 0x01, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x7C, 0x33, 0x60, 0x20, 0x71, 0x10, 0x41, 0x04, 0x7F, 0x17 + } + }, + { + 155, + 79, + 0x39, + 0x00C6, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xC6, 0x5F, 0x70, 0xCF, 0x62, 0xD0, 0x00, 0x51, 0xAF, 0x29, 0xA0, 0x7C, 0x6F, 0x20, 0x71, 0x10, 0x43, 0xEC, 0x02, 0x70, 0xCF, 0x62, 0xDA, 0x7F, 0x43, 0xE0, 0x80, 0x9F, 0x83, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x9F, 0x92, 0x71, 0x10, 0x43, 0xEC, 0x02, 0x70, 0xCF, 0x62, 0xDA, 0x7F, 0x43, 0xE0, 0x80, 0x9F, 0x6D, 
0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x7C, 0x73, 0x89, 0x38, 0xFF, 0x20, 0xF5, 0x04 + } + }, + { + 156, + 79, + 0x39, + 0x00C7, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xC7, 0x7F, 0x7C, 0x73, 0x89, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x5D, 0xC8, 0x39, 0x00, 0xB0, 0x18, 0x7C, 0x73, 0x90, 0xA0, 0x09, 0x56, 0x01, 0x01, 0x56, 0x00, 0x00, 0x80, 0x04, 0x7C, 0x6F, 0xC9, 0x62, 0xD0, 0x00, 0x52, 0x01, 0x80, 0x1D, 0x5D, 0xC9, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x47, 0xE9, 0x01, 0xA0, 0x09, 0x56, 0x01, 0x01, 0x56, 0x00, 0x00, 0x80, 0x04, 0x7C, 0x6F, 0xC9, 0x62, 0xD0, 0x00, 0xEB, 0xF1 + } + }, + { + 157, + 79, + 0x39, + 0x00C8, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xC8, 0x52, 0x01, 0x38, 0xFE, 0x20, 0x7F, 0x62, 0xD0, 0x00, 0x50, 0x00, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x7C, 0x70, 0xF8, 0xA0, 0x25, 0x5D, 0xD6, 0x53, 0xE9, 0x2E, 0xE9, 0xFE, 0x51, 0xE9, 0x54, 0x00, 0x43, 0xD6, 0x01, 0x52, 0xFC, 0x60, 0xCD, 0x52, 0xFB, 0x60, 0xCF, 0x5D, 0xD6, 0x53, 0xE9, 0x52, 0x00, 0x24, 0xE9, 0x51, 0xE9, 0x60, 0xD6, 0x80, 0x16, 0x3D, 0xFC, 0xA0, 0xD0, 0x11, 0x7C, 0x6F, 0x06, 0x28 + } + }, + { + 158, + 79, + 0x39, + 0x00C9, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xC9, 0xB9, 0x55, 0xE9, 0x00, 0x06, 0xE8, 0x00, 0x7C, 0x71, 0x08, 0x52, 0xFB, 0x3F, 0xE8, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x03, 0x52, 0xFB, 0x54, 0x01, 0x52, 0xFA, 0x54, 0x00, 0x7C, 0x70, 0xF8, 0xA0, 0x1C, 0x7C, 0x71, 0x65, 0x60, 0xCD, 0x52, 0x00, 0x60, 0xCF, 0x52, 0x01, 0x60, 0xCF, 0x5D, 0xD6, 0x53, 0xE9, 0x52, 0x02, 0x24, 0xE9, 0x51, 0xE9, 0x60, 0xD6, 0x80, 0x26, 0x3D, 0xFC, 0x41, 0x9F + } + }, + { + 159, + 79, + 0x39, + 0x00CA, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xCA, 0x9F, 0xD0, 0x21, 0x7C, 0x6F, 0xB9, 0x55, 0xE9, 0x00, 0x06, 0xE8, 0x00, 0x0E, 0xE9, 0x01, 0x7C, 0x6F, 0xD9, 0x52, 0xFC, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x06, 0xE8, 0x01, 0x7C, 0x71, 0x08, 0x52, 0x01, 0x3F, 0xE8, 0x38, 0xFD, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x7C, 0x70, 0xF8, 0xA0, 0x29, 0x5D, 0xD6, 0x53, 0xE9, 0x2E, 0xE9, 0xFE, 0x51, 0xE9, 0x54, 0x01, 0x43, 0xD6, 0x01, 0x10, 0x52, 0xEA, 0xF2 + } + }, + { + 160, + 79, + 0x39, + 0x00CB, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xCB, 0xFC, 0x7C, 0x33, 0x49, 0x62, 0xD0, 0x00, 0x20, 0x54, 0x00, 0x5D, 0xD6, 0x53, 0xE9, 0x52, 0x01, 0x24, 0xE9, 0x51, 0xE9, 0x60, 0xD6, 0x80, 0x17, 0x3D, 0xFC, 0xA0, 0xD0, 0x12, 0x7C, 0x6F, 0xB9, 0x55, 0xE9, 0x00, 0x06, 0xE8, 0x00, 0x0E, 0xE9, 0x01, 0x7C, 0x6D, 0x83, 0x54, 0x00, 0x52, 0x00, 0x62, 0xD0, 0x00, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x03, 0x7C, 0x70, 0xF8, 0xA0, 0x1B, 0x45, 0xA9 + } + }, + { + 161, + 79, + 0x39, + 0x00CC, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xCC, 0x7C, 0x71, 0x65, 0x08, 0x7C, 0x33, 0x53, 0x38, 0xFF, 0x7C, 0x72, 0x25, 0x5D, 0xD6, 0x53, 0xE9, 0x52, 0x02, 0x24, 0xE9, 0x51, 0xE9, 0x60, 0xD6, 0x80, 0x29, 0x3D, 0xFC, 0x9F, 0xD0, 0x24, 0x7C, 0x6F, 0xB9, 0x55, 0xE9, 0x00, 0x06, 0xE8, 0x00, 0x0E, 0xE9, 0x01, 0x7C, 0x6D, 0x83, 0x54, 0x00, 0x52, 0xFC, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x06, 0xE8, 0x01, 0x0E, 0xE9, 0x01, 0x7C, 0x6D, 0x83, 0xFD, 0x1A + } + }, + { + 162, + 79, + 0x39, + 0x00CD, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xCD, 0x54, 0x01, 0x7C, 0x71, 0x2E, 0x38, 0xFD, 0x20, 0x7F, 0x60, 0xCD, 0x5D, 0xF7, 0x08, 0x70, 0xFE, 0x5D, 0xCF, 0x7E, 0x60, 0xCD, 0x5D, 0xF7, 0x08, 0x70, 0xFE, 0x5D, 0xCF, 0x5C, 0x5D, 0xCF, 0x7E, 
0x49, 0xC9, 0x01, 0xBF, 0xFC, 0x41, 0xD6, 0xFE, 0x7F, 0x41, 0x05, 0xF7, 0x7C, 0x73, 0x82, 0x62, 0xD0, 0x00, 0x51, 0xAF, 0x29, 0x08, 0x7C, 0x6F, 0x20, 0x71, 0x10, 0x43, 0x05, 0x08, 0x43, 0x04, 0xA4, 0x69 + } + }, + { + 163, + 79, + 0x39, + 0x00CE, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xCE, 0x08, 0x70, 0xCF, 0x62, 0xD0, 0x04, 0x55, 0xB9, 0x00, 0x62, 0xD0, 0x03, 0x55, 0x99, 0x04, 0x55, 0x9A, 0x00, 0x55, 0x9B, 0xF8, 0x55, 0x9C, 0x00, 0x55, 0x9E, 0x64, 0x55, 0x9D, 0x32, 0x55, 0x9F, 0x00, 0x55, 0xA0, 0x00, 0x7C, 0x30, 0xB2, 0x90, 0x10, 0x7C, 0x6F, 0x64, 0x10, 0x57, 0x01, 0x50, 0xF4, 0x7C, 0x2B, 0xA8, 0x20, 0x7C, 0x6E, 0xE8, 0x7F, 0x10, 0x4F, 0x38, 0x03, 0x7C, 0x31, 0x35, 0x13, 0x48 + } + }, + { + 164, + 79, + 0x39, + 0x00CF, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xCF, 0x7C, 0x6F, 0x2F, 0xB0, 0x06, 0x56, 0x01, 0x20, 0x80, 0x04, 0x56, 0x01, 0xA0, 0x52, 0x01, 0x08, 0x7C, 0x31, 0x02, 0x38, 0xFF, 0x62, 0xD0, 0x03, 0x51, 0x99, 0x21, 0xFC, 0x62, 0xD0, 0x00, 0x08, 0x50, 0x00, 0x08, 0x7C, 0x32, 0x0C, 0x50, 0x20, 0x08, 0x50, 0x01, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x3D, 0x00, 0x00, 0xB0, 0x28, 0x62, 0xD0, 0x04, 0x51, 0xB9, 0x08, 0x50, 0x01, 0x08, 0x7C, 0x7C, 0x1B + } + }, + { + 165, + 79, + 0x39, + 0x00D0, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xD0, 0x32, 0x0C, 0x38, 0xFE, 0x50, 0x00, 0x08, 0x50, 0x02, 0x08, 0x7C, 0x32, 0x0C, 0x62, 0xD0, 0x03, 0x51, 0x9B, 0x08, 0x50, 0x1E, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x82, 0x52, 0x3D, 0x00, 0x10, 0xB1, 0x87, 0x50, 0x00, 0x08, 0x50, 0x02, 0x08, 0x7C, 0x32, 0x0C, 0x7C, 0x40, 0x1F, 0x62, 0xD0, 0x00, 0x08, 0x50, 0x01, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x50, 0xC0, 0x08, 0x50, 0x03, 0x08, 0x01, 0x26 + } + }, + { + 166, + 79, + 0x39, + 0x00D1, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xD1, 0x7C, 0x32, 0x0C, 0x50, 0xC1, 0x08, 0x50, 0x04, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x50, 0xC2, 0x08, 0x50, 0x05, 0x08, 0x7C, 0x32, 0x0C, 0x50, 0x00, 0x08, 0x50, 0x06, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x10, 0x50, 0x00, 0x7C, 0x2B, 0x69, 0x62, 0xD0, 0x00, 0x20, 0x08, 0x50, 0x07, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x10, 0x50, 0x01, 0x7C, 0x2B, 0x69, 0x62, 0xD0, 0x00, 0x20, 0x08, 0xD6, 0xD1 + } + }, + { + 167, + 79, + 0x39, + 0x00D2, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xD2, 0x50, 0x08, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x10, 0x50, 0x02, 0x7C, 0x2B, 0x69, 0x62, 0xD0, 0x00, 0x20, 0x08, 0x50, 0x09, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x10, 0x50, 0x03, 0x7C, 0x2B, 0x69, 0x62, 0xD0, 0x00, 0x20, 0x08, 0x50, 0x0A, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x10, 0x50, 0x04, 0x7C, 0x2B, 0x69, 0x62, 0xD0, 0x00, 0x20, 0x08, 0x50, 0x0B, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xAF, 0x84 + } + }, + { + 168, + 79, + 0x39, + 0x00D3, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xD3, 0xFE, 0x10, 0x50, 0x05, 0x7C, 0x2B, 0x69, 0x62, 0xD0, 0x00, 0x20, 0x08, 0x50, 0x0C, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x10, 0x50, 0x06, 0x7C, 0x2B, 0x69, 0x62, 0xD0, 0x00, 0x20, 0x08, 0x50, 0x0D, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x10, 0x50, 0x07, 0x7C, 0x2B, 0x69, 0x62, 0xD0, 0x00, 0x20, 0x08, 0x50, 0x0E, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x50, 0x07, 0x10, 0x06, 0x33 + } + }, + { + 169, + 79, + 0x39, + 0x00D4, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xD4, 0x08, 0x57, 0xA0, 0x28, 0x53, 0xE9, 0x18, 0x75, 0x09, 0x00, 0x28, 0x53, 0xE8, 0x20, 
0x51, 0xE9, 0x08, 0x51, 0xE8, 0x08, 0x50, 0x0F, 0x08, 0x7C, 0x32, 0x52, 0x38, 0xFD, 0x50, 0x10, 0x08, 0x50, 0x12, 0x08, 0x50, 0x11, 0x08, 0x7C, 0x32, 0x52, 0x50, 0xA0, 0x08, 0x50, 0x02, 0x08, 0x50, 0x13, 0x08, 0x7C, 0x32, 0x52, 0x38, 0xFA, 0x50, 0x04, 0x08, 0x50, 0x00, 0x08, 0x50, 0x15, 0x08, 0x7C, 0x62, 0xEC + } + }, + { + 170, + 79, + 0x39, + 0x00D5, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xD5, 0x32, 0x52, 0x50, 0x00, 0x08, 0x50, 0x17, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFB, 0x50, 0x00, 0x08, 0x50, 0x18, 0x08, 0x7C, 0x32, 0x0C, 0x50, 0x00, 0x08, 0x50, 0x19, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x50, 0x00, 0x08, 0x50, 0x1A, 0x08, 0x7C, 0x32, 0x0C, 0x50, 0x00, 0x08, 0x50, 0x1B, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x50, 0x00, 0x08, 0x50, 0x1C, 0x08, 0x7C, 0x32, 0x0C, 0x62, 0xD0, 0x66, 0xF5 + } + }, + { + 171, + 79, + 0x39, + 0x00D6, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xD6, 0x03, 0x51, 0x9C, 0x08, 0x50, 0x1D, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x62, 0xD0, 0x03, 0x51, 0x9E, 0x08, 0x50, 0x1E, 0x08, 0x7C, 0x32, 0x0C, 0x62, 0xD0, 0x03, 0x51, 0x9D, 0x08, 0x50, 0x1F, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x80, 0xC7, 0x3D, 0x00, 0x20, 0xB0, 0x03, 0x80, 0xC0, 0x3D, 0x00, 0x30, 0xB0, 0xBB, 0x50, 0x01, 0x08, 0x50, 0x00, 0x08, 0x7C, 0x32, 0x0C, 0x50, 0x04, 0x08, 0x7D, 0x24 + } + }, + { + 172, + 79, + 0x39, + 0x00D7, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xD7, 0x50, 0x01, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x50, 0x01, 0x08, 0x50, 0x02, 0x08, 0x7C, 0x32, 0x0C, 0x62, 0xD0, 0x03, 0x51, 0x9F, 0x08, 0x50, 0x29, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x50, 0x04, 0x08, 0x50, 0x2A, 0x08, 0x7C, 0x32, 0x0C, 0x50, 0x08, 0x08, 0x50, 0x2B, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x50, 0x08, 0x08, 0x50, 0x2C, 0x08, 0x7C, 0x32, 0x0C, 0x50, 0x00, 0x08, 0x50, 0xFF, 0x29 + } + }, + { + 173, + 79, + 0x39, + 0x00D8, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xD8, 0x48, 0x08, 0x50, 0x2D, 0x08, 0x7C, 0x32, 0x52, 0x38, 0xFB, 0x50, 0x1C, 0x08, 0x50, 0x2F, 0x08, 0x7C, 0x32, 0x0C, 0x50, 0x00, 0x08, 0x50, 0x30, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x50, 0x08, 0x08, 0x50, 0x31, 0x08, 0x7C, 0x32, 0x0C, 0x50, 0x08, 0x08, 0x50, 0x32, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x50, 0x5A, 0x08, 0x50, 0x33, 0x08, 0x7C, 0x32, 0x0C, 0x50, 0x00, 0x08, 0x50, 0x34, 0xD9, 0xDE + } + }, + { + 174, + 79, + 0x39, + 0x00D9, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xD9, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x50, 0x04, 0x08, 0x50, 0x35, 0x08, 0x7C, 0x32, 0x0C, 0x50, 0x0C, 0x08, 0x50, 0x36, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x50, 0x05, 0x08, 0x50, 0x37, 0x08, 0x7C, 0x32, 0x0C, 0x50, 0x01, 0x08, 0x50, 0x38, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x7C, 0x2B, 0x1A, 0x7C, 0x31, 0x1F, 0x38, 0xFD, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x7C, 0x32, 0x06, 0x7C, 0x20, 0x6D + } + }, + { + 175, + 79, + 0x39, + 0x00DA, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xDA, 0x6F, 0x3C, 0x54, 0x00, 0x3D, 0x00, 0x30, 0xB0, 0x05, 0x90, 0xE8, 0x80, 0x03, 0x90, 0x4B, 0x62, 0xD0, 0x03, 0x51, 0x99, 0x21, 0x01, 0x62, 0xD0, 0x00, 0x39, 0x01, 0xB0, 0x19, 0x7C, 0x31, 0x35, 0x62, 0xD4, 0x00, 0x62, 0xD5, 0x00, 0x62, 0xD1, 0x00, 0x62, 0xD3, 0x00, 0x62, 0xD0, 0x00, 0x62, 0xE3, 0x38, 0x50, 0x00, 0x00, 0x7C, 0x6F, 0x3C, 0x54, 0x01, 0x52, 0x01, 0x3B, 0x00, 0xA0, 0x17, 0xE4, 0xF6 + } + }, + { + 176, + 79, + 0x39, + 0x00DB, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 
0x06, 0x07, 0x00, 0xDB, 0x52, 0x01, 0x08, 0x52, 0x00, 0x08, 0x7C, 0x2B, 0x1B, 0x9C, 0xEE, 0x52, 0x01, 0x08, 0x52, 0x00, 0x08, 0x7C, 0x2B, 0x1F, 0x38, 0xFC, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x7C, 0x6F, 0x2F, 0xB0, 0x30, 0x50, 0x00, 0x08, 0x7C, 0x32, 0xA7, 0x62, 0xD0, 0x03, 0x53, 0x99, 0x50, 0x1E, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFE, 0x62, 0xD0, 0x03, 0x53, 0x9B, 0x47, 0x99, 0x02, 0xA0, 0x70, 0xFB, 0x25 + } + }, + { + 177, + 79, + 0x39, + 0x00DC, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xDC, 0x51, 0x99, 0x21, 0xFD, 0x62, 0xD0, 0x00, 0x08, 0x50, 0x00, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x80, 0x5E, 0x3D, 0x00, 0x10, 0xB0, 0x33, 0x50, 0x00, 0x08, 0x7C, 0x32, 0xA7, 0x62, 0xD0, 0x03, 0x53, 0x99, 0x50, 0x1D, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFE, 0x62, 0xD0, 0x03, 0x53, 0x9C, 0x50, 0x1F, 0x08, 0x7C, 0x32, 0xA7, 0x62, 0xD0, 0x03, 0x53, 0x9D, 0x50, 0x1E, 0x08, 0x7C, 0x32, 0xA7, 0x46, 0xBC + } + }, + { + 178, + 79, + 0x39, + 0x00DD, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xDD, 0x38, 0xFE, 0x62, 0xD0, 0x03, 0x53, 0x9E, 0x80, 0x27, 0x3D, 0x00, 0x20, 0xB0, 0x10, 0x50, 0x00, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFF, 0x62, 0xD0, 0x03, 0x53, 0x99, 0x80, 0x13, 0x48, 0x00, 0x40, 0xA0, 0x0E, 0x50, 0x00, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFF, 0x62, 0xD0, 0x03, 0x53, 0x99, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x04, 0x50, 0x00, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFF, 0x62, 0xAA, 0x85 + } + }, + { + 179, + 79, + 0x39, + 0x00DE, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xDE, 0xD0, 0x00, 0x54, 0x00, 0x3D, 0x00, 0x01, 0xA0, 0x1F, 0x52, 0x00, 0x21, 0x70, 0x39, 0x30, 0xB0, 0x0E, 0x50, 0x01, 0x08, 0x50, 0x00, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x80, 0xE7, 0x52, 0x00, 0x62, 0xD0, 0x03, 0x53, 0x99, 0x80, 0xDE, 0x50, 0x29, 0x08, 0x7C, 0x32, 0xA7, 0x62, 0xD0, 0x03, 0x53, 0x9F, 0x50, 0x02, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x39, 0x81, 0xB0, 0x82, 0x36 + } + }, + { + 180, + 79, + 0x39, + 0x00DF, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xDF, 0xC4, 0x50, 0x2A, 0x08, 0x7C, 0x32, 0xA7, 0x62, 0xD0, 0x00, 0x53, 0xA5, 0x50, 0x2B, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x53, 0x15, 0x50, 0x2C, 0x08, 0x7C, 0x32, 0xA7, 0x62, 0xD0, 0x00, 0x53, 0x16, 0x50, 0x2D, 0x08, 0x7C, 0x32, 0xF7, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x00, 0x53, 0xA3, 0x18, 0x53, 0xA4, 0x50, 0x2F, 0x08, 0x7C, 0x28, 0x83 + } + }, + { + 181, + 79, + 0x39, + 0x00E0, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xE0, 0x32, 0xA7, 0x38, 0xFF, 0x62, 0xD0, 0x00, 0x53, 0x42, 0x50, 0x30, 0x08, 0x7C, 0x32, 0xA7, 0x62, 0xD0, 0x04, 0x53, 0xB7, 0x50, 0x31, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x53, 0xA6, 0x50, 0x32, 0x08, 0x7C, 0x32, 0xA7, 0x62, 0xD0, 0x00, 0x53, 0x17, 0x50, 0x36, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x53, 0xA7, 0x50, 0x37, 0x08, 0x7C, 0x32, 0xA7, 0x62, 0x39, 0xA6 + } + }, + { + 182, + 79, + 0x39, + 0x00E1, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xE1, 0xD0, 0x00, 0x53, 0xA8, 0x50, 0x38, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x53, 0xA9, 0x10, 0x7C, 0x18, 0x83, 0x7C, 0x17, 0xB7, 0x20, 0x80, 0x04, 0x62, 0xE3, 0x38, 0x7C, 0x31, 0xC5, 0x62, 0xD0, 0x00, 0x39, 0x00, 0xBF, 0xF4, 0x7C, 0x71, 0x10, 0x7C, 0x49, 0x07, 0x62, 0xE3, 0x38, 0x52, 0x01, 0x71, 0x10, 0x60, 0xE0, 0x50, 0x01, 0x08, 0x50, 0x02, 0x08, 0x70, 0xCF, 0x7C, 0xFE, 
0x31 + } + }, + { + 183, + 79, + 0x39, + 0x00E2, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xE2, 0x32, 0x0C, 0x38, 0xFE, 0x38, 0xFC, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x7C, 0x6F, 0x2F, 0xB0, 0x06, 0x3D, 0xFC, 0x00, 0xB0, 0x0B, 0x3D, 0x00, 0x20, 0xA0, 0x06, 0x48, 0x00, 0x40, 0xA0, 0x26, 0x62, 0xD0, 0x04, 0x06, 0xB9, 0x40, 0x51, 0xB9, 0x29, 0x20, 0x62, 0xD0, 0x00, 0x08, 0x50, 0x01, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x80, 0x04, 0x62, 0xE3, 0x38, 0x7C, 0x31, 0xC5, 0x62, 0xD0, 0xA4, 0x7E + } + }, + { + 184, + 79, + 0x39, + 0x00E3, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xE3, 0x00, 0x39, 0x00, 0xBF, 0xF4, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x7C, 0x6F, 0x2F, 0xB0, 0x06, 0x3D, 0xFC, 0x00, 0xB0, 0x0B, 0x3D, 0x00, 0x20, 0xA0, 0x06, 0x48, 0x00, 0x40, 0xA0, 0x0F, 0x62, 0xD0, 0x04, 0x51, 0xB9, 0x08, 0x50, 0x01, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x50, 0x00, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFF, 0x21, 0x80, 0x62, 0xD0, 0x04, 0x53, 0xB8, 0x38, 0xFF, 0xE6, 0x03 + } + }, + { + 185, + 79, + 0x39, + 0x00E4, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xE4, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x7C, 0x6F, 0x2F, 0xB0, 0x0A, 0x3D, 0xFC, 0x01, 0xB0, 0x19, 0x90, 0xC1, 0x80, 0x15, 0x3D, 0x00, 0x10, 0xB0, 0x05, 0x90, 0x63, 0x80, 0x0C, 0x3D, 0x00, 0x30, 0xB0, 0x05, 0x90, 0x21, 0x80, 0x03, 0x90, 0x56, 0x52, 0xFC, 0x08, 0x7C, 0x2B, 0x4C, 0x38, 0xFF, 0x38, 0xFF, 0x20, 0x7F, 0x62, 0xD0, 0x04, 0x51, 0xB9, 0x62, 0xD0, 0x00, 0x67, 0x67, 0x67, 0x67, 0x15, 0x62 + } + }, + { + 186, + 79, + 0x39, + 0x00E5, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xE5, 0x67, 0x67, 0x21, 0x03, 0x7F, 0x50, 0x84, 0x08, 0x50, 0x01, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x7C, 0x6F, 0x64, 0x7C, 0x32, 0x06, 0x62, 0xD0, 0x00, 0x62, 0xE3, 0x38, 0x50, 0x01, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFF, 0x62, 0xD0, 0x00, 0x39, 0x04, 0xA0, 0x10, 0x50, 0x00, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFF, 0x62, 0xD0, 0x00, 0x39, 0x01, 0xAF, 0xDA, 0x7C, 0x6E, 0xE8, 0x7F, 0x10, 0x4F, 0xD5, 0xE3 + } + }, + { + 187, + 79, + 0x39, + 0x00E6, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xE6, 0x38, 0x02, 0x7C, 0x6F, 0x64, 0x56, 0x01, 0xFA, 0x56, 0x00, 0x00, 0x80, 0x36, 0x7C, 0x32, 0x06, 0x62, 0xD0, 0x00, 0x50, 0x00, 0x08, 0x7C, 0x32, 0xA7, 0x38, 0xFF, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x26, 0xE9, 0x80, 0x62, 0xD0, 0x04, 0x51, 0xB8, 0x62, 0xD0, 0x00, 0x3A, 0xE9, 0xA0, 0x03, 0x80, 0x1C, 0x10, 0x57, 0x03, 0x50, 0xE3, 0x7C, 0x2B, 0xA8, 0x20, 0x62, 0xE3, 0x38, 0x7B, 0x01, 0x1F, 0xA4, 0x82 + } + }, + { + 188, + 79, + 0x39, + 0x00E7, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xE7, 0x00, 0x00, 0x3D, 0x00, 0x00, 0xBF, 0xC7, 0x3D, 0x01, 0x00, 0xBF, 0xC2, 0x7C, 0x6E, 0xE8, 0x38, 0xFE, 0x20, 0x7F, 0x7C, 0x6F, 0x64, 0x10, 0x57, 0x01, 0x50, 0xF4, 0x7C, 0x2B, 0xA8, 0x20, 0x7C, 0x6E, 0xE8, 0x7C, 0x32, 0x06, 0x62, 0xD0, 0x00, 0x7F, 0x7C, 0x31, 0x77, 0x7F, 0x7C, 0x31, 0xC1, 0x7F, 0x43, 0x05, 0x08, 0x62, 0xD0, 0x00, 0x26, 0xB0, 0xFB, 0x51, 0xB0, 0x60, 0x00, 0x62, 0xDA, 0x4A, 0xCF + } + }, + { + 189, + 79, + 0x39, + 0x00E8, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xE8, 0xEF, 0x43, 0xE0, 0x10, 0x7C, 0x31, 0x9D, 0x7F, 0x7C, 0x31, 0xB6, 0x7C, 0x73, 0x82, 0x41, 0x05, 0xF7, 0x62, 0xD0, 0x00, 0x51, 0xB0, 0x29, 0x04, 0x53, 0xB0, 0x51, 0xB0, 0x60, 0x00, 0x7F, 0x7F, 0x7F, 0x7F, 0x62, 0xD0, 0x00, 0x50, 0x00, 0x7F, 0x10, 0x4F, 0x20, 0x7F, 0x7F, 0x10, 0x4F, 
0x20, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xC3, 0x00, 0x62, 0xD0, 0x04, 0x55, 0xC2, 0x00, 0x62, 0xD0, 0x04, 0x09, 0x4E + } + }, + { + 190, + 79, + 0x39, + 0x00E9, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xE9, 0x55, 0xC1, 0x00, 0x7C, 0x3A, 0x1F, 0x10, 0x7C, 0x49, 0x5B, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x62, 0xD0, 0x03, 0x51, 0x9B, 0x21, 0xF0, 0x62, 0xD0, 0x04, 0x53, 0xC2, 0x7C, 0x6F, 0x2F, 0xA0, 0x06, 0x3D, 0x00, 0x30, 0xB0, 0x0B, 0x7C, 0x73, 0x26, 0x62, 0xD0, 0x00, 0x53, 0x39, 0x80, 0x07, 0x62, 0xD0, 0x00, 0x55, 0x39, 0x00, 0x62, 0xD0, 0x00, 0x51, 0x39, 0x62, 0xD0, 0x00, 0x53, 0x3A, 0x4C, 0xD5 + } + }, + { + 191, + 79, + 0x39, + 0x00EA, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xEA, 0x62, 0xD0, 0x00, 0x51, 0x39, 0x62, 0xD0, 0x00, 0x53, 0x3B, 0x62, 0xD0, 0x00, 0x51, 0x39, 0x62, 0xD0, 0x00, 0x53, 0x3C, 0x7C, 0x3A, 0x21, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x62, 0xD0, 0x04, 0x51, 0xCF, 0x54, 0x01, 0x10, 0x52, 0xFB, 0x7C, 0x49, 0x92, 0x20, 0x62, 0xD0, 0x04, 0x53, 0xC3, 0x3C, 0xC3, 0x00, 0xB0, 0x4F, 0x52, 0xFB, 0x3B, 0xFC, 0xA0, 0x49, 0x52, 0xFC, 0x3B, 0xFE, 0x3A + } + }, + { + 192, + 79, + 0x39, + 0x00EB, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xEB, 0xFB, 0xD0, 0x09, 0x62, 0xD0, 0x04, 0x55, 0xC3, 0x2F, 0x80, 0x21, 0x62, 0xD0, 0x04, 0x55, 0xC3, 0x4F, 0x3D, 0xFB, 0x00, 0xB0, 0x16, 0x7C, 0x3A, 0x2C, 0x7C, 0x3A, 0x22, 0x62, 0xD0, 0x00, 0x39, 0x00, 0xA0, 0x09, 0x7C, 0x3A, 0x22, 0x62, 0xD0, 0x04, 0x53, 0xC3, 0x62, 0xD0, 0x04, 0x51, 0xC1, 0x62, 0xD0, 0x04, 0x3A, 0xC3, 0xB0, 0x0C, 0x62, 0xD0, 0x04, 0x52, 0x01, 0x01, 0x01, 0x53, 0xCF, 0x63, 0x05 + } + }, + { + 193, + 79, + 0x39, + 0x00EC, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xEC, 0x80, 0x04, 0x7C, 0x70, 0x5B, 0x62, 0xD0, 0x04, 0x3C, 0xC3, 0x00, 0xA0, 0x09, 0x52, 0xFB, 0x08, 0x7C, 0x3A, 0x28, 0x38, 0xFF, 0x62, 0xD0, 0x04, 0x51, 0xC3, 0x08, 0x52, 0xFB, 0x08, 0x91, 0x86, 0x38, 0xFE, 0x39, 0x00, 0xA0, 0x21, 0x62, 0xD0, 0x04, 0x55, 0xC3, 0xFF, 0x62, 0xD0, 0x04, 0x51, 0xC1, 0x62, 0xD0, 0x04, 0x3A, 0xC3, 0xB0, 0x0C, 0x62, 0xD0, 0x04, 0x52, 0x01, 0x01, 0x01, 0x53, 0x63, 0x06 + } + }, + { + 194, + 79, + 0x39, + 0x00ED, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xED, 0xCF, 0x80, 0x04, 0x7C, 0x70, 0x5B, 0x62, 0xD0, 0x04, 0x3C, 0xC3, 0x00, 0xA0, 0x06, 0x56, 0x00, 0x01, 0x80, 0x04, 0x56, 0x00, 0x00, 0x52, 0xFB, 0x08, 0x52, 0xFC, 0x08, 0x50, 0x04, 0x08, 0x50, 0xC3, 0x08, 0x62, 0xD0, 0x00, 0x50, 0x0F, 0x08, 0x10, 0x7C, 0x2B, 0x40, 0x38, 0xFA, 0x62, 0xD0, 0x04, 0x3C, 0xC3, 0x00, 0xA0, 0x2B, 0x90, 0x31, 0x62, 0xD0, 0x04, 0x3C, 0xC3, 0x00, 0xA0, 0x21, 0x41, 0xC3 + } + }, + { + 195, + 79, + 0x39, + 0x00EE, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xEE, 0x62, 0xD0, 0x04, 0x3C, 0xCF, 0x00, 0xB0, 0x0F, 0x62, 0xD0, 0x04, 0x3C, 0xC3, 0xFF, 0xA0, 0x07, 0x62, 0xD0, 0x04, 0x55, 0xCF, 0x80, 0x62, 0xD0, 0x04, 0x51, 0xC3, 0x62, 0xD0, 0x04, 0x53, 0xC1, 0x52, 0x00, 0x62, 0xD0, 0x00, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x62, 0xD0, 0x04, 0x51, 0xC3, 0x54, 0x00, 0x3C, 0xC3, 0x00, 0xA0, 0x0B, 0x7C, 0x6F, 0x3C, 0x62, 0xD0, 0x00, 0x39, 0x14, 0x6A + } + }, + { + 196, + 79, + 0x39, + 0x00EF, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xEF, 0x30, 0xB0, 0x03, 0x80, 0xB1, 0x50, 0x10, 0x08, 0x50, 0x29, 0x08, 0x50, 0x28, 0x08, 0x90, 0xA9, 0x50, 0x20, 0x08, 0x50, 0x3F, 0x08, 0x50, 0x30, 0x08, 0x90, 0x9E, 0x38, 0xFA, 
0x50, 0x40, 0x08, 0x50, 0x49, 0x08, 0x50, 0x48, 0x08, 0x90, 0x91, 0x50, 0x80, 0x08, 0x50, 0x9F, 0x08, 0x50, 0x90, 0x08, 0x90, 0x86, 0x38, 0xFA, 0x62, 0xD0, 0x04, 0x51, 0xC3, 0x3B, 0x00, 0xA0, 0x6E, 0x3D, 0x00, 0x76, 0x2F + } + }, + { + 197, + 79, + 0x39, + 0x00F0, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xF0, 0x28, 0xC0, 0x69, 0x50, 0x29, 0x3B, 0x00, 0xC0, 0x63, 0x62, 0xD0, 0x04, 0x51, 0xDF, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x65, 0xE9, 0x51, 0xE9, 0x01, 0x10, 0x62, 0xD0, 0x04, 0x53, 0xC3, 0x62, 0xD0, 0x04, 0x51, 0xC1, 0x62, 0xD0, 0x04, 0x3A, 0xC3, 0xB0, 0x0F, 0x62, 0xD0, 0x04, 0x51, 0xBF, 0x01, 0x01, 0x62, 0xD0, 0x04, 0x53, 0xCF, 0x80, 0x04, 0x7C, 0x70, 0x5B, 0x50, 0x10, 0x08, 0x50, 0x29, 0x67, 0x12 + } + }, + { + 198, + 79, + 0x39, + 0x00F1, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xF1, 0x08, 0x50, 0x28, 0x08, 0x90, 0x33, 0x50, 0x20, 0x08, 0x50, 0x3F, 0x08, 0x50, 0x30, 0x08, 0x90, 0x28, 0x38, 0xFA, 0x50, 0x40, 0x08, 0x50, 0x49, 0x08, 0x50, 0x48, 0x08, 0x90, 0x1B, 0x50, 0x80, 0x08, 0x50, 0x9F, 0x08, 0x50, 0x90, 0x08, 0x90, 0x10, 0x38, 0xFA, 0x62, 0xD0, 0x04, 0x51, 0xCF, 0x62, 0xD0, 0x04, 0x53, 0xBF, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x62, 0xD0, 0x04, 0x51, 0xC3, 0xF6, 0x31 + } + }, + { + 199, + 79, + 0x39, + 0x00F2, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xF2, 0x3B, 0xFC, 0xC0, 0x21, 0x62, 0xD0, 0x04, 0x52, 0xFB, 0x3A, 0xC3, 0xC0, 0x18, 0x62, 0xD0, 0x04, 0x51, 0xC2, 0x23, 0xFA, 0x39, 0x00, 0xB0, 0x0D, 0x62, 0xD0, 0x04, 0x55, 0xC3, 0x00, 0x62, 0xD0, 0x04, 0x55, 0xCF, 0x00, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x0A, 0x50, 0x02, 0x3B, 0xFC, 0xD1, 0xA6, 0x7C, 0x73, 0x26, 0x54, 0x03, 0x56, 0x00, 0x00, 0x80, 0xCA, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x80, 0x46 + } + }, + { + 200, + 79, + 0x39, + 0x00F3, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xF3, 0x06, 0xE8, 0xC0, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x7C, 0x6D, 0x8A, 0x51, 0xE8, 0x01, 0xE0, 0x54, 0x02, 0x51, 0xE9, 0x09, 0x01, 0x54, 0x01, 0x7C, 0x6D, 0xA5, 0x65, 0xE8, 0x6B, 0xE9, 0x06, 0xE8, 0xDD, 0x0E, 0xE9, 0x02, 0x7C, 0x6D, 0x83, 0x7C, 0x6E, 0xE1, 0x52, 0x02, 0x53, 0xE6, 0x52, 0x01, 0x60, 0xD4, 0x3E, 0xE6, 0x53, 0xE6, 0x50, 0x00, 0x3A, 0xE9, 0xB0, 0x07, 0x51, 0xE6, 0x3A, 0xC6, 0xD3 + } + }, + { + 201, + 79, + 0x39, + 0x00F4, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xF4, 0xE8, 0xA0, 0x03, 0x80, 0x84, 0x62, 0xD0, 0x00, 0x52, 0x02, 0x01, 0x06, 0x7C, 0x6E, 0xA3, 0x54, 0x04, 0x3E, 0xE8, 0x54, 0x05, 0x52, 0x02, 0x01, 0x04, 0x7C, 0x6E, 0xA3, 0x54, 0x06, 0x3E, 0xE8, 0x54, 0x07, 0x7C, 0x6D, 0xA5, 0x65, 0xE8, 0x6B, 0xE9, 0x06, 0xE8, 0xF5, 0x0E, 0xE9, 0x02, 0x7C, 0x6D, 0x83, 0x7C, 0x6E, 0xE1, 0x51, 0xE9, 0x08, 0x51, 0xE8, 0x08, 0x52, 0x04, 0x08, 0x52, 0x05, 0xBB, 0xBE + } + }, + { + 202, + 79, + 0x39, + 0x00F5, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xF5, 0x08, 0x91, 0x1B, 0x38, 0xFC, 0x62, 0xD0, 0x00, 0x52, 0x03, 0x7C, 0x71, 0xD1, 0xC0, 0x31, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x65, 0xE8, 0x6B, 0xE9, 0x06, 0xE8, 0xA1, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x7C, 0x6E, 0xE1, 0x51, 0xE9, 0x08, 0x51, 0xE8, 0x08, 0x52, 0x06, 0x08, 0x52, 0x07, 0x08, 0x90, 0xEB, 0x38, 0xFC, 0x62, 0xD0, 0x00, 0x52, 0x03, 0x7C, 0x71, 0xD1, 0xD0, 0x03, 0x80, 0xCA, 0xDD + } + }, + { + 203, + 79, + 0x39, + 0x00F6, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xF6, 0x08, 0x77, 0x00, 0x7C, 0x72, 0x41, 0xCF, 0x33, 0x50, 0x04, 0x3B, 
0xFC, 0xD0, 0x09, 0x62, 0xD0, 0x04, 0x55, 0xC0, 0x04, 0x80, 0x08, 0x52, 0xFC, 0x62, 0xD0, 0x04, 0x53, 0xC0, 0x7C, 0x72, 0x41, 0xA0, 0xAD, 0x56, 0x00, 0x00, 0x80, 0x89, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0xC0, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x7C, 0x6D, 0x8A, 0x51, 0xE8, 0x01, 0xE0, 0x54, 0x02, 0x51, 0x65, 0x14 + } + }, + { + 204, + 79, + 0x39, + 0x00F7, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xF7, 0xE9, 0x09, 0x01, 0x54, 0x01, 0x7C, 0x6F, 0xF6, 0x53, 0xE8, 0x7C, 0x6E, 0xAE, 0x65, 0xE6, 0x6B, 0xE7, 0x06, 0xE6, 0xDD, 0x0E, 0xE7, 0x02, 0x51, 0xE7, 0x60, 0xD5, 0x50, 0x00, 0x3F, 0xE6, 0x51, 0xE8, 0x3F, 0xE6, 0x52, 0x02, 0x01, 0x06, 0x53, 0xE8, 0x52, 0x01, 0x09, 0x00, 0x7C, 0x6D, 0xE3, 0x3E, 0xE8, 0x53, 0xE8, 0x7C, 0x6E, 0xAE, 0x65, 0xE6, 0x6B, 0xE7, 0x06, 0xE6, 0xF5, 0x0E, 0xE7, 0xC3, 0xD1 + } + }, + { + 205, + 79, + 0x39, + 0x00F8, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xF8, 0x02, 0x7C, 0x6D, 0xEA, 0x51, 0xE8, 0x3F, 0xE6, 0x52, 0x02, 0x01, 0x04, 0x53, 0xE8, 0x52, 0x01, 0x09, 0x00, 0x7C, 0x6D, 0xE3, 0x3E, 0xE8, 0x53, 0xE8, 0x7C, 0x6E, 0xAE, 0x65, 0xE6, 0x6B, 0xE7, 0x06, 0xE6, 0xA1, 0x0E, 0xE7, 0x03, 0x7C, 0x6D, 0xEA, 0x51, 0xE8, 0x3F, 0xE6, 0x77, 0x00, 0x7C, 0x72, 0x41, 0xCF, 0x74, 0x3D, 0xFB, 0x00, 0xB0, 0x09, 0x56, 0x09, 0x01, 0x56, 0x08, 0x00, 0x80, 0x53, 0xF2 + } + }, + { + 206, + 79, + 0x39, + 0x00F9, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xF9, 0x07, 0x56, 0x09, 0x00, 0x56, 0x08, 0x00, 0x62, 0xD0, 0x00, 0x52, 0x09, 0x80, 0x0D, 0x62, 0xD0, 0x00, 0x50, 0x00, 0x80, 0x06, 0x62, 0xD0, 0x00, 0x50, 0x00, 0x38, 0xF6, 0x20, 0x7F, 0x10, 0x4F, 0x52, 0xFC, 0x13, 0xFA, 0x52, 0xFB, 0x1B, 0xF9, 0xC0, 0x12, 0x62, 0xD0, 0x00, 0x52, 0xFC, 0x13, 0xFA, 0x53, 0xE8, 0x52, 0xFB, 0x1B, 0xF9, 0x53, 0xE9, 0x80, 0x10, 0x62, 0xD0, 0x00, 0x52, 0xFA, 0x95, 0x77 + } + }, + { + 207, + 79, + 0x39, + 0x00FA, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xFA, 0x13, 0xFC, 0x53, 0xE8, 0x52, 0xF9, 0x1B, 0xFB, 0x53, 0xE9, 0x20, 0x7F, 0x10, 0x4F, 0x7C, 0x72, 0x93, 0xB0, 0x22, 0x3D, 0xFC, 0x01, 0xB0, 0x32, 0x62, 0xD0, 0x04, 0x51, 0xCF, 0x08, 0x50, 0x0E, 0x08, 0x7C, 0x32, 0x0C, 0x62, 0xD0, 0x04, 0x51, 0xC3, 0x08, 0x50, 0x0F, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x80, 0x16, 0x7C, 0x6F, 0x3C, 0x39, 0x30, 0xB0, 0x0F, 0x62, 0xD0, 0x04, 0x51, 0xC3, 0xA8, 0x9E + } + }, + { + 208, + 79, + 0x39, + 0x00FB, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xFB, 0x08, 0x50, 0x24, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x52, 0xFC, 0x08, 0x7C, 0x3A, 0x2D, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x62, 0xE3, 0x38, 0x7C, 0x2B, 0x06, 0x71, 0x01, 0x90, 0x67, 0x90, 0xC5, 0x80, 0x5D, 0x62, 0xD0, 0x00, 0x26, 0xAE, 0xFB, 0x51, 0xAE, 0x60, 0x00, 0x7C, 0x70, 0x4B, 0x51, 0xAE, 0x29, 0x04, 0x53, 0xAE, 0x51, 0xAE, 0x60, 0x00, 0x7C, 0x70, 0x4B, 0x26, 0x61, 0x11 + } + }, + { + 209, + 79, + 0x39, + 0x00FC, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xFC, 0xAF, 0xFD, 0x51, 0xAF, 0x60, 0x04, 0x7C, 0x70, 0x4B, 0x51, 0xAF, 0x29, 0x02, 0x7C, 0x6F, 0x20, 0x56, 0x01, 0x00, 0x80, 0x03, 0x77, 0x01, 0x3D, 0x01, 0x0A, 0xCF, 0xFA, 0x62, 0xE3, 0x38, 0x90, 0x9D, 0x62, 0xD0, 0x04, 0x3C, 0xC4, 0x00, 0xA0, 0x0C, 0x7C, 0x45, 0x53, 0x7C, 0x2C, 0x1E, 0x62, 0xD0, 0x00, 0x54, 0x00, 0x52, 0x00, 0x08, 0x90, 0x9B, 0x52, 0x00, 0x08, 0x7C, 0x39, 0x02, 0x38, 0x9A, 0x84 + } + }, + { + 210, + 79, + 0x39, + 0x00FD, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 
0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xFD, 0xFE, 0x8F, 0xA3, 0x38, 0xFE, 0x20, 0x8F, 0xFF, 0x10, 0x4F, 0x38, 0x01, 0x10, 0x7C, 0x16, 0x34, 0x20, 0x10, 0x57, 0x13, 0x50, 0x88, 0x7C, 0x2B, 0xA8, 0x20, 0x62, 0xD0, 0x04, 0x55, 0xC4, 0x01, 0x62, 0xD0, 0x00, 0x55, 0xA7, 0x0C, 0x62, 0xD0, 0x00, 0x55, 0xA8, 0x05, 0x62, 0xD0, 0x00, 0x55, 0xA9, 0x01, 0x10, 0x7C, 0x17, 0xB7, 0x7C, 0x19, 0x82, 0x62, 0xD0, 0x00, 0x20, 0x39, 0x00, 0xA0, 0xEE, 0x2D + } + }, + { + 211, + 79, + 0x39, + 0x00FE, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xFE, 0x1F, 0x71, 0x10, 0x5D, 0xE0, 0x54, 0x00, 0x41, 0xE0, 0xE7, 0x43, 0xE0, 0x18, 0x70, 0xCF, 0x62, 0xE3, 0x38, 0x7C, 0x49, 0x07, 0x62, 0xE3, 0x38, 0x52, 0x00, 0x7C, 0x72, 0xB2, 0x80, 0x06, 0x10, 0x7C, 0x10, 0x74, 0x20, 0x38, 0xFF, 0x20, 0x7F, 0x7C, 0x33, 0x69, 0x7C, 0x40, 0x59, 0x7C, 0x45, 0x51, 0x7C, 0x2B, 0xCA, 0x7C, 0x3A, 0x31, 0x7C, 0x40, 0x10, 0x7C, 0x2B, 0x19, 0x7F, 0x7C, 0x40, 0x55, 0xFC + } + }, + { + 212, + 79, + 0x39, + 0x00FF, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0xFF, 0x98, 0x7C, 0x36, 0x78, 0x7C, 0x45, 0x52, 0x7C, 0x2B, 0xFD, 0x7C, 0x3A, 0x4C, 0x7C, 0x40, 0x1E, 0x7C, 0x2B, 0x23, 0x7F, 0x10, 0x4F, 0x52, 0xFC, 0x08, 0x7C, 0x38, 0x88, 0x52, 0xFC, 0x08, 0x7C, 0x45, 0x4D, 0x38, 0xFE, 0x52, 0xFC, 0x08, 0x7C, 0x45, 0x76, 0x52, 0xFC, 0x08, 0x7C, 0x2E, 0x27, 0x38, 0xFE, 0x52, 0xFC, 0x08, 0x7C, 0x3E, 0x8C, 0x52, 0xFC, 0x08, 0x7C, 0x40, 0x28, 0x38, 0xFE, 0x1F, 0x91 + } + }, + { + 213, + 79, + 0x39, + 0x0100, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x00, 0x52, 0xFC, 0x08, 0x7C, 0x2B, 0x48, 0x52, 0xFC, 0x08, 0x7C, 0x38, 0xC9, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x62, 0xD0, 0x04, 0x55, 0xC5, 0x04, 0x38, 0xFF, 0x20, 0x7F, 0x7F, 0x62, 0xD0, 0x04, 0x51, 0xC5, 0x62, 0xD0, 0x00, 0x7F, 0x10, 0x4F, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x62, 0xD0, 0x00, 0x51, 0x54, 0x54, 0x01, 0x51, 0x53, 0x54, 0x00, 0x70, 0xFE, 0x10, 0x7C, 0x1E, 0xFE, 0x51 + } + }, + { + 214, + 79, + 0x39, + 0x0101, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x01, 0x1C, 0x62, 0xD0, 0x00, 0x20, 0x10, 0x52, 0x00, 0x08, 0x52, 0x01, 0x20, 0x7C, 0x1D, 0xD4, 0x20, 0x62, 0xDA, 0xF7, 0x71, 0x01, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0x7C, 0x1E, 0x1C, 0x20, 0x70, 0xFE, 0x62, 0xD0, 0x00, 0x55, 0x0D, 0x00, 0x55, 0x0C, 0x00, 0x7C, 0x72, 0x19, 0x92, 0x21, 0x7C, 0x73, 0x6D, 0x10, 0x7C, 0x1D, 0xCC, 0x7C, 0x20, 0x80, 0x7C, 0x20, 0x78, 0x20, 0x62, 0xD0, 0x00, 0x55, 0xFD, 0x50 + } + }, + { + 215, + 79, + 0x39, + 0x0102, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x02, 0xEF, 0x00, 0x55, 0xEE, 0x00, 0x62, 0xD0, 0x03, 0x55, 0xC8, 0x00, 0x55, 0xC9, 0x00, 0x55, 0xCA, 0x00, 0x55, 0xCB, 0x00, 0x7C, 0x2B, 0x67, 0x7F, 0x10, 0x4F, 0x38, 0x03, 0x7C, 0x72, 0x93, 0xB0, 0xF9, 0x7C, 0x70, 0xE8, 0x54, 0x01, 0x51, 0x0C, 0x54, 0x00, 0x71, 0x01, 0x62, 0xD0, 0x03, 0x47, 0x99, 0x02, 0xA0, 0x3F, 0x93, 0x58, 0x9F, 0x74, 0x7C, 0x70, 0xE8, 0x08, 0x51, 0x0C, 0x62, 0xD0, 0xDD, 0x11 + } + }, + { + 216, + 79, + 0x39, + 0x0103, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x03, 0x00, 0x53, 0xEE, 0x18, 0x53, 0xEF, 0x71, 0x01, 0x62, 0xD0, 0x03, 0x55, 0xC8, 0x00, 0x55, 0xC9, 0x00, 0x55, 0xCA, 0x00, 0x55, 0xCB, 0x00, 0x7C, 0x2B, 0x67, 0x10, 0x7C, 0x18, 0x83, 0x20, 0x10, 0x57, 0x13, 0x50, 0x88, 0x7C, 0x2B, 0xA8, 0x7C, 0x10, 0x74, 0x20, 0x91, 0x3B, 0x7C, 0x6F, 0xEA, 0x81, 0x33, 0x62, 0xD0, 0x03, 0x51, 0x9A, 0x21, 0x0F, 0x54, 0x02, 0x62, 0xD0, 0x04, 
0x3C, 0xCB, 0x0A, 0x6C + } + }, + { + 217, + 79, + 0x39, + 0x0104, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x04, 0xF1, 0xB0, 0x45, 0x62, 0xD0, 0x03, 0x3C, 0x9C, 0x00, 0xA0, 0x03, 0x93, 0x67, 0x3D, 0x02, 0x00, 0xA0, 0x06, 0x7C, 0x6F, 0xEA, 0x80, 0x97, 0x7C, 0x73, 0x2E, 0xA0, 0x27, 0x62, 0xD0, 0x03, 0x52, 0x01, 0x12, 0xE5, 0x62, 0xD0, 0x00, 0x53, 0xE8, 0x52, 0x00, 0x62, 0xD0, 0x03, 0x1A, 0xE4, 0x7C, 0x72, 0x49, 0x62, 0xD0, 0x03, 0x12, 0xDB, 0x7C, 0x70, 0xA1, 0x1A, 0xDA, 0xC0, 0x70, 0x91, 0x2A, 0x7B, 0x4F + } + }, + { + 218, + 79, + 0x39, + 0x0105, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x05, 0x80, 0x6C, 0x7C, 0x6F, 0xEA, 0x80, 0x67, 0x62, 0xD0, 0x04, 0x3C, 0xCB, 0xF2, 0xB0, 0x1E, 0x3D, 0x02, 0x00, 0xB0, 0x06, 0x7C, 0x73, 0x2E, 0xB0, 0x08, 0x90, 0xCD, 0x7C, 0x6F, 0xEA, 0x80, 0x4E, 0x62, 0xD0, 0x03, 0x3C, 0x9D, 0x00, 0xA0, 0x46, 0x93, 0x0A, 0x80, 0x42, 0x9E, 0xBE, 0x7C, 0x70, 0xE8, 0x08, 0x51, 0x0C, 0x62, 0xD0, 0x00, 0x53, 0xEE, 0x18, 0x53, 0xEF, 0x71, 0x01, 0x62, 0xD0, 0xC8, 0xEA + } + }, + { + 219, + 79, + 0x39, + 0x0106, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x06, 0x03, 0x55, 0xC8, 0x00, 0x55, 0xC9, 0x00, 0x55, 0xCA, 0x00, 0x55, 0xCB, 0x00, 0x7C, 0x2B, 0x67, 0x90, 0xFD, 0x90, 0x94, 0x7C, 0x6F, 0xEA, 0x80, 0x15, 0x62, 0xD0, 0x04, 0x3C, 0xCB, 0xF4, 0xA0, 0x0D, 0x10, 0x57, 0x00, 0x50, 0x01, 0x7C, 0x1D, 0xD4, 0x20, 0x7C, 0x73, 0x6D, 0x7C, 0x72, 0x93, 0xB0, 0x70, 0x7C, 0x70, 0xE8, 0x54, 0x01, 0x51, 0x0C, 0x54, 0x00, 0x71, 0x01, 0x62, 0xD0, 0x00, 0x44, 0xE3 + } + }, + { + 220, + 79, + 0x39, + 0x0107, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x07, 0x52, 0x01, 0x12, 0xEF, 0x62, 0xD0, 0x00, 0x53, 0xE8, 0x52, 0x00, 0x62, 0xD0, 0x00, 0x1A, 0xEE, 0x7C, 0x72, 0x49, 0x53, 0xE6, 0x51, 0xE9, 0x53, 0xE7, 0x51, 0xE6, 0x62, 0xD0, 0x03, 0x04, 0xCB, 0x62, 0xD0, 0x00, 0x51, 0xE7, 0x62, 0xD0, 0x03, 0x0C, 0xCA, 0x0E, 0xC9, 0x00, 0x0E, 0xC8, 0x00, 0x62, 0xD0, 0x00, 0x52, 0x01, 0x53, 0xEF, 0x52, 0x00, 0x53, 0xEE, 0x62, 0xD0, 0x03, 0x51, 0xCB, 0x39, 0xCE + } + }, + { + 221, + 79, + 0x39, + 0x0108, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x08, 0x11, 0x30, 0x51, 0xCA, 0x19, 0x75, 0x51, 0xC9, 0x19, 0x00, 0x51, 0xC8, 0x19, 0x00, 0xC0, 0x12, 0x9E, 0x1A, 0x62, 0xD0, 0x03, 0x55, 0xC8, 0x00, 0x55, 0xC9, 0x00, 0x55, 0xCA, 0x00, 0x55, 0xCB, 0x00, 0x7C, 0x2B, 0x68, 0x38, 0xFD, 0x20, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xCB, 0xF1, 0x62, 0xD0, 0x03, 0x51, 0x9C, 0x08, 0x62, 0xD0, 0x03, 0x55, 0xD8, 0x00, 0x18, 0x53, 0xD9, 0x62, 0xD0, 0x03, 0xB7, 0xCB + } + }, + { + 222, + 79, + 0x39, + 0x0109, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x09, 0x3C, 0x9C, 0x00, 0xA0, 0x14, 0x10, 0x62, 0xD0, 0x03, 0x51, 0xD8, 0x08, 0x51, 0xD9, 0x20, 0x7C, 0x1D, 0xD4, 0x20, 0x7C, 0x70, 0xAA, 0x80, 0x0F, 0x10, 0x57, 0x00, 0x50, 0x01, 0x7C, 0x1D, 0xD4, 0x20, 0x70, 0xFE, 0x7C, 0x72, 0x19, 0x7C, 0x71, 0x54, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xCB, 0xF2, 0x62, 0xD0, 0x03, 0x51, 0xD7, 0x08, 0x51, 0xD6, 0x62, 0xD0, 0x03, 0x53, 0xD8, 0x18, 0x53, 0xD9, 0xF0, 0x3E + } + }, + { + 223, + 79, + 0x39, + 0x010A, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x0A, 0x10, 0x51, 0xD8, 0x08, 0x51, 0xD9, 0x20, 0x7C, 0x1D, 0xD4, 0x20, 0x7C, 0x70, 0xAA, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x62, 0xD0, 0x03, 0x50, 0x78, 0x3A, 0x9D, 0xD0, 0x07, 0x62, 0xD0, 0x03, 0x55, 0x9D, 0x78, 0x7C, 0x71, 0xD8, 0x53, 0xE9, 0x65, 0xE9, 0x7C, 0x71, 0xD8, 
0x64, 0x64, 0x64, 0x02, 0xE9, 0x54, 0x00, 0x80, 0x09, 0x62, 0xD0, 0x03, 0x76, 0x9D, 0x07, 0x00, 0x0A, 0x62, 0xD0, 0x03, 0xA5, 0xA9 + } + }, + { + 224, + 79, + 0x39, + 0x010B, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x0B, 0x3C, 0x9D, 0x1A, 0xD0, 0x0A, 0x62, 0xD0, 0x03, 0x52, 0x00, 0x3A, 0x9C, 0xCF, 0xE8, 0x62, 0xD0, 0x03, 0x51, 0x9D, 0x08, 0x62, 0xD0, 0x03, 0x55, 0xD6, 0x00, 0x18, 0x53, 0xD7, 0x62, 0xD0, 0x03, 0x51, 0x9D, 0x7C, 0x70, 0x0E, 0x7C, 0x6D, 0x9C, 0x62, 0xD0, 0x03, 0x51, 0xD7, 0x62, 0xD0, 0x00, 0x04, 0xE8, 0x62, 0xD0, 0x03, 0x51, 0xD6, 0x62, 0xD0, 0x00, 0x0C, 0xE9, 0x7C, 0x70, 0x17, 0x08, 0x59, 0x12 + } + }, + { + 225, + 79, + 0x39, + 0x010C, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x0C, 0x51, 0xE9, 0x62, 0xD0, 0x03, 0x53, 0xD6, 0x18, 0x53, 0xD7, 0x62, 0xD0, 0x03, 0x51, 0x9E, 0x08, 0x62, 0xD0, 0x03, 0x55, 0xDA, 0x00, 0x18, 0x53, 0xDB, 0x51, 0xDB, 0x08, 0x51, 0xDA, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x18, 0x53, 0xE8, 0x7C, 0x6D, 0x9C, 0x62, 0xD0, 0x03, 0x51, 0xDB, 0x62, 0xD0, 0x00, 0x04, 0xE8, 0x62, 0xD0, 0x03, 0x51, 0xDA, 0x62, 0xD0, 0x00, 0x0C, 0xE9, 0x7C, 0x70, 0x17, 0x5E, 0x1D + } + }, + { + 226, + 79, + 0x39, + 0x010D, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x0D, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x03, 0x53, 0xDA, 0x18, 0x53, 0xDB, 0x38, 0xFF, 0x20, 0x7F, 0x62, 0xD0, 0x00, 0x51, 0xAE, 0x21, 0x0D, 0x60, 0x00, 0x62, 0xD0, 0x00, 0x51, 0xAF, 0x21, 0xBB, 0x60, 0x04, 0x62, 0xD0, 0x00, 0x51, 0xB0, 0x21, 0x74, 0x7C, 0x73, 0x1E, 0x21, 0x00, 0x7C, 0x73, 0x16, 0x21, 0x10, 0x60, 0x10, 0x71, 0x10, 0x5D, 0x00, 0x70, 0xCF, 0x62, 0xD0, 0x04, 0x53, 0xCA, 0x71, 0x6E, 0x3E + } + }, + { + 227, + 79, + 0x39, + 0x010E, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x0E, 0x10, 0x5D, 0x04, 0x70, 0xCF, 0x62, 0xD0, 0x04, 0x53, 0xC9, 0x71, 0x10, 0x5D, 0x08, 0x70, 0xCF, 0x62, 0xD0, 0x04, 0x53, 0xC8, 0x71, 0x10, 0x5D, 0x0C, 0x70, 0xCF, 0x62, 0xD0, 0x04, 0x53, 0xC7, 0x71, 0x10, 0x5D, 0x10, 0x70, 0xCF, 0x62, 0xD0, 0x04, 0x53, 0xC6, 0x71, 0x10, 0x43, 0x00, 0xF2, 0x43, 0x04, 0x44, 0x43, 0x08, 0x8B, 0x43, 0x0C, 0xFF, 0x43, 0x10, 0xEF, 0x70, 0xCF, 0x7F, 0x62, 0x34, 0xCB + } + }, + { + 228, + 79, + 0x39, + 0x010F, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x0F, 0xD0, 0x04, 0x51, 0xCA, 0x71, 0x10, 0x60, 0x00, 0x70, 0xCF, 0x62, 0xD0, 0x04, 0x51, 0xC9, 0x71, 0x10, 0x60, 0x04, 0x70, 0xCF, 0x62, 0xD0, 0x04, 0x51, 0xC8, 0x71, 0x10, 0x60, 0x08, 0x70, 0xCF, 0x62, 0xD0, 0x04, 0x51, 0xC7, 0x71, 0x10, 0x60, 0x0C, 0x70, 0xCF, 0x62, 0xD0, 0x04, 0x51, 0xC6, 0x71, 0x10, 0x60, 0x10, 0x70, 0xCF, 0x62, 0xD0, 0x00, 0x51, 0xAE, 0x60, 0x00, 0x62, 0xD0, 0x00, 0xB4, 0xCC + } + }, + { + 229, + 79, + 0x39, + 0x0110, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x10, 0x7C, 0x72, 0x51, 0x51, 0xB0, 0x7C, 0x73, 0x1E, 0x7C, 0x73, 0x16, 0x60, 0x10, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x71, 0x10, 0x43, 0xEC, 0x01, 0x70, 0xFE, 0x70, 0xCF, 0x9F, 0x32, 0x7C, 0x31, 0xC5, 0x62, 0xD0, 0x00, 0x39, 0x00, 0xB0, 0x3A, 0x7C, 0x72, 0xCB, 0x10, 0x70, 0xCF, 0x7C, 0x1D, 0xD0, 0x7C, 0x20, 0x7C, 0x20, 0x62, 0xDB, 0xFE, 0x7C, 0x39, 0xF1, 0x71, 0x10, 0x43, 0xD7, 0x20, 0x43, 0x58, 0x15 + } + }, + { + 230, + 79, + 0x39, + 0x0111, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x11, 0xC9, 0x80, 0x70, 0xCF, 0x43, 0xFF, 0x08, 0x71, 0x10, 0x41, 0xC9, 0x7F, 0x41, 0xD7, 0xDF, 0x40, 0x7C, 0x73, 0x7B, 0x70, 0xCF, 0x7C, 0x3A, 0x08, 0x10, 0x7C, 
0x20, 0x78, 0x7C, 0x1D, 0xCC, 0x20, 0x62, 0xD0, 0x00, 0x55, 0xBB, 0xFF, 0x62, 0xD0, 0x03, 0x26, 0x99, 0xFD, 0x9F, 0x51, 0x71, 0x01, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x56, 0x00, 0x00, 0x71, 0x10, 0x41, 0xEC, 0xFE, 0x27, 0xB4 + } + }, + { + 231, + 79, + 0x39, + 0x0112, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x12, 0x70, 0xFE, 0x70, 0xCF, 0x9E, 0xC9, 0x7C, 0x71, 0x10, 0x80, 0x6B, 0x7C, 0x32, 0x06, 0x62, 0xD0, 0x00, 0x7C, 0x31, 0xC5, 0x39, 0x00, 0xB0, 0x5A, 0x3D, 0x00, 0x00, 0xB0, 0x55, 0x62, 0xD0, 0x00, 0x3C, 0xBB, 0xFF, 0xA0, 0x4D, 0x7C, 0x39, 0xE9, 0x62, 0xD0, 0x03, 0x51, 0xD9, 0x11, 0x02, 0x62, 0xD0, 0x00, 0x53, 0xE8, 0x62, 0xD0, 0x03, 0x51, 0xD8, 0x19, 0x00, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x50, 0x07 + } + }, + { + 232, + 79, + 0x39, + 0x0113, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x13, 0x62, 0xD0, 0x00, 0x51, 0xBB, 0x62, 0xD0, 0x00, 0x7C, 0x70, 0xBF, 0xC0, 0x22, 0x7C, 0x72, 0xCB, 0x56, 0x00, 0x01, 0x10, 0x70, 0xCF, 0x7C, 0x20, 0x7C, 0x20, 0x62, 0xDB, 0xFE, 0x10, 0x7C, 0x0C, 0x80, 0x20, 0x71, 0x10, 0x7C, 0x73, 0x7B, 0x10, 0x70, 0xCF, 0x7C, 0x20, 0x78, 0x20, 0x7C, 0x39, 0xED, 0x71, 0x01, 0x70, 0xFE, 0x62, 0xD0, 0x00, 0x51, 0x0D, 0x62, 0xD0, 0x03, 0x12, 0xD5, 0x62, 0x05, 0x72 + } + }, + { + 233, + 79, + 0x39, + 0x0114, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x14, 0xD0, 0x00, 0x53, 0xE8, 0x62, 0xD0, 0x00, 0x51, 0x0C, 0x62, 0xD0, 0x03, 0x1A, 0xD4, 0x7C, 0x72, 0x49, 0x62, 0xD0, 0x03, 0x12, 0xD9, 0x7C, 0x70, 0xA1, 0x1A, 0xD8, 0xCF, 0x6F, 0x52, 0x01, 0x7C, 0x72, 0xB2, 0x9E, 0x9B, 0x7C, 0x71, 0x54, 0x71, 0x01, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x62, 0xD0, 0x04, 0x3C, 0xCB, 0xF2, 0xB0, 0x09, 0x56, 0x01, 0x01, 0x56, 0x00, 0x00, 0x80, 0x34, 0xD1 + } + }, + { + 234, + 79, + 0x39, + 0x0115, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x15, 0x04, 0x7C, 0x6F, 0xC9, 0x62, 0xD0, 0x00, 0x52, 0x01, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0x4F, 0x20, 0x7F, 0x7F, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x56, 0x00, 0x00, 0x10, 0x7C, 0x18, 0x83, 0x7C, 0x19, 0x64, 0x20, 0x56, 0x00, 0x01, 0x52, 0x00, 0x08, 0x7C, 0x2B, 0x34, 0x38, 0xFF, 0x10, 0x7C, 0x18, 0x4D, 0x20, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x04, 0x7C, 0x6F, 0x3C, 0x54, 0x01, 0x3D, 0x3A, 0xDE + } + }, + { + 235, + 79, + 0x39, + 0x0116, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x16, 0x01, 0x50, 0xB0, 0x29, 0x56, 0x00, 0x00, 0x80, 0x1D, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0x00, 0x0E, 0xE9, 0x02, 0x7C, 0x6D, 0x83, 0x08, 0x52, 0x00, 0x01, 0x07, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x77, 0x00, 0x3D, 0x00, 0x99, 0xCF, 0xE0, 0x81, 0x85, 0x3D, 0x01, 0x40, 0xB0, 0x29, 0x56, 0x00, 0x00, 0x80, 0x1D, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0x00, 0x0E, 0x92, 0x8F + } + }, + { + 236, + 79, + 0x39, + 0x0117, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x17, 0xE9, 0x05, 0x7C, 0x6D, 0x83, 0x08, 0x52, 0x00, 0x01, 0x07, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x77, 0x00, 0x3D, 0x00, 0x99, 0xCF, 0xE0, 0x81, 0x58, 0x3D, 0x01, 0x70, 0xB0, 0x61, 0x7C, 0x39, 0x34, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x26, 0xE9, 0x01, 0x3C, 0xE9, 0x01, 0xB0, 0x29, 0x56, 0x00, 0x00, 0x80, 0x1D, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0x00, 0x0E, 0xE9, 0x05, 0x7C, 0xC7, 0xFA + } + }, + { + 237, + 79, + 0x39, + 0x0118, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x18, 0x6D, 0x83, 0x08, 0x52, 0x00, 0x01, 0x07, 0x08, 
0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x77, 0x00, 0x3D, 0x00, 0x99, 0xCF, 0xE0, 0x81, 0x1B, 0x56, 0x00, 0x00, 0x80, 0x1D, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0x00, 0x0E, 0xE9, 0x08, 0x7C, 0x6D, 0x83, 0x08, 0x52, 0x00, 0x01, 0x07, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x77, 0x00, 0x3D, 0x00, 0x99, 0xCF, 0xE0, 0x80, 0xF3, 0x3D, 0x01, 0xF5, 0x57 + } + }, + { + 238, + 79, + 0x39, + 0x0119, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x19, 0x60, 0xB0, 0x95, 0x7C, 0x39, 0x34, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x26, 0xE9, 0x01, 0x3C, 0xE9, 0x01, 0xB0, 0x5D, 0x50, 0x1B, 0x08, 0x50, 0x07, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x56, 0x00, 0x00, 0x80, 0x1D, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0x00, 0x0E, 0xE9, 0x09, 0x7C, 0x6F, 0x5C, 0x08, 0x52, 0x00, 0x01, 0x08, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x77, 0x00, 0x3D, 0xF8, 0x5E + } + }, + { + 239, + 79, + 0x39, + 0x011A, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x1A, 0x00, 0x1B, 0xCF, 0xE0, 0x56, 0x02, 0x23, 0x56, 0x00, 0x00, 0x80, 0x1D, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0x25, 0x0E, 0xE9, 0x09, 0x7C, 0x6F, 0x5C, 0x08, 0x52, 0x02, 0x03, 0x00, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x77, 0x00, 0x3D, 0x00, 0x1B, 0xCF, 0xE0, 0x80, 0x82, 0x56, 0x00, 0x00, 0x80, 0x1D, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0x00, 0x0E, 0xE9, 0x0A, 0xCB, 0x05 + } + }, + { + 240, + 79, + 0x39, + 0x011B, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x1B, 0x7C, 0x6F, 0x5C, 0x08, 0x52, 0x00, 0x01, 0x07, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x77, 0x00, 0x3D, 0x00, 0x99, 0xCF, 0xE0, 0x80, 0x5A, 0x3D, 0x01, 0x30, 0xB0, 0x55, 0x62, 0xD0, 0x03, 0x3C, 0x9F, 0x99, 0xD0, 0x4D, 0x62, 0xD0, 0x03, 0x51, 0x9F, 0x7C, 0x70, 0x0E, 0x06, 0xE8, 0x00, 0x0E, 0xE9, 0x08, 0x7C, 0x6D, 0x83, 0x08, 0x50, 0x26, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFE, 0x62, 0xD0, 0xD0, 0x10 + } + }, + { + 241, + 79, + 0x39, + 0x011C, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x1C, 0x03, 0x51, 0x9F, 0x7C, 0x70, 0x0E, 0x06, 0xE8, 0x00, 0x0E, 0xE9, 0x02, 0x7C, 0x6D, 0x83, 0x08, 0x50, 0x27, 0x08, 0x7C, 0x32, 0x0C, 0x62, 0xD0, 0x03, 0x51, 0x9F, 0x7C, 0x70, 0x0E, 0x06, 0xE8, 0x00, 0x0E, 0xE9, 0x05, 0x7C, 0x6D, 0x83, 0x08, 0x50, 0x28, 0x08, 0x7C, 0x32, 0x0C, 0x38, 0xFC, 0x38, 0xFC, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x03, 0x56, 0x00, 0x00, 0x80, 0x5A, 0x62, 0xD0, 0x00, 0x3B, 0xE7 + } + }, + { + 242, + 79, + 0x39, + 0x011D, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x1D, 0x52, 0x00, 0x7C, 0x6D, 0x8A, 0x06, 0xE8, 0xE6, 0x0E, 0xE9, 0x01, 0x7C, 0x6D, 0x83, 0x54, 0x01, 0x3E, 0xE8, 0x54, 0x02, 0x7C, 0x70, 0x01, 0x7C, 0x70, 0xD4, 0x52, 0x02, 0x53, 0xE9, 0x7C, 0x6E, 0xAE, 0x06, 0xE6, 0xB8, 0x0E, 0xE7, 0x03, 0x7C, 0x6D, 0xEA, 0x52, 0x00, 0x7C, 0x6D, 0x8A, 0x06, 0xE8, 0xE4, 0x0E, 0xE9, 0x01, 0x7C, 0x6D, 0x83, 0x54, 0x01, 0x3E, 0xE8, 0x54, 0x02, 0x7C, 0x70, 0xCA, 0x06 + } + }, + { + 243, + 79, + 0x39, + 0x011E, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x1E, 0xD4, 0x7C, 0x70, 0x01, 0x52, 0x02, 0x53, 0xE9, 0x7C, 0x6E, 0xAE, 0x06, 0xE6, 0xB4, 0x0E, 0xE7, 0x03, 0x7C, 0x6D, 0xEA, 0x77, 0x00, 0x7C, 0x6F, 0xC1, 0xCF, 0xA3, 0x52, 0xFC, 0x08, 0x7C, 0x5F, 0x91, 0x38, 0xFF, 0x38, 0xFD, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x7C, 0x6F, 0xB9, 0x52, 0xFB, 0x60, 0xD5, 0x50, 0x04, 0x3F, 0xE8, 0x52, 0xFC, 0x01, 0x01, 0x7C, 0x71, 0x76, 0x50, 0x01, 0x3F, 0x32, 0xD7 + } + }, + { + 244, + 79, + 0x39, + 0x011F, + { + 0x00, 0xFF, 0x39, 
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x1F, 0xE8, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x03, 0x7C, 0x6F, 0xB9, 0x52, 0xFB, 0x7C, 0x6D, 0xE3, 0x47, 0xE9, 0x80, 0xBF, 0xF4, 0x52, 0xFC, 0x53, 0xE8, 0x52, 0xFB, 0x60, 0xD4, 0x3E, 0xE8, 0x39, 0x04, 0xB0, 0x73, 0x56, 0x00, 0x00, 0x80, 0x3F, 0x62, 0xD0, 0x00, 0x52, 0x00, 0x7C, 0x6D, 0x8A, 0x52, 0xFC, 0x01, 0x02, 0x53, 0xE6, 0x52, 0xFB, 0x09, 0x00, 0x53, 0xE7, 0x51, 0xE8, 0x02, 0x69, 0x46 + } + }, + { + 245, + 79, + 0x39, + 0x0120, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x20, 0xE6, 0x53, 0xE6, 0x51, 0xE9, 0x0A, 0xE7, 0x53, 0xE7, 0x06, 0xE8, 0xE0, 0x0E, 0xE9, 0x01, 0x51, 0xE9, 0x60, 0xD4, 0x51, 0xE7, 0x60, 0xD5, 0x10, 0x57, 0x08, 0x62, 0xD0, 0x00, 0x3E, 0xE8, 0x3F, 0xE6, 0x79, 0xBF, 0xF7, 0x20, 0x77, 0x00, 0x7C, 0x6F, 0xC1, 0xCF, 0xBE, 0x62, 0xD0, 0x00, 0x52, 0xFC, 0x01, 0x22, 0x7C, 0x71, 0x76, 0x52, 0xF9, 0x3F, 0xE8, 0x52, 0xFC, 0x01, 0x23, 0x7C, 0x71, 0xE7, 0x43 + } + }, + { + 246, + 79, + 0x39, + 0x0121, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x21, 0x76, 0x52, 0xFA, 0x3F, 0xE8, 0x52, 0xFC, 0x01, 0x24, 0x7C, 0x71, 0x76, 0x62, 0xD0, 0x00, 0x51, 0xA2, 0x62, 0xD0, 0x00, 0x3F, 0xE8, 0x7C, 0x6F, 0xB9, 0x52, 0xFB, 0x60, 0xD5, 0x50, 0x84, 0x3F, 0xE8, 0x38, 0xFD, 0x20, 0x7F, 0x10, 0x4F, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x62, 0xD0, 0x03, 0x55, 0xAC, 0x19, 0x55, 0xAB, 0x00, 0x55, 0xAA, 0x02, 0x62, 0xD0, 0x00, 0x55, 0xE8, 0x00, 0x55, 0xDD, 0x30 + } + }, + { + 247, + 79, + 0x39, + 0x0122, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x22, 0xE9, 0x0A, 0x7C, 0x6D, 0xAD, 0x7C, 0x6D, 0xC6, 0x51, 0xE8, 0x62, 0xD0, 0x03, 0x53, 0xA9, 0x10, 0x50, 0x03, 0x08, 0x50, 0xA9, 0x5C, 0x18, 0x7C, 0x20, 0x98, 0x20, 0x62, 0xD0, 0x03, 0x50, 0x00, 0x01, 0x80, 0x53, 0xAB, 0x50, 0x02, 0x09, 0x00, 0x53, 0xAA, 0x62, 0xD0, 0x00, 0x55, 0xE8, 0x00, 0x55, 0xE9, 0x0A, 0x7C, 0x6D, 0xC6, 0x7C, 0x6D, 0xAD, 0x7C, 0x73, 0x3C, 0x51, 0xE8, 0x62, 0xD0, 0xBC, 0xEF + } + }, + { + 248, + 79, + 0x39, + 0x0123, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x23, 0x03, 0x53, 0xA9, 0x10, 0x50, 0x03, 0x08, 0x50, 0xA9, 0x5C, 0x18, 0x7C, 0x20, 0x98, 0x20, 0x62, 0xD0, 0x03, 0x55, 0xAB, 0xA0, 0x55, 0xAA, 0x01, 0x62, 0xD0, 0x00, 0x55, 0xE8, 0x00, 0x55, 0xE9, 0x0A, 0x7C, 0x6D, 0xC6, 0x7C, 0x6D, 0xAD, 0x16, 0xE8, 0x02, 0x1E, 0xE9, 0x00, 0x51, 0xE8, 0x62, 0xD0, 0x03, 0x53, 0xA9, 0x10, 0x50, 0x03, 0x08, 0x50, 0xA9, 0x5C, 0x18, 0x7C, 0x20, 0x98, 0x62, 0xDB, 0x2E + } + }, + { + 249, + 79, + 0x39, + 0x0124, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x24, 0xD0, 0x00, 0x20, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0x50, 0x99, 0x7C, 0x12, 0x90, 0x20, 0x9F, 0x59, 0x10, 0x7C, 0x10, 0x74, 0x20, 0x7F, 0x10, 0x4F, 0x10, 0x52, 0xFB, 0x08, 0x52, 0xFC, 0x20, 0x7C, 0x13, 0xEA, 0x20, 0x9F, 0x44, 0x10, 0x7C, 0x10, 0x74, 0x20, 0x20, 0x7F, 0x10, 0x4F, 0x10, 0x52, 0xFC, 0x7C, 0x13, 0xFB, 0x20, 0x9F, 0x32, 0x10, 0x7C, 0x10, 0x74, 0x20, 0x20, 0x7F, 0x10, 0x4F, 0x86, 0x85 + } + }, + { + 250, + 79, + 0x39, + 0x0125, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x25, 0x10, 0x52, 0xFC, 0x7C, 0x14, 0x0D, 0x20, 0x9F, 0x20, 0x10, 0x7C, 0x10, 0x74, 0x20, 0x20, 0x7F, 0x10, 0x4F, 0x52, 0xFC, 0x62, 0xD0, 0x00, 0x53, 0xA9, 0x20, 0x7F, 0x62, 0xD0, 0x00, 0x55, 0x39, 0x08, 0x55, 0x3A, 0x08, 0x55, 0x3B, 0x08, 0x55, 0x3C, 0x08, 0x55, 0x01, 0x00, 0x55, 0x00, 0x00, 0x55, 0x03, 0x00, 0x55, 0x02, 0x00, 0x55, 0x05, 0x00, 0x55, 0x04, 
0x00, 0x55, 0x07, 0x00, 0x55, 0x71, 0x5C + } + }, + { + 251, + 79, + 0x39, + 0x0126, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x26, 0x06, 0x00, 0x55, 0x55, 0x20, 0x55, 0x54, 0x01, 0x55, 0x53, 0x00, 0x43, 0xE6, 0x01, 0x43, 0xE0, 0x08, 0x7F, 0x49, 0xE0, 0x08, 0xB0, 0x05, 0x50, 0x00, 0x80, 0x07, 0x08, 0x7C, 0x54, 0x59, 0x38, 0xFF, 0x7F, 0x08, 0x7C, 0x54, 0xA4, 0x38, 0xFF, 0x7F, 0x08, 0x7C, 0x54, 0xB5, 0x38, 0xFF, 0x7F, 0x08, 0x7C, 0x54, 0xE2, 0x38, 0xFF, 0x7F, 0x08, 0x7C, 0x54, 0xF3, 0x38, 0xFF, 0x7F, 0x08, 0x7C, 0x84, 0x83 + } + }, + { + 252, + 79, + 0x39, + 0x0127, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x27, 0x55, 0x04, 0x38, 0xFF, 0x7F, 0x08, 0x7C, 0x55, 0x15, 0x38, 0xFF, 0x7F, 0x08, 0x7C, 0x5B, 0xE0, 0x38, 0xFF, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x50, 0x00, 0x3D, 0xF9, 0x80, 0xC0, 0x06, 0x7C, 0x4A, 0x8B, 0x50, 0xC0, 0x3D, 0xF5, 0x80, 0xC0, 0x0C, 0x10, 0x4B, 0x11, 0x04, 0x4B, 0x7C, 0x4A, 0x8B, 0x31, 0x80, 0x20, 0x08, 0x7C, 0x4A, 0x28, 0x18, 0x6A, 0xD0, 0x04, 0x7C, 0x4A, 0x8B, 0x6A, 0xD0, 0x1F, 0xBA + } + }, + { + 253, + 79, + 0x39, + 0x0128, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x28, 0x08, 0x4B, 0x11, 0x04, 0x4B, 0x7C, 0x4A, 0x8B, 0x38, 0xFF, 0x20, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x10, 0x4F, 0x5D, 0xD0, 0x08, 0x62, 0xD0, 0x00, 0x7C, 0x4A, 0xA3, 0x51, 0xE1, 0x54, 0xFB, 0x18, 0x60, 0xD0, 0x20, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x5D, 0xD0, 0x08, 0x62, 0xD0, 0x00, 0x51, 0xE9, 0x08, 0x50, 0x00, 0x53, 0xE9, 0x53, 0xE1, 0x53, 0xE0, 0x53, 0xDF, 0x56, 0x00, 0x20, 0x66, 0xFC, 0xD7, 0x2B + } + }, + { + 254, + 79, + 0x39, + 0x0129, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x29, 0x6C, 0xFB, 0x6C, 0xFA, 0x6C, 0xF9, 0x6B, 0xDF, 0x6B, 0xE0, 0x6B, 0xE1, 0x6B, 0xE9, 0x51, 0xDF, 0x1B, 0xF8, 0x51, 0xE0, 0x1B, 0xF7, 0x51, 0xE1, 0x1B, 0xF6, 0x51, 0xE9, 0x1B, 0xF5, 0xC0, 0x11, 0x53, 0xE9, 0x52, 0xF8, 0x14, 0xDF, 0x52, 0xF7, 0x1C, 0xE0, 0x52, 0xF6, 0x1C, 0xE1, 0x77, 0xFC, 0x7B, 0x00, 0xBF, 0xCB, 0x51, 0xDF, 0x54, 0xF8, 0x51, 0xE0, 0x54, 0xF7, 0x51, 0xE1, 0x54, 0xF6, 0x3A, 0xF2 + } + }, + { + 255, + 79, + 0x39, + 0x012A, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x2A, 0x51, 0xE9, 0x54, 0xF5, 0x18, 0x53, 0xE9, 0x18, 0x60, 0xD0, 0x7F, 0x37, 0xFC, 0xFF, 0x77, 0xFC, 0x37, 0xFB, 0xFF, 0x0F, 0xFB, 0x00, 0x37, 0xFA, 0xFF, 0x0F, 0xFA, 0x00, 0x37, 0xF9, 0xFF, 0x0F, 0xF9, 0x00, 0x7F, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x08, 0x66, 0xFC, 0x6B, 0xE1, 0x51, 0xE1, 0x1B, 0xFB, 0xC0, 0x05, 0x53, 0xE1, 0x77, 0xFC, 0x7A, 0xE0, 0xBF, 0xEF, 0x7F, 0x08, 0x10, 0x4F, 0x50, 0x80, 0x7F + } + }, + { + 256, + 79, + 0x39, + 0x012B, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x2B, 0x00, 0x6F, 0xFF, 0xD0, 0x03, 0x03, 0xFE, 0x66, 0xFE, 0xBF, 0xF7, 0x38, 0xFE, 0x70, 0x3F, 0x71, 0xC0, 0x7F, 0x7C, 0x6E, 0x82, 0x7C, 0x6E, 0xC0, 0x7F, 0x62, 0xD0, 0x01, 0x51, 0xE7, 0x08, 0x51, 0xE6, 0x62, 0xD0, 0x03, 0x53, 0xFE, 0x18, 0x53, 0xFF, 0x62, 0xD0, 0x01, 0x51, 0xE5, 0x08, 0x51, 0xE4, 0x62, 0xD0, 0x03, 0x53, 0xFC, 0x18, 0x53, 0xFD, 0x62, 0xD0, 0x01, 0x51, 0xEF, 0x08, 0x51, 0x7E, 0x7C + } + }, + { + 257, + 79, + 0x39, + 0x012C, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x2C, 0xEE, 0x62, 0xD0, 0x03, 0x53, 0xFA, 0x18, 0x53, 0xFB, 0x62, 0xD0, 0x01, 0x51, 0xED, 0x08, 0x51, 0xEC, 0x62, 0xD0, 0x03, 0x53, 0xF8, 0x18, 0x53, 0xF9, 0x7F, 0x62, 0xD0, 0x01, 0x51, 0xE7, 0x02, 0xEF, 0x62, 0xD0, 0x00, 0x53, 0xE8, 0x62, 0xD0, 0x01, 
0x51, 0xE6, 0x0A, 0xEE, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x7C, 0x6D, 0xF3, 0x51, 0xE8, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x04, 0x53, 0xA3, 0x18, 0x23, 0xC7 + } + }, + { + 258, + 79, + 0x39, + 0x012D, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x2D, 0x53, 0xA4, 0x62, 0xD0, 0x01, 0x51, 0xE5, 0x02, 0xED, 0x62, 0xD0, 0x00, 0x53, 0xE8, 0x62, 0xD0, 0x01, 0x51, 0xE4, 0x0A, 0xEC, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x7C, 0x6D, 0xF3, 0x7C, 0x71, 0xC3, 0x7F, 0x10, 0x4F, 0x52, 0xFA, 0x13, 0xFC, 0x52, 0xF9, 0x1B, 0xFB, 0xD0, 0x12, 0x62, 0xD0, 0x00, 0x52, 0xFC, 0x13, 0xFA, 0x53, 0xE8, 0x52, 0xFB, 0x1B, 0xF9, 0x53, 0xE9, 0x80, 0x10, 0x62, 0xD0, 0xB7, 0xF0 + } + }, + { + 259, + 79, + 0x39, + 0x012E, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x2E, 0x00, 0x52, 0xFA, 0x13, 0xFC, 0x53, 0xE8, 0x52, 0xF9, 0x1B, 0xFB, 0x53, 0xE9, 0x20, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xCD, 0x00, 0x7C, 0x70, 0x41, 0x7F, 0x62, 0xD0, 0x03, 0x51, 0xEC, 0x08, 0x51, 0xED, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xE6, 0x08, 0x51, 0xE7, 0x08, 0x9F, 0xB4, 0x38, 0xFC, 0x7C, 0x6F, 0xA8, 0x62, 0xD0, 0x03, 0x51, 0xE8, 0x08, 0x51, 0xE9, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xEE, 0x8C, 0x9B + } + }, + { + 260, + 79, + 0x39, + 0x012F, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x2F, 0x08, 0x51, 0xEF, 0x08, 0x9F, 0x9B, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x62, 0xD0, 0x04, 0x04, 0xA0, 0x7C, 0x71, 0x7F, 0x0C, 0x9F, 0x62, 0xD0, 0x03, 0x51, 0xEA, 0x08, 0x51, 0xEB, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xE4, 0x08, 0x51, 0xE5, 0x08, 0x9F, 0x78, 0x38, 0xF8, 0x7C, 0x6F, 0x76, 0x62, 0xD0, 0x03, 0x51, 0xE6, 0x08, 0x51, 0xE7, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xEC, 0x08, 0x51, 0xED, 0x08, 0x6A, 0x58 + } + }, + { + 261, + 79, + 0x39, + 0x0130, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x30, 0x9F, 0x5F, 0x38, 0xFC, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x62, 0xD0, 0x04, 0x04, 0x9E, 0x7C, 0x71, 0x7F, 0x0C, 0x9D, 0x7F, 0x10, 0x7C, 0x1E, 0x80, 0x62, 0xD0, 0x00, 0x5A, 0xE9, 0x20, 0x7C, 0x71, 0x22, 0x51, 0xAE, 0x62, 0xD0, 0x03, 0x12, 0xDD, 0x62, 0xD0, 0x04, 0x53, 0xA6, 0x62, 0xD0, 0x04, 0x51, 0xAD, 0x62, 0xD0, 0x03, 0x1A, 0xDC, 0x62, 0xD0, 0x04, 0x53, 0xA5, 0x62, 0xD0, 0x04, 0x51, 0x3D, 0xFF + } + }, + { + 262, + 79, + 0x39, + 0x0131, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x31, 0xAE, 0x08, 0x51, 0xAD, 0x62, 0xD0, 0x03, 0x53, 0xDC, 0x18, 0x53, 0xDD, 0x62, 0xD0, 0x04, 0x51, 0xA6, 0x62, 0xD0, 0x00, 0x53, 0xE6, 0x62, 0xD0, 0x04, 0x51, 0xA5, 0x62, 0xD0, 0x00, 0x53, 0xE7, 0x62, 0xD0, 0x04, 0x51, 0xA0, 0x08, 0x62, 0xD0, 0x00, 0x18, 0x53, 0xE5, 0x55, 0xE4, 0x00, 0x65, 0xE5, 0x65, 0xE4, 0x6B, 0xE5, 0x51, 0xE4, 0x53, 0xE2, 0x51, 0xE5, 0x53, 0xE3, 0x50, 0x00, 0x08, 0x8B, 0x9C + } + }, + { + 263, + 79, + 0x39, + 0x0132, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x32, 0x08, 0x51, 0xE7, 0x08, 0x51, 0xE6, 0x08, 0x50, 0x00, 0x08, 0x08, 0x51, 0xE3, 0x08, 0x51, 0xE2, 0x08, 0x7C, 0x49, 0xD3, 0x18, 0x53, 0xE6, 0x18, 0x53, 0xE7, 0x18, 0x18, 0x38, 0xFC, 0x51, 0xE6, 0x53, 0xE8, 0x51, 0xE7, 0x53, 0xE9, 0x51, 0xE8, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x04, 0x53, 0x9B, 0x18, 0x53, 0x9C, 0x62, 0xD0, 0x04, 0x51, 0xA6, 0x62, 0xD0, 0x00, 0x53, 0xE6, 0x62, 0xD0, 0x04, 0x57, 0x35 + } + }, + { + 264, + 79, + 0x39, + 0x0133, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x33, 0x51, 0xA5, 0x62, 0xD0, 0x00, 0x53, 0xE7, 0x62, 0xD0, 0x04, 0x51, 0x9E, 0x08, 0x62, 0xD0, 0x00, 0x18, 0x53, 0xE5, 0x55, 0xE4, 0x00, 0x65, 
0xE5, 0x65, 0xE4, 0x6B, 0xE5, 0x51, 0xE4, 0x53, 0xE2, 0x51, 0xE5, 0x53, 0xE3, 0x50, 0x00, 0x08, 0x08, 0x51, 0xE7, 0x08, 0x51, 0xE6, 0x08, 0x50, 0x00, 0x08, 0x08, 0x51, 0xE3, 0x08, 0x51, 0xE2, 0x08, 0x7C, 0x49, 0xD3, 0x18, 0x53, 0xE6, 0x18, 0x53, 0x0D, 0xA2 + } + }, + { + 265, + 79, + 0x39, + 0x0134, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x34, 0xE7, 0x18, 0x18, 0x38, 0xFC, 0x51, 0xE6, 0x53, 0xE8, 0x51, 0xE7, 0x53, 0xE9, 0x51, 0xE8, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x04, 0x53, 0x99, 0x18, 0x53, 0x9A, 0x7C, 0x6E, 0x82, 0x7F, 0x10, 0x4F, 0x38, 0x08, 0x62, 0xD0, 0x04, 0x55, 0xDE, 0x00, 0x62, 0xD0, 0x04, 0x55, 0xDD, 0x00, 0x52, 0xFC, 0x03, 0xFA, 0x54, 0x01, 0x52, 0xFB, 0x0B, 0xF9, 0x54, 0x00, 0x52, 0xF8, 0x03, 0xF6, 0x54, 0x03, 0x0D, 0xA3 + } + }, + { + 266, + 79, + 0x39, + 0x0135, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x35, 0x52, 0xF7, 0x0B, 0xF5, 0x54, 0x02, 0x52, 0xFC, 0x03, 0xF6, 0x54, 0x05, 0x52, 0xFB, 0x0B, 0xF5, 0x54, 0x04, 0x52, 0xFA, 0x03, 0xF8, 0x54, 0x07, 0x52, 0xF9, 0x0B, 0xF7, 0x54, 0x06, 0x52, 0xFC, 0x13, 0xF8, 0x52, 0xFB, 0x1B, 0xF7, 0xC0, 0x43, 0x7C, 0x72, 0xD4, 0xD0, 0x1E, 0x62, 0xD0, 0x04, 0x55, 0xDE, 0x01, 0x52, 0x01, 0x13, 0x03, 0x52, 0x00, 0x1B, 0x02, 0xD0, 0x06, 0x7C, 0x73, 0x66, 0x05, 0x94 + } + }, + { + 267, + 79, + 0x39, + 0x0136, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x36, 0x80, 0x69, 0x62, 0xD0, 0x04, 0x55, 0xDD, 0x02, 0x80, 0x61, 0x62, 0xD0, 0x04, 0x55, 0xDE, 0x02, 0x52, 0x07, 0x13, 0x05, 0x52, 0x06, 0x1B, 0x04, 0xD0, 0x09, 0x62, 0xD0, 0x04, 0x55, 0xDD, 0x02, 0x80, 0x49, 0x62, 0xD0, 0x04, 0x55, 0xDD, 0x03, 0x80, 0x41, 0x7C, 0x72, 0xD4, 0xC0, 0x21, 0x62, 0xD0, 0x04, 0x55, 0xDE, 0x03, 0x52, 0x03, 0x13, 0x01, 0x52, 0x02, 0x1B, 0x00, 0xD0, 0x09, 0x62, 0xE1, 0x4D + } + }, + { + 268, + 79, + 0x39, + 0x0137, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x37, 0xD0, 0x04, 0x55, 0xDD, 0x03, 0x80, 0x24, 0x62, 0xD0, 0x04, 0x55, 0xDD, 0x04, 0x80, 0x1C, 0x62, 0xD0, 0x04, 0x55, 0xDE, 0x04, 0x52, 0x05, 0x13, 0x07, 0x52, 0x04, 0x1B, 0x06, 0xD0, 0x09, 0x62, 0xD0, 0x04, 0x55, 0xDD, 0x04, 0x80, 0x04, 0x7C, 0x73, 0x66, 0x62, 0xD0, 0x04, 0x3C, 0xDD, 0x01, 0xB0, 0x29, 0x62, 0xD0, 0x00, 0x52, 0xF3, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x52, 0xFA, 0x02, 0xE8, 0x77, 0x7A + } + }, + { + 269, + 79, + 0x39, + 0x0138, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x38, 0x53, 0xE8, 0x52, 0xF9, 0x0A, 0xE9, 0x53, 0xE9, 0x51, 0xE8, 0x13, 0xF6, 0x51, 0xE9, 0x1B, 0xF5, 0xD0, 0x09, 0x62, 0xD0, 0x04, 0x55, 0xE0, 0x01, 0x80, 0x97, 0x62, 0xD0, 0x04, 0x3C, 0xDD, 0x02, 0xB0, 0x29, 0x62, 0xD0, 0x00, 0x52, 0xF4, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x52, 0xF8, 0x02, 0xE8, 0x53, 0xE8, 0x52, 0xF7, 0x0A, 0xE9, 0x53, 0xE9, 0x51, 0xE8, 0x13, 0xFC, 0x51, 0xE9, 0x1B, 0xFB, 0x37, 0xFB + } + }, + { + 270, + 79, + 0x39, + 0x0139, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x39, 0xD0, 0x09, 0x62, 0xD0, 0x04, 0x55, 0xE0, 0x02, 0x80, 0x67, 0x62, 0xD0, 0x04, 0x3C, 0xDD, 0x03, 0xB0, 0x29, 0x62, 0xD0, 0x00, 0x52, 0xF3, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x52, 0xF6, 0x02, 0xE8, 0x53, 0xE8, 0x52, 0xF5, 0x0A, 0xE9, 0x53, 0xE9, 0x51, 0xE8, 0x13, 0xFA, 0x51, 0xE9, 0x1B, 0xF9, 0xD0, 0x09, 0x62, 0xD0, 0x04, 0x55, 0xE0, 0x03, 0x80, 0x37, 0x62, 0xD0, 0x04, 0x3C, 0xDD, 0x04, 0x10, 0xAE + } + }, + { + 271, + 79, + 0x39, + 0x013A, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x3A, 0xB0, 0x29, 0x62, 0xD0, 0x00, 
0x52, 0xF4, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x52, 0xFC, 0x02, 0xE8, 0x53, 0xE8, 0x52, 0xFB, 0x0A, 0xE9, 0x53, 0xE9, 0x51, 0xE8, 0x13, 0xF8, 0x51, 0xE9, 0x1B, 0xF7, 0xD0, 0x09, 0x62, 0xD0, 0x04, 0x55, 0xE0, 0x04, 0x80, 0x07, 0x62, 0xD0, 0x04, 0x55, 0xE0, 0x00, 0x62, 0xD0, 0x04, 0x3C, 0xDE, 0x04, 0xB0, 0x15, 0x62, 0xD0, 0x04, 0x3C, 0xDD, 0x01, 0xB0, 0x0D, 0xA9, 0xE1 + } + }, + { + 272, + 79, + 0x39, + 0x013B, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x3B, 0x62, 0xD0, 0x04, 0x51, 0xDE, 0x01, 0x03, 0x62, 0xD0, 0x00, 0x80, 0x15, 0x62, 0xD0, 0x04, 0x51, 0xDE, 0x62, 0xD0, 0x04, 0x02, 0xDD, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x16, 0xE9, 0x02, 0x51, 0xE9, 0x38, 0xF8, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x09, 0x52, 0xF7, 0x3B, 0xFB, 0xB0, 0x1B, 0x52, 0xF8, 0x3B, 0xFC, 0xB0, 0x15, 0x52, 0xF5, 0x3B, 0xF9, 0xB0, 0x0F, 0x52, 0xF6, 0x3B, 0xFA, 0xB0, 0x09, 0xC2, 0x14 + } + }, + { + 273, + 79, + 0x39, + 0x013C, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x3C, 0x62, 0xD0, 0x04, 0x55, 0xDF, 0xFE, 0x81, 0x17, 0x52, 0xF7, 0x3B, 0xFB, 0xB0, 0x07, 0x52, 0xF8, 0x3B, 0xFC, 0xA0, 0x0D, 0x52, 0xF5, 0x3B, 0xF9, 0xB0, 0x4E, 0x52, 0xF6, 0x3B, 0xFA, 0xB0, 0x48, 0x62, 0xD0, 0x04, 0x3C, 0xCE, 0x00, 0xA0, 0x06, 0x3C, 0xCE, 0x07, 0xB0, 0x09, 0x62, 0xD0, 0x04, 0x55, 0xDF, 0x00, 0x80, 0xEA, 0x62, 0xD0, 0x04, 0x3C, 0xCE, 0x01, 0xA0, 0x06, 0x3C, 0xCE, 0x02, 0xC9, 0x23 + } + }, + { + 274, + 79, + 0x39, + 0x013D, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x3D, 0xB0, 0x09, 0x62, 0xD0, 0x04, 0x55, 0xDF, 0x02, 0x80, 0xD5, 0x62, 0xD0, 0x04, 0x3C, 0xCE, 0x03, 0xA0, 0x06, 0x3C, 0xCE, 0x04, 0xB0, 0x09, 0x62, 0xD0, 0x04, 0x55, 0xDF, 0x04, 0x80, 0xC0, 0x62, 0xD0, 0x04, 0x55, 0xDF, 0x06, 0x80, 0xB8, 0x52, 0xFB, 0x08, 0x52, 0xFC, 0x08, 0x52, 0xF7, 0x08, 0x52, 0xF8, 0x08, 0x9B, 0xEC, 0x7C, 0x72, 0x25, 0x52, 0xF9, 0x08, 0x52, 0xFA, 0x08, 0x52, 0xF5, 0x56, 0x3E + } + }, + { + 275, + 79, + 0x39, + 0x013E, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x3E, 0x08, 0x52, 0xF6, 0x08, 0x9B, 0xDB, 0x38, 0xF8, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x54, 0x03, 0x51, 0xE9, 0x54, 0x02, 0x52, 0x01, 0x13, 0x03, 0x52, 0x00, 0x1B, 0x02, 0xD0, 0x23, 0x7C, 0x71, 0x2E, 0x7C, 0x70, 0x17, 0x7C, 0x73, 0x4A, 0xD0, 0x09, 0x56, 0x06, 0x01, 0x56, 0x05, 0x00, 0x80, 0x07, 0x56, 0x06, 0x00, 0x56, 0x05, 0x00, 0x62, 0xD0, 0x00, 0x52, 0x06, 0x54, 0x04, 0x80, 0x2C, 0x62, 0x31, 0xF5 + } + }, + { + 276, + 79, + 0x39, + 0x013F, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x3F, 0xD0, 0x00, 0x52, 0x03, 0x53, 0xE8, 0x52, 0x02, 0x53, 0xE9, 0x7C, 0x70, 0x17, 0x13, 0x01, 0x51, 0xE9, 0x1B, 0x00, 0xD0, 0x09, 0x56, 0x08, 0x01, 0x56, 0x07, 0x00, 0x80, 0x07, 0x56, 0x08, 0x00, 0x56, 0x07, 0x00, 0x62, 0xD0, 0x00, 0x52, 0x08, 0x54, 0x04, 0x62, 0xD0, 0x04, 0x47, 0xCE, 0x01, 0xB0, 0x1B, 0x62, 0xD0, 0x00, 0x52, 0x04, 0x31, 0x01, 0x53, 0xE9, 0x62, 0xD0, 0x04, 0x51, 0xCE, 0x79, 0x86 + } + }, + { + 277, + 79, + 0x39, + 0x0140, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x40, 0x62, 0xD0, 0x00, 0x02, 0xE9, 0x62, 0xD0, 0x04, 0x53, 0xDF, 0x80, 0x0D, 0x62, 0xD0, 0x04, 0x51, 0xCE, 0x03, 0x04, 0x62, 0xD0, 0x04, 0x53, 0xDF, 0x62, 0xD0, 0x04, 0x26, 0xDF, 0x07, 0x62, 0xD0, 0x04, 0x51, 0xDF, 0x01, 0x01, 0x62, 0xD0, 0x00, 0x38, 0xF7, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x04, 0x56, 0x00, 0x00, 0x7C, 0x72, 0xAB, 0x10, 0x7C, 0x1E, 0x80, 0x62, 0xD0, 0x00, 0x5A, 0xE9, 0x20, 0xC9, 0x27 + } + }, + { + 278, + 79, + 0x39, + 0x0141, + { + 
0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x41, 0x7C, 0x71, 0x22, 0x3D, 0xFC, 0x01, 0xA0, 0x62, 0x3D, 0xFC, 0x00, 0xB0, 0x41, 0x62, 0xD0, 0x04, 0x55, 0xD0, 0x00, 0x62, 0xD0, 0x04, 0x7C, 0x71, 0xEE, 0x3C, 0xAD, 0x00, 0xB0, 0x06, 0x3C, 0xAE, 0x00, 0xA0, 0x22, 0x62, 0xD0, 0x04, 0x51, 0xAE, 0x08, 0x51, 0xAD, 0x62, 0xD0, 0x04, 0x53, 0xAF, 0x18, 0x53, 0xB0, 0x10, 0x50, 0x00, 0x5C, 0x7C, 0x1E, 0xB4, 0x20, 0x62, 0xD0, 0x03, 0x55, 0xDD, 0x4B, 0x2C + } + }, + { + 279, + 79, + 0x39, + 0x0142, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x42, 0x00, 0x55, 0xDC, 0x00, 0x62, 0xD0, 0x04, 0x55, 0xA6, 0x00, 0x55, 0xA5, 0x00, 0x7C, 0x72, 0x85, 0x62, 0xD0, 0x03, 0x55, 0xE3, 0x00, 0x55, 0xE2, 0x00, 0x7C, 0x73, 0x5F, 0x62, 0xD0, 0x04, 0x55, 0xD4, 0x00, 0x62, 0xD0, 0x00, 0x50, 0x00, 0x83, 0xAD, 0x62, 0xD0, 0x04, 0x3C, 0xDC, 0x01, 0xA0, 0x2A, 0x62, 0xD0, 0x01, 0x51, 0xE7, 0x08, 0x51, 0xE6, 0x62, 0xD0, 0x03, 0x53, 0xFE, 0x18, 0x53, 0x86, 0xA3 + } + }, + { + 280, + 79, + 0x39, + 0x0143, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x43, 0xFF, 0x62, 0xD0, 0x01, 0x51, 0xE5, 0x08, 0x51, 0xE4, 0x62, 0xD0, 0x03, 0x53, 0xFC, 0x18, 0x53, 0xFD, 0x7C, 0x6E, 0x82, 0x62, 0xD0, 0x04, 0x55, 0xDC, 0x01, 0x62, 0xD0, 0x01, 0x51, 0xE7, 0x08, 0x51, 0xE6, 0x62, 0xD0, 0x03, 0x53, 0xF6, 0x18, 0x53, 0xF7, 0x62, 0xD0, 0x01, 0x51, 0xE5, 0x08, 0x51, 0xE4, 0x62, 0xD0, 0x03, 0x53, 0xF4, 0x18, 0x53, 0xF5, 0x62, 0xD0, 0x00, 0x51, 0x3A, 0x08, 0x36, 0x04 + } + }, + { + 281, + 79, + 0x39, + 0x0144, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x44, 0x62, 0xD0, 0x00, 0x51, 0x39, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xEA, 0x08, 0x51, 0xEB, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xEC, 0x08, 0x51, 0xED, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xE4, 0x08, 0x51, 0xE5, 0x08, 0x51, 0xE6, 0x08, 0x51, 0xE7, 0x08, 0x9B, 0xF5, 0x38, 0xF6, 0x7C, 0x71, 0x88, 0x54, 0x00, 0x3D, 0x00, 0x00, 0xA1, 0x23, 0x3D, 0xFB, 0x00, 0xA1, 0x1E, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0xAC, 0xF1 + } + }, + { + 282, + 79, + 0x39, + 0x0145, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x45, 0x62, 0xD0, 0x03, 0x51, 0xE3, 0x21, 0x0F, 0x62, 0xD0, 0x00, 0x53, 0xE6, 0x62, 0xD0, 0x03, 0x51, 0xE2, 0x21, 0x00, 0x62, 0xD0, 0x00, 0x3A, 0xE9, 0xB0, 0x07, 0x51, 0xE6, 0x3A, 0xE8, 0xA0, 0xA4, 0x62, 0xD0, 0x03, 0x65, 0xE3, 0x6B, 0xE2, 0x65, 0xE3, 0x6B, 0xE2, 0x65, 0xE3, 0x6B, 0xE2, 0x65, 0xE3, 0x6B, 0xE2, 0x52, 0x00, 0x2C, 0xE3, 0x3C, 0xE2, 0x12, 0xB0, 0x06, 0x3C, 0xE3, 0x34, 0xA0, 0xF9, 0x8C + } + }, + { + 283, + 79, + 0x39, + 0x0146, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x46, 0x28, 0x62, 0xD0, 0x03, 0x3C, 0xE2, 0x23, 0xB0, 0x06, 0x3C, 0xE3, 0x41, 0xA0, 0x1B, 0x62, 0xD0, 0x03, 0x3C, 0xE2, 0x34, 0xB0, 0x06, 0x3C, 0xE3, 0x12, 0xA0, 0x0E, 0x62, 0xD0, 0x03, 0x3C, 0xE2, 0x41, 0xB0, 0x14, 0x3C, 0xE3, 0x23, 0xB0, 0x0F, 0x62, 0xD0, 0x04, 0x55, 0xD2, 0x01, 0x62, 0xD0, 0x04, 0x55, 0xD3, 0x01, 0x80, 0x49, 0x62, 0xD0, 0x03, 0x3C, 0xE2, 0x43, 0xB0, 0x06, 0x3C, 0xE3, 0xD5, 0x45 + } + }, + { + 284, + 79, + 0x39, + 0x0147, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x47, 0x21, 0xA0, 0x28, 0x62, 0xD0, 0x03, 0x3C, 0xE2, 0x32, 0xB0, 0x06, 0x3C, 0xE3, 0x14, 0xA0, 0x1B, 0x62, 0xD0, 0x03, 0x3C, 0xE2, 0x21, 0xB0, 0x06, 0x3C, 0xE3, 0x43, 0xA0, 0x0E, 0x62, 0xD0, 0x03, 0x3C, 0xE2, 0x14, 0xB0, 0x14, 0x3C, 0xE3, 0x32, 0xB0, 0x0F, 0x62, 0xD0, 0x04, 0x55, 0xD2, 0x00, 0x62, 0xD0, 0x04, 0x55, 0xD3, 0x01, 0x80, 0x07, 
0x62, 0xD0, 0x04, 0x55, 0xD3, 0x00, 0x7C, 0x72, 0x16, 0xC8 + } + }, + { + 285, + 79, + 0x39, + 0x0148, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x48, 0x85, 0x80, 0x3D, 0x7C, 0x73, 0x0E, 0x39, 0x00, 0xA0, 0x36, 0x62, 0xD0, 0x04, 0x3C, 0xD3, 0x01, 0xB0, 0x2E, 0x62, 0xD0, 0x00, 0x7C, 0x73, 0x0E, 0x53, 0xE9, 0x62, 0xD0, 0x04, 0x51, 0xD1, 0x62, 0xD0, 0x00, 0x3A, 0xE9, 0xA0, 0x08, 0x62, 0xD0, 0x04, 0x76, 0xD1, 0x80, 0x13, 0x62, 0xD0, 0x03, 0x55, 0xE3, 0x00, 0x55, 0xE2, 0x00, 0x7C, 0x72, 0x85, 0x62, 0xD0, 0x04, 0x55, 0xD3, 0x00, 0x62, 0x19, 0xCF + } + }, + { + 286, + 79, + 0x39, + 0x0149, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x49, 0xD0, 0x04, 0x3C, 0xD3, 0x01, 0xB0, 0x11, 0x62, 0xD0, 0x04, 0x3C, 0xD2, 0x01, 0xB0, 0x06, 0x56, 0x00, 0x28, 0x80, 0x04, 0x56, 0x00, 0x29, 0x3D, 0x00, 0x00, 0xA0, 0x3E, 0x62, 0xD0, 0x03, 0x51, 0xEA, 0x08, 0x51, 0xEB, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xEC, 0x08, 0x51, 0xED, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xE4, 0x08, 0x51, 0xE5, 0x08, 0x51, 0xE6, 0x08, 0x51, 0xE7, 0x08, 0x9C, 0x65, 0x38, 0x1E, 0xDA + } + }, + { + 287, + 79, + 0x39, + 0x014A, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x4A, 0xF8, 0x54, 0x03, 0x3D, 0xFB, 0x00, 0xA0, 0x09, 0x62, 0xD0, 0x04, 0x3C, 0xD3, 0x00, 0xB0, 0x0A, 0x52, 0x03, 0x54, 0x00, 0x66, 0x00, 0x07, 0x00, 0x0E, 0x7C, 0x72, 0xF6, 0x39, 0x00, 0xA1, 0x5B, 0x3D, 0x00, 0x00, 0xA1, 0x56, 0x3D, 0x00, 0x28, 0xA1, 0x51, 0x3D, 0x00, 0x29, 0xA1, 0x4C, 0x62, 0xD0, 0x03, 0x51, 0xEC, 0x08, 0x51, 0xED, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xE6, 0x08, 0x51, 0xE7, 0xB4, 0x07 + } + }, + { + 288, + 79, + 0x39, + 0x014B, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x4B, 0x08, 0x7C, 0x4B, 0x61, 0x38, 0xFC, 0x7C, 0x6F, 0xA8, 0x62, 0xD0, 0x03, 0x51, 0xEA, 0x08, 0x51, 0xEB, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xE4, 0x08, 0x51, 0xE5, 0x08, 0x7C, 0x4B, 0x61, 0x38, 0xFC, 0x7C, 0x6F, 0x76, 0x62, 0xD0, 0x04, 0x52, 0x00, 0x3A, 0xD4, 0xB0, 0x04, 0x56, 0x01, 0x01, 0x3D, 0x01, 0x00, 0xB0, 0xD4, 0x62, 0xD0, 0x00, 0x3C, 0x39, 0x00, 0xB0, 0x73, 0x62, 0xD0, 0x00, 0x3C, 0xBA, 0x14 + } + }, + { + 289, + 79, + 0x39, + 0x014C, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x4C, 0x3A, 0x00, 0xB0, 0x6B, 0x62, 0xD0, 0x04, 0x3C, 0x9F, 0x00, 0xB0, 0x06, 0x3C, 0xA0, 0x00, 0xA0, 0x0E, 0x62, 0xD0, 0x04, 0x3C, 0x9D, 0x00, 0xB0, 0x56, 0x3C, 0x9E, 0x00, 0xB0, 0x51, 0x3D, 0x00, 0x10, 0xB0, 0x13, 0x62, 0xD0, 0x04, 0x3C, 0xD4, 0x14, 0xA0, 0x06, 0x3C, 0xD4, 0x1C, 0xB0, 0x3F, 0x56, 0x01, 0x01, 0x80, 0x3A, 0x3D, 0x00, 0x1C, 0xB0, 0x13, 0x62, 0xD0, 0x04, 0x3C, 0xD4, 0x18, 0x4D, 0x3B + } + }, + { + 290, + 79, + 0x39, + 0x014D, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x4D, 0xA0, 0x06, 0x3C, 0xD4, 0x10, 0xB0, 0x28, 0x56, 0x01, 0x01, 0x80, 0x23, 0x3D, 0x00, 0x18, 0xA0, 0x06, 0x3D, 0x00, 0x14, 0xB0, 0x19, 0x62, 0xD0, 0x00, 0x52, 0x00, 0x11, 0x04, 0x7C, 0x70, 0x27, 0xA0, 0x0A, 0x52, 0x00, 0x01, 0x04, 0x7C, 0x70, 0x27, 0xB0, 0x04, 0x56, 0x01, 0x01, 0x3D, 0x00, 0x10, 0xB0, 0x18, 0x62, 0xD0, 0x04, 0x3C, 0xD4, 0x10, 0xA0, 0x0B, 0x3C, 0xD4, 0x1E, 0xA0, 0x06, 0x64, 0x6A + } + }, + { + 291, + 79, + 0x39, + 0x014E, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x4E, 0x3C, 0xD4, 0x12, 0xB0, 0x43, 0x56, 0x01, 0x01, 0x80, 0x3E, 0x3D, 0x00, 0x1E, 0xB0, 0x18, 0x62, 0xD0, 0x04, 0x3C, 0xD4, 0x1E, 0xA0, 0x0B, 0x3C, 0xD4, 0x10, 0xA0, 0x06, 0x3C, 0xD4, 0x1C, 0xB0, 0x27, 0x56, 0x01, 0x01, 0x80, 0x22, 
0x62, 0xD0, 0x04, 0x51, 0xD4, 0x3B, 0x00, 0xA0, 0x16, 0x62, 0xD0, 0x00, 0x52, 0x00, 0x11, 0x02, 0x7C, 0x70, 0x27, 0xA0, 0x0A, 0x52, 0x00, 0x01, 0x02, 0x7C, 0x91, 0xC5 + } + }, + { + 292, + 79, + 0x39, + 0x014F, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x4F, 0x70, 0x27, 0xB0, 0x04, 0x56, 0x01, 0x01, 0x3D, 0x01, 0x01, 0xB0, 0x22, 0x62, 0xD0, 0x00, 0x7C, 0x72, 0xF6, 0x53, 0xE9, 0x62, 0xD0, 0x04, 0x51, 0xD5, 0x62, 0xD0, 0x00, 0x3A, 0xE9, 0xA0, 0x1B, 0x7C, 0x6E, 0x82, 0x62, 0xD0, 0x04, 0x76, 0xD5, 0x56, 0x00, 0x00, 0x80, 0x0E, 0x7C, 0x73, 0x5F, 0x52, 0x00, 0x62, 0xD0, 0x04, 0x53, 0xD4, 0x56, 0x00, 0x00, 0x3D, 0x00, 0x00, 0xA0, 0x52, 0x62, 0x27, 0xF2 + } + }, + { + 293, + 79, + 0x39, + 0x0150, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x50, 0xD0, 0x03, 0x51, 0xEC, 0x08, 0x51, 0xED, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xE6, 0x08, 0x51, 0xE7, 0x08, 0x7C, 0x4B, 0x61, 0x38, 0xFC, 0x7C, 0x6F, 0xA8, 0x62, 0xD0, 0x03, 0x51, 0xEA, 0x08, 0x51, 0xEB, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xE4, 0x08, 0x51, 0xE5, 0x08, 0x7C, 0x4B, 0x61, 0x38, 0xFC, 0x7C, 0x6F, 0x76, 0x7C, 0x4C, 0x14, 0x62, 0xD0, 0x04, 0x52, 0x00, 0x3A, 0xD0, 0xB0, 0x08, 0x62, 0xAE, 0x01 + } + }, + { + 294, + 79, + 0x39, + 0x0151, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x51, 0xD0, 0x04, 0x76, 0xCF, 0x80, 0x04, 0x7C, 0x70, 0x5B, 0x52, 0x00, 0x62, 0xD0, 0x04, 0x53, 0xD0, 0x52, 0x00, 0x62, 0xD0, 0x00, 0x38, 0xFC, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x03, 0x7C, 0x72, 0xAB, 0x56, 0x00, 0x00, 0x52, 0xFC, 0x08, 0x7C, 0x5B, 0xE0, 0x38, 0xFF, 0x62, 0xD0, 0x00, 0x54, 0x00, 0x52, 0xFC, 0x08, 0x90, 0x6D, 0x62, 0xD0, 0x00, 0x54, 0x01, 0x52, 0xFC, 0x08, 0x90, 0x96, 0x38, 0xC7, 0x34 + } + }, + { + 295, + 79, + 0x39, + 0x0152, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x52, 0xFE, 0x62, 0xD0, 0x00, 0x54, 0x02, 0x3D, 0x00, 0x00, 0xA0, 0x05, 0x52, 0x00, 0x80, 0x12, 0x3D, 0x01, 0x00, 0xA0, 0x08, 0x52, 0x01, 0x62, 0xD0, 0x00, 0x80, 0x06, 0x52, 0x02, 0x62, 0xD0, 0x00, 0x38, 0xFD, 0x20, 0x7F, 0x10, 0x4F, 0x50, 0x00, 0x08, 0x52, 0xFC, 0x08, 0x9B, 0x7E, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x01, 0x56, 0x00, 0x00, 0x50, 0x01, 0x08, 0x52, 0x5D, 0x61 + } + }, + { + 296, + 79, + 0x39, + 0x0153, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x53, 0xFC, 0x08, 0x9B, 0x68, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x54, 0x00, 0x3D, 0x00, 0x28, 0xA0, 0x0A, 0x3D, 0x00, 0x29, 0xA0, 0x05, 0x50, 0x00, 0x80, 0x06, 0x52, 0x00, 0x62, 0xD0, 0x00, 0x38, 0xFF, 0x20, 0x7F, 0x10, 0x4F, 0x50, 0x01, 0x08, 0x52, 0xFC, 0x08, 0x9B, 0x40, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x20, 0x7F, 0x10, 0x4F, 0x50, 0x00, 0x08, 0x52, 0xFC, 0x08, 0x90, 0x29, 0x38, 0xFE, 0x62, 0x5B, 0x5E + } + }, + { + 297, + 79, + 0x39, + 0x0154, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x54, 0xD0, 0x00, 0x20, 0x7F, 0x10, 0x4F, 0x50, 0x01, 0x08, 0x52, 0xFC, 0x08, 0x90, 0x18, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x20, 0x7F, 0x10, 0x4F, 0x50, 0x02, 0x08, 0x52, 0xFC, 0x08, 0x90, 0x07, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x0F, 0x56, 0x00, 0x00, 0x56, 0x04, 0x00, 0x7C, 0x72, 0xAB, 0x56, 0x03, 0x00, 0x10, 0x7C, 0x1E, 0x80, 0x62, 0xD0, 0x00, 0x5A, 0xE9, 0x20, 0xE0, 0x69 + } + }, + { + 298, + 79, + 0x39, + 0x0155, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x55, 0x7C, 0x71, 0x22, 0x10, 0x7C, 0x1E, 0x8D, 0x62, 0xD0, 0x00, 0x5A, 0xE9, 0x20, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x04, 0x53, 
0xAB, 0x18, 0x53, 0xAC, 0x3D, 0xFC, 0x02, 0xD0, 0x61, 0x3D, 0xFC, 0x00, 0xB0, 0x41, 0x62, 0xD0, 0x04, 0x55, 0xD0, 0x00, 0x62, 0xD0, 0x04, 0x7C, 0x71, 0xEE, 0x3C, 0xAD, 0x00, 0xB0, 0x06, 0x3C, 0xAE, 0x00, 0xA0, 0x22, 0x62, 0xD0, 0x04, 0x51, 0xAE, 0x08, 0x51, 0xAD, 0xF0, 0x8A + } + }, + { + 299, + 79, + 0x39, + 0x0156, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x56, 0x62, 0xD0, 0x04, 0x53, 0xAF, 0x18, 0x53, 0xB0, 0x10, 0x50, 0x00, 0x5C, 0x7C, 0x1E, 0xB4, 0x20, 0x62, 0xD0, 0x03, 0x55, 0xDD, 0x00, 0x55, 0xDC, 0x00, 0x62, 0xD0, 0x04, 0x55, 0xA6, 0x00, 0x55, 0xA5, 0x00, 0x62, 0xD0, 0x04, 0x55, 0xDB, 0x00, 0x7C, 0x73, 0x58, 0x7C, 0x73, 0x51, 0x10, 0x50, 0x00, 0x5C, 0x7C, 0x1E, 0xCC, 0x20, 0x62, 0xD0, 0x00, 0x50, 0x00, 0x83, 0x71, 0x3D, 0xFC, 0x02, 0x20, 0xEB + } + }, + { + 300, + 79, + 0x39, + 0x0157, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x57, 0xB3, 0x0F, 0x62, 0xD0, 0x04, 0x3C, 0xDC, 0x02, 0xD0, 0x55, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x7C, 0x72, 0x01, 0x20, 0x3C, 0xE9, 0x00, 0xB0, 0x05, 0x39, 0x00, 0xA0, 0x33, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x7C, 0x72, 0x01, 0x53, 0xE8, 0x20, 0x62, 0xD0, 0x04, 0x51, 0xAC, 0x62, 0xD0, 0x00, 0x12, 0xE8, 0x62, 0xD0, 0x04, 0x51, 0xAB, 0x62, 0xD0, 0x00, 0x1A, 0xE9, 0xD0, 0x1A, 0x7C, 0xE0, 0x6C + } + }, + { + 301, + 79, + 0x39, + 0x0158, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x58, 0x4A, 0xD9, 0x7C, 0x4A, 0xD2, 0x7C, 0x4B, 0x1A, 0x62, 0xD0, 0x00, 0x50, 0x00, 0x83, 0x1F, 0x7C, 0x4A, 0xD9, 0x7C, 0x4A, 0xD2, 0x7C, 0x4B, 0x1A, 0x62, 0xD0, 0x04, 0x55, 0xDC, 0x02, 0x3D, 0xFB, 0x01, 0xA0, 0x06, 0x3D, 0xFB, 0x02, 0xB1, 0x49, 0x62, 0xD0, 0x01, 0x51, 0xEE, 0x08, 0x51, 0xEF, 0x08, 0x51, 0xE6, 0x08, 0x51, 0xE7, 0x08, 0x7C, 0x4B, 0x61, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x54, 0x7C, 0xA5 + } + }, + { + 302, + 79, + 0x39, + 0x0159, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x59, 0x08, 0x51, 0xE9, 0x54, 0x07, 0x62, 0xD0, 0x01, 0x51, 0xEC, 0x08, 0x51, 0xED, 0x08, 0x51, 0xE4, 0x08, 0x51, 0xE5, 0x08, 0x7C, 0x4B, 0x61, 0x38, 0xF8, 0x7C, 0x71, 0xF5, 0x62, 0xD0, 0x03, 0x51, 0xE8, 0x08, 0x51, 0xE9, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xEC, 0x08, 0x51, 0xED, 0x08, 0x7C, 0x4B, 0x61, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x54, 0x0C, 0x51, 0xE9, 0x54, 0x0B, 0x62, 0xD0, 0x03, 0x51, 0x9A, 0xE2 + } + }, + { + 303, + 79, + 0x39, + 0x015A, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x5A, 0xE6, 0x08, 0x51, 0xE7, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xEA, 0x08, 0x51, 0xEB, 0x08, 0x7C, 0x4B, 0x61, 0x38, 0xF8, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x54, 0x0E, 0x51, 0xE9, 0x54, 0x0D, 0x52, 0x08, 0x13, 0x0C, 0x54, 0x06, 0x52, 0x07, 0x1B, 0x0B, 0x54, 0x05, 0x62, 0xD0, 0x00, 0x51, 0x3B, 0x62, 0xD0, 0x00, 0x13, 0x06, 0x52, 0x05, 0x31, 0x80, 0x53, 0xE1, 0x50, 0x00, 0x31, 0x80, 0x1A, 0xE1, 0x96, 0xDB + } + }, + { + 304, + 79, + 0x39, + 0x015B, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x5B, 0xD0, 0x06, 0x56, 0x00, 0x48, 0x80, 0x41, 0x52, 0x06, 0x11, 0x00, 0x52, 0x05, 0x31, 0x80, 0x19, 0x80, 0xD0, 0x35, 0x62, 0xD0, 0x00, 0x52, 0x06, 0x73, 0x53, 0xE8, 0x52, 0x05, 0x73, 0x53, 0xE9, 0x51, 0xE8, 0x01, 0x01, 0x54, 0x06, 0x51, 0xE9, 0x09, 0x00, 0x54, 0x05, 0x62, 0xD0, 0x00, 0x51, 0x3B, 0x62, 0xD0, 0x00, 0x13, 0x06, 0x52, 0x05, 0x31, 0x80, 0x53, 0xE1, 0x50, 0x00, 0x31, 0x80, 0xF9, 0xA2 + } + }, + { + 305, + 79, + 0x39, + 0x015C, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x5C, 0x1A, 0xE1, 
0xD0, 0x04, 0x56, 0x00, 0x49, 0x52, 0x0A, 0x13, 0x0E, 0x54, 0x06, 0x52, 0x09, 0x1B, 0x0D, 0x54, 0x05, 0x62, 0xD0, 0x00, 0x51, 0x3C, 0x62, 0xD0, 0x00, 0x13, 0x06, 0x52, 0x05, 0x31, 0x80, 0x53, 0xE1, 0x50, 0x00, 0x31, 0x80, 0x1A, 0xE1, 0xD0, 0x06, 0x56, 0x00, 0x48, 0x80, 0x41, 0x52, 0x06, 0x11, 0x00, 0x52, 0x05, 0x31, 0x80, 0x19, 0x80, 0xD0, 0x35, 0x62, 0xD0, 0x00, 0x52, 0x2B, 0x07 + } + }, + { + 306, + 79, + 0x39, + 0x015D, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x5D, 0x06, 0x73, 0x53, 0xE8, 0x52, 0x05, 0x73, 0x53, 0xE9, 0x51, 0xE8, 0x01, 0x01, 0x54, 0x06, 0x51, 0xE9, 0x09, 0x00, 0x54, 0x05, 0x62, 0xD0, 0x00, 0x51, 0x3C, 0x62, 0xD0, 0x00, 0x13, 0x06, 0x52, 0x05, 0x31, 0x80, 0x53, 0xE1, 0x50, 0x00, 0x31, 0x80, 0x1A, 0xE1, 0xD0, 0x04, 0x56, 0x00, 0x49, 0x3D, 0xFB, 0x00, 0xA0, 0x06, 0x3D, 0xFB, 0x02, 0xB1, 0x57, 0x62, 0xD0, 0x00, 0x51, 0x3A, 0x08, 0x85, 0xBC + } + }, + { + 307, + 79, + 0x39, + 0x015E, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x5E, 0x62, 0xD0, 0x00, 0x51, 0x39, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xEA, 0x08, 0x51, 0xEB, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xEC, 0x08, 0x51, 0xED, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xE4, 0x08, 0x51, 0xE5, 0x08, 0x51, 0xE6, 0x08, 0x51, 0xE7, 0x08, 0x7C, 0x4D, 0x1E, 0x7C, 0x71, 0x88, 0x54, 0x01, 0x62, 0xD0, 0x03, 0x51, 0xEA, 0x08, 0x51, 0xEB, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xEC, 0x08, 0x51, 0xED, 0x92, 0xD7 + } + }, + { + 308, + 79, + 0x39, + 0x015F, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x5F, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xE4, 0x08, 0x51, 0xE5, 0x08, 0x51, 0xE6, 0x08, 0x51, 0xE7, 0x08, 0x7C, 0x4E, 0xE4, 0x38, 0xEE, 0x54, 0x02, 0x62, 0xD0, 0x00, 0x51, 0x3A, 0x08, 0x62, 0xD0, 0x00, 0x51, 0x39, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xE6, 0x08, 0x51, 0xE7, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xE8, 0x08, 0x51, 0xE9, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xEC, 0x08, 0x51, 0xED, 0x08, 0x51, 0xEE, 0x51, 0x56 + } + }, + { + 309, + 79, + 0x39, + 0x0160, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x60, 0x08, 0x51, 0xEF, 0x08, 0x7C, 0x4D, 0x1E, 0x7C, 0x71, 0x88, 0x05, 0x01, 0x62, 0xD0, 0x03, 0x51, 0xE6, 0x08, 0x51, 0xE7, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xE8, 0x08, 0x51, 0xE9, 0x08, 0x62, 0xD0, 0x01, 0x51, 0xEC, 0x08, 0x51, 0xED, 0x08, 0x51, 0xEE, 0x08, 0x51, 0xEF, 0x08, 0x7C, 0x4E, 0xE4, 0x38, 0xEE, 0x62, 0xD0, 0x00, 0x54, 0x03, 0x3D, 0x01, 0x00, 0xA0, 0x95, 0x3D, 0x02, 0xFF, 0xA0, 0x19, 0xE7 + } + }, + { + 310, + 79, + 0x39, + 0x0161, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x61, 0x90, 0x3D, 0x03, 0xFF, 0xA0, 0x8B, 0x52, 0x02, 0x3B, 0x03, 0xA0, 0x0B, 0x3D, 0x02, 0x08, 0xB0, 0x0B, 0x3D, 0x03, 0x01, 0xB0, 0x06, 0x7C, 0x71, 0xB9, 0x80, 0x76, 0x3D, 0x03, 0x08, 0xB0, 0x11, 0x3D, 0x02, 0x01, 0xB0, 0x0C, 0x52, 0x03, 0x03, 0x03, 0x54, 0x00, 0x07, 0x00, 0x2E, 0x80, 0x61, 0x52, 0x03, 0x3B, 0x02, 0xD0, 0x2C, 0x62, 0xD0, 0x00, 0x52, 0x02, 0x13, 0x03, 0x39, 0x01, 0xB0, 0xAA, 0x0A + } + }, + { + 311, + 79, + 0x39, + 0x0162, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x62, 0x21, 0x50, 0x02, 0x08, 0x52, 0x02, 0x08, 0x7C, 0x4A, 0x10, 0x38, 0xFF, 0x18, 0x39, 0x00, 0xB0, 0x06, 0x7C, 0x71, 0xB9, 0x80, 0x3B, 0x52, 0x03, 0x03, 0x03, 0x54, 0x00, 0x07, 0x00, 0x2E, 0x80, 0x30, 0x52, 0x02, 0x3B, 0x03, 0xD0, 0x2A, 0x62, 0xD0, 0x00, 0x52, 0x03, 0x13, 0x02, 0x39, 0x01, 0xB0, 0x1F, 0x50, 0x02, 0x08, 0x52, 0x03, 0x08, 0x7C, 0x4A, 0x10, 0x38, 0xFF, 0x18, 0x39, 0x00, 0x26, 0x03 + } + }, + { + 312, + 79, + 0x39, 
+ 0x0163, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x63, 0xB0, 0x0C, 0x52, 0x03, 0x03, 0x03, 0x54, 0x00, 0x07, 0x00, 0x2E, 0x80, 0x04, 0x7C, 0x71, 0xB9, 0x3D, 0x00, 0x00, 0xA0, 0x54, 0x3D, 0xFB, 0x01, 0xB0, 0x0D, 0x52, 0x00, 0x08, 0x90, 0x52, 0x38, 0xFF, 0x62, 0xD0, 0x00, 0x80, 0x48, 0x3D, 0xFB, 0x00, 0xB0, 0x12, 0x62, 0xD0, 0x00, 0x52, 0x00, 0x29, 0x30, 0x08, 0x91, 0x16, 0x38, 0xFF, 0x62, 0xD0, 0x00, 0x80, 0x32, 0x3D, 0xFB, 0x02, 0xB0, 0xE9, 0x8A + } + }, + { + 313, + 79, + 0x39, + 0x0164, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x64, 0x28, 0x3D, 0x00, 0x48, 0xA0, 0x06, 0x3D, 0x00, 0x49, 0xB0, 0x0D, 0x52, 0x00, 0x08, 0x90, 0x21, 0x38, 0xFF, 0x62, 0xD0, 0x00, 0x80, 0x17, 0x62, 0xD0, 0x00, 0x52, 0x00, 0x29, 0x30, 0x08, 0x90, 0xEA, 0x38, 0xFF, 0x62, 0xD0, 0x00, 0x80, 0x06, 0x62, 0xD0, 0x00, 0x50, 0x00, 0x38, 0xF1, 0x20, 0x7F, 0x10, 0x4F, 0x7C, 0x72, 0xEE, 0x39, 0x00, 0xA0, 0x43, 0x3D, 0xFC, 0x00, 0xA0, 0x3E, 0x62, 0x09, 0xCB + } + }, + { + 314, + 79, + 0x39, + 0x0165, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x65, 0xD0, 0x04, 0x52, 0xFC, 0x3A, 0xD7, 0xB0, 0x2C, 0x62, 0xD0, 0x00, 0x7C, 0x72, 0xEE, 0x53, 0xE9, 0x62, 0xD0, 0x04, 0x51, 0xD8, 0x62, 0xD0, 0x00, 0x3A, 0xE9, 0xA0, 0x21, 0x7C, 0x72, 0x39, 0x39, 0x00, 0xA0, 0x04, 0x7C, 0x72, 0x8C, 0x7C, 0x4A, 0xD2, 0x62, 0xD0, 0x04, 0x76, 0xD8, 0x56, 0xFC, 0x00, 0x80, 0x0A, 0x62, 0xD0, 0x04, 0x55, 0xD8, 0x00, 0x7C, 0x72, 0x59, 0x7C, 0x72, 0x39, 0x39, 0x4E, 0x56 + } + }, + { + 315, + 79, + 0x39, + 0x0166, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x66, 0x00, 0xA0, 0x4D, 0x3D, 0xFC, 0x00, 0xA0, 0x48, 0x62, 0xD0, 0x04, 0x3C, 0xDB, 0x00, 0xA0, 0x3D, 0x3C, 0xDB, 0x48, 0xA0, 0x38, 0x3C, 0xDB, 0x49, 0xA0, 0x33, 0x62, 0xD0, 0x04, 0x52, 0xFC, 0x3A, 0xD7, 0xB0, 0x22, 0x62, 0xD0, 0x00, 0x7C, 0x72, 0x39, 0x53, 0xE9, 0x62, 0xD0, 0x04, 0x51, 0xD6, 0x62, 0xD0, 0x00, 0x3A, 0xE9, 0xA0, 0x19, 0x7C, 0x4A, 0xD2, 0x62, 0xD0, 0x04, 0x76, 0xD6, 0x56, 0x79, 0xAD + } + }, + { + 316, + 79, + 0x39, + 0x0167, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x67, 0xFC, 0x00, 0x80, 0x0C, 0x7C, 0x72, 0x8C, 0x7C, 0x72, 0x59, 0x80, 0x04, 0x7C, 0x72, 0x8C, 0x3D, 0xFC, 0x00, 0xA0, 0x31, 0x7C, 0x4B, 0x1A, 0x7C, 0x4B, 0x99, 0x7C, 0x4C, 0x14, 0x7C, 0x6E, 0xC0, 0x62, 0xD0, 0x04, 0x52, 0xFC, 0x3A, 0xD0, 0xB0, 0x08, 0x62, 0xD0, 0x04, 0x76, 0xCF, 0x80, 0x04, 0x7C, 0x70, 0x5B, 0x52, 0xFC, 0x62, 0xD0, 0x04, 0x53, 0xD0, 0x52, 0xFC, 0x62, 0xD0, 0x04, 0x53, 0x22, 0x00 + } + }, + { + 317, + 79, + 0x39, + 0x0168, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x68, 0xDB, 0x7C, 0x73, 0x58, 0x52, 0xFC, 0x62, 0xD0, 0x00, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x05, 0x7C, 0x72, 0x64, 0x39, 0x00, 0xA1, 0x10, 0x56, 0x00, 0x00, 0x3D, 0xFC, 0x00, 0xA1, 0x08, 0x7C, 0x4B, 0x99, 0x62, 0xD0, 0x04, 0x52, 0xFC, 0x3A, 0xD9, 0xB0, 0x04, 0x56, 0x00, 0x01, 0x3D, 0x00, 0x00, 0xB0, 0xC1, 0x62, 0xD0, 0x00, 0x3C, 0x39, 0x00, 0xB0, 0x73, 0x62, 0xD0, 0x00, 0x3C, 0x3A, 0x00, 0x73, 0xA3 + } + }, + { + 318, + 79, + 0x39, + 0x0169, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x69, 0xB0, 0x6B, 0x62, 0xD0, 0x04, 0x3C, 0x9F, 0x00, 0xB0, 0x06, 0x3C, 0xA0, 0x00, 0xA0, 0x0E, 0x62, 0xD0, 0x04, 0x3C, 0x9D, 0x00, 0xB0, 0x56, 0x3C, 0x9E, 0x00, 0xB0, 0x51, 0x3D, 0xFC, 0x30, 0xB0, 0x13, 0x62, 0xD0, 0x04, 0x3C, 0xD9, 0x34, 0xA0, 0x06, 0x3C, 0xD9, 0x3C, 0xB0, 0x3F, 0x56, 0x00, 0x01, 0x80, 0x3A, 0x3D, 0xFC, 
0x3C, 0xB0, 0x13, 0x62, 0xD0, 0x04, 0x3C, 0xD9, 0x38, 0xA0, 0x06, 0x5F, 0x7C + } + }, + { + 319, + 79, + 0x39, + 0x016A, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x6A, 0x3C, 0xD9, 0x30, 0xB0, 0x28, 0x56, 0x00, 0x01, 0x80, 0x23, 0x3D, 0xFC, 0x38, 0xA0, 0x06, 0x3D, 0xFC, 0x34, 0xB0, 0x19, 0x62, 0xD0, 0x00, 0x52, 0xFC, 0x11, 0x04, 0x7C, 0x70, 0x34, 0xA0, 0x0A, 0x52, 0xFC, 0x01, 0x04, 0x7C, 0x70, 0x34, 0xB0, 0x04, 0x56, 0x00, 0x01, 0x3D, 0xFC, 0x30, 0xB0, 0x13, 0x62, 0xD0, 0x04, 0x3C, 0xD9, 0x3E, 0xA0, 0x06, 0x3C, 0xD9, 0x32, 0xB0, 0x35, 0x56, 0x00, 0x1E, 0xFB + } + }, + { + 320, + 79, + 0x39, + 0x016B, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x6B, 0x01, 0x80, 0x30, 0x3D, 0xFC, 0x3E, 0xB0, 0x13, 0x62, 0xD0, 0x04, 0x3C, 0xD9, 0x30, 0xA0, 0x06, 0x3C, 0xD9, 0x3C, 0xB0, 0x1E, 0x56, 0x00, 0x01, 0x80, 0x19, 0x62, 0xD0, 0x00, 0x52, 0xFC, 0x11, 0x02, 0x7C, 0x70, 0x34, 0xA0, 0x0A, 0x52, 0xFC, 0x01, 0x02, 0x7C, 0x70, 0x34, 0xB0, 0x04, 0x56, 0x00, 0x01, 0x3D, 0x00, 0x01, 0xB0, 0x1F, 0x7C, 0x72, 0x64, 0x53, 0xE9, 0x62, 0xD0, 0x04, 0x51, 0x16, 0xEC + } + }, + { + 321, + 79, + 0x39, + 0x016C, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x6C, 0xDA, 0x62, 0xD0, 0x00, 0x3A, 0xE9, 0xA0, 0x1E, 0x7C, 0x4A, 0xD2, 0x62, 0xD0, 0x04, 0x76, 0xDA, 0x56, 0xFC, 0x00, 0x80, 0x11, 0x62, 0xD0, 0x04, 0x55, 0xDA, 0x00, 0x52, 0xFC, 0x62, 0xD0, 0x04, 0x53, 0xD9, 0x56, 0xFC, 0x00, 0x3D, 0xFC, 0x00, 0xA0, 0xAE, 0x7C, 0x4B, 0x1A, 0x62, 0xD0, 0x03, 0x51, 0xED, 0x62, 0xD0, 0x03, 0x02, 0xE9, 0x62, 0xD0, 0x00, 0x53, 0xE8, 0x62, 0xD0, 0x03, 0x51, 0x43, 0x47 + } + }, + { + 322, + 79, + 0x39, + 0x016D, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x6D, 0xEC, 0x62, 0xD0, 0x03, 0x0A, 0xE8, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x7C, 0x6D, 0xF3, 0x51, 0xE8, 0x54, 0x02, 0x51, 0xE9, 0x54, 0x01, 0x62, 0xD0, 0x03, 0x51, 0xEB, 0x62, 0xD0, 0x03, 0x02, 0xE7, 0x62, 0xD0, 0x00, 0x53, 0xE8, 0x62, 0xD0, 0x03, 0x51, 0xEA, 0x62, 0xD0, 0x03, 0x0A, 0xE6, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x7C, 0x6D, 0xF3, 0x51, 0xE8, 0x54, 0x04, 0x51, 0xE9, 0x54, 0x03, 0x52, 0xCA, 0x56 + } + }, + { + 323, + 79, + 0x39, + 0x016E, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x6E, 0x01, 0x08, 0x52, 0x02, 0x08, 0x62, 0xD0, 0x04, 0x51, 0xA3, 0x08, 0x51, 0xA4, 0x08, 0x7C, 0x4B, 0x61, 0x38, 0xFC, 0x7C, 0x6F, 0xA8, 0x52, 0x03, 0x08, 0x52, 0x04, 0x08, 0x62, 0xD0, 0x04, 0x51, 0xA1, 0x08, 0x51, 0xA2, 0x08, 0x7C, 0x4B, 0x61, 0x38, 0xFC, 0x7C, 0x6F, 0x76, 0x7C, 0x4C, 0x14, 0x7C, 0x6E, 0xC0, 0x62, 0xD0, 0x04, 0x52, 0xFC, 0x3A, 0xD0, 0xB0, 0x08, 0x62, 0xD0, 0x04, 0x76, 0x9E, 0xFF + } + }, + { + 324, + 79, + 0x39, + 0x016F, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x6F, 0xCF, 0x80, 0x04, 0x7C, 0x70, 0x5B, 0x52, 0xFC, 0x62, 0xD0, 0x04, 0x53, 0xD0, 0x52, 0xFC, 0x62, 0xD0, 0x04, 0x53, 0xDB, 0x7C, 0x73, 0x51, 0x52, 0xFC, 0x62, 0xD0, 0x00, 0x38, 0xFB, 0x20, 0x7F, 0x10, 0x4F, 0x50, 0x00, 0x08, 0x52, 0xFC, 0x08, 0x90, 0x07, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x04, 0x7C, 0x6F, 0xC9, 0x3D, 0xFB, 0x00, 0xA0, 0x37, 0x62, 0xD0, 0x04, 0xC4, 0x4C + } + }, + { + 325, + 79, + 0x39, + 0x0170, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x70, 0x55, 0xCD, 0x00, 0x7C, 0x70, 0x62, 0x50, 0x0D, 0x10, 0x08, 0x57, 0x8D, 0x28, 0x53, 0xE7, 0x18, 0x75, 0x09, 0x00, 0x28, 0x20, 0x02, 0xE8, 0x53, 0xE8, 0x51, 0xE7, 0x0A, 0xE9, 0x10, 0x08, 0x51, 0xE8, 0x20, 0x7C, 
0x1E, 0xE4, 0x20, 0x7C, 0x73, 0x43, 0x7C, 0x70, 0x41, 0x52, 0xFB, 0x62, 0xD0, 0x00, 0x83, 0x5B, 0x3D, 0xFC, 0x00, 0xB2, 0xB8, 0x7C, 0x70, 0x5B, 0x10, 0x7C, 0x1E, 0x9A, 0x62, 0xB4, 0x2D + } + }, + { + 326, + 79, + 0x39, + 0x0171, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x71, 0xD0, 0x00, 0x5A, 0xE9, 0x20, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x04, 0x53, 0xA9, 0x18, 0x53, 0xAA, 0x10, 0x7C, 0x1E, 0xA7, 0x62, 0xD0, 0x00, 0x5A, 0xE9, 0x20, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x04, 0x53, 0xA7, 0x18, 0x53, 0xA8, 0x62, 0xD0, 0x04, 0x3C, 0xA9, 0x00, 0xB0, 0x06, 0x3C, 0xAA, 0x00, 0xA1, 0x37, 0x62, 0xD0, 0x04, 0x3C, 0xCC, 0x01, 0xB0, 0xFB, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x3F, 0x44 + } + }, + { + 327, + 79, + 0x39, + 0x0172, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x72, 0x08, 0x57, 0x8F, 0x28, 0x53, 0xE9, 0x18, 0x75, 0x09, 0x00, 0x28, 0x20, 0x62, 0xD0, 0x04, 0x12, 0xAA, 0x7C, 0x71, 0x7F, 0x1A, 0xA9, 0xD0, 0xDD, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x08, 0x57, 0x8D, 0x28, 0x53, 0xE9, 0x18, 0x75, 0x09, 0x00, 0x28, 0x53, 0xE8, 0x20, 0x7C, 0x70, 0x77, 0xD0, 0xC4, 0x7C, 0x72, 0xB9, 0x3A, 0xFE, 0xB0, 0x08, 0x7C, 0x72, 0xC2, 0x3A, 0xFF, 0xA0, 0x96, 0x62, 0x15, 0xF1 + } + }, + { + 328, + 79, + 0x39, + 0x0173, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x73, 0xD0, 0x03, 0x51, 0xF4, 0x62, 0xD0, 0x03, 0x3A, 0xFC, 0xB0, 0x0D, 0x62, 0xD0, 0x03, 0x51, 0xF5, 0x62, 0xD0, 0x03, 0x3A, 0xFD, 0xA0, 0x7E, 0x62, 0xD0, 0x03, 0x51, 0xF6, 0x08, 0x51, 0xF7, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xFE, 0x08, 0x51, 0xFF, 0x08, 0x7C, 0x4B, 0x61, 0x38, 0xFC, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x57, 0x96, 0x28, 0x20, 0x7C, 0x71, 0xD1, 0xC0, 0x27, 0x62, 0xD0, 0x03, 0x07, 0xD6 + } + }, + { + 329, + 79, + 0x39, + 0x0174, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x74, 0x51, 0xF4, 0x08, 0x51, 0xF5, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xFC, 0x08, 0x51, 0xFD, 0x08, 0x7C, 0x4B, 0x61, 0x38, 0xFC, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x57, 0x97, 0x28, 0x20, 0x7C, 0x71, 0xD1, 0xD0, 0x32, 0x56, 0x01, 0x01, 0x62, 0xD0, 0x04, 0x55, 0xCD, 0x00, 0x7C, 0x70, 0x41, 0x62, 0xD0, 0x03, 0x50, 0x0D, 0x10, 0x08, 0x57, 0x9D, 0x28, 0x53, 0xBC, 0x18, 0x75, 0x09, 0x00, 0x28, 0xDC, 0x81 + } + }, + { + 330, + 79, + 0x39, + 0x0175, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x75, 0x53, 0xBD, 0x20, 0x50, 0x0D, 0x10, 0x08, 0x57, 0x9F, 0x28, 0x53, 0xBE, 0x18, 0x75, 0x09, 0x00, 0x28, 0x53, 0xBF, 0x20, 0x3D, 0x01, 0x00, 0xB0, 0x1C, 0x62, 0xD0, 0x04, 0x3C, 0xCD, 0x00, 0xB0, 0x07, 0x7C, 0x6F, 0x87, 0x56, 0x00, 0x20, 0x62, 0xD0, 0x04, 0x76, 0xCD, 0x3C, 0xCD, 0x01, 0xB0, 0x04, 0x7C, 0x71, 0x3A, 0x62, 0xD0, 0x04, 0x3C, 0xCC, 0x02, 0xB0, 0x2D, 0x62, 0xD0, 0x00, 0x50, 0xA9, 0x1C + } + }, + { + 331, + 79, + 0x39, + 0x0176, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x76, 0x0D, 0x10, 0x08, 0x57, 0x8B, 0x28, 0x53, 0xE9, 0x18, 0x75, 0x09, 0x00, 0x28, 0x20, 0x62, 0xD0, 0x04, 0x12, 0xAA, 0x7C, 0x71, 0x7F, 0x1A, 0xA9, 0xD0, 0x0F, 0x7C, 0x70, 0x62, 0x7C, 0x70, 0x77, 0xD0, 0x07, 0x56, 0x00, 0x40, 0x7C, 0x4B, 0x8F, 0x62, 0xD0, 0x03, 0x3C, 0xEE, 0x00, 0xB0, 0x06, 0x3C, 0xEF, 0x00, 0xA1, 0x26, 0x62, 0xD0, 0x04, 0x3C, 0xCC, 0x00, 0xB0, 0x42, 0x62, 0xD0, 0x04, 0x2A, 0x1F + } + }, + { + 332, + 79, + 0x39, + 0x0177, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x77, 0x3C, 0xCD, 0x01, 0xB0, 0x3A, 0x62, 0xD0, 0x03, 0x51, 0xEF, 0x62, 0xD0, 0x04, 0x12, 0xA8, 0x62, 0xD0, 
0x03, 0x51, 0xEE, 0x62, 0xD0, 0x04, 0x1A, 0xA7, 0xD0, 0x0D, 0x7C, 0x72, 0x6F, 0x54, 0x03, 0x7C, 0x72, 0x7A, 0x54, 0x02, 0x80, 0x04, 0x7C, 0x70, 0x41, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x7C, 0x72, 0x0D, 0x20, 0x7C, 0x73, 0x4A, 0xD0, 0xEA, 0x7C, 0x4B, 0x8F, 0x80, 0xE5, 0x62, 0xD0, 0x5E, 0x88 + } + }, + { + 333, + 79, + 0x39, + 0x0178, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x78, 0x04, 0x3C, 0xCC, 0x01, 0xB0, 0xDD, 0x7C, 0x73, 0x35, 0xB0, 0xD8, 0x62, 0xD0, 0x03, 0x51, 0xEF, 0x62, 0xD0, 0x04, 0x12, 0xA8, 0x62, 0xD0, 0x03, 0x51, 0xEE, 0x62, 0xD0, 0x04, 0x1A, 0xA7, 0xD0, 0x0D, 0x7C, 0x72, 0x6F, 0x53, 0xEF, 0x7C, 0x72, 0x7A, 0x53, 0xEE, 0x80, 0x04, 0x7C, 0x70, 0x41, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x08, 0x57, 0x93, 0x28, 0x53, 0xE9, 0x18, 0x75, 0x09, 0x00, 0x77, 0xBB + } + }, + { + 334, + 79, + 0x39, + 0x0179, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x79, 0x28, 0x20, 0x62, 0xD0, 0x03, 0x12, 0xEF, 0x7C, 0x70, 0xA1, 0x1A, 0xEE, 0xD0, 0x88, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x7C, 0x72, 0x0D, 0x53, 0xE8, 0x20, 0x62, 0xD0, 0x03, 0x51, 0xEF, 0x62, 0xD0, 0x00, 0x12, 0xE8, 0x62, 0xD0, 0x03, 0x51, 0xEE, 0x62, 0xD0, 0x00, 0x1A, 0xE9, 0xD0, 0x66, 0x62, 0xD0, 0x03, 0x51, 0xF6, 0x08, 0x51, 0xF7, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xBC, 0x08, 0x51, 0x4F, 0x6C + } + }, + { + 335, + 79, + 0x39, + 0x017A, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x7A, 0xBD, 0x08, 0x7C, 0x4B, 0x61, 0x38, 0xFC, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x57, 0x95, 0x28, 0x20, 0x7C, 0x70, 0xBF, 0xD0, 0x2F, 0x62, 0xD0, 0x03, 0x51, 0xF4, 0x08, 0x51, 0xF5, 0x08, 0x62, 0xD0, 0x03, 0x51, 0xBE, 0x08, 0x51, 0xBF, 0x08, 0x7C, 0x4B, 0x61, 0x38, 0xFC, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x57, 0x95, 0x28, 0x20, 0x7C, 0x70, 0xBF, 0xD0, 0x09, 0x56, 0x00, 0x22, 0x7C, 0x79, 0xC1 + } + }, + { + 336, + 79, + 0x39, + 0x017B, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x7B, 0x4B, 0x8F, 0x80, 0x1F, 0x7C, 0x6F, 0x87, 0x62, 0xD0, 0x04, 0x55, 0xCD, 0x01, 0x7C, 0x71, 0x3A, 0x56, 0x00, 0x20, 0x80, 0x0E, 0x7C, 0x4B, 0x8F, 0x80, 0x09, 0x7C, 0x73, 0x35, 0xB0, 0x04, 0x7C, 0x4B, 0x8F, 0x10, 0x50, 0x00, 0x5C, 0x7C, 0x1E, 0xE4, 0x20, 0x7C, 0x73, 0x43, 0x80, 0x53, 0x62, 0xD0, 0x04, 0x3C, 0xCC, 0x00, 0xB0, 0x04, 0x7C, 0x4A, 0xD9, 0x52, 0xFC, 0x62, 0xD0, 0x04, 0x53, 0x43, 0x56 + } + }, + { + 337, + 79, + 0x39, + 0x017C, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x7C, 0xCC, 0x62, 0xD0, 0x01, 0x51, 0xE7, 0x08, 0x51, 0xE6, 0x62, 0xD0, 0x03, 0x53, 0xF6, 0x18, 0x53, 0xF7, 0x62, 0xD0, 0x01, 0x51, 0xE5, 0x08, 0x51, 0xE4, 0x62, 0xD0, 0x03, 0x53, 0xF4, 0x18, 0x53, 0xF5, 0x62, 0xD0, 0x01, 0x51, 0xEF, 0x08, 0x51, 0xEE, 0x62, 0xD0, 0x03, 0x53, 0xF2, 0x18, 0x53, 0xF3, 0x62, 0xD0, 0x01, 0x51, 0xED, 0x08, 0x51, 0xEC, 0x62, 0xD0, 0x03, 0x53, 0xF0, 0x18, 0x53, 0x03, 0xD7 + } + }, + { + 338, + 79, + 0x39, + 0x017D, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x7D, 0xF1, 0x3D, 0x00, 0x40, 0xB0, 0x43, 0x7C, 0x72, 0xC2, 0x02, 0xF3, 0x62, 0xD0, 0x00, 0x53, 0xE8, 0x7C, 0x72, 0xB9, 0x0A, 0xF2, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x7C, 0x6D, 0xF3, 0x51, 0xE8, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x04, 0x53, 0xA3, 0x18, 0x53, 0xA4, 0x62, 0xD0, 0x03, 0x51, 0xF5, 0x02, 0xF5, 0x62, 0xD0, 0x00, 0x53, 0xE8, 0x62, 0xD0, 0x03, 0x51, 0xF4, 0x0A, 0xF4, 0x62, 0xD0, 0x00, 0x9F, 0x10 + } + }, + { + 339, + 79, + 0x39, + 0x017E, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 
0x7E, 0x53, 0xE9, 0x7C, 0x6D, 0xF3, 0x7C, 0x71, 0xC3, 0x52, 0x00, 0x62, 0xD0, 0x00, 0x38, 0xFC, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x06, 0x62, 0xD0, 0x00, 0x3C, 0x0E, 0x00, 0xA0, 0x06, 0x3D, 0xFC, 0xFF, 0xB0, 0x09, 0x62, 0xD0, 0x04, 0x55, 0xE2, 0x01, 0x85, 0xED, 0x62, 0xD0, 0x04, 0x3C, 0xE1, 0x00, 0xA0, 0x06, 0x3C, 0xE1, 0xFF, 0xB0, 0x74, 0x56, 0x00, 0x00, 0x56, 0x00, 0x00, 0x80, 0x65, 0x62, 0xDB, 0x89 + } + }, + { + 340, + 79, + 0x39, + 0x017F, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x7F, 0xD0, 0x00, 0x52, 0x00, 0x7C, 0x6D, 0x8A, 0x06, 0xE8, 0xE0, 0x7C, 0x71, 0x08, 0x62, 0xD0, 0x04, 0x51, 0xE2, 0x62, 0xD0, 0x00, 0x3F, 0xE8, 0x52, 0x00, 0x7C, 0x6D, 0x8A, 0x06, 0xE8, 0xE1, 0x7C, 0x71, 0x08, 0x7C, 0x72, 0x9A, 0x62, 0xD0, 0x04, 0x76, 0xE2, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0xB8, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x7C, 0x6E, 0xAE, 0x06, 0xE6, 0x45, 0x5E + } + }, + { + 341, + 79, + 0x39, + 0x0180, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x80, 0xB0, 0x0E, 0xE7, 0x03, 0x7C, 0x6D, 0xEA, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0xB4, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x7C, 0x6E, 0xAE, 0x06, 0xE6, 0xEA, 0x0E, 0xE7, 0x00, 0x7C, 0x6D, 0xEA, 0x77, 0x00, 0x7C, 0x6F, 0xC1, 0xCF, 0x98, 0x85, 0x6D, 0x62, 0xD0, 0x04, 0x50, 0x03, 0x3A, 0xE1, 0xC0, 0x0A, 0x62, 0xD0, 0x00, 0x50, 0x03, 0x3A, 0x0E, 0xD4, 0xAD, 0x7C, 0x71, 0x93, 0xC2, 0xFD, 0xCF + } + }, + { + 342, + 79, + 0x39, + 0x0181, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x81, 0x0D, 0x7C, 0x6F, 0xC9, 0x80, 0x13, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0xCC, 0x7C, 0x6F, 0x54, 0x50, 0xFF, 0x3F, 0xE8, 0x77, 0x00, 0x7C, 0x71, 0xB1, 0xCF, 0xEA, 0x56, 0x00, 0x00, 0x80, 0x51, 0x56, 0x02, 0x00, 0x80, 0x41, 0x62, 0xD0, 0x00, 0x7C, 0x6F, 0x27, 0x06, 0xE8, 0xCC, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x47, 0xE9, 0x0F, 0xA0, 0x29, 0x52, 0x02, 0x08, 0x95, 0x00 + } + }, + { + 343, + 79, + 0x39, + 0x0182, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x82, 0x52, 0x00, 0x08, 0x95, 0xA1, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x54, 0x03, 0x50, 0x0D, 0x10, 0x57, 0x85, 0x28, 0x20, 0x3B, 0x03, 0xC0, 0x0F, 0x52, 0x02, 0x08, 0x52, 0x00, 0x08, 0x95, 0x0D, 0x38, 0xFE, 0x77, 0x01, 0x80, 0x0C, 0x77, 0x02, 0x62, 0xD0, 0x04, 0x52, 0x02, 0x3A, 0xE1, 0xCF, 0xB8, 0x77, 0x00, 0x7C, 0x6F, 0xC1, 0xCF, 0xAC, 0x7C, 0x71, 0xE1, 0x3D, 0x00, 0x02, 0xA0, 0x06, 0x3D, 0xB2, 0x3B + } + }, + { + 344, + 79, + 0x39, + 0x0183, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x83, 0x00, 0x03, 0xB1, 0x09, 0x7C, 0x68, 0xBB, 0x52, 0x01, 0x08, 0x7C, 0x66, 0xDE, 0x52, 0x01, 0x08, 0x7C, 0x6A, 0x7F, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x51, 0x0E, 0x13, 0x01, 0x62, 0xD0, 0x00, 0x54, 0x00, 0x56, 0x02, 0x00, 0x80, 0xDF, 0x62, 0xD0, 0x00, 0x7C, 0x6F, 0x27, 0x55, 0xE6, 0x03, 0x55, 0xE7, 0x00, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x00, 0x3C, 0xE7, 0x00, 0xB0, 0x06, 0x3C, 0xE6, 0x00, 0x73, 0xBE + } + }, + { + 345, + 79, + 0x39, + 0x0184, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x84, 0xA0, 0x1A, 0x70, 0xFB, 0x6E, 0xE7, 0x6E, 0xE6, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x04, 0xE1, 0x51, 0xE9, 0x0C, 0xE0, 0x65, 0xE8, 0x6B, 0xE9, 0x8F, 0xDE, 0x5F, 0xE8, 0xE1, 0x5F, 0xE9, 0xE0, 0x62, 0xD0, 0x00, 0x06, 0xE8, 0xCE, 0x0E, 0xE9, 0x02, 0x7C, 0x6D, 0x83, 0x54, 0x04, 0x7C, 0x6F, 0x27, 0x06, 0xE8, 0xFD, 0x0E, 0xE9, 0x02, 0x7C, 0x6D, 0x83, 0x54, 0x03, 0x7C, 0x71, 0x00, 0xCF, 0x77 + } + }, + { + 
346, + 79, + 0x39, + 0x0185, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x85, 0x55, 0xE6, 0x03, 0x55, 0xE7, 0x00, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x00, 0x3C, 0xE7, 0x00, 0xB0, 0x06, 0x3C, 0xE6, 0x00, 0xA0, 0x1A, 0x70, 0xFB, 0x6E, 0xE7, 0x6E, 0xE6, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x04, 0xE1, 0x51, 0xE9, 0x0C, 0xE0, 0x65, 0xE8, 0x6B, 0xE9, 0x8F, 0xDE, 0x5F, 0xE8, 0xE1, 0x5F, 0xE9, 0xE0, 0x62, 0xD0, 0x00, 0x06, 0xE8, 0xC5, 0x0E, 0xE9, 0x02, 0x7C, 0x6D, 0x25, 0x24 + } + }, + { + 347, + 79, + 0x39, + 0x0186, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x86, 0x83, 0x54, 0x03, 0x52, 0x03, 0x7C, 0x6D, 0x8A, 0x06, 0xE8, 0xE0, 0x0E, 0xE9, 0x01, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x7C, 0x72, 0x31, 0x06, 0xE6, 0xD0, 0x0E, 0xE7, 0x03, 0x7C, 0x6D, 0xEA, 0x52, 0x03, 0x7C, 0x6D, 0x8A, 0x06, 0xE8, 0xE1, 0x0E, 0xE9, 0x01, 0x7C, 0x6D, 0x83, 0x7C, 0x6E, 0xE1, 0x52, 0x04, 0x7C, 0x6E, 0xB6, 0x06, 0xE6, 0xD5, 0x0E, 0xE7, 0x02, 0x7C, 0x6D, 0xEA, 0x51, 0xE8, 0x36, 0x47 + } + }, + { + 348, + 79, + 0x39, + 0x0187, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x87, 0x3F, 0xE6, 0x77, 0x02, 0x52, 0x02, 0x3B, 0x00, 0xCF, 0x1D, 0x83, 0x3A, 0x56, 0x00, 0x00, 0x80, 0x76, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0xCC, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x47, 0xE9, 0xF0, 0xA0, 0x5E, 0x56, 0x03, 0x00, 0x56, 0x04, 0xFF, 0x56, 0x02, 0x00, 0x80, 0x3B, 0x62, 0xD0, 0x00, 0x7C, 0x6F, 0x27, 0x06, 0xE8, 0xCC, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x02, 0xE0 + } + }, + { + 349, + 79, + 0x39, + 0x0188, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x88, 0x83, 0x53, 0xE9, 0x47, 0xE9, 0x0F, 0xA0, 0x23, 0x52, 0x02, 0x08, 0x52, 0x00, 0x08, 0x94, 0x16, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x54, 0x05, 0x3D, 0x03, 0x00, 0xB0, 0x06, 0x7C, 0x71, 0x4B, 0x80, 0x0A, 0x52, 0x05, 0x3B, 0x03, 0xD0, 0x04, 0x7C, 0x71, 0x4B, 0x77, 0x02, 0x62, 0xD0, 0x04, 0x52, 0x02, 0x3A, 0xE1, 0xCF, 0xBE, 0x3D, 0x04, 0xFF, 0xA0, 0x0B, 0x52, 0x04, 0x08, 0x52, 0x00, 0x08, 0x89, 0xEF + } + }, + { + 350, + 79, + 0x39, + 0x0189, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x89, 0x93, 0x6A, 0x38, 0xFE, 0x77, 0x00, 0x7C, 0x6F, 0xC1, 0xCF, 0x87, 0x82, 0xB9, 0x7C, 0x6F, 0xC9, 0x80, 0x13, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0xCC, 0x7C, 0x6F, 0x54, 0x50, 0xFF, 0x3F, 0xE8, 0x77, 0x00, 0x7C, 0x6F, 0xC1, 0xCF, 0xEA, 0x56, 0x00, 0x00, 0x80, 0x51, 0x56, 0x02, 0x00, 0x80, 0x41, 0x62, 0xD0, 0x00, 0x7C, 0x6F, 0x27, 0x06, 0xE8, 0xCC, 0x0E, 0xE9, 0x03, 0x7C, 0xED, 0xB8 + } + }, + { + 351, + 79, + 0x39, + 0x018A, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x8A, 0x6D, 0x83, 0x53, 0xE9, 0x47, 0xE9, 0xF0, 0xA0, 0x29, 0x52, 0x00, 0x08, 0x52, 0x02, 0x08, 0x93, 0x95, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x54, 0x03, 0x50, 0x0D, 0x10, 0x57, 0x85, 0x28, 0x20, 0x3B, 0x03, 0xC0, 0x0F, 0x52, 0x00, 0x08, 0x52, 0x02, 0x08, 0x93, 0x01, 0x38, 0xFE, 0x77, 0x01, 0x80, 0x0C, 0x77, 0x02, 0x62, 0xD0, 0x00, 0x52, 0x02, 0x3A, 0x0E, 0xCF, 0xB8, 0x77, 0x00, 0x7C, 0x71, 0x06, 0xEB + } + }, + { + 352, + 79, + 0x39, + 0x018B, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x8B, 0xB1, 0xCF, 0xAC, 0x7C, 0x71, 0xE1, 0x50, 0x00, 0x3B, 0x00, 0xC0, 0x06, 0x3D, 0x00, 0x04, 0xD1, 0x24, 0x62, 0xD0, 0x00, 0x51, 0x0E, 0x13, 0x01, 0x62, 0xD0, 0x00, 0x39, 0x04, 0xD1, 0x16, 0x7C, 0x68, 0xBB, 0x52, 0x01, 0x08, 0x93, 0xF7, 0x52, 0x01, 0x08, 0x7C, 0x6B, 0x94, 0x38, 0xFE, 0x56, 0x02, 0x00, 
0x80, 0xF9, 0x62, 0xD0, 0x00, 0x7C, 0x6F, 0x27, 0x55, 0xE6, 0x03, 0x55, 0xE7, 0x00, 0x9B, 0x16 + } + }, + { + 353, + 79, + 0x39, + 0x018C, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x8C, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x00, 0x3C, 0xE7, 0x00, 0xB0, 0x06, 0x3C, 0xE6, 0x00, 0xA0, 0x1A, 0x70, 0xFB, 0x6E, 0xE7, 0x6E, 0xE6, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x04, 0xE1, 0x51, 0xE9, 0x0C, 0xE0, 0x65, 0xE8, 0x6B, 0xE9, 0x8F, 0xDE, 0x5F, 0xE8, 0xE1, 0x5F, 0xE9, 0xE0, 0x62, 0xD0, 0x00, 0x06, 0xE8, 0xC5, 0x0E, 0xE9, 0x02, 0x7C, 0x6D, 0x83, 0x54, 0x04, 0x7C, 0x6F, 0x27, 0x98, 0x11 + } + }, + { + 354, + 79, + 0x39, + 0x018D, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x8D, 0x06, 0xE8, 0xFD, 0x0E, 0xE9, 0x02, 0x7C, 0x6D, 0x83, 0x54, 0x03, 0x7C, 0x6F, 0x44, 0x06, 0xE8, 0xCC, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x7A, 0xE8, 0x53, 0xE7, 0x26, 0xE7, 0xF0, 0x7C, 0x6F, 0xD0, 0x7C, 0x71, 0x00, 0x55, 0xE6, 0x03, 0x55, 0xE7, 0x00, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x00, 0x3C, 0xE7, 0x00, 0xB0, 0x06, 0x3C, 0xE6, 0x00, 0xA0, 0x1A, 0x70, 0xFB, 0x6E, 0xE7, 0x6E, 0xE6, 0xE5, 0xAC + } + }, + { + 355, + 79, + 0x39, + 0x018E, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x8E, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x04, 0xE1, 0x51, 0xE9, 0x0C, 0xE0, 0x65, 0xE8, 0x6B, 0xE9, 0x8F, 0xDE, 0x5F, 0xE8, 0xE1, 0x5F, 0xE9, 0xE0, 0x62, 0xD0, 0x00, 0x06, 0xE8, 0xCE, 0x0E, 0xE9, 0x02, 0x7C, 0x6D, 0x83, 0x54, 0x03, 0x52, 0x04, 0x7C, 0x6D, 0x8A, 0x06, 0xE8, 0xE0, 0x0E, 0xE9, 0x01, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x52, 0x03, 0x53, 0xE6, 0x55, 0xE7, 0x00, 0x06, 0xE6, 0x47, 0x71 + } + }, + { + 356, + 79, + 0x39, + 0x018F, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x8F, 0xD0, 0x0E, 0xE7, 0x03, 0x7C, 0x6D, 0xEA, 0x52, 0x04, 0x7C, 0x6D, 0x8A, 0x06, 0xE8, 0xE1, 0x0E, 0xE9, 0x01, 0x7C, 0x6D, 0x83, 0x7C, 0x6E, 0xE1, 0x52, 0x03, 0x7C, 0x6E, 0xB6, 0x06, 0xE6, 0xD5, 0x0E, 0xE7, 0x02, 0x7C, 0x6D, 0xEA, 0x51, 0xE8, 0x3F, 0xE6, 0x77, 0x02, 0x52, 0x02, 0x3B, 0x00, 0xCF, 0x03, 0x80, 0x80, 0x56, 0x00, 0x00, 0x80, 0x76, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0x2C, 0x3C + } + }, + { + 357, + 79, + 0x39, + 0x0190, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x90, 0xE8, 0xCC, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x47, 0xE9, 0x0F, 0xA0, 0x5E, 0x56, 0x03, 0x00, 0x56, 0x04, 0xFF, 0x56, 0x02, 0x00, 0x80, 0x3B, 0x62, 0xD0, 0x00, 0x7C, 0x6F, 0x27, 0x06, 0xE8, 0xCC, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x47, 0xE9, 0xF0, 0xA0, 0x23, 0x52, 0x00, 0x08, 0x52, 0x02, 0x08, 0x91, 0xEE, 0x38, 0xFE, 0x62, 0xD0, 0x00, 0x54, 0x05, 0x3D, 0x0E, 0x01 + } + }, + { + 358, + 79, + 0x39, + 0x0191, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x91, 0x03, 0x00, 0xB0, 0x06, 0x7C, 0x71, 0x4B, 0x80, 0x0A, 0x52, 0x05, 0x3B, 0x03, 0xD0, 0x04, 0x7C, 0x71, 0x4B, 0x77, 0x02, 0x62, 0xD0, 0x00, 0x52, 0x02, 0x3A, 0x0E, 0xCF, 0xBE, 0x3D, 0x04, 0xFF, 0xA0, 0x0B, 0x52, 0x00, 0x08, 0x52, 0x04, 0x08, 0x91, 0x42, 0x38, 0xFE, 0x77, 0x00, 0x7C, 0x71, 0xB1, 0xCF, 0x87, 0x7C, 0x71, 0x93, 0xD0, 0x8E, 0x7C, 0x72, 0xDD, 0x12, 0xE1, 0x62, 0xD0, 0x00, 0xD5, 0x90 + } + }, + { + 359, + 79, + 0x39, + 0x0192, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x92, 0x54, 0x00, 0x56, 0x02, 0x00, 0x80, 0x57, 0x62, 0xD0, 0x00, 0x7C, 0x6F, 0x27, 0x06, 0xE8, 0xCC, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x26, 0xE9, 0xF0, 0x3C, 0xE9, 0xF0, 0xB0, 0x35, 
0x7C, 0x6F, 0x27, 0x06, 0xE8, 0xD0, 0x7C, 0x6F, 0x54, 0x62, 0xD0, 0x04, 0x51, 0xE2, 0x62, 0xD0, 0x00, 0x3F, 0xE8, 0x52, 0x02, 0x7C, 0x6D, 0xD9, 0x06, 0xE8, 0xD5, 0x7C, 0x6F, 0xE2, 0x7C, 0x72, 0x81, 0xE9 + } + }, + { + 360, + 79, + 0x39, + 0x0193, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x93, 0x9A, 0x62, 0xD0, 0x04, 0x51, 0xE2, 0x08, 0x91, 0xCC, 0x38, 0xFF, 0x62, 0xD0, 0x04, 0x53, 0xE2, 0x7B, 0x00, 0x80, 0x08, 0x3D, 0x00, 0x00, 0xB0, 0x03, 0x80, 0x2B, 0x77, 0x02, 0x62, 0xD0, 0x00, 0x52, 0x02, 0x3A, 0x0E, 0xCF, 0xA2, 0x80, 0x1E, 0x50, 0x00, 0x08, 0x91, 0xF1, 0x38, 0xFF, 0x7C, 0x71, 0x93, 0xC0, 0x0A, 0x50, 0x00, 0x08, 0x95, 0x86, 0x38, 0xFF, 0x80, 0x09, 0x50, 0x00, 0x08, 0xE4, 0xB0 + } + }, + { + 361, + 79, + 0x39, + 0x0194, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x94, 0x7C, 0x6B, 0x94, 0x38, 0xFF, 0x56, 0x00, 0x00, 0x80, 0x88, 0x62, 0xD0, 0x00, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0xD0, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x7C, 0x6E, 0xAE, 0x65, 0xE6, 0x6B, 0xE7, 0x7C, 0x70, 0x1E, 0x06, 0xE6, 0xE0, 0x0E, 0xE7, 0x01, 0x7C, 0x6D, 0xEA, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0xB8, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x7C, 0x6E, 0xAE, 0x06, 0x78, 0xD9 + } + }, + { + 362, + 79, + 0x39, + 0x0195, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x95, 0xE6, 0xB0, 0x0E, 0xE7, 0x03, 0x7C, 0x6D, 0xEA, 0x7C, 0x6D, 0xA5, 0x06, 0xE8, 0xB4, 0x0E, 0xE9, 0x03, 0x7C, 0x6D, 0x83, 0x53, 0xE9, 0x7C, 0x6E, 0xAE, 0x06, 0xE6, 0xEA, 0x0E, 0xE7, 0x00, 0x7C, 0x6D, 0xEA, 0x7C, 0x6D, 0xA5, 0x65, 0xE8, 0x6B, 0xE9, 0x06, 0xE8, 0xD5, 0x0E, 0xE9, 0x02, 0x7C, 0x6D, 0x83, 0x7C, 0x6E, 0xE1, 0x7C, 0x73, 0x3C, 0x7C, 0x6E, 0xAE, 0x65, 0xE6, 0x6B, 0xE7, 0x7C, 0x67, 0xB8 + } + }, + { + 363, + 79, + 0x39, + 0x0196, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x96, 0x70, 0x1E, 0x06, 0xE6, 0xE1, 0x0E, 0xE7, 0x01, 0x7C, 0x6D, 0xEA, 0x51, 0xE8, 0x3F, 0xE6, 0x77, 0x00, 0x7C, 0x6F, 0xC1, 0xCF, 0x75, 0x3D, 0xFC, 0xFF, 0xA0, 0x08, 0x7C, 0x72, 0xDD, 0x53, 0xE1, 0x80, 0x07, 0x62, 0xD0, 0x04, 0x55, 0xE1, 0xFF, 0x38, 0xFA, 0x20, 0x7F, 0x10, 0x4F, 0x62, 0xD0, 0x00, 0x52, 0xFB, 0x97, 0xD5, 0x40, 0x06, 0xE8, 0xE0, 0x0E, 0xE9, 0x01, 0x97, 0xC5, 0x40, 0x53, 0xCA, 0x7F + } + }, + { + 364, + 79, + 0x39, + 0x0197, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x97, 0xE9, 0x52, 0xFC, 0x53, 0xE6, 0x55, 0xE7, 0x00, 0x06, 0xE6, 0xD0, 0x0E, 0xE7, 0x03, 0x7C, 0x6D, 0xEA, 0x52, 0xFB, 0x97, 0xB5, 0x40, 0x06, 0xE8, 0xE1, 0x0E, 0xE9, 0x01, 0x97, 0xA5, 0x40, 0x7C, 0x6E, 0xE1, 0x52, 0xFC, 0x7C, 0x6E, 0xB6, 0x06, 0xE6, 0xD5, 0x0E, 0xE7, 0x02, 0x97, 0xFB, 0x40, 0x51, 0xE8, 0x3F, 0xE6, 0x7C, 0x70, 0xF0, 0x06, 0xE8, 0xCC, 0x0E, 0xE9, 0x03, 0x97, 0x84, 0x40, 0xDB, 0xA2 + } + }, + { + 365, + 79, + 0x39, + 0x0198, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x98, 0x7A, 0xE8, 0x53, 0xE7, 0x26, 0xE7, 0xF0, 0x7C, 0x6F, 0xD0, 0x52, 0xFC, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x06, 0xE8, 0xCC, 0x0E, 0xE9, 0x03, 0x97, 0x6A, 0x40, 0x7A, 0xE8, 0x53, 0xE7, 0x26, 0xE7, 0x0F, 0x7C, 0x6F, 0xD0, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x03, 0x7C, 0x6F, 0xB9, 0x55, 0xE9, 0x00, 0x06, 0xE8, 0xB8, 0x0E, 0xE9, 0x03, 0x97, 0x4B, 0x40, 0x54, 0x00, 0x7C, 0x70, 0xF0, 0x06, 0xE8, 0x7E, 0xE9 + } + }, + { + 366, + 79, + 0x39, + 0x0199, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x99, 0xB0, 0x0E, 0xE9, 0x03, 0x97, 0x3D, 0x40, 0x54, 0x01, 0x52, 0x00, 0x3B, 0x01, 0xD0, 
0x08, 0x7C, 0x73, 0x06, 0x54, 0x02, 0x80, 0x06, 0x7C, 0x72, 0xFE, 0x54, 0x02, 0x7C, 0x6F, 0xB9, 0x55, 0xE9, 0x00, 0x06, 0xE8, 0xB4, 0x0E, 0xE9, 0x03, 0x97, 0x1A, 0x40, 0x54, 0x00, 0x7C, 0x70, 0xF0, 0x06, 0xE8, 0xEA, 0x0E, 0xE9, 0x00, 0x97, 0x0C, 0x40, 0x54, 0x01, 0x52, 0x00, 0x3B, 0x01, 0xD0, 0x08, 0x98, 0x1E + } + }, + { + 367, + 79, + 0x39, + 0x019A, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x9A, 0x7C, 0x73, 0x06, 0x05, 0x02, 0x80, 0x06, 0x7C, 0x72, 0xFE, 0x05, 0x02, 0x52, 0x02, 0x62, 0xD0, 0x00, 0x38, 0xFD, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x77, 0xFC, 0xB0, 0x03, 0x77, 0xFC, 0x50, 0x0F, 0x3B, 0xFC, 0xD0, 0x04, 0x56, 0xFC, 0x01, 0x52, 0xFC, 0x54, 0x01, 0x56, 0x00, 0x00, 0x80, 0x1A, 0x62, 0xD0, 0x00, 0x52, 0x00, 0x96, 0xD2, 0x40, 0x06, 0xE8, 0xE0, 0x0E, 0xE9, 0x01, 0x96, 0xD9, 0xA1 + } + }, + { + 368, + 79, + 0x39, + 0x019B, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x9B, 0xC2, 0x40, 0x3B, 0xFC, 0xB0, 0x03, 0x77, 0xFC, 0x77, 0x00, 0x7C, 0x6F, 0xC1, 0xCF, 0xE3, 0x52, 0xFC, 0x3B, 0x01, 0xBF, 0xD4, 0x52, 0xFC, 0x62, 0xD0, 0x00, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x0B, 0x3D, 0xFC, 0x00, 0xB0, 0x11, 0x62, 0xD0, 0x04, 0x51, 0xE1, 0x54, 0x03, 0x62, 0xD0, 0x00, 0x51, 0x0E, 0x54, 0x02, 0x80, 0x0D, 0x62, 0xD0, 0x04, 0x51, 0xE1, 0x13, 0xFC, 0x54, 0x03, 0x43, 0x76 + } + }, + { + 369, + 79, + 0x39, + 0x019C, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x9C, 0x7C, 0x70, 0xDB, 0x56, 0x00, 0x00, 0x56, 0x01, 0x00, 0x81, 0xA7, 0x56, 0x04, 0x00, 0x81, 0x97, 0x3D, 0xFC, 0x00, 0xB0, 0x22, 0x62, 0xD0, 0x00, 0x7C, 0x6F, 0x44, 0x06, 0xE8, 0xB8, 0x0E, 0xE9, 0x03, 0x96, 0x60, 0x40, 0x54, 0x05, 0x96, 0x7D, 0x40, 0x06, 0xE8, 0xB0, 0x0E, 0xE9, 0x03, 0x96, 0x52, 0x40, 0x54, 0x06, 0x80, 0x54, 0x62, 0xD0, 0x00, 0x52, 0x04, 0x96, 0xBD, 0x40, 0x96, 0x43, 0xDE, 0xAD + } + }, + { + 370, + 79, + 0x39, + 0x019D, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x9D, 0x40, 0x54, 0x05, 0x96, 0x60, 0x40, 0x55, 0xE6, 0x03, 0x55, 0xE7, 0x00, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x00, 0x3C, 0xE7, 0x00, 0xB0, 0x06, 0x3C, 0xE6, 0x00, 0xA0, 0x1A, 0x70, 0xFB, 0x6E, 0xE7, 0x6E, 0xE6, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x04, 0xE1, 0x51, 0xE9, 0x0C, 0xE0, 0x65, 0xE8, 0x6B, 0xE9, 0x8F, 0xDE, 0x5F, 0xE8, 0xE1, 0x5F, 0xE9, 0xE0, 0x62, 0xD0, 0x00, 0x06, 0xE8, 0x4D, 0x8C + } + }, + { + 371, + 79, + 0x39, + 0x019E, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x9E, 0xC3, 0x0E, 0xE9, 0x02, 0x95, 0xFD, 0x40, 0x54, 0x06, 0x52, 0x05, 0x3B, 0x06, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x52, 0x06, 0x13, 0x05, 0x54, 0x07, 0x80, 0x0A, 0x62, 0xD0, 0x00, 0x52, 0x05, 0x13, 0x06, 0x54, 0x07, 0x3D, 0xFC, 0x00, 0xB0, 0x22, 0x62, 0xD0, 0x00, 0x97, 0x97, 0x40, 0x06, 0xE8, 0xB4, 0x0E, 0xE9, 0x03, 0x95, 0xCD, 0x40, 0x54, 0x05, 0x95, 0xEA, 0x40, 0x06, 0xE8, 0xEA, 0x0E, 0xC7, 0x81 + } + }, + { + 372, + 79, + 0x39, + 0x019F, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0x9F, 0xE9, 0x00, 0x95, 0xBF, 0x40, 0x54, 0x06, 0x80, 0x54, 0x62, 0xD0, 0x00, 0x52, 0x04, 0x96, 0x6E, 0x40, 0x95, 0xB0, 0x40, 0x54, 0x05, 0x95, 0xCD, 0x40, 0x55, 0xE6, 0x03, 0x55, 0xE7, 0x00, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x00, 0x3C, 0xE7, 0x00, 0xB0, 0x06, 0x3C, 0xE6, 0x00, 0xA0, 0x1A, 0x70, 0xFB, 0x6E, 0xE7, 0x6E, 0xE6, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x04, 0xE1, 0x51, 0xE9, 0x0B, 0x0A + } + }, + { + 373, + 79, + 0x39, + 0x01A0, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 
0x06, 0x07, 0x01, 0xA0, 0x0C, 0xE0, 0x65, 0xE8, 0x6B, 0xE9, 0x8F, 0xDE, 0x5F, 0xE8, 0xE1, 0x5F, 0xE9, 0xE0, 0x62, 0xD0, 0x00, 0x06, 0xE8, 0xC4, 0x0E, 0xE9, 0x02, 0x95, 0x6A, 0x40, 0x54, 0x06, 0x52, 0x05, 0x3B, 0x06, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x52, 0x06, 0x13, 0x05, 0x54, 0x08, 0x80, 0x0A, 0x62, 0xD0, 0x00, 0x52, 0x05, 0x13, 0x06, 0x54, 0x08, 0x62, 0xD0, 0x00, 0x52, 0x07, 0x53, 0xE8, 0x50, 0x00, 0x08, 0xD8, 0xA5 + } + }, + { + 374, + 79, + 0x39, + 0x01A1, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xA1, 0x51, 0xE8, 0x08, 0x52, 0x07, 0x08, 0x95, 0x13, 0x7C, 0x71, 0xF5, 0x52, 0x08, 0x53, 0xE6, 0x50, 0x00, 0x08, 0x51, 0xE6, 0x08, 0x52, 0x08, 0x08, 0x95, 0x01, 0x38, 0xFA, 0x62, 0xD0, 0x00, 0x52, 0x0A, 0x02, 0xE8, 0x62, 0xD0, 0x03, 0x53, 0xDF, 0x52, 0x09, 0x62, 0xD0, 0x00, 0x0A, 0xE9, 0x62, 0xD0, 0x03, 0x53, 0xDE, 0x62, 0xD0, 0x00, 0x96, 0xCB, 0x40, 0x52, 0x01, 0x02, 0xE8, 0x53, 0xE8, 0x90, 0x16 + } + }, + { + 375, + 79, + 0x39, + 0x01A2, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xA2, 0x50, 0x00, 0x0A, 0xE9, 0x53, 0xE9, 0x65, 0xE8, 0x6B, 0xE9, 0x06, 0xE8, 0xB1, 0x97, 0x53, 0x40, 0x62, 0xD0, 0x03, 0x51, 0xDE, 0x62, 0xD0, 0x00, 0x3F, 0xE8, 0x62, 0xD0, 0x03, 0x51, 0xDF, 0x62, 0xD0, 0x00, 0x3F, 0xE8, 0x77, 0x04, 0x52, 0x04, 0x3B, 0x02, 0xCE, 0x65, 0x77, 0x00, 0x07, 0x01, 0x03, 0x52, 0x00, 0x3B, 0x03, 0xCE, 0x55, 0x38, 0xF5, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x97, 0xD6, 0xA3 + } + }, + { + 376, + 79, + 0x39, + 0x01A3, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xA3, 0x08, 0x40, 0x80, 0x95, 0x62, 0xD0, 0x00, 0x94, 0xDC, 0x40, 0x06, 0xE8, 0xCC, 0x0E, 0xE9, 0x03, 0x94, 0xB1, 0x40, 0x53, 0xE9, 0x47, 0xE9, 0xF0, 0xA0, 0x7D, 0x52, 0x01, 0x95, 0x1C, 0x40, 0x95, 0xCD, 0x40, 0x06, 0xE6, 0xB8, 0x0E, 0xE7, 0x03, 0x51, 0xE7, 0x60, 0xD4, 0x3E, 0xE6, 0x53, 0xE7, 0x96, 0xDE, 0x40, 0x52, 0x01, 0x95, 0x47, 0x40, 0x95, 0xB4, 0x40, 0x06, 0xE6, 0xB4, 0x0E, 0xE7, 0x7E, 0xF4 + } + }, + { + 377, + 79, + 0x39, + 0x01A4, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xA4, 0x03, 0x51, 0xE7, 0x60, 0xD4, 0x3E, 0xE6, 0x53, 0xE7, 0x96, 0xC5, 0x40, 0x52, 0x01, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x55, 0xE6, 0x03, 0x55, 0xE7, 0x00, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x00, 0x3C, 0xE7, 0x00, 0xB0, 0x06, 0x3C, 0xE6, 0x00, 0xA0, 0x1A, 0x70, 0xFB, 0x6E, 0xE7, 0x6E, 0xE6, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x04, 0xE1, 0x51, 0xE9, 0x0C, 0xE0, 0x65, 0xE8, 0x6B, 0xE9, 0xD5, 0xA3 + } + }, + { + 378, + 79, + 0x39, + 0x01A5, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xA5, 0x8F, 0xDE, 0x5F, 0xE8, 0xE1, 0x5F, 0xE9, 0xE0, 0x62, 0xD0, 0x00, 0x06, 0xE8, 0xCE, 0x0E, 0xE9, 0x02, 0x96, 0x86, 0x40, 0x77, 0x01, 0x77, 0x00, 0x96, 0x67, 0x40, 0xCF, 0x68, 0x96, 0x6A, 0x40, 0x81, 0x15, 0x62, 0xD0, 0x00, 0x94, 0x3E, 0x40, 0x06, 0xE8, 0xCC, 0x0E, 0xE9, 0x03, 0x94, 0x13, 0x40, 0x53, 0xE9, 0x47, 0xE9, 0x0F, 0xA0, 0xFD, 0x52, 0x01, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x55, 0x51, 0x9C + } + }, + { + 379, + 79, + 0x39, + 0x01A6, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xA6, 0xE6, 0x03, 0x55, 0xE7, 0x00, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x00, 0x3C, 0xE7, 0x00, 0xB0, 0x06, 0x3C, 0xE6, 0x00, 0xA0, 0x1A, 0x70, 0xFB, 0x6E, 0xE7, 0x6E, 0xE6, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x04, 0xE1, 0x51, 0xE9, 0x0C, 0xE0, 0x65, 0xE8, 0x6B, 0xE9, 0x8F, 0xDE, 0x5F, 0xE8, 0xE1, 0x5F, 0xE9, 0xE0, 0x62, 0xD0, 0x00, 0x06, 0xE8, 0xC3, 0x0E, 0xE9, 0x02, 0x94, 0xEF, 0x40, 0xA8, 
0x4B + } + }, + { + 380, + 79, + 0x39, + 0x01A7, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xA7, 0x06, 0xE6, 0xB0, 0x0E, 0xE7, 0x03, 0x51, 0xE7, 0x60, 0xD4, 0x3E, 0xE6, 0x53, 0xE7, 0x96, 0x00, 0x40, 0x52, 0x01, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x55, 0xE6, 0x03, 0x55, 0xE7, 0x00, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x00, 0x3C, 0xE7, 0x00, 0xB0, 0x06, 0x3C, 0xE6, 0x00, 0xA0, 0x1A, 0x70, 0xFB, 0x6E, 0xE7, 0x6E, 0xE6, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x04, 0xE1, 0x51, 0xE9, 0x0C, 0x20, 0x3C + } + }, + { + 381, + 79, + 0x39, + 0x01A8, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xA8, 0xE0, 0x65, 0xE8, 0x6B, 0xE9, 0x8F, 0xDE, 0x5F, 0xE8, 0xE1, 0x5F, 0xE9, 0xE0, 0x62, 0xD0, 0x00, 0x06, 0xE8, 0xC4, 0x0E, 0xE9, 0x02, 0x94, 0x96, 0x40, 0x06, 0xE6, 0xEA, 0x0E, 0xE7, 0x00, 0x51, 0xE7, 0x60, 0xD4, 0x3E, 0xE6, 0x53, 0xE7, 0x95, 0xA7, 0x40, 0x52, 0x01, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x55, 0xE6, 0x03, 0x55, 0xE7, 0x00, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x00, 0x3C, 0xE7, 0x00, 0x6A, 0xD1 + } + }, + { + 382, + 79, + 0x39, + 0x01A9, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xA9, 0xB0, 0x06, 0x3C, 0xE6, 0x00, 0xA0, 0x1A, 0x70, 0xFB, 0x6E, 0xE7, 0x6E, 0xE6, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x04, 0xE1, 0x51, 0xE9, 0x0C, 0xE0, 0x65, 0xE8, 0x6B, 0xE9, 0x8F, 0xDE, 0x5F, 0xE8, 0xE1, 0x5F, 0xE9, 0xE0, 0x62, 0xD0, 0x00, 0x06, 0xE8, 0xC5, 0x0E, 0xE9, 0x02, 0x95, 0x68, 0x40, 0x77, 0x01, 0x77, 0x00, 0x97, 0x39, 0x40, 0xCE, 0xE8, 0x38, 0xFE, 0x20, 0x7F, 0x10, 0x0B, 0x14 + } + }, + { + 383, + 79, + 0x39, + 0x01AA, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xAA, 0x4F, 0x38, 0x07, 0x3D, 0xFC, 0x00, 0xB0, 0x11, 0x62, 0xD0, 0x04, 0x51, 0xE1, 0x54, 0x05, 0x62, 0xD0, 0x00, 0x51, 0x0E, 0x54, 0x02, 0x80, 0x0D, 0x62, 0xD0, 0x04, 0x51, 0xE1, 0x13, 0xFC, 0x54, 0x05, 0x96, 0x38, 0x40, 0x62, 0xD0, 0x00, 0x94, 0x6F, 0x40, 0x06, 0xE8, 0x5E, 0x0E, 0xE9, 0x0F, 0x94, 0xAA, 0x40, 0x54, 0x04, 0x56, 0x03, 0x00, 0x56, 0x01, 0x00, 0x97, 0xE4, 0x40, 0x80, 0xCB, 0xEE, 0xDB + } + }, + { + 384, + 79, + 0x39, + 0x01AB, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xAB, 0x62, 0xD0, 0x03, 0x55, 0xDF, 0x00, 0x55, 0xDE, 0x00, 0x56, 0x06, 0x00, 0x80, 0x34, 0x96, 0xCE, 0x40, 0x52, 0x01, 0x94, 0x2B, 0x40, 0x10, 0x57, 0x03, 0x7C, 0x4A, 0xBC, 0x20, 0x03, 0x06, 0x54, 0x00, 0x92, 0xC2, 0x40, 0x65, 0xE8, 0x6B, 0xE9, 0x06, 0xE8, 0xB1, 0x0E, 0xE9, 0x02, 0x92, 0x93, 0x40, 0x53, 0xE9, 0x3E, 0xE8, 0x62, 0xD0, 0x03, 0x04, 0xDF, 0x95, 0xA5, 0x40, 0x0C, 0xDE, 0x77, 0x92, 0x24 + } + }, + { + 385, + 79, + 0x39, + 0x01AC, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xAC, 0x06, 0x52, 0x06, 0x3B, 0x02, 0xCF, 0xC8, 0x95, 0x83, 0x40, 0xD0, 0x7A, 0x62, 0xD0, 0x03, 0x51, 0xDF, 0x08, 0x51, 0xDE, 0x62, 0xD0, 0x04, 0x53, 0xB1, 0x18, 0x53, 0xB2, 0x56, 0x06, 0x00, 0x80, 0x5F, 0x96, 0x7B, 0x40, 0x52, 0x01, 0x93, 0xD8, 0x40, 0x54, 0x00, 0x3D, 0xFC, 0x00, 0xB0, 0x42, 0x52, 0x00, 0x92, 0x56, 0x40, 0x06, 0xE8, 0xE0, 0x0E, 0xE9, 0x01, 0x92, 0x46, 0x40, 0x53, 0xE9, 0x64, 0xC9 + } + }, + { + 386, + 79, + 0x39, + 0x01AD, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xAD, 0x96, 0x67, 0x40, 0x06, 0xE6, 0xD0, 0x0E, 0xE7, 0x03, 0x92, 0x9F, 0x40, 0x52, 0x00, 0x92, 0x3A, 0x40, 0x06, 0xE8, 0xE1, 0x0E, 0xE9, 0x01, 0x92, 0x2A, 0x40, 0x93, 0x85, 0x40, 0x52, 0x06, 0x93, 0x55, 0x40, 0x06, 0xE6, 0xD5, 0x0E, 0xE7, 0x02, 0x92, 0x80, 0x40, 0x51, 0xE8, 0x3F, 0xE6, 
0x80, 0x0D, 0x96, 0x2B, 0x40, 0x06, 0xE8, 0xFD, 0x0E, 0xE9, 0x02, 0x94, 0x5D, 0x40, 0x77, 0x06, 0x52, 0x35, 0x6C + } + }, + { + 387, + 79, + 0x39, + 0x01AE, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xAE, 0x06, 0x3B, 0x02, 0xCF, 0x9D, 0x77, 0x03, 0x07, 0x01, 0x03, 0x52, 0x03, 0x3B, 0x04, 0xCF, 0x31, 0x38, 0xF9, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x07, 0x3D, 0xFC, 0x00, 0xB0, 0x11, 0x62, 0xD0, 0x04, 0x51, 0xE1, 0x54, 0x01, 0x62, 0xD0, 0x00, 0x51, 0x0E, 0x54, 0x02, 0x80, 0x0D, 0x62, 0xD0, 0x04, 0x51, 0xE1, 0x13, 0xFC, 0x54, 0x01, 0x95, 0x23, 0x40, 0x62, 0xD0, 0x00, 0x93, 0x69, 0x40, 0x06, 0x99, 0x35 + } + }, + { + 388, + 79, + 0x39, + 0x01AF, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xAF, 0xE8, 0x5E, 0x0E, 0xE9, 0x0F, 0x93, 0x95, 0x40, 0x54, 0x04, 0x56, 0x03, 0x00, 0x56, 0x00, 0x00, 0x96, 0xCF, 0x40, 0x81, 0x6E, 0x62, 0xD0, 0x03, 0x55, 0xDF, 0x00, 0x55, 0xDE, 0x00, 0x56, 0x05, 0x00, 0x80, 0x39, 0x62, 0xD0, 0x00, 0x93, 0x30, 0x40, 0x52, 0x00, 0x93, 0x13, 0x40, 0x53, 0xE9, 0x10, 0x52, 0x05, 0x57, 0x03, 0x7C, 0x4A, 0xBC, 0x20, 0x02, 0xE9, 0x54, 0x06, 0x52, 0x06, 0x91, 0x39, 0x76 + } + }, + { + 389, + 79, + 0x39, + 0x01B0, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xB0, 0xD8, 0x40, 0x06, 0xE8, 0xB1, 0x0E, 0xE9, 0x02, 0x91, 0x79, 0x40, 0x53, 0xE9, 0x3E, 0xE8, 0x62, 0xD0, 0x03, 0x04, 0xDF, 0x94, 0x8B, 0x40, 0x0C, 0xDE, 0x77, 0x05, 0x52, 0x05, 0x3B, 0x01, 0xCF, 0xC3, 0x94, 0x69, 0x40, 0xD1, 0x18, 0x62, 0xD0, 0x03, 0x51, 0xDF, 0x08, 0x51, 0xDE, 0x62, 0xD0, 0x04, 0x53, 0xB1, 0x18, 0x53, 0xB2, 0x56, 0x05, 0x00, 0x80, 0x2A, 0x3D, 0xFC, 0x00, 0xB0, 0x13, 0x78, 0xF5 + } + }, + { + 390, + 79, + 0x39, + 0x01B1, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xB1, 0x62, 0xD0, 0x00, 0x92, 0xD3, 0x40, 0x06, 0xE8, 0xD0, 0x93, 0x09, 0x40, 0x50, 0x00, 0x3F, 0xE8, 0x80, 0x11, 0x62, 0xD0, 0x00, 0x92, 0xC1, 0x40, 0x06, 0xE8, 0xFD, 0x93, 0x85, 0x40, 0x50, 0xFF, 0x3F, 0xE8, 0x77, 0x05, 0x52, 0x05, 0x3B, 0x02, 0xCF, 0xD2, 0x56, 0x05, 0x00, 0x80, 0x66, 0x62, 0xD0, 0x00, 0x92, 0xA4, 0x40, 0x52, 0x00, 0x92, 0x87, 0x40, 0x54, 0x06, 0x3D, 0xFC, 0x00, 0xB0, 0x7F, 0x04 + } + }, + { + 391, + 79, + 0x39, + 0x01B2, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xB2, 0x42, 0x52, 0x05, 0x91, 0x05, 0x40, 0x06, 0xE8, 0xE0, 0x0E, 0xE9, 0x01, 0x90, 0xF5, 0x40, 0x53, 0xE9, 0x95, 0x16, 0x40, 0x06, 0xE6, 0xD0, 0x0E, 0xE7, 0x03, 0x91, 0x4E, 0x40, 0x52, 0x05, 0x90, 0xE9, 0x40, 0x06, 0xE8, 0xE1, 0x0E, 0xE9, 0x01, 0x90, 0xD9, 0x40, 0x92, 0x34, 0x40, 0x52, 0x06, 0x92, 0x04, 0x40, 0x06, 0xE6, 0xD5, 0x0E, 0xE7, 0x02, 0x91, 0x2F, 0x40, 0x51, 0xE8, 0x3F, 0xE6, 0xBE, 0x83 + } + }, + { + 392, + 79, + 0x39, + 0x01B3, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xB3, 0x80, 0x11, 0x62, 0xD0, 0x00, 0x92, 0x51, 0x40, 0x06, 0xE8, 0xFD, 0x93, 0x15, 0x40, 0x52, 0x06, 0x3F, 0xE8, 0x77, 0x05, 0x52, 0x05, 0x3B, 0x01, 0xCF, 0x96, 0x3D, 0xFC, 0x00, 0xB0, 0x5F, 0x62, 0xD0, 0x04, 0x51, 0xE2, 0x62, 0xD0, 0x04, 0x53, 0xE3, 0x56, 0x05, 0x00, 0x80, 0x4A, 0x62, 0xD0, 0x00, 0x92, 0x25, 0x40, 0x06, 0xE8, 0xD0, 0x0E, 0xE9, 0x03, 0x90, 0x87, 0x40, 0x39, 0x00, 0xB0, 0x0F, 0x26 + } + }, + { + 393, + 79, + 0x39, + 0x01B4, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xB4, 0x35, 0x92, 0x15, 0x40, 0x06, 0xE8, 0xD0, 0x92, 0x4B, 0x40, 0x62, 0xD0, 0x04, 0x51, 0xE3, 0x62, 0xD0, 0x00, 0x3F, 0xE8, 0x62, 0xD0, 0x04, 0x51, 0xE3, 0x08, 0x7C, 0x66, 0x95, 
0x38, 0xFF, 0x62, 0xD0, 0x04, 0x53, 0xE3, 0x62, 0xD0, 0x00, 0x52, 0x05, 0x90, 0xAE, 0x40, 0x06, 0xE8, 0xD5, 0x92, 0xB1, 0x40, 0x95, 0x66, 0x40, 0x77, 0x05, 0x52, 0x05, 0x3B, 0x02, 0xCF, 0xB2, 0x77, 0x03, 0x07, 0xE0, 0xC9 + } + }, + { + 394, + 79, + 0x39, + 0x01B5, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xB5, 0x00, 0x03, 0x52, 0x03, 0x3B, 0x04, 0xCE, 0x8E, 0x3D, 0xFC, 0x00, 0xB0, 0x0B, 0x62, 0xD0, 0x04, 0x51, 0xE3, 0x62, 0xD0, 0x04, 0x53, 0xE2, 0x38, 0xF9, 0x20, 0x7F, 0x10, 0x4F, 0x38, 0x02, 0x92, 0x68, 0x40, 0x48, 0xFC, 0x01, 0xA0, 0x09, 0x52, 0xFB, 0x05, 0x01, 0x52, 0xFA, 0x0D, 0x00, 0x66, 0xFB, 0x6C, 0xFA, 0x70, 0xFB, 0x6F, 0xFC, 0x3D, 0xFC, 0x00, 0xBF, 0xE7, 0x93, 0xB0, 0x40, 0x38, 0x30, 0x6A + } + }, + { + 395, + 79, + 0x39, + 0x01B6, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xB6, 0xFE, 0x20, 0x7F, 0x51, 0xE9, 0x60, 0xD4, 0x3E, 0xE8, 0x7F, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x65, 0xE8, 0x6B, 0xE9, 0x65, 0xE8, 0x6B, 0xE9, 0x65, 0xE8, 0x6B, 0xE9, 0x7F, 0x65, 0xE8, 0x6B, 0xE9, 0x65, 0xE8, 0x6B, 0xE9, 0x7F, 0x52, 0x00, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x7F, 0x70, 0xFB, 0x6E, 0xE9, 0x6E, 0xE8, 0x70, 0xFB, 0x6E, 0xE9, 0x6E, 0xE8, 0x70, 0xFB, 0x6E, 0xE9, 0x6E, 0xE8, 0x70, 0x4E, 0xA7 + } + }, + { + 396, + 79, + 0x39, + 0x01B7, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xB7, 0xFB, 0x6E, 0xE9, 0x6E, 0xE8, 0x7F, 0x70, 0xFB, 0x6E, 0xE9, 0x6E, 0xE8, 0x70, 0xFB, 0x6E, 0xE9, 0x6E, 0xE8, 0x70, 0xFB, 0x6E, 0xE9, 0x6E, 0xE8, 0x7F, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x65, 0xE8, 0x6B, 0xE9, 0x7F, 0x60, 0xD4, 0x3E, 0xE8, 0x53, 0xE9, 0x7F, 0x51, 0xE7, 0x60, 0xD5, 0x51, 0xE9, 0x3F, 0xE6, 0x7F, 0x70, 0xFB, 0x6E, 0xE9, 0x6E, 0xE8, 0x7F, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x55, 0x4C, 0xA4 + } + }, + { + 397, + 79, + 0x39, + 0x01B8, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xB8, 0xE6, 0x03, 0x55, 0xE7, 0x00, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x00, 0x3C, 0xE7, 0x00, 0xB0, 0x06, 0x3C, 0xE6, 0x00, 0xA0, 0x1A, 0x70, 0xFB, 0x6E, 0xE7, 0x6E, 0xE6, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x04, 0xE1, 0x51, 0xE9, 0x0C, 0xE0, 0x65, 0xE8, 0x6B, 0xE9, 0x8F, 0xDE, 0x5F, 0xE8, 0xE1, 0x5F, 0xE9, 0xE0, 0x62, 0xD0, 0x00, 0x06, 0xE8, 0xCC, 0x0E, 0xE9, 0x02, 0x7F, 0x53, 0xE8, 0xA8, 0x5D + } + }, + { + 398, + 79, + 0x39, + 0x01B9, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xB9, 0x55, 0xE9, 0x00, 0x55, 0xE6, 0x03, 0x55, 0xE7, 0x00, 0x55, 0xE1, 0x00, 0x55, 0xE0, 0x00, 0x3C, 0xE7, 0x00, 0xB0, 0x06, 0x3C, 0xE6, 0x00, 0xA0, 0x1A, 0x70, 0xFB, 0x6E, 0xE7, 0x6E, 0xE6, 0xD0, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x04, 0xE1, 0x51, 0xE9, 0x0C, 0xE0, 0x65, 0xE8, 0x6B, 0xE9, 0x8F, 0xDE, 0x5F, 0xE8, 0xE1, 0x5F, 0xE9, 0xE0, 0x62, 0xD0, 0x00, 0x06, 0xE8, 0xCD, 0x0E, 0xE9, 0x80, 0x0E + } + }, + { + 399, + 79, + 0x39, + 0x01BA, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xBA, 0x02, 0x7F, 0x62, 0xD0, 0x01, 0x51, 0xE7, 0x08, 0x51, 0xE6, 0x62, 0xD0, 0x03, 0x53, 0xEC, 0x18, 0x53, 0xED, 0x62, 0xD0, 0x01, 0x51, 0xE5, 0x08, 0x51, 0xE4, 0x62, 0xD0, 0x03, 0x53, 0xEA, 0x18, 0x53, 0xEB, 0x7F, 0x53, 0xE8, 0x52, 0x01, 0x09, 0x00, 0x60, 0xD4, 0x3E, 0xE8, 0x7F, 0x52, 0x00, 0x53, 0xE6, 0x55, 0xE7, 0x00, 0x7F, 0x53, 0xE6, 0x55, 0xE7, 0x00, 0x65, 0xE6, 0x6B, 0xE7, 0x7F, 0x2B, 0x65 + } + }, + { + 400, + 79, + 0x39, + 0x01BB, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xBB, 0x62, 0xD0, 0x01, 0x51, 0xEF, 0x08, 0x51, 0xEE, 0x62, 0xD0, 0x03, 
0x53, 0xE8, 0x18, 0x53, 0xE9, 0x62, 0xD0, 0x01, 0x51, 0xED, 0x08, 0x51, 0xEC, 0x62, 0xD0, 0x03, 0x53, 0xE6, 0x18, 0x53, 0xE7, 0x7F, 0x53, 0xE9, 0x3E, 0xE8, 0x53, 0xE8, 0x7F, 0x62, 0xD0, 0x00, 0x51, 0xAF, 0x29, 0x01, 0x53, 0xAF, 0x51, 0xAF, 0x60, 0x04, 0x51, 0xAF, 0x29, 0x08, 0x53, 0xAF, 0x51, 0xAF, 0x60, 0x04, 0x7F, 0x67, 0xDE + } + }, + { + 401, + 79, + 0x39, + 0x01BC, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xBC, 0x02, 0xE8, 0x53, 0xE8, 0x50, 0x00, 0x0A, 0xE9, 0x53, 0xE9, 0x06, 0xE8, 0x62, 0x0E, 0xE9, 0x0F, 0x51, 0xE9, 0x10, 0x58, 0xE8, 0x28, 0x20, 0x7F, 0x52, 0x05, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x7F, 0x53, 0xAF, 0x51, 0xAF, 0x60, 0x04, 0x7F, 0x52, 0x02, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x7F, 0x62, 0xD0, 0x03, 0x51, 0x99, 0x21, 0x70, 0x54, 0x00, 0x3D, 0x00, 0x00, 0x7F, 0x62, 0xD0, 0x03, 0x51, 0x11, 0x33 + } + }, + { + 402, + 79, + 0x39, + 0x01BD, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xBD, 0x99, 0x21, 0x70, 0x7F, 0x52, 0x04, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x7F, 0x62, 0xD0, 0x00, 0x52, 0x02, 0x53, 0xE8, 0x7F, 0x0E, 0xE9, 0x03, 0x51, 0xE9, 0x60, 0xD5, 0x7F, 0x51, 0xE9, 0x10, 0x58, 0xE8, 0x28, 0x20, 0x7F, 0x62, 0xD0, 0x00, 0x26, 0xAF, 0xF7, 0x51, 0xAF, 0x60, 0x04, 0x26, 0xAF, 0xFE, 0x51, 0xAF, 0x60, 0x04, 0x7F, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x67, 0xE0 + } + }, + { + 403, + 79, + 0x39, + 0x01BE, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xBE, 0x04, 0x53, 0x9D, 0x18, 0x53, 0x9E, 0x7F, 0x62, 0xD0, 0x03, 0x51, 0xF7, 0x08, 0x51, 0xF6, 0x62, 0xD0, 0x03, 0x53, 0xBC, 0x18, 0x53, 0xBD, 0x62, 0xD0, 0x03, 0x51, 0xF5, 0x08, 0x51, 0xF4, 0x62, 0xD0, 0x03, 0x53, 0xBE, 0x18, 0x53, 0xBF, 0x7F, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x04, 0x53, 0x9F, 0x18, 0x53, 0xA0, 0x7F, 0x62, 0xD0, 0x00, 0x52, 0xFC, 0x53, 0xE8, 0x85, 0x1D + } + }, + { + 404, + 79, + 0x39, + 0x01BF, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xBF, 0x7F, 0x62, 0xD0, 0x00, 0x52, 0x00, 0x3A, 0x0E, 0x7F, 0x56, 0x01, 0x00, 0x56, 0x00, 0x00, 0x7F, 0x51, 0xE9, 0x60, 0xD5, 0x51, 0xE7, 0x3F, 0xE8, 0x7F, 0x51, 0xE9, 0x60, 0xD5, 0x52, 0x00, 0x3F, 0xE8, 0x7F, 0x0E, 0xE9, 0x02, 0x51, 0xE9, 0x60, 0xD5, 0x7F, 0x62, 0xD0, 0x03, 0x52, 0x01, 0x53, 0xE5, 0x52, 0x00, 0x53, 0xE4, 0x7F, 0x52, 0x02, 0x53, 0xE8, 0x52, 0x01, 0x60, 0xD4, 0x3E, 0xE8, 0x95, 0x3E + } + }, + { + 405, + 79, + 0x39, + 0x01C0, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xC0, 0x7F, 0x70, 0xFB, 0x6F, 0x01, 0x6F, 0x02, 0x70, 0xFB, 0x6F, 0x01, 0x6F, 0x02, 0x7F, 0x62, 0xD0, 0x00, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x7F, 0x65, 0xE8, 0x6B, 0xE9, 0x51, 0xE8, 0x7F, 0x65, 0xE6, 0x6B, 0xE7, 0x65, 0xE6, 0x6B, 0xE7, 0x7F, 0x53, 0xE9, 0x62, 0xD0, 0x04, 0x51, 0xD4, 0x62, 0xD0, 0x00, 0x3A, 0xE9, 0x7F, 0x53, 0xE9, 0x62, 0xD0, 0x04, 0x51, 0xD9, 0x62, 0xD0, 0x00, 0x3A, 0xE9, 0x2E, 0x71 + } + }, + { + 406, + 79, + 0x39, + 0x01C1, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xC1, 0x7F, 0x62, 0xD0, 0x03, 0x55, 0xEF, 0x00, 0x55, 0xEE, 0x00, 0x7F, 0x56, 0x01, 0x00, 0x80, 0x03, 0x77, 0x01, 0x3D, 0x01, 0x0A, 0xCF, 0xFA, 0x62, 0xD0, 0x00, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xCF, 0x01, 0x7F, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x08, 0x57, 0x89, 0x28, 0x53, 0xE9, 0x18, 0x75, 0x09, 0x00, 0x28, 0x53, 0xE8, 0x20, 0x7F, 0x62, 0xD0, 0x04, 0x51, 0xAA, 0x62, 0xD0, 0x00, 0x12, 0xA0, 0x56 + } + }, + { + 407, + 79, + 0x39, + 0x01C2, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 
0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xC2, 0xE8, 0x62, 0xD0, 0x04, 0x51, 0xA9, 0x62, 0xD0, 0x00, 0x1A, 0xE9, 0x7F, 0x62, 0xD0, 0x03, 0x51, 0xDF, 0x62, 0xD0, 0x04, 0x12, 0xB2, 0x62, 0xD0, 0x03, 0x51, 0xDE, 0x62, 0xD0, 0x04, 0x1A, 0xB1, 0x7F, 0x62, 0xD0, 0x00, 0x51, 0xE9, 0x62, 0xD0, 0x03, 0x7F, 0x70, 0xFE, 0x62, 0xD0, 0x03, 0x51, 0xD9, 0x08, 0x51, 0xD8, 0x62, 0xD0, 0x00, 0x53, 0x0A, 0x18, 0x53, 0x0B, 0x71, 0x01, 0x7F, 0x53, 0x76, 0x03 + } + }, + { + 408, + 79, + 0x39, + 0x01C3, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xC3, 0xE6, 0x55, 0xE7, 0x00, 0x51, 0xE8, 0x12, 0xE6, 0x51, 0xE9, 0x1A, 0xE7, 0x7F, 0x60, 0xD4, 0x3E, 0xE8, 0x54, 0x03, 0x7F, 0x70, 0xFB, 0x6F, 0x01, 0x6F, 0x02, 0x7F, 0x62, 0xD0, 0x00, 0x51, 0x0E, 0x13, 0xFC, 0x62, 0xD0, 0x00, 0x54, 0x02, 0x7F, 0x70, 0xFE, 0x62, 0xD0, 0x00, 0x51, 0x0D, 0x7F, 0x52, 0xFB, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x7F, 0x5D, 0xC8, 0x62, 0xD0, 0x00, 0x39, 0x00, 0x7F, 0x80, 0x18 + } + }, + { + 409, + 79, + 0x39, + 0x01C4, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xC4, 0x52, 0x03, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x7F, 0x0E, 0xE9, 0x01, 0x51, 0xE9, 0x60, 0xD5, 0x7F, 0x71, 0x10, 0x5D, 0xE0, 0x54, 0x01, 0x41, 0xE0, 0xE7, 0x43, 0xE0, 0x18, 0x70, 0xCF, 0x62, 0xE3, 0x38, 0x7F, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x04, 0x53, 0xAD, 0x18, 0x53, 0xAE, 0x7F, 0x62, 0xD0, 0x00, 0x52, 0x01, 0x53, 0xE8, 0x52, 0x00, 0x53, 0xE9, 0x7F, 0x62, 0xD0, 0x04, 0x51, 0xA8, 0x08, 0xD8, 0xC9 + } + }, + { + 410, + 79, + 0x39, + 0x01C5, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xC5, 0x51, 0xA7, 0x62, 0xD0, 0x03, 0x53, 0xEE, 0x18, 0x53, 0xEF, 0x7F, 0x52, 0x05, 0x54, 0x03, 0x52, 0x02, 0x54, 0x04, 0x7F, 0x62, 0xD0, 0x00, 0x51, 0x0D, 0x08, 0x51, 0x0C, 0x62, 0xD0, 0x03, 0x53, 0xD4, 0x18, 0x53, 0xD5, 0x7F, 0x5D, 0xD6, 0x53, 0xE9, 0x2E, 0xE9, 0xFE, 0x51, 0xE9, 0x54, 0x02, 0x43, 0xD6, 0x01, 0x52, 0xFC, 0x7F, 0x53, 0xE8, 0x52, 0xFB, 0x09, 0x00, 0x60, 0xD5, 0x7F, 0x62, 0xD2, 0xBE + } + }, + { + 411, + 79, + 0x39, + 0x01C6, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xC6, 0xD0, 0x00, 0x51, 0xE9, 0x62, 0xD0, 0x04, 0x7F, 0x62, 0xD0, 0x04, 0x53, 0xCE, 0x62, 0xD0, 0x04, 0x51, 0xE0, 0x7F, 0x62, 0xD0, 0x04, 0x51, 0xE1, 0x62, 0xD0, 0x00, 0x3A, 0x0E, 0x7F, 0x62, 0xD0, 0x00, 0x52, 0x06, 0x53, 0xE8, 0x55, 0xE9, 0x00, 0x7F, 0x52, 0x06, 0x53, 0xE6, 0x55, 0xE7, 0x00, 0x7F, 0x62, 0xD0, 0x04, 0x52, 0x00, 0x3A, 0xE1, 0x7F, 0x52, 0x02, 0x03, 0x02, 0x54, 0x00, 0x07, 0x01, 0x1D + } + }, + { + 412, + 79, + 0x39, + 0x01C7, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xC7, 0x00, 0x2E, 0x7F, 0x51, 0xE8, 0x08, 0x51, 0xE9, 0x62, 0xD0, 0x04, 0x53, 0xA1, 0x18, 0x53, 0xA2, 0x7F, 0x12, 0xE8, 0x50, 0x00, 0x1A, 0xE9, 0x7F, 0x62, 0xD0, 0x03, 0x51, 0x9D, 0x62, 0xD0, 0x00, 0x7F, 0x62, 0xD0, 0x04, 0x51, 0xE1, 0x13, 0x01, 0x62, 0xD0, 0x00, 0x54, 0x00, 0x7F, 0x55, 0xDC, 0x00, 0x62, 0xD0, 0x04, 0x7F, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x54, 0x0A, 0x51, 0xE9, 0x54, 0x09, 0x45, 0xA6 + } + }, + { + 413, + 79, + 0x39, + 0x01C8, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xC8, 0x7F, 0x08, 0x57, 0x98, 0x28, 0x53, 0xE9, 0x18, 0x75, 0x09, 0x00, 0x28, 0x7F, 0x08, 0x57, 0x91, 0x28, 0x53, 0xE9, 0x18, 0x75, 0x09, 0x00, 0x28, 0x7F, 0x62, 0xD0, 0x00, 0x55, 0x0B, 0x01, 0x55, 0x0A, 0x00, 0x71, 0x01, 0x7F, 0x62, 0xD0, 0x00, 0x51, 0xE8, 0x54, 0x01, 0x51, 0xE9, 0x54, 0x00, 0x7F, 0x52, 0x04, 0x53, 0xE6, 0x55, 0xE7, 0x00, 0x7F, 0x50, 0x0D, 0x10, 0x57, 0x88, 
0x28, 0x20, 0x36, 0x89 + } + }, + { + 414, + 79, + 0x39, + 0x01C9, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xC9, 0x7F, 0x62, 0xD0, 0x04, 0x52, 0x00, 0x3A, 0xC0, 0x7F, 0x62, 0xD0, 0x00, 0x53, 0xE9, 0x51, 0xE8, 0x7F, 0x51, 0xAF, 0x60, 0x04, 0x62, 0xD0, 0x00, 0x7F, 0x52, 0xFC, 0x62, 0xD0, 0x04, 0x53, 0xD7, 0x56, 0xFC, 0x00, 0x7F, 0x62, 0xD0, 0x00, 0x50, 0x0D, 0x10, 0x57, 0x87, 0x28, 0x20, 0x7F, 0x62, 0xD0, 0x04, 0x51, 0xA8, 0x62, 0xD0, 0x03, 0x12, 0xEF, 0x7F, 0x62, 0xD0, 0x04, 0x51, 0xA7, 0x62, 0xF2, 0x02 + } + }, + { + 415, + 79, + 0x39, + 0x01CA, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xCA, 0xD0, 0x03, 0x1A, 0xEE, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xD1, 0x00, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xD6, 0x00, 0x7F, 0x62, 0xD0, 0x03, 0x47, 0x99, 0x70, 0x7F, 0x50, 0x00, 0x3F, 0xE8, 0x3F, 0xE8, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xB2, 0xFF, 0x55, 0xB1, 0xFF, 0x7F, 0x56, 0x01, 0x00, 0x56, 0x02, 0x00, 0x7F, 0x71, 0x10, 0x60, 0xE0, 0x70, 0xCF, 0x7F, 0x62, 0xD0, 0x03, 0x51, 0xF6, 0x62, 0xD0, 0x51, 0xC1 + } + }, + { + 416, + 79, + 0x39, + 0x01CB, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xCB, 0x03, 0x7F, 0x62, 0xD0, 0x03, 0x51, 0xF7, 0x62, 0xD0, 0x03, 0x7F, 0x71, 0x10, 0x43, 0xD7, 0x20, 0x43, 0xE0, 0x40, 0x7F, 0x52, 0xFA, 0x13, 0xF6, 0x52, 0xF9, 0x1B, 0xF5, 0x7F, 0x62, 0xD0, 0x00, 0x51, 0x0E, 0x62, 0xD0, 0x04, 0x7F, 0x3F, 0xE8, 0x62, 0xD0, 0x04, 0x51, 0xB6, 0x7F, 0x50, 0x0D, 0x10, 0x57, 0x86, 0x28, 0x20, 0x7F, 0x50, 0x0D, 0x10, 0x57, 0x9A, 0x28, 0x20, 0x7F, 0x62, 0xD0, 0x10, 0x40 + } + }, + { + 417, + 79, + 0x39, + 0x01CC, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xCC, 0x00, 0x52, 0x00, 0x13, 0x01, 0x7F, 0x62, 0xD0, 0x00, 0x52, 0x01, 0x13, 0x00, 0x7F, 0x50, 0x0D, 0x10, 0x57, 0x9B, 0x28, 0x20, 0x7F, 0x60, 0x0C, 0x62, 0xD0, 0x00, 0x51, 0xB2, 0x7F, 0x60, 0x08, 0x62, 0xD0, 0x00, 0x51, 0xB1, 0x7F, 0x62, 0xD0, 0x03, 0x51, 0x9B, 0x21, 0x0F, 0x7F, 0x62, 0xD0, 0x03, 0x47, 0x99, 0x04, 0x7F, 0x62, 0xD0, 0x04, 0x3C, 0xCD, 0x02, 0x7F, 0x06, 0xE8, 0x01, 0x0E, 0x82, 0x25 + } + }, + { + 418, + 79, + 0x39, + 0x01CD, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xCD, 0xE9, 0x00, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xCC, 0x00, 0x7F, 0x13, 0x03, 0x51, 0xE9, 0x1B, 0x02, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xD7, 0x00, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xD9, 0x00, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xD5, 0x00, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xDD, 0x01, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xCB, 0xF4, 0x7F, 0x62, 0xD0, 0x04, 0x55, 0xB4, 0x00, 0x7F, 0x41, 0xD7, 0xDF, 0x41, 0xE0, 0x54, 0xCA + } + }, + { + 419, + 79, + 0x39, + 0x01CE, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xCE, 0xBF, 0x7F, 0x41, 0xE0, 0xEF, 0x62, 0xDA, 0xEF, 0x7F, 0x41, 0xE0, 0x7F, 0x62, 0xDA, 0x7F, 0x7F, 0x62, 0xD0, 0x00, 0x3C, 0xBA, 0x00, 0x7F, 0x00, 0xBF, 0x00, 0x20, 0x00, 0xEA, 0x00, 0x06, 0x01, 0x00, 0x00, 0xA0, 0x02, 0xB1, 0x00, 0x4F, 0x03, 0x99, 0x00, 0x47, 0x03, 0xE0, 0x01, 0x0D, 0x03, 0xE1, 0x00, 0x1F, 0x04, 0x99, 0x00, 0x49, 0x04, 0xE2, 0x02, 0x01, 0x00, 0xFF, 0x00, 0x30, 0x30, 0xF0, 0x03 + } + }, + { + 420, + 79, + 0x39, + 0x01CF, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xCF, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x24 + } + }, + { + 421, + 79, + 0x39, + 0x01D0, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xD0, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x25 + } + }, + { + 422, + 79, + 0x39, + 0x01D1, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xD1, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x26 + } + }, + { + 423, + 79, + 0x39, + 0x01D2, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xD2, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x27 + } + }, + { + 424, + 79, + 0x39, + 0x01D3, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xD3, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x28 + } + }, + { + 425, + 79, + 0x39, + 0x01D4, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xD4, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x29 + } + }, + { + 426, + 79, + 0x39, + 0x01D5, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xD5, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x2A + } + }, + { + 427, + 79, + 0x39, + 0x01D6, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xD6, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x2B + } + }, + { + 428, + 79, + 0x39, + 0x01D7, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xD7, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x2C + } + }, + { + 429, + 79, + 0x39, + 0x01D8, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xD8, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x2D + } + }, + { + 430, + 79, + 0x39, + 0x01D9, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xD9, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x2E + } + }, + { + 431, + 79, + 0x39, + 0x01DA, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xDA, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x2F + } + }, + { + 432, + 79, + 0x39, + 0x01DB, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xDB, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x30 + } + }, + { + 433, + 79, + 0x39, + 0x01DC, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xDC, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x31 + } + }, + { + 434, + 79, + 0x39, + 0x01DD, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xDD, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x32 + } + }, + { + 435, + 79, + 0x39, + 0x01DE, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xDE, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x33 + } + }, + { + 436, + 79, + 0x39, + 0x01DF, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xDF, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x34 + } + }, + { + 437, + 79, + 0x39, + 0x01E0, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xE0, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x35 + } + }, + { + 438, + 79, + 0x39, + 0x01E1, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xE1, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x36 + } + }, + { + 439, + 79, + 0x39, + 0x01E2, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xE2, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x37 + } + }, + { + 440, + 79, + 0x39, + 0x01E3, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xE3, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x38 + } + }, + { + 441, + 79, + 0x39, + 0x01E4, + { + 0x00, 0xFF, 0x39, 
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xE4, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x39 + } + }, + { + 442, + 79, + 0x39, + 0x01E5, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xE5, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x3A + } + }, + { + 443, + 79, + 0x39, + 0x01E6, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xE6, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x3B + } + }, + { + 444, + 79, + 0x39, + 0x01E7, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xE7, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x3C + } + }, + { + 445, + 79, + 0x39, + 0x01E8, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xE8, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x3D + } + }, + { + 446, + 79, + 0x39, + 0x01E9, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xE9, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x3E + } + }, + { + 447, + 79, + 0x39, + 0x01EA, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xEA, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 
0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x3F + } + }, + { + 448, + 79, + 0x39, + 0x01EB, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xEB, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x40 + } + }, + { + 449, + 79, + 0x39, + 0x01EC, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xEC, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x41 + } + }, + { + 450, + 79, + 0x39, + 0x01ED, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xED, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x42 + } + }, + { + 451, + 79, + 0x39, + 0x01EE, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xEE, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x43 + } + }, + { + 452, + 79, + 0x39, + 0x01EF, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xEF, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x44 + } + }, + { + 453, + 79, + 0x39, + 0x01F0, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xF0, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x45 + } + }, + { + 454, + 79, + 0x39, + 0x01F1, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xF1, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x46 + } + }, + { + 455, + 79, + 0x39, + 0x01F2, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xF2, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x47 + } + }, + { + 456, + 79, + 0x39, + 0x01F3, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xF3, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x48 + } + }, + { + 457, + 79, + 0x39, + 0x01F4, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xF4, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x49 + } + }, + { + 458, + 79, + 0x39, + 0x01F5, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xF5, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x4A + } + }, + { + 459, + 79, + 0x39, + 0x01F6, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xF6, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x4B + } + }, + { + 460, + 79, + 0x39, + 0x01F7, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xF7, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x4C + } + }, + { + 461, + 79, + 0x39, + 0x01F8, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xF8, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x4D + } + }, + { + 462, + 79, + 0x39, + 0x01F9, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xF9, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x4E + } + }, + { + 463, + 79, + 0x39, + 0x01FA, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xFA, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x4F + } + }, + { + 464, + 79, + 0x39, + 0x01FB, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xFB, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x50 + } + }, + { + 465, + 79, + 0x39, + 0x01FC, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xFC, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x51 + } + }, + { + 466, + 79, + 0x39, + 0x01FD, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xFD, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x52 + } + }, + { + 467, + 79, + 0x39, + 0x01FE, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xFE, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x53 + } + }, + { + 468, + 79, + 0x39, + 0x01FF, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x01, 0xFF, 0x30, 0x30, 0x30, 0x30, 0x30, 
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x54 + } + }, + { + 469, + 79, + 0x39, + 0x001E, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x1E, 0x19, 0xE5, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x01, 0x0B, 0x10, 0x12, 0xA0, 0x02, 0x04, 0x00, 0xC0, 0xC1, 0xC2, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0xA5, 0xBC + } + }, + { + 470, + 79, + 0x39, + 0x001F, + { + 0x00, 0xFF, 0x39, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x1F, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0xA0, 0x07, 0x5F, 0xF8, 0x3E, 0xEF + } + }, + { + 471, + 11, + 0x3B, + -1, + { + 0x00, 0xFF, 0x3B, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 + } + }, +}; + +unsigned short cyttsp_fw_records = 472; + +unsigned char cyttsp_fw_tts_verh = 0x10; +unsigned char cyttsp_fw_tts_verl = 0x12; +unsigned char cyttsp_fw_app_idh = 0xA0; +unsigned char cyttsp_fw_app_idl = 0x02; +unsigned char cyttsp_fw_app_verh = 0x04; +unsigned char cyttsp_fw_app_verl = 0x00; +unsigned char cyttsp_fw_cid_0 = 0xC0; +unsigned char cyttsp_fw_cid_1 = 0xC1; +unsigned char cyttsp_fw_cid_2 = 0xC2; diff --git a/drivers/input/touchscreen/elan8232_i2c.c b/drivers/input/touchscreen/elan8232_i2c.c new file mode 100644 index 0000000000000..4870b22b11539 --- /dev/null +++ b/drivers/input/touchscreen/elan8232_i2c.c @@ -0,0 +1,829 @@ +/* + * Copyright (C) 2007-2008 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +static const char EKT8232NAME[] = "elan-touch"; + +#define ELAN_TS_FUZZ 0 +#define ELAN_TS_FLAT 0 +#define IDX_PACKET_SIZE 9 + +enum { + STATE_DEEP_SLEEP = 0, + STATE_NORMAL = 1U, + STATE_MASK = 0x08, + cmd_reponse_packet = 0x52, + read_cmd_packet = 0x53, + write_cmd_packet = 0x54, + hello_packet = 0x55, + enable_int = 0xa6, + disable_int = 0x56, + idx_coordinate_packet = 0x5a, +}; + +enum { + idx_finger_width = 7, + idx_finger_state = 8, +}; + +static struct workqueue_struct *elan_wq; + +static struct ekt8232_data { + int intr_gpio; + int use_irq; + /* delete when finish migration */ + int fw_ver; + struct hrtimer timer; + struct work_struct work; + struct i2c_client *client; + struct input_dev *input; + wait_queue_head_t wait; + int (*power)(int on); + struct early_suspend early_suspend; +} ekt_data; + +#ifdef CONFIG_HAS_EARLYSUSPEND +static void elan_ts_early_suspend(struct early_suspend *h); +static void elan_ts_late_resume(struct early_suspend *h); +#endif + +static ssize_t touch_vendor_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t ret = 0; + + sprintf(buf, "%s_%#x\n", EKT8232NAME, ekt_data.fw_ver); + ret = strlen(buf) + 1; + + return ret; +} + +static DEVICE_ATTR(vendor, 0444, touch_vendor_show, NULL); + +static struct kobject *android_touch_kobj; + +static int touch_sysfs_init(void) +{ + int ret ; + + android_touch_kobj = kobject_create_and_add("android_touch", NULL) ; + if (android_touch_kobj == NULL) { + printk(KERN_INFO + "touch_sysfs_init: subsystem_register failed\n"); + ret = -ENOMEM; + goto err; + } + + ret = sysfs_create_file(android_touch_kobj, &dev_attr_vendor.attr); + if (ret) { + printk(KERN_INFO + "touch_sysfs_init: sysfs_create_group failed\n"); + goto err4; + } + + return 0 ; +err4: + kobject_del(android_touch_kobj); +err: + return ret ; +} + +static int ekt8232_detect_int_level(void) +{ + unsigned v; + v = gpio_get_value(ekt_data.intr_gpio); + /* printk("ekt8232_detect_int_level: v = %0x\n", v); */ + return v; +} + +static int __ekt8232_poll(struct i2c_client *client) +{ + int status = 0, retry = 10; + + do { + status = ekt8232_detect_int_level(); + dev_dbg(&client->dev, "%s: status = %d\n", __func__, status); + retry--; + mdelay(20); + } while (status == 1 && retry > 0); + + dev_dbg(&client->dev, "%s: poll interrupt status %s\n", + __func__, status == 1 ? "high" : "low"); + return (status == 0 ? 
0 : -ETIMEDOUT); +} + +static int ekt8232_poll(struct i2c_client *client) +{ + return __ekt8232_poll(client); +} + +static int ekt8232_get_data(struct i2c_client *client, uint8_t *cmd, + uint8_t *buf, size_t size, int sleep) +{ + int rc; + unsigned time_out = msecs_to_jiffies(10); + + dev_dbg(&client->dev, "%s: enter.\n", __func__); + + if (buf == NULL) + return -EINVAL; + + if ((i2c_master_send(client, cmd, 4)) != 4) { + dev_err(&client->dev, + "%s: i2c_master_send failed\n", __func__); + return -EINVAL; + } + + if (sleep == 1) { + rc = wait_event_timeout(ekt_data.wait, + i2c_master_recv(client, buf, size) == size && + buf[0] == cmd_reponse_packet, time_out); + if (rc == 0) { + dev_err(&client->dev, + "%s: i2c_master_recv failed\n", __func__); + return -ETIMEDOUT; + } + } else { + rc = ekt8232_poll(client); + if (rc < 0) + return -EINVAL; + else { + if (i2c_master_recv(client, buf, size) != size || + buf[0] != cmd_reponse_packet) + return -EINVAL; + } + } + + return 0; +} + +static int __hello_packet_handler(struct i2c_client *client) +{ + int rc; + uint8_t buf_recv[4] = { 0 }; + + rc = ekt8232_poll(client); + if (rc < 0) { + dev_err(&client->dev, "%s: failed!\n", __func__); + return -EINVAL; + } + + rc = i2c_master_recv(client, buf_recv, 4); + if (rc != 4) { + dev_err(&client->dev, + "%s: get hello packet failed!, rc = %d\n", + __func__, rc); + return rc; + } else { + int i; + dev_dbg(&client->dev, + "dump hello packet: %0x, %0x, %0x, %0x\n", + buf_recv[0], buf_recv[1], buf_recv[2], buf_recv[3]); + + for (i = 0; i < 4; i++) + if (buf_recv[i] != hello_packet) + return -EINVAL; + } + + return 0; +} + +static int __fw_packet_handler(struct i2c_client *client) +{ + int rc; + int major, minor; + uint8_t cmd[] = { read_cmd_packet, 0x00, 0x00, 0x01 }; + uint8_t buf_recv[4] = { 0 }; + + rc = ekt8232_get_data(client, cmd, buf_recv, 4, 0); + if (rc < 0) + return rc; + + major = ((buf_recv[1] & 0x0f) << 4) | ((buf_recv[2] & 0xf0) >> 4); + minor = ((buf_recv[2] & 0x0f) << 4) | ((buf_recv[3] & 0xf0) >> 4); + + /* delete after migration */ + ekt_data.fw_ver = major << 8 | minor; + + printk(KERN_INFO "%s: firmware version: 0x%x\n", + __func__, ekt_data.fw_ver); + return 0; +} + +static int __set_report_type(struct i2c_client *client) +{ + return 0; +} + +static inline int ekt8232_parse_xy(uint8_t *data, uint16_t *x, uint16_t *y) +{ + *x = (data[0] & 0xf0); + *x <<= 4; + *x |= data[1]; + + *y = (data[0] & 0x0f); + *y <<= 8; + *y |= data[2]; + + return 0; +} + +/* ekt8232_ts_init -- hand shaking with touch panel + * + * 1. recv hello packet + * 2. check its' firmware version + * 3. set up sensitivity, report rate, ... 
+ */ +static int ekt8232_ts_init(struct i2c_client *client) +{ + int rc; + + rc = __hello_packet_handler(client); + if (rc < 0) + goto hand_shake_failed; + dev_dbg(&client->dev, "%s: hello packet got.\n", __func__); + + rc = __fw_packet_handler(client); + if (rc < 0) + goto hand_shake_failed; + dev_dbg(&client->dev, "%s: firmware checking done.\n", __func__); + + rc = __set_report_type(client); + if (rc < 0) + goto hand_shake_failed; + dev_dbg(&client->dev, + "%s: channging operating mode done.\n", __func__); + + if (ekt_data.fw_ver == 0x103) { + uint8_t cmd[4] = {0x5c, 0x10, 0x00, 0x01}; + if ((i2c_master_send(client, cmd, 4)) != 4) { + dev_err(&client->dev, + "%s: set adc failed\n", __func__); + } + cmd[0] = 0x54; + cmd[0] = 0x43; + cmd[0] = 0x00; + cmd[0] = 0x01; + if ((i2c_master_send(client, cmd, 4)) != 4) { + dev_err(&client->dev, + "%s: set gain failed\n", __func__); + } + } + +hand_shake_failed: + return rc; +} + +static int ekt8232_set_power_state(struct i2c_client *client, int state) +{ + uint8_t cmd[] = {write_cmd_packet, 0x50, 0x00, 0x01}; + + dev_dbg(&client->dev, "%s: enter.\n", __func__); + + cmd[1] |= (state << 3); + + dev_dbg(&client->dev, + "dump cmd: %02x, %02x, %02x, %02x\n", + cmd[0], cmd[1], cmd[2], cmd[3]); + + if ((i2c_master_send(client, cmd, sizeof(cmd))) != sizeof(cmd)) { + dev_err(&client->dev, + "%s: i2c_master_send failed\n", __func__); + return -EINVAL; + } + + return 0; +} + +static int ekt8232_get_power_state(struct i2c_client *client) +{ + int rc = 0; + uint8_t cmd[] = { read_cmd_packet, 0x50, 0x00, 0x01 }; + uint8_t buf[4], power_state; + + rc = ekt8232_get_data(client, cmd, buf, 4, 0); + if (rc) + return rc; + else { + power_state = buf[1]; + dev_dbg(&client->dev, "dump repsponse: %0x\n", power_state); + + power_state = (power_state & STATE_MASK) >> 3; + dev_dbg(&client->dev, "power state = %s\n", + power_state == STATE_DEEP_SLEEP ? + "Deep Sleep" : "Normal/Idle"); + return power_state; + } +} + +static int ekt8232_recv_data(struct i2c_client *client, uint8_t *buf) +{ + int rc, bytes_to_recv = IDX_PACKET_SIZE; + int retry = 5; + + if (ekt_data.fw_ver == 0x101) + bytes_to_recv = 8; + + if (buf == NULL) + return -EINVAL; + + memset(buf, 0, bytes_to_recv); + rc = i2c_master_recv(client, buf, bytes_to_recv); + + if (rc != bytes_to_recv) { + dev_err(&client->dev, + "%s: i2c_master_recv error?! \n", __func__); + /* power off level shift */ + ekt_data.power(0); + msleep(5); + /* power on level shift */ + ekt_data.power(1); + /* re-initial */ + if (ekt_data.fw_ver > 0x101) { + msleep(100); + rc = ekt8232_ts_init(client); + } else { + do { + rc = ekt8232_set_power_state(client, + STATE_NORMAL); + + rc = ekt8232_get_power_state(client); + if (rc != STATE_NORMAL) + dev_err(&client->dev, + "%s: wake up tp failed! 
\ + err = %d\n", + __func__, rc); + else + break; + } while (--retry); + } + if (ekt8232_detect_int_level() == 0) + queue_work(elan_wq, &ekt_data.work); + return -EINVAL; + } + + return rc; +} + +static inline void ekt8232_parse_width(uint8_t data, uint8_t *w1, uint8_t *w2) +{ + *w1 = *w2 = 0; + *w1 = (data & 0xf0) >> 4; + *w2 = data & 0x0f; +} + +static void ekt8232_report_data(struct i2c_client *client, uint8_t *buf) +{ + static unsigned report_time; + unsigned report_time2; + + switch (buf[0]) { + case idx_coordinate_packet: { + uint16_t x1, x2, y1, y2; + uint8_t finger_stat, w1 = 1, w2 = 1; + + ekt8232_parse_xy(&buf[1], &x1, &y1); + if (ekt_data.fw_ver == 0x101) { + finger_stat = buf[7] >> 1; + } else { + ekt8232_parse_width(buf[idx_finger_width], &w1, &w2); + finger_stat = buf[idx_finger_state] >> 1; + } + + if (finger_stat != 0) { + input_report_abs(ekt_data.input, ABS_X, x1); + if (ekt_data.fw_ver == 0x101) + input_report_abs(ekt_data.input, ABS_Y, + (544 - 1) - y1); + else + input_report_abs(ekt_data.input, ABS_Y, y1); + /* only report finger width at y */ + input_report_abs(ekt_data.input, ABS_TOOL_WIDTH, w2); + } + + dev_dbg(&client->dev, + "x1 = %d, y1 = %d, \ + w1 = %d, w2 = %d, finger status = %d\n", + x1, y1, w1, w2, finger_stat); + + input_report_abs(ekt_data.input, ABS_PRESSURE, 100); + input_report_key(ekt_data.input, BTN_TOUCH, finger_stat); + input_report_key(ekt_data.input, BTN_2, finger_stat == 2); + + if (finger_stat > 1) { + ekt8232_parse_xy(&buf[4], &x2, &y2); + dev_dbg(&client->dev, "x2 = %d, y2 = %d\n", x2, y2); + input_report_abs(ekt_data.input, ABS_HAT0X, x2); + input_report_abs(ekt_data.input, ABS_HAT0Y, y2); + } + input_sync(ekt_data.input); + break; + } + default: + dev_err(&client->dev, + "%s: Unknown packet type: %0x\n", __func__, buf[0]); + break; + } + + report_time2 = jiffies; + dev_dbg(&client->dev, + "report time = %d\n", + jiffies_to_msecs(report_time2 - report_time)); + + report_time = report_time2; + +} + +static void ekt8232_work_func(struct work_struct *work) +{ + int rc; + uint8_t buf[IDX_PACKET_SIZE] = { 0 }; + struct i2c_client *client = ekt_data.client; + + /* dev_dbg(&client->dev, "%s: enter. \n", __func__); */ + + /* this means that we have already serviced it */ + if (ekt8232_detect_int_level()) + return; + + rc = ekt8232_recv_data(client, buf); + if (rc < 0) + return; + + ekt8232_report_data(client, buf); +} + +static irqreturn_t ekt8232_ts_interrupt(int irq, void *dev_id) +{ + /* the queue_work has spin_lock protection */ + /* disable_irq(irq); */ + queue_work(elan_wq, &ekt_data.work); + + return IRQ_HANDLED; +} + +static enum hrtimer_restart ekt8232_ts_timer_func(struct hrtimer *timer) +{ + queue_work(elan_wq, &ekt_data.work); + hrtimer_start(&ekt_data.timer, + ktime_set(0, 12500000), + HRTIMER_MODE_REL); + + return HRTIMER_NORESTART; +} + +static int ekt8232_register_interrupt(struct i2c_client *client) +{ + int err = 0; + + if (client->irq) { + ekt_data.use_irq = 1; + + err = request_irq(client->irq, ekt8232_ts_interrupt, 0, + EKT8232NAME, &ekt_data); + if (err < 0) { + dev_err(&client->dev, + "%s(%s): Can't allocate irq %d\n", + __FILE__, __func__, client->irq); + ekt_data.use_irq = 0; + } + } + + if (!ekt_data.use_irq) { + hrtimer_init(&ekt_data.timer, + CLOCK_MONOTONIC, HRTIMER_MODE_REL); + ekt_data.timer.function = ekt8232_ts_timer_func; + hrtimer_start(&ekt_data.timer, ktime_set(1, 0), + HRTIMER_MODE_REL); + } + + dev_dbg(&client->dev, + "elan starts in %s mode.\n", + ekt_data.use_irq == 1 ? 
"interrupt":"polling"); + return 0; +} + +static int ekt8232_probe( + struct i2c_client *client, const struct i2c_device_id *id) +{ + int err = 0; + struct elan_i2c_platform_data *pdata; + int x_max, y_max; + uint8_t x_resolution_cmd[] = { read_cmd_packet, 0x60, 0x00, 0x01 }; + uint8_t y_resolution_cmd[] = { read_cmd_packet, 0x63, 0x00, 0x01 }; + uint8_t buf_recv[4] = { 0 }; + + elan_wq = create_singlethread_workqueue("elan_wq"); + if (!elan_wq) { + err = -ENOMEM; + goto fail; + } + + printk(KERN_INFO "ekt8232_probe enter.\n"); + dev_dbg(&client->dev, "ekt8232_probe enter.\n"); + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + dev_err(&client->dev, + "No supported i2c func what we need?!!\n"); + err = -ENOTSUPP; + goto fail; + } + + ekt_data.client = client; + strlcpy(client->name, EKT8232NAME, I2C_NAME_SIZE); + i2c_set_clientdata(client, &ekt_data); + INIT_WORK(&ekt_data.work, ekt8232_work_func); + init_waitqueue_head(&ekt_data.wait); + + ekt_data.input = input_allocate_device(); + if (ekt_data.input == NULL) { + err = -ENOMEM; + goto fail; + } + + pdata = client->dev.platform_data; + if (likely(pdata != NULL)) { + ekt_data.intr_gpio = + ((struct elan_i2c_platform_data *)pdata)->intr_gpio; + ekt_data.power = + ((struct elan_i2c_platform_data *)pdata)->power; + ekt_data.power(1); + dev_info(&client->dev, "touch panel is powered on. \n"); + mdelay(500); /* elan will be ready after about 500 ms */ + } else { + dev_err(&client->dev, "without platform data??!!\n"); + } + + err = ekt8232_ts_init(client); + if (err < 0) { + printk(KERN_INFO "looks like it's not Elan, so..i'll quit\n"); + err = -ENODEV; + goto fail; + } + + if (pdata) { + while (pdata->version > ekt_data.fw_ver) { + printk(KERN_INFO "ekt8232_probe: old tp detected, " + "panel version = 0x%x\n", + ekt_data.fw_ver); + pdata++; + } + } + printk(KERN_INFO "ekt8232_register_input\n"); + + ekt_data.input->name = EKT8232NAME; + ekt_data.input->id.bustype = BUS_I2C; + set_bit(EV_SYN, ekt_data.input->evbit); + set_bit(EV_KEY, ekt_data.input->evbit); + set_bit(BTN_TOUCH, ekt_data.input->keybit); + set_bit(BTN_2, ekt_data.input->keybit); + set_bit(EV_ABS, ekt_data.input->evbit); + + if (ekt_data.fw_ver >= 0x104) { + err = ekt8232_get_data(ekt_data.client, x_resolution_cmd, + buf_recv, 4, 0); + if (err < 0) { + dev_err(&client->dev, + "%s: get x resolution failed, err = %d\n", + __func__, err); + goto fail; + } + + x_max = ((buf_recv[3] & 0xf0) << 4) | ((buf_recv[2] & 0xff)); + printk(KERN_INFO "ekt8232_probe: x_max: %d\n", x_max); + + err = ekt8232_get_data(ekt_data.client, y_resolution_cmd, + buf_recv, 4, 0); + if (err < 0) { + dev_err(&client->dev, + "%s: get y resolution failed, err = %d\n", + __func__, err); + goto fail; + } + + y_max = ((buf_recv[3] & 0xf0) << 4) | ((buf_recv[2] & 0xff)); + printk(KERN_INFO "ekt8232_probe: y_max: %d\n", y_max); + input_set_abs_params(ekt_data.input, ABS_X, + pdata->abs_x_min, x_max, + ELAN_TS_FUZZ, ELAN_TS_FLAT); + input_set_abs_params(ekt_data.input, ABS_Y, + pdata->abs_y_min, y_max, + ELAN_TS_FUZZ, ELAN_TS_FLAT); + input_set_abs_params(ekt_data.input, ABS_HAT0X, + pdata->abs_x_min, x_max, + ELAN_TS_FUZZ, ELAN_TS_FLAT); + input_set_abs_params(ekt_data.input, ABS_HAT0Y, + pdata->abs_y_min, y_max, + ELAN_TS_FUZZ, ELAN_TS_FLAT); + } else { + input_set_abs_params(ekt_data.input, ABS_X, + pdata->abs_x_min, pdata->abs_x_max, + ELAN_TS_FUZZ, ELAN_TS_FLAT); + input_set_abs_params(ekt_data.input, ABS_Y, + pdata->abs_y_min, pdata->abs_y_max, + ELAN_TS_FUZZ, ELAN_TS_FLAT); + 
input_set_abs_params(ekt_data.input, ABS_HAT0X, + pdata->abs_x_min, pdata->abs_x_max, + ELAN_TS_FUZZ, ELAN_TS_FLAT); + input_set_abs_params(ekt_data.input, ABS_HAT0Y, + pdata->abs_y_min, pdata->abs_y_max, + ELAN_TS_FUZZ, ELAN_TS_FLAT); + input_set_abs_params(ekt_data.input, ABS_PRESSURE, 0, 255, + ELAN_TS_FUZZ, ELAN_TS_FLAT); + input_set_abs_params(ekt_data.input, ABS_TOOL_WIDTH, 1, 8, + 1, ELAN_TS_FLAT); + } + + err = input_register_device(ekt_data.input); + if (err < 0) { + dev_err(&client->dev, + "%s: input_register_device failed, err = %d\n", + __func__, err); + goto fail; + } + + ekt8232_register_interrupt(ekt_data.client); + + /* checking the interrupt to avoid missing any interrupt */ + if (ekt8232_detect_int_level() == 0) + ekt8232_ts_interrupt(client->irq, NULL); +#ifdef CONFIG_HAS_EARLYSUSPEND + ekt_data.early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1; + ekt_data.early_suspend.suspend = elan_ts_early_suspend; + ekt_data.early_suspend.resume = elan_ts_late_resume; + register_early_suspend(&ekt_data.early_suspend); +#endif + touch_sysfs_init(); + return 0; + +fail: + input_free_device(ekt_data.input); + if (elan_wq) + destroy_workqueue(elan_wq); + return err; +} + +static int ekt8232_remove(struct i2c_client *client) +{ + struct ekt8232_data *tp = i2c_get_clientdata(client); + + if (elan_wq) + destroy_workqueue(elan_wq); + + dev_dbg(&client->dev, "%s: enter.\n", __func__); + + input_unregister_device(tp->input); + + if (ekt_data.use_irq) + free_irq(client->irq, tp); + else + hrtimer_cancel(&ekt_data.timer); + return 0; +} + +static int ekt8232_suspend(struct i2c_client *client, pm_message_t mesg) +{ + uint8_t cmd[4]; + int rc = 0; + + dev_dbg(&client->dev, "%s: enter. irq = %d\n", __func__, client->irq); + + cancel_work_sync(&ekt_data.work); + + rc = ekt8232_set_power_state(client, STATE_DEEP_SLEEP); +/* + rc = ekt8232_get_power_state(client); + if (rc < 0 || rc != STATE_DEEP_SLEEP) + dev_err(&client->dev, + "%s: put tp into sleep failed, err = %d!\n", + __func__, rc); +*/ + /* disable tp interrupt */ + if (ekt_data.fw_ver > 0x101) { + memset(cmd, disable_int, 4); + if ((i2c_master_send(client, cmd, sizeof(cmd))) != sizeof(cmd)) + dev_err(&client->dev, + "%s: tp disable interrupt failed\n", __func__); + } + + /* power off level shift */ + ekt_data.power(0); + + return 0; +} + +static int ekt8232_resume(struct i2c_client *client) +{ + int rc = 0, retry = 5; + + dev_dbg(&client->dev, + "%s: enter. irq = %d\n", __func__, client->irq); + + disable_irq(client->irq); + + /* power on level shift */ + ekt_data.power(1); + + /* re-initial */ + if (ekt_data.fw_ver > 0x101) { + msleep(500); + rc = ekt8232_ts_init(client); + } else { + do { + rc = ekt8232_set_power_state(client, STATE_NORMAL); + rc = ekt8232_get_power_state(client); + if (rc != STATE_NORMAL) + dev_err(&client->dev, + "%s: wake up tp failed! 
err = %d\n", + __func__, rc); + else + break; + } while (--retry); + } + + enable_irq(client->irq); + + if (ekt8232_detect_int_level() == 0) + ekt8232_ts_interrupt(client->irq, NULL); + + return 0; +} + + +#ifdef CONFIG_HAS_EARLYSUSPEND +static void elan_ts_early_suspend(struct early_suspend *h) +{ + struct i2c_client *client = ekt_data.client; + + dev_dbg(&client->dev, "%s enter.\n", __func__); + ekt8232_suspend(client, PMSG_SUSPEND); +} + +static void elan_ts_late_resume(struct early_suspend *h) +{ + struct i2c_client *client = ekt_data.client; + + dev_dbg(&client->dev, "%s enter.\n", __func__); + ekt8232_resume(client); +} +#endif + +/* -------------------------------------------------------------------- */ +static const struct i2c_device_id ekt8232_ts_id[] = { + { ELAN_8232_I2C_NAME, 0 }, + { } +}; + +static struct i2c_driver ekt8232_driver = { + .probe = ekt8232_probe, + .remove = ekt8232_remove, +#ifndef CONFIG_HAS_EARLYSUSPEND + .suspend = ekt8232_suspend, + .resume = ekt8232_resume, +#endif + .id_table = ekt8232_ts_id, + .driver = { + .name = ELAN_8232_I2C_NAME, + }, +}; + +static int __init ekt8232_init(void) +{ + return i2c_add_driver(&ekt8232_driver); +} + +static void __exit ekt8232_exit(void) +{ + i2c_del_driver(&ekt8232_driver); +} + +module_init(ekt8232_init); +module_exit(ekt8232_exit); + +MODULE_AUTHOR("Shan-Fu Chiou , " + "Jay Tu "); +MODULE_DESCRIPTION("ELAN ekt8232 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/input/touchscreen/msm_ts.c b/drivers/input/touchscreen/msm_ts.c new file mode 100644 index 0000000000000..dff730ad86c33 --- /dev/null +++ b/drivers/input/touchscreen/msm_ts.c @@ -0,0 +1,344 @@ +/* drivers/input/touchscreen/msm_ts.c + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * TODO: + * - Add a timer to simulate a pen_up in case there's a timeout. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define TSSC_CTL 0x100 +#define TSSC_CTL_PENUP_IRQ (1 << 12) +#define TSSC_CTL_DATA_FLAG (1 << 11) +#define TSSC_CTL_DEBOUNCE_EN (1 << 6) +#define TSSC_CTL_EN_AVERAGE (1 << 5) +#define TSSC_CTL_MODE_MASTER (3 << 3) +#define TSSC_CTL_ENABLE (1 << 0) +#define TSSC_OPN 0x104 +#define TSSC_OPN_NOOP 0x00 +#define TSSC_OPN_4WIRE_X 0x01 +#define TSSC_OPN_4WIRE_Y 0x02 +#define TSSC_OPN_4WIRE_Z1 0x03 +#define TSSC_OPN_4WIRE_Z2 0x04 +#define TSSC_SAMPLING_INT 0x108 +#define TSSC_STATUS 0x10c +#define TSSC_AVG_12 0x110 +#define TSSC_AVG_34 0x114 +#define TSSC_SAMPLE(op,samp) ((0x118 + ((op & 0x3) * 0x20)) + \ + ((samp & 0x7) * 0x4)) +#define TSSC_TEST_1 0x198 +#define TSSC_TEST_2 0x19c + +struct msm_ts { + struct msm_ts_platform_data *pdata; + struct input_dev *input_dev; + void __iomem *tssc_base; + uint32_t ts_down:1; + struct ts_virt_key *vkey_down; +}; + +static uint32_t msm_tsdebug; +module_param_named(tsdebug, msm_tsdebug, uint, 0664); + +#define tssc_readl(t, a) (readl(((t)->tssc_base) + (a))) +#define tssc_writel(t, v, a) do {writel(v, ((t)->tssc_base) + (a));} while(0) + +static void setup_next_sample(struct msm_ts *ts) +{ + uint32_t tmp; + + /* 1.2ms debounce time */ + tmp = ((2 << 7) | TSSC_CTL_DEBOUNCE_EN | TSSC_CTL_EN_AVERAGE | + TSSC_CTL_MODE_MASTER | TSSC_CTL_ENABLE); + tssc_writel(ts, tmp, TSSC_CTL); +} + +static struct ts_virt_key *find_virt_key(struct msm_ts *ts, + struct msm_ts_virtual_keys *vkeys, + uint32_t val) +{ + int i; + + if (!vkeys) + return NULL; + + for (i = 0; i < vkeys->num_keys; ++i) + if ((val >= vkeys->keys[i].min) && (val <= vkeys->keys[i].max)) + return &vkeys->keys[i]; + return NULL; +} + + +static irqreturn_t msm_ts_irq(int irq, void *dev_id) +{ + struct msm_ts *ts = dev_id; + struct msm_ts_platform_data *pdata = ts->pdata; + + uint32_t tssc_avg12, tssc_avg34, tssc_status, tssc_ctl; + int x, y, z1, z2; + int was_down; + int down; + + tssc_ctl = tssc_readl(ts, TSSC_CTL); + tssc_status = tssc_readl(ts, TSSC_STATUS); + tssc_avg12 = tssc_readl(ts, TSSC_AVG_12); + tssc_avg34 = tssc_readl(ts, TSSC_AVG_34); + + setup_next_sample(ts); + + x = tssc_avg12 & 0xffff; + y = tssc_avg12 >> 16; + z1 = tssc_avg34 & 0xffff; + z2 = tssc_avg34 >> 16; + + /* invert the inputs if necessary */ + if (pdata->inv_x) x = pdata->inv_x - x; + if (pdata->inv_y) y = pdata->inv_y - y; + if (x < 0) x = 0; + if (y < 0) y = 0; + + down = !(tssc_ctl & TSSC_CTL_PENUP_IRQ); + was_down = ts->ts_down; + ts->ts_down = down; + + /* no valid data */ + if (down && !(tssc_ctl & TSSC_CTL_DATA_FLAG)) + return IRQ_HANDLED; + + if (msm_tsdebug & 2) + printk("%s: down=%d, x=%d, y=%d, z1=%d, z2=%d, status %x\n", + __func__, down, x, y, z1, z2, tssc_status); + + if (!was_down && down) { + struct ts_virt_key *vkey = NULL; + + if (pdata->vkeys_y && (y > pdata->virt_y_start)) + vkey = find_virt_key(ts, pdata->vkeys_y, x); + if (!vkey && ts->pdata->vkeys_x && (x > pdata->virt_x_start)) + vkey = find_virt_key(ts, pdata->vkeys_x, y); + + if (vkey) { + WARN_ON(ts->vkey_down != NULL); + if(msm_tsdebug) + printk("%s: virtual key down %d\n", __func__, + vkey->key); + ts->vkey_down = vkey; + input_report_key(ts->input_dev, vkey->key, 1); + input_sync(ts->input_dev); + return IRQ_HANDLED; + } + } else if (ts->vkey_down != NULL) { + if (!down) { + if(msm_tsdebug) + printk("%s: virtual key up %d\n", __func__, + ts->vkey_down->key); + input_report_key(ts->input_dev, ts->vkey_down->key, 0); + input_sync(ts->input_dev); 
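+ /* release for the tracked virtual key has been reported; stop tracking it */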
+ ts->vkey_down = NULL; + } + return IRQ_HANDLED; + } + + if (down) { + input_report_abs(ts->input_dev, ABS_X, x); + input_report_abs(ts->input_dev, ABS_Y, y); + input_report_abs(ts->input_dev, ABS_PRESSURE, z1); + } + input_report_key(ts->input_dev, BTN_TOUCH, down); + input_sync(ts->input_dev); + + return IRQ_HANDLED; +} + +static void dump_tssc_regs(struct msm_ts *ts) +{ +#define __dump_tssc_reg(r) \ + do { printk(#r " %x\n", tssc_readl(ts, (r))); } while(0) + + __dump_tssc_reg(TSSC_CTL); + __dump_tssc_reg(TSSC_OPN); + __dump_tssc_reg(TSSC_SAMPLING_INT); + __dump_tssc_reg(TSSC_STATUS); + __dump_tssc_reg(TSSC_AVG_12); + __dump_tssc_reg(TSSC_AVG_34); +#undef __dump_tssc_reg +} + +static int __devinit msm_ts_hw_init(struct msm_ts *ts) +{ + uint32_t tmp; + + /* Enable the register clock to tssc so we can configure it. */ + tssc_writel(ts, TSSC_CTL_ENABLE, TSSC_CTL); + + /* op1 - measure X, 1 sample, 12bit resolution */ + tmp = (TSSC_OPN_4WIRE_X << 16) | (2 << 8) | (2 << 0); + /* op2 - measure Y, 1 sample, 12bit resolution */ + tmp |= (TSSC_OPN_4WIRE_Y << 20) | (2 << 10) | (2 << 2); + /* op3 - measure Z1, 1 sample, 8bit resolution */ + tmp |= (TSSC_OPN_4WIRE_Z1 << 24) | (2 << 12) | (0 << 4); + + /* XXX: we don't actually need to measure Z2 (thus 0 samples) when + * doing voltage-driven measurement */ + /* op4 - measure Z2, 0 samples, 8bit resolution */ + tmp |= (TSSC_OPN_4WIRE_Z2 << 28) | (0 << 14) | (0 << 6); + tssc_writel(ts, tmp, TSSC_OPN); + + /* 16ms sampling interval */ + tssc_writel(ts, 16, TSSC_SAMPLING_INT); + + setup_next_sample(ts); + + return 0; +} + +static int __devinit msm_ts_probe(struct platform_device *pdev) +{ + struct msm_ts_platform_data *pdata = pdev->dev.platform_data; + struct msm_ts *ts; + struct resource *tssc_res; + struct resource *irq1_res; + struct resource *irq2_res; + int err = 0; + int i; + + tssc_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tssc"); + irq1_res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "tssc1"); + irq2_res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "tssc2"); + + if (!tssc_res || !irq1_res || !irq2_res) { + pr_err("%s: required resources not defined\n", __func__); + return -ENODEV; + } + + if (pdata == NULL) { + pr_err("%s: missing platform_data\n", __func__); + return -ENODEV; + } + + ts = kzalloc(sizeof(struct msm_ts), GFP_KERNEL); + if (ts == NULL) { + pr_err("%s: No memory for struct msm_ts\n", __func__); + return -ENOMEM; + } + ts->pdata = pdata; + + ts->tssc_base = ioremap(tssc_res->start, resource_size(tssc_res)); + if (ts->tssc_base == NULL) { + pr_err("%s: Can't ioremap region (0x%08x - 0x%08x)\n", __func__, + (uint32_t)tssc_res->start, (uint32_t)tssc_res->end); + err = -ENOMEM; + goto err_ioremap_tssc; + } + + ts->input_dev = input_allocate_device(); + if (ts->input_dev == NULL) { + pr_err("failed to allocate touchscreen input device\n"); + err = -ENOMEM; + goto err_alloc_input_dev; + } + ts->input_dev->name = "msm-touchscreen"; + input_set_drvdata(ts->input_dev, ts); + + input_set_capability(ts->input_dev, EV_KEY, BTN_TOUCH); + set_bit(EV_ABS, ts->input_dev->evbit); + + input_set_abs_params(ts->input_dev, ABS_X, pdata->min_x, pdata->max_x, + 0, 0); + input_set_abs_params(ts->input_dev, ABS_Y, pdata->min_y, pdata->max_y, + 0, 0); + input_set_abs_params(ts->input_dev, ABS_PRESSURE, pdata->min_press, + pdata->max_press, 0, 0); + + for (i = 0; pdata->vkeys_x && (i < pdata->vkeys_x->num_keys); ++i) + input_set_capability(ts->input_dev, EV_KEY, + pdata->vkeys_x->keys[i].key); + for (i = 0; pdata->vkeys_y 
&& (i < pdata->vkeys_y->num_keys); ++i) + input_set_capability(ts->input_dev, EV_KEY, + pdata->vkeys_y->keys[i].key); + + err = input_register_device(ts->input_dev); + if (err != 0) { + pr_err("%s: failed to register input device\n", __func__); + goto err_input_dev_reg; + } + + msm_ts_hw_init(ts); + + err = request_irq(irq1_res->start, msm_ts_irq, + (irq1_res->flags & ~IORESOURCE_IRQ) | IRQF_DISABLED, + "msm_touchscreen", ts); + if (err != 0) { + pr_err("%s: Cannot register irq1 (%d)\n", __func__, err); + goto err_request_irq1; + } + + err = request_irq(irq2_res->start, msm_ts_irq, + (irq2_res->flags & ~IORESOURCE_IRQ) | IRQF_DISABLED, + "msm_touchscreen", ts); + if (err != 0) { + pr_err("%s: Cannot register irq2 (%d)\n", __func__, err); + goto err_request_irq2; + } + + platform_set_drvdata(pdev, ts); + + pr_info("%s: tssc_base=%p irq1=%d irq2=%d\n", __func__, + ts->tssc_base, (int)irq1_res->start, (int)irq2_res->start); + return 0; + +err_request_irq2: + free_irq(irq1_res->start, ts); + +err_request_irq1: + /* disable the tssc */ + tssc_writel(ts, TSSC_CTL_ENABLE, TSSC_CTL); + +err_input_dev_reg: + input_set_drvdata(ts->input_dev, NULL); + input_free_device(ts->input_dev); + +err_alloc_input_dev: + iounmap(ts->tssc_base); + +err_ioremap_tssc: + kfree(ts); + return err; +} + +static struct platform_driver msm_touchscreen_driver = { + .driver = { + .name = "msm_touchscreen", + .owner = THIS_MODULE, + }, + .probe = msm_ts_probe, +}; + +static int __init msm_ts_init(void) +{ + return platform_driver_register(&msm_touchscreen_driver); +} +device_initcall(msm_ts_init); + +MODULE_DESCRIPTION("Qualcomm MSM/QSD Touchscreen controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/input/touchscreen/synaptics_i2c_rmi.c b/drivers/input/touchscreen/synaptics_i2c_rmi.c index 5729602cbb63a..c4d967c312d3d 100644 --- a/drivers/input/touchscreen/synaptics_i2c_rmi.c +++ b/drivers/input/touchscreen/synaptics_i2c_rmi.c @@ -13,6 +13,12 @@ * */ +/* Ported HTC's filtering code from Hero kernel sources to prevent + * the event hub being spammed with unnecessary events causing + * massive cpu usage. + * netarchy / Ninpo + */ + #include #include #include @@ -46,6 +52,7 @@ struct synaptics_ts_data { uint32_t flags; int reported_finger_count; int8_t sensitivity_adjust; + uint32_t dup_threshold; int (*power)(int on); struct early_suspend early_suspend; }; @@ -55,6 +62,48 @@ static void synaptics_ts_early_suspend(struct early_suspend *h); static void synaptics_ts_late_resume(struct early_suspend *h); #endif +#ifdef CONFIG_TOUCHSCREEN_DUPLICATED_FILTER +static int duplicated_filter(struct synaptics_ts_data *ts, int pos[2][2], + const int finger2_pressed, const int z) +{ + int drift_x[2]; + int drift_y[2]; + static int ref_x[2], ref_y[2]; + uint8_t discard[2] = {0, 0}; + + drift_x[0] = abs(ref_x[0] - pos[0][0]); + drift_y[0] = abs(ref_y[0] - pos[0][1]); + if (finger2_pressed) { + drift_x[1] = abs(ref_x[1] - pos[1][0]); + drift_y[1] = abs(ref_y[1] - pos[1][1]); + } + + if (drift_x[0] < ts->dup_threshold && drift_y[0] < ts->dup_threshold && z != 0) { + + discard[0] = 1; + } + if (!finger2_pressed || (drift_x[1] < ts->dup_threshold && drift_y[1] < ts->dup_threshold)) { + discard[1] = 1; + } + if (discard[0] && discard[1]) { +// if finger 0 and finger 1's movement < threshold , discard it. 
+ return 1; + } + ref_x[0] = pos[0][0]; + ref_y[0] = pos[0][1]; + if (finger2_pressed) { + ref_x[1] = pos[1][0]; + ref_y[1] = pos[1][1]; + } + if (z == 0) { + ref_x[0] = ref_y[0] = 0; + ref_x[1] = ref_y[1] = 0; + } + + return 0; +} +#endif + static int synaptics_init_panel(struct synaptics_ts_data *ts) { int ret; @@ -209,6 +258,14 @@ static void synaptics_ts_work_func(struct work_struct *work) if (!finger) z = 0; +#ifdef CONFIG_TOUCHSCREEN_DUPLICATED_FILTER + // discard duplicate events + ret = duplicated_filter(ts, pos, finger2_pressed, z); + if (ret == 1) { + /* printk("%s: duplicated_filter\n", __func__); */ + break; + } +#endif input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, z); input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w); input_report_abs(ts->input_dev, ABS_MT_POSITION_X, pos[0][0]); @@ -363,6 +420,7 @@ static int synaptics_ts_probe( fuzz_y = pdata->fuzz_y; fuzz_p = pdata->fuzz_p; fuzz_w = pdata->fuzz_w; + ts->dup_threshold = pdata->dup_threshold; // adapting filtering } else { irqflags = 0; inactive_area_left = 0; diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c index 7f85a862ad116..c35ab940f1f00 100644 --- a/drivers/input/xen-kbdfront.c +++ b/drivers/input/xen-kbdfront.c @@ -110,7 +110,7 @@ static irqreturn_t input_handler(int rq, void *dev_id) static int __devinit xenkbd_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { - int ret, i; + int ret, i, abs; struct xenkbd_info *info; struct input_dev *kbd, *ptr; @@ -128,6 +128,11 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev, if (!info->page) goto error_nomem; + if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-abs-pointer", "%d", &abs) < 0) + abs = 0; + if (abs) + xenbus_printf(XBT_NIL, dev->nodename, "request-abs-pointer", "1"); + /* keyboard */ kbd = input_allocate_device(); if (!kbd) @@ -137,11 +142,12 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev, kbd->id.bustype = BUS_PCI; kbd->id.vendor = 0x5853; kbd->id.product = 0xffff; - kbd->evbit[0] = BIT(EV_KEY); + + __set_bit(EV_KEY, kbd->evbit); for (i = KEY_ESC; i < KEY_UNKNOWN; i++) - set_bit(i, kbd->keybit); + __set_bit(i, kbd->keybit); for (i = KEY_OK; i < KEY_MAX; i++) - set_bit(i, kbd->keybit); + __set_bit(i, kbd->keybit); ret = input_register_device(kbd); if (ret) { @@ -160,12 +166,20 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev, ptr->id.bustype = BUS_PCI; ptr->id.vendor = 0x5853; ptr->id.product = 0xfffe; - ptr->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS); + + if (abs) { + __set_bit(EV_ABS, ptr->evbit); + input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0); + input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0); + } else { + input_set_capability(ptr, EV_REL, REL_X); + input_set_capability(ptr, EV_REL, REL_Y); + } + input_set_capability(ptr, EV_REL, REL_WHEEL); + + __set_bit(EV_KEY, ptr->evbit); for (i = BTN_LEFT; i <= BTN_TASK; i++) - set_bit(i, ptr->keybit); - ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y) | BIT(REL_WHEEL); - input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0); - input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0); + __set_bit(i, ptr->keybit); ret = input_register_device(ptr); if (ret) { diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index bf1aebd1acea3..9bc10860ba31e 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -210,6 +210,13 @@ config LEDS_LP5523 Driver provides direct control via LED class and interface for programming the engines. 
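+# HTC CPLD LED block: tri-colour status LEDs plus jogball backlight (see leds-cpld.c)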
+config LEDS_CPLD + tristate "LED Support for CPLD connected LEDs" + default y + depends on LEDS_CLASS + help + This option enables support for the LEDs connected to CPLD + config LEDS_CLEVO_MAIL tristate "Mail LED on Clevo notebook" depends on LEDS_CLASS diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index 5ca5c92220f83..891073b4fccf6 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o +obj-$(CONFIG_LEDS_CPLD) += leds-cpld.o obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o obj-$(CONFIG_LEDS_FSG) += leds-fsg.o @@ -39,8 +40,8 @@ obj-$(CONFIG_LEDS_LT3593) += leds-lt3593.o obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o obj-$(CONFIG_LEDS_DELL_NETBOOKS) += dell-led.o obj-$(CONFIG_LEDS_MC13783) += leds-mc13783.o -obj-$(CONFIG_LEDS_NS2) += leds-ns2.o -obj-$(CONFIG_LEDS_NETXBIG) += leds-netxbig.o +obj-$(CONFIG_LEDS_PCA963X) += leds-pca963x.o +obj-$(CONFIG_MICROP_COMMON) += leds-microp.o # LED SPI Drivers obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o diff --git a/drivers/leds/leds-cpld.c b/drivers/leds/leds-cpld.c new file mode 100644 index 0000000000000..2a45093f1aff1 --- /dev/null +++ b/drivers/leds/leds-cpld.c @@ -0,0 +1,405 @@ +/* include/asm/mach-msm/leds-cpld.c + * + * Copyright (C) 2008 HTC Corporation. + * + * Author: Farmer Tseng + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEBUG_LED_CHANGE 0 + +static int _g_cpld_led_addr; + +struct CPLD_LED_data { + spinlock_t data_lock; + struct led_classdev leds[4]; /* blue, green, red */ +}; + +static ssize_t led_blink_solid_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct CPLD_LED_data *CPLD_LED; + int idx = 2; + uint8_t reg_val; + struct led_classdev *led_cdev = dev_get_drvdata(dev); + ssize_t ret = 0; + + if (!strcmp(led_cdev->name, "red")) + idx = 0; + else if (!strcmp(led_cdev->name, "green")) + idx = 1; + else + idx = 2; + + CPLD_LED = container_of(led_cdev, struct CPLD_LED_data, leds[idx]); + + spin_lock(&CPLD_LED->data_lock); + reg_val = readb(_g_cpld_led_addr); + reg_val = reg_val >> (2 * idx + 1); + reg_val &= 0x1; + spin_unlock(&CPLD_LED->data_lock); + + /* no lock needed for this */ + sprintf(buf, "%u\n", reg_val); + ret = strlen(buf) + 1; + + return ret; +} + +static ssize_t led_blink_solid_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct CPLD_LED_data *CPLD_LED; + int idx = 2; + uint8_t reg_val; + char *after; + unsigned long state; + ssize_t ret = -EINVAL; + size_t count; + + struct led_classdev *led_cdev = dev_get_drvdata(dev); + + if (!strcmp(led_cdev->name, "red")) + idx = 0; + else if (!strcmp(led_cdev->name, "green")) + idx = 1; + else + idx = 2; + + CPLD_LED = container_of(led_cdev, struct CPLD_LED_data, leds[idx]); + + state = simple_strtoul(buf, &after, 10); + + count = after - buf; + + if (*after && isspace(*after)) + count++; + + if (count == size) { + ret = count; + spin_lock(&CPLD_LED->data_lock); + reg_val = readb(_g_cpld_led_addr); + if (state) + reg_val |= 1 << (2 * idx + 1); + else + reg_val &= ~(1 << (2 * idx + 1)); + + writeb(reg_val, _g_cpld_led_addr); + spin_unlock(&CPLD_LED->data_lock); + } + + return ret; +} + +static DEVICE_ATTR(blink, 0644, led_blink_solid_show, led_blink_solid_store); + +static ssize_t cpldled_blink_all_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + uint8_t reg_val; + struct CPLD_LED_data *CPLD_LED = dev_get_drvdata(dev); + ssize_t ret = 0; + + spin_lock(&CPLD_LED->data_lock); + reg_val = readb(_g_cpld_led_addr); + reg_val &= 0x2A; + if (reg_val == 0x2A) + reg_val = 1; + else + reg_val = 0; + spin_unlock(&CPLD_LED->data_lock); + + /* no lock needed for this */ + sprintf(buf, "%u\n", reg_val); + ret = strlen(buf) + 1; + + return ret; +} + +static ssize_t cpldled_blink_all_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + uint8_t reg_val; + char *after; + unsigned long state; + ssize_t ret = -EINVAL; + size_t count; + struct CPLD_LED_data *CPLD_LED = dev_get_drvdata(dev); + + state = simple_strtoul(buf, &after, 10); + + count = after - buf; + + if (*after && isspace(*after)) + count++; + + if (count == size) { + ret = count; + spin_lock(&CPLD_LED->data_lock); + reg_val = readb(_g_cpld_led_addr); + if (state) + reg_val |= 0x2A; + else + reg_val &= ~0x2A; + + writeb(reg_val, _g_cpld_led_addr); + spin_unlock(&CPLD_LED->data_lock); + } + + return ret; +} + +static struct device_attribute dev_attr_blink_all = { + .attr = { + .name = "blink", + .mode = 0644, + }, + .show = cpldled_blink_all_show, + .store = cpldled_blink_all_store, +}; + +static void cpld_led_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct CPLD_LED_data *CPLD_LED; + int idx = 2; + struct 
led_classdev *led; + uint8_t reg_val; + + if (!strcmp(led_cdev->name, "jogball-backlight")) { + if (brightness > 7) + reg_val = 1; + else + reg_val = brightness; + writeb(0, _g_cpld_led_addr + 0x8); + writeb(reg_val, _g_cpld_led_addr + 0x8); +#if DEBUG_LED_CHANGE + printk(KERN_INFO "LED change: jogball backlight = %d \n", + reg_val); +#endif + return; + } else if (!strcmp(led_cdev->name, "red")) { + idx = 0; + } else if (!strcmp(led_cdev->name, "green")) { + idx = 1; + } else { + idx = 2; + } + + CPLD_LED = container_of(led_cdev, struct CPLD_LED_data, leds[idx]); + spin_lock(&CPLD_LED->data_lock); + reg_val = readb(_g_cpld_led_addr); + led = &CPLD_LED->leds[idx]; + + if (led->brightness > LED_OFF) + reg_val |= 1 << (2 * idx); + else + reg_val &= ~(1 << (2 * idx)); + + writeb(reg_val, _g_cpld_led_addr); +#if DEBUG_LED_CHANGE + printk(KERN_INFO "LED change: %s = %d \n", led_cdev->name, led->brightness); +#endif + spin_unlock(&CPLD_LED->data_lock); +} + +static ssize_t cpldled_grpfreq_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", 0); +} + +static ssize_t cpldled_grpfreq_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + return 0; +} + +static DEVICE_ATTR(grpfreq, 0644, cpldled_grpfreq_show, cpldled_grpfreq_store); + +static ssize_t cpldled_grppwm_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", 0); +} + +static ssize_t cpldled_grppwm_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + return 0; +} + +static DEVICE_ATTR(grppwm, 0644, cpldled_grppwm_show, cpldled_grppwm_store); + +static int CPLD_LED_probe(struct platform_device *pdev) +{ + int ret = 0; + int i, j; + struct resource *res; + struct CPLD_LED_data *CPLD_LED; + + CPLD_LED = kzalloc(sizeof(struct CPLD_LED_data), GFP_KERNEL); + if (CPLD_LED == NULL) { + printk(KERN_ERR "CPLD_LED_probe: no memory for device\n"); + ret = -ENOMEM; + goto err_alloc_failed; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + ret = -ENOMEM; + goto err_alloc_failed; + } + + _g_cpld_led_addr = res->start; + if (!_g_cpld_led_addr) { + ret = -ENOMEM; + goto err_alloc_failed; + } + + memset(CPLD_LED, 0, sizeof(struct CPLD_LED_data)); + writeb(0x00, _g_cpld_led_addr); + + CPLD_LED->leds[0].name = "red"; + CPLD_LED->leds[0].brightness_set = cpld_led_brightness_set; + + CPLD_LED->leds[1].name = "green"; + CPLD_LED->leds[1].brightness_set = cpld_led_brightness_set; + + CPLD_LED->leds[2].name = "blue"; + CPLD_LED->leds[2].brightness_set = cpld_led_brightness_set; + + CPLD_LED->leds[3].name = "jogball-backlight"; + CPLD_LED->leds[3].brightness_set = cpld_led_brightness_set; + + spin_lock_init(&CPLD_LED->data_lock); + + for (i = 0; i < 4; i++) { /* red, green, blue jogball */ + ret = led_classdev_register(&pdev->dev, &CPLD_LED->leds[i]); + if (ret) { + printk(KERN_ERR + "CPLD_LED: led_classdev_register failed\n"); + goto err_led_classdev_register_failed; + } + } + + for (i = 0; i < 3; i++) { + ret = + device_create_file(CPLD_LED->leds[i].dev, &dev_attr_blink); + if (ret) { + printk(KERN_ERR + "CPLD_LED: device_create_file failed\n"); + goto err_out_attr_blink; + } + } + + dev_set_drvdata(&pdev->dev, CPLD_LED); + ret = device_create_file(&pdev->dev, &dev_attr_blink_all); + if (ret) { + printk(KERN_ERR + "CPLD_LED: create dev_attr_blink_all failed\n"); + goto err_out_attr_blink; + } + ret = device_create_file(&pdev->dev, &dev_attr_grppwm); + if 
(ret) { + printk(KERN_ERR + "CPLD_LED: create dev_attr_grppwm failed\n"); + goto err_out_attr_grppwm; + } + ret = device_create_file(&pdev->dev, &dev_attr_grpfreq); + if (ret) { + printk(KERN_ERR + "CPLD_LED: create dev_attr_grpfreq failed\n"); + goto err_out_attr_grpfreq; + } + + return 0; + +err_out_attr_grpfreq: + device_remove_file(&pdev->dev, &dev_attr_grppwm); +err_out_attr_grppwm: + device_remove_file(&pdev->dev, &dev_attr_blink_all); +err_out_attr_blink: + for (j = 0; j < i; j++) + device_remove_file(CPLD_LED->leds[j].dev, &dev_attr_blink); + i = 3; + +err_led_classdev_register_failed: + for (j = 0; j < i; j++) + led_classdev_unregister(&CPLD_LED->leds[j]); + +err_alloc_failed: + kfree(CPLD_LED); + + return ret; +} + +static int __devexit CPLD_LED_remove(struct platform_device *pdev) +{ + struct CPLD_LED_data *CPLD_LED; + int i; + + CPLD_LED = platform_get_drvdata(pdev); + + for (i = 0; i < 3; i++) { + device_remove_file(CPLD_LED->leds[i].dev, &dev_attr_blink); + led_classdev_unregister(&CPLD_LED->leds[i]); + } + + device_remove_file(&pdev->dev, &dev_attr_blink_all); + device_remove_file(&pdev->dev, &dev_attr_grppwm); + device_remove_file(&pdev->dev, &dev_attr_grpfreq); + + kfree(CPLD_LED); + return 0; +} + +static struct platform_driver CPLD_LED_driver = { + .probe = CPLD_LED_probe, + .remove = __devexit_p(CPLD_LED_remove), + .driver = { + .name = "leds-cpld", + .owner = THIS_MODULE, + }, +}; + +static int __init CPLD_LED_init(void) +{ + return platform_driver_register(&CPLD_LED_driver); +} + +static void __exit CPLD_LED_exit(void) +{ + platform_driver_unregister(&CPLD_LED_driver); +} + +MODULE_AUTHOR("Farmer Tseng"); +MODULE_DESCRIPTION("CPLD_LED driver"); +MODULE_LICENSE("GPL"); + +module_init(CPLD_LED_init); +module_exit(CPLD_LED_exit); diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c index 80a3ae3c00b93..c0cff64a1ae64 100644 --- a/drivers/leds/leds-lp5521.c +++ b/drivers/leds/leds-lp5521.c @@ -534,7 +534,7 @@ static ssize_t lp5521_selftest(struct device *dev, } /* led class device attributes */ -static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current); +static DEVICE_ATTR(led_current, S_IRUGO | S_IWUSR, show_current, store_current); static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL); static struct attribute *lp5521_led_attributes[] = { @@ -548,15 +548,15 @@ static struct attribute_group lp5521_led_attribute_group = { }; /* device attributes */ -static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO, +static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUSR, show_engine1_mode, store_engine1_mode); -static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO, +static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUSR, show_engine2_mode, store_engine2_mode); -static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO, +static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUSR, show_engine3_mode, store_engine3_mode); -static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load); -static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load); -static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load); +static DEVICE_ATTR(engine1_load, S_IWUSR, NULL, store_engine1_load); +static DEVICE_ATTR(engine2_load, S_IWUSR, NULL, store_engine2_load); +static DEVICE_ATTR(engine3_load, S_IWUSR, NULL, store_engine3_load); static DEVICE_ATTR(selftest, S_IRUGO, lp5521_selftest, NULL); static struct attribute *lp5521_attributes[] = { diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c index d0c4068ecddd7..e19fed25f1376 100644 --- 
a/drivers/leds/leds-lp5523.c +++ b/drivers/leds/leds-lp5523.c @@ -713,7 +713,7 @@ static ssize_t store_current(struct device *dev, } /* led class device attributes */ -static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current); +static DEVICE_ATTR(led_current, S_IRUGO | S_IWUSR, show_current, store_current); static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL); static struct attribute *lp5523_led_attributes[] = { @@ -727,21 +727,21 @@ static struct attribute_group lp5523_led_attribute_group = { }; /* device attributes */ -static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO, +static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUSR, show_engine1_mode, store_engine1_mode); -static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO, +static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUSR, show_engine2_mode, store_engine2_mode); -static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO, +static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUSR, show_engine3_mode, store_engine3_mode); -static DEVICE_ATTR(engine1_leds, S_IRUGO | S_IWUGO, +static DEVICE_ATTR(engine1_leds, S_IRUGO | S_IWUSR, show_engine1_leds, store_engine1_leds); -static DEVICE_ATTR(engine2_leds, S_IRUGO | S_IWUGO, +static DEVICE_ATTR(engine2_leds, S_IRUGO | S_IWUSR, show_engine2_leds, store_engine2_leds); -static DEVICE_ATTR(engine3_leds, S_IRUGO | S_IWUGO, +static DEVICE_ATTR(engine3_leds, S_IRUGO | S_IWUSR, show_engine3_leds, store_engine3_leds); -static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load); -static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load); -static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load); +static DEVICE_ATTR(engine1_load, S_IWUSR, NULL, store_engine1_load); +static DEVICE_ATTR(engine2_load, S_IWUSR, NULL, store_engine2_load); +static DEVICE_ATTR(engine3_load, S_IWUSR, NULL, store_engine3_load); static DEVICE_ATTR(selftest, S_IRUGO, lp5523_selftest, NULL); static struct attribute *lp5523_attributes[] = { diff --git a/drivers/leds/leds-microp.c b/drivers/leds/leds-microp.c new file mode 100644 index 0000000000000..ecee931426aea --- /dev/null +++ b/drivers/leds/leds-microp.c @@ -0,0 +1,778 @@ +/* include/asm/mach-msm/leds-microp.c + * + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifdef CONFIG_MICROP_COMMON +#include +#include +#include +#include +#include +#include +#include +#include + +static int microp_write_led_mode(struct led_classdev *led_cdev, + uint8_t mode, uint16_t off_timer) +{ + struct microp_led_data *ldata; + uint8_t data[7]; + int ret; + + ldata = container_of(led_cdev, struct microp_led_data, ldev); + + if (!strcmp(ldata->ldev.name, "green")) { + data[0] = 0x01; + data[1] = mode; + data[2] = off_timer >> 8; + data[3] = off_timer & 0xFF; + data[4] = 0x00; + data[5] = 0x00; + data[6] = 0x00; + } else if (!strcmp(ldata->ldev.name, "amber")) { + data[0] = 0x02; + data[1] = 0x00; + data[2] = 0x00; + data[3] = 0x00; + data[4] = mode; + data[5] = off_timer >> 8; + data[6] = off_timer & 0xFF; + } else if (!strcmp(ldata->ldev.name, "blue")) { + data[0] = 0x04; + data[1] = mode; + data[2] = off_timer >> 8; + data[3] = off_timer & 0xFF; + data[4] = 0x00; + data[5] = 0x00; + data[6] = 0x00; + } + + ret = microp_i2c_write(MICROP_I2C_WCMD_LED_MODE, data, 7); + if (ret == 0) { + mutex_lock(&ldata->led_data_mutex); + if (mode > 1) + ldata->blink = mode; + ldata->mode = mode; + mutex_unlock(&ldata->led_data_mutex); + } + return ret; +} + +static void microp_led_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct microp_led_data *ldata; + unsigned long flags; + int ret; + uint8_t mode; + + ldata = container_of(led_cdev, struct microp_led_data, ldev); + + if (brightness > 255) + brightness = 255; + led_cdev->brightness = brightness; + + spin_lock_irqsave(&ldata->brightness_lock, flags); + ldata->brightness = brightness; + spin_unlock_irqrestore(&ldata->brightness_lock, flags); + + if (brightness) + mode = 1; + else + mode = 0; + + ret = microp_write_led_mode(led_cdev, mode, 0xffff); + if (ret) + pr_err("%s: led_brightness_set failed to set mode\n", __func__); +} + +static void microp_led_jogball_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct microp_led_data *ldata; + unsigned long flags; + uint8_t data[3] = {0, 0, 0}; + int ret = 0; + + ldata = container_of(led_cdev, struct microp_led_data, ldev); + + spin_lock_irqsave(&ldata->brightness_lock, flags); + ldata->brightness = brightness; + spin_unlock_irqrestore(&ldata->brightness_lock, flags); + + switch (brightness) { + case 0: + data[0] = 0; + break; + case 1: + data[0] = 3; + data[1] = data[2] = 0xFF; + break; + case 3: + data[0] = 1; + data[1] = data[2] = 0xFF; + break; + case 7: + data[0] = 2; + data[1] = 0; + data[2] = 60; + break; + default: + pr_warning("%s: unknown value: %d\n", __func__, brightness); + break; + } + ret = microp_i2c_write(MICROP_I2C_WCMD_JOGBALL_LED_MODE, data, 3); + if (ret < 0) + pr_err("%s failed on set jogball mode:0x%2.2X\n", __func__, data[0]); +} + +static void microp_led_mobeam_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + ; +} + +static void microp_led_wimax_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct microp_led_data *ldata; + unsigned long flags; + uint8_t data[3] = {0, 0, 0}; + int ret = 0; + + ldata = container_of(led_cdev, struct microp_led_data, ldev); + + spin_lock_irqsave(&ldata->brightness_lock, flags); + ldata->brightness = brightness; + spin_unlock_irqrestore(&ldata->brightness_lock, flags); + + switch (brightness) { + case 0: + data[0] = 0; + break; + case 1: + case 2: + case 3: + case 4: + case 5: + case 129: + case 130: + case 131: + data[0] = brightness; + data[1] = data[2] = 0xFF; + break; 
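+ /* wimax modes 1-5 and 129-131 are forwarded to the microp unchanged; any other value only logs the warning below */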
+ default: + pr_warning("%s: unknown value: %d\n", __func__, brightness); + break; + } + + ret = microp_i2c_write(MICROP_I2C_WCMD_JOGBALL_LED_MODE, data, 3); + if (ret < 0) + pr_err("%s failed on set wimax mode:0x%2.2X\n", __func__, data[0]); +} + +static void microp_led_gpo_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct microp_led_data *ldata; + unsigned long flags; + uint8_t enable, addr, data[3] = {0, 0, 0}; + int ret = 0; + + ldata = container_of(led_cdev, struct microp_led_data, ldev); + + spin_lock_irqsave(&ldata->brightness_lock, flags); + ldata->brightness = brightness; + spin_unlock_irqrestore(&ldata->brightness_lock, flags); + + enable = brightness ? 1 : 0; + if (enable) + addr = MICROP_I2C_WCMD_GPO_LED_STATUS_EN; + else + addr = MICROP_I2C_WCMD_GPO_LED_STATUS_DIS; + data[0] = ldata->led_config->mask_w[0]; + data[1] = ldata->led_config->mask_w[1]; + data[2] = ldata->led_config->mask_w[2]; + ret = microp_i2c_write(addr, data, 3);; + if (ret < 0) + pr_err("%s failed on set gpo led mode:%d\n", __func__, brightness); +} + +static void microp_led_pwm_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct microp_led_data *ldata; + unsigned long flags; + uint8_t data[4] = {0, 0, 0, 0}; + int ret = 0; + uint8_t value; + + ldata = container_of(led_cdev, struct microp_led_data, ldev); + + spin_lock_irqsave(&ldata->brightness_lock, flags); + ldata->brightness = brightness; + spin_unlock_irqrestore(&ldata->brightness_lock, flags); + + value = brightness >= 255 ? 0x20 : 0; + + data[0] = ldata->led_config->fade_time; + if (brightness) + data[1] = ldata->led_config->init_value ? + ldata->led_config->init_value : + brightness; + else + data[1] = 0x00; + data[2] = ldata->led_config->led_pin >> 8; + data[3] = ldata->led_config->led_pin; + + ret = microp_i2c_write(MICROP_I2C_WCMD_LED_PWM, data, 4); + if (ret < 0) + pr_err("%s failed on set pwm led mode:0x%2.2X\n", __func__, data[1]); +} + +static ssize_t microp_led_blink_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct led_classdev *led_cdev; + struct microp_led_data *ldata; + int ret; + + led_cdev = (struct led_classdev *)dev_get_drvdata(dev); + ldata = container_of(led_cdev, struct microp_led_data, ldev); + + mutex_lock(&ldata->led_data_mutex); + ret = sprintf(buf, "%d\n", ldata->blink ? 
ldata->blink - 1 : 0); + mutex_unlock(&ldata->led_data_mutex); + + return ret; +} + +static ssize_t microp_led_blink_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct led_classdev *led_cdev; + struct microp_led_data *ldata; + int val, ret; + uint8_t mode; + + val = -1; + sscanf(buf, "%u", &val); + if (val < 0 || val > 255) + return -EINVAL; + + led_cdev = (struct led_classdev *)dev_get_drvdata(dev); + ldata = container_of(led_cdev, struct microp_led_data, ldev); + + mutex_lock(&ldata->led_data_mutex); + switch (val) { + case 0: /* stop flashing */ + ldata->blink = 0; + if (led_cdev->brightness) + mode = 1; + else + mode = 0; + break; + case 1: + case 2: + case 3: + mode = val + 1; + break; + case 4: + if (!strcmp(ldata->ldev.name, "amber")) { + mode = val + 1; + break; + } + default: + mutex_unlock(&ldata->led_data_mutex); + return -EINVAL; + } + mutex_unlock(&ldata->led_data_mutex); + + ret = microp_write_led_mode(led_cdev, mode, 0xffff); + if (ret) + pr_err("%s:%s set blink failed\n", __func__, led_cdev->name); + + return count; +} + +static DEVICE_ATTR(blink, 0644, microp_led_blink_show, + microp_led_blink_store); + +static ssize_t microp_led_off_timer_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct led_classdev *led_cdev; + struct microp_led_data *ldata; + uint8_t data[2]; + int ret, offtime; + + led_cdev = (struct led_classdev *)dev_get_drvdata(dev); + ldata = container_of(led_cdev, struct microp_led_data, ldev); + + dev_dbg(dev, "Getting %s remaining time\n", led_cdev->name); + + if (!strcmp(ldata->ldev.name, "green")) + ret = microp_i2c_read(MICROP_I2C_RCMD_GREEN_LED_REMAIN_TIME, data, 2); + else if (!strcmp(ldata->ldev.name, "amber")) + ret = microp_i2c_read(MICROP_I2C_RCMD_AMBER_LED_REMAIN_TIME, data, 2); + else if (!strcmp(ldata->ldev.name, "blue")) + ret = microp_i2c_read(MICROP_I2C_RCMD_BLUE_LED_REMAIN_TIME, data, 2); + else { + pr_err("%s: Unknown led %s\n", __func__, ldata->ldev.name); + return -EINVAL; + } + + if (ret) + pr_err("%s: %s get off_timer failed\n", __func__, led_cdev->name); + + offtime = (int)((data[1] | data[0] << 8) * 2); + + ret = sprintf(buf, "Time remains %d:%d\n", offtime / 60, offtime % 60); + return ret; +} + +static ssize_t microp_led_off_timer_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct led_classdev *led_cdev; + struct microp_led_data *ldata; + int min, sec, ret; + uint16_t off_timer; + + min = -1; + sec = -1; + sscanf(buf, "%d %d", &min, &sec); + + if (min < 0 || min > 255) + return -EINVAL; + if (sec < 0 || sec > 255) + return -EINVAL; + + led_cdev = (struct led_classdev *)dev_get_drvdata(dev); + ldata = container_of(led_cdev, struct microp_led_data, ldev); + + dev_dbg(dev, "Setting %s off_timer to %d min %d sec\n", + led_cdev->name, min, sec); + + if (!min && !sec) + off_timer = 0xFFFF; + else + off_timer = (min * 60 + sec) / 2; + + ret = microp_write_led_mode(led_cdev, ldata->mode, off_timer); + if (ret) + pr_err("%s: %s set off_timer %d min %d sec failed\n", + __func__, led_cdev->name, min, sec); + + return count; +} + +static DEVICE_ATTR(off_timer, 0644, microp_led_off_timer_show, + microp_led_off_timer_store); + +static ssize_t microp_jogball_color_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct led_classdev *led_cdev; + struct microp_led_data *ldata; + struct i2c_client *client; + int rpwm, gpwm, bpwm, ret; + uint8_t data[4]; + + rpwm = -1; + 
gpwm = -1; + bpwm = -1; + sscanf(buf, "%d %d %d", &rpwm, &gpwm, &bpwm); + + if (rpwm < 0 || rpwm > 255) + return -EINVAL; + if (gpwm < 0 || gpwm > 255) + return -EINVAL; + if (bpwm < 0 || bpwm > 255) + return -EINVAL; + + led_cdev = (struct led_classdev *)dev_get_drvdata(dev); + ldata = container_of(led_cdev, struct microp_led_data, ldev); + client = to_i2c_client(dev->parent); + + dev_dbg(&client->dev, "Setting %s color to R=%d, G=%d, B=%d\n", + led_cdev->name, rpwm, gpwm, bpwm); + + data[0] = rpwm; + data[1] = gpwm; + data[2] = bpwm; + data[3] = 0x00; + + ret = microp_i2c_write(MICROP_I2C_WCMD_JOGBALL_LED_PWM_SET, data, 4); + if (ret) { + dev_err(&client->dev, "%s set color R=%d G=%d B=%d failed\n", + led_cdev->name, rpwm, gpwm, bpwm); + } + return count; +} + +static DEVICE_ATTR(color, 0644, NULL, microp_jogball_color_store); + +static ssize_t microp_mobeam_read_status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + uint8_t data[2] = {0, 0}; + int ret; +pr_info("%s\n", __func__); + ret = microp_i2c_read(MICROP_I2C_RCMD_MOBEAM_STATUS, data, 2); + if (ret == 0) + ret = sprintf(buf, "%d %d\n", data[0], data[1]); + else + data[0] = data[1] = 0; + + return ret; +} +static DEVICE_ATTR(read_status, 0444, microp_mobeam_read_status_show, NULL); + +static ssize_t microp_mobeam_download_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int i, ret, num; + uint8_t data[73] ; /* Size of cbitstream array MAX 73 bytes. */ + +pr_info("%s\n", __func__); + memset(data, 0x00, sizeof(data)); + + num = *(buf++); + if (num < 0 || num > 73) + return -EINVAL; +pr_info("%s: count=%d\n", __func__, count); + for (i = 0; i < count; i++) + data[i] = *(buf + i); + + ret = microp_i2c_write(MICROP_I2C_WCMD_MOBEAM_DL, data, num); + if (ret != 0) + count = 0; + + return count; +} +static DEVICE_ATTR(data_download, 0644, NULL, microp_mobeam_download_store); + +static ssize_t microp_mobeam_send_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + uint8_t data[2]; + unsigned char num; + int ret; + +pr_info("%s\n", __func__); + num = *buf; + + if (num < 0 || num > 73) + return -EINVAL; + + data[0] = (uint8_t)num; + data[1] = 0; + ret = microp_i2c_write(MICROP_I2C_WCMD_MOBEAM_SEND, data, 2); + if (ret != 0) + count = 0; + + return count; +} +static DEVICE_ATTR(send_data, 0644, NULL, microp_mobeam_send_store); + +static uint8_t leds_data[7]; + +static ssize_t microp_mobeam_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int val; + uint8_t data[7]; + int ret; + +pr_info("%s\n", __func__); + val = -1; + sscanf(buf, "%u", &val); + if (val != 1 && val != 0) + return -EINVAL; + + if (val) { + ret = microp_i2c_read(MICROP_I2C_RCMD_LED_STATUS, leds_data, 7); + data[0] = 0x03; + data[1] = 0x00; + data[2] = 0x00; + data[3] = 0x00; + data[4] = 0x00; + data[5] = 0x00; + data[6] = 0x00; + ret = microp_i2c_write(MICROP_I2C_WCMD_LED_MODE, data, 7); + pr_info("%s: enabled\n", __func__); + msleep(150); + microp_mobeam_enable(1); + } else { + microp_mobeam_enable(0); + ret = microp_i2c_write(MICROP_I2C_WCMD_LED_MODE, leds_data, 7); + } + return count; +} +static DEVICE_ATTR(mobeam_enable, 0644, NULL, microp_mobeam_enable_store); + +static ssize_t microp_mobeam_stop_led(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int val; + uint8_t data[2]; + +pr_info("%s\n", __func__); + val = -1; + sscanf(buf, "%u", &val); + + data[0] = 0x00; 
+ data[1] = 0x01; + + + return count; +} +static DEVICE_ATTR(stop_led, 0644, NULL, microp_mobeam_stop_led); + + +static int microp_led_probe(struct platform_device *pdev) +{ + struct microp_led_platform_data *pdata; + struct microp_led_data *ldata; + int i, ret; + + pdata = pdev->dev.platform_data; + if (pdata == NULL) { + pr_err("%s: platform data is NULL\n", __func__); + return -ENODEV; + } + + ldata = kzalloc(sizeof(struct microp_led_data) + * pdata->num_leds, GFP_KERNEL); + if (!ldata && pdata->num_leds) { + ret = -ENOMEM; + pr_err("%s: failed on allocate ldata\n", __func__); + goto err_exit; + } + + dev_set_drvdata(&pdev->dev, ldata); + for (i = 0; i < pdata->num_leds; i++) { + ldata[i].led_config = pdata->led_config + i; + ldata[i].ldev.name = pdata->led_config[i].name; + if (pdata->led_config[i].type == LED_JOGBALL) + ldata[i].ldev.brightness_set + = microp_led_jogball_brightness_set; + else if (pdata->led_config[i].type == LED_GPO) + ldata[i].ldev.brightness_set + = microp_led_gpo_brightness_set; + else if (pdata->led_config[i].type == LED_PWM) + ldata[i].ldev.brightness_set + = microp_led_pwm_brightness_set; + else if (pdata->led_config[i].type == LED_RGB) + ldata[i].ldev.brightness_set + = microp_led_brightness_set; + else if (pdata->led_config[i].type == LED_WIMAX) + ldata[i].ldev.brightness_set + = microp_led_wimax_brightness_set; + else if (pdata->led_config[i].type == LED_MOBEAM) + ldata[i].ldev.brightness_set + = microp_led_mobeam_brightness_set; + + mutex_init(&ldata[i].led_data_mutex); + spin_lock_init(&ldata[i].brightness_lock); + ret = led_classdev_register(&pdev->dev, &ldata[i].ldev); + if (ret < 0) { + pr_err("%s: failed on led_classdev_register [%s]\n", + __func__, ldata[i].ldev.name); + goto err_register_led_cdev; + } + } + + for (i = 0; i < pdata->num_leds; i++) { + if (pdata->led_config[i].type != LED_RGB) + continue; + ret = device_create_file(ldata[i].ldev.dev, &dev_attr_blink); + if (ret < 0) { + pr_err("%s: failed on create attr blink [%d]\n", __func__, i); + goto err_register_attr_blink; + } + } + + for (i = 0; i < pdata->num_leds; i++) { + if (pdata->led_config[i].type != LED_RGB) + continue; + ret = device_create_file(ldata[i].ldev.dev, &dev_attr_off_timer); + if (ret < 0) { + pr_err("%s: failed on create attr off_timer [%d]\n", + __func__, i); + goto err_register_attr_off_timer; + } + } + + for (i = 0; i < pdata->num_leds; i++) { + if (pdata->led_config[i].type != LED_JOGBALL) + continue; + ret = device_create_file(ldata[i].ldev.dev, &dev_attr_color); + if (ret < 0) { + pr_err("%s: failed on create attr jogball color\n", + __func__); + goto err_register_attr_color; + } else + break; + } + + for (i = 0; i < pdata->num_leds; i++) { + if (pdata->led_config[i].type != LED_MOBEAM) + continue; + ret = device_create_file(ldata[i].ldev.dev, + &dev_attr_data_download); + if (ret < 0) { + pr_err("%s: failed on create attr data download\n", + __func__); + goto err_create_mo_download_attr_file; + } + + ret = device_create_file(ldata[i].ldev.dev, + &dev_attr_send_data); + if (ret < 0) { + pr_err("%s: failed on create attr send data\n", + __func__); + goto err_create_mo_send_attr_file; + } + + ret = device_create_file(ldata[i].ldev.dev, + &dev_attr_read_status); + if (ret < 0) { + pr_err("%s: failed on create attr read status\n", + __func__); + goto err_create_mo_read_attr_file; + } + + ret = device_create_file(ldata[i].ldev.dev, + &dev_attr_mobeam_enable); + if (ret < 0) { + pr_err("%s: failed on create attr enable\n", + __func__); + goto err_create_mo_enable_attr_file; 
+ } + + ret = device_create_file(ldata[i].ldev.dev, + &dev_attr_stop_led); + if (ret < 0) { + pr_err("%s: failed on create attr stop led\n", + __func__); + goto err_create_mo_stop_attr_file; + } + break; + } + + pr_info("%s: succeeded\n", __func__); + return 0; + +err_create_mo_stop_attr_file: + device_remove_file(ldata[i].ldev.dev, &dev_attr_mobeam_enable); +err_create_mo_enable_attr_file: + device_remove_file(ldata[i].ldev.dev, &dev_attr_read_status); +err_create_mo_read_attr_file: + device_remove_file(ldata[i].ldev.dev, &dev_attr_send_data); +err_create_mo_send_attr_file: + device_remove_file(ldata[i].ldev.dev, &dev_attr_data_download); +err_create_mo_download_attr_file: + i = pdata->num_leds; +err_register_attr_color: + for (i--; i >= 0; i--) { + if (pdata->led_config[i].type != LED_JOGBALL) + continue; + device_remove_file(ldata[i].ldev.dev, &dev_attr_color); + } + i = pdata->num_leds; +err_register_attr_off_timer: + for (i--; i >= 0; i--) { + if (pdata->led_config[i].type != LED_RGB) + continue; + device_remove_file(ldata[i].ldev.dev, + &dev_attr_off_timer); + } + i = pdata->num_leds; +err_register_attr_blink: + for (i--; i >= 0; i--) { + if (pdata->led_config[i].type != LED_RGB) + continue; + device_remove_file(ldata[i].ldev.dev, + &dev_attr_blink); + } + i = pdata->num_leds; + +err_register_led_cdev: + for (i--; i >= 0; i--) + led_classdev_unregister(&ldata[i].ldev); + kfree(ldata); + +err_exit: + return ret; +} + +static int __devexit microp_led_remove(struct platform_device *pdev) +{ + struct microp_led_platform_data *pdata; + struct microp_led_data *ldata; + int i; + + pdata = pdev->dev.platform_data; + ldata = platform_get_drvdata(pdev); + + for (i = 0; i < pdata->num_leds; i++) { + led_classdev_unregister(&ldata[i].ldev); + if (pdata->led_config[i].type == LED_RGB) { + device_remove_file(ldata[i].ldev.dev, + &dev_attr_off_timer); + device_remove_file(ldata[i].ldev.dev, + &dev_attr_blink); + } else if (pdata->led_config[i].type == LED_JOGBALL) + device_remove_file(ldata[i].ldev.dev, &dev_attr_color); + } + kfree(ldata); + + return 0; +} + +static struct platform_driver microp_led_driver = { + .probe = microp_led_probe, + .remove = __devexit_p(microp_led_remove), + .driver = { + .name = "leds-microp", + .owner = THIS_MODULE, + }, +}; + +int __init microp_led_init(void) +{ + return platform_driver_register(&microp_led_driver); +} + +void microp_led_exit(void) +{ + platform_driver_unregister(&microp_led_driver); +} + +module_init(microp_led_init); +module_exit(microp_led_exit); + +MODULE_DESCRIPTION("Atmega MicroP led driver"); +MODULE_LICENSE("GPL"); +#endif /* end of #ifdef CONFIG_MICROP_COMMON*/ + diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 9a35320fb59f7..9d4406985fe57 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -493,11 +493,11 @@ void bitmap_update_sb(struct bitmap *bitmap) spin_unlock_irqrestore(&bitmap->lock, flags); sb = kmap_atomic(bitmap->sb_page, KM_USER0); sb->events = cpu_to_le64(bitmap->mddev->events); - if (bitmap->mddev->events < bitmap->events_cleared) { + if (bitmap->mddev->events < bitmap->events_cleared) /* rocking back to read-only */ bitmap->events_cleared = bitmap->mddev->events; - sb->events_cleared = cpu_to_le64(bitmap->events_cleared); - } + sb->events_cleared = cpu_to_le64(bitmap->events_cleared); + sb->state = cpu_to_le32(bitmap->flags); /* Just in case these have been changed via sysfs: */ sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ); sb->write_behind =
cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind); @@ -618,7 +618,7 @@ static int bitmap_read_sb(struct bitmap *bitmap) if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN) bitmap->flags |= BITMAP_HOSTENDIAN; bitmap->events_cleared = le64_to_cpu(sb->events_cleared); - if (sb->state & cpu_to_le32(BITMAP_STALE)) + if (bitmap->flags & BITMAP_STALE) bitmap->events_cleared = bitmap->mddev->events; err = 0; out: @@ -652,9 +652,11 @@ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits, switch (op) { case MASK_SET: sb->state |= cpu_to_le32(bits); + bitmap->flags |= bits; break; case MASK_UNSET: sb->state &= cpu_to_le32(~bits); + bitmap->flags &= ~bits; break; default: BUG(); diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 95891dfcbca02..4c20f3c3366a6 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -764,7 +764,7 @@ static int persistent_commit_merge(struct dm_exception_store *store, for (i = 0; i < nr_merged; i++) clear_exception(ps, ps->current_committed - 1 - i); - r = area_io(ps, WRITE); + r = area_io(ps, WRITE_FLUSH_FUA); if (r < 0) return r; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 38e4eb1bb9656..abd23aa34e4f7 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -361,6 +361,7 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md) static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { + struct request_queue *q; struct queue_limits *limits = data; struct block_device *bdev = dev->bdev; sector_t dev_size = @@ -369,6 +370,22 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, limits->logical_block_size >> SECTOR_SHIFT; char b[BDEVNAME_SIZE]; + /* + * Some devices exist without request functions, + * such as loop devices not yet bound to backing files. + * Forbid the use of such devices. + */ + q = bdev_get_queue(bdev); + if (!q || !q->make_request_fn) { + DMWARN("%s: %s is not yet initialised: " + "start=%llu, len=%llu, dev_size=%llu", + dm_device_name(ti->table->md), bdevname(bdev, b), + (unsigned long long)start, + (unsigned long long)len, + (unsigned long long)dev_size); + return 1; + } + if (!dev_size) return 0; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index eaa3af0e0632a..7e9c2c39ecf00 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -37,6 +37,8 @@ static const char *_name = DM_NAME; static unsigned int major = 0; static unsigned int _major = 0; +static DEFINE_IDR(_minor_idr); + static DEFINE_SPINLOCK(_minor_lock); /* * For bio-based dm. @@ -313,6 +315,12 @@ static void __exit dm_exit(void) while (i--) _exits[i](); + + /* + * Should be empty by this point. + */ + idr_remove_all(&_minor_idr); + idr_destroy(&_minor_idr); } /* @@ -1723,8 +1731,6 @@ static int dm_any_congested(void *congested_data, int bdi_bits) /*----------------------------------------------------------------- * An IDR is used to keep track of allocated minor numbers. 
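+ * (The IDR itself is now defined near the top of this file so that dm_exit() can drop it via idr_remove_all() and idr_destroy().)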
*---------------------------------------------------------------*/ -static DEFINE_IDR(_minor_idr); - static void free_minor(int minor) { spin_lock(&_minor_lock); diff --git a/drivers/md/linear.h b/drivers/md/linear.h index 0ce29b61605a2..2f2da05b2ce9c 100644 --- a/drivers/md/linear.h +++ b/drivers/md/linear.h @@ -10,9 +10,9 @@ typedef struct dev_info dev_info_t; struct linear_private_data { + struct rcu_head rcu; sector_t array_sectors; dev_info_t disks[0]; - struct rcu_head rcu; }; diff --git a/drivers/md/md.c b/drivers/md/md.c index 818313e277e7c..3683a07415fb4 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -4335,13 +4335,19 @@ static int md_alloc(dev_t dev, char *name) disk->fops = &md_fops; disk->private_data = mddev; disk->queue = mddev->queue; + blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA); /* Allow extended partitions. This makes the * 'mdp' device redundant, but we can't really * remove it now. */ disk->flags |= GENHD_FL_EXT_DEVT; - add_disk(disk); mddev->gendisk = disk; + /* As soon as we call add_disk(), another thread could get + * through to md_open, so make sure it doesn't get too far + */ + mutex_lock(&mddev->open_mutex); + add_disk(disk); + error = kobject_init_and_add(&mddev->kobj, &md_ktype, &disk_to_dev(disk)->kobj, "%s", "md"); if (error) { @@ -4355,8 +4361,7 @@ static int md_alloc(dev_t dev, char *name) if (mddev->kobj.sd && sysfs_create_group(&mddev->kobj, &md_bitmap_group)) printk(KERN_DEBUG "pointless warning\n"); - - blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA); + mutex_unlock(&mddev->open_mutex); abort: mutex_unlock(&disks_mutex); if (!error && mddev->kobj.sd) { diff --git a/drivers/media/dvb/b2c2/flexcop-pci.c b/drivers/media/dvb/b2c2/flexcop-pci.c index 227c0200b70a5..4f3e3ceaa7c90 100644 --- a/drivers/media/dvb/b2c2/flexcop-pci.c +++ b/drivers/media/dvb/b2c2/flexcop-pci.c @@ -38,7 +38,7 @@ MODULE_PARM_DESC(debug, DEBSTATUS); #define DRIVER_VERSION "0.1" -#define DRIVER_NAME "Technisat/B2C2 FlexCop II/IIb/III Digital TV PCI Driver" +#define DRIVER_NAME "flexcop-pci" #define DRIVER_AUTHOR "Patrick Boettcher " struct flexcop_pci { diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig index ecdffa6aac664..c0902465bdfd7 100644 --- a/drivers/media/radio/Kconfig +++ b/drivers/media/radio/Kconfig @@ -441,6 +441,7 @@ config RADIO_TIMBERDALE config RADIO_WL1273 tristate "Texas Instruments WL1273 I2C FM Radio" depends on I2C && VIDEO_V4L2 + select MFD_CORE select MFD_WL1273_CORE select FW_LOADER ---help--- diff --git a/drivers/media/radio/saa7706h.c b/drivers/media/radio/saa7706h.c index 585680ffbfb64..b1193dfc5087b 100644 --- a/drivers/media/radio/saa7706h.c +++ b/drivers/media/radio/saa7706h.c @@ -376,7 +376,7 @@ static int __devinit saa7706h_probe(struct i2c_client *client, v4l_info(client, "chip found @ 0x%02x (%s)\n", client->addr << 1, client->adapter->name); - state = kmalloc(sizeof(struct saa7706h_state), GFP_KERNEL); + state = kzalloc(sizeof(struct saa7706h_state), GFP_KERNEL); if (state == NULL) return -ENOMEM; sd = &state->sd; diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c index 7c0d77751f6e6..0991e1973678c 100644 --- a/drivers/media/radio/tef6862.c +++ b/drivers/media/radio/tef6862.c @@ -176,7 +176,7 @@ static int __devinit tef6862_probe(struct i2c_client *client, v4l_info(client, "chip found @ 0x%02x (%s)\n", client->addr << 1, client->adapter->name); - state = kmalloc(sizeof(struct tef6862_state), GFP_KERNEL); + state = kzalloc(sizeof(struct tef6862_state), GFP_KERNEL); if (state == NULL) return 
-ENOMEM; state->freq = TEF6862_LO_FREQ; diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c index e7dc6b46fdfac..eee39570f23db 100644 --- a/drivers/media/rc/imon.c +++ b/drivers/media/rc/imon.c @@ -46,7 +46,7 @@ #define MOD_AUTHOR "Jarod Wilson " #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" #define MOD_NAME "imon" -#define MOD_VERSION "0.9.2" +#define MOD_VERSION "0.9.3" #define DISPLAY_MINOR_BASE 144 #define DEVICE_NAME "lcd%d" @@ -451,8 +451,9 @@ static int display_close(struct inode *inode, struct file *file) } /** - * Sends a packet to the device -- this function must be called - * with ictx->lock held. + * Sends a packet to the device -- this function must be called with + * ictx->lock held, or its unlock/lock sequence while waiting for tx + * to complete can/will lead to a deadlock. */ static int send_packet(struct imon_context *ictx) { @@ -982,12 +983,21 @@ static void imon_touch_display_timeout(unsigned long data) * the iMON remotes, and those used by the Windows MCE remotes (which is * really just RC-6), but only one or the other at a time, as the signals * are decoded onboard the receiver. + * + * This function gets called two different ways, one way is from + * rc_register_device, for initial protocol selection/setup, and the other is + * via a userspace-initiated protocol change request, either by direct sysfs + * prodding or by something like ir-keytable. In the rc_register_device case, + * the imon context lock is already held, but when initiated from userspace, + * it is not, so we must acquire it prior to calling send_packet, which + * requires that the lock is held. */ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type) { int retval; struct imon_context *ictx = rc->priv; struct device *dev = ictx->dev; + bool unlock = false; unsigned char ir_proto_packet[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 }; @@ -1020,6 +1030,11 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type) memcpy(ictx->usb_tx_buf, &ir_proto_packet, sizeof(ir_proto_packet)); + if (!mutex_is_locked(&ictx->lock)) { + unlock = true; + mutex_lock(&ictx->lock); + } + retval = send_packet(ictx); if (retval) goto out; @@ -1028,6 +1043,9 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type) ictx->pad_mouse = false; out: + if (unlock) + mutex_unlock(&ictx->lock); + return retval; } @@ -2125,6 +2143,7 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf) goto rdev_setup_failed; } + mutex_unlock(&ictx->lock); return ictx; rdev_setup_failed: @@ -2196,6 +2215,7 @@ static struct imon_context *imon_init_intf1(struct usb_interface *intf, goto urb_submit_failed; } + mutex_unlock(&ictx->lock); return ictx; urb_submit_failed: @@ -2290,6 +2310,8 @@ static int __devinit imon_probe(struct usb_interface *interface, usb_set_intfdata(interface, ictx); if (ifnum == 0) { + mutex_lock(&ictx->lock); + if (product == 0xffdc && ictx->rf_device) { sysfs_err = sysfs_create_group(&interface->dev.kobj, &imon_rf_attr_group); @@ -2300,13 +2322,14 @@ static int __devinit imon_probe(struct usb_interface *interface, if (ictx->display_supported) imon_init_display(ictx, interface); + + mutex_unlock(&ictx->lock); } dev_info(dev, "iMON device (%04x:%04x, intf%d) on " "usb<%d:%d> initialized\n", vendor, product, ifnum, usbdev->bus->busnum, usbdev->devnum); - mutex_unlock(&ictx->lock); mutex_unlock(&driver_lock); return 0; diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig index aa021600e9df3..3459b7814989f 100644 --- 
a/drivers/media/video/Kconfig +++ b/drivers/media/video/Kconfig @@ -950,8 +950,12 @@ config USB_S2255 This driver can be compiled as a module, called s2255drv. endif # V4L_USB_DRIVERS + endif # VIDEO_CAPTURE_DRIVERS +# MSM camera does not require V4L2 +source "drivers/media/video/msm/Kconfig" + menuconfig V4L_MEM2MEM_DRIVERS bool "Memory-to-memory multimedia devices" depends on VIDEO_V4L2 diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile index a509d317e258d..61f43f2bb2943 100644 --- a/drivers/media/video/Makefile +++ b/drivers/media/video/Makefile @@ -169,6 +169,8 @@ obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o obj-y += davinci/ +obj-$(CONFIG_MSM_CAMERA) += msm/ + obj-$(CONFIG_ARCH_OMAP) += omap/ EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c index bca307eb1e249..f637d34d5062e 100644 --- a/drivers/media/video/cx88/cx88-blackbird.c +++ b/drivers/media/video/cx88/cx88-blackbird.c @@ -1122,7 +1122,6 @@ static int mpeg_release(struct file *file) mutex_lock(&dev->core->lock); file->private_data = NULL; kfree(fh); - mutex_unlock(&dev->core->lock); /* Make sure we release the hardware */ drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD); @@ -1131,6 +1130,8 @@ static int mpeg_release(struct file *file) atomic_dec(&dev->core->mpeg_users); + mutex_unlock(&dev->core->lock); + return 0; } @@ -1334,11 +1335,9 @@ static int cx8802_blackbird_probe(struct cx8802_driver *drv) blackbird_register_video(dev); /* initial device configuration: needed ? */ - mutex_lock(&dev->core->lock); // init_controls(core); cx88_set_tvnorm(core,core->tvnorm); cx88_video_mux(core,0); - mutex_unlock(&dev->core->lock); return 0; diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c index 90717ee944ec5..5eccd0211e79f 100644 --- a/drivers/media/video/cx88/cx88-dvb.c +++ b/drivers/media/video/cx88/cx88-dvb.c @@ -132,6 +132,7 @@ static int cx88_dvb_bus_ctrl(struct dvb_frontend* fe, int acquire) return -EINVAL; } + mutex_lock(&dev->core->lock); drv = cx8802_get_driver(dev, CX88_MPEG_DVB); if (drv) { if (acquire){ @@ -142,6 +143,7 @@ static int cx88_dvb_bus_ctrl(struct dvb_frontend* fe, int acquire) dev->frontends.active_fe_id = 0; } } + mutex_unlock(&dev->core->lock); return ret; } diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c index 06f7d1d009440..67a2b086101ad 100644 --- a/drivers/media/video/cx88/cx88-input.c +++ b/drivers/media/video/cx88/cx88-input.c @@ -283,7 +283,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci) case CX88_BOARD_PCHDTV_HD3000: case CX88_BOARD_PCHDTV_HD5500: case CX88_BOARD_HAUPPAUGE_IRONLY: - ir_codes = RC_MAP_HAUPPAUGE_NEW; + ir_codes = RC_MAP_RC5_HAUPPAUGE_NEW; ir->sampling = 1; break; case CX88_BOARD_WINFAST_DTV2000H: @@ -523,7 +523,7 @@ void cx88_ir_irq(struct cx88_core *core) for (todo = 32; todo > 0; todo -= bits) { ev.pulse = samples & 0x80000000 ? false : true; bits = min(todo, 32U - fls(ev.pulse ? 
samples : ~samples)); - ev.duration = (bits * NSEC_PER_SEC) / (1000 * ir_samplerate); + ev.duration = (bits * (NSEC_PER_SEC / 1000)) / ir_samplerate; ir_raw_event_store_with_filter(ir->dev, &ev); samples <<= bits; } diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c index addf9545e9bf4..497f26fd07baa 100644 --- a/drivers/media/video/cx88/cx88-mpeg.c +++ b/drivers/media/video/cx88/cx88-mpeg.c @@ -624,13 +624,11 @@ static int cx8802_request_acquire(struct cx8802_driver *drv) if (drv->advise_acquire) { - mutex_lock(&drv->core->lock); core->active_ref++; if (core->active_type_id == CX88_BOARD_NONE) { core->active_type_id = drv->type_id; drv->advise_acquire(drv); } - mutex_unlock(&drv->core->lock); mpeg_dbg(1,"%s() Post acquire GPIO=%x\n", __func__, cx_read(MO_GP0_IO)); } @@ -643,14 +641,12 @@ static int cx8802_request_release(struct cx8802_driver *drv) { struct cx88_core *core = drv->core; - mutex_lock(&drv->core->lock); if (drv->advise_release && --core->active_ref == 0) { drv->advise_release(drv); core->active_type_id = CX88_BOARD_NONE; mpeg_dbg(1,"%s() Post release GPIO=%x\n", __func__, cx_read(MO_GP0_IO)); } - mutex_unlock(&drv->core->lock); return 0; } @@ -713,18 +709,17 @@ int cx8802_register_driver(struct cx8802_driver *drv) drv->request_release = cx8802_request_release; memcpy(driver, drv, sizeof(*driver)); + mutex_lock(&drv->core->lock); err = drv->probe(driver); if (err == 0) { i++; - mutex_lock(&drv->core->lock); list_add_tail(&driver->drvlist, &dev->drvlist); - mutex_unlock(&drv->core->lock); } else { printk(KERN_ERR "%s/2: cx8802 probe failed, err = %d\n", dev->core->name, err); } - + mutex_unlock(&drv->core->lock); } return i ? 0 : -ENODEV; @@ -748,6 +743,8 @@ int cx8802_unregister_driver(struct cx8802_driver *drv) dev->pci->subsystem_device, dev->core->board.name, dev->core->boardnr); + mutex_lock(&dev->core->lock); + list_for_each_entry_safe(d, dtmp, &dev->drvlist, drvlist) { /* only unregister the correct driver type */ if (d->type_id != drv->type_id) @@ -755,15 +752,14 @@ int cx8802_unregister_driver(struct cx8802_driver *drv) err = d->remove(d); if (err == 0) { - mutex_lock(&drv->core->lock); list_del(&d->drvlist); - mutex_unlock(&drv->core->lock); kfree(d); } else printk(KERN_ERR "%s/2: cx8802 driver remove " "failed (%d)\n", dev->core->name, err); } + mutex_unlock(&dev->core->lock); } return err; @@ -827,6 +823,8 @@ static void __devexit cx8802_remove(struct pci_dev *pci_dev) flush_request_modules(dev); + mutex_lock(&dev->core->lock); + if (!list_empty(&dev->drvlist)) { struct cx8802_driver *drv, *tmp; int err; @@ -838,9 +836,7 @@ static void __devexit cx8802_remove(struct pci_dev *pci_dev) list_for_each_entry_safe(drv, tmp, &dev->drvlist, drvlist) { err = drv->remove(drv); if (err == 0) { - mutex_lock(&drv->core->lock); list_del(&drv->drvlist); - mutex_unlock(&drv->core->lock); } else printk(KERN_ERR "%s/2: cx8802 driver remove " "failed (%d)\n", dev->core->name, err); @@ -848,6 +844,8 @@ static void __devexit cx8802_remove(struct pci_dev *pci_dev) } } + mutex_unlock(&dev->core->lock); + /* Destroy any 8802 reference. 
*/ dev->core->dvbdev = NULL; diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h index c9981e77416a6..93a94bff471fd 100644 --- a/drivers/media/video/cx88/cx88.h +++ b/drivers/media/video/cx88/cx88.h @@ -495,6 +495,8 @@ struct cx8802_driver { int (*suspend)(struct pci_dev *pci_dev, pm_message_t state); int (*resume)(struct pci_dev *pci_dev); + /* Callers to the following functions must hold core->lock */ + /* MPEG 8802 -> mini driver - Driver probe and configuration */ int (*probe)(struct cx8802_driver *drv); int (*remove)(struct cx8802_driver *drv); @@ -551,8 +553,9 @@ struct cx8802_dev { /* for switching modulation types */ unsigned char ts_gen_cntrl; - /* List of attached drivers */ + /* List of attached drivers; must hold core->lock to access */ struct list_head drvlist; + struct work_struct request_module_wk; }; @@ -675,6 +678,8 @@ int cx88_audio_thread(void *data); int cx8802_register_driver(struct cx8802_driver *drv); int cx8802_unregister_driver(struct cx8802_driver *drv); + +/* Caller must hold core->lock */ struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board_type btype); /* ----------------------------------------------------------- */ diff --git a/drivers/media/video/m52790.c b/drivers/media/video/m52790.c index 5e1c9a81984ca..303ffa7df4aca 100644 --- a/drivers/media/video/m52790.c +++ b/drivers/media/video/m52790.c @@ -174,7 +174,7 @@ static int m52790_probe(struct i2c_client *client, v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); - state = kmalloc(sizeof(struct m52790_state), GFP_KERNEL); + state = kzalloc(sizeof(struct m52790_state), GFP_KERNEL); if (state == NULL) return -ENOMEM; diff --git a/drivers/media/video/msm/Kconfig b/drivers/media/video/msm/Kconfig new file mode 100644 index 0000000000000..142c28641ea75 --- /dev/null +++ b/drivers/media/video/msm/Kconfig @@ -0,0 +1,88 @@ +comment "Qualcomm MSM Camera And Video" + +menuconfig MSM_CAMERA + bool "Qualcomm MSM camera and video capture support" + depends on ARCH_MSM + help + Say Y here to enable selecting the video adapters for + Qualcomm msm camera and video encoding + +config 720P_CAMERA + bool "Qualcomm MSM camera with 720P video support" + depends on MSM_CAMERA + help + 720P video support + +config MSM_CAMERA_V4L2 + bool "Video For Linux interface to MSM camera" + depends on MSM_CAMERA && VIDEO_V4L2 && EXPERIMENTAL + help + Say Y here to enable the V4L2 interface for the MSM camera. + Not everything works through this interface and it has not + been thoroughly tested. 
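+# Note: the V4L2 option builds msm_v4l2.o in addition to the core msm_camera.o driver; see the Makefile in this directory.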
+ +config MSM_CAMERA_DEBUG + bool "Qualcomm MSM camera debugging with printk" + depends on MSM_CAMERA + help + Enable printk() debug for msm camera + +comment "Camera Sensor Selection" +config MT9T013 + bool "Sensor mt9t013 (BAYER 3M)" + depends on MSM_CAMERA + ---help--- + MICRON 3M Bayer Sensor with AutoFocus + +config MT9D112 + bool "Sensor mt9d112 (YUV 2M)" + depends on MSM_CAMERA + ---help--- + MICRON 2M YUV Sensor + +config MT9P012 + bool "Sensor mt9p012 (BAYER 5M)" + depends on MSM_CAMERA + ---help--- + MICRON 5M Bayer Sensor with Autofocus + +config MSM_CAMERA_AF_FOXCONN + bool "FOXCONN Module" + depends on MT9P012 + ---help--- + This driver supports FOXCONN AF module for 5M Bayer sensor + +config S5K3E2FX + bool "Sensor s5k3e2fx (Samsung 5M)" + depends on MSM_CAMERA + ---help--- + Samsung 5M with Autofocus + +config S5K6AAFX + bool "Sensor s5k6aafx (Samsung 1.3M)" + depends on MSM_CAMERA + default n + ---help--- + Samsung 1.3M without Autofocus + +config OV8810 + bool "Sensor ov8810" + depends on MSM_CAMERA + default n + ---help--- + OmniVision 8M Bayer Sensor + +config OV9665 + bool "Sensor ov9665" + depends on MSM_CAMERA + default n + ---help--- + OmniVision 3M YUV Sensor + +config S5K3H1GX + bool "Sensor s5k3h1gx" + depends on MSM_CAMERA + default n + ---help--- + Samsung 8M 3H1 MIPI AF Sensor + diff --git a/drivers/media/video/msm/Makefile b/drivers/media/video/msm/Makefile new file mode 100644 index 0000000000000..26d74658d0a1b --- /dev/null +++ b/drivers/media/video/msm/Makefile @@ -0,0 +1,12 @@ +obj-$(CONFIG_MT9T013) += mt9t013.o mt9t013_reg.o +obj-$(CONFIG_MT9D112) += mt9d112.o mt9d112_reg.o +obj-$(CONFIG_OV9665) += ov9665.o ov9665_reg.o +obj-$(CONFIG_MT9P012) += mt9p012_fox.o mt9p012_reg.o +obj-$(CONFIG_MSM_CAMERA) += msm_camera.o +obj-$(CONFIG_MSM_CAMERA_V4L2) += msm_v4l2.o +obj-$(CONFIG_S5K3E2FX) += s5k3e2fx.o +obj-$(CONFIG_S5K6AAFX) += s5k6aafx.o s5k6aafx_reg_mecha.o +obj-$(CONFIG_ARCH_MSM_ARM11) += msm_vfe7x.o msm_io7x.o +obj-$(CONFIG_ARCH_QSD8X50) += msm_vfe8x.o msm_vfe8x_proc.o msm_io8x.o +obj-$(CONFIG_OV8810) += ov8810.o +obj-$(CONFIG_S5K3H1GX) += s5k3h1gx.o s5k3h1gx_reg.o diff --git a/drivers/media/video/msm/msm_camera.c b/drivers/media/video/msm/msm_camera.c new file mode 100644 index 0000000000000..aace48743b006 --- /dev/null +++ b/drivers/media/video/msm/msm_camera.c @@ -0,0 +1,2895 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +/* FIXME: management of mutexes */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +DEFINE_MUTEX(hlist_mut); + +#define MSM_MAX_CAMERA_SENSORS 5 + +#define ERR_USER_COPY(to) pr_err("%s(%d): copy %s user\n", \ + __func__, __LINE__, ((to) ? 
"to" : "from")) +#define ERR_COPY_FROM_USER() ERR_USER_COPY(0) +#define ERR_COPY_TO_USER() ERR_USER_COPY(1) + +static struct class *msm_class; +static dev_t msm_devno; +static LIST_HEAD(msm_sensors); + +#define __CONTAINS(r, v, l, field) ({ \ + typeof(r) __r = r; \ + typeof(v) __v = v; \ + typeof(v) __e = __v + l; \ + int res = __v >= __r->field && \ + __e <= __r->field + __r->len; \ + res; \ +}) + +#define CONTAINS(r1, r2, field) ({ \ + typeof(r2) __r2 = r2; \ + __CONTAINS(r1, __r2->field, __r2->len, field); \ +}) + +#define IN_RANGE(r, v, field) ({ \ + typeof(r) __r = r; \ + typeof(v) __vv = v; \ + int res = ((__vv >= __r->field) && \ + (__vv < (__r->field + __r->len))); \ + res; \ +}) + +#define OVERLAPS(r1, r2, field) ({ \ + typeof(r1) __r1 = r1; \ + typeof(r2) __r2 = r2; \ + typeof(__r2->field) __v = __r2->field; \ + typeof(__v) __e = __v + __r2->len - 1; \ + int res = (IN_RANGE(__r1, __v, field) || \ + IN_RANGE(__r1, __e, field)); \ + res; \ +}) + +static inline void free_qcmd(struct msm_queue_cmd *qcmd) +{ + if (!qcmd || !qcmd->on_heap) + return; + CDBG("%s qcmd->on_heap:%d\n",__func__,qcmd->on_heap); + if (!--qcmd->on_heap) + kfree(qcmd); +} + +static void msm_queue_init(struct msm_device_queue *queue, const char *name) +{ + spin_lock_init(&queue->lock); + queue->len = 0; + queue->max = 0; + queue->name = name; + INIT_LIST_HEAD(&queue->list); + init_waitqueue_head(&queue->wait); +} + +static void msm_enqueue(struct msm_device_queue *queue, + struct list_head *entry) +{ + unsigned long flags; + spin_lock_irqsave(&queue->lock, flags); + queue->len++; + + if (queue->len > queue->max) { + queue->max = queue->len; +#if 0 + pr_info("%s: queue %s new max is %d\n", __func__, + queue->name, queue->max); +#else + if(queue->max < 1024) + pr_info("%s: queue %s new max is %d\n", __func__, + queue->name, queue->max); +#endif + } + list_add_tail(entry, &queue->list); + wake_up(&queue->wait); + CDBG("%s: woke up %s\n", __func__, queue->name); + spin_unlock_irqrestore(&queue->lock, flags); +} + +#define msm_dequeue(queue, member) ({ \ + unsigned long flags; \ + struct msm_device_queue *__q = (queue); \ + struct msm_queue_cmd *qcmd = 0; \ + spin_lock_irqsave(&__q->lock, flags); \ + if (!list_empty(&__q->list)) { \ + __q->len--; \ + qcmd = list_first_entry(&__q->list, \ + struct msm_queue_cmd, member); \ + if (qcmd) { \ + list_del_init(&qcmd->member); \ + } \ + } \ + spin_unlock_irqrestore(&__q->lock, flags); \ + qcmd; \ +}) + + +#define msm_queue_drain(queue, member) do { \ + unsigned long flags; \ + struct msm_device_queue *__q = (queue); \ + struct msm_queue_cmd *qcmd; \ + spin_lock_irqsave(&__q->lock, flags); \ + CDBG("%s: draining queue %s\n", __func__, __q->name); \ + while (!list_empty(&__q->list)) { \ + qcmd = list_first_entry(&__q->list, \ + struct msm_queue_cmd, member); \ + list_del_init(&qcmd->member); \ + free_qcmd(qcmd); \ + }; \ + __q->len = 0; \ + spin_unlock_irqrestore(&__q->lock, flags); \ +} while(0) + +static int check_overlap(struct hlist_head *ptype, + unsigned long paddr, + unsigned long len) +{ + struct msm_pmem_region *region; + struct msm_pmem_region t = { .paddr = paddr, .len = len }; + struct hlist_node *node; + + hlist_for_each_entry(region, node, ptype, list) { + if (CONTAINS(region, &t, paddr) || + CONTAINS(&t, region, paddr) || + OVERLAPS(region, &t, paddr)) { + printk(KERN_ERR + " region (PHYS %p len %ld)" + " clashes with registered region" + " (paddr %p len %ld)\n", + (void *)t.paddr, t.len, + (void *)region->paddr, region->len); + return -1; + } + } + + return 0; 
+} + +static int check_pmem_info(struct msm_pmem_info *info, int len) +{ + if (info->offset & (PAGE_SIZE - 1)) { + pr_err("%s: pmem offset is not page-aligned\n", __func__); + goto error; + } + + if (info->offset < len && + info->offset + info->len <= len && + info->y_off < len && + info->cbcr_off < len) + return 0; + +error: + pr_err("%s: check failed: off %d len %d y %d cbcr %d (total len %d)\n", + __func__, + info->offset, + info->len, + info->y_off, + info->cbcr_off, + len); + return -EINVAL; +} + +static int msm_pmem_table_add(struct hlist_head *ptype, + struct msm_pmem_info *info) +{ + struct file *file; + unsigned long paddr; + unsigned long kvstart; + unsigned long len; + int rc; + struct msm_pmem_region *region; + + rc = get_pmem_file(info->fd, &paddr, &kvstart, &len, &file); + if (rc < 0) { + pr_err("%s: get_pmem_file fd %d error %d\n", + __func__, + info->fd, rc); + return rc; + } + + if (!info->len) + info->len = len; + + rc = check_pmem_info(info, len); + if (rc < 0) + return rc; + + paddr += info->offset; + kvstart += info->offset; + len = info->len; + + if (check_overlap(ptype, paddr, len) < 0) + return -EINVAL; + + CDBG("%s: type %d, paddr 0x%lx, vaddr 0x%lx\n", + __func__, + info->type, paddr, (unsigned long)info->vaddr); + + region = kzalloc(sizeof(struct msm_pmem_region), GFP_KERNEL); + if (!region) + return -ENOMEM; + + INIT_HLIST_NODE(&region->list); + + region->paddr = paddr; + region->kvaddr = kvstart; + region->len = len; + region->file = file; + memcpy(&region->info, info, sizeof(region->info)); + + hlist_add_head(&(region->list), ptype); + + return 0; +} + +/* return of 0 means failure */ +static uint8_t msm_pmem_region_lookup(struct hlist_head *ptype, + int pmem_type, struct msm_pmem_region *reg, uint8_t maxcount) +{ + struct msm_pmem_region *region; + struct msm_pmem_region *regptr; + struct hlist_node *node, *n; + + uint8_t rc = 0; + + regptr = reg; + mutex_lock(&hlist_mut); + hlist_for_each_entry_safe(region, node, n, ptype, list) { + if (region->info.type == pmem_type && + region->info.vfe_can_write) { + *regptr = *region; + rc += 1; + if (rc >= maxcount) + break; + regptr++; + } + } + mutex_unlock(&hlist_mut); + return rc; +} + +static int msm_pmem_frame_ptov_lookup(struct msm_sync *sync, + unsigned long pyaddr, unsigned long pcbcraddr, + struct msm_pmem_region **pmem_region, + int take_from_vfe) +{ + struct hlist_node *node, *n; + struct msm_pmem_region *region; + + hlist_for_each_entry_safe(region, node, n, &sync->pmem_frames, list) { + if (pyaddr == (region->paddr + region->info.y_off) && +#ifndef CONFIG_ARCH_MSM7225 + pcbcraddr == (region->paddr + + region->info.cbcr_off) && +#endif + region->info.vfe_can_write) { + *pmem_region = region; + region->info.vfe_can_write = !take_from_vfe; +#ifdef CONFIG_ARCH_MSM7225 + if (pcbcraddr != (region->paddr + region->info.cbcr_off)) { + pr_err("%s cbcr addr = %lx, NOT EQUAL to region->paddr + region->info.cbcr_off = %lx\n", + __func__, pcbcraddr, region->paddr + region->info.cbcr_off); + } +#endif + return 0; + } + } + + return -EINVAL; +} + +static unsigned long msm_pmem_stats_ptov_lookup(struct msm_sync *sync, + unsigned long addr, int *fd) +{ + struct msm_pmem_region *region; + struct hlist_node *node, *n; + + hlist_for_each_entry_safe(region, node, n, &sync->pmem_stats, list) { + if (addr == region->paddr && region->info.vfe_can_write) { + /* offset since we could pass vaddr inside a + * registered pmem buffer */ + *fd = region->info.fd; + region->info.vfe_can_write = 0; + return (unsigned long)(region->info.vaddr); +
} + } +#if 1 + printk("msm_pmem_stats_ptov_lookup: lookup vaddr..\n"); + hlist_for_each_entry_safe(region, node, n, &sync->pmem_stats, list) { + if (addr == (unsigned long)(region->info.vaddr)) { + /* offset since we could pass vaddr inside a + * registered pmem buffer */ + *fd = region->info.fd; + region->info.vfe_can_write = 0; + return (unsigned long)(region->info.vaddr); + } + } +#endif + return 0; +} + +static unsigned long msm_pmem_frame_vtop_lookup(struct msm_sync *sync, + unsigned long buffer, + uint32_t yoff, uint32_t cbcroff, int fd) +{ + struct msm_pmem_region *region; + struct hlist_node *node, *n; + + hlist_for_each_entry_safe(region, + node, n, &sync->pmem_frames, list) { + if (((unsigned long)(region->info.vaddr) == buffer) && + (region->info.y_off == yoff) && + (region->info.cbcr_off == cbcroff) && + (region->info.fd == fd) && + (region->info.vfe_can_write == 0)) { + region->info.vfe_can_write = 1; + return region->paddr; + } + } + + return 0; +} + +static unsigned long msm_pmem_stats_vtop_lookup( + struct msm_sync *sync, + unsigned long buffer, + int fd) +{ + struct msm_pmem_region *region; + struct hlist_node *node, *n; + + hlist_for_each_entry_safe(region, node, n, &sync->pmem_stats, list) { + if (((unsigned long)(region->info.vaddr) == buffer) && + (region->info.fd == fd) && + region->info.vfe_can_write == 0) { + region->info.vfe_can_write = 1; + return region->paddr; + } + } + + return 0; +} + +static int __msm_pmem_table_del(struct msm_sync *sync, + struct msm_pmem_info *pinfo) +{ + int rc = 0; + struct msm_pmem_region *region; + struct hlist_node *node, *n; + + switch (pinfo->type) { +#ifndef CONFIG_720P_CAMERA + case MSM_PMEM_OUTPUT1: + case MSM_PMEM_OUTPUT2: +#else + case MSM_PMEM_VIDEO: + case MSM_PMEM_PREVIEW: +#endif + case MSM_PMEM_THUMBNAIL: + case MSM_PMEM_MAINIMG: + case MSM_PMEM_RAW_MAINIMG: + hlist_for_each_entry_safe(region, node, n, + &sync->pmem_frames, list) { + + if (pinfo->type == region->info.type && + pinfo->vaddr == region->info.vaddr && + pinfo->fd == region->info.fd) { + hlist_del(node); + put_pmem_file(region->file); + kfree(region); + } + } + break; + + case MSM_PMEM_AEC_AWB: + case MSM_PMEM_AF: + hlist_for_each_entry_safe(region, node, n, + &sync->pmem_stats, list) { + + if (pinfo->type == region->info.type && + pinfo->vaddr == region->info.vaddr && + pinfo->fd == region->info.fd) { + hlist_del(node); + put_pmem_file(region->file); + kfree(region); + } + } + break; + + default: + rc = -EINVAL; + break; + } + + return rc; +} + +static int msm_pmem_table_del(struct msm_sync *sync, void __user *arg) +{ + struct msm_pmem_info info; + + if (copy_from_user(&info, arg, sizeof(info))) { + ERR_COPY_FROM_USER(); + return -EFAULT; + } + + return __msm_pmem_table_del(sync, &info); +} + +static int __msm_get_frame(struct msm_sync *sync, + struct msm_frame *frame) +{ + int rc = 0; + + struct msm_pmem_region *region; + struct msm_queue_cmd *qcmd = NULL; + struct msm_vfe_resp *vdata; + struct msm_vfe_phy_info *pphy; + + if (&sync->frame_q) { + qcmd = msm_dequeue(&sync->frame_q, list_frame); + } + + if (!qcmd) { + pr_err("%s: no preview frame.\n", __func__); + return -EAGAIN; + } + + vdata = (struct msm_vfe_resp *)(qcmd->command); + pphy = &vdata->phy; + + rc = msm_pmem_frame_ptov_lookup(sync, + pphy->y_phy, + pphy->cbcr_phy, + &region, + 1); /* give frame to user space */ + + if (rc < 0) { + pr_err("%s: cannot get frame, invalid lookup address " + "y %x cbcr %x\n", + __func__, + pphy->y_phy, + pphy->cbcr_phy); + goto err; + } + + frame->buffer = (unsigned
long)region->info.vaddr; + frame->y_off = region->info.y_off; + frame->cbcr_off = region->info.cbcr_off; + frame->fd = region->info.fd; + frame->path = vdata->phy.output_id; + CDBG("%s: y %x, cbcr %x, qcmd %x, virt_addr %x\n", + __func__, + pphy->y_phy, pphy->cbcr_phy, (int) qcmd, (int) frame->buffer); + +err: + free_qcmd(qcmd); + return rc; +} + +static int msm_get_frame(struct msm_sync *sync, void __user *arg) +{ + int rc = 0; + struct msm_frame frame; + + if (copy_from_user(&frame, + arg, + sizeof(struct msm_frame))) { + ERR_COPY_FROM_USER(); + return -EFAULT; + } + + rc = __msm_get_frame(sync, &frame); + if (rc < 0) + return rc; + + if (sync->croplen) { + if (frame.croplen != sync->croplen) { + pr_err("%s: invalid frame croplen %d," + "expecting %d\n", + __func__, + frame.croplen, + sync->croplen); + return -EINVAL; + } + + if (copy_to_user((void *)frame.cropinfo, + sync->cropinfo, + sync->croplen)) { + ERR_COPY_TO_USER(); + return -EFAULT; + } + } + + if (copy_to_user((void *)arg, + &frame, sizeof(struct msm_frame))) { + ERR_COPY_TO_USER(); + rc = -EFAULT; + } + + CDBG("%s: got frame\n", __func__); + + return rc; +} + +static int msm_enable_vfe(struct msm_sync *sync, void __user *arg) +{ + int rc = -EIO; + struct camera_enable_cmd cfg; + + if (copy_from_user(&cfg, + arg, + sizeof(struct camera_enable_cmd))) { + ERR_COPY_FROM_USER(); + return -EFAULT; + } + + if (sync->vfefn.vfe_enable) + rc = sync->vfefn.vfe_enable(&cfg); + + CDBG("%s: rc %d\n", __func__, rc); + return rc; +} + +static int msm_disable_vfe(struct msm_sync *sync, void __user *arg) +{ + int rc = -EIO; + struct camera_enable_cmd cfg; + + if (copy_from_user(&cfg, + arg, + sizeof(struct camera_enable_cmd))) { + ERR_COPY_FROM_USER(); + return -EFAULT; + } + + if (sync->vfefn.vfe_disable) + rc = sync->vfefn.vfe_disable(&cfg, NULL); + + CDBG("%s: rc %d\n", __func__, rc); + return rc; +} + +static struct msm_queue_cmd *__msm_control(struct msm_sync *sync, + struct msm_device_queue *queue, + struct msm_queue_cmd *qcmd, + int timeout) +{ + int rc; + + msm_enqueue(&sync->event_q, &qcmd->list_config); + + if (!queue) + return NULL; + + /* wait for config status */ + rc = wait_event_interruptible_timeout( + queue->wait, + !list_empty_careful(&queue->list), + timeout); + if (list_empty_careful(&queue->list)) { + if (!rc) + rc = -ETIMEDOUT; + if (rc < 0) { + pr_err("%s: wait_event error %d\n", __func__, rc); + /* qcmd may be still on the event_q, in which case we + * need to remove it. Alternatively, qcmd may have + * been dequeued and processed already, in which case + * the list removal will be a no-op. + */ + list_del_init(&qcmd->list_config); + return ERR_PTR(rc); + } + } + + qcmd = msm_dequeue(queue, list_control); + BUG_ON(!qcmd); + + return qcmd; +} + +static struct msm_queue_cmd *__msm_control_nb(struct msm_sync *sync, + struct msm_queue_cmd *qcmd_to_copy) +{ + /* Since this is a non-blocking command, we cannot use qcmd_to_copy and + * its data, since they are on the stack. We replicate them on the heap + * and mark them on_heap so that they get freed when the config thread + * dequeues them. 
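+ * The matching cleanup is free_qcmd(), which only kfree()s a command once its on_heap count drops to zero.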
+ */ + + struct msm_ctrl_cmd *udata; + struct msm_ctrl_cmd *udata_to_copy = qcmd_to_copy->command; + + struct msm_queue_cmd *qcmd = + kzalloc(sizeof(*qcmd_to_copy) + + sizeof(*udata_to_copy) + + udata_to_copy->length, + GFP_KERNEL); + if (!qcmd) { + pr_err("%s: out of memory\n", __func__); + return ERR_PTR(-ENOMEM); + } + + *qcmd = *qcmd_to_copy; + udata = qcmd->command = qcmd + 1; + memcpy(udata, udata_to_copy, sizeof(*udata)); + udata->value = udata + 1; + memcpy(udata->value, udata_to_copy->value, udata_to_copy->length); + + qcmd->on_heap = 1; + + /* qcmd_resp will be set to NULL */ + return __msm_control(sync, NULL, qcmd, 0); +} + +static int msm_control(struct msm_control_device *ctrl_pmsm, + int block, + void __user *arg) +{ + int rc = 0; + + struct msm_sync *sync = ctrl_pmsm->pmsm->sync; + void __user *uptr; + struct msm_ctrl_cmd udata; + struct msm_queue_cmd qcmd; + struct msm_queue_cmd *qcmd_resp = NULL; + uint8_t data[50]; + + if (copy_from_user(&udata, arg, sizeof(struct msm_ctrl_cmd))) { + ERR_COPY_FROM_USER(); + rc = -EFAULT; + goto end; + } + + uptr = udata.value; + udata.value = data; + + qcmd.on_heap = 0; + qcmd.type = MSM_CAM_Q_CTRL; + qcmd.command = &udata; + + if (udata.length) { + if (udata.length > sizeof(data)) { + pr_err("%s: user data too large (%d, max is %d)\n", + __func__, + udata.length, + sizeof(data)); + rc = -EIO; + goto end; + } + if (copy_from_user(udata.value, uptr, udata.length)) { + ERR_COPY_FROM_USER(); + rc = -EFAULT; + goto end; + } + } + + if (unlikely(!block)) { + qcmd_resp = __msm_control_nb(sync, &qcmd); + goto end; + } + + qcmd_resp = __msm_control(sync, + &ctrl_pmsm->ctrl_q, + &qcmd, MAX_SCHEDULE_TIMEOUT); + + if (!qcmd_resp || IS_ERR(qcmd_resp)) { + /* Do not free qcmd_resp here. If the config thread read it, + * then it has already been freed, and we timed out because + * we did not receive a MSM_CAM_IOCTL_CTRL_CMD_DONE. If the + * config thread itself is blocked and not dequeueing commands, + * then it will either eventually unblock and process them, + * or when it is killed, qcmd will be freed in + * msm_release_config. + */ + rc = PTR_ERR(qcmd_resp); + qcmd_resp = NULL; + goto end; + } + + if (qcmd_resp->command) { + udata = *(struct msm_ctrl_cmd *)qcmd_resp->command; + if (udata.length > 0) { + if (copy_to_user(uptr, + udata.value, + udata.length)) { + ERR_COPY_TO_USER(); + rc = -EFAULT; + goto end; + } + } + udata.value = uptr; + + if (copy_to_user((void *)arg, &udata, + sizeof(struct msm_ctrl_cmd))) { + ERR_COPY_TO_USER(); + rc = -EFAULT; + goto end; + } + } + +end: + free_qcmd(qcmd_resp); + CDBG("%s: rc %d\n", __func__, rc); + return rc; +} + +/* Divert frames for post-processing by delivering them to the config thread; + * when post-processing is done, it will return the frame to the frame thread. 
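+ * Only frames flagged in sync->pp_mask are diverted, and the lookup below leaves vfe_can_write set so the VFE may keep writing to the buffer.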
+ */ +static int msm_divert_frame(struct msm_sync *sync, + struct msm_vfe_resp *data, + struct msm_stats_event_ctrl *se) +{ + struct msm_pmem_region *region; + struct msm_postproc buf; + int rc; + + pr_info("%s: preview PP sync->pp_mask %d\n", __func__, sync->pp_mask); + + if (!(sync->pp_mask & PP_PREV)) { + pr_err("%s: diverting preview frame but not in PP_PREV!\n", + __func__); + return -EINVAL; + } + + rc = msm_pmem_frame_ptov_lookup(sync, + data->phy.y_phy, + data->phy.cbcr_phy, + ®ion, + 0); /* vfe can still write to frame */ + if (rc < 0) { + CDBG("%s: msm_pmem_frame_ptov_lookup failed\n", __func__); + return rc; + } + + buf.fmain.buffer = (unsigned long)region->info.vaddr; + buf.fmain.y_off = region->info.y_off; + buf.fmain.cbcr_off = region->info.cbcr_off; + buf.fmain.fd = region->info.fd; + + CDBG("%s: buf %ld fd %d\n", + __func__, buf.fmain.buffer, + buf.fmain.fd); + if (copy_to_user((void *)(se->stats_event.data), + &(buf.fmain), + sizeof(struct msm_frame))) { + ERR_COPY_TO_USER(); + return -EFAULT; + } + + return 0; +} + +static int msm_divert_snapshot(struct msm_sync *sync, + struct msm_vfe_resp *data, + struct msm_stats_event_ctrl *se) +{ + struct msm_postproc buf; + struct msm_pmem_region region; + + CDBG("%s: preview PP sync->pp_mask %d\n", __func__, sync->pp_mask); + + if (!(sync->pp_mask & (PP_SNAP|PP_RAW_SNAP))) { + pr_err("%s: diverting snapshot but not in PP_SNAP!\n", + __func__); + return -EINVAL; + } + + memset(®ion, 0, sizeof(region)); + buf.fmnum = msm_pmem_region_lookup(&sync->pmem_frames, + MSM_PMEM_MAINIMG, + ®ion, 1); + if (buf.fmnum == 1) { + buf.fmain.buffer = (uint32_t)region.info.vaddr; + buf.fmain.y_off = region.info.y_off; + buf.fmain.cbcr_off = region.info.cbcr_off; + buf.fmain.fd = region.info.fd; + } else { + if (buf.fmnum > 1) + pr_err("%s: MSM_PMEM_MAINIMG lookup found %d\n", + __func__, buf.fmnum); + buf.fmnum = msm_pmem_region_lookup(&sync->pmem_frames, + MSM_PMEM_RAW_MAINIMG, + ®ion, 1); + if (buf.fmnum == 1) { + buf.fmain.path = MSM_FRAME_PREV_2; + buf.fmain.buffer = (uint32_t)region.info.vaddr; + buf.fmain.fd = region.info.fd; + } else { + pr_err("%s: pmem lookup fail (found %d)\n", + __func__, buf.fmnum); + return -EIO; + } + } + + CDBG("%s: snapshot copy_to_user!\n", __func__); + if (copy_to_user((void *)(se->stats_event.data), &buf, sizeof(buf))) { + ERR_COPY_TO_USER(); + return -EFAULT; + } + + return 0; +} + +static int msm_get_stats(struct msm_sync *sync, void __user *arg) +{ + int timeout; + int rc = 0; + + struct msm_stats_event_ctrl se; + + struct msm_queue_cmd *qcmd = NULL; + struct msm_ctrl_cmd *ctrl = NULL; + struct msm_vfe_resp *data = NULL; + struct msm_stats_buf stats; + + if (copy_from_user(&se, arg, + sizeof(struct msm_stats_event_ctrl))) { + ERR_COPY_FROM_USER(); + return -EFAULT; + } + + timeout = (int)se.timeout_ms; + + CDBG("%s: timeout %d\n", __func__, timeout); + rc = wait_event_interruptible_timeout( + sync->event_q.wait, + !list_empty_careful(&sync->event_q.list), + msecs_to_jiffies(timeout)); + if (list_empty_careful(&sync->event_q.list)) { + if (rc == 0) + rc = -ETIMEDOUT; + if (rc < 0) { + pr_err("\n%s: error %d\n", __func__, rc); + return rc; + } + } + CDBG("%s: returned from wait: %d\n", __func__, rc); + + rc = 0; + + qcmd = msm_dequeue(&sync->event_q, list_config); + BUG_ON(!qcmd); + + /* HTC: check qcmd */ + if (!qcmd) { + rc = -EFAULT; + pr_err("%s: qcmd is NULL, rc %d\n", __func__, rc); + return rc; + } + + CDBG("%s: received from DSP %d\n", __func__, qcmd->type); + + switch (qcmd->type) { + case 
MSM_CAM_Q_VFE_EVT: + case MSM_CAM_Q_VFE_MSG: + data = (struct msm_vfe_resp *)(qcmd->command); + + /* adsp event and message */ + se.resptype = MSM_CAM_RESP_STAT_EVT_MSG; + + /* 0 - msg from aDSP, 1 - event from mARM */ + se.stats_event.type = data->evt_msg.type; + se.stats_event.msg_id = data->evt_msg.msg_id; + se.stats_event.len = data->evt_msg.len; + + CDBG("%s: qcmd->type %d length %d msd_id %d\n", __func__, + qcmd->type, + se.stats_event.len, + se.stats_event.msg_id); + + if ((data->type == VFE_MSG_STATS_AF) || + (data->type == VFE_MSG_STATS_WE)) { + + stats.buffer = + msm_pmem_stats_ptov_lookup(sync, + data->phy.sbuf_phy, + &(stats.fd)); + if (!stats.buffer) { + pr_err("%s: msm_pmem_stats_ptov_lookup error, addr = %x\n", + __func__,data->phy.sbuf_phy); +#if 1 + se.resptype = MSM_CAM_RESP_MAX; +#else + rc = -EINVAL; + goto failure; +#endif + } + + if (copy_to_user((void *)(se.stats_event.data), + &stats, + sizeof(struct msm_stats_buf))) { + ERR_COPY_TO_USER(); + rc = -EFAULT; + goto failure; + } + } else if ((data->evt_msg.len > 0) && + (data->type == VFE_MSG_GENERAL)) { + if (copy_to_user((void *)(se.stats_event.data), + data->evt_msg.data, + data->evt_msg.len)) { + ERR_COPY_TO_USER(); + rc = -EFAULT; + goto failure; + } + } else { +#ifndef CONFIG_720P_CAMERA + if ((sync->pp_mask & PP_PREV) && + (data->type == VFE_MSG_OUTPUT1 || + data->type == VFE_MSG_OUTPUT2)) + rc = msm_divert_frame(sync, data, &se); + else if ((sync->pp_mask & (PP_SNAP|PP_RAW_SNAP)) && + data->type == VFE_MSG_SNAPSHOT) + rc = msm_divert_snapshot(sync, + data, &se); +#else + if ((sync->pp_mask & PP_PREV) && + (data->type == VFE_MSG_OUTPUT_P)) + rc = msm_divert_frame(sync, data, &se); + else if ((sync->pp_mask & (PP_SNAP|PP_RAW_SNAP)) && + (data->type == VFE_MSG_SNAPSHOT || + data->type == VFE_MSG_OUTPUT_T || + data->type == VFE_MSG_OUTPUT_S)) + rc = msm_divert_snapshot(sync, + data, &se); +#endif + } + break; + + case MSM_CAM_Q_CTRL: + /* control command from control thread */ + ctrl = (struct msm_ctrl_cmd *)(qcmd->command); + + CDBG("%s: qcmd->type %d length %d\n", __func__, + qcmd->type, ctrl->length); + + if (ctrl->length > 0) { + if (copy_to_user((void *)(se.ctrl_cmd.value), + ctrl->value, + ctrl->length)) { + ERR_COPY_TO_USER(); + rc = -EFAULT; + goto failure; + } + } + + se.resptype = MSM_CAM_RESP_CTRL; + + /* what to control */ + se.ctrl_cmd.type = ctrl->type; + se.ctrl_cmd.length = ctrl->length; + se.ctrl_cmd.resp_fd = ctrl->resp_fd; + break; + +#ifdef CONFIG_MSM_CAMERA_V4L2 + case MSM_CAM_Q_V4L2_REQ: + /* control command from v4l2 client */ + ctrl = (struct msm_ctrl_cmd *)(qcmd->command); + + CDBG("%s: qcmd->type %d len %d\n", __func__, qcmd->type, ctrl->length); + + if (ctrl->length > 0) { + if (copy_to_user((void *)(se.ctrl_cmd.value), + ctrl->value, ctrl->length)) { + ERR_COPY_TO_USER(); + rc = -EFAULT; + goto failure; + } + } + + /* 2 tells config thread this is v4l2 request */ + se.resptype = MSM_CAM_RESP_V4L2; + + /* what to control */ + se.ctrl_cmd.type = ctrl->type; + se.ctrl_cmd.length = ctrl->length; + break; +#endif + + default: + rc = -EFAULT; + goto failure; + } /* switch qcmd->type */ + + if (copy_to_user((void *)arg, &se, sizeof(se))) { + ERR_COPY_TO_USER(); + rc = -EFAULT; + goto failure; + } + +failure: + free_qcmd(qcmd); + + CDBG("%s: %d\n", __func__, rc); + return rc; +} + +static int msm_ctrl_cmd_done(struct msm_control_device *ctrl_pmsm, + void __user *arg) +{ + void __user *uptr; + struct msm_queue_cmd *qcmd = &ctrl_pmsm->qcmd; + struct msm_ctrl_cmd *command = &ctrl_pmsm->ctrl; + + if 
(copy_from_user(command, arg, sizeof(*command))) { + ERR_COPY_FROM_USER(); + return -EFAULT; + } + + qcmd->on_heap = 0; + qcmd->command = command; + uptr = command->value; + + if (command->length > 0) { + command->value = ctrl_pmsm->ctrl_data; + if (command->length > sizeof(ctrl_pmsm->ctrl_data)) { + pr_err("%s: user data %d is too big (max %d)\n", + __func__, command->length, + sizeof(ctrl_pmsm->ctrl_data)); + return -EINVAL; + } + if (copy_from_user(command->value, + uptr, + command->length)) { + ERR_COPY_FROM_USER(); + return -EFAULT; + } + } else + command->value = NULL; + + CDBG("%s: end\n", __func__); + + /* wake up control thread */ + msm_enqueue(&ctrl_pmsm->ctrl_q, &qcmd->list_control); + + return 0; +} + +static int msm_config_vfe(struct msm_sync *sync, void __user *arg) +{ + struct msm_vfe_cfg_cmd cfgcmd; + struct msm_pmem_region region[8]; + struct axidata axi_data; + + if (!sync->vfefn.vfe_config) { + pr_err("%s: no vfe_config!\n", __func__); + return -EIO; + } + + if (copy_from_user(&cfgcmd, arg, sizeof(cfgcmd))) { + ERR_COPY_FROM_USER(); + return -EFAULT; + } + + memset(&axi_data, 0, sizeof(axi_data)); + + CDBG("%s: cmd_type %d\n", __func__, cfgcmd.cmd_type); + + switch (cfgcmd.cmd_type) { + case CMD_STATS_ENABLE: + axi_data.bufnum1 = + msm_pmem_region_lookup(&sync->pmem_stats, + MSM_PMEM_AEC_AWB, ®ion[0], + NUM_WB_EXP_STAT_OUTPUT_BUFFERS); + + /* HTC: check axi_data.bufnum1 if out of bound of "region" array */ + if (!axi_data.bufnum1 || axi_data.bufnum1 >= + (sizeof(region)/sizeof(struct msm_pmem_region))) { + pr_err("%s %d: pmem region lookup error or out of bound\n", + __func__, __LINE__); + return -EINVAL; + } + + axi_data.bufnum2 = + msm_pmem_region_lookup(&sync->pmem_stats, + MSM_PMEM_AF, ®ion[axi_data.bufnum1], + NUM_AF_STAT_OUTPUT_BUFFERS); + if (!axi_data.bufnum1 || !axi_data.bufnum2) { + pr_err("%s: pmem region lookup error\n", __func__); + return -EINVAL; + } + axi_data.region = ®ion[0]; + return sync->vfefn.vfe_config(&cfgcmd, &axi_data); + case CMD_STATS_AF_ENABLE: + axi_data.bufnum1 = + msm_pmem_region_lookup(&sync->pmem_stats, + MSM_PMEM_AF, ®ion[0], + NUM_AF_STAT_OUTPUT_BUFFERS); + if (!axi_data.bufnum1) { + pr_err("%s %d: pmem region lookup error\n", + __func__, __LINE__); + return -EINVAL; + } + axi_data.region = ®ion[0]; + return sync->vfefn.vfe_config(&cfgcmd, &axi_data); + + case CMD_STATS_AEC_AWB_ENABLE: + axi_data.bufnum1 = + msm_pmem_region_lookup(&sync->pmem_stats, + MSM_PMEM_AEC_AWB, ®ion[0], + NUM_WB_EXP_STAT_OUTPUT_BUFFERS); + if (!axi_data.bufnum1) { + pr_err("%s %d: pmem region lookup error\n", + __func__, __LINE__); + return -EINVAL; + } + axi_data.region = ®ion[0]; + return sync->vfefn.vfe_config(&cfgcmd, &axi_data); + case CMD_GENERAL: + case CMD_STATS_DISABLE: + return sync->vfefn.vfe_config(&cfgcmd, NULL); + default: + pr_err("%s: unknown command type %d\n", + __func__, cfgcmd.cmd_type); + } + + return -EINVAL; +} + +static int msm_frame_axi_cfg(struct msm_sync *sync, + struct msm_vfe_cfg_cmd *cfgcmd) +{ + int rc = -EIO; + struct axidata axi_data; + void *data = &axi_data; + struct msm_pmem_region region[8]; + int pmem_type; + + memset(&axi_data, 0, sizeof(axi_data)); + + switch (cfgcmd->cmd_type) { + +#ifndef CONFIG_720P_CAMERA + case CMD_AXI_CFG_OUT1: + pmem_type = MSM_PMEM_OUTPUT1; + axi_data.bufnum1 = + msm_pmem_region_lookup(&sync->pmem_frames, pmem_type, + ®ion[0], 8); + if (!axi_data.bufnum1) { + pr_err("%s %d: pmem region lookup error\n", + __func__, __LINE__); + return -EINVAL; + } + break; + + case CMD_AXI_CFG_OUT2: + pmem_type = 
MSM_PMEM_OUTPUT2; + axi_data.bufnum2 = + msm_pmem_region_lookup(&sync->pmem_frames, pmem_type, + ®ion[0], 8); + if (!axi_data.bufnum2) { + pr_err("%s %d: pmem region lookup error (empty %d)\n", + __func__, __LINE__, + hlist_empty(&sync->pmem_frames)); + return -EINVAL; + } + break; + + case CMD_AXI_CFG_SNAP_O1_AND_O2: + pmem_type = MSM_PMEM_THUMBNAIL; + axi_data.bufnum1 = + msm_pmem_region_lookup(&sync->pmem_frames, pmem_type, + ®ion[0], 8); + + /* HTC: check axi_data.bufnum1 if out of bound of "region" array */ + if (!axi_data.bufnum1 || axi_data.bufnum1 >= + (sizeof(region)/sizeof(struct msm_pmem_region))) { + pr_err("%s %d: pmem region lookup error or out of bound\n", + __func__, __LINE__); + return -EINVAL; + } + + pmem_type = MSM_PMEM_MAINIMG; + axi_data.bufnum2 = + msm_pmem_region_lookup(&sync->pmem_frames, pmem_type, + ®ion[axi_data.bufnum1], 8); + if (!axi_data.bufnum2) { + pr_err("%s %d: pmem region lookup error\n", + __func__, __LINE__); + return -EINVAL; + } + break; +#else + case CMD_AXI_CFG_PREVIEW: + pmem_type = MSM_PMEM_PREVIEW; + axi_data.bufnum2 = + msm_pmem_region_lookup(&sync->pmem_frames, pmem_type, + ®ion[0], 8); + if (!axi_data.bufnum2) { + pr_err("%s %d: pmem region lookup error (empty %d)\n", + __func__, __LINE__, + hlist_empty(&sync->pmem_frames)); + return -EINVAL; + } + break; + + case CMD_AXI_CFG_VIDEO: + pmem_type = MSM_PMEM_PREVIEW; + axi_data.bufnum1 = + msm_pmem_region_lookup(&sync->pmem_frames, pmem_type, + ®ion[0], 8); + if (!axi_data.bufnum1) { + pr_err("%s %d: pmem region lookup error\n", + __func__, __LINE__); + return -EINVAL; + } + + pmem_type = MSM_PMEM_VIDEO; + axi_data.bufnum2 = + msm_pmem_region_lookup(&sync->pmem_frames, pmem_type, + ®ion[axi_data.bufnum1], + (8-(axi_data.bufnum1))); + if (!axi_data.bufnum2) { + pr_err("%s %d: pmem region lookup error\n", + __func__, __LINE__); + return -EINVAL; + } + break; + + + case CMD_AXI_CFG_SNAP: + pmem_type = MSM_PMEM_THUMBNAIL; + axi_data.bufnum1 = + msm_pmem_region_lookup(&sync->pmem_frames, pmem_type, + ®ion[0], 8); + if (!axi_data.bufnum1) { + pr_err("%s %d: pmem region lookup error\n", + __func__, __LINE__); + return -EINVAL; + } + + pmem_type = MSM_PMEM_MAINIMG; + axi_data.bufnum2 = + msm_pmem_region_lookup(&sync->pmem_frames, pmem_type, + ®ion[axi_data.bufnum1], + (8-(axi_data.bufnum1))); + if (!axi_data.bufnum2) { + pr_err("%s %d: pmem region lookup error\n", + __func__, __LINE__); + return -EINVAL; + } + break; +#endif + case CMD_RAW_PICT_AXI_CFG: + pmem_type = MSM_PMEM_RAW_MAINIMG; + axi_data.bufnum2 = + msm_pmem_region_lookup(&sync->pmem_frames, pmem_type, + ®ion[0], 8); + if (!axi_data.bufnum2) { + pr_err("%s %d: pmem region lookup error\n", + __func__, __LINE__); + return -EINVAL; + } + break; + + case CMD_GENERAL: + data = NULL; + break; + + default: + pr_err("%s: unknown command type %d\n", + __func__, cfgcmd->cmd_type); + return -EINVAL; + } + + axi_data.region = ®ion[0]; + + /* send the AXI configuration command to driver */ + + if (sync->vfefn.vfe_config) + rc = sync->vfefn.vfe_config(cfgcmd, data); + + return rc; +} + +static int msm_get_sensor_info(struct msm_sync *sync, void __user *arg) +{ + int rc = 0; + struct msm_camsensor_info info; + struct msm_camera_sensor_info *sdata; + + if (copy_from_user(&info, + arg, + sizeof(struct msm_camsensor_info))) { + ERR_COPY_FROM_USER(); + return -EFAULT; + } + + sdata = sync->pdev->dev.platform_data; + CDBG("%s: sensor_name %s\n", __func__, sdata->sensor_name); + + memcpy(&info.name[0], + sdata->sensor_name, + MAX_SENSOR_NAME); + 
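/* Flash support is reported to userspace purely from the presence of + * a flash_cfg entry in the board platform data (see the assignment + * below). For reference, a userspace caller reads this through the + * config or control node roughly as follows (sketch only, error + * handling omitted): + * struct msm_camsensor_info info; + * ioctl(fd, MSM_CAM_IOCTL_GET_SENSOR_INFO, &info); + * printf("%s flash=%d\n", info.name, info.flash_enabled); + */ +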
info.flash_enabled = !!sdata->flash_cfg; + + /* copy back to user space */ + if (copy_to_user((void *)arg, + &info, + sizeof(struct msm_camsensor_info))) { + ERR_COPY_TO_USER(); + rc = -EFAULT; + } + + return rc; +} + +static int __msm_put_frame_buf(struct msm_sync *sync, + struct msm_frame *pb) +{ + unsigned long pphy; + struct msm_vfe_cfg_cmd cfgcmd; + + int rc = -EIO; + + pphy = msm_pmem_frame_vtop_lookup(sync, + pb->buffer, + pb->y_off, pb->cbcr_off, pb->fd); + + if (pphy != 0) { + CDBG("%s: rel: vaddr %lx, paddr %lx\n", + __func__, + pb->buffer, pphy); + cfgcmd.cmd_type = CMD_FRAME_BUF_RELEASE; + cfgcmd.value = (void *)pb; + if (sync->vfefn.vfe_config) { + rc = sync->vfefn.vfe_config(&cfgcmd, &pphy); + } + } else { + pr_err("%s: msm_pmem_frame_vtop_lookup failed\n", + __func__); + rc = -EINVAL; + } + + return rc; +} + +static int msm_put_frame_buffer(struct msm_sync *sync, void __user *arg) +{ + struct msm_frame buf; + + if (copy_from_user(&buf, + arg, + sizeof(struct msm_frame))) { + ERR_COPY_FROM_USER(); + return -EFAULT; + } + + return __msm_put_frame_buf(sync, &buf); +} + +static int __msm_register_pmem(struct msm_sync *sync, + struct msm_pmem_info *pinfo) +{ + int rc = 0; + + switch (pinfo->type) { +#ifndef CONFIG_720P_CAMERA + case MSM_PMEM_OUTPUT1: + case MSM_PMEM_OUTPUT2: +#else + case MSM_PMEM_VIDEO: + case MSM_PMEM_PREVIEW: +#endif + case MSM_PMEM_THUMBNAIL: + case MSM_PMEM_MAINIMG: + case MSM_PMEM_RAW_MAINIMG: + rc = msm_pmem_table_add(&sync->pmem_frames, pinfo); + break; + + case MSM_PMEM_AEC_AWB: + case MSM_PMEM_AF: + rc = msm_pmem_table_add(&sync->pmem_stats, pinfo); + break; + + default: + rc = -EINVAL; + break; + } + + return rc; +} + +static int msm_register_pmem(struct msm_sync *sync, void __user *arg) +{ + struct msm_pmem_info info; + + if (copy_from_user(&info, arg, sizeof(info))) { + ERR_COPY_FROM_USER(); + return -EFAULT; + } + + return __msm_register_pmem(sync, &info); +} + +static int msm_stats_axi_cfg(struct msm_sync *sync, + struct msm_vfe_cfg_cmd *cfgcmd) +{ + int rc = -EIO; + struct axidata axi_data; + void *data = &axi_data; + + struct msm_pmem_region region[3]; + int pmem_type = MSM_PMEM_MAX; + + memset(&axi_data, 0, sizeof(axi_data)); + + switch (cfgcmd->cmd_type) { + case CMD_STATS_AXI_CFG: + pmem_type = MSM_PMEM_AEC_AWB; + break; + case CMD_STATS_AF_AXI_CFG: + pmem_type = MSM_PMEM_AF; + break; + case CMD_GENERAL: + data = NULL; + break; + default: + pr_err("%s: unknown command type %d\n", + __func__, cfgcmd->cmd_type); + return -EINVAL; + } + + if (cfgcmd->cmd_type != CMD_GENERAL) { + axi_data.bufnum1 = + msm_pmem_region_lookup(&sync->pmem_stats, pmem_type, + ®ion[0], NUM_WB_EXP_STAT_OUTPUT_BUFFERS); + if (!axi_data.bufnum1) { + pr_err("%s %d: pmem region lookup error\n", + __func__, __LINE__); + return -EINVAL; + } + axi_data.region = ®ion[0]; + } + + /* send the AEC/AWB STATS configuration command to driver */ + + if (sync->vfefn.vfe_config) + rc = sync->vfefn.vfe_config(cfgcmd, &axi_data); + + return rc; +} + +static int msm_put_stats_buffer(struct msm_sync *sync, void __user *arg) +{ + int rc = -EIO; + + struct msm_stats_buf buf; + unsigned long pphy; + struct msm_vfe_cfg_cmd cfgcmd; + + if (copy_from_user(&buf, arg, + sizeof(struct msm_stats_buf))) { + ERR_COPY_FROM_USER(); + return -EFAULT; + } + + CDBG("%s\n", __func__); + pphy = msm_pmem_stats_vtop_lookup(sync, buf.buffer, buf.fd); + + if (pphy != 0) { + if (buf.type == STAT_AEAW) + cfgcmd.cmd_type = CMD_STATS_BUF_RELEASE; + else if (buf.type == STAT_AF) + cfgcmd.cmd_type = 
CMD_STATS_AF_BUF_RELEASE; + else { + pr_err("%s: invalid buf type %d\n", + __func__, + buf.type); + rc = -EINVAL; + goto put_done; + } + + cfgcmd.value = (void *)&buf; + if (sync->vfefn.vfe_config) { + rc = sync->vfefn.vfe_config(&cfgcmd, &pphy); + if (rc < 0) + pr_err("%s: vfe_config error %d\n", + __func__, rc); + } else + pr_err("%s: vfe_config is NULL\n", __func__); + } else { + pr_err("%s: NULL physical address\n", __func__); + rc = -EINVAL; + } + +put_done: + return rc; +} + +static int msm_axi_config(struct msm_sync *sync, void __user *arg) +{ + struct msm_vfe_cfg_cmd cfgcmd; + + if (copy_from_user(&cfgcmd, arg, sizeof(cfgcmd))) { + ERR_COPY_FROM_USER(); + return -EFAULT; + } + + switch (cfgcmd.cmd_type) { +#ifndef CONFIG_720P_CAMERA + case CMD_AXI_CFG_OUT1: + case CMD_AXI_CFG_OUT2: + case CMD_AXI_CFG_SNAP_O1_AND_O2: +#else + case CMD_AXI_CFG_VIDEO: + case CMD_AXI_CFG_PREVIEW: + case CMD_AXI_CFG_SNAP: +#endif + case CMD_RAW_PICT_AXI_CFG: + return msm_frame_axi_cfg(sync, &cfgcmd); + + case CMD_STATS_AXI_CFG: + case CMD_STATS_AF_AXI_CFG: + return msm_stats_axi_cfg(sync, &cfgcmd); + + default: + pr_err("%s: unknown command type %d\n", + __func__, + cfgcmd.cmd_type); + return -EINVAL; + } + + return 0; +} + +static int __msm_get_pic(struct msm_sync *sync, struct msm_ctrl_cmd *ctrl) +{ + int rc = 0; + int tm; + + struct msm_queue_cmd *qcmd = NULL; + + tm = (int)ctrl->timeout_ms; + + rc = wait_event_interruptible_timeout( + sync->pict_q.wait, + !list_empty_careful(&sync->pict_q.list), + msecs_to_jiffies(tm)); + if (list_empty_careful(&sync->pict_q.list)) { + if (rc == 0) + return -ETIMEDOUT; + if (rc < 0) { + pr_err("%s: rc %d\n", __func__, rc); + return rc; + } + } + + rc = 0; + + qcmd = msm_dequeue(&sync->pict_q, list_pict); + BUG_ON(!qcmd); + + /* HTC: check qcmd */ + if (!qcmd) { + rc = -EFAULT; + pr_err("%s: qcmd is NULL, rc %d\n", __func__, rc); + return rc; + } + + if (qcmd->command != NULL) { + struct msm_ctrl_cmd *q = + (struct msm_ctrl_cmd *)qcmd->command; + ctrl->type = q->type; + ctrl->status = q->status; + } else { + ctrl->type = -1; + ctrl->status = -1; + } + + free_qcmd(qcmd); + + return rc; +} + +static int msm_get_pic(struct msm_sync *sync, void __user *arg) +{ + struct msm_ctrl_cmd ctrlcmd; + struct msm_pmem_region pic_pmem_region; + int rc; + unsigned long end; + int cline_mask; + + if (copy_from_user(&ctrlcmd, + arg, + sizeof(struct msm_ctrl_cmd))) { + ERR_COPY_FROM_USER(); + return -EFAULT; + } + + rc = __msm_get_pic(sync, &ctrlcmd); + if (rc < 0) + return rc; + + if (sync->croplen) { + if (ctrlcmd.length != sync->croplen) { + pr_err("%s: invalid len %d < %d\n", + __func__, + ctrlcmd.length, + sync->croplen); + return -EINVAL; + } + if (copy_to_user(ctrlcmd.value, + sync->cropinfo, + sync->croplen)) { + ERR_COPY_TO_USER(); + return -EFAULT; + } + } + + if (msm_pmem_region_lookup(&sync->pmem_frames, + MSM_PMEM_MAINIMG, + &pic_pmem_region, 1) == 0) { + pr_err("%s pmem region lookup error\n", __func__); + pr_info("%s probably getting RAW\n", __func__); + if (msm_pmem_region_lookup(&sync->pmem_frames, + MSM_PMEM_RAW_MAINIMG, + &pic_pmem_region, 1) == 0) { + pr_err("%s RAW pmem region lookup error\n", __func__); + return -EIO; + } + } + + cline_mask = cache_line_size() - 1; + end = pic_pmem_region.kvaddr + pic_pmem_region.len; + end = (end + cline_mask) & ~cline_mask; + + pr_info("%s: flushing cache for [%08lx, %08lx)\n", + __func__, + pic_pmem_region.kvaddr, end); + + /* HACK: Invalidate buffer */ + dmac_unmap_area((void*)pic_pmem_region.kvaddr, pic_pmem_region.len, + 
DMA_FROM_DEVICE); + pic_pmem_region.info.vfe_can_write = 0; + + CDBG("%s: copy snapshot frame to user\n", __func__); + if (copy_to_user((void *)arg, + &ctrlcmd, + sizeof(struct msm_ctrl_cmd))) { + ERR_COPY_TO_USER(); + return -EFAULT; + } + return 0; +} + +static int msm_set_crop(struct msm_sync *sync, void __user *arg) +{ + struct crop_info crop; + + if (copy_from_user(&crop, + arg, + sizeof(struct crop_info))) { + ERR_COPY_FROM_USER(); + return -EFAULT; + } + + if (!sync->croplen) { + sync->cropinfo = kzalloc(crop.len, GFP_KERNEL); + if (!sync->cropinfo) + return -ENOMEM; + } else if (sync->croplen < crop.len) + return -EINVAL; + + if (copy_from_user(sync->cropinfo, + crop.info, + crop.len)) { + ERR_COPY_FROM_USER(); + kfree(sync->cropinfo); + return -EFAULT; + } + + sync->croplen = crop.len; + + return 0; +} + +static int msm_pp_grab(struct msm_sync *sync, void __user *arg) +{ + uint32_t enable; + if (copy_from_user(&enable, arg, sizeof(enable))) { + ERR_COPY_FROM_USER(); + return -EFAULT; + } else { + enable &= PP_MASK; + if (enable & (enable - 1)) { + pr_err("%s: error: more than one PP request!\n", + __func__); + return -EINVAL; + } + if (sync->pp_mask) { + pr_err("%s: postproc %x is already enabled\n", + __func__, sync->pp_mask & enable); + return -EINVAL; + } + + CDBG("%s: sync->pp_mask %d enable %d\n", __func__, + sync->pp_mask, enable); + sync->pp_mask |= enable; + } + + return 0; +} + +static int msm_pp_release(struct msm_sync *sync, void __user *arg) +{ + uint32_t mask; + if (copy_from_user(&mask, arg, sizeof(uint32_t))) { + ERR_COPY_FROM_USER(); + return -EFAULT; + } + + mask &= PP_MASK; + if (!(sync->pp_mask & mask)) { + pr_warning("%s: pp not in progress for %x\n", __func__, + mask); + return -EINVAL; + } + + if ((mask & PP_PREV) && (sync->pp_mask & PP_PREV)) { + if (!sync->pp_prev) { + pr_err("%s: no preview frame to deliver!\n", __func__); + return -EINVAL; + } + pr_info("%s: delivering pp_prev\n", __func__); + msm_enqueue(&sync->frame_q, &sync->pp_prev->list_frame); + sync->pp_prev = NULL; + goto done; + } + + if (((mask & PP_SNAP) && (sync->pp_mask & PP_SNAP)) || + ((mask & PP_RAW_SNAP) && + (sync->pp_mask & PP_RAW_SNAP))) { + if (!sync->pp_snap) { + pr_err("%s: no snapshot to deliver!\n", __func__); + return -EINVAL; + } + pr_info("%s: delivering pp_snap\n", __func__); + msm_enqueue(&sync->pict_q, &sync->pp_snap->list_pict); + sync->pp_snap = NULL; + } + +done: + sync->pp_mask = 0; + return 0; +} + +static long msm_ioctl_common(struct msm_device *pmsm, + unsigned int cmd, + void __user *argp) +{ + CDBG("%s\n", __func__); + switch (cmd) { + case MSM_CAM_IOCTL_REGISTER_PMEM: + return msm_register_pmem(pmsm->sync, argp); + case MSM_CAM_IOCTL_UNREGISTER_PMEM: + return msm_pmem_table_del(pmsm->sync, argp); + default: + return -EINVAL; + } +} + +int msm_camera_flash(struct msm_sync *sync, int level) +{ + int flash_level = 0; + uint8_t phy_flash = 0; + int ret = 0; + + if (!sync->sdata->flash_cfg) { + pr_err("%s: camera flash is not supported.\n", __func__); + return -EINVAL; + } + + if (!sync->sdata->flash_cfg->num_flash_levels) { + pr_err("%s: no flash levels.\n", __func__); + return -EINVAL; + } + + sync->sdata->flash_cfg->postpone_led_mode = MSM_CAMERA_LED_OFF; + + switch (level) { + case MSM_CAMERA_LED_DEATH_RAY: + flash_level = FL_MODE_DEATH_RAY; + phy_flash = 1; + break; + case MSM_CAMERA_LED_LOW_FOR_SNAPSHOT: + /* postpone set led low*/ + sync->sdata->flash_cfg->postpone_led_mode = MSM_CAMERA_LED_LOW; + phy_flash = 0; + break; + case MSM_CAMERA_LED_HIGH: + /* postpone 
set led high*/ + sync->sdata->flash_cfg->postpone_led_mode = MSM_CAMERA_LED_HIGH; + phy_flash = 0; + break; + case MSM_CAMERA_LED_LOW: + flash_level = sync->sdata->flash_cfg->num_flash_levels / 2; + phy_flash = 1; + break; + case MSM_CAMERA_LED_OFF: + flash_level = 0; + phy_flash = 1; + break; + default: + pr_err("%s: invalid flash level %d.\n", __func__, level); + return -EINVAL; + } + + if (phy_flash) + ret = sync->sdata->flash_cfg->camera_flash(flash_level); + + return ret; +} + +static long msm_ioctl_config(struct file *filep, unsigned int cmd, + unsigned long arg) +{ + int rc = -EINVAL; + void __user *argp = (void __user *)arg; + struct msm_device *pmsm = filep->private_data; + + CDBG("%s: cmd %d\n", __func__, _IOC_NR(cmd)); + + switch (cmd) { + case MSM_CAM_IOCTL_GET_SENSOR_INFO: + rc = msm_get_sensor_info(pmsm->sync, argp); + break; + + case MSM_CAM_IOCTL_CONFIG_VFE: + /* Coming from config thread for update */ + rc = msm_config_vfe(pmsm->sync, argp); + break; + + case MSM_CAM_IOCTL_GET_STATS: + /* Coming from config thread wait + * for vfe statistics and control requests */ + rc = msm_get_stats(pmsm->sync, argp); + break; + + case MSM_CAM_IOCTL_ENABLE_VFE: + /* This request comes from control thread: + * enable either QCAMTASK or VFETASK */ + rc = msm_enable_vfe(pmsm->sync, argp); + break; + + case MSM_CAM_IOCTL_DISABLE_VFE: + /* This request comes from control thread: + * disable either QCAMTASK or VFETASK */ + rc = msm_disable_vfe(pmsm->sync, argp); + break; + + case MSM_CAM_IOCTL_VFE_APPS_RESET: + msm_camio_vfe_blk_reset(); + rc = 0; + break; + + case MSM_CAM_IOCTL_RELEASE_STATS_BUFFER: + rc = msm_put_stats_buffer(pmsm->sync, argp); + break; + + case MSM_CAM_IOCTL_AXI_CONFIG: + rc = msm_axi_config(pmsm->sync, argp); + break; + + case MSM_CAM_IOCTL_SET_CROP: + rc = msm_set_crop(pmsm->sync, argp); + break; + + case MSM_CAM_IOCTL_PP: + /* Grab one preview frame or one snapshot + * frame. + */ + rc = msm_pp_grab(pmsm->sync, argp); + break; + + case MSM_CAM_IOCTL_PP_DONE: + /* Release the preview of snapshot frame + * that was grabbed. 
+ */ + rc = msm_pp_release(pmsm->sync, argp); + break; + + case MSM_CAM_IOCTL_SENSOR_IO_CFG: + rc = pmsm->sync->sctrl.s_config(argp); + break; + + case MSM_CAM_IOCTL_FLASH_LED_CFG: { + uint32_t led_state; + if (copy_from_user(&led_state, argp, sizeof(led_state))) { + ERR_COPY_FROM_USER(); + rc = -EFAULT; + } else + rc = msm_camera_flash(pmsm->sync, led_state); + break; + } + case MSM_CAM_IOCTL_ENABLE_OUTPUT_IND: { + uint32_t enable; + if (copy_from_user(&enable, argp, sizeof(enable))) { + ERR_COPY_FROM_USER(); + rc = -EFAULT; + break; + } + pmsm->sync->report_preview_to_config = enable; + } + + default: + rc = msm_ioctl_common(pmsm, cmd, argp); + break; + } + + CDBG("%s: cmd %d DONE\n", __func__, _IOC_NR(cmd)); + return rc; +} + +static int msm_unblock_poll_frame(struct msm_sync *); + +static long msm_ioctl_frame(struct file *filep, unsigned int cmd, + unsigned long arg) +{ + int rc = -EINVAL; + void __user *argp = (void __user *)arg; + struct msm_device *pmsm = filep->private_data; + + switch (cmd) { + case MSM_CAM_IOCTL_GETFRAME: + /* Coming from frame thread to get frame + * after SELECT is done */ + rc = msm_get_frame(pmsm->sync, argp); + break; + case MSM_CAM_IOCTL_RELEASE_FRAME_BUFFER: + rc = msm_put_frame_buffer(pmsm->sync, argp); + break; + case MSM_CAM_IOCTL_UNBLOCK_POLL_FRAME: + rc = msm_unblock_poll_frame(pmsm->sync); + break; + default: + break; + } + + return rc; +} + + +static long msm_ioctl_control(struct file *filep, unsigned int cmd, + unsigned long arg) +{ + int rc = -EINVAL; + void __user *argp = (void __user *)arg; + struct msm_control_device *ctrl_pmsm = filep->private_data; + struct msm_device *pmsm = ctrl_pmsm->pmsm; + + switch (cmd) { + case MSM_CAM_IOCTL_CTRL_COMMAND: + /* Coming from control thread, may need to wait for + * command status */ + rc = msm_control(ctrl_pmsm, 1, argp); + break; + case MSM_CAM_IOCTL_CTRL_COMMAND_2: + /* Sends a message, returns immediately */ + rc = msm_control(ctrl_pmsm, 0, argp); + break; + case MSM_CAM_IOCTL_CTRL_CMD_DONE: + /* Config thread calls the control thread to notify it + * of the result of a MSM_CAM_IOCTL_CTRL_COMMAND. 
+ */ + rc = msm_ctrl_cmd_done(ctrl_pmsm, argp); + break; + case MSM_CAM_IOCTL_GET_PICTURE: + rc = msm_get_pic(pmsm->sync, argp); + break; + case MSM_CAM_IOCTL_GET_SENSOR_INFO: + rc = msm_get_sensor_info(pmsm->sync, argp); + break; + default: + rc = msm_ioctl_common(pmsm, cmd, argp); + break; + } + + return rc; +} + + +static void msm_show_time(void){ + struct timespec ts; + struct rtc_time tm; + getnstimeofday(&ts); + rtc_time_to_tm(ts.tv_sec, &tm); + pr_info(">>>>>>>>(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)<<<<<<<\n", + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); + return; +} + + +static int __msm_release(struct msm_sync *sync) +{ + struct msm_pmem_region *region; + struct hlist_node *hnode; + struct hlist_node *n; + pr_info("%s:sync->opencnt:%d \n", __func__, sync->opencnt); + mutex_lock(&sync->lock); + if (sync->opencnt) + sync->opencnt--; + + if (!sync->opencnt) { + + msm_show_time(); + + /* need to clean up system resource */ + if (sync->vfefn.vfe_release) + sync->vfefn.vfe_release(sync->pdev); + + kfree(sync->cropinfo); + sync->cropinfo = NULL; + sync->croplen = 0; + + /*sensor release moved to vfe_release*/ + + hlist_for_each_entry_safe(region, hnode, n, + &sync->pmem_frames, list) { + hlist_del(hnode); + put_pmem_file(region->file); + kfree(region); + } + + hlist_for_each_entry_safe(region, hnode, n, + &sync->pmem_stats, list) { + hlist_del(hnode); + put_pmem_file(region->file); + kfree(region); + } + + msm_queue_drain(&sync->event_q, list_config); + msm_queue_drain(&sync->frame_q, list_frame); + msm_queue_drain(&sync->pict_q, list_pict); + + wake_unlock(&sync->wake_suspend_lock); + wake_unlock(&sync->wake_lock); + + sync->apps_id = NULL; + CDBG("%s: completed\n", __func__); + } + mutex_unlock(&sync->lock); + + return 0; +} + +static int msm_release_config(struct inode *node, struct file *filep) +{ + int rc; + struct msm_device *pmsm = filep->private_data; + pr_info("%s: %s\n", __func__, filep->f_path.dentry->d_name.name); + rc = __msm_release(pmsm->sync); + if (!rc) { + pr_info("release config atomic_set pmsm->opened as 0\n"); + atomic_set(&pmsm->opened, 0); + } + return rc; +} + +static int msm_release_control(struct inode *node, struct file *filep) +{ + int rc; + struct msm_control_device *ctrl_pmsm = filep->private_data; + struct msm_device *pmsm = ctrl_pmsm->pmsm; + pr_info("%s: %s\n", __func__, filep->f_path.dentry->d_name.name); + rc = __msm_release(pmsm->sync); + if (!rc) { + msm_queue_drain(&ctrl_pmsm->ctrl_q, list_control); + kfree(ctrl_pmsm); + } + return rc; +} + +static int msm_release_frame(struct inode *node, struct file *filep) +{ + int rc; + struct msm_device *pmsm = filep->private_data; + pr_info("%s: %s\n", __func__, filep->f_path.dentry->d_name.name); + rc = __msm_release(pmsm->sync); + if (!rc) { + pr_info("release frame atomic_set pmsm->opened as 0\n"); + atomic_set(&pmsm->opened, 0); + } + return rc; +} + +static int msm_unblock_poll_frame(struct msm_sync *sync) +{ + unsigned long flags; + CDBG("%s\n", __func__); + spin_lock_irqsave(&sync->frame_q.lock, flags); + sync->unblock_poll_frame = 1; + wake_up(&sync->frame_q.wait); + spin_unlock_irqrestore(&sync->frame_q.lock, flags); + return 0; +} + +static unsigned int __msm_poll_frame(struct msm_sync *sync, + struct file *filep, + struct poll_table_struct *pll_table) +{ + int rc = 0; + unsigned long flags; + + poll_wait(filep, &sync->frame_q.wait, pll_table); + + spin_lock_irqsave(&sync->frame_q.lock, flags); + if (!list_empty_careful(&sync->frame_q.list)) + /* 
frame ready */ + rc = POLLIN | POLLRDNORM; + if (sync->unblock_poll_frame) { + CDBG("%s: sync->unblock_poll_frame is true\n", __func__); + rc |= POLLPRI; + sync->unblock_poll_frame = 0; + } + spin_unlock_irqrestore(&sync->frame_q.lock, flags); + + return rc; +} + +static unsigned int msm_poll_frame(struct file *filep, + struct poll_table_struct *pll_table) +{ + struct msm_device *pmsm = filep->private_data; + return __msm_poll_frame(pmsm->sync, filep, pll_table); +} + +/* + * This function executes in interrupt context. + */ + +static void *msm_vfe_sync_alloc(int size, + void *syncdata __attribute__((unused)), + gfp_t gfp) +{ + struct msm_queue_cmd *qcmd = + kzalloc(sizeof(struct msm_queue_cmd) + size, gfp); + if (qcmd) { + /* Becker and kant */ + memset(qcmd, 0x0, sizeof(struct msm_queue_cmd) + size); + + qcmd->on_heap = 1; + return qcmd + 1; + } + return NULL; +} + +static void msm_vfe_sync_free(void *ptr) +{ + if (ptr) { + struct msm_queue_cmd *qcmd = + (struct msm_queue_cmd *)ptr; + qcmd--; + if (qcmd->on_heap) + kfree(qcmd); + } +} + +/* + * This function may execute in interrupt context. + */ + +static void msm_vfe_sync(struct msm_vfe_resp *vdata, + enum msm_queue qtype, void *syncdata, + gfp_t gfp) +{ + struct msm_queue_cmd *qcmd = NULL; + struct msm_sync *sync = (struct msm_sync *)syncdata; + + if (!sync) { + pr_err("%s: no context in dsp callback.\n", __func__); + return; + } + + if (!sync->opencnt) { + pr_err("%s: SPURIOUS INTERRUPT\n", __func__); + return; + } + + qcmd = ((struct msm_queue_cmd *)vdata) - 1; + qcmd->type = qtype; + qcmd->command = vdata; + + CDBG("%s: qtype %d \n", __func__, qtype); + CDBG("%s: evt_msg.msg_id %d\n", __func__, vdata->evt_msg.msg_id); + CDBG("%s: evt_msg.exttype %d\n", __func__, vdata->evt_msg.exttype); + if (sync->sdata->flash_cfg) { + if (qtype == MSM_CAM_Q_VFE_MSG && + vdata->evt_msg.exttype == VFE_MSG_SNAPSHOT) { +#if defined(CONFIG_ARCH_MSM_ARM11) + if (vdata->evt_msg.msg_id == 4) + /* QDSP_VFETASK_MSG_VFE_START_ACK */ +#elif defined(CONFIG_ARCH_QSD8X50) + if (vdata->evt_msg.msg_id == 1) + /* VFE_MSG_ID_START_ACK */ +#endif + { + pr_info("flashlight: postpone_led_mode %d\n", + sync->sdata->flash_cfg->postpone_led_mode); + sync->sdata->flash_cfg->camera_flash( + sync->sdata->flash_cfg->postpone_led_mode); + } + } + } + + if (qtype != MSM_CAM_Q_VFE_MSG) + goto for_config; + + CDBG("%s: vdata->type %d\n", __func__, vdata->type); + switch (vdata->type) { +#ifndef CONFIG_720P_CAMERA + case VFE_MSG_OUTPUT1: + case VFE_MSG_OUTPUT2: +#else + case VFE_MSG_OUTPUT_P: +#endif + + if (sync->pp_mask & PP_PREV) { + CDBG("%s: PP_PREV in progress: phy_y %x phy_cbcr %x\n", + __func__, + vdata->phy.y_phy, + vdata->phy.cbcr_phy); + if (sync->pp_prev) + pr_warning("%s: overwriting pp_prev!\n", + __func__); + pr_info("%s: sending preview to config\n", __func__); + sync->pp_prev = qcmd; + if (qcmd->on_heap) + qcmd->on_heap++; + break; + } + CDBG("%s: msm_enqueue frame_q\n", __func__); + if (qcmd->on_heap) + qcmd->on_heap++; + msm_enqueue(&sync->frame_q, &qcmd->list_frame); + break; + +#ifdef CONFIG_720P_CAMERA + case VFE_MSG_OUTPUT_V: + if (qcmd->on_heap) + qcmd->on_heap++; + CDBG("%s: msm_enqueue video frame_q\n", __func__); + msm_enqueue(&sync->frame_q, &qcmd->list_frame); + break; +#endif + + case VFE_MSG_SNAPSHOT: + if (sync->pp_mask & (PP_SNAP | PP_RAW_SNAP)) { + CDBG("%s: PP_SNAP in progress: pp_mask %x\n", + __func__, sync->pp_mask); + if (sync->pp_snap) + pr_warning("%s: overwriting pp_snap!\n", + __func__); + pr_info("%s: sending snapshot to config\n", 
__func__); + sync->pp_snap = qcmd; + if (qcmd->on_heap) + qcmd->on_heap++; + break; + } + if (qcmd->on_heap) + qcmd->on_heap++; + msm_enqueue(&sync->pict_q, &qcmd->list_pict); + break; + + default: + CDBG("%s: qtype %d not handled\n", __func__, vdata->type); + /* fall through, send to config. */ + } + +for_config: + msm_enqueue(&sync->event_q, &qcmd->list_config); +} + +static struct msm_vfe_callback msm_vfe_s = { + .vfe_resp = msm_vfe_sync, + .vfe_alloc = msm_vfe_sync_alloc, + .vfe_free = msm_vfe_sync_free, +}; + +static int __msm_open(struct msm_sync *sync, const char *const apps_id) +{ + int rc = 0; + + mutex_lock(&sync->lock); + if (sync->apps_id && strcmp(sync->apps_id, apps_id)) { + pr_err("%s(%s): sensor %s is already opened for %s\n", + __func__, + apps_id, + sync->sdata->sensor_name, + sync->apps_id); + rc = -EBUSY; + goto msm_open_done; + } + + sync->apps_id = apps_id; + + if (!sync->opencnt) { + wake_lock(&sync->wake_suspend_lock); + wake_lock(&sync->wake_lock); + + msm_camvfe_fn_init(&sync->vfefn, sync); + if (sync->vfefn.vfe_init) { + rc = sync->vfefn.vfe_init(&msm_vfe_s, + sync->pdev); + if (rc < 0) { + pr_err("%s: vfe_init failed at %d\n", + __func__, rc); + goto msm_open_done; + } + rc = sync->sctrl.s_init(sync->sdata); + if (rc < 0) { + pr_err("%s: sensor init failed: %d\n", + __func__, rc); + sync->vfefn.vfe_release(sync->pdev); + goto msm_open_done; + } + } else { + pr_err("%s: no sensor init func\n", __func__); + rc = -ENODEV; + goto msm_open_done; + } + + if (rc >= 0) { + INIT_HLIST_HEAD(&sync->pmem_frames); + INIT_HLIST_HEAD(&sync->pmem_stats); + sync->unblock_poll_frame = 0; + } + } + sync->opencnt++; + +msm_open_done: + mutex_unlock(&sync->lock); + return rc; +} + +static int msm_open_common(struct inode *inode, struct file *filep, + int once) +{ + int rc; + struct msm_device *pmsm = + container_of(inode->i_cdev, struct msm_device, cdev); + + pr_info("%s: open %s\n", __func__, filep->f_path.dentry->d_name.name); + + if (atomic_cmpxchg(&pmsm->opened, 0, 1) && once) { + pr_err("%s: %s is already opened.\n", + __func__, + filep->f_path.dentry->d_name.name); + return -EBUSY; + } + + rc = nonseekable_open(inode, filep); + if (rc < 0) { + pr_err("%s: nonseekable_open error %d\n", __func__, rc); + return rc; + } + + rc = __msm_open(pmsm->sync, MSM_APPS_ID_PROP); + if (rc < 0) + return rc; + + filep->private_data = pmsm; + + CDBG("%s: rc %d\n", __func__, rc); + return rc; +} + + + +static int msm_open(struct inode *inode, struct file *filep) +{ + msm_show_time(); + return msm_open_common(inode, filep, 1); +} + +static int msm_open_control(struct inode *inode, struct file *filep) +{ + int rc; + + struct msm_control_device *ctrl_pmsm = + kzalloc(sizeof(struct msm_control_device), GFP_KERNEL); + if (!ctrl_pmsm) + return -ENOMEM; + + rc = msm_open_common(inode, filep, 0); + if (rc < 0) { + kfree(ctrl_pmsm); + return rc; + } + + ctrl_pmsm->pmsm = filep->private_data; + filep->private_data = ctrl_pmsm; + msm_queue_init(&ctrl_pmsm->ctrl_q, "control"); + + CDBG("%s: rc %d\n", __func__, rc); + return rc; +} + +#ifdef CONFIG_MSM_CAMERA_V4L2 +static int __msm_v4l2_control(struct msm_sync *sync, + struct msm_ctrl_cmd *out) +{ + int rc = 0; + + struct msm_queue_cmd *qcmd = NULL, *rcmd = NULL; + struct msm_ctrl_cmd *ctrl; + struct msm_device_queue FIXME; + + /* wake up config thread, 4 is for V4L2 application */ + qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_KERNEL); + if (!qcmd) { + pr_err("%s: cannot allocate buffer\n", __func__); + rc = -ENOMEM; + goto end; + } + qcmd->type = 
MSM_CAM_Q_V4L2_REQ; + qcmd->command = out; + qcmd->on_heap = 1; + + rcmd = __msm_control(sync, &FIXME, qcmd, out->timeout_ms); + if (IS_ERR(rcmd)) { + rc = PTR_ERR(rcmd); + goto end; + } + + ctrl = (struct msm_ctrl_cmd *)(rcmd->command); + /* FIXME: we should just set out->length = ctrl->length; */ + BUG_ON(out->length < ctrl->length); + memcpy(out->value, ctrl->value, ctrl->length); + +end: + free_qcmd(rcmd); + CDBG("%s: rc %d\n", __func__, rc); + return rc; +} +#endif + +static const struct file_operations msm_fops_config = { + .owner = THIS_MODULE, + .open = msm_open, + .unlocked_ioctl = msm_ioctl_config, + .release = msm_release_config, +}; + +static const struct file_operations msm_fops_control = { + .owner = THIS_MODULE, + .open = msm_open_control, + .unlocked_ioctl = msm_ioctl_control, + .release = msm_release_control, +}; + +static const struct file_operations msm_fops_frame = { + .owner = THIS_MODULE, + .open = msm_open, + .unlocked_ioctl = msm_ioctl_frame, + .release = msm_release_frame, + .poll = msm_poll_frame, +}; + +static int msm_setup_cdev(struct msm_device *msm, + int node, + dev_t devno, + const char *suffix, + const struct file_operations *fops) +{ + int rc = -ENODEV; + + struct device *device = + device_create(msm_class, NULL, + devno, NULL, + "%s%d", suffix, node); + + if (IS_ERR(device)) { + rc = PTR_ERR(device); + pr_err("%s: error creating device: %d\n", __func__, rc); + return rc; + } + + cdev_init(&msm->cdev, fops); + msm->cdev.owner = THIS_MODULE; + + rc = cdev_add(&msm->cdev, devno, 1); + if (rc < 0) { + pr_err("%s: error adding cdev: %d\n", __func__, rc); + device_destroy(msm_class, devno); + return rc; + } + + return rc; +} + +static int msm_tear_down_cdev(struct msm_device *msm, dev_t devno) +{ + cdev_del(&msm->cdev); + device_destroy(msm_class, devno); + return 0; +} + +static uint32_t led_ril_status_value; +static uint32_t led_wimax_status_value; +static uint32_t led_hotspot_status_value; +static uint16_t led_low_temp_limit; +static uint16_t led_low_cap_limit; +static struct kobject *led_status_obj; + +static ssize_t led_ril_status_get(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t length; + length = sprintf(buf, "%d\n", led_ril_status_value); + return length; +} + +static ssize_t led_ril_status_set(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + uint32_t tmp = 0; + + tmp = buf[0] - 0x30; /* only get the first char */ + + led_ril_status_value = tmp; + pr_info("led_ril_status_value = %d\n", led_ril_status_value); + return count; +} + +static ssize_t led_wimax_status_get(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t length; + length = sprintf(buf, "%d\n", led_wimax_status_value); + return length; +} + +static ssize_t led_wimax_status_set(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + uint32_t tmp = 0; + + tmp = buf[0] - 0x30; /* only get the first char */ + + led_wimax_status_value = tmp; + pr_info("led_wimax_status_value = %d\n", led_wimax_status_value); + return count; +} + +static ssize_t led_hotspot_status_get(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t length; + length = sprintf(buf, "%d\n", led_hotspot_status_value); + return length; +} + +static ssize_t led_hotspot_status_set(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + uint32_t tmp = 0; + + tmp = buf[0] - 0x30; /* only get the first char */ + + led_hotspot_status_value = tmp; + 
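/* As with the led_ril/led_wimax attributes above, only the first + * character of the sysfs write is interpreted (buf[0] - '0'), so + * only single-digit values can be stored through this interface. + */ +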
pr_info("led_hotspot_status_value = %d\n", led_hotspot_status_value); + return count; +} + +static ssize_t low_temp_limit_get(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t length; + length = sprintf(buf, "%d\n", led_low_temp_limit); + return length; +} + +static ssize_t low_cap_limit_get(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t length; + length = sprintf(buf, "%d\n", led_low_cap_limit); + return length; +} + +static DEVICE_ATTR(led_ril_status, 0644, + led_ril_status_get, + led_ril_status_set); + +static DEVICE_ATTR(led_wimax_status, 0644, + led_wimax_status_get, + led_wimax_status_set); + +static DEVICE_ATTR(led_hotspot_status, 0644, + led_hotspot_status_get, + led_hotspot_status_set); + +static DEVICE_ATTR(low_temp_limit, 0444, + low_temp_limit_get, + NULL); + +static DEVICE_ATTR(low_cap_limit, 0444, + low_cap_limit_get, + NULL); + +static int msm_camera_sysfs_init(struct msm_sync* sync) +{ + int ret = 0; + CDBG("msm_camera:kobject creat and add\n"); + led_status_obj = kobject_create_and_add("camera_led_status", NULL); + if (led_status_obj == NULL) { + pr_info("msm_camera: subsystem_register failed\n"); + ret = -ENOMEM; + goto error; + } + + ret = sysfs_create_file(led_status_obj, + &dev_attr_led_ril_status.attr); + if (ret) { + pr_info("msm_camera: sysfs_create_file ril failed\n"); + ret = -EFAULT; + goto error; + } + + ret = sysfs_create_file(led_status_obj, + &dev_attr_led_wimax_status.attr); + if (ret) { + pr_info("msm_camera: sysfs_create_file wimax failed\n"); + ret = -EFAULT; + goto error; + } + + ret = sysfs_create_file(led_status_obj, + &dev_attr_led_hotspot_status.attr); + if (ret) { + pr_info("msm_camera: sysfs_create_file hotspot failed\n"); + ret = -EFAULT; + goto error; + } + + ret = sysfs_create_file(led_status_obj, + &dev_attr_low_temp_limit.attr); + if (ret) { + pr_info("msm_camera: sysfs_create_file low_temp_limit failed\n"); + ret = -EFAULT; + goto error; + } + + ret = sysfs_create_file(led_status_obj, + &dev_attr_low_cap_limit.attr); + if (ret) { + pr_info("msm_camera: sysfs_create_file low_cap_limit failed\n"); + ret = -EFAULT; + goto error; + } + + led_low_temp_limit = sync->sdata->flash_cfg->low_temp_limit; + led_low_cap_limit = sync->sdata->flash_cfg->low_cap_limit; + + return ret; +error: + kobject_del(led_status_obj); + return ret; +} + +#ifdef CONFIG_MSM_CAMERA_V4L2 +int msm_v4l2_register(struct msm_v4l2_driver *drv) +{ + /* FIXME: support multiple sensors */ + if (list_empty(&msm_sensors)) + return -ENODEV; + + drv->sync = list_first_entry(&msm_sensors, struct msm_sync, list); + drv->open = __msm_open; + drv->release = __msm_release; + drv->ctrl = __msm_v4l2_control; + drv->reg_pmem = __msm_register_pmem; + drv->get_frame = __msm_get_frame; + drv->put_frame = __msm_put_frame_buf; + drv->get_pict = __msm_get_pic; + drv->drv_poll = __msm_poll_frame; + + return 0; +} +EXPORT_SYMBOL(msm_v4l2_register); + +int msm_v4l2_unregister(struct msm_v4l2_driver *drv) +{ + drv->sync = NULL; + return 0; +} +EXPORT_SYMBOL(msm_v4l2_unregister); +#endif + +static int msm_sync_init(struct msm_sync *sync, + struct platform_device *pdev, + int (*sensor_probe)(struct msm_camera_sensor_info *, + struct msm_sensor_ctrl *), int camera_node) +{ + int rc = 0; + struct msm_sensor_ctrl sctrl; + sync->sdata = pdev->dev.platform_data; + + msm_queue_init(&sync->event_q, "event"); + msm_queue_init(&sync->frame_q, "frame"); + msm_queue_init(&sync->pict_q, "pict"); + + wake_lock_init(&sync->wake_suspend_lock, WAKE_LOCK_SUSPEND, 
"msm_camera_wake"); + wake_lock_init(&sync->wake_lock, WAKE_LOCK_IDLE, "msm_camera"); + + rc = msm_camio_probe_on(pdev); + if (rc < 0) + return rc; + sctrl.node = camera_node; + pr_info("sctrl.node %d\n", sctrl.node); + rc = sensor_probe(sync->sdata, &sctrl); + if (rc >= 0) { + sync->pdev = pdev; + sync->sctrl = sctrl; + } + msm_camio_probe_off(pdev); + if (rc < 0) { + pr_err("%s: failed to initialize %s\n", + __func__, + sync->sdata->sensor_name); + wake_lock_destroy(&sync->wake_suspend_lock); + wake_lock_destroy(&sync->wake_lock); + return rc; + } + + sync->opencnt = 0; + mutex_init(&sync->lock); + CDBG("%s: initialized %s\n", __func__, sync->sdata->sensor_name); + return rc; +} + +static int msm_sync_destroy(struct msm_sync *sync) +{ + wake_lock_destroy(&sync->wake_suspend_lock); + wake_lock_destroy(&sync->wake_lock); + return 0; +} + +static int msm_device_init(struct msm_device *pmsm, + struct msm_sync *sync, + int node) +{ + int dev_num = 3 * node; + int rc = msm_setup_cdev(pmsm, node, + MKDEV(MAJOR(msm_devno), dev_num), + "control", &msm_fops_control); + if (rc < 0) { + pr_err("%s: error creating control node: %d\n", __func__, rc); + return rc; + } + + rc = msm_setup_cdev(pmsm + 1, node, + MKDEV(MAJOR(msm_devno), dev_num + 1), + "config", &msm_fops_config); + if (rc < 0) { + pr_err("%s: error creating config node: %d\n", __func__, rc); + msm_tear_down_cdev(pmsm, MKDEV(MAJOR(msm_devno), + dev_num)); + return rc; + } + + rc = msm_setup_cdev(pmsm + 2, node, + MKDEV(MAJOR(msm_devno), dev_num + 2), + "frame", &msm_fops_frame); + if (rc < 0) { + pr_err("%s: error creating frame node: %d\n", __func__, rc); + msm_tear_down_cdev(pmsm, + MKDEV(MAJOR(msm_devno), dev_num)); + msm_tear_down_cdev(pmsm + 1, + MKDEV(MAJOR(msm_devno), dev_num + 1)); + return rc; + } + + atomic_set(&pmsm[0].opened, 0); + atomic_set(&pmsm[1].opened, 0); + atomic_set(&pmsm[2].opened, 0); + + pmsm[0].sync = sync; + pmsm[1].sync = sync; + pmsm[2].sync = sync; + + return rc; +} + +int msm_camera_drv_start(struct platform_device *dev, + int (*sensor_probe)(struct msm_camera_sensor_info *, + struct msm_sensor_ctrl *)) +{ + struct msm_device *pmsm = NULL; + struct msm_sync *sync; + int rc = -ENODEV; + static int camera_node = 0; + + if (camera_node >= MSM_MAX_CAMERA_SENSORS) { + pr_err("%s: too many camera sensors\n", __func__); + return rc; + } + + if (!msm_class) { + /* There are three device nodes per sensor */ + rc = alloc_chrdev_region(&msm_devno, 0, + 3 * MSM_MAX_CAMERA_SENSORS, + "msm_camera"); + if (rc < 0) { + pr_err("%s: failed to allocate chrdev: %d\n", __func__, + rc); + return rc; + } + + msm_class = class_create(THIS_MODULE, "msm_camera"); + if (IS_ERR(msm_class)) { + rc = PTR_ERR(msm_class); + pr_err("%s: create device class failed: %d\n", + __func__, rc); + return rc; + } + } + + pmsm = kzalloc(sizeof(struct msm_device) * 3 + + sizeof(struct msm_sync), GFP_ATOMIC); + if (!pmsm) + return -ENOMEM; + sync = (struct msm_sync *)(pmsm + 3); + + rc = msm_sync_init(sync, dev, sensor_probe, camera_node); + if (rc < 0) { + kfree(pmsm); + return rc; + } + + CDBG("%s: setting camera node %d\n", __func__, camera_node); + rc = msm_device_init(pmsm, sync, camera_node); + if (rc < 0) { + msm_sync_destroy(sync); + kfree(pmsm); + return rc; + } + + if (!!sync->sdata->flash_cfg) + msm_camera_sysfs_init(sync); + + camera_node++; + list_add(&sync->list, &msm_sensors); + return rc; +} +EXPORT_SYMBOL(msm_camera_drv_start); diff --git a/drivers/media/video/msm/msm_io7x.c b/drivers/media/video/msm/msm_io7x.c new file mode 100644 
index 0000000000000..4a33d2e5727de --- /dev/null +++ b/drivers/media/video/msm/msm_io7x.c @@ -0,0 +1,321 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include +#include +#include +#include +#include +#include + +#define CAMIF_CFG_RMSK 0x1fffff +#define CAM_SEL_BMSK 0x2 +#define CAM_PCLK_SRC_SEL_BMSK 0x60000 +#define CAM_PCLK_INVERT_BMSK 0x80000 +#define CAM_PAD_REG_SW_RESET_BMSK 0x100000 + +#define EXT_CAM_HSYNC_POL_SEL_BMSK 0x10000 +#define EXT_CAM_VSYNC_POL_SEL_BMSK 0x8000 +#define MDDI_CLK_CHICKEN_BIT_BMSK 0x80 + +#define CAM_SEL_SHFT 0x1 +#define CAM_PCLK_SRC_SEL_SHFT 0x11 +#define CAM_PCLK_INVERT_SHFT 0x13 +#define CAM_PAD_REG_SW_RESET_SHFT 0x14 + +#define EXT_CAM_HSYNC_POL_SEL_SHFT 0x10 +#define EXT_CAM_VSYNC_POL_SEL_SHFT 0xF +#define MDDI_CLK_CHICKEN_BIT_SHFT 0x7 +#define APPS_RESET_OFFSET 0x00000210 + +static struct clk *camio_vfe_mdc_clk; +static struct clk *camio_mdc_clk; +static struct clk *camio_vfe_clk; + +static struct msm_camera_io_ext camio_ext; +static struct resource *appio, *mdcio; +void __iomem *appbase, *mdcbase; + +static struct msm_camera_io_ext camio_ext; +static struct resource *appio, *mdcio; +void __iomem *appbase, *mdcbase; + +extern int clk_set_flags(struct clk *clk, unsigned long flags); + +int msm_camio_clk_enable(enum msm_camio_clk_type clktype) +{ + int rc = -1; + struct clk *clk = NULL; + + switch (clktype) { + case CAMIO_VFE_MDC_CLK: + clk = camio_vfe_mdc_clk = clk_get(NULL, "vfe_mdc_clk"); + break; + + case CAMIO_MDC_CLK: + clk = camio_mdc_clk = clk_get(NULL, "mdc_clk"); + break; + + case CAMIO_VFE_CLK: + clk = camio_vfe_clk = clk_get(NULL, "vfe_clk"); + break; + + default: + break; + } + + /* HTC: check clk */ + if (!IS_ERR(clk) && clk) { + clk_enable(clk); + rc = 0; + } + + return rc; +} + +int msm_camio_clk_disable(enum msm_camio_clk_type clktype) +{ + int rc = -1; + struct clk *clk = NULL; + + switch (clktype) { + case CAMIO_VFE_MDC_CLK: + clk = camio_vfe_mdc_clk; + break; + + case CAMIO_MDC_CLK: + clk = camio_mdc_clk; + break; + + case CAMIO_VFE_CLK: + clk = camio_vfe_clk; + break; + + default: + break; + } + /* HTC: check clk */ + if (!IS_ERR(clk) && clk) { + clk_disable(clk); + clk_put(clk); + rc = 0; + } + + return rc; +} + +void msm_camio_clk_rate_set(int rate) +{ + struct clk *clk = camio_vfe_clk; + + if (clk != ERR_PTR(-ENOENT)) + clk_set_rate(clk, rate); +} + +int msm_camio_enable(struct platform_device *pdev) +{ + int rc = 0; + struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data; + struct msm_camera_device_platform_data *camdev = sinfo->pdata; + + camio_ext = camdev->ioext; + + appio = request_mem_region(camio_ext.appphy, + camio_ext.appsz, pdev->name); + if (!appio) { + rc = -EBUSY; + goto enable_fail; + } + + appbase = ioremap(camio_ext.appphy, + camio_ext.appsz); + if (!appbase) { + rc = -ENOMEM; + goto apps_no_mem; 
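+ /* apps_no_mem (below) releases the app register region requested + * above; the other error labels unwind each later step in reverse + * order of acquisition. */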
+ } + + mdcio = request_mem_region(camio_ext.mdcphy, + camio_ext.mdcsz, pdev->name); + if (!mdcio) { + rc = -EBUSY; + goto mdc_busy; + } + + mdcbase = ioremap(camio_ext.mdcphy, + camio_ext.mdcsz); + if (!mdcbase) { + rc = -ENOMEM; + goto mdc_no_mem; + } + + msm_camio_clk_enable(CAMIO_VFE_CLK); + msm_camio_clk_enable(CAMIO_MDC_CLK); + msm_camio_clk_enable(CAMIO_VFE_MDC_CLK); + + mdelay(2); + camdev->camera_gpio_on(); + mdelay(2); + + return 0; + +mdc_no_mem: + release_mem_region(camio_ext.mdcphy, camio_ext.mdcsz); +mdc_busy: + iounmap(appbase); +apps_no_mem: + release_mem_region(camio_ext.appphy, camio_ext.appsz); +enable_fail: + return rc; +} + +void msm_camio_disable(struct platform_device *pdev) +{ + struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data; + struct msm_camera_device_platform_data *camdev = sinfo->pdata; + + iounmap(mdcbase); + release_mem_region(camio_ext.mdcphy, camio_ext.mdcsz); + iounmap(appbase); + release_mem_region(camio_ext.appphy, camio_ext.appsz); + + + msm_camio_clk_disable(CAMIO_VFE_CLK); + msm_camio_clk_disable(CAMIO_MDC_CLK); + msm_camio_clk_disable(CAMIO_VFE_MDC_CLK); + + camdev->camera_gpio_off(); +} + +void msm_camio_camif_pad_reg_reset(void) +{ + uint32_t reg; + uint32_t mask, value; + + /* select CLKRGM_VFE_SRC_CAM_VFE_SRC: internal source */ + msm_camio_clk_sel(MSM_CAMIO_CLK_SRC_INTERNAL); + + reg = (readl(mdcbase)) & CAMIF_CFG_RMSK; + + mask = CAM_SEL_BMSK | + CAM_PCLK_SRC_SEL_BMSK | + CAM_PCLK_INVERT_BMSK; + + value = 1 << CAM_SEL_SHFT | + 3 << CAM_PCLK_SRC_SEL_SHFT | + 0 << CAM_PCLK_INVERT_SHFT; + + writel((reg & (~mask)) | (value & mask), mdcbase); + mdelay(10); + + reg = (readl(mdcbase)) & CAMIF_CFG_RMSK; + mask = CAM_PAD_REG_SW_RESET_BMSK; + value = 1 << CAM_PAD_REG_SW_RESET_SHFT; + writel((reg & (~mask)) | (value & mask), mdcbase); + mdelay(10); + + reg = (readl(mdcbase)) & CAMIF_CFG_RMSK; + mask = CAM_PAD_REG_SW_RESET_BMSK; + value = 0 << CAM_PAD_REG_SW_RESET_SHFT; + writel((reg & (~mask)) | (value & mask), mdcbase); + mdelay(10); + + msm_camio_clk_sel(MSM_CAMIO_CLK_SRC_EXTERNAL); + mdelay(10); +} + +void msm_camio_vfe_blk_reset(void) +{ + uint32_t val; + + val = readl(appbase + 0x00000210); + val |= 0x1; + writel(val, appbase + 0x00000210); + mdelay(10); + + val = readl(appbase + 0x00000210); + val &= ~0x1; + writel(val, appbase + 0x00000210); + mdelay(10); + + /* do axi reset */ + val = readl(appbase + 0x00000208); + val |= 0x1; + writel(val, appbase + 0x00000208); + mdelay(10); + + val = readl(appbase + 0x00000208); + val &= ~0x1; + writel(val, appbase + 0x00000208); + mdelay(10); +} + +void msm_camio_camif_pad_reg_reset_2(void) +{ + uint32_t reg; + uint32_t mask, value; + + reg = (readl(mdcbase)) & CAMIF_CFG_RMSK; + mask = CAM_PAD_REG_SW_RESET_BMSK; + value = 1 << CAM_PAD_REG_SW_RESET_SHFT; + writel((reg & (~mask)) | (value & mask), mdcbase); + mdelay(10); + + reg = (readl(mdcbase)) & CAMIF_CFG_RMSK; + mask = CAM_PAD_REG_SW_RESET_BMSK; + value = 0 << CAM_PAD_REG_SW_RESET_SHFT; + writel((reg & (~mask)) | (value & mask), mdcbase); + mdelay(10); +} + +void msm_camio_clk_sel(enum msm_camio_clk_src_type srctype) +{ + struct clk *clk = NULL; + + clk = camio_vfe_clk; + + if (clk != NULL && clk != ERR_PTR(-ENOENT)) { + switch (srctype) { + case MSM_CAMIO_CLK_SRC_INTERNAL: + clk_set_flags(clk, 0x00000100 << 1); + break; + + case MSM_CAMIO_CLK_SRC_EXTERNAL: + clk_set_flags(clk, 0x00000100); + break; + + default: + break; + } + } +} + +int msm_camio_probe_on(struct platform_device *pdev) +{ + struct msm_camera_sensor_info *sinfo = 
pdev->dev.platform_data; + struct msm_camera_device_platform_data *camdev = sinfo->pdata; + camdev->camera_gpio_on(); + return msm_camio_clk_enable(CAMIO_VFE_CLK); +} + +int msm_camio_probe_off(struct platform_device *pdev) +{ + struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data; + struct msm_camera_device_platform_data *camdev = sinfo->pdata; + camdev->camera_gpio_off(); + return msm_camio_clk_disable(CAMIO_VFE_CLK); +} diff --git a/drivers/media/video/msm/msm_io8x.c b/drivers/media/video/msm/msm_io8x.c new file mode 100644 index 0000000000000..13364d91b48a7 --- /dev/null +++ b/drivers/media/video/msm/msm_io8x.c @@ -0,0 +1,332 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include +#include +#include +#include +#include +#include + +#define CAMIF_CFG_RMSK 0x1fffff +#define CAM_SEL_BMSK 0x2 +#define CAM_PCLK_SRC_SEL_BMSK 0x60000 +#define CAM_PCLK_INVERT_BMSK 0x80000 +#define CAM_PAD_REG_SW_RESET_BMSK 0x100000 + +#define EXT_CAM_HSYNC_POL_SEL_BMSK 0x10000 +#define EXT_CAM_VSYNC_POL_SEL_BMSK 0x8000 +#define MDDI_CLK_CHICKEN_BIT_BMSK 0x80 + +#define CAM_SEL_SHFT 0x1 +#define CAM_PCLK_SRC_SEL_SHFT 0x11 +#define CAM_PCLK_INVERT_SHFT 0x13 +#define CAM_PAD_REG_SW_RESET_SHFT 0x14 + +#define EXT_CAM_HSYNC_POL_SEL_SHFT 0x10 +#define EXT_CAM_VSYNC_POL_SEL_SHFT 0xF +#define MDDI_CLK_CHICKEN_BIT_SHFT 0x7 +#define APPS_RESET_OFFSET 0x00000210 + +static struct clk *camio_vfe_mdc_clk; +static struct clk *camio_mdc_clk; +static struct clk *camio_vfe_clk; +static struct clk *camio_vfe_axi_clk; +static struct msm_camera_io_ext camio_ext; +static struct resource *appio, *mdcio; +void __iomem *appbase, *mdcbase; + +int clk_set_flags(struct clk *clk, unsigned long flags); + +int msm_camio_clk_enable(enum msm_camio_clk_type clktype) +{ + int rc = 0; + struct clk *clk = NULL; + + switch (clktype) { + case CAMIO_VFE_MDC_CLK: + camio_vfe_mdc_clk = clk = clk_get(NULL, "vfe_mdc_clk"); + break; + + case CAMIO_MDC_CLK: + camio_mdc_clk = clk = clk_get(NULL, "mdc_clk"); + break; + + case CAMIO_VFE_CLK: + camio_vfe_clk = clk = clk_get(NULL, "vfe_clk"); + break; + + case CAMIO_VFE_AXI_CLK: + camio_vfe_axi_clk = clk = clk_get(NULL, "vfe_axi_clk"); + break; + + default: + break; + } + + if (!IS_ERR(clk) && clk != NULL) { + /* Set rate here *before* enabling the block to prevent + * unstable clock from source. 
+ */ + if (clktype == CAMIO_VFE_CLK && camio_vfe_clk) { + clk_set_rate(camio_vfe_clk, 96000000); + } + clk_enable(clk); + } + else + rc = -1; + + return rc; +} + +int msm_camio_clk_disable(enum msm_camio_clk_type clktype) +{ + int rc = 0; + struct clk *clk = NULL; + + switch (clktype) { + case CAMIO_VFE_MDC_CLK: + clk = camio_vfe_mdc_clk; + break; + + case CAMIO_MDC_CLK: + clk = camio_mdc_clk; + break; + + case CAMIO_VFE_CLK: + clk = camio_vfe_clk; + break; + + case CAMIO_VFE_AXI_CLK: + clk = camio_vfe_axi_clk; + break; + + default: + break; + } + + if (!IS_ERR(clk) && clk != NULL) { + clk_disable(clk); + clk_put(clk); + } else + rc = -1; + + return rc; +} + +void msm_camio_clk_rate_set(int rate) +{ + struct clk *clk = camio_vfe_mdc_clk; + + /* TODO: check return */ + clk_set_rate(clk, rate); +} + +int msm_camio_enable(struct platform_device *pdev) +{ + int rc = 0; + struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data; + struct msm_camera_device_platform_data *camdev = sinfo->pdata; + + camio_ext = camdev->ioext; + + appio = request_mem_region(camio_ext.appphy, + camio_ext.appsz, pdev->name); + if (!appio) { + rc = -EBUSY; + goto enable_fail; + } + + appbase = ioremap(camio_ext.appphy, camio_ext.appsz); + if (!appbase) { + rc = -ENOMEM; + goto apps_no_mem; + } + + mdcio = request_mem_region(camio_ext.mdcphy, + camio_ext.mdcsz, pdev->name); + if (!mdcio) { + rc = -EBUSY; + goto mdc_busy; + } + + mdcbase = ioremap(camio_ext.mdcphy, camio_ext.mdcsz); + if (!mdcbase) { + rc = -ENOMEM; + goto mdc_no_mem; + } + + camdev->camera_gpio_on(); + + msm_camio_clk_enable(CAMIO_VFE_CLK); + msm_camio_clk_enable(CAMIO_MDC_CLK); + msm_camio_clk_enable(CAMIO_VFE_MDC_CLK); + msm_camio_clk_enable(CAMIO_VFE_AXI_CLK); + + return 0; + +mdc_no_mem: + release_mem_region(camio_ext.mdcphy, camio_ext.mdcsz); +mdc_busy: + iounmap(appbase); +apps_no_mem: + release_mem_region(camio_ext.appphy, camio_ext.appsz); +enable_fail: + return rc; +} + +void msm_camio_disable(struct platform_device *pdev) +{ + struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data; + struct msm_camera_device_platform_data *camdev = sinfo->pdata; + + iounmap(mdcbase); + release_mem_region(camio_ext.mdcphy, camio_ext.mdcsz); + iounmap(appbase); + release_mem_region(camio_ext.appphy, camio_ext.appsz); + + camdev->camera_gpio_off(); + + msm_camio_clk_disable(CAMIO_VFE_MDC_CLK); + msm_camio_clk_disable(CAMIO_MDC_CLK); + msm_camio_clk_disable(CAMIO_VFE_CLK); + msm_camio_clk_disable(CAMIO_VFE_AXI_CLK); +} + +void msm_camio_camif_pad_reg_reset(void) +{ + uint32_t reg; + uint32_t mask, value; + + /* select CLKRGM_VFE_SRC_CAM_VFE_SRC: internal source */ + msm_camio_clk_sel(MSM_CAMIO_CLK_SRC_INTERNAL); + + reg = (readl(mdcbase)) & CAMIF_CFG_RMSK; + + mask = CAM_SEL_BMSK | + CAM_PCLK_SRC_SEL_BMSK | + CAM_PCLK_INVERT_BMSK | + EXT_CAM_HSYNC_POL_SEL_BMSK | + EXT_CAM_VSYNC_POL_SEL_BMSK | MDDI_CLK_CHICKEN_BIT_BMSK; + + value = 1 << CAM_SEL_SHFT | + 3 << CAM_PCLK_SRC_SEL_SHFT | + 0 << CAM_PCLK_INVERT_SHFT | + 0 << EXT_CAM_HSYNC_POL_SEL_SHFT | + 0 << EXT_CAM_VSYNC_POL_SEL_SHFT | 0 << MDDI_CLK_CHICKEN_BIT_SHFT; + writel((reg & (~mask)) | (value & mask), mdcbase); + mdelay(10); + + reg = (readl(mdcbase)) & CAMIF_CFG_RMSK; + mask = CAM_PAD_REG_SW_RESET_BMSK; + value = 1 << CAM_PAD_REG_SW_RESET_SHFT; + writel((reg & (~mask)) | (value & mask), mdcbase); + mdelay(10); + + reg = (readl(mdcbase)) & CAMIF_CFG_RMSK; + mask = CAM_PAD_REG_SW_RESET_BMSK; + value = 0 << CAM_PAD_REG_SW_RESET_SHFT; + writel((reg & (~mask)) | (value & mask), mdcbase); + 
mdelay(10); + + msm_camio_clk_sel(MSM_CAMIO_CLK_SRC_EXTERNAL); + + mdelay(10); +} + +void msm_camio_vfe_blk_reset(void) +{ +#if 0 + uint32_t val; + + val = readl(appbase + 0x00000210); + val |= 0x1; + writel(val, appbase + 0x00000210); + mdelay(10); + + val = readl(appbase + 0x00000210); + val &= ~0x1; + writel(val, appbase + 0x00000210); + mdelay(10); +#endif +} + +void msm_camio_camif_pad_reg_reset_2(void) +{ + uint32_t reg; + uint32_t mask, value; + + reg = (readl(mdcbase)) & CAMIF_CFG_RMSK; + mask = CAM_PAD_REG_SW_RESET_BMSK; + value = 1 << CAM_PAD_REG_SW_RESET_SHFT; + writel((reg & (~mask)) | (value & mask), mdcbase); + mdelay(10); + + reg = (readl(mdcbase)) & CAMIF_CFG_RMSK; + mask = CAM_PAD_REG_SW_RESET_BMSK; + value = 0 << CAM_PAD_REG_SW_RESET_SHFT; + writel((reg & (~mask)) | (value & mask), mdcbase); + mdelay(10); +} + +void msm_camio_clk_sel(enum msm_camio_clk_src_type srctype) +{ + struct clk *clk = NULL; + + clk = camio_vfe_clk; + + if (clk != NULL) { + switch (srctype) { + case MSM_CAMIO_CLK_SRC_INTERNAL: + clk_set_flags(clk, 0x00000100 << 1); + break; + + case MSM_CAMIO_CLK_SRC_EXTERNAL: + clk_set_flags(clk, 0x00000100); + break; + + default: + break; + } + } +} + +void msm_camio_clk_axi_rate_set(int rate) +{ + struct clk *clk = camio_vfe_axi_clk; + /* todo: check return */ + clk_set_rate(clk, rate); +} + +int msm_camio_probe_on(struct platform_device *pdev) +{ + struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data; + struct msm_camera_device_platform_data *camdev = sinfo->pdata; + + camdev->camera_gpio_on(); + return msm_camio_clk_enable(CAMIO_VFE_MDC_CLK); +} + +int msm_camio_probe_off(struct platform_device *pdev) +{ + struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data; + struct msm_camera_device_platform_data *camdev = sinfo->pdata; + + camdev->camera_gpio_off(); + return msm_camio_clk_disable(CAMIO_VFE_MDC_CLK); +} diff --git a/drivers/media/video/msm/msm_v4l2.c b/drivers/media/video/msm/msm_v4l2.c new file mode 100644 index 0000000000000..138d7d4960d4c --- /dev/null +++ b/drivers/media/video/msm/msm_v4l2.c @@ -0,0 +1,791 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +/*#include */ + +#define MSM_V4L2_START_SNAPSHOT _IOWR('V', BASE_VIDIOC_PRIVATE+1, \ + struct v4l2_buffer) + +#define MSM_V4L2_GET_PICTURE _IOWR('V', BASE_VIDIOC_PRIVATE+2, \ + struct v4l2_buffer) + +#define MSM_V4L2_DEVICE_NAME "msm_v4l2" + +#define MSM_V4L2_PROC_NAME "msm_v4l2" + +#define MSM_V4L2_DEVNUM_MPEG2 0 +#define MSM_V4L2_DEVNUM_YUV 20 + +/* HVGA-P (portrait) and HVGA-L (landscape) */ +#define MSM_V4L2_WIDTH 480 +#define MSM_V4L2_HEIGHT 320 + +#if 1 +#define D(fmt, args...) 
printk(KERN_INFO "msm_v4l2: " fmt, ##args) +#else +#define D(fmt, args...) do {} while (0) +#endif + +#define PREVIEW_FRAMES_NUM 4 + +struct msm_v4l2_device { + struct list_head read_queue; + struct v4l2_format current_cap_format; + struct v4l2_format current_pix_format; + struct video_device *pvdev; + struct msm_v4l2_driver *drv; + uint8_t opencnt; + + spinlock_t read_queue_lock; +}; + +static struct msm_v4l2_device *g_pmsm_v4l2_dev; + +static DEFINE_MUTEX(msm_v4l2_opencnt_lock); + +static int msm_v4l2_open(struct file *f) +{ + int rc = 0; + D("%s\n", __func__); + mutex_lock(&msm_v4l2_opencnt_lock); + if (!g_pmsm_v4l2_dev->opencnt) { + rc = g_pmsm_v4l2_dev->drv->open(g_pmsm_v4l2_dev->drv->sync, + MSM_APPS_ID_V4L2); + } + g_pmsm_v4l2_dev->opencnt++; + mutex_unlock(&msm_v4l2_opencnt_lock); + return rc; +} + +static int msm_v4l2_release(struct file *f) +{ + int rc = 0; + D("%s\n", __func__); + mutex_lock(&msm_v4l2_opencnt_lock); + if (!g_pmsm_v4l2_dev->opencnt) { + g_pmsm_v4l2_dev->opencnt--; + if (!g_pmsm_v4l2_dev->opencnt) { + rc = g_pmsm_v4l2_dev->drv->release(g_pmsm_v4l2_dev-> + drv->sync); + } + } + mutex_unlock(&msm_v4l2_opencnt_lock); + return rc; +} + +static unsigned int msm_v4l2_poll(struct file *f, struct poll_table_struct *w) +{ + return g_pmsm_v4l2_dev->drv->drv_poll(g_pmsm_v4l2_dev->drv->sync, f, w); +} + +static long msm_v4l2_ioctl(struct file *filep, + unsigned int cmd, unsigned long arg) +{ + struct msm_ctrl_cmd *ctrlcmd; + + D("msm_v4l2_ioctl, cmd = %d, %d\n", cmd, __LINE__); + + switch (cmd) { + case MSM_V4L2_START_SNAPSHOT: + + ctrlcmd = kmalloc(sizeof(struct msm_ctrl_cmd), GFP_ATOMIC); + if (!ctrlcmd) { + CDBG("msm_v4l2_ioctl: cannot allocate buffer\n"); + return -ENOMEM; + } + + ctrlcmd->length = 0; + ctrlcmd->value = NULL; + ctrlcmd->timeout_ms = 10000; + + D("msm_v4l2_ioctl, MSM_V4L2_START_SNAPSHOT v4l2 ioctl %d\n", + cmd); + ctrlcmd->type = MSM_V4L2_SNAPSHOT; + return g_pmsm_v4l2_dev->drv->ctrl(g_pmsm_v4l2_dev->drv->sync, + ctrlcmd); + + case MSM_V4L2_GET_PICTURE: + D("msm_v4l2_ioctl, MSM_V4L2_GET_PICTURE v4l2 ioctl %d\n", cmd); + ctrlcmd = (struct msm_ctrl_cmd *)arg; + return g_pmsm_v4l2_dev->drv->get_pict(g_pmsm_v4l2_dev->drv-> + sync, ctrlcmd); + + default: + D("msm_v4l2_ioctl, standard v4l2 ioctl %d\n", cmd); + return video_ioctl2(filep, cmd, arg); + } +} + +static void msm_v4l2_release_dev(struct video_device *d) +{ + D("%s\n", __func__); +} + +static int msm_v4l2_querycap(struct file *f, + void *pctx, struct v4l2_capability *pcaps) +{ + D("%s\n", __func__); + strncpy(pcaps->driver, MSM_APPS_ID_V4L2, sizeof(pcaps->driver)); + strncpy(pcaps->card, MSM_V4L2_DEVICE_NAME, sizeof(pcaps->card)); + pcaps->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + return 0; +} + +static int msm_v4l2_s_std(struct file *f, void *pctx, v4l2_std_id * pnorm) +{ + D("%s\n", __func__); + return 0; +} + +static int msm_v4l2_queryctrl(struct file *f, + void *pctx, struct v4l2_queryctrl *pqctrl) +{ + int rc = 0; + struct msm_ctrl_cmd *ctrlcmd; + + D("%s\n", __func__); + + ctrlcmd = kmalloc(sizeof(struct msm_ctrl_cmd), GFP_ATOMIC); + if (!ctrlcmd) { + CDBG("msm_v4l2_queryctrl: cannot allocate buffer\n"); + return -ENOMEM; + } + + ctrlcmd->type = MSM_V4L2_QUERY_CTRL; + ctrlcmd->length = sizeof(struct v4l2_queryctrl); + ctrlcmd->value = pqctrl; + ctrlcmd->timeout_ms = 10000; + + rc = g_pmsm_v4l2_dev->drv->ctrl(g_pmsm_v4l2_dev->drv->sync, ctrlcmd); + if (rc < 0) + return -1; + + return ctrlcmd->status; +} + +static int msm_v4l2_g_ctrl(struct file *f, void *pctx, struct 
v4l2_control *c) +{ + int rc = 0; + struct msm_ctrl_cmd *ctrlcmd; + + D("%s\n", __func__); + + ctrlcmd = kmalloc(sizeof(struct msm_ctrl_cmd), GFP_ATOMIC); + if (!ctrlcmd) { + CDBG("msm_v4l2_g_ctrl: cannot allocate buffer\n"); + return -ENOMEM; + } + + ctrlcmd->type = MSM_V4L2_GET_CTRL; + ctrlcmd->length = sizeof(struct v4l2_control); + ctrlcmd->value = c; + ctrlcmd->timeout_ms = 10000; + + rc = g_pmsm_v4l2_dev->drv->ctrl(g_pmsm_v4l2_dev->drv->sync, ctrlcmd); + if (rc < 0) + return -1; + + return ctrlcmd->status; +} + +static int msm_v4l2_s_ctrl(struct file *f, void *pctx, struct v4l2_control *c) +{ + int rc = 0; + struct msm_ctrl_cmd *ctrlcmd; + + ctrlcmd = kmalloc(sizeof(struct msm_ctrl_cmd), GFP_ATOMIC); + if (!ctrlcmd) { + CDBG("msm_v4l2_s_ctrl: cannot allocate buffer\n"); + return -ENOMEM; + } + + ctrlcmd->type = MSM_V4L2_SET_CTRL; + ctrlcmd->length = sizeof(struct v4l2_control); + ctrlcmd->value = c; + ctrlcmd->timeout_ms = 10000; + + D("%s\n", __func__); + + rc = g_pmsm_v4l2_dev->drv->ctrl(g_pmsm_v4l2_dev->drv->sync, ctrlcmd); + if (rc < 0) + return -1; + + return ctrlcmd->status; +} + +static int msm_v4l2_reqbufs(struct file *f, + void *pctx, struct v4l2_requestbuffers *b) +{ + D("%s\n", __func__); + return 0; +} + +static int msm_v4l2_querybuf(struct file *f, void *pctx, struct v4l2_buffer *pb) +{ + struct msm_pmem_info pmem_buf; +#if 0 + __u32 width = 0; + __u32 height = 0; + __u32 y_size = 0; + __u32 y_pad = 0; + + /* FIXME: g_pmsm_v4l2_dev->current_pix_format.fmt.pix.width; */ + width = 640; + /* FIXME: g_pmsm_v4l2_dev->current_pix_format.fmt.pix.height; */ + height = 480; + + D("%s: width = %d, height = %d\n", __func__, width, height); + + y_size = width * height; + y_pad = y_size % 4; +#endif + + __u32 y_pad = pb->bytesused % 4; + + /* V4L2 videodev will do the copy_from_user. */ + + memset(&pmem_buf, 0, sizeof(struct msm_pmem_info)); + pmem_buf.type = MSM_PMEM_OUTPUT2; + pmem_buf.vaddr = (void *)pb->m.userptr; + pmem_buf.y_off = 0; + pmem_buf.fd = (int)pb->reserved; + /* pmem_buf.cbcr_off = (y_size + y_pad); */ + pmem_buf.cbcr_off = (pb->bytesused + y_pad); + + g_pmsm_v4l2_dev->drv->reg_pmem(g_pmsm_v4l2_dev->drv->sync, &pmem_buf); + + return 0; +} + +static int msm_v4l2_qbuf(struct file *f, void *pctx, struct v4l2_buffer *pb) +{ + /* + __u32 y_size = 0; + __u32 y_pad = 0; + __u32 width = 0; + __u32 height = 0; + */ + + __u32 y_pad = 0; + + struct msm_pmem_info meminfo; + struct msm_frame frame; + static int cnt; + + if ((pb->flags >> 16) & 0x0001) { + /* this is for previwe */ +#if 0 + width = 640; + height = 480; + + /* V4L2 videodev will do the copy_from_user. 
*/ + D("%s: width = %d, height = %d\n", __func__, width, height); + y_size = width * height; + y_pad = y_size % 4; +#endif + + y_pad = pb->bytesused % 4; + + if (pb->type == V4L2_BUF_TYPE_PRIVATE) { + /* this qbuf is actually for releasing */ + + frame.buffer = pb->m.userptr; + frame.y_off = 0; + /* frame.cbcr_off = (y_size + y_pad); */ + frame.cbcr_off = (pb->bytesused + y_pad); + frame.fd = pb->reserved; + + D("V4L2_BUF_TYPE_PRIVATE: pb->bytesused = %d \n", + pb->bytesused); + + g_pmsm_v4l2_dev->drv->put_frame(g_pmsm_v4l2_dev->drv-> + sync, &frame); + + return 0; + } + + D("V4L2_BUF_TYPE_VIDEO_CAPTURE: pb->bytesused = %d \n", + pb->bytesused); + + meminfo.type = MSM_PMEM_OUTPUT2; + meminfo.fd = (int)pb->reserved; + meminfo.vaddr = (void *)pb->m.userptr; + meminfo.y_off = 0; + /* meminfo.cbcr_off = (y_size + y_pad); */ + meminfo.cbcr_off = (pb->bytesused + y_pad); + meminfo.vfe_can_write = + cnt != PREVIEW_FRAMES_NUM - 1; + cnt++; + g_pmsm_v4l2_dev->drv->reg_pmem(g_pmsm_v4l2_dev->drv->sync, + &meminfo); + } else if ((pb->flags) & 0x0001) { + /* this is for snapshot */ + + __u32 y_size = 0; + + if ((pb->flags >> 8) & 0x01) { + + y_size = pb->bytesused; + + meminfo.type = MSM_PMEM_THUMBNAIL; + } else if ((pb->flags >> 9) & 0x01) { + + y_size = pb->bytesused; + + meminfo.type = MSM_PMEM_MAINIMG; + } + + y_pad = y_size % 4; + + meminfo.fd = (int)pb->reserved; + meminfo.vaddr = (void *)pb->m.userptr; + meminfo.y_off = 0; + /* meminfo.cbcr_off = (y_size + y_pad); */ + meminfo.cbcr_off = (y_size + y_pad); + meminfo.vfe_can_write = 1; + g_pmsm_v4l2_dev->drv->reg_pmem(g_pmsm_v4l2_dev->drv->sync, + &meminfo); + } + + return 0; +} + +static int msm_v4l2_dqbuf(struct file *f, void *pctx, struct v4l2_buffer *pb) +{ + struct msm_frame frame; + D("%s\n", __func__); + + /* V4L2 videodev will do the copy_to_user. 
*/ + if (pb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { + + D("%s, %d\n", __func__, __LINE__); + + g_pmsm_v4l2_dev->drv->get_frame(g_pmsm_v4l2_dev->drv->sync, + &frame); + + pb->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + pb->m.userptr = (unsigned long)frame.buffer; /* FIXME */ + pb->reserved = (int)frame.fd; + /* pb->length = (int)frame.cbcr_off; */ + + pb->bytesused = frame.cbcr_off; + + } else if (pb->type == V4L2_BUF_TYPE_PRIVATE) { + __u32 y_pad = pb->bytesused % 4; + + frame.buffer = pb->m.userptr; + frame.y_off = 0; + /* frame.cbcr_off = (y_size + y_pad); */ + frame.cbcr_off = (pb->bytesused + y_pad); + frame.fd = pb->reserved; + + g_pmsm_v4l2_dev->drv->put_frame(g_pmsm_v4l2_dev->drv->sync, + &frame); + } + + return 0; +} + +static int msm_v4l2_streamon(struct file *f, void *pctx, enum v4l2_buf_type i) +{ + struct msm_ctrl_cmd *ctrlcmd; + + ctrlcmd = kmalloc(sizeof(struct msm_ctrl_cmd), GFP_ATOMIC); + if (!ctrlcmd) { + CDBG("msm_v4l2_s_fmt_cap: cannot allocate buffer\n"); + return -ENOMEM; + } + + ctrlcmd->type = MSM_V4L2_STREAM_ON; + ctrlcmd->timeout_ms = 10000; + ctrlcmd->length = 0; + ctrlcmd->value = NULL; + + D("%s\n", __func__); + + g_pmsm_v4l2_dev->drv->ctrl(g_pmsm_v4l2_dev->drv->sync, ctrlcmd); + + D("%s after drv->ctrl \n", __func__); + + return 0; +} + +static int msm_v4l2_streamoff(struct file *f, void *pctx, enum v4l2_buf_type i) +{ + struct msm_ctrl_cmd *ctrlcmd; + + ctrlcmd = kmalloc(sizeof(struct msm_ctrl_cmd), GFP_ATOMIC); + if (!ctrlcmd) { + CDBG("msm_v4l2_s_fmt_cap: cannot allocate buffer\n"); + return -ENOMEM; + } + + ctrlcmd->type = MSM_V4L2_STREAM_OFF; + ctrlcmd->timeout_ms = 10000; + ctrlcmd->length = 0; + ctrlcmd->value = NULL; + + D("%s\n", __func__); + + g_pmsm_v4l2_dev->drv->ctrl(g_pmsm_v4l2_dev->drv->sync, ctrlcmd); + + return 0; +} + +static int msm_v4l2_enum_fmt_overlay(struct file *f, + void *pctx, struct v4l2_fmtdesc *pfmtdesc) +{ + D("%s\n", __func__); + return 0; +} + +static int msm_v4l2_enum_fmt_cap(struct file *f, + void *pctx, struct v4l2_fmtdesc *pfmtdesc) +{ + D("%s\n", __func__); + + switch (pfmtdesc->index) { + case 0: + pfmtdesc->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + pfmtdesc->flags = 0; + strncpy(pfmtdesc->description, "YUV 4:2:0", + sizeof(pfmtdesc->description)); + pfmtdesc->pixelformat = V4L2_PIX_FMT_YVU420; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int msm_v4l2_g_fmt_cap(struct file *f, + void *pctx, struct v4l2_format *pfmt) +{ + D("%s\n", __func__); + pfmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + pfmt->fmt.pix.width = MSM_V4L2_WIDTH; + pfmt->fmt.pix.height = MSM_V4L2_HEIGHT; + pfmt->fmt.pix.pixelformat = V4L2_PIX_FMT_YVU420; + pfmt->fmt.pix.field = V4L2_FIELD_ANY; + pfmt->fmt.pix.bytesperline = 0; + pfmt->fmt.pix.sizeimage = 0; + pfmt->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG; + pfmt->fmt.pix.priv = 0; + return 0; +} + +static int msm_v4l2_s_fmt_cap(struct file *f, + void *pctx, struct v4l2_format *pfmt) +{ + struct msm_ctrl_cmd *ctrlcmd; + + D("%s\n", __func__); + + ctrlcmd = kmalloc(sizeof(struct msm_ctrl_cmd), GFP_ATOMIC); + if (!ctrlcmd) { + CDBG("msm_v4l2_s_fmt_cap: cannot allocate buffer\n"); + return -ENOMEM; + } + + ctrlcmd->type = MSM_V4L2_VID_CAP_TYPE; + ctrlcmd->length = sizeof(struct v4l2_format); + ctrlcmd->value = pfmt; + ctrlcmd->timeout_ms = 10000; + + if (pfmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -1; + +#if 0 + /* FIXEME */ + if (pfmt->fmt.pix.pixelformat != V4L2_PIX_FMT_YVU420) + return -EINVAL; +#endif + + /* Ok, but check other params, too. 
*/ + +#if 0 + memcpy(&g_pmsm_v4l2_dev->current_pix_format.fmt.pix, pfmt, + sizeof(struct v4l2_format)); +#endif + + g_pmsm_v4l2_dev->drv->ctrl(g_pmsm_v4l2_dev->drv->sync, ctrlcmd); + + return 0; +} + +static int msm_v4l2_g_fmt_overlay(struct file *f, + void *pctx, struct v4l2_format *pfmt) +{ + D("%s\n", __func__); + pfmt->type = V4L2_BUF_TYPE_VIDEO_OVERLAY; + pfmt->fmt.pix.width = MSM_V4L2_WIDTH; + pfmt->fmt.pix.height = MSM_V4L2_HEIGHT; + pfmt->fmt.pix.pixelformat = V4L2_PIX_FMT_YVU420; + pfmt->fmt.pix.field = V4L2_FIELD_ANY; + pfmt->fmt.pix.bytesperline = 0; + pfmt->fmt.pix.sizeimage = 0; + pfmt->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG; + pfmt->fmt.pix.priv = 0; + return 0; +} + +static int msm_v4l2_s_fmt_overlay(struct file *f, + void *pctx, struct v4l2_format *pfmt) +{ + D("%s\n", __func__); + return 0; +} + +static int msm_v4l2_overlay(struct file *f, void *pctx, unsigned int i) +{ + D("%s\n", __func__); + return 0; +} + +static int msm_v4l2_g_jpegcomp(struct file *f, + void *pctx, struct v4l2_jpegcompression *pcomp) +{ + D("%s\n", __func__); + return 0; +} + +static int msm_v4l2_s_jpegcomp(struct file *f, + void *pctx, struct v4l2_jpegcompression *pcomp) +{ + D("%s\n", __func__); + return 0; +} + +#ifdef CONFIG_PROC_FS +int msm_v4l2_read_proc(char *pbuf, char **start, off_t offset, + int count, int *eof, void *data) +{ + int len = 0; + len += snprintf(pbuf, strlen("stats\n") + 1, "stats\n"); + + if (g_pmsm_v4l2_dev) { + len += snprintf(pbuf, strlen("mode: ") + 1, "mode: "); + + if (g_pmsm_v4l2_dev->current_cap_format.type + == V4L2_BUF_TYPE_VIDEO_CAPTURE) + len += snprintf(pbuf, strlen("capture\n") + 1, + "capture\n"); + else + len += snprintf(pbuf, strlen("unknown\n") + 1, + "unknown\n"); + + len += snprintf(pbuf, 21, "resolution: %dx%d\n", + g_pmsm_v4l2_dev->current_cap_format.fmt.pix. + width, + g_pmsm_v4l2_dev->current_cap_format.fmt.pix. + height); + + len += snprintf(pbuf, + strlen("pixel format: ") + 1, "pixel format: "); + if (g_pmsm_v4l2_dev->current_cap_format.fmt.pix.pixelformat + == V4L2_PIX_FMT_YVU420) + len += snprintf(pbuf, strlen("yvu420\n") + 1, + "yvu420\n"); + else + len += snprintf(pbuf, strlen("unknown\n") + 1, + "unknown\n"); + + len += snprintf(pbuf, strlen("colorspace: ") + 1, + "colorspace: "); + if (g_pmsm_v4l2_dev->current_cap_format.fmt.pix.colorspace + == V4L2_COLORSPACE_JPEG) + len += snprintf(pbuf, strlen("jpeg\n") + 1, "jpeg\n"); + else + len += snprintf(pbuf, strlen("unknown\n") + 1, + "unknown\n"); + } + + *eof = 1; + return len; +} +#endif + +static const struct v4l2_file_operations msm_v4l2_fops = { + .owner = THIS_MODULE, + .open = msm_v4l2_open, + .poll = msm_v4l2_poll, + .release = msm_v4l2_release, + .ioctl = msm_v4l2_ioctl, +}; + +static void msm_v4l2_dev_init(struct msm_v4l2_device *pmsm_v4l2_dev) +{ + pmsm_v4l2_dev->read_queue_lock = + __SPIN_LOCK_UNLOCKED(pmsm_v4l2_dev->read_queue_lock); + INIT_LIST_HEAD(&pmsm_v4l2_dev->read_queue); +} + +static int msm_v4l2_try_fmt_cap(struct file *file, + void *fh, struct v4l2_format *f) +{ + /* FIXME */ + return 0; +} + +static int mm_v4l2_try_fmt_type_private(struct file *file, + void *fh, struct v4l2_format *f) +{ + /* FIXME */ + return 0; +} + +/* + * should the following structure be used instead of the code in the function? + * static const struct v4l2_ioctl_ops msm_v4l2_ioctl_ops = { + * .vidioc_querycap = .... 
+ * } + */ +static const struct v4l2_ioctl_ops msm_ioctl_ops = { + .vidioc_querycap = msm_v4l2_querycap, + .vidioc_s_std = msm_v4l2_s_std, + + .vidioc_queryctrl = msm_v4l2_queryctrl, + .vidioc_g_ctrl = msm_v4l2_g_ctrl, + .vidioc_s_ctrl = msm_v4l2_s_ctrl, + + .vidioc_reqbufs = msm_v4l2_reqbufs, + .vidioc_querybuf = msm_v4l2_querybuf, + .vidioc_qbuf = msm_v4l2_qbuf, + .vidioc_dqbuf = msm_v4l2_dqbuf, + + .vidioc_streamon = msm_v4l2_streamon, + .vidioc_streamoff = msm_v4l2_streamoff, + + .vidioc_enum_fmt_vid_overlay = msm_v4l2_enum_fmt_overlay, + .vidioc_enum_fmt_vid_cap = msm_v4l2_enum_fmt_cap, + + .vidioc_try_fmt_vid_cap = msm_v4l2_try_fmt_cap, + .vidioc_try_fmt_type_private = mm_v4l2_try_fmt_type_private, + + .vidioc_g_fmt_vid_cap = msm_v4l2_g_fmt_cap, + .vidioc_s_fmt_vid_cap = msm_v4l2_s_fmt_cap, + .vidioc_g_fmt_vid_overlay = msm_v4l2_g_fmt_overlay, + .vidioc_s_fmt_vid_overlay = msm_v4l2_s_fmt_overlay, + .vidioc_overlay = msm_v4l2_overlay, + + .vidioc_g_jpegcomp = msm_v4l2_g_jpegcomp, + .vidioc_s_jpegcomp = msm_v4l2_s_jpegcomp, +}; + +static int msm_v4l2_video_dev_init(struct video_device *pvd) +{ + strncpy(pvd->name, MSM_APPS_ID_V4L2, sizeof(pvd->name)); + pvd->fops = &msm_v4l2_fops; + pvd->release = msm_v4l2_release_dev; + pvd->minor = -1; + pvd->ioctl_ops = &msm_ioctl_ops; + return msm_v4l2_register(g_pmsm_v4l2_dev->drv); +} + +static int __init msm_v4l2_init(void) +{ + int rc = -ENOMEM; + struct video_device *pvdev = NULL; + struct msm_v4l2_device *pmsm_v4l2_dev = NULL; + D("%s\n", __func__); + + pvdev = video_device_alloc(); + if (pvdev == NULL) + return rc; + + pmsm_v4l2_dev = kzalloc(sizeof(struct msm_v4l2_device), GFP_KERNEL); + if (pmsm_v4l2_dev == NULL) { + video_device_release(pvdev); + return rc; + } + + msm_v4l2_dev_init(pmsm_v4l2_dev); + + g_pmsm_v4l2_dev = pmsm_v4l2_dev; + g_pmsm_v4l2_dev->pvdev = pvdev; + + g_pmsm_v4l2_dev->drv = + kzalloc(sizeof(struct msm_v4l2_driver), GFP_KERNEL); + if (!g_pmsm_v4l2_dev->drv) { + video_device_release(pvdev); + kfree(pmsm_v4l2_dev); + return rc; + } + + rc = msm_v4l2_video_dev_init(pvdev); + if (rc < 0) { + video_device_release(pvdev); + kfree(g_pmsm_v4l2_dev->drv); + kfree(pmsm_v4l2_dev); + return rc; + } + + if (video_register_device(pvdev, + VFL_TYPE_GRABBER, MSM_V4L2_DEVNUM_YUV)) { + D("failed to register device\n"); + video_device_release(pvdev); + kfree(g_pmsm_v4l2_dev); + g_pmsm_v4l2_dev = NULL; + return -ENOENT; + } +#ifdef CONFIG_PROC_FS + create_proc_read_entry(MSM_V4L2_PROC_NAME, + 0, NULL, msm_v4l2_read_proc, NULL); +#endif + + return 0; +} + +static void __exit msm_v4l2_exit(void) +{ + struct video_device *pvdev = g_pmsm_v4l2_dev->pvdev; + D("%s\n", __func__); +#ifdef CONFIG_PROC_FS + remove_proc_entry(MSM_V4L2_PROC_NAME, NULL); +#endif + video_unregister_device(pvdev); + video_device_release(pvdev); + + msm_v4l2_unregister(g_pmsm_v4l2_dev->drv); + + kfree(g_pmsm_v4l2_dev->drv); + g_pmsm_v4l2_dev->drv = NULL; + + kfree(g_pmsm_v4l2_dev); + g_pmsm_v4l2_dev = NULL; +} + +module_init(msm_v4l2_init); +module_exit(msm_v4l2_exit); + +MODULE_DESCRIPTION("MSM V4L2 driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/video/msm/msm_vfe7x.c b/drivers/media/video/msm/msm_vfe7x.c new file mode 100644 index 0000000000000..c7c75f68cacbd --- /dev/null +++ b/drivers/media/video/msm/msm_vfe7x.c @@ -0,0 +1,739 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "msm_vfe7x.h" + +#define QDSP_CMDQUEUE QDSP_vfeCommandQueue + +#define VFE_RESET_CMD 0 +#define VFE_START_CMD 1 +#define VFE_STOP_CMD 2 +#define VFE_FRAME_ACK 20 +#define STATS_AF_ACK 21 +#define STATS_WE_ACK 22 + +#define MSG_STOP_ACK 1 +#define MSG_SNAPSHOT 2 +#define MSG_OUTPUT1 6 +#define MSG_OUTPUT2 7 +#define MSG_STATS_AF 8 +#define MSG_STATS_WE 9 + +static struct msm_adsp_module *qcam_mod; +static struct msm_adsp_module *vfe_mod; +static struct msm_vfe_callback *resp; +static struct vfe_frame_extra *extdata; + +struct mutex vfe_lock; +static void *vfe_syncdata; +static uint8_t vfestopped; + +static struct stop_event stopevent; + +static void vfe_7x_convert(struct msm_vfe_phy_info *pinfo, + enum vfe_resp_msg type, + void *data, void **ext, int *elen) +{ + switch (type) { + case VFE_MSG_OUTPUT1: + case VFE_MSG_OUTPUT2:{ + pinfo->y_phy = ((struct vfe_endframe *)data)->y_address; + pinfo->cbcr_phy = + ((struct vfe_endframe *)data)->cbcr_address; + + CDBG("vfe_7x_convert, y_phy = 0x%x, cbcr_phy = 0x%x\n", + pinfo->y_phy, pinfo->cbcr_phy); + + ((struct vfe_frame_extra *)extdata)->bl_evencol = + ((struct vfe_endframe *)data)->blacklevelevencolumn; + + ((struct vfe_frame_extra *)extdata)->bl_oddcol = + ((struct vfe_endframe *)data)->blackleveloddcolumn; + + ((struct vfe_frame_extra *)extdata)->g_def_p_cnt = + ((struct vfe_endframe *)data)-> + greendefectpixelcount; + + ((struct vfe_frame_extra *)extdata)->r_b_def_p_cnt = + ((struct vfe_endframe *)data)-> + redbluedefectpixelcount; + + *ext = extdata; + *elen = sizeof(*extdata); + } + break; + + case VFE_MSG_STATS_AF: + case VFE_MSG_STATS_WE: + pinfo->sbuf_phy = *(uint32_t *) data; + break; + + default: + break; + } +} + +static void vfe_7x_ops(void *driver_data, unsigned id, size_t len, + void (*getevent) (void *ptr, size_t len)) +{ + uint32_t evt_buf[3]; + struct msm_vfe_resp *rp; + void *data; + + len = (id == (uint16_t)-1) ? 
0 : len; + data = resp->vfe_alloc(sizeof(struct msm_vfe_resp) + len, + vfe_syncdata, + GFP_ATOMIC); + + if (!data) { + pr_err("rp: cannot allocate buffer\n"); + return; + } + rp = (struct msm_vfe_resp *)data; + rp->evt_msg.len = len; + + if (id == ((uint16_t)-1)) { + /* event */ + rp->type = VFE_EVENT; + rp->evt_msg.type = MSM_CAMERA_EVT; + getevent(evt_buf, sizeof(evt_buf)); + rp->evt_msg.msg_id = evt_buf[0]; + resp->vfe_resp(rp, MSM_CAM_Q_VFE_EVT, + vfe_syncdata, + GFP_ATOMIC); + } else { + /* messages */ + rp->evt_msg.type = MSM_CAMERA_MSG; + rp->evt_msg.msg_id = id; + rp->evt_msg.data = rp + 1; + getevent(rp->evt_msg.data, len); + + switch (rp->evt_msg.msg_id) { + case MSG_SNAPSHOT: + rp->type = VFE_MSG_SNAPSHOT; + break; + + case MSG_OUTPUT1: + rp->type = VFE_MSG_OUTPUT1; + vfe_7x_convert(&(rp->phy), VFE_MSG_OUTPUT1, + rp->evt_msg.data, &(rp->extdata), + &(rp->extlen)); + break; + + case MSG_OUTPUT2: + rp->type = VFE_MSG_OUTPUT2; + vfe_7x_convert(&(rp->phy), VFE_MSG_OUTPUT2, + rp->evt_msg.data, &(rp->extdata), + &(rp->extlen)); + break; + + case MSG_STATS_AF: + rp->type = VFE_MSG_STATS_AF; + vfe_7x_convert(&(rp->phy), VFE_MSG_STATS_AF, + rp->evt_msg.data, NULL, NULL); + break; + + case MSG_STATS_WE: + rp->type = VFE_MSG_STATS_WE; + vfe_7x_convert(&(rp->phy), VFE_MSG_STATS_WE, + rp->evt_msg.data, NULL, NULL); + + CDBG("MSG_STATS_WE: phy = 0x%x\n", rp->phy.sbuf_phy); + break; + + case MSG_STOP_ACK: + rp->type = VFE_MSG_GENERAL; + stopevent.state = 1; + wake_up(&stopevent.wait); + break; + + default: + rp->type = VFE_MSG_GENERAL; + break; + } + resp->vfe_resp(rp, MSM_CAM_Q_VFE_MSG, vfe_syncdata, GFP_ATOMIC); + } +} + +static struct msm_adsp_ops vfe_7x_sync = { + .event = vfe_7x_ops, +}; + +static int vfe_7x_enable(struct camera_enable_cmd *enable) +{ + int rc = -EFAULT; + + if (!strcmp(enable->name, "QCAMTASK")) + rc = msm_adsp_enable(qcam_mod); + else if (!strcmp(enable->name, "VFETASK")) + rc = msm_adsp_enable(vfe_mod); + + return rc; +} + +static int vfe_7x_disable(struct camera_enable_cmd *enable, + struct platform_device *dev __attribute__ ((unused))) +{ + int rc = -EFAULT; + + if (!strcmp(enable->name, "QCAMTASK")) + rc = msm_adsp_disable(qcam_mod); + else if (!strcmp(enable->name, "VFETASK")) + rc = msm_adsp_disable(vfe_mod); + + return rc; +} + +static int vfe_7x_stop(void) +{ + int rc = 0; + uint32_t stopcmd = VFE_STOP_CMD; + rc = msm_adsp_write(vfe_mod, QDSP_CMDQUEUE, &stopcmd, sizeof(uint32_t)); + if (rc < 0) { + CDBG("%s:%d: failed rc = %d \n", __func__, __LINE__, rc); + return rc; + } + + stopevent.state = 0; + rc = wait_event_timeout(stopevent.wait, + stopevent.state != 0, + msecs_to_jiffies(stopevent.timeout)); + + return rc; +} + +static void vfe_7x_release(struct platform_device *pdev) +{ + struct msm_sensor_ctrl *sctrl = + &((struct msm_sync *)vfe_syncdata)->sctrl; + mutex_lock(&vfe_lock); + vfe_syncdata = NULL; + mutex_unlock(&vfe_lock); + + if (!vfestopped) { + CDBG("%s:%d:Calling vfe_7x_stop()\n", __func__, __LINE__); + vfe_7x_stop(); + } else + vfestopped = 0; + + msm_adsp_disable(qcam_mod); + msm_adsp_disable(vfe_mod); + + if (sctrl) + sctrl->s_release(); + + msm_adsp_put(qcam_mod); + msm_adsp_put(vfe_mod); + + msm_camio_disable(pdev); + + kfree(extdata); + extdata = 0; +} + +static int vfe_7x_init(struct msm_vfe_callback *presp, + struct platform_device *dev) +{ + int rc = 0; + + init_waitqueue_head(&stopevent.wait); + stopevent.timeout = 200; + stopevent.state = 0; + + if (presp && presp->vfe_resp) + resp = presp; + else + return -EFAULT; + + /* Bring up all the 
required GPIOs and Clocks */ + rc = msm_camio_enable(dev); + if (rc < 0) + return rc; + + msm_camio_camif_pad_reg_reset(); + + extdata = kmalloc(sizeof(struct vfe_frame_extra), GFP_ATOMIC); + if (!extdata) { + rc = -ENOMEM; + goto init_fail; + } + + rc = msm_adsp_get("QCAMTASK", &qcam_mod, &vfe_7x_sync, NULL); + if (rc) { + rc = -EBUSY; + goto get_qcam_fail; + } + + rc = msm_adsp_get("VFETASK", &vfe_mod, &vfe_7x_sync, NULL); + if (rc) { + rc = -EBUSY; + goto get_vfe_fail; + } + + return 0; + +get_vfe_fail: + msm_adsp_put(qcam_mod); +get_qcam_fail: + kfree(extdata); +init_fail: + return rc; +} + +static int vfe_7x_config_axi(int mode, struct axidata *ad, struct axiout *ao) +{ + struct msm_pmem_region *regptr; + unsigned long *bptr; + int cnt; + + int rc = 0; + + if (mode == OUTPUT_1 || mode == OUTPUT_1_AND_2) { + regptr = ad->region; + + CDBG("bufnum1 = %d\n", ad->bufnum1); + CDBG("config_axi1: O1, phy = 0x%lx, y_off = %d, cbcr_off =%d\n", + regptr->paddr, regptr->info.y_off, regptr->info.cbcr_off); + + bptr = &ao->output1buffer1_y_phy; + for (cnt = 0; cnt < ad->bufnum1; cnt++) { + *bptr = regptr->paddr + regptr->info.y_off; + bptr++; + *bptr = regptr->paddr + regptr->info.cbcr_off; + + bptr++; + regptr++; + } + + regptr--; + for (cnt = 0; cnt < (8 - ad->bufnum1); cnt++) { + *bptr = regptr->paddr + regptr->info.y_off; + bptr++; + *bptr = regptr->paddr + regptr->info.cbcr_off; + bptr++; + } + } + /* if OUTPUT1 or Both */ + if (mode == OUTPUT_2 || mode == OUTPUT_1_AND_2) { + regptr = &(ad->region[ad->bufnum1]); + + CDBG("bufnum2 = %d\n", ad->bufnum2); + CDBG("config_axi2: O2, phy = 0x%lx, y_off = %d, cbcr_off =%d\n", + regptr->paddr, regptr->info.y_off, regptr->info.cbcr_off); + + bptr = &ao->output2buffer1_y_phy; + for (cnt = 0; cnt < ad->bufnum2; cnt++) { + *bptr = regptr->paddr + regptr->info.y_off; + bptr++; + *bptr = regptr->paddr + regptr->info.cbcr_off; + + bptr++; + regptr++; + } + + regptr--; + for (cnt = 0; cnt < (8 - ad->bufnum2); cnt++) { + *bptr = regptr->paddr + regptr->info.y_off; + bptr++; + *bptr = regptr->paddr + regptr->info.cbcr_off; + bptr++; + } + } + + return rc; +} + +static int vfe_7x_config(struct msm_vfe_cfg_cmd *cmd, void *data) +{ + struct msm_pmem_region *regptr; + unsigned char buf[256]; + + struct vfe_stats_ack sack; + struct axidata *axid; + uint32_t i; + + struct vfe_stats_we_cfg *scfg = NULL; + struct vfe_stats_af_cfg *sfcfg = NULL; + + struct axiout *axio = NULL; + void *cmd_data = NULL; + void *cmd_data_alloc = NULL; + long rc = 0; + struct msm_vfe_command_7k *vfecmd; + + vfecmd = kmalloc(sizeof(struct msm_vfe_command_7k), GFP_ATOMIC); + if (!vfecmd) { + pr_err("vfecmd alloc failed!\n"); + return -ENOMEM; + } + + if (cmd->cmd_type != CMD_FRAME_BUF_RELEASE && + cmd->cmd_type != CMD_STATS_BUF_RELEASE && + cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE) { + if (copy_from_user(vfecmd, + (void __user *)(cmd->value), + sizeof(struct msm_vfe_command_7k))) { + rc = -EFAULT; + goto config_failure; + } + } + + switch (cmd->cmd_type) { + case CMD_STATS_AEC_AWB_ENABLE: + case CMD_STATS_AXI_CFG:{ + axid = data; + if (!axid) { + rc = -EFAULT; + goto config_failure; + } + + scfg = + kmalloc(sizeof(struct vfe_stats_we_cfg), + GFP_ATOMIC); + if (!scfg) { + rc = -ENOMEM; + goto config_failure; + } + + if (vfecmd->length != sizeof(typeof(*scfg))) { + rc = -EIO; + pr_err + ("msm_camera: %s: cmd %d: "\ + "user-space data size %d "\ + "!= kernel data size %d\n", __func__, + cmd->cmd_type, vfecmd->length, + sizeof(typeof(*scfg))); + goto config_failure; + } + + if 
(copy_from_user(scfg, + (void __user *)(vfecmd->value), + vfecmd->length)) { + + rc = -EFAULT; + goto config_done; + } + + CDBG("STATS_ENABLE: bufnum = %d, enabling = %d\n", + axid->bufnum1, scfg->wb_expstatsenable); + + if (axid->bufnum1 > 0) { + regptr = axid->region; + + for (i = 0; i < axid->bufnum1; i++) { + + CDBG("STATS_ENABLE, phy = 0x%lx\n", + regptr->paddr); + + scfg->wb_expstatoutputbuffer[i] = + (void *)regptr->paddr; + regptr++; + } + + cmd_data = scfg; + + } else { + rc = -EINVAL; + goto config_done; + } + } + break; + + case CMD_STATS_AF_ENABLE: + case CMD_STATS_AF_AXI_CFG:{ + axid = data; + if (!axid) { + rc = -EFAULT; + goto config_failure; + } + + sfcfg = + kmalloc(sizeof(struct vfe_stats_af_cfg), + GFP_ATOMIC); + + if (!sfcfg) { + rc = -ENOMEM; + goto config_failure; + } + + if (vfecmd->length > sizeof(typeof(*sfcfg))) { + pr_err + ("msm_camera: %s: cmd %d: user-space "\ + "data %d exceeds kernel buffer %d\n", + __func__, cmd->cmd_type, vfecmd->length, + sizeof(typeof(*sfcfg))); + rc = -EIO; + goto config_failure; + } + + if (copy_from_user(sfcfg, + (void __user *)(vfecmd->value), + vfecmd->length)) { + + rc = -EFAULT; + goto config_done; + } + + CDBG("AF_ENABLE: bufnum = %d, enabling = %d\n", + axid->bufnum1, sfcfg->af_enable); + + if (axid->bufnum1 > 0) { + regptr = axid->region; + + for (i = 0; i < axid->bufnum1; i++) { + + CDBG("STATS_ENABLE, phy = 0x%lx\n", + regptr->paddr); + + sfcfg->af_outbuf[i] = + (void *)regptr->paddr; + + regptr++; + } + + cmd_data = sfcfg; + + } else { + rc = -EINVAL; + goto config_done; + } + } + break; + + case CMD_FRAME_BUF_RELEASE:{ + struct msm_frame *b; + unsigned long p; + struct vfe_outputack fack; + if (!data) { + rc = -EFAULT; + goto config_failure; + } + + b = (struct msm_frame *)(cmd->value); + p = *(unsigned long *)data; + + fack.header = VFE_FRAME_ACK; + + fack.output2newybufferaddress = (void *)(p + b->y_off); + + fack.output2newcbcrbufferaddress = + (void *)(p + b->cbcr_off); + + vfecmd->queue = QDSP_CMDQUEUE; + vfecmd->length = sizeof(struct vfe_outputack); + cmd_data = &fack; + } + break; + + case CMD_SNAP_BUF_RELEASE: + break; + + case CMD_STATS_BUF_RELEASE:{ + CDBG("vfe_7x_config: CMD_STATS_BUF_RELEASE\n"); + if (!data) { + rc = -EFAULT; + goto config_failure; + } + + sack.header = STATS_WE_ACK; + sack.bufaddr = (void *)*(uint32_t *) data; + + vfecmd->queue = QDSP_CMDQUEUE; + vfecmd->length = sizeof(struct vfe_stats_ack); + cmd_data = &sack; + } + break; + + case CMD_STATS_AF_BUF_RELEASE:{ + CDBG("vfe_7x_config: CMD_STATS_AF_BUF_RELEASE\n"); + if (!data) { + rc = -EFAULT; + goto config_failure; + } + + sack.header = STATS_AF_ACK; + sack.bufaddr = (void *)*(uint32_t *) data; + + vfecmd->queue = QDSP_CMDQUEUE; + vfecmd->length = sizeof(struct vfe_stats_ack); + cmd_data = &sack; + } + break; + + case CMD_GENERAL: + case CMD_STATS_DISABLE:{ + if (vfecmd->length > sizeof(buf)) { + cmd_data_alloc = + cmd_data = + kmalloc(vfecmd->length, GFP_ATOMIC); + if (!cmd_data) { + rc = -ENOMEM; + goto config_failure; + } + } else + cmd_data = buf; + + if (copy_from_user(cmd_data, + (void __user *)(vfecmd->value), + vfecmd->length)) { + + rc = -EFAULT; + goto config_done; + } + + if (vfecmd->queue == QDSP_CMDQUEUE) { + switch (*(uint32_t *) cmd_data) { + case VFE_RESET_CMD: + msm_camio_vfe_blk_reset(); + msm_camio_camif_pad_reg_reset_2(); + vfestopped = 0; + break; + + case VFE_START_CMD: + msm_camio_camif_pad_reg_reset_2(); + vfestopped = 0; + break; + + case VFE_STOP_CMD: + vfestopped = 1; + goto config_send; + + default: + break; + } + 
} /* QDSP_CMDQUEUE */ + } + break; + + case CMD_AXI_CFG_OUT1:{ + axid = data; + if (!axid) { + rc = -EFAULT; + goto config_failure; + } + + axio = kmalloc(sizeof(struct axiout), GFP_ATOMIC); + if (!axio) { + rc = -ENOMEM; + goto config_failure; + } + + if (copy_from_user(axio, (void *)(vfecmd->value), + sizeof(struct axiout))) { + rc = -EFAULT; + goto config_done; + } + + vfe_7x_config_axi(OUTPUT_1, axid, axio); + + cmd_data = axio; + } + break; + + case CMD_AXI_CFG_OUT2: + case CMD_RAW_PICT_AXI_CFG:{ + axid = data; + if (!axid) { + rc = -EFAULT; + goto config_failure; + } + + axio = kmalloc(sizeof(struct axiout), GFP_ATOMIC); + if (!axio) { + rc = -ENOMEM; + goto config_failure; + } + + if (copy_from_user(axio, (void __user *)(vfecmd->value), + sizeof(struct axiout))) { + rc = -EFAULT; + goto config_done; + } + + vfe_7x_config_axi(OUTPUT_2, axid, axio); + cmd_data = axio; + } + break; + + case CMD_AXI_CFG_SNAP_O1_AND_O2:{ + axid = data; + if (!axid) { + rc = -EFAULT; + goto config_failure; + } + + axio = kmalloc(sizeof(struct axiout), GFP_ATOMIC); + if (!axio) { + rc = -ENOMEM; + goto config_failure; + } + + if (copy_from_user(axio, (void __user *)(vfecmd->value), + sizeof(struct axiout))) { + rc = -EFAULT; + goto config_done; + } + + vfe_7x_config_axi(OUTPUT_1_AND_2, axid, axio); + + cmd_data = axio; + } + break; + + default: + break; + } /* switch */ + + if (vfestopped) + goto config_done; + +config_send: + CDBG("send adsp command = %d\n", *(uint32_t *) cmd_data); + rc = msm_adsp_write(vfe_mod, vfecmd->queue, cmd_data, vfecmd->length); + +config_done: + if (cmd_data_alloc != NULL) + kfree(cmd_data_alloc); + +config_failure: + kfree(scfg); + kfree(axio); + kfree(vfecmd); + return rc; +} + +void msm_camvfe_fn_init(struct msm_camvfe_fn *fptr, void *data) +{ + mutex_init(&vfe_lock); + fptr->vfe_init = vfe_7x_init; + fptr->vfe_enable = vfe_7x_enable; + fptr->vfe_config = vfe_7x_config; + fptr->vfe_disable = vfe_7x_disable; + fptr->vfe_release = vfe_7x_release; + vfe_syncdata = data; +} diff --git a/drivers/media/video/msm/msm_vfe7x.h b/drivers/media/video/msm/msm_vfe7x.h new file mode 100644 index 0000000000000..5e86ce0ba9df4 --- /dev/null +++ b/drivers/media/video/msm/msm_vfe7x.h @@ -0,0 +1,269 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ + +#ifndef __MSM_VFE7X_H__ +#define __MSM_VFE7X_H__ +#include +#include + +struct vfe_frame_extra { + uint32_t bl_evencol; + uint32_t bl_oddcol; + uint16_t g_def_p_cnt; + uint16_t r_b_def_p_cnt; +}; + +struct vfe_endframe { + uint32_t y_address; + uint32_t cbcr_address; + + unsigned int blacklevelevencolumn:23; + uint16_t reserved1:9; + unsigned int blackleveloddcolumn:23; + uint16_t reserved2:9; + + uint16_t greendefectpixelcount:8; + uint16_t reserved3:8; + uint16_t redbluedefectpixelcount:8; + uint16_t reserved4:8; +} __attribute__ ((packed, aligned(4))); + +struct vfe_outputack { + uint32_t header; + void *output2newybufferaddress; + void *output2newcbcrbufferaddress; +} __attribute__ ((packed, aligned(4))); + +struct vfe_stats_ack { + uint32_t header; + /* MUST BE 64 bit ALIGNED */ + void *bufaddr; +} __attribute__ ((packed, aligned(4))); + +/* AXI Output Config Command sent to DSP */ +struct axiout { + uint32_t cmdheader:32; + int outputmode:3; + uint8_t format:2; + uint32_t /* reserved */ : 27; + + /* AXI Output 1 Y Configuration, Part 1 */ + uint32_t out1yimageheight:12; + uint32_t /* reserved */ : 4; + uint32_t out1yimagewidthin64bitwords:10; + uint32_t /* reserved */ : 6; + + /* AXI Output 1 Y Configuration, Part 2 */ + uint8_t out1yburstlen:2; + uint32_t out1ynumrows:12; + uint32_t out1yrowincin64bitincs:12; + uint32_t /* reserved */ : 6; + + /* AXI Output 1 CbCr Configuration, Part 1 */ + uint32_t out1cbcrimageheight:12; + uint32_t /* reserved */ : 4; + uint32_t out1cbcrimagewidthin64bitwords:10; + uint32_t /* reserved */ : 6; + + /* AXI Output 1 CbCr Configuration, Part 2 */ + uint8_t out1cbcrburstlen:2; + uint32_t out1cbcrnumrows:12; + uint32_t out1cbcrrowincin64bitincs:12; + uint32_t /* reserved */ : 6; + + /* AXI Output 2 Y Configuration, Part 1 */ + uint32_t out2yimageheight:12; + uint32_t /* reserved */ : 4; + uint32_t out2yimagewidthin64bitwords:10; + uint32_t /* reserved */ : 6; + + /* AXI Output 2 Y Configuration, Part 2 */ + uint8_t out2yburstlen:2; + uint32_t out2ynumrows:12; + uint32_t out2yrowincin64bitincs:12; + uint32_t /* reserved */ : 6; + + /* AXI Output 2 CbCr Configuration, Part 1 */ + uint32_t out2cbcrimageheight:12; + uint32_t /* reserved */ : 4; + uint32_t out2cbcrimagewidtein64bitwords:10; + uint32_t /* reserved */ : 6; + + /* AXI Output 2 CbCr Configuration, Part 2 */ + uint8_t out2cbcrburstlen:2; + uint32_t out2cbcrnumrows:12; + uint32_t out2cbcrrowincin64bitincs:12; + uint32_t /* reserved */ : 6; + + /* Address configuration: + * output1 phisycal address */ + unsigned long output1buffer1_y_phy; + unsigned long output1buffer1_cbcr_phy; + unsigned long output1buffer2_y_phy; + unsigned long output1buffer2_cbcr_phy; + unsigned long output1buffer3_y_phy; + unsigned long output1buffer3_cbcr_phy; + unsigned long output1buffer4_y_phy; + unsigned long output1buffer4_cbcr_phy; + unsigned long output1buffer5_y_phy; + unsigned long output1buffer5_cbcr_phy; + unsigned long output1buffer6_y_phy; + unsigned long output1buffer6_cbcr_phy; + unsigned long output1buffer7_y_phy; + unsigned long output1buffer7_cbcr_phy; + unsigned long output1buffer8_y_phy; + unsigned long output1buffer8_cbcr_phy; + + /* output2 phisycal address */ + unsigned long output2buffer1_y_phy; + unsigned long output2buffer1_cbcr_phy; + unsigned long output2buffer2_y_phy; + unsigned long output2buffer2_cbcr_phy; + unsigned long output2buffer3_y_phy; + unsigned long output2buffer3_cbcr_phy; + unsigned long output2buffer4_y_phy; + unsigned long output2buffer4_cbcr_phy; + unsigned long 
output2buffer5_y_phy; + unsigned long output2buffer5_cbcr_phy; + unsigned long output2buffer6_y_phy; + unsigned long output2buffer6_cbcr_phy; + unsigned long output2buffer7_y_phy; + unsigned long output2buffer7_cbcr_phy; + unsigned long output2buffer8_y_phy; + unsigned long output2buffer8_cbcr_phy; +} __attribute__ ((packed, aligned(4))); + +struct vfe_stats_we_cfg { + uint32_t header; + + /* White Balance/Exposure Statistic Selection */ + uint8_t wb_expstatsenable:1; + uint8_t wb_expstatbuspriorityselection:1; + unsigned int wb_expstatbuspriorityvalue:4; + unsigned int /* reserved */ : 26; + + /* White Balance/Exposure Statistic Configuration, Part 1 */ + uint8_t exposurestatregions:1; + uint8_t exposurestatsubregions:1; + unsigned int /* reserved */ : 14; + + unsigned int whitebalanceminimumy:8; + unsigned int whitebalancemaximumy:8; + + /* White Balance/Exposure Statistic Configuration, Part 2 */ + uint8_t + wb_expstatslopeofneutralregionline[NUM_WB_EXP_NEUTRAL_REGION_LINES]; + + /* White Balance/Exposure Statistic Configuration, Part 3 */ + unsigned int wb_expstatcrinterceptofneutralregionline2:12; + unsigned int /* reserved */ : 4; + unsigned int wb_expstatcbinterceptofneutralreginnline1:12; + unsigned int /* reserved */ : 4; + + /* White Balance/Exposure Statistic Configuration, Part 4 */ + unsigned int wb_expstatcrinterceptofneutralregionline4:12; + unsigned int /* reserved */ : 4; + unsigned int wb_expstatcbinterceptofneutralregionline3:12; + unsigned int /* reserved */ : 4; + + /* White Balance/Exposure Statistic Output Buffer Header */ + unsigned int wb_expmetricheaderpattern:8; + unsigned int /* reserved */ : 24; + + /* White Balance/Exposure Statistic Output Buffers-MUST + * BE 64 bit ALIGNED */ + void *wb_expstatoutputbuffer[NUM_WB_EXP_STAT_OUTPUT_BUFFERS]; +} __attribute__ ((packed, aligned(4))); + +struct vfe_stats_af_cfg { + uint32_t header; + + /* Autofocus Statistic Selection */ + uint8_t af_enable:1; + uint8_t af_busprioritysel:1; + unsigned int af_buspriorityval:4; + unsigned int /* reserved */ : 26; + + /* Autofocus Statistic Configuration, Part 1 */ + unsigned int af_singlewinvoffset:12; + unsigned int /* reserved */ : 4; + unsigned int af_singlewinhoffset:12; + unsigned int /* reserved */ : 3; + uint8_t af_winmode:1; + + /* Autofocus Statistic Configuration, Part 2 */ + unsigned int af_singglewinvh:11; + unsigned int /* reserved */ : 5; + unsigned int af_singlewinhw:11; + unsigned int /* reserved */ : 5; + + /* Autofocus Statistic Configuration, Parts 3-6 */ + uint8_t af_multiwingrid[NUM_AUTOFOCUS_MULTI_WINDOW_GRIDS]; + + /* Autofocus Statistic Configuration, Part 7 */ + signed int af_metrichpfcoefa00:5; + signed int af_metrichpfcoefa04:5; + unsigned int af_metricmaxval:11; + uint8_t af_metricsel:1; + unsigned int /* reserved */ : 10; + + /* Autofocus Statistic Configuration, Part 8 */ + signed int af_metrichpfcoefa20:5; + signed int af_metrichpfcoefa21:5; + signed int af_metrichpfcoefa22:5; + signed int af_metrichpfcoefa23:5; + signed int af_metrichpfcoefa24:5; + unsigned int /* reserved */ : 7; + + /* Autofocus Statistic Output Buffer Header */ + unsigned int af_metrichp:8; + unsigned int /* reserved */ : 24; + + /* Autofocus Statistic Output Buffers - MUST BE 64 bit ALIGNED!!! 
*/ + void *af_outbuf[NUM_AF_STAT_OUTPUT_BUFFERS]; +} __attribute__ ((packed, aligned(4))); /* VFE_StatsAutofocusConfigCmdType */ + +struct msm_camera_frame_msg { + unsigned long output_y_address; + unsigned long output_cbcr_address; + + unsigned int blacklevelevenColumn:23; + uint16_t reserved1:9; + unsigned int blackleveloddColumn:23; + uint16_t reserved2:9; + + uint16_t greendefectpixelcount:8; + uint16_t reserved3:8; + uint16_t redbluedefectpixelcount:8; + uint16_t reserved4:8; +} __attribute__ ((packed, aligned(4))); + +/* New one for 7k */ +struct msm_vfe_command_7k { + uint16_t queue; + uint16_t length; + void *value; +}; + +struct stop_event { + wait_queue_head_t wait; + int state; + int timeout; +}; + +#endif /* __MSM_VFE7X_H__ */ diff --git a/drivers/media/video/msm/msm_vfe8x.c b/drivers/media/video/msm/msm_vfe8x.c new file mode 100644 index 0000000000000..1fedb2e36c90f --- /dev/null +++ b/drivers/media/video/msm/msm_vfe8x.c @@ -0,0 +1,838 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include +#include +#include +#include +#include +#include "msm_vfe8x_proc.h" +#include + +#define ON 1 +#define OFF 0 + +static void *vfe_syncdata; +static struct clk *ebi1_clk; +static const char *const clk_name = "ebi1_clk"; + +static int vfe_enable(struct camera_enable_cmd *enable) +{ + return 0; +} + +static int vfe_disable(struct camera_enable_cmd *enable, + struct platform_device *dev) +{ + vfe_stop(); + msm_camio_disable(dev); + return 0; +} + +static int vfe_init(struct msm_vfe_callback *presp, struct platform_device *dev) +{ + int rc = 0; + + ebi1_clk = clk_get(NULL, clk_name); + if (!ebi1_clk) { + pr_err("%s: could not get %s\n", __func__, clk_name); + return -EIO; + } + + rc = clk_set_rate(ebi1_clk, 128000000); + if (rc < 0) { + pr_err("%s: clk_set_rate(%s) failed: %d\n", __func__, + clk_name, rc); + return rc; + } + + rc = vfe_cmd_init(presp, dev, vfe_syncdata); + if (rc < 0) + return rc; + + /* Bring up all the required GPIOs and Clocks */ + return msm_camio_enable(dev); +} + +static void vfe_release(struct platform_device *dev) +{ + struct msm_sensor_ctrl *sctrl = + &((struct msm_sync *)vfe_syncdata)->sctrl; + + if (ebi1_clk) { + clk_set_rate(ebi1_clk, 0); + clk_put(ebi1_clk); + ebi1_clk = 0; + } + + if (sctrl) + sctrl->s_release(); + + msm_camio_disable(dev); + vfe_cmd_release(dev); + vfe_syncdata = NULL; +} + +static void vfe_config_axi(int mode, + struct axidata *ad, + struct vfe_cmd_axi_output_config *ao) +{ + struct msm_pmem_region *regptr, *regptr1; + int i, j; + uint32_t *p1, *p2; + regptr1 = NULL; + + if (mode == OUTPUT_1 || mode == OUTPUT_1_AND_2) { + regptr = ad->region; + for (i = 0; i < ad->bufnum1; i++) { + + p1 = &(ao->output1.outputY.outFragments[i][0]); + p2 = &(ao->output1.outputCbcr.outFragments[i][0]); + + for (j = 0; j < ao->output1.fragmentCount; j++) { + 
+ *p1 = regptr->paddr + regptr->info.y_off; + p1++; + + *p2 = regptr->paddr + regptr->info.cbcr_off; + p2++; + } + regptr++; + } + } + /* if OUTPUT1 or Both */ + if (mode == OUTPUT_2 || mode == OUTPUT_1_AND_2) { + + regptr = &(ad->region[ad->bufnum1]); + CDBG("bufnum2 = %d\n", ad->bufnum2); + + for (i = 0; i < ad->bufnum2; i++) { + + p1 = &(ao->output2.outputY.outFragments[i][0]); + p2 = &(ao->output2.outputCbcr.outFragments[i][0]); + + CDBG("config_axi: O2, phy = 0x%lx, y_off = %d, "\ + "cbcr_off = %d\n", regptr->paddr, + regptr->info.y_off, regptr->info.cbcr_off); + + for (j = 0; j < ao->output2.fragmentCount; j++) { + + *p1 = regptr->paddr + regptr->info.y_off; + CDBG("vfe_config_axi: p1 = 0x%x\n", *p1); + p1++; + + *p2 = regptr->paddr + regptr->info.cbcr_off; + CDBG("vfe_config_axi: p2 = 0x%x\n", *p2); + p2++; + } + regptr++; + } + } +#ifdef CONFIG_720P_CAMERA + /* For video configuration */ + if (mode == OUTPUT_1_AND_3) { + /* this is preview buffer. */ + regptr = &(ad->region[0]); + /* this is video buffer. */ + regptr1 = &(ad->region[ad->bufnum1]); + CDBG("bufnum1 = %d\n", ad->bufnum1); + CDBG("bufnum2 = %d\n", ad->bufnum2); + + for (i = 0; i < ad->bufnum1; i++) { + p1 = &(ao->output1.outputY.outFragments[i][0]); + p2 = &(ao->output1.outputCbcr.outFragments[i][0]); + + CDBG("config_axi: O1, phy = 0x%lx, y_off = %d, "\ + "cbcr_off = %d\n", regptr->paddr, + regptr->info.y_off, regptr->info.cbcr_off); + + for (j = 0; j < ao->output1.fragmentCount; j++) { + + *p1 = regptr->paddr + regptr->info.y_off; + CDBG("vfe_config_axi: p1 = 0x%x\n", *p1); + p1++; + + *p2 = regptr->paddr + regptr->info.cbcr_off; + CDBG("vfe_config_axi: p2 = 0x%x\n", *p2); + p2++; + } + regptr++; + } + for (i = 0; i < ad->bufnum2; i++) { + p1 = &(ao->output2.outputY.outFragments[i][0]); + p2 = &(ao->output2.outputCbcr.outFragments[i][0]); + + CDBG("config_axi: O2, phy = 0x%lx, y_off = %d, "\ + "cbcr_off = %d\n", regptr1->paddr, + regptr1->info.y_off, regptr1->info.cbcr_off); + + for (j = 0; j < ao->output2.fragmentCount; j++) { + + *p1 = regptr1->paddr + regptr1->info.y_off; + CDBG("vfe_config_axi: p1 = 0x%x\n", *p1); + p1++; + + *p2 = regptr1->paddr + regptr1->info.cbcr_off; + CDBG("vfe_config_axi: p2 = 0x%x\n", *p2); + p2++; + } + regptr1++; + } + } +#endif + +} + +#define ERR_COPY_FROM_USER() \ + pr_err("%s(%d): copy from user\n", __func__, __LINE__) + +#define CHECKED_COPY_FROM_USER(in) { \ + if (cmd->length != sizeof(*(in))) { \ + pr_err("msm_camera: %s:%d cmd %d: user data size %d " \ + "!= kernel data size %d\n", \ + __func__, __LINE__, \ + cmd->id, cmd->length, sizeof(*(in))); \ + rc = -EIO; \ + break; \ + } \ + if (copy_from_user((in), (void __user *)cmd->value, \ + sizeof(*(in)))) { \ + ERR_COPY_FROM_USER(); \ + rc = -EFAULT; \ + break; \ + } \ +} + +static int vfe_proc_general(struct msm_vfe_command_8k *cmd) +{ + int rc = 0; + + CDBG("%s: cmdID = %d\n", __func__, cmd->id); + + switch (cmd->id) { + case VFE_CMD_ID_RESET: + msm_camio_vfe_blk_reset(); + msm_camio_camif_pad_reg_reset_2(); + vfe_reset(); + break; + + case VFE_CMD_ID_START:{ + struct vfe_cmd_start start; + CHECKED_COPY_FROM_USER(&start); + + /* msm_camio_camif_pad_reg_reset_2(); */ + msm_camio_camif_pad_reg_reset(); + vfe_start(&start); + } + break; + + case VFE_CMD_ID_CAMIF_CONFIG:{ + struct vfe_cmd_camif_config camif; + CHECKED_COPY_FROM_USER(&camif); + + vfe_camif_config(&camif); + } + break; + + case VFE_CMD_ID_BLACK_LEVEL_CONFIG:{ + struct vfe_cmd_black_level_config bl; + CHECKED_COPY_FROM_USER(&bl); + + vfe_black_level_config(&bl); + } + 
break; + + case VFE_CMD_ID_ROLL_OFF_CONFIG:{ + /* rolloff is too big to be on the stack */ + struct vfe_cmd_roll_off_config *rolloff = + kmalloc(sizeof(struct vfe_cmd_roll_off_config), + GFP_KERNEL); + if (!rolloff) { + pr_err("%s: out of memory\n", __func__); + rc = -ENOMEM; + break; + } + /* Wrap CHECKED_COPY_FROM_USER() in a do-while(0) loop + * to make sure we free rolloff when copy_from_user() + * fails. + */ + do { + CHECKED_COPY_FROM_USER(rolloff); + vfe_roll_off_config(rolloff); + } while (0); + kfree(rolloff); + } + break; + + case VFE_CMD_ID_DEMUX_CHANNEL_GAIN_CONFIG:{ + struct vfe_cmd_demux_channel_gain_config demuxc; + CHECKED_COPY_FROM_USER(&demuxc); + + /* demux is always enabled. */ + vfe_demux_channel_gain_config(&demuxc); + } + break; + + case VFE_CMD_ID_DEMOSAIC_CONFIG:{ + struct vfe_cmd_demosaic_config demosaic; + CHECKED_COPY_FROM_USER(&demosaic); + + vfe_demosaic_config(&demosaic); + } + break; + + case VFE_CMD_ID_FOV_CROP_CONFIG: + case VFE_CMD_ID_FOV_CROP_UPDATE:{ + struct vfe_cmd_fov_crop_config fov; + CHECKED_COPY_FROM_USER(&fov); + + vfe_fov_crop_config(&fov); + } + break; + + case VFE_CMD_ID_MAIN_SCALER_CONFIG: + case VFE_CMD_ID_MAIN_SCALER_UPDATE:{ + struct vfe_cmd_main_scaler_config mainds; + CHECKED_COPY_FROM_USER(&mainds); + + vfe_main_scaler_config(&mainds); + } + break; + + case VFE_CMD_ID_WHITE_BALANCE_CONFIG: + case VFE_CMD_ID_WHITE_BALANCE_UPDATE:{ + struct vfe_cmd_white_balance_config wb; + CHECKED_COPY_FROM_USER(&wb); + + vfe_white_balance_config(&wb); + } + break; + + case VFE_CMD_ID_COLOR_CORRECTION_CONFIG: + case VFE_CMD_ID_COLOR_CORRECTION_UPDATE:{ + struct vfe_cmd_color_correction_config cc; + CHECKED_COPY_FROM_USER(&cc); + + vfe_color_correction_config(&cc); + } + break; + + case VFE_CMD_ID_LA_CONFIG:{ + struct vfe_cmd_la_config la; + CHECKED_COPY_FROM_USER(&la); + + vfe_la_config(&la); + } + break; + + case VFE_CMD_ID_RGB_GAMMA_CONFIG:{ + struct vfe_cmd_rgb_gamma_config rgb; + CHECKED_COPY_FROM_USER(&rgb); + + rc = vfe_rgb_gamma_config(&rgb); + } + break; + + case VFE_CMD_ID_CHROMA_ENHAN_CONFIG: + case VFE_CMD_ID_CHROMA_ENHAN_UPDATE:{ + struct vfe_cmd_chroma_enhan_config chrom; + CHECKED_COPY_FROM_USER(&chrom); + + vfe_chroma_enhan_config(&chrom); + } + break; + + case VFE_CMD_ID_CHROMA_SUPPRESSION_CONFIG: + case VFE_CMD_ID_CHROMA_SUPPRESSION_UPDATE:{ + struct vfe_cmd_chroma_suppression_config chromsup; + CHECKED_COPY_FROM_USER(&chromsup); + + vfe_chroma_sup_config(&chromsup); + } + break; + + case VFE_CMD_ID_ASF_CONFIG:{ + struct vfe_cmd_asf_config asf; + CHECKED_COPY_FROM_USER(&asf); + + vfe_asf_config(&asf); + } + break; + + case VFE_CMD_ID_SCALER2Y_CONFIG: + case VFE_CMD_ID_SCALER2Y_UPDATE:{ + struct vfe_cmd_scaler2_config ds2y; + CHECKED_COPY_FROM_USER(&ds2y); + + vfe_scaler2y_config(&ds2y); + } + break; + + case VFE_CMD_ID_SCALER2CbCr_CONFIG: + case VFE_CMD_ID_SCALER2CbCr_UPDATE:{ + struct vfe_cmd_scaler2_config ds2cbcr; + CHECKED_COPY_FROM_USER(&ds2cbcr); + + vfe_scaler2cbcr_config(&ds2cbcr); + } + break; + + case VFE_CMD_ID_CHROMA_SUBSAMPLE_CONFIG:{ + struct vfe_cmd_chroma_subsample_config sub; + CHECKED_COPY_FROM_USER(&sub); + + vfe_chroma_subsample_config(&sub); + } + break; + + case VFE_CMD_ID_FRAME_SKIP_CONFIG:{ + struct vfe_cmd_frame_skip_config fskip; + CHECKED_COPY_FROM_USER(&fskip); + + vfe_frame_skip_config(&fskip); + } + break; + + case VFE_CMD_ID_OUTPUT_CLAMP_CONFIG:{ + struct vfe_cmd_output_clamp_config clamp; + CHECKED_COPY_FROM_USER(&clamp); + + vfe_output_clamp_config(&clamp); + } + break; + + /* module update commands */ 
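
[Editorial note, not part of the patch] The command cases above all pull their payloads in through the CHECKED_COPY_FROM_USER() macro defined earlier in msm_vfe8x.c: it rejects the command with -EIO when the user-supplied length does not match the kernel structure size, and returns -EFAULT when copy_from_user() faults. Because the macro ends with a bare `break`, the roll-off case has to wrap it in a do { } while (0) so that kfree() still runs on failure. As a reading aid, the sketch below restates the same checks as an ordinary helper; the function name is made up and the sketch assumes only what is visible in the macro body and in struct msm_vfe_command_8k.

/* Illustrative sketch only: the equivalent of CHECKED_COPY_FROM_USER(&obj)
 * written as a plain function returning an error code instead of `break`.
 * Assumes the driver's existing includes (copy_from_user, pr_err).
 */
static int vfe_copy_cmd_payload(void *dst, size_t len,
				const struct msm_vfe_command_8k *cmd)
{
	if (cmd->length != len) {
		/* user data size must match the kernel structure size */
		pr_err("msm_camera: cmd %d: user data size %d != kernel data size %zu\n",
		       cmd->id, cmd->length, len);
		return -EIO;
	}
	if (copy_from_user(dst, (void __user *)cmd->value, len)) {
		ERR_COPY_FROM_USER();	/* logs __func__ and __LINE__ */
		return -EFAULT;
	}
	return 0;
}
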
+ case VFE_CMD_ID_BLACK_LEVEL_UPDATE:{ + struct vfe_cmd_black_level_config blk; + CHECKED_COPY_FROM_USER(&blk); + + vfe_black_level_update(&blk); + } + break; + + case VFE_CMD_ID_DEMUX_CHANNEL_GAIN_UPDATE:{ + struct vfe_cmd_demux_channel_gain_config dmu; + CHECKED_COPY_FROM_USER(&dmu); + + vfe_demux_channel_gain_update(&dmu); + } + break; + + case VFE_CMD_ID_DEMOSAIC_BPC_UPDATE:{ + struct vfe_cmd_demosaic_bpc_update demo_bpc; + CHECKED_COPY_FROM_USER(&demo_bpc); + + vfe_demosaic_bpc_update(&demo_bpc); + } + break; + + case VFE_CMD_ID_DEMOSAIC_ABF_UPDATE:{ + struct vfe_cmd_demosaic_abf_update demo_abf; + CHECKED_COPY_FROM_USER(&demo_abf); + + vfe_demosaic_abf_update(&demo_abf); + } + break; + + case VFE_CMD_ID_LA_UPDATE:{ + struct vfe_cmd_la_config la; + CHECKED_COPY_FROM_USER(&la); + + vfe_la_update(&la); + } + break; + + case VFE_CMD_ID_RGB_GAMMA_UPDATE:{ + struct vfe_cmd_rgb_gamma_config rgb; + CHECKED_COPY_FROM_USER(&rgb); + + rc = vfe_rgb_gamma_update(&rgb); + } + break; + + case VFE_CMD_ID_ASF_UPDATE:{ + struct vfe_cmd_asf_update asf; + CHECKED_COPY_FROM_USER(&asf); + + vfe_asf_update(&asf); + } + break; + + case VFE_CMD_ID_FRAME_SKIP_UPDATE:{ + struct vfe_cmd_frame_skip_update fskip; + CHECKED_COPY_FROM_USER(&fskip); + + vfe_frame_skip_update(&fskip); + } + break; + + case VFE_CMD_ID_CAMIF_FRAME_UPDATE:{ + struct vfe_cmds_camif_frame fup; + CHECKED_COPY_FROM_USER(&fup); + + vfe_camif_frame_update(&fup); + } + break; + + /* stats update commands */ + case VFE_CMD_ID_STATS_AUTOFOCUS_UPDATE:{ + struct vfe_cmd_stats_af_update afup; + CHECKED_COPY_FROM_USER(&afup); + + vfe_stats_update_af(&afup); + } + break; + + case VFE_CMD_ID_STATS_WB_EXP_UPDATE:{ + struct vfe_cmd_stats_wb_exp_update wbexp; + CHECKED_COPY_FROM_USER(&wbexp); + + vfe_stats_update_wb_exp(&wbexp); + } + break; + + /* control of start, stop, update, etc... */ + case VFE_CMD_ID_STOP: + vfe_stop(); + break; + + case VFE_CMD_ID_GET_HW_VERSION: + break; + + /* stats */ + case VFE_CMD_ID_STATS_SETTING:{ + struct vfe_cmd_stats_setting stats; + CHECKED_COPY_FROM_USER(&stats); + + vfe_stats_setting(&stats); + } + break; + + case VFE_CMD_ID_STATS_AUTOFOCUS_START:{ + struct vfe_cmd_stats_af_start af; + CHECKED_COPY_FROM_USER(&af); + + vfe_stats_start_af(&af); + } + break; + + case VFE_CMD_ID_STATS_AUTOFOCUS_STOP: + vfe_stats_af_stop(); + break; + + case VFE_CMD_ID_STATS_WB_EXP_START:{ + struct vfe_cmd_stats_wb_exp_start awexp; + CHECKED_COPY_FROM_USER(&awexp); + + vfe_stats_start_wb_exp(&awexp); + } + break; + + case VFE_CMD_ID_STATS_WB_EXP_STOP: + vfe_stats_wb_exp_stop(); + break; + + case VFE_CMD_ID_ASYNC_TIMER_SETTING: + break; + + case VFE_CMD_ID_UPDATE: + vfe_update(); + break; + + /* test gen */ + case VFE_CMD_ID_TEST_GEN_START: + break; + +/* + acknowledge from upper layer + these are not in general command. 
+ + case VFE_CMD_ID_OUTPUT1_ACK: + break; + case VFE_CMD_ID_OUTPUT2_ACK: + break; + case VFE_CMD_ID_EPOCH1_ACK: + break; + case VFE_CMD_ID_EPOCH2_ACK: + break; + case VFE_CMD_ID_STATS_AUTOFOCUS_ACK: + break; + case VFE_CMD_ID_STATS_WB_EXP_ACK: + break; +*/ + + default: + pr_err("%s: invalid cmd id %d\n", __func__, cmd->id); + rc = -EINVAL; + break; + } /* switch */ + + return rc; +} + +static int vfe_config(struct msm_vfe_cfg_cmd *cmd, void *data) +{ + struct msm_pmem_region *regptr; + struct msm_vfe_command_8k vfecmd; + struct vfe_cmd_axi_output_config axio; + struct axidata *axid = data; + + int rc = 0; + + if (cmd->cmd_type != CMD_FRAME_BUF_RELEASE && + cmd->cmd_type != CMD_STATS_BUF_RELEASE && + cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE) { + if (copy_from_user(&vfecmd, + (void __user *)(cmd->value), sizeof(vfecmd))) { + ERR_COPY_FROM_USER(); + return -EFAULT; + } + } + + CDBG("%s: cmdType = %d\n", __func__, cmd->cmd_type); + + switch (cmd->cmd_type) { + case CMD_GENERAL: + rc = vfe_proc_general(&vfecmd); + break; + + case CMD_STATS_ENABLE: + case CMD_STATS_AXI_CFG: { + int i; + struct vfe_cmd_stats_setting scfg; + + BUG_ON(!axid); + + if (vfecmd.length != sizeof(scfg)) { + pr_err + ("msm_camera: %s: cmd %d: user-space "\ + "data size %d != kernel data size %d\n", + __func__, + cmd->cmd_type, vfecmd.length, + sizeof(scfg)); + return -EIO; + } + + if (copy_from_user(&scfg, + (void __user *)(vfecmd.value), + sizeof(scfg))) { + ERR_COPY_FROM_USER(); + return -EFAULT; + } + + regptr = axid->region; + if (axid->bufnum1 > 0) { + for (i = 0; i < axid->bufnum1; i++) { + scfg.awbBuffer[i] = + (uint32_t) (regptr->paddr); + regptr++; + } + } + + if (axid->bufnum2 > 0) { + for (i = 0; i < axid->bufnum2; i++) { + scfg.afBuffer[i] = + (uint32_t) (regptr->paddr); + regptr++; + } + } + + vfe_stats_setting(&scfg); + } + break; + + case CMD_STATS_AF_AXI_CFG: + break; + + case CMD_FRAME_BUF_RELEASE: { + /* preview buffer release */ + struct msm_frame *b; + unsigned long p; + struct vfe_cmd_output_ack fack; + + BUG_ON(!data); + + b = (struct msm_frame *)(cmd->value); + p = *(unsigned long *)data; + +#ifndef CONFIG_720P_CAMERA + b->path = MSM_FRAME_ENC; + + fack.ybufaddr[0] = (uint32_t) (p + b->y_off); + + fack.chromabufaddr[0] = (uint32_t) (p + b->cbcr_off); + + if (b->path == MSM_FRAME_PREV_1) + vfe_output1_ack(&fack); + + if (b->path == MSM_FRAME_ENC || + b->path == MSM_FRAME_PREV_2) + vfe_output2_ack(&fack); +#else + + fack.ybufaddr[0] = (uint32_t) (p + b->y_off); + + fack.chromabufaddr[0] = (uint32_t) (p + b->cbcr_off); + + if (b->path == OUTPUT_TYPE_P) + vfe_output_p_ack(&fack); + + if ((b->path == OUTPUT_TYPE_V) + || (b->path == OUTPUT_TYPE_S)) + vfe_output_v_ack(&fack); +#endif + } + break; + + case CMD_SNAP_BUF_RELEASE: + break; + + case CMD_STATS_BUF_RELEASE: { + struct vfe_cmd_stats_wb_exp_ack sack; + + BUG_ON(!data); + + sack.nextWbExpOutputBufferAddr = *(uint32_t *) data; + vfe_stats_wb_exp_ack(&sack); + } + break; + + case CMD_STATS_AF_BUF_RELEASE: { + struct vfe_cmd_stats_af_ack ack; + + BUG_ON(!data); + + ack.nextAFOutputBufferAddr = *(uint32_t *) data; + vfe_stats_af_ack(&ack); + } + break; +#ifndef CONFIG_720P_CAMERA + case CMD_AXI_CFG_OUT1: { + + BUG_ON(!axid); + + if (copy_from_user(&axio, (void __user *)(vfecmd.value), + sizeof(axio))) { + ERR_COPY_FROM_USER(); + return -EFAULT; + } + + vfe_config_axi(OUTPUT_1, axid, &axio); + vfe_axi_output_config(&axio); + } + break; + + case CMD_AXI_CFG_OUT2: + case CMD_RAW_PICT_AXI_CFG: { + + BUG_ON(!axid); + + if (copy_from_user(&axio, (void __user 
*)(vfecmd.value), + sizeof(axio))) { + ERR_COPY_FROM_USER(); + return -EFAULT; + } + + vfe_config_axi(OUTPUT_2, axid, &axio); + + axio.outputDataSize = 0; + vfe_axi_output_config(&axio); + } + break; + + case CMD_AXI_CFG_SNAP_O1_AND_O2:{ + + BUG_ON(!axid); + + if (copy_from_user(&axio, (void __user *)(vfecmd.value), + sizeof(axio))) { + ERR_COPY_FROM_USER(); + return -EFAULT; + } + + vfe_config_axi(OUTPUT_1_AND_2, axid, &axio); + vfe_axi_output_config(&axio); + } + break; +#else + case CMD_AXI_CFG_PREVIEW: + case CMD_RAW_PICT_AXI_CFG: { + + BUG_ON(!axid); + + if (copy_from_user(&axio, (void __user *)(vfecmd.value), + sizeof(axio))) { + pr_err("%s %d: copy_from_user failed\n", + __func__, __LINE__); + return -EFAULT; + } + + vfe_config_axi(OUTPUT_2, axid, &axio); + + axio.outputDataSize = 0; + vfe_axi_output_config(&axio); + } + break; + + case CMD_AXI_CFG_SNAP: { + + BUG_ON(!axid); + + if (copy_from_user(&axio, (void __user *)(vfecmd.value), + sizeof(axio))) { + pr_err("%s %d: copy_from_user failed\n", + __func__, __LINE__); + return -EFAULT; + } + + vfe_config_axi(OUTPUT_1_AND_2, axid, &axio); + vfe_axi_output_config(&axio); + } + break; + + case CMD_AXI_CFG_VIDEO: { + BUG_ON(!axid); + + if (copy_from_user(&axio, (void __user *)(vfecmd.value), + sizeof(axio))) { + pr_err("%s %d: copy_from_user failed\n", + __func__, __LINE__); + return -EFAULT; + } + vfe_config_axi(OUTPUT_1_AND_3, axid, &axio); + axio.outputDataSize = 0; + vfe_axi_output_config(&axio); + } + break; +#endif + default: + break; + } /* switch */ + + return rc; +} + +void msm_camvfe_fn_init(struct msm_camvfe_fn *fptr, void *data) +{ + fptr->vfe_init = vfe_init; + fptr->vfe_enable = vfe_enable; + fptr->vfe_config = vfe_config; + fptr->vfe_disable = vfe_disable; + fptr->vfe_release = vfe_release; + vfe_syncdata = data; +} diff --git a/drivers/media/video/msm/msm_vfe8x.h b/drivers/media/video/msm/msm_vfe8x.h new file mode 100644 index 0000000000000..dc59b0fdd39bf --- /dev/null +++ b/drivers/media/video/msm/msm_vfe8x.h @@ -0,0 +1,921 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#ifndef __MSM_VFE8X_H__ +#define __MSM_VFE8X_H__ + +#define TRUE 1 +#define FALSE 0 +#define boolean uint8_t + +enum VFE_STATE { + VFE_STATE_IDLE, + VFE_STATE_ACTIVE +}; + +enum vfe_cmd_id { + /* + *Important! Command_ID are arranged in order. 
+ *Don't change!*/ + VFE_CMD_ID_START, + VFE_CMD_ID_RESET, + + /* bus and camif config */ + VFE_CMD_ID_AXI_INPUT_CONFIG, + VFE_CMD_ID_CAMIF_CONFIG, + VFE_CMD_ID_AXI_OUTPUT_CONFIG, + + /* module config */ + VFE_CMD_ID_BLACK_LEVEL_CONFIG, + VFE_CMD_ID_ROLL_OFF_CONFIG, + VFE_CMD_ID_DEMUX_CHANNEL_GAIN_CONFIG, + VFE_CMD_ID_DEMOSAIC_CONFIG, + VFE_CMD_ID_FOV_CROP_CONFIG, + VFE_CMD_ID_MAIN_SCALER_CONFIG, + VFE_CMD_ID_WHITE_BALANCE_CONFIG, + VFE_CMD_ID_COLOR_CORRECTION_CONFIG, + VFE_CMD_ID_LA_CONFIG, + VFE_CMD_ID_RGB_GAMMA_CONFIG, + VFE_CMD_ID_CHROMA_ENHAN_CONFIG, + VFE_CMD_ID_CHROMA_SUPPRESSION_CONFIG, + VFE_CMD_ID_ASF_CONFIG, + VFE_CMD_ID_SCALER2Y_CONFIG, + VFE_CMD_ID_SCALER2CbCr_CONFIG, + VFE_CMD_ID_CHROMA_SUBSAMPLE_CONFIG, + VFE_CMD_ID_FRAME_SKIP_CONFIG, + VFE_CMD_ID_OUTPUT_CLAMP_CONFIG, + + /* test gen */ + VFE_CMD_ID_TEST_GEN_START, + + VFE_CMD_ID_UPDATE, + + /* ackownledge from upper layer */ + VFE_CMD_ID_OUTPUT1_ACK, + VFE_CMD_ID_OUTPUT2_ACK, + VFE_CMD_ID_EPOCH1_ACK, + VFE_CMD_ID_EPOCH2_ACK, + VFE_CMD_ID_STATS_AUTOFOCUS_ACK, + VFE_CMD_ID_STATS_WB_EXP_ACK, + + /* module update commands */ + VFE_CMD_ID_BLACK_LEVEL_UPDATE, + VFE_CMD_ID_DEMUX_CHANNEL_GAIN_UPDATE, + VFE_CMD_ID_DEMOSAIC_BPC_UPDATE, + VFE_CMD_ID_DEMOSAIC_ABF_UPDATE, + VFE_CMD_ID_FOV_CROP_UPDATE, + VFE_CMD_ID_WHITE_BALANCE_UPDATE, + VFE_CMD_ID_COLOR_CORRECTION_UPDATE, + VFE_CMD_ID_LA_UPDATE, + VFE_CMD_ID_RGB_GAMMA_UPDATE, + VFE_CMD_ID_CHROMA_ENHAN_UPDATE, + VFE_CMD_ID_CHROMA_SUPPRESSION_UPDATE, + VFE_CMD_ID_MAIN_SCALER_UPDATE, + VFE_CMD_ID_SCALER2CbCr_UPDATE, + VFE_CMD_ID_SCALER2Y_UPDATE, + VFE_CMD_ID_ASF_UPDATE, + VFE_CMD_ID_FRAME_SKIP_UPDATE, + VFE_CMD_ID_CAMIF_FRAME_UPDATE, + + /* stats update commands */ + VFE_CMD_ID_STATS_AUTOFOCUS_UPDATE, + VFE_CMD_ID_STATS_WB_EXP_UPDATE, + + /* control of start, stop, update, etc... 
*/ + VFE_CMD_ID_STOP, + VFE_CMD_ID_GET_HW_VERSION, + + /* stats */ + VFE_CMD_ID_STATS_SETTING, + VFE_CMD_ID_STATS_AUTOFOCUS_START, + VFE_CMD_ID_STATS_AUTOFOCUS_STOP, + VFE_CMD_ID_STATS_WB_EXP_START, + VFE_CMD_ID_STATS_WB_EXP_STOP, + + VFE_CMD_ID_ASYNC_TIMER_SETTING, +#ifdef CONFIG_720P_CAMERA + /* Video recording */ + VFE_CMD_ID_START_RECORDING, + VFE_CMD_ID_STOP_RECORDING, +#endif + /* max id */ + VFE_CMD_ID_MAX +}; + +struct vfe_cmd_hw_version { + uint32_t minorVersion; + uint32_t majorVersion; + uint32_t coreVersion; +}; + +enum VFE_CAMIF_SYNC_EDGE { + VFE_CAMIF_SYNC_EDGE_ActiveHigh, + VFE_CAMIF_SYNC_EDGE_ActiveLow +}; + +enum VFE_CAMIF_SYNC_MODE { + VFE_CAMIF_SYNC_MODE_APS, + VFE_CAMIF_SYNC_MODE_EFS, + VFE_CAMIF_SYNC_MODE_ELS, + VFE_CAMIF_SYNC_MODE_ILLEGAL +}; + +struct vfe_cmds_camif_efs { + uint8_t efsendofline; + uint8_t efsstartofline; + uint8_t efsendofframe; + uint8_t efsstartofframe; +}; + +struct vfe_cmds_camif_frame { + uint16_t pixelsPerLine; + uint16_t linesPerFrame; +}; + +struct vfe_cmds_camif_window { + uint16_t firstpixel; + uint16_t lastpixel; + uint16_t firstline; + uint16_t lastline; +}; + +enum CAMIF_SUBSAMPLE_FRAME_SKIP { + CAMIF_SUBSAMPLE_FRAME_SKIP_0, + CAMIF_SUBSAMPLE_FRAME_SKIP_AllFrames, + CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_2Frame, + CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_3Frame, + CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_4Frame, + CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_5Frame, + CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_6Frame, + CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_7Frame, + CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_8Frame, + CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_9Frame, + CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_10Frame, + CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_11Frame, + CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_12Frame, + CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_13Frame, + CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_14Frame, + CAMIF_SUBSAMPLE_FRAME_SKIP_ONE_OUT_OF_EVERY_15Frame +}; + +struct vfe_cmds_camif_subsample { + uint16_t pixelskipmask; + uint16_t lineskipmask; + enum CAMIF_SUBSAMPLE_FRAME_SKIP frameskip; + uint8_t frameskipmode; + uint8_t pixelskipwrap; +}; + +struct vfe_cmds_camif_epoch { + uint8_t enable; + uint16_t lineindex; +}; + +struct vfe_cmds_camif_cfg { + enum VFE_CAMIF_SYNC_EDGE vSyncEdge; + enum VFE_CAMIF_SYNC_EDGE hSyncEdge; + enum VFE_CAMIF_SYNC_MODE syncMode; + uint8_t vfeSubSampleEnable; + uint8_t busSubSampleEnable; + uint8_t irqSubSampleEnable; + uint8_t binningEnable; + uint8_t misrEnable; +}; + +struct vfe_cmd_camif_config { + struct vfe_cmds_camif_cfg camifConfig; + struct vfe_cmds_camif_efs EFS; + struct vfe_cmds_camif_frame frame; + struct vfe_cmds_camif_window window; + struct vfe_cmds_camif_subsample subsample; + struct vfe_cmds_camif_epoch epoch1; + struct vfe_cmds_camif_epoch epoch2; +}; + +enum VFE_AXI_OUTPUT_MODE { + VFE_AXI_OUTPUT_MODE_Output1, + VFE_AXI_OUTPUT_MODE_Output2, + VFE_AXI_OUTPUT_MODE_Output1AndOutput2, + VFE_AXI_OUTPUT_MODE_CAMIFToAXIViaOutput2, + VFE_AXI_OUTPUT_MODE_Output2AndCAMIFToAXIViaOutput1, + VFE_AXI_OUTPUT_MODE_Output1AndCAMIFToAXIViaOutput2, + VFE_AXI_LAST_OUTPUT_MODE_ENUM +}; + +enum VFE_RAW_WR_PATH_SEL { + VFE_RAW_OUTPUT_DISABLED, + VFE_RAW_OUTPUT_ENC_CBCR_PATH, + VFE_RAW_OUTPUT_VIEW_CBCR_PATH, + VFE_RAW_OUTPUT_PATH_INVALID +}; + +enum VFE_RAW_PIXEL_DATA_SIZE { + VFE_RAW_PIXEL_DATA_SIZE_8BIT, + VFE_RAW_PIXEL_DATA_SIZE_10BIT, + VFE_RAW_PIXEL_DATA_SIZE_12BIT, +}; + +#define VFE_AXI_OUTPUT_BURST_LENGTH 4 +#define VFE_MAX_NUM_FRAGMENTS_PER_FRAME 4 
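
[Editorial note, not part of the patch] The fragment-count macros above and the vfe_cmds_axi_out_per_component / vfe_cmd_axi_output_config structures defined just below describe the per-buffer AXI fragment address tables that vfe_config_axi() in msm_vfe8x.c fills from the pmem regions handed down by userspace. The sketch below restates that fill loop for a single output path. It is illustrative only: the helper name is invented, nbuf is expected to be at most VFE_AXI_OUTPUT_CFG_FRAME_COUNT, and the msm_pmem_region fields (paddr, info.y_off, info.cbcr_off) are exactly the ones vfe_config_axi() dereferences; note that, as in the original loops, every fragment of a given buffer receives the same Y and CbCr plane addresses.

/* Illustrative sketch only: how one output path's outFragments[][] table is
 * populated from msm_pmem_region entries, mirroring vfe_config_axi().
 */
static void vfe_fill_output_path_example(
	struct vfe_cmds_axi_per_output_path *path,
	struct msm_pmem_region *reg, int nbuf)
{
	int i, j;

	for (i = 0; i < nbuf; i++, reg++) {
		for (j = 0; j < path->fragmentCount; j++) {
			/* every fragment of buffer i points at that buffer's
			 * Y plane and CbCr plane physical addresses */
			path->outputY.outFragments[i][j] =
				reg->paddr + reg->info.y_off;
			path->outputCbcr.outFragments[i][j] =
				reg->paddr + reg->info.cbcr_off;
		}
	}
}
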
+#define VFE_AXI_OUTPUT_CFG_FRAME_COUNT 3 + +struct vfe_cmds_axi_out_per_component { + uint16_t imageWidth; + uint16_t imageHeight; + uint16_t outRowCount; + uint16_t outRowIncrement; + uint32_t outFragments[VFE_AXI_OUTPUT_CFG_FRAME_COUNT] + [VFE_MAX_NUM_FRAGMENTS_PER_FRAME]; +}; + +struct vfe_cmds_axi_per_output_path { + uint8_t fragmentCount; + struct vfe_cmds_axi_out_per_component outputY; + struct vfe_cmds_axi_out_per_component outputCbcr; +}; + +enum VFE_AXI_BURST_LENGTH { + VFE_AXI_BURST_LENGTH_IS_2 = 2, + VFE_AXI_BURST_LENGTH_IS_4 = 4, + VFE_AXI_BURST_LENGTH_IS_8 = 8, + VFE_AXI_BURST_LENGTH_IS_16 = 16 +}; + +struct vfe_cmd_axi_output_config { + enum VFE_AXI_BURST_LENGTH burstLength; + enum VFE_AXI_OUTPUT_MODE outputMode; + enum VFE_RAW_PIXEL_DATA_SIZE outputDataSize; + struct vfe_cmds_axi_per_output_path output1; + struct vfe_cmds_axi_per_output_path output2; +}; + +struct vfe_cmd_fov_crop_config { + uint8_t enable; + uint16_t firstPixel; + uint16_t lastPixel; + uint16_t firstLine; + uint16_t lastLine; +}; + +struct vfe_cmds_main_scaler_stripe_init { + uint16_t MNCounterInit; + uint16_t phaseInit; +}; + +struct vfe_cmds_scaler_one_dimension { + uint8_t enable; + uint16_t inputSize; + uint16_t outputSize; + uint32_t phaseMultiplicationFactor; + uint8_t interpolationResolution; +}; + +struct vfe_cmd_main_scaler_config { + uint8_t enable; + struct vfe_cmds_scaler_one_dimension hconfig; + struct vfe_cmds_scaler_one_dimension vconfig; + struct vfe_cmds_main_scaler_stripe_init MNInitH; + struct vfe_cmds_main_scaler_stripe_init MNInitV; +}; + +struct vfe_cmd_scaler2_config { + uint8_t enable; + struct vfe_cmds_scaler_one_dimension hconfig; + struct vfe_cmds_scaler_one_dimension vconfig; +}; + +struct vfe_cmd_frame_skip_config { + uint8_t output1Period; + uint32_t output1Pattern; + uint8_t output2Period; + uint32_t output2Pattern; +}; + +struct vfe_cmd_frame_skip_update { + uint32_t output1Pattern; + uint32_t output2Pattern; +}; + +struct vfe_cmd_output_clamp_config { + uint8_t minCh0; + uint8_t minCh1; + uint8_t minCh2; + uint8_t maxCh0; + uint8_t maxCh1; + uint8_t maxCh2; +}; + +struct vfe_cmd_chroma_subsample_config { + uint8_t enable; + uint8_t cropEnable; + uint8_t vsubSampleEnable; + uint8_t hsubSampleEnable; + uint8_t vCosited; + uint8_t hCosited; + uint8_t vCositedPhase; + uint8_t hCositedPhase; + uint16_t cropWidthFirstPixel; + uint16_t cropWidthLastPixel; + uint16_t cropHeightFirstLine; + uint16_t cropHeightLastLine; +}; + +enum VFE_START_INPUT_SOURCE { + VFE_START_INPUT_SOURCE_CAMIF, + VFE_START_INPUT_SOURCE_TESTGEN, + VFE_START_INPUT_SOURCE_AXI, + VFE_START_INPUT_SOURCE_INVALID +}; + +enum VFE_START_OPERATION_MODE { + VFE_START_OPERATION_MODE_CONTINUOUS, + VFE_START_OPERATION_MODE_SNAPSHOT +}; + +enum VFE_START_PIXEL_PATTERN { + VFE_BAYER_RGRGRG, + VFE_BAYER_GRGRGR, + VFE_BAYER_BGBGBG, + VFE_BAYER_GBGBGB, + VFE_YUV_YCbYCr, + VFE_YUV_YCrYCb, + VFE_YUV_CbYCrY, + VFE_YUV_CrYCbY +}; + +enum VFE_BUS_RD_INPUT_PIXEL_PATTERN { + VFE_BAYER_RAW, + VFE_YUV_INTERLEAVED, + VFE_YUV_PSEUDO_PLANAR_Y, + VFE_YUV_PSEUDO_PLANAR_CBCR +}; + +enum VFE_YUV_INPUT_COSITING_MODE { + VFE_YUV_COSITED, + VFE_YUV_INTERPOLATED +}; + +struct vfe_cmd_start { + enum VFE_START_INPUT_SOURCE inputSource; + enum VFE_START_OPERATION_MODE operationMode; + uint8_t snapshotCount; + enum VFE_START_PIXEL_PATTERN pixel; + enum VFE_YUV_INPUT_COSITING_MODE yuvInputCositingMode; +}; + +struct vfe_cmd_output_ack { + uint32_t ybufaddr[VFE_MAX_NUM_FRAGMENTS_PER_FRAME]; + uint32_t chromabufaddr[VFE_MAX_NUM_FRAGMENTS_PER_FRAME]; 
+}; + +#define VFE_STATS_BUFFER_COUNT 3 + +struct vfe_cmd_stats_setting { + uint16_t frameHDimension; + uint16_t frameVDimension; + uint8_t afBusPrioritySelection; + uint8_t afBusPriority; + uint8_t awbBusPrioritySelection; + uint8_t awbBusPriority; + uint8_t histBusPrioritySelection; + uint8_t histBusPriority; + uint32_t afBuffer[VFE_STATS_BUFFER_COUNT]; + uint32_t awbBuffer[VFE_STATS_BUFFER_COUNT]; + uint32_t histBuffer[VFE_STATS_BUFFER_COUNT]; +}; + +struct vfe_cmd_stats_af_start { + uint8_t enable; + uint8_t windowMode; + uint16_t windowHOffset; + uint16_t windowVOffset; + uint16_t windowWidth; + uint16_t windowHeight; + uint8_t gridForMultiWindows[16]; + uint8_t metricSelection; + int16_t metricMax; + int8_t highPassCoef[7]; + int8_t bufferHeader; +}; + +struct vfe_cmd_stats_af_update { + uint8_t windowMode; + uint16_t windowHOffset; + uint16_t windowVOffset; + uint16_t windowWidth; + uint16_t windowHeight; +}; + +struct vfe_cmd_stats_wb_exp_start { + uint8_t enable; + uint8_t wbExpRegions; + uint8_t wbExpSubRegion; + uint8_t awbYMin; + uint8_t awbYMax; + int8_t awbMCFG[4]; + int16_t awbCCFG[4]; + int8_t axwHeader; +}; + +struct vfe_cmd_stats_wb_exp_update { + uint8_t wbExpRegions; + uint8_t wbExpSubRegion; + int8_t awbYMin; + int8_t awbYMax; + int8_t awbMCFG[4]; + int16_t awbCCFG[4]; +}; + +struct vfe_cmd_stats_af_ack { + uint32_t nextAFOutputBufferAddr; +}; + +struct vfe_cmd_stats_wb_exp_ack { + uint32_t nextWbExpOutputBufferAddr; +}; + +struct vfe_cmd_black_level_config { + uint8_t enable; + uint16_t evenEvenAdjustment; + uint16_t evenOddAdjustment; + uint16_t oddEvenAdjustment; + uint16_t oddOddAdjustment; +}; + +/* 13*1 */ +#define VFE_ROLL_OFF_INIT_TABLE_SIZE 13 +/* 13*16 */ +#define VFE_ROLL_OFF_DELTA_TABLE_SIZE 208 + +struct vfe_cmd_roll_off_config { + uint8_t enable; + uint16_t gridWidth; + uint16_t gridHeight; + uint16_t yDelta; + uint8_t gridXIndex; + uint8_t gridYIndex; + uint16_t gridPixelXIndex; + uint16_t gridPixelYIndex; + uint16_t yDeltaAccum; + uint16_t initTableR[VFE_ROLL_OFF_INIT_TABLE_SIZE]; + uint16_t initTableGr[VFE_ROLL_OFF_INIT_TABLE_SIZE]; + uint16_t initTableB[VFE_ROLL_OFF_INIT_TABLE_SIZE]; + uint16_t initTableGb[VFE_ROLL_OFF_INIT_TABLE_SIZE]; + int16_t deltaTableR[VFE_ROLL_OFF_DELTA_TABLE_SIZE]; + int16_t deltaTableGr[VFE_ROLL_OFF_DELTA_TABLE_SIZE]; + int16_t deltaTableB[VFE_ROLL_OFF_DELTA_TABLE_SIZE]; + int16_t deltaTableGb[VFE_ROLL_OFF_DELTA_TABLE_SIZE]; +}; + +struct vfe_cmd_demux_channel_gain_config { + uint16_t ch0EvenGain; + uint16_t ch0OddGain; + uint16_t ch1Gain; + uint16_t ch2Gain; +}; + +struct vfe_cmds_demosaic_abf { + uint8_t enable; + uint8_t forceOn; + uint8_t shift; + uint16_t lpThreshold; + uint16_t max; + uint16_t min; + uint8_t ratio; +}; + +struct vfe_cmds_demosaic_bpc { + uint8_t enable; + uint16_t fmaxThreshold; + uint16_t fminThreshold; + uint16_t redDiffThreshold; + uint16_t blueDiffThreshold; + uint16_t greenDiffThreshold; +}; + +struct vfe_cmd_demosaic_config { + uint8_t enable; + uint8_t slopeShift; + struct vfe_cmds_demosaic_abf abfConfig; + struct vfe_cmds_demosaic_bpc bpcConfig; +}; + +struct vfe_cmd_demosaic_bpc_update { + struct vfe_cmds_demosaic_bpc bpcUpdate; +}; + +struct vfe_cmd_demosaic_abf_update { + struct vfe_cmds_demosaic_abf abfUpdate; +}; + +struct vfe_cmd_white_balance_config { + uint8_t enable; + uint16_t ch2Gain; + uint16_t ch1Gain; + uint16_t ch0Gain; +}; + +enum VFE_COLOR_CORRECTION_COEF_QFACTOR { + COEF_IS_Q7_SIGNED, + COEF_IS_Q8_SIGNED, + COEF_IS_Q9_SIGNED, + COEF_IS_Q10_SIGNED +}; + +struct 
vfe_cmd_color_correction_config { + uint8_t enable; + enum VFE_COLOR_CORRECTION_COEF_QFACTOR coefQFactor; + int16_t C0; + int16_t C1; + int16_t C2; + int16_t C3; + int16_t C4; + int16_t C5; + int16_t C6; + int16_t C7; + int16_t C8; + int16_t K0; + int16_t K1; + int16_t K2; +}; + +#define VFE_LA_TABLE_LENGTH 256 +struct vfe_cmd_la_config { + uint8_t enable; + int16_t table[VFE_LA_TABLE_LENGTH]; +}; + +#define VFE_GAMMA_TABLE_LENGTH 256 +enum VFE_RGB_GAMMA_TABLE_SELECT { + RGB_GAMMA_CH0_SELECTED, + RGB_GAMMA_CH1_SELECTED, + RGB_GAMMA_CH2_SELECTED, + RGB_GAMMA_CH0_CH1_SELECTED, + RGB_GAMMA_CH0_CH2_SELECTED, + RGB_GAMMA_CH1_CH2_SELECTED, + RGB_GAMMA_CH0_CH1_CH2_SELECTED +}; + +struct vfe_cmd_rgb_gamma_config { + uint8_t enable; + enum VFE_RGB_GAMMA_TABLE_SELECT channelSelect; + int16_t table[VFE_GAMMA_TABLE_LENGTH]; +}; + +struct vfe_cmd_chroma_enhan_config { + uint8_t enable; + int16_t am; + int16_t ap; + int16_t bm; + int16_t bp; + int16_t cm; + int16_t cp; + int16_t dm; + int16_t dp; + int16_t kcr; + int16_t kcb; + int16_t RGBtoYConversionV0; + int16_t RGBtoYConversionV1; + int16_t RGBtoYConversionV2; + uint8_t RGBtoYConversionOffset; +}; + +struct vfe_cmd_chroma_suppression_config { + uint8_t enable; + uint8_t m1; + uint8_t m3; + uint8_t n1; + uint8_t n3; + uint8_t nn1; + uint8_t mm1; +}; + +struct vfe_cmd_asf_config { + uint8_t enable; + uint8_t smoothFilterEnabled; + uint8_t sharpMode; + uint8_t smoothCoefCenter; + uint8_t smoothCoefSurr; + uint8_t normalizeFactor; + uint8_t sharpK1; + uint8_t sharpK2; + uint8_t sharpThreshE1; + int8_t sharpThreshE2; + int8_t sharpThreshE3; + int8_t sharpThreshE4; + int8_t sharpThreshE5; + int8_t filter1Coefficients[9]; + int8_t filter2Coefficients[9]; + uint8_t cropEnable; + uint16_t cropFirstPixel; + uint16_t cropLastPixel; + uint16_t cropFirstLine; + uint16_t cropLastLine; +}; + +struct vfe_cmd_asf_update { + uint8_t enable; + uint8_t smoothFilterEnabled; + uint8_t sharpMode; + uint8_t smoothCoefCenter; + uint8_t smoothCoefSurr; + uint8_t normalizeFactor; + uint8_t sharpK1; + uint8_t sharpK2; + uint8_t sharpThreshE1; + int8_t sharpThreshE2; + int8_t sharpThreshE3; + int8_t sharpThreshE4; + int8_t sharpThreshE5; + int8_t filter1Coefficients[9]; + int8_t filter2Coefficients[9]; + uint8_t cropEnable; +}; + +enum VFE_TEST_GEN_SYNC_EDGE { + VFE_TEST_GEN_SYNC_EDGE_ActiveHigh, + VFE_TEST_GEN_SYNC_EDGE_ActiveLow +}; + +struct vfe_cmd_test_gen_start { + uint8_t pixelDataSelect; + uint8_t systematicDataSelect; + enum VFE_TEST_GEN_SYNC_EDGE hsyncEdge; + enum VFE_TEST_GEN_SYNC_EDGE vsyncEdge; + uint16_t numFrame; + enum VFE_RAW_PIXEL_DATA_SIZE pixelDataSize; + uint16_t imageWidth; + uint16_t imageHeight; + uint32_t startOfFrameOffset; + uint32_t endOfFrameNOffset; + uint16_t startOfLineOffset; + uint16_t endOfLineNOffset; + uint16_t hbi; + uint8_t vblEnable; + uint16_t vbl; + uint8_t startOfFrameDummyLine; + uint8_t endOfFrameDummyLine; + uint8_t unicolorBarEnable; + uint8_t colorBarsSplitEnable; + uint8_t unicolorBarSelect; + enum VFE_START_PIXEL_PATTERN colorBarsPixelPattern; + uint8_t colorBarsRotatePeriod; + uint16_t testGenRandomSeed; +}; + +struct vfe_cmd_bus_pm_start { + uint8_t output2YWrPmEnable; + uint8_t output2CbcrWrPmEnable; + uint8_t output1YWrPmEnable; + uint8_t output1CbcrWrPmEnable; +}; + +struct vfe_cmd_camif_frame_update { + struct vfe_cmds_camif_frame camifFrame; +}; + +struct vfe_cmd_sync_timer_setting { + uint8_t whichSyncTimer; + uint8_t operation; + uint8_t polarity; + uint16_t repeatCount; + uint16_t hsyncCount; + uint32_t pclkCount; + 
uint32_t outputDuration; +}; + +struct vfe_cmd_async_timer_setting { + uint8_t whichAsyncTimer; + uint8_t operation; + uint8_t polarity; + uint16_t repeatCount; + uint16_t inactiveCount; + uint32_t activeCount; +}; + +struct vfe_frame_skip_counts { + uint32_t totalFrameCount; + uint32_t output1Count; + uint32_t output2Count; +}; + +enum VFE_AXI_RD_UNPACK_HBI_SEL { + VFE_AXI_RD_HBI_32_CLOCK_CYCLES, + VFE_AXI_RD_HBI_64_CLOCK_CYCLES, + VFE_AXI_RD_HBI_128_CLOCK_CYCLES, + VFE_AXI_RD_HBI_256_CLOCK_CYCLES, + VFE_AXI_RD_HBI_512_CLOCK_CYCLES, + VFE_AXI_RD_HBI_1024_CLOCK_CYCLES, + VFE_AXI_RD_HBI_2048_CLOCK_CYCLES, + VFE_AXI_RD_HBI_4096_CLOCK_CYCLES +}; + +struct vfe_cmd_axi_input_config { + uint32_t fragAddr[4]; + uint8_t totalFragmentCount; + uint16_t ySize; + uint16_t xOffset; + uint16_t xSize; + uint16_t rowIncrement; + uint16_t numOfRows; + enum VFE_AXI_BURST_LENGTH burstLength; + uint8_t unpackPhase; + enum VFE_AXI_RD_UNPACK_HBI_SEL unpackHbi; + enum VFE_RAW_PIXEL_DATA_SIZE pixelSize; + uint8_t padRepeatCountLeft; + uint8_t padRepeatCountRight; + uint8_t padRepeatCountTop; + uint8_t padRepeatCountBottom; + uint8_t padLeftComponentSelectCycle0; + uint8_t padLeftComponentSelectCycle1; + uint8_t padLeftComponentSelectCycle2; + uint8_t padLeftComponentSelectCycle3; + uint8_t padLeftStopCycle0; + uint8_t padLeftStopCycle1; + uint8_t padLeftStopCycle2; + uint8_t padLeftStopCycle3; + uint8_t padRightComponentSelectCycle0; + uint8_t padRightComponentSelectCycle1; + uint8_t padRightComponentSelectCycle2; + uint8_t padRightComponentSelectCycle3; + uint8_t padRightStopCycle0; + uint8_t padRightStopCycle1; + uint8_t padRightStopCycle2; + uint8_t padRightStopCycle3; + uint8_t padTopLineCount; + uint8_t padBottomLineCount; +}; + +struct vfe_interrupt_status { + uint8_t camifErrorIrq; + uint8_t camifSofIrq; + uint8_t camifEolIrq; + uint8_t camifEofIrq; + uint8_t camifEpoch1Irq; + uint8_t camifEpoch2Irq; + uint8_t camifOverflowIrq; + uint8_t ceIrq; + uint8_t regUpdateIrq; + uint8_t resetAckIrq; + uint8_t encYPingpongIrq; + uint8_t encCbcrPingpongIrq; + uint8_t viewYPingpongIrq; + uint8_t viewCbcrPingpongIrq; + uint8_t rdPingpongIrq; + uint8_t afPingpongIrq; + uint8_t awbPingpongIrq; + uint8_t histPingpongIrq; + uint8_t encIrq; + uint8_t viewIrq; + uint8_t busOverflowIrq; + uint8_t afOverflowIrq; + uint8_t awbOverflowIrq; + uint8_t syncTimer0Irq; + uint8_t syncTimer1Irq; + uint8_t syncTimer2Irq; + uint8_t asyncTimer0Irq; + uint8_t asyncTimer1Irq; + uint8_t asyncTimer2Irq; + uint8_t asyncTimer3Irq; + uint8_t axiErrorIrq; + uint8_t violationIrq; + uint8_t anyErrorIrqs; + uint8_t anyOutput1PathIrqs; + uint8_t anyOutput2PathIrqs; + uint8_t anyOutputPathIrqs; + uint8_t anyAsyncTimerIrqs; + uint8_t anySyncTimerIrqs; + uint8_t anyIrqForActiveStatesOnly; +}; + +enum VFE_MESSAGE_ID { + VFE_MSG_ID_RESET_ACK, + VFE_MSG_ID_START_ACK, + VFE_MSG_ID_STOP_ACK, + VFE_MSG_ID_UPDATE_ACK, +#ifndef CONFIG_720P_CAMERA + VFE_MSG_ID_OUTPUT1, + VFE_MSG_ID_OUTPUT2, +#else + VFE_MSG_ID_OUTPUT_P, + VFE_MSG_ID_OUTPUT_V, + VFE_MSG_ID_OUTPUT_S, + VFE_MSG_ID_OUTPUT_T, +#endif + VFE_MSG_ID_SNAPSHOT_DONE, + VFE_MSG_ID_STATS_AUTOFOCUS, + VFE_MSG_ID_STATS_WB_EXP, + VFE_MSG_ID_EPOCH1, + VFE_MSG_ID_EPOCH2, + VFE_MSG_ID_SYNC_TIMER0_DONE, + VFE_MSG_ID_SYNC_TIMER1_DONE, + VFE_MSG_ID_SYNC_TIMER2_DONE, + VFE_MSG_ID_ASYNC_TIMER0_DONE, + VFE_MSG_ID_ASYNC_TIMER1_DONE, + VFE_MSG_ID_ASYNC_TIMER2_DONE, + VFE_MSG_ID_ASYNC_TIMER3_DONE, + VFE_MSG_ID_AF_OVERFLOW, + VFE_MSG_ID_AWB_OVERFLOW, + VFE_MSG_ID_AXI_ERROR, + VFE_MSG_ID_CAMIF_OVERFLOW, + 
VFE_MSG_ID_VIOLATION, + VFE_MSG_ID_CAMIF_ERROR, + VFE_MSG_ID_BUS_OVERFLOW, +}; + +struct vfe_msg_stats_autofocus { + uint32_t afBuffer; + uint32_t frameCounter; +}; + +struct vfe_msg_stats_wb_exp { + uint32_t awbBuffer; + uint32_t frameCounter; +}; + +struct vfe_frame_bpc_info { + uint32_t greenDefectPixelCount; + uint32_t redBlueDefectPixelCount; +}; + +struct vfe_frame_asf_info { + uint32_t asfMaxEdge; + uint32_t asfHbiCount; +}; + +struct vfe_msg_camif_status { + uint8_t camifState; + uint32_t pixelCount; + uint32_t lineCount; +}; + +struct vfe_bus_pm_per_path { + uint32_t yWrPmStats0; + uint32_t yWrPmStats1; + uint32_t cbcrWrPmStats0; + uint32_t cbcrWrPmStats1; +}; + +struct vfe_bus_performance_monitor { + struct vfe_bus_pm_per_path encPathPmInfo; + struct vfe_bus_pm_per_path viewPathPmInfo; +}; + +struct vfe_irq_thread_msg { + uint32_t vfeIrqStatus; + uint32_t camifStatus; + uint32_t demosaicStatus; + uint32_t asfMaxEdge; + struct vfe_bus_performance_monitor pmInfo; +}; + +struct vfe_msg_output { + uint32_t yBuffer; + uint32_t cbcrBuffer; + struct vfe_frame_bpc_info bpcInfo; + struct vfe_frame_asf_info asfInfo; + uint32_t frameCounter; + struct vfe_bus_pm_per_path pmData; +}; + +struct vfe_message { + enum VFE_MESSAGE_ID _d; + union { + struct vfe_msg_output msgOutput1; + struct vfe_msg_output msgOutput2; + struct vfe_msg_stats_autofocus msgStatsAf; + struct vfe_msg_stats_wb_exp msgStatsWbExp; + struct vfe_msg_camif_status msgCamifError; + struct vfe_bus_performance_monitor msgBusOverflow; + } _u; +}; + +/* New one for 8k */ +struct msm_vfe_command_8k { + int id; + uint16_t length; + void *value; +}; + +struct vfe_frame_extra { + struct vfe_frame_bpc_info bpcInfo; + struct vfe_frame_asf_info asfInfo; + uint32_t frameCounter; + struct vfe_bus_pm_per_path pmData; +}; +#endif /* __MSM_VFE8X_H__ */ diff --git a/drivers/media/video/msm/msm_vfe8x_proc.c b/drivers/media/video/msm/msm_vfe8x_proc.c new file mode 100644 index 0000000000000..58f9afd533a15 --- /dev/null +++ b/drivers/media/video/msm/msm_vfe8x_proc.c @@ -0,0 +1,4089 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include "msm_vfe8x_proc.h" +#include +#include + +struct isr_queue_cmd { + struct list_head list; + struct vfe_interrupt_status vfeInterruptStatus; + struct vfe_frame_asf_info vfeAsfFrameInfo; + struct vfe_frame_bpc_info vfeBpcFrameInfo; + struct vfe_msg_camif_status vfeCamifStatusLocal; + struct vfe_bus_performance_monitor vfePmData; +}; + +struct msm_vfe8x_ctrl { + /* bit 1:0 ENC_IRQ_MASK = 0x11: + * generate IRQ when both y and cbcr frame is ready. */ + + /* bit 1:0 VIEW_IRQ_MASK= 0x11: + * generate IRQ when both y and cbcr frame is ready. 
*/ + struct vfe_irq_composite_mask_config vfeIrqCompositeMaskLocal; + struct vfe_module_enable vfeModuleEnableLocal; + struct vfe_camif_cfg_data vfeCamifConfigLocal; + struct vfe_cmds_camif_epoch vfeCamifEpoch1Local; + struct vfe_interrupt_mask vfeImaskLocal; + struct vfe_stats_cmd_data vfeStatsCmdLocal; + struct vfe_bus_cfg_data vfeBusConfigLocal; + struct vfe_cmd_bus_pm_start vfeBusPmConfigLocal; + struct vfe_bus_cmd_data vfeBusCmdLocal; + enum vfe_interrupt_name vfeInterruptNameLocal; + uint32_t vfeLaBankSel; + struct vfe_gamma_lut_sel vfeGammaLutSel; + + boolean vfeStartAckPendingFlag; + boolean vfeStopAckPending; + boolean vfeResetAckPending; + boolean vfeUpdateAckPending; + + enum VFE_AXI_OUTPUT_MODE axiOutputMode; + enum VFE_START_OPERATION_MODE vfeOperationMode; + + uint32_t vfeSnapShotCount; + uint32_t vfeRequestedSnapShotCount; + boolean vfeStatsPingPongReloadFlag; + uint32_t vfeFrameId; + + struct vfe_cmd_frame_skip_config vfeFrameSkip; + uint32_t vfeFrameSkipPattern; + uint8_t vfeFrameSkipCount; + uint8_t vfeFrameSkipPeriod; + + boolean vfeTestGenStartFlag; + uint32_t vfeImaskPacked; + uint32_t vfeImaskCompositePacked; + enum VFE_RAW_PIXEL_DATA_SIZE axiInputDataSize; + struct vfe_irq_thread_msg vfeIrqThreadMsgLocal; + + struct vfe_output_path_combo viewPath; + struct vfe_output_path_combo encPath; + struct vfe_frame_skip_counts vfeDroppedFrameCounts; + struct vfe_stats_control afStatsControl; + struct vfe_stats_control awbStatsControl; + + enum VFE_STATE vstate; + + struct msm_vfe_callback *resp; + struct vfe_frame_extra extdata; + + struct isr_queue_cmd irqs[5]; + spinlock_t irqs_lock; + int irq_get; + int irq_put; + + int vfeirq; + void __iomem *vfebase; + + void *syncdata; + struct msm_camera_sensor_info *s_info; +}; + +static struct msm_vfe8x_ctrl *ctrl; + +static void vfe_prog_hw(uint8_t *hwreg, uint32_t *inptr, uint32_t regcnt) +{ + /* unsigned long flags; */ + uint32_t i; + uint32_t *p; + + p = (uint32_t *) (hwreg); + for (i = 0; i < (regcnt >> 2); i++) + writel(*inptr++, p++); + /* *p++ = *inptr++; */ +} + +static void +vfe_set_bus_pipo_addr(struct vfe_output_path_combo *vpath, + struct vfe_output_path_combo *epath) +{ + vpath->yPath.hwRegPingAddress = (uint8_t *) + (ctrl->vfebase + VFE_BUS_VIEW_Y_WR_PING_ADDR); + vpath->yPath.hwRegPongAddress = (uint8_t *) + (ctrl->vfebase + VFE_BUS_VIEW_Y_WR_PONG_ADDR); + vpath->cbcrPath.hwRegPingAddress = (uint8_t *) + (ctrl->vfebase + VFE_BUS_VIEW_CBCR_WR_PING_ADDR); + vpath->cbcrPath.hwRegPongAddress = (uint8_t *) + (ctrl->vfebase + VFE_BUS_VIEW_CBCR_WR_PONG_ADDR); + + epath->yPath.hwRegPingAddress = (uint8_t *) + (ctrl->vfebase + VFE_BUS_ENC_Y_WR_PING_ADDR); + epath->yPath.hwRegPongAddress = (uint8_t *) + (ctrl->vfebase + VFE_BUS_ENC_Y_WR_PONG_ADDR); + epath->cbcrPath.hwRegPingAddress = (uint8_t *) + (ctrl->vfebase + VFE_BUS_ENC_CBCR_WR_PING_ADDR); + epath->cbcrPath.hwRegPongAddress = (uint8_t *) + (ctrl->vfebase + VFE_BUS_ENC_CBCR_WR_PONG_ADDR); +} + +static void vfe_axi_output(struct vfe_cmd_axi_output_config *in, + struct vfe_output_path_combo *out1, + struct vfe_output_path_combo *out2, uint16_t out) +{ + struct vfe_axi_out_cfg cmd; + + uint16_t temp; + uint32_t burstLength; + + memset(&cmd, 0, sizeof(cmd)); + /* force it to burst length 4, hardware does not support it. 
*/ + burstLength = 1; + + /* AXI Output 2 Y Configuration */ + /* VFE_BUS_ENC_Y_WR_PING_ADDR */ + cmd.out2YPingAddr = out2->yPath.addressBuffer[0]; + + /* VFE_BUS_ENC_Y_WR_PONG_ADDR */ + cmd.out2YPongAddr = out2->yPath.addressBuffer[1]; + + /* VFE_BUS_ENC_Y_WR_IMAGE_SIZE */ + cmd.out2YImageHeight = in->output2.outputY.imageHeight; + /* convert the image width and row increment to be in + * unit of 64bit (8 bytes) */ + temp = (in->output2.outputY.imageWidth + (out - 1)) / out; + cmd.out2YImageWidthin64bit = temp; + + /* VFE_BUS_ENC_Y_WR_BUFFER_CFG */ + cmd.out2YBurstLength = burstLength; + cmd.out2YNumRows = in->output2.outputY.outRowCount; + temp = (in->output2.outputY.outRowIncrement + (out - 1)) / out; + cmd.out2YRowIncrementIn64bit = temp; + + /* AXI Output 2 Cbcr Configuration */ + /* VFE_BUS_ENC_Cbcr_WR_PING_ADDR */ + cmd.out2CbcrPingAddr = out2->cbcrPath.addressBuffer[0]; + + /* VFE_BUS_ENC_Cbcr_WR_PONG_ADDR */ + cmd.out2CbcrPongAddr = out2->cbcrPath.addressBuffer[1]; + + /* VFE_BUS_ENC_Cbcr_WR_IMAGE_SIZE */ + cmd.out2CbcrImageHeight = in->output2.outputCbcr.imageHeight; + temp = (in->output2.outputCbcr.imageWidth + (out - 1)) / out; + cmd.out2CbcrImageWidthIn64bit = temp; + + /* VFE_BUS_ENC_Cbcr_WR_BUFFER_CFG */ + cmd.out2CbcrBurstLength = burstLength; + cmd.out2CbcrNumRows = in->output2.outputCbcr.outRowCount; + temp = (in->output2.outputCbcr.outRowIncrement + (out - 1)) / out; + cmd.out2CbcrRowIncrementIn64bit = temp; + + /* AXI Output 1 Y Configuration */ + /* VFE_BUS_VIEW_Y_WR_PING_ADDR */ + cmd.out1YPingAddr = out1->yPath.addressBuffer[0]; + + /* VFE_BUS_VIEW_Y_WR_PONG_ADDR */ + cmd.out1YPongAddr = out1->yPath.addressBuffer[1]; + + /* VFE_BUS_VIEW_Y_WR_IMAGE_SIZE */ + cmd.out1YImageHeight = in->output1.outputY.imageHeight; + temp = (in->output1.outputY.imageWidth + (out - 1)) / out; + cmd.out1YImageWidthin64bit = temp; + + /* VFE_BUS_VIEW_Y_WR_BUFFER_CFG */ + cmd.out1YBurstLength = burstLength; + cmd.out1YNumRows = in->output1.outputY.outRowCount; + + temp = (in->output1.outputY.outRowIncrement + (out - 1)) / out; + cmd.out1YRowIncrementIn64bit = temp; + + /* AXI Output 1 Cbcr Configuration */ + cmd.out1CbcrPingAddr = out1->cbcrPath.addressBuffer[0]; + + /* VFE_BUS_VIEW_Cbcr_WR_PONG_ADDR */ + cmd.out1CbcrPongAddr = out1->cbcrPath.addressBuffer[1]; + + /* VFE_BUS_VIEW_Cbcr_WR_IMAGE_SIZE */ + cmd.out1CbcrImageHeight = in->output1.outputCbcr.imageHeight; + temp = (in->output1.outputCbcr.imageWidth + (out - 1)) / out; + cmd.out1CbcrImageWidthIn64bit = temp; + + cmd.out1CbcrBurstLength = burstLength; + cmd.out1CbcrNumRows = in->output1.outputCbcr.outRowCount; + temp = (in->output1.outputCbcr.outRowIncrement + (out - 1)) / out; + + cmd.out1CbcrRowIncrementIn64bit = temp; + + vfe_prog_hw(ctrl->vfebase + VFE_BUS_ENC_Y_WR_PING_ADDR, + (uint32_t *)&cmd, sizeof(cmd)); +} + +static void vfe_reg_bus_cfg(struct vfe_bus_cfg_data *in) +{ + struct vfe_axi_bus_cfg cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.stripeRdPathEn = in->stripeRdPathEn; + cmd.encYWrPathEn = in->encYWrPathEn; + cmd.encCbcrWrPathEn = in->encCbcrWrPathEn; + cmd.viewYWrPathEn = in->viewYWrPathEn; + cmd.viewCbcrWrPathEn = in->viewCbcrWrPathEn; + cmd.rawPixelDataSize = (uint32_t) in->rawPixelDataSize; + cmd.rawWritePathSelect = (uint32_t) in->rawWritePathSelect; + + /* program vfe_bus_cfg */ + writel(*((uint32_t *)&cmd), ctrl->vfebase + VFE_BUS_CFG); +} + +static void vfe_reg_camif_config(struct vfe_camif_cfg_data *in) +{ + struct VFE_CAMIFConfigType cfg; + + memset(&cfg, 0, sizeof(cfg)); + + cfg.VSyncEdge = 
in->camifCfgFromCmd.vSyncEdge; + + cfg.HSyncEdge = in->camifCfgFromCmd.hSyncEdge; + + cfg.syncMode = in->camifCfgFromCmd.syncMode; + + cfg.vfeSubsampleEnable = in->camifCfgFromCmd.vfeSubSampleEnable; + + cfg.busSubsampleEnable = in->camifCfgFromCmd.busSubSampleEnable; + + cfg.camif2vfeEnable = in->camif2OutputEnable; + + cfg.camif2busEnable = in->camif2BusEnable; + + cfg.irqSubsampleEnable = in->camifCfgFromCmd.irqSubSampleEnable; + + cfg.binningEnable = in->camifCfgFromCmd.binningEnable; + + cfg.misrEnable = in->camifCfgFromCmd.misrEnable; + + /* program camif_config */ + writel(*((uint32_t *)&cfg), ctrl->vfebase + CAMIF_CONFIG); +} + +static void vfe_reg_bus_cmd(struct vfe_bus_cmd_data *in) +{ + struct vfe_buscmd cmd; + memset(&cmd, 0, sizeof(cmd)); + + cmd.stripeReload = in->stripeReload; + cmd.busPingpongReload = in->busPingpongReload; + cmd.statsPingpongReload = in->statsPingpongReload; + + writel(*((uint32_t *)&cmd), ctrl->vfebase + VFE_BUS_CMD); + + CDBG("bus command = 0x%x\n", (*((uint32_t *)&cmd))); + + /* this is needed, as the control bits are pulse based. + * Don't want to reload bus pingpong again. */ + in->busPingpongReload = 0; + in->statsPingpongReload = 0; + in->stripeReload = 0; +} + +static void vfe_reg_module_cfg(struct vfe_module_enable *in) +{ + struct vfe_mod_enable ena; + + memset(&ena, 0, sizeof(ena)); + + ena.blackLevelCorrectionEnable = in->blackLevelCorrectionEnable; + ena.lensRollOffEnable = in->lensRollOffEnable; + ena.demuxEnable = in->demuxEnable; + ena.chromaUpsampleEnable = in->chromaUpsampleEnable; + ena.demosaicEnable = in->demosaicEnable; + ena.statsEnable = in->statsEnable; + ena.cropEnable = in->cropEnable; + ena.mainScalerEnable = in->mainScalerEnable; + ena.whiteBalanceEnable = in->whiteBalanceEnable; + ena.colorCorrectionEnable = in->colorCorrectionEnable; + ena.yHistEnable = in->yHistEnable; + ena.skinToneEnable = in->skinToneEnable; + ena.lumaAdaptationEnable = in->lumaAdaptationEnable; + ena.rgbLUTEnable = in->rgbLUTEnable; + ena.chromaEnhanEnable = in->chromaEnhanEnable; + ena.asfEnable = in->asfEnable; + ena.chromaSuppressionEnable = in->chromaSuppressionEnable; + ena.chromaSubsampleEnable = in->chromaSubsampleEnable; + ena.scaler2YEnable = in->scaler2YEnable; + ena.scaler2CbcrEnable = in->scaler2CbcrEnable; + + writel(*((uint32_t *)&ena), ctrl->vfebase + VFE_MODULE_CFG); +} + +static void vfe_program_dmi_cfg(enum VFE_DMI_RAM_SEL bankSel) +{ + /* set bit 8 for auto increment. 
*/ + uint32_t value = (uint32_t) ctrl->vfebase + VFE_DMI_CFG_DEFAULT; + + value += (uint32_t) bankSel; + /* CDBG("dmi cfg input bank is 0x%x\n", bankSel); */ + + writel(value, ctrl->vfebase + VFE_DMI_CFG); + writel(0, ctrl->vfebase + VFE_DMI_ADDR); +} + +static void vfe_write_lens_roll_off_table(struct vfe_cmd_roll_off_config *in) +{ + uint16_t i; + uint32_t data; + + uint16_t *initGr = in->initTableGr; + uint16_t *initGb = in->initTableGb; + uint16_t *initB = in->initTableB; + uint16_t *initR = in->initTableR; + + int16_t *pDeltaGr = in->deltaTableGr; + int16_t *pDeltaGb = in->deltaTableGb; + int16_t *pDeltaB = in->deltaTableB; + int16_t *pDeltaR = in->deltaTableR; + + vfe_program_dmi_cfg(ROLLOFF_RAM); + + /* first pack and write init table */ + for (i = 0; i < VFE_ROLL_OFF_INIT_TABLE_SIZE; i++) { + data = (((uint32_t) (*initR)) & 0x0000FFFF) | + (((uint32_t) (*initGr)) << 16); + initR++; + initGr++; + + writel(data, ctrl->vfebase + VFE_DMI_DATA_LO); + + data = (((uint32_t) (*initB)) & 0x0000FFFF) | + /* 20101011: fix mesh LSC */ + (((uint32_t) (*initGb)) << 16); + initB++; + initGb++; + + writel(data, ctrl->vfebase + VFE_DMI_DATA_LO); + } + + /* there are gaps between the init table and delta table, + * set the offset for delta table. */ + writel(LENS_ROLL_OFF_DELTA_TABLE_OFFSET, ctrl->vfebase + VFE_DMI_ADDR); + + /* pack and write delta table */ + for (i = 0; i < VFE_ROLL_OFF_DELTA_TABLE_SIZE; i++) { + /* 20101011: fix mesh LSC */ + data = (((int32_t)(*pDeltaR)) & 0x0000FFFF) | + (((int32_t)(*pDeltaGr))<<16); + pDeltaR++; + pDeltaGr++; + + writel(data, ctrl->vfebase + VFE_DMI_DATA_LO); + /* 20101011: fix mesh LSC */ + data = (((int32_t)(*pDeltaB)) & 0x0000FFFF) | + (((int32_t)(*pDeltaGb))<<16); + + pDeltaB++; + pDeltaGb++; + + writel(data, ctrl->vfebase + VFE_DMI_DATA_LO); + } + + /* After DMI transfer, to make it safe, need to set the + * DMI_CFG to unselect any SRAM + */ + /* unselect the SRAM Bank. 
*/ + writel(VFE_DMI_CFG_DEFAULT, ctrl->vfebase + VFE_DMI_CFG); +} + +static void vfe_set_default_reg_values(void) +{ + writel(0x800080, ctrl->vfebase + VFE_DEMUX_GAIN_0); + writel(0x800080, ctrl->vfebase + VFE_DEMUX_GAIN_1); + writel(0xFFFFF, ctrl->vfebase + VFE_CGC_OVERRIDE); + + /* default frame drop period and pattern */ + writel(0x1f, ctrl->vfebase + VFE_FRAMEDROP_ENC_Y_CFG); + writel(0x1f, ctrl->vfebase + VFE_FRAMEDROP_ENC_CBCR_CFG); + writel(0xFFFFFFFF, ctrl->vfebase + VFE_FRAMEDROP_ENC_Y_PATTERN); + writel(0xFFFFFFFF, ctrl->vfebase + VFE_FRAMEDROP_ENC_CBCR_PATTERN); + writel(0x1f, ctrl->vfebase + VFE_FRAMEDROP_VIEW_Y_CFG); + writel(0x1f, ctrl->vfebase + VFE_FRAMEDROP_VIEW_CBCR_CFG); + writel(0xFFFFFFFF, ctrl->vfebase + VFE_FRAMEDROP_VIEW_Y_PATTERN); + writel(0xFFFFFFFF, ctrl->vfebase + VFE_FRAMEDROP_VIEW_CBCR_PATTERN); + writel(0, ctrl->vfebase + VFE_CLAMP_MIN_CFG); + writel(0xFFFFFF, ctrl->vfebase + VFE_CLAMP_MAX_CFG); +} + +static void vfe_config_demux(uint32_t period, uint32_t even, uint32_t odd) +{ + writel(period, ctrl->vfebase + VFE_DEMUX_CFG); + writel(even, ctrl->vfebase + VFE_DEMUX_EVEN_CFG); + writel(odd, ctrl->vfebase + VFE_DEMUX_ODD_CFG); +} + +static void vfe_pm_stop(void) +{ + writel(VFE_PERFORMANCE_MONITOR_STOP, ctrl->vfebase + VFE_BUS_PM_CMD); +} + +static void vfe_camif_stop_immediately(void) +{ + writel(CAMIF_COMMAND_STOP_IMMEDIATELY, ctrl->vfebase + CAMIF_COMMAND); + writel(0, ctrl->vfebase + VFE_CGC_OVERRIDE); +} + +static void vfe_program_reg_update_cmd(uint32_t value) +{ + writel(value, ctrl->vfebase + VFE_REG_UPDATE_CMD); +} + +static void vfe_program_global_reset_cmd(uint32_t value) +{ + writel(value, ctrl->vfebase + VFE_GLOBAL_RESET_CMD); +} + +static void vfe_program_axi_cmd(uint32_t value) +{ + writel(value, ctrl->vfebase + VFE_AXI_CMD); +} + +static void vfe_program_irq_composite_mask(uint32_t value) +{ + writel(value, ctrl->vfebase + VFE_IRQ_COMPOSITE_MASK); +} + +static inline void vfe_program_irq_mask(uint32_t value) +{ + writel(value, ctrl->vfebase + VFE_IRQ_MASK); +} + +static uint32_t vfe_read_axi_status(void) +{ + return readl(ctrl->vfebase + VFE_AXI_STATUS); +} + +static void +vfe_set_stats_pingpong_address(struct vfe_stats_control *afControl, + struct vfe_stats_control *awbControl) +{ + afControl->hwRegPingAddress = (uint8_t *) + (ctrl->vfebase + VFE_BUS_STATS_AF_WR_PING_ADDR); + afControl->hwRegPongAddress = (uint8_t *) + (ctrl->vfebase + VFE_BUS_STATS_AF_WR_PONG_ADDR); + + awbControl->hwRegPingAddress = (uint8_t *) + (ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PING_ADDR); + awbControl->hwRegPongAddress = (uint8_t *) + (ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PONG_ADDR); +} + +static void vfe_program_lut_bank_sel(struct vfe_gamma_lut_sel *in) +{ + struct VFE_GammaLutSelect_ConfigCmdType cmd; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.ch0BankSelect = in->ch0BankSelect; + cmd.ch1BankSelect = in->ch1BankSelect; + cmd.ch2BankSelect = in->ch2BankSelect; + CDBG("VFE gamma lut bank selection is 0x%x\n", *((uint32_t *)&cmd)); + vfe_prog_hw(ctrl->vfebase + VFE_LUT_BANK_SEL, + (uint32_t *)&cmd, sizeof(cmd)); +} + +static void vfe_program_stats_cmd(struct vfe_stats_cmd_data *in) +{ + struct VFE_StatsCmdType stats; + memset(&stats, 0, sizeof(stats)); + + stats.autoFocusEnable = in->autoFocusEnable; + stats.axwEnable = in->axwEnable; + stats.histEnable = in->histEnable; + stats.clearHistEnable = in->clearHistEnable; + stats.histAutoClearEnable = in->histAutoClearEnable; + stats.colorConversionEnable = in->colorConversionEnable; + + writel(*((uint32_t *)&stats), 
ctrl->vfebase + VFE_STATS_CMD); +} + +static void vfe_pm_start(struct vfe_cmd_bus_pm_start *in) +{ + struct VFE_Bus_Pm_ConfigCmdType cmd; + memset(&cmd, 0, sizeof(struct VFE_Bus_Pm_ConfigCmdType)); + + cmd.output2YWrPmEnable = in->output2YWrPmEnable; + cmd.output2CbcrWrPmEnable = in->output2CbcrWrPmEnable; + cmd.output1YWrPmEnable = in->output1YWrPmEnable; + cmd.output1CbcrWrPmEnable = in->output1CbcrWrPmEnable; + + vfe_prog_hw(ctrl->vfebase + VFE_BUS_PM_CFG, + (uint32_t *)&cmd, sizeof(cmd)); +} + +static void vfe_8k_pm_start(struct vfe_cmd_bus_pm_start *in) +{ + in->output1CbcrWrPmEnable = ctrl->vfeBusConfigLocal.viewCbcrWrPathEn; + in->output1YWrPmEnable = ctrl->vfeBusConfigLocal.viewYWrPathEn; + in->output2CbcrWrPmEnable = ctrl->vfeBusConfigLocal.encCbcrWrPathEn; + in->output2YWrPmEnable = ctrl->vfeBusConfigLocal.encYWrPathEn; + + if (in->output1CbcrWrPmEnable || in->output1YWrPmEnable) + ctrl->viewPath.pmEnabled = TRUE; + + if (in->output2CbcrWrPmEnable || in->output2YWrPmEnable) + ctrl->encPath.pmEnabled = TRUE; + + vfe_pm_start(in); + + writel(VFE_PERFORMANCE_MONITOR_GO, ctrl->vfebase + VFE_BUS_PM_CMD); +} + +static uint32_t vfe_irq_pack(struct vfe_interrupt_mask data) +{ + struct vfe_irqenable packedData; + + memset(&packedData, 0, sizeof(packedData)); + + packedData.camifErrorIrq = data.camifErrorIrq; + packedData.camifSofIrq = data.camifSofIrq; + packedData.camifEolIrq = data.camifEolIrq; + packedData.camifEofIrq = data.camifEofIrq; + packedData.camifEpoch1Irq = data.camifEpoch1Irq; + packedData.camifEpoch2Irq = data.camifEpoch2Irq; + packedData.camifOverflowIrq = data.camifOverflowIrq; + packedData.ceIrq = data.ceIrq; + packedData.regUpdateIrq = data.regUpdateIrq; + packedData.resetAckIrq = data.resetAckIrq; + packedData.encYPingpongIrq = data.encYPingpongIrq; + packedData.encCbcrPingpongIrq = data.encCbcrPingpongIrq; + packedData.viewYPingpongIrq = data.viewYPingpongIrq; + packedData.viewCbcrPingpongIrq = data.viewCbcrPingpongIrq; + packedData.rdPingpongIrq = data.rdPingpongIrq; + packedData.afPingpongIrq = data.afPingpongIrq; + packedData.awbPingpongIrq = data.awbPingpongIrq; + packedData.histPingpongIrq = data.histPingpongIrq; + packedData.encIrq = data.encIrq; + packedData.viewIrq = data.viewIrq; + packedData.busOverflowIrq = data.busOverflowIrq; + packedData.afOverflowIrq = data.afOverflowIrq; + packedData.awbOverflowIrq = data.awbOverflowIrq; + packedData.syncTimer0Irq = data.syncTimer0Irq; + packedData.syncTimer1Irq = data.syncTimer1Irq; + packedData.syncTimer2Irq = data.syncTimer2Irq; + packedData.asyncTimer0Irq = data.asyncTimer0Irq; + packedData.asyncTimer1Irq = data.asyncTimer1Irq; + packedData.asyncTimer2Irq = data.asyncTimer2Irq; + packedData.asyncTimer3Irq = data.asyncTimer3Irq; + packedData.axiErrorIrq = data.axiErrorIrq; + packedData.violationIrq = data.violationIrq; + + return *((uint32_t *)&packedData); +} + +static uint32_t +vfe_irq_composite_pack(struct vfe_irq_composite_mask_config data) +{ + struct VFE_Irq_Composite_MaskType packedData; + + memset(&packedData, 0, sizeof(packedData)); + + packedData.encIrqComMaskBits = data.encIrqComMask; + packedData.viewIrqComMaskBits = data.viewIrqComMask; + packedData.ceDoneSelBits = data.ceDoneSel; + + return *((uint32_t *)&packedData); +} + +static void vfe_addr_convert(struct msm_vfe_phy_info *pinfo, + enum vfe_resp_msg type, void *data, void **ext, + int *elen) +{ + switch (type) { +#ifndef CONFIG_720P_CAMERA + case VFE_MSG_OUTPUT1:{ + pinfo->y_phy = + ((struct vfe_message *)data)->_u.msgOutput1.yBuffer; + 
pinfo->cbcr_phy = + ((struct vfe_message *)data)->_u.msgOutput1. + cbcrBuffer; + + ctrl->extdata.bpcInfo = + ((struct vfe_message *)data)->_u.msgOutput1.bpcInfo; + + ctrl->extdata.asfInfo = + ((struct vfe_message *)data)->_u.msgOutput1.asfInfo; + + ctrl->extdata.frameCounter = + ((struct vfe_message *)data)->_u.msgOutput1. + frameCounter; + + ctrl->extdata.pmData = + ((struct vfe_message *)data)->_u.msgOutput1.pmData; + + *ext = &ctrl->extdata; + *elen = sizeof(ctrl->extdata); + } + break; + + case VFE_MSG_OUTPUT2:{ + pinfo->y_phy = + ((struct vfe_message *)data)->_u.msgOutput2.yBuffer; + pinfo->cbcr_phy = + ((struct vfe_message *)data)->_u.msgOutput2. + cbcrBuffer; + + CDBG("vfe_addr_convert, pinfo->y_phy = 0x%x\n", + pinfo->y_phy); + CDBG("vfe_addr_convert, pinfo->cbcr_phy = 0x%x\n", + pinfo->cbcr_phy); + + ctrl->extdata.bpcInfo = + ((struct vfe_message *)data)->_u.msgOutput2.bpcInfo; + + ctrl->extdata.asfInfo = + ((struct vfe_message *)data)->_u.msgOutput2.asfInfo; + + ctrl->extdata.frameCounter = + ((struct vfe_message *)data)->_u.msgOutput2. + frameCounter; + + ctrl->extdata.pmData = + ((struct vfe_message *)data)->_u.msgOutput2.pmData; + + *ext = &ctrl->extdata; + *elen = sizeof(ctrl->extdata); + } + break; +#else + case VFE_MSG_OUTPUT_P: + case VFE_MSG_OUTPUT_V:{ + pinfo->y_phy = + ((struct vfe_message *)data)->_u.msgOutput2.yBuffer; + pinfo->cbcr_phy = + ((struct vfe_message *)data)->_u.msgOutput2. + cbcrBuffer; + + CDBG("vfe_addr_convert, pinfo->y_phy = 0x%x\n", + pinfo->y_phy); + CDBG("vfe_addr_convert, pinfo->cbcr_phy = 0x%x\n", + pinfo->cbcr_phy); + /*pinfo->output_id = OUTPUT_TYPE_P;*/ + ctrl->extdata.bpcInfo = + ((struct vfe_message *)data)->_u.msgOutput2.bpcInfo; + ctrl->extdata.asfInfo = + ((struct vfe_message *)data)->_u.msgOutput2.asfInfo; + ctrl->extdata.frameCounter = + ((struct vfe_message *)data)->_u.msgOutput2. + frameCounter; + ctrl->extdata.pmData = + ((struct vfe_message *)data)->_u.msgOutput2.pmData; + *ext = &ctrl->extdata; + *elen = sizeof(ctrl->extdata); + } + break; +#endif + + case VFE_MSG_STATS_AF: + pinfo->sbuf_phy = + ((struct vfe_message *)data)->_u.msgStatsAf.afBuffer; + break; + + case VFE_MSG_STATS_WE: + pinfo->sbuf_phy = + ((struct vfe_message *)data)->_u.msgStatsWbExp.awbBuffer; + break; + + default: + break; + } /* switch */ +} + +static boolean vfe_send_preview_msg(struct msm_vfe_resp *rp, + struct vfe_message *msg, void *data); +static boolean vfe_send_video_msg(struct msm_vfe_resp *rp, + struct vfe_message *msg, void *data); +#ifdef CONFIG_720P_CAMERA +static boolean vfe_send_mainimage_msg(struct msm_vfe_resp *rp, + struct vfe_message *msg, void *data); +static boolean vfe_send_thumbnail_msg(struct msm_vfe_resp *rp, + struct vfe_message *msg, void *data); +#endif + +static boolean vfe_send_af_stats_msg(struct msm_vfe_resp *rp, + struct vfe_message *msg, void *data); +static boolean vfe_send_awb_stats_msg(struct msm_vfe_resp *rp, + struct vfe_message *msg, void *data); +static boolean vfe_send_camif_error_msg(struct msm_vfe_resp *rp, + struct vfe_message *msg, void *data); +static boolean vfe_send_bus_overflow_msg(struct msm_vfe_resp *rp, + struct vfe_message *msg, void *data); + +static boolean invalid(struct msm_vfe_resp *rp, + struct vfe_message *_m, void *_d) +{ + BUG_ON(1); /* this function should not be called. 
*/ + return FALSE; +} + +static struct { + boolean (*fn)(struct msm_vfe_resp *rp, struct vfe_message *msg, void *data); + enum vfe_resp_msg rt; /* reponse type */ +} vfe_funcs[] = { + [VFE_MSG_ID_RESET_ACK] = { NULL, VFE_MSG_GENERAL }, + [VFE_MSG_ID_START_ACK] = { NULL, VFE_MSG_GENERAL }, + [VFE_MSG_ID_STOP_ACK] = { NULL, VFE_MSG_GENERAL }, + [VFE_MSG_ID_UPDATE_ACK] = { NULL, VFE_MSG_GENERAL }, +#ifndef CONFIG_720P_CAMERA + [VFE_MSG_ID_OUTPUT1] = { vfe_send_preview_msg, VFE_MSG_OUTPUT1 }, + [VFE_MSG_ID_OUTPUT2] = { vfe_send_video_msg, VFE_MSG_OUTPUT2 }, +#else + [VFE_MSG_ID_OUTPUT_P] = { vfe_send_preview_msg, VFE_MSG_OUTPUT_P }, + [VFE_MSG_ID_OUTPUT_V] = { vfe_send_video_msg, VFE_MSG_OUTPUT_V }, + [VFE_MSG_ID_OUTPUT_S] = { vfe_send_mainimage_msg, VFE_MSG_OUTPUT_S }, + [VFE_MSG_ID_OUTPUT_T] = { vfe_send_thumbnail_msg, VFE_MSG_OUTPUT_T }, +#endif + [VFE_MSG_ID_SNAPSHOT_DONE] = { NULL, VFE_MSG_SNAPSHOT }, + [VFE_MSG_ID_STATS_AUTOFOCUS] = { vfe_send_af_stats_msg, VFE_MSG_STATS_AF }, + [VFE_MSG_ID_STATS_WB_EXP] = { vfe_send_awb_stats_msg, VFE_MSG_STATS_WE }, + [VFE_MSG_ID_EPOCH1] = { NULL, VFE_MSG_GENERAL }, + [VFE_MSG_ID_EPOCH2] = { NULL, VFE_MSG_GENERAL }, + [VFE_MSG_ID_SYNC_TIMER0_DONE] = { invalid }, + [VFE_MSG_ID_SYNC_TIMER1_DONE] = { invalid }, + [VFE_MSG_ID_SYNC_TIMER2_DONE] = { invalid }, + [VFE_MSG_ID_ASYNC_TIMER0_DONE] = { invalid }, + [VFE_MSG_ID_ASYNC_TIMER1_DONE] = { invalid }, + [VFE_MSG_ID_ASYNC_TIMER2_DONE] = { invalid }, + [VFE_MSG_ID_ASYNC_TIMER3_DONE] = { invalid }, + [VFE_MSG_ID_AF_OVERFLOW] = { NULL, VFE_MSG_GENERAL }, + [VFE_MSG_ID_AWB_OVERFLOW] = { NULL, VFE_MSG_GENERAL }, + [VFE_MSG_ID_AXI_ERROR] = { NULL, VFE_MSG_GENERAL }, + [VFE_MSG_ID_CAMIF_OVERFLOW] = { NULL, VFE_MSG_GENERAL }, + [VFE_MSG_ID_VIOLATION] = { invalid }, + [VFE_MSG_ID_CAMIF_ERROR] = { vfe_send_camif_error_msg, VFE_MSG_GENERAL }, + [VFE_MSG_ID_BUS_OVERFLOW] = { vfe_send_bus_overflow_msg, VFE_MSG_GENERAL }, +}; + +static void vfe_proc_ops(enum VFE_MESSAGE_ID id, void *data) +{ + struct msm_vfe_resp *rp; + struct vfe_message *msg; + struct msm_sync *sync = (struct msm_sync *)ctrl->syncdata; + + CDBG("ctrl->vfeOperationMode = %d, msgId = %d\n", + ctrl->vfeOperationMode, id); + + if (id >= ARRAY_SIZE(vfe_funcs) || vfe_funcs[id].fn == invalid) { + pr_err("%s: invalid VFE message id %d\n", __func__, id); + return; + } + + /* In 8k, OUTPUT1 & OUTPUT2 messages arrive before SNAPSHOT_DONE. + * We don't send such messages to the user. Note that we can do + * this in the vfe_func[] callback, but that would cause us to + * allocate and then immediately free the msm_vfe_resp structure, + * which is wasteful. + */ +#ifndef CONFIG_720P_CAMERA + if ((ctrl->vfeOperationMode == VFE_START_OPERATION_MODE_SNAPSHOT) && + (id == VFE_MSG_ID_OUTPUT1 || + id == VFE_MSG_ID_OUTPUT2)) + return; +#else + if ((ctrl->vfeOperationMode == VFE_START_OPERATION_MODE_SNAPSHOT) && + (id == VFE_MSG_ID_OUTPUT_T || + id == VFE_MSG_ID_OUTPUT_S)) + return; +#endif + rp = ctrl->resp->vfe_alloc(sizeof(*rp) + + (vfe_funcs[id].fn ? sizeof(*msg) : 0), + ctrl->syncdata, + GFP_ATOMIC); + if (!rp) { + pr_err("%s: out of memory\n", __func__); + return; + } + + rp->type = vfe_funcs[id].rt; + rp->evt_msg.type = MSM_CAMERA_MSG; + rp->evt_msg.msg_id = id; + rp->evt_msg.exttype = 0; + + if (ctrl->vfeOperationMode == VFE_START_OPERATION_MODE_SNAPSHOT) { + rp->evt_msg.exttype = VFE_MSG_SNAPSHOT; + +#if 0 /* google flashlight */ + /* Turn off the flash if epoch1 is enabled and snapshot is done. 
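Editor's note, not part of the patch: the vfe_funcs[] table above is the single dispatch point for every VFE_MESSAGE_ID; a NULL .fn means the event carries no payload, and ids mapped to invalid() are rejected by vfe_proc_ops() before any allocation happens. The stand-alone sketch below shows the same table-driven dispatch with hypothetical ids and handlers, not taken from the driver.

#include <stdio.h>
#include <stddef.h>

enum demo_msg_id { DEMO_MSG_RESET_ACK, DEMO_MSG_OUTPUT, DEMO_MSG_TIMER, DEMO_MSG_MAX };

static int demo_output_handler(void *payload)
{
        printf("output payload %p\n", payload);
        return 1;                       /* payload filled in */
}

static int demo_invalid(void *payload)
{
        (void)payload;
        return 0;                       /* must never be dispatched */
}

static const struct {
        int (*fn)(void *payload);       /* NULL: event has no payload */
        const char *rtype;              /* response type for upper layer */
} demo_funcs[] = {
        [DEMO_MSG_RESET_ACK] = { NULL,                "GENERAL" },
        [DEMO_MSG_OUTPUT]    = { demo_output_handler, "OUTPUT"  },
        [DEMO_MSG_TIMER]     = { demo_invalid,        NULL      },
};

static void demo_proc(enum demo_msg_id id, void *payload)
{
        if (id >= DEMO_MSG_MAX || demo_funcs[id].fn == demo_invalid) {
                printf("invalid message id %d\n", id);
                return;
        }
        if (demo_funcs[id].fn)
                demo_funcs[id].fn(payload);
        printf("deliver %s event\n", demo_funcs[id].rtype);
}

int main(void)
{
        demo_proc(DEMO_MSG_OUTPUT, NULL);
        demo_proc(DEMO_MSG_TIMER, NULL);
        return 0;
}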
*/ + if (ctrl->vfeCamifEpoch1Local.enable && + ctrl->vfeOperationMode == + VFE_START_OPERATION_MODE_SNAPSHOT && + id == VFE_MSG_ID_SNAPSHOT_DONE) { + ctrl->resp->flash_ctrl(sync, MSM_CAMERA_LED_OFF); + ctrl->vfeCamifEpoch1Local.enable = 0; +#endif + } + + if (!vfe_funcs[id].fn) { + rp->evt_msg.len = 0; + rp->evt_msg.data = 0; + } else { + /* populate the message accordingly */ + if (vfe_funcs[id].fn) + rp->evt_msg.data = msg = + (struct vfe_message *)(rp + 1); + else + rp->evt_msg.data = msg = 0; + rp->evt_msg.len = sizeof(*msg); + + if (msg == NULL) { + pr_err("%s dsp send msg with NULL pointer\n", + __func__); + return ; + } + msg->_d = id; + if (vfe_funcs[id].fn(rp, msg, data) == FALSE) { + pr_info("%s: freeing memory: handler for %d " + "returned false\n", __func__, id); + ctrl->resp->vfe_free(rp); + return; + } + } + + ctrl->resp->vfe_resp(rp, MSM_CAM_Q_VFE_MSG, ctrl->syncdata, GFP_ATOMIC); +} + +static boolean vfe_send_bus_overflow_msg(struct msm_vfe_resp *rp, + struct vfe_message *msg, + void *data) +{ + struct isr_queue_cmd *qcmd = data; + memcpy(&(msg->_u.msgBusOverflow), + &qcmd->vfePmData, sizeof(qcmd->vfePmData)); + return TRUE; +} + +static boolean vfe_send_camif_error_msg(struct msm_vfe_resp *rp, + struct vfe_message *msg, + void *data) +{ + struct isr_queue_cmd *qcmd = data; + memcpy(&(msg->_u.msgCamifError), + &qcmd->vfeCamifStatusLocal, sizeof(qcmd->vfeCamifStatusLocal)); + return TRUE; +} + +static void vfe_process_error_irq(struct isr_queue_cmd *qcmd) +{ + struct vfe_interrupt_status *irqstatus = &qcmd->vfeInterruptStatus; + + /* all possible error irq. Note error irqs are not enabled, it is + * checked only when other interrupts are present. */ + if (irqstatus->afOverflowIrq) + vfe_proc_ops(VFE_MSG_ID_AF_OVERFLOW, qcmd); + + if (irqstatus->awbOverflowIrq) + vfe_proc_ops(VFE_MSG_ID_AWB_OVERFLOW, qcmd); + + if (irqstatus->axiErrorIrq) + vfe_proc_ops(VFE_MSG_ID_AXI_ERROR, qcmd); + + if (irqstatus->busOverflowIrq) + vfe_proc_ops(VFE_MSG_ID_BUS_OVERFLOW, qcmd); + + if (irqstatus->camifErrorIrq) + vfe_proc_ops(VFE_MSG_ID_CAMIF_ERROR, qcmd); + + if (irqstatus->camifOverflowIrq) + vfe_proc_ops(VFE_MSG_ID_CAMIF_OVERFLOW, qcmd); + + if (irqstatus->violationIrq) + pr_err("%s: violation irq\n", __func__); +} + +/* We use epoch1 interrupt to control flash timing. The purpose is to reduce the + * flash duration as much as possible. Userspace driver has no way to control + * the exactly timing like VFE. Currently we skip a frame during snapshot. + * We want to fire the flash in the middle of the first frame. Epoch1 interrupt + * allows us to set a line index and we will get an interrupt when VFE reaches + * the line. Userspace driver sets the line index in camif configuration. VFE + * will fire the flash in high mode when it gets the epoch1 interrupt. Flash + * will be turned off after snapshot is done. + */ +static void vfe_process_camif_epoch1_irq(void) +{ + /* Turn on the flash. */ + struct msm_sync *sync = (struct msm_sync *)ctrl->syncdata; + /*remove google flashlight*/ + /*ctrl->resp->flash_ctrl(sync, MSM_CAMERA_LED_HIGH);*/ + + /* Disable the epoch1 interrupt. */ + ctrl->vfeImaskLocal.camifEpoch1Irq = FALSE; + ctrl->vfeImaskPacked = vfe_irq_pack(ctrl->vfeImaskLocal); + vfe_program_irq_mask(ctrl->vfeImaskPacked); +} + +static void vfe_process_camif_sof_irq(void) +{ + /* increment the frame id number. 
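Editor's note, not part of the patch: vfe_process_camif_epoch1_irq() above effectively makes epoch1 a one-shot interrupt: once the line-count interrupt has fired (it is used to time the flash), its bit is cleared in the cached mask and the mask register is reprogrammed. The stand-alone sketch below shows that disarm pattern with an invented bit position and a plain variable standing in for the register.

#include <stdint.h>
#include <stdio.h>

#define DEMO_EPOCH1_BIT  (1u << 4)      /* made-up bit position */

static uint32_t demo_irq_mask_reg;      /* stand-in for the HW mask register */

static void demo_write_irq_mask(uint32_t mask)
{
        demo_irq_mask_reg = mask;       /* writel(mask, base + IRQ_MASK) */
}

static void demo_handle_epoch1(uint32_t *cached_mask)
{
        /* ...timing-critical work (e.g. firing the flash) goes here... */

        /* Disarm epoch1 so it fires only once per start. */
        *cached_mask &= ~DEMO_EPOCH1_BIT;
        demo_write_irq_mask(*cached_mask);
}

int main(void)
{
        uint32_t mask = 0xffffffffu;

        demo_handle_epoch1(&mask);
        printf("mask after epoch1: 0x%08x\n", mask);
        return 0;
}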
*/ + ctrl->vfeFrameId++; + + CDBG("camif_sof_irq, frameId = %d\n", ctrl->vfeFrameId); + + /* In snapshot mode, if frame skip is programmed, + * need to check it accordingly to stop camif at + * correct frame boundary. For the dropped frames, + * there won't be any output path irqs, but there is + * still SOF irq, which can help us determine when + * to stop the camif. + */ + if (ctrl->vfeOperationMode) { + if ((1 << ctrl->vfeFrameSkipCount)&ctrl->vfeFrameSkipPattern) { + + ctrl->vfeSnapShotCount--; + if (ctrl->vfeSnapShotCount == 0) + /* terminate vfe pipeline at frame boundary. */ + writel(CAMIF_COMMAND_STOP_AT_FRAME_BOUNDARY, + ctrl->vfebase + CAMIF_COMMAND); + } + + /* update frame skip counter for bit checking. */ + ctrl->vfeFrameSkipCount++; + if (ctrl->vfeFrameSkipCount == (ctrl->vfeFrameSkipPeriod + 1)) + ctrl->vfeFrameSkipCount = 0; + } +} + +static boolean vfe_get_af_pingpong_status(void) +{ + uint32_t busPingPongStatus = + readl(ctrl->vfebase + VFE_BUS_PINGPONG_STATUS); + return !!(busPingPongStatus & VFE_AF_PINGPONG_STATUS_BIT); +} + +static uint32_t vfe_read_af_buf_addr(boolean pipo) +{ + if (pipo == FALSE) + return readl(ctrl->vfebase + VFE_BUS_STATS_AF_WR_PING_ADDR); + else + return readl(ctrl->vfebase + VFE_BUS_STATS_AF_WR_PONG_ADDR); +} + +static void vfe_update_af_buf_addr(boolean pipo, uint32_t addr) +{ + if (pipo == FALSE) + writel(addr, ctrl->vfebase + VFE_BUS_STATS_AF_WR_PING_ADDR); + else + writel(addr, ctrl->vfebase + VFE_BUS_STATS_AF_WR_PONG_ADDR); +} + +static boolean vfe_send_af_stats_msg(struct msm_vfe_resp *rp, + struct vfe_message *msg, void *data) +{ + uint32_t afBufAddress = (uint32_t)data; + + if (ctrl->vstate != VFE_STATE_ACTIVE) + return FALSE; + + msg->_u.msgStatsAf.afBuffer = afBufAddress; + msg->_u.msgStatsAf.frameCounter = ctrl->vfeFrameId; + + ctrl->afStatsControl.ackPending = TRUE; + + vfe_addr_convert(&(rp->phy), rp->type, msg, NULL, NULL); + + return TRUE; +} + +static void vfe_process_stats_af_irq(void) +{ + boolean bufferAvailable; + + if (!(ctrl->afStatsControl.ackPending)) { + + /* read hardware status. 
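Editor's note, not part of the patch: in snapshot mode the SOF handler above counts only the frames whose bit is set in the programmed skip pattern, wraps the bit index after period + 1 frames, and stops CAMIF at a frame boundary once the requested number of snapshots has been seen. The user-space walk below reproduces that bookkeeping with arbitrary values.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t pattern = 0x5;   /* keep frames 0 and 2 of each period */
        uint32_t period = 3;      /* bit index wraps after period + 1 = 4 */
        uint32_t skip_count = 0;
        int snapshots_left = 3;
        int frame;

        for (frame = 0; snapshots_left > 0; frame++) {
                if ((1u << skip_count) & pattern) {
                        snapshots_left--;
                        printf("frame %d captured, %d left\n",
                               frame, snapshots_left);
                        if (snapshots_left == 0)
                                printf("stop camif at frame boundary\n");
                }
                if (++skip_count == period + 1)
                        skip_count = 0;
        }
        return 0;
}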
*/ + ctrl->afStatsControl.pingPongStatus = + vfe_get_af_pingpong_status(); + + bufferAvailable = (ctrl->afStatsControl.pingPongStatus) ^ 1; + + ctrl->afStatsControl.bufToRender = + vfe_read_af_buf_addr(bufferAvailable); + + /* update the same buffer address (ping or pong) */ + vfe_update_af_buf_addr(bufferAvailable, + ctrl->afStatsControl.nextFrameAddrBuf); + + vfe_proc_ops(VFE_MSG_ID_STATS_AUTOFOCUS, + (void *)ctrl->afStatsControl.bufToRender); + } else + ctrl->afStatsControl.droppedStatsFrameCount++; +} + +static boolean vfe_get_awb_pingpong_status(void) +{ + uint32_t busPingPongStatus = + readl(ctrl->vfebase + VFE_BUS_PINGPONG_STATUS); + return !!(busPingPongStatus & VFE_AWB_PINGPONG_STATUS_BIT); +} + +static uint32_t vfe_read_awb_buf_addr(boolean pingpong) +{ + if (pingpong == FALSE) + return readl(ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PING_ADDR); + else + return readl(ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PONG_ADDR); +} + +static void vfe_update_awb_buf_addr(boolean pingpong, uint32_t addr) +{ + if (pingpong == FALSE) + writel(addr, ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PING_ADDR); + else + writel(addr, ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PONG_ADDR); +} + +static boolean vfe_send_awb_stats_msg(struct msm_vfe_resp *rp, + struct vfe_message *msg, void *data) +{ + uint32_t awbBufAddress = (uint32_t)data; + + if (ctrl->vstate != VFE_STATE_ACTIVE) + return FALSE; + + msg->_u.msgStatsWbExp.awbBuffer = awbBufAddress; + msg->_u.msgStatsWbExp.frameCounter = ctrl->vfeFrameId; + + ctrl->awbStatsControl.ackPending = TRUE; + + vfe_addr_convert(&(rp->phy), + rp->type, msg, + NULL, NULL); + + return TRUE; +} + +static void vfe_process_stats_awb_irq(void) +{ + boolean bufferAvailable; + + if (!(ctrl->awbStatsControl.ackPending)) { + + ctrl->awbStatsControl.pingPongStatus = + vfe_get_awb_pingpong_status(); + + bufferAvailable = (ctrl->awbStatsControl.pingPongStatus) ^ 1; + + ctrl->awbStatsControl.bufToRender = + vfe_read_awb_buf_addr(bufferAvailable); + + vfe_update_awb_buf_addr(bufferAvailable, + ctrl->awbStatsControl.nextFrameAddrBuf); + + vfe_proc_ops(VFE_MSG_ID_STATS_WB_EXP, + (void *)ctrl->awbStatsControl.bufToRender); + + } else + ctrl->awbStatsControl.droppedStatsFrameCount++; +} + +static void vfe_write_gamma_table(uint8_t channel, + boolean bank, int16_t *pTable) +{ + uint16_t i; + + enum VFE_DMI_RAM_SEL dmiRamSel = NO_MEM_SELECTED; + + switch (channel) { + case 0: + if (bank == 0) + dmiRamSel = RGBLUT_RAM_CH0_BANK0; + else + dmiRamSel = RGBLUT_RAM_CH0_BANK1; + break; + + case 1: + if (bank == 0) + dmiRamSel = RGBLUT_RAM_CH1_BANK0; + else + dmiRamSel = RGBLUT_RAM_CH1_BANK1; + break; + + case 2: + if (bank == 0) + dmiRamSel = RGBLUT_RAM_CH2_BANK0; + else + dmiRamSel = RGBLUT_RAM_CH2_BANK1; + break; + + default: + break; + } + + vfe_program_dmi_cfg(dmiRamSel); + + for (i = 0; i < VFE_GAMMA_TABLE_LENGTH; i++) { + writel((uint32_t) (*pTable), ctrl->vfebase + VFE_DMI_DATA_LO); + pTable++; + } + + /* After DMI transfer, need to set the DMI_CFG to unselect any SRAM + unselect the SRAM Bank. 
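Editor's note, not part of the patch: the AF and AWB handlers above follow the same ping/pong handshake: if the previous buffer has been acked, read back the address of the buffer the hardware just finished, hand it up, and re-arm that slot with the next free buffer; otherwise count the frame as dropped. The stand-alone sketch below replaces the registers with plain variables.

#include <stdint.h>
#include <stdio.h>

struct demo_stats {
        uint32_t ping_addr, pong_addr;  /* stand-ins for WR_PING/PONG regs */
        int hw_writing_pong;            /* stand-in for PINGPONG_STATUS bit */
        int ack_pending;
        uint32_t next_free_buf;
        unsigned dropped;
};

static void demo_stats_irq(struct demo_stats *s)
{
        uint32_t *finished;

        if (s->ack_pending) {           /* userspace still owns a buffer */
                s->dropped++;
                return;
        }
        /* The buffer the HW is *not* writing is the one just completed. */
        finished = s->hw_writing_pong ? &s->ping_addr : &s->pong_addr;
        printf("render stats buffer 0x%x\n", (unsigned)*finished);

        *finished = s->next_free_buf;   /* re-arm the idle slot */
        s->ack_pending = 1;             /* cleared later by the stats ack */
}

int main(void)
{
        struct demo_stats s = { 0x1000, 0x2000, 1, 0, 0x3000, 0 };

        demo_stats_irq(&s);
        return 0;
}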
*/ + writel(VFE_DMI_CFG_DEFAULT, ctrl->vfebase + VFE_DMI_CFG); +} + +static void vfe_prog_hw_testgen_cmd(uint32_t value) +{ + writel(value, ctrl->vfebase + VFE_HW_TESTGEN_CMD); +} + +static inline void vfe_read_irq_status(struct vfe_irq_thread_msg *out) +{ + uint32_t *temp; + + memset(out, 0, sizeof(struct vfe_irq_thread_msg)); + + temp = (uint32_t *) (ctrl->vfebase + VFE_IRQ_STATUS); + out->vfeIrqStatus = readl(temp); + + temp = (uint32_t *) (ctrl->vfebase + CAMIF_STATUS); + out->camifStatus = readl(temp); +#if 0 /*this for YUV performance tuning */ + writel(0x7, ctrl->vfebase + CAMIF_COMMAND); + writel(0x3, ctrl->vfebase + CAMIF_COMMAND); + CDBG("camifStatus = 0x%x\n", out->camifStatus); +#endif +/* + temp = (uint32_t *)(ctrl->vfebase + VFE_DEMOSAIC_STATUS); + out->demosaicStatus = readl(temp); + + temp = (uint32_t *)(ctrl->vfebase + VFE_ASF_MAX_EDGE); + out->asfMaxEdge = readl(temp); + + temp = (uint32_t *)(ctrl->vfebase + VFE_BUS_ENC_Y_WR_PM_STATS_0); +*/ + +#if 0 + out->pmInfo.encPathPmInfo.yWrPmStats0 = readl(temp++); + out->pmInfo.encPathPmInfo.yWrPmStats1 = readl(temp++); + out->pmInfo.encPathPmInfo.cbcrWrPmStats0 = readl(temp++); + out->pmInfo.encPathPmInfo.cbcrWrPmStats1 = readl(temp++); + out->pmInfo.viewPathPmInfo.yWrPmStats0 = readl(temp++); + out->pmInfo.viewPathPmInfo.yWrPmStats1 = readl(temp++); + out->pmInfo.viewPathPmInfo.cbcrWrPmStats0 = readl(temp++); + out->pmInfo.viewPathPmInfo.cbcrWrPmStats1 = readl(temp); +#endif /* if 0 Jeff */ +} + +static void +vfe_parse_interrupt_status(struct vfe_interrupt_status *ret, uint32_t irqStatusIn) +{ + struct vfe_irqenable hwstat; + boolean temp; + + memset(&hwstat, 0, sizeof(hwstat)); + memset(ret, 0, sizeof(*ret)); + + hwstat = *((struct vfe_irqenable *)(&irqStatusIn)); + + ret->camifErrorIrq = hwstat.camifErrorIrq; + ret->camifSofIrq = hwstat.camifSofIrq; + ret->camifEolIrq = hwstat.camifEolIrq; + ret->camifEofIrq = hwstat.camifEofIrq; + ret->camifEpoch1Irq = hwstat.camifEpoch1Irq; + ret->camifEpoch2Irq = hwstat.camifEpoch2Irq; + ret->camifOverflowIrq = hwstat.camifOverflowIrq; + ret->ceIrq = hwstat.ceIrq; + ret->regUpdateIrq = hwstat.regUpdateIrq; + ret->resetAckIrq = hwstat.resetAckIrq; + ret->encYPingpongIrq = hwstat.encYPingpongIrq; + ret->encCbcrPingpongIrq = hwstat.encCbcrPingpongIrq; + ret->viewYPingpongIrq = hwstat.viewYPingpongIrq; + ret->viewCbcrPingpongIrq = hwstat.viewCbcrPingpongIrq; + ret->rdPingpongIrq = hwstat.rdPingpongIrq; + ret->afPingpongIrq = hwstat.afPingpongIrq; + ret->awbPingpongIrq = hwstat.awbPingpongIrq; + ret->histPingpongIrq = hwstat.histPingpongIrq; + ret->encIrq = hwstat.encIrq; + ret->viewIrq = hwstat.viewIrq; + ret->busOverflowIrq = hwstat.busOverflowIrq; + ret->afOverflowIrq = hwstat.afOverflowIrq; + ret->awbOverflowIrq = hwstat.awbOverflowIrq; + ret->syncTimer0Irq = hwstat.syncTimer0Irq; + ret->syncTimer1Irq = hwstat.syncTimer1Irq; + ret->syncTimer2Irq = hwstat.syncTimer2Irq; + ret->asyncTimer0Irq = hwstat.asyncTimer0Irq; + ret->asyncTimer1Irq = hwstat.asyncTimer1Irq; + ret->asyncTimer2Irq = hwstat.asyncTimer2Irq; + ret->asyncTimer3Irq = hwstat.asyncTimer3Irq; + ret->axiErrorIrq = hwstat.axiErrorIrq; + ret->violationIrq = hwstat.violationIrq; + + /* logic OR of any error bits + * although each irq corresponds to a bit, the data type here is a + * boolean already. hence use logic operation. 
+ */ + temp = + ret->camifErrorIrq || + ret->camifOverflowIrq || + ret->afOverflowIrq || + ret->awbOverflowIrq || + ret->awbPingpongIrq || + ret->afPingpongIrq || + ret->busOverflowIrq || ret->axiErrorIrq || ret->violationIrq; + + ret->anyErrorIrqs = temp; + + /* logic OR of any output path bits */ + temp = ret->encYPingpongIrq || ret->encCbcrPingpongIrq || ret->encIrq; + + ret->anyOutput2PathIrqs = temp; + + temp = ret->viewYPingpongIrq || ret->viewCbcrPingpongIrq || ret->viewIrq; + + ret->anyOutput1PathIrqs = temp; + + ret->anyOutputPathIrqs = + ret->anyOutput1PathIrqs || ret->anyOutput2PathIrqs; + + /* logic OR of any sync timer bits */ + temp = ret->syncTimer0Irq || ret->syncTimer1Irq || ret->syncTimer2Irq; + + ret->anySyncTimerIrqs = temp; + + /* logic OR of any async timer bits */ + temp = + ret->asyncTimer0Irq || + ret->asyncTimer1Irq || ret->asyncTimer2Irq || ret->asyncTimer3Irq; + + ret->anyAsyncTimerIrqs = temp; + + /* bool for all interrupts that are not allowed in idle state */ + temp = + ret->anyErrorIrqs || + ret->anyOutputPathIrqs || + ret->anySyncTimerIrqs || + ret->regUpdateIrq || + ret->awbPingpongIrq || + ret->afPingpongIrq || + ret->camifSofIrq || ret->camifEpoch2Irq || ret->camifEpoch1Irq; + + ret->anyIrqForActiveStatesOnly = temp; +} + +static void +vfe_get_asf_frame_info(struct vfe_frame_asf_info *rc, struct vfe_irq_thread_msg *in) +{ + struct vfe_asf_info asfInfoTemp; + + memset(rc, 0, sizeof(*rc)); + memset(&asfInfoTemp, 0, sizeof(asfInfoTemp)); + + asfInfoTemp = *((struct vfe_asf_info *)(&(in->asfMaxEdge))); + + rc->asfHbiCount = asfInfoTemp.HBICount; + rc->asfMaxEdge = asfInfoTemp.maxEdge; +} + +static void +vfe_get_demosaic_frame_info(struct vfe_frame_bpc_info *rc, struct vfe_irq_thread_msg *in) +{ + struct vfe_bps_info bpcInfoTemp; + + memset(rc, 0, sizeof(*rc)); + memset(&bpcInfoTemp, 0, sizeof(bpcInfoTemp)); + + bpcInfoTemp = *((struct vfe_bps_info *)(&(in->demosaicStatus))); + + rc->greenDefectPixelCount = bpcInfoTemp.greenBadPixelCount; + + rc->redBlueDefectPixelCount = bpcInfoTemp.RedBlueBadPixelCount; +} + +static void +vfe_get_camif_status(struct vfe_msg_camif_status *rc, struct vfe_irq_thread_msg *in) +{ + struct vfe_camif_stats camifStatusTemp; + + memset(rc, 0, sizeof(*rc)); + memset(&camifStatusTemp, 0, sizeof(camifStatusTemp)); + + camifStatusTemp = *((struct vfe_camif_stats *)(&(in->camifStatus))); + + rc->camifState = (boolean) camifStatusTemp.camifHalt; + rc->lineCount = camifStatusTemp.lineCount; + rc->pixelCount = camifStatusTemp.pixelCount; +} + +static void +vfe_get_performance_monitor_data(struct vfe_bus_performance_monitor *rc, + struct vfe_irq_thread_msg *in) +{ + memset(rc, 0, sizeof(*rc)); + + rc->encPathPmInfo.yWrPmStats0 = in->pmInfo.encPathPmInfo.yWrPmStats0; + rc->encPathPmInfo.yWrPmStats1 = in->pmInfo.encPathPmInfo.yWrPmStats1; + rc->encPathPmInfo.cbcrWrPmStats0 = + in->pmInfo.encPathPmInfo.cbcrWrPmStats0; + rc->encPathPmInfo.cbcrWrPmStats1 = + in->pmInfo.encPathPmInfo.cbcrWrPmStats1; + rc->viewPathPmInfo.yWrPmStats0 = in->pmInfo.viewPathPmInfo.yWrPmStats0; + rc->viewPathPmInfo.yWrPmStats1 = in->pmInfo.viewPathPmInfo.yWrPmStats1; + rc->viewPathPmInfo.cbcrWrPmStats0 = + in->pmInfo.viewPathPmInfo.cbcrWrPmStats0; + rc->viewPathPmInfo.cbcrWrPmStats1 = + in->pmInfo.viewPathPmInfo.cbcrWrPmStats1; +} + +static void vfe_process_reg_update_irq(void) +{ + CDBG("vfe_process_reg_update_irq: ackPendingFlag is %d\n", + ctrl->vfeStartAckPendingFlag); + if (ctrl->vfeStartAckPendingFlag == TRUE) { + vfe_proc_ops(VFE_MSG_ID_START_ACK, NULL); + 
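Editor's note, not part of the patch: vfe_parse_interrupt_status() above expands the masked status word into individual booleans and then derives composite flags (anyErrorIrqs, anyOutputPathIrqs, and so on) so the tasklet can branch on a few summaries instead of re-testing every bit. The reduced sketch below uses made-up bit positions.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define DEMO_BUS_OVERFLOW (1u << 0)     /* bit positions invented */
#define DEMO_AXI_ERROR    (1u << 1)
#define DEMO_ENC_FRAME    (1u << 2)
#define DEMO_VIEW_FRAME   (1u << 3)

struct demo_status {
        bool bus_overflow, axi_error, enc_frame, view_frame;
        bool any_error, any_output_path;
};

static void demo_parse(uint32_t hw, struct demo_status *st)
{
        st->bus_overflow = hw & DEMO_BUS_OVERFLOW;
        st->axi_error = hw & DEMO_AXI_ERROR;
        st->enc_frame = hw & DEMO_ENC_FRAME;
        st->view_frame = hw & DEMO_VIEW_FRAME;

        /* composite flags steer the bottom half */
        st->any_error = st->bus_overflow || st->axi_error;
        st->any_output_path = st->enc_frame || st->view_frame;
}

int main(void)
{
        struct demo_status st;

        demo_parse(DEMO_AXI_ERROR | DEMO_VIEW_FRAME, &st);
        printf("error=%d output=%d\n", st.any_error, st.any_output_path);
        return 0;
}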
ctrl->vfeStartAckPendingFlag = FALSE; + } else + vfe_proc_ops(VFE_MSG_ID_UPDATE_ACK, NULL); +} + +static void vfe_process_reset_irq(void) +{ + /* unsigned long flags; */ + + ctrl->vstate = VFE_STATE_IDLE; + + if (ctrl->vfeStopAckPending == TRUE) { + ctrl->vfeStopAckPending = FALSE; + /* disable all irqs when got stop ack from VFE */ + vfe_program_irq_mask(VFE_DISABLE_ALL_IRQS); + vfe_proc_ops(VFE_MSG_ID_STOP_ACK, NULL); + } else { + vfe_set_default_reg_values(); + vfe_proc_ops(VFE_MSG_ID_RESET_ACK, NULL); + } +} + +static void vfe_process_pingpong_irq(struct vfe_output_path *in, + uint8_t fragmentCount) +{ + uint16_t circularIndex; + uint32_t nextFragmentAddr; + + /* get next fragment address from circular buffer */ + circularIndex = (in->fragIndex) % (2 * fragmentCount); + nextFragmentAddr = in->addressBuffer[circularIndex]; + + in->fragIndex = circularIndex + 1; + + /* use next fragment to program hardware ping/pong address. */ + if (in->hwCurrentFlag == ping) { + writel(nextFragmentAddr, in->hwRegPingAddress); + in->hwCurrentFlag = pong; + + } else { + writel(nextFragmentAddr, in->hwRegPongAddress); + in->hwCurrentFlag = ping; + } +} + +static boolean vfe_send_video_msg(struct msm_vfe_resp *rp, + struct vfe_message *msg, void *data) +{ +#ifdef CONFIG_720P_CAMERA + struct vfe_msg_output *pPayload = data; + + if (ctrl->vstate != VFE_STATE_ACTIVE) + return FALSE; + memcpy(&(msg->_u), + (void *)pPayload, sizeof(struct vfe_msg_output)); + + rp->phy.output_id = OUTPUT_TYPE_V; + CDBG("vfe_send_video_msg rp->type= %d\n",rp->type); + + vfe_addr_convert(&(rp->phy), + rp->type, msg, + &(rp->extdata), &(rp->extlen)); + return TRUE; +#else + struct vfe_msg_output *pPayload = data; + + if (ctrl->vstate != VFE_STATE_ACTIVE) + return FALSE; + + memcpy(&(msg->_u.msgOutput2), + (void *)pPayload, sizeof(struct vfe_msg_output)); + + ctrl->encPath.ackPending = TRUE; + + if (!(ctrl->vfeRequestedSnapShotCount <= 3) && + (ctrl->vfeOperationMode == VFE_START_OPERATION_MODE_SNAPSHOT)) + ctrl->encPath.ackPending = TRUE; + + vfe_addr_convert(&(rp->phy), + rp->type, msg, + &(rp->extdata), &(rp->extlen)); + return TRUE; +#endif +} + +static boolean vfe_send_preview_msg(struct msm_vfe_resp *rp, + struct vfe_message *msg, void *data) +{ +#ifdef CONFIG_720P_CAMERA + struct vfe_msg_output *pPayload = data; + + if (ctrl->vstate != VFE_STATE_ACTIVE) + return FALSE; + + memcpy(&(msg->_u), (void *)pPayload, sizeof(struct vfe_msg_output)); + + rp->phy.output_id = OUTPUT_TYPE_P; + CDBG("vfe_send_preview_msg rp->type= %d\n",rp->type); + + vfe_addr_convert(&(rp->phy), + rp->type, msg, + &(rp->extdata), &(rp->extlen)); + + return TRUE; +#else + struct vfe_msg_output *pPayload = data; + + if (ctrl->vstate != VFE_STATE_ACTIVE) + return FALSE; + + memcpy(&(msg->_u), (void *)pPayload, sizeof(struct vfe_msg_output)); + + ctrl->viewPath.ackPending = TRUE; + + if (!(ctrl->vfeRequestedSnapShotCount <= 3) && + (ctrl->vfeOperationMode == VFE_START_OPERATION_MODE_SNAPSHOT)) + ctrl->viewPath.ackPending = TRUE; + + vfe_addr_convert(&(rp->phy), + rp->type, msg, + &(rp->extdata), &(rp->extlen)); + + return TRUE; +#endif +} + +#ifdef CONFIG_720P_CAMERA + +static boolean vfe_send_thumbnail_msg(struct msm_vfe_resp *rp, + struct vfe_message *msg, void *data) +{ + struct vfe_msg_output *pPayload = data; + + if (ctrl->vstate != VFE_STATE_ACTIVE) + return FALSE; + + memcpy(&(msg->_u), (void *)pPayload, sizeof(struct vfe_msg_output)); + + rp->phy.output_id = OUTPUT_TYPE_T; + CDBG("vfe_send_thumbnail_msg rp->type= %d\n",rp->type); + + if 
(ctrl->viewPath.snapshotPendingCount <= 1) + ctrl->viewPath.ackPending = FALSE; + + vfe_addr_convert(&(rp->phy), + rp->type, msg, + &(rp->extdata), &(rp->extlen)); + + return TRUE; +} + +static boolean vfe_send_mainimage_msg(struct msm_vfe_resp *rp, + struct vfe_message *msg, void *data) +{ + struct vfe_msg_output *pPayload = data; + + if (ctrl->vstate != VFE_STATE_ACTIVE) + return FALSE; + + memcpy(&(msg->_u), (void *)pPayload, sizeof(struct vfe_msg_output)); + + rp->phy.output_id = OUTPUT_TYPE_S; + CDBG("vfe_send_mainimage_msg rp->type= %d\n",rp->type); + + if (ctrl->encPath.snapshotPendingCount <=1 ) { + ctrl->encPath.ackPending = FALSE; + } + + vfe_addr_convert(&(rp->phy), + rp->type, msg, + &(rp->extdata), &(rp->extlen)); + + return TRUE; +} +#endif + +static void vfe_send_output_msg(boolean whichOutputPath, + uint32_t yPathAddr, uint32_t cbcrPathAddr) +{ + struct vfe_msg_output msgPayload; + + msgPayload.yBuffer = yPathAddr; + msgPayload.cbcrBuffer = cbcrPathAddr; + + /* asf info is common for both output1 and output2 */ +#if 0 + msgPayload.asfInfo.asfHbiCount = ctrl->vfeAsfFrameInfo.asfHbiCount; + msgPayload.asfInfo.asfMaxEdge = ctrl->vfeAsfFrameInfo.asfMaxEdge; + + /* demosaic info is common for both output1 and output2 */ + msgPayload.bpcInfo.greenDefectPixelCount = + ctrl->vfeBpcFrameInfo.greenDefectPixelCount; + msgPayload.bpcInfo.redBlueDefectPixelCount = + ctrl->vfeBpcFrameInfo.redBlueDefectPixelCount; +#endif /* if 0 */ + + /* frame ID is common for both paths. */ + msgPayload.frameCounter = ctrl->vfeFrameId; + +#ifndef CONFIG_720P_CAMERA + if (whichOutputPath) { + /* msgPayload.pmData = ctrl->vfePmData.encPathPmInfo; */ + vfe_proc_ops(VFE_MSG_ID_OUTPUT2, &msgPayload); + } else { + /* msgPayload.pmData = ctrl->vfePmData.viewPathPmInfo; */ + vfe_proc_ops(VFE_MSG_ID_OUTPUT1, &msgPayload); + } +#else + if (whichOutputPath) {/* vfe output2 physical path */ + /* msgPayload.pmData = ctrl->vfePmData.encPathPmInfo; */ + ctrl->encPath.ackPending = TRUE; + + if (ctrl->vfeOperationMode == 0) { + if (ctrl->axiOutputMode == VFE_AXI_OUTPUT_MODE_Output1AndOutput2) { + /* video mode */ + vfe_proc_ops(VFE_MSG_ID_OUTPUT_V, &msgPayload); + } else { /* preview mode */ + vfe_proc_ops(VFE_MSG_ID_OUTPUT_P, &msgPayload); + } + } else { + vfe_proc_ops(VFE_MSG_ID_OUTPUT_S, &msgPayload); + } + + } else { /* physical output1 path from vfe */ + ctrl->viewPath.ackPending = TRUE; + + if (ctrl->vfeOperationMode == 0) { + vfe_proc_ops(VFE_MSG_ID_OUTPUT_P, &msgPayload); + CDBG(" ==== check output ==== video mode display output.\n"); + + } else { + vfe_proc_ops(VFE_MSG_ID_OUTPUT_T, &msgPayload); + CDBG(" ==== check output ==== snapshot mode thumbnail output.\n"); + } + } +#endif +} + +static void vfe_process_frame_done_irq_multi_frag(struct vfe_output_path_combo + *in) +{ + uint32_t yAddress, cbcrAddress; + uint16_t idx; + uint32_t *ptrY; + uint32_t *ptrCbcr; + const uint32_t *ptrSrc; + uint8_t i; + + if (!in->ackPending) { + + idx = (in->currentFrame) * (in->fragCount); + + /* Send output message. */ + yAddress = in->yPath.addressBuffer[idx]; + cbcrAddress = in->cbcrPath.addressBuffer[idx]; + + /* copy next frame to current frame. 
*/ + ptrSrc = in->nextFrameAddrBuf; + ptrY = (uint32_t *)&in->yPath.addressBuffer[idx]; + ptrCbcr = (uint32_t *)&in->cbcrPath.addressBuffer[idx]; + + /* Copy Y address */ + for (i = 0; i < in->fragCount; i++) + *ptrY++ = *ptrSrc++; + + /* Copy Cbcr address */ + for (i = 0; i < in->fragCount; i++) + *ptrCbcr++ = *ptrSrc++; + + vfe_send_output_msg(in->whichOutputPath, yAddress, cbcrAddress); + + } else { + if (in->whichOutputPath == 0) + ctrl->vfeDroppedFrameCounts.output1Count++; + + if (in->whichOutputPath == 1) + ctrl->vfeDroppedFrameCounts.output2Count++; + } + + /* toggle current frame. */ + in->currentFrame = in->currentFrame ^ 1; + + if (ctrl->vfeOperationMode) + in->snapshotPendingCount--; +} + +static void vfe_process_frame_done_irq_no_frag_io( + struct vfe_output_path_combo *in, + uint32_t *pNextAddr, + uint32_t *pdestRenderAddr) +{ + uint32_t busPingPongStatus; + uint32_t tempAddress; + + /* 1. read hw status register. */ + busPingPongStatus = readl(ctrl->vfebase + VFE_BUS_PINGPONG_STATUS); + + CDBG("hardware status is 0x%x\n", busPingPongStatus); + + /* 2. determine ping or pong */ + /* use cbcr status */ + busPingPongStatus = busPingPongStatus & (1 << (in->cbcrStatusBit)); + + /* 3. read out address and update address */ + if (busPingPongStatus == 0) { + /* hw is working on ping, render pong buffer */ + /* a. read out pong address */ + /* read out y address. */ + tempAddress = readl(in->yPath.hwRegPongAddress); + + CDBG("pong 1 addr = 0x%x\n", tempAddress); + *pdestRenderAddr++ = tempAddress; + /* read out cbcr address. */ + tempAddress = readl(in->cbcrPath.hwRegPongAddress); + + CDBG("pong 2 addr = 0x%x\n", tempAddress); + *pdestRenderAddr = tempAddress; + + /* b. update pong address */ + writel(*pNextAddr++, in->yPath.hwRegPongAddress); + writel(*pNextAddr, in->cbcrPath.hwRegPongAddress); + } else { + /* hw is working on pong, render ping buffer */ + + /* a. read out ping address */ + tempAddress = readl(in->yPath.hwRegPingAddress); + CDBG("ping 1 addr = 0x%x\n", tempAddress); + *pdestRenderAddr++ = tempAddress; + tempAddress = readl(in->cbcrPath.hwRegPingAddress); + + CDBG("ping 2 addr = 0x%x\n", tempAddress); + *pdestRenderAddr = tempAddress; + + /* b. update ping address */ + writel(*pNextAddr++, in->yPath.hwRegPingAddress); + CDBG("NextAddress = 0x%x\n", *pNextAddr); + writel(*pNextAddr, in->cbcrPath.hwRegPingAddress); + } +} + +static void vfe_process_frame_done_irq_no_frag(struct vfe_output_path_combo *in) +{ + uint32_t addressToRender[2]; + + if (!in->ackPending) { + vfe_process_frame_done_irq_no_frag_io(in, + in->nextFrameAddrBuf, + addressToRender); + + /* use addressToRender to send out message. */ + vfe_send_output_msg(in->whichOutputPath, + addressToRender[0], addressToRender[1]); + + } else { + /* ackPending is still there, accumulate dropped frame count. + * These count can be read through ioctrl command. */ + CDBG("waiting frame ACK\n"); + + if (in->whichOutputPath == 0) + ctrl->vfeDroppedFrameCounts.output1Count++; + + if (in->whichOutputPath == 1) + ctrl->vfeDroppedFrameCounts.output2Count++; + } + + /* in case of multishot when upper layer did not ack, there will still + * be a snapshot done msg sent out, even though the number of frames + * sent out may be less than the desired number of frames. snapshot + * done msg would be helpful to indicate that vfe pipeline has stop, + * and in good known state. 
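Editor's note, not part of the patch: when an output frame is split into fragments, vfe_process_pingpong_irq() above keeps two frames' worth of fragment addresses in one circular buffer and, on every ping/pong interrupt, programs the next entry into whichever address register just completed. The stand-alone walk below reproduces that index arithmetic with arbitrary addresses.

#include <stdint.h>
#include <stdio.h>

#define DEMO_FRAG_COUNT 4

int main(void)
{
        uint32_t ring[2 * DEMO_FRAG_COUNT];     /* ping frame + pong frame */
        uint16_t frag_index = 0;
        int hw_on_ping = 1;                     /* bank the HW fills next */
        unsigned i;
        int irq;

        for (i = 0; i < 2 * DEMO_FRAG_COUNT; i++)
                ring[i] = 0x10000000u + i * 0x1000u;

        for (irq = 0; irq < 6; irq++) {         /* six ping/pong IRQs */
                uint32_t next = ring[frag_index % (2 * DEMO_FRAG_COUNT)];

                frag_index = (frag_index % (2 * DEMO_FRAG_COUNT)) + 1;

                /* program the fragment into the bank that just finished */
                printf("irq %d: write 0x%x to the %s address register\n",
                       irq, (unsigned)next, hw_on_ping ? "ping" : "pong");
                hw_on_ping = !hw_on_ping;
        }
        return 0;
}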
+ */ + if (ctrl->vfeOperationMode) + in->snapshotPendingCount--; +} + +static void vfe_process_output_path_irq(struct vfe_interrupt_status *irqstatus) +{ + /* unsigned long flags; */ + + /* process the view path interrupts */ + if (irqstatus->anyOutput1PathIrqs) { + if (ctrl->viewPath.multiFrag) { + + if (irqstatus->viewCbcrPingpongIrq) + vfe_process_pingpong_irq(& + (ctrl->viewPath. + cbcrPath), + ctrl->viewPath. + fragCount); + + if (irqstatus->viewYPingpongIrq) + vfe_process_pingpong_irq(& + (ctrl->viewPath.yPath), + ctrl->viewPath. + fragCount); + + if (irqstatus->viewIrq) + vfe_process_frame_done_irq_multi_frag(&ctrl-> + viewPath); + + } else { + /* typical case for no fragment, + only frame done irq is enabled. */ + if (irqstatus->viewIrq) + vfe_process_frame_done_irq_no_frag(&ctrl-> + viewPath); + } + } + + /* process the encoder path interrupts */ + if (irqstatus->anyOutput2PathIrqs) { + if (ctrl->encPath.multiFrag) { + if (irqstatus->encCbcrPingpongIrq) + vfe_process_pingpong_irq(& + (ctrl->encPath. + cbcrPath), + ctrl->encPath. + fragCount); + + if (irqstatus->encYPingpongIrq) + vfe_process_pingpong_irq(&(ctrl->encPath.yPath), + ctrl->encPath. + fragCount); + + if (irqstatus->encIrq) + vfe_process_frame_done_irq_multi_frag(&ctrl-> + encPath); + + } else { + CDBG("horng irqstatus->encIrq = %x\n", irqstatus->encIrq); + if (irqstatus->encIrq) + vfe_process_frame_done_irq_no_frag(&ctrl-> + encPath); + } + } + + if (ctrl->vfeOperationMode) { + if ((ctrl->encPath.snapshotPendingCount == 0) && + (ctrl->viewPath.snapshotPendingCount == 0)) { + + ctrl->vstate = VFE_STATE_IDLE; + + vfe_proc_ops(VFE_MSG_ID_SNAPSHOT_DONE, NULL); + vfe_prog_hw_testgen_cmd(VFE_TEST_GEN_STOP); + vfe_pm_stop(); + } + } +} + +static int preview_skipframe; +#define FRAME_SKIP 2 +static void __vfe_do_tasklet(struct isr_queue_cmd *qcmd) +{ + if (qcmd->vfeInterruptStatus.regUpdateIrq) { + CDBG("irq regUpdateIrq\n"); + vfe_process_reg_update_irq(); + } + + if (qcmd->vfeInterruptStatus.resetAckIrq) { + CDBG("%s: process resetAckIrq\n", __func__); + preview_skipframe = 0; + vfe_process_reset_irq(); + } + + if (ctrl->vstate != VFE_STATE_ACTIVE) + return; + + if (qcmd->vfeInterruptStatus.camifEpoch1Irq) { + vfe_process_camif_epoch1_irq(); + } + +#if 0 + if (qcmd->vfeInterruptStatus.camifEpoch2Irq) + vfe_proc_ops(VFE_MSG_ID_EPOCH2); +#endif + + /* next, check output path related interrupts. 
*/ + if (qcmd->vfeInterruptStatus.anyOutputPathIrqs) { + CDBG("irq: anyOutputPathIrqs\n"); + if(preview_skipframe > FRAME_SKIP || + ctrl->vfeOperationMode == VFE_START_OPERATION_MODE_SNAPSHOT) + vfe_process_output_path_irq(&qcmd->vfeInterruptStatus); + else + preview_skipframe ++; + } + + if (qcmd->vfeInterruptStatus.afPingpongIrq) + vfe_process_stats_af_irq(); + + if (qcmd->vfeInterruptStatus.awbPingpongIrq) + vfe_process_stats_awb_irq(); + + /* any error irqs */ + if (qcmd->vfeInterruptStatus.anyErrorIrqs) + vfe_process_error_irq(qcmd); + +#if 0 + if (qcmd->vfeInterruptStatus.anySyncTimerIrqs) + vfe_process_sync_timer_irq(); + + if (qcmd->vfeInterruptStatus.anyAsyncTimerIrqs) + vfe_process_async_timer_irq(); +#endif + + if (qcmd->vfeInterruptStatus.camifSofIrq) { + CDBG("irq: camifSofIrq\n"); + vfe_process_camif_sof_irq(); + } +} + +static struct isr_queue_cmd *get_irq_cmd_nosync(void) +{ + int old_get = ctrl->irq_get++; + ctrl->irq_get = ctrl->irq_get % ARRAY_SIZE(ctrl->irqs); + if (ctrl->irq_get == ctrl->irq_put) { + pr_err("%s: out of irq command packets\n", __func__); + ctrl->irq_get = old_get; + return NULL; + } + + return ctrl->irqs + old_get; +} + +static struct isr_queue_cmd *next_irq_cmd(void) +{ + unsigned long flags; + struct isr_queue_cmd *cmd; + spin_lock_irqsave(&ctrl->irqs_lock, flags); + if (ctrl->irq_get == ctrl->irq_put) { + spin_unlock_irqrestore(&ctrl->irqs_lock, flags); + return NULL; /* already empty */ + } + cmd = ctrl->irqs + ctrl->irq_put; + spin_unlock_irqrestore(&ctrl->irqs_lock, flags); + return cmd; +} + +static void put_irq_cmd(void) +{ + unsigned long flags; + spin_lock_irqsave(&ctrl->irqs_lock, flags); + if (ctrl->irq_get == ctrl->irq_put) { + spin_unlock_irqrestore(&ctrl->irqs_lock, flags); + return; /* already empty */ + } + ctrl->irq_put++; + ctrl->irq_put %= ARRAY_SIZE(ctrl->irqs); + spin_unlock_irqrestore(&ctrl->irqs_lock, flags); +} + +static void vfe_do_tasklet(unsigned long data) +{ + int cnt = 0; + struct isr_queue_cmd *qcmd = NULL; + + if (!ctrl) + return; + + CDBG("%s\n", __func__); + + while ((qcmd = next_irq_cmd())) { + __vfe_do_tasklet(qcmd); + put_irq_cmd(); + cnt++; + } + + if (cnt > 1) + pr_info("%s: serviced %d vfe interrupts\n", __func__, cnt); +} + +DECLARE_TASKLET(vfe_tasklet, vfe_do_tasklet, 0); + +static irqreturn_t vfe_parse_irq(int irq_num, void *data) +{ + unsigned long flags; + uint32_t irqStatusLocal; + struct vfe_irq_thread_msg irq; + struct isr_queue_cmd *qcmd; + + CDBG("vfe_parse_irq\n"); + + vfe_read_irq_status(&irq); + + if (irq.vfeIrqStatus == 0) { + CDBG("vfe_parse_irq: irq.vfeIrqStatus is 0\n"); + return IRQ_HANDLED; + } + + if (ctrl->vfeStopAckPending) + irqStatusLocal = (VFE_IMASK_WHILE_STOPPING & irq.vfeIrqStatus); + else + irqStatusLocal = + ((ctrl->vfeImaskPacked | VFE_IMASK_ERROR_ONLY) & + irq.vfeIrqStatus); + + spin_lock_irqsave(&ctrl->irqs_lock, flags); + qcmd = get_irq_cmd_nosync(); + if (!qcmd) { + spin_unlock_irqrestore(&ctrl->irqs_lock, flags); + goto done; + } + /* parse the interrupt status to local data structures. */ + vfe_parse_interrupt_status(&qcmd->vfeInterruptStatus, irqStatusLocal); + vfe_get_asf_frame_info(&qcmd->vfeAsfFrameInfo, &irq); + vfe_get_demosaic_frame_info(&qcmd->vfeBpcFrameInfo, &irq); + vfe_get_camif_status(&qcmd->vfeCamifStatusLocal, &irq); + vfe_get_performance_monitor_data(&qcmd->vfePmData, &irq); + spin_unlock_irqrestore(&ctrl->irqs_lock, flags); + tasklet_schedule(&vfe_tasklet); + +done: + /* clear the pending interrupt of the same kind. 
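Editor's note, not part of the patch: the interrupt path above is split in two. vfe_parse_irq() runs in hard-IRQ context, snapshots the status registers into a slot taken from a small fixed ring, and schedules the tasklet; vfe_do_tasklet() then drains the ring, and the producer refuses to advance onto an unprocessed slot. The stand-alone sketch below shows that ring with invented names and the locking omitted.

#include <stdio.h>

#define DEMO_NSLOTS 4

struct demo_cmd { unsigned status; };

static struct demo_cmd slots[DEMO_NSLOTS];
static unsigned slot_get, slot_put;     /* producer / consumer indices */

static struct demo_cmd *demo_produce(void)      /* hard-IRQ side */
{
        unsigned old_get = slot_get;

        slot_get = (slot_get + 1) % DEMO_NSLOTS;
        if (slot_get == slot_put) {     /* would overrun unprocessed work */
                slot_get = old_get;
                return NULL;
        }
        return &slots[old_get];
}

static struct demo_cmd *demo_consume(void)      /* tasklet side */
{
        struct demo_cmd *cmd;

        if (slot_get == slot_put)
                return NULL;            /* ring is empty */
        cmd = &slots[slot_put];
        slot_put = (slot_put + 1) % DEMO_NSLOTS;
        return cmd;
}

int main(void)
{
        struct demo_cmd *c = demo_produce();

        if (c)
                c->status = 0x1;
        while ((c = demo_consume()))
                printf("processed status 0x%x\n", c->status);
        return 0;
}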
*/ + writel(irq.vfeIrqStatus, ctrl->vfebase + VFE_IRQ_CLEAR); + return IRQ_HANDLED; +} + +int vfe_cmd_init(struct msm_vfe_callback *presp, + struct platform_device *pdev, void *sdata) +{ + struct resource *vfemem, *vfeirq, *vfeio; + int rc; + struct msm_camera_sensor_info *s_info; + s_info = pdev->dev.platform_data; + + pdev->resource = s_info->resource; + pdev->num_resources = s_info->num_resources; + + vfemem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!vfemem) { + pr_err("%s: no mem resource\n", __func__); + return -ENODEV; + } + + vfeirq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!vfeirq) { + pr_err("%s: no irq resource\n", __func__); + return -ENODEV; + } + + vfeio = request_mem_region(vfemem->start, + resource_size(vfemem), pdev->name); + if (!vfeio) { + pr_err("%s: VFE region already claimed\n", __func__); + return -EBUSY; + } + + ctrl = kzalloc(sizeof(struct msm_vfe8x_ctrl), GFP_KERNEL); + if (!ctrl) { + pr_err("%s: out of memory\n", __func__); + rc = -ENOMEM; + goto cmd_init_failed1; + } + + spin_lock_init(&ctrl->irqs_lock); + + ctrl->vfeirq = vfeirq->start; + + ctrl->vfebase = + ioremap(vfemem->start, (vfemem->end - vfemem->start) + 1); + if (!ctrl->vfebase) { + pr_err("%s: ioremap failed\n", __func__); + rc = -ENOMEM; + goto cmd_init_failed2; + } + + rc = request_irq(ctrl->vfeirq, vfe_parse_irq, + IRQF_TRIGGER_RISING, "vfe", 0); + if (rc < 0) { + pr_err("%s: request_irq(%d) failed\n", __func__, ctrl->vfeirq); + goto cmd_init_failed2; + } + + if (presp && presp->vfe_resp) + ctrl->resp = presp; + else { + pr_err("%s: no vfe_resp function\n", __func__); + rc = -EIO; + goto cmd_init_failed3; + } + + ctrl->syncdata = sdata; + ctrl->s_info = s_info; + return 0; + +cmd_init_failed3: + disable_irq(ctrl->vfeirq); + free_irq(ctrl->vfeirq, 0); + iounmap(ctrl->vfebase); +cmd_init_failed2: + kfree(ctrl); +cmd_init_failed1: + release_mem_region(vfemem->start, (vfemem->end - vfemem->start) + 1); + return rc; +} + +void vfe_cmd_release(struct platform_device *dev) +{ + struct resource *mem; + + disable_irq(ctrl->vfeirq); + free_irq(ctrl->vfeirq, 0); + + iounmap(ctrl->vfebase); + mem = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (mem == NULL) { + pr_err("%s : platform get resource is NULL pointer\n", + __func__); + } else + release_mem_region(mem->start, (mem->end - mem->start) + 1); + + kfree(ctrl); + ctrl = 0; +} + +void vfe_stats_af_stop(void) +{ + ctrl->vfeStatsCmdLocal.autoFocusEnable = FALSE; + ctrl->vfeImaskLocal.afPingpongIrq = FALSE; +} + +void vfe_stop(void) +{ + int spin_cnt = 0; + uint32_t vfeAxiStauts; + + /* for reset hw modules, and send msg when reset_irq comes. */ + ctrl->vfeStopAckPending = TRUE; + + ctrl->vfeStatsPingPongReloadFlag = FALSE; + vfe_pm_stop(); + + /* disable all interrupts. */ + vfe_program_irq_mask(VFE_DISABLE_ALL_IRQS); + + /* in either continuous or snapshot mode, stop command can be issued + * at any time. + */ + vfe_camif_stop_immediately(); + vfe_program_axi_cmd(AXI_HALT); + vfe_prog_hw_testgen_cmd(VFE_TEST_GEN_STOP); + + do { + vfeAxiStauts = vfe_read_axi_status(); + spin_cnt++; + } while (!(vfeAxiStauts & AXI_STATUS_BUSY_MASK)); + if (spin_cnt > 1) + pr_warning("%s: spin_cnt %d\n", __func__, spin_cnt); + + vfe_program_axi_cmd(AXI_HALT_CLEAR); + + /* clear all pending interrupts */ + writel(VFE_CLEAR_ALL_IRQS, ctrl->vfebase + VFE_IRQ_CLEAR); + + /* enable reset_ack and async timer interrupt only while stopping + * the pipeline. 
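Editor's note, not part of the patch: vfe_cmd_init() above acquires its resources in order (memory region, control structure, register mapping, interrupt line) and unwinds the ones already taken through goto labels when a later step fails. The reduced sketch below shows that unwind pattern with the kernel APIs replaced by plain allocations; names are invented.

#include <stdio.h>
#include <stdlib.h>

static int demo_probe(void)
{
        void *region = NULL, *state = NULL, *mapping = NULL;
        int rc;

        region = malloc(16);            /* request_mem_region() stand-in */
        if (!region)
                return -1;

        state = calloc(1, 64);          /* kzalloc() stand-in */
        if (!state) {
                rc = -1;
                goto fail_region;
        }

        mapping = malloc(32);           /* ioremap() stand-in */
        if (!mapping) {
                rc = -1;
                goto fail_state;
        }

        /* On success the resources stay owned until the release path. */
        printf("probe ok\n");
        return 0;

fail_state:
        free(state);
fail_region:
        free(region);
        return rc;
}

int main(void)
{
        return demo_probe() ? 1 : 0;
}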
+ */ + vfe_program_irq_mask(VFE_IMASK_WHILE_STOPPING); + + vfe_program_global_reset_cmd(VFE_RESET_UPON_STOP_CMD); +} + +void vfe_update(void) +{ + ctrl->vfeModuleEnableLocal.statsEnable = + ctrl->vfeStatsCmdLocal.autoFocusEnable | + ctrl->vfeStatsCmdLocal.axwEnable; + + vfe_reg_module_cfg(&ctrl->vfeModuleEnableLocal); + + vfe_program_stats_cmd(&ctrl->vfeStatsCmdLocal); + + ctrl->vfeImaskPacked = vfe_irq_pack(ctrl->vfeImaskLocal); + vfe_program_irq_mask(ctrl->vfeImaskPacked); + + if ((ctrl->vfeModuleEnableLocal.statsEnable == TRUE) && + (ctrl->vfeStatsPingPongReloadFlag == FALSE)) { + ctrl->vfeStatsPingPongReloadFlag = TRUE; + + ctrl->vfeBusCmdLocal.statsPingpongReload = TRUE; + vfe_reg_bus_cmd(&ctrl->vfeBusCmdLocal); + } + + vfe_program_reg_update_cmd(VFE_REG_UPDATE_TRIGGER); +} + +int vfe_rgb_gamma_update(struct vfe_cmd_rgb_gamma_config *in) +{ + int rc = 0; + + ctrl->vfeModuleEnableLocal.rgbLUTEnable = in->enable; + + switch (in->channelSelect) { + case RGB_GAMMA_CH0_SELECTED: + ctrl->vfeGammaLutSel.ch0BankSelect ^= 1; + vfe_write_gamma_table(0, + ctrl->vfeGammaLutSel.ch0BankSelect, + in->table); + break; + + case RGB_GAMMA_CH1_SELECTED: + ctrl->vfeGammaLutSel.ch1BankSelect ^= 1; + vfe_write_gamma_table(1, + ctrl->vfeGammaLutSel.ch1BankSelect, + in->table); + break; + + case RGB_GAMMA_CH2_SELECTED: + ctrl->vfeGammaLutSel.ch2BankSelect ^= 1; + vfe_write_gamma_table(2, + ctrl->vfeGammaLutSel.ch2BankSelect, + in->table); + break; + + case RGB_GAMMA_CH0_CH1_SELECTED: + ctrl->vfeGammaLutSel.ch0BankSelect ^= 1; + ctrl->vfeGammaLutSel.ch1BankSelect ^= 1; + vfe_write_gamma_table(0, ctrl->vfeGammaLutSel.ch0BankSelect, + in->table); + vfe_write_gamma_table(1, ctrl->vfeGammaLutSel.ch1BankSelect, + in->table); + break; + + case RGB_GAMMA_CH0_CH2_SELECTED: + ctrl->vfeGammaLutSel.ch0BankSelect ^= 1; + ctrl->vfeGammaLutSel.ch2BankSelect ^= 1; + vfe_write_gamma_table(0, ctrl->vfeGammaLutSel.ch0BankSelect, + in->table); + vfe_write_gamma_table(2, ctrl->vfeGammaLutSel.ch2BankSelect, + in->table); + break; + + case RGB_GAMMA_CH1_CH2_SELECTED: + ctrl->vfeGammaLutSel.ch1BankSelect ^= 1; + ctrl->vfeGammaLutSel.ch2BankSelect ^= 1; + vfe_write_gamma_table(1, ctrl->vfeGammaLutSel.ch1BankSelect, + in->table); + vfe_write_gamma_table(2, ctrl->vfeGammaLutSel.ch2BankSelect, + in->table); + break; + + case RGB_GAMMA_CH0_CH1_CH2_SELECTED: + ctrl->vfeGammaLutSel.ch0BankSelect ^= 1; + ctrl->vfeGammaLutSel.ch1BankSelect ^= 1; + ctrl->vfeGammaLutSel.ch2BankSelect ^= 1; + vfe_write_gamma_table(0, ctrl->vfeGammaLutSel.ch0BankSelect, + in->table); + vfe_write_gamma_table(1, ctrl->vfeGammaLutSel.ch1BankSelect, + in->table); + vfe_write_gamma_table(2, ctrl->vfeGammaLutSel.ch2BankSelect, + in->table); + break; + + default: + pr_err("%s: invalid gamma channel %d\n", __func__, in->channelSelect); + return -EINVAL; + } /* switch */ + + /* update the gammaLutSel register. 
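Editor's note, not part of the patch: vfe_rgb_gamma_update() above relies on the double-banked LUT RAM: the bank-select bit for each affected channel is toggled, the new table is streamed into the now-inactive bank through the DMI port, and only then is the LUT bank-select register programmed, so the hardware keeps reading a consistent table during the load. The stand-alone sketch below shows that double-buffered update with plain arrays standing in for the banks.

#include <stdint.h>
#include <stdio.h>

#define DEMO_LUT_LEN 8

static int16_t demo_banks[2][DEMO_LUT_LEN];   /* bank 0 and bank 1 */
static unsigned demo_active_bank;             /* LUT bank-select "register" */

static void demo_lut_update(const int16_t *table)
{
        unsigned spare = demo_active_bank ^ 1;
        unsigned i;

        for (i = 0; i < DEMO_LUT_LEN; i++)      /* DMI write loop stand-in */
                demo_banks[spare][i] = table[i];

        demo_active_bank = spare;               /* program bank select last */
}

int main(void)
{
        int16_t table[DEMO_LUT_LEN] = { 0, 32, 64, 96, 128, 160, 192, 224 };

        demo_lut_update(table);
        printf("active bank is now %u\n", demo_active_bank);
        return 0;
}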
*/ + vfe_program_lut_bank_sel(&ctrl->vfeGammaLutSel); + + return rc; +} + +int vfe_rgb_gamma_config(struct vfe_cmd_rgb_gamma_config *in) +{ + int rc = 0; + + ctrl->vfeModuleEnableLocal.rgbLUTEnable = in->enable; + + switch (in->channelSelect) { + case RGB_GAMMA_CH0_SELECTED: + vfe_write_gamma_table(0, 0, in->table); + break; + + case RGB_GAMMA_CH1_SELECTED: + vfe_write_gamma_table(1, 0, in->table); + break; + + case RGB_GAMMA_CH2_SELECTED: + vfe_write_gamma_table(2, 0, in->table); + break; + + case RGB_GAMMA_CH0_CH1_SELECTED: + vfe_write_gamma_table(0, 0, in->table); + vfe_write_gamma_table(1, 0, in->table); + break; + + case RGB_GAMMA_CH0_CH2_SELECTED: + vfe_write_gamma_table(0, 0, in->table); + vfe_write_gamma_table(2, 0, in->table); + break; + + case RGB_GAMMA_CH1_CH2_SELECTED: + vfe_write_gamma_table(1, 0, in->table); + vfe_write_gamma_table(2, 0, in->table); + break; + + case RGB_GAMMA_CH0_CH1_CH2_SELECTED: + vfe_write_gamma_table(0, 0, in->table); + vfe_write_gamma_table(1, 0, in->table); + vfe_write_gamma_table(2, 0, in->table); + break; + + default: + pr_err("%s: invalid gamma channel %d\n", __func__, in->channelSelect); + rc = -EINVAL; + break; + } /* switch */ + + return rc; +} + +void vfe_stats_af_ack(struct vfe_cmd_stats_af_ack *in) +{ + ctrl->afStatsControl.nextFrameAddrBuf = in->nextAFOutputBufferAddr; + ctrl->afStatsControl.ackPending = FALSE; +} + +void vfe_stats_wb_exp_ack(struct vfe_cmd_stats_wb_exp_ack *in) +{ + ctrl->awbStatsControl.nextFrameAddrBuf = in->nextWbExpOutputBufferAddr; + ctrl->awbStatsControl.ackPending = FALSE; +} + +#ifndef CONFIG_720P_CAMERA +void vfe_output2_ack(struct vfe_cmd_output_ack *in) +{ + const uint32_t *psrc; + uint32_t *pdest; + uint8_t i; + + pdest = ctrl->encPath.nextFrameAddrBuf; + + CDBG("output2_ack: ack addr = 0x%x\n", in->ybufaddr[0]); + + psrc = in->ybufaddr; + for (i = 0; i < ctrl->encPath.fragCount; i++) + *pdest++ = *psrc++; + + psrc = in->chromabufaddr; + for (i = 0; i < ctrl->encPath.fragCount; i++) + *pdest++ = *psrc++; + + ctrl->encPath.ackPending = FALSE; +} + +void vfe_output1_ack(struct vfe_cmd_output_ack *in) +{ + const uint32_t *psrc; + uint32_t *pdest; + uint8_t i; + + pdest = ctrl->viewPath.nextFrameAddrBuf; + + psrc = in->ybufaddr; + for (i = 0; i < ctrl->viewPath.fragCount; i++) + *pdest++ = *psrc++; + + psrc = in->chromabufaddr; + for (i = 0; i < ctrl->viewPath.fragCount; i++) + *pdest++ = *psrc++; + + ctrl->viewPath.ackPending = FALSE; +} + +#else + +void vfe_output_v_ack(struct vfe_cmd_output_ack *in) +{ + const uint32_t *psrc; + uint32_t *pdest; + uint8_t i; + + pdest = ctrl->encPath.nextFrameAddrBuf; + +// CDBG("output2_ack: ack addr = 0x%x\n", in->ybufaddr[0]); + CDBG("video_frame_ack: ack addr = 0x%x\n", in->ybufaddr[0]); + + psrc = in->ybufaddr; + for (i = 0; i < ctrl->encPath.fragCount; i++) + *pdest++ = *psrc++; + + psrc = in->chromabufaddr; + for (i = 0; i < ctrl->encPath.fragCount; i++) + *pdest++ = *psrc++; + + ctrl->encPath.ackPending = FALSE; +} + +void vfe_output_p_ack(struct vfe_cmd_output_ack *in) +{ + const uint32_t *psrc; + uint32_t *pdest; + uint8_t i; + + if (ctrl->axiOutputMode == VFE_AXI_OUTPUT_MODE_Output1AndOutput2 ) { + /* video mode, preview comes from output1 path */ + + pdest = ctrl->viewPath.nextFrameAddrBuf; + + psrc = in->ybufaddr; + for (i = 0; i < ctrl->viewPath.fragCount; i++) + *pdest++ = *psrc++; + + psrc = in->chromabufaddr; + for (i = 0; i < ctrl->viewPath.fragCount; i++) + *pdest++ = *psrc++; + + ctrl->viewPath.ackPending = FALSE; + + } else { /* preview mode, preview comes from 
output2 path. */ + pdest = ctrl->encPath.nextFrameAddrBuf; + + psrc = in->ybufaddr; + for (i = 0; i < ctrl->encPath.fragCount; i++) + *pdest++ = *psrc++; + + psrc = in->chromabufaddr; + for (i = 0; i < ctrl->encPath.fragCount; i++) + *pdest++ = *psrc++; + + ctrl->encPath.ackPending = FALSE; + + } +} + +#endif + +void vfe_start(struct vfe_cmd_start *in) +{ + uint32_t pmstatus = 0; + boolean rawmode; + uint32_t demperiod = 0; + uint32_t demeven = 0; + uint32_t demodd = 0; + + /* derived from other commands. (camif config, axi output config, + * etc) + */ + struct vfe_cfg hwcfg; + struct vfe_upsample_cfg chromupcfg; + + CDBG("vfe_start operationMode = %d\n", in->operationMode); + + memset(&hwcfg, 0, sizeof(hwcfg)); + memset(&chromupcfg, 0, sizeof(chromupcfg)); + + switch (in->pixel) { + case VFE_BAYER_RGRGRG: + demperiod = 1; + demeven = 0xC9; + demodd = 0xAC; + break; + + case VFE_BAYER_GRGRGR: + demperiod = 1; + demeven = 0x9C; + demodd = 0xCA; + break; + + case VFE_BAYER_BGBGBG: + demperiod = 1; + demeven = 0xCA; + demodd = 0x9C; + break; + + case VFE_BAYER_GBGBGB: + demperiod = 1; + demeven = 0xAC; + demodd = 0xC9; + break; + + case VFE_YUV_YCbYCr: + demperiod = 3; + demeven = 0x9CAC; + demodd = 0x9CAC; + break; + + case VFE_YUV_YCrYCb: + demperiod = 3; + demeven = 0xAC9C; + demodd = 0xAC9C; + break; + + case VFE_YUV_CbYCrY: + demperiod = 3; + demeven = 0xC9CA; + demodd = 0xC9CA; + break; + + case VFE_YUV_CrYCbY: + demperiod = 3; + demeven = 0xCAC9; + demodd = 0xCAC9; + break; + + default: + return; + } + + vfe_config_demux(demperiod, demeven, demodd); + + vfe_program_lut_bank_sel(&ctrl->vfeGammaLutSel); + + /* save variables to local. */ + ctrl->vfeOperationMode = in->operationMode; + if (ctrl->vfeOperationMode == VFE_START_OPERATION_MODE_SNAPSHOT) { + /* in snapshot mode, initialize snapshot count */ + ctrl->vfeSnapShotCount = in->snapshotCount; + + /* save the requested count, this is temporarily done, to + help with HJR / multishot. */ + ctrl->vfeRequestedSnapShotCount = ctrl->vfeSnapShotCount; + + CDBG("requested snapshot count = %d\n", ctrl->vfeSnapShotCount); + + /* Assumption is to have the same pattern and period for both + paths, if both paths are used. */ + if (ctrl->viewPath.pathEnabled) { + ctrl->viewPath.snapshotPendingCount = in->snapshotCount; + + ctrl->vfeFrameSkipPattern = + ctrl->vfeFrameSkip.output1Pattern; + ctrl->vfeFrameSkipPeriod = + ctrl->vfeFrameSkip.output1Period; + } + + if (ctrl->encPath.pathEnabled) { + ctrl->encPath.snapshotPendingCount = in->snapshotCount; + + ctrl->vfeFrameSkipPattern = + ctrl->vfeFrameSkip.output2Pattern; + ctrl->vfeFrameSkipPeriod = + ctrl->vfeFrameSkip.output2Period; + } + } + + /* enable color conversion for bayer sensor + if stats enabled, need to do color conversion. */ + if (in->pixel <= VFE_BAYER_GBGBGB) + ctrl->vfeStatsCmdLocal.colorConversionEnable = TRUE; + + vfe_program_stats_cmd(&ctrl->vfeStatsCmdLocal); + + if (in->pixel >= VFE_YUV_YCbYCr) + ctrl->vfeModuleEnableLocal.chromaUpsampleEnable = TRUE; + + ctrl->vfeModuleEnableLocal.demuxEnable = TRUE; + + /* if any stats module is enabled, the main bit is enabled. */ + ctrl->vfeModuleEnableLocal.statsEnable = + ctrl->vfeStatsCmdLocal.autoFocusEnable | + ctrl->vfeStatsCmdLocal.axwEnable; + + vfe_reg_module_cfg(&ctrl->vfeModuleEnableLocal); + + /* in case of offline processing, do not need to config camif. Having + * bus output enabled in camif_config register might confuse the + * hardware? 
+ */ + if (in->inputSource != VFE_START_INPUT_SOURCE_AXI) { + vfe_reg_camif_config(&ctrl->vfeCamifConfigLocal); + } else { + /* offline processing, enable axi read */ + ctrl->vfeBusConfigLocal.stripeRdPathEn = TRUE; + ctrl->vfeBusCmdLocal.stripeReload = TRUE; + ctrl->vfeBusConfigLocal.rawPixelDataSize = + ctrl->axiInputDataSize; + } + + vfe_reg_bus_cfg(&ctrl->vfeBusConfigLocal); + + /* directly from start command */ + hwcfg.pixelPattern = in->pixel; + hwcfg.inputSource = in->inputSource; + writel(*(uint32_t *)&hwcfg, ctrl->vfebase + VFE_CFG); + + /* regardless module enabled or not, it does not hurt + * to program the cositing mode. */ + chromupcfg.chromaCositingForYCbCrInputs = in->yuvInputCositingMode; + + writel(*(uint32_t *)&chromupcfg, + ctrl->vfebase + VFE_CHROMA_UPSAMPLE_CFG); + + /* MISR to monitor the axi read. */ + writel(0xd8, ctrl->vfebase + VFE_BUS_MISR_MAST_CFG_0); + + /* clear all pending interrupts. */ + writel(VFE_CLEAR_ALL_IRQS, ctrl->vfebase + VFE_IRQ_CLEAR); + + /* define how composite interrupt work. */ + ctrl->vfeImaskCompositePacked = + vfe_irq_composite_pack(ctrl->vfeIrqCompositeMaskLocal); + + vfe_program_irq_composite_mask(ctrl->vfeImaskCompositePacked); + + /* enable all necessary interrupts. */ + ctrl->vfeImaskLocal.camifSofIrq = TRUE; + ctrl->vfeImaskLocal.regUpdateIrq = TRUE; + ctrl->vfeImaskLocal.resetAckIrq = TRUE; + + ctrl->vfeImaskPacked = vfe_irq_pack(ctrl->vfeImaskLocal); + vfe_program_irq_mask(ctrl->vfeImaskPacked); + + /* enable bus performance monitor */ + vfe_8k_pm_start(&ctrl->vfeBusPmConfigLocal); + + /* trigger vfe reg update */ + ctrl->vfeStartAckPendingFlag = TRUE; + + /* write bus command to trigger reload of ping pong buffer. */ + ctrl->vfeBusCmdLocal.busPingpongReload = TRUE; + + if (ctrl->vfeModuleEnableLocal.statsEnable == TRUE) { + ctrl->vfeBusCmdLocal.statsPingpongReload = TRUE; + ctrl->vfeStatsPingPongReloadFlag = TRUE; + } + + writel(VFE_REG_UPDATE_TRIGGER, ctrl->vfebase + VFE_REG_UPDATE_CMD); + + /* program later than the reg update. */ + vfe_reg_bus_cmd(&ctrl->vfeBusCmdLocal); + + if ((in->inputSource == + VFE_START_INPUT_SOURCE_CAMIF) || + (in->inputSource == VFE_START_INPUT_SOURCE_TESTGEN)) + writel(CAMIF_COMMAND_START, ctrl->vfebase + CAMIF_COMMAND); + + /* start test gen if it is enabled */ + if (ctrl->vfeTestGenStartFlag == TRUE) { + ctrl->vfeTestGenStartFlag = FALSE; + vfe_prog_hw_testgen_cmd(VFE_TEST_GEN_GO); + } + + CDBG("ctrl->axiOutputMode = %d\n", ctrl->axiOutputMode); + if (ctrl->axiOutputMode == VFE_AXI_OUTPUT_MODE_CAMIFToAXIViaOutput2) { + /* raw dump mode */ + rawmode = TRUE; + + while (rawmode) { + pmstatus = + readl(ctrl->vfebase + + VFE_BUS_ENC_CBCR_WR_PM_STATS_1); + + if ((pmstatus & VFE_PM_BUF_MAX_CNT_MASK) != 0) + rawmode = FALSE; + } + + vfe_proc_ops(VFE_MSG_ID_START_ACK, NULL); + ctrl->vfeStartAckPendingFlag = FALSE; + } + + ctrl->vstate = VFE_STATE_ACTIVE; +} + +void vfe_la_update(struct vfe_cmd_la_config *in) +{ + int16_t *pTable; + enum VFE_DMI_RAM_SEL dmiRamSel; + int i; + + pTable = in->table; + ctrl->vfeModuleEnableLocal.lumaAdaptationEnable = in->enable; + + /* toggle the bank to be used. 
*/ + ctrl->vfeLaBankSel ^= 1; + + if (ctrl->vfeLaBankSel == 0) + dmiRamSel = LUMA_ADAPT_LUT_RAM_BANK0; + else + dmiRamSel = LUMA_ADAPT_LUT_RAM_BANK1; + + /* configure the DMI_CFG to select right sram */ + vfe_program_dmi_cfg(dmiRamSel); + + for (i = 0; i < VFE_LA_TABLE_LENGTH; i++) { + writel((uint32_t) (*pTable), ctrl->vfebase + VFE_DMI_DATA_LO); + pTable++; + } + + /* After DMI transfer, to make it safe, need to set + * the DMI_CFG to unselect any SRAM */ + writel(VFE_DMI_CFG_DEFAULT, ctrl->vfebase + VFE_DMI_CFG); + writel(ctrl->vfeLaBankSel, ctrl->vfebase + VFE_LA_CFG); +} + +void vfe_la_config(struct vfe_cmd_la_config *in) +{ + uint16_t i; + int16_t *pTable; + enum VFE_DMI_RAM_SEL dmiRamSel; + + pTable = in->table; + ctrl->vfeModuleEnableLocal.lumaAdaptationEnable = in->enable; + + if (ctrl->vfeLaBankSel == 0) + dmiRamSel = LUMA_ADAPT_LUT_RAM_BANK0; + else + dmiRamSel = LUMA_ADAPT_LUT_RAM_BANK1; + + /* configure the DMI_CFG to select right sram */ + vfe_program_dmi_cfg(dmiRamSel); + + for (i = 0; i < VFE_LA_TABLE_LENGTH; i++) { + writel((uint32_t) (*pTable), ctrl->vfebase + VFE_DMI_DATA_LO); + pTable++; + } + + /* After DMI transfer, to make it safe, need to set the + * DMI_CFG to unselect any SRAM */ + writel(VFE_DMI_CFG_DEFAULT, ctrl->vfebase + VFE_DMI_CFG); + + /* can only be bank 0 or bank 1 for now. */ + writel(ctrl->vfeLaBankSel, ctrl->vfebase + VFE_LA_CFG); + CDBG("VFE Luma adaptation bank selection is 0x%x\n", + *(uint32_t *)&ctrl->vfeLaBankSel); +} + +void vfe_test_gen_start(struct vfe_cmd_test_gen_start *in) +{ + struct VFE_TestGen_ConfigCmdType cmd; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.numFrame = in->numFrame; + cmd.pixelDataSelect = in->pixelDataSelect; + cmd.systematicDataSelect = in->systematicDataSelect; + cmd.pixelDataSize = (uint32_t) in->pixelDataSize; + cmd.hsyncEdge = (uint32_t) in->hsyncEdge; + cmd.vsyncEdge = (uint32_t) in->vsyncEdge; + cmd.imageWidth = in->imageWidth; + cmd.imageHeight = in->imageHeight; + cmd.sofOffset = in->startOfFrameOffset; + cmd.eofNOffset = in->endOfFrameNOffset; + cmd.solOffset = in->startOfLineOffset; + cmd.eolNOffset = in->endOfLineNOffset; + cmd.hBlankInterval = in->hbi; + cmd.vBlankInterval = in->vbl; + cmd.vBlankIntervalEnable = in->vblEnable; + cmd.sofDummy = in->startOfFrameDummyLine; + cmd.eofDummy = in->endOfFrameDummyLine; + cmd.unicolorBarSelect = in->unicolorBarSelect; + cmd.unicolorBarEnable = in->unicolorBarEnable; + cmd.splitEnable = in->colorBarsSplitEnable; + cmd.pixelPattern = (uint32_t) in->colorBarsPixelPattern; + cmd.rotatePeriod = in->colorBarsRotatePeriod; + cmd.randomSeed = in->testGenRandomSeed; + + vfe_prog_hw(ctrl->vfebase + VFE_HW_TESTGEN_CFG, + (uint32_t *)&cmd, sizeof(cmd)); +} + +void vfe_frame_skip_update(struct vfe_cmd_frame_skip_update *in) +{ + struct VFE_FRAME_SKIP_UpdateCmdType cmd; + + cmd.yPattern = in->output1Pattern; + cmd.cbcrPattern = in->output1Pattern; + vfe_prog_hw(ctrl->vfebase + VFE_FRAMEDROP_VIEW_Y_PATTERN, + (uint32_t *)&cmd, sizeof(cmd)); + + cmd.yPattern = in->output2Pattern; + cmd.cbcrPattern = in->output2Pattern; + vfe_prog_hw(ctrl->vfebase + VFE_FRAMEDROP_ENC_Y_PATTERN, + (uint32_t *)&cmd, sizeof(cmd)); +} + +void vfe_frame_skip_config(struct vfe_cmd_frame_skip_config *in) +{ + struct vfe_frame_skip_cfg cmd; + memset(&cmd, 0, sizeof(cmd)); + + ctrl->vfeFrameSkip = *in; + + cmd.output2YPeriod = in->output2Period; + cmd.output2CbCrPeriod = in->output2Period; + cmd.output2YPattern = in->output2Pattern; + cmd.output2CbCrPattern = in->output2Pattern; + cmd.output1YPeriod = 
in->output1Period; + cmd.output1CbCrPeriod = in->output1Period; + cmd.output1YPattern = in->output1Pattern; + cmd.output1CbCrPattern = in->output1Pattern; + + vfe_prog_hw(ctrl->vfebase + VFE_FRAMEDROP_ENC_Y_CFG, + (uint32_t *)&cmd, sizeof(cmd)); +} + +void vfe_output_clamp_config(struct vfe_cmd_output_clamp_config *in) +{ + struct vfe_output_clamp_cfg cmd; + memset(&cmd, 0, sizeof(cmd)); + + cmd.yChanMax = in->maxCh0; + cmd.cbChanMax = in->maxCh1; + cmd.crChanMax = in->maxCh2; + + cmd.yChanMin = in->minCh0; + cmd.cbChanMin = in->minCh1; + cmd.crChanMin = in->minCh2; + + vfe_prog_hw(ctrl->vfebase + VFE_CLAMP_MAX_CFG, (uint32_t *)&cmd, + sizeof(cmd)); +} + +void vfe_camif_frame_update(struct vfe_cmds_camif_frame *in) +{ + struct vfe_camifframe_update cmd; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.pixelsPerLine = in->pixelsPerLine; + cmd.linesPerFrame = in->linesPerFrame; + + vfe_prog_hw(ctrl->vfebase + CAMIF_FRAME_CONFIG, (uint32_t *)&cmd, + sizeof(cmd)); +} + +void vfe_color_correction_config(struct vfe_cmd_color_correction_config *in) +{ + struct vfe_color_correction_cfg cmd; + + memset(&cmd, 0, sizeof(cmd)); + ctrl->vfeModuleEnableLocal.colorCorrectionEnable = in->enable; + + cmd.c0 = in->C0; + cmd.c1 = in->C1; + cmd.c2 = in->C2; + cmd.c3 = in->C3; + cmd.c4 = in->C4; + cmd.c5 = in->C5; + cmd.c6 = in->C6; + cmd.c7 = in->C7; + cmd.c8 = in->C8; + + cmd.k0 = in->K0; + cmd.k1 = in->K1; + cmd.k2 = in->K2; + + cmd.coefQFactor = in->coefQFactor; + + vfe_prog_hw(ctrl->vfebase + VFE_COLOR_CORRECT_COEFF_0, + (uint32_t *)&cmd, sizeof(cmd)); +} + +void vfe_demosaic_abf_update(struct vfe_cmd_demosaic_abf_update *in) +{ + struct vfe_demosaic_cfg cmd; + struct vfe_demosaic_abf_cfg cmdabf; + uint32_t temp; + + memset(&cmd, 0, sizeof(cmd)); + temp = readl(ctrl->vfebase + VFE_DEMOSAIC_CFG); + + cmd = *((struct vfe_demosaic_cfg *)(&temp)); + cmd.abfEnable = in->abfUpdate.enable; + cmd.forceAbfOn = in->abfUpdate.forceOn; + cmd.abfShift = in->abfUpdate.shift; + vfe_prog_hw(ctrl->vfebase + VFE_DEMOSAIC_CFG, + (uint32_t *)&cmd, sizeof(cmd)); + + cmdabf.lpThreshold = in->abfUpdate.lpThreshold; + cmdabf.ratio = in->abfUpdate.ratio; + cmdabf.minValue = in->abfUpdate.min; + cmdabf.maxValue = in->abfUpdate.max; + vfe_prog_hw(ctrl->vfebase + VFE_DEMOSAIC_ABF_CFG_0, + (uint32_t *)&cmdabf, sizeof(cmdabf)); +} + +void vfe_demosaic_bpc_update(struct vfe_cmd_demosaic_bpc_update *in) +{ + struct vfe_demosaic_cfg cmd; + struct vfe_demosaic_bpc_cfg cmdbpc; + uint32_t temp; + + memset(&cmd, 0, sizeof(cmd)); + + temp = readl(ctrl->vfebase + VFE_DEMOSAIC_CFG); + + cmd = *((struct vfe_demosaic_cfg *)(&temp)); + cmd.badPixelCorrEnable = in->bpcUpdate.enable; + cmd.fminThreshold = in->bpcUpdate.fminThreshold; + cmd.fmaxThreshold = in->bpcUpdate.fmaxThreshold; + + vfe_prog_hw(ctrl->vfebase + VFE_DEMOSAIC_CFG, + (uint32_t *)&cmd, sizeof(cmd)); + + cmdbpc.blueDiffThreshold = in->bpcUpdate.blueDiffThreshold; + cmdbpc.redDiffThreshold = in->bpcUpdate.redDiffThreshold; + cmdbpc.greenDiffThreshold = in->bpcUpdate.greenDiffThreshold; + + vfe_prog_hw(ctrl->vfebase + VFE_DEMOSAIC_BPC_CFG_0, + (uint32_t *)&cmdbpc, sizeof(cmdbpc)); +} + +void vfe_demosaic_config(struct vfe_cmd_demosaic_config *in) +{ + struct vfe_demosaic_cfg cmd; + struct vfe_demosaic_bpc_cfg cmd_bpc; + struct vfe_demosaic_abf_cfg cmd_abf; + + memset(&cmd, 0, sizeof(cmd)); + memset(&cmd_bpc, 0, sizeof(cmd_bpc)); + memset(&cmd_abf, 0, sizeof(cmd_abf)); + + ctrl->vfeModuleEnableLocal.demosaicEnable = in->enable; + + cmd.abfEnable = in->abfConfig.enable; + cmd.badPixelCorrEnable 
= in->bpcConfig.enable; + cmd.forceAbfOn = in->abfConfig.forceOn; + cmd.abfShift = in->abfConfig.shift; + cmd.fminThreshold = in->bpcConfig.fminThreshold; + cmd.fmaxThreshold = in->bpcConfig.fmaxThreshold; + cmd.slopeShift = in->slopeShift; + + vfe_prog_hw(ctrl->vfebase + VFE_DEMOSAIC_CFG, + (uint32_t *)&cmd, sizeof(cmd)); + + cmd_abf.lpThreshold = in->abfConfig.lpThreshold; + cmd_abf.ratio = in->abfConfig.ratio; + cmd_abf.minValue = in->abfConfig.min; + cmd_abf.maxValue = in->abfConfig.max; + + vfe_prog_hw(ctrl->vfebase + VFE_DEMOSAIC_ABF_CFG_0, + (uint32_t *)&cmd_abf, sizeof(cmd_abf)); + + cmd_bpc.blueDiffThreshold = in->bpcConfig.blueDiffThreshold; + cmd_bpc.redDiffThreshold = in->bpcConfig.redDiffThreshold; + cmd_bpc.greenDiffThreshold = in->bpcConfig.greenDiffThreshold; + + vfe_prog_hw(ctrl->vfebase + VFE_DEMOSAIC_BPC_CFG_0, + (uint32_t *)&cmd_bpc, sizeof(cmd_bpc)); +} + +void vfe_demux_channel_gain_update(struct vfe_cmd_demux_channel_gain_config *in) +{ + struct vfe_demux_cfg cmd; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.ch0EvenGain = in->ch0EvenGain; + cmd.ch0OddGain = in->ch0OddGain; + cmd.ch1Gain = in->ch1Gain; + cmd.ch2Gain = in->ch2Gain; + + vfe_prog_hw(ctrl->vfebase + VFE_DEMUX_GAIN_0, + (uint32_t *)&cmd, sizeof(cmd)); +} + +void vfe_demux_channel_gain_config(struct vfe_cmd_demux_channel_gain_config *in) +{ + struct vfe_demux_cfg cmd; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.ch0EvenGain = in->ch0EvenGain; + cmd.ch0OddGain = in->ch0OddGain; + cmd.ch1Gain = in->ch1Gain; + cmd.ch2Gain = in->ch2Gain; + + vfe_prog_hw(ctrl->vfebase + VFE_DEMUX_GAIN_0, + (uint32_t *)&cmd, sizeof(cmd)); +} + +void vfe_black_level_update(struct vfe_cmd_black_level_config *in) +{ + struct vfe_blacklevel_cfg cmd; + + memset(&cmd, 0, sizeof(cmd)); + ctrl->vfeModuleEnableLocal.blackLevelCorrectionEnable = in->enable; + + cmd.evenEvenAdjustment = in->evenEvenAdjustment; + cmd.evenOddAdjustment = in->evenOddAdjustment; + cmd.oddEvenAdjustment = in->oddEvenAdjustment; + cmd.oddOddAdjustment = in->oddOddAdjustment; + + vfe_prog_hw(ctrl->vfebase + VFE_BLACK_EVEN_EVEN_VALUE, + (uint32_t *)&cmd, sizeof(cmd)); +} + +void vfe_black_level_config(struct vfe_cmd_black_level_config *in) +{ + struct vfe_blacklevel_cfg cmd; + memset(&cmd, 0, sizeof(cmd)); + + ctrl->vfeModuleEnableLocal.blackLevelCorrectionEnable = in->enable; + + cmd.evenEvenAdjustment = in->evenEvenAdjustment; + cmd.evenOddAdjustment = in->evenOddAdjustment; + cmd.oddEvenAdjustment = in->oddEvenAdjustment; + cmd.oddOddAdjustment = in->oddOddAdjustment; + + vfe_prog_hw(ctrl->vfebase + VFE_BLACK_EVEN_EVEN_VALUE, + (uint32_t *)&cmd, sizeof(cmd)); +} + +void vfe_asf_update(struct vfe_cmd_asf_update *in) +{ + struct vfe_asf_update cmd; + memset(&cmd, 0, sizeof(cmd)); + + ctrl->vfeModuleEnableLocal.asfEnable = in->enable; + + cmd.smoothEnable = in->smoothFilterEnabled; + cmd.sharpMode = in->sharpMode; + cmd.smoothCoeff0 = in->smoothCoefCenter; + cmd.smoothCoeff1 = in->smoothCoefSurr; + cmd.cropEnable = in->cropEnable; + cmd.sharpThresholdE1 = in->sharpThreshE1; + cmd.sharpDegreeK1 = in->sharpK1; + cmd.sharpDegreeK2 = in->sharpK2; + cmd.normalizeFactor = in->normalizeFactor; + cmd.sharpThresholdE2 = in->sharpThreshE2; + cmd.sharpThresholdE3 = in->sharpThreshE3; + cmd.sharpThresholdE4 = in->sharpThreshE4; + cmd.sharpThresholdE5 = in->sharpThreshE5; + cmd.F1Coeff0 = in->filter1Coefficients[0]; + cmd.F1Coeff1 = in->filter1Coefficients[1]; + cmd.F1Coeff2 = in->filter1Coefficients[2]; + cmd.F1Coeff3 = in->filter1Coefficients[3]; + cmd.F1Coeff4 = 
in->filter1Coefficients[4]; + cmd.F1Coeff5 = in->filter1Coefficients[5]; + cmd.F1Coeff6 = in->filter1Coefficients[6]; + cmd.F1Coeff7 = in->filter1Coefficients[7]; + cmd.F1Coeff8 = in->filter1Coefficients[8]; + cmd.F2Coeff0 = in->filter2Coefficients[0]; + cmd.F2Coeff1 = in->filter2Coefficients[1]; + cmd.F2Coeff2 = in->filter2Coefficients[2]; + cmd.F2Coeff3 = in->filter2Coefficients[3]; + cmd.F2Coeff4 = in->filter2Coefficients[4]; + cmd.F2Coeff5 = in->filter2Coefficients[5]; + cmd.F2Coeff6 = in->filter2Coefficients[6]; + cmd.F2Coeff7 = in->filter2Coefficients[7]; + cmd.F2Coeff8 = in->filter2Coefficients[8]; + + vfe_prog_hw(ctrl->vfebase + VFE_ASF_CFG, + (uint32_t *)&cmd, sizeof(cmd)); +} + +void vfe_asf_config(struct vfe_cmd_asf_config *in) +{ + struct vfe_asf_update cmd; + struct vfe_asfcrop_cfg cmd2; + + memset(&cmd, 0, sizeof(cmd)); + memset(&cmd2, 0, sizeof(cmd2)); + + ctrl->vfeModuleEnableLocal.asfEnable = in->enable; + + cmd.smoothEnable = in->smoothFilterEnabled; + cmd.sharpMode = in->sharpMode; + cmd.smoothCoeff0 = in->smoothCoefCenter; + cmd.smoothCoeff1 = in->smoothCoefSurr; + cmd.cropEnable = in->cropEnable; + cmd.sharpThresholdE1 = in->sharpThreshE1; + cmd.sharpDegreeK1 = in->sharpK1; + cmd.sharpDegreeK2 = in->sharpK2; + cmd.normalizeFactor = in->normalizeFactor; + cmd.sharpThresholdE2 = in->sharpThreshE2; + cmd.sharpThresholdE3 = in->sharpThreshE3; + cmd.sharpThresholdE4 = in->sharpThreshE4; + cmd.sharpThresholdE5 = in->sharpThreshE5; + cmd.F1Coeff0 = in->filter1Coefficients[0]; + cmd.F1Coeff1 = in->filter1Coefficients[1]; + cmd.F1Coeff2 = in->filter1Coefficients[2]; + cmd.F1Coeff3 = in->filter1Coefficients[3]; + cmd.F1Coeff4 = in->filter1Coefficients[4]; + cmd.F1Coeff5 = in->filter1Coefficients[5]; + cmd.F1Coeff6 = in->filter1Coefficients[6]; + cmd.F1Coeff7 = in->filter1Coefficients[7]; + cmd.F1Coeff8 = in->filter1Coefficients[8]; + cmd.F2Coeff0 = in->filter2Coefficients[0]; + cmd.F2Coeff1 = in->filter2Coefficients[1]; + cmd.F2Coeff2 = in->filter2Coefficients[2]; + cmd.F2Coeff3 = in->filter2Coefficients[3]; + cmd.F2Coeff4 = in->filter2Coefficients[4]; + cmd.F2Coeff5 = in->filter2Coefficients[5]; + cmd.F2Coeff6 = in->filter2Coefficients[6]; + cmd.F2Coeff7 = in->filter2Coefficients[7]; + cmd.F2Coeff8 = in->filter2Coefficients[8]; + + vfe_prog_hw(ctrl->vfebase + VFE_ASF_CFG, + (uint32_t *)&cmd, sizeof(cmd)); + + cmd2.firstLine = in->cropFirstLine; + cmd2.lastLine = in->cropLastLine; + cmd2.firstPixel = in->cropFirstPixel; + cmd2.lastPixel = in->cropLastPixel; + + vfe_prog_hw(ctrl->vfebase + VFE_ASF_CROP_WIDTH_CFG, + (uint32_t *)&cmd2, sizeof(cmd2)); +} + +void vfe_white_balance_config(struct vfe_cmd_white_balance_config *in) +{ + struct vfe_wb_cfg cmd; + memset(&cmd, 0, sizeof(cmd)); + + ctrl->vfeModuleEnableLocal.whiteBalanceEnable = in->enable; + + cmd.ch0Gain = in->ch0Gain; + cmd.ch1Gain = in->ch1Gain; + cmd.ch2Gain = in->ch2Gain; + + vfe_prog_hw(ctrl->vfebase + VFE_WB_CFG, + (uint32_t *)&cmd, sizeof(cmd)); +} + +void vfe_chroma_sup_config(struct vfe_cmd_chroma_suppression_config *in) +{ + struct vfe_chroma_suppress_cfg cmd; + memset(&cmd, 0, sizeof(cmd)); + + ctrl->vfeModuleEnableLocal.chromaSuppressionEnable = in->enable; + + cmd.m1 = in->m1; + cmd.m3 = in->m3; + cmd.n1 = in->n1; + cmd.n3 = in->n3; + cmd.mm1 = in->mm1; + cmd.nn1 = in->nn1; + + vfe_prog_hw(ctrl->vfebase + VFE_CHROMA_SUPPRESS_CFG_0, + (uint32_t *)&cmd, sizeof(cmd)); +} + +void vfe_roll_off_config(struct vfe_cmd_roll_off_config *in) +{ + struct vfe_rolloff_cfg cmd; + memset(&cmd, 0, sizeof(cmd)); + + 
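/* program the lens rolloff (vignetting correction) grid geometry; the correction table itself is loaded by vfe_write_lens_roll_off_table() below. */ +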
ctrl->vfeModuleEnableLocal.lensRollOffEnable = in->enable; + + cmd.gridWidth = in->gridWidth; + cmd.gridHeight = in->gridHeight; + cmd.yDelta = in->yDelta; + cmd.gridX = in->gridXIndex; + cmd.gridY = in->gridYIndex; + cmd.pixelX = in->gridPixelXIndex; + cmd.pixelY = in->gridPixelYIndex; + cmd.yDeltaAccum = in->yDeltaAccum; + + vfe_prog_hw(ctrl->vfebase + VFE_ROLLOFF_CFG_0, + (uint32_t *)&cmd, sizeof(cmd)); + + vfe_write_lens_roll_off_table(in); +} + +void vfe_chroma_subsample_config(struct vfe_cmd_chroma_subsample_config *in) +{ + struct vfe_chromasubsample_cfg cmd; + memset(&cmd, 0, sizeof(cmd)); + + ctrl->vfeModuleEnableLocal.chromaSubsampleEnable = in->enable; + + cmd.hCositedPhase = in->hCositedPhase; + cmd.vCositedPhase = in->vCositedPhase; + cmd.hCosited = in->hCosited; + cmd.vCosited = in->vCosited; + cmd.hsubSampleEnable = in->hsubSampleEnable; + cmd.vsubSampleEnable = in->vsubSampleEnable; + cmd.cropEnable = in->cropEnable; + cmd.cropWidthLastPixel = in->cropWidthLastPixel; + cmd.cropWidthFirstPixel = in->cropWidthFirstPixel; + cmd.cropHeightLastLine = in->cropHeightLastLine; + cmd.cropHeightFirstLine = in->cropHeightFirstLine; + + vfe_prog_hw(ctrl->vfebase + VFE_CHROMA_SUBSAMPLE_CFG, + (uint32_t *)&cmd, sizeof(cmd)); +} + +void vfe_chroma_enhan_config(struct vfe_cmd_chroma_enhan_config *in) +{ + struct vfe_chroma_enhance_cfg cmd; + struct vfe_color_convert_cfg cmd2; + + memset(&cmd, 0, sizeof(cmd)); + memset(&cmd2, 0, sizeof(cmd2)); + + ctrl->vfeModuleEnableLocal.chromaEnhanEnable = in->enable; + + cmd.ap = in->ap; + cmd.am = in->am; + cmd.bp = in->bp; + cmd.bm = in->bm; + cmd.cp = in->cp; + cmd.cm = in->cm; + cmd.dp = in->dp; + cmd.dm = in->dm; + cmd.kcb = in->kcb; + cmd.kcr = in->kcr; + + cmd2.v0 = in->RGBtoYConversionV0; + cmd2.v1 = in->RGBtoYConversionV1; + cmd2.v2 = in->RGBtoYConversionV2; + cmd2.ConvertOffset = in->RGBtoYConversionOffset; + + vfe_prog_hw(ctrl->vfebase + VFE_CHROMA_ENHAN_A, + (uint32_t *)&cmd, sizeof(cmd)); + + vfe_prog_hw(ctrl->vfebase + VFE_COLOR_CONVERT_COEFF_0, + (uint32_t *)&cmd2, sizeof(cmd2)); +} + +void vfe_scaler2cbcr_config(struct vfe_cmd_scaler2_config *in) +{ + struct vfe_scaler2_cfg cmd; + + memset(&cmd, 0, sizeof(cmd)); + + ctrl->vfeModuleEnableLocal.scaler2CbcrEnable = in->enable; + + cmd.hEnable = in->hconfig.enable; + cmd.vEnable = in->vconfig.enable; + cmd.inWidth = in->hconfig.inputSize; + cmd.outWidth = in->hconfig.outputSize; + cmd.horizPhaseMult = in->hconfig.phaseMultiplicationFactor; + cmd.horizInterResolution = in->hconfig.interpolationResolution; + cmd.inHeight = in->vconfig.inputSize; + cmd.outHeight = in->vconfig.outputSize; + cmd.vertPhaseMult = in->vconfig.phaseMultiplicationFactor; + cmd.vertInterResolution = in->vconfig.interpolationResolution; + + vfe_prog_hw(ctrl->vfebase + VFE_SCALE_CBCR_CFG, + (uint32_t *)&cmd, sizeof(cmd)); +} + +void vfe_scaler2y_config(struct vfe_cmd_scaler2_config *in) +{ + struct vfe_scaler2_cfg cmd; + + memset(&cmd, 0, sizeof(cmd)); + + ctrl->vfeModuleEnableLocal.scaler2YEnable = in->enable; + + cmd.hEnable = in->hconfig.enable; + cmd.vEnable = in->vconfig.enable; + cmd.inWidth = in->hconfig.inputSize; + cmd.outWidth = in->hconfig.outputSize; + cmd.horizPhaseMult = in->hconfig.phaseMultiplicationFactor; + cmd.horizInterResolution = in->hconfig.interpolationResolution; + cmd.inHeight = in->vconfig.inputSize; + cmd.outHeight = in->vconfig.outputSize; + cmd.vertPhaseMult = in->vconfig.phaseMultiplicationFactor; + cmd.vertInterResolution = in->vconfig.interpolationResolution; + + 
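/* program the secondary Y scaler block starting at VFE_SCALE_Y_CFG. */ +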
vfe_prog_hw(ctrl->vfebase + VFE_SCALE_Y_CFG, + (uint32_t *)&cmd, sizeof(cmd)); +} + +void vfe_main_scaler_config(struct vfe_cmd_main_scaler_config *in) +{ + struct vfe_main_scaler_cfg cmd; + + memset(&cmd, 0, sizeof(cmd)); + + ctrl->vfeModuleEnableLocal.mainScalerEnable = in->enable; + + cmd.hEnable = in->hconfig.enable; + cmd.vEnable = in->vconfig.enable; + cmd.inWidth = in->hconfig.inputSize; + cmd.outWidth = in->hconfig.outputSize; + cmd.horizPhaseMult = in->hconfig.phaseMultiplicationFactor; + cmd.horizInterResolution = in->hconfig.interpolationResolution; + cmd.horizMNInit = in->MNInitH.MNCounterInit; + cmd.horizPhaseInit = in->MNInitH.phaseInit; + cmd.inHeight = in->vconfig.inputSize; + cmd.outHeight = in->vconfig.outputSize; + cmd.vertPhaseMult = in->vconfig.phaseMultiplicationFactor; + cmd.vertInterResolution = in->vconfig.interpolationResolution; + cmd.vertMNInit = in->MNInitV.MNCounterInit; + cmd.vertPhaseInit = in->MNInitV.phaseInit; + + vfe_prog_hw(ctrl->vfebase + VFE_SCALE_CFG, + (uint32_t *)&cmd, sizeof(cmd)); +} + +void vfe_stats_wb_exp_stop(void) +{ + ctrl->vfeStatsCmdLocal.axwEnable = FALSE; + ctrl->vfeImaskLocal.awbPingpongIrq = FALSE; +} + +void vfe_stats_update_wb_exp(struct vfe_cmd_stats_wb_exp_update *in) +{ + struct vfe_statsawb_update cmd; + struct vfe_statsawbae_update cmd2; + + memset(&cmd, 0, sizeof(cmd)); + memset(&cmd2, 0, sizeof(cmd2)); + + cmd.m1 = in->awbMCFG[0]; + cmd.m2 = in->awbMCFG[1]; + cmd.m3 = in->awbMCFG[2]; + cmd.m4 = in->awbMCFG[3]; + cmd.c1 = in->awbCCFG[0]; + cmd.c2 = in->awbCCFG[1]; + cmd.c3 = in->awbCCFG[2]; + cmd.c4 = in->awbCCFG[3]; + vfe_prog_hw(ctrl->vfebase + VFE_STATS_AWB_MCFG, + (uint32_t *)&cmd, sizeof(cmd)); + + cmd2.aeRegionCfg = in->wbExpRegions; + cmd2.aeSubregionCfg = in->wbExpSubRegion; + cmd2.awbYMin = in->awbYMin; + cmd2.awbYMax = in->awbYMax; + vfe_prog_hw(ctrl->vfebase + VFE_STATS_AWBAE_CFG, + (uint32_t *)&cmd2, sizeof(cmd2)); +} + +void vfe_stats_update_af(struct vfe_cmd_stats_af_update *in) +{ + struct vfe_statsaf_update cmd; + memset(&cmd, 0, sizeof(cmd)); + + cmd.windowVOffset = in->windowVOffset; + cmd.windowHOffset = in->windowHOffset; + cmd.windowMode = in->windowMode; + cmd.windowHeight = in->windowHeight; + cmd.windowWidth = in->windowWidth; + + vfe_prog_hw(ctrl->vfebase + VFE_STATS_AF_CFG, + (uint32_t *)&cmd, sizeof(cmd)); +} + +void vfe_stats_start_wb_exp(struct vfe_cmd_stats_wb_exp_start *in) +{ + struct vfe_statsawb_update cmd; + struct vfe_statsawbae_update cmd2; + struct vfe_statsaxw_hdr_cfg cmd3; + + ctrl->vfeStatsCmdLocal.axwEnable = in->enable; + ctrl->vfeImaskLocal.awbPingpongIrq = TRUE; + + memset(&cmd, 0, sizeof(cmd)); + memset(&cmd2, 0, sizeof(cmd2)); + memset(&cmd3, 0, sizeof(cmd3)); + + cmd.m1 = in->awbMCFG[0]; + cmd.m2 = in->awbMCFG[1]; + cmd.m3 = in->awbMCFG[2]; + cmd.m4 = in->awbMCFG[3]; + cmd.c1 = in->awbCCFG[0]; + cmd.c2 = in->awbCCFG[1]; + cmd.c3 = in->awbCCFG[2]; + cmd.c4 = in->awbCCFG[3]; + vfe_prog_hw(ctrl->vfebase + VFE_STATS_AWB_MCFG, + (uint32_t *)&cmd, sizeof(cmd)); + + cmd2.aeRegionCfg = in->wbExpRegions; + cmd2.aeSubregionCfg = in->wbExpSubRegion; + cmd2.awbYMin = in->awbYMin; + cmd2.awbYMax = in->awbYMax; + vfe_prog_hw(ctrl->vfebase + VFE_STATS_AWBAE_CFG, + (uint32_t *)&cmd2, sizeof(cmd2)); + + cmd3.axwHeader = in->axwHeader; + vfe_prog_hw(ctrl->vfebase + VFE_STATS_AXW_HEADER, + (uint32_t *)&cmd3, sizeof(cmd3)); +} + +void vfe_stats_start_af(struct vfe_cmd_stats_af_start *in) +{ + struct vfe_statsaf_update cmd; + struct vfe_statsaf_cfg cmd2; + + memset(&cmd, 0, sizeof(cmd)); + 
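/* cmd programs the AF window (VFE_STATS_AF_CFG); cmd2 carries the high-pass filter coefficients, focus metric selection and multi-window grid. */ +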
memset(&cmd2, 0, sizeof(cmd2)); + + ctrl->vfeStatsCmdLocal.autoFocusEnable = in->enable; + ctrl->vfeImaskLocal.afPingpongIrq = TRUE; + + cmd.windowVOffset = in->windowVOffset; + cmd.windowHOffset = in->windowHOffset; + cmd.windowMode = in->windowMode; + cmd.windowHeight = in->windowHeight; + cmd.windowWidth = in->windowWidth; + + vfe_prog_hw(ctrl->vfebase + VFE_STATS_AF_CFG, + (uint32_t *)&cmd, sizeof(cmd)); + + cmd2.a00 = in->highPassCoef[0]; + cmd2.a04 = in->highPassCoef[1]; + cmd2.a20 = in->highPassCoef[2]; + cmd2.a21 = in->highPassCoef[3]; + cmd2.a22 = in->highPassCoef[4]; + cmd2.a23 = in->highPassCoef[5]; + cmd2.a24 = in->highPassCoef[6]; + cmd2.fvMax = in->metricMax; + cmd2.fvMetric = in->metricSelection; + cmd2.afHeader = in->bufferHeader; + cmd2.entry00 = in->gridForMultiWindows[0]; + cmd2.entry01 = in->gridForMultiWindows[1]; + cmd2.entry02 = in->gridForMultiWindows[2]; + cmd2.entry03 = in->gridForMultiWindows[3]; + cmd2.entry10 = in->gridForMultiWindows[4]; + cmd2.entry11 = in->gridForMultiWindows[5]; + cmd2.entry12 = in->gridForMultiWindows[6]; + cmd2.entry13 = in->gridForMultiWindows[7]; + cmd2.entry20 = in->gridForMultiWindows[8]; + cmd2.entry21 = in->gridForMultiWindows[9]; + cmd2.entry22 = in->gridForMultiWindows[10]; + cmd2.entry23 = in->gridForMultiWindows[11]; + cmd2.entry30 = in->gridForMultiWindows[12]; + cmd2.entry31 = in->gridForMultiWindows[13]; + cmd2.entry32 = in->gridForMultiWindows[14]; + cmd2.entry33 = in->gridForMultiWindows[15]; + + vfe_prog_hw(ctrl->vfebase + VFE_STATS_AF_GRID_0, + (uint32_t *)&cmd2, sizeof(cmd2)); +} + +void vfe_stats_setting(struct vfe_cmd_stats_setting *in) +{ + struct vfe_statsframe cmd1; + struct vfe_busstats_wrprio cmd2; + + memset(&cmd1, 0, sizeof(cmd1)); + memset(&cmd2, 0, sizeof(cmd2)); + + ctrl->afStatsControl.addressBuffer[0] = in->afBuffer[0]; + ctrl->afStatsControl.addressBuffer[1] = in->afBuffer[1]; + ctrl->afStatsControl.nextFrameAddrBuf = in->afBuffer[2]; + + ctrl->awbStatsControl.addressBuffer[0] = in->awbBuffer[0]; + ctrl->awbStatsControl.addressBuffer[1] = in->awbBuffer[1]; + ctrl->awbStatsControl.nextFrameAddrBuf = in->awbBuffer[2]; + + cmd1.lastPixel = in->frameHDimension; + cmd1.lastLine = in->frameVDimension; + vfe_prog_hw(ctrl->vfebase + VFE_STATS_FRAME_SIZE, + (uint32_t *)&cmd1, sizeof(cmd1)); + + cmd2.afBusPriority = in->afBusPriority; + cmd2.awbBusPriority = in->awbBusPriority; + cmd2.histBusPriority = in->histBusPriority; + cmd2.afBusPriorityEn = in->afBusPrioritySelection; + cmd2.awbBusPriorityEn = in->awbBusPrioritySelection; + cmd2.histBusPriorityEn = in->histBusPrioritySelection; + + vfe_prog_hw(ctrl->vfebase + VFE_BUS_STATS_WR_PRIORITY, + (uint32_t *)&cmd2, sizeof(cmd2)); + + /* Program the bus ping pong address for statistics modules. 
*/ + writel(in->afBuffer[0], ctrl->vfebase + VFE_BUS_STATS_AF_WR_PING_ADDR); + writel(in->afBuffer[1], ctrl->vfebase + VFE_BUS_STATS_AF_WR_PONG_ADDR); + writel(in->awbBuffer[0], + ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PING_ADDR); + writel(in->awbBuffer[1], + ctrl->vfebase + VFE_BUS_STATS_AWB_WR_PONG_ADDR); + writel(in->histBuffer[0], + ctrl->vfebase + VFE_BUS_STATS_HIST_WR_PING_ADDR); + writel(in->histBuffer[1], + ctrl->vfebase + VFE_BUS_STATS_HIST_WR_PONG_ADDR); +} + +void vfe_axi_input_config(struct vfe_cmd_axi_input_config *in) +{ + struct VFE_AxiInputCmdType cmd; + uint32_t xSizeWord, axiRdUnpackPattern; + uint8_t axiInputPpw; + uint32_t busPingpongRdIrqEnable; + + ctrl->vfeImaskLocal.rdPingpongIrq = TRUE; + + switch (in->pixelSize) { + case VFE_RAW_PIXEL_DATA_SIZE_10BIT: + ctrl->axiInputDataSize = VFE_RAW_PIXEL_DATA_SIZE_10BIT; + break; + + case VFE_RAW_PIXEL_DATA_SIZE_12BIT: + ctrl->axiInputDataSize = VFE_RAW_PIXEL_DATA_SIZE_12BIT; + break; + + case VFE_RAW_PIXEL_DATA_SIZE_8BIT: + default: + ctrl->axiInputDataSize = VFE_RAW_PIXEL_DATA_SIZE_8BIT; + break; + } + + memset(&cmd, 0, sizeof(cmd)); + + switch (in->pixelSize) { + case VFE_RAW_PIXEL_DATA_SIZE_10BIT: + axiInputPpw = 6; + axiRdUnpackPattern = 0xD43210; + break; + + case VFE_RAW_PIXEL_DATA_SIZE_12BIT: + axiInputPpw = 5; + axiRdUnpackPattern = 0xC3210; + break; + + case VFE_RAW_PIXEL_DATA_SIZE_8BIT: + default: + axiInputPpw = 8; + axiRdUnpackPattern = 0xF6543210; + break; + } + + xSizeWord = + ((((in->xOffset % axiInputPpw) + in->xSize) + + (axiInputPpw - 1)) / axiInputPpw) - 1; + + cmd.stripeStartAddr0 = in->fragAddr[0]; + cmd.stripeStartAddr1 = in->fragAddr[1]; + cmd.stripeStartAddr2 = in->fragAddr[2]; + cmd.stripeStartAddr3 = in->fragAddr[3]; + cmd.ySize = in->ySize; + cmd.yOffsetDelta = 0; + cmd.xSizeWord = xSizeWord; + cmd.burstLength = 1; + cmd.NumOfRows = in->numOfRows; + cmd.RowIncrement = (in->rowIncrement + (axiInputPpw - 1)) / axiInputPpw; + cmd.mainUnpackHeight = in->ySize; + cmd.mainUnpackWidth = in->xSize - 1; + cmd.mainUnpackHbiSel = (uint32_t) in->unpackHbi; + cmd.mainUnpackPhase = in->unpackPhase; + cmd.unpackPattern = axiRdUnpackPattern; + cmd.padLeft = in->padRepeatCountLeft; + cmd.padRight = in->padRepeatCountRight; + cmd.padTop = in->padRepeatCountTop; + cmd.padBottom = in->padRepeatCountBottom; + cmd.leftUnpackPattern0 = in->padLeftComponentSelectCycle0; + cmd.leftUnpackPattern1 = in->padLeftComponentSelectCycle1; + cmd.leftUnpackPattern2 = in->padLeftComponentSelectCycle2; + cmd.leftUnpackPattern3 = in->padLeftComponentSelectCycle3; + cmd.leftUnpackStop0 = in->padLeftStopCycle0; + cmd.leftUnpackStop1 = in->padLeftStopCycle1; + cmd.leftUnpackStop2 = in->padLeftStopCycle2; + cmd.leftUnpackStop3 = in->padLeftStopCycle3; + cmd.rightUnpackPattern0 = in->padRightComponentSelectCycle0; + cmd.rightUnpackPattern1 = in->padRightComponentSelectCycle1; + cmd.rightUnpackPattern2 = in->padRightComponentSelectCycle2; + cmd.rightUnpackPattern3 = in->padRightComponentSelectCycle3; + cmd.rightUnpackStop0 = in->padRightStopCycle0; + cmd.rightUnpackStop1 = in->padRightStopCycle1; + cmd.rightUnpackStop2 = in->padRightStopCycle2; + cmd.rightUnpackStop3 = in->padRightStopCycle3; + cmd.topUnapckPattern = in->padTopLineCount; + cmd.bottomUnapckPattern = in->padBottomLineCount; + + /* program vfe_bus_cfg */ + vfe_prog_hw(ctrl->vfebase + VFE_BUS_STRIPE_RD_ADDR_0, + (uint32_t *)&cmd, sizeof(cmd)); + + /* hacking code, put it to default value */ + busPingpongRdIrqEnable = 0xf; + + writel(busPingpongRdIrqEnable, ctrl->vfebase + 
VFE_BUS_PINGPONG_IRQ_EN); +} + +void vfe_axi_output_config(struct vfe_cmd_axi_output_config *in) +{ + /* local variable */ + uint32_t *pcircle; + uint32_t *pdest; + uint32_t *psrc; + uint8_t i; + uint8_t fcnt; + uint16_t axioutpw = 8; + + /* parameters check, condition and usage mode check */ + ctrl->encPath.fragCount = in->output2.fragmentCount; + if (ctrl->encPath.fragCount > 1) + ctrl->encPath.multiFrag = TRUE; + + ctrl->viewPath.fragCount = in->output1.fragmentCount; + if (ctrl->viewPath.fragCount > 1) + ctrl->viewPath.multiFrag = TRUE; + + /* VFE_BUS_CFG. raw data size */ + ctrl->vfeBusConfigLocal.rawPixelDataSize = in->outputDataSize; + + switch (in->outputDataSize) { + case VFE_RAW_PIXEL_DATA_SIZE_8BIT: + axioutpw = 8; + break; + + case VFE_RAW_PIXEL_DATA_SIZE_10BIT: + axioutpw = 6; + break; + + case VFE_RAW_PIXEL_DATA_SIZE_12BIT: + axioutpw = 5; + break; + } + + ctrl->axiOutputMode = in->outputMode; + + CDBG("axiOutputMode = %d\n", ctrl->axiOutputMode); + + switch (ctrl->axiOutputMode) { + case VFE_AXI_OUTPUT_MODE_Output1:{ + ctrl->vfeCamifConfigLocal.camif2BusEnable = FALSE; + ctrl->vfeCamifConfigLocal.camif2OutputEnable = TRUE; + ctrl->vfeBusConfigLocal.rawWritePathSelect = + VFE_RAW_OUTPUT_DISABLED; + + ctrl->encPath.pathEnabled = FALSE; + ctrl->vfeImaskLocal.encIrq = FALSE; + ctrl->vfeIrqCompositeMaskLocal.encIrqComMask = + VFE_COMP_IRQ_BOTH_Y_CBCR; + + ctrl->vfeBusConfigLocal.encYWrPathEn = FALSE; + ctrl->vfeBusConfigLocal.encCbcrWrPathEn = FALSE; + ctrl->viewPath.pathEnabled = TRUE; + ctrl->vfeImaskLocal.viewIrq = TRUE; + ctrl->vfeIrqCompositeMaskLocal.viewIrqComMask = + VFE_COMP_IRQ_BOTH_Y_CBCR; + + ctrl->vfeBusConfigLocal.viewYWrPathEn = TRUE; + ctrl->vfeBusConfigLocal.viewCbcrWrPathEn = TRUE; + + if (ctrl->vfeBusConfigLocal.encYWrPathEn && + ctrl->encPath.multiFrag) + ctrl->vfeImaskLocal.encYPingpongIrq = TRUE; + + if (ctrl->vfeBusConfigLocal.encCbcrWrPathEn && + ctrl->encPath.multiFrag) + ctrl->vfeImaskLocal.encCbcrPingpongIrq = TRUE; + + if (ctrl->vfeBusConfigLocal.viewYWrPathEn && + ctrl->viewPath.multiFrag) + ctrl->vfeImaskLocal.viewYPingpongIrq = TRUE; + + if (ctrl->vfeBusConfigLocal.viewCbcrWrPathEn && + ctrl->viewPath.multiFrag) + ctrl->vfeImaskLocal.viewCbcrPingpongIrq = TRUE; + } /* VFE_AXI_OUTPUT_MODE_Output1 */ + break; + + case VFE_AXI_OUTPUT_MODE_Output2:{ + ctrl->vfeCamifConfigLocal.camif2BusEnable = FALSE; + ctrl->vfeCamifConfigLocal.camif2OutputEnable = TRUE; + ctrl->vfeBusConfigLocal.rawWritePathSelect = + VFE_RAW_OUTPUT_DISABLED; + + ctrl->encPath.pathEnabled = TRUE; + ctrl->vfeImaskLocal.encIrq = TRUE; + ctrl->vfeIrqCompositeMaskLocal.encIrqComMask = + VFE_COMP_IRQ_BOTH_Y_CBCR; + + ctrl->vfeBusConfigLocal.encYWrPathEn = TRUE; + ctrl->vfeBusConfigLocal.encCbcrWrPathEn = TRUE; + + ctrl->viewPath.pathEnabled = FALSE; + ctrl->vfeImaskLocal.viewIrq = FALSE; + ctrl->vfeIrqCompositeMaskLocal.viewIrqComMask = + VFE_COMP_IRQ_BOTH_Y_CBCR; + + ctrl->vfeBusConfigLocal.viewYWrPathEn = FALSE; + ctrl->vfeBusConfigLocal.viewCbcrWrPathEn = FALSE; + + if (ctrl->vfeBusConfigLocal.encYWrPathEn && + ctrl->encPath.multiFrag) + ctrl->vfeImaskLocal.encYPingpongIrq = TRUE; + + if (ctrl->vfeBusConfigLocal.encCbcrWrPathEn && + ctrl->encPath.multiFrag) + ctrl->vfeImaskLocal.encCbcrPingpongIrq = TRUE; + + if (ctrl->vfeBusConfigLocal.viewYWrPathEn && + ctrl->viewPath.multiFrag) + ctrl->vfeImaskLocal.viewYPingpongIrq = TRUE; + + if (ctrl->vfeBusConfigLocal.viewCbcrWrPathEn && + ctrl->viewPath.multiFrag) + ctrl->vfeImaskLocal.viewCbcrPingpongIrq = TRUE; + } /* 
VFE_AXI_OUTPUT_MODE_Output2 */ + break; + + case VFE_AXI_OUTPUT_MODE_Output1AndOutput2:{ + ctrl->vfeCamifConfigLocal.camif2BusEnable = FALSE; + ctrl->vfeCamifConfigLocal.camif2OutputEnable = TRUE; + ctrl->vfeBusConfigLocal.rawWritePathSelect = + VFE_RAW_OUTPUT_DISABLED; + + ctrl->encPath.pathEnabled = TRUE; + ctrl->vfeImaskLocal.encIrq = TRUE; + ctrl->vfeIrqCompositeMaskLocal.encIrqComMask = + VFE_COMP_IRQ_BOTH_Y_CBCR; + + ctrl->vfeBusConfigLocal.encYWrPathEn = TRUE; + ctrl->vfeBusConfigLocal.encCbcrWrPathEn = TRUE; + ctrl->viewPath.pathEnabled = TRUE; + ctrl->vfeImaskLocal.viewIrq = TRUE; + ctrl->vfeIrqCompositeMaskLocal.viewIrqComMask = + VFE_COMP_IRQ_BOTH_Y_CBCR; + + ctrl->vfeBusConfigLocal.viewYWrPathEn = TRUE; + ctrl->vfeBusConfigLocal.viewCbcrWrPathEn = TRUE; + + if (ctrl->vfeBusConfigLocal.encYWrPathEn && + ctrl->encPath.multiFrag) + ctrl->vfeImaskLocal.encYPingpongIrq = TRUE; + + if (ctrl->vfeBusConfigLocal.encCbcrWrPathEn && + ctrl->encPath.multiFrag) + ctrl->vfeImaskLocal.encCbcrPingpongIrq = TRUE; + + if (ctrl->vfeBusConfigLocal.viewYWrPathEn && + ctrl->viewPath.multiFrag) + ctrl->vfeImaskLocal.viewYPingpongIrq = TRUE; + + if (ctrl->vfeBusConfigLocal.viewCbcrWrPathEn && + ctrl->viewPath.multiFrag) + ctrl->vfeImaskLocal.viewCbcrPingpongIrq = TRUE; + } /* VFE_AXI_OUTPUT_MODE_Output1AndOutput2 */ + break; + + case VFE_AXI_OUTPUT_MODE_CAMIFToAXIViaOutput2:{ + /* For raw snapshot, we need both ping and pong buffer + * initialized to the same address. Otherwise, if we + * leave the pong buffer to NULL, there will be + * axi_error. + * Note that ideally we should deal with this at upper + * layer, which is in msm_vfe8x.c */ + if (!in->output2.outputCbcr.outFragments[1][0]) { + in->output2.outputCbcr.outFragments[1][0] = + in->output2.outputCbcr.outFragments[0][0]; + } + + ctrl->vfeCamifConfigLocal.camif2BusEnable = TRUE; + ctrl->vfeCamifConfigLocal.camif2OutputEnable = FALSE; + ctrl->vfeBusConfigLocal.rawWritePathSelect = + VFE_RAW_OUTPUT_ENC_CBCR_PATH; + + ctrl->encPath.pathEnabled = TRUE; + ctrl->vfeImaskLocal.encIrq = TRUE; + ctrl->vfeIrqCompositeMaskLocal.encIrqComMask = + VFE_COMP_IRQ_CBCR_ONLY; + + ctrl->vfeBusConfigLocal.encYWrPathEn = FALSE; + ctrl->vfeBusConfigLocal.encCbcrWrPathEn = TRUE; + + ctrl->viewPath.pathEnabled = FALSE; + ctrl->vfeImaskLocal.viewIrq = FALSE; + ctrl->vfeIrqCompositeMaskLocal.viewIrqComMask = + VFE_COMP_IRQ_BOTH_Y_CBCR; + + ctrl->vfeBusConfigLocal.viewYWrPathEn = FALSE; + ctrl->vfeBusConfigLocal.viewCbcrWrPathEn = FALSE; + + if (ctrl->vfeBusConfigLocal.encYWrPathEn && + ctrl->encPath.multiFrag) + ctrl->vfeImaskLocal.encYPingpongIrq = TRUE; + + if (ctrl->vfeBusConfigLocal.encCbcrWrPathEn && + ctrl->encPath.multiFrag) + ctrl->vfeImaskLocal.encCbcrPingpongIrq = TRUE; + + if (ctrl->vfeBusConfigLocal.viewYWrPathEn && + ctrl->viewPath.multiFrag) + ctrl->vfeImaskLocal.viewYPingpongIrq = TRUE; + + if (ctrl->vfeBusConfigLocal.viewCbcrWrPathEn && + ctrl->viewPath.multiFrag) + ctrl->vfeImaskLocal.viewCbcrPingpongIrq = TRUE; + } /* VFE_AXI_OUTPUT_MODE_CAMIFToAXIViaOutput2 */ + break; + + case VFE_AXI_OUTPUT_MODE_Output2AndCAMIFToAXIViaOutput1:{ + ctrl->vfeCamifConfigLocal.camif2BusEnable = TRUE; + ctrl->vfeCamifConfigLocal.camif2OutputEnable = TRUE; + ctrl->vfeBusConfigLocal.rawWritePathSelect = + VFE_RAW_OUTPUT_VIEW_CBCR_PATH; + + ctrl->encPath.pathEnabled = TRUE; + ctrl->vfeImaskLocal.encIrq = TRUE; + ctrl->vfeIrqCompositeMaskLocal.encIrqComMask = + VFE_COMP_IRQ_BOTH_Y_CBCR; + + ctrl->vfeBusConfigLocal.encYWrPathEn = TRUE; + 
ctrl->vfeBusConfigLocal.encCbcrWrPathEn = TRUE; + + ctrl->viewPath.pathEnabled = TRUE; + ctrl->vfeImaskLocal.viewIrq = TRUE; + ctrl->vfeIrqCompositeMaskLocal.viewIrqComMask = + VFE_COMP_IRQ_CBCR_ONLY; + + ctrl->vfeBusConfigLocal.viewYWrPathEn = FALSE; + ctrl->vfeBusConfigLocal.viewCbcrWrPathEn = TRUE; + + if (ctrl->vfeBusConfigLocal.encYWrPathEn && + ctrl->encPath.multiFrag) + ctrl->vfeImaskLocal.encYPingpongIrq = TRUE; + + if (ctrl->vfeBusConfigLocal.encCbcrWrPathEn && + ctrl->encPath.multiFrag) + ctrl->vfeImaskLocal.encCbcrPingpongIrq = TRUE; + + if (ctrl->vfeBusConfigLocal.viewYWrPathEn && + ctrl->viewPath.multiFrag) + ctrl->vfeImaskLocal.viewYPingpongIrq = TRUE; + + if (ctrl->vfeBusConfigLocal.viewCbcrWrPathEn && + ctrl->viewPath.multiFrag) + ctrl->vfeImaskLocal.viewCbcrPingpongIrq = TRUE; + } /* VFE_AXI_OUTPUT_MODE_Output2AndCAMIFToAXIViaOutput1 */ + break; + + case VFE_AXI_OUTPUT_MODE_Output1AndCAMIFToAXIViaOutput2:{ + ctrl->vfeCamifConfigLocal.camif2BusEnable = TRUE; + ctrl->vfeCamifConfigLocal.camif2OutputEnable = TRUE; + ctrl->vfeBusConfigLocal.rawWritePathSelect = + VFE_RAW_OUTPUT_ENC_CBCR_PATH; + + ctrl->encPath.pathEnabled = TRUE; + ctrl->vfeImaskLocal.encIrq = TRUE; + ctrl->vfeIrqCompositeMaskLocal.encIrqComMask = + VFE_COMP_IRQ_CBCR_ONLY; + + ctrl->vfeBusConfigLocal.encYWrPathEn = FALSE; + ctrl->vfeBusConfigLocal.encCbcrWrPathEn = TRUE; + + ctrl->viewPath.pathEnabled = TRUE; + ctrl->vfeImaskLocal.viewIrq = TRUE; + + ctrl->vfeIrqCompositeMaskLocal.viewIrqComMask = + VFE_COMP_IRQ_BOTH_Y_CBCR; + + ctrl->vfeBusConfigLocal.viewYWrPathEn = TRUE; + ctrl->vfeBusConfigLocal.viewCbcrWrPathEn = TRUE; + + if (ctrl->vfeBusConfigLocal.encYWrPathEn && + ctrl->encPath.multiFrag) + ctrl->vfeImaskLocal.encYPingpongIrq = TRUE; + + if (ctrl->vfeBusConfigLocal.encCbcrWrPathEn && + ctrl->encPath.multiFrag) + ctrl->vfeImaskLocal.encCbcrPingpongIrq = TRUE; + + if (ctrl->vfeBusConfigLocal.viewYWrPathEn && + ctrl->viewPath.multiFrag) + ctrl->vfeImaskLocal.viewYPingpongIrq = TRUE; + + if (ctrl->vfeBusConfigLocal.viewCbcrWrPathEn && + ctrl->viewPath.multiFrag) + ctrl->vfeImaskLocal.viewCbcrPingpongIrq = TRUE; + } /* VFE_AXI_OUTPUT_MODE_Output1AndCAMIFToAXIViaOutput2 */ + break; + + case VFE_AXI_LAST_OUTPUT_MODE_ENUM: + break; + } /* switch */ + + /* Save the addresses for each path. 
*/ + /* output2 path */ + fcnt = ctrl->encPath.fragCount; + + pcircle = ctrl->encPath.yPath.addressBuffer; + pdest = ctrl->encPath.nextFrameAddrBuf; + + psrc = &(in->output2.outputY.outFragments[0][0]); + for (i = 0; i < fcnt; i++) + *pcircle++ = *psrc++; + + psrc = &(in->output2.outputY.outFragments[1][0]); + for (i = 0; i < fcnt; i++) + *pcircle++ = *psrc++; + + psrc = &(in->output2.outputY.outFragments[2][0]); + for (i = 0; i < fcnt; i++) + *pdest++ = *psrc++; + + pcircle = ctrl->encPath.cbcrPath.addressBuffer; + + psrc = &(in->output2.outputCbcr.outFragments[0][0]); + for (i = 0; i < fcnt; i++) + *pcircle++ = *psrc++; + + psrc = &(in->output2.outputCbcr.outFragments[1][0]); + for (i = 0; i < fcnt; i++) + *pcircle++ = *psrc++; + + psrc = &(in->output2.outputCbcr.outFragments[2][0]); + for (i = 0; i < fcnt; i++) + *pdest++ = *psrc++; + + vfe_set_bus_pipo_addr(&ctrl->viewPath, &ctrl->encPath); + + ctrl->encPath.ackPending = FALSE; + ctrl->encPath.currentFrame = ping; + ctrl->encPath.whichOutputPath = 1; + ctrl->encPath.yPath.fragIndex = 2; + ctrl->encPath.cbcrPath.fragIndex = 2; + ctrl->encPath.yPath.hwCurrentFlag = ping; + ctrl->encPath.cbcrPath.hwCurrentFlag = ping; + + /* output1 path */ + pcircle = ctrl->viewPath.yPath.addressBuffer; + pdest = ctrl->viewPath.nextFrameAddrBuf; + fcnt = ctrl->viewPath.fragCount; + + psrc = &(in->output1.outputY.outFragments[0][0]); + for (i = 0; i < fcnt; i++) + *pcircle++ = *psrc++; + + psrc = &(in->output1.outputY.outFragments[1][0]); + for (i = 0; i < fcnt; i++) + *pcircle++ = *psrc++; + + psrc = &(in->output1.outputY.outFragments[2][0]); + for (i = 0; i < fcnt; i++) + *pdest++ = *psrc++; + + pcircle = ctrl->viewPath.cbcrPath.addressBuffer; + + psrc = &(in->output1.outputCbcr.outFragments[0][0]); + for (i = 0; i < fcnt; i++) + *pcircle++ = *psrc++; + + psrc = &(in->output1.outputCbcr.outFragments[1][0]); + for (i = 0; i < fcnt; i++) + *pcircle++ = *psrc++; + + psrc = &(in->output1.outputCbcr.outFragments[2][0]); + for (i = 0; i < fcnt; i++) + *pdest++ = *psrc++; + + ctrl->viewPath.ackPending = FALSE; + ctrl->viewPath.currentFrame = ping; + ctrl->viewPath.whichOutputPath = 0; + ctrl->viewPath.yPath.fragIndex = 2; + ctrl->viewPath.cbcrPath.fragIndex = 2; + ctrl->viewPath.yPath.hwCurrentFlag = ping; + ctrl->viewPath.cbcrPath.hwCurrentFlag = ping; + + /* call to program the registers. */ + vfe_axi_output(in, &ctrl->viewPath, &ctrl->encPath, axioutpw); +} + +void vfe_epoch1_config(struct vfe_cmds_camif_epoch *in) +{ + struct vfe_epoch1cfg cmd; + memset(&cmd, 0, sizeof(cmd)); + /* determine if epoch interrupt needs to be enabled. */ + if (in->enable == TRUE) { + cmd.epoch1Line = in->lineindex; + vfe_prog_hw(ctrl->vfebase + CAMIF_EPOCH_IRQ, (uint32_t *)&cmd, + sizeof(cmd)); + } + + /* Set the epoch1 interrupt mask. */ + ctrl->vfeImaskLocal.camifEpoch1Irq = in->enable; + ctrl->vfeImaskPacked = vfe_irq_pack(ctrl->vfeImaskLocal); + vfe_program_irq_mask(ctrl->vfeImaskPacked); + + /* Store the epoch1 data. 
*/ + ctrl->vfeCamifEpoch1Local.enable = in->enable; + ctrl->vfeCamifEpoch1Local.lineindex = in->lineindex; +} + +void vfe_camif_config(struct vfe_cmd_camif_config *in) +{ + struct vfe_camifcfg cmd; + memset(&cmd, 0, sizeof(cmd)); + + CDBG("camif.frame pixelsPerLine = %d\n", in->frame.pixelsPerLine); + CDBG("camif.frame linesPerFrame = %d\n", in->frame.linesPerFrame); + CDBG("camif.window firstpixel = %d\n", in->window.firstpixel); + CDBG("camif.window lastpixel = %d\n", in->window.lastpixel); + CDBG("camif.window firstline = %d\n", in->window.firstline); + CDBG("camif.window lastline = %d\n", in->window.lastline); + + /* determine if epoch interrupt needs to be enabled. */ + if ((in->epoch1.enable == TRUE) && + (in->epoch1.lineindex <= in->frame.linesPerFrame)) + ctrl->vfeImaskLocal.camifEpoch1Irq = 1; + + if ((in->epoch2.enable == TRUE) && + (in->epoch2.lineindex <= in->frame.linesPerFrame)) { + ctrl->vfeImaskLocal.camifEpoch2Irq = 1; + } + + /* save the content to program CAMIF_CONFIG separately. */ + ctrl->vfeCamifConfigLocal.camifCfgFromCmd = in->camifConfig; + + /* EFS_Config */ + cmd.efsEndOfLine = in->EFS.efsendofline; + cmd.efsStartOfLine = in->EFS.efsstartofline; + cmd.efsEndOfFrame = in->EFS.efsendofframe; + cmd.efsStartOfFrame = in->EFS.efsstartofframe; + + /* Frame Config */ + cmd.frameConfigPixelsPerLine = in->frame.pixelsPerLine; + cmd.frameConfigLinesPerFrame = in->frame.linesPerFrame; + + /* Window Width Config */ + cmd.windowWidthCfgLastPixel = in->window.lastpixel; + cmd.windowWidthCfgFirstPixel = in->window.firstpixel; + + /* Window Height Config */ + cmd.windowHeightCfglastLine = in->window.lastline; + cmd.windowHeightCfgfirstLine = in->window.firstline; + + /* Subsample 1 Config */ + cmd.subsample1CfgPixelSkip = in->subsample.pixelskipmask; + cmd.subsample1CfgLineSkip = in->subsample.lineskipmask; + + /* Subsample 2 Config */ + cmd.subsample2CfgFrameSkip = in->subsample.frameskip; + cmd.subsample2CfgFrameSkipMode = in->subsample.frameskipmode; + cmd.subsample2CfgPixelSkipWrap = in->subsample.pixelskipwrap; + + /* Epoch Interrupt */ + cmd.epoch1Line = in->epoch1.lineindex; + cmd.epoch2Line = in->epoch2.lineindex; + + vfe_prog_hw(ctrl->vfebase + CAMIF_EFS_CONFIG, + (uint32_t *)&cmd, sizeof(cmd)); +} + +void vfe_fov_crop_config(struct vfe_cmd_fov_crop_config *in) +{ + struct vfe_fov_crop_cfg cmd; + memset(&cmd, 0, sizeof(cmd)); + + ctrl->vfeModuleEnableLocal.cropEnable = in->enable; + + /* FOV Crop, Part 1 */ + cmd.lastPixel = in->lastPixel; + cmd.firstPixel = in->firstPixel; + + /* FOV Crop, Part 2 */ + cmd.lastLine = in->lastLine; + cmd.firstLine = in->firstLine; + + vfe_prog_hw(ctrl->vfebase + VFE_CROP_WIDTH_CFG, + (uint32_t *)&cmd, sizeof(cmd)); +} + +void vfe_get_hw_version(struct vfe_cmd_hw_version *out) +{ + uint32_t vfeHwVersionPacked; + struct vfe_hw_ver ver; + + vfeHwVersionPacked = readl(ctrl->vfebase + VFE_HW_VERSION); + + ver = *((struct vfe_hw_ver *)&vfeHwVersionPacked); + + out->coreVersion = ver.coreVersion; + out->minorVersion = ver.minorVersion; + out->majorVersion = ver.majorVersion; +} + +static void vfe_reset_internal_variables(void) +{ + /* local variables to program the hardware. */ + ctrl->vfeImaskPacked = 0; + ctrl->vfeImaskCompositePacked = 0; + + /* FALSE = disable, TRUE = enable. 
*/ + memset(&ctrl->vfeModuleEnableLocal, 0, + sizeof(ctrl->vfeModuleEnableLocal)); + + /* 0 = disable, 1 = enable */ + memset(&ctrl->vfeCamifConfigLocal, 0, + sizeof(ctrl->vfeCamifConfigLocal)); + /* 0 = disable, 1 = enable */ + memset(&ctrl->vfeImaskLocal, 0, sizeof(ctrl->vfeImaskLocal)); + memset(&ctrl->vfeStatsCmdLocal, 0, sizeof(ctrl->vfeStatsCmdLocal)); + memset(&ctrl->vfeBusConfigLocal, 0, sizeof(ctrl->vfeBusConfigLocal)); + memset(&ctrl->vfeBusPmConfigLocal, 0, + sizeof(ctrl->vfeBusPmConfigLocal)); + memset(&ctrl->vfeBusCmdLocal, 0, sizeof(ctrl->vfeBusCmdLocal)); + memset(&ctrl->vfeInterruptNameLocal, 0, + sizeof(ctrl->vfeInterruptNameLocal)); + memset(&ctrl->vfeDroppedFrameCounts, 0, + sizeof(ctrl->vfeDroppedFrameCounts)); + memset(&ctrl->vfeIrqThreadMsgLocal, 0, + sizeof(ctrl->vfeIrqThreadMsgLocal)); + + /* state control variables */ + ctrl->vfeStartAckPendingFlag = FALSE; + ctrl->vfeStopAckPending = FALSE; + ctrl->vfeIrqCompositeMaskLocal.ceDoneSel = 0; + ctrl->vfeIrqCompositeMaskLocal.encIrqComMask = VFE_COMP_IRQ_BOTH_Y_CBCR; + ctrl->vfeIrqCompositeMaskLocal.viewIrqComMask = + VFE_COMP_IRQ_BOTH_Y_CBCR; + + ctrl->vstate = VFE_STATE_IDLE; + + ctrl->axiOutputMode = VFE_AXI_LAST_OUTPUT_MODE_ENUM; + /* 0 for continuous mode, 1 for snapshot mode */ + ctrl->vfeOperationMode = VFE_START_OPERATION_MODE_CONTINUOUS; + ctrl->vfeSnapShotCount = 0; + ctrl->vfeStatsPingPongReloadFlag = FALSE; + /* this is unsigned 32 bit integer. */ + ctrl->vfeFrameId = 0; + ctrl->vfeFrameSkip.output1Pattern = 0xffffffff; + ctrl->vfeFrameSkip.output1Period = 31; + ctrl->vfeFrameSkip.output2Pattern = 0xffffffff; + ctrl->vfeFrameSkip.output2Period = 31; + ctrl->vfeFrameSkipPattern = 0xffffffff; + ctrl->vfeFrameSkipCount = 0; + ctrl->vfeFrameSkipPeriod = 31; + + memset((void *)&ctrl->encPath, 0, sizeof(ctrl->encPath)); + memset((void *)&ctrl->viewPath, 0, sizeof(ctrl->viewPath)); + + ctrl->encPath.whichOutputPath = 1; + ctrl->encPath.cbcrStatusBit = 5; + ctrl->viewPath.whichOutputPath = 0; + ctrl->viewPath.cbcrStatusBit = 7; + + ctrl->vfeTestGenStartFlag = FALSE; + + /* default to bank 0. */ + ctrl->vfeLaBankSel = 0; + + /* default to bank 0 for all channels. */ + memset(&ctrl->vfeGammaLutSel, 0, sizeof(ctrl->vfeGammaLutSel)); + + /* Stats control variables. */ + memset(&ctrl->afStatsControl, 0, sizeof(ctrl->afStatsControl)); + memset(&ctrl->awbStatsControl, 0, sizeof(ctrl->awbStatsControl)); + vfe_set_stats_pingpong_address(&ctrl->afStatsControl, + &ctrl->awbStatsControl); +} + +void vfe_reset(void) +{ + vfe_reset_internal_variables(); + + ctrl->vfeImaskLocal.resetAckIrq = TRUE; + ctrl->vfeImaskPacked = vfe_irq_pack(ctrl->vfeImaskLocal); + + /* disable all interrupts. */ + writel(VFE_DISABLE_ALL_IRQS, ctrl->vfebase + VFE_IRQ_COMPOSITE_MASK); + + /* clear all pending interrupts */ + writel(VFE_CLEAR_ALL_IRQS, ctrl->vfebase + VFE_IRQ_CLEAR); + + /* enable reset_ack interrupt. */ + writel(ctrl->vfeImaskPacked, ctrl->vfebase + VFE_IRQ_MASK); + + writel(VFE_RESET_UPON_RESET_CMD, ctrl->vfebase + VFE_GLOBAL_RESET_CMD); +} diff --git a/drivers/media/video/msm/msm_vfe8x_proc.h b/drivers/media/video/msm/msm_vfe8x_proc.h new file mode 100644 index 0000000000000..27db7df28e194 --- /dev/null +++ b/drivers/media/video/msm/msm_vfe8x_proc.h @@ -0,0 +1,1574 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#ifndef __MSM_VFE8X_REG_H__ +#define __MSM_VFE8X_REG_H__ + +#include +#include +#include "msm_vfe8x.h" + +/* at start of camif, bit 1:0 = 0x01:enable + * image data capture at frame boundary. */ +#define CAMIF_COMMAND_START 0x00000005 + +/* bit 2= 0x1:clear the CAMIF_STATUS register + * value. */ +#define CAMIF_COMMAND_CLEAR 0x00000004 + +/* at stop of vfe pipeline, for now it is assumed + * that camif will stop at any time. Bit 1:0 = 0x10: + * disable image data capture immediately. */ +#define CAMIF_COMMAND_STOP_IMMEDIATELY 0x00000002 + +/* at stop of vfe pipeline, for now it is assumed + * that camif will stop at any time. Bit 1:0 = 0x00: + * disable image data capture at frame boundary */ +#define CAMIF_COMMAND_STOP_AT_FRAME_BOUNDARY 0x00000000 + +/* to halt axi bridge */ +#define AXI_HALT 0x00000001 + +/* clear the halt bit. */ +#define AXI_HALT_CLEAR 0x00000000 + +/* reset the pipeline when stop command is issued. + * (without reset the register.) bit 26-31 = 0, + * domain reset, bit 0-9 = 1 for module reset, except + * register module. */ +#define VFE_RESET_UPON_STOP_CMD 0x000003ef + +/* reset the pipeline when reset command. + * bit 26-31 = 0, domain reset, bit 0-9 = 1 for module reset. */ +#define VFE_RESET_UPON_RESET_CMD 0x000003ff + +/* bit 5 is for axi status idle or busy. + * 1 = halted, 0 = busy */ +#define AXI_STATUS_BUSY_MASK 0x00000020 + +/* bit 0 & bit 1 = 1, both y and cbcr irqs need to be present + * for frame done interrupt */ +#define VFE_COMP_IRQ_BOTH_Y_CBCR 3 + +/* bit 1 = 1, only cbcr irq triggers frame done interrupt */ +#define VFE_COMP_IRQ_CBCR_ONLY 2 + +/* bit 0 = 1, only y irq triggers frame done interrupt */ +#define VFE_COMP_IRQ_Y_ONLY 1 + +/* bit 0 = 1, PM go; bit1 = 1, PM stop */ +#define VFE_PERFORMANCE_MONITOR_GO 0x00000001 +#define VFE_PERFORMANCE_MONITOR_STOP 0x00000002 + +/* bit 0 = 1, test gen go; bit1 = 1, test gen stop */ +#define VFE_TEST_GEN_GO 0x00000001 +#define VFE_TEST_GEN_STOP 0x00000002 + +/* the chroma is assumed to be interpolated between + * the luma samples. JPEG 4:2:2 */ +#define VFE_CHROMA_UPSAMPLE_INTERPOLATED 0 + +/* constants for irq registers */ +#define VFE_DISABLE_ALL_IRQS 0 +/* bit =1 is to clear the corresponding bit in VFE_IRQ_STATUS. */ +#define VFE_CLEAR_ALL_IRQS 0xffffffff +/* imask for while waiting for stop ack, driver has already + * requested stop, waiting for reset irq, + * bit 29,28,27,26 for async timer, bit 9 for reset */ +#define VFE_IMASK_WHILE_STOPPING 0x3c000200 + +/* when normal case, don't want to block error status. 
+ * bit 0,6,20,21,22,30,31 */ +#define VFE_IMASK_ERROR_ONLY 0xC0700041 +#define VFE_REG_UPDATE_TRIGGER 1 +#define VFE_PM_BUF_MAX_CNT_MASK 0xFF +#define VFE_DMI_CFG_DEFAULT 0x00000100 +#define LENS_ROLL_OFF_DELTA_TABLE_OFFSET 32 +#define VFE_AF_PINGPONG_STATUS_BIT 0x100 +#define VFE_AWB_PINGPONG_STATUS_BIT 0x200 + +/* VFE I/O registers */ +enum { + VFE_HW_VERSION = 0x00000000, + VFE_GLOBAL_RESET_CMD = 0x00000004, + VFE_MODULE_RESET = 0x00000008, + VFE_CGC_OVERRIDE = 0x0000000C, + VFE_MODULE_CFG = 0x00000010, + VFE_CFG = 0x00000014, + VFE_IRQ_MASK = 0x00000018, + VFE_IRQ_CLEAR = 0x0000001C, + VFE_IRQ_STATUS = 0x00000020, + VFE_IRQ_COMPOSITE_MASK = 0x00000024, + VFE_BUS_CMD = 0x00000028, + VFE_BUS_CFG = 0x0000002C, + VFE_BUS_ENC_Y_WR_PING_ADDR = 0x00000030, + VFE_BUS_ENC_Y_WR_PONG_ADDR = 0x00000034, + VFE_BUS_ENC_Y_WR_IMAGE_SIZE = 0x00000038, + VFE_BUS_ENC_Y_WR_BUFFER_CFG = 0x0000003C, + VFE_BUS_ENC_CBCR_WR_PING_ADDR = 0x00000040, + VFE_BUS_ENC_CBCR_WR_PONG_ADDR = 0x00000044, + VFE_BUS_ENC_CBCR_WR_IMAGE_SIZE = 0x00000048, + VFE_BUS_ENC_CBCR_WR_BUFFER_CFG = 0x0000004C, + VFE_BUS_VIEW_Y_WR_PING_ADDR = 0x00000050, + VFE_BUS_VIEW_Y_WR_PONG_ADDR = 0x00000054, + VFE_BUS_VIEW_Y_WR_IMAGE_SIZE = 0x00000058, + VFE_BUS_VIEW_Y_WR_BUFFER_CFG = 0x0000005C, + VFE_BUS_VIEW_CBCR_WR_PING_ADDR = 0x00000060, + VFE_BUS_VIEW_CBCR_WR_PONG_ADDR = 0x00000064, + VFE_BUS_VIEW_CBCR_WR_IMAGE_SIZE = 0x00000068, + VFE_BUS_VIEW_CBCR_WR_BUFFER_CFG = 0x0000006C, + VFE_BUS_STATS_AF_WR_PING_ADDR = 0x00000070, + VFE_BUS_STATS_AF_WR_PONG_ADDR = 0x00000074, + VFE_BUS_STATS_AWB_WR_PING_ADDR = 0x00000078, + VFE_BUS_STATS_AWB_WR_PONG_ADDR = 0x0000007C, + VFE_BUS_STATS_HIST_WR_PING_ADDR = 0x00000080, + VFE_BUS_STATS_HIST_WR_PONG_ADDR = 0x00000084, + VFE_BUS_STATS_WR_PRIORITY = 0x00000088, + VFE_BUS_STRIPE_RD_ADDR_0 = 0x0000008C, + VFE_BUS_STRIPE_RD_ADDR_1 = 0x00000090, + VFE_BUS_STRIPE_RD_ADDR_2 = 0x00000094, + VFE_BUS_STRIPE_RD_ADDR_3 = 0x00000098, + VFE_BUS_STRIPE_RD_VSIZE = 0x0000009C, + VFE_BUS_STRIPE_RD_HSIZE = 0x000000A0, + VFE_BUS_STRIPE_RD_BUFFER_CFG = 0x000000A4, + VFE_BUS_STRIPE_RD_UNPACK_CFG = 0x000000A8, + VFE_BUS_STRIPE_RD_UNPACK = 0x000000AC, + VFE_BUS_STRIPE_RD_PAD_SIZE = 0x000000B0, + VFE_BUS_STRIPE_RD_PAD_L_UNPACK = 0x000000B4, + VFE_BUS_STRIPE_RD_PAD_R_UNPACK = 0x000000B8, + VFE_BUS_STRIPE_RD_PAD_TB_UNPACK = 0x000000BC, + VFE_BUS_PINGPONG_IRQ_EN = 0x000000C0, + VFE_BUS_PINGPONG_STATUS = 0x000000C4, + VFE_BUS_PM_CMD = 0x000000C8, + VFE_BUS_PM_CFG = 0x000000CC, + VFE_BUS_ENC_Y_WR_PM_STATS_0 = 0x000000D0, + VFE_BUS_ENC_Y_WR_PM_STATS_1 = 0x000000D4, + VFE_BUS_ENC_CBCR_WR_PM_STATS_0 = 0x000000D8, + VFE_BUS_ENC_CBCR_WR_PM_STATS_1 = 0x000000DC, + VFE_BUS_VIEW_Y_WR_PM_STATS_0 = 0x000000E0, + VFE_BUS_VIEW_Y_WR_PM_STATS_1 = 0x000000E4, + VFE_BUS_VIEW_CBCR_WR_PM_STATS_0 = 0x000000E8, + VFE_BUS_VIEW_CBCR_WR_PM_STATS_1 = 0x000000EC, + VFE_BUS_MISR_CFG = 0x000000F4, + VFE_BUS_MISR_MAST_CFG_0 = 0x000000F8, + VFE_BUS_MISR_MAST_CFG_1 = 0x000000FC, + VFE_BUS_MISR_RD_VAL = 0x00000100, + VFE_AXI_CMD = 0x00000104, + VFE_AXI_CFG = 0x00000108, + VFE_AXI_STATUS = 0x0000010C, + CAMIF_COMMAND = 0x00000110, + CAMIF_CONFIG = 0x00000114, + CAMIF_EFS_CONFIG = 0x00000118, + CAMIF_FRAME_CONFIG = 0x0000011C, + CAMIF_WINDOW_WIDTH_CONFIG = 0x00000120, + CAMIF_WINDOW_HEIGHT_CONFIG = 0x00000124, + CAMIF_SUBSAMPLE1_CONFIG = 0x00000128, + CAMIF_SUBSAMPLE2_CONFIG = 0x0000012C, + CAMIF_EPOCH_IRQ = 0x00000130, + CAMIF_STATUS = 0x00000134, + CAMIF_MISR = 0x00000138, + VFE_SYNC_TIMER_CMD = 0x0000013C, + VFE_SYNC_TIMER0_LINE_START = 0x00000140, + 
VFE_SYNC_TIMER0_PIXEL_START = 0x00000144, + VFE_SYNC_TIMER0_PIXEL_DURATION = 0x00000148, + VFE_SYNC_TIMER1_LINE_START = 0x0000014C, + VFE_SYNC_TIMER1_PIXEL_START = 0x00000150, + VFE_SYNC_TIMER1_PIXEL_DURATION = 0x00000154, + VFE_SYNC_TIMER2_LINE_START = 0x00000158, + VFE_SYNC_TIMER2_PIXEL_START = 0x0000015C, + VFE_SYNC_TIMER2_PIXEL_DURATION = 0x00000160, + VFE_SYNC_TIMER_POLARITY = 0x00000164, + VFE_ASYNC_TIMER_CMD = 0x00000168, + VFE_ASYNC_TIMER0_CFG_0 = 0x0000016C, + VFE_ASYNC_TIMER0_CFG_1 = 0x00000170, + VFE_ASYNC_TIMER1_CFG_0 = 0x00000174, + VFE_ASYNC_TIMER1_CFG_1 = 0x00000178, + VFE_ASYNC_TIMER2_CFG_0 = 0x0000017C, + VFE_ASYNC_TIMER2_CFG_1 = 0x00000180, + VFE_ASYNC_TIMER3_CFG_0 = 0x00000184, + VFE_ASYNC_TIMER3_CFG_1 = 0x00000188, + VFE_TIMER_SEL = 0x0000018C, + VFE_REG_UPDATE_CMD = 0x00000190, + VFE_BLACK_EVEN_EVEN_VALUE = 0x00000194, + VFE_BLACK_EVEN_ODD_VALUE = 0x00000198, + VFE_BLACK_ODD_EVEN_VALUE = 0x0000019C, + VFE_BLACK_ODD_ODD_VALUE = 0x000001A0, + VFE_ROLLOFF_CFG_0 = 0x000001A4, + VFE_ROLLOFF_CFG_1 = 0x000001A8, + VFE_ROLLOFF_CFG_2 = 0x000001AC, + VFE_DEMUX_CFG = 0x000001B0, + VFE_DEMUX_GAIN_0 = 0x000001B4, + VFE_DEMUX_GAIN_1 = 0x000001B8, + VFE_DEMUX_EVEN_CFG = 0x000001BC, + VFE_DEMUX_ODD_CFG = 0x000001C0, + VFE_DEMOSAIC_CFG = 0x000001C4, + VFE_DEMOSAIC_ABF_CFG_0 = 0x000001C8, + VFE_DEMOSAIC_ABF_CFG_1 = 0x000001CC, + VFE_DEMOSAIC_BPC_CFG_0 = 0x000001D0, + VFE_DEMOSAIC_BPC_CFG_1 = 0x000001D4, + VFE_DEMOSAIC_STATUS = 0x000001D8, + VFE_CHROMA_UPSAMPLE_CFG = 0x000001DC, + VFE_CROP_WIDTH_CFG = 0x000001E0, + VFE_CROP_HEIGHT_CFG = 0x000001E4, + VFE_COLOR_CORRECT_COEFF_0 = 0x000001E8, + VFE_COLOR_CORRECT_COEFF_1 = 0x000001EC, + VFE_COLOR_CORRECT_COEFF_2 = 0x000001F0, + VFE_COLOR_CORRECT_COEFF_3 = 0x000001F4, + VFE_COLOR_CORRECT_COEFF_4 = 0x000001F8, + VFE_COLOR_CORRECT_COEFF_5 = 0x000001FC, + VFE_COLOR_CORRECT_COEFF_6 = 0x00000200, + VFE_COLOR_CORRECT_COEFF_7 = 0x00000204, + VFE_COLOR_CORRECT_COEFF_8 = 0x00000208, + VFE_COLOR_CORRECT_OFFSET_0 = 0x0000020C, + VFE_COLOR_CORRECT_OFFSET_1 = 0x00000210, + VFE_COLOR_CORRECT_OFFSET_2 = 0x00000214, + VFE_COLOR_CORRECT_COEFF_Q = 0x00000218, + VFE_LA_CFG = 0x0000021C, + VFE_LUT_BANK_SEL = 0x00000220, + VFE_CHROMA_ENHAN_A = 0x00000224, + VFE_CHROMA_ENHAN_B = 0x00000228, + VFE_CHROMA_ENHAN_C = 0x0000022C, + VFE_CHROMA_ENHAN_D = 0x00000230, + VFE_CHROMA_ENHAN_K = 0x00000234, + VFE_COLOR_CONVERT_COEFF_0 = 0x00000238, + VFE_COLOR_CONVERT_COEFF_1 = 0x0000023C, + VFE_COLOR_CONVERT_COEFF_2 = 0x00000240, + VFE_COLOR_CONVERT_OFFSET = 0x00000244, + VFE_ASF_CFG = 0x00000248, + VFE_ASF_SHARP_CFG_0 = 0x0000024C, + VFE_ASF_SHARP_CFG_1 = 0x00000250, + VFE_ASF_SHARP_COEFF_0 = 0x00000254, + VFE_ASF_SHARP_COEFF_1 = 0x00000258, + VFE_ASF_SHARP_COEFF_2 = 0x0000025C, + VFE_ASF_SHARP_COEFF_3 = 0x00000260, + VFE_ASF_MAX_EDGE = 0x00000264, + VFE_ASF_CROP_WIDTH_CFG = 0x00000268, + VFE_ASF_CROP_HEIGHT_CFG = 0x0000026C, + VFE_SCALE_CFG = 0x00000270, + VFE_SCALE_H_IMAGE_SIZE_CFG = 0x00000274, + VFE_SCALE_H_PHASE_CFG = 0x00000278, + VFE_SCALE_H_STRIPE_CFG = 0x0000027C, + VFE_SCALE_V_IMAGE_SIZE_CFG = 0x00000280, + VFE_SCALE_V_PHASE_CFG = 0x00000284, + VFE_SCALE_V_STRIPE_CFG = 0x00000288, + VFE_SCALE_Y_CFG = 0x0000028C, + VFE_SCALE_Y_H_IMAGE_SIZE_CFG = 0x00000290, + VFE_SCALE_Y_H_PHASE_CFG = 0x00000294, + VFE_SCALE_Y_V_IMAGE_SIZE_CFG = 0x00000298, + VFE_SCALE_Y_V_PHASE_CFG = 0x0000029C, + VFE_SCALE_CBCR_CFG = 0x000002A0, + VFE_SCALE_CBCR_H_IMAGE_SIZE_CFG = 0x000002A4, + VFE_SCALE_CBCR_H_PHASE_CFG = 0x000002A8, + VFE_SCALE_CBCR_V_IMAGE_SIZE_CFG = 0x000002AC, + 
VFE_SCALE_CBCR_V_PHASE_CFG = 0x000002B0, + VFE_WB_CFG = 0x000002B4, + VFE_CHROMA_SUPPRESS_CFG_0 = 0x000002B8, + VFE_CHROMA_SUPPRESS_CFG_1 = 0x000002BC, + VFE_CHROMA_SUBSAMPLE_CFG = 0x000002C0, + VFE_CHROMA_SUB_CROP_WIDTH_CFG = 0x000002C4, + VFE_CHROMA_SUB_CROP_HEIGHT_CFG = 0x000002C8, + VFE_FRAMEDROP_ENC_Y_CFG = 0x000002CC, + VFE_FRAMEDROP_ENC_CBCR_CFG = 0x000002D0, + VFE_FRAMEDROP_ENC_Y_PATTERN = 0x000002D4, + VFE_FRAMEDROP_ENC_CBCR_PATTERN = 0x000002D8, + VFE_FRAMEDROP_VIEW_Y_CFG = 0x000002DC, + VFE_FRAMEDROP_VIEW_CBCR_CFG = 0x000002E0, + VFE_FRAMEDROP_VIEW_Y_PATTERN = 0x000002E4, + VFE_FRAMEDROP_VIEW_CBCR_PATTERN = 0x000002E8, + VFE_CLAMP_MAX_CFG = 0x000002EC, + VFE_CLAMP_MIN_CFG = 0x000002F0, + VFE_STATS_CMD = 0x000002F4, + VFE_STATS_AF_CFG = 0x000002F8, + VFE_STATS_AF_DIM = 0x000002FC, + VFE_STATS_AF_GRID_0 = 0x00000300, + VFE_STATS_AF_GRID_1 = 0x00000304, + VFE_STATS_AF_GRID_2 = 0x00000308, + VFE_STATS_AF_GRID_3 = 0x0000030C, + VFE_STATS_AF_HEADER = 0x00000310, + VFE_STATS_AF_COEF0 = 0x00000314, + VFE_STATS_AF_COEF1 = 0x00000318, + VFE_STATS_AWBAE_CFG = 0x0000031C, + VFE_STATS_AXW_HEADER = 0x00000320, + VFE_STATS_AWB_MCFG = 0x00000324, + VFE_STATS_AWB_CCFG1 = 0x00000328, + VFE_STATS_AWB_CCFG2 = 0x0000032C, + VFE_STATS_HIST_HEADER = 0x00000330, + VFE_STATS_HIST_INNER_OFFSET = 0x00000334, + VFE_STATS_HIST_INNER_DIM = 0x00000338, + VFE_STATS_FRAME_SIZE = 0x0000033C, + VFE_DMI_CFG = 0x00000340, + VFE_DMI_ADDR = 0x00000344, + VFE_DMI_DATA_HI = 0x00000348, + VFE_DMI_DATA_LO = 0x0000034C, + VFE_DMI_RAM_AUTO_LOAD_CMD = 0x00000350, + VFE_DMI_RAM_AUTO_LOAD_STATUS = 0x00000354, + VFE_DMI_RAM_AUTO_LOAD_CFG = 0x00000358, + VFE_DMI_RAM_AUTO_LOAD_SEED = 0x0000035C, + VFE_TESTBUS_SEL = 0x00000360, + VFE_TESTGEN_CFG = 0x00000364, + VFE_SW_TESTGEN_CMD = 0x00000368, + VFE_HW_TESTGEN_CMD = 0x0000036C, + VFE_HW_TESTGEN_CFG = 0x00000370, + VFE_HW_TESTGEN_IMAGE_CFG = 0x00000374, + VFE_HW_TESTGEN_SOF_OFFSET_CFG = 0x00000378, + VFE_HW_TESTGEN_EOF_NOFFSET_CFG = 0x0000037C, + VFE_HW_TESTGEN_SOL_OFFSET_CFG = 0x00000380, + VFE_HW_TESTGEN_EOL_NOFFSET_CFG = 0x00000384, + VFE_HW_TESTGEN_HBI_CFG = 0x00000388, + VFE_HW_TESTGEN_VBL_CFG = 0x0000038C, + VFE_HW_TESTGEN_SOF_DUMMY_LINE_CFG2 = 0x00000390, + VFE_HW_TESTGEN_EOF_DUMMY_LINE_CFG2 = 0x00000394, + VFE_HW_TESTGEN_COLOR_BARS_CFG = 0x00000398, + VFE_HW_TESTGEN_RANDOM_CFG = 0x0000039C, + VFE_SPARE = 0x000003A0, +}; + +#define ping 0x0 +#define pong 0x1 + +struct vfe_bus_cfg_data { + boolean stripeRdPathEn; + boolean encYWrPathEn; + boolean encCbcrWrPathEn; + boolean viewYWrPathEn; + boolean viewCbcrWrPathEn; + enum VFE_RAW_PIXEL_DATA_SIZE rawPixelDataSize; + enum VFE_RAW_WR_PATH_SEL rawWritePathSelect; +}; + +struct vfe_camif_cfg_data { + boolean camif2OutputEnable; + boolean camif2BusEnable; + struct vfe_cmds_camif_cfg camifCfgFromCmd; +}; + +struct vfe_irq_composite_mask_config { + uint8_t encIrqComMask; + uint8_t viewIrqComMask; + uint8_t ceDoneSel; +}; + +/* define a structure for each output path.*/ +struct vfe_output_path { + uint32_t addressBuffer[8]; + uint16_t fragIndex; + boolean hwCurrentFlag; + uint8_t *hwRegPingAddress; + uint8_t *hwRegPongAddress; +}; + +struct vfe_output_path_combo { + boolean whichOutputPath; + boolean pathEnabled; + boolean multiFrag; + uint8_t fragCount; + boolean ackPending; + uint8_t currentFrame; + uint32_t nextFrameAddrBuf[8]; + struct vfe_output_path yPath; + struct vfe_output_path cbcrPath; + uint8_t snapshotPendingCount; + boolean pmEnabled; + uint8_t cbcrStatusBit; +}; + +struct vfe_stats_control { + boolean ackPending; + 
uint32_t addressBuffer[2]; + uint32_t nextFrameAddrBuf; + boolean pingPongStatus; + uint8_t *hwRegPingAddress; + uint8_t *hwRegPongAddress; + uint32_t droppedStatsFrameCount; + uint32_t bufToRender; +}; + +struct vfe_gamma_lut_sel { + boolean ch0BankSelect; + boolean ch1BankSelect; + boolean ch2BankSelect; +}; + +struct vfe_interrupt_mask { + boolean camifErrorIrq; + boolean camifSofIrq; + boolean camifEolIrq; + boolean camifEofIrq; + boolean camifEpoch1Irq; + boolean camifEpoch2Irq; + boolean camifOverflowIrq; + boolean ceIrq; + boolean regUpdateIrq; + boolean resetAckIrq; + boolean encYPingpongIrq; + boolean encCbcrPingpongIrq; + boolean viewYPingpongIrq; + boolean viewCbcrPingpongIrq; + boolean rdPingpongIrq; + boolean afPingpongIrq; + boolean awbPingpongIrq; + boolean histPingpongIrq; + boolean encIrq; + boolean viewIrq; + boolean busOverflowIrq; + boolean afOverflowIrq; + boolean awbOverflowIrq; + boolean syncTimer0Irq; + boolean syncTimer1Irq; + boolean syncTimer2Irq; + boolean asyncTimer0Irq; + boolean asyncTimer1Irq; + boolean asyncTimer2Irq; + boolean asyncTimer3Irq; + boolean axiErrorIrq; + boolean violationIrq; +}; + +enum vfe_interrupt_name { + CAMIF_ERROR_IRQ, + CAMIF_SOF_IRQ, + CAMIF_EOL_IRQ, + CAMIF_EOF_IRQ, + CAMIF_EPOCH1_IRQ, + CAMIF_EPOCH2_IRQ, + CAMIF_OVERFLOW_IRQ, + CE_IRQ, + REG_UPDATE_IRQ, + RESET_ACK_IRQ, + ENC_Y_PINGPONG_IRQ, + ENC_CBCR_PINGPONG_IRQ, + VIEW_Y_PINGPONG_IRQ, + VIEW_CBCR_PINGPONG_IRQ, + RD_PINGPONG_IRQ, + AF_PINGPONG_IRQ, + AWB_PINGPONG_IRQ, + HIST_PINGPONG_IRQ, + ENC_IRQ, + VIEW_IRQ, + BUS_OVERFLOW_IRQ, + AF_OVERFLOW_IRQ, + AWB_OVERFLOW_IRQ, + SYNC_TIMER0_IRQ, + SYNC_TIMER1_IRQ, + SYNC_TIMER2_IRQ, + ASYNC_TIMER0_IRQ, + ASYNC_TIMER1_IRQ, + ASYNC_TIMER2_IRQ, + ASYNC_TIMER3_IRQ, + AXI_ERROR_IRQ, + VIOLATION_IRQ +}; + +enum VFE_DMI_RAM_SEL { + NO_MEM_SELECTED = 0, + ROLLOFF_RAM = 0x1, + RGBLUT_RAM_CH0_BANK0 = 0x2, + RGBLUT_RAM_CH0_BANK1 = 0x3, + RGBLUT_RAM_CH1_BANK0 = 0x4, + RGBLUT_RAM_CH1_BANK1 = 0x5, + RGBLUT_RAM_CH2_BANK0 = 0x6, + RGBLUT_RAM_CH2_BANK1 = 0x7, + STATS_HIST_CB_EVEN_RAM = 0x8, + STATS_HIST_CB_ODD_RAM = 0x9, + STATS_HIST_CR_EVEN_RAM = 0xa, + STATS_HIST_CR_ODD_RAM = 0xb, + RGBLUT_CHX_BANK0 = 0xc, + RGBLUT_CHX_BANK1 = 0xd, + LUMA_ADAPT_LUT_RAM_BANK0 = 0xe, + LUMA_ADAPT_LUT_RAM_BANK1 = 0xf +}; + +struct vfe_module_enable { + boolean blackLevelCorrectionEnable; + boolean lensRollOffEnable; + boolean demuxEnable; + boolean chromaUpsampleEnable; + boolean demosaicEnable; + boolean statsEnable; + boolean cropEnable; + boolean mainScalerEnable; + boolean whiteBalanceEnable; + boolean colorCorrectionEnable; + boolean yHistEnable; + boolean skinToneEnable; + boolean lumaAdaptationEnable; + boolean rgbLUTEnable; + boolean chromaEnhanEnable; + boolean asfEnable; + boolean chromaSuppressionEnable; + boolean chromaSubsampleEnable; + boolean scaler2YEnable; + boolean scaler2CbcrEnable; +}; + +struct vfe_bus_cmd_data { + boolean stripeReload; + boolean busPingpongReload; + boolean statsPingpongReload; +}; + +struct vfe_stats_cmd_data { + boolean autoFocusEnable; + boolean axwEnable; + boolean histEnable; + boolean clearHistEnable; + boolean histAutoClearEnable; + boolean colorConversionEnable; +}; + +struct vfe_hw_ver { + uint32_t minorVersion:8; + uint32_t majorVersion:8; + uint32_t coreVersion:4; + uint32_t /* reserved */ : 12; +} __attribute__ ((packed, aligned(4))); + +struct vfe_cfg { + uint32_t pixelPattern:3; + uint32_t /* reserved */ : 13; + uint32_t inputSource:2; + uint32_t /* reserved */ : 14; +} __attribute__ ((packed, aligned(4))); + +struct 
vfe_buscmd { + uint32_t stripeReload:1; + uint32_t /* reserved */ : 3; + uint32_t busPingpongReload:1; + uint32_t statsPingpongReload:1; + uint32_t /* reserved */ : 26; +} __attribute__ ((packed, aligned(4))); + +struct VFE_Irq_Composite_MaskType { + uint32_t encIrqComMaskBits:2; + uint32_t viewIrqComMaskBits:2; + uint32_t ceDoneSelBits:5; + uint32_t /* reserved */ : 23; +} __attribute__ ((packed, aligned(4))); + +struct vfe_mod_enable { + uint32_t blackLevelCorrectionEnable:1; + uint32_t lensRollOffEnable:1; + uint32_t demuxEnable:1; + uint32_t chromaUpsampleEnable:1; + uint32_t demosaicEnable:1; + uint32_t statsEnable:1; + uint32_t cropEnable:1; + uint32_t mainScalerEnable:1; + uint32_t whiteBalanceEnable:1; + uint32_t colorCorrectionEnable:1; + uint32_t yHistEnable:1; + uint32_t skinToneEnable:1; + uint32_t lumaAdaptationEnable:1; + uint32_t rgbLUTEnable:1; + uint32_t chromaEnhanEnable:1; + uint32_t asfEnable:1; + uint32_t chromaSuppressionEnable:1; + uint32_t chromaSubsampleEnable:1; + uint32_t scaler2YEnable:1; + uint32_t scaler2CbcrEnable:1; + uint32_t /* reserved */ : 14; +} __attribute__ ((packed, aligned(4))); + +struct vfe_irqenable { + uint32_t camifErrorIrq:1; + uint32_t camifSofIrq:1; + uint32_t camifEolIrq:1; + uint32_t camifEofIrq:1; + uint32_t camifEpoch1Irq:1; + uint32_t camifEpoch2Irq:1; + uint32_t camifOverflowIrq:1; + uint32_t ceIrq:1; + uint32_t regUpdateIrq:1; + uint32_t resetAckIrq:1; + uint32_t encYPingpongIrq:1; + uint32_t encCbcrPingpongIrq:1; + uint32_t viewYPingpongIrq:1; + uint32_t viewCbcrPingpongIrq:1; + uint32_t rdPingpongIrq:1; + uint32_t afPingpongIrq:1; + uint32_t awbPingpongIrq:1; + uint32_t histPingpongIrq:1; + uint32_t encIrq:1; + uint32_t viewIrq:1; + uint32_t busOverflowIrq:1; + uint32_t afOverflowIrq:1; + uint32_t awbOverflowIrq:1; + uint32_t syncTimer0Irq:1; + uint32_t syncTimer1Irq:1; + uint32_t syncTimer2Irq:1; + uint32_t asyncTimer0Irq:1; + uint32_t asyncTimer1Irq:1; + uint32_t asyncTimer2Irq:1; + uint32_t asyncTimer3Irq:1; + uint32_t axiErrorIrq:1; + uint32_t violationIrq:1; +} __attribute__ ((packed, aligned(4))); + +struct vfe_upsample_cfg { + uint32_t chromaCositingForYCbCrInputs:1; + uint32_t /* reserved */ : 31; +} __attribute__ ((packed, aligned(4))); + +struct VFE_CAMIFConfigType { + /* CAMIF Config */ + uint32_t /* reserved */ : 1; + uint32_t VSyncEdge:1; + uint32_t HSyncEdge:1; + uint32_t syncMode:2; + uint32_t vfeSubsampleEnable:1; + uint32_t /* reserved */ : 1; + uint32_t busSubsampleEnable:1; + uint32_t camif2vfeEnable:1; + uint32_t /* reserved */ : 1; + uint32_t camif2busEnable:1; + uint32_t irqSubsampleEnable:1; + uint32_t binningEnable:1; + uint32_t /* reserved */ : 18; + uint32_t misrEnable:1; +} __attribute__ ((packed, aligned(4))); + +struct vfe_camifcfg { + /* EFS_Config */ + uint32_t efsEndOfLine:8; + uint32_t efsStartOfLine:8; + uint32_t efsEndOfFrame:8; + uint32_t efsStartOfFrame:8; + /* Frame Config */ + uint32_t frameConfigPixelsPerLine:14; + uint32_t /* reserved */ : 2; + uint32_t frameConfigLinesPerFrame:14; + uint32_t /* reserved */ : 2; + /* Window Width Config */ + uint32_t windowWidthCfgLastPixel:14; + uint32_t /* reserved */ : 2; + uint32_t windowWidthCfgFirstPixel:14; + uint32_t /* reserved */ : 2; + /* Window Height Config */ + uint32_t windowHeightCfglastLine:14; + uint32_t /* reserved */ : 2; + uint32_t windowHeightCfgfirstLine:14; + uint32_t /* reserved */ : 2; + /* Subsample 1 Config */ + uint32_t subsample1CfgPixelSkip:16; + uint32_t subsample1CfgLineSkip:16; + /* Subsample 2 Config */ + uint32_t 
subsample2CfgFrameSkip:4; + uint32_t subsample2CfgFrameSkipMode:1; + uint32_t subsample2CfgPixelSkipWrap:1; + uint32_t /* reserved */ : 26; + /* Epoch Interrupt */ + uint32_t epoch1Line:14; + uint32_t /* reserved */ : 2; + uint32_t epoch2Line:14; + uint32_t /* reserved */ : 2; +} __attribute__ ((packed, aligned(4))); + +struct vfe_epoch1cfg { + /* Epoch Interrupt */ + uint32_t epoch1Line:14; + uint32_t /* reserved */ : 2; +} __attribute__ ((packed, aligned(4))); + + +struct vfe_camifframe_update { + uint32_t pixelsPerLine:14; + uint32_t /* reserved */ : 2; + uint32_t linesPerFrame:14; + uint32_t /* reserved */ : 2; +} __attribute__ ((packed, aligned(4))); + +struct vfe_axi_bus_cfg { + uint32_t stripeRdPathEn:1; + uint32_t /* reserved */ : 3; + uint32_t encYWrPathEn:1; + uint32_t encCbcrWrPathEn:1; + uint32_t viewYWrPathEn:1; + uint32_t viewCbcrWrPathEn:1; + uint32_t rawPixelDataSize:2; + uint32_t rawWritePathSelect:2; + uint32_t /* reserved */ : 20; +} __attribute__ ((packed, aligned(4))); + +struct vfe_axi_out_cfg { + uint32_t out2YPingAddr:32; + uint32_t out2YPongAddr:32; + uint32_t out2YImageHeight:12; + uint32_t /* reserved */ : 4; + uint32_t out2YImageWidthin64bit:10; + uint32_t /* reserved */ : 6; + uint32_t out2YBurstLength:2; + uint32_t /* reserved */ : 2; + uint32_t out2YNumRows:12; + uint32_t out2YRowIncrementIn64bit:12; + uint32_t /* reserved */ : 4; + uint32_t out2CbcrPingAddr:32; + uint32_t out2CbcrPongAddr:32; + uint32_t out2CbcrImageHeight:12; + uint32_t /* reserved */ : 4; + uint32_t out2CbcrImageWidthIn64bit:10; + uint32_t /* reserved */ : 6; + uint32_t out2CbcrBurstLength:2; + uint32_t /* reserved */ : 2; + uint32_t out2CbcrNumRows:12; + uint32_t out2CbcrRowIncrementIn64bit:12; + uint32_t /* reserved */ : 4; + uint32_t out1YPingAddr:32; + uint32_t out1YPongAddr:32; + uint32_t out1YImageHeight:12; + uint32_t /* reserved */ : 4; + uint32_t out1YImageWidthin64bit:10; + uint32_t /* reserved */ : 6; + uint32_t out1YBurstLength:2; + uint32_t /* reserved */ : 2; + uint32_t out1YNumRows:12; + uint32_t out1YRowIncrementIn64bit:12; + uint32_t /* reserved */ : 4; + uint32_t out1CbcrPingAddr:32; + uint32_t out1CbcrPongAddr:32; + uint32_t out1CbcrImageHeight:12; + uint32_t /* reserved */ : 4; + uint32_t out1CbcrImageWidthIn64bit:10; + uint32_t /* reserved */ : 6; + uint32_t out1CbcrBurstLength:2; + uint32_t /* reserved */ : 2; + uint32_t out1CbcrNumRows:12; + uint32_t out1CbcrRowIncrementIn64bit:12; + uint32_t /* reserved */ : 4; +} __attribute__ ((packed, aligned(4))); + +struct vfe_output_clamp_cfg { + /* Output Clamp Maximums */ + uint32_t yChanMax:8; + uint32_t cbChanMax:8; + uint32_t crChanMax:8; + uint32_t /* reserved */ : 8; + /* Output Clamp Minimums */ + uint32_t yChanMin:8; + uint32_t cbChanMin:8; + uint32_t crChanMin:8; + uint32_t /* reserved */ : 8; +} __attribute__ ((packed, aligned(4))); + +struct vfe_fov_crop_cfg { + uint32_t lastPixel:12; + uint32_t /* reserved */ : 4; + uint32_t firstPixel:12; + uint32_t /* reserved */ : 4; + + /* FOV Corp, Part 2 */ + uint32_t lastLine:12; + uint32_t /* reserved */ : 4; + uint32_t firstLine:12; + uint32_t /* reserved */ : 4; +} __attribute__ ((packed, aligned(4))); + +struct VFE_FRAME_SKIP_UpdateCmdType { + uint32_t yPattern:32; + uint32_t cbcrPattern:32; +} __attribute__ ((packed, aligned(4))); + +struct vfe_frame_skip_cfg { + /* Frame Drop Enc (output2) */ + uint32_t output2YPeriod:5; + uint32_t /* reserved */ : 27; + uint32_t output2CbCrPeriod:5; + uint32_t /* reserved */ : 27; + uint32_t output2YPattern:32; + uint32_t 
output2CbCrPattern:32; + /* Frame Drop View (output1) */ + uint32_t output1YPeriod:5; + uint32_t /* reserved */ : 27; + uint32_t output1CbCrPeriod:5; + uint32_t /* reserved */ : 27; + uint32_t output1YPattern:32; + uint32_t output1CbCrPattern:32; +} __attribute__ ((packed, aligned(4))); + +struct vfe_main_scaler_cfg { + /* Scaler Enable Config */ + uint32_t hEnable:1; + uint32_t vEnable:1; + uint32_t /* reserved */ : 30; + /* Scale H Image Size Config */ + uint32_t inWidth:12; + uint32_t /* reserved */ : 4; + uint32_t outWidth:12; + uint32_t /* reserved */ : 4; + /* Scale H Phase Config */ + uint32_t horizPhaseMult:18; + uint32_t /* reserved */ : 2; + uint32_t horizInterResolution:2; + uint32_t /* reserved */ : 10; + /* Scale H Stripe Config */ + uint32_t horizMNInit:12; + uint32_t /* reserved */ : 4; + uint32_t horizPhaseInit:15; + uint32_t /* reserved */ : 1; + /* Scale V Image Size Config */ + uint32_t inHeight:12; + uint32_t /* reserved */ : 4; + uint32_t outHeight:12; + uint32_t /* reserved */ : 4; + /* Scale V Phase Config */ + uint32_t vertPhaseMult:18; + uint32_t /* reserved */ : 2; + uint32_t vertInterResolution:2; + uint32_t /* reserved */ : 10; + /* Scale V Stripe Config */ + uint32_t vertMNInit:12; + uint32_t /* reserved */ : 4; + uint32_t vertPhaseInit:15; + uint32_t /* reserved */ : 1; +} __attribute__ ((packed, aligned(4))); + +struct vfe_scaler2_cfg { + /* Scaler Enable Config */ + uint32_t hEnable:1; + uint32_t vEnable:1; + uint32_t /* reserved */ : 30; + /* Scaler H Image Size Config */ + uint32_t inWidth:12; + uint32_t /* reserved */ : 4; + uint32_t outWidth:12; + uint32_t /* reserved */ : 4; + /* Scaler H Phase Config */ + uint32_t horizPhaseMult:18; + uint32_t /* reserved */ : 2; + uint32_t horizInterResolution:2; + uint32_t /* reserved */ : 10; + /* Scaler V Image Size Config */ + uint32_t inHeight:12; + uint32_t /* reserved */ : 4; + uint32_t outHeight:12; + uint32_t /* reserved */ : 4; + /* Scaler V Phase Config */ + uint32_t vertPhaseMult:18; + uint32_t /* reserved */ : 2; + uint32_t vertInterResolution:2; + uint32_t /* reserved */ : 10; +} __attribute__ ((packed, aligned(4))); + +struct vfe_rolloff_cfg { + /* Rolloff 0 Config */ + uint32_t gridWidth:9; + uint32_t gridHeight:9; + uint32_t yDelta:9; + uint32_t /* reserved */ : 5; + /* Rolloff 1 Config */ + uint32_t gridX:4; + uint32_t gridY:4; + uint32_t pixelX:9; + uint32_t /* reserved */ : 3; + uint32_t pixelY:9; + uint32_t /* reserved */ : 3; + /* Rolloff 2 Config */ + uint32_t yDeltaAccum:12; + uint32_t /* reserved */ : 20; +} __attribute__ ((packed, aligned(4))); + +struct vfe_asf_update { + /* ASF Config Command */ + uint32_t smoothEnable:1; + uint32_t sharpMode:2; + uint32_t /* reserved */ : 1; + uint32_t smoothCoeff1:4; + uint32_t smoothCoeff0:8; + uint32_t pipeFlushCount:12; + uint32_t pipeFlushOvd:1; + uint32_t flushHaltOvd:1; + uint32_t cropEnable:1; + uint32_t /* reserved */ : 1; + /* Sharpening Config 0 */ + uint32_t sharpThresholdE1:7; + uint32_t /* reserved */ : 1; + uint32_t sharpDegreeK1:5; + uint32_t /* reserved */ : 3; + uint32_t sharpDegreeK2:5; + uint32_t /* reserved */ : 3; + uint32_t normalizeFactor:7; + uint32_t /* reserved */ : 1; + /* Sharpening Config 1 */ + uint32_t sharpThresholdE2:8; + uint32_t sharpThresholdE3:8; + uint32_t sharpThresholdE4:8; + uint32_t sharpThresholdE5:8; + /* Sharpening Coefficients 0 */ + uint32_t F1Coeff0:6; + uint32_t F1Coeff1:6; + uint32_t F1Coeff2:6; + uint32_t F1Coeff3:6; + uint32_t F1Coeff4:6; + uint32_t /* reserved */ : 2; + /* Sharpening Coefficients 1 */ + 
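+	/* Descriptive note (not from the original patch): F1Coeff5..F1Coeff8
+	 * below complete the first sharpening filter kernel begun in the
+	 * previous 32-bit register word, and the F2Coeff0..F2Coeff8 fields
+	 * that follow describe the second kernel in the same packed layout. */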
uint32_t F1Coeff5:6; + uint32_t F1Coeff6:6; + uint32_t F1Coeff7:6; + uint32_t F1Coeff8:7; + uint32_t /* reserved */ : 7; + /* Sharpening Coefficients 2 */ + uint32_t F2Coeff0:6; + uint32_t F2Coeff1:6; + uint32_t F2Coeff2:6; + uint32_t F2Coeff3:6; + uint32_t F2Coeff4:6; + uint32_t /* reserved */ : 2; + /* Sharpening Coefficients 3 */ + uint32_t F2Coeff5:6; + uint32_t F2Coeff6:6; + uint32_t F2Coeff7:6; + uint32_t F2Coeff8:7; + uint32_t /* reserved */ : 7; +} __attribute__ ((packed, aligned(4))); + +struct vfe_asfcrop_cfg { + /* ASF Crop Width Config */ + uint32_t lastPixel:12; + uint32_t /* reserved */ : 4; + uint32_t firstPixel:12; + uint32_t /* reserved */ : 4; + /* ASP Crop Height Config */ + uint32_t lastLine:12; + uint32_t /* reserved */ : 4; + uint32_t firstLine:12; + uint32_t /* reserved */ : 4; +} __attribute__ ((packed, aligned(4))); + +struct vfe_chroma_suppress_cfg { + /* Chroma Suppress 0 Config */ + uint32_t m1:8; + uint32_t m3:8; + uint32_t n1:3; + uint32_t /* reserved */ : 1; + uint32_t n3:3; + uint32_t /* reserved */ : 9; + /* Chroma Suppress 1 Config */ + uint32_t mm1:8; + uint32_t nn1:3; + uint32_t /* reserved */ : 21; +} __attribute__ ((packed, aligned(4))); + +struct vfe_chromasubsample_cfg { + /* Chroma Subsample Selection */ + uint32_t hCositedPhase:1; + uint32_t vCositedPhase:1; + uint32_t hCosited:1; + uint32_t vCosited:1; + uint32_t hsubSampleEnable:1; + uint32_t vsubSampleEnable:1; + uint32_t cropEnable:1; + uint32_t /* reserved */ : 25; + uint32_t cropWidthLastPixel:12; + uint32_t /* reserved */ : 4; + uint32_t cropWidthFirstPixel:12; + uint32_t /* reserved */ : 4; + uint32_t cropHeightLastLine:12; + uint32_t /* reserved */ : 4; + uint32_t cropHeightFirstLine:12; + uint32_t /* reserved */ : 4; +} __attribute__ ((packed, aligned(4))); + +struct vfe_blacklevel_cfg { + /* Black Even-Even Value Config */ + uint32_t evenEvenAdjustment:9; + uint32_t /* reserved */ : 23; + /* Black Even-Odd Value Config */ + uint32_t evenOddAdjustment:9; + uint32_t /* reserved */ : 23; + /* Black Odd-Even Value Config */ + uint32_t oddEvenAdjustment:9; + uint32_t /* reserved */ : 23; + /* Black Odd-Odd Value Config */ + uint32_t oddOddAdjustment:9; + uint32_t /* reserved */ : 23; +} __attribute__ ((packed, aligned(4))); + +struct vfe_demux_cfg { + /* Demux Gain 0 Config */ + uint32_t ch0EvenGain:10; + uint32_t /* reserved */ : 6; + uint32_t ch0OddGain:10; + uint32_t /* reserved */ : 6; + /* Demux Gain 1 Config */ + uint32_t ch1Gain:10; + uint32_t /* reserved */ : 6; + uint32_t ch2Gain:10; + uint32_t /* reserved */ : 6; +} __attribute__ ((packed, aligned(4))); + +struct vfe_bps_info { + uint32_t greenBadPixelCount:8; + uint32_t /* reserved */ : 8; + uint32_t RedBlueBadPixelCount:8; + uint32_t /* reserved */ : 8; +} __attribute__ ((packed, aligned(4))); + +struct vfe_demosaic_cfg { + /* Demosaic Config */ + uint32_t abfEnable:1; + uint32_t badPixelCorrEnable:1; + uint32_t forceAbfOn:1; + uint32_t /* reserved */ : 1; + uint32_t abfShift:4; + uint32_t fminThreshold:7; + uint32_t /* reserved */ : 1; + uint32_t fmaxThreshold:7; + uint32_t /* reserved */ : 5; + uint32_t slopeShift:3; + uint32_t /* reserved */ : 1; +} __attribute__ ((packed, aligned(4))); + +struct vfe_demosaic_bpc_cfg { + /* Demosaic BPC Config 0 */ + uint32_t blueDiffThreshold:12; + uint32_t redDiffThreshold:12; + uint32_t /* reserved */ : 8; + /* Demosaic BPC Config 1 */ + uint32_t greenDiffThreshold:12; + uint32_t /* reserved */ : 20; +} __attribute__ ((packed, aligned(4))); + +struct vfe_demosaic_abf_cfg { + /* Demosaic ABF 
Config 0 */ + uint32_t lpThreshold:10; + uint32_t /* reserved */ : 22; + /* Demosaic ABF Config 1 */ + uint32_t ratio:4; + uint32_t minValue:10; + uint32_t /* reserved */ : 2; + uint32_t maxValue:10; + uint32_t /* reserved */ : 6; +} __attribute__ ((packed, aligned(4))); + +struct vfe_color_correction_cfg { + /* Color Corr. Coefficient 0 Config */ + uint32_t c0:12; + uint32_t /* reserved */ : 20; + /* Color Corr. Coefficient 1 Config */ + uint32_t c1:12; + uint32_t /* reserved */ : 20; + /* Color Corr. Coefficient 2 Config */ + uint32_t c2:12; + uint32_t /* reserved */ : 20; + /* Color Corr. Coefficient 3 Config */ + uint32_t c3:12; + uint32_t /* reserved */ : 20; + /* Color Corr. Coefficient 4 Config */ + uint32_t c4:12; + uint32_t /* reserved */ : 20; + /* Color Corr. Coefficient 5 Config */ + uint32_t c5:12; + uint32_t /* reserved */ : 20; + /* Color Corr. Coefficient 6 Config */ + uint32_t c6:12; + uint32_t /* reserved */ : 20; + /* Color Corr. Coefficient 7 Config */ + uint32_t c7:12; + uint32_t /* reserved */ : 20; + /* Color Corr. Coefficient 8 Config */ + uint32_t c8:12; + uint32_t /* reserved */ : 20; + /* Color Corr. Offset 0 Config */ + uint32_t k0:11; + uint32_t /* reserved */ : 21; + /* Color Corr. Offset 1 Config */ + uint32_t k1:11; + uint32_t /* reserved */ : 21; + /* Color Corr. Offset 2 Config */ + uint32_t k2:11; + uint32_t /* reserved */ : 21; + /* Color Corr. Coefficient Q Config */ + uint32_t coefQFactor:2; + uint32_t /* reserved */ : 30; +} __attribute__ ((packed, aligned(4))); + +struct VFE_LumaAdaptation_ConfigCmdType { + /* LA Config */ + uint32_t lutBankSelect:1; + uint32_t /* reserved */ : 31; +} __attribute__ ((packed, aligned(4))); + +struct vfe_wb_cfg { + /* WB Config */ + uint32_t ch0Gain:9; + uint32_t ch1Gain:9; + uint32_t ch2Gain:9; + uint32_t /* reserved */ : 5; +} __attribute__ ((packed, aligned(4))); + +struct VFE_GammaLutSelect_ConfigCmdType { + /* LUT Bank Select Config */ + uint32_t ch0BankSelect:1; + uint32_t ch1BankSelect:1; + uint32_t ch2BankSelect:1; + uint32_t /* reserved */ : 29; +} __attribute__ ((packed, aligned(4))); + +struct vfe_chroma_enhance_cfg { + /* Chroma Enhance A Config */ + uint32_t ap:11; + uint32_t /* reserved */ : 5; + uint32_t am:11; + uint32_t /* reserved */ : 5; + /* Chroma Enhance B Config */ + uint32_t bp:11; + uint32_t /* reserved */ : 5; + uint32_t bm:11; + uint32_t /* reserved */ : 5; + /* Chroma Enhance C Config */ + uint32_t cp:11; + uint32_t /* reserved */ : 5; + uint32_t cm:11; + uint32_t /* reserved */ : 5; + /* Chroma Enhance D Config */ + uint32_t dp:11; + uint32_t /* reserved */ : 5; + uint32_t dm:11; + uint32_t /* reserved */ : 5; + /* Chroma Enhance K Config */ + uint32_t kcb:11; + uint32_t /* reserved */ : 5; + uint32_t kcr:11; + uint32_t /* reserved */ : 5; +} __attribute__ ((packed, aligned(4))); + +struct vfe_color_convert_cfg { + /* Conversion Coefficient 0 */ + uint32_t v0:12; + uint32_t /* reserved */ : 20; + /* Conversion Coefficient 1 */ + uint32_t v1:12; + uint32_t /* reserved */ : 20; + /* Conversion Coefficient 2 */ + uint32_t v2:12; + uint32_t /* reserved */ : 20; + /* Conversion Offset */ + uint32_t ConvertOffset:8; + uint32_t /* reserved */ : 24; +} __attribute__ ((packed, aligned(4))); + +struct VFE_SyncTimer_ConfigCmdType { + /* Timer Line Start Config */ + uint32_t timerLineStart:12; + uint32_t /* reserved */ : 20; + /* Timer Pixel Start Config */ + uint32_t timerPixelStart:18; + uint32_t /* reserved */ : 14; + /* Timer Pixel Duration Config */ + uint32_t timerPixelDuration:28; + uint32_t /* 
reserved */ : 4; + /* Sync Timer Polarity Config */ + uint32_t timer0Polarity:1; + uint32_t timer1Polarity:1; + uint32_t timer2Polarity:1; + uint32_t /* reserved */ : 29; +} __attribute__ ((packed, aligned(4))); + +struct VFE_AsyncTimer_ConfigCmdType { + /* Async Timer Config 0 */ + uint32_t inactiveLength:20; + uint32_t numRepetition:10; + uint32_t /* reserved */ : 1; + uint32_t polarity:1; + /* Async Timer Config 1 */ + uint32_t activeLength:20; + uint32_t /* reserved */ : 12; +} __attribute__ ((packed, aligned(4))); + +struct VFE_AWBAEStatistics_ConfigCmdType { + /* AWB autoexposure Config */ + uint32_t aeRegionConfig:1; + uint32_t aeSubregionConfig:1; + uint32_t /* reserved */ : 14; + uint32_t awbYMin:8; + uint32_t awbYMax:8; + /* AXW Header */ + uint32_t axwHeader:8; + uint32_t /* reserved */ : 24; + /* AWB Mconfig */ + uint32_t m4:8; + uint32_t m3:8; + uint32_t m2:8; + uint32_t m1:8; + /* AWB Cconfig */ + uint32_t c2:12; + uint32_t /* reserved */ : 4; + uint32_t c1:12; + uint32_t /* reserved */ : 4; + /* AWB Cconfig 2 */ + uint32_t c4:12; + uint32_t /* reserved */ : 4; + uint32_t c3:12; + uint32_t /* reserved */ : 4; +} __attribute__ ((packed, aligned(4))); + +struct VFE_TestGen_ConfigCmdType { + /* HW Test Gen Config */ + uint32_t numFrame:10; + uint32_t /* reserved */ : 2; + uint32_t pixelDataSelect:1; + uint32_t systematicDataSelect:1; + uint32_t /* reserved */ : 2; + uint32_t pixelDataSize:2; + uint32_t hsyncEdge:1; + uint32_t vsyncEdge:1; + uint32_t /* reserved */ : 12; + /* HW Test Gen Image Config */ + uint32_t imageWidth:14; + uint32_t /* reserved */ : 2; + uint32_t imageHeight:14; + uint32_t /* reserved */ : 2; + /* SOF Offset Config */ + uint32_t sofOffset:24; + uint32_t /* reserved */ : 8; + /* EOF NOffset Config */ + uint32_t eofNOffset:24; + uint32_t /* reserved */ : 8; + /* SOL Offset Config */ + uint32_t solOffset:9; + uint32_t /* reserved */ : 23; + /* EOL NOffset Config */ + uint32_t eolNOffset:9; + uint32_t /* reserved */ : 23; + /* HBI Config */ + uint32_t hBlankInterval:14; + uint32_t /* reserved */ : 18; + /* VBL Config */ + uint32_t vBlankInterval:14; + uint32_t /* reserved */ : 2; + uint32_t vBlankIntervalEnable:1; + uint32_t /* reserved */ : 15; + /* SOF Dummy Line Config */ + uint32_t sofDummy:8; + uint32_t /* reserved */ : 24; + /* EOF Dummy Line Config */ + uint32_t eofDummy:8; + uint32_t /* reserved */ : 24; + /* Color Bars Config */ + uint32_t unicolorBarSelect:3; + uint32_t /* reserved */ : 1; + uint32_t unicolorBarEnable:1; + uint32_t splitEnable:1; + uint32_t pixelPattern:2; + uint32_t rotatePeriod:6; + uint32_t /* reserved */ : 18; + /* Random Config */ + uint32_t randomSeed:16; + uint32_t /* reserved */ : 16; +} __attribute__ ((packed, aligned(4))); + +struct VFE_Bus_Pm_ConfigCmdType { + /* VFE Bus Performance Monitor Config */ + uint32_t output2YWrPmEnable:1; + uint32_t output2CbcrWrPmEnable:1; + uint32_t output1YWrPmEnable:1; + uint32_t output1CbcrWrPmEnable:1; + uint32_t /* reserved */ : 28; +} __attribute__ ((packed, aligned(4))); + +struct vfe_asf_info { + /* asf max edge */ + uint32_t maxEdge:13; + uint32_t /* reserved */ : 3; + /* HBi count */ + uint32_t HBICount:12; + uint32_t /* reserved */ : 4; +} __attribute__ ((packed, aligned(4))); + +struct vfe_camif_stats { + uint32_t pixelCount:14; + uint32_t /* reserved */ : 2; + uint32_t lineCount:14; + uint32_t /* reserved */ : 1; + uint32_t camifHalt:1; +} __attribute__ ((packed, aligned(4))); + +struct VFE_StatsCmdType { + uint32_t autoFocusEnable:1; + uint32_t axwEnable:1; + uint32_t 
histEnable:1; + uint32_t clearHistEnable:1; + uint32_t histAutoClearEnable:1; + uint32_t colorConversionEnable:1; + uint32_t /* reserved */ : 26; +} __attribute__ ((packed, aligned(4))); + +struct vfe_statsframe { + uint32_t lastPixel:12; + uint32_t /* reserved */ : 4; + uint32_t lastLine:12; + uint32_t /* reserved */ : 4; +} __attribute__ ((packed, aligned(4))); + +struct vfe_busstats_wrprio { + uint32_t afBusPriority:4; + uint32_t awbBusPriority:4; + uint32_t histBusPriority:4; + uint32_t afBusPriorityEn:1; + uint32_t awbBusPriorityEn:1; + uint32_t histBusPriorityEn:1; + uint32_t /* reserved */ : 17; +} __attribute__ ((packed, aligned(4))); + +struct vfe_statsaf_update { + /* VFE_STATS_AF_CFG */ + uint32_t windowVOffset:12; + uint32_t /* reserved */ : 4; + uint32_t windowHOffset:12; + uint32_t /* reserved */ : 3; + uint32_t windowMode:1; + + /* VFE_STATS_AF_DIM */ + uint32_t windowHeight:12; + uint32_t /* reserved */ : 4; + uint32_t windowWidth:12; + uint32_t /* reserved */ : 4; +} __attribute__ ((packed, aligned(4))); + +struct vfe_statsaf_cfg { + /* VFE_STATS_AF_GRID_0 */ + uint32_t entry00:8; + uint32_t entry01:8; + uint32_t entry02:8; + uint32_t entry03:8; + + /* VFE_STATS_AF_GRID_1 */ + uint32_t entry10:8; + uint32_t entry11:8; + uint32_t entry12:8; + uint32_t entry13:8; + + /* VFE_STATS_AF_GRID_2 */ + uint32_t entry20:8; + uint32_t entry21:8; + uint32_t entry22:8; + uint32_t entry23:8; + + /* VFE_STATS_AF_GRID_3 */ + uint32_t entry30:8; + uint32_t entry31:8; + uint32_t entry32:8; + uint32_t entry33:8; + + /* VFE_STATS_AF_HEADER */ + uint32_t afHeader:8; + uint32_t /* reserved */ : 24; + /* VFE_STATS_AF_COEF0 */ + uint32_t a00:5; + uint32_t a04:5; + uint32_t fvMax:11; + uint32_t fvMetric:1; + uint32_t /* reserved */ : 10; + + /* VFE_STATS_AF_COEF1 */ + uint32_t a20:5; + uint32_t a21:5; + uint32_t a22:5; + uint32_t a23:5; + uint32_t a24:5; + uint32_t /* reserved */ : 7; +} __attribute__ ((packed, aligned(4))); + +struct vfe_statsawbae_update { + uint32_t aeRegionCfg:1; + uint32_t aeSubregionCfg:1; + uint32_t /* reserved */ : 14; + uint32_t awbYMin:8; + uint32_t awbYMax:8; +} __attribute__ ((packed, aligned(4))); + +struct vfe_statsaxw_hdr_cfg { + /* Stats AXW Header Config */ + uint32_t axwHeader:8; + uint32_t /* reserved */ : 24; +} __attribute__ ((packed, aligned(4))); + +struct vfe_statsawb_update { + /* AWB MConfig */ + uint32_t m4:8; + uint32_t m3:8; + uint32_t m2:8; + uint32_t m1:8; + + /* AWB CConfig1 */ + uint32_t c2:12; + uint32_t /* reserved */ : 4; + uint32_t c1:12; + uint32_t /* reserved */ : 4; + + /* AWB CConfig2 */ + uint32_t c4:12; + uint32_t /* reserved */ : 4; + uint32_t c3:12; + uint32_t /* reserved */ : 4; +} __attribute__ ((packed, aligned(4))); + +struct VFE_SyncTimerCmdType { + uint32_t hsyncCount:12; + uint32_t /* reserved */ : 20; + uint32_t pclkCount:18; + uint32_t /* reserved */ : 14; + uint32_t outputDuration:28; + uint32_t /* reserved */ : 4; +} __attribute__ ((packed, aligned(4))); + +struct VFE_AsyncTimerCmdType { + /* config 0 */ + uint32_t inactiveCount:20; + uint32_t repeatCount:10; + uint32_t /* reserved */ : 1; + uint32_t polarity:1; + /* config 1 */ + uint32_t activeCount:20; + uint32_t /* reserved */ : 12; +} __attribute__ ((packed, aligned(4))); + +struct VFE_AxiInputCmdType { + uint32_t stripeStartAddr0:32; + uint32_t stripeStartAddr1:32; + uint32_t stripeStartAddr2:32; + uint32_t stripeStartAddr3:32; + + uint32_t ySize:12; + uint32_t yOffsetDelta:12; + uint32_t /* reserved */ : 8; + + /* bus_stripe_rd_hSize */ + uint32_t /* reserved */ : 16; + 
uint32_t xSizeWord:10; + uint32_t /* reserved */ : 6; + + /* bus_stripe_rd_buffer_cfg */ + uint32_t burstLength:2; + uint32_t /* reserved */ : 2; + uint32_t NumOfRows:12; + uint32_t RowIncrement:12; + uint32_t /* reserved */ : 4; + + /* bus_stripe_rd_unpack_cfg */ + uint32_t mainUnpackHeight:12; + uint32_t mainUnpackWidth:13; + uint32_t mainUnpackHbiSel:3; + uint32_t mainUnpackPhase:3; + uint32_t /* reserved */ : 1; + + /* bus_stripe_rd_unpack */ + uint32_t unpackPattern:32; + + /* bus_stripe_rd_pad_size */ + uint32_t padLeft:7; + uint32_t /* reserved */ : 1; + uint32_t padRight:7; + uint32_t /* reserved */ : 1; + uint32_t padTop:7; + uint32_t /* reserved */ : 1; + uint32_t padBottom:7; + uint32_t /* reserved */ : 1; + + /* bus_stripe_rd_pad_L_unpack */ + uint32_t leftUnpackPattern0:4; + uint32_t leftUnpackPattern1:4; + uint32_t leftUnpackPattern2:4; + uint32_t leftUnpackPattern3:4; + uint32_t leftUnpackStop0:1; + uint32_t leftUnpackStop1:1; + uint32_t leftUnpackStop2:1; + uint32_t leftUnpackStop3:1; + uint32_t /* reserved */ : 12; + + /* bus_stripe_rd_pad_R_unpack */ + uint32_t rightUnpackPattern0:4; + uint32_t rightUnpackPattern1:4; + uint32_t rightUnpackPattern2:4; + uint32_t rightUnpackPattern3:4; + uint32_t rightUnpackStop0:1; + uint32_t rightUnpackStop1:1; + uint32_t rightUnpackStop2:1; + uint32_t rightUnpackStop3:1; + uint32_t /* reserved */ : 12; + + /* bus_stripe_rd_pad_tb_unpack */ + uint32_t topUnapckPattern:4; + uint32_t /* reserved */ : 12; + uint32_t bottomUnapckPattern:4; + uint32_t /* reserved */ : 12; +} __attribute__ ((packed, aligned(4))); + +struct VFE_AxiRdFragIrqEnable { + uint32_t stripeRdFragirq0Enable:1; + uint32_t stripeRdFragirq1Enable:1; + uint32_t stripeRdFragirq2Enable:1; + uint32_t stripeRdFragirq3Enable:1; + uint32_t /* reserved */ : 28; +} __attribute__ ((packed, aligned(4))); + +int vfe_cmd_init(struct msm_vfe_callback *, struct platform_device *, void *); +void vfe_stats_af_stop(void); +void vfe_stop(void); +void vfe_update(void); +int vfe_rgb_gamma_update(struct vfe_cmd_rgb_gamma_config *); +int vfe_rgb_gamma_config(struct vfe_cmd_rgb_gamma_config *); +void vfe_stats_wb_exp_ack(struct vfe_cmd_stats_wb_exp_ack *); +void vfe_stats_af_ack(struct vfe_cmd_stats_af_ack *); +void vfe_start(struct vfe_cmd_start *); +void vfe_la_update(struct vfe_cmd_la_config *); +void vfe_la_config(struct vfe_cmd_la_config *); +void vfe_test_gen_start(struct vfe_cmd_test_gen_start *); +void vfe_frame_skip_update(struct vfe_cmd_frame_skip_update *); +void vfe_frame_skip_config(struct vfe_cmd_frame_skip_config *); +void vfe_output_clamp_config(struct vfe_cmd_output_clamp_config *); +void vfe_camif_frame_update(struct vfe_cmds_camif_frame *); +void vfe_color_correction_config(struct vfe_cmd_color_correction_config *); +void vfe_demosaic_abf_update(struct vfe_cmd_demosaic_abf_update *); +void vfe_demosaic_bpc_update(struct vfe_cmd_demosaic_bpc_update *); +void vfe_demosaic_config(struct vfe_cmd_demosaic_config *); +void vfe_demux_channel_gain_update(struct vfe_cmd_demux_channel_gain_config *); +void vfe_demux_channel_gain_config(struct vfe_cmd_demux_channel_gain_config *); +void vfe_black_level_update(struct vfe_cmd_black_level_config *); +void vfe_black_level_config(struct vfe_cmd_black_level_config *); +void vfe_asf_update(struct vfe_cmd_asf_update *); +void vfe_asf_config(struct vfe_cmd_asf_config *); +void vfe_white_balance_config(struct vfe_cmd_white_balance_config *); +void vfe_chroma_sup_config(struct vfe_cmd_chroma_suppression_config *); +void vfe_roll_off_config(struct 
vfe_cmd_roll_off_config *); +void vfe_chroma_subsample_config(struct vfe_cmd_chroma_subsample_config *); +void vfe_chroma_enhan_config(struct vfe_cmd_chroma_enhan_config *); +void vfe_scaler2cbcr_config(struct vfe_cmd_scaler2_config *); +void vfe_scaler2y_config(struct vfe_cmd_scaler2_config *); +void vfe_main_scaler_config(struct vfe_cmd_main_scaler_config *); +void vfe_stats_wb_exp_stop(void); +void vfe_stats_update_wb_exp(struct vfe_cmd_stats_wb_exp_update *); +void vfe_stats_update_af(struct vfe_cmd_stats_af_update *); +void vfe_stats_start_wb_exp(struct vfe_cmd_stats_wb_exp_start *); +void vfe_stats_start_af(struct vfe_cmd_stats_af_start *); +void vfe_stats_setting(struct vfe_cmd_stats_setting *); +void vfe_axi_input_config(struct vfe_cmd_axi_input_config *); +void vfe_stats_config(struct vfe_cmd_stats_setting *); +void vfe_axi_output_config(struct vfe_cmd_axi_output_config *); +void vfe_camif_config(struct vfe_cmd_camif_config *); +void vfe_fov_crop_config(struct vfe_cmd_fov_crop_config *); +void vfe_get_hw_version(struct vfe_cmd_hw_version *); +void vfe_reset(void); +void vfe_cmd_release(struct platform_device *); +#ifndef CONFIG_720P_CAMERA +void vfe_output1_ack(struct vfe_cmd_output_ack *); +void vfe_output2_ack(struct vfe_cmd_output_ack *); +#else +void vfe_output_p_ack(struct vfe_cmd_output_ack *); +void vfe_output_v_ack(struct vfe_cmd_output_ack *); +#endif +#endif /* __MSM_VFE8X_REG_H__ */ diff --git a/drivers/media/video/msm/mt9d112.c b/drivers/media/video/msm/mt9d112.c new file mode 100644 index 0000000000000..868445f1e04c9 --- /dev/null +++ b/drivers/media/video/msm/mt9d112.c @@ -0,0 +1,737 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "mt9d112.h" + +/* Micron MT9D112 Registers and their values */ +/* Sensor Core Registers */ +#define REG_MT9D112_MODEL_ID 0x3000 +#define MT9D112_MODEL_ID 0x1580 + +/* SOC Registers Page 1 */ +#define REG_MT9D112_SENSOR_RESET 0x301A +#define REG_MT9D112_STANDBY_CONTROL 0x3202 +#define REG_MT9D112_MCU_BOOT 0x3386 + +struct mt9d112_work { + struct work_struct work; +}; + +static struct mt9d112_work *mt9d112_sensorw; +static struct i2c_client *mt9d112_client; + +struct mt9d112_ctrl { + const struct msm_camera_sensor_info *sensordata; +}; + +static struct mt9d112_ctrl *mt9d112_ctrl; + +static DECLARE_WAIT_QUEUE_HEAD(mt9d112_wait_queue); + +static int mt9d112_reset(const struct msm_camera_sensor_info *dev) +{ + int rc = 0; + + rc = gpio_request(dev->sensor_reset, "mt9d112"); + + if (!rc) { + rc = gpio_direction_output(dev->sensor_reset, 0); + mdelay(20); + rc = gpio_direction_output(dev->sensor_reset, 1); + } + + gpio_free(dev->sensor_reset); + return rc; +} + +static int mt9d112_i2c_txdata(unsigned short saddr, + unsigned char *txdata, int length) +{ + struct i2c_msg msg[] = { + { + .addr = saddr, + .flags = 0, + .len = length, + .buf = txdata, + }, + }; + + if (i2c_transfer(mt9d112_client->adapter, msg, 1) < 0) { + CDBG("mt9d112_i2c_txdata failed\n"); + return -EIO; + } + + return 0; +} + +static int mt9d112_i2c_write(unsigned short saddr, + unsigned short waddr, unsigned short wdata, + enum mt9d112_width width) +{ + int rc = -EIO; + unsigned char buf[4]; + + memset(buf, 0, sizeof(buf)); + switch (width) { + case WORD_LEN:{ + buf[0] = (waddr & 0xFF00) >> 8; + buf[1] = (waddr & 0x00FF); + buf[2] = (wdata & 0xFF00) >> 8; + buf[3] = (wdata & 0x00FF); + + rc = mt9d112_i2c_txdata(saddr, buf, 4); + } + break; + + case BYTE_LEN:{ + buf[0] = waddr; + buf[1] = wdata; + rc = mt9d112_i2c_txdata(saddr, buf, 2); + } + break; + + default: + break; + } + + if (rc < 0) + CDBG("i2c_write failed, addr = 0x%x, val = 0x%x!\n", + waddr, wdata); + + return rc; +} + +static int mt9d112_i2c_write_table(struct mt9d112_i2c_reg_conf const + *reg_conf_tbl, int num_of_items_in_table) +{ + int i; + int rc = -EIO; + + for (i = 0; i < num_of_items_in_table; i++) { + rc = mt9d112_i2c_write(mt9d112_client->addr, + reg_conf_tbl->waddr, reg_conf_tbl->wdata, + reg_conf_tbl->width); + if (rc < 0) + break; + if (reg_conf_tbl->mdelay_time != 0) + mdelay(reg_conf_tbl->mdelay_time); + reg_conf_tbl++; + } + + return rc; +} + +static int mt9d112_i2c_rxdata(unsigned short saddr, + unsigned char *rxdata, int length) +{ + struct i2c_msg msgs[] = { + { + .addr = saddr, + .flags = 0, + .len = 2, + .buf = rxdata, + }, + { + .addr = saddr, + .flags = I2C_M_RD, + .len = length, + .buf = rxdata, + }, + }; + + if (i2c_transfer(mt9d112_client->adapter, msgs, 2) < 0) { + CDBG("mt9d112_i2c_rxdata failed!\n"); + return -EIO; + } + + return 0; +} + +static int mt9d112_i2c_read(unsigned short saddr, + unsigned short raddr, unsigned short *rdata, + enum mt9d112_width width) +{ + int rc = 0; + unsigned char buf[4]; + + if (!rdata) + return -EIO; + + memset(buf, 0, sizeof(buf)); + + switch (width) { + case WORD_LEN:{ + buf[0] = (raddr & 0xFF00) >> 8; + buf[1] = (raddr & 0x00FF); + + rc = mt9d112_i2c_rxdata(saddr, buf, 2); + if (rc < 0) + return rc; + + *rdata = buf[0] << 8 | buf[1]; + } + break; + + default: + break; + } + + if (rc < 0) + CDBG("mt9d112_i2c_read failed!\n"); + + return rc; +} + +static int mt9d112_set_lens_roll_off(void) +{ + int rc = 0; + 
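+	/* Descriptive note (not from the original patch): this streams the
+	 * lens roll-off (shading correction) table defined in mt9d112_reg.c
+	 * to the sensor over I2C; mt9d112_i2c_write_table() stops at the
+	 * first failed write and returns its error code. */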
rc = mt9d112_i2c_write_table(&mt9d112_regs.rftbl[0], + mt9d112_regs.rftbl_size); + return rc; +} + +static int mt9d112_reg_init(void) +{ + int array_length; + int i; + int rc; + + /* PLL Setup Start */ + rc = mt9d112_i2c_write_table(&mt9d112_regs.plltbl[0], + mt9d112_regs.plltbl_size); + + if (rc < 0) + return rc; + /* PLL Setup End */ + + array_length = mt9d112_regs.prev_snap_reg_settings_size; + + /* Configure sensor for Preview mode and Snapshot mode */ + for (i = 0; i < array_length; i++) { + rc = mt9d112_i2c_write(mt9d112_client->addr, + mt9d112_regs.prev_snap_reg_settings[i]. + register_address, + mt9d112_regs.prev_snap_reg_settings[i]. + register_value, WORD_LEN); + + if (rc < 0) + return rc; + } + + /* Configure for Noise Reduction, Saturation and Aperture Correction */ + array_length = mt9d112_regs.noise_reduction_reg_settings_size; + + for (i = 0; i < array_length; i++) { + rc = mt9d112_i2c_write(mt9d112_client->addr, + mt9d112_regs. + noise_reduction_reg_settings[i]. + register_address, + mt9d112_regs. + noise_reduction_reg_settings[i]. + register_value, WORD_LEN); + + if (rc < 0) + return rc; + } + + /* Set Color Kill Saturation point to optimum value */ + rc = mt9d112_i2c_write(mt9d112_client->addr, 0x35A4, 0x0593, WORD_LEN); + if (rc < 0) + return rc; + + rc = mt9d112_i2c_write_table(&mt9d112_regs.stbl[0], + mt9d112_regs.stbl_size); + if (rc < 0) + return rc; + + rc = mt9d112_set_lens_roll_off(); + if (rc < 0) + return rc; + + return 0; +} + +static int mt9d112_set_sensor_mode(int mode) +{ + uint16_t clock; + int rc = 0; + + switch (mode) { + case SENSOR_PREVIEW_MODE: + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x338C, 0xA20C, WORD_LEN); + if (rc < 0) + return rc; + + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x3390, 0x0004, WORD_LEN); + if (rc < 0) + return rc; + + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x338C, 0xA215, WORD_LEN); + if (rc < 0) + return rc; + + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x3390, 0x0004, WORD_LEN); + if (rc < 0) + return rc; + + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x338C, 0xA20B, WORD_LEN); + if (rc < 0) + return rc; + + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x3390, 0x0000, WORD_LEN); + if (rc < 0) + return rc; + + clock = 0x0250; + + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x341C, clock, WORD_LEN); + if (rc < 0) + return rc; + + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x338C, 0xA103, WORD_LEN); + if (rc < 0) + return rc; + + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x3390, 0x0001, WORD_LEN); + if (rc < 0) + return rc; + + mdelay(5); + break; + + case SENSOR_SNAPSHOT_MODE: + /* Switch to lower fps for Snapshot */ + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x341C, 0x0120, WORD_LEN); + if (rc < 0) + return rc; + + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x338C, 0xA120, WORD_LEN); + if (rc < 0) + return rc; + + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x3390, 0x0002, WORD_LEN); + if (rc < 0) + return rc; + + mdelay(5); + + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x338C, 0xA103, WORD_LEN); + if (rc < 0) + return rc; + + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x3390, 0x0002, WORD_LEN); + if (rc < 0) + return rc; + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int mt9d112_set_effect(int mode, int effect) +{ + uint16_t reg_addr; + uint16_t reg_val; + int rc = 0; + + switch (mode) { + case SENSOR_PREVIEW_MODE: + /* Context A Special Effects */ + reg_addr = 0x2799; + break; + + case SENSOR_SNAPSHOT_MODE: + /* Context B Special Effects 
*/ + reg_addr = 0x279B; + break; + + default: + reg_addr = 0x2799; + break; + } + + switch (effect) { + case CAMERA_EFFECT_OFF:{ + reg_val = 0x6440; + + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x338C, reg_addr, WORD_LEN); + if (rc < 0) + return rc; + + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x3390, reg_val, WORD_LEN); + if (rc < 0) + return rc; + } + break; + + case CAMERA_EFFECT_MONO:{ + reg_val = 0x6441; + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x338C, reg_addr, WORD_LEN); + if (rc < 0) + return rc; + + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x3390, reg_val, WORD_LEN); + if (rc < 0) + return rc; + } + break; + + case CAMERA_EFFECT_NEGATIVE:{ + reg_val = 0x6443; + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x338C, reg_addr, WORD_LEN); + if (rc < 0) + return rc; + + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x3390, reg_val, WORD_LEN); + if (rc < 0) + return rc; + } + break; + + case CAMERA_EFFECT_SOLARIZE:{ + reg_val = 0x6445; + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x338C, reg_addr, WORD_LEN); + if (rc < 0) + return rc; + + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x3390, reg_val, WORD_LEN); + if (rc < 0) + return rc; + } + break; + + case CAMERA_EFFECT_SEPIA:{ + reg_val = 0x6442; + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x338C, reg_addr, WORD_LEN); + if (rc < 0) + return rc; + + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x3390, reg_val, WORD_LEN); + if (rc < 0) + return rc; + } + break; + + case CAMERA_EFFECT_PASTEL: + case CAMERA_EFFECT_MOSAIC: + case CAMERA_EFFECT_RESIZE: + return -EINVAL; + + default:{ + reg_val = 0x6440; + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x338C, reg_addr, WORD_LEN); + if (rc < 0) + return rc; + + rc = mt9d112_i2c_write(mt9d112_client->addr, + 0x3390, reg_val, WORD_LEN); + if (rc < 0) + return rc; + + return -EINVAL; + } + } + + /* Refresh Sequencer */ + rc = mt9d112_i2c_write(mt9d112_client->addr, 0x338C, 0xA103, WORD_LEN); + if (rc < 0) + return rc; + + rc = mt9d112_i2c_write(mt9d112_client->addr, 0x3390, 0x0005, WORD_LEN); + + return rc; +} + +static int mt9d112_sensor_init_probe(const struct msm_camera_sensor_info *data) +{ + uint16_t model_id = 0; + int rc = 0; + + CDBG("init entry \n"); + rc = mt9d112_reset(data); + if (rc < 0) { + CDBG("reset failed!\n"); + goto init_probe_fail; + } + + mdelay(5); + + /* Micron suggested Power up block Start: + * Put MCU into Reset - Stop MCU */ + rc = mt9d112_i2c_write(mt9d112_client->addr, + REG_MT9D112_MCU_BOOT, 0x0501, WORD_LEN); + if (rc < 0) + goto init_probe_fail; + + /* Pull MCU from Reset - Start MCU */ + rc = mt9d112_i2c_write(mt9d112_client->addr, + REG_MT9D112_MCU_BOOT, 0x0500, WORD_LEN); + if (rc < 0) + goto init_probe_fail; + + mdelay(5); + + /* Micron Suggested - Power up block */ + rc = mt9d112_i2c_write(mt9d112_client->addr, + REG_MT9D112_SENSOR_RESET, 0x0ACC, WORD_LEN); + if (rc < 0) + goto init_probe_fail; + + rc = mt9d112_i2c_write(mt9d112_client->addr, + REG_MT9D112_STANDBY_CONTROL, 0x0008, WORD_LEN); + if (rc < 0) + goto init_probe_fail; + + /* FUSED_DEFECT_CORRECTION */ + rc = mt9d112_i2c_write(mt9d112_client->addr, 0x33F4, 0x031D, WORD_LEN); + if (rc < 0) + goto init_probe_fail; + + mdelay(5); + + /* Micron suggested Power up block End */ + /* Read the Model ID of the sensor */ + rc = mt9d112_i2c_read(mt9d112_client->addr, + REG_MT9D112_MODEL_ID, &model_id, WORD_LEN); + if (rc < 0) + goto init_probe_fail; + + CDBG("mt9d112 model_id = 0x%x\n", model_id); + + /* Check if it matches it with the value in Datasheet */ + if 
(model_id != MT9D112_MODEL_ID) { + rc = -EINVAL; + goto init_probe_fail; + } + + rc = mt9d112_reg_init(); + if (rc < 0) + goto init_probe_fail; + + return rc; + +init_probe_fail: + return rc; +} + +int mt9d112_sensor_init(const struct msm_camera_sensor_info *data) +{ + int rc = 0; + + mt9d112_ctrl = kzalloc(sizeof(struct mt9d112_ctrl), GFP_KERNEL); + if (!mt9d112_ctrl) { + CDBG("mt9d112_init failed!\n"); + rc = -ENOMEM; + goto init_done; + } + + if (data) + mt9d112_ctrl->sensordata = data; + + /* Input MCLK = 24MHz */ + msm_camio_clk_rate_set(24000000); + mdelay(5); + + msm_camio_camif_pad_reg_reset(); + + rc = mt9d112_sensor_init_probe(data); + if (rc < 0) { + CDBG("mt9d112_sensor_init failed!\n"); + goto init_fail; + } + +init_done: + return rc; + +init_fail: + kfree(mt9d112_ctrl); + return rc; +} + +static int mt9d112_init_client(struct i2c_client *client) +{ + /* Initialize the MSM_CAMI2C Chip */ + init_waitqueue_head(&mt9d112_wait_queue); + return 0; +} + +int mt9d112_sensor_config(void __user *argp) +{ + struct sensor_cfg_data cfg_data; + long rc = 0; + + if (copy_from_user(&cfg_data, + (void *)argp, sizeof(struct sensor_cfg_data))) + return -EFAULT; + + CDBG("mt9d112_ioctl, cfgtype = %d, mode = %d\n", + cfg_data.cfgtype, cfg_data.mode); + + switch (cfg_data.cfgtype) { + case CFG_SET_MODE: + rc = mt9d112_set_sensor_mode(cfg_data.mode); + break; + + case CFG_SET_EFFECT: + rc = mt9d112_set_effect(cfg_data.mode, cfg_data.cfg.effect); + break; + + case CFG_GET_AF_MAX_STEPS: + default: + rc = -EINVAL; + break; + } + + return rc; +} + +int mt9d112_sensor_release(void) +{ + int rc = 0; + + kfree(mt9d112_ctrl); + + return rc; +} + +static int mt9d112_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int rc = 0; + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + rc = -ENOTSUPP; + goto probe_failure; + } + + mt9d112_sensorw = kzalloc(sizeof(struct mt9d112_work), GFP_KERNEL); + + if (!mt9d112_sensorw) { + rc = -ENOMEM; + goto probe_failure; + } + + i2c_set_clientdata(client, mt9d112_sensorw); + mt9d112_init_client(client); + mt9d112_client = client; + + CDBG("mt9d112_probe succeeded!\n"); + + return 0; + +probe_failure: + kfree(mt9d112_sensorw); + mt9d112_sensorw = NULL; + CDBG("mt9d112_probe failed!\n"); + return rc; +} + +static const struct i2c_device_id mt9d112_i2c_id[] = { + {"mt9d112", 0}, + {}, +}; + +static struct i2c_driver mt9d112_i2c_driver = { + .id_table = mt9d112_i2c_id, + .probe = mt9d112_i2c_probe, + .remove = __exit_p(mt9d112_i2c_remove), + .driver = { + .name = "mt9d112", + }, +}; + +static int mt9d112_sensor_probe(const struct msm_camera_sensor_info *info, + struct msm_sensor_ctrl *s) +{ + int rc = i2c_add_driver(&mt9d112_i2c_driver); + if (rc < 0 || mt9d112_client == NULL) { + rc = -ENOTSUPP; + goto probe_done; + } + + /* Input MCLK = 24MHz */ + msm_camio_clk_rate_set(24000000); + mdelay(5); + + rc = mt9d112_sensor_init_probe(info); + if (rc < 0) + goto probe_done; + + s->s_init = mt9d112_sensor_init; + s->s_release = mt9d112_sensor_release; + s->s_config = mt9d112_sensor_config; + +probe_done: + CDBG("%s %s:%d\n", __FILE__, __func__, __LINE__); + return rc; +} + +static int __mt9d112_probe(struct platform_device *pdev) +{ + return msm_camera_drv_start(pdev, mt9d112_sensor_probe); +} + +static struct platform_driver msm_camera_driver = { + .probe = __mt9d112_probe, + .driver = { + .name = "msm_camera_mt9d112", + .owner = THIS_MODULE, + }, +}; + +static int __init mt9d112_init(void) +{ + return 
platform_driver_register(&msm_camera_driver); +} + +module_init(mt9d112_init); diff --git a/drivers/media/video/msm/mt9d112.h b/drivers/media/video/msm/mt9d112.h new file mode 100644 index 0000000000000..39777c7343a53 --- /dev/null +++ b/drivers/media/video/msm/mt9d112.h @@ -0,0 +1,52 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#ifndef MT9D112_H +#define MT9D112_H + +#include +#include + +enum mt9d112_width { + WORD_LEN, + BYTE_LEN +}; + +struct mt9d112_i2c_reg_conf { + unsigned short waddr; + unsigned short wdata; + enum mt9d112_width width; + unsigned short mdelay_time; +}; + +struct mt9d112_reg { + const struct register_address_value_pair *prev_snap_reg_settings; + uint16_t prev_snap_reg_settings_size; + const struct register_address_value_pair *noise_reduction_reg_settings; + uint16_t noise_reduction_reg_settings_size; + const struct mt9d112_i2c_reg_conf *plltbl; + uint16_t plltbl_size; + const struct mt9d112_i2c_reg_conf *stbl; + uint16_t stbl_size; + const struct mt9d112_i2c_reg_conf *rftbl; + uint16_t rftbl_size; +}; + +extern struct mt9d112_reg mt9d112_regs; + +#endif /* MT9D112_H */ diff --git a/drivers/media/video/msm/mt9d112_reg.c b/drivers/media/video/msm/mt9d112_reg.c new file mode 100644 index 0000000000000..f2ae0c7d1301f --- /dev/null +++ b/drivers/media/video/msm/mt9d112_reg.c @@ -0,0 +1,320 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ + +#include "mt9d112.h" + +struct register_address_value_pair + preview_snapshot_mode_reg_settings_array[] = { + {0x338C, 0x2703}, + {0x3390, 800}, /* Output Width (P) = 640 */ + {0x338C, 0x2705}, + {0x3390, 600}, /* Output Height (P) = 480 */ + {0x338C, 0x2707}, + {0x3390, 0x0640}, /* Output Width (S) = 1600 */ + {0x338C, 0x2709}, + {0x3390, 0x04B0}, /* Output Height (S) = 1200 */ + {0x338C, 0x270D}, + {0x3390, 0x0000}, /* Row Start (P) = 0 */ + {0x338C, 0x270F}, + {0x3390, 0x0000}, /* Column Start (P) = 0 */ + {0x338C, 0x2711}, + {0x3390, 0x04BD}, /* Row End (P) = 1213 */ + {0x338C, 0x2713}, + {0x3390, 0x064D}, /* Column End (P) = 1613 */ + {0x338C, 0x2715}, + {0x3390, 0x0000}, /* Extra Delay (P) = 0 */ + {0x338C, 0x2717}, + {0x3390, 0x2111}, /* Row Speed (P) = 8465 */ + {0x338C, 0x2719}, + {0x3390, 0x046C}, /* Read Mode (P) = 1132 */ + {0x338C, 0x271B}, + {0x3390, 0x024F}, /* Sensor_Sample_Time_pck(P) = 591 */ + {0x338C, 0x271D}, + {0x3390, 0x0102}, /* Sensor_Fine_Correction(P) = 258 */ + {0x338C, 0x271F}, + {0x3390, 0x0279}, /* Sensor_Fine_IT_min(P) = 633 */ + {0x338C, 0x2721}, + {0x3390, 0x0155}, /* Sensor_Fine_IT_max_margin(P) = 341 */ + {0x338C, 0x2723}, + {0x3390, 659}, /* Frame Lines (P) = 679 */ + {0x338C, 0x2725}, + {0x3390, 0x0824}, /* Line Length (P) = 2084 */ + {0x338C, 0x2727}, + {0x3390, 0x2020}, + {0x338C, 0x2729}, + {0x3390, 0x2020}, + {0x338C, 0x272B}, + {0x3390, 0x1020}, + {0x338C, 0x272D}, + {0x3390, 0x2007}, + {0x338C, 0x272F}, + {0x3390, 0x0004}, /* Row Start(S) = 4 */ + {0x338C, 0x2731}, + {0x3390, 0x0004}, /* Column Start(S) = 4 */ + {0x338C, 0x2733}, + {0x3390, 0x04BB}, /* Row End(S) = 1211 */ + {0x338C, 0x2735}, + {0x3390, 0x064B}, /* Column End(S) = 1611 */ + {0x338C, 0x2737}, + {0x3390, 0x04CE}, /* Extra Delay(S) = 1230 */ + {0x338C, 0x2739}, + {0x3390, 0x2111}, /* Row Speed(S) = 8465 */ + {0x338C, 0x273B}, + {0x3390, 0x0024}, /* Read Mode(S) = 36 */ + {0x338C, 0x273D}, + {0x3390, 0x0120}, /* Sensor sample time pck(S) = 288 */ + {0x338C, 0x2741}, + {0x3390, 0x0169}, /* Sensor_Fine_IT_min(P) = 361 */ + {0x338C, 0x2745}, + {0x3390, 0x04FF}, /* Frame Lines(S) = 1279 */ + {0x338C, 0x2747}, + {0x3390, 0x0824}, /* Line Length(S) = 2084 */ + {0x338C, 0x2751}, + {0x3390, 0x0000}, /* Crop_X0(P) = 0 */ + {0x338C, 0x2753}, + {0x3390, 0x0320}, /* Crop_X1(P) = 800 */ + {0x338C, 0x2755}, + {0x3390, 0x0000}, /* Crop_Y0(P) = 0 */ + {0x338C, 0x2757}, + {0x3390, 0x0258}, /* Crop_Y1(P) = 600 */ + {0x338C, 0x275F}, + {0x3390, 0x0000}, /* Crop_X0(S) = 0 */ + {0x338C, 0x2761}, + {0x3390, 0x0640}, /* Crop_X1(S) = 1600 */ + {0x338C, 0x2763}, + {0x3390, 0x0000}, /* Crop_Y0(S) = 0 */ + {0x338C, 0x2765}, + {0x3390, 0x04B0}, /* Crop_Y1(S) = 1200 */ + {0x338C, 0x222E}, + {0x3390, 0x00A0}, /* R9 Step = 160 */ + {0x338C, 0xA408}, + {0x3390, 0x001F}, + {0x338C, 0xA409}, + {0x3390, 0x0021}, + {0x338C, 0xA40A}, + {0x3390, 0x0025}, + {0x338C, 0xA40B}, + {0x3390, 0x0027}, + {0x338C, 0x2411}, + {0x3390, 0x00A0}, + {0x338C, 0x2413}, + {0x3390, 0x00C0}, + {0x338C, 0x2415}, + {0x3390, 0x00A0}, + {0x338C, 0x2417}, + {0x3390, 0x00C0}, + {0x338C, 0x2799}, + {0x3390, 0x6408}, /* MODE_SPEC_EFFECTS(P) */ + {0x338C, 0x279B}, + {0x3390, 0x6408}, /* MODE_SPEC_EFFECTS(S) */ +}; + +static struct register_address_value_pair + noise_reduction_reg_settings_array[] = { + {0x338C, 0xA76D}, + {0x3390, 0x0003}, + {0x338C, 0xA76E}, + {0x3390, 0x0003}, + {0x338C, 0xA76F}, + {0x3390, 0}, + {0x338C, 0xA770}, + {0x3390, 21}, + {0x338C, 0xA771}, + {0x3390, 37}, + {0x338C, 0xA772}, + {0x3390, 63}, + {0x338C, 
0xA773}, + {0x3390, 100}, + {0x338C, 0xA774}, + {0x3390, 128}, + {0x338C, 0xA775}, + {0x3390, 151}, + {0x338C, 0xA776}, + {0x3390, 169}, + {0x338C, 0xA777}, + {0x3390, 186}, + {0x338C, 0xA778}, + {0x3390, 199}, + {0x338C, 0xA779}, + {0x3390, 210}, + {0x338C, 0xA77A}, + {0x3390, 220}, + {0x338C, 0xA77B}, + {0x3390, 228}, + {0x338C, 0xA77C}, + {0x3390, 234}, + {0x338C, 0xA77D}, + {0x3390, 240}, + {0x338C, 0xA77E}, + {0x3390, 244}, + {0x338C, 0xA77F}, + {0x3390, 248}, + {0x338C, 0xA780}, + {0x3390, 252}, + {0x338C, 0xA781}, + {0x3390, 255}, + {0x338C, 0xA782}, + {0x3390, 0}, + {0x338C, 0xA783}, + {0x3390, 21}, + {0x338C, 0xA784}, + {0x3390, 37}, + {0x338C, 0xA785}, + {0x3390, 63}, + {0x338C, 0xA786}, + {0x3390, 100}, + {0x338C, 0xA787}, + {0x3390, 128}, + {0x338C, 0xA788}, + {0x3390, 151}, + {0x338C, 0xA789}, + {0x3390, 169}, + {0x338C, 0xA78A}, + {0x3390, 186}, + {0x338C, 0xA78B}, + {0x3390, 199}, + {0x338C, 0xA78C}, + {0x3390, 210}, + {0x338C, 0xA78D}, + {0x3390, 220}, + {0x338C, 0xA78E}, + {0x3390, 228}, + {0x338C, 0xA78F}, + {0x3390, 234}, + {0x338C, 0xA790}, + {0x3390, 240}, + {0x338C, 0xA791}, + {0x3390, 244}, + {0x338C, 0xA793}, + {0x3390, 252}, + {0x338C, 0xA794}, + {0x3390, 255}, + {0x338C, 0xA103}, + {0x3390, 6}, +}; + +static const struct mt9d112_i2c_reg_conf const lens_roll_off_tbl[] = { + {0x34CE, 0x81A0, WORD_LEN, 0}, + {0x34D0, 0x6331, WORD_LEN, 0}, + {0x34D2, 0x3394, WORD_LEN, 0}, + {0x34D4, 0x9966, WORD_LEN, 0}, + {0x34D6, 0x4B25, WORD_LEN, 0}, + {0x34D8, 0x2670, WORD_LEN, 0}, + {0x34DA, 0x724C, WORD_LEN, 0}, + {0x34DC, 0xFFFD, WORD_LEN, 0}, + {0x34DE, 0x00CA, WORD_LEN, 0}, + {0x34E6, 0x00AC, WORD_LEN, 0}, + {0x34EE, 0x0EE1, WORD_LEN, 0}, + {0x34F6, 0x0D87, WORD_LEN, 0}, + {0x3500, 0xE1F7, WORD_LEN, 0}, + {0x3508, 0x1CF4, WORD_LEN, 0}, + {0x3510, 0x1D28, WORD_LEN, 0}, + {0x3518, 0x1F26, WORD_LEN, 0}, + {0x3520, 0x2220, WORD_LEN, 0}, + {0x3528, 0x333D, WORD_LEN, 0}, + {0x3530, 0x15D9, WORD_LEN, 0}, + {0x3538, 0xCFB8, WORD_LEN, 0}, + {0x354C, 0x05FE, WORD_LEN, 0}, + {0x3544, 0x05F8, WORD_LEN, 0}, + {0x355C, 0x0596, WORD_LEN, 0}, + {0x3554, 0x0611, WORD_LEN, 0}, + {0x34E0, 0x00F2, WORD_LEN, 0}, + {0x34E8, 0x00A8, WORD_LEN, 0}, + {0x34F0, 0x0F7B, WORD_LEN, 0}, + {0x34F8, 0x0CD7, WORD_LEN, 0}, + {0x3502, 0xFEDB, WORD_LEN, 0}, + {0x350A, 0x13E4, WORD_LEN, 0}, + {0x3512, 0x1F2C, WORD_LEN, 0}, + {0x351A, 0x1D20, WORD_LEN, 0}, + {0x3522, 0x2422, WORD_LEN, 0}, + {0x352A, 0x2925, WORD_LEN, 0}, + {0x3532, 0x1D04, WORD_LEN, 0}, + {0x353A, 0xFBF2, WORD_LEN, 0}, + {0x354E, 0x0616, WORD_LEN, 0}, + {0x3546, 0x0597, WORD_LEN, 0}, + {0x355E, 0x05CD, WORD_LEN, 0}, + {0x3556, 0x0529, WORD_LEN, 0}, + {0x34E4, 0x00B2, WORD_LEN, 0}, + {0x34EC, 0x005E, WORD_LEN, 0}, + {0x34F4, 0x0F43, WORD_LEN, 0}, + {0x34FC, 0x0E2F, WORD_LEN, 0}, + {0x3506, 0xF9FC, WORD_LEN, 0}, + {0x350E, 0x0CE4, WORD_LEN, 0}, + {0x3516, 0x1E1E, WORD_LEN, 0}, + {0x351E, 0x1B19, WORD_LEN, 0}, + {0x3526, 0x151B, WORD_LEN, 0}, + {0x352E, 0x1416, WORD_LEN, 0}, + {0x3536, 0x10FC, WORD_LEN, 0}, + {0x353E, 0xC018, WORD_LEN, 0}, + {0x3552, 0x06B4, WORD_LEN, 0}, + {0x354A, 0x0506, WORD_LEN, 0}, + {0x3562, 0x06AB, WORD_LEN, 0}, + {0x355A, 0x063A, WORD_LEN, 0}, + {0x34E2, 0x00E5, WORD_LEN, 0}, + {0x34EA, 0x008B, WORD_LEN, 0}, + {0x34F2, 0x0E4C, WORD_LEN, 0}, + {0x34FA, 0x0CA3, WORD_LEN, 0}, + {0x3504, 0x0907, WORD_LEN, 0}, + {0x350C, 0x1DFD, WORD_LEN, 0}, + {0x3514, 0x1E24, WORD_LEN, 0}, + {0x351C, 0x2529, WORD_LEN, 0}, + {0x3524, 0x1D20, WORD_LEN, 0}, + {0x352C, 0x2332, WORD_LEN, 0}, + {0x3534, 0x10E9, WORD_LEN, 0}, + {0x353C, 0x0BCB, 
WORD_LEN, 0}, + {0x3550, 0x04EF, WORD_LEN, 0}, + {0x3548, 0x0609, WORD_LEN, 0}, + {0x3560, 0x0580, WORD_LEN, 0}, + {0x3558, 0x05DD, WORD_LEN, 0}, + {0x3540, 0x0000, WORD_LEN, 0}, + {0x3542, 0x0000, WORD_LEN, 0} +}; + +static const struct mt9d112_i2c_reg_conf const pll_setup_tbl[] = { + {0x341E, 0x8F09, WORD_LEN, 0}, + {0x341C, 0x0250, WORD_LEN, 0}, + {0x341E, 0x8F09, WORD_LEN, 5}, + {0x341E, 0x8F08, WORD_LEN, 0} +}; + +/* Refresh Sequencer */ +static const struct mt9d112_i2c_reg_conf const sequencer_tbl[] = { + {0x338C, 0x2799, WORD_LEN, 0}, + {0x3390, 0x6440, WORD_LEN, 5}, + {0x338C, 0x279B, WORD_LEN, 0}, + {0x3390, 0x6440, WORD_LEN, 5}, + {0x338C, 0xA103, WORD_LEN, 0}, + {0x3390, 0x0005, WORD_LEN, 5}, + {0x338C, 0xA103, WORD_LEN, 0}, + {0x3390, 0x0006, WORD_LEN, 5} +}; + +struct mt9d112_reg mt9d112_regs = { + .prev_snap_reg_settings = &preview_snapshot_mode_reg_settings_array[0], + .prev_snap_reg_settings_size = + ARRAY_SIZE(preview_snapshot_mode_reg_settings_array), + .noise_reduction_reg_settings = &noise_reduction_reg_settings_array[0], + .noise_reduction_reg_settings_size = + ARRAY_SIZE(noise_reduction_reg_settings_array), + .plltbl = pll_setup_tbl, + .plltbl_size = ARRAY_SIZE(pll_setup_tbl), + .stbl = sequencer_tbl, + .stbl_size = ARRAY_SIZE(sequencer_tbl), + .rftbl = lens_roll_off_tbl, + .rftbl_size = ARRAY_SIZE(lens_roll_off_tbl) +}; diff --git a/drivers/media/video/msm/mt9p012.h b/drivers/media/video/msm/mt9p012.h new file mode 100644 index 0000000000000..4c05f79e14d2e --- /dev/null +++ b/drivers/media/video/msm/mt9p012.h @@ -0,0 +1,64 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ + +#ifndef MT9T012_H +#define MT9T012_H + +#include + +struct reg_struct { + uint16_t vt_pix_clk_div; /* 0x0300 */ + uint16_t vt_sys_clk_div; /* 0x0302 */ + uint16_t pre_pll_clk_div; /* 0x0304 */ + uint16_t pll_multiplier; /* 0x0306 */ + uint16_t op_pix_clk_div; /* 0x0308 */ + uint16_t op_sys_clk_div; /* 0x030A */ + uint16_t scale_m; /* 0x0404 */ + uint16_t row_speed; /* 0x3016 */ + uint16_t x_addr_start; /* 0x3004 */ + uint16_t x_addr_end; /* 0x3008 */ + uint16_t y_addr_start; /* 0x3002 */ + uint16_t y_addr_end; /* 0x3006 */ + uint16_t read_mode; /* 0x3040 */ + uint16_t x_output_size; /* 0x034C */ + uint16_t y_output_size; /* 0x034E */ + uint16_t line_length_pck; /* 0x300C */ + uint16_t frame_length_lines; /* 0x300A */ + uint16_t coarse_int_time; /* 0x3012 */ + uint16_t fine_int_time; /* 0x3014 */ +}; + +struct mt9p012_i2c_reg_conf { + unsigned short waddr; + unsigned short wdata; +}; + +struct mt9p012_reg { + struct reg_struct *reg_pat; + uint16_t reg_pat_size; + struct mt9p012_i2c_reg_conf *ttbl; + uint16_t ttbl_size; + struct mt9p012_i2c_reg_conf *lctbl; + uint16_t lctbl_size; + struct mt9p012_i2c_reg_conf *rftbl; + uint16_t rftbl_size; +}; + +extern struct mt9p012_reg mt9p012_regs; + +#endif /* MT9T012_H */ diff --git a/drivers/media/video/msm/mt9p012_fox.c b/drivers/media/video/msm/mt9p012_fox.c new file mode 100644 index 0000000000000..299c59bae0a5e --- /dev/null +++ b/drivers/media/video/msm/mt9p012_fox.c @@ -0,0 +1,1329 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mt9p012.h" + +/*============================================================= + SENSOR REGISTER DEFINES +==============================================================*/ +#define MT9P012_REG_MODEL_ID 0x0000 +#define MT9P012_MODEL_ID 0x2801 +#define REG_GROUPED_PARAMETER_HOLD 0x0104 +#define GROUPED_PARAMETER_HOLD 0x0100 +#define GROUPED_PARAMETER_UPDATE 0x0000 +#define REG_COARSE_INT_TIME 0x3012 +#define REG_VT_PIX_CLK_DIV 0x0300 +#define REG_VT_SYS_CLK_DIV 0x0302 +#define REG_PRE_PLL_CLK_DIV 0x0304 +#define REG_PLL_MULTIPLIER 0x0306 +#define REG_OP_PIX_CLK_DIV 0x0308 +#define REG_OP_SYS_CLK_DIV 0x030A +#define REG_SCALE_M 0x0404 +#define REG_FRAME_LENGTH_LINES 0x300A +#define REG_LINE_LENGTH_PCK 0x300C +#define REG_X_ADDR_START 0x3004 +#define REG_Y_ADDR_START 0x3002 +#define REG_X_ADDR_END 0x3008 +#define REG_Y_ADDR_END 0x3006 +#define REG_X_OUTPUT_SIZE 0x034C +#define REG_Y_OUTPUT_SIZE 0x034E +#define REG_FINE_INTEGRATION_TIME 0x3014 +#define REG_ROW_SPEED 0x3016 +#define MT9P012_REG_RESET_REGISTER 0x301A +#define MT9P012_RESET_REGISTER_PWON 0x10CC +#define MT9P012_RESET_REGISTER_PWOFF 0x10C8 +#define REG_READ_MODE 0x3040 +#define REG_GLOBAL_GAIN 0x305E +#define REG_TEST_PATTERN_MODE 0x3070 + +#define MT9P012_REV_7 + +enum mt9p012_test_mode { + TEST_OFF, + TEST_1, + TEST_2, + TEST_3 +}; + +enum mt9p012_resolution { + QTR_SIZE, + FULL_SIZE, + INVALID_SIZE +}; + +enum mt9p012_reg_update { + /* Sensor egisters that need to be updated during initialization */ + REG_INIT, + /* Sensor egisters that needs periodic I2C writes */ + UPDATE_PERIODIC, + /* All the sensor Registers will be updated */ + UPDATE_ALL, + /* Not valid update */ + UPDATE_INVALID +}; + +enum mt9p012_setting { + RES_PREVIEW, + RES_CAPTURE +}; + +/* actuator's Slave Address */ +#define MT9P012_AF_I2C_ADDR 0x18 + +/* AF Total steps parameters */ +#define MT9P012_STEPS_NEAR_TO_CLOSEST_INF 32 +#define MT9P012_TOTAL_STEPS_NEAR_TO_FAR 32 + +#define MT9P012_MU5M0_PREVIEW_DUMMY_PIXELS 0 +#define MT9P012_MU5M0_PREVIEW_DUMMY_LINES 0 + +/* Time in milisecs for waiting for the sensor to reset.*/ +#define MT9P012_RESET_DELAY_MSECS 66 + +/* for 20 fps preview */ +#define MT9P012_DEFAULT_CLOCK_RATE 24000000 +#define MT9P012_DEFAULT_MAX_FPS 26 /* ???? 
*/ + +struct mt9p012_work { + struct work_struct work; +}; +static struct mt9p012_work *mt9p012_sensorw; +static struct i2c_client *mt9p012_client; + +struct mt9p012_ctrl { + const struct msm_camera_sensor_info *sensordata; + + int sensormode; + uint32_t fps_divider; /* init to 1 * 0x00000400 */ + uint32_t pict_fps_divider; /* init to 1 * 0x00000400 */ + + uint16_t curr_lens_pos; + uint16_t init_curr_lens_pos; + uint16_t my_reg_gain; + uint32_t my_reg_line_count; + + enum mt9p012_resolution prev_res; + enum mt9p012_resolution pict_res; + enum mt9p012_resolution curr_res; + enum mt9p012_test_mode set_test; +}; + +static struct mt9p012_ctrl *mt9p012_ctrl; +static DECLARE_WAIT_QUEUE_HEAD(mt9p012_wait_queue); + +static int mt9p012_i2c_rxdata(unsigned short saddr, unsigned char *rxdata, + int length) +{ + struct i2c_msg msgs[] = { + { + .addr = saddr, + .flags = 0, + .len = 2, + .buf = rxdata, + }, + { + .addr = saddr, + .flags = I2C_M_RD, + .len = length, + .buf = rxdata, + }, + }; + + if (i2c_transfer(mt9p012_client->adapter, msgs, 2) < 0) { + CDBG("mt9p012_i2c_rxdata failed!\n"); + return -EIO; + } + + return 0; +} + +static int mt9p012_i2c_read_w(unsigned short saddr, unsigned short raddr, + unsigned short *rdata) +{ + int rc = 0; + unsigned char buf[4]; + + if (!rdata) + return -EIO; + + memset(buf, 0, sizeof(buf)); + + buf[0] = (raddr & 0xFF00) >> 8; + buf[1] = (raddr & 0x00FF); + + rc = mt9p012_i2c_rxdata(saddr, buf, 2); + if (rc < 0) + return rc; + + *rdata = buf[0] << 8 | buf[1]; + + if (rc < 0) + CDBG("mt9p012_i2c_read failed!\n"); + + return rc; +} + +static int mt9p012_i2c_txdata(unsigned short saddr, unsigned char *txdata, + int length) +{ + struct i2c_msg msg[] = { + { + .addr = saddr, + .flags = 0, + .len = length, + .buf = txdata, + }, + }; + + if (i2c_transfer(mt9p012_client->adapter, msg, 1) < 0) { + CDBG("mt9p012_i2c_txdata failed\n"); + return -EIO; + } + + return 0; +} + +static int mt9p012_i2c_write_b(unsigned short saddr, unsigned short baddr, + unsigned short bdata) +{ + int rc = -EIO; + unsigned char buf[2]; + + memset(buf, 0, sizeof(buf)); + buf[0] = baddr; + buf[1] = bdata; + rc = mt9p012_i2c_txdata(saddr, buf, 2); + + if (rc < 0) + CDBG("i2c_write failed, saddr = 0x%x addr = 0x%x, val =0x%x!\n", + saddr, baddr, bdata); + + return rc; +} + +static int mt9p012_i2c_write_w(unsigned short saddr, unsigned short waddr, + unsigned short wdata) +{ + int rc = -EIO; + unsigned char buf[4]; + + memset(buf, 0, sizeof(buf)); + buf[0] = (waddr & 0xFF00) >> 8; + buf[1] = (waddr & 0x00FF); + buf[2] = (wdata & 0xFF00) >> 8; + buf[3] = (wdata & 0x00FF); + + rc = mt9p012_i2c_txdata(saddr, buf, 4); + + if (rc < 0) + CDBG("i2c_write_w failed, addr = 0x%x, val = 0x%x!\n", + waddr, wdata); + + return rc; +} + +static int mt9p012_i2c_write_w_table(struct mt9p012_i2c_reg_conf + *reg_conf_tbl, int num) +{ + int i; + int rc = -EIO; + + for (i = 0; i < num; i++) { + rc = mt9p012_i2c_write_w(mt9p012_client->addr, + reg_conf_tbl->waddr, + reg_conf_tbl->wdata); + if (rc < 0) + break; + reg_conf_tbl++; + } + + return rc; +} + +static int mt9p012_test(enum mt9p012_test_mode mo) +{ + int rc = 0; + + rc = mt9p012_i2c_write_w(mt9p012_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_HOLD); + if (rc < 0) + return rc; + + if (mo == TEST_OFF) + return 0; + else { + rc = mt9p012_i2c_write_w_table(mt9p012_regs.ttbl, + mt9p012_regs.ttbl_size); + if (rc < 0) + return rc; + + rc = mt9p012_i2c_write_w(mt9p012_client->addr, + REG_TEST_PATTERN_MODE, (uint16_t) mo); + if (rc < 0) + return rc; + } + + 
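+	/* Release the grouped-parameter hold: writing
+	 * GROUPED_PARAMETER_UPDATE to REG_GROUPED_PARAMETER_HOLD lets the
+	 * test-pattern register writes queued above take effect together
+	 * (typically on the next frame boundary).
+	 */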
rc = mt9p012_i2c_write_w(mt9p012_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_UPDATE); + if (rc < 0) + return rc; + + return rc; +} + +static int mt9p012_lens_shading_enable(uint8_t is_enable) +{ + int rc = 0; + + CDBG("%s: entered. enable = %d\n", __func__, is_enable); + + rc = mt9p012_i2c_write_w(mt9p012_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_HOLD); + if (rc < 0) + return rc; + + rc = mt9p012_i2c_write_w(mt9p012_client->addr, 0x3780, + ((uint16_t) is_enable) << 15); + if (rc < 0) + return rc; + + rc = mt9p012_i2c_write_w(mt9p012_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_UPDATE); + + CDBG("%s: exiting. rc = %d\n", __func__, rc); + return rc; +} + +static int mt9p012_set_lc(void) +{ + int rc; + + rc = mt9p012_i2c_write_w_table(mt9p012_regs.lctbl, + mt9p012_regs.lctbl_size); + if (rc < 0) + return rc; + + rc = mt9p012_i2c_write_w_table(mt9p012_regs.rftbl, + mt9p012_regs.rftbl_size); + + return rc; +} + +static void mt9p012_get_pict_fps(uint16_t fps, uint16_t *pfps) +{ + /* input fps is preview fps in Q8 format */ + uint32_t divider; /*Q10 */ + uint32_t pclk_mult; /*Q10 */ + + if (mt9p012_ctrl->prev_res == QTR_SIZE) { + divider = (uint32_t) + (((mt9p012_regs.reg_pat[RES_PREVIEW].frame_length_lines * + mt9p012_regs.reg_pat[RES_PREVIEW].line_length_pck) * + 0x00000400) / + (mt9p012_regs.reg_pat[RES_CAPTURE].frame_length_lines * + mt9p012_regs.reg_pat[RES_CAPTURE].line_length_pck)); + + pclk_mult = + (uint32_t) ((mt9p012_regs.reg_pat[RES_CAPTURE]. + pll_multiplier * 0x00000400) / + (mt9p012_regs.reg_pat[RES_PREVIEW]. + pll_multiplier)); + } else { + /* full size resolution used for preview. */ + divider = 0x00000400; /*1.0 */ + pclk_mult = 0x00000400; /*1.0 */ + } + + /* Verify PCLK settings and frame sizes. */ + *pfps = (uint16_t) (fps * divider * pclk_mult / 0x00000400 / + 0x00000400); +} + +static uint16_t mt9p012_get_prev_lines_pf(void) +{ + if (mt9p012_ctrl->prev_res == QTR_SIZE) + return mt9p012_regs.reg_pat[RES_PREVIEW].frame_length_lines; + else + return mt9p012_regs.reg_pat[RES_CAPTURE].frame_length_lines; +} + +static uint16_t mt9p012_get_prev_pixels_pl(void) +{ + if (mt9p012_ctrl->prev_res == QTR_SIZE) + return mt9p012_regs.reg_pat[RES_PREVIEW].line_length_pck; + else + return mt9p012_regs.reg_pat[RES_CAPTURE].line_length_pck; +} + +static uint16_t mt9p012_get_pict_lines_pf(void) +{ + return mt9p012_regs.reg_pat[RES_CAPTURE].frame_length_lines; +} + +static uint16_t mt9p012_get_pict_pixels_pl(void) +{ + return mt9p012_regs.reg_pat[RES_CAPTURE].line_length_pck; +} + +static uint32_t mt9p012_get_pict_max_exp_lc(void) +{ + uint16_t snapshot_lines_per_frame; + + if (mt9p012_ctrl->pict_res == QTR_SIZE) + snapshot_lines_per_frame = + mt9p012_regs.reg_pat[RES_PREVIEW].frame_length_lines - 1; + else + snapshot_lines_per_frame = + mt9p012_regs.reg_pat[RES_CAPTURE].frame_length_lines - 1; + + return snapshot_lines_per_frame * 24; +} + +static int mt9p012_set_fps(struct fps_cfg *fps) +{ + /* input is new fps in Q10 format */ + int rc = 0; + + mt9p012_ctrl->fps_divider = fps->fps_div; + mt9p012_ctrl->pict_fps_divider = fps->pict_fps_div; + + rc = mt9p012_i2c_write_w(mt9p012_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_HOLD); + if (rc < 0) + return -EBUSY; + + rc = mt9p012_i2c_write_w(mt9p012_client->addr, + REG_LINE_LENGTH_PCK, + (mt9p012_regs.reg_pat[RES_PREVIEW]. 
+ line_length_pck * fps->f_mult / 0x00000400)); + if (rc < 0) + return rc; + + rc = mt9p012_i2c_write_w(mt9p012_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_UPDATE); + + return rc; +} + +static int mt9p012_write_exp_gain(uint16_t gain, uint32_t line) +{ + uint16_t max_legal_gain = 0x01FF; + uint32_t line_length_ratio = 0x00000400; + enum mt9p012_setting setting; + int rc = 0; + + CDBG("Line:%d mt9p012_write_exp_gain \n", __LINE__); + + if (mt9p012_ctrl->sensormode == SENSOR_PREVIEW_MODE) { + mt9p012_ctrl->my_reg_gain = gain; + mt9p012_ctrl->my_reg_line_count = (uint16_t) line; + } + + if (gain > max_legal_gain) { + CDBG("Max legal gain Line:%d \n", __LINE__); + gain = max_legal_gain; + } + + /* Verify no overflow */ + if (mt9p012_ctrl->sensormode != SENSOR_SNAPSHOT_MODE) { + line = (uint32_t) (line * mt9p012_ctrl->fps_divider / + 0x00000400); + setting = RES_PREVIEW; + } else { + line = (uint32_t) (line * mt9p012_ctrl->pict_fps_divider / + 0x00000400); + setting = RES_CAPTURE; + } + + /* Set digital gain to 1 */ +#ifdef MT9P012_REV_7 + gain |= 0x1000; +#else + gain |= 0x0200; +#endif + + if ((mt9p012_regs.reg_pat[setting].frame_length_lines - 1) < line) { + line_length_ratio = (uint32_t) (line * 0x00000400) / + (mt9p012_regs.reg_pat[setting].frame_length_lines - 1); + } else + line_length_ratio = 0x00000400; + + rc = mt9p012_i2c_write_w(mt9p012_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_HOLD); + if (rc < 0) { + CDBG("mt9p012_i2c_write_w failed... Line:%d \n", __LINE__); + return rc; + } + + rc = mt9p012_i2c_write_w(mt9p012_client->addr, REG_GLOBAL_GAIN, gain); + if (rc < 0) { + CDBG("mt9p012_i2c_write_w failed... Line:%d \n", __LINE__); + return rc; + } + + rc = mt9p012_i2c_write_w(mt9p012_client->addr, + REG_COARSE_INT_TIME, line); + if (rc < 0) { + CDBG("mt9p012_i2c_write_w failed... Line:%d \n", __LINE__); + return rc; + } + + CDBG("mt9p012_write_exp_gain: gain = %d, line = %d\n", gain, line); + + rc = mt9p012_i2c_write_w(mt9p012_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_UPDATE); + if (rc < 0) + CDBG("mt9p012_i2c_write_w failed... Line:%d \n", __LINE__); + + return rc; +} + +static int mt9p012_set_pict_exp_gain(uint16_t gain, uint32_t line) +{ + int rc = 0; + + CDBG("Line:%d mt9p012_set_pict_exp_gain \n", __LINE__); + + rc = mt9p012_write_exp_gain(gain, line); + if (rc < 0) { + CDBG("Line:%d mt9p012_set_pict_exp_gain failed... \n", + __LINE__); + return rc; + } + + rc = mt9p012_i2c_write_w(mt9p012_client->addr, + MT9P012_REG_RESET_REGISTER, 0x10CC | 0x0002); + if (rc < 0) { + CDBG("mt9p012_i2c_write_w failed... 
Line:%d \n", __LINE__); + return rc; + } + + mdelay(5); + + /* camera_timed_wait(snapshot_wait*exposure_ratio); */ + return rc; +} + +static int mt9p012_setting(enum mt9p012_reg_update rupdate, + enum mt9p012_setting rt) +{ + int rc = 0; + + switch (rupdate) { + case UPDATE_PERIODIC: + if (rt == RES_PREVIEW || rt == RES_CAPTURE) { + + struct mt9p012_i2c_reg_conf ppc_tbl[] = { + {REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_HOLD}, + {REG_ROW_SPEED, + mt9p012_regs.reg_pat[rt].row_speed}, + {REG_X_ADDR_START, + mt9p012_regs.reg_pat[rt].x_addr_start}, + {REG_X_ADDR_END, + mt9p012_regs.reg_pat[rt].x_addr_end}, + {REG_Y_ADDR_START, + mt9p012_regs.reg_pat[rt].y_addr_start}, + {REG_Y_ADDR_END, + mt9p012_regs.reg_pat[rt].y_addr_end}, + {REG_READ_MODE, + mt9p012_regs.reg_pat[rt].read_mode}, + {REG_SCALE_M, mt9p012_regs.reg_pat[rt].scale_m}, + {REG_X_OUTPUT_SIZE, + mt9p012_regs.reg_pat[rt].x_output_size}, + {REG_Y_OUTPUT_SIZE, + mt9p012_regs.reg_pat[rt].y_output_size}, + + {REG_LINE_LENGTH_PCK, + mt9p012_regs.reg_pat[rt].line_length_pck}, + {REG_FRAME_LENGTH_LINES, + (mt9p012_regs.reg_pat[rt].frame_length_lines * + mt9p012_ctrl->fps_divider / 0x00000400)}, + {REG_COARSE_INT_TIME, + mt9p012_regs.reg_pat[rt].coarse_int_time}, + {REG_FINE_INTEGRATION_TIME, + mt9p012_regs.reg_pat[rt].fine_int_time}, + {REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_UPDATE}, + }; + + rc = mt9p012_i2c_write_w_table(&ppc_tbl[0], + ARRAY_SIZE(ppc_tbl)); + if (rc < 0) + return rc; + + rc = mt9p012_test(mt9p012_ctrl->set_test); + if (rc < 0) + return rc; + + rc = mt9p012_i2c_write_w(mt9p012_client->addr, + MT9P012_REG_RESET_REGISTER, + MT9P012_RESET_REGISTER_PWON | + 0x0002); + if (rc < 0) + return rc; + + mdelay(5); /* 15? wait for sensor to transition */ + + return rc; + } + break; /* UPDATE_PERIODIC */ + + case REG_INIT: + if (rt == RES_PREVIEW || rt == RES_CAPTURE) { + struct mt9p012_i2c_reg_conf ipc_tbl1[] = { + {MT9P012_REG_RESET_REGISTER, + MT9P012_RESET_REGISTER_PWOFF}, + {REG_VT_PIX_CLK_DIV, + mt9p012_regs.reg_pat[rt].vt_pix_clk_div}, + {REG_VT_SYS_CLK_DIV, + mt9p012_regs.reg_pat[rt].vt_sys_clk_div}, + {REG_PRE_PLL_CLK_DIV, + mt9p012_regs.reg_pat[rt].pre_pll_clk_div}, + {REG_PLL_MULTIPLIER, + mt9p012_regs.reg_pat[rt].pll_multiplier}, + {REG_OP_PIX_CLK_DIV, + mt9p012_regs.reg_pat[rt].op_pix_clk_div}, + {REG_OP_SYS_CLK_DIV, + mt9p012_regs.reg_pat[rt].op_sys_clk_div}, +#ifdef MT9P012_REV_7 + {0x30B0, 0x0001}, + {0x308E, 0xE060}, + {0x3092, 0x0A52}, + {0x3094, 0x4656}, + {0x3096, 0x5652}, + {0x30CA, 0x8006}, + {0x312A, 0xDD02}, + {0x312C, 0x00E4}, + {0x3170, 0x299A}, +#endif + /* optimized settings for noise */ + {0x3088, 0x6FF6}, + {0x3154, 0x0282}, + {0x3156, 0x0381}, + {0x3162, 0x04CE}, + {0x0204, 0x0010}, + {0x0206, 0x0010}, + {0x0208, 0x0010}, + {0x020A, 0x0010}, + {0x020C, 0x0010}, + {MT9P012_REG_RESET_REGISTER, + MT9P012_RESET_REGISTER_PWON}, + }; + + struct mt9p012_i2c_reg_conf ipc_tbl2[] = { + {MT9P012_REG_RESET_REGISTER, + MT9P012_RESET_REGISTER_PWOFF}, + {REG_VT_PIX_CLK_DIV, + mt9p012_regs.reg_pat[rt].vt_pix_clk_div}, + {REG_VT_SYS_CLK_DIV, + mt9p012_regs.reg_pat[rt].vt_sys_clk_div}, + {REG_PRE_PLL_CLK_DIV, + mt9p012_regs.reg_pat[rt].pre_pll_clk_div}, + {REG_PLL_MULTIPLIER, + mt9p012_regs.reg_pat[rt].pll_multiplier}, + {REG_OP_PIX_CLK_DIV, + mt9p012_regs.reg_pat[rt].op_pix_clk_div}, + {REG_OP_SYS_CLK_DIV, + mt9p012_regs.reg_pat[rt].op_sys_clk_div}, +#ifdef MT9P012_REV_7 + {0x30B0, 0x0001}, + {0x308E, 0xE060}, + {0x3092, 0x0A52}, + {0x3094, 0x4656}, + {0x3096, 0x5652}, + {0x30CA, 0x8006}, + {0x312A, 0xDD02}, 
+ {0x312C, 0x00E4}, + {0x3170, 0x299A}, +#endif + /* optimized settings for noise */ + {0x3088, 0x6FF6}, + {0x3154, 0x0282}, + {0x3156, 0x0381}, + {0x3162, 0x04CE}, + {0x0204, 0x0010}, + {0x0206, 0x0010}, + {0x0208, 0x0010}, + {0x020A, 0x0010}, + {0x020C, 0x0010}, + {MT9P012_REG_RESET_REGISTER, + MT9P012_RESET_REGISTER_PWON}, + }; + + struct mt9p012_i2c_reg_conf ipc_tbl3[] = { + {REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_HOLD}, + /* Set preview or snapshot mode */ + {REG_ROW_SPEED, + mt9p012_regs.reg_pat[rt].row_speed}, + {REG_X_ADDR_START, + mt9p012_regs.reg_pat[rt].x_addr_start}, + {REG_X_ADDR_END, + mt9p012_regs.reg_pat[rt].x_addr_end}, + {REG_Y_ADDR_START, + mt9p012_regs.reg_pat[rt].y_addr_start}, + {REG_Y_ADDR_END, + mt9p012_regs.reg_pat[rt].y_addr_end}, + {REG_READ_MODE, + mt9p012_regs.reg_pat[rt].read_mode}, + {REG_SCALE_M, mt9p012_regs.reg_pat[rt].scale_m}, + {REG_X_OUTPUT_SIZE, + mt9p012_regs.reg_pat[rt].x_output_size}, + {REG_Y_OUTPUT_SIZE, + mt9p012_regs.reg_pat[rt].y_output_size}, + {REG_LINE_LENGTH_PCK, + mt9p012_regs.reg_pat[rt].line_length_pck}, + {REG_FRAME_LENGTH_LINES, + mt9p012_regs.reg_pat[rt].frame_length_lines}, + {REG_COARSE_INT_TIME, + mt9p012_regs.reg_pat[rt].coarse_int_time}, + {REG_FINE_INTEGRATION_TIME, + mt9p012_regs.reg_pat[rt].fine_int_time}, + {REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_UPDATE}, + }; + + /* reset fps_divider */ + mt9p012_ctrl->fps_divider = 1 * 0x0400; + + rc = mt9p012_i2c_write_w_table(&ipc_tbl1[0], + ARRAY_SIZE(ipc_tbl1)); + if (rc < 0) + return rc; + + rc = mt9p012_i2c_write_w_table(&ipc_tbl2[0], + ARRAY_SIZE(ipc_tbl2)); + if (rc < 0) + return rc; + + mdelay(5); + + rc = mt9p012_i2c_write_w_table(&ipc_tbl3[0], + ARRAY_SIZE(ipc_tbl3)); + if (rc < 0) + return rc; + + /* load lens shading */ + rc = mt9p012_i2c_write_w(mt9p012_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_HOLD); + if (rc < 0) + return rc; + + rc = mt9p012_set_lc(); + if (rc < 0) + return rc; + + rc = mt9p012_i2c_write_w(mt9p012_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_UPDATE); + + if (rc < 0) + return rc; + } + break; /* case REG_INIT: */ + + default: + rc = -EINVAL; + break; + } /* switch (rupdate) */ + + return rc; +} + +static int mt9p012_video_config(int mode, int res) +{ + int rc; + + switch (res) { + case QTR_SIZE: + rc = mt9p012_setting(UPDATE_PERIODIC, RES_PREVIEW); + if (rc < 0) + return rc; + + CDBG("mt9p012 sensor configuration done!\n"); + break; + + case FULL_SIZE: + rc = mt9p012_setting(UPDATE_PERIODIC, RES_CAPTURE); + if (rc < 0) + return rc; + + break; + + default: + return 0; + } /* switch */ + + mt9p012_ctrl->prev_res = res; + mt9p012_ctrl->curr_res = res; + mt9p012_ctrl->sensormode = mode; + + rc = mt9p012_write_exp_gain(mt9p012_ctrl->my_reg_gain, + mt9p012_ctrl->my_reg_line_count); + + rc = mt9p012_i2c_write_w(mt9p012_client->addr, + MT9P012_REG_RESET_REGISTER, 0x10cc | 0x0002); + + return rc; +} + +static int mt9p012_snapshot_config(int mode) +{ + int rc = 0; + + rc = mt9p012_setting(UPDATE_PERIODIC, RES_CAPTURE); + if (rc < 0) + return rc; + + mt9p012_ctrl->curr_res = mt9p012_ctrl->pict_res; + + mt9p012_ctrl->sensormode = mode; + + return rc; +} + +static int mt9p012_raw_snapshot_config(int mode) +{ + int rc = 0; + + rc = mt9p012_setting(UPDATE_PERIODIC, RES_CAPTURE); + if (rc < 0) + return rc; + + mt9p012_ctrl->curr_res = mt9p012_ctrl->pict_res; + + mt9p012_ctrl->sensormode = mode; + + return rc; +} + +static int mt9p012_power_down(void) +{ + int rc = 0; + + rc = 
mt9p012_i2c_write_w(mt9p012_client->addr, + MT9P012_REG_RESET_REGISTER, + MT9P012_RESET_REGISTER_PWOFF); + + mdelay(5); + return rc; +} + +static int mt9p012_move_focus(int direction, int num_steps) +{ + int16_t step_direction; + int16_t actual_step; + int16_t next_position; + uint8_t code_val_msb, code_val_lsb; + + if (num_steps > MT9P012_TOTAL_STEPS_NEAR_TO_FAR) + num_steps = MT9P012_TOTAL_STEPS_NEAR_TO_FAR; + else if (num_steps == 0) { + CDBG("mt9p012_move_focus failed at line %d ...\n", __LINE__); + return -EINVAL; + } + + if (direction == MOVE_NEAR) + step_direction = 16; /* 10bit */ + else if (direction == MOVE_FAR) + step_direction = -16; /* 10 bit */ + else { + CDBG("mt9p012_move_focus failed at line %d ...\n", __LINE__); + return -EINVAL; + } + + if (mt9p012_ctrl->curr_lens_pos < mt9p012_ctrl->init_curr_lens_pos) + mt9p012_ctrl->curr_lens_pos = mt9p012_ctrl->init_curr_lens_pos; + + actual_step = (int16_t) (step_direction * (int16_t) num_steps); + next_position = (int16_t) (mt9p012_ctrl->curr_lens_pos + actual_step); + + if (next_position > 1023) + next_position = 1023; + else if (next_position < 0) + next_position = 0; + + code_val_msb = next_position >> 4; + code_val_lsb = (next_position & 0x000F) << 4; + /* code_val_lsb |= mode_mask; */ + + /* Writing the digital code for current to the actuator */ + if (mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, + code_val_msb, code_val_lsb) < 0) { + CDBG("mt9p012_move_focus failed at line %d ...\n", __LINE__); + return -EBUSY; + } + + /* Storing the current lens Position */ + mt9p012_ctrl->curr_lens_pos = next_position; + + return 0; +} + +static int mt9p012_set_default_focus(void) +{ + int rc = 0; + uint8_t code_val_msb, code_val_lsb; + + code_val_msb = 0x00; + code_val_lsb = 0x00; + + /* Write the digital code for current to the actuator */ + rc = mt9p012_i2c_write_b(MT9P012_AF_I2C_ADDR >> 1, + code_val_msb, code_val_lsb); + + mt9p012_ctrl->curr_lens_pos = 0; + mt9p012_ctrl->init_curr_lens_pos = 0; + + return rc; +} + +static int mt9p012_probe_init_done(const struct msm_camera_sensor_info *data) +{ + gpio_direction_output(data->sensor_reset, 0); + gpio_free(data->sensor_reset); + return 0; +} + +static int mt9p012_probe_init_sensor(const struct msm_camera_sensor_info *data) +{ + int rc; + uint16_t chipid; + + rc = gpio_request(data->sensor_reset, "mt9p012"); + if (!rc) + gpio_direction_output(data->sensor_reset, 1); + else + goto init_probe_done; + + mdelay(20); + + /* RESET the sensor image part via I2C command */ + CDBG("mt9p012_sensor_init(): reseting sensor.\n"); + rc = mt9p012_i2c_write_w(mt9p012_client->addr, + MT9P012_REG_RESET_REGISTER, 0x10CC | 0x0001); + if (rc < 0) { + CDBG("sensor reset failed. rc = %d\n", rc); + goto init_probe_fail; + } + + mdelay(MT9P012_RESET_DELAY_MSECS); + + /* 3. Read sensor Model ID: */ + rc = mt9p012_i2c_read_w(mt9p012_client->addr, + MT9P012_REG_MODEL_ID, &chipid); + if (rc < 0) + goto init_probe_fail; + + /* 4. Compare sensor ID to MT9T012VC ID: */ + if (chipid != MT9P012_MODEL_ID) { + CDBG("mt9p012 wrong model_id = 0x%x\n", chipid); + rc = -ENODEV; + goto init_probe_fail; + } + + rc = mt9p012_i2c_write_w(mt9p012_client->addr, 0x306E, 0x9000); + if (rc < 0) { + CDBG("REV_7 write failed. rc = %d\n", rc); + goto init_probe_fail; + } + + /* RESET_REGISTER, enable parallel interface and disable serialiser */ + CDBG("mt9p012_sensor_init(): enabling parallel interface.\n"); + rc = mt9p012_i2c_write_w(mt9p012_client->addr, 0x301A, 0x10CC); + if (rc < 0) { + CDBG("enable parallel interface failed. 
rc = %d\n", rc); + goto init_probe_fail; + } + + /* To disable the 2 extra lines */ + rc = mt9p012_i2c_write_w(mt9p012_client->addr, 0x3064, 0x0805); + + if (rc < 0) { + CDBG("disable the 2 extra lines failed. rc = %d\n", rc); + goto init_probe_fail; + } + + mdelay(MT9P012_RESET_DELAY_MSECS); + goto init_probe_done; + +init_probe_fail: + mt9p012_probe_init_done(data); +init_probe_done: + return rc; +} + +static int mt9p012_sensor_open_init(const struct msm_camera_sensor_info *data) +{ + int rc; + + mt9p012_ctrl = kzalloc(sizeof(struct mt9p012_ctrl), GFP_KERNEL); + if (!mt9p012_ctrl) { + CDBG("mt9p012_init failed!\n"); + rc = -ENOMEM; + goto init_done; + } + + mt9p012_ctrl->fps_divider = 1 * 0x00000400; + mt9p012_ctrl->pict_fps_divider = 1 * 0x00000400; + mt9p012_ctrl->set_test = TEST_OFF; + mt9p012_ctrl->prev_res = QTR_SIZE; + mt9p012_ctrl->pict_res = FULL_SIZE; + + if (data) + mt9p012_ctrl->sensordata = data; + + /* enable mclk first */ + msm_camio_clk_rate_set(MT9P012_DEFAULT_CLOCK_RATE); + mdelay(20); + + msm_camio_camif_pad_reg_reset(); + mdelay(20); + + rc = mt9p012_probe_init_sensor(data); + if (rc < 0) + goto init_fail1; + + if (mt9p012_ctrl->prev_res == QTR_SIZE) + rc = mt9p012_setting(REG_INIT, RES_PREVIEW); + else + rc = mt9p012_setting(REG_INIT, RES_CAPTURE); + + if (rc < 0) { + CDBG("mt9p012_setting failed. rc = %d\n", rc); + goto init_fail1; + } + + /* sensor : output enable */ + CDBG("mt9p012_sensor_open_init(): enabling output.\n"); + rc = mt9p012_i2c_write_w(mt9p012_client->addr, + MT9P012_REG_RESET_REGISTER, + MT9P012_RESET_REGISTER_PWON); + if (rc < 0) { + CDBG("sensor output enable failed. rc = %d\n", rc); + goto init_fail1; + } + + /* TODO: enable AF actuator */ +#if 0 + CDBG("enable AF actuator, gpio = %d\n", + mt9p012_ctrl->sensordata->vcm_pwd); + rc = gpio_request(mt9p012_ctrl->sensordata->vcm_pwd, "mt9p012"); + if (!rc) + gpio_direction_output(mt9p012_ctrl->sensordata->vcm_pwd, 1); + else { + CDBG("mt9p012_ctrl gpio request failed!\n"); + goto init_fail1; + } + mdelay(20); + + rc = mt9p012_set_default_focus(); +#endif + if (rc >= 0) + goto init_done; + + /* TODO: + * gpio_direction_output(mt9p012_ctrl->sensordata->vcm_pwd, 0); + * gpio_free(mt9p012_ctrl->sensordata->vcm_pwd); */ +init_fail1: + mt9p012_probe_init_done(data); + kfree(mt9p012_ctrl); +init_done: + return rc; +} + +static int mt9p012_init_client(struct i2c_client *client) +{ + /* Initialize the MSM_CAMI2C Chip */ + init_waitqueue_head(&mt9p012_wait_queue); + return 0; +} + +static int mt9p012_set_sensor_mode(int mode, int res) +{ + int rc = 0; + + switch (mode) { + case SENSOR_PREVIEW_MODE: + rc = mt9p012_video_config(mode, res); + break; + + case SENSOR_SNAPSHOT_MODE: + rc = mt9p012_snapshot_config(mode); + break; + + case SENSOR_RAW_SNAPSHOT_MODE: + rc = mt9p012_raw_snapshot_config(mode); + break; + + default: + rc = -EINVAL; + break; + } + + return rc; +} + +int mt9p012_sensor_config(void __user *argp) +{ + struct sensor_cfg_data cdata; + int rc = 0; + + if (copy_from_user(&cdata, + (void *)argp, sizeof(struct sensor_cfg_data))) + return -EFAULT; + + CDBG("%s: cfgtype = %d\n", __func__, cdata.cfgtype); + switch (cdata.cfgtype) { + case CFG_GET_PICT_FPS: + mt9p012_get_pict_fps(cdata.cfg.gfps.prevfps, + &(cdata.cfg.gfps.pictfps)); + + if (copy_to_user((void *)argp, &cdata, + sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_GET_PREV_L_PF: + cdata.cfg.prevl_pf = mt9p012_get_prev_lines_pf(); + + if (copy_to_user((void *)argp, + &cdata, sizeof(struct sensor_cfg_data))) + rc = -EFAULT; 
+ break; + + case CFG_GET_PREV_P_PL: + cdata.cfg.prevp_pl = mt9p012_get_prev_pixels_pl(); + + if (copy_to_user((void *)argp, + &cdata, sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_GET_PICT_L_PF: + cdata.cfg.pictl_pf = mt9p012_get_pict_lines_pf(); + + if (copy_to_user((void *)argp, + &cdata, sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_GET_PICT_P_PL: + cdata.cfg.pictp_pl = mt9p012_get_pict_pixels_pl(); + + if (copy_to_user((void *)argp, + &cdata, sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_GET_PICT_MAX_EXP_LC: + cdata.cfg.pict_max_exp_lc = mt9p012_get_pict_max_exp_lc(); + + if (copy_to_user((void *)argp, + &cdata, sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_SET_FPS: + case CFG_SET_PICT_FPS: + rc = mt9p012_set_fps(&(cdata.cfg.fps)); + break; + + case CFG_SET_EXP_GAIN: + rc = mt9p012_write_exp_gain(cdata.cfg.exp_gain.gain, + cdata.cfg.exp_gain.line); + break; + + case CFG_SET_PICT_EXP_GAIN: + CDBG("Line:%d CFG_SET_PICT_EXP_GAIN \n", __LINE__); + rc = mt9p012_set_pict_exp_gain(cdata.cfg.exp_gain.gain, + cdata.cfg.exp_gain.line); + break; + + case CFG_SET_MODE: + rc = mt9p012_set_sensor_mode(cdata.mode, cdata.rs); + break; + + case CFG_PWR_DOWN: + rc = mt9p012_power_down(); + break; + + case CFG_MOVE_FOCUS: + CDBG("mt9p012_ioctl: CFG_MOVE_FOCUS: cdata.cfg.focus.dir=%d " + "cdata.cfg.focus.steps=%d\n", + cdata.cfg.focus.dir, cdata.cfg.focus.steps); + rc = mt9p012_move_focus(cdata.cfg.focus.dir, + cdata.cfg.focus.steps); + break; + + case CFG_SET_DEFAULT_FOCUS: + rc = mt9p012_set_default_focus(); + break; + + case CFG_SET_LENS_SHADING: + CDBG("%s: CFG_SET_LENS_SHADING\n", __func__); + rc = mt9p012_lens_shading_enable(cdata.cfg.lens_shading); + break; + + case CFG_GET_AF_MAX_STEPS: + cdata.max_steps = MT9P012_STEPS_NEAR_TO_CLOSEST_INF; + if (copy_to_user((void *)argp, + &cdata, sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_SET_EFFECT: + default: + rc = -EINVAL; + break; + } + + return rc; +} + +int mt9p012_sensor_release(void) +{ + int rc = -EBADF; + + mt9p012_power_down(); + + gpio_direction_output(mt9p012_ctrl->sensordata->sensor_reset, 0); + gpio_free(mt9p012_ctrl->sensordata->sensor_reset); + + gpio_direction_output(mt9p012_ctrl->sensordata->vcm_pwd, 0); + gpio_free(mt9p012_ctrl->sensordata->vcm_pwd); + + kfree(mt9p012_ctrl); + mt9p012_ctrl = NULL; + + CDBG("mt9p012_release completed\n"); + + return rc; +} + +static int mt9p012_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int rc = 0; + CDBG("mt9p012_probe called!\n"); + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + CDBG("i2c_check_functionality failed\n"); + goto probe_failure; + } + + mt9p012_sensorw = kzalloc(sizeof(struct mt9p012_work), GFP_KERNEL); + if (!mt9p012_sensorw) { + CDBG("kzalloc failed.\n"); + rc = -ENOMEM; + goto probe_failure; + } + + i2c_set_clientdata(client, mt9p012_sensorw); + mt9p012_init_client(client); + mt9p012_client = client; + + mdelay(50); + + CDBG("mt9p012_probe successed! rc = %d\n", rc); + return 0; + +probe_failure: + CDBG("mt9p012_probe failed! 
rc = %d\n", rc); + return rc; +} + +static const struct i2c_device_id mt9p012_i2c_id[] = { + {"mt9p012", 0}, + {} +}; + +static struct i2c_driver mt9p012_i2c_driver = { + .id_table = mt9p012_i2c_id, + .probe = mt9p012_i2c_probe, + .remove = __exit_p(mt9p012_i2c_remove), + .driver = { + .name = "mt9p012", + }, +}; + +static int mt9p012_sensor_probe(const struct msm_camera_sensor_info *info, + struct msm_sensor_ctrl *s) +{ + int rc = i2c_add_driver(&mt9p012_i2c_driver); + if (rc < 0 || mt9p012_client == NULL) { + rc = -ENOTSUPP; + goto probe_done; + } + + msm_camio_clk_rate_set(MT9P012_DEFAULT_CLOCK_RATE); + mdelay(20); + + rc = mt9p012_probe_init_sensor(info); + if (rc < 0) + goto probe_done; + + s->s_init = mt9p012_sensor_open_init; + s->s_release = mt9p012_sensor_release; + s->s_config = mt9p012_sensor_config; + mt9p012_probe_init_done(info); + +probe_done: + CDBG("%s %s:%d\n", __FILE__, __func__, __LINE__); + return rc; +} + +static int __mt9p012_probe(struct platform_device *pdev) +{ + return msm_camera_drv_start(pdev, mt9p012_sensor_probe); +} + +static struct platform_driver msm_camera_driver = { + .probe = __mt9p012_probe, + .driver = { + .name = "msm_camera_mt9p012", + .owner = THIS_MODULE, + }, +}; + +static int __init mt9p012_init(void) +{ + return platform_driver_register(&msm_camera_driver); +} + +module_init(mt9p012_init); diff --git a/drivers/media/video/msm/mt9p012_reg.c b/drivers/media/video/msm/mt9p012_reg.c new file mode 100644 index 0000000000000..713a0619fc81d --- /dev/null +++ b/drivers/media/video/msm/mt9p012_reg.c @@ -0,0 +1,580 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ + +#include "mt9p012.h" +#include + +/*Micron settings from Applications for lower power consumption.*/ +struct reg_struct mt9p012_reg_pat[2] = { + { /* Preview */ + /* vt_pix_clk_div REG=0x0300 */ + 6, /* 5 */ + + /* vt_sys_clk_div REG=0x0302 */ + 1, + + /* pre_pll_clk_div REG=0x0304 */ + 2, + + /* pll_multiplier REG=0x0306 */ + 60, + + /* op_pix_clk_div REG=0x0308 */ + 8, /* 10 */ + + /* op_sys_clk_div REG=0x030A */ + 1, + + /* scale_m REG=0x0404 */ + 16, + + /* row_speed REG=0x3016 */ + 0x0111, + + /* x_addr_start REG=0x3004 */ + 8, + + /* x_addr_end REG=0x3008 */ + 2597, + + /* y_addr_start REG=0x3002 */ + 8, + + /* y_addr_end REG=0x3006 */ + 1949, + + /* read_mode REG=0x3040 + * Preview 2x2 skipping */ + 0x00C3, + + /* x_output_size REG=0x034C */ + 1296, + + /* y_output_size REG=0x034E */ + 972, + + /* line_length_pck REG=0x300C */ + 3784, + + /* frame_length_lines REG=0x300A */ + 1057, + + /* coarse_integration_time REG=0x3012 */ + 16, + + /* fine_integration_time REG=0x3014 */ + 1764}, + { /*Snapshot */ + /* vt_pix_clk_div REG=0x0300 */ + 6, + + /* vt_sys_clk_div REG=0x0302 */ + 1, + + /* pre_pll_clk_div REG=0x0304 */ + 2, + + /* pll_multiplier REG=0x0306 + * 60 for 10fps snapshot */ + 60, + + /* op_pix_clk_div REG=0x0308 */ + 8, + + /* op_sys_clk_div REG=0x030A */ + 1, + + /* scale_m REG=0x0404 */ + 16, + + /* row_speed REG=0x3016 */ + 0x0111, + + /* x_addr_start REG=0x3004 */ + 8, + + /* x_addr_end REG=0x3008 */ + 2615, + + /* y_addr_start REG=0x3002 */ + 8, + + /* y_addr_end REG=0x3006 */ + 1967, + + /* read_mode REG=0x3040 */ + 0x0041, + + /* x_output_size REG=0x034C */ + 2608, + + /* y_output_size REG=0x034E */ + 1960, + + /* line_length_pck REG=0x300C */ + 3911, + + /* frame_length_lines REG=0x300A //10 fps snapshot */ + 2045, + + /* coarse_integration_time REG=0x3012 */ + 16, + + /* fine_integration_time REG=0x3014 */ + 882} +}; + +struct mt9p012_i2c_reg_conf mt9p012_test_tbl[] = { + {0x3044, 0x0544 & 0xFBFF}, + {0x30CA, 0x0004 | 0x0001}, + {0x30D4, 0x9020 & 0x7FFF}, + {0x31E0, 0x0003 & 0xFFFE}, + {0x3180, 0x91FF & 0x7FFF}, + {0x301A, (0x10CC | 0x8000) & 0xFFF7}, + {0x301E, 0x0000}, + {0x3780, 0x0000}, +}; + +struct mt9p012_i2c_reg_conf mt9p012_lc_tbl[] = { + /* [Lens shading 85 Percent TL84] */ + /* P_RD_P0Q0 */ + {0x360A, 0x7FEF}, + /* P_RD_P0Q1 */ + {0x360C, 0x232C}, + /* P_RD_P0Q2 */ + {0x360E, 0x7050}, + /* P_RD_P0Q3 */ + {0x3610, 0xF3CC}, + /* P_RD_P0Q4 */ + {0x3612, 0x89D1}, + /* P_RD_P1Q0 */ + {0x364A, 0xBE0D}, + /* P_RD_P1Q1 */ + {0x364C, 0x9ACB}, + /* P_RD_P1Q2 */ + {0x364E, 0x2150}, + /* P_RD_P1Q3 */ + {0x3650, 0xB26B}, + /* P_RD_P1Q4 */ + {0x3652, 0x9511}, + /* P_RD_P2Q0 */ + {0x368A, 0x2151}, + /* P_RD_P2Q1 */ + {0x368C, 0x00AD}, + /* P_RD_P2Q2 */ + {0x368E, 0x8334}, + /* P_RD_P2Q3 */ + {0x3690, 0x478E}, + /* P_RD_P2Q4 */ + {0x3692, 0x0515}, + /* P_RD_P3Q0 */ + {0x36CA, 0x0710}, + /* P_RD_P3Q1 */ + {0x36CC, 0x452D}, + /* P_RD_P3Q2 */ + {0x36CE, 0xF352}, + /* P_RD_P3Q3 */ + {0x36D0, 0x190F}, + /* P_RD_P3Q4 */ + {0x36D2, 0x4413}, + /* P_RD_P4Q0 */ + {0x370A, 0xD112}, + /* P_RD_P4Q1 */ + {0x370C, 0xF50F}, + /* P_RD_P4Q2 */ + {0x370C, 0xF50F}, + /* P_RD_P4Q3 */ + {0x3710, 0xDC11}, + /* P_RD_P4Q4 */ + {0x3712, 0xD776}, + /* P_GR_P0Q0 */ + {0x3600, 0x1750}, + /* P_GR_P0Q1 */ + {0x3602, 0xF0AC}, + /* P_GR_P0Q2 */ + {0x3604, 0x4711}, + /* P_GR_P0Q3 */ + {0x3606, 0x07CE}, + /* P_GR_P0Q4 */ + {0x3608, 0x96B2}, + /* P_GR_P1Q0 */ + {0x3640, 0xA9AE}, + /* P_GR_P1Q1 */ + {0x3642, 0xF9AC}, + /* P_GR_P1Q2 */ + {0x3644, 0x39F1}, + /* P_GR_P1Q3 */ + {0x3646, 0x016F}, + /* 
P_GR_P1Q4 */ + {0x3648, 0x8AB2}, + /* P_GR_P2Q0 */ + {0x3680, 0x1752}, + /* P_GR_P2Q1 */ + {0x3682, 0x70F0}, + /* P_GR_P2Q2 */ + {0x3684, 0x83F5}, + /* P_GR_P2Q3 */ + {0x3686, 0x8392}, + /* P_GR_P2Q4 */ + {0x3688, 0x1FD6}, + /* P_GR_P3Q0 */ + {0x36C0, 0x1131}, + /* P_GR_P3Q1 */ + {0x36C2, 0x3DAF}, + /* P_GR_P3Q2 */ + {0x36C4, 0x89B4}, + /* P_GR_P3Q3 */ + {0x36C6, 0xA391}, + /* P_GR_P3Q4 */ + {0x36C8, 0x1334}, + /* P_GR_P4Q0 */ + {0x3700, 0xDC13}, + /* P_GR_P4Q1 */ + {0x3702, 0xD052}, + /* P_GR_P4Q2 */ + {0x3704, 0x5156}, + /* P_GR_P4Q3 */ + {0x3706, 0x1F13}, + /* P_GR_P4Q4 */ + {0x3708, 0x8C38}, + /* P_BL_P0Q0 */ + {0x3614, 0x0050}, + /* P_BL_P0Q1 */ + {0x3616, 0xBD4C}, + /* P_BL_P0Q2 */ + {0x3618, 0x41B0}, + /* P_BL_P0Q3 */ + {0x361A, 0x660D}, + /* P_BL_P0Q4 */ + {0x361C, 0xC590}, + /* P_BL_P1Q0 */ + {0x3654, 0x87EC}, + /* P_BL_P1Q1 */ + {0x3656, 0xE44C}, + /* P_BL_P1Q2 */ + {0x3658, 0x302E}, + /* P_BL_P1Q3 */ + {0x365A, 0x106E}, + /* P_BL_P1Q4 */ + {0x365C, 0xB58E}, + /* P_BL_P2Q0 */ + {0x3694, 0x0DD1}, + /* P_BL_P2Q1 */ + {0x3696, 0x2A50}, + /* P_BL_P2Q2 */ + {0x3698, 0xC793}, + /* P_BL_P2Q3 */ + {0x369A, 0xE8F1}, + /* P_BL_P2Q4 */ + {0x369C, 0x4174}, + /* P_BL_P3Q0 */ + {0x36D4, 0x01EF}, + /* P_BL_P3Q1 */ + {0x36D6, 0x06CF}, + /* P_BL_P3Q2 */ + {0x36D8, 0x8D91}, + /* P_BL_P3Q3 */ + {0x36DA, 0x91F0}, + /* P_BL_P3Q4 */ + {0x36DC, 0x52EF}, + /* P_BL_P4Q0 */ + {0x3714, 0xA6D2}, + /* P_BL_P4Q1 */ + {0x3716, 0xA312}, + /* P_BL_P4Q2 */ + {0x3718, 0x2695}, + /* P_BL_P4Q3 */ + {0x371A, 0x3953}, + /* P_BL_P4Q4 */ + {0x371C, 0x9356}, + /* P_GB_P0Q0 */ + {0x361E, 0x7EAF}, + /* P_GB_P0Q1 */ + {0x3620, 0x2A4C}, + /* P_GB_P0Q2 */ + {0x3622, 0x49F0}, + {0x3624, 0xF1EC}, + /* P_GB_P0Q4 */ + {0x3626, 0xC670}, + /* P_GB_P1Q0 */ + {0x365E, 0x8E0C}, + /* P_GB_P1Q1 */ + {0x3660, 0xC2A9}, + /* P_GB_P1Q2 */ + {0x3662, 0x274F}, + /* P_GB_P1Q3 */ + {0x3664, 0xADAB}, + /* P_GB_P1Q4 */ + {0x3666, 0x8EF0}, + /* P_GB_P2Q0 */ + {0x369E, 0x09B1}, + /* P_GB_P2Q1 */ + {0x36A0, 0xAA2E}, + /* P_GB_P2Q2 */ + {0x36A2, 0xC3D3}, + /* P_GB_P2Q3 */ + {0x36A4, 0x7FAF}, + /* P_GB_P2Q4 */ + {0x36A6, 0x3F34}, + /* P_GB_P3Q0 */ + {0x36DE, 0x4C8F}, + /* P_GB_P3Q1 */ + {0x36E0, 0x886E}, + /* P_GB_P3Q2 */ + {0x36E2, 0xE831}, + /* P_GB_P3Q3 */ + {0x36E4, 0x1FD0}, + /* P_GB_P3Q4 */ + {0x36E6, 0x1192}, + /* P_GB_P4Q0 */ + {0x371E, 0xB952}, + /* P_GB_P4Q1 */ + {0x3720, 0x6DCF}, + /* P_GB_P4Q2 */ + {0x3722, 0x1B55}, + /* P_GB_P4Q3 */ + {0x3724, 0xA112}, + /* P_GB_P4Q4 */ + {0x3726, 0x82F6}, + /* POLY_ORIGIN_C */ + {0x3782, 0x0510}, + /* POLY_ORIGIN_R */ + {0x3784, 0x0390}, + /* POLY_SC_ENABLE */ + {0x3780, 0x8000}, +}; + +/* rolloff table for illuminant A */ +struct mt9p012_i2c_reg_conf mt9p012_rolloff_tbl[] = { + /* P_RD_P0Q0 */ + {0x360A, 0x7FEF}, + /* P_RD_P0Q1 */ + {0x360C, 0x232C}, + /* P_RD_P0Q2 */ + {0x360E, 0x7050}, + /* P_RD_P0Q3 */ + {0x3610, 0xF3CC}, + /* P_RD_P0Q4 */ + {0x3612, 0x89D1}, + /* P_RD_P1Q0 */ + {0x364A, 0xBE0D}, + /* P_RD_P1Q1 */ + {0x364C, 0x9ACB}, + /* P_RD_P1Q2 */ + {0x364E, 0x2150}, + /* P_RD_P1Q3 */ + {0x3650, 0xB26B}, + /* P_RD_P1Q4 */ + {0x3652, 0x9511}, + /* P_RD_P2Q0 */ + {0x368A, 0x2151}, + /* P_RD_P2Q1 */ + {0x368C, 0x00AD}, + /* P_RD_P2Q2 */ + {0x368E, 0x8334}, + /* P_RD_P2Q3 */ + {0x3690, 0x478E}, + /* P_RD_P2Q4 */ + {0x3692, 0x0515}, + /* P_RD_P3Q0 */ + {0x36CA, 0x0710}, + /* P_RD_P3Q1 */ + {0x36CC, 0x452D}, + /* P_RD_P3Q2 */ + {0x36CE, 0xF352}, + /* P_RD_P3Q3 */ + {0x36D0, 0x190F}, + /* P_RD_P3Q4 */ + {0x36D2, 0x4413}, + /* P_RD_P4Q0 */ + {0x370A, 0xD112}, + /* P_RD_P4Q1 */ + {0x370C, 0xF50F}, + /* 
P_RD_P4Q2 */ + {0x370E, 0x6375}, + /* P_RD_P4Q3 */ + {0x3710, 0xDC11}, + /* P_RD_P4Q4 */ + {0x3712, 0xD776}, + /* P_GR_P0Q0 */ + {0x3600, 0x1750}, + /* P_GR_P0Q1 */ + {0x3602, 0xF0AC}, + /* P_GR_P0Q2 */ + {0x3604, 0x4711}, + /* P_GR_P0Q3 */ + {0x3606, 0x07CE}, + /* P_GR_P0Q4 */ + {0x3608, 0x96B2}, + /* P_GR_P1Q0 */ + {0x3640, 0xA9AE}, + /* P_GR_P1Q1 */ + {0x3642, 0xF9AC}, + /* P_GR_P1Q2 */ + {0x3644, 0x39F1}, + /* P_GR_P1Q3 */ + {0x3646, 0x016F}, + /* P_GR_P1Q4 */ + {0x3648, 0x8AB2}, + /* P_GR_P2Q0 */ + {0x3680, 0x1752}, + /* P_GR_P2Q1 */ + {0x3682, 0x70F0}, + /* P_GR_P2Q2 */ + {0x3684, 0x83F5}, + /* P_GR_P2Q3 */ + {0x3686, 0x8392}, + /* P_GR_P2Q4 */ + {0x3688, 0x1FD6}, + /* P_GR_P3Q0 */ + {0x36C0, 0x1131}, + /* P_GR_P3Q1 */ + {0x36C2, 0x3DAF}, + /* P_GR_P3Q2 */ + {0x36C4, 0x89B4}, + /* P_GR_P3Q3 */ + {0x36C6, 0xA391}, + /* P_GR_P3Q4 */ + {0x36C8, 0x1334}, + /* P_GR_P4Q0 */ + {0x3700, 0xDC13}, + /* P_GR_P4Q1 */ + {0x3702, 0xD052}, + /* P_GR_P4Q2 */ + {0x3704, 0x5156}, + /* P_GR_P4Q3 */ + {0x3706, 0x1F13}, + /* P_GR_P4Q4 */ + {0x3708, 0x8C38}, + /* P_BL_P0Q0 */ + {0x3614, 0x0050}, + /* P_BL_P0Q1 */ + {0x3616, 0xBD4C}, + /* P_BL_P0Q2 */ + {0x3618, 0x41B0}, + /* P_BL_P0Q3 */ + {0x361A, 0x660D}, + /* P_BL_P0Q4 */ + {0x361C, 0xC590}, + /* P_BL_P1Q0 */ + {0x3654, 0x87EC}, + /* P_BL_P1Q1 */ + {0x3656, 0xE44C}, + /* P_BL_P1Q2 */ + {0x3658, 0x302E}, + /* P_BL_P1Q3 */ + {0x365A, 0x106E}, + /* P_BL_P1Q4 */ + {0x365C, 0xB58E}, + /* P_BL_P2Q0 */ + {0x3694, 0x0DD1}, + /* P_BL_P2Q1 */ + {0x3696, 0x2A50}, + /* P_BL_P2Q2 */ + {0x3698, 0xC793}, + /* P_BL_P2Q3 */ + {0x369A, 0xE8F1}, + /* P_BL_P2Q4 */ + {0x369C, 0x4174}, + /* P_BL_P3Q0 */ + {0x36D4, 0x01EF}, + /* P_BL_P3Q1 */ + {0x36D6, 0x06CF}, + /* P_BL_P3Q2 */ + {0x36D8, 0x8D91}, + /* P_BL_P3Q3 */ + {0x36DA, 0x91F0}, + /* P_BL_P3Q4 */ + {0x36DC, 0x52EF}, + /* P_BL_P4Q0 */ + {0x3714, 0xA6D2}, + /* P_BL_P4Q1 */ + {0x3716, 0xA312}, + /* P_BL_P4Q2 */ + {0x3718, 0x2695}, + /* P_BL_P4Q3 */ + {0x371A, 0x3953}, + /* P_BL_P4Q4 */ + {0x371C, 0x9356}, + /* P_GB_P0Q0 */ + {0x361E, 0x7EAF}, + /* P_GB_P0Q1 */ + {0x3620, 0x2A4C}, + /* P_GB_P0Q2 */ + {0x3622, 0x49F0}, + {0x3624, 0xF1EC}, + /* P_GB_P0Q4 */ + {0x3626, 0xC670}, + /* P_GB_P1Q0 */ + {0x365E, 0x8E0C}, + /* P_GB_P1Q1 */ + {0x3660, 0xC2A9}, + /* P_GB_P1Q2 */ + {0x3662, 0x274F}, + /* P_GB_P1Q3 */ + {0x3664, 0xADAB}, + /* P_GB_P1Q4 */ + {0x3666, 0x8EF0}, + /* P_GB_P2Q0 */ + {0x369E, 0x09B1}, + /* P_GB_P2Q1 */ + {0x36A0, 0xAA2E}, + /* P_GB_P2Q2 */ + {0x36A2, 0xC3D3}, + /* P_GB_P2Q3 */ + {0x36A4, 0x7FAF}, + /* P_GB_P2Q4 */ + {0x36A6, 0x3F34}, + /* P_GB_P3Q0 */ + {0x36DE, 0x4C8F}, + /* P_GB_P3Q1 */ + {0x36E0, 0x886E}, + /* P_GB_P3Q2 */ + {0x36E2, 0xE831}, + /* P_GB_P3Q3 */ + {0x36E4, 0x1FD0}, + /* P_GB_P3Q4 */ + {0x36E6, 0x1192}, + /* P_GB_P4Q0 */ + {0x371E, 0xB952}, + /* P_GB_P4Q1 */ + {0x3720, 0x6DCF}, + /* P_GB_P4Q2 */ + {0x3722, 0x1B55}, + /* P_GB_P4Q3 */ + {0x3724, 0xA112}, + /* P_GB_P4Q4 */ + {0x3726, 0x82F6}, + /* POLY_ORIGIN_C */ + {0x3782, 0x0510}, + /* POLY_ORIGIN_R */ + {0x3784, 0x0390}, + /* POLY_SC_ENABLE */ + {0x3780, 0x8000}, +}; + +struct mt9p012_reg mt9p012_regs = { + .reg_pat = &mt9p012_reg_pat[0], + .reg_pat_size = ARRAY_SIZE(mt9p012_reg_pat), + .ttbl = &mt9p012_test_tbl[0], + .ttbl_size = ARRAY_SIZE(mt9p012_test_tbl), + .lctbl = &mt9p012_lc_tbl[0], + .lctbl_size = ARRAY_SIZE(mt9p012_lc_tbl), + .rftbl = &mt9p012_rolloff_tbl[0], + .rftbl_size = ARRAY_SIZE(mt9p012_rolloff_tbl) +}; diff --git a/drivers/media/video/msm/mt9t013.c b/drivers/media/video/msm/mt9t013.c new file mode 100644 index 
0000000000000..e77904e5d551f --- /dev/null +++ b/drivers/media/video/msm/mt9t013.c @@ -0,0 +1,1506 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mt9t013.h" + +/*============================================================= + SENSOR REGISTER DEFINES +==============================================================*/ +#define MT9T013_REG_MODEL_ID 0x0000 +#define MT9T013_MODEL_ID 0x2600 +#define REG_GROUPED_PARAMETER_HOLD 0x0104 +#define GROUPED_PARAMETER_HOLD 0x0100 +#define GROUPED_PARAMETER_UPDATE 0x0000 +#define REG_COARSE_INT_TIME 0x3012 +#define REG_VT_PIX_CLK_DIV 0x0300 +#define REG_VT_SYS_CLK_DIV 0x0302 +#define REG_PRE_PLL_CLK_DIV 0x0304 +#define REG_PLL_MULTIPLIER 0x0306 +#define REG_OP_PIX_CLK_DIV 0x0308 +#define REG_OP_SYS_CLK_DIV 0x030A +#define REG_SCALE_M 0x0404 +#define REG_FRAME_LENGTH_LINES 0x300A +#define REG_LINE_LENGTH_PCK 0x300C +#define REG_X_ADDR_START 0x3004 +#define REG_Y_ADDR_START 0x3002 +#define REG_X_ADDR_END 0x3008 +#define REG_Y_ADDR_END 0x3006 +#define REG_X_OUTPUT_SIZE 0x034C +#define REG_Y_OUTPUT_SIZE 0x034E +#define REG_FINE_INT_TIME 0x3014 +#define REG_ROW_SPEED 0x3016 +#define MT9T013_REG_RESET_REGISTER 0x301A +#define MT9T013_RESET_REGISTER_PWON 0x10CC +#define MT9T013_RESET_REGISTER_PWOFF 0x1008 /* 0x10C8 stop streaming */ +#define REG_READ_MODE 0x3040 +#define REG_GLOBAL_GAIN 0x305E +#define REG_TEST_PATTERN_MODE 0x3070 + +enum mt9t013_test_mode { + TEST_OFF, + TEST_1, + TEST_2, + TEST_3 +}; + +enum mt9t013_resolution { + QTR_SIZE, + FULL_SIZE, + INVALID_SIZE +}; + +enum mt9t013_reg_update { + REG_INIT, /* registers that need to be updated during initialization */ + UPDATE_PERIODIC, /* registers that needs periodic I2C writes */ + UPDATE_ALL, /* all registers will be updated */ + UPDATE_INVALID +}; + +enum mt9t013_setting { + RES_PREVIEW, + RES_CAPTURE +}; + +/* actuator's Slave Address */ +#define MT9T013_AF_I2C_ADDR 0x18 + +/* +* AF Total steps parameters +*/ +#define MT9T013_TOTAL_STEPS_NEAR_TO_FAR 30 + +/* + * Time in milisecs for waiting for the sensor to reset. 
+ */ +#define MT9T013_RESET_DELAY_MSECS 66 + +/* for 30 fps preview */ +#define MT9T013_DEFAULT_CLOCK_RATE 24000000 +#define MT9T013_DEFAULT_MAX_FPS 26 + +/* FIXME: Changes from here */ +struct mt9t013_work { + struct work_struct work; +}; + +static struct mt9t013_work *mt9t013_sensorw; +static struct i2c_client *mt9t013_client; + +struct mt9t013_ctrl { + const struct msm_camera_sensor_info *sensordata; + + int sensormode; + uint32_t fps_divider; /* init to 1 * 0x00000400 */ + uint32_t pict_fps_divider; /* init to 1 * 0x00000400 */ + + uint16_t curr_lens_pos; + uint16_t init_curr_lens_pos; + uint16_t my_reg_gain; + uint32_t my_reg_line_count; + + enum mt9t013_resolution prev_res; + enum mt9t013_resolution pict_res; + enum mt9t013_resolution curr_res; + enum mt9t013_test_mode set_test; + + unsigned short imgaddr; +}; + +static struct mt9t013_ctrl *mt9t013_ctrl; +static DECLARE_WAIT_QUEUE_HEAD(mt9t013_wait_queue); + +static int mt9t013_i2c_rxdata(unsigned short saddr, + unsigned char *rxdata, int length) +{ + struct i2c_msg msgs[] = { + { + .addr = saddr, + .flags = 0, + .len = 2, + .buf = rxdata, + }, + { + .addr = saddr, + .flags = I2C_M_RD, + .len = length, + .buf = rxdata, + }, + }; + + if (i2c_transfer(mt9t013_client->adapter, msgs, 2) < 0) { + pr_err("mt9t013_i2c_rxdata failed!\n"); + return -EIO; + } + + return 0; +} + +static int mt9t013_i2c_read_w(unsigned short saddr, + unsigned short raddr, unsigned short *rdata) +{ + int rc = 0; + unsigned char buf[4]; + + if (!rdata) + return -EIO; + + memset(buf, 0, sizeof(buf)); + + buf[0] = (raddr & 0xFF00) >> 8; + buf[1] = (raddr & 0x00FF); + + rc = mt9t013_i2c_rxdata(saddr, buf, 2); + if (rc < 0) + return rc; + + *rdata = buf[0] << 8 | buf[1]; + + if (rc < 0) + pr_err("mt9t013_i2c_read failed!\n"); + + return rc; +} + +static int mt9t013_i2c_txdata(unsigned short saddr, + unsigned char *txdata, int length) +{ + struct i2c_msg msg[] = { + { + .addr = saddr, + .flags = 0, + .len = length, + .buf = txdata, + }, + }; + + if (i2c_transfer(mt9t013_client->adapter, msg, 1) < 0) { + pr_err("mt9t013_i2c_txdata failed\n"); + return -EIO; + } + + return 0; +} + +static int mt9t013_i2c_write_b(unsigned short saddr, + unsigned short waddr, unsigned short wdata) +{ + int rc = -EIO; + unsigned char buf[2]; + + memset(buf, 0, sizeof(buf)); + buf[0] = waddr; + buf[1] = wdata; + rc = mt9t013_i2c_txdata(saddr, buf, 2); + + if (rc < 0) + pr_err("i2c_write failed, addr = 0x%x, val = 0x%x!\n", + waddr, wdata); + + return rc; +} + +static int mt9t013_i2c_write_w(unsigned short saddr, + unsigned short waddr, unsigned short wdata) +{ + int rc = -EIO; + unsigned char buf[4]; + + memset(buf, 0, sizeof(buf)); + buf[0] = (waddr & 0xFF00) >> 8; + buf[1] = (waddr & 0x00FF); + buf[2] = (wdata & 0xFF00) >> 8; + buf[3] = (wdata & 0x00FF); + + rc = mt9t013_i2c_txdata(saddr, buf, 4); + + if (rc < 0) + pr_err("i2c_write_w failed, addr = 0x%x, val = 0x%x!\n", + waddr, wdata); + + return rc; +} + +static int mt9t013_i2c_write_w_table(struct mt9t013_i2c_reg_conf + *reg_conf_tbl, + int num_of_items_in_table) +{ + int i; + int rc = -EIO; + + for (i = 0; i < num_of_items_in_table; i++) { + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + reg_conf_tbl->waddr, + reg_conf_tbl->wdata); + if (rc < 0) + break; + reg_conf_tbl++; + } + + return rc; +} + +static int mt9t013_test(enum mt9t013_test_mode mo) +{ + int rc = 0; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_HOLD); + if (rc < 0) + return rc; + + if (mo == TEST_OFF) + return 0; + 
else { + rc = mt9t013_i2c_write_w_table(mt9t013_regs.ttbl, + mt9t013_regs.ttbl_size); + if (rc < 0) + return rc; + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_TEST_PATTERN_MODE, (uint16_t) mo); + if (rc < 0) + return rc; + } + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_UPDATE); + if (rc < 0) + return rc; + + return rc; +} + +static int mt9t013_set_lc(void) +{ + int rc; + + rc = mt9t013_i2c_write_w_table(mt9t013_regs.lctbl, + mt9t013_regs.lctbl_size); + if (rc < 0) + return rc; + + return rc; +} + +static int mt9t013_set_default_focus(uint8_t af_step) +{ + int rc = 0; + uint8_t code_val_msb, code_val_lsb; + code_val_msb = 0x01; + code_val_lsb = af_step; + + /* Write the digital code for current to the actuator */ + rc = mt9t013_i2c_write_b(MT9T013_AF_I2C_ADDR >> 1, + code_val_msb, code_val_lsb); + + mt9t013_ctrl->curr_lens_pos = 0; + mt9t013_ctrl->init_curr_lens_pos = 0; + return rc; +} + +static void mt9t013_get_pict_fps(uint16_t fps, uint16_t *pfps) +{ + /* input fps is preview fps in Q8 format */ + uint32_t divider; /*Q10 */ + uint32_t pclk_mult; /*Q10 */ + + if (mt9t013_ctrl->prev_res == QTR_SIZE) { + divider = + (uint32_t) (((mt9t013_regs.reg_pat[RES_PREVIEW]. + frame_length_lines * + mt9t013_regs.reg_pat[RES_PREVIEW]. + line_length_pck) * 0x00000400) / + (mt9t013_regs.reg_pat[RES_CAPTURE]. + frame_length_lines * + mt9t013_regs.reg_pat[RES_CAPTURE]. + line_length_pck)); + + pclk_mult = + (uint32_t) ((mt9t013_regs.reg_pat[RES_CAPTURE]. + pll_multiplier * 0x00000400) / + (mt9t013_regs.reg_pat[RES_PREVIEW]. + pll_multiplier)); + + } else { + /* full size resolution used for preview. */ + divider = 0x00000400; /*1.0 */ + pclk_mult = 0x00000400; /*1.0 */ + } + + /* Verify PCLK settings and frame sizes. */ + *pfps = + (uint16_t) (fps * divider * pclk_mult / 0x00000400 / 0x00000400); +} + +static uint16_t mt9t013_get_prev_lines_pf(void) +{ + if (mt9t013_ctrl->prev_res == QTR_SIZE) + return mt9t013_regs.reg_pat[RES_PREVIEW].frame_length_lines; + else + return mt9t013_regs.reg_pat[RES_CAPTURE].frame_length_lines; +} + +static uint16_t mt9t013_get_prev_pixels_pl(void) +{ + if (mt9t013_ctrl->prev_res == QTR_SIZE) + return mt9t013_regs.reg_pat[RES_PREVIEW].line_length_pck; + else + return mt9t013_regs.reg_pat[RES_CAPTURE].line_length_pck; +} + +static uint16_t mt9t013_get_pict_lines_pf(void) +{ + return mt9t013_regs.reg_pat[RES_CAPTURE].frame_length_lines; +} + +static uint16_t mt9t013_get_pict_pixels_pl(void) +{ + return mt9t013_regs.reg_pat[RES_CAPTURE].line_length_pck; +} + +static uint32_t mt9t013_get_pict_max_exp_lc(void) +{ + uint16_t snapshot_lines_per_frame; + + if (mt9t013_ctrl->pict_res == QTR_SIZE) { + snapshot_lines_per_frame = + mt9t013_regs.reg_pat[RES_PREVIEW].frame_length_lines - 1; + } else { + snapshot_lines_per_frame = + mt9t013_regs.reg_pat[RES_CAPTURE].frame_length_lines - 1; + } + + return snapshot_lines_per_frame * 24; +} + +static int mt9t013_set_fps(struct fps_cfg *fps) +{ + /* input is new fps in Q8 format */ + int rc = 0; + + mt9t013_ctrl->fps_divider = fps->fps_div; + mt9t013_ctrl->pict_fps_divider = fps->pict_fps_div; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_HOLD); + if (rc < 0) + return -EBUSY; + + CDBG("mt9t013_set_fps: fps_div is %d, frame_rate is %d\n", + fps->fps_div, + (uint16_t) (mt9t013_regs.reg_pat[RES_PREVIEW]. 
+ frame_length_lines * fps->fps_div / 0x00000400)); + + CDBG("mt9t013_set_fps: fps_mult is %d, frame_rate is %d\n", + fps->f_mult, + (uint16_t) (mt9t013_regs.reg_pat[RES_PREVIEW]. + line_length_pck * fps->f_mult / 0x00000400)); + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_LINE_LENGTH_PCK, + (uint16_t) (mt9t013_regs.reg_pat[RES_PREVIEW]. + line_length_pck * fps->f_mult / + 0x00000400)); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_UPDATE); + if (rc < 0) + return rc; + + return rc; +} + +static int mt9t013_write_exp_gain(uint16_t gain, uint32_t line) +{ + const uint16_t max_legal_gain = 0x01FF; + uint32_t line_length_ratio = 0x00000400; + enum mt9t013_setting setting; + int rc = 0; + + if (mt9t013_ctrl->sensormode == SENSOR_PREVIEW_MODE) { + mt9t013_ctrl->my_reg_gain = gain; + mt9t013_ctrl->my_reg_line_count = (uint16_t) line; + } + + if (gain > max_legal_gain) + gain = max_legal_gain; + + /* Verify no overflow */ + if (mt9t013_ctrl->sensormode != SENSOR_SNAPSHOT_MODE) { + line = (uint32_t) (line * mt9t013_ctrl->fps_divider / + 0x00000400); + + setting = RES_PREVIEW; + + } else { + line = (uint32_t) (line * mt9t013_ctrl->pict_fps_divider / + 0x00000400); + + setting = RES_CAPTURE; + } + + /*Set digital gain to 1 */ + gain |= 0x0200; + + if ((mt9t013_regs.reg_pat[setting].frame_length_lines - 1) < line) { + + line_length_ratio = + (uint32_t) (line * 0x00000400) / + (mt9t013_regs.reg_pat[setting].frame_length_lines - 1); + } else + line_length_ratio = 0x00000400; + + /* There used to be PARAMETER_HOLD register write before and + * after REG_GLOBAL_GAIN & REG_COARSE_INIT_TIME. This causes + * aec oscillation. Hence removed. */ + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, REG_GLOBAL_GAIN, gain); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_COARSE_INT_TIME, + (uint16_t) ((uint32_t) line)); + if (rc < 0) + return rc; + + return rc; +} + +static int mt9t013_set_pict_exp_gain(uint16_t gain, uint32_t line) +{ + int rc = 0; + + rc = mt9t013_write_exp_gain(gain, line); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + MT9T013_REG_RESET_REGISTER, 0x10CC | 0x0002); + + mdelay(5); + + /* camera_timed_wait(snapshot_wait*exposure_ratio); */ + return rc; +} + +static int mt9t013_setting(enum mt9t013_reg_update rupdate, + enum mt9t013_setting rt) +{ + int rc = 0; + + switch (rupdate) { + case UPDATE_PERIODIC:{ + + if (rt == RES_PREVIEW || rt == RES_CAPTURE) { +#if 0 + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + MT9T013_REG_RESET_REGISTER, + MT9T013_RESET_REGISTER_PWOFF); + if (rc < 0) + return rc; +#endif + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_VT_PIX_CLK_DIV, + mt9t013_regs. + reg_pat[rt]. + vt_pix_clk_div); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_VT_SYS_CLK_DIV, + mt9t013_regs. + reg_pat[rt]. + vt_sys_clk_div); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_PRE_PLL_CLK_DIV, + mt9t013_regs. + reg_pat[rt]. + pre_pll_clk_div); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_PLL_MULTIPLIER, + mt9t013_regs. + reg_pat[rt]. + pll_multiplier); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_OP_PIX_CLK_DIV, + mt9t013_regs. + reg_pat[rt]. + op_pix_clk_div); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_OP_SYS_CLK_DIV, + mt9t013_regs. 
+ reg_pat[rt]. + op_sys_clk_div); + if (rc < 0) + return rc; + + mdelay(5); + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_HOLD); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_ROW_SPEED, + mt9t013_regs. + reg_pat[rt].row_speed); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_X_ADDR_START, + mt9t013_regs. + reg_pat[rt]. + x_addr_start); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_X_ADDR_END, + mt9t013_regs. + reg_pat[rt]. + x_addr_end); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_Y_ADDR_START, + mt9t013_regs. + reg_pat[rt]. + y_addr_start); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_Y_ADDR_END, + mt9t013_regs. + reg_pat[rt]. + y_addr_end); + if (rc < 0) + return rc; + + if (machine_is_sapphire()) { + if (rt == 0) + rc = mt9t013_i2c_write_w + (mt9t013_client->addr, + REG_READ_MODE, 0x046F); + else + rc = mt9t013_i2c_write_w + (mt9t013_client->addr, + REG_READ_MODE, 0x0027); + } else + rc = mt9t013_i2c_write_w + (mt9t013_client->addr, + REG_READ_MODE, + mt9t013_regs.reg_pat[rt]. + read_mode); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_SCALE_M, + mt9t013_regs. + reg_pat[rt].scale_m); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_X_OUTPUT_SIZE, + mt9t013_regs. + reg_pat[rt]. + x_output_size); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_Y_OUTPUT_SIZE, + mt9t013_regs. + reg_pat[rt]. + y_output_size); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_LINE_LENGTH_PCK, + mt9t013_regs. + reg_pat[rt]. + line_length_pck); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_FRAME_LENGTH_LINES, + (mt9t013_regs. + reg_pat[rt]. + frame_length_lines * + mt9t013_ctrl-> + fps_divider / + 0x00000400)); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_COARSE_INT_TIME, + mt9t013_regs. + reg_pat[rt]. + coarse_int_time); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_FINE_INT_TIME, + mt9t013_regs. + reg_pat[rt]. + fine_int_time); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_UPDATE); + if (rc < 0) + return rc; + + rc = mt9t013_test(mt9t013_ctrl->set_test); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + MT9T013_REG_RESET_REGISTER, + MT9T013_RESET_REGISTER_PWON); + if (rc < 0) + return rc; + + mdelay(5); + + return rc; + } + } + break; + + /*CAMSENSOR_REG_UPDATE_PERIODIC */ + case REG_INIT:{ + if (rt == RES_PREVIEW || rt == RES_CAPTURE) { + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + MT9T013_REG_RESET_REGISTER, + MT9T013_RESET_REGISTER_PWOFF); + if (rc < 0) + /* MODE_SELECT, stop streaming */ + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_VT_PIX_CLK_DIV, + mt9t013_regs. + reg_pat[rt]. + vt_pix_clk_div); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_VT_SYS_CLK_DIV, + mt9t013_regs. + reg_pat[rt]. + vt_sys_clk_div); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_PRE_PLL_CLK_DIV, + mt9t013_regs. + reg_pat[rt]. 
+ pre_pll_clk_div); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_PLL_MULTIPLIER, + mt9t013_regs. + reg_pat[rt]. + pll_multiplier); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_OP_PIX_CLK_DIV, + mt9t013_regs. + reg_pat[rt]. + op_pix_clk_div); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_OP_SYS_CLK_DIV, + mt9t013_regs. + reg_pat[rt]. + op_sys_clk_div); + if (rc < 0) + return rc; + + mdelay(5); + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_HOLD); + if (rc < 0) + return rc; + + /* additional power saving mode ok around + * 38.2MHz + */ + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + 0x3084, 0x2409); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + 0x3092, 0x0A49); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + 0x3094, 0x4949); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + 0x3096, 0x4949); + if (rc < 0) + return rc; + + /* Set preview or snapshot mode */ + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_ROW_SPEED, + mt9t013_regs. + reg_pat[rt].row_speed); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_X_ADDR_START, + mt9t013_regs. + reg_pat[rt]. + x_addr_start); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_X_ADDR_END, + mt9t013_regs. + reg_pat[rt]. + x_addr_end); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_Y_ADDR_START, + mt9t013_regs. + reg_pat[rt]. + y_addr_start); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_Y_ADDR_END, + mt9t013_regs. + reg_pat[rt]. + y_addr_end); + if (rc < 0) + return rc; + + if (machine_is_sapphire()) { + if (rt == 0) + rc = mt9t013_i2c_write_w + (mt9t013_client->addr, + REG_READ_MODE, 0x046F); + else + rc = mt9t013_i2c_write_w + (mt9t013_client->addr, + REG_READ_MODE, 0x0027); + } else + rc = mt9t013_i2c_write_w + (mt9t013_client->addr, + REG_READ_MODE, + mt9t013_regs.reg_pat[rt]. + read_mode); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_SCALE_M, + mt9t013_regs. + reg_pat[rt].scale_m); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_X_OUTPUT_SIZE, + mt9t013_regs. + reg_pat[rt]. + x_output_size); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_Y_OUTPUT_SIZE, + mt9t013_regs. + reg_pat[rt]. + y_output_size); + if (rc < 0) + return 0; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_LINE_LENGTH_PCK, + mt9t013_regs. + reg_pat[rt]. + line_length_pck); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_FRAME_LENGTH_LINES, + mt9t013_regs. + reg_pat[rt]. + frame_length_lines); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_COARSE_INT_TIME, + mt9t013_regs. + reg_pat[rt]. + coarse_int_time); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_FINE_INT_TIME, + mt9t013_regs. + reg_pat[rt]. 
+ fine_int_time); + if (rc < 0) + return rc; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_UPDATE); + if (rc < 0) + return rc; + + /* load lens shading */ + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_HOLD); + if (rc < 0) + return rc; + + /* most likely needs to be written only once. */ + rc = mt9t013_set_lc(); + if (rc < 0) + return -EBUSY; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_UPDATE); + if (rc < 0) + return rc; + + rc = mt9t013_test(mt9t013_ctrl->set_test); + if (rc < 0) + return rc; + + mdelay(5); + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + MT9T013_REG_RESET_REGISTER, + MT9T013_RESET_REGISTER_PWON); + if (rc < 0) + /* MODE_SELECT, stop streaming */ + return rc; + + CDBG("!!! mt9t013 !!! PowerOn is done!\n"); + mdelay(5); + return rc; + } + } /* case CAMSENSOR_REG_INIT: */ + break; + + /*CAMSENSOR_REG_INIT */ + default: + rc = -EINVAL; + break; + } + + return rc; +} + +static int mt9t013_video_config(int mode, int res) +{ + int rc; + + switch (res) { + case QTR_SIZE: + rc = mt9t013_setting(UPDATE_PERIODIC, RES_PREVIEW); + if (rc < 0) + return rc; + CDBG("sensor configuration done!\n"); + break; + + case FULL_SIZE: + rc = mt9t013_setting(UPDATE_PERIODIC, RES_CAPTURE); + if (rc < 0) + return rc; + break; + + default: + return -EINVAL; + } + + mt9t013_ctrl->prev_res = res; + mt9t013_ctrl->curr_res = res; + mt9t013_ctrl->sensormode = mode; + + return mt9t013_write_exp_gain(mt9t013_ctrl->my_reg_gain, + mt9t013_ctrl->my_reg_line_count); +} + +static int mt9t013_snapshot_config(int mode) +{ + int rc = 0; + + rc = mt9t013_setting(UPDATE_PERIODIC, RES_CAPTURE); + if (rc < 0) + return rc; + + mt9t013_ctrl->curr_res = mt9t013_ctrl->pict_res; + mt9t013_ctrl->sensormode = mode; + return rc; +} + +static int mt9t013_raw_snapshot_config(int mode) +{ + int rc = 0; + + rc = mt9t013_setting(UPDATE_PERIODIC, RES_CAPTURE); + if (rc < 0) + return rc; + + mt9t013_ctrl->curr_res = mt9t013_ctrl->pict_res; + mt9t013_ctrl->sensormode = mode; + return rc; +} + +static int mt9t013_power_down(void) +{ + int rc = 0; + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + MT9T013_REG_RESET_REGISTER, + MT9T013_RESET_REGISTER_PWOFF); + if (rc >= 0) + mdelay(5); + return rc; +} + +static int mt9t013_move_focus(int direction, int num_steps) +{ + int16_t step_direction; + int16_t actual_step; + int16_t next_position; + int16_t break_steps[4]; + uint8_t code_val_msb, code_val_lsb; + int16_t i; + + if (num_steps > MT9T013_TOTAL_STEPS_NEAR_TO_FAR) + num_steps = MT9T013_TOTAL_STEPS_NEAR_TO_FAR; + else if (num_steps == 0) + return -EINVAL; + + if (direction == MOVE_NEAR) + step_direction = 4; + else if (direction == MOVE_FAR) + step_direction = -4; + else + return -EINVAL; + + if (mt9t013_ctrl->curr_lens_pos < mt9t013_ctrl->init_curr_lens_pos) + mt9t013_ctrl->curr_lens_pos = mt9t013_ctrl->init_curr_lens_pos; + + actual_step = (int16_t) (step_direction * (int16_t) num_steps); + + for (i = 0; i < 4; i++) + break_steps[i] = + actual_step / 4 * (i + 1) - actual_step / 4 * i; + + for (i = 0; i < 4; i++) { + next_position = (int16_t) + (mt9t013_ctrl->curr_lens_pos + break_steps[i]); + + if (next_position > 255) + next_position = 255; + else if (next_position < 0) + next_position = 0; + + code_val_msb = + ((next_position >> 4) << 2) | ((next_position << 4) >> 6); + + code_val_lsb = ((next_position & 0x03) << 6); + + /* Writing the digital code for current to 
the actuator */ + if (mt9t013_i2c_write_b(MT9T013_AF_I2C_ADDR >> 1, + code_val_msb, code_val_lsb) < 0) + return -EBUSY; + + /* Storing the current lens Position */ + mt9t013_ctrl->curr_lens_pos = next_position; + + if (i < 3) + mdelay(1); + } + + return 0; +} + +static int mt9t013_sensor_init_done(const struct msm_camera_sensor_info *data) +{ + gpio_direction_output(data->sensor_reset, 0); + gpio_free(data->sensor_reset); + return 0; +} + +static int mt9t013_probe_init_sensor(const struct msm_camera_sensor_info *data) +{ + int rc; + uint16_t chipid; + + rc = gpio_request(data->sensor_reset, "mt9t013"); + if (!rc) + gpio_direction_output(data->sensor_reset, 1); + else + goto init_probe_done; + + mdelay(20); + + /* RESET the sensor image part via I2C command */ + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + MT9T013_REG_RESET_REGISTER, 0x1009); + if (rc < 0) + goto init_probe_fail; + + /* 3. Read sensor Model ID: */ + rc = mt9t013_i2c_read_w(mt9t013_client->addr, + MT9T013_REG_MODEL_ID, &chipid); + + if (rc < 0) + goto init_probe_fail; + + CDBG("mt9t013 model_id = 0x%x\n", chipid); + + /* 4. Compare sensor ID to MT9T012VC ID: */ + if (chipid != MT9T013_MODEL_ID) { + rc = -ENODEV; + goto init_probe_fail; + } + + rc = mt9t013_i2c_write_w(mt9t013_client->addr, 0x3064, 0x0805); + if (rc < 0) + goto init_probe_fail; + + mdelay(MT9T013_RESET_DELAY_MSECS); + + goto init_probe_done; + + /* sensor: output enable */ +#if 0 + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + MT9T013_REG_RESET_REGISTER, + MT9T013_RESET_REGISTER_PWON); + + /* if this fails, the sensor is not the MT9T013 */ + rc = mt9t013_set_default_focus(0); +#endif + +init_probe_fail: + gpio_direction_output(data->sensor_reset, 0); + gpio_free(data->sensor_reset); +init_probe_done: + return rc; +} + +static int mt9t013_poweron_af(void) +{ + int rc = 0; + + /* enable AF actuator */ + CDBG("enable AF actuator, gpio = %d\n", + mt9t013_ctrl->sensordata->vcm_pwd); + rc = gpio_request(mt9t013_ctrl->sensordata->vcm_pwd, "mt9t013"); + if (!rc) { + gpio_direction_output(mt9t013_ctrl->sensordata->vcm_pwd, 0); + mdelay(20); + rc = mt9t013_set_default_focus(0); + } else + pr_err("%s, gpio_request failed (%d)!\n", __func__, rc); + return rc; +} + +static void mt9t013_poweroff_af(void) +{ + gpio_direction_output(mt9t013_ctrl->sensordata->vcm_pwd, 1); + gpio_free(mt9t013_ctrl->sensordata->vcm_pwd); +} + +int mt9t013_sensor_open_init(const struct msm_camera_sensor_info *data) +{ + int rc; + + mt9t013_ctrl = kzalloc(sizeof(struct mt9t013_ctrl), GFP_KERNEL); + if (!mt9t013_ctrl) { + pr_err("mt9t013_init failed!\n"); + rc = -ENOMEM; + goto init_done; + } + + mt9t013_ctrl->fps_divider = 1 * 0x00000400; + mt9t013_ctrl->pict_fps_divider = 1 * 0x00000400; + mt9t013_ctrl->set_test = TEST_OFF; + mt9t013_ctrl->prev_res = QTR_SIZE; + mt9t013_ctrl->pict_res = FULL_SIZE; + + if (data) + mt9t013_ctrl->sensordata = data; + + /* enable mclk first */ + msm_camio_clk_rate_set(MT9T013_DEFAULT_CLOCK_RATE); + mdelay(20); + + msm_camio_camif_pad_reg_reset(); + mdelay(20); + + rc = mt9t013_probe_init_sensor(data); + if (rc < 0) + goto init_fail; + + if (mt9t013_ctrl->prev_res == QTR_SIZE) + rc = mt9t013_setting(REG_INIT, RES_PREVIEW); + else + rc = mt9t013_setting(REG_INIT, RES_CAPTURE); + + if (rc >= 0) + rc = mt9t013_poweron_af(); + + if (rc < 0) + goto init_fail; + else + goto init_done; + +init_fail: + kfree(mt9t013_ctrl); +init_done: + return rc; +} + +static int mt9t013_init_client(struct i2c_client *client) +{ + /* Initialize the MSM_CAMI2C Chip */ + 
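+	/*
+	 * Only the wait queue used by the I2C helpers is set up here; the
+	 * sensor registers themselves are programmed later via
+	 * mt9t013_setting().  The client argument is currently unused.
+	 */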
init_waitqueue_head(&mt9t013_wait_queue); + return 0; +} + +static int mt9t013_set_sensor_mode(int mode, int res) +{ + int rc = 0; + rc = mt9t013_i2c_write_w(mt9t013_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_HOLD); + if (rc < 0) + return rc; + + switch (mode) { + case SENSOR_PREVIEW_MODE: + rc = mt9t013_video_config(mode, res); + break; + + case SENSOR_SNAPSHOT_MODE: + rc = mt9t013_snapshot_config(mode); + break; + + case SENSOR_RAW_SNAPSHOT_MODE: + rc = mt9t013_raw_snapshot_config(mode); + break; + + default: + return -EINVAL; + } + + /* FIXME: what should we do if rc < 0? */ + if (rc >= 0) + return mt9t013_i2c_write_w(mt9t013_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_UPDATE); + return rc; +} + +int mt9t013_sensor_config(void __user *argp) +{ + struct sensor_cfg_data cdata; + long rc = 0; + + if (copy_from_user(&cdata, (void *)argp, + sizeof(struct sensor_cfg_data))) + return -EFAULT; + + CDBG("mt9t013_sensor_config: cfgtype = %d\n", cdata.cfgtype); + switch (cdata.cfgtype) { + case CFG_GET_PICT_FPS: + mt9t013_get_pict_fps(cdata.cfg.gfps.prevfps, + &(cdata.cfg.gfps.pictfps)); + if (copy_to_user((void *)argp, + &cdata, sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_GET_PREV_L_PF: + cdata.cfg.prevl_pf = mt9t013_get_prev_lines_pf(); + if (copy_to_user((void *)argp, + &cdata, sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_GET_PREV_P_PL: + cdata.cfg.prevp_pl = mt9t013_get_prev_pixels_pl(); + if (copy_to_user((void *)argp, + &cdata, sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_GET_PICT_L_PF: + cdata.cfg.pictl_pf = mt9t013_get_pict_lines_pf(); + if (copy_to_user((void *)argp, + &cdata, sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_GET_PICT_P_PL: + cdata.cfg.pictp_pl = mt9t013_get_pict_pixels_pl(); + + if (copy_to_user((void *)argp, + &cdata, sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_GET_PICT_MAX_EXP_LC: + cdata.cfg.pict_max_exp_lc = mt9t013_get_pict_max_exp_lc(); + + if (copy_to_user((void *)argp, + &cdata, sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_SET_FPS: + case CFG_SET_PICT_FPS: + rc = mt9t013_set_fps(&(cdata.cfg.fps)); + break; + + case CFG_SET_EXP_GAIN: + rc = mt9t013_write_exp_gain(cdata.cfg.exp_gain.gain, + cdata.cfg.exp_gain.line); + break; + + case CFG_SET_PICT_EXP_GAIN: + rc = mt9t013_set_pict_exp_gain(cdata.cfg.exp_gain.gain, + cdata.cfg.exp_gain.line); + break; + + case CFG_SET_MODE: + rc = mt9t013_set_sensor_mode(cdata.mode, cdata.rs); + break; + + case CFG_PWR_DOWN: + rc = mt9t013_power_down(); + break; + + case CFG_MOVE_FOCUS: + rc = mt9t013_move_focus(cdata.cfg.focus.dir, + cdata.cfg.focus.steps); + break; + + case CFG_SET_DEFAULT_FOCUS: + rc = mt9t013_set_default_focus(cdata.cfg.focus.steps); + break; + + case CFG_GET_AF_MAX_STEPS: + cdata.max_steps = MT9T013_TOTAL_STEPS_NEAR_TO_FAR; + if (copy_to_user((void *)argp, + &cdata, sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_SET_EFFECT: + default: + rc = -EINVAL; + break; + } + + return rc; +} + +int mt9t013_sensor_release(void) +{ + int rc = -EBADF; + + mt9t013_poweroff_af(); + mt9t013_power_down(); + + gpio_direction_output(mt9t013_ctrl->sensordata->sensor_reset, 0); + gpio_free(mt9t013_ctrl->sensordata->sensor_reset); + + kfree(mt9t013_ctrl); + + CDBG("mt9t013_release completed!\n"); + return rc; +} + +static int mt9t013_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int rc = 0; 
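+	/*
+	 * Probe flow: check that the adapter supports plain I2C transfers,
+	 * allocate the per-sensor work struct and cache the client for the
+	 * register helpers.  The board file appears to register the 8-bit
+	 * slave address, so it is shifted right once below to obtain the
+	 * 7-bit address used by i2c_transfer().
+	 */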
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + rc = -ENOTSUPP; + goto probe_failure; + } + + mt9t013_sensorw = kzalloc(sizeof(struct mt9t013_work), GFP_KERNEL); + + if (!mt9t013_sensorw) { + rc = -ENOMEM; + goto probe_failure; + } + + i2c_set_clientdata(client, mt9t013_sensorw); + mt9t013_init_client(client); + mt9t013_client = client; + mt9t013_client->addr = mt9t013_client->addr >> 1; + mdelay(50); + + CDBG("i2c probe ok\n"); + return 0; + +probe_failure: + kfree(mt9t013_sensorw); + mt9t013_sensorw = NULL; + pr_err("i2c probe failure %d\n", rc); + return rc; +} + +static const struct i2c_device_id mt9t013_i2c_id[] = { + {"mt9t013", 0}, + {} +}; + +static struct i2c_driver mt9t013_i2c_driver = { + .id_table = mt9t013_i2c_id, + .probe = mt9t013_i2c_probe, + .remove = __exit_p(mt9t013_i2c_remove), + .driver = { + .name = "mt9t013", + }, +}; + +static int mt9t013_sensor_probe(const struct msm_camera_sensor_info *info, + struct msm_sensor_ctrl *s) +{ + /* We expect this driver to match with the i2c device registered + * in the board file immediately. */ + int rc = i2c_add_driver(&mt9t013_i2c_driver); + if (rc < 0 || mt9t013_client == NULL) { + rc = -ENOTSUPP; + goto probe_done; + } + + /* enable mclk first */ + msm_camio_clk_rate_set(MT9T013_DEFAULT_CLOCK_RATE); + mdelay(20); + + rc = mt9t013_probe_init_sensor(info); + if (rc < 0) { + i2c_del_driver(&mt9t013_i2c_driver); + goto probe_done; + } + + s->s_init = mt9t013_sensor_open_init; + s->s_release = mt9t013_sensor_release; + s->s_config = mt9t013_sensor_config; + mt9t013_sensor_init_done(info); + +probe_done: + return rc; +} + +static int __mt9t013_probe(struct platform_device *pdev) +{ + return msm_camera_drv_start(pdev, mt9t013_sensor_probe); +} + +static struct platform_driver msm_camera_driver = { + .probe = __mt9t013_probe, + .driver = { + .name = "msm_camera_mt9t013", + .owner = THIS_MODULE, + }, +}; + +static int __init mt9t013_init(void) +{ + return platform_driver_register(&msm_camera_driver); +} + +module_init(mt9t013_init); diff --git a/drivers/media/video/msm/mt9t013.h b/drivers/media/video/msm/mt9t013.h new file mode 100644 index 0000000000000..67c4d9c5b65c5 --- /dev/null +++ b/drivers/media/video/msm/mt9t013.h @@ -0,0 +1,64 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ + +#ifndef MT9T013_H +#define MT9T013_H + +#include + +struct reg_struct { + uint16_t vt_pix_clk_div; /* 0x0300 */ + uint16_t vt_sys_clk_div; /* 0x0302 */ + uint16_t pre_pll_clk_div; /* 0x0304 */ + uint16_t pll_multiplier; /* 0x0306 */ + uint16_t op_pix_clk_div; /* 0x0308 */ + uint16_t op_sys_clk_div; /* 0x030A */ + uint16_t scale_m; /* 0x0404 */ + uint16_t row_speed; /* 0x3016 */ + uint16_t x_addr_start; /* 0x3004 */ + uint16_t x_addr_end; /* 0x3008 */ + uint16_t y_addr_start; /* 0x3002 */ + uint16_t y_addr_end; /* 0x3006 */ + uint16_t read_mode; /* 0x3040 */ + uint16_t x_output_size; /* 0x034C */ + uint16_t y_output_size; /* 0x034E */ + uint16_t line_length_pck; /* 0x300C */ + uint16_t frame_length_lines; /* 0x300A */ + uint16_t coarse_int_time; /* 0x3012 */ + uint16_t fine_int_time; /* 0x3014 */ +}; + +struct mt9t013_i2c_reg_conf { + unsigned short waddr; + unsigned short wdata; +}; + +struct mt9t013_reg { + struct reg_struct *reg_pat; + int reg_pat_size; + struct mt9t013_i2c_reg_conf *ttbl; + int ttbl_size; + struct mt9t013_i2c_reg_conf *lctbl; + int lctbl_size; + struct mt9t013_i2c_reg_conf *rftbl; + int rftbl_size; +}; + +extern struct mt9t013_reg mt9t013_regs; + +#endif /* #define MT9T013_H */ diff --git a/drivers/media/video/msm/mt9t013_reg.c b/drivers/media/video/msm/mt9t013_reg.c new file mode 100644 index 0000000000000..aac81c796883a --- /dev/null +++ b/drivers/media/video/msm/mt9t013_reg.c @@ -0,0 +1,277 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ + +#include "mt9t013.h" +#include + +struct reg_struct mt9t013_reg_pat[2] = { + {/* Preview 2x2 binning 20fps, pclk MHz, MCLK 24MHz */ + /* vt_pix_clk_div:REG=0x0300 update get_snapshot_fps + * if this change */ + 10, + + /* vt_sys_clk_div: REG=0x0302 update get_snapshot_fps + * if this change */ + 1, + + /* pre_pll_clk_div REG=0x0304 update get_snapshot_fps + * if this change */ + 3, + + /* pll_multiplier REG=0x0306 60 for 30fps preview, 40 + * for 20fps preview + * 46 for 30fps preview, try 47/48 to increase further */ + 80, + + /* op_pix_clk_div REG=0x0308 */ + 10, + + /* op_sys_clk_div REG=0x030A */ + 1, + + /* scale_m REG=0x0404 */ + 16, + + /* row_speed REG=0x3016 */ + 0x0111, + + /* x_addr_start REG=0x3004 */ + 8, + + /* x_addr_end REG=0x3008 */ + 2053, + + /* y_addr_start REG=0x3002 */ + 8, + + /* y_addr_end REG=0x3006 */ + 1541, + + /* read_mode REG=0x3040 */ + 0x046C, + + /* x_output_size REG=0x034C */ + 1024, + + /* y_output_size REG=0x034E */ + 768, + + /* line_length_pck REG=0x300C */ + 3540, + + /* frame_length_lines REG=0x300A */ + 861, + + /* coarse_int_time REG=0x3012 */ + 16, + + /* fine_int_time REG=0x3014 */ + 1461}, + + { /*Snapshot */ + /* vt_pix_clk_div REG=0x0300 update get_snapshot_fps + * if this change */ + 10, + + /* vt_sys_clk_div REG=0x0302 update get_snapshot_fps + * if this change */ + 1, + + /* pre_pll_clk_div REG=0x0304 update get_snapshot_fps + * if this change */ + 3, + + /* pll_multiplier REG=0x0306 50 for 15fps snapshot, + * 40 for 10fps snapshot + * 46 for 30fps snapshot, try 47/48 to increase further */ + 80, + + /* op_pix_clk_div REG=0x0308 */ + 10, + + /* op_sys_clk_div REG=0x030A */ + 1, + + /* scale_m REG=0x0404 */ + 16, + + /* row_speed REG=0x3016 */ + 0x0111, + + /* x_addr_start REG=0x3004 */ + 8, + + /* x_addr_end REG=0x3008 */ + 2063, + + /* y_addr_start REG=0x3002 */ + 8, + + /* y_addr_end REG=0x3006 */ + 1551, + + /* read_mode REG=0x3040 */ + 0x0024, + + /* x_output_size REG=0x034C */ + 2064, + + /* y_output_size REG=0x034E */ + 1544, + + /* line_length_pck REG=0x300C */ + 4800, + + /* frame_length_lines REG=0x300A */ + 1629, + + /* coarse_int_time REG=0x3012 */ + 16, + + /* fine_int_time REG=0x3014 */ + 733} +}; + +struct mt9t013_i2c_reg_conf mt9t013_test_tbl[] = { + {0x3044, 0x0544 & 0xFBFF}, + {0x30CA, 0x0004 | 0x0001}, + {0x30D4, 0x9020 & 0x7FFF}, + {0x31E0, 0x0003 & 0xFFFE}, + {0x3180, 0x91FF & 0x7FFF}, + {0x301A, (0x10CC | 0x8000) & 0xFFF7}, + {0x301E, 0x0000}, + {0x3780, 0x0000}, +}; + +/* [Lens shading 85 Percent TL84] */ +struct mt9t013_i2c_reg_conf mt9t013_lc_tbl[] = { + {0x360A, 0x0290}, /* P_RD_P0Q0 */ + {0x360C, 0xC92D}, /* P_RD_P0Q1 */ + {0x360E, 0x0771}, /* P_RD_P0Q2 */ + {0x3610, 0xE38C}, /* P_RD_P0Q3 */ + {0x3612, 0xD74F}, /* P_RD_P0Q4 */ + {0x364A, 0x168C}, /* P_RD_P1Q0 */ + {0x364C, 0xCACB}, /* P_RD_P1Q1 */ + {0x364E, 0x8C4C}, /* P_RD_P1Q2 */ + {0x3650, 0x0BEA}, /* P_RD_P1Q3 */ + {0x3652, 0xDC0F}, /* P_RD_P1Q4 */ + {0x368A, 0x70B0}, /* P_RD_P2Q0 */ + {0x368C, 0x200B}, /* P_RD_P2Q1 */ + {0x368E, 0x30B2}, /* P_RD_P2Q2 */ + {0x3690, 0xD04F}, /* P_RD_P2Q3 */ + {0x3692, 0xACF5}, /* P_RD_P2Q4 */ + {0x36CA, 0xF7C9}, /* P_RD_P3Q0 */ + {0x36CC, 0x2AED}, /* P_RD_P3Q1 */ + {0x36CE, 0xA652}, /* P_RD_P3Q2 */ + {0x36D0, 0x8192}, /* P_RD_P3Q3 */ + {0x36D2, 0x3A15}, /* P_RD_P3Q4 */ + {0x370A, 0xDA30}, /* P_RD_P4Q0 */ + {0x370C, 0x2E2F}, /* P_RD_P4Q1 */ + {0x370E, 0xBB56}, /* P_RD_P4Q2 */ + {0x3710, 0x8195}, /* P_RD_P4Q3 */ + {0x3712, 0x02F9}, /* P_RD_P4Q4 */ + {0x3600, 0x0230}, /* P_GR_P0Q0 */ + {0x3602, 0x58AD}, /* P_GR_P0Q1 */ 
+ {0x3604, 0x18D1}, /* P_GR_P0Q2 */ + {0x3606, 0x260D}, /* P_GR_P0Q3 */ + {0x3608, 0xF530}, /* P_GR_P0Q4 */ + {0x3640, 0x17EB}, /* P_GR_P1Q0 */ + {0x3642, 0x3CAB}, /* P_GR_P1Q1 */ + {0x3644, 0x87CE}, /* P_GR_P1Q2 */ + {0x3646, 0xC02E}, /* P_GR_P1Q3 */ + {0x3648, 0xF48F}, /* P_GR_P1Q4 */ + {0x3680, 0x5350}, /* P_GR_P2Q0 */ + {0x3682, 0x7EAF}, /* P_GR_P2Q1 */ + {0x3684, 0x4312}, /* P_GR_P2Q2 */ + {0x3686, 0xC652}, /* P_GR_P2Q3 */ + {0x3688, 0xBC15}, /* P_GR_P2Q4 */ + {0x36C0, 0xB8AD}, /* P_GR_P3Q0 */ + {0x36C2, 0xBDCD}, /* P_GR_P3Q1 */ + {0x36C4, 0xE4B2}, /* P_GR_P3Q2 */ + {0x36C6, 0xB50F}, /* P_GR_P3Q3 */ + {0x36C8, 0x5B95}, /* P_GR_P3Q4 */ + {0x3700, 0xFC90}, /* P_GR_P4Q0 */ + {0x3702, 0x8C51}, /* P_GR_P4Q1 */ + {0x3704, 0xCED6}, /* P_GR_P4Q2 */ + {0x3706, 0xB594}, /* P_GR_P4Q3 */ + {0x3708, 0x0A39}, /* P_GR_P4Q4 */ + {0x3614, 0x0230}, /* P_BL_P0Q0 */ + {0x3616, 0x160D}, /* P_BL_P0Q1 */ + {0x3618, 0x08D1}, /* P_BL_P0Q2 */ + {0x361A, 0x98AB}, /* P_BL_P0Q3 */ + {0x361C, 0xEA50}, /* P_BL_P0Q4 */ + {0x3654, 0xB4EA}, /* P_BL_P1Q0 */ + {0x3656, 0xEA6C}, /* P_BL_P1Q1 */ + {0x3658, 0xFE08}, /* P_BL_P1Q2 */ + {0x365A, 0x2C6E}, /* P_BL_P1Q3 */ + {0x365C, 0xEB0E}, /* P_BL_P1Q4 */ + {0x3694, 0x6DF0}, /* P_BL_P2Q0 */ + {0x3696, 0x3ACF}, /* P_BL_P2Q1 */ + {0x3698, 0x3E0F}, /* P_BL_P2Q2 */ + {0x369A, 0xB2B1}, /* P_BL_P2Q3 */ + {0x369C, 0xC374}, /* P_BL_P2Q4 */ + {0x36D4, 0xF2AA}, /* P_BL_P3Q0 */ + {0x36D6, 0x8CCC}, /* P_BL_P3Q1 */ + {0x36D8, 0xDEF2}, /* P_BL_P3Q2 */ + {0x36DA, 0xFA11}, /* P_BL_P3Q3 */ + {0x36DC, 0x42F5}, /* P_BL_P3Q4 */ + {0x3714, 0xF4F1}, /* P_BL_P4Q0 */ + {0x3716, 0xF6F0}, /* P_BL_P4Q1 */ + {0x3718, 0x8FD6}, /* P_BL_P4Q2 */ + {0x371A, 0xEA14}, /* P_BL_P4Q3 */ + {0x371C, 0x6338}, /* P_BL_P4Q4 */ + {0x361E, 0x0350}, /* P_GB_P0Q0 */ + {0x3620, 0x91AE}, /* P_GB_P0Q1 */ + {0x3622, 0x0571}, /* P_GB_P0Q2 */ + {0x3624, 0x100D}, /* P_GB_P0Q3 */ + {0x3626, 0xCA70}, /* P_GB_P0Q4 */ + {0x365E, 0xE6CB}, /* P_GB_P1Q0 */ + {0x3660, 0x50ED}, /* P_GB_P1Q1 */ + {0x3662, 0x3DAE}, /* P_GB_P1Q2 */ + {0x3664, 0xAA4F}, /* P_GB_P1Q3 */ + {0x3666, 0xDC50}, /* P_GB_P1Q4 */ + {0x369E, 0x5470}, /* P_GB_P2Q0 */ + {0x36A0, 0x1F6E}, /* P_GB_P2Q1 */ + {0x36A2, 0x6671}, /* P_GB_P2Q2 */ + {0x36A4, 0xC010}, /* P_GB_P2Q3 */ + {0x36A6, 0x8DF5}, /* P_GB_P2Q4 */ + {0x36DE, 0x0B0C}, /* P_GB_P3Q0 */ + {0x36E0, 0x84CE}, /* P_GB_P3Q1 */ + {0x36E2, 0x8493}, /* P_GB_P3Q2 */ + {0x36E4, 0xA610}, /* P_GB_P3Q3 */ + {0x36E6, 0x50B5}, /* P_GB_P3Q4 */ + {0x371E, 0x9651}, /* P_GB_P4Q0 */ + {0x3720, 0x1EAB}, /* P_GB_P4Q1 */ + {0x3722, 0xAF76}, /* P_GB_P4Q2 */ + {0x3724, 0xE4F4}, /* P_GB_P4Q3 */ + {0x3726, 0x79F8}, /* P_GB_P4Q4 */ + {0x3782, 0x0410}, /* POLY_ORIGIN_C */ + {0x3784, 0x0320}, /* POLY_ORIGIN_R */ + {0x3780, 0x8000} /* POLY_SC_ENABLE */ +}; + +struct mt9t013_reg mt9t013_regs = { + .reg_pat = &mt9t013_reg_pat[0], + .reg_pat_size = ARRAY_SIZE(mt9t013_reg_pat), + .ttbl = &mt9t013_test_tbl[0], + .ttbl_size = ARRAY_SIZE(mt9t013_test_tbl), + .lctbl = &mt9t013_lc_tbl[0], + .lctbl_size = ARRAY_SIZE(mt9t013_lc_tbl), + .rftbl = &mt9t013_lc_tbl[0], + .rftbl_size = ARRAY_SIZE(mt9t013_lc_tbl) +}; diff --git a/drivers/media/video/msm/ov8810.c b/drivers/media/video/msm/ov8810.c new file mode 100644 index 0000000000000..e719399f89a80 --- /dev/null +++ b/drivers/media/video/msm/ov8810.c @@ -0,0 +1,2812 @@ +/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Code Aurora Forum nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * Alternatively, provided that this notice is retained in full, this software + * may be relicensed by the recipient under the terms of the GNU General Public + * License version 2 ("GPL") and only version 2, in which case the provisions of + * the GPL apply INSTEAD OF those given above. If the recipient relicenses the + * software under the GPL, then the identification text in the MODULE_LICENSE + * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a + * recipient changes the license terms to the GPL, subsequent recipients shall + * not relicense under alternate licensing terms, including the BSD or dual + * BSD/GPL terms. In addition, the following license statement immediately + * below and between the words START and END shall also then apply when this + * software is relicensed under the GPL: + * + * START + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 and only version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * END + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ov8810.h" + + +/* CAMIF output resolutions */ +/* 816x612, 24MHz MCLK 96MHz PCLK */ +#define OV8810_FULL_SIZE_DUMMY_PIXELS 0 +#define OV8810_FULL_SIZE_DUMMY_LINES 0 +#define OV8810_FULL_SIZE_WIDTH 3280 +#define OV8810_FULL_SIZE_HEIGHT 2456 + +#define OV8810_QTR_SIZE_DUMMY_PIXELS 0 +#define OV8810_QTR_SIZE_DUMMY_LINES 0 +#define OV8810_QTR_SIZE_WIDTH 1632 +#define OV8810_QTR_SIZE_HEIGHT 1224 + +#define OV8810_HRZ_FULL_BLK_PIXELS 696 /*stella 1203*/ +#define OV8810_VER_FULL_BLK_LINES 44 +#define OV8810_HRZ_QTR_BLK_PIXELS 890 +#define OV8810_VER_QTR_BLK_LINES 44 + +static int cam_mode_sel = 0; /* 0: photo, 1: video@30fps, 2: video@24fps */ +/* 240: 26, 365: 24, 589: 21 */ +const int ov8810_ver_qtr_blk_lines_array[] = {44, 44, 365}; + +/*============================================================= + SENSOR REGISTER DEFINES +==============================================================*/ +#define Q8 0x00000100 + +/* Omnivision8810 product ID register address */ +#define OV8810_PIDH_REG 0x300A +#define OV8810_PIDL_REG 0x300B + +/* Omnivision8810 product ID */ +#define OV8810_PID 0x88 +/* Omnivision8810 version */ +#define OV8810_VER 0x10 + +/* Time in milisecs for waiting for the sensor to reset */ +#define OV8810_RESET_DELAY_MSECS 66 + +#define OV8810_DEFAULT_CLOCK_RATE 24000000 + +/* Registers*/ +/* PLL Registers */ +#define REG_PRE_PLL_CLK_DIV 0x3011 /*0x0305*/ +#define REG_PLL_MULTIPLIER 0x3010 +#define REG_VT_CLK_DIV 0x300E /*[7:4]VT_SYS_DIV, [3-0]VT_PIX_DIV*/ +#define REG_OP_CLK_DIV 0x300F /*[7:4]OP_SYS_DIV, [3-0]OP_PIX_DIV*/ + +/* ISP Enable Control */ +#define REG_ISP_ENABLE_CONTROL_00 0x3302 +#define REG_ISP_ENABLE_CONTROL_01 0x3301 + +/* AWB Control */ +#define REG_AWB_CTRL_0 0x3320 +#define REG_AWB_CTRL_1 0x3321 +#define REG_AWB_CTRL_2 0x3322 +#define REG_AWB_CTRL_8 0x3328 + +/* Output Size */ +#define REG_X_OUTPUT_SIZE_MSB 0x302C +#define REG_X_OUTPUT_SIZE_LSB 0x302D +#define REG_Y_OUTPUT_SIZE_MSB 0x302E +#define REG_Y_OUTPUT_SIZE_LSB 0x302F + +/*Reserved register */ +#define REG_BINNING_CONTROL 0x3091 + +/* Frame Fotmat */ +#define REG_FRAME_LENGTH_LINES_MSB 0x3020 +#define REG_FRAME_LENGTH_LINES_LSB 0x3021 +#define REG_LINE_LENGTH_PCK_MSB 0x3022 +#define REG_LINE_LENGTH_PCK_LSB 0x3023 +#define REG_EXTRA_VSYNC_WIDTH_MSB 0x301E +#define REG_EXTRA_VSYNC_WIDTH_LSB 0x301F + +#define REG_X_ADDR_START_HIGH 0x3024 +#define REG_X_ADDR_START_LOW 0x3025 +#define REG_Y_ADDR_START_HIGH 0x3026 +#define REG_Y_ADDR_START_LOW 0x3027 +#define REG_X_ADDR_END_HIGH 0x3028 +#define REG_X_ADDR_END_LOW 0x3029 +#define REG_Y_ADDR_END_HIGH 0x302A +#define REG_Y_ADDR_END_LOW 0x302B + +/* Gain setting register */ +#define OV8810_GAIN 0x3000 +#define OV8810_AEC_MSB 0x3002 +#define OV8810_AEC_LSB 0x3003 + +/* additional gain function provided by OV8810, + * original gain can changed to 1x, 2x or 4x + * to increase the gain that OV8810 can provide */ +#define OV8810_REG_MUL_GAIN 0x3006 +#define MUL_GAIN_INIT_VALUE 0x00 + +#define OV8810_MAX_EXPOSURE_GAIN 0x1FF + +/* Mode select register */ +#define OV8810_REG_MODE_SELECT 0x30FA /* image system */ +#define OV8810_MODE_SELECT_STREAM 0x01 /* start streaming */ +#define OV8810_MODE_SELECT_SW_STANDBY 0x00 /* software standby */ +#define OV8810_REG_SOFTWARE_RESET 0x3012 /* 0x0103 */ +#define OV8810_SOFTWARE_RESET 0x80 /* 0x01 */ + +/* AF Total steps parameters */ +#define OV8810_AF_MSB 0x30EC +#define OV8810_AF_LSB 
0x30ED + +#define OV8810_STEPS_NEAR_TO_CLOSEST_INF 42 /*43 stella0122 */ +#define OV8810_TOTAL_STEPS_NEAR_TO_FAR 42 /*43 stella0122 */ + +/*Test pattern*/ +/* Color bar pattern selection */ +#define OV8810_COLOR_BAR_PATTERN_SEL_REG 0x307B + +/* Color bar enabling control */ +#define OV8810_COLOR_BAR_ENABLE_REG 0x307D + +/* I2C Address of the Sensor */ +#define OV8810_I2C_SLAVE_ID 0x6C + +/*LSC table length*/ +#define LSC_table_length 144 +/*============================================================================ +TYPE DECLARATIONS +============================================================================*/ + +/* 16bit address - 8 bit context register structure */ +#if 0 +typedef struct reg_addr_val_pair_struct { + uint16_t reg_addr; + uint8_t reg_val; +} reg_struct_type; +#endif + +struct awb_lsc_struct_type { + unsigned int caBuff[8]; /*awb_calibartion*/ + struct reg_addr_val_pair_struct LSC_table[150]; /*lsc_calibration*/ + uint32_t LSC_table_CRC; +}; + +enum ov8810_test_mode_t { + TEST_OFF, + TEST_1, + TEST_2, + TEST_3 +}; + +enum ov8810_resolution_t { + QTR_SIZE, + FULL_SIZE, + INVALID_SIZE +}; + +/*LSC calibration*/ +int global_mode; +/*TODO: should be use a header file to reference this function*/ +extern unsigned char *get_cam_awb_cal(void); + +static int sensor_probe_node = 0; +static int preview_frame_count = 0; + +static struct wake_lock ov8810_wake_lock; + +static inline void init_suspend(void) +{ + wake_lock_init(&ov8810_wake_lock, WAKE_LOCK_IDLE, "ov8810"); +} + +static inline void deinit_suspend(void) +{ + wake_lock_destroy(&ov8810_wake_lock); +} + +static inline void prevent_suspend(void) +{ + wake_lock(&ov8810_wake_lock); +} + +static inline void allow_suspend(void) +{ + wake_unlock(&ov8810_wake_lock); +} + +/*============================================================================ +DATA DECLARATIONS +============================================================================*/ + +/* 96MHz PCLK @ 24MHz MCLK inc*/ + /*stella1223 start*/ +static struct reg_addr_val_pair_struct ov8810_init_settings_array[] = +{ + /* Sensor clk setup */ + {REG_OP_CLK_DIV, 0x04}, + {REG_VT_CLK_DIV, 0x05}, +#if 1 /* weiting0414 prevent capture hang restore CLK */ + {REG_PLL_MULTIPLIER, 0x28}, /*0x28 96MHz PCLK 0x18 64MHz PCLK*/ + {REG_PRE_PLL_CLK_DIV, 0x22}, +#else + {REG_PLL_MULTIPLIER, 0x14}, /*Reduce internal clock to prevent hang Weiting0331*/ + {REG_PRE_PLL_CLK_DIV, 0x21}, +#endif + {OV8810_GAIN, 8}, /*0x30},*/ + {OV8810_AEC_MSB, 0x04}, + {OV8810_AEC_LSB, 0xc4}, /*stella 1203*/ + {REG_ISP_ENABLE_CONTROL_00, 0x20}, + {0x30b2, 0x13}, /*driving strength*/ + {0x30a0, 0x40}, + {0x3098, 0x24}, + {0x3099, 0x81}, + {0x309a, 0x64}, + {0x309b, 0x00}, + {0x309d, 0x64}, + {0x309e, 0x2d}, + {REG_AWB_CTRL_0, 0xc2}, /*set wb manual*/ + {REG_AWB_CTRL_1, 0x02}, + {REG_AWB_CTRL_2, 0x04}, + {REG_AWB_CTRL_8, 0x40}, + {0x3329, 0xe3}, /*00},*/ /*stella 1203*/ + {0x3306, 0x00}, + {0x3316, 0x03}, + {0x3079, 0x0a}, + /*stella 1203*/ + {0x3058, 0x01}, + {0x3059, 0xa0}, + {0x306b, 0x00}, + {0x3065, 0x50}, + {0x3067, 0x40}, + {0x3069, 0x80}, + {0x3071, 0x40},/*50 BLC trigger by gain 40 BLC every frame */ + {0x3300, 0xef}, + {0x3334, 0x02}, + {0x3331, 0x08}, /*BLC level 8813*/ /*stella 1203*/ + {0x3332, 0x08}, /*8813*/ + {0x3333, 0x41}, + /*Stella1221 for adding init size */ + {0x30f8, 0x45}, + {REG_FRAME_LENGTH_LINES_MSB, + ((OV8810_QTR_SIZE_HEIGHT + OV8810_VER_QTR_BLK_LINES) & 0xFF00) >> 8}, + {REG_FRAME_LENGTH_LINES_LSB, + ((OV8810_QTR_SIZE_HEIGHT + OV8810_VER_QTR_BLK_LINES) & 0x00FF)}, + 
{REG_LINE_LENGTH_PCK_MSB, + ((OV8810_QTR_SIZE_WIDTH + OV8810_HRZ_QTR_BLK_PIXELS) & 0xFF00) >> 8}, + {REG_LINE_LENGTH_PCK_LSB, + ((OV8810_QTR_SIZE_WIDTH + OV8810_HRZ_QTR_BLK_PIXELS) & 0x00FF)}, + {REG_X_ADDR_START_HIGH, 0x00}, + {REG_X_ADDR_START_LOW, 0x04}, /*stella 1203*/ + {REG_Y_ADDR_START_HIGH, 0x00}, + {REG_Y_ADDR_START_LOW, 0x00}, + {REG_X_ADDR_END_HIGH, 0x0c}, + {REG_X_ADDR_END_LOW, 0xdb}, /*stella 1203*/ + {REG_Y_ADDR_END_HIGH, 0x09}, + {REG_Y_ADDR_END_LOW, 0x9f}, + {REG_X_OUTPUT_SIZE_MSB, (OV8810_QTR_SIZE_WIDTH & 0xFF00) >> 8}, + {REG_X_OUTPUT_SIZE_LSB, (OV8810_QTR_SIZE_WIDTH & 0x00FF)}, + {REG_Y_OUTPUT_SIZE_MSB, (OV8810_QTR_SIZE_HEIGHT & 0xFF00) >> 8}, + {REG_Y_OUTPUT_SIZE_LSB, (OV8810_QTR_SIZE_HEIGHT & 0x00FF)}, + /*Stella1221 for adding init size */ + /* {REG_BINNING_CONTROL, 0x00},*/ /*stella 1203*/ + {OV8810_REG_MUL_GAIN, MUL_GAIN_INIT_VALUE}, + {0x3082, 0x80}, + {0x331e, 0x94}, + {0x331f, 0x6e}, + {0x3092, 0x00}, + {0x3094, 0x01}, + {0x3090, 0x2b}, /* for AN version 8a */ /*changed by Stella for 8813*/ + {0x30ab, 0x44}, + {0x3095, 0x0a}, + {0x308d, 0x00}, + {0x3082, 0x00}, + {0x3080, 0x40}, + {0x30aa, 0x59}, + {0x30a9, 0x00}, + {0x30be, 0x08}, + {0x309f, 0x23}, + {0x3065, 0x40}, + {0x3068, 0x00}, + {0x30bf, 0x80}, + {0x309c, 0x00}, + {0x3084, 0x44}, /*added by stella for 8813*/ + {0x3016, 0x03}, /*added by stella for 8813*/ + {0x30e9, 0x09}, /*changed by stella for 8813*/ + {0x3075, 0x29}, + {0x3076, 0x29}, + {0x3077, 0x29}, + {0x3078, 0x29}, + {0x306a, 0x05}, + {0x3015, 0x33}, /*changed by stella for 8813*/ + /*stella 1203 start*/ + {0x3090, 0x36}, + {0x333e, 0x00}, + {0x306a, 0x05}, + /*stella 1203 end*/ + {0x3087, 0x41}, + {0x3090, 0x97}, /*99, QCT=97*/ + {0x309e, 0x1b}, + {0x30e3, 0x0e}, + {0x30f0, 0x00}, + {0x30f2, 0x00}, + {0x30f4, 0x90}, + /*stella 1203 start*/ + {0x3347, 0x00}, + {0x3347, 0x00}, +#if 0 + {0x3092, 0x00}, //marked by QCT + {0x30f0, 0x10}, //marked by QCT + {0x30f1, 0x56}, //marked by QCT + {0x30fb, 0x8e}, //marked by QCT + {0x30f3, 0xa7}, //marked by QCT +#endif + {0x3091, 0x08}, /*QCT for 8813*/ + {0x3090, 0x97}, /*QCT for 8813*/ + {0x30fb, 0xc9}, /*QCT for 8813*/ + {0x308d, 0x02}, + {0x30e7, 0x41}, + {0x30b3, 0x08}, + {0x33e5, 0x00}, /*30e5*/ + {0x350e, 0x40}, /*305e*/ + {0x301f, 0x00}, + {0x309f, 0x23}, + {0x3013, 0xc0}, + {0x30e1, 0x90}, + {0x3058, 0x01}, + {0x3500, 0x40}, /* vsync_new */ + {REG_BINNING_CONTROL, 0x00}, /*stella 0126*/ + /*stella 1203 end*/ +}; + +/*Vincent for LSC calibration*/ +static struct reg_addr_val_pair_struct lsc_table_array[] = +{ + {0x3358, 0x1f },//{0x3358, 0x18}, + {0x3359, 0x14 },//{0x3359, 0x0f}, + {0x335a, 0x0f },//{0x335a, 0x0c}, + {0x335b, 0x0d },//{0x335b, 0x0a}, + {0x335c, 0x0d },//{0x335c, 0x0a}, + {0x335d, 0x0f },//{0x335d, 0x0b}, + {0x335e, 0x14 },//{0x335e, 0x0d}, + {0x335f, 0x1d },//{0x335f, 0x15}, + {0x3360, 0x0f },//{0x3360, 0x0b}, + {0x3361, 0x0a },//{0x3361, 0x09}, + {0x3362, 0x07 },//{0x3362, 0x06}, + {0x3363, 0x06 },//{0x3363, 0x05}, + {0x3364, 0x06 },//{0x3364, 0x05}, + {0x3365, 0x07 },//{0x3365, 0x06}, + {0x3366, 0x09 },//{0x3366, 0x08}, + {0x3367, 0x0d },//{0x3367, 0x0b}, + {0x3368, 0x09 },//{0x3368, 0x07}, + {0x3369, 0x06 },//{0x3369, 0x05}, + {0x336a, 0x04 },//{0x336a, 0x03}, + {0x336b, 0x03 },//{0x336b, 0x02}, + {0x336c, 0x03 },//{0x336c, 0x02}, + {0x336d, 0x04 },//{0x336d, 0x03}, + {0x336e, 0x06 },//{0x336e, 0x04}, + {0x336f, 0x09 },//{0x336f, 0x06}, + {0x3370, 0x07 },//{0x3370, 0x05}, + {0x3371, 0x04 },//{0x3371, 0x04}, + {0x3372, 0x01 },//{0x3372, 0x01}, + {0x3373, 0x00 },//{0x3373, 0x00}, + 
{0x3374, 0x00 },//{0x3374, 0x00}, + {0x3375, 0x01 },//{0x3375, 0x01}, + {0x3376, 0x04 },//{0x3376, 0x03}, + {0x3377, 0x07 },//{0x3377, 0x05}, + {0x3378, 0x08 },//{0x3378, 0x05}, + {0x3379, 0x04 },//{0x3379, 0x03}, + {0x337a, 0x01 },//{0x337a, 0x01}, + {0x337b, 0x00 },//{0x337b, 0x00}, + {0x337c, 0x00 },//{0x337c, 0x00}, + {0x337d, 0x01 },//{0x337d, 0x00}, + {0x337e, 0x04 },//{0x337e, 0x02}, + {0x337f, 0x07 },//{0x337f, 0x05}, + {0x3380, 0x09 },//{0x3380, 0x06}, + {0x3381, 0x06 },//{0x3381, 0x04}, + {0x3382, 0x04 },//{0x3382, 0x03}, + {0x3383, 0x02 },//{0x3383, 0x02}, + {0x3384, 0x02 },//{0x3384, 0x01}, + {0x3385, 0x04 },//{0x3385, 0x02}, + {0x3386, 0x06 },//{0x3386, 0x03}, + {0x3387, 0x09 },//{0x3387, 0x05}, + {0x3388, 0x0f },//{0x3388, 0x0a}, + {0x3389, 0x0a },//{0x3389, 0x07}, + {0x338a, 0x07 },//{0x338a, 0x05}, + {0x338b, 0x07 },//{0x338b, 0x04}, + {0x338c, 0x07 },//{0x338c, 0x04}, + {0x338d, 0x07 },//{0x338d, 0x05}, + {0x338e, 0x0a },//{0x338e, 0x06}, + {0x338f, 0x0f },//{0x338f, 0x09}, + {0x3390, 0x1d },//{0x3390, 0x12}, + {0x3391, 0x12 },//{0x3391, 0x0d}, + {0x3392, 0x0d },//{0x3392, 0x09}, + {0x3393, 0x0b },//{0x3393, 0x08}, + {0x3394, 0x0b },//{0x3394, 0x08}, + {0x3395, 0x0d },//{0x3395, 0x09}, + {0x3396, 0x12 },//{0x3396, 0x0c}, + {0x3397, 0x1a },//{0x3397, 0x11}, + {0x3398, 0x0f },//{0x3398, 0x10}, + {0x3399, 0x0d },//{0x3399, 0x10}, + {0x339a, 0x0e },//{0x339a, 0x10}, + {0x339b, 0x0f },//{0x339b, 0x0e}, + {0x339c, 0x11 },//{0x339c, 0x0e}, + {0x339d, 0x0d },//{0x339d, 0x0f}, + {0x339e, 0x12 },//{0x339e, 0x0e}, + {0x339f, 0x0e },//{0x339f, 0x0f}, + {0x33a0, 0x0f },//{0x33a0, 0x0f}, + {0x33a1, 0x0f },//{0x33a1, 0x0f}, + {0x33a2, 0x10 },//{0x33a2, 0x0f}, + {0x33a3, 0x10 },//{0x33a3, 0x10}, + {0x33a4, 0x0f },//{0x33a4, 0x0e}, + {0x33a5, 0x0d },//{0x33a5, 0x10}, + {0x33a6, 0x0f },//{0x33a6, 0x11}, + {0x33a7, 0x10 },//{0x33a7, 0x10}, + {0x33a8, 0x10 },//{0x33a8, 0x10}, + {0x33a9, 0x0f },//{0x33a9, 0x0f}, + {0x33aa, 0x10 },//{0x33aa, 0x0e}, + {0x33ab, 0x0e },//{0x33ab, 0x0f}, + {0x33ac, 0x10 },//{0x33ac, 0x10}, + {0x33ad, 0x11 },//{0x33ad, 0x10}, + {0x33ae, 0x11 },//{0x33ae, 0x10}, + {0x33af, 0x0f },//{0x33af, 0x0f}, + {0x33b0, 0x0f },//{0x33b0, 0x0e}, + {0x33b1, 0x0d },//{0x33b1, 0x0f}, + {0x33b2, 0x0d },//{0x33b2, 0x0f}, + {0x33b3, 0x0e },//{0x33b3, 0x0f}, + {0x33b4, 0x0f },//{0x33b4, 0x0f}, + {0x33b5, 0x10 },//{0x33b5, 0x0f}, + {0x33b6, 0x12 },//{0x33b6, 0x0e}, + {0x33b7, 0x0d },//{0x33b7, 0x0d}, + {0x33b8, 0x0c },//{0x33b8, 0x0c}, + {0x33b9, 0x0c },//{0x33b9, 0x0c}, + {0x33ba, 0x0c },//{0x33ba, 0x0d}, + {0x33bb, 0x0b },//{0x33bb, 0x0f}, + {0x33bc, 0x1b },//{0x33bc, 0x16}, + {0x33bd, 0x1b },//{0x33bd, 0x17}, + {0x33be, 0x1d },//{0x33be, 0x17}, + {0x33bf, 0x1d },//{0x33bf, 0x17}, + {0x33c0, 0x1e },//{0x33c0, 0x17}, + {0x33c1, 0x1c },//{0x33c1, 0x14}, + {0x33c2, 0x1a },//{0x33c2, 0x17}, + {0x33c3, 0x17 },//{0x33c3, 0x14}, + {0x33c4, 0x15 },//{0x33c4, 0x13}, + {0x33c5, 0x16 },//{0x33c5, 0x13}, + {0x33c6, 0x19 },//{0x33c6, 0x14}, + {0x33c7, 0x1e },//{0x33c7, 0x15}, + {0x33c8, 0x16 },//{0x33c8, 0x15}, + {0x33c9, 0x12 },//{0x33c9, 0x12}, + {0x33ca, 0x10 },//{0x33ca, 0x10}, + {0x33cb, 0x10 },//{0x33cb, 0x10}, + {0x33cc, 0x14 },//{0x33cc, 0x12}, + {0x33cd, 0x19 },//{0x33cd, 0x14}, + {0x33ce, 0x16 },//{0x33ce, 0x15}, + {0x33cf, 0x12 },//{0x33cf, 0x12}, + {0x33d0, 0x10 },//{0x33d0, 0x10}, + {0x33d1, 0x11 },//{0x33d1, 0x10}, + {0x33d2, 0x14 },//{0x33d2, 0x12}, + {0x33d3, 0x1a },//{0x33d3, 0x14}, + {0x33d4, 0x18 },//{0x33d4, 0x16}, + {0x33d5, 0x15 },//{0x33d5, 0x13}, + {0x33d6, 0x13 },//{0x33d6, 
0x12}, + {0x33d7, 0x14 },//{0x33d7, 0x12}, + {0x33d8, 0x17 },//{0x33d8, 0x13}, + {0x33d9, 0x1b },//{0x33d9, 0x15}, + {0x33da, 0x18 },//{0x33da, 0x18}, + {0x33db, 0x1a },//{0x33db, 0x15}, + {0x33dc, 0x1b },//{0x33dc, 0x15}, + {0x33dd, 0x1b },//{0x33dd, 0x15}, + {0x33de, 0x1b },//{0x33de, 0x15}, + {0x33df, 0x1c },//{0x33df, 0x14}, + {0x3350, 0x06 },//{0x3350, 0x06}, + {0x3351, 0xab },//{0x3351, 0xab}, + {0x3352, 0x05 },//{0x3352, 0x05}, + {0x3353, 0x00 },//{0x3353, 0x00}, + {0x3354, 0x04 },//{0x3354, 0x04}, + {0x3355, 0xf8 },//{0x3355, 0xf8}, + {0x3356, 0x07 },//{0x3356, 0x07}, + {0x3357, 0x74 },//{0x3357, 0x74}, + /* lsc setting on sensor*/ + {0x3300, 0xff}, /*enable lsc on sensor*/ + /*move to the last*/ + {OV8810_REG_MODE_SELECT, OV8810_MODE_SELECT_STREAM}, +}; + +/*1632x1224; 24MHz MCLK 96MHz PCLK*/ +static struct reg_addr_val_pair_struct ov8810_qtr_settings_array[] = +{ + {0x30f8, 0x45}, + {REG_FRAME_LENGTH_LINES_MSB, + ((OV8810_QTR_SIZE_HEIGHT + OV8810_VER_QTR_BLK_LINES) & 0xFF00) >> 8}, + {REG_FRAME_LENGTH_LINES_LSB, + ((OV8810_QTR_SIZE_HEIGHT + OV8810_VER_QTR_BLK_LINES) & 0x00FF)}, + {REG_LINE_LENGTH_PCK_MSB, + ((OV8810_QTR_SIZE_WIDTH + OV8810_HRZ_QTR_BLK_PIXELS) & 0xFF00) >> 8}, + {REG_LINE_LENGTH_PCK_LSB, + ((OV8810_QTR_SIZE_WIDTH + OV8810_HRZ_QTR_BLK_PIXELS) & 0x00FF)}, + {REG_X_ADDR_START_HIGH, 0x00}, + {REG_X_ADDR_START_LOW, 0x04}, /*stella 1203*/ + {REG_Y_ADDR_START_HIGH, 0x00}, + {REG_Y_ADDR_START_LOW, 0x00}, + {REG_X_ADDR_END_HIGH, 0x0c}, + {REG_X_ADDR_END_LOW, 0xd8}, /*stella 1203=db*/ /*QCT:d8*/ + {REG_Y_ADDR_END_HIGH, 0x09}, + {REG_Y_ADDR_END_LOW, 0x9f}, + {REG_X_OUTPUT_SIZE_MSB, (OV8810_QTR_SIZE_WIDTH & 0xFF00) >> 8}, + {REG_X_OUTPUT_SIZE_LSB, (OV8810_QTR_SIZE_WIDTH & 0x00FF)}, + {REG_Y_OUTPUT_SIZE_MSB, (OV8810_QTR_SIZE_HEIGHT & 0xFF00) >> 8}, + {REG_Y_OUTPUT_SIZE_LSB, (OV8810_QTR_SIZE_HEIGHT & 0x00FF)}, + /*stella1202 for capture over exposure issue due to user space use 2X line count*/ + {0x3068, 0x00}, /*changed for color edge, stella 1203*/ + {0x307e, 0x00}, + {0x3071, 0x40},/*50 BLC trigger by gain 40 BLC every frame */ + {REG_ISP_ENABLE_CONTROL_01, 0x0B}, + {REG_BINNING_CONTROL, 0x00}, //stella0127 + {0x331c, 0x00}, + {0x331d, 0x00}, + {0x308a, 0x02}, + {0x3072, 0x0d}, + {0x3319, 0x04}, + {0x309e, 0x09}, + {0x300e, 0x05}, + {0x300f, 0x04}, + {0x33e4, 0x07}, /*lsc for 2:1 down sampling*/ +}; + + /*stella1223 end*/ + +/* 3280x2456 Sensor Raw; 24MHz MCLK 96MHz PCLK*/ +static struct reg_addr_val_pair_struct ov8810_full_settings_array[] = +{ + {0x30f8, 0x40}, + {REG_FRAME_LENGTH_LINES_MSB, + ((OV8810_FULL_SIZE_HEIGHT + OV8810_VER_FULL_BLK_LINES) & 0xFF00) >> 8}, + {REG_FRAME_LENGTH_LINES_LSB, + ((OV8810_FULL_SIZE_HEIGHT + OV8810_VER_FULL_BLK_LINES) & 0x00FF)}, + {REG_LINE_LENGTH_PCK_MSB, + ((OV8810_FULL_SIZE_WIDTH + OV8810_HRZ_FULL_BLK_PIXELS) & 0xFF00) >> 8}, + {REG_LINE_LENGTH_PCK_LSB, + ((OV8810_FULL_SIZE_WIDTH + OV8810_HRZ_FULL_BLK_PIXELS) & 0x00FF)}, + {REG_X_ADDR_START_HIGH, 0x00}, + {REG_X_ADDR_START_LOW, 0x02}, /*stella 1203*/ + {REG_Y_ADDR_START_HIGH, 0x00}, + {REG_Y_ADDR_START_LOW, 0x00}, + {REG_X_ADDR_END_HIGH, 0x0c}, + {REG_X_ADDR_END_LOW, 0xdd}, /*stella 1203*/ + {REG_Y_ADDR_END_HIGH, 0x09}, + {REG_Y_ADDR_END_LOW, 0x9f}, + {REG_X_OUTPUT_SIZE_MSB, (OV8810_FULL_SIZE_WIDTH & 0xFF00) >> 8}, + {REG_X_OUTPUT_SIZE_LSB, (OV8810_FULL_SIZE_WIDTH & 0x00FF)}, + {REG_Y_OUTPUT_SIZE_MSB, (OV8810_FULL_SIZE_HEIGHT & 0xFF00) >> 8}, + {REG_Y_OUTPUT_SIZE_LSB, (OV8810_FULL_SIZE_HEIGHT & 0x00FF)}, + /*stella1202 for capture over exposure issue + due to user space use 2X line 
count */ + {0x3068, 0x00}, /* changed for color edge stella 1203*/ + {0x307e, 0x00}, + {REG_ISP_ENABLE_CONTROL_01, 0x0B}, + {REG_BINNING_CONTROL, 0x00}, //stella0127 + {0x331c, 0x28}, + {0x331d, 0x21}, + {0x308a, 0x01}, + {0x3072, 0x01}, + {0x3319, 0x06}, + {0x309e, 0x1b}, + {0x300e, 0x05}, + {0x300f, 0x04}, + {0x33e4, 0x02}, /*lsc for full resolution*/ +}; + +/* AF Tuning Parameters */ + +static uint16_t ov8810_step_position_table[OV8810_TOTAL_STEPS_NEAR_TO_FAR+1]; + +static uint8_t ov8810_damping_threshold = 10; +static uint8_t ov8810_damping_course_step = 4; +static uint8_t ov8810_damping_fine_step = 10; +static uint8_t ov8810_damping_time_wait; +static uint16_t ov8810_focus_debug; /*don't init to 0*/ +static uint16_t ov8810_use_default_damping = 1; +static uint16_t ov8810_use_threshold_damping = 1; /*set to FALSE if too slow*/ +/*static uint32_t stored_line_length_ratio = 1 * Q8;*/ + +/*Andy1217 write Line 1 frame ealier before Gain*/ +struct backup_line_gain_struct { + uint32_t line; + uint8_t mul; + uint16_t gain; + uint32_t extra_line_length; +}; + +static struct backup_line_gain_struct backup_line_gain[2]; + +static uint16_t write_cnt; +static uint16_t updated_BLC; /* only set to 0x50 after 1st update again*/ + +uint8_t S3_to_0 = 0x1; /* 0x9 */ + +/* static Variables*/ +static uint16_t step_position_table[OV8810_TOTAL_STEPS_NEAR_TO_FAR+1]; + + +/* FIXME: Changes from here */ +struct ov8810_work { + struct work_struct work; +}; + +static struct ov8810_work *ov8810_sensorw; +static struct i2c_client *ov8810_client; + +static struct vreg *vreg_af_actuator; + +struct ov8810_ctrl { + const struct msm_camera_sensor_info *sensordata; + + uint32_t sensormode; + uint32_t fps_divider; /* init to 1 * 0x00000400 */ + uint32_t pict_fps_divider; /* init to 1 * 0x00000400 */ + uint16_t fps; + + int16_t curr_lens_pos; + uint16_t curr_step_pos; + uint16_t my_reg_gain; + uint32_t my_reg_line_count; + uint16_t total_lines_per_frame; + + enum ov8810_resolution_t prev_res; + enum ov8810_resolution_t pict_res; + enum ov8810_resolution_t curr_res; + enum ov8810_test_mode_t set_test; + + unsigned short imgaddr; +}; + + +static struct ov8810_ctrl *ov8810_ctrl; +static struct platform_device *ov8810_pdev; + +struct ov8810_waitevent{ + uint32_t waked_up; + wait_queue_head_t event_wait; +}; +static struct ov8810_waitevent ov8810_event; + +static DECLARE_WAIT_QUEUE_HEAD(ov8810_wait_queue); +DEFINE_SEMAPHORE(ov8810_sem); + + +/*=============================================================*/ + +static int ov8810_i2c_rxdata(unsigned short saddr, + unsigned char *rxdata, int length) +{ + struct i2c_msg msgs[] = { + { + .addr = saddr, + .flags = 0, + .len = 2, + .buf = rxdata, + }, + { + .addr = saddr, + .flags = I2C_M_RD, + .len = length, + .buf = rxdata, + }, + }; + CDBG("%s: saddr=0x%X\n", __func__, saddr); + CDBG("%s: raddr=0x%X\n", __func__, *rxdata); + + if (i2c_transfer(ov8810_client->adapter, msgs, 2) < 0) { + pr_err("[CAM]ov8810_i2c_rxdata failed!\n"); + return -EIO; + } + CDBG("%s: rxdata=0x%X\n", __func__, *rxdata); + + return 0; +} +static int32_t ov8810_i2c_txdata(unsigned short saddr, + unsigned char *txdata, int length) +{ + struct i2c_msg msg[] = { + { + .addr = saddr, + .flags = 0, + .len = length, + .buf = txdata, + }, + }; + if (i2c_transfer(ov8810_client->adapter, msg, 1) < 0) { + pr_err("[CAM]ov8810_i2c_txdata faild 0x%x\n", ov8810_client->addr); + return -EIO; + } + + return 0; +} + + +static int32_t ov8810_i2c_read(unsigned short raddr, + unsigned short *rdata, int rlen) +{ + int32_t rc = 0; 
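+	/*
+	 * 16-bit register read: the register address is split into two
+	 * bytes (e.g. OV8810_PIDH_REG 0x300A -> buf[0] = 0x30,
+	 * buf[1] = 0x0A) and sent as a write message, and the read-back
+	 * reuses the same buffer.  The transfer is retried for up to 20
+	 * attempts, with a longer delay after the first ten failures.
+	 * For rlen == 2 the result is assembled MSB first, otherwise
+	 * buf[0] is returned as-is.
+	 */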
+ unsigned char buf[2]; + int count = 0; + if (!rdata) + return -EIO; + + memset(buf, 0, sizeof(buf)); + + buf[0] = (raddr & 0xFF00) >> 8; + buf[1] = (raddr & 0x00FF); +retry: + rc = ov8810_i2c_rxdata(ov8810_client->addr, buf, rlen); + + if (rc < 0) { + pr_err("[CAM]ov8810_i2c_read 0x%x failed!\n", raddr); + printk(KERN_ERR "starting read retry policy count:%d\n", count); + udelay(10); + count++; + if (count < 20) { + if (count > 10) + udelay(100); + } else + return rc; + goto retry; + } + + *rdata = (rlen == 2 ? buf[0] << 8 | buf[1] : buf[0]); + return rc; +} + + +static int32_t ov8810_i2c_write_b(unsigned short saddr, + unsigned short waddr, uint8_t bdata) +{ + int32_t rc = -EFAULT; + unsigned char buf[3]; + int count = 0; + CDBG("i2c_write_w_b, addr = 0x%x, val = 0x%x!\n", waddr, bdata); + + memset(buf, 0, sizeof(buf)); + buf[0] = (waddr & 0xFF00) >> 8; + buf[1] = (waddr & 0x00FF); + buf[2] = bdata; + +retry: + CDBG("i2c_write_b addr = %d, val = %d\n", waddr, bdata); + rc = ov8810_i2c_txdata(saddr, buf, 3); + + if (rc < 0) { + pr_err("[CAM]i2c_write_b failed, addr = 0x%x, val = 0x%x!\n", + waddr, bdata); + pr_err(KERN_ERR "starting read retry policy count:%d\n", count); + udelay(10); + count++; + if (count < 20) { + if (count > 10) + udelay(100); + } else + return rc; + goto retry; + } + + return rc; +} + + +/*for LSC calibration*/ +static int ov8810_update_lsc_table(struct sensor_cfg_data *cdata) +{ + int i = 0; + pr_info("[CAM][LSC calibration]ov8810_update_lsc_table\n"); + for (i = 0; i < 144; i++) { + ov8810_i2c_write_b( + ov8810_client->addr, + cdata->cfg.lsctable.lsc_table[i].reg_addr, + cdata->cfg.lsctable.lsc_table[i].reg_val); + pr_info("[CAM][LSC calibration]update_lsc_table: 0x%x, 0x%x\n", + cdata->cfg.lsctable.lsc_table[i].reg_addr, + cdata->cfg.lsctable.lsc_table[i].reg_val); + } + /*enable lsc on sensor*/ + ov8810_i2c_write_b(ov8810_client->addr, 0x3300, 0xff); + /*mirror on*/ + ov8810_i2c_write_b(ov8810_client->addr, 0x30f8, 0x45); + /*mirror on*/ + ov8810_i2c_write_b(ov8810_client->addr, 0x3316, 0x03); + return 1; + +} + +/*20100330 vincent for LSC calibration*/ +static int ov8810_LSC_calibration_set_rawflag(struct sensor_cfg_data *cdata) +{ + global_mode = 1; + return 1; +} + +#define MAX_FUSE_ID_INFO 11 +static int ov8810_i2c_read_fuseid(struct sensor_cfg_data *cdata) +{ + unsigned short fuse_id[MAX_FUSE_ID_INFO]; + int count = 0; + + ov8810_i2c_write_b(ov8810_client->addr, 0x30d5, 0xff); + ov8810_i2c_write_b(ov8810_client->addr, 0x30d6, 0xff); + ov8810_i2c_write_b(ov8810_client->addr, 0x30d7, 0xff); + ov8810_i2c_write_b(ov8810_client->addr, 0x30d8, 0xff); + ov8810_i2c_write_b(ov8810_client->addr, 0x30d9, 0xff); + ov8810_i2c_write_b(ov8810_client->addr, 0x30da, 0xff); + ov8810_i2c_write_b(ov8810_client->addr, 0x30db, 0xff); + ov8810_i2c_write_b(ov8810_client->addr, 0x30dc, 0xff); + ov8810_i2c_write_b(ov8810_client->addr, 0x30dd, 0xff); + ov8810_i2c_write_b(ov8810_client->addr, 0x30de, 0xff); + ov8810_i2c_write_b(ov8810_client->addr, 0x30df, 0xff); + ov8810_i2c_write_b(ov8810_client->addr, 0x303e, 0x55); + ov8810_i2c_read(0x30d5, &fuse_id[0], 2); + ov8810_i2c_read(0x30d6, &fuse_id[1], 2); + ov8810_i2c_read(0x30d7, &fuse_id[2], 2); + ov8810_i2c_read(0x30d8, &fuse_id[3], 2); + ov8810_i2c_read(0x30d9, &fuse_id[4], 2); + ov8810_i2c_read(0x30da, &fuse_id[5], 2); + ov8810_i2c_read(0x30db, &fuse_id[6], 2); + ov8810_i2c_read(0x30dc, &fuse_id[7], 2); + ov8810_i2c_read(0x30dd, &fuse_id[8], 2); + ov8810_i2c_read(0x30de, &fuse_id[9], 2); + ov8810_i2c_read(0x30df, &fuse_id[10], 2); 
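+	/*
+	 * Although eleven fuse bytes are read and logged below, only
+	 * fuse_id[0] and fuse_id[1] are returned to user space; words 3
+	 * and 4 of the reported fuse ID are left as zero.
+	 */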
+ cdata->cfg.fuse.fuse_id_word1 = (uint32_t) fuse_id[0]; + cdata->cfg.fuse.fuse_id_word2 = (uint32_t) fuse_id[1]; + cdata->cfg.fuse.fuse_id_word3 = 0; + cdata->cfg.fuse.fuse_id_word4 = 0; + for (count = 0; count < MAX_FUSE_ID_INFO; count++) + pr_info("[CAM]Ov8810 Get fuse: fuse_id[%d]: %x\n", + count, fuse_id[count]); + return 0; +} + + +static int32_t ov8810_af_i2c_write(uint16_t data) +{ + uint8_t code_val_msb, code_val_lsb; /* S3_to_0; */ + int32_t rc = 0; + /* S3_to_0 = 0x9; S[3:0] */ + code_val_msb = data >> 4; /* D[9:4] */ + code_val_lsb = ((data & 0x000F) << 4) | S3_to_0; + + CDBG("code value = %d ,D[9:4] = %d ,D[3:0] = %d\n", + data, code_val_msb, code_val_lsb); + rc = ov8810_i2c_write_b(ov8810_client->addr, + OV8810_AF_MSB, code_val_msb); + + if (rc < 0) { + pr_err("[CAM]Unable to write code_val_msb = %d\n", code_val_msb); + return rc; + } + + rc = ov8810_i2c_write_b(ov8810_client->addr, + OV8810_AF_LSB, code_val_lsb); + if (rc < 0) { + pr_err("[CAM]Unable to write code_val_lsb = %disclaimer\n", + code_val_lsb); + return rc; + } + + return rc; +} /* ov8810_af_i2c_write */ + +static int32_t ov8810_move_focus(int direction, int32_t num_steps) +{ + + int8_t step_direction; + int8_t dest_step_position; + uint16_t dest_lens_position, target_dist, small_step; + int16_t next_lens_position; + int32_t rc = 0; + + if (num_steps == 0) { + return rc; + } + + if (direction == MOVE_NEAR) { + step_direction = 1; + } else if (direction == MOVE_FAR) { + step_direction = -1; + } else { + pr_err("[CAM]Illegal focus direction\n"); + return -EINVAL;; /* CAMERA_INVALID_PARM; */ + } + + CDBG("%s, interpolate\n", __func__); + dest_step_position = + ov8810_ctrl->curr_step_pos + (step_direction * num_steps); + + if (dest_step_position < 0) + dest_step_position = 0; + else if (dest_step_position > OV8810_TOTAL_STEPS_NEAR_TO_FAR) + dest_step_position = OV8810_TOTAL_STEPS_NEAR_TO_FAR; + + dest_lens_position = ov8810_step_position_table[dest_step_position]; + + /* Taking small damping steps */ + target_dist = step_direction * + (dest_lens_position - ov8810_ctrl->curr_lens_pos); + + if (target_dist == 0) { + return rc; + } + + if (ov8810_use_threshold_damping && + (step_direction < 0) && + (target_dist >= + ov8810_step_position_table[ov8810_damping_threshold])) { + + /* change to variable */ + small_step = (uint16_t)(target_dist/ov8810_damping_fine_step); + ov8810_damping_time_wait = 1; + } else { + small_step = (uint16_t)(target_dist/ov8810_damping_course_step); + ov8810_damping_time_wait = 4; + } + + for (next_lens_position = + ov8810_ctrl->curr_lens_pos + (step_direction * small_step); + (step_direction * next_lens_position) <= + (step_direction * dest_lens_position); + next_lens_position += (step_direction * small_step)) { + + if (ov8810_af_i2c_write(next_lens_position) < 0) + return -EBUSY; + + ov8810_ctrl->curr_lens_pos = next_lens_position; + + if (ov8810_ctrl->curr_lens_pos != dest_lens_position) { + mdelay(ov8810_damping_time_wait); + } + } + + if (ov8810_ctrl->curr_lens_pos != dest_lens_position) { + + if (ov8810_af_i2c_write(dest_lens_position) < 0) { + return -EBUSY; + } + } + + /* Storing the current lens Position */ + ov8810_ctrl->curr_lens_pos = dest_lens_position; + ov8810_ctrl->curr_step_pos = dest_step_position; + + CDBG("done\n"); + return rc; +} + +static int32_t ov8810_set_default_focus(uint8_t af_step) +{ + int16_t position; + int32_t rc = 0; + ov8810_damping_time_wait = 4; + + if (ov8810_use_default_damping) { + + /* when lens is uninitialized */ + if (ov8810_ctrl->curr_lens_pos == -1 + 
|| (ov8810_focus_debug == 1)) { + + position = ov8810_step_position_table[ov8810_damping_threshold]; + rc = ov8810_af_i2c_write(position); + + if (rc < 0) { + return rc; + } + + ov8810_ctrl->curr_step_pos = ov8810_damping_threshold; + ov8810_ctrl->curr_lens_pos = position; + mdelay(ov8810_damping_time_wait); + } + + rc = ov8810_move_focus(MOVE_FAR, ov8810_ctrl->curr_step_pos); + if (rc < 0) + return rc; + } else { + rc = ov8810_af_i2c_write(ov8810_step_position_table[0]); + if (rc < 0) + return rc; + + ov8810_ctrl->curr_step_pos = 0; + ov8810_ctrl->curr_lens_pos = ov8810_step_position_table[0]; + } + + return rc; +} + + +static void ov8810_get_pict_fps(uint16_t fps, uint16_t *pfps) +{ + /* input fps is preview fps in Q8 format */ + + uint32_t divider, d1, d2; + + uint16_t snapshot_height, preview_height, preview_width, snapshot_width; + + if (ov8810_ctrl->prev_res == QTR_SIZE) { + preview_width = + OV8810_QTR_SIZE_WIDTH + OV8810_HRZ_QTR_BLK_PIXELS ; + preview_height = + OV8810_QTR_SIZE_HEIGHT + ov8810_ver_qtr_blk_lines_array[cam_mode_sel] ; + } else { + /* full size resolution used for preview. */ + preview_width = + OV8810_FULL_SIZE_WIDTH + OV8810_HRZ_FULL_BLK_PIXELS ; + preview_height = + OV8810_FULL_SIZE_HEIGHT + OV8810_VER_FULL_BLK_LINES ; + } + + if (ov8810_ctrl->pict_res == QTR_SIZE) { + snapshot_width = + OV8810_QTR_SIZE_WIDTH + OV8810_HRZ_QTR_BLK_PIXELS ; + snapshot_height = + OV8810_QTR_SIZE_HEIGHT + ov8810_ver_qtr_blk_lines_array[cam_mode_sel] ; + + } else { + snapshot_width = + OV8810_FULL_SIZE_WIDTH + OV8810_HRZ_FULL_BLK_PIXELS; + snapshot_height = + OV8810_FULL_SIZE_HEIGHT + OV8810_VER_FULL_BLK_LINES; + } + + d1 = preview_height * 0x00000400 / snapshot_height; + d2 = preview_width * 0x00000400 / snapshot_width; + + divider = (uint32_t) (d1 * d2) / 0x00000400; + *pfps = (uint16_t)(fps * divider / 0x00000400); + +} /* endof ov8810_get_pict_fps */ + +static uint16_t ov8810_get_prev_lines_pf(void) +{ + if (ov8810_ctrl->prev_res == QTR_SIZE) { + return (OV8810_QTR_SIZE_HEIGHT + ov8810_ver_qtr_blk_lines_array[cam_mode_sel]); + } else { + return (OV8810_FULL_SIZE_HEIGHT + OV8810_VER_FULL_BLK_LINES); + } +} + +static uint16_t ov8810_get_prev_pixels_pl(void) +{ + if (ov8810_ctrl->prev_res == QTR_SIZE) { + return (OV8810_QTR_SIZE_WIDTH + OV8810_HRZ_QTR_BLK_PIXELS); + } else { + return (OV8810_FULL_SIZE_WIDTH + OV8810_HRZ_FULL_BLK_PIXELS); +} +} + +static uint16_t ov8810_get_pict_lines_pf(void) +{ + if (ov8810_ctrl->pict_res == QTR_SIZE) { + return (OV8810_QTR_SIZE_HEIGHT + ov8810_ver_qtr_blk_lines_array[cam_mode_sel]); + } else { + return (OV8810_FULL_SIZE_HEIGHT + OV8810_VER_FULL_BLK_LINES); + } +} + +static uint16_t ov8810_get_pict_pixels_pl(void) +{ + if (ov8810_ctrl->pict_res == QTR_SIZE) { + return (OV8810_QTR_SIZE_WIDTH + OV8810_HRZ_QTR_BLK_PIXELS); + } else { + return (OV8810_FULL_SIZE_WIDTH + OV8810_HRZ_FULL_BLK_PIXELS); + } +} + +static uint32_t ov8810_get_pict_max_exp_lc(void) +{ + if (ov8810_ctrl->pict_res == QTR_SIZE) { + return (OV8810_QTR_SIZE_HEIGHT + ov8810_ver_qtr_blk_lines_array[cam_mode_sel]); + } else { + return (OV8810_FULL_SIZE_HEIGHT + OV8810_VER_FULL_BLK_LINES); + } +} + +static int32_t ov8810_set_fps(struct fps_cfg *fps) +{ + int32_t rc = 0; + ov8810_ctrl->fps_divider = fps->fps_div; + ov8810_ctrl->pict_fps_divider = fps->pict_fps_div; + ov8810_ctrl->fps = fps->f_mult; + return rc; +} + + +static int32_t ov8810_write_exp_gain + (uint16_t mul, uint16_t gain, uint32_t line) +{ + uint16_t aec_msb; + uint16_t aec_lsb; + int32_t rc = 0; + uint32_t 
total_lines_per_frame; + uint32_t total_pixels_per_line; + /*uint32_t line_length_ratio = 1 * Q8;*/ + /**uint8_t ov8810_offset = 2; */ + uint32_t extra_line_length = 0; + uint16_t extra_line_msb = 0; + uint16_t extra_line_lsb = 0; + uint32_t phy_line = 0; + uint8_t phy_mul = MUL_GAIN_INIT_VALUE; + uint16_t phy_gain = 0; + uint32_t phy_extra_line_length = 0; + const uint16_t postpone_frames = 4; + uint16_t do_write = 1; /* assume do things */ + uint16_t ori_reg_mul_gain; + uint8_t ori_reg_mul_gain_8bit; + + CDBG("%s start, mul = %d gain = %d line = %d\n", __func__, + mul, gain, line); + + if (ov8810_ctrl->curr_res == QTR_SIZE) { + total_lines_per_frame = + (OV8810_QTR_SIZE_HEIGHT + ov8810_ver_qtr_blk_lines_array[cam_mode_sel]); + total_pixels_per_line = + OV8810_QTR_SIZE_WIDTH + OV8810_HRZ_QTR_BLK_PIXELS; + } else { + total_lines_per_frame = + (OV8810_FULL_SIZE_HEIGHT + OV8810_VER_FULL_BLK_LINES); + total_pixels_per_line = + OV8810_FULL_SIZE_WIDTH + OV8810_HRZ_FULL_BLK_PIXELS; + } + + if (line > total_lines_per_frame - 4) { + extra_line_length = + (uint32_t)(line - (total_lines_per_frame-4)); + line = total_lines_per_frame - 4; + } else { + extra_line_length = (uint16_t)0; + } + + phy_line = line; + phy_mul = mul; + phy_gain = gain; + phy_extra_line_length = extra_line_length; + + /* postpone writing gain only apply to preview */ + if (ov8810_ctrl->sensormode == SENSOR_PREVIEW_MODE) { + + /* need time to wait for aec stable (prevent black preview) */ + mdelay(6); + + CDBG("Stella: write_cnt=%d, pre_line = %d, line = %d," \ + "pre_mul = %d mul = %d," \ + "pre_gain = %d gain = %d," \ + "pre_extra_line_length =%d extra_line_length = %d\n", + write_cnt, + backup_line_gain[1].line, line, + backup_line_gain[1].mul, mul, + backup_line_gain[1].gain, gain, + backup_line_gain[1].extra_line_length, extra_line_length); + + if (write_cnt == 0 && ( + backup_line_gain[1].line != line || + backup_line_gain[1].mul != mul || + backup_line_gain[1].gain != gain || + backup_line_gain[1].extra_line_length != extra_line_length)) { + + backup_line_gain[1].line = line; + backup_line_gain[1].mul = mul; + backup_line_gain[1].gain = gain; + backup_line_gain[1].extra_line_length = extra_line_length; + phy_line = backup_line_gain[1].line; + phy_mul = backup_line_gain[0].mul; + phy_gain = backup_line_gain[0].gain; + phy_extra_line_length = backup_line_gain[0].extra_line_length; + write_cnt++; + } else if (write_cnt >= 1 && write_cnt < postpone_frames) { + phy_line = backup_line_gain[1].line; + phy_mul = backup_line_gain[1].mul; + phy_gain = backup_line_gain[1].gain; + phy_extra_line_length = backup_line_gain[1].extra_line_length; + + CDBG("updated_BLC = %d\n", updated_BLC); + if (updated_BLC == 5) { + /*50 BLC trigger by gain 40 BLC every frame */ + pr_info("[CAM]### BLC to 0x50 ###\n"); +#if 0 + ov8810_i2c_write_b(ov8810_client->addr, 0x3071, 0x50); +#endif + } + if (updated_BLC <= 5) + updated_BLC++; + + if (write_cnt > 1) + do_write = 0; + write_cnt++; + } else { + write_cnt = 0; + do_write = 0; + } + + if (do_write) { + backup_line_gain[0].line = phy_line; + backup_line_gain[0].mul = phy_mul; + backup_line_gain[0].gain = phy_gain; + backup_line_gain[0].extra_line_length = phy_extra_line_length; + } + + } +#if 0 + pr_info("[CAM]Stella: backup_line_gain[0].line = %d\n", + backup_line_gain[0].line); + pr_info("[CAM]Stella: backup_line_gain[0].mul = %d\n", + backup_line_gain[0].mul); + pr_info("[CAM]Stella: backup_line_gain[0].gain = %d\n", + backup_line_gain[0].gain); + pr_info("[CAM]Stella: 
backup_line_gain[0].extra_line_length = %d\n", + backup_line_gain[0].extra_line_length); + pr_info("[CAM]Stella: backup_line_gain[1].line = %d\n", + backup_line_gain[1].line); + pr_info("[CAM]Stella: backup_line_gain[1].mul = %d\n", + backup_line_gain[1].mul); + pr_info("[CAM]Stella: backup_line_gain[1].gain = %d\n", + backup_line_gain[1].gain); + pr_info("[CAM]Stella: backup_line_gain[1].extra_line_length = %d\n", + backup_line_gain[1].extra_line_length); + + pr_info("[CAM]Stella: phy_line=%d\n", phy_line); + pr_info("[CAM]Stella: phy_gain=%d\n", phy_gain); + pr_info("[CAM]Stella: phy_extra_line_length=%d\n", phy_extra_line_length); +#endif + + extra_line_msb = (uint16_t)(phy_extra_line_length & 0xFF00) >> 8; + extra_line_lsb = (uint16_t)(phy_extra_line_length & 0x00FF); + + aec_msb = (uint16_t)(phy_line & 0xFF00) >> 8; + aec_lsb = (uint16_t)(phy_line & 0x00FF); + + if (!do_write) + return rc; + +/*Move the read function out of group update to prevent hang Weiting0331*/ + rc = ov8810_i2c_read(OV8810_REG_MUL_GAIN, + &ori_reg_mul_gain, 2); + if (rc < 0) { + pr_err("[CAM]read OV8810_REG_MUL_GAIN fail\n"); + return rc; + } + + + /* since we do STREAM ON here, don't do group update for snapshot */ + if (ov8810_ctrl->sensormode != SENSOR_SNAPSHOT_MODE) { + /*for group update top*/ + /* weiting0414 prevent capture hang, enable 0x30b7[2] */ + rc = ov8810_i2c_write_b(ov8810_client->addr, 0x30b7, 0x8c); + if (rc < 0) + return rc; + } + + /* FIXME: prevent black preview by restoring 0x30bf -> 0x80 */ + rc = ov8810_i2c_write_b(ov8810_client->addr, 0x30bf, 0x80); + if (rc < 0) + return rc; + + rc = ov8810_i2c_write_b(ov8810_client->addr, + OV8810_AEC_MSB, (uint8_t)aec_msb); + if (rc < 0) + return rc; + + rc = ov8810_i2c_write_b(ov8810_client->addr, + OV8810_AEC_LSB, (uint8_t)aec_lsb); + if (rc < 0) + return rc; + + ori_reg_mul_gain_8bit = + (uint8_t)((ori_reg_mul_gain & 0xFF00) >> 8); + CDBG("%s, read OV8810_REG_MUL_GAIN ori_reg_mul_gain = %x\n", + __func__, ori_reg_mul_gain_8bit); + ori_reg_mul_gain_8bit = + (ori_reg_mul_gain_8bit & 0xFC) | (phy_mul & 0x03); + CDBG("%s, read OV8810_REG_MUL_GAIN ori_reg_mul_gain = %x\n", + __func__, ori_reg_mul_gain_8bit); + rc = ov8810_i2c_write_b(ov8810_client->addr, + OV8810_REG_MUL_GAIN, ori_reg_mul_gain_8bit); + if (rc < 0) + return rc; + + rc = ov8810_i2c_write_b(ov8810_client->addr, + OV8810_GAIN, (uint8_t)phy_gain); + if (rc < 0) + return rc; + + rc = ov8810_i2c_write_b(ov8810_client->addr, + REG_EXTRA_VSYNC_WIDTH_MSB, (uint8_t)extra_line_msb); + if (rc < 0) + return rc; + + rc = ov8810_i2c_write_b(ov8810_client->addr, + REG_EXTRA_VSYNC_WIDTH_LSB, (uint8_t)extra_line_lsb); + if (rc < 0) + return rc; + + if (ov8810_ctrl->sensormode != SENSOR_SNAPSHOT_MODE) { + /* for group update bottom */ + /* weiting0414 prevent capture hang , enable 0x30b7[2] */ + rc = ov8810_i2c_write_b(ov8810_client->addr, 0x30b7, 0x84); + if (rc < 0) + return rc; + + /* for group update enable */ + rc = ov8810_i2c_write_b(ov8810_client->addr, 0x30ff, 0xff); + if (rc < 0) + return rc; + /* weiting0414 prevent capture hang , + retry I2C write to make sure enable */ + rc = ov8810_i2c_write_b(ov8810_client->addr, 0x30ff, 0xff); + if (rc < 0) + return rc; + } + + if (ov8810_ctrl->sensormode == SENSOR_RAW_SNAPSHOT_MODE) { + pr_info("[CAM]sleep 500 ms for safety raw snapshot"); + msleep(500); + } + + /* STREAM ON for SNAPSHOT */ + if (ov8810_ctrl->sensormode == SENSOR_SNAPSHOT_MODE) { + pr_info("[CAM]ov8810_ctrl: STREAM ON for SNAPSHOT\n"); + rc = ov8810_i2c_write_b(ov8810_client->addr, + 
OV8810_REG_MODE_SELECT, + OV8810_MODE_SELECT_STREAM); + if (rc < 0) + return rc; + msleep(50); + } + + /*stored_line_length_ratio = line_length_ratio;*/ + return rc; + +} /* endof ov8810_write_exp_gain*/ + +/* ### this function is not called for userspace ### */ +static int32_t ov8810_set_pict_exp_gain + (uint16_t mul, uint16_t gain, uint32_t line) +{ + int32_t rc = 0; + rc = ov8810_write_exp_gain(mul, gain, line); + return rc; +} /* endof ov8810_set_pict_exp_gain*/ + +/* remove test code */ +#if 0 +static int32_t ov8810_test(enum ov8810_test_mode_t mo) +{ + int32_t rc = 0; + if (mo == TEST_OFF) { + return rc; + } + + /* Activate the Color bar test pattern */ + if (mo == TEST_1) { + rc = ov8810_i2c_write_b(ov8810_client->addr, + OV8810_COLOR_BAR_ENABLE_REG, 0xa0); + if (rc < 0) { + return rc; + } + rc = ov8810_i2c_write_b(ov8810_client->addr, + 0x3085, 0x20); + if (rc < 0) { + return rc; + } + rc = ov8810_i2c_write_b(ov8810_client->addr, + 0x306c, 0x00); + if (rc < 0) { + return rc; + } + rc = ov8810_i2c_write_b(ov8810_client->addr, + OV8810_COLOR_BAR_PATTERN_SEL_REG, 0x02); + if (rc < 0) { + return rc; + } + } + + return rc; + +} +#endif + + +uint32_t Crc32CheckSumByte(uint8_t *pData, uint32_t uiLen, uint32_t preValue) +{ + const uint32_t crc32table[256] = { + /* 0x00 */ 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, + /* 0x04 */ 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, + /* 0x08 */ 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, + /* 0x0C */ 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, + /* 0x10 */ 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, + /* 0x14 */ 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, + /* 0x18 */ 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, + /* 0x1C */ 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, + /* 0x20 */ 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, + /* 0x24 */ 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, + /* 0x28 */ 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, + /* 0x2C */ 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, + /* 0x30 */ 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, + /* 0x34 */ 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, + /* 0x38 */ 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, + /* 0x3C */ 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, + /* 0x40 */ 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, + /* 0x44 */ 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, + /* 0x48 */ 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, + /* 0x4C */ 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, + /* 0x50 */ 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, + /* 0x54 */ 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, + /* 0x58 */ 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, + /* 0x5C */ 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, + /* 0x60 */ 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, + /* 0x64 */ 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, + /* 0x68 */ 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, + /* 0x6C */ 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, + /* 0x70 */ 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, + /* 0x74 */ 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, + /* 0x78 */ 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, + /* 0x7C */ 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, + /* 0x80 */ 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, + /* 0x84 */ 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, + /* 0x88 */ 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, + /* 0x8C */ 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, + /* 0x90 */ 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, + /* 0x94 */ 
0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, + /* 0x98 */ 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, + /* 0x9C */ 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, + /* 0xA0 */ 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, + /* 0xA4 */ 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, + /* 0xA8 */ 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, + /* 0xAC */ 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, + /* 0xB0 */ 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, + /* 0xB4 */ 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, + /* 0xB8 */ 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, + /* 0xBC */ 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, + /* 0xC0 */ 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, + /* 0xC4 */ 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, + /* 0xC8 */ 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, + /* 0xCC */ 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, + /* 0xD0 */ 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, + /* 0xD4 */ 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, + /* 0xD8 */ 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, + /* 0xDC */ 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, + /* 0xE0 */ 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, + /* 0xE4 */ 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, + /* 0xE8 */ 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, + /* 0xEC */ 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, + /* 0xF0 */ 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, + /* 0xF4 */ 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, + /* 0xF8 */ 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, + /* 0xFC */ 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D,}; + uint32_t i, CheckSum, cvalue; + + CheckSum = preValue; + for (i = 0; i < uiLen; i++) { + cvalue = *pData; + CheckSum = + (CheckSum>>8) ^ + crc32table[(CheckSum & 0xFF) ^ + (cvalue & 0xFF)]; + pData++; + } + return CheckSum; +} + +static int32_t HTC_update_ov8810_lsc_registers(void) +{ + int i; + struct awb_lsc_struct_type *awb_lsc_data_ptr; + awb_lsc_data_ptr = (struct awb_lsc_struct_type *)get_cam_awb_cal(); + + for (i = 0; i < 8; i++) { + pr_info(KERN_INFO"[LSC calibration] read AWB table 0x%x\n", + awb_lsc_data_ptr->caBuff[i]); + } + + for (i = 0; i < LSC_table_length; i++) { + pr_info("[CAM][LSC calibration] read LSC table 0x%x, 0x%x\n", + awb_lsc_data_ptr->LSC_table[i].reg_addr, + awb_lsc_data_ptr->LSC_table[i].reg_val); + } + + if (awb_lsc_data_ptr->LSC_table_CRC == + Crc32CheckSumByte( + (uint8_t *) awb_lsc_data_ptr->LSC_table, + 150 * sizeof(struct reg_addr_val_pair_struct), 0) && + awb_lsc_data_ptr->LSC_table_CRC != 0) { + + pr_info("[CAM][LSC calibration]checksum pass,use calibrated LSC\n"); + + for (i = 0; i < LSC_table_length; i++) { + ov8810_i2c_write_b(ov8810_client->addr, + awb_lsc_data_ptr->LSC_table[i].reg_addr, + awb_lsc_data_ptr->LSC_table[i].reg_val); + } + /*enable lsc on sensor*/ + ov8810_i2c_write_b(ov8810_client->addr, 0x3300, 0xff); + /*move to the last*/ + ov8810_i2c_write_b(ov8810_client->addr, + OV8810_REG_MODE_SELECT, OV8810_MODE_SELECT_STREAM); + + } else {/*use default LSC table*/ + pr_info("[CAM][LSC calibration]checksum fail\n"); + return false; + } + return true; +} + +static int32_t initialize_ov8810_registers(void) +{ + int32_t i, array_length; + int32_t rc = 0; + + struct msm_camera_sensor_info *sdata = ov8810_pdev->dev.platform_data; + + mdelay(5); + ov8810_i2c_write_b( + ov8810_client->addr, + OV8810_REG_SOFTWARE_RESET, + OV8810_SOFTWARE_RESET); + mdelay(5); + ov8810_i2c_write_b( + ov8810_client->addr, + OV8810_REG_MODE_SELECT, + 
OV8810_MODE_SELECT_SW_STANDBY); + mdelay(1); + array_length = sizeof(ov8810_init_settings_array) / + sizeof(ov8810_init_settings_array[0]); + + /* Configure sensor for Preview mode and Snapshot mode */ + for (i = 0; i < array_length; i++) { + rc = ov8810_i2c_write_b(ov8810_client->addr, + ov8810_init_settings_array[i].reg_addr, + ov8810_init_settings_array[i].reg_val); + if (rc < 0) + return rc; + } + + /*use calibrated LSC table*/ + if (!sdata->sensor_lc_disable) { /* 0902 disable old LSC method */ + if (HTC_update_ov8810_lsc_registers()) { + pr_info("[CAM][LSC calibration] use calibrated LSC table done!\n"); + } else {/*use default LSC table*/ + array_length = + sizeof(lsc_table_array) / sizeof(lsc_table_array[0]); + + for (i = 0; i < array_length; i++) { + rc = ov8810_i2c_write_b(ov8810_client->addr, + lsc_table_array[i].reg_addr, + lsc_table_array[i].reg_val); + } + pr_info("[CAM][LSC calibration] use default LSC table done\n"); + } + } else { + /* add streaming on */ + ov8810_i2c_write_b(ov8810_client->addr, + OV8810_REG_MODE_SELECT, OV8810_MODE_SELECT_STREAM); + } + return rc; +} /* end of initialize_ov8810_ov8m0vc_registers. */ + +static int32_t ov8810_setting(int rt) +{ + int32_t rc = 0; + int32_t i, array_length; + static int16_t did_snapshot; + uint16_t ori_reg_mul_gain; + uint8_t ori_reg_mul_gain_8bit; + + uint16_t i2c_ret = 0; + + write_cnt = 0; + + pr_info("[CAM]ov8810_setting rt = %d\n", rt); + + if (rt == FULL_SIZE) { + + ov8810_i2c_read(0x30b7, &i2c_ret, 1); + pr_info("[CAM]0x30b7, i2c_ret = 0x%X\n", i2c_ret); + /*Retry writing group update bottom to ensure capture settings can be updated Weiting0331*/ + while (i2c_ret != 0x84) { + + /* for group update bottom */ + rc = ov8810_i2c_write_b(ov8810_client->addr, 0x30b7, 0x84); + if (rc < 0) + return rc; + + /* for group update enable */ + rc = ov8810_i2c_write_b(ov8810_client->addr, 0x30ff, 0xff); + if (rc < 0) + return rc; + + msleep(50); + ov8810_i2c_read(0x30b7, &i2c_ret, 1); + pr_info("[CAM]retry 0x30b7, i2c_ret = 0x%X\n", i2c_ret); + }; + } + + rc = ov8810_i2c_write_b(ov8810_client->addr, + OV8810_REG_MODE_SELECT, + OV8810_MODE_SELECT_SW_STANDBY); + if (rc < 0) { + return rc; + } + + ov8810_i2c_read(OV8810_REG_MODE_SELECT, &i2c_ret, 1); + pr_info("[CAM]OV8810_REG_MODE_SELECT, i2c_ret = 0x%X\n", i2c_ret); + + switch (rt) { + + case QTR_SIZE: + + array_length = sizeof(ov8810_qtr_settings_array) / + sizeof(ov8810_qtr_settings_array[0]); + + /* Configure sensor for XGA preview mode */ + for (i = 0; i < array_length; i++) { + rc = ov8810_i2c_write_b(ov8810_client->addr, + ov8810_qtr_settings_array[i].reg_addr, + ov8810_qtr_settings_array[i].reg_val); + + if (rc < 0) { + return rc; + } + } + +/* reconfigure the qtr height to adjust frame rate */ +{ + uint16_t fl_line = 0; + fl_line = OV8810_QTR_SIZE_HEIGHT + + ov8810_ver_qtr_blk_lines_array[cam_mode_sel]; + pr_info("%s fl_line = %d\n", __func__, __LINE__); + rc = ov8810_i2c_write_b(ov8810_client->addr, + REG_FRAME_LENGTH_LINES_MSB, + (fl_line & 0xFF00) >> 8); + if (rc < 0) + return rc; + rc = ov8810_i2c_write_b(ov8810_client->addr, + REG_FRAME_LENGTH_LINES_LSB, + fl_line & 0x00FF); + if (rc < 0) + return rc; +#if 0 + if (cam_mode_sel > 0) { + pr_info("[CAM]andy write binning ctrl 0x00, cam_mode_sel %d\n", cam_mode_sel); + rc = ov8810_i2c_write_b(ov8810_client->addr, //weiting ori c0 + REG_BINNING_CONTROL, 0x00); + if (rc < 0) + return rc; + + } +#endif +} + + +#if 1 /* this is supposed to prevent abnormal color when restart preview */ + + if (!did_snapshot) + { + 
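+			/* Fresh preview start (no snapshot just taken): reset
+			 * backup_line_gain[0] to the default line/gain values,
+			 * which the surrounding block uses to avoid abnormal
+			 * colour when preview restarts. */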
memset(&backup_line_gain, 0, + sizeof(struct backup_line_gain_struct)); + backup_line_gain[0].line = 0x4c4; + backup_line_gain[0].mul = MUL_GAIN_INIT_VALUE; + backup_line_gain[0].gain = 8; /*0x30;*/ + backup_line_gain[0].extra_line_length = 0; + } + + CDBG("backup_line_gain[0].line = %d" \ + "backup_line_gain[0].mul = %d" \ + "backup_line_gain[0].gain = %d" \ + "backup_line_gain[0].extra_line_length = %d", + backup_line_gain[0].line, + backup_line_gain[0].mul, + backup_line_gain[0].gain, + backup_line_gain[0].extra_line_length); + + rc = ov8810_i2c_write_b(ov8810_client->addr, + OV8810_AEC_MSB, + (uint8_t)((backup_line_gain[0].line & 0xFF00) >> 8)); + if (rc < 0) + return rc; + + rc = ov8810_i2c_write_b(ov8810_client->addr, + OV8810_AEC_LSB, + (uint8_t)(backup_line_gain[0].line & 0x00FF)); + if (rc < 0) + return rc; + + rc = ov8810_i2c_read(OV8810_REG_MUL_GAIN, &ori_reg_mul_gain, 2); + if (rc < 0) { + pr_err("[CAM]read OV8810_REG_MUL_GAIN fail\n"); + return rc; + } + ori_reg_mul_gain_8bit = + (uint8_t)((ori_reg_mul_gain & 0xFF00)>>8); + CDBG("%s, read OV8810_REG_MUL_GAIN ori_reg_mul_gain = %x\n", + __func__, ori_reg_mul_gain_8bit); + ori_reg_mul_gain_8bit = + (ori_reg_mul_gain_8bit & 0xFC) | + (backup_line_gain[0].mul & 0x03); + CDBG("%s, read OV8810_REG_MUL_GAIN ori_reg_mul_gain = %x\n", + __func__, ori_reg_mul_gain_8bit); + rc = ov8810_i2c_write_b(ov8810_client->addr, + OV8810_REG_MUL_GAIN, ori_reg_mul_gain_8bit); + if (rc < 0) + return rc; + + rc = ov8810_i2c_write_b(ov8810_client->addr, + OV8810_GAIN, + (uint8_t)(backup_line_gain[0].gain & 0x00FF)); + if (rc < 0) + return rc; + + rc = ov8810_i2c_write_b(ov8810_client->addr, + REG_EXTRA_VSYNC_WIDTH_MSB, + (uint8_t)((backup_line_gain[0].extra_line_length + & 0xFF00) >> 8)); + if (rc < 0) + return rc; + + rc = ov8810_i2c_write_b(ov8810_client->addr, + REG_EXTRA_VSYNC_WIDTH_LSB, + (uint8_t)(backup_line_gain[0].extra_line_length + & 0x00FF)); + if (rc < 0) + return rc; + +#endif + did_snapshot = 0; + + ov8810_ctrl->curr_res = QTR_SIZE; + + break; + + case FULL_SIZE: + + array_length = sizeof(ov8810_full_settings_array) / + sizeof(ov8810_full_settings_array[0]); + /* Configure sensor for QXGA capture mode */ + for (i = 0; i < array_length; i++) { + rc = ov8810_i2c_write_b(ov8810_client->addr, + ov8810_full_settings_array[i].reg_addr, + ov8810_full_settings_array[i].reg_val); + if (rc < 0) + return rc; + } + did_snapshot = 1; + ov8810_ctrl->curr_res = FULL_SIZE; + break; + + default: + rc = -EFAULT; + return rc; + } + + /*disablt LSC for calibration*/ + pr_info("[CAM][LSC calibration] global_mode=%d!!!!\n", global_mode); + /*take raw picture for LSC calibration*/ + if (global_mode) { + /*disable sensor LSC*/ + rc = ov8810_i2c_write_b(ov8810_client->addr, 0x3300, 0xef); + /*mirror off*/ + rc = ov8810_i2c_write_b(ov8810_client->addr, 0x30f8, 0x00); + /*mirror off*/ + rc = ov8810_i2c_write_b(ov8810_client->addr, 0x3316, 0x02); + pr_info("[CAM][LSC calibration]turn off LSC!Mirror On\n"); + + /*fix gain & linecount*/ + /*Gain=0x9,exp=008d*/ + /*so luma taget = 100 to mfg light source*/ + rc = ov8810_i2c_write_b(ov8810_client->addr, 0x3000, 0x9); + /*AEC_MSB*/ + rc = ov8810_i2c_write_b(ov8810_client->addr, 0x3002, 0x00); + /*AEC_LSB*/ + rc = ov8810_i2c_write_b(ov8810_client->addr, 0x3003, 0x8d); + pr_info("[CAM][LSC calibration]fix gain & linecount\n"); + global_mode = 0; + } + + if (ov8810_ctrl->sensormode != SENSOR_SNAPSHOT_MODE) { + msleep(50); + rc = ov8810_i2c_write_b(ov8810_client->addr, + OV8810_REG_MODE_SELECT, + OV8810_MODE_SELECT_STREAM); 
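+		/* Non-snapshot modes stream on here after the 50 ms settle;
+		 * for snapshots, streaming is started later in
+		 * ov8810_write_exp_gain(). */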
+ if (rc < 0) + return rc; + + updated_BLC = 0; + } + +/* remove test code + rc = ov8810_test(ov8810_ctrl->set_test); + if (rc < 0) + return rc; +*/ + + return rc; +} /*endof ov8810_setting*/ + +static int32_t ov8810_video_config(int mode) +{ + int32_t rc = 0; + static int pre_sel = 0; + int cur_sel = (cam_mode_sel > 1)?1:0; + + ov8810_ctrl->sensormode = mode; + + pr_info("[CAM]%s cam_mode_sel %d cur_sel %d \n", __func__, cam_mode_sel, cur_sel); + + preview_frame_count = 0; + + if (ov8810_ctrl->curr_res != ov8810_ctrl->prev_res + || pre_sel != cur_sel + ) { + rc = ov8810_setting(ov8810_ctrl->prev_res); + if (rc < 0) + return rc; + + } else { + ov8810_ctrl->curr_res = ov8810_ctrl->prev_res; + } + + pre_sel = cur_sel; + + ov8810_ctrl->sensormode = mode; + + return rc; + +} /*end of ov354_video_config*/ + +static int32_t ov8810_snapshot_config(int mode) +{ + int32_t rc = 0; + ov8810_ctrl->sensormode = mode; + + if (ov8810_ctrl->curr_res != ov8810_ctrl->pict_res) { + rc = ov8810_setting(ov8810_ctrl->pict_res); + if (rc < 0) + return rc; + } else { + ov8810_ctrl->curr_res = ov8810_ctrl->pict_res; + } + ov8810_ctrl->sensormode = mode; + + return rc; + +} /*end of ov8810_snapshot_config*/ + +static int32_t ov8810_raw_snapshot_config(int mode) +{ + int32_t rc = 0; + ov8810_ctrl->sensormode = mode; + if (ov8810_ctrl->curr_res != ov8810_ctrl->pict_res) { + rc = ov8810_setting(ov8810_ctrl->pict_res); + if (rc < 0) + return rc; + } else { + ov8810_ctrl->curr_res = ov8810_ctrl->pict_res; + } /* Update sensor resolution */ + + ov8810_ctrl->sensormode = mode; + + return rc; + +} /*end of ov8810_raw_snapshot_config*/ + +static int32_t ov8810_set_sensor_mode(int mode, + int res) +{ + int32_t rc = 0; + struct msm_camera_sensor_info *sinfo = ov8810_pdev->dev.platform_data; + + switch (mode) { + case SENSOR_PREVIEW_MODE: + rc = ov8810_video_config(mode); + break; + + case SENSOR_SNAPSHOT_MODE: + pr_info("[CAM]KPI PA: start sensor snapshot config: %d\n", __LINE__); + sinfo->kpi_sensor_start = ktime_to_ns(ktime_get()); + rc = ov8810_snapshot_config(mode); + break; + + case SENSOR_RAW_SNAPSHOT_MODE: + /*global_mode = 1; //20100330 vincent lsc calibration*/ + pr_info("[CAM]KPI PA: start sensor snapshot config: %d\n", __LINE__); + sinfo->kpi_sensor_start = ktime_to_ns(ktime_get()); + rc = ov8810_raw_snapshot_config(mode); + break; + + default: + rc = -EINVAL; + break; + } + + return rc; +} + +static int32_t ov8810_power_down(void) +{ + return 0; +} + +static int ov8810_probe_read_id(const struct msm_camera_sensor_info *data) +{ + int32_t rc = 0; + uint16_t chipidh = 0; /*, chipidl;*/ + uint16_t def_chipid = 0; + msleep(20); + pr_info("[CAM]%s, ov8810_probe_init_sensor 1\n", __func__); + /* 3. Read sensor Model ID: */ + if (ov8810_i2c_read(OV8810_PIDH_REG, &chipidh, 2) < 0) { + rc = -1; + pr_err("[CAM]read sensor id fail\n"); + } + + pr_info("[CAM]ov8810 model_id + ver = 0x%x\n", chipidh); + + /* 4. 
Compare sensor ID to OV8810 ID: */ + def_chipid = (((OV8810_PID << 8) & 0xFF00) + (OV8810_VER & 0x00FF)); + pr_info("[CAM]%s, Expected id=0x%x\n", __func__, def_chipid); + + if (chipidh < def_chipid) { + rc = -ENODEV; + pr_err("[CAM]read sensor id incorrect\n"); + } + + pr_info("[CAM]%s, vreg_get vreg_af_actuator\n", __func__); + vreg_af_actuator = vreg_get(0, "gp5"); + if (IS_ERR(vreg_af_actuator)) + return PTR_ERR(vreg_af_actuator); + +#ifdef CONFIG_ARCH_QSD8X50 + data->camera_set_source(MAIN_SOURCE); +#endif + pr_info("[CAM]ov8810_probe_init_sensor finishes\n"); + return rc; +} + +static int ov8810_sensor_open_init(struct msm_camera_sensor_info *data) +{ + + int i; + int32_t rc = 0; + /*stella0122*/ + uint16_t ov8810_nl_region_boundary = 5; /*3;*/ + uint16_t ov8810_nl_region_code_per_step = 35; /*101;*/ + uint16_t ov8810_l_region_code_per_step = 20; /*18;*/ + int timeout; + pr_info("[CAM]Calling ov8810_sensor_open_init\n"); + + down(&ov8810_sem); + + if (data == NULL) { + pr_info("[CAM]data is a NULL pointer\n"); + return -EINVAL; + } + /*check whether resume done*/ + timeout = wait_event_interruptible_timeout( + ov8810_event.event_wait, + ov8810_event.waked_up, + 30*HZ); + pr_info("[CAM]wait event : %d timeout:%d\n", ov8810_event.waked_up, timeout); + if (timeout == 0) { + up(&ov8810_sem); + return rc; + } + msm_camio_probe_on(ov8810_pdev); + ov8810_ctrl = kzalloc(sizeof(struct ov8810_ctrl), GFP_KERNEL); + if (!ov8810_ctrl) { + pr_err("[CAM]ov8810_init failed!\n"); + rc = -ENOMEM; + goto init_done; + } + ov8810_ctrl->curr_lens_pos = -1; + ov8810_ctrl->fps_divider = 1 * 0x00000400; + ov8810_ctrl->pict_fps_divider = 1 * 0x00000400; + ov8810_ctrl->set_test = TEST_OFF; + ov8810_ctrl->prev_res = QTR_SIZE; + ov8810_ctrl->pict_res = FULL_SIZE; + ov8810_ctrl->curr_res = INVALID_SIZE; + if (data) + ov8810_ctrl->sensordata = data; + + /*switch pclk and mclk between main cam and 2nd cam*/ + /*only for supersonic*/ + pr_info("[CAM]doing clk switch (ov8810)\n"); + if(data->camera_clk_switch != NULL) + data->camera_clk_switch(); + + msm_camio_camif_pad_reg_reset(); + msleep(20); + + /*PWD and RST config*/ + pr_info("[CAM]%s, GPIO(%d) sensor_pwd 0\n", __func__, data->sensor_pwd); + rc = gpio_request(data->sensor_pwd, "ov8810"); + if (!rc) + gpio_direction_output(data->sensor_pwd, 0); + else + pr_err("[CAM]GPIO (%d) request faile\n", data->sensor_pwd); + gpio_free(data->sensor_pwd); + msleep(5); + + /* enable mclk first */ + msm_camio_clk_rate_set(OV8810_DEFAULT_CLOCK_RATE); + msm_camio_camif_pad_reg_reset(); + msleep(3); + /*Pull reset*/ + rc = gpio_request(data->sensor_reset, "ov8810"); + if (!rc) + gpio_direction_output(data->sensor_reset, 1); + else + pr_err("[CAM]GPIO (%d) request faile\n", data->sensor_reset); + gpio_free(data->sensor_reset); + msleep(1); + + /*read sensor id*/ + rc = ov8810_probe_read_id(data); + + ov8810_ctrl->sensormode = SENSOR_PREVIEW_MODE ; + + pr_info("[CAM]%s, initialize_ov8810_registers: %d\n", __func__, __LINE__); + if (rc < 0) + goto init_fail; +#ifdef CONFIG_ARCH_QSD8X50 + /* Initialize Sensor registers */ + rc = initialize_ov8810_registers(); + if (rc < 0) + return rc; +#endif + + pr_info("[CAM]%s, enable AF actuator %d\n", __func__, __LINE__); + + /* enable AF actuator */ + rc = vreg_enable(vreg_af_actuator); + if (!rc) { + + rc = vreg_set_level(vreg_af_actuator, 2800); /*2v8*/ + if (rc) + { + pr_err("[CAM]vreg_af_actuator vreg_set_level 2v8 failed!\n"); + goto init_fail; + } + } + else { + pr_err("[CAM]vreg_af_actuator vreg_enable failed!\n"); + goto init_fail; + 
} + + msleep(20); + + pr_info("[CAM]%s, set step_position_table %d\n", __func__, __LINE__); + + ov8810_ctrl->fps = 30*Q8; + + step_position_table[0] = 0; + + for (i = 1; i <= OV8810_TOTAL_STEPS_NEAR_TO_FAR; i++) { + if (i <= ov8810_nl_region_boundary) { + ov8810_step_position_table[i] = + ov8810_step_position_table[i-1] + + ov8810_nl_region_code_per_step; + } else { + ov8810_step_position_table[i] = + ov8810_step_position_table[i-1] + + ov8810_l_region_code_per_step; + } + } + + /* generate test pattern */ + pr_info("[CAM]%s, generate test pattern, %d, rc=%d\n", + __func__, __LINE__, rc); + + if (rc >= 0) + goto init_done; + /* reset the driver state */ +init_fail: + pr_err("[CAM]%s: init_fail\n", __func__); + vreg_disable(vreg_af_actuator); + if (ov8810_ctrl) { + kfree(ov8810_ctrl); + ov8810_ctrl = NULL; + } +init_done: + up(&ov8810_sem); + pr_info("[CAM]%s: init_done\n", __func__); + return rc; + +} /*endof ov8810_sensor_open_init*/ + +static int ov8810_init_client(struct i2c_client *client) +{ + /* Initialize the MSM_CAMI2C Chip */ + init_waitqueue_head(&ov8810_wait_queue); + return 0; +} + +static const struct i2c_device_id ov8810_i2c_id[] = { + { "ov8810", 0}, + { } +}; + +static int ov8810_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int rc = 0; + pr_info("[CAM]ov8810_probe called!\n"); + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + pr_err("[CAM]i2c_check_functionality failed\n"); + goto probe_failure; + } + + ov8810_sensorw = kzalloc(sizeof(struct ov8810_work), GFP_KERNEL); + if (!ov8810_sensorw) { + pr_err("[CAM]kzalloc failed.\n"); + rc = -ENOMEM; + goto probe_failure; + } + + i2c_set_clientdata(client, ov8810_sensorw); + ov8810_init_client(client); + ov8810_client = client; + + msleep(50); + + pr_info("[CAM]ov8810_probe successed! rc = %d\n", rc); + return 0; + +probe_failure: + pr_err("[CAM]ov8810_probe failed! 
rc = %d\n", rc); + return rc; +} + +static int ov8810_probe_init_done(const struct msm_camera_sensor_info *data) +{ + int rc; + rc = gpio_request(data->sensor_pwd, "ov8810"); + if (!rc) + gpio_direction_output(data->sensor_pwd, 1); + else + pr_err("[CAM]GPIO (%d) request faile\n", data->sensor_pwd); + gpio_free(data->sensor_pwd); + mdelay(1); +#ifdef CONFIG_ARCH_QSD8X50 + rc = gpio_request(data->sensor_reset, "ov8810"); + if (!rc) + gpio_direction_output(data->sensor_reset, 0); + else + pr_err("GPIO (%d) request faile\n", data->sensor_reset); + gpio_free(data->sensor_reset); +#endif + return 0; +} + +static int ov8810_suspend(struct platform_device *pdev, pm_message_t state) +{ + int rc; + struct msm_camera_sensor_info *sinfo = pdev->dev.platform_data; + + if (!sinfo->need_suspend) + return 0; + ov8810_event.waked_up = 0; + + pr_info("[CAM]ov8810: camera suspend\n"); + + pr_info("[CAM]%s, vreg_af_actuator vreg_disable\n", __func__); + vreg_disable(vreg_af_actuator); + + rc = gpio_request(sinfo->sensor_reset, "ov8810"); + if (!rc) + gpio_direction_output(sinfo->sensor_reset, 0); + else + pr_info("[CAM]ov8810: request GPIO(sensor_reset) :%d faile\n", + sinfo->sensor_reset); + + gpio_free(sinfo->sensor_reset); + msleep(10); + rc = gpio_request(sinfo->sensor_pwd, "ov8810"); + if (!rc) + gpio_direction_output(sinfo->sensor_pwd, 0); + else + pr_info("[CAM]ov8810: request GPIO(sensor_reset) :%d faile\n", + sinfo->sensor_pwd); + + gpio_free(sinfo->sensor_pwd); + + pr_info("[CAM]ov8810:suspend done\n"); + return rc; +} + +static void ov8810_resume(struct early_suspend *handler) +{ + int rc = 0; + struct msm_camera_sensor_info *sinfo = ov8810_pdev->dev.platform_data; + pr_info("[CAM]ov8810_resume\n"); + + /*check whether need resume*/ + if (!sinfo->need_suspend) + return; + + /*check whether already suspend*/ + if (ov8810_event.waked_up == 1) { + pr_info("[CAM]Ov8810: No nesesary to do Resume\n"); + return; + } + + mdelay(5); + /*power down setup*/ + pr_info("[CAM]%s, sensor_pwd 0\n", __func__); + rc = gpio_request(sinfo->sensor_pwd, "ov8810"); + if (!rc) + gpio_direction_output(sinfo->sensor_pwd, 0); + else + pr_err("[CAM]GPIO (%d) request faile\n", sinfo->sensor_pwd); + gpio_free(sinfo->sensor_pwd); + mdelay(5); + /*reset setup */ + rc = gpio_request(sinfo->sensor_reset, "ov8810"); + if (!rc) + gpio_direction_output(sinfo->sensor_reset, 1); + else + pr_err("[CAM]GPIO (%d) request faile\n", sinfo->sensor_reset); + gpio_free(sinfo->sensor_reset); + + /*init msm,clk ,GPIO,enable*/ + pr_info("[CAM]%s, msm_camio_probe_on\n", __func__); + msm_camio_probe_on(ov8810_pdev); + msm_camio_clk_enable(CAMIO_MDC_CLK); + + /*set MCLK*/ + pr_info("[CAM]%s, msm_camio_clk_rate_set = %d\n", + __func__, OV8810_DEFAULT_CLOCK_RATE); + msm_camio_clk_rate_set(OV8810_DEFAULT_CLOCK_RATE); + msleep(100); + + /*read sensor id*/ + rc = ov8810_probe_read_id(sinfo); + if (rc < 0) + pr_err("[CAM]OV8810 resume faile :can not read sensor ID\n"); + + /* Initialize Sensor registers */ + rc = initialize_ov8810_registers(); + if (rc < 0) + return; + msleep(20); + /*resume done*/ + ov8810_probe_init_done(sinfo); + /*turn off MCLK*/ + msm_camio_probe_off(ov8810_pdev); + msm_camio_clk_disable(CAMIO_MDC_CLK); + + ov8810_event.waked_up = 1; + pr_info("[CAM]ov8810:resume done\n"); + wake_up(&ov8810_event.event_wait); + return; +} + + +static int __exit ov8810_i2c_remove(struct i2c_client *client) +{ + struct ov8810_work_t *sensorw = i2c_get_clientdata(client); + free_irq(client->irq, sensorw); + deinit_suspend(); + ov8810_client = NULL; + 
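+	/* Global client pointer cleared above; now release the
+	 * per-client work structure allocated in ov8810_i2c_probe(). */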
kfree(sensorw); + return 0; +} + +static struct i2c_driver ov8810_i2c_driver = { + .id_table = ov8810_i2c_id, + .probe = ov8810_i2c_probe, + .remove = __exit_p(ov8810_i2c_remove), + .driver = { + .name = "ov8810", + }, +}; + + +static struct early_suspend early_suspend_ov8810 = { + .level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN+1, + .resume = ov8810_resume, + .suspend = NULL, +}; + +static const char *Ov8810Vendor = "OmniVision"; +static const char *Ov8810NAME = "ov8810"; +static const char *Ov8810Size = "8M"; + +static ssize_t sensor_vendor_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t ret = 0; + + sprintf(buf, "%s %s %s\n", Ov8810Vendor, Ov8810NAME, Ov8810Size); + ret = strlen(buf) + 1; + + return ret; +} + +DEFINE_MUTEX(cam_mode_lock); + +static ssize_t sensor_read_cam_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t length; + mutex_lock(&cam_mode_lock); + length = sprintf(buf, "%d\n", cam_mode_sel); + mutex_unlock(&cam_mode_lock); + return length; +} + +static ssize_t sensor_set_cam_mode(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + uint32_t tmp = 0; + mutex_lock(&cam_mode_lock); + tmp = buf[0] - 0x30; /* only get the first char */ + cam_mode_sel = tmp; + mutex_unlock(&cam_mode_lock); + return count; +} + +static ssize_t sensor_read_node(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t length; + length = sprintf(buf, "%d\n", sensor_probe_node); + return length; +} + +static DEVICE_ATTR(sensor, 0444, sensor_vendor_show, NULL); +static DEVICE_ATTR(cam_mode, 0644, sensor_read_cam_mode, sensor_set_cam_mode); +static DEVICE_ATTR(node, 0444, sensor_read_node, NULL); + +static struct kobject *android_ov8810; + +static int ov8810_sysfs_init(void) +{ + int ret = 0; + pr_info("[CAM]ov8810:kobject creat and add\n"); + android_ov8810 = kobject_create_and_add("android_camera", NULL); + if (android_ov8810 == NULL) { + pr_info("[CAM]ov8810_sysfs_init: subsystem_register failed\n"); + ret = -ENOMEM; + return ret ; + } + pr_info("[CAM]Ov8810:sysfs_create_file\n"); + ret = sysfs_create_file(android_ov8810, &dev_attr_sensor.attr); + if (ret) { + pr_info("[CAM]ov8810_sysfs_init: sysfs_create_file failed\n"); + ret = -EFAULT; + goto error; + } + + ret = sysfs_create_file(android_ov8810, &dev_attr_cam_mode.attr); + if (ret) { + pr_info("[CAM]ov8810_sysfs_init: dev_attr_cam_mode failed\n"); + ret = -EFAULT; + goto error; + } + + ret = sysfs_create_file(android_ov8810, &dev_attr_node.attr); + if (ret) { + pr_info("[CAM]ov8810_sysfs_init: dev_attr_node failed\n"); + ret = -EFAULT; + goto error; + } + + return ret; + +error: + kobject_del(android_ov8810); + return ret; +} + +#ifdef CONFIG_ARCH_MSM7X30 +uint8_t ov8810_preview_skip_frame(void) +{ + if (ov8810_ctrl->sensormode == SENSOR_PREVIEW_MODE && preview_frame_count < 2) { + preview_frame_count++; + return 1; + } + return 0; +} +#endif + +int ov8810_sensor_config(void __user *argp) +{ + struct sensor_cfg_data cdata; + long rc = 0; + + if (copy_from_user(&cdata, + (void *)argp, + sizeof(struct sensor_cfg_data))) + return -EFAULT; + + down(&ov8810_sem); + + CDBG("ov8810_sensor_config: cfgtype = %d\n", + cdata.cfgtype); + switch (cdata.cfgtype) { + case CFG_GET_PICT_FPS: + ov8810_get_pict_fps( + cdata.cfg.gfps.prevfps, + &(cdata.cfg.gfps.pictfps)); + + if (copy_to_user((void *)argp, + &cdata, + sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_GET_PREV_L_PF: + cdata.cfg.prevl_pf = + 
ov8810_get_prev_lines_pf(); + + if (copy_to_user((void *)argp, + &cdata, + sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_GET_PREV_P_PL: + cdata.cfg.prevp_pl = + ov8810_get_prev_pixels_pl(); + + if (copy_to_user((void *)argp, + &cdata, + sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_GET_PICT_L_PF: + cdata.cfg.pictl_pf = + ov8810_get_pict_lines_pf(); + + if (copy_to_user((void *)argp, + &cdata, + sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_GET_PICT_P_PL: + cdata.cfg.pictp_pl = + ov8810_get_pict_pixels_pl(); + + if (copy_to_user((void *)argp, + &cdata, + sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_GET_PICT_MAX_EXP_LC: + cdata.cfg.pict_max_exp_lc = + ov8810_get_pict_max_exp_lc(); + + if (copy_to_user((void *)argp, + &cdata, + sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_SET_FPS: + case CFG_SET_PICT_FPS: + rc = ov8810_set_fps(&(cdata.cfg.fps)); + break; + + case CFG_SET_EXP_GAIN: + rc = + ov8810_write_exp_gain( + cdata.cfg.exp_gain.mul, + cdata.cfg.exp_gain.gain, + cdata.cfg.exp_gain.line); + break; + + case CFG_SET_PICT_EXP_GAIN: + rc = + ov8810_set_pict_exp_gain( + cdata.cfg.exp_gain.mul, + cdata.cfg.exp_gain.gain, + cdata.cfg.exp_gain.line); + break; + + case CFG_SET_MODE: + rc = ov8810_set_sensor_mode(cdata.mode, + cdata.rs); + break; + + case CFG_PWR_DOWN: + rc = ov8810_power_down(); + break; + + case CFG_MOVE_FOCUS: + rc = + ov8810_move_focus( + cdata.cfg.focus.dir, + cdata.cfg.focus.steps); + break; + + case CFG_SET_DEFAULT_FOCUS: + rc = + ov8810_set_default_focus( + (uint8_t)cdata.cfg.focus.steps); + break; + + case CFG_SET_EFFECT: + rc = ov8810_set_default_focus( + (uint8_t)cdata.cfg.effect); + break; + + case CFG_I2C_IOCTL_R_OTP:{ + rc = ov8810_i2c_read_fuseid(&cdata); + if (copy_to_user + (argp, &cdata, sizeof(struct sensor_cfg_data)) + ) + rc = -EFAULT; + } + break; + + case CFG_SET_OV_LSC: + rc = ov8810_update_lsc_table(&cdata); + break; + + /*20100330 vincent for lsc calibration*/ + case CFG_SET_OV_LSC_RAW_CAPTURE: + rc = ov8810_LSC_calibration_set_rawflag(&cdata); + break; + + default: + rc = -EFAULT; + break; + } + + prevent_suspend(); + up(&ov8810_sem); + + return rc; +} + + + + +static int ov8810_sensor_release(void) +{ + int rc = -EBADF; + + down(&ov8810_sem); + msleep(35); + + if (ov8810_ctrl) { + rc = gpio_request(ov8810_ctrl->sensordata->sensor_pwd, "ov8810"); + if (!rc) + gpio_direction_output(ov8810_ctrl->sensordata->sensor_pwd, 1); + else + pr_err("[CAM]GPIO (%d) request faile\n", ov8810_ctrl->sensordata->sensor_pwd); + gpio_free(ov8810_ctrl->sensordata->sensor_pwd); +#ifdef CONFIG_ARCH_QSD8X50 + /*Pull low RST*/ + gpio_request(ov8810_ctrl->sensordata->sensor_reset, "ov8810"); + gpio_direction_output(ov8810_ctrl->sensordata->sensor_reset, 0); + gpio_free(ov8810_ctrl->sensordata->sensor_reset); +#endif + } + + pr_info("[CAM]vreg_af_actuator vreg_disable\n"); + vreg_disable(vreg_af_actuator); + + msleep(20); + + pr_info("[CAM]%s, %d\n", __func__, __LINE__); + + msm_camio_probe_off(ov8810_pdev); + if (ov8810_ctrl) { + kfree(ov8810_ctrl); + ov8810_ctrl = NULL; + } + mdelay(3); + allow_suspend(); + pr_info("[CAM]ov8810_release completed\n"); + up(&ov8810_sem); + + return rc; +} + +static int ov8810_sensor_probe(struct msm_camera_sensor_info *info, + struct msm_sensor_ctrl *s) +{ + int rc = 0; + rc = i2c_add_driver(&ov8810_i2c_driver); + if (rc < 0 || ov8810_client == NULL) { + rc = -ENOTSUPP; + goto probe_fail; + } + + pr_info("[CAM]ov8810 s->node 
%d\n", s->node); + sensor_probe_node = s->node; + /*switch pclk and mclk between main cam and 2nd cam*/ + /*only for supersonic*/ + pr_info("[CAM]Ov8810: doing clk switch (ov8810)\n"); + if(info->camera_clk_switch != NULL) + info->camera_clk_switch(); + mdelay(5); + /*power down setup*/ + rc = gpio_request(info->sensor_pwd, "ov8810"); + if (!rc) + gpio_direction_output(info->sensor_pwd, 0); + else + pr_err("[CAM]GPIO (%d) request faile\n", info->sensor_pwd); + gpio_free(info->sensor_pwd); + mdelay(5); + /*reset setup */ + rc = gpio_request(info->sensor_reset, "ov8810"); + if (!rc) + gpio_direction_output(info->sensor_reset, 1); + else + pr_err("[CAM]GPIO (%d) request faile\n", info->sensor_reset); + gpio_free(info->sensor_reset); + + /*set MCLK*/ + pr_info("[CAM]%s, msm_camio_clk_rate_set %d\n", + __func__, OV8810_DEFAULT_CLOCK_RATE); + msm_camio_clk_rate_set(OV8810_DEFAULT_CLOCK_RATE); + msleep(100); + /*read sensor id*/ + rc = ov8810_probe_read_id(info); + if (rc < 0) + goto probe_fail; + + /* Initialize Sensor registers */ + rc = initialize_ov8810_registers(); + if (rc < 0) + return rc; + + if (info->camera_main_set_probe != NULL) + info->camera_main_set_probe(true); + + init_suspend(); + s->s_init = ov8810_sensor_open_init; + s->s_release = ov8810_sensor_release; + s->s_config = ov8810_sensor_config; + +#ifdef CONFIG_ARCH_MSM7X30 + info->preview_skip_frame = ov8810_preview_skip_frame; +#endif + + msleep(20); + ov8810_probe_init_done(info); + /*register late resuem*/ + register_early_suspend(&early_suspend_ov8810); + /*init wait event*/ + init_waitqueue_head(&ov8810_event.event_wait); + /*init waked_up value*/ + ov8810_event.waked_up = 1; + /*write sysfs*/ + ov8810_sysfs_init(); + pr_info("[CAM]%s: ov8810_probe_init_done %d\n", __func__, __LINE__); + goto probe_done; + +probe_fail: + pr_err("[CAM]SENSOR PROBE FAILS!\n"); +probe_done: + return rc; + +} + +#ifndef CONFIG_ARCH_QSD8X50 +static int ov8810_vreg_enable(struct platform_device *pdev) +{ + struct msm_camera_sensor_info *sdata = pdev->dev.platform_data; + int rc; + pr_info("[CAM]%s camera vreg on\n", __func__); + + if (sdata->camera_power_on == NULL) { + pr_err("[CAM]sensor platform_data didnt register\n"); + return -EIO; + } + rc = sdata->camera_power_on(); + return rc; +} +#endif + +#if 0 +static int ov8810_vreg_disable(struct platform_device *pdev) +{ + struct msm_camera_sensor_info *sdata = pdev->dev.platform_data; + int rc; + printk(KERN_INFO "%s camera vreg off\n", __func__); + if (sdata->camera_power_off == NULL) { + pr_err("[CAM]sensor platform_data didnt register\n"); + return -EIO; + } + rc = sdata->camera_power_off(); + return rc; +} +#endif + +static int __ov8810_probe(struct platform_device *pdev) +{ + + struct msm_camera_sensor_info *sdata = pdev->dev.platform_data; + printk("[CAM]__ov8810_probe\n"); + ov8810_pdev = pdev; + + if (sdata->camera_main_get_probe != NULL) { + if (sdata->camera_main_get_probe()) { + pr_info("[CAM]__ov8810_probe camera main get probed already.\n"); + return 0; + } + } + +#ifndef CONFIG_ARCH_QSD8X50 + { + int rc; + rc = gpio_request(sdata->sensor_pwd, "ov8810"); + if (!rc) + gpio_direction_output(sdata->sensor_pwd, 1); + else + pr_err("[CAM]GPIO (%d) request faile\n", sdata->sensor_pwd); + gpio_free(sdata->sensor_pwd); + udelay(200); + + rc = ov8810_vreg_enable(pdev); + if (rc < 0) + pr_err("[CAM]__ov8810_probe fail sensor power on error\n"); + } +#endif + + return msm_camera_drv_start(pdev, ov8810_sensor_probe); +} + +static struct platform_driver msm_camera_driver = { + .probe = 
__ov8810_probe, + .driver = { + .name = "msm_camera_ov8810", + .owner = THIS_MODULE, + }, + .suspend = ov8810_suspend, +}; + +static int __init ov8810_init(void) +{ + return platform_driver_register(&msm_camera_driver); +} + +module_init(ov8810_init); diff --git a/drivers/media/video/msm/ov8810.h b/drivers/media/video/msm/ov8810.h new file mode 100644 index 0000000000000..48219d2db63d9 --- /dev/null +++ b/drivers/media/video/msm/ov8810.h @@ -0,0 +1,61 @@ +/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Code Aurora Forum nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * Alternatively, provided that this notice is retained in full, this software + * may be relicensed by the recipient under the terms of the GNU General Public + * License version 2 ("GPL") and only version 2, in which case the provisions of + * the GPL apply INSTEAD OF those given above. If the recipient relicenses the + * software under the GPL, then the identification text in the MODULE_LICENSE + * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a + * recipient changes the license terms to the GPL, subsequent recipients shall + * not relicense under alternate licensing terms, including the BSD or dual + * BSD/GPL terms. In addition, the following license statement immediately + * below and between the words START and END shall also then apply when this + * software is relicensed under the GPL: + * + * START + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 and only version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * END + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ +#ifndef OV8810_H +#define OV8810_H + +#endif + diff --git a/drivers/media/video/msm/ov9665.c b/drivers/media/video/msm/ov9665.c new file mode 100644 index 0000000000000..6d1ab50ab9268 --- /dev/null +++ b/drivers/media/video/msm/ov9665.c @@ -0,0 +1,1168 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "ov9665.h" + +/* OV9665 Registers and their values */ +/* Sensor Core Registers */ +#define REG_OV9665_MODEL_ID_H 0x0A +#define REG_OV9665_MODEL_ID_L 0x0B +#define OV9665_MODEL_ID 0x9663 + +/* SOC Registers Page 1 */ +#define REG_OV9665_SENSOR_RESET 0x301A +#define REG_OV9665_STANDBY_CONTROL 0x3202 +#define REG_OV9665_MCU_BOOT 0x3386 + +struct ov9665_work { + struct work_struct work; +}; + +static struct ov9665_work *ov9665_sensorw; +static struct i2c_client *ov9665_client; + +struct ov9665_ctrl { + const struct msm_camera_sensor_info *sensordata; +}; + +static struct ov9665_ctrl *ov9665_ctrl; +static int op_mode; +static DECLARE_WAIT_QUEUE_HEAD(ov9665_wait_queue); +DEFINE_SEMAPHORE(ov9665_sem); + +static int sensor_probe_node = 0; + +static enum wb_mode current_wb = CAMERA_AWB_AUTO; +static int ov9665_set_wb(enum wb_mode wb_value); + +#define MAX_I2C_RETRIES 20 +static int i2c_transfer_retry(struct i2c_adapter *adap, + struct i2c_msg *msgs, + int len) +{ + int i2c_retry = 0; + int ns; /* number sent */ + + while (i2c_retry++ < MAX_I2C_RETRIES) { + ns = i2c_transfer(adap, msgs, len); + if (ns == len) + break; + pr_err("%s: try %d/%d: i2c_transfer sent: %d, len %d\n", + __func__, + i2c_retry, MAX_I2C_RETRIES, ns, len); + msleep(10); + } + + return ns == len ? 
0 : -EIO; +} + + +static int ov9665_i2c_txdata(unsigned short saddr, + unsigned char *txdata, int length) +{ + struct i2c_msg msg[] = { + { + .addr = saddr, + .flags = 0, + .len = length, + .buf = txdata, + }, + }; + + if (i2c_transfer_retry(ov9665_client->adapter, msg, 1) < 0) { + pr_info("ov9665_i2c_txdata failed\n"); + return -EIO; + } + + return 0; +} + +static int ov9665_i2c_write(unsigned short saddr, + unsigned char waddr, unsigned char wdata, + enum ov9665_width width) +{ + int rc = -EIO; + unsigned char buf[4]; + memset(buf, 0, sizeof(buf)); + switch (width) { + case WORD_LEN:{ + buf[0] = (waddr & 0xFF00) >> 8; + buf[1] = (waddr & 0x00FF); + buf[2] = (wdata & 0xFF00) >> 8; + buf[3] = (wdata & 0x00FF); + + rc = ov9665_i2c_txdata(saddr, buf, 4); + } + break; + + case BYTE_LEN:{ + buf[0] = waddr; + buf[1] = wdata; + rc = ov9665_i2c_txdata(saddr, buf, 2); + } + break; + + default: + break; + } + + if (rc < 0) + pr_info("i2c_write failed, addr = 0x%x, val = 0x%x!\n", + waddr, wdata); + + return rc; +} + +static int ov9665_i2c_write_table(struct ov9665_i2c_reg_conf const + *reg_conf_tbl, int num_of_items_in_table) +{ + int i; + int rc = -EIO; + + for (i = 0; i < num_of_items_in_table; i++) { + rc = ov9665_i2c_write(ov9665_client->addr, + reg_conf_tbl->waddr, reg_conf_tbl->wdata, + reg_conf_tbl->width); + if (rc < 0) + break; + if (reg_conf_tbl->mdelay_time != 0) + mdelay(reg_conf_tbl->mdelay_time); + reg_conf_tbl++; + } + + return rc; +} + +static int ov9665_i2c_rxdata(unsigned short saddr, + unsigned char *rxdata, int length) +{ + struct i2c_msg msgs[] = { + { + .addr = saddr, + .flags = 0, + .len = 1, + .buf = rxdata, + }, + { + .addr = saddr, + .flags = I2C_M_RD, + .len = length, + .buf = rxdata, + }, + }; + + if (i2c_transfer_retry(ov9665_client->adapter, msgs, 2) < 0) { + pr_info("ov9665_i2c_rxdata failed!\n"); + return -EIO; + } + + return 0; +} + + +static int ov9665_i2c_read(unsigned short saddr, + unsigned short raddr, unsigned char *rdata) +{ + int rc = 0; + unsigned char buf[1]; + if (!rdata) + return -EIO; + + memset(buf, 0, sizeof(buf)); + buf[0] = raddr; + rc = ov9665_i2c_rxdata(saddr, buf,1); + if (rc < 0) + return rc; + *rdata = buf[0]; + if (rc < 0) + pr_info("ov9665_i2c_read failed!\n"); + + return rc; +} + + +static int ov9665_i2c_write_mask( + unsigned char addr,unsigned char Data, unsigned char Mask) +{ + int rc = 0; + unsigned char temp; + rc = ov9665_i2c_read(ov9665_client->addr, addr, &temp); + if(rc < 0){ + pr_err("ov9665 error : read i2c error\n"); + return rc; + } + temp = (temp&(~Mask))|Data; + rc = ov9665_i2c_write(ov9665_client->addr, addr, temp, BYTE_LEN); + if(rc < 0){ + pr_err("ov9665 error : write i2c error\n"); + return rc; + } + return rc; +} + + +static int ov9665_pwd(const struct msm_camera_sensor_info *info){ + int rc=0; + rc = gpio_request(info->sensor_pwd, "ov9665"); + if (!rc) + gpio_direction_output(info->sensor_pwd, 0); + else + pr_err("GPIO(%d) request faile",info->sensor_pwd); + gpio_free(info->sensor_pwd); + /*for 2nd camera 2nd source*/ + /*main camera pwd pull down*/ + rc = gpio_request(105, "ov9665"); + if (!rc) + gpio_direction_output(105, 0); + else + pr_err("GPIO(105) request faile"); + gpio_free(105); + return rc; +} + +static int ov9665_reset(const struct msm_camera_sensor_info *dev) +{ + int rc = 0; + rc = gpio_request(dev->sensor_reset, "ov9665"); + + if (!rc) + rc = gpio_direction_output(dev->sensor_reset, 1); + else + pr_err("GPIO(%d) request faile",dev->sensor_reset); + + gpio_free(dev->sensor_reset); + return rc; +} + +static 
int ov9665_reg_init(void) +{ + int rc = 0; + rc = ov9665_i2c_write_table(&ov9665_regs.register_init[0], + ov9665_regs.register_init_size); + return rc; +} + +static int ov9665_init_done(const struct msm_camera_sensor_info *info) +{ + int rc = 0; + rc = gpio_request(info->sensor_pwd, "ov9665"); + if (!rc) + gpio_direction_output(info->sensor_pwd, 1); + else + pr_err("GPIO(%d) request faile",info->sensor_pwd); + gpio_free(info->sensor_pwd); + return rc; +} + + +static int ov9665_set_sensor_mode(int mode) +{ + unsigned char shading, gain; + switch (mode) { + case SENSOR_PREVIEW_MODE: + op_mode = SENSOR_PREVIEW_MODE; + pr_info("ov9665:sensor set mode: preview\n"); + ov9665_i2c_write(ov9665_client->addr, 0x63, 0x00, BYTE_LEN); + /*Windowing*/ + ov9665_i2c_write(ov9665_client->addr, 0x12, 0x40, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0x4d, 0x09, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0x17, 0x0c, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0x18, 0x5d, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0x19, 0x02, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0x1a, 0x3f, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0x03, 0x83, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0x32, 0xad, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0x2b, 0x00, BYTE_LEN); + /*scaling*/ + ov9665_i2c_write(ov9665_client->addr, 0x64, 0xa4, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xab, 0xe7, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xb9, 0x50, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xba, 0x3c, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xbb, 0x50, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xbc, 0x3c, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0x85, 0xe7, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0x0d, 0x92, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0x0d, 0x90, BYTE_LEN); + /*enable 3A*/ + ov9665_i2c_write(ov9665_client->addr, 0x13, 0xe7, BYTE_LEN); + ov9665_set_wb(current_wb); + /*VGA 30fps*/ + ov9665_i2c_write(ov9665_client->addr, 0x11, 0x80, BYTE_LEN); + //ov9665_i2c_write(ov9665_client->addr, 0x09, 0x01, BYTE_LEN); + mdelay(400);/*skip 2 break frame */ + break; + + case SENSOR_SNAPSHOT_MODE: + pr_info("ov9665:sensor set mode: snapshot\n"); + op_mode = SENSOR_SNAPSHOT_MODE; + /*disable 3A*/ + ov9665_i2c_write(ov9665_client->addr, 0x13, 0xe0, BYTE_LEN); + /*SXGA 7.5fps*/ + ov9665_i2c_write(ov9665_client->addr, 0x11, 0x81, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0x63, 0x01, BYTE_LEN); + /*Windowing*/ + ov9665_i2c_write(ov9665_client->addr, 0x12, 0x00, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0x4d, 0x11, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0x17, 0x0c, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0x18, 0x5d, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0x19, 0x01, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0x1a, 0x82, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0x03, 0x83, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0x32, 0x24, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0x2b, 0x00, BYTE_LEN); + /*scaling*/ + ov9665_i2c_write(ov9665_client->addr, 0x64, 0x24, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xab, 0xe7, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xb9, 0xa0, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xba, 0x80, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xbb, 0xa0, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xbc, 0x80, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0x85, 
0xe7, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0x0d, 0x82, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0x0d, 0x80, BYTE_LEN); + mdelay(400); /*wait for AE stable*/ + break; + case SENSOR_GET_EXP: + ov9665_i2c_read(ov9665_client->addr, 0x83, &shading); + ov9665_i2c_read(ov9665_client->addr, 0x00, &gain); + if (gain >= 0x36) //5.5xgain + ov9665_i2c_write + (ov9665_client->addr, 0x83, 0x06, BYTE_LEN); + else if (gain < 0x34) //5x gain + ov9665_i2c_write + (ov9665_client->addr, 0x83, 0x07, BYTE_LEN); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ov9665_set_effect(int effect) +{ + if (op_mode == SENSOR_SNAPSHOT_MODE) + return 0; + switch (effect) { + case CAMERA_EFFECT_OFF: + #if 1/*color matrix*/ + ov9665_i2c_write(ov9665_client->addr, 0xbd, 0x04, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xbe, 0x1f, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xbf, 0x03, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xc0, 0x0d, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xc1, 0x24, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xc2, 0x30, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xc3, 0x34, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xc4, 0x34, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xc5, 0x01, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xc6, 0x9c, BYTE_LEN); + #endif + #if 0/*control by SDE*/ + ov9665_i2c_write(ov9665_client->addr, 0xc7, 0x80, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xc8, 0x00, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xcd, 0x80, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xce, 0x80, BYTE_LEN); + #endif + break; + + case CAMERA_EFFECT_MONO: + #if 1/*color matrix*/ + ov9665_i2c_write(ov9665_client->addr, 0xbd, 0x09, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xbe, 0x12, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xbf, 0x03, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xc0, 0x00, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xc1, 0x00, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xc2, 0x00, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xc3, 0x00, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xc4, 0x00, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xc5, 0x00, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xc6, 0x00, BYTE_LEN); + #endif + #if 0/*control by SDE*/ + ov9665_i2c_write_mask(0xc7, 0x10, 0x10); + ov9665_i2c_write_mask(0xc8, 0x18, 0x18); + ov9665_i2c_write(ov9665_client->addr, 0xCD, 0x80, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xCE, 0x80, BYTE_LEN); + #endif + break; + + case CAMERA_EFFECT_NEGATIVE: + #if 0/*control by SDE*/ + ov9665_i2c_write_mask(0xc7, 0x10, 0x10); + ov9665_i2c_write_mask(0xc8, 0x40, 0x58); + /*ov9665_i2c_write(ov9665_client->addr, 0xC7, 0x90, BYTE_LEN);*/ + #endif + break; + + case CAMERA_EFFECT_SEPIA: + #if 1/*user color matrix*/ + ov9665_i2c_write(ov9665_client->addr, 0xbd, 0x09, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xbe, 0x09, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xbf, 0x00, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xc0, 0x06, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xc1, 0x08, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xc2, 0x07, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xc3, 0x0a, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xc4, 0x04, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xc5, 0x00, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xc6, 0x98, BYTE_LEN); + 
#endif + #if 0/*control by SDE*/ + ov9665_i2c_write(ov9665_client->addr, 0xc7, 0x90, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xc8, 0x18, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xCD, 0x40, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, 0xCE, 0xa0, BYTE_LEN); + #endif + break; + default: + return -EINVAL; + } + return 0; +} + + +static int ov9665_set_antibanding(enum antibanding_mode antibanding_value) +{ + if (op_mode == SENSOR_SNAPSHOT_MODE) + return 0; + switch (antibanding_value) { + case CAMERA_ANTI_BANDING_50HZ: + ov9665_i2c_write_mask(0x0c, 0x00, 0x02); + ov9665_i2c_write_mask(0x0c, 0x04, 0x04); + break; + case CAMERA_ANTI_BANDING_60HZ: + ov9665_i2c_write_mask(0x0c, 0x00, 0x02); + ov9665_i2c_write_mask(0x0c, 0x00, 0x04); + break; + case CAMERA_ANTI_BANDING_AUTO: + ov9665_i2c_write_mask(0x0c, 0x02, 0x02); + break; + } + return 0; +} + + +static int ov9665_set_brightness(enum brightness_t brightness_value) +{ + if (op_mode == SENSOR_SNAPSHOT_MODE) + return 0; + ov9665_i2c_write_mask(0xc8, 0x04, 0x04); + switch (brightness_value) { + case CAMERA_BRIGHTNESS_N4: + ov9665_i2c_write_mask(0xc7, 0x18, 0x18); + ov9665_i2c_write(ov9665_client->addr, + 0xd1, 0x40, BYTE_LEN); + break; + case CAMERA_BRIGHTNESS_N3: + ov9665_i2c_write_mask(0xc7, 0x18, 0x18); + ov9665_i2c_write(ov9665_client->addr, + 0xd1, 0x30, BYTE_LEN); + break; + case CAMERA_BRIGHTNESS_N2: + ov9665_i2c_write_mask(0xc7, 0x18, 0x18); + ov9665_i2c_write(ov9665_client->addr, + 0xd1, 0x20, BYTE_LEN); + break; + case CAMERA_BRIGHTNESS_N1: + ov9665_i2c_write_mask(0xc7, 0x18, 0x18); + ov9665_i2c_write(ov9665_client->addr, + 0xd1, 0x10, BYTE_LEN); + break; + case CAMERA_BRIGHTNESS_D: + ov9665_i2c_write_mask(0xc7, 0x10, 0x18); + ov9665_i2c_write(ov9665_client->addr, + 0xd1, 0x00, BYTE_LEN); + break; + case CAMERA_BRIGHTNESS_P1: + ov9665_i2c_write_mask(0xc7, 0x10, 0x18); + ov9665_i2c_write(ov9665_client->addr, + 0xd1, 0x10, BYTE_LEN); + break; + case CAMERA_BRIGHTNESS_P2: + ov9665_i2c_write_mask(0xc7, 0x10, 0x18); + ov9665_i2c_write(ov9665_client->addr, + 0xd1, 0x20, BYTE_LEN); + break; + case CAMERA_BRIGHTNESS_P3: + ov9665_i2c_write_mask(0xc7, 0x10, 0x18); + ov9665_i2c_write(ov9665_client->addr, + 0xd1, 0x30, BYTE_LEN); + break; + case CAMERA_BRIGHTNESS_P4: + ov9665_i2c_write_mask(0xc7, 0x10, 0x18); + ov9665_i2c_write(ov9665_client->addr, + 0xd1, 0x40, BYTE_LEN); + break; + default: + break; + } + return 0; +} + + +static int ov9665_set_wb(enum wb_mode wb_value) +{ + if (op_mode == SENSOR_SNAPSHOT_MODE) + return 0; + switch (wb_value) { + case CAMERA_AWB_AUTO: + ov9665_i2c_write(ov9665_client->addr, + 0x13, 0xe7, BYTE_LEN); + break; + case CAMERA_AWB_CLOUDY: + ov9665_i2c_write(ov9665_client->addr, + 0x13, 0xe5, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0x01, 0x40, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0x02, 0x68, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0x16, 0x46, BYTE_LEN); + break; + case CAMERA_AWB_INDOOR_HOME: + ov9665_i2c_write(ov9665_client->addr, + 0x13, 0xe5, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0x01, 0x62, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0x02, 0x3c, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0x16, 0x41, BYTE_LEN); + break; + case CAMERA_AWB_INDOOR_OFFICE: + ov9665_i2c_write(ov9665_client->addr, + 0x13, 0xe5, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0x01, 0x50, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0x02, 0x40, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0x16, 0x40, BYTE_LEN); + break; + 
case CAMERA_AWB_SUNNY: + ov9665_i2c_write(ov9665_client->addr, + 0x13, 0xe5, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0x01, 0x35, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0x02, 0x52, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0x16, 0x40, BYTE_LEN); + break; + default: + break; + } + current_wb = wb_value; + return 0; +} + + +static int ov9665_set_sharpness(enum sharpness_mode sharpness_value) +{ + if (op_mode == SENSOR_SNAPSHOT_MODE) + return 0; + ov9665_i2c_write_mask(0xab, 0x04, 0x04); + switch (sharpness_value) { + case CAMERA_SHARPNESS_X0: + ov9665_i2c_write(ov9665_client->addr, + 0xad, 0x20, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0xd9, 0x00, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0xda, 0x00, BYTE_LEN); + break; + case CAMERA_SHARPNESS_X1: + ov9665_i2c_write(ov9665_client->addr, + 0xad, 0x22, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0xd9, 0x01, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0xda, 0x00, BYTE_LEN); + break; + case CAMERA_SHARPNESS_X2: + ov9665_i2c_write(ov9665_client->addr, + 0xad, 0x24, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0xd9, 0x13, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0xda, 0x00, BYTE_LEN); + break; + case CAMERA_SHARPNESS_X3: + ov9665_i2c_write(ov9665_client->addr, + 0xad, 0x28, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0xd9, 0x26, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0xda, 0x22, BYTE_LEN); + break; + case CAMERA_SHARPNESS_X4: + ov9665_i2c_write(ov9665_client->addr, + 0xad, 0x2c, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0xd9, 0x6a, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0xda, 0x66, BYTE_LEN); + break; + default: + break; + } + return 0; +} + + +static int ov9665_set_saturation(enum saturation_mode saturation_value) +{ + if (op_mode == SENSOR_SNAPSHOT_MODE) + return 0; + switch (saturation_value) { + case CAMERA_SATURATION_X0: + ov9665_i2c_write_mask(0xc7, 0x10, 0x10); + ov9665_i2c_write_mask(0xc8, 0x02, 0x02); + ov9665_i2c_write(ov9665_client->addr, + 0xcb, 0x00, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0xcc, 0x00, BYTE_LEN); + break; + case CAMERA_SATURATION_X05: + ov9665_i2c_write_mask(0xc7, 0x10, 0x10); + ov9665_i2c_write_mask(0xc8, 0x02, 0x02); + ov9665_i2c_write(ov9665_client->addr, + 0xcb, 0x20, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0xcc, 0x20, BYTE_LEN); + break; + case CAMERA_SATURATION_X1: + ov9665_i2c_write_mask(0xc7, 0x10, 0x10); + ov9665_i2c_write_mask(0xc8, 0x02, 0x02); + ov9665_i2c_write(ov9665_client->addr, + 0xcb, 0x40, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0xcc, 0x40, BYTE_LEN); + break; + case CAMERA_SATURATION_X15: + ov9665_i2c_write_mask(0xc7, 0x10, 0x10); + ov9665_i2c_write_mask(0xc8, 0x02, 0x02); + ov9665_i2c_write(ov9665_client->addr, + 0xcb, 0x60, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0xcc, 0x60, BYTE_LEN); + break; + case CAMERA_SATURATION_X2: + ov9665_i2c_write_mask(0xc7, 0x10, 0x10); + ov9665_i2c_write_mask(0xc8, 0x02, 0x02); + ov9665_i2c_write(ov9665_client->addr, + 0xcb, 0x80, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0xcc, 0x80, BYTE_LEN); + break; + default: + break; + } + return 0; +} + +static int ov9665_set_contrast(enum contrast_mode contrast_value) +{ + if (op_mode == SENSOR_SNAPSHOT_MODE) + return 0; + ov9665_i2c_write_mask(0xc7, 0x34, 0x34); + ov9665_i2c_write_mask(0xc8, 0x04, 0x04); + ov9665_i2c_write_mask(0x64, 0x02, 0x02); + switch (contrast_value) { + case CAMERA_CONTRAST_N2: + 
ov9665_i2c_write(ov9665_client->addr, + 0xd0, 0x08, BYTE_LEN); + break; + case CAMERA_CONTRAST_N1: + ov9665_i2c_write(ov9665_client->addr, + 0xd0, 0x10, BYTE_LEN); + break; + case CAMERA_CONTRAST_D: + ov9665_i2c_write(ov9665_client->addr, + 0xd0, 0x20, BYTE_LEN); + break; + case CAMERA_CONTRAST_P1: + ov9665_i2c_write(ov9665_client->addr, + 0xd0, 0x30, BYTE_LEN); + break; + case CAMERA_CONTRAST_P2: + ov9665_i2c_write(ov9665_client->addr, + 0xd0, 0x40, BYTE_LEN); + break; + default: + break; + } + return 0; +} + +static int ov9665_set_front_camera_mode(enum frontcam_t frontcam_value) +{ + if (op_mode == SENSOR_SNAPSHOT_MODE) + return 0; + + switch (frontcam_value) { + case CAMERA_MIRROR: + /*mirror and flip*/ + ov9665_i2c_write(ov9665_client->addr, + 0x04, 0xa8, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0x33, 0xc8, BYTE_LEN); + break; + case CAMERA_REVERSE: + /*reverse mode*/ + ov9665_i2c_write(ov9665_client->addr, + 0x04, 0x28, BYTE_LEN); + ov9665_i2c_write(ov9665_client->addr, + 0x33, 0xc0, BYTE_LEN); + break; + default: + break; + } + return 0; +} + +static int ov9665_sensor_init(const struct msm_camera_sensor_info *data) +{ + uint8_t model_id_h = 0,model_id_l = 0; + uint16_t model_id; + int rc = 0; + pr_info("ov9665_sensor_init_probe \n"); + /* Read the Model ID of the sensor */ + rc = ov9665_i2c_read(ov9665_client->addr, + REG_OV9665_MODEL_ID_H, &model_id_h); + if (rc < 0) + goto init_probe_fail; + + rc = ov9665_i2c_read(ov9665_client->addr, + REG_OV9665_MODEL_ID_L, &model_id_l); + if (rc < 0) + goto init_probe_fail; + model_id = (((model_id_h << 8) & 0xFF00) +(model_id_l)); + pr_info("ov9665: model_id = 0x%x\n", model_id); + /* Check if it matches it with the value in Datasheet */ + if (model_id != OV9665_MODEL_ID) { + rc = -EINVAL; + goto init_probe_fail; + } + + + return rc; +init_probe_fail: + return rc; +} + +int ov9665_sensor_open_init(struct msm_camera_sensor_info *data) +{ + int rc = 0; + ov9665_ctrl = kzalloc(sizeof(struct ov9665_ctrl), GFP_KERNEL); + if (!ov9665_ctrl) { + pr_info("ov9665_init failed!\n"); + rc = -ENOMEM; + goto init_done; + } + + if (data == NULL) { + pr_err("%s sensor data is NULL\n", __func__); + return -EINVAL; + } + ov9665_ctrl->sensordata = data; + /*switch PCLK and MCLK to 2nd cam*/ + pr_info("ov9665: ov9665_sensor_probe switch clk\n"); + if(data->camera_clk_switch != NULL) + data->camera_clk_switch(); + + /* Config power down */ + rc = gpio_request(data->sensor_pwd, "ov9665"); + if (!rc) + gpio_direction_output(data->sensor_pwd, 0); + else + pr_info("GPIO(%d) request faile",data->sensor_pwd); + gpio_free(data->sensor_pwd); + mdelay(3); + /* Input MCLK = 24MHz */ + msm_camio_clk_rate_set(24000000); + mdelay(5); + + /* Config reset */ + rc = gpio_request(data->sensor_reset, "ov9665"); + if (!rc) + gpio_direction_output(data->sensor_reset, 1); + else + pr_info("GPIO(%d) request faile", data->sensor_reset); + gpio_free(data->sensor_reset); + mdelay(20); + msm_camio_camif_pad_reg_reset(); + + rc = ov9665_i2c_write_table(&ov9665_regs.plltbl[0], + ov9665_regs.plltbl_size); + + /*read ID*/ + rc = ov9665_sensor_init(data); + if (rc < 0) { + pr_info("ov9665_sensor_init failed!\n"); + goto init_fail; + } + /*set initial register*/ + rc = ov9665_reg_init(); + if (rc < 0) { + pr_info("ov9665_sensor_reg_init failed!\n"); + goto init_fail; + } +init_done: + return rc; + +init_fail: + /* remove free ov9665_ctrl to prevent kernel panic in sensor release */ + pr_info("ov9665_sensor_open_init failed\n"); + return rc; +} + +static int ov9665_init_client(struct 
i2c_client *client) +{ + /* Initialize the MSM_CAMI2C Chip */ + init_waitqueue_head(&ov9665_wait_queue); + return 0; +} + +int ov9665_sensor_config(void __user *argp) +{ + struct sensor_cfg_data cfg_data; + long rc = 0; + if (copy_from_user(&cfg_data, + (void *)argp, sizeof(struct sensor_cfg_data))) + return -EFAULT; + + switch (cfg_data.cfgtype) { + case CFG_SET_MODE: + rc = ov9665_set_sensor_mode(cfg_data.mode); + break; + + case CFG_SET_EFFECT: + rc = ov9665_set_effect(cfg_data.cfg.effect); + if(rc < 0) + return rc; + break; + + case CFG_SET_ANTIBANDING: + rc = ov9665_set_antibanding + (cfg_data.cfg.antibanding_value); + break; + case CFG_SET_BRIGHTNESS: + rc = ov9665_set_brightness + (cfg_data.cfg.brightness_value); + break; + case CFG_SET_WB: + rc = ov9665_set_wb(cfg_data.cfg.wb_value); + break; + case CFG_SET_SHARPNESS: + rc = ov9665_set_sharpness + (cfg_data.cfg.sharpness_value); + break; + case CFG_SET_SATURATION: + rc = ov9665_set_saturation + (cfg_data.cfg.saturation_value); + break; + case CFG_SET_CONTRAST: + rc = ov9665_set_contrast(cfg_data.cfg.contrast_value); + break; + case CFG_SET_FRONT_CAMERA_MODE: + rc = ov9665_set_front_camera_mode(cfg_data.cfg.frontcam_value); + break; + default: + rc = -EINVAL; + break; + } + + return rc; +} + +int ov9665_sensor_release(void) +{ + int rc = 0; + down(&ov9665_sem); + + rc = gpio_request(ov9665_ctrl->sensordata->sensor_pwd, "ov9665"); + if (!rc) + gpio_direction_output(ov9665_ctrl->sensordata->sensor_pwd, 1); + else + pr_info("GPIO(%d) request faile", + ov9665_ctrl->sensordata->sensor_pwd); + gpio_free(ov9665_ctrl->sensordata->sensor_pwd); + + if (ov9665_ctrl->sensordata->camera_get_source() == SECOND_SOURCE) { + rc = gpio_request( + ov9665_ctrl->sensordata->sensor_reset, "ov9665"); + if (!rc) + gpio_direction_output( + ov9665_ctrl->sensordata->sensor_reset, 0); + else + pr_info("GPIO(%d) request faile", + ov9665_ctrl->sensordata->sensor_reset); + gpio_free(ov9665_ctrl->sensordata->sensor_reset); + } + + + if (ov9665_ctrl) { + kfree(ov9665_ctrl); + ov9665_ctrl = NULL; + } + + up(&ov9665_sem); + + return rc; +} + +static const char *Ov9665Vendor = "OmniVision"; +static const char *Ov9665NAME = "ov9665"; +static const char *Ov9665Size = "1M"; +static uint32_t htcwc_value; + +static ssize_t sensor_vendor_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t ret = 0; + + sprintf(buf, "%s %s %s\n", Ov9665Vendor, Ov9665NAME, Ov9665Size); + ret = strlen(buf) + 1; + + return ret; +} + +static ssize_t htcwc_get(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t length; + length = sprintf(buf, "%d\n", htcwc_value); + return length; +} + +static ssize_t htcwc_set(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + uint32_t tmp = 0; + + tmp = buf[0] - 0x30; /* only get the first char */ + +#if 0 + if (strcmp(current->comm,"com.android.camera")!=0){ + pr_info("No permission : not camera ap\n"); + return -EINVAL; + } +#endif + + htcwc_value = tmp; + //pr_info("current_comm = %s\n", current->comm); + pr_info("htcwc_value = %d\n", htcwc_value); + return count; +} + +static ssize_t sensor_read_node(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t length; + length = sprintf(buf, "%d\n", sensor_probe_node); + return length; +} + +static DEVICE_ATTR(sensor, 0444, sensor_vendor_show, NULL); +static DEVICE_ATTR(htcwc, 0777, htcwc_get, htcwc_set); +static DEVICE_ATTR(node, 0444, sensor_read_node, NULL); + +static struct kobject 
*android_ov9665; + +static int ov9665_sysfs_init(void) +{ + int ret ; + pr_info("ov9665:kobject creat and add\n"); + android_ov9665 = kobject_create_and_add("android_camera2", NULL); + if (android_ov9665 == NULL) { + pr_info("ov9665_sysfs_init: subsystem_register " \ + "failed\n"); + ret = -ENOMEM; + return ret ; + } + pr_info("ov9665:sysfs_create_file\n"); + ret = sysfs_create_file(android_ov9665, &dev_attr_sensor.attr); + if (ret) { + pr_info("ov9665_sysfs_init: sysfs_create_file " \ + "failed\n"); + kobject_del(android_ov9665); + } + + ret = sysfs_create_file(android_ov9665, &dev_attr_htcwc.attr); + if (ret) { + pr_info("ov9665_sysfs_init: sysfs_create_file htcwc failed\n"); + kobject_del(android_ov9665); + } + + ret = sysfs_create_file(android_ov9665, &dev_attr_node.attr); + if (ret) { + pr_info("ov9665_sysfs_init: dev_attr_node failed\n"); + kobject_del(android_ov9665); + } + + return 0 ; +} + + +static int ov9665_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int rc = 0; + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + rc = -ENOTSUPP; + goto probe_failure; + } + + ov9665_sensorw = kzalloc(sizeof(struct ov9665_work), GFP_KERNEL); + + if (!ov9665_sensorw) { + rc = -ENOMEM; + goto probe_failure; + } + + i2c_set_clientdata(client, ov9665_sensorw); + ov9665_init_client(client); + ov9665_client = client; + + pr_info("ov9665_probe succeeded!\n"); + + return 0; + +probe_failure: + kfree(ov9665_sensorw); + ov9665_sensorw = NULL; + pr_info("ov9665_probe failed!\n"); + return rc; +} + +static const struct i2c_device_id ov9665_i2c_id[] = { + {"ov9665", 0}, + {}, +}; + +static struct i2c_driver ov9665_i2c_driver = { + .id_table = ov9665_i2c_id, + .probe = ov9665_i2c_probe, + .remove = __exit_p(ov9665_i2c_remove), + .driver = { + .name = "ov9665", + }, +}; + +static int ov9665_sensor_probe(struct msm_camera_sensor_info *info, + struct msm_sensor_ctrl *s) +{ + int rc = i2c_add_driver(&ov9665_i2c_driver); + if (rc < 0 || ov9665_client == NULL) { + rc = -ENOTSUPP; + goto probe_done; + } + + pr_info("ov9665 s->node %d\n", s->node); + sensor_probe_node = s->node; + + /*switch clk source*/ + pr_info("ov9665: ov9665_sensor_probe switch clk\n"); + if(info->camera_clk_switch != NULL) + info->camera_clk_switch(); + + + /* Config power down */ + if(ov9665_pwd(info)<0) + goto probe_fail; + mdelay(3); + + /*Config reset */ + if(ov9665_reset(info)<0) + goto probe_fail; + mdelay(5); + + /*MCLK enable*/ + pr_info("ov9665: MCLK enable clk\n"); + msm_camio_clk_rate_set(24000000); + mdelay(100); + + /* PLL Setup Start */ + rc = ov9665_i2c_write_table(&ov9665_regs.plltbl[0], + ov9665_regs.plltbl_size); + + rc = ov9665_sensor_init(info); + if (rc < 0) + goto probe_fail; + /*set initial register*/ + rc = ov9665_reg_init(); + if (rc < 0) + goto probe_fail; + if (info->camera_main_set_probe != NULL) + info->camera_main_set_probe(true); + + s->s_init = ov9665_sensor_open_init; + s->s_release = ov9665_sensor_release; + s->s_config = ov9665_sensor_config; + + /*init done*/ + mdelay(800); + ov9665_init_done(info); + ov9665_sysfs_init(); + +probe_done: + pr_info("%s %s:%d\n", __FILE__, __func__, __LINE__); + return rc; +probe_fail: + pr_err("OV9665 probe faile\n"); + return rc; + +} + +static int __ov9665_probe(struct platform_device *pdev) +{ + int rc; + struct msm_camera_sensor_info *sdata = pdev->dev.platform_data; + + if (sdata->camera_main_get_probe != NULL) { + if (sdata->camera_main_get_probe()) { + pr_info("__s5k6aafx_probe camera main get probed already.\n"); + return 0; 
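+			/* The main (first-source) camera has already probed, so the
+			 * second-source ov9665 driver is deliberately not started. */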
+ } + } + return msm_camera_drv_start(pdev, ov9665_sensor_probe); +} + +static struct platform_driver msm_camera_driver = { + .probe = __ov9665_probe, + .driver = { + .name = "msm_camera_ov9665", + .owner = THIS_MODULE, + }, +}; + +static int __init ov9665_init(void) +{ + return platform_driver_register(&msm_camera_driver); +} + +module_init(ov9665_init); diff --git a/drivers/media/video/msm/ov9665.h b/drivers/media/video/msm/ov9665.h new file mode 100644 index 0000000000000..e399b86b6bbbf --- /dev/null +++ b/drivers/media/video/msm/ov9665.h @@ -0,0 +1,54 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#ifndef MT9D112_H +#define MT9D112_H + +#include +#include + +extern struct ov9665_reg ov9665_regs; + +enum ov9665_width { + WORD_LEN, + BYTE_LEN +}; + +struct ov9665_i2c_reg_conf { + unsigned short waddr; + unsigned short wdata; + enum ov9665_width width; + unsigned short mdelay_time; +}; + +struct ov9665_reg { + const struct ov9665_i2c_reg_conf *register_init; + uint16_t register_init_size; + const struct register_address_value_pair *prev_snap_reg_settings; + uint16_t prev_snap_reg_settings_size; + const struct register_address_value_pair *noise_reduction_reg_settings; + uint16_t noise_reduction_reg_settings_size; + const struct ov9665_i2c_reg_conf *plltbl; + uint16_t plltbl_size; + const struct ov9665_i2c_reg_conf *stbl; + uint16_t stbl_size; + const struct ov9665_i2c_reg_conf *rftbl; + uint16_t rftbl_size; +}; + +#endif /* MT9D112_H */ diff --git a/drivers/media/video/msm/ov9665_reg.c b/drivers/media/video/msm/ov9665_reg.c new file mode 100644 index 0000000000000..46cd4357717b5 --- /dev/null +++ b/drivers/media/video/msm/ov9665_reg.c @@ -0,0 +1,210 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ + +#include "ov9665.h" + + +static const struct ov9665_i2c_reg_conf const pll_setup_tbl[] = { + {0x3E, 0xD0, BYTE_LEN, 5}, + {0x3E, 0xD0, BYTE_LEN, 5}, + {0x12, 0x80, BYTE_LEN, 5} +}; + +/* sensor register init*/ +static const struct ov9665_i2c_reg_conf const register_init_tbl[] = { + /*sensor reset*/ + {0x12, 0x80, BYTE_LEN, 1}, + /*sensor IO output*/ + {0xd5, 0xff, BYTE_LEN, 1}, + {0xd6, 0x3f, BYTE_LEN, 1}, + /*Clock 24MHz 10FPS*/ + {0x3d, 0x3c, BYTE_LEN, 1}, + {0x11, 0x80, BYTE_LEN, 1},/*0x81 24MHz,0x80 48MHz*/ + {0x2a, 0x00, BYTE_LEN, 1}, + {0x2b, 0x00, BYTE_LEN, 1}, + /*power control*/ + {0x3a, 0xd9, BYTE_LEN, 1}, + {0x3b, 0x00, BYTE_LEN, 1}, + {0x3c, 0x58, BYTE_LEN, 1}, + {0x3e, 0x50, BYTE_LEN, 1}, + {0x71, 0x00, BYTE_LEN, 1}, + /*driving strengh*/ + {0x09, 0x03, BYTE_LEN, 1}, + /*Data Format YUV*/ + {0xD7, 0x10, BYTE_LEN, 1}, + {0x6a, 0x24, BYTE_LEN, 1}, + {0x85, 0xe7, BYTE_LEN, 1}, + /*sample option*/ + {0x63, 0x01, BYTE_LEN, 1}, + /*Windowing*/ + {0x17, 0x0c, BYTE_LEN, 1}, + {0x18, 0x5c, BYTE_LEN, 1}, + {0x19, 0x01, BYTE_LEN, 1}, + {0x1a, 0x82, BYTE_LEN, 1}, + {0x03, 0x0f, BYTE_LEN, 1}, + {0x2b, 0x00, BYTE_LEN, 1}, + {0x32, 0x34, BYTE_LEN, 1}, + /*BLC*/ + {0x36, 0xb4, BYTE_LEN, 1}, + {0x65, 0x10, BYTE_LEN, 1}, + {0x70, 0x02, BYTE_LEN, 1}, + {0x71, 0x9c, BYTE_LEN, 1}, + {0x72, 0xc0, BYTE_LEN, 1}, /*For preview greenish in lowlight Weiting*/ + {0x64, 0x24, BYTE_LEN, 1}, + /*AEC ,Average ,9 zone*/ + {0x43, 0x00, BYTE_LEN, 1}, + {0x5d, 0x55, BYTE_LEN, 1}, + {0x5e, 0x57, BYTE_LEN, 1}, + {0x5f, 0x21, BYTE_LEN, 1}, + /*Brightness*/ + {0x24, 0x40, BYTE_LEN, 1}, /*upper bc 35ori 39*/ + {0x25, 0x35, BYTE_LEN, 1}, /*lower bc 2aori 2e*/ + {0x26, 0x82, BYTE_LEN, 1}, + /*BF 60Hz*/ + /*0x48 for 8xgain 28 for 4xgain 68for 16xgain*/ + {0x14, 0x48, BYTE_LEN, 1}, + {0x0c, 0x38, BYTE_LEN, 1}, + {0x4f, 0x9e, BYTE_LEN, 1}, + {0x50, 0x84, BYTE_LEN, 1}, + {0x5a, 0x67, BYTE_LEN, 1}, + /*LC enable*/ + {0x7d, 0x00, BYTE_LEN, 1}, + {0x7e, 0xa0, BYTE_LEN, 1}, + {0x7f, 0x00, BYTE_LEN, 1}, + {0x80, 0x09, BYTE_LEN, 1}, + {0x81, 0x0a, BYTE_LEN, 1}, + {0x82, 0x09, BYTE_LEN, 1}, + {0x83, 0x07, BYTE_LEN, 1}, /*07 enable LC 06 disable*/ + /*AWB advance*/ + {0x96, 0xf0, BYTE_LEN, 1}, + {0x97, 0x0a, BYTE_LEN, 1}, + {0x92, 0x17, BYTE_LEN, 1}, + {0x94, 0x38, BYTE_LEN, 1}, + {0x93, 0x33, BYTE_LEN, 1}, + {0x95, 0x49, BYTE_LEN, 1}, + {0x91, 0xd8, BYTE_LEN, 1}, + {0x90, 0xdf, BYTE_LEN, 1}, + {0x8e, 0x4a, BYTE_LEN, 1}, + {0x8f, 0x59, BYTE_LEN, 1}, + {0x8d, 0x12, BYTE_LEN, 1}, + {0x8c, 0x11, BYTE_LEN, 1}, + {0x8b, 0x0c, BYTE_LEN, 1}, + {0x86, 0x9e, BYTE_LEN, 1}, + {0x87, 0x11, BYTE_LEN, 1}, + {0x88, 0x22, BYTE_LEN, 1}, + {0x89, 0x05, BYTE_LEN, 1}, + {0x8a, 0x03, BYTE_LEN, 1}, + /*Gamma enable for outdoor 1228*/ + {0x9b, 0x05, BYTE_LEN, 1}, /*ori0x08 htc0d*/ + {0x9c, 0x10, BYTE_LEN, 1}, /*ori0x16 htc19*/ + {0x9d, 0x28, BYTE_LEN, 1}, /*ori0x2f htc2e*/ + {0x9e, 0x51, BYTE_LEN, 1}, /*ori0x56 htc51*/ + {0x9f, 0x60, BYTE_LEN, 1}, /*ori0x66 htc60*/ + {0xa0, 0x6c, BYTE_LEN, 1}, /*ori0x75 htc6c*/ + {0xa1, 0x77, BYTE_LEN, 1}, /*ori0x80 htc77*/ + {0xa2, 0x81, BYTE_LEN, 1}, /*ori0x88 htc81*/ + {0xa3, 0x8a, BYTE_LEN, 1}, /*ori0x8f htc8a*/ + {0xa4, 0x93, BYTE_LEN, 1}, /*ori0x96 htc93*/ + {0xa5, 0xa1, BYTE_LEN, 1}, /*ori0xa3 htca1*/ + {0xa6, 0xae, BYTE_LEN, 1}, /*ori0xaf htcae*/ + {0xa7, 0xc4, BYTE_LEN, 1}, /*ori0xc4 htcc4*/ + {0xa8, 0xd6, BYTE_LEN, 1}, /*ori0xd7 htcd6*/ + {0xa9, 0xe7, BYTE_LEN, 1}, /*ori0xe8 htce7*/ + {0xaa, 0x21, BYTE_LEN, 1}, /*ori0x20 htc21*/ + /*De-noise enable auto*/ + {0xab, 0xe7, BYTE_LEN, 1}, + {0xb0, 0x43, 
BYTE_LEN, 1}, + {0xac, 0x04, BYTE_LEN, 1}, + {0x84, 0x80, BYTE_LEN, 1}, // For stronger de-noise ori0x50 + /*Sharpness*/ + {0xad, 0x24, BYTE_LEN, 1}, //Sharpness of 0-2xgain ori0x22 + {0xd9, 0x13, BYTE_LEN, 1}, //Sharpness of 2-4,4-8xgain ori0x64 + {0xda, 0x00, BYTE_LEN, 1}, //Sharpness of >8xgain ori0xa8 + {0xae, 0x10, BYTE_LEN, 1}, + /*Scaling*/ + {0xab, 0xe7, BYTE_LEN, 1}, + {0xb9, 0xa0, BYTE_LEN, 1}, + {0xba, 0x80, BYTE_LEN, 1}, + {0xbb, 0xa0, BYTE_LEN, 1}, + {0xbc, 0x80, BYTE_LEN, 1}, + /*CMX*/ + {0xbd, 0x04, BYTE_LEN, 1}, /*0x08 unit0a*/ + {0xbe, 0x1f, BYTE_LEN, 1}, /*0x19 unit12*/ + {0xbf, 0x03, BYTE_LEN, 1}, /*0x02 unit03*/ + {0xc0, 0x0d, BYTE_LEN, 1}, /*0x05 unit05 06*/ + {0xc1, 0x24, BYTE_LEN, 1}, /*0x28 unit0b 2c*/ + {0xc2, 0x30, BYTE_LEN, 1}, /*0x2e unit10 33*/ + {0xc3, 0x34, BYTE_LEN, 1}, /*0x27 unit10 2b*/ + {0xc4, 0x34, BYTE_LEN, 1}, /*0x26 unit0d 2a*/ + {0xc5, 0x01, BYTE_LEN, 1}, /*0x00 unit03*/ + {0xc6, 0x9c, BYTE_LEN, 1}, /*0x98 unit98*/ + {0xc7, 0x18, BYTE_LEN, 1}, /*0x18 unit98*/ + {0x69, 0x48, BYTE_LEN, 1}, + /*UV ave*/ + {0x74, 0xc0, BYTE_LEN, 1}, + /*SAT & Brightness*/ + {0xc7, 0x18, BYTE_LEN, 1}, + {0xc8, 0x06, BYTE_LEN, 1}, + {0xcb, 0x40, BYTE_LEN, 1}, + {0xcc, 0x40, BYTE_LEN, 1}, + {0xcf, 0x00, BYTE_LEN, 1}, + {0xd0, 0x20, BYTE_LEN, 1}, + {0xd1, 0x00, BYTE_LEN, 1}, + /*BLC*/ + {0x0d, 0x82, BYTE_LEN, 1}, + {0x0d, 0x80, BYTE_LEN, 1}, + #if 1 + /*UV adjustment*/ + {0xd2, 0x80, BYTE_LEN, 1}, + {0x7c, 0x18, BYTE_LEN, 1}, + {0x65, 0x01, BYTE_LEN, 1}, + {0x66, 0x00, BYTE_LEN, 1}, + {0x41, 0xa0, BYTE_LEN, 1}, + {0x5b, 0x08, BYTE_LEN, 1}, + {0x60, 0x05, BYTE_LEN, 1}, + {0x05, 0x06, BYTE_LEN, 1}, + {0x03, 0x4f, BYTE_LEN, 1}, + {0x72, 0xc0, BYTE_LEN, 1}, + #endif + /*output driving current*/ + {0x09, 0x03, BYTE_LEN, 1}, + /*H/V sync signal control*/ + {0xd8, 0xc4, BYTE_LEN, 1}, + {0x15, 0x02, BYTE_LEN, 1}, + /*night mode*/ + {0x03, 0x8f, BYTE_LEN, 1}, //Control the min fps 4f ->1/2 8f ->1/4 cf->1/8 + {0x0f, 0x4e, BYTE_LEN, 1}, + {0x06, 0x50, BYTE_LEN, 1}, //keep fps 30 when gain<4 +#if 1 + /*mirror and flip*/ + {0x04, 0xa8, BYTE_LEN, 1}, + {0x33, 0xc8, BYTE_LEN, 1} +#else + /*reverse mode*/ + {0x04, 0x28, BYTE_LEN, 1}, + {0x33, 0xc0, BYTE_LEN, 1} +#endif +}; + + +struct ov9665_reg ov9665_regs = { + .register_init = ®ister_init_tbl, + .register_init_size = ARRAY_SIZE(register_init_tbl), + .plltbl = pll_setup_tbl, + .plltbl_size = ARRAY_SIZE(pll_setup_tbl), +}; diff --git a/drivers/media/video/msm/s5k3e2fx.c b/drivers/media/video/msm/s5k3e2fx.c new file mode 100644 index 0000000000000..e548d5855f21c --- /dev/null +++ b/drivers/media/video/msm/s5k3e2fx.c @@ -0,0 +1,3566 @@ +/* + * Copyright (C) 2008-2009 QUALCOMM Incorporated. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +static uint16_t g_usModuleVersion; /*0: rev.4, 1: rev.5 */ + +/* prepare for modify PCLK*/ +#define REG_PLL_MULTIPLIER_LSB_VALUE 0xA6 +/* 0xA6 for PCLK=83MHz */ +/* 0xA0 for PCLK=80MHz */ +/* 0x90 for PCLK=72MHz */ + +/* prepare for modify initial gain*/ +#define REG_ANALOGUE_GAIN_CODE_GLOBAL_LSB_VALUE 0x80 + +#define S5K3E2FX_REG_MODEL_ID 0x0000 +#define S5K3E2FX_MODEL_ID 0x3E2F + +#define S5K3E2FX_REG_MODULE_VER 0x0002 + +#define S5K3E2FX_DEF_MCLK 24000000 + +#define S5K3E2FX_QTR_SIZE_WIDTH 1296 +#define S5K3E2FX_QTR_SIZE_HEIGHT 972 + +#define S5K3E2FX_FULL_SIZE_WIDTH 2608 +#define S5K3E2FX_FULL_SIZE_HEIGHT 1960 + +/* AEC_FLASHING */ +#define REG_GROUPED_PARAMETER_HOLD 0x0104 +#define GROUPED_PARAMETER_HOLD 0x01 +#define GROUPED_PARAMETER_UPDATE 0x00 + +/* PLL Registers */ +#define REG_PRE_PLL_CLK_DIV 0x0305 +#define REG_PLL_MULTIPLIER_MSB 0x0306 +#define REG_PLL_MULTIPLIER_LSB 0x0307 +#define REG_VT_PIX_CLK_DIV 0x0301 +#define REG_VT_SYS_CLK_DIV 0x0303 +#define REG_OP_PIX_CLK_DIV 0x0309 +#define REG_OP_SYS_CLK_DIV 0x030B + +/* Data Format Registers */ +#define REG_CCP_DATA_FORMAT_MSB 0x0112 +#define REG_CCP_DATA_FORMAT_LSB 0x0113 + +/* Output Size */ +#define REG_X_OUTPUT_SIZE_MSB 0x034C +#define REG_X_OUTPUT_SIZE_LSB 0x034D +#define REG_Y_OUTPUT_SIZE_MSB 0x034E +#define REG_Y_OUTPUT_SIZE_LSB 0x034F + +/* Binning */ +#define REG_X_EVEN_INC 0x0381 +#define REG_X_ODD_INC 0x0383 +#define REG_Y_EVEN_INC 0x0385 +#define REG_Y_ODD_INC 0x0387 +/*Reserved register */ +#define REG_BINNING_ENABLE 0x3014 + +/* Frame Fotmat */ +#define REG_FRAME_LENGTH_LINES_MSB 0x0340 +#define REG_FRAME_LENGTH_LINES_LSB 0x0341 +#define REG_LINE_LENGTH_PCK_MSB 0x0342 +#define REG_LINE_LENGTH_PCK_LSB 0x0343 + +/* MSR setting */ +/* Reserved registers */ +#define REG_SHADE_CLK_ENABLE 0x30AC +#define REG_SEL_CCP 0x30C4 +#define REG_VPIX 0x3024 +#define REG_CLAMP_ON 0x3015 +#define REG_OFFSET 0x307E + +/* CDS timing settings */ +/* Reserved registers */ +#define REG_LD_START 0x3000 +#define REG_LD_END 0x3001 +#define REG_SL_START 0x3002 +#define REG_SL_END 0x3003 +#define REG_RX_START 0x3004 +#define REG_S1_START 0x3005 +#define REG_S1_END 0x3006 +#define REG_S1S_START 0x3007 +#define REG_S1S_END 0x3008 +#define REG_S3_START 0x3009 +#define REG_S3_END 0x300A +#define REG_CMP_EN_START 0x300B +#define REG_CLP_SL_START 0x300C +#define REG_CLP_SL_END 0x300D +#define REG_OFF_START 0x300E +#define REG_RMP_EN_START 0x300F +#define REG_TX_START 0x3010 +#define REG_TX_END 0x3011 +#define REG_STX_WIDTH 0x3012 +#define REG_TYPE1_AF_ENABLE 0x3130 +#define DRIVER_ENABLED 0x0001 +#define AUTO_START_ENABLED 0x0010 +#define REG_NEW_POSITION 0x3131 +#define REG_3152_RESERVED 0x3152 +#define REG_315A_RESERVED 0x315A +#define REG_ANALOGUE_GAIN_CODE_GLOBAL_MSB 0x0204 +#define REG_ANALOGUE_GAIN_CODE_GLOBAL_LSB 0x0205 +#define REG_FINE_INTEGRATION_TIME 0x0200 +#define REG_COARSE_INTEGRATION_TIME 0x0202 +#define REG_COARSE_INTEGRATION_TIME_LSB 0x0203 + +/* Mode select register */ +#define S5K3E2FX_REG_MODE_SELECT 0x0100 +#define S5K3E2FX_MODE_SELECT_STREAM 0x01 /* start streaming */ +#define S5K3E2FX_MODE_SELECT_SW_STANDBY 0x00 /* software standby */ +#define S5K3E2FX_REG_SOFTWARE_RESET 0x0103 +#define S5K3E2FX_SOFTWARE_RESET 0x01 +#define REG_TEST_PATTERN_MODE 0x0601 + +/* Samsung other MSR setting*/ +#define REG_301D_RESERVED 0x301D +#define REG_3028_RESERVED 0x3028 +#define REG_3070_RESERVED 0x3070 
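+/* The REG_3xxx_RESERVED addresses in this group are Samsung manufacturer-
+ * specific (MSR) registers; their values come from the vendor init tables
+ * and several of them differ between the EVT4 and EVT5 sensor revisions
+ * (see Init_setting_evt4/evt5 below). */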
+#define REG_3072_RESERVED 0x3072 +#define REG_301B_RESERVED 0x301B +#define REG_30BD_RESERVED 0x30BD +#define REG_30C2_RESERVED 0x30C2 +#define REG_3151_RESERVED 0x3151 /* 100202 the right address is 0x3151 */ +#define REG_3029_RESERVED 0x3029 +#define REG_30BF_RESERVED 0x30BF +#define REG_3022_RESERVED 0x3022 +#define REG_3019_RESERVED 0x3019 +#define REG_3150_RESERVED 0x3150 +#define REG_3157_RESERVED 0x3157 +#define REG_3159_RESERVED 0x3159 +/* LC Preview/Snapshot difference register */ +#define REG_SH4CH_BLK_WIDTH_R 0x309E +#define REG_SH4CH_BLK_HEIGHT_R 0x309F +#define REG_SH4CH_STEP_X_R_MSB 0x30A0 +#define REG_SH4CH_STEP_X_R_LSB 0x30A1 +#define REG_SH4CH_STEP_Y_R_MSB 0x30A2 +#define REG_SH4CH_STEP_Y_R_LSB 0x30A3 +#define REG_SH4CH_START_BLK_CNT_X_R 0x30A4 +#define REG_SH4CH_START_BLK_INT_X_R 0x30A5 +#define REG_SH4CH_START_FRAC_X_R_MSB 0x30A6 +#define REG_SH4CH_START_FRAC_X_R_LSB 0x30A7 +#define REG_SH4CH_START_BLK_CNT_Y_R 0x30A8 +#define REG_SH4CH_START_BLK_INT_Y_R 0x30A9 +#define REG_SH4CH_START_FRAC_Y_R_MSB 0x30AA +#define REG_SH4CH_START_FRAC_Y_R_LSB 0x30AB +#define REG_X_ADDR_START_MSB 0x0344 +#define REG_X_ADDR_START_LSB 0x0345 +#define REG_Y_ADDR_START_MSB 0x0346 +#define REG_Y_ADDR_START_LSB 0x0347 +#define REG_X_ADDR_END_MSB 0x0348 +#define REG_X_ADDR_END_LSB 0x0349 +#define REG_Y_ADDR_END_MSB 0x034A +#define REG_Y_ADDR_END_LSB 0x034B + + +struct s5k3e2fx_i2c_reg_conf { + unsigned short waddr; + unsigned char bdata; +}; + +/* Separate the EVT4/EVT5 sensor init and LC setting start */ + +struct s5k3e2fx_i2c_reg_conf Init_setting_evt4[] = { +/*EVT4 */ + {REG_PRE_PLL_CLK_DIV, 0x06}, /* PLL setting */ + {REG_PLL_MULTIPLIER_MSB, 0x00}, + {REG_PLL_MULTIPLIER_LSB, 0x83}, + {REG_VT_PIX_CLK_DIV, 0x08}, + {REG_VT_SYS_CLK_DIV, 0x01}, + {REG_OP_PIX_CLK_DIV, 0x08}, + {REG_OP_SYS_CLK_DIV, 0x01}, +/* Preview Output Size */ + {REG_X_OUTPUT_SIZE_MSB, 0x05}, + {REG_X_OUTPUT_SIZE_LSB, 0x10}, + {REG_Y_OUTPUT_SIZE_MSB, 0x03}, + {REG_Y_OUTPUT_SIZE_LSB, 0xcc}, +/* Frame format */ + {REG_FRAME_LENGTH_LINES_MSB, 0x03}, + {REG_FRAME_LENGTH_LINES_LSB, 0xe2}, + {REG_LINE_LENGTH_PCK_MSB, 0x0a}, + {REG_LINE_LENGTH_PCK_LSB, 0xac}, +/* Preview Binning */ + {REG_X_EVEN_INC, 0x01}, + {REG_X_ODD_INC, 0x01}, + {REG_Y_EVEN_INC, 0x01}, + {REG_Y_ODD_INC, 0x03}, + {REG_BINNING_ENABLE, 0x06}, +/* Samsung MSR Setting */ + {REG_SEL_CCP, 0x01}, + {REG_LD_START, 0x03}, +/* Add EVT5 sensor Samsung MSR setting, Start */ + {REG_LD_END, 0x94}, + {REG_SL_START, 0x02}, + {REG_SL_END, 0x95}, + {REG_RX_START, 0x0f}, + {REG_S1_START, 0x05}, + {REG_S1_END, 0x3c}, + {REG_S1S_START, 0x8c}, + {REG_S1S_END, 0x93}, + {REG_S3_START, 0x05}, + {REG_S3_END, 0x3a}, + {REG_CLP_SL_START, 0x02}, + {REG_CLP_SL_END, 0x3e}, + {REG_RMP_EN_START, 0x0e}, + {REG_TX_START, 0x46}, + {REG_TX_END, 0x64}, + {REG_STX_WIDTH, 0x1e}, + {REG_301D_RESERVED, 0x3f}, + {REG_VPIX, 0x04}, + {REG_3028_RESERVED, 0x40}, + {REG_3070_RESERVED, 0xdf}, + {REG_301B_RESERVED, 0x73}, + {REG_OFFSET, 0x02}, + {REG_30BD_RESERVED, 0x06}, + {REG_30C2_RESERVED, 0x0b}, + {REG_SHADE_CLK_ENABLE, 0x81}, + {0x3151, 0xe6}, + {REG_3029_RESERVED, 0x02}, + {REG_30BF_RESERVED, 0x00}, + {REG_3022_RESERVED, 0x87}, + {REG_3019_RESERVED, 0x60}, + {0x3146, 0x3c}, + {REG_3152_RESERVED, 0x08}, + {REG_3159_RESERVED, 0x0A}, +/* HS, VS driving strength [3:2]=>VS, [1:0]=>HS 00:2mA, 01:4mA, 10:6mA, + * 11:8mA + */ + {REG_315A_RESERVED, 0xAA}, +/* PCLK, DATA driving strength [7:6]=>data, [5:4]=>PCLK 00:2mA, 01:4mA + * 10:6mA, 11:8mA + */ +/* AEC Setting */ + {REG_ANALOGUE_GAIN_CODE_GLOBAL_LSB, 
REG_ANALOGUE_GAIN_CODE_GLOBAL_LSB_VALUE}, + {REG_FINE_INTEGRATION_TIME, 0x02}, + {REG_COARSE_INTEGRATION_TIME, 0x03}, +}; + +struct s5k3e2fx_i2c_reg_conf Init_setting_evt5[] = { +/* EVT5 */ + {REG_PRE_PLL_CLK_DIV, 0x06}, /* PLL setting */ + {REG_PLL_MULTIPLIER_MSB, 0x00}, + {REG_PLL_MULTIPLIER_LSB, REG_PLL_MULTIPLIER_LSB_VALUE}, + {REG_VT_PIX_CLK_DIV, 0x08}, + {REG_VT_SYS_CLK_DIV, 0x01}, + {REG_OP_PIX_CLK_DIV, 0x08}, + {REG_OP_SYS_CLK_DIV, 0x01}, +/* Data Format */ + {REG_CCP_DATA_FORMAT_MSB, 0x0a}, + {REG_CCP_DATA_FORMAT_LSB, 0x0a}, +/* Preview Output Size */ + {REG_X_OUTPUT_SIZE_MSB, 0x05}, + {REG_X_OUTPUT_SIZE_LSB, 0x10}, + {REG_Y_OUTPUT_SIZE_MSB, 0x03}, + {REG_Y_OUTPUT_SIZE_LSB, 0xcc}, + {REG_X_ADDR_START_MSB, 0x00}, + {REG_X_ADDR_START_LSB, 0x00}, /* 100202 Change to 00 to the same as DesireC */ + {REG_Y_ADDR_START_MSB, 0x00}, + {REG_Y_ADDR_START_LSB, 0x00}, /* 100202 Change to 00 to the same as DesireC */ + {REG_X_ADDR_END_MSB, 0x0a}, + {REG_X_ADDR_END_LSB, 0x2F}, /* 100202 Change to 2F to the same as DesireC */ + {REG_Y_ADDR_END_MSB, 0x07}, + {REG_Y_ADDR_END_LSB, 0xA7}, /* 100202 Change to A7 to the same as DesireC */ +/* Frame format */ + {REG_FRAME_LENGTH_LINES_MSB, 0x03}, + {REG_FRAME_LENGTH_LINES_LSB, 0xe2}, + {REG_LINE_LENGTH_PCK_MSB, 0x0a}, + {REG_LINE_LENGTH_PCK_LSB, 0xac}, +/* Preview Binning */ + {REG_X_EVEN_INC, 0x01}, + {REG_X_ODD_INC, 0x01}, + {REG_Y_EVEN_INC, 0x01}, + {REG_Y_ODD_INC, 0x03}, + {REG_BINNING_ENABLE, 0x06}, +/* Samsung MSR Setting */ + {REG_SEL_CCP, 0x01}, + {REG_LD_START, 0x03}, +/* EVT5 sensor Samsung MSR setting */ + {REG_LD_END, 0x99}, + {REG_SL_START, 0x02}, + {REG_SL_END, 0x9A}, + {REG_RX_START, 0x0f}, + {REG_S1_START, 0x05}, + {REG_S1_END, 0x3c}, + {REG_S1S_START, 0x8c}, + {REG_S1S_END, 0x26}, + {REG_S3_START, 0x05}, + {REG_S3_END, 0x3a}, + {REG_CMP_EN_START, 0x10}, + {REG_CLP_SL_START, 0x02}, + {REG_CLP_SL_END, 0x3e}, + {REG_OFF_START, 0x02}, + {REG_RMP_EN_START, 0x0e}, + {REG_TX_START, 0x46}, + {REG_TX_END, 0x64}, + {REG_STX_WIDTH, 0x1e}, + {REG_CLAMP_ON, 0x00}, + {REG_301D_RESERVED, 0x3f}, + {REG_VPIX, 0x04}, + {REG_3028_RESERVED, 0x40}, + {REG_3070_RESERVED, 0xdf}, + {REG_3072_RESERVED, 0x20}, + {REG_301B_RESERVED, 0x73}, + {REG_OFFSET, 0x02}, + {REG_30BD_RESERVED, 0x06}, + {REG_30C2_RESERVED, 0x0b}, + {REG_SHADE_CLK_ENABLE, 0x81}, + {REG_3151_RESERVED, 0xe6}, /* 100202 the right address is 0x3151 */ + {REG_3029_RESERVED, 0x02}, + {REG_30BF_RESERVED, 0x00}, + {REG_3022_RESERVED, 0x87}, + {REG_3019_RESERVED, 0x60}, + {0x3060, 0x03}, + {0x3061, 0x6C}, + {0x3062, 0x00}, + {0x3063, 0xD6}, + {0x3023, 0x0C}, + {REG_3152_RESERVED, 0x08}, + {REG_3150_RESERVED, 0x50}, /* from 0x40 to 0x50 for PCLK=80MHz */ +/* Inverse PCLK = 0x50 */ + {REG_3157_RESERVED, 0x04}, /* from 0x00 to 0x04 for PCLK=80MHz */ +/* PCLK Delay offset; 0x0a will delay around 4ns at 80MHz */ + {REG_3159_RESERVED, 0x0f}, /* from 0x00 to 0x0f for PCLK=80MHz */ +/* HS, VS driving strength [3:2]=>VS, [1:0]=>HS 00:2mA, 01:4mA, 10:6mA, + * 11:8mA + */ + {REG_315A_RESERVED, 0xf0}, /* from 0x10 to 0xf0 for PCLK=80MHz */ +/* PCLK, DATA driving strength [7:6]=>data, [5:4]=>PCLK 00:2mA, 01:4mA, + * 10:6mA, 11:8mA + */ +/* AEC Setting */ + {REG_ANALOGUE_GAIN_CODE_GLOBAL_LSB, REG_ANALOGUE_GAIN_CODE_GLOBAL_LSB_VALUE}, + {REG_FINE_INTEGRATION_TIME, 0x02}, + {REG_COARSE_INTEGRATION_TIME, 0x03}, +/* Preview LC config Setting */ + {REG_SH4CH_BLK_WIDTH_R, 0x52}, + {REG_SH4CH_BLK_HEIGHT_R, 0x3e}, + {REG_SH4CH_STEP_X_R_MSB, 0x03}, + {REG_SH4CH_STEP_X_R_LSB, 0x1f}, + {REG_SH4CH_STEP_Y_R_MSB, 0x04}, + 
{REG_SH4CH_STEP_Y_R_LSB, 0x21}, + {REG_SH4CH_START_BLK_CNT_X_R, 0x04}, + {REG_SH4CH_START_BLK_INT_X_R, 0x00}, + {REG_SH4CH_START_FRAC_X_R_MSB, 0x0c}, + {REG_SH4CH_START_FRAC_X_R_LSB, 0x7c}, + {REG_SH4CH_START_BLK_CNT_Y_R, 0x04}, + {REG_SH4CH_START_BLK_INT_Y_R, 0x00}, + {REG_SH4CH_START_FRAC_Y_R_MSB, 0x10}, + {REG_SH4CH_START_FRAC_Y_R_LSB, 0x84}, +}; + +struct s5k3e2fx_i2c_reg_conf lc_setting_evt4[] = { + /*EVT4 */ + {0x3200, 0x00}, + {0x3201, 0xbe}, + {0x3202, 0x4e}, + {0x3203, 0x0f}, + {0x3204, 0xb9}, + {0x3205, 0x07}, + {0x3206, 0x00}, + {0x3207, 0x4b}, + {0x3208, 0xdf}, + {0x3209, 0x0f}, + {0x320a, 0xc6}, + {0x320b, 0x39}, + {0x320c, 0x00}, + {0x320d, 0x13}, + {0x320e, 0xee}, + {0x320f, 0x00}, + {0x3210, 0x14}, + {0x3211, 0x79}, + {0x3212, 0x0f}, + {0x3213, 0x9d}, + {0x3214, 0xed}, + {0x3215, 0x00}, + {0x3216, 0x3d}, + {0x3217, 0x02}, + {0x3218, 0x0f}, + {0x3219, 0xa8}, + {0x321a, 0x6a}, + {0x321b, 0x00}, + {0x321c, 0x4c}, + {0x321d, 0x9a}, + {0x321e, 0x0f}, + {0x321f, 0xfb}, + {0x3220, 0xdb}, + {0x3221, 0x0f}, + {0x3222, 0xc8}, + {0x3223, 0x1a}, + {0x3224, 0x00}, + {0x3225, 0x5b}, + {0x3226, 0xf3}, + {0x3227, 0x0f}, + {0x3228, 0xae}, + {0x3229, 0xe3}, + {0x322a, 0x00}, + {0x322b, 0x5b}, + {0x322c, 0xc8}, + {0x322d, 0x0f}, + {0x322e, 0xc3}, + {0x322f, 0xf6}, + {0x3230, 0x0f}, + {0x3231, 0xe4}, + {0x3232, 0xb3}, + {0x3233, 0x00}, + {0x3234, 0x58}, + {0x3235, 0xdf}, + {0x3236, 0x0f}, + {0x3237, 0xbf}, + {0x3238, 0x67}, + {0x3239, 0x00}, + {0x323a, 0x3c}, + {0x323b, 0x8e}, + {0x323c, 0x0f}, + {0x323d, 0xd0}, + {0x323e, 0x3d}, + {0x323f, 0x00}, + {0x3240, 0x11}, + {0x3241, 0xfd}, + {0x3242, 0x00}, + {0x3243, 0x1a}, + {0x3244, 0xf0}, + {0x3245, 0x0f}, + {0x3246, 0xbd}, + {0x3247, 0x5d}, + {0x3248, 0x00}, + {0x3249, 0x22}, + {0x324a, 0x32}, + {0x324b, 0x0f}, + {0x324c, 0xff}, + {0x324d, 0x2e}, + {0x324e, 0x0f}, + {0x324f, 0xeb}, + {0x3250, 0x0c}, + {0x3251, 0x00}, + {0x3252, 0x11}, + {0x3253, 0xbd}, + {0x3254, 0x00}, + {0x3255, 0x17}, + {0x3256, 0xda}, + {0x3257, 0x0f}, + {0x3258, 0xeb}, + {0x3259, 0xf9}, + {0x325a, 0x00}, + {0x325b, 0x00}, + {0x325c, 0x81}, + {0x325d, 0x0f}, + {0x325e, 0xdf}, + {0x325f, 0x3e}, + {0x3260, 0x00}, + {0x3261, 0x2c}, + {0x3262, 0x9f}, + {0x3263, 0x0f}, + {0x3264, 0xe9}, + {0x3265, 0xd7}, + {0x3266, 0x0f}, + {0x3267, 0xd1}, + {0x3268, 0x83}, + {0x3269, 0x00}, + {0x326a, 0x3e}, + {0x326b, 0x18}, + {0x326c, 0x00}, + {0x326d, 0xcb}, + {0x326e, 0x32}, + {0x326f, 0x0f}, + {0x3270, 0xaf}, + {0x3271, 0xe3}, + {0x3272, 0x00}, + {0x3273, 0x51}, + {0x3274, 0xc8}, + {0x3275, 0x0f}, + {0x3276, 0xc5}, + {0x3277, 0x4c}, + {0x3278, 0x00}, + {0x3279, 0x13}, + {0x327a, 0x30}, + {0x327b, 0x00}, + {0x327c, 0x15}, + {0x327d, 0x7b}, + {0x327e, 0x0f}, + {0x327f, 0x97}, + {0x3280, 0x3f}, + {0x3281, 0x00}, + {0x3282, 0x3e}, + {0x3283, 0x26}, + {0x3284, 0x0f}, + {0x3285, 0xb3}, + {0x3286, 0x02}, + {0x3287, 0x00}, + {0x3288, 0x37}, + {0x3289, 0x73}, + {0x328a, 0x00}, + {0x328b, 0x0f}, + {0x328c, 0xd7}, + {0x328d, 0x0f}, + {0x328e, 0xbf}, + {0x328f, 0xdc}, + {0x3290, 0x00}, + {0x3291, 0x5a}, + {0x3292, 0x9b}, + {0x3293, 0x0f}, + {0x3294, 0xaf}, + {0x3295, 0x68}, + {0x3296, 0x00}, + {0x3297, 0x4c}, + {0x3298, 0xdb}, + {0x3299, 0x0f}, + {0x329a, 0xdc}, + {0x329b, 0xb5}, + {0x329c, 0x0f}, + {0x329d, 0xca}, + {0x329e, 0x69}, + {0x329f, 0x00}, + {0x32a0, 0x68}, + {0x32a1, 0x0a}, + {0x32a2, 0x0f}, + {0x32a3, 0xc9}, + {0x32a4, 0x6c}, + {0x32a5, 0x00}, + {0x32a6, 0x37}, + {0x32a7, 0x6e}, + {0x32a8, 0x0f}, + {0x32a9, 0xe2}, + {0x32aa, 0x22}, + {0x32ab, 0x0f}, + {0x32ac, 0xfd}, + {0x32ad, 0x8b}, + 
{0x32ae, 0x00}, + {0x32af, 0x36}, + {0x32b0, 0x33}, + {0x32b1, 0x0f}, + {0x32b2, 0xa3}, + {0x32b3, 0xf7}, + {0x32b4, 0x00}, + {0x32b5, 0x1b}, + {0x32b6, 0xd5}, + {0x32b7, 0x00}, + {0x32b8, 0x0a}, + {0x32b9, 0x4f}, + {0x32ba, 0x0f}, + {0x32bb, 0xd6}, + {0x32bc, 0x4d}, + {0x32bd, 0x00}, + {0x32be, 0x21}, + {0x32bf, 0x85}, + {0x32c0, 0x0f}, + {0x32c1, 0xfc}, + {0x32c2, 0x04}, + {0x32c3, 0x00}, + {0x32c4, 0x10}, + {0x32c5, 0x8c}, + {0x32c6, 0x00}, + {0x32c7, 0x00}, + {0x32c8, 0xf5}, + {0x32c9, 0x0f}, + {0x32ca, 0xd4}, + {0x32cb, 0xf3}, + {0x32cc, 0x00}, + {0x32cd, 0x3b}, + {0x32ce, 0x31}, + {0x32cf, 0x0f}, + {0x32d0, 0xe0}, + {0x32d1, 0xb3}, + {0x32d2, 0x0f}, + {0x32d3, 0xe4}, + {0x32d4, 0xa1}, + {0x32d5, 0x00}, + {0x32d6, 0x22}, + {0x32d7, 0x10}, + {0x32d8, 0x00}, + {0x32d9, 0xa7}, + {0x32da, 0x91}, + {0x32db, 0x0f}, + {0x32dc, 0xc6}, + {0x32dd, 0xd2}, + {0x32de, 0x00}, + {0x32df, 0x3a}, + {0x32e0, 0x5e}, + {0x32e1, 0x0f}, + {0x32e2, 0xd6}, + {0x32e3, 0xe0}, + {0x32e4, 0x00}, + {0x32e5, 0x0f}, + {0x32e6, 0xa2}, + {0x32e7, 0x00}, + {0x32e8, 0x0b}, + {0x32e9, 0x02}, + {0x32ea, 0x0f}, + {0x32eb, 0xb3}, + {0x32ec, 0xdd}, + {0x32ed, 0x00}, + {0x32ee, 0x2f}, + {0x32ef, 0xa2}, + {0x32f0, 0x0f}, + {0x32f1, 0xbb}, + {0x32f2, 0x1f}, + {0x32f3, 0x00}, + {0x32f4, 0x38}, + {0x32f5, 0x09}, + {0x32f6, 0x0f}, + {0x32f7, 0xfc}, + {0x32f8, 0xc4}, + {0x32f9, 0x0f}, + {0x32fa, 0xde}, + {0x32fb, 0x51}, + {0x32fc, 0x00}, + {0x32fd, 0x3c}, + {0x32fe, 0xdb}, + {0x32ff, 0x0f}, + {0x3300, 0xc3}, + {0x3301, 0x2e}, + {0x3302, 0x00}, + {0x3303, 0x4a}, + {0x3304, 0x96}, + {0x3305, 0x0f}, + {0x3306, 0xd7}, + {0x3307, 0x20}, + {0x3308, 0x0f}, + {0x3309, 0xe3}, + {0x330a, 0x64}, + {0x330b, 0x00}, + {0x330c, 0x3b}, + {0x330d, 0xde}, + {0x330e, 0x0f}, + {0x330f, 0xe2}, + {0x3310, 0xb6}, + {0x3311, 0x00}, + {0x3312, 0x29}, + {0x3313, 0xfd}, + {0x3314, 0x0f}, + {0x3315, 0xd3}, + {0x3316, 0xee}, + {0x3317, 0x00}, + {0x3318, 0x0c}, + {0x3319, 0x40}, + {0x331a, 0x00}, + {0x331b, 0x1d}, + {0x331c, 0x96}, + {0x331d, 0x0f}, + {0x331e, 0xd4}, + {0x331f, 0xd9}, + {0x3320, 0x00}, + {0x3321, 0x0e}, + {0x3322, 0xa8}, + {0x3323, 0x00}, + {0x3324, 0x02}, + {0x3325, 0xc6}, + {0x3326, 0x0f}, + {0x3327, 0xf3}, + {0x3328, 0xc1}, + {0x3329, 0x00}, + {0x332a, 0x0f}, + {0x332b, 0xe2}, + {0x332c, 0x00}, + {0x332d, 0x03}, + {0x332e, 0x56}, + {0x332f, 0x0f}, + {0x3330, 0xf4}, + {0x3331, 0xc0}, + {0x3332, 0x0f}, + {0x3333, 0xfe}, + {0x3334, 0xc5}, + {0x3335, 0x0f}, + {0x3336, 0xe8}, + {0x3337, 0xb8}, + {0x3338, 0x00}, + {0x3339, 0x1e}, + {0x333a, 0xb0}, + {0x333b, 0x0f}, + {0x333c, 0xf2}, + {0x333d, 0x01}, + {0x333e, 0x0f}, + {0x333f, 0xe4}, + {0x3340, 0x68}, + {0x3341, 0x00}, + {0x3342, 0x27}, + {0x3343, 0x00}, + {0x3344, 0x00}, + {0x3345, 0xc0}, + {0x3346, 0x46}, + {0x3347, 0x0f}, + {0x3348, 0xbb}, + {0x3349, 0x8b}, + {0x334a, 0x00}, + {0x334b, 0x46}, + {0x334c, 0xea}, + {0x334d, 0x0f}, + {0x334e, 0xcc}, + {0x334f, 0xb7}, + {0x3350, 0x00}, + {0x3351, 0x10}, + {0x3352, 0x01}, + {0x3353, 0x00}, + {0x3354, 0x13}, + {0x3355, 0xe1}, + {0x3356, 0x0f}, + {0x3357, 0x9f}, + {0x3358, 0xff}, + {0x3359, 0x00}, + {0x335a, 0x3d}, + {0x335b, 0x6c}, + {0x335c, 0x0f}, + {0x335d, 0xa7}, + {0x335e, 0x7b}, + {0x335f, 0x00}, + {0x3360, 0x4b}, + {0x3361, 0x91}, + {0x3362, 0x0f}, + {0x3363, 0xfb}, + {0x3364, 0x99}, + {0x3365, 0x0f}, + {0x3366, 0xcc}, + {0x3367, 0x52}, + {0x3368, 0x00}, + {0x3369, 0x53}, + {0x336a, 0x00}, + {0x336b, 0x0f}, + {0x336c, 0xaa}, + {0x336d, 0xa2}, + {0x336e, 0x00}, + {0x336f, 0x64}, + {0x3370, 0xa2}, + {0x3371, 0x0f}, + {0x3372, 0xbe}, + {0x3373, 
0xc4}, + {0x3374, 0x0f}, + {0x3375, 0xe4}, + {0x3376, 0xbb}, + {0x3377, 0x00}, + {0x3378, 0x56}, + {0x3379, 0xd8}, + {0x337a, 0x0f}, + {0x337b, 0xc8}, + {0x337c, 0xdc}, + {0x337d, 0x00}, + {0x337e, 0x44}, + {0x337f, 0xa7}, + {0x3380, 0x0f}, + {0x3381, 0xbd}, + {0x3382, 0xca}, + {0x3383, 0x00}, + {0x3384, 0x29}, + {0x3385, 0xf7}, + {0x3386, 0x00}, + {0x3387, 0x08}, + {0x3388, 0xf2}, + {0x3389, 0x0f}, + {0x338a, 0xc6}, + {0x338b, 0x1c}, + {0x338c, 0x00}, + {0x338d, 0x28}, + {0x338e, 0x3b}, + {0x338f, 0x0f}, + {0x3390, 0xfc}, + {0x3391, 0x30}, + {0x3392, 0x0f}, + {0x3393, 0xee}, + {0x3394, 0x3e}, + {0x3395, 0x00}, + {0x3396, 0x02}, + {0x3397, 0x32}, + {0x3398, 0x00}, + {0x3399, 0x25}, + {0x339a, 0xb6}, + {0x339b, 0x0f}, + {0x339c, 0xe9}, + {0x339d, 0xd5}, + {0x339e, 0x0f}, + {0x339f, 0xf3}, + {0x33a0, 0x80}, + {0x33a1, 0x0f}, + {0x33a2, 0xda}, + {0x33a3, 0x56}, + {0x33a4, 0x00}, + {0x33a5, 0x3c}, + {0x33a6, 0x4a}, + {0x33a7, 0x0f}, + {0x33a8, 0xe0}, + {0x33a9, 0x9d}, + {0x33aa, 0x0f}, + {0x33ab, 0xd9}, + {0x33ac, 0x7d}, + {0x33ad, 0x00}, + {0x33ae, 0x34}, + {0x33af, 0x54}, + {0x309D, 0x62}, + {0x309d, 0x22}, + {0x309e, 0x52}, + {0x309f, 0x3e}, + {0x30a0, 0x03}, + {0x30a1, 0x1f}, + {0x30a2, 0x04}, + {0x30a3, 0x21}, + {0x30a4, 0x04}, + {0x30a5, 0x00}, + {0x30a6, 0x0c}, + {0x30a7, 0x7c}, + {0x30a8, 0x04}, + {0x30a9, 0x00}, + {0x30aa, 0x10}, + {0x30ab, 0x84}, +}; +struct s5k3e2fx_i2c_reg_conf lc_setting_evt5[] = { +/*EVT5 */ +/* LC setting Start */ + {0x3200, 0x00}, /* 100304 Modify LC setting DNP light source t75-r70 to improve reddish issue*/ + {0x3201, 0x99}, + {0x3202, 0xc1}, + {0x3203, 0x0f}, + {0x3204, 0xd0}, + {0x3205, 0x1b}, + {0x3206, 0x00}, + {0x3207, 0x24}, + {0x3208, 0x8d}, + {0x3209, 0x0f}, + {0x320a, 0xee}, + {0x320b, 0x0f}, + {0x320c, 0x00}, + {0x320d, 0x04}, + {0x320e, 0x5c}, + {0x320f, 0x00}, + {0x3210, 0x07}, + {0x3211, 0x68}, + {0x3212, 0x0f}, + {0x3213, 0xc2}, + {0x3214, 0x82}, + {0x3215, 0x00}, + {0x3216, 0x29}, + {0x3217, 0x3e}, + {0x3218, 0x0f}, + {0x3219, 0xd3}, + {0x321a, 0x63}, + {0x321b, 0x00}, + {0x321c, 0x22}, + {0x321d, 0x6c}, + {0x321e, 0x0f}, + {0x321f, 0xf8}, + {0x3220, 0xce}, + {0x3221, 0x0f}, + {0x3222, 0xed}, + {0x3223, 0x30}, + {0x3224, 0x00}, + {0x3225, 0x37}, + {0x3226, 0x87}, + {0x3227, 0x0f}, + {0x3228, 0xc2}, + {0x3229, 0x87}, + {0x322a, 0x00}, + {0x322b, 0x2a}, + {0x322c, 0xc6}, + {0x322d, 0x0f}, + {0x322e, 0xf3}, + {0x322f, 0xd9}, + {0x3230, 0x0f}, + {0x3231, 0xea}, + {0x3232, 0x1a}, + {0x3233, 0x00}, + {0x3234, 0x2d}, + {0x3235, 0x9f}, + {0x3236, 0x0f}, + {0x3237, 0xde}, + {0x3238, 0x7d}, + {0x3239, 0x00}, + {0x323a, 0x37}, + {0x323b, 0x1e}, + {0x323c, 0x0f}, + {0x323d, 0xed}, + {0x323e, 0x9c}, + {0x323f, 0x0f}, + {0x3240, 0xf6}, + {0x3241, 0xfd}, + {0x3242, 0x00}, + {0x3243, 0x15}, + {0x3244, 0xeb}, + {0x3245, 0x0f}, + {0x3246, 0xd3}, + {0x3247, 0xca}, + {0x3248, 0x00}, + {0x3249, 0x08}, + {0x324a, 0xe6}, + {0x324b, 0x0f}, + {0x324c, 0xf4}, + {0x324d, 0x7a}, + {0x324e, 0x0f}, + {0x324f, 0xed}, + {0x3250, 0x1e}, + {0x3251, 0x00}, + {0x3252, 0x0d}, + {0x3253, 0x46}, + {0x3254, 0x00}, + {0x3255, 0x0c}, + {0x3256, 0x3e}, + {0x3257, 0x00}, + {0x3258, 0x09}, + {0x3259, 0xcf}, + {0x325a, 0x00}, + {0x325b, 0x09}, + {0x325c, 0xb5}, + {0x325d, 0x0f}, + {0x325e, 0xec}, + {0x325f, 0x47}, + {0x3260, 0x00}, + {0x3261, 0x1d}, + {0x3262, 0xd8}, + {0x3263, 0x0f}, + {0x3264, 0xf7}, + {0x3265, 0x11}, + {0x3266, 0x0f}, + {0x3267, 0xea}, + {0x3268, 0x3d}, + {0x3269, 0x00}, + {0x326a, 0x09}, + {0x326b, 0xcc}, + {0x326c, 0x00}, + {0x326d, 0x99}, + {0x326e, 0x45}, + {0x326f, 
0x0f}, + {0x3270, 0xd3}, + {0x3271, 0x80}, + {0x3272, 0x00}, + {0x3273, 0x20}, + {0x3274, 0xf7}, + {0x3275, 0x0f}, + {0x3276, 0xef}, + {0x3277, 0x0d}, + {0x3278, 0x00}, + {0x3279, 0x09}, + {0x327a, 0x3c}, + {0x327b, 0x00}, + {0x327c, 0x01}, + {0x327d, 0x16}, + {0x327e, 0x0f}, + {0x327f, 0xc9}, + {0x3280, 0x36}, + {0x3281, 0x00}, + {0x3282, 0x21}, + {0x3283, 0xff}, + {0x3284, 0x0f}, + {0x3285, 0xdc}, + {0x3286, 0xc2}, + {0x3287, 0x00}, + {0x3288, 0x1e}, + {0x3289, 0xc0}, + {0x328a, 0x0f}, + {0x328b, 0xf0}, + {0x328c, 0xa7}, + {0x328d, 0x0f}, + {0x328e, 0xf9}, + {0x328f, 0x2a}, + {0x3290, 0x00}, + {0x3291, 0x29}, + {0x3292, 0x5c}, + {0x3293, 0x0f}, + {0x3294, 0xc9}, + {0x3295, 0x2a}, + {0x3296, 0x00}, + {0x3297, 0x1f}, + {0x3298, 0x5c}, + {0x3299, 0x0f}, + {0x329a, 0xfa}, + {0x329b, 0x0c}, + {0x329c, 0x0f}, + {0x329d, 0xf3}, + {0x329e, 0x94}, + {0x329f, 0x00}, + {0x32a0, 0x1c}, + {0x32a1, 0xce}, + {0x32a2, 0x0f}, + {0x32a3, 0xed}, + {0x32a4, 0xb7}, + {0x32a5, 0x00}, + {0x32a6, 0x34}, + {0x32a7, 0x51}, + {0x32a8, 0x0f}, + {0x32a9, 0xfa}, + {0x32aa, 0x7d}, + {0x32ab, 0x0f}, + {0x32ac, 0xe6}, + {0x32ad, 0xbf}, + {0x32ae, 0x00}, + {0x32af, 0x18}, + {0x32b0, 0xc6}, + {0x32b1, 0x0f}, + {0x32b2, 0xe0}, + {0x32b3, 0x72}, + {0x32b4, 0x00}, + {0x32b5, 0x08}, + {0x32b6, 0x23}, + {0x32b7, 0x0f}, + {0x32b8, 0xf1}, + {0x32b9, 0x54}, + {0x32ba, 0x0f}, + {0x32bb, 0xe1}, + {0x32bc, 0x84}, + {0x32bd, 0x00}, + {0x32be, 0x26}, + {0x32bf, 0xb1}, + {0x32c0, 0x0f}, + {0x32c1, 0xfa}, + {0x32c2, 0xc2}, + {0x32c3, 0x00}, + {0x32c4, 0x05}, + {0x32c5, 0x3d}, + {0x32c6, 0x0f}, + {0x32c7, 0xff}, + {0x32c8, 0xaf}, + {0x32c9, 0x0f}, + {0x32ca, 0xf1}, + {0x32cb, 0xe5}, + {0x32cc, 0x00}, + {0x32cd, 0x21}, + {0x32ce, 0xdd}, + {0x32cf, 0x0f}, + {0x32d0, 0xe8}, + {0x32d1, 0x6a}, + {0x32d2, 0x0f}, + {0x32d3, 0xf4}, + {0x32d4, 0xfb}, + {0x32d5, 0x00}, + {0x32d6, 0x0c}, + {0x32d7, 0x89}, + {0x32d8, 0x00}, + {0x32d9, 0x7c}, + {0x32da, 0x79}, + {0x32db, 0x0f}, + {0x32dc, 0xde}, + {0x32dd, 0x19}, + {0x32de, 0x00}, + {0x32df, 0x19}, + {0x32e0, 0xe8}, + {0x32e1, 0x0f}, + {0x32e2, 0xf3}, + {0x32e3, 0x41}, + {0x32e4, 0x00}, + {0x32e5, 0x03}, + {0x32e6, 0x4c}, + {0x32e7, 0x00}, + {0x32e8, 0x05}, + {0x32e9, 0x73}, + {0x32ea, 0x0f}, + {0x32eb, 0xd6}, + {0x32ec, 0xa5}, + {0x32ed, 0x00}, + {0x32ee, 0x1f}, + {0x32ef, 0x81}, + {0x32f0, 0x0f}, + {0x32f1, 0xdc}, + {0x32f2, 0xe6}, + {0x32f3, 0x00}, + {0x32f4, 0x18}, + {0x32f5, 0x65}, + {0x32f6, 0x00}, + {0x32f7, 0x00}, + {0x32f8, 0x11}, + {0x32f9, 0x0f}, + {0x32fa, 0xed}, + {0x32fb, 0x65}, + {0x32fc, 0x00}, + {0x32fd, 0x23}, + {0x32fe, 0x12}, + {0x32ff, 0x0f}, + {0x3300, 0xcf}, + {0x3301, 0x28}, + {0x3302, 0x00}, + {0x3303, 0x2b}, + {0x3304, 0xda}, + {0x3305, 0x0f}, + {0x3306, 0xef}, + {0x3307, 0xae}, + {0x3308, 0x0f}, + {0x3309, 0xeb}, + {0x330a, 0x13}, + {0x330b, 0x00}, + {0x330c, 0x27}, + {0x330d, 0xb8}, + {0x330e, 0x0f}, + {0x330f, 0xec}, + {0x3310, 0x69}, + {0x3311, 0x00}, + {0x3312, 0x2f}, + {0x3313, 0x5f}, + {0x3314, 0x0f}, + {0x3315, 0xdf}, + {0x3316, 0x4f}, + {0x3317, 0x00}, + {0x3318, 0x05}, + {0x3319, 0x70}, + {0x331a, 0x00}, + {0x331b, 0x0f}, + {0x331c, 0xd2}, + {0x331d, 0x0f}, + {0x331e, 0xe1}, + {0x331f, 0xd8}, + {0x3320, 0x00}, + {0x3321, 0x09}, + {0x3322, 0xcf}, + {0x3323, 0x0f}, + {0x3324, 0xf2}, + {0x3325, 0x6e}, + {0x3326, 0x0f}, + {0x3327, 0xf6}, + {0x3328, 0xb4}, + {0x3329, 0x00}, + {0x332a, 0x0d}, + {0x332b, 0x87}, + {0x332c, 0x00}, + {0x332d, 0x08}, + {0x332e, 0x1e}, + {0x332f, 0x0f}, + {0x3330, 0xfa}, + {0x3331, 0x6e}, + {0x3332, 0x0f}, + {0x3333, 0xff}, + {0x3334, 0xaa}, + 
{0x3335, 0x0f}, + {0x3336, 0xf2}, + {0x3337, 0xc0}, + {0x3338, 0x00}, + {0x3339, 0x1d}, + {0x333a, 0x18}, + {0x333b, 0x0f}, + {0x333c, 0xef}, + {0x333d, 0xed}, + {0x333e, 0x0f}, + {0x333f, 0xec}, + {0x3340, 0xf6}, + {0x3341, 0x00}, + {0x3342, 0x16}, + {0x3343, 0x8e}, + {0x3344, 0x00}, + {0x3345, 0x9c}, + {0x3346, 0x52}, + {0x3347, 0x0f}, + {0x3348, 0xcf}, + {0x3349, 0xb9}, + {0x334a, 0x00}, + {0x334b, 0x29}, + {0x334c, 0xe9}, + {0x334d, 0x0f}, + {0x334e, 0xe2}, + {0x334f, 0x83}, + {0x3350, 0x00}, + {0x3351, 0x11}, + {0x3352, 0xcc}, + {0x3353, 0x0f}, + {0x3354, 0xff}, + {0x3355, 0xf4}, + {0x3356, 0x0f}, + {0x3357, 0xc1}, + {0x3358, 0xa4}, + {0x3359, 0x00}, + {0x335a, 0x2f}, + {0x335b, 0xce}, + {0x335c, 0x0f}, + {0x335d, 0xc5}, + {0x335e, 0xbb}, + {0x335f, 0x00}, + {0x3360, 0x35}, + {0x3361, 0x2a}, + {0x3362, 0x0f}, + {0x3363, 0xe6}, + {0x3364, 0x2a}, + {0x3365, 0x0f}, + {0x3366, 0xf7}, + {0x3367, 0x44}, + {0x3368, 0x00}, + {0x3369, 0x31}, + {0x336a, 0xfe}, + {0x336b, 0x0f}, + {0x336c, 0xb6}, + {0x336d, 0x84}, + {0x336e, 0x00}, + {0x336f, 0x3c}, + {0x3370, 0x71}, + {0x3371, 0x0f}, + {0x3372, 0xe5}, + {0x3373, 0xfe}, + {0x3374, 0x0f}, + {0x3375, 0xf2}, + {0x3376, 0x87}, + {0x3377, 0x00}, + {0x3378, 0x29}, + {0x3379, 0x2b}, + {0x337a, 0x0f}, + {0x337b, 0xe5}, + {0x337c, 0x3f}, + {0x337d, 0x00}, + {0x337e, 0x45}, + {0x337f, 0xc6}, + {0x3380, 0x0f}, + {0x3381, 0xdf}, + {0x3382, 0xe6}, + {0x3383, 0x0f}, + {0x3384, 0xfb}, + {0x3385, 0x0f}, + {0x3386, 0x00}, + {0x3387, 0x0f}, + {0x3388, 0xf4}, + {0x3389, 0x0f}, + {0x338a, 0xdf}, + {0x338b, 0x72}, + {0x338c, 0x00}, + {0x338d, 0x0e}, + {0x338e, 0xaf}, + {0x338f, 0x0f}, + {0x3390, 0xed}, + {0x3391, 0x7a}, + {0x3392, 0x0f}, + {0x3393, 0xe5}, + {0x3394, 0xab}, + {0x3395, 0x00}, + {0x3396, 0x18}, + {0x3397, 0x43}, + {0x3398, 0x00}, + {0x3399, 0x1b}, + {0x339a, 0x41}, + {0x339b, 0x0f}, + {0x339c, 0xea}, + {0x339d, 0x84}, + {0x339e, 0x0f}, + {0x339f, 0xfd}, + {0x33a0, 0xdb}, + {0x33a1, 0x0f}, + {0x33a2, 0xe9}, + {0x33a3, 0xbd}, + {0x33a4, 0x00}, + {0x33a5, 0x30}, + {0x33a6, 0x77}, + {0x33a7, 0x0f}, + {0x33a8, 0xe9}, + {0x33a9, 0x93}, + {0x33aa, 0x0f}, + {0x33ab, 0xd7}, + {0x33ac, 0xde}, + {0x33ad, 0x00}, + {0x33ae, 0x2a}, + {0x33af, 0x14}, + {0x309D, 0x62}, + {0x309d, 0x22}, /* shading enable */ + /*LC setting End */ +}; /* lc_setting} */ + +static struct wake_lock s5k3e2fx_wake_lock; + +static inline void init_suspend(void) +{ + wake_lock_init(&s5k3e2fx_wake_lock, WAKE_LOCK_IDLE, "s5k3e2fx"); +} + +static inline void deinit_suspend(void) +{ + wake_lock_destroy(&s5k3e2fx_wake_lock); +} + +static inline void prevent_suspend(void) +{ + wake_lock(&s5k3e2fx_wake_lock); +} + +static inline void allow_suspend(void) +{ + wake_unlock(&s5k3e2fx_wake_lock); +} + +struct reg_struct { +/* PLL setting */ + uint8_t pre_pll_clk_div; /* 0x0305 */ + uint8_t pll_multiplier_msb; /* 0x0306 */ + uint8_t pll_multiplier_lsb; /* 0x0307 */ + uint8_t vt_pix_clk_div; /* 0x0301 */ + uint8_t vt_sys_clk_div; /* 0x0303 */ + uint8_t op_pix_clk_div; /* 0x0309 */ + uint8_t op_sys_clk_div; /* 0x030B */ +/* Data Format */ + uint8_t ccp_data_format_msb; /* 0x0112 */ + uint8_t ccp_data_format_lsb; /* 0x0113 */ +/* Preview Output Size */ + uint8_t x_output_size_msb; /* 0x034C */ + uint8_t x_output_size_lsb; /* 0x034D */ + uint8_t y_output_size_msb; /* 0x034E */ + uint8_t y_output_size_lsb; /* 0x034F */ +/* add the X-Y addr setting position */ + uint8_t x_addr_start_MSB; /* 0x0344 */ + uint8_t x_addr_start_LSB; /* 0x0345 */ + uint8_t y_addr_start_MSB; /* 0x0346 */ + uint8_t y_addr_start_LSB; /* 
0x0347 */ + uint8_t x_addr_end_MSB; /* 0x0348 */ + uint8_t x_addr_end_LSB; /* 0x0349 */ + uint8_t y_addr_end_MSB; /* 0x034A */ + uint8_t y_addr_end_LSB; /* 0x034B */ +/* change the setting position */ +/* Frame format */ + uint8_t frame_length_lines_msb; /* 0x0340 */ + uint8_t frame_length_lines_lsb; /* 0x0341 */ + uint8_t line_length_pck_msb; /* 0x0342 */ + uint8_t line_length_pck_lsb; /* 0x0343 */ +/* binning */ + uint8_t x_even_inc; /* 0x0381 */ + uint8_t x_odd_inc; /* 0x0383 */ + uint8_t y_even_inc; /* 0x0385 */ + uint8_t y_odd_inc; /* 0x0387 */ + uint8_t binning_enable; /* 0x3014 */ +/* Samsung MSR Setting */ + uint8_t sel_ccp; /* 0x30C4 */ + uint8_t ld_start; /* 0x3000 */ + uint8_t ld_end; /* 0x3001 */ + uint8_t sl_start; /* 0x3002 */ + uint8_t sl_end; /* 0x3003 */ + uint8_t rx_start; /* 0x3004 */ + uint8_t s1_start; /* 0x3005 */ + uint8_t s1_end; /* 0x3006 */ + uint8_t s1s_start; /* 0x3007 */ + uint8_t s1s_end; /* 0x3008 */ + uint8_t s3_start; /* 0x3009 */ + uint8_t s3_end; /* 0x300A */ + uint8_t cmp_en_start; /* 0x300B */ + uint8_t clp_sl_start; /* 0x300C */ + uint8_t clp_sl_end; /* 0x300D */ + uint8_t off_start; /* 0x300E */ + uint8_t rmp_en_start; /* 0x300F */ + uint8_t tx_start; /* 0x3010 */ + uint8_t tx_end; /* 0x3011 */ + uint8_t stx_width; /* 0x3012 */ +/* Samsung other MSR setting */ + uint8_t clamp_on; /* 0x3015 */ + uint8_t reg_301d_reserved; /* 0x301D */ + uint8_t vpix; /* 0x3024 */ + uint8_t reg_3028_reserved; /* 0x3028 */ + uint8_t reg_3070_reserved; /* 0x3070 */ + uint8_t reg_3072_reserved; /* 0x3072 */ + uint8_t reg_301b_reserved; /* 0x301B */ + uint8_t offset; /* 0x307E */ + uint8_t reg_30bd_reserved; /* 0x30BD */ + uint8_t reg_30c2_reserved; /* 0x30C2 */ + uint8_t shade_clk_enable; /* 0x30AC */ + uint8_t reg_3151_reserved; /* 0x3151 */ /* 100202 the right address is 0x3151 */ + uint8_t reg_3029_reserved; /* 0x3029 */ + uint8_t reg_30bf_reserved; /* 0x30BF */ + uint8_t reg_3022_reserved; /* 0x3022 */ + uint8_t reg_3019_reserved; /* 0x3019 */ +/* end: Samsung other MSR setting */ + uint8_t reg_3152_reserved; /* 0x3152 */ +/* Samsung signal output setting */ + uint8_t reg_3150_reserved; /* 0x3150 */ + uint8_t reg_3157_reserved; /* 0x3157 */ + uint8_t reg_3159_reserved; /* 0x3159 */ +/* end: Samsung signal output setting */ + uint8_t reg_315A_reserved; /* 0x315A */ +/* AEC Setting */ + uint8_t analogue_gain_code_global_msb; /* 0x0204 */ + uint8_t analogue_gain_code_global_lsb; /* 0x0205 */ + uint8_t fine_integration_time; /* 0x0200 */ + uint8_t coarse_integration_time_msb; /* 0x0202 */ + uint8_t coarse_integration_time_lsb; /* 0x0203 */ /* 100202 Add coarse_integration_time_lsb */ +/* LC Preview/Snapshot difference register */ +/* Preview LC Setting */ + uint8_t sh4ch_blk_width_r; /* 0x309E */ + uint8_t sh4ch_blk_height_r; /* 0x309F */ + uint8_t sh4ch_step_x_r_MSB; /* 0x30A0 */ + uint8_t sh4ch_step_x_r_LSB; /* 0x30A1 */ + uint8_t sh4ch_step_y_r_MSB; /* 0x30A2 */ + uint8_t sh4ch_step_y_r_LSB; /* 0x30A3 */ + uint8_t sh4ch_start_blk_cnt_x_r; /* 0x30A4 */ + uint8_t sh4ch_start_blk_int_x_r; /* 0x30A5 */ + uint8_t sh4ch_start_frac_x_r_MSB; /* 0x30A6 */ + uint8_t sh4ch_start_frac_x_r_LSB; /* 0x30A7 */ + uint8_t sh4ch_start_blk_cnt_y_r; /* 0x30A8 */ + uint8_t sh4ch_start_blk_int_y_r; /* 0x30A9 */ + uint8_t sh4ch_start_frac_y_r_MSB; /* 0x30AA */ + uint8_t sh4ch_start_frac_y_r_LSB; /* 0x30AB */ +/* end: LC Preview/Snapshot difference register */ + uint32_t size_h; + uint32_t blk_l; + uint32_t size_w; + uint32_t blk_p; +}; + +struct reg_struct s5k3e2fx_reg_pat[2] = { + { /* 
Preview */ +/* PLL setting */ + 0x06, /* pre_pll_clk_div REG=0x0305 */ + 0x00, /* pll_multiplier_msb REG=0x0306 */ + REG_PLL_MULTIPLIER_LSB_VALUE, + /* pll_multiplier_lsb REG=0x0307 */ + 0x08, /* vt_pix_clk_div REG=0x0301 */ + 0x01, /* vt_sys_clk_div REG=0x0303 */ + 0x08, /* op_pix_clk_div REG=0x0309 */ + 0x01, /* op_sys_clk_div REG=0x030B */ +/* Data Format */ + 0x0a, /* ccp_data_format_msb REG=0x0112 */ + 0x0a, /* ccp_data_format_lsb REG=0x0113 */ +/* Preview Output Size */ + 0x05, /* x_output_size_msb REG=0x034C */ + 0x10, /* x_output_size_lsb REG=0x034D */ + 0x03, /* y_output_size_msb REG=0x034E */ + 0xcc, /* y_output_size_lsb REG=0x034F */ +/* X-Y addr setting position. Start */ + 0x00, /* x_addr_start_MSB REG=0x0344 */ + 0x00, /* x_addr_start_LSB REG=0x0345 */ /* 100202 Change to 00 to the same as DesireC */ + 0x00, /* y_addr_start_MSB REG=0x0346 */ + 0x00, /* y_addr_start_LSB REG=0x0347 */ /* 100202 Change to 00 to the same as DesireC */ + 0x0a, /* x_addr_end_MSB REG=0x0348 */ + 0x2F, /* x_addr_end_LSB REG=0x0349 */ /* 100202 Change to 2F to the same as DesireC */ + 0x07, /* y_addr_end_MSB REG=0x034A */ + 0xA7, /* y_addr_end_LSB REG=0x034B */ /* 100202 Change to A7 to the same as DesireC */ +/* change the setting position */ +/* Frame format */ + 0x03, /* frame_length_lines_msb REG=0x0340 */ + 0xe2, /* frame_length_lines_lsb REG=0x0341 */ + 0x0a, /* line_length_pck_msb REG=0x0342 */ + 0xac, /* line_length_pck_lsb REG=0x0343 */ +/* enable binning for preview */ + 0x01, /* x_even_inc REG=0x0381 */ + 0x01, /* x_odd_inc REG=0x0383 */ + 0x01, /* y_even_inc REG=0x0385 */ + 0x03, /* y_odd_inc REG=0x0387 */ + 0x06, /* binning_enable REG=0x3014 */ +/* Samsung MSR Setting */ + 0x01, /* sel_ccp REG=0x30C4 */ + 0x03, /* ld_start REG=0x3000 */ + 0x94, /* ld_end REG=0x3001 */ + 0x02, /* sl_start REG=0x3002 */ + 0x95, /* sl_end REG=0x3003 */ + 0x0f, /* rx_start REG=0x3004 */ + 0x05, /* s1_start REG=0x3005 */ + 0x3c, /* s1_end REG=0x3006 */ + 0x8c, /* s1s_start REG=0x3007 */ + 0x93, /* s1s_end REG=0x3008 */ + 0x05, /* s3_start REG=0x3009 */ + 0x3a, /* s3_end REG=0x300A */ + 0x10, /* cmp_en_start REG=0x300B */ + 0x02, /* clp_sl_start REG=0x300C */ + 0x3e, /* clp_sl_end REG=0x300D */ + 0x02, /* off_start REG=0x300E */ + 0x0e, /* rmp_en_start REG=0x300F */ + 0x46, /* tx_start REG=0x3010 */ + 0x64, /* tx_end REG=0x3011 */ + 0x1e, /* stx_width REG=0x3012 */ +/* Samsung other MSR setting. */ + 0x00, /* clamp_on REG=0x3015 */ + 0x3f, /* reg_301d_reserved REG=0x301D */ + 0x04, /* vpix REG=0x3024 */ + 0x40, /* reg_3028_reserved REG=0x3028 */ + 0xdf, /* reg_3070_reserved REG=0x3070 */ + 0x20, /* reg_3072_reserved REG=0x3072 */ + 0x73, /* reg_3073_reserved REG=0x301B */ + 0x02, /* offset REG=0x307E */ + 0x06, /* reg_30bd_reserved REG=0x30BD */ + 0x0b, /* reg_30c2_reserved REG=0x30C2 */ + 0x81, /* shade_clk_enable REG=0x30AC */ + 0xe6, /* reg_3151_reserved REG=0x3151 */ /* 100202 the right address is 0x3151 */ + 0x02, /* reg_3029_reserved REG=0x3029 */ + 0x00, /* reg_30bf_reserved REG=0x30BF */ + 0x87, /* reg_3022_reserved REG=0x3022 */ + 0x60, /* reg_3019_reserved REG=0x3019 */ +/* end: Samsung other MSR setting. 
*/ + 0x08, /* reg_3152_reserved REG=0x3152 */ + 0x50, /* reg_3150_reserved REG=0x3150 */ +/* Inverse PCLK */ + 0x04, /* reg_3157_reserved REG=0x3157 */ +/* PCLK Delay offset; 0x0a will delay around 4ns at 80MHz */ + 0x0f, /* reg_3159_reserved REG=0x3159 */ +/* HS, VS driving strength [3:2]=>VS, [1:0]=>HS 00:2mA, 01:4mA, 10:6mA, + * 11:8mA + */ + 0xf0, /* reg_315A_reserved REG=0x315A */ +/* PCLK, DATA driving strength [7:6]=>data, [5:4]=>PCLK 00:2mA, 01:4mA, 10:6mA, + * 11:8mA + */ +/* AEC Setting */ + 0x00, /* analogue_gain_code_global_msb REG=0x0204 */ + REG_ANALOGUE_GAIN_CODE_GLOBAL_LSB_VALUE, + /* analogue_gain_code_global_lsb REG=0x0205 */ + 0x02, /* fine_integration_time REG=0x0200 */ + 0x03, /* coarse_integration_time_msb REG=0x0202 */ + 0x00, /* coarse_integration_time_lsb REG=0x0203 */ /* 100202 Add coarse_integration_time_lsb */ +/* LC Preview/Snapshot difference register. */ +/* Preview LC config Setting */ + 0x52, /* sh4ch_blk_width_r REG=0x309E */ + 0x3e, /* sh4ch_blk_height_r REG=0x309F */ + 0x03, /* sh4ch_step_x_r_MSB REG=0x30A0 */ + 0x1f, /* sh4ch_step_x_r_LSB REG=0x30A1 */ + 0x04, /* sh4ch_step_y_r_MSB REG=0x30A2 */ + 0x21, /* sh4ch_step_y_r_LSB REG=0x30A3 */ + 0x04, /* sh4ch_start_blk_cnt_x_r REG=0x30A4 */ + 0x00, /* sh4ch_start_blk_int_x_r REG=0x30A5 */ + 0x0c, /* sh4ch_start_frac_x_r_MSB REG=0x30A6 */ + 0x7c, /* sh4ch_start_frac_x_r_LSB REG=0x30A7 */ + 0x04, /* sh4ch_start_blk_cnt_y_r REG=0x30A8 */ + 0x00, /* sh4ch_start_blk_int_y_r REG=0x30A9 */ + 0x10, /* sh4ch_start_frac_y_r_MSB REG=0x30AA */ + 0x84, /* sh4ch_start_frac_y_r_LSB REG=0x30AB */ +/* end: LC Preview/Snapshot difference register. */ + S5K3E2FX_QTR_SIZE_HEIGHT, + 18, + S5K3E2FX_QTR_SIZE_WIDTH, + 1436}, + { /* Snapshot */ +/* PLL setting */ + 0x06, /* pre_pll_clk_div REG=0x0305 */ + 0x00, /* pll_multiplier_msb REG=0x0306 */ + REG_PLL_MULTIPLIER_LSB_VALUE, + /* pll_multiplier_lsb REG=0x0307 */ + 0x08, /* vt_pix_clk_div REG=0x0301 */ + 0x01, /* vt_sys_clk_div REG=0x0303 */ + 0x08, /* op_pix_clk_div REG=0x0309 */ + 0x01, /* op_sys_clk_div REG=0x030B */ +/* Data Format */ + 0x0a, /* ccp_data_format_msb REG=0x0112 */ + 0x0a, /* ccp_data_format_lsb REG=0x0113 */ +/* Snapshot Output Size */ + 0x0a, /* x_output_size_msb REG=0x034C */ + 0x30, /* x_output_size_lsb REG=0x034D */ + 0x07, /* y_output_size_msb REG=0x034E */ + 0xa8, /* y_output_size_lsb REG=0x034F */ +/* add the X-Y addr setting position. */ + 0x00, /* x_addr_start_MSB REG=0x0344 */ + 0x00, /* x_addr_start_LSB REG=0x0345 */ + 0x00, /* y_addr_start_MSB REG=0x0346 */ + 0x00, /* y_addr_start_LSB REG=0x0347 */ + 0x0a, /* x_addr_end_MSB REG=0x0348 */ + 0x2F, /* x_addr_end_LSB REG=0x0349 */ + 0x07, /* y_addr_end_MSB REG=0x034A */ + 0xA7, /* y_addr_end_LSB REG=0x034B */ +/* Change the setting position. 
*/ +/* Frame format */ + 0x07, /* frame_length_lines_msb REG=0x0340 */ + 0xb6, /* frame_length_lines_lsb REG=0x0341 */ + 0x0a, /* line_length_pck_msb REG=0x0342 */ + 0xac, /* line_length_pck_lsb REG=0x0343 */ +/* disable binning for snapshot */ + 0x01, /* x_even_inc REG=0x0381 */ + 0x01, /* x_odd_inc REG=0x0383 */ + 0x01, /* y_even_inc REG=0x0385 */ + 0x01, /* y_odd_inc REG=0x0387 */ + 0x00, /* binning_enable REG=0x3014 */ +/* Samsung MSR Setting */ + 0x01, /* sel_ccp REG=0x30C4 */ + 0x03, /* ld_start REG=0x3000 */ + 0x94, /* ld_end REG=0x3001 */ + 0x02, /* sl_start REG=0x3002 */ + 0x95, /* sl_end REG=0x3003 */ + 0x0f, /* rx_start REG=0x3004 */ + 0x05, /* s1_start REG=0x3005 */ + 0x3c, /* s1_end REG=0x3006 */ + 0x8c, /* s1s_start REG=0x3007 */ + 0x93, /* s1s_end REG=0x3008 */ + 0x05, /* s3_start REG=0x3009 */ + 0x3a, /* s3_end REG=0x300A */ + 0x10, /* cmp_en_start REG=0x300B */ + 0x02, /* clp_sl_start REG=0x300C */ + 0x3e, /* clp_sl_end REG=0x300D */ + 0x02, /* off_start REG=0x300E */ + 0x0e, /* rmp_en_start REG=0x300F */ + 0x46, /* tx_start REG=0x3010 */ + 0x64, /* tx_end REG=0x3011 */ + 0x1e, /* stx_width REG=0x3012 */ +/* Add Samsung other MSR setting. */ + 0x00, /* clamp_on REG=0x3015 */ + 0x3f, /* reg_301d_reserved REG=0x301D */ + 0x04, /* vpix REG=0x3024 */ + 0x40, /* reg_3028_reserved REG=0x3028 */ + 0xdf, /* reg_3070_reserved REG=0x3070 */ + 0x20, /* reg_3072_reserved REG=0x3072 */ + 0x73, /* reg_3073_reserved REG=0x301B */ + 0x02, /* offset REG=0x307E */ + 0x06, /* reg_30bd_reserved REG=0x30BD */ + 0x0b, /* reg_30c2_reserved REG=0x30C2 */ + 0x81, /* shade_clk_enable REG=0x30AC */ + 0xe6, /* reg_3151_reserved REG=0x3151 */ /* 100202 the right address is 0x3151 */ + 0x02, /* reg_3029_reserved REG=0x3029 */ + 0x00, /* reg_30bf_reserved REG=0x30BF */ + 0x87, /* reg_3022_reserved REG=0x3022 */ + 0x60, /* reg_3019_reserved REG=0x3019 */ +/* end: Add Samsung other MSR setting. */ + 0x08, /* reg_3152_reserved REG=0x3152 */ +/* Add Samsung signal output setting. */ + 0x50, /* reg_3150_reserved REG=0x3150 */ +/* Inverse PCLK = 0x50 */ + 0x04, /* reg_3157_reserved REG=0x3157 */ +/* PCLK Delay offset; 0x0a will delay around 4ns at 80MHz */ + 0x0f, /* reg_3159_reserved REG=0x3159 */ +/* HS, VS driving strength [3:2]=>VS, [1:0]=>HS 00:2mA, 01:4mA, 10:6mA, + * 11:8mA + */ + 0xf0, /* reg_315A_reserved REG=0x315A */ +/* PCLK, DATA driving strength [7:6]=>data, [5:4]=>PCLK 00:2mA, 01:4mA, 10:6mA, + * 11:8mA + */ +/* AEC Setting */ + 0x00, /* analogue_gain_code_global_msb REG=0x0204 */ + REG_ANALOGUE_GAIN_CODE_GLOBAL_LSB_VALUE, + /* analogue_gain_code_global_lsb REG=0x0205 */ + 0x02, /* fine_integration_time REG=0x0200 */ + 0x03, /* coarse_integration_time_msb REG=0x0202 */ + 0x00, /* coarse_integration_time_lsb REG=0x0203 */ /* 100202 Add coarse_integration_time_lsb */ +/* Add LC Preview/Snapshot diff register. 
*/ +/* Snapshot LC config Setting */ + 0x52, /* sh4ch_blk_width_r REG=0x309E */ + 0x7b, /* sh4ch_blk_height_r REG=0x309F */ + 0x03, /* sh4ch_step_x_r_MSB REG=0x30A0 */ + 0x1f, /* sh4ch_step_x_r_LSB REG=0x30A1 */ + 0x02, /* sh4ch_step_y_r_MSB REG=0x30A2 */ + 0x15, /* sh4ch_step_y_r_LSB REG=0x30A3 */ + 0x00, /* sh4ch_start_blk_cnt_x_r REG=0x30A4 */ + 0x00, /* sh4ch_start_blk_int_x_r REG=0x30A5 */ + 0x00, /* sh4ch_start_frac_x_r_MSB REG=0x30A6 */ + 0x00, /* sh4ch_start_frac_x_r_LSB REG=0x30A7 */ + 0x00, /* sh4ch_start_blk_cnt_y_r REG=0x30A8 */ + 0x00, /* sh4ch_start_blk_int_y_r REG=0x30A9 */ + 0x00, /* sh4ch_start_frac_y_r_MSB REG=0x30AA */ + 0x00, /* sh4ch_start_frac_y_r_LSB REG=0x30AB */ +/* diff: Add LC Preview/Snapshot diff register. */ + S5K3E2FX_FULL_SIZE_HEIGHT, + 14, + S5K3E2FX_FULL_SIZE_WIDTH, + 124} +}; + +struct s5k3e2fx_work { + struct work_struct work; +}; +static struct s5k3e2fx_work *s5k3e2fx_sensorw; +static struct i2c_client *s5k3e2fx_client; + +struct s5k3e2fx_ctrl { + const struct msm_camera_sensor_info *sensordata; + + int sensormode; + uint32_t fps_divider; /* init to 1 * 0x00000400 */ + uint32_t pict_fps_divider; /* init to 1 * 0x00000400 */ + + uint16_t curr_lens_pos; + uint16_t init_curr_lens_pos; + uint16_t my_reg_gain; + uint32_t my_reg_line_count; + + enum msm_s_resolution prev_res; + enum msm_s_resolution pict_res; + enum msm_s_resolution curr_res; + enum msm_s_test_mode set_test; +}; + +static struct s5k3e2fx_ctrl *s5k3e2fx_ctrl; +static struct platform_device *s5k3e2fx_pdev; + +struct s5k3e2fx_waitevent{ + uint32_t waked_up; + wait_queue_head_t event_wait; +}; +static struct s5k3e2fx_waitevent s5k3e2fx_event; + + +static DECLARE_WAIT_QUEUE_HEAD(s5k3e2fx_wait_queue); +DEFINE_SEMAPHORE(s5k3e2fx_sem); + +#define MAX_I2C_RETRIES 20 +static int i2c_transfer_retry(struct i2c_adapter *adap, + struct i2c_msg *msgs, + int len) +{ + int i2c_retry = 0; + int ns; /* number sent */ + + while (i2c_retry++ < MAX_I2C_RETRIES) { + ns = i2c_transfer(adap, msgs, len); + if (ns == len) + break; + pr_err("%s: try %d/%d: i2c_transfer sent: %d, len %d\n", + __func__, + i2c_retry, MAX_I2C_RETRIES, ns, len); + msleep(10); + } + + return ns == len ? 
0 : -EIO; +} + +static inline int s5k3e2fx_i2c_rxdata(unsigned short saddr, unsigned char *rxdata, + int length) +{ + struct i2c_msg msgs[] = { + { + .addr = saddr, + .flags = 0, + .len = 2, + .buf = rxdata, + }, + { + .addr = saddr, + .flags = I2C_M_RD, + .len = length, + .buf = rxdata, + }, + }; + + return i2c_transfer_retry(s5k3e2fx_client->adapter, msgs, 2); +} + +static inline int s5k3e2fx_i2c_txdata(unsigned short saddr, + unsigned char *txdata, int length) +{ + struct i2c_msg msg[] = { + { + .addr = saddr, + .flags = 0, + .len = length, + .buf = txdata, + }, + }; + + return i2c_transfer_retry(s5k3e2fx_client->adapter, msg, 1); +} + +static int s5k3e2fx_i2c_write_b(unsigned short saddr, unsigned short waddr, + unsigned char bdata) +{ + int rc = -EFAULT; + unsigned char buf[4]; + + memset(buf, 0, sizeof(buf)); + buf[0] = (waddr & 0xFF00) >> 8; + buf[1] = (waddr & 0x00FF); + buf[2] = bdata; + + rc = s5k3e2fx_i2c_txdata(saddr, buf, 3); + + if (rc < 0) + pr_err("i2c_write_w failed, addr = 0x%x, val = 0x%x!\n", + waddr, bdata); + + return rc; +} + +static int s5k3e2fx_i2c_write_table(struct s5k3e2fx_i2c_reg_conf + *reg_cfg_tbl, int num) +{ + int i; + int rc = -EFAULT; + CDBG("s5k3e2fx_i2c_write_table starts\n"); + for (i = 0; i < num; i++) { + CDBG("%d: waddr = 0x%x, bdata = 0x%x\n", i, + (int)reg_cfg_tbl->waddr, (int)reg_cfg_tbl->bdata); + rc = s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, + reg_cfg_tbl->waddr, + reg_cfg_tbl->bdata); + if (rc < 0) + break; + reg_cfg_tbl++; + } + + CDBG("s5k3e2fx_i2c_write_table ends\n"); + return rc; +} + +static int s5k3e2fx_i2c_read_w(unsigned short saddr, unsigned short raddr, + unsigned short *rdata) +{ + int rc = 0; + unsigned char buf[4]; + + if (!rdata) + return -EIO; + + memset(buf, 0, sizeof(buf)); + + buf[0] = (raddr & 0xFF00) >> 8; + buf[1] = (raddr & 0x00FF); + + rc = s5k3e2fx_i2c_rxdata(saddr, buf, 2); + if (rc < 0) + return rc; + + *rdata = buf[0] << 8 | buf[1]; + + if (rc < 0) + pr_err("s5k3e2fx_i2c_read failed!\n"); + + return rc; +} + +static int s5k3e2fx_i2c_read_b(unsigned short saddr, unsigned short raddr, + unsigned short *rdata) +{ + int rc = 0; + unsigned char buf[4]; + + if (!rdata) + return -EIO; + + memset(buf, 0, sizeof(buf)); + + buf[0] = (raddr & 0xFF00) >> 8; + buf[1] = (raddr & 0x00FF); + + rc = s5k3e2fx_i2c_rxdata(saddr, buf, 1); + if (rc < 0) + return rc; + + *rdata = buf[0]; + + if (rc < 0) + pr_err("s5k3e2fx_i2c_read failed!\n"); + + return rc; +} + +static int s5k3e2fx_probe_init_sensor(const struct msm_camera_sensor_info *data) +{ + int rc; + uint16_t chipid = 0; + uint16_t modulever = 0; + + CDBG("s5k3e2fx: gpio_request: %d\n", data->sensor_reset); + rc = gpio_request(data->sensor_reset, "s5k3e2fx"); + if (!rc) + gpio_direction_output(data->sensor_reset, 1); + else { + pr_err("s5k3e2fx: request GPIO(sensor_reset): %d failed\n", + data->sensor_reset); + goto init_probe_fail; + } + CDBG("s5k3e2fx: gpio_free: %d\n", data->sensor_reset); + + gpio_free(data->sensor_reset); + + msleep(20); + + CDBG("s5k3e2fx_sensor_init(): reseting sensor.\n"); + + rc = s5k3e2fx_i2c_read_w(s5k3e2fx_client->addr, S5K3E2FX_REG_MODEL_ID, + &chipid); + if (rc < 0) { + pr_err("s5k3e2fx: read model_id failed: %d\n", rc); + goto init_probe_fail; + } + CDBG("s5k3e2fx_sensor_init(): model_id=0x%X\n", chipid); + + if (chipid != S5K3E2FX_MODEL_ID) { + pr_err("S5K3E2FX wrong model_id = 0x%x\n", chipid); + rc = -ENODEV; + goto init_probe_fail; + } + + rc = s5k3e2fx_i2c_read_b(s5k3e2fx_client->addr, + S5K3E2FX_REG_MODULE_VER, &modulever); + if (rc < 0) { 
+ pr_err("S5K3E2FX read module version failed, line=%d\n", + __LINE__); + goto init_probe_fail; + } + /* modulever = (0xF000 & modulever) >> 8; */ + modulever = 0x00F0 & modulever; + CDBG("s5k3e2fx_sensor_init(): module version=0x%X\n", modulever); + + if (modulever == 0x40) + g_usModuleVersion = 0; + else if (modulever == 0x50) + g_usModuleVersion = 1; + goto init_probe_done; + +init_probe_fail: + pr_err("s5k3e2fx: prob init sensor failed\n"); +init_probe_done: + return rc; +} + +static int s5k3e2fx_init_client(struct i2c_client *client) +{ + /* Initialize the MSM_CAMI2C Chip */ + init_waitqueue_head(&s5k3e2fx_wait_queue); + return 0; +} + +static const struct i2c_device_id s5k3e2fx_i2c_id[] = { + {"s5k3e2fx", 0}, + {} +}; + +static int s5k3e2fx_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int rc = 0; + CDBG("s5k3e2fx_probe called!\n"); + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + pr_err("i2c_check_functionality failed\n"); + goto probe_failure; + } + + s5k3e2fx_sensorw = kzalloc(sizeof(struct s5k3e2fx_work), GFP_KERNEL); + if (!s5k3e2fx_sensorw) { + pr_err("kzalloc failed\n"); + rc = -ENOMEM; + goto probe_failure; + } + + i2c_set_clientdata(client, s5k3e2fx_sensorw); + s5k3e2fx_init_client(client); + s5k3e2fx_client = client; + + msleep(50); + + CDBG("s5k3e2fx_probe successed! rc = %d\n", rc); + return 0; + +probe_failure: + pr_err("s5k3e2fx_probe failed! rc = %d\n", rc); + return rc; +} + +static int __exit s5k3e2fx_i2c_remove(struct i2c_client *client) +{ + struct s5k3e2fx_work_t *sensorw = i2c_get_clientdata(client); + free_irq(client->irq, sensorw); + deinit_suspend(); + s5k3e2fx_client = NULL; + kfree(sensorw); + return 0; +} + +static struct i2c_driver s5k3e2fx_i2c_driver = { + .id_table = s5k3e2fx_i2c_id, + .probe = s5k3e2fx_i2c_probe, + .remove = __exit_p(s5k3e2fx_i2c_remove), + .driver = { + .name = "s5k3e2fx", + }, +}; + +#if 0 +static int s5k3e2fx_test(enum msm_s_test_mode mo) +{ + int rc = 0; + + if (mo == S_TEST_OFF) + rc = 0; + else + rc = s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, + REG_TEST_PATTERN_MODE, (uint16_t) mo); + + return rc; +} +#endif + +static int s5k3e2fx_setting_INIT_EVT4(void) +{ + int rc = 0; + struct s5k3e2fx_i2c_reg_conf EVT4_INIT[] = { + /*pclk setting*/ + {0x0305, 0x06}, + {0x0306, 0x00}, + {0x0307, 0x83}, + {0x0301, 0x08}, + {0x0303, 0x01}, + {0x0309, 0x08}, + {0x030b, 0x01}, + /*output size*/ + {0x034c, 0x05}, + {0x034d, 0x10}, + {0x034e, 0x03}, + {0x034f, 0xcc}, + /*frame format (min blanking)*/ + {0x0340, 0x03}, + {0x0341, 0xe2}, + {0x0342, 0x0a}, + {0x0343, 0xac}, + /*Binning */ + {0x0381, 0x01}, + {0x0383, 0x01}, + {0x0385, 0x01}, + {0x0387, 0x03}, + {0x3014, 0x06}, + /*MSR setting*/ + {0x30c4, 0x01}, + {0x3000, 0x03}, + {0x3001, 0x94}, + {0x3002, 0x02}, + {0x3003, 0x95}, + {0x3004, 0x0f}, + {0x3005, 0x05}, + {0x3006, 0x3c}, + {0x3007, 0x8c}, + {0x3008, 0x93}, + {0x3009, 0x05}, + {0x300a, 0x3a}, + {0x300c, 0x02}, + {0x300d, 0x3e}, + {0x300f, 0x0e}, + {0x3010, 0x46}, + {0x3011, 0x64}, + {0x3012, 0x1e}, + {0x301d, 0x3f}, + {0x3024, 0x04}, + {0x3028, 0x40}, + {0x3070, 0xdf}, + {0x301b, 0x73}, + {0x307e, 0x02}, + {0x30bd, 0x06}, + {0x30c2, 0x0b}, + {0x30ac, 0x81}, + {0x3151, 0xe6}, + {0x3029, 0x02}, + {0x30bf, 0x00}, + {0x3022, 0x87}, + /*tune ADC to got batter yield rate in EDS*/ + {0x3019, 0x60}, + /*AF driving strength*/ + {0x3146, 0x3c}, + {0x3152, 0x08}, + /*data pclk driving strength*/ + {0x315a, 0x7f}, + /*h sync v sync driving strength*/ + {0x3159, 0x0f}, + {0x3157, 0x03}, + {0x0204, 
0x00}, + {0x0205, 0x80}, + {0x0202, 0x03}, + {0x0203, 0xd9}, + {0x0200, 0x02}, + {0x3130, 0x03}, + {0x0100, 0x01}, + }; + + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x0100, 0x00); + msleep(30); + /* write REG_INIT registers */ + rc = s5k3e2fx_i2c_write_table(&EVT4_INIT[0], + ARRAY_SIZE(EVT4_INIT)); + return rc; +} + +static int s5k3e2fx_setting_INIT_EVT5(enum msm_s_setting rt) +{ + int rc = 0; + struct s5k3e2fx_i2c_reg_conf EVT5_INIT[] = { + {S5K3E2FX_REG_MODE_SELECT, + S5K3E2FX_MODE_SELECT_SW_STANDBY}, + /*Output Size */ + {REG_X_OUTPUT_SIZE_MSB, + s5k3e2fx_reg_pat[rt].x_output_size_msb}, + {REG_X_OUTPUT_SIZE_LSB, + s5k3e2fx_reg_pat[rt].x_output_size_lsb}, + {REG_Y_OUTPUT_SIZE_MSB, + s5k3e2fx_reg_pat[rt].y_output_size_msb}, + {REG_Y_OUTPUT_SIZE_LSB, + s5k3e2fx_reg_pat[rt].y_output_size_lsb}, + /* Start-End address */ +/* 100202 Modify X_ADDR and Y_ADDR Start-end is the same between preview and snapshot like DesireC + {REG_X_ADDR_START_MSB, + s5k3e2fx_reg_pat[rt].x_addr_start_MSB}, + {REG_X_ADDR_START_LSB, + s5k3e2fx_reg_pat[rt].x_addr_start_LSB}, + {REG_Y_ADDR_START_MSB, + s5k3e2fx_reg_pat[rt].y_addr_start_MSB}, + {REG_Y_ADDR_START_LSB, + s5k3e2fx_reg_pat[rt].y_addr_start_LSB}, + {REG_X_ADDR_END_MSB, + s5k3e2fx_reg_pat[rt].x_addr_end_MSB}, + {REG_X_ADDR_END_LSB, + s5k3e2fx_reg_pat[rt].x_addr_end_LSB}, + {REG_Y_ADDR_END_MSB, + s5k3e2fx_reg_pat[rt].y_addr_end_MSB}, + {REG_Y_ADDR_END_LSB, + s5k3e2fx_reg_pat[rt].y_addr_end_LSB}, +*/ + /* Binning */ + {REG_X_EVEN_INC, + s5k3e2fx_reg_pat[rt].x_even_inc}, + {REG_X_ODD_INC, + s5k3e2fx_reg_pat[rt].x_odd_inc}, + {REG_Y_EVEN_INC, + s5k3e2fx_reg_pat[rt].y_even_inc}, + {REG_Y_ODD_INC, + s5k3e2fx_reg_pat[rt].y_odd_inc}, + {REG_BINNING_ENABLE, + s5k3e2fx_reg_pat[rt].binning_enable}, + /* Frame format */ + {REG_FRAME_LENGTH_LINES_MSB, + s5k3e2fx_reg_pat[rt].frame_length_lines_msb}, + {REG_FRAME_LENGTH_LINES_LSB, + s5k3e2fx_reg_pat[rt].frame_length_lines_lsb}, +/* 100202 Remove the AEC setting in EVT5_INIT + {REG_LINE_LENGTH_PCK_MSB, + s5k3e2fx_reg_pat[rt].line_length_pck_msb}, + {REG_LINE_LENGTH_PCK_LSB, + s5k3e2fx_reg_pat[rt].line_length_pck_lsb}, +*/ + /* MSR setting */ +/* 100202 Remove the AEC and the same value setting in EVT5_INIT + {REG_ANALOGUE_GAIN_CODE_GLOBAL_MSB, + s5k3e2fx_reg_pat[rt].analogue_gain_code_global_msb}, + {REG_ANALOGUE_GAIN_CODE_GLOBAL_LSB, + s5k3e2fx_reg_pat[rt].analogue_gain_code_global_lsb}, + {REG_FINE_INTEGRATION_TIME, + s5k3e2fx_reg_pat[rt].fine_integration_time}, + {REG_COARSE_INTEGRATION_TIME, + s5k3e2fx_reg_pat[rt].coarse_integration_time_msb}, + {REG_COARSE_INTEGRATION_TIME_LSB, + s5k3e2fx_reg_pat[rt].coarse_integration_time_lsb}, +*/ + {S5K3E2FX_REG_MODE_SELECT, + S5K3E2FX_MODE_SELECT_STREAM}, + }; + /*write table*/ + rc = s5k3e2fx_i2c_write_table(&EVT5_INIT[0], ARRAY_SIZE(EVT5_INIT)); + if (rc < 0) { + pr_err("REG_INIT failed, rc=%d\n", rc); + return rc; + } + return rc; +} + + +static int s5k3e2fx_setting_PREIODIC_EVT4(enum msm_s_setting rt) +{ + int rc = 0; + struct s5k3e2fx_i2c_reg_conf EVT4_1[] = { + /*output size*/ + {0x034c, 0x05}, + {0x034d, 0x10}, + {0x034e, 0x03}, + {0x034f, 0xcc}, + /*frame format (min blanking)*/ + {0x0340, 0x03}, + {0x0341, 0xe2}, + {0x0342, 0x0a}, + {0x0343, 0xac}, + /*Binning*/ + {0x0381, 0x01}, + {0x0383, 0x01}, + {0x0385, 0x01}, + {0x0387, 0x03}, + {0x3014, 0x06}, + {0x30bf, 0x00}, + {0x3022, 0x87}, + /*tune ADC to got batter yield rate in EDS*/ + {0x3019, 0x60}, + /*AF driving strength*/ + {0x3146, 0x3c}, + {0x3152, 0x08}, + /*data pclk driving strength*/ + {0x315a, 0x7f}, + 
{0x3159, 0x0f}, + /*h sync v sync driving strength*/ + {0x3157, 0x03}, + {0x0204, 0x00}, + {0x0205, 0x80}, + {0x0202, 0x03}, + {0x0203, 0xd9}, + {0x0200, 0x02}, + {0x3130, 0x03}, + /*lens shading setting for preview*/ + {0x309e, 0x52}, + {0x309f, 0x3e}, + {0x30a0, 0x03}, + {0x30a1, 0x1f}, + {0x30a2, 0x04}, + {0x30a3, 0x21}, + {0x30a4, 0x04}, + {0x30a5, 0x00}, + {0x30a6, 0x0c}, + {0x30a7, 0x7c}, + {0x30a8, 0x04}, + {0x30a9, 0x00}, + {0x30aa, 0x10}, + {0x30ab, 0x84}, + /*streaimg on*/ + {0x0100, 0x01}, + }; + struct s5k3e2fx_i2c_reg_conf EVT4_2[] = { + /*output size*/ + {0x034c, 0x0a}, + {0x034d, 0x30}, + {0x034e, 0x07}, + {0x034f, 0xa8}, + /*frame format (min blanking)*/ + {0x0340, 0x07}, + {0x0341, 0xb6}, + {0x0342, 0x0a}, + {0x0343, 0xac}, + /*Binning*/ + {0x0381, 0x01}, + {0x0383, 0x01}, + {0x0385, 0x01}, + {0x0387, 0x01}, + {0x3014, 0x00}, + {0x30bf, 0x00}, + {0x3022, 0x87}, + /*tune ADC to got batter yield rate in EDS*/ + {0x3019, 0x60}, + /*AF driving strength*/ + {0x3146, 0x3c}, + {0x3152, 0x08}, + /*data pclk driving strength*/ + {0x315a, 0x7f}, + /*h sync v sync driving strength*/ + {0x3159, 0x0f}, + {0x3157, 0x03}, + {0x0204, 0x00}, + {0x0205, 0x80}, + {0x0202, 0x03}, + {0x0203, 0xd9}, + {0x0200, 0x02}, + {0x3130, 0x03}, + /*lens shading setting for snapshot*/ + {0x309e, 0x52}, + {0x309f, 0x7b}, + {0x30a0, 0x03}, + {0x30a1, 0x1f}, + {0x30a2, 0x02}, + {0x30a3, 0x15}, + {0x30a4, 0x00}, + {0x30a5, 0x00}, + {0x30a6, 0x00}, + {0x30a7, 0x00}, + {0x30a8, 0x00}, + {0x30a9, 0x00}, + {0x30aa, 0x00}, + {0x30ab, 0x00}, + /*streaming on*/ + {0x0100, 0x01}, + }; + struct s5k3e2fx_i2c_reg_conf EVT4_PCLK[] = { + /*pclk setting*/ + {0x0305, 0x06}, + {0x0306, 0x00}, + {0x0307, 0x83}, + {0x0301, 0x08}, + {0x0303, 0x01}, + {0x0309, 0x08}, + {0x030b, 0x01}, + }; + /*streaming off*/ + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x0100, 0x00); + msleep(30); + /*pclk setting*/ + rc = s5k3e2fx_i2c_write_table(&EVT4_PCLK[0], ARRAY_SIZE(EVT4_PCLK)); + /*write table*/ + if (rt == 0) + rc = s5k3e2fx_i2c_write_table(&EVT4_1[0], ARRAY_SIZE(EVT4_1)); + else + rc = s5k3e2fx_i2c_write_table(&EVT4_2[0], ARRAY_SIZE(EVT4_2)); + return rc; +} + + +static int s5k3e2fc_setting_PREIODIC_EVT5(enum msm_s_setting rt) +{ + int rc = 0; + uint16_t num_lperf; + struct s5k3e2fx_i2c_reg_conf tbl_1[] = { + /* skip doing streaming off + {S5K3E2FX_REG_MODE_SELECT, + S5K3E2FX_MODE_SELECT_SW_STANDBY}, + */ + {REG_X_OUTPUT_SIZE_MSB, + s5k3e2fx_reg_pat[rt].x_output_size_msb}, + {REG_X_OUTPUT_SIZE_LSB, + s5k3e2fx_reg_pat[rt].x_output_size_lsb}, + {REG_Y_OUTPUT_SIZE_MSB, + s5k3e2fx_reg_pat[rt].y_output_size_msb}, + {REG_Y_OUTPUT_SIZE_LSB, + s5k3e2fx_reg_pat[rt].y_output_size_lsb}, + /* Start-End address */ +/* 100202 Modify X_ADDR and Y_ADDR Start-end is the same between preview and snapshot like DesireC + {REG_X_ADDR_START_MSB, + s5k3e2fx_reg_pat[rt].x_addr_start_MSB}, + {REG_X_ADDR_START_LSB, + s5k3e2fx_reg_pat[rt].x_addr_start_LSB}, + {REG_Y_ADDR_START_MSB, + s5k3e2fx_reg_pat[rt].y_addr_start_MSB}, + {REG_Y_ADDR_START_LSB, + s5k3e2fx_reg_pat[rt].y_addr_start_LSB}, + {REG_X_ADDR_END_MSB, + s5k3e2fx_reg_pat[rt].x_addr_end_MSB}, + {REG_X_ADDR_END_LSB, + s5k3e2fx_reg_pat[rt].x_addr_end_LSB}, + {REG_Y_ADDR_END_MSB, + s5k3e2fx_reg_pat[rt].y_addr_end_MSB}, + {REG_Y_ADDR_END_LSB, + s5k3e2fx_reg_pat[rt].y_addr_end_LSB}, +*/ + /* Binning */ + {REG_X_EVEN_INC, + s5k3e2fx_reg_pat[rt].x_even_inc}, + {REG_X_ODD_INC, + s5k3e2fx_reg_pat[rt].x_odd_inc}, + {REG_Y_EVEN_INC, + s5k3e2fx_reg_pat[rt].y_even_inc}, + {REG_Y_ODD_INC, + 
s5k3e2fx_reg_pat[rt].y_odd_inc}, + {REG_BINNING_ENABLE, + s5k3e2fx_reg_pat[rt].binning_enable}, + }; + struct s5k3e2fx_i2c_reg_conf tbl_2[] = { + {REG_FRAME_LENGTH_LINES_MSB, 0}, + {REG_FRAME_LENGTH_LINES_LSB, 0}, +/* 100202 Remove the AEC setting in s5k3e2fc_setting_PREIODIC_EVT5 + {REG_LINE_LENGTH_PCK_MSB, + s5k3e2fx_reg_pat[rt].line_length_pck_msb}, + {REG_LINE_LENGTH_PCK_LSB, + s5k3e2fx_reg_pat[rt].line_length_pck_lsb}, + {REG_ANALOGUE_GAIN_CODE_GLOBAL_MSB, + s5k3e2fx_reg_pat[rt].analogue_gain_code_global_msb}, + {REG_ANALOGUE_GAIN_CODE_GLOBAL_LSB, + s5k3e2fx_reg_pat[rt].analogue_gain_code_global_lsb}, + {REG_FINE_INTEGRATION_TIME, + s5k3e2fx_reg_pat[rt].fine_integration_time}, + {REG_COARSE_INTEGRATION_TIME, + s5k3e2fx_reg_pat[rt].coarse_integration_time_msb}, + {REG_COARSE_INTEGRATION_TIME_LSB, + s5k3e2fx_reg_pat[rt].coarse_integration_time_lsb}, +*/ + /* LC Preview/Snapshot difference register*/ + {REG_SH4CH_BLK_WIDTH_R, + s5k3e2fx_reg_pat[rt].sh4ch_blk_width_r}, + {REG_SH4CH_BLK_HEIGHT_R, + s5k3e2fx_reg_pat[rt].sh4ch_blk_height_r}, + {REG_SH4CH_STEP_X_R_MSB, + s5k3e2fx_reg_pat[rt].sh4ch_step_x_r_MSB}, + {REG_SH4CH_STEP_X_R_LSB, + s5k3e2fx_reg_pat[rt].sh4ch_step_x_r_LSB}, + {REG_SH4CH_STEP_Y_R_MSB, + s5k3e2fx_reg_pat[rt].sh4ch_step_y_r_MSB}, + {REG_SH4CH_STEP_Y_R_LSB, + s5k3e2fx_reg_pat[rt].sh4ch_step_y_r_LSB}, + {REG_SH4CH_START_BLK_CNT_X_R, + s5k3e2fx_reg_pat[rt].sh4ch_start_blk_cnt_x_r}, + {REG_SH4CH_START_BLK_INT_X_R, + s5k3e2fx_reg_pat[rt].sh4ch_start_blk_int_x_r}, + {REG_SH4CH_START_FRAC_X_R_MSB, + s5k3e2fx_reg_pat[rt].sh4ch_start_frac_x_r_MSB}, + {REG_SH4CH_START_FRAC_X_R_LSB, + s5k3e2fx_reg_pat[rt].sh4ch_start_frac_x_r_LSB}, + {REG_SH4CH_START_BLK_CNT_Y_R, + s5k3e2fx_reg_pat[rt].sh4ch_start_blk_cnt_y_r}, + {REG_SH4CH_START_BLK_INT_Y_R, + s5k3e2fx_reg_pat[rt].sh4ch_start_blk_int_y_r}, + {REG_SH4CH_START_FRAC_Y_R_MSB, + s5k3e2fx_reg_pat[rt].sh4ch_start_frac_y_r_MSB}, + {REG_SH4CH_START_FRAC_Y_R_LSB, + s5k3e2fx_reg_pat[rt].sh4ch_start_frac_y_r_LSB}, + }; + + /* add EVT5 sensor Samsung difference + * MSR setting between Preview and Capture + */ + + struct s5k3e2fx_i2c_reg_conf + tbl_only_for_EVT5[2][2] = { + { /* S_RES_PREVIEW */ + {0x3062, 0x00}, + {0x3063, 0xD6}, + }, + { /* S_RES_CAPTURE */ + {0x3062, 0x01}, + {0x3063, 0x16}, + } + }; +/* solve greenish: hold for both */ + rc = s5k3e2fx_i2c_write_b( + s5k3e2fx_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_HOLD); + if (rc < 0) + return rc; + + CDBG("Binning_enable = 0x %2x" + "[s5k3e2fx.c s5k3e2fx_setting]\r\n", + s5k3e2fx_reg_pat[rt].binning_enable); + rc = s5k3e2fx_i2c_write_table(&tbl_1[0], ARRAY_SIZE(tbl_1)); + if (rc < 0) { + pr_err("UPDATE_PERIODIC, tb1_1 failed"); + return rc; + } + + /* only for evt5 */ + if (g_usModuleVersion == 1) { + rc = s5k3e2fx_i2c_write_table(&tbl_only_for_EVT5[rt][0], 2); + if (rc < 0) + return rc; + } + + num_lperf = (uint16_t) ((s5k3e2fx_reg_pat[rt].frame_length_lines_msb + << 8) & 0xFF00) + s5k3e2fx_reg_pat[rt].frame_length_lines_lsb; + + num_lperf = num_lperf * s5k3e2fx_ctrl->fps_divider / 0x0400; + + tbl_2[0] = (struct s5k3e2fx_i2c_reg_conf) {REG_FRAME_LENGTH_LINES_MSB, + (num_lperf & 0xFF00) >> 8}; + tbl_2[1] = (struct s5k3e2fx_i2c_reg_conf) {REG_FRAME_LENGTH_LINES_LSB, + (num_lperf & 0x00FF)}; + + rc = s5k3e2fx_i2c_write_table(&tbl_2[0], ARRAY_SIZE(tbl_2)); + if (rc < 0) { + pr_err("UPDATE_PERIODIC, tb1_2 failed"); + return rc; + } + +/* solve greenish: only release for preview */ + if (s5k3e2fx_ctrl->sensormode == SENSOR_PREVIEW_MODE) + { + rc = s5k3e2fx_i2c_write_b( + 
s5k3e2fx_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_UPDATE); + if (rc < 0) + return rc; + } + + + rc = s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, + S5K3E2FX_REG_MODE_SELECT, + S5K3E2FX_MODE_SELECT_STREAM); + if (rc < 0) + return rc; + + return 0; +} + + +static int s5k3e2fx_setting(enum msm_s_reg_update rupdate, + enum msm_s_setting rt) +{ + int rc = 0; + pr_info("s5k3e2fx_setting rupdate:%d g_usModuleVersion:%d\n", + rupdate, g_usModuleVersion); + switch (rupdate) { + case S_UPDATE_PERIODIC:{ + if (g_usModuleVersion == 1) + rc = s5k3e2fc_setting_PREIODIC_EVT5(rt); + else + rc = s5k3e2fx_setting_PREIODIC_EVT4(rt); + } + break; /* UPDATE_PERIODIC */ + + case S_REG_INIT:{ + if (g_usModuleVersion == 1) { + /*EVT5*/ + if (rt == S_RES_PREVIEW || rt == S_RES_CAPTURE) { + unsigned short rData = 0; + mdelay(1); + s5k3e2fx_i2c_read_b(s5k3e2fx_client->addr, + REG_3150_RESERVED, &rData); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, + REG_3150_RESERVED, (rData & 0xFFFE)); + mdelay(1); + s5k3e2fx_i2c_read_b(s5k3e2fx_client->addr, + REG_TYPE1_AF_ENABLE, &rData); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, + REG_TYPE1_AF_ENABLE, (rData | 0x0001)); + mdelay(1); + /* reset fps_divider */ + s5k3e2fx_ctrl->fps_divider = 1 * 0x0400; + /* write REG_INIT registers */ + s5k3e2fx_setting_INIT_EVT5(rt); + } + } else{ + /*EVT4*/ + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, + REG_3150_RESERVED, 0x50); + s5k3e2fx_setting_INIT_EVT4(); + } + } + break; /* REG_INIT */ + + default: + rc = -EFAULT; + break; + } /* switch (rupdate) */ + + return rc; +} + +#define MAX_LAYER_NUM 5 +#define FIRST_LAYER 9 +#define FUSE_ID_FIRST_ADDR 14 + +static int s5k3e2fx_i2c_read_fuseid(struct sensor_cfg_data *cdata) +{ + + uint32_t otp_vendorid_index = 0; + uint32_t otp_fuseid_index = 0; + unsigned short otp_vendorid1[MAX_LAYER_NUM]; + unsigned short otp_vendorid2[MAX_LAYER_NUM]; + unsigned short otp_vendorid3[MAX_LAYER_NUM]; + unsigned short otp_fuseid1[MAX_LAYER_NUM]; + unsigned short otp_fuseid2[MAX_LAYER_NUM]; + unsigned short otp_fuseid3[MAX_LAYER_NUM]; + for (otp_vendorid_index = 0; + otp_vendorid_index < MAX_LAYER_NUM; + otp_vendorid_index++) { + + s5k3e2fx_i2c_write_b( + s5k3e2fx_client->addr, + 0x311A, FIRST_LAYER+otp_vendorid_index); + s5k3e2fx_i2c_read_b( + s5k3e2fx_client->addr, + 0x311B, &otp_vendorid1[otp_vendorid_index]); + s5k3e2fx_i2c_read_b( + s5k3e2fx_client->addr, + 0x311C, &otp_vendorid2[otp_vendorid_index]); + s5k3e2fx_i2c_read_b( + s5k3e2fx_client->addr, + 0x311D, &otp_vendorid3[otp_vendorid_index]); + pr_info("s5k3e2fx: otp_vendorid1[%d]:0x%4x\n", + otp_vendorid_index, otp_vendorid1[otp_vendorid_index]); + pr_info("s5k3e2fx: otp_vendorid2[%d]:0x%4x\n", + otp_vendorid_index, otp_vendorid2[otp_vendorid_index]); + pr_info("s5k3e2fx: otp_vendorid3[%d]:0x%4x\n", + otp_vendorid_index, otp_vendorid3[otp_vendorid_index]); + if ((otp_vendorid1[otp_vendorid_index] == 0) && + (otp_vendorid2[otp_vendorid_index] == 0) && + (otp_vendorid3[otp_vendorid_index] == 0) && + (otp_vendorid_index != 0)) { + break; + } + } + otp_vendorid_index = otp_vendorid_index-1; + /*read fuse id from layer14~layer18. 
+ *The last non-all-zero layer contains + *correct fuse id */ + + for (otp_fuseid_index = 0; + otp_fuseid_index < MAX_LAYER_NUM; + otp_fuseid_index++) { + /*give OTP the address you want to read*/ + s5k3e2fx_i2c_write_b( + s5k3e2fx_client->addr, + 0x311A, FUSE_ID_FIRST_ADDR+otp_fuseid_index); + s5k3e2fx_i2c_read_b( + s5k3e2fx_client->addr, + 0x311B, &otp_fuseid1[otp_fuseid_index]); + s5k3e2fx_i2c_read_b( + s5k3e2fx_client->addr, + 0x311C, &otp_fuseid2[otp_fuseid_index]); + s5k3e2fx_i2c_read_b( + s5k3e2fx_client->addr, + 0x311D, &otp_fuseid3[otp_fuseid_index]); + pr_info("s5k3e2fx: otp_fuseid1[%d]:0x%4x\n", + otp_fuseid_index, otp_fuseid1[otp_fuseid_index]); + pr_info("s5k3e2fx: otp_fuseid2[%d]:0x%4x\n", + otp_fuseid_index, otp_fuseid2[otp_fuseid_index]); + pr_info("s5k3e2fx: otp_fuseid3[%d]:0x%4x\n", + otp_fuseid_index, otp_fuseid3[otp_fuseid_index]); + if ((otp_fuseid1[otp_fuseid_index] == 0) && + (otp_fuseid2[otp_fuseid_index] == 0) && + (otp_fuseid3[otp_fuseid_index] == 0) && + (otp_fuseid_index != 0)) { + break; + } + } + otp_fuseid_index = otp_fuseid_index-1; + cdata->cfg.fuse.fuse_id_word1 = otp_vendorid_index; + cdata->cfg.fuse.fuse_id_word2 = otp_fuseid_index; + cdata->cfg.fuse.fuse_id_word3 = + (((uint32_t)otp_vendorid1[otp_vendorid_index])<<16) | + (((uint32_t)otp_vendorid2[otp_vendorid_index])<<8) | + ((uint32_t)otp_vendorid3[otp_vendorid_index]); + cdata->cfg.fuse.fuse_id_word4 = + (((uint32_t)otp_fuseid1[otp_fuseid_index])<<16) | + (((uint32_t)otp_fuseid2[otp_fuseid_index])<<8) | + ((uint32_t)otp_fuseid3[otp_fuseid_index]); + pr_info("s5k3e2fx: fuse->fuse_id_word1:%d\n", + cdata->cfg.fuse.fuse_id_word1); + pr_info("s5k3e2fx: fuse->fuse_id_word2:%d\n", + cdata->cfg.fuse.fuse_id_word2); + pr_info("s5k3e2fx: fuse->fuse_id_word3:0x%08x\n", + cdata->cfg.fuse.fuse_id_word3); + pr_info("s5k3e2fx: fuse->fuse_id_word4:0x%08x\n", + cdata->cfg.fuse.fuse_id_word4); + return 0; +} + +static int s5k3e2fx_sensor_open_init(struct msm_camera_sensor_info *data) +{ + int rc = 0; + int timeout; + + down(&s5k3e2fx_sem); + + /*check whether resume done*/ + timeout = wait_event_interruptible_timeout( + s5k3e2fx_event.event_wait, + s5k3e2fx_event.waked_up, + 30*HZ); + + pr_info("wait event : %d timeout:%d\n", + s5k3e2fx_event.waked_up, timeout); + if (timeout == 0) { + up(&s5k3e2fx_sem); + return rc; + } + + msm_camio_probe_on(s5k3e2fx_pdev); + CDBG("%s %s:%d\n", __FILE__, __func__, __LINE__); + s5k3e2fx_ctrl = kzalloc(sizeof(struct s5k3e2fx_ctrl), GFP_KERNEL); + if (!s5k3e2fx_ctrl) { + pr_err("s5k3e2fx_init failed!\n"); + rc = -ENOMEM; + goto init_done; + } + s5k3e2fx_ctrl->fps_divider = 1 * 0x00000400; + s5k3e2fx_ctrl->pict_fps_divider = 1 * 0x00000400; + s5k3e2fx_ctrl->set_test = S_TEST_OFF; + s5k3e2fx_ctrl->prev_res = S_QTR_SIZE; + s5k3e2fx_ctrl->pict_res = S_FULL_SIZE; + + if (data) + s5k3e2fx_ctrl->sensordata = data; + + /* enable mclk first */ + msm_camio_clk_rate_set(S5K3E2FX_DEF_MCLK); + + msleep(20); + + msm_camio_camif_pad_reg_reset(); + msleep(20); + + rc = s5k3e2fx_probe_init_sensor(data); + if (rc < 0) + goto init_fail1; + + pr_info("s5k3e2fx_ctrl->prev_res:%d\n",s5k3e2fx_ctrl->prev_res); + if (s5k3e2fx_ctrl->prev_res == S_QTR_SIZE) + rc = s5k3e2fx_setting(S_REG_INIT, S_RES_PREVIEW); + else + rc = s5k3e2fx_setting(S_REG_INIT, S_RES_CAPTURE); + + if (rc < 0) { + pr_err("s5k3e2fx_setting failed. 
rc = %d\n", rc); + goto init_fail1; + } + + rc = s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, + 0x3130, 0x03); + if (rc < 0) + goto init_fail1; + + goto init_done; + +init_fail1: + if (s5k3e2fx_ctrl) { + kfree(s5k3e2fx_ctrl); + s5k3e2fx_ctrl = NULL; + } +init_done: + up(&s5k3e2fx_sem); + return rc; +} + +static void s5k3e2fx_suspend_sensor(void) +{ + unsigned short rData = 0; + + /*AF*/ + s5k3e2fx_i2c_read_b(s5k3e2fx_client->addr, + REG_TYPE1_AF_ENABLE, &rData); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, + REG_TYPE1_AF_ENABLE, (rData & 0xFFFE)); + mdelay(1); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, + S5K3E2FX_REG_MODE_SELECT, + S5K3E2FX_MODE_SELECT_SW_STANDBY); + msleep(210); /*for 5FPS */ + /* hi z */ + s5k3e2fx_i2c_read_b(s5k3e2fx_client->addr, REG_3150_RESERVED, &rData); + if (g_usModuleVersion == 1) + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, + REG_3150_RESERVED, (rData | 0x0001)); + else + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, + REG_3150_RESERVED, 0x51); + mdelay(1); +} + +static int s5k3e2fx_power_down(void) +{ + int rc = -EBADF; + s5k3e2fx_suspend_sensor(); + return rc; +} + +static int s5k3e2fx_sensor_release(void) +{ + int rc = -EBADF; + + s5k3e2fx_suspend_sensor(); + + msm_camio_probe_off(s5k3e2fx_pdev); + if (s5k3e2fx_ctrl) { + kfree(s5k3e2fx_ctrl); + s5k3e2fx_ctrl = NULL; + } + + allow_suspend(); + + CDBG("s5k3e2fx_release completed\n"); + + return rc; +} + +static int s5k3e2fx_probe_init_lens_correction( + const struct msm_camera_sensor_info *data) +{ + int rc = 0; + /* LC setting */ + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, + S5K3E2FX_REG_SOFTWARE_RESET, + S5K3E2FX_SOFTWARE_RESET); + mdelay(2); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, + S5K3E2FX_REG_MODE_SELECT, + S5K3E2FX_MODE_SELECT_SW_STANDBY); + + /*20090811 separates the EVT4/EVT5 sensor init and LC setting start */ + if (g_usModuleVersion == 0) + s5k3e2fx_i2c_write_table( + &Init_setting_evt4[0], + ARRAY_SIZE(Init_setting_evt4)); + else + s5k3e2fx_i2c_write_table( + &Init_setting_evt5[0], + ARRAY_SIZE(Init_setting_evt5)); + + if (g_usModuleVersion == 1) { + /*Only for EVT5*/ + /* 090911 Add for Samsung VCM calibration current Start */ + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3112, 0x0A); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3112, 0x09); + mdelay(5); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3145, 0x04); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3146, 0x80); + /* 090911 Add for Samsung VCM calibration current End */ + } else{ + /*for AWB auto calibration*/ + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3110, 0x03); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3112, 0x0A); + msleep(5); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3110, 0x03); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3112, 0x09); + } + + if (g_usModuleVersion == 0) + s5k3e2fx_i2c_write_table( + &lc_setting_evt4[0], + ARRAY_SIZE(lc_setting_evt4)); + else + s5k3e2fx_i2c_write_table( + &lc_setting_evt5[0], + ARRAY_SIZE(lc_setting_evt5)); + + /* Solve EVT5 greenish in lowlight, prevent corrupted frame*/ + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x0105,0x1); + + /*20090811 separates the EVT4/EVT5 sensor init and LC setting end */ + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, + S5K3E2FX_REG_MODE_SELECT, + S5K3E2FX_MODE_SELECT_STREAM); + msleep(10); + s5k3e2fx_suspend_sensor(); + + return rc; +} + +static void s5k3e2fx_get_pict_fps(uint16_t fps, uint16_t *pfps) +{ + /* input fps is preview fps in Q8 format */ + uint32_t divider; /* Q10 */ + + divider = (uint32_t) + 
((s5k3e2fx_reg_pat[S_RES_PREVIEW].size_h + + s5k3e2fx_reg_pat[S_RES_PREVIEW].blk_l) * + (s5k3e2fx_reg_pat[S_RES_PREVIEW].size_w + + s5k3e2fx_reg_pat[S_RES_PREVIEW].blk_p)) * 0x00000400 / + ((s5k3e2fx_reg_pat[S_RES_CAPTURE].size_h + + s5k3e2fx_reg_pat[S_RES_CAPTURE].blk_l) * + (s5k3e2fx_reg_pat[S_RES_CAPTURE].size_w + + s5k3e2fx_reg_pat[S_RES_CAPTURE].blk_p)); + + /* Verify PCLK settings and frame sizes. */ + *pfps = (uint16_t) (fps * divider / 0x00000400); +} + +static uint16_t s5k3e2fx_get_prev_lines_pf(void) +{ + return s5k3e2fx_reg_pat[S_RES_PREVIEW].size_h + + s5k3e2fx_reg_pat[S_RES_PREVIEW].blk_l; +} + +static uint16_t s5k3e2fx_get_prev_pixels_pl(void) +{ + return s5k3e2fx_reg_pat[S_RES_PREVIEW].size_w + + s5k3e2fx_reg_pat[S_RES_PREVIEW].blk_p; +} + +static uint16_t s5k3e2fx_get_pict_lines_pf(void) +{ + return s5k3e2fx_reg_pat[S_RES_CAPTURE].size_h + + s5k3e2fx_reg_pat[S_RES_CAPTURE].blk_l; +} + +static uint16_t s5k3e2fx_get_pict_pixels_pl(void) +{ + return s5k3e2fx_reg_pat[S_RES_CAPTURE].size_w + + s5k3e2fx_reg_pat[S_RES_CAPTURE].blk_p; +} + +static uint32_t s5k3e2fx_get_pict_max_exp_lc(void) +{ + uint32_t snapshot_lines_per_frame; + + if (s5k3e2fx_ctrl->pict_res == S_QTR_SIZE) + snapshot_lines_per_frame = + s5k3e2fx_reg_pat[S_RES_PREVIEW].size_h + + s5k3e2fx_reg_pat[S_RES_PREVIEW].blk_l; + else { + /* snapshot max linecount + (should be larger than transmitted preview max linecount of + preview ISO and chromatix gain-line table) */ + snapshot_lines_per_frame = 3000; //3961 * 3; + } + + return snapshot_lines_per_frame; +} + +static int s5k3e2fx_set_fps(struct fps_cfg *fps) +{ + /* input is new fps in Q10 format */ + int rc = 0; + + s5k3e2fx_ctrl->fps_divider = fps->fps_div; + + CDBG("s5k3e2fx_ctrl->fps_divider = %d\n", + s5k3e2fx_ctrl->fps_divider); +#if 0 + rc = s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, + REG_FRAME_LENGTH_LINES_MSB, + (((s5k3e2fx_reg_pat[S_RES_PREVIEW].size_h + + s5k3e2fx_reg_pat[S_RES_PREVIEW].blk_l) * + s5k3e2fx_ctrl->fps_divider / + 0x400) & 0xFF00) >> 8); + if (rc < 0) + goto set_fps_done; + + rc = s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, + REG_FRAME_LENGTH_LINES_LSB, + (((s5k3e2fx_reg_pat[S_RES_PREVIEW].size_h + + s5k3e2fx_reg_pat[S_RES_PREVIEW].blk_l) * + s5k3e2fx_ctrl->fps_divider / + 0x400) & 0xFF00)); +set_fps_done: + return rc; +#endif +} + +static int s5k3e2fx_write_exp_gain(uint16_t gain, uint32_t line) +{ + int rc = 0; + + uint16_t max_legal_gain = 0x0200; + //uint32_t ll_ratio; /* Q10 */ + uint32_t ll_pck, fl_lines; + uint16_t offset = 4; + uint32_t gain_msb, gain_lsb; + uint32_t intg_t_msb, intg_t_lsb; + uint32_t ll_pck_msb, ll_pck_lsb; + + struct s5k3e2fx_i2c_reg_conf tbl[2]; + + CDBG("Line:%d s5k3e2fx_write_exp_gain \n", __LINE__); +//printk("Steven Enter write_exp_gain User Space Gain and Line:gain = %4d, line = %6d \n", gain, line); +// if ((gain == 0) || (line == 0)) /* 100223 Mask this for Bright Flash In Beginning */ +// return rc; + + if (s5k3e2fx_ctrl->sensormode == SENSOR_PREVIEW_MODE) { + + s5k3e2fx_ctrl->my_reg_gain = gain; + s5k3e2fx_ctrl->my_reg_line_count = (uint16_t) line; + + fl_lines = s5k3e2fx_reg_pat[S_RES_PREVIEW].size_h + + s5k3e2fx_reg_pat[S_RES_PREVIEW].blk_l; /* 972 + 18 = 990 */ + + ll_pck = s5k3e2fx_reg_pat[S_RES_PREVIEW].size_w + + s5k3e2fx_reg_pat[S_RES_PREVIEW].blk_p; /* 1296 + 1436 = 2732 */ + + } else { + + fl_lines = s5k3e2fx_reg_pat[S_RES_CAPTURE].size_h + + s5k3e2fx_reg_pat[S_RES_CAPTURE].blk_l; /* 1960 + 14 = 1974 */ + + ll_pck = s5k3e2fx_reg_pat[S_RES_CAPTURE].size_w + + s5k3e2fx_reg_pat[S_RES_CAPTURE].blk_p; 
/* 2608 + 124 = 2732 */ + } + + if (gain > max_legal_gain) + gain = max_legal_gain; + +#if 0 + /* in Q10 */ + line = (line * s5k3e2fx_ctrl->fps_divider); + + if (fl_lines < (line / 0x400)) + ll_ratio = (line / (fl_lines - offset)); + else + ll_ratio = 0x400; + + ll_pck = ll_pck * ll_ratio / 0x400; + line = line / ll_ratio; +#else + // solving accuracy lossing by calculating separately + if (fl_lines < (line * s5k3e2fx_ctrl->fps_divider / 0x400)){ + /* ll_ratio = + (line * s5k3e2fx_ctrl->fps_divider / (fl_lines - offset)); */ + + ll_pck = ll_pck * + (line * s5k3e2fx_ctrl->fps_divider / (fl_lines - offset)) / + 0x400; + + /* line = line * s5k3e2fx_ctrl->fps_divider / + (line * s5k3e2fx_ctrl->fps_divider / (fl_lines - offset)); */ + line = fl_lines - offset; + } + else{ + /* ll_ratio = 0x400; */ + /* ll_pck = ll_pck * 0x400 / 0x400; */ + line = line * s5k3e2fx_ctrl->fps_divider / 0x400; + } +#endif + +/* solve greenish: only release for preview */ + if (s5k3e2fx_ctrl->sensormode == SENSOR_PREVIEW_MODE) { + rc = s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_HOLD); + if (rc < 0) { + pr_err("s5k3e2fx_i2c_write_b failed on line %d\n", + __LINE__); + return rc; + } + } + + /* update gain registers */ + gain_msb = (gain & 0xFF00) >> 8; + gain_lsb = gain & 0x00FF; + tbl[0].waddr = REG_ANALOGUE_GAIN_CODE_GLOBAL_MSB; + tbl[0].bdata = gain_msb; + tbl[1].waddr = REG_ANALOGUE_GAIN_CODE_GLOBAL_LSB; + tbl[1].bdata = gain_lsb; + rc = s5k3e2fx_i2c_write_table(&tbl[0], ARRAY_SIZE(tbl)); + if (rc < 0) + goto write_gain_done; +#if 1 /* Solve EVT5 greenish in lowlight*/ + ll_pck_msb = (ll_pck & 0xFF00) >> 8; + ll_pck_lsb = ll_pck & 0x00FF; + tbl[0].waddr = REG_LINE_LENGTH_PCK_MSB; + tbl[0].bdata = ll_pck_msb; + tbl[1].waddr = REG_LINE_LENGTH_PCK_LSB; + tbl[1].bdata = ll_pck_lsb; + + rc = s5k3e2fx_i2c_write_table(&tbl[0], ARRAY_SIZE(tbl)); + if (rc < 0) + goto write_gain_done; + +#else + if (line / 0x400 + offset > fl_lines) + ll_pck = line / 0x400 + offset; + else + ll_pck = fl_lines; + + ll_pck_msb = ((ll_pck) & 0xFF00) >> 8; + ll_pck_lsb = (ll_pck) & 0x00FF; + tbl[0].waddr = REG_FRAME_LENGTH_LINES_MSB; + tbl[0].bdata = ll_pck_msb; + tbl[1].waddr = REG_FRAME_LENGTH_LINES_LSB; + tbl[1].bdata = ll_pck_lsb; + + rc = s5k3e2fx_i2c_write_table(&tbl[0], ARRAY_SIZE(tbl)); + if (rc < 0) + goto write_gain_done; +#endif + CDBG("line %d, fl_lines %d\n", line, fl_lines); + intg_t_msb = (line & 0xFF00) >> 8; + intg_t_lsb = (line & 0x00FF); + tbl[0].waddr = REG_COARSE_INTEGRATION_TIME; + tbl[0].bdata = intg_t_msb; + tbl[1].waddr = REG_COARSE_INTEGRATION_TIME_LSB; + tbl[1].bdata = intg_t_lsb; + rc = s5k3e2fx_i2c_write_table(&tbl[0], ARRAY_SIZE(tbl)); + +/* solve greenish: release for both */ + rc = s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, + REG_GROUPED_PARAMETER_HOLD, + GROUPED_PARAMETER_UPDATE); + if (rc < 0) { + pr_err("s5k3e2fx_i2c_write_b failed on line %d\n", __LINE__); + return rc; + } +write_gain_done: + return rc; +} + +static int s5k3e2fx_set_pict_exp_gain(uint16_t gain, uint32_t line) +{ + pr_info("s5k3e2fx_set_pict_exp_gain gain %d line %d\n", + gain, line); + + return s5k3e2fx_write_exp_gain(gain, line); +} + +static int s5k3e2fx_video_config(int mode, int res) +{ + int rc; + struct msm_camera_sensor_info *sinfo = s5k3e2fx_pdev->dev.platform_data; + CDBG("s5k3e2fx_video_config res:%d\n", res); +/* 100202 Move these setting from down */ + s5k3e2fx_ctrl->prev_res = res; + s5k3e2fx_ctrl->curr_res = res; + s5k3e2fx_ctrl->sensormode = mode; + + switch (res) { + case 
S_QTR_SIZE: + rc = s5k3e2fx_setting(S_UPDATE_PERIODIC, S_RES_PREVIEW); + if (rc < 0) + return rc; + break; + + case S_FULL_SIZE: + pr_info("KPI PA: start sensor snapshot config: %d\n", __LINE__); + sinfo->kpi_sensor_start = ktime_to_ns(ktime_get()); + rc = s5k3e2fx_setting(S_UPDATE_PERIODIC, S_RES_CAPTURE); + if (rc < 0) + return rc; + break; + + default: + return 0; + } +/* 100202 Move these setting up + s5k3e2fx_ctrl->prev_res = res; + s5k3e2fx_ctrl->curr_res = res; + s5k3e2fx_ctrl->sensormode = mode; +*/ + + rc = s5k3e2fx_write_exp_gain(s5k3e2fx_ctrl->my_reg_gain, + s5k3e2fx_ctrl->my_reg_line_count); + + return rc; +} + +static int s5k3e2fx_set_default_focus(void) +{ + int rc = 0; + + rc = s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3131, 0); + if (rc < 0) + return rc; + + rc = s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3132, 0); + if (rc < 0) + return rc; + + s5k3e2fx_ctrl->curr_lens_pos = 0; + + return rc; +} + +static int s5k3e2fx_move_focus( + int direction, int num_steps,int coarse_delay,int fine_delay, + int step_dir, int init_code_offset_max) +{ + int rc = 0; + int i; + int16_t step_direction; + int16_t actual_step; + int16_t next_pos, pos_offset; + int16_t init_code = 0; + uint8_t next_pos_msb, next_pos_lsb; + int16_t s_move[5]; + uint32_t gain; /* Q10 format */ + int16_t step_direction_pre_define; + int16_t init_code_offset_pre_define; + int16_t coarse_search_delay; + int16_t fine_search_delay; + + if (g_usModuleVersion == 1) { /* EVT5 */ + step_direction_pre_define = step_dir; + init_code_offset_pre_define = init_code_offset_max; + /*fine search delay time is turnable*/ + coarse_search_delay = coarse_delay; + fine_search_delay = fine_delay; + } else { + step_direction_pre_define = 20; + init_code_offset_pre_define = 738; + coarse_search_delay = 6; + fine_search_delay = 4; + } + +pr_info("%s step_direction_pre_define %d\n", __func__, step_direction_pre_define); + + if (direction == MOVE_NEAR) + step_direction = step_direction_pre_define; + else if (direction == MOVE_FAR) + step_direction = 0 - step_direction_pre_define; + else { + pr_err("s5k3e2fx_move_focus failed at line %d ...\n", __LINE__); + return -EINVAL; + } + + actual_step = step_direction * (int16_t) num_steps; + pos_offset = init_code + s5k3e2fx_ctrl->curr_lens_pos; + gain = actual_step * 0x400 / 5; + + for (i = 0; i <= 4; i++) { + if (actual_step >= 0) + s_move[i] = + ((((i + 1) * gain + 0x200) - + (i * gain + 0x200)) / 0x400); + else + s_move[i] = + ((((i + 1) * gain - 0x200) - + (i * gain - 0x200)) / 0x400); + } + + /* Ring Damping Code */ + for (i = 0; i <= 4; i++) { + next_pos = (int16_t) (pos_offset + s_move[i]); + + if (next_pos > (init_code_offset_max + init_code)) + next_pos = init_code_offset_pre_define + init_code; + else if (next_pos < 0) + next_pos = 0; + + CDBG("next_position in damping mode = %d\n", next_pos); + /* Writing the Values to the actuator */ + if (next_pos == init_code) + next_pos = 0x00; + + next_pos_msb = next_pos >> 8; + next_pos_lsb = next_pos & 0x00FF; + + rc = s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3131, + next_pos_msb); + if (rc < 0) + break; + + rc = s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3132, + next_pos_lsb); + if (rc < 0) + break; + + pos_offset = next_pos; + s5k3e2fx_ctrl->curr_lens_pos = pos_offset - init_code; + + if(num_steps > 1) + mdelay(coarse_search_delay); + else + mdelay(fine_search_delay); + } + + return rc; +} + + +static int s5k3e2fx_suspend(struct platform_device *pdev, pm_message_t state) +{ + int rc; + struct msm_camera_sensor_info *sinfo = 
pdev->dev.platform_data; + + if (!sinfo->need_suspend) + return 0; + s5k3e2fx_event.waked_up = 0; + + + pr_info("s5k3e2fx: camera suspend\n"); + rc = gpio_request(sinfo->sensor_reset, "s5k3e2fx"); + if (!rc) + gpio_direction_output(sinfo->sensor_reset, 0); + else { + pr_err("s5k3e2fx: request GPIO(sensor_reset) :%d faile\n", + sinfo->sensor_reset); + goto suspend_fail; + } + CDBG("s5k3e2fx: gpio_free:%d line:%d\n", sinfo->sensor_reset, + __LINE__); + gpio_free(sinfo->sensor_reset); + +suspend_fail: + return rc; +} +static void s5k3e2fx_sensor_resume_setting(void) +{ + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, + S5K3E2FX_REG_SOFTWARE_RESET, + S5K3E2FX_SOFTWARE_RESET); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x0100, 0x00); + /*--------------PLL setting for 80Mhz*/ + /* PLL setting */ + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x0305, 0x06); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x0306, 0x00); + /*88 54.4Mhz */ + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x0307, 0x83); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x0301, 0x08); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x0303, 0x01); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x0309, 0x08); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x030b, 0x01); + /*--------------output size*/ + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x034c, 0x05); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x034d, 0x10); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x034e, 0x03); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x034f, 0xcc); + /*--------------frame format (min blanking)*/ + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x0340, 0x03); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x0341, 0xe2); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x0342, 0x0a); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x0343, 0xac); + /*--------------Binning */ + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x0381, 0x01); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x0383, 0x01); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x0385, 0x01); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x0387, 0x03); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3014, 0x06); + /*--------------MSR setting*/ + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x30c4, 0x01); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3000, 0x03); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3001, 0x94); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3002, 0x02); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3003, 0x95); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3004, 0x0f); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3005, 0x05); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3006, 0x3c); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3007, 0x8c); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3008, 0x93); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3009, 0x05); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x300a, 0x3a); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x300c, 0x02); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x300d, 0x3e); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x300f, 0x0e); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3010, 0x46); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3011, 0x64); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3012, 0x1e); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x301d, 0x3f); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3024, 0x04); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3028, 0x40); + s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3070, 0xdf); + 
s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x301b, 0x73);
+	s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x307e, 0x02);
+	s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x30bd, 0x06);
+	s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x30c2, 0x0b);
+	s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x30ac, 0x81);
+	s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3151, 0xe6);
+	s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3029, 0x02);
+	/*--------------EVT4 setting*/
+	s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x30bf, 0x00);
+	s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3022, 0x87);
+	/* tune ADC to get better yield rate in EDS */
+	s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3019, 0x60);
+	/* AF driving strength */
+	s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3146, 0x3c);
+	s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3152, 0x08);
+	s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x315a, 0xaa);
+	s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3159, 0x0a);
+	s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x0205, 0x80);
+	s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x0202, 0x03);
+	s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x0200, 0x02);
+}
+
+static void s5k3e2fx_resume(struct early_suspend *handler)
+{
+	struct msm_camera_sensor_info *sinfo = s5k3e2fx_pdev->dev.platform_data;
+
+	if (!sinfo->need_suspend)
+		return;
+
+	/* skip if the sensor is already resumed */
+	if (s5k3e2fx_event.waked_up == 1) {
+		pr_info("s5k3e2fx: no resume necessary\n");
+		return;
+	}
+
+	pr_info("s5k3e2fx_resume\n");
+	/* init MSM clk and GPIO, enable camera I/O */
+	msm_camio_probe_on(s5k3e2fx_pdev);
+	msm_camio_clk_enable(CAMIO_MDC_CLK);
+
+	pr_info("msm_camio_probe_on\n");
+	/* read sensor ID and pull down reset */
+	msm_camio_clk_rate_set(S5K3E2FX_DEF_MCLK);
+	pr_info("msm_camio_clk_rate_set\n");
+	msleep(20);
+	s5k3e2fx_probe_init_sensor(sinfo);
+	CDBG("s5k3e2fx_probe_init_sensor\n");
+	/* init sensor: streaming on, SW init, streaming off */
+	s5k3e2fx_sensor_resume_setting();
+	/* lens shading */
+	s5k3e2fx_probe_init_lens_correction(sinfo);
+	/* stream on */
+	s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr,
+		S5K3E2FX_REG_MODE_SELECT,
+		S5K3E2FX_MODE_SELECT_STREAM);
+
+	/* software standby */
+	msleep(25);
+	s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3130, 0x00);
+	mdelay(1);
+	/* stream off */
+	s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr,
+		S5K3E2FX_REG_MODE_SELECT,
+		S5K3E2FX_MODE_SELECT_SW_STANDBY);
+	mdelay(1);
+	s5k3e2fx_i2c_write_b(s5k3e2fx_client->addr, 0x3150, 0x51);
+	msleep(240);
+	/* set RST to low */
+	msm_camio_probe_off(s5k3e2fx_pdev);
+	msm_camio_clk_disable(CAMIO_MDC_CLK);
+	s5k3e2fx_event.waked_up = 1;
+	wake_up(&s5k3e2fx_event.event_wait);
+	pr_info("s5k3e2fx: resume done\n");
+	return;
+}
+
+static struct early_suspend early_suspend_s5k3e2fx = {
+	.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN+1,
+	.resume = s5k3e2fx_resume,
+	.suspend = NULL,
+};
+
+static const char *s5k3e2fxVendor = "Samsung";
+static const char *s5k3e2fxNAME = "s5k3e2fx";
+static const char *s5k3e2fxSize = "5M";
+
+static ssize_t sensor_vendor_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+
+	sprintf(buf, "%s %s %s\n", s5k3e2fxVendor, s5k3e2fxNAME, s5k3e2fxSize);
+	ret = strlen(buf) + 1;
+
+	return ret;
+}
+
+static DEVICE_ATTR(sensor, 0444, sensor_vendor_show, NULL);
+
+static struct kobject *android_s5k3e2fx;
+
+static int s5k3e2fx_sysfs_init(void)
+{
+	int ret;
+	pr_info("s5k3e2fx: kobject create and add\n");
+	android_s5k3e2fx = kobject_create_and_add("android_camera", NULL);
+	if (android_s5k3e2fx == NULL) {
+
pr_info("s5k3e2fx_sysfs_init: subsystem_register " \ + "failed\n"); + ret = -ENOMEM; + return ret ; + } + pr_info("s5k3e2fx:sysfs_create_file\n"); + ret = sysfs_create_file(android_s5k3e2fx, &dev_attr_sensor.attr); + if (ret) { + pr_info("s5k3e2fx_sysfs_init: sysfs_create_file " \ + "failed\n"); + kobject_del(android_s5k3e2fx); + } + return 0 ; +} + + +static int s5k3e2fx_sensor_config(void __user *argp) +{ + struct sensor_cfg_data cdata; + long rc = 0; + + if (copy_from_user(&cdata, + (void *)argp, sizeof(struct sensor_cfg_data))) + return -EFAULT; + + CDBG("%s: cfgtype = %d\n", __func__, cdata.cfgtype); + switch (cdata.cfgtype) { + case CFG_GET_PICT_FPS: + s5k3e2fx_get_pict_fps(cdata.cfg.gfps.prevfps, + &(cdata.cfg.gfps.pictfps)); + + if (copy_to_user((void *)argp, &cdata, + sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_GET_PREV_L_PF: + cdata.cfg.prevl_pf = s5k3e2fx_get_prev_lines_pf(); + + if (copy_to_user((void *)argp, + &cdata, sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_GET_PREV_P_PL: + cdata.cfg.prevp_pl = s5k3e2fx_get_prev_pixels_pl(); + + if (copy_to_user((void *)argp, + &cdata, sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_GET_PICT_L_PF: + cdata.cfg.pictl_pf = s5k3e2fx_get_pict_lines_pf(); + + if (copy_to_user((void *)argp, + &cdata, sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_GET_PICT_P_PL: + cdata.cfg.pictp_pl = s5k3e2fx_get_pict_pixels_pl(); + + if (copy_to_user((void *)argp, + &cdata, sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_GET_PICT_MAX_EXP_LC: + cdata.cfg.pict_max_exp_lc = s5k3e2fx_get_pict_max_exp_lc(); + + if (copy_to_user((void *)argp, + &cdata, sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_SET_FPS: + case CFG_SET_PICT_FPS: + rc = s5k3e2fx_set_fps(&(cdata.cfg.fps)); + break; + + case CFG_SET_EXP_GAIN: + rc = s5k3e2fx_write_exp_gain(cdata.cfg.exp_gain.gain, + cdata.cfg.exp_gain.line); + break; + + case CFG_SET_PICT_EXP_GAIN: + rc = s5k3e2fx_set_pict_exp_gain(cdata.cfg.exp_gain.gain, + cdata.cfg.exp_gain.line); + break; + + case CFG_SET_MODE: + pr_info("CFG_SET_MODE\n"); + rc = s5k3e2fx_video_config(cdata.mode, cdata.rs); + break; + + case CFG_PWR_DOWN: + rc = s5k3e2fx_power_down(); + break; + + case CFG_MOVE_FOCUS: + rc = s5k3e2fx_move_focus(cdata.cfg.focus.dir, + cdata.cfg.focus.steps, + cdata.cfg.focus.coarse_delay, + cdata.cfg.focus.fine_delay, + cdata.cfg.focus.step_dir, + cdata.cfg.focus.init_code_offset_max); + break; + + case CFG_SET_DEFAULT_FOCUS: + rc = s5k3e2fx_set_default_focus(); + break; + + /*case CFG_GET_AF_MAX_STEPS: */ + case CFG_SET_EFFECT: + rc = s5k3e2fx_set_default_focus(); + break; + + case CFG_I2C_IOCTL_R_OTP:{ + rc = s5k3e2fx_i2c_read_fuseid(&cdata); + if (copy_to_user(argp, &cdata, sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + } + break; + case CFG_SET_LENS_SHADING: + default: + rc = -EFAULT; + break; + } + + prevent_suspend(); + return rc; +} + +static int s5k3e2fx_sensor_probe(struct msm_camera_sensor_info *info, + struct msm_sensor_ctrl *s) +{ + int rc = 0; + pr_info("%s\n", __func__); + + rc = i2c_add_driver(&s5k3e2fx_i2c_driver); + if (rc < 0 || s5k3e2fx_client == NULL) { + rc = -ENOTSUPP; + goto probe_fail; + } + + msm_camio_clk_rate_set(S5K3E2FX_DEF_MCLK); + msleep(20); + + rc = s5k3e2fx_probe_init_sensor(info); + if (rc < 0) + goto probe_fail; + + /* lens correction */ + s5k3e2fx_probe_init_lens_correction(info); + init_suspend(); + + s->s_init = s5k3e2fx_sensor_open_init; + 
s->s_release = s5k3e2fx_sensor_release; + s->s_config = s5k3e2fx_sensor_config; + + /*register late resuem*/ + register_early_suspend(&early_suspend_s5k3e2fx); + /*init wait event*/ + init_waitqueue_head(&s5k3e2fx_event.event_wait); + /*init waked_up value*/ + s5k3e2fx_event.waked_up = 1; + /*write sysfs*/ + s5k3e2fx_sysfs_init(); + + return rc; + +probe_fail: + pr_err("SENSOR PROBE FAILS!\n"); + return rc; +} + +static int __s5k3e2fx_probe(struct platform_device *pdev) +{ + s5k3e2fx_pdev = pdev; + return msm_camera_drv_start(pdev, s5k3e2fx_sensor_probe); +} + +static struct platform_driver msm_camera_driver = { + .probe = __s5k3e2fx_probe, + .driver = { + .name = "msm_camera_s5k3e2fx", + .owner = THIS_MODULE, + }, + .suspend = s5k3e2fx_suspend, +}; + +static int __init s5k3e2fx_init(void) +{ + return platform_driver_register(&msm_camera_driver); +} + +module_init(s5k3e2fx_init); diff --git a/drivers/media/video/msm/s5k3h1gx.c b/drivers/media/video/msm/s5k3h1gx.c new file mode 100644 index 0000000000000..c7e417c8573e8 --- /dev/null +++ b/drivers/media/video/msm/s5k3h1gx.c @@ -0,0 +1,1756 @@ +/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Code Aurora Forum nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * Alternatively, provided that this notice is retained in full, this software + * may be relicensed by the recipient under the terms of the GNU General Public + * License version 2 ("GPL") and only version 2, in which case the provisions of + * the GPL apply INSTEAD OF those given above. If the recipient relicenses the + * software under the GPL, then the identification text in the MODULE_LICENSE + * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a + * recipient changes the license terms to the GPL, subsequent recipients shall + * not relicense under alternate licensing terms, including the BSD or dual + * BSD/GPL terms. In addition, the following license statement immediately + * below and between the words START and END shall also then apply when this + * software is relicensed under the GPL: + * + * START + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 and only version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + * END + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "s5k3h1gx.h" + +/* CAMIF output resolutions */ +/* 816x612, 24MHz MCLK 96MHz PCLK */ +#define SENSOR_FULL_SIZE_WIDTH 3280 +#define SENSOR_FULL_SIZE_HEIGHT 2464 + +#define SENSOR_QTR_SIZE_WIDTH 1640 +#define SENSOR_QTR_SIZE_HEIGHT 1232 + +#define SENSOR_HRZ_FULL_BLK_PIXELS 190 +#define SENSOR_VER_FULL_BLK_LINES 16 /* 0 */ +#define SENSOR_HRZ_QTR_BLK_PIXELS 1830 +#define SENSOR_VER_QTR_BLK_LINES 16 /* 8 */ +#define SENSOR_VER_QTR_BLK_LINES_PARALLEL 611 /* 16 */ /* 8 */ + +static int cam_mode_sel = 0; /* 0: photo, 1: video@30fps, 2: video@24fps */ +/* 611: 30fps, 1073: 24fps */ +const int s5k3h1gx_ver_qtr_blk_lines_array[] = {611,611,1073}; + +#define S5K3H1GX_AF_I2C_ADDR 0x18 +#define S5K3H1GX_TOTAL_STEPS_NEAR_TO_FAR 42 /* 36 */ +#define S5K3H1GX_SW_DAMPING_STEP 10 +#define S5K3H1GX_MAX_FPS 30 +#define S5K3H1GX_MAX_FPS_PARALLEL 30 /* 22 */ + +/*============================================================= + SENSOR REGISTER DEFINES +==============================================================*/ + +#define S5K3H1GX_REG_MODEL_ID 0x0000 +#define S5K3H1GX_MODEL_ID 0x3810 + +/* Color bar pattern selection */ +#define S5K3H1GX_COLOR_BAR_PATTERN_SEL_REG 0x0601 + +#define REG_FRAME_LENGTH_LINES_MSB 0x0340 +#define REG_FRAME_LENGTH_LINES_LSB 0x0341 +#define REG_LINE_LENGTH_PCK_MSB 0x0342 +#define REG_LINE_LENGTH_PCK_LSB 0x0343 +#define REG_ANALOGUE_GAIN_CODE_GLOBAL_MSB 0x0204 +#define REG_ANALOGUE_GAIN_CODE_GLOBAL_LSB 0x0205 +#define REG_COARSE_INTEGRATION_TIME_MSB 0x0202 +#define REG_COARSE_INTEGRATION_TIME_LSB 0x0203 + +#define S5K3H1GX_REG_GROUP_PARAMETER_HOLD 0x0104 +#define S5K3H1GX_GROUP_PARAMETER_HOLD 0x01 +#define S5K3H1GX_GROUP_PARAMETER_UNHOLD 0x00 + +//////////////////////////// + +#define Q8 0x00000100 +#define SENSOR_DEFAULT_CLOCK_RATE 24000000 + +//////////////////////////////////////////////////////////// + +/*============================================================================ + TYPE DECLARATIONS +============================================================================*/ + +/* 16bit address - 8 bit context register structure */ +#if 0 +typedef struct reg_addr_val_pair_struct { + uint16_t reg_addr; + uint8_t reg_val; +} reg_struct_type; +#endif + +struct awb_lsc_struct_type { + unsigned int caBuff[8]; /*awb_calibartion*/ + struct reg_addr_val_pair_struct LSC_table[150]; /*lsc_calibration*/ + uint32_t LSC_table_CRC; +}; + +enum s5k3h1gx_test_mode_t { + TEST_OFF, + TEST_1, + TEST_2, + TEST_3 +}; + +enum s5k3h1gx_resolution_t { + QTR_SIZE, + FULL_SIZE, + INVALID_SIZE +}; + +enum s5k3h1gx_reg_update_t{ + REG_INIT, + REG_PERIODIC 
+}; + +/*20101011 QCT mesh LSC calibration*/ +static int global_mode = 0; + +static int sensor_probe_node; +static int preview_frame_count; + +static struct wake_lock s5k3h1gx_wake_lock; + +static inline void init_suspend(void) +{ + wake_lock_init(&s5k3h1gx_wake_lock, WAKE_LOCK_IDLE, "s5k3h1gx"); +} + +static inline void deinit_suspend(void) +{ + wake_lock_destroy(&s5k3h1gx_wake_lock); +} + +static inline void prevent_suspend(void) +{ + wake_lock(&s5k3h1gx_wake_lock); +} + +static inline void allow_suspend(void) +{ + wake_unlock(&s5k3h1gx_wake_lock); +} + +/*============================================================================ +DATA DECLARATIONS +============================================================================*/ + +/* 96MHz PCLK @ 24MHz MCLK inc*/ + + +/* FIXME: Changes from here */ +struct s5k3h1gx_work { + struct work_struct work; +}; + +static struct s5k3h1gx_work *s5k3h1gx_sensorw; +static struct i2c_client *s5k3h1gx_client; +static uint16_t s5k3h1gx_pos_tbl[S5K3H1GX_TOTAL_STEPS_NEAR_TO_FAR + 1]; + +struct s5k3h1gx_ctrl { + const struct msm_camera_sensor_info *sensordata; + + uint32_t sensormode; + uint32_t fps_divider; /* init to 1 * 0x00000400 */ + uint32_t pict_fps_divider; /* init to 1 * 0x00000400 */ + uint16_t fps; + + int16_t curr_lens_pos; + uint16_t curr_step_pos; + uint16_t init_curr_lens_pos; + uint16_t my_reg_gain; + uint32_t my_reg_line_count; + uint16_t total_lines_per_frame; + + enum s5k3h1gx_resolution_t prev_res; + enum s5k3h1gx_resolution_t pict_res; + enum s5k3h1gx_resolution_t curr_res; + enum s5k3h1gx_test_mode_t set_test; + enum s5k3h1gx_reg_update_t reg_update; + + unsigned short imgaddr; +}; + +static struct s5k3h1gx_ctrl *s5k3h1gx_ctrl; +static struct platform_device *s5k3h1gx_pdev; + +struct s5k3h1gx_waitevent{ + uint32_t waked_up; + wait_queue_head_t event_wait; +}; + +static DECLARE_WAIT_QUEUE_HEAD(s5k3h1gx_wait_queue); +DEFINE_SEMAPHORE(s5k3h1gx_sem); + + +/*=============================================================*/ + +static int s5k3h1gx_i2c_rxdata(unsigned short saddr, + unsigned char *rxdata, int length) +{ + struct i2c_msg msgs[] = { + { + .addr = saddr, + .flags = 0, + .len = 2, + .buf = rxdata, + }, + { + .addr = saddr, + .flags = I2C_M_RD, + .len = length, + .buf = rxdata, + }, + }; + + if (i2c_transfer(s5k3h1gx_client->adapter, msgs, 2) < 0) { + pr_err("s5k3h1gx_i2c_rxdata failed!\n"); + return -EIO; + } + pr_info("%s: rxdata=0x%X\n", __func__, *rxdata); + + return 0; +} +static int32_t s5k3h1gx_i2c_txdata(unsigned short saddr, + unsigned char *txdata, int length) +{ + struct i2c_msg msg[] = { + { + .addr = saddr, + .flags = 0, + .len = length, + .buf = txdata, + }, + }; + if (i2c_transfer(s5k3h1gx_client->adapter, msg, 1) < 0) { + pr_err("s5k3h1gx_i2c_txdata failed 0x%x\n", saddr); + return -EIO; + } + + return 0; +} + +static int32_t s5k3h1gx_i2c_read_b(unsigned short saddr, unsigned short raddr, + unsigned short *rdata) +{ + int32_t rc = 0; + unsigned char buf[4]; + + if (!rdata) + return -EIO; + + memset(buf, 0, sizeof(buf)); + + buf[0] = (raddr & 0xFF00)>>8; + buf[1] = (raddr & 0x00FF); + + rc = s5k3h1gx_i2c_rxdata(saddr, buf, 1); + if (rc < 0) + return rc; + + *rdata = buf[0]; + + if (rc < 0) + pr_info("s5k3h1gx_i2c_read failed!\n"); + + return rc; +} + + +static int32_t s5k3h1gx_i2c_read(unsigned short raddr, + unsigned short *rdata, int rlen) +{ + int32_t rc = 0; + unsigned char buf[2]; + int count = 0; + if (!rdata) + return -EIO; + + memset(buf, 0, sizeof(buf)); + + buf[0] = (raddr & 0xFF00) >> 8; + buf[1] = (raddr 
& 0x00FF); +retry: + rc = s5k3h1gx_i2c_rxdata(s5k3h1gx_client->addr, buf, rlen); + + if (rc < 0) { + pr_err("s5k3h1gx_i2c_read 0x%x failed!\n", raddr); + printk(KERN_ERR "starting read retry policy count:%d\n", count); + udelay(10); + count++; + if (count < 20) { + if (count > 10) + udelay(100); + } else + return rc; + goto retry; + } + + *rdata = (rlen == 2 ? buf[0] << 8 | buf[1] : buf[0]); + return rc; +} + + +static int32_t s5k3h1gx_i2c_write_b(unsigned short saddr, + unsigned short waddr, uint8_t bdata) +{ + int32_t rc = -EFAULT; + unsigned char buf[3]; + int count = 0; + + memset(buf, 0, sizeof(buf)); + buf[0] = (waddr & 0xFF00) >> 8; + buf[1] = (waddr & 0x00FF); + buf[2] = bdata; + +retry: + rc = s5k3h1gx_i2c_txdata(saddr, buf, 3); + + if (rc < 0) { + pr_err("i2c_write_b failed, addr = 0x%x, val = 0x%x\n", + waddr, bdata); + pr_err(KERN_ERR "starting read retry policy count:%d\n", count); + udelay(10); + count++; + if (count < 20) { + if (count > 10) + udelay(100); + } else + return rc; + goto retry; + } + + return rc; +} + +static void s5k3h1gx_get_pict_fps(uint16_t fps, uint16_t *pfps) +{ + /* input fps is preview fps in Q8 format */ + uint32_t divider, d1, d2; + uint16_t snapshot_height, preview_height, preview_width, snapshot_width; + struct msm_camera_sensor_info *sinfo = s5k3h1gx_pdev->dev.platform_data; + + if (s5k3h1gx_ctrl->prev_res == QTR_SIZE) { + preview_width = + SENSOR_QTR_SIZE_WIDTH + SENSOR_HRZ_QTR_BLK_PIXELS; + + if (sinfo->csi_if) + preview_height = + SENSOR_QTR_SIZE_HEIGHT + + SENSOR_VER_QTR_BLK_LINES; + else + preview_height = + SENSOR_QTR_SIZE_HEIGHT + + s5k3h1gx_ver_qtr_blk_lines_array[cam_mode_sel] ; + } else { + /* full size resolution used for preview. */ + preview_width = + SENSOR_FULL_SIZE_WIDTH + SENSOR_HRZ_FULL_BLK_PIXELS; + preview_height = + SENSOR_FULL_SIZE_HEIGHT + SENSOR_VER_FULL_BLK_LINES; + } + + if (s5k3h1gx_ctrl->pict_res == QTR_SIZE) { + snapshot_width = + SENSOR_QTR_SIZE_WIDTH + SENSOR_HRZ_QTR_BLK_PIXELS; + + if (sinfo->csi_if) + snapshot_height = + SENSOR_QTR_SIZE_HEIGHT + + SENSOR_VER_QTR_BLK_LINES; + else + snapshot_height = + SENSOR_QTR_SIZE_HEIGHT + + s5k3h1gx_ver_qtr_blk_lines_array[cam_mode_sel] ; + } else { + snapshot_width = + SENSOR_FULL_SIZE_WIDTH + + SENSOR_HRZ_FULL_BLK_PIXELS; + snapshot_height = + SENSOR_FULL_SIZE_HEIGHT + + SENSOR_VER_FULL_BLK_LINES; + } + + d1 = preview_height * 0x00000400 / snapshot_height; + d2 = preview_width * 0x00000400 / snapshot_width; + + divider = (uint32_t) (d1 * d2) / 0x00000400; + *pfps = (uint16_t)(fps * divider / 0x00000400); + +} /* endof s5k3h1gx_get_pict_fps */ + +static uint16_t s5k3h1gx_get_prev_lines_pf(void) +{ + struct msm_camera_sensor_info *sinfo = s5k3h1gx_pdev->dev.platform_data; + + if (s5k3h1gx_ctrl->prev_res == QTR_SIZE) { + if (sinfo->csi_if) + return (SENSOR_QTR_SIZE_HEIGHT + + SENSOR_VER_QTR_BLK_LINES); + else + return (SENSOR_QTR_SIZE_HEIGHT + + s5k3h1gx_ver_qtr_blk_lines_array[cam_mode_sel]); + } else { + return (SENSOR_FULL_SIZE_HEIGHT + + SENSOR_VER_FULL_BLK_LINES); + } +} + +static uint16_t s5k3h1gx_get_prev_pixels_pl(void) +{ + if (s5k3h1gx_ctrl->prev_res == QTR_SIZE) { + return (SENSOR_QTR_SIZE_WIDTH + + SENSOR_HRZ_QTR_BLK_PIXELS); + } else { + return (SENSOR_FULL_SIZE_WIDTH + + SENSOR_HRZ_FULL_BLK_PIXELS); +} +} + +static uint16_t s5k3h1gx_get_pict_lines_pf(void) +{ + struct msm_camera_sensor_info *sinfo = s5k3h1gx_pdev->dev.platform_data; + + if (s5k3h1gx_ctrl->pict_res == QTR_SIZE) { + if (sinfo->csi_if) + return (SENSOR_QTR_SIZE_HEIGHT + + SENSOR_VER_QTR_BLK_LINES); + 
else + return (SENSOR_QTR_SIZE_HEIGHT + + s5k3h1gx_ver_qtr_blk_lines_array[cam_mode_sel]); + } else { + return (SENSOR_FULL_SIZE_HEIGHT + + SENSOR_VER_FULL_BLK_LINES); + } +} + +static uint16_t s5k3h1gx_get_pict_pixels_pl(void) +{ + if (s5k3h1gx_ctrl->pict_res == QTR_SIZE) { + return (SENSOR_QTR_SIZE_WIDTH + + SENSOR_HRZ_QTR_BLK_PIXELS); + } else { + return (SENSOR_FULL_SIZE_WIDTH + + SENSOR_HRZ_FULL_BLK_PIXELS); + } +} + +static uint32_t s5k3h1gx_get_pict_max_exp_lc(void) +{ + struct msm_camera_sensor_info *sinfo = s5k3h1gx_pdev->dev.platform_data; + + if (s5k3h1gx_ctrl->pict_res == QTR_SIZE) { + if (sinfo->csi_if) + return (SENSOR_QTR_SIZE_HEIGHT + + SENSOR_VER_QTR_BLK_LINES); + else + return (SENSOR_QTR_SIZE_HEIGHT + + s5k3h1gx_ver_qtr_blk_lines_array[cam_mode_sel]); + } else { + return (SENSOR_FULL_SIZE_HEIGHT + + SENSOR_VER_FULL_BLK_LINES); + } +} + +static int32_t s5k3h1gx_set_fps(struct fps_cfg *fps) +{ + int32_t rc = 0; + s5k3h1gx_ctrl->fps_divider = fps->fps_div; + s5k3h1gx_ctrl->pict_fps_divider = fps->pict_fps_div; + s5k3h1gx_ctrl->fps = fps->f_mult; + return rc; +} + +static int32_t s5k3h1gx_i2c_write_table( + struct s5k3h1gx_i2c_reg_conf *reg_cfg_tbl, int num) +{ + int i; + int32_t rc = -EIO; + for (i = 0; i < num; i++) { + rc = s5k3h1gx_i2c_write_b(s5k3h1gx_client->addr, + reg_cfg_tbl->waddr, reg_cfg_tbl->bdata); + if (rc < 0) + break; + reg_cfg_tbl++; + } + + return rc; +} + +static int32_t s5k3h1gx_write_exp_gain + (uint16_t gain, uint32_t line) +{ + int32_t rc = 0; + + uint16_t max_legal_gain = 0x0200; + uint32_t ll_ratio; /* Q10 */ + uint32_t ll_pck, fl_lines; + uint16_t offset = 8; /* 4; */ /* kipper */ + uint32_t gain_msb, gain_lsb; + uint32_t intg_t_msb, intg_t_lsb; + uint32_t ll_pck_msb, ll_pck_lsb; + struct s5k3h1gx_i2c_reg_conf tbl[3]; + struct msm_camera_sensor_info *sinfo = s5k3h1gx_pdev->dev.platform_data; + + CDBG("Line:%d s5k3h1gx_write_exp_gain , gain=%d, line=%d\n", + __LINE__, gain, line); + + if (s5k3h1gx_ctrl->sensormode == SENSOR_PREVIEW_MODE) { + + s5k3h1gx_ctrl->my_reg_gain = gain; + s5k3h1gx_ctrl->my_reg_line_count = (uint16_t)line; + + if (sinfo->csi_if) + fl_lines = SENSOR_QTR_SIZE_HEIGHT + + SENSOR_VER_QTR_BLK_LINES; + else + fl_lines = SENSOR_QTR_SIZE_HEIGHT + + s5k3h1gx_ver_qtr_blk_lines_array[cam_mode_sel] ; + + ll_pck = SENSOR_QTR_SIZE_WIDTH + + SENSOR_HRZ_QTR_BLK_PIXELS; + + } else { + + fl_lines = SENSOR_FULL_SIZE_HEIGHT + + SENSOR_VER_FULL_BLK_LINES; + + ll_pck = SENSOR_FULL_SIZE_WIDTH + + SENSOR_HRZ_FULL_BLK_PIXELS; + } + + if (gain > max_legal_gain) + gain = max_legal_gain; + + /* in Q10 */ + line = (line * s5k3h1gx_ctrl->fps_divider); + + CDBG("s5k3h1gx_ctrl->fps_divider = %d\n", + s5k3h1gx_ctrl->fps_divider); + CDBG("fl_lines = %d\n", fl_lines); + CDBG("line = %d\n", line); + if ((fl_lines-offset) < (line / 0x400)) + ll_ratio = (line / (fl_lines - offset)); + else + ll_ratio = 0x400; + CDBG("ll_ratio = %d\n", ll_ratio); + + /* update gain registers */ + CDBG("gain = %d\n", gain); + gain_msb = (gain & 0xFF00) >> 8; + gain_lsb = gain & 0x00FF; + tbl[0].waddr = S5K3H1GX_REG_GROUP_PARAMETER_HOLD; + tbl[0].bdata = S5K3H1GX_GROUP_PARAMETER_HOLD; + tbl[1].waddr = REG_ANALOGUE_GAIN_CODE_GLOBAL_MSB; + tbl[1].bdata = gain_msb; + tbl[2].waddr = REG_ANALOGUE_GAIN_CODE_GLOBAL_LSB; + tbl[2].bdata = gain_lsb; + rc = s5k3h1gx_i2c_write_table(&tbl[0], ARRAY_SIZE(tbl)); + if (rc < 0) + goto write_gain_done; + + ll_pck = ll_pck * ll_ratio; + CDBG("ll_pck/0x400 = %d\n", ll_pck / 0x400); + ll_pck_msb = ((ll_pck / 0x400) & 0xFF00) >> 8; + ll_pck_lsb = (ll_pck 
/ 0x400) & 0x00FF; + tbl[0].waddr = REG_LINE_LENGTH_PCK_MSB; + tbl[0].bdata = ll_pck_msb; + tbl[1].waddr = REG_LINE_LENGTH_PCK_LSB; + tbl[1].bdata = ll_pck_lsb; + rc = s5k3h1gx_i2c_write_table(&tbl[0], ARRAY_SIZE(tbl)-1); + if (rc < 0) + goto write_gain_done; + + line = line / ll_ratio; + CDBG("line = %d\n", line); + intg_t_msb = (line & 0xFF00) >> 8; + intg_t_lsb = (line & 0x00FF); + tbl[0].waddr = REG_COARSE_INTEGRATION_TIME_MSB; + tbl[0].bdata = intg_t_msb; + tbl[1].waddr = REG_COARSE_INTEGRATION_TIME_LSB; + tbl[1].bdata = intg_t_lsb; + tbl[2].waddr = S5K3H1GX_REG_GROUP_PARAMETER_HOLD; + tbl[2].bdata = S5K3H1GX_GROUP_PARAMETER_UNHOLD; + rc = s5k3h1gx_i2c_write_table(&tbl[0], ARRAY_SIZE(tbl)); + +write_gain_done: + return rc; +} + +/* ### this function is not called for userspace ### */ +static int32_t s5k3h1gx_set_pict_exp_gain(uint16_t gain, uint32_t line) +{ + int32_t rc = 0; + rc = s5k3h1gx_write_exp_gain(gain, line); + + return rc; +} /* endof s5k3h1gx_set_pict_exp_gain*/ + +static int32_t initialize_s5k3h1gx_registers(void) +{ + int32_t rc = 0; + struct msm_camera_sensor_info *sinfo = + s5k3h1gx_pdev->dev.platform_data; + + mdelay(5); + + if (sinfo->csi_if) { + if (s5k3h1gx_regs.init_mipi_size > 0) + rc = s5k3h1gx_i2c_write_table( + s5k3h1gx_regs.init_mipi, + s5k3h1gx_regs.init_mipi_size); + } else { + if (s5k3h1gx_regs.init_parallel_size > 0) + rc = s5k3h1gx_i2c_write_table( + s5k3h1gx_regs.init_parallel, + s5k3h1gx_regs.init_parallel_size); + } + + return rc; +} /* end of initialize_s5k3h1gx_ov8m0vc_registers. */ + +static int32_t s5k3h1gx_setting(int rt) +{ + int32_t rc = 0; + /* reconfigure the qtr height to adjust frame rate */ + uint16_t fl_line = 0; + struct msm_camera_sensor_info *sinfo = + s5k3h1gx_pdev->dev.platform_data; + + switch (rt) { + case QTR_SIZE: + pr_err("s5k3h1gx_setting(QTR_SIZE)\n"); + rc = s5k3h1gx_i2c_write_b( + s5k3h1gx_client->addr, 0x0100, 0x00); + + if (sinfo->csi_if) { + rc = s5k3h1gx_i2c_write_table( + s5k3h1gx_regs.qtr_mipi, + s5k3h1gx_regs.qtr_mipi_size); + } else { + rc = s5k3h1gx_i2c_write_table( + s5k3h1gx_regs.qtr_parallel, + s5k3h1gx_regs.qtr_parallel_size); + if (rc < 0) + return rc; + fl_line = SENSOR_QTR_SIZE_HEIGHT + + s5k3h1gx_ver_qtr_blk_lines_array[cam_mode_sel]; + rc = s5k3h1gx_i2c_write_b(s5k3h1gx_client->addr, + REG_FRAME_LENGTH_LINES_MSB, + (fl_line & 0xFF00) >> 8); + if (rc < 0) + return rc; + rc = s5k3h1gx_i2c_write_b(s5k3h1gx_client->addr, + REG_FRAME_LENGTH_LINES_LSB, + fl_line & 0x00FF); + if (rc < 0) + return rc; + } + msleep(200); + rc = s5k3h1gx_i2c_write_b( + s5k3h1gx_client->addr, 0x0100, 0x01); + if (rc < 0) + return rc; + + s5k3h1gx_ctrl->curr_res = QTR_SIZE; + break; + + case FULL_SIZE: + pr_err("s5k3h1gx_setting(FULL_SIZE)\n"); + rc = s5k3h1gx_i2c_write_b( + s5k3h1gx_client->addr, 0x0100, 0x00); + + if (sinfo->csi_if) { + rc = s5k3h1gx_i2c_write_table( + s5k3h1gx_regs.full_mipi, + s5k3h1gx_regs.full_mipi_size); + } else { + rc = s5k3h1gx_i2c_write_table( + s5k3h1gx_regs.full_parallel, + s5k3h1gx_regs.full_parallel_size); + } + if (rc < 0) + return rc; + + msleep(100); + rc = s5k3h1gx_i2c_write_b( + s5k3h1gx_client->addr, 0x0100, 0x01); + + if (rc < 0) + return rc; + + s5k3h1gx_ctrl->curr_res = FULL_SIZE; + break; + + default: + rc = -EFAULT; + return rc; + } + + /* sync OV8810 method to set gain and line */ +#if 1 + /*take raw picture for LSC calibration*/ + if (global_mode) { + pr_info("[LSC calibration] set gain and line !!!!\n"); + s5k3h1gx_write_exp_gain(32, 300); + global_mode = 0; + } +#endif + + return rc; +} /* 
end of s5k3h1gx_setting */ + +static int32_t s5k3h1gx_video_config(int mode) +{ + int32_t rc = 0; + static int pre_sel = 0; + int cur_sel = (cam_mode_sel > 1)?1:0; + s5k3h1gx_ctrl->sensormode = mode; + + CDBG("cam_mode_sel %d cur_sel = %d, pre_sel = %d\n", + cam_mode_sel, cur_sel, pre_sel); + + if (s5k3h1gx_ctrl->curr_res != s5k3h1gx_ctrl->prev_res + || pre_sel != cur_sel + ) { + rc = s5k3h1gx_setting(s5k3h1gx_ctrl->prev_res); + if (rc < 0) + return rc; + } else { + s5k3h1gx_ctrl->curr_res = s5k3h1gx_ctrl->prev_res; + } + s5k3h1gx_ctrl->sensormode = mode; + + pre_sel = cur_sel; + + preview_frame_count = 0; + rc = + s5k3h1gx_write_exp_gain(s5k3h1gx_ctrl->my_reg_gain, + s5k3h1gx_ctrl->my_reg_line_count); + + return rc; + +} /* end of s5k3h1gx_video_config */ + +static int32_t s5k3h1gx_snapshot_config(int mode) +{ + int32_t rc = 0; + s5k3h1gx_ctrl->sensormode = mode; + + if (s5k3h1gx_ctrl->curr_res != s5k3h1gx_ctrl->pict_res) { + rc = s5k3h1gx_setting(s5k3h1gx_ctrl->pict_res); + if (rc < 0) + return rc; + } else { + s5k3h1gx_ctrl->curr_res = s5k3h1gx_ctrl->pict_res; + } + s5k3h1gx_ctrl->sensormode = mode; + + return rc; + +} /*end of s5k3h1gx_snapshot_config*/ + +static int32_t s5k3h1gx_raw_snapshot_config(int mode) +{ + int32_t rc = 0; + s5k3h1gx_ctrl->sensormode = mode; + if (s5k3h1gx_ctrl->curr_res != s5k3h1gx_ctrl->pict_res) { + rc = s5k3h1gx_setting(s5k3h1gx_ctrl->pict_res); + if (rc < 0) + return rc; + } else { + s5k3h1gx_ctrl->curr_res = s5k3h1gx_ctrl->pict_res; + } /* Update sensor resolution */ + + s5k3h1gx_ctrl->sensormode = mode; + + return rc; + +} /*end of s5k3h1gx_raw_snapshot_config*/ + +static int32_t s5k3h1gx_set_sensor_mode(int mode, + int res) +{ + int32_t rc = 0; + + switch (mode) { + case SENSOR_PREVIEW_MODE: + rc = s5k3h1gx_video_config(mode); + break; + + case SENSOR_SNAPSHOT_MODE: + rc = s5k3h1gx_snapshot_config(mode); + break; + + case SENSOR_RAW_SNAPSHOT_MODE: + rc = s5k3h1gx_raw_snapshot_config(mode); + break; + + default: + rc = -EINVAL; + break; + } + + return rc; +} + +static int s5k3h1gx_probe_init_done(const struct msm_camera_sensor_info *data) +{ + int32_t rc = 0; + + pr_info("[Camera] gpio_request(%d, \"s5k3h1gx\")\n", data->sensor_pwd); + rc = gpio_request(data->sensor_pwd, "s5k3h1gx"); + if (!rc) + gpio_direction_output(data->sensor_pwd, 0); + else + pr_err("GPIO (%d) request failed\n", data->sensor_pwd); + + gpio_free(data->sensor_pwd); + + if (data->sensor_reset) { + msleep(5); + rc = gpio_request(data->sensor_reset, "s5k3h1gx"); + if (!rc) + gpio_direction_output(data->sensor_reset, 0); + else + pr_err("GPIO (%d) request faile\n", data->sensor_reset); + gpio_free(data->sensor_reset); + } + + + return 0; +} + +static int32_t s5k3h1gx_power_down(void) +{ + return 0; +} + +static int s5k3h1gx_probe_init_sensor(const struct msm_camera_sensor_info *data) +{ + int32_t rc = 0; + uint16_t chipid = 0; + + pr_info("%s\n", __func__); + + /* Read sensor Model ID: */ + rc = s5k3h1gx_i2c_read(S5K3H1GX_REG_MODEL_ID, &chipid, 2); + if (rc < 0) { + pr_err("read sensor id fail\n"); + goto init_probe_fail; + } else + data->camera_set_source(SECOND_SOURCE); + + /* Compare sensor ID to S5K3H1GX ID: */ + pr_info("%s, Expected id=0x%x\n", __func__, S5K3H1GX_MODEL_ID); + pr_info("%s, Read id=0x%x\n", __func__, chipid); + + if (chipid != S5K3H1GX_MODEL_ID) { + pr_err("sensor model id is incorrect\n"); + rc = -ENODEV; + goto init_probe_fail; + } + + pr_info("s5k3h1gx_probe_init_sensor finishes\n"); + goto init_probe_done; + +init_probe_fail: + s5k3h1gx_probe_init_done(data); 
+init_probe_done: + return rc; +} + +static void s5k3h1gx_setup_af_tbl(void) +{ + int i; + uint16_t s5k3h1gx_nl_region_boundary1 = 3; + uint16_t s5k3h1gx_nl_region_boundary2 = 5; + uint16_t s5k3h1gx_nl_region_code_per_step1 = 40; + uint16_t s5k3h1gx_nl_region_code_per_step2 = 20; + uint16_t s5k3h1gx_l_region_code_per_step = 16; + + s5k3h1gx_pos_tbl[0] = 0; + + for (i = 1; i <= S5K3H1GX_TOTAL_STEPS_NEAR_TO_FAR; i++) { + if (i <= s5k3h1gx_nl_region_boundary1) + s5k3h1gx_pos_tbl[i] = s5k3h1gx_pos_tbl[i-1] + + s5k3h1gx_nl_region_code_per_step1; + else if (i <= s5k3h1gx_nl_region_boundary2) + s5k3h1gx_pos_tbl[i] = s5k3h1gx_pos_tbl[i-1] + + s5k3h1gx_nl_region_code_per_step2; + else + s5k3h1gx_pos_tbl[i] = s5k3h1gx_pos_tbl[i-1] + + s5k3h1gx_l_region_code_per_step; + } +} + +static int32_t +s5k3h1gx_go_to_position(uint32_t lens_pos, uint8_t mask) +{ + int32_t rc = 0; + unsigned char buf[2]; + uint8_t code_val_msb, code_val_lsb; + + code_val_msb = lens_pos >> 4; + code_val_lsb = (lens_pos & 0x000F) << 4; + code_val_lsb |= mask; + + buf[0] = code_val_msb; + buf[1] = code_val_lsb; + rc = s5k3h1gx_i2c_txdata(S5K3H1GX_AF_I2C_ADDR >> 1, buf, 2); + if (rc < 0) + pr_err("i2c_write failed, saddr = 0x%x addr = 0x%x, val =0x%x!\n", + S5K3H1GX_AF_I2C_ADDR >> 1, buf[0], buf[1]); + + return rc; +} + +/*20101011 for LSC calibration*/ +static int s5k3h1gx_QCT_LSC_calibration_set_rawflag(void) +{ + global_mode = 1; + return 1; +} + +static int s5k3h1gx_i2c_read_fuseid(struct sensor_cfg_data *cdata) +{ + + int32_t rc; + unsigned short i, R1, R2, R3; + unsigned short OTP[10]; + + pr_info("%s: sensor OTP information:\n", __func__); + + for (i = 0; i < 10; i++) + OTP[i] = 5; + + rc = s5k3h1gx_i2c_write_b(s5k3h1gx_client->addr, 0x3124, 0x10); + if (rc < 0) + pr_info("%s: i2c_write_b 0x30FB fail\n", __func__); + + rc = s5k3h1gx_i2c_write_b(s5k3h1gx_client->addr, 0x3127, 0xF1); + if (rc < 0) + pr_info("%s: i2c_write_b 0x30FB fail\n", __func__); + + mdelay(4); + + for (i = 0; i < 10; i++) { + rc = s5k3h1gx_i2c_write_b(s5k3h1gx_client->addr, 0x312B, i); + if (rc < 0) + pr_info("%s: i2c_write_b 0x310C fail\n", __func__); + rc = s5k3h1gx_i2c_read_b(s5k3h1gx_client->addr, 0x312C, &R1); + if (rc < 0) + pr_info("%s: i2c_read_b 0x310F fail\n", __func__); + rc = s5k3h1gx_i2c_read_b(s5k3h1gx_client->addr, 0x312D, &R2); + if (rc < 0) + pr_info("%s: i2c_read_b 0x310E fail\n", __func__); + rc = s5k3h1gx_i2c_read_b(s5k3h1gx_client->addr, 0x312E, &R3); + if (rc < 0) + pr_info("%s: i2c_read_b 0x310D fail\n", __func__); + + if ((R3&0x0F) != 0) + OTP[i] = (short)(R3&0x0F); + else if ((R2&0x0F) != 0) + OTP[i] = (short)(R2&0x0F); + else if ((R2>>4) != 0) + OTP[i] = (short)(R2>>4); + else if ((R1&0x0F) != 0) + OTP[i] = (short)(R1&0x0F); + else + OTP[i] = (short)(R1>>4); + + } + pr_info("%s: VenderID=%x,LensID=%x,SensorID=%x%x\n", __func__, + OTP[0], OTP[1], OTP[2], OTP[3]); + pr_info("%s: ModuleFuseID= %x%x%x%x%x%x\n", __func__, + OTP[4], OTP[5], OTP[6], OTP[7], OTP[8], OTP[9]); + + cdata->cfg.fuse.fuse_id_word1 = 0; + cdata->cfg.fuse.fuse_id_word2 = 0; + cdata->cfg.fuse.fuse_id_word3 = (OTP[0]); + cdata->cfg.fuse.fuse_id_word4 = + (OTP[4]<<20) | + (OTP[5]<<16) | + (OTP[6]<<12) | + (OTP[7]<<8) | + (OTP[8]<<4) | + (OTP[9]); + + pr_info("s5k3h1gx: fuse->fuse_id_word1:%d\n", + cdata->cfg.fuse.fuse_id_word1); + pr_info("s5k3h1gx: fuse->fuse_id_word2:%d\n", + cdata->cfg.fuse.fuse_id_word2); + pr_info("s5k3h1gx: fuse->fuse_id_word3:0x%08x\n", + cdata->cfg.fuse.fuse_id_word3); + pr_info("s5k3h1gx: fuse->fuse_id_word4:0x%08x\n", + 
cdata->cfg.fuse.fuse_id_word4); + return 0; +} + +static int s5k3h1gx_sensor_open_init(struct msm_camera_sensor_info *data) +{ + int32_t rc = 0; + struct msm_camera_sensor_info *sinfo = s5k3h1gx_pdev->dev.platform_data; + + pr_info("Calling s5k3h1gx_sensor_open_init\n"); + + down(&s5k3h1gx_sem); + + if (data == NULL) { + pr_info("data is a NULL pointer\n"); + goto init_fail; + } + + s5k3h1gx_ctrl = kzalloc(sizeof(struct s5k3h1gx_ctrl), GFP_KERNEL); + + if (!s5k3h1gx_ctrl) { + rc = -ENOMEM; + goto init_fail; + } + + s5k3h1gx_ctrl->curr_lens_pos = -1; + s5k3h1gx_ctrl->fps_divider = 1 * 0x00000400; + s5k3h1gx_ctrl->pict_fps_divider = 1 * 0x00000400; + s5k3h1gx_ctrl->set_test = TEST_OFF; + s5k3h1gx_ctrl->prev_res = QTR_SIZE; + s5k3h1gx_ctrl->pict_res = FULL_SIZE; + s5k3h1gx_ctrl->curr_res = INVALID_SIZE; + s5k3h1gx_ctrl->reg_update = REG_INIT; + + if (data) + s5k3h1gx_ctrl->sensordata = data; + + /* switch pclk and mclk between main cam and 2nd cam */ + pr_info("doing clk switch (s5k3h1gx)\n"); + + if (data->camera_clk_switch != NULL) + data->camera_clk_switch(); + + msm_camio_probe_on(s5k3h1gx_pdev); + + /* for parallel interface */ + if (!sinfo->csi_if) { + mdelay(20); + msm_camio_camif_pad_reg_reset(); + mdelay(20); + } + pr_info("[Camera] gpio_request(%d, \"s5k3h1gx\")\n", data->sensor_pwd); + rc = gpio_request(data->sensor_pwd, "s5k3h1gx"); + if (!rc) { + gpio_direction_output(data->sensor_pwd, 1); + } else { + pr_err("GPIO (%d) request failed\n", data->sensor_pwd); + goto init_fail; + } + gpio_free(data->sensor_pwd); + msleep(5); + rc = gpio_request(data->sensor_reset, "s5k3h1gx"); + if (!rc) { + gpio_direction_output(data->sensor_reset, 1); + } else { + pr_err("GPIO (%d) request failed\n", data->sensor_reset); + goto init_fail; + } + gpio_free(data->sensor_reset); + msleep(1); + + /* read sensor id */ + rc = s5k3h1gx_probe_init_sensor(data); + + if (rc < 0) + goto init_fail; + + if (!sinfo->csi_if) { + rc = s5k3h1gx_i2c_write_table( + s5k3h1gx_regs.common_parallel, + s5k3h1gx_regs.common_parallel_size); + } + else + { + rc = s5k3h1gx_i2c_write_table( + s5k3h1gx_regs.common_mipi, + s5k3h1gx_regs.common_mipi_size); + } + + if (rc < 0) + goto init_fail; + + rc = s5k3h1gx_i2c_write_b( + s5k3h1gx_client->addr, 0x0100, 0x01); + + /* set up lens position table */ + s5k3h1gx_setup_af_tbl(); + s5k3h1gx_go_to_position(0, 0); + s5k3h1gx_ctrl->curr_lens_pos = 0; + s5k3h1gx_ctrl->curr_step_pos = 0; + + goto init_done; + +init_fail: + pr_err("%s: init_fail\n", __func__); + +init_done: + up(&s5k3h1gx_sem); + pr_info("%s: init_done\n", __func__); + return rc; + +} /* end of s5k3h1gx_sensor_open_init */ + +static int s5k3h1gx_init_client(struct i2c_client *client) +{ + /* Initialize the MSM_CAMI2C Chip */ + init_waitqueue_head(&s5k3h1gx_wait_queue); + return 0; +} + +static const struct i2c_device_id s5k3h1gx_i2c_id[] = { + { "s5k3h1gx", 0}, + { } +}; + +static int s5k3h1gx_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int rc = 0; + pr_info("s5k3h1gx_probe called!\n"); + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + pr_err("i2c_check_functionality failed\n"); + goto probe_failure; + } + + s5k3h1gx_sensorw = kzalloc(sizeof(struct s5k3h1gx_work), GFP_KERNEL); + if (!s5k3h1gx_sensorw) { + pr_err("kzalloc failed.\n"); + rc = -ENOMEM; + goto probe_failure; + } + + i2c_set_clientdata(client, s5k3h1gx_sensorw); + s5k3h1gx_init_client(client); + s5k3h1gx_client = client; + + msleep(50); + + pr_info("s5k3h1gx_probe successed! 
rc = %d\n", rc); + return 0; + +probe_failure: + pr_err("s5k3h1gx_probe failed! rc = %d\n", rc); + return rc; +} + +static int __exit s5k3h1gx_i2c_remove(struct i2c_client *client) +{ + struct s5k3h1gx_work_t *sensorw = i2c_get_clientdata(client); + free_irq(client->irq, sensorw); + deinit_suspend(); + s5k3h1gx_client = NULL; + kfree(sensorw); + return 0; +} + +static struct i2c_driver s5k3h1gx_i2c_driver = { + .id_table = s5k3h1gx_i2c_id, + .probe = s5k3h1gx_i2c_probe, + .remove = __exit_p(s5k3h1gx_i2c_remove), + .driver = { + .name = "s5k3h1gx", + }, +}; + +static const char *S5K3H1GXVendor = "samsung"; +static const char *S5K3H1GXNAME = "S5K3H1GX"; +static const char *S5K3H1GXSize = "8M"; + +static ssize_t sensor_vendor_show( struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t ret = 0; + + sprintf(buf, "%s %s %s\n", S5K3H1GXVendor, S5K3H1GXNAME, S5K3H1GXSize); + ret = strlen(buf) + 1; + + return ret; +} + +DEFINE_MUTEX(s5k3h1gx_cam_mode_lock); + +static ssize_t sensor_read_cam_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t length; + mutex_lock(&s5k3h1gx_cam_mode_lock); + length = sprintf(buf, "%d\n", cam_mode_sel); + mutex_unlock(&s5k3h1gx_cam_mode_lock); + return length; +} + +static ssize_t sensor_set_cam_mode(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + uint32_t tmp = 0; + mutex_lock(&s5k3h1gx_cam_mode_lock); + tmp = buf[0] - 0x30; /* only get the first char */ + cam_mode_sel = tmp; + mutex_unlock(&s5k3h1gx_cam_mode_lock); + return count; +} + +static ssize_t sensor_read_node( struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t length; + length = sprintf(buf, "%d\n", sensor_probe_node); + return length; +} + +static DEVICE_ATTR(sensor, 0444, sensor_vendor_show, NULL); +static DEVICE_ATTR(cam_mode, 0644, sensor_read_cam_mode, sensor_set_cam_mode); +static DEVICE_ATTR(node, 0444, sensor_read_node, NULL); + +static struct kobject *android_s5k3h1gx = NULL; + +static int s5k3h1gx_sysfs_init(void) +{ + int ret = 0; + pr_info("s5k3h1gx:kobject creat and add\n"); + android_s5k3h1gx = kobject_create_and_add("android_camera", NULL); + if (android_s5k3h1gx == NULL) { + pr_info("s5k3h1gx_sysfs_init: subsystem_register failed\n"); + ret = -ENOMEM; + return ret ; + } + pr_info("s5k3h1gx:sysfs_create_file\n"); + ret = sysfs_create_file(android_s5k3h1gx, &dev_attr_sensor.attr); + if (ret) { + pr_info("s5k3h1gx_sysfs_init: sysfs_create_file failed\n"); + ret = -EFAULT; + goto error; + } + + ret = sysfs_create_file(android_s5k3h1gx, &dev_attr_cam_mode.attr); + if (ret) { + pr_info("s5k3h1gx_sysfs_init: dev_attr_cam_mode failed\n"); + ret = -EFAULT; + goto error; + } + ret = sysfs_create_file(android_s5k3h1gx, &dev_attr_node.attr); + if (ret) { + pr_info("s5k3h1gx_sysfs_init: dev_attr_node failed\n"); + ret = -EFAULT; + goto error; + } + + return ret; + +error: + kobject_del(android_s5k3h1gx); + return ret; +} + +static int32_t +s5k3h1gx_move_focus(int direction, int32_t num_steps) +{ + uint16_t s5k3h1gx_sw_damping_time_wait = 1; + uint16_t s5k3h1gx_damping_threshold = 10; + uint8_t s5k3h1gx_mode_mask = 0x02; + int16_t step_direction; + int16_t curr_lens_pos; + int16_t curr_step_pos; + int16_t dest_lens_pos; + int16_t dest_step_pos; + int16_t target_dist; + int16_t small_step; + int16_t next_lens_pos; + int16_t time_wait_per_step; + int32_t rc = 0, time_wait; + int8_t s5k3h1gx_sw_damping_required = 0; + uint16_t s5k3h1gx_max_fps_val; + struct msm_camera_sensor_info *sinfo = 
s5k3h1gx_pdev->dev.platform_data; + + if (num_steps > S5K3H1GX_TOTAL_STEPS_NEAR_TO_FAR) + num_steps = S5K3H1GX_TOTAL_STEPS_NEAR_TO_FAR; + else if (num_steps == 0) + return -EINVAL; + + if (direction == MOVE_NEAR) + step_direction = 1; + else if (direction == MOVE_FAR) + step_direction = -1; + else + return -EINVAL; + + /* need to decide about default position and power supplied + * at start up and reset */ + curr_lens_pos = s5k3h1gx_ctrl->curr_lens_pos; + curr_step_pos = s5k3h1gx_ctrl->curr_step_pos; + + if (curr_lens_pos < s5k3h1gx_ctrl->init_curr_lens_pos) + curr_lens_pos = s5k3h1gx_ctrl->init_curr_lens_pos; + + dest_step_pos = curr_step_pos + (step_direction * num_steps); + + if (dest_step_pos < 0) + dest_step_pos = 0; + else if (dest_step_pos > S5K3H1GX_TOTAL_STEPS_NEAR_TO_FAR) + dest_step_pos = S5K3H1GX_TOTAL_STEPS_NEAR_TO_FAR; + + if (dest_step_pos == s5k3h1gx_ctrl->curr_step_pos) + return rc; + + dest_lens_pos = s5k3h1gx_pos_tbl[dest_step_pos]; + target_dist = step_direction * (dest_lens_pos - curr_lens_pos); + + if (sinfo->csi_if) + s5k3h1gx_max_fps_val = S5K3H1GX_MAX_FPS; + else + s5k3h1gx_max_fps_val = S5K3H1GX_MAX_FPS_PARALLEL; + + + /* HW damping */ + if (step_direction < 0 + && target_dist >= s5k3h1gx_pos_tbl[s5k3h1gx_damping_threshold]) { + s5k3h1gx_sw_damping_required = 1; + time_wait = 1000000 + / s5k3h1gx_max_fps_val + - S5K3H1GX_SW_DAMPING_STEP * s5k3h1gx_sw_damping_time_wait * 1000; + } else + time_wait = 1000000 / s5k3h1gx_max_fps_val; + + time_wait_per_step = (int16_t) (time_wait / target_dist); + + if (time_wait_per_step >= 800) + /* ~800 */ + s5k3h1gx_mode_mask = 0x5; + else if (time_wait_per_step >= 400) + /* ~400 */ + s5k3h1gx_mode_mask = 0x4; + else if (time_wait_per_step >= 200) + /* 200~400 */ + s5k3h1gx_mode_mask = 0x3; + else if (time_wait_per_step >= 100) + /* 100~200 */ + s5k3h1gx_mode_mask = 0x2; + else if (time_wait_per_step >= 50) + /* 50~100 */ + s5k3h1gx_mode_mask = 0x1; + else { + if (time_wait >= 17600) + s5k3h1gx_mode_mask = 0x0D; + else if (time_wait >= 8800) + s5k3h1gx_mode_mask = 0x0C; + else if (time_wait >= 4400) + s5k3h1gx_mode_mask = 0x0B; + else if (time_wait >= 2200) + s5k3h1gx_mode_mask = 0x0A; + else + s5k3h1gx_mode_mask = 0x09; + } + + if (s5k3h1gx_sw_damping_required) { + small_step = (uint16_t) target_dist / S5K3H1GX_SW_DAMPING_STEP; + if ((target_dist % S5K3H1GX_SW_DAMPING_STEP) != 0) + small_step = small_step + 1; + + for (next_lens_pos = curr_lens_pos + (step_direction*small_step); + (step_direction*next_lens_pos) <= (step_direction*dest_lens_pos); + next_lens_pos += (step_direction*small_step)) { + rc = s5k3h1gx_go_to_position(next_lens_pos, s5k3h1gx_mode_mask); + if (rc < 0) { + CDBG("s5k3h1gx_go_to_position Failed in Move Focus!!!\n"); + return rc; + } + curr_lens_pos = next_lens_pos; + mdelay(s5k3h1gx_sw_damping_time_wait); + } + + if (curr_lens_pos != dest_lens_pos) { + rc = s5k3h1gx_go_to_position(dest_lens_pos, s5k3h1gx_mode_mask); + if (rc < 0) { + pr_err("s5k3h1gx_go_to_position Failed in Move Focus!!!\n"); + return rc; + } + mdelay(s5k3h1gx_sw_damping_time_wait); + } + } else { + rc = s5k3h1gx_go_to_position(dest_lens_pos, s5k3h1gx_mode_mask); + if (rc < 0) { + pr_err("s5k3h1gx_go_to_position Failed in Move Focus!!!\n"); + return rc; + } + } + + s5k3h1gx_ctrl->curr_lens_pos = dest_lens_pos; + s5k3h1gx_ctrl->curr_step_pos = dest_step_pos; + + return rc; +} + +static int32_t +s5k3h1gx_set_default_focus(void) +{ + int32_t rc = 0; + if (s5k3h1gx_ctrl->curr_step_pos != 0) { + rc = s5k3h1gx_move_focus(MOVE_FAR, 
s5k3h1gx_ctrl->curr_step_pos); + if (rc < 0) { + pr_err("s5k3h1gx_set_default_focus Failed!!!\n"); + return rc; + } + } else { + rc = s5k3h1gx_go_to_position(0, 0x02); + if (rc < 0) { + pr_err("s5k3h1gx_go_to_position Failed!!!\n"); + return rc; + } + } + + s5k3h1gx_ctrl->curr_lens_pos = 0; + s5k3h1gx_ctrl->init_curr_lens_pos = 0; + s5k3h1gx_ctrl->curr_step_pos = 0; + + return rc; +} + +uint8_t s5k3h1gx_preview_skip_frame(void) +{ + if (s5k3h1gx_ctrl->sensormode == SENSOR_PREVIEW_MODE + && preview_frame_count < 2) { + preview_frame_count++; + return 1; + } + return 0; +} + +int s5k3h1gx_sensor_config(void __user *argp) +{ + struct sensor_cfg_data cdata; + long rc = 0; + + if (copy_from_user(&cdata, + (void *)argp, + sizeof(struct sensor_cfg_data))) + return -EFAULT; + + down(&s5k3h1gx_sem); + + CDBG("s5k3h1gx_sensor_config: cfgtype = %d\n", + cdata.cfgtype); + switch (cdata.cfgtype) { + case CFG_GET_PICT_FPS: + s5k3h1gx_get_pict_fps( + cdata.cfg.gfps.prevfps, + &(cdata.cfg.gfps.pictfps)); + + if (copy_to_user((void *)argp, + &cdata, + sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_GET_PREV_L_PF: + cdata.cfg.prevl_pf = + s5k3h1gx_get_prev_lines_pf(); + + if (copy_to_user((void *)argp, + &cdata, + sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_GET_PREV_P_PL: + cdata.cfg.prevp_pl = + s5k3h1gx_get_prev_pixels_pl(); + + if (copy_to_user((void *)argp, + &cdata, + sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_GET_PICT_L_PF: + cdata.cfg.pictl_pf = + s5k3h1gx_get_pict_lines_pf(); + + if (copy_to_user((void *)argp, + &cdata, + sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_GET_PICT_P_PL: + cdata.cfg.pictp_pl = + s5k3h1gx_get_pict_pixels_pl(); + + if (copy_to_user((void *)argp, + &cdata, + sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_GET_PICT_MAX_EXP_LC: + cdata.cfg.pict_max_exp_lc = + s5k3h1gx_get_pict_max_exp_lc(); + + if (copy_to_user((void *)argp, + &cdata, + sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + break; + + case CFG_SET_FPS: + case CFG_SET_PICT_FPS: + rc = s5k3h1gx_set_fps(&(cdata.cfg.fps)); + break; + + case CFG_SET_EXP_GAIN: + rc = s5k3h1gx_write_exp_gain( + cdata.cfg.exp_gain.gain, + cdata.cfg.exp_gain.line); + break; + + case CFG_SET_PICT_EXP_GAIN: + rc = s5k3h1gx_set_pict_exp_gain( + cdata.cfg.exp_gain.gain, + cdata.cfg.exp_gain.line); + break; + + case CFG_SET_MODE: + rc = s5k3h1gx_set_sensor_mode(cdata.mode, + cdata.rs); + break; + + case CFG_PWR_DOWN: + rc = s5k3h1gx_power_down(); + break; + + case CFG_MOVE_FOCUS: + rc = + s5k3h1gx_move_focus( + cdata.cfg.focus.dir, + cdata.cfg.focus.steps); + break; + + case CFG_SET_DEFAULT_FOCUS: + rc = + s5k3h1gx_set_default_focus(); + break; + + case CFG_I2C_IOCTL_R_OTP:{ + pr_info("Line:%d CFG_I2C_IOCTL_R_OTP \n", __LINE__); + rc = s5k3h1gx_i2c_read_fuseid(&cdata); + if (copy_to_user(argp, &cdata, sizeof(struct sensor_cfg_data))) + rc = -EFAULT; + } + break; + + /*20101011 for QCT mesh LSC calibration*/ + case CFG_SET_QCT_LSC_RAW_CAPTURE: + pr_info("Line:%d : CFG_SET_QCT_LSC_RAW_CAPTURE \n", __LINE__); + rc = s5k3h1gx_QCT_LSC_calibration_set_rawflag(); + break; + + default: + rc = -EFAULT; + break; + } + + prevent_suspend(); + up(&s5k3h1gx_sem); + + return rc; +} + +static int s5k3h1gx_sensor_release(void) +{ + int rc = -EBADF; + + down(&s5k3h1gx_sem); + pr_info("%s, %d\n", __func__, __LINE__); + + if (s5k3h1gx_ctrl) { + rc = gpio_request( + s5k3h1gx_ctrl->sensordata->sensor_pwd, "s5k3h1gx"); + if (!rc) + 
gpio_direction_output( + s5k3h1gx_ctrl->sensordata->sensor_pwd, 0); + else + pr_err("GPIO (%d) request failed\n", + s5k3h1gx_ctrl->sensordata->sensor_pwd); + gpio_free(s5k3h1gx_ctrl->sensordata->sensor_pwd); + } + + msleep(5); + rc = gpio_request( + s5k3h1gx_ctrl->sensordata->sensor_reset, "s5k3h1gx"); + if (!rc) + gpio_direction_output( + s5k3h1gx_ctrl->sensordata->sensor_reset, 0); + else + pr_err("GPIO (%d) request faile\n", + s5k3h1gx_ctrl->sensordata->sensor_reset); + gpio_free(s5k3h1gx_ctrl->sensordata->sensor_reset); + + + msm_camio_probe_off(s5k3h1gx_pdev); + + if (s5k3h1gx_ctrl != NULL) { + kfree(s5k3h1gx_ctrl); + s5k3h1gx_ctrl = NULL; + } + + allow_suspend(); + pr_info("s5k3h1gx_release completed\n"); + up(&s5k3h1gx_sem); + + return rc; +} + +static int s5k3h1gx_sensor_probe(struct msm_camera_sensor_info *info, + struct msm_sensor_ctrl *s) +{ + int rc = 0; + pr_info("s5k3h1gx_sensor_probe()\n"); + + rc = i2c_add_driver(&s5k3h1gx_i2c_driver); + if (rc < 0 || s5k3h1gx_client == NULL) { + rc = -ENOTSUPP; + goto probe_fail; + } + + pr_info("s5k3h1 s->node %d\n", s->node); + sensor_probe_node = s->node; + + /* switch PCLK and MCLK to Main cam */ + pr_info("s5k3h1gx: s5k3h1gx_sensor_probe: switch clk\n"); + if (info->camera_clk_switch != NULL) + info->camera_clk_switch(); + + mdelay(20); + + pr_info("[Camera] gpio_request(%d, \"s5k3h1gx\")\n", info->sensor_pwd); + rc = gpio_request(info->sensor_pwd, "s5k3h1gx"); + if (!rc) + gpio_direction_output(info->sensor_pwd, 1); + else + pr_err("GPIO (%d) request failed\n", info->sensor_pwd); + gpio_free(info->sensor_pwd); + + msleep(100); + + /* read sensor id */ + rc = s5k3h1gx_probe_init_sensor(info); + + if (rc < 0) + goto probe_fail; + + /* Initialize Sensor registers */ + rc = initialize_s5k3h1gx_registers(); + if (rc < 0) + return rc; + + if (info->camera_main_set_probe != NULL) + info->camera_main_set_probe(true); + + init_suspend(); + s->s_init = s5k3h1gx_sensor_open_init; + s->s_release = s5k3h1gx_sensor_release; + s->s_config = s5k3h1gx_sensor_config; + s5k3h1gx_probe_init_done(info); + s5k3h1gx_sysfs_init(); + + pr_info("%s: s5k3h1gx_probe_init_done %d\n", __func__, __LINE__); + goto probe_done; + +probe_fail: + pr_err("SENSOR PROBE FAILS!\n"); +probe_done: + return rc; + +} + +static int __s5k3h1gx_probe(struct platform_device *pdev) +{ + struct msm_camera_sensor_info *sdata = pdev->dev.platform_data; + pr_info("s5k3h1gx_probe\n"); + s5k3h1gx_pdev = pdev; + if (sdata->camera_main_get_probe != NULL) { + if (sdata->camera_main_get_probe()) { + pr_info("__s5k3h1gx_probe camera main get probed already.\n"); + return 0; + } + } + return msm_camera_drv_start(pdev, s5k3h1gx_sensor_probe); +} + +static struct platform_driver msm_camera_driver = { + .probe = __s5k3h1gx_probe, + .driver = { + .name = "msm_camera_s5k3h1gx", + .owner = THIS_MODULE, + }, +}; + +static int __init s5k3h1gx_init(void) +{ + return platform_driver_register(&msm_camera_driver); +} + +module_init(s5k3h1gx_init); diff --git a/drivers/media/video/msm/s5k3h1gx.h b/drivers/media/video/msm/s5k3h1gx.h new file mode 100644 index 0000000000000..cfd75832684e4 --- /dev/null +++ b/drivers/media/video/msm/s5k3h1gx.h @@ -0,0 +1,93 @@ +/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Code Aurora Forum nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * Alternatively, provided that this notice is retained in full, this software + * may be relicensed by the recipient under the terms of the GNU General Public + * License version 2 ("GPL") and only version 2, in which case the provisions of + * the GPL apply INSTEAD OF those given above. If the recipient relicenses the + * software under the GPL, then the identification text in the MODULE_LICENSE + * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a + * recipient changes the license terms to the GPL, subsequent recipients shall + * not relicense under alternate licensing terms, including the BSD or dual + * BSD/GPL terms. In addition, the following license statement immediately + * below and between the words START and END shall also then apply when this + * software is relicensed under the GPL: + * + * START + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 and only version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * END + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef S5K3H1GX_H +#define S5K3H1GX_H + +#include +#include + +struct s5k3h1gx_i2c_reg_conf { + unsigned short waddr; + unsigned char bdata; +}; + +struct s5k3h1gx_reg_t { + struct s5k3h1gx_i2c_reg_conf *init_mipi; + uint16_t init_mipi_size; + struct s5k3h1gx_i2c_reg_conf *init_parallel; + uint16_t init_parallel_size; + + struct s5k3h1gx_i2c_reg_conf *common_mipi; + uint16_t common_mipi_size; + struct s5k3h1gx_i2c_reg_conf *common_parallel; + uint16_t common_parallel_size; + + struct s5k3h1gx_i2c_reg_conf *qtr_mipi; + uint16_t qtr_mipi_size; + struct s5k3h1gx_i2c_reg_conf *qtr_parallel; + uint16_t qtr_parallel_size; + + struct s5k3h1gx_i2c_reg_conf *full_mipi; + uint16_t full_mipi_size; + struct s5k3h1gx_i2c_reg_conf *full_parallel; + uint16_t full_parallel_size; +}; + +extern struct s5k3h1gx_reg_t s5k3h1gx_regs; + +#endif + diff --git a/drivers/media/video/msm/s5k3h1gx_reg.c b/drivers/media/video/msm/s5k3h1gx_reg.c new file mode 100644 index 0000000000000..3854828c6ba8e --- /dev/null +++ b/drivers/media/video/msm/s5k3h1gx_reg.c @@ -0,0 +1,420 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ + +#include "s5k3h1gx.h" + + +struct s5k3h1gx_i2c_reg_conf s5k3h1gx_init_settings_array_mipi[] = +{ + /* Because we will turn off power source after probe, there is no need to set register here */ +}; + +struct s5k3h1gx_i2c_reg_conf s5k3h1gx_init_settings_array_parallel[] = +{ + /* Because we will turn off power source after probe, there is no need to set register here */ +}; + +struct s5k3h1gx_i2c_reg_conf s5k3h1gx_common_settings_array_mipi[] = +{ + { 0x0100 , 0x00 }, + { 0x3091 , 0x00 }, + { 0x310E , 0x00 }, + { 0x0111 , 0x01 }, + { 0x3098 , 0xAB }, + { 0x309A , 0x01 }, + { 0x310D , 0xC6 }, + { 0x30C3 , 0x40 }, + { 0x308E , 0x05 }, + { 0x308F , 0x88 }, + { 0x30BB , 0x02 }, + { 0x30C7 , 0x1A }, + { 0x30BC , 0x38 }, + { 0x30BD , 0x40 }, + { 0x3110 , 0x70 }, + { 0x3111 , 0x80 }, + { 0x3112 , 0x7B }, + { 0x3113 , 0xC0 }, + { 0x3000 , 0x08 }, + { 0x3001 , 0x05 }, + { 0x3002 , 0x0D }, + { 0x3003 , 0x21 }, + { 0x3004 , 0x62 }, + { 0x3005 , 0x0B }, + { 0x3006 , 0x6D }, + { 0x3007 , 0x02 }, + { 0x3008 , 0x62 }, + { 0x3009 , 0x62 }, + { 0x300A , 0x41 }, + { 0x300B , 0x10 }, + { 0x300C , 0x21 }, + { 0x300D , 0x04 }, + { 0x307E , 0x03 }, + { 0x307F , 0xA5 }, + { 0x3080 , 0x04 }, + { 0x3081 , 0x29 }, + { 0x3082 , 0x03 }, + { 0x3083 , 0x21 }, + { 0x3011 , 0x5F }, + { 0x3156 , 0xE2 }, + { 0x3027 , 0x0E }, + { 0x300f , 0x02 }, + { 0x3072 , 0x13 }, + { 0x3073 , 0x61 }, + { 0x3074 , 0x92 }, + { 0x3075 , 0x10 }, + { 0x3076 , 0xA2 }, + { 0x3077 , 0x02 }, + { 0x3078 , 0x91 }, + { 0x3079 , 0x91 }, + { 0x307A , 0x61 }, + { 0x307B , 0x18 }, + { 0x307C , 0x61 }, + { 0x3010 , 0x10 }, + { 0x3017 , 0x74 }, + { 0x3018 , 0x00 }, + { 0x3020 , 0x02 }, + { 0x3021 , 0x24 }, + { 0x3023 , 0x40 }, + { 0x3024 , 0x08 }, + { 0x3025 , 0x08 }, + { 0x301C , 0xD4 }, + { 0x315D , 0x00 }, + { 0x3053 , 0xCF }, + { 0x3054 , 0x00 }, + { 0x3055 , 0x35 }, + { 0x3062 , 0x04 }, + { 0x3063 , 0x38 }, + { 0x3016 , 0x2c }, + { 0x3157 , 0x02 }, + { 0x3158 , 0x00 }, + { 0x315B , 0x02 }, + { 0x315C , 0x00 }, + { 0x301B , 0x04 }, + { 0x301A , 0xC4 }, + { 0x302d , 0x19 }, + { 0x302b , 0x04 }, + { 0x0305 , 0x04 }, /* pre_pll_clk_div = 4 */ + { 0x0306 , 0x00 }, /* pll_multiplier */ + { 0x0307 , 0x66 }, /* pll_multiplier = 102 */ + { 0x0303 , 0x01 }, /* vt_sys_clk_div = 1 */ + { 0x0301 , 0x05 }, /* vt_pix_clk_div = 5 */ + { 0x030B , 0x01 }, /* op_sys_clk_div = 1 */ + { 0x0309 , 0x05 }, /* op_pix_clk_div = 5 */ +}; + +struct s5k3h1gx_i2c_reg_conf s5k3h1gx_common_settings_array_parallel[] = +{ + { 0x0100 , 0x00 }, + /* MIPI/CCP/Parallel Setting */ + { 0x3091 , 0x00 }, + { 0x3065 , 0x15 }, /* sync mode */ + { 0x310E , 0x08 }, /* reg_sel 08h:parallel / 04h: CCP / 00h : MIPI */ + { 0x0111 , 0x01 }, /* CCP2_signalling_mode */ + { 0x308E , 0x05 }, + { 0x308F , 0x88 }, + /* Manufacture Setting */ + { 0x3000 , 0x08 }, + { 0x3001 , 0x05 }, + { 0x3002 , 0x0D }, + { 0x3003 , 0x21 }, + { 0x3004 , 0x62 }, + { 0x3005 , 0x0B }, + { 0x3006 , 0x6D }, + { 0x3007 , 0x02 }, + { 0x3008 , 0x62 }, + { 0x3009 , 0x62 }, + { 0x300A , 0x41 }, + { 0x300B , 0x10 }, + { 0x300C , 0x21 }, + { 0x300D , 0x04 }, + { 0x307E , 0x03 }, + { 0x307F , 0xA5 }, + { 0x3080 , 0x04 }, + { 0x3081 , 0x29 }, + { 0x3082 , 0x03 }, + { 0x3083 , 0x21 }, + { 0x3011 , 0x5F }, + { 0x3156 , 0xE2 }, + /* { 0x3027 , 0x0E }, */ + { 0x300f , 0x02 }, + { 0x3072 , 0x13 }, + { 0x3073 , 0x61 }, + { 0x3074 , 0x92 }, + { 0x3075 , 0x10 }, + { 0x3076 , 0xA2 }, + { 0x3077 , 0x02 }, + { 0x3078 , 0x91 }, + { 0x3079 , 0x91 }, + { 0x307A , 0x61 }, + { 0x307B , 0x18 }, + { 0x307C , 0x61 }, + { 0x3010 , 0x10 }, + { 
0x3017 , 0x74 }, + { 0x3018 , 0x00 }, + { 0x3020 , 0x02 }, + { 0x3021 , 0x24 }, + { 0x3023 , 0x40 }, + { 0x3024 , 0x08 }, + { 0x3025 , 0x08 }, + { 0x301C , 0xD4 }, + { 0x315D , 0x00 }, + { 0x3053 , 0xCF }, + { 0x3054 , 0x00 }, + { 0x3055 , 0x35 }, + { 0x3062 , 0x04 }, + { 0x3063 , 0x38 }, + { 0x3016 , 0x2c }, + { 0x3157 , 0x02 }, + { 0x3158 , 0x00 }, + { 0x315B , 0x02 }, + { 0x315C , 0x00 }, + { 0x301A , 0xC4 }, + { 0x301B , 0x04 }, + { 0x302d , 0x19 }, + { 0x302b , 0x04 }, + { 0x310d , 0xe6 }, +}; + +struct s5k3h1gx_i2c_reg_conf s5k3h1gx_qtr_settings_array_mipi[] = +{ + { 0x30CC , 0xA0 }, /* DPHY_band_ctrl 560 ~ 640Mbps */ + { 0x0344 , 0x00 }, /* Xaddrstart 0d */ + { 0x0345 , 0x00 }, + { 0x0346 , 0x00 }, /* Yaddrstart 0d */ + { 0x0347 , 0x00 }, + { 0x0348 , 0x0C }, /* Xaddrend 3279d */ + { 0x0349 , 0xCf }, + { 0x034A , 0x09 }, /* Yaddrend 2463d */ + { 0x034B , 0x9F }, + { 0x0381 , 0x01 }, /* x_even_inc = 1 */ + { 0x0383 , 0x01 }, /* x_odd_inc = 1 */ + { 0x0385 , 0x01 }, /* y_even_inc = 1 */ + { 0x0387 , 0x03 }, /* y_odd_inc = 3 */ + { 0x034C , 0x06 }, /* x_output_size = 1640 */ + { 0x034D , 0x68 }, + { 0x034E , 0x04 }, /* y_output_size = 1232 */ + { 0x034F , 0xD0 }, + { 0x0200 , 0x02 }, /* fine integration time */ + { 0x0201 , 0x50 }, + { 0x0202 , 0x03 }, /* Coarse integration time */ + { 0x0203 , 0xA0 }, + { 0x0204 , 0x00 }, /* Analog gain */ + { 0x0205 , 0x20 }, + { 0x0342 , 0x0D }, /* Line_length_pck 3470d */ + { 0x0343 , 0x8E }, + { 0x0340 , 0x04 }, /* Frame_length_lines 1248d */ + { 0x0341 , 0xE0 }, + { 0x300E , 0xED }, /* Reserved */ + { 0x3085 , 0x00 }, /* Reserved */ + { 0x301D , 0x81 }, /* Reserved */ + { 0x3086 , 0x03 }, /* Reserved */ + { 0x3087 , 0x34 }, /* Reserved */ + { 0x3065 , 0x15 }, /* Reserved */ + { 0x3028 , 0x40 }, /* Reserved */ + { 0x0100 , 0x01 }, +}; + +struct s5k3h1gx_i2c_reg_conf s5k3h1gx_qtr_settings_array_parallel[] = +{ + /* PLL settings MCLK:24MHz,vt_pix_clk_freq_mhz=130.2MHz,op_sys_clk_freq_mhz=65.1MHz */ + { 0x0305 , 0x08 }, /* pre_pll_clk_div = 8 */ + { 0x0306 , 0x01 }, /* pll_multiplier */ + { 0x0307 , 0x40 }, /* pll_multiplier = 320 */ + { 0x0303 , 0x01 }, /* vt_sys_clk_div = 1 */ + { 0x0301 , 0x05 }, /* vt_pix_clk_div = 5 */ + { 0x030B , 0x01 }, /* op_sys_clk_div = 1 */ + { 0x0309 , 0x0A }, /* op_pix_clk_div = 10 */ + /* DBLR Clock setting = 96Mhz = vt_pix_clk_freq_mhz/2 */ + { 0x3027 , 0x7E }, + + /* Readout H:1/2 SubSampling binning, V:1/2 SubSampling binning */ + { 0x0344 , 0x00 }, /* X addr start 0d */ + { 0x0345 , 0x00 }, + { 0x0346 , 0x00 }, /* Y addr start 0d */ + { 0x0347 , 0x00 }, + { 0x0348 , 0x0C }, /* X addr end 3279d */ + { 0x0349 , 0xCF }, + { 0x034A , 0x09 }, /* Y addr end 2463d */ + { 0x034B , 0x9F }, + { 0x0381 , 0x01 }, /* x_even_inc = 1 */ + { 0x0383 , 0x01 }, /* x_odd_inc = 1 */ + { 0x0385 , 0x01 }, /* y_even_inc = 1 */ + { 0x0387 , 0x03 }, /* y_odd_inc = 3 */ + /* ------------- */ + { 0x0401 , 0x01 }, /* Derating_en = 1 (disable) */ + { 0x0405 , 0x10 }, + { 0x0700 , 0x05 }, /* fifo_threshold = 1328 */ + { 0x0701 , 0x30 }, + /* ------------- */ + { 0x034C , 0x06 }, /* x_output_size = 1640 */ + { 0x034D , 0x68 }, + { 0x034E , 0x04 }, /* y_output_size = 1232 */ + { 0x034F , 0xD0 }, + { 0x0200 , 0x03 }, /* fine integration time */ + { 0x0201 , 0x50 }, + /* ------------- */ + { 0x0202 , 0x03 }, /* Coarse integration time */ + { 0x0203 , 0xA0 }, /* DB */ + /* ------------- */ + { 0x0204 , 0x00 }, /* Analog gain */ + { 0x0205 , 0x20 }, + { 0x0342 , 0x0D }, /* Line_length_pck 3470d */ + { 0x0343 , 0x8E }, + { 0x0340 , 0x07 }, /* 
Frame_length_lines 1843d */ + { 0x0341 , 0x33 }, + /* Manufacture Setting */ + { 0x300E , 0xED }, + { 0x3085 , 0x00 }, + { 0x301D , 0x81 }, + { 0x3028 , 0x40 }, + { 0x3086 , 0x03 }, + { 0x3087 , 0x34 }, + { 0x3065 , 0x15 }, + /* ------------- */ + { 0x310C , 0x50 }, /* pclk invert */ + { 0x3117 , 0x0F }, /* H/V sync driving strength 6mA */ + { 0x3118 , 0xF0 }, /* parallel data driving strength 6mA */ + /* ------------- */ + + /*{ 0x0100 , 0x01 },*/ +}; + +struct s5k3h1gx_i2c_reg_conf s5k3h1gx_full_settings_array_mipi[] = +{ + { 0x30CC , 0xA0 }, /* DPHY_band_ctrl 560 ~ 640Mbps */ + { 0x0344 , 0x00 }, /* X addr start 0d */ + { 0x0345 , 0x00 }, + { 0x0346 , 0x00 }, /* Y addr start 0d */ + { 0x0347 , 0x00 }, + { 0x0348 , 0x0C }, /* X addr end 3279d */ + { 0x0349 , 0xCf }, + { 0x034A , 0x09 }, /* Y addr end 2463d */ + { 0x034B , 0x9F }, + { 0x0381 , 0x01 }, /* x_even_inc = 1 */ + { 0x0383 , 0x01 }, /* x_odd_inc = 1 */ + { 0x0385 , 0x01 }, /* y_even_inc = 1 */ + { 0x0387 , 0x01 }, /* y_odd_inc = 1 */ + { 0x034C , 0x0C }, /* x_output_size = 3280 */ + { 0x034D , 0xD0 }, + { 0x034E , 0x09 }, /* y_output_size = 2464 */ + { 0x034F , 0xA0 }, + { 0x0200 , 0x02 }, /* fine integration time */ + { 0x0201 , 0x50 }, + { 0x0202 , 0x04 }, /* Coarse integration time */ + { 0x0203 , 0xE7 }, + { 0x0204 , 0x00 }, /* Analog gain */ + { 0x0205 , 0x20 }, + { 0x0342 , 0x0D }, /* Line_length_pck */ + { 0x0343 , 0x8E }, + { 0x0340 , 0x09 }, /* Frame_length_lines */ + { 0x0341 , 0xB0 }, + { 0x300E , 0xE9 }, /* Reserved */ + { 0x3085 , 0x01 }, /* Reserved */ + { 0x301D , 0x01 }, /* Reserved */ + { 0x3086 , 0x03 }, /* Reserved */ + { 0x3087 , 0x34 }, /* Reserved */ + { 0x3065 , 0x15 }, /* Reserved */ + { 0x3028 , 0x41 }, /* Reserved */ + { 0x0100 , 0x01 }, +}; + +struct s5k3h1gx_i2c_reg_conf s5k3h1gx_full_settings_array_parallel[] = +{ + /* PLL settings MCLK:24MHz,vt_pix_clk_freq_mhz=96MHz,op_sys_clk_freq_mhz=96MHz */ + { 0x0305 , 0x04 }, /* pre_pll_clk_div = 4 */ + { 0x0306 , 0x00 }, /* pll_multiplier */ + { 0x0307 , 0xA0 }, /* pll_multiplier = 160 */ + { 0x0303 , 0x01 }, /* vt_sys_clk_div = 1 */ + { 0x0301 , 0x0A }, /* vt_pix_clk_div = 10 */ + { 0x030B , 0x01 }, /* op_sys_clk_div = 1 */ + { 0x0309 , 0x0A }, /* op_pix_clk_div = 10 */ + /* DBLR Clock setting = 96Mhz = vt_pix_clk_freq_mhz */ + { 0x3027 , 0x3E }, + /* Readout Full */ + { 0x0344 , 0x00 }, /* X addr start 0d */ + { 0x0345 , 0x00 }, + { 0x0346 , 0x00 }, /* Y addr start 0d */ + { 0x0347 , 0x00 }, + { 0x0348 , 0x0C }, /* X addr end 3279d */ + { 0x0349 , 0xCF }, + { 0x034A , 0x09 }, /* Y addr end 2463d */ + { 0x034B , 0x9F }, + { 0x0381 , 0x01 }, /* x_even_inc = 1 */ + { 0x0383 , 0x01 }, /* x_odd_inc = 1 */ + { 0x0385 , 0x01 }, /* y_even_inc = 1 */ + { 0x0387 , 0x01 }, /* y_odd_inc = 1 */ + /* ------------- */ + { 0x0401 , 0x00 }, /* Scaler OFF */ + { 0x0405 , 0x10 }, /* Scaling ratio 16/16 */ + { 0x0700 , 0x03 }, /* fifo_threshold = 818d */ + { 0x0701 , 0x32 }, + /* ------------- */ + { 0x034C , 0x0C }, /* x_output_size = 3280 */ + { 0x034D , 0xD0 }, + { 0x034E , 0x09 }, /* y_output_size = 2464 */ + { 0x034F , 0xA0 }, + { 0x0200 , 0x03 }, /* fine integration time */ + { 0x0201 , 0x50 }, + { 0x0202 , 0x04 }, /* Coarse integration time */ + { 0x0203 , 0xE7 }, + { 0x0204 , 0x00 }, /* Analog gain */ + { 0x0205 , 0x20 }, + { 0x0342 , 0x0D }, /* Line_length_pck 3470d */ + { 0x0343 , 0x8E }, + { 0x0340 , 0x09 }, /* Frame_length_lines 2480d */ + { 0x0341 , 0xB0 }, + /* Manufacture Setting */ + { 0x300E , 0xE9 }, + { 0x3085 , 0x01 }, + { 0x301D , 0x01 }, + { 0x3086 , 
0x03 }, + { 0x3087 , 0x34 }, + { 0x3028 , 0x41 }, + { 0x3065 , 0x15 }, + /* ------------- */ + { 0x310C , 0x50 }, /* pclk invert */ + { 0x3117 , 0x0F }, /* H/V sync driving strength 6mA */ + { 0x3118 , 0xF0 }, /* parallel data driving strength 6mA */ + /* ------------- */ + + /*{ 0x0100 , 0x01 },*/ +}; + + + +struct s5k3h1gx_reg_t s5k3h1gx_regs = { + .init_mipi = &s5k3h1gx_init_settings_array_mipi[0], + .init_mipi_size = ARRAY_SIZE(s5k3h1gx_init_settings_array_mipi), + .init_parallel = &s5k3h1gx_init_settings_array_parallel[0], + .init_parallel_size = ARRAY_SIZE(s5k3h1gx_init_settings_array_parallel), + + .common_mipi = &s5k3h1gx_common_settings_array_mipi[0], + .common_mipi_size = ARRAY_SIZE(s5k3h1gx_common_settings_array_mipi), + .common_parallel = &s5k3h1gx_common_settings_array_parallel[0], + .common_parallel_size = ARRAY_SIZE(s5k3h1gx_common_settings_array_parallel), + + .qtr_mipi = &s5k3h1gx_qtr_settings_array_mipi[0], + .qtr_mipi_size = ARRAY_SIZE(s5k3h1gx_qtr_settings_array_mipi), + .qtr_parallel = &s5k3h1gx_qtr_settings_array_parallel[0], + .qtr_parallel_size = ARRAY_SIZE(s5k3h1gx_qtr_settings_array_parallel), + + .full_mipi = &s5k3h1gx_full_settings_array_mipi[0], + .full_mipi_size = ARRAY_SIZE(s5k3h1gx_full_settings_array_mipi), + .full_parallel = &s5k3h1gx_full_settings_array_parallel[0], + .full_parallel_size = ARRAY_SIZE(s5k3h1gx_full_settings_array_parallel), +}; diff --git a/drivers/media/video/msm/s5k6aafx.c b/drivers/media/video/msm/s5k6aafx.c new file mode 100644 index 0000000000000..b7b84b4d45d85 --- /dev/null +++ b/drivers/media/video/msm/s5k6aafx.c @@ -0,0 +1,1022 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "s5k6aafx.h" + +struct s5k6aafx_work { + struct work_struct work; +}; + +static struct s5k6aafx_work *s5k6aafx_sensorw; +static struct i2c_client *s5k6aafx_client; + +struct s5k6aafx_ctrl { + const struct msm_camera_sensor_info *sensordata; +}; + +static struct s5k6aafx_ctrl *s5k6aafx_ctrl; +static struct platform_device *s5k6aafx_pdev; + +static int op_mode; +static DECLARE_WAIT_QUEUE_HEAD(s5k6aafx_wait_queue); +DEFINE_SEMAPHORE(s5k6aafx_sem); + +static int sensor_probe_node = 0; +static enum frontcam_t previous_mirror_mode; + +static enum wb_mode current_wb = CAMERA_AWB_AUTO; +static int s5k6aafx_set_wb(enum wb_mode wb_value); + +#define MAX_I2C_RETRIES 20 +static int i2c_transfer_retry(struct i2c_adapter *adap, + struct i2c_msg *msgs, + int len) +{ + int i2c_retry = 0; + int ns; /* number sent */ + + while (i2c_retry++ < MAX_I2C_RETRIES) { + ns = i2c_transfer(adap, msgs, len); + if (ns == len) + break; + pr_err("%s: try %d/%d: i2c_transfer sent: %d, len %d\n", + __func__, + i2c_retry, MAX_I2C_RETRIES, ns, len); + msleep(10); + } + + return ns == len ? 
0 : -EIO; +} + + +static int s5k6aafx_i2c_txdata(unsigned short saddr, + unsigned char *txdata, int length) +{ + struct i2c_msg msg[] = { + { + .addr = saddr, + .flags = 0, + .len = length, + .buf = txdata, + }, + }; + + if (i2c_transfer_retry(s5k6aafx_client->adapter, msg, 1) < 0) { + pr_info("s5k6aafx_i2c_txdata failed\n"); + return -EIO; + } + + return 0; +} + +static int s5k6aafx_i2c_write(unsigned short saddr, + unsigned short waddr, unsigned short wdata) +{ + int rc = -EIO; + unsigned char buf[4]; + memset(buf, 0, sizeof(buf)); + + buf[0] = (waddr & 0xFF00) >> 8; + buf[1] = (waddr & 0x00FF); + buf[2] = (wdata & 0xFF00) >> 8; + buf[3] = (wdata & 0x00FF); + rc = s5k6aafx_i2c_txdata(saddr, buf, 4); + if (rc < 0) + pr_info("i2c_write failed, addr = 0x%x, val = 0x%x!\n", + waddr, wdata); + + return rc; +} + +static int s5k6aafx_i2c_write_table(struct s5k6aafx_i2c_reg_conf const + *reg_conf_tbl, int num_of_items_in_table) +{ + int i; + int rc = -EIO; + + for (i = 0; i < num_of_items_in_table; i++) { + rc = s5k6aafx_i2c_write(s5k6aafx_client->addr, + reg_conf_tbl->waddr, reg_conf_tbl->wdata); + if (rc < 0) + break; + reg_conf_tbl++; + } + return rc; +} + +static int s5k6aafx_i2c_rxdata(unsigned short saddr, + unsigned char *rxdata, int length) +{ + struct i2c_msg msgs[] = { + { + .addr = saddr, + .flags = 0, + .len = 2, + .buf = rxdata, + }, + { + .addr = saddr, + .flags = I2C_M_RD, + .len = length, + .buf = rxdata, + }, + }; + + if (i2c_transfer_retry(s5k6aafx_client->adapter, msgs, 2) < 0) { + pr_info("s5k6aafx_i2c_rxdata failed!\n"); + return -EIO; + } + + return 0; +} + +static int s5k6aafx_i2c_read(unsigned short saddr, + unsigned short raddr, unsigned short *rdata) +{ + int32_t rc = 0; + unsigned char buf[4]; + + if (!rdata) + return -EIO; + + memset(buf, 0, sizeof(buf)); + + buf[0] = (raddr & 0xFF00)>>8; + buf[1] = (raddr & 0x00FF); + + rc = s5k6aafx_i2c_rxdata(saddr, buf, 2); + if (rc < 0){ + printk(KERN_ERR "s5k6aafx_i2c_read failed!\n"); + return rc; + } + + *rdata = buf[0] << 8 | buf[1]; + + return rc; +} + +static int s5k6aafx_gpio_pull(int gpio_pin, int pull_mode) +{ + int rc = 0; + rc = gpio_request(gpio_pin, "s5k6aafx"); + if (!rc) + gpio_direction_output(gpio_pin, pull_mode); + else + pr_err("GPIO(%d) request failed\n", gpio_pin); + gpio_free(gpio_pin); + return rc; +} + +static int s5k6aafx_set_sensor_mode(int mode) +{ + switch (mode) { + case SENSOR_PREVIEW_MODE: + pr_info("s5k6aafx:sensor set mode: preview\n"); + op_mode = SENSOR_PREVIEW_MODE; + + s5k6aafx_i2c_write(s5k6aafx_client->addr, + S5K6AAFX_REG_I2C_MODE, S5K6AAFX_I2C_MODE_GENERAL); + + s5k6aafx_i2c_write(s5k6aafx_client->addr, + S5K6AAFX_REG_W_ADDH, S5K6AAFX_ADDH_SW_REG_INT); + s5k6aafx_i2c_write(s5k6aafx_client->addr, + S5K6AAFX_REG_W_ADDL, 0x01F4); + s5k6aafx_i2c_write(s5k6aafx_client->addr, + S5K6AAFX_REG_WR, 0x0000); /* REG_TC_GP_EnableCapture */ + /* REG_TC_GP_EnableCaptureChanged */ + s5k6aafx_i2c_write(s5k6aafx_client->addr, + S5K6AAFX_REG_WR, 0x0001); + + s5k6aafx_i2c_write(s5k6aafx_client->addr, + S5K6AAFX_REG_W_ADDL, 0x0400); + s5k6aafx_i2c_write(s5k6aafx_client->addr, + S5K6AAFX_REG_WR, 0x007F); + s5k6aafx_set_wb(current_wb); + + break; + + case SENSOR_SNAPSHOT_MODE: + pr_info("s5k6aafx:sensor set mode: snapshot\n"); + op_mode = SENSOR_SNAPSHOT_MODE; + + s5k6aafx_i2c_write(s5k6aafx_client->addr, + S5K6AAFX_REG_I2C_MODE, S5K6AAFX_I2C_MODE_GENERAL); + + s5k6aafx_i2c_write(s5k6aafx_client->addr, + S5K6AAFX_REG_W_ADDH, S5K6AAFX_ADDH_SW_REG_INT); + s5k6aafx_i2c_write(s5k6aafx_client->addr, + 
S5K6AAFX_REG_W_ADDL, 0x01F4); + /* REG_TC_GP_EnableCapture */ + s5k6aafx_i2c_write(s5k6aafx_client->addr, + S5K6AAFX_REG_WR, 0x0001); + /* REG_TC_GP_EnableCaptureChanged */ + s5k6aafx_i2c_write(s5k6aafx_client->addr, + S5K6AAFX_REG_WR, 0x0001); + + break; + default: + return -EINVAL; + } + + return 0; +} + +static int s5k6aafx_set_effect(int effect) +{ + if (op_mode == SENSOR_SNAPSHOT_MODE) + return 0; + + s5k6aafx_i2c_write(s5k6aafx_client->addr, + S5K6AAFX_REG_I2C_MODE, S5K6AAFX_ADDH_SW_REG_INT); + + switch (effect) { + case CAMERA_EFFECT_OFF: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01EE, 0x0000); + break; + case CAMERA_EFFECT_MONO: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01EE, 0x0001); + break; + case CAMERA_EFFECT_NEGATIVE: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01EE, 0x0002); + break; + case CAMERA_EFFECT_SEPIA: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01EE, 0x0003); + break; + case CAMERA_EFFECT_AQUA: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01EE, 0x0004); + break; + default: + return -EINVAL; + } + return 0; +} + + +static int s5k6aafx_set_antibanding(enum antibanding_mode antibanding_value) +{ + if (op_mode == SENSOR_SNAPSHOT_MODE) + return 0; + + s5k6aafx_i2c_write(s5k6aafx_client->addr, + S5K6AAFX_REG_I2C_MODE, S5K6AAFX_ADDH_SW_REG_INT); + + switch (antibanding_value) { + case CAMERA_ANTI_BANDING_50HZ: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03DC, 0x0001); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03DE, 0x0001); + break; + case CAMERA_ANTI_BANDING_60HZ: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03DC, 0x0002); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03DE, 0x0001); + break; + case CAMERA_ANTI_BANDING_AUTO: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03DC, 0x0002); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03DE, 0x0001); + break; + } + return 0; +} + + +static int s5k6aafx_set_brightness(enum brightness_t brightness_value) +{ + if (op_mode == SENSOR_SNAPSHOT_MODE) + return 0; + + s5k6aafx_i2c_write(s5k6aafx_client->addr, + S5K6AAFX_REG_I2C_MODE, S5K6AAFX_ADDH_SW_REG_INT); + + switch (brightness_value) { + case CAMERA_BRIGHTNESS_N4: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01E4, 0xFF81); + break; + case CAMERA_BRIGHTNESS_N3: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01E4, 0xFFA1); + break; + case CAMERA_BRIGHTNESS_N2: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01E4, 0xFFC1); + break; + case CAMERA_BRIGHTNESS_N1: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01E4, 0xFFE1); + break; + case CAMERA_BRIGHTNESS_D: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01E4, 0x0000); + break; + case CAMERA_BRIGHTNESS_P1: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01E4, 0x001F); + break; + case CAMERA_BRIGHTNESS_P2: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01E4, 0x003F); + break; + case CAMERA_BRIGHTNESS_P3: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01E4, 0x005F); + break; + case CAMERA_BRIGHTNESS_P4: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01E4, 0x007F); + break; + default: + break; + } + return 0; +} + +static int s5k6aafx_set_wb(enum wb_mode wb_value) +{ + if (op_mode == SENSOR_SNAPSHOT_MODE) + return 0; + + s5k6aafx_i2c_write(s5k6aafx_client->addr, + S5K6AAFX_REG_I2C_MODE, S5K6AAFX_ADDH_SW_REG_INT); + + switch (wb_value) { + case CAMERA_AWB_AUTO: /*auto*/ + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x0400, 0x007F); + break; + case CAMERA_AWB_CLOUDY: /*cloudy*/ + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x0400, 0x0077); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03D0, 0x0147); + 
s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03D2, 0x0001); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03D4, 0x0100); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03D6, 0x0001); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03D8, 0x0138); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03DA, 0x0001); + break; + case CAMERA_AWB_INDOOR_HOME: /*Fluorescent*/ + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x0400, 0x0077); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03D0, 0x00D9); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03D2, 0x0001); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03D4, 0x0100); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03D6, 0x0001); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03D8, 0x0200); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03DA, 0x0001); + break; + case CAMERA_AWB_INDOOR_OFFICE: /*Incandescent*/ + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x0400, 0x0077); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03D0, 0x00D9); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03D2, 0x0001); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03D4, 0x0100); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03D6, 0x0001); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03D8, 0x0219); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03DA, 0x0001); + break; + case CAMERA_AWB_SUNNY: /*outdoor*/ + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x0400, 0x0077); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03D0, 0x0133); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03D2, 0x0001); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03D4, 0x0100); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03D6, 0x0001); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03D8, 0x0119); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x03DA, 0x0001); + break; + default: + break; + } + current_wb = wb_value; + return 0; +} + + +static int s5k6aafx_set_sharpness(enum sharpness_mode sharpness_value) +{ + if (op_mode == SENSOR_SNAPSHOT_MODE) + return 0; + + s5k6aafx_i2c_write(s5k6aafx_client->addr, + S5K6AAFX_REG_I2C_MODE, S5K6AAFX_ADDH_SW_REG_INT); + + switch (sharpness_value) { + case CAMERA_SHARPNESS_X0: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01EA, 0xFF81); + break; + case CAMERA_SHARPNESS_X1: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01EA, 0xFFC1); + break; + case CAMERA_SHARPNESS_X2: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01EA, 0x0000); + break; + case CAMERA_SHARPNESS_X3: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01EA, 0x003F); + break; + case CAMERA_SHARPNESS_X4: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01EA, 0x007F); + break; + default: + break; + } + return 0; +} + + +static int s5k6aafx_set_saturation(enum saturation_mode saturation_value) +{ + if (op_mode == SENSOR_SNAPSHOT_MODE) + return 0; + + s5k6aafx_i2c_write(s5k6aafx_client->addr, + S5K6AAFX_REG_I2C_MODE, S5K6AAFX_ADDH_SW_REG_INT); + + switch (saturation_value) { + case CAMERA_SATURATION_X0: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01E8, 0xFF81); + break; + case CAMERA_SATURATION_X05: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01E8, 0xFFC1); + break; + case CAMERA_SATURATION_X1: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01E8, 0x0000); + break; + case CAMERA_SATURATION_X15: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01E8, 0x003F); + break; + case CAMERA_SATURATION_X2: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01E8, 0x007F); + break; + default: + break; + } + return 0; +} + +static int s5k6aafx_set_contrast(enum contrast_mode contrast_value) +{ + if (op_mode == SENSOR_SNAPSHOT_MODE) + return 0; + + 
s5k6aafx_i2c_write(s5k6aafx_client->addr, + S5K6AAFX_REG_I2C_MODE, S5K6AAFX_ADDH_SW_REG_INT); + + switch (contrast_value) { + case CAMERA_CONTRAST_N2: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01E6, 0xFF81); + break; + case CAMERA_CONTRAST_N1: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01E6, 0xFFC1); + break; + case CAMERA_CONTRAST_D: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01E6, 0x0000); + break; + case CAMERA_CONTRAST_P1: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01E6, 0x003F); + break; + case CAMERA_CONTRAST_P2: + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x01E6, 0x007F); + break; + default: + break; + } + return 0; +} + +static int s5k6aafx_set_front_camera_mode(enum frontcam_t frontcam_value) +{ + if (op_mode == SENSOR_SNAPSHOT_MODE || previous_mirror_mode == frontcam_value) + return 0; + + s5k6aafx_i2c_write(s5k6aafx_client->addr, + S5K6AAFX_REG_I2C_MODE, S5K6AAFX_ADDH_SW_REG_INT); + + switch (frontcam_value) { + case CAMERA_MIRROR: + /*mirror and flip*/ + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x02D4, 0x0002); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x02D6, 0x0002); + + break; + case CAMERA_REVERSE: + /*reverse mode*/ + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x02D4, 0x0003); + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x02D6, 0x0003); + + break; + + default: + break; + } + + s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x021E, 0x0001); + + previous_mirror_mode = frontcam_value; + + return 0; +} + +#if 0 +static int s5k6aafx_set_metering_mode(enum aec_metering_mode metering_value) +{ + uint16_t weight_table[32]; + uint8_t i; + + if (op_mode == SENSOR_SNAPSHOT_MODE) + return 0; + + for (i = 0; i < 32; i++) + weight_table[i] = 0x0101; + + if (metering_value == CAMERA_METERING_CENTERING) { + weight_table[9] = 0x0303; + weight_table[10] = 0x0303; + weight_table[13] = 0x0303; /* 0x0305 */ + weight_table[14] = 0x0303; /* 0x0503 */ + weight_table[17] = 0x0303; /* 0x0305 */ + weight_table[18] = 0x0303; /* 0x0503 */ + weight_table[21] = 0x0303; + weight_table[22] = 0x0303; + } else if (metering_value == CAMERA_METERING_SPOT) { + weight_table[13] = 0x0501; + weight_table[14] = 0x0105; + weight_table[17] = 0x0501; + weight_table[18] = 0x0105; + } else if (metering_value >= CAMERA_METERING_ZONE1 && + metering_value <= CAMERA_METERING_ZONE16) { + i = metering_value - CAMERA_METERING_ZONE1; + i += (i & 0xFC); /* i=i+((int)(i/4))*4 */ + weight_table[i] = 0x0505; + weight_table[i+4] = 0x0505; + } + + s5k6aafx_i2c_write(s5k6aafx_client->addr, + S5K6AAFX_REG_I2C_MODE, S5K6AAFX_I2C_MODE_GENERAL); + s5k6aafx_i2c_write(s5k6aafx_client->addr, + S5K6AAFX_REG_W_ADDH, S5K6AAFX_ADDH_SW_REG_INT); + s5k6aafx_i2c_write(s5k6aafx_client->addr, S5K6AAFX_REG_W_ADDL, 0x100E); + + for (i = 0; i < 32; i++) { + CDBG("write table[%d]=%x\n", i, weight_table[i]); + s5k6aafx_i2c_write(s5k6aafx_client->addr, + S5K6AAFX_REG_WR, weight_table[i]); + } + + return 0; +} +#endif + +static int s5k6aafx_sensor_read_id(const struct msm_camera_sensor_info *data) +{ + uint16_t model_id; + int rc = 0; + + pr_info("s5k6aafx_sensor_read_id\n"); + /* Read the Model ID of the sensor */ + rc = s5k6aafx_i2c_write(s5k6aafx_client->addr, + S5K6AAFX_REG_I2C_MODE, S5K6AAFX_I2C_MODE_GENERAL); + if (rc < 0) + goto init_probe_fail; + rc = s5k6aafx_i2c_write(s5k6aafx_client->addr, + S5K6AAFX_REG_R_ADDH, S5K6AAFX_ADDH_SW_REG_INT); + if (rc < 0) + goto init_probe_fail; + rc = s5k6aafx_i2c_write(s5k6aafx_client->addr, + S5K6AAFX_REG_R_ADDL, S5K6AAFX_REG_MODEL_ID); + if (rc < 0) + goto init_probe_fail; + rc = 
s5k6aafx_i2c_read(s5k6aafx_client->addr, + S5K6AAFX_REG_WR, &model_id); + if (rc < 0) + goto init_probe_fail; + + pr_info("s5k6aafx: model_id = 0x%x\n", model_id); + /* Check if it matches it with the value in Datasheet */ + if (model_id != S5K6AAFX_MODEL_ID) { + pr_info("invalid model id\n"); + rc = -EINVAL; + goto init_probe_fail; + } + +init_probe_fail: + return rc; + +} + +int s5k6aafx_sensor_open_init(struct msm_camera_sensor_info *data) +{ + int rc = 0; + s5k6aafx_ctrl = kzalloc(sizeof(struct s5k6aafx_ctrl), GFP_KERNEL); + if (!s5k6aafx_ctrl) { + pr_info("s5k6aafx_init failed!\n"); + rc = -ENOMEM; + goto init_done; + } + + if (data == NULL) { + pr_err("%s sensor data is NULL\n", __func__); + return -EINVAL; + } + s5k6aafx_ctrl->sensordata = data; + + + if (s5k6aafx_gpio_pull(data->sensor_pwd, 1) < 0) + goto init_fail; + mdelay(1); + + /*switch PCLK and MCLK to 2nd cam*/ + pr_info("s5k6aafx: s5k6aafx_sensor_probe switch clk\n"); + if(data->camera_clk_switch != NULL) + data->camera_clk_switch(); + + /*MCLK enable*/ + pr_info("s5k6aafx: MCLK enable clk\n"); + msm_camio_probe_on(s5k6aafx_pdev); + mdelay(3); + + if (s5k6aafx_gpio_pull(data->sensor_reset, 1) < 0) + goto init_fail; + mdelay(10); + msm_camio_camif_pad_reg_reset(); + /*reset sensor*/ + rc = s5k6aafx_i2c_write_table(&s5k6aafx_regs.reset_init[0], + s5k6aafx_regs.reset_init_size); + if (rc < 0) + goto init_fail; + mdelay(100); + + /*T&P setting*/ + rc = s5k6aafx_i2c_write_table(&s5k6aafx_regs.TP_init[0], + s5k6aafx_regs.TP_init_size); + if (rc < 0) + goto init_fail; + + /*analog setting*/ + rc = s5k6aafx_i2c_write_table(&s5k6aafx_regs.analog_setting_init[0], + s5k6aafx_regs.analog_setting_init_size); + if (rc < 0) + goto init_fail; + mdelay(10); + + /*set initial register*/ + rc = s5k6aafx_i2c_write_table(&s5k6aafx_regs.register_init[0], + s5k6aafx_regs.register_init_size); + if (rc < 0) + goto init_fail; + + /*set clock*/ + rc = s5k6aafx_i2c_write_table(&s5k6aafx_regs.clk_init[0], + s5k6aafx_regs.clk_init_size); + + if (rc < 0) + goto init_fail; + mdelay(100); + + /* preview configuration */ + rc = s5k6aafx_i2c_write_table(&s5k6aafx_regs.prev_snap_conf_init[0], + s5k6aafx_regs.prev_snap_conf_init_size); + + if (rc < 0) + goto init_fail; + + rc = s5k6aafx_sensor_read_id(data); + if (rc < 0) + goto init_fail; + + previous_mirror_mode = -1; +init_done: + return rc; + +init_fail: + kfree(s5k6aafx_ctrl); + return rc; +} + +static int s5k6aafx_init_client(struct i2c_client *client) +{ + /* Initialize the MSM_CAMI2C Chip */ + init_waitqueue_head(&s5k6aafx_wait_queue); + return 0; +} + +int s5k6aafx_sensor_config(void __user *argp) +{ + struct sensor_cfg_data cfg_data; + long rc = 0; + if (copy_from_user(&cfg_data, + (void *)argp, sizeof(struct sensor_cfg_data))) + return -EFAULT; + + switch (cfg_data.cfgtype) { + case CFG_SET_MODE: + rc = s5k6aafx_set_sensor_mode(cfg_data.mode); + break; + case CFG_SET_EFFECT: + rc = s5k6aafx_set_effect(cfg_data.cfg.effect); + break; + case CFG_SET_ANTIBANDING: + rc = s5k6aafx_set_antibanding + (cfg_data.cfg.antibanding_value); + break; + case CFG_SET_BRIGHTNESS: + rc = s5k6aafx_set_brightness + (cfg_data.cfg.brightness_value); + break; + case CFG_SET_WB: + rc = s5k6aafx_set_wb(cfg_data.cfg.wb_value); + break; + case CFG_SET_SHARPNESS: + rc = s5k6aafx_set_sharpness + (cfg_data.cfg.sharpness_value); + break; + case CFG_SET_SATURATION: + rc = s5k6aafx_set_saturation + (cfg_data.cfg.saturation_value); + break; + case CFG_SET_CONTRAST: + rc = s5k6aafx_set_contrast(cfg_data.cfg.contrast_value); + break; + 
case CFG_SET_FRONT_CAMERA_MODE: + rc = s5k6aafx_set_front_camera_mode(cfg_data.cfg.frontcam_value); + break; +#if 0 + case CFG_SET_EXPOSURE_MODE: + rc = s5k6aafx_set_metering_mode + (cfg_data.cfg.metering_value); + break; +#endif + default: + rc = -EINVAL; + break; + } + + return rc; +} + +int s5k6aafx_sensor_release(void) +{ + int rc = 0; + down(&s5k6aafx_sem); + rc = s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x022e, 0x01); + rc = s5k6aafx_i2c_write(s5k6aafx_client->addr, 0x0230, 0x01); + mdelay(133); + + if (s5k6aafx_ctrl) + s5k6aafx_gpio_pull(s5k6aafx_ctrl->sensordata->sensor_reset, 0); + mdelay(1); + msm_camio_probe_off(s5k6aafx_pdev); + mdelay(1); + if (s5k6aafx_ctrl) + s5k6aafx_gpio_pull(s5k6aafx_ctrl->sensordata->sensor_pwd, 0); + + if (s5k6aafx_ctrl) { + kfree(s5k6aafx_ctrl); + s5k6aafx_ctrl = NULL; + } + + up(&s5k6aafx_sem); + + return rc; +} + +static const char *S5K6AAFXVendor = "Samsung"; +static const char *S5K6AAFXNAME = "s5k6aafx"; +static const char *S5K6AAFXSize = "1M"; +static uint32_t htcwc_value; + +static ssize_t sensor_vendor_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t ret = 0; + + sprintf(buf, "%s %s %s\n", S5K6AAFXVendor, S5K6AAFXNAME, S5K6AAFXSize); + ret = strlen(buf) + 1; + + return ret; +} + +static ssize_t htcwc_get(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t length; + length = sprintf(buf, "%d\n", htcwc_value); + return length; +} + +static ssize_t htcwc_set(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + uint32_t tmp = 0; + + tmp = buf[0] - 0x30; /* only get the first char */ + +#if 0 + if (strcmp(current->comm,"com.android.camera")!=0){ + pr_info("No permission : not camera ap\n"); + return -EINVAL; + } +#endif + + htcwc_value = tmp; + //pr_info("current_comm = %s\n", current->comm); + pr_info("htcwc_value = %d\n", htcwc_value); + return count; +} + +static ssize_t sensor_read_node(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t length; + length = sprintf(buf, "%d\n", sensor_probe_node); + return length; +} + +static DEVICE_ATTR(sensor, 0444, sensor_vendor_show, NULL); +static DEVICE_ATTR(htcwc, 0777, htcwc_get, htcwc_set); +static DEVICE_ATTR(node, 0444, sensor_read_node, NULL); + +static struct kobject *android_s5k6aafx; + +static int s5k6aafx_sysfs_init(void) +{ + int ret ; + pr_info("s5k6aafx:kobject creat and add\n"); + android_s5k6aafx = kobject_create_and_add("android_camera2", NULL); + if (android_s5k6aafx == NULL) { + pr_info("s5k6aafx_sysfs_init: subsystem_register " \ + "failed\n"); + ret = -ENOMEM; + return ret ; + } + pr_info("s5k6aafx:sysfs_create_file\n"); + ret = sysfs_create_file(android_s5k6aafx, &dev_attr_sensor.attr); + if (ret) { + pr_info("s5k6aafx_sysfs_init: sysfs_create_file " \ + "failed\n"); + kobject_del(android_s5k6aafx); + } + + ret = sysfs_create_file(android_s5k6aafx, &dev_attr_htcwc.attr); + if (ret) { + pr_info("s5k6aafx_sysfs_init: sysfs_create_file htcwc failed\n"); + kobject_del(android_s5k6aafx); + } + + ret = sysfs_create_file(android_s5k6aafx, &dev_attr_node.attr); + if (ret) { + pr_info("s5k6aafx_sysfs_init: dev_attr_node failed\n"); + kobject_del(android_s5k6aafx); + } + + return 0 ; +} + + +static int s5k6aafx_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int rc = 0; + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + rc = -ENOTSUPP; + goto probe_failure; + } + + s5k6aafx_sensorw = kzalloc(sizeof(struct s5k6aafx_work), GFP_KERNEL); + 
+ if (!s5k6aafx_sensorw) { + rc = -ENOMEM; + goto probe_failure; + } + + i2c_set_clientdata(client, s5k6aafx_sensorw); + s5k6aafx_init_client(client); + s5k6aafx_client = client; + + pr_info("s5k6aafx_probe succeeded!\n"); + + return 0; + +probe_failure: + kfree(s5k6aafx_sensorw); + s5k6aafx_sensorw = NULL; + pr_info("s5k6aafx_probe failed!\n"); + return rc; +} + +static const struct i2c_device_id s5k6aafx_i2c_id[] = { + {"s5k6aafx", 0}, + {}, +}; + +static struct i2c_driver s5k6aafx_i2c_driver = { + .id_table = s5k6aafx_i2c_id, + .probe = s5k6aafx_i2c_probe, + .remove = __exit_p(s5k6aafx_i2c_remove), + .driver = { + .name = "s5k6aafx", + }, +}; + +static int s5k6aafx_sensor_probe(struct msm_camera_sensor_info *info, + struct msm_sensor_ctrl *s) +{ + int rc = i2c_add_driver(&s5k6aafx_i2c_driver); + if (rc < 0 || s5k6aafx_client == NULL) { + rc = -ENOTSUPP; + goto probe_done; + } + + pr_info("s5k6aafx s->node %d\n", s->node); + sensor_probe_node = s->node; + /*2nd camera pwd*/ + if (s5k6aafx_gpio_pull(info->sensor_pwd, 1) < 0) + goto probe_fail; + /*main camera pwd*/ + if (s5k6aafx_gpio_pull(105, 0) < 0) + goto probe_fail; + mdelay(5); + + /*switch clk source*/ + pr_info("s5k6aafx: s5k6aafx_sensor_probe switch clk\n"); + if(info->camera_clk_switch != NULL) + info->camera_clk_switch(); + + /*MCLK enable*/ + pr_info("s5k6aafx: MCLK enable clk\n"); + msm_camio_probe_on(s5k6aafx_pdev); + mdelay(10); + + if (s5k6aafx_gpio_pull(info->sensor_reset, 1) < 0) + goto probe_fail; + mdelay(10); + + rc = s5k6aafx_sensor_read_id(info); + if (rc < 0) + goto probe_fail; + if (info->camera_main_set_probe != NULL) + info->camera_main_set_probe(true); + s->s_init = s5k6aafx_sensor_open_init; + s->s_release = s5k6aafx_sensor_release; + s->s_config = s5k6aafx_sensor_config; + + /*init done*/ + msm_camio_probe_off(s5k6aafx_pdev); + + s5k6aafx_gpio_pull(info->sensor_pwd, 0); + mdelay(5); + + s5k6aafx_sysfs_init(); + + mdelay(5); + +probe_done: + pr_info("%s %s:%d\n", __FILE__, __func__, __LINE__); + return rc; +probe_fail: + msm_camio_probe_off(s5k6aafx_pdev); + pr_err("S5K6AAFX probe failed\n"); + return rc; + +} + +static int __s5k6aafx_probe(struct platform_device *pdev) +{ + struct msm_camera_sensor_info *sdata = pdev->dev.platform_data; + + s5k6aafx_pdev = pdev; + if (sdata->camera_main_get_probe != NULL) { + if (sdata->camera_main_get_probe()) { + pr_info("__s5k6aafx_probe camera main get probed already.\n"); + return 0; + } + } + /*init reset /1 pwd/2pwd*/ + s5k6aafx_gpio_pull(sdata->sensor_pwd, 0); + s5k6aafx_gpio_pull(105, 1); + mdelay(10); + s5k6aafx_gpio_pull(sdata->sensor_reset, 0); + return msm_camera_drv_start(pdev, s5k6aafx_sensor_probe); +} + +static struct platform_driver msm_camera_driver = { + .probe = __s5k6aafx_probe, + .driver = { + .name = "msm_camera_s5k6aafx", + .owner = THIS_MODULE, + }, +}; + +static int __init s5k6aafx_init(void) +{ + + return platform_driver_register(&msm_camera_driver); +} + +module_init(s5k6aafx_init); diff --git a/drivers/media/video/msm/s5k6aafx.h b/drivers/media/video/msm/s5k6aafx.h new file mode 100644 index 0000000000000..43d20b1d791b0 --- /dev/null +++ b/drivers/media/video/msm/s5k6aafx.h @@ -0,0 +1,80 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef S5K6AAFX_H
+#define S5K6AAFX_H
+
+#include
+#include
+
+/* S5K6AAFX Registers and their values */
+/* Sensor Core Registers */
+#define S5K6AAFX_REG_I2C_MODE 0xFCFC
+#define S5K6AAFX_I2C_MODE_SENSOR 0x0000
+#define S5K6AAFX_I2C_MODE_GENERAL 0xD000
+
+#define S5K6AAFX_REG_MODEL_ID 0x0152
+#define S5K6AAFX_MODEL_ID 0x06AA
+
+/* Mode select register */
+#define S5K6AAFX_REG_MODE_SELECT 0x107E
+#define S5K6AAFX_MODE_SELECT_STREAM 0x0000
+#define S5K6AAFX_MODE_SELECT_SW_STANDBY 0x0001
+
+#define S5K6AAFX_ADDH_SW_REG_INT 0x7000
+#define S5K6AAFX_REG_W_ADDH 0x0028
+#define S5K6AAFX_REG_W_ADDL 0x002A
+#define S5K6AAFX_REG_R_ADDH 0x002C
+#define S5K6AAFX_REG_R_ADDL 0x002E
+#define S5K6AAFX_REG_WR 0x0F12
+
+#define S5K6AAFX_QTR_SIZE_WIDTH 0x0280
+#define S5K6AAFX_QTR_SIZE_HEIGHT 0x01E0
+#define S5K6AAFX_FULL_SIZE_WIDTH 0x0500
+#define S5K6AAFX_FULL_SIZE_HEIGHT 0x0400
+#define S5K6AAFX_ADJ_FULL_SIZE_WIDTH S5K6AAFX_QTR_SIZE_WIDTH*2
+#define S5K6AAFX_ADJ_FULL_SIZE_HEIGHT S5K6AAFX_QTR_SIZE_HEIGHT*2
+
+extern struct s5k6aafx_reg s5k6aafx_regs;
+
+struct s5k6aafx_i2c_reg_conf {
+	unsigned short waddr;
+	unsigned short wdata;
+};
+
+struct s5k6aafx_reg {
+	const struct s5k6aafx_i2c_reg_conf *reset_init;
+	uint16_t reset_init_size;
+	const struct s5k6aafx_i2c_reg_conf *TP_init;
+	uint16_t TP_init_size;
+	const struct s5k6aafx_i2c_reg_conf *analog_setting_init;
+	uint16_t analog_setting_init_size;
+	const struct s5k6aafx_i2c_reg_conf *register_init;
+	uint16_t register_init_size;
+	const struct s5k6aafx_i2c_reg_conf *clk_init;
+	uint16_t clk_init_size;
+	const struct s5k6aafx_i2c_reg_conf *prev_snap_conf_init;
+	uint16_t prev_snap_conf_init_size;
+	/* for full-size preview */
+	const struct s5k6aafx_i2c_reg_conf *clk_init_tb2;
+	uint16_t clk_init_tb2_size;
+	const struct s5k6aafx_i2c_reg_conf *prev_snap_conf_init_tb2;
+	uint16_t prev_snap_conf_init_tb2_size;
+};
+
+#endif /* S5K6AAFX_H */
diff --git a/drivers/media/video/msm/s5k6aafx_reg_mecha.c b/drivers/media/video/msm/s5k6aafx_reg_mecha.c
new file mode 100644
index 0000000000000..fc54aa2035c62
--- /dev/null
+++ b/drivers/media/video/msm/s5k6aafx_reg_mecha.c
@@ -0,0 +1,2647 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ * + */ + +#include "s5k6aafx.h" + +static const struct s5k6aafx_i2c_reg_conf const reset_init_tbl[] = { + /* change to general mode */ + {S5K6AAFX_REG_I2C_MODE, S5K6AAFX_I2C_MODE_GENERAL}, + {0x0010, 0x0001}, /* Reset */ + {0x1030, 0x0000}, /* Clear host interrupt so main will wait */ + {0x0014, 0x0001}, /* ARM go */ + /* delay 100ms */ +}; + +static const struct s5k6aafx_i2c_reg_conf const TP_init_tbl[] = { + + /* Start T&P part */ + /* DO NOT DELETE T&P SECTION COMMENTS! They are required to debug T&P related issues. */ + /* svn://transrdsrv/svn/svnroot/System/Software/tcevb/SDK+FW/ISP_Oscar/Firmware */ + /* Rev: 33110-33110 */ + /* Signature: */ + /* md5 f0ba942df15b96de5c09e6cf13fed9c9 .btp */ + /* md5 8bc59f72129cb36e6f6db4be5ddca1f6 .htp */ + /* md5 954ec97efcabad291d89f63e29f32490 .RegsMap.h */ + /* md5 5c29fe50b51e7e860313f5b3b6452bfd .RegsMap.bin */ + /* md5 6211407baaa234b753431cde4ba32402 .base.RegsMap.h */ + /* md5 90cc21d42cc5f02eb80b2586e5c46d9b .base.RegsMap.bin */ + + {S5K6AAFX_REG_W_ADDH, S5K6AAFX_ADDH_SW_REG_INT}, + {S5K6AAFX_REG_W_ADDL, 0x1D60}, + {S5K6AAFX_REG_WR, 0xB570}, + {S5K6AAFX_REG_WR, 0x4936}, + {S5K6AAFX_REG_WR, 0x4836}, + {S5K6AAFX_REG_WR, 0x2205}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xFA4E}, + {S5K6AAFX_REG_WR, 0x4935}, + {S5K6AAFX_REG_WR, 0x2002}, + {S5K6AAFX_REG_WR, 0x83C8}, + {S5K6AAFX_REG_WR, 0x2001}, + {S5K6AAFX_REG_WR, 0x3120}, + {S5K6AAFX_REG_WR, 0x8088}, + {S5K6AAFX_REG_WR, 0x4933}, + {S5K6AAFX_REG_WR, 0x0200}, + {S5K6AAFX_REG_WR, 0x8008}, + {S5K6AAFX_REG_WR, 0x4933}, + {S5K6AAFX_REG_WR, 0x8048}, + {S5K6AAFX_REG_WR, 0x4933}, + {S5K6AAFX_REG_WR, 0x4833}, + {S5K6AAFX_REG_WR, 0x2204}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xFA3E}, + {S5K6AAFX_REG_WR, 0x4932}, + {S5K6AAFX_REG_WR, 0x4833}, + {S5K6AAFX_REG_WR, 0x2206}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xFA39}, + {S5K6AAFX_REG_WR, 0x4932}, + {S5K6AAFX_REG_WR, 0x4832}, + {S5K6AAFX_REG_WR, 0x2207}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xFA34}, + {S5K6AAFX_REG_WR, 0x4931}, + {S5K6AAFX_REG_WR, 0x4832}, + {S5K6AAFX_REG_WR, 0x2208}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xFA2F}, + {S5K6AAFX_REG_WR, 0x4931}, + {S5K6AAFX_REG_WR, 0x4831}, + {S5K6AAFX_REG_WR, 0x2209}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xFA2A}, + {S5K6AAFX_REG_WR, 0x4930}, + {S5K6AAFX_REG_WR, 0x4831}, + {S5K6AAFX_REG_WR, 0x220A}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xFA25}, + {S5K6AAFX_REG_WR, 0x4930}, + {S5K6AAFX_REG_WR, 0x4830}, + {S5K6AAFX_REG_WR, 0x220B}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xFA20}, + {S5K6AAFX_REG_WR, 0x482F}, + {S5K6AAFX_REG_WR, 0x4930}, + {S5K6AAFX_REG_WR, 0x6108}, + {S5K6AAFX_REG_WR, 0x4830}, + {S5K6AAFX_REG_WR, 0x39FF}, + {S5K6AAFX_REG_WR, 0x3901}, + {S5K6AAFX_REG_WR, 0x6748}, + {S5K6AAFX_REG_WR, 0x482F}, + {S5K6AAFX_REG_WR, 0x1C0A}, + {S5K6AAFX_REG_WR, 0x32C0}, + {S5K6AAFX_REG_WR, 0x6390}, + {S5K6AAFX_REG_WR, 0x482E}, + {S5K6AAFX_REG_WR, 0x6708}, + {S5K6AAFX_REG_WR, 0x491A}, + {S5K6AAFX_REG_WR, 0x482D}, + {S5K6AAFX_REG_WR, 0x3108}, + {S5K6AAFX_REG_WR, 0x60C1}, + {S5K6AAFX_REG_WR, 0x6882}, + {S5K6AAFX_REG_WR, 0x1A51}, + {S5K6AAFX_REG_WR, 0x8201}, + {S5K6AAFX_REG_WR, 0x4C2B}, + {S5K6AAFX_REG_WR, 0x2607}, + {S5K6AAFX_REG_WR, 0x6821}, + {S5K6AAFX_REG_WR, 0x0736}, + {S5K6AAFX_REG_WR, 0x42B1}, + {S5K6AAFX_REG_WR, 0xDA05}, + {S5K6AAFX_REG_WR, 0x4829}, + {S5K6AAFX_REG_WR, 0x22D8}, + {S5K6AAFX_REG_WR, 0x1C05}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xFA09}, + {S5K6AAFX_REG_WR, 0x6025}, + {S5K6AAFX_REG_WR, 0x68A1}, + 
{S5K6AAFX_REG_WR, 0x42B1}, + {S5K6AAFX_REG_WR, 0xDA07}, + {S5K6AAFX_REG_WR, 0x4825}, + {S5K6AAFX_REG_WR, 0x2224}, + {S5K6AAFX_REG_WR, 0x3824}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xFA00}, + {S5K6AAFX_REG_WR, 0x4822}, + {S5K6AAFX_REG_WR, 0x3824}, + {S5K6AAFX_REG_WR, 0x60A0}, + {S5K6AAFX_REG_WR, 0x4D22}, + {S5K6AAFX_REG_WR, 0x6D29}, + {S5K6AAFX_REG_WR, 0x42B1}, + {S5K6AAFX_REG_WR, 0xDA07}, + {S5K6AAFX_REG_WR, 0x481F}, + {S5K6AAFX_REG_WR, 0x228F}, + {S5K6AAFX_REG_WR, 0x00D2}, + {S5K6AAFX_REG_WR, 0x30D8}, + {S5K6AAFX_REG_WR, 0x1C04}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF9F2}, + {S5K6AAFX_REG_WR, 0x652C}, + {S5K6AAFX_REG_WR, 0xBC70}, + {S5K6AAFX_REG_WR, 0xBC08}, + {S5K6AAFX_REG_WR, 0x4718}, + {S5K6AAFX_REG_WR, 0x218B}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x127B}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0398}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x1376}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x2370}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x1F0D}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x890D}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x1F2F}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x27A9}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x1FE1}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x27C5}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x2043}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x285F}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x2003}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x28FF}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x20CD}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x6181}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x20EF}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x6663}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x2123}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x0100}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x1EC1}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x1EAD}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x1F79}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x04AC}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x06CC}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x23A4}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x0704}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0xB510}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF9B9}, + {S5K6AAFX_REG_WR, 0x48C3}, + {S5K6AAFX_REG_WR, 0x49C3}, + {S5K6AAFX_REG_WR, 0x8800}, + {S5K6AAFX_REG_WR, 0x8048}, + {S5K6AAFX_REG_WR, 0xBC10}, + {S5K6AAFX_REG_WR, 0xBC08}, + {S5K6AAFX_REG_WR, 0x4718}, + {S5K6AAFX_REG_WR, 0xB5F8}, + {S5K6AAFX_REG_WR, 0x1C06}, + {S5K6AAFX_REG_WR, 0x4DC0}, + {S5K6AAFX_REG_WR, 0x68AC}, + {S5K6AAFX_REG_WR, 0x1C30}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF9B3}, + {S5K6AAFX_REG_WR, 0x68A9}, + {S5K6AAFX_REG_WR, 0x4ABC}, + {S5K6AAFX_REG_WR, 0x42A1}, + {S5K6AAFX_REG_WR, 0xD003}, + {S5K6AAFX_REG_WR, 0x4BBD}, + {S5K6AAFX_REG_WR, 0x8A1B}, + {S5K6AAFX_REG_WR, 0x3301}, + {S5K6AAFX_REG_WR, 0x8013}, + {S5K6AAFX_REG_WR, 0x8813}, + {S5K6AAFX_REG_WR, 0x1C14}, + {S5K6AAFX_REG_WR, 0x2B00}, + {S5K6AAFX_REG_WR, 0xD00F}, + {S5K6AAFX_REG_WR, 0x2201}, + {S5K6AAFX_REG_WR, 0x4281}, + {S5K6AAFX_REG_WR, 0xD003}, + {S5K6AAFX_REG_WR, 0x8C2F}, + {S5K6AAFX_REG_WR, 0x42B9}, + {S5K6AAFX_REG_WR, 0xD300}, + {S5K6AAFX_REG_WR, 0x2200}, + {S5K6AAFX_REG_WR, 0x60AE}, + {S5K6AAFX_REG_WR, 0x2A00}, + {S5K6AAFX_REG_WR, 0xD003}, + {S5K6AAFX_REG_WR, 0x8C28}, + {S5K6AAFX_REG_WR, 0x42B0}, + 
{S5K6AAFX_REG_WR, 0xD800}, + {S5K6AAFX_REG_WR, 0x1C30}, + {S5K6AAFX_REG_WR, 0x1E59}, + {S5K6AAFX_REG_WR, 0x8021}, + {S5K6AAFX_REG_WR, 0xBCF8}, + {S5K6AAFX_REG_WR, 0xBC08}, + {S5K6AAFX_REG_WR, 0x4718}, + {S5K6AAFX_REG_WR, 0xB510}, + {S5K6AAFX_REG_WR, 0x1C04}, + {S5K6AAFX_REG_WR, 0x48AF}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF997}, + {S5K6AAFX_REG_WR, 0x4AAD}, + {S5K6AAFX_REG_WR, 0x4BAE}, + {S5K6AAFX_REG_WR, 0x8811}, + {S5K6AAFX_REG_WR, 0x885B}, + {S5K6AAFX_REG_WR, 0x8852}, + {S5K6AAFX_REG_WR, 0x4359}, + {S5K6AAFX_REG_WR, 0x1889}, + {S5K6AAFX_REG_WR, 0x4288}, + {S5K6AAFX_REG_WR, 0xD800}, + {S5K6AAFX_REG_WR, 0x1C08}, + {S5K6AAFX_REG_WR, 0x6020}, + {S5K6AAFX_REG_WR, 0xE7C5}, + {S5K6AAFX_REG_WR, 0xB570}, + {S5K6AAFX_REG_WR, 0x1C05}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF98F}, + {S5K6AAFX_REG_WR, 0x49A5}, + {S5K6AAFX_REG_WR, 0x8989}, + {S5K6AAFX_REG_WR, 0x4348}, + {S5K6AAFX_REG_WR, 0x0200}, + {S5K6AAFX_REG_WR, 0x0C00}, + {S5K6AAFX_REG_WR, 0x2101}, + {S5K6AAFX_REG_WR, 0x0349}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF98E}, + {S5K6AAFX_REG_WR, 0x1C04}, + {S5K6AAFX_REG_WR, 0x489F}, + {S5K6AAFX_REG_WR, 0x8F80}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF991}, + {S5K6AAFX_REG_WR, 0x1C01}, + {S5K6AAFX_REG_WR, 0x20FF}, + {S5K6AAFX_REG_WR, 0x43C0}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF994}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF998}, + {S5K6AAFX_REG_WR, 0x1C01}, + {S5K6AAFX_REG_WR, 0x4898}, + {S5K6AAFX_REG_WR, 0x8840}, + {S5K6AAFX_REG_WR, 0x4360}, + {S5K6AAFX_REG_WR, 0x0200}, + {S5K6AAFX_REG_WR, 0x0C00}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF97A}, + {S5K6AAFX_REG_WR, 0x6028}, + {S5K6AAFX_REG_WR, 0xBC70}, + {S5K6AAFX_REG_WR, 0xBC08}, + {S5K6AAFX_REG_WR, 0x4718}, + {S5K6AAFX_REG_WR, 0xB5F1}, + {S5K6AAFX_REG_WR, 0xB082}, + {S5K6AAFX_REG_WR, 0x4D96}, + {S5K6AAFX_REG_WR, 0x4E91}, + {S5K6AAFX_REG_WR, 0x88A8}, + {S5K6AAFX_REG_WR, 0x1C2C}, + {S5K6AAFX_REG_WR, 0x3420}, + {S5K6AAFX_REG_WR, 0x4F90}, + {S5K6AAFX_REG_WR, 0x2800}, + {S5K6AAFX_REG_WR, 0xD018}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF988}, + {S5K6AAFX_REG_WR, 0x9001}, + {S5K6AAFX_REG_WR, 0x9802}, + {S5K6AAFX_REG_WR, 0x6B39}, + {S5K6AAFX_REG_WR, 0x0200}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF974}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF978}, + {S5K6AAFX_REG_WR, 0x9901}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF95F}, + {S5K6AAFX_REG_WR, 0x8020}, + {S5K6AAFX_REG_WR, 0x8871}, + {S5K6AAFX_REG_WR, 0x0200}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF96A}, + {S5K6AAFX_REG_WR, 0x0400}, + {S5K6AAFX_REG_WR, 0x0C00}, + {S5K6AAFX_REG_WR, 0x21FF}, + {S5K6AAFX_REG_WR, 0x3101}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF97A}, + {S5K6AAFX_REG_WR, 0x8020}, + {S5K6AAFX_REG_WR, 0x88E8}, + {S5K6AAFX_REG_WR, 0x2800}, + {S5K6AAFX_REG_WR, 0xD00A}, + {S5K6AAFX_REG_WR, 0x4980}, + {S5K6AAFX_REG_WR, 0x8820}, + {S5K6AAFX_REG_WR, 0x3128}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF979}, + {S5K6AAFX_REG_WR, 0x8D38}, + {S5K6AAFX_REG_WR, 0x8871}, + {S5K6AAFX_REG_WR, 0x4348}, + {S5K6AAFX_REG_WR, 0x0200}, + {S5K6AAFX_REG_WR, 0x0C00}, + {S5K6AAFX_REG_WR, 0x8538}, + {S5K6AAFX_REG_WR, 0xBCFE}, + {S5K6AAFX_REG_WR, 0xBC08}, + {S5K6AAFX_REG_WR, 0x4718}, + {S5K6AAFX_REG_WR, 0xB510}, + {S5K6AAFX_REG_WR, 0x1C04}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF974}, + {S5K6AAFX_REG_WR, 0x6821}, + {S5K6AAFX_REG_WR, 0x0409}, + {S5K6AAFX_REG_WR, 0x0C09}, + {S5K6AAFX_REG_WR, 0x1A40}, + {S5K6AAFX_REG_WR, 0x4976}, + 
{S5K6AAFX_REG_WR, 0x6849}, + {S5K6AAFX_REG_WR, 0x4281}, + {S5K6AAFX_REG_WR, 0xD800}, + {S5K6AAFX_REG_WR, 0x1C08}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF971}, + {S5K6AAFX_REG_WR, 0x6020}, + {S5K6AAFX_REG_WR, 0xE75B}, + {S5K6AAFX_REG_WR, 0xB570}, + {S5K6AAFX_REG_WR, 0x6801}, + {S5K6AAFX_REG_WR, 0x040D}, + {S5K6AAFX_REG_WR, 0x0C2D}, + {S5K6AAFX_REG_WR, 0x6844}, + {S5K6AAFX_REG_WR, 0x486F}, + {S5K6AAFX_REG_WR, 0x8981}, + {S5K6AAFX_REG_WR, 0x1C28}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF927}, + {S5K6AAFX_REG_WR, 0x8060}, + {S5K6AAFX_REG_WR, 0x4970}, + {S5K6AAFX_REG_WR, 0x69C9}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF968}, + {S5K6AAFX_REG_WR, 0x1C01}, + {S5K6AAFX_REG_WR, 0x80A0}, + {S5K6AAFX_REG_WR, 0x0228}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF92D}, + {S5K6AAFX_REG_WR, 0x0400}, + {S5K6AAFX_REG_WR, 0x0C00}, + {S5K6AAFX_REG_WR, 0x8020}, + {S5K6AAFX_REG_WR, 0x496B}, + {S5K6AAFX_REG_WR, 0x2300}, + {S5K6AAFX_REG_WR, 0x5EC9}, + {S5K6AAFX_REG_WR, 0x4288}, + {S5K6AAFX_REG_WR, 0xDA02}, + {S5K6AAFX_REG_WR, 0x20FF}, + {S5K6AAFX_REG_WR, 0x3001}, + {S5K6AAFX_REG_WR, 0x8020}, + {S5K6AAFX_REG_WR, 0xE797}, + {S5K6AAFX_REG_WR, 0xB5F8}, + {S5K6AAFX_REG_WR, 0x1C04}, + {S5K6AAFX_REG_WR, 0x4867}, + {S5K6AAFX_REG_WR, 0x4E65}, + {S5K6AAFX_REG_WR, 0x7800}, + {S5K6AAFX_REG_WR, 0x6AB7}, + {S5K6AAFX_REG_WR, 0x2800}, + {S5K6AAFX_REG_WR, 0xD100}, + {S5K6AAFX_REG_WR, 0x6A37}, + {S5K6AAFX_REG_WR, 0x495D}, + {S5K6AAFX_REG_WR, 0x2800}, + {S5K6AAFX_REG_WR, 0x688D}, + {S5K6AAFX_REG_WR, 0xD100}, + {S5K6AAFX_REG_WR, 0x684D}, + {S5K6AAFX_REG_WR, 0x4859}, + {S5K6AAFX_REG_WR, 0x8841}, + {S5K6AAFX_REG_WR, 0x6820}, + {S5K6AAFX_REG_WR, 0x0200}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF94B}, + {S5K6AAFX_REG_WR, 0x8DF1}, + {S5K6AAFX_REG_WR, 0x434F}, + {S5K6AAFX_REG_WR, 0x0A3A}, + {S5K6AAFX_REG_WR, 0x4282}, + {S5K6AAFX_REG_WR, 0xD30C}, + {S5K6AAFX_REG_WR, 0x4D5C}, + {S5K6AAFX_REG_WR, 0x26FF}, + {S5K6AAFX_REG_WR, 0x8829}, + {S5K6AAFX_REG_WR, 0x3601}, + {S5K6AAFX_REG_WR, 0x43B1}, + {S5K6AAFX_REG_WR, 0x8029}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF944}, + {S5K6AAFX_REG_WR, 0x6020}, + {S5K6AAFX_REG_WR, 0x8828}, + {S5K6AAFX_REG_WR, 0x4330}, + {S5K6AAFX_REG_WR, 0x8028}, + {S5K6AAFX_REG_WR, 0xE73B}, + {S5K6AAFX_REG_WR, 0x1C0A}, + {S5K6AAFX_REG_WR, 0x436A}, + {S5K6AAFX_REG_WR, 0x0A12}, + {S5K6AAFX_REG_WR, 0x4282}, + {S5K6AAFX_REG_WR, 0xD304}, + {S5K6AAFX_REG_WR, 0x0200}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF8F3}, + {S5K6AAFX_REG_WR, 0x6020}, + {S5K6AAFX_REG_WR, 0xE7F4}, + {S5K6AAFX_REG_WR, 0x6025}, + {S5K6AAFX_REG_WR, 0xE7F2}, + {S5K6AAFX_REG_WR, 0xB410}, + {S5K6AAFX_REG_WR, 0x4848}, + {S5K6AAFX_REG_WR, 0x4950}, + {S5K6AAFX_REG_WR, 0x89C0}, + {S5K6AAFX_REG_WR, 0x2316}, + {S5K6AAFX_REG_WR, 0x5ECC}, + {S5K6AAFX_REG_WR, 0x1C02}, + {S5K6AAFX_REG_WR, 0x42A0}, + {S5K6AAFX_REG_WR, 0xDC00}, + {S5K6AAFX_REG_WR, 0x1C22}, + {S5K6AAFX_REG_WR, 0x82CA}, + {S5K6AAFX_REG_WR, 0x2318}, + {S5K6AAFX_REG_WR, 0x5ECA}, + {S5K6AAFX_REG_WR, 0x4290}, + {S5K6AAFX_REG_WR, 0xDC00}, + {S5K6AAFX_REG_WR, 0x1C10}, + {S5K6AAFX_REG_WR, 0x8308}, + {S5K6AAFX_REG_WR, 0xBC10}, + {S5K6AAFX_REG_WR, 0x4770}, + {S5K6AAFX_REG_WR, 0xB570}, + {S5K6AAFX_REG_WR, 0x1C06}, + {S5K6AAFX_REG_WR, 0x4C45}, + {S5K6AAFX_REG_WR, 0x2501}, + {S5K6AAFX_REG_WR, 0x8820}, + {S5K6AAFX_REG_WR, 0x02AD}, + {S5K6AAFX_REG_WR, 0x43A8}, + {S5K6AAFX_REG_WR, 0x8020}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF91E}, + {S5K6AAFX_REG_WR, 0x6030}, + {S5K6AAFX_REG_WR, 0xF7FF}, + {S5K6AAFX_REG_WR, 0xFFE0}, + 
{S5K6AAFX_REG_WR, 0x8820}, + {S5K6AAFX_REG_WR, 0x4328}, + {S5K6AAFX_REG_WR, 0x8020}, + {S5K6AAFX_REG_WR, 0xE741}, + {S5K6AAFX_REG_WR, 0xB570}, + {S5K6AAFX_REG_WR, 0x4C3D}, + {S5K6AAFX_REG_WR, 0x2501}, + {S5K6AAFX_REG_WR, 0x8820}, + {S5K6AAFX_REG_WR, 0x02ED}, + {S5K6AAFX_REG_WR, 0x43A8}, + {S5K6AAFX_REG_WR, 0x8020}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF916}, + {S5K6AAFX_REG_WR, 0xF7FF}, + {S5K6AAFX_REG_WR, 0xFFD1}, + {S5K6AAFX_REG_WR, 0x8820}, + {S5K6AAFX_REG_WR, 0x4328}, + {S5K6AAFX_REG_WR, 0x8020}, + {S5K6AAFX_REG_WR, 0xE732}, + {S5K6AAFX_REG_WR, 0x230D}, + {S5K6AAFX_REG_WR, 0x071B}, + {S5K6AAFX_REG_WR, 0x18C3}, + {S5K6AAFX_REG_WR, 0x8818}, + {S5K6AAFX_REG_WR, 0x2A00}, + {S5K6AAFX_REG_WR, 0xD001}, + {S5K6AAFX_REG_WR, 0x4308}, + {S5K6AAFX_REG_WR, 0xE000}, + {S5K6AAFX_REG_WR, 0x4388}, + {S5K6AAFX_REG_WR, 0x8018}, + {S5K6AAFX_REG_WR, 0x4770}, + {S5K6AAFX_REG_WR, 0xB570}, + {S5K6AAFX_REG_WR, 0x2402}, + {S5K6AAFX_REG_WR, 0x4932}, + {S5K6AAFX_REG_WR, 0x8809}, + {S5K6AAFX_REG_WR, 0x078A}, + {S5K6AAFX_REG_WR, 0xD500}, + {S5K6AAFX_REG_WR, 0x2406}, + {S5K6AAFX_REG_WR, 0x2900}, + {S5K6AAFX_REG_WR, 0xD01F}, + {S5K6AAFX_REG_WR, 0x1C02}, + {S5K6AAFX_REG_WR, 0x207D}, + {S5K6AAFX_REG_WR, 0x00C0}, + {S5K6AAFX_REG_WR, 0x2600}, + {S5K6AAFX_REG_WR, 0x4D2D}, + {S5K6AAFX_REG_WR, 0x2A00}, + {S5K6AAFX_REG_WR, 0xD019}, + {S5K6AAFX_REG_WR, 0x2101}, + {S5K6AAFX_REG_WR, 0x8229}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF8F9}, + {S5K6AAFX_REG_WR, 0x2200}, + {S5K6AAFX_REG_WR, 0x2101}, + {S5K6AAFX_REG_WR, 0x482A}, + {S5K6AAFX_REG_WR, 0x0309}, + {S5K6AAFX_REG_WR, 0xF7FF}, + {S5K6AAFX_REG_WR, 0xFFDB}, + {S5K6AAFX_REG_WR, 0x2008}, + {S5K6AAFX_REG_WR, 0x4304}, + {S5K6AAFX_REG_WR, 0x1C21}, + {S5K6AAFX_REG_WR, 0x4C26}, + {S5K6AAFX_REG_WR, 0x2200}, + {S5K6AAFX_REG_WR, 0x3C14}, + {S5K6AAFX_REG_WR, 0x1C20}, + {S5K6AAFX_REG_WR, 0xF7FF}, + {S5K6AAFX_REG_WR, 0xFFD2}, + {S5K6AAFX_REG_WR, 0x2200}, + {S5K6AAFX_REG_WR, 0x2121}, + {S5K6AAFX_REG_WR, 0x1C20}, + {S5K6AAFX_REG_WR, 0xF7FF}, + {S5K6AAFX_REG_WR, 0xFFCD}, + {S5K6AAFX_REG_WR, 0x802E}, + {S5K6AAFX_REG_WR, 0xE6FD}, + {S5K6AAFX_REG_WR, 0x822E}, + {S5K6AAFX_REG_WR, 0x0789}, + {S5K6AAFX_REG_WR, 0x0FC9}, + {S5K6AAFX_REG_WR, 0x0089}, + {S5K6AAFX_REG_WR, 0x223B}, + {S5K6AAFX_REG_WR, 0x4311}, + {S5K6AAFX_REG_WR, 0x8029}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF8DA}, + {S5K6AAFX_REG_WR, 0xE7F4}, + {S5K6AAFX_REG_WR, 0xB510}, + {S5K6AAFX_REG_WR, 0x491B}, + {S5K6AAFX_REG_WR, 0x8FC8}, + {S5K6AAFX_REG_WR, 0x2800}, + {S5K6AAFX_REG_WR, 0xD007}, + {S5K6AAFX_REG_WR, 0x2000}, + {S5K6AAFX_REG_WR, 0x87C8}, + {S5K6AAFX_REG_WR, 0x8F88}, + {S5K6AAFX_REG_WR, 0x4C19}, + {S5K6AAFX_REG_WR, 0x2800}, + {S5K6AAFX_REG_WR, 0xD002}, + {S5K6AAFX_REG_WR, 0x2008}, + {S5K6AAFX_REG_WR, 0x8020}, + {S5K6AAFX_REG_WR, 0xE689}, + {S5K6AAFX_REG_WR, 0x4815}, + {S5K6AAFX_REG_WR, 0x3060}, + {S5K6AAFX_REG_WR, 0x8900}, + {S5K6AAFX_REG_WR, 0x2800}, + {S5K6AAFX_REG_WR, 0xD103}, + {S5K6AAFX_REG_WR, 0x4814}, + {S5K6AAFX_REG_WR, 0x2101}, + {S5K6AAFX_REG_WR, 0xF000}, + {S5K6AAFX_REG_WR, 0xF8CA}, + {S5K6AAFX_REG_WR, 0x2010}, + {S5K6AAFX_REG_WR, 0x8020}, + {S5K6AAFX_REG_WR, 0xE7F2}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x1376}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x2370}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x14D8}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x235C}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0xF4B0}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x1554}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x1AB8}, + 
{S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x0080}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x046C}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x0468}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x1100}, + {S5K6AAFX_REG_WR, 0xD000}, + {S5K6AAFX_REG_WR, 0x198C}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x0AC4}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0xB0A0}, + {S5K6AAFX_REG_WR, 0xD000}, + {S5K6AAFX_REG_WR, 0xB0B4}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x01B8}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x044E}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x0450}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x4778}, + {S5K6AAFX_REG_WR, 0x46C0}, + {S5K6AAFX_REG_WR, 0xC000}, + {S5K6AAFX_REG_WR, 0xE59F}, + {S5K6AAFX_REG_WR, 0xFF1C}, + {S5K6AAFX_REG_WR, 0xE12F}, + {S5K6AAFX_REG_WR, 0x9CE7}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x4778}, + {S5K6AAFX_REG_WR, 0x46C0}, + {S5K6AAFX_REG_WR, 0xF004}, + {S5K6AAFX_REG_WR, 0xE51F}, + {S5K6AAFX_REG_WR, 0x9FB8}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x4778}, + {S5K6AAFX_REG_WR, 0x46C0}, + {S5K6AAFX_REG_WR, 0xC000}, + {S5K6AAFX_REG_WR, 0xE59F}, + {S5K6AAFX_REG_WR, 0xFF1C}, + {S5K6AAFX_REG_WR, 0xE12F}, + {S5K6AAFX_REG_WR, 0x14C1}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x4778}, + {S5K6AAFX_REG_WR, 0x46C0}, + {S5K6AAFX_REG_WR, 0xC000}, + {S5K6AAFX_REG_WR, 0xE59F}, + {S5K6AAFX_REG_WR, 0xFF1C}, + {S5K6AAFX_REG_WR, 0xE12F}, + {S5K6AAFX_REG_WR, 0x27E1}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x4778}, + {S5K6AAFX_REG_WR, 0x46C0}, + {S5K6AAFX_REG_WR, 0xC000}, + {S5K6AAFX_REG_WR, 0xE59F}, + {S5K6AAFX_REG_WR, 0xFF1C}, + {S5K6AAFX_REG_WR, 0xE12F}, + {S5K6AAFX_REG_WR, 0x88DF}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x4778}, + {S5K6AAFX_REG_WR, 0x46C0}, + {S5K6AAFX_REG_WR, 0xC000}, + {S5K6AAFX_REG_WR, 0xE59F}, + {S5K6AAFX_REG_WR, 0xFF1C}, + {S5K6AAFX_REG_WR, 0xE12F}, + {S5K6AAFX_REG_WR, 0x275D}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x4778}, + {S5K6AAFX_REG_WR, 0x46C0}, + {S5K6AAFX_REG_WR, 0xC000}, + {S5K6AAFX_REG_WR, 0xE59F}, + {S5K6AAFX_REG_WR, 0xFF1C}, + {S5K6AAFX_REG_WR, 0xE12F}, + {S5K6AAFX_REG_WR, 0x1ED3}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x4778}, + {S5K6AAFX_REG_WR, 0x46C0}, + {S5K6AAFX_REG_WR, 0xC000}, + {S5K6AAFX_REG_WR, 0xE59F}, + {S5K6AAFX_REG_WR, 0xFF1C}, + {S5K6AAFX_REG_WR, 0xE12F}, + {S5K6AAFX_REG_WR, 0x27C5}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x4778}, + {S5K6AAFX_REG_WR, 0x46C0}, + {S5K6AAFX_REG_WR, 0xF004}, + {S5K6AAFX_REG_WR, 0xE51F}, + {S5K6AAFX_REG_WR, 0xA144}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x4778}, + {S5K6AAFX_REG_WR, 0x46C0}, + {S5K6AAFX_REG_WR, 0xC000}, + {S5K6AAFX_REG_WR, 0xE59F}, + {S5K6AAFX_REG_WR, 0xFF1C}, + {S5K6AAFX_REG_WR, 0xE12F}, + {S5K6AAFX_REG_WR, 0x1F87}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x4778}, + {S5K6AAFX_REG_WR, 0x46C0}, + {S5K6AAFX_REG_WR, 0xC000}, + {S5K6AAFX_REG_WR, 0xE59F}, + {S5K6AAFX_REG_WR, 0xFF1C}, + {S5K6AAFX_REG_WR, 0xE12F}, + {S5K6AAFX_REG_WR, 0x27A9}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x4778}, + {S5K6AAFX_REG_WR, 0x46C0}, + {S5K6AAFX_REG_WR, 0xC000}, + {S5K6AAFX_REG_WR, 0xE59F}, + {S5K6AAFX_REG_WR, 0xFF1C}, + {S5K6AAFX_REG_WR, 0xE12F}, + {S5K6AAFX_REG_WR, 0x1ECB}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x4778}, + {S5K6AAFX_REG_WR, 0x46C0}, + {S5K6AAFX_REG_WR, 0xC000}, + {S5K6AAFX_REG_WR, 0xE59F}, + {S5K6AAFX_REG_WR, 0xFF1C}, + {S5K6AAFX_REG_WR, 0xE12F}, + {S5K6AAFX_REG_WR, 0x28FF}, + 
{S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x4778}, + {S5K6AAFX_REG_WR, 0x46C0}, + {S5K6AAFX_REG_WR, 0xC000}, + {S5K6AAFX_REG_WR, 0xE59F}, + {S5K6AAFX_REG_WR, 0xFF1C}, + {S5K6AAFX_REG_WR, 0xE12F}, + {S5K6AAFX_REG_WR, 0x26F9}, +
{S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x4778}, + {S5K6AAFX_REG_WR, 0x46C0}, + {S5K6AAFX_REG_WR, 0xC000}, + {S5K6AAFX_REG_WR, 0xE59F}, + {S5K6AAFX_REG_WR, 0xFF1C}, + {S5K6AAFX_REG_WR, 0xE12F}, + {S5K6AAFX_REG_WR, 0x4027}, +
{S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x4778}, + {S5K6AAFX_REG_WR, 0x46C0}, + {S5K6AAFX_REG_WR, 0xC000}, + {S5K6AAFX_REG_WR, 0xE59F}, + {S5K6AAFX_REG_WR, 0xFF1C}, + {S5K6AAFX_REG_WR, 0xE12F}, + {S5K6AAFX_REG_WR, 0x9F03}, +
{S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x4778}, + {S5K6AAFX_REG_WR, 0x46C0}, + {S5K6AAFX_REG_WR, 0xF004}, + {S5K6AAFX_REG_WR, 0xE51F}, + {S5K6AAFX_REG_WR, 0x9D9C}, +
{S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x4778}, + {S5K6AAFX_REG_WR, 0x46C0}, + {S5K6AAFX_REG_WR, 0xC000}, + {S5K6AAFX_REG_WR, 0xE59F}, + {S5K6AAFX_REG_WR, 0xFF1C}, + {S5K6AAFX_REG_WR, 0xE12F}, + {S5K6AAFX_REG_WR, 0x285F}, +
{S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x4778}, + {S5K6AAFX_REG_WR, 0x46C0}, + {S5K6AAFX_REG_WR, 0xC000}, + {S5K6AAFX_REG_WR, 0xE59F}, + {S5K6AAFX_REG_WR, 0xFF1C}, + {S5K6AAFX_REG_WR, 0xE12F}, + {S5K6AAFX_REG_WR, 0x6181}, +
{S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x4778}, + {S5K6AAFX_REG_WR, 0x46C0}, + {S5K6AAFX_REG_WR, 0xC000}, + {S5K6AAFX_REG_WR, 0xE59F}, + {S5K6AAFX_REG_WR, 0xFF1C}, + {S5K6AAFX_REG_WR, 0xE12F}, + {S5K6AAFX_REG_WR, 0x6663}, +
{S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x4778}, + {S5K6AAFX_REG_WR, 0x46C0}, + {S5K6AAFX_REG_WR, 0xC000}, + {S5K6AAFX_REG_WR, 0xE59F}, + {S5K6AAFX_REG_WR, 0xFF1C}, + {S5K6AAFX_REG_WR, 0xE12F}, + {S5K6AAFX_REG_WR, 0x85D9}, +
{S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x4778}, + {S5K6AAFX_REG_WR, 0x46C0}, + {S5K6AAFX_REG_WR, 0xC000}, + {S5K6AAFX_REG_WR, 0xE59F}, + {S5K6AAFX_REG_WR, 0xFF1C}, + {S5K6AAFX_REG_WR, 0xE12F}, + {S5K6AAFX_REG_WR, 0x2001}, +
{S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0xE848}, + {S5K6AAFX_REG_WR, 0x0001}, + {S5K6AAFX_REG_WR, 0xE848}, + {S5K6AAFX_REG_WR, 0x0001}, + {S5K6AAFX_REG_WR, 0x0500}, + {S5K6AAFX_REG_WR, 0x0064}, + {S5K6AAFX_REG_WR, 0x0002}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + /* End T&P part */ +}; +
+static const struct s5k6aafx_i2c_reg_conf analog_setting_init_tbl[] = { + /* Start tuning part */ + + /* Analog Settings */ + {S5K6AAFX_REG_W_ADDL, 0x1102}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_W_ADDL, 0x1108}, + {S5K6AAFX_REG_WR, 0x0090}, + + {0xF40C, 0x0060}, + + {S5K6AAFX_REG_W_ADDL, 0x11B6}, + {S5K6AAFX_REG_WR, 0x0020}, + {S5K6AAFX_REG_WR, 0x0010}, + {S5K6AAFX_REG_WR, 0x0008}, + {S5K6AAFX_REG_WR, 0x0004}, + + {S5K6AAFX_REG_W_ADDL, 0x119C}, + {S5K6AAFX_REG_WR, 0x0040}, + + {S5K6AAFX_REG_W_ADDL, 0x07B6}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0002}, + {S5K6AAFX_REG_WR, 0x0003}, + {S5K6AAFX_REG_WR, 0x0006}, + {S5K6AAFX_REG_WR, 0x000C}, + {S5K6AAFX_REG_WR, 0x0018}, + + {0x1000, 0x0001}, /* Set host interrupt so main starts running */ + + /* delay 10ms */ + +}; +
+static const struct s5k6aafx_i2c_reg_conf register_init_tbl[] = { +// Start user init script + +// End user init script + + /* param_start - TVAR_ash_GASAlpha */ + {S5K6AAFX_REG_W_ADDL, 0x0712}, + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_GASAlpha[0] */ + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_GASAlpha[1] */ + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_GASAlpha[2] */ + {S5K6AAFX_REG_WR, 0x00B0}, /* TVAR_ash_GASAlpha[3] */ + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_GASAlpha[4] */ + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_GASAlpha[5] */ + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_GASAlpha[6] */ + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_GASAlpha[7] */ + {S5K6AAFX_REG_WR, 0x00C8}, /* TVAR_ash_GASAlpha[8] */ + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_GASAlpha[9] */ + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_GASAlpha[10] */ + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_GASAlpha[11] */ + {S5K6AAFX_REG_WR, 0x00D8}, /* TVAR_ash_GASAlpha[12] */ + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_GASAlpha[13] */ + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_GASAlpha[14] */ + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_GASAlpha[15] */ + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_GASAlpha[16] */ + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_GASAlpha[17] */ + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_GASAlpha[18] */ + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_GASAlpha[19] */ + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_GASAlpha[20] */ + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_GASAlpha[21] */ + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_GASAlpha[22] */ + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_GASAlpha[23] */ + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_GASAlpha[24] */ + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_GASAlpha[25] */ + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_GASAlpha[26] */ + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_GASAlpha[27] */ + /* param_end - TVAR_ash_GASAlpha */ +
+ /* param_start - TVAR_ash_GASOutdoorAlpha */ + {S5K6AAFX_REG_W_ADDL, 0x074A}, + {S5K6AAFX_REG_WR, 0x00FB}, //TVAR_ash_GASOutdoorAlpha[0] + {S5K6AAFX_REG_WR, 0x00FF}, //TVAR_ash_GASOutdoorAlpha[1] + {S5K6AAFX_REG_WR, 0x00F9}, //TVAR_ash_GASOutdoorAlpha[2] + {S5K6AAFX_REG_WR, 0x0104}, //TVAR_ash_GASOutdoorAlpha[3] + /* param_end - TVAR_ash_GASOutdoorAlpha */ +
+ {S5K6AAFX_REG_W_ADDL, 0x075A}, + {S5K6AAFX_REG_WR, 0x0001}, /* ash_bParabolicEstimation */ + {S5K6AAFX_REG_WR, 0x0282}, /* ash_uParabolicCenterX */ + {S5K6AAFX_REG_WR, 0x0216}, /* ash_uParabolicCenterY */ + {S5K6AAFX_REG_WR, 0x000B}, /* ash_uParabolicScalingA */ + {S5K6AAFX_REG_WR, 0x000E}, /* ash_uParabolicScalingB */ +
+ /* param_start - TVAR_ash_pGAS */ + {S5K6AAFX_REG_W_ADDL, 0x247C}, + {S5K6AAFX_REG_WR, 0x02B2}, /* TVAR_ash_pGAS[0] */ + {S5K6AAFX_REG_WR, 0x01C3}, /* TVAR_ash_pGAS[1] */ + {S5K6AAFX_REG_WR, 0x0138}, /* TVAR_ash_pGAS[2] */ + {S5K6AAFX_REG_WR, 0x0100}, /* TVAR_ash_pGAS[3] */ + {S5K6AAFX_REG_WR, 0x00E5}, /* TVAR_ash_pGAS[4] */ + {S5K6AAFX_REG_WR, 0x00D1}, /* TVAR_ash_pGAS[5] */ + {S5K6AAFX_REG_WR, 0x00C6}, /* TVAR_ash_pGAS[6] */ + {S5K6AAFX_REG_WR, 0x00CA}, /* TVAR_ash_pGAS[7] */ + {S5K6AAFX_REG_WR, 0x00DA}, /* TVAR_ash_pGAS[8] */ + {S5K6AAFX_REG_WR, 0x00EE}, /* TVAR_ash_pGAS[9] */ + {S5K6AAFX_REG_WR, 0x0110}, /* TVAR_ash_pGAS[10] */ + {S5K6AAFX_REG_WR, 0x0179}, /* TVAR_ash_pGAS[11] */ + {S5K6AAFX_REG_WR, 0x0232}, /* TVAR_ash_pGAS[12] */ + {S5K6AAFX_REG_WR, 0x01EC}, /* TVAR_ash_pGAS[13] */ + {S5K6AAFX_REG_WR, 0x0148}, /* TVAR_ash_pGAS[14] */ + {S5K6AAFX_REG_WR, 0x00F3}, /* TVAR_ash_pGAS[15] */ + {S5K6AAFX_REG_WR, 0x00C7}, /* TVAR_ash_pGAS[16] */ + {S5K6AAFX_REG_WR, 0x00A3}, /* TVAR_ash_pGAS[17] */ + {S5K6AAFX_REG_WR, 0x0089}, /* TVAR_ash_pGAS[18] */ + {S5K6AAFX_REG_WR, 0x007A}, /* TVAR_ash_pGAS[19] */ + {S5K6AAFX_REG_WR, 0x0081}, /* TVAR_ash_pGAS[20] */ + {S5K6AAFX_REG_WR, 0x0093}, /* TVAR_ash_pGAS[21] */ +
{S5K6AAFX_REG_WR, 0x00AF}, /* TVAR_ash_pGAS[22] */ + {S5K6AAFX_REG_WR, 0x00CF}, /* TVAR_ash_pGAS[23] */ + {S5K6AAFX_REG_WR, 0x010A}, /* TVAR_ash_pGAS[24] */ + {S5K6AAFX_REG_WR, 0x0181}, /* TVAR_ash_pGAS[25] */ + {S5K6AAFX_REG_WR, 0x015D}, /* TVAR_ash_pGAS[26] */ + {S5K6AAFX_REG_WR, 0x00F2}, /* TVAR_ash_pGAS[27] */ + {S5K6AAFX_REG_WR, 0x00B5}, /* TVAR_ash_pGAS[28] */ + {S5K6AAFX_REG_WR, 0x0083}, /* TVAR_ash_pGAS[29] */ + {S5K6AAFX_REG_WR, 0x0058}, /* TVAR_ash_pGAS[30] */ + {S5K6AAFX_REG_WR, 0x003F}, /* TVAR_ash_pGAS[31] */ + {S5K6AAFX_REG_WR, 0x0036}, /* TVAR_ash_pGAS[32] */ + {S5K6AAFX_REG_WR, 0x0038}, /* TVAR_ash_pGAS[33] */ + {S5K6AAFX_REG_WR, 0x0048}, /* TVAR_ash_pGAS[34] */ + {S5K6AAFX_REG_WR, 0x0065}, /* TVAR_ash_pGAS[35] */ + {S5K6AAFX_REG_WR, 0x008E}, /* TVAR_ash_pGAS[36] */ + {S5K6AAFX_REG_WR, 0x00C0}, /* TVAR_ash_pGAS[37] */ + {S5K6AAFX_REG_WR, 0x010A}, /* TVAR_ash_pGAS[38] */ + {S5K6AAFX_REG_WR, 0x0119}, /* TVAR_ash_pGAS[39] */ + {S5K6AAFX_REG_WR, 0x00C7}, /* TVAR_ash_pGAS[40] */ + {S5K6AAFX_REG_WR, 0x008A}, /* TVAR_ash_pGAS[41] */ + {S5K6AAFX_REG_WR, 0x0056}, /* TVAR_ash_pGAS[42] */ + {S5K6AAFX_REG_WR, 0x0030}, /* TVAR_ash_pGAS[43] */ + {S5K6AAFX_REG_WR, 0x001B}, /* TVAR_ash_pGAS[44] */ + {S5K6AAFX_REG_WR, 0x0012}, /* TVAR_ash_pGAS[45] */ + {S5K6AAFX_REG_WR, 0x0011}, /* TVAR_ash_pGAS[46] */ + {S5K6AAFX_REG_WR, 0x001C}, /* TVAR_ash_pGAS[47] */ + {S5K6AAFX_REG_WR, 0x0036}, /* TVAR_ash_pGAS[48] */ + {S5K6AAFX_REG_WR, 0x005F}, /* TVAR_ash_pGAS[49] */ + {S5K6AAFX_REG_WR, 0x0096}, /* TVAR_ash_pGAS[50] */ + {S5K6AAFX_REG_WR, 0x00D2}, /* TVAR_ash_pGAS[51] */ + {S5K6AAFX_REG_WR, 0x00FA}, /* TVAR_ash_pGAS[52] */ + {S5K6AAFX_REG_WR, 0x00B7}, /* TVAR_ash_pGAS[53] */ + {S5K6AAFX_REG_WR, 0x0073}, /* TVAR_ash_pGAS[54] */ + {S5K6AAFX_REG_WR, 0x003D}, /* TVAR_ash_pGAS[55] */ + {S5K6AAFX_REG_WR, 0x001B}, /* TVAR_ash_pGAS[56] */ + {S5K6AAFX_REG_WR, 0x000C}, /* TVAR_ash_pGAS[57] */ + {S5K6AAFX_REG_WR, 0x0004}, /* TVAR_ash_pGAS[58] */ + {S5K6AAFX_REG_WR, 0x0004}, /* TVAR_ash_pGAS[59] */ + {S5K6AAFX_REG_WR, 0x0008}, /* TVAR_ash_pGAS[60] */ + {S5K6AAFX_REG_WR, 0x001C}, /* TVAR_ash_pGAS[61] */ + {S5K6AAFX_REG_WR, 0x0045}, /* TVAR_ash_pGAS[62] */ + {S5K6AAFX_REG_WR, 0x0083}, /* TVAR_ash_pGAS[63] */ + {S5K6AAFX_REG_WR, 0x00BA}, /* TVAR_ash_pGAS[64] */ + {S5K6AAFX_REG_WR, 0x00F2}, /* TVAR_ash_pGAS[65] */ + {S5K6AAFX_REG_WR, 0x00B2}, /* TVAR_ash_pGAS[66] */ + {S5K6AAFX_REG_WR, 0x006B}, /* TVAR_ash_pGAS[67] */ + {S5K6AAFX_REG_WR, 0x0034}, /* TVAR_ash_pGAS[68] */ + {S5K6AAFX_REG_WR, 0x0016}, /* TVAR_ash_pGAS[69] */ + {S5K6AAFX_REG_WR, 0x0008}, /* TVAR_ash_pGAS[70] */ + {S5K6AAFX_REG_WR, 0x0000}, /* TVAR_ash_pGAS[71] */ + {S5K6AAFX_REG_WR, 0x0000}, /* TVAR_ash_pGAS[72] */ + {S5K6AAFX_REG_WR, 0x0006}, /* TVAR_ash_pGAS[73] */ + {S5K6AAFX_REG_WR, 0x0018}, /* TVAR_ash_pGAS[74] */ + {S5K6AAFX_REG_WR, 0x003F}, /* TVAR_ash_pGAS[75] */ + {S5K6AAFX_REG_WR, 0x0080}, /* TVAR_ash_pGAS[76] */ + {S5K6AAFX_REG_WR, 0x00BA}, /* TVAR_ash_pGAS[77] */ + {S5K6AAFX_REG_WR, 0x00FD}, /* TVAR_ash_pGAS[78] */ + {S5K6AAFX_REG_WR, 0x00BE}, /* TVAR_ash_pGAS[79] */ + {S5K6AAFX_REG_WR, 0x0075}, /* TVAR_ash_pGAS[80] */ + {S5K6AAFX_REG_WR, 0x003D}, /* TVAR_ash_pGAS[81] */ + {S5K6AAFX_REG_WR, 0x001A}, /* TVAR_ash_pGAS[82] */ + {S5K6AAFX_REG_WR, 0x000B}, /* TVAR_ash_pGAS[83] */ + {S5K6AAFX_REG_WR, 0x0003}, /* TVAR_ash_pGAS[84] */ + {S5K6AAFX_REG_WR, 0x0005}, /* TVAR_ash_pGAS[85] */ + {S5K6AAFX_REG_WR, 0x000D}, /* TVAR_ash_pGAS[86] */ + {S5K6AAFX_REG_WR, 0x0022}, /* TVAR_ash_pGAS[87] */ + {S5K6AAFX_REG_WR, 0x004C}, /* TVAR_ash_pGAS[88] */ + 
{S5K6AAFX_REG_WR, 0x008E}, /* TVAR_ash_pGAS[89] */ + {S5K6AAFX_REG_WR, 0x00CB}, /* TVAR_ash_pGAS[90] */ + {S5K6AAFX_REG_WR, 0x0121}, /* TVAR_ash_pGAS[91] */ + {S5K6AAFX_REG_WR, 0x00DB}, /* TVAR_ash_pGAS[92] */ + {S5K6AAFX_REG_WR, 0x0096}, /* TVAR_ash_pGAS[93] */ + {S5K6AAFX_REG_WR, 0x0057}, /* TVAR_ash_pGAS[94] */ + {S5K6AAFX_REG_WR, 0x002C}, /* TVAR_ash_pGAS[95] */ + {S5K6AAFX_REG_WR, 0x0016}, /* TVAR_ash_pGAS[96] */ + {S5K6AAFX_REG_WR, 0x000F}, /* TVAR_ash_pGAS[97] */ + {S5K6AAFX_REG_WR, 0x0011}, /* TVAR_ash_pGAS[98] */ + {S5K6AAFX_REG_WR, 0x001E}, /* TVAR_ash_pGAS[99] */ + {S5K6AAFX_REG_WR, 0x003B}, /* TVAR_ash_pGAS[100] */ + {S5K6AAFX_REG_WR, 0x006D}, /* TVAR_ash_pGAS[101] */ + {S5K6AAFX_REG_WR, 0x00AE}, /* TVAR_ash_pGAS[102] */ + {S5K6AAFX_REG_WR, 0x00F0}, /* TVAR_ash_pGAS[103] */ + {S5K6AAFX_REG_WR, 0x0163}, /* TVAR_ash_pGAS[104] */ + {S5K6AAFX_REG_WR, 0x0107}, /* TVAR_ash_pGAS[105] */ + {S5K6AAFX_REG_WR, 0x00C6}, /* TVAR_ash_pGAS[106] */ + {S5K6AAFX_REG_WR, 0x0085}, /* TVAR_ash_pGAS[107] */ + {S5K6AAFX_REG_WR, 0x0053}, /* TVAR_ash_pGAS[108] */ + {S5K6AAFX_REG_WR, 0x0034}, /* TVAR_ash_pGAS[109] */ + {S5K6AAFX_REG_WR, 0x0029}, /* TVAR_ash_pGAS[110] */ + {S5K6AAFX_REG_WR, 0x002F}, /* TVAR_ash_pGAS[111] */ + {S5K6AAFX_REG_WR, 0x0042}, /* TVAR_ash_pGAS[112] */ + {S5K6AAFX_REG_WR, 0x0066}, /* TVAR_ash_pGAS[113] */ + {S5K6AAFX_REG_WR, 0x009E}, /* TVAR_ash_pGAS[114] */ + {S5K6AAFX_REG_WR, 0x00DC}, /* TVAR_ash_pGAS[115] */ + {S5K6AAFX_REG_WR, 0x012D}, /* TVAR_ash_pGAS[116] */ + {S5K6AAFX_REG_WR, 0x01E1}, /* TVAR_ash_pGAS[117] */ + {S5K6AAFX_REG_WR, 0x014C}, /* TVAR_ash_pGAS[118] */ + {S5K6AAFX_REG_WR, 0x0102}, /* TVAR_ash_pGAS[119] */ + {S5K6AAFX_REG_WR, 0x00CA}, /* TVAR_ash_pGAS[120] */ + {S5K6AAFX_REG_WR, 0x0096}, /* TVAR_ash_pGAS[121] */ + {S5K6AAFX_REG_WR, 0x0072}, /* TVAR_ash_pGAS[122] */ + {S5K6AAFX_REG_WR, 0x0062}, /* TVAR_ash_pGAS[123] */ + {S5K6AAFX_REG_WR, 0x0068}, /* TVAR_ash_pGAS[124] */ + {S5K6AAFX_REG_WR, 0x007F}, /* TVAR_ash_pGAS[125] */ + {S5K6AAFX_REG_WR, 0x00A9}, /* TVAR_ash_pGAS[126] */ + {S5K6AAFX_REG_WR, 0x00D7}, /* TVAR_ash_pGAS[127] */ + {S5K6AAFX_REG_WR, 0x011B}, /* TVAR_ash_pGAS[128] */ + {S5K6AAFX_REG_WR, 0x0196}, /* TVAR_ash_pGAS[129] */ + {S5K6AAFX_REG_WR, 0x029C}, /* TVAR_ash_pGAS[130] */ + {S5K6AAFX_REG_WR, 0x01C0}, /* TVAR_ash_pGAS[131] */ + {S5K6AAFX_REG_WR, 0x0144}, /* TVAR_ash_pGAS[132] */ + {S5K6AAFX_REG_WR, 0x0108}, /* TVAR_ash_pGAS[133] */ + {S5K6AAFX_REG_WR, 0x00DE}, /* TVAR_ash_pGAS[134] */ + {S5K6AAFX_REG_WR, 0x00BB}, /* TVAR_ash_pGAS[135] */ + {S5K6AAFX_REG_WR, 0x00AB}, /* TVAR_ash_pGAS[136] */ + {S5K6AAFX_REG_WR, 0x00AC}, /* TVAR_ash_pGAS[137] */ + {S5K6AAFX_REG_WR, 0x00C7}, /* TVAR_ash_pGAS[138] */ + {S5K6AAFX_REG_WR, 0x00E8}, /* TVAR_ash_pGAS[139] */ + {S5K6AAFX_REG_WR, 0x011A}, /* TVAR_ash_pGAS[140] */ + {S5K6AAFX_REG_WR, 0x017B}, /* TVAR_ash_pGAS[141] */ + {S5K6AAFX_REG_WR, 0x0222}, /* TVAR_ash_pGAS[142] */ + {S5K6AAFX_REG_WR, 0x0281}, /* TVAR_ash_pGAS[143] */ + {S5K6AAFX_REG_WR, 0x019C}, /* TVAR_ash_pGAS[144] */ + {S5K6AAFX_REG_WR, 0x011A}, /* TVAR_ash_pGAS[145] */ + {S5K6AAFX_REG_WR, 0x00E7}, /* TVAR_ash_pGAS[146] */ + {S5K6AAFX_REG_WR, 0x00CF}, /* TVAR_ash_pGAS[147] */ + {S5K6AAFX_REG_WR, 0x00BE}, /* TVAR_ash_pGAS[148] */ + {S5K6AAFX_REG_WR, 0x00B3}, /* TVAR_ash_pGAS[149] */ + {S5K6AAFX_REG_WR, 0x00B2}, /* TVAR_ash_pGAS[150] */ + {S5K6AAFX_REG_WR, 0x00BA}, /* TVAR_ash_pGAS[151] */ + {S5K6AAFX_REG_WR, 0x00C7}, /* TVAR_ash_pGAS[152] */ + {S5K6AAFX_REG_WR, 0x00E0}, /* TVAR_ash_pGAS[153] */ + {S5K6AAFX_REG_WR, 0x0139}, /* TVAR_ash_pGAS[154] */ + 
{S5K6AAFX_REG_WR, 0x01E4}, /* TVAR_ash_pGAS[155] */ + {S5K6AAFX_REG_WR, 0x01B4}, /* TVAR_ash_pGAS[156] */ + {S5K6AAFX_REG_WR, 0x011D}, /* TVAR_ash_pGAS[157] */ + {S5K6AAFX_REG_WR, 0x00D8}, /* TVAR_ash_pGAS[158] */ + {S5K6AAFX_REG_WR, 0x00B4}, /* TVAR_ash_pGAS[159] */ + {S5K6AAFX_REG_WR, 0x0093}, /* TVAR_ash_pGAS[160] */ + {S5K6AAFX_REG_WR, 0x007B}, /* TVAR_ash_pGAS[161] */ + {S5K6AAFX_REG_WR, 0x0070}, /* TVAR_ash_pGAS[162] */ + {S5K6AAFX_REG_WR, 0x0072}, /* TVAR_ash_pGAS[163] */ + {S5K6AAFX_REG_WR, 0x007F}, /* TVAR_ash_pGAS[164] */ + {S5K6AAFX_REG_WR, 0x0091}, /* TVAR_ash_pGAS[165] */ + {S5K6AAFX_REG_WR, 0x00A9}, /* TVAR_ash_pGAS[166] */ + {S5K6AAFX_REG_WR, 0x00D6}, /* TVAR_ash_pGAS[167] */ + {S5K6AAFX_REG_WR, 0x0142}, /* TVAR_ash_pGAS[168] */ + {S5K6AAFX_REG_WR, 0x013A}, /* TVAR_ash_pGAS[169] */ + {S5K6AAFX_REG_WR, 0x00D3}, /* TVAR_ash_pGAS[170] */ + {S5K6AAFX_REG_WR, 0x00AA}, /* TVAR_ash_pGAS[171] */ + {S5K6AAFX_REG_WR, 0x007C}, /* TVAR_ash_pGAS[172] */ + {S5K6AAFX_REG_WR, 0x0055}, /* TVAR_ash_pGAS[173] */ + {S5K6AAFX_REG_WR, 0x003D}, /* TVAR_ash_pGAS[174] */ + {S5K6AAFX_REG_WR, 0x0035}, /* TVAR_ash_pGAS[175] */ + {S5K6AAFX_REG_WR, 0x0036}, /* TVAR_ash_pGAS[176] */ + {S5K6AAFX_REG_WR, 0x0044}, /* TVAR_ash_pGAS[177] */ + {S5K6AAFX_REG_WR, 0x005B}, /* TVAR_ash_pGAS[178] */ + {S5K6AAFX_REG_WR, 0x007A}, /* TVAR_ash_pGAS[179] */ + {S5K6AAFX_REG_WR, 0x009E}, /* TVAR_ash_pGAS[180] */ + {S5K6AAFX_REG_WR, 0x00DF}, /* TVAR_ash_pGAS[181] */ + {S5K6AAFX_REG_WR, 0x00F9}, /* TVAR_ash_pGAS[182] */ + {S5K6AAFX_REG_WR, 0x00B5}, /* TVAR_ash_pGAS[183] */ + {S5K6AAFX_REG_WR, 0x0083}, /* TVAR_ash_pGAS[184] */ + {S5K6AAFX_REG_WR, 0x0052}, /* TVAR_ash_pGAS[185] */ + {S5K6AAFX_REG_WR, 0x002D}, /* TVAR_ash_pGAS[186] */ + {S5K6AAFX_REG_WR, 0x001B}, /* TVAR_ash_pGAS[187] */ + {S5K6AAFX_REG_WR, 0x0013}, /* TVAR_ash_pGAS[188] */ + {S5K6AAFX_REG_WR, 0x0012}, /* TVAR_ash_pGAS[189] */ + {S5K6AAFX_REG_WR, 0x001B}, /* TVAR_ash_pGAS[190] */ + {S5K6AAFX_REG_WR, 0x0031}, /* TVAR_ash_pGAS[191] */ + {S5K6AAFX_REG_WR, 0x0055}, /* TVAR_ash_pGAS[192] */ + {S5K6AAFX_REG_WR, 0x007F}, /* TVAR_ash_pGAS[193] */ + {S5K6AAFX_REG_WR, 0x00AF}, /* TVAR_ash_pGAS[194] */ + {S5K6AAFX_REG_WR, 0x00E0}, /* TVAR_ash_pGAS[195] */ + {S5K6AAFX_REG_WR, 0x00A6}, /* TVAR_ash_pGAS[196] */ + {S5K6AAFX_REG_WR, 0x006C}, /* TVAR_ash_pGAS[197] */ + {S5K6AAFX_REG_WR, 0x0039}, /* TVAR_ash_pGAS[198] */ + {S5K6AAFX_REG_WR, 0x001A}, /* TVAR_ash_pGAS[199] */ + {S5K6AAFX_REG_WR, 0x000D}, /* TVAR_ash_pGAS[200] */ + {S5K6AAFX_REG_WR, 0x0007}, /* TVAR_ash_pGAS[201] */ + {S5K6AAFX_REG_WR, 0x0005}, /* TVAR_ash_pGAS[202] */ + {S5K6AAFX_REG_WR, 0x0008}, /* TVAR_ash_pGAS[203] */ + {S5K6AAFX_REG_WR, 0x0018}, /* TVAR_ash_pGAS[204] */ + {S5K6AAFX_REG_WR, 0x003D}, /* TVAR_ash_pGAS[205] */ + {S5K6AAFX_REG_WR, 0x0070}, /* TVAR_ash_pGAS[206] */ + {S5K6AAFX_REG_WR, 0x009C}, /* TVAR_ash_pGAS[207] */ + {S5K6AAFX_REG_WR, 0x00DA}, /* TVAR_ash_pGAS[208] */ + {S5K6AAFX_REG_WR, 0x00A2}, /* TVAR_ash_pGAS[209] */ + {S5K6AAFX_REG_WR, 0x0065}, /* TVAR_ash_pGAS[210] */ + {S5K6AAFX_REG_WR, 0x0031}, /* TVAR_ash_pGAS[211] */ + {S5K6AAFX_REG_WR, 0x0015}, /* TVAR_ash_pGAS[212] */ + {S5K6AAFX_REG_WR, 0x0009}, /* TVAR_ash_pGAS[213] */ + {S5K6AAFX_REG_WR, 0x0003}, /* TVAR_ash_pGAS[214] */ + {S5K6AAFX_REG_WR, 0x0002}, /* TVAR_ash_pGAS[215] */ + {S5K6AAFX_REG_WR, 0x0005}, /* TVAR_ash_pGAS[216] */ + {S5K6AAFX_REG_WR, 0x0014}, /* TVAR_ash_pGAS[217] */ + {S5K6AAFX_REG_WR, 0x0038}, /* TVAR_ash_pGAS[218] */ + {S5K6AAFX_REG_WR, 0x006D}, /* TVAR_ash_pGAS[219] */ + {S5K6AAFX_REG_WR, 0x009C}, /* 
TVAR_ash_pGAS[220] */ + {S5K6AAFX_REG_WR, 0x00DF}, /* TVAR_ash_pGAS[221] */ + {S5K6AAFX_REG_WR, 0x00A8}, /* TVAR_ash_pGAS[222] */ + {S5K6AAFX_REG_WR, 0x006B}, /* TVAR_ash_pGAS[223] */ + {S5K6AAFX_REG_WR, 0x0038}, /* TVAR_ash_pGAS[224] */ + {S5K6AAFX_REG_WR, 0x0019}, /* TVAR_ash_pGAS[225] */ + {S5K6AAFX_REG_WR, 0x000C}, /* TVAR_ash_pGAS[226] */ + {S5K6AAFX_REG_WR, 0x0005}, /* TVAR_ash_pGAS[227] */ + {S5K6AAFX_REG_WR, 0x0006}, /* TVAR_ash_pGAS[228] */ + {S5K6AAFX_REG_WR, 0x000B}, /* TVAR_ash_pGAS[229] */ + {S5K6AAFX_REG_WR, 0x001D}, /* TVAR_ash_pGAS[230] */ + {S5K6AAFX_REG_WR, 0x0043}, /* TVAR_ash_pGAS[231] */ + {S5K6AAFX_REG_WR, 0x0075}, /* TVAR_ash_pGAS[232] */ + {S5K6AAFX_REG_WR, 0x00A6}, /* TVAR_ash_pGAS[233] */ + {S5K6AAFX_REG_WR, 0x00FA}, /* TVAR_ash_pGAS[234] */ + {S5K6AAFX_REG_WR, 0x00BE}, /* TVAR_ash_pGAS[235] */ + {S5K6AAFX_REG_WR, 0x0087}, /* TVAR_ash_pGAS[236] */ + {S5K6AAFX_REG_WR, 0x004F}, /* TVAR_ash_pGAS[237] */ + {S5K6AAFX_REG_WR, 0x0028}, /* TVAR_ash_pGAS[238] */ + {S5K6AAFX_REG_WR, 0x0016}, /* TVAR_ash_pGAS[239] */ + {S5K6AAFX_REG_WR, 0x000F}, /* TVAR_ash_pGAS[240] */ + {S5K6AAFX_REG_WR, 0x0010}, /* TVAR_ash_pGAS[241] */ + {S5K6AAFX_REG_WR, 0x001A}, /* TVAR_ash_pGAS[242] */ + {S5K6AAFX_REG_WR, 0x0033}, /* TVAR_ash_pGAS[243] */ + {S5K6AAFX_REG_WR, 0x005D}, /* TVAR_ash_pGAS[244] */ + {S5K6AAFX_REG_WR, 0x008F}, /* TVAR_ash_pGAS[245] */ + {S5K6AAFX_REG_WR, 0x00C2}, /* TVAR_ash_pGAS[246] */ + {S5K6AAFX_REG_WR, 0x0132}, /* TVAR_ash_pGAS[247] */ + {S5K6AAFX_REG_WR, 0x00DF}, /* TVAR_ash_pGAS[248] */ + {S5K6AAFX_REG_WR, 0x00B0}, /* TVAR_ash_pGAS[249] */ + {S5K6AAFX_REG_WR, 0x0077}, /* TVAR_ash_pGAS[250] */ + {S5K6AAFX_REG_WR, 0x004A}, /* TVAR_ash_pGAS[251] */ + {S5K6AAFX_REG_WR, 0x0031}, /* TVAR_ash_pGAS[252] */ + {S5K6AAFX_REG_WR, 0x0027}, /* TVAR_ash_pGAS[253] */ + {S5K6AAFX_REG_WR, 0x002B}, /* TVAR_ash_pGAS[254] */ + {S5K6AAFX_REG_WR, 0x003A}, /* TVAR_ash_pGAS[255] */ + {S5K6AAFX_REG_WR, 0x0057}, /* TVAR_ash_pGAS[256] */ + {S5K6AAFX_REG_WR, 0x0083}, /* TVAR_ash_pGAS[257] */ + {S5K6AAFX_REG_WR, 0x00B0}, /* TVAR_ash_pGAS[258] */ + {S5K6AAFX_REG_WR, 0x00F2}, /* TVAR_ash_pGAS[259] */ + {S5K6AAFX_REG_WR, 0x019B}, /* TVAR_ash_pGAS[260] */ + {S5K6AAFX_REG_WR, 0x0117}, /* TVAR_ash_pGAS[261] */ + {S5K6AAFX_REG_WR, 0x00D9}, /* TVAR_ash_pGAS[262] */ + {S5K6AAFX_REG_WR, 0x00B0}, /* TVAR_ash_pGAS[263] */ + {S5K6AAFX_REG_WR, 0x0085}, /* TVAR_ash_pGAS[264] */ + {S5K6AAFX_REG_WR, 0x0067}, /* TVAR_ash_pGAS[265] */ + {S5K6AAFX_REG_WR, 0x0059}, /* TVAR_ash_pGAS[266] */ + {S5K6AAFX_REG_WR, 0x005C}, /* TVAR_ash_pGAS[267] */ + {S5K6AAFX_REG_WR, 0x006F}, /* TVAR_ash_pGAS[268] */ + {S5K6AAFX_REG_WR, 0x008D}, /* TVAR_ash_pGAS[269] */ + {S5K6AAFX_REG_WR, 0x00AE}, /* TVAR_ash_pGAS[270] */ + {S5K6AAFX_REG_WR, 0x00DE}, /* TVAR_ash_pGAS[271] */ + {S5K6AAFX_REG_WR, 0x0146}, /* TVAR_ash_pGAS[272] */ + {S5K6AAFX_REG_WR, 0x0249}, /* TVAR_ash_pGAS[273] */ + {S5K6AAFX_REG_WR, 0x017C}, /* TVAR_ash_pGAS[274] */ + {S5K6AAFX_REG_WR, 0x010F}, /* TVAR_ash_pGAS[275] */ + {S5K6AAFX_REG_WR, 0x00DF}, /* TVAR_ash_pGAS[276] */ + {S5K6AAFX_REG_WR, 0x00C0}, /* TVAR_ash_pGAS[277] */ + {S5K6AAFX_REG_WR, 0x00A6}, /* TVAR_ash_pGAS[278] */ + {S5K6AAFX_REG_WR, 0x0095}, /* TVAR_ash_pGAS[279] */ + {S5K6AAFX_REG_WR, 0x0096}, /* TVAR_ash_pGAS[280] */ + {S5K6AAFX_REG_WR, 0x00A8}, /* TVAR_ash_pGAS[281] */ + {S5K6AAFX_REG_WR, 0x00C0}, /* TVAR_ash_pGAS[282] */ + {S5K6AAFX_REG_WR, 0x00E3}, /* TVAR_ash_pGAS[283] */ + {S5K6AAFX_REG_WR, 0x012E}, /* TVAR_ash_pGAS[284] */ + {S5K6AAFX_REG_WR, 0x01BF}, /* TVAR_ash_pGAS[285] */ + {S5K6AAFX_REG_WR, 
0x0289}, /* TVAR_ash_pGAS[286] */ + {S5K6AAFX_REG_WR, 0x019B}, /* TVAR_ash_pGAS[287] */ + {S5K6AAFX_REG_WR, 0x0116}, /* TVAR_ash_pGAS[288] */ + {S5K6AAFX_REG_WR, 0x00DE}, /* TVAR_ash_pGAS[289] */ + {S5K6AAFX_REG_WR, 0x00C0}, /* TVAR_ash_pGAS[290] */ + {S5K6AAFX_REG_WR, 0x00A9}, /* TVAR_ash_pGAS[291] */ + {S5K6AAFX_REG_WR, 0x009D}, /* TVAR_ash_pGAS[292] */ + {S5K6AAFX_REG_WR, 0x00A4}, /* TVAR_ash_pGAS[293] */ + {S5K6AAFX_REG_WR, 0x00B8}, /* TVAR_ash_pGAS[294] */ + {S5K6AAFX_REG_WR, 0x00D8}, /* TVAR_ash_pGAS[295] */ + {S5K6AAFX_REG_WR, 0x0106}, /* TVAR_ash_pGAS[296] */ + {S5K6AAFX_REG_WR, 0x0175}, /* TVAR_ash_pGAS[297] */ + {S5K6AAFX_REG_WR, 0x0239}, /* TVAR_ash_pGAS[298] */ + {S5K6AAFX_REG_WR, 0x01C5}, /* TVAR_ash_pGAS[299] */ + {S5K6AAFX_REG_WR, 0x0125}, /* TVAR_ash_pGAS[300] */ + {S5K6AAFX_REG_WR, 0x00D9}, /* TVAR_ash_pGAS[301] */ + {S5K6AAFX_REG_WR, 0x00B2}, /* TVAR_ash_pGAS[302] */ + {S5K6AAFX_REG_WR, 0x008D}, /* TVAR_ash_pGAS[303] */ + {S5K6AAFX_REG_WR, 0x006F}, /* TVAR_ash_pGAS[304] */ + {S5K6AAFX_REG_WR, 0x0062}, /* TVAR_ash_pGAS[305] */ + {S5K6AAFX_REG_WR, 0x006A}, /* TVAR_ash_pGAS[306] */ + {S5K6AAFX_REG_WR, 0x0084}, /* TVAR_ash_pGAS[307] */ + {S5K6AAFX_REG_WR, 0x00A8}, /* TVAR_ash_pGAS[308] */ + {S5K6AAFX_REG_WR, 0x00CD}, /* TVAR_ash_pGAS[309] */ + {S5K6AAFX_REG_WR, 0x010D}, /* TVAR_ash_pGAS[310] */ + {S5K6AAFX_REG_WR, 0x0189}, /* TVAR_ash_pGAS[311] */ + {S5K6AAFX_REG_WR, 0x0143}, /* TVAR_ash_pGAS[312] */ + {S5K6AAFX_REG_WR, 0x00DF}, /* TVAR_ash_pGAS[313] */ + {S5K6AAFX_REG_WR, 0x00AF}, /* TVAR_ash_pGAS[314] */ + {S5K6AAFX_REG_WR, 0x007D}, /* TVAR_ash_pGAS[315] */ + {S5K6AAFX_REG_WR, 0x0050}, /* TVAR_ash_pGAS[316] */ + {S5K6AAFX_REG_WR, 0x0036}, /* TVAR_ash_pGAS[317] */ + {S5K6AAFX_REG_WR, 0x002D}, /* TVAR_ash_pGAS[318] */ + {S5K6AAFX_REG_WR, 0x0032}, /* TVAR_ash_pGAS[319] */ + {S5K6AAFX_REG_WR, 0x0048}, /* TVAR_ash_pGAS[320] */ + {S5K6AAFX_REG_WR, 0x006F}, /* TVAR_ash_pGAS[321] */ + {S5K6AAFX_REG_WR, 0x009F}, /* TVAR_ash_pGAS[322] */ + {S5K6AAFX_REG_WR, 0x00CF}, /* TVAR_ash_pGAS[323] */ + {S5K6AAFX_REG_WR, 0x0118}, /* TVAR_ash_pGAS[324] */ + {S5K6AAFX_REG_WR, 0x010C}, /* TVAR_ash_pGAS[325] */ + {S5K6AAFX_REG_WR, 0x00C3}, /* TVAR_ash_pGAS[326] */ + {S5K6AAFX_REG_WR, 0x008C}, /* TVAR_ash_pGAS[327] */ + {S5K6AAFX_REG_WR, 0x0056}, /* TVAR_ash_pGAS[328] */ + {S5K6AAFX_REG_WR, 0x002D}, /* TVAR_ash_pGAS[329] */ + {S5K6AAFX_REG_WR, 0x0017}, /* TVAR_ash_pGAS[330] */ + {S5K6AAFX_REG_WR, 0x000D}, /* TVAR_ash_pGAS[331] */ + {S5K6AAFX_REG_WR, 0x0010}, /* TVAR_ash_pGAS[332] */ + {S5K6AAFX_REG_WR, 0x001F}, /* TVAR_ash_pGAS[333] */ + {S5K6AAFX_REG_WR, 0x0040}, /* TVAR_ash_pGAS[334] */ + {S5K6AAFX_REG_WR, 0x0070}, /* TVAR_ash_pGAS[335] */ + {S5K6AAFX_REG_WR, 0x00A6}, /* TVAR_ash_pGAS[336] */ + {S5K6AAFX_REG_WR, 0x00DB}, /* TVAR_ash_pGAS[337] */ + {S5K6AAFX_REG_WR, 0x00F2}, /* TVAR_ash_pGAS[338] */ + {S5K6AAFX_REG_WR, 0x00B6}, /* TVAR_ash_pGAS[339] */ + {S5K6AAFX_REG_WR, 0x0078}, /* TVAR_ash_pGAS[340] */ + {S5K6AAFX_REG_WR, 0x003E}, /* TVAR_ash_pGAS[341] */ + {S5K6AAFX_REG_WR, 0x001B}, /* TVAR_ash_pGAS[342] */ + {S5K6AAFX_REG_WR, 0x000B}, /* TVAR_ash_pGAS[343] */ + {S5K6AAFX_REG_WR, 0x0004}, /* TVAR_ash_pGAS[344] */ + {S5K6AAFX_REG_WR, 0x0003}, /* TVAR_ash_pGAS[345] */ + {S5K6AAFX_REG_WR, 0x0009}, /* TVAR_ash_pGAS[346] */ + {S5K6AAFX_REG_WR, 0x001F}, /* TVAR_ash_pGAS[347] */ + {S5K6AAFX_REG_WR, 0x004B}, /* TVAR_ash_pGAS[348] */ + {S5K6AAFX_REG_WR, 0x0088}, /* TVAR_ash_pGAS[349] */ + {S5K6AAFX_REG_WR, 0x00B6}, /* TVAR_ash_pGAS[350] */ + {S5K6AAFX_REG_WR, 0x00EA}, /* TVAR_ash_pGAS[351] */ + 
{S5K6AAFX_REG_WR, 0x00B4}, /* TVAR_ash_pGAS[352] */ + {S5K6AAFX_REG_WR, 0x0070}, /* TVAR_ash_pGAS[353] */ + {S5K6AAFX_REG_WR, 0x0037}, /* TVAR_ash_pGAS[354] */ + {S5K6AAFX_REG_WR, 0x0016}, /* TVAR_ash_pGAS[355] */ + {S5K6AAFX_REG_WR, 0x0008}, /* TVAR_ash_pGAS[356] */ + {S5K6AAFX_REG_WR, 0x0000}, /* TVAR_ash_pGAS[357] */ + {S5K6AAFX_REG_WR, 0x0000}, /* TVAR_ash_pGAS[358] */ + {S5K6AAFX_REG_WR, 0x0002}, /* TVAR_ash_pGAS[359] */ + {S5K6AAFX_REG_WR, 0x0013}, /* TVAR_ash_pGAS[360] */ + {S5K6AAFX_REG_WR, 0x0038}, /* TVAR_ash_pGAS[361] */ + {S5K6AAFX_REG_WR, 0x0071}, /* TVAR_ash_pGAS[362] */ + {S5K6AAFX_REG_WR, 0x00A0}, /* TVAR_ash_pGAS[363] */ + {S5K6AAFX_REG_WR, 0x00F1}, /* TVAR_ash_pGAS[364] */ + {S5K6AAFX_REG_WR, 0x00B8}, /* TVAR_ash_pGAS[365] */ + {S5K6AAFX_REG_WR, 0x0076}, /* TVAR_ash_pGAS[366] */ + {S5K6AAFX_REG_WR, 0x003E}, /* TVAR_ash_pGAS[367] */ + {S5K6AAFX_REG_WR, 0x001C}, /* TVAR_ash_pGAS[368] */ + {S5K6AAFX_REG_WR, 0x000B}, /* TVAR_ash_pGAS[369] */ + {S5K6AAFX_REG_WR, 0x0002}, /* TVAR_ash_pGAS[370] */ + {S5K6AAFX_REG_WR, 0x0000}, /* TVAR_ash_pGAS[371] */ + {S5K6AAFX_REG_WR, 0x0004}, /* TVAR_ash_pGAS[372] */ + {S5K6AAFX_REG_WR, 0x0014}, /* TVAR_ash_pGAS[373] */ + {S5K6AAFX_REG_WR, 0x0037}, /* TVAR_ash_pGAS[374] */ + {S5K6AAFX_REG_WR, 0x0068}, /* TVAR_ash_pGAS[375] */ + {S5K6AAFX_REG_WR, 0x0095}, /* TVAR_ash_pGAS[376] */ + {S5K6AAFX_REG_WR, 0x010B}, /* TVAR_ash_pGAS[377] */ + {S5K6AAFX_REG_WR, 0x00CC}, /* TVAR_ash_pGAS[378] */ + {S5K6AAFX_REG_WR, 0x0093}, /* TVAR_ash_pGAS[379] */ + {S5K6AAFX_REG_WR, 0x0056}, /* TVAR_ash_pGAS[380] */ + {S5K6AAFX_REG_WR, 0x002B}, /* TVAR_ash_pGAS[381] */ + {S5K6AAFX_REG_WR, 0x0015}, /* TVAR_ash_pGAS[382] */ + {S5K6AAFX_REG_WR, 0x000B}, /* TVAR_ash_pGAS[383] */ + {S5K6AAFX_REG_WR, 0x0009}, /* TVAR_ash_pGAS[384] */ + {S5K6AAFX_REG_WR, 0x000E}, /* TVAR_ash_pGAS[385] */ + {S5K6AAFX_REG_WR, 0x0021}, /* TVAR_ash_pGAS[386] */ + {S5K6AAFX_REG_WR, 0x0043}, /* TVAR_ash_pGAS[387] */ + {S5K6AAFX_REG_WR, 0x0070}, /* TVAR_ash_pGAS[388] */ + {S5K6AAFX_REG_WR, 0x00A0}, /* TVAR_ash_pGAS[389] */ + {S5K6AAFX_REG_WR, 0x0143}, /* TVAR_ash_pGAS[390] */ + {S5K6AAFX_REG_WR, 0x00EB}, /* TVAR_ash_pGAS[391] */ + {S5K6AAFX_REG_WR, 0x00B8}, /* TVAR_ash_pGAS[392] */ + {S5K6AAFX_REG_WR, 0x007E}, /* TVAR_ash_pGAS[393] */ + {S5K6AAFX_REG_WR, 0x004E}, /* TVAR_ash_pGAS[394] */ + {S5K6AAFX_REG_WR, 0x002F}, /* TVAR_ash_pGAS[395] */ + {S5K6AAFX_REG_WR, 0x0021}, /* TVAR_ash_pGAS[396] */ + {S5K6AAFX_REG_WR, 0x0020}, /* TVAR_ash_pGAS[397] */ + {S5K6AAFX_REG_WR, 0x0027}, /* TVAR_ash_pGAS[398] */ + {S5K6AAFX_REG_WR, 0x003D}, /* TVAR_ash_pGAS[399] */ + {S5K6AAFX_REG_WR, 0x005D}, /* TVAR_ash_pGAS[400] */ + {S5K6AAFX_REG_WR, 0x0084}, /* TVAR_ash_pGAS[401] */ + {S5K6AAFX_REG_WR, 0x00BD}, /* TVAR_ash_pGAS[402] */ + {S5K6AAFX_REG_WR, 0x01AD}, /* TVAR_ash_pGAS[403] */ + {S5K6AAFX_REG_WR, 0x0122}, /* TVAR_ash_pGAS[404] */ + {S5K6AAFX_REG_WR, 0x00E3}, /* TVAR_ash_pGAS[405] */ + {S5K6AAFX_REG_WR, 0x00B5}, /* TVAR_ash_pGAS[406] */ + {S5K6AAFX_REG_WR, 0x0087}, /* TVAR_ash_pGAS[407] */ + {S5K6AAFX_REG_WR, 0x0064}, /* TVAR_ash_pGAS[408] */ + {S5K6AAFX_REG_WR, 0x0051}, /* TVAR_ash_pGAS[409] */ + {S5K6AAFX_REG_WR, 0x004E}, /* TVAR_ash_pGAS[410] */ + {S5K6AAFX_REG_WR, 0x0057}, /* TVAR_ash_pGAS[411] */ + {S5K6AAFX_REG_WR, 0x006A}, /* TVAR_ash_pGAS[412] */ + {S5K6AAFX_REG_WR, 0x007F}, /* TVAR_ash_pGAS[413] */ + {S5K6AAFX_REG_WR, 0x00A8}, /* TVAR_ash_pGAS[414] */ + {S5K6AAFX_REG_WR, 0x0101}, /* TVAR_ash_pGAS[415] */ + {S5K6AAFX_REG_WR, 0x0267}, /* TVAR_ash_pGAS[416] */ + {S5K6AAFX_REG_WR, 0x018C}, /* 
TVAR_ash_pGAS[417] */ + {S5K6AAFX_REG_WR, 0x0119}, /* TVAR_ash_pGAS[418] */ + {S5K6AAFX_REG_WR, 0x00E5}, /* TVAR_ash_pGAS[419] */ + {S5K6AAFX_REG_WR, 0x00C2}, /* TVAR_ash_pGAS[420] */ + {S5K6AAFX_REG_WR, 0x00A2}, /* TVAR_ash_pGAS[421] */ + {S5K6AAFX_REG_WR, 0x008D}, /* TVAR_ash_pGAS[422] */ + {S5K6AAFX_REG_WR, 0x0086}, /* TVAR_ash_pGAS[423] */ + {S5K6AAFX_REG_WR, 0x008C}, /* TVAR_ash_pGAS[424] */ + {S5K6AAFX_REG_WR, 0x0099}, /* TVAR_ash_pGAS[425] */ + {S5K6AAFX_REG_WR, 0x00B0}, /* TVAR_ash_pGAS[426] */ + {S5K6AAFX_REG_WR, 0x00F2}, /* TVAR_ash_pGAS[427] */ + {S5K6AAFX_REG_WR, 0x016C}, /* TVAR_ash_pGAS[428] */ + {S5K6AAFX_REG_WR, 0x01F3}, /* TVAR_ash_pGAS[429] */ + {S5K6AAFX_REG_WR, 0x0136}, /* TVAR_ash_pGAS[430] */ + {S5K6AAFX_REG_WR, 0x00D6}, /* TVAR_ash_pGAS[431] */ + {S5K6AAFX_REG_WR, 0x00B3}, /* TVAR_ash_pGAS[432] */ + {S5K6AAFX_REG_WR, 0x00A1}, /* TVAR_ash_pGAS[433] */ + {S5K6AAFX_REG_WR, 0x0095}, /* TVAR_ash_pGAS[434] */ + {S5K6AAFX_REG_WR, 0x008E}, /* TVAR_ash_pGAS[435] */ + {S5K6AAFX_REG_WR, 0x0098}, /* TVAR_ash_pGAS[436] */ + {S5K6AAFX_REG_WR, 0x00AD}, /* TVAR_ash_pGAS[437] */ + {S5K6AAFX_REG_WR, 0x00C5}, /* TVAR_ash_pGAS[438] */ + {S5K6AAFX_REG_WR, 0x00ED}, /* TVAR_ash_pGAS[439] */ + {S5K6AAFX_REG_WR, 0x014D}, /* TVAR_ash_pGAS[440] */ + {S5K6AAFX_REG_WR, 0x0207}, /* TVAR_ash_pGAS[441] */ + {S5K6AAFX_REG_WR, 0x014C}, /* TVAR_ash_pGAS[442] */ + {S5K6AAFX_REG_WR, 0x00D1}, /* TVAR_ash_pGAS[443] */ + {S5K6AAFX_REG_WR, 0x00A4}, /* TVAR_ash_pGAS[444] */ + {S5K6AAFX_REG_WR, 0x0091}, /* TVAR_ash_pGAS[445] */ + {S5K6AAFX_REG_WR, 0x0077}, /* TVAR_ash_pGAS[446] */ + {S5K6AAFX_REG_WR, 0x0062}, /* TVAR_ash_pGAS[447] */ + {S5K6AAFX_REG_WR, 0x005E}, /* TVAR_ash_pGAS[448] */ + {S5K6AAFX_REG_WR, 0x006A}, /* TVAR_ash_pGAS[449] */ + {S5K6AAFX_REG_WR, 0x0081}, /* TVAR_ash_pGAS[450] */ + {S5K6AAFX_REG_WR, 0x009F}, /* TVAR_ash_pGAS[451] */ + {S5K6AAFX_REG_WR, 0x00BE}, /* TVAR_ash_pGAS[452] */ + {S5K6AAFX_REG_WR, 0x00F2}, /* TVAR_ash_pGAS[453] */ + {S5K6AAFX_REG_WR, 0x0162}, /* TVAR_ash_pGAS[454] */ + {S5K6AAFX_REG_WR, 0x00DB}, /* TVAR_ash_pGAS[455] */ + {S5K6AAFX_REG_WR, 0x008C}, /* TVAR_ash_pGAS[456] */ + {S5K6AAFX_REG_WR, 0x0079}, /* TVAR_ash_pGAS[457] */ + {S5K6AAFX_REG_WR, 0x005D}, /* TVAR_ash_pGAS[458] */ + {S5K6AAFX_REG_WR, 0x003D}, /* TVAR_ash_pGAS[459] */ + {S5K6AAFX_REG_WR, 0x002B}, /* TVAR_ash_pGAS[460] */ + {S5K6AAFX_REG_WR, 0x002B}, /* TVAR_ash_pGAS[461] */ + {S5K6AAFX_REG_WR, 0x0033}, /* TVAR_ash_pGAS[462] */ + {S5K6AAFX_REG_WR, 0x004A}, /* TVAR_ash_pGAS[463] */ + {S5K6AAFX_REG_WR, 0x006A}, /* TVAR_ash_pGAS[464] */ + {S5K6AAFX_REG_WR, 0x0092}, /* TVAR_ash_pGAS[465] */ + {S5K6AAFX_REG_WR, 0x00B2}, /* TVAR_ash_pGAS[466] */ + {S5K6AAFX_REG_WR, 0x00F2}, /* TVAR_ash_pGAS[467] */ + {S5K6AAFX_REG_WR, 0x00A2}, /* TVAR_ash_pGAS[468] */ + {S5K6AAFX_REG_WR, 0x0072}, /* TVAR_ash_pGAS[469] */ + {S5K6AAFX_REG_WR, 0x0059}, /* TVAR_ash_pGAS[470] */ + {S5K6AAFX_REG_WR, 0x003A}, /* TVAR_ash_pGAS[471] */ + {S5K6AAFX_REG_WR, 0x001E}, /* TVAR_ash_pGAS[472] */ + {S5K6AAFX_REG_WR, 0x0011}, /* TVAR_ash_pGAS[473] */ + {S5K6AAFX_REG_WR, 0x000F}, /* TVAR_ash_pGAS[474] */ + {S5K6AAFX_REG_WR, 0x0012}, /* TVAR_ash_pGAS[475] */ + {S5K6AAFX_REG_WR, 0x0020}, /* TVAR_ash_pGAS[476] */ + {S5K6AAFX_REG_WR, 0x003B}, /* TVAR_ash_pGAS[477] */ + {S5K6AAFX_REG_WR, 0x005E}, /* TVAR_ash_pGAS[478] */ + {S5K6AAFX_REG_WR, 0x0084}, /* TVAR_ash_pGAS[479] */ + {S5K6AAFX_REG_WR, 0x00AD}, /* TVAR_ash_pGAS[480] */ + {S5K6AAFX_REG_WR, 0x008B}, /* TVAR_ash_pGAS[481] */ + {S5K6AAFX_REG_WR, 0x0065}, /* TVAR_ash_pGAS[482] */ + {S5K6AAFX_REG_WR, 
0x0045}, /* TVAR_ash_pGAS[483] */ + {S5K6AAFX_REG_WR, 0x0024}, /* TVAR_ash_pGAS[484] */ + {S5K6AAFX_REG_WR, 0x000F}, /* TVAR_ash_pGAS[485] */ + {S5K6AAFX_REG_WR, 0x0007}, /* TVAR_ash_pGAS[486] */ + {S5K6AAFX_REG_WR, 0x0005}, /* TVAR_ash_pGAS[487] */ + {S5K6AAFX_REG_WR, 0x0005}, /* TVAR_ash_pGAS[488] */ + {S5K6AAFX_REG_WR, 0x0008}, /* TVAR_ash_pGAS[489] */ + {S5K6AAFX_REG_WR, 0x0017}, /* TVAR_ash_pGAS[490] */ + {S5K6AAFX_REG_WR, 0x0037}, /* TVAR_ash_pGAS[491] */ + {S5K6AAFX_REG_WR, 0x0060}, /* TVAR_ash_pGAS[492] */ + {S5K6AAFX_REG_WR, 0x0083}, /* TVAR_ash_pGAS[493] */ + {S5K6AAFX_REG_WR, 0x0081}, /* TVAR_ash_pGAS[494] */ + {S5K6AAFX_REG_WR, 0x0060}, /* TVAR_ash_pGAS[495] */ + {S5K6AAFX_REG_WR, 0x003D}, /* TVAR_ash_pGAS[496] */ + {S5K6AAFX_REG_WR, 0x001D}, /* TVAR_ash_pGAS[497] */ + {S5K6AAFX_REG_WR, 0x000C}, /* TVAR_ash_pGAS[498] */ + {S5K6AAFX_REG_WR, 0x0006}, /* TVAR_ash_pGAS[499] */ + {S5K6AAFX_REG_WR, 0x0001}, /* TVAR_ash_pGAS[500] */ + {S5K6AAFX_REG_WR, 0x0001}, /* TVAR_ash_pGAS[501] */ + {S5K6AAFX_REG_WR, 0x0002}, /* TVAR_ash_pGAS[502] */ + {S5K6AAFX_REG_WR, 0x0009}, /* TVAR_ash_pGAS[503] */ + {S5K6AAFX_REG_WR, 0x0022}, /* TVAR_ash_pGAS[504] */ + {S5K6AAFX_REG_WR, 0x0047}, /* TVAR_ash_pGAS[505] */ + {S5K6AAFX_REG_WR, 0x0068}, /* TVAR_ash_pGAS[506] */ + {S5K6AAFX_REG_WR, 0x0084}, /* TVAR_ash_pGAS[507] */ + {S5K6AAFX_REG_WR, 0x0064}, /* TVAR_ash_pGAS[508] */ + {S5K6AAFX_REG_WR, 0x0042}, /* TVAR_ash_pGAS[509] */ + {S5K6AAFX_REG_WR, 0x0023}, /* TVAR_ash_pGAS[510] */ + {S5K6AAFX_REG_WR, 0x0010}, /* TVAR_ash_pGAS[511] */ + {S5K6AAFX_REG_WR, 0x0007}, /* TVAR_ash_pGAS[512] */ + {S5K6AAFX_REG_WR, 0x0002}, /* TVAR_ash_pGAS[513] */ + {S5K6AAFX_REG_WR, 0x0001}, /* TVAR_ash_pGAS[514] */ + {S5K6AAFX_REG_WR, 0x0001}, /* TVAR_ash_pGAS[515] */ + {S5K6AAFX_REG_WR, 0x0008}, /* TVAR_ash_pGAS[516] */ + {S5K6AAFX_REG_WR, 0x001C}, /* TVAR_ash_pGAS[517] */ + {S5K6AAFX_REG_WR, 0x0039}, /* TVAR_ash_pGAS[518] */ + {S5K6AAFX_REG_WR, 0x005B}, /* TVAR_ash_pGAS[519] */ + {S5K6AAFX_REG_WR, 0x009C}, /* TVAR_ash_pGAS[520] */ + {S5K6AAFX_REG_WR, 0x0076}, /* TVAR_ash_pGAS[521] */ + {S5K6AAFX_REG_WR, 0x005B}, /* TVAR_ash_pGAS[522] */ + {S5K6AAFX_REG_WR, 0x0037}, /* TVAR_ash_pGAS[523] */ + {S5K6AAFX_REG_WR, 0x001B}, /* TVAR_ash_pGAS[524] */ + {S5K6AAFX_REG_WR, 0x000F}, /* TVAR_ash_pGAS[525] */ + {S5K6AAFX_REG_WR, 0x0009}, /* TVAR_ash_pGAS[526] */ + {S5K6AAFX_REG_WR, 0x0008}, /* TVAR_ash_pGAS[527] */ + {S5K6AAFX_REG_WR, 0x0009}, /* TVAR_ash_pGAS[528] */ + {S5K6AAFX_REG_WR, 0x0011}, /* TVAR_ash_pGAS[529] */ + {S5K6AAFX_REG_WR, 0x0025}, /* TVAR_ash_pGAS[530] */ + {S5K6AAFX_REG_WR, 0x003E}, /* TVAR_ash_pGAS[531] */ + {S5K6AAFX_REG_WR, 0x005F}, /* TVAR_ash_pGAS[532] */ + {S5K6AAFX_REG_WR, 0x00D0}, /* TVAR_ash_pGAS[533] */ + {S5K6AAFX_REG_WR, 0x0095}, /* TVAR_ash_pGAS[534] */ + {S5K6AAFX_REG_WR, 0x007E}, /* TVAR_ash_pGAS[535] */ + {S5K6AAFX_REG_WR, 0x005C}, /* TVAR_ash_pGAS[536] */ + {S5K6AAFX_REG_WR, 0x003A}, /* TVAR_ash_pGAS[537] */ + {S5K6AAFX_REG_WR, 0x0025}, /* TVAR_ash_pGAS[538] */ + {S5K6AAFX_REG_WR, 0x001B}, /* TVAR_ash_pGAS[539] */ + {S5K6AAFX_REG_WR, 0x001B}, /* TVAR_ash_pGAS[540] */ + {S5K6AAFX_REG_WR, 0x001E}, /* TVAR_ash_pGAS[541] */ + {S5K6AAFX_REG_WR, 0x0027}, /* TVAR_ash_pGAS[542] */ + {S5K6AAFX_REG_WR, 0x003A}, /* TVAR_ash_pGAS[543] */ + {S5K6AAFX_REG_WR, 0x004F}, /* TVAR_ash_pGAS[544] */ + {S5K6AAFX_REG_WR, 0x007B}, /* TVAR_ash_pGAS[545] */ + {S5K6AAFX_REG_WR, 0x012F}, /* TVAR_ash_pGAS[546] */ + {S5K6AAFX_REG_WR, 0x00C8}, /* TVAR_ash_pGAS[547] */ + {S5K6AAFX_REG_WR, 0x00A7}, /* TVAR_ash_pGAS[548] */ + 
{S5K6AAFX_REG_WR, 0x008E}, /* TVAR_ash_pGAS[549] */ + {S5K6AAFX_REG_WR, 0x006F}, /* TVAR_ash_pGAS[550] */ + {S5K6AAFX_REG_WR, 0x0057}, /* TVAR_ash_pGAS[551] */ + {S5K6AAFX_REG_WR, 0x0048}, /* TVAR_ash_pGAS[552] */ + {S5K6AAFX_REG_WR, 0x0047}, /* TVAR_ash_pGAS[553] */ + {S5K6AAFX_REG_WR, 0x0049}, /* TVAR_ash_pGAS[554] */ + {S5K6AAFX_REG_WR, 0x004F}, /* TVAR_ash_pGAS[555] */ + {S5K6AAFX_REG_WR, 0x0058}, /* TVAR_ash_pGAS[556] */ + {S5K6AAFX_REG_WR, 0x006E}, /* TVAR_ash_pGAS[557] */ + {S5K6AAFX_REG_WR, 0x00B9}, /* TVAR_ash_pGAS[558] */ + {S5K6AAFX_REG_WR, 0x01CB}, /* TVAR_ash_pGAS[559] */ + {S5K6AAFX_REG_WR, 0x0123}, /* TVAR_ash_pGAS[560] */ + {S5K6AAFX_REG_WR, 0x00D5}, /* TVAR_ash_pGAS[561] */ + {S5K6AAFX_REG_WR, 0x00B9}, /* TVAR_ash_pGAS[562] */ + {S5K6AAFX_REG_WR, 0x00A2}, /* TVAR_ash_pGAS[563] */ + {S5K6AAFX_REG_WR, 0x008E}, /* TVAR_ash_pGAS[564] */ + {S5K6AAFX_REG_WR, 0x0080}, /* TVAR_ash_pGAS[565] */ + {S5K6AAFX_REG_WR, 0x007B}, /* TVAR_ash_pGAS[566] */ + {S5K6AAFX_REG_WR, 0x0079}, /* TVAR_ash_pGAS[567] */ + {S5K6AAFX_REG_WR, 0x0078}, /* TVAR_ash_pGAS[568] */ + {S5K6AAFX_REG_WR, 0x0081}, /* TVAR_ash_pGAS[569] */ + {S5K6AAFX_REG_WR, 0x00A9}, /* TVAR_ash_pGAS[570] */ + {S5K6AAFX_REG_WR, 0x0108}, /* TVAR_ash_pGAS[571] */ + /* parawrite _end - TVAR_ash_pGAS */ + + {S5K6AAFX_REG_W_ADDL, 0x0C48}, + {S5K6AAFX_REG_WR, 0x0550}, /* R*/ + {S5K6AAFX_REG_WR, 0x0400}, /* G*/ + {S5K6AAFX_REG_WR, 0x0600}, /*B */ + +#if 0 + {S5K6AAFX_REG_W_ADDL, 0x0F12}, + {S5K6AAFX_REG_WR, 0x02C9}, /* awbb_GLocusR */ + {S5K6AAFX_REG_WR, 0x033F}, /* awbb_GLocusB */ +#endif + + /* param_start - TVAR_ash_AwbAshCord */ + {S5K6AAFX_REG_W_ADDL, 0x0704}, + {S5K6AAFX_REG_WR, 0x00ED/*0x00C7*/}, /* TVAR_ash_AwbAshCord[0] */ + {S5K6AAFX_REG_WR, 0x0124/*0x00F7*/}, /* TVAR_ash_AwbAshCord[1] */ + {S5K6AAFX_REG_WR, 0x012B/*0x0107*/}, /* TVAR_ash_AwbAshCord[2] */ + {S5K6AAFX_REG_WR, 0x014A/*0x0142*/}, /* TVAR_ash_AwbAshCord[3] */ + {S5K6AAFX_REG_WR, 0x0190/*0x017A*/}, /* TVAR_ash_AwbAshCord[4] */ + {S5K6AAFX_REG_WR, 0x01B2/*0x01A0*/}, /* TVAR_ash_AwbAshCord[5] */ + {S5K6AAFX_REG_WR, 0x01C4/*0x01B6*/}, /* TVAR_ash_AwbAshCord[6] */ + /* param_end - TVAR_ash_AwbAshCord */ + + {S5K6AAFX_REG_W_ADDL, 0x0754}, + {S5K6AAFX_REG_WR, 0x247C}, + {S5K6AAFX_REG_WR, 0x7000}, + + {S5K6AAFX_REG_W_ADDL, 0x0E1A}, + {S5K6AAFX_REG_WR, 0x0138}, + + /* AWB Speed */ + {S5K6AAFX_REG_W_ADDL, 0x0E7C}, + {S5K6AAFX_REG_WR, 0x0010}, + {S5K6AAFX_REG_WR, 0x0003}, + + /* AWB grid */ + {S5K6AAFX_REG_W_ADDL, 0x0E42}, + {S5K6AAFX_REG_WR, 0x0001}, + + {S5K6AAFX_REG_W_ADDL, 0x0E82}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0xFFC4}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0xFFDA}, + {S5K6AAFX_REG_WR, 0xFFF2}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0xFFC4}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0xFFC4}, + {S5K6AAFX_REG_WR, 0x0000}, + + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0064}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0064}, + {S5K6AAFX_REG_WR, 0x004B}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0064}, + //{S5K6AAFX_REG_WR, 0x0030}, + //{S5K6AAFX_REG_WR, 0x0040}, + //{S5K6AAFX_REG_WR, 0x0000}, + //{S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + 
{S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0064}, + {S5K6AAFX_REG_WR, 0x0000}, + + /* param_start - awbb_IndoorGrZones_m_BGrid */ + {S5K6AAFX_REG_W_ADDL, 0x0C50}, + {S5K6AAFX_REG_WR, 0x0389}, + {S5K6AAFX_REG_WR, 0x0396}, + {S5K6AAFX_REG_WR, 0x0365}, + {S5K6AAFX_REG_WR, 0x03A4}, + {S5K6AAFX_REG_WR, 0x0343}, + {S5K6AAFX_REG_WR, 0x03A4}, + {S5K6AAFX_REG_WR, 0x0323}, + {S5K6AAFX_REG_WR, 0x0390}, + {S5K6AAFX_REG_WR, 0x0300}, + {S5K6AAFX_REG_WR, 0x036D}, + {S5K6AAFX_REG_WR, 0x02E0}, + {S5K6AAFX_REG_WR, 0x0356}, + {S5K6AAFX_REG_WR, 0x02CC}, + {S5K6AAFX_REG_WR, 0x033C}, + {S5K6AAFX_REG_WR, 0x02C0}, + {S5K6AAFX_REG_WR, 0x0325}, + {S5K6AAFX_REG_WR, 0x02B4}, + {S5K6AAFX_REG_WR, 0x0303}, + {S5K6AAFX_REG_WR, 0x02AA}, + {S5K6AAFX_REG_WR, 0x02E7}, + {S5K6AAFX_REG_WR, 0x02A1}, + {S5K6AAFX_REG_WR, 0x02D5}, + {S5K6AAFX_REG_WR, 0x0298}, + {S5K6AAFX_REG_WR, 0x02C9}, + {S5K6AAFX_REG_WR, 0x028D}, + {S5K6AAFX_REG_WR, 0x02BF}, + {S5K6AAFX_REG_WR, 0x0284}, + {S5K6AAFX_REG_WR, 0x02B4}, + {S5K6AAFX_REG_WR, 0x0279}, + {S5K6AAFX_REG_WR, 0x02A9}, + {S5K6AAFX_REG_WR, 0x026D}, + {S5K6AAFX_REG_WR, 0x02A1}, + {S5K6AAFX_REG_WR, 0x0260}, + {S5K6AAFX_REG_WR, 0x029B}, + {S5K6AAFX_REG_WR, 0x025A}, + {S5K6AAFX_REG_WR, 0x0291}, + {S5K6AAFX_REG_WR, 0x0252}, + {S5K6AAFX_REG_WR, 0x028A}, + {S5K6AAFX_REG_WR, 0x024F}, + {S5K6AAFX_REG_WR, 0x0284}, + {S5K6AAFX_REG_WR, 0x024C}, + {S5K6AAFX_REG_WR, 0x0279}, + {S5K6AAFX_REG_WR, 0x0259}, + {S5K6AAFX_REG_WR, 0x0265}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x0000}, + /* param_end - awbb_IndoorGrZones_m_BGrid */ + + {S5K6AAFX_REG_WR, 0x0004}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_W_ADDL, 0x0CF8}, + {S5K6AAFX_REG_WR, 0x0115}, + {S5K6AAFX_REG_WR, 0x0000}, + + /* param_end - awbb_OutdoorGrZones_m_BGrid */ + {S5K6AAFX_REG_W_ADDL, 0x0d08}, + {S5K6AAFX_REG_WR, 0X026D}, + {S5K6AAFX_REG_WR, 0X029D}, + {S5K6AAFX_REG_WR, 0X025B}, + {S5K6AAFX_REG_WR, 0X029D}, + {S5K6AAFX_REG_WR, 0X024D}, + {S5K6AAFX_REG_WR, 0X0293}, + {S5K6AAFX_REG_WR, 0X0240}, + {S5K6AAFX_REG_WR, 0X0286}, + {S5K6AAFX_REG_WR, 0X0236}, + {S5K6AAFX_REG_WR, 0X0279}, + {S5K6AAFX_REG_WR, 0X022D}, + {S5K6AAFX_REG_WR, 0X026C}, + {S5K6AAFX_REG_WR, 0X0225}, + {S5K6AAFX_REG_WR, 0X0260}, + {S5K6AAFX_REG_WR, 0X0225}, + {S5K6AAFX_REG_WR, 0X0254}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + 
{S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_WR, 0X0000}, + + {S5K6AAFX_REG_WR, 0X0004}, + {S5K6AAFX_REG_WR, 0X0000}, + {S5K6AAFX_REG_W_ADDL, 0x0D74}, + {S5K6AAFX_REG_WR, 0X0222}, + {S5K6AAFX_REG_WR, 0X0000}, + + /* param_start - SARR_usGammaLutRGBIndoor */ + {S5K6AAFX_REG_W_ADDL, 0x04C8}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x000A}, + {S5K6AAFX_REG_WR, 0x0028}, + {S5K6AAFX_REG_WR, 0x003E}, + {S5K6AAFX_REG_WR, 0x0072}, + {S5K6AAFX_REG_WR, 0x00DA}, + {S5K6AAFX_REG_WR, 0x0129}, + {S5K6AAFX_REG_WR, 0x0165}, + {S5K6AAFX_REG_WR, 0x01C5}, + {S5K6AAFX_REG_WR, 0x0224}, + {S5K6AAFX_REG_WR, 0x028F}, + {S5K6AAFX_REG_WR, 0x02EE}, + {S5K6AAFX_REG_WR, 0x033C}, + {S5K6AAFX_REG_WR, 0x0380}, + {S5K6AAFX_REG_WR, 0x03C3}, + {S5K6AAFX_REG_WR, 0x03FF}, + + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x000A}, + {S5K6AAFX_REG_WR, 0x0028}, + {S5K6AAFX_REG_WR, 0x003E}, + {S5K6AAFX_REG_WR, 0x0072}, + {S5K6AAFX_REG_WR, 0x00DA}, + {S5K6AAFX_REG_WR, 0x0129}, + {S5K6AAFX_REG_WR, 0x0165}, + {S5K6AAFX_REG_WR, 0x01C5}, + {S5K6AAFX_REG_WR, 0x0224}, + {S5K6AAFX_REG_WR, 0x028F}, + {S5K6AAFX_REG_WR, 0x02EE}, + {S5K6AAFX_REG_WR, 0x033C}, + {S5K6AAFX_REG_WR, 0x0380}, + {S5K6AAFX_REG_WR, 0x03C3}, + {S5K6AAFX_REG_WR, 0x03FF}, + + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0x000A}, + {S5K6AAFX_REG_WR, 0x0028}, + {S5K6AAFX_REG_WR, 0x003E}, + {S5K6AAFX_REG_WR, 0x0072}, + {S5K6AAFX_REG_WR, 0x00DA}, + {S5K6AAFX_REG_WR, 0x0129}, + {S5K6AAFX_REG_WR, 0x0165}, + {S5K6AAFX_REG_WR, 0x01C5}, + {S5K6AAFX_REG_WR, 0x0224}, + {S5K6AAFX_REG_WR, 0x028F}, + {S5K6AAFX_REG_WR, 0x02EE}, + {S5K6AAFX_REG_WR, 0x033C}, + {S5K6AAFX_REG_WR, 0x0380}, + {S5K6AAFX_REG_WR, 0x03C3}, + {S5K6AAFX_REG_WR, 0x03FF}, + /* param_end - SARR_usGammaLutRGBIndoor */ + + + {S5K6AAFX_REG_W_ADDL, 0x1000}, + {S5K6AAFX_REG_WR, 0x003F}, + + {S5K6AAFX_REG_W_ADDL, 0x0474}, + {S5K6AAFX_REG_WR, 0x0112/*0x010F*//*0x0114*/}, + {S5K6AAFX_REG_WR, 0x00EF/*0x00F1*//*0x00F9*/}, + + {S5K6AAFX_REG_W_ADDL, 0x2180}, + {S5K6AAFX_REG_WR, 0x0000}, + + {S5K6AAFX_REG_W_ADDL, 0x1006}, + {S5K6AAFX_REG_WR, 0x001F}, + + {S5K6AAFX_REG_W_ADDL, 0x108E}, + {S5K6AAFX_REG_WR, 0x00C7}, + {S5K6AAFX_REG_WR, 0x00F7}, + {S5K6AAFX_REG_WR, 0x0107}, + {S5K6AAFX_REG_WR, 0x0142}, + {S5K6AAFX_REG_WR, 0x017A}, + {S5K6AAFX_REG_WR, 0x01A0}, + {S5K6AAFX_REG_WR, 0x01B6}, + + {S5K6AAFX_REG_WR, 0x0100/*0x0112*/}, + {S5K6AAFX_REG_WR, 0x0100/*0x0122*/}, + {S5K6AAFX_REG_WR, 0x0100/*0x0136*/}, + {S5K6AAFX_REG_WR, 0x0100/*0x00F6*/}, + {S5K6AAFX_REG_WR, 0x0100/*0x0100*/}, + {S5K6AAFX_REG_WR, 0x0100/*0x00FE*/}, + {S5K6AAFX_REG_WR, 0x0100/*0x0100*/}, + + /* param_start - TVAR_wbt_pBaseCcms */ + {S5K6AAFX_REG_W_ADDL, 0x23A4}, + {S5K6AAFX_REG_WR, 0x01FA}, /* H */ + {S5K6AAFX_REG_WR, 0xFFB9}, + {S5K6AAFX_REG_WR, 0xFFF8}, + {S5K6AAFX_REG_WR, 0x0116}, + {S5K6AAFX_REG_WR, 0x00BD}, + {S5K6AAFX_REG_WR, 0xFF38}, + {S5K6AAFX_REG_WR, 0xFF23}, + {S5K6AAFX_REG_WR, 0x01AB}, + 
{S5K6AAFX_REG_WR, 0xFF81}, + {S5K6AAFX_REG_WR, 0xFF0D}, + {S5K6AAFX_REG_WR, 0x0169}, + {S5K6AAFX_REG_WR, 0x00DE}, + {S5K6AAFX_REG_WR, 0xFFEF}, + {S5K6AAFX_REG_WR, 0xFFCA}, + {S5K6AAFX_REG_WR, 0x014D}, + {S5K6AAFX_REG_WR, 0x01C3}, + {S5K6AAFX_REG_WR, 0xFF7E}, + {S5K6AAFX_REG_WR, 0x016F}, + + {S5K6AAFX_REG_WR, 0x01FA}, /* A */ + {S5K6AAFX_REG_WR, 0xFFB9}, + {S5K6AAFX_REG_WR, 0xFFF8}, + {S5K6AAFX_REG_WR, 0x0116}, + {S5K6AAFX_REG_WR, 0x00BD}, + {S5K6AAFX_REG_WR, 0xFF38}, + {S5K6AAFX_REG_WR, 0xFF23}, + {S5K6AAFX_REG_WR, 0x01AB}, + {S5K6AAFX_REG_WR, 0xFF81}, + {S5K6AAFX_REG_WR, 0xFF0D}, + {S5K6AAFX_REG_WR, 0x0169}, + {S5K6AAFX_REG_WR, 0x00DE}, + {S5K6AAFX_REG_WR, 0xFFEF}, + {S5K6AAFX_REG_WR, 0xFFCA}, + {S5K6AAFX_REG_WR, 0x014D}, + {S5K6AAFX_REG_WR, 0x01C3}, + {S5K6AAFX_REG_WR, 0xFF7E}, + {S5K6AAFX_REG_WR, 0x016F}, + + {S5K6AAFX_REG_WR, 0x01FA}, /* WW */ + {S5K6AAFX_REG_WR, 0xFFB9}, + {S5K6AAFX_REG_WR, 0xFFF8}, + {S5K6AAFX_REG_WR, 0x0116}, + {S5K6AAFX_REG_WR, 0x00BD}, + {S5K6AAFX_REG_WR, 0xFF38}, + {S5K6AAFX_REG_WR, 0xFF23}, + {S5K6AAFX_REG_WR, 0x01AB}, + {S5K6AAFX_REG_WR, 0xFF81}, + {S5K6AAFX_REG_WR, 0xFF0D}, + {S5K6AAFX_REG_WR, 0x0169}, + {S5K6AAFX_REG_WR, 0x00DE}, + {S5K6AAFX_REG_WR, 0xFFEF}, + {S5K6AAFX_REG_WR, 0xFFCA}, + {S5K6AAFX_REG_WR, 0x014D}, + {S5K6AAFX_REG_WR, 0x01C3}, + {S5K6AAFX_REG_WR, 0xFF7E}, + {S5K6AAFX_REG_WR, 0x016F}, + + {S5K6AAFX_REG_WR, 0x01FA}, /* CW */ + {S5K6AAFX_REG_WR, 0xFFB9}, + {S5K6AAFX_REG_WR, 0xFFF8}, + {S5K6AAFX_REG_WR, 0x0116}, + {S5K6AAFX_REG_WR, 0x00BD}, + {S5K6AAFX_REG_WR, 0xFF38}, + {S5K6AAFX_REG_WR, 0xFF23}, + {S5K6AAFX_REG_WR, 0x01AB}, + {S5K6AAFX_REG_WR, 0xFF81}, + {S5K6AAFX_REG_WR, 0xFF0D}, + {S5K6AAFX_REG_WR, 0x0169}, + {S5K6AAFX_REG_WR, 0x00DE}, + {S5K6AAFX_REG_WR, 0xFFEF}, + {S5K6AAFX_REG_WR, 0xFFCA}, + {S5K6AAFX_REG_WR, 0x014D}, + {S5K6AAFX_REG_WR, 0x01C3}, + {S5K6AAFX_REG_WR, 0xFF7E}, + {S5K6AAFX_REG_WR, 0x016F}, + + {S5K6AAFX_REG_WR, 0x0276/*0x028D*/}, /* D50 */ + {S5K6AAFX_REG_WR, 0xFFCA/*0xFFBC*/}, + {S5K6AAFX_REG_WR, 0xFFCF/*0xFFA9*/}, + {S5K6AAFX_REG_WR, 0x010D/*0x0124*/}, + {S5K6AAFX_REG_WR, 0x010E/*0x0115*/}, + {S5K6AAFX_REG_WR, 0xFF6D/*0xFF1B*/}, + {S5K6AAFX_REG_WR, 0xFE97/*0xFEC5*/}, + {S5K6AAFX_REG_WR, 0x0144/*0x01F8*/}, + {S5K6AAFX_REG_WR, 0xFF9E/*0xFF4B*/}, + {S5K6AAFX_REG_WR, 0xFE8A/*0xFEA4*/}, + {S5K6AAFX_REG_WR, 0x0137/*0x0198*/}, + {S5K6AAFX_REG_WR, 0x0131/*0x00D0*/}, + {S5K6AAFX_REG_WR, 0xFFF3/*0xFFE7*/}, + {S5K6AAFX_REG_WR, 0xFFF3/*0xFFB1*/}, + {S5K6AAFX_REG_WR, 0x0193/*0x016E*/}, + {S5K6AAFX_REG_WR, 0x0269/*0x0217*/}, + {S5K6AAFX_REG_WR, 0xFFBD/*0xFF43*/}, + {S5K6AAFX_REG_WR, 0x0162/*0x019E*/}, + + + {S5K6AAFX_REG_WR, 0x01A0}, /* D65 */ + {S5K6AAFX_REG_WR, 0xFFC4}, + {S5K6AAFX_REG_WR, 0xFF8F}, + {S5K6AAFX_REG_WR, 0x00FC}, + {S5K6AAFX_REG_WR, 0x012D}, + {S5K6AAFX_REG_WR, 0xFF21}, + {S5K6AAFX_REG_WR, 0xFF5C}, + {S5K6AAFX_REG_WR, 0x0169}, + {S5K6AAFX_REG_WR, 0xFF92}, + {S5K6AAFX_REG_WR, 0xFF60}, + {S5K6AAFX_REG_WR, 0x013C}, + {S5K6AAFX_REG_WR, 0x0171}, + {S5K6AAFX_REG_WR, 0x0004}, + {S5K6AAFX_REG_WR, 0xFFD3}, + {S5K6AAFX_REG_WR, 0x01DF}, + {S5K6AAFX_REG_WR, 0x01A4}, + {S5K6AAFX_REG_WR, 0xFF97}, + {S5K6AAFX_REG_WR, 0x016E}, + /* param_end - TVAR_wbt_pBaseCcms */ + + /* param_start - TVAR_wbt_pOutdoorCcm */ + {S5K6AAFX_REG_W_ADDL, 0x2380}, + {S5K6AAFX_REG_WR, 0x01F2/*0x01F2*//*0x019D*/}, /* TVAR_wbt_pOutdoorCcm[0] */ + {S5K6AAFX_REG_WR, 0xFFC3/*0xFFc3*//*0xFFC8*/}, /* TVAR_wbt_pOutdoorCcm[1] */ + {S5K6AAFX_REG_WR, 0xFFE3/*0xFFe3*//*0x000C*/}, /* TVAR_wbt_pOutdoorCcm[2] */ + {S5K6AAFX_REG_WR, 0x00F9/*0x00F9*//*0x008D*/}, /* 
TVAR_wbt_pOutdoorCcm[3] */ + {S5K6AAFX_REG_WR, 0x013F/*0x013F*//*0x00CD*/}, /* TVAR_wbt_pOutdoorCcm[4] */ + {S5K6AAFX_REG_WR, 0xFF6E/*0xFF6E*//*0xFF3F*/}, /* TVAR_wbt_pOutdoorCcm[5] */ + {S5K6AAFX_REG_WR, 0xFEBB/*0xFEbb*//*0xFEDD*/}, /* TVAR_wbt_pOutdoorCcm[6] */ + {S5K6AAFX_REG_WR, 0x01F2/*0x01F2*//*0x01C6*/}, /* TVAR_wbt_pOutdoorCcm[7] */ + {S5K6AAFX_REG_WR, 0xFEFA/*0xFEFA*//*0xFF77*/}, /* TVAR_wbt_pOutdoorCcm[8] */ + {S5K6AAFX_REG_WR, 0xFF37/*0xFF37*//*0xFEAB*/}, /* TVAR_wbt_pOutdoorCcm[9] */ + {S5K6AAFX_REG_WR, 0x01A2/*0x01A2*//*0x015D*/}, /* TVAR_wbt_pOutdoorCcm[10] */ + {S5K6AAFX_REG_WR, 0x0126/*0x0126*//*0x0082*/}, /* TVAR_wbt_pOutdoorCcm[11] */ + {S5K6AAFX_REG_WR, 0xFFE0/*0xFFE0*//*0xFFCA*/}, /* TVAR_wbt_pOutdoorCcm[12] */ + {S5K6AAFX_REG_WR, 0xFFBF/*0xFFBF*//*0xFFA2*/}, /* TVAR_wbt_pOutdoorCcm[13] */ + {S5K6AAFX_REG_WR, 0x01E6/*0x01E6*//*0x016F*/}, /* TVAR_wbt_pOutdoorCcm[14] */ + {S5K6AAFX_REG_WR, 0x0186/*0x0186*//*0x0171*/}, /* TVAR_wbt_pOutdoorCcm[15] */ + {S5K6AAFX_REG_WR, 0xFF4B/*0xFF4B*//*0xFF35*/}, /* TVAR_wbt_pOutdoorCcm[16] */ + {S5K6AAFX_REG_WR, 0x01B1/*0x01B1*//*0x013E*/}, /* TVAR_wbt_pOutdoorCcm[17] */ + /* param_end - TVAR_wbt_pOutdoorCcm */ + + {S5K6AAFX_REG_W_ADDL, 0x06D4}, + {S5K6AAFX_REG_WR, 0x2380}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_W_ADDL, 0x06CC}, + {S5K6AAFX_REG_WR, 0x23A4}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_W_ADDL, 0x06E8}, + {S5K6AAFX_REG_WR, 0x23A4}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x23C8}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x23EC}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x2410}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x2434}, + {S5K6AAFX_REG_WR, 0x7000}, + {S5K6AAFX_REG_WR, 0x2458}, + {S5K6AAFX_REG_WR, 0x7000}, + + {S5K6AAFX_REG_W_ADDL, 0x06DA}, + {S5K6AAFX_REG_WR, 0x00BF}, /* SARR_AwbCcmCord[0] */ + {S5K6AAFX_REG_WR, 0x00E6}, /* SARR_AwbCcmCord[1] */ + {S5K6AAFX_REG_WR, 0x00F2}, /* SARR_AwbCcmCord[2] */ + {S5K6AAFX_REG_WR, 0x0143}, /* SARR_AwbCcmCord[3] */ + {S5K6AAFX_REG_WR, 0x0178}, /* SARR_AwbCcmCord[4] */ + {S5K6AAFX_REG_WR, 0x01A3}, /* SARR_AwbCcmCord[5] */ + + /* param_start - SARR_uNormBrInDoor */ + {S5K6AAFX_REG_W_ADDL, 0x07E8}, + {S5K6AAFX_REG_WR, 0x0016/*0x000A*/}, /* SARR_uNormBrInDoor[0] */ + {S5K6AAFX_REG_WR, 0x0028/*0x0019*/}, /* SARR_uNormBrInDoor[1] */ + {S5K6AAFX_REG_WR, 0x0096/*0x0096*/}, /* SARR_uNormBrInDoor[2] */ + {S5K6AAFX_REG_WR, 0x01F4/*0x01F4*/}, /* SARR_uNormBrInDoor[3] */ + {S5K6AAFX_REG_WR, 0x07D0/*0x07D0*/}, /* SARR_uNormBrInDoor[4] */ + /* param_end - SARR_uNormBrInDoor */ + + /* param_start - afit_uNoiseIndInDoor */ + {S5K6AAFX_REG_W_ADDL, 0x07D0}, + {S5K6AAFX_REG_WR, 0x0030}, /* afit_uNoiseIndInDoor[0] */ + {S5K6AAFX_REG_WR, 0x0046}, /* afit_uNoiseIndInDoor[1] */ + {S5K6AAFX_REG_WR, 0x0088}, /* afit_uNoiseIndInDoor[2] */ + {S5K6AAFX_REG_WR, 0x0205}, /* afit_uNoiseIndInDoor[3] */ + {S5K6AAFX_REG_WR, 0x02BC}, /* afit_uNoiseIndInDoor[4] */ + /* param_end - afit_uNoiseIndInDoor */ + + {S5K6AAFX_REG_W_ADDL, 0x07E6}, + {S5K6AAFX_REG_WR, 0x0000}, /* afit_bUseNoiseInd */ + + /* param_start - TVAR_afit_pBaseVals */ + {S5K6AAFX_REG_W_ADDL, 0x0828}, + {S5K6AAFX_REG_WR, 0x0010}, /*TVAR_afit_pBaseVals[0] 70000828 */ + {S5K6AAFX_REG_WR, 0x0031}, /*TVAR_afit_pBaseVals[1] 7000082A */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[2] 7000082C */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[3] 7000082E */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[4] 70000830 */ + {S5K6AAFX_REG_WR, 0x03FF}, /*TVAR_afit_pBaseVals[5] 70000832 */ + {S5K6AAFX_REG_WR, 
0x0021}, /*TVAR_afit_pBaseVals[6] 70000834 */ + {S5K6AAFX_REG_WR, 0x0028}, /*TVAR_afit_pBaseVals[7] 70000836 */ + {S5K6AAFX_REG_WR, 0x0050}, /*TVAR_afit_pBaseVals[8] 70000838 */ + {S5K6AAFX_REG_WR, 0x00FF}, /*TVAR_afit_pBaseVals[9] 7000083A */ + {S5K6AAFX_REG_WR, 0x0129}, /*TVAR_afit_pBaseVals[10] 7000083C */ + {S5K6AAFX_REG_WR, 0x000A}, /*TVAR_afit_pBaseVals[11] 7000083E */ + {S5K6AAFX_REG_WR, 0x0028}, /*TVAR_afit_pBaseVals[12] 70000840 */ + {S5K6AAFX_REG_WR, 0x0028}, /*TVAR_afit_pBaseVals[13] 70000842 */ + {S5K6AAFX_REG_WR, 0x03FF}, /*TVAR_afit_pBaseVals[14] 70000844 */ + {S5K6AAFX_REG_WR, 0x03FF}, /*TVAR_afit_pBaseVals[15] 70000846 */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[16] 70000848 */ + {S5K6AAFX_REG_WR, 0x0344}, /*TVAR_afit_pBaseVals[17] 7000084A */ + {S5K6AAFX_REG_WR, 0x033A}, /*TVAR_afit_pBaseVals[18] 7000084C */ + {S5K6AAFX_REG_WR, 0x03FF}, /*TVAR_afit_pBaseVals[19] 7000084E */ + {S5K6AAFX_REG_WR, 0x03FF}, /*TVAR_afit_pBaseVals[20] 70000850 */ + {S5K6AAFX_REG_WR, 0x000A}, /*TVAR_afit_pBaseVals[21] 70000852 */ + {S5K6AAFX_REG_WR, 0x0032}, /*TVAR_afit_pBaseVals[22] 70000854 */ + {S5K6AAFX_REG_WR, 0x001E}, /*TVAR_afit_pBaseVals[23] 70000856 */ + {S5K6AAFX_REG_WR, 0x0032}, /*TVAR_afit_pBaseVals[24] 70000858 */ + {S5K6AAFX_REG_WR, 0x0032}, /*TVAR_afit_pBaseVals[25] 7000085A */ + {S5K6AAFX_REG_WR, 0x0032}, /*TVAR_afit_pBaseVals[26] 7000085C */ + {S5K6AAFX_REG_WR, 0x0010}, /*TVAR_afit_pBaseVals[27] 7000085E */ + {S5K6AAFX_REG_WR, 0x0032}, /*TVAR_afit_pBaseVals[28] 70000860 */ + {S5K6AAFX_REG_WR, 0x0106}, /*TVAR_afit_pBaseVals[29] 70000862 */ + {S5K6AAFX_REG_WR, 0x006F}, /*TVAR_afit_pBaseVals[30] 70000864 */ + {S5K6AAFX_REG_WR, 0x0C0F}, /*TVAR_afit_pBaseVals[31] 70000866 */ + {S5K6AAFX_REG_WR, 0x0C0F}, /*TVAR_afit_pBaseVals[32] 70000868 */ + {S5K6AAFX_REG_WR, 0x0303}, /*TVAR_afit_pBaseVals[33] 7000086A */ + {S5K6AAFX_REG_WR, 0x0303}, /*TVAR_afit_pBaseVals[34] 7000086C */ + {S5K6AAFX_REG_WR, 0x140A}, /*TVAR_afit_pBaseVals[35] 7000086E */ + {S5K6AAFX_REG_WR, 0x140A}, /*TVAR_afit_pBaseVals[36] 70000870 */ + {S5K6AAFX_REG_WR, 0x2828}, /*TVAR_afit_pBaseVals[37] 70000872 */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[38] 70000874 */ + {S5K6AAFX_REG_WR, 0x020A}, /*TVAR_afit_pBaseVals[39] 70000876 */ + {S5K6AAFX_REG_WR, 0x0480}, /*TVAR_afit_pBaseVals[40] 70000878 */ + {S5K6AAFX_REG_WR, 0x0E08}, /*TVAR_afit_pBaseVals[41] 7000087A */ + {S5K6AAFX_REG_WR, 0x030A}, /*TVAR_afit_pBaseVals[42] 7000087C */ + {S5K6AAFX_REG_WR, 0x0A03}, /*TVAR_afit_pBaseVals[43] 7000087E */ + {S5K6AAFX_REG_WR, 0x0A11}, /*TVAR_afit_pBaseVals[44] 70000880 */ + {S5K6AAFX_REG_WR, 0x000F}, /*TVAR_afit_pBaseVals[45] 70000882 */ + {S5K6AAFX_REG_WR, 0x0500}, /*TVAR_afit_pBaseVals[46] 70000884 */ + {S5K6AAFX_REG_WR, 0x0914}, /*TVAR_afit_pBaseVals[47] 70000886 */ + {S5K6AAFX_REG_WR, 0x0012}, /*TVAR_afit_pBaseVals[48] 70000888 */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[49] 7000088A */ + {S5K6AAFX_REG_WR, 0x0005}, /*TVAR_afit_pBaseVals[50] 7000088C */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[51] 7000088E */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[52] 70000890 */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[53] 70000892 */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[54] 70000894 */ + {S5K6AAFX_REG_WR, 0x0A00}, /*TVAR_afit_pBaseVals[55] 70000896 */ + {S5K6AAFX_REG_WR, 0x000A}, /*TVAR_afit_pBaseVals[56] 70000898 */ + {S5K6AAFX_REG_WR, 0x014C}, /*TVAR_afit_pBaseVals[57] 7000089A */ + {S5K6AAFX_REG_WR, 0x014D}, /*TVAR_afit_pBaseVals[58] 7000089C */ + {S5K6AAFX_REG_WR, 
0x0100}, /*TVAR_afit_pBaseVals[59] 7000089E */ + {S5K6AAFX_REG_WR, 0xA020/*0x8020*/}, /*TVAR_afit_pBaseVals[60] 700008A0 */ //CKLIN_20100908 White level low brightness 2.5 pass + {S5K6AAFX_REG_WR, 0x0180}, /*TVAR_afit_pBaseVals[61] 700008A2 */ + {S5K6AAFX_REG_WR, 0x0001/*0x000A*/}, /*TVAR_afit_pBaseVals[62] 700008A4 */ //CKLIN_20100908 Black Noise pass + + {S5K6AAFX_REG_WR, 0xFFFE}, /*TVAR_afit_pBaseVals[63] 700008A6 */ + {S5K6AAFX_REG_WR, 0x0031}, /*TVAR_afit_pBaseVals[64] 700008A8 */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[65] 700008AA */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[66] 700008AC */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[67] 700008AE */ + {S5K6AAFX_REG_WR, 0x03FF}, /*TVAR_afit_pBaseVals[68] 700008B0 */ + {S5K6AAFX_REG_WR, 0x000C}, /*TVAR_afit_pBaseVals[69] 700008B2 */ + {S5K6AAFX_REG_WR, 0x000E}, /*TVAR_afit_pBaseVals[70] 700008B4 */ + {S5K6AAFX_REG_WR, 0x0050}, /*TVAR_afit_pBaseVals[71] 700008B6 */ + {S5K6AAFX_REG_WR, 0x00FF}, /*TVAR_afit_pBaseVals[72] 700008B8 */ + {S5K6AAFX_REG_WR, 0x0129}, /*TVAR_afit_pBaseVals[73] 700008BA */ + {S5K6AAFX_REG_WR, 0x000A}, /*TVAR_afit_pBaseVals[74] 700008BC */ + {S5K6AAFX_REG_WR, 0x0028}, /*TVAR_afit_pBaseVals[75] 700008BE */ + {S5K6AAFX_REG_WR, 0x0028}, /*TVAR_afit_pBaseVals[76] 700008C0 */ + {S5K6AAFX_REG_WR, 0x03FF}, /*TVAR_afit_pBaseVals[77] 700008C2 */ + {S5K6AAFX_REG_WR, 0x03FF}, /*TVAR_afit_pBaseVals[78] 700008C4 */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[79] 700008C6 */ + {S5K6AAFX_REG_WR, 0x0114}, /*TVAR_afit_pBaseVals[80] 700008C8 */ + {S5K6AAFX_REG_WR, 0x020A}, /*TVAR_afit_pBaseVals[81] 700008CA */ + {S5K6AAFX_REG_WR, 0x03FF}, /*TVAR_afit_pBaseVals[82] 700008CC */ + {S5K6AAFX_REG_WR, 0x03FF}, /*TVAR_afit_pBaseVals[83] 700008CE */ + {S5K6AAFX_REG_WR, 0x0018}, /*TVAR_afit_pBaseVals[84] 700008D0 */ + {S5K6AAFX_REG_WR, 0x0032}, /*TVAR_afit_pBaseVals[85] 700008D2 */ + {S5K6AAFX_REG_WR, 0x000A}, /*TVAR_afit_pBaseVals[86] 700008D4 */ + {S5K6AAFX_REG_WR, 0x0032}, /*TVAR_afit_pBaseVals[87] 700008D6 */ + {S5K6AAFX_REG_WR, 0x0028}, /*TVAR_afit_pBaseVals[88] 700008D8 */ + {S5K6AAFX_REG_WR, 0x0032}, /*TVAR_afit_pBaseVals[89] 700008DA */ + {S5K6AAFX_REG_WR, 0x0010}, /*TVAR_afit_pBaseVals[90] 700008DC */ + {S5K6AAFX_REG_WR, 0x0032}, /*TVAR_afit_pBaseVals[91] 700008DE */ + {S5K6AAFX_REG_WR, 0x0106}, /*TVAR_afit_pBaseVals[92] 700008E0 */ + {S5K6AAFX_REG_WR, 0x006F}, /*TVAR_afit_pBaseVals[93] 700008E2 */ + {S5K6AAFX_REG_WR, 0x050F}, /*TVAR_afit_pBaseVals[94] 700008E4 */ + {S5K6AAFX_REG_WR, 0x0A1F}, /*TVAR_afit_pBaseVals[95] 700008E6 */ + {S5K6AAFX_REG_WR, 0x0203}, /*TVAR_afit_pBaseVals[96] 700008E8 */ + {S5K6AAFX_REG_WR, 0x0303}, /*TVAR_afit_pBaseVals[97] 700008EA */ + {S5K6AAFX_REG_WR, 0x140A}, /*TVAR_afit_pBaseVals[98] 700008EC */ + {S5K6AAFX_REG_WR, 0x140A}, /*TVAR_afit_pBaseVals[99] 700008EE */ + {S5K6AAFX_REG_WR, 0x2828}, /*TVAR_afit_pBaseVals[100] 700008F0 */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[101] 700008F2 */ + {S5K6AAFX_REG_WR, 0x020A}, /*TVAR_afit_pBaseVals[102] 700008F4 */ + {S5K6AAFX_REG_WR, 0x0480}, /*TVAR_afit_pBaseVals[103] 700008F6 */ + {S5K6AAFX_REG_WR, 0x0E08}, /*TVAR_afit_pBaseVals[104] 700008F8 */ + {S5K6AAFX_REG_WR, 0x030A}, /*TVAR_afit_pBaseVals[105] 700008FA */ + {S5K6AAFX_REG_WR, 0x1403}, /*TVAR_afit_pBaseVals[106] 700008FC */ + {S5K6AAFX_REG_WR, 0x0A11}, /*TVAR_afit_pBaseVals[107] 700008FE */ + {S5K6AAFX_REG_WR, 0x0A0F}, /*TVAR_afit_pBaseVals[108] 70000900 */ + {S5K6AAFX_REG_WR, 0x050A}, /*TVAR_afit_pBaseVals[109] 70000902 */ + {S5K6AAFX_REG_WR, 0x101E}, 
/*TVAR_afit_pBaseVals[110] 70000904 */ + {S5K6AAFX_REG_WR, 0x101E}, /*TVAR_afit_pBaseVals[111] 70000906 */ + {S5K6AAFX_REG_WR, 0x0A08}, /*TVAR_afit_pBaseVals[112] 70000908 */ + {S5K6AAFX_REG_WR, 0x0005}, /*TVAR_afit_pBaseVals[113] 7000090A */ + {S5K6AAFX_REG_WR, 0x0400}, /*TVAR_afit_pBaseVals[114] 7000090C */ + {S5K6AAFX_REG_WR, 0x0400}, /*TVAR_afit_pBaseVals[115] 7000090E */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[116] 70000910 */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[117] 70000912 */ + {S5K6AAFX_REG_WR, 0x0A00}, /*TVAR_afit_pBaseVals[118] 70000914 */ + {S5K6AAFX_REG_WR, 0x000A}, /*TVAR_afit_pBaseVals[119] 70000916 */ + {S5K6AAFX_REG_WR, 0x0180}, /*TVAR_afit_pBaseVals[120] 70000918 */ + {S5K6AAFX_REG_WR, 0x0151}, /*TVAR_afit_pBaseVals[121] 7000091A */ + {S5K6AAFX_REG_WR, 0x0100}, /*TVAR_afit_pBaseVals[122] 7000091C */ + {S5K6AAFX_REG_WR, 0x9820}, /*TVAR_afit_pBaseVals[123] 7000091E */ + {S5K6AAFX_REG_WR, 0x0180}, /*TVAR_afit_pBaseVals[124] 70000920 */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[125] 70000922 */ + + {S5K6AAFX_REG_WR, 0xFFFB}, /*TVAR_afit_pBaseVals[126] 70000924 */ + {S5K6AAFX_REG_WR, 0x0031}, /*TVAR_afit_pBaseVals[127] 70000926 */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[128] 70000928 */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[129] 7000092A */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[130] 7000092C */ + {S5K6AAFX_REG_WR, 0x03FF}, /*TVAR_afit_pBaseVals[131] 7000092E */ + {S5K6AAFX_REG_WR, 0x0008}, /*TVAR_afit_pBaseVals[132] 70000930 */ + {S5K6AAFX_REG_WR, 0x0006}, /*TVAR_afit_pBaseVals[133] 70000932 */ + {S5K6AAFX_REG_WR, 0x0050}, /*TVAR_afit_pBaseVals[134] 70000934 */ + {S5K6AAFX_REG_WR, 0x0050}, /*TVAR_afit_pBaseVals[135] 70000936 */ + {S5K6AAFX_REG_WR, 0x0002}, /*TVAR_afit_pBaseVals[136] 70000938 */ + {S5K6AAFX_REG_WR, 0x000A}, /*TVAR_afit_pBaseVals[137] 7000093A */ + {S5K6AAFX_REG_WR, 0x000A}, /*TVAR_afit_pBaseVals[138] 7000093C */ + {S5K6AAFX_REG_WR, 0x000A}, /*TVAR_afit_pBaseVals[139] 7000093E */ + {S5K6AAFX_REG_WR, 0x03FF}, /*TVAR_afit_pBaseVals[140] 70000940 */ + {S5K6AAFX_REG_WR, 0x03FF}, /*TVAR_afit_pBaseVals[141] 70000942 */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[142] 70000944 */ + {S5K6AAFX_REG_WR, 0x0014}, /*TVAR_afit_pBaseVals[143] 70000946 */ + {S5K6AAFX_REG_WR, 0x000A}, /*TVAR_afit_pBaseVals[144] 70000948 */ + {S5K6AAFX_REG_WR, 0x03FF}, /*TVAR_afit_pBaseVals[145] 7000094A */ + {S5K6AAFX_REG_WR, 0x03FF}, /*TVAR_afit_pBaseVals[146] 7000094C */ + {S5K6AAFX_REG_WR, 0x001C}, /*TVAR_afit_pBaseVals[147] 7000094E */ + {S5K6AAFX_REG_WR, 0x0032}, /*TVAR_afit_pBaseVals[148] 70000950 */ + {S5K6AAFX_REG_WR, 0x000A}, /*TVAR_afit_pBaseVals[149] 70000952 */ + {S5K6AAFX_REG_WR, 0x0032}, /*TVAR_afit_pBaseVals[150] 70000954 */ + {S5K6AAFX_REG_WR, 0x0028}, /*TVAR_afit_pBaseVals[151] 70000956 */ + {S5K6AAFX_REG_WR, 0x0032}, /*TVAR_afit_pBaseVals[152] 70000958 */ + {S5K6AAFX_REG_WR, 0x0010}, /*TVAR_afit_pBaseVals[153] 7000095A */ + {S5K6AAFX_REG_WR, 0x0032}, /*TVAR_afit_pBaseVals[154] 7000095C */ + {S5K6AAFX_REG_WR, 0x0106}, /*TVAR_afit_pBaseVals[155] 7000095E */ + {S5K6AAFX_REG_WR, 0x006F}, /*TVAR_afit_pBaseVals[156] 70000960 */ + {S5K6AAFX_REG_WR, 0x0205}, /*TVAR_afit_pBaseVals[157] 70000962 */ + {S5K6AAFX_REG_WR, 0x051E}, /*TVAR_afit_pBaseVals[158] 70000964 */ + {S5K6AAFX_REG_WR, 0x0101}, /*TVAR_afit_pBaseVals[159] 70000966 */ + {S5K6AAFX_REG_WR, 0x0202}, /*TVAR_afit_pBaseVals[160] 70000968 */ + {S5K6AAFX_REG_WR, 0x140A}, /*TVAR_afit_pBaseVals[161] 7000096A */ + {S5K6AAFX_REG_WR, 0x140A}, 
/*TVAR_afit_pBaseVals[162] 7000096C */ + {S5K6AAFX_REG_WR, 0x2828}, /*TVAR_afit_pBaseVals[163] 7000096E */ + {S5K6AAFX_REG_WR, 0x0606}, /*TVAR_afit_pBaseVals[164] 70000970 */ + {S5K6AAFX_REG_WR, 0x0205}, /*TVAR_afit_pBaseVals[165] 70000972 */ + {S5K6AAFX_REG_WR, 0x0480}, /*TVAR_afit_pBaseVals[166] 70000974 */ + {S5K6AAFX_REG_WR, 0x000A}, /*TVAR_afit_pBaseVals[167] 70000976 */ + {S5K6AAFX_REG_WR, 0x0005}, /*TVAR_afit_pBaseVals[168] 70000978 */ + {S5K6AAFX_REG_WR, 0x1903}, /*TVAR_afit_pBaseVals[169] 7000097A */ + {S5K6AAFX_REG_WR, 0x1911}, /*TVAR_afit_pBaseVals[170] 7000097C */ + {S5K6AAFX_REG_WR, 0x0A0F}, /*TVAR_afit_pBaseVals[171] 7000097E */ + {S5K6AAFX_REG_WR, 0x050A}, /*TVAR_afit_pBaseVals[172] 70000980 */ + {S5K6AAFX_REG_WR, 0x2028}, /*TVAR_afit_pBaseVals[173] 70000982 */ + {S5K6AAFX_REG_WR, 0x2028}, /*TVAR_afit_pBaseVals[174] 70000984 */ + {S5K6AAFX_REG_WR, 0x0A08}, /*TVAR_afit_pBaseVals[175] 70000986 */ + {S5K6AAFX_REG_WR, 0x0007}, /*TVAR_afit_pBaseVals[176] 70000988 */ + {S5K6AAFX_REG_WR, 0x0403}, /*TVAR_afit_pBaseVals[177] 7000098A */ + {S5K6AAFX_REG_WR, 0x0402}, /*TVAR_afit_pBaseVals[178] 7000098C */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[179] 7000098E */ + {S5K6AAFX_REG_WR, 0x0203}, /*TVAR_afit_pBaseVals[180] 70000990 */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[181] 70000992 */ + {S5K6AAFX_REG_WR, 0x0006}, /*TVAR_afit_pBaseVals[182] 70000994 */ + {S5K6AAFX_REG_WR, 0x0180}, /*TVAR_afit_pBaseVals[183] 70000996 */ + {S5K6AAFX_REG_WR, 0x0170}, /*TVAR_afit_pBaseVals[184] 70000998 */ + {S5K6AAFX_REG_WR, 0x0100}, /*TVAR_afit_pBaseVals[185] 7000099A */ + {S5K6AAFX_REG_WR, 0x8050/*0x8030*/}, /*TVAR_afit_pBaseVals[186] 7000099C */ //CKLIN_20100908 Shading pass + {S5K6AAFX_REG_WR, 0x0180}, /*TVAR_afit_pBaseVals[187] 7000099E */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[188] 700009A0 */ + + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[189] 700009A2 */ + {S5K6AAFX_REG_WR, 0x0031}, /*TVAR_afit_pBaseVals[190] 700009A4 */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[191] 700009A6 */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[192] 700009A8 */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[193] 700009AA */ + {S5K6AAFX_REG_WR, 0x03FF}, /*TVAR_afit_pBaseVals[194] 700009AC */ + {S5K6AAFX_REG_WR, 0x0008}, /*TVAR_afit_pBaseVals[195] 700009AE */ + {S5K6AAFX_REG_WR, 0x0006}, /*TVAR_afit_pBaseVals[196] 700009B0 */ + {S5K6AAFX_REG_WR, 0x0050}, /*TVAR_afit_pBaseVals[197] 700009B2 */ + {S5K6AAFX_REG_WR, 0x0050}, /*TVAR_afit_pBaseVals[198] 700009B4 */ + {S5K6AAFX_REG_WR, 0x0002}, /*TVAR_afit_pBaseVals[199] 700009B6 */ + {S5K6AAFX_REG_WR, 0x000A}, /*TVAR_afit_pBaseVals[200] 700009B8 */ + {S5K6AAFX_REG_WR, 0x000A}, /*TVAR_afit_pBaseVals[201] 700009BA */ + {S5K6AAFX_REG_WR, 0x000A}, /*TVAR_afit_pBaseVals[202] 700009BC */ + {S5K6AAFX_REG_WR, 0x03FF}, /*TVAR_afit_pBaseVals[203] 700009BE */ + {S5K6AAFX_REG_WR, 0x03FF}, /*TVAR_afit_pBaseVals[204] 700009C0 */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[205] 700009C2 */ + {S5K6AAFX_REG_WR, 0x0014}, /*TVAR_afit_pBaseVals[206] 700009C4 */ + {S5K6AAFX_REG_WR, 0x0032}, /*TVAR_afit_pBaseVals[207] 700009C6 */ + {S5K6AAFX_REG_WR, 0x03FF}, /*TVAR_afit_pBaseVals[208] 700009C8 */ + {S5K6AAFX_REG_WR, 0x03FF}, /*TVAR_afit_pBaseVals[209] 700009CA */ + {S5K6AAFX_REG_WR, 0x001C}, /*TVAR_afit_pBaseVals[210] 700009CC */ + {S5K6AAFX_REG_WR, 0x0032}, /*TVAR_afit_pBaseVals[211] 700009CE */ + {S5K6AAFX_REG_WR, 0x000A}, /*TVAR_afit_pBaseVals[212] 700009D0 */ + {S5K6AAFX_REG_WR, 0x0032}, /*TVAR_afit_pBaseVals[213] 700009D2 */ + 
{S5K6AAFX_REG_WR, 0x0028}, /*TVAR_afit_pBaseVals[214] 700009D4 */ + {S5K6AAFX_REG_WR, 0x0032}, /*TVAR_afit_pBaseVals[215] 700009D6 */ + {S5K6AAFX_REG_WR, 0x0010}, /*TVAR_afit_pBaseVals[216] 700009D8 */ + {S5K6AAFX_REG_WR, 0x0032}, /*TVAR_afit_pBaseVals[217] 700009DA */ + {S5K6AAFX_REG_WR, 0x0106}, /*TVAR_afit_pBaseVals[218] 700009DC */ + {S5K6AAFX_REG_WR, 0x006F}, /*TVAR_afit_pBaseVals[219] 700009DE */ + {S5K6AAFX_REG_WR, 0x0205}, /*TVAR_afit_pBaseVals[220] 700009E0 */ + {S5K6AAFX_REG_WR, 0x051E}, /*TVAR_afit_pBaseVals[221] 700009E2 */ + {S5K6AAFX_REG_WR, 0x0101}, /*TVAR_afit_pBaseVals[222] 700009E4 */ + {S5K6AAFX_REG_WR, 0x0202}, /*TVAR_afit_pBaseVals[223] 700009E6 */ + {S5K6AAFX_REG_WR, 0x140A}, /*TVAR_afit_pBaseVals[224] 700009E8 */ + {S5K6AAFX_REG_WR, 0x140A}, /*TVAR_afit_pBaseVals[225] 700009EA */ + {S5K6AAFX_REG_WR, 0x2828}, /*TVAR_afit_pBaseVals[226] 700009EC */ + {S5K6AAFX_REG_WR, 0x0606}, /*TVAR_afit_pBaseVals[227] 700009EE */ + {S5K6AAFX_REG_WR, 0x0205}, /*TVAR_afit_pBaseVals[228] 700009F0 */ + {S5K6AAFX_REG_WR, 0x0480}, /*TVAR_afit_pBaseVals[229] 700009F2 */ + {S5K6AAFX_REG_WR, 0x000A}, /*TVAR_afit_pBaseVals[230] 700009F4 */ + {S5K6AAFX_REG_WR, 0x0005}, /*TVAR_afit_pBaseVals[231] 700009F6 */ + {S5K6AAFX_REG_WR, 0x1903}, /*TVAR_afit_pBaseVals[232] 700009F8 */ + {S5K6AAFX_REG_WR, 0x1911}, /*TVAR_afit_pBaseVals[233] 700009FA */ + {S5K6AAFX_REG_WR, 0x0A0F}, /*TVAR_afit_pBaseVals[234] 700009FC */ + {S5K6AAFX_REG_WR, 0x050A}, /*TVAR_afit_pBaseVals[235] 700009FE */ + {S5K6AAFX_REG_WR, 0x2028}, /*TVAR_afit_pBaseVals[236] 70000A00 */ + {S5K6AAFX_REG_WR, 0x2028}, /*TVAR_afit_pBaseVals[237] 70000A02 */ + {S5K6AAFX_REG_WR, 0x0A08}, /*TVAR_afit_pBaseVals[238] 70000A04 */ + {S5K6AAFX_REG_WR, 0x0007}, /*TVAR_afit_pBaseVals[239] 70000A06 */ + {S5K6AAFX_REG_WR, 0x0403}, /*TVAR_afit_pBaseVals[240] 70000A08 */ + {S5K6AAFX_REG_WR, 0x0402}, /*TVAR_afit_pBaseVals[241] 70000A0A */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[242] 70000A0C */ + {S5K6AAFX_REG_WR, 0x0203}, /*TVAR_afit_pBaseVals[243] 70000A0E */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[244] 70000A10 */ + {S5K6AAFX_REG_WR, 0x0006}, /*TVAR_afit_pBaseVals[245] 70000A12 */ + {S5K6AAFX_REG_WR, 0x0170}, /*TVAR_afit_pBaseVals[246] 70000A14 */ + {S5K6AAFX_REG_WR, 0x0175}, /*TVAR_afit_pBaseVals[247] 70000A16 */ + {S5K6AAFX_REG_WR, 0x0100}, /*TVAR_afit_pBaseVals[248] 70000A18 */ + {S5K6AAFX_REG_WR, 0x8070/*0x8068*/}, /*TVAR_afit_pBaseVals[249] 70000A1A */ //CKLIN_20100908 Shading pass + {S5K6AAFX_REG_WR, 0x0180}, /*TVAR_afit_pBaseVals[250] 70000A1C */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[251] 70000A1E */ + + {S5K6AAFX_REG_WR, 0x0032/*0x0000*/}, /*TVAR_afit_pBaseVals[252] 70000A20 */ //CKLIN_20100908 White level high brightness 286~2292 pass + {S5K6AAFX_REG_WR, 0x0031}, /*TVAR_afit_pBaseVals[253] 70000A22 */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[254] 70000A24 */ + {S5K6AAFX_REG_WR, 0x0014}, /*TVAR_afit_pBaseVals[255] 70000A26 */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[256] 70000A28 */ + {S5K6AAFX_REG_WR, 0x03FF}, /*TVAR_afit_pBaseVals[257] 70000A2A */ + {S5K6AAFX_REG_WR, 0x000E}, /*TVAR_afit_pBaseVals[258] 70000A2C */ + {S5K6AAFX_REG_WR, 0x0006}, /*TVAR_afit_pBaseVals[259] 70000A2E */ + {S5K6AAFX_REG_WR, 0x0020}, /*TVAR_afit_pBaseVals[260] 70000A30 */ + {S5K6AAFX_REG_WR, 0x0050}, /*TVAR_afit_pBaseVals[261] 70000A32 */ + {S5K6AAFX_REG_WR, 0x0002}, /*TVAR_afit_pBaseVals[262] 70000A34 */ + {S5K6AAFX_REG_WR, 0x000A}, /*TVAR_afit_pBaseVals[263] 70000A36 */ + {S5K6AAFX_REG_WR, 0x000A}, 
/*TVAR_afit_pBaseVals[264] 70000A38 */ + {S5K6AAFX_REG_WR, 0x000A}, /*TVAR_afit_pBaseVals[265] 70000A3A */ + {S5K6AAFX_REG_WR, 0x03FF}, /*TVAR_afit_pBaseVals[266] 70000A3C */ + {S5K6AAFX_REG_WR, 0x03FF}, /*TVAR_afit_pBaseVals[267] 70000A3E */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[268] 70000A40 */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[269] 70000A42 */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[270] 70000A44 */ + {S5K6AAFX_REG_WR, 0x03FF}, /*TVAR_afit_pBaseVals[271] 70000A46 */ + {S5K6AAFX_REG_WR, 0x03FF}, /*TVAR_afit_pBaseVals[272] 70000A48 */ + {S5K6AAFX_REG_WR, 0x0014}, /*TVAR_afit_pBaseVals[273] 70000A4A */ + {S5K6AAFX_REG_WR, 0x0032}, /*TVAR_afit_pBaseVals[274] 70000A4C */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[275] 70000A4E */ + {S5K6AAFX_REG_WR, 0x0032}, /*TVAR_afit_pBaseVals[276] 70000A50 */ + {S5K6AAFX_REG_WR, 0x0020}, /*TVAR_afit_pBaseVals[277] 70000A52 */ + {S5K6AAFX_REG_WR, 0x0032}, /*TVAR_afit_pBaseVals[278] 70000A54 */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[279] 70000A56 */ + {S5K6AAFX_REG_WR, 0x0032}, /*TVAR_afit_pBaseVals[280] 70000A58 */ + {S5K6AAFX_REG_WR, 0x0106}, /*TVAR_afit_pBaseVals[281] 70000A5A */ + {S5K6AAFX_REG_WR, 0x006F}, /*TVAR_afit_pBaseVals[282] 70000A5C */ + {S5K6AAFX_REG_WR, 0x0202}, /*TVAR_afit_pBaseVals[283] 70000A5E */ + {S5K6AAFX_REG_WR, 0x051E}, /*TVAR_afit_pBaseVals[284] 70000A60 */ + {S5K6AAFX_REG_WR, 0x0101}, /*TVAR_afit_pBaseVals[285] 70000A62 */ + {S5K6AAFX_REG_WR, 0x0202}, /*TVAR_afit_pBaseVals[286] 70000A64 */ + {S5K6AAFX_REG_WR, 0x140A}, /*TVAR_afit_pBaseVals[287] 70000A66 */ + {S5K6AAFX_REG_WR, 0x140A}, /*TVAR_afit_pBaseVals[288] 70000A68 */ + {S5K6AAFX_REG_WR, 0x2828}, /*TVAR_afit_pBaseVals[289] 70000A6A */ + {S5K6AAFX_REG_WR, 0x0606}, /*TVAR_afit_pBaseVals[290] 70000A6C */ + {S5K6AAFX_REG_WR, 0x0205}, /*TVAR_afit_pBaseVals[291] 70000A6E */ + {S5K6AAFX_REG_WR, 0x0880}, /*TVAR_afit_pBaseVals[292] 70000A70 */ + {S5K6AAFX_REG_WR, 0x000F}, /*TVAR_afit_pBaseVals[293] 70000A72 */ + {S5K6AAFX_REG_WR, 0x0005}, /*TVAR_afit_pBaseVals[294] 70000A74 */ + {S5K6AAFX_REG_WR, 0x1903}, /*TVAR_afit_pBaseVals[295] 70000A76 */ + {S5K6AAFX_REG_WR, 0x1911}, /*TVAR_afit_pBaseVals[296] 70000A78 */ + {S5K6AAFX_REG_WR, 0x0A0F}, /*TVAR_afit_pBaseVals[297] 70000A7A */ + {S5K6AAFX_REG_WR, 0x050A}, /*TVAR_afit_pBaseVals[298] 70000A7C */ + {S5K6AAFX_REG_WR, 0x2020}, /*TVAR_afit_pBaseVals[299] 70000A7E */ + {S5K6AAFX_REG_WR, 0x2020}, /*TVAR_afit_pBaseVals[300] 70000A80 */ + {S5K6AAFX_REG_WR, 0x0A08}, /*TVAR_afit_pBaseVals[301] 70000A82 */ + {S5K6AAFX_REG_WR, 0x0007}, /*TVAR_afit_pBaseVals[302] 70000A84 */ + {S5K6AAFX_REG_WR, 0x0408}, /*TVAR_afit_pBaseVals[303] 70000A86 */ + {S5K6AAFX_REG_WR, 0x0406}, /*TVAR_afit_pBaseVals[304] 70000A88 */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[305] 70000A8A */ + {S5K6AAFX_REG_WR, 0x0608}, /*TVAR_afit_pBaseVals[306] 70000A8C */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[307] 70000A8E */ + {S5K6AAFX_REG_WR, 0x0006}, /*TVAR_afit_pBaseVals[308] 70000A90 */ + {S5K6AAFX_REG_WR, 0x0170}, /*TVAR_afit_pBaseVals[309] 70000A92 */ + {S5K6AAFX_REG_WR, 0x0175}, /*TVAR_afit_pBaseVals[310] 70000A94 */ + {S5K6AAFX_REG_WR, 0x0100}, /*TVAR_afit_pBaseVals[311] 70000A96 */ + {S5K6AAFX_REG_WR, 0x7058/*0x7050*/}, /*TVAR_afit_pBaseVals[312] 70000A98 */ //CKLIN_20100908 Shading pass + {S5K6AAFX_REG_WR, 0x0180}, /*TVAR_afit_pBaseVals[313] 70000A9A */ + {S5K6AAFX_REG_WR, 0x0000}, /*TVAR_afit_pBaseVals[314] 70000A9C */ + /* param_end - TVAR_afit_pBaseVals */ + + /* param_start - 
afit_pConstBaseVals */ + {S5K6AAFX_REG_WR, 0x00FF}, /* afit_pConstBaseVals[0] */ + {S5K6AAFX_REG_WR, 0x00FF}, /* afit_pConstBaseVals[1] */ + {S5K6AAFX_REG_WR, 0x0800}, /* afit_pConstBaseVals[2] */ + {S5K6AAFX_REG_WR, 0x0600}, /* afit_pConstBaseVals[3] */ + {S5K6AAFX_REG_WR, 0x0000}, /* afit_pConstBaseVals[4] */ + {S5K6AAFX_REG_WR, 0x0000}, /* afit_pConstBaseVals[5] */ + {S5K6AAFX_REG_WR, 0x0000}, /* afit_pConstBaseVals[6] */ + {S5K6AAFX_REG_WR, 0x0300}, /* afit_pConstBaseVals[7] */ + {S5K6AAFX_REG_WR, 0x0002}, /* afit_pConstBaseVals[8] */ + {S5K6AAFX_REG_WR, 0x0400}, /* afit_pConstBaseVals[9] */ + {S5K6AAFX_REG_WR, 0x0106}, /* afit_pConstBaseVals[10] */ + {S5K6AAFX_REG_WR, 0x0005}, /* afit_pConstBaseVals[11] */ + {S5K6AAFX_REG_WR, 0x0000}, /* afit_pConstBaseVals[12] */ + {S5K6AAFX_REG_WR, 0x0703}, /* afit_pConstBaseVals[13] */ + {S5K6AAFX_REG_WR, 0x0000}, /* afit_pConstBaseVals[14] */ + {S5K6AAFX_REG_WR, 0xFFD6}, /* afit_pConstBaseVals[15] */ + {S5K6AAFX_REG_WR, 0x53C1}, /* afit_pConstBaseVals[16] */ + {S5K6AAFX_REG_WR, 0xE1FE}, /* afit_pConstBaseVals[17] */ + {S5K6AAFX_REG_WR, 0x0001}, /* afit_pConstBaseVals[18] */ + /* param_end - afit_pConstBaseVals */ + + + {S5K6AAFX_REG_W_ADDL, 0x0488}, + {S5K6AAFX_REG_WR, 0x416E}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0xA316}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_W_ADDL, 0x2174}, + {S5K6AAFX_REG_WR, 0xF424}, + {S5K6AAFX_REG_WR, 0x0000}, + + {S5K6AAFX_REG_W_ADDL, 0x0490}, + {S5K6AAFX_REG_WR, 0x416E}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_WR, 0xA316}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_W_ADDL, 0x2178}, + {S5K6AAFX_REG_WR, 0xF424}, + {S5K6AAFX_REG_WR, 0x0000}, + + {S5K6AAFX_REG_W_ADDL, 0x0498}, + {S5K6AAFX_REG_WR, 0x01E8}, + {S5K6AAFX_REG_WR, 0x0310/*0x0270*/}, + {S5K6AAFX_REG_W_ADDL, 0x217C}, + {S5K6AAFX_REG_WR, 0x0580}, + {S5K6AAFX_REG_W_ADDL, 0x049C}, + {S5K6AAFX_REG_WR, 0x0160}, + + + /* WRITE #AWBBTune_EVT4_uMaxExp3 0000 */ + /* WRITE #AWBBTune_EVT4_uCapMaxExp3 0000 */ + /* WRITE #AWBBTune_EVT4_uMaxAnGain3 0000 */ + + {S5K6AAFX_REG_W_ADDL, 0x2170}, + {S5K6AAFX_REG_WR, 0x0001}, + {S5K6AAFX_REG_WR, 0x0090}, + + /* AE Weight */ + {S5K6AAFX_REG_W_ADDL, 0x100E}, + {S5K6AAFX_REG_WR, 0x0101}, + {S5K6AAFX_REG_WR, 0x0101}, + {S5K6AAFX_REG_WR, 0x0101}, + {S5K6AAFX_REG_WR, 0x0101}, + {S5K6AAFX_REG_WR, 0x0101}, + {S5K6AAFX_REG_WR, 0x0101}, + {S5K6AAFX_REG_WR, 0x0101}, + {S5K6AAFX_REG_WR, 0x0101}, + {S5K6AAFX_REG_WR, 0x0201}, + {S5K6AAFX_REG_WR, 0x0202}, + {S5K6AAFX_REG_WR, 0x0202}, + {S5K6AAFX_REG_WR, 0x0102}, + {S5K6AAFX_REG_WR, 0x0201}, + {S5K6AAFX_REG_WR, 0x0302}, + {S5K6AAFX_REG_WR, 0x0203}, + {S5K6AAFX_REG_WR, 0x0102}, + {S5K6AAFX_REG_WR, 0x0201}, + {S5K6AAFX_REG_WR, 0x0302}, + {S5K6AAFX_REG_WR, 0x0203}, + {S5K6AAFX_REG_WR, 0x0102}, + {S5K6AAFX_REG_WR, 0x0201}, + {S5K6AAFX_REG_WR, 0x0302}, + {S5K6AAFX_REG_WR, 0x0203}, + {S5K6AAFX_REG_WR, 0x0102}, + {S5K6AAFX_REG_WR, 0x0101}, + {S5K6AAFX_REG_WR, 0x0202}, + {S5K6AAFX_REG_WR, 0x0202}, + {S5K6AAFX_REG_WR, 0x0101}, + {S5K6AAFX_REG_WR, 0x0101}, + {S5K6AAFX_REG_WR, 0x0101}, + {S5K6AAFX_REG_WR, 0x0101}, + {S5K6AAFX_REG_WR, 0x0101}, +}; + +static const struct s5k6aafx_i2c_reg_conf const clk_init_tbl[] = { + /* clk Settings */ + {S5K6AAFX_REG_W_ADDL, 0x01B8}, + {S5K6AAFX_REG_WR, 0x5DC0}, /* 24MHz input clock */ + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_W_ADDL, 0x01C6}, + {S5K6AAFX_REG_WR, 0x0002}, /* PLL configurations */ + {S5K6AAFX_REG_W_ADDL, 0x01CC}, + {S5K6AAFX_REG_WR, 0x1770}, /* 1st system CLK 24MHz */ + {S5K6AAFX_REG_WR, 0x1770}, /* 24MHz output clock */ + 
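+ /* For reference (inferred from the in-line comments in this table, not
+  * from a datasheet): 0x5DC0 is 24000 decimal, which matches the stated
+  * 24MHz input clock if that field is in kHz, and 0x1770 is 6000, which
+  * matches 24MHz if the system/output clock fields count 4kHz steps. */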
{S5K6AAFX_REG_WR, 0x1770}, /* 2nd system CLK */ + {S5K6AAFX_REG_WR, 0x1B58}, + {S5K6AAFX_REG_WR, 0x36B0}, + {S5K6AAFX_REG_WR, 0x36B0}, + {S5K6AAFX_REG_W_ADDL, 0x01E0}, + {S5K6AAFX_REG_WR, 0x0001}, + + /* delay 100ms */ +}; + +static const struct s5k6aafx_i2c_reg_conf const clk_init_tb2[] = { + /* clk Settings */ + {S5K6AAFX_REG_W_ADDL, 0x01B8}, + {S5K6AAFX_REG_WR, 0x5DC0}, /* 24MHz input clock */ + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_W_ADDL, 0x01C6}, + {S5K6AAFX_REG_WR, 0x0002}, /* PLL configurations */ + {S5K6AAFX_REG_W_ADDL, 0x01CC}, + {S5K6AAFX_REG_WR, 0x1770}, /* 1st system CLK 24MHz */ + {S5K6AAFX_REG_WR, 0x1770}, /* 24MHz output clock */ + {S5K6AAFX_REG_WR, 0x1770}, /* 2nd system CLK */ + {S5K6AAFX_REG_WR, 0x1770}, + {S5K6AAFX_REG_WR, 0x2EE0}, + {S5K6AAFX_REG_WR, 0x2EE0}, + {S5K6AAFX_REG_W_ADDL, 0x01E0}, + {S5K6AAFX_REG_WR, 0x0001}, + + /* delay 100ms */ +}; + +static const struct s5k6aafx_i2c_reg_conf const prev_snap_conf_init_tbl[] = { + /* PREVIEW CONFIGURATION 3 (VGA, YUV) */ + {S5K6AAFX_REG_W_ADDL, 0x02B4}, + {S5K6AAFX_REG_WR, S5K6AAFX_QTR_SIZE_WIDTH}, + {S5K6AAFX_REG_WR, S5K6AAFX_QTR_SIZE_HEIGHT}, + {S5K6AAFX_REG_WR, 0x0005}, /* YUV */ + {S5K6AAFX_REG_W_ADDL, 0x02C0}, + {S5K6AAFX_REG_WR, 0x0000}, /* PLL config */ + {S5K6AAFX_REG_W_ADDL, 0x02BA}, + {S5K6AAFX_REG_WR, 0x1770}, + {S5K6AAFX_REG_WR, 0x1700}, + {S5K6AAFX_REG_WR, 0x0042}, + {S5K6AAFX_REG_W_ADDL, 0x02C4}, + {S5K6AAFX_REG_WR, 0x0001}, /* 1b: Avg S.S 2b: SXGA */ + {S5K6AAFX_REG_W_ADDL, 0x02C2}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_W_ADDL, 0x02C6}, + {S5K6AAFX_REG_WR, 0x03E8}, + {S5K6AAFX_REG_WR, 0x0168}, + {S5K6AAFX_REG_W_ADDL, 0x02D4},/* flip and mirror */ + {S5K6AAFX_REG_WR, 0x0002},/* 0x02D4 value */ + {S5K6AAFX_REG_WR, 0x0002},/* 0x02D4 value */ + + /*********** APPLY PREVIEW CONFIGURATION & RUN PREVIEW ***********/ + /* REG_TC_GP_ActivePrevConfig-Select preview configuration_3 */ + {S5K6AAFX_REG_W_ADDL, 0x021C}, + {S5K6AAFX_REG_WR, 0x0003}, + /* REG_TC_GP_PrevOpenAfterChange */ + {S5K6AAFX_REG_W_ADDL, 0x0220}, + {S5K6AAFX_REG_WR, 0x0001}, + /* REG_TC_GP_NewConfigSync-Update preview configuration */ + {S5K6AAFX_REG_W_ADDL, 0x01F8}, + {S5K6AAFX_REG_WR, 0x0001}, + /* REG_TC_GP_PrevConfigChanged */ + {S5K6AAFX_REG_W_ADDL, 0x021E}, + {S5K6AAFX_REG_WR, 0x0001}, /* Enable output after config change */ + {S5K6AAFX_REG_W_ADDL, 0x01F0}, + {S5K6AAFX_REG_WR, 0x0001}, /* REG_TC_GP_EnablePreview - Start preview */ + {S5K6AAFX_REG_WR, 0x0001}, /* REG_TC_GP_EnablePreviewChanged */ + + + /* CAPTURE CONFIGURATION 0 (SXGA, YUV) */ + {S5K6AAFX_REG_W_ADDL, 0x030E}, + {S5K6AAFX_REG_WR, S5K6AAFX_FULL_SIZE_WIDTH}, /* 1280 */ + {S5K6AAFX_REG_WR, S5K6AAFX_FULL_SIZE_HEIGHT}, /* 1024 */ + {S5K6AAFX_REG_WR, 0x0005}, /* YUV */ + {S5K6AAFX_REG_W_ADDL, 0x031A}, + {S5K6AAFX_REG_WR, 0x0000}, /* PLL config */ + {S5K6AAFX_REG_W_ADDL, 0x0314}, + {S5K6AAFX_REG_WR, 0x1770}, + {S5K6AAFX_REG_WR, 0x1770}, + {S5K6AAFX_REG_WR, 0x0042}, + {S5K6AAFX_REG_W_ADDL, 0x031E}, + {S5K6AAFX_REG_WR, 0x0002}, /* 1b: Avg S.S 2b: SXGA */ + {S5K6AAFX_REG_W_ADDL, 0x031C}, + {S5K6AAFX_REG_WR, 0x0002}, + {S5K6AAFX_REG_W_ADDL, 0x0320}, + {S5K6AAFX_REG_WR, 0x0535}, + {S5K6AAFX_REG_WR, 0x0000}, + + /* REG_TC_GP_CapConfigChanged */ + {S5K6AAFX_REG_W_ADDL, 0x0226}, + {S5K6AAFX_REG_WR, 0x0001}, /* Enable output after config change */ + + {S5K6AAFX_REG_W_ADDL, 0x01FA}, + /* REG_TC_GP_PrevReqInputWidth */ + {S5K6AAFX_REG_WR, S5K6AAFX_ADJ_FULL_SIZE_WIDTH}, + /* REG_TC_GP_PrevReqInputHeight */ + {S5K6AAFX_REG_WR, S5K6AAFX_ADJ_FULL_SIZE_HEIGHT}, + /* 
REG_TC_GP_PrevInputWidthOfs */ + {S5K6AAFX_REG_WR, (S5K6AAFX_FULL_SIZE_WIDTH-S5K6AAFX_ADJ_FULL_SIZE_WIDTH)/2}, + /* REG_TC_GP_PrevInputHeightOfs */ + {S5K6AAFX_REG_WR, (S5K6AAFX_FULL_SIZE_HEIGHT-S5K6AAFX_ADJ_FULL_SIZE_HEIGHT)/2}, + {S5K6AAFX_REG_W_ADDL, 0x020A}, + /* REG_TC_GP_PrevZoomReqInputWidth */ + {S5K6AAFX_REG_WR, S5K6AAFX_ADJ_FULL_SIZE_WIDTH}, + /* REG_TC_GP_PrevZoomReqInputHeight */ + {S5K6AAFX_REG_WR, S5K6AAFX_ADJ_FULL_SIZE_HEIGHT}, + /* REG_TC_GP_PrevZoomReqInputWidthOfs */ + {S5K6AAFX_REG_WR, 0x0000}, + /* REG_TC_GP_PrevZoomReqInputHeightOfs */ + {S5K6AAFX_REG_WR, 0x0000}, + + /* REG_TC_GEP_InputsChangeRequest */ + {S5K6AAFX_REG_W_ADDL, 0x021A}, + {S5K6AAFX_REG_WR, 0x0001}, + +}; + +static const struct s5k6aafx_i2c_reg_conf const prev_snap_conf_init_tb2[] = { + /* PREVIEW CONFIGURATION 3 (VGA, YUV) */ + {S5K6AAFX_REG_W_ADDL, 0x02B4}, + {S5K6AAFX_REG_WR, S5K6AAFX_QTR_SIZE_WIDTH}, + {S5K6AAFX_REG_WR, S5K6AAFX_QTR_SIZE_HEIGHT}, + {S5K6AAFX_REG_WR, 0x0005}, /* YUV */ + {S5K6AAFX_REG_W_ADDL, 0x02C0}, + {S5K6AAFX_REG_WR, 0x0001}, /* PLL config */ + {S5K6AAFX_REG_W_ADDL, 0x02BA}, + {S5K6AAFX_REG_WR, 0x1770}, + {S5K6AAFX_REG_WR, 0x1700}, + {S5K6AAFX_REG_WR, 0x0042}, + {S5K6AAFX_REG_W_ADDL, 0x02C4}, + {S5K6AAFX_REG_WR, 0x0001}, /* 1b: Avg S.S 2b: SXGA */ + {S5K6AAFX_REG_W_ADDL, 0x0250}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_W_ADDL, 0x02C6}, + {S5K6AAFX_REG_WR, 0x0535}, + {S5K6AAFX_REG_WR, 0x0168}, + {S5K6AAFX_REG_W_ADDL, 0x02D4},/* flip and mirror */ + {S5K6AAFX_REG_WR, 0x0001},/* 0x0288 value */ + {S5K6AAFX_REG_WR, 0x0001}, + + /* PREVIEW CONFIGURATION 1 (SXGA, YUV) */ + {S5K6AAFX_REG_W_ADDL, 0x0268}, + {S5K6AAFX_REG_WR, S5K6AAFX_FULL_SIZE_WIDTH}, + {S5K6AAFX_REG_WR, S5K6AAFX_FULL_SIZE_HEIGHT}, + {S5K6AAFX_REG_WR, 0x0005}, /* YUV */ + {S5K6AAFX_REG_W_ADDL, 0x0274}, + {S5K6AAFX_REG_WR, 0x0001}, /* PLL config */ + {S5K6AAFX_REG_W_ADDL, 0x026E}, + {S5K6AAFX_REG_WR, 0x2EE0}, + {S5K6AAFX_REG_WR, 0x2EE0}, + {S5K6AAFX_REG_WR, 0x0042}, + {S5K6AAFX_REG_W_ADDL, 0x0278}, + {S5K6AAFX_REG_WR, 0x0002}, /* 1b: Avg S.S 2b: SXGA */ + {S5K6AAFX_REG_W_ADDL, 0x0276}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_W_ADDL, 0x027A}, + {S5K6AAFX_REG_WR, 0x0535}, + {S5K6AAFX_REG_WR, 0x029A}, + {S5K6AAFX_REG_W_ADDL, 0x0288}, /* flip and mirror */ + {S5K6AAFX_REG_WR, 0x0001}, /* 0x0288 value */ + {S5K6AAFX_REG_WR, 0x0001}, /* 0x0288 value */ + + /*********** APPLY PREVIEW CONFIGURATION & RUN PREVIEW ***********/ + /* REG_TC_GP_ActivePrevConfig-Select preview configuration_3 */ + {S5K6AAFX_REG_W_ADDL, 0x021C}, + {S5K6AAFX_REG_WR, 0x0001},/*Preview: 3 : VGA 30fps. 
1:Full size 15fps*/ + /* REG_TC_GP_PrevOpenAfterChange */ + {S5K6AAFX_REG_W_ADDL, 0x0220}, + {S5K6AAFX_REG_WR, 0x0001}, + /* REG_TC_GP_NewConfigSync-Update preview configuration */ + {S5K6AAFX_REG_W_ADDL, 0x01F8}, + {S5K6AAFX_REG_WR, 0x0001}, + /* REG_TC_GP_PrevConfigChanged */ + {S5K6AAFX_REG_W_ADDL, 0x021E}, + {S5K6AAFX_REG_WR, 0x0001}, /* Enable output after config change */ + {S5K6AAFX_REG_W_ADDL, 0x01F0}, + {S5K6AAFX_REG_WR, 0x0001}, /* REG_TC_GP_EnablePreview - Start preview */ + {S5K6AAFX_REG_WR, 0x0001}, /* REG_TC_GP_EnablePreviewChanged */ + + + /* CAPTURE CONFIGURATION 0 (SXGA, YUV) */ + {S5K6AAFX_REG_W_ADDL, 0x030E}, + {S5K6AAFX_REG_WR, S5K6AAFX_FULL_SIZE_WIDTH}, /* 1280 */ + {S5K6AAFX_REG_WR, S5K6AAFX_FULL_SIZE_HEIGHT}, /* 1024 */ + {S5K6AAFX_REG_WR, 0x0005}, /* YUV */ + {S5K6AAFX_REG_W_ADDL, 0x031A}, + {S5K6AAFX_REG_WR, 0x0001}, /* PLL config */ + {S5K6AAFX_REG_W_ADDL, 0x0314}, + {S5K6AAFX_REG_WR, 0x2EE0}, + {S5K6AAFX_REG_WR, 0x2EE0}, + {S5K6AAFX_REG_WR, 0x0042}, + {S5K6AAFX_REG_W_ADDL, 0x031E}, + {S5K6AAFX_REG_WR, 0x0002}, /* 1b: Avg S.S 2b: SXGA */ + {S5K6AAFX_REG_W_ADDL, 0x031C}, + {S5K6AAFX_REG_WR, 0x0000}, + {S5K6AAFX_REG_W_ADDL, 0x0320}, + {S5K6AAFX_REG_WR, 0x0535}, + {S5K6AAFX_REG_WR, 0x029A}, + + /* REG_TC_GP_CapConfigChanged */ + {S5K6AAFX_REG_W_ADDL, 0x0226}, + {S5K6AAFX_REG_WR, 0x0001}, /* Enable output after config change */ +}; + + +struct s5k6aafx_reg s5k6aafx_regs = { + .reset_init = &reset_init_tbl[0], + .reset_init_size = ARRAY_SIZE(reset_init_tbl), + .TP_init = &TP_init_tbl[0], + .TP_init_size = ARRAY_SIZE(TP_init_tbl), + .analog_setting_init = &analog_setting_init_tbl[0], + .analog_setting_init_size = ARRAY_SIZE(analog_setting_init_tbl), + .register_init = ®ister_init_tbl[0], + .register_init_size = ARRAY_SIZE(register_init_tbl), + .clk_init = &clk_init_tbl[0], + .clk_init_size = ARRAY_SIZE(clk_init_tbl), + .prev_snap_conf_init = &prev_snap_conf_init_tbl[0], + .prev_snap_conf_init_size = ARRAY_SIZE(prev_snap_conf_init_tbl), + /* for full-size preview */ + .clk_init_tb2 = &clk_init_tb2[0], + .clk_init_tb2_size = ARRAY_SIZE(clk_init_tb2), + .prev_snap_conf_init_tb2 = &prev_snap_conf_init_tb2[0], + .prev_snap_conf_init_tb2_size = ARRAY_SIZE(prev_snap_conf_init_tb2), +}; diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c index 84984f64b234a..ce56a1cdbf0a6 100644 --- a/drivers/media/video/sn9c102/sn9c102_core.c +++ b/drivers/media/video/sn9c102/sn9c102_core.c @@ -1430,9 +1430,9 @@ static DEVICE_ATTR(i2c_reg, S_IRUGO | S_IWUSR, sn9c102_show_i2c_reg, sn9c102_store_i2c_reg); static DEVICE_ATTR(i2c_val, S_IRUGO | S_IWUSR, sn9c102_show_i2c_val, sn9c102_store_i2c_val); -static DEVICE_ATTR(green, S_IWUGO, NULL, sn9c102_store_green); -static DEVICE_ATTR(blue, S_IWUGO, NULL, sn9c102_store_blue); -static DEVICE_ATTR(red, S_IWUGO, NULL, sn9c102_store_red); +static DEVICE_ATTR(green, S_IWUSR, NULL, sn9c102_store_green); +static DEVICE_ATTR(blue, S_IWUSR, NULL, sn9c102_store_blue); +static DEVICE_ATTR(red, S_IWUSR, NULL, sn9c102_store_red); static DEVICE_ATTR(frame_header, S_IRUGO, sn9c102_show_frame_header, NULL); diff --git a/drivers/media/video/tda9840.c b/drivers/media/video/tda9840.c index 5d4cf3b3d4350..22fa8202d5ca3 100644 --- a/drivers/media/video/tda9840.c +++ b/drivers/media/video/tda9840.c @@ -171,7 +171,7 @@ static int tda9840_probe(struct i2c_client *client, v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); - sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); + sd = 
kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); if (sd == NULL) return -ENOMEM; v4l2_i2c_subdev_init(sd, client, &tda9840_ops); diff --git a/drivers/media/video/tea6415c.c b/drivers/media/video/tea6415c.c index 19621ed523ec6..827425c5b866e 100644 --- a/drivers/media/video/tea6415c.c +++ b/drivers/media/video/tea6415c.c @@ -152,7 +152,7 @@ static int tea6415c_probe(struct i2c_client *client, v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); - sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); + sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); if (sd == NULL) return -ENOMEM; v4l2_i2c_subdev_init(sd, client, &tea6415c_ops); diff --git a/drivers/media/video/tea6420.c b/drivers/media/video/tea6420.c index 5ea840401f218..f350b6c245001 100644 --- a/drivers/media/video/tea6420.c +++ b/drivers/media/video/tea6420.c @@ -125,7 +125,7 @@ static int tea6420_probe(struct i2c_client *client, v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); - sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); + sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); if (sd == NULL) return -ENOMEM; v4l2_i2c_subdev_init(sd, client, &tea6420_ops); diff --git a/drivers/media/video/tlg2300/pd-video.c b/drivers/media/video/tlg2300/pd-video.c index df33a1d188bbf..a794ae62aebfc 100644 --- a/drivers/media/video/tlg2300/pd-video.c +++ b/drivers/media/video/tlg2300/pd-video.c @@ -764,10 +764,8 @@ static int pd_vidioc_s_fmt(struct poseidon *pd, struct v4l2_pix_format *pix) } ret |= send_set_req(pd, VIDEO_ROSOLU_SEL, vid_resol, &cmd_status); - if (ret || cmd_status) { - mutex_unlock(&pd->lock); + if (ret || cmd_status) return -EBUSY; - } pix_def->pixelformat = pix->pixelformat; /* save it */ pix->height = (context->tvnormid & V4L2_STD_525_60) ? 
480 : 576; diff --git a/drivers/media/video/upd64031a.c b/drivers/media/video/upd64031a.c index f8138c75be8be..1aab96a882034 100644 --- a/drivers/media/video/upd64031a.c +++ b/drivers/media/video/upd64031a.c @@ -230,7 +230,7 @@ static int upd64031a_probe(struct i2c_client *client, v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); - state = kmalloc(sizeof(struct upd64031a_state), GFP_KERNEL); + state = kzalloc(sizeof(struct upd64031a_state), GFP_KERNEL); if (state == NULL) return -ENOMEM; sd = &state->sd; diff --git a/drivers/media/video/upd64083.c b/drivers/media/video/upd64083.c index 28e0e6b6ca849..9bbe61700fd5c 100644 --- a/drivers/media/video/upd64083.c +++ b/drivers/media/video/upd64083.c @@ -202,7 +202,7 @@ static int upd64083_probe(struct i2c_client *client, v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); - state = kmalloc(sizeof(struct upd64083_state), GFP_KERNEL); + state = kzalloc(sizeof(struct upd64083_state), GFP_KERNEL); if (state == NULL) return -ENOMEM; sd = &state->sd; diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c index a1e9dfb52f698..6459b8cba2238 100644 --- a/drivers/media/video/uvc/uvc_driver.c +++ b/drivers/media/video/uvc/uvc_driver.c @@ -1264,6 +1264,14 @@ static int uvc_scan_chain_entity(struct uvc_video_chain *chain, break; + case UVC_OTT_VENDOR_SPECIFIC: + case UVC_OTT_DISPLAY: + case UVC_OTT_MEDIA_TRANSPORT_OUTPUT: + if (uvc_trace_param & UVC_TRACE_PROBE) + printk(" OT %d", entity->id); + + break; + case UVC_TT_STREAMING: if (UVC_ENTITY_IS_ITERM(entity)) { if (uvc_trace_param & UVC_TRACE_PROBE) diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c index 5673d673504b8..545c0294813d9 100644 --- a/drivers/media/video/uvc/uvc_video.c +++ b/drivers/media/video/uvc/uvc_video.c @@ -89,15 +89,19 @@ int uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit, static void uvc_fixup_video_ctrl(struct uvc_streaming *stream, struct uvc_streaming_control *ctrl) { - struct uvc_format *format; + struct uvc_format *format = NULL; struct uvc_frame *frame = NULL; unsigned int i; - if (ctrl->bFormatIndex <= 0 || - ctrl->bFormatIndex > stream->nformats) - return; + for (i = 0; i < stream->nformats; ++i) { + if (stream->format[i].index == ctrl->bFormatIndex) { + format = &stream->format[i]; + break; + } + } - format = &stream->format[ctrl->bFormatIndex - 1]; + if (format == NULL) + return; for (i = 0; i < format->nframes; ++i) { if (format->frame[i].bFrameIndex == ctrl->bFrameIndex) { diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c index ce64fe16bc604..3504fc6d7b713 100644 --- a/drivers/media/video/v4l2-device.c +++ b/drivers/media/video/v4l2-device.c @@ -131,14 +131,17 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, sd->v4l2_dev = v4l2_dev; if (sd->internal_ops && sd->internal_ops->registered) { err = sd->internal_ops->registered(sd); - if (err) + if (err) { + module_put(sd->owner); return err; + } } /* This just returns 0 if either of the two args is NULL */ err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler); if (err) { if (sd->internal_ops && sd->internal_ops->unregistered) sd->internal_ops->unregistered(sd); + module_put(sd->owner); return err; } spin_lock(&v4l2_dev->lock); diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index fd018366d6701..168cba49f1596 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -143,6 +143,15 @@ config TPS6507X 
This driver can also be built as a module. If so, the module will be called tps6507x. +config TPS65200 + tristate "tps65200 Driver" + depends on I2C + help + TPS65200 Switch charger implemented by HTC. + This could enable battery driver to set + Charging current 100mA~500mA or 500mA~1000mA + by GPIO or Register + config MENELAUS bool "Texas Instruments TWL92330/Menelaus PM chip" depends on I2C=y && ARCH_OMAP2 @@ -624,6 +633,14 @@ config MFD_WL1273_CORE driver connects the radio-wl1273 V4L2 module and the wl1273 audio codec. +config PM8058 + bool "Qualcomm PM8058 Power Management IC" + depends on MSM_SSBI && ARCH_MSM7X30 + default y if MSM_SSBI && ARCH_MSM7X30 + help + Say yes here if your board is equipped with the Qualcomm + PM8058 PMIC. + endif # MFD_SUPPORT menu "Multimedia Capabilities Port drivers" diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index a54e2c7c6a1c5..eff53307c8de6 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -34,6 +34,7 @@ obj-$(CONFIG_MFD_WM8994) += wm8994-core.o wm8994-irq.o obj-$(CONFIG_TPS65010) += tps65010.o obj-$(CONFIG_TPS6507X) += tps6507x.o +obj-$(CONFIG_TPS65200) += tps65200.o obj-$(CONFIG_MENELAUS) += menelaus.o obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o @@ -82,4 +83,5 @@ obj-$(CONFIG_MFD_JZ4740_ADC) += jz4740-adc.o obj-$(CONFIG_MFD_TPS6586X) += tps6586x.o obj-$(CONFIG_MFD_VX855) += vx855.o obj-$(CONFIG_MFD_WL1273_CORE) += wl1273-core.o +obj-$(CONFIG_PM8058) += pm8058-core.o obj-$(CONFIG_MFD_CS5535) += cs5535-mfd.o diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c index 4193af5f27439..1707d224232d1 100644 --- a/drivers/mfd/ab3100-core.c +++ b/drivers/mfd/ab3100-core.c @@ -613,7 +613,7 @@ static void ab3100_setup_debugfs(struct ab3100 *ab3100) ab3100_get_priv.ab3100 = ab3100; ab3100_get_priv.mode = false; ab3100_get_reg_file = debugfs_create_file("get_reg", - S_IWUGO, ab3100_dir, &ab3100_get_priv, + S_IWUSR, ab3100_dir, &ab3100_get_priv, &ab3100_get_set_reg_fops); if (!ab3100_get_reg_file) { err = -ENOMEM; @@ -623,7 +623,7 @@ static void ab3100_setup_debugfs(struct ab3100 *ab3100) ab3100_set_priv.ab3100 = ab3100; ab3100_set_priv.mode = true; ab3100_set_reg_file = debugfs_create_file("set_reg", - S_IWUGO, ab3100_dir, &ab3100_set_priv, + S_IWUSR, ab3100_dir, &ab3100_set_priv, &ab3100_get_set_reg_fops); if (!ab3100_set_reg_file) { err = -ENOMEM; diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c index 5fbca346b998d..681984df1c286 100644 --- a/drivers/mfd/ab3550-core.c +++ b/drivers/mfd/ab3550-core.c @@ -1053,17 +1053,17 @@ static inline void ab3550_setup_debugfs(struct ab3550 *ab) goto exit_destroy_dir; ab3550_bank_file = debugfs_create_file("register-bank", - (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_bank_fops); + (S_IRUGO | S_IWUSR), ab3550_dir, ab, &ab3550_bank_fops); if (!ab3550_bank_file) goto exit_destroy_reg; ab3550_address_file = debugfs_create_file("register-address", - (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_address_fops); + (S_IRUGO | S_IWUSR), ab3550_dir, ab, &ab3550_address_fops); if (!ab3550_address_file) goto exit_destroy_bank; ab3550_val_file = debugfs_create_file("register-value", - (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_val_fops); + (S_IRUGO | S_IWUSR), ab3550_dir, ab, &ab3550_val_fops); if (!ab3550_val_file) goto exit_destroy_address; diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c index 3c1541ae72232..64748e42ac039 100644 --- a/drivers/mfd/ab8500-debugfs.c +++ b/drivers/mfd/ab8500-debugfs.c @@ -585,18 +585,18 @@ static 
int __devinit ab8500_debug_probe(struct platform_device *plf) goto exit_destroy_dir; ab8500_bank_file = debugfs_create_file("register-bank", - (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_bank_fops); + (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, &ab8500_bank_fops); if (!ab8500_bank_file) goto exit_destroy_reg; ab8500_address_file = debugfs_create_file("register-address", - (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, + (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, &ab8500_address_fops); if (!ab8500_address_file) goto exit_destroy_bank; ab8500_val_file = debugfs_create_file("register-value", - (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_val_fops); + (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, &ab8500_val_fops); if (!ab8500_val_file) goto exit_destroy_address; diff --git a/drivers/mfd/pm8058-core.c b/drivers/mfd/pm8058-core.c new file mode 100644 index 0000000000000..24410b5877c96 --- /dev/null +++ b/drivers/mfd/pm8058-core.c @@ -0,0 +1,958 @@ +/* + * Copyright (c) 2010 Google, Inc. + * Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * Author: Dima Zavin + * - Based on a driver from Code Aurora Forum. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +enum { + DEBUG_IRQS = 1U << 0, +}; +static int debug_mask = 0; +module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); + +#define REG_HWREV 0x0002 /* PMIC4 revision */ + +#define REG_IRQ_PERM 0x01a6 +#define REG_IRQ_PERM_BLK_SEL 0x01ac +#define REG_IRQ_ROOT 0x01bb +#define REG_IRQ_M_STATUS1 0x01bc +#define REG_IRQ_M_STATUS2 0x01bd +#define REG_IRQ_M_STATUS3 0x01be +#define REG_IRQ_M_STATUS4 0x01bf +#define REG_IRQ_BLK_SEL 0x01c0 +#define REG_IRQ_IT_STATUS 0x01c1 +#define REG_IRQ_CONFIG 0x01c2 +#define REG_IRQ_RT_STATUS 0x01c3 +#define REG_GPIO_CTRL(x) (0x0150 + (x)) + +#define IRQ_CFG_CLR (1 << 3) +#define IRQ_CFG_MASK_RE (1 << 2) +#define IRQ_CFG_MASK_FE (1 << 1) +#define IRQ_CFG_LVL_SEL (1 << 0) + +#define NUM_BLOCKS 32 +#define IRQS_PER_BLOCK 8 +#define NUM_PMIRQS (NUM_BLOCKS * IRQS_PER_BLOCK) + +/* XXX: why are mpp's different than gpios? should we just put them into + * the gpio space? */ +#define MPP_IRQ_OFFSET (16 * 8) +#define GPIO_IRQ_OFFSET (24 * 8) +#define KEYPAD_IRQ_OFFSET (9 * 8 + 2) +#define CHARGER_IRQ_OFFSET (1 * 8 + 7) + +/* this defines banks of irq space. We want to provide a compact irq space + * to the kernel, but there several ranges of irqs in an otherwise sparse + * map of available/accessible irqs on the pm8058. 
So, + * + * bank 0 - GPIO IRQs start=(24 * 8) cnt=40 (gpios 0-39) + * bank 1 - MPP IRQs start=(16 * 8) cnt=12 (mpps 0-11) + * bank 2 - keypad irqs start=(9*8 + 1) cnt=2 + * bank 3 - charger irqs start=(1*8 + 7) cnt=7 (ends at 2*8 + 5) + * + */ +struct pm8058_irq_bank { + unsigned int start; /* will be added to the chip irq_base */ + unsigned int cnt; + unsigned int offset; /* offset into device's real irq map */ +}; + +static struct pm8058_irq_bank pm8058_irq_banks[] = { + { + .start = PM8058_FIRST_GPIO_IRQ, + .cnt = PM8058_NUM_GPIO_IRQS, + .offset = GPIO_IRQ_OFFSET, + }, + { + .start = PM8058_FIRST_MPP_IRQ, + .cnt = PM8058_NUM_MPP_IRQS, + .offset = MPP_IRQ_OFFSET, + }, + { + .start = PM8058_FIRST_KEYPAD_IRQ, + .cnt = PM8058_NUM_KEYPAD_IRQS, + .offset = KEYPAD_IRQ_OFFSET, + }, + { + .start = PM8058_FIRST_CHARGER_IRQ, + .cnt = PM8058_NUM_CHARGER_IRQS, + .offset = CHARGER_IRQ_OFFSET, + }, +}; +#define NUM_IRQ_BANKS ARRAY_SIZE(pm8058_irq_banks) + +struct pm8058_irq_group { + u16 stat_reg; + u8 valid_mask; + u8 root_mask; + u8 block_offset; +}; + +static const struct pm8058_irq_group pm8058_irq_groups[] = { + { + .stat_reg = REG_IRQ_M_STATUS1, + .valid_mask = 0x6, + .root_mask = 0x2, + .block_offset = 0, + }, + { + .stat_reg = REG_IRQ_M_STATUS2, + .valid_mask = 0x2, + .root_mask = 0x4, + .block_offset = 8, + }, + { + .stat_reg = REG_IRQ_M_STATUS4, + .valid_mask = 0x1f, + .root_mask = 0x10, + .block_offset = 24, + }, +}; +#define NUM_ROOT_GROUPS ARRAY_SIZE(pm8058_irq_groups) + +struct pm8058_irq_info { + u8 cfg; + u8 cfg_val; + u8 mask; + u8 blk; + u8 blk_bit; + u8 wake; +}; + +struct pm8058 { + struct device *dev; + unsigned int devirq; + + spinlock_t lock; + + unsigned int irq_base; + struct pm8058_irq_info irqs[PM8058_NUM_IRQS]; + unsigned int pmirqs[NUM_PMIRQS]; + int wake_cnt; + + struct gpio_chip gpio_chip; + u8 gpio_flags[PM8058_NUM_GPIOS]; + + struct pm8058_platform_data *pdata; + + struct platform_device *kp_pdev; + struct platform_device *charger_pdev; +}; + +static struct pm8058 *the_pm8058; + +static int read_irq_block_reg(struct pm8058 *pmic, u8 blk, u16 reg, u8 *val); +static int get_curr_irq_stat(struct pm8058 *pmic, unsigned int irq); + +int pm8058_readb(struct device *dev, u16 addr, u8 *val) +{ + struct pm8058 *pmic = dev_get_drvdata(dev); + + return msm_ssbi_read(pmic->dev->parent, addr, val, 1); +} +EXPORT_SYMBOL(pm8058_readb); + +int pm8058_writeb(struct device *dev, u16 addr, u8 val) +{ + struct pm8058 *pmic = dev_get_drvdata(dev); + + return msm_ssbi_write(pmic->dev->parent, addr, &val, 1); +} +EXPORT_SYMBOL(pm8058_writeb); + +int pm8058_write_buf(struct device *dev, u16 addr, u8 *buf, int cnt) +{ + struct pm8058 *pmic = dev_get_drvdata(dev); + + return msm_ssbi_write(pmic->dev->parent, addr, buf, cnt); +} +EXPORT_SYMBOL(pm8058_write_buf); + +int pm8058_read_buf(struct device *dev, u16 addr, u8 *buf, int cnt) +{ + struct pm8058 *pmic = dev_get_drvdata(dev); + + return msm_ssbi_read(pmic->dev->parent, addr, buf, cnt); +} +EXPORT_SYMBOL(pm8058_read_buf); + +static int _dir_map[] = { + [0] = 0x3, + [PM8058_GPIO_INPUT] = 0x0, + [PM8058_GPIO_OUTPUT] = 0x2, + [PM8058_GPIO_OUTPUT_HIGH] = 0x2, +}; + +int pm8058_gpio_mux_cfg(struct device *dev, unsigned int gpio, + struct pm8058_pin_config *cfg) +{ + struct pm8058 *pmic = dev_get_drvdata(dev); + unsigned long flags; + int ret; + u8 bank[6]; + + gpio -= pmic->gpio_chip.base; + + /* bit 7 - write + * bit 6:4 - bank select */ + bank[0] = ((1 << 7) | (0 << 4) | ((cfg->vin_src & 0x7) << 1) | 0x1); + bank[1] = ((1 << 7) | (1 << 4) | 
(_dir_map[cfg->dir] << 2) | + ((cfg->flags & PM8058_GPIO_OPEN_DRAIN ? 0x1 : 0) << 1) | + ((cfg->dir & PM8058_GPIO_OUTPUT_HIGH ? 0x1 : 0) << 0)); + bank[2] = ((1 << 7) | (2 << 4) | ((cfg->pull_up & 0x7) << 1)); + bank[3] = ((1 << 7) | (3 << 4) | + ((cfg->strength & 0x3) << 2) | + ((cfg->flags & PM8058_GPIO_HIGH_Z ? 0x1 : 0x0) << 0)); + bank[4] = ((1 << 7) | (4 << 4) | ((cfg->func & 0x7) << 1)); + bank[5] = ((1 << 7) | (5 << 4) | + ((cfg->flags & PM8058_GPIO_INV_IRQ_POL ? 0 : 1) << 3)); + + spin_lock_irqsave(&pmic->lock, flags); + + pmic->gpio_flags[gpio] = cfg->flags | PM8058_GPIO_CONFIGURED; + ret = pm8058_write_buf(pmic->dev, REG_GPIO_CTRL(gpio), + bank, sizeof(bank)); + + spin_unlock_irqrestore(&pmic->lock, flags); + + if (ret) + pr_err("%s: failed writing config for gpio %d (%d)\n", __func__, + gpio, ret); + return ret; +} +EXPORT_SYMBOL(pm8058_gpio_mux_cfg); + +int pm8058_gpio_mux(unsigned int gpio, struct pm8058_pin_config *cfg) +{ + if (!the_pm8058) + return -ENODEV; + return pm8058_gpio_mux_cfg(the_pm8058->dev, gpio, cfg); +} +EXPORT_SYMBOL(pm8058_gpio_mux); + +/* gpio funcs */ +static int read_gpio_bank(struct pm8058 *pmic, unsigned gpio, u8 bank, u8 *val) +{ + int ret; + + ret = pm8058_writeb(pmic->dev, REG_GPIO_CTRL(gpio), (bank & 0x7) << 4); + if (ret) + goto out; + ret = pm8058_readb(pmic->dev, REG_GPIO_CTRL(gpio), val); + if (ret) + goto out; +out: + return ret; +} + +static int pm8058_gpio_request(struct gpio_chip *chip, unsigned gpio) +{ + struct pm8058 *pmic = container_of(chip, struct pm8058, gpio_chip); + unsigned long flags; + int ret; + u8 bank1; + u8 bank3; + u8 bank5; + + spin_lock_irqsave(&pmic->lock, flags); + if (pmic->gpio_flags[gpio] & PM8058_GPIO_CONFIGURED) { + ret = 0; + goto out; + } + + ret = read_gpio_bank(pmic, gpio, 1, &bank1); + if (ret) { + pr_err("%s: can't read bank 1\n", __func__); + goto out; + } + + ret = read_gpio_bank(pmic, gpio, 3, &bank3); + if (ret) { + pr_err("%s: can't read bank 3\n", __func__); + goto out; + } + + ret = read_gpio_bank(pmic, gpio, 5, &bank5); + if (ret) { + pr_err("%s: can't read bank 5\n", __func__); + goto out; + } + + pmic->gpio_flags[gpio] = bank1 & 0x2 ? PM8058_GPIO_OPEN_DRAIN : 0; + pmic->gpio_flags[gpio] |= bank3 & 0x1 ? PM8058_GPIO_HIGH_Z : 0; + pmic->gpio_flags[gpio] |= bank5 & 0x8 ? 0 : PM8058_GPIO_INV_IRQ_POL; + pmic->gpio_flags[gpio] |= PM8058_GPIO_CONFIGURED; + +out: + spin_unlock_irqrestore(&pmic->lock, flags); + return 0; +} + +static void pm8058_gpio_free(struct gpio_chip *chip, unsigned gpio) +{ + struct pm8058 *pmic = container_of(chip, struct pm8058, gpio_chip); + unsigned long flags; + + /* XXX: set high Z maybe?? */ + spin_lock_irqsave(&pmic->lock, flags); + pmic->gpio_flags[gpio] = 0; + spin_unlock_irqrestore(&pmic->lock, flags); +} + +static int gpio_set_dir(struct pm8058 *pmic, unsigned gpio, int dir) +{ + unsigned long flags; + int ret; + u8 val; + + spin_lock_irqsave(&pmic->lock, flags); + /* only need to write bank1 */ + val = (pmic->gpio_flags[gpio] & PM8058_GPIO_OPEN_DRAIN ? 0x1 : 0) << 1; + val |= ((1 << 7) | (1 << 4) | (_dir_map[dir] << 2) | + (dir & PM8058_GPIO_OUTPUT_HIGH ? 
0x1 : 0x0)); + ret = pm8058_writeb(pmic->dev, REG_GPIO_CTRL(gpio), val); + if (ret) + pr_err("%s: erorr setting dir %x (%d)\n", __func__, dir, ret); + + spin_unlock_irqrestore(&pmic->lock, flags); + return ret; +} + +static int pm8058_gpio_direction_in(struct gpio_chip *chip, unsigned gpio) +{ + struct pm8058 *pmic = container_of(chip, struct pm8058, gpio_chip); + + return gpio_set_dir(pmic, gpio, PM8058_GPIO_INPUT); +} + +static int pm8058_gpio_direction_out(struct gpio_chip *chip, unsigned gpio, + int val) +{ + struct pm8058 *pmic = container_of(chip, struct pm8058, gpio_chip); + + val = val ? PM8058_GPIO_OUTPUT_HIGH : PM8058_GPIO_OUTPUT; + return gpio_set_dir(pmic, gpio, val); +} + +static void pm8058_gpio_set(struct gpio_chip *chip, unsigned gpio, int val) +{ + struct pm8058 *pmic = container_of(chip, struct pm8058, gpio_chip); + + /* XXX: for now, let's always force the gpio to be an output when + * the user calls this func. I'm not even sure that it's wrong to + * assume that. */ + val = val ? PM8058_GPIO_OUTPUT_HIGH : PM8058_GPIO_OUTPUT; + gpio_set_dir(pmic, gpio, val); +} + +static int pm8058_gpio_get(struct gpio_chip *chip, unsigned gpio) +{ + struct pm8058 *pmic = container_of(chip, struct pm8058, gpio_chip); + + /* XXX: assumes gpio maps 1:1 to irq @ 0 */ + return get_curr_irq_stat(pmic, gpio); +} + +static int pm8058_gpio_to_irq(struct gpio_chip *chip, unsigned gpio) +{ + struct pm8058 *pmic = container_of(chip, struct pm8058, gpio_chip); + return pmic->irq_base + gpio; +} + +static struct gpio_chip pm8058_base_gpio_chip = { + .label = "pm8058", + .owner = THIS_MODULE, + .request = pm8058_gpio_request, + .free = pm8058_gpio_free, + .direction_input = pm8058_gpio_direction_in, + .get = pm8058_gpio_get, + .direction_output = pm8058_gpio_direction_out, + .set = pm8058_gpio_set, + .to_irq = pm8058_gpio_to_irq, +}; + +/* irq funcs */ +static int read_irq_block_reg(struct pm8058 *pmic, u8 blk, u16 reg, u8 *val) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(&pmic->lock, flags); + ret = pm8058_writeb(pmic->dev, REG_IRQ_BLK_SEL, blk); + if (ret) { + pr_err("%s: error setting block select (%d)\n", __func__, ret); + goto done; + } + + ret = pm8058_readb(pmic->dev, reg, val); + if (ret) + pr_err("%s: error setting bit select (%d)\n", __func__, ret); + +done: + spin_unlock_irqrestore(&pmic->lock, flags); + return ret; +} + +static int get_curr_irq_stat(struct pm8058 *pmic, unsigned int irq) +{ + int ret; + u8 val; + + ret = read_irq_block_reg(pmic, pmic->irqs[irq].blk, REG_IRQ_RT_STATUS, + &val); + if (ret) { + pr_err("%s: can't read irq %d status\n", __func__, irq); + goto done; + } + + ret = !!(val & (1 << pmic->irqs[irq].blk_bit)); + +done: + return ret; +} + +int pm8058_irq_get_status(struct device *dev, unsigned int irq) +{ + struct pm8058 *pmic = dev_get_drvdata(dev); + + if (irq >= PM8058_NUM_IRQS) + return -EINVAL; + return get_curr_irq_stat(pmic, irq); +} +EXPORT_SYMBOL(pm8058_irq_get_status); + +static int cfg_irq_blk_bit_perm(struct pm8058 *pmic, u8 blk, u8 mask) +{ + int ret; + unsigned long flags; + u8 tmp; + + spin_lock_irqsave(&pmic->lock, flags); + ret = pm8058_writeb(pmic->dev, REG_IRQ_PERM_BLK_SEL, blk); + if (ret) { + pr_err("%s: error setting block select (%d)\n", __func__, ret); + goto done; + } + + ret = pm8058_readb(pmic->dev, REG_IRQ_PERM, &tmp); + if (ret) { + pr_err("%s: error getting (%d)\n", __func__, ret); + goto done; + } + + ret = pm8058_writeb(pmic->dev, REG_IRQ_PERM, tmp | mask); + if (ret) + pr_err("%s: error writing %d 0x%x 0x%x (0x%x)\n", 
__func__, + ret, blk, REG_IRQ_PERM, mask); + +done: + spin_unlock_irqrestore(&pmic->lock, flags); + return ret; +} + +static int _write_irq_blk_bit_cfg(struct pm8058 *pmic, u8 blk, u8 bit, u8 cfg) +{ + int ret; + + ret = pm8058_writeb(pmic->dev, REG_IRQ_BLK_SEL, blk); + if (ret) { + pr_err("%s: error setting block select (%d)\n", __func__, ret); + goto done; + } + + cfg = (1 << 7) | (cfg & 0xf) | (bit << 4); + ret = pm8058_writeb(pmic->dev, REG_IRQ_CONFIG, cfg); + if (ret) + pr_err("%s: error writing irq cfg (%d)\n", __func__, ret); + +done: + return ret; +} + +static int write_irq_config_locked(struct pm8058 *pmic, unsigned int irq, + u8 cfg) +{ + return _write_irq_blk_bit_cfg(pmic, pmic->irqs[irq].blk, + pmic->irqs[irq].blk_bit, cfg); +} + +static int do_irq_master(struct pm8058 *pmic, int group) +{ + int i; + int j; + int ret; + u8 val; + unsigned long stat; + + ret = pm8058_readb(pmic->dev, pm8058_irq_groups[group].stat_reg, &val); + if (ret) { + pr_err("%s: Can't read master status\n", __func__); + goto done; + } + + if (debug_mask & DEBUG_IRQS) + pr_info("%s: master %d %02x\n", __func__, group, val); + stat = val & pm8058_irq_groups[group].valid_mask; + for_each_set_bit(i, &stat, BITS_PER_BYTE) { + u8 blk = pm8058_irq_groups[group].block_offset + i; + unsigned long blk_stat; + + ret = read_irq_block_reg(pmic, blk, REG_IRQ_IT_STATUS, &val); + if (ret) { + pr_err("%s: can't read block status\n", __func__); + goto done; + } + + blk_stat = val; + for_each_set_bit(j, &blk_stat, BITS_PER_BYTE) { + u8 irq = blk * 8 + j; + + /* XXX: we should mask these out and count em' */ + if (pmic->pmirqs[irq] == 0xffffffffU) { + pr_warning("Unexpected pmirq %d\n", irq); + continue; + } + generic_handle_irq(pmic->pmirqs[irq] + pmic->irq_base); + } + } + +done: + return ret; +} + +static void pm8058_irq_handler(unsigned int irq, struct irq_desc *desc) +{ + struct pm8058 *pmic = get_irq_data(irq); + int ret; + int i; + u8 root; + + desc->chip->ack(irq); + ret = pm8058_readb(pmic->dev, REG_IRQ_ROOT, &root); + if (ret) { + pr_err("%s: Can't read root status\n", __func__); + return; + } + + if (debug_mask & DEBUG_IRQS) + pr_info("%s: root %02x\n", __func__, root); + for (i = 0; i < NUM_ROOT_GROUPS; ++i) { + if (root & pm8058_irq_groups[i].root_mask) + do_irq_master(pmic, i); + } +} + +static void pm8058_irq_ack(unsigned int _irq) +{ + struct pm8058 *pmic = get_irq_chip_data(_irq); + unsigned int irq = _irq - pmic->irq_base; + unsigned long flags; + + spin_lock_irqsave(&pmic->lock, flags); + write_irq_config_locked(pmic, irq, + pmic->irqs[irq].cfg_val | IRQ_CFG_CLR); + spin_unlock_irqrestore(&pmic->lock, flags); +} + +static void pm8058_irq_mask(unsigned int _irq) +{ + struct pm8058 *pmic = get_irq_chip_data(_irq); + unsigned int irq = _irq - pmic->irq_base; + struct pm8058_irq_info *irq_info = &pmic->irqs[irq]; + unsigned long flags; + + spin_lock_irqsave(&pmic->lock, flags); + irq_info->mask = IRQ_CFG_MASK_FE | IRQ_CFG_MASK_RE; + irq_info->cfg_val = irq_info->cfg | irq_info->mask; + write_irq_config_locked(pmic, irq, irq_info->cfg_val); + spin_unlock_irqrestore(&pmic->lock, flags); +} + +static void pm8058_irq_unmask(unsigned int _irq) +{ + struct pm8058 *pmic = get_irq_chip_data(_irq); + unsigned int irq = _irq - pmic->irq_base; + struct pm8058_irq_info *irq_info = &pmic->irqs[irq]; + unsigned long flags; + + spin_lock_irqsave(&pmic->lock, flags); + irq_info->mask = 0; + irq_info->cfg_val = irq_info->cfg; + write_irq_config_locked(pmic, irq, irq_info->cfg_val); + spin_unlock_irqrestore(&pmic->lock, flags); 
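+
+ /* Note: mask/unmask only toggle IRQ_CFG_MASK_RE/IRQ_CFG_MASK_FE through
+  * ->mask; the edge/level selection programmed by pm8058_irq_set_type()
+  * stays in ->cfg, so unmasking restores the configured trigger. */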
+} + +static void pm8058_irq_disable(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + + pm8058_irq_mask(irq); + desc->status |= IRQ_MASKED; +} + +static void pm8058_irq_enable(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + + pm8058_irq_unmask(irq); + desc->status &= ~IRQ_MASKED; +} + +static int pm8058_irq_set_type(unsigned int _irq, unsigned int flow_type) +{ + struct pm8058 *pmic = get_irq_chip_data(_irq); + unsigned int irq = _irq - pmic->irq_base; + struct pm8058_irq_info *irq_info = &pmic->irqs[irq]; + unsigned long flags; + int ret; + u8 cfg; + + cfg = IRQ_CFG_MASK_RE | IRQ_CFG_MASK_FE; + if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) { + if (flow_type & IRQF_TRIGGER_RISING) + cfg &= ~IRQ_CFG_MASK_RE; + if (flow_type & IRQF_TRIGGER_FALLING) + cfg &= ~IRQ_CFG_MASK_FE; + __set_irq_handler_unlocked(_irq, handle_edge_irq); + } else { + cfg |= IRQ_CFG_LVL_SEL; + if (flow_type & IRQF_TRIGGER_HIGH) + cfg &= ~IRQ_CFG_MASK_RE; + else + cfg &= ~IRQ_CFG_MASK_FE; + __set_irq_handler_unlocked(_irq, handle_level_irq); + } + + /* in case the irq was masked when the type was set, we don't want + * to unmask it */ + spin_lock_irqsave(&pmic->lock, flags); + irq_info->cfg = cfg; + irq_info->cfg_val = irq_info->cfg | irq_info->mask; + ret = write_irq_config_locked(pmic, irq, + irq_info->cfg_val | IRQ_CFG_CLR); + spin_unlock_irqrestore(&pmic->lock, flags); + + return ret; +} + +static int pm8058_irq_set_wake(unsigned int _irq, unsigned int on) +{ + struct pm8058 *pmic = get_irq_chip_data(_irq); + unsigned int irq = _irq - pmic->irq_base; + struct pm8058_irq_info *irq_info = &pmic->irqs[irq]; + unsigned long flags; + + spin_lock_irqsave(&pmic->lock, flags); + if (on) { + if (!irq_info->wake) { + irq_info->wake = 1; + pmic->wake_cnt++; + } + } else { + if (irq_info->wake) { + irq_info->wake = 0; + pmic->wake_cnt--; + } + } + spin_unlock_irqrestore(&pmic->lock, flags); + + return 0; +} + +static struct irq_chip pm8058_irq_chip = { + .name = "pm8058", + .ack = pm8058_irq_ack, + .mask = pm8058_irq_mask, + .unmask = pm8058_irq_unmask, + .disable = pm8058_irq_disable, + .enable = pm8058_irq_enable, + .set_type = pm8058_irq_set_type, + .set_wake = pm8058_irq_set_wake, +}; + +static int pm8058_irq_init(struct pm8058 *pmic, unsigned int irq_base) +{ + int i; + int j; + + /* mask/clear all the irqs */ + for (i = 0; i < NUM_BLOCKS; ++i) + for (j = 0; j < IRQS_PER_BLOCK; ++j) + _write_irq_blk_bit_cfg(pmic, i, j, (IRQ_CFG_MASK_RE | + IRQ_CFG_MASK_FE | + IRQ_CFG_CLR)); + + memset(pmic->pmirqs, 0xff, NUM_PMIRQS * sizeof(pmic->pmirqs[0])); + for (i = 0; i < NUM_IRQ_BANKS; ++i) { + struct pm8058_irq_bank *bank = &pm8058_irq_banks[i]; + + for (j = 0; j < bank->cnt; ++j) { + unsigned int irq = bank->start + j; + unsigned int pmirq = bank->offset + j; + + BUG_ON(irq >= PM8058_NUM_IRQS); + + /* by default mask the irq */ + pmic->irqs[irq].cfg = 0; + pmic->irqs[irq].mask = + IRQ_CFG_MASK_RE | IRQ_CFG_MASK_FE; + pmic->irqs[irq].cfg_val = pmic->irqs[irq].mask; + pmic->irqs[irq].blk = pmirq / 8; + pmic->irqs[irq].blk_bit = pmirq % 8; + pmic->pmirqs[pmirq] = irq; + + BUG_ON(pmic->irqs[irq].blk >= NUM_BLOCKS); + + /* XXX: slightly inefficient since we can end up + * doing it 8 times per block per bank, but it's + * the easiet. Optimize if gets too slow. 
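+ *
+ * As a worked example of the mapping set up above: a GPIO irq falls in
+ * the bank whose offset is GPIO_IRQ_OFFSET (24 * 8 = 192), so PM8058
+ * GPIO 5 gets pmirq = 192 + 5 = 197, i.e. blk = 24 and blk_bit = 5, and
+ * is registered as kernel irq irq_base + PM8058_FIRST_GPIO_IRQ + 5
+ * (matching what pm8058_gpio_to_irq() returns, assuming
+ * PM8058_FIRST_GPIO_IRQ is 0 as the 1:1 gpio-to-irq comment above
+ * implies).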
*/ + + /* ensure we set the permissions for the irqs in + * this bank */ + cfg_irq_blk_bit_perm(pmic, pmic->irqs[irq].blk, + 1 << pmic->irqs[irq].blk_bit); + + set_irq_chip(irq_base + irq, &pm8058_irq_chip); + set_irq_chip_data(irq_base + irq, pmic); + set_irq_handler(irq_base + irq, handle_edge_irq); + set_irq_flags(irq_base + irq, IRQF_VALID); + } + + } + + return 0; +} + +static struct platform_device *add_child_device( + struct pm8058 *pmic, const char *name, void *pdata, + struct resource *res, int num_res) +{ + struct platform_device *pdev; + int ret; + + pdev = platform_device_alloc(name, -1); + if (!pdev) { + pr_err("%s: cannot allocate pdev for '%s'\n", __func__, name); + ret = -ENOMEM; + goto err; + } + + pdev->dev.parent = pmic->dev; + pdev->dev.platform_data = pdata; + + ret = platform_device_add_resources(pdev, res, num_res); + if (ret) { + pr_err("%s: can't add resources for '%s'\n", __func__, name); + goto err; + } + + ret = platform_device_add(pdev); + if (ret) { + pr_err("%s: cannot add child platform device '%s'\n", __func__, + name); + goto err; + } + return pdev; + +err: + if (pdev) + platform_device_put(pdev); + return ERR_PTR(ret); +} + +static int add_keypad_device(struct pm8058 *pmic, void *pdata) +{ + struct platform_device *pdev; + struct resource irq_res[] = { + { + .start = pmic->irq_base + PM8058_KEYPAD_IRQ, + .end = pmic->irq_base + PM8058_KEYPAD_IRQ, + .flags = IORESOURCE_IRQ, + .name = "kp_sense", + }, + { + .start = pmic->irq_base + PM8058_KEYPAD_STUCK_IRQ, + .end = pmic->irq_base + PM8058_KEYPAD_STUCK_IRQ, + .flags = IORESOURCE_IRQ, + .name = "kp_stuck", + } + }; + + pdev = add_child_device(pmic, "pm8058-keypad", pdata, irq_res, + ARRAY_SIZE(irq_res)); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); + + pmic->kp_pdev = pdev; + return 0; +} + +static int add_charger_device(struct pm8058 *pmic, void *pdata) +{ + struct platform_device *pdev; + struct resource irq_res[] = { + { + .start = pmic->irq_base + PM8058_CHGVAL_IRQ, + .end = pmic->irq_base + PM8058_CHGVAL_IRQ, + .flags = IORESOURCE_IRQ, + .name = "chgval_irq", + }, + { + .start = pmic->irq_base + PM8058_FASTCHG_IRQ, + .end = pmic->irq_base + PM8058_FASTCHG_IRQ, + .flags = IORESOURCE_IRQ, + .name = "fastchg_irq", + } + }; + + pdev = add_child_device(pmic, "pm8058-charger", pdata, irq_res, + ARRAY_SIZE(irq_res)); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); + + pmic->charger_pdev = pdev; + return 0; +} + +static int pm8058_probe(struct platform_device *pdev) +{ + struct pm8058_platform_data *pdata = pdev->dev.platform_data; + struct pm8058 *pmic; + int devirq; + int ret; + u8 val; + + if (!pdata) { + pr_err("%s: no platform data\n", __func__); + return -EINVAL; + } + + devirq = platform_get_irq(pdev, 0); + if (devirq < 0) { + pr_err("%s: missing devirq\n", __func__); + return devirq; + } + + pmic = kzalloc(sizeof(struct pm8058), GFP_KERNEL); + if (!pmic) { + pr_err("%s: Cannot alloc pm8058 struct\n", __func__); + return -ENOMEM; + } + + /* Read PMIC chip revision */ + ret = msm_ssbi_read(pdev->dev.parent, REG_HWREV, &val, sizeof(val)); + if (ret) + goto err_read_rev; + pr_info("%s: PMIC revision: %x\n", __func__, val); + + pmic->dev = &pdev->dev; + pmic->irq_base = pdata->irq_base; + pmic->devirq = devirq; + spin_lock_init(&pmic->lock); + pmic->pdata = pdata; + platform_set_drvdata(pdev, pmic); + + ret = pm8058_irq_init(pmic, pmic->irq_base); + if (ret) + goto err_irq_init; + + memcpy(&pmic->gpio_chip, &pm8058_base_gpio_chip, + sizeof(struct gpio_chip)); + pmic->gpio_chip.dev = pmic->dev; + 
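+ /* gpio_base and irq_base both come from board platform data; the gpio
+  * callbacks and pm8058_gpio_to_irq() treat PMIC GPIO n as kernel irq
+  * irq_base + n, so the two ranges presumably need to be reserved
+  * consistently by the board code. */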
pmic->gpio_chip.base = pdata->gpio_base; + pmic->gpio_chip.ngpio = PM8058_NUM_GPIOS; + + ret = gpiochip_add(&pmic->gpio_chip); + if (ret) { + pr_err("%s: can't register gpio chip\n", __func__); + goto err_gpiochip_add; + } + + set_irq_type(devirq, IRQ_TYPE_LEVEL_LOW); + set_irq_data(devirq, pmic); + set_irq_chained_handler(devirq, pm8058_irq_handler); + set_irq_wake(devirq, 1); + + the_pm8058 = pmic; + + if (pdata->init) { + ret = pdata->init(pmic->dev); + if (ret) { + pr_err("%s: error in board init\n", __func__); + goto err_pdata_init; + } + } + + if (pdata->keypad_pdata) { + ret = add_keypad_device(pmic, pdata->keypad_pdata); + if (ret) { + pr_err("%s: can't add child keypad device\n", __func__); + goto err_add_kp_dev; + } + } + + if (pdata->charger_pdata) { + ret = add_charger_device(pmic, pdata->charger_pdata); + if (ret) { + pr_err("%s: can't add child charger dev\n", __func__); + goto err_add_charger_dev; + } + } + + return 0; + +err_add_charger_dev: + if (pmic->kp_pdev) + platform_device_put(pmic->kp_pdev); +err_add_kp_dev: +err_pdata_init: + the_pm8058 = NULL; + set_irq_wake(devirq, 0); + set_irq_chained_handler(devirq, NULL); + WARN_ON(gpiochip_remove(&pmic->gpio_chip)); +err_gpiochip_add: +err_irq_init: + platform_set_drvdata(pdev, NULL); +err_read_rev: + kfree(pmic); + return ret; +} + +static struct platform_driver pm8058_driver = { + .probe = pm8058_probe, + .driver = { + .name = "pm8058-core", + .owner = THIS_MODULE, + }, +}; + +static int __init pm8058_init(void) +{ + return platform_driver_register(&pm8058_driver); +} +postcore_initcall(pm8058_init); diff --git a/drivers/mfd/tps65200.c b/drivers/mfd/tps65200.c new file mode 100644 index 0000000000000..4d987dc8f78bd --- /dev/null +++ b/drivers/mfd/tps65200.c @@ -0,0 +1,356 @@ +/* drivers/i2c/chips/tps65200.c + * + * Copyright (C) 2009 HTC Corporation + * Author: Josh Hsiao + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include + +static const unsigned short normal_i2c[] = { I2C_CLIENT_END }; +static int tps65200_initial = -1; +/** + * Insmod parameters + */ +I2C_CLIENT_INSMOD_1(tps65200); + +static int tps65200_probe(struct i2c_client *client, + const struct i2c_device_id *id); +static int tps65200_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info); +static int tps65200_remove(struct i2c_client *client); + + +/* Supersonic for Switch charger */ +struct tps65200_i2c_client { + struct i2c_client *client; + u8 address; + /* max numb of i2c_msg required is for read =2 */ + struct i2c_msg xfer_msg[2]; + /* To lock access to xfer_msg */ + struct mutex xfer_lock; +}; +static struct tps65200_i2c_client tps65200_i2c_module; +/** +Function:tps65200_i2c_write +Target: Write a byte to Switch charger +Timing: TBD +INPUT: value-> write value + reg -> reg offset + num-> number of byte to write +return :TRUE-->OK + FALSE-->Fail + */ +static int tps65200_i2c_write(u8 *value, u8 reg, u8 num_bytes) +{ + int ret; + struct tps65200_i2c_client *tps; + struct i2c_msg *msg; + + tps = &tps65200_i2c_module; + + mutex_lock(&tps->xfer_lock); + /* + * [MSG1]: fill the register address data + * fill the data Tx buffer + */ + msg = &tps->xfer_msg[0]; + msg->addr = tps->address; + msg->len = num_bytes + 1; + msg->flags = 0; + msg->buf = value; + /* over write the first byte of buffer with the register address */ + *value = reg; + ret = i2c_transfer(tps->client->adapter, tps->xfer_msg, 1); + mutex_unlock(&tps->xfer_lock); + + /* i2cTransfer returns num messages.translate it pls.. */ + if (ret >= 0) + ret = 0; + return ret; +} + + +/** +Function:tps65200_i2c_read +Target: Read a byte from Switch charger +Timing: TBD +INPUT: value-> store buffer + reg -> reg offset to read + num-> number of byte to read +return :TRUE-->OK + FALSE-->Fail + */ +static int tps65200_i2c_read(u8 *value, u8 reg, u8 num_bytes) +{ + int ret; + u8 val; + struct tps65200_i2c_client *tps; + struct i2c_msg *msg; + + tps = &tps65200_i2c_module; + + mutex_lock(&tps->xfer_lock); + /* [MSG1] fill the register address data */ + msg = &tps->xfer_msg[0]; + msg->addr = tps->address; + msg->len = 1; + msg->flags = 0; /* Read the register value */ + val = reg; + msg->buf = &val; + /* [MSG2] fill the data rx buffer */ + msg = &tps->xfer_msg[1]; + msg->addr = tps->address; + msg->flags = I2C_M_RD; /* Read the register value */ + msg->len = num_bytes; /* only n bytes */ + msg->buf = value; + ret = i2c_transfer(tps->client->adapter, tps->xfer_msg, 2); + mutex_unlock(&tps->xfer_lock); + + /* i2cTransfer returns num messages.translate it pls.. 
*/ + if (ret >= 0) + ret = 0; + return ret; +} + + +/** +Function:tps65200_i2c_write_byte +Target: Write a byte from Switch charger +Timing: TBD +INPUT: value-> store buffer + reg -> reg offset to read +return :TRUE-->OK + FALSE-->Fail + */ +static int tps65200_i2c_write_byte(u8 value, u8 reg) +{ + /* 2 bytes offset 1 contains the data offset 0 is used by i2c_write */ + int result; + u8 temp_buffer[2] = { 0 }; + /* offset 1 contains the data */ + temp_buffer[1] = value; + result = tps65200_i2c_write(temp_buffer, reg, 1); + if (result != 0) + pr_info("TPS65200 I2C write fail = %d\n", result); + + return result; +} + +/** +Function:tps65200_i2c_read_byte +Target: Read a byte from Switch charger +Timing: TBD +INPUT: value-> store buffer + reg -> reg offset to read +return :TRUE-->OK + FALSE-->Fail + */ +static int tps65200_i2c_read_byte(u8 *value, u8 reg) +{ + int result = 0; + result = tps65200_i2c_read(value, reg, 1); + if (result != 0) + pr_info("TPS65200 I2C read fail = %d\n", result); + + return result; +} + +int tps_set_charger_ctrl(u32 ctl) +{ + int result = 0; + u8 version; + u8 status; + u8 regh; + + if (tps65200_initial < 0) + return 0; + + switch (ctl) { + case DISABLE: + pr_info("Switch charger OFF\n"); + tps65200_i2c_write_byte(0x29, 0x01); + tps65200_i2c_write_byte(0x28, 0x00); + break; + case ENABLE_SLOW_CHG: + pr_info("Switch charger ON (SLOW)\n"); + tps65200_i2c_write_byte(0x29, 0x01); + tps65200_i2c_write_byte(0x2A, 0x00); + tps65200_i2c_write_byte(0x86, 0x03); + tps65200_i2c_write_byte(0x63, 0x02); + break; + case ENABLE_FAST_CHG: + pr_info("Switch charger ON (FAST)\n"); + tps65200_i2c_write_byte(0x29, 0x01); + tps65200_i2c_write_byte(0x2A, 0x00); + tps65200_i2c_write_byte(0x86, 0x03); + tps65200_i2c_write_byte(0xA3, 0x02); + tps65200_i2c_read_byte(®h, 0x01); + pr_info("1.batt: Switch charger ON (FAST): regh 0x01=%x\n", regh); + tps65200_i2c_read_byte(®h, 0x00); + pr_info("2.batt: Switch charger ON (FAST): regh 0x00=%x\n", regh); + tps65200_i2c_read_byte(®h, 0x03); + pr_info("2.batt: Switch charger ON (FAST): regh 0x03=%x\n", regh); + tps65200_i2c_read_byte(®h, 0x02); + pr_info("2.batt: Switch charger ON (FAST): regh 0x02=%x\n", regh); + break; + case CHECK_CHG: + pr_info("Switch charger CHECK \n"); + tps65200_i2c_read_byte(&status, 0x06); + tps65200_i2c_read_byte(®h, 0x09); + pr_info("TPS65200 STATUS_A%x, INT2:%x\n", status, regh); + break; + case SET_ICL500: + pr_info("Switch charger SET_ICL500 \n"); + tps65200_i2c_write_byte(0xA3, 0x02); + break; + case SET_ICL100: + pr_info("Switch charger SET_ICL100 \n"); + tps65200_i2c_write_byte(0x23, 0x02); + break; + case CHECK_INT1: + pr_info("Switch charger CHECK_INT1 \n"); + tps65200_i2c_read_byte(&status, 0x08); + pr_info("Switch charger CHECK_INT1: regh 0x08h=%x\n", status); + result = (int)status; + break; + case CHECK_INT2: + pr_info("Switch charger CHECK_INT2 \n"); + tps65200_i2c_read_byte(&status, 0x09); + pr_info("TPS65200 INT2 %x\n", status); + result = (int)status; + break; + case CHECK_CONTROL: + pr_info("Switch charger CHECK_CONTROL \n"); + tps65200_i2c_read_byte(&status, 0x00); + pr_info("TPS65200 regh 0x00=%x\n", regh); + break; + case OVERTEMP_VREG_4060: + pr_info("Switch charger OVERTEMP_VREG_4060 \n"); + tps65200_i2c_read_byte(®h, 0x02); + regh = (regh & 0xC0) | 0x1C; + tps65200_i2c_write_byte(regh, 0x02); + tps65200_i2c_read_byte(®h, 0x02); + pr_info("Switch charger OVERTEMP_VREG_4060: regh 0x02=%x\n", regh); + break; + case NORMALTEMP_VREG_4200: + pr_info("Switch charger NORMALTEMP_VREG_4200 \n"); + 
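+		/* read-modify-write: keep the upper control bits, restore the normal-temperature charge voltage setting */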
tps65200_i2c_read_byte(®h, 0x02); + regh = (regh & 0xC0) | 0X23; + tps65200_i2c_write_byte(regh, 0x02); + tps65200_i2c_read_byte(®h, 0x02); + pr_info("Switch charger NORMALTEMP_VREG_4200: regh 0x02=%x\n", regh); + break; + default: + pr_info("%s: Not supported battery ctr called.!", __func__); + result = -EINVAL; + break; + } + + return result; +} +EXPORT_SYMBOL(tps_set_charger_ctrl); +static int cable_status_handler_func(struct notifier_block *nfb, + unsigned long action, void *param) +{ + u32 ctl = (u32)action; + pr_info("TPS65200 Switch charger set control%d\n", ctl); + tps_set_charger_ctrl(ctl); + + return NOTIFY_OK; +} + +static struct notifier_block cable_status_handler = { + .notifier_call = cable_status_handler_func, +}; + +static int tps65200_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) +{ + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_WRITE_BYTE_DATA | + I2C_FUNC_SMBUS_BYTE)) + return -ENODEV; + + strlcpy(info->type, "tps65200", I2C_NAME_SIZE); + + return 0; +} + +static int tps65200_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct tps65200_i2c_client *data = &tps65200_i2c_module; + + if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C) == 0) { + dev_dbg(&client->dev, "[TPS65200]:I2C fail\n"); + return -EIO; + } + + register_notifier_cable_status(&cable_status_handler); + + data->address = client->addr; + data->client = client; + mutex_init(&data->xfer_lock); + tps65200_initial = 1; + pr_info("[TPS65200]: Driver registration done\n"); + return 0; +} + +static int tps65200_remove(struct i2c_client *client) +{ + struct tps65200_i2c_client *data = i2c_get_clientdata(client); + int idx; + if (data->client && data->client != client) + i2c_unregister_device(data->client); + tps65200_i2c_module.client = NULL; + return 0; +} +static const struct i2c_device_id tps65200_id[] = { + { "tps65200", 0 }, + { }, +}; +static struct i2c_driver tps65200_driver = { + .driver.name = "tps65200", + .id_table = tps65200_id, + .probe = tps65200_probe, + .remove = tps65200_remove, +}; + +static int __init sensors_tps65200_init(void) +{ + int res; + + res = i2c_add_driver(&tps65200_driver); + if (res) { + pr_info("[TPS65200]: Driver registration failed \n"); + return res; + } + return res; +} + +static void __exit sensors_tps65200_exit(void) +{ + i2c_del_driver(&tps65200_driver); +} + +MODULE_AUTHOR("Josh Hsiao "); +MODULE_DESCRIPTION("tps65200 driver"); +MODULE_LICENSE("GPL"); + +module_init(sensors_tps65200_init); +module_exit(sensors_tps65200_exit); diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 722fc12d5a4ac..88c6e8e40251c 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -390,6 +390,24 @@ config SENSORS_AK8975 If you say yes here you get support for Asahi Kasei's orientation sensor AK8975. +config SENSORS_AKM8973 + tristate "AKM8973 Compass Driver" + depends on I2C + help + AKM8973 Compass Driver implemented by HTC. + +config SENSORS_AKM8976 + tristate "AKM8976 Compass Driver" + depends on I2C + help + AKM8976 Compass Driver implemented by HTC. + +config VP_A1026 + tristate "A1026 Voice Processor Driver" + depends on I2C + help + A1026 Voice Processor Driver implemented by HTC. + config EP93XX_PWM tristate "EP93xx PWM support" depends on ARCH_EP93XX @@ -483,6 +501,13 @@ config WL127X_RFKILL Creates an rfkill entry in sysfs for power control of Bluetooth TI wl127x chips. 
+config SENSORS_BMA150_SPI + tristate "BMA150 G-sensor Driver" + depends on MICROP_COMMON + default y + help + BMA150 G-sensor Driver implemented by HTC. + config APANIC bool "Android kernel panic diagnostics driver" default n @@ -503,5 +528,6 @@ source "drivers/misc/eeprom/Kconfig" source "drivers/misc/cb710/Kconfig" source "drivers/misc/iwmc3200top/Kconfig" source "drivers/misc/ti-st/Kconfig" +source "drivers/misc/video_core/720p/Kconfig" endif # MISC_DEVICES diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 21f4bc8a5e3de..94f117cbe79b5 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -48,3 +48,13 @@ obj-$(CONFIG_AB8500_PWM) += ab8500-pwm.o obj-$(CONFIG_WL127X_RFKILL) += wl127x-rfkill.o obj-$(CONFIG_APANIC) += apanic.o obj-$(CONFIG_SENSORS_AK8975) += akm8975.o +ifeq ($(CONFIG_MICROP_COMMON),y) + obj-$(CONFIG_SENSORS_AKM8973) += akm8973_htc.o +else + obj-$(CONFIG_SENSORS_AKM8973) += akm8973.o +endif +obj-$(CONFIG_SENSORS_AKM8976) += akm8976.o +obj-$(CONFIG_VP_A1026) += a1026.o +obj-$(CONFIG_SENSORS_BMA150_SPI)+= bma150_spi.o +obj-$(CONFIG_MSM_720P_CORE) += video_core/720p/ + diff --git a/drivers/misc/a1026.c b/drivers/misc/a1026.c new file mode 100644 index 0000000000000..b3ee9d2c9474e --- /dev/null +++ b/drivers/misc/a1026.c @@ -0,0 +1,1170 @@ +/* drivers/i2c/chips/a1026.c - a1026 voice processor driver + * + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEBUG (0) +#define ENABLE_DIAG_IOCTLS (0) + +static struct i2c_client *this_client; +static struct a1026_platform_data *pdata; + +static int execute_cmdmsg(unsigned int); + +static struct mutex a1026_lock; +static int a1026_opened; +static int a1026_suspended; +static int control_a1026_clk; +static unsigned int a1026_NS_state = A1026_NS_STATE_AUTO; +static int a1026_current_config = A1026_PATH_SUSPEND; +static int a1026_param_ID; + +struct vp_ctxt { + unsigned char *data; + unsigned int img_size; +}; + +struct vp_ctxt the_vp; + +static int a1026_i2c_read(char *rxData, int length) +{ + int rc; + struct i2c_msg msgs[] = { + { + .addr = this_client->addr, + .flags = I2C_M_RD, + .len = length, + .buf = rxData, + }, + }; + + rc = i2c_transfer(this_client->adapter, msgs, 1); + if (rc < 0) { + pr_err("%s: transfer error %d\n", __func__, rc); + return rc; + } + +#if DEBUG + { + int i = 0; + for (i = 0; i < length; i++) + pr_info("%s: rx[%d] = %2x\n", __func__, i, rxData[i]); + } +#endif + + return 0; +} + +static int a1026_i2c_write(char *txData, int length) +{ + int rc; + struct i2c_msg msg[] = { + { + .addr = this_client->addr, + .flags = 0, + .len = length, + .buf = txData, + }, + }; + + rc = i2c_transfer(this_client->adapter, msg, 1); + if (rc < 0) { + pr_err("%s: transfer error %d\n", __func__, rc); + return rc; + } + +#if DEBUG + { + int i = 0; + for (i = 0; i < length; i++) + pr_info("%s: tx[%d] = %2x\n", __func__, i, txData[i]); + } +#endif + + return 0; +} + +static int a1026_open(struct inode *inode, struct file *file) +{ + int rc = 0; + struct vp_ctxt *vp = &the_vp; + + mutex_lock(&a1026_lock); + + if (a1026_opened) { + pr_err("%s: busy\n", __func__); + rc = -EBUSY; + goto done; + } + + file->private_data = vp; + vp->img_size = 0; + a1026_opened = 1; +done: + mutex_unlock(&a1026_lock); + return rc; +} + +static int a1026_release(struct inode *inode, struct file *file) +{ + mutex_lock(&a1026_lock); + a1026_opened = 0; + mutex_unlock(&a1026_lock); + + return 0; +} + +static void a1026_i2c_sw_reset(unsigned int reset_cmd) +{ + int rc = 0; + unsigned char msgbuf[4]; + + msgbuf[0] = (reset_cmd >> 24) & 0xFF; + msgbuf[1] = (reset_cmd >> 16) & 0xFF; + msgbuf[2] = (reset_cmd >> 8) & 0xFF; + msgbuf[3] = reset_cmd & 0xFF; + + pr_info("%s: %08x\n", __func__, reset_cmd); + + rc = a1026_i2c_write(msgbuf, 4); + if (!rc) + msleep(20); +} + +static ssize_t a1026_bootup_init(struct file *file, struct a1026img *img) +{ + struct vp_ctxt *vp = file->private_data; + int rc, pass = 0; + int remaining; + int retry = RETRY_CNT; + unsigned char *index; + char buf[2]; + + if (img->img_size > A1026_MAX_FW_SIZE) { + pr_err("%s: invalid a1026 image size %d\n", __func__, + img->img_size); + return -EINVAL; + } + + vp->data = kmalloc(img->img_size, GFP_KERNEL); + if (!vp->data) { + pr_err("%s: out of memory\n", __func__); + return -ENOMEM; + } + vp->img_size = img->img_size; + if (copy_from_user(vp->data, img->buf, img->img_size)) { + pr_err("%s: copy from user failed\n", __func__); + kfree(vp->data); + return -EFAULT; + } + + while (retry--) { + /* Reset A1026 chip */ + gpio_set_value(pdata->gpio_a1026_reset, 0); + + /* Enable A1026 clock */ + if (control_a1026_clk) + gpio_set_value(pdata->gpio_a1026_clk, 1); + mdelay(1); + + /* Take out of reset */ + gpio_set_value(pdata->gpio_a1026_reset, 1); + + msleep(50); /* Delay before send I2C command */ + + /* Boot Cmd to A1026 */ + 
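+		/* the chip must return the boot ACK byte before the firmware image is streamed out in 32-byte writes */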
buf[0] = A1026_msg_BOOT >> 8; + buf[1] = A1026_msg_BOOT & 0xff; + + rc = a1026_i2c_write(buf, 2); + if (rc < 0) { + pr_err("%s: set boot mode error (%d retries left)\n", + __func__, retry); + continue; + } + + mdelay(1); /* use polling */ + rc = a1026_i2c_read(buf, 1); + if (rc < 0) { + pr_err("%s: boot mode ack error (%d retries left)\n", + __func__, retry); + continue; + } + + if (buf[0] != A1026_msg_BOOT_ACK) { + pr_err("%s: not a boot-mode ack (%d retries left)\n", + __func__, retry); + continue; + } + + remaining = vp->img_size / 32; + index = vp->data; + + pr_info("%s: starting to load image (%d passes)...\n", + __func__, + remaining + !!(vp->img_size % 32)); + + for (; remaining; remaining--, index += 32) { + rc = a1026_i2c_write(index, 32); + if (rc < 0) + break; + } + + if (rc >= 0 && vp->img_size % 32) + rc = a1026_i2c_write(index, vp->img_size % 32); + + if (rc < 0) { + pr_err("%s: fw load error %d (%d retries left)\n", + __func__, rc, retry); + continue; + } + + msleep(20); /* Delay time before issue a Sync Cmd */ + + pr_info("%s: firmware loaded successfully\n", __func__); + + rc = execute_cmdmsg(A100_msg_Sync); + if (rc < 0) { + pr_err("%s: sync command error %d (%d retries left)\n", + __func__, rc, retry); + continue; + } + + pass = 1; + break; + } + + /* Put A1026 into sleep mode */ + rc = execute_cmdmsg(A100_msg_Sleep); + if (rc < 0) { + pr_err("%s: suspend error\n", __func__); + goto set_suspend_err; + } + + a1026_suspended = 1; + a1026_current_config = A1026_PATH_SUSPEND; + + msleep(120); + /* Disable A1026 clock */ + if (control_a1026_clk) + gpio_set_value(pdata->gpio_a1026_clk, 0); + +set_suspend_err: + if (pass && !rc) + pr_info("%s: initialized!\n", __func__); + else + pr_err("%s: initialization failed\n", __func__); + + kfree(vp->data); + return rc; +} + +unsigned char phonecall_receiver[] = { + 0x80,0x17,0x00,0x02, /* SetAlgorithmParmID, 0x0002:Microphone Configuration */ + 0x80,0x18,0x00,0x00, /* SetAlgorithmParm, 0x0000:2-mic Close Talk (CT) */ + 0x80,0x1C,0x00,0x01, /* VoiceProcessingOn, 0x0001:Yes */ + 0x80,0x17,0x00,0x1A, /* SetAlgorithmParmID, 0x001A:Use ComfortNoise */ + 0x80,0x18,0x00,0x00, /* SetAlgorithmParm, 0x0000:No */ + 0x80,0x17,0x00,0x04, /* SetAlgorithmParmID, 0x0004:Use AGC */ + 0x80,0x18,0x00,0x00, /* SetAlgorithmParm, 0x0000:No */ + 0x80,0x17,0x00,0x00, /* SetAlgorithmParmID, 0x0000:Suppression Strength */ + 0x80,0x18,0x00,0x05, /* SetAlgorithmParm, 0x0005:25dB Max Suppression */ + 0x80,0x17,0x00,0x20, /* SetAlgorithmParmID, 0x0020:Tx PostEq Mode */ + 0x80,0x18,0x00,0x02, /* SetAlgorithmParm, 0x0002:On always */ + 0x80,0x1B,0x00,0x0C, /* SetDigitalInputGain, 0x00:Primay Mic (Tx), 0x0C:(12 dB) */ + 0x80,0x1B,0x01,0x0C, /* SetDigitalInputGain, 0x01:Secondary Mic (Tx), 0x0C:(12 dB) */ + 0x80,0x15,0x00,0xFA, /* SetDigitalOutputGain, 0x00:Tx, 0xFA:(-6 dB) */ +}; + +unsigned char phonecall_headset[] = { + 0x80,0x26,0x00,0x15, /* SelectRouting, 0x0015:Snk,Pri,Snk,Snk - Csp,Zro,Zro (none) */ + 0x80,0x1C,0x00,0x00, /* VoiceProcessingOn, 0x0000:No */ + 0x80,0x1B,0x00,0x12, /* SetDigitalInputGain, 0x00:Primay Mic (Tx), 0x12:(18 dB) */ + 0x80,0x15,0x00,0xF8, /* SetDigitalOutputGain, 0x00:Tx, 0xF8:(-8 dB) */ +}; + +unsigned char phonecall_speaker[] = { + 0x80,0x17,0x00,0x02, /* SetAlgorithmParmID, 0x0002:Microphone Configuration */ + 0x80,0x18,0x00,0x02, /* SetAlgorithmParm, 0x0002:1-mic Desktop/Vehicle (DV) */ + 0x80,0x1C,0x00,0x01, /* VoiceProcessingOn, 0x0001:Yes */ + 0x80,0x17,0x00,0x00, /* SetAlgorithmParmID, 0x0000:Suppression Strength */ + 
0x80,0x18,0x00,0x02, /* SetAlgorithmParm, 0x0002 */ + 0x80,0x17,0x00,0x04, /* SetAlgorithmParmID, 0x0004:Use AGC */ + 0x80,0x18,0x00,0x00, /* SetAlgorithmParm, 0x0000:No */ + 0x80,0x17,0x00,0x1A, /* SetAlgorithmParmID, 0x001A:Use ComfortNoise */ + 0x80,0x18,0x00,0x00, /* SetAlgorithmParm, 0x0000:No */ + 0x80,0x1B,0x00,0x12, /* SetDigitalInputGain, 0x00:Primay Mic (Tx), 0x12:(18 dB) */ + 0x80,0x15,0x00,0xFD, /* SetDigitalOutputGain, 0x00:Tx, 0xFD:(-3 dB) */ +}; + +unsigned char phonecall_bt[] = { + 0x80,0x17,0x00,0x02, /* SetAlgorithmParmID, 0x0002:Microphone Configuration */ + 0x80,0x18,0x00,0x03, /* SetAlgorithmParm, 0x0003:1-mic External (MD) */ + 0x80,0x26,0x00,0x06, /* SelectRouting, 0x0006:Snk,Snk,Fei,Pri - Zro,Csp,Feo (PCM0->PCM1+ADCs) */ + 0x80,0x1C,0x00,0x00, /* VoiceProcessingOn, 0x0000:No */ + 0x80,0x1B,0x00,0x00, /* SetDigitalInputGain, 0x00:Primay Mic (Tx), 0x00:(0 dB) */ + 0x80,0x15,0x00,0x00, /* SetDigitalOutputGain, 0x00:Tx, 0x00:(0 dB) */ +}; + +unsigned char phonecall_tty[] = { + 0x80,0x26,0x00,0x15, /* SelectRouting, 0x0015:Snk,Pri,Snk,Snk - Csp,Zro,Zro (none) */ + 0x80,0x1C,0x00,0x00, /* VoiceProcessingOn, 0x0000:No */ + 0x80,0x1B,0x00,0x00, /* SetDigitalInputGain, 0x00:Primay Mic (Tx), 0x00:(0 dB) */ + 0x80,0x15,0x00,0xFB, /* SetDigitalOutputGain, 0x00:Tx, 0xFB:(-5 dB) */ +}; + +unsigned char INT_MIC_recording_receiver[] = { + 0x80,0x26,0x00,0x07, /* SelectRouting, 0x0007:Pri,Snk,Snk,Snk - Csp,Zro,Zro (none) */ + 0x80,0x1C,0x00,0x00, /* VoiceProcessingOn, 0x0000:No */ + 0x80,0x1B,0x00,0x12, /* SetDigitalInputGain, 0x00:Primay Mic (Tx), 0x12:(18 dB) */ + 0x80,0x15,0x00,0x00, /* SetDigitalOutputGain, 0x00:Tx, 0x00:(0 dB) */ +}; + +unsigned char EXT_MIC_recording[] = { + 0x80,0x26,0x00,0x15, /* SelectRouting, 0x0015:Snk,Pri,Snk,Snk - Csp,Zro,Zro (none) */ + 0x80,0x1C,0x00,0x00, /* VoiceProcessingOn, 0x0000:No */ + 0x80,0x1B,0x00,0x12, /* SetDigitalInputGain, 0x00:Primay Mic (Tx), 0x12:(18 dB) */ + 0x80,0x15,0x00,0x00, /* SetDigitalOutputGain, 0x00:Tx, 0x00:(0 dB) */ +}; + +unsigned char INT_MIC_recording_speaker[] = { + 0x80,0x17,0x00,0x02, /* SetAlgorithmParmID, 0x0002:Microphone Configuration */ + 0x80,0x18,0x00,0x02, /* SetAlgorithmParm, 0x0002:1-mic Desktop/Vehicle (DV) */ + 0x80,0x1C,0x00,0x00, /* VoiceProcessingOn, 0x0000:No */ + 0x80,0x1B,0x00,0x12, /* SetDigitalInputGain, 0x00:Primay Mic (Tx), 0x12:(18 dB) */ + 0x80,0x15,0x00,0x00, /* SetDigitalOutputGain, 0x00:Tx, 0x00:(0 dB) */ +}; + +unsigned char BACK_MIC_recording[] = { + 0x80,0x17,0x00,0x02, /* SetAlgorithmParmID, 0x0002:Microphone Configuration */ + 0x80,0x18,0x00,0x02, /* SetAlgorithmParm, 0x0002:1-mic Desktop/Vehicle (DV) */ + 0x80,0x26,0x00,0x15, /* SelectRouting, 0x0015:Snk,Pri,Snk,Snk - Csp,Zro,Zro (none) */ + 0x80,0x1C,0x00,0x01, /* VoiceProcessingOn, 0x0001:Yes */ + 0x80,0x17,0x00,0x04, /* SetAlgorithmParmID, 0x0004:Use AGC */ + 0x80,0x18,0x00,0x01, /* SetAlgorithmParm, 0x0001:Yes */ + 0x80,0x17,0x00,0x1A, /* SetAlgorithmParmID, 0x001A:Use ComfortNoise */ + 0x80,0x18,0x00,0x00, /* SetAlgorithmParm, 0x0000:No */ + 0x80,0x17,0x00,0x00, /* SetAlgorithmParmID, 0x0000:Suppression Strength */ + 0x80,0x18,0x00,0x00, /* SetAlgorithmParm, 0x0000:No Suppression */ + 0x80,0x1B,0x00,0x12, /* SetDigitalInputGain, 0x00:Primay Mic (Tx), 0x12:(18 dB) */ + 0x80,0x15,0x00,0x06, /* SetDigitalOutputGain, 0x00:Tx, 0x06:(6 dB) */ +}; + +unsigned char vr_no_ns_receiver[] = { + 0x80,0x17,0x00,0x02, /* SetAlgorithmParmID, 0x0002:Microphone Configuration */ + 0x80,0x18,0x00,0x00, /* SetAlgorithmParm, 0x0000:2-mic Close Talk 
(CT) */ + 0x80,0x1C,0x00,0x00, /* VoiceProcessingOn, 0x0000:No */ + 0x80,0x1B,0x00,0x0C, /* SetDigitalInputGain, 0x00:Primay Mic (Tx), 0x0C:(12 dB) */ + 0x80,0x1B,0x01,0x0C, /* SetDigitalInputGain, 0x01:Secondary Mic (Tx), 0x09:(12 dB) */ + 0x80,0x15,0x00,0x00, /* SetDigitalOutputGain, 0x00:Tx, 0x00:(0 dB) */ +}; + +unsigned char vr_no_ns_headset[] = { + 0x80,0x17,0x00,0x02, /* SetAlgorithmParmID, 0x0002:Microphone Configuration */ + 0x80,0x18,0x00,0x03, /* SetAlgorithmParm, 0x0003:1M-DG (1-mic digital input) */ + 0x80,0x26,0x00,0x15, /* SelectRouting, 0x0015:Snk,Pri,Snk,Snk - Csp,Zro,Zro (none) */ + 0x80,0x1C,0x00,0x00, /* VoiceProcessingOn, 0x0000:No */ + 0x80,0x1B,0x00,0x12, /* SetDigitalInputGain, 0x00:Primay Mic (Tx), 0x12:(18 dB) */ + 0x80,0x15,0x00,0x00, /* SetDigitalOutputGain, 0x00:Tx, 0x00:(0 dB) */ +}; + +unsigned char vr_no_ns_speaker[] = { + 0x80,0x17,0x00,0x02, /* SetAlgorithmParmID, 0x0002:Microphone Configuration */ + 0x80,0x18,0x00,0x02, /* SetAlgorithmParm, 0x0002:1-mic Desktop/Vehicle (DV) */ + 0x80,0x1C,0x00,0x00, /* VoiceProcessingOn, 0x0000:No */ + 0x80,0x1B,0x00,0x0C, /* SetDigitalInputGain, 0x00:Primay Mic (Tx), 0x0C:(12 dB) */ + 0x80,0x15,0x00,0x00, /* SetDigitalOutputGain, 0x00:Tx, 0x00:(0 dB) */ +}; + +unsigned char vr_no_ns_bt[] = { + 0x80,0x26,0x00,0x06, /* SelectRouting, 0x0006:Snk,Snk,Fei,Pri - Zro,Csp,Feo (PCM0->PCM1+ADCs) */ + 0x80,0x1C,0x00,0x00, /* VoiceProcessingOn, 0x0000:No */ + 0x80,0x1B,0x00,0x00, /* SetDigitalInputGain, 0x00:Primay Mic (Tx), 0x00:(0 dB) */ + 0x80,0x15,0x00,0x00, /* SetDigitalOutputGain, 0x00:Tx, 0x00:(0 dB) */ +}; + +unsigned char vr_ns_receiver[] = { + 0x80,0x17,0x00,0x02, /* SetAlgorithmParmID, 0x0002:Microphone Configuration */ + 0x80,0x18,0x00,0x00, /* SetAlgorithmParm, 0x0000:2-mic Close Talk (CT) */ + 0x80,0x1C,0x00,0x01, /* VoiceProcessingOn, 0x0001:Yes */ + 0x80,0x17,0x00,0x1A, /* SetAlgorithmParmID, 0x001A:Use ComfortNoise */ + 0x80,0x18,0x00,0x00, /* SetAlgorithmParm, 0x0000:No */ + 0x80,0x17,0x00,0x04, /* SetAlgorithmParmID, 0x0004:Use AGC */ + 0x80,0x18,0x00,0x00, /* SetAlgorithmParm, 0x0000:No */ + 0x80,0x17,0x00,0x00, /* SetAlgorithmParmID, 0x0000:Suppression Strength */ + 0x80,0x18,0x00,0x04, /* SetAlgorithmParm, 0x0004:20dB Max Suppression */ + 0x80,0x1B,0x00,0x0C, /* SetDigitalInputGain, 0x00:Primay Mic (Tx), 0x0C:(12 dB) */ + 0x80,0x1B,0x01,0x0C, /* SetDigitalInputGain, 0x01:Secondary Mic (Tx), 0x0C:(12 dB) */ + 0x80,0x15,0x00,0x00, /* SetDigitalOutputGain, 0x00:Tx, 0x00:(0 dB) */ +}; + +unsigned char vr_ns_headset[] = { + 0x80,0x17,0x00,0x02, /* SetAlgorithmParmID, 0x0002:Microphone Configuration */ + 0x80,0x18,0x00,0x03, /* SetAlgorithmParm, 0x0003:1-mic External (MD) */ + 0x80,0x26,0x00,0x15, /* SelectRouting, 0x0015:Snk,Pri,Snk,Snk - Csp,Zro,Zro (none) */ + 0x80,0x1C,0x00,0x01, /* VoiceProcessingOn, 0x0001:Yes */ + 0x80,0x17,0x00,0x00, /* SetAlgorithmParmID, 0x0000:Suppression Strength */ + 0x80,0x18,0x00,0x02, /* SetAlgorithmParm, 0x0002:20dB Max Suppression */ + 0x80,0x17,0x00,0x1A, /* SetAlgorithmParmID, 0x001A:Use ComfortNoise */ + 0x80,0x18,0x00,0x00, /* SetAlgorithmParm, 0x0000:No */ + 0x80,0x17,0x00,0x04, /* SetAlgorithmParmID, 0x0004:Use AGC */ + 0x80,0x18,0x00,0x00, /* SetAlgorithmParm, 0x0000:No */ + 0x80,0x1B,0x00,0x12, /* SetDigitalInputGain, 0x00:Primay Mic (Tx), 0x12:(18 dB) */ + 0x80,0x15,0x00,0x00, /* SetDigitalOutputGain, 0x00:Tx, 0x00:(0 dB) */ +}; + +unsigned char vr_ns_speaker[] = { + 0x80,0x17,0x00,0x02, /* SetAlgorithmParmID, 0x0002:Microphone Configuration */ + 0x80,0x18,0x00,0x02, /* 
SetAlgorithmParm, 0x0002:1-mic Desktop/Vehicle (DV) */ + 0x80,0x1C,0x00,0x01, /* VoiceProcessingOn, 0x0001:Yes */ + 0x80,0x17,0x00,0x00, /* SetAlgorithmParmID, 0x0000:Suppression Strength */ + 0x80,0x18,0x00,0x04, /* SetAlgorithmParm, 0x0004:20dB Max Suppression */ + 0x80,0x17,0x00,0x04, /* SetAlgorithmParmID, 0x0004:Use AGC */ + 0x80,0x18,0x00,0x00, /* SetAlgorithmParm, 0x0000:No */ + 0x80,0x17,0x00,0x1A, /* SetAlgorithmParmID, 0x001A:Use ComfortNoise */ + 0x80,0x18,0x00,0x00, /* SetAlgorithmParm, 0x0000:No */ + 0x80,0x1B,0x00,0x0C, /* SetDigitalInputGain, 0x00:Primay Mic (Tx), 0x0C:(12 dB) */ + 0x80,0x15,0x00,0x00, /* SetDigitalOutputGain, 0x00:Tx, 0x00:(0 dB) */ +}; + +unsigned char vr_ns_bt[] = { + 0x80,0x26,0x00,0x06, /* SelectRouting, 0x0006:Snk,Snk,Fei,Pri - Zro,Csp,Feo (PCM0->PCM1+ADCs) */ + 0x80,0x1C,0x00,0x01, /* VoiceProcessingOn, 0x0001:Yes */ + 0x80,0x17,0x00,0x00, /* SetAlgorithmParmID, 0x0000:Suppression Strength */ + 0x80,0x18,0x00,0x02, /* SetAlgorithmParm, 0x0002:20dB Max Suppression */ + 0x80,0x17,0x00,0x04, /* SetAlgorithmParmID, 0x0004:Use AGC */ + 0x80,0x18,0x00,0x00, /* SetAlgorithmParm, 0x0000:No */ + 0x80,0x17,0x00,0x1A, /* SetAlgorithmParmID, 0x001A:Use ComfortNoise */ + 0x80,0x18,0x00,0x00, /* SetAlgorithmParm, 0x0000:No */ + 0x80,0x1B,0x00,0x00, /* SetDigitalInputGain, 0x00:Primay Mic (Tx), 0x00:(0 dB) */ + 0x80,0x15,0x00,0x00, /* SetDigitalOutputGain, 0x00:Tx, 0x00:(0 dB) */ +}; + +unsigned char suspend_mode[] = { + 0x80,0x10,0x00,0x01 +}; + +static ssize_t chk_wakeup_a1026(void) +{ + int rc = 0, retry = 3; + + if (a1026_suspended == 1) { + /* Enable A1026 clock */ + if (control_a1026_clk) { + gpio_set_value(pdata->gpio_a1026_clk, 1); + mdelay(1); + } + + gpio_set_value(pdata->gpio_a1026_wakeup, 0); + msleep(120); + + do { + rc = execute_cmdmsg(A100_msg_Sync); + } while ((rc < 0) && --retry); + + gpio_set_value(pdata->gpio_a1026_wakeup, 1); + if (rc < 0) { + pr_err("%s: failed (%d)\n", __func__, rc); + goto wakeup_sync_err; + } + + a1026_suspended = 0; + } +wakeup_sync_err: + return rc; +} + +/* Filter commands according to noise suppression state forced by + * A1026_SET_NS_STATE ioctl. 
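+ * In A1026_NS_STATE_AUTO every command is passed through unchanged.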
+ * + * For this function to operate properly, all configurations must include + * both A100_msg_Bypass and Mic_Config commands even if default values + * are selected or if Mic_Config is useless because VP is off + */ +int a1026_filter_vp_cmd(int cmd, int mode) +{ + int msg = (cmd >> 16) & 0xFFFF; + int filtered_cmd = cmd; + + if (a1026_NS_state == A1026_NS_STATE_AUTO) + return cmd; + + switch (msg) { + case A100_msg_Bypass: + if (a1026_NS_state == A1026_NS_STATE_OFF) + filtered_cmd = A1026_msg_VP_OFF; + else + filtered_cmd = A1026_msg_VP_ON; + break; + case A100_msg_SetAlgorithmParmID: + a1026_param_ID = cmd & 0xFFFF; + break; + case A100_msg_SetAlgorithmParm: + if (a1026_param_ID == Mic_Config) { + if (a1026_NS_state == A1026_NS_STATE_CT) + filtered_cmd = (msg << 16); + else if (a1026_NS_state == A1026_NS_STATE_FT) + filtered_cmd = (msg << 16) | 0x0002; + } + break; + default: + if (mode == A1026_CONFIG_VP) + filtered_cmd = -1; + break; + } + + pr_info("%s: %x filtered = %x, a1026_NS_state %d, mode %d\n", __func__, + cmd, filtered_cmd, a1026_NS_state, mode); + + return filtered_cmd; +} + +int a1026_set_config(char newid, int mode) +{ + int i = 0, rc = 0, size = 0; + int number_of_cmd_sets, rd_retry_cnt; + unsigned int sw_reset = 0; + unsigned char *i2c_cmds; + unsigned char *index = 0; + unsigned char ack_buf[A1026_CMD_FIFO_DEPTH * 4]; + unsigned char rdbuf[4]; + + if ((a1026_suspended) && (newid == A1026_PATH_SUSPEND)) + return rc; + + rc = chk_wakeup_a1026(); + if (rc < 0) + return rc; + + sw_reset = ((A100_msg_Reset << 16) | RESET_IMMEDIATE); + + switch (newid) { + case A1026_PATH_INCALL_RECEIVER: + gpio_set_value(pdata->gpio_a1026_micsel, 0); + i2c_cmds = phonecall_receiver; + size = sizeof(phonecall_receiver); + break; + case A1026_PATH_INCALL_HEADSET: + gpio_set_value(pdata->gpio_a1026_micsel, 1); + i2c_cmds = phonecall_headset; + size = sizeof(phonecall_headset); + break; + case A1026_PATH_INCALL_SPEAKER: + gpio_set_value(pdata->gpio_a1026_micsel, 0); + i2c_cmds = phonecall_speaker; + size = sizeof(phonecall_speaker); + break; + case A1026_PATH_INCALL_BT: + gpio_set_value(pdata->gpio_a1026_micsel, 0); + i2c_cmds = phonecall_bt; + size = sizeof(phonecall_bt); + break; + case A1026_PATH_INCALL_TTY: + gpio_set_value(pdata->gpio_a1026_micsel, 1); + i2c_cmds = phonecall_tty; + size = sizeof(phonecall_tty); + break; + case A1026_PATH_VR_NO_NS_RECEIVER: + gpio_set_value(pdata->gpio_a1026_micsel, 0); + i2c_cmds = vr_no_ns_receiver; + size = sizeof(vr_no_ns_receiver); + break; + case A1026_PATH_VR_NO_NS_HEADSET: + gpio_set_value(pdata->gpio_a1026_micsel, 1); + i2c_cmds = vr_no_ns_headset; + size = sizeof(vr_no_ns_headset); + break; + case A1026_PATH_VR_NO_NS_SPEAKER: + gpio_set_value(pdata->gpio_a1026_micsel, 0); + i2c_cmds = vr_no_ns_speaker; + size = sizeof(vr_no_ns_speaker); + break; + case A1026_PATH_VR_NO_NS_BT: + gpio_set_value(pdata->gpio_a1026_micsel, 0); + i2c_cmds = vr_no_ns_bt; + size = sizeof(vr_no_ns_bt); + break; + case A1026_PATH_VR_NS_RECEIVER: + gpio_set_value(pdata->gpio_a1026_micsel, 0); + i2c_cmds = vr_ns_receiver; + size = sizeof(vr_ns_receiver); + break; + case A1026_PATH_VR_NS_HEADSET: + gpio_set_value(pdata->gpio_a1026_micsel, 1); + i2c_cmds = vr_ns_headset; + size = sizeof(vr_ns_headset); + break; + case A1026_PATH_VR_NS_SPEAKER: + gpio_set_value(pdata->gpio_a1026_micsel, 0); + i2c_cmds = vr_ns_speaker; + size = sizeof(vr_ns_speaker); + break; + case A1026_PATH_VR_NS_BT: + gpio_set_value(pdata->gpio_a1026_micsel, 0); + i2c_cmds = vr_ns_bt; + size = 
sizeof(vr_ns_bt); + break; + case A1026_PATH_RECORD_RECEIVER: + gpio_set_value(pdata->gpio_a1026_micsel, 0); + i2c_cmds = INT_MIC_recording_receiver; + size = sizeof(INT_MIC_recording_receiver); + break; + case A1026_PATH_RECORD_HEADSET: + gpio_set_value(pdata->gpio_a1026_micsel, 1); + i2c_cmds = EXT_MIC_recording; + size = sizeof(EXT_MIC_recording); + break; + case A1026_PATH_RECORD_SPEAKER: + gpio_set_value(pdata->gpio_a1026_micsel, 0); + i2c_cmds = INT_MIC_recording_speaker; + size = sizeof(INT_MIC_recording_speaker); + break; + case A1026_PATH_RECORD_BT: + gpio_set_value(pdata->gpio_a1026_micsel, 0); + i2c_cmds = phonecall_bt; + size = sizeof(phonecall_bt); + break; + case A1026_PATH_SUSPEND: + gpio_set_value(pdata->gpio_a1026_micsel, 0); + i2c_cmds = (unsigned char *)suspend_mode; + size = sizeof(suspend_mode); + break; + case A1026_PATH_CAMCORDER: + gpio_set_value(pdata->gpio_a1026_micsel, 0); + i2c_cmds = BACK_MIC_recording; + size = sizeof(BACK_MIC_recording); + break; + default: + pr_err("%s: invalid cmd %d\n", __func__, newid); + rc = -1; + goto input_err; + break; + } + + a1026_current_config = newid; + pr_info("%s: change to mode %d\n", __func__, newid); + + pr_info("%s: block write start (size = %d)\n", __func__, size); +#if DEBUG + for (i = 1; i <= size; i++) { + pr_info("%x ", *(i2c_cmds + i - 1)); + if ( !(i % 4)) + pr_info("\n"); + } +#endif + + rc = a1026_i2c_write(i2c_cmds, size); + if (rc < 0) { + pr_err("A1026 CMD block write error!\n"); + a1026_i2c_sw_reset(sw_reset); + return rc; + } + pr_info("%s: block write end\n", __func__); + + /* Don't need to get Ack after sending out a suspend command */ + if (*i2c_cmds == 0x80 && *(i2c_cmds + 1) == 0x10 + && *(i2c_cmds + 2) == 0x00 && *(i2c_cmds + 3) == 0x01) { + a1026_suspended = 1; + /* Disable A1026 clock */ + msleep(120); + if (control_a1026_clk) + gpio_set_value(pdata->gpio_a1026_clk, 0); + return rc; + } + + memset(ack_buf, 0, sizeof(ack_buf)); + msleep(20); + pr_info("%s: CMD ACK block read start\n", __func__); + rc = a1026_i2c_read(ack_buf, size); + if (rc < 0) { + pr_err("%s: CMD ACK block read error\n", __func__); + a1026_i2c_sw_reset(sw_reset); + return rc; + } else { + pr_info("%s: CMD ACK block read end\n", __func__); +#if DEBUG + for (i = 1; i <= size; i++) { + pr_info("%x ", ack_buf[i-1]); + if ( !(i % 4)) + pr_info("\n"); + } +#endif + index = ack_buf; + number_of_cmd_sets = size / 4; + do { + if (*index == 0x00) { + rd_retry_cnt = POLLING_RETRY_CNT; +rd_retry: + if (rd_retry_cnt--) { + memset(rdbuf, 0, sizeof(rdbuf)); + rc = a1026_i2c_read(rdbuf, 4); + if (rc < 0) + return rc; +#if DEBUG + for (i = 0; i < sizeof(rdbuf); i++) { + pr_info("0x%x\n", rdbuf[i]); + } + pr_info("-----------------\n"); +#endif + if (rdbuf[0] == 0x00) { + msleep(20); + goto rd_retry; + } + } else { + pr_err("%s: CMD ACK Not Ready\n", + __func__); + return -EBUSY; + } + } else if (*index == 0xff) { /* illegal cmd */ + return -ENOEXEC; + } else if (*index == 0x80) { + index += 4; + } + } while (--number_of_cmd_sets); + } +input_err: + return rc; +} + +int execute_cmdmsg(unsigned int msg) +{ + int rc = 0; + int retries, pass = 0; + unsigned char msgbuf[4]; + unsigned char chkbuf[4]; + unsigned int sw_reset = 0; + + sw_reset = ((A100_msg_Reset << 16) | RESET_IMMEDIATE); + + msgbuf[0] = (msg >> 24) & 0xFF; + msgbuf[1] = (msg >> 16) & 0xFF; + msgbuf[2] = (msg >> 8) & 0xFF; + msgbuf[3] = msg & 0xFF; + + memcpy(chkbuf, msgbuf, 4); + + rc = a1026_i2c_write(msgbuf, 4); + if (rc < 0) { + pr_err("%s: error %d\n", __func__, rc); + 
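+		/* the command may have been partially clocked out; soft-reset the chip before returning the error */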
a1026_i2c_sw_reset(sw_reset); + return rc; + } + + /* We don't need to get Ack after sending out a suspend command */ + if (msg == A100_msg_Sleep) + return rc; + + retries = POLLING_RETRY_CNT; + while (retries--) { + rc = 0; + + msleep(20); /* use polling */ + memset(msgbuf, 0, sizeof(msgbuf)); + rc = a1026_i2c_read(msgbuf, 4); + if (rc < 0) { + pr_err("%s: ack-read error %d (%d retries)\n", __func__, + rc, retries); + continue; + } + + if (msgbuf[0] == 0x80 && msgbuf[1] == chkbuf[1]) { + pass = 1; + break; + } else if (msgbuf[0] == 0xff && msgbuf[1] == 0xff) { + pr_err("%s: illegal cmd %08x\n", __func__, msg); + rc = -EINVAL; + break; + } else if ( msgbuf[0] == 0x00 && msgbuf[1] == 0x00 ) { + pr_info("%s: not ready (%d retries)\n", __func__, + retries); + rc = -EBUSY; + } else { + pr_info("%s: cmd/ack mismatch: (%d retries left)\n", + __func__, + retries); +#if DEBUG + pr_info("%s: msgbuf[0] = %x\n", __func__, msgbuf[0]); + pr_info("%s: msgbuf[1] = %x\n", __func__, msgbuf[1]); + pr_info("%s: msgbuf[2] = %x\n", __func__, msgbuf[2]); + pr_info("%s: msgbuf[3] = %x\n", __func__, msgbuf[3]); +#endif + rc = -EBUSY; + } + } + + if (!pass) { + pr_err("%s: failed execute cmd %08x (%d)\n", __func__, + msg, rc); + a1026_i2c_sw_reset(sw_reset); + } + return rc; +} + +#if ENABLE_DIAG_IOCTLS +static int a1026_set_mic_state(char miccase) +{ + int rc = 0; + unsigned int cmd_msg = 0; + + switch (miccase) { + case 1: /* Mic-1 ON / Mic-2 OFF */ + cmd_msg = 0x80260007; + break; + case 2: /* Mic-1 OFF / Mic-2 ON */ + cmd_msg = 0x80260015; + break; + case 3: /* both ON */ + cmd_msg = 0x80260001; + break; + case 4: /* both OFF */ + cmd_msg = 0x80260006; + break; + default: + pr_info("%s: invalid input %d\n", __func__, miccase); + rc = -EINVAL; + break; + } + rc = execute_cmdmsg(cmd_msg); + return rc; +} + +static int exe_cmd_in_file(unsigned char *incmd) +{ + int rc = 0; + int i = 0; + unsigned int cmd_msg = 0; + unsigned char tmp = 0; + + for (i = 0; i < 4; i++) { + tmp = *(incmd + i); + cmd_msg |= (unsigned int)tmp; + if (i != 3) + cmd_msg = cmd_msg << 8; + } + rc = execute_cmdmsg(cmd_msg); + if (rc < 0) + pr_err("%s: cmd %08x error %d\n", __func__, cmd_msg, rc); + return rc; +} +#endif /* ENABLE_DIAG_IOCTLS */ + +static long +a1026_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + struct a1026img img; + int rc = 0; +#if ENABLE_DIAG_IOCTLS + char msg[4]; + int mic_cases = 0; + int mic_sel = 0; +#endif + int pathid = 0; + unsigned int ns_state; + + mutex_lock(&a1026_lock); + switch (cmd) { + case A1026_BOOTUP_INIT: + img.buf = 0; + img.img_size = 0; + if (copy_from_user(&img, argp, sizeof(img))) { + rc = -EFAULT; + goto out; + } + rc = a1026_bootup_init(file, &img); + break; + case A1026_SET_CONFIG: + if (copy_from_user(&pathid, argp, sizeof(pathid))) { + rc = -EFAULT; + goto out; + } + if (pathid < 0 || pathid >= A1026_PATH_MAX) { + rc = -EINVAL; + goto out; + } + rc = a1026_set_config(pathid, A1026_CONFIG_FULL); + if (rc < 0) + pr_err("%s: A1026_SET_CONFIG (%d) error %d!\n", + __func__, pathid, rc); + break; + case A1026_SET_NS_STATE: + if (copy_from_user(&ns_state, argp, sizeof(ns_state))) { + rc = -EFAULT; + goto out; + } + pr_info("%s: set noise suppression %d\n", __func__, ns_state); + if (ns_state < 0 || ns_state >= A1026_NS_NUM_STATES) { + rc = -EINVAL; + goto out; + } + a1026_NS_state = ns_state; + if (!a1026_suspended) + a1026_set_config(a1026_current_config, + A1026_CONFIG_VP); + break; +#if ENABLE_DIAG_IOCTLS + case A1026_SET_MIC_ONOFF: + 
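+		/* diagnostic path: wake the chip from sleep before changing the mic routing */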
rc = chk_wakeup_a1026(); + if (rc < 0) + goto out; + if (copy_from_user(&mic_cases, argp, sizeof(mic_cases))) { + rc = -EFAULT; + goto out; + } + rc = a1026_set_mic_state(mic_cases); + if (rc < 0) + pr_err("%s: A1026_SET_MIC_ONOFF %d error %d!\n", + __func__, mic_cases, rc); + break; + case A1026_SET_MICSEL_ONOFF: + rc = chk_wakeup_a1026(); + if (rc < 0) + goto out; + if (copy_from_user(&mic_sel, argp, sizeof(mic_sel))) { + rc = -EFAULT; + goto out; + } + gpio_set_value(pdata->gpio_a1026_micsel, !!mic_sel); + rc = 0; + break; + case A1026_READ_DATA: + rc = chk_wakeup_a1026(); + if (rc < 0) + goto out; + rc = a1026_i2c_read(msg, 4); + if (copy_to_user(argp, &msg, 4)) { + rc = -EFAULT; + goto out; + } + break; + case A1026_WRITE_MSG: + rc = chk_wakeup_a1026(); + if (rc < 0) + goto out; + if (copy_from_user(msg, argp, sizeof(msg))) { + rc = -EFAULT; + goto out; + } + rc = a1026_i2c_write(msg, 4); + break; + case A1026_SYNC_CMD: + rc = chk_wakeup_a1026(); + if (rc < 0) + goto out; + msg[0] = 0x80; + msg[1] = 0x00; + msg[2] = 0x00; + msg[3] = 0x00; + rc = a1026_i2c_write(msg, 4); + break; + case A1026_SET_CMD_FILE: + rc = chk_wakeup_a1026(); + if (rc < 0) + goto out; + if (copy_from_user(msg, argp, sizeof(msg))) { + rc = -EFAULT; + goto out; + } + rc = exe_cmd_in_file(msg); + break; +#endif /* ENABLE_DIAG_IOCTLS */ + default: + pr_err("%s: invalid command %d\n", __func__, _IOC_NR(cmd)); + rc = -EINVAL; + break; + } + +out: + mutex_unlock(&a1026_lock); + return rc; +} + +static const struct file_operations a1026_fops = { + .owner = THIS_MODULE, + .open = a1026_open, + .release = a1026_release, + .unlocked_ioctl = a1026_ioctl, +}; + +static struct miscdevice a1026_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "audience_a1026", + .fops = &a1026_fops, +}; + +static int a1026_probe( + struct i2c_client *client, const struct i2c_device_id *id) +{ + int rc = 0; + + pdata = client->dev.platform_data; + + if (pdata == NULL) { + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (pdata == NULL) { + rc = -ENOMEM; + pr_err("%s: platform data is NULL\n", __func__); + goto err_alloc_data_failed; + } + } + + this_client = client; + + rc = gpio_request(pdata->gpio_a1026_clk, "a1026"); + if (rc < 0) { + control_a1026_clk = 0; + goto chk_gpio_micsel; + } + control_a1026_clk = 1; + + rc = gpio_direction_output(pdata->gpio_a1026_clk, 1); + if (rc < 0) { + pr_err("%s: request clk gpio direction failed\n", __func__); + goto err_free_gpio_clk; + } + +chk_gpio_micsel: + rc = gpio_request(pdata->gpio_a1026_micsel, "a1026"); + if (rc < 0) { + pr_err("%s: gpio request mic_sel pin failed\n", __func__); + goto err_free_gpio_micsel; + } + + rc = gpio_direction_output(pdata->gpio_a1026_micsel, 1); + if (rc < 0) { + pr_err("%s: request mic_sel gpio direction failed\n", __func__); + goto err_free_gpio_micsel; + } + + rc = gpio_request(pdata->gpio_a1026_wakeup, "a1026"); + if (rc < 0) { + pr_err("%s: gpio request wakeup pin failed\n", __func__); + goto err_free_gpio; + } + + rc = gpio_direction_output(pdata->gpio_a1026_wakeup, 1); + if (rc < 0) { + pr_err("%s: request wakeup gpio direction failed\n", __func__); + goto err_free_gpio; + } + + rc = gpio_request(pdata->gpio_a1026_reset, "a1026"); + if (rc < 0) { + pr_err("%s: gpio request reset pin failed\n", __func__); + goto err_free_gpio; + } + + rc = gpio_direction_output(pdata->gpio_a1026_reset, 1); + if (rc < 0) { + pr_err("%s: request reset gpio direction failed\n", __func__); + goto err_free_gpio_all; + } + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) 
{ + pr_err("%s: i2c check functionality error\n", __func__); + rc = -ENODEV; + goto err_free_gpio_all; + } + + if (control_a1026_clk) + gpio_set_value(pdata->gpio_a1026_clk, 1); + gpio_set_value(pdata->gpio_a1026_micsel, 0); + gpio_set_value(pdata->gpio_a1026_wakeup, 1); + gpio_set_value(pdata->gpio_a1026_reset, 1); + + rc = misc_register(&a1026_device); + if (rc) { + pr_err("%s: a1026_device register failed\n", __func__); + goto err_free_gpio_all; + } + + return 0; + +err_free_gpio_all: + gpio_free(pdata->gpio_a1026_reset); +err_free_gpio: + gpio_free(pdata->gpio_a1026_wakeup); +err_free_gpio_micsel: + gpio_free(pdata->gpio_a1026_micsel); +err_free_gpio_clk: + if (control_a1026_clk) + gpio_free(pdata->gpio_a1026_clk); +err_alloc_data_failed: + return rc; +} + +static int a1026_remove(struct i2c_client *client) +{ + struct a1026_platform_data *p1026data = i2c_get_clientdata(client); + kfree(p1026data); + + return 0; +} + +static int a1026_suspend(struct i2c_client *client, pm_message_t mesg) +{ + return 0; +} + +static int a1026_resume(struct i2c_client *client) +{ + return 0; +} + +static const struct i2c_device_id a1026_id[] = { + { "audience_a1026", 0 }, + { } +}; + +static struct i2c_driver a1026_driver = { + .probe = a1026_probe, + .remove = a1026_remove, + .suspend = a1026_suspend, + .resume = a1026_resume, + .id_table = a1026_id, + .driver = { + .name = "audience_a1026", + }, +}; + +static int __init a1026_init(void) +{ + pr_info("%s\n", __func__); + mutex_init(&a1026_lock); + + return i2c_add_driver(&a1026_driver); +} + +static void __exit a1026_exit(void) +{ + i2c_del_driver(&a1026_driver); +} + +module_init(a1026_init); +module_exit(a1026_exit); + +MODULE_DESCRIPTION("A1026 voice processor driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/akm8973.c b/drivers/misc/akm8973.c new file mode 100644 index 0000000000000..3e756fcee0d80 --- /dev/null +++ b/drivers/misc/akm8973.c @@ -0,0 +1,777 @@ +/* + * drivers/i2c/chips/akm8973.c - akm8973 compass driver + * + * Copyright (C) 2008-2009 HTC Corporation. + * Author: viral wang + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEBUG 0 +#define MAX_FAILURE_COUNT 3 + +static struct i2c_client *this_client; + +struct akm8973_data { + struct input_dev *input_dev; + struct work_struct work; +}; + +/* Addresses to scan -- protected by sense_data_mutex */ +static char sense_data[RBUFF_SIZE + 1]; +static struct mutex sense_data_mutex; +#define AKM8973_RETRY_COUNT 10 +static DECLARE_WAIT_QUEUE_HEAD(data_ready_wq); +static DECLARE_WAIT_QUEUE_HEAD(open_wq); + +static atomic_t data_ready; +static atomic_t open_count; +static atomic_t open_flag; +static atomic_t reserve_open_flag; + +static atomic_t m_flag; +static atomic_t a_flag; +static atomic_t t_flag; +static atomic_t mv_flag; + +static int failure_count = 0; + +static short akmd_delay = 0; + +static struct akm8973_platform_data *pdata; + +static int AKI2C_RxData(char *rxData, int length) +{ + uint8_t loop_i; + struct i2c_msg msgs[] = { + { + .addr = this_client->addr, + .flags = 0, + .len = 1, + .buf = rxData, + }, + { + .addr = this_client->addr, + .flags = I2C_M_RD, + .len = length, + .buf = rxData, + }, + }; + + for (loop_i = 0; loop_i < AKM8973_RETRY_COUNT; loop_i++) { + if (i2c_transfer(this_client->adapter, msgs, 2) > 0) { + break; + } + mdelay(10); + } + + if (loop_i >= AKM8973_RETRY_COUNT) { + printk(KERN_ERR "%s retry over %d\n", __func__, AKM8973_RETRY_COUNT); + return -EIO; + } + return 0; +} + +static int AKI2C_TxData(char *txData, int length) +{ + uint8_t loop_i; + struct i2c_msg msg[] = { + { + .addr = this_client->addr, + .flags = 0, + .len = length, + .buf = txData, + }, + }; + + for (loop_i = 0; loop_i < AKM8973_RETRY_COUNT; loop_i++) { + if (i2c_transfer(this_client->adapter, msg, 1) > 0) { + break; + } + mdelay(10); + } + + if (loop_i >= AKM8973_RETRY_COUNT) { + printk(KERN_ERR "%s retry over %d\n", __func__, AKM8973_RETRY_COUNT); + return -EIO; + } + return 0; +} + +static void AKECS_Reset(void) +{ + gpio_set_value(pdata->reset, 0); + udelay(120); + gpio_set_value(pdata->reset, 1); +} + +static int AKECS_StartMeasure(void) +{ + char buffer[2]; + + atomic_set(&data_ready, 0); + + /* Set measure mode */ + buffer[0] = AKECS_REG_MS1; + buffer[1] = AKECS_MODE_MEASURE; + + /* Set data */ + return AKI2C_TxData(buffer, 2); +} + +static int AKECS_PowerDown(void) +{ + char buffer[2]; + int ret; + + /* Set powerdown mode */ + buffer[0] = AKECS_REG_MS1; + buffer[1] = AKECS_MODE_POWERDOWN; + /* Set data */ + ret = AKI2C_TxData(buffer, 2); + if (ret < 0) + return ret; + + /* Dummy read for clearing INT pin */ + buffer[0] = AKECS_REG_TMPS; + /* Read data */ + ret = AKI2C_RxData(buffer, 1); + if (ret < 0) + return ret; + return ret; +} + +static int AKECS_StartE2PRead(void) +{ + char buffer[2]; + + /* Set measure mode */ + buffer[0] = AKECS_REG_MS1; + buffer[1] = AKECS_MODE_E2P_READ; + /* Set data */ + return AKI2C_TxData(buffer, 2); +} + +static int AKECS_GetData(void) +{ + char buffer[RBUFF_SIZE + 1]; + int ret; + + memset(buffer, 0, RBUFF_SIZE + 1); + buffer[0] = AKECS_REG_ST; + ret = AKI2C_RxData(buffer, RBUFF_SIZE+1); + if (ret < 0) + return ret; + + mutex_lock(&sense_data_mutex); + memcpy(sense_data, buffer, sizeof(buffer)); + atomic_set(&data_ready, 1); + wake_up(&data_ready_wq); + mutex_unlock(&sense_data_mutex); + + return 0; +} + +static int AKECS_SetMode(char mode) +{ + int ret; + + switch (mode) { + case AKECS_MODE_MEASURE: + ret = AKECS_StartMeasure(); + break; + case AKECS_MODE_E2P_READ: + ret = 
AKECS_StartE2PRead(); + break; + case AKECS_MODE_POWERDOWN: + ret = AKECS_PowerDown(); + break; + default: + return -EINVAL; + } + + /* wait at least 300us after changing mode */ + mdelay(1); + return ret; +} + +static int AKECS_TransRBuff(char *rbuf, int size) +{ + wait_event_interruptible_timeout(data_ready_wq, + atomic_read(&data_ready), 1000); + if (!atomic_read(&data_ready)) { + /* Ignore data errors if there are no open handles */ + if (atomic_read(&open_count) > 0) { + printk(KERN_ERR + "AKM8973 AKECS_TransRBUFF: Data not ready\n"); + failure_count++; + if (failure_count >= MAX_FAILURE_COUNT) { + printk(KERN_ERR + "AKM8973 AKECS_TransRBUFF: successive %d failure.\n", + failure_count); + atomic_set(&open_flag, -1); + wake_up(&open_wq); + failure_count = 0; + } + } + return -1; + } + + mutex_lock(&sense_data_mutex); + memcpy(&rbuf[1], &sense_data[1], size); + atomic_set(&data_ready, 0); + mutex_unlock(&sense_data_mutex); + + failure_count = 0; + return 0; +} + + +static void AKECS_Report_Value(short *rbuf) +{ + struct akm8973_data *data = i2c_get_clientdata(this_client); +#if DEBUG + printk(KERN_INFO"AKECS_Report_Value: yaw = %d, pitch = %d, roll = %d\n", rbuf[0], + rbuf[1], rbuf[2]); + printk(KERN_INFO" tmp = %d, m_stat= %d, g_stat=%d\n", rbuf[3], + rbuf[4], rbuf[5]); + printk(KERN_INFO" G_Sensor: x = %d LSB, y = %d LSB, z = %d LSB\n", + rbuf[6], rbuf[7], rbuf[8]); +#endif + /* Report magnetic sensor information */ + if (atomic_read(&m_flag)) { + input_report_abs(data->input_dev, ABS_RX, rbuf[0]); + input_report_abs(data->input_dev, ABS_RY, rbuf[1]); + input_report_abs(data->input_dev, ABS_RZ, rbuf[2]); + input_report_abs(data->input_dev, ABS_RUDDER, rbuf[4]); + } + + /* Report acceleration sensor information */ + if (atomic_read(&a_flag)) { + input_report_abs(data->input_dev, ABS_X, rbuf[6]); + input_report_abs(data->input_dev, ABS_Y, rbuf[7]); + input_report_abs(data->input_dev, ABS_Z, rbuf[8]); + input_report_abs(data->input_dev, ABS_WHEEL, rbuf[5]); + } + + /* Report temperature information */ + if (atomic_read(&t_flag)) + input_report_abs(data->input_dev, ABS_THROTTLE, rbuf[3]); + + if (atomic_read(&mv_flag)) { + input_report_abs(data->input_dev, ABS_HAT0X, rbuf[9]); + input_report_abs(data->input_dev, ABS_HAT0Y, rbuf[10]); + input_report_abs(data->input_dev, ABS_BRAKE, rbuf[11]); + } + + input_sync(data->input_dev); +} + +static DEFINE_MUTEX(akmd_lock); + +static int AKECS_GetOpenStatus(void) +{ + mutex_unlock(&akmd_lock); + wait_event_interruptible(open_wq, (atomic_read(&open_flag) != 0)); + mutex_lock(&akmd_lock); + return atomic_read(&open_flag); +} + +static int AKECS_GetCloseStatus(void) +{ + mutex_unlock(&akmd_lock); + wait_event_interruptible(open_wq, (atomic_read(&open_flag) <= 0)); + mutex_lock(&akmd_lock); + return atomic_read(&open_flag); +} + +static void AKECS_CloseDone(void) +{ + mutex_lock(&akmd_lock); + atomic_set(&m_flag, 1); + atomic_set(&a_flag, 1); + atomic_set(&t_flag, 1); + atomic_set(&mv_flag, 1); + mutex_unlock(&akmd_lock); +} + +static int akm_aot_open(struct inode *inode, struct file *file) +{ + int ret = -1; + mutex_lock(&akmd_lock); + if (atomic_cmpxchg(&open_count, 0, 1) == 0) { + if (atomic_cmpxchg(&open_flag, 0, 1) == 0) { + atomic_set(&reserve_open_flag, 1); + enable_irq(this_client->irq); + wake_up(&open_wq); + ret = 0; + } + } + mutex_unlock(&akmd_lock); + return ret; +} + +static int akm_aot_release(struct inode *inode, struct file *file) +{ + mutex_lock(&akmd_lock); + atomic_set(&reserve_open_flag, 0); + atomic_set(&open_flag, 0); + 
atomic_set(&open_count, 0); + wake_up(&open_wq); + disable_irq(this_client->irq); + mutex_unlock(&akmd_lock); + return 0; +} + +static long +akm_aot_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + short flag; + + switch (cmd) { + case ECS_IOCTL_APP_SET_MFLAG: + case ECS_IOCTL_APP_SET_AFLAG: + case ECS_IOCTL_APP_SET_TFLAG: + case ECS_IOCTL_APP_SET_MVFLAG: + if (copy_from_user(&flag, argp, sizeof(flag))) + return -EFAULT; + if (flag < 0 || flag > 1) + return -EINVAL; + break; + case ECS_IOCTL_APP_SET_DELAY: + if (copy_from_user(&flag, argp, sizeof(flag))) + return -EFAULT; + break; + default: + break; + } + + mutex_lock(&akmd_lock); + switch (cmd) { + case ECS_IOCTL_APP_SET_MFLAG: + atomic_set(&m_flag, flag); + break; + case ECS_IOCTL_APP_GET_MFLAG: + flag = atomic_read(&m_flag); + break; + case ECS_IOCTL_APP_SET_AFLAG: + atomic_set(&a_flag, flag); + break; + case ECS_IOCTL_APP_GET_AFLAG: + flag = atomic_read(&a_flag); + break; + case ECS_IOCTL_APP_SET_TFLAG: + atomic_set(&t_flag, flag); + break; + case ECS_IOCTL_APP_GET_TFLAG: + flag = atomic_read(&t_flag); + break; + case ECS_IOCTL_APP_SET_MVFLAG: + atomic_set(&mv_flag, flag); + break; + case ECS_IOCTL_APP_GET_MVFLAG: + flag = atomic_read(&mv_flag); + break; + case ECS_IOCTL_APP_SET_DELAY: + akmd_delay = flag; + break; + case ECS_IOCTL_APP_GET_DELAY: + flag = akmd_delay; + break; + default: + mutex_unlock(&akmd_lock); + return -ENOTTY; + } + mutex_unlock(&akmd_lock); + + switch (cmd) { + case ECS_IOCTL_APP_GET_MFLAG: + case ECS_IOCTL_APP_GET_AFLAG: + case ECS_IOCTL_APP_GET_TFLAG: + case ECS_IOCTL_APP_GET_MVFLAG: + case ECS_IOCTL_APP_GET_DELAY: + if (copy_to_user(argp, &flag, sizeof(flag))) + return -EFAULT; + break; + default: + break; + } + + return 0; +} + +static int akmd_open(struct inode *inode, struct file *file) +{ + return nonseekable_open(inode, file); +} + +static int akmd_release(struct inode *inode, struct file *file) +{ + AKECS_CloseDone(); + return 0; +} + +static long +akmd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + + void __user *argp = (void __user *)arg; + + char msg[RBUFF_SIZE + 1], rwbuf[5]; + int ret = -1, status; + short mode, value[12], delay; + char project_name[64]; + short layouts[4][3][3]; + int i, j, k; + + switch (cmd) { + case ECS_IOCTL_WRITE: + case ECS_IOCTL_READ: + if (copy_from_user(&rwbuf, argp, sizeof(rwbuf))) + return -EFAULT; + break; + case ECS_IOCTL_SET_MODE: + if (copy_from_user(&mode, argp, sizeof(mode))) + return -EFAULT; + break; + case ECS_IOCTL_SET_YPR: + if (copy_from_user(&value, argp, sizeof(value))) + return -EFAULT; + break; + default: + break; + } + + mutex_lock(&akmd_lock); + switch (cmd) { + case ECS_IOCTL_WRITE: + if (rwbuf[0] < 2) { + ret = -EINVAL; + goto err; + } + ret = AKI2C_TxData(&rwbuf[1], rwbuf[0]); + if (ret < 0) + goto err; + break; + case ECS_IOCTL_READ: + if (rwbuf[0] < 1) { + ret = -EINVAL; + goto err; + } + ret = AKI2C_RxData(&rwbuf[1], rwbuf[0]); + if (ret < 0) + goto err; + break; + case ECS_IOCTL_RESET: + AKECS_Reset(); + break; + case ECS_IOCTL_SET_MODE: + ret = AKECS_SetMode((char)mode); + if (ret < 0) + goto err; + break; + case ECS_IOCTL_GETDATA: + ret = AKECS_TransRBuff(msg, RBUFF_SIZE); + if (ret < 0) + goto err; + break; + case ECS_IOCTL_SET_YPR: + AKECS_Report_Value(value); + break; + case ECS_IOCTL_GET_OPEN_STATUS: + status = AKECS_GetOpenStatus(); + break; + case ECS_IOCTL_GET_CLOSE_STATUS: + status = AKECS_GetCloseStatus(); + break; + case ECS_IOCTL_GET_DELAY: + delay = 
akmd_delay; + break; + case ECS_IOCTL_GET_PROJECT_NAME: + strncpy(project_name, pdata->project_name, 64); + break; + case ECS_IOCTL_GET_MATRIX: + for (i = 0; i < 4; i++) + for (j = 0; j < 3; j++) + for (k = 0; k < 3; k++) { + layouts[i][j][k] = pdata->layouts[i][j][k]; + } + break; + default: + ret = -ENOTTY; + goto err; + } + mutex_unlock(&akmd_lock); + + switch (cmd) { + case ECS_IOCTL_READ: + if (copy_to_user(argp, &rwbuf, sizeof(rwbuf))) + return -EFAULT; + break; + case ECS_IOCTL_GETDATA: + if (copy_to_user(argp, &msg, sizeof(msg))) + return -EFAULT; + break; + case ECS_IOCTL_GET_OPEN_STATUS: + case ECS_IOCTL_GET_CLOSE_STATUS: + if (copy_to_user(argp, &status, sizeof(status))) + return -EFAULT; + break; + case ECS_IOCTL_GET_DELAY: + if (copy_to_user(argp, &delay, sizeof(delay))) + return -EFAULT; + break; + case ECS_IOCTL_GET_PROJECT_NAME: + if (copy_to_user(argp, project_name, sizeof(project_name))) + return -EFAULT; + break; + case ECS_IOCTL_GET_MATRIX: + if (copy_to_user(argp, layouts, sizeof(layouts))) + return -EFAULT; + break; + default: + break; + } + + return 0; + +err: + mutex_unlock(&akmd_lock); + return ret; +} + +static void akm_work_func(struct work_struct *work) +{ + if (AKECS_GetData() < 0) + printk(KERN_ERR "AKM8973 akm_work_func: Get data failed\n"); + enable_irq(this_client->irq); +} + +static irqreturn_t akm8973_interrupt(int irq, void *dev_id) +{ + struct akm8973_data *data = dev_id; + disable_irq_nosync(this_client->irq); + schedule_work(&data->work); + return IRQ_HANDLED; +} + +static struct file_operations akmd_fops = { + .owner = THIS_MODULE, + .open = akmd_open, + .release = akmd_release, + .unlocked_ioctl = akmd_ioctl, +}; + +static struct file_operations akm_aot_fops = { + .owner = THIS_MODULE, + .open = akm_aot_open, + .release = akm_aot_release, + .unlocked_ioctl = akm_aot_ioctl, +}; + + +static struct miscdevice akm_aot_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "akm8973_aot", + .fops = &akm_aot_fops, +}; + + +static struct miscdevice akmd_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "akm8973_daemon", + .fops = &akmd_fops, +}; + +int akm8973_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + struct akm8973_data *akm; + int err = 0; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + err = -ENODEV; + goto exit_check_functionality_failed; + } + + akm = kzalloc(sizeof(struct akm8973_data), GFP_KERNEL); + if (!akm) { + err = -ENOMEM; + goto exit_alloc_data_failed; + } + + INIT_WORK(&akm->work, akm_work_func); + i2c_set_clientdata(client, akm); + + pdata = client->dev.platform_data; + if (pdata == NULL) { + printk(KERN_ERR"AKM8973 akm8973_probe: platform data is NULL\n"); + goto exit_platform_data_null; + } + this_client = client; + + err = AKECS_PowerDown(); + if (err < 0) { + printk(KERN_ERR"AKM8973 akm8973_probe: set power down mode error\n"); + goto exit_set_mode_failed; + } + + err = request_irq(client->irq, akm8973_interrupt, IRQF_TRIGGER_HIGH, + "akm8973", akm); + disable_irq(this_client->irq); + + if (err < 0) { + printk(KERN_ERR"AKM8973 akm8973_probe: request irq failed\n"); + goto exit_irq_request_failed; + } + + akm->input_dev = input_allocate_device(); + + if (!akm->input_dev) { + err = -ENOMEM; + printk(KERN_ERR + "AKM8973 akm8973_probe: Failed to allocate input device\n"); + goto exit_input_dev_alloc_failed; + } + + set_bit(EV_ABS, akm->input_dev->evbit); + /* yaw */ + input_set_abs_params(akm->input_dev, ABS_RX, 0, 360, 0, 0); + /* pitch */ + input_set_abs_params(akm->input_dev, ABS_RY, -180, 
180, 0, 0); + /* roll */ + input_set_abs_params(akm->input_dev, ABS_RZ, -90, 90, 0, 0); + /* x-axis acceleration */ + input_set_abs_params(akm->input_dev, ABS_X, -1872, 1872, 0, 0); + /* y-axis acceleration */ + input_set_abs_params(akm->input_dev, ABS_Y, -1872, 1872, 0, 0); + /* z-axis acceleration */ + input_set_abs_params(akm->input_dev, ABS_Z, -1872, 1872, 0, 0); + /* temparature */ + input_set_abs_params(akm->input_dev, ABS_THROTTLE, -30, 85, 0, 0); + /* status of magnetic sensor */ + input_set_abs_params(akm->input_dev, ABS_RUDDER, -32768, 3, 0, 0); + /* status of acceleration sensor */ + input_set_abs_params(akm->input_dev, ABS_WHEEL, -32768, 3, 0, 0); + /* step count */ + input_set_abs_params(akm->input_dev, ABS_GAS, 0, 65535, 0, 0); + /* x-axis of raw magnetic vector */ + input_set_abs_params(akm->input_dev, ABS_HAT0X, -2048, 2032, 0, 0); + /* y-axis of raw magnetic vector */ + input_set_abs_params(akm->input_dev, ABS_HAT0Y, -2048, 2032, 0, 0); + /* z-axis of raw magnetic vector */ + input_set_abs_params(akm->input_dev, ABS_BRAKE, -2048, 2032, 0, 0); + + akm->input_dev->name = "compass"; + + err = input_register_device(akm->input_dev); + + if (err) { + printk(KERN_ERR + "AKM8973 akm8973_probe: Unable to register input device: %s\n", + akm->input_dev->name); + goto exit_input_register_device_failed; + } + + err = misc_register(&akmd_device); + if (err) { + printk(KERN_ERR "AKM8973 akm8973_probe: akmd_device register failed\n"); + goto exit_misc_device_register_failed; + } + + err = misc_register(&akm_aot_device); + if (err) { + printk(KERN_ERR + "AKM8973 akm8973_probe: akm_aot_device register failed\n"); + goto exit_misc_device_register_failed; + } + + mutex_init(&sense_data_mutex); + + init_waitqueue_head(&data_ready_wq); + init_waitqueue_head(&open_wq); + + /* As default, report all information */ + atomic_set(&m_flag, 1); + atomic_set(&a_flag, 1); + atomic_set(&t_flag, 1); + atomic_set(&mv_flag, 1); + + return 0; + +exit_misc_device_register_failed: +exit_input_register_device_failed: + input_free_device(akm->input_dev); +exit_input_dev_alloc_failed: + free_irq(client->irq, akm); +exit_irq_request_failed: +exit_set_mode_failed: +exit_platform_data_null: + kfree(akm); +exit_alloc_data_failed: +exit_check_functionality_failed: + return err; + +} + +static int akm8973_remove(struct i2c_client *client) +{ + struct akm8973_data *akm = i2c_get_clientdata(client); + free_irq(client->irq, akm); + input_unregister_device(akm->input_dev); + kfree(akm); + return 0; +} +static const struct i2c_device_id akm8973_id[] = { + { AKM8973_I2C_NAME, 0 }, + { } +}; + +static struct i2c_driver akm8973_driver = { + .probe = akm8973_probe, + .remove = akm8973_remove, + .id_table = akm8973_id, + .driver = { + .name = AKM8973_I2C_NAME, + }, +}; + +static int __init akm8973_init(void) +{ + printk(KERN_INFO "AKM8973 compass driver: init\n"); + return i2c_add_driver(&akm8973_driver); +} + +static void __exit akm8973_exit(void) +{ + i2c_del_driver(&akm8973_driver); +} + +module_init(akm8973_init); +module_exit(akm8973_exit); + +MODULE_AUTHOR("viral wang "); +MODULE_DESCRIPTION("AKM8973 compass driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/akm8973_htc.c b/drivers/misc/akm8973_htc.c new file mode 100644 index 0000000000000..fe86f97b91d12 --- /dev/null +++ b/drivers/misc/akm8973_htc.c @@ -0,0 +1,795 @@ +/* drivers/i2c/chips/akm8973.c - akm8973 compass driver + * + * Copyright (C) 2008-2009 HTC Corporation. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEBUG 0 +#define MAX_FAILURE_COUNT 3 + +static struct i2c_client *this_client; + +struct akm8973_data { + struct input_dev *input_dev; + struct work_struct work; + struct early_suspend early_suspend_akm; +}; + +/* Addresses to scan -- protected by sense_data_mutex */ +static char sense_data[RBUFF_SIZE + 1]; +static struct mutex sense_data_mutex; +#define AKM8973_RETRY_COUNT 10 +static DECLARE_WAIT_QUEUE_HEAD(data_ready_wq); +static DECLARE_WAIT_QUEUE_HEAD(open_wq); + +static atomic_t data_ready; +static atomic_t open_count; +static atomic_t open_flag; +static atomic_t reserve_open_flag; + +static atomic_t m_flag; +static atomic_t a_flag; +static atomic_t t_flag; +static atomic_t mv_flag; + +static int failure_count = 0; + +static short akmd_delay = 0; + +static atomic_t suspend_flag = ATOMIC_INIT(0); + +static struct akm8973_platform_data *pdata; + +static int AKI2C_RxData(char *rxData, int length) +{ + uint8_t loop_i; + struct i2c_msg msgs[] = { + { + .addr = this_client->addr, + .flags = 0, + .len = 1, + .buf = rxData, + }, + { + .addr = this_client->addr, + .flags = I2C_M_RD, + .len = length, + .buf = rxData, + }, + }; + + for (loop_i = 0; loop_i < AKM8973_RETRY_COUNT; loop_i++) { + if (i2c_transfer(this_client->adapter, msgs, 2) > 0) { + break; + } + mdelay(10); + } + + if (loop_i >= AKM8973_RETRY_COUNT) { + printk(KERN_ERR "%s retry over %d\n", __func__, AKM8973_RETRY_COUNT); + return -EIO; + } + return 0; +} + +static int AKI2C_TxData(char *txData, int length) +{ + uint8_t loop_i; + struct i2c_msg msg[] = { + { + .addr = this_client->addr, + .flags = 0, + .len = length, + .buf = txData, + }, + }; + + for (loop_i = 0; loop_i < AKM8973_RETRY_COUNT; loop_i++) { + if (i2c_transfer(this_client->adapter, msg, 1) > 0) { + break; + } + mdelay(10); + } + + if (loop_i >= AKM8973_RETRY_COUNT) { + printk(KERN_ERR "%s retry over %d\n", __func__, AKM8973_RETRY_COUNT); + return -EIO; + } + return 0; +} + +static void AKECS_Reset(void) +{ + gpio_set_value(pdata->reset, 0); + udelay(120); + gpio_set_value(pdata->reset, 1); +} + +static int AKECS_StartMeasure(void) +{ + char buffer[2]; + + atomic_set(&data_ready, 0); + + /* Set measure mode */ + buffer[0] = AKECS_REG_MS1; + buffer[1] = AKECS_MODE_MEASURE; + + /* Set data */ + return AKI2C_TxData(buffer, 2); +} + +static int AKECS_PowerDown(void) +{ + char buffer[2]; + int ret; + + /* Set powerdown mode */ + buffer[0] = AKECS_REG_MS1; + buffer[1] = AKECS_MODE_POWERDOWN; + /* Set data */ + ret = AKI2C_TxData(buffer, 2); + if (ret < 0) + return ret; + + /* Dummy read for clearing INT pin */ + buffer[0] = AKECS_REG_TMPS; + /* Read data */ + ret = AKI2C_RxData(buffer, 1); + if (ret < 0) + return ret; + return ret; +} + +static int AKECS_StartE2PRead(void) +{ + char buffer[2]; + + /* Set measure mode */ + buffer[0] = AKECS_REG_MS1; + buffer[1] = AKECS_MODE_E2P_READ; + /* Set data */ + return AKI2C_TxData(buffer, 2); +} + +static int AKECS_GetData(void) +{ 
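+	/* Burst-read ST and the measurement registers, cache them in sense_data
+	 * and signal data_ready for AKECS_TransRBuff(). */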
+ char buffer[RBUFF_SIZE + 1]; + int ret; + + memset(buffer, 0, RBUFF_SIZE + 1); + buffer[0] = AKECS_REG_ST; + ret = AKI2C_RxData(buffer, RBUFF_SIZE+1); + if (ret < 0) + return ret; + + mutex_lock(&sense_data_mutex); + memcpy(sense_data, buffer, sizeof(buffer)); + atomic_set(&data_ready, 1); + wake_up(&data_ready_wq); + mutex_unlock(&sense_data_mutex); + + return 0; +} + +static int AKECS_SetMode(char mode) +{ + int ret; + + switch (mode) { + case AKECS_MODE_MEASURE: + ret = AKECS_StartMeasure(); + break; + case AKECS_MODE_E2P_READ: + ret = AKECS_StartE2PRead(); + break; + case AKECS_MODE_POWERDOWN: + ret = AKECS_PowerDown(); + break; + default: + return -EINVAL; + } + + /* wait at least 300us after changing mode */ + mdelay(1); + return ret; +} + +static int AKECS_TransRBuff(char *rbuf, int size) +{ + wait_event_interruptible_timeout(data_ready_wq, + atomic_read(&data_ready), 1000); + if (!atomic_read(&data_ready)) { + if (!atomic_read(&suspend_flag)) { + printk(KERN_ERR + "AKM8973 AKECS_TransRBUFF: Data not ready\n"); + failure_count++; + if (failure_count >= MAX_FAILURE_COUNT) { + printk(KERN_ERR + "AKM8973 AKECS_TransRBUFF: successive %d failure.\n", + failure_count); + atomic_set(&open_flag, -1); + wake_up(&open_wq); + failure_count = 0; + } + } + return -1; + } + + mutex_lock(&sense_data_mutex); + memcpy(&rbuf[1], &sense_data[1], size); + atomic_set(&data_ready, 0); + mutex_unlock(&sense_data_mutex); + + failure_count = 0; + return 0; +} + + +static void AKECS_Report_Value(short *rbuf) +{ + struct akm8973_data *data = i2c_get_clientdata(this_client); +#if DEBUG + printk(KERN_INFO"AKECS_Report_Value: yaw = %d, pitch = %d, roll = %d\n", rbuf[0], + rbuf[1], rbuf[2]); + printk(KERN_INFO" tmp = %d, m_stat= %d, g_stat=%d\n", rbuf[3], + rbuf[4], rbuf[5]); + printk(KERN_INFO" G_Sensor: x = %d LSB, y = %d LSB, z = %d LSB\n", + rbuf[6], rbuf[7], rbuf[8]); +#endif + /* Report magnetic sensor information */ + if (atomic_read(&m_flag)) { + input_report_abs(data->input_dev, ABS_RX, rbuf[0]); + input_report_abs(data->input_dev, ABS_RY, rbuf[1]); + input_report_abs(data->input_dev, ABS_RZ, rbuf[2]); + input_report_abs(data->input_dev, ABS_RUDDER, rbuf[4]); + } + + /* Report acceleration sensor information */ + if (atomic_read(&a_flag)) { + input_report_abs(data->input_dev, ABS_X, rbuf[6]); + input_report_abs(data->input_dev, ABS_Y, rbuf[7]); + input_report_abs(data->input_dev, ABS_Z, rbuf[8]); + input_report_abs(data->input_dev, ABS_WHEEL, rbuf[5]); + } + + /* Report temperature information */ + if (atomic_read(&t_flag)) + input_report_abs(data->input_dev, ABS_THROTTLE, rbuf[3]); + + if (atomic_read(&mv_flag)) { + input_report_abs(data->input_dev, ABS_HAT0X, rbuf[9]); + input_report_abs(data->input_dev, ABS_HAT0Y, rbuf[10]); + input_report_abs(data->input_dev, ABS_BRAKE, rbuf[11]); + } + + input_sync(data->input_dev); +} + +static int AKECS_GetOpenStatus(void) +{ + wait_event_interruptible(open_wq, (atomic_read(&open_flag) != 0)); + return atomic_read(&open_flag); +} + +static int AKECS_GetCloseStatus(void) +{ + wait_event_interruptible(open_wq, (atomic_read(&open_flag) <= 0)); + return atomic_read(&open_flag); +} + +static void AKECS_CloseDone(void) +{ + atomic_set(&m_flag, 0); + atomic_set(&a_flag, 0); + atomic_set(&t_flag, 0); + atomic_set(&mv_flag, 0); +} + +static int akm_aot_open(struct inode *inode, struct file *file) +{ + int ret = -1; + if (atomic_cmpxchg(&open_count, 0, 1) == 0) { + if (atomic_cmpxchg(&open_flag, 0, 1) == 0) { + atomic_set(&reserve_open_flag, 1); + wake_up(&open_wq); + ret 
= 0; + } + } + return ret; +} + +static int akm_aot_release(struct inode *inode, struct file *file) +{ + atomic_set(&reserve_open_flag, 0); + atomic_set(&open_flag, 0); + atomic_set(&open_count, 0); + wake_up(&open_wq); + return 0; +} + +static long +akm_aot_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + +{ + void __user *argp = (void __user *)arg; + short flag; + + switch (cmd) { + case ECS_IOCTL_APP_SET_MFLAG: + case ECS_IOCTL_APP_SET_AFLAG: + case ECS_IOCTL_APP_SET_TFLAG: + case ECS_IOCTL_APP_SET_MVFLAG: + if (copy_from_user(&flag, argp, sizeof(flag))) + return -EFAULT; + if (flag < 0 || flag > 1) + return -EINVAL; + break; + case ECS_IOCTL_APP_SET_DELAY: + if (copy_from_user(&flag, argp, sizeof(flag))) + return -EFAULT; + break; + default: + break; + } + + switch (cmd) { + case ECS_IOCTL_APP_SET_MFLAG: + atomic_set(&m_flag, flag); + break; + case ECS_IOCTL_APP_GET_MFLAG: + flag = atomic_read(&m_flag); + break; + case ECS_IOCTL_APP_SET_AFLAG: + atomic_set(&a_flag, flag); + break; + case ECS_IOCTL_APP_GET_AFLAG: + flag = atomic_read(&a_flag); + break; + case ECS_IOCTL_APP_SET_TFLAG: + atomic_set(&t_flag, flag); + break; + case ECS_IOCTL_APP_GET_TFLAG: + flag = atomic_read(&t_flag); + break; + case ECS_IOCTL_APP_SET_MVFLAG: + atomic_set(&mv_flag, flag); + break; + case ECS_IOCTL_APP_GET_MVFLAG: + flag = atomic_read(&mv_flag); + break; + case ECS_IOCTL_APP_SET_DELAY: + akmd_delay = flag; + break; + case ECS_IOCTL_APP_GET_DELAY: + flag = akmd_delay; + break; + default: + return -ENOTTY; + } + + switch (cmd) { + case ECS_IOCTL_APP_GET_MFLAG: + case ECS_IOCTL_APP_GET_AFLAG: + case ECS_IOCTL_APP_GET_TFLAG: + case ECS_IOCTL_APP_GET_MVFLAG: + case ECS_IOCTL_APP_GET_DELAY: + if (copy_to_user(argp, &flag, sizeof(flag))) + return -EFAULT; + break; + default: + break; + } + + return 0; +} + +static int akmd_open(struct inode *inode, struct file *file) +{ + return nonseekable_open(inode, file); +} + +static int akmd_release(struct inode *inode, struct file *file) +{ + AKECS_CloseDone(); + return 0; +} + +static long +akmd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + +{ + + void __user *argp = (void __user *)arg; + + char msg[RBUFF_SIZE + 1], rwbuf[5]; + int ret = -1, status; + short mode, value[12], delay; + char project_name[64]; + short layouts[4][3][3]; + int i, j, k; + + + switch (cmd) { + case ECS_IOCTL_WRITE: + case ECS_IOCTL_READ: + if (copy_from_user(&rwbuf, argp, sizeof(rwbuf))) + return -EFAULT; + break; + case ECS_IOCTL_SET_MODE: + if (copy_from_user(&mode, argp, sizeof(mode))) + return -EFAULT; + break; + case ECS_IOCTL_SET_YPR: + if (copy_from_user(&value, argp, sizeof(value))) + return -EFAULT; + break; + default: + break; + } + + switch (cmd) { + case ECS_IOCTL_WRITE: + if (rwbuf[0] < 2) + return -EINVAL; + ret = AKI2C_TxData(&rwbuf[1], rwbuf[0]); + if (ret < 0) + return ret; + break; + case ECS_IOCTL_READ: + if (rwbuf[0] < 1) + return -EINVAL; + ret = AKI2C_RxData(&rwbuf[1], rwbuf[0]); + if (ret < 0) + return ret; + break; + case ECS_IOCTL_RESET: + AKECS_Reset(); + break; + case ECS_IOCTL_SET_MODE: + ret = AKECS_SetMode((char)mode); + if (ret < 0) + return ret; + break; + case ECS_IOCTL_GETDATA: + ret = AKECS_TransRBuff(msg, RBUFF_SIZE); + if (ret < 0) + return ret; + break; + case ECS_IOCTL_SET_YPR: + AKECS_Report_Value(value); + break; + case ECS_IOCTL_GET_OPEN_STATUS: + status = AKECS_GetOpenStatus(); + break; + case ECS_IOCTL_GET_CLOSE_STATUS: + status = AKECS_GetCloseStatus(); + break; + case ECS_IOCTL_GET_DELAY: + delay = akmd_delay; + break; 
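+	/* project name and layout matrices come from the board's platform_data */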
+ case ECS_IOCTL_GET_PROJECT_NAME: + strncpy(project_name, pdata->project_name, 64); + break; + case ECS_IOCTL_GET_MATRIX: + for (i = 0; i < 4; i++) + for (j = 0; j < 3; j++) + for (k = 0; k < 3; k++) + layouts[i][j][k] = + pdata->layouts[i][j][k]; + break; + default: + return -ENOTTY; + } + + switch (cmd) { + case ECS_IOCTL_READ: + if (copy_to_user(argp, &rwbuf, sizeof(rwbuf))) + return -EFAULT; + break; + case ECS_IOCTL_GETDATA: + if (copy_to_user(argp, &msg, sizeof(msg))) + return -EFAULT; + break; + case ECS_IOCTL_GET_OPEN_STATUS: + case ECS_IOCTL_GET_CLOSE_STATUS: + if (copy_to_user(argp, &status, sizeof(status))) + return -EFAULT; + break; + case ECS_IOCTL_GET_DELAY: + if (copy_to_user(argp, &delay, sizeof(delay))) + return -EFAULT; + break; + case ECS_IOCTL_GET_PROJECT_NAME: + if (copy_to_user(argp, project_name, sizeof(project_name))) + return -EFAULT; + break; + case ECS_IOCTL_GET_MATRIX: + if (copy_to_user(argp, layouts, sizeof(layouts))) + return -EFAULT; + break; + default: + break; + } + + return 0; +} + +static void akm_work_func(struct work_struct *work) +{ + if (AKECS_GetData() < 0) + printk(KERN_ERR "AKM8973 akm_work_func: Get data failed\n"); + enable_irq(this_client->irq); +} + +static irqreturn_t akm8973_interrupt(int irq, void *dev_id) +{ + struct akm8973_data *data = dev_id; + disable_irq_nosync(this_client->irq); + schedule_work(&data->work); + return IRQ_HANDLED; +} + +static void akm8973_early_suspend(struct early_suspend *handler) +{ + atomic_set(&suspend_flag, 1); + atomic_set(&reserve_open_flag, atomic_read(&open_flag)); + atomic_set(&open_flag, 0); + wake_up(&open_wq); + disable_irq(this_client->irq); +} + +static void akm8973_early_resume(struct early_suspend *handler) +{ + enable_irq(this_client->irq); + atomic_set(&suspend_flag, 0); + atomic_set(&open_flag, atomic_read(&reserve_open_flag)); + wake_up(&open_wq); +} + +static struct file_operations akmd_fops = { + .owner = THIS_MODULE, + .open = akmd_open, + .release = akmd_release, + .unlocked_ioctl = akmd_ioctl, +}; + +static struct file_operations akm_aot_fops = { + .owner = THIS_MODULE, + .open = akm_aot_open, + .release = akm_aot_release, + .unlocked_ioctl = akm_aot_ioctl, +}; + + +static struct miscdevice akm_aot_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "akm8973_aot", + .fops = &akm_aot_fops, +}; + + +static struct miscdevice akmd_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "akm8973_daemon", + .fops = &akmd_fops, +}; + +int akm8973_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + struct akm8973_data *akm; + int err = 0; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + err = -ENODEV; + goto exit_check_functionality_failed; + } + + akm = kzalloc(sizeof(struct akm8973_data), GFP_KERNEL); + if (!akm) { + err = -ENOMEM; + goto exit_alloc_data_failed; + } + + INIT_WORK(&akm->work, akm_work_func); + i2c_set_clientdata(client, akm); + + pdata = client->dev.platform_data; + if (pdata == NULL) { + printk(KERN_ERR"AKM8973 akm8973_probe: platform data is NULL\n"); + goto exit_platform_data_null; + } + this_client = client; + + if (pdata && pdata->reset) { + err = gpio_request(pdata->reset, "akm8973"); + if (err < 0) { + printk(KERN_ERR "%s: request reset gpio failed\n", + __func__); + goto err_request_reset_gpio; + } + err = gpio_direction_output(pdata->reset, 1); + if (err < 0) { + printk(KERN_ERR + "%s: request reset gpio failed\n", __func__); + goto err_set_reset_gpio; + } + } else { + printk(KERN_ERR "%s: pdata or pdata->reset is NULL\n", + __func__); + 
goto err_request_reset_gpio; + } + + err = AKECS_PowerDown(); + if (err < 0) { + printk(KERN_ERR"AKM8973 akm8973_probe: set power down mode error\n"); + goto exit_set_mode_failed; + } + + err = request_irq(client->irq, akm8973_interrupt, IRQF_TRIGGER_HIGH, + "akm8973", akm); + + if (err < 0) { + printk(KERN_ERR"AKM8973 akm8973_probe: request irq failed\n"); + goto exit_irq_request_failed; + } + + akm->input_dev = input_allocate_device(); + + if (!akm->input_dev) { + err = -ENOMEM; + printk(KERN_ERR + "AKM8973 akm8973_probe: Failed to allocate input device\n"); + goto exit_input_dev_alloc_failed; + } + + set_bit(EV_ABS, akm->input_dev->evbit); + /* yaw */ + input_set_abs_params(akm->input_dev, ABS_RX, 0, 360, 0, 0); + /* pitch */ + input_set_abs_params(akm->input_dev, ABS_RY, -180, 180, 0, 0); + /* roll */ + input_set_abs_params(akm->input_dev, ABS_RZ, -90, 90, 0, 0); + /* x-axis acceleration */ + input_set_abs_params(akm->input_dev, ABS_X, -1872, 1872, 0, 0); + /* y-axis acceleration */ + input_set_abs_params(akm->input_dev, ABS_Y, -1872, 1872, 0, 0); + /* z-axis acceleration */ + input_set_abs_params(akm->input_dev, ABS_Z, -1872, 1872, 0, 0); + /* temparature */ + input_set_abs_params(akm->input_dev, ABS_THROTTLE, -30, 85, 0, 0); + /* status of magnetic sensor */ + input_set_abs_params(akm->input_dev, ABS_RUDDER, -32768, 3, 0, 0); + /* status of acceleration sensor */ + input_set_abs_params(akm->input_dev, ABS_WHEEL, -32768, 3, 0, 0); + /* step count */ + input_set_abs_params(akm->input_dev, ABS_GAS, 0, 65535, 0, 0); + /* x-axis of raw magnetic vector */ + input_set_abs_params(akm->input_dev, ABS_HAT0X, -2048, 2032, 0, 0); + /* y-axis of raw magnetic vector */ + input_set_abs_params(akm->input_dev, ABS_HAT0Y, -2048, 2032, 0, 0); + /* z-axis of raw magnetic vector */ + input_set_abs_params(akm->input_dev, ABS_BRAKE, -2048, 2032, 0, 0); + + akm->input_dev->name = "compass"; + + err = input_register_device(akm->input_dev); + + if (err) { + printk(KERN_ERR + "AKM8973 akm8973_probe: Unable to register input device: %s\n", + akm->input_dev->name); + goto exit_input_register_device_failed; + } + + err = misc_register(&akmd_device); + if (err) { + printk(KERN_ERR "AKM8973 akm8973_probe: akmd_device register failed\n"); + goto exit_misc_device_register_failed; + } + + err = misc_register(&akm_aot_device); + if (err) { + printk(KERN_ERR + "AKM8973 akm8973_probe: akm_aot_device register failed\n"); + goto exit_misc_device_register_failed; + } + + mutex_init(&sense_data_mutex); + + init_waitqueue_head(&data_ready_wq); + init_waitqueue_head(&open_wq); + + /* As default, do not report all information */ + atomic_set(&m_flag, 0); + atomic_set(&a_flag, 0); + atomic_set(&t_flag, 0); + atomic_set(&mv_flag, 0); + + akm->early_suspend_akm.suspend = akm8973_early_suspend; + akm->early_suspend_akm.resume = akm8973_early_resume; + register_early_suspend(&akm->early_suspend_akm); + + return 0; + +exit_misc_device_register_failed: +exit_input_register_device_failed: + input_free_device(akm->input_dev); +exit_input_dev_alloc_failed: + free_irq(client->irq, akm); +exit_irq_request_failed: +exit_set_mode_failed: +err_set_reset_gpio: + gpio_free(pdata->reset); +err_request_reset_gpio: +exit_platform_data_null: + kfree(akm); +exit_alloc_data_failed: +exit_check_functionality_failed: + return err; + +} + +static int akm8973_remove(struct i2c_client *client) +{ + struct akm8973_data *akm = i2c_get_clientdata(client); + free_irq(client->irq, akm); + input_unregister_device(akm->input_dev); + kfree(akm); + if (pdata && 
pdata->reset) + gpio_free(pdata->reset); + return 0; +} +static const struct i2c_device_id akm8973_id[] = { + { AKM8973_I2C_NAME, 0 }, + { } +}; + +static struct i2c_driver akm8973_driver = { + .probe = akm8973_probe, + .remove = akm8973_remove, + .id_table = akm8973_id, + .driver = { + .name = AKM8973_I2C_NAME, + }, +}; + +static int __init akm8973_init(void) +{ + printk(KERN_INFO "AKM8973 compass driver: init\n"); + return i2c_add_driver(&akm8973_driver); +} + +static void __exit akm8973_exit(void) +{ + i2c_del_driver(&akm8973_driver); +} + +module_init(akm8973_init); +module_exit(akm8973_exit); + +MODULE_AUTHOR("viral wang "); +MODULE_DESCRIPTION("AKM8973 compass driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/akm8976.c b/drivers/misc/akm8976.c new file mode 100644 index 0000000000000..ee99ae83a1c29 --- /dev/null +++ b/drivers/misc/akm8976.c @@ -0,0 +1,1132 @@ +/* drivers/i2c/chips/akm8976.c - akm8976 compass driver + * + * Copyright (C) 2007-2008 HTC Corporation. + * Author: Hou-Kun Chen + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEBUG 0 +#define MAX_FAILURE_COUNT 10 + +static struct i2c_client *this_client; + +struct akm8976_data { + struct input_dev *input_dev; + struct work_struct work; +}; + +/* Addresses to scan -- protected by sense_data_mutex */ +static char sense_data[RBUFF_SIZE + 1]; +static struct mutex sense_data_mutex; + +static DECLARE_WAIT_QUEUE_HEAD(data_ready_wq); +static DECLARE_WAIT_QUEUE_HEAD(open_wq); + +static char cspec_num; +static atomic_t cspec_frq; + +static atomic_t data_ready; +static atomic_t open_count; +static atomic_t open_flag; +static atomic_t reserve_open_flag; + +static atomic_t m_flag; +static atomic_t a_flag; +static atomic_t t_flag; +static atomic_t mv_flag; + +static int pffd_mode = 0; +static int failure_count = 0; + +static short akmd_delay = 0; + +static atomic_t suspend_flag = ATOMIC_INIT(0); + +static struct akm8976_platform_data *pdata; +static int revision = -1; +/* AKM HW info */ +static ssize_t gsensor_vendor_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t ret = 0; + + sprintf(buf, "AK8976A_%#x\n", revision); + ret = strlen(buf) + 1; + + return ret; +} + +static DEVICE_ATTR(vendor, 0444, gsensor_vendor_show, NULL); + +static struct kobject *android_gsensor_kobj; + +static int gsensor_sysfs_init(void) +{ + int ret ; + + android_gsensor_kobj = kobject_create_and_add("android_gsensor", NULL); + if (android_gsensor_kobj == NULL) { + printk(KERN_ERR + "AKM8976 gsensor_sysfs_init:"\ + "subsystem_register failed\n"); + ret = -ENOMEM; + goto err; + } + + ret = sysfs_create_file(android_gsensor_kobj, &dev_attr_vendor.attr); + if (ret) { + printk(KERN_ERR + "AKM8976 gsensor_sysfs_init:"\ + "sysfs_create_group failed\n"); + goto err4; + } + + return 0 ; +err4: + kobject_del(android_gsensor_kobj); +err: + return ret ; +} + +/* following are the sysfs callback functions */ + +#define config_ctrl_reg(name,address) \ +static ssize_t name##_show(struct 
device *dev, struct device_attribute *attr, \ + char *buf) \ +{ \ + struct i2c_client *client = to_i2c_client(dev); \ + return sprintf(buf, "%u\n", i2c_smbus_read_byte_data(client,address)); \ +} \ +static ssize_t name##_store(struct device *dev, struct device_attribute *attr, \ + const char *buf,size_t count) \ +{ \ + struct i2c_client *client = to_i2c_client(dev); \ + unsigned long val = simple_strtoul(buf, NULL, 10); \ + if (val > 0xff) \ + return -EINVAL; \ + i2c_smbus_write_byte_data(client,address, val); \ + return count; \ +} \ +static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, name##_show, name##_store) + +config_ctrl_reg(ms1, AKECS_REG_MS1); +config_ctrl_reg(ms2, AKECS_REG_MS2); +config_ctrl_reg(ms3, AKECS_REG_MS3); + +static int AKI2C_RxData(char *rxData, int length) +{ + struct i2c_msg msgs[] = { + { + .addr = this_client->addr, + .flags = 0, + .len = 1, + .buf = rxData, + }, + { + .addr = this_client->addr, + .flags = I2C_M_RD, + .len = length, + .buf = rxData, + }, + }; + + if (i2c_transfer(this_client->adapter, msgs, 2) < 0) { + printk(KERN_ERR "AKM8976 AKI2C_RxData: transfer error\n"); + return -EIO; + } else + return 0; +} + +static int AKI2C_TxData(char *txData, int length) +{ + + struct i2c_msg msg[] = { + { + .addr = this_client->addr, + .flags = 0, + .len = length, + .buf = txData, + }, + }; + + if (i2c_transfer(this_client->adapter, msg, 1) < 0) { + printk(KERN_ERR "AKM8976 AKI2C_TxData: transfer error\n"); + return -EIO; + } else + return 0; +} + +static int AKECS_Init(void) +{ + char buffer[4]; + + cspec_num = CSPEC_SEQ_NUM; + atomic_set(&cspec_frq, CSPEC_SFRQ_32); + + /* Prepare data */ + buffer[0] = AKECS_REG_MS2; + buffer[1] = ((CSPEC_AINT << 7) | + (cspec_num << 5) | + (atomic_read(&cspec_frq) << 4) | + (CSPEC_MCS << 1) | (CSPEC_MKS)); + buffer[2] = (CSPEC_INTEN << 2); + + return AKI2C_TxData(buffer, 3); +} + +static void AKECS_Reset(void) +{ + gpio_set_value(pdata->reset, 0); + udelay(120); + gpio_set_value(pdata->reset, 1); +} + +static int AKECS_StartMeasure(void) +{ + char buffer[2]; + int ret; + + buffer[0] = AKECS_REG_MS2; + buffer[1] = ((CSPEC_AINT << 7) | + (cspec_num << 5) | + (atomic_read(&cspec_frq) << 4) | + (CSPEC_MCS << 1) | (CSPEC_MKS)); + + /* Set data */ + ret = AKI2C_TxData(buffer, 2); + if (ret < 0) + return ret; + + /* Set measure mode */ + buffer[0] = AKECS_REG_MS1; + buffer[1] = AKECS_MODE_MEASURE; + + /* Set data */ + return AKI2C_TxData(buffer, 2); +} + +static int AKECS_StartPFFD(void) +{ + char buffer[2]; + int ret; + + /* Set PFFD mode */ + buffer[0] = AKECS_REG_MS1; + buffer[1] = AKECS_MODE_PFFD; + /* Set data */ + ret = AKI2C_TxData(buffer, 2); + if (ret < 0) + return ret; + + ret = gpio_direction_output(pdata->clk_on, 1); + if (ret < 0) + return ret; + + pffd_mode = 1; + return ret; +} + +static int AKECS_PowerDown(void) +{ + char buffer[2]; + int ret; + + /* Set powerdown mode */ + buffer[0] = AKECS_REG_MS1; + buffer[1] = AKECS_MODE_POWERDOWN; + /* Set data */ + ret = AKI2C_TxData(buffer, 2); + if (ret < 0) + return ret; + + /* Dummy read for clearing INT pin */ + buffer[0] = AKECS_REG_TMPS; + /* Read data */ + ret = AKI2C_RxData(buffer, 1); + if (ret < 0) + return ret; + + if (pffd_mode == 1) { + pffd_mode = 0; + ret = gpio_direction_output(pdata->clk_on, 0); + } + return ret; +} + +static int AKECS_StartE2PRead(void) +{ + char buffer[2]; + + /* Set measure mode */ + buffer[0] = AKECS_REG_MS1; + buffer[1] = AKECS_MODE_E2P_READ; + /* Set data */ + return AKI2C_TxData(buffer, 2); +} + +static int AKECS_GetData(void) +{ + char 
buffer[RBUFF_SIZE + 1]; + int ret; + + memset(buffer, 0, RBUFF_SIZE + 1); + buffer[0] = AKECS_REG_ST; + ret = AKI2C_RxData(buffer, 32); + if (ret < 0) + return ret; + + mutex_lock(&sense_data_mutex); + memcpy(sense_data, buffer, sizeof(buffer)); + atomic_set(&data_ready, 1); + wake_up(&data_ready_wq); + mutex_unlock(&sense_data_mutex); + + return 0; +} + +static int AKECS_SetMode(char mode) +{ + int ret, status; + char buffer[1]; + + if (mode == AKECS_MODE_MEASURE_SNG) { + /* Check INT pin before mode setting */ + status = gpio_get_value(pdata->intr); + if (status) { + printk(KERN_INFO + "AKM8976 AKECS_SetMode:"\ + "dummy read to reset INT pin \n"); + buffer[0] = AKECS_REG_TMPS; + ret = AKI2C_RxData(buffer, 1); + if (ret < 0) + return ret; + status = gpio_get_value(pdata->intr); + printk(KERN_INFO + "AKM8976 AKECS_SetMode:"\ + "after dummy read, status = %d \n", + status); + } + } + + switch (mode) { + case AKECS_MODE_MEASURE_SNG: + cspec_num = CSPEC_SNG_NUM; + ret = AKECS_StartMeasure(); + break; + case AKECS_MODE_MEASURE_SEQ: + cspec_num = CSPEC_SEQ_NUM; + ret = AKECS_StartMeasure(); + break; + case AKECS_MODE_PFFD: + ret = AKECS_StartPFFD(); + break; + case AKECS_MODE_E2P_READ: + ret = AKECS_StartE2PRead(); + break; + case AKECS_MODE_POWERDOWN: + ret = AKECS_PowerDown(); + break; + default: + return -EINVAL; + } + + /* wait at least 300us after changing mode */ + msleep(1); + return ret; +} + +static int AKECS_TransRBuff(char *rbuf, int size) +{ + wait_event_interruptible_timeout(data_ready_wq, + atomic_read(&data_ready), 1000); + + if (!atomic_read(&data_ready)) { + if (!atomic_read(&suspend_flag)) { + printk(KERN_ERR + "AKM8976 AKECS_TransRBUFF: Data not ready\n"); + failure_count++; + if (failure_count >= MAX_FAILURE_COUNT) { + printk(KERN_ERR + "AKM8976 AKECS_TransRBUFF:"\ + "successive %d failure.\n", + failure_count); + atomic_set(&open_flag, -1); + wake_up(&open_wq); + failure_count = 0; + } + } + return -1; + } + + if ((sense_data[0] & 0x02) == 0x02) { + printk(KERN_ERR "AKM8976 AKECS_TransRBUFF: Data error\n"); + return -1; + } + + mutex_lock(&sense_data_mutex); + memcpy(&rbuf[1], &sense_data[1], size); + atomic_set(&data_ready, 0); + mutex_unlock(&sense_data_mutex); + + + failure_count = 0; + return 0; +} + +static int AKECS_Set_PERST(void) +{ + char buffer[2]; + + buffer[0] = AKECS_REG_MS3; + buffer[1] = ((CSPEC_INTEN << 2) | 0x01); + + /* Set data */ + return AKI2C_TxData(buffer, 2); +} + +static int AKECS_Set_G0RST(void) +{ + char buffer[2]; + + buffer[0] = AKECS_REG_MS3; + buffer[1] = ((CSPEC_INTEN << 2) | 0x02); + + /* Set data */ + return AKI2C_TxData(buffer, 2); +} + +static void AKECS_Report_Value(short *rbuf) +{ + struct akm8976_data *data = i2c_get_clientdata(this_client); +#if DEBUG + printk(KERN_INFO + "AKECS_Report_Value: yaw = %d, pitch = %d, roll = %d\n", + rbuf[0], rbuf[1], rbuf[2]); + printk(KERN_INFO + " tmp = %d, m_stat= %d, g_stat=%d\n", + rbuf[3], rbuf[4], rbuf[5]); + printk(KERN_INFO + " G_Sensor: x = %d LSB, y = %d LSB, z = %d LSB\n", + rbuf[6], rbuf[7], rbuf[8]); +#endif + /* Report magnetic sensor information */ + if (atomic_read(&m_flag)) { + input_report_abs(data->input_dev, ABS_RX, rbuf[0]); + input_report_abs(data->input_dev, ABS_RY, rbuf[1]); + input_report_abs(data->input_dev, ABS_RZ, rbuf[2]); + input_report_abs(data->input_dev, ABS_RUDDER, rbuf[4]); + } + + /* Report acceleration sensor information */ + if (atomic_read(&a_flag)) { + input_report_abs(data->input_dev, ABS_X, rbuf[6]); + input_report_abs(data->input_dev, ABS_Y, rbuf[7]); + 
input_report_abs(data->input_dev, ABS_Z, rbuf[8]); + input_report_abs(data->input_dev, ABS_WHEEL, rbuf[5]); + } + + /* Report temperature information */ + if (atomic_read(&t_flag)) { + input_report_abs(data->input_dev, ABS_THROTTLE, rbuf[3]); + } + + if (atomic_read(&mv_flag)) { + input_report_abs(data->input_dev, ABS_HAT0X, rbuf[9]); + input_report_abs(data->input_dev, ABS_HAT0Y, rbuf[10]); + input_report_abs(data->input_dev, ABS_BRAKE, rbuf[11]); + } + + input_sync(data->input_dev); +} + +static void AKECS_Report_StepCount(short count) +{ + struct akm8976_data *data = i2c_get_clientdata(this_client); +#if DEBUG + printk(KERN_INFO"AKECS_Report_StepCount: %d \n", count); +#endif + + /* Report pedometer information */ + input_report_abs(data->input_dev, ABS_GAS, count); + input_sync(data->input_dev); +} + +static int AKECS_GetOpenStatus(void) +{ + wait_event_interruptible(open_wq, (atomic_read(&open_flag) != 0)); + return atomic_read(&open_flag); +} + +static int AKECS_GetCloseStatus(void) +{ + wait_event_interruptible(open_wq, (atomic_read(&open_flag) <= 0)); + return atomic_read(&open_flag); +} + +static void AKECS_CloseDone(void) +{ + atomic_set(&m_flag, 1); + atomic_set(&a_flag, 1); + atomic_set(&t_flag, 1); + atomic_set(&mv_flag, 1); +} + +static int akm_aot_open(struct inode *inode, struct file *file) +{ + int ret = -1; + if (atomic_cmpxchg(&open_count, 0, 1) == 0) { + if (atomic_cmpxchg(&open_flag, 0, 1) == 0) { + atomic_set(&reserve_open_flag, 1); + wake_up(&open_wq); + ret = 0; + } + } + return ret; +} + +static int akm_aot_release(struct inode *inode, struct file *file) +{ + atomic_set(&reserve_open_flag, 0); + atomic_set(&open_flag, 0); + atomic_set(&open_count, 0); + wake_up(&open_wq); + return 0; +} + +static int +akm_aot_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + short flag; + + switch (cmd) { + case ECS_IOCTL_APP_SET_MFLAG: + case ECS_IOCTL_APP_SET_AFLAG: + case ECS_IOCTL_APP_SET_TFLAG: + case ECS_IOCTL_APP_SET_MVFLAG: + if (copy_from_user(&flag, argp, sizeof(flag))) + return -EFAULT; + if (flag < 0 || flag > 1) + return -EINVAL; + break; + case ECS_IOCTL_APP_SET_DELAY: + if (copy_from_user(&flag, argp, sizeof(flag))) + return -EFAULT; + break; + default: + break; + } + + switch (cmd) { + case ECS_IOCTL_APP_SET_MFLAG: + atomic_set(&m_flag, flag); + break; + case ECS_IOCTL_APP_GET_MFLAG: + flag = atomic_read(&m_flag); + break; + case ECS_IOCTL_APP_SET_AFLAG: + atomic_set(&a_flag, flag); + break; + case ECS_IOCTL_APP_GET_AFLAG: + flag = atomic_read(&a_flag); + break; + case ECS_IOCTL_APP_SET_TFLAG: + atomic_set(&t_flag, flag); + break; + case ECS_IOCTL_APP_GET_TFLAG: + flag = atomic_read(&t_flag); + break; + case ECS_IOCTL_APP_SET_MVFLAG: + atomic_set(&mv_flag, flag); + break; + case ECS_IOCTL_APP_GET_MVFLAG: + flag = atomic_read(&mv_flag); + break; + case ECS_IOCTL_APP_SET_DELAY: + akmd_delay = flag; + break; + case ECS_IOCTL_APP_GET_DELAY: + flag = akmd_delay; + break; + default: + return -ENOTTY; + } + + switch (cmd) { + case ECS_IOCTL_APP_GET_MFLAG: + case ECS_IOCTL_APP_GET_AFLAG: + case ECS_IOCTL_APP_GET_TFLAG: + case ECS_IOCTL_APP_GET_MVFLAG: + case ECS_IOCTL_APP_GET_DELAY: + if (copy_to_user(argp, &flag, sizeof(flag))) + return -EFAULT; + break; + default: + break; + } + + return 0; +} + +static int akm_pffd_open(struct inode *inode, struct file *file) +{ + int ret = -1; + if (atomic_cmpxchg(&open_count, 0, 1) == 0) { + if (atomic_cmpxchg(&open_flag, 0, 2) == 0) { + 
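/* open_flag == 2 identifies the pedometer (PFFD) client for suspend/resume */ +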
atomic_set(&reserve_open_flag, 2); + wake_up(&open_wq); + ret = 0; + } + } + return ret; +} + +static int akm_pffd_release(struct inode *inode, struct file *file) +{ + atomic_set(&reserve_open_flag, 0); + atomic_set(&open_flag, 0); + atomic_set(&open_count, 0); + wake_up(&open_wq); + return 0; +} + +static int +akm_pffd_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + short flag; + int ret; + + switch (cmd) { + case ECS_IOCTL_APP_SET_DELAY: + if (copy_from_user(&flag, argp, sizeof(flag))) + return -EFAULT; + break; + default: + break; + } + + switch (cmd) { + case ECS_IOCTL_APP_RESET_PEDOMETER: + ret = AKECS_Set_PERST(); + if (ret < 0) + return ret; + break; + case ECS_IOCTL_APP_SET_DELAY: + akmd_delay = flag; + break; + case ECS_IOCTL_APP_GET_DELAY: + flag = akmd_delay; + break; + default: + return -ENOTTY; + } + + switch (cmd) { + case ECS_IOCTL_APP_GET_DELAY: + if (copy_to_user(argp, &flag, sizeof(flag))) + return -EFAULT; + break; + default: + break; + } + + return 0; +} + +static int akmd_open(struct inode *inode, struct file *file) +{ + return nonseekable_open(inode, file); +} + +static int akmd_release(struct inode *inode, struct file *file) +{ + AKECS_CloseDone(); + return 0; +} + +static int +akmd_ioctl(struct inode *inode, struct file *file, unsigned int cmd, + unsigned long arg) +{ + + void __user *argp = (void __user *)arg; + + char msg[RBUFF_SIZE + 1], rwbuf[5], numfrq[2]; + int ret = -1, status; + short mode, value[12], step_count, delay; + char *pbuffer = 0; + + switch (cmd) { + case ECS_IOCTL_READ: + case ECS_IOCTL_WRITE: + if (copy_from_user(&rwbuf, argp, sizeof(rwbuf))) + return -EFAULT; + break; + case ECS_IOCTL_SET_MODE: + if (copy_from_user(&mode, argp, sizeof(mode))) + return -EFAULT; + break; + case ECS_IOCTL_SET_YPR: + if (copy_from_user(&value, argp, sizeof(value))) + return -EFAULT; + break; + case ECS_IOCTL_SET_STEP_CNT: + if (copy_from_user(&step_count, argp, sizeof(step_count))) + return -EFAULT; + break; + default: + break; + } + + switch (cmd) { + case ECS_IOCTL_INIT: + ret = AKECS_Init(); + if (ret < 0) + return ret; + break; + case ECS_IOCTL_RESET: + AKECS_Reset(); + break; + case ECS_IOCTL_READ: + if (rwbuf[0] < 1) + return -EINVAL; + ret = AKI2C_RxData(&rwbuf[1], rwbuf[0]); + if (ret < 0) + return ret; + break; + case ECS_IOCTL_WRITE: + if (rwbuf[0] < 2) + return -EINVAL; + ret = AKI2C_TxData(&rwbuf[1], rwbuf[0]); + if (ret < 0) + return ret; + break; + case ECS_IOCTL_SET_MODE: + ret = AKECS_SetMode((char)mode); + if (ret < 0) + return ret; + break; + case ECS_IOCTL_GETDATA: + ret = AKECS_TransRBuff(msg, RBUFF_SIZE); + if (ret < 0) + return ret; + break; + case ECS_IOCTL_GET_NUMFRQ: + numfrq[0] = cspec_num; + numfrq[1] = atomic_read(&cspec_frq); + break; + case ECS_IOCTL_SET_PERST: + ret = AKECS_Set_PERST(); + if (ret < 0) + return ret; + break; + case ECS_IOCTL_SET_G0RST: + ret = AKECS_Set_G0RST(); + if (ret < 0) + return ret; + break; + case ECS_IOCTL_SET_YPR: + AKECS_Report_Value(value); + break; + case ECS_IOCTL_GET_OPEN_STATUS: + status = AKECS_GetOpenStatus(); + break; + case ECS_IOCTL_GET_CLOSE_STATUS: + status = AKECS_GetCloseStatus(); + break; + case ECS_IOCTL_SET_STEP_CNT: + AKECS_Report_StepCount(step_count); + break; + case ECS_IOCTL_GET_CALI_DATA: + pbuffer = get_akm_cal_ram(); + break; + case ECS_IOCTL_GET_DELAY: + delay = akmd_delay; + break; + default: + return -ENOTTY; + } + + switch (cmd) { + case ECS_IOCTL_READ: + if (copy_to_user(argp, &rwbuf, 
sizeof(rwbuf))) + return -EFAULT; + break; + case ECS_IOCTL_GETDATA: + if (copy_to_user(argp, &msg, sizeof(msg))) + return -EFAULT; + break; + case ECS_IOCTL_GET_NUMFRQ: + if (copy_to_user(argp, &numfrq, sizeof(numfrq))) + return -EFAULT; + break; + case ECS_IOCTL_GET_OPEN_STATUS: + case ECS_IOCTL_GET_CLOSE_STATUS: + if (copy_to_user(argp, &status, sizeof(status))) + return -EFAULT; + break; + case ECS_IOCTL_GET_CALI_DATA: + if (copy_to_user(argp, pbuffer, MAX_CALI_SIZE)) + return -EFAULT; + break; + case ECS_IOCTL_GET_DELAY: + if (copy_to_user(argp, &delay, sizeof(delay))) + return -EFAULT; + break; + default: + break; + } + + return 0; +} + +static void akm_work_func(struct work_struct *work) +{ + if (AKECS_GetData() < 0) + printk(KERN_ERR "AKM8976 akm_work_func: Get data failed\n"); + enable_irq(this_client->irq); +} + +static irqreturn_t akm8976_interrupt(int irq, void *dev_id) +{ + struct akm8976_data *data = dev_id; + disable_irq_nosync(this_client->irq); + schedule_work(&data->work); + return IRQ_HANDLED; +} + +static int akm8976_init_client(struct i2c_client *client) +{ + struct akm8976_data *data; + int ret; + + data = i2c_get_clientdata(client); + + mutex_init(&sense_data_mutex); + + ret = request_irq(client->irq, akm8976_interrupt, IRQF_TRIGGER_HIGH, + "akm8976", data); + + if (ret < 0) { + printk(KERN_ERR "akm8976_init_client: request irq failed\n"); + goto err; + } + + pdata = client->dev.platform_data; + if (pdata == NULL) { + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (pdata == NULL) { + ret = -ENOMEM; + goto err_alloc_data_failed; + } else { + pdata->reset = ECS_RST; + pdata->clk_on = ECS_CLK_ON; + pdata->intr = ECS_INTR; + } + } + + ret = gpio_request(pdata->reset, "akm8976"); + if (ret < 0) { + printk(KERN_ERR + "akm8976_init_client: request reset gpio failed\n"); + goto err_free_irq; + } + ret = gpio_direction_output(pdata->reset, 1); + if (ret < 0) { + printk(KERN_ERR + "akm8976_init_client: request reset gpio failed\n"); + goto err_free_gpio; + } + + ret = gpio_request(pdata->clk_on, "akm8976"); + if (ret < 0) { + printk(KERN_ERR + "akm8976_init_client: request clock gpio failed\n"); + goto err_free_gpio; + } + + ret = gpio_direction_output(pdata->clk_on, 0); + if (ret < 0) { + printk(KERN_ERR + "akm8976_init_client: request clock gpio failed\n"); + goto err_free_gpio_2; + } + + init_waitqueue_head(&data_ready_wq); + init_waitqueue_head(&open_wq); + + /* As default, report all information */ + atomic_set(&m_flag, 1); + atomic_set(&a_flag, 1); + atomic_set(&t_flag, 1); + atomic_set(&mv_flag, 1); + + return 0; + +err_free_gpio_2: + gpio_free(pdata->clk_on); +err_free_gpio: + gpio_free(pdata->reset); +err_free_irq: + free_irq(client->irq, 0); +err_alloc_data_failed: +err: + return ret; +} + +static struct file_operations akmd_fops = { + .owner = THIS_MODULE, + .open = akmd_open, + .release = akmd_release, + .ioctl = akmd_ioctl, +}; + +static struct file_operations akm_aot_fops = { + .owner = THIS_MODULE, + .open = akm_aot_open, + .release = akm_aot_release, + .ioctl = akm_aot_ioctl, +}; + +static struct file_operations akm_pffd_fops = { + .owner = THIS_MODULE, + .open = akm_pffd_open, + .release = akm_pffd_release, + .ioctl = akm_pffd_ioctl, +}; + +static struct miscdevice akm_aot_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "akm8976_aot", + .fops = &akm_aot_fops, +}; + +static struct miscdevice akm_pffd_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "akm8976_pffd", + .fops = &akm_pffd_fops, +}; + +static struct miscdevice akmd_device = { + .minor = 
MISC_DYNAMIC_MINOR, + .name = "akm8976_daemon", + .fops = &akmd_fops, +}; + +static int akm8976_probe( + struct i2c_client *client, const struct i2c_device_id *id) +{ + struct akm8976_data *akm; + int err; + char rxData[2]; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + err = -ENODEV; + goto exit_check_functionality_failed; + } + + akm = kzalloc(sizeof(struct akm8976_data), GFP_KERNEL); + if (!akm) { + err = -ENOMEM; + goto exit_alloc_data_failed; + } + + INIT_WORK(&akm->work, akm_work_func); + i2c_set_clientdata(client, akm); + akm8976_init_client(client); + this_client = client; + + /* Set EEPROM access mode */ + err = AKECS_StartE2PRead(); + if (err < 0) + goto exit_input_dev_alloc_failed; + /* Read ETS from EEPROM */ + rxData[0] = 0x42; + err = AKI2C_RxData(rxData, 1); + if (err < 0) + goto exit_input_dev_alloc_failed; + revision = (0x03 & (rxData[0] >> 6)); + + /* Set Power down mode */ + err = AKECS_PowerDown(); + if (err < 0) + goto exit_input_dev_alloc_failed; + + akm->input_dev = input_allocate_device(); + + if (!akm->input_dev) { + err = -ENOMEM; + printk(KERN_ERR + "akm8976_probe: Failed to allocate input device\n"); + goto exit_input_dev_alloc_failed; + } + + set_bit(EV_ABS, akm->input_dev->evbit); + /* yaw */ + input_set_abs_params(akm->input_dev, ABS_RX, 0, 360, 0, 0); + /* pitch */ + input_set_abs_params(akm->input_dev, ABS_RY, -180, 180, 0, 0); + /* roll */ + input_set_abs_params(akm->input_dev, ABS_RZ, -90, 90, 0, 0); + /* x-axis acceleration */ + input_set_abs_params(akm->input_dev, ABS_X, -1872, 1872, 0, 0); + /* y-axis acceleration */ + input_set_abs_params(akm->input_dev, ABS_Y, -1872, 1872, 0, 0); + /* z-axis acceleration */ + input_set_abs_params(akm->input_dev, ABS_Z, -1872, 1872, 0, 0); + /* temparature */ + input_set_abs_params(akm->input_dev, ABS_THROTTLE, -30, 85, 0, 0); + /* status of magnetic sensor */ + input_set_abs_params(akm->input_dev, ABS_RUDDER, -32768, 3, 0, 0); + /* status of acceleration sensor */ + input_set_abs_params(akm->input_dev, ABS_WHEEL, -32768, 3, 0, 0); + /* step count */ + input_set_abs_params(akm->input_dev, ABS_GAS, 0, 65535, 0, 0); + /* x-axis of raw magnetic vector */ + input_set_abs_params(akm->input_dev, ABS_HAT0X, -2048, 2032, 0, 0); + /* y-axis of raw magnetic vector */ + input_set_abs_params(akm->input_dev, ABS_HAT0Y, -2048, 2032, 0, 0); + /* z-axis of raw magnetic vector */ + input_set_abs_params(akm->input_dev, ABS_BRAKE, -2048, 2032, 0, 0); + + akm->input_dev->name = "compass"; + + err = input_register_device(akm->input_dev); + + if (err) { + printk(KERN_ERR + "akm8976_probe: Unable to register input device: %s\n", + akm->input_dev->name); + goto exit_input_register_device_failed; + } + + err = misc_register(&akmd_device); + if (err) { + printk(KERN_ERR + "akm8976_probe: akmd_device register failed\n"); + goto exit_misc_device_register_failed; + } + + err = misc_register(&akm_aot_device); + if (err) { + printk(KERN_ERR + "akm8976_probe: akm_aot_device register failed\n"); + goto exit_misc_device_register_failed; + } + + err = misc_register(&akm_pffd_device); + if (err) { + printk(KERN_ERR + "akm8976_probe: akm_pffd_device register failed\n"); + goto exit_misc_device_register_failed; + } + + err = device_create_file(&client->dev, &dev_attr_ms1); + err = device_create_file(&client->dev, &dev_attr_ms2); + err = device_create_file(&client->dev, &dev_attr_ms3); + + gsensor_sysfs_init(); + + return 0; + +exit_misc_device_register_failed: +exit_input_register_device_failed: + input_free_device(akm->input_dev); 
+exit_input_dev_alloc_failed: + kfree(akm); +exit_alloc_data_failed: +exit_check_functionality_failed: + return err; +} + +static int akm8976_remove(struct i2c_client *client) +{ + struct akm8976_data *akm = i2c_get_clientdata(client); + free_irq(client->irq, akm); + input_unregister_device(akm->input_dev); + kfree(akm); + return 0; +} + +static int akm8976_suspend(struct i2c_client *client, pm_message_t mesg) +{ + atomic_set(&suspend_flag, 1); + if (atomic_read(&open_flag) == 2) + AKECS_SetMode(AKECS_MODE_POWERDOWN); + + atomic_set(&reserve_open_flag, atomic_read(&open_flag)); + atomic_set(&open_flag, 0); + wake_up(&open_wq); + disable_irq(this_client->irq); + return 0; +} + +static int akm8976_resume(struct i2c_client *client) +{ + enable_irq(this_client->irq); + if (atomic_read(&open_flag) == 2) + AKECS_SetMode(AKECS_MODE_PFFD); + atomic_set(&suspend_flag, 0); + atomic_set(&open_flag, atomic_read(&reserve_open_flag)); + wake_up(&open_wq); + return 0; +} + +static const struct i2c_device_id akm8976_id[] = { + { "akm8976", 0 }, + { } +}; + +static struct i2c_driver akm8976_driver = { + .probe = akm8976_probe, + .remove = akm8976_remove, + .suspend = akm8976_suspend, + .resume = akm8976_resume, + .id_table = akm8976_id, + .driver = { + .name = "akm8976", + }, +}; + +static int __init akm8976_init(void) +{ + printk(KERN_INFO "AKM8976A compass driver: init\n"); + return i2c_add_driver(&akm8976_driver); +} + +static void __exit akm8976_exit(void) +{ + i2c_del_driver(&akm8976_driver); +} + +module_init(akm8976_init); +module_exit(akm8976_exit); + +MODULE_AUTHOR("Hou-Kun Chen "); +MODULE_DESCRIPTION("AKM8976A compass driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/bma150_spi.c b/drivers/misc/bma150_spi.c new file mode 100644 index 0000000000000..f00a3c9cb5f39 --- /dev/null +++ b/drivers/misc/bma150_spi.c @@ -0,0 +1,497 @@ +/* drivers/misc/bma150_spi.c - bma150 G-sensor driver + * + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct early_suspend bma_early_suspend; + +static struct bma150_platform_data *this_pdata; + +static struct mutex gsensor_RW_mutex; +static struct mutex gsensor_set_mode_mutex; + +static int spi_microp_enable(uint8_t on) +{ + int ret; + ret = microp_spi_vote_enable(SPI_GSENSOR, on); + if (ret < 0) + printk(KERN_ERR "%s: i2c_write_block fail\n", __func__); + + return ret; +} + +static int spi_gsensor_read(uint8_t *data) +{ + int ret; + + mutex_lock(&gsensor_RW_mutex); + + ret = microp_i2c_write(MICROP_I2C_WCMD_GSENSOR_REG_DATA_REQ, data, 1); + if (ret < 0) { + printk(KERN_ERR "%s: i2c_write_block fail\n", __func__); + mutex_unlock(&gsensor_RW_mutex); + return ret; + } + + ret = microp_i2c_read(MICROP_I2C_RCMD_GSENSOR_REG_DATA, data, 2); + if (ret < 0) { + printk(KERN_ERR "%s: i2c_read_block fail\n", __func__); + mutex_unlock(&gsensor_RW_mutex); + return ret; + } + + mutex_unlock(&gsensor_RW_mutex); + + return ret; +} + +static int spi_gsensor_write(uint8_t *data) +{ + int ret; + + mutex_lock(&gsensor_RW_mutex); + + ret = microp_i2c_write(MICROP_I2C_WCMD_GSENSOR_REG, data, 2); + if (ret < 0) { + printk(KERN_ERR "%s: i2c_write_block fail\n", __func__); + mutex_unlock(&gsensor_RW_mutex); + return ret; + } + + mutex_unlock(&gsensor_RW_mutex); + + return ret; +} + +static int spi_gsensor_init_hw(void) +{ + char buffer[2]; + + memset(buffer, 0x0, sizeof(buffer)); + buffer[0] = RANGE_BWIDTH_REG; + if (spi_gsensor_read(buffer) < 0) + return -EIO; + + /*printk("spi_gsensor_init_hw,read RANGE_BWIDTH_REG = %x " + , buffer[1]);*/ + + buffer[1] = (buffer[1]&0xe0); + buffer[0] = RANGE_BWIDTH_REG; + if (spi_gsensor_write(buffer) < 0) + return -EIO; + + buffer[0] = SMB150_CONF2_REG; + if (spi_gsensor_read(buffer) < 0) + return -EIO; + + buffer[1] = buffer[1]|1<<3; + buffer[0] = SMB150_CONF2_REG; + if (spi_gsensor_write(buffer) < 0) + return -EIO; + + return 0; +} + +/* +static int spi_gsensor_read_version(void) +{ + uint8_t buffer[2]; + int ret = -EIO; + + buffer[0] = VERSION_REG; + buffer[1] = 1; + ret = spi_gsensor_read(buffer); + if (ret < 0) { + printk(KERN_ERR "%s: get al_version fail(%d)\n", __func__, ret); + return ret; + } + printk(KERN_INFO "%s: al_version: 0x%2.2X\n", __func__, buffer[0]); + + buffer[0] = CHIP_ID_REG; + buffer[1] = 1; + ret = spi_gsensor_read(buffer); + if (ret < 0) { + printk(KERN_ERR "%s: get chip_id fail(%d)\n", __func__, ret); + return ret; + } + printk(KERN_INFO "%s: chip_id: 0x%2.2X\n", __func__, buffer[0]); + return 0; +} +*/ +static int spi_bma150_TransRBuff(short *rbuf) +{ + int ret; + unsigned char buffer[6]; + memset(buffer, 0, 6); + + mutex_lock(&gsensor_RW_mutex); + + buffer[0] = 1; + ret = microp_i2c_write(MICROP_I2C_WCMD_GSENSOR_DATA_REQ, buffer, 1); + if (ret < 0) { + printk(KERN_ERR "%s: i2c_write_block fail\n", __func__); + mutex_unlock(&gsensor_RW_mutex); + return ret; + } + + if (this_pdata && this_pdata->microp_new_cmd && + this_pdata->microp_new_cmd == 1) { + /*printk(KERN_DEBUG "%s: New MicroP command\n", __func__);*/ + ret = microp_i2c_read(MICROP_I2C_RCMD_GSENSOR_DATA, buffer, 6); + rbuf[0] = buffer[0]<<2|buffer[1]>>6; + if (rbuf[0]&0x200) + rbuf[0] -= 1<<10; + rbuf[1] = buffer[2]<<2|buffer[3]>>6; + if (rbuf[1]&0x200) + rbuf[1] -= 1<<10; + rbuf[2] = buffer[4]<<2|buffer[5]>>6; + if (rbuf[2]&0x200) + rbuf[2] -= 1<<10; + } else { + /* For Passion with V01 ~ V05 Microp */ + /*printk(KERN_DEBUG "%s: Old MicroP command\n", __func__);*/ + 
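/* older microp firmware returns X, Y and Z as three separate 2-byte reads */ +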
ret = microp_i2c_read(MICROP_I2C_RCMD_GSENSOR_X_DATA, + buffer, 2); + if (ret < 0) { + printk(KERN_ERR "%s: i2c_read_block fail\n", __func__); + mutex_unlock(&gsensor_RW_mutex); + return ret; + } + rbuf[0] = buffer[0]<<2|buffer[1]>>6; + if (rbuf[0]&0x200) + rbuf[0] -= 1<<10; + + ret = microp_i2c_read(MICROP_I2C_RCMD_GSENSOR_Y_DATA, + buffer, 2); + if (ret < 0) { + printk(KERN_ERR "%s: i2c_read_block fail\n", __func__); + mutex_unlock(&gsensor_RW_mutex); + return ret; + } + rbuf[1] = buffer[0]<<2|buffer[1]>>6; + if (rbuf[1]&0x200) + rbuf[1] -= 1<<10; + + ret = microp_i2c_read(MICROP_I2C_RCMD_GSENSOR_Z_DATA, + buffer, 2); + if (ret < 0) { + printk(KERN_ERR "%s: i2c_read_block fail\n", __func__); + mutex_unlock(&gsensor_RW_mutex); + return ret; + } + rbuf[2] = buffer[0]<<2|buffer[1]>>6; + if (rbuf[2]&0x200) + rbuf[2] -= 1<<10; + } +/* printk("X=%d, Y=%d, Z=%d\n",rbuf[0],rbuf[1],rbuf[2]);*/ + +/* printk(KERN_DEBUG "%s: 0x%2.2X 0x%2.2X 0x%2.2X \ +0x%2.2X 0x%2.2X 0x%2.2X\n", + __func__, buffer[0], buffer[1], buffer[2], \ + buffer[3], buffer[4], buffer[5]);*/ + + mutex_unlock(&gsensor_RW_mutex); + + return 1; +} + +static int __spi_bma150_set_mode(char mode) +{ + char buffer[2]; + int ret; + mutex_lock(&gsensor_set_mode_mutex); + if (mode == BMA_MODE_NORMAL) { + spi_microp_enable(1); + printk(KERN_INFO "%s: BMA get into NORMAL mode!\n", + __func__); + } + + buffer[0] = SMB150_CTRL_REG; + ret = spi_gsensor_read(buffer); + if (ret < 0) { + mutex_unlock(&gsensor_set_mode_mutex); + return -1; + } + + buffer[1] = (buffer[1]&0xfe)|mode; + buffer[0] = SMB150_CTRL_REG; + ret = spi_gsensor_write(buffer); + + if (mode == BMA_MODE_SLEEP) { + spi_microp_enable(0); + printk(KERN_INFO "%s: BMA get into SLEEP mode!\n", + __func__); + } + mutex_unlock(&gsensor_set_mode_mutex); + return ret; +} + +static DEFINE_MUTEX(bma150_lock); + +static int spi_bma150_open(struct inode *inode, struct file *file) +{ + return nonseekable_open(inode, file); +} + +static int spi_bma150_release(struct inode *inode, struct file *file) +{ + return 0; +} + +static long spi_bma150_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + char rwbuf[8]; + char *toRbuf; + int ret = -1; + short buf[8], temp; + + switch (cmd) { + case BMA_IOCTL_READ: + case BMA_IOCTL_WRITE: + case BMA_IOCTL_SET_MODE: + if (copy_from_user(&rwbuf, argp, sizeof(rwbuf))) + return -EFAULT; + break; + case BMA_IOCTL_READ_ACCELERATION: + if (copy_from_user(&buf, argp, sizeof(buf))) + return -EFAULT; + break; + default: + break; + } + + mutex_lock(&bma150_lock); + switch (cmd) { + case BMA_IOCTL_INIT: + ret = spi_gsensor_init_hw(); + if (ret < 0) + goto err; + break; + + case BMA_IOCTL_READ: + if (rwbuf[0] < 1) { + ret = -EINVAL; + goto err; + } + ret = spi_gsensor_read(&rwbuf[1]); + if (ret < 0) + goto err; + break; + case BMA_IOCTL_WRITE: + if (rwbuf[0] < 2) { + ret = -EINVAL; + goto err; + } + ret = spi_gsensor_write(&rwbuf[1]); + if (ret < 0) + goto err; + break; + case BMA_IOCTL_READ_ACCELERATION: + ret = spi_bma150_TransRBuff(&buf[0]); + if (ret < 0) + goto err; + break; + case BMA_IOCTL_SET_MODE: + /*printk(KERN_DEBUG + "%s: BMA_IOCTL_SET_MODE by ioctl = %d\n", + __func__,rwbuf[0]);*/ + ret = __spi_bma150_set_mode(rwbuf[0]); + if (ret < 0) + return ret; + break; + case BMA_IOCTL_GET_INT: + temp = 0; + break; + case BMA_IOCTL_GET_CHIP_LAYOUT: + if (this_pdata) + temp = this_pdata->chip_layout; + break; + default: + ret = -ENOTTY; + goto err; + } + mutex_unlock(&bma150_lock); + + switch (cmd) { + case 
BMA_IOCTL_READ: + toRbuf = &rwbuf[1]; + if (copy_to_user(argp, toRbuf, sizeof(rwbuf)-1)) + return -EFAULT; + break; + case BMA_IOCTL_READ_ACCELERATION: + if (copy_to_user(argp, &buf, sizeof(buf))) + return -EFAULT; + break; + case BMA_IOCTL_GET_INT: + if (copy_to_user(argp, &temp, sizeof(temp))) + return -EFAULT; + break; + case BMA_IOCTL_GET_CHIP_LAYOUT: + if (copy_to_user(argp, &temp, sizeof(temp))) + return -EFAULT; + break; + default: + break; + } + + return 0; +err: + mutex_unlock(&bma150_lock); + return ret; +} + +static struct file_operations spi_bma_fops = { + .owner = THIS_MODULE, + .open = spi_bma150_open, + .release = spi_bma150_release, + .unlocked_ioctl = spi_bma150_ioctl, +}; + +static struct miscdevice spi_bma_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "bma150", + .fops = &spi_bma_fops, +}; + +static void bma150_early_suspend(struct early_suspend *handler) +{ + int ret = 0; + ret = __spi_bma150_set_mode(BMA_MODE_SLEEP); + + printk(KERN_DEBUG + "%s: spi_bma150_set_mode returned = %d!\n", + __func__, ret); +} + +static void bma150_early_resume(struct early_suspend *handler) +{ + /*printk(KERN_DEBUG + "%s: spi_bma150_set_mode returned = %d!\n", + __func__, ret);*/ +} + +static int spi_gsensor_initial(void) +{ + int ret; +/* ret = spi_microp_enable(1); + if (ret < 0) { + printk(KERN_ERR "%s: spi_microp_enable fail\n", __func__); + return ret; + }*/ +/* ret = spi_gsensor_read_version(); + if (ret < 0) { + printk(KERN_ERR "%s: get version fail\n", __func__); + return ret; + }*/ + +/* ret = microp_gsensor_init_hw(client); + if (ret < 0) { + printk(KERN_ERR "%s: init g-sensor fail\n", __func__); + return ret; + } +*/ + ret = misc_register(&spi_bma_device); + if (ret < 0) { + printk(KERN_ERR "%s: init misc_register fail\n", __func__); + return ret; + } + + mutex_init(&gsensor_RW_mutex); + mutex_init(&gsensor_set_mode_mutex); + + + ret = spi_microp_enable(1); + if (ret) { + printk(KERN_ERR "%s: spi_microp_enable(1) fail!\n", __func__); + goto err_spi_enable; + } + + ret = __spi_bma150_set_mode(BMA_MODE_SLEEP); + if (ret) { + printk(KERN_ERR "%s: set BMA_MODE_SLEEP fail!\n", __func__); + goto err_set_mode; + } + + bma_early_suspend.suspend = bma150_early_suspend; + bma_early_suspend.resume = bma150_early_resume; + register_early_suspend(&bma_early_suspend); + + return 0; + +err_set_mode: + spi_microp_enable(0); +err_spi_enable: + misc_deregister(&spi_bma_device); + + return ret; +} + +static int spi_bma150_probe(struct platform_device *pdev) +{ + printk(KERN_INFO "%s: G-sensor connect with microP: " + "start initial\n", __func__); + + this_pdata = pdev->dev.platform_data; +/* + printk(KERN_DEBUG "%s: this_pdata->microp_new_cmd = %d\n", + __func__, this_pdata->microp_new_cmd); +*/ + spi_gsensor_initial(); + + return 0; +} + +static int spi_bma150_remove(struct platform_device *pdev) +{ + mutex_destroy(&gsensor_set_mode_mutex); + return 0; +} + +static struct platform_driver spi_bma150_driver = { + .probe = spi_bma150_probe, + .remove = spi_bma150_remove, + .driver = { + .name = BMA150_G_SENSOR_NAME, + .owner = THIS_MODULE, + }, +}; + +static int __init spi_bma150_init(void) +{ + return platform_driver_register(&spi_bma150_driver); + +} + +static void __exit spi_bma150_exit(void) +{ + platform_driver_unregister(&spi_bma150_driver); +} + +module_init(spi_bma150_init); +module_exit(spi_bma150_exit); + +MODULE_DESCRIPTION("BMA150 G-sensor driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/ep93xx_pwm.c b/drivers/misc/ep93xx_pwm.c index 46b3439673e90..16d7179e2f9b8 100644 
--- a/drivers/misc/ep93xx_pwm.c +++ b/drivers/misc/ep93xx_pwm.c @@ -249,11 +249,11 @@ static ssize_t ep93xx_pwm_set_invert(struct device *dev, static DEVICE_ATTR(min_freq, S_IRUGO, ep93xx_pwm_get_min_freq, NULL); static DEVICE_ATTR(max_freq, S_IRUGO, ep93xx_pwm_get_max_freq, NULL); -static DEVICE_ATTR(freq, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(freq, S_IWUSR | S_IRUGO, ep93xx_pwm_get_freq, ep93xx_pwm_set_freq); -static DEVICE_ATTR(duty_percent, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(duty_percent, S_IWUSR | S_IRUGO, ep93xx_pwm_get_duty_percent, ep93xx_pwm_set_duty_percent); -static DEVICE_ATTR(invert, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(invert, S_IWUSR | S_IRUGO, ep93xx_pwm_get_invert, ep93xx_pwm_set_invert); static struct attribute *ep93xx_pwm_attrs[] = { diff --git a/drivers/misc/pmem.c b/drivers/misc/pmem.c index abb73c1431646..c19241a1ae56b 100644 --- a/drivers/misc/pmem.c +++ b/drivers/misc/pmem.c @@ -1,6 +1,7 @@ /* drivers/android/pmem.c * * Copyright (C) 2007 Google, Inc. + * Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -19,20 +20,40 @@ #include #include #include -#include #include #include #include -#include +#include +#ifdef CONFIG_MEMORY_HOTPLUG +#include +#include +#endif #include #include #include +#include +#include + +#define PMEM_MAX_USER_SPACE_DEVICES (10) +#define PMEM_MAX_KERNEL_SPACE_DEVICES (2) +#define PMEM_MAX_DEVICES \ + (PMEM_MAX_USER_SPACE_DEVICES + PMEM_MAX_KERNEL_SPACE_DEVICES) -#define PMEM_MAX_DEVICES 10 -#define PMEM_MAX_ORDER 128 +#define PMEM_MAX_ORDER (128) #define PMEM_MIN_ALLOC PAGE_SIZE +#define PMEM_INITIAL_NUM_BITMAP_ALLOCATIONS (64) + +#define PMEM_32BIT_WORD_ORDER (5) +#define PMEM_BITS_PER_WORD_MASK (BITS_PER_LONG - 1) + +#ifdef CONFIG_ANDROID_PMEM_DEBUG #define PMEM_DEBUG 1 +#else +#define PMEM_DEBUG 0 +#endif + +#define SYSTEM_ALLOC_RETRY 10 /* indicates that a refernce to this file has been taken via get_pmem_file, * the file should not be released until put_pmem_file is called */ @@ -50,7 +71,6 @@ #define PMEM_FLAGS_SUBMAP 0x1 << 3 #define PMEM_FLAGS_UNSUBMAP 0x1 << 4 - struct pmem_data { /* in alloc mode: an index into the bitmap * in no_alloc mode: the size of the allocation */ @@ -93,13 +113,42 @@ struct pmem_region_node { #define PMEM_DEBUG_MSGS 0 #if PMEM_DEBUG_MSGS #define DLOG(fmt,args...) \ - do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \ + do { pr_debug("[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \ ##args); } \ while (0) #else #define DLOG(x...) 
do {} while (0) #endif +enum pmem_align { + PMEM_ALIGN_4K, + PMEM_ALIGN_1M, +}; + +#define PMEM_NAME_SIZE 16 + +#define MEMORY_STABLE 0 +#define MEMORY_UNSTABLE_NO_MEMORY_ALLOCATED 1 +#define MEMORY_UNSTABLE_MEMORY_ALLOCATED 2 + +#define NO_UNSTABLE_MEMORY 0 +#define UNSTABLE_UNINITIALIZED 1 +#define UNSTABLE_INITIALIZED 2 + +int unstable_pmem_present; +/* start of unstable PMEM physical memory */ +unsigned long unstable_pmem_start; +/* size of unstable PMEM physical memory */ +unsigned long unstable_pmem_size; + +struct alloc_list { + void *addr; /* physical addr of allocation */ + void *aaddr; /* aligned physical addr */ + unsigned int size; /* total size of allocation */ + unsigned char __iomem *vaddr; /* Virtual addr */ + struct list_head allocs; +}; + struct pmem_info { struct miscdevice dev; /* physical start address of the remaped pmem space */ @@ -112,62 +161,131 @@ struct pmem_info { unsigned long num_entries; /* pfn of the garbage page in memory */ unsigned long garbage_pfn; + /* memory state (stable/unstable with or without memory */ + int memory_state; + + char name[PMEM_NAME_SIZE]; + /* index of the garbage page in the pmem space */ int garbage_index; - /* the bitmap for the region indicating which entries are allocated - * and which are free */ - struct pmem_bits *bitmap; - /* indicates the region should not be managed with an allocator */ - unsigned no_allocator; + + enum pmem_allocator_type allocator_type; + + int (*allocate)(const int, + const unsigned long, + const unsigned int); + int (*free)(int, int); + int (*free_space)(int, struct pmem_freespace *); + unsigned long (*len)(int, struct pmem_data *); + unsigned long (*start_addr)(int, struct pmem_data *); + int (*kapi_free_index)(const int32_t, int); + + /* actual size of memory element, e.g.: (4 << 10) is 4K */ + unsigned int quantum; + /* indicates maps of this region should be cached, if a mix of * cached and uncached is desired, set this and open the device with * O_SYNC to get an uncached region */ unsigned cached; unsigned buffered; - /* in no_allocator mode the first mapper gets the whole space and sets - * this flag */ - unsigned allocated; + union { + struct { + /* in all_or_nothing allocator mode the first mapper + * gets the whole space and sets this flag */ + unsigned allocated; + } all_or_nothing; + + struct { + /* the buddy allocator bitmap for the region + * indicating which entries are allocated and which + * are free. 
+ */ + + struct pmem_bits *buddy_bitmap; + } buddy_bestfit; + + struct { + unsigned int bitmap_free; /* # of zero bits/quanta */ + uint32_t *bitmap; + int32_t bitmap_allocs; + struct { + short bit; + unsigned short quanta; + } *bitm_alloc; + } bitmap; + + struct { + unsigned long used; /* Bytes currently allocated */ + struct list_head alist; /* List of allocations */ + } system_mem; + } allocator; + + int id; + struct kobject kobj; + /* for debugging, creates a list of pmem file structs, the - * data_list_lock should be taken before pmem_data->sem if both are + * data_list_mutex should be taken before pmem_data->sem if both are * needed */ - struct mutex data_list_lock; + struct mutex data_list_mutex; struct list_head data_list; - /* pmem_sem protects the bitmap array - * a write lock should be held when modifying entries in bitmap - * a read lock should be held when reading data from bits or - * dereferencing a pointer into bitmap - * - * pmem_data->sem protects the pmem data of a particular file - * Many of the function that require the pmem_data->sem have a non- - * locking version for when the caller is already holding that sem. + /* arena_mutex protects the global allocation arena * * IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER: - * down(pmem_data->sem) => down(bitmap_sem) + * down(pmem_data->sem) => mutex_lock(arena_mutex) */ - struct rw_semaphore bitmap_sem; + struct mutex arena_mutex; long (*ioctl)(struct file *, unsigned int, unsigned long); int (*release)(struct inode *, struct file *); }; +#define to_pmem_info_id(a) (container_of(a, struct pmem_info, kobj)->id) static struct pmem_info pmem[PMEM_MAX_DEVICES]; static int id_count; +static struct { + const char * const name; + const int memtype; + const int fallback_memtype; + int info_id; +} kapi_memtypes[] = { +#ifdef CONFIG_KERNEL_PMEM_SMI_REGION + { PMEM_KERNEL_SMI_DATA_NAME, + PMEM_MEMTYPE_SMI, + PMEM_MEMTYPE_EBI1, /* Fall back to EBI1 automatically */ + -1 }, +#endif + { PMEM_KERNEL_EBI1_DATA_NAME, + PMEM_MEMTYPE_EBI1, + PMEM_INVALID_MEMTYPE, /* MUST be set invalid if no fallback */ + -1 }, +}; -#define PMEM_IS_FREE(id, index) !(pmem[id].bitmap[index].allocated) -#define PMEM_ORDER(id, index) pmem[id].bitmap[index].order -#define PMEM_BUDDY_INDEX(id, index) (index ^ (1 << PMEM_ORDER(id, index))) -#define PMEM_NEXT_INDEX(id, index) (index + (1 << PMEM_ORDER(id, index))) -#define PMEM_OFFSET(index) (index * PMEM_MIN_ALLOC) -#define PMEM_START_ADDR(id, index) (PMEM_OFFSET(index) + pmem[id].base) -#define PMEM_LEN(id, index) ((1 << PMEM_ORDER(id, index)) * PMEM_MIN_ALLOC) -#define PMEM_END_ADDR(id, index) (PMEM_START_ADDR(id, index) + \ - PMEM_LEN(id, index)) -#define PMEM_START_VADDR(id, index) (PMEM_OFFSET(id, index) + pmem[id].vbase) -#define PMEM_END_VADDR(id, index) (PMEM_START_VADDR(id, index) + \ - PMEM_LEN(id, index)) +#define PMEM_SYSFS_DIR_NAME "pmem_regions" /* under /sys/kernel/ */ +static struct kset *pmem_kset; + +#define PMEM_IS_FREE_BUDDY(id, index) \ + (!(pmem[id].allocator.buddy_bestfit.buddy_bitmap[index].allocated)) +#define PMEM_BUDDY_ORDER(id, index) \ + (pmem[id].allocator.buddy_bestfit.buddy_bitmap[index].order) +#define PMEM_BUDDY_INDEX(id, index) \ + (index ^ (1 << PMEM_BUDDY_ORDER(id, index))) +#define PMEM_BUDDY_NEXT_INDEX(id, index) \ + (index + (1 << PMEM_BUDDY_ORDER(id, index))) +#define PMEM_OFFSET(index) (index * pmem[id].quantum) +#define PMEM_START_ADDR(id, index) \ + (PMEM_OFFSET(index) + pmem[id].base) +#define PMEM_BUDDY_LEN(id, index) \ + ((1 << PMEM_BUDDY_ORDER(id, index)) * 
pmem[id].quantum) +#define PMEM_END_ADDR(id, index) \ + (PMEM_START_ADDR(id, index) + PMEM_LEN(id, index)) +#define PMEM_START_VADDR(id, index) \ + (PMEM_OFFSET(id, index) + pmem[id].vbase) +#define PMEM_END_VADDR(id, index) \ + (PMEM_START_VADDR(id, index) + PMEM_LEN(id, index)) #define PMEM_REVOKED(data) (data->flags & PMEM_FLAGS_REVOKED) #define PMEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK))) -#define PMEM_IS_SUBMAP(data) ((data->flags & PMEM_FLAGS_SUBMAP) && \ +#define PMEM_IS_SUBMAP(data) \ + ((data->flags & PMEM_FLAGS_SUBMAP) && \ (!(data->flags & PMEM_FLAGS_UNSUBMAP))) static int pmem_release(struct inode *, struct file *); @@ -182,79 +300,361 @@ struct file_operations pmem_fops = { .unlocked_ioctl = pmem_ioctl, }; +#define PMEM_ATTR(_name, _mode, _show, _store) { \ + .attr = {.name = __stringify(_name), .mode = _mode }, \ + .show = _show, \ + .store = _store, \ +} + +struct pmem_attr { + struct attribute attr; + ssize_t(*show) (const int id, char * const); + ssize_t(*store) (const int id, const char * const, const size_t count); +}; +#define to_pmem_attr(a) container_of(a, struct pmem_attr, attr) + +#define RW_PMEM_ATTR(name) \ +static struct pmem_attr pmem_attr_## name = \ + PMEM_ATTR(name, S_IRUGO | S_IWUSR, show_pmem_## name, store_pmem_## name) + +#define RO_PMEM_ATTR(name) \ +static struct pmem_attr pmem_attr_## name = \ + PMEM_ATTR(name, S_IRUGO, show_pmem_## name, NULL) + +#define WO_PMEM_ATTR(name) \ +static struct pmem_attr pmem_attr_## name = \ + PMEM_ATTR(name, S_IWUSR, NULL, store_pmem_## name) + +static ssize_t show_pmem(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct pmem_attr *a = to_pmem_attr(attr); + return a->show ? a->show(to_pmem_info_id(kobj), buf) : -EIO; +} + +static ssize_t store_pmem(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + struct pmem_attr *a = to_pmem_attr(attr); + return a->store ? a->store(to_pmem_info_id(kobj), buf, count) : -EIO; +} + +static struct sysfs_ops pmem_ops = { + .show = show_pmem, + .store = store_pmem, +}; + +static ssize_t show_pmem_base(int id, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%lu(%#lx)\n", + pmem[id].base, pmem[id].base); +} +RO_PMEM_ATTR(base); + +static ssize_t show_pmem_size(int id, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%lu(%#lx)\n", + pmem[id].size, pmem[id].size); +} +RO_PMEM_ATTR(size); + +static ssize_t show_pmem_allocator_type(int id, char *buf) +{ + switch (pmem[id].allocator_type) { + case PMEM_ALLOCATORTYPE_ALLORNOTHING: + return scnprintf(buf, PAGE_SIZE, "%s\n", "All or Nothing"); + case PMEM_ALLOCATORTYPE_BUDDYBESTFIT: + return scnprintf(buf, PAGE_SIZE, "%s\n", "Buddy Bestfit"); + case PMEM_ALLOCATORTYPE_BITMAP: + return scnprintf(buf, PAGE_SIZE, "%s\n", "Bitmap"); + case PMEM_ALLOCATORTYPE_SYSTEM: + return scnprintf(buf, PAGE_SIZE, "%s\n", "System heap"); + default: + return scnprintf(buf, PAGE_SIZE, + "??? Invalid allocator type (%d) for this region! 
" + "Something isn't right.\n", + pmem[id].allocator_type); + } +} +RO_PMEM_ATTR(allocator_type); + +static ssize_t show_pmem_mapped_regions(int id, char *buf) +{ + struct list_head *elt; + int ret; + + ret = scnprintf(buf, PAGE_SIZE, + "pid #: mapped regions (offset, len) (offset,len)...\n"); + + mutex_lock(&pmem[id].data_list_mutex); + list_for_each(elt, &pmem[id].data_list) { + struct pmem_data *data = + list_entry(elt, struct pmem_data, list); + struct list_head *elt2; + + down_read(&data->sem); + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "pid %u:", + data->pid); + list_for_each(elt2, &data->region_list) { + struct pmem_region_node *region_node = list_entry(elt2, + struct pmem_region_node, + list); + ret += scnprintf(buf + ret, PAGE_SIZE - ret, + "(%lx,%lx) ", + region_node->region.offset, + region_node->region.len); + } + up_read(&data->sem); + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); + } + mutex_unlock(&pmem[id].data_list_mutex); + return ret; +} +RO_PMEM_ATTR(mapped_regions); + +#define PMEM_COMMON_SYSFS_ATTRS \ + &pmem_attr_base.attr, \ + &pmem_attr_size.attr, \ + &pmem_attr_allocator_type.attr, \ + &pmem_attr_mapped_regions.attr + + +static ssize_t show_pmem_allocated(int id, char *buf) +{ + ssize_t ret; + + mutex_lock(&pmem[id].arena_mutex); + ret = scnprintf(buf, PAGE_SIZE, "%s\n", + pmem[id].allocator.all_or_nothing.allocated ? + "is allocated" : "is NOT allocated"); + mutex_unlock(&pmem[id].arena_mutex); + return ret; +} +RO_PMEM_ATTR(allocated); + +static struct attribute *pmem_allornothing_attrs[] = { + PMEM_COMMON_SYSFS_ATTRS, + + &pmem_attr_allocated.attr, + + NULL +}; + +static struct kobj_type pmem_allornothing_ktype = { + .sysfs_ops = &pmem_ops, + .default_attrs = pmem_allornothing_attrs, +}; + +static ssize_t show_pmem_total_entries(int id, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%lu\n", pmem[id].num_entries); +} +RO_PMEM_ATTR(total_entries); + +static ssize_t show_pmem_quantum_size(int id, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%u (%#x)\n", + pmem[id].quantum, pmem[id].quantum); +} +RO_PMEM_ATTR(quantum_size); + +static ssize_t show_pmem_buddy_bitmap_dump(int id, char *buf) +{ + int ret, i; + + mutex_lock(&pmem[id].data_list_mutex); + ret = scnprintf(buf, PAGE_SIZE, "index\torder\tlength\tallocated\n"); + + for (i = 0; i < pmem[id].num_entries && (PAGE_SIZE - ret); + i = PMEM_BUDDY_NEXT_INDEX(id, i)) + ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%d\t%d\t%d\t%d\n", + i, PMEM_BUDDY_ORDER(id, i), + PMEM_BUDDY_LEN(id, i), + !PMEM_IS_FREE_BUDDY(id, i)); + + mutex_unlock(&pmem[id].data_list_mutex); + return ret; +} +RO_PMEM_ATTR(buddy_bitmap_dump); + +#define PMEM_BITMAP_BUDDY_BESTFIT_COMMON_SYSFS_ATTRS \ + &pmem_attr_quantum_size.attr, \ + &pmem_attr_total_entries.attr + +static struct attribute *pmem_buddy_bestfit_attrs[] = { + PMEM_COMMON_SYSFS_ATTRS, + + PMEM_BITMAP_BUDDY_BESTFIT_COMMON_SYSFS_ATTRS, + + &pmem_attr_buddy_bitmap_dump.attr, + + NULL +}; + +static struct kobj_type pmem_buddy_bestfit_ktype = { + .sysfs_ops = &pmem_ops, + .default_attrs = pmem_buddy_bestfit_attrs, +}; + +static ssize_t show_pmem_free_quanta(int id, char *buf) +{ + ssize_t ret; + + mutex_lock(&pmem[id].arena_mutex); + ret = scnprintf(buf, PAGE_SIZE, "%u\n", + pmem[id].allocator.bitmap.bitmap_free); + mutex_unlock(&pmem[id].arena_mutex); + return ret; +} +RO_PMEM_ATTR(free_quanta); + +static ssize_t show_pmem_bits_allocated(int id, char *buf) +{ + ssize_t ret; + unsigned int i; + + mutex_lock(&pmem[id].arena_mutex); + + ret = scnprintf(buf, PAGE_SIZE, + "id: 
%d\nbitnum\tindex\tquanta allocated\n", id); + + for (i = 0; i < pmem[id].allocator.bitmap.bitmap_allocs; i++) + if (pmem[id].allocator.bitmap.bitm_alloc[i].bit != -1) + ret += scnprintf(buf + ret, PAGE_SIZE - ret, + "%u\t%u\t%u\n", + i, + pmem[id].allocator.bitmap.bitm_alloc[i].bit, + pmem[id].allocator.bitmap.bitm_alloc[i].quanta + ); + + mutex_unlock(&pmem[id].arena_mutex); + return ret; +} +RO_PMEM_ATTR(bits_allocated); + +static struct attribute *pmem_bitmap_attrs[] = { + PMEM_COMMON_SYSFS_ATTRS, + + PMEM_BITMAP_BUDDY_BESTFIT_COMMON_SYSFS_ATTRS, + + &pmem_attr_free_quanta.attr, + &pmem_attr_bits_allocated.attr, + + NULL +}; + +static struct attribute *pmem_system_attrs[] = { + PMEM_COMMON_SYSFS_ATTRS, + + NULL +}; + +static struct kobj_type pmem_bitmap_ktype = { + .sysfs_ops = &pmem_ops, + .default_attrs = pmem_bitmap_attrs, +}; + +static struct kobj_type pmem_system_ktype = { + .sysfs_ops = &pmem_ops, + .default_attrs = pmem_system_attrs, +}; + static int get_id(struct file *file) { return MINOR(file->f_dentry->d_inode->i_rdev); } -int is_pmem_file(struct file *file) +static char *get_name(struct file *file) +{ + int id = get_id(file); + return pmem[id].name; +} + +static int is_pmem_file(struct file *file) { int id; if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode)) return 0; + id = get_id(file); - if (unlikely(id >= PMEM_MAX_DEVICES)) - return 0; - if (unlikely(file->f_dentry->d_inode->i_rdev != - MKDEV(MISC_MAJOR, pmem[id].dev.minor))) - return 0; - return 1; + return (unlikely(id >= PMEM_MAX_DEVICES || + file->f_dentry->d_inode->i_rdev != + MKDEV(MISC_MAJOR, pmem[id].dev.minor))) ? 0 : 1; } static int has_allocation(struct file *file) { - struct pmem_data *data; - /* check is_pmem_file first if not accessed via pmem_file_ops */ - - if (unlikely(!file->private_data)) - return 0; - data = (struct pmem_data *)file->private_data; - if (unlikely(data->index < 0)) - return 0; - return 1; + /* must be called with at least read lock held on + * ((struct pmem_data *)(file->private_data))->sem which + * means that file is guaranteed not to be NULL upon entry!! + * check is_pmem_file first if not accessed via pmem_file_ops */ + struct pmem_data *pdata = file->private_data; + return pdata && pdata->index != -1; } static int is_master_owner(struct file *file) { struct file *master_file; - struct pmem_data *data; + struct pmem_data *data = file->private_data; int put_needed, ret = 0; - if (!is_pmem_file(file) || !has_allocation(file)) + if (!has_allocation(file)) return 0; - data = (struct pmem_data *)file->private_data; if (PMEM_FLAGS_MASTERMAP & data->flags) return 1; master_file = fget_light(data->master_fd, &put_needed); if (master_file && data->master_file == master_file) ret = 1; - fput_light(master_file, put_needed); + if (master_file) + fput_light(master_file, put_needed); return ret; } -static int pmem_free(int id, int index) +static int pmem_free_all_or_nothing(int id, int index) { - /* caller should hold the write lock on pmem_sem! */ - int buddy, curr = index; + /* caller should hold the lock on arena_mutex! */ DLOG("index %d\n", index); - if (pmem[id].no_allocator) { - pmem[id].allocated = 0; - return 0; - } + pmem[id].allocator.all_or_nothing.allocated = 0; + return 0; +} + +static int pmem_free_space_all_or_nothing(int id, + struct pmem_freespace *fs) +{ + /* caller should hold the lock on arena_mutex! */ + fs->total = (unsigned long) + pmem[id].allocator.all_or_nothing.allocated == 0 ? 
+ pmem[id].size : 0; + + fs->largest = fs->total; + return 0; +} + + +static int pmem_free_buddy_bestfit(int id, int index) +{ + /* caller should hold the lock on arena_mutex! */ + int curr = index; + DLOG("index %d\n", index); + + /* clean up the bitmap, merging any buddies */ - pmem[id].bitmap[curr].allocated = 0; + pmem[id].allocator.buddy_bestfit.buddy_bitmap[curr].allocated = 0; /* find a slots buddy Buddy# = Slot# ^ (1 << order) * if the buddy is also free merge them * repeat until the buddy is not free or end of the bitmap is reached */ do { - buddy = PMEM_BUDDY_INDEX(id, curr); - if (PMEM_IS_FREE(id, buddy) && - PMEM_ORDER(id, buddy) == PMEM_ORDER(id, curr)) { - PMEM_ORDER(id, buddy)++; - PMEM_ORDER(id, curr)++; + int buddy = PMEM_BUDDY_INDEX(id, curr); + if (buddy < pmem[id].num_entries && + PMEM_IS_FREE_BUDDY(id, buddy) && + PMEM_BUDDY_ORDER(id, buddy) == + PMEM_BUDDY_ORDER(id, curr)) { + PMEM_BUDDY_ORDER(id, buddy)++; + PMEM_BUDDY_ORDER(id, curr)++; curr = min(buddy, curr); } else { break; @@ -264,43 +664,222 @@ static int pmem_free(int id, int index) return 0; } + +static int pmem_free_space_buddy_bestfit(int id, + struct pmem_freespace *fs) +{ + /* caller should hold the lock on arena_mutex! */ + int curr; + unsigned long size; + fs->total = 0; + fs->largest = 0; + + for (curr = 0; curr < pmem[id].num_entries; + curr = PMEM_BUDDY_NEXT_INDEX(id, curr)) { + if (PMEM_IS_FREE_BUDDY(id, curr)) { + size = PMEM_BUDDY_LEN(id, curr); + if (size > fs->largest) + fs->largest = size; + fs->total += size; + } + } + return 0; +} + + +static inline uint32_t start_mask(int bit_start) +{ + return (uint32_t)(~0) << (bit_start & PMEM_BITS_PER_WORD_MASK); +} + +static inline uint32_t end_mask(int bit_end) +{ + return (uint32_t)(~0) >> + ((BITS_PER_LONG - bit_end) & PMEM_BITS_PER_WORD_MASK); +} + +static inline int compute_total_words(int bit_end, int word_index) +{ + return ((bit_end + BITS_PER_LONG - 1) >> + PMEM_32BIT_WORD_ORDER) - word_index; +} + +static void bitmap_bits_clear_all(uint32_t *bitp, int bit_start, int bit_end) +{ + int word_index = bit_start >> PMEM_32BIT_WORD_ORDER, total_words; + + total_words = compute_total_words(bit_end, word_index); + if (total_words > 0) { + if (total_words == 1) { + bitp[word_index] &= + ~(start_mask(bit_start) & end_mask(bit_end)); + } else { + bitp[word_index++] &= ~start_mask(bit_start); + if (total_words > 2) { + int total_bytes; + + total_words -= 2; + total_bytes = total_words << 2; + + memset(&bitp[word_index], 0, total_bytes); + word_index += total_words; + } + bitp[word_index] &= ~end_mask(bit_end); + } + } +} + +static int pmem_free_bitmap(int id, int bitnum) +{ + /* caller should hold the lock on arena_mutex! 
*/ + int i; + char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1]; + + DLOG("bitnum %d\n", bitnum); + + for (i = 0; i < pmem[id].allocator.bitmap.bitmap_allocs; i++) { + const int curr_bit = + pmem[id].allocator.bitmap.bitm_alloc[i].bit; + + if (curr_bit == bitnum) { + const int curr_quanta = + pmem[id].allocator.bitmap.bitm_alloc[i].quanta; + + bitmap_bits_clear_all(pmem[id].allocator.bitmap.bitmap, + curr_bit, curr_bit + curr_quanta); + pmem[id].allocator.bitmap.bitmap_free += curr_quanta; + pmem[id].allocator.bitmap.bitm_alloc[i].bit = -1; + pmem[id].allocator.bitmap.bitm_alloc[i].quanta = 0; + return 0; + } + } + printk(KERN_ALERT "pmem: %s: Attempt to free unallocated index %d, id" + " %d, pid %d(%s)\n", __func__, bitnum, id, current->pid, + get_task_comm(currtask_name, current)); + + return -1; +} + +static int pmem_free_system(int id, int index) +{ + /* caller should hold the lock on arena_mutex! */ + struct alloc_list *item; + + DLOG("index %d\n", index); + if (index != 0) + item = (struct alloc_list *)index; + else + return 0; + + if (item->vaddr != NULL) { + iounmap(item->vaddr); + kfree(__va(item->addr)); + list_del(&item->allocs); + kfree(item); + } + + return 0; +} + +static int pmem_free_space_bitmap(int id, struct pmem_freespace *fs) +{ + int i, j; + int max_allocs = pmem[id].allocator.bitmap.bitmap_allocs; + int alloc_start = 0; + int next_alloc; + unsigned long size = 0; + + fs->total = 0; + fs->largest = 0; + + for (i = 0; i < max_allocs; i++) { + + int alloc_quanta = 0; + int alloc_idx = 0; + next_alloc = pmem[id].num_entries; + + /* Look for the lowest bit where next allocation starts */ + for (j = 0; j < max_allocs; j++) { + const int curr_alloc = pmem[id].allocator. + bitmap.bitm_alloc[j].bit; + if (curr_alloc != -1) { + if (alloc_start == curr_alloc) + alloc_idx = j; + if (alloc_start >= curr_alloc) + continue; + if (curr_alloc < next_alloc) + next_alloc = curr_alloc; + } + } + alloc_quanta = pmem[id].allocator.bitmap. 
+ bitm_alloc[alloc_idx].quanta; + size = (next_alloc - (alloc_start + alloc_quanta)) * + pmem[id].quantum; + + if (size > fs->largest) + fs->largest = size; + fs->total += size; + + if (next_alloc == pmem[id].num_entries) + break; + else + alloc_start = next_alloc; + } + + return 0; +} + +static int pmem_free_space_system(int id, struct pmem_freespace *fs) +{ + fs->total = pmem[id].size; + fs->largest = pmem[id].size; + + return 0; +} + static void pmem_revoke(struct file *file, struct pmem_data *data); static int pmem_release(struct inode *inode, struct file *file) { - struct pmem_data *data = (struct pmem_data *)file->private_data; + struct pmem_data *data = file->private_data; struct pmem_region_node *region_node; struct list_head *elt, *elt2; int id = get_id(file), ret = 0; - - mutex_lock(&pmem[id].data_list_lock); +#if PMEM_DEBUG_MSGS + char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1]; +#endif + DLOG("releasing memory pid %u(%s) file %p(%ld) dev %s(id: %d)\n", + current->pid, get_task_comm(currtask_name, current), + file, file_count(file), get_name(file), id); + mutex_lock(&pmem[id].data_list_mutex); /* if this file is a master, revoke all the memory in the connected * files */ if (PMEM_FLAGS_MASTERMAP & data->flags) { - struct pmem_data *sub_data; list_for_each(elt, &pmem[id].data_list) { - sub_data = list_entry(elt, struct pmem_data, list); + struct pmem_data *sub_data = + list_entry(elt, struct pmem_data, list); + int is_master; + down_read(&sub_data->sem); - if (PMEM_IS_SUBMAP(sub_data) && - file == sub_data->master_file) { - up_read(&sub_data->sem); + is_master = (PMEM_IS_SUBMAP(sub_data) && + file == sub_data->master_file); + up_read(&sub_data->sem); + + if (is_master) pmem_revoke(file, sub_data); - } else - up_read(&sub_data->sem); } } list_del(&data->list); - mutex_unlock(&pmem[id].data_list_lock); - + mutex_unlock(&pmem[id].data_list_mutex); down_write(&data->sem); - /* if its not a conencted file and it has an allocation, free it */ + /* if it is not a connected file and it has an allocation, free it */ if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) { - down_write(&pmem[id].bitmap_sem); - ret = pmem_free(id, data->index); - up_write(&pmem[id].bitmap_sem); + mutex_lock(&pmem[id].arena_mutex); + ret = pmem[id].free(id, data->index); + mutex_unlock(&pmem[id].arena_mutex); } /* if this file is a submap (mapped, connected file), downref the @@ -333,15 +912,19 @@ static int pmem_open(struct inode *inode, struct file *file) struct pmem_data *data; int id = get_id(file); int ret = 0; +#if PMEM_DEBUG_MSGS + char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1]; +#endif - DLOG("current %u file %p(%d)\n", current->pid, file, file_count(file)); - /* setup file->private_data to indicate its unmapped */ - /* you can only open a pmem device one time */ - if (file->private_data != NULL) - return -1; + if (pmem[id].memory_state == MEMORY_UNSTABLE_NO_MEMORY_ALLOCATED) + return -ENODEV; + DLOG("pid %u(%s) file %p(%ld) dev %s(id: %d)\n", + current->pid, get_task_comm(currtask_name, current), + file, file_count(file), get_name(file), id); data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL); if (!data) { - printk("pmem: unable to allocate memory for pmem metadata."); + printk(KERN_ALERT "pmem: %s: unable to allocate memory for " + "pmem metadata.", __func__); return -1; } data->flags = 0; @@ -359,17 +942,17 @@ static int pmem_open(struct inode *inode, struct file *file) file->private_data = data; INIT_LIST_HEAD(&data->list); - 
mutex_lock(&pmem[id].data_list_lock); + mutex_lock(&pmem[id].data_list_mutex); list_add(&data->list, &pmem[id].data_list); - mutex_unlock(&pmem[id].data_list_lock); + mutex_unlock(&pmem[id].data_list_mutex); return ret; } -static unsigned long pmem_order(unsigned long len) +static unsigned long pmem_order(unsigned long len, int id) { int i; - len = (len + PMEM_MIN_ALLOC - 1)/PMEM_MIN_ALLOC; + len = (len + pmem[id].quantum - 1)/pmem[id].quantum; len--; for (i = 0; i < sizeof(len)*8; i++) if (len >> i == 0) @@ -377,75 +960,386 @@ static unsigned long pmem_order(unsigned long len) return i; } -static int pmem_allocate(int id, unsigned long len) +static int pmem_allocator_all_or_nothing(const int id, + const unsigned long len, + const unsigned int align) { - /* caller should hold the write lock on pmem_sem! */ - /* return the corresponding pdata[] entry */ - int curr = 0; - int end = pmem[id].num_entries; - int best_fit = -1; - unsigned long order = pmem_order(len); + /* caller should hold the lock on arena_mutex! */ + DLOG("all or nothing\n"); + if ((len > pmem[id].size) || + pmem[id].allocator.all_or_nothing.allocated) + return -1; + pmem[id].allocator.all_or_nothing.allocated = 1; + return len; +} - if (pmem[id].no_allocator) { - DLOG("no allocator"); - if ((len > pmem[id].size) || pmem[id].allocated) - return -1; - pmem[id].allocated = 1; - return len; - } +static int pmem_allocator_buddy_bestfit(const int id, + const unsigned long len, + unsigned int align) +{ + /* caller should hold the lock on arena_mutex! */ + int curr; + int best_fit = -1; + unsigned long order; + DLOG("buddy bestfit\n"); + order = pmem_order(len, id); if (order > PMEM_MAX_ORDER) - return -1; + goto out; + DLOG("order %lx\n", order); - /* look through the bitmap: - * if you find a free slot of the correct order use it - * otherwise, use the best fit (smallest with size > order) slot + /* Look through the bitmap. + * If a free slot of the correct order is found, use it. + * Otherwise, use the best fit (smallest with size > order) slot. 
*/ - while (curr < end) { - if (PMEM_IS_FREE(id, curr)) { - if (PMEM_ORDER(id, curr) == (unsigned char)order) { + for (curr = 0; + curr < pmem[id].num_entries; + curr = PMEM_BUDDY_NEXT_INDEX(id, curr)) + if (PMEM_IS_FREE_BUDDY(id, curr)) { + if (PMEM_BUDDY_ORDER(id, curr) == + (unsigned char)order) { /* set the not free bit and clear others */ best_fit = curr; break; } - if (PMEM_ORDER(id, curr) > (unsigned char)order && + if (PMEM_BUDDY_ORDER(id, curr) > + (unsigned char)order && (best_fit < 0 || - PMEM_ORDER(id, curr) < PMEM_ORDER(id, best_fit))) + PMEM_BUDDY_ORDER(id, curr) < + PMEM_BUDDY_ORDER(id, best_fit))) best_fit = curr; } - curr = PMEM_NEXT_INDEX(id, curr); - } - /* if best_fit < 0, there are no suitable slots, - * return an error - */ + /* if best_fit < 0, there are no suitable slots; return an error */ if (best_fit < 0) { - printk("pmem: no space left to allocate!\n"); - return -1; +#if PMEM_DEBUG + printk(KERN_ALERT "pmem: %s: no space left to allocate!\n", + __func__); +#endif + goto out; } /* now partition the best fit: * split the slot into 2 buddies of order - 1 * repeat until the slot is of the correct order */ - while (PMEM_ORDER(id, best_fit) > (unsigned char)order) { + while (PMEM_BUDDY_ORDER(id, best_fit) > (unsigned char)order) { int buddy; - PMEM_ORDER(id, best_fit) -= 1; + PMEM_BUDDY_ORDER(id, best_fit) -= 1; buddy = PMEM_BUDDY_INDEX(id, best_fit); - PMEM_ORDER(id, buddy) = PMEM_ORDER(id, best_fit); + PMEM_BUDDY_ORDER(id, buddy) = PMEM_BUDDY_ORDER(id, best_fit); } - pmem[id].bitmap[best_fit].allocated = 1; + pmem[id].allocator.buddy_bestfit.buddy_bitmap[best_fit].allocated = 1; +out: return best_fit; } -static pgprot_t pmem_access_prot(struct file *file, pgprot_t vma_prot) + +static inline unsigned long paddr_from_bit(const int id, const int bitnum) { - int id = get_id(file); -#ifdef pgprot_noncached - if (pmem[id].cached == 0 || file->f_flags & O_SYNC) - return pgprot_noncached(vma_prot); -#endif + return pmem[id].base + pmem[id].quantum * bitnum; +} + +static inline unsigned long bit_from_paddr(const int id, + const unsigned long paddr) +{ + return (paddr - pmem[id].base) / pmem[id].quantum; +} + +static void bitmap_bits_set_all(uint32_t *bitp, int bit_start, int bit_end) +{ + int word_index = bit_start >> PMEM_32BIT_WORD_ORDER, total_words; + + total_words = compute_total_words(bit_end, word_index); + if (total_words > 0) { + if (total_words == 1) { + bitp[word_index] |= + (start_mask(bit_start) & end_mask(bit_end)); + } else { + bitp[word_index++] |= start_mask(bit_start); + if (total_words > 2) { + int total_bytes; + + total_words -= 2; + total_bytes = total_words << 2; + + memset(&bitp[word_index], ~0, total_bytes); + word_index += total_words; + } + bitp[word_index] |= end_mask(bit_end); + } + } +} + +static int +bitmap_allocate_contiguous(uint32_t *bitp, int num_bits_to_alloc, + int total_bits, int spacing) +{ + int bit_start, last_bit, word_index; + + if (num_bits_to_alloc <= 0) + return -1; + + for (bit_start = 0; ; + bit_start = (last_bit + + (word_index << PMEM_32BIT_WORD_ORDER) + spacing - 1) + & ~(spacing - 1)) { + int bit_end = bit_start + num_bits_to_alloc, total_words; + + if (bit_end > total_bits) + return -1; /* out of contiguous memory */ + + word_index = bit_start >> PMEM_32BIT_WORD_ORDER; + total_words = compute_total_words(bit_end, word_index); + + if (total_words <= 0) + return -1; + + if (total_words == 1) { + last_bit = fls(bitp[word_index] & + (start_mask(bit_start) & + end_mask(bit_end))); + if (last_bit) + continue; + } else { + int 
end_word = word_index + (total_words - 1); + last_bit = + fls(bitp[word_index] & start_mask(bit_start)); + if (last_bit) + continue; + + for (word_index++; + word_index < end_word; + word_index++) { + last_bit = fls(bitp[word_index]); + if (last_bit) + break; + } + if (last_bit) + continue; + + last_bit = fls(bitp[word_index] & end_mask(bit_end)); + if (last_bit) + continue; + } + bitmap_bits_set_all(bitp, bit_start, bit_end); + return bit_start; + } + return -1; +} + +static int reserve_quanta(const unsigned int quanta_needed, + const int id, + unsigned int align) +{ + /* alignment should be a valid power of 2 */ + int ret = -1, start_bit = 0, spacing = 1; + + /* Sanity check */ + if (quanta_needed > pmem[id].allocator.bitmap.bitmap_free) { +#if PMEM_DEBUG + printk(KERN_ALERT "pmem: %s: request (%d) too big for" + " available free (%d)\n", __func__, quanta_needed, + pmem[id].allocator.bitmap.bitmap_free); +#endif + return -1; + } + + start_bit = bit_from_paddr(id, + (pmem[id].base + align - 1) & ~(align - 1)); + if (start_bit <= -1) { +#if PMEM_DEBUG + printk(KERN_ALERT + "pmem: %s: bit_from_paddr fails for" + " %u alignment.\n", __func__, align); +#endif + return -1; + } + spacing = align / pmem[id].quantum; + spacing = spacing > 1 ? spacing : 1; + + ret = bitmap_allocate_contiguous(pmem[id].allocator.bitmap.bitmap, + quanta_needed, + (pmem[id].size + pmem[id].quantum - 1) / pmem[id].quantum, + spacing); + +#if PMEM_DEBUG + if (ret < 0) + printk(KERN_ALERT "pmem: %s: not enough contiguous bits free " + "in bitmap! Region memory is either too fragmented or" + " request is too large for available memory.\n", + __func__); +#endif + + return ret; +} + +static int pmem_allocator_bitmap(const int id, + const unsigned long len, + const unsigned int align) +{ + /* caller should hold the lock on arena_mutex! */ + int bitnum, i; + unsigned int quanta_needed; + + DLOG("bitmap id %d, len %ld, align %u\n", id, len, align); + if (!pmem[id].allocator.bitmap.bitm_alloc) { +#if PMEM_DEBUG + printk(KERN_ALERT "pmem: bitm_alloc not present! id: %d\n", + id); +#endif + return -1; + } + + quanta_needed = (len + pmem[id].quantum - 1) / pmem[id].quantum; + DLOG("quantum size %u quanta needed %u free %u id %d\n", + pmem[id].quantum, quanta_needed, + pmem[id].allocator.bitmap.bitmap_free, id); + + if (pmem[id].allocator.bitmap.bitmap_free < quanta_needed) { +#if PMEM_DEBUG + printk(KERN_ALERT "pmem: memory allocation failure. " + "PMEM memory region exhausted, id %d." + " Unable to comply with allocation request.\n", id); +#endif + return -1; + } + + bitnum = reserve_quanta(quanta_needed, id, align); + if (bitnum == -1) + goto leave; + + for (i = 0; + i < pmem[id].allocator.bitmap.bitmap_allocs && + pmem[id].allocator.bitmap.bitm_alloc[i].bit != -1; + i++) + ; + + if (i >= pmem[id].allocator.bitmap.bitmap_allocs) { + void *temp; + int32_t new_bitmap_allocs = + pmem[id].allocator.bitmap.bitmap_allocs << 1; + int j; + + if (!new_bitmap_allocs) { /* failed sanity check!! */ +#if PMEM_DEBUG + pr_alert("pmem: bitmap_allocs number" + " wrapped around to zero! Something " + "is VERY wrong.\n"); +#endif + return -1; + } + + if (new_bitmap_allocs > pmem[id].num_entries) { + /* failed sanity check!! 
*/ +#if PMEM_DEBUG + pr_alert("pmem: required bitmap_allocs" + " number exceeds maximum entries possible" + " for current quanta\n"); +#endif + return -1; + } + + temp = krealloc(pmem[id].allocator.bitmap.bitm_alloc, + new_bitmap_allocs * + sizeof(*pmem[id].allocator.bitmap.bitm_alloc), + GFP_KERNEL); + if (!temp) { +#if PMEM_DEBUG + pr_alert("pmem: can't realloc bitmap_allocs," + "id %d, current num bitmap allocs %d\n", + id, pmem[id].allocator.bitmap.bitmap_allocs); +#endif + return -1; + } + pmem[id].allocator.bitmap.bitmap_allocs = new_bitmap_allocs; + pmem[id].allocator.bitmap.bitm_alloc = temp; + + for (j = i; j < new_bitmap_allocs; j++) { + pmem[id].allocator.bitmap.bitm_alloc[j].bit = -1; + pmem[id].allocator.bitmap.bitm_alloc[i].quanta = 0; + } + + DLOG("increased # of allocated regions to %d for id %d\n", + pmem[id].allocator.bitmap.bitmap_allocs, id); + } + + DLOG("bitnum %d, bitm_alloc index %d\n", bitnum, i); + + pmem[id].allocator.bitmap.bitmap_free -= quanta_needed; + pmem[id].allocator.bitmap.bitm_alloc[i].bit = bitnum; + pmem[id].allocator.bitmap.bitm_alloc[i].quanta = quanta_needed; +leave: + return bitnum; +} + +static int pmem_allocator_system(const int id, + const unsigned long len, + const unsigned int align) +{ + /* caller should hold the lock on arena_mutex! */ + struct alloc_list *list; + unsigned long aligned_len; + int count = SYSTEM_ALLOC_RETRY; + void *buf; + + DLOG("system id %d, len %ld, align %u\n", id, len, align); + + if ((pmem[id].allocator.system_mem.used + len) > pmem[id].size) { + DLOG("requested size would be larger than quota\n"); + return -1; + } + + /* Handle alignment */ + aligned_len = len + align; + + /* Attempt allocation */ + list = kmalloc(sizeof(struct alloc_list), GFP_KERNEL); + if (list == NULL) { + printk(KERN_ERR "pmem: failed to allocate system metadata\n"); + return -1; + } + list->vaddr = NULL; + + buf = NULL; + while ((buf == NULL) && count--) { + buf = kmalloc((aligned_len), GFP_KERNEL); + if (buf == NULL) { + DLOG("pmem: kmalloc %d temporarily failed len= %ld\n", + count, aligned_len); + } + } + if (!buf) { + printk(KERN_CRIT "pmem: kmalloc failed for id= %d len= %ld\n", + id, aligned_len); + kfree(list); + return -1; + } + list->size = aligned_len; + list->addr = (void *)__pa(buf); + list->aaddr = (void *)(((unsigned int)(list->addr) + (align - 1)) & + ~(align - 1)); + + if (!pmem[id].cached) + list->vaddr = ioremap(__pa(buf), aligned_len); + else + list->vaddr = ioremap_cached(__pa(buf), aligned_len); + + INIT_LIST_HEAD(&list->allocs); + list_add(&list->allocs, &pmem[id].allocator.system_mem.alist); + + return (int)list; +} + +static pgprot_t pmem_phys_mem_access_prot(struct file *file, pgprot_t vma_prot) +{ + int id = get_id(file); +#ifdef pgprot_writecombine + if (pmem[id].cached == 0 || file->f_flags & O_SYNC) + /* on ARMv6 and ARMv7 this expands to Normal Noncached */ + return pgprot_writecombine(vma_prot); +#endif #ifdef pgprot_ext_buffered else if (pmem[id].buffered) return pgprot_ext_buffered(vma_prot); @@ -453,26 +1347,80 @@ static pgprot_t pmem_access_prot(struct file *file, pgprot_t vma_prot) return vma_prot; } -static unsigned long pmem_start_addr(int id, struct pmem_data *data) +static unsigned long pmem_start_addr_all_or_nothing(int id, + struct pmem_data *data) { - if (pmem[id].no_allocator) - return PMEM_START_ADDR(id, 0); - else - return PMEM_START_ADDR(id, data->index); + return PMEM_START_ADDR(id, 0); +} +static unsigned long pmem_start_addr_buddy_bestfit(int id, + struct pmem_data *data) +{ + return 
PMEM_START_ADDR(id, data->index); } -static void *pmem_start_vaddr(int id, struct pmem_data *data) +static unsigned long pmem_start_addr_bitmap(int id, struct pmem_data *data) +{ + return data->index * pmem[id].quantum + pmem[id].base; +} + +static unsigned long pmem_start_addr_system(int id, struct pmem_data *data) { - return pmem_start_addr(id, data) - pmem[id].base + pmem[id].vbase; + return (unsigned long)(((struct alloc_list *)(data->index))->aaddr); } -static unsigned long pmem_len(int id, struct pmem_data *data) +static void *pmem_start_vaddr(int id, struct pmem_data *data) { - if (pmem[id].no_allocator) - return data->index; + if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_SYSTEM) + return ((struct alloc_list *)(data->index))->vaddr; else - return PMEM_LEN(id, data->index); + return pmem[id].start_addr(id, data) - pmem[id].base + pmem[id].vbase; +} + +static unsigned long pmem_len_all_or_nothing(int id, struct pmem_data *data) +{ + return data->index; +} + +static unsigned long pmem_len_buddy_bestfit(int id, struct pmem_data *data) +{ + return PMEM_BUDDY_LEN(id, data->index); +} + +static unsigned long pmem_len_bitmap(int id, struct pmem_data *data) +{ + int i; + unsigned long ret = 0; + + mutex_lock(&pmem[id].arena_mutex); + + for (i = 0; i < pmem[id].allocator.bitmap.bitmap_allocs; i++) + if (pmem[id].allocator.bitmap.bitm_alloc[i].bit == + data->index) { + ret = pmem[id].allocator.bitmap.bitm_alloc[i].quanta * + pmem[id].quantum; + break; + } + + mutex_unlock(&pmem[id].arena_mutex); +#if PMEM_DEBUG + if (i >= pmem[id].allocator.bitmap.bitmap_allocs) + pr_alert("pmem: %s: can't find bitnum %d in " + "alloc'd array!\n", __func__, data->index); +#endif + return ret; +} + +static unsigned long pmem_len_system(int id, struct pmem_data *data) +{ + unsigned long ret = 0; + + mutex_lock(&pmem[id].arena_mutex); + + ret = ((struct alloc_list *)data->index)->size; + mutex_unlock(&pmem[id].arena_mutex); + + return ret; } static int pmem_map_garbage(int id, struct vm_area_struct *vma, @@ -509,18 +1457,25 @@ static int pmem_map_pfn_range(int id, struct vm_area_struct *vma, struct pmem_data *data, unsigned long offset, unsigned long len) { + int ret; DLOG("map offset %lx len %lx\n", offset, len); BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_start)); BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_end)); BUG_ON(!PMEM_IS_PAGE_ALIGNED(len)); BUG_ON(!PMEM_IS_PAGE_ALIGNED(offset)); - if (io_remap_pfn_range(vma, vma->vm_start + offset, - (pmem_start_addr(id, data) + offset) >> PAGE_SHIFT, - len, vma->vm_page_prot)) { - return -EAGAIN; + ret = io_remap_pfn_range(vma, vma->vm_start + offset, + (pmem[id].start_addr(id, data) + offset) >> PAGE_SHIFT, + len, vma->vm_page_prot); + if (ret) { +#if PMEM_DEBUG + pr_alert("pmem: %s: io_remap_pfn_range fails with " + "return value: %d!\n", __func__, ret); +#endif + + ret = -EAGAIN; } - return 0; + return ret; } static int pmem_remap_pfn_range(int id, struct vm_area_struct *vma, @@ -538,13 +1493,21 @@ static void pmem_vma_open(struct vm_area_struct *vma) struct file *file = vma->vm_file; struct pmem_data *data = file->private_data; int id = get_id(file); + +#if PMEM_DEBUG_MSGS + char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1]; +#endif + DLOG("Dev %s(id: %d) pid %u(%s) ppid %u file %p count %ld\n", + get_name(file), id, current->pid, + get_task_comm(currtask_name, current), + current->parent->pid, file, file_count(file)); /* this should never be called as we don't support copying pmem * ranges via fork */ + down_read(&data->sem); BUG_ON(!has_allocation(file)); - 
down_write(&data->sem); /* remap the garbage pages, forkers don't get access to the data */ pmem_unmap_pfn_range(id, vma, data, 0, vma->vm_start - vma->vm_end); - up_write(&data->sem); + up_read(&data->sem); } static void pmem_vma_close(struct vm_area_struct *vma) @@ -552,15 +1515,29 @@ static void pmem_vma_close(struct vm_area_struct *vma) struct file *file = vma->vm_file; struct pmem_data *data = file->private_data; - DLOG("current %u ppid %u file %p count %d\n", current->pid, - current->parent->pid, file, file_count(file)); - if (unlikely(!is_pmem_file(file) || !has_allocation(file))) { - printk(KERN_WARNING "pmem: something is very wrong, you are " +#if PMEM_DEBUG_MSGS + char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1]; +#endif + DLOG("Dev %s(id: %d) pid %u(%s) ppid %u file %p count %ld\n", + get_name(file), get_id(file), current->pid, + get_task_comm(currtask_name, current), + current->parent->pid, file, file_count(file)); + + if (unlikely(!is_pmem_file(file))) { + pr_warning("pmem: something is very wrong, you are " "closing a vm backing an allocation that doesn't " "exist!\n"); return; } + down_write(&data->sem); + if (unlikely(!has_allocation(file))) { + up_write(&data->sem); + pr_warning("pmem: something is very wrong, you are " + "closing a vm backing an allocation that doesn't " + "exist!\n"); + return; + } if (data->vma == vma) { data->vma = NULL; if ((data->flags & PMEM_FLAGS_CONNECTED) && @@ -578,64 +1555,79 @@ static struct vm_operations_struct vm_ops = { static int pmem_mmap(struct file *file, struct vm_area_struct *vma) { - struct pmem_data *data; + struct pmem_data *data = file->private_data; int index; unsigned long vma_size = vma->vm_end - vma->vm_start; int ret = 0, id = get_id(file); + if (!data) { + pr_err("pmem: Invalid file descriptor, no private data\n"); + return -EINVAL; + } +#if PMEM_DEBUG_MSGS + char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1]; +#endif + DLOG("pid %u(%s) mmap vma_size %lu on dev %s(id: %d)\n", current->pid, + get_task_comm(currtask_name, current), vma_size, + get_name(file), id); if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) { #if PMEM_DEBUG - printk(KERN_ERR "pmem: mmaps must be at offset zero, aligned" + pr_err("pmem: mmaps must be at offset zero, aligned" " and a multiple of pages_size.\n"); #endif return -EINVAL; } - data = (struct pmem_data *)file->private_data; down_write(&data->sem); /* check this file isn't already mmaped, for submaps check this file * has never been mmaped */ - if ((data->flags & PMEM_FLAGS_SUBMAP) || + if ((data->flags & PMEM_FLAGS_MASTERMAP) || + (data->flags & PMEM_FLAGS_SUBMAP) || (data->flags & PMEM_FLAGS_UNSUBMAP)) { #if PMEM_DEBUG - printk(KERN_ERR "pmem: you can only mmap a pmem file once, " + pr_err("pmem: you can only mmap a pmem file once, " "this file is already mmaped. 
%x\n", data->flags); #endif ret = -EINVAL; goto error; } /* if file->private_data == unalloced, alloc*/ - if (data && data->index == -1) { - down_write(&pmem[id].bitmap_sem); - index = pmem_allocate(id, vma->vm_end - vma->vm_start); - up_write(&pmem[id].bitmap_sem); + if (data->index == -1) { + mutex_lock(&pmem[id].arena_mutex); + index = pmem[id].allocate(id, + vma->vm_end - vma->vm_start, + SZ_4K); + mutex_unlock(&pmem[id].arena_mutex); + /* either no space was available or an error occured */ + if (index == -1) { + pr_err("pmem: mmap unable to allocate memory" + "on %s\n", get_name(file)); + ret = -ENOMEM; + goto error; + } + /* store the index of a successful allocation */ data->index = index; } - /* either no space was available or an error occured */ - if (!has_allocation(file)) { - ret = -EINVAL; - printk("pmem: could not find allocation for map.\n"); - goto error; - } - if (pmem_len(id, data) < vma_size) { + if (pmem[id].len(id, data) < vma_size) { #if PMEM_DEBUG - printk(KERN_WARNING "pmem: mmap size [%lu] does not match" - "size of backing region [%lu].\n", vma_size, - pmem_len(id, data)); + pr_err("pmem: mmap size [%lu] does not match" + " size of backing region [%lu].\n", vma_size, + pmem[id].len(id, data)); #endif ret = -EINVAL; goto error; } - vma->vm_pgoff = pmem_start_addr(id, data) >> PAGE_SHIFT; - vma->vm_page_prot = pmem_access_prot(file, vma->vm_page_prot); + vma->vm_pgoff = pmem[id].start_addr(id, data) >> PAGE_SHIFT; + + vma->vm_page_prot = pmem_phys_mem_access_prot(file, vma->vm_page_prot); if (data->flags & PMEM_FLAGS_CONNECTED) { struct pmem_region_node *region_node; struct list_head *elt; if (pmem_map_garbage(id, vma, data, 0, vma_size)) { - printk("pmem: mmap failed in kernel!\n"); + pr_alert("pmem: mmap failed in kernel!\n"); ret = -EAGAIN; goto error; } @@ -663,7 +1655,7 @@ static int pmem_mmap(struct file *file, struct vm_area_struct *vma) current->pid); } else { if (pmem_map_pfn_range(id, vma, data, 0, vma_size)) { - printk(KERN_INFO "pmem: mmap failed in kernel!\n"); + pr_err("pmem: mmap failed in kernel!\n"); ret = -EAGAIN; goto error; } @@ -681,103 +1673,155 @@ static int pmem_mmap(struct file *file, struct vm_area_struct *vma) int get_pmem_user_addr(struct file *file, unsigned long *start, unsigned long *len) { - struct pmem_data *data; - if (!is_pmem_file(file) || !has_allocation(file)) { + int ret = -1; + + if (is_pmem_file(file)) { + struct pmem_data *data = file->private_data; + + down_read(&data->sem); + if (has_allocation(file)) { + if (data->vma) { + *start = data->vma->vm_start; + *len = data->vma->vm_end - data->vma->vm_start; + } else { + *start = *len = 0; #if PMEM_DEBUG - printk(KERN_INFO "pmem: requested pmem data from invalid" - "file.\n"); + pr_err("pmem: %s: no vma present.\n", + __func__); #endif - return -1; - } - data = (struct pmem_data *)file->private_data; - down_read(&data->sem); - if (data->vma) { - *start = data->vma->vm_start; - *len = data->vma->vm_end - data->vma->vm_start; - } else { - *start = 0; - *len = 0; + } + ret = 0; + } + up_read(&data->sem); } - up_read(&data->sem); - return 0; + +#if PMEM_DEBUG + if (ret) + pr_err("pmem: %s: requested pmem data from invalid" + "file.\n", __func__); +#endif + return ret; } int get_pmem_addr(struct file *file, unsigned long *start, unsigned long *vstart, unsigned long *len) { - struct pmem_data *data; - int id; + int ret = -1; - if (!is_pmem_file(file) || !has_allocation(file)) { - return -1; - } + if (is_pmem_file(file)) { + struct pmem_data *data = file->private_data; - data = 
(struct pmem_data *)file->private_data; - if (data->index == -1) { -#if PMEM_DEBUG - printk(KERN_INFO "pmem: requested pmem data from file with no " - "allocation.\n"); - return -1; -#endif - } - id = get_id(file); + down_read(&data->sem); + if (has_allocation(file)) { + int id = get_id(file); - down_read(&data->sem); - *start = pmem_start_addr(id, data); - *len = pmem_len(id, data); - *vstart = (unsigned long)pmem_start_vaddr(id, data); - up_read(&data->sem); + *start = pmem[id].start_addr(id, data); + *len = pmem[id].len(id, data); + *vstart = (unsigned long) + pmem_start_vaddr(id, data); + up_read(&data->sem); #if PMEM_DEBUG - down_write(&data->sem); - data->ref++; - up_write(&data->sem); + down_write(&data->sem); + data->ref++; + up_write(&data->sem); #endif - return 0; + DLOG("returning start %#lx len %lu " + "vstart %#lx\n", + *start, *len, *vstart); + ret = 0; + } else { + up_read(&data->sem); + } + } + return ret; } -int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart, +int get_pmem_file(unsigned int fd, unsigned long *start, unsigned long *vstart, unsigned long *len, struct file **filp) { - struct file *file; + int ret = -1; + struct file *file = fget(fd); - file = fget(fd); if (unlikely(file == NULL)) { - printk(KERN_INFO "pmem: requested data from file descriptor " - "that doesn't exist."); - return -1; + pr_err("pmem: %s: requested data from file " + "descriptor that doesn't exist.\n", __func__); + } else { +#if PMEM_DEBUG_MSGS + char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1]; +#endif + DLOG("filp %p rdev %d pid %u(%s) file %p(%ld)" + " dev %s(id: %d)\n", filp, + file->f_dentry->d_inode->i_rdev, + current->pid, get_task_comm(currtask_name, current), + file, file_count(file), get_name(file), get_id(file)); + + if (!get_pmem_addr(file, start, vstart, len)) { + if (filp) + *filp = file; + ret = 0; + } else { + fput(file); + } } + return ret; +} +EXPORT_SYMBOL(get_pmem_file); - if (get_pmem_addr(file, start, vstart, len)) - goto end; - - if (filp) - *filp = file; - return 0; -end: - fput(file); - return -1; +int get_pmem_fd(int fd, unsigned long *start, unsigned long *len) +{ + unsigned long vstart; + return get_pmem_file(fd, start, &vstart, len, NULL); } +EXPORT_SYMBOL(get_pmem_fd); void put_pmem_file(struct file *file) { - struct pmem_data *data; - int id; - - if (!is_pmem_file(file)) - return; - id = get_id(file); - data = (struct pmem_data *)file->private_data; +#if PMEM_DEBUG_MSGS + char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1]; +#endif + DLOG("rdev %d pid %u(%s) file %p(%ld)" " dev %s(id: %d)\n", + file->f_dentry->d_inode->i_rdev, current->pid, + get_task_comm(currtask_name, current), file, + file_count(file), get_name(file), get_id(file)); + if (is_pmem_file(file)) { #if PMEM_DEBUG - down_write(&data->sem); - if (data->ref == 0) { - printk("pmem: pmem_put > pmem_get %s (pid %d)\n", - pmem[id].dev.name, data->pid); - BUG(); - } - data->ref--; - up_write(&data->sem); + struct pmem_data *data = file->private_data; + + down_write(&data->sem); + if (!data->ref--) { + data->ref++; + pr_alert("pmem: pmem_put > pmem_get %s " + "(pid %d)\n", + pmem[get_id(file)].dev.name, data->pid); + BUG(); + } + up_write(&data->sem); #endif - fput(file); + fput(file); + } +} +EXPORT_SYMBOL(put_pmem_file); + +void put_pmem_fd(int fd) +{ + int put_needed; + struct file *file = fget_light(fd, &put_needed); + + if (file) { + put_pmem_file(file); + fput_light(file, put_needed); + } +} + +void flush_pmem_fd(int fd, unsigned long offset, unsigned long 
len) +{ + int fput_needed; + struct file *file = fget_light(fd, &fput_needed); + + if (file) { + flush_pmem_file(file, offset, len); + fput_light(file, fput_needed); + } } void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len) @@ -788,21 +1832,50 @@ void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len) struct pmem_region_node *region_node; struct list_head *elt; void *flush_start, *flush_end; - - if (!is_pmem_file(file) || !has_allocation(file)) { +#ifdef CONFIG_OUTER_CACHE + unsigned long phy_start, phy_end; +#endif + if (!is_pmem_file(file)) return; - } id = get_id(file); - data = (struct pmem_data *)file->private_data; - if (!pmem[id].cached || file->f_flags & O_SYNC) + if (!pmem[id].cached) return; + /* is_pmem_file fails if !file */ + data = file->private_data; + down_read(&data->sem); + if (!has_allocation(file)) + goto end; + vaddr = pmem_start_vaddr(id, data); + + if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_SYSTEM) { + dmac_flush_range(vaddr, + (void *)((unsigned long)vaddr + + ((struct alloc_list *)(data->index))->size)); +#ifdef CONFIG_OUTER_CACHE + phy_start = pmem_start_addr_system(id, data); + + phy_end = phy_start + + ((struct alloc_list *)(data->index))->size; + + outer_flush_range(phy_start, phy_end); +#endif + goto end; + } /* if this isn't a submmapped file, flush the whole thing */ if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) { - dmac_flush_range(vaddr, vaddr + pmem_len(id, data)); + dmac_flush_range(vaddr, vaddr + pmem[id].len(id, data)); +#ifdef CONFIG_OUTER_CACHE + phy_start = (unsigned long)vaddr - + (unsigned long)pmem[id].vbase + pmem[id].base; + + phy_end = phy_start + pmem[id].len(id, data); + + outer_flush_range(phy_start, phy_end); +#endif goto end; } /* otherwise, flush the region of the file we are drawing */ @@ -814,6 +1887,15 @@ void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len) flush_start = vaddr + region_node->region.offset; flush_end = flush_start + region_node->region.len; dmac_flush_range(flush_start, flush_end); +#ifdef CONFIG_OUTER_CACHE + + phy_start = (unsigned long)flush_start - + (unsigned long)pmem[id].vbase + pmem[id].base; + + phy_end = phy_start + region_node->region.len; + + outer_flush_range(phy_start, phy_end); +#endif break; } } @@ -821,45 +1903,283 @@ void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len) up_read(&data->sem); } +int pmem_cache_maint(struct file *file, unsigned int cmd, + struct pmem_addr *pmem_addr) +{ + struct pmem_data *data; + int id; + unsigned long vaddr, paddr, length, offset, + pmem_len, pmem_start_addr; + + /* Called from kernel-space so file may be NULL */ + if (!file) + return -EBADF; + + data = file->private_data; + id = get_id(file); + + if (!pmem[id].cached) + return 0; + + offset = pmem_addr->offset; + length = pmem_addr->length; + + down_read(&data->sem); + if (!has_allocation(file)) { + up_read(&data->sem); + return -EINVAL; + } + pmem_len = pmem[id].len(id, data); + pmem_start_addr = pmem[id].start_addr(id, data); + up_read(&data->sem); + + if (offset + length > pmem_len) + return -EINVAL; + + vaddr = pmem_addr->vaddr; + paddr = pmem_start_addr + offset; + + DLOG("pmem cache maint on dev %s(id: %d)" + "(vaddr %lx paddr %lx len %lu bytes)\n", + get_name(file), id, vaddr, paddr, length); + if (cmd == PMEM_CLEAN_INV_CACHES) + clean_and_invalidate_caches(vaddr, + length, paddr); + else if (cmd == PMEM_CLEAN_CACHES) + clean_caches(vaddr, length, paddr); + else if (cmd == PMEM_INV_CACHES) 
+ invalidate_caches(vaddr, length, paddr); + + return 0; +} +EXPORT_SYMBOL(pmem_cache_maint); + +int32_t pmem_kalloc(const size_t size, const uint32_t flags) +{ + int info_id, i, memtype, fallback = 0; + unsigned int align; + int32_t index = -1; + + switch (flags & PMEM_ALIGNMENT_MASK) { + case PMEM_ALIGNMENT_4K: + align = SZ_4K; + break; + case PMEM_ALIGNMENT_1M: + align = SZ_1M; + break; + default: + pr_alert("pmem: %s: Invalid alignment %#x\n", + __func__, (flags & PMEM_ALIGNMENT_MASK)); + return -EINVAL; + } + + memtype = flags & PMEM_MEMTYPE_MASK; +retry_memalloc: + info_id = -1; + for (i = 0; i < ARRAY_SIZE(kapi_memtypes); i++) + if (kapi_memtypes[i].memtype == memtype) { + info_id = kapi_memtypes[i].info_id; + break; + } + if (info_id < 0) { + pr_alert("pmem: %s: Kernel %#x memory arena is not " + "initialized. Check board file!\n", + __func__, (flags & PMEM_MEMTYPE_MASK)); + return -EINVAL; + } + + if (!pmem[info_id].allocate) { + pr_alert("pmem: %s: Attempt to allocate size %u, alignment %#x" + " from non-existent PMEM kernel region %d. " + "Driver/board setup is faulty!", + __func__, size, (flags & PMEM_ALIGNMENT_MASK), + info_id); + return -ENOMEM; + } + +#if PMEM_DEBUG + if (align != SZ_4K && + (pmem[info_id].allocator_type == + PMEM_ALLOCATORTYPE_ALLORNOTHING || + pmem[info_id].allocator_type == + PMEM_ALLOCATORTYPE_BUDDYBESTFIT)) + pr_warning("pmem: %s: alignment other than on 4K " + "pages not supported with %s allocator for PMEM " + "memory region '%s'. Memory will be aligned to 4K " + "boundary. Check your board file or allocation " + "invocation.\n", __func__, + (pmem[info_id].allocator_type == + PMEM_ALLOCATORTYPE_ALLORNOTHING ? + "'All Or Nothing'" + : + "'Buddy / Best Fit'"), + pmem[info_id].dev.name); +#endif + + mutex_lock(&pmem[info_id].arena_mutex); + index = pmem[info_id].allocate(info_id, size, align); + mutex_unlock(&pmem[info_id].arena_mutex); + + if (index < 0 && + !fallback && + kapi_memtypes[i].fallback_memtype != PMEM_INVALID_MEMTYPE) { + fallback = 1; + memtype = kapi_memtypes[i].fallback_memtype; + goto retry_memalloc; + } + + return index >= 0 ? + index * pmem[info_id].quantum + pmem[info_id].base : -ENOMEM; +} +EXPORT_SYMBOL(pmem_kalloc); + +static int pmem_kapi_free_index_allornothing(const int32_t physaddr, int id) +{ + return physaddr == pmem[id].base ? 0 : -1; +} + +static int pmem_kapi_free_index_buddybestfit(const int32_t physaddr, int id) +{ + return (physaddr >= pmem[id].base && + physaddr < (pmem[id].base + pmem[id].size && + !(physaddr % pmem[id].quantum))) ? + (physaddr - pmem[id].base) / pmem[id].quantum : -1; +} + +static int pmem_kapi_free_index_bitmap(const int32_t physaddr, int id) +{ + return (physaddr >= pmem[id].base && + physaddr < (pmem[id].base + pmem[id].size)) ? + bit_from_paddr(id, physaddr) : -1; +} + +static int pmem_kapi_free_index_system(const int32_t physaddr, int id) +{ + return 0; +} + +int pmem_kfree(const int32_t physaddr) +{ + int i; + for (i = 0; i < ARRAY_SIZE(kapi_memtypes); i++) { + int index; + int id = kapi_memtypes[i].info_id; + + if (id < 0) + continue; + + if (!pmem[id].allocate) { +#if PMEM_DEBUG + pr_alert("pmem: %s: " + "Attempt to free physical address %#x " + "from unregistered PMEM kernel region" + " %d. Driver/board setup is faulty!", + __func__, physaddr, id); +#endif + return -EINVAL; + } + + index = pmem[id].kapi_free_index(physaddr, id); + if (index >= 0) + return pmem[id].free(id, index) ? 
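/*
 * Editor's sketch (not part of the patch): minimal in-kernel usage of the
 * pmem_kalloc()/pmem_kfree() API defined here. PMEM_ALIGNMENT_1M and the
 * error returns come from this file; PMEM_MEMTYPE_EBI1 is only an assumed
 * example memtype name - the real PMEM_MEMTYPE_* values are board specific.
 */
static int32_t example_pmem_paddr;

static int example_pmem_grab(void)
{
        /* 1 MB, aligned to 1 MB, from the assumed EBI1 kernel arena */
        example_pmem_paddr = pmem_kalloc(SZ_1M,
                        PMEM_MEMTYPE_EBI1 | PMEM_ALIGNMENT_1M);
        if (example_pmem_paddr < 0)
                return example_pmem_paddr;      /* -EINVAL or -ENOMEM */
        return 0;
}

static void example_pmem_release(void)
{
        /* the physical address returned by pmem_kalloc() is the free handle */
        pmem_kfree(example_pmem_paddr);
}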
-EINVAL : 0; + } +#if PMEM_DEBUG + pr_alert("pmem: %s: Failed to free physaddr %#x, does not " + "seem be value returned by pmem_kalloc()!", + __func__, physaddr); +#endif + return -EINVAL; +} +EXPORT_SYMBOL(pmem_kfree); + static int pmem_connect(unsigned long connect, struct file *file) { - struct pmem_data *data = (struct pmem_data *)file->private_data; - struct pmem_data *src_data; - struct file *src_file; int ret = 0, put_needed; + struct file *src_file; + + if (!file) { + pr_err("pmem: %s: NULL file pointer passed in, " + "bailing out!\n", __func__); + ret = -EINVAL; + goto leave; + } + + src_file = fget_light(connect, &put_needed); + + if (!src_file) { + pr_err("pmem: %s: src file not found!\n", __func__); + ret = -EBADF; + goto leave; + } + + if (src_file == file) { /* degenerative case, operator error */ + pr_err("pmem: %s: src_file and passed in file are " + "the same; refusing to connect to self!\n", __func__); + ret = -EINVAL; + goto put_src_file; + } + + if (unlikely(!is_pmem_file(src_file))) { + pr_err("pmem: %s: src file is not a pmem file!\n", + __func__); + ret = -EINVAL; + goto put_src_file; + } else { + struct pmem_data *src_data = src_file->private_data; + + if (!src_data) { + pr_err("pmem: %s: src file pointer has no" + "private data, bailing out!\n", __func__); + ret = -EINVAL; + goto put_src_file; + } + + down_read(&src_data->sem); + + if (unlikely(!has_allocation(src_file))) { + up_read(&src_data->sem); + pr_err("pmem: %s: src file has no allocation!\n", + __func__); + ret = -EINVAL; + } else { + struct pmem_data *data; + int src_index = src_data->index; + + up_read(&src_data->sem); + + data = file->private_data; + if (!data) { + pr_err("pmem: %s: passed in file " + "pointer has no private data, bailing" + " out!\n", __func__); + ret = -EINVAL; + goto put_src_file; + } + + down_write(&data->sem); + if (has_allocation(file) && + (data->index != src_index)) { + up_write(&data->sem); + + pr_err("pmem: %s: file is already " + "mapped but doesn't match this " + "src_file!\n", __func__); + ret = -EINVAL; + } else { + data->index = src_index; + data->flags |= PMEM_FLAGS_CONNECTED; + data->master_fd = connect; + data->master_file = src_file; - down_write(&data->sem); - /* retrieve the src file and check it is a pmem file with an alloc */ - src_file = fget_light(connect, &put_needed); - DLOG("connect %p to %p\n", file, src_file); - if (!src_file) { - printk("pmem: src file not found!\n"); - ret = -EINVAL; - goto err_no_file; - } - if (unlikely(!is_pmem_file(src_file) || !has_allocation(src_file))) { - printk(KERN_INFO "pmem: src file is not a pmem file or has no " - "alloc!\n"); - ret = -EINVAL; - goto err_bad_file; - } - src_data = (struct pmem_data *)src_file->private_data; + up_write(&data->sem); - if (has_allocation(file) && (data->index != src_data->index)) { - printk("pmem: file is already mapped but doesn't match this" - " src_file!\n"); - ret = -EINVAL; - goto err_bad_file; + DLOG("connect %p to %p\n", file, src_file); + } + } } - data->index = src_data->index; - data->flags |= PMEM_FLAGS_CONNECTED; - data->master_fd = connect; - data->master_file = src_file; - -err_bad_file: +put_src_file: fput_light(src_file, put_needed); -err_no_file: - up_write(&data->sem); +leave: return ret; } @@ -878,16 +2198,23 @@ static int pmem_lock_data_and_mm(struct file *file, struct pmem_data *data, { int ret = 0; struct mm_struct *mm = NULL; +#if PMEM_DEBUG_MSGS + char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1]; +#endif + DLOG("pid %u(%s) file %p(%ld)\n", + current->pid, 
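/*
 * Editor's sketch (not part of the patch): the user-space sequence that
 * exercises pmem_connect()/pmem_remap() above. PMEM_CONNECT, PMEM_MAP and
 * struct pmem_region are taken from this driver; the device node name, the
 * <linux/android_pmem.h> header and the omitted error handling are
 * assumptions.
 */
#if 0   /* user-space sketch, not kernel code */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/android_pmem.h>         /* assumed uapi header */

/* master_fd is the allocating process's pmem fd, received e.g. over binder */
static void *pmem_connect_example(int master_fd, size_t sub_len)
{
        int fd = open("/dev/pmem", O_RDWR);
        struct pmem_region sub = { .offset = 0, .len = sub_len };
        void *base;

        ioctl(fd, PMEM_CONNECT, master_fd);     /* share master's allocation */
        base = mmap(NULL, sub_len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        ioctl(fd, PMEM_MAP, &sub);              /* back [0, sub_len) with real pages */
        return base;
}
#endif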
get_task_comm(currtask_name, current), + file, file_count(file)); + *locked_mm = NULL; lock_mm: down_read(&data->sem); if (PMEM_IS_SUBMAP(data)) { mm = get_task_mm(data->task); if (!mm) { + up_read(&data->sem); #if PMEM_DEBUG - printk("pmem: can't remap task is gone!\n"); + pr_alert("pmem: can't remap - task is gone!\n"); #endif - up_read(&data->sem); return -1; } } @@ -902,7 +2229,7 @@ static int pmem_lock_data_and_mm(struct file *file, struct pmem_data *data, * once */ if (PMEM_IS_SUBMAP(data) && !mm) { pmem_unlock_data_and_mm(data, mm); - up_write(&data->sem); + DLOG("mapping contention, repeating mmap op\n"); goto lock_mm; } /* now check that vma.mm is still there, it could have been @@ -916,6 +2243,9 @@ static int pmem_lock_data_and_mm(struct file *file, struct pmem_data *data, data->flags &= ~(PMEM_FLAGS_SUBMAP); } pmem_unlock_data_and_mm(data, mm); +#if PMEM_DEBUG + pr_alert("pmem: vma.mm went away!\n"); +#endif return -1; } *locked_mm = mm; @@ -930,14 +2260,28 @@ int pmem_remap(struct pmem_region *region, struct file *file, struct mm_struct *mm = NULL; struct list_head *elt, *elt2; int id = get_id(file); - struct pmem_data *data = (struct pmem_data *)file->private_data; + struct pmem_data *data; + + DLOG("operation %#x, region offset %ld, region len %ld\n", + operation, region->offset, region->len); + + if (!is_pmem_file(file)) { +#if PMEM_DEBUG + pr_err("pmem: remap request for non-pmem file descriptor\n"); +#endif + return -EINVAL; + } + + /* is_pmem_file fails if !file */ + data = file->private_data; /* pmem region must be aligned on a page boundry */ if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) || !PMEM_IS_PAGE_ALIGNED(region->len))) { #if PMEM_DEBUG - printk("pmem: request for unaligned pmem suballocation " - "%lx %lx\n", region->offset, region->len); + pr_err("pmem: request for unaligned pmem" + "suballocation %lx %lx\n", + region->offset, region->len); #endif return -EINVAL; } @@ -955,18 +2299,18 @@ int pmem_remap(struct pmem_region *region, struct file *file, * that back in it */ if (!is_master_owner(file)) { #if PMEM_DEBUG - printk("pmem: remap requested from non-master process\n"); + pr_err("pmem: remap requested from non-master process\n"); #endif ret = -EINVAL; goto err; } /* check that the requested range is within the src allocation */ - if (unlikely((region->offset > pmem_len(id, data)) || - (region->len > pmem_len(id, data)) || - (region->offset + region->len > pmem_len(id, data)))) { + if (unlikely((region->offset > pmem[id].len(id, data)) || + (region->len > pmem[id].len(id, data)) || + (region->offset + region->len > pmem[id].len(id, data)))) { #if PMEM_DEBUG - printk(KERN_INFO "pmem: suballoc doesn't fit in src_file!\n"); + pr_err("pmem: suballoc doesn't fit in src_file!\n"); #endif ret = -EINVAL; goto err; @@ -978,7 +2322,7 @@ int pmem_remap(struct pmem_region *region, struct file *file, if (!region_node) { ret = -ENOMEM; #if PMEM_DEBUG - printk(KERN_INFO "No space to allocate metadata!"); + pr_alert("pmem: No space to allocate remap metadata!"); #endif goto err; } @@ -999,8 +2343,8 @@ int pmem_remap(struct pmem_region *region, struct file *file, } if (!found) { #if PMEM_DEBUG - printk("pmem: Unmap region does not map any mapped " - "region!"); + pr_err("pmem: Unmap region does not map any" + " mapped region!"); #endif ret = -EINVAL; goto err; @@ -1010,10 +2354,10 @@ int pmem_remap(struct pmem_region *region, struct file *file, if (data->vma && PMEM_IS_SUBMAP(data)) { if (operation == PMEM_MAP) ret = pmem_remap_pfn_range(id, data->vma, data, - 
region->offset, region->len); + region->offset, region->len); else if (operation == PMEM_UNMAP) ret = pmem_unmap_pfn_range(id, data->vma, data, - region->offset, region->len); + region->offset, region->len); } err: @@ -1054,63 +2398,83 @@ static void pmem_revoke(struct file *file, struct pmem_data *data) static void pmem_get_size(struct pmem_region *region, struct file *file) { - struct pmem_data *data = (struct pmem_data *)file->private_data; + /* called via ioctl file op, so file guaranteed to be not NULL */ + struct pmem_data *data = file->private_data; int id = get_id(file); + down_read(&data->sem); if (!has_allocation(file)) { region->offset = 0; region->len = 0; - return; } else { - region->offset = pmem_start_addr(id, data); - region->len = pmem_len(id, data); + region->offset = pmem[id].start_addr(id, data); + region->len = pmem[id].len(id, data); } - DLOG("offset %lx len %lx\n", region->offset, region->len); + up_read(&data->sem); + DLOG("offset 0x%lx len 0x%lx\n", region->offset, region->len); } static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - struct pmem_data *data; + /* called from user space as file op, so file guaranteed to be not + * NULL + */ + struct pmem_data *data = file->private_data; int id = get_id(file); +#if PMEM_DEBUG_MSGS + char currtask_name[ + FIELD_SIZEOF(struct task_struct, comm) + 1]; +#endif + + DLOG("pid %u(%s) file %p(%ld) cmd %#x, dev %s(id: %d)\n", + current->pid, get_task_comm(currtask_name, current), + file, file_count(file), cmd, get_name(file), id); switch (cmd) { case PMEM_GET_PHYS: { struct pmem_region region; + DLOG("get_phys\n"); + down_read(&data->sem); if (!has_allocation(file)) { region.offset = 0; region.len = 0; } else { - data = (struct pmem_data *)file->private_data; - region.offset = pmem_start_addr(id, data); - region.len = pmem_len(id, data); + region.offset = pmem[id].start_addr(id, data); + region.len = pmem[id].len(id, data); } - printk(KERN_INFO "pmem: request for physical address of pmem region " - "from process %d.\n", current->pid); + up_read(&data->sem); + if (copy_to_user((void __user *)arg, &region, sizeof(struct pmem_region))) return -EFAULT; + + DLOG("pmem: successful request for " + "physical address of pmem region id %d, " + "offset 0x%lx, len 0x%lx\n", + id, region.offset, region.len); + break; } case PMEM_MAP: { struct pmem_region region; + DLOG("map\n"); if (copy_from_user(&region, (void __user *)arg, sizeof(struct pmem_region))) return -EFAULT; - data = (struct pmem_data *)file->private_data; return pmem_remap(&region, file, PMEM_MAP); } break; case PMEM_UNMAP: { struct pmem_region region; + DLOG("unmap\n"); if (copy_from_user(&region, (void __user *)arg, sizeof(struct pmem_region))) return -EFAULT; - data = (struct pmem_data *)file->private_data; return pmem_remap(&region, file, PMEM_UNMAP); break; } @@ -1136,169 +2500,590 @@ static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return -EFAULT; break; } + case PMEM_GET_FREE_SPACE: + { + struct pmem_freespace fs; + DLOG("get freespace on %s(id: %d)\n", + get_name(file), id); + + mutex_lock(&pmem[id].arena_mutex); + pmem[id].free_space(id, &fs); + mutex_unlock(&pmem[id].arena_mutex); + + DLOG("%s(id: %d) total free %lu, largest %lu\n", + get_name(file), id, fs.total, fs.largest); + + if (copy_to_user((void __user *)arg, &fs, + sizeof(struct pmem_freespace))) + return -EFAULT; + break; + } + case PMEM_ALLOCATE: { - if (has_allocation(file)) + int ret = 0; + DLOG("allocate, id %d\n", id); + down_write(&data->sem); + if 
(has_allocation(file)) { + pr_err("pmem: Existing allocation found on " + "this file descrpitor\n"); + up_write(&data->sem); return -EINVAL; - data = (struct pmem_data *)file->private_data; - data->index = pmem_allocate(id, arg); - break; + } + + mutex_lock(&pmem[id].arena_mutex); + data->index = pmem[id].allocate(id, + arg, + SZ_4K); + mutex_unlock(&pmem[id].arena_mutex); + ret = data->index == -1 ? -ENOMEM : + data->index; + up_write(&data->sem); + return ret; + } + case PMEM_ALLOCATE_ALIGNED: + { + struct pmem_allocation alloc; + int ret = 0; + + if (copy_from_user(&alloc, (void __user *)arg, + sizeof(struct pmem_allocation))) + return -EFAULT; + DLOG("allocate id align %d %u\n", id, alloc.align); + down_write(&data->sem); + if (has_allocation(file)) { + pr_err("pmem: Existing allocation found on " + "this file descrpitor\n"); + up_write(&data->sem); + return -EINVAL; + } + + if (alloc.align & (alloc.align - 1)) { + pr_err("pmem: Alignment is not a power of 2\n"); + return -EINVAL; + } + + if (alloc.align != SZ_4K && + (pmem[id].allocator_type != + PMEM_ALLOCATORTYPE_BITMAP)) { + pr_err("pmem: Non 4k alignment requires bitmap" + " allocator on %s\n", pmem[id].name); + return -EINVAL; + } + + if (alloc.align > SZ_1M || + alloc.align < SZ_4K) { + pr_err("pmem: Invalid Alignment (%u) " + "specified\n", alloc.align); + return -EINVAL; + } + + mutex_lock(&pmem[id].arena_mutex); + data->index = pmem[id].allocate(id, + alloc.size, + alloc.align); + mutex_unlock(&pmem[id].arena_mutex); + ret = data->index == -1 ? -ENOMEM : + data->index; + up_write(&data->sem); + return ret; } case PMEM_CONNECT: DLOG("connect\n"); return pmem_connect(arg, file); - break; - case PMEM_CACHE_FLUSH: + case PMEM_CLEAN_INV_CACHES: + case PMEM_CLEAN_CACHES: + case PMEM_INV_CACHES: { - struct pmem_region region; - DLOG("flush\n"); - if (copy_from_user(®ion, (void __user *)arg, - sizeof(struct pmem_region))) + struct pmem_addr pmem_addr; + + if (copy_from_user(&pmem_addr, (void __user *)arg, + sizeof(struct pmem_addr))) return -EFAULT; - flush_pmem_file(file, region.offset, region.len); - break; + + return pmem_cache_maint(file, cmd, &pmem_addr); } default: if (pmem[id].ioctl) return pmem[id].ioctl(file, cmd, arg); + + DLOG("ioctl invalid (%#x)\n", cmd); return -EINVAL; } return 0; } -#if PMEM_DEBUG -static ssize_t debug_open(struct inode *inode, struct file *file) +static void ioremap_pmem(int id) +{ + if (pmem[id].cached) + pmem[id].vbase = ioremap_cached(pmem[id].base, pmem[id].size); +#ifdef ioremap_ext_buffered + else if (pmem[id].buffered) + pmem[id].vbase = ioremap_ext_buffered(pmem[id].base, + pmem[id].size); +#endif + else + pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size); +} + +#ifdef CONFIG_MEMORY_HOTPLUG +static int pmem_mapped_regions(int id) { - file->private_data = inode->i_private; + struct list_head *elt; + + mutex_lock(&pmem[id].data_list_mutex); + list_for_each(elt, &pmem[id].data_list) { + struct pmem_data *data = + list_entry(elt, struct pmem_data, list); + + if (data) { + mutex_unlock(&pmem[id].data_list_mutex); + return 1; + } + } + mutex_unlock(&pmem[id].data_list_mutex); return 0; } -static ssize_t debug_read(struct file *file, char __user *buf, size_t count, - loff_t *ppos) +static int active_unstable_pmem(void) { - struct list_head *elt, *elt2; - struct pmem_data *data; - struct pmem_region_node *region_node; - int id = (int)file->private_data; - const int debug_bufmax = 4096; - static char buffer[4096]; - int n = 0; + int id; - DLOG("debug open\n"); - n = scnprintf(buffer, 
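/*
 * Editor's sketch (not part of the patch): user-space view of the new
 * PMEM_ALLOCATE_ALIGNED and cache-maintenance ioctls handled above. The
 * struct pmem_allocation / struct pmem_addr field names match their use in
 * this file; the headers and the caller-supplied fd are as in the previous
 * user-space sketch and remain assumptions.
 */
#if 0   /* user-space sketch, not kernel code */
static void pmem_cache_maint_example(int fd)
{
        struct pmem_allocation alloc = { .size = 1 << 20, .align = 64 << 10 };
        struct pmem_addr maint;
        void *va;

        /* non-4K alignment is only honoured by the bitmap allocator */
        ioctl(fd, PMEM_ALLOCATE_ALIGNED, &alloc);
        va = mmap(NULL, alloc.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

        maint.vaddr  = (unsigned long)va;
        maint.offset = 0;                       /* offset into the allocation */
        maint.length = alloc.size;
        ioctl(fd, PMEM_CLEAN_INV_CACHES, &maint);       /* writeback + invalidate */
}
#endif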
debug_bufmax, - "pid #: mapped regions (offset, len) (offset,len)...\n"); + for (id = 0; id < id_count; id++) { + if (pmem[id].memory_state == MEMORY_STABLE) + continue; + if (pmem_mapped_regions(id)) + return 1; + } - mutex_lock(&pmem[id].data_list_lock); - list_for_each(elt, &pmem[id].data_list) { - data = list_entry(elt, struct pmem_data, list); - down_read(&data->sem); - n += scnprintf(buffer + n, debug_bufmax - n, "pid %u:", - data->pid); - list_for_each(elt2, &data->region_list) { - region_node = list_entry(elt2, struct pmem_region_node, - list); - n += scnprintf(buffer + n, debug_bufmax - n, - "(%lx,%lx) ", - region_node->region.offset, - region_node->region.len); + return 0; +} + +static void reserve_unstable_pmem(unsigned long unstable_pmem_start, + unsigned long unstable_pmem_size) +{ + reserve_hotplug_pages(unstable_pmem_start >> PAGE_SHIFT, + unstable_pmem_size >> PAGE_SHIFT); +} + +static void unreserve_unstable_pmem(unsigned long unstable_pmem_start, + unsigned long unstable_pmem_size) +{ + unreserve_hotplug_pages(unstable_pmem_start >> PAGE_SHIFT, + unstable_pmem_size >> PAGE_SHIFT); +} + +static void pmem_setup_unstable_devices(unsigned long start_pfn, + unsigned long nr_pages) +{ + int id; + unsigned long tmp; + + unstable_pmem_start = start_pfn << PAGE_SHIFT; + tmp = unstable_pmem_start; + + for (id = 0; id < id_count; id++) { + if (pmem[id].memory_state == MEMORY_STABLE) + continue; + + pmem[id].base = tmp; + pr_info("reserving %lx bytes unstable memory at %lx \ + for %s\n", pmem[id].size, pmem[id].base, pmem[id].name); + tmp += pmem[id].size; + } + unstable_pmem_size = tmp - unstable_pmem_start; + + for (id = 0; id < id_count; id++) { + if (pmem[id].memory_state == + MEMORY_UNSTABLE_NO_MEMORY_ALLOCATED) { + ioremap_pmem(id); + pmem[id].garbage_pfn = + page_to_pfn(alloc_page(GFP_KERNEL)); + + if (pmem[id].vbase == 0) + continue; + pmem[id].memory_state = + MEMORY_UNSTABLE_MEMORY_ALLOCATED; } - n += scnprintf(buffer + n, debug_bufmax - n, "\n"); - up_read(&data->sem); } - mutex_unlock(&pmem[id].data_list_lock); +} + +static int pmem_mem_going_offline_callback(void *arg) +{ + struct memory_notify *marg = arg; + int id; + + if ((marg->start_pfn << PAGE_SHIFT) != unstable_pmem_start) + return 0; + + if (active_unstable_pmem()) { + pr_alert("unstable PMEM memory device in use \ + prevents memory hotremove!\n"); + return -EAGAIN; + } + + unreserve_unstable_pmem(unstable_pmem_start, unstable_pmem_size); - n++; - buffer[n] = 0; - return simple_read_from_buffer(buf, count, ppos, buffer, n); + for (id = 0; id < id_count; id++) { + if (pmem[id].memory_state == MEMORY_UNSTABLE_MEMORY_ALLOCATED) + pmem[id].memory_state = + MEMORY_UNSTABLE_NO_MEMORY_ALLOCATED; + } + return 0; } -static struct file_operations debug_fops = { - .read = debug_read, - .open = debug_open, -}; -#endif +static int pmem_mem_online_callback(void *arg) +{ + struct memory_notify *marg = arg; + int id; -#if 0 -static struct miscdevice pmem_dev = { - .name = "pmem", - .fops = &pmem_fops, -}; + + if (unstable_pmem_present == UNSTABLE_UNINITIALIZED) { + pmem_setup_unstable_devices(marg->start_pfn, marg->nr_pages); + pr_alert("unstable pmem start %lx size %lx\n", + unstable_pmem_start, unstable_pmem_size); + unstable_pmem_present = UNSTABLE_INITIALIZED; + } + + if ((marg->start_pfn << PAGE_SHIFT) != unstable_pmem_start) + return 0; + + reserve_unstable_pmem(unstable_pmem_start, unstable_pmem_size); + + for (id = 0; id < id_count; id++) { + if (pmem[id].memory_state == + MEMORY_UNSTABLE_NO_MEMORY_ALLOCATED) { + if 
(pmem[id].vbase == 0) + ioremap_pmem(id); + if (pmem[id].vbase == 0) + continue; + pmem[id].memory_state = + MEMORY_UNSTABLE_MEMORY_ALLOCATED; + } + } + return 0; +} + +static int pmem_memory_callback(struct notifier_block *self, + unsigned long action, void *arg) +{ + int ret = 0; + + if (unstable_pmem_present == NO_UNSTABLE_MEMORY) + return 0; + + switch (action) { + case MEM_ONLINE: + ret = pmem_mem_online_callback(arg); + break; + case MEM_GOING_OFFLINE: + ret = pmem_mem_going_offline_callback(arg); + break; + case MEM_OFFLINE: + case MEM_GOING_ONLINE: + case MEM_CANCEL_ONLINE: + case MEM_CANCEL_OFFLINE: + break; + } + if (ret) + ret = notifier_from_errno(ret); + else + ret = NOTIFY_OK; + return ret; +} #endif int pmem_setup(struct android_pmem_platform_data *pdata, long (*ioctl)(struct file *, unsigned int, unsigned long), int (*release)(struct inode *, struct file *)) { - int err = 0; - int i, index = 0; - int id = id_count; - id_count++; + int i, index = 0, kapi_memtype_idx = -1, id, is_kernel_memtype = 0; + + if (id_count >= PMEM_MAX_DEVICES) { + pr_alert("pmem: %s: unable to register driver(%s) - no more " + "devices available!\n", __func__, pdata->name); + goto err_no_mem; + } + + if (!pdata->size) { + pr_alert("pmem: %s: unable to register pmem driver(%s) - zero " + "size passed in!\n", __func__, pdata->name); + goto err_no_mem; + } + + id = id_count++; + + pmem[id].id = id; + + if (pmem[id].allocate) { + pr_alert("pmem: %s: unable to register pmem driver - " + "duplicate registration of %s!\n", + __func__, pdata->name); + goto err_no_mem; + } + + pmem[id].allocator_type = pdata->allocator_type; + + for (i = 0; i < ARRAY_SIZE(kapi_memtypes); i++) { + if (!strcmp(kapi_memtypes[i].name, pdata->name)) { + if (kapi_memtypes[i].info_id >= 0) { + pr_alert("Unable to register kernel pmem " + "driver - duplicate registration of " + "%s!\n", pdata->name); + goto err_no_mem; + } + if (pdata->cached) { + pr_alert("kernel arena memory must " + "NOT be configured as 'cached'. Check " + "and fix your board file. Failing " + "pmem driver %s registration!", + pdata->name); + goto err_no_mem; + } + + is_kernel_memtype = 1; + kapi_memtypes[i].info_id = id; + kapi_memtype_idx = i; + break; + } + } + + /* 'quantum' is a "hidden" variable that defaults to 0 in the board + * files */ + pmem[id].quantum = pdata->quantum ?: PMEM_MIN_ALLOC; + if (pmem[id].quantum < PMEM_MIN_ALLOC || + !is_power_of_2(pmem[id].quantum)) { + pr_alert("pmem: %s: unable to register pmem driver %s - " + "invalid quantum value (%#x)!\n", + __func__, pdata->name, pmem[id].quantum); + goto err_reset_pmem_info; + } + + if (pdata->start % pmem[id].quantum) { + /* bad alignment for start! */ + pr_alert("pmem: %s: Unable to register driver %s - " + "improperly aligned memory region start address " + "(%#lx) as checked against quantum value of %#x!\n", + __func__, pdata->name, pdata->start, + pmem[id].quantum); + goto err_reset_pmem_info; + } + + if (pdata->size % pmem[id].quantum) { + /* bad alignment for size! 
*/ + pr_alert("pmem: %s: Unable to register driver %s - " + "memory region size (%#lx) is not a multiple of " + "quantum size(%#x)!\n", __func__, pdata->name, + pdata->size, pmem[id].quantum); + goto err_reset_pmem_info; + } - pmem[id].no_allocator = pdata->no_allocator; pmem[id].cached = pdata->cached; pmem[id].buffered = pdata->buffered; pmem[id].base = pdata->start; pmem[id].size = pdata->size; - pmem[id].ioctl = ioctl; - pmem[id].release = release; - init_rwsem(&pmem[id].bitmap_sem); - mutex_init(&pmem[id].data_list_lock); - INIT_LIST_HEAD(&pmem[id].data_list); - pmem[id].dev.name = pdata->name; - pmem[id].dev.minor = id; - pmem[id].dev.fops = &pmem_fops; - printk(KERN_INFO "%s: %d init\n", pdata->name, pdata->cached); + strlcpy(pmem[id].name, pdata->name, PMEM_NAME_SIZE); - err = misc_register(&pmem[id].dev); - if (err) { - printk(KERN_ALERT "Unable to register pmem driver!\n"); - goto err_cant_register_device; + if (pdata->unstable) { + pmem[id].memory_state = MEMORY_UNSTABLE_NO_MEMORY_ALLOCATED; + unstable_pmem_present = UNSTABLE_UNINITIALIZED; } - pmem[id].num_entries = pmem[id].size / PMEM_MIN_ALLOC; - pmem[id].bitmap = kmalloc(pmem[id].num_entries * - sizeof(struct pmem_bits), GFP_KERNEL); - if (!pmem[id].bitmap) - goto err_no_mem_for_metadata; + pmem[id].num_entries = pmem[id].size / pmem[id].quantum; + + memset(&pmem[id].kobj, 0, sizeof(pmem[0].kobj)); + pmem[id].kobj.kset = pmem_kset; + + switch (pmem[id].allocator_type) { + case PMEM_ALLOCATORTYPE_ALLORNOTHING: + pmem[id].allocate = pmem_allocator_all_or_nothing; + pmem[id].free = pmem_free_all_or_nothing; + pmem[id].free_space = pmem_free_space_all_or_nothing; + pmem[id].kapi_free_index = pmem_kapi_free_index_allornothing; + pmem[id].len = pmem_len_all_or_nothing; + pmem[id].start_addr = pmem_start_addr_all_or_nothing; + pmem[id].num_entries = 1; + pmem[id].quantum = pmem[id].size; + pmem[id].allocator.all_or_nothing.allocated = 0; - memset(pmem[id].bitmap, 0, sizeof(struct pmem_bits) * - pmem[id].num_entries); + if (kobject_init_and_add(&pmem[id].kobj, + &pmem_allornothing_ktype, NULL, + "%s", pdata->name)) + goto out_put_kobj; - for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--) { - if ((pmem[id].num_entries) & 1<= 0; i--) + if ((pmem[id].num_entries) & 1<name)) + goto out_put_kobj; + + break; + + case PMEM_ALLOCATORTYPE_BITMAP: /* 0, default if not explicit */ + pmem[id].allocator.bitmap.bitm_alloc = kmalloc( + PMEM_INITIAL_NUM_BITMAP_ALLOCATIONS * + sizeof(*pmem[id].allocator.bitmap.bitm_alloc), + GFP_KERNEL); + if (!pmem[id].allocator.bitmap.bitm_alloc) { + pr_alert("pmem: %s: Unable to register pmem " + "driver %s - can't allocate " + "bitm_alloc!\n", + __func__, pdata->name); + goto err_reset_pmem_info; } - } - if (pmem[id].cached) - pmem[id].vbase = ioremap_cached(pmem[id].base, - pmem[id].size); -#ifdef ioremap_ext_buffered - else if (pmem[id].buffered) - pmem[id].vbase = ioremap_ext_buffered(pmem[id].base, - pmem[id].size); + if (kobject_init_and_add(&pmem[id].kobj, + &pmem_bitmap_ktype, NULL, + "%s", pdata->name)) + goto out_put_kobj; + + for (i = 0; i < PMEM_INITIAL_NUM_BITMAP_ALLOCATIONS; i++) { + pmem[id].allocator.bitmap.bitm_alloc[i].bit = -1; + pmem[id].allocator.bitmap.bitm_alloc[i].quanta = 0; + } + + pmem[id].allocator.bitmap.bitmap_allocs = + PMEM_INITIAL_NUM_BITMAP_ALLOCATIONS; + + pmem[id].allocator.bitmap.bitmap = + kcalloc((pmem[id].num_entries + 31) / 32, + sizeof(unsigned int), GFP_KERNEL); + if (!pmem[id].allocator.bitmap.bitmap) { + pr_alert("pmem: %s: Unable to register pmem " + "driver 
- can't allocate bitmap!\n", + __func__); + goto err_cant_register_device; + } + pmem[id].allocator.bitmap.bitmap_free = pmem[id].num_entries; + + pmem[id].allocate = pmem_allocator_bitmap; + pmem[id].free = pmem_free_bitmap; + pmem[id].free_space = pmem_free_space_bitmap; + pmem[id].kapi_free_index = pmem_kapi_free_index_bitmap; + pmem[id].len = pmem_len_bitmap; + pmem[id].start_addr = pmem_start_addr_bitmap; + + DLOG("bitmap allocator id %d (%s), num_entries %u, raw size " + "%lu, quanta size %u\n", + id, pdata->name, pmem[id].allocator.bitmap.bitmap_free, + pmem[id].size, pmem[id].quantum); + break; + + case PMEM_ALLOCATORTYPE_SYSTEM: + +#ifdef CONFIG_MEMORY_HOTPLUG + goto err_no_mem; #endif - else - pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size); - if (pmem[id].vbase == 0) - goto error_cant_remap; + INIT_LIST_HEAD(&pmem[id].allocator.system_mem.alist); + + pmem[id].allocator.system_mem.used = 0; + pmem[id].vbase = NULL; + + if (kobject_init_and_add(&pmem[id].kobj, + &pmem_system_ktype, NULL, + "%s", pdata->name)) + goto out_put_kobj; + + pmem[id].allocate = pmem_allocator_system; + pmem[id].free = pmem_free_system; + pmem[id].free_space = pmem_free_space_system; + pmem[id].kapi_free_index = pmem_kapi_free_index_system; + pmem[id].len = pmem_len_system; + pmem[id].start_addr = pmem_start_addr_system; + pmem[id].num_entries = 0; + pmem[id].quantum = PAGE_SIZE; + + DLOG("system allocator id %d (%s), raw size %lu\n", + id, pdata->name, pmem[id].size); + break; + + default: + pr_alert("Invalid allocator type (%d) for pmem driver\n", + pdata->allocator_type); + goto err_reset_pmem_info; + } + + pmem[id].ioctl = ioctl; + pmem[id].release = release; + mutex_init(&pmem[id].arena_mutex); + mutex_init(&pmem[id].data_list_mutex); + INIT_LIST_HEAD(&pmem[id].data_list); + + pmem[id].dev.name = pdata->name; + if (!is_kernel_memtype) { + pmem[id].dev.minor = id; + pmem[id].dev.fops = &pmem_fops; + pr_info("pmem: Initializing %s (user-space) as %s\n", + pdata->name, pdata->cached ? 
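/*
 * Editor's sketch (not part of the patch): the board-file side that feeds
 * pmem_setup() via pmem_probe(). Field and constant names come from this
 * driver; the name, id and size values are placeholders, and .start would
 * normally be carved out of the board's memory map.
 */
#if 0   /* board file sketch */
static struct android_pmem_platform_data example_pmem_pdata = {
        .name           = "pmem",
        .start          = 0,                    /* placeholder physical base */
        .size           = SZ_8M,
        .allocator_type = PMEM_ALLOCATORTYPE_BITMAP,
        .cached         = 1,
};

static struct platform_device example_pmem_device = {
        .name = "android_pmem",
        .id   = 0,
        .dev  = { .platform_data = &example_pmem_pdata },
};

/* registered from board init code: platform_device_register(&example_pmem_device); */
#endif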
"cached" : "non-cached"); + + if (misc_register(&pmem[id].dev)) { + pr_alert("Unable to register pmem driver!\n"); + goto err_cant_register_device; + } + } else { /* kernel region, no user accessible device */ + pmem[id].dev.minor = -1; + pr_info("pmem: Initializing %s (in-kernel)\n", pdata->name); + } + + /* do not set up unstable pmem now, wait until first memory hotplug */ + if (pmem[id].memory_state == MEMORY_UNSTABLE_NO_MEMORY_ALLOCATED) + return 0; + + if ((!is_kernel_memtype) && + (pmem[id].allocator_type != PMEM_ALLOCATORTYPE_SYSTEM)) { + ioremap_pmem(id); + if (pmem[id].vbase == 0) { + pr_err("pmem: ioremap failed for device %s\n", + pmem[id].name); + goto error_cant_remap; + } + } pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL)); - if (pmem[id].no_allocator) - pmem[id].allocated = 0; -#if PMEM_DEBUG - debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)id, - &debug_fops); -#endif return 0; + error_cant_remap: - kfree(pmem[id].bitmap); -err_no_mem_for_metadata: - misc_deregister(&pmem[id].dev); + if (!is_kernel_memtype) + misc_deregister(&pmem[id].dev); err_cant_register_device: +out_put_kobj: + kobject_put(&pmem[id].kobj); + if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_BUDDYBESTFIT) + kfree(pmem[id].allocator.buddy_bestfit.buddy_bitmap); + else if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_BITMAP) { + kfree(pmem[id].allocator.bitmap.bitmap); + kfree(pmem[id].allocator.bitmap.bitm_alloc); + } +err_reset_pmem_info: + pmem[id].allocate = 0; + pmem[id].dev.minor = -1; + if (kapi_memtype_idx >= 0) + kapi_memtypes[i].info_id = -1; +err_no_mem: return -1; } @@ -1307,31 +3092,65 @@ static int pmem_probe(struct platform_device *pdev) struct android_pmem_platform_data *pdata; if (!pdev || !pdev->dev.platform_data) { - printk(KERN_ALERT "Unable to probe pmem!\n"); + pr_alert("Unable to probe pmem!\n"); return -1; } pdata = pdev->dev.platform_data; + + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + return pmem_setup(pdata, NULL, NULL); } - static int pmem_remove(struct platform_device *pdev) { int id = pdev->id; __free_page(pfn_to_page(pmem[id].garbage_pfn)); + pm_runtime_disable(&pdev->dev); misc_deregister(&pmem[id].dev); return 0; } +static int pmem_runtime_suspend(struct device *dev) +{ + dev_dbg(dev, "pm_runtime: suspending...\n"); + return 0; +} + +static int pmem_runtime_resume(struct device *dev) +{ + dev_dbg(dev, "pm_runtime: resuming...\n"); + return 0; +} + +static const struct dev_pm_ops pmem_dev_pm_ops = { + .runtime_suspend = pmem_runtime_suspend, + .runtime_resume = pmem_runtime_resume, +}; + static struct platform_driver pmem_driver = { .probe = pmem_probe, .remove = pmem_remove, - .driver = { .name = "android_pmem" } + .driver = { .name = "android_pmem", + .pm = &pmem_dev_pm_ops, + } }; static int __init pmem_init(void) { + /* create /sys/kernel/ directory */ + pmem_kset = kset_create_and_add(PMEM_SYSFS_DIR_NAME, + NULL, kernel_kobj); + if (!pmem_kset) { + pr_err("pmem(%s):kset_create_and_add fail\n", __func__); + return -ENOMEM; + } + +#ifdef CONFIG_MEMORY_HOTPLUG + hotplug_memory_notifier(pmem_memory_callback, 0); +#endif return platform_driver_register(&pmem_driver); } diff --git a/drivers/misc/video_core/720p/Kconfig b/drivers/misc/video_core/720p/Kconfig new file mode 100644 index 0000000000000..53808f9da83fb --- /dev/null +++ b/drivers/misc/video_core/720p/Kconfig @@ -0,0 +1,35 @@ +# +# VIDEO CORE +# +menuconfig MSM_720P_CORE + bool "720P Core Video Driver" + depends on ARCH_MSM7X30 + default n + ---help--- + Say 
Y here to see options for video device drivers. + If you say N, all options in this submenu will be skipped and disabled. + +if MSM_720P_CORE + +config MSM_VIDEO_CORE_REG + tristate "MSM Video core registration" + depends on MSM_720P_CORE + default n + help + This option enables support for Video core. + +config MSM_VIDEO_CORE_VENC + tristate "Video encoder" + depends on MSM_VIDEO_CORE_REG + default n + help + This option enables support for Video encoder. + +config MSM_VIDEO_CORE_VDEC + tristate "Video decoder" + depends on MSM_VIDEO_CORE_REG + default n + help + This option enables support for Video decoder. + +endif # MSM_720P_CORE diff --git a/drivers/misc/video_core/720p/Makefile b/drivers/misc/video_core/720p/Makefile new file mode 100644 index 0000000000000..77aa694edea00 --- /dev/null +++ b/drivers/misc/video_core/720p/Makefile @@ -0,0 +1,40 @@ + +EXTRA_CFLAGS += -Idrivers/misc/video_core/720p +EXTRA_CFLAGS += -Idrivers/misc/video_core/720p/ddl +EXTRA_CFLAGS += -Idrivers/misc/video_core/720p/dec +EXTRA_CFLAGS += -Idrivers/misc/video_core/720p/enc +EXTRA_CFLAGS += -Idrivers/misc/video_core/720p/resource_tracker +EXTRA_CFLAGS += -Idrivers/misc/video_core/720p/scheduler +EXTRA_CFLAGS += -Idrivers/misc/video_core/720p/vcd +EXTRA_CFLAGS += -Idrivers/misc/video_core/720p/init + +obj-$(CONFIG_MSM_VIDEO_CORE_REG) += video_corereg.o +video_corereg-objs := ddl/vcd_ddl_firmware.o \ + ddl/vcd_ddl_metadata.o \ + ddl/video_core_720p.o \ + ddl/vcd_ddl_utils.o \ + ddl/vcd_ddl.o \ + ddl/vcd_ddl_helper.o \ + ddl/vcd_ddl_interrupt_handler.o \ + ddl/vcd_ddl_hal.o \ + ddl/vcd_ddl_properties.o \ + init/video_core_init.o \ + resource_tracker/vcd_res_tracker.o \ + scheduler/vid_frame_scheduler_utils.o \ + scheduler/vid_frame_scheduler.o \ + scheduler/vid_frame_scheduler_api.o \ + vcd/vcd_api.o \ + vcd/vcd_power_sm.o \ + vcd/vcd_client_sm.o \ + vcd/vcd_device_sm.o \ + vcd/vcd_sub.o \ + ddl/vcd_ddl_errors.o + +obj-$(CONFIG_MSM_VIDEO_CORE_VDEC) += video_decoder.o + +video_decoder-objs := dec/vdec.o + +obj-$(CONFIG_MSM_VIDEO_CORE_VENC) += video_encoder.o + +video_encoder-objs := enc/venc.o \ + enc/venc_internal.o diff --git a/drivers/misc/video_core/720p/ddl/vcd_ddl.c b/drivers/misc/video_core/720p/ddl/vcd_ddl.c new file mode 100644 index 0000000000000..22b73621d7381 --- /dev/null +++ b/drivers/misc/video_core/720p/ddl/vcd_ddl.c @@ -0,0 +1,575 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ + +#include "video_core_type.h" +#include "vcd_ddl_utils.h" +#include "vcd_ddl_metadata.h" + +u32 ddl_device_init(struct ddl_init_config *ddl_init_config, void *client_data) +{ + struct ddl_context *ddl_ctxt; + u32 status = VCD_S_SUCCESS; + + if (!ddl_init_config || !ddl_init_config->ddl_callback || + !ddl_init_config->core_virtual_base_addr) { + pr_err("ddl_dev_init:Bad_argument\n"); + return VCD_ERR_ILLEGAL_PARM; + } + + ddl_ctxt = ddl_get_context(); + + if (DDL_IS_INITIALIZED(ddl_ctxt)) { + pr_err("ddl_dev_init:Multiple_init\n"); + return VCD_ERR_ILLEGAL_OP; + } + if (DDL_IS_BUSY(ddl_ctxt)) { + pr_err("ddl_dev_init:Ddl_busy\n"); + return VCD_ERR_BUSY; + } + + memset(ddl_ctxt, 0, sizeof(struct ddl_context)); + + DDL_BUSY(ddl_ctxt); + + ddl_ctxt->ddl_callback = ddl_init_config->ddl_callback; + ddl_ctxt->pf_interrupt_clr = ddl_init_config->pf_interrupt_clr; + ddl_ctxt->core_virtual_base_addr = + ddl_init_config->core_virtual_base_addr; + ddl_ctxt->client_data = client_data; + + ddl_ctxt->intr_status = DDL_INVALID_INTR_STATUS; + + vidc_720p_set_device_virtual_base(ddl_ctxt->core_virtual_base_addr); + + ddl_ctxt->current_ddl = NULL; + ddl_move_command_state(ddl_ctxt, DDL_CMD_INVALID); + + ddl_client_transact(DDL_INIT_CLIENTS, NULL); + + if (!ddl_dma_alloc(&ddl_ctxt->context_buf_addr, DDL_CONTEXT_MEMORY, npelly_context)) { + pr_err("ddl_dev_init:Context_alloc_fail\n"); + status = VCD_ERR_ALLOC_FAIL; + goto out; + } + if (!ddl_dma_alloc(&ddl_ctxt->db_line_buffer, DDL_DB_LINE_BUF_SIZE, npelly_dbl)) { + pr_err("ddl_dev_init:Line_buf_alloc_fail\n"); + status = VCD_ERR_ALLOC_FAIL; + goto out; + } + if (!ddl_dma_alloc(&ddl_ctxt->data_partition_tempbuf, + DDL_MPEG4_DATA_PARTITION_BUF_SIZE, npelly_mpeg4)) { + pr_err("ddl_dev_init:" + "Data_partition_buf_alloc_fail\n"); + status = VCD_ERR_ALLOC_FAIL; + goto out; + } + if (!ddl_dma_alloc(&ddl_ctxt->metadata_shared_input, + DDL_METADATA_TOTAL_INPUTBUFSIZE, npelly_meta)) { + pr_err("ddl_dev_init:" + "metadata_shared_input_alloc_fail\n"); + status = VCD_ERR_ALLOC_FAIL; + goto out; + } + if (!ddl_dma_alloc(&ddl_ctxt->dbg_core_dump, DDL_DBG_CORE_DUMP_SIZE, npelly_debug)) { + pr_err("ddl_dev_init:" + "dbg_core_dump_alloc_failed\n"); + status = VCD_ERR_ALLOC_FAIL; + ddl_ctxt->enable_dbg_core_dump = 0; + goto out; + } + +out: + if (status) { + ddl_release_context_buffers(ddl_ctxt); + DDL_IDLE(ddl_ctxt); + return status; + } + + ddl_move_command_state(ddl_ctxt, DDL_CMD_DMA_INIT); + + ddl_core_init(ddl_ctxt); + + return status; +} + +u32 ddl_device_release(void *client_data) +{ + struct ddl_context *ddl_ctxt; + + ddl_ctxt = ddl_get_context(); + + if (DDL_IS_BUSY(ddl_ctxt)) { + pr_err("ddl_dev_rel:Ddl_busy\n"); + return VCD_ERR_BUSY; + } + + if (!DDL_IS_INITIALIZED(ddl_ctxt)) { + pr_err("ddl_dev_rel:Not_inited\n"); + return VCD_ERR_ILLEGAL_OP; + } + + if (!ddl_client_transact(DDL_ACTIVE_CLIENT, NULL)) { + pr_err("ddl_dev_rel:Client_present_err\n"); + return VCD_ERR_CLIENT_PRESENT; + } + DDL_BUSY(ddl_ctxt); + + ddl_ctxt->device_state = DDL_DEVICE_NOTINIT; + ddl_ctxt->client_data = client_data; + ddl_move_command_state(ddl_ctxt, DDL_CMD_INVALID); + vidc_720p_stop_fw(); + + pr_debug("FW_ENDDONE\n"); + ddl_release_context_buffers(ddl_ctxt); + + DDL_IDLE(ddl_ctxt); + + return VCD_S_SUCCESS; +} + +u32 ddl_open(u32 **ddl_handle, u32 decoding) +{ + struct ddl_context *ddl_context; + struct ddl_client_context *ddl; + u32 status; + + if (!ddl_handle) { + pr_err("ddl_open:Bad_handle\n"); + return VCD_ERR_BAD_HANDLE; + } + + ddl_context = ddl_get_context(); + + if 
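/*
 * Editor's sketch (not part of the patch): how the layer above (the VCD core
 * in this stack) might bring the DDL up. struct ddl_init_config and the
 * ddl_device_init() signature are from this patch; the callback body, the
 * interrupt-clear hook and the ioremapped register base are placeholders.
 */
static void example_ddl_callback(u32 event, u32 status, void *payload,
        u32 size, u32 *ddl_handle, void *const client_data)
{
        /* completion events (device init, channel set, frame done, ...) land here */
}

static void example_vidc_irq_clear(void)
{
        /* platform specific: acknowledge the video core interrupt */
}

static u32 example_ddl_bringup(u8 *core_regs_virt)
{
        struct ddl_init_config init = {
                .core_virtual_base_addr = core_regs_virt,
                .pf_interrupt_clr       = example_vidc_irq_clear,
                .ddl_callback           = example_ddl_callback,
        };

        /* returns immediately; completion is reported via the callback */
        return ddl_device_init(&init, NULL);
}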
(!DDL_IS_INITIALIZED(ddl_context)) { + pr_err("ddl_open:Not_inited\n"); + return VCD_ERR_ILLEGAL_OP; + } + + status = ddl_client_transact(DDL_GET_CLIENT, &ddl); + + if (status) { + pr_err("ddl_open:Client_trasac_failed\n"); + return status; + } + + ddl_move_client_state(ddl, DDL_CLIENT_OPEN); + + ddl->codec_data.hdr.decoding = decoding; + ddl->decoding = decoding; + + ddl_set_default_meta_data_hdr(ddl); + + ddl_set_initial_default_values(ddl); + + *ddl_handle = (u32 *) ddl; + return VCD_S_SUCCESS; +} + +u32 ddl_close(u32 **ddl_handle) +{ + struct ddl_context *ddl_context; + struct ddl_client_context **pp_ddl = (struct ddl_client_context **) + ddl_handle; + + if (!pp_ddl || !*pp_ddl) { + pr_err("ddl_close:Bad_handle\n"); + return VCD_ERR_BAD_HANDLE; + } + + ddl_context = ddl_get_context(); + + if (!DDL_IS_INITIALIZED(ddl_context)) { + pr_err("ddl_close:Not_inited\n"); + return VCD_ERR_ILLEGAL_OP; + } + + if (!DDLCLIENT_STATE_IS(*pp_ddl, DDL_CLIENT_OPEN)) { + pr_err("ddl_close:Not_in_open_state\n"); + return VCD_ERR_ILLEGAL_OP; + } + + ddl_move_client_state(*pp_ddl, DDL_CLIENT_INVALID); + + ddl_client_transact(DDL_FREE_CLIENT, pp_ddl); + + return VCD_S_SUCCESS; +} + +u32 ddl_encode_start(u32 *ddl_handle, void *client_data) +{ + struct ddl_client_context *ddl = + (struct ddl_client_context *)ddl_handle; + struct ddl_context *ddl_context; + struct ddl_encoder_data *enc; + u32 dpb_size; + + ddl_context = ddl_get_context(); + + if (!DDL_IS_INITIALIZED(ddl_context)) { + pr_err("ddl_enc_start:Not_inited\n"); + return VCD_ERR_ILLEGAL_OP; + } + if (DDL_IS_BUSY(ddl_context)) { + pr_err("ddl_enc_start:Ddl_busy\n"); + return VCD_ERR_BUSY; + } + if (!ddl || ddl->decoding) { + pr_err("ddl_enc_start:Bad_handle\n"); + return VCD_ERR_BAD_HANDLE; + } + + if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) { + pr_err("ddl_enc_start:Not_opened\n"); + return VCD_ERR_ILLEGAL_OP; + } + + if (!ddl_encoder_ready_to_start(ddl)) { + pr_err("ddl_enc_start:Err_param_settings\n"); + return VCD_ERR_ILLEGAL_OP; + } + + enc = &ddl->codec_data.encoder; + + dpb_size = ddl_get_yuv_buffer_size(&enc->frame_size, + &enc->re_con_buf_format, false); + + dpb_size *= DDL_ENC_NUM_DPB_BUFFERS; + if (!ddl_dma_alloc(&enc->enc_dpb_addr, dpb_size, npelly_enc_dpb)) { + pr_err("ddl_enc_start:Dpb_alloc_failed\n"); + return VCD_ERR_ALLOC_FAIL; + } + + if ((enc->codec_type.codec == VCD_CODEC_MPEG4 && + !enc->short_header.short_header) || + enc->codec_type.codec == VCD_CODEC_H264) { + if (!ddl_dma_alloc(&enc->seq_header, DDL_ENC_SEQHEADER_SIZE, npelly_enc_seq)) { + ddl_dma_free(&enc->enc_dpb_addr); + pr_err("ddl_enc_start:Seq_hdr_alloc_failed\n"); + return VCD_ERR_ALLOC_FAIL; + } + } else { + enc->seq_header.size = 0; + enc->seq_header.virt_addr = NULL; + } + + DDL_BUSY(ddl_context); + + ddl_context->current_ddl = ddl; + ddl_context->client_data = client_data; + ddl_channel_set(ddl); + return VCD_S_SUCCESS; +} + +u32 ddl_decode_start(u32 *ddl_handle, struct vcd_phys_sequence_hdr *hdr, + void *client_data) +{ + struct ddl_client_context *ddl = (struct ddl_client_context *) + ddl_handle; + struct ddl_context *ddl_context; + struct ddl_decoder_data *decoder; + + ddl_context = ddl_get_context(); + + if (!DDL_IS_INITIALIZED(ddl_context)) { + pr_err("ddl_dec_start:Not_inited\n"); + return VCD_ERR_ILLEGAL_OP; + } + if (DDL_IS_BUSY(ddl_context)) { + pr_err("ddl_dec_start:Ddl_busy\n"); + return VCD_ERR_BUSY; + } + if (!ddl || !ddl->decoding) { + pr_err("ddl_dec_start:Bad_handle\n"); + return VCD_ERR_BAD_HANDLE; + } + if (!DDLCLIENT_STATE_IS(ddl, 
DDL_CLIENT_OPEN)) { + pr_err("ddl_dec_start:Not_in_opened_state\n"); + return VCD_ERR_ILLEGAL_OP; + } + + if (hdr && (!hdr->sz || !hdr->addr)) { + pr_err("ddl_dec_start:Bad_param_seq_header\n"); + return VCD_ERR_ILLEGAL_PARM; + } + + if (!ddl_decoder_ready_to_start(ddl, hdr)) { + pr_err("ddl_dec_start:Err_param_settings\n"); + return VCD_ERR_ILLEGAL_OP; + } + + DDL_BUSY(ddl_context); + + decoder = &ddl->codec_data.decoder; + if (hdr) { + decoder->header_in_start = true; + decoder->decode_config = *hdr; + } else { + decoder->header_in_start = false; + decoder->decode_config.sz = 0; + } + + if (decoder->codec_type.codec == VCD_CODEC_H264) { + if (!ddl_dma_alloc(&decoder->h264Vsp_temp_buffer, + DDL_DECODE_H264_VSPTEMP_BUFSIZE, + npelly_dec_h264)) { + DDL_IDLE(ddl_context); + pr_err("ddl_dec_start:H264Sps_alloc_failed\n"); + return VCD_ERR_ALLOC_FAIL; + } + } + + ddl_context->current_ddl = ddl; + ddl_context->client_data = client_data; + + ddl_channel_set(ddl); + return VCD_S_SUCCESS; +} + +u32 ddl_decode_frame(u32 *ddl_handle, struct ddl_frame_data_tag *in_bits, + void *client_data) +{ + u32 vcd_status = VCD_S_SUCCESS; + struct ddl_client_context *ddl = (struct ddl_client_context *) + ddl_handle; + struct ddl_context *ddl_context = ddl_get_context(); + +#ifdef CORE_TIMING_INFO + ddl_get_core_start_time(0); +#endif + + if (!DDL_IS_INITIALIZED(ddl_context)) { + pr_err("ddl_dec_frame:Not_inited\n"); + return VCD_ERR_ILLEGAL_OP; + } + if (DDL_IS_BUSY(ddl_context)) { + pr_err("ddl_dec_frame:Ddl_busy\n"); + return VCD_ERR_BUSY; + } + if (!ddl || !ddl->decoding) { + pr_err("ddl_dec_frame:Bad_handle\n"); + return VCD_ERR_BAD_HANDLE; + } + if (!in_bits || ((!in_bits->vcd_frm.phys_addr || + !in_bits->vcd_frm.data_len) && + !(VCD_FRAME_FLAG_EOS & in_bits->vcd_frm.flags))) { + pr_err("ddl_dec_frame:Bad_input_param\n"); + return VCD_ERR_ILLEGAL_PARM; + } + + DDL_BUSY(ddl_context); + + ddl_context->current_ddl = ddl; + ddl_context->client_data = client_data; + + ddl->input_frame = *in_bits; + + if (DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME)) { + ddl_decode_frame_run(ddl); + } else { + if (!ddl->codec_data.decoder.dp_buf.no_of_dec_pic_buf) { + pr_err("ddl_dec_frame:Dpbs_requied\n"); + vcd_status = VCD_ERR_ILLEGAL_OP; + } else if (DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB)) { + vcd_status = ddl_decode_set_buffers(ddl); + } else if (DDLCLIENT_STATE_IS(ddl, + DDL_CLIENT_WAIT_FOR_INITCODEC)) { + ddl->codec_data.decoder.decode_config.addr = + ddl->input_frame.vcd_frm.phys_addr; + ddl->codec_data.decoder.decode_config.sz = + ddl->input_frame.vcd_frm.data_len; + ddl_decode_init_codec(ddl); + } else { + pr_err("Dec_frame:Wrong_state\n"); + vcd_status = VCD_ERR_ILLEGAL_OP; + } + if (vcd_status) + DDL_IDLE(ddl_context); + } + return vcd_status; +} + +u32 ddl_encode_frame(u32 *ddl_handle, struct ddl_frame_data_tag *input_frame, + struct ddl_frame_data_tag *out_bits, void *client_data) +{ + struct ddl_client_context *ddl = (struct ddl_client_context *) + ddl_handle; + struct ddl_context *ddl_context = ddl_get_context(); + +#ifdef CORE_TIMING_INFO + ddl_get_core_start_time(1); +#endif + + if (!DDL_IS_INITIALIZED(ddl_context)) { + pr_err("ddl_encode_frame:Not_inited\n"); + return VCD_ERR_ILLEGAL_OP; + } + if (DDL_IS_BUSY(ddl_context)) { + pr_err("ddl_encode_frame:Ddl_busy\n"); + return VCD_ERR_BUSY; + } + if (!ddl || ddl->decoding) { + pr_err("ddl_encode_frame:Bad_handle\n"); + return VCD_ERR_BAD_HANDLE; + } + if (!input_frame || !input_frame->vcd_frm.phys_addr || + 
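/*
 * Editor's sketch (not part of the patch): the call order a DDL client
 * follows for a simple decode session. Each ddl_*() call returns immediately
 * and reports completion through the ddl_callback registered at
 * ddl_device_init() time, so wait_for_ddl_done() below is hypothetical glue
 * standing in for that callback handshake. Codec and DPB property
 * programming via ddl_set_property() is only hinted at here.
 */
static void wait_for_ddl_done(void);    /* hypothetical: blocks until ddl_callback fires */

static u32 example_decode_session(struct ddl_frame_data_tag *bitstream, int n)
{
        u32 *handle;
        u32 rc;
        int i;

        rc = ddl_open(&handle, 1 /* decoding */);
        if (rc)
                return rc;

        /* codec type, frame size and output (DPB) buffers would be
         * programmed here with ddl_set_property() */

        ddl_decode_start(handle, NULL, NULL);   /* NULL: sequence header arrives in-band */
        wait_for_ddl_done();

        for (i = 0; i < n; i++) {
                ddl_decode_frame(handle, &bitstream[i], NULL);
                wait_for_ddl_done();
        }

        ddl_decode_end(handle, NULL);
        wait_for_ddl_done();

        return ddl_close(&handle);
}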
ddl->codec_data.encoder.input_buf_req.size != + input_frame->vcd_frm.data_len) { + pr_err("ddl_encode_frame:Bad_input_params\n"); + return VCD_ERR_ILLEGAL_PARM; + } + if ((input_frame->vcd_frm.phys_addr + input_frame->vcd_frm.offset) & + DDL_STREAMBUF_ALIGN_GUARD_BYTES) { + pr_err("ddl_encode_frame:unaligned_yuv_start_addr\n"); + return VCD_ERR_ILLEGAL_PARM; + } + if (!out_bits || !out_bits->vcd_frm.phys_addr || + !out_bits->vcd_frm.alloc_len) { + pr_err("ddl_encode_frame:Bad_output_params\n"); + return VCD_ERR_ILLEGAL_PARM; + } + if ((ddl->codec_data.encoder.output_buf_req.size + + out_bits->vcd_frm.offset) > + out_bits->vcd_frm.alloc_len) { + pr_err("ddl_encode_frame:offset > min_buf_size\n"); + } + if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME)) { + pr_err("ddl_encode_frame:Wrong_state\n"); + return VCD_ERR_ILLEGAL_OP; + } + + DDL_BUSY(ddl_context); + + ddl_context->current_ddl = ddl; + ddl_context->client_data = client_data; + + ddl->input_frame = *input_frame; + ddl->output_frame = *out_bits; + + ddl_encode_frame_run(ddl); + return VCD_S_SUCCESS; +} + +u32 ddl_decode_end(u32 *ddl_handle, void *client_data) +{ + struct ddl_client_context *ddl = (struct ddl_client_context *) + ddl_handle; + struct ddl_context *ddl_context; + + ddl_context = ddl_get_context(); + +#ifdef CORE_TIMING_INFO + ddl_reset_time_variables(0); +#endif + + if (!DDL_IS_INITIALIZED(ddl_context)) { + pr_err("ddl_dec_end:Not_inited\n"); + return VCD_ERR_ILLEGAL_OP; + } + if (DDL_IS_BUSY(ddl_context)) { + pr_err("ddl_dec_end:Ddl_busy\n"); + return VCD_ERR_BUSY; + } + if (!ddl || !ddl->decoding) { + pr_err("ddl_dec_end:Bad_handle\n"); + return VCD_ERR_BAD_HANDLE; + } + if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME) && + !DDLCLIENT_STATE_IS(ddl, + DDL_CLIENT_WAIT_FOR_INITCODEC) && + !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB) && + !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_FATAL_ERROR)) { + pr_err("ddl_dec_end:Wrong_state\n"); + return VCD_ERR_ILLEGAL_OP; + } + DDL_BUSY(ddl_context); + + ddl_context->current_ddl = ddl; + ddl_context->client_data = client_data; + + ddl_channel_end(ddl); + return VCD_S_SUCCESS; +} + +u32 ddl_encode_end(u32 *ddl_handle, void *client_data) +{ + struct ddl_client_context *ddl = (struct ddl_client_context *) + ddl_handle; + struct ddl_context *ddl_context; + + ddl_context = ddl_get_context(); + +#ifdef CORE_TIMING_INFO + ddl_reset_time_variables(1); +#endif + + if (!DDL_IS_INITIALIZED(ddl_context)) { + pr_err("ddl_enc_end:Not_inited\n"); + return VCD_ERR_ILLEGAL_OP; + } + if (DDL_IS_BUSY(ddl_context)) { + pr_err("ddl_enc_end:Ddl_busy\n"); + return VCD_ERR_BUSY; + } + if (!ddl || ddl->decoding) { + pr_err("ddl_enc_end:Bad_handle\n"); + return VCD_ERR_BAD_HANDLE; + } + if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME) && + !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_INITCODEC) && + !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_FATAL_ERROR)) { + pr_err("ddl_enc_end:Wrong_state\n"); + return VCD_ERR_ILLEGAL_OP; + } + DDL_BUSY(ddl_context); + + ddl_context->current_ddl = ddl; + ddl_context->client_data = client_data; + + ddl_channel_end(ddl); + return VCD_S_SUCCESS; +} + +u32 ddl_reset_hw(u32 mode) +{ + struct ddl_context *ddl_context; + struct ddl_client_context *ddl; + int client_num; + + pr_debug("ddl_reset_hw:called\n"); + ddl_context = ddl_get_context(); + ddl_move_command_state(ddl_context, DDL_CMD_INVALID); + DDL_BUSY(ddl_context); + + if (ddl_context->core_virtual_base_addr) + vidc_720p_do_sw_reset(); + + ddl_context->device_state = DDL_DEVICE_NOTINIT; + for (client_num = 0; 
client_num < VCD_MAX_NO_CLIENT; ++client_num) { + ddl = ddl_context->ddl_clients[client_num]; + ddl_context->ddl_clients[client_num] = NULL; + if (ddl) { + ddl_release_client_internal_buffers(ddl); + ddl_client_transact(DDL_FREE_CLIENT, &ddl); + } + } + + ddl_release_context_buffers(ddl_context); + memset(ddl_context, 0, sizeof(struct ddl_context)); + + return true; +} diff --git a/drivers/misc/video_core/720p/ddl/vcd_ddl.h b/drivers/misc/video_core/720p/ddl/vcd_ddl.h new file mode 100644 index 0000000000000..8f4e92438c53f --- /dev/null +++ b/drivers/misc/video_core/720p/ddl/vcd_ddl.h @@ -0,0 +1,286 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef _VCD_DDL_H_ +#define _VCD_DDL_H_ +#include "vcd_ddl_api.h" +#include "vcd_ddl_utils.h" +#include "vcd_ddl_firmware.h" +#include "video_core_720p.h" + +#define DDL_BUSY_STATE 1 +#define DDL_IDLE_STATE 0 +#define DDL_ERROR_STATE 2 +#define DDL_IS_BUSY(ddl_context) \ + (((ddl_context)->ddl_busy != DDL_IDLE_STATE)) +#define DDL_BUSY(ddl_context) \ + ((ddl_context)->ddl_busy = DDL_BUSY_STATE) +#define DDL_IDLE(ddl_context) \ + ((ddl_context)->ddl_busy = DDL_IDLE_STATE) +#define DDL_ERROR(ddl_context) \ + ((ddl_context)->ddl_busy = DDL_ERROR_STATE) + +#define DDL_DEVICE_NOTINIT 0 +#define DDL_DEVICE_INITED 1 +#define DDL_DEVICE_HWFATAL 2 +#define DDL_IS_INITIALIZED(ddl_context) \ + (ddl_context->device_state == DDL_DEVICE_INITED) + +#define DDLCOMMAND_STATE_IS(ddl_context, command_state) \ + (command_state == (ddl_context)->cmd_state) + +#define DDLCLIENT_STATE_IS(ddl, cs) \ + (cs == (ddl)->client_state) + +#define DDL_DPB_OP_INIT 1 +#define DDL_DPB_OP_MARK_FREE 2 +#define DDL_DPB_OP_MARK_BUSY 3 +#define DDL_DPB_OP_SET_MASK 4 +#define DDL_DPB_OP_RETRIEVE 5 + +#define DDL_INIT_CLIENTS 0 +#define DDL_GET_CLIENT 1 +#define DDL_FREE_CLIENT 2 +#define DDL_ACTIVE_CLIENT 3 + +#define DDL_INVALID_CHANNEL_ID ((u32)~0) +#define DDL_INVALID_CODEC_TYPE ((u32)~0) +#define DDL_INVALID_INTR_STATUS ((u32)~0) + +#define DDL_ENC_REQ_IFRAME 0x1 +#define DDL_ENC_CHANGE_IPERIOD 0x2 +#define DDL_ENC_CHANGE_BITRATE 0x4 +#define DDL_ENC_CHANGE_FRAMERATE 0x8 + +#define DDL_DEC_REQ_OUTPUT_FLUSH 0x1 + +struct ddl_dma_buffer { + void *virt_addr; + phys_addr_t phys_addr; + size_t size; +}; + +enum ddl_cmd_state { + DDL_CMD_INVALID = 0x0, + DDL_CMD_DMA_INIT = 0x1, + DDL_CMD_CPU_RESET = 0x2, + DDL_CMD_CHANNEL_SET = 0x3, + DDL_CMD_INIT_CODEC = 0x4, + DDL_CMD_HEADER_PARSE = 0x5, + DDL_CMD_DECODE_SET_DPB = 0x6, + DDL_CMD_DECODE_FRAME = 0x7, + DDL_CMD_ENCODE_FRAME = 0x8, + DDL_CMD_EOS = 0x9, + DDL_CMD_CHANNEL_END = 0xA, + DDL_CMD_32BIT = 0x7FFFFFFF +}; + +enum ddl_client_state { + DDL_CLIENT_INVALID = 0x0, + DDL_CLIENT_OPEN = 0x1, + DDL_CLIENT_WAIT_FOR_CHDONE = 0x2, + DDL_CLIENT_WAIT_FOR_INITCODEC = 0x3, + DDL_CLIENT_WAIT_FOR_INITCODECDONE = 0x4, + DDL_CLIENT_WAIT_FOR_DPB = 0x5, + DDL_CLIENT_WAIT_FOR_DPBDONE = 0x6, + DDL_CLIENT_WAIT_FOR_FRAME = 0x7, + DDL_CLIENT_WAIT_FOR_FRAME_DONE = 0x8, + DDL_CLIENT_WAIT_FOR_EOS_DONE = 0x9, + DDL_CLIENT_WAIT_FOR_CHEND = 0xA, + DDL_CLIENT_FATAL_ERROR = 0xB, + DDL_CLIENT_32BIT = 0x7FFFFFFF +}; + +struct ddl_mask { + u32 client_mask; + u32 hw_mask; +}; + +struct ddl_context; + +struct ddl_client_context; + +struct ddl_codec_data_hdr { + u32 decoding; +}; + +struct ddl_encoder_data { + struct ddl_codec_data_hdr hdr; + struct vcd_property_codec codec_type; + struct vcd_property_frame_size frame_size; + struct vcd_property_frame_rate frame_rate; + struct vcd_property_target_bitrate target_bit_rate; + struct vcd_property_profile profile; + struct vcd_property_level level; + struct vcd_property_rate_control rc_type; + struct vcd_property_multi_slice multi_slice; + u32 meta_data_enable_flag; + u32 suffix; + struct ddl_dma_buffer meta_data_input; + phys_addr_t meta_data_offset; + struct vcd_property_short_header short_header; + struct vcd_property_vop_timing vop_timing; + u32 hdr_ext_control; + struct vcd_property_db_config db_control; + struct vcd_property_entropy_control entropy_control; + struct vcd_property_i_period period; + struct vcd_property_session_qp session_qp; + struct vcd_property_qp_range qp_range; + struct vcd_property_rc_level rc_level; + u32 r_cframe_skip; + u32 
vb_vbuffer_size; + struct vcd_property_frame_level_rc_params frame_level_rc; + struct vcd_property_adaptive_rc_params adaptive_rc; + struct vcd_property_intra_refresh_mb_number intra_refresh; + struct vcd_property_buffer_format buf_format; + struct vcd_property_buffer_format re_con_buf_format; + u32 dynamic_prop_change; + u32 dynmic_prop_change_req; + u32 ext_enc_control_val; + struct vidc_720p_enc_frame_info enc_frame_info; + struct ddl_dma_buffer enc_dpb_addr; + struct ddl_dma_buffer seq_header; + struct vcd_buffer_requirement input_buf_req; + struct vcd_buffer_requirement output_buf_req; + struct vcd_buffer_requirement client_input_buf_req; + struct vcd_buffer_requirement client_output_buf_req; +}; + +struct ddl_decoder_data { + struct ddl_codec_data_hdr hdr; + struct vcd_property_codec codec_type; + struct vcd_property_buffer_format buf_format; + struct vcd_property_frame_size frame_size; + struct vcd_property_frame_size client_frame_size; + struct vcd_property_profile profile; + struct vcd_property_level level; + u32 progressive_only; + u32 meta_data_enable_flag; + u32 suffix; + struct ddl_dma_buffer meta_data_input; + struct ddl_dma_buffer ref_buffer; + size_t meta_data_offset; + struct vcd_property_post_filter post_filter; + struct vcd_phys_sequence_hdr decode_config; + u32 header_in_start; + u32 min_dpb_num; + size_t y_cb_cr_size; + struct ddl_property_dec_pic_buffers dp_buf; + struct ddl_mask dpb_mask; + u32 dynamic_prop_change; + u32 dynmic_prop_change_req; + struct vidc_720p_dec_disp_info dec_disp_info; + struct ddl_dma_buffer dpb_comv_buffer; + struct ddl_dma_buffer h264Vsp_temp_buffer; + struct vcd_buffer_requirement actual_input_buf_req; + struct vcd_buffer_requirement min_input_buf_req; + struct vcd_buffer_requirement client_input_buf_req; + struct vcd_buffer_requirement actual_output_buf_req; + struct vcd_buffer_requirement min_output_buf_req; + struct vcd_buffer_requirement client_output_buf_req; +}; + +union ddl_codec_data { + struct ddl_codec_data_hdr hdr; + struct ddl_decoder_data decoder; + struct ddl_encoder_data encoder; +}; + +struct ddl_context { + u8 *core_virtual_base_addr; + void (*ddl_callback) (u32 event, u32 status, void *payload, u32 size, + u32 *ddl_handle, void *const client_data); + void *client_data; + void (*pf_interrupt_clr) (void); + enum ddl_cmd_state cmd_state; + struct ddl_client_context *current_ddl; + struct ddl_dma_buffer context_buf_addr; + struct ddl_dma_buffer db_line_buffer; + struct ddl_dma_buffer data_partition_tempbuf; + struct ddl_dma_buffer metadata_shared_input; + struct ddl_dma_buffer dbg_core_dump; + u32 enable_dbg_core_dump; + struct ddl_client_context *ddl_clients[VCD_MAX_NO_CLIENT]; + u32 device_state; + u32 ddl_busy; + u32 intr_status; + u32 cmd_err_status; + u32 disp_pic_err_status; + u32 op_failed; +}; + +struct ddl_client_context { + struct ddl_context *ddl_context; + enum ddl_client_state client_state; + u32 decoding; + u32 channel_id; + struct ddl_frame_data_tag input_frame; + struct ddl_frame_data_tag output_frame; + union ddl_codec_data codec_data; +}; + +struct ddl_context *ddl_get_context(void); +void ddl_move_command_state(struct ddl_context *ddl_context, + enum ddl_cmd_state command_state); +void ddl_move_client_state(struct ddl_client_context *ddl, + enum ddl_client_state client_state); +void ddl_core_init(struct ddl_context *); +void ddl_core_start_cpu(struct ddl_context *); +void ddl_channel_set(struct ddl_client_context *); +void ddl_channel_end(struct ddl_client_context *); +void ddl_encode_init_codec(struct 
ddl_client_context *); +void ddl_decode_init_codec(struct ddl_client_context *); +void ddl_encode_frame_run(struct ddl_client_context *); +void ddl_decode_frame_run(struct ddl_client_context *); +void ddl_decode_eos_run(struct ddl_client_context *); +void ddl_release_context_buffers(struct ddl_context *); +void ddl_release_client_internal_buffers(struct ddl_client_context *ddl); +u32 ddl_decode_set_buffers(struct ddl_client_context *); +u32 ddl_decoder_dpb_transact(struct ddl_decoder_data *dec, + struct ddl_frame_data_tag *in_out_frame, u32 operation); +u32 ddl_client_transact(u32, struct ddl_client_context **); +void ddl_set_default_decoder_buffer_req(struct ddl_decoder_data *dec, + u32 estimate); +void ddl_set_default_encoder_buffer_req(struct ddl_encoder_data *enc); +void ddl_set_default_dec_property(struct ddl_client_context *); +u32 ddl_encoder_ready_to_start(struct ddl_client_context *); +u32 ddl_decoder_ready_to_start(struct ddl_client_context *, + struct vcd_phys_sequence_hdr *); +size_t ddl_get_yuv_buffer_size(struct vcd_property_frame_size *frame_size, + struct vcd_property_buffer_format *buf_format, u32 interlace); +void ddl_calculate_stride(struct vcd_property_frame_size *frame_size, + u32 interlace); +void ddl_encode_dynamic_property(struct ddl_client_context *ddl, u32 enable); +void ddl_decode_dynamic_property(struct ddl_client_context *ddl, u32 enable); +void ddl_set_initial_default_values(struct ddl_client_context *ddl); +u32 ddl_handle_core_errors(struct ddl_context *ddl_context); +void ddl_client_fatal_cb(struct ddl_context *ddl_context); +void ddl_hw_fatal_cb(struct ddl_context *ddl_context); +u32 ddl_hal_engine_reset(struct ddl_context *ddl_context); + +#endif diff --git a/drivers/misc/video_core/720p/ddl/vcd_ddl_api.h b/drivers/misc/video_core/720p/ddl/vcd_ddl_api.h new file mode 100644 index 0000000000000..b3ce6b78118cd --- /dev/null +++ b/drivers/misc/video_core/720p/ddl/vcd_ddl_api.h @@ -0,0 +1,68 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef _VCD_DDL_API_H_ +#define _VCD_DDL_API_H_ +#include "vcd_ddl_internal_property.h" + +struct ddl_init_config { + u8 *core_virtual_base_addr; + void (*pf_interrupt_clr) (void); + void (*ddl_callback) (u32 event, u32 status, void *payload, u32 size, + u32 *ddl_handle, void *const client_data); +}; + +struct ddl_frame_data_tag { + struct vcd_frame_data vcd_frm; + u32 intrlcd_ip_frm_tag; + u32 frm_trans_end; + u32 frm_delta; +}; + +u32 ddl_device_init(struct ddl_init_config *ddl_init_config, void *client_data); +u32 ddl_device_release(void *client_data); +u32 ddl_open(u32 **ddl_handle, u32 decoding); +u32 ddl_close(u32 **ddl_handle); +u32 ddl_encode_start(u32 *ddl_handle, void *client_data); +u32 ddl_encode_frame(u32 *ddl_handle, struct ddl_frame_data_tag *input_frame, + struct ddl_frame_data_tag *output_bit, void *client_data); +u32 ddl_encode_end(u32 *ddl_handle, void *client_data); +u32 ddl_decode_start(u32 *ddl_handle, struct vcd_phys_sequence_hdr *header, + void *client_data); +u32 ddl_decode_frame(u32 *ddl_handle, struct ddl_frame_data_tag *in_bits, + void *client_data); +u32 ddl_decode_end(u32 *ddl_handle, void *client_data); +u32 ddl_set_property(u32 *ddl_handle, struct vcd_property_hdr *property_hdr, + void *property_value); +u32 ddl_get_property(u32 *ddl_handle, struct vcd_property_hdr *property_hdr, + void *property_value); +void ddl_read_and_clear_interrupt(void); +u32 ddl_process_core_response(void); +u32 ddl_reset_hw(u32 mode); + +#endif diff --git a/drivers/misc/video_core/720p/ddl/vcd_ddl_core.h b/drivers/misc/video_core/720p/ddl/vcd_ddl_core.h new file mode 100644 index 0000000000000..8e0cfcdaebee2 --- /dev/null +++ b/drivers/misc/video_core/720p/ddl/vcd_ddl_core.h @@ -0,0 +1,105 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef _VCD_DDL_CORE_H_ +#define _VCD_DDL_CORE_H_ + +#define DDL_LINEAR_BUF_ALIGN_MASK 0xFFFFFFF8U +#define DDL_LINEAR_BUF_ALIGN_GUARD_BYTES 0x7 +#define DDL_LINEAR_BUFFER_ALIGN_BYTES 8 + +#define DDL_TILE_BUF_ALIGN_MASK 0xFFFFE000U +#define DDL_TILE_BUF_ALIGN_GUARD_BYTES 0x1FFF +#define DDL_TILE_BUFFER_ALIGN_BYTES 8192 + +#define DDL_MAX_FRAME_WIDTH 1280 +#define DDL_MAX_FRAME_HEIGHT 720 + +#define DDL_MAX_DP_FRAME_WIDTH 352 +#define DDL_MAX_DP_FRAME_HEIGHT 288 + +#define DDL_SW_RESET_SLEEP 10 + +#define VCD_MAX_NO_CLIENT 4 +#define VCD_FRAME_COMMAND_DEPTH 1 +#define VCD_GENERAL_COMMAND_DEPTH 1 +#define VCD_COMMAND_EXCLUSIVE true + +#define DDL_HW_TIMEOUT_IN_MS 1000 + +#define DDL_STREAMBUF_ALIGN_GUARD_BYTES 0x7 + +#define DDL_CONTEXT_MEMORY (1024 * 15 * (VCD_MAX_NO_CLIENT + 1)) +#define DDL_DB_LINE_BUF_SIZE \ +(((((DDL_MAX_FRAME_WIDTH * 4) - 1) / 256) + 1) * 8 * 1024) +#define DDL_MPEG4_DATA_PARTITION_BUF_SIZE (64 * 1024) +#define DDL_DECODE_H264_VSPTEMP_BUFSIZE 0x51c00 +#define DDL_ENC_NUM_DPB_BUFFERS 2 + +#define DDL_DBG_CORE_DUMP_SIZE (10 * 1024) + +#define DDL_BUFEND_PAD 256 +#define DDL_ENC_SEQHEADER_SIZE (256+DDL_BUFEND_PAD) +#define DDL_MAX_BUFFER_COUNT 32 + +#define DDL_MPEG_REFBUF_COUNT 2 + +#define DDL_MPEG_COMV_BUF_NO 2 +#define DDL_H263_COMV_BUF_NO 0 +#define DDL_COMV_BUFLINE_NO 128 +#define DDL_VC1_COMV_BUFLINE_NO 32 +#define DDL_MINIMUM_BYTE_PER_SLICE 1920 + +#define DDL_MAX_H264_QP 51 +#define DDL_MAX_MPEG4_QP 31 + +//TODO clean this dirty thing +#define DDL_PADDING_HACK(addr) \ + (addr) = (u32)((((u32)(addr) + DDL_STREAMBUF_ALIGN_GUARD_BYTES) & \ + ~(DDL_STREAMBUF_ALIGN_GUARD_BYTES)) + DDL_BUFEND_PAD) + +#define DDL_FRAMESIZE_DIV_FACTOR 0xF +#define DDL_ALLOW_DEC_FRAMESIZE(w, h) (\ + (w) <= DDL_MAX_FRAME_WIDTH && \ + (h) <= DDL_MAX_FRAME_HEIGHT && \ + (((w) >= 32 && (h) >= 16) || ((w) >= 16 && (h) >= 32))) + +#define DDL_ALLOW_ENC_FRAMESIZE(w, h) (\ + (w) <= DDL_MAX_FRAME_WIDTH && \ + (h) <= DDL_MAX_FRAME_HEIGHT && \ + (w) >= 32 && (h) >= 32 && \ + !((w) & DDL_FRAMESIZE_DIV_FACTOR) && \ + !((h) & DDL_FRAMESIZE_DIV_FACTOR)) + +#define DDL_TILE_ALIGN_WIDTH 128 +#define DDL_TILE_ALIGN_HEIGHT 32 +#define DDL_TILE_MULTIPLY_FACTOR 8192 +#define DDL_TILE_ALIGN(val, grid) \ + (((val) + (grid) - 1) / (grid) * (grid)) + +#endif diff --git a/drivers/misc/video_core/720p/ddl/vcd_ddl_errors.c b/drivers/misc/video_core/720p/ddl/vcd_ddl_errors.c new file mode 100644 index 0000000000000..1aad338fbadc6 --- /dev/null +++ b/drivers/misc/video_core/720p/ddl/vcd_ddl_errors.c @@ -0,0 +1,509 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include "video_core_type.h" +#include "vcd_ddl_utils.h" +#include "vcd_ddl.h" + +#if DEBUG +#define DBG(x...) printk(KERN_DEBUG x) +#else +#define DBG(x...) +#endif + +#define ERR(x...) 
printk(KERN_ERR x) + +#define INVALID_CHANNEL_NUMBER 1 +#define INVALID_COMMAND_ID 2 +#define CHANNEL_ALREADY_IN_USE 3 +#define CHANNEL_NOT_SET_BEFORE_CHANNEL_CLOSE 4 +#define CHANNEL_SET_ERROR_INIT_CODEC 5 +#define INIT_CODEC_ALREADY_CALLED 6 +#define CHANNEL_SET_ERROR_INIT_BUFFERS 7 +#define INIT_CODEC_ERROR_INIT_BUFFERS 8 +#define INIT_BUFFER_ALREADY_CALLED 9 +#define CHANNEL_SET_ERROR_FRAME_RUN 10 +#define INIT_CODEC_ERROR_FRAME_RUN 11 +#define INIT_BUFFERS_ERROR_FRAME_RUN 12 +#define CODEC_LIMIT_EXCEEDED 13 +#define FIRMWARE_SIZE_ZERO 14 +#define FIRMWARE_ADDRESS_EXT_ZERO 15 +#define CONTEXT_DMA_IN_ERROR 16 +#define CONTEXT_DMA_OUT_ERROR 17 +#define PROGRAM_DMA_ERROR 18 +#define CONTEXT_STORE_EXT_ADD_ZERO 19 +#define MEM_ALLOCATION_FAILED 20 + + +#define UNSUPPORTED_FEATURE_IN_PROFILE 27 +#define RESOLUTION_NOT_SUPPORTED 28 +#define HEADER_NOT_FOUND 52 +#define MB_NUM_INVALID 61 +#define FRAME_RATE_NOT_SUPPORTED 62 +#define INVALID_QP_VALUE 63 +#define INVALID_RC_REACTION_COEFFICIENT 64 +#define INVALID_CPB_SIZE_AT_GIVEN_LEVEL 65 + +#define ALLOC_DPB_SIZE_NOT_SUFFICIENT 71 +#define ALLOC_DB_SIZE_NOT_SUFFICIENT 72 +#define ALLOC_COMV_SIZE_NOT_SUFFICIENT 73 +#define NUM_BUF_OUT_OF_RANGE 74 +#define NULL_CONTEXT_POINTER 75 +#define NULL_COMAMND_CONTROL_COMM_POINTER 76 +#define NULL_METADATA_INPUT_POINTER 77 +#define NULL_DPB_POINTER 78 +#define NULL_DB_POINTER 79 +#define NULL_COMV_POINTER 80 + +#define DIVIDE_BY_ZERO 81 +#define BIT_STREAM_BUF_EXHAUST 82 +#define DMA_NOT_STOPPED 83 +#define DMA_TX_NOT_COMPLETE 84 + +#define MB_HEADER_NOT_DONE 85 +#define MB_COEFF_NOT_DONE 86 +#define CODEC_SLICE_NOT_DONE 87 +#define VME_NOT_READY 88 +#define VC1_BITPLANE_DECODE_ERR 89 + + +#define VSP_NOT_READY 90 +#define BUFFER_FULL_STATE 91 + +#define RESOLUTION_MISMATCH 112 +#define NV_QUANT_ERR 113 +#define SYNC_MARKER_ERR 114 +#define FEATURE_NOT_SUPPORTED 115 +#define MEM_CORRUPTION 116 +#define INVALID_REFERENCE_FRAME 117 +#define PICTURE_CODING_TYPE_ERR 118 +#define MV_RANGE_ERR 119 +#define PICTURE_STRUCTURE_ERR 120 +#define SLICE_ADDR_INVALID 121 +#define NON_PAIRED_FIELD_NOT_SUPPORTED 122 +#define NON_FRAME_DATA_RECEIVED 123 +#define INCOMPLETE_FRAME 124 +#define NO_BUFFER_RELEASED_FROM_HOST 125 +#define PICTURE_MANAGEMENT_ERROR 128 +#define INVALID_MMCO 129 +#define INVALID_PIC_REORDERING 130 +#define INVALID_POC_TYPE 131 +#define ACTIVE_SPS_NOT_PRESENT 132 +#define ACTIVE_PPS_NOT_PRESENT 133 +#define INVALID_SPS_ID 134 +#define INVALID_PPS_ID 135 + + +#define METADATA_NO_SPACE_QP 151 +#define METADATA_NO_SAPCE_CONCEAL_MB 152 +#define METADATA_NO_SPACE_VC1_PARAM 153 +#define METADATA_NO_SPACE_SEI 154 +#define METADATA_NO_SPACE_VUI 155 +#define METADATA_NO_SPACE_EXTRA 156 +#define METADATA_NO_SPACE_DATA_NONE 157 +#define FRAME_RATE_UNKNOWN 158 +#define ASPECT_RATIO_UNKOWN 159 +#define COLOR_PRIMARIES_UNKNOWN 160 +#define TRANSFER_CHAR_UNKWON 161 +#define MATRIX_COEFF_UNKNOWN 162 +#define NON_SEQ_SLICE_ADDR 163 +#define BROKEN_LINK 164 +#define FRAME_CONCEALED 165 +#define PROFILE_UNKOWN 166 +#define LEVEL_UNKOWN 167 +#define BIT_RATE_NOT_SUPPORTED 168 +#define COLOR_DIFF_FORMAT_NOT_SUPPORTED 169 +#define NULL_EXTRA_METADATA_POINTER 170 +#define SYNC_POINT_NOT_RECEIVED_STARTED_DECODING 171 +#define NULL_FW_DEBUG_INFO_POINTER 172 +#define ALLOC_DEBUG_INFO_SIZE_INSUFFICIENT 173 +#define MAX_STAGE_COUNTER_EXCEEDED 174 + +#define METADATA_NO_SPACE_MB_INFO 180 +#define METADATA_NO_SPACE_SLICE_SIZE 181 +#define RESOLUTION_WARNING 182 + +void ddl_hw_fatal_cb(struct ddl_context *ddl_context) +{ + /* 
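+	 * HW fatal: the whole device is marked DDL_DEVICE_HWFATAL and the
+	 * client is notified with VCD_EVT_IND_HWERRFATAL below.  First,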
Invalidate the command state */ + ddl_move_command_state(ddl_context, DDL_CMD_INVALID); + ddl_context->device_state = DDL_DEVICE_HWFATAL; + + /* callback to the client to indicate hw fatal error */ + ddl_context->ddl_callback(VCD_EVT_IND_HWERRFATAL, + VCD_ERR_HW_FATAL, NULL, 0, + (void *)ddl_context->current_ddl, + ddl_context->client_data); + + DDL_IDLE(ddl_context); +} + +static u32 ddl_handle_hw_fatal_errors(struct ddl_context + *ddl_context) +{ + u32 status = false; + + switch (ddl_context->cmd_err_status) { + + case INVALID_CHANNEL_NUMBER: + case INVALID_COMMAND_ID: + case CHANNEL_ALREADY_IN_USE: + case CHANNEL_NOT_SET_BEFORE_CHANNEL_CLOSE: + case CHANNEL_SET_ERROR_INIT_CODEC: + case INIT_CODEC_ALREADY_CALLED: + case CHANNEL_SET_ERROR_INIT_BUFFERS: + case INIT_CODEC_ERROR_INIT_BUFFERS: + case INIT_BUFFER_ALREADY_CALLED: + case CHANNEL_SET_ERROR_FRAME_RUN: + case INIT_CODEC_ERROR_FRAME_RUN: + case INIT_BUFFERS_ERROR_FRAME_RUN: + case CODEC_LIMIT_EXCEEDED: + case FIRMWARE_SIZE_ZERO: + case FIRMWARE_ADDRESS_EXT_ZERO: + + case CONTEXT_DMA_IN_ERROR: + case CONTEXT_DMA_OUT_ERROR: + case PROGRAM_DMA_ERROR: + case CONTEXT_STORE_EXT_ADD_ZERO: + case MEM_ALLOCATION_FAILED: + + case DIVIDE_BY_ZERO: + case DMA_NOT_STOPPED: + case DMA_TX_NOT_COMPLETE: + + case VSP_NOT_READY: + case BUFFER_FULL_STATE: + ERR("HW FATAL ERROR"); + ddl_hw_fatal_cb(ddl_context); + status = true; + break; + } + return status; +} + +void ddl_client_fatal_cb(struct ddl_context *ddl_context) +{ + struct ddl_client_context *ddl = + ddl_context->current_ddl; + + if (ddl_context->cmd_state == DDL_CMD_DECODE_FRAME) + ddl_decode_dynamic_property(ddl, false); + else if (ddl_context->cmd_state == DDL_CMD_ENCODE_FRAME) + ddl_encode_dynamic_property(ddl, false); + + ddl_move_command_state(ddl_context, DDL_CMD_INVALID); + + ddl_move_client_state(ddl, DDL_CLIENT_FATAL_ERROR); + + ddl_context->ddl_callback + ( + VCD_EVT_IND_HWERRFATAL, + VCD_ERR_CLIENT_FATAL, + NULL, + 0, + (void *)ddl, + ddl_context->client_data + ); + + DDL_IDLE(ddl_context); +} + +static u32 ddl_handle_client_fatal_errors(struct ddl_context + *ddl_context) +{ + u32 status = false; + + switch (ddl_context->cmd_err_status) { + case UNSUPPORTED_FEATURE_IN_PROFILE: + case RESOLUTION_NOT_SUPPORTED: + case HEADER_NOT_FOUND: + case INVALID_SPS_ID: + case INVALID_PPS_ID: + + case MB_NUM_INVALID: + case FRAME_RATE_NOT_SUPPORTED: + case INVALID_QP_VALUE: + case INVALID_RC_REACTION_COEFFICIENT: + case INVALID_CPB_SIZE_AT_GIVEN_LEVEL: + + case ALLOC_DPB_SIZE_NOT_SUFFICIENT: + case ALLOC_DB_SIZE_NOT_SUFFICIENT: + case ALLOC_COMV_SIZE_NOT_SUFFICIENT: + case NUM_BUF_OUT_OF_RANGE: + case NULL_CONTEXT_POINTER: + case NULL_COMAMND_CONTROL_COMM_POINTER: + case NULL_METADATA_INPUT_POINTER: + case NULL_DPB_POINTER: + case NULL_DB_POINTER: + case NULL_COMV_POINTER: + { + status = true; + break; + } + } + + if (!status) + ERR("UNKNOWN-OP-FAILED"); + + ddl_client_fatal_cb(ddl_context); + + return true; +} + +static void ddl_input_failed_cb(struct ddl_context *ddl_context, + u32 vcd_event, u32 vcd_status) +{ + struct ddl_client_context *ddl = ddl_context->current_ddl; + + ddl_move_command_state(ddl_context, DDL_CMD_INVALID); + + if (ddl->decoding) + ddl_decode_dynamic_property(ddl, false); + else + ddl_encode_dynamic_property(ddl, false); + + ddl_context->ddl_callback(vcd_event, + vcd_status, &ddl->input_frame, + sizeof(struct ddl_frame_data_tag), + (void *)ddl, ddl_context->client_data); + + ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_FRAME); +} + +static u32 
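+/* Recoverable (mostly bitstream-level) frame-run errors: the failed input
+ * frame is handed back to the client via ddl_input_failed_cb() and the
+ * session usually stays alive.  Returns true once the error has been
+ * consumed here; false means the caller should treat it as unhandled. */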
ddl_handle_core_recoverable_errors(struct ddl_context \ + *ddl_context) +{ + struct ddl_client_context *ddl = ddl_context->current_ddl; + u32 vcd_status = VCD_S_SUCCESS; + u32 vcd_event = VCD_EVT_RESP_INPUT_DONE; + u32 eos = false, pending_display = 0, release_mask = 0; + + if (ddl_context->cmd_state != DDL_CMD_DECODE_FRAME && + ddl_context->cmd_state != DDL_CMD_ENCODE_FRAME) { + return false; + } + switch (ddl_context->cmd_err_status) { + case NON_PAIRED_FIELD_NOT_SUPPORTED: + { + vcd_status = VCD_ERR_INTRLCD_FIELD_DROP; + break; + } + case NO_BUFFER_RELEASED_FROM_HOST: + { + /* lets check sanity of this error */ + release_mask = + ddl->codec_data.decoder.dpb_mask.hw_mask; + while (release_mask > 0) { + if ((release_mask & 0x1)) + pending_display += 1; + release_mask >>= 1; + } + + if (pending_display >= + ddl->codec_data.decoder.min_dpb_num) { + DBG("FWISSUE-REQBUF!!"); + /* callback to client for client fatal error */ + ddl_client_fatal_cb(ddl_context); + return true ; + } + vcd_event = VCD_EVT_RESP_OUTPUT_REQ; + break; + } + case BIT_STREAM_BUF_EXHAUST: + case MB_HEADER_NOT_DONE: + case MB_COEFF_NOT_DONE: + case CODEC_SLICE_NOT_DONE: + case VME_NOT_READY: + case VC1_BITPLANE_DECODE_ERR: + { + u32 reset_core; + /* need to reset the internal core hw engine */ + reset_core = ddl_hal_engine_reset(ddl_context); + if (!reset_core) + return true; + /* fall through to process bitstream error handling */ + } + case RESOLUTION_MISMATCH: + case NV_QUANT_ERR: + case SYNC_MARKER_ERR: + case FEATURE_NOT_SUPPORTED: + case MEM_CORRUPTION: + case INVALID_REFERENCE_FRAME: + case PICTURE_CODING_TYPE_ERR: + case MV_RANGE_ERR: + case PICTURE_STRUCTURE_ERR: + case SLICE_ADDR_INVALID: + case NON_FRAME_DATA_RECEIVED: + case INCOMPLETE_FRAME: + case PICTURE_MANAGEMENT_ERROR: + case INVALID_MMCO: + case INVALID_PIC_REORDERING: + case INVALID_POC_TYPE: + case ACTIVE_SPS_NOT_PRESENT: + case ACTIVE_PPS_NOT_PRESENT: + { + vcd_status = VCD_ERR_BITSTREAM_ERR; + break; + } + } + + if (!vcd_status && vcd_event == VCD_EVT_RESP_INPUT_DONE) + return false; + + ddl->input_frame.frm_trans_end = true; + + eos = ((vcd_event == VCD_EVT_RESP_INPUT_DONE) && + ((VCD_FRAME_FLAG_EOS & ddl->input_frame. + vcd_frm.flags))); + + if ((ddl->decoding && eos) || + (!ddl->decoding)) + ddl->input_frame.frm_trans_end = false; + + if (vcd_event == VCD_EVT_RESP_INPUT_DONE && + ddl->decoding && + !ddl->codec_data.decoder.header_in_start && + !ddl->codec_data.decoder.dec_disp_info.img_size_x && + !ddl->codec_data.decoder.dec_disp_info.img_size_y + ) { + /* this is first frame seq. 
header only case */ + vcd_status = VCD_S_SUCCESS; + ddl->input_frame.vcd_frm.flags |= + VCD_FRAME_FLAG_CODECCONFIG; + ddl->input_frame.frm_trans_end = !eos; + /* put just some non - zero value */ + ddl->codec_data.decoder.dec_disp_info.img_size_x = 0xff; + } + /* inform client about input failed */ + ddl_input_failed_cb(ddl_context, vcd_event, vcd_status); + + /* for Encoder case, we need to send output done also */ + if (!ddl->decoding) { + /* transaction is complete after this callback */ + ddl->output_frame.frm_trans_end = !eos; + /* error case: NO data present */ + ddl->output_frame.vcd_frm.data_len = 0; + /* call back to client for output frame done */ + ddl_context->ddl_callback(VCD_EVT_RESP_OUTPUT_DONE, + VCD_ERR_FAIL, &(ddl->output_frame), + sizeof(struct ddl_frame_data_tag), + (void *)ddl, ddl_context->client_data); + + if (eos) { + DBG("ENC-EOS_DONE"); + /* send client EOS DONE callback */ + ddl_context->ddl_callback(VCD_EVT_RESP_EOS_DONE, + VCD_S_SUCCESS, NULL, 0, (void *)ddl, + ddl_context->client_data); + } + } + + /* if it is decoder EOS case */ + if (ddl->decoding && eos) + ddl_decode_eos_run(ddl); + else + DDL_IDLE(ddl_context); + + return true; +} + +static u32 ddl_handle_core_warnings(u32 err_status) +{ + u32 status = false; + + switch (err_status) { + case FRAME_RATE_UNKNOWN: + case ASPECT_RATIO_UNKOWN: + case COLOR_PRIMARIES_UNKNOWN: + case TRANSFER_CHAR_UNKWON: + case MATRIX_COEFF_UNKNOWN: + case NON_SEQ_SLICE_ADDR: + case BROKEN_LINK: + case FRAME_CONCEALED: + case PROFILE_UNKOWN: + case LEVEL_UNKOWN: + case BIT_RATE_NOT_SUPPORTED: + case COLOR_DIFF_FORMAT_NOT_SUPPORTED: + case NULL_EXTRA_METADATA_POINTER: + case SYNC_POINT_NOT_RECEIVED_STARTED_DECODING: + + case NULL_FW_DEBUG_INFO_POINTER: + case ALLOC_DEBUG_INFO_SIZE_INSUFFICIENT: + case MAX_STAGE_COUNTER_EXCEEDED: + + case METADATA_NO_SPACE_MB_INFO: + case METADATA_NO_SPACE_SLICE_SIZE: + case RESOLUTION_WARNING: + + /* decoder warnings */ + case METADATA_NO_SPACE_QP: + case METADATA_NO_SAPCE_CONCEAL_MB: + case METADATA_NO_SPACE_VC1_PARAM: + case METADATA_NO_SPACE_SEI: + case METADATA_NO_SPACE_VUI: + case METADATA_NO_SPACE_EXTRA: + case METADATA_NO_SPACE_DATA_NONE: + { + status = true; + DBG("CMD-WARNING-IGNORED!!"); + break; + } + } + return status; +} + +u32 ddl_handle_core_errors(struct ddl_context *ddl_context) +{ + u32 status = false; + + if (!ddl_context->cmd_err_status && + !ddl_context->disp_pic_err_status) + return false; + + if (ddl_context->cmd_state == DDL_CMD_INVALID) { + DBG("SPURIOUS_INTERRUPT_ERROR"); + return true; + } + + if (!ddl_context->op_failed) { + u32 disp_status; + status = ddl_handle_core_warnings(ddl_context-> + cmd_err_status); + disp_status = ddl_handle_core_warnings( + ddl_context->disp_pic_err_status); + if (!status && !disp_status) + DBG("ddl_warning:Unknown"); + + return false; + } + + ERR("\n %s(): OPFAILED!!", __func__); + ERR("\n CMD_ERROR_STATUS = %u, DISP_ERR_STATUS = %u", + ddl_context->cmd_err_status, + ddl_context->disp_pic_err_status); + + status = ddl_handle_hw_fatal_errors(ddl_context); + + if (!status) + status = ddl_handle_core_recoverable_errors(ddl_context); + + if (!status) + status = ddl_handle_client_fatal_errors(ddl_context); + + return status; +} diff --git a/drivers/misc/video_core/720p/ddl/vcd_ddl_firmware.c b/drivers/misc/video_core/720p/ddl/vcd_ddl_firmware.c new file mode 100644 index 0000000000000..d501893885588 --- /dev/null +++ b/drivers/misc/video_core/720p/ddl/vcd_ddl_firmware.c @@ -0,0 +1,212 @@ +/* Copyright (c) 2010, Code Aurora Forum. 
All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include + +#include "video_core_type.h" +#include "vcd_ddl_firmware.h" + +struct vcd_firmware_table { + bool prepared; + struct device *dev; + struct vcd_firmware fw[6]; +}; + +//TODO max_sz is kinda sucky, a better way? +static struct vcd_firmware_table vcd_fw_table = { + .prepared = false, + .fw[0] = { + .filename = "vidc_720p_command_control.fw", + .change_endian = false, + .max_sz = 12288, + }, + .fw[1] = { + .filename = "vidc_720p_mp4_dec_mc.fw", + .change_endian = true, + .max_sz = 32768, + }, + .fw[2] = { + .filename = "vidc_720p_h263_dec_mc.fw", + .change_endian = true, + .max_sz = 24576, + }, + .fw[3] = { + .filename = "vidc_720p_h264_dec_mc.fw", + .change_endian = true, + .max_sz = 45056, + }, + .fw[4] = { + .filename = "vidc_720p_mp4_enc_mc.fw", + .change_endian = true, + .max_sz = 32768, + }, + .fw[5] = { + .filename = "vidc_720p_h264_enc_mc.fw", + .change_endian = true, + .max_sz = 36864, + }, + +}; + +static void vcd_fw_change_endian(struct vcd_firmware *vcd_fw) +{ + size_t i; + u8 tmp; + u8 *fw = vcd_fw->virt_addr; + for (i = 0; i < vcd_fw->sz; i += 4) { + tmp = fw[i]; + fw[i] = fw[i + 3]; + fw[i + 3] = tmp; + + tmp = fw[i + 1]; + fw[i + 1] = fw[i + 2]; + fw[i + 2] = tmp; + } +} + +static int vcd_fw_prepare(struct vcd_firmware *vcd_fw) +{ + int rc; + const struct firmware *fw; + + rc = request_firmware(&fw, vcd_fw->filename, vcd_fw_table.dev); + if (rc) { + pr_err("request_firmware(%s) failed %d\n", vcd_fw->filename, + rc); + return rc; + } + + if (fw->size > vcd_fw->max_sz) { + pr_err("firmware %s is larger than allocated size (%u > %u)\n", + vcd_fw->filename, fw->size, vcd_fw->max_sz); + rc = -ENOMEM; + goto out; + } + vcd_fw->sz = fw->size; + memcpy(vcd_fw->virt_addr, fw->data, fw->size); + + if (vcd_fw->change_endian) + vcd_fw_change_endian(vcd_fw); + + pr_info("prepared firmware %s\n", vcd_fw->filename); + +out: + release_firmware(fw); + return rc; +} + +int vcd_fw_prepare_all() +{ + int i; + int rc = 0; + + if (vcd_fw_table.prepared) + goto out; + + for (i = 0; i < ARRAY_SIZE(vcd_fw_table.fw); i++) { + rc = vcd_fw_prepare(&vcd_fw_table.fw[i]); + if (rc) + goto out; + } + vcd_fw_table.prepared = true; + +out: + return rc; +} + +int vcd_fw_init(struct device *dev) { + int i; + vcd_fw_table.dev = dev; + for (i = 0; i < ARRAY_SIZE(vcd_fw_table.fw); i++) { + struct vcd_firmware *fw = &vcd_fw_table.fw[i]; + fw->virt_addr = dma_alloc_coherent(NULL, fw->max_sz, + &fw->phys_addr, GFP_KERNEL); + if (!fw->virt_addr) { + pr_err("failed to allocate %d for %s\n", fw->max_sz, + fw->filename); + vcd_fw_exit(); + return -ENOMEM; + } + } + return 0; +} + +void vcd_fw_exit(void) { + int i; + vcd_fw_table.prepared = false; + for (i = 0; i < ARRAY_SIZE(vcd_fw_table.fw); i++) { + struct vcd_firmware *fw = &vcd_fw_table.fw[i]; + if (!fw->virt_addr) + continue; + 
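+		/* mirror the dma_alloc_coherent(NULL, fw->max_sz, ...) call in
+		 * vcd_fw_init(): free with the same (NULL) device and the
+		 * original max_sz, not the loaded fw->sz */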
dma_free_coherent(NULL, fw->max_sz, fw->virt_addr, + fw->phys_addr); + } +} + +struct vcd_firmware *vcd_fw_get_boot_fw(void) +{ + if (!vcd_fw_table.prepared) + return NULL; + return &vcd_fw_table.fw[0]; +} + +struct vcd_firmware *vcd_fw_get_fw(bool is_decode, enum vcd_codec codec) +{ + if (!vcd_fw_table.prepared) + return NULL; + + if (is_decode) { + switch (codec) { + case VCD_CODEC_DIVX_4: + case VCD_CODEC_DIVX_5: + case VCD_CODEC_DIVX_6: + case VCD_CODEC_XVID: + case VCD_CODEC_MPEG4: + return &vcd_fw_table.fw[1]; + case VCD_CODEC_H264: + return &vcd_fw_table.fw[3]; + case VCD_CODEC_VC1: + case VCD_CODEC_VC1_RCV: + /* vidc_720p_vc1_dec_mc.fw - untested */ + break; + case VCD_CODEC_MPEG2: + /* vidc_720p_mp2_dec_mc.fw - untested */ + break; + case VCD_CODEC_H263: + return &vcd_fw_table.fw[2]; + default: + break; + } + } else { + switch (codec) { + case VCD_CODEC_H263: + case VCD_CODEC_MPEG4: + return &vcd_fw_table.fw[4]; + case VCD_CODEC_H264: + return &vcd_fw_table.fw[5]; + default: + break; + } + } + return NULL; +} + +bool vcd_fw_is_codec_supported(bool is_decode, enum vcd_codec codec) +{ + return vcd_fw_get_fw(is_decode, codec) != NULL; +} diff --git a/drivers/misc/video_core/720p/ddl/vcd_ddl_firmware.h b/drivers/misc/video_core/720p/ddl/vcd_ddl_firmware.h new file mode 100644 index 0000000000000..467765ab285bb --- /dev/null +++ b/drivers/misc/video_core/720p/ddl/vcd_ddl_firmware.h @@ -0,0 +1,52 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef _VCD_DDL_FIRMWARE_H_ +#define _VCD_DDL_FIRMWARE_H_ + +#include + +#include "vcd_property.h" + +struct vcd_firmware { + const char *filename; + bool change_endian; + phys_addr_t phys_addr; + void *virt_addr; + size_t sz; /* real size of firmware (unknown until load time) */ + size_t max_sz; /* size for allocation at init time */ +}; + +int vcd_fw_init(struct device *dev); +void vcd_fw_exit(void); +int vcd_fw_prepare_all(void); +struct vcd_firmware *vcd_fw_get_boot_fw(void); +struct vcd_firmware *vcd_fw_get_fw(bool is_decode, enum vcd_codec codec); +bool vcd_fw_is_codec_supported(bool is_decode, enum vcd_codec codec); + +#endif diff --git a/drivers/misc/video_core/720p/ddl/vcd_ddl_hal.c b/drivers/misc/video_core/720p/ddl/vcd_ddl_hal.c new file mode 100644 index 0000000000000..1b1b488bb22f3 --- /dev/null +++ b/drivers/misc/video_core/720p/ddl/vcd_ddl_hal.c @@ -0,0 +1,735 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include "video_core_type.h" + +#include "vcd_ddl_utils.h" +#include "vcd_ddl_metadata.h" + +#if DEBUG +#define DBG(x...) printk(KERN_DEBUG x) +#else +#define DBG(x...) 
+#endif + +void ddl_core_init(struct ddl_context *ddl_context) +{ + char *psz_version; + enum vidc_720p_endian_type dma_endian; + u32 interrupt_off; + enum vidc_720p_interrupt_level_selection_type interrupt_sel; + u32 intr_mask = 0x0; + struct vcd_firmware *vcd_fw; + + vcd_fw = vcd_fw_get_boot_fw(); + dma_endian = VIDC_720P_BIG_ENDIAN; /* use default endian */ + + interrupt_off = false; + interrupt_sel = VIDC_720P_INTERRUPT_LEVEL_SEL; + + intr_mask |= VIDC_720P_INTR_BUFFER_FULL; + intr_mask |= VIDC_720P_INTR_FW_DONE; + intr_mask |= VIDC_720P_INTR_DMA_DONE; + intr_mask |= VIDC_720P_INTR_FRAME_DONE; + + vidc_720p_do_sw_reset(); + + vidc_720p_init(&psz_version, vcd_fw->sz, vcd_fw->phys_addr, dma_endian, + interrupt_off, interrupt_sel, intr_mask); + return; +} + +void ddl_core_start_cpu(struct ddl_context *ddl_context) +{ + enum vidc_720p_endian_type dma_endian; + u32 dbg_core_dump_buf_size = 0; + + dma_endian = VIDC_720P_LITTLE_ENDIAN; /* use reverse endian */ + + ddl_move_command_state(ddl_context, DDL_CMD_CPU_RESET); + + DBG("VSP_BUF_ADDR_SIZE %d", ddl_context->context_buf_addr.size); + if (ddl_context->enable_dbg_core_dump) { + dbg_core_dump_buf_size = ddl_context->dbg_core_dump.size; + } + + vidc_720p_start_cpu(dma_endian, ddl_context->context_buf_addr.phys_addr, + ddl_context->dbg_core_dump.phys_addr, dbg_core_dump_buf_size); +} + +void ddl_channel_set(struct ddl_client_context *ddl) +{ + enum vidc_720p_enc_dec_selection_type enc_dec_sel; + enum vidc_720p_codec_type codec; + enum vcd_codec vcd_codec; + struct vcd_firmware *vcd_fw; + + if (ddl->decoding) { + enc_dec_sel = VIDC_720P_DECODER; + vcd_codec = ddl->codec_data.decoder.codec_type.codec; + } else { + enc_dec_sel = VIDC_720P_ENCODER; + vcd_codec = ddl->codec_data.encoder.codec_type.codec; + } + switch (vcd_codec) { + default: + case VCD_CODEC_MPEG4: + codec = VIDC_720P_MPEG4; + if (ddl->decoding) { + vidc_720p_decode_set_mpeg4_data_partitionbuffer( + ddl->ddl_context->data_partition_tempbuf.phys_addr); + } + break; + case VCD_CODEC_H264: + codec = VIDC_720P_H264; + break; + case VCD_CODEC_DIVX_4: + case VCD_CODEC_DIVX_5: + case VCD_CODEC_DIVX_6: + codec = VIDC_720P_DIVX; + break; + case VCD_CODEC_XVID: + codec = VIDC_720P_XVID; + break; + case VCD_CODEC_H263: + codec = VIDC_720P_H263; + break; + case VCD_CODEC_MPEG2: + codec = VIDC_720P_MPEG2; + break; + case VCD_CODEC_VC1: + case VCD_CODEC_VC1_RCV: + codec = VIDC_720P_VC1; + break; + } + + vcd_fw = vcd_fw_get_fw(ddl->decoding, vcd_codec); + + ddl_move_command_state(ddl->ddl_context, DDL_CMD_CHANNEL_SET); + ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_CHDONE); + + vidc_720p_set_channel(ddl->channel_id, enc_dec_sel, codec, + vcd_fw->phys_addr, vcd_fw->sz); +} + +void ddl_decode_init_codec(struct ddl_client_context *ddl) +{ + u32 seq_h = 0, seq_e = 0, start_byte_num = 0; + struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder); + struct vcd_phys_sequence_hdr *seq_hdr = &decoder->decode_config; + enum vidc_720p_memory_access_method_type mem_access_method; + + ddl_metadata_enable(ddl); + + vidc_720p_decode_set_error_control(true); + + vidc_720p_decode_set_mpeg4Post_filter(decoder->post_filter.post_filter); + if (decoder->codec_type.codec == VCD_CODEC_VC1_RCV) { + vidc_720p_set_frame_size(decoder->client_frame_size.width, + decoder->client_frame_size.height); + } else { + vidc_720p_set_frame_size(0x0, 0x0); + } + + switch (decoder->buf_format.buffer_format) { + default: + case VCD_BUFFER_FORMAT_NV12: + mem_access_method = VIDC_720P_TILE_LINEAR; + break; + case 
VCD_BUFFER_FORMAT_TILE_4x2: + mem_access_method = VIDC_720P_TILE_64x32; + break; + } + pr_debug("HEADER-PARSE-START\n"); + + seq_h = seq_hdr->addr; + start_byte_num = 8 - (seq_h & DDL_STREAMBUF_ALIGN_GUARD_BYTES); + seq_e = seq_h + seq_hdr->sz; + seq_h &= ~(DDL_STREAMBUF_ALIGN_GUARD_BYTES); + DDL_PADDING_HACK(seq_e); + + ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_INITCODECDONE); + ddl_move_command_state(ddl->ddl_context, DDL_CMD_HEADER_PARSE); + + vidc_720p_decode_bitstream_header(ddl->channel_id, seq_hdr->sz, + start_byte_num, seq_h, seq_e, mem_access_method); +} + +void ddl_decode_dynamic_property(struct ddl_client_context *ddl, u32 enable) +{ + struct ddl_decoder_data *decoder = &ddl->codec_data.decoder; + struct vcd_frame_data *bit_stream = &ddl->input_frame.vcd_frm; + + if (!enable) { + if (decoder->dynmic_prop_change_req) { + decoder->dynmic_prop_change_req = false; + vidc_720p_decode_dynamic_req_reset(); + } + return; + } + if ((decoder->dynamic_prop_change & DDL_DEC_REQ_OUTPUT_FLUSH)) { + decoder->dynmic_prop_change_req = true; + decoder->dynamic_prop_change &= ~(DDL_DEC_REQ_OUTPUT_FLUSH); + decoder->dpb_mask.hw_mask = 0; + vidc_720p_decode_dynamic_req_set(VIDC_720P_FLUSH_REQ); + } + if ((decoder->meta_data_enable_flag & VCD_METADATA_PASSTHROUGH) && + (VCD_FRAME_FLAG_EXTRADATA & bit_stream->flags)) { + phys_addr_t extra_datastart = bit_stream->phys_addr + + bit_stream->offset + bit_stream->data_len; + extra_datastart = (extra_datastart + 3) & ~0x03; + + decoder->dynmic_prop_change_req = true; + + vidc_720p_decode_setpassthrough_start(extra_datastart); + + vidc_720p_decode_dynamic_req_set(VIDC_720P_EXTRADATA); + } +} + +void ddl_encode_dynamic_property(struct ddl_client_context *ddl, u32 enable) +{ + struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder); + u32 enc_param_change = 0; + + if (!enable) { + if (encoder->dynmic_prop_change_req) { + encoder->dynmic_prop_change_req = false; + encoder->ext_enc_control_val &= + ~(VIDC_720P_ENC_IFRAME_REQ); + vidc_720p_encode_set_control_param + (encoder->ext_enc_control_val); + vidc_720p_encoder_set_param_change(enc_param_change); + } + return; + } + if ((encoder->dynamic_prop_change & DDL_ENC_REQ_IFRAME)) { + encoder->dynamic_prop_change &= ~(DDL_ENC_REQ_IFRAME); + encoder->ext_enc_control_val |= VIDC_720P_ENC_IFRAME_REQ; + vidc_720p_encode_set_control_param + (encoder->ext_enc_control_val); + } + if ((encoder->dynamic_prop_change & DDL_ENC_CHANGE_BITRATE)) { + vidc_720p_encode_set_bit_rate( + encoder->target_bit_rate.target_bitrate); + enc_param_change |= VIDC_720P_ENC_BITRATE_CHANGE; + encoder->dynamic_prop_change &= ~(DDL_ENC_CHANGE_BITRATE); + } + if ((encoder->dynamic_prop_change & DDL_ENC_CHANGE_IPERIOD)) { + vidc_720p_encode_set_i_period(encoder->period.frames); + enc_param_change |= VIDC_720P_ENC_IPERIOD_CHANGE; + encoder->dynamic_prop_change &= ~(DDL_ENC_CHANGE_IPERIOD); + } + if ((encoder->dynamic_prop_change & + DDL_ENC_CHANGE_FRAMERATE)) { + vidc_720p_encode_set_fps + ((encoder->frame_rate.fps_numerator * 1000) / + encoder->frame_rate.fps_denominator); + enc_param_change |= VIDC_720P_ENC_FRAMERATE_CHANGE; + encoder->dynamic_prop_change &= ~(DDL_ENC_CHANGE_FRAMERATE); + } + if (enc_param_change) + vidc_720p_encoder_set_param_change(enc_param_change); +} + +static void ddl_encode_set_profile_level(struct ddl_client_context *ddl) +{ + struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder); + u32 profile; + u32 level; + + switch (encoder->profile.profile) { + default: + case VCD_PROFILE_MPEG4_SP: + profile = 
VIDC_720P_PROFILE_MPEG4_SP; + break; + case VCD_PROFILE_MPEG4_ASP: + profile = VIDC_720P_PROFILE_MPEG4_ASP; + break; + case VCD_PROFILE_H264_BASELINE: + profile = VIDC_720P_PROFILE_H264_BASELINE; + break; + case VCD_PROFILE_H264_MAIN: + profile = VIDC_720P_PROFILE_H264_MAIN; + break; + case VCD_PROFILE_H264_HIGH: + profile = VIDC_720P_PROFILE_H264_HIGH; + break; + case VCD_PROFILE_H263_BASELINE: + profile = VIDC_720P_PROFILE_H263_BASELINE; + break; + } + switch (encoder->level.level) { + default: + case VCD_LEVEL_MPEG4_0: + level = VIDC_720P_MPEG4_LEVEL0; + break; + case VCD_LEVEL_MPEG4_0b: + level = VIDC_720P_MPEG4_LEVEL0b; + break; + case VCD_LEVEL_MPEG4_1: + level = VIDC_720P_MPEG4_LEVEL1; + break; + case VCD_LEVEL_MPEG4_2: + level = VIDC_720P_MPEG4_LEVEL2; + break; + case VCD_LEVEL_MPEG4_3: + level = VIDC_720P_MPEG4_LEVEL3; + break; + case VCD_LEVEL_MPEG4_3b: + level = VIDC_720P_MPEG4_LEVEL3b; + break; + case VCD_LEVEL_MPEG4_4: + case VCD_LEVEL_MPEG4_4a: + level = VIDC_720P_MPEG4_LEVEL4a; + break; + case VCD_LEVEL_MPEG4_5: + level = VIDC_720P_MPEG4_LEVEL5; + break; + case VCD_LEVEL_MPEG4_6: + level = VIDC_720P_MPEG4_LEVEL6; + break; + case VCD_LEVEL_H264_1: + level = VIDC_720P_H264_LEVEL1; + break; + case VCD_LEVEL_H264_1b: + level = VIDC_720P_H264_LEVEL1b; + break; + case VCD_LEVEL_H264_1p1: + level = VIDC_720P_H264_LEVEL1p1; + break; + case VCD_LEVEL_H264_1p2: + level = VIDC_720P_H264_LEVEL1p2; + break; + case VCD_LEVEL_H264_1p3: + level = VIDC_720P_H264_LEVEL1p3; + break; + case VCD_LEVEL_H264_2: + level = VIDC_720P_H264_LEVEL2; + break; + case VCD_LEVEL_H264_2p1: + level = VIDC_720P_H264_LEVEL2p1; + break; + case VCD_LEVEL_H264_2p2: + level = VIDC_720P_H264_LEVEL2p2; + break; + case VCD_LEVEL_H264_3: + level = VIDC_720P_H264_LEVEL3; + break; + case VCD_LEVEL_H264_3p1: + level = VIDC_720P_H264_LEVEL3p1; + break; + case VCD_LEVEL_H263_10: + level = VIDC_720P_H263_LEVEL10; + break; + case VCD_LEVEL_H263_20: + level = VIDC_720P_H263_LEVEL20; + break; + case VCD_LEVEL_H263_30: + level = VIDC_720P_H263_LEVEL30; + break; + case VCD_LEVEL_H263_40: + level = VIDC_720P_H263_LEVEL40; + break; + case VCD_LEVEL_H263_45: + level = VIDC_720P_H263_LEVEL45; + break; + case VCD_LEVEL_H263_50: + level = VIDC_720P_H263_LEVEL50; + break; + case VCD_LEVEL_H263_60: + level = VIDC_720P_H263_LEVEL60; + break; + case VCD_LEVEL_H263_70: + level = VIDC_720P_H263_LEVEL70; + break; + } + vidc_720p_encode_set_profile(profile, level); +} + +void ddl_encode_init_codec(struct ddl_client_context *ddl) +{ + struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder); + enum vidc_720p_memory_access_method_type mem_access_method; + enum vidc_720p_DBConfig_type db_config; + enum vidc_720p_MSlice_selection_type m_slice_sel; + + ddl_encode_set_profile_level(ddl); + + vidc_720p_set_frame_size + (encoder->frame_size.width, encoder->frame_size.height); + vidc_720p_encode_set_qp_params + (encoder->qp_range.max_qp, encoder->qp_range.min_qp); + vidc_720p_encode_set_rc_config + (encoder->rc_level.frame_level_rc, + encoder->rc_level.mb_level_rc, + encoder->session_qp.iframe_qp, + encoder->session_qp.frame_qp); + + if (encoder->r_cframe_skip) { + if (encoder->vb_vbuffer_size) { + encoder->ext_enc_control_val = (0x2 << 0x2) | + (encoder->vb_vbuffer_size << 0x10); + } else + encoder->ext_enc_control_val = (0x1 << 2); + } else + encoder->ext_enc_control_val = 0; + + vidc_720p_encode_set_fps + ((encoder->frame_rate.fps_numerator * 1000) / + encoder->frame_rate.fps_denominator); + + vidc_720p_encode_set_vop_time( + 
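+		/* frame delta is 0 at init time; the per-frame value is
+		 * programmed again in ddl_encode_frame_run() */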
encoder->vop_timing.vop_time_resolution, 0); + + if (encoder->rc_level.frame_level_rc) { + vidc_720p_encode_set_bit_rate + (encoder->target_bit_rate.target_bitrate); + + vidc_720p_encode_set_frame_level_rc_params + (encoder->frame_level_rc.reaction_coeff); + } + if (encoder->rc_level.mb_level_rc) { + vidc_720p_encode_set_mb_level_rc_params + (encoder->adaptive_rc.dark_region_as_flag, + encoder->adaptive_rc.smooth_region_as_flag, + encoder->adaptive_rc.static_region_as_flag, + encoder->adaptive_rc.activity_region_flag); + } + if (encoder->codec_type.codec == VCD_CODEC_MPEG4) { + vidc_720p_encode_set_short_header + (encoder->short_header.short_header); + + if (encoder->hdr_ext_control) { + vidc_720p_encode_set_hec_period + (encoder->hdr_ext_control); + encoder->ext_enc_control_val |= (0x1 << 0x1); + } + } + /* set extended encoder control settings */ + vidc_720p_encode_set_control_param + (encoder->ext_enc_control_val); + + if (encoder->codec_type.codec == VCD_CODEC_H264) { + enum vidc_720p_entropy_sel_type entropy_sel; + enum vidc_720p_cabac_model_type cabac_model_number; + switch (encoder->entropy_control.entropy_sel) { + default: + case VCD_ENTROPY_SEL_CAVLC: + entropy_sel = VIDC_720P_ENTROPY_SEL_CAVLC; + break; + case VCD_ENTROPY_SEL_CABAC: + entropy_sel = VIDC_720P_ENTROPY_SEL_CABAC; + break; + } + switch (encoder->entropy_control.cabac_model) { + default: + case VCD_CABAC_MODEL_NUMBER_0: + cabac_model_number = VIDC_720P_CABAC_MODEL_NUMBER_0; + break; + case VCD_CABAC_MODEL_NUMBER_1: + cabac_model_number = VIDC_720P_CABAC_MODEL_NUMBER_1; + break; + case VCD_CABAC_MODEL_NUMBER_2: + cabac_model_number = VIDC_720P_CABAC_MODEL_NUMBER_2; + break; + } + vidc_720p_encode_set_entropy_control + (entropy_sel, cabac_model_number); + switch (encoder->db_control.db_config) { + default: + case VCD_DB_ALL_BLOCKING_BOUNDARY: + db_config = VIDC_720P_DB_ALL_BLOCKING_BOUNDARY; + break; + case VCD_DB_DISABLE: + db_config = VIDC_720P_DB_DISABLE; + break; + case VCD_DB_SKIP_SLICE_BOUNDARY: + db_config = VIDC_720P_DB_SKIP_SLICE_BOUNDARY; + break; + } + vidc_720p_encode_set_db_filter_control + (db_config, + encoder->db_control.slice_alpha_offset, + encoder->db_control.slice_beta_offset); + } + + vidc_720p_encode_set_intra_refresh_mb_number + (encoder->intra_refresh.cir_mb_number); + + switch (encoder->multi_slice.m_slice_sel) { + default: + case VCD_MSLICE_OFF: + m_slice_sel = VIDC_720P_MSLICE_OFF; + break; + case VCD_MSLICE_BY_MB_COUNT: + m_slice_sel = VIDC_720P_MSLICE_BY_MB_COUNT; + break; + case VCD_MSLICE_BY_BYTE_COUNT: + m_slice_sel = VIDC_720P_MSLICE_BY_BYTE_COUNT; + break; + case VCD_MSLICE_BY_GOB: + m_slice_sel = VIDC_720P_MSLICE_BY_GOB; + break; + } + vidc_720p_encode_set_multi_slice_info(m_slice_sel, + encoder->multi_slice.m_slice_size); + + vidc_720p_encode_set_dpb_buffer(encoder->enc_dpb_addr.phys_addr, + encoder->enc_dpb_addr.size); + + pr_debug("ENC_DPB_ADDR_SIZE %u\n", encoder->enc_dpb_addr.size); + + vidc_720p_encode_set_i_period(encoder->period.frames); + + ddl_metadata_enable(ddl); + + if (encoder->seq_header.virt_addr) { + phys_addr_t ext_buffer_start; + phys_addr_t ext_buffer_end; + u32 start_byte_num; + ext_buffer_start = encoder->seq_header.phys_addr; + ext_buffer_end = ext_buffer_start + encoder->seq_header.size; + start_byte_num = ext_buffer_start & + DDL_STREAMBUF_ALIGN_GUARD_BYTES; + ext_buffer_start &= ~DDL_STREAMBUF_ALIGN_GUARD_BYTES; + ext_buffer_end &= ~DDL_STREAMBUF_ALIGN_GUARD_BYTES; + pr_debug("ENC_SEQHDR_ALLOC_SIZE %u\n", + encoder->seq_header.size); + 
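+		/* start/end were rounded down to the 8-byte stream alignment
+		 * above; start_byte_num tells the core where the real first
+		 * byte sits inside that aligned word */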
vidc_720p_encode_set_seq_header_buffer(ext_buffer_start, + ext_buffer_end, start_byte_num); + } + + if (encoder->re_con_buf_format.buffer_format == + VCD_BUFFER_FORMAT_NV12) + mem_access_method = VIDC_720P_TILE_LINEAR; + else + mem_access_method = VIDC_720P_TILE_16x16; + + ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_INITCODECDONE); + ddl_move_command_state(ddl->ddl_context, DDL_CMD_INIT_CODEC); + + vidc_720p_encode_init_codec(ddl->channel_id, mem_access_method); +} + +void ddl_channel_end(struct ddl_client_context *ddl) +{ + ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_CHEND); + ddl_move_command_state(ddl->ddl_context, DDL_CMD_CHANNEL_END); + + vidc_720p_submit_command(ddl->channel_id, VIDC_720P_CMD_CHEND); +} + +void ddl_encode_frame_run(struct ddl_client_context *ddl) +{ + phys_addr_t ext_buffer_start; + phys_addr_t ext_buffer_end; + phys_addr_t y_addr; + phys_addr_t c_addr; + u32 start_byte_number; + struct ddl_encoder_data *encoder = &ddl->codec_data.encoder; + struct vcd_frame_data *stream = &ddl->output_frame.vcd_frm; + + ext_buffer_start = stream->phys_addr + stream->offset; + ext_buffer_end = ddl_encode_set_metadata_output_buf(ddl); + start_byte_number = ext_buffer_start & DDL_STREAMBUF_ALIGN_GUARD_BYTES; + if (start_byte_number) { + u32 *data; + ext_buffer_start &= ~DDL_STREAMBUF_ALIGN_GUARD_BYTES; + data = (u32 *)((u32)stream->virt_addr + stream->offset - + start_byte_number); + vidc_720p_encode_unalign_bitstream(data[0], data[1]); + } + + y_addr = ddl->input_frame.vcd_frm.phys_addr + + ddl->input_frame.vcd_frm.offset; + c_addr = y_addr + encoder->frame_size.height * + encoder->frame_size.width; + ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_FRAME_DONE); + ddl_move_command_state(ddl->ddl_context, DDL_CMD_ENCODE_FRAME); + + if (encoder->dynamic_prop_change) { + encoder->dynmic_prop_change_req = true; + ddl_encode_dynamic_property(ddl, true); + } + vidc_720p_encode_set_vop_time(encoder->vop_timing.vop_time_resolution, + ddl->input_frame.frm_delta); + + vidc_720p_encode_frame(ddl->channel_id, ext_buffer_start, + ext_buffer_end, start_byte_number, y_addr, c_addr); +} + +u32 ddl_decode_set_buffers(struct ddl_client_context *ddl) +{ + struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder); + u32 comv_buf_size = DDL_COMV_BUFLINE_NO, comv_buf_no = 0; + u32 ref_buf_no = 0; + + if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB)) { + pr_debug("STATE-CRITICAL\n"); + return VCD_ERR_FAIL; + } + + switch (decoder->codec_type.codec) { + default: + case VCD_CODEC_DIVX_4: + case VCD_CODEC_DIVX_5: + case VCD_CODEC_DIVX_6: + case VCD_CODEC_XVID: + case VCD_CODEC_MPEG2: + case VCD_CODEC_MPEG4: + comv_buf_no = DDL_MPEG_COMV_BUF_NO; + ref_buf_no = DDL_MPEG_REFBUF_COUNT; + break; + case VCD_CODEC_H263: + comv_buf_no = DDL_H263_COMV_BUF_NO; + break; + case VCD_CODEC_VC1: + case VCD_CODEC_VC1_RCV: + comv_buf_no = decoder->client_output_buf_req.actual_count + 1; + comv_buf_size = DDL_VC1_COMV_BUFLINE_NO; + break; + case VCD_CODEC_H264: + comv_buf_no = decoder->client_output_buf_req.actual_count; + break; + } + + if (comv_buf_no) { + comv_buf_size *= (comv_buf_no * + (((decoder->client_frame_size.width + 15) >> 4)) * + (((decoder->client_frame_size.height + 15) >> 4) + 1)); + if (!ddl_dma_alloc(&decoder->dpb_comv_buffer, comv_buf_size, npelly_dec_dpb)) { + pr_err("Dec_set_buf:Comv_buf_alloc_failed\n"); + return VCD_ERR_ALLOC_FAIL; + } + vidc_720p_decode_set_comv_buffer(decoder->dpb_comv_buffer. 
+ phys_addr, decoder->dpb_comv_buffer.size); + } + decoder->ref_buffer.phys_addr = 0; + if (ref_buf_no) { + u32 size, yuv_size; + yuv_size = ddl_get_yuv_buffer_size(&decoder-> + client_frame_size, &decoder->buf_format, + (!decoder->progressive_only)); + size = yuv_size * ref_buf_no; + + if (!ddl_dma_alloc(&decoder->ref_buffer, size, npelly_dec_ref)) { + ddl_dma_free(&decoder->dpb_comv_buffer); + pr_err("Dec_set_buf:mpeg_ref_buf_alloc_failed\n"); + return VCD_ERR_ALLOC_FAIL; + } + } + ddl_decode_set_metadata_output(decoder); + + ddl_decoder_dpb_transact(decoder, NULL, DDL_DPB_OP_INIT); + + if (decoder->codec_type.codec == VCD_CODEC_H264) { + vidc_720p_decode_setH264VSPBuffer( + decoder->h264Vsp_temp_buffer.phys_addr); + pr_debug("VSP_BUF_ADDR_SIZE %u\n", + decoder->h264Vsp_temp_buffer.size); + } + + ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_DPBDONE); + ddl_move_command_state(ddl->ddl_context, DDL_CMD_DECODE_SET_DPB); + + vidc_720p_submit_command(ddl->channel_id, + VIDC_720P_CMD_INITBUFFERS); + return VCD_S_SUCCESS; +} + +void ddl_decode_frame_run(struct ddl_client_context *ddl) +{ + phys_addr_t ext_buffer_start; + phys_addr_t ext_buffer_end = 0; + u32 start_byte_num; + struct ddl_decoder_data *decoder = &ddl->codec_data.decoder; + struct vcd_frame_data *bit_stream = &ddl->input_frame.vcd_frm; + + if (!bit_stream->data_len || !bit_stream->phys_addr) { + ddl_decode_eos_run(ddl); + return; + } + + ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_FRAME_DONE); + + ddl_decode_dynamic_property(ddl, true); + + ddl_decoder_dpb_transact(decoder, NULL, DDL_DPB_OP_SET_MASK); + + ext_buffer_start = bit_stream->phys_addr + bit_stream->offset; + start_byte_num = 8 - (ext_buffer_start & + DDL_STREAMBUF_ALIGN_GUARD_BYTES); + ext_buffer_end = ext_buffer_start + bit_stream->data_len; + ext_buffer_start &= ~DDL_STREAMBUF_ALIGN_GUARD_BYTES; + DDL_PADDING_HACK(ext_buffer_end); + + ddl_move_command_state(ddl->ddl_context, DDL_CMD_DECODE_FRAME); + + vidc_720p_decode_frame(ddl->channel_id, ext_buffer_start, + ext_buffer_end, bit_stream->data_len, start_byte_num, + bit_stream->ip_frm_tag); +} + +void ddl_decode_eos_run(struct ddl_client_context *ddl) +{ + struct ddl_decoder_data *decoder = &ddl->codec_data.decoder; + + ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_EOS_DONE); + + ddl_decode_dynamic_property(ddl, true); + + ddl_decoder_dpb_transact(decoder, NULL, DDL_DPB_OP_SET_MASK); + + decoder->dynmic_prop_change_req = true; + + ddl_move_command_state(ddl->ddl_context, DDL_CMD_EOS); + + vidc_720p_issue_eos(ddl->channel_id); +} + +u32 ddl_hal_engine_reset(struct ddl_context *ddl_context) +{ + u32 eng_reset; + u32 channel_id = 0; + enum vidc_720p_endian_type dma_endian; + enum vidc_720p_interrupt_level_selection_type interrupt_sel; + u32 intr_mask = 0x0; + + if (ddl_context->current_ddl) + channel_id = ddl_context->current_ddl->channel_id; + + interrupt_sel = VIDC_720P_INTERRUPT_LEVEL_SEL; + /* Enable all the supported interrupt */ + intr_mask |= VIDC_720P_INTR_BUFFER_FULL; + intr_mask |= VIDC_720P_INTR_FW_DONE; + intr_mask |= VIDC_720P_INTR_DMA_DONE; + intr_mask |= VIDC_720P_INTR_FRAME_DONE; + + /* use reverse endian after boot code download */ + dma_endian = VIDC_720P_LITTLE_ENDIAN; + + /* Need to reset MFC silently */ + eng_reset = vidc_720p_engine_reset(channel_id, dma_endian, + interrupt_sel, intr_mask); + if (!eng_reset) { + /* call the hw fatal callback if engine reset fails */ + ddl_hw_fatal_cb(ddl_context); + } + return eng_reset ; +} diff --git a/drivers/misc/video_core/720p/ddl/vcd_ddl_helper.c 
b/drivers/misc/video_core/720p/ddl/vcd_ddl_helper.c new file mode 100644 index 0000000000000..17bf125ecd7e9 --- /dev/null +++ b/drivers/misc/video_core/720p/ddl/vcd_ddl_helper.c @@ -0,0 +1,218 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include "video_core_type.h" +#include "vcd_ddl_utils.h" + +struct ddl_context *ddl_get_context(void) +{ + static struct ddl_context ddl_context; + return &ddl_context; +} + +void ddl_move_client_state(struct ddl_client_context *ddl, + enum ddl_client_state client_state) +{ + ddl->client_state = client_state; +} + +void ddl_move_command_state(struct ddl_context *ddl_context, + enum ddl_cmd_state command_state) +{ + ddl_context->cmd_state = command_state; +} + +u32 ddl_client_transact(u32 operation, struct ddl_client_context **pddl_client) +{ + u32 ret_status = VCD_ERR_FAIL; + u32 i; + struct ddl_context *ddl_context; + + ddl_context = ddl_get_context(); + switch (operation) { + case DDL_FREE_CLIENT: + if (pddl_client && *pddl_client) { + u32 channel_id; + channel_id = (*pddl_client)->channel_id; + if (channel_id < VCD_MAX_NO_CLIENT) + ddl_context->ddl_clients[channel_id] = NULL; + else + pr_warn("CHID_CORRUPTION\n"); + kfree(*pddl_client); + *pddl_client = NULL; + ret_status = VCD_S_SUCCESS; + } + break; + case DDL_GET_CLIENT: + ret_status = VCD_ERR_MAX_CLIENT; + for (i = 0; i < VCD_MAX_NO_CLIENT && ret_status == + VCD_ERR_MAX_CLIENT; ++i) { + if (!ddl_context->ddl_clients[i]) { + *pddl_client = (struct ddl_client_context *) + kzalloc((sizeof( + struct ddl_client_context)), + GFP_KERNEL); + if (!*pddl_client) { + ret_status = VCD_ERR_ALLOC_FAIL; + break; + } + ddl_context->ddl_clients[i] = *pddl_client; + (*pddl_client)->channel_id = i; + (*pddl_client)->ddl_context = ddl_context; + ret_status = VCD_S_SUCCESS; + } + } + break; + case DDL_INIT_CLIENTS: + for (i = 0; i < VCD_MAX_NO_CLIENT; ++i) + ddl_context->ddl_clients[i] = NULL; + ret_status = VCD_S_SUCCESS; + break; + case DDL_ACTIVE_CLIENT: + for (i = 0; i < VCD_MAX_NO_CLIENT; ++i) { + if (ddl_context->ddl_clients[i]) { + ret_status = VCD_S_SUCCESS; + break; + } + } + break; + default: + ret_status = VCD_ERR_ILLEGAL_PARM; + break; + } + return ret_status; +} + +u32 ddl_decoder_dpb_transact(struct ddl_decoder_data *dec, + struct ddl_frame_data_tag *in_out_frame, u32 operation) +{ + u32 vcd_status = VCD_S_SUCCESS; + u32 i; + struct ddl_frame_data_tag *found_frame = NULL; + struct ddl_mask *dpb_mask = &dec->dpb_mask; + + switch (operation) { + case DDL_DPB_OP_MARK_BUSY: + case DDL_DPB_OP_MARK_FREE: + for (i = 0; i < dec->dp_buf.no_of_dec_pic_buf; ++i) { + if (in_out_frame->vcd_frm.phys_addr == dec->dp_buf. 
+ dec_pic_buffers[i].vcd_frm.phys_addr) { + found_frame = &dec->dp_buf.dec_pic_buffers[i]; + break; + } + } + + if (!found_frame) { + in_out_frame->vcd_frm.phys_addr = 0; + vcd_status = VCD_ERR_BAD_POINTER; + pr_debug("BUF_NOT_FOUND\n"); + break; + } + if (operation == DDL_DPB_OP_MARK_BUSY) { + dpb_mask->hw_mask &= ~(0x1 << i); + *in_out_frame = *found_frame; + } else if (operation == DDL_DPB_OP_MARK_FREE) { + dpb_mask->client_mask |= 0x1 << i; + *found_frame = *in_out_frame; + } + + break; + case DDL_DPB_OP_SET_MASK: + dpb_mask->hw_mask |= dpb_mask->client_mask; + dpb_mask->client_mask = 0; + vidc_720p_decode_set_dpb_release_buffer_mask(dpb_mask->hw_mask); + break; + case DDL_DPB_OP_INIT: + { + size_t dpb_size = !dec->meta_data_offset ? + dec->dp_buf.dec_pic_buffers[0].vcd_frm.alloc_len : + dec->meta_data_offset; + vidc_720p_decode_set_dpb_details(dec->dp_buf.no_of_dec_pic_buf, + dpb_size, dec->ref_buffer.phys_addr); + for (i = 0; i < dec->dp_buf.no_of_dec_pic_buf; ++i) { + vidc_720p_decode_set_dpb_buffers(i, dec->dp_buf. + dec_pic_buffers[i].vcd_frm.phys_addr); + pr_debug("DEC_DPB_BUFn_SIZE %u\n", dec->dp_buf. + dec_pic_buffers[i].vcd_frm.alloc_len); + } + break; + } + case DDL_DPB_OP_RETRIEVE: + { + u32 position; + u32 *mask; + if (dpb_mask->client_mask) { + mask = &dpb_mask->client_mask; + } else if (dpb_mask->hw_mask) { + mask = &dpb_mask->hw_mask; + } else { + in_out_frame->vcd_frm.phys_addr = 0; + break; + } + position = 0x1; + for (i = 0; i < dec->dp_buf.no_of_dec_pic_buf; ++i) { + if (*mask & position) { + found_frame = &dec->dp_buf.dec_pic_buffers[i]; + *mask &= ~position; + *in_out_frame = *found_frame; + break; + } + position <<= 1; + } + if (!found_frame) + in_out_frame->vcd_frm.phys_addr = 0; + break; + } + } + return vcd_status; +} + +void ddl_release_context_buffers(struct ddl_context *ddl_context) +{ + ddl_dma_free(&ddl_context->context_buf_addr); + ddl_dma_free(&ddl_context->db_line_buffer); + ddl_dma_free(&ddl_context->data_partition_tempbuf); + ddl_dma_free(&ddl_context->metadata_shared_input); + ddl_dma_free(&ddl_context->dbg_core_dump); +} + +void ddl_release_client_internal_buffers(struct ddl_client_context *ddl) +{ + if (ddl->decoding) { + struct ddl_decoder_data *dec = &(ddl->codec_data.decoder); + ddl_dma_free(&dec->h264Vsp_temp_buffer); + ddl_dma_free(&dec->dpb_comv_buffer); + ddl_dma_free(&dec->ref_buffer); + kfree(dec->dp_buf.dec_pic_buffers); + dec->dp_buf.dec_pic_buffers = NULL; + ddl_decode_dynamic_property(ddl, false); + dec->decode_config.sz = 0; + dec->decode_config.addr = 0; + dec->dpb_mask.client_mask = 0; + dec->dpb_mask.hw_mask = 0; + dec->dp_buf.no_of_dec_pic_buf = 0; + dec->dynamic_prop_change = 0; + + } else { + struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder); + ddl_dma_free(&encoder->enc_dpb_addr); + ddl_dma_free(&encoder->seq_header); + ddl_encode_dynamic_property(ddl, false); + encoder->dynamic_prop_change = 0; + } +} diff --git a/drivers/misc/video_core/720p/ddl/vcd_ddl_internal_property.h b/drivers/misc/video_core/720p/ddl/vcd_ddl_internal_property.h new file mode 100644 index 0000000000000..3f3c6e2cbb5ec --- /dev/null +++ b/drivers/misc/video_core/720p/ddl/vcd_ddl_internal_property.h @@ -0,0 +1,91 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#ifndef _VCD_DDL_INTERNAL_PROPERTY_H_ +#define _VCD_DDL_INTERNAL_PROPERTY_H_ +#include "vcd_api.h" + +#define VCD_EVT_RESP_DDL_BASE 0x3000 +#define VCD_EVT_RESP_DEVICE_INIT (VCD_EVT_RESP_DDL_BASE + 0x1) +#define VCD_EVT_RESP_OUTPUT_REQ (VCD_EVT_RESP_DDL_BASE + 0x2) +#define VCD_EVT_RESP_EOS_DONE (VCD_EVT_RESP_DDL_BASE + 0x3) +#define VCD_EVT_RESP_TRANSACTION_PENDING (VCD_EVT_RESP_DDL_BASE + 0x4) + +#define VCD_S_DDL_ERR_BASE 0x90000000 +#define VCD_ERR_MAX_NO_CODEC (VCD_S_DDL_ERR_BASE + 0x1) +#define VCD_ERR_CLIENT_PRESENT (VCD_S_DDL_ERR_BASE + 0x2) +#define VCD_ERR_CLIENT_FATAL (VCD_S_DDL_ERR_BASE + 0x3) + +#define VCD_I_CUSTOM_BASE (VCD_I_RESERVED_BASE) +#define VCD_I_RC_LEVEL_CONFIG (VCD_I_CUSTOM_BASE + 0x1) +#define VCD_I_FRAME_LEVEL_RC (VCD_I_CUSTOM_BASE + 0x2) +#define VCD_I_ADAPTIVE_RC (VCD_I_CUSTOM_BASE + 0x3) +#define VCD_I_CUSTOM_DDL_BASE (VCD_I_RESERVED_BASE + 0x100) +#define DDL_I_INPUT_BUF_REQ (VCD_I_CUSTOM_DDL_BASE + 0x1) +#define DDL_I_OUTPUT_BUF_REQ (VCD_I_CUSTOM_DDL_BASE + 0x2) +#define DDL_I_DPB (VCD_I_CUSTOM_DDL_BASE + 0x3) +#define DDL_I_DPB_RELEASE (VCD_I_CUSTOM_DDL_BASE + 0x4) +#define DDL_I_DPB_RETRIEVE (VCD_I_CUSTOM_DDL_BASE + 0x5) +#define DDL_I_REQ_OUTPUT_FLUSH (VCD_I_CUSTOM_DDL_BASE + 0x6) +#define DDL_I_SEQHDR_ALIGN_BYTES (VCD_I_CUSTOM_DDL_BASE + 0x7) +#define DDL_I_SEQHDR_PRESENT (VCD_I_CUSTOM_DDL_BASE + 0xb) +#define DDL_I_CAPABILITY (VCD_I_CUSTOM_DDL_BASE + 0x8) +#define DDL_I_FRAME_PROC_UNITS (VCD_I_CUSTOM_DDL_BASE + 0x9) + +struct vcd_property_rc_level { + u32 frame_level_rc; + u32 mb_level_rc; +}; + +struct vcd_property_frame_level_rc_params { + u32 reaction_coeff; +}; + +struct vcd_property_adaptive_rc_params { + u32 dark_region_as_flag; + u32 smooth_region_as_flag; + u32 static_region_as_flag; + u32 activity_region_flag; +}; + +struct ddl_frame_data_tag; + +struct ddl_property_dec_pic_buffers { + struct ddl_frame_data_tag *dec_pic_buffers; + u32 no_of_dec_pic_buf; +}; + +struct ddl_property_capability { + u32 max_num_client; + u32 general_command_depth; + u32 
frame_command_depth; + u32 exclusive; + u32 ddl_time_out_in_ms; +}; + +#endif diff --git a/drivers/misc/video_core/720p/ddl/vcd_ddl_interrupt_handler.c b/drivers/misc/video_core/720p/ddl/vcd_ddl_interrupt_handler.c new file mode 100644 index 0000000000000..6269f05a275d1 --- /dev/null +++ b/drivers/misc/video_core/720p/ddl/vcd_ddl_interrupt_handler.c @@ -0,0 +1,878 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include "video_core_type.h" + +#include "vcd_ddl_utils.h" +#include "vcd_ddl_metadata.h" + +#if DEBUG +#define DBG(x...) printk(KERN_DEBUG x) +#else +#define DBG(x...) +#endif + +static void ddl_decoder_input_done_callback(struct ddl_client_context *ddl, + u32 frame_transact_end); +static void ddl_decoder_ouput_done_callback(struct ddl_client_context *ddl, + u32 frame_transact_end); + +static u32 ddl_get_frame_type(struct vcd_frame_data *frame, u32 frame_type); + +static void ddl_getdec_profilelevel(struct ddl_decoder_data *dec, + u32 profile, u32 level); + +static void ddl_dma_done_callback(struct ddl_context *ddl_context) +{ + if (!DDLCOMMAND_STATE_IS(ddl_context, DDL_CMD_DMA_INIT)) { + pr_debug("UNKNOWN_DMADONE\n"); + return; + } + ddl_move_command_state(ddl_context, DDL_CMD_INVALID); + pr_debug("DMA_DONE"); + ddl_core_start_cpu(ddl_context); +} + +static void ddl_cpu_started_callback(struct ddl_context *ddl_context) +{ + ddl_move_command_state(ddl_context, DDL_CMD_INVALID); + pr_debug("CPU-STARTED"); + + if (!vidc_720p_cpu_start()) { + ddl_hw_fatal_cb(ddl_context); + return; + } + + vidc_720p_set_deblock_line_buffer(ddl_context->db_line_buffer.phys_addr, + ddl_context->db_line_buffer.size); + ddl_context->device_state = DDL_DEVICE_INITED; + ddl_context->ddl_callback(VCD_EVT_RESP_DEVICE_INIT, VCD_S_SUCCESS, + NULL, 0, NULL, ddl_context->client_data); + DDL_IDLE(ddl_context); +} + + +static void ddl_eos_done_callback(struct ddl_context *ddl_context) +{ + struct ddl_client_context *ddl = ddl_context->current_ddl; + u32 displaystatus; + + if (!DDLCOMMAND_STATE_IS(ddl_context, DDL_CMD_EOS)) { + pr_debug("UNKWN_EOSDONE"); + ddl_client_fatal_cb(ddl_context); + return; + } + + if (!ddl || !ddl->decoding || !DDLCLIENT_STATE_IS(ddl, + DDL_CLIENT_WAIT_FOR_EOS_DONE)) { + pr_debug("STATE-CRITICAL-EOSDONE"); + ddl_client_fatal_cb(ddl_context); + return; + } + ddl_move_command_state(ddl_context, DDL_CMD_INVALID); + + vidc_720p_eos_info(&displaystatus); + if ((enum vidc_720p_display_status_type) displaystatus + != VIDC_720P_EMPTY_BUFFER) { + pr_debug("EOSDONE-EMPTYBUF-ISSUE"); + } + + ddl_decode_dynamic_property(ddl, false); + ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_FRAME); + pr_debug("EOS_DONE"); + ddl_context->ddl_callback(VCD_EVT_RESP_EOS_DONE, VCD_S_SUCCESS, NULL, + 0, (u32 *) ddl, ddl_context->client_data); + + DDL_IDLE(ddl_context); +} + +static u32 
ddl_channel_set_callback(struct ddl_context *ddl_context) +{ + struct ddl_client_context *ddl = ddl_context->current_ddl; + u32 return_status = false; + + ddl_move_command_state(ddl_context, DDL_CMD_INVALID); + + if (!ddl || !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_CHDONE)) { + pr_debug("STATE-CRITICAL-CHSET"); + DDL_IDLE(ddl_context); + return return_status; + } + pr_debug("Channel-set"); + ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_INITCODEC); + + if (ddl->decoding) { + if (ddl->codec_data.decoder.header_in_start) { + ddl_decode_init_codec(ddl); + } else { + ddl_context->ddl_callback(VCD_EVT_RESP_START, + VCD_S_SUCCESS, NULL, 0, (u32 *) ddl, + ddl_context->client_data); + DDL_IDLE(ddl_context); + return_status = true; + } + } else { + ddl_encode_init_codec(ddl); + } + return return_status; +} + +static void ddl_init_codec_done_callback(struct ddl_context *ddl_context) +{ + struct ddl_client_context *ddl = ddl_context->current_ddl; + struct ddl_encoder_data *encoder; + + if (!ddl || ddl->decoding || !DDLCLIENT_STATE_IS(ddl, + DDL_CLIENT_WAIT_FOR_INITCODECDONE)) { + pr_debug("STATE-CRITICAL-INITCODEC"); + ddl_client_fatal_cb(ddl_context); + return; + } + ddl_move_command_state(ddl_context, DDL_CMD_INVALID); + ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_FRAME); + pr_debug("INIT_CODEC_DONE"); + + encoder = &ddl->codec_data.encoder; + if (encoder->seq_header.virt_addr) + vidc_720p_encode_get_header(&encoder->seq_header.size); + + ddl_context->ddl_callback(VCD_EVT_RESP_START, VCD_S_SUCCESS, NULL, 0, + (u32 *) ddl, ddl_context->client_data); + + DDL_IDLE(ddl_context); +} + +static u32 ddl_header_done_callback(struct ddl_context *ddl_context) +{ + struct ddl_client_context *ddl = ddl_context->current_ddl; + struct ddl_decoder_data *dec; + struct vidc_720p_seq_hdr_info_type seq_hdr_info; + u32 vcd_event = VCD_EVT_RESP_START; + u32 vcd_status = VCD_S_SUCCESS; + u32 req_cb = true, bret = true; + + if (!DDLCOMMAND_STATE_IS(ddl_context, DDL_CMD_HEADER_PARSE)) { + pr_debug("UNKWN_HEADERDONE"); + ddl_client_fatal_cb(ddl_context); + return true; + } + + if (!ddl || !ddl->decoding || !DDLCLIENT_STATE_IS(ddl, + DDL_CLIENT_WAIT_FOR_INITCODECDONE)) { + pr_debug("STATE-CRITICAL-HDDONE"); + ddl_client_fatal_cb(ddl_context); + return true; + } + ddl_move_command_state(ddl_context, DDL_CMD_INVALID); + ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_DPB); + pr_debug("HEADER_DONE"); + + vidc_720p_decode_get_seq_hdr_info(&seq_hdr_info); + + dec = &(ddl->codec_data.decoder); + dec->frame_size.width = seq_hdr_info.img_size_x; + dec->frame_size.height = seq_hdr_info.img_size_y; + dec->min_dpb_num = seq_hdr_info.min_num_dpb; + dec->y_cb_cr_size = seq_hdr_info.min_dpb_size; + dec->progressive_only = 1 - seq_hdr_info.progressive; + ddl_getdec_profilelevel(dec, seq_hdr_info.profile, seq_hdr_info.level); + ddl_calculate_stride(&dec->frame_size, !dec->progressive_only); + if (seq_hdr_info.crop_exists) { + dec->frame_size.width -= (seq_hdr_info.crop_right_offset + + seq_hdr_info.crop_left_offset); + dec->frame_size.height -= (seq_hdr_info.crop_top_offset + + seq_hdr_info.crop_bottom_offset); + } + ddl_set_default_decoder_buffer_req(dec, false); + if (seq_hdr_info.data_partitioned == 0x1 && + dec->codec_type.codec == VCD_CODEC_MPEG4 && + seq_hdr_info.img_size_x > DDL_MAX_DP_FRAME_WIDTH && + seq_hdr_info.img_size_y > DDL_MAX_DP_FRAME_HEIGHT) { + ddl_client_fatal_cb(ddl_context); + return true; + } + + + if (dec->header_in_start) { + dec->client_frame_size = dec->frame_size; + dec->client_output_buf_req = 
dec->actual_output_buf_req; + if ((dec->frame_size.width * dec->frame_size.height) >= + (800*480)) { + if ((dec->actual_output_buf_req.actual_count + 2) < 10) + dec->client_output_buf_req.actual_count = 10; + else + dec->client_output_buf_req.actual_count += 2; + } else + dec->client_output_buf_req.actual_count = + dec->actual_output_buf_req.actual_count + 5; + + dec->client_input_buf_req = dec->actual_input_buf_req; + } else { + DBG("%s(): width = %d client_frame_size.width = %d\n", + __func__, dec->frame_size.width, + dec->client_frame_size.width); + DBG("%s(): height = %d client_frame_size.height = %d\n", + __func__, dec->frame_size.height, + dec->client_frame_size.height); + DBG("%s(): size = %d client_frame_size size = %d\n", + __func__, dec->actual_output_buf_req.size, + dec->client_output_buf_req.size); + DBG("%s(): min_dpb_num = %d actual_count = %d\n", __func__, + dec->min_dpb_num, + dec->client_output_buf_req.actual_count); + + bret = false; + + if (dec->frame_size.width == dec->client_frame_size.width && + dec->frame_size.height == + dec->client_frame_size.height && + dec->actual_output_buf_req.size <= + dec->client_output_buf_req.size && + dec->min_dpb_num <= + dec->client_output_buf_req.actual_count) { + vcd_status = ddl_decode_set_buffers(ddl); + if (!vcd_status) { + req_cb = false; + } else { + ddl_client_fatal_cb(ddl_context); + req_cb = true; + } + } else { + dec->client_frame_size = dec->frame_size; + dec->client_output_buf_req = dec->actual_output_buf_req; + dec->client_input_buf_req = dec->actual_input_buf_req; + pr_err("%s:Decode_reconfig_not_supported\n", __func__); + vcd_event = VCD_EVT_IND_RECONFIG; + } + } + + if (req_cb) { + ddl_context->ddl_callback(vcd_event, vcd_status, NULL, 0, + (u32 *) ddl, ddl_context->client_data); + DDL_IDLE(ddl_context); + } + return bret; +} + +static u32 ddl_dpb_buffers_set_done_callback(struct ddl_context + *ddl_context) +{ + struct ddl_client_context *ddl = ddl_context->current_ddl; + + ddl_move_command_state(ddl_context, DDL_CMD_INVALID); + if (!ddl || !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPBDONE)) { + pr_debug("STATE-CRITICAL-DPBDONE\n"); + ddl_client_fatal_cb(ddl_context); + return true; + } + pr_debug("INTR_DPBDONE\n"); + ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_FRAME); + ddl_decode_frame_run(ddl); + return false; +} + +static void ddl_encoder_frame_run_callback(struct ddl_context *ddl_context) +{ + struct ddl_client_context *ddl = ddl_context->current_ddl; + struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder); + u32 eos_present = false; + + if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME_DONE)) { + pr_debug("STATE-CRITICAL-ENCFRMRUN\n"); + ddl_client_fatal_cb(ddl_context); + return; + } + + pr_debug("ENC_FRM_RUN_DONE\n"); + + ddl_move_command_state(ddl_context, DDL_CMD_INVALID); + vidc_720p_enc_frame_info(&encoder->enc_frame_info); + + ddl->output_frame.vcd_frm.ip_frm_tag = + ddl->input_frame.vcd_frm.ip_frm_tag; + ddl->output_frame.vcd_frm.data_len = + encoder->enc_frame_info.enc_size; + ddl->output_frame.vcd_frm.flags |= VCD_FRAME_FLAG_ENDOFFRAME; + ddl_get_frame_type(&(ddl->output_frame.vcd_frm), + encoder->enc_frame_info.frame_type); + ddl_process_encoder_metadata(ddl); + + ddl_encode_dynamic_property(ddl, false); + + ddl->input_frame.frm_trans_end = false; + ddl_context->ddl_callback(VCD_EVT_RESP_INPUT_DONE, VCD_S_SUCCESS, + &(ddl->input_frame), sizeof(struct ddl_frame_data_tag), + (u32 *) ddl, ddl_context->client_data); + +#ifdef CORE_TIMING_INFO + ddl_calc_core_time(1); +#endif + /* check the 
presence of EOS */ + eos_present = VCD_FRAME_FLAG_EOS & ddl->input_frame.vcd_frm.flags; + + ddl->output_frame.frm_trans_end = !eos_present; + ddl_context->ddl_callback(VCD_EVT_RESP_OUTPUT_DONE, VCD_S_SUCCESS, + &(ddl->output_frame), sizeof(struct ddl_frame_data_tag), + (u32 *) ddl, ddl_context->client_data); + + if (eos_present) { + pr_debug("ENC-EOS_DONE\n"); + ddl_context->ddl_callback(VCD_EVT_RESP_EOS_DONE, + VCD_S_SUCCESS, NULL, 0, (u32 *)ddl, + ddl_context->client_data); + } + + ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_FRAME); + DDL_IDLE(ddl_context); +} + +static u32 ddl_decoder_frame_run_callback(struct ddl_context + *ddl_context) +{ + struct ddl_client_context *ddl = ddl_context->current_ddl; + struct ddl_decoder_data *dec = &(ddl->codec_data.decoder); + struct vidc_720p_dec_disp_info *dec_disp_info = &(dec->dec_disp_info); + u32 callback_end = false; + u32 status = true, eos_present = false; + + if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME_DONE)) { + pr_debug("STATE-CRITICAL-DECFRMRUN\n"); + ddl_client_fatal_cb(ddl_context); + return true; + } + + pr_debug("DEC_FRM_RUN_DONE\n"); + + ddl_move_command_state(ddl_context, DDL_CMD_INVALID); + + vidc_720p_decode_display_info(dec_disp_info); + + ddl_decode_dynamic_property(ddl, false); + + if (dec_disp_info->resl_change) { + pr_err("ddl_dec_frm_done:" + "Dec_reconfig_not_supported\n"); + } + + if ((VCD_FRAME_FLAG_EOS & ddl->input_frame.vcd_frm.flags)) { + callback_end = false; + eos_present = true; + } + + if (dec_disp_info->disp_status == VIDC_720P_DECODE_ONLY || + dec_disp_info->disp_status == + VIDC_720P_DECODE_AND_DISPLAY) { + if (!eos_present) + callback_end = (dec_disp_info->disp_status == + VIDC_720P_DECODE_ONLY); + + ddl_decoder_input_done_callback(ddl, callback_end); + } + + if (dec_disp_info->disp_status == VIDC_720P_DECODE_AND_DISPLAY || + dec_disp_info->disp_status == VIDC_720P_DISPLAY_ONLY) { + if (!eos_present) + callback_end = (dec_disp_info->disp_status == + VIDC_720P_DECODE_AND_DISPLAY); + + ddl_decoder_ouput_done_callback(ddl, callback_end); + } + + if (dec_disp_info->disp_status == VIDC_720P_DISPLAY_ONLY) { + /* send the same input once again for decoding */ + ddl_decode_frame_run(ddl); + /* client needs to ignore the interrupt */ + status = false; + } else if (eos_present) { + /* send EOS command to HW */ + ddl_decode_eos_run(ddl); + /* client needs to ignore the interrupt */ + status = false; + } else { + ddl_move_client_state(ddl, DDL_CLIENT_WAIT_FOR_FRAME); + /* move to Idle */ + DDL_IDLE(ddl_context); + } + return status; +} + +static u32 ddl_eos_frame_done_callback(struct ddl_context *ddl_context) +{ + struct ddl_client_context *ddl = ddl_context->current_ddl; + struct ddl_decoder_data *dec = &(ddl->codec_data.decoder); + struct vidc_720p_dec_disp_info *dec_disp_info = + &(dec->dec_disp_info); + + if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_EOS_DONE)) { + pr_err("STATE-CRITICAL-EOSFRMRUN\n"); + ddl_client_fatal_cb(ddl_context); + return true; + } + pr_debug("EOS_FRM_RUN_DONE\n"); + + ddl_move_command_state(ddl_context, DDL_CMD_INVALID); + + vidc_720p_decode_display_info(dec_disp_info); + + ddl_decode_dynamic_property(ddl, false); + + if (ddl_context->op_failed == 0x1) + pr_err("ddl_eos_frm_done:OPFAILED!!\n"); + else if (dec_disp_info->resl_change) + pr_err("ddl_eos_frm_done:Dec_reconfig!!\n"); + + if (dec_disp_info->disp_status == VIDC_720P_DISPLAY_ONLY) + ddl_decoder_ouput_done_callback(ddl, false); + else + pr_debug("STATE-CRITICAL-WRONG-DISP-STATUS\n"); + + ddl_decoder_dpb_transact(dec,
NULL, DDL_DPB_OP_SET_MASK); + ddl_move_command_state(ddl_context, DDL_CMD_EOS); + vidc_720p_submit_command(ddl->channel_id, VIDC_720P_CMD_FRAMERUN); + return false; +} + +static void ddl_channel_end_callback(struct ddl_context *ddl_context) +{ + struct ddl_client_context *ddl; + + ddl_move_command_state(ddl_context, DDL_CMD_INVALID); + pr_debug("CH_END_DONE\n"); + + ddl = ddl_context->current_ddl; + if (!ddl || !DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_CHEND)) { + pr_debug("STATE-CRITICAL-CHEND\n"); + DDL_IDLE(ddl_context); + return; + } + + ddl_release_client_internal_buffers(ddl); + ddl_context->ddl_callback(VCD_EVT_RESP_STOP, VCD_S_SUCCESS, NULL, 0, + (u32 *) ddl, ddl_context->client_data); + ddl_move_client_state(ddl, DDL_CLIENT_OPEN); + DDL_IDLE(ddl_context); +} + +static u32 ddl_operation_done_callback(struct ddl_context *ddl_context) +{ + u32 return_status = true; + + switch (ddl_context->cmd_state) { + case DDL_CMD_DECODE_FRAME: + return_status = ddl_decoder_frame_run_callback(ddl_context); + break; + case DDL_CMD_ENCODE_FRAME: + ddl_encoder_frame_run_callback(ddl_context); + break; + case DDL_CMD_CHANNEL_SET: + return_status = ddl_channel_set_callback(ddl_context); + break; + case DDL_CMD_INIT_CODEC: + ddl_init_codec_done_callback(ddl_context); + break; + case DDL_CMD_HEADER_PARSE: + return_status = ddl_header_done_callback(ddl_context); + break; + case DDL_CMD_DECODE_SET_DPB: + return_status = ddl_dpb_buffers_set_done_callback(ddl_context); + break; + case DDL_CMD_CHANNEL_END: + ddl_channel_end_callback(ddl_context); + break; + case DDL_CMD_EOS: + return_status = ddl_eos_frame_done_callback(ddl_context); + break; + case DDL_CMD_CPU_RESET: + ddl_cpu_started_callback(ddl_context); + break; + default: + pr_debug("UNKWN_OPDONE\n"); + return_status = false; + break; + } + return return_status; +} + +static u32 ddl_process_intr_status(struct ddl_context *ddl_context, + u32 int_status) +{ + u32 status = true; + switch (int_status) { + case VIDC_720P_INTR_FRAME_DONE: + status = ddl_operation_done_callback(ddl_context); + break; + case VIDC_720P_INTR_DMA_DONE: + ddl_dma_done_callback(ddl_context); + status = false; + break; + case VIDC_720P_INTR_FW_DONE: + ddl_eos_done_callback(ddl_context); + break; + case VIDC_720P_INTR_BUFFER_FULL: + pr_err("BUF_FULL_INTR\n"); + break; + default: + pr_err("UNKWN_INTR\n"); + break; + } + return status; +} + +void ddl_read_and_clear_interrupt(void) +{ + struct ddl_context *ddl_context; + + ddl_context = ddl_get_context(); + if (!ddl_context->core_virtual_base_addr) { + pr_err("SPURIOUS_INTERRUPT\n"); + return; + } + vidc_720p_get_interrupt_status(&ddl_context->intr_status, + &ddl_context->cmd_err_status, + &ddl_context->disp_pic_err_status, + &ddl_context->op_failed); + + vidc_720p_interrupt_done_clear(); +} + +u32 ddl_process_core_response(void) +{ + struct ddl_context *ddl_context; + u32 return_status = true; + + ddl_context = ddl_get_context(); + if (!ddl_context->core_virtual_base_addr) { + pr_err("UNKWN_INTR\n"); + return false; + } + if (ddl_context->intr_status == DDL_INVALID_INTR_STATUS) { + pr_err("INTERRUPT_NOT_READ\n"); + return false; + } + + if (!ddl_handle_core_errors(ddl_context)) { + return_status = ddl_process_intr_status(ddl_context, + ddl_context->intr_status); + } + + if (ddl_context->pf_interrupt_clr) + (*ddl_context->pf_interrupt_clr)(); + + ddl_context->intr_status = DDL_INVALID_INTR_STATUS; + return return_status; +} + +static void ddl_decoder_input_done_callback( + struct ddl_client_context *ddl, u32 frame_transact_end) +{ + 
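+ /* Input-done path: tag the frame type and interlace flag reported by the firmware, account for the bytes consumed, then report VCD_EVT_RESP_INPUT_DONE to the client. */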
struct vidc_720p_dec_disp_info *dec_disp_info = + &(ddl->codec_data.decoder.dec_disp_info); + struct vcd_frame_data *input_vcd_frm = &(ddl->input_frame.vcd_frm); + ddl_get_frame_type(input_vcd_frm, dec_disp_info->input_frame_type); + + input_vcd_frm->interlaced = (dec_disp_info->input_is_interlace); + + input_vcd_frm->offset += dec_disp_info->input_bytes_consumed; + input_vcd_frm->data_len -= dec_disp_info->input_bytes_consumed; + + ddl->input_frame.frm_trans_end = frame_transact_end; + ddl->ddl_context->ddl_callback(VCD_EVT_RESP_INPUT_DONE, + VCD_S_SUCCESS, + &ddl->input_frame, + sizeof(struct ddl_frame_data_tag), + (void *)ddl, + ddl->ddl_context->client_data); +} + +static void ddl_decoder_ouput_done_callback(struct ddl_client_context *ddl, + u32 frame_transact_end) +{ + struct ddl_decoder_data *dec = &ddl->codec_data.decoder; + struct vidc_720p_dec_disp_info *dec_disp_info = &dec->dec_disp_info; + struct ddl_frame_data_tag *output_frame = &ddl->output_frame; + struct vcd_frame_data *output_vcd_frm = &output_frame->vcd_frm; + u32 vcd_status; + phys_addr_t free_luma_dpb = 0; + + output_vcd_frm->phys_addr = dec_disp_info->y_addr; + + if (dec->codec_type.codec == VCD_CODEC_MPEG4 || + dec->codec_type.codec == VCD_CODEC_VC1 || + dec->codec_type.codec == VCD_CODEC_VC1_RCV) { + vidc_720p_decode_skip_frm_details(&free_luma_dpb); + if (free_luma_dpb) + output_vcd_frm->phys_addr = free_luma_dpb; + } + + vcd_status = ddl_decoder_dpb_transact(dec, output_frame, + DDL_DPB_OP_MARK_BUSY); + + output_vcd_frm->ip_frm_tag = dec_disp_info->tag_top; + if (dec_disp_info->crop_exists == 0x1) { + output_vcd_frm->dec_op_prop.disp_frm.left = + dec_disp_info->crop_left_offset; + output_vcd_frm->dec_op_prop.disp_frm.top = + dec_disp_info->crop_top_offset; + output_vcd_frm->dec_op_prop.disp_frm.right = + dec_disp_info->img_size_x - + dec_disp_info->crop_right_offset; + output_vcd_frm->dec_op_prop.disp_frm.bottom = + dec_disp_info->img_size_y - + dec_disp_info->crop_bottom_offset; + } else { + output_vcd_frm->dec_op_prop.disp_frm.left = 0; + output_vcd_frm->dec_op_prop.disp_frm.top = 0; + output_vcd_frm->dec_op_prop.disp_frm.right = + dec_disp_info->img_size_x; + output_vcd_frm->dec_op_prop.disp_frm.bottom = + dec_disp_info->img_size_y; + } + if (!dec_disp_info->disp_is_interlace) { + output_vcd_frm->interlaced = false; + output_frame->intrlcd_ip_frm_tag = VCD_FRAMETAG_INVALID; + } else { + output_vcd_frm->interlaced = true; + output_frame->intrlcd_ip_frm_tag = dec_disp_info->tag_bottom; + } + + output_vcd_frm->offset = 0; + output_vcd_frm->data_len = dec->y_cb_cr_size; + + if (free_luma_dpb) + output_vcd_frm->data_len = 0; + + output_vcd_frm->flags |= VCD_FRAME_FLAG_ENDOFFRAME; + + if (!vcd_status) + ddl_process_decoder_metadata(ddl); + output_frame->frm_trans_end = frame_transact_end; + +#ifdef CORE_TIMING_INFO + ddl_calc_core_time(0); +#endif + + ddl->ddl_context->ddl_callback(VCD_EVT_RESP_OUTPUT_DONE, + vcd_status, output_frame, sizeof(struct ddl_frame_data_tag), + (void *)ddl, ddl->ddl_context->client_data); +} + +static u32 ddl_get_frame_type(struct vcd_frame_data *frame, u32 frame_type) +{ + enum vidc_720p_frame_type e_frame_type = + (enum vidc_720p_frame_type)frame_type; + u32 status = true; + + switch (e_frame_type) { + case VIDC_720P_IFRAME: + frame->flags |= VCD_FRAME_FLAG_SYNCFRAME; + frame->frame_type = VCD_FRAME_I; + break; + case VIDC_720P_PFRAME: + frame->frame_type = VCD_FRAME_P; + break; + case VIDC_720P_BFRAME: + frame->frame_type = VCD_FRAME_B; + break; + case VIDC_720P_NOTCODED: + 
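+ /* not coded (skipped) frame: no picture data was produced */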
frame->data_len = 0; + break; + default: + pr_debug("CRITICAL-FRAMETYPE\n"); + status = false; + break; + } + return status; +} + +static void ddl_getmpeg4_declevel(enum vcd_codec_level_type *vcd_level, + u32 level) +{ + switch (level) { + case VIDC_720P_MPEG4_LEVEL0: + *vcd_level = VCD_LEVEL_MPEG4_0; + break; + case VIDC_720P_MPEG4_LEVEL0b: + *vcd_level = VCD_LEVEL_MPEG4_0b; + break; + case VIDC_720P_MPEG4_LEVEL1: + *vcd_level = VCD_LEVEL_MPEG4_1; + break; + case VIDC_720P_MPEG4_LEVEL2: + *vcd_level = VCD_LEVEL_MPEG4_2; + break; + case VIDC_720P_MPEG4_LEVEL3: + *vcd_level = VCD_LEVEL_MPEG4_3; + break; + case VIDC_720P_MPEG4_LEVEL3b: + *vcd_level = VCD_LEVEL_MPEG4_3b; + break; + case VIDC_720P_MPEG4_LEVEL4a: + *vcd_level = VCD_LEVEL_MPEG4_4a; + break; + case VIDC_720P_MPEG4_LEVEL5: + *vcd_level = VCD_LEVEL_MPEG4_5; + break; + case VIDC_720P_MPEG4_LEVEL6: + *vcd_level = VCD_LEVEL_MPEG4_6; + break; + } +} + +static void ddl_geth264_declevel(enum vcd_codec_level_type *vcd_level, + u32 level) +{ + switch (level) { + case VIDC_720P_H264_LEVEL1: + *vcd_level = VCD_LEVEL_H264_1; + break; + case VIDC_720P_H264_LEVEL1b: + *vcd_level = VCD_LEVEL_H264_1b; + break; + case VIDC_720P_H264_LEVEL1p1: + *vcd_level = VCD_LEVEL_H264_1p1; + break; + case VIDC_720P_H264_LEVEL1p2: + *vcd_level = VCD_LEVEL_H264_1p2; + break; + case VIDC_720P_H264_LEVEL1p3: + *vcd_level = VCD_LEVEL_H264_1p3; + break; + case VIDC_720P_H264_LEVEL2: + *vcd_level = VCD_LEVEL_H264_2; + break; + case VIDC_720P_H264_LEVEL2p1: + *vcd_level = VCD_LEVEL_H264_2p1; + break; + case VIDC_720P_H264_LEVEL2p2: + *vcd_level = VCD_LEVEL_H264_2p2; + break; + case VIDC_720P_H264_LEVEL3: + *vcd_level = VCD_LEVEL_H264_3; + break; + case VIDC_720P_H264_LEVEL3p1: + *vcd_level = VCD_LEVEL_H264_3p1; + break; + case VIDC_720P_H264_LEVEL3p2: + *vcd_level = VCD_LEVEL_H264_3p2; + break; + } +} + +static void ddl_get_vc1_dec_level(enum vcd_codec_level_type *vcd_level, + u32 level, enum vcd_codec_profile_type vc1_profile) +{ + if (vc1_profile == VCD_PROFILE_VC1_ADVANCE) { + switch (level) { + case VIDC_720P_VC1_LEVEL0: + *vcd_level = VCD_LEVEL_VC1_0; + break; + case VIDC_720P_VC1_LEVEL1: + *vcd_level = VCD_LEVEL_VC1_1; + break; + case VIDC_720P_VC1_LEVEL2: + *vcd_level = VCD_LEVEL_VC1_2; + break; + case VIDC_720P_VC1_LEVEL3: + *vcd_level = VCD_LEVEL_VC1_3; + break; + case VIDC_720P_VC1_LEVEL4: + *vcd_level = VCD_LEVEL_VC1_4; + break; + } + return; + } + + /* now determine the Main and Simple profile level */ + switch (level) { + case VIDC_720P_VC1_LEVEL_LOW: + *vcd_level = VCD_LEVEL_VC1_LOW; + break; + case VIDC_720P_VC1_LEVEL_MED: + *vcd_level = VCD_LEVEL_VC1_MEDIUM; + break; + case VIDC_720P_VC1_LEVEL_HIGH: + *vcd_level = VCD_LEVEL_VC1_HIGH; + break; + } +} + +static void ddl_get_mpeg2_dec_level(enum vcd_codec_level_type *vcd_level, + u32 level) +{ + switch (level) { + case VIDCL_720P_MPEG2_LEVEL_LOW: + *vcd_level = VCD_LEVEL_MPEG2_LOW; + break; + case VIDCL_720P_MPEG2_LEVEL_MAIN: + *vcd_level = VCD_LEVEL_MPEG2_MAIN; + break; + case VIDCL_720P_MPEG2_LEVEL_HIGH14: + *vcd_level = VCD_LEVEL_MPEG2_HIGH_14; + break; + } +} + +static void ddl_getdec_profilelevel(struct ddl_decoder_data *dec, + u32 profile, u32 level) +{ + enum vcd_codec_profile_type vcd_profile = VCD_PROFILE_UNKNOWN; + enum vcd_codec_level_type vcd_level = VCD_LEVEL_UNKNOWN; + + switch (dec->codec_type.codec) { + case VCD_CODEC_MPEG4: + if (profile == VIDC_720P_PROFILE_MPEG4_SP) + vcd_profile = VCD_PROFILE_MPEG4_SP; + else if (profile == VIDC_720P_PROFILE_MPEG4_ASP) + vcd_profile = 
VCD_PROFILE_MPEG4_ASP; + + ddl_getmpeg4_declevel(&vcd_level, level); + break; + case VCD_CODEC_H264: + if (profile == VIDC_720P_PROFILE_H264_BASELINE) + vcd_profile = VCD_PROFILE_H264_BASELINE; + else if (profile == VIDC_720P_PROFILE_H264_MAIN) + vcd_profile = VCD_PROFILE_H264_MAIN; + else if (profile == VIDC_720P_PROFILE_H264_HIGH) + vcd_profile = VCD_PROFILE_H264_HIGH; + ddl_geth264_declevel(&vcd_level, level); + break; + default: + case VCD_CODEC_H263: + break; + case VCD_CODEC_VC1: + case VCD_CODEC_VC1_RCV: + if (profile == VIDC_720P_PROFILE_VC1_SP) + vcd_profile = VCD_PROFILE_VC1_SIMPLE; + else if (profile == VIDC_720P_PROFILE_VC1_MAIN) + vcd_profile = VCD_PROFILE_VC1_MAIN; + else if (profile == VIDC_720P_PROFILE_VC1_ADV) + vcd_profile = VCD_PROFILE_VC1_ADVANCE; + ddl_get_vc1_dec_level(&vcd_level, level, vcd_profile); + break; + case VCD_CODEC_MPEG2: + if (profile == VIDC_720P_PROFILE_MPEG2_MAIN) + vcd_profile = VCD_PROFILE_MPEG2_MAIN; + else if (profile == VIDC_720P_PROFILE_MPEG2_SP) + vcd_profile = VCD_PROFILE_MPEG2_SIMPLE; + ddl_get_mpeg2_dec_level(&vcd_level, level); + break; + } + + dec->profile.profile = vcd_profile; + dec->level.level = vcd_level; +} diff --git a/drivers/misc/video_core/720p/ddl/vcd_ddl_metadata.c b/drivers/misc/video_core/720p/ddl/vcd_ddl_metadata.c new file mode 100644 index 0000000000000..230e7ec774068 --- /dev/null +++ b/drivers/misc/video_core/720p/ddl/vcd_ddl_metadata.c @@ -0,0 +1,545 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ + +#include "video_core_type.h" +#include "vcd_ddl_utils.h" +#include "vcd_ddl_metadata.h" + +static u32 *ddl_metadata_hdr_entry(struct ddl_client_context *ddl, + u32 meta_data_type) +{ + u32 skip_words; + u32 *buffer; + + if (ddl->decoding) { + buffer = ddl->codec_data.decoder.meta_data_input.virt_addr; + skip_words = 32 + 1; + buffer += skip_words; + + switch (meta_data_type) { + default: + case VCD_METADATA_DATANONE: + skip_words = 0; + break; + case VCD_METADATA_QPARRAY: + skip_words = 3; + break; + case VCD_METADATA_CONCEALMB: + skip_words = 6; + break; + case VCD_METADATA_VC1: + skip_words = 9; + break; + case VCD_METADATA_SEI: + skip_words = 12; + break; + case VCD_METADATA_VUI: + skip_words = 15; + break; + case VCD_METADATA_PASSTHROUGH: + skip_words = 18; + break; + case VCD_METADATA_QCOMFILLER: + skip_words = 21; + break; + } + } else { + buffer = ddl->codec_data.encoder.meta_data_input.virt_addr; + skip_words = 2; + buffer += skip_words; + + switch (meta_data_type) { + default: + case VCD_METADATA_DATANONE: + skip_words = 0; + break; + case VCD_METADATA_ENC_SLICE: + skip_words = 3; + break; + case VCD_METADATA_QCOMFILLER: + skip_words = 6; + break; + } + } + + buffer += skip_words; + return buffer; +} + +void ddl_set_default_meta_data_hdr(struct ddl_client_context *ddl) +{ + struct ddl_dma_buffer *main_buffer = + &ddl->ddl_context->metadata_shared_input; + struct ddl_dma_buffer *b; + u32 *hdr; + + if (ddl->decoding) + b = &ddl->codec_data.decoder.meta_data_input; + else + b = &ddl->codec_data.encoder.meta_data_input; + + b->phys_addr = main_buffer->phys_addr + + DDL_METADATA_CLIENT_INPUTBUFSIZE * ddl->channel_id; + b->virt_addr = (void *)((u8 *)main_buffer->virt_addr + + DDL_METADATA_CLIENT_INPUTBUFSIZE * ddl->channel_id); + + hdr = ddl_metadata_hdr_entry(ddl, VCD_METADATA_QCOMFILLER); + hdr[DDL_METADATA_HDR_VERSION_INDEX] = 1; + hdr[DDL_METADATA_HDR_PORT_INDEX] = 1; + hdr[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_QCOMFILLER; + + hdr = ddl_metadata_hdr_entry(ddl, VCD_METADATA_DATANONE); + hdr[DDL_METADATA_HDR_VERSION_INDEX] = 2; + hdr[DDL_METADATA_HDR_PORT_INDEX] = 2; + hdr[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_DATANONE; + + if (ddl->decoding) { + hdr = ddl_metadata_hdr_entry(ddl, VCD_METADATA_QPARRAY); + hdr[DDL_METADATA_HDR_VERSION_INDEX] = 3; + hdr[DDL_METADATA_HDR_PORT_INDEX] = 3; + hdr[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_QPARRAY; + + hdr = ddl_metadata_hdr_entry(ddl, VCD_METADATA_CONCEALMB); + hdr[DDL_METADATA_HDR_VERSION_INDEX] = 4; + hdr[DDL_METADATA_HDR_PORT_INDEX] = 4; + hdr[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_CONCEALMB; + + hdr = ddl_metadata_hdr_entry(ddl, VCD_METADATA_SEI); + hdr[DDL_METADATA_HDR_VERSION_INDEX] = 5; + hdr[DDL_METADATA_HDR_PORT_INDEX] = 5; + hdr[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_SEI; + + hdr = ddl_metadata_hdr_entry(ddl, VCD_METADATA_VUI); + hdr[DDL_METADATA_HDR_VERSION_INDEX] = 6; + hdr[DDL_METADATA_HDR_PORT_INDEX] = 6; + hdr[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_VUI; + + hdr = ddl_metadata_hdr_entry(ddl, VCD_METADATA_VC1); + hdr[DDL_METADATA_HDR_VERSION_INDEX] = 7; + hdr[DDL_METADATA_HDR_PORT_INDEX] = 7; + hdr[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_VC1; + + hdr = ddl_metadata_hdr_entry(ddl, VCD_METADATA_PASSTHROUGH); + hdr[DDL_METADATA_HDR_VERSION_INDEX] = 8; + hdr[DDL_METADATA_HDR_PORT_INDEX] = 8; + hdr[DDL_METADATA_HDR_TYPE_INDEX] = + VCD_METADATA_PASSTHROUGH; + + } else { + hdr = ddl_metadata_hdr_entry(ddl, VCD_METADATA_ENC_SLICE); + hdr[DDL_METADATA_HDR_VERSION_INDEX] = 9; + 
hdr[DDL_METADATA_HDR_PORT_INDEX] = 9; + hdr[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_ENC_SLICE; + } +} + +static u32 ddl_supported_metadata_flag(struct ddl_client_context *ddl) +{ + u32 flag = 0; + + if (ddl->decoding) { + enum vcd_codec codec = + ddl->codec_data.decoder.codec_type.codec; + + flag |= VCD_METADATA_CONCEALMB | VCD_METADATA_PASSTHROUGH | + VCD_METADATA_QPARRAY; + if (codec == VCD_CODEC_H264) + flag |= VCD_METADATA_SEI | VCD_METADATA_VUI; + else if (codec == VCD_CODEC_VC1 || codec == VCD_CODEC_VC1_RCV) + flag |= VCD_METADATA_VC1; + } else { + flag |= VCD_METADATA_ENC_SLICE; + } + + return flag; +} + +void ddl_set_default_metadata_flag(struct ddl_client_context *ddl) +{ + if (ddl->decoding) + ddl->codec_data.decoder.meta_data_enable_flag = 0; + else + ddl->codec_data.encoder.meta_data_enable_flag = 0; +} + +void ddl_set_default_decoder_metadata_buffer_size(struct ddl_decoder_data *dec, + struct vcd_property_frame_size *frame_size, + struct vcd_buffer_requirement *output_buf_req) +{ + u32 flag = dec->meta_data_enable_flag; + u32 suffix = 0; + u32 size = 0; + + if (!flag) { + dec->suffix = 0; + return; + } + + if (flag & VCD_METADATA_QPARRAY) { + u32 num_of_mb = ((frame_size->width * frame_size->height) >> 8); + size = DDL_METADATA_HDR_SIZE; + size += num_of_mb; + DDL_METADATA_ALIGNSIZE(size); + suffix += size; + } + if (flag & VCD_METADATA_CONCEALMB) { + u32 num_of_mb = ((frame_size->width * frame_size->height) >> 8); + size = DDL_METADATA_HDR_SIZE; + size *= (4 * num_of_mb / 2); + DDL_METADATA_ALIGNSIZE(size); + suffix += size; + } + if (flag & VCD_METADATA_VC1) { + size = DDL_METADATA_HDR_SIZE; + size += DDL_METADATA_VC1_PAYLOAD_SIZE; + DDL_METADATA_ALIGNSIZE(size); + suffix += size; + } + if (flag & VCD_METADATA_SEI) { + size = DDL_METADATA_HDR_SIZE; + size += DDL_METADATA_SEI_PAYLOAD_SIZE; + DDL_METADATA_ALIGNSIZE(size); + suffix += (size * DDL_METADATA_SEI_MAX); + } + if (flag & VCD_METADATA_VUI) { + size = DDL_METADATA_HDR_SIZE; + size += DDL_METADATA_VUI_PAYLOAD_SIZE; + DDL_METADATA_ALIGNSIZE(size); + suffix += (size); + } + if (flag & VCD_METADATA_PASSTHROUGH) { + size = DDL_METADATA_HDR_SIZE; + size += DDL_METADATA_PASSTHROUGH_PAYLOAD_SIZE; + DDL_METADATA_ALIGNSIZE(size); + suffix += (size); + } + size = DDL_METADATA_EXTRADATANONE_SIZE; + DDL_METADATA_ALIGNSIZE(size); + suffix += (size); + + suffix += DDL_METADATA_EXTRAPAD_SIZE; + DDL_METADATA_ALIGNSIZE(suffix); + + dec->suffix = suffix; + output_buf_req->size += suffix; + return; +} + +void ddl_set_default_encoder_metadata_buffer_size(struct ddl_encoder_data *enc) +{ + u32 flag = enc->meta_data_enable_flag; + u32 suffix = 0; + u32 size = 0; + + if (!flag) { + enc->suffix = 0; + return; + } + + if (flag & VCD_METADATA_ENC_SLICE) { + u32 num_of_mb = enc->frame_size.width * enc->frame_size.height / + 16 / 16; + size = DDL_METADATA_HDR_SIZE + 4 + 8 * num_of_mb; + DDL_METADATA_ALIGNSIZE(size); + suffix += size; + } + + size = DDL_METADATA_EXTRADATANONE_SIZE; + DDL_METADATA_ALIGNSIZE(size); + suffix += (size); + + suffix += DDL_METADATA_EXTRAPAD_SIZE; + DDL_METADATA_ALIGNSIZE(suffix); + + enc->suffix = suffix; + enc->output_buf_req.size += suffix; +} + +static u32 ddl_set_metadata_enable_client_open(struct ddl_client_context *ddl, + struct vcd_property_meta_data_enable *meta_data_enable, + u32 *meta_data_enable_flag) +{ + if (!meta_data_enable->meta_data_enable_flag) { + *meta_data_enable_flag = 0; + if (ddl->decoding) { + ddl_set_default_decoder_buffer_req( + &ddl->codec_data.decoder, true); + } else { + 
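+ /* metadata disabled for an encoder session: restore the default encode buffer requirements */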
ddl_set_default_encoder_buffer_req( + &ddl->codec_data.encoder); + } + + } else { + u32 flag = ddl_supported_metadata_flag(ddl); + flag &= meta_data_enable->meta_data_enable_flag; + if (flag) { + flag |= DDL_METADATA_MANDATORY; + if (flag != *meta_data_enable_flag) { + *meta_data_enable_flag = flag; + + if (ddl->decoding) { + ddl_set_default_decoder_buffer_req( + &ddl->codec_data.decoder, true); + } else { + ddl_set_default_encoder_buffer_req( + &ddl->codec_data.encoder); + } + + } + } + } + return VCD_S_SUCCESS; +} + +static u32 ddl_set_metadata_header(struct ddl_client_context *ddl, + struct vcd_property_hdr *property_hdr, + struct vcd_property_metadata_hdr *hdr) +{ + u32 flag; + if (sizeof(struct vcd_property_metadata_hdr) != property_hdr->sz) + return VCD_ERR_ILLEGAL_PARM; + + flag = ddl_supported_metadata_flag(ddl); + flag |= DDL_METADATA_MANDATORY; + flag &= hdr->meta_data_id_type; + if (!(flag & (flag - 1))) { + u32 *hdr_entry = ddl_metadata_hdr_entry(ddl, flag); + hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = hdr->version; + hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = hdr->port_index; + hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = hdr->type; + return VCD_S_SUCCESS; + } + return VCD_ERR_ILLEGAL_PARM; +} + +u32 ddl_set_metadata_params(struct ddl_client_context *ddl, + struct vcd_property_hdr *prop, void *value) +{ + u32 vcd_status = VCD_ERR_ILLEGAL_PARM; + if (prop->id == VCD_I_METADATA_ENABLE) { + struct vcd_property_meta_data_enable *meta_data_enable = + (struct vcd_property_meta_data_enable *)value; + u32 *meta_data_enable_flag; + enum vcd_codec codec; + if (ddl->decoding) { + meta_data_enable_flag = &ddl->codec_data.decoder. + meta_data_enable_flag; + codec = ddl->codec_data.decoder.codec_type.codec; + } else { + meta_data_enable_flag = &ddl->codec_data.encoder. + meta_data_enable_flag; + codec = ddl->codec_data.encoder.codec_type.codec; + } + if (sizeof(struct vcd_property_meta_data_enable) == prop->sz && + DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN) && + codec) { + vcd_status = ddl_set_metadata_enable_client_open(ddl, + meta_data_enable, meta_data_enable_flag); + } + } else if (prop->id == VCD_I_METADATA_HEADER) { + vcd_status = ddl_set_metadata_header(ddl, prop, value); + } + return vcd_status; +} + +u32 ddl_get_metadata_params(struct ddl_client_context *ddl, + struct vcd_property_hdr *prop, void *value) +{ + u32 vcd_status = VCD_ERR_ILLEGAL_PARM; + struct vcd_property_meta_data_enable *enable; + struct vcd_property_metadata_hdr *hdr; + + if (prop->id == VCD_I_METADATA_ENABLE && prop->sz == sizeof(*enable)) { + enable = value; + enable->meta_data_enable_flag = ddl->decoding ? 
+ ddl->codec_data.decoder.meta_data_enable_flag : + ddl->codec_data.encoder.meta_data_enable_flag; + vcd_status = VCD_S_SUCCESS; + } else if (prop->id == VCD_I_METADATA_HEADER && + sizeof(*hdr) == prop->sz) { + u32 flag = ddl_supported_metadata_flag(ddl); + hdr = value; + flag |= DDL_METADATA_MANDATORY; + flag &= hdr->meta_data_id_type; + if (!(flag & (flag - 1))) { + u32 *hdr_entry = ddl_metadata_hdr_entry(ddl, flag); + hdr->version = + hdr_entry[DDL_METADATA_HDR_VERSION_INDEX]; + hdr->port_index = + hdr_entry[DDL_METADATA_HDR_PORT_INDEX]; + hdr->type = hdr_entry[DDL_METADATA_HDR_TYPE_INDEX]; + vcd_status = VCD_S_SUCCESS; + } + } + return vcd_status; +} + +void ddl_metadata_enable(struct ddl_client_context *ddl) +{ + u32 flag, hal_flag = 0; + phys_addr_t input; + if (ddl->decoding) { + flag = ddl->codec_data.decoder.meta_data_enable_flag; + input = ddl->codec_data.decoder.meta_data_input.phys_addr; + } else { + flag = ddl->codec_data.encoder.meta_data_enable_flag; + input = ddl->codec_data.encoder.meta_data_input.phys_addr; + } + if (flag) { + if (flag & VCD_METADATA_QPARRAY) + hal_flag |= VIDC_720P_METADATA_ENABLE_QP; + if (flag & VCD_METADATA_CONCEALMB) + hal_flag |= VIDC_720P_METADATA_ENABLE_CONCEALMB; + if (flag & VCD_METADATA_VC1) + hal_flag |= VIDC_720P_METADATA_ENABLE_VC1; + if (flag & VCD_METADATA_SEI) + hal_flag |= VIDC_720P_METADATA_ENABLE_SEI; + if (flag & VCD_METADATA_VUI) + hal_flag |= VIDC_720P_METADATA_ENABLE_VUI; + if (flag & VCD_METADATA_ENC_SLICE) + hal_flag |= VIDC_720P_METADATA_ENABLE_ENCSLICE; + if (flag & VCD_METADATA_PASSTHROUGH) + hal_flag |= VIDC_720P_METADATA_ENABLE_PASSTHROUGH; + } else { + input = 0; + } + vidc_720p_metadata_enable(hal_flag, input); +} + +phys_addr_t ddl_encode_set_metadata_output_buf(struct ddl_client_context *ddl) +{ + struct ddl_encoder_data *encoder = &ddl->codec_data.encoder; + u32 *buffer; + struct vcd_frame_data *stream = &(ddl->output_frame.vcd_frm); + phys_addr_t ext_buffer_end; + phys_addr_t hw_metadata_start; + + ext_buffer_end = stream->phys_addr + stream->alloc_len; + if (!encoder->meta_data_enable_flag) { + ext_buffer_end &= ~DDL_STREAMBUF_ALIGN_GUARD_BYTES; + return ext_buffer_end; + } + hw_metadata_start = (ext_buffer_end - encoder->suffix) & + ~DDL_STREAMBUF_ALIGN_GUARD_BYTES; + + ext_buffer_end = (hw_metadata_start - 1) & + ~DDL_STREAMBUF_ALIGN_GUARD_BYTES; + + buffer = encoder->meta_data_input.virt_addr; + + *buffer++ = encoder->suffix; + + *buffer = hw_metadata_start; + + encoder->meta_data_offset = hw_metadata_start - stream->phys_addr; + + return ext_buffer_end; +} + +void ddl_decode_set_metadata_output(struct ddl_decoder_data *dec) +{ + int i; + u32 *buffer; + + if (!dec->meta_data_enable_flag) { + dec->meta_data_offset = 0; + return; + } + + dec->meta_data_offset = ddl_get_yuv_buffer_size(&dec->client_frame_size, + &dec->buf_format, !dec->progressive_only); + + buffer = dec->meta_data_input.virt_addr; + + *buffer++ = dec->suffix; + + for (i = 0; i < dec->dp_buf.no_of_dec_pic_buf; ++i) + *buffer++ = dec->dp_buf.dec_pic_buffers[i].vcd_frm.phys_addr + + dec->meta_data_offset; +} + +/* TODO: consider combining ddl_process_xxx_metadata */ +void ddl_process_encoder_metadata(struct ddl_client_context *ddl) +{ + struct ddl_encoder_data *enc = &ddl->codec_data.encoder; + struct vcd_frame_data *frm = &ddl->output_frame.vcd_frm; + u32 *qfill_hdr; + u32 *qfill; + unsigned long tmp; + size_t qfill_sz; + + if (!enc->meta_data_enable_flag) { + frm->flags &= ~VCD_FRAME_FLAG_EXTRADATA; + return; + } + + if
(!enc->enc_frame_info.metadata_exists) { + frm->flags &= ~VCD_FRAME_FLAG_EXTRADATA; + return; + } + frm->flags |= VCD_FRAME_FLAG_EXTRADATA; + + tmp = (unsigned long)frm->virt_addr + frm->offset + frm->data_len; + qfill = (u32 *)ALIGN(tmp, 4); + + qfill_sz = enc->meta_data_offset + (u8 *)frm->virt_addr - (u8 *)qfill; + + qfill_hdr = ddl_metadata_hdr_entry(ddl, VCD_METADATA_QCOMFILLER); + + *qfill++ = qfill_sz; + *qfill++ = qfill_hdr[DDL_METADATA_HDR_VERSION_INDEX]; + *qfill++ = qfill_hdr[DDL_METADATA_HDR_PORT_INDEX]; + *qfill++ = qfill_hdr[DDL_METADATA_HDR_TYPE_INDEX]; + *qfill = qfill_sz - DDL_METADATA_HDR_SIZE; +} + +void ddl_process_decoder_metadata(struct ddl_client_context *ddl) +{ + struct ddl_decoder_data *dec = &ddl->codec_data.decoder; + struct vcd_frame_data *frm = &ddl->output_frame.vcd_frm; + u32 *qfill_hdr; + u32 *qfill; + size_t qfill_sz; + unsigned long tmp; + + if (!dec->meta_data_enable_flag) { + frm->flags &= ~VCD_FRAME_FLAG_EXTRADATA; + return; + } + if (!dec->dec_disp_info.metadata_exists) { + frm->flags &= ~VCD_FRAME_FLAG_EXTRADATA; + return; + } + frm->flags |= VCD_FRAME_FLAG_EXTRADATA; + + if (frm->data_len == dec->meta_data_offset) + return; + + tmp = (unsigned long)frm->virt_addr + frm->offset + frm->data_len; + qfill = (u32 *)ALIGN(tmp, 4); + + qfill_sz = dec->meta_data_offset + (u8 *)frm->virt_addr - (u8 *)qfill; + + qfill_hdr = ddl_metadata_hdr_entry(ddl, VCD_METADATA_QCOMFILLER); + + *qfill++ = qfill_sz; + *qfill++ = qfill_hdr[DDL_METADATA_HDR_VERSION_INDEX]; + *qfill++ = qfill_hdr[DDL_METADATA_HDR_PORT_INDEX]; + *qfill++ = qfill_hdr[DDL_METADATA_HDR_TYPE_INDEX]; + *qfill = qfill_sz - DDL_METADATA_HDR_SIZE; +} diff --git a/drivers/misc/video_core/720p/ddl/vcd_ddl_metadata.h b/drivers/misc/video_core/720p/ddl/vcd_ddl_metadata.h new file mode 100644 index 0000000000000..b0fa84c50547a --- /dev/null +++ b/drivers/misc/video_core/720p/ddl/vcd_ddl_metadata.h @@ -0,0 +1,78 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef _VCD_DDL_METADATA_H_ +#define _VCD_DDL_METADATA_H_ + +#define DDL_MAX_DEC_METADATATYPE (8) +#define DDL_MAX_ENC_METADATATYPE (3) + +#define DDL_METADATA_EXTRAPAD_SIZE (256) +#define DDL_METADATA_HDR_SIZE (20) + +#define DDL_METADATA_EXTRADATANONE_SIZE (24) + +#define DDL_METADATA_ALIGNSIZE(x) ((x) = (((x) + 0x7) & ~0x7)) + +#define DDL_METADATA_MANDATORY (VCD_METADATA_DATANONE | VCD_METADATA_QCOMFILLER) + +#define DDL_METADATA_VC1_PAYLOAD_SIZE (38*4) + +#define DDL_METADATA_SEI_PAYLOAD_SIZE (100) +#define DDL_METADATA_SEI_MAX (5) + +#define DDL_METADATA_VUI_PAYLOAD_SIZE (256) + +#define DDL_METADATA_PASSTHROUGH_PAYLOAD_SIZE (68) + +#define DDL_METADATA_CLIENT_INPUTBUFSIZE (256) +#define DDL_METADATA_TOTAL_INPUTBUFSIZE \ + (DDL_METADATA_CLIENT_INPUTBUFSIZE * VCD_MAX_NO_CLIENT) + +#define DDL_METADATA_HDR_VERSION_INDEX 0 +#define DDL_METADATA_HDR_PORT_INDEX 1 +#define DDL_METADATA_HDR_TYPE_INDEX 2 + + +void ddl_set_default_meta_data_hdr(struct ddl_client_context *ddl); +u32 ddl_get_metadata_params(struct ddl_client_context *ddl, + struct vcd_property_hdr *property_hdr, void *property_value); +u32 ddl_set_metadata_params(struct ddl_client_context *ddl, + struct vcd_property_hdr *property_hdr, void *property_value); +void ddl_set_default_metadata_flag(struct ddl_client_context *ddl); +void ddl_set_default_decoder_metadata_buffer_size(struct ddl_decoder_data *dec, + struct vcd_property_frame_size *frame_size, + struct vcd_buffer_requirement *output_buf_req); +void ddl_set_default_encoder_metadata_buffer_size(struct ddl_encoder_data *enc); +void ddl_metadata_enable(struct ddl_client_context *ddl); +phys_addr_t ddl_encode_set_metadata_output_buf(struct ddl_client_context *ddl); +void ddl_decode_set_metadata_output(struct ddl_decoder_data *decoder); +void ddl_process_encoder_metadata(struct ddl_client_context *ddl); +void ddl_process_decoder_metadata(struct ddl_client_context *ddl); + +#endif diff --git a/drivers/misc/video_core/720p/ddl/vcd_ddl_properties.c b/drivers/misc/video_core/720p/ddl/vcd_ddl_properties.c new file mode 100644 index 0000000000000..6e4037b219677 --- /dev/null +++ b/drivers/misc/video_core/720p/ddl/vcd_ddl_properties.c @@ -0,0 +1,1395 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ + +#include "video_core_type.h" + +#include "vcd_ddl_utils.h" +#include "vcd_ddl_metadata.h" + +static u32 ddl_set_dec_property(struct ddl_client_context *pddl, + struct vcd_property_hdr *hdr, void *value); +static u32 ddl_set_enc_property(struct ddl_client_context *pddl, + struct vcd_property_hdr *hdr, void *value); +static u32 ddl_get_dec_property(struct ddl_client_context *pddl, + struct vcd_property_hdr *hdr, void *value); +static u32 ddl_get_enc_property(struct ddl_client_context *pddl, + struct vcd_property_hdr *hdr, void *value); +static u32 ddl_set_enc_dynamic_property(struct ddl_encoder_data *enc, + struct vcd_property_hdr *hdr, void *value); +static void ddl_set_default_enc_property(struct ddl_client_context *ddl); +static void ddl_set_default_enc_profile(struct ddl_encoder_data *enc); +static void ddl_set_default_enc_level(struct ddl_encoder_data *enc); +static void ddl_set_default_enc_vop_timing(struct ddl_encoder_data *enc); +static void ddl_set_default_enc_intra_period(struct ddl_encoder_data *enc); +static void ddl_set_default_enc_rc_params(struct ddl_encoder_data *enc); +static u32 ddl_valid_buffer_requirement(struct vcd_buffer_requirement + *orig, struct vcd_buffer_requirement *req); +static u32 ddl_decoder_min_num_dpb(struct ddl_decoder_data *dec); +static u32 ddl_set_dec_buffers(struct ddl_decoder_data *dec, + struct ddl_property_dec_pic_buffers *dpb); + +u32 ddl_set_property(u32 *ddl_handle, struct vcd_property_hdr *hdr, + void *value) +{ + u32 vcd_status; + struct ddl_context *ddl_context; + struct ddl_client_context *ddl = (struct ddl_client_context *) + ddl_handle; + + if (!hdr || !value) { + pr_err("ddl_set_prop:Bad_argument\n"); + return VCD_ERR_ILLEGAL_PARM; + } + ddl_context = ddl_get_context(); + + if (!DDL_IS_INITIALIZED(ddl_context)) { + pr_err("ddl_set_prop:Not_inited\n"); + return VCD_ERR_ILLEGAL_OP; + } + + if (!ddl) { + pr_err("ddl_set_prop:Bad_handle\n"); + return VCD_ERR_BAD_HANDLE; + } + if (ddl->decoding) + vcd_status = ddl_set_dec_property(ddl, hdr, value); + else + vcd_status = ddl_set_enc_property(ddl, hdr, value); + if (vcd_status) + pr_err("ddl_set_prop:FAILED\n"); + + return vcd_status; +} + +u32 ddl_get_property(u32 *ddl_handle, struct vcd_property_hdr *hdr, void *value) +{ + u32 vcd_status = VCD_ERR_ILLEGAL_PARM; + struct ddl_context *ddl_context; + struct ddl_client_context *ddl = (struct ddl_client_context *) + ddl_handle; + + if (!hdr || !value) + return VCD_ERR_ILLEGAL_PARM; + + if (hdr->id == DDL_I_CAPABILITY) { + struct ddl_property_capability *cap; + if (sizeof(*cap) == hdr->sz) { + cap = value; + cap->max_num_client = VCD_MAX_NO_CLIENT; + cap->exclusive = VCD_COMMAND_EXCLUSIVE; + cap->frame_command_depth = VCD_FRAME_COMMAND_DEPTH; + cap->general_command_depth = VCD_GENERAL_COMMAND_DEPTH; + cap->ddl_time_out_in_ms = DDL_HW_TIMEOUT_IN_MS; + vcd_status = VCD_S_SUCCESS; + } + return vcd_status; + } + ddl_context = ddl_get_context(); + if (!DDL_IS_INITIALIZED(ddl_context)) + return VCD_ERR_ILLEGAL_OP; + + if (!ddl) + return VCD_ERR_BAD_HANDLE; + + if (ddl->decoding) + vcd_status = ddl_get_dec_property(ddl, hdr, value); + else + vcd_status = ddl_get_enc_property(ddl, hdr, value); + if (vcd_status) + pr_err("ddl_get_prop:FAILED\n"); + + return vcd_status; +} + +u32 ddl_decoder_ready_to_start(struct ddl_client_context *ddl, + struct vcd_phys_sequence_hdr *seq_hdr) +{ + struct ddl_decoder_data *dec = &ddl->codec_data.decoder; + if (!dec->codec_type.codec) { + pr_err("ddl_dec_start_check:Codec_not_set\n"); + return false; + } + if 
(!seq_hdr && (!dec->client_frame_size.height || + !dec->client_frame_size.width)) { + pr_err("ddl_dec_start_check:" + "Client_height_width_default\n"); + return false; + } + return true; +} + +u32 ddl_encoder_ready_to_start(struct ddl_client_context *ddl) +{ + struct ddl_encoder_data *enc = &ddl->codec_data.encoder; + + if (!enc->codec_type.codec || !enc->frame_size.height || + !enc->frame_size.width || + !enc->frame_rate.fps_denominator || + !enc->frame_rate.fps_numerator || + !enc->target_bit_rate.target_bitrate) { + return false; + } + return true; +} + +static u32 ddl_set_dec_property(struct ddl_client_context *ddl, + struct vcd_property_hdr *hdr, void *value) { + u32 vcd_status = VCD_ERR_ILLEGAL_PARM; + struct ddl_decoder_data *dec = &ddl->codec_data.decoder; + switch (hdr->id) { + case DDL_I_DPB_RELEASE: + if (sizeof(struct ddl_frame_data_tag) == hdr->sz && + dec->dp_buf.no_of_dec_pic_buf) { + vcd_status = ddl_decoder_dpb_transact(dec, value, + DDL_DPB_OP_MARK_FREE); + } + break; + case DDL_I_DPB: + { + struct ddl_property_dec_pic_buffers *dpb = value; + if (sizeof(*dpb) == hdr->sz && + (DDLCLIENT_STATE_IS(ddl, + DDL_CLIENT_WAIT_FOR_INITCODEC) || + DDLCLIENT_STATE_IS(ddl, + DDL_CLIENT_WAIT_FOR_DPB)) && + dpb->no_of_dec_pic_buf >= + dec->client_output_buf_req.actual_count) { + vcd_status = ddl_set_dec_buffers(dec, dpb); + } + break; + } + case DDL_I_REQ_OUTPUT_FLUSH: + if (sizeof(u32) == hdr->sz) { + dec->dynamic_prop_change |= DDL_DEC_REQ_OUTPUT_FLUSH; + dec->dpb_mask.client_mask = 0; + vcd_status = VCD_S_SUCCESS; + } + break; + case DDL_I_INPUT_BUF_REQ: + { + struct vcd_buffer_requirement *buf_req = value; + if (sizeof(*buf_req) == hdr->sz && + ddl_valid_buffer_requirement( + &dec->min_input_buf_req, buf_req)) { + dec->client_input_buf_req = *buf_req; + vcd_status = VCD_S_SUCCESS; + } + break; + } + case DDL_I_OUTPUT_BUF_REQ: + { + struct vcd_buffer_requirement *buf_req = value; + if (sizeof(*buf_req) == hdr->sz && + ddl_valid_buffer_requirement( + &dec->min_output_buf_req, buf_req)) { + dec->client_output_buf_req = *buf_req; + vcd_status = VCD_S_SUCCESS; + } + break; + } + case VCD_I_CODEC: + { + struct vcd_property_codec *codec = value; + if (sizeof(*codec) == hdr->sz && + DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) { + if (!vcd_fw_is_codec_supported(true, codec->codec)) { + vcd_status = VCD_ERR_NOT_SUPPORTED; + break; + } + dec->codec_type = *codec; + ddl_set_default_dec_property(ddl); + vcd_status = VCD_S_SUCCESS; + } + break; + } + case VCD_I_POST_FILTER: + if (sizeof(struct vcd_property_post_filter) == hdr->sz && + DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN) && + (dec->codec_type.codec == VCD_CODEC_MPEG4 || + dec->codec_type.codec == VCD_CODEC_MPEG2)) { + dec->post_filter = *(struct vcd_property_post_filter *) + value; + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_FRAME_SIZE: + { + struct vcd_property_frame_size *frame_size = value; + if ((sizeof(*frame_size) == hdr->sz) && + DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) { + if (dec->client_frame_size.height != frame_size->height + || dec->client_frame_size.width != + frame_size->width) { + dec->client_frame_size = *frame_size; + ddl_calculate_stride(&dec->client_frame_size, + !dec->progressive_only); + ddl_set_default_decoder_buffer_req(dec, true); + } + vcd_status = VCD_S_SUCCESS; + } + break; + } + case VCD_I_BUFFER_FORMAT: + { + struct vcd_property_buffer_format *tile = value; + if (sizeof(*tile) == hdr->sz && + DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN) && + (tile->buffer_format == VCD_BUFFER_FORMAT_NV12 + || 
tile->buffer_format == + VCD_BUFFER_FORMAT_TILE_4x2)) { + if (tile->buffer_format != + dec->buf_format.buffer_format) { + dec->buf_format = *tile; + ddl_set_default_decoder_buffer_req(dec, true); + } + vcd_status = VCD_S_SUCCESS; + } + break; + } + case VCD_I_METADATA_ENABLE: + case VCD_I_METADATA_HEADER: + vcd_status = ddl_set_metadata_params(ddl, hdr, value); + break; + default: + vcd_status = VCD_ERR_ILLEGAL_OP; + break; + } + return vcd_status; +} + +static u32 ddl_set_enc_property(struct ddl_client_context *ddl, + struct vcd_property_hdr *hdr, void *value) +{ + u32 vcd_status = VCD_ERR_ILLEGAL_PARM; + struct ddl_encoder_data *enc = &ddl->codec_data.encoder; + + if (DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_FRAME)) { + vcd_status = ddl_set_enc_dynamic_property(enc, hdr, value); + return vcd_status; + } + + if (!DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN)) { + pr_err("ddl_set_enc_property:" + "Fails_as_not_in_open_state\n"); + return VCD_ERR_ILLEGAL_OP; + } + + switch (hdr->id) { + case VCD_I_TARGET_BITRATE: + { + struct vcd_property_target_bitrate *bitrate = value; + if (sizeof(*bitrate) == hdr->sz && + bitrate->target_bitrate) { + enc->target_bit_rate = *bitrate; + vcd_status = VCD_S_SUCCESS; + } + break; + } + case VCD_I_FRAME_RATE: + { + struct vcd_property_frame_rate *framerate = value; + if (sizeof(*framerate) == hdr->sz && + framerate->fps_denominator && + framerate->fps_numerator) { + enc->frame_rate = *framerate; + vcd_status = VCD_S_SUCCESS; + } + break; + } + case VCD_I_FRAME_SIZE: + { + struct vcd_property_frame_size *framesize = value; + if (sizeof(*framesize) == hdr->sz && + DDL_ALLOW_ENC_FRAMESIZE(framesize->width, + framesize->height)) { + enc->frame_size = *framesize; + ddl_calculate_stride(&enc->frame_size, false); + ddl_set_default_encoder_buffer_req(enc); + vcd_status = VCD_S_SUCCESS; + } + break; + } + case VCD_I_CODEC: + { + struct vcd_property_codec *codec = value; + if (sizeof(*codec) == hdr->sz) { + if (!vcd_fw_is_codec_supported(false, codec->codec)) { + vcd_status = VCD_ERR_NOT_SUPPORTED; + break; + } + enc->codec_type = *codec; + ddl_set_default_enc_property(ddl); + vcd_status = VCD_S_SUCCESS; + } + break; + } + case VCD_I_REQ_IFRAME: + vcd_status = VCD_S_SUCCESS; + break; + case VCD_I_INTRA_PERIOD: + { + struct vcd_property_i_period *iperiod = value; + if (sizeof(*iperiod) == hdr->sz && !iperiod->bframes) { + enc->period = *iperiod; + vcd_status = VCD_S_SUCCESS; + } + break; + } + case VCD_I_PROFILE: + { + struct vcd_property_profile *profile = value; + if (sizeof(*profile) == hdr->sz && + ((enc->codec_type.codec == VCD_CODEC_MPEG4 && + (profile->profile == VCD_PROFILE_MPEG4_SP || + profile->profile == VCD_PROFILE_MPEG4_ASP)) || + ((enc->codec_type.codec == VCD_CODEC_H264 && + profile->profile >= VCD_PROFILE_H264_BASELINE && + profile->profile <= VCD_PROFILE_H264_HIGH)) || + (enc->codec_type.codec == VCD_CODEC_H263 && + profile->profile == VCD_PROFILE_H263_BASELINE)) + ) { + enc->profile = *profile; + vcd_status = VCD_S_SUCCESS; + } + break; + } + case VCD_I_LEVEL: + { + struct vcd_property_level *level = value; + if (sizeof(*level) == hdr->sz && + ((enc->codec_type.codec == VCD_CODEC_MPEG4 && + level->level >= VCD_LEVEL_MPEG4_0 && + level->level <= VCD_LEVEL_MPEG4_6) || + (enc->codec_type.codec == VCD_CODEC_H264 && + level->level >= VCD_LEVEL_H264_1 && + level->level <= VCD_LEVEL_H264_3p1) || + (enc->codec_type.codec == VCD_CODEC_H263 && + level->level >= VCD_LEVEL_H263_10 && + level->level <= VCD_LEVEL_H263_70))) { + enc->level = *level; + vcd_status = 
VCD_S_SUCCESS; + } + break; + } + case VCD_I_MULTI_SLICE: + { + struct vcd_property_multi_slice *multislice = value; + switch (multislice->m_slice_sel) { + case VCD_MSLICE_OFF: + vcd_status = VCD_S_SUCCESS; + break; + case VCD_MSLICE_BY_GOB: + if (enc->codec_type.codec == VCD_CODEC_H263) + vcd_status = VCD_S_SUCCESS; + break; + case VCD_MSLICE_BY_MB_COUNT: + if (multislice->m_slice_size >= 1 && + (multislice->m_slice_size <= + (enc->frame_size.height + * enc->frame_size.width / 16 / 16))) { + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_MSLICE_BY_BYTE_COUNT: + if (multislice->m_slice_size < + DDL_MINIMUM_BYTE_PER_SLICE) { + vcd_status = VCD_S_SUCCESS; + break; + } + default: + break; + } + if (sizeof(struct vcd_property_multi_slice) == hdr->sz && + !vcd_status) { + enc->multi_slice = *multislice; + } + break; + } + case VCD_I_RATE_CONTROL: + { + struct vcd_property_rate_control *ratecontrol_type = value; + if (sizeof(*ratecontrol_type) == hdr->sz && + ratecontrol_type->rate_control >= + VCD_RATE_CONTROL_OFF && + ratecontrol_type->rate_control <= + VCD_RATE_CONTROL_CBR_CFR) { + enc->rc_type = *ratecontrol_type; + ddl_set_default_enc_rc_params(enc); + vcd_status = VCD_S_SUCCESS; + } + break; + } + case VCD_I_SHORT_HEADER: + if (sizeof(struct vcd_property_short_header) == hdr->sz && + enc->codec_type.codec == VCD_CODEC_MPEG4) { + enc->short_header = + *(struct vcd_property_short_header *)value; + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_VOP_TIMING: + { + struct vcd_property_vop_timing *voptime = value; + if (sizeof(*voptime) == hdr->sz && enc->frame_rate.fps_numerator + <= voptime->vop_time_resolution) { + enc->vop_timing = *voptime; + vcd_status = VCD_S_SUCCESS; + } + break; + } + case VCD_I_HEADER_EXTENSION: + if (sizeof(u32) == hdr->sz && enc->codec_type.codec == + VCD_CODEC_MPEG4) { + enc->hdr_ext_control = *(u32 *)value; + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_ENTROPY_CTRL: + { + struct vcd_property_entropy_control *entropy = value; + if (sizeof(*entropy) == hdr->sz && + enc->codec_type.codec == VCD_CODEC_H264 && + entropy->entropy_sel >= VCD_ENTROPY_SEL_CAVLC && + entropy->entropy_sel <= VCD_ENTROPY_SEL_CABAC) { + enc->entropy_control = *entropy; + vcd_status = VCD_S_SUCCESS; + } + break; + } + case VCD_I_DEBLOCKING: + { + struct vcd_property_db_config *db = value; + if (sizeof(*db) == hdr->sz && + enc->codec_type.codec == VCD_CODEC_H264 && + db->db_config >= VCD_DB_ALL_BLOCKING_BOUNDARY && + db->db_config <= VCD_DB_SKIP_SLICE_BOUNDARY) { + enc->db_control = *db; + vcd_status = VCD_S_SUCCESS; + } + break; + } + case VCD_I_QP_RANGE: + { + struct vcd_property_qp_range *qp = value; + if (sizeof(*qp) == hdr->sz && qp->min_qp <= qp->max_qp && + ((enc->codec_type.codec == VCD_CODEC_H264 && + qp->max_qp <= DDL_MAX_H264_QP) || + qp->max_qp <= DDL_MAX_MPEG4_QP)) { + enc->qp_range = *qp; + vcd_status = VCD_S_SUCCESS; + } + break; + } + case VCD_I_SESSION_QP: + { + struct vcd_property_session_qp *qp = value; + if ((sizeof(*qp) == hdr->sz) && + qp->iframe_qp >= enc->qp_range.min_qp && + qp->iframe_qp <= enc->qp_range.max_qp && + qp->frame_qp >= enc->qp_range.min_qp && + qp->frame_qp <= enc->qp_range.max_qp) { + enc->session_qp = *qp; + vcd_status = VCD_S_SUCCESS; + } + break; + } + case VCD_I_RC_LEVEL_CONFIG: + { + struct vcd_property_rc_level *rc_level = value; + if (sizeof(*rc_level) == hdr->sz && + (enc->rc_type.rate_control >= + VCD_RATE_CONTROL_VBR_VFR || + enc->rc_type.rate_control <= + VCD_RATE_CONTROL_CBR_VFR) && + (!rc_level->mb_level_rc || + 
enc->codec_type.codec == VCD_CODEC_H264)) { + enc->rc_level = *rc_level; + vcd_status = VCD_S_SUCCESS; + } + break; + } + case VCD_I_FRAME_LEVEL_RC: + { + struct vcd_property_frame_level_rc_params *rc = value; + if (sizeof(*rc) == hdr->sz && rc->reaction_coeff && + enc->rc_level.frame_level_rc) { + enc->frame_level_rc = *rc; + vcd_status = VCD_S_SUCCESS; + } + break; + } + case VCD_I_ADAPTIVE_RC: + { + struct vcd_property_adaptive_rc_params *rc = value; + if (sizeof(*rc) == hdr->sz && enc->codec_type.codec == + VCD_CODEC_H264 && enc->rc_level.mb_level_rc) { + enc->adaptive_rc = *rc; + vcd_status = VCD_S_SUCCESS; + } + break; + } + case VCD_I_INTRA_REFRESH: + { + struct vcd_property_intra_refresh_mb_number *mbnum = value; + u32 frame_mbnum = (enc->frame_size.width / 16) * + (enc->frame_size.height / 16); + if (sizeof(*mbnum) == hdr->sz && mbnum->cir_mb_number <= + frame_mbnum) { + enc->intra_refresh = *mbnum; + vcd_status = VCD_S_SUCCESS; + } + break; + } + case VCD_I_BUFFER_FORMAT: + { + struct vcd_property_buffer_format *tile = value; + if (sizeof(*tile) == hdr->sz && tile->buffer_format == + VCD_BUFFER_FORMAT_NV12) { + enc->buf_format = *tile; + vcd_status = VCD_S_SUCCESS; + } + break; + } + case DDL_I_INPUT_BUF_REQ: + { + struct vcd_buffer_requirement *buf_req = value; + if (sizeof(*buf_req) == hdr->sz && ddl_valid_buffer_requirement( + &enc->input_buf_req, buf_req)) { + enc->client_input_buf_req = *buf_req; + vcd_status = VCD_S_SUCCESS; + } + break; + } + case DDL_I_OUTPUT_BUF_REQ: + { + struct vcd_buffer_requirement *buf_req = value; + if (sizeof(*buf_req) == hdr->sz && ddl_valid_buffer_requirement( + &enc->output_buf_req, buf_req)) { + enc->client_output_buf_req = *buf_req; + vcd_status = VCD_S_SUCCESS; + } + break; + } + case VCD_I_METADATA_ENABLE: + case VCD_I_METADATA_HEADER: + vcd_status = ddl_set_metadata_params(ddl, hdr, value); + break; + default: + vcd_status = VCD_ERR_ILLEGAL_OP; + break; + } + return vcd_status; +} + +static u32 ddl_get_dec_property(struct ddl_client_context *ddl, + struct vcd_property_hdr *hdr, void *value) +{ + u32 vcd_status = VCD_ERR_ILLEGAL_PARM; + struct ddl_decoder_data *dec = &ddl->codec_data.decoder; + + switch (hdr->id) { + case VCD_I_FRAME_SIZE: + if (sizeof(struct vcd_property_frame_size) == hdr->sz) { + if (dec->client_frame_size.width) { + struct vcd_property_frame_size *size = value; + *size = dec->client_frame_size; + vcd_status = VCD_S_SUCCESS; + } else { + vcd_status = VCD_ERR_ILLEGAL_OP; + } + } + break; + case VCD_I_PROFILE: + if (sizeof(struct vcd_property_profile) == hdr->sz) { + *(struct vcd_property_profile *)value = dec->profile; + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_LEVEL: + if (sizeof(struct vcd_property_level) == hdr->sz) { + *(struct vcd_property_level *)value = dec->level; + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_PROGRESSIVE_ONLY: + if (sizeof(u32) == hdr->sz) { + *(u32 *)value = dec->progressive_only; + vcd_status = VCD_S_SUCCESS; + } + break; + case DDL_I_INPUT_BUF_REQ: + if (sizeof(struct vcd_buffer_requirement) == hdr->sz) { + if (dec->client_input_buf_req.size) { + *(struct vcd_buffer_requirement *)value = + dec->client_input_buf_req; + vcd_status = VCD_S_SUCCESS; + } else { + vcd_status = VCD_ERR_ILLEGAL_OP; + } + } + break; + case DDL_I_OUTPUT_BUF_REQ: + if (sizeof(struct vcd_buffer_requirement) == hdr->sz) { + if (dec->client_output_buf_req.size) { + *(struct vcd_buffer_requirement *)value = + dec->client_output_buf_req; + vcd_status = VCD_S_SUCCESS; + } else { + vcd_status = 
VCD_ERR_ILLEGAL_OP; + } + } + break; + case VCD_I_CODEC: + if (sizeof(struct vcd_property_codec) == hdr->sz) { + if (dec->codec_type.codec) { + *(struct vcd_property_codec *)value = + dec->codec_type; + vcd_status = VCD_S_SUCCESS; + } else { + vcd_status = VCD_ERR_ILLEGAL_OP; + } + } + break; + case VCD_I_BUFFER_FORMAT: + if (sizeof(struct vcd_property_buffer_format) == hdr->sz) { + *(struct vcd_property_buffer_format *)value = + dec->buf_format; + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_POST_FILTER: + if (sizeof(struct vcd_property_post_filter) == hdr->sz) { + *(struct vcd_property_post_filter *)value = + dec->post_filter; + vcd_status = VCD_S_SUCCESS; + } + break; + case DDL_I_SEQHDR_ALIGN_BYTES: + if (sizeof(u32) == hdr->sz) { + *(u32 *)value = DDL_LINEAR_BUFFER_ALIGN_BYTES; + vcd_status = VCD_S_SUCCESS; + } + break; + case DDL_I_FRAME_PROC_UNITS: + if (sizeof(u32) == hdr->sz && dec->client_frame_size.width && + dec->client_frame_size.height) { + *(u32 *)value = ((dec->client_frame_size.width >> 4) * + (dec->client_frame_size.height >> 4)); + vcd_status = VCD_S_SUCCESS; + } + break; + case DDL_I_DPB_RETRIEVE: + if (sizeof(struct ddl_frame_data_tag) == hdr->sz) { + vcd_status = ddl_decoder_dpb_transact(dec, + (struct ddl_frame_data_tag *)value, + DDL_DPB_OP_RETRIEVE); + } + break; + case VCD_I_METADATA_ENABLE: + case VCD_I_METADATA_HEADER: + vcd_status = ddl_get_metadata_params(ddl, hdr, value); + break; + default: + vcd_status = VCD_ERR_ILLEGAL_OP; + break; + } + return vcd_status; +} + +static u32 ddl_get_enc_property(struct ddl_client_context *ddl, + struct vcd_property_hdr *hdr, void *value) +{ + u32 vcd_status = VCD_ERR_ILLEGAL_PARM; + struct ddl_encoder_data *enc = &ddl->codec_data.encoder; + + struct vcd_property_entropy_control *entropy_control; + struct vcd_property_intra_refresh_mb_number *intra_refresh; + + switch (hdr->id) { + case VCD_I_CODEC: + if (sizeof(struct vcd_property_codec) == hdr->sz) { + *(struct vcd_property_codec *)value = enc->codec_type; + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_FRAME_SIZE: + if (sizeof(struct vcd_property_frame_size) == hdr->sz) { + *(struct vcd_property_frame_size *)value = + enc->frame_size; + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_FRAME_RATE: + if (sizeof(struct vcd_property_frame_rate) == hdr->sz) { + *(struct vcd_property_frame_rate *)value = + enc->frame_rate; + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_TARGET_BITRATE: + if (sizeof(struct vcd_property_target_bitrate) == hdr->sz) { + *(struct vcd_property_target_bitrate *)value = + enc->target_bit_rate; + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_RATE_CONTROL: + if (sizeof(struct vcd_property_rate_control) == hdr->sz) { + *(struct vcd_property_rate_control *)value = + enc->rc_type; + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_PROFILE: + if (sizeof(struct vcd_property_profile) == hdr->sz) { + *(struct vcd_property_profile *)value = enc->profile; + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_LEVEL: + if (sizeof(struct vcd_property_level) == hdr->sz) { + *(struct vcd_property_level *)value = enc->level; + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_MULTI_SLICE: + if (sizeof(struct vcd_property_multi_slice) == hdr->sz) { + *(struct vcd_property_multi_slice *)value = + enc->multi_slice; + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_SEQ_HEADER: + { + struct vcd_sequence_hdr *seq_hdr = value; + if (enc->seq_header.size && sizeof(struct vcd_sequence_hdr) == + hdr->sz && 
enc->seq_header.size <= + seq_hdr->sz) { + memcpy(seq_hdr->addr, enc->seq_header.virt_addr, + enc->seq_header.size); + seq_hdr->sz = enc->seq_header.size; + vcd_status = VCD_S_SUCCESS; + } + break; + } + case DDL_I_SEQHDR_PRESENT: + if (sizeof(u32) == hdr->sz) { + if ((enc->codec_type.codec == VCD_CODEC_MPEG4 && + !enc->short_header.short_header) || + enc->codec_type.codec == + VCD_CODEC_H264) + *(u32 *)value = 0x1; + else + *(u32 *)value = 0x0; + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_VOP_TIMING: + if (sizeof(struct vcd_property_vop_timing) == hdr->sz) { + *(struct vcd_property_vop_timing *)value = + enc->vop_timing; + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_SHORT_HEADER: + if (sizeof(struct vcd_property_short_header) == hdr->sz) { + if (enc->codec_type.codec == VCD_CODEC_MPEG4) { + *(struct vcd_property_short_header *)value = + enc->short_header; + vcd_status = VCD_S_SUCCESS; + } else { + vcd_status = VCD_ERR_ILLEGAL_OP; + } + } + break; + case VCD_I_ENTROPY_CTRL: + entropy_control = value; + if (sizeof(struct vcd_property_entropy_control) == hdr->sz) { + if (enc->codec_type.codec == VCD_CODEC_H264) { + *entropy_control = enc->entropy_control; + vcd_status = VCD_S_SUCCESS; + } else { + vcd_status = VCD_ERR_ILLEGAL_OP; + } + } + break; + case VCD_I_DEBLOCKING: + if (sizeof(struct vcd_property_db_config) == hdr->sz) { + if (enc->codec_type.codec == VCD_CODEC_H264) { + *(struct vcd_property_db_config *)value = + enc->db_control; + vcd_status = VCD_S_SUCCESS; + } else { + vcd_status = VCD_ERR_ILLEGAL_OP; + } + } + break; + case VCD_I_INTRA_PERIOD: + if (sizeof(struct vcd_property_i_period) == hdr->sz) { + *(struct vcd_property_i_period *)value = enc->period; + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_QP_RANGE: + if (sizeof(struct vcd_property_qp_range) == hdr->sz) { + *(struct vcd_property_qp_range *)value = enc->qp_range; + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_SESSION_QP: + if (sizeof(struct vcd_property_session_qp) == hdr->sz) { + *(struct vcd_property_session_qp *)value = + enc->session_qp; + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_RC_LEVEL_CONFIG: + if (sizeof(struct vcd_property_rc_level) == hdr->sz) { + *(struct vcd_property_rc_level *)value = enc->rc_level; + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_FRAME_LEVEL_RC: + if (sizeof(struct vcd_property_frame_level_rc_params) == + hdr->sz) { + *(struct vcd_property_frame_level_rc_params *)value = + enc->frame_level_rc; + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_ADAPTIVE_RC: + if (sizeof(struct vcd_property_adaptive_rc_params) == + hdr->sz) { + *(struct vcd_property_adaptive_rc_params *)value = + enc->adaptive_rc; + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_INTRA_REFRESH: + intra_refresh = value; + if (sizeof(struct vcd_property_intra_refresh_mb_number) == + hdr->sz) { + *intra_refresh = enc->intra_refresh; + vcd_status = VCD_S_SUCCESS; + } + break; + case DDL_I_INPUT_BUF_REQ: + if (sizeof(struct vcd_buffer_requirement) == hdr->sz) { + if (enc->output_buf_req.size) { + *(struct vcd_buffer_requirement *)value = + enc->client_input_buf_req; + vcd_status = VCD_S_SUCCESS; + } else { + vcd_status = VCD_ERR_ILLEGAL_OP; + } + } + break; + case DDL_I_OUTPUT_BUF_REQ: + if (sizeof(struct vcd_buffer_requirement) == hdr->sz) { + if (enc->output_buf_req.size) { + *(struct vcd_buffer_requirement *)value = + enc->client_output_buf_req; + vcd_status = VCD_S_SUCCESS; + } else { + vcd_status = VCD_ERR_ILLEGAL_OP; + } + } + break; + case 
VCD_I_BUFFER_FORMAT: + if (sizeof(struct vcd_property_buffer_format) == hdr->sz) { + *(struct vcd_property_buffer_format *)value = + enc->buf_format; + vcd_status = VCD_S_SUCCESS; + } + break; + case DDL_I_FRAME_PROC_UNITS: + if (sizeof(u32) == hdr->sz && enc->frame_size.width && + enc->frame_size.height) { + *(u32 *)value = ((enc->frame_size.width >> 4) * + (enc->frame_size.height >> 4)); + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_HEADER_EXTENSION: + if (sizeof(u32) == hdr->sz && enc->codec_type.codec == + VCD_CODEC_MPEG4) { + *(u32 *)value = enc->hdr_ext_control; + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_METADATA_ENABLE: + case VCD_I_METADATA_HEADER: + vcd_status = ddl_get_metadata_params(ddl, hdr, value); + break; + default: + vcd_status = VCD_ERR_ILLEGAL_OP; + break; + } + return vcd_status; +} + +static u32 ddl_set_enc_dynamic_property(struct ddl_encoder_data *enc, + struct vcd_property_hdr *hdr, void *value) +{ + u32 vcd_status = VCD_ERR_ILLEGAL_PARM; + switch (hdr->id) { + case VCD_I_REQ_IFRAME: + if (sizeof(struct vcd_property_req_i_frame) == hdr->sz) { + enc->dynamic_prop_change |= DDL_ENC_REQ_IFRAME; + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_TARGET_BITRATE: + if (sizeof(struct vcd_property_target_bitrate) == hdr->sz) { + enc->target_bit_rate = + *(struct vcd_property_target_bitrate *)value; + enc->dynamic_prop_change |= DDL_ENC_CHANGE_BITRATE; + vcd_status = VCD_S_SUCCESS; + } + break; + case VCD_I_INTRA_PERIOD: + { + struct vcd_property_i_period *iperiod = value; + if (sizeof(struct vcd_property_i_period) == hdr->sz && + !iperiod->bframes) { + enc->period = *iperiod; + enc->dynamic_prop_change |= DDL_ENC_CHANGE_IPERIOD; + vcd_status = VCD_S_SUCCESS; + } + break; + } + case VCD_I_FRAME_RATE: + { + struct vcd_property_frame_rate *frame_rate = value; + if (sizeof(struct vcd_property_frame_rate) == hdr->sz && + frame_rate->fps_denominator && + frame_rate->fps_numerator && + frame_rate->fps_denominator <= + frame_rate->fps_numerator) { + enc->frame_rate = *frame_rate; + enc->dynamic_prop_change |= DDL_ENC_CHANGE_FRAMERATE; + vcd_status = VCD_S_SUCCESS; + } + break; + } + default: + vcd_status = VCD_ERR_ILLEGAL_OP; + break; + } + return vcd_status; +} + +void ddl_set_default_dec_property(struct ddl_client_context *ddl) +{ + struct ddl_decoder_data *dec = &(ddl->codec_data.decoder); + + if (dec->codec_type.codec == VCD_CODEC_MPEG4 || + dec->codec_type.codec == VCD_CODEC_MPEG2) { + dec->post_filter.post_filter = true; + } else { + dec->post_filter.post_filter = false; + } + dec->buf_format.buffer_format = VCD_BUFFER_FORMAT_NV12; + dec->client_frame_size.height = 144; + dec->client_frame_size.width = 176; + dec->client_frame_size.stride = 176; + dec->client_frame_size.scan_lines = 144; + dec->progressive_only = 1; + ddl_set_default_metadata_flag(ddl); + + ddl_set_default_decoder_buffer_req(dec, true); +} + +static void ddl_set_default_enc_property(struct ddl_client_context *ddl) +{ + struct ddl_encoder_data *enc = &(ddl->codec_data.encoder); + + ddl_set_default_enc_profile(enc); + ddl_set_default_enc_level(enc); + + enc->rc_type.rate_control = VCD_RATE_CONTROL_VBR_VFR; + ddl_set_default_enc_rc_params(enc); + + ddl_set_default_enc_intra_period(enc); + + enc->intra_refresh.cir_mb_number = 0; + ddl_set_default_enc_vop_timing(enc); + + enc->multi_slice.m_slice_size = VCD_MSLICE_OFF; + enc->short_header.short_header = false; + + enc->entropy_control.entropy_sel = VCD_ENTROPY_SEL_CAVLC; + enc->entropy_control.cabac_model = VCD_CABAC_MODEL_NUMBER_0; + 
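+	/* Deblocking defaults: filter all block boundaries with zero
+	 * alpha/beta offsets; db_control is only accepted for H.264 by the
+	 * VCD_I_DEBLOCKING handlers above. */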
enc->db_control.db_config = VCD_DB_ALL_BLOCKING_BOUNDARY; + enc->db_control.slice_alpha_offset = 0; + enc->db_control.slice_beta_offset = 0; + + enc->re_con_buf_format.buffer_format = VCD_BUFFER_FORMAT_TILE_4x2; + + enc->buf_format.buffer_format = VCD_BUFFER_FORMAT_NV12; + + enc->hdr_ext_control = 0; + + ddl_set_default_metadata_flag(ddl); + + ddl_set_default_encoder_buffer_req(enc); +} + +static void ddl_set_default_enc_profile(struct ddl_encoder_data *enc) +{ + enum vcd_codec codec = enc->codec_type.codec; + if (codec == VCD_CODEC_MPEG4) + enc->profile.profile = VCD_PROFILE_MPEG4_SP; + else if (codec == VCD_CODEC_H264) + enc->profile.profile = VCD_PROFILE_H264_BASELINE; + else + enc->profile.profile = VCD_PROFILE_H263_BASELINE; +} + +static void ddl_set_default_enc_level(struct ddl_encoder_data *enc) +{ + enum vcd_codec codec = enc->codec_type.codec; + if (codec == VCD_CODEC_MPEG4) + enc->level.level = VCD_LEVEL_MPEG4_1; + else if (codec == VCD_CODEC_H264) + enc->level.level = VCD_LEVEL_H264_1; + else + enc->level.level = VCD_LEVEL_H263_10; +} + +static void ddl_set_default_enc_vop_timing(struct ddl_encoder_data *enc) +{ + enc->vop_timing.vop_time_resolution = (2 * + enc->frame_rate.fps_numerator) / + enc->frame_rate.fps_denominator; +} + +static void ddl_set_default_enc_intra_period(struct ddl_encoder_data *enc) +{ + switch (enc->rc_type.rate_control) { + default: + case VCD_RATE_CONTROL_VBR_VFR: + case VCD_RATE_CONTROL_VBR_CFR: + case VCD_RATE_CONTROL_CBR_VFR: + case VCD_RATE_CONTROL_OFF: + enc->period.frames = ((enc->frame_rate.fps_numerator << 1) / + enc->frame_rate.fps_denominator) - 1; + break; + case VCD_RATE_CONTROL_CBR_CFR: + enc->period.frames = ((enc->frame_rate.fps_numerator >> 1) / + enc->frame_rate.fps_denominator) - 1; + break; + } + enc->period.bframes = 0; +} + +static void ddl_set_default_enc_rc_params(struct ddl_encoder_data *enc) +{ + enum vcd_codec codec = enc->codec_type.codec; + + enc->rc_level.frame_level_rc = true; + enc->qp_range.min_qp = 0x1; + + if (codec == VCD_CODEC_H264) { + enc->qp_range.max_qp = 0x33; + enc->session_qp.iframe_qp = 0x19; + enc->session_qp.frame_qp = 0x19; + + enc->rc_level.mb_level_rc = true; + enc->adaptive_rc.activity_region_flag = true; + enc->adaptive_rc.dark_region_as_flag = true; + enc->adaptive_rc.smooth_region_as_flag = true; + enc->adaptive_rc.static_region_as_flag = true; + } else { + enc->qp_range.max_qp = 0x1f; + enc->session_qp.iframe_qp = 0x14; + enc->session_qp.frame_qp = 0x14; + enc->rc_level.mb_level_rc = false; + } + + switch (enc->rc_type.rate_control) { + default: + case VCD_RATE_CONTROL_VBR_VFR: + enc->r_cframe_skip = 1; + enc->frame_level_rc.reaction_coeff = 0x1f4; + break; + case VCD_RATE_CONTROL_VBR_CFR: + enc->r_cframe_skip = 0; + enc->frame_level_rc.reaction_coeff = 0x1f4; + break; + case VCD_RATE_CONTROL_CBR_VFR: + enc->r_cframe_skip = 1; + if (codec != VCD_CODEC_H264) { + enc->session_qp.iframe_qp = 0xf; + enc->session_qp.frame_qp = 0xf; + } + + enc->frame_level_rc.reaction_coeff = 0x6; + break; + case VCD_RATE_CONTROL_CBR_CFR: + enc->r_cframe_skip = 0; + enc->frame_level_rc.reaction_coeff = 0x6; + break; + case VCD_RATE_CONTROL_OFF: + enc->r_cframe_skip = 0; + enc->rc_level.frame_level_rc = false; + enc->rc_level.mb_level_rc = false; + break; + } +} + +void ddl_set_default_encoder_buffer_req(struct ddl_encoder_data *enc) +{ + u32 y_cb_cr_size; + + y_cb_cr_size = ddl_get_yuv_buffer_size(&enc->frame_size, + &enc->buf_format, false); + + memset(&enc->input_buf_req, 0, sizeof(struct vcd_buffer_requirement)); + + 
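+	/* Input (uncompressed) side: one linear YUV frame of y_cb_cr_size
+	 * bytes per buffer; a single buffer is the minimum requirement. */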
enc->input_buf_req.min_count = 1; + enc->input_buf_req.actual_count = enc->input_buf_req.min_count; + enc->input_buf_req.max_count = DDL_MAX_BUFFER_COUNT; + enc->input_buf_req.size = y_cb_cr_size; + enc->input_buf_req.align = DDL_LINEAR_BUFFER_ALIGN_BYTES; + + enc->client_input_buf_req = enc->input_buf_req; + + memset(&enc->output_buf_req, 0, sizeof(struct vcd_buffer_requirement)); + + enc->output_buf_req.min_count = 2; + enc->output_buf_req.actual_count = enc->output_buf_req.min_count; + enc->output_buf_req.max_count = DDL_MAX_BUFFER_COUNT; + enc->output_buf_req.align = DDL_LINEAR_BUFFER_ALIGN_BYTES; + enc->output_buf_req.size = y_cb_cr_size; + ddl_set_default_encoder_metadata_buffer_size(enc); + enc->client_output_buf_req = enc->output_buf_req; +} + +void ddl_set_default_decoder_buffer_req(struct ddl_decoder_data *dec, + u32 estimate) +{ + size_t y_cb_cr_size; + u32 min_dpb; + struct vcd_property_frame_size *frame_size; + struct vcd_buffer_requirement *output_buf_req, *input_buf_req; + + if (!dec->codec_type.codec) + return; + + if (estimate) { + frame_size = &dec->client_frame_size; + output_buf_req = &dec->client_output_buf_req; + input_buf_req = &dec->client_input_buf_req; + min_dpb = ddl_decoder_min_num_dpb(dec); + y_cb_cr_size = ddl_get_yuv_buffer_size(frame_size, + &dec->buf_format, !dec->progressive_only); + } else { + frame_size = &dec->frame_size; + output_buf_req = &dec->actual_output_buf_req; + input_buf_req = &dec->actual_input_buf_req; + y_cb_cr_size = dec->y_cb_cr_size; + min_dpb = dec->min_dpb_num; + } + + memset(output_buf_req, 0, sizeof(struct vcd_buffer_requirement)); + + output_buf_req->min_count = min_dpb; + output_buf_req->actual_count = output_buf_req->min_count; + output_buf_req->max_count = DDL_MAX_BUFFER_COUNT; + output_buf_req->size = y_cb_cr_size; + if (dec->buf_format.buffer_format != VCD_BUFFER_FORMAT_NV12) + output_buf_req->align = DDL_TILE_BUFFER_ALIGN_BYTES; + else + output_buf_req->align = DDL_LINEAR_BUFFER_ALIGN_BYTES; + + ddl_set_default_decoder_metadata_buffer_size(dec, frame_size, + output_buf_req); + + dec->min_output_buf_req = *output_buf_req; + + memset(input_buf_req, 0, sizeof(struct vcd_buffer_requirement)); + + input_buf_req->min_count = 1; + input_buf_req->actual_count = input_buf_req->min_count; + input_buf_req->max_count = DDL_MAX_BUFFER_COUNT; + input_buf_req->size = y_cb_cr_size; + + if (input_buf_req->size >= (1280 * 720 * 3) >> 1) + input_buf_req->size >>= 1; + + input_buf_req->align = DDL_LINEAR_BUFFER_ALIGN_BYTES; + + dec->min_input_buf_req = *input_buf_req; +} + +size_t ddl_get_yuv_buffer_size(struct vcd_property_frame_size *frame_size, + struct vcd_property_buffer_format *buf_format, u32 interlace) +{ + u32 width = frame_size->stride; + u32 height = frame_size->scan_lines; + size_t sz; + + if (buf_format->buffer_format != VCD_BUFFER_FORMAT_NV12) { + size_t component_sz; + u32 width_round_up; + u32 height_round_up; + u32 height_chroma = (height >> 1); + + width_round_up = DDL_TILE_ALIGN(width, DDL_TILE_ALIGN_WIDTH); + height_round_up = DDL_TILE_ALIGN(height, DDL_TILE_ALIGN_HEIGHT); + + component_sz = width_round_up * height_round_up; + component_sz = DDL_TILE_ALIGN(component_sz, + DDL_TILE_MULTIPLY_FACTOR); + + sz = (component_sz + DDL_TILE_BUF_ALIGN_GUARD_BYTES) & + DDL_TILE_BUF_ALIGN_MASK; + + height_round_up = DDL_TILE_ALIGN(height_chroma, + DDL_TILE_ALIGN_HEIGHT); + component_sz = width_round_up * height_round_up; + component_sz = DDL_TILE_ALIGN(component_sz, + DDL_TILE_MULTIPLY_FACTOR); + sz += component_sz; + } else { + sz = 
height * width; + sz += sz >> 1; + } + return sz; +} + +void ddl_calculate_stride(struct vcd_property_frame_size *frame_size, + u32 interlace) +{ + frame_size->stride = ((frame_size->width + 15) >> 4) << 4; + + if (interlace) + frame_size->scan_lines = ((frame_size->height + 31) >> 5) << 5; + else + frame_size->scan_lines = ((frame_size->height + 15) >> 4) << 4; +} + +static u32 ddl_valid_buffer_requirement(struct vcd_buffer_requirement + *orig, struct vcd_buffer_requirement *req) +{ + u32 status = false; + if (orig->max_count >= req->actual_count && + orig->actual_count <= req->actual_count && + orig->align <= req->align && orig->size <= req->size) { + status = true; + } else { + pr_err("ddl_valid_buf_req:Failed\n"); + } + return status; +} + +static u32 ddl_decoder_min_num_dpb(struct ddl_decoder_data *dec) +{ + u32 min_dpb = 0; + switch (dec->codec_type.codec) { + default: + case VCD_CODEC_MPEG4: + case VCD_CODEC_MPEG2: + case VCD_CODEC_DIVX_4: + case VCD_CODEC_DIVX_5: + case VCD_CODEC_DIVX_6: + case VCD_CODEC_XVID: + min_dpb = 3; + break; + case VCD_CODEC_H263: + min_dpb = 2; + break; + case VCD_CODEC_VC1: + case VCD_CODEC_VC1_RCV: + min_dpb = 4; + break; + case VCD_CODEC_H264: + { + u32 yuv_size = (dec->client_frame_size.height * + dec->client_frame_size.width * 3) >> 1; + min_dpb = 6912000 / yuv_size; + if (min_dpb > 16) + min_dpb = 16; + + min_dpb += 2; + break; + } + } + return min_dpb; +} + +static u32 ddl_set_dec_buffers(struct ddl_decoder_data *dec, + struct ddl_property_dec_pic_buffers *dpb) +{ + u32 vcd_status = VCD_S_SUCCESS; + u32 i; + for (i = 0; !vcd_status && i < dpb->no_of_dec_pic_buf; ++i) { + if (!IS_ALIGNED(dpb->dec_pic_buffers[i].vcd_frm.phys_addr, + dec->client_output_buf_req.align) || + dpb->dec_pic_buffers[i].vcd_frm.alloc_len < + dec->client_output_buf_req.size) { + vcd_status = VCD_ERR_ILLEGAL_PARM; + pr_err("ddl_set_prop:" + "Dpb_align_fail_or_alloc_size_small\n"); + return vcd_status; + } + } + + if (dec->dp_buf.no_of_dec_pic_buf) { + kfree(dec->dp_buf.dec_pic_buffers); + dec->dp_buf.dec_pic_buffers = NULL; + dec->dp_buf.no_of_dec_pic_buf = 0; + } + dec->dp_buf.dec_pic_buffers = kmalloc(dpb->no_of_dec_pic_buf * + sizeof(struct ddl_frame_data_tag), GFP_KERNEL); + + if (!dec->dp_buf.dec_pic_buffers) { + pr_err("ddl_dec_set_prop:" + "Dpb_container_alloc_failed\n"); + return VCD_ERR_ALLOC_FAIL; + } + dec->dp_buf.no_of_dec_pic_buf = dpb->no_of_dec_pic_buf; + for (i = 0; i < dpb->no_of_dec_pic_buf; ++i) + dec->dp_buf.dec_pic_buffers[i] = dpb->dec_pic_buffers[i]; + + dec->dpb_mask.client_mask = 0; + dec->dpb_mask.hw_mask = 0; + dec->dynamic_prop_change = 0; + return VCD_S_SUCCESS; +} + +void ddl_set_initial_default_values(struct ddl_client_context *ddl) +{ + if (ddl->decoding) { + ddl->codec_data.decoder.codec_type.codec = VCD_CODEC_MPEG4; + ddl_set_default_dec_property(ddl); + } else { + struct ddl_encoder_data *enc = &(ddl->codec_data.encoder); + enc->codec_type.codec = VCD_CODEC_MPEG4; + + enc->target_bit_rate.target_bitrate = 64000; + enc->frame_size.width = 176; + enc->frame_size.height = 144; + enc->frame_size.stride = 176; + enc->frame_size.scan_lines = 144; + enc->frame_rate.fps_numerator = 30; + enc->frame_rate.fps_denominator = 1; + ddl_set_default_enc_property(ddl); + } +} diff --git a/drivers/misc/video_core/720p/ddl/vcd_ddl_utils.c b/drivers/misc/video_core/720p/ddl/vcd_ddl_utils.c new file mode 100644 index 0000000000000..922f071ed75d5 --- /dev/null +++ b/drivers/misc/video_core/720p/ddl/vcd_ddl_utils.c @@ -0,0 +1,154 @@ +/* Copyright (c) 2010, Code 
Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include "video_core_type.h"
+#include "vcd_ddl_utils.h"
+#include "vcd_ddl_metadata.h"
+
+#if DEBUG
+#define DBG(x...) printk(KERN_DEBUG x)
+#else
+#define DBG(x...)
+#endif
+
+#define ERR(x...) printk(KERN_ERR x)
+
+#ifdef CORE_TIMING_INFO
+static unsigned int g_ddl_dec_t1, g_ddl_enc_t1;
+static unsigned int g_ddl_dec_ttotal, g_ddl_enc_ttotal;
+static unsigned int g_ddl_dec_count, g_ddl_enc_count;
+#endif
+
+size_t npelly_size[] = {
+	0x100000,
+	0x080000,
+	0x51c00,
+	DDL_CONTEXT_MEMORY,
+	DDL_DB_LINE_BUF_SIZE,
+	DDL_MPEG4_DATA_PARTITION_BUF_SIZE,
+	DDL_METADATA_TOTAL_INPUTBUFSIZE,
+	DDL_DBG_CORE_DUMP_SIZE,
+	0x040000,
+	DDL_ENC_SEQHEADER_SIZE,
+};
+
+struct ddl_dma_buffer npelly_b[30];
+
+u32 npelly_init(void) {
+	int i;
+	printk("\nnpelly npelly max_key = %d\n", npelly_max_key);
+	for (i = 0; i < npelly_max_key; i++) {
+		struct ddl_dma_buffer *b = &npelly_b[i];
+		b->size = npelly_size[i];
+		b->virt_addr = dma_alloc_coherent(NULL, b->size,
+			&b->phys_addr, GFP_KERNEL);
+		if (!b->virt_addr) {
+			printk("\nnpelly %s: Could not allocate %d for %d\n",
+				__FUNCTION__, b->size, i);
+			return -1;
+		}
+		printk("\nnpelly ALLOC %d for %d\n", b->size, i);
+		memset(b->virt_addr, 0, b->size);
+	}
+	return 0;
+}
+
+void *ddl_dma_alloc(struct ddl_dma_buffer *b, size_t sz, enum npelly_key key)
+{
+	printk("\nnpelly RETRIEVE %d for %d\n", sz, key);
+
+	if (sz > npelly_b[key].size) {
+		printk("\nnpelly OH SHIT, %d > %d for %d\n", sz, npelly_b[key].size, key);
+		BUG_ON(true);
+	}
+	*b = npelly_b[key];
+	b->size = sz;
+	memset(b->virt_addr, 0, sz);
+
+	return b->virt_addr;
+}
+
+void ddl_dma_free(struct ddl_dma_buffer *b)
+{
+	printk("\nnpelly RELEASE %d\n", b->size);
+
+	b->virt_addr = NULL;
+	b->size = 0;
+}
+
+#ifdef CORE_TIMING_INFO
+void ddl_get_core_start_time(u8 codec_type)
+{
+	u32 *ddl_t1 = NULL;
+	if (!codec_type)
+		ddl_t1 = &g_ddl_dec_t1;
+	else if (codec_type == 1)
+		ddl_t1 = &g_ddl_enc_t1;
+
+	if (!*ddl_t1) {
+		struct timeval ddl_tv;
+		do_gettimeofday(&ddl_tv);
+		*ddl_t1 = (ddl_tv.tv_sec * 1000) + (ddl_tv.tv_usec / 1000);
+	}
+}
+
+void ddl_calc_core_time(u8 codec_type)
+{
+	u32 *ddl_t1 = NULL, *ddl_ttotal = NULL,
+		*ddl_count = NULL;
+	if (!codec_type) {
+		DBG("\n720p Core Decode ");
+		ddl_t1 = &g_ddl_dec_t1;
+		ddl_ttotal = &g_ddl_dec_ttotal;
+		ddl_count = &g_ddl_dec_count;
+	} else if (codec_type == 1) {
+		DBG("\n720p Core Encode ");
+		ddl_t1 = &g_ddl_enc_t1;
+		ddl_ttotal = &g_ddl_enc_ttotal;
+		ddl_count = &g_ddl_enc_count;
+	}
+
+	if (*ddl_t1) {
+		int ddl_t2;
+		struct timeval ddl_tv;
+		do_gettimeofday(&ddl_tv);
+		ddl_t2 = (ddl_tv.tv_sec * 1000) + (ddl_tv.tv_usec / 1000);
+		*ddl_ttotal += (ddl_t2 - *ddl_t1);
+		*ddl_count = *ddl_count + 1;
+		DBG("time %u, average time %u, count %u",
+			ddl_t2 - *ddl_t1, (*ddl_ttotal)/(*ddl_count),
+			*ddl_count);
+		*ddl_t1 = 0;
+	}
+}
+
+void ddl_reset_time_variables(u8 codec_type)
+{
+	if (!codec_type) {
+
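+		/* codec_type convention in this file: 0 = decoder,
+		 * 1 = encoder. */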
DBG("\n Reset Decoder time variables"); + g_ddl_dec_t1 = 0; + g_ddl_dec_ttotal = 0; + g_ddl_dec_count = 0; + } else if (codec_type == 1) { + DBG("\n Reset Encoder time variables "); + g_ddl_enc_t1 = 0; + g_ddl_enc_ttotal = 0; + g_ddl_enc_count = 0; + } +} +#endif diff --git a/drivers/misc/video_core/720p/ddl/vcd_ddl_utils.h b/drivers/misc/video_core/720p/ddl/vcd_ddl_utils.h new file mode 100644 index 0000000000000..bf83a735a7977 --- /dev/null +++ b/drivers/misc/video_core/720p/ddl/vcd_ddl_utils.h @@ -0,0 +1,61 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#ifndef _VCD_DDL_UTILS_H_ +#define _VCD_DDL_UTILS_H_ + +#include "vcd_ddl_core.h" +#include "vcd_ddl.h" + +//TODO get rid of this hack +enum npelly_key { + npelly_dec_dpb = 0, + npelly_dec_ref, + npelly_dec_h264, + npelly_context, + npelly_dbl, + npelly_mpeg4, + npelly_meta, + npelly_debug, + npelly_enc_dpb, + npelly_enc_seq, + npelly_max_key, +}; + +void *ddl_dma_alloc(struct ddl_dma_buffer *, size_t, enum npelly_key key); +void ddl_dma_free(struct ddl_dma_buffer *); + +#ifdef CORE_TIMING_INFO +void ddl_get_core_start_time(u8 codec_type); + +void ddl_calc_core_time(u8 codec_type); + +void ddl_reset_time_variables(u8 codec_type); +#endif + +#endif diff --git a/drivers/misc/video_core/720p/ddl/video_core_720p.c b/drivers/misc/video_core/720p/ddl/video_core_720p.c new file mode 100644 index 0000000000000..053a3a8d7cade --- /dev/null +++ b/drivers/misc/video_core/720p/ddl/video_core_720p.c @@ -0,0 +1,754 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include + +#include "video_core_type.h" +#include "video_core_720p.h" + +#define VIDC_720P_VERSION_STRING "VIDC_V1.0" + +unsigned long vid_c_base_addr; + +void vidc_720p_set_device_virtual_base(void *virt_addr) +{ + vid_c_base_addr = (unsigned long)virt_addr; +} + +static inline void vidc_720p_write(unsigned long offset, unsigned long val) +{ + pr_debug("REG 0x%08lx: write 0x%08lx\n", offset, val); + mb(); + iowrite32(val, vid_c_base_addr + offset); +} + +static inline unsigned long vidc_720p_read(unsigned long offset) +{ + unsigned long val; + mb(); + val = ioread32(vid_c_base_addr + offset); + pr_debug("REG 0x%08lx: read 0x%08lx\n", offset, val); + return val; +} + +void vidc_720p_init(char **ppsz_version, size_t sz, phys_addr_t phys_addr, + enum vidc_720p_endian_type dma_endian, u32 interrupt_off, + enum vidc_720p_interrupt_level_selection_type interrupt_sel, + u32 interrupt_mask) +{ + if (ppsz_version) + *ppsz_version = VIDC_720P_VERSION_STRING; + + if (interrupt_sel == VIDC_720P_INTERRUPT_LEVEL_SEL) + vidc_720p_write(0x0504, 0); + else + vidc_720p_write(0x0504, 1); + + if (interrupt_off) + vidc_720p_write(0x0500, 1); + else + vidc_720p_write(0x0500, 0); + + vidc_720p_write(0x0508, 1); + + vidc_720p_write(0x0518, 0); + + vidc_720p_write(0x0518, interrupt_mask); + + vidc_720p_write(0x0044, dma_endian); + + vidc_720p_write(0x0138, 0); + + vidc_720p_write(0x0110, 1); + + vidc_720p_write(0x000C, sz / 4); /* word size */ + + vidc_720p_write(0x0014, phys_addr); + + vidc_720p_write(0x0020, 0); + + vidc_720p_write(0x0000, 1); +} + +u32 vidc_720p_do_sw_reset(void) +{ + u32 fw_start; + udelay(5); + vidc_720p_write(0x0108, 0); + udelay(5); + vidc_720p_write(0x0134, 0); + udelay(5); + vidc_720p_write(0x0130, 1); + udelay(15); + vidc_720p_write(0x0130, 0); + udelay(5); + fw_start = vidc_720p_read(0x0134); + + if (!fw_start) { + pr_debug("VIDC-SW-RESET-FAILS!\n"); + return false; + } + return true; +} + +u32 vidc_720p_reset_is_success() +{ + u32 stagecounter; + stagecounter = vidc_720p_read(0x0414); + stagecounter &= 0xff; + if (stagecounter != 0xe5) { + pr_debug("VIDC-CPU_RESET-FAILS!\n"); + vidc_720p_write(0x0108, 0); + msleep(10); + return false; + } + return true; +} + +void vidc_720p_start_cpu(enum vidc_720p_endian_type dma_endian, + phys_addr_t icontext_bufferstart, phys_addr_t debug_core_dump_addr, + size_t debug_buffer_size) +{ + u32 dbg_info_input0_reg = 0x1; + + vidc_720p_write(0x0110, 0); + vidc_720p_write(0x0230, icontext_bufferstart); + vidc_720p_write(0x0044, dma_endian); + if (debug_buffer_size) { + dbg_info_input0_reg = (debug_buffer_size << 0x10) + | (0x2 << 1) | 0x1; + vidc_720p_write(0x0D10, debug_core_dump_addr); + } + vidc_720p_write(0x0D0C, dbg_info_input0_reg); + vidc_720p_write(0x0108, 1); +} + +u32 vidc_720p_cpu_start() +{ + u32 fw_status; + + fw_status = vidc_720p_read(0x0C14); + if (fw_status != 0x02) + return false; + return true; +} + + +void vidc_720p_stop_fw(void) +{ + vidc_720p_write(0x0134, 0); + vidc_720p_write(0x0108, 0); +} + +void vidc_720p_get_interrupt_status(u32 *interrupt_status, + u32 *cmd_err_status, u32 *disp_pic_err_status, u32 *op_failed) +{ + u32 err_status; + + *interrupt_status = vidc_720p_read(0x0514); + err_status = vidc_720p_read(0x0E9C); + *cmd_err_status = err_status & 0xffff; + *disp_pic_err_status = (err_status 
& 0xffff0000) >> 16; + *op_failed = (vidc_720p_read(0x0EC0) & 0x2) >> 1; +} + +void vidc_720p_interrupt_done_clear(void) +{ + vidc_720p_write(0x0508, 1); + vidc_720p_write(0x0104, 4); +} + +void vidc_720p_submit_command(u32 ch_id, u32 cmd_id) +{ + u32 fw_status; + + vidc_720p_write(0x0104, ch_id); + vidc_720p_write(0x0D00, cmd_id); + + fw_status = vidc_720p_read(0x0C14); + vidc_720p_write(0x0D1C, fw_status); +} + +u32 vidc_720p_engine_reset(u32 ch_id, enum vidc_720p_endian_type dma_endian, + enum vidc_720p_interrupt_level_selection_type interrupt_sel, + u32 interrupt_mask) +{ + u32 op_done; + u32 counter = 0; + + pr_debug("ENG-RESET!!\n"); + /* issue the engine reset command */ + vidc_720p_submit_command(ch_id, VIDC_720P_CMD_MFC_ENGINE_RESET); + + do { + udelay(20); + op_done = vidc_720p_read(0x050C); + counter++; + } while (!op_done && counter < 10); + + if (!op_done) + return false; /* reset fails */ + + /* write invalid channel id */ + vidc_720p_write(0x0104, 4); + + /* Set INT_PULSE_SEL */ + if (interrupt_sel == VIDC_720P_INTERRUPT_LEVEL_SEL) + vidc_720p_write(0x0504, 0); + else + vidc_720p_write(0x0504, 1); + + if (!interrupt_mask) { + /* Disable interrupt */ + vidc_720p_write(0x0500, 1); + } else { + /* Enable interrupt */ + vidc_720p_write(0x0500, 0); + } + + /* Clear any pending interrupt */ + vidc_720p_write(0x0508, 1); + + /* Set INT_ENABLE_REG */ + vidc_720p_write(0x0518, interrupt_mask); + + /* Sets the DMA endianness */ + vidc_720p_write(0x0044, dma_endian); + + /* return engine reset success */ + return true ; +} + +void vidc_720p_set_channel(u32 ch_id, enum vidc_720p_enc_dec_selection_type + enc_dec_sel, enum vidc_720p_codec_type codec, phys_addr_t pi_fw, + size_t firmware_size) +{ + u32 std_sel = codec; + + vidc_720p_write(0x012C, 0); + + if (enc_dec_sel) + std_sel |= 0x10; + + vidc_720p_write(0x0100, std_sel); + + switch (codec) { + default: + case VIDC_720P_DIVX: + case VIDC_720P_XVID: + case VIDC_720P_MPEG4: + if (enc_dec_sel == VIDC_720P_ENCODER) + vidc_720p_write(0x0200, pi_fw); + else + vidc_720p_write(0x0204, pi_fw); + break; + case VIDC_720P_H264: + if (enc_dec_sel == VIDC_720P_ENCODER) + vidc_720p_write(0x0208, pi_fw); + else + vidc_720p_write(0x020C, pi_fw); + break; + case VIDC_720P_H263: + if (enc_dec_sel == VIDC_720P_ENCODER) + vidc_720p_write(0x0200, pi_fw); + else + vidc_720p_write(0x0218, pi_fw); + break; + case VIDC_720P_VC1: + vidc_720p_write(0x0210, pi_fw); + break; + case VIDC_720P_MPEG2: + vidc_720p_write(0x40293, pi_fw); + break; + } + vidc_720p_write(0x000C, firmware_size / 4); /* word size */ + + vidc_720p_submit_command(ch_id, VIDC_720P_CMD_CHSET); +} + +void vidc_720p_encode_set_profile(u32 profile, u32 level) +{ + u32 profile_level = profile|(level << 0x8); + + vidc_720p_write(0x0300, profile_level); +} + +void vidc_720p_set_frame_size(u32 size_x, u32 size_y) +{ + vidc_720p_write(0x0118, size_x); + + vidc_720p_write(0x011C, size_y); +} + +void vidc_720p_encode_set_fps(u32 rc_frame_rate) +{ + vidc_720p_write(0x0D14, rc_frame_rate); +} + +void vidc_720p_encode_set_short_header(u32 short_header) +{ + vidc_720p_write(0x0318, short_header); +} + +void vidc_720p_encode_set_vop_time(u32 vop_time_resolution, + u32 vop_time_increment) +{ + u32 enable_vop, vop_timing_reg; + + if (!vop_time_resolution) + vidc_720p_write(0x0E00, 0x0); + else { + enable_vop = 0x1; + vop_timing_reg = (enable_vop << 0x1f) | + (vop_time_resolution << 0x10) | vop_time_increment; + vidc_720p_write(0x0E00, vop_timing_reg); + } +} + +void vidc_720p_encode_set_hec_period(u32 
hec_period) +{ + vidc_720p_write(0x0EB0, hec_period); +} + +void vidc_720p_encode_set_qp_params(u32 max_qp, u32 min_qp) +{ + u32 qp = min_qp | (max_qp << 0x8); + + vidc_720p_write(0x0A0C, qp); +} + +void vidc_720p_encode_set_rc_config(u32 enable_frame_level_rc, + u32 enable_mb_level_rc_flag, u32 iframe_qp, u32 pframe_qp) +{ + u32 rc_config = iframe_qp; + + if (enable_frame_level_rc) + rc_config |= (0x1 << 0x9); + + if (enable_mb_level_rc_flag) + rc_config |= (0x1 << 0x8); + + vidc_720p_write(0x0A00, rc_config); + vidc_720p_write(0x0A04, pframe_qp); +} + +void vidc_720p_encode_set_bit_rate(u32 target_bitrate) +{ + vidc_720p_write(0x0A08, target_bitrate); +} + +void vidc_720p_encoder_set_param_change(u32 enc_param_change) +{ + vidc_720p_write(0x0E08, enc_param_change); +} + +void vidc_720p_encode_set_control_param(u32 param_val) +{ + vidc_720p_write(0x0EC8, param_val); +} + +void vidc_720p_encode_set_frame_level_rc_params(u32 reaction_coeff) +{ + vidc_720p_write(0x0A10, reaction_coeff); +} + +void vidc_720p_encode_set_mb_level_rc_params(u32 dark_region_as_flag, + u32 smooth_region_as_flag, u32 static_region_as_flag, + u32 activity_region_flag) +{ + u32 mb_level_rc = 0x0; + + if (activity_region_flag) + mb_level_rc |= 0x1; + if (static_region_as_flag) + mb_level_rc |= (0x1 << 0x1); + if (smooth_region_as_flag) + mb_level_rc |= (0x1 << 0x2); + if (dark_region_as_flag) + mb_level_rc |= (0x1 << 0x3); + /* Write MB level rate control */ + vidc_720p_write(0x0A14, mb_level_rc); +} + +void vidc_720p_encode_set_entropy_control(enum vidc_720p_entropy_sel_type + entropy_sel, enum vidc_720p_cabac_model_type cabac_model_number) +{ + u32 num; + u32 entropy_params = entropy_sel; + + /* Set Model Number */ + if (entropy_sel == VIDC_720P_ENTROPY_SEL_CABAC) { + num = (u32)cabac_model_number; + entropy_params |= (num << 0x2); + } + /* Set Entropy parameters */ + vidc_720p_write(0x0310, entropy_params); +} + +void vidc_720p_encode_set_db_filter_control(enum vidc_720p_DBConfig_type + db_config, u32 slice_alpha_offset, u32 slice_beta_offset) +{ + u32 deblock_params; + + deblock_params = db_config; + deblock_params |= + (slice_beta_offset << 0x2) | (slice_alpha_offset << 0x7); + + /* Write deblocking control settings */ + vidc_720p_write(0x0314, deblock_params); +} + +void vidc_720p_encode_set_intra_refresh_mb_number(u32 cir_mb_number) +{ + vidc_720p_write(0x0810, cir_mb_number); +} + +void vidc_720p_encode_set_multi_slice_info(enum vidc_720p_MSlice_selection_type + m_slice_sel, u32 multi_slice_size) +{ + switch (m_slice_sel) { + case VIDC_720P_MSLICE_BY_MB_COUNT: + vidc_720p_write(0x0EA8, 0x1); + vidc_720p_write(0x1517, m_slice_sel); + vidc_720p_write(0x0324, multi_slice_size); + break; + case VIDC_720P_MSLICE_BY_BYTE_COUNT: + vidc_720p_write(0x0EA8, 0x1); + vidc_720p_write(0x1517, m_slice_sel); + vidc_720p_write(0x0328, multi_slice_size); + break; + case VIDC_720P_MSLICE_BY_GOB: + vidc_720p_write(0x0EA8, 0x1); + break; + default: + case VIDC_720P_MSLICE_OFF: + vidc_720p_write(0x0EA8, 0x0); + break; + } +} + +void vidc_720p_encode_set_dpb_buffer(dma_addr_t pi_enc_dpb_addr, + size_t alloc_len) +{ + vidc_720p_write(0x080C, pi_enc_dpb_addr); + vidc_720p_write(0x0ED4, alloc_len); +} + +void vidc_720p_encode_set_i_period(u32 period) +{ + vidc_720p_write(0x0308, period); +} + +void vidc_720p_encode_init_codec(u32 ch_id, + enum vidc_720p_memory_access_method_type memory_access_model) +{ + vidc_720p_write(0x0600, memory_access_model); + vidc_720p_submit_command(ch_id, VIDC_720P_CMD_INITCODEC); +} + +void 
vidc_720p_encode_unalign_bitstream(u32 upper_unalign_word, + u32 lower_unalign_word) +{ + vidc_720p_write(0x0EA0, upper_unalign_word); + vidc_720p_write(0x0EA4, lower_unalign_word); +} + +void vidc_720p_encode_set_seq_header_buffer(phys_addr_t ext_buffer_start, + phys_addr_t ext_buffer_end, u32 start_byte_num) +{ + vidc_720p_write(0x0018, ext_buffer_start); + + vidc_720p_write(0x0024, ext_buffer_start); + + vidc_720p_write(0x001C, ext_buffer_end); + + vidc_720p_write(0x005C, start_byte_num); +} + +void vidc_720p_encode_frame(u32 ch_id, phys_addr_t ext_buffer_start, + phys_addr_t ext_buffer_end, u32 start_byte_number, phys_addr_t y_addr, + phys_addr_t c_addr) +{ + vidc_720p_write(0x0018, ext_buffer_start); + + vidc_720p_write(0x001C, ext_buffer_end); + + vidc_720p_write(0x0024, ext_buffer_start); + + vidc_720p_write(0x005C, start_byte_number); + + vidc_720p_write(0x99105, y_addr); + + vidc_720p_write(0x0804, c_addr); + + vidc_720p_submit_command(ch_id, VIDC_720P_CMD_FRAMERUN); +} + +void vidc_720p_encode_get_header(u32 *pi_enc_header_size) +{ + *pi_enc_header_size = vidc_720p_read(0x0060); +} + +void vidc_720p_enc_frame_info(struct vidc_720p_enc_frame_info *enc_frame_info) +{ + enc_frame_info->enc_size = vidc_720p_read(0x0058); + + enc_frame_info->frame_type = vidc_720p_read(0x0EBC); + + enc_frame_info->frame_type &= 0x03; + + enc_frame_info->metadata_exists = vidc_720p_read(0x0EB8); +} + +void vidc_720p_decode_bitstream_header(u32 ch_id, u32 dec_unit_size, + u32 start_byte_num, u32 ext_buffer_start, u32 ext_buffer_end, + enum vidc_720p_memory_access_method_type memory_access_model) +{ + vidc_720p_write(0x0E04, 0x0); + + vidc_720p_write(0x0018, ext_buffer_start); + + vidc_720p_write(0x001C, ext_buffer_end); + + vidc_720p_write(0x0024, ext_buffer_end); + + vidc_720p_write(0x0054, dec_unit_size); + + vidc_720p_write(0x005C, start_byte_num); + + vidc_720p_write(0x0600, memory_access_model); + + vidc_720p_submit_command(ch_id, VIDC_720P_CMD_INITCODEC); +} + +void vidc_720p_decode_get_seq_hdr_info(struct vidc_720p_seq_hdr_info_type + *seq_hdr_info) +{ + unsigned long tmp; + + seq_hdr_info->img_size_x = vidc_720p_read(0x0118); + + seq_hdr_info->img_size_y = vidc_720p_read(0x011C); + + seq_hdr_info->min_num_dpb = vidc_720p_read(0x0E10); + + seq_hdr_info->min_dpb_size = vidc_720p_read(0x0C10); + + seq_hdr_info->dec_frm_size = vidc_720p_read(0x0C08); + + tmp = vidc_720p_read(0x0C0C); + seq_hdr_info->profile = tmp & 0x1f; + seq_hdr_info->level = (tmp & 0xff00) >> 8; + + tmp = vidc_720p_read(0x0408); + seq_hdr_info->progressive = (tmp & 0x4) >> 2; + /* bit 3 is for crop existence */ + seq_hdr_info->crop_exists = (tmp & 0x8) >> 3; + + if (seq_hdr_info->crop_exists) { + /* read the cropping information */ + tmp = vidc_720p_read(0x0C00); + seq_hdr_info->crop_right_offset = (tmp & 0xffff0000) >> 0x10; + seq_hdr_info->crop_left_offset = tmp & 0xffff; + tmp = vidc_720p_read(0x0C04); + seq_hdr_info->crop_bottom_offset = (tmp & 0xffff0000) >> 0x10; + seq_hdr_info->crop_top_offset = tmp & 0xffff; + } + /* Read the MPEG4 data partitioning indication */ + seq_hdr_info->data_partitioned = (vidc_720p_read(0x0EBC) & 0x8) >> 3; +} + +void vidc_720p_decode_set_dpb_release_buffer_mask(u32 dpb_release_buffer_mask) +{ + vidc_720p_write(0x0E98, dpb_release_buffer_mask); +} + +void vidc_720p_decode_set_dpb_buffers(u32 i, phys_addr_t dpb_buffer) +{ + vidc_720p_write(0x0E18 + sizeof(i) * i, dpb_buffer); +} + +void vidc_720p_decode_set_comv_buffer(phys_addr_t pi_dpb_comv_buffer, + size_t alloc_len) +{ + 
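+	/* Presumably the co-located motion-vector (comv) area for the DPB:
+	 * program its base address and allocated length. */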
vidc_720p_write(0x0904, pi_dpb_comv_buffer); + + vidc_720p_write(0x0D08, alloc_len); +} + +void vidc_720p_decode_set_dpb_details(u32 num_dpb, size_t alloc_len, + phys_addr_t ref_buffer) +{ + vidc_720p_write(0x0900, ref_buffer); + + vidc_720p_write(0x0908, 0); + + vidc_720p_write(0x0E14, num_dpb); + + vidc_720p_write(0x0ED4, alloc_len); +} + +void vidc_720p_decode_set_mpeg4Post_filter(u32 enable_post_filter) +{ + if (enable_post_filter) + vidc_720p_write(0x0124, 0x1); + else + vidc_720p_write(0x0124, 0x0); +} + +void vidc_720p_decode_set_error_control(u32 enable_error_control) +{ + if (enable_error_control) + vidc_720p_write(0x013C, 0); + else + vidc_720p_write(0x013C, 1); +} + +void vidc_720p_set_deblock_line_buffer(dma_addr_t pi_deblock_line_buffer_start, + size_t alloc_len) +{ + vidc_720p_write(0x0234, pi_deblock_line_buffer_start); + + vidc_720p_write(0x0D04, alloc_len); +} + +void vidc_720p_decode_set_mpeg4_data_partitionbuffer(dma_addr_t vsp_buf_start) +{ + vidc_720p_write(0x0230, vsp_buf_start); +} + +void vidc_720p_decode_setH264VSPBuffer(dma_addr_t pi_vsp_temp_buffer_start) +{ + vidc_720p_write(0x0230, pi_vsp_temp_buffer_start); +} + +void vidc_720p_decode_frame(u32 ch_id, phys_addr_t ext_buffer_start, + phys_addr_t ext_buffer_end, size_t dec_unit_size, u32 start_byte_num, + u32 input_frame_tag) +{ + vidc_720p_write(0x0018, ext_buffer_start); + + vidc_720p_write(0x001C, ext_buffer_end); + + vidc_720p_write(0x0024, ext_buffer_end); + + vidc_720p_write(0x005C, start_byte_num); + + vidc_720p_write(0x0EE0, input_frame_tag); + + vidc_720p_write(0x0054, dec_unit_size); + + vidc_720p_submit_command(ch_id, VIDC_720P_CMD_FRAMERUN); +} + +void vidc_720p_issue_eos(u32 ch_id) +{ + vidc_720p_write(0x0028, 0x1); + + vidc_720p_write(0x0054, 0); + + vidc_720p_submit_command(ch_id, VIDC_720P_CMD_FRAMERUN); +} + +void vidc_720p_eos_info(u32 *disp_status) +{ + *disp_status = vidc_720p_read(0x0408) & 0x3; +} + +void vidc_720p_decode_display_info(struct vidc_720p_dec_disp_info *disp_info) +{ + unsigned long tmp; + + tmp = vidc_720p_read(0x0408); + + disp_info->disp_status = (enum vidc_720p_display_status_type) + (tmp & 0x3); + + disp_info->disp_is_interlace = (tmp & 0x4) >> 2; + disp_info->crop_exists = (tmp & 0x8) >> 3; + + disp_info->resl_change = (tmp & 0x30) >> 4; + + disp_info->reconfig_flush_done = vidc_720p_read(0x0EC0) & 0x1; + + disp_info->img_size_x = vidc_720p_read(0x0118); + disp_info->img_size_y = vidc_720p_read(0x011C); + disp_info->y_addr = vidc_720p_read(0x0400); + disp_info->c_addr = vidc_720p_read(0x0404); + disp_info->tag_top = vidc_720p_read(0x0EA8); + disp_info->tag_bottom = vidc_720p_read(0x0EE4); + disp_info->pic_time_top = vidc_720p_read(0x0ED8); + disp_info->pic_time_bottom = vidc_720p_read(0x0EDC); + + if (disp_info->crop_exists) { + tmp = vidc_720p_read(0x0C00); + disp_info->crop_right_offset = (tmp & 0xffff0000) >> 0x10; + disp_info->crop_left_offset = tmp & 0xffff; + tmp = vidc_720p_read(0x0C04); + disp_info->crop_bottom_offset = (tmp & 0xffff0000) >> 0x10; + disp_info->crop_top_offset = tmp & 0xffff; + } + disp_info->metadata_exists = vidc_720p_read(0x0EB8); + + disp_info->input_bytes_consumed = vidc_720p_read(0x0C08); + + disp_info->input_frame_num = vidc_720p_read(0x0410); + + disp_info->input_frame_type = vidc_720p_read(0x0EBC) & 0x7; + + disp_info->input_is_interlace = (disp_info->input_frame_type & 0x4) >> + 2; + + disp_info->input_frame_type &= 0x3; +} + +void vidc_720p_decode_skip_frm_details(phys_addr_t *free_luma_dpb) +{ + u32 disp_frm_type; + + disp_frm_type = 
vidc_720p_read(0x0EB4); + + if (disp_frm_type == VIDC_720P_NOTCODED) + *free_luma_dpb = vidc_720p_read(0x0C18); +} + +void vidc_720p_metadata_enable(u32 flag, phys_addr_t input_buffer) +{ + vidc_720p_write(0x0EC4, flag); + vidc_720p_write(0x0ED0, input_buffer); +} + +void vidc_720p_decode_dynamic_req_reset(void) +{ + vidc_720p_write(0x0EE8, 0x0); + vidc_720p_write(0x0EAC, 0x0); + vidc_720p_write(0x0028, 0x0); +} + +void vidc_720p_decode_dynamic_req_set(u32 property) +{ + if (property == VIDC_720P_FLUSH_REQ) + vidc_720p_write(0x0EE8, 0x1); + else if (property == VIDC_720P_EXTRADATA) + vidc_720p_write(0x0EAC, 0x1); +} + +void vidc_720p_decode_setpassthrough_start(phys_addr_t pass_startaddr) +{ + vidc_720p_write(0x0D18, pass_startaddr); +} diff --git a/drivers/misc/video_core/720p/ddl/video_core_720p.h b/drivers/misc/video_core/720p/ddl/video_core_720p.h new file mode 100644 index 0000000000000..14853f9502230 --- /dev/null +++ b/drivers/misc/video_core/720p/ddl/video_core_720p.h @@ -0,0 +1,395 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef VID_C_720P_H +#define VID_C_720P_H + +#include +#include +#include + +/** List all the levels and their register values */ + +#define VIDC_720P_PROFILE_MPEG4_SP 0 +#define VIDC_720P_PROFILE_MPEG4_ASP 1 +#define VIDC_720P_PROFILE_H264_BASELINE 0 +#define VIDC_720P_PROFILE_H264_MAIN 1 +#define VIDC_720P_PROFILE_H264_HIGH 2 +#define VIDC_720P_PROFILE_H263_BASELINE 0 + +#define VIDC_720P_PROFILE_VC1_SP 0 +#define VIDC_720P_PROFILE_VC1_MAIN 1 +#define VIDC_720P_PROFILE_VC1_ADV 2 +#define VIDC_720P_PROFILE_MPEG2_MAIN 4 +#define VIDC_720P_PROFILE_MPEG2_SP 5 + +#define VIDC_720P_MPEG4_LEVEL0 0 +#define VIDC_720P_MPEG4_LEVEL0b 9 +#define VIDC_720P_MPEG4_LEVEL1 1 +#define VIDC_720P_MPEG4_LEVEL2 2 +#define VIDC_720P_MPEG4_LEVEL3 3 +#define VIDC_720P_MPEG4_LEVEL3b 7 +#define VIDC_720P_MPEG4_LEVEL4a 4 +#define VIDC_720P_MPEG4_LEVEL5 5 +#define VIDC_720P_MPEG4_LEVEL6 6 + +#define VIDC_720P_H264_LEVEL1 10 +#define VIDC_720P_H264_LEVEL1b 9 +#define VIDC_720P_H264_LEVEL1p1 11 +#define VIDC_720P_H264_LEVEL1p2 12 +#define VIDC_720P_H264_LEVEL1p3 13 +#define VIDC_720P_H264_LEVEL2 20 +#define VIDC_720P_H264_LEVEL2p1 21 +#define VIDC_720P_H264_LEVEL2p2 22 +#define VIDC_720P_H264_LEVEL3 30 +#define VIDC_720P_H264_LEVEL3p1 31 +#define VIDC_720P_H264_LEVEL3p2 32 + +#define VIDC_720P_H263_LEVEL10 10 +#define VIDC_720P_H263_LEVEL20 20 +#define VIDC_720P_H263_LEVEL30 30 +#define VIDC_720P_H263_LEVEL40 40 +#define VIDC_720P_H263_LEVEL45 45 +#define VIDC_720P_H263_LEVEL50 50 +#define VIDC_720P_H263_LEVEL60 60 +#define VIDC_720P_H263_LEVEL70 70 + +#define VIDC_720P_VC1_LEVEL_LOW 0 +#define VIDC_720P_VC1_LEVEL_MED 2 +#define VIDC_720P_VC1_LEVEL_HIGH 4 +#define VIDC_720P_VC1_LEVEL0 0 +#define VIDC_720P_VC1_LEVEL1 1 +#define VIDC_720P_VC1_LEVEL2 2 +#define VIDC_720P_VC1_LEVEL3 3 +#define VIDC_720P_VC1_LEVEL4 4 + +#define VIDCL_720P_MPEG2_LEVEL_LOW 10 +#define VIDCL_720P_MPEG2_LEVEL_MAIN 8 +#define VIDCL_720P_MPEG2_LEVEL_HIGH14 6 + +#define VIDC_720P_CMD_CHSET 0x0 +#define VIDC_720P_CMD_CHEND 0x2 +#define VIDC_720P_CMD_INITCODEC 0x3 +#define VIDC_720P_CMD_FRAMERUN 0x4 +#define VIDC_720P_CMD_INITBUFFERS 0x5 +#define VIDC_720P_CMD_FRAMERUN_REALLOCATE 0x6 +#define VIDC_720P_CMD_MFC_ENGINE_RESET 0x7 + +enum vidc_720p_endian_type { + VIDC_720P_BIG_ENDIAN = 0x0, + VIDC_720P_LITTLE_ENDIAN = 0x1 +}; + +enum vidc_720p_memory_access_method_type { + VIDC_720P_TILE_LINEAR = 0, + VIDC_720P_TILE_16x16 = 2, + VIDC_720P_TILE_64x32 = 3 +}; + +enum vidc_720p_interrupt_control_mode_type { + VIDC_720P_INTERRUPT_MODE = 0, + VIDC_720P_POLL_MODE = 1 +}; + +enum vidc_720p_interrupt_level_selection_type { + VIDC_720P_INTERRUPT_LEVEL_SEL = 0, + VIDC_720P_INTERRUPT_PULSE_SEL = 1 +}; + +#define VIDC_720P_INTR_BUFFER_FULL 0x002 +#define VIDC_720P_INTR_FW_DONE 0x020 +#define VIDC_720P_INTR_HEADER_DONE 0x040 +#define VIDC_720P_INTR_DMA_DONE 0x080 +#define VIDC_720P_INTR_FRAME_DONE 0x100 + +enum vidc_720p_enc_dec_selection_type { + VIDC_720P_DECODER = 0, + VIDC_720P_ENCODER = 1 +}; + +enum vidc_720p_codec_type { + VIDC_720P_MPEG4 = 0, + VIDC_720P_H264 = 1, + VIDC_720P_DIVX = 2, + VIDC_720P_XVID = 3, + VIDC_720P_H263 = 4, + VIDC_720P_MPEG2 = 5, + VIDC_720P_VC1 = 6 +}; + +enum vidc_720p_frame_type { + VIDC_720P_NOTCODED = 0, + VIDC_720P_IFRAME = 1, + VIDC_720P_PFRAME = 2, + VIDC_720P_BFRAME = 3 +}; + +enum vidc_720p_entropy_sel_type { + VIDC_720P_ENTROPY_SEL_CAVLC = 0, + VIDC_720P_ENTROPY_SEL_CABAC = 1 +}; + +enum vidc_720p_cabac_model_type { + VIDC_720P_CABAC_MODEL_NUMBER_0 = 0, + VIDC_720P_CABAC_MODEL_NUMBER_1 = 1, + 
VIDC_720P_CABAC_MODEL_NUMBER_2 = 2 +}; + +enum vidc_720p_DBConfig_type { + VIDC_720P_DB_ALL_BLOCKING_BOUNDARY = 0, + VIDC_720P_DB_DISABLE = 1, + VIDC_720P_DB_SKIP_SLICE_BOUNDARY = 2 +}; + +enum vidc_720p_MSlice_selection_type { + VIDC_720P_MSLICE_BY_MB_COUNT = 0, + VIDC_720P_MSLICE_BY_BYTE_COUNT = 1, + VIDC_720P_MSLICE_BY_GOB = 2, + VIDC_720P_MSLICE_OFF = 3 +}; + +enum vidc_720p_display_status_type { + VIDC_720P_DECODE_ONLY = 0, + VIDC_720P_DECODE_AND_DISPLAY = 1, + VIDC_720P_DISPLAY_ONLY = 2, + VIDC_720P_EMPTY_BUFFER = 3 +}; + +#define VIDC_720P_ENC_IFRAME_REQ 0x1 +#define VIDC_720P_ENC_IPERIOD_CHANGE 0x1 +#define VIDC_720P_ENC_FRAMERATE_CHANGE 0x2 +#define VIDC_720P_ENC_BITRATE_CHANGE 0x4 + +#define VIDC_720P_FLUSH_REQ 0x1 +#define VIDC_720P_EXTRADATA 0x2 + +#define VIDC_720P_METADATA_ENABLE_QP 0x01 +#define VIDC_720P_METADATA_ENABLE_CONCEALMB 0x02 +#define VIDC_720P_METADATA_ENABLE_VC1 0x04 +#define VIDC_720P_METADATA_ENABLE_SEI 0x08 +#define VIDC_720P_METADATA_ENABLE_VUI 0x10 +#define VIDC_720P_METADATA_ENABLE_ENCSLICE 0x20 +#define VIDC_720P_METADATA_ENABLE_PASSTHROUGH 0x40 + +struct vidc_720p_dec_disp_info { + enum vidc_720p_display_status_type disp_status; + u32 resl_change; + u32 reconfig_flush_done; + u32 img_size_x; + u32 img_size_y; + phys_addr_t y_addr; + phys_addr_t c_addr; + u32 tag_top; + u32 pic_time_top; + u32 disp_is_interlace; + u32 tag_bottom; + u32 pic_time_bottom; + u32 metadata_exists; + u32 crop_exists; + u32 crop_right_offset; + u32 crop_left_offset; + u32 crop_bottom_offset; + u32 crop_top_offset; + u32 input_frame_type; + u32 input_bytes_consumed; + u32 input_is_interlace; + u32 input_frame_num; +}; + +struct vidc_720p_seq_hdr_info_type { + u32 img_size_x; + u32 img_size_y; + u32 dec_frm_size; + u32 min_num_dpb; + u32 min_dpb_size; + u32 profile; + u32 level; + u32 progressive; + u32 data_partitioned; + u32 crop_exists; + u32 crop_right_offset; + u32 crop_left_offset; + u32 crop_bottom_offset; + u32 crop_top_offset; +}; + +struct vidc_720p_enc_frame_info { + u32 enc_size; + u32 frame_type; + u32 metadata_exists; +}; + +void vidc_720p_set_device_virtual_base(void *virt_addr); + +void vidc_720p_init(char **ppsz_version, size_t sz, phys_addr_t phys_addr, + enum vidc_720p_endian_type dma_endian, u32 interrupt_off, + enum vidc_720p_interrupt_level_selection_type interrupt_sel, + u32 interrupt_mask); + +u32 vidc_720p_do_sw_reset(void); + +u32 vidc_720p_reset_is_success(void); + +void vidc_720p_start_cpu(enum vidc_720p_endian_type dma_endian, + phys_addr_t icontext_bufferstart, phys_addr_t debug_core_dump_addr, + size_t debug_buffer_size); + +u32 vidc_720p_cpu_start(void); + +void vidc_720p_stop_fw(void); + +void vidc_720p_get_interrupt_status(u32 *interrupt_status, u32 *cmd_err_status, + u32 *disp_pic_err_status, u32 *op_failed); + +void vidc_720p_interrupt_done_clear(void); + +void vidc_720p_submit_command(u32 ch_id, u32 cmd_id); + + +void vidc_720p_set_channel(u32 ch_id, + enum vidc_720p_enc_dec_selection_type enc_dec_sel, + enum vidc_720p_codec_type codec, dma_addr_t pi_fw, + size_t firmware_size); + +u32 vidc_720p_engine_reset(u32 ch_id, + enum vidc_720p_endian_type dma_endian, + enum vidc_720p_interrupt_level_selection_type interrupt_sel, + u32 interrupt_mask +); + +void vidc_720p_encode_set_profile(u32 profile, u32 level); + +void vidc_720p_set_frame_size(u32 size_x, u32 size_y); + +void vidc_720p_encode_set_fps(u32 rc_frame_rate); + +void vidc_720p_encode_set_vop_time(u32 vop_time_resolution, + u32 vop_time_increment); + +void vidc_720p_encode_set_hec_period(u32 
hec_period); + +void vidc_720p_encode_set_short_header(u32 short_header); + +void vidc_720p_encode_set_qp_params(u32 max_qp, u32 min_qp); + +void vidc_720p_encode_set_rc_config(u32 enable_frame_level_rc, + u32 enable_mb_level_rc_flag, u32 iframe_qp, u32 pframe_qp); + +void vidc_720p_encode_set_bit_rate(u32 target_bitrate); + +void vidc_720p_encoder_set_param_change(u32 enc_param_change); + +void vidc_720p_encode_set_control_param(u32 param_val); + +void vidc_720p_encode_set_frame_level_rc_params(u32 reaction_coeff); + +void vidc_720p_encode_set_mb_level_rc_params(u32 dark_region_as_flag, + u32 smooth_region_as_flag, u32 static_region_as_flag, + u32 activity_region_flag); + +void vidc_720p_encode_set_entropy_control(enum vidc_720p_entropy_sel_type + entropy_sel, enum vidc_720p_cabac_model_type cabac_model_number); + +void vidc_720p_encode_set_db_filter_control(enum vidc_720p_DBConfig_type + db_config, u32 slice_alpha_offset, u32 slice_beta_offset); + +void vidc_720p_encode_set_intra_refresh_mb_number(u32 cir_mb_number); + +void vidc_720p_encode_set_multi_slice_info(enum vidc_720p_MSlice_selection_type + m_slice_sel, u32 multi_slice_size); + +void vidc_720p_encode_set_dpb_buffer(phys_addr_t pi_enc_dpb_addr, + size_t alloc_len); + +void vidc_720p_set_deblock_line_buffer(phys_addr_t pi_deblock_line_buffer_start, + size_t alloc_len); + +void vidc_720p_encode_set_i_period(u32 period); + +void vidc_720p_encode_init_codec(u32 ch_id, + enum vidc_720p_memory_access_method_type memory_access_model); + +void vidc_720p_encode_unalign_bitstream(u32 upper_unalign_word, + u32 lower_unalign_word); + +void vidc_720p_encode_set_seq_header_buffer(phys_addr_t ext_buffer_start, + phys_addr_t ext_buffer_end, u32 start_byte_num); + +void vidc_720p_encode_frame(u32 ch_id, phys_addr_t ext_buffer_start, + phys_addr_t ext_buffer_end, u32 start_byte_number, phys_addr_t y_addr, + phys_addr_t c_addr); + +void vidc_720p_encode_get_header(u32 *pi_enc_header_size); + +void vidc_720p_enc_frame_info(struct vidc_720p_enc_frame_info *enc_frame_info); + +void vidc_720p_decode_bitstream_header(u32 ch_id, u32 dec_unit_size, + u32 start_byte_num, u32 ext_buffer_start, u32 ext_buffer_end, + enum vidc_720p_memory_access_method_type memory_access_model); + +void vidc_720p_decode_get_seq_hdr_info(struct vidc_720p_seq_hdr_info_type + *seq_hdr_info); + +void vidc_720p_decode_set_dpb_release_buffer_mask(u32 dpb_release_buffer_mask); + +void vidc_720p_decode_set_dpb_buffers(u32 buf_index, phys_addr_t pi_dpb_buffer); + +void vidc_720p_decode_set_comv_buffer(dma_addr_t pi_dpb_comv_buffer, + size_t alloc_len); + +void vidc_720p_decode_set_dpb_details(u32 num_dpb, size_t alloc_len, + phys_addr_t ref_buffer); + +void vidc_720p_decode_set_mpeg4Post_filter(u32 enable_post_filter); + +void vidc_720p_decode_set_error_control(u32 enable_error_control); + +void vidc_720p_decode_set_mpeg4_data_partitionbuffer(dma_addr_t vsp_buf_start); + +void vidc_720p_decode_setH264VSPBuffer(dma_addr_t pi_vsp_temp_buffer_start); + +void vidc_720p_decode_frame(u32 ch_id, phys_addr_t ext_buffer_start, + phys_addr_t ext_buffer_end, size_t dec_unit_size, u32 start_byte_num, + u32 input_frame_tag); + +void vidc_720p_issue_eos(u32 ch_id); +void vidc_720p_eos_info(u32 *disp_status); + +void vidc_720p_decode_display_info(struct vidc_720p_dec_disp_info *disp_info); + +void vidc_720p_decode_skip_frm_details(phys_addr_t *free_luma_dpb); + +void vidc_720p_metadata_enable(u32 flag, phys_addr_t input_buffer); + +void vidc_720p_decode_dynamic_req_reset(void); + +void 
vidc_720p_decode_dynamic_req_set(u32 property); + +void vidc_720p_decode_setpassthrough_start(phys_addr_t pass_startaddr); + +#endif diff --git a/drivers/misc/video_core/720p/dec/vdec.c b/drivers/misc/video_core/720p/dec/vdec.c new file mode 100644 index 0000000000000..36ca5563b7407 --- /dev/null +++ b/drivers/misc/video_core/720p/dec/vdec.c @@ -0,0 +1,1525 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "vcd_ddl_firmware.h" +#include "video_core_type.h" +#include "vcd_api.h" +#include "vdec_internal.h" +#include "video_core_init.h" + + +#define VID_C_HCLK_RATE 170667000 + +#if DEBUG +#define DBG(x...) printk(KERN_DEBUG x) +#else +#define DBG(x...) +#endif + +#define INFO(x...) printk(KERN_INFO x) +#define ERR(x...) printk(KERN_ERR x) + +#define VID_DEC_NAME "msm_vidc_dec" + +static struct vid_dec_dev *vidc_dec_dev; +static dev_t vidc_dec_dev_num; +static struct class *vidc_dec_class; + +static s32 vid_dec_get_empty_client_index(void) +{ + u32 i, found = false; + + for (i = 0; i < VID_DEC_MAX_DECODER_CLIENTS; i++) { + if (!vidc_dec_dev->vdec_clients[i].vcd_handle) { + found = true; + break; + } + } + if (!found) { + ERR("%s():ERROR No space for new client\n", __func__); + return -1; + } + DBG("%s(): available client index = %u\n", __func__, i); + return i; +} + +u32 vid_dec_get_status(u32 status) +{ + u32 vdec_status; + + switch (status) { + case VCD_ERR_BITSTREAM_ERR: + case VCD_S_SUCCESS: + vdec_status = VDEC_S_SUCCESS; + break; + case VCD_ERR_FAIL: + vdec_status = VDEC_S_EFAIL; + break; + case VCD_ERR_ALLOC_FAIL: + vdec_status = VDEC_S_ENOSWRES; + break; + case VCD_ERR_ILLEGAL_OP: + vdec_status = VDEC_S_EINVALCMD; + break; + case VCD_ERR_ILLEGAL_PARM: + vdec_status = VDEC_S_EBADPARAM; + break; + case VCD_ERR_BAD_POINTER: + case VCD_ERR_BAD_HANDLE: + vdec_status = VDEC_S_EFATAL; + break; + case VCD_ERR_NOT_SUPPORTED: + vdec_status = VDEC_S_ENOTSUPP; + break; + case VCD_ERR_BAD_STATE: + vdec_status = VDEC_S_EINVALSTATE; + break; + case VCD_ERR_BUSY: + vdec_status = VDEC_S_BUSY; + break; + case VCD_ERR_MAX_CLIENT: + vdec_status = VDEC_S_ENOHWRES; + break; + default: + vdec_status = VDEC_S_EFAIL; + break; + } + + return vdec_status; +} + +static void vid_dec_notify_client(struct video_client_ctx *client_ctx) +{ + if (client_ctx) + complete(&client_ctx->event); +} + +void vid_dec_vcd_open_done(struct video_client_ctx *client_ctx, + struct vcd_handle_container *handle_container) +{ + DBG("vid_dec_vcd_open_done\n"); + + if (!client_ctx) { + ERR("%s(): ERROR. 
client_ctx is NULL\n", __func__); + return; + } + + if (handle_container) + client_ctx->vcd_handle = handle_container->handle; + else + ERR("%s(): ERROR. handle_container is NULL\n", __func__); + + vid_dec_notify_client(client_ctx); +} + +static void vid_dec_input_frame_done(struct video_client_ctx *client_ctx, + u32 event, u32 status, struct vcd_frame_data *vcd_frame_data) +{ + struct vid_dec_msg *vdec_msg; + + if (!client_ctx || !vcd_frame_data) { + ERR("vid_dec_input_frame_done() NULL pointer\n"); + return; + } + + vdec_msg = kzalloc(sizeof(struct vid_dec_msg), GFP_KERNEL); + if (!vdec_msg) { + ERR("%s: cannot allocate vid_dec_msg buffer\n", __func__); + return; + } + + vdec_msg->vdec_msg_info.status_code = vid_dec_get_status(status); + + if (event == VCD_EVT_RESP_INPUT_DONE) { + vdec_msg->vdec_msg_info.msgcode = + VDEC_MSG_RESP_INPUT_BUFFER_DONE; + DBG("Send INPUT_DON message to client = %p\n", client_ctx); + + } else if (event == VCD_EVT_RESP_INPUT_FLUSHED) { + vdec_msg->vdec_msg_info.msgcode = VDEC_MSG_RESP_INPUT_FLUSHED; + DBG("Send INPUT_FLUSHED message to client = %p\n", client_ctx); + } else { + ERR("%s: invalid event type\n", __func__); + return; + } + + vdec_msg->vdec_msg_info.msgdata.input_frame_clientdata = + vcd_frame_data->client_data; + vdec_msg->vdec_msg_info.msgdatasize = sizeof(void *); + + mutex_lock(&client_ctx->msg_queue_lock); + list_add_tail(&vdec_msg->list, &client_ctx->msg_queue); + mutex_unlock(&client_ctx->msg_queue_lock); + wake_up(&client_ctx->msg_wait); +} + +static void vid_dec_output_frame_done(struct video_client_ctx *client_ctx, + u32 event, u32 status, struct vcd_frame_data *vcd_frame_data) +{ + struct vid_dec_msg *vdec_msg; + + void __user *user_addr; + void *kern_addr; + phys_addr_t phys_addr; + int pmem_fd; + struct file *file; + s32 buffer_index = -1; + struct vdec_output_frameinfo *frm; + + if (!client_ctx || !vcd_frame_data) { + ERR("%s: NULL pointer\n", __func__); + return; + } + + vdec_msg = kzalloc(sizeof(struct vid_dec_msg), GFP_KERNEL); + if (!vdec_msg) { + ERR("%s: cannot allocate vid_dec_msg buffer\n", __func__); + return; + } + + vdec_msg->vdec_msg_info.status_code = vid_dec_get_status(status); + + if (event == VCD_EVT_RESP_OUTPUT_DONE) { + vdec_msg->vdec_msg_info.msgcode = + VDEC_MSG_RESP_OUTPUT_BUFFER_DONE; + } else if (event == VCD_EVT_RESP_OUTPUT_FLUSHED) { + vdec_msg->vdec_msg_info.msgcode = VDEC_MSG_RESP_OUTPUT_FLUSHED; + } else { + ERR("QVD: vid_dec_output_frame_done invalid cmd type\n"); + return; + } + + kern_addr = vcd_frame_data->virt_addr; + + if (!vid_c_lookup_addr_table(client_ctx, BUFFER_TYPE_OUTPUT, false, + &user_addr, &kern_addr, &phys_addr, &pmem_fd, &file, + &buffer_index)) { + ERR("vid_dec_output_frame_done UVA can not be found\n"); + vdec_msg->vdec_msg_info.status_code = VDEC_S_EFATAL; + goto out; + } + + frm = &vdec_msg->vdec_msg_info.msgdata.output_frame; + /* Buffer address in user space */ + frm->user_addr = user_addr; + frm->phys_addr = vcd_frame_data->phys_addr; + /* Data length */ + frm->len = vcd_frame_data->data_len; + frm->flags = vcd_frame_data->flags; + /* timestamp pass-through from input frame */ + frm->time_stamp = vcd_frame_data->time_stamp; + /* Output frame client data */ + frm->client_data = vcd_frame_data->client_data; + /* Associated input frame client data */ + frm->input_frame_clientdata = (void *)vcd_frame_data->ip_frm_tag; + /* Decoded picture width and height */ + frm->framesize.bottom = vcd_frame_data->dec_op_prop.disp_frm.bottom; + frm->framesize.left = 
vcd_frame_data->dec_op_prop.disp_frm.left; + frm->framesize.right = vcd_frame_data->dec_op_prop.disp_frm.right; + frm->framesize.top = vcd_frame_data->dec_op_prop.disp_frm.top; + vdec_msg->vdec_msg_info.msgdatasize = sizeof(*frm); + +out: + mutex_lock(&client_ctx->msg_queue_lock); + list_add_tail(&vdec_msg->list, &client_ctx->msg_queue); + mutex_unlock(&client_ctx->msg_queue_lock); + wake_up(&client_ctx->msg_wait); +} + +static void vid_dec_lean_event(struct video_client_ctx *client_ctx, + u32 event, u32 status) +{ + struct vid_dec_msg *vdec_msg; + + if (!client_ctx) { + ERR("%s(): !client_ctx pointer\n", __func__); + return; + } + + vdec_msg = kzalloc(sizeof(struct vid_dec_msg), GFP_KERNEL); + if (!vdec_msg) { + ERR("%s(): cannot allocate vid_dec_msg buffer\n", __func__); + return; + } + + vdec_msg->vdec_msg_info.status_code = vid_dec_get_status(status); + + switch (event) { + case VCD_EVT_IND_RECONFIG: + INFO("msm_vidc_dec: Sending VDEC_MSG_EVT_CONFIG_CHANGED" + " to client\n"); + vdec_msg->vdec_msg_info.msgcode = VDEC_MSG_EVT_CONFIG_CHANGED; + break; + case VCD_EVT_IND_RESOURCES_LOST: + INFO("msm_vidc_dec: Sending VDEC_EVT_RESOURCES_LOST" + " to client\n"); + vdec_msg->vdec_msg_info.msgcode = VDEC_EVT_RESOURCES_LOST; + break; + case VCD_EVT_RESP_FLUSH_INPUT_DONE: + INFO("msm_vidc_dec: Sending VDEC_MSG_RESP_FLUSH_INPUT_DONE" + " to client\n"); + vdec_msg->vdec_msg_info.msgcode = + VDEC_MSG_RESP_FLUSH_INPUT_DONE; + break; + case VCD_EVT_RESP_FLUSH_OUTPUT_DONE: + INFO("msm_vidc_dec: Sending VDEC_MSG_RESP_FLUSH_OUTPUT_DONE" + " to client\n"); + vdec_msg->vdec_msg_info.msgcode = + VDEC_MSG_RESP_FLUSH_OUTPUT_DONE; + break; + case VCD_EVT_IND_HWERRFATAL: + INFO("msm_vidc_dec: Sending VDEC_MSG_EVT_HW_ERROR to client\n"); + vdec_msg->vdec_msg_info.msgcode = VDEC_MSG_EVT_HW_ERROR; + break; + case VCD_EVT_RESP_START: + INFO("msm_vidc_dec: Sending VDEC_MSG_RESP_START_DONE" + " to client\n"); + vdec_msg->vdec_msg_info.msgcode = VDEC_MSG_RESP_START_DONE; + break; + case VCD_EVT_RESP_STOP: + INFO("msm_vidc_dec: Sending VDEC_MSG_RESP_STOP_DONE" + " to client\n"); + vdec_msg->vdec_msg_info.msgcode = VDEC_MSG_RESP_STOP_DONE; + break; + case VCD_EVT_RESP_PAUSE: + INFO("msm_vidc_dec: Sending VDEC_MSG_RESP_PAUSE_DONE" + " to client\n"); + vdec_msg->vdec_msg_info.msgcode = VDEC_MSG_RESP_PAUSE_DONE; + break; + default: + ERR("%s() : unknown event type\n", __func__); + break; + } + + vdec_msg->vdec_msg_info.msgdatasize = 0; + mutex_lock(&client_ctx->msg_queue_lock); + list_add_tail(&vdec_msg->list, &client_ctx->msg_queue); + mutex_unlock(&client_ctx->msg_queue_lock); + wake_up(&client_ctx->msg_wait); +} + + +void vid_dec_vcd_cb(u32 event, u32 status, void *info, u32 size, void *handle, + void *const client_data) +{ + struct video_client_ctx *client_ctx = (struct video_client_ctx *) + client_data; + + DBG("Entering %s()\n", __func__); + + if (!client_ctx) { + ERR("%s(): client_ctx is NULL\n", __func__); + return; + } + + client_ctx->event_status = status; + + switch (event) { + case VCD_EVT_RESP_OPEN: + vid_dec_vcd_open_done(client_ctx, info); + break; + case VCD_EVT_RESP_INPUT_DONE: + case VCD_EVT_RESP_INPUT_FLUSHED: + vid_dec_input_frame_done(client_ctx, event, status, info); + break; + case VCD_EVT_RESP_OUTPUT_DONE: + case VCD_EVT_RESP_OUTPUT_FLUSHED: + vid_dec_output_frame_done(client_ctx, event, status, info); + break; + case VCD_EVT_RESP_PAUSE: + case VCD_EVT_RESP_STOP: + case VCD_EVT_RESP_FLUSH_INPUT_DONE: + case VCD_EVT_RESP_FLUSH_OUTPUT_DONE: + case VCD_EVT_IND_RECONFIG: + case VCD_EVT_IND_HWERRFATAL: 
+ case VCD_EVT_IND_RESOURCES_LOST: + vid_dec_lean_event(client_ctx, event, status); + break; + case VCD_EVT_RESP_START: + if (!client_ctx->seq_header_set) + vid_dec_lean_event(client_ctx, event, status); + else + vid_dec_notify_client(client_ctx); + break; + default: + ERR("%s(): Error - Invalid event type %u\n", __func__, event); + break; + } +} + +static u32 vid_dec_set_codec(struct video_client_ctx *client_ctx, + enum vdec_codec *vdec_codec_type) +{ + u32 result = true; + struct vcd_property_hdr vcd_property_hdr; + struct vcd_property_codec codec_type; + u32 vcd_status = VCD_ERR_FAIL; + + if (!client_ctx || !vdec_codec_type) + return false; + + vcd_property_hdr.id = VCD_I_CODEC; + vcd_property_hdr.sz = sizeof(struct vcd_property_codec); + + switch (*vdec_codec_type) { + case VDEC_CODECTYPE_MPEG4: + codec_type.codec = VCD_CODEC_MPEG4; + break; + case VDEC_CODECTYPE_H264: + codec_type.codec = VCD_CODEC_H264; + break; + case VDEC_CODECTYPE_DIVX_3: + codec_type.codec = VCD_CODEC_DIVX_3; + break; + case VDEC_CODECTYPE_XVID: + codec_type.codec = VCD_CODEC_XVID; + break; + case VDEC_CODECTYPE_H263: + codec_type.codec = VCD_CODEC_H263; + break; + case VDEC_CODECTYPE_MPEG2: + codec_type.codec = VCD_CODEC_MPEG2; + break; + case VDEC_CODECTYPE_VC1: + codec_type.codec = VCD_CODEC_VC1; + break; + default: + result = false; + break; + } + + if (result) { + vcd_status = vcd_set_property(client_ctx->vcd_handle, + &vcd_property_hdr, &codec_type); + if (vcd_status) + result = false; + } + return result; +} + +static u32 vid_dec_set_output_format(struct video_client_ctx *client_ctx, + enum vdec_output_format *output_format) +{ + u32 result = true; + struct vcd_property_hdr prop_hdr; + struct vcd_property_buffer_format buffer_format; + u32 vcd_status = VCD_ERR_FAIL; + + if (!client_ctx || !output_format) + return false; + + prop_hdr.id = VCD_I_BUFFER_FORMAT; + prop_hdr.sz = sizeof(struct vcd_property_buffer_format); + + switch (*output_format) { + case VDEC_YUV_FORMAT_NV12: + buffer_format.buffer_format = VCD_BUFFER_FORMAT_NV12; + break; + case VDEC_YUV_FORMAT_TILE_4x2: + buffer_format.buffer_format = VCD_BUFFER_FORMAT_TILE_4x2; + break; + default: + result = false; + break; + } + + if (!result) + return false; + + vcd_status = vcd_set_property(client_ctx->vcd_handle, &prop_hdr, + &buffer_format); + + //TODO fix false/true silliness + if (vcd_status) + return false; + else + return true; +} + +static u32 vid_dec_set_frame_resolution(struct video_client_ctx *client_ctx, + struct vdec_picsize *video_resolution) +{ + struct vcd_property_hdr prop_hdr; + struct vcd_property_frame_size res; + u32 vcd_status = VCD_ERR_FAIL; + + if (!client_ctx || !video_resolution) + return false; + + prop_hdr.id = VCD_I_FRAME_SIZE; + prop_hdr.sz = sizeof(struct vcd_property_frame_size); + res.width = video_resolution->frame_width; + res.height = video_resolution->frame_height; + + vcd_status = vcd_set_property(client_ctx->vcd_handle, &prop_hdr, &res); + + if (vcd_status) + return false; + else + return true; +} + +static u32 vid_dec_get_frame_resolution(struct video_client_ctx *client_ctx, + struct vdec_picsize *video_res) +{ + struct vcd_property_hdr prop_hdr; + struct vcd_property_frame_size frame_res; + u32 vcd_status = VCD_ERR_FAIL; + + if (!client_ctx || !video_res) + return false; + + prop_hdr.id = VCD_I_FRAME_SIZE; + prop_hdr.sz = sizeof(struct vcd_property_frame_size); + + vcd_status = vcd_get_property(client_ctx->vcd_handle, &prop_hdr, + &frame_res); + + video_res->frame_width = frame_res.width; + 
video_res->frame_height = frame_res.height; + video_res->scan_lines = frame_res.scan_lines; + video_res->stride = frame_res.stride; + + if (vcd_status) + return false; + else + return true; +} + +static u32 vid_dec_get_buffer_req(struct video_client_ctx *client_ctx, + struct vdec_allocatorproperty *vdec_buf_req) +{ + u32 vcd_status = VCD_ERR_FAIL; + struct vcd_buffer_requirement vcd_buf_req; + + if (!client_ctx || !vdec_buf_req) + return false; + + if (vdec_buf_req->buffer_type == VDEC_BUFFER_TYPE_INPUT) { + vcd_status = vcd_get_buffer_requirements(client_ctx->vcd_handle, + VCD_BUFFER_INPUT, &vcd_buf_req); + } else { + vcd_status = vcd_get_buffer_requirements(client_ctx->vcd_handle, + VCD_BUFFER_OUTPUT, &vcd_buf_req); + } + + if (vcd_status) + return false; + + vdec_buf_req->mincount = vcd_buf_req.min_count; + vdec_buf_req->maxcount = vcd_buf_req.max_count; + vdec_buf_req->actualcount = vcd_buf_req.actual_count; + vdec_buf_req->buffer_size = vcd_buf_req.size; + vdec_buf_req->alignment = vcd_buf_req.align; + vdec_buf_req->buf_poolid = vcd_buf_req.buf_pool_id; + + return true; +} + +static u32 vid_dec_set_buffer(struct video_client_ctx *client_ctx, + struct vdec_setbuffer_cmd *b_info) +{ + enum vcd_buffer_type buffer_type; + enum buffer_dir dir_buffer = BUFFER_TYPE_INPUT; + u32 vcd_status = VCD_ERR_FAIL; + + void __user *user_addr; + void *kern_addr; + phys_addr_t phys_addr; + unsigned long len; + int pmem_fd; + struct file *file; + struct buf_addr_table *addr_table; + s32 buffer_index = -1; + + if (!client_ctx || !b_info) + return false; + + user_addr = b_info->buffer.addr; + + if (b_info->buffer_type == VDEC_BUFFER_TYPE_OUTPUT) + dir_buffer = BUFFER_TYPE_OUTPUT; + + /* if buffer already set, ignore */ + if (vid_c_lookup_addr_table(client_ctx, dir_buffer, true, &user_addr, + &kern_addr, &phys_addr, &pmem_fd, &file, + &buffer_index)) { + DBG("%s: user_addr = %p is already set\n", __func__, user_addr); + return true; + } + + if (get_pmem_file(b_info->buffer.pmem_fd, (unsigned long *)&phys_addr, + (unsigned long *)&kern_addr, &len, &file)) { + ERR("%s: get_pmem_file failed\n", __func__); + return false; + } + put_pmem_file(file); + if (b_info->buffer_type == VDEC_BUFFER_TYPE_INPUT) { + buffer_type = VCD_BUFFER_INPUT; + client_ctx->num_of_input_buffers++; + if (client_ctx->num_of_input_buffers > MAX_VIDEO_NUM_OF_BUFF) { + ERR("%s(): num_of_input_buffers reached max value" + " MAX_VIDEO_NUM_OF_BUFF\n", __func__); + client_ctx->num_of_input_buffers--; + return false; + } + buffer_index = client_ctx->num_of_input_buffers - 1; + addr_table = &client_ctx->input_buf_addr_table[buffer_index]; + addr_table->user_addr = b_info->buffer.addr; + addr_table->kern_addr = kern_addr; + addr_table->phys_addr = phys_addr; + addr_table->pmem_fd = b_info->buffer.pmem_fd; + addr_table->file = file; + } else { + buffer_type = VCD_BUFFER_OUTPUT; + client_ctx->num_of_output_buffers++; + if (client_ctx->num_of_output_buffers > MAX_VIDEO_NUM_OF_BUFF) { + ERR("%s(): num_of_outut_buffers reached max value" + " MAX_VIDEO_NUM_OF_BUFF\n", __func__); + client_ctx->num_of_output_buffers--; + return false; + } + buffer_index = client_ctx->num_of_output_buffers - 1; + addr_table = &client_ctx->output_buf_addr_table[buffer_index]; + kern_addr = (u8 *)kern_addr + b_info->buffer.offset; + phys_addr += b_info->buffer.offset; + addr_table->user_addr = b_info->buffer.addr; + addr_table->kern_addr = kern_addr; + addr_table->phys_addr = phys_addr; + addr_table->pmem_fd = b_info->buffer.pmem_fd; + addr_table->file = file; + } + + 
vcd_status = vcd_set_buffer(client_ctx->vcd_handle, buffer_type, + kern_addr, b_info->buffer.sz); + + if (!vcd_status) + return true; + else + return false; +} + +static u32 vid_dec_free_buffer(struct video_client_ctx *client_ctx, + struct vdec_setbuffer_cmd *buffer_info) +{ + enum vcd_buffer_type buffer_type; + enum buffer_dir dir_buffer = BUFFER_TYPE_INPUT; + u32 vcd_status = VCD_ERR_FAIL; + void __user *user_addr; + void *kern_addr; + phys_addr_t phys_addr; + int pmem_fd; + struct file *file; + s32 buffer_index = -1; + + if (!client_ctx || !buffer_info) + return false; + + user_addr = buffer_info->buffer.addr; + + if (buffer_info->buffer_type == VDEC_BUFFER_TYPE_OUTPUT) + dir_buffer = BUFFER_TYPE_OUTPUT; + + /*If buffer already set, ignore */ + if (!vid_c_lookup_addr_table(client_ctx, dir_buffer, true, &user_addr, + &kern_addr, &phys_addr, &pmem_fd, &file, + &buffer_index)) { + DBG("%s: user_addr = %p is already set\n", __func__, user_addr); + return true; + } + + if (buffer_info->buffer_type == VDEC_BUFFER_TYPE_INPUT) + buffer_type = VCD_BUFFER_INPUT; + else + buffer_type = VCD_BUFFER_OUTPUT; + vcd_status = vcd_free_buffer(client_ctx->vcd_handle, buffer_type, + kern_addr); + + if (!vcd_status) + return true; + else + return false; +} + +static u32 vid_dec_pause_resume(struct video_client_ctx *client_ctx, u32 pause) +{ + u32 vcd_status; + + if (!client_ctx) { + ERR("%s: Invalid client_ctx\n", __func__); + return false; + } + + if (pause) { + INFO("msm_vidc_dec: PAUSE command from client = %p\n", + client_ctx); + vcd_status = vcd_pause(client_ctx->vcd_handle); + } else { + INFO("msm_vidc_dec: RESUME command from client = %p\n", + client_ctx); + vcd_status = vcd_resume(client_ctx->vcd_handle); + } + + if (vcd_status) + return false; + + return true; + +} +static u32 vid_dec_start(struct video_client_ctx *client_ctx) +{ + struct vid_dec_msg *vdec_msg = NULL; + u32 vcd_status; + + INFO("msm_vidc_dec: Inside %s\n", __func__); + if (!client_ctx) { + ERR("\n Invalid client_ctx"); + return false; + } + + if (!client_ctx->seq_header_set) { + INFO("%s: Calling decode_start()\n", __func__); + vcd_status = vcd_decode_start(client_ctx->vcd_handle, NULL); + + if (vcd_status) { + ERR("%s: vcd_decode_start failed vcd_status = %u\n", + __func__, vcd_status); + return false; + } + return true; + } + + INFO("%s(): Seq Hdr set: Send START_DONE to client\n", __func__); + vdec_msg = kzalloc(sizeof(*vdec_msg), GFP_KERNEL); + if (!vdec_msg) { + ERR("%s: cannot allocate buffer\n", __func__); + return false; + } + vdec_msg->vdec_msg_info.msgcode = VDEC_MSG_RESP_START_DONE; + vdec_msg->vdec_msg_info.status_code = VDEC_S_SUCCESS; + vdec_msg->vdec_msg_info.msgdatasize = 0; + mutex_lock(&client_ctx->msg_queue_lock); + list_add_tail(&vdec_msg->list, &client_ctx->msg_queue); + mutex_unlock(&client_ctx->msg_queue_lock); + + wake_up(&client_ctx->msg_wait); + + DBG("Send START_DONE message to client = %p\n", client_ctx); + + return true; +} + +static u32 vid_dec_stop(struct video_client_ctx *client_ctx) +{ + u32 vcd_status; + + INFO("msm_vidc_dec: Inside %s\n", __func__); + if (!client_ctx) { + ERR("Invalid client_ctx\n"); + return false; + } + + INFO("%s: Calling vcd_stop()\n", __func__); + vcd_status = vcd_stop(client_ctx->vcd_handle); + if (vcd_status) { + ERR("%s: vcd_stop failed %u\n", __func__, vcd_status); + return false; + } + DBG("Send STOP_DONE message to client = %p\n", client_ctx); + return true; +} + +static u32 vid_dec_decode_frame(struct video_client_ctx *client_ctx, + struct vdec_input_frameinfo 
*frm_info) +{ + struct vcd_frame_data vcd_input_buffer; + void *kern_addr; + void __user *user_addr; + phys_addr_t phys_addr; + int pmem_fd; + struct file *file; + s32 buffer_index = -1; + u32 vcd_status = VCD_ERR_FAIL; + + if (!client_ctx || !frm_info) + return false; + + user_addr = frm_info->user_addr; + + if (!vid_c_lookup_addr_table(client_ctx, BUFFER_TYPE_INPUT, true, + &user_addr, &kern_addr, &phys_addr, &pmem_fd, &file, + &buffer_index)) { + ERR("%s: kern_addr not found\n", __func__); + return false; + } + + /* kernel_vaddr is found. send the frame to VCD */ + memset((void *)&vcd_input_buffer, 0, sizeof(vcd_input_buffer)); + vcd_input_buffer.virt_addr = (u8 *)kern_addr + frm_info->pmem_offset; + vcd_input_buffer.offset = frm_info->offset; + vcd_input_buffer.client_data = frm_info->client_data; + vcd_input_buffer.ip_frm_tag = (u32)frm_info->client_data; + vcd_input_buffer.data_len = frm_info->data_len; + vcd_input_buffer.time_stamp = frm_info->timestamp; + /* Rely on VCD using the same flags as OMX */ + vcd_input_buffer.flags = frm_info->flags; + + vcd_status = vcd_decode_frame(client_ctx->vcd_handle, + &vcd_input_buffer); + + if (vcd_status) { + ERR("%s: vcd_decode_frame failed = %u\n", __func__, vcd_status); + return false; + } + return true; +} + +static u32 vid_dec_fill_output_buffer(struct video_client_ctx *client_ctx, + struct vdec_fillbuffer_cmd *fill_buffer_cmd) +{ + void *kern_addr; + void __user *user_addr; + phys_addr_t phys_addr; + int pmem_fd; + struct file *file; + s32 buffer_index = -1; + u32 vcd_status = VCD_ERR_FAIL; + struct vcd_frame_data vcd_frame; + + if (!client_ctx || !fill_buffer_cmd) + return false; + + user_addr = fill_buffer_cmd->buffer.addr; + + if (!vid_c_lookup_addr_table(client_ctx, BUFFER_TYPE_OUTPUT, true, + &user_addr, &kern_addr, &phys_addr, &pmem_fd, &file, + &buffer_index)) { + ERR("%s: kern_addr not found\n", __func__); + return false; + } + + memset((void *)&vcd_frame, 0, sizeof(vcd_frame)); + vcd_frame.virt_addr = kern_addr; + vcd_frame.client_data = fill_buffer_cmd->client_data; + vcd_frame.alloc_len = fill_buffer_cmd->buffer.sz; + + vcd_status = vcd_fill_output_buffer(client_ctx->vcd_handle, &vcd_frame); + if (vcd_status) { + ERR("%s: vcd_fill_output_buffer failed = %u\n", __func__, + vcd_status); + return false; + } + return true; +} + +static u32 vid_dec_flush(struct video_client_ctx *client_ctx, + enum vdec_bufferflush flush_dir) +{ + u32 vcd_status = VCD_ERR_FAIL; + + INFO("msm_vidc_dec: %s called with dir = %u\n", __func__, flush_dir); + if (!client_ctx) { + ERR("Invalid client_ctx\n"); + return false; + } + + switch (flush_dir) { + case VDEC_FLUSH_TYPE_INPUT: + vcd_status = vcd_flush(client_ctx->vcd_handle, VCD_FLUSH_INPUT); + break; + case VDEC_FLUSH_TYPE_OUTPUT: + vcd_status = vcd_flush(client_ctx->vcd_handle, + VCD_FLUSH_OUTPUT); + break; + case VDEC_FLUSH_TYPE_ALL: + vcd_status = vcd_flush(client_ctx->vcd_handle, VCD_FLUSH_ALL); + break; + default: + ERR("%s: Invalid flush cmd. flush_dir = %u\n", __func__, + flush_dir); + return false; + } + + if (vcd_status) { + ERR("%s: vcd_flush failed. 
vcd_status = %u flush_dir = %u\n", + __func__, vcd_status, flush_dir); + return false; + } + return true; +} + +static u32 vid_dec_msg_pending(struct video_client_ctx *client_ctx) +{ + u32 islist_empty = 0; + mutex_lock(&client_ctx->msg_queue_lock); + islist_empty = list_empty(&client_ctx->msg_queue); + mutex_unlock(&client_ctx->msg_queue_lock); + + if (islist_empty) { + DBG("%s: vid_dec msg queue empty\n", __func__); + if (client_ctx->stop_msg) { + DBG("%s: List empty and Stop Msg set\n", __func__); + return client_ctx->stop_msg; + } + } else { + DBG("%s: vid_dec msg queue Not empty\n", __func__); + } + + return !islist_empty; +} + +static u32 vid_dec_get_next_msg(struct video_client_ctx *client_ctx, + struct vdec_msginfo *vdec_msg_info) +{ + int rc; + struct vid_dec_msg *vid_dec_msg = NULL; + + if (!client_ctx) + return false; + + rc = wait_event_interruptible(client_ctx->msg_wait, + vid_dec_msg_pending(client_ctx)); + if (rc < 0 || client_ctx->stop_msg) { + DBG("rc = %d, stop_msg = %u\n", rc, client_ctx->stop_msg); + return false; + } + + mutex_lock(&client_ctx->msg_queue_lock); + if (!list_empty(&client_ctx->msg_queue)) { + DBG("%s(): After Wait\n", __func__); + vid_dec_msg = list_first_entry(&client_ctx->msg_queue, + struct vid_dec_msg, list); + list_del(&vid_dec_msg->list); + memcpy(vdec_msg_info, &vid_dec_msg->vdec_msg_info, + sizeof(struct vdec_msginfo)); + kfree(vid_dec_msg); + } + mutex_unlock(&client_ctx->msg_queue_lock); + return true; +} + +static int vid_dec_ioctl(struct inode *inode, struct file *file, + unsigned cmd, unsigned long arg) +{ + struct video_client_ctx *client_ctx = NULL; + struct vdec_ioctl_msg vdec_msg; + u32 vcd_status; + void *kern_addr; + phys_addr_t phys_addr; + struct file *pmem_file; + u32 result = true; + void __user *u_arg = (void __user *)arg; + + DBG("%s\n", __func__); + + if (_IOC_TYPE(cmd) != VDEC_IOCTL_MAGIC) + return -ENOTTY; + + client_ctx = (struct video_client_ctx *)file->private_data; + if (!client_ctx) { + ERR("!client_ctx. 
Cannot attach to device handle\n"); + return -ENODEV; + } + + switch (cmd) { + case VDEC_IOCTL_SET_CODEC: + { + enum vdec_codec codec_type; + struct vcd_property_meta_data_enable metadata_disable; + struct vcd_property_hdr header_type; + DBG("VDEC_IOCTL_SET_CODEC\n"); + if (copy_from_user(&vdec_msg, u_arg, sizeof(vdec_msg))) + return -EFAULT; + if (copy_from_user(&codec_type, vdec_msg.in, + sizeof(codec_type))) + return -EFAULT; + DBG("setting codec type = %u\n", codec_type); + result = vid_dec_set_codec(client_ctx, &codec_type); + if (!result) + return -EIO; + metadata_disable.meta_data_enable_flag = 0; + header_type.sz = sizeof(metadata_disable); + header_type.id = VCD_I_METADATA_ENABLE; + + vcd_status = vcd_set_property(client_ctx->vcd_handle, + &header_type, (void *)&metadata_disable); + + if (vcd_status) { + ERR("%s: vcd_set_property Failed for Meta Data Disable" + "\n", __func__); + return -ENODEV; + } + DBG("Disabled Meta Data\n"); + break; + } + case VDEC_IOCTL_SET_OUTPUT_FORMAT: + { + enum vdec_output_format out_format; + DBG("VDEC_IOCTL_SET_OUTPUT_FORMAT\n"); + if (copy_from_user(&vdec_msg, u_arg, sizeof(vdec_msg))) + return -EFAULT; + if (copy_from_user(&out_format, vdec_msg.in, + sizeof(out_format))) + return -EFAULT; + + result = vid_dec_set_output_format(client_ctx, &out_format); + + if (!result) + return -EIO; + break; + } + case VDEC_IOCTL_SET_PICRES: + { + struct vdec_picsize video_res; + DBG("VDEC_IOCTL_SET_PICRES\n"); + if (copy_from_user(&vdec_msg, u_arg, sizeof(vdec_msg))) + return -EFAULT; + if (copy_from_user(&video_res, vdec_msg.in, sizeof(video_res))) + return -EFAULT; + result = vid_dec_set_frame_resolution(client_ctx, + &video_res); + if (!result) + return -EIO; + break; + } + case VDEC_IOCTL_GET_PICRES: + { + struct vdec_picsize video_res; + DBG("VDEC_IOCTL_GET_PICRES\n"); + if (copy_from_user(&vdec_msg, u_arg, sizeof(vdec_msg))) + return -EFAULT; + if (copy_from_user(&video_res, vdec_msg.out, sizeof(video_res))) + return -EFAULT; + + result = vid_dec_get_frame_resolution(client_ctx, &video_res); + + if (!result) + return -EIO; + + if (copy_to_user(vdec_msg.out, &video_res, sizeof(video_res))) + return -EFAULT; + break; + } + case VDEC_IOCTL_SET_BUFFER_REQ: + { + //TODO unify these types + struct vdec_allocatorproperty vdec_buf_req; + struct vcd_buffer_requirement vcd_buf_req; + + if (copy_from_user(&vdec_msg, u_arg, sizeof(vdec_msg))) + return -EFAULT; + + if (copy_from_user(&vdec_buf_req, vdec_msg.in, + sizeof(vdec_buf_req))) + return -EFAULT; + + vcd_buf_req.actual_count = vdec_buf_req.actualcount; + vcd_buf_req.align = vdec_buf_req.alignment; + vcd_buf_req.max_count = vdec_buf_req.maxcount; + vcd_buf_req.min_count = vdec_buf_req.mincount; + vcd_buf_req.size = vdec_buf_req.buffer_size; + + switch (vdec_buf_req.buffer_type) { + case VDEC_BUFFER_TYPE_INPUT: + vcd_status = vcd_set_buffer_requirements( + client_ctx->vcd_handle, VCD_BUFFER_INPUT, + &vcd_buf_req); + break; + case VDEC_BUFFER_TYPE_OUTPUT: + vcd_status = vcd_set_buffer_requirements( + client_ctx->vcd_handle, VCD_BUFFER_OUTPUT, + &vcd_buf_req); + break; + default: + vcd_status = VCD_ERR_BAD_POINTER; + break; + } + + if (vcd_status) + return -EFAULT; + break; + } + case VDEC_IOCTL_GET_BUFFER_REQ: + { + struct vdec_allocatorproperty vdec_buf_req; + DBG("VDEC_IOCTL_GET_BUFFER_REQ\n"); + if (copy_from_user(&vdec_msg, u_arg, sizeof(vdec_msg))) + return -EFAULT; + if (copy_from_user(&vdec_buf_req, vdec_msg.out, + sizeof(vdec_buf_req))) + return -EFAULT; + + result = vid_dec_get_buffer_req(client_ctx, 
&vdec_buf_req); + if (!result) + return -EIO; + + if (copy_to_user(vdec_msg.out, &vdec_buf_req, + sizeof(vdec_buf_req))) + return -EFAULT; + break; + } + case VDEC_IOCTL_SET_BUFFER: + { + struct vdec_setbuffer_cmd setbuffer; + DBG("VDEC_IOCTL_SET_BUFFER\n"); + if (copy_from_user(&vdec_msg, u_arg, sizeof(vdec_msg))) + return -EFAULT; + if (copy_from_user(&setbuffer, vdec_msg.in, sizeof(setbuffer))) + return -EFAULT; + result = vid_dec_set_buffer(client_ctx, &setbuffer); + break; + } + case VDEC_IOCTL_FREE_BUFFER: + { + struct vdec_setbuffer_cmd setbuffer; + DBG("VDEC_IOCTL_FREE_BUFFER\n"); + if (copy_from_user(&vdec_msg, u_arg, sizeof(vdec_msg))) + return -EFAULT; + if (copy_from_user(&setbuffer, vdec_msg.in, sizeof(setbuffer))) + return -EFAULT; + + result = vid_dec_free_buffer(client_ctx, &setbuffer); + + if (!result) + return -EIO; + break; + } + case VDEC_IOCTL_CMD_START: + DBG("VDEC_IOCTL_CMD_START\n"); + result = vid_dec_start(client_ctx); + + if (!result) + return -EIO; + break; + case VDEC_IOCTL_CMD_STOP: + DBG("VDEC_IOCTL_CMD_STOP\n"); + result = vid_dec_stop(client_ctx); + + if (!result) + return -EIO; + break; + case VDEC_IOCTL_CMD_PAUSE: + DBG("VDEC_IOCTL_CMD_PAUSE\n"); + result = vid_dec_pause_resume(client_ctx, true); + + if (!result) + return -EIO; + break; + case VDEC_IOCTL_CMD_RESUME: + DBG("VDEC_IOCTL_CMD_RESUME\n"); + result = vid_dec_pause_resume(client_ctx, false); + + if (!result) + return -EIO; + break; + case VDEC_IOCTL_DECODE_FRAME: + { + struct vdec_input_frameinfo frm_info; + DBG("VDEC_IOCTL_DECODE_FRAME\n"); + if (copy_from_user(&vdec_msg, u_arg, sizeof(vdec_msg))) + return -EFAULT; + if (copy_from_user(&frm_info, vdec_msg.in, sizeof(frm_info))) + return -EFAULT; + + result = vid_dec_decode_frame(client_ctx, &frm_info); + + if (!result) + return -EIO; + break; + } + case VDEC_IOCTL_FILL_OUTPUT_BUFFER: + { + struct vdec_fillbuffer_cmd fill_cmd; + + DBG("VDEC_IOCTL_FILL_OUTPUT_BUFFER\n"); + if (copy_from_user(&vdec_msg, u_arg, sizeof(vdec_msg))) + return -EFAULT; + if (copy_from_user(&fill_cmd, vdec_msg.in, sizeof(fill_cmd))) + return -EFAULT; + result = vid_dec_fill_output_buffer(client_ctx, &fill_cmd); + if (!result) + return -EIO; + break; + } + case VDEC_IOCTL_CMD_FLUSH: + { + enum vdec_bufferflush flush_dir; + DBG("VDEC_IOCTL_CMD_FLUSH\n"); + if (copy_from_user(&vdec_msg, u_arg, sizeof(vdec_msg))) + return -EFAULT; + if (copy_from_user(&flush_dir, vdec_msg.in, sizeof(flush_dir))) + return -EFAULT; + + result = vid_dec_flush(client_ctx, flush_dir); + + if (!result) + return -EIO; + break; + } + case VDEC_IOCTL_GET_NEXT_MSG: + { + struct vdec_msginfo msg_info; + DBG("VDEC_IOCTL_GET_NEXT_MSG\n"); + if (copy_from_user(&vdec_msg, u_arg, sizeof(vdec_msg))) + return -EFAULT; + result = vid_dec_get_next_msg(client_ctx, &msg_info); + + if (!result) + return -EIO; + if (copy_to_user(vdec_msg.out, &msg_info, sizeof(msg_info))) + return -EFAULT; + break; + } + case VDEC_IOCTL_STOP_NEXT_MSG: + DBG("VDEC_IOCTL_STOP_NEXT_MSG\n"); + client_ctx->stop_msg = 1; + wake_up(&client_ctx->msg_wait); + break; + case VDEC_IOCTL_SET_SEQUENCE_HEADER: + { + struct vdec_seqheader vdec_seq_hdr; + struct vcd_sequence_hdr vcd_seq_hdr; + unsigned long sz; + DBG("VDEC_IOCTL_SET_SEQUENCE_HEADER\n"); + if (copy_from_user(&vdec_msg, u_arg, sizeof(vdec_msg))) { + ERR("Copy from user vdec_msg failed\n"); + return -EFAULT; + } + if (copy_from_user(&vdec_seq_hdr, vdec_msg.in, + sizeof(vdec_seq_hdr))) { + ERR("Copy from user seq_header failed\n"); + return -EFAULT; + } + if (!vdec_seq_hdr.sz) { + 
ERR("Seq len is zero\n"); + return -EFAULT; + } + + if (get_pmem_file(vdec_seq_hdr.pmem_fd, + (unsigned long *)&phys_addr, + (unsigned long *)&kern_addr, &sz, &pmem_file)) { + ERR("%s: get_pmem_file failed\n", __func__); + return false; + } + put_pmem_file(pmem_file); + + vcd_seq_hdr.sz = vdec_seq_hdr.sz; + kern_addr = (u8 *)kern_addr + vdec_seq_hdr.pmem_offset; + vcd_seq_hdr.addr = kern_addr; + if (!vcd_seq_hdr.addr) { + ERR("Sequence Header pointer failed\n"); + return -EFAULT; + } + client_ctx->seq_header_set = true; + if (vcd_decode_start(client_ctx->vcd_handle, &vcd_seq_hdr)) { + ERR("Decode start Failed\n"); + client_ctx->seq_header_set = false; + return -EFAULT; + } + DBG("Wait Client completion Sequence Header\n"); + wait_for_completion(&client_ctx->event); + vcd_seq_hdr.addr = NULL; + if (client_ctx->event_status) { + ERR("Set Seq Header status is failed"); + return -EFAULT; + } + break; + } + case VDEC_IOCTL_GET_NUMBER_INSTANCES: + DBG("VDEC_IOCTL_GET_NUMBER_INSTANCES\n"); + if (copy_from_user(&vdec_msg, u_arg, sizeof(vdec_msg))) + return -EFAULT; + if (copy_to_user(vdec_msg.out, &vidc_dec_dev->num_clients, + sizeof(vidc_dec_dev->num_clients))) + return -EFAULT; + break; + default: + ERR("%s(): Unsupported ioctl\n", __func__); + return -ENOTTY; + } + + return 0; +} + +static u32 vid_dec_close_client(struct video_client_ctx *client_ctx) +{ + u32 vcd_status; + int rc; + + INFO("msm_vidc_dec: Inside %s\n", __func__); + if (!client_ctx || !client_ctx->vcd_handle) { + ERR("Invalid client_ctx\n"); + return false; + } + + mutex_lock(&vidc_dec_dev->lock); + vcd_status = vcd_stop(client_ctx->vcd_handle); + + if (!vcd_status) { + rc = wait_for_completion_timeout(&client_ctx->event, + (5 * HZ) / 10); + if (!rc) + DBG("%s:ERROR vcd_stop time out rc = %d\n", __func__, + rc); + + if (client_ctx->event_status) + ERR("%s:ERROR vcd_stop event_status failure\n", + __func__); + } + vcd_status = vcd_close(client_ctx->vcd_handle); + + if (vcd_status) { + mutex_unlock(&vidc_dec_dev->lock); + return false; + } + memset((void *)client_ctx, 0, sizeof(*client_ctx)); + vidc_dec_dev->num_clients--; + mutex_unlock(&vidc_dec_dev->lock); + return true; +} + +static int vid_dec_open(struct inode *inode, struct file *file) +{ + int rc; + s32 client_index; + struct video_client_ctx *client_ctx; + u32 vcd_status = VCD_ERR_FAIL; + + INFO("msm_vidc_dec: Inside %s\n", __func__); + mutex_lock(&vidc_dec_dev->lock); + + if (vidc_dec_dev->num_clients == VID_DEC_MAX_DECODER_CLIENTS) { + ERR("%s: ERROR: max number of clients limit reached\n", + __func__); + mutex_unlock(&vidc_dec_dev->lock); + return -ENODEV; + } + +#ifndef USE_RES_TRACKER + DBG("Resource Tracker not in use"); + if (!vid_c_enable_clk(VID_C_HCLK_RATE)) { + ERR("%s: ERROR: clock enabled failed\n", __func__); + mutex_unlock(&vidc_dec_dev->lock); + return -ENODEV; + } +#endif + + DBG("Virtual Address of ioremap is %p\n", vidc_dec_dev->virt_base); + + if (!vidc_dec_dev->num_clients) { + rc = vcd_fw_prepare_all(); + if (rc) + return rc; + } + + client_index = vid_dec_get_empty_client_index(); + if (client_index == -1) { + ERR("%s: No free clients client_index == -1\n", __func__); + return -ENODEV; + } + client_ctx = &vidc_dec_dev->vdec_clients[client_index]; + vidc_dec_dev->num_clients++; + init_completion(&client_ctx->event); + mutex_init(&client_ctx->msg_queue_lock); + INIT_LIST_HEAD(&client_ctx->msg_queue); + init_waitqueue_head(&client_ctx->msg_wait); + client_ctx->stop_msg = 0; + + vcd_status = vcd_open(vidc_dec_dev->device_handle, true, + vid_dec_vcd_cb, 
client_ctx); + wait_for_completion(&client_ctx->event); + client_ctx->seq_header_set = false; + file->private_data = client_ctx; + mutex_unlock(&vidc_dec_dev->lock); + return 0; +} + +static int vid_dec_release(struct inode *inode, struct file *file) +{ + struct video_client_ctx *client_ctx = file->private_data; + + INFO("msm_vidc_dec: Inside %s\n", __func__); + vid_dec_close_client(client_ctx); +#ifndef USE_RES_TRACKER + vid_c_disable_clk(); +#endif + INFO("msm_vidc_dec: Return from %s\n", __func__); + return 0; +} + +static const struct file_operations vid_dec_fops = { + .owner = THIS_MODULE, + .open = vid_dec_open, + .release = vid_dec_release, + .ioctl = vid_dec_ioctl, +}; + +void vid_dec_interrupt_deregister(void) +{ +} + +void vid_dec_interrupt_register(void *device_name) +{ +} + +void vid_dec_interrupt_clear(void) +{ +} + +void *vid_dec_map_dev_base_addr(void *device_name) +{ + return vidc_dec_dev->virt_base; +} + +static int vid_dec_vcd_init(void) +{ + int rc; + struct vcd_init_config vcd_init_config; + u32 i; + + /* init_timer(&hw_timer); */ + INFO("msm_vidc_dec: Inside %s\n", __func__); + vidc_dec_dev->num_clients = 0; + + for (i = 0; i < VID_DEC_MAX_DECODER_CLIENTS; i++) { + memset((void *)&vidc_dec_dev->vdec_clients[i], 0, + sizeof(vidc_dec_dev->vdec_clients[i])); + } + + mutex_init(&vidc_dec_dev->lock); + vidc_dec_dev->virt_base = vid_c_get_ioaddr(); + DBG("%s: base address for VIDC core %p\n", __func__, + vidc_dec_dev->virt_base); + + if (!vidc_dec_dev->virt_base) { + ERR("%s: ioremap failed\n", __func__); + return -ENOMEM; + } + + vcd_init_config.device_name = "VID_C"; + vcd_init_config.pf_map_dev_base_addr = vid_dec_map_dev_base_addr; + vcd_init_config.pf_interrupt_clr = vid_dec_interrupt_clear; + vcd_init_config.pf_register_isr = vid_dec_interrupt_register; + vcd_init_config.pf_deregister_isr = vid_dec_interrupt_deregister; + vcd_init_config.pf_timer_create = vid_c_timer_create; + vcd_init_config.pf_timer_release = vid_c_timer_release; + vcd_init_config.pf_timer_start = vid_c_timer_start; + vcd_init_config.pf_timer_stop = vid_c_timer_stop; + + rc = vcd_init(&vcd_init_config, &vidc_dec_dev->device_handle); + if (rc) { + ERR("%s: vcd_init failed\n", __func__); + return -ENODEV; + } + return 0; +} + +static int __init vid_dec_init(void) +{ + int rc = 0; + struct device *class_devp; + + INFO("msm_vidc_dec: Inside %s\n", __func__); + vidc_dec_dev = kzalloc(sizeof(*vidc_dec_dev), GFP_KERNEL); + if (!vidc_dec_dev) { + ERR("%s Unable to allocate memory for vid_dec_dev\n", __func__); + return -ENOMEM; + } + + rc = alloc_chrdev_region(&vidc_dec_dev_num, 0, 1, VID_DEC_NAME); + if (rc < 0) { + ERR("%s: alloc_chrdev_region failed rc = %d\n", __func__, rc); + goto error_vid_dec_alloc_chrdev_region; + } + + vidc_dec_class = class_create(THIS_MODULE, VID_DEC_NAME); + if (IS_ERR(vidc_dec_class)) { + rc = PTR_ERR(vidc_dec_class); + ERR("%s: couldn't create vid_dec_class %d\n", __func__, rc); + goto error_vid_dec_class_create; + } + + class_devp = device_create(vidc_dec_class, NULL, vidc_dec_dev_num, NULL, + VID_DEC_NAME); + + if (IS_ERR(class_devp)) { + rc = PTR_ERR(class_devp); + ERR("%s: class device_create failed %d\n", __func__, rc); + goto error_vid_dec_class_device_create; + } + + vidc_dec_dev->device = class_devp; + + cdev_init(&vidc_dec_dev->cdev, &vid_dec_fops); + vidc_dec_dev->cdev.owner = THIS_MODULE; + rc = cdev_add(&vidc_dec_dev->cdev, vidc_dec_dev_num, 1); + + if (rc < 0) { + ERR("%s: cdev_add failed %d\n", __func__, rc); + goto error_vid_dec_cdev_add; + } + + return 
vid_dec_vcd_init(); + +error_vid_dec_cdev_add: + device_destroy(vidc_dec_class, vidc_dec_dev_num); +error_vid_dec_class_device_create: + class_destroy(vidc_dec_class); +error_vid_dec_class_create: + unregister_chrdev_region(vidc_dec_dev_num, 1); +error_vid_dec_alloc_chrdev_region: + kfree(vidc_dec_dev); + + return rc; +} + +static void __exit vid_dec_exit(void) +{ + INFO("msm_vidc_dec: Inside %s\n", __func__); + cdev_del(&(vidc_dec_dev->cdev)); + device_destroy(vidc_dec_class, vidc_dec_dev_num); + class_destroy(vidc_dec_class); + unregister_chrdev_region(vidc_dec_dev_num, 1); + kfree(vidc_dec_dev); + INFO("msm_vidc_dec: Return from %s\n", __func__); +} + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Video decoder driver"); +MODULE_VERSION("1.0"); + +module_init(vid_dec_init); +module_exit(vid_dec_exit); diff --git a/drivers/misc/video_core/720p/dec/vdec_internal.h b/drivers/misc/video_core/720p/dec/vdec_internal.h new file mode 100644 index 0000000000000..f6ce51027fa0c --- /dev/null +++ b/drivers/misc/video_core/720p/dec/vdec_internal.h @@ -0,0 +1,61 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#ifndef VDEC_INTERNAL_H +#define VDEC_INTERNAL_H + +#include +#include +#include "video_core_init.h" + +#define VID_DEC_MAX_DECODER_CLIENTS 16 + +struct vid_dec_msg { + struct list_head list; + struct vdec_msginfo vdec_msg_info; +}; + +struct vid_dec_dev { + struct cdev cdev; + struct device *device; + resource_size_t phys_base; + void __iomem *virt_base; + unsigned int irq; + struct clk *hclk; + struct clk *hclk_div2; + struct clk *pclk; + unsigned long hclk_rate; + struct mutex lock; + s32 device_handle; + struct video_client_ctx vdec_clients[VID_DEC_MAX_DECODER_CLIENTS]; + u32 num_clients; + void(*pf_timer_handler)(void *); +}; + +#endif diff --git a/drivers/misc/video_core/720p/enc/venc.c b/drivers/misc/video_core/720p/enc/venc.c new file mode 100644 index 0000000000000..6b81018e782d1 --- /dev/null +++ b/drivers/misc/video_core/720p/enc/venc.c @@ -0,0 +1,1486 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "vcd_ddl_firmware.h" +#include "video_core_type.h" +#include "vcd_api.h" +#include "venc_internal.h" +#include "video_core_init.h" + +#define VID_ENC_NAME "msm_vidc_enc" +#define VID_C_HCLK_RATE 170667000 + +#if DEBUG +#define DBG(x...) printk(KERN_DEBUG x) +#else +#define DBG(x...) +#endif + +#define INFO(x...) printk(KERN_INFO x) +#define ERR(x...) 
printk(KERN_ERR x) + +static struct vid_enc_dev *vidc_enc_dev; +static dev_t vidc_enc_dev_num; +static struct class *vid_enc_class; +static int vid_enc_ioctl(struct inode *inode, struct file *file, unsigned cmd, + unsigned long arg); +//TODO revisit stop_cmd +static int stop_cmd; + +static s32 vid_enc_get_empty_client_index(void) +{ + u32 i; + u32 found = false; + + for (i = 0; i < VID_ENC_MAX_ENCODER_CLIENTS; i++) { + if (!vidc_enc_dev->venc_clients[i].vcd_handle) { + found = true; + break; + } + } + if (!found) { + ERR("%s: ERROR No space for new client\n", __func__); + return -1; + } + DBG("%s: available client index = %u\n", __func__, i); + return i; +} + +//TODO collapse this status mapping +u32 vid_enc_get_status(u32 status) +{ + u32 venc_status; + + switch (status) { + case VCD_S_SUCCESS: + venc_status = VEN_S_SUCCESS; + break; + case VCD_ERR_FAIL: + venc_status = VEN_S_EFAIL; + break; + case VCD_ERR_ALLOC_FAIL: + venc_status = VEN_S_ENOSWRES; + break; + case VCD_ERR_ILLEGAL_OP: + venc_status = VEN_S_EINVALCMD; + break; + case VCD_ERR_ILLEGAL_PARM: + venc_status = VEN_S_EBADPARAM; + break; + case VCD_ERR_BAD_POINTER: + case VCD_ERR_BAD_HANDLE: + venc_status = VEN_S_EFATAL; + break; + case VCD_ERR_NOT_SUPPORTED: + venc_status = VEN_S_ENOTSUPP; + break; + case VCD_ERR_BAD_STATE: + venc_status = VEN_S_EINVALSTATE; + break; + case VCD_ERR_MAX_CLIENT: + venc_status = VEN_S_ENOHWRES; + break; + default: + venc_status = VEN_S_EFAIL; + break; + } + return venc_status; +} + +static void vid_enc_notify_client(struct video_client_ctx *client_ctx) +{ + if (client_ctx) + complete(&client_ctx->event); +} + +void vid_enc_vcd_open_done(struct video_client_ctx *client_ctx, + struct vcd_handle_container *handle_container) +{ + DBG("vid_enc_vcd_open_done\n"); + + if (!client_ctx) { + ERR("%s(): ERROR. client_ctx is NULL\n", __func__); + return; + } + if (handle_container) + client_ctx->vcd_handle = handle_container->handle; + else + ERR("%s: ERROR. 
handle_container is NULL\n", __func__); + vid_enc_notify_client(client_ctx); +} + +static void vid_enc_input_frame_done(struct video_client_ctx *client_ctx, + u32 event, u32 status, struct vcd_frame_data *vcd_frame_data) +{ + struct vid_enc_msg *venc_msg; + + if (!client_ctx || !vcd_frame_data) { + ERR("%s: NULL pointer\n", __func__); + return; + } + + venc_msg = kzalloc(sizeof(struct vid_enc_msg), GFP_KERNEL); + if (!venc_msg) { + ERR("%s: cannot allocate vid_enc_msg buffer\n", __func__); + return; + } + + venc_msg->venc_msg_info.statuscode = vid_enc_get_status(status); + + if (event == VCD_EVT_RESP_INPUT_DONE) { + venc_msg->venc_msg_info.msgcode = VEN_MSG_INPUT_BUFFER_DONE; + DBG("Send INPUT_DON message to client = %p\n", client_ctx); + } else if (event == VCD_EVT_RESP_INPUT_FLUSHED) { + venc_msg->venc_msg_info.msgcode = VEN_MSG_INPUT_BUFFER_DONE; + DBG("Send INPUT_FLUSHED message to client = %p\n", client_ctx); + } else { + ERR("vid_enc_input_frame_done(): invalid event type\n"); + return; + } + + venc_msg->venc_msg_info.buf.clientdata = vcd_frame_data->client_data; + venc_msg->venc_msg_info.msgdata_size = sizeof(struct vid_enc_msg); + + mutex_lock(&client_ctx->msg_queue_lock); + list_add_tail(&venc_msg->list, &client_ctx->msg_queue); + mutex_unlock(&client_ctx->msg_queue_lock); + wake_up(&client_ctx->msg_wait); +} + +static void vid_enc_output_frame_done(struct video_client_ctx *client_ctx, + u32 event, u32 status, struct vcd_frame_data *frm_data) +{ + struct vid_enc_msg *venc_msg; + void __user *user_addr; + void *kern_addr; + phys_addr_t phys_addr; + int pmem_fd; + struct file *file; + s32 buf_index = -1; + + if (!client_ctx || !frm_data) { + ERR("%s: NULL pointer\n", __func__); + return; + } + + venc_msg = kzalloc(sizeof(struct vid_enc_msg), GFP_KERNEL); + if (!venc_msg) { + ERR("%s: cannot allocate vid_enc_msg buffer\n", __func__); + return; + } + + venc_msg->venc_msg_info.statuscode = vid_enc_get_status(status); + + if (event == VCD_EVT_RESP_OUTPUT_DONE) { + venc_msg->venc_msg_info.msgcode = VEN_MSG_OUTPUT_BUFFER_DONE; + } else if (event == VCD_EVT_RESP_OUTPUT_FLUSHED) { + venc_msg->venc_msg_info.msgcode = VEN_MSG_OUTPUT_BUFFER_DONE; + } else { + ERR("QVD: vid_enc_output_frame_done invalid cmd type\n"); + return; + } + + kern_addr = frm_data->virt_addr; + + if (!vid_c_lookup_addr_table(client_ctx, BUFFER_TYPE_OUTPUT, false, + &user_addr, &kern_addr, &phys_addr, &pmem_fd, &file, + &buf_index)) { + ERR("vid_enc_output_frame_done UVA can not be found\n"); + venc_msg->venc_msg_info.statuscode = VEN_S_EFATAL; + goto out; + } + + + /* Buffer address in user space */ + venc_msg->venc_msg_info.buf.addr = user_addr; + venc_msg->venc_msg_info.buf.clientdata = frm_data->client_data; + /* Data length */ + venc_msg->venc_msg_info.buf.len = frm_data->data_len; + venc_msg->venc_msg_info.buf.flags = frm_data->flags; + /* time-stamp pass-through from input frame */ + venc_msg->venc_msg_info.buf.timestamp = frm_data->time_stamp; + /* Decoded picture width and height */ + venc_msg->venc_msg_info.msgdata_size = sizeof(struct venc_buffer); + +out: + mutex_lock(&client_ctx->msg_queue_lock); + list_add_tail(&venc_msg->list, &client_ctx->msg_queue); + mutex_unlock(&client_ctx->msg_queue_lock); + wake_up(&client_ctx->msg_wait); +} + +static void vid_enc_lean_event(struct video_client_ctx *client_ctx, + u32 event, u32 status) +{ + struct vid_enc_msg *venc_msg; + if (!client_ctx) { + ERR("%s(): !client_ctx pointer\n", __func__); + return; + } + + venc_msg = kzalloc(sizeof(struct vid_enc_msg), GFP_KERNEL); + 
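+	/* map the VCD completion event to the corresponding VEN_MSG_* code; these "lean" events carry only a status code and no payload */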
if (!venc_msg) { + ERR("%s(): cannot allocate vid_enc_msg buffer\n", __func__); + return; + } + + venc_msg->venc_msg_info.statuscode = vid_enc_get_status(status); + + switch (event) { + case VCD_EVT_RESP_FLUSH_INPUT_DONE: + INFO("%s: Sending VCD_EVT_RESP_FLUSH_INPUT_DONE to client\n", + __func__); + venc_msg->venc_msg_info.msgcode = VEN_MSG_FLUSH_INPUT_DONE; + break; + case VCD_EVT_RESP_FLUSH_OUTPUT_DONE: + INFO("%s: Sending VCD_EVT_RESP_FLUSH_OUTPUT_DONE to client\n", + __func__); + venc_msg->venc_msg_info.msgcode = VEN_MSG_FLUSH_OUPUT_DONE; + break; + case VCD_EVT_RESP_START: + INFO("%s: Sending VCD_EVT_RESP_START to client\n", __func__); + venc_msg->venc_msg_info.msgcode = VEN_MSG_START; + break; + case VCD_EVT_RESP_STOP: + INFO("%s: Sending VCD_EVT_RESP_STOP to client\n", __func__); + venc_msg->venc_msg_info.msgcode = VEN_MSG_STOP; + break; + case VCD_EVT_RESP_PAUSE: + INFO("%s: Sending VCD_EVT_RESP_PAUSE to client\n", __func__); + venc_msg->venc_msg_info.msgcode = VEN_MSG_PAUSE; + break; + default: + ERR("%s: unknown event type\n", __func__); + break; + } + + venc_msg->venc_msg_info.msgdata_size = 0; + + mutex_lock(&client_ctx->msg_queue_lock); + list_add_tail(&venc_msg->list, &client_ctx->msg_queue); + mutex_unlock(&client_ctx->msg_queue_lock); + wake_up(&client_ctx->msg_wait); +} + + +void vid_enc_vcd_cb(u32 event, u32 status, void *info, u32 size, void *handle, + void *const client_data) +{ + struct video_client_ctx *client_ctx = client_data; + + DBG("Entering %s\n", __func__); + + if (!client_ctx) { + ERR("%s: client_ctx is NULL\n", __func__); + return; + } + + client_ctx->event_status = status; + + switch (event) { + case VCD_EVT_RESP_OPEN: + vid_enc_vcd_open_done(client_ctx, info); + break; + case VCD_EVT_RESP_INPUT_DONE: + case VCD_EVT_RESP_INPUT_FLUSHED: + vid_enc_input_frame_done(client_ctx, event, status, info); + break; + case VCD_EVT_RESP_OUTPUT_DONE: + case VCD_EVT_RESP_OUTPUT_FLUSHED: + vid_enc_output_frame_done(client_ctx, event, status, info); + break; + case VCD_EVT_RESP_PAUSE: + case VCD_EVT_RESP_START: + case VCD_EVT_RESP_STOP: + case VCD_EVT_RESP_FLUSH_INPUT_DONE: + case VCD_EVT_RESP_FLUSH_OUTPUT_DONE: + case VCD_EVT_IND_RECONFIG: + case VCD_EVT_IND_HWERRFATAL: + case VCD_EVT_IND_RESOURCES_LOST: + vid_enc_lean_event(client_ctx, event, status); + break; + default: + ERR("%s: Error invalid event type %u\n", __func__, event); + break; + } +} + +static u32 vid_enc_msg_pending(struct video_client_ctx *client_ctx) +{ + u32 islist_empty = 0; + + mutex_lock(&client_ctx->msg_queue_lock); + islist_empty = list_empty(&client_ctx->msg_queue); + mutex_unlock(&client_ctx->msg_queue_lock); + + if (islist_empty) { + DBG("%s: vid_enc msg queue empty\n", __func__); + if (client_ctx->stop_msg) { + DBG("%s: List empty and Stop Msg set\n", __func__); + return client_ctx->stop_msg; + } + } else + DBG("%s: vid_enc msg queue Not empty\n", __func__); + + return !islist_empty; +} + +static u32 vid_enc_get_next_msg(struct video_client_ctx *client_ctx, + struct venc_msg *venc_msg_info) +{ + int rc; + struct vid_enc_msg *vid_enc_msg = NULL; + + if (!client_ctx) + return false; + + rc = wait_event_interruptible(client_ctx->msg_wait, + vid_enc_msg_pending(client_ctx)); + + if (rc < 0 || client_ctx->stop_msg) { + DBG("rc = %d, stop_msg = %u\n", rc, client_ctx->stop_msg); + return false; + } + + mutex_lock(&client_ctx->msg_queue_lock); + + if (!list_empty(&client_ctx->msg_queue)) { + DBG("%s: After Wait\n", __func__); + vid_enc_msg = list_first_entry(&client_ctx->msg_queue, + struct vid_enc_msg, 
list); + list_del(&vid_enc_msg->list); + memcpy(venc_msg_info, &vid_enc_msg->venc_msg_info, + sizeof(struct venc_msg)); + kfree(vid_enc_msg); + } + mutex_unlock(&client_ctx->msg_queue_lock); + return true; +} + +static u32 vid_enc_close_client(struct video_client_ctx *client_ctx) +{ + u32 vcd_status; + + int rc; + + INFO("msm_vidc_enc: Inside %s\n", __func__); + if (!client_ctx || !client_ctx->vcd_handle) { + ERR("%s: Invalid client_ctx\n", __func__); + return false; + } + + mutex_lock(&vidc_enc_dev->lock); + + if (!stop_cmd) { + vcd_status = vcd_stop(client_ctx->vcd_handle); + DBG("Waiting for VCD_STOP: Before Timeout\n"); + if (!vcd_status) { + rc = wait_for_completion_timeout(&client_ctx->event, + 5 * HZ); + if (!rc) { + ERR("%s: ERROR vcd_stop time out %d\n", + __func__, rc); + } + + if (client_ctx->event_status) { + ERR("%s :ERROR vcd_stop Not success\n", + __func__); + } + } + } + DBG("VCD_STOPPED: After Timeout, calling VCD_CLOSE\n"); + vcd_status = vcd_close(client_ctx->vcd_handle); + + if (vcd_status) { + mutex_unlock(&vidc_enc_dev->lock); + return false; + } + + memset((void *)client_ctx, 0, sizeof(struct video_client_ctx)); + + vidc_enc_dev->num_clients--; + stop_cmd = 0; + mutex_unlock(&vidc_enc_dev->lock); + return true; +} + + +static int vid_enc_open(struct inode *inode, struct file *file) +{ + int rc = 0; + s32 client_index; + struct video_client_ctx *client_ctx; + u32 vcd_status = VCD_ERR_FAIL; + + INFO("msm_vidc_enc: Inside %s\n", __func__); + + mutex_lock(&vidc_enc_dev->lock); + + stop_cmd = 0; + if (vidc_enc_dev->num_clients == VID_ENC_MAX_ENCODER_CLIENTS) { + ERR("ERROR: vid_enc_open() max number of clients limit reached" + "\n"); + rc = -ENODEV; + goto out; + } + +#ifndef USE_RES_TRACKER + DBG("Resource Tracker not in use"); + if (!vid_c_enable_clk(VID_C_HCLK_RATE)) { + ERR("ERROR: vid_enc_open() clock enabled failed\n"); + rc = -ENODEV; + goto out; + } +#endif + + DBG("Virtual Address of ioremap is %p\n", vidc_enc_dev->virt_base); + + if (!vidc_enc_dev->num_clients) { + rc = vcd_fw_prepare_all(); + if (rc) + goto out; + } + + client_index = vid_enc_get_empty_client_index(); + + if (client_index == -1) { + ERR("%s: No free clients client_index == -1\n", __func__); + rc = -ENODEV; + goto out; + } + + client_ctx = &vidc_enc_dev->venc_clients[client_index]; + vidc_enc_dev->num_clients++; + + init_completion(&client_ctx->event); + mutex_init(&client_ctx->msg_queue_lock); + INIT_LIST_HEAD(&client_ctx->msg_queue); + init_waitqueue_head(&client_ctx->msg_wait); + vcd_status = vcd_open(vidc_enc_dev->device_handle, false, + vid_enc_vcd_cb, client_ctx); + client_ctx->stop_msg = 0; + + wait_for_completion(&client_ctx->event); + file->private_data = client_ctx; + +out: + mutex_unlock(&vidc_enc_dev->lock); + return rc; +} + +static int vid_enc_release(struct inode *inode, struct file *file) +{ + struct video_client_ctx *client_ctx = file->private_data; + INFO("msm_vidc_enc: Inside %s\n", __func__); + vid_enc_close_client(client_ctx); +#ifndef USE_RES_TRACKER + vid_c_disable_clk(); +#endif + INFO("msm_vidc_enc: Return from %s\n", __func__); + return 0; +} + +static const struct file_operations vid_enc_fops = { + .owner = THIS_MODULE, + .open = vid_enc_open, + .release = vid_enc_release, + .ioctl = vid_enc_ioctl +}; + +void vid_enc_interrupt_deregister(void) +{ +} + +void vid_enc_interrupt_register(void *device_name) +{ +} + +void vid_enc_interrupt_clear(void) +{ +} + +void *vid_enc_map_dev_base_addr(void *device_name) +{ + return vidc_enc_dev->virt_base; +} + +static int 
vid_enc_vcd_init(void) +{ + int rc; + struct vcd_init_config vcd_init_config; + u32 i; + + INFO("msm_vidc_enc: Inside %s\n", __func__); + vidc_enc_dev->num_clients = 0; + + for (i = 0; i < VID_ENC_MAX_ENCODER_CLIENTS; i++) + memset((void *)&vidc_enc_dev->venc_clients[i], 0, + sizeof(vidc_enc_dev->venc_clients[i])); + + mutex_init(&vidc_enc_dev->lock); + vidc_enc_dev->virt_base = vid_c_get_ioaddr(); + + if (!vidc_enc_dev->virt_base) { + ERR("%s: ioremap failed\n", __func__); + return -ENOMEM; + } + + vcd_init_config.device_name = "VID_C"; + vcd_init_config.pf_map_dev_base_addr = vid_enc_map_dev_base_addr; + vcd_init_config.pf_interrupt_clr = vid_enc_interrupt_clear; + vcd_init_config.pf_register_isr = vid_enc_interrupt_register; + vcd_init_config.pf_deregister_isr = vid_enc_interrupt_deregister; + + rc = vcd_init(&vcd_init_config, &vidc_enc_dev->device_handle); + + if (rc) { + ERR("%s: vcd_init failed\n", __func__); + return -ENODEV; + } + return 0; +} + +static int __init vid_enc_init(void) +{ + int rc = 0; + struct device *class_devp; + + INFO("msm_vidc_enc: Inside %s\n", __func__); + vidc_enc_dev = kzalloc(sizeof(struct vid_enc_dev), GFP_KERNEL); + if (!vidc_enc_dev) { + ERR("%s Unable to allocate memory for vid_enc_dev\n", __func__); + return -ENOMEM; + } + + rc = alloc_chrdev_region(&vidc_enc_dev_num, 0, 1, VID_ENC_NAME); + if (rc < 0) { + ERR("%s: alloc_chrdev_region Failed rc = %d\n", __func__, rc); + goto error_vid_enc_alloc_chrdev_region; + } + + vid_enc_class = class_create(THIS_MODULE, VID_ENC_NAME); + if (IS_ERR(vid_enc_class)) { + rc = PTR_ERR(vid_enc_class); + ERR("%s: couldn't create vid_enc_class %d\n", __func__, rc); + goto error_vid_enc_class_create; + } + + class_devp = device_create(vid_enc_class, NULL, vidc_enc_dev_num, NULL, + VID_ENC_NAME); + if (IS_ERR(class_devp)) { + rc = PTR_ERR(class_devp); + ERR("%s: class device_create failed %d\n", __func__, rc); + goto error_vid_enc_class_device_create; + } + + vidc_enc_dev->device = class_devp; + + cdev_init(&vidc_enc_dev->cdev, &vid_enc_fops); + vidc_enc_dev->cdev.owner = THIS_MODULE; + rc = cdev_add(&vidc_enc_dev->cdev, vidc_enc_dev_num, 1); + + if (rc < 0) { + ERR("%s: cdev_add failed %d\n", __func__, rc); + goto error_vid_enc_cdev_add; + } + vid_enc_vcd_init(); + return 0; + +error_vid_enc_cdev_add: + device_destroy(vid_enc_class, vidc_enc_dev_num); +error_vid_enc_class_device_create: + class_destroy(vid_enc_class); +error_vid_enc_class_create: + unregister_chrdev_region(vidc_enc_dev_num, 1); +error_vid_enc_alloc_chrdev_region: + kfree(vidc_enc_dev); + + return rc; +} + +static void __exit vid_enc_exit(void) +{ + INFO("msm_vidc_enc: Inside %s\n", __func__); + cdev_del(&vidc_enc_dev->cdev); + device_destroy(vid_enc_class, vidc_enc_dev_num); + class_destroy(vid_enc_class); + unregister_chrdev_region(vidc_enc_dev_num, 1); + kfree(vidc_enc_dev); + INFO("msm_vidc_enc: Return from %s\n", __func__); +} + +static int vid_enc_ioctl(struct inode *inode, struct file *file, + unsigned cmd, unsigned long arg) +{ + void __user *u_arg = (void __user *)arg; + struct video_client_ctx *client_ctx; + struct venc_ioctl_msg venc_msg; + u32 result = true; + + DBG("%s\n", __func__); + + client_ctx = file->private_data; + if (!client_ctx) { + ERR("!client_ctx. 
Cannot attach to device handle\n"); + return -ENODEV; + } + + switch (cmd) { + case VEN_IOCTL_CMD_READ_NEXT_MSG: + { + struct venc_msg cb_msg; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + DBG("VEN_IOCTL_CMD_READ_NEXT_MSG\n"); + result = vid_enc_get_next_msg(client_ctx, &cb_msg); + if (!result) { + ERR("VEN_IOCTL_CMD_READ_NEXT_MSG failed\n"); + return -EIO; + } + if (copy_to_user(venc_msg.out, &cb_msg, sizeof(cb_msg))) + return -EFAULT; + break; + } + case VEN_IOCTL_CMD_STOP_READ_MSG: + DBG("VEN_IOCTL_CMD_STOP_READ_MSG\n"); + client_ctx->stop_msg = 1; + wake_up(&client_ctx->msg_wait); + break; + case VEN_IOCTL_CMD_ENCODE_FRAME: + case VEN_IOCTL_CMD_FILL_OUTPUT_BUFFER: + { + struct venc_buffer enc_buf; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_CMD_ENCODE_FRAME/" + "VEN_IOCTL_CMD_FILL_OUTPUT_BUFFER\n"); + + if (copy_from_user(&enc_buf, venc_msg.in, sizeof(enc_buf))) + return -EFAULT; + + if (cmd == VEN_IOCTL_CMD_ENCODE_FRAME) + result = vid_enc_encode_frame(client_ctx, &enc_buf); + else + result = vid_enc_fill_output_buffer(client_ctx, + &enc_buf); + + if (!result) { + DBG("VEN_IOCTL_CMD_ENCODE_FRAME/" + "VEN_IOCTL_CMD_FILL_OUTPUT_BUFFER failed\n"); + return -EIO; + } + break; + } + case VEN_IOCTL_SET_INPUT_BUFFER: + case VEN_IOCTL_SET_OUTPUT_BUFFER: + { + struct venc_bufferpayload buf_info; + enum venc_buffer_dir buf_dir; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_SET_INPUT_BUFFER/VEN_IOCTL_SET_OUTPUT_BUFFER\n"); + + if (copy_from_user(&buf_info, venc_msg.in, sizeof(buf_info))) + return -EFAULT; + + buf_dir = VEN_BUFFER_TYPE_INPUT; + if (cmd == VEN_IOCTL_SET_OUTPUT_BUFFER) + buf_dir = VEN_BUFFER_TYPE_OUTPUT; + + result = vid_enc_set_buffer(client_ctx, &buf_info, buf_dir); + if (!result) { + DBG("VEN_IOCTL_SET_INPUT_BUFFER" + "/VEN_IOCTL_SET_OUTPUT_BUFFER failed\n"); + return -EIO; + } + break; + } + case VEN_IOCTL_SET_INPUT_BUFFER_REQ: + case VEN_IOCTL_SET_OUTPUT_BUFFER_REQ: + { + struct venc_allocatorproperty alloc; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_SET_INPUT_BUFFER_REQ" + "/VEN_IOCTL_SET_OUTPUT_BUFFER_REQ\n"); + + if (copy_from_user(&alloc, venc_msg.in, sizeof(alloc))) + return -EFAULT; + + if (cmd == VEN_IOCTL_SET_OUTPUT_BUFFER_REQ) + result = vid_enc_set_buffer_req(client_ctx, &alloc, + false); + else + result = vid_enc_set_buffer_req(client_ctx, &alloc, + true); + if (!result) { + DBG("setting VEN_IOCTL_SET_OUTPUT_BUFFER_REQ/" + "VEN_IOCTL_SET_INPUT_BUFFER_REQ failed\n"); + return -EIO; + } + break; + } + case VEN_IOCTL_GET_INPUT_BUFFER_REQ: + case VEN_IOCTL_GET_OUTPUT_BUFFER_REQ: + { + struct venc_allocatorproperty alloc; + + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_GET_INPUT_BUFFER_REQ/" + "VEN_IOCTL_GET_OUTPUT_BUFFER_REQ\n"); + + if (cmd == VEN_IOCTL_GET_OUTPUT_BUFFER_REQ) + result = vid_enc_get_buffer_req(client_ctx, &alloc, + false); + else + result = vid_enc_get_buffer_req(client_ctx, &alloc, + true); + + if (!result) + return -EIO; + if (copy_to_user(venc_msg.out, &alloc, sizeof(alloc))) + return -EFAULT; + break; + } + case VEN_IOCTL_CMD_FLUSH: + { + struct venc_bufferflush buf_flush; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_CMD_FLUSH\n"); + + if (copy_from_user(&buf_flush, venc_msg.in, sizeof(buf_flush))) + return -EFAULT; + + INFO("%s: Calling vid_enc_flush with mode = %lu\n", 
__func__, + buf_flush.flush_mode); + result = vid_enc_flush(client_ctx, &buf_flush); + if (!result) { + ERR("setting VEN_IOCTL_CMD_FLUSH failed\n"); + return -EIO; + } + break; + } + case VEN_IOCTL_CMD_START: + INFO("%s: Executing VEN_IOCTL_CMD_START\n", __func__); + result = vid_enc_start(client_ctx); + if (!result) { + ERR("setting VEN_IOCTL_CMD_START failed\n"); + return -EIO; + } + break; + case VEN_IOCTL_CMD_STOP: + INFO("%s: Executing VEN_IOCTL_CMD_STOP", __func__); + result = vid_enc_stop(client_ctx); + if (!result) { + ERR("setting VEN_IOCTL_CMD_STOP failed\n"); + return -EIO; + } + stop_cmd = 1; + break; + case VEN_IOCTL_CMD_PAUSE: + INFO("%s: Executing VEN_IOCTL_CMD_PAUSE\n", __func__); + result = vid_enc_pause(client_ctx); + if (!result) { + ERR("setting VEN_IOCTL_CMD_PAUSE failed\n"); + return -EIO; + } + break; + case VEN_IOCTL_CMD_RESUME: + INFO("%s: Executing VEN_IOCTL_CMD_RESUME\n", __func__); + result = vid_enc_resume(client_ctx); + if (!result) { + ERR("setting VEN_IOCTL_CMD_RESUME failed\n"); + return -EIO; + } + break; + case VEN_IOCTL_SET_QP_RANGE: + { + struct venc_qprange qprange; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_SET_QP_RANGE\n"); + + if (copy_from_user(&qprange, venc_msg.in, sizeof(qprange))) + return -EFAULT; + + result = vid_enc_set_get_qprange(client_ctx, &qprange, true); + + if (!result) { + ERR("setting VEN_IOCTL_SET_QP_RANGE failed\n"); + return -EIO; + } + break; + } + case VEN_IOCTL_GET_QP_RANGE: + { + struct venc_qprange qprange; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_GET_QP_RANGE\n"); + result = vid_enc_set_get_qprange(client_ctx, &qprange, false); + + if (!result) + return -EIO; + if (copy_to_user(venc_msg.out, &qprange, sizeof(qprange))) + return -EFAULT; + break; + } + case VEN_IOCTL_SET_HEC: + { + struct venc_headerextension ext; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_SET_HEC\n"); + + if (copy_from_user(&ext, venc_msg.in, sizeof(ext))) + return -EFAULT; + + result = vid_enc_set_get_headerextension(client_ctx, &ext, + true); + + if (!result) { + ERR("setting VEN_IOCTL_SET_HEC failed\n"); + return -EIO; + } + break; + } + case VEN_IOCTL_GET_HEC: + { + struct venc_headerextension ext; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_GET_HEC\n"); + result = vid_enc_set_get_headerextension(client_ctx, &ext, + false); + + if (!result) + return -EIO; + if (copy_to_user(venc_msg.out, &ext, sizeof(ext))) + return -EFAULT; + break; + } + case VEN_IOCTL_SET_TARGET_BITRATE: + { + struct venc_targetbitrate rate; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_SET_TARGET_BITRATE\n"); + + if (copy_from_user(&rate, venc_msg.in, sizeof(rate))) + return -EFAULT; + + result = vid_enc_set_get_bitrate(client_ctx, &rate, true); + + if (!result) { + ERR("setting VEN_IOCTL_SET_TARGET_BITRATE failed\n"); + return -EIO; + } + break; + } + case VEN_IOCTL_GET_TARGET_BITRATE: + { + struct venc_targetbitrate rate; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_GET_TARGET_BITRATE\n"); + result = vid_enc_set_get_bitrate(client_ctx, &rate, false); + + if (!result) + return -EIO; + if (copy_to_user(venc_msg.out, &rate, sizeof(rate))) + return -EFAULT; + break; + } + case VEN_IOCTL_SET_FRAME_RATE: + { + struct venc_framerate frm_rate; + if (copy_from_user(&venc_msg, u_arg, 
sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_SET_FRAME_RATE\n"); + + if (copy_from_user(&frm_rate, venc_msg.in, sizeof(frm_rate))) + return -EFAULT; + + result = vid_enc_set_get_framerate(client_ctx, &frm_rate, + true); + + if (!result) { + ERR("setting VEN_IOCTL_SET_FRAME_RATE failed\n"); + return -EIO; + } + break; + } + case VEN_IOCTL_GET_FRAME_RATE: + { + struct venc_framerate frm_rate; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_GET_FRAME_RATE\n"); + result = vid_enc_set_get_framerate(client_ctx, &frm_rate, + false); + + if (result) { + if (copy_to_user(venc_msg.out, + &frm_rate, sizeof(frm_rate))) + return -EFAULT; + } else + return -EIO; + break; + } + case VEN_IOCTL_SET_VOP_TIMING_CFG: + { + struct venc_voptimingcfg timing; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_SET_VOP_TIMING_CFG\n"); + + if (copy_from_user(&timing, venc_msg.in, sizeof(timing))) + return -EFAULT; + + result = vid_enc_set_get_voptimingcfg(client_ctx, &timing, + true); + if (!result) { + ERR("setting VEN_IOCTL_SET_VOP_TIMING_CFG failed\n"); + return -EIO; + } + break; + } + case VEN_IOCTL_GET_VOP_TIMING_CFG: + { + struct venc_voptimingcfg timing; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_GET_VOP_TIMING_CFG\n"); + result = vid_enc_set_get_voptimingcfg(client_ctx, &timing, + false); + if (!result) + return -EIO; + if (copy_to_user(venc_msg.out, &timing, sizeof(timing))) + return -EFAULT; + break; + } + case VEN_IOCTL_SET_RATE_CTRL_CFG: + { + struct venc_ratectrlcfg rate_ctrl; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_SET_RATE_CTRL_CFG\n"); + + if (copy_from_user(&rate_ctrl, venc_msg.in, sizeof(rate_ctrl))) + return -EFAULT; + + result = vid_enc_set_get_ratectrlcfg(client_ctx, &rate_ctrl, + true); + if (!result) { + ERR("setting VEN_IOCTL_SET_RATE_CTRL_CFG failed\n"); + return -EIO; + } + break; + } + case VEN_IOCTL_GET_RATE_CTRL_CFG: + { + struct venc_ratectrlcfg rate_ctrl; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_SET_RATE_CTRL_CFG\n"); + result = vid_enc_set_get_ratectrlcfg(client_ctx, &rate_ctrl, + false); + if (!result) + return -EIO; + if (copy_to_user(venc_msg.out, &rate_ctrl, sizeof(rate_ctrl))) + return -EFAULT; + break; + } + case VEN_IOCTL_SET_MULTI_SLICE_CFG: + { + struct venc_multiclicecfg slice; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_SET_MULTI_SLICE_CFG\n"); + + if (copy_from_user(&slice, venc_msg.in, sizeof(slice))) + return -EFAULT; + + result = vid_enc_set_get_multiclicecfg(client_ctx, &slice, + true); + if (!result) { + ERR("setting VEN_IOCTL_SET_MULTI_SLICE_CFG failed\n"); + return -EIO; + } + break; + } + case VEN_IOCTL_GET_MULTI_SLICE_CFG: + { + struct venc_multiclicecfg slice; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_GET_MULTI_SLICE_CFG\n"); + result = vid_enc_set_get_multiclicecfg(client_ctx, &slice, + false); + if (!result) + return -EIO; + if (copy_to_user(venc_msg.out, &slice, sizeof(slice))) + return -EFAULT; + break; + } + case VEN_IOCTL_SET_INTRA_REFRESH: + { + struct venc_intrarefresh refresh; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_SET_INTRA_REFRESH\n"); + + if (copy_from_user(&refresh, venc_msg.in, sizeof(refresh))) + return -EFAULT; + + result = 
vid_enc_set_get_intrarefresh(client_ctx, &refresh, + true); + if (!result) { + ERR("setting VEN_IOCTL_SET_INTRA_REFRESH failed\n"); + return -EIO; + } + break; + } + case VEN_IOCTL_GET_INTRA_REFRESH: + { + struct venc_intrarefresh refresh; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_GET_DEBLOCKING_CFG\n"); + result = vid_enc_set_get_intrarefresh(client_ctx, &refresh, + false); + if (!result) + return -EIO; + if (copy_to_user(venc_msg.out, &refresh, sizeof(refresh))) + return -EFAULT; + break; + } + case VEN_IOCTL_SET_DEBLOCKING_CFG: + { + struct venc_dbcfg dbcfg; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_SET_DEBLOCKING_CFG\n"); + + if (copy_from_user(&dbcfg, venc_msg.in, sizeof(dbcfg))) + return -EFAULT; + result = vid_enc_set_get_dbcfg(client_ctx, &dbcfg, true); + + if (!result) { + ERR("setting VEN_IOCTL_SET_DEBLOCKING_CFG failed\n"); + return -EIO; + } + break; + } + case VEN_IOCTL_GET_DEBLOCKING_CFG: + { + struct venc_dbcfg dbcfg; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_GET_DEBLOCKING_CFG\n"); + result = vid_enc_set_get_dbcfg(client_ctx, &dbcfg, false); + if (!result) + return -EIO; + if (copy_to_user(venc_msg.out, &dbcfg, sizeof(dbcfg))) + return -EFAULT; + break; + } + case VEN_IOCTL_SET_ENTROPY_CFG: + { + struct venc_entropycfg entropy; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_SET_ENTROPY_CFG\n"); + + if (copy_from_user(&entropy, venc_msg.in, sizeof(entropy))) + return -EFAULT; + + result = vid_enc_set_get_entropy_cfg(client_ctx, &entropy, + true); + + if (!result) { + ERR("setting VEN_IOCTL_SET_ENTROPY_CFG failed\n"); + return -EIO; + } + break; + } + case VEN_IOCTL_GET_ENTROPY_CFG: + { + struct venc_entropycfg entropy; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_GET_ENTROPY_CFG\n"); + + result = vid_enc_set_get_entropy_cfg(client_ctx, &entropy, + false); + if (!result) + return -EIO; + if (copy_to_user(venc_msg.out, &entropy, sizeof(entropy))) + return -EFAULT; + break; + } + case VEN_IOCTL_GET_SEQUENCE_HDR: + { + int rc = 0; + struct venc_seqheader hdr; + struct venc_seqheader hdr_user; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_GET_SEQUENCE_HDR\n"); + + if (copy_from_user(&hdr, venc_msg.in, sizeof(hdr))) + return -EFAULT; + if (copy_from_user(&hdr_user, venc_msg.in, sizeof(hdr_user))) + return -EFAULT; + + hdr.buf = NULL; + result = vid_enc_get_sequence_header(client_ctx, &hdr); + if (!result) + rc = -EIO; + if (!rc || copy_to_user(hdr_user.buf, hdr.buf, hdr.hdr_len)) + rc = -EFAULT; + if (!rc || copy_to_user(&hdr_user.hdr_len, &hdr.hdr_len, + sizeof(hdr.hdr_len))) + rc = -EFAULT; + + kfree(hdr.buf); + hdr.buf = NULL; + if (rc) + return rc; + break; + } + case VEN_IOCTL_GET_CAPABILITY: + return -EIO; + case VEN_IOCTL_CMD_REQUEST_IFRAME: + result = vid_enc_request_iframe(client_ctx); + if (!result) { + ERR("setting VEN_IOCTL_CMD_REQUEST_IFRAME failed\n"); + return -EIO; + } + break; + case VEN_IOCTL_SET_INTRA_PERIOD: + { + struct venc_intraperiod period; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_SET_INTRA_PERIOD\n"); + + if (copy_from_user(&period, venc_msg.in, sizeof(period))) + return -EFAULT; + + result = vid_enc_set_get_intraperiod(client_ctx, &period, + true); + if (!result) { + ERR("setting 
VEN_IOCTL_SET_INTRA_PERIOD failed\n"); + return -EIO; + } + break; + } + case VEN_IOCTL_GET_INTRA_PERIOD: + { + struct venc_intraperiod period; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_GET_SESSION_QP\n"); + + result = vid_enc_set_get_intraperiod(client_ctx, &period, + false); + if (!result) + return -EIO; + if (copy_to_user(venc_msg.out, &period, sizeof(period))) + return -EFAULT; + break; + } + case VEN_IOCTL_SET_SESSION_QP: + { + struct venc_sessionqp qp; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_SET_SESSION_QP\n"); + + if (copy_from_user(&qp, venc_msg.in, sizeof(qp))) + return -EFAULT; + + result = vid_enc_set_get_session_qp(client_ctx, &qp, true); + if (!result) { + ERR("setting VEN_IOCTL_SET_SESSION_QP failed\n"); + return -EIO; + } + break; + } + case VEN_IOCTL_GET_SESSION_QP: + { + struct venc_sessionqp qp; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_GET_SESSION_QP\n"); + + result = vid_enc_set_get_session_qp(client_ctx, &qp, false); + + if (!result) + return -EIO; + if (copy_to_user(venc_msg.out, &qp, sizeof(qp))) + return -EFAULT; + break; + } + case VEN_IOCTL_SET_PROFILE_LEVEL: + { + struct ven_profilelevel level; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_SET_PROFILE_LEVEL\n"); + + if (copy_from_user(&level, venc_msg.in, sizeof(level))) + return -EFAULT; + + result = vid_enc_set_get_profile_level(client_ctx, &level, + true); + if (!result) { + ERR("setting VEN_IOCTL_SET_PROFILE_LEVEL failed\n"); + return -EIO; + } + break; + } + case VEN_IOCTL_GET_PROFILE_LEVEL: + { + struct ven_profilelevel level; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_GET_CODEC_PROFILE\n"); + + result = vid_enc_set_get_profile_level(client_ctx, &level, + false); + if (!result) + return -EIO; + if (copy_to_user(venc_msg.out, &level, sizeof(level))) + return -EFAULT; + break; + } + case VEN_IOCTL_SET_CODEC_PROFILE: + { + struct venc_profile profile; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_SET_CODEC_PROFILE\n"); + + if (copy_from_user(&profile, venc_msg.in, sizeof(profile))) + return -EFAULT; + + result = vid_enc_set_get_profile(client_ctx, &profile, true); + if (!result) { + ERR("setting VEN_IOCTL_SET_CODEC_PROFILE failed\n"); + return -EIO; + } + break; + } + case VEN_IOCTL_GET_CODEC_PROFILE: + { + struct venc_profile profile; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_GET_CODEC_PROFILE\n"); + + result = vid_enc_set_get_profile(client_ctx, &profile, false); + if (!result) + return -EIO; + if (copy_to_user(venc_msg.out, &profile, sizeof(profile))) + return -EFAULT; + break; + } + case VEN_IOCTL_SET_SHORT_HDR: + { + struct venc_switch enc_switch; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("Getting VEN_IOCTL_SET_SHORT_HDR\n"); + + if (copy_from_user(&enc_switch, venc_msg.in, + sizeof(enc_switch))) + return -EFAULT; + + result = vid_enc_set_get_short_header(client_ctx, &enc_switch, + true); + if (!result) { + ERR("setting VEN_IOCTL_SET_SHORT_HDR failed\n"); + return -EIO; + } + break; + } + case VEN_IOCTL_GET_SHORT_HDR: + { + struct venc_switch enc_switch; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_GET_LIVE_MODE\n"); + + result = 
vid_enc_set_get_short_header(client_ctx, &enc_switch, + false); + if (!result) + return -EIO; + if (copy_to_user(venc_msg.out, &enc_switch, sizeof(enc_switch))) + return -EFAULT; + break; + } + case VEN_IOCTL_SET_BASE_CFG: + { + struct venc_basecfg base; + DBG("VEN_IOCTL_SET_BASE_CFG\n"); + + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + if (copy_from_user(&base, venc_msg.in, sizeof(base))) + return -EFAULT; + + DBG("setting VEN_IOCTL_SET_BASE_CFG\n"); + + result = vid_enc_set_get_base_cfg(client_ctx, &base, true); + if (!result) { + ERR("setting VEN_IOCTL_SET_BASE_CFG failed\n"); + return -EIO; + } + break; + } + case VEN_IOCTL_GET_BASE_CFG: + { + struct venc_basecfg base; + DBG("VEN_IOCTL_GET_BASE_CFG\n"); + + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("Getting VEN_IOCTL_SET_BASE_CFG\n"); + + result = vid_enc_set_get_base_cfg(client_ctx, &base, false); + if (!result) + return -EIO; + if (copy_to_user(venc_msg.out, &base, sizeof(base))) + return -EFAULT; + break; + } + case VEN_IOCTL_SET_LIVE_MODE: + { + struct venc_switch enc_switch; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("Getting VEN_IOCTL_SET_LIVE_MODE\n"); + + if (copy_from_user(&enc_switch, venc_msg.in, sizeof(enc_switch))) + return -EFAULT; + + result = vid_enc_set_get_live_mode(client_ctx, &enc_switch, + true); + if (!result) { + ERR("setting VEN_IOCTL_SET_LIVE_MODE failed\n"); + return -EIO; + } + break; + } + case VEN_IOCTL_GET_LIVE_MODE: + { + struct venc_switch enc_switch; + if (copy_from_user(&venc_msg, u_arg, sizeof(venc_msg))) + return -EFAULT; + + DBG("VEN_IOCTL_GET_LIVE_MODE\n"); + + result = vid_enc_set_get_live_mode(client_ctx, &enc_switch, + false); + if (!result) + return -EIO; + if (copy_to_user(venc_msg.out, &enc_switch, sizeof(enc_switch))) + return -EFAULT; + break; + } + case VEN_IOCTL_SET_AC_PREDICTION: + case VEN_IOCTL_GET_AC_PREDICTION: + case VEN_IOCTL_SET_RVLC: + case VEN_IOCTL_GET_RVLC: + case VEN_IOCTL_SET_ROTATION: + case VEN_IOCTL_GET_ROTATION: + case VEN_IOCTL_SET_DATA_PARTITION: + case VEN_IOCTL_GET_DATA_PARTITION: + default: + ERR("%s: Unsupported ioctl %d\n", __func__, cmd); + return -ENOTTY; + } + return 0; +} + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Video encoder driver"); +MODULE_VERSION("1.0"); + +module_init(vid_enc_init); +module_exit(vid_enc_exit); diff --git a/drivers/misc/video_core/720p/enc/venc_internal.c b/drivers/misc/video_core/720p/enc/venc_internal.c new file mode 100644 index 0000000000000..11e8d6651f4a2 --- /dev/null +++ b/drivers/misc/video_core/720p/enc/venc_internal.c @@ -0,0 +1,1576 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "video_core_type.h" +#include "vcd_api.h" +#include "venc_internal.h" +#include "video_core_init.h" + +#if DEBUG +#define DBG(x...) printk(KERN_DEBUG x) +#else +#define DBG(x...) +#endif + +#define ERR(x...) printk(KERN_ERR x) + +u32 vid_enc_set_get_base_cfg(struct video_client_ctx *client_ctx, + struct venc_basecfg *config, u32 set_flag) +{ + struct venc_targetbitrate venc_bitrate; + struct venc_framerate frame_rate; + u32 current_codec_type; + + if (!client_ctx || !config) + return false; + + if (!vid_enc_set_get_codec(client_ctx, ¤t_codec_type, false)) + return false; + + DBG("%s: Current Codec Type = %u\n", __func__, current_codec_type); + if (current_codec_type != config->codectype) { + if (!vid_enc_set_get_codec(client_ctx, &config->codectype, + set_flag)) + return false; + } + + if (!vid_enc_set_get_inputformat(client_ctx, &config->inputformat, + set_flag)) + return false; + + if (!vid_enc_set_get_framesize(client_ctx, &config->input_height, + &config->input_width, set_flag)) + return false; + + if (set_flag) + venc_bitrate.target_bitrate = config->targetbitrate; + + if (!vid_enc_set_get_bitrate(client_ctx, &venc_bitrate, set_flag)) + return false; + + if (!set_flag) + config->targetbitrate = venc_bitrate.target_bitrate; + + if (set_flag) { + frame_rate.fps_denominator = config->fps_den; + frame_rate.fps_numerator = config->fps_num; + } + + if (!vid_enc_set_get_framerate(client_ctx, &frame_rate, set_flag)) + return false; + + if (!set_flag) { + config->fps_den = frame_rate.fps_denominator; + config->fps_num = frame_rate.fps_numerator; + } + + return true; +} + +u32 vid_enc_set_get_inputformat(struct video_client_ctx *client_ctx, + u32 *input_format, u32 set_flag) +{ + struct vcd_property_hdr vcd_property_hdr; + struct vcd_property_buffer_format format_type; + u32 vcd_status = VCD_ERR_FAIL; + u32 status = true; + + if (!client_ctx || !input_format) + return false; + + vcd_property_hdr.id = VCD_I_BUFFER_FORMAT; + vcd_property_hdr.sz = sizeof(struct vcd_property_buffer_format); + + if (set_flag) { + switch (*input_format) { + case VEN_INPUTFMT_NV12: + format_type.buffer_format = VCD_BUFFER_FORMAT_NV12; + break; + case VEN_INPUTFMT_NV21: + format_type.buffer_format = VCD_BUFFER_FORMAT_TILE_4x2; + break; + default: + status = false; + break; + } + + if (!status) + return status; + vcd_status = vcd_set_property(client_ctx->vcd_handle, + &vcd_property_hdr, &format_type); + if (vcd_status) { + status = false; + ERR("%s(): Set VCD_I_BUFFER_FORMAT Failed\n", __func__); + } + } else { + vcd_status = vcd_get_property(client_ctx->vcd_handle, + &vcd_property_hdr, &format_type); + + if (vcd_status) { + status = false; + ERR("%s(): Get VCD_I_BUFFER_FORMAT Failed\n", __func__); + return status; + } + switch (format_type.buffer_format) { + case VCD_BUFFER_FORMAT_NV12: + *input_format = VEN_INPUTFMT_NV12; + break; + case VCD_BUFFER_FORMAT_TILE_4x2: + *input_format = VEN_INPUTFMT_NV21; + break; + default: + status = false; + break; + } + } + return status; +} + +u32 vid_enc_set_get_codec(struct video_client_ctx *client_ctx, u32 *codec_type, + u32 set_flag) +{ + struct vcd_property_codec vcd_property_codec; + struct vcd_property_hdr vcd_property_hdr; + u32 vcd_status = VCD_ERR_FAIL; + u32 status = true; + + if (!client_ctx || !codec_type) + return false; + + vcd_property_hdr.id = VCD_I_CODEC; + vcd_property_hdr.sz = 
sizeof(struct vcd_property_codec); + + if (set_flag) { + switch (*codec_type) { + case VEN_CODEC_MPEG4: + vcd_property_codec.codec = VCD_CODEC_MPEG4; + break; + case VEN_CODEC_H263: + vcd_property_codec.codec = VCD_CODEC_H263; + break; + case VEN_CODEC_H264: + vcd_property_codec.codec = VCD_CODEC_H264; + break; + default: + status = false; + break; + } + + if (!status) + return status; + vcd_status = vcd_set_property(client_ctx->vcd_handle, + &vcd_property_hdr, &vcd_property_codec); + if (vcd_status) { + status = false; + ERR("%s: Set VCD_I_CODEC Failed\n", __func__); + } + } else { + vcd_status = vcd_get_property(client_ctx->vcd_handle, + &vcd_property_hdr, &vcd_property_codec); + + if (vcd_status) { + status = false; + ERR("%s(): Get VCD_I_CODEC Failed\n", __func__); + return status; + } + switch (vcd_property_codec.codec) { + case VCD_CODEC_H263: + *codec_type = VEN_CODEC_H263; + break; + case VCD_CODEC_H264: + *codec_type = VEN_CODEC_H264; + break; + case VCD_CODEC_MPEG4: + *codec_type = VEN_CODEC_MPEG4; + break; + case VCD_CODEC_DIVX_3: + case VCD_CODEC_DIVX_4: + case VCD_CODEC_DIVX_5: + case VCD_CODEC_DIVX_6: + case VCD_CODEC_MPEG1: + case VCD_CODEC_MPEG2: + case VCD_CODEC_VC1: + case VCD_CODEC_VC1_RCV: + case VCD_CODEC_XVID: + default: + status = false; + break; + } + } + return status; +} + +u32 vid_enc_set_get_framesize(struct video_client_ctx *client_ctx, u32 *height, + u32 *width, u32 set_flag) +{ + struct vcd_property_hdr vcd_property_hdr; + struct vcd_property_frame_size frame_size; + u32 vcd_status = VCD_ERR_FAIL; + + if (!client_ctx || !height || !width) + return false; + + vcd_property_hdr.id = VCD_I_FRAME_SIZE; + vcd_property_hdr.sz = sizeof(struct vcd_property_frame_size); + + if (set_flag) { + frame_size.height = *height; + frame_size.width = *width; + vcd_status = vcd_set_property(client_ctx->vcd_handle, + &vcd_property_hdr, &frame_size); + + if (vcd_status) { + ERR("%s(): Set VCD_I_FRAME_SIZE Failed\n", __func__); + return false; + } + } else { + vcd_status = vcd_get_property(client_ctx->vcd_handle, + &vcd_property_hdr, &frame_size); + + if (vcd_status) { + ERR("%s(): Get VCD_I_FRAME_SIZE Failed\n", __func__); + return false; + } + *height = frame_size.height; + *width = frame_size.width; + } + return true; +} + +u32 vid_enc_set_get_bitrate(struct video_client_ctx *client_ctx, + struct venc_targetbitrate *venc_bitrate, u32 set_flag) +{ + struct vcd_property_hdr vcd_property_hdr; + struct vcd_property_target_bitrate bit_rate; + u32 vcd_status = VCD_ERR_FAIL; + + if (!client_ctx || !venc_bitrate) + return false; + + vcd_property_hdr.id = VCD_I_TARGET_BITRATE; + vcd_property_hdr.sz = sizeof(struct vcd_property_target_bitrate); + if (set_flag) { + bit_rate.target_bitrate = venc_bitrate->target_bitrate; + vcd_status = vcd_set_property(client_ctx->vcd_handle, + &vcd_property_hdr, &bit_rate); + + if (vcd_status) { + ERR("%s(): Set VCD_I_TARGET_BITRATE Failed\n", + __func__); + return false; + } + } else { + vcd_status = vcd_get_property(client_ctx->vcd_handle, + &vcd_property_hdr, &bit_rate); + + if (vcd_status) { + ERR("%s(): Get VCD_I_TARGET_BITRATE Failed\n", + __func__); + return false; + } + venc_bitrate->target_bitrate = bit_rate.target_bitrate; + } + return true; +} + +u32 vid_enc_set_get_framerate(struct video_client_ctx *client_ctx, + struct venc_framerate *frame_rate, u32 set_flag) +{ + struct vcd_property_hdr vcd_property_hdr; + struct vcd_property_frame_rate vcd_frame_rate; + u32 vcd_status = VCD_ERR_FAIL; + + if (!client_ctx || !frame_rate) + return false; + + 
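+	/* VCD exposes the frame rate as an fps numerator/denominator pair; copy it in the direction requested by set_flag */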
vcd_property_hdr.id = VCD_I_FRAME_RATE; + vcd_property_hdr.sz = sizeof(struct vcd_property_frame_rate); + + if (set_flag) { + vcd_frame_rate.fps_denominator = frame_rate->fps_denominator; + vcd_frame_rate.fps_numerator = frame_rate->fps_numerator; + vcd_status = vcd_set_property(client_ctx->vcd_handle, + &vcd_property_hdr, &vcd_frame_rate); + + if (vcd_status) { + ERR("%s(): Set VCD_I_FRAME_RATE Failed\n", __func__); + return false; + } + } else { + vcd_status = vcd_get_property(client_ctx->vcd_handle, + &vcd_property_hdr, &vcd_frame_rate); + + if (vcd_status) { + ERR("%s(): Get VCD_I_FRAME_RATE Failed\n", __func__); + return false; + } + frame_rate->fps_denominator = vcd_frame_rate.fps_denominator; + frame_rate->fps_numerator = vcd_frame_rate.fps_numerator; + } + return true; +} + +u32 vid_enc_set_get_live_mode(struct video_client_ctx *client_ctx, + struct venc_switch *encoder_switch, u32 set_flag) +{ + struct vcd_property_hdr vcd_property_hdr; + struct vcd_property_live live_mode; + u32 vcd_status = VCD_ERR_FAIL; + + if (!client_ctx) + return false; + + vcd_property_hdr.id = VCD_I_LIVE; + vcd_property_hdr.sz = sizeof(struct vcd_property_live); + + if (set_flag) { + live_mode.live = 1; + if (!encoder_switch->status) + live_mode.live = 0; + + vcd_status = vcd_set_property(client_ctx->vcd_handle, + &vcd_property_hdr, &live_mode); + if (vcd_status) { + ERR("%s(): Set VCD_I_LIVE Failed\n", __func__); + return false; + } + } else { + vcd_status = vcd_get_property(client_ctx->vcd_handle, + &vcd_property_hdr, &live_mode); + + if (vcd_status) { + ERR("%s(): Get VCD_I_LIVE Failed\n", __func__); + return false; + } + encoder_switch->status = 1; + if (!live_mode.live) + encoder_switch->status = 0; + } + return true; +} + +u32 vid_enc_set_get_short_header(struct video_client_ctx *client_ctx, + struct venc_switch *encoder_switch, u32 set_flag) +{ + struct vcd_property_hdr vcd_property_hdr; + struct vcd_property_short_header short_header; + u32 vcd_status = VCD_ERR_FAIL; + + if (!client_ctx || !encoder_switch) + return false; + + vcd_property_hdr.id = VCD_I_SHORT_HEADER; + vcd_property_hdr.sz = sizeof(struct vcd_property_short_header); + + if (set_flag) { + short_header.short_header = (u32) encoder_switch->status; + vcd_status = vcd_set_property(client_ctx->vcd_handle, + &vcd_property_hdr, &short_header); + + if (vcd_status) { + ERR("%s(): Set VCD_I_SHORT_HEADER Failed\n", __func__); + return false; + } + } else { + vcd_status = vcd_get_property(client_ctx->vcd_handle, + &vcd_property_hdr, &short_header); + + if (vcd_status) { + ERR("%s(): Get VCD_I_SHORT_HEADER Failed\n", __func__); + return false; + } + encoder_switch->status = (u8)short_header.short_header; + } + return true; +} + +u32 vid_enc_set_get_profile(struct video_client_ctx *client_ctx, + struct venc_profile *profile, u32 set_flag) +{ + struct vcd_property_hdr vcd_property_hdr; + struct vcd_property_profile profile_type; + u32 vcd_status = VCD_ERR_FAIL; + u32 status = true; + + if (!client_ctx || !profile) + return false; + + vcd_property_hdr.id = VCD_I_PROFILE; + vcd_property_hdr.sz = sizeof(struct vcd_property_profile); + + if (set_flag) { + switch (profile->profile) { + case VEN_PROFILE_MPEG4_SP: + profile_type.profile = VCD_PROFILE_MPEG4_SP; + break; + case VEN_PROFILE_MPEG4_ASP: + profile_type.profile = VCD_PROFILE_MPEG4_ASP; + break; + case VEN_PROFILE_H264_BASELINE: + profile_type.profile = VCD_PROFILE_H264_BASELINE; + break; + case VEN_PROFILE_H264_MAIN: + profile_type.profile = VCD_PROFILE_H264_MAIN; + break; + case 
VEN_PROFILE_H264_HIGH: + profile_type.profile = VCD_PROFILE_H264_HIGH; + break; + case VEN_PROFILE_H263_BASELINE: + profile_type.profile = VCD_PROFILE_H263_BASELINE; + break; + default: + status = false; + break; + } + + if (!status) + return status; + vcd_status = vcd_set_property(client_ctx->vcd_handle, + &vcd_property_hdr, &profile_type); + + if (vcd_status) { + ERR("%s(): Set VCD_I_PROFILE Failed\n", __func__); + return false; + } + } else { + vcd_status = vcd_get_property(client_ctx->vcd_handle, + &vcd_property_hdr, &profile_type); + + if (vcd_status) { + ERR("%s(): Get VCD_I_PROFILE Failed\n", __func__); + return false; + } + switch (profile_type.profile) { + case VCD_PROFILE_H263_BASELINE: + profile->profile = VEN_PROFILE_H263_BASELINE; + break; + case VCD_PROFILE_H264_BASELINE: + profile->profile = VEN_PROFILE_H264_BASELINE; + break; + case VCD_PROFILE_H264_HIGH: + profile->profile = VEN_PROFILE_H264_HIGH; + break; + case VCD_PROFILE_H264_MAIN: + profile->profile = VEN_PROFILE_H264_MAIN; + break; + case VCD_PROFILE_MPEG4_ASP: + profile->profile = VEN_PROFILE_MPEG4_ASP; + break; + case VCD_PROFILE_MPEG4_SP: + profile->profile = VEN_PROFILE_MPEG4_SP; + break; + default: + status = false; + break; + } + } + return status; +} + +u32 vid_enc_set_get_profile_level(struct video_client_ctx *client_ctx, + struct ven_profilelevel *profile_level, u32 set_flag) +{ + struct vcd_property_hdr vcd_property_hdr; + struct vcd_property_level level_type; + u32 vcd_status = VCD_ERR_FAIL; + + if (!client_ctx || !profile_level) + return false; + + vcd_property_hdr.id = VCD_I_LEVEL; + vcd_property_hdr.sz = sizeof(struct vcd_property_level); + + if (set_flag) { + switch (profile_level->level) { + //TODO: collapse this crap + case VEN_LEVEL_MPEG4_0: + level_type.level = VCD_LEVEL_MPEG4_0; + break; + case VEN_LEVEL_MPEG4_1: + level_type.level = VCD_LEVEL_MPEG4_1; + break; + case VEN_LEVEL_MPEG4_2: + level_type.level = VCD_LEVEL_MPEG4_2; + break; + case VEN_LEVEL_MPEG4_3: + level_type.level = VCD_LEVEL_MPEG4_3; + break; + case VEN_LEVEL_MPEG4_4: + level_type.level = VCD_LEVEL_MPEG4_4; + break; + case VEN_LEVEL_MPEG4_5: + level_type.level = VCD_LEVEL_MPEG4_5; + break; + case VEN_LEVEL_MPEG4_3b: + level_type.level = VCD_LEVEL_MPEG4_3b; + break; + case VEN_LEVEL_MPEG4_6: + level_type.level = VCD_LEVEL_MPEG4_6; + break; + case VEN_LEVEL_H264_1: + level_type.level = VCD_LEVEL_H264_1; + break; + case VEN_LEVEL_H264_1b: + level_type.level = VCD_LEVEL_H264_1b; + break; + case VEN_LEVEL_H264_1p1: + level_type.level = VCD_LEVEL_H264_1p1; + break; + case VEN_LEVEL_H264_1p2: + level_type.level = VCD_LEVEL_H264_1p2; + break; + case VEN_LEVEL_H264_1p3: + level_type.level = VCD_LEVEL_H264_1p3; + break; + case VEN_LEVEL_H264_2: + level_type.level = VCD_LEVEL_H264_2; + break; + case VEN_LEVEL_H264_2p1: + level_type.level = VCD_LEVEL_H264_2p1; + break; + case VEN_LEVEL_H264_2p2: + level_type.level = VCD_LEVEL_H264_2p2; + break; + case VEN_LEVEL_H264_3: + level_type.level = VCD_LEVEL_H264_3; + break; + case VEN_LEVEL_H264_3p1: + level_type.level = VCD_LEVEL_H264_3p1; + break; + case VEN_LEVEL_H263_10: + level_type.level = VCD_LEVEL_H263_10; + break; + case VEN_LEVEL_H263_20: + level_type.level = VCD_LEVEL_H263_20; + break; + case VEN_LEVEL_H263_30: + level_type.level = VCD_LEVEL_H263_30; + break; + case VEN_LEVEL_H263_40: + level_type.level = VCD_LEVEL_H263_40; + break; + case VEN_LEVEL_H263_45: + level_type.level = VCD_LEVEL_H263_45; + break; + case VEN_LEVEL_H263_50: + level_type.level = VCD_LEVEL_H263_50; + break; + case 
VEN_LEVEL_H263_60: + level_type.level = VCD_LEVEL_H263_60; + break; + case VEN_LEVEL_H263_70: + level_type.level = VCD_LEVEL_H263_70; + break; + default: + return false; + } + vcd_status = vcd_set_property(client_ctx->vcd_handle, + &vcd_property_hdr, &level_type); + + if (vcd_status) { + ERR("%s: Set VCD_I_LEVEL Failed\n", __func__); + return false; + } + } else { + vcd_status = vcd_get_property(client_ctx->vcd_handle, + &vcd_property_hdr, &level_type); + + if (vcd_status) { + ERR("%s(): Get VCD_I_LEVEL Failed\n", __func__); + return false; + } + switch (level_type.level) { + case VCD_LEVEL_MPEG4_0: + profile_level->level = VEN_LEVEL_MPEG4_0; + break; + case VCD_LEVEL_MPEG4_1: + profile_level->level = VEN_LEVEL_MPEG4_1; + break; + case VCD_LEVEL_MPEG4_2: + profile_level->level = VEN_LEVEL_MPEG4_2; + break; + case VCD_LEVEL_MPEG4_3: + profile_level->level = VEN_LEVEL_MPEG4_3; + break; + case VCD_LEVEL_MPEG4_4: + profile_level->level = VEN_LEVEL_MPEG4_4; + break; + case VCD_LEVEL_MPEG4_5: + profile_level->level = VEN_LEVEL_MPEG4_5; + break; + case VCD_LEVEL_MPEG4_3b: + profile_level->level = VEN_LEVEL_MPEG4_3b; + break; + case VCD_LEVEL_H264_1: + profile_level->level = VEN_LEVEL_H264_1; + break; + case VCD_LEVEL_H264_1b: + profile_level->level = VEN_LEVEL_H264_1b; + break; + case VCD_LEVEL_H264_1p1: + profile_level->level = VEN_LEVEL_H264_1p1; + break; + case VCD_LEVEL_H264_1p2: + profile_level->level = VEN_LEVEL_H264_1p2; + break; + case VCD_LEVEL_H264_1p3: + profile_level->level = VEN_LEVEL_H264_1p3; + break; + case VCD_LEVEL_H264_2: + profile_level->level = VEN_LEVEL_H264_2; + break; + case VCD_LEVEL_H264_2p1: + profile_level->level = VEN_LEVEL_H264_2p1; + break; + case VCD_LEVEL_H264_2p2: + profile_level->level = VEN_LEVEL_H264_2p2; + break; + case VCD_LEVEL_H264_3: + profile_level->level = VEN_LEVEL_H264_3; + break; + case VCD_LEVEL_H264_3p1: + profile_level->level = VEN_LEVEL_H264_3p1; + break; + case VCD_LEVEL_H264_3p2: + case VCD_LEVEL_H264_4: + return false; + case VCD_LEVEL_H263_10: + profile_level->level = VEN_LEVEL_H263_10; + break; + case VCD_LEVEL_H263_20: + profile_level->level = VEN_LEVEL_H263_20; + break; + case VCD_LEVEL_H263_30: + profile_level->level = VEN_LEVEL_H263_30; + break; + case VCD_LEVEL_H263_40: + profile_level->level = VEN_LEVEL_H263_40; + break; + case VCD_LEVEL_H263_45: + profile_level->level = VEN_LEVEL_H263_45; + break; + case VCD_LEVEL_H263_50: + profile_level->level = VEN_LEVEL_H263_50; + break; + case VCD_LEVEL_H263_60: + profile_level->level = VEN_LEVEL_H263_60; + break; + case VCD_LEVEL_H263_70: + default: + return false; + } + } + return true; +} + +u32 vid_enc_set_get_session_qp(struct video_client_ctx *client_ctx, + struct venc_sessionqp *session_qp, u32 set_flag) +{ + struct vcd_property_hdr vcd_property_hdr; + struct vcd_property_session_qp qp_type; + u32 vcd_status = VCD_ERR_FAIL; + + if (!client_ctx || !session_qp) + return false; + + vcd_property_hdr.id = VCD_I_SESSION_QP; + vcd_property_hdr.sz = sizeof(struct vcd_property_session_qp); + + if (set_flag) { + qp_type.iframe_qp = session_qp->iframeqp; + qp_type.frame_qp = session_qp->pframqp; + + vcd_status = vcd_set_property(client_ctx->vcd_handle, + &vcd_property_hdr, &qp_type); + + if (vcd_status) { + ERR("%s(): Set VCD_I_SESSION_QP Failed\n", __func__); + return false; + } + } else { + vcd_status = vcd_get_property(client_ctx->vcd_handle, + &vcd_property_hdr, &qp_type); + + if (vcd_status) { + ERR("%s(): Set VCD_I_SESSION_QP Failed\n", __func__); + return false; + } + session_qp->iframeqp = 
qp_type.iframe_qp; + session_qp->pframqp = qp_type.frame_qp; + } + return true; +} + +u32 vid_enc_set_get_intraperiod(struct video_client_ctx *client_ctx, + struct venc_intraperiod *intraperiod, u32 set_flag) +{ + struct vcd_property_hdr vcd_property_hdr; + struct vcd_property_i_period period_type; + u32 vcd_status = VCD_ERR_FAIL; + + if (!client_ctx || !intraperiod) + return false; + + vcd_property_hdr.id = VCD_I_INTRA_PERIOD; + vcd_property_hdr.sz = sizeof(struct vcd_property_i_period); + + if (set_flag) { + period_type.frames = intraperiod->num_pframes; + period_type.bframes = 0; + vcd_status = vcd_set_property(client_ctx->vcd_handle, + &vcd_property_hdr, &period_type); + + if (vcd_status) { + ERR("%s(): Set VCD_I_INTRA_PERIOD Failed\n", __func__); + return false; + } + } else { + vcd_status = vcd_get_property(client_ctx->vcd_handle, + &vcd_property_hdr, &period_type); + + if (vcd_status) { + ERR("%s(): Get VCD_I_INTRA_PERIOD Failed\n", __func__); + return false; + } + intraperiod->num_pframes = period_type.frames; + } + return true; +} + +u32 vid_enc_request_iframe(struct video_client_ctx *client_ctx) +{ + struct vcd_property_hdr vcd_property_hdr; + struct vcd_property_req_i_frame request; + u32 vcd_status = VCD_ERR_FAIL; + + if (!client_ctx) + return false; + + vcd_property_hdr.id = VCD_I_REQ_IFRAME; + vcd_property_hdr.sz = sizeof(struct vcd_property_req_i_frame); + request.req_i_frame = 1; + + vcd_status = vcd_set_property(client_ctx->vcd_handle, &vcd_property_hdr, + &request); + + if (vcd_status) { + ERR("%s(): Set VCD_I_REQ_IFRAME Failed\n", __func__); + return false; + } + return true; +} + +u32 vid_enc_get_sequence_header(struct video_client_ctx *client_ctx, + struct venc_seqheader *seq_header) +{ + struct vcd_property_hdr vcd_property_hdr; + struct vcd_sequence_hdr hdr_type; + u32 vcd_status = VCD_ERR_FAIL; + + if (!client_ctx || !seq_header || !seq_header->buf_sz) + return false; + + vcd_property_hdr.id = VCD_I_SEQ_HEADER; + vcd_property_hdr.sz = sizeof(struct vcd_sequence_hdr); + + hdr_type.addr = kzalloc(seq_header->buf_sz, GFP_KERNEL); + seq_header->buf = hdr_type.addr; + if (!hdr_type.addr) + return false; + + hdr_type.sz = seq_header->buf_sz; + vcd_status = vcd_get_property(client_ctx->vcd_handle, &vcd_property_hdr, + &hdr_type); + + if (vcd_status) { + ERR("%s: Get VCD_I_SEQ_HEADER Failed\n", __func__); + return false; + } + return true; +} + +u32 vid_enc_set_get_entropy_cfg(struct video_client_ctx *client_ctx, + struct venc_entropycfg *entropy_cfg, u32 set_flag) +{ + struct vcd_property_hdr vcd_property_hdr; + struct vcd_property_entropy_control control_type; + u32 vcd_status = VCD_ERR_FAIL; + + if (!client_ctx || !entropy_cfg) + return false; + + vcd_property_hdr.id = VCD_I_ENTROPY_CTRL; + vcd_property_hdr.sz = sizeof(struct vcd_property_entropy_control); + + if (set_flag) { + switch (entropy_cfg->cabacmodel) { + case VEN_ENTROPY_MODEL_CAVLC: + control_type.entropy_sel = VCD_ENTROPY_SEL_CAVLC; + break; + case VEN_ENTROPY_MODEL_CABAC: + control_type.entropy_sel = VCD_ENTROPY_SEL_CABAC; + break; + default: + return false; + } + + if (entropy_cfg->cabacmodel == VCD_ENTROPY_SEL_CABAC) { + switch (entropy_cfg->cabacmodel) { + case VEN_CABAC_MODEL_0: + control_type.cabac_model = + VCD_CABAC_MODEL_NUMBER_0; + break; + case VEN_CABAC_MODEL_1: + control_type.cabac_model = + VCD_CABAC_MODEL_NUMBER_1; + break; + case VEN_CABAC_MODEL_2: + control_type.cabac_model = + VCD_CABAC_MODEL_NUMBER_2; + break; + default: + return false; + } + } + + vcd_status = 
vcd_set_property(client_ctx->vcd_handle, + &vcd_property_hdr, &control_type); + if (vcd_status) { + ERR("%s(): Set VCD_I_ENTROPY_CTRL Failed\n", __func__); + return false; + } + } else { + vcd_status = vcd_get_property(client_ctx->vcd_handle, + &vcd_property_hdr, &control_type); + if (vcd_status) { + ERR("%s(): Get VCD_I_ENTROPY_CTRL Failed\n", __func__); + return false; + } + + switch (control_type.entropy_sel) { + case VCD_ENTROPY_SEL_CABAC: + entropy_cfg->cabacmodel = VEN_ENTROPY_MODEL_CABAC; + break; + case VCD_ENTROPY_SEL_CAVLC: + entropy_cfg->cabacmodel = VEN_ENTROPY_MODEL_CAVLC; + break; + default: + return false; + } + + if (control_type.entropy_sel == VCD_ENTROPY_SEL_CABAC) { + switch (control_type.cabac_model) { + case VCD_CABAC_MODEL_NUMBER_0: + entropy_cfg->cabacmodel = VEN_CABAC_MODEL_0; + break; + case VCD_CABAC_MODEL_NUMBER_1: + entropy_cfg->cabacmodel = VEN_CABAC_MODEL_1; + break; + case VCD_CABAC_MODEL_NUMBER_2: + entropy_cfg->cabacmodel = VEN_CABAC_MODEL_2; + break; + default: + return false; + } + } + } + return true; +} + +u32 vid_enc_set_get_dbcfg(struct video_client_ctx *client_ctx, + struct venc_dbcfg *dbcfg, u32 set_flag) +{ + struct vcd_property_hdr vcd_property_hdr; + struct vcd_property_db_config control_type; + u32 vcd_status = VCD_ERR_FAIL; + + if (!client_ctx || !dbcfg) + return false; + + vcd_property_hdr.id = VCD_I_DEBLOCKING; + vcd_property_hdr.sz = sizeof(struct vcd_property_db_config); + + if (set_flag) { + switch (dbcfg->db_mode) { + case VEN_DB_DISABLE: + control_type.db_config = VCD_DB_DISABLE; + break; + case VEN_DB_ALL_BLKG_BNDRY: + control_type.db_config = VCD_DB_ALL_BLOCKING_BOUNDARY; + break; + case VEN_DB_SKIP_SLICE_BNDRY: + control_type.db_config = VCD_DB_SKIP_SLICE_BOUNDARY; + break; + default: + return false; + } + + control_type.slice_alpha_offset = dbcfg->slicealpha_offset; + control_type.slice_beta_offset = dbcfg->slicebeta_offset; + vcd_status = vcd_set_property(client_ctx->vcd_handle, + &vcd_property_hdr, &control_type); + if (vcd_status) { + ERR("%s: Set VCD_I_DEBLOCKING Failed\n", __func__); + return false; + } + } else { + vcd_status = vcd_get_property(client_ctx->vcd_handle, + &vcd_property_hdr, &control_type); + if (vcd_status) { + ERR("%s: Get VCD_I_DEBLOCKING Failed\n", __func__); + return false; + } + switch (control_type.db_config) { + case VCD_DB_ALL_BLOCKING_BOUNDARY: + dbcfg->db_mode = VEN_DB_ALL_BLKG_BNDRY; + break; + case VCD_DB_DISABLE: + dbcfg->db_mode = VEN_DB_DISABLE; + break; + case VCD_DB_SKIP_SLICE_BOUNDARY: + dbcfg->db_mode = VEN_DB_SKIP_SLICE_BNDRY; + break; + default: + return false; + } + dbcfg->slicealpha_offset = control_type.slice_alpha_offset; + dbcfg->slicebeta_offset = control_type.slice_beta_offset; + } + return true; +} + +u32 vid_enc_set_get_intrarefresh(struct video_client_ctx *client_ctx, + struct venc_intrarefresh *intrarefresh, u32 set_flag) +{ + struct vcd_property_hdr prop_hdr; + struct vcd_property_intra_refresh_mb_number control_type; + u32 vcd_status = VCD_ERR_FAIL; + + if (!client_ctx || !intrarefresh) + return false; + + prop_hdr.id = VCD_I_INTRA_REFRESH; + prop_hdr.sz = sizeof(struct vcd_property_intra_refresh_mb_number); + + if (set_flag) { + control_type.cir_mb_number = intrarefresh->mbcount; + vcd_status = vcd_set_property(client_ctx->vcd_handle, &prop_hdr, + &control_type); + + if (vcd_status) { + ERR("%s(): Set VCD_I_INTRA_REFRESH Failed\n", __func__); + return false; + } + } else { + vcd_status = vcd_get_property(client_ctx->vcd_handle, &prop_hdr, + &control_type); + + if (vcd_status) { 
+ ERR("%s(): Set VCD_I_INTRA_REFRESH Failed\n", __func__); + return false; + } + intrarefresh->mbcount = control_type.cir_mb_number; + } + return true; +} + +u32 vid_enc_set_get_multiclicecfg(struct video_client_ctx *client_ctx, + struct venc_multiclicecfg *multiclicecfg, u32 set_flag) +{ + struct vcd_property_hdr vcd_property_hdr; + struct vcd_property_multi_slice control_type; + u32 vcd_status = VCD_ERR_FAIL; + + if (!client_ctx || !multiclicecfg) + return false; + + vcd_property_hdr.id = VCD_I_MULTI_SLICE; + vcd_property_hdr.sz = sizeof(struct vcd_property_multi_slice); + + if (set_flag) { + switch (multiclicecfg->mslice_mode) { + case VEN_MSLICE_OFF: + control_type.m_slice_sel = VCD_MSLICE_OFF; + break; + case VEN_MSLICE_CNT_MB: + control_type.m_slice_sel = VCD_MSLICE_BY_MB_COUNT; + break; + case VEN_MSLICE_CNT_BYTE: + control_type.m_slice_sel = VCD_MSLICE_BY_BYTE_COUNT; + break; + case VEN_MSLICE_GOB: + control_type.m_slice_sel = VCD_MSLICE_BY_GOB; + break; + default: + return false; + } + + control_type.m_slice_size = multiclicecfg->mslice_size; + vcd_status = vcd_set_property(client_ctx->vcd_handle, + &vcd_property_hdr, &control_type); + + if (vcd_status) { + ERR("%s: Set VCD_I_MULTI_SLICE Failed\n", __func__); + return false; + } + } else { + vcd_status = vcd_get_property(client_ctx->vcd_handle, + &vcd_property_hdr, &control_type); + + if (vcd_status) { + ERR("%s: Get VCD_I_MULTI_SLICE Failed\n", __func__); + return false; + } + multiclicecfg->mslice_size = control_type.m_slice_size; + switch (control_type.m_slice_sel) { + case VCD_MSLICE_OFF: + multiclicecfg->mslice_mode = VEN_MSLICE_OFF; + break; + case VCD_MSLICE_BY_MB_COUNT: + multiclicecfg->mslice_mode = VEN_MSLICE_CNT_MB; + break; + case VCD_MSLICE_BY_BYTE_COUNT: + multiclicecfg->mslice_mode = VEN_MSLICE_CNT_BYTE; + break; + case VCD_MSLICE_BY_GOB: + multiclicecfg->mslice_mode = VEN_MSLICE_GOB; + break; + default: + return false; + } + } + return true; +} + +u32 vid_enc_set_get_ratectrlcfg(struct video_client_ctx *client_ctx, + struct venc_ratectrlcfg *ratectrlcfg, u32 set_flag) +{ + struct vcd_property_hdr vcd_property_hdr; + struct vcd_property_rate_control control_type; + u32 vcd_status = VCD_ERR_FAIL; + + if (!client_ctx || !ratectrlcfg) + return false; + + vcd_property_hdr.id = VCD_I_RATE_CONTROL; + vcd_property_hdr.sz = sizeof(struct vcd_property_rate_control); + + if (set_flag) { + switch (ratectrlcfg->rcmode) { + case VEN_RC_OFF: + control_type.rate_control = VCD_RATE_CONTROL_OFF; + break; + case VEN_RC_CBR_VFR: + control_type.rate_control = VCD_RATE_CONTROL_CBR_VFR; + break; + case VEN_RC_VBR_CFR: + control_type.rate_control = VCD_RATE_CONTROL_VBR_CFR; + break; + case VEN_RC_VBR_VFR: + control_type.rate_control = VCD_RATE_CONTROL_VBR_VFR; + break; + default: + return false; + } + + vcd_status = vcd_set_property(client_ctx->vcd_handle, + &vcd_property_hdr, &control_type); + if (vcd_status) { + ERR("%s(): Set VCD_I_RATE_CONTROL Failed\n", __func__); + return false; + } + } else { + vcd_status = vcd_get_property(client_ctx->vcd_handle, + &vcd_property_hdr, &control_type); + + if (vcd_status) { + ERR("%s(): Get VCD_I_RATE_CONTROL Failed\n", __func__); + return false; + } + + switch (control_type.rate_control) { + case VCD_RATE_CONTROL_OFF: + ratectrlcfg->rcmode = VEN_RC_OFF; + break; + case VCD_RATE_CONTROL_CBR_VFR: + ratectrlcfg->rcmode = VEN_RC_CBR_VFR; + break; + case VCD_RATE_CONTROL_VBR_CFR: + ratectrlcfg->rcmode = VEN_RC_VBR_CFR; + break; + case VCD_RATE_CONTROL_VBR_VFR: + ratectrlcfg->rcmode = VEN_RC_VBR_VFR; + 
break; + default: + return false; + } + } + return true; +} + +u32 vid_enc_set_get_voptimingcfg(struct video_client_ctx *client_ctx, + struct venc_voptimingcfg *venc_timing, u32 set_flag) +{ + struct vcd_property_hdr vcd_property_hdr; + struct vcd_property_vop_timing vcd_timing; + u32 vcd_status = VCD_ERR_FAIL; + + if (!client_ctx || !venc_timing) + return false; + + vcd_property_hdr.id = VCD_I_VOP_TIMING; + vcd_property_hdr.sz = sizeof(struct vcd_property_vop_timing); + + if (set_flag) { + vcd_timing.vop_time_resolution = + venc_timing->voptime_resolution; + vcd_status = vcd_set_property(client_ctx->vcd_handle, + &vcd_property_hdr, &vcd_timing); + + if (vcd_status) { + ERR("%s(): Set VCD_I_VOP_TIMING Failed\n", __func__); + return false; + } + } else { + vcd_status = vcd_get_property(client_ctx->vcd_handle, + &vcd_property_hdr, &vcd_timing); + if (vcd_status) { + ERR("%s(): Get VCD_I_VOP_TIMING Failed\n", __func__); + return false; + } + venc_timing->voptime_resolution = + vcd_timing.vop_time_resolution; + } + return true; +} + +u32 vid_enc_set_get_headerextension(struct video_client_ctx *client_ctx, + struct venc_headerextension *headerextension, u32 set_flag) +{ + struct vcd_property_hdr vcd_property_hdr; + u32 control_type; + u32 vcd_status = VCD_ERR_FAIL; + + if (!client_ctx || !headerextension) + return false; + + vcd_property_hdr.id = VCD_I_HEADER_EXTENSION; + vcd_property_hdr.sz = sizeof(u32); + + if (set_flag) { + control_type = headerextension->header_extension; + vcd_status = vcd_set_property(client_ctx->vcd_handle, + &vcd_property_hdr, &control_type); + if (vcd_status) { + ERR("%s: Set VCD_I_HEADER_EXTENSION Fail\n", __func__); + return false; + } + } else { + vcd_status = vcd_get_property(client_ctx->vcd_handle, + &vcd_property_hdr, &control_type); + if (vcd_status) { + ERR("%s: Get VCD_I_HEADER_EXTENSION Fail\n", __func__); + return false; + } + headerextension->header_extension = control_type; + } + return true; +} + +u32 vid_enc_set_get_qprange(struct video_client_ctx *client_ctx, + struct venc_qprange *qprange, u32 set_flag) +{ + struct vcd_property_hdr vcd_property_hdr; + struct vcd_property_qp_range control_type; + u32 vcd_status = VCD_ERR_FAIL; + + if (!client_ctx || !qprange) + return false; + + vcd_property_hdr.id = VCD_I_QP_RANGE; + vcd_property_hdr.sz = sizeof(struct vcd_property_qp_range); + + if (set_flag) { + control_type.max_qp = qprange->maxqp; + control_type.min_qp = qprange->minqp; + vcd_status = vcd_set_property(client_ctx->vcd_handle, + &vcd_property_hdr, &control_type); + + if (vcd_status) { + ERR("%s(): Set VCD_I_QP_RANGE Failed\n", __func__); + return false; + } + } else { + vcd_status = vcd_get_property(client_ctx->vcd_handle, + &vcd_property_hdr, &control_type); + if (vcd_status) { + ERR("%s(): Get VCD_I_QP_RANGE Failed\n", __func__); + return false; + } + qprange->maxqp = control_type.max_qp; + qprange->minqp = control_type.min_qp; + } + return true; +} + +u32 vid_enc_start(struct video_client_ctx *client_ctx) +{ + u32 vcd_status; + + if (!client_ctx) + return false; + + vcd_status = vcd_encode_start(client_ctx->vcd_handle); + if (vcd_status) { + ERR("%s: vcd_encode_start failed %u\n", __func__, vcd_status); + return false; + } + return true; +} + + +u32 vid_enc_stop(struct video_client_ctx *client_ctx) +{ + u32 vcd_status; + + if (!client_ctx) + return false; + vcd_status = vcd_stop(client_ctx->vcd_handle); + if (vcd_status) { + ERR("%s: vcd_stop failed %u\n", __func__, vcd_status); + return false; + } + DBG("Send STOP_DONE message to client = %p\n", 
client_ctx); + return true; +} + +u32 vid_enc_pause(struct video_client_ctx *client_ctx) +{ + u32 vcd_status; + + if (!client_ctx) + return false; + + DBG("PAUSE command from client = %p\n", client_ctx); + vcd_status = vcd_pause(client_ctx->vcd_handle); + if (vcd_status) + return false; + return true; +} + +u32 vid_enc_resume(struct video_client_ctx *client_ctx) +{ + u32 vcd_status; + + if (!client_ctx) + return false; + + DBG("Resume command from client = %p\n", client_ctx); + vcd_status = vcd_resume(client_ctx->vcd_handle); + if (vcd_status) + return false; + return true; +} + +u32 vid_enc_flush(struct video_client_ctx *client_ctx, + struct venc_bufferflush *bufferflush) +{ + u32 mode; + u32 vcd_status; + + if (!client_ctx || !bufferflush) + return false; + + switch (bufferflush->flush_mode) { + case VEN_FLUSH_INPUT: + mode = VCD_FLUSH_INPUT; + break; + case VEN_FLUSH_OUTPUT: + mode = VCD_FLUSH_OUTPUT; + break; + case VEN_FLUSH_ALL: + mode = VCD_FLUSH_ALL; + break; + default: + return false; + break; + } + vcd_status = vcd_flush(client_ctx->vcd_handle, mode); + if (vcd_status) + return false; + return true; +} + +u32 vid_enc_get_buffer_req(struct video_client_ctx *client_ctx, + struct venc_allocatorproperty *venc_buf_req, u32 input_dir) +{ + enum vcd_buffer_type buffer; + struct vcd_buffer_requirement buffer_req; + u32 vcd_status; + + if (!client_ctx || !venc_buf_req) + return false; + + buffer = VCD_BUFFER_OUTPUT; + if (input_dir) + buffer = VCD_BUFFER_INPUT; + + vcd_status = vcd_get_buffer_requirements(client_ctx->vcd_handle, + buffer, &buffer_req); + if (vcd_status) + return false; + + venc_buf_req->actualcount = buffer_req.actual_count; + venc_buf_req->alignment = buffer_req.align; + venc_buf_req->datasize = buffer_req.size; + venc_buf_req->mincount = buffer_req.min_count; + venc_buf_req->maxcount = buffer_req.max_count; + venc_buf_req->alignment = buffer_req.align; + venc_buf_req->bufpoolid = buffer_req.buf_pool_id; + venc_buf_req->suffixsize = 0; + + return true; +} + +u32 vid_enc_set_buffer_req(struct video_client_ctx *client_ctx, + struct venc_allocatorproperty *venc_buf_req, u32 input_dir) +{ + enum vcd_buffer_type buffer; + struct vcd_buffer_requirement buffer_req; + u32 vcd_status; + + if (!client_ctx || !venc_buf_req) + return false; + + buffer = VCD_BUFFER_OUTPUT; + if (input_dir) + buffer = VCD_BUFFER_INPUT; + + buffer_req.actual_count = venc_buf_req->actualcount; + buffer_req.align = venc_buf_req->alignment; + buffer_req.size = venc_buf_req->datasize; + buffer_req.min_count = venc_buf_req->mincount; + buffer_req.max_count = venc_buf_req->maxcount; + buffer_req.align = venc_buf_req->alignment; + buffer_req.buf_pool_id = 0; + + vcd_status = vcd_set_buffer_requirements(client_ctx->vcd_handle, + buffer, &buffer_req); + + if (vcd_status) + return false; + return true; +} + +u32 vid_enc_set_buffer(struct video_client_ctx *client_ctx, + struct venc_bufferpayload *buf_info, enum venc_buffer_dir buf_type) +{ + u32 vcd_status = VCD_ERR_FAIL; + enum vcd_buffer_type buffer_vcd_type; + enum buffer_dir dir_buffer = BUFFER_TYPE_INPUT; + void __user *user_addr; + void *kern_addr; + phys_addr_t phys_addr; + unsigned long len; + int pmem_fd; + struct file *file; + struct buf_addr_table *buf_addr_table; + + s32 buf_index = -1; + + if (!client_ctx || !buf_info) + return false; + + user_addr = buf_info->buffer; + + if (buf_type == VEN_BUFFER_TYPE_OUTPUT) + dir_buffer = BUFFER_TYPE_OUTPUT; + + /* if buffer already set, ignore */ + if (vid_c_lookup_addr_table(client_ctx, dir_buffer, true, 
&user_addr, + &kern_addr, &phys_addr, &pmem_fd, &file, + &buf_index)) { + + DBG("%s: user_addr = %p is already set\n", __func__, user_addr); + return true; + } + + if (get_pmem_file(buf_info->fd, (unsigned long *)&phys_addr, + (unsigned long *)&kern_addr, + &len, &file)) { + ERR("%s: get_pmem_file failed\n", __func__); + return false; + } + put_pmem_file(file); + if (buf_type == VEN_BUFFER_TYPE_INPUT) { + buffer_vcd_type = VCD_BUFFER_INPUT; + client_ctx->num_of_input_buffers++; + + if (client_ctx->num_of_input_buffers > VID_ENC_MAX_NUM_OF_BUFF + ) { + ERR("%s: num_of_input_buffers reached max value" + " VID_ENC_MAX_NUM_OF_BUFF\n", __func__); + client_ctx->num_of_input_buffers--; + return false; + } + + buf_index = client_ctx->num_of_input_buffers - 1; + buf_addr_table = &client_ctx->input_buf_addr_table[buf_index]; + buf_addr_table->user_addr = buf_info->buffer; + kern_addr = (u8 *)kern_addr + buf_info->offset; + phys_addr += buf_info->offset; + buf_addr_table->kern_addr = kern_addr; + buf_addr_table->phys_addr = phys_addr; + buf_addr_table->pmem_fd = buf_info->fd; + buf_addr_table->file = file; + } else { + buffer_vcd_type = VCD_BUFFER_OUTPUT; + + client_ctx->num_of_output_buffers++; + + if (client_ctx->num_of_output_buffers > + VID_ENC_MAX_NUM_OF_BUFF) { + ERR("%s: num_of_outut_buffers reached max value" + " VID_ENC_MAX_NUM_OF_BUFF\n", __func__); + client_ctx->num_of_output_buffers--; + return false; + } + + buf_index = client_ctx->num_of_output_buffers - 1; + + buf_addr_table = &client_ctx->output_buf_addr_table[buf_index]; + kern_addr = (u8 *)kern_addr + buf_info->offset; + phys_addr += buf_info->offset; + buf_addr_table->user_addr = buf_info->buffer; + buf_addr_table->kern_addr = kern_addr; + buf_addr_table->phys_addr = phys_addr; + buf_addr_table->pmem_fd = buf_info->fd; + buf_addr_table->file = file; + } + + vcd_status = vcd_set_buffer(client_ctx->vcd_handle, buffer_vcd_type, + kern_addr, buf_info->sz); + + if (!vcd_status) + return true; + else + return false; +} + +u32 vid_enc_encode_frame(struct video_client_ctx *client_ctx, + struct venc_buffer *input_frame_info) +{ + struct vcd_frame_data vcd_input_buffer; + void __user *user_addr; + void *kern_addr; + phys_addr_t phys_addr; + int pmem_fd; + struct file *file; + s32 buffer_index = -1; + + u32 vcd_status = VCD_ERR_FAIL; + + if (!client_ctx || !input_frame_info) + return false; + + user_addr = input_frame_info->addr; + + if (!vid_c_lookup_addr_table(client_ctx, BUFFER_TYPE_INPUT, true, + &user_addr, &kern_addr, &phys_addr, &pmem_fd, &file, + &buffer_index)) { + ERR("%s: kernel_vaddr not found\n", __func__); + return false; + } + + /* kern_addr is found. 
send the frame to VCD */ + memset((void *)&vcd_input_buffer, 0, sizeof(vcd_input_buffer)); + + vcd_input_buffer.virt_addr = (u8 *)kern_addr + input_frame_info->offset; + vcd_input_buffer.offset = input_frame_info->offset; + vcd_input_buffer.client_data = input_frame_info->clientdata; + vcd_input_buffer.ip_frm_tag = (u32)input_frame_info->clientdata; + vcd_input_buffer.data_len = input_frame_info->len; + vcd_input_buffer.time_stamp = input_frame_info->timestamp; + + /* Rely on VCD using the same flags as OMX */ + vcd_input_buffer.flags = input_frame_info->flags; + + vcd_status = vcd_encode_frame(client_ctx->vcd_handle, + &vcd_input_buffer); + + if (vcd_status) { + ERR("%s: vcd_encode_frame failed = %u\n", __func__, vcd_status); + return false; + } + return true; +} + +u32 vid_enc_fill_output_buffer(struct video_client_ctx *client_ctx, + struct venc_buffer *output_frame_info) +{ + void __user *user_addr; + void *kern_addr; + phys_addr_t phys_addr; + int pmem_fd; + struct file *file; + s32 buffer_index = -1; + u32 vcd_status = VCD_ERR_FAIL; + + struct vcd_frame_data vcd_frame; + + if (!client_ctx || !output_frame_info) + return false; + + user_addr = output_frame_info->addr; + + if (!vid_c_lookup_addr_table(client_ctx, BUFFER_TYPE_OUTPUT, + true, &user_addr, &kern_addr, &phys_addr, &pmem_fd, + &file, &buffer_index)) { + ERR("%s: kernel_vaddr not found\n", __func__); + return false; + } + + memset((void *)&vcd_frame, 0, sizeof(vcd_frame)); + vcd_frame.virt_addr = kern_addr; + vcd_frame.client_data = output_frame_info->clientdata; + vcd_frame.alloc_len = output_frame_info->sz; + + vcd_status = vcd_fill_output_buffer(client_ctx->vcd_handle, &vcd_frame); + if (vcd_status) { + ERR("%s: vcd_fill_output_buffer %u\n", __func__, vcd_status); + return false; + } + return true; +} diff --git a/drivers/misc/video_core/720p/enc/venc_internal.h b/drivers/misc/video_core/720p/enc/venc_internal.h new file mode 100644 index 0000000000000..517783acfbcc0 --- /dev/null +++ b/drivers/misc/video_core/720p/enc/venc_internal.h @@ -0,0 +1,160 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef VENC_INTERNAL_H +#define VENC_INTERNAL_H + +#include +#include + +#include "video_core_init.h" + +#define VID_ENC_MAX_ENCODER_CLIENTS 16 +#define VID_ENC_MAX_NUM_OF_BUFF 100 + +enum venc_buffer_dir { + VEN_BUFFER_TYPE_INPUT, + VEN_BUFFER_TYPE_OUTPUT +}; + +struct vid_enc_msg { + struct list_head list; + struct venc_msg venc_msg_info; +}; + +struct vid_enc_dev { + + struct cdev cdev; + struct device *device; + resource_size_t phys_base; + void __iomem *virt_base; + unsigned int irq; + struct clk *hclk; + struct clk *hclk_div2; + struct clk *pclk; + unsigned long hclk_rate; + struct mutex lock; + s32 device_handle; + struct video_client_ctx venc_clients[VID_ENC_MAX_ENCODER_CLIENTS]; + u32 num_clients; +}; + +u32 vid_enc_set_get_base_cfg(struct video_client_ctx *client_ctx, + struct venc_basecfg *base_config, u32 set_flag); + +u32 vid_enc_set_get_inputformat(struct video_client_ctx *client_ctx, + u32 *input_format, u32 set_flag); + +u32 vid_enc_set_get_codec(struct video_client_ctx *client_ctx, u32 *codec_type, + u32 set_flag); + +u32 vid_enc_set_get_framesize(struct video_client_ctx *client_ctx, + u32 *height, u32 *width, u32 set_flag); + +u32 vid_enc_set_get_bitrate(struct video_client_ctx *client_ctx, + struct venc_targetbitrate *venc_bitrate, u32 set_flag); + +u32 vid_enc_set_get_framerate(struct video_client_ctx *client_ctx, + struct venc_framerate *frame_rate, u32 set_flag); + +u32 vid_enc_set_get_live_mode(struct video_client_ctx *client_ctx, + struct venc_switch *encoder_switch, u32 set_flag); + +u32 vid_enc_set_get_short_header(struct video_client_ctx *client_ctx, + struct venc_switch *encoder_switch, u32 set_flag); + +u32 vid_enc_set_get_profile(struct video_client_ctx *client_ctx, + struct venc_profile *profile, u32 set_flag); + +u32 vid_enc_set_get_profile_level(struct video_client_ctx *client_ctx, + struct ven_profilelevel *profile_level, u32 set_flag); + +u32 vid_enc_set_get_session_qp(struct video_client_ctx *client_ctx, + struct venc_sessionqp *session_qp, u32 set_flag); + +u32 vid_enc_set_get_intraperiod(struct video_client_ctx *client_ctx, + struct venc_intraperiod *intraperiod, u32 set_flag); + +u32 vid_enc_request_iframe(struct video_client_ctx *client_ctx); + +u32 vid_enc_get_sequence_header(struct video_client_ctx *client_ctx, + struct venc_seqheader *seq_header); + +u32 vid_enc_set_get_entropy_cfg(struct video_client_ctx *client_ctx, + struct venc_entropycfg *entropy_cfg, u32 set_flag); + +u32 vid_enc_set_get_dbcfg(struct video_client_ctx *client_ctx, + struct venc_dbcfg *dbcfg, u32 set_flag); + +u32 vid_enc_set_get_intrarefresh(struct video_client_ctx *client_ctx, + struct venc_intrarefresh *intrarefresh, u32 set_flag); + +u32 vid_enc_set_get_multiclicecfg(struct video_client_ctx *client_ctx, + struct venc_multiclicecfg *multiclicecfg, u32 set_flag); + +u32 vid_enc_set_get_ratectrlcfg(struct video_client_ctx *client_ctx, + struct venc_ratectrlcfg *ratectrlcfg, u32 set_flag); + +u32 vid_enc_set_get_voptimingcfg(struct 
video_client_ctx *client_ctx, + struct venc_voptimingcfg *voptimingcfg, u32 set_flag); + +u32 vid_enc_set_get_headerextension(struct video_client_ctx *client_ctx, + struct venc_headerextension *headerextension, u32 set_flag); + +u32 vid_enc_set_get_qprange(struct video_client_ctx *client_ctx, + struct venc_qprange *qprange, u32 set_flag); + +u32 vid_enc_start(struct video_client_ctx *client_ctx); + +u32 vid_enc_stop(struct video_client_ctx *client_ctx); + +u32 vid_enc_pause(struct video_client_ctx *client_ctx); + +u32 vid_enc_resume(struct video_client_ctx *client_ctx); + +u32 vid_enc_flush(struct video_client_ctx *client_ctx, + struct venc_bufferflush *bufferflush); + +u32 vid_enc_get_buffer_req(struct video_client_ctx *client_ctx, + struct venc_allocatorproperty *venc_buf_req, u32 input_dir); + +u32 vid_enc_set_buffer_req(struct video_client_ctx *client_ctx, + struct venc_allocatorproperty *venc_buf_req, u32 input_dir); + +u32 vid_enc_set_buffer(struct video_client_ctx *client_ctx, + struct venc_bufferpayload *buffer_info, + enum venc_buffer_dir buffer_type); + +u32 vid_enc_encode_frame(struct video_client_ctx *client_ctx, + struct venc_buffer *input_frame_info); + +u32 vid_enc_fill_output_buffer(struct video_client_ctx *client_ctx, + struct venc_buffer *output_frame_info); + +#endif diff --git a/drivers/misc/video_core/720p/init/video_core_init.c b/drivers/misc/video_core/720p/init/video_core_init.c new file mode 100644 index 0000000000000..9af634cd16c4d --- /dev/null +++ b/drivers/misc/video_core/720p/init/video_core_init.c @@ -0,0 +1,766 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "vcd_ddl_firmware.h" +#include "vcd_api.h" +#include "video_core_init_internal.h" +#include "video_core_init.h" + +#if DEBUG +#define DBG(x...) printk(KERN_DEBUG x) +#else +#define DBG(x...) +#endif + +#define VID_C_NAME "msm_vidc_reg" + +#define ERR(x...) 
printk(KERN_ERR x) + +static struct vid_c_dev *vidc_dev; +static dev_t vidc_dev_num; +static struct class *vidc_class; + +static const struct file_operations vid_c_fops = { + .owner = THIS_MODULE, + .open = NULL, + .release = NULL, + .ioctl = NULL, +}; + +struct workqueue_struct *vid_c_wq; +struct workqueue_struct *vidc_timer_wq; +static irqreturn_t vid_c_isr(int irq, void *dev); +static spinlock_t vidc_spin_lock; + + +static void vid_c_timer_fn(unsigned long data) +{ + unsigned long flag; + struct vid_c_timer *hw_timer = NULL; + + DBG("%s: Timer expired\n", __func__); + spin_lock_irqsave(&vidc_spin_lock, flag); + hw_timer = (struct vid_c_timer *)data; + list_add_tail(&hw_timer->list, &vidc_dev->vidc_timer_queue); + spin_unlock_irqrestore(&vidc_spin_lock, flag); + DBG("Queue the work for timer\n"); + queue_work(vidc_timer_wq, &vidc_dev->vidc_timer_worker); +} + +static void vid_c_timer_handler(struct work_struct *work) +{ + unsigned long flag = 0; + u32 islist_empty = 0; + struct vid_c_timer *hw_timer = NULL; + + DBG("%s: Timer expired\n", __func__); + do { + spin_lock_irqsave(&vidc_spin_lock, flag); + islist_empty = list_empty(&vidc_dev->vidc_timer_queue); + if (!islist_empty) { + hw_timer = list_first_entry(&vidc_dev->vidc_timer_queue, + struct vid_c_timer, list); + list_del(&hw_timer->list); + } + spin_unlock_irqrestore(&vidc_spin_lock, flag); + if (!islist_empty && hw_timer && hw_timer->cb_func) + hw_timer->cb_func(hw_timer->userdata); + } while (!islist_empty); +} + +static void vid_c_work_handler(struct work_struct *work) +{ + DBG("vid_c_work_handler()"); + vcd_read_and_clear_interrupt(); + vcd_response_handler(); + enable_irq(vidc_dev->irq); + DBG("vid_c_work_handler() done"); +} + +static DECLARE_WORK(vid_c_work, vid_c_work_handler); + +static int __init vid_c_720p_probe(struct platform_device *pdev) +{ + int rc; + struct resource *resource; + DBG("Enter %s\n", __func__); + + if (pdev->id) { + ERR("Invalid platform device ID = %d\n", pdev->id); + return -EINVAL; + } + vidc_dev->irq = platform_get_irq(pdev, 0); + if (unlikely(vidc_dev->irq < 0)) { + ERR("%s: Invalid irq = %d\n", __func__, vidc_dev->irq); + return -ENXIO; + } + + resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (unlikely(!resource)) { + ERR("%s: Invalid resource\n", __func__); + return -ENXIO; + } + + vidc_dev->phys_base = resource->start; + vidc_dev->virt_base = ioremap(resource->start, + resource->end - resource->start + 1); + + if (!vidc_dev->virt_base) { + ERR("%s: ioremap failed\n", __func__); + return -ENOMEM; + } + vidc_dev->device = &pdev->dev; + mutex_init(&vidc_dev->lock); + + vid_c_wq = create_singlethread_workqueue("vid_c_worker_queue"); + if (!vid_c_wq) { + ERR("%s: create workqueue failed\n", __func__); + return -ENOMEM; + } + + rc = vcd_fw_init(vidc_dev->device); + if (rc) + ERR("%s: failed to prepare firmware %d\n", __func__, rc); + + return rc; +} + +static int __devexit vid_c_720p_remove(struct platform_device *pdev) +{ + if (pdev->id) { + ERR("Invalid plaform device ID = %d\n", pdev->id); + return -EINVAL; + } + vcd_fw_exit(); + return 0; +} + +static struct platform_driver msm_vid_c_720p_platform_driver = { + .probe = vid_c_720p_probe, + .remove = vid_c_720p_remove, + .driver = { + .name = "msm_vidc_720p", + }, +}; + +static void __exit vid_c_exit(void) +{ + platform_driver_unregister(&msm_vid_c_720p_platform_driver); +} + +static irqreturn_t vid_c_isr(int irq, void *dev) +{ + DBG("vid_c_isr() %d\n", irq); + disable_irq_nosync(irq); + queue_work(vid_c_wq, &vid_c_work); + return 
IRQ_HANDLED; +} + +static int __init vid_c_init(void) +{ + int rc = 0; + struct device *class_devp; + + vidc_dev = kzalloc(sizeof(struct vid_c_dev), GFP_KERNEL); + if (!vidc_dev) { + ERR("%s Unable to allocate memory for vid_c_dev\n", + __func__); + return -ENOMEM; + } + + rc = alloc_chrdev_region(&vidc_dev_num, 0, 1, VID_C_NAME); + if (rc < 0) { + ERR("%s: alloc_chrdev_region failed %d\n", __func__, rc); + goto error_vid_c_alloc_chrdev_region; + } + + vidc_class = class_create(THIS_MODULE, VID_C_NAME); + if (IS_ERR(vidc_class)) { + rc = PTR_ERR(vidc_class); + ERR("%s: couldn't create vid_c_class %d\n", __func__, rc); + goto error_vid_c_class_create; + } + + class_devp = device_create(vidc_class, NULL, vidc_dev_num, NULL, + VID_C_NAME); + + if (IS_ERR(class_devp)) { + rc = PTR_ERR(class_devp); + ERR("%s: class device_create failed %d\n", __func__, rc); + goto error_vid_c_class_device_create; + } + + cdev_init(&vidc_dev->cdev, &vid_c_fops); + vidc_dev->cdev.owner = THIS_MODULE; + rc = cdev_add(&(vidc_dev->cdev), vidc_dev_num, 1); + + if (rc < 0) { + ERR("%s: cdev_add failed %d\n", __func__, rc); + goto error_vid_c_cdev_add; + } + + rc = platform_driver_register(&msm_vid_c_720p_platform_driver); + if (rc) { + ERR("%s failed to load\n", __func__); + goto error_vid_c_platfom_register; + } + + rc = request_irq(vidc_dev->irq, vid_c_isr, IRQF_TRIGGER_HIGH, + "vid_c", vidc_dev->device); + if (unlikely(rc)) { + ERR("%s:request_irq failed\n", __func__); + goto error_vid_c_platfom_register; + } + + vidc_timer_wq = create_singlethread_workqueue("vidc_timer_wq"); + if (!vidc_timer_wq) { + ERR("%s: create workqueue failed\n", __func__); + rc = -ENOMEM; + goto error_vid_c_platfom_register; + } + + DBG("Disabling IRQ in %s\n", __func__); + disable_irq_nosync(vidc_dev->irq); + INIT_WORK(&vidc_dev->vidc_timer_worker, vid_c_timer_handler); + spin_lock_init(&vidc_spin_lock); + INIT_LIST_HEAD(&vidc_dev->vidc_timer_queue); + vidc_dev->clock_enabled = 0; + vidc_dev->ref_count = 0; + vidc_dev->firmware_refcount = 0; + vidc_dev->get_firmware = 0; + + return 0; + +error_vid_c_platfom_register: + cdev_del(&(vidc_dev->cdev)); +error_vid_c_cdev_add: + device_destroy(vidc_class, vidc_dev_num); +error_vid_c_class_device_create: + class_destroy(vidc_class); +error_vid_c_class_create: + unregister_chrdev_region(vidc_dev_num, 1); +error_vid_c_alloc_chrdev_region: + kfree(vidc_dev); + + return rc; +} + +void __iomem *vid_c_get_ioaddr() +{ + return vidc_dev->virt_base; +} +EXPORT_SYMBOL(vid_c_get_ioaddr); +#ifdef USE_RES_TRACKER + +u32 vid_c_enable_pwr_rail() +{ + int rc; + + mutex_lock(&vidc_dev->lock); + + if (vidc_dev->rail_enabled) { + mutex_unlock(&vidc_dev->lock); + return true; + } + + //TODO: internal_pwr_rail_mode(MFC_CLK_ID, MANUAL) + + vidc_dev->pclk = clk_get(vidc_dev->device, "mfc_pclk"); + if (IS_ERR(vidc_dev->pclk)) { + ERR("%s: mfc_pclk get failed\n", __func__); + goto err; + } + + vidc_dev->hclk = clk_get(vidc_dev->device, "mfc_clk"); + if (IS_ERR(vidc_dev->hclk)) { + ERR("%s: mfc_clk get failed\n", __func__); + goto err; + } + + vidc_dev->hclk_div2 = clk_get(vidc_dev->device, "mfc_div2_clk"); + if (IS_ERR(vidc_dev->hclk_div2)) { + ERR("%s: mfc_div2_clk get failed\n", __func__); + goto err; + } + + //TODO: internal_pwr_rail_ctl(MFC_CLK_ID, 1) + + //TODO msleep must die + msleep(20); + + rc = clk_reset(vidc_dev->pclk, CLK_RESET_DEASSERT); + if (rc) { + ERR("clk_reset failed %d\n", rc); + goto err; + } + //TODO msleep must die + msleep(20); + + vidc_dev->rail_enabled = 1; + mutex_unlock(&vidc_dev->lock); + 
return true; + +err: + if (!IS_ERR(vidc_dev->pclk)) + clk_put(vidc_dev->pclk); + if (!IS_ERR(vidc_dev->hclk)) + clk_put(vidc_dev->hclk); + if (!IS_ERR(vidc_dev->hclk_div2)) + clk_put(vidc_dev->hclk_div2); + mutex_unlock(&vidc_dev->lock); + return false; +} +EXPORT_SYMBOL(vid_c_enable_pwr_rail); + +u32 vid_c_disable_pwr_rail() +{ + int rc = -1; + mutex_lock(&vidc_dev->lock); + + if (vidc_dev->clock_enabled) { + mutex_unlock(&vidc_dev->lock); + DBG("Calling CLK disable in power down\n"); + vid_c_disable_clk(); + mutex_lock(&vidc_dev->lock); + } + + if (!vidc_dev->rail_enabled) { + mutex_unlock(&vidc_dev->lock); + return false; + } + + vidc_dev->rail_enabled = 0; + rc = clk_reset(vidc_dev->pclk, CLK_RESET_ASSERT); + if (rc) { + ERR("clk_reset failed %d\n", rc); + mutex_unlock(&vidc_dev->lock); + return false; + } + msleep(20); + + //TODO: internal_pwr_rail_ctl(MFC_CLK_ID, 0) + + clk_put(vidc_dev->hclk_div2); + clk_put(vidc_dev->hclk); + clk_put(vidc_dev->pclk); + + mutex_unlock(&vidc_dev->lock); + + return true; +} +EXPORT_SYMBOL(vid_c_disable_pwr_rail); + +u32 vid_c_enable_clk() +{ + mutex_lock(&vidc_dev->lock); + + if (!vidc_dev->rail_enabled) { + goto err; + } + if (vidc_dev->clock_enabled) { + mutex_unlock(&vidc_dev->lock); + return true; + } + + DBG("Enabling IRQ in %s\n", __func__); + enable_irq(vidc_dev->irq); + DBG("%s: Enabling the clocks ...\n", __func__); + + if (clk_enable(vidc_dev->pclk)) { + ERR("vidc pclk enable failed\n"); + goto err; + } + + if (clk_enable(vidc_dev->hclk)) { + ERR("vidc hclk enable failed\n"); + goto err; + } + + if (clk_enable(vidc_dev->hclk_div2)) { + ERR("vidc hclk_div2 enable failed\n"); + goto err; + } + + vidc_dev->clock_enabled = 1; + mutex_unlock(&vidc_dev->lock); + return true; +err: + mutex_unlock(&vidc_dev->lock); + return false; +} +EXPORT_SYMBOL(vid_c_enable_clk); + +u32 vid_c_sel_clk_rate(unsigned long hclk_rate) +{ + mutex_lock(&vidc_dev->lock); + if (clk_set_rate(vidc_dev->hclk, hclk_rate)) { + ERR("vidc hclk set rate failed\n"); + mutex_unlock(&vidc_dev->lock); + return false; + } + vidc_dev->hclk_rate = hclk_rate; + mutex_unlock(&vidc_dev->lock); + return true; +} +EXPORT_SYMBOL(vid_c_sel_clk_rate); + +u32 vid_c_get_clk_rate(unsigned long *phclk_rate) +{ + if (!phclk_rate) { + ERR("vid_c_get_clk_rate(): phclk_rate is NULL\n"); + return false; + } + mutex_lock(&vidc_dev->lock); + *phclk_rate = clk_get_rate(vidc_dev->hclk); + if (!(*phclk_rate)) { + ERR("vidc hclk get rate failed\n"); + mutex_unlock(&vidc_dev->lock); + return false; + } + mutex_unlock(&vidc_dev->lock); + return true; +} +EXPORT_SYMBOL(vid_c_get_clk_rate); + +u32 vid_c_disable_clk(void) +{ + mutex_lock(&vidc_dev->lock); + + if (!vidc_dev->clock_enabled) { + mutex_unlock(&vidc_dev->lock); + return false; + } + + DBG("Disabling IRQ in %s\n", __func__); + disable_irq_nosync(vidc_dev->irq); + DBG("%s: Disabling the clocks ...\n", __func__); + + vidc_dev->clock_enabled = 0; + clk_disable(vidc_dev->hclk); + clk_disable(vidc_dev->hclk_div2); + clk_disable(vidc_dev->pclk); + + mutex_unlock(&vidc_dev->lock); + + return true; +} +EXPORT_SYMBOL(vid_c_disable_clk); + +//TODO: consider deleting USE_RES_TRACKER +#else + +u32 vid_c_enable_clk(unsigned long hclk_rate) +{ + int rc = -1; + mutex_lock(&vidc_dev->lock); + vidc_dev->ref_count++; + + if (!vidc_dev->clock_enabled) { + DBG("Enabling IRQ in %s()\n", __func__); + enable_irq(vidc_dev->irq); + + rc = internal_pwr_rail_mode + (PWR_RAIL_MFC_CLK, PWR_RAIL_CTL_MANUAL); + if (rc) { + ERR("%s(): internal_pwr_rail_mode failed %d\n", + __func__, 
rc);
+			mutex_unlock(&vidc_dev->lock);
+			return false;
+		}
+		DBG("%s(): internal_pwr_rail_mode Success %d\n",
+			__func__, rc);
+
+		vidc_dev->pclk =
+			clk_get(vidc_dev->device, "mfc_pclk");
+
+		if (IS_ERR(vidc_dev->pclk)) {
+			ERR("%s(): mfc_pclk get failed\n", __func__);
+
+			mutex_unlock(&vidc_dev->lock);
+			return false;
+		}
+
+		vidc_dev->hclk =
+			clk_get(vidc_dev->device, "mfc_clk");
+
+		if (IS_ERR(vidc_dev->hclk)) {
+			ERR("%s(): mfc_clk get failed\n", __func__);
+
+			clk_put(vidc_dev->pclk);
+			mutex_unlock(&vidc_dev->lock);
+			return false;
+		}
+
+		vidc_dev->hclk_div2 =
+			clk_get(vidc_dev->device, "mfc_div2_clk");
+
+		if (IS_ERR(vidc_dev->hclk_div2)) {
+			ERR("%s(): mfc_div2_clk get failed\n", __func__);
+
+			clk_put(vidc_dev->pclk);
+			clk_put(vidc_dev->hclk);
+			mutex_unlock(&vidc_dev->lock);
+			return false;
+		}
+
+		vidc_dev->hclk_rate = hclk_rate;
+
+		if (clk_set_rate(vidc_dev->hclk,
+			vidc_dev->hclk_rate)) {
+			ERR("vid_c hclk set rate failed\n");
+			clk_put(vidc_dev->pclk);
+			clk_put(vidc_dev->hclk);
+			clk_put(vidc_dev->hclk_div2);
+			mutex_unlock(&vidc_dev->lock);
+			return false;
+		}
+
+		if (clk_enable(vidc_dev->pclk)) {
+			ERR("vid_c pclk Enable failed\n");
+
+			clk_put(vidc_dev->hclk);
+			clk_put(vidc_dev->hclk_div2);
+			mutex_unlock(&vidc_dev->lock);
+			return false;
+		}
+
+		if (clk_enable(vidc_dev->hclk)) {
+			ERR("vid_c hclk Enable failed\n");
+			clk_put(vidc_dev->pclk);
+			clk_put(vidc_dev->hclk_div2);
+			mutex_unlock(&vidc_dev->lock);
+			return false;
+		}
+
+		if (clk_enable(vidc_dev->hclk_div2)) {
+			ERR("vid_c hclk_div2 Enable failed\n");
+			clk_put(vidc_dev->hclk);
+			clk_put(vidc_dev->pclk);
+			mutex_unlock(&vidc_dev->lock);
+			return false;
+		}
+		msleep(20);
+		rc = internal_pwr_rail_ctl(PWR_RAIL_MFC_CLK, 1);
+		if (rc) {
+			ERR("internal_pwr_rail_ctl failed %d\n", rc);
+			mutex_unlock(&vidc_dev->lock);
+			return false;
+		}
+		DBG("%s(): internal_pwr_rail_ctl Success %d\n",
+			__func__, rc);
+		msleep(20);
+		rc = clk_reset(vidc_dev->pclk, CLK_RESET_DEASSERT);
+		if (rc) {
+			ERR("clk_reset failed %d\n", rc);
+			mutex_unlock(&vidc_dev->lock);
+			return false;
+		}
+		msleep(20);
+	}
+	vidc_dev->clock_enabled = 1;
+	mutex_unlock(&vidc_dev->lock);
+	return true;
+}
+EXPORT_SYMBOL(vid_c_enable_clk);
+
+u32 vid_c_disable_clk(void)
+{
+	int rc = -1;
+	mutex_lock(&vidc_dev->lock);
+
+	if (!vidc_dev->ref_count ||
+		!vidc_dev->clock_enabled) {
+		mutex_unlock(&vidc_dev->lock);
+		return false;
+	}
+
+	if (vidc_dev->ref_count > 0)
+		vidc_dev->ref_count--;
+
+	if (!vidc_dev->ref_count) {
+		DBG("Disabling IRQ in %s()\n", __func__);
+		disable_irq_nosync(vidc_dev->irq);
+		rc = clk_reset(vidc_dev->pclk, CLK_RESET_ASSERT);
+		if (rc) {
+			ERR("clk_reset failed %d\n", rc);
+			mutex_unlock(&vidc_dev->lock);
+			return false;
+		}
+		msleep(20);
+
+		rc = internal_pwr_rail_ctl(PWR_RAIL_MFC_CLK, 0);
+		if (rc) {
+			ERR("internal_pwr_rail_ctl failed %d\n", rc);
+			mutex_unlock(&vidc_dev->lock);
+			return false;
+		}
+
+		vidc_dev->clock_enabled = 0;
+		clk_disable(vidc_dev->hclk);
+		clk_disable(vidc_dev->hclk_div2);
+		clk_disable(vidc_dev->pclk);
+
+		clk_put(vidc_dev->hclk_div2);
+		clk_put(vidc_dev->hclk);
+		clk_put(vidc_dev->pclk);
+	}
+	mutex_unlock(&vidc_dev->lock);
+	return true;
+}
+EXPORT_SYMBOL(vid_c_disable_clk);
+
+#endif
+
+u32 vid_c_lookup_addr_table(struct video_client_ctx *client_ctx,
+	enum buffer_dir buffer_type, u32 search_with_user_vaddr,
+	void __user **user_addr, void **kern_addr, phys_addr_t *phys_addr,
+	int *pmem_fd, struct file **file, s32 *buffer_index)
+{
+	u32 num_of_buffers;
+	u32 i;
+	struct buf_addr_table *buf_addr_table;
+	u32 found = false;
+
+	if (!client_ctx)
+		return false;
+
+	if (buffer_type == BUFFER_TYPE_INPUT) {
+		buf_addr_table = client_ctx->input_buf_addr_table;
+		num_of_buffers =
client_ctx->num_of_input_buffers; + DBG("%s: buffer_type = INPUT\n", __func__); + } else { + buf_addr_table = client_ctx->output_buf_addr_table; + num_of_buffers = client_ctx->num_of_output_buffers; + DBG("%s: buffer_type = OUTPUT\n", __func__); + } + + for (i = 0; i < num_of_buffers; ++i) { + if (search_with_user_vaddr) { + if (*user_addr == buf_addr_table[i].user_addr) { + *kern_addr = buf_addr_table[i].kern_addr; + found = true; + DBG("%s: client_ctx=%p user_addr=%p is found\n", + __func__, client_ctx, *user_addr); + break; + } + } else { + if (*kern_addr == buf_addr_table[i].kern_addr) { + *user_addr = buf_addr_table[i].user_addr; + found = true; + DBG("%s: client_ctx=%p kern_addr=%p is found", + __func__, client_ctx, *kern_addr); + break; + } + } + } + + if (!found) { + if (search_with_user_vaddr) + DBG("%s: client_ctx=%p user_addr=%p not found\n", + __func__, client_ctx, *user_addr); + else + DBG("%s: client_ctx=%p kern_addr=%p not found\n", + __func__, client_ctx, *kern_addr); + return false; + } + + *phys_addr = buf_addr_table[i].phys_addr; + *pmem_fd = buf_addr_table[i].pmem_fd; + *file = buf_addr_table[i].file; + *buffer_index = i; + + if (search_with_user_vaddr) + DBG("kern_addr=%p phys_addr=%X pmem_fd=%d " + "struct *file=%p buffer_index=%d\n", *kern_addr, + *phys_addr, *pmem_fd, *file, *buffer_index); + else + DBG("user_addr=%p phys_addr=%X pmem_fd=%d, " + "struct *file=%p buffer_index=%d\n", *user_addr, + *phys_addr, *pmem_fd, *file, *buffer_index); + return true; +} +EXPORT_SYMBOL(vid_c_lookup_addr_table); + +u32 vid_c_timer_create(void (*pf_timer_handler)(void *), void *user_data, + void **pp_timer_handle) +{ + struct vid_c_timer *hw_timer = NULL; + if (!pf_timer_handler || !pp_timer_handle) { + DBG("%s: timer creation failed\n", __func__); + return false; + } + hw_timer = kzalloc(sizeof(struct vid_c_timer), GFP_KERNEL); + if (!hw_timer) { + DBG("%s: timer creation failed in allocation\n", __func__); + return false; + } + init_timer(&hw_timer->hw_timeout); + hw_timer->hw_timeout.data = (unsigned long)hw_timer; + hw_timer->hw_timeout.function = vid_c_timer_fn; + hw_timer->cb_func = pf_timer_handler; + hw_timer->userdata = user_data; + *pp_timer_handle = hw_timer; + return true; +} +EXPORT_SYMBOL(vid_c_timer_create); + +void vid_c_timer_release(void *timer_handle) +{ + kfree(timer_handle); +} +EXPORT_SYMBOL(vid_c_timer_release); + +void vid_c_timer_start(void *timer_handle, u32 time_out) +{ + struct vid_c_timer *hw_timer = timer_handle; + DBG("%s: start timer\n ", __func__); + if (hw_timer) { + hw_timer->hw_timeout.expires = jiffies + 1 * HZ; + add_timer(&hw_timer->hw_timeout); + } +} +EXPORT_SYMBOL(vid_c_timer_start); + +void vid_c_timer_stop(void *timer_handle) +{ + struct vid_c_timer *hw_timer = timer_handle; + DBG("%s: stop timer\n ", __func__); + if (hw_timer) + del_timer(&hw_timer->hw_timeout); +} +EXPORT_SYMBOL(vid_c_timer_stop); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Video decoder/encoder driver Init Module"); +MODULE_VERSION("1.0"); +module_init(vid_c_init); +module_exit(vid_c_exit); diff --git a/drivers/misc/video_core/720p/init/video_core_init.h b/drivers/misc/video_core/720p/init/video_core_init.h new file mode 100644 index 0000000000000..d8e40f35aa359 --- /dev/null +++ b/drivers/misc/video_core/720p/init/video_core_init.h @@ -0,0 +1,95 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef VIDEO_CORE_INIT_H +#define VIDEO_CORE_INIT_H + +#include "video_core_type.h" + +#define MAX_VIDEO_NUM_OF_BUFF 100 + +enum buffer_dir { + BUFFER_TYPE_INPUT, + BUFFER_TYPE_OUTPUT +}; + +struct buf_addr_table { + void __user *user_addr; + void *kern_addr; + phys_addr_t phys_addr; + int pmem_fd; + struct file *file; +}; + +struct video_client_ctx { + void *vcd_handle; + u32 num_of_input_buffers; + u32 num_of_output_buffers; + struct buf_addr_table input_buf_addr_table[MAX_VIDEO_NUM_OF_BUFF]; + struct buf_addr_table output_buf_addr_table[MAX_VIDEO_NUM_OF_BUFF]; + struct list_head msg_queue; + struct mutex msg_queue_lock; + wait_queue_head_t msg_wait; + struct completion event; + u32 event_status; + u32 seq_header_set; + u32 stop_msg; +}; + +void __iomem *vid_c_get_ioaddr(void); + +#ifdef USE_RES_TRACKER + +u32 vid_c_sel_clk_rate(unsigned long hclk_rate); +u32 vid_c_get_clk_rate(unsigned long *phclk_rate); +u32 vid_c_enable_clk(void); +u32 vid_c_disable_clk(void); +u32 vid_c_enable_pwr_rail(void); +u32 vid_c_disable_pwr_rail(void); + +#else +u32 vid_c_enable_clk(unsigned long hclk_rate); +u32 vid_c_disable_clk(void); +#endif + +int vid_c_load_firmware(void); +void vid_c_release_firmware(void); +u32 vid_c_lookup_addr_table(struct video_client_ctx *client_ctx, + enum buffer_dir buffer_type, u32 search_with_user_vaddr, + void __user **user_addr, void **kernel_addr, phys_addr_t *phys_addr, + int *pmem_fd, struct file **file, s32 *buffer_index); + +u32 vid_c_timer_create(void (*pf_timer_handler)(void *), + void *user_data, void **pp_timer_handle); +void vid_c_timer_release(void *timer_handle); +void vid_c_timer_start(void *timer_handle, u32 time_out); +void vid_c_timer_stop(void *timer_handle); + + +#endif diff --git a/drivers/misc/video_core/720p/init/video_core_init_internal.h b/drivers/misc/video_core/720p/init/video_core_init_internal.h new file mode 100644 index 0000000000000..b72e9f26f811d --- /dev/null +++ b/drivers/misc/video_core/720p/init/video_core_init_internal.h @@ -0,0 +1,63 @@ +/* Copyright (c) 
2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef VIDEO_CORE_INIT_INTERNAL_H +#define VIDEO_CORE_INIT_INTERNAL_H + +#include + +struct vid_c_timer { + struct list_head list; + struct timer_list hw_timeout; + void (*cb_func)(void *); + void *userdata; +}; + +struct vid_c_dev { + struct cdev cdev; + struct device *device; + resource_size_t phys_base; + void __iomem *virt_base; + unsigned int irq; + struct clk *hclk; + struct clk *hclk_div2; + struct clk *pclk; + unsigned long hclk_rate; + unsigned int clock_enabled; + unsigned int rail_enabled; + unsigned int ref_count; + unsigned int firmware_refcount; + unsigned int get_firmware; + struct mutex lock; + s32 device_handle; + struct list_head vidc_timer_queue; + struct work_struct vidc_timer_worker; +}; + +#endif diff --git a/drivers/misc/video_core/720p/resource_tracker/vcd_res_tracker.c b/drivers/misc/video_core/720p/resource_tracker/vcd_res_tracker.c new file mode 100644 index 0000000000000..7ad199bf285f6 --- /dev/null +++ b/drivers/misc/video_core/720p/resource_tracker/vcd_res_tracker.c @@ -0,0 +1,275 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ + +#include "video_core_type.h" +#include "vcd_res_tracker.h" +#include "video_core_init.h" + +#include +#ifdef AXI_CLK_SCALING +#include +#endif + +#define MSM_AXI_QOS_NAME "msm_vidc_reg" + +#define QVGA_PERF_LEVEL (300 * 30) +#define VGA_PERF_LEVEL (1200 * 30) +#define WVGA_PERF_LEVEL (1500 * 30) + +static unsigned int mfc_clk_freq_table[3] = { + 61440000, 122880000, 170667000 +}; + +#ifndef CONFIG_MSM_NPA_SYSTEM_BUS +static unsigned int axi_clk_freq_table_enc[2] = { + 122880, 192000 +}; +static unsigned int axi_clk_freq_table_dec[2] = { + 122880, 192000 +}; +#else +static unsigned int axi_clk_freq_table_enc[2] = { + MSM_AXI_FLOW_VIDEO_RECORDING_720P, + MSM_AXI_FLOW_VIDEO_RECORDING_720P +}; +static unsigned int axi_clk_freq_table_dec[2] = { + MSM_AXI_FLOW_VIDEO_PLAYBACK_720P, + MSM_AXI_FLOW_VIDEO_PLAYBACK_720P +}; +#endif +static u32 res_trk_convert_freq_to_perf_lvl(u64 freq) +{ + u64 perf_lvl; + u64 temp; + + pr_debug("\n %s():: freq = %u\n", __func__, (u32)freq); + + if (!freq) + return 0; + + temp = freq * 1000; + do_div(temp, VCD_RESTRK_HZ_PER_1000_PERFLVL); + perf_lvl = (u32)temp; + pr_debug("\n %s(): perf_lvl = %u\n", __func__, (u32)perf_lvl); + + return (u32)perf_lvl; +} + +static u32 res_trk_convert_perf_lvl_to_freq(u64 perf_lvl) +{ + u64 freq, temp; + + pr_debug("\n %s():: perf_lvl = %u\n", __func__, + (u32)perf_lvl); + temp = (perf_lvl * VCD_RESTRK_HZ_PER_1000_PERFLVL) + 999; + do_div(temp, 1000); + freq = (u32)temp; + pr_debug("\n %s(): freq = %u\n", __func__, (u32)freq); + + return (u32)freq; +} + +u32 res_trk_power_up(void) +{ + pr_debug("clk_regime_rail_enable\n"); + pr_debug("clk_regime_sel_rail_control\n"); +#ifdef AXI_CLK_SCALING +{ + int rc; + pr_debug("\n res_trk_power_up():: " + "Calling AXI add requirement\n"); + rc = pm_qos_add_requirement(PM_QOS_SYSTEM_BUS_FREQ, + MSM_AXI_QOS_NAME, PM_QOS_DEFAULT_VALUE); + if (rc < 0) { + pr_err("Request AXI bus QOS fails. 
rc = %d\n", rc); + return false; + } +} +#endif + +#ifdef USE_RES_TRACKER + pr_debug("\n res_trk_power_up():: Calling " + "vid_c_enable_pwr_rail()\n"); + return vid_c_enable_pwr_rail(); +#endif + return true; +} + +u32 res_trk_power_down(void) +{ + pr_debug("clk_regime_rail_disable\n"); +#ifdef AXI_CLK_SCALING + pr_debug("\n res_trk_power_down()::" + "Calling AXI remove requirement\n"); + pm_qos_remove_requirement(PM_QOS_SYSTEM_BUS_FREQ, + MSM_AXI_QOS_NAME); +#endif + +#ifdef USE_RES_TRACKER + pr_debug("\n res_trk_power_down():: Calling " + "vid_c_disable_pwr_rail()\n"); + return vid_c_disable_pwr_rail(); +#endif + return true; +} + +u32 res_trk_enable_clocks(void) +{ + pr_debug("clk_regime_msm_enable\n"); +#ifdef USE_RES_TRACKER + pr_debug("\n res_trk_enable_clocks():: Calling " + "vid_c_enable_clk()\n"); + return vid_c_enable_clk(); +#endif + return true; +} + +u32 res_trk_disable_clocks(void) +{ + pr_debug("clk_regime_msm_disable\n"); + +#ifdef USE_RES_TRACKER + pr_debug("\n res_trk_disable_clocks():: Calling " + "vid_c_disable_clk()\n"); + return vid_c_disable_clk(); +#endif + return true; +} + +u32 res_trk_get_max_perf_level(u32 *pn_max_perf_lvl) +{ + if (!pn_max_perf_lvl) { + pr_err("%s(): pn_max_perf_lvl is NULL\n", __func__); + return false; + } + + *pn_max_perf_lvl = VCD_RESTRK_MAX_PERF_LEVEL; + return true; +} + +u32 res_trk_set_perf_level(u32 req_perf_lvl, u32 *pn_set_perf_lvl, + struct vcd_clnt_ctxt *cctxt) +{ + u32 axi_freq = 0, mfc_freq = 0, calc_mfc_freq = 0; + + if (!pn_set_perf_lvl) { + pr_err("%s(): pn_perf_lvl is NULL\n", __func__); + return false; + } + + pr_debug("%s(), req_perf_lvl = %d\n", __func__, req_perf_lvl); + if (cctxt) { + calc_mfc_freq = res_trk_convert_perf_lvl_to_freq( + (u64)req_perf_lvl); + + if (calc_mfc_freq < VCD_RESTRK_MIN_FREQ_POINT) + calc_mfc_freq = VCD_RESTRK_MIN_FREQ_POINT; + else if (calc_mfc_freq > VCD_RESTRK_MAX_FREQ_POINT) + calc_mfc_freq = VCD_RESTRK_MAX_FREQ_POINT; + + if (!cctxt->decoding) { + if (req_perf_lvl >= VGA_PERF_LEVEL) { + mfc_freq = mfc_clk_freq_table[2]; + axi_freq = axi_clk_freq_table_enc[1]; + } else { + mfc_freq = mfc_clk_freq_table[0]; + axi_freq = axi_clk_freq_table_enc[0]; + } + pr_debug("\n ENCODER: axi_freq = %u" + ", mfc_freq = %u, calc_mfc_freq = %u," + " req_perf_lvl = %u", axi_freq, + mfc_freq, calc_mfc_freq, + req_perf_lvl); + } else { + if (req_perf_lvl <= QVGA_PERF_LEVEL) { + mfc_freq = mfc_clk_freq_table[0]; + axi_freq = axi_clk_freq_table_dec[0]; + } else { + axi_freq = axi_clk_freq_table_dec[0]; + if (req_perf_lvl <= VGA_PERF_LEVEL) + mfc_freq = mfc_clk_freq_table[0]; + else if (req_perf_lvl <= WVGA_PERF_LEVEL) + mfc_freq = mfc_clk_freq_table[1]; + else { + mfc_freq = mfc_clk_freq_table[2]; + axi_freq = axi_clk_freq_table_dec[1]; + } + } + pr_debug("\n DECODER: axi_freq = %u" + ", mfc_freq = %u, calc_mfc_freq = %u," + " req_perf_lvl = %u", axi_freq, + mfc_freq, calc_mfc_freq, + req_perf_lvl); + } + } else { + pr_debug("%s() WARNING:: cctxt is NULL\n", __func__); + return true; + } + +#ifdef AXI_CLK_SCALING + if (req_perf_lvl != VCD_RESTRK_MIN_PERF_LEVEL) { + int rc = -1; + pr_debug("\n %s(): Setting AXI freq to %u", + __func__, axi_freq); + rc = pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ, + MSM_AXI_QOS_NAME, axi_freq); + + if (rc < 0) { + pr_err("\n Update AXI bus QOS fails,rc = %d\n", rc); + return false; + } + } +#endif + +#ifdef USE_RES_TRACKER + if (req_perf_lvl != VCD_RESTRK_MIN_PERF_LEVEL) { + pr_debug("\n %s(): Setting MFC freq to %u", + __func__, mfc_freq); + if (!vid_c_sel_clk_rate(mfc_freq)) { 
+ pr_err("%s(): vid_c_sel_clk_rate FAILED\n", __func__); + *pn_set_perf_lvl = 0; + return false; + } + } +#endif + + *pn_set_perf_lvl = + res_trk_convert_freq_to_perf_lvl((u64) mfc_freq); + return true; +} + +u32 res_trk_get_curr_perf_level(u32 *pn_perf_lvl) +{ + unsigned long freq; + + if (!pn_perf_lvl) { + pr_err("%s(): pn_perf_lvl is NULL\n", __func__); + return false; + } + pr_debug("clk_regime_msm_get_clk_freq_hz\n"); + if (!vid_c_get_clk_rate(&freq)) { + pr_err("%s(): vid_c_get_clk_rate FAILED\n", __func__); + *pn_perf_lvl = 0; + return false; + } + + *pn_perf_lvl = res_trk_convert_freq_to_perf_lvl((u64) freq); + pr_debug("%s(): freq = %lu, *pn_perf_lvl = %u\n", __func__, + freq, *pn_perf_lvl); + return true; +} diff --git a/drivers/misc/video_core/720p/resource_tracker/vcd_res_tracker.h b/drivers/misc/video_core/720p/resource_tracker/vcd_res_tracker.h new file mode 100644 index 0000000000000..f937e79fcc35f --- /dev/null +++ b/drivers/misc/video_core/720p/resource_tracker/vcd_res_tracker.h @@ -0,0 +1,40 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#ifndef _VIDEO_720P_RESOURCE_TRACKER_H_ +#define _VIDEO_720P_RESOURCE_TRACKER_H_ + +#include "vcd_res_tracker_api.h" + +#define VCD_RESTRK_MIN_PERF_LEVEL 37900 +#define VCD_RESTRK_MAX_PERF_LEVEL 108000 +#define VCD_RESTRK_MIN_FREQ_POINT 61440000 +#define VCD_RESTRK_MAX_FREQ_POINT 170667000 +#define VCD_RESTRK_HZ_PER_1000_PERFLVL 1580250 + +#endif diff --git a/drivers/misc/video_core/720p/resource_tracker/vcd_res_tracker_api.h b/drivers/misc/video_core/720p/resource_tracker/vcd_res_tracker_api.h new file mode 100644 index 0000000000000..b099d09330f15 --- /dev/null +++ b/drivers/misc/video_core/720p/resource_tracker/vcd_res_tracker_api.h @@ -0,0 +1,43 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#ifndef _VIDEO_720P_RESOURCE_TRACKER_API_H_ +#define _VIDEO_720P_RESOURCE_TRACKER_API_H_ + +#include "vcd_core.h" + +u32 res_trk_power_up(void); +u32 res_trk_power_down(void); +u32 res_trk_enable_clocks(void); +u32 res_trk_disable_clocks(void); +u32 res_trk_get_max_perf_level(u32 *pn_max_perf_lvl); +u32 res_trk_set_perf_level(u32 req_perf_lvl, u32 *pn_set_perf_lvl, + struct vcd_clnt_ctxt *cctxt); +u32 res_trk_get_curr_perf_level(u32 *pn_perf_lvl); + +#endif diff --git a/drivers/misc/video_core/720p/scheduler/vid_frame_scheduler.c b/drivers/misc/video_core/720p/scheduler/vid_frame_scheduler.c new file mode 100644 index 0000000000000..169f082f086ba --- /dev/null +++ b/drivers/misc/video_core/720p/scheduler/vid_frame_scheduler.c @@ -0,0 +1,1247 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ + +#include "video_core_type.h" + +#include "vid_frame_scheduler_api.h" +#include "vid_frame_scheduler.h" + +static const u32 SCHED_TKNBKT_SIZE_FACTOR = 5; +static const u32 SCHED_TKNBKT_FILL_NORMLZ_SCALE = 100; +static const u32 SCHED_TIME_MAX = 0xffffffff; + + +SCHED_INLINE u32 SCHED_SUCCEEDED(enum sched_status status) +{ + SCHED_MSG_LOW("SCHED_SUCCEEDED check: status = %d", status); + + if (status == SCHED_S_OK) + return true; + else + return false; + +} + +SCHED_INLINE u32 SCHED_FAILED(enum sched_status status) +{ + SCHED_MSG_LOW("SCHED_FAILED check: status = %d", status); + + if (status >= SCHED_S_EFAIL) + return true; + else + return false; + +} + +static void sched_clear_clnt_ctx(struct sched_clnt_ctx *ctx) +{ + if (ctx->clnt_frm_q) + SCHED_FREE(ctx->clnt_frm_q); + (void)SCHED_CRITSEC_RELEASE(ctx->clnt_cs); +} + +SCHED_INLINE void sched_free_clnt_node( + struct _sched_clnt_list_node *clnt_node) +{ + sched_clear_clnt_ctx(&clnt_node->data); + SCHED_FREE(clnt_node); + +} + +enum sched_status sched_clear_clnt_list( + struct _sched_clnt_list_node *clnt_lst) { + struct _sched_clnt_list_node *clnt_node; + + while (clnt_lst) { + (void)SCHED_CRITSEC_ENTER(clnt_lst->data.clnt_cs); + clnt_node = clnt_lst; + clnt_lst = clnt_lst->next; + sched_free_clnt_node(clnt_node); + } + return SCHED_S_OK; +} + +static SCHED_INLINE enum sched_status sched_alloc_frm_q( + struct sched_clnt_ctx *ctx) +{ + ctx->clnt_frm_q = (struct sched_clnt_q_elem *) + SCHED_MALLOC(sizeof(struct sched_clnt_q_elem) * + ctx->max_queue_len); + + if (!ctx->clnt_frm_q) { + SCHED_MSG_ERR("Could not allocate clnt frm Q. Out of memory"); + return SCHED_S_ENOMEM; + } + + SCHED_MEMSET(ctx->clnt_frm_q, + 0, sizeof(struct sched_clnt_q_elem) * ctx->max_queue_len); + ctx->q_head = 0; + ctx->q_tail = -1; + ctx->q_len = 0; + SCHED_MSG_MED("Clnt frm Q allocted & initialized"); + return SCHED_S_OK; + +} + +static SCHED_INLINE void sched_de_q_head_frm + (struct sched_clnt_ctx *ctx, + struct sched_clnt_q_elem *q_elem) { + *q_elem = ctx->clnt_frm_q[ctx->q_head]; + + memset(&ctx->clnt_frm_q[ctx->q_head], 0, + sizeof(struct sched_clnt_q_elem)); + + /*Update the circular queue head index.*/ + ctx->q_head = (ctx->q_head + 1) % ctx->max_queue_len; + ctx->q_len--; +} + +static SCHED_INLINE void sched_tkn_bkt_fill_normalize + (struct sched_clnt_ctx *ctx) +{ + ctx->bkt_curr_tkns_nmlzd = + (ctx->bkt_curr_tkns * SCHED_TKNBKT_FILL_NORMLZ_SCALE) / + ctx->tkn_per_frm; +} + +static void sched_tkn_bkt_config(struct sched_clnt_ctx *ctx) +{ + ctx->bkt_size = ctx->tkn_per_frm * SCHED_TKNBKT_SIZE_FACTOR; + ctx->bkt_quies_cap = ctx->bkt_size; + ctx->bkt_curr_tkns = + SCHED_MIN(ctx->bkt_curr_tkns, ctx->bkt_size); +} + +static void sched_tkn_bkt_supply( + struct sched_clnt_ctx *ctx, u32 curr_time) +{ + u32 delta; + u32 num_tkns; + + /*Check if there's time wrap-around since last token supply time.*/ + if (curr_time < ctx->bkt_lst_sup_time) { + SCHED_MSG_HIGH("Current time wrap around detected"); + delta = + SCHED_TIME_MAX - ctx->bkt_lst_sup_time + curr_time; + } else + delta = curr_time - ctx->bkt_lst_sup_time; + + /*Proceed only if there is any time elapsed since our last supply + time.*/ + if (delta > 0) { + /*Calculate the number of tokens that we can supply based on + time elapsed and the client's token supply rate.*/ + num_tkns = delta * ctx->curr_p_tkn_rate / 1000; + + if (num_tkns > 0) { + ctx->bkt_curr_tkns = SCHED_MIN(ctx->bkt_size, + ctx->bkt_curr_tkns + num_tkns); + + if ((delta * ctx->curr_p_tkn_rate % 1000)) { + delta = (num_tkns * 1000 + + 
(ctx->curr_p_tkn_rate >> 1)) + / ctx->curr_p_tkn_rate; + if ((SCHED_TIME_MAX - + ctx->bkt_lst_sup_time) < delta) { + SCHED_MSG_HIGH + ("Handling for current time wrap " + "around"); + + ctx->bkt_lst_sup_time = delta - + (SCHED_TIME_MAX - + ctx->bkt_lst_sup_time); + } else + ctx->bkt_lst_sup_time += delta; + } else + ctx->bkt_lst_sup_time = curr_time; + + if (ctx->bkt_curr_tkns > + (s32) ctx->bkt_quies_cap) { + SCHED_MSG_HIGH + ("Client Quiesence detected. Capping " + "bkt_curr_tkns"); + ctx->bkt_curr_tkns = ctx->tkn_per_frm; + } + sched_tkn_bkt_fill_normalize(ctx); + } + } +} + +static SCHED_INLINE void sched_tkn_bkt_consume( + struct sched_clnt_ctx *ctx) { + ctx->bkt_curr_tkns -= ctx->tkn_per_frm; +} + +static SCHED_INLINE u32 sched_clnt_frm_is_cnfmnt + (struct sched_clnt_ctx *ctx) +{ + if (ctx->bkt_curr_tkns >= (s32) ctx->tkn_per_frm) + return true; + else + return false; +} /* end of sched_clnt_frm_is_conformant */ + +static struct sched_clnt_ctx *sched_elect_cnfmnt + (struct sched_clnt_ctx *prov_elect, + struct sched_clnt_ctx *new_cand) { + + /*If there is no provisional elect client then the new candidate + becomes the first one.*/ + if (!prov_elect) + return new_cand; + + + /*Here we want to pick the client who has accumulated the most tokens + from the time of attaining single frame conformance. + Since we are comparing between clients we use the available normalized + token bucket occupancy value.*/ + if (prov_elect->bkt_curr_tkns_nmlzd >= + new_cand->bkt_curr_tkns_nmlzd) { + return prov_elect; + } else { + /*We had held on to this provisional elect conformant + client critical section. Since new candidate has won the + election leave critical section of earlier provisional + elect. + */ + (void)SCHED_CRITSEC_LEAVE(prov_elect->clnt_cs); + return new_cand; + } +} + +static struct sched_clnt_ctx *sched_elect_non_cnfmnt + (struct sched_clnt_ctx *prov_elect, + struct sched_clnt_ctx *new_cand) { + + /*If there is no provisional elect client then the new candidate + becomes the first one.*/ + if (!prov_elect) + return new_cand; + /*Here we want to pick the client who is closest to attaining a single + frame conformance. + Since we are comparing between clients we use the available + normalized token bucket occupancy value. + Also if the provisional elect or the new contender (in that order) + have an end of frame marker set we give it priority over deciding + by frame conformance method mentiond earlier.*/ + if (prov_elect->eof_marker > 0) { + return prov_elect; + } else if (new_cand->eof_marker > 0) { + /*We had held on to this provisional elect non conformant client + critical section. Since new candidate has won the election + leave critical section of earlier provisional elect. + */ + (void)SCHED_CRITSEC_LEAVE(prov_elect->clnt_cs); + + return new_cand; + } else if (prov_elect->bkt_curr_tkns_nmlzd >= + new_cand->bkt_curr_tkns_nmlzd) { + return prov_elect; + } else { + /*Similar to above case leave critical section of earlier + provisional elect.*/ + (void)SCHED_CRITSEC_LEAVE(prov_elect->clnt_cs); + return new_cand; + } + +} + +static struct sched_clnt_ctx *sched_elect_non_rt + (struct sched_ctx *sched_ctx) { + struct _sched_clnt_list_node *node = NULL; + struct _sched_clnt_list_node *start_node = NULL; + u32 found = false; + + /*For non real time clients we are using a round robin election + algorithm. + Based on the last scheduled client we find the next to schedule + and return its context. 
+ We also need to skip the client if certain conditions (mentioned below) + are not met*/ + if (!sched_ctx->non_rt_last_sched) + start_node = node = sched_ctx->non_rt_head; + else { + if (!sched_ctx->non_rt_last_sched->next) + start_node = sched_ctx->non_rt_head; + else + start_node = sched_ctx->non_rt_last_sched->next; + + node = start_node; + } + + do { + + (void)SCHED_CRITSEC_ENTER(node->data.clnt_cs); + + /*Check if the client can be considered for this round of scheduling.*/ + if (sched_consider_clnt_for_sched(&node->data)) { + found = true; + sched_ctx->non_rt_last_sched = node; + } + + /*If this client is not the election winner then leave its critical + section. + If we have found a winner we want to hold on to its critical + section. We would leave its critical section after we are done + with dequeueing a frame from the client context.*/ + if (!found) + (void)SCHED_CRITSEC_LEAVE(node->data.clnt_cs); + + if (!node->next) + node = sched_ctx->non_rt_head; + else + node = node->next; + + } while (node != start_node); + + if (found) { + SCHED_MSG_LOW("Non real time client selected"); + + return &sched_ctx->non_rt_last_sched->data; + } else { + SCHED_MSG_MED + ("No non-real time client available for scheduling"); + + return NULL; + } + +} + +static enum sched_status sched_process_set_p_tkn_rate( + struct sched_ctx *sched_ctx, + struct sched_clnt_ctx *clnt_ctx, + union sched_value_type *param_value) { + u32 curr_time = 0; + + if (param_value->un_value == clnt_ctx->curr_p_tkn_rate) + return SCHED_S_OK; + + + if ((sched_ctx->total_clnt_bw - clnt_ctx->curr_p_tkn_rate + + param_value->un_value) > sched_ctx->perf_lvl) { + SCHED_MSG_HIGH + ("Perf level insufficient for requested P Tkn rate"); + + } + + /*Get current time. We need this for token supply. + If we didn't get a valid current time value just return*/ + if (SCHED_FAILED(SCHED_GET_CURRENT_TIME(&curr_time))) { + SCHED_MSG_ERR("Get current time failed"); + + return SCHED_S_EFAIL; + } + + /*Before we go ahead and update the Current tkn rate, we fill + the token bucket upto current time instance.*/ + sched_tkn_bkt_supply(clnt_ctx, curr_time); + + /*Next, update the current value of total client bandwidth with + the new tkn rate of the client.*/ + sched_ctx->total_clnt_bw = sched_ctx->total_clnt_bw - + clnt_ctx->curr_p_tkn_rate + param_value->un_value; + clnt_ctx->curr_p_tkn_rate = param_value->un_value; + + /*Since the current Ptkn rate (i.e. 
current alloted bandwidth) + of the client has changed we need to update client's token + bucket configuration*/ + sched_tkn_bkt_config(clnt_ctx); + return SCHED_S_OK; +} + +static enum sched_status sched_process_add_rt_clnt( + struct sched_ctx *sched_ctx, + struct _sched_clnt_list_node *clnt_node) { + enum sched_status status; + struct sched_clnt_ctx *clnt_ctx = &clnt_node->data; + struct _sched_clnt_list_node *tmp_node; + + /*Validate real time client specific parameters.*/ + if (!clnt_ctx->curr_p_tkn_rate) + SCHED_MSG_HIGH("Allocated token rate is zero"); + + /*Check if our performance level setting can sustain the new client*/ + if (sched_ctx->total_clnt_bw + clnt_ctx->curr_p_tkn_rate > + sched_ctx->perf_lvl) { + SCHED_MSG_HIGH("Not enough bandwidth to support client"); + SCHED_MSG_HIGH + ("curr_perflvl=%d, curr_bw=%d, newclnt_ptknrate=%d", + sched_ctx->perf_lvl, sched_ctx->total_clnt_bw, + clnt_ctx->curr_p_tkn_rate); + + } + /*Allocate the client frame queue*/ + status = sched_alloc_frm_q(clnt_ctx); + + if (SCHED_SUCCEEDED(status)) { + /*Allocate the token bucket*/ + sched_tkn_bkt_config(clnt_ctx); + /*We start with empty token bucket*/ + clnt_ctx->bkt_curr_tkns = 0; + clnt_ctx->bkt_curr_tkns_nmlzd = 0; + /*Add the client to the real time client list and increase the + total client bandwidth.*/ + tmp_node = sched_ctx->rt_head; + sched_ctx->rt_head = clnt_node; + sched_ctx->rt_head->next = tmp_node; + sched_ctx->rt_clnts++; + sched_ctx->total_clnt_bw += clnt_ctx->curr_p_tkn_rate; + } + return status; +} + +static enum sched_status sched_process_add_non_rt_clnt( + struct sched_ctx *sched_ctx, + struct _sched_clnt_list_node *clnt_node) { + enum sched_status status; + struct sched_clnt_ctx *clnt_ctx = &clnt_node->data; + struct _sched_clnt_list_node *tmp_node; + + /*Allocate the client frame queue*/ + status = sched_alloc_frm_q(clnt_ctx); + if (SCHED_SUCCEEDED(status)) { + /*Add the client to the real time client list and increase the + total client bandwidth.*/ + tmp_node = sched_ctx->non_rt_head; + sched_ctx->non_rt_head = clnt_node; + sched_ctx->non_rt_head->next = tmp_node; + sched_ctx->non_rt_clnts++; + } + return status; +} + +enum sched_status sched_process_add_clnt( + struct sched_ctx *sched_ctx, + struct _sched_clnt_list_node *clnt_node, + struct sched_client_init_param *init_param) { + enum sched_status status = SCHED_S_OK; + + SCHED_MEMSET(clnt_node, 0, sizeof(struct _sched_clnt_list_node)); + + /*Validate all initialization parameters*/ + if (!init_param->tkn_per_frm || + !init_param->frm_rate.numer || + !init_param->frm_rate.denom || + !init_param->max_queue_len || + !init_param->o_tkn_max || + !init_param->o_tkn_per_ip_frm || + init_param->o_tkn_init > init_param->o_tkn_max || + init_param->o_tkn_per_ip_frm > init_param->o_tkn_max) { + SCHED_MSG_ERR("Bad initialization parameters"); + return SCHED_S_EBADPARM; + } + + /*Store all initialization parameters*/ + clnt_node->data.client_ctgy = init_param->client_ctgy; + clnt_node->data.curr_p_tkn_rate = init_param->alloc_p_tkn_rate; + clnt_node->data.frm_rate = init_param->frm_rate; + clnt_node->data.max_queue_len = init_param->max_queue_len; + clnt_node->data.o_tkn_max = init_param->o_tkn_max; + clnt_node->data.o_tkn_per_ip_frm = init_param->o_tkn_per_ip_frm; + clnt_node->data.curr_o_tkns = init_param->o_tkn_init; + clnt_node->data.tkn_per_frm = init_param->tkn_per_frm; + clnt_node->data.client_data = init_param->client_data; + clnt_node->data.sched_state = true; + + SCHED_MSG_HIGH("Adding new client of category %d", + 
clnt_node->data.client_ctgy); + SCHED_MSG_MED("Allocated P token rate (per sec) = %d", + clnt_node->data.curr_p_tkn_rate); + SCHED_MSG_MED("Frame rate = %d / %d", + clnt_node->data.frm_rate.numer, + clnt_node->data.frm_rate.denom); + SCHED_MSG_MED("Max_queue_len = %d", clnt_node->data.max_queue_len); + SCHED_MSG_MED("Max O tokens = %d", clnt_node->data.o_tkn_max); + SCHED_MSG_MED("O tokens threshold = %d", + clnt_node->data.o_tkn_per_ip_frm); + SCHED_MSG_MED("P tokens per frame = %d", + clnt_node->data.tkn_per_frm); + SCHED_MSG_MED("Client data ptr = %p", clnt_node->data.client_data); + + if (SCHED_FAILED(SCHED_CRITSEC_CREATE(&clnt_node->data.clnt_cs))) + return SCHED_S_EFAIL; + + /*Configure the client context based on client category.*/ + switch (clnt_node->data.client_ctgy) { + case SCHED_CLNT_RT_BUFF: + case SCHED_CLNT_RT_NOBUFF: + { + status = + sched_process_add_rt_clnt(sched_ctx, clnt_node); + break; + } + + case SCHED_CLNT_NONRT: + { + status = + sched_process_add_non_rt_clnt(sched_ctx, + clnt_node); + break; + } + + default: + { + status = SCHED_S_EBADPARM; + break; + } + + } + return status; +} + +enum sched_status sched_process_remove_clnt( + struct sched_ctx *sched_ctx, + struct _sched_clnt_list_node *clnt_node) { + + (void)SCHED_CRITSEC_ENTER(clnt_node->data.clnt_cs); + + /*Handling if the client frame queue is not empty. Just return + and let Codec driver dequeue all frames for this client + before calling remove client*/ + if (clnt_node->data.q_len) { + SCHED_MSG_ERR("Cannot remove client. Queue is not empty"); + return SCHED_S_EINVALST; + } + + /*Based on client category, remove the client node from the + appropriate scheduler client list*/ + switch (clnt_node->data.client_ctgy) { + case SCHED_CLNT_RT_BUFF: + case SCHED_CLNT_RT_NOBUFF: + { + + sched_remove_node_from_list(&sched_ctx->rt_head, + clnt_node); + sched_ctx->rt_clnts--; + sched_ctx->total_clnt_bw -= + clnt_node->data.curr_p_tkn_rate; + break; + } + + case SCHED_CLNT_NONRT: + { + sched_remove_node_from_list(&sched_ctx->non_rt_head, + clnt_node); + sched_ctx->non_rt_clnts--; + break; + } + + default: + { + SCHED_ASSERT(0); + break; + } + } + + /*Now that client node is off the scheduler client list free up + resources that its been using.*/ + SCHED_MSG_HIGH("Removing new client of category %d", + clnt_node->data.client_ctgy); + SCHED_MSG_MED("Allocated P token rate (per sec) = %d", + clnt_node->data.curr_p_tkn_rate); + SCHED_MSG_MED("Frame rate = %d / %d", + clnt_node->data.frm_rate.numer, + clnt_node->data.frm_rate.denom); + SCHED_MSG_MED("Max_queue_len = %d", clnt_node->data.max_queue_len); + SCHED_MSG_MED("Max O tokens = %d", clnt_node->data.o_tkn_max); + SCHED_MSG_MED("P tokens per frame = %d", + clnt_node->data.tkn_per_frm); + SCHED_MSG_MED("Client data ptr = %p", clnt_node->data.client_data); + sched_free_clnt_node(clnt_node); + return SCHED_S_OK; +} + +enum sched_status sched_process_flush_clnt_buff( + struct sched_ctx *sched_ctx, + struct _sched_clnt_list_node *clnt_node, void **pp_frm_data) { + struct sched_clnt_ctx *clnt_ctx; + enum sched_status status = SCHED_S_OK; + struct sched_clnt_q_elem q_elem; + + clnt_ctx = &clnt_node->data; + + /*If the client queue is empty just return an QEMPTY status*/ + if (!clnt_ctx->q_len) { + status = SCHED_S_QEMPTY; + } else { + clnt_ctx->flushing = true; + + /*If the client queue is not empty just remove and return the + element at the front of the queue.*/ + sched_de_q_head_frm(clnt_ctx, &q_elem); + *pp_frm_data = q_elem.frm_data; + } + + /*If the Queue was orginially 
empty OR if it got empty after latest + De_queue we reset the flushing and First_frame flags. + Token bucket contents are also emptied.Queue pointers are reset. + o_tkns are restored.*/ + if (!clnt_ctx->q_len) { + clnt_ctx->flushing = false; + clnt_ctx->first_frm = false; + clnt_ctx->bkt_curr_tkns = 0; + clnt_ctx->bkt_curr_tkns_nmlzd = 0; + clnt_ctx->bkt_lst_sup_time = 0; + clnt_ctx->q_head = 0; + clnt_ctx->q_tail = -1; + SCHED_MSG_HIGH + ("Client flushed and re-initialized. Client category %d", + clnt_ctx->client_ctgy); + SCHED_MSG_MED("Client allocated P token rate (per sec) = %d", + clnt_ctx->curr_p_tkn_rate); + SCHED_MSG_MED("Client frame rate = %d / %d", + clnt_ctx->frm_rate.numer, + clnt_ctx->frm_rate.denom); + SCHED_MSG_MED("Client P tokens per frame = %d", + clnt_ctx->tkn_per_frm); + } + return status; +} + +SCHED_INLINE enum sched_status sched_process_mark_clnt_eof( + struct sched_ctx *sched_ctx, + struct _sched_clnt_list_node *clnt_node) { + + if (!clnt_node->data.q_len) + return SCHED_S_QEMPTY; + + + if (!clnt_node->data.clnt_frm_q[clnt_node->data.q_tail].eof) { + /*Just increment the EOF marker count in the client context.*/ + clnt_node->data.eof_marker++; + clnt_node->data.clnt_frm_q[clnt_node->data.q_tail]. + eof = true; + } else + SCHED_MSG_HIGH("Current frame is already marked EOF"); + + SCHED_MSG_HIGH("Client marked for end of frames. Client category %d", + clnt_node->data.client_ctgy); + SCHED_MSG_MED("Client allocated P token rate (per sec) = %d", + clnt_node->data.curr_p_tkn_rate); + SCHED_MSG_MED("Client frame rate = %d / %d", + clnt_node->data.frm_rate.numer, + clnt_node->data.frm_rate.denom); + SCHED_MSG_MED("Client P tokens per frame = %d", + clnt_node->data.tkn_per_frm); + return SCHED_S_OK; +} + +enum sched_status sched_process_update_clnt_o_tkn( + struct sched_ctx *sched_ctx, + struct _sched_clnt_list_node *clnt_node, + u32 type, u32 o_tkn) { + + /*Act based on the type of update.*/ + + if (type) { + /*Just replenish the output tokens the client currently has with + the provided number while not going over the max value.*/ + clnt_node->data.curr_o_tkns = + SCHED_MIN(clnt_node->data.curr_o_tkns + o_tkn, + clnt_node->data.o_tkn_max); + } else { + /*Just subtract the give number of output tokens from the count + the client currently has while not going less than 0.*/ + if (o_tkn >= clnt_node->data.curr_o_tkns) + clnt_node->data.curr_o_tkns = 0; + else + clnt_node->data.curr_o_tkns -= o_tkn; + + } + + SCHED_MSG_LOW("%d O tokens restored for client", o_tkn); + SCHED_MSG_LOW("Client Curr_o_tkns = %d", + clnt_node->data.curr_o_tkns); + SCHED_MSG_LOW("Client category = %d", clnt_node->data.client_ctgy); + SCHED_MSG_LOW("Client allocated P token rate (per sec) = %d", + clnt_node->data.curr_p_tkn_rate); + SCHED_MSG_LOW("Client frame rate = %d / %d", + clnt_node->data.frm_rate.numer, + clnt_node->data.frm_rate.denom); + SCHED_MSG_LOW("Client P tokens per frame = %d", + clnt_node->data.tkn_per_frm); + return SCHED_S_OK; +} + +enum sched_status sched_process_en_q_frm( + struct sched_ctx *sched_ctx, + struct _sched_clnt_list_node *clnt_node, void *frm_data) { + struct sched_clnt_ctx *clnt_ctx; + u32 curr_time = 0; + + clnt_ctx = &clnt_node->data; + + /*Check if the client queue is full already*/ + if (clnt_ctx->q_len == clnt_ctx->max_queue_len) { + SCHED_MSG_HIGH("Cannot enqueue. Client queue is full"); + + return SCHED_S_QFULL; + } + + /*Check if the client queue is being flushed.*/ + if (clnt_ctx->flushing) { + SCHED_MSG_ERR("Cannot enqueue. 
Client queue is being flushed"); + + return SCHED_S_EINVALST; + } + + /*Reposition tail, increase Q length and add the frame data to Q*/ + clnt_ctx->q_tail = + (clnt_ctx->q_tail + 1) % clnt_ctx->max_queue_len; + + clnt_ctx->q_len++; + + clnt_ctx->clnt_frm_q[clnt_ctx->q_tail].frm_data = frm_data; + clnt_ctx->clnt_frm_q[clnt_ctx->q_tail].eof = false; + + /*If this is the first frame being queued for this client then, + get current time. We now start the token supply clock for the client. + Supply tokens required for a single frame processing while storing + the current time as the last supply time and marking that first + frame is received.*/ + if (!clnt_ctx->first_frm) { + SCHED_MSG_HIGH("Client first frame enqueued"); + if (clnt_ctx->client_ctgy != SCHED_CLNT_NONRT) { + if (SCHED_SUCCEEDED + (SCHED_GET_CURRENT_TIME(&curr_time))) { + clnt_ctx->bkt_curr_tkns = + clnt_ctx->tkn_per_frm; + clnt_ctx->bkt_lst_sup_time = curr_time; + clnt_ctx->first_frm = true; + } + } else + clnt_ctx->first_frm = true; + } + + SCHED_MSG_LOW("Client frame enqueued. Queue fill status = %d / %d", + clnt_ctx->q_len, clnt_ctx->max_queue_len); + SCHED_MSG_LOW("Client category = %d", clnt_ctx->client_ctgy); + SCHED_MSG_LOW("Client allocated P token rate (per sec) = %d", + clnt_ctx->curr_p_tkn_rate); + SCHED_MSG_LOW("Client frame rate = %d / %d", + clnt_ctx->frm_rate.numer, + clnt_ctx->frm_rate.denom); + SCHED_MSG_LOW("Client P tokens per frame = %d", + clnt_ctx->tkn_per_frm); + + return SCHED_S_OK; + +} + +enum sched_status sched_process_re_en_q_frm( + struct sched_ctx *sched_ctx, + struct _sched_clnt_list_node *clnt_node, + void *frm_data) +{ + struct sched_clnt_ctx *clnt_ctx; + u32 curr_time = 0; + + clnt_ctx = &clnt_node->data; + + if (clnt_ctx->q_len == clnt_ctx->max_queue_len) { + SCHED_MSG_ERR("Cannot re-enqueue. Client queue is full"); + return SCHED_S_QFULL; + } + + if (clnt_ctx->flushing) { + SCHED_MSG_ERR("Cannot re-enqueue. Client" + " queue is being flushed"); + return SCHED_S_EINVALST; + } + + clnt_ctx->q_head = + (clnt_ctx->q_head + clnt_ctx->max_queue_len - 1) % + clnt_ctx->max_queue_len; + + clnt_ctx->q_len++; + + clnt_ctx->clnt_frm_q[clnt_ctx->q_head].frm_data = + frm_data; + clnt_ctx->clnt_frm_q[clnt_ctx->q_head].eof = + false; + + if (clnt_ctx->client_ctgy != SCHED_CLNT_NONRT) { + if (!clnt_ctx->first_frm) { + SCHED_MSG_HIGH("Client frame " + "re-enqueued as first frame"); + if (SCHED_SUCCEEDED + (SCHED_GET_CURRENT_TIME(&curr_time))) { + clnt_ctx->bkt_curr_tkns = + clnt_ctx->tkn_per_frm; + clnt_ctx->bkt_lst_sup_time = + curr_time; + clnt_ctx->first_frm = + true; + } + } else + clnt_ctx->bkt_curr_tkns += + clnt_ctx->tkn_per_frm; + } else + clnt_ctx->first_frm = true; + + + SCHED_MSG_LOW("Client frame re-enqueued. 
Queue fill status = %d / %d", + clnt_ctx->q_len, clnt_ctx->max_queue_len); + SCHED_MSG_LOW("Client category = %d", clnt_ctx->client_ctgy); + SCHED_MSG_LOW("Client allocated P token rate (per sec) = %d", + clnt_ctx->curr_p_tkn_rate); + SCHED_MSG_LOW("Client frame rate = %d / %d", + clnt_ctx->frm_rate.numer, + clnt_ctx->frm_rate.denom); + SCHED_MSG_LOW("Client P tokens per frame = %d", + clnt_ctx->tkn_per_frm); + + return SCHED_S_OK; + +} + +enum sched_status sched_process_de_q_frm_rt_clnt( + struct sched_ctx *sched_ctx, + struct sched_clnt_ctx **pp_conf_elect_ctx, + struct sched_clnt_ctx **pp_non_conf_elect_ctx) { + u32 curr_time = 0; + struct _sched_clnt_list_node *clnt_node; + struct sched_clnt_ctx *clnt_ctx; + + *pp_conf_elect_ctx = NULL; + *pp_non_conf_elect_ctx = NULL; + + /*Get current time. We need this for token supply. + If we didn't get a valid current time value just return*/ + if (SCHED_FAILED(SCHED_GET_CURRENT_TIME(&curr_time))) { + SCHED_MSG_ERR("Get current time failed"); + + return SCHED_S_EFAIL; + } + + /*Run through the list of real time clients. + Consider only the clients that have queued atleast one frame since + being admitted into the scheduler. + Supply tokens equivalent to elapsed time since last supply. + Also in this same pass, check if each client has a conformant + frame or not.*/ + clnt_node = sched_ctx->rt_head; + while (clnt_node) { + clnt_ctx = &clnt_node->data; + + (void)SCHED_CRITSEC_ENTER(clnt_ctx->clnt_cs); + + if (sched_consider_clnt_for_sched(clnt_ctx)) { + sched_tkn_bkt_supply(clnt_ctx, curr_time); + if (sched_clnt_frm_is_cnfmnt(clnt_ctx)) { + *pp_conf_elect_ctx = + sched_elect_cnfmnt(*pp_conf_elect_ctx, + clnt_ctx); + } else { + if (!*pp_conf_elect_ctx) { + *pp_non_conf_elect_ctx = + sched_elect_non_cnfmnt + (*pp_non_conf_elect_ctx, + clnt_ctx); + } else if (*pp_non_conf_elect_ctx) { + (void) + SCHED_CRITSEC_LEAVE( + (*pp_non_conf_elect_ctx)->clnt_cs); + *pp_non_conf_elect_ctx = NULL; + + } + } + } + if (clnt_ctx != *pp_conf_elect_ctx + && clnt_ctx != *pp_non_conf_elect_ctx) + (void)SCHED_CRITSEC_LEAVE(clnt_ctx->clnt_cs); + clnt_node = clnt_node->next; + } + + return SCHED_S_OK; + +} + +enum sched_status sched_process_de_q_frm( + struct sched_ctx *sched_ctx, + void **pp_frm_data, void **pp_client_data) { + enum sched_status status; + struct sched_clnt_ctx *sched_clnt_ctx = NULL; + struct sched_clnt_ctx *conf_elect_ctx; + struct sched_clnt_ctx *non_conf_elect_ctx; + struct sched_clnt_q_elem q_elem; + + status = sched_process_de_q_frm_rt_clnt(sched_ctx, + &conf_elect_ctx, + &non_conf_elect_ctx); + if (SCHED_FAILED(status)) { + SCHED_MSG_ERR("sched_process_de_q_frm_rt_clnt ret err=%d", + status); + + return status; + } + + /*At this point we have looked at all real time clients in the + scheduler list and have run their elections. 
+ We used the following frame service order to pick the client to + schedule: + a) client with conformant frame + b) client with non-conformant frame + c) non real-time client*/ + if (conf_elect_ctx) { + SCHED_MSG_LOW("Conformant frame client selected"); + sched_tkn_bkt_consume(conf_elect_ctx); + sched_clnt_ctx = conf_elect_ctx; + } else if (non_conf_elect_ctx) { + SCHED_MSG_LOW("Non-Conformant frame client selected"); + sched_tkn_bkt_consume(non_conf_elect_ctx); + sched_clnt_ctx = non_conf_elect_ctx; + } else if (sched_ctx->non_rt_clnts) + sched_clnt_ctx = sched_elect_non_rt(sched_ctx); + + /*If we have a client that we can schedule, then dequeue the frame + at the head of its queue.*/ + if (sched_clnt_ctx) { + *pp_client_data = sched_clnt_ctx->client_data; + + sched_de_q_head_frm(sched_clnt_ctx, &q_elem); + + *pp_frm_data = q_elem.frm_data; + + sched_clnt_ctx->curr_o_tkns -= + sched_clnt_ctx->o_tkn_per_ip_frm; + + /*If the dequeued frame was marked EOF we need to decrement the + eof_marker count.*/ + if (q_elem.eof) { + SCHED_MSG_MED + ("Last frame for EOF marked client dequeued"); + + sched_clnt_ctx->eof_marker--; + + status = SCHED_S_EOF; + } + + SCHED_MSG_LOW + ("Client frame Dequeued. Queue fill status = %d / %d", + sched_clnt_ctx->q_len, + sched_clnt_ctx->max_queue_len); + SCHED_MSG_LOW("Client category = %d", + sched_clnt_ctx->client_ctgy); + SCHED_MSG_LOW("Client allocated P token rate (per sec) = %d", + sched_clnt_ctx->curr_p_tkn_rate); + SCHED_MSG_LOW("Client frame rate = %d / %d", + sched_clnt_ctx->frm_rate.numer, + sched_clnt_ctx->frm_rate.denom); + SCHED_MSG_LOW("Client P tokens per frame = %d", + sched_clnt_ctx->tkn_per_frm); + + /*We had held on to the election winning client critical + section. Leave client critical section before we exit.*/ + (void)SCHED_CRITSEC_LEAVE(sched_clnt_ctx->clnt_cs); + } else { + status = SCHED_S_QEMPTY; + } + + return status; + +} + +enum sched_status sched_process_sched_lvl_get_param( + struct sched_ctx *sched_ctx, + enum sched_index param_index, + union sched_value_type *param_value) +{ + enum sched_status status = SCHED_S_OK; + + switch (param_index) { + case SCHED_I_PERFLEVEL: + { + param_value->un_value = sched_ctx->perf_lvl; + break; + } + + default: + { + status = SCHED_S_EBADPARM; + break; + } + } + return status; +} + +enum sched_status sched_process_sched_lvl_set_param( + struct sched_ctx *sched_ctx, + enum sched_index param_index, + union sched_value_type *param_value) +{ + enum sched_status status = SCHED_S_OK; + + SCHED_MSG_HIGH("Set_sched_param index = %u, value = %p", + param_index, (void *)param_value); + + switch (param_index) { + case SCHED_I_PERFLEVEL: + { + if (sched_ctx->total_clnt_bw > + param_value->un_value) { + SCHED_MSG_HIGH + ("Perf level being lowered than current " + "bandwidth"); + SCHED_MSG_HIGH + ("curr_perflvl=%d, new_perflvl=%d, " + "curr_bw=%d", + sched_ctx->perf_lvl, + param_value->un_value, + sched_ctx->total_clnt_bw); + } + + sched_ctx->perf_lvl = param_value->un_value; + + break; + } + + default: + { + status = SCHED_S_EBADPARM; + break; + } + } + return status; +} + +enum sched_status sched_process_clnt_lvl_get_param( + struct sched_ctx *sched_ctx, + struct sched_clnt_ctx *clnt_ctx, + enum sched_index param_index, + union sched_value_type *param_value) { + enum sched_status status = SCHED_S_OK; + + switch (param_index) { + case SCHED_I_CLNT_CURRQLEN: + { + param_value->un_value = clnt_ctx->q_len; + break; + } + + case SCHED_I_CLNT_PTKNRATE: + { + param_value->un_value = clnt_ctx->curr_p_tkn_rate; + break; 
+ } + + case SCHED_I_CLNT_PTKNPERFRM: + { + param_value->un_value = clnt_ctx->tkn_per_frm; + break; + } + + case SCHED_I_CLNT_FRAMERATE: + { + param_value->frm_rate = clnt_ctx->frm_rate; + break; + } + + case SCHED_I_CLNT_OTKNMAX: + { + param_value->un_value = clnt_ctx->o_tkn_max; + break; + } + + case SCHED_I_CLNT_OTKNPERIPFRM: + { + param_value->un_value = + clnt_ctx->o_tkn_per_ip_frm; + break; + } + + case SCHED_I_CLNT_OTKNCURRENT: + { + param_value->un_value = clnt_ctx->curr_o_tkns; + break; + } + + default: + { + status = SCHED_S_EBADPARM; + break; + } + } + return status; +} + +enum sched_status sched_process_clnt_lvl_set_param( + struct sched_ctx *sched_ctx, + struct sched_clnt_ctx *clnt_ctx, + enum sched_index param_index, + union sched_value_type *param_value) +{ + enum sched_status status = SCHED_S_OK; + + SCHED_MSG_HIGH("Set_clnt_param index = %u, value = %p", + param_index, (void *)param_value); + + switch (param_index) { + case SCHED_I_CLNT_CURRQLEN: + case SCHED_I_CLNT_OTKNCURRENT: + { + status = SCHED_S_EINVALOP; + break; + } + + case SCHED_I_CLNT_PTKNRATE: + { + status = + sched_process_set_p_tkn_rate(sched_ctx, + clnt_ctx, + param_value); + break; + } + + case SCHED_I_CLNT_PTKNPERFRM: + { + + clnt_ctx->tkn_per_frm = param_value->un_value; + sched_tkn_bkt_config(clnt_ctx); + break; + } + + case SCHED_I_CLNT_FRAMERATE: + { + clnt_ctx->frm_rate = param_value->frm_rate; + break; + } + + case SCHED_I_CLNT_OTKNMAX: + { + if (param_value->un_value < + clnt_ctx->o_tkn_per_ip_frm) { + status = SCHED_S_EBADPARM; + } else { + clnt_ctx->o_tkn_max = + param_value->un_value; + + clnt_ctx->curr_o_tkns = + SCHED_MIN(clnt_ctx->curr_o_tkns, + clnt_ctx->o_tkn_max); + } + break; + } + + case SCHED_I_CLNT_OTKNPERIPFRM: + { + if (param_value->un_value > clnt_ctx->o_tkn_max) { + status = SCHED_S_EBADPARM; + } else { + clnt_ctx->o_tkn_per_ip_frm = + param_value->un_value; + } + break; + } + + default: + { + status = SCHED_S_EBADPARM; + break; + } + } + + return status; + +} + +enum sched_status sched_process_suspend_resume_clnt( + struct sched_ctx *sched_ctx, + struct _sched_clnt_list_node *clnt_node, u32 state) { + u32 curr_time; + struct sched_clnt_ctx *clnt_ctx = &clnt_node->data; + + SCHED_MSG_HIGH("Current client sched_state=%d. Requested state=%d", + clnt_ctx->sched_state, state); + + if (clnt_ctx->sched_state == state) + return SCHED_S_OK; + + + clnt_ctx->sched_state = state; + + if (!SCHED_SUCCEEDED(SCHED_GET_CURRENT_TIME(&curr_time))) { + SCHED_MSG_ERR("Get current time failed"); + + return SCHED_S_OK; + } + + /* RESUME */ + if (state) { + clnt_ctx->bkt_lst_sup_time = curr_time; + } else { /* SUSPEND */ + /*As we are suspending the client we fill the token bucket upto + current time instance.*/ + sched_tkn_bkt_supply(clnt_ctx, curr_time); + } + + SCHED_MSG_MED("Client category %d", clnt_ctx->client_ctgy); + SCHED_MSG_MED("Client allocated P token rate (per sec) = %d", + clnt_ctx->curr_p_tkn_rate); + SCHED_MSG_MED("Client frame rate = %d / %d", + clnt_ctx->frm_rate.numer, + clnt_ctx->frm_rate.denom); + SCHED_MSG_MED("Client P tokens per frame = %d", + clnt_ctx->tkn_per_frm); + + return SCHED_S_OK; + +} + +void sched_remove_node_from_list( + struct _sched_clnt_list_node **pp_head, + struct _sched_clnt_list_node *node) +{ + u32 found = false; + struct _sched_clnt_list_node *curr = *pp_head; + + if (!*pp_head || !node) { + SCHED_MSG_ERR("Bad params. 
head %p, node %p", *pp_head, + node); + return; + } + + if (node == *pp_head) { + *pp_head = node->next; + return; + } + + while (!found && curr) { + if (node == curr->next) { + curr->next = node->next; + found = true; + } + + curr = curr->next; + } + +} + +SCHED_INLINE u32 sched_consider_clnt_for_sched( + struct sched_clnt_ctx *clnt_ctx) +{ + if (clnt_ctx->first_frm && + clnt_ctx->sched_state && + !clnt_ctx->flushing && + clnt_ctx->q_len && + clnt_ctx->curr_o_tkns >= clnt_ctx->o_tkn_per_ip_frm) { + return true; + } else { + return false; + } +} diff --git a/drivers/misc/video_core/720p/scheduler/vid_frame_scheduler.h b/drivers/misc/video_core/720p/scheduler/vid_frame_scheduler.h new file mode 100644 index 0000000000000..e38e9af73be33 --- /dev/null +++ b/drivers/misc/video_core/720p/scheduler/vid_frame_scheduler.h @@ -0,0 +1,138 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef _VID_FRAME_SCHEDULER_H_ +#define _VID_FRAME_SCHEDULER_H_ +#include "vid_frame_scheduler_utils.h" + +struct sched_clnt_q_elem { + void *frm_data; + u32 eof; + +}; + +struct sched_clnt_ctx { + enum sched_client_ctgy client_ctgy; + struct sched_client_frm_rate frm_rate; + u32 tkn_per_frm; + u32 curr_p_tkn_rate; + u32 o_tkn_max; + u32 o_tkn_per_ip_frm; + u32 curr_o_tkns; + u32 bkt_size; + u32 bkt_quies_cap; + s32 bkt_curr_tkns; + s32 bkt_curr_tkns_nmlzd; + u32 bkt_lst_sup_time; + u32 max_queue_len; + struct sched_clnt_q_elem *clnt_frm_q; + s32 q_head; + s32 q_tail; + u32 q_len; + u32 first_frm; + u32 eof_marker; + u32 flushing; + u32 sched_state; + void *client_data; + u32 *clnt_cs; +}; + +struct _sched_clnt_list_node { + struct sched_clnt_ctx data; + struct _sched_clnt_list_node *next; + +}; + +struct _sched_clnt_list_node; + +struct sched_ctx { + u32 perf_lvl; + struct _sched_clnt_list_node *rt_head; + u32 rt_clnts; + struct _sched_clnt_list_node *non_rt_head; + u32 non_rt_clnts; + struct _sched_clnt_list_node *non_rt_last_sched; + u32 total_clnt_bw; + u32 *sched_cs; +}; + +SCHED_INLINE u32 SCHED_SUCCEEDED(enum sched_status status); +SCHED_INLINE u32 SCHED_FAILED(enum sched_status status); +SCHED_INLINE void sched_free_clnt_node + (struct _sched_clnt_list_node *clnt_node); +enum sched_status sched_clear_clnt_list + (struct _sched_clnt_list_node *clnt_lst); +enum sched_status sched_process_add_clnt + (struct sched_ctx *sched_ctx, + struct _sched_clnt_list_node *clnt_node, + struct sched_client_init_param *init_param); +enum sched_status sched_process_remove_clnt + (struct sched_ctx *sched_ctx, + struct _sched_clnt_list_node *clnt_node); +enum sched_status sched_process_flush_clnt_buff + (struct sched_ctx *sched_ctx, + struct _sched_clnt_list_node *clnt_node, void **pp_frm_data); +SCHED_INLINE enum sched_status sched_process_mark_clnt_eof + (struct sched_ctx *sched_ctx, + struct _sched_clnt_list_node *clnt_node); +enum sched_status sched_process_update_clnt_o_tkn + (struct sched_ctx *sched_ctx, + struct _sched_clnt_list_node *clnt_node, u32 type, u32 o_tkn); +enum sched_status sched_process_en_q_frm + (struct sched_ctx *sched_ctx, + struct _sched_clnt_list_node *clnt_node, void *frm_data); +enum sched_status sched_process_re_en_q_frm +(struct sched_ctx *sched_ctx, + struct _sched_clnt_list_node *clnt_node, void *frm_data); +enum sched_status sched_process_de_q_frm + (struct sched_ctx *sched_ctx, + void **pp_frm_data, void **pp_client_data); +enum sched_status sched_process_sched_lvl_get_param + (struct sched_ctx *sched_ctx, + enum sched_index param_index, union sched_value_type *param_value); +enum sched_status sched_process_sched_lvl_set_param + (struct sched_ctx *sched_ctx, + enum sched_index param_index, union sched_value_type *param_value); +enum sched_status sched_process_clnt_lvl_get_param + (struct sched_ctx *sched_ctx, + struct sched_clnt_ctx *clnt_ctx, + enum sched_index param_index, union sched_value_type *param_value); +enum sched_status sched_process_clnt_lvl_set_param + (struct sched_ctx *sched_ctx, + struct sched_clnt_ctx *clnt_ctx, + enum sched_index param_index, union sched_value_type *param_value); +enum sched_status sched_process_suspend_resume_clnt + (struct sched_ctx *sched_ctx, + struct _sched_clnt_list_node *clnt_node, u32 state); +void sched_remove_node_from_list + (struct _sched_clnt_list_node **pp_head, + struct _sched_clnt_list_node *node); +SCHED_INLINE u32 sched_consider_clnt_for_sched + (struct sched_clnt_ctx *clnt_ctx); + +#endif diff --git 
a/drivers/misc/video_core/720p/scheduler/vid_frame_scheduler_api.c b/drivers/misc/video_core/720p/scheduler/vid_frame_scheduler_api.c new file mode 100644 index 0000000000000..7c55784ebaed8 --- /dev/null +++ b/drivers/misc/video_core/720p/scheduler/vid_frame_scheduler_api.c @@ -0,0 +1,426 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include "video_core_type.h" + +#include "vid_frame_scheduler_api.h" +#include "vid_frame_scheduler.h" + +enum sched_status sched_create( + struct sched_init_param *init_param, void **handle) +{ + struct sched_ctx *sched_ctx; + + SCHED_MSG_HIGH("sched_create API"); + + if (!handle || !init_param) { + SCHED_MSG_ERR + ("Bad input parameters: handle=%p, init_param=%p", + handle, init_param); + return SCHED_S_EBADPARM; + } + + if (!init_param->perf_lvl) { + SCHED_MSG_ERR("Invalid Perf level=%u", + init_param->perf_lvl); + return SCHED_S_EBADPARM; + } + + sched_ctx = + (struct sched_ctx *) + SCHED_MALLOC(sizeof(struct sched_ctx)); + + if (!sched_ctx) { + SCHED_MSG_ERR("Could not allocate sched ctx. Out of memory"); + return SCHED_S_ENOMEM; + } + + SCHED_MEMSET(sched_ctx, 0, sizeof(struct sched_ctx)); + sched_ctx->perf_lvl = init_param->perf_lvl; + + if (SCHED_FAILED(SCHED_CRITSEC_CREATE(&sched_ctx->sched_cs))) { + SCHED_FREE(sched_ctx); + return SCHED_S_EFAIL; + } + + *handle = sched_ctx; + + SCHED_MSG_MED("Sched instance created. 
All went well"); + + return SCHED_S_OK; + +} + +enum sched_status sched_destroy(void *handle) +{ + struct sched_ctx *sched_ctx = (struct sched_ctx *)handle; + + SCHED_MSG_HIGH("sched_destroy API"); + + if (!sched_ctx) { + SCHED_MSG_ERR("Bad input parameters"); + return SCHED_S_EBADPARM; + } + (void)SCHED_CRITSEC_ENTER(sched_ctx->sched_cs); + (void)sched_clear_clnt_list(sched_ctx->rt_head); + (void)sched_clear_clnt_list(sched_ctx->non_rt_head); + SCHED_MSG_MED("Sched clnt lists are cleared & released"); + (void)SCHED_CRITSEC_LEAVE(sched_ctx->sched_cs); + (void)SCHED_CRITSEC_RELEASE(sched_ctx->sched_cs); + SCHED_MEMSET(sched_ctx, 0, sizeof(struct sched_ctx)); + SCHED_FREE(sched_ctx); + SCHED_MSG_MED("Sched instance deleted"); + return SCHED_S_OK; +} + +enum sched_status sched_get_param( + void *handle, + enum sched_index param_index, + union sched_value_type *param_value) +{ + struct sched_ctx *sched_ctx = (struct sched_ctx *)handle; + enum sched_status status; + + SCHED_MSG_HIGH("sched_get_param API"); + + if (!sched_ctx || !param_value) { + SCHED_MSG_ERR + ("Bad input parameters: sched_ctx=%p, param_value=%p", + sched_ctx, param_value); + + return SCHED_S_EBADPARM; + } + + (void)SCHED_CRITSEC_ENTER(sched_ctx->sched_cs); + + status = + sched_process_sched_lvl_get_param(sched_ctx, param_index, + param_value); + + (void)SCHED_CRITSEC_LEAVE(sched_ctx->sched_cs); + return status; +} + +enum sched_status sched_set_param( + void *handle, + enum sched_index param_index, + union sched_value_type *param_value) +{ + struct sched_ctx *sched_ctx = (struct sched_ctx *)handle; + enum sched_status status; + + SCHED_MSG_HIGH("sched_set_param API"); + + if (!sched_ctx || !param_value) { + SCHED_MSG_ERR + ("Bad input parameters: sched_ctx=%p, param_value=%p", + sched_ctx, param_value); + return SCHED_S_EBADPARM; + } + (void)SCHED_CRITSEC_ENTER(sched_ctx->sched_cs); + status = + sched_process_sched_lvl_set_param(sched_ctx, param_index, + param_value); + (void)SCHED_CRITSEC_LEAVE(sched_ctx->sched_cs); + return status; +} + +enum sched_status sched_add_client( + void *handle, + struct sched_client_init_param *init_param, + void **client_hdl) +{ + struct sched_ctx *sched_ctx = (struct sched_ctx *)handle; + enum sched_status status = SCHED_S_OK; + struct _sched_clnt_list_node *new_clnt; + + SCHED_MSG_HIGH("sched_add_client API"); + + if (!sched_ctx || !init_param || + !client_hdl) { + SCHED_MSG_ERR("Bad input parameters"); + + return SCHED_S_EBADPARM; + } + + new_clnt = (struct _sched_clnt_list_node *) + SCHED_MALLOC(sizeof(struct _sched_clnt_list_node)); + if (!new_clnt) { + SCHED_MSG_ERR("Could not allocate client ctx. Out of memory"); + return SCHED_S_ENOMEM; + } + (void)SCHED_CRITSEC_ENTER(sched_ctx->sched_cs); + status = sched_process_add_clnt(sched_ctx, new_clnt, init_param); + + if (SCHED_FAILED(status)) { + SCHED_MSG_ERR("Add_client failed with err=%d", status); + sched_free_clnt_node(new_clnt); + new_clnt = NULL; + } + + (void)SCHED_CRITSEC_LEAVE(sched_ctx->sched_cs); + *client_hdl = new_clnt; + SCHED_MSG_MED("Sched client instance created. 
All went well"); + return status; +} + +enum sched_status sched_remove_client(void *handle, void *client_hdl) +{ + struct sched_ctx *sched_ctx = (struct sched_ctx *)handle; + struct _sched_clnt_list_node *clnt_node = + (struct _sched_clnt_list_node *)client_hdl; + enum sched_status status = SCHED_S_OK; + + SCHED_MSG_HIGH("sched_remove_client API"); + if (!sched_ctx || !clnt_node) { + SCHED_MSG_ERR + ("Bad input parameters: sched_ctx=%p, clnt_node=%p", + sched_ctx, clnt_node); + return SCHED_S_EBADPARM; + } + + (void)SCHED_CRITSEC_ENTER(sched_ctx->sched_cs); + status = sched_process_remove_clnt(sched_ctx, clnt_node); + (void)SCHED_CRITSEC_LEAVE(sched_ctx->sched_cs); + return status; +} + +enum sched_status sched_flush_client_buffer( + void *handle, void *client_hdl, void **pp_frm_data) +{ + + struct sched_ctx *sched_ctx = (struct sched_ctx *)handle; + struct _sched_clnt_list_node *clnt_node = + (struct _sched_clnt_list_node *)client_hdl; + enum sched_status status = SCHED_S_OK; + + SCHED_MSG_HIGH("sched_flush_client_buffer API"); + if (!sched_ctx || !clnt_node || !pp_frm_data) { + SCHED_MSG_ERR + ("Bad input parameters: sched_ctx=%p, clnt_node=%p", + sched_ctx, clnt_node); + return SCHED_S_EBADPARM; + } + + (void)SCHED_CRITSEC_ENTER(clnt_node->data.clnt_cs); + status = + sched_process_flush_clnt_buff(sched_ctx, clnt_node, + pp_frm_data); + (void)SCHED_CRITSEC_LEAVE(clnt_node->data.clnt_cs); + return status; +} + +enum sched_status sched_mark_client_eof(void *handle, void *client_hdl) +{ + struct sched_ctx *sched_ctx = (struct sched_ctx *)handle; + struct _sched_clnt_list_node *clnt_node = + (struct _sched_clnt_list_node *)client_hdl; + enum sched_status status = SCHED_S_OK; + + SCHED_MSG_HIGH("sched_mark_client_eof API"); + if (!sched_ctx || !clnt_node) { + SCHED_MSG_ERR + ("Bad input parameters: sched_ctx=%p, clnt_node=%p", + sched_ctx, clnt_node); + return SCHED_S_EBADPARM; + } + + (void)SCHED_CRITSEC_ENTER(clnt_node->data.clnt_cs); + status = sched_process_mark_clnt_eof(sched_ctx, clnt_node); + (void)SCHED_CRITSEC_LEAVE(clnt_node->data.clnt_cs); + return status; +} + +enum sched_status sched_update_client_o_tkn( + void *handle, void *client_hdl, u32 type, u32 o_tkn) +{ + + struct sched_ctx *sched_ctx = (struct sched_ctx *)handle; + struct _sched_clnt_list_node *clnt_node = + (struct _sched_clnt_list_node *)client_hdl; + enum sched_status status = SCHED_S_OK; + + SCHED_MSG_HIGH("sched_restore_client_o_tkn API"); + + if (!sched_ctx || !clnt_node) { + SCHED_MSG_ERR + ("Bad input parameters: sched_ctx=%p, clnt_node=%p", + sched_ctx, clnt_node); + return SCHED_S_EBADPARM; + } + + (void)SCHED_CRITSEC_ENTER(clnt_node->data.clnt_cs); + status = + sched_process_update_clnt_o_tkn(sched_ctx, clnt_node, type, + o_tkn); + (void)SCHED_CRITSEC_LEAVE(clnt_node->data.clnt_cs); + return status; +} + +enum sched_status sched_queue_frame( + void *handle, void *client_hdl, void *frm_data) +{ + struct sched_ctx *sched_ctx = (struct sched_ctx *)handle; + struct _sched_clnt_list_node *clnt_node = + (struct _sched_clnt_list_node *)client_hdl; + enum sched_status status = SCHED_S_OK; + + SCHED_MSG_HIGH("sched_queue_frame API"); + if (!sched_ctx || !clnt_node) { + SCHED_MSG_ERR + ("Bad input parameters: sched_ctx=%p, clnt_node=%p", + sched_ctx, clnt_node); + return SCHED_S_EBADPARM; + } + + (void)SCHED_CRITSEC_ENTER(clnt_node->data.clnt_cs); + status = sched_process_en_q_frm(sched_ctx, clnt_node, frm_data); + (void)SCHED_CRITSEC_LEAVE(clnt_node->data.clnt_cs); + return status; +} + +enum sched_status 
sched_re_queue_frame( +void *handle, void *client_hdl, void *frm_data) +{ + struct sched_ctx* sched_ctx = (struct sched_ctx *)handle; + struct _sched_clnt_list_node *clnt_node = + (struct _sched_clnt_list_node *)client_hdl; + enum sched_status status = SCHED_S_OK; + + SCHED_MSG_HIGH("\n sched_re_queue_frame API"); + if (!sched_ctx || !clnt_node) { + SCHED_MSG_ERR("Bad input parameters:" + "sched_ctx=%p, clnt_node=%p", + sched_ctx, clnt_node); + return SCHED_S_EBADPARM; + } + (void)SCHED_CRITSEC_ENTER(clnt_node->data.clnt_cs); + status = sched_process_re_en_q_frm(sched_ctx, clnt_node, + frm_data); + (void)SCHED_CRITSEC_LEAVE(clnt_node->data.clnt_cs); + return status; +} + +enum sched_status sched_de_queue_frame( + void *handle, void **pp_frm_data, void **pp_client_data) +{ + struct sched_ctx *sched_ctx = (struct sched_ctx *)handle; + enum sched_status status = SCHED_S_OK; + + SCHED_MSG_HIGH("sched_de_queue_frame API"); + + if (!sched_ctx || !pp_frm_data + || !pp_client_data) { + SCHED_MSG_ERR("Bad input parameters: sched_ctx=%p, " + "pp_frm_data=%p, pp_client_data=%p", + sched_ctx, pp_frm_data, + pp_client_data); + return SCHED_S_EBADPARM; + } + (void)SCHED_CRITSEC_ENTER(sched_ctx->sched_cs); + status = + sched_process_de_q_frm(sched_ctx, pp_frm_data, pp_client_data); + (void)SCHED_CRITSEC_LEAVE(sched_ctx->sched_cs); + return status; +} + +enum sched_status sched_get_client_param( + void *handle, void *client_hdl, + enum sched_index param_index, + union sched_value_type *param_value) +{ + struct sched_ctx *sched_ctx = (struct sched_ctx *)handle; + struct _sched_clnt_list_node *clnt_node = + (struct _sched_clnt_list_node *)client_hdl; + enum sched_status status; + + SCHED_MSG_HIGH("sched_get_client_param API"); + + if (!sched_ctx || !clnt_node || + !param_value) { + SCHED_MSG_ERR("Bad input parameters: sched_ctx=%p, " + "clnt_node=%p, param_value=%p", + sched_ctx, clnt_node, + param_value); + + return SCHED_S_EBADPARM; + } + (void)SCHED_CRITSEC_ENTER(clnt_node->data.clnt_cs); + status = sched_process_clnt_lvl_get_param(sched_ctx, + &clnt_node->data, + param_index, param_value); + (void)SCHED_CRITSEC_LEAVE(clnt_node->data.clnt_cs); + return status; +} + +enum sched_status sched_set_client_param( + void *handle, void *client_hdl, + enum sched_index param_index, + union sched_value_type *param_value) +{ + struct sched_ctx *sched_ctx = (struct sched_ctx *)handle; + struct _sched_clnt_list_node *clnt_node = + (struct _sched_clnt_list_node *)client_hdl; + enum sched_status status; + + SCHED_MSG_HIGH("sched_set_client_param API"); + + if (!sched_ctx || !clnt_node || + !param_value) { + SCHED_MSG_ERR("Bad input parameters: " + "sched_ctx=%p, clnt_node=%p, " + "param_value=%p", sched_ctx, clnt_node, + param_value); + return SCHED_S_EBADPARM; + } + + (void)SCHED_CRITSEC_ENTER(sched_ctx->sched_cs); + (void)SCHED_CRITSEC_ENTER(clnt_node->data.clnt_cs); + + status = sched_process_clnt_lvl_set_param(sched_ctx, + &clnt_node->data, param_index, param_value); + + (void)SCHED_CRITSEC_LEAVE(clnt_node->data.clnt_cs); + (void)SCHED_CRITSEC_LEAVE(sched_ctx->sched_cs); + return status; +} + +enum sched_status sched_suspend_resume_client( + void *handle, void *client_hdl, u32 state) +{ + struct sched_ctx *sched_ctx = (struct sched_ctx *)handle; + struct _sched_clnt_list_node *clnt_node = + (struct _sched_clnt_list_node *)client_hdl; + enum sched_status status; + + SCHED_MSG_HIGH("sched_client_suspend_resume API"); + + if (!sched_ctx || !clnt_node) { + SCHED_MSG_ERR + ("Bad input parameters: sched_ctx=%p, 
clnt_node=%p", + sched_ctx, clnt_node); + return SCHED_S_EBADPARM; + } + + (void)SCHED_CRITSEC_ENTER(clnt_node->data.clnt_cs); + status = + sched_process_suspend_resume_clnt(sched_ctx, clnt_node, + state); + (void)SCHED_CRITSEC_LEAVE(clnt_node->data.clnt_cs); + return status; +} diff --git a/drivers/misc/video_core/720p/scheduler/vid_frame_scheduler_api.h b/drivers/misc/video_core/720p/scheduler/vid_frame_scheduler_api.h new file mode 100644 index 0000000000000..29e0692b72e75 --- /dev/null +++ b/drivers/misc/video_core/720p/scheduler/vid_frame_scheduler_api.h @@ -0,0 +1,150 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef _SCHEDULER_API_H_ +#define _SCHEDULER_API_H_ + +enum sched_status { + SCHED_S_OK = 0x0, + SCHED_S_NOPTKN, + SCHED_S_NOOTKN, + SCHED_S_SLEEP, + SCHED_S_QEMPTY, + SCHED_S_QFULL, + SCHED_S_EOF, + SCHED_S_EFAIL = 0x64, + SCHED_S_ENOMEM, + SCHED_S_EBADPARM, + SCHED_S_EINVALOP, + SCHED_S_ENOTIMPL, + SCHED_S_ENORES, + SCHED_S_EINVALST, + SCHED_S_MAX = 0x7fffffff +}; + +enum sched_index { + SCHED_I_START_UNUSED = 0x0, + SCHED_I_PERFLEVEL, + SCHED_I_CLNT_START_UNUSED = 0x63, + SCHED_I_CLNT_CURRQLEN, + SCHED_I_CLNT_PTKNRATE, + SCHED_I_CLNT_PTKNPERFRM, + SCHED_I_CLNT_FRAMERATE, + SCHED_I_CLNT_OTKNMAX, + SCHED_I_CLNT_OTKNPERIPFRM, + SCHED_I_CLNT_OTKNCURRENT, + SCHED_I_MAX = 0x7fffffff +}; + +struct sched_client_frm_rate { + u32 numer; + u32 denom; + +}; + +union sched_value_type { + u32 un_value; + struct sched_client_frm_rate frm_rate; + +}; + +struct sched_init_param { + u32 perf_lvl; + +}; + +enum sched_client_ctgy { + SCHED_CLNT_RT_BUFF = 0, + SCHED_CLNT_RT_NOBUFF, + SCHED_CLNT_NONRT, + SCHED_CLNT_MAX = 0x7fffffff +}; + +struct sched_client_init_param { + enum sched_client_ctgy client_ctgy; + u32 max_queue_len; + struct sched_client_frm_rate frm_rate; + u32 tkn_per_frm; + u32 alloc_p_tkn_rate; + u32 o_tkn_max; + u32 o_tkn_per_ip_frm; + u32 o_tkn_init; + + void *client_data; + +}; + +enum sched_status sched_create + (struct sched_init_param *init_param, void **handle); + +enum sched_status sched_destroy(void *handle); + +enum sched_status sched_get_param + (void *handle, + enum sched_index param_index, union sched_value_type *param_value); + +enum sched_status sched_set_param + (void *handle, + enum sched_index param_index, union sched_value_type *param_value); + +enum sched_status sched_add_client + (void *handle, + struct sched_client_init_param *init_param, void **client_hdl); + +enum sched_status sched_remove_client(void *handle, void *client_hdl); + +enum sched_status sched_flush_client_buffer + (void *handle, void *client_hdl, void **pp_frm_data); + +enum sched_status sched_mark_client_eof(void *handle, void *client_hdl); + +enum sched_status sched_update_client_o_tkn + (void *handle, void *client_hdl, u32 type, u32 o_tkn); + +enum sched_status sched_queue_frame + (void *handle, void *client_hdl, void *frm_data); +enum sched_status sched_re_queue_frame +(void *handle, void *client_hdl, void *frm_data); + +enum sched_status sched_de_queue_frame + (void *handle, void **pp_frm_data, void **pp_client_data); + +enum sched_status sched_get_client_param + (void *handle, + void *client_hdl, + enum sched_index param_index, union sched_value_type *param_value); + +enum sched_status sched_set_client_param + (void *handle, + void *client_hdl, + enum sched_index param_index, union sched_value_type *param_value); + +enum sched_status sched_suspend_resume_client + (void *handle, void *client_hdl, u32 state); + +#endif diff --git a/drivers/misc/video_core/720p/scheduler/vid_frame_scheduler_utils.c b/drivers/misc/video_core/720p/scheduler/vid_frame_scheduler_utils.c new file mode 100644 index 0000000000000..f6a7b83b5b56a --- /dev/null +++ b/drivers/misc/video_core/720p/scheduler/vid_frame_scheduler_utils.c @@ -0,0 +1,154 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include "video_core_type.h" +#include "vid_frame_scheduler_utils.h" + +/** + * SCHED_ASSERT () - This function is a wrapper to underlying ASSERT + * @val: value to be checked for + * function. + * DEPENDENCIES: None + * Returns none + */ +SCHED_INLINE void SCHED_ASSERT(int val) +{ + +} /* end of SCHED_ASSERT */ + +/** + * SCHED_MIN () - This function will find minimum of two values + * @x: value 1 + * @y: value 2 + * DEPENDENCIES: None + * Returns none + */ +SCHED_INLINE int SCHED_MIN(int x, int y) +{ + if (x < y) + return x; + else + return y; + +} /* end of SCHED_MIN */ + +/** + * SCHED_MALLOC () - This function is a wrapper to underlying malloc + * @size: memory size to be allocated + * function + * DEPENDENCIES: None + * Returns none + */ +SCHED_INLINE void *SCHED_MALLOC(int size) +{ + return kmalloc(size, GFP_KERNEL); +} /* end of SCHED_MALLOC */ + +/** + * SCHED_FREE () - This function is a wrapper to underlying memory free + * @ptr: memory to be freed + * function + * DEPENDENCIES: None + * Returns none + */ +SCHED_INLINE void SCHED_FREE(void *ptr) +{ + kfree(ptr); +} /* end of SCHED_FREE */ + +/** + * SCHED_MEMSET () - This function is a wrapper to underlying memory set + * @ptr: ptr to memory + * @val: value to be set + * @size: memory size to be set + * function + * DEPENDENCIES: None + * Returns none + */ +SCHED_INLINE void *SCHED_MEMSET(void *ptr, int val, int size) +{ + return memset(ptr, val, size); +} /* end of SCHED_MEMSET */ + +/** + * SCHED_GET_CURRENT_TIME () - This function is a wrapper to underlying get time + * @pn_time: ptr time value in milliseconds + * function + * DEPENDENCIES: None + * Returns SCHED_S_OK on success + */ +SCHED_INLINE enum sched_status SCHED_GET_CURRENT_TIME(u32 *pn_time) +{ + struct timeval tv; + do_gettimeofday(&tv); + *pn_time = (tv.tv_sec * 1000) + (tv.tv_usec / 1000); + return SCHED_S_OK; + +} /* end of SCHED_GET_CURRENT_TIME */ + +/** + * SCHED_CRITSEC_CREATE () - This function is a wrapper to creating a critical + * @cs: ptr to a critical section type + * section + * DEPENDENCIES: None + * Returns SCHED_S_OK on success + */ +SCHED_INLINE enum sched_status SCHED_CRITSEC_CREATE(u32 **cs) +{ + return SCHED_S_OK; + +} /* end of SCHED_CRITSEC_CREATE */ + +/** + * SCHED_CRITSEC_RELEASE () - This function is a wrapper to releasing a critical + * @cs: critical section handle type + * section resource + * DEPENDENCIES: None + * Returns SCHED_S_OK on success + */ +SCHED_INLINE enum sched_status SCHED_CRITSEC_RELEASE(u32 *cs) +{ + return SCHED_S_OK; + +} /* end of SCHED_CRITSEC_RELEASE */ + +/** + * SCHED_CRITSEC_ENTER () - This function is a wrapper to enter a critical + * @cs: critical section handle type + * section + * DEPENDENCIES: None + * Returns SCHED_S_OK on success + */ +SCHED_INLINE enum sched_status SCHED_CRITSEC_ENTER(u32 *cs) +{ + return SCHED_S_OK; + +} /* end of SCHED_CRITSEC_ENTER */ + +/** + * SCHED_CRITSEC_LEAVE () - This function is a wrapper to leave a critical + * @cs: critical section handle type + * section + * DEPENDENCIES: None + * Returns 
SCHED_S_OK on success + */ +SCHED_INLINE enum sched_status SCHED_CRITSEC_LEAVE(u32 *cs) +{ + return SCHED_S_OK; + +} /* end of SCHED_CRITSEC_LEAVE */ diff --git a/drivers/misc/video_core/720p/scheduler/vid_frame_scheduler_utils.h b/drivers/misc/video_core/720p/scheduler/vid_frame_scheduler_utils.h new file mode 100644 index 0000000000000..f5e1d7dede4d8 --- /dev/null +++ b/drivers/misc/video_core/720p/scheduler/vid_frame_scheduler_utils.h @@ -0,0 +1,79 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#ifndef _SCHEDULER_UTILS_H_ +#define _SCHEDULER_UTILS_H_ + +#include "vid_frame_scheduler_api.h" + +//TODO lots of low hanging fruit in here +#define SCHED_INLINE + +#if DEBUG + +#define SCHED_MSG_LOW(xx_fmt, ...) printk(KERN_INFO "\n " \ + xx_fmt, ## __VA_ARGS__) +#define SCHED_MSG_MED(xx_fmt, ...) printk(KERN_INFO "\n" \ + xx_fmt, ## __VA_ARGS__) +#define SCHED_MSG_HIGH(xx_fmt, ...) printk(KERN_WARNING "\n" \ + xx_fmt, ## __VA_ARGS__) + +#else + +#define SCHED_MSG_LOW(xx_fmt...) +#define SCHED_MSG_MED(xx_fmt...) +#define SCHED_MSG_HIGH(xx_fmt...) + +#endif + +#define SCHED_MSG_ERR(xx_fmt, ...) printk(KERN_ERR "\n err: " \ + xx_fmt, ## __VA_ARGS__) +#define SCHED_MSG_FATAL(xx_fmt, ...) 
printk(KERN_ERR "\n " \ + xx_fmt, ## __VA_ARGS__) + +SCHED_INLINE void SCHED_ASSERT(int val); + +SCHED_INLINE int SCHED_MIN(int x, int y); + +SCHED_INLINE enum sched_status SCHED_CRITSEC_CREATE(u32 **cs); + +SCHED_INLINE enum sched_status SCHED_CRITSEC_RELEASE(u32 *cs); + +SCHED_INLINE enum sched_status SCHED_CRITSEC_ENTER(u32 *cs); + +SCHED_INLINE enum sched_status SCHED_CRITSEC_LEAVE(u32 *cs); + +SCHED_INLINE void *SCHED_MALLOC(int size); + +SCHED_INLINE void SCHED_FREE(void *ptr); + +SCHED_INLINE void *SCHED_MEMSET(void *ptr, int val, int size); + +SCHED_INLINE enum sched_status SCHED_GET_CURRENT_TIME(u32 *pn_time); + +#endif diff --git a/drivers/misc/video_core/720p/vcd/vcd.h b/drivers/misc/video_core/720p/vcd/vcd.h new file mode 100644 index 0000000000000..1367e7d4a73f3 --- /dev/null +++ b/drivers/misc/video_core/720p/vcd/vcd.h @@ -0,0 +1,320 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef _VCD_H_ +#define _VCD_H_ + +#include "vcd_api.h" +#include "vid_frame_scheduler_api.h" +#include "vcd_ddl_api.h" +#include "vcd_res_tracker_api.h" +#include "vcd_util.h" +#include "vcd_client_sm.h" +#include "vcd_core.h" +#include "vcd_device_sm.h" + +void vcd_reset_device_channels(struct vcd_dev_ctxt *dev_ctxt); + +u32 vcd_get_command_channel(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_transc **pp_transc); + +u32 vcd_get_command_channel_in_loop(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_transc **pp_transc); + +void vcd_mark_command_channel(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_transc *transc); + +void vcd_release_command_channel(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_transc *transc); + +void vcd_release_multiple_command_channels(struct vcd_dev_ctxt *dev_ctxt, + u32 channels); + +void vcd_release_interim_command_channels(struct vcd_dev_ctxt *dev_ctxt); + +u32 vcd_get_frame_channel(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_transc **pp_transc); + +u32 vcd_get_frame_channel_in_loop(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_transc **pp_transc); + +void vcd_mark_frame_channel(struct vcd_dev_ctxt *dev_ctxt); + +void vcd_release_frame_channel(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_transc *transc); + +void vcd_release_multiple_frame_channels(struct vcd_dev_ctxt *dev_ctxt, + u32 channels); + +void vcd_release_interim_frame_channels(struct vcd_dev_ctxt *dev_ctxt); +u32 vcd_core_is_busy(struct vcd_dev_ctxt *dev_ctxt); + +void vcd_device_timer_start(struct vcd_dev_ctxt *dev_ctxt); +void vcd_device_timer_stop(struct vcd_dev_ctxt *dev_ctxt); + +u32 vcd_init_device_context(struct vcd_drv_ctxt *drv_ctxt, u32 ev_code); + +u32 vcd_deinit_device_context(struct vcd_drv_ctxt *drv_ctxt, u32 ev_code); + +u32 vcd_init_client_context(struct vcd_clnt_ctxt *cctxt); + +void vcd_destroy_client_context(struct vcd_clnt_ctxt *cctxt); + +u32 vcd_check_for_client_context(struct vcd_dev_ctxt *dev_ctxt, s32 driver_id); + +u32 vcd_validate_driver_handle(struct vcd_dev_ctxt *dev_ctxt, + s32 driver_handle); + +void vcd_handle_for_last_clnt_close(struct vcd_dev_ctxt *dev_ctxt, + u32 send_deinit); + +u32 vcd_common_allocate_set_buffer(struct vcd_clnt_ctxt *cctxt, + enum vcd_buffer_type buffer, size_t sz, + struct vcd_buffer_pool **pp_buf_pool); + +u32 vcd_set_buffer_internal(struct vcd_clnt_ctxt *cctxt, + struct vcd_buffer_pool *buf_pool, void *buf, size_t sz); + +u32 vcd_allocate_buffer_internal(struct vcd_clnt_ctxt *cctxt, + struct vcd_buffer_pool *buf_pool, size_t buf_size, void **virt_addr, + phys_addr_t *phys_addr); + +u32 vcd_free_one_buffer_internal(struct vcd_clnt_ctxt *cctxt, + enum vcd_buffer_type vcd_buffer_type, u8 *buffer); + +u32 vcd_free_buffers_internal(struct vcd_clnt_ctxt *cctxt, + struct vcd_buffer_pool *buf_pool); + +u32 vcd_alloc_buffer_pool_entries(struct vcd_buffer_pool *buf_pool, + struct vcd_buffer_requirement *buf_req); + +void vcd_free_buffer_pool_entries(struct vcd_buffer_pool *buf_pool); + +void vcd_flush_in_use_buffer_pool_entries(struct vcd_clnt_ctxt *cctxt, + struct vcd_buffer_pool *buf_pool, u32 event); + +void vcd_reset_buffer_pool_for_reuse(struct vcd_buffer_pool *buf_pool); + +struct vcd_buffer_entry *vcd_get_free_buffer_pool_entry( + struct vcd_buffer_pool *pool); + +struct vcd_buffer_entry *vcd_find_buffer_pool_entry(struct vcd_buffer_pool + *pool, void *virt_addr); + +struct vcd_buffer_entry *vcd_buffer_pool_entry_de_q( + struct vcd_buffer_pool *pool); + +u32 vcd_buffer_pool_entry_en_q(struct vcd_buffer_pool *pool, + struct vcd_buffer_entry *entry); + 
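+/*
+ * Client command queue helpers. The names suggest a simple FIFO of pending
+ * client commands: en_q appends a command, de_q pops the oldest one, and
+ * flush_and_en_q presumably empties the queue before queuing the new
+ * command. A rough usage sketch under that assumption (process_cmd() is a
+ * hypothetical caller-side handler, not part of this driver):
+ *
+ *	enum vcd_command_type cmd;
+ *
+ *	if (vcd_client_cmd_en_q(cctxt, VCD_CMD_CODEC_STOP)) {
+ *		while (vcd_client_cmd_de_q(cctxt, &cmd))
+ *			process_cmd(cctxt, cmd);
+ *	}
+ */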
+u32 vcd_client_cmd_en_q(struct vcd_clnt_ctxt *cctxt, + enum vcd_command_type command); + +void vcd_client_cmd_flush_and_en_q(struct vcd_clnt_ctxt *cctxt, + enum vcd_command_type command); + +u32 vcd_client_cmd_de_q(struct vcd_clnt_ctxt *cctxt, + enum vcd_command_type *command); + +u32 vcd_handle_recvd_eos(struct vcd_clnt_ctxt *cctxt, + struct vcd_frame_data *input_frame, u32 * pb_eos_handled); + +u32 vcd_handle_first_decode_frame(struct vcd_clnt_ctxt *cctxt); + +u32 vcd_add_client_to_sched(struct vcd_clnt_ctxt *cctxt); + +u32 vcd_handle_input_frame(struct vcd_clnt_ctxt *cctxt, + struct vcd_frame_data *input_frame); + +u32 vcd_store_seq_hdr(struct vcd_clnt_ctxt *cctxt, + struct vcd_sequence_hdr *seq_hdr); + +u32 vcd_set_frame_size(struct vcd_clnt_ctxt *cctxt, + struct vcd_property_frame_size *frm_size); + +u32 vcd_set_frame_rate(struct vcd_clnt_ctxt *cctxt, + struct vcd_property_frame_rate *fps); + +u32 vcd_calculate_frame_delta(struct vcd_clnt_ctxt *cctxt, + struct vcd_frame_data *frame); + +struct vcd_buffer_entry *vcd_check_fill_output_buffer( + struct vcd_clnt_ctxt *cctxt, struct vcd_frame_data *buffer); + +u32 vcd_requeue_input_frame(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_clnt_ctxt *cctxt, struct vcd_buffer_entry *buf_entry); + +u32 vcd_schedule_frame(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_clnt_ctxt **pp_cctxt, + struct vcd_buffer_entry **pp_ip_buf_entry); + +u32 vcd_map_sched_status(enum sched_status sched_status); + +u32 vcd_submit_command_in_continue(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_transc *transc); + +u32 vcd_submit_cmd_sess_start(struct vcd_transc *transc); + +u32 vcd_submit_cmd_sess_end(struct vcd_transc *transc); + +void vcd_submit_cmd_client_close(struct vcd_clnt_ctxt *cctxt); + +u32 vcd_submit_frame(struct vcd_dev_ctxt *dev_ctxt, struct vcd_transc *transc); + +u32 vcd_try_submit_frame_in_continue(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_transc *transc); + +u32 vcd_process_cmd_sess_start(struct vcd_clnt_ctxt *cctxt); + +void vcd_try_submit_frame(struct vcd_dev_ctxt *dev_ctxt); + +u32 vcd_setup_with_ddl_capabilities(struct vcd_dev_ctxt *dev_ctxt); +void vcd_handle_submit_frame_failed(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_transc *transc); + +struct vcd_transc *vcd_get_free_trans_tbl_entry(struct vcd_dev_ctxt *dev_ctxt); + +void vcd_release_trans_tbl_entry(struct vcd_transc *trans_entry); + +void vcd_release_all_clnt_frm_transc(struct vcd_clnt_ctxt *cctxt); +void vcd_release_all_clnt_def_frm_transc(struct vcd_clnt_ctxt *cctxt); +void vcd_release_all_clnt_transc(struct vcd_clnt_ctxt *cctxt); + +u32 vcd_handle_input_done(struct vcd_clnt_ctxt *cctxt, void *payload, + u32 event, u32 status); + +void vcd_handle_input_done_in_eos(struct vcd_clnt_ctxt *cctxt, void *payload, + u32 status); + +void vcd_handle_input_done_failed(struct vcd_clnt_ctxt *cctxt, + struct vcd_transc *transc); + +void vcd_handle_input_done_for_interlacing(struct vcd_clnt_ctxt *cctxt); + +void vcd_handle_input_done_with_trans_end(struct vcd_clnt_ctxt *cctxt); + +u32 vcd_handle_frame_done(struct vcd_clnt_ctxt *cctxt, void *payload, + u32 event, u32 status); + +void vcd_handle_frame_done_for_interlacing(struct vcd_clnt_ctxt *cctxt, + struct vcd_transc *transc_ip1, struct ddl_frame_data_tag *op_frm, + u32 status); + +u32 vcd_handle_first_frame_done(struct vcd_clnt_ctxt *cctxt, void *payload); + +void vcd_handle_frame_done_in_eos(struct vcd_clnt_ctxt *cctxt, void *payload, + u32 status); + +u32 vcd_handle_first_encode_frame_done(struct vcd_clnt_ctxt *cctxt, + void *payload); + +u32 
vcd_handle_output_required(struct vcd_clnt_ctxt *cctxt, void *payload, + u32 status); + +u32 vcd_handle_output_required_in_flushing(struct vcd_clnt_ctxt *cctxt, + void *payload); + +u32 vcd_handle_output_req_tran_end_in_eos(struct vcd_clnt_ctxt *cctxt); + +u32 vcd_validate_io_done_pyld(void *payload, u32 status); + +void vcd_handle_eos_trans_end(struct vcd_clnt_ctxt *cctxt); + +void vcd_handle_eos_done(struct vcd_clnt_ctxt *cctxt, + struct vcd_transc *transc, u32 status); + +void vcd_send_frame_done_in_eos(struct vcd_clnt_ctxt *cctxt, + struct vcd_frame_data *input_frame, u32 valid_opbuf); + +void vcd_send_frame_done_in_eos_for_dec(struct vcd_clnt_ctxt *cctxt, + struct vcd_frame_data *input_frame); + +void vcd_send_frame_done_in_eos_for_enc(struct vcd_clnt_ctxt *cctxt, + struct vcd_frame_data *input_frame); + +void vcd_handle_start_done(struct vcd_clnt_ctxt *cctxt, + struct vcd_transc *transc, u32 status); + +void vcd_handle_stop_done(struct vcd_clnt_ctxt *cctxt, + struct vcd_transc *transc, u32 status); + +void vcd_handle_stop_done_in_starting(struct vcd_clnt_ctxt *cctxt, + struct vcd_transc *transc, u32 status); + +void vcd_handle_stop_done_in_invalid(struct vcd_clnt_ctxt *cctxt, u32 status); + +void vcd_send_flush_done(struct vcd_clnt_ctxt *cctxt, u32 status); + +void vcd_process_pending_flush_in_eos(struct vcd_clnt_ctxt *cctxt); + +void vcd_process_pending_stop_in_eos(struct vcd_clnt_ctxt *cctxt); + +void vcd_handle_trans_pending(struct vcd_clnt_ctxt *cctxt); + +void vcd_flush_output_buffers(struct vcd_clnt_ctxt *cctxt); + +u32 vcd_flush_buffers(struct vcd_clnt_ctxt *cctxt, u32 mode); +void vcd_flush_buffers_in_err_fatal(struct vcd_clnt_ctxt *cctxt); + +u32 vcd_power_event(struct vcd_dev_ctxt *dev_ctxt, struct vcd_clnt_ctxt *cctxt, + u32 event); + +u32 vcd_device_power_event(struct vcd_dev_ctxt *dev_ctxt, u32 event, + struct vcd_clnt_ctxt *cctxt); + +u32 vcd_client_power_event(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_clnt_ctxt *cctxt, u32 event); + +u32 vcd_enable_clock(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_clnt_ctxt *cctxt); + +u32 vcd_disable_clock(struct vcd_dev_ctxt *dev_ctxt); + +u32 vcd_set_perf_level(struct vcd_dev_ctxt *dev_ctxt, u32 perf_lvl, + struct vcd_clnt_ctxt *cctxt); + +u32 vcd_update_clnt_perf_lvl(struct vcd_clnt_ctxt *cctxt, + struct vcd_property_frame_rate *fps, u32 frm_p_units); + +u32 vcd_gate_clock(struct vcd_dev_ctxt *dev_ctxt); + +u32 vcd_un_gate_clock(struct vcd_dev_ctxt *dev_ctxt); + +void vcd_handle_err_fatal(struct vcd_clnt_ctxt *cctxt, u32 event, u32 status); + +void vcd_handle_device_err_fatal(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_clnt_ctxt *cctxt); + +void vcd_clnt_handle_device_err_fatal(struct vcd_clnt_ctxt *cctxt, u32 event); + +void vcd_handle_err_in_starting(struct vcd_clnt_ctxt *cctxt, u32 status); + +void vcd_handle_ind_hw_err_fatal(struct vcd_clnt_ctxt *cctxt, u32 event, + u32 status); + +#endif diff --git a/drivers/misc/video_core/720p/vcd/vcd_api.c b/drivers/misc/video_core/720p/vcd/vcd_api.c new file mode 100644 index 0000000000000..202f61d88a77c --- /dev/null +++ b/drivers/misc/video_core/720p/vcd/vcd_api.c @@ -0,0 +1,882 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include "video_core_type.h" +#include "vcd.h" + +u32 vcd_init(struct vcd_init_config *config, s32 *driver_handle) +{ + u32 rc = VCD_S_SUCCESS; + struct vcd_drv_ctxt *drv_ctxt; + + VCD_MSG_MED("vcd_init:"); + + if (!config || !driver_handle || !config->pf_map_dev_base_addr) { + VCD_MSG_ERROR("Bad parameters"); + + return VCD_ERR_ILLEGAL_PARM; + } + + drv_ctxt = vcd_get_drv_context(); + + if (!drv_ctxt->dev_mutex) { + drv_ctxt->dev_mutex = kmalloc(sizeof(struct mutex), GFP_KERNEL); + if (!drv_ctxt->dev_mutex) { + VCD_MSG_ERROR("Failed: vcd_critical_section_create"); + return VCD_ERR_ALLOC_FAIL; + } + mutex_init(drv_ctxt->dev_mutex); + } + + mutex_lock(drv_ctxt->dev_mutex); + + if (drv_ctxt->dev_state.state_table->ev_hdlr.pf_init) { + rc = drv_ctxt->dev_state.state_table->ev_hdlr. + pf_init(drv_ctxt, config, driver_handle); + } else { + VCD_MSG_ERROR("Unsupported API in device state %d\n", + drv_ctxt->dev_state.state); + + rc = VCD_ERR_BAD_STATE; + } + + mutex_unlock(drv_ctxt->dev_mutex); + + return rc; + +} +EXPORT_SYMBOL(vcd_init); + +u32 vcd_term(s32 driver_handle) +{ + u32 rc = VCD_S_SUCCESS; + struct vcd_drv_ctxt *drv_ctxt; + + VCD_MSG_MED("vcd_term:"); + + drv_ctxt = vcd_get_drv_context(); + + if (!drv_ctxt->dev_mutex) { + VCD_MSG_ERROR("No critical section object"); + + return VCD_ERR_BAD_STATE; + } + + mutex_lock(drv_ctxt->dev_mutex); + + if (drv_ctxt->dev_state.state_table->ev_hdlr.pf_term) { + rc = drv_ctxt->dev_state.state_table->ev_hdlr. + pf_term(drv_ctxt, driver_handle); + } else { + VCD_MSG_ERROR("Unsupported API in device state %d\n", + drv_ctxt->dev_state.state); + + rc = VCD_ERR_BAD_STATE; + } + + mutex_unlock(drv_ctxt->dev_mutex); + + if (drv_ctxt->dev_state.state == VCD_DEVICE_STATE_NULL) { + VCD_MSG_HIGH + ("Device in NULL state. Releasing critical section\n"); + + mutex_destroy(drv_ctxt->dev_mutex); + kfree(drv_ctxt->dev_mutex); + drv_ctxt->dev_mutex = NULL; + } + + return rc; + +} +EXPORT_SYMBOL(vcd_term); + +u32 vcd_open(s32 driver_handle, u32 decoding, + void (*callback) (u32 event, u32 status, void *info, u32 size, + void *handle, void *const client_data), void *client_data) +{ + u32 rc = VCD_S_SUCCESS; + struct vcd_drv_ctxt *drv_ctxt; + + VCD_MSG_MED("vcd_open:\n"); + + if (!callback) { + VCD_MSG_ERROR("Bad parameters\n"); + + return VCD_ERR_ILLEGAL_PARM; + } + + drv_ctxt = vcd_get_drv_context(); + + if (!drv_ctxt->dev_mutex) { + VCD_MSG_ERROR("No critical section object\n"); + + return VCD_ERR_BAD_STATE; + } + + mutex_lock(drv_ctxt->dev_mutex); + + if (drv_ctxt->dev_state.state_table->ev_hdlr.pf_open) { + rc = drv_ctxt->dev_state.state_table->ev_hdlr. 
+ pf_open(drv_ctxt, driver_handle, decoding, callback, + client_data); + } else { + VCD_MSG_ERROR("Unsupported API in device state %d\n", + drv_ctxt->dev_state.state); + + rc = VCD_ERR_BAD_STATE; + } + + mutex_unlock(drv_ctxt->dev_mutex); + + return rc; + +} +EXPORT_SYMBOL(vcd_open); + +u32 vcd_close(void *handle) +{ + struct vcd_clnt_ctxt *cctxt = (struct vcd_clnt_ctxt *)handle; + struct vcd_drv_ctxt *drv_ctxt; + u32 rc; + + VCD_MSG_MED("vcd_close:"); + + if (!cctxt || cctxt->signature != VCD_SIGNATURE) { + VCD_MSG_ERROR("Bad client handle\n"); + + return VCD_ERR_BAD_HANDLE; + } + + drv_ctxt = vcd_get_drv_context(); + + if (drv_ctxt->dev_state.state_table->ev_hdlr.pf_close) { + rc = drv_ctxt->dev_state.state_table->ev_hdlr. + pf_close(drv_ctxt, cctxt); + } else { + VCD_MSG_ERROR("Unsupported API in device state %d\n", + drv_ctxt->dev_state.state); + + rc = VCD_ERR_BAD_STATE; + } + + return rc; + +} +EXPORT_SYMBOL(vcd_close); + +u32 vcd_encode_start(void *handle) +{ + struct vcd_clnt_ctxt *cctxt = (struct vcd_clnt_ctxt *)handle; + struct vcd_drv_ctxt *drv_ctxt; + u32 rc; + + VCD_MSG_MED("vcd_encode_start:"); + + if (!cctxt || cctxt->signature != VCD_SIGNATURE) { + VCD_MSG_ERROR("Bad client handle\n"); + + return VCD_ERR_BAD_HANDLE; + } + + drv_ctxt = vcd_get_drv_context(); + + mutex_lock(drv_ctxt->dev_mutex); + + if (cctxt->clnt_state.state_table->ev_hdlr.pf_encode_start && + drv_ctxt->dev_ctxt.pwr_state != VCD_PWR_STATE_SLEEP) { + rc = cctxt->clnt_state.state_table->ev_hdlr. + pf_encode_start(cctxt); + } else { + VCD_MSG_ERROR + ("Unsupported API dev power state %d OR client state %d\n", + drv_ctxt->dev_ctxt.pwr_state, + cctxt->clnt_state.state); + + rc = VCD_ERR_BAD_STATE; + } + + mutex_unlock(drv_ctxt->dev_mutex); + + return rc; + +} +EXPORT_SYMBOL(vcd_encode_start); + +u32 vcd_encode_frame(void *handle, struct vcd_frame_data *input_frame) +{ + struct vcd_clnt_ctxt *cctxt = (struct vcd_clnt_ctxt *)handle; + struct vcd_drv_ctxt *drv_ctxt; + u32 rc; + + VCD_MSG_MED("vcd_encode_frame:\n"); + + if (!cctxt || cctxt->signature != VCD_SIGNATURE) { + VCD_MSG_ERROR("Bad client handle\n"); + + return VCD_ERR_BAD_HANDLE; + } + + if (!input_frame) { + VCD_MSG_ERROR("Bad parameters\n"); + + return VCD_ERR_BAD_POINTER; + } + + drv_ctxt = vcd_get_drv_context(); + + mutex_lock(drv_ctxt->dev_mutex); + + if (cctxt->clnt_state.state_table->ev_hdlr.pf_encode_frame) { + rc = cctxt->clnt_state.state_table->ev_hdlr. + pf_encode_frame(cctxt, input_frame); + } else { + VCD_MSG_ERROR("Unsupported API in client state %d\n", + cctxt->clnt_state.state); + + rc = VCD_ERR_BAD_STATE; + } + + mutex_unlock(drv_ctxt->dev_mutex); + + return rc; + +} +EXPORT_SYMBOL(vcd_encode_frame); + +u32 vcd_decode_start(void *handle, struct vcd_sequence_hdr *seq_hdr) +{ + struct vcd_clnt_ctxt *cctxt = (struct vcd_clnt_ctxt *)handle; + struct vcd_drv_ctxt *drv_ctxt; + u32 rc; + + VCD_MSG_MED("vcd_decode_start:\n"); + + if (!cctxt || cctxt->signature != VCD_SIGNATURE) { + VCD_MSG_ERROR("Bad client handle\n"); + + return VCD_ERR_BAD_HANDLE; + } + + drv_ctxt = vcd_get_drv_context(); + + mutex_lock(drv_ctxt->dev_mutex); + + if (cctxt->clnt_state.state_table->ev_hdlr.pf_decode_start && + drv_ctxt->dev_ctxt.pwr_state != VCD_PWR_STATE_SLEEP) { + rc = cctxt->clnt_state.state_table->ev_hdlr. 
+ pf_decode_start(cctxt, seq_hdr); + } else { + VCD_MSG_ERROR + ("Unsupported API dev power state %d OR client state %d\n", + drv_ctxt->dev_ctxt.pwr_state, + cctxt->clnt_state.state); + + rc = VCD_ERR_BAD_STATE; + } + + mutex_unlock(drv_ctxt->dev_mutex); + + return rc; + +} +EXPORT_SYMBOL(vcd_decode_start); + +u32 vcd_decode_frame(void *handle, struct vcd_frame_data *input_frame) +{ + struct vcd_clnt_ctxt *cctxt = + (struct vcd_clnt_ctxt *)handle; + struct vcd_drv_ctxt *drv_ctxt; + u32 rc; + + VCD_MSG_MED("vcd_decode_frame:\n"); + + if (!cctxt || cctxt->signature != VCD_SIGNATURE) { + VCD_MSG_ERROR("Bad client handle\n"); + + return VCD_ERR_BAD_HANDLE; + } + + if (!input_frame) { + VCD_MSG_ERROR("Bad parameters\n"); + + return VCD_ERR_BAD_POINTER; + } + + drv_ctxt = vcd_get_drv_context(); + + mutex_lock(drv_ctxt->dev_mutex); + + if (cctxt->clnt_state.state_table->ev_hdlr.pf_decode_frame) { + rc = cctxt->clnt_state.state_table->ev_hdlr. + pf_decode_frame(cctxt, input_frame); + } else { + VCD_MSG_ERROR("Unsupported API in client state %d\n", + cctxt->clnt_state.state); + + rc = VCD_ERR_BAD_STATE; + } + + mutex_unlock(drv_ctxt->dev_mutex); + + return rc; + +} +EXPORT_SYMBOL(vcd_decode_frame); + +u32 vcd_pause(void *handle) +{ + struct vcd_drv_ctxt *drv_ctxt; + struct vcd_clnt_ctxt *cctxt = + (struct vcd_clnt_ctxt *)handle; + u32 rc; + + VCD_MSG_MED("vcd_pause:\n"); + + if (!cctxt || cctxt->signature != VCD_SIGNATURE) { + VCD_MSG_ERROR("Bad client handle\n"); + + return VCD_ERR_BAD_HANDLE; + } + + drv_ctxt = vcd_get_drv_context(); + + mutex_lock(drv_ctxt->dev_mutex); + + if (cctxt->clnt_state.state_table->ev_hdlr.pf_pause) { + rc = cctxt->clnt_state.state_table->ev_hdlr. + pf_pause(cctxt); + } else { + VCD_MSG_ERROR("Unsupported API in client state %d\n", + cctxt->clnt_state.state); + + rc = VCD_ERR_BAD_STATE; + } + + mutex_unlock(drv_ctxt->dev_mutex); + + return rc; + +} +EXPORT_SYMBOL(vcd_pause); + +u32 vcd_resume(void *handle) +{ + struct vcd_drv_ctxt *drv_ctxt; + struct vcd_clnt_ctxt *cctxt = (struct vcd_clnt_ctxt *)handle; + u32 rc; + + VCD_MSG_MED("vcd_resume:\n"); + + if (!cctxt || cctxt->signature != VCD_SIGNATURE) { + VCD_MSG_ERROR("Bad client handle\n"); + + return VCD_ERR_BAD_HANDLE; + } + + drv_ctxt = vcd_get_drv_context(); + + mutex_lock(drv_ctxt->dev_mutex); + + if (drv_ctxt->dev_state.state_table->ev_hdlr.pf_resume && + drv_ctxt->dev_ctxt.pwr_state != VCD_PWR_STATE_SLEEP) { + rc = drv_ctxt->dev_state.state_table->ev_hdlr. + pf_resume(drv_ctxt, cctxt); + } else { + VCD_MSG_ERROR + ("Unsupported API dev power state %d OR client state %d\n", + drv_ctxt->dev_ctxt.pwr_state, + cctxt->clnt_state.state); + + rc = VCD_ERR_BAD_STATE; + } + + mutex_unlock(drv_ctxt->dev_mutex); + + return rc; + +} +EXPORT_SYMBOL(vcd_resume); + +u32 vcd_flush(void *handle, u32 mode) +{ + struct vcd_clnt_ctxt *cctxt = (struct vcd_clnt_ctxt *)handle; + struct vcd_drv_ctxt *drv_ctxt; + u32 rc; + + VCD_MSG_MED("vcd_flush:\n"); + + if (!cctxt || cctxt->signature != VCD_SIGNATURE) { + VCD_MSG_ERROR("Bad client handle\n"); + + return VCD_ERR_BAD_HANDLE; + } + + drv_ctxt = vcd_get_drv_context(); + + mutex_lock(drv_ctxt->dev_mutex); + + if (cctxt->clnt_state.state_table->ev_hdlr.pf_flush) { + rc = cctxt->clnt_state.state_table->ev_hdlr. 
+ pf_flush(cctxt, mode); + } else { + VCD_MSG_ERROR("Unsupported API in client state %d\n", + cctxt->clnt_state.state); + + rc = VCD_ERR_BAD_STATE; + } + + mutex_unlock(drv_ctxt->dev_mutex); + + return rc; + +} +EXPORT_SYMBOL(vcd_flush); + +u32 vcd_stop(void *handle) +{ + struct vcd_clnt_ctxt *cctxt = (struct vcd_clnt_ctxt *)handle; + struct vcd_drv_ctxt *drv_ctxt; + u32 rc; + + VCD_MSG_MED("vcd_stop:\n"); + + if (!cctxt || cctxt->signature != VCD_SIGNATURE) { + VCD_MSG_ERROR("Bad client handle\n"); + + return VCD_ERR_BAD_HANDLE; + } + + drv_ctxt = vcd_get_drv_context(); + + mutex_lock(drv_ctxt->dev_mutex); + + if (cctxt->clnt_state.state_table->ev_hdlr.pf_stop && + drv_ctxt->dev_ctxt.pwr_state != VCD_PWR_STATE_SLEEP) { + rc = cctxt->clnt_state.state_table->ev_hdlr. + pf_stop(cctxt); + } else { + VCD_MSG_ERROR + ("Unsupported API dev power state %d OR client state %d\n", + drv_ctxt->dev_ctxt.pwr_state, + cctxt->clnt_state.state); + + rc = VCD_ERR_BAD_STATE; + } + + mutex_unlock(drv_ctxt->dev_mutex); + + return rc; + +} +EXPORT_SYMBOL(vcd_stop); + +u32 vcd_set_property(void *handle, struct vcd_property_hdr *prop_hdr, + void *prop_val) +{ + struct vcd_clnt_ctxt *cctxt = (struct vcd_clnt_ctxt *)handle; + struct vcd_drv_ctxt *drv_ctxt; + u32 rc; + + VCD_MSG_MED("vcd_set_property:\n"); + + if (!cctxt || cctxt->signature != VCD_SIGNATURE) { + VCD_MSG_ERROR("Bad client handle\n"); + + return VCD_ERR_BAD_HANDLE; + } + + if (!prop_hdr || !prop_val) { + VCD_MSG_ERROR("Bad parameters\n"); + + return VCD_ERR_BAD_POINTER; + } + + drv_ctxt = vcd_get_drv_context(); + + mutex_lock(drv_ctxt->dev_mutex); + + if (cctxt->clnt_state.state_table->ev_hdlr.pf_set_property) { + rc = cctxt->clnt_state.state_table->ev_hdlr. + pf_set_property(cctxt, prop_hdr, prop_val); + } else { + VCD_MSG_ERROR("Unsupported API in client state %d\n", + cctxt->clnt_state.state); + + rc = VCD_ERR_BAD_STATE; + } + + mutex_unlock(drv_ctxt->dev_mutex); + + return rc; + +} +EXPORT_SYMBOL(vcd_set_property); + +u32 vcd_get_property(void *handle, struct vcd_property_hdr *prop_hdr, + void *prop_val) +{ + struct vcd_clnt_ctxt *cctxt = (struct vcd_clnt_ctxt *)handle; + struct vcd_drv_ctxt *drv_ctxt; + u32 rc; + + VCD_MSG_MED("vcd_get_property:\n"); + + if (!cctxt || cctxt->signature != VCD_SIGNATURE) { + VCD_MSG_ERROR("Bad client handle\n"); + + return VCD_ERR_BAD_HANDLE; + } + + if (!prop_hdr || !prop_val) { + VCD_MSG_ERROR("Bad parameters\n"); + + return VCD_ERR_BAD_POINTER; + } + + drv_ctxt = vcd_get_drv_context(); + + mutex_lock(drv_ctxt->dev_mutex); + + if (cctxt->clnt_state.state_table->ev_hdlr.pf_get_property) { + rc = cctxt->clnt_state.state_table->ev_hdlr. 
+ pf_get_property(cctxt, prop_hdr, prop_val); + } else { + VCD_MSG_ERROR("Unsupported API in client state %d\n", + cctxt->clnt_state.state); + + rc = VCD_ERR_BAD_STATE; + } + + mutex_unlock(drv_ctxt->dev_mutex); + + return rc; + +} +EXPORT_SYMBOL(vcd_get_property); + +u32 vcd_set_buffer_requirements(void *handle, enum vcd_buffer_type buffer_type, + struct vcd_buffer_requirement *buffer_req) +{ + struct vcd_clnt_ctxt *cctxt = (struct vcd_clnt_ctxt *)handle; + struct vcd_drv_ctxt *drv_ctxt; + u32 rc; + + VCD_MSG_MED("vcd_set_buffer_requirements:\n"); + + if (!cctxt || cctxt->signature != VCD_SIGNATURE) { + VCD_MSG_ERROR("Bad client handle\n"); + + return VCD_ERR_BAD_HANDLE; + } + + if (!buffer_req) { + VCD_MSG_ERROR("Bad parameters\n"); + + return VCD_ERR_BAD_POINTER; + } + + drv_ctxt = vcd_get_drv_context(); + + mutex_lock(drv_ctxt->dev_mutex); + + if (cctxt->clnt_state.state_table->ev_hdlr. + pf_set_buffer_requirements) { + rc = cctxt->clnt_state.state_table->ev_hdlr. + pf_set_buffer_requirements(cctxt, buffer_type, buffer_req); + } else { + VCD_MSG_ERROR("Unsupported API in client state %d\n", + cctxt->clnt_state.state); + + rc = VCD_ERR_BAD_STATE; + } + + mutex_unlock(drv_ctxt->dev_mutex); + + return rc; + +} +EXPORT_SYMBOL(vcd_set_buffer_requirements); + +u32 vcd_get_buffer_requirements(void *handle, enum vcd_buffer_type buffer_type, + struct vcd_buffer_requirement *buffer_req) +{ + struct vcd_clnt_ctxt *cctxt = (struct vcd_clnt_ctxt *)handle; + struct vcd_drv_ctxt *drv_ctxt; + u32 rc; + + VCD_MSG_MED("vcd_get_buffer_requirements:\n"); + + if (!cctxt || cctxt->signature != VCD_SIGNATURE) { + VCD_MSG_ERROR("Bad client handle\n"); + + return VCD_ERR_BAD_HANDLE; + } + + if (!buffer_req) { + VCD_MSG_ERROR("Bad parameters\n"); + + return VCD_ERR_BAD_POINTER; + } + + drv_ctxt = vcd_get_drv_context(); + + mutex_lock(drv_ctxt->dev_mutex); + + if (cctxt->clnt_state.state_table->ev_hdlr. + pf_get_buffer_requirements) { + rc = cctxt->clnt_state.state_table->ev_hdlr. + pf_get_buffer_requirements(cctxt, buffer_type, buffer_req); + } else { + VCD_MSG_ERROR("Unsupported API in client state %d\n", + cctxt->clnt_state.state); + + rc = VCD_ERR_BAD_STATE; + } + + mutex_unlock(drv_ctxt->dev_mutex); + + return rc; + +} +EXPORT_SYMBOL(vcd_get_buffer_requirements); + +u32 vcd_set_buffer(void *handle, enum vcd_buffer_type buffer_type, void *buffer, + size_t buf_size) +{ + struct vcd_clnt_ctxt *cctxt = handle; + struct vcd_drv_ctxt *drv_ctxt; + u32 rc; + + VCD_MSG_MED("vcd_set_buffer:\n"); + + if (!cctxt || cctxt->signature != VCD_SIGNATURE) { + VCD_MSG_ERROR("Bad client handle\n"); + + return VCD_ERR_BAD_HANDLE; + } + + if (!buffer || !buf_size) { + VCD_MSG_ERROR("Bad parameters\n"); + + return VCD_ERR_BAD_POINTER; + } + + drv_ctxt = vcd_get_drv_context(); + + mutex_lock(drv_ctxt->dev_mutex); + + if (cctxt->clnt_state.state_table->ev_hdlr.pf_set_buffer) { + rc = cctxt->clnt_state.state_table->ev_hdlr. 
+ pf_set_buffer(cctxt, buffer_type, buffer, buf_size); + } else { + VCD_MSG_ERROR("Unsupported API in client state %d\n", + cctxt->clnt_state.state); + + rc = VCD_ERR_BAD_STATE; + } + + mutex_unlock(drv_ctxt->dev_mutex); + + return rc; + +} +EXPORT_SYMBOL(vcd_set_buffer); + +u32 vcd_allocate_buffer(void *handle, enum vcd_buffer_type buffer_type, + size_t sz, void **virt_addr, phys_addr_t *phys_addr) +{ + struct vcd_clnt_ctxt *cctxt = handle; + struct vcd_drv_ctxt *drv_ctxt; + u32 rc; + + VCD_MSG_MED("vcd_allocate_buffer:\n"); + + if (!cctxt || cctxt->signature != VCD_SIGNATURE) { + VCD_MSG_ERROR("Bad client handle\n"); + return VCD_ERR_BAD_HANDLE; + } + + if (!virt_addr || !phys_addr || !sz) { + VCD_MSG_ERROR("Bad parameters\n"); + return VCD_ERR_BAD_POINTER; + } + + drv_ctxt = vcd_get_drv_context(); + + mutex_lock(drv_ctxt->dev_mutex); + + if (cctxt->clnt_state.state_table->ev_hdlr.pf_allocate_buffer) { + rc = cctxt->clnt_state.state_table->ev_hdlr.pf_allocate_buffer( + cctxt, buffer_type, sz, virt_addr, phys_addr); + } else { + VCD_MSG_ERROR("Unsupported API in client state %d\n", + cctxt->clnt_state.state); + + rc = VCD_ERR_BAD_STATE; + } + + mutex_unlock(drv_ctxt->dev_mutex); + + return rc; + +} +EXPORT_SYMBOL(vcd_allocate_buffer); + +u32 vcd_free_buffer(void *handle, enum vcd_buffer_type buffer_type, void *buf) +{ + struct vcd_clnt_ctxt *cctxt = handle; + struct vcd_drv_ctxt *drv_ctxt; + u32 rc; + + VCD_MSG_MED("vcd_free_buffer:"); + + if (!cctxt || cctxt->signature != VCD_SIGNATURE) { + VCD_MSG_ERROR("Bad client handle\n"); + return VCD_ERR_BAD_HANDLE; + } + + drv_ctxt = vcd_get_drv_context(); + + mutex_lock(drv_ctxt->dev_mutex); + + if (cctxt->clnt_state.state_table->ev_hdlr.pf_free_buffer) { + rc = cctxt->clnt_state.state_table->ev_hdlr. + pf_free_buffer(cctxt, buffer_type, buf); + } else { + VCD_MSG_ERROR("Unsupported API in client state %d\n", + cctxt->clnt_state.state); + rc = VCD_ERR_BAD_STATE; + } + + mutex_unlock(drv_ctxt->dev_mutex); + + return rc; + +} +EXPORT_SYMBOL(vcd_free_buffer); + +u32 vcd_fill_output_buffer(void *handle, struct vcd_frame_data *buffer) +{ + struct vcd_clnt_ctxt *cctxt = + (struct vcd_clnt_ctxt *)handle; + struct vcd_drv_ctxt *drv_ctxt; + u32 rc; + + VCD_MSG_MED("vcd_fill_output_buffer:\n"); + + if (!cctxt || cctxt->signature != VCD_SIGNATURE) { + VCD_MSG_ERROR("Bad client handle\n"); + + return VCD_ERR_BAD_HANDLE; + } + + if (!buffer) { + VCD_MSG_ERROR("Bad parameters\n"); + + return VCD_ERR_BAD_POINTER; + } + + drv_ctxt = vcd_get_drv_context(); + + mutex_lock(drv_ctxt->dev_mutex); + + if (cctxt->clnt_state.state_table->ev_hdlr.pf_fill_output_buffer) { + rc = cctxt->clnt_state.state_table->ev_hdlr. + pf_fill_output_buffer(cctxt, buffer); + } else { + VCD_MSG_ERROR("Unsupported API in client state %d\n", + cctxt->clnt_state.state); + + rc = VCD_ERR_BAD_STATE; + } + + mutex_unlock(drv_ctxt->dev_mutex); + + return rc; + +} +EXPORT_SYMBOL(vcd_fill_output_buffer); + +u32 vcd_set_device_power(s32 driver_handle, enum vcd_power_state pwr_state) +{ + u32 rc = VCD_S_SUCCESS; + struct vcd_drv_ctxt *drv_ctxt; + + VCD_MSG_MED("vcd_set_device_power:\n"); + + drv_ctxt = vcd_get_drv_context(); + + if (!drv_ctxt->dev_mutex) { + VCD_MSG_ERROR("No critical section object\n"); + + return VCD_ERR_BAD_STATE; + } + + mutex_lock(drv_ctxt->dev_mutex); + + if (drv_ctxt->dev_state.state_table->ev_hdlr.pf_set_dev_pwr) { + rc = drv_ctxt->dev_state.state_table->ev_hdlr. 
+			pf_set_dev_pwr(drv_ctxt, pwr_state);
+	} else {
+		VCD_MSG_ERROR("Unsupported API in device state %d\n",
+			drv_ctxt->dev_state.state);
+
+		rc = VCD_ERR_BAD_STATE;
+	}
+
+	mutex_unlock(drv_ctxt->dev_mutex);
+
+	return rc;
+
+}
+EXPORT_SYMBOL(vcd_set_device_power);
+
+void vcd_read_and_clear_interrupt(void)
+{
+	VCD_MSG_LOW("vcd_read_and_clear_interrupt:\n");
+	ddl_read_and_clear_interrupt();
+}
+
+
+void vcd_response_handler(void)
+{
+	struct vcd_drv_ctxt *drv_ctxt;
+
+	VCD_MSG_LOW("vcd_response_handler:\n");
+	drv_ctxt = vcd_get_drv_context();
+
+	mutex_lock(drv_ctxt->dev_mutex);
+
+	if (!ddl_process_core_response()) {
+		VCD_MSG_HIGH("ddl_process_core_response indicated no further "
+			"processing\n");
+		mutex_unlock(drv_ctxt->dev_mutex);
+		return;
+	}
+
+	if (drv_ctxt->dev_ctxt.cont)
+		vcd_continue();
+	mutex_unlock(drv_ctxt->dev_mutex);
+}
+EXPORT_SYMBOL(vcd_response_handler);
diff --git a/drivers/misc/video_core/720p/vcd/vcd_api.h b/drivers/misc/video_core/720p/vcd/vcd_api.h
new file mode 100644
index 0000000000000..436833142680b
--- /dev/null
+++ b/drivers/misc/video_core/720p/vcd/vcd_api.h
@@ -0,0 +1,153 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * + */ +#ifndef _VCD_API_H_ +#define _VCD_API_H_ +#include "vcd_property.h" +#include "vcd_status.h" + +#define VCD_FRAME_FLAG_EOS 0x00000001 +#define VCD_FRAME_FLAG_ENDOFFRAME 0x00000010 +#define VCD_FRAME_FLAG_SYNCFRAME 0x00000020 +#define VCD_FRAME_FLAG_EXTRADATA 0x00000040 +#define VCD_FRAME_FLAG_CODECCONFIG 0x00000080 + +#define VCD_FLUSH_INPUT 0x0001 +#define VCD_FLUSH_OUTPUT 0x0002 +#define VCD_FLUSH_ALL 0x0003 + +#define VCD_FRAMETAG_INVALID 0xffffffff + +struct vcd_handle_container { + void *handle; +}; +struct vcd_flush_cmd { + u32 mode; +}; + +enum vcd_frame { + VCD_FRAME_YUV = 1, + VCD_FRAME_I, + VCD_FRAME_P, + VCD_FRAME_B, + VCD_FRAME_32BIT = 0x7fffffff +}; + +enum vcd_power_state { + VCD_PWR_STATE_ON = 1, + VCD_PWR_STATE_SLEEP, +}; + +struct vcd_frame_data { + void *virt_addr; + phys_addr_t phys_addr; + size_t alloc_len; + size_t data_len; + size_t offset; + s64 time_stamp; + u32 flags; + void *client_data; + struct vcd_property_dec_output_buffer dec_op_prop; + u32 interlaced; + enum vcd_frame frame_type; + u32 ip_frm_tag; +}; + +struct vcd_phys_sequence_hdr { + phys_addr_t addr; + size_t sz; +}; + +struct vcd_sequence_hdr { + void *addr; + size_t sz; +}; + +enum vcd_buffer_type { + VCD_BUFFER_INPUT = 0x1, + VCD_BUFFER_OUTPUT = 0x2, + VCD_BUFFER_INVALID = 0x3, + VCD_BUFFER_32BIT = 0x7FFFFFFF +}; + +struct vcd_buffer_requirement { + u32 min_count; + u32 actual_count; + u32 max_count; + size_t size; + u32 align; + u32 buf_pool_id; +}; + +struct vcd_init_config { + void *device_name; + void *(*pf_map_dev_base_addr) (void *device_name); + void (*pf_un_map_dev_base_addr) (void); + void (*pf_interrupt_clr) (void); + void (*pf_register_isr) (void *device_name); + void (*pf_deregister_isr) (void); + u32 (*pf_timer_create) (void (*pf_timer_handler)(void *), + void *user_data, void **pp_timer_handle); + void (*pf_timer_release) (void *timer_handle); + void (*pf_timer_start) (void *timer_handle, u32 time_out); + void (*pf_timer_stop) (void *timer_handle); +}; + +u32 vcd_init(struct vcd_init_config *config, s32 *driver_handle); +u32 vcd_term(s32 driver_handle); +u32 vcd_open(s32 driver_handle, u32 decoding, + void (*callback) (u32 event, u32 status, void *info, u32 size, + void *handle, void *const client_data), void *client_data); +u32 vcd_close(void *handle); +u32 vcd_encode_start(void *handle); +u32 vcd_encode_frame(void *handle, struct vcd_frame_data *input_frame); +u32 vcd_decode_start(void *handle, struct vcd_sequence_hdr *seq_hdr); +u32 vcd_decode_frame(void *handle, struct vcd_frame_data *input_frame); +u32 vcd_pause(void *handle); +u32 vcd_resume(void *handle); +u32 vcd_flush(void *handle, u32 mode); +u32 vcd_stop(void *handle); +u32 vcd_set_property(void *handle, struct vcd_property_hdr *prop_hdr, + void *prop_val); +u32 vcd_get_property(void *handle, struct vcd_property_hdr *prop_hdr, + void *prop_val); +u32 vcd_set_buffer_requirements(void *handle, enum vcd_buffer_type buffer_type, + struct vcd_buffer_requirement *buffer_req); +u32 vcd_get_buffer_requirements(void *handle, enum vcd_buffer_type buffer_type, + struct vcd_buffer_requirement *buffer_req); +u32 vcd_set_buffer(void *handle, enum vcd_buffer_type buffer_type, + void *buffer, size_t buf_size); +u32 vcd_allocate_buffer(void *handle, enum vcd_buffer_type buffer_type, + size_t sz, void **virt_addr, phys_addr_t *phys_addr); +u32 vcd_free_buffer(void *handle, enum vcd_buffer_type buffer_type, void *buf); +u32 vcd_fill_output_buffer(void *handle, struct vcd_frame_data *buffer); +u32 vcd_set_device_power(s32 driver_handle, 
enum vcd_power_state pwr_state); +void vcd_read_and_clear_interrupt(void); +void vcd_response_handler(void); + +#endif diff --git a/drivers/misc/video_core/720p/vcd/vcd_client_sm.c b/drivers/misc/video_core/720p/vcd/vcd_client_sm.c new file mode 100644 index 0000000000000..4617f8079539f --- /dev/null +++ b/drivers/misc/video_core/720p/vcd/vcd_client_sm.c @@ -0,0 +1,1499 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include "video_core_type.h" +#include "vcd.h" + +static const struct vcd_clnt_state_table *vcd_clnt_state_table[]; + +void vcd_clnt_handle_device_err_fatal(struct vcd_clnt_ctxt *cctxt, u32 event) +{ + if (cctxt->clnt_state.state != VCD_CLIENT_STATE_INVALID) { + cctxt->callback(event, VCD_ERR_HW_FATAL, NULL, 0, + cctxt, cctxt->client_data); + vcd_flush_buffers_in_err_fatal(cctxt); + vcd_do_client_state_transition(cctxt, + VCD_CLIENT_STATE_INVALID, + CLIENT_STATE_EVENT_NUMBER(pf_clnt_cb)); + } +} + +static u32 vcd_close_in_open(struct vcd_clnt_ctxt *cctxt) +{ + u32 rc = VCD_S_SUCCESS; + + VCD_MSG_LOW("vcd_close_in_open:"); + if (cctxt->in_buf_pool.allocated || cctxt->out_buf_pool.allocated) { + VCD_MSG_ERROR("Allocated buffers are not freed yet\n"); + return VCD_ERR_ILLEGAL_OP; + } + vcd_destroy_client_context(cctxt); + return rc; +} + +static u32 vcd_close_in_invalid(struct vcd_clnt_ctxt *cctxt) +{ + VCD_MSG_LOW("vcd_close_in_invalid:\n"); + if (cctxt->in_buf_pool.allocated || cctxt->out_buf_pool.allocated) { + VCD_MSG_ERROR("Allocated buffers are not freed yet\n"); + return VCD_ERR_ILLEGAL_OP; + } + + if (cctxt->status.cleaning_up) + cctxt->status.close_pending = true; + else + vcd_destroy_client_context(cctxt); + return VCD_S_SUCCESS; +} + +static u32 vcd_start_in_run_cmn(struct vcd_clnt_ctxt *cctxt) +{ + VCD_MSG_LOW("vcd_start_in_run_cmn:\n"); + cctxt->callback(VCD_EVT_RESP_START, VCD_S_SUCCESS, NULL, 0, cctxt, + cctxt->client_data); + return VCD_S_SUCCESS; +} + +static u32 vcd_encode_start_in_open(struct vcd_clnt_ctxt *cctxt) +{ + u32 rc = VCD_S_SUCCESS; + struct vcd_property_hdr prop_hdr; + struct vcd_property_vop_timing timing; + + VCD_MSG_LOW("vcd_encode_start_in_open:\n"); + + if (cctxt->decoding) { + VCD_MSG_ERROR("vcd_encode_init for decoder client\n"); + + return VCD_ERR_ILLEGAL_OP; + } + + if (!cctxt->in_buf_pool.entries || !cctxt->out_buf_pool.entries || + cctxt->in_buf_pool.validated != + cctxt->in_buf_pool.count || + cctxt->out_buf_pool.validated != + cctxt->out_buf_pool.count) { + VCD_MSG_ERROR("Buffer pool is not completely setup yet\n"); + + return VCD_ERR_BAD_STATE; + } + + rc = vcd_add_client_to_sched(cctxt); + + VCD_FAILED_RETURN(rc, "Failed: vcd_add_client_to_sched\n"); + + prop_hdr.id = VCD_I_VOP_TIMING; + prop_hdr.sz = sizeof(struct vcd_property_vop_timing); + rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr, &timing); + + 
VCD_FAILED_RETURN(rc, "Failed: Get VCD_I_VOP_TIMING\n"); + if (!timing.vop_time_resolution) { + VCD_MSG_ERROR("Vop_time_resolution value is zero\n"); + return VCD_ERR_FAIL; + } + cctxt->time_resoln = timing.vop_time_resolution; + + rc = vcd_process_cmd_sess_start(cctxt); + + if (!VCD_FAILED(rc)) { + vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_STARTING, + CLIENT_STATE_EVENT_NUMBER(pf_encode_start)); + } + + return rc; +} + +static u32 vcd_encode_start_in_run(struct vcd_clnt_ctxt *cctxt) +{ + VCD_MSG_LOW("vcd_encode_start_in_run:\n"); + vcd_start_in_run_cmn(cctxt); + return VCD_S_SUCCESS; +} + + +static u32 vcd_encode_frame_cmn(struct vcd_clnt_ctxt *cctxt, + struct vcd_frame_data *input_frame) +{ + VCD_MSG_LOW("vcd_encode_frame_cmn in %d:", cctxt->clnt_state.state); + + if (cctxt->decoding) { + VCD_MSG_ERROR("vcd_encode_frame for decoder client\n"); + + return VCD_ERR_ILLEGAL_OP; + } + + return vcd_handle_input_frame(cctxt, input_frame); +} + +static u32 vcd_decode_start_in_open(struct vcd_clnt_ctxt *cctxt, + struct vcd_sequence_hdr *seq_hdr) +{ + u32 rc = VCD_S_SUCCESS; + + VCD_MSG_LOW("vcd_decode_start_in_open:\n"); + + if (!cctxt->decoding) { + VCD_MSG_ERROR("vcd_decode_init for encoder client\n"); + + return VCD_ERR_ILLEGAL_OP; + } + + if (seq_hdr) { + VCD_MSG_HIGH("Seq hdr supplied. len = %d\n", seq_hdr->sz); + rc = vcd_store_seq_hdr(cctxt, seq_hdr); + + } else { + VCD_MSG_HIGH("Seq hdr not supplied\n"); + cctxt->seq_hdr.sz = 0; + cctxt->seq_hdr.addr = NULL; + } + + VCD_FAILED_RETURN(rc, "Err processing seq hdr\n"); + + rc = vcd_process_cmd_sess_start(cctxt); + + if (!VCD_FAILED(rc)) { + vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_STARTING, + CLIENT_STATE_EVENT_NUMBER(pf_decode_start)); + } + + return rc; +} + +static u32 vcd_decode_start_in_run(struct vcd_clnt_ctxt *cctxt, + struct vcd_sequence_hdr *seqhdr) +{ + VCD_MSG_LOW("vcd_decode_start_in_run:\n"); + vcd_start_in_run_cmn(cctxt); + return VCD_S_SUCCESS; +} + +static u32 vcd_decode_frame_cmn(struct vcd_clnt_ctxt *cctxt, + struct vcd_frame_data *input_frame) +{ + VCD_MSG_LOW("vcd_decode_frame_cmn in %d:\n", cctxt->clnt_state.state); + + if (!cctxt->decoding) { + VCD_MSG_ERROR("Decode_frame api called for Encoder client\n"); + + return VCD_ERR_ILLEGAL_OP; + } + + return vcd_handle_input_frame(cctxt, input_frame); +} + +static u32 vcd_pause_in_run(struct vcd_clnt_ctxt *cctxt) +{ + u32 rc = VCD_S_SUCCESS; + + VCD_MSG_LOW("vcd_pause_in_run:\n"); + + if (cctxt->sched_clnt_valid) { + rc = vcd_map_sched_status(sched_suspend_resume_client( + cctxt->dev_ctxt->sched_hdl, cctxt->sched_clnt_hdl, + false)); + } + + VCD_FAILED_RETURN(rc, "Failed: sched_suspend_resume_client\n"); + + if (cctxt->status.frame_submitted > 0) { + vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_PAUSING, + CLIENT_STATE_EVENT_NUMBER(pf_pause)); + + } else { + VCD_MSG_HIGH("No client frames are currently being processed\n"); + + vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_PAUSED, + CLIENT_STATE_EVENT_NUMBER(pf_pause)); + + cctxt->callback(VCD_EVT_RESP_PAUSE, VCD_S_SUCCESS, NULL, 0, + cctxt, cctxt->client_data); + + rc = vcd_power_event(cctxt->dev_ctxt, cctxt, + VCD_EVT_PWR_CLNT_PAUSE); + + if (VCD_FAILED(rc)) + VCD_MSG_ERROR("VCD_EVT_PWR_CLNT_PAUSE_END failed\n"); + + } + + return VCD_S_SUCCESS; +} + +static u32 vcd_resume_in_paused(struct vcd_clnt_ctxt *cctxt) +{ + struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt; + u32 rc = VCD_S_SUCCESS; + + VCD_MSG_LOW("vcd_resume_in_paused:\n"); + + if (cctxt->sched_clnt_valid) { + + rc = 
vcd_power_event(cctxt->dev_ctxt, cctxt, + VCD_EVT_PWR_CLNT_RESUME); + + if (VCD_FAILED(rc)) { + VCD_MSG_ERROR("VCD_EVT_PWR_CLNT_RESUME failed\n"); + } else { + rc = vcd_map_sched_status(sched_suspend_resume_client( + cctxt->dev_ctxt->sched_hdl, + cctxt->sched_clnt_hdl, true)); + if (VCD_FAILED(rc)) { + VCD_MSG_ERROR("rc = 0x%x. Failed: " + "sched_suspend_resume_client\n", rc); + } + + } + if (!VCD_FAILED(rc)) + vcd_try_submit_frame(dev_ctxt); + } + + if (!VCD_FAILED(rc)) { + vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_RUN, + CLIENT_STATE_EVENT_NUMBER(pf_resume)); + } + + return rc; +} + +static u32 vcd_flush_cmn(struct vcd_clnt_ctxt *cctxt, u32 mode) +{ + u32 rc = VCD_S_SUCCESS; + + VCD_MSG_LOW("vcd_flush_cmn in %d:\n", cctxt->clnt_state.state); + + rc = vcd_flush_buffers(cctxt, mode); + + VCD_FAILED_RETURN(rc, "Failed: vcd_flush_buffers\n"); + + if (cctxt->status.frame_submitted > 0) { + vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_FLUSHING, + CLIENT_STATE_EVENT_NUMBER(pf_flush)); + } else { + VCD_MSG_HIGH("All buffers are flushed\n"); + cctxt->status.flush_mode = mode; + vcd_send_flush_done(cctxt, VCD_S_SUCCESS); + } + + return rc; +} + +static u32 vcd_flush_inopen(struct vcd_clnt_ctxt *cctxt, u32 mode) +{ + VCD_MSG_LOW("vcd_flush_inopen:\n"); + cctxt->status.flush_mode = mode; + vcd_send_flush_done(cctxt, VCD_S_SUCCESS); + return VCD_S_SUCCESS; +} + +static u32 vcd_flush_in_flushing(struct vcd_clnt_ctxt *cctxt, u32 mode) +{ + u32 rc = VCD_S_SUCCESS; + + VCD_MSG_LOW("vcd_flush_in_flushing:\n"); + + rc = vcd_flush_buffers(cctxt, mode); + + return rc; +} + +static u32 vcd_flush_in_eos(struct vcd_clnt_ctxt *cctxt, u32 mode) +{ + VCD_MSG_LOW("vcd_flush_in_eos:\n"); + + if (mode > VCD_FLUSH_ALL || !mode) { + VCD_MSG_ERROR("Invalid flush mode %d\n", mode); + + return VCD_ERR_ILLEGAL_PARM; + } + + VCD_MSG_MED("Flush mode requested %d\n", mode); + + cctxt->status.flush_mode |= mode; + + return VCD_S_SUCCESS; +} + +static u32 vcd_flush_in_invalid(struct vcd_clnt_ctxt *cctxt, u32 mode) +{ + u32 rc = VCD_S_SUCCESS; + VCD_MSG_LOW("vcd_flush_in_invalid:\n"); + if (!cctxt->status.cleaning_up) { + rc = vcd_flush_buffers(cctxt, mode); + if (!VCD_FAILED(rc)) { + VCD_MSG_HIGH("All buffers are flushed\n"); + cctxt->status.flush_mode = mode; + vcd_send_flush_done(cctxt, VCD_S_SUCCESS); + } + } + return rc; +} + +static u32 vcd_stop_cmn(struct vcd_clnt_ctxt *cctxt) +{ + struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt; + u32 rc = VCD_S_SUCCESS; + struct vcd_transc *transc; + + VCD_MSG_LOW("vcd_stop_cmn in %d:\n", cctxt->clnt_state.state); + + rc = vcd_flush_buffers(cctxt, VCD_FLUSH_ALL); + + VCD_FAILED_RETURN(rc, "Failed: vcd_flush_buffers\n"); + + if (!cctxt->status.frame_submitted) { + + if (vcd_get_command_channel(dev_ctxt, &transc)) { + rc = vcd_power_event(dev_ctxt, cctxt, + VCD_EVT_PWR_CLNT_CMD_BEGIN); + + if (!VCD_FAILED(rc)) { + transc->type = VCD_CMD_CODEC_STOP; + transc->cctxt = cctxt; + + rc = vcd_submit_cmd_sess_end(transc); + } else { + VCD_MSG_ERROR("Failed:" + " VCD_EVT_PWR_CLNT_CMD_BEGIN\n"); + } + + if (VCD_FAILED(rc)) + vcd_release_command_channel(dev_ctxt, transc); + + } else { + vcd_client_cmd_flush_and_en_q(cctxt, + VCD_CMD_CODEC_STOP); + } + } + + if (VCD_FAILED(rc)) { + vcd_power_event(dev_ctxt, cctxt, VCD_EVT_PWR_CLNT_CMD_FAIL); + } else { + vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_STOPPING, + CLIENT_STATE_EVENT_NUMBER(pf_stop)); + } + + return rc; +} + + +static u32 vcd_stop_inopen(struct vcd_clnt_ctxt *cctxt) +{ + VCD_MSG_LOW("vcd_stop_inopen:\n"); 
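+	/*
+	 * In the OPEN state no session has been started and nothing has been
+	 * submitted to the hardware, so the stop request can be acknowledged
+	 * to the client immediately.
+	 */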
+ + cctxt->callback(VCD_EVT_RESP_STOP, VCD_S_SUCCESS, NULL, 0, cctxt, + cctxt->client_data); + + return VCD_S_SUCCESS; +} + +static u32 vcd_stop_in_run(struct vcd_clnt_ctxt *cctxt) +{ + u32 rc = VCD_S_SUCCESS; + + VCD_MSG_LOW("vcd_stop_in_run:\n"); + + rc = vcd_stop_cmn(cctxt); + + if (!VCD_FAILED(rc) && cctxt->status.b1st_frame_recvd) { + rc = vcd_power_event(cctxt->dev_ctxt, cctxt, + VCD_EVT_PWR_CLNT_LAST_FRAME); + } + + return rc; +} + +static u32 vcd_stop_in_eos(struct vcd_clnt_ctxt *cctxt) +{ + u32 rc = VCD_S_SUCCESS; + + VCD_MSG_LOW("vcd_stop_in_eos:\n"); + + cctxt->status.stop_pending = true; + + return rc; +} + +static u32 vcd_stop_in_invalid(struct vcd_clnt_ctxt *cctxt) +{ + VCD_MSG_LOW("vcd_stop_in_invalid:\n"); + if (cctxt->status.cleaning_up) { + cctxt->status.stop_pending = true; + } else { + vcd_flush_buffers(cctxt, VCD_FLUSH_ALL); + cctxt->callback(VCD_EVT_RESP_STOP, VCD_S_SUCCESS, NULL, 0, + cctxt, cctxt->client_data); + } + return VCD_S_SUCCESS; +} + +static u32 vcd_set_property_cmn(struct vcd_clnt_ctxt *cctxt, + struct vcd_property_hdr *prop_hdr, void *prop_val) +{ + u32 rc; + + VCD_MSG_LOW("vcd_set_property_cmn in %d:\n", cctxt->clnt_state.state); + VCD_MSG_LOW("property Id = %d\n", prop_hdr->id); + + if (!prop_hdr->sz || !prop_hdr->id) { + VCD_MSG_MED("Bad parameters\n"); + + return VCD_ERR_ILLEGAL_PARM; + } + + rc = ddl_set_property(cctxt->ddl_handle, prop_hdr, prop_val); + + VCD_FAILED_RETURN(rc, "Failed: ddl_set_property\n"); + + switch (prop_hdr->id) { + case VCD_I_LIVE: + { + struct vcd_property_live *live = (struct vcd_property_live *) + prop_val; + cctxt->live = live->live; + break; + } + case VCD_I_FRAME_RATE: + if (cctxt->sched_clnt_valid) { + rc = vcd_set_frame_rate(cctxt, + (struct vcd_property_frame_rate *)prop_val); + } + break; + case VCD_I_FRAME_SIZE: + if (cctxt->sched_clnt_valid) { + rc = vcd_set_frame_size(cctxt, + (struct vcd_property_frame_size *)prop_val); + } + break; + default: + break; + } + + return rc; +} + +static u32 vcd_get_property_cmn(struct vcd_clnt_ctxt *cctxt, + struct vcd_property_hdr *prop_hdr, void *prop_val) +{ + VCD_MSG_LOW("vcd_get_property_cmn in %d:\n", cctxt->clnt_state.state); + VCD_MSG_LOW("property id = %d\n", prop_hdr->id); + if (!prop_hdr->sz || !prop_hdr->id) { + VCD_MSG_MED("Bad parameters\n"); + + return VCD_ERR_ILLEGAL_PARM; + } + return ddl_get_property(cctxt->ddl_handle, prop_hdr, prop_val); +} + +static u32 vcd_set_buffer_requirements_cmn(struct vcd_clnt_ctxt *cctxt, + enum vcd_buffer_type vcd_buffer_type, + struct vcd_buffer_requirement *buffer_req) +{ + struct vcd_property_hdr prop_hdr; + u32 rc = VCD_S_SUCCESS; + struct vcd_buffer_pool *buf_pool; + + VCD_MSG_LOW("vcd_set_buffer_requirements_cmn in %d:\n", + cctxt->clnt_state.state); + + if (!cctxt->decoding && cctxt->clnt_state.state != + VCD_CLIENT_STATE_OPEN) { + VCD_MSG_ERROR("Bad state (%d) for encoder\n", + cctxt->clnt_state.state); + + return VCD_ERR_BAD_STATE; + } + + VCD_MSG_MED("Buffer type = %d\n", vcd_buffer_type); + + if (vcd_buffer_type == VCD_BUFFER_INPUT) { + prop_hdr.id = DDL_I_INPUT_BUF_REQ; + buf_pool = &cctxt->in_buf_pool; + } else if (vcd_buffer_type == VCD_BUFFER_OUTPUT) { + prop_hdr.id = DDL_I_OUTPUT_BUF_REQ; + buf_pool = &cctxt->out_buf_pool; + } else { + rc = VCD_ERR_ILLEGAL_PARM; + } + + VCD_FAILED_RETURN(rc, "Invalid buffer type provided\n"); + + if (buf_pool->validated > 0) { + VCD_MSG_ERROR("Need to free allocated buffers\n"); + + return VCD_ERR_ILLEGAL_OP; + } + + prop_hdr.sz = sizeof(*buffer_req); + + rc = 
ddl_set_property(cctxt->ddl_handle, &prop_hdr, buffer_req); + + VCD_FAILED_RETURN(rc, "Failed: ddl_set_property\n"); + + if (buf_pool->entries) { + VCD_MSG_MED("Resetting buffer requirements\n"); + + vcd_free_buffer_pool_entries(buf_pool); + } + + rc = vcd_alloc_buffer_pool_entries(buf_pool, buffer_req); + + return rc; +} + +static u32 vcd_get_buffer_requirements_cmn(struct vcd_clnt_ctxt *cctxt, + enum vcd_buffer_type vcd_buffer_type, + struct vcd_buffer_requirement *buffer_req) +{ + struct vcd_property_hdr prop_hdr; + u32 rc = VCD_S_SUCCESS; + + VCD_MSG_LOW("vcd_get_buffer_requirements_cmn in %d:\n", + cctxt->clnt_state.state); + + VCD_MSG_MED("Buffer type = %d\n", vcd_buffer_type); + + if (vcd_buffer_type == VCD_BUFFER_INPUT) + prop_hdr.id = DDL_I_INPUT_BUF_REQ; + else if (vcd_buffer_type == VCD_BUFFER_OUTPUT) + prop_hdr.id = DDL_I_OUTPUT_BUF_REQ; + else + rc = VCD_ERR_ILLEGAL_PARM; + + VCD_FAILED_RETURN(rc, "Invalid buffer type provided\n"); + + prop_hdr.sz = sizeof(*buffer_req); + + return ddl_get_property(cctxt->ddl_handle, &prop_hdr, buffer_req); + +} + +static u32 vcd_set_buffer_cmn(struct vcd_clnt_ctxt *cctxt, + enum vcd_buffer_type vcd_buffer_type, void *buf, size_t sz) +{ + u32 rc; + struct vcd_buffer_pool *buf_pool; + + VCD_MSG_LOW("vcd_set_buffer_cmn in %d:\n", cctxt->clnt_state.state); + + rc = vcd_common_allocate_set_buffer(cctxt, vcd_buffer_type, sz, + &buf_pool); + + if (!VCD_FAILED(rc)) { + rc = vcd_set_buffer_internal(cctxt, buf_pool, buf, sz); + } + + return rc; +} + +static u32 vcd_allocate_buffer_cmn(struct vcd_clnt_ctxt *cctxt, + enum vcd_buffer_type vcd_buffer_type, size_t sz, void **virt_addr, + phys_addr_t *phys_addr) +{ + u32 rc; + struct vcd_buffer_pool *buf_pool; + + VCD_MSG_LOW("vcd_allocate_buffer_cmn in %d:\n", + cctxt->clnt_state.state); + + rc = vcd_common_allocate_set_buffer(cctxt, vcd_buffer_type, sz, + &buf_pool); + + if (!VCD_FAILED(rc)) { + rc = vcd_allocate_buffer_internal(cctxt, buf_pool, sz, + virt_addr, phys_addr); + } + + return rc; +} + +static u32 vcd_free_buffer_cmn(struct vcd_clnt_ctxt *cctxt, + enum vcd_buffer_type vcd_buffer_type, void *buf) +{ + VCD_MSG_LOW("vcd_free_buffer_cmn in %d:\n", cctxt->clnt_state.state); + + return vcd_free_one_buffer_internal(cctxt, vcd_buffer_type, buf); +} + +static u32 vcd_fill_output_buffer_cmn(struct vcd_clnt_ctxt *cctxt, + struct vcd_frame_data *buffer) +{ + u32 rc = VCD_S_SUCCESS; + struct vcd_buffer_entry *buf_entry; + struct vcd_frame_data *frm_entry; + u32 q_result = true; + + VCD_MSG_LOW("vcd_fill_output_buffer_cmn in %d:\n", + cctxt->clnt_state.state); + + buf_entry = vcd_check_fill_output_buffer(cctxt, buffer); + if (!buf_entry) + return VCD_ERR_BAD_POINTER; + + q_result = vcd_buffer_pool_entry_en_q(&cctxt->out_buf_pool, buf_entry); + + if (!q_result && !cctxt->decoding) { + VCD_MSG_ERROR("Failed: vcd_buffer_pool_entry_en_q\n"); + + return VCD_ERR_FAIL; + } + + frm_entry = &buf_entry->frame; + + *frm_entry = *buffer; + frm_entry->phys_addr = buf_entry->phys_addr; + frm_entry->ip_frm_tag = VCD_FRAMETAG_INVALID; + frm_entry->data_len = 0; + + if (cctxt->sched_clnt_valid) { + if (cctxt->decoding && cctxt->status.b1st_frame_recvd) { + struct vcd_property_hdr prop_hdr; + struct ddl_frame_data_tag ddl_frm; + + prop_hdr.id = DDL_I_DPB_RELEASE; + prop_hdr.sz = sizeof(struct ddl_frame_data_tag); + + memset(&ddl_frm, 0, sizeof(ddl_frm)); + ddl_frm.vcd_frm = *frm_entry; + ddl_frm.intrlcd_ip_frm_tag = VCD_FRAMETAG_INVALID; + + rc = ddl_set_property(cctxt->ddl_handle, &prop_hdr, + &ddl_frm); + + if 
(VCD_FAILED(rc)) { + VCD_MSG_ERROR("Error returning output buffer to" + " HW. rc = 0x%x\n", rc); + + buf_entry->in_use = false; + } else { + cctxt->out_buf_pool.in_use++; + buf_entry->in_use = true; + } + } + + if (!VCD_FAILED(rc)) { + rc = vcd_map_sched_status(sched_update_client_o_tkn( + cctxt->dev_ctxt->sched_hdl, + cctxt->sched_clnt_hdl, true, + cctxt->sched_o_tkn_per_ip_frm)); + } + + if (!VCD_FAILED(rc)) + vcd_try_submit_frame(cctxt->dev_ctxt); + + } + + return rc; +} + +static u32 vcd_fill_output_buffer_in_eos(struct vcd_clnt_ctxt *cctxt, + struct vcd_frame_data *buffer) +{ + u32 rc = VCD_S_SUCCESS; + struct vcd_buffer_entry *buf_entry; + + VCD_MSG_LOW("vcd_fill_output_buffer_in_eos:\n"); + + buf_entry = vcd_check_fill_output_buffer(cctxt, buffer); + if (!buf_entry) + return VCD_ERR_BAD_POINTER; + + if (cctxt->status.eos_wait_for_op_buf) { + VCD_MSG_HIGH("Got an output buffer we were waiting for\n"); + + buf_entry->frame = *buffer; + + buf_entry->frame.data_len = 0; + buf_entry->frame.flags |= VCD_FRAME_FLAG_EOS; + buf_entry->frame.ip_frm_tag = + cctxt->status.eos_trig_ip_frm.ip_frm_tag; + buf_entry->frame.time_stamp = + cctxt->status.eos_trig_ip_frm.time_stamp; + + cctxt->callback(VCD_EVT_RESP_OUTPUT_DONE, VCD_S_SUCCESS, + &buf_entry->frame, sizeof(struct vcd_frame_data), + cctxt, cctxt->client_data); + + cctxt->status.eos_wait_for_op_buf = false; + + vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_RUN, + CLIENT_STATE_EVENT_NUMBER(pf_fill_output_buffer)); + + } else { + rc = vcd_fill_output_buffer_cmn(cctxt, buffer); + } + + return rc; +} + +static void vcd_clnt_cb_in_starting(struct vcd_clnt_ctxt *cctxt, u32 event, + u32 status, void *payload, u32 size, u32 *ddl_handle, + void *const client_data) +{ + struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt; + struct vcd_transc *transc = (struct vcd_transc *)client_data; + VCD_MSG_LOW("vcd_clnt_cb_in_starting:\n"); + if (cctxt->ddl_handle != ddl_handle) { + VCD_MSG_ERROR("vcd_clnt_cb_in_initing: Wrong DDL handle %p\n", + ddl_handle); + return; + } + + switch (event) { + case VCD_EVT_RESP_START: + vcd_handle_start_done(cctxt, (struct vcd_transc *)client_data, + status); + break; + case VCD_EVT_RESP_STOP: + vcd_handle_stop_done_in_starting(cctxt, (struct vcd_transc *) + client_data, status); + break; + case VCD_EVT_IND_HWERRFATAL: + cctxt->status.cmd_submitted--; + vcd_mark_command_channel(cctxt->dev_ctxt, transc); + vcd_handle_err_fatal(cctxt, VCD_EVT_RESP_START, status); + break; + default: + VCD_MSG_ERROR("Unexpected callback event=%d status=%d " + "from DDL\n", event, status); + dev_ctxt->cont = false; + break; + } +} + +static void vcd_clnt_cb_in_run(struct vcd_clnt_ctxt *cctxt, u32 event, + u32 status, void *payload, u32 size, u32 *ddl_handle, + void *const client_data) +{ + struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt; + u32 rc = VCD_S_SUCCESS; + + if (cctxt->ddl_handle != ddl_handle) { + VCD_MSG_ERROR("ddl_handle mismatch\n"); + + return; + } + + switch (event) { + case VCD_EVT_RESP_INPUT_DONE: + rc = vcd_handle_input_done(cctxt, payload, event, status); + break; + case VCD_EVT_RESP_OUTPUT_DONE: + if (!cctxt->status.b1st_op_done_recvd) { + if (!VCD_FAILED(status)) { + rc = vcd_handle_first_frame_done(cctxt, + payload); + + if (VCD_FAILED(rc)) { + VCD_MSG_ERROR("rc = 0x%x. 
Failed: " + "vcd_handle_first_frame_" + "done\n", rc); + status = VCD_ERR_FAIL; + } else { + cctxt->status.b1st_op_done_recvd = true; + } + } + } + + rc = vcd_handle_frame_done(cctxt, payload, event, status); + + break; + case VCD_EVT_RESP_OUTPUT_REQ: + rc = vcd_handle_output_required(cctxt, payload, status); + break; + case VCD_EVT_IND_RECONFIG: + break; + case VCD_EVT_RESP_TRANSACTION_PENDING: + vcd_handle_trans_pending(cctxt); + break; + case VCD_EVT_IND_HWERRFATAL: + vcd_handle_ind_hw_err_fatal(cctxt, VCD_EVT_IND_HWERRFATAL, + status); + break; + default: + VCD_MSG_ERROR("Unexpected callback event=%d status=%d from DDL\n", + event, status); + dev_ctxt->cont = false; + break; + } + + if (!VCD_FAILED(rc) && (event == VCD_EVT_RESP_INPUT_DONE || + event == VCD_EVT_RESP_OUTPUT_DONE || + event == VCD_EVT_RESP_OUTPUT_REQ)) { + + if (((struct ddl_frame_data_tag *)payload)->frm_trans_end) + vcd_mark_frame_channel(cctxt->dev_ctxt); + } +} + +static void vcd_clnt_cb_in_eos(struct vcd_clnt_ctxt *cctxt, u32 event, + u32 status, void *payload, u32 size, u32 *ddl_handle, + void *const client_data) +{ + struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt; + struct vcd_transc *transc = NULL; + + if (cctxt->ddl_handle != ddl_handle) { + VCD_MSG_ERROR("ddl_handle mismatch\n"); + + return; + } + + switch (event) { + case VCD_EVT_RESP_INPUT_DONE: + vcd_handle_input_done_in_eos(cctxt, payload, status); + break; + case VCD_EVT_RESP_OUTPUT_DONE: + vcd_handle_frame_done_in_eos(cctxt, payload, status); + break; + case VCD_EVT_RESP_OUTPUT_REQ: + vcd_handle_output_required(cctxt, payload, status); + break; + case VCD_EVT_RESP_EOS_DONE: + transc = (struct vcd_transc *)client_data; + vcd_handle_eos_done(cctxt, transc, status); + break; + case VCD_EVT_IND_HWERRFATAL: + vcd_handle_ind_hw_err_fatal(cctxt, VCD_EVT_IND_HWERRFATAL, + status); + break; + default: + VCD_MSG_ERROR("Unexpected callback event=%d status=%d from DDL\n", + event, status); + + dev_ctxt->cont = false; + break; + } + + if (event == VCD_EVT_RESP_INPUT_DONE || + event == VCD_EVT_RESP_OUTPUT_DONE || + event == VCD_EVT_RESP_OUTPUT_REQ) { + if (payload && ((struct ddl_frame_data_tag *) + payload)->frm_trans_end) { + vcd_mark_frame_channel(cctxt->dev_ctxt); + if (!cctxt->status.frame_submitted) + vcd_handle_eos_trans_end(cctxt); + } + } +} + +static void vcd_clnt_cb_in_flushing(struct vcd_clnt_ctxt *cctxt, u32 event, + u32 status, void *payload, u32 size, u32 *ddl_handle, + void *const client_data) +{ + struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt; + u32 rc = VCD_S_SUCCESS; + + VCD_MSG_LOW("vcd_clnt_cb_in_flushing:\n"); + + if (cctxt->ddl_handle != ddl_handle) { + VCD_MSG_ERROR("ddl_handle mismatch\n"); + return; + } + + switch (event) { + case VCD_EVT_RESP_INPUT_DONE: + rc = vcd_handle_input_done(cctxt, payload, + VCD_EVT_RESP_INPUT_FLUSHED, status); + break; + case VCD_EVT_RESP_OUTPUT_DONE: + rc = vcd_handle_frame_done(cctxt, payload, + VCD_EVT_RESP_OUTPUT_FLUSHED, status); + break; + case VCD_EVT_RESP_OUTPUT_REQ: + rc = vcd_handle_output_required_in_flushing(cctxt, payload); + break; + case VCD_EVT_IND_HWERRFATAL: + vcd_handle_ind_hw_err_fatal(cctxt, VCD_EVT_IND_HWERRFATAL, + status); + break; + default: + VCD_MSG_ERROR("Unexpected callback event=%d status=%d from DDL\n", + event, status); + dev_ctxt->cont = false; + break; + } + + if (!VCD_FAILED(rc) && (event == VCD_EVT_RESP_INPUT_DONE || + event == VCD_EVT_RESP_OUTPUT_DONE || + event == VCD_EVT_RESP_OUTPUT_REQ) && + ((struct ddl_frame_data_tag *)payload)->frm_trans_end) { + + 
vcd_mark_frame_channel(cctxt->dev_ctxt); + + if (!cctxt->status.frame_submitted) { + VCD_MSG_HIGH("All pending frames recvd from DDL\n"); + + if (cctxt->status.flush_mode & VCD_FLUSH_OUTPUT) { + vcd_flush_output_buffers(cctxt); + + vcd_release_all_clnt_frm_transc(cctxt); + } + + vcd_send_flush_done(cctxt, VCD_S_SUCCESS); + vcd_release_interim_frame_channels(dev_ctxt); + VCD_MSG_HIGH("Flush complete\n"); + vcd_release_all_clnt_def_frm_transc(cctxt); + vcd_do_client_state_transition(cctxt, + VCD_CLIENT_STATE_RUN, + CLIENT_STATE_EVENT_NUMBER(pf_clnt_cb)); + } + } +} + +static void vcd_clnt_cb_in_stopping(struct vcd_clnt_ctxt *cctxt, u32 event, + u32 status, void *payload, u32 size, u32 *ddl_handle, + void *const client_data) +{ + struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt; + u32 rc = VCD_S_SUCCESS; + + VCD_MSG_LOW("vcd_clnt_cb_in_stopping:\n"); + + if (cctxt->ddl_handle != ddl_handle) { + VCD_MSG_ERROR("ddl_handle mismatch\n"); + return; + } + + switch (event) { + case VCD_EVT_RESP_INPUT_DONE: + rc = vcd_handle_input_done(cctxt, payload, + VCD_EVT_RESP_INPUT_FLUSHED, status); + + break; + case VCD_EVT_RESP_OUTPUT_DONE: + rc = vcd_handle_frame_done(cctxt, payload, + VCD_EVT_RESP_OUTPUT_FLUSHED, status); + + break; + case VCD_EVT_RESP_OUTPUT_REQ: + rc = vcd_handle_output_required_in_flushing(cctxt, payload); + break; + case VCD_EVT_RESP_STOP: + vcd_handle_stop_done(cctxt, (struct vcd_transc *)client_data, + status); + break; + case VCD_EVT_IND_HWERRFATAL: + vcd_handle_ind_hw_err_fatal(cctxt, VCD_EVT_RESP_STOP, status); + break; + default: + VCD_MSG_ERROR("Unexpected callback event=%d status=%d from DDL\n", + event, status); + + dev_ctxt->cont = false; + break; + } + + if (!VCD_FAILED(rc) && (event == VCD_EVT_RESP_INPUT_DONE || + event == VCD_EVT_RESP_OUTPUT_DONE || + event == VCD_EVT_RESP_OUTPUT_REQ) && + ((struct ddl_frame_data_tag *)payload)->frm_trans_end) { + + vcd_mark_frame_channel(cctxt->dev_ctxt); + + if (!cctxt->status.frame_submitted) { + VCD_MSG_HIGH("All pending frames recvd from DDL\n"); + + vcd_flush_output_buffers(cctxt); + + cctxt->status.flush_mode = 0; + + vcd_release_all_clnt_frm_transc(cctxt); + + VCD_MSG_HIGH("All buffers flushed. 
Enqueuing stop cmd\n"); + + vcd_client_cmd_flush_and_en_q(cctxt, + VCD_CMD_CODEC_STOP); + } + } +} + +static void vcd_clnt_cb_in_pausing(struct vcd_clnt_ctxt *cctxt, u32 event, + u32 status, void *payload, u32 size, u32 *ddl_handle, + void *const client_data) +{ + struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt; + u32 rc = VCD_S_SUCCESS; + + VCD_MSG_LOW("vcd_clnt_cb_in_pausing:\n"); + + if (cctxt->ddl_handle != ddl_handle) { + VCD_MSG_ERROR("ddl_handle mismatch\n"); + + return; + } + + switch (event) { + case VCD_EVT_RESP_INPUT_DONE: + rc = vcd_handle_input_done(cctxt, payload, event, status); + break; + case VCD_EVT_RESP_OUTPUT_DONE: + rc = vcd_handle_frame_done(cctxt, payload, event, status); + break; + case VCD_EVT_RESP_OUTPUT_REQ: + rc = vcd_handle_output_required(cctxt, payload, status); + break; + case VCD_EVT_IND_HWERRFATAL: + vcd_handle_ind_hw_err_fatal(cctxt, VCD_EVT_RESP_PAUSE, status); + break; + default: + VCD_MSG_ERROR("Unexpected callback event=%d status=%d from DDL\n", + event, status); + + dev_ctxt->cont = false; + break; + } + + if (!VCD_FAILED(rc) && (event == VCD_EVT_RESP_INPUT_DONE || + event == VCD_EVT_RESP_OUTPUT_DONE || + event == VCD_EVT_RESP_OUTPUT_REQ) && + ((struct ddl_frame_data_tag *)payload)->frm_trans_end) { + + vcd_mark_frame_channel(cctxt->dev_ctxt); + + if (!cctxt->status.frame_submitted) { + VCD_MSG_HIGH("All pending frames recvd from DDL\n"); + + cctxt->callback(VCD_EVT_RESP_PAUSE, VCD_S_SUCCESS, NULL, + 0, cctxt, cctxt->client_data); + + vcd_do_client_state_transition(cctxt, + VCD_CLIENT_STATE_PAUSED, + CLIENT_STATE_EVENT_NUMBER(pf_clnt_cb)); + + rc = vcd_power_event(cctxt->dev_ctxt, cctxt, + VCD_EVT_PWR_CLNT_PAUSE); + + if (VCD_FAILED(rc)) { + VCD_MSG_ERROR("VCD_EVT_PWR_CLNT_PAUSE_END " + "failed\n"); + } + } + } +} + +static void vcd_clnt_cb_in_invalid(struct vcd_clnt_ctxt *cctxt, u32 event, + u32 status, void *payload, u32 size, u32 *ddl_handle, + void *const client_data) +{ + struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt; + VCD_MSG_LOW("vcd_clnt_cb_in_invalid:\n"); + if (cctxt->ddl_handle != ddl_handle) { + VCD_MSG_ERROR("ddl_handle mismatch\n"); + return; + } + switch (event) { + case VCD_EVT_RESP_STOP: + vcd_handle_stop_done_in_invalid(cctxt, status); + break; + case VCD_EVT_RESP_INPUT_DONE: + case VCD_EVT_RESP_OUTPUT_DONE: + case VCD_EVT_RESP_OUTPUT_REQ: + case VCD_EVT_RESP_TRANSACTION_PENDING: + break; + case VCD_EVT_IND_HWERRFATAL: + if (status == VCD_ERR_HW_FATAL) + vcd_handle_stop_done_in_invalid(cctxt, status); + + break; + default: + VCD_MSG_ERROR("Unexpected callback event=%d status=%d from DDL\n", + event, status); + dev_ctxt->cont = false; + break; + } +} + +static void vcd_clnt_enter_open(struct vcd_clnt_ctxt *cctxt, s32 ev_code) +{ + VCD_MSG_MED("Entering CLIENT_STATE_OPEN on api %d\n", ev_code); +} + +static void vcd_clnt_enter_starting(struct vcd_clnt_ctxt *cctxt, s32 ev_code) +{ + VCD_MSG_MED("Entering CLIENT_STATE_STARTING on api %d\n", ev_code); +} + +static void vcd_clnt_enter_run(struct vcd_clnt_ctxt *cctxt, s32 ev_code) +{ + VCD_MSG_MED("Entering CLIENT_STATE_RUN on api %d\n", ev_code); +} + +static void vcd_clnt_enter_flushing(struct vcd_clnt_ctxt *cctxt, s32 ev_code) +{ + VCD_MSG_MED("Entering CLIENT_STATE_FLUSHING on api %d\n", ev_code); +} + +static void vcd_clnt_enter_stopping(struct vcd_clnt_ctxt *cctxt, s32 ev_code) +{ + VCD_MSG_MED("Entering CLIENT_STATE_STOPPING on api %d\n", ev_code); +} + +static void vcd_clnt_enter_eos(struct vcd_clnt_ctxt *cctxt, s32 ev_code) +{ + u32 rc; + + VCD_MSG_MED("Entering 
CLIENT_STATE_EOS on api %d\n", ev_code); + rc = vcd_map_sched_status(sched_suspend_resume_client( + cctxt->dev_ctxt->sched_hdl, cctxt->sched_clnt_hdl, false)); + if (VCD_FAILED(rc)) + VCD_MSG_ERROR("Failed: sched_suspend_resume_client. rc=0x%x\n", + rc); +} + +static void vcd_clnt_enter_pausing(struct vcd_clnt_ctxt *cctxt, s32 ev_code) +{ + VCD_MSG_MED("Entering CLIENT_STATE_PAUSING on api %d\n", ev_code); +} + +static void vcd_clnt_enter_paused(struct vcd_clnt_ctxt *cctxt, s32 ev_code) +{ + VCD_MSG_MED("Entering CLIENT_STATE_PAUSED on api %d\n", ev_code); +} + +static void vcd_clnt_enter_invalid(struct vcd_clnt_ctxt *cctxt, s32 ev_code) +{ + VCD_MSG_MED("Entering CLIENT_STATE_INVALID on api %d\n", ev_code); + cctxt->ddl_hdl_valid = false; +} + +static void vcd_clnt_exit_open(struct vcd_clnt_ctxt *cctxt, s32 ev_code) +{ + VCD_MSG_MED("Exiting CLIENT_STATE_OPEN on api %d\n", ev_code); +} + +static void vcd_clnt_exit_starting(struct vcd_clnt_ctxt *cctxt, s32 ev_code) +{ + VCD_MSG_MED("Exiting CLIENT_STATE_STARTING on api %d\n", ev_code); +} + +static void vcd_clnt_exit_run(struct vcd_clnt_ctxt *cctxt, s32 ev_code) +{ + VCD_MSG_MED("Exiting CLIENT_STATE_RUN on api %d\n", ev_code); +} + +static void vcd_clnt_exit_flushing(struct vcd_clnt_ctxt *cctxt, s32 ev_code) +{ + VCD_MSG_MED("Exiting CLIENT_STATE_FLUSHING on api %d\n", ev_code); +} + +static void vcd_clnt_exit_stopping(struct vcd_clnt_ctxt *cctxt, s32 ev_code) +{ + VCD_MSG_MED("Exiting CLIENT_STATE_STOPPING on api %d\n", ev_code); +} + +static void vcd_clnt_exit_eos(struct vcd_clnt_ctxt *cctxt, s32 ev_code) +{ + u32 rc; + VCD_MSG_MED("Exiting CLIENT_STATE_EOS on api %d\n", ev_code); + rc = vcd_map_sched_status(sched_suspend_resume_client( + cctxt->dev_ctxt->sched_hdl, cctxt->sched_clnt_hdl, true)); + if (VCD_FAILED(rc)) + VCD_MSG_ERROR("Failed: sched_suspend_resume_client. rc=0x%x\n", + rc); +} + +static void vcd_clnt_exit_pausing(struct vcd_clnt_ctxt *cctxt, s32 ev_code) +{ + VCD_MSG_MED("Exiting CLIENT_STATE_PAUSING on api %d\n", ev_code); +} + +static void vcd_clnt_exit_paused(struct vcd_clnt_ctxt *cctxt, s32 ev_code) +{ + VCD_MSG_MED("Exiting CLIENT_STATE_PAUSED on api %d\n", ev_code); +} + +static void vcd_clnt_exit_invalid(struct vcd_clnt_ctxt *cctxt, s32 ev_code) +{ + VCD_MSG_MED("Exiting CLIENT_STATE_INVALID on api %d\n", ev_code); +} + +void vcd_do_client_state_transition(struct vcd_clnt_ctxt *cctxt, + enum vcd_clnt_state_enum to_state, u32 ev_code) +{ + struct vcd_clnt_state_ctxt *state_ctxt; + + if (!cctxt || to_state >= VCD_CLIENT_STATE_MAX) { + VCD_MSG_ERROR("Bad parameters. 
cctxt=%p, to_state=%d\n", cctxt, + to_state); + } + + state_ctxt = &cctxt->clnt_state; + + if (state_ctxt->state == to_state) { + VCD_MSG_HIGH("Client already in requested to_state=%d\n", + to_state); + return; + } + + VCD_MSG_MED("vcd_do_client_state_transition: C%d -> C%d, for api %d\n", + (int)state_ctxt->state, (int)to_state, ev_code); + + if (state_ctxt->state_table->pf_exit) + state_ctxt->state_table->pf_exit(cctxt, ev_code); + + + state_ctxt->state = to_state; + state_ctxt->state_table = vcd_clnt_state_table[to_state]; + + if (state_ctxt->state_table->pf_entry) + state_ctxt->state_table->pf_entry(cctxt, ev_code); +} + +const struct vcd_clnt_state_table *vcd_get_client_state_table( + enum vcd_clnt_state_enum state) +{ + return vcd_clnt_state_table[state]; +} + +static const struct vcd_clnt_state_table vcd_clnt_table_open = { + { + vcd_close_in_open, + vcd_encode_start_in_open, + NULL, + vcd_decode_start_in_open, + NULL, + NULL, + NULL, + vcd_flush_inopen, + vcd_stop_inopen, + vcd_set_property_cmn, + vcd_get_property_cmn, + vcd_set_buffer_requirements_cmn, + vcd_get_buffer_requirements_cmn, + vcd_set_buffer_cmn, + vcd_allocate_buffer_cmn, + vcd_free_buffer_cmn, + vcd_fill_output_buffer_cmn, + NULL, + }, + vcd_clnt_enter_open, + vcd_clnt_exit_open +}; + +static const struct vcd_clnt_state_table vcd_clnt_table_starting = { + { + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + vcd_clnt_cb_in_starting, + }, + vcd_clnt_enter_starting, + vcd_clnt_exit_starting +}; + +static const struct vcd_clnt_state_table vcd_clnt_table_run = { + { + NULL, + vcd_encode_start_in_run, + vcd_encode_frame_cmn, + vcd_decode_start_in_run, + vcd_decode_frame_cmn, + vcd_pause_in_run, + NULL, + vcd_flush_cmn, + vcd_stop_in_run, + vcd_set_property_cmn, + vcd_get_property_cmn, + vcd_set_buffer_requirements_cmn, + vcd_get_buffer_requirements_cmn, + vcd_set_buffer_cmn, + vcd_allocate_buffer_cmn, + NULL, + vcd_fill_output_buffer_cmn, + vcd_clnt_cb_in_run, + }, + vcd_clnt_enter_run, + vcd_clnt_exit_run +}; + +static const struct vcd_clnt_state_table vcd_clnt_table_flushing = { + { + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + vcd_flush_in_flushing, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + vcd_clnt_cb_in_flushing, + }, + vcd_clnt_enter_flushing, + vcd_clnt_exit_flushing +}; + +static const struct vcd_clnt_state_table vcd_clnt_table_stopping = { + { + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + vcd_get_property_cmn, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + vcd_clnt_cb_in_stopping, + }, + vcd_clnt_enter_stopping, + vcd_clnt_exit_stopping +}; + +static const struct vcd_clnt_state_table vcd_clnt_table_eos = { + { + NULL, + NULL, + vcd_encode_frame_cmn, + NULL, + vcd_decode_frame_cmn, + NULL, + NULL, + vcd_flush_in_eos, + vcd_stop_in_eos, + NULL, + vcd_get_property_cmn, + NULL, + NULL, + NULL, + NULL, + NULL, + vcd_fill_output_buffer_in_eos, + vcd_clnt_cb_in_eos, + }, + vcd_clnt_enter_eos, + vcd_clnt_exit_eos +}; + +static const struct vcd_clnt_state_table vcd_clnt_table_pausing = { + { + NULL, + NULL, + vcd_encode_frame_cmn, + NULL, + vcd_decode_frame_cmn, + NULL, + NULL, + NULL, + NULL, + NULL, + vcd_get_property_cmn, + NULL, + NULL, + NULL, + NULL, + NULL, + vcd_fill_output_buffer_cmn, + vcd_clnt_cb_in_pausing, + }, + vcd_clnt_enter_pausing, + vcd_clnt_exit_pausing +}; + +static const struct vcd_clnt_state_table vcd_clnt_table_paused = { + { + 
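+	/* Entries follow the ev_hdlr field order declared in vcd_client_sm.h. */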
NULL, + NULL, + vcd_encode_frame_cmn, + NULL, + vcd_decode_frame_cmn, + NULL, + vcd_resume_in_paused, + vcd_flush_cmn, + vcd_stop_cmn, + vcd_set_property_cmn, + vcd_get_property_cmn, + vcd_set_buffer_requirements_cmn, + vcd_get_buffer_requirements_cmn, + vcd_set_buffer_cmn, + vcd_allocate_buffer_cmn, + NULL, + vcd_fill_output_buffer_cmn, + NULL, + }, + vcd_clnt_enter_paused, + vcd_clnt_exit_paused +}; +static const struct vcd_clnt_state_table vcd_clnt_table_invalid = { + { + vcd_close_in_invalid, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + vcd_flush_in_invalid, + vcd_stop_in_invalid, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + vcd_free_buffer_cmn, + NULL, + vcd_clnt_cb_in_invalid, + }, + vcd_clnt_enter_invalid, + vcd_clnt_exit_invalid +}; + +static const struct vcd_clnt_state_table *vcd_clnt_state_table[] = { + NULL, + &vcd_clnt_table_open, + &vcd_clnt_table_starting, + &vcd_clnt_table_run, + &vcd_clnt_table_flushing, + &vcd_clnt_table_pausing, + &vcd_clnt_table_paused, + &vcd_clnt_table_stopping, + &vcd_clnt_table_eos, + &vcd_clnt_table_invalid +}; diff --git a/drivers/misc/video_core/720p/vcd/vcd_client_sm.h b/drivers/misc/video_core/720p/vcd/vcd_client_sm.h new file mode 100644 index 0000000000000..8f3a975544431 --- /dev/null +++ b/drivers/misc/video_core/720p/vcd/vcd_client_sm.h @@ -0,0 +1,112 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef _VCD_CLIENT_SM_H_ +#define _VCD_CLIENT_SM_H_ +#include "vcd_api.h" +#include "vcd_ddl_api.h" + +struct vcd_clnt_state_table; +struct vcd_clnt_state_ctxt; +struct vcd_clnt_ctxt; + +enum vcd_clnt_state_enum { + VCD_CLIENT_STATE_NULL = 0, + VCD_CLIENT_STATE_OPEN, + VCD_CLIENT_STATE_STARTING, + VCD_CLIENT_STATE_RUN, + VCD_CLIENT_STATE_FLUSHING, + VCD_CLIENT_STATE_PAUSING, + VCD_CLIENT_STATE_PAUSED, + VCD_CLIENT_STATE_STOPPING, + VCD_CLIENT_STATE_EOS, + VCD_CLIENT_STATE_INVALID, + VCD_CLIENT_STATE_MAX, + VCD_CLIENT_STATE_32BIT = 0x7FFFFFFF +}; + +#define CLIENT_STATE_EVENT_NUMBER(ppf) \ + ((u32 *) (&(((struct vcd_clnt_state_table*)0)->ev_hdlr.ppf)) - \ + (u32 *) (&(((struct vcd_clnt_state_table*)0)->ev_hdlr.pf_close)) + 1) + +struct vcd_clnt_state_table { + struct { + u32(*pf_close) (struct vcd_clnt_ctxt *cctxt); + u32(*pf_encode_start) (struct vcd_clnt_ctxt *cctxt); + u32(*pf_encode_frame) (struct vcd_clnt_ctxt *cctxt, + struct vcd_frame_data *input_frame); + u32(*pf_decode_start) (struct vcd_clnt_ctxt *cctxt, + struct vcd_sequence_hdr *seq_hdr); + u32(*pf_decode_frame) (struct vcd_clnt_ctxt *cctxt, + struct vcd_frame_data *input_frame); + u32(*pf_pause) (struct vcd_clnt_ctxt *cctxt); + u32(*pf_resume) (struct vcd_clnt_ctxt *cctxt); + u32(*pf_flush) (struct vcd_clnt_ctxt *cctxt, u32 mode); + u32(*pf_stop) (struct vcd_clnt_ctxt *cctxt); + u32(*pf_set_property) (struct vcd_clnt_ctxt *cctxt, + struct vcd_property_hdr *prop_hdr, void *prop); + u32(*pf_get_property) (struct vcd_clnt_ctxt *cctxt, + struct vcd_property_hdr *prop_hdr, + void *prop); + u32(*pf_set_buffer_requirements) (struct vcd_clnt_ctxt *cctxt, + enum vcd_buffer_type vcd_buffer_type, + struct vcd_buffer_requirement *buffer_req); + u32(*pf_get_buffer_requirements) (struct vcd_clnt_ctxt *cctxt, + enum vcd_buffer_type vcd_buffer_type, + struct vcd_buffer_requirement *buffer_req); + u32(*pf_set_buffer) (struct vcd_clnt_ctxt *cctxt, + enum vcd_buffer_type vcd_buffer_type, void *buffer, + size_t buf_size); + u32(*pf_allocate_buffer) (struct vcd_clnt_ctxt *cctxt, + enum vcd_buffer_type vcd_buffer_type, size_t sz, + void **virt_addr, phys_addr_t *phys_addr); + u32(*pf_free_buffer) (struct vcd_clnt_ctxt *cctxt, + enum vcd_buffer_type vcd_buffer_type, void *buf); + u32(*pf_fill_output_buffer) (struct vcd_clnt_ctxt *cctxt, + struct vcd_frame_data *buffer); + void (*pf_clnt_cb) (struct vcd_clnt_ctxt *cctxt, u32 event, + u32 status, void *payload, u32 size, u32 *ddl_handle, + void *const client_data); + } ev_hdlr; + + void (*pf_entry) (struct vcd_clnt_ctxt *cctxt, s32 state_event_type); + void (*pf_exit) (struct vcd_clnt_ctxt *cctxt, s32 state_event_type); +}; + +struct vcd_clnt_state_ctxt { + const struct vcd_clnt_state_table *state_table; + enum vcd_clnt_state_enum state; +}; + +extern void vcd_do_client_state_transition(struct vcd_clnt_ctxt *cctxt, + enum vcd_clnt_state_enum to_state, u32 ev_code); + +extern const struct vcd_clnt_state_table *vcd_get_client_state_table( + enum vcd_clnt_state_enum state); + +#endif diff --git a/drivers/misc/video_core/720p/vcd/vcd_core.h b/drivers/misc/video_core/720p/vcd/vcd_core.h new file mode 100644 index 0000000000000..f855d7bda9add --- /dev/null +++ b/drivers/misc/video_core/720p/vcd/vcd_core.h @@ -0,0 +1,258 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#ifndef _VCD_CORE_H_ +#define _VCD_CORE_H_ + +#include "vcd_api.h" +#include "vid_frame_scheduler_api.h" +#include "vcd_ddl_api.h" + +#include "vcd_util.h" +#include "vcd_client_sm.h" +#include "vcd_power_sm.h" + +#define VCD_SIGNATURE 0x75017591U + +#define VCD_MIN_PERF_LEVEL 37900 + +#define VCD_MAX_SCHEDULER_QUEUE_DURATION 1 + +#define VCD_MAX_SCHEDULER_QUEUE_SIZE(fps_n, fps_d) \ + (fps_n / fps_d * VCD_MAX_SCHEDULER_QUEUE_DURATION) + +#define VCD_SCHEDULER_INITIAL_PERF_LEVEL 108000 + +#define VCD_SCHEDULER_ENC_DFLT_OTKN_PERFRM 1 + +#define VCD_SCHEDULER_DEC_DFLT_OTKN_PERFRM 1 + +#define VCD_DRIVER_INSTANCE_MAX 4 + +#define VCD_MAX_CLIENT_TRANSACTIONS 32 + +#define VCD_SEQ_HDR_PADDING_BYTES 256 + +#define VCD_DEC_NUM_INTERLACED_FIELDS 2 + +#define VCD_TIMESTAMP_RESOLUTION 1000000 +#define VCD_DEC_INITIAL_FRAME_RATE 30 + +#define VCD_S_SCHED_STAT_BASE 0x20000000 +#define VCD_S_SCHED_EOS (VCD_S_SCHED_STAT_BASE + 0x1) +#define VCD_S_SCHED_SLEEP (VCD_S_SCHED_STAT_BASE + 0x2) +#define VCD_S_SCHED_QEMPTY (VCD_S_SCHED_STAT_BASE + 0x3) +#define VCD_S_SCHED_QFULL (VCD_S_SCHED_STAT_BASE + 0x4) + +enum vcd_command_type { + VCD_CMD_NONE, + VCD_CMD_DEVICE_INIT, + VCD_CMD_DEVICE_TERM, + VCD_CMD_DEVICE_RESET, + VCD_CMD_CODEC_START, + VCD_CMD_CODEC_STOP, + VCD_CMD_CODE_FRAME, + VCD_CMD_OUTPUT_FLUSH, + VCD_CMD_CLIENT_CLOSE +}; + +//TODO: remove this +struct vcd_cmd_q_element { + enum vcd_command_type pending_cmd; +}; + +struct vcd_dma_buffer { + void *virt_addr; + phys_addr_t phys_addr; + size_t size; +}; + +struct vcd_buffer_entry { + u32 valid; + struct vcd_dma_buffer buffer; + void *virt_addr; + phys_addr_t phys_addr; + size_t size; +// u8 *alloc; +// u8 *virtual; // aligned so == alloc +// u8 *physical; +// u32 size; + u32 allocated; // true when allocated + u32 in_use; + struct vcd_frame_data frame; + +}; + +struct vcd_buffer_pool { + struct vcd_buffer_entry *entries; + u32 count; + struct vcd_buffer_requirement buf_req; + u32 validated; + u32 allocated; + u32 in_use; + struct vcd_buffer_entry **queue; + u16 q_len; + 
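+	/* Head and tail indices for the buffer entry queue above. */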
u16 q_head; + u16 q_tail; + +}; + +struct vcd_transc { + u32 in_use; + enum vcd_command_type type; + struct vcd_clnt_ctxt *cctxt; + + struct vcd_buffer_entry *ip_buf_entry; + + s64 time_stamp; + u32 ip_frm_tag; + enum vcd_frame frame_type; + + struct vcd_buffer_entry *op_buf_entry; + + u32 input_done; + u32 frame_done; +}; + +struct vcd_dev_ctxt { + u32 ddl_cmd_concurrency; + u32 ddl_frame_ch_depth; + u32 ddl_cmd_ch_depth; + u32 ddl_frame_ch_interim; + u32 ddl_cmd_ch_interim; + u32 ddl_frame_ch_free; + u32 ddl_cmd_ch_free; + + void *sched_hdl; + + struct vcd_init_config config; + + u32 driver_ids[VCD_DRIVER_INSTANCE_MAX]; + u32 refs; + u8 *device_base_addr; + void *hw_timer_handle; + u32 hw_time_out; + struct vcd_clnt_ctxt *cctxt_list_head; + + enum vcd_command_type pending_cmd; + + u32 cont; + + struct vcd_transc *trans_tbl; + u32 trans_tbl_size; + + enum vcd_power_state pwr_state; + enum vcd_pwr_clk_state_type pwr_clk_state; + u32 active_clnts; + u32 max_perf_lvl; + u32 reqd_perf_lvl; + u32 curr_perf_lvl; + u32 set_perf_lvl_pending; + +}; + +struct vcd_clnt_status { + u32 req_perf_lvl; + + u32 b1st_frame_recvd; + u32 b1st_ip_done_recvd; + u32 b1st_op_done_recvd; + + u32 frame_submitted; + u32 frame_delayed; + u32 cmd_submitted; + + u32 int_field_cnt; + + s64 first_ts; + s64 prev_ts; + u32 time_elapsed; + + u32 stop_pending; + u32 flush_mode; + + u32 eos_wait_for_op_buf; + struct vcd_frame_data eos_trig_ip_frm; + + u32 eos_prev_valid; + struct ddl_frame_data_tag eos_prev_op_frm; + u32 last_err; + u32 last_evt; + u32 cleaning_up; + u32 close_pending; +}; + +struct vcd_clnt_ctxt { + u32 signature; + struct vcd_clnt_state_ctxt clnt_state; + + s32 driver_id; + + u32 live; + u32 decoding; + + struct vcd_property_frame_rate frm_rate; + u32 frm_p_units; + u32 reqd_perf_lvl; + u32 time_resoln; + + struct vcd_buffer_pool in_buf_pool; + struct vcd_buffer_pool out_buf_pool; + + void (*callback) (u32 event, u32 status, void *info, u32 size, + void *handle, void *const client_data); + void *client_data; + + u32 sched_clnt_valid; + void *sched_clnt_hdl; + u32 sched_o_tkn_per_ip_frm; + u32 ddl_hdl_valid; + u32 *ddl_handle; + struct vcd_dev_ctxt *dev_ctxt; + struct vcd_cmd_q_element cmd_q; + + struct vcd_sequence_hdr seq_hdr; + phys_addr_t seq_hdr_phys_addr; + + struct vcd_clnt_status status; + + struct vcd_clnt_ctxt *next; +}; + +#define VCD_BUFFERPOOL_INUSE_DECREMENT(val) \ +do { \ + if ((val) > 0) \ + val--; \ + else { \ + VCD_MSG_ERROR("%s(): Inconsistent val given in " \ + " VCD_BUFFERPOOL_INUSE_DECREMENT\n", __func__); \ + vcd_assert(); \ + } \ +} while (0) + +#endif diff --git a/drivers/misc/video_core/720p/vcd/vcd_device_sm.c b/drivers/misc/video_core/720p/vcd/vcd_device_sm.c new file mode 100644 index 0000000000000..9e3a6a7530d0a --- /dev/null +++ b/drivers/misc/video_core/720p/vcd/vcd_device_sm.c @@ -0,0 +1,1109 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include "video_core_type.h" +#include "vcd.h" + +static const struct vcd_dev_state_table *vcd_dev_state_table[]; +static const struct vcd_dev_state_table vcd_dev_table_null; + +struct vcd_drv_ctxt *vcd_get_drv_context(void) +{ + static struct vcd_drv_ctxt drv_context = { + {&vcd_dev_table_null, VCD_DEVICE_STATE_NULL}, + {0}, + 0 + }; + + return &drv_context; + +} + +void vcd_do_device_state_transition(struct vcd_drv_ctxt *drv_ctxt, + enum vcd_dev_state_enum to_state, u32 ev_code) +{ + struct vcd_dev_state_ctxt *state_ctxt; + + if (!drv_ctxt || to_state >= VCD_DEVICE_STATE_MAX) { + VCD_MSG_ERROR("Bad parameters. drv_ctxt=%p, to_state=%d", + drv_ctxt, to_state); + } + + state_ctxt = &drv_ctxt->dev_state; + + if (state_ctxt->state == to_state) { + VCD_MSG_HIGH("Device already in requested to_state=%d", + to_state); + + return; + } + + VCD_MSG_MED("vcd_do_device_state_transition: D%d -> D%d, for api %d", + (int)state_ctxt->state, (int)to_state, ev_code); + + if (state_ctxt->state_table->pf_exit) + state_ctxt->state_table->pf_exit(drv_ctxt, ev_code); + + + state_ctxt->state = to_state; + state_ctxt->state_table = vcd_dev_state_table[to_state]; + + if (state_ctxt->state_table->pf_entry) + state_ctxt->state_table->pf_entry(drv_ctxt, ev_code); +} + +void vcd_hw_timeout_handler(void *user_data) +{ + struct vcd_drv_ctxt *drv_ctxt; + + VCD_MSG_HIGH("vcd_hw_timeout_handler:"); + user_data = NULL; + drv_ctxt = vcd_get_drv_context(); + mutex_lock(drv_ctxt->dev_mutex); + if (drv_ctxt->dev_state.state_table->ev_hdlr.pf_timeout) + drv_ctxt->dev_state.state_table->ev_hdlr.pf_timeout(drv_ctxt, + user_data); + else + VCD_MSG_ERROR("hw_timeout unsupported in device state %d", + drv_ctxt->dev_state.state); + mutex_unlock(drv_ctxt->dev_mutex); +} + +void vcd_ddl_callback(u32 event, u32 status, void *payload, + u32 size, u32 *ddl_handle, void *const client_data) +{ + struct vcd_drv_ctxt *drv_ctxt; + struct vcd_dev_ctxt *dev_ctxt; + struct vcd_dev_state_ctxt *dev_state; + struct vcd_clnt_ctxt *cctxt; + struct vcd_transc *transc; + + VCD_MSG_LOW("vcd_ddl_callback:"); + + VCD_MSG_LOW("event=0x%x status=0x%x", event, status); + + drv_ctxt = vcd_get_drv_context(); + dev_ctxt = &drv_ctxt->dev_ctxt; + dev_state = &drv_ctxt->dev_state; + + dev_ctxt->cont = true; + vcd_device_timer_stop(dev_ctxt); + + switch (dev_state->state) { + case VCD_DEVICE_STATE_NULL: + VCD_MSG_HIGH("Callback unexpected in NULL state"); + break; + case VCD_DEVICE_STATE_NOT_INIT: + VCD_MSG_HIGH("Callback unexpected in NOT_INIT state"); + break; + case VCD_DEVICE_STATE_INITING: + if (dev_state->state_table->ev_hdlr.pf_dev_cb) { + dev_state->state_table->ev_hdlr.pf_dev_cb(drv_ctxt, + event, status, payload, size, ddl_handle, + client_data); + } else { + VCD_MSG_HIGH("No device handler in %d state", + dev_state->state); + } + break; + case VCD_DEVICE_STATE_READY: + transc = (struct vcd_transc *)client_data; + + if (!transc || !transc->in_use || !transc->cctxt) { + VCD_MSG_ERROR("Invalid clientdata received from DDL "); + } else { + cctxt = transc->cctxt; + + if (cctxt->clnt_state.state_table->ev_hdlr.pf_clnt_cb) { + cctxt->clnt_state.state_table->ev_hdlr. 
+ pf_clnt_cb(cctxt, event, status, + payload, size, ddl_handle, client_data); + } else { + VCD_MSG_HIGH("No client handler in" + " (dsm:READY, csm:%d) state", + (int)cctxt->clnt_state.state); + + if (VCD_FAILED(status)) { + VCD_MSG_FATAL("DDL callback" + " returned failure 0x%x", + status); + } + } + } + break; + default: + VCD_MSG_ERROR("Unknown state"); + break; + } + +} + +u32 vcd_init_device_context(struct vcd_drv_ctxt *drv_ctxt, u32 ev_code) +{ + struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt; + struct sched_init_param sched_init; + u32 rc; + struct ddl_init_config ddl_init; + + VCD_MSG_LOW("vcd_init_device_context:"); + + dev_ctxt->pending_cmd = VCD_CMD_NONE; + + rc = vcd_power_event(dev_ctxt, NULL, VCD_EVT_PWR_DEV_INIT_BEGIN); + VCD_FAILED_RETURN(rc, "VCD_EVT_PWR_DEV_INIT_BEGIN failed"); + + VCD_MSG_HIGH("Device powered ON and clocked"); + + sched_init.perf_lvl = dev_ctxt->max_perf_lvl; + rc = vcd_map_sched_status(sched_create(&sched_init, + &dev_ctxt->sched_hdl)); + + if (VCD_FAILED(rc)) { + VCD_MSG_ERROR("rc = 0x%x. Failed: sched_create", rc); + + vcd_power_event(dev_ctxt, NULL, VCD_EVT_PWR_DEV_INIT_FAIL); + + return rc; + } + + VCD_MSG_HIGH("Created scheduler instance."); + + ddl_init.core_virtual_base_addr = dev_ctxt->device_base_addr; + ddl_init.pf_interrupt_clr = dev_ctxt->config.pf_interrupt_clr; + ddl_init.ddl_callback = vcd_ddl_callback; + + rc = ddl_device_init(&ddl_init, NULL); + + if (VCD_FAILED(rc)) { + VCD_MSG_ERROR("rc = 0x%x. Failed: ddl_device_init", rc); + + sched_destroy(dev_ctxt->sched_hdl); + dev_ctxt->sched_hdl = NULL; + + vcd_power_event(dev_ctxt, NULL, VCD_EVT_PWR_DEV_INIT_FAIL); + } else { + vcd_device_timer_start(dev_ctxt); + vcd_do_device_state_transition(drv_ctxt, + VCD_DEVICE_STATE_INITING, ev_code); + } + + return rc; +} + +void vcd_handle_device_init_failed(struct vcd_drv_ctxt *drv_ctxt, u32 status) +{ + struct vcd_clnt_ctxt *client; + struct vcd_clnt_ctxt *tmp_client; + + VCD_MSG_ERROR("Device init failed. status = %d", status); + + client = drv_ctxt->dev_ctxt.cctxt_list_head; + while (client) { + client->callback(VCD_EVT_RESP_OPEN, status, NULL, 0, 0, + client->client_data); + + tmp_client = client; + client = client->next; + + vcd_destroy_client_context(tmp_client); + } + if (ddl_device_release(NULL)) + VCD_MSG_ERROR("Failed: ddl_device_release"); + + (void)sched_destroy(drv_ctxt->dev_ctxt.sched_hdl); + drv_ctxt->dev_ctxt.sched_hdl = NULL; + + if (vcd_power_event(&drv_ctxt->dev_ctxt, NULL, + VCD_EVT_PWR_DEV_INIT_FAIL)) + VCD_MSG_ERROR("VCD_EVT_PWR_DEV_INIT_FAIL failed"); + + vcd_do_device_state_transition(drv_ctxt, VCD_DEVICE_STATE_NOT_INIT, + DEVICE_STATE_EVENT_NUMBER(pf_dev_cb)); +} + +u32 vcd_deinit_device_context(struct vcd_drv_ctxt *drv_ctxt, u32 ev_code) +{ + struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt; + u32 rc = VCD_S_SUCCESS; + + VCD_MSG_LOW("vcd_deinit_device_context:"); + + rc = vcd_power_event(&drv_ctxt->dev_ctxt, NULL, + VCD_EVT_PWR_DEV_TERM_BEGIN); + + VCD_FAILED_RETURN(rc, "VCD_EVT_PWR_DEV_TERM_BEGIN failed"); + + rc = ddl_device_release(NULL); + + if (VCD_FAILED(rc)) { + VCD_MSG_ERROR("rc = 0x%x. 
Failed: ddl_device_release", rc); + + vcd_power_event(dev_ctxt, NULL, VCD_EVT_PWR_DEV_TERM_FAIL); + } else { + sched_destroy(dev_ctxt->sched_hdl); + dev_ctxt->sched_hdl = NULL; + + vcd_power_event(dev_ctxt, NULL, VCD_EVT_PWR_DEV_TERM_END); + + vcd_do_device_state_transition(drv_ctxt, + VCD_DEVICE_STATE_NOT_INIT, ev_code); + } + return rc; +} + +void vcd_term_driver_context(struct vcd_drv_ctxt *drv_ctxt) +{ + struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt; + + VCD_MSG_HIGH("All driver instances terminated"); + + if (dev_ctxt->config.pf_deregister_isr) + dev_ctxt->config.pf_deregister_isr(); + + if (dev_ctxt->config.pf_un_map_dev_base_addr) + dev_ctxt->config.pf_un_map_dev_base_addr(); + + if (dev_ctxt->config.pf_timer_release) + dev_ctxt->config.pf_timer_release(dev_ctxt->hw_timer_handle); + + kfree(dev_ctxt->trans_tbl); + + memset(dev_ctxt, 0, sizeof(struct vcd_dev_ctxt)); + + vcd_do_device_state_transition(drv_ctxt, VCD_DEVICE_STATE_NULL, + DEVICE_STATE_EVENT_NUMBER(pf_term)); + +} + +u32 vcd_reset_device_context(struct vcd_drv_ctxt *drv_ctxt, u32 ev_code) +{ + struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt; + u32 rc = VCD_S_SUCCESS; + + VCD_MSG_LOW("vcd_reset_device_context:"); + vcd_reset_device_channels(dev_ctxt); + rc = vcd_power_event(&drv_ctxt->dev_ctxt, NULL, + VCD_EVT_PWR_DEV_TERM_BEGIN); + VCD_FAILED_RETURN(rc, "VCD_EVT_PWR_DEV_TERM_BEGIN failed"); + if (ddl_reset_hw(0)) + VCD_MSG_HIGH("HW Reset done"); + else + VCD_MSG_FATAL("HW Reset failed"); + + vcd_power_event(dev_ctxt, NULL, VCD_EVT_PWR_DEV_TERM_END); + + return VCD_S_SUCCESS; +} + +void vcd_handle_device_err_fatal(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_clnt_ctxt *trig_clnt) +{ + struct vcd_clnt_ctxt *cctxt = dev_ctxt->cctxt_list_head; + VCD_MSG_LOW("vcd_handle_device_err_fatal:"); + while (cctxt) { + if (cctxt != trig_clnt) { + vcd_clnt_handle_device_err_fatal(cctxt, + VCD_EVT_IND_HWERRFATAL); + } + cctxt = cctxt->next; + } + dev_ctxt->pending_cmd = VCD_CMD_DEVICE_RESET; + vcd_do_device_state_transition(vcd_get_drv_context(), + VCD_DEVICE_STATE_INVALID, DEVICE_STATE_EVENT_NUMBER(pf_dev_cb)); +} + +void vcd_handle_for_last_clnt_close( + struct vcd_dev_ctxt *dev_ctxt, u32 send_deinit) +{ + if (!dev_ctxt->cctxt_list_head) { + VCD_MSG_HIGH("All clients are closed"); + if (send_deinit) + vcd_deinit_device_context(vcd_get_drv_context(), + DEVICE_STATE_EVENT_NUMBER(pf_close)); + else + dev_ctxt->pending_cmd = VCD_CMD_DEVICE_TERM; + } +} +void vcd_continue(void) +{ + struct vcd_drv_ctxt *drv_ctxt; + struct vcd_dev_ctxt *dev_ctxt; + u32 cont; + struct vcd_transc *transc; + u32 rc; + VCD_MSG_LOW("vcd_continue:"); + + drv_ctxt = vcd_get_drv_context(); + dev_ctxt = &drv_ctxt->dev_ctxt; + + dev_ctxt->cont = false; + + if (dev_ctxt->pending_cmd == VCD_CMD_DEVICE_INIT) { + VCD_MSG_HIGH("VCD_CMD_DEVICE_INIT is pending"); + + dev_ctxt->pending_cmd = VCD_CMD_NONE; + + vcd_init_device_context(drv_ctxt, + DEVICE_STATE_EVENT_NUMBER(pf_open)); + } else if (dev_ctxt->pending_cmd == VCD_CMD_DEVICE_TERM) { + VCD_MSG_HIGH("VCD_CMD_DEVICE_TERM is pending"); + + dev_ctxt->pending_cmd = VCD_CMD_NONE; + + vcd_deinit_device_context(drv_ctxt, + DEVICE_STATE_EVENT_NUMBER(pf_close)); + } else if (dev_ctxt->pending_cmd == VCD_CMD_DEVICE_RESET) { + VCD_MSG_HIGH("VCD_CMD_DEVICE_RESET is pending"); + dev_ctxt->pending_cmd = VCD_CMD_NONE; + vcd_reset_device_context(drv_ctxt, + DEVICE_STATE_EVENT_NUMBER(pf_dev_cb)); + } else { + if (dev_ctxt->set_perf_lvl_pending) { + rc = vcd_power_event(dev_ctxt, NULL, + VCD_EVT_PWR_DEV_SET_PERFLVL); + + if 
(VCD_FAILED(rc)) { + VCD_MSG_ERROR + ("VCD_EVT_PWR_CLNT_SET_PERFLVL failed"); + VCD_MSG_HIGH("Not running at desired perf " + "level.curr=%d, reqd=%d", + dev_ctxt->curr_perf_lvl, + dev_ctxt->reqd_perf_lvl); + } else { + dev_ctxt->set_perf_lvl_pending = false; + } + } + + do { + cont = false; + + if (vcd_get_command_channel_in_loop(dev_ctxt, + &transc)) { + if (vcd_submit_command_in_continue(dev_ctxt, + transc)) + cont = true; + else { + VCD_MSG_MED + ("No more commands to submit"); + + vcd_release_command_channel(dev_ctxt, + transc); + + vcd_release_interim_command_channels( + dev_ctxt); + } + } + } while (cont); + + do { + cont = false; + + if (vcd_get_frame_channel_in_loop(dev_ctxt, &transc)) { + if (vcd_try_submit_frame_in_continue(dev_ctxt, + transc)) { + cont = true; + } else { + VCD_MSG_MED("No more frames to submit"); + + vcd_release_frame_channel(dev_ctxt, + transc); + + vcd_release_interim_frame_channels( + dev_ctxt); + } + } + + } while (cont); + + if (!vcd_core_is_busy(dev_ctxt)) { + rc = vcd_power_event(dev_ctxt, NULL, + VCD_EVT_PWR_CLNT_CMD_END); + + if (VCD_FAILED(rc)) + VCD_MSG_ERROR("Failed:" + "VCD_EVT_PWR_CLNT_CMD_END"); + } + } +} + +static void vcd_pause_all_sessions(struct vcd_dev_ctxt *dev_ctxt) +{ + struct vcd_clnt_ctxt *cctxt = dev_ctxt->cctxt_list_head; + u32 rc; + + while (cctxt) { + if (cctxt->clnt_state.state_table->ev_hdlr.pf_pause) { + rc = cctxt->clnt_state.state_table->ev_hdlr. + pf_pause(cctxt); + + if (VCD_FAILED(rc)) + VCD_MSG_ERROR("Client pause failed"); + + } + + cctxt = cctxt->next; + } +} + +static void vcd_resume_all_sessions(struct vcd_dev_ctxt *dev_ctxt) +{ + struct vcd_clnt_ctxt *cctxt = dev_ctxt->cctxt_list_head; + u32 rc; + + while (cctxt) { + if (cctxt->clnt_state.state_table->ev_hdlr.pf_resume) { + rc = cctxt->clnt_state.state_table->ev_hdlr. + pf_resume(cctxt); + + if (VCD_FAILED(rc)) + VCD_MSG_ERROR("Client resume failed"); + + } + + cctxt = cctxt->next; + } +} + +static u32 vcd_init_cmn(struct vcd_drv_ctxt *drv_ctxt, + struct vcd_init_config *config, s32 *driver_handle) +{ + struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt; + s32 driver_id; + + if (dev_ctxt->config.pf_interrupt_clr != config->pf_interrupt_clr || + dev_ctxt->config.pf_register_isr != + config->pf_register_isr || + dev_ctxt->config.pf_deregister_isr != + config->pf_deregister_isr || + dev_ctxt->config.pf_map_dev_base_addr != + config->pf_map_dev_base_addr || + dev_ctxt->config.pf_un_map_dev_base_addr != + config->pf_un_map_dev_base_addr) { + VCD_MSG_ERROR("Device config mismatch"); + VCD_MSG_HIGH("VCD will be using config from 1st vcd_init"); + } + + *driver_handle = 0; + + driver_id = 0; + while (driver_id < VCD_DRIVER_INSTANCE_MAX && + dev_ctxt->driver_ids[driver_id]) { + ++driver_id; + } + + if (driver_id == VCD_DRIVER_INSTANCE_MAX) { + VCD_MSG_ERROR("Max driver instances reached"); + + return VCD_ERR_FAIL; + } + + ++dev_ctxt->refs; + dev_ctxt->driver_ids[driver_id] = true; + *driver_handle = driver_id + 1; + + VCD_MSG_HIGH("Driver_id = %d. 
No of driver instances = %d", + driver_id, dev_ctxt->refs); + + return VCD_S_SUCCESS; + +} + +static u32 vcd_init_in_null(struct vcd_drv_ctxt *drv_ctxt, + struct vcd_init_config *config, s32 *driver_handle) { + u32 rc = VCD_S_SUCCESS; + struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt; + u32 done_create_timer = false; + VCD_MSG_LOW("vcd_init_in_dev_null:"); + + dev_ctxt->config = *config; + + dev_ctxt->device_base_addr = (u8 *)config->pf_map_dev_base_addr( + dev_ctxt->config.device_name); + + if (!dev_ctxt->device_base_addr) { + VCD_MSG_ERROR("NULL Device_base_addr"); + + return VCD_ERR_FAIL; + } + + if (config->pf_register_isr) + config->pf_register_isr(dev_ctxt->config.device_name); + + if (config->pf_timer_create) { + if (config->pf_timer_create(vcd_hw_timeout_handler, NULL, + &dev_ctxt->hw_timer_handle)) + done_create_timer = true; + else { + VCD_MSG_ERROR("timercreate failed"); + return VCD_ERR_FAIL; + } + } + + + rc = vcd_init_cmn(drv_ctxt, config, driver_handle); + + if (!VCD_FAILED(rc)) { + vcd_do_device_state_transition(drv_ctxt, + VCD_DEVICE_STATE_NOT_INIT, + DEVICE_STATE_EVENT_NUMBER(pf_init)); + } else { + if (dev_ctxt->config.pf_un_map_dev_base_addr) + dev_ctxt->config.pf_un_map_dev_base_addr(); + + if (dev_ctxt->config.pf_deregister_isr) + dev_ctxt->config.pf_deregister_isr(); + + if (done_create_timer && dev_ctxt->config.pf_timer_release) + dev_ctxt->config.pf_timer_release( + dev_ctxt->hw_timer_handle); + } + + return rc; + +} + +u32 npelly_init(void); + +static u32 vcd_init_in_not_init(struct vcd_drv_ctxt *drv_ctxt, + struct vcd_init_config *config, s32 *driver_handle) +{ + u32 rc; + VCD_MSG_LOW("vcd_init_in_dev_not_init:"); + + rc = npelly_init(); + + if (rc) + return rc; + return vcd_init_cmn(drv_ctxt, config, driver_handle); + +} + +static u32 vcd_init_in_initing(struct vcd_drv_ctxt *drv_ctxt, + struct vcd_init_config *config, s32 *driver_handle) +{ + VCD_MSG_LOW("vcd_init_in_dev_initing:"); + + return vcd_init_cmn(drv_ctxt, config, driver_handle); + +} + +static u32 vcd_init_in_ready(struct vcd_drv_ctxt *drv_ctxt, + struct vcd_init_config *config, s32 *driver_handle) +{ + VCD_MSG_LOW("vcd_init_in_dev_ready:"); + + return vcd_init_cmn(drv_ctxt, config, driver_handle); +} + +static u32 vcd_term_cmn(struct vcd_drv_ctxt *drv_ctxt, s32 driver_handle) +{ + struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt; + + if (!vcd_validate_driver_handle(dev_ctxt, driver_handle)) { + VCD_MSG_ERROR("Invalid driver handle = %d", driver_handle); + + return VCD_ERR_BAD_HANDLE; + } + + if (vcd_check_for_client_context(dev_ctxt, driver_handle - 1)) { + VCD_MSG_ERROR("Driver has active client"); + + return VCD_ERR_BAD_STATE; + } + + --dev_ctxt->refs; + dev_ctxt->driver_ids[driver_handle - 1] = false; + + VCD_MSG_HIGH("Driver_id %d terminated. 
No of driver instances = %d", + driver_handle - 1, dev_ctxt->refs); + + return VCD_S_SUCCESS; +} + +static u32 vcd_term_in_not_init(struct vcd_drv_ctxt *drv_ctxt, + s32 driver_handle) +{ + struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt; + u32 rc; + + VCD_MSG_LOW("vcd_term_in_dev_not_init:"); + + rc = vcd_term_cmn(drv_ctxt, driver_handle); + + if (!VCD_FAILED(rc) && !dev_ctxt->refs) + vcd_term_driver_context(drv_ctxt); + + return rc; +} + +static u32 vcd_term_in_initing(struct vcd_drv_ctxt *drv_ctxt, s32 driver_handle) +{ + VCD_MSG_LOW("vcd_term_in_dev_initing:"); + + return vcd_term_cmn(drv_ctxt, driver_handle); +} + +static u32 vcd_term_in_ready(struct vcd_drv_ctxt *drv_ctxt, s32 driver_handle) +{ + VCD_MSG_LOW("vcd_term_in_dev_ready:"); + + return vcd_term_cmn(drv_ctxt, driver_handle); +} + +static u32 vcd_term_in_invalid(struct vcd_drv_ctxt *drv_ctxt, + s32 driver_handle) +{ + u32 rc; + VCD_MSG_LOW("vcd_term_in_invalid:"); + rc = vcd_term_cmn(drv_ctxt, driver_handle); + if (!VCD_FAILED(rc) && !drv_ctxt->dev_ctxt.refs) + vcd_term_driver_context(drv_ctxt); + + return rc; +} + +static u32 vcd_open_cmn(struct vcd_drv_ctxt *drv_ctxt, s32 driver_handle, + u32 decoding, void (*callback) (u32 event, u32 status, void *info, + u32 size, void *handle, void *const client_data), void *client_data, + struct vcd_clnt_ctxt **pp_cctxt) +{ + struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt; + struct vcd_clnt_ctxt *cctxt; + struct vcd_clnt_ctxt *client; + + if (!vcd_validate_driver_handle(dev_ctxt, driver_handle)) { + VCD_MSG_ERROR("Invalid driver handle = %d", driver_handle); + + return VCD_ERR_BAD_HANDLE; + } + + cctxt = kzalloc(sizeof(struct vcd_clnt_ctxt), GFP_KERNEL); + if (!cctxt) { + VCD_MSG_ERROR("No memory for client ctxt"); + return VCD_ERR_ALLOC_FAIL; + } + + cctxt->dev_ctxt = dev_ctxt; + cctxt->driver_id = driver_handle - 1; + cctxt->decoding = decoding; + cctxt->callback = callback; + cctxt->client_data = client_data; + + client = dev_ctxt->cctxt_list_head; + dev_ctxt->cctxt_list_head = cctxt; + cctxt->next = client; + + *pp_cctxt = cctxt; + + return VCD_S_SUCCESS; + +} + +static u32 vcd_open_in_not_init(struct vcd_drv_ctxt *drv_ctxt, + s32 driver_handle, u32 decoding, void (*callback) (u32 event, + u32 status, void *info, u32 size, void *handle, + void *const client_data), void *client_data) +{ + struct vcd_clnt_ctxt *cctxt; + u32 rc; + + VCD_MSG_LOW("vcd_open_in_dev_not_init:"); + + rc = vcd_open_cmn(drv_ctxt, driver_handle, decoding, callback, + client_data, &cctxt); + + VCD_FAILED_RETURN(rc, "Failed: vcd_open_cmn"); + + rc = vcd_init_device_context(drv_ctxt, + DEVICE_STATE_EVENT_NUMBER(pf_open)); + + if (VCD_FAILED(rc)) + vcd_destroy_client_context(cctxt); + + return rc; +} + +static u32 vcd_open_in_initing(struct vcd_drv_ctxt *drv_ctxt, s32 driver_handle, + u32 decoding, void (*callback) (u32 event, u32 status, void *info, + u32 size, void *handle, void *const client_data), void *client_data) +{ + struct vcd_clnt_ctxt *cctxt; + + VCD_MSG_LOW("vcd_open_in_dev_initing:"); + + return vcd_open_cmn(drv_ctxt, driver_handle, decoding, callback, + client_data, &cctxt); +} + +static u32 vcd_open_in_ready(struct vcd_drv_ctxt *drv_ctxt, s32 driver_handle, + u32 decoding, void (*callback) (u32 event, u32 status, void *info, + u32 size, void *handle, void *const client_data), void *client_data) +{ + struct vcd_clnt_ctxt *cctxt; + struct vcd_handle_container container; + u32 rc; + + VCD_MSG_LOW("vcd_open_in_dev_ready:"); + + rc = vcd_open_cmn(drv_ctxt, driver_handle, decoding, callback, + 
client_data, &cctxt); + + VCD_FAILED_RETURN(rc, "Failed: vcd_open_cmn"); + + rc = vcd_init_client_context(cctxt); + + if (!VCD_FAILED(rc)) { + container.handle = (void *)cctxt; + + callback(VCD_EVT_RESP_OPEN, VCD_S_SUCCESS, &container, + sizeof(container), container.handle, client_data); + } else { + VCD_MSG_ERROR("rc = 0x%x. Failed: vcd_init_client_context", rc); + + vcd_destroy_client_context(cctxt); + } + + return rc; +} + +static u32 vcd_close_in_ready(struct vcd_drv_ctxt *drv_ctxt, + struct vcd_clnt_ctxt *cctxt) { + u32 rc; + + VCD_MSG_LOW("vcd_close_in_dev_ready:"); + + if (cctxt->clnt_state.state_table->ev_hdlr.pf_close) { + rc = cctxt->clnt_state.state_table->ev_hdlr. + pf_close(cctxt); + } else { + VCD_MSG_ERROR("Unsupported API in client state %d", + cctxt->clnt_state.state); + + rc = VCD_ERR_BAD_STATE; + } + + if (!VCD_FAILED(rc)) + vcd_handle_for_last_clnt_close(&drv_ctxt->dev_ctxt, true); + + return rc; +} + +static u32 vcd_close_in_dev_invalid(struct vcd_drv_ctxt *drv_ctxt, + struct vcd_clnt_ctxt *cctxt) +{ + u32 rc; + VCD_MSG_LOW("vcd_close_in_dev_invalid:"); + if (cctxt->clnt_state.state_table->ev_hdlr.pf_close) { + rc = cctxt->clnt_state.state_table->ev_hdlr.pf_close(cctxt); + } else { + VCD_MSG_ERROR("Unsupported API in client state %d", + cctxt->clnt_state.state); + rc = VCD_ERR_BAD_STATE; + } + if (!VCD_FAILED(rc) && !drv_ctxt->dev_ctxt.cctxt_list_head) { + VCD_MSG_HIGH("All INVALID clients are closed"); + vcd_do_device_state_transition(drv_ctxt, + VCD_DEVICE_STATE_NOT_INIT, + DEVICE_STATE_EVENT_NUMBER(pf_close)); + } + return rc; +} + +static u32 vcd_resume_in_ready(struct vcd_drv_ctxt *drv_ctxt, + struct vcd_clnt_ctxt *cctxt) +{ + u32 rc = VCD_S_SUCCESS; + + VCD_MSG_LOW("vcd_resume_in_ready:"); + + if (cctxt->clnt_state.state_table->ev_hdlr.pf_resume) { + rc = cctxt->clnt_state.state_table->ev_hdlr.pf_resume(cctxt); + } else { + VCD_MSG_ERROR("Unsupported API in client state %d", + cctxt->clnt_state.state); + + rc = VCD_ERR_BAD_STATE; + } + + return rc; +} + +static u32 vcd_set_dev_pwr_in_ready(struct vcd_drv_ctxt *drv_ctxt, + enum vcd_power_state pwr_state) +{ + u32 rc = VCD_S_SUCCESS; + struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt; + + VCD_MSG_LOW("vcd_set_dev_pwr_in_ready:"); + + switch (pwr_state) { + case VCD_PWR_STATE_SLEEP: + vcd_pause_all_sessions(dev_ctxt); + + dev_ctxt->pwr_state = VCD_PWR_STATE_SLEEP; + + break; + case VCD_PWR_STATE_ON: + if (dev_ctxt->pwr_state == VCD_PWR_STATE_SLEEP) + vcd_resume_all_sessions(dev_ctxt); + + dev_ctxt->pwr_state = VCD_PWR_STATE_ON; + break; + default: + VCD_MSG_ERROR("Invalid power state requested %d", + pwr_state); + break; + } + return rc; +} + +static void vcd_dev_cb_in_initing(struct vcd_drv_ctxt *drv_ctxt, u32 event, + u32 status, void *payload, u32 size, u32 *ddl_handle, + void *const client_data) +{ + struct vcd_dev_ctxt *dev_ctxt; + struct vcd_clnt_ctxt *client; + struct vcd_clnt_ctxt *tmp_client; + struct vcd_handle_container container; + u32 rc = VCD_S_SUCCESS; + u32 client_inited = false; + u32 fail_all_open = false; + + VCD_MSG_LOW("vcd_dev_cb_in_initing:"); + + if (event != VCD_EVT_RESP_DEVICE_INIT) { + VCD_MSG_ERROR("vcd_dev_cb_in_initing: Unexpected event %d", + (int)event); + return; + } + + dev_ctxt = &drv_ctxt->dev_ctxt; + + dev_ctxt->cont = false; + + if (VCD_FAILED(status)) { + vcd_handle_device_init_failed(drv_ctxt, status); + + return; + } + + vcd_do_device_state_transition(drv_ctxt, VCD_DEVICE_STATE_READY, + DEVICE_STATE_EVENT_NUMBER(pf_open)); + + if (!dev_ctxt->cctxt_list_head) { + 
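+		/* Every client closed while device init was still in flight; defer device teardown. */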
VCD_MSG_HIGH("All clients are closed"); + + dev_ctxt->pending_cmd = VCD_CMD_DEVICE_TERM; + + return; + } + + if (!dev_ctxt->ddl_cmd_ch_depth || !dev_ctxt->trans_tbl) + rc = vcd_setup_with_ddl_capabilities(dev_ctxt); + + + if (VCD_FAILED(rc)) { + VCD_MSG_ERROR("rc = 0x%x: Failed " + "vcd_setup_with_ddl_capabilities", rc); + + fail_all_open = true; + } + + client = dev_ctxt->cctxt_list_head; + while (client) { + if (!fail_all_open) + rc = vcd_init_client_context(client); + + + if (!VCD_FAILED(rc)) { + container.handle = (void *)client; + client->callback(VCD_EVT_RESP_OPEN, VCD_S_SUCCESS, + &container, sizeof(container), container.handle, + client->client_data); + + client = client->next; + + client_inited = true; + } else { + VCD_MSG_ERROR("rc = 0x%x, Failed: " + "vcd_init_client_context", rc); + + client->callback(VCD_EVT_RESP_OPEN, rc, NULL, 0, 0, + client->client_data); + + tmp_client = client; + client = client->next; + + vcd_destroy_client_context(tmp_client); + } + } + + if (!client_inited || fail_all_open) { + VCD_MSG_ERROR("All client open requests failed"); + + dev_ctxt->pending_cmd = VCD_CMD_DEVICE_TERM; + } else if (vcd_power_event(dev_ctxt, NULL, VCD_EVT_PWR_DEV_INIT_END)) { + VCD_MSG_ERROR("VCD_EVT_PWR_DEV_INIT_END failed"); + } +} + +static void vcd_hw_timeout_cmn(struct vcd_drv_ctxt *drv_ctxt, void *user_data) +{ + struct vcd_dev_ctxt *dev_ctxt = &drv_ctxt->dev_ctxt; + VCD_MSG_LOW("vcd_hw_timeout_cmn:"); + vcd_device_timer_stop(dev_ctxt); + + vcd_handle_device_err_fatal(dev_ctxt, NULL); + + /* Reset HW. */ + vcd_reset_device_context(drv_ctxt, DEVICE_STATE_EVENT_NUMBER( + pf_timeout)); +} + +static void vcd_dev_enter_null(struct vcd_drv_ctxt *drv_ctxt, s32 ev_code) +{ + VCD_MSG_MED("Entering DEVICE_STATE_NULL on api %d", ev_code); +} + +static void vcd_dev_enter_not_init(struct vcd_drv_ctxt *drv_ctxt, s32 ev_code) +{ + VCD_MSG_MED("Entering DEVICE_STATE_NOT_INIT on api %d", ev_code); +} + +static void vcd_dev_enter_initing(struct vcd_drv_ctxt *drv_ctxt, s32 ev_code) +{ + VCD_MSG_MED("Entering DEVICE_STATE_INITING on api %d", ev_code); +} + +static void vcd_dev_enter_ready(struct vcd_drv_ctxt *drv_ctxt, s32 ev_code) +{ + VCD_MSG_MED("Entering DEVICE_STATE_READY on api %d", ev_code); +} + +static void vcd_dev_enter_invalid(struct vcd_drv_ctxt *drv_ctxt, s32 ev_code) +{ + VCD_MSG_MED("Entering DEVICE_STATE_INVALID on api %d", ev_code); +} + +static void vcd_dev_exit_null(struct vcd_drv_ctxt *drv_ctxt, s32 ev_code) +{ + VCD_MSG_MED("Exiting DEVICE_STATE_NULL on api %d", ev_code); +} + +static void vcd_dev_exit_not_init(struct vcd_drv_ctxt *drv_ctxt, s32 ev_code) +{ + VCD_MSG_MED("Exiting DEVICE_STATE_NOT_INIT on api %d", ev_code); +} + +static void vcd_dev_exit_initing(struct vcd_drv_ctxt *drv_ctxt, s32 ev_code) +{ + VCD_MSG_MED("Exiting DEVICE_STATE_INITING on api %d", ev_code); +} + +static void vcd_dev_exit_ready(struct vcd_drv_ctxt *drv_ctxt, s32 ev_code) +{ + VCD_MSG_MED("Exiting DEVICE_STATE_READY on api %d", ev_code); +} + +static void vcd_dev_exit_invalid(struct vcd_drv_ctxt *drv_ctxt, s32 ev_code) +{ + VCD_MSG_MED("Exiting DEVICE_STATE_INVALID on api %d", ev_code); +} + +static const struct vcd_dev_state_table vcd_dev_table_null = { + { + vcd_init_in_null, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + }, + vcd_dev_enter_null, + vcd_dev_exit_null +}; + +static const struct vcd_dev_state_table vcd_dev_table_not_init = { + { + vcd_init_in_not_init, + vcd_term_in_not_init, + vcd_open_in_not_init, + NULL, + NULL, + NULL, + NULL, + NULL, + }, + 
vcd_dev_enter_not_init, + vcd_dev_exit_not_init +}; + +static const struct vcd_dev_state_table vcd_dev_table_initing = { + { + vcd_init_in_initing, + vcd_term_in_initing, + vcd_open_in_initing, + NULL, + NULL, + NULL, + vcd_dev_cb_in_initing, + vcd_hw_timeout_cmn, + }, + vcd_dev_enter_initing, + vcd_dev_exit_initing +}; + +static const struct vcd_dev_state_table vcd_dev_table_ready = { + { + vcd_init_in_ready, + vcd_term_in_ready, + vcd_open_in_ready, + vcd_close_in_ready, + vcd_resume_in_ready, + vcd_set_dev_pwr_in_ready, + NULL, + vcd_hw_timeout_cmn, + }, + vcd_dev_enter_ready, + vcd_dev_exit_ready +}; + +static const struct vcd_dev_state_table vcd_dev_table_in_invalid = { + { + NULL, + vcd_term_in_invalid, + NULL, + vcd_close_in_dev_invalid, + NULL, + NULL, + NULL, + NULL, + }, + vcd_dev_enter_invalid, + vcd_dev_exit_invalid +}; + +static const struct vcd_dev_state_table *vcd_dev_state_table[] = { + &vcd_dev_table_null, + &vcd_dev_table_not_init, + &vcd_dev_table_initing, + &vcd_dev_table_ready, + &vcd_dev_table_in_invalid +}; diff --git a/drivers/misc/video_core/720p/vcd/vcd_device_sm.h b/drivers/misc/video_core/720p/vcd/vcd_device_sm.h new file mode 100644 index 0000000000000..0ba70d25b949a --- /dev/null +++ b/drivers/misc/video_core/720p/vcd/vcd_device_sm.h @@ -0,0 +1,106 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef _VCD_DEVICE_SM_H_ +#define _VCD_DEVICE_SM_H_ + +#include "vcd_api.h" +#include "vcd_ddl_api.h" +#include "vcd_core.h" + +struct vcd_dev_state_table; +struct vcd_dev_state_ctxt; +struct vcd_drv_ctxt; + +enum vcd_dev_state_enum { + VCD_DEVICE_STATE_NULL = 0, + VCD_DEVICE_STATE_NOT_INIT, + VCD_DEVICE_STATE_INITING, + VCD_DEVICE_STATE_READY, + VCD_DEVICE_STATE_INVALID, + VCD_DEVICE_STATE_MAX, + VCD_DEVICE_STATE_32BIT = 0x7FFFFFFF +}; + +struct vcd_dev_state_table { + struct { + u32(*pf_init) (struct vcd_drv_ctxt *drv_ctxt, + struct vcd_init_config *config, s32 *driver_handle); + + u32(*pf_term) (struct vcd_drv_ctxt *drv_ctxt, + s32 driver_handle); + + u32(*pf_open) (struct vcd_drv_ctxt *drv_ctxt, + s32 driver_handle, u32 decoding, + void (*callback) (u32 event, u32 status, void *info, + u32 size, void *handle, void *const client_data), + void *client_data); + + u32(*pf_close) (struct vcd_drv_ctxt *drv_ctxt, + struct vcd_clnt_ctxt *cctxt); + + u32(*pf_resume) (struct vcd_drv_ctxt *drv_ctxt, + struct vcd_clnt_ctxt *cctxt); + + u32(*pf_set_dev_pwr) (struct vcd_drv_ctxt *drv_ctxt, + enum vcd_power_state pwr_state); + + void (*pf_dev_cb) (struct vcd_drv_ctxt *drv_ctxt, + u32 event, u32 status, void *payload, u32 size, + u32 *ddl_handle, void *const client_data); + + void (*pf_timeout) (struct vcd_drv_ctxt *drv_ctxt, + void *user_data); + } ev_hdlr; + + void (*pf_entry) (struct vcd_drv_ctxt *drv_ctxt, s32 state_event_type); + void (*pf_exit) (struct vcd_drv_ctxt *drv_ctxt, s32 state_event_type); +}; + +#define DEVICE_STATE_EVENT_NUMBER(ppf) \ + ((u32 *) (&(((struct vcd_dev_state_table*)0)->ev_hdlr.ppf)) - \ + (u32 *) (&(((struct vcd_dev_state_table*)0)->ev_hdlr.pf_init)) \ + + 1) + +struct vcd_dev_state_ctxt { + const struct vcd_dev_state_table *state_table; + + enum vcd_dev_state_enum state; +}; + +struct vcd_drv_ctxt { + struct vcd_dev_state_ctxt dev_state; + struct vcd_dev_ctxt dev_ctxt; + struct mutex *dev_mutex; +}; + +extern struct vcd_drv_ctxt *vcd_get_drv_context(void); + +void vcd_continue(void); + +#endif diff --git a/drivers/misc/video_core/720p/vcd/vcd_power_sm.c b/drivers/misc/video_core/720p/vcd/vcd_power_sm.c new file mode 100644 index 0000000000000..56854170f4bd2 --- /dev/null +++ b/drivers/misc/video_core/720p/vcd/vcd_power_sm.c @@ -0,0 +1,316 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ + +#include "video_core_type.h" +#include "vcd_power_sm.h" +#include "vcd_core.h" +#include "vcd.h" + +u32 vcd_power_event(struct vcd_dev_ctxt *dev_ctxt, struct vcd_clnt_ctxt *cctxt, + u32 event) +{ + u32 rc = VCD_S_SUCCESS; + + VCD_MSG_MED("Device power state = %d\n", dev_ctxt->pwr_clk_state); + VCD_MSG_MED("event = 0x%x\n", event); + switch (event) { + case VCD_EVT_PWR_DEV_INIT_BEGIN: + case VCD_EVT_PWR_DEV_INIT_END: + case VCD_EVT_PWR_DEV_INIT_FAIL: + case VCD_EVT_PWR_DEV_TERM_BEGIN: + case VCD_EVT_PWR_DEV_TERM_END: + case VCD_EVT_PWR_DEV_TERM_FAIL: + case VCD_EVT_PWR_DEV_SLEEP_BEGIN: + case VCD_EVT_PWR_DEV_SLEEP_END: + case VCD_EVT_PWR_DEV_SET_PERFLVL: + case VCD_EVT_PWR_DEV_HWTIMEOUT: + rc = vcd_device_power_event(dev_ctxt, event, cctxt); + break; + case VCD_EVT_PWR_CLNT_CMD_BEGIN: + case VCD_EVT_PWR_CLNT_CMD_END: + case VCD_EVT_PWR_CLNT_CMD_FAIL: + case VCD_EVT_PWR_CLNT_PAUSE: + case VCD_EVT_PWR_CLNT_RESUME: + case VCD_EVT_PWR_CLNT_FIRST_FRAME: + case VCD_EVT_PWR_CLNT_LAST_FRAME: + case VCD_EVT_PWR_CLNT_ERRFATAL: + rc = vcd_client_power_event(dev_ctxt, cctxt, event); + break; + } + + if (VCD_FAILED(rc)) + VCD_MSG_ERROR("vcd_power_event: event 0x%x failed\n", event); + + return rc; + +} + +u32 vcd_device_power_event(struct vcd_dev_ctxt *dev_ctxt, u32 event, + struct vcd_clnt_ctxt *cctxt) +{ + u32 rc = VCD_ERR_FAIL; + u32 set_perf_lvl; + + switch (event) { + case VCD_EVT_PWR_DEV_INIT_BEGIN: + if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF && + res_trk_get_max_perf_level( + &dev_ctxt->max_perf_lvl) && + res_trk_power_up()) { + dev_ctxt->pwr_clk_state = + VCD_PWRCLK_STATE_ON_NOTCLOCKED; + dev_ctxt->curr_perf_lvl = 0; + dev_ctxt->reqd_perf_lvl = 0; + dev_ctxt->active_clnts = 0; + dev_ctxt->set_perf_lvl_pending = false; + rc = vcd_enable_clock(dev_ctxt, cctxt); + if (VCD_FAILED(rc)) { + res_trk_power_down(); + dev_ctxt->pwr_clk_state = VCD_PWRCLK_STATE_OFF; + } + } + break; + case VCD_EVT_PWR_DEV_INIT_END: + case VCD_EVT_PWR_DEV_TERM_FAIL: + case VCD_EVT_PWR_DEV_SLEEP_BEGIN: + case VCD_EVT_PWR_DEV_HWTIMEOUT: + rc = vcd_gate_clock(dev_ctxt); + break; + case VCD_EVT_PWR_DEV_INIT_FAIL: + case VCD_EVT_PWR_DEV_TERM_END: + if (dev_ctxt->pwr_clk_state != VCD_PWRCLK_STATE_OFF) { + vcd_disable_clock(dev_ctxt); + res_trk_power_down(); + + dev_ctxt->pwr_clk_state = VCD_PWRCLK_STATE_OFF; + dev_ctxt->curr_perf_lvl = 0; + dev_ctxt->reqd_perf_lvl = 0; + dev_ctxt->active_clnts = 0; + dev_ctxt->set_perf_lvl_pending = false; + rc = VCD_S_SUCCESS; + } + break; + case VCD_EVT_PWR_DEV_TERM_BEGIN: + case VCD_EVT_PWR_DEV_SLEEP_END: + rc = vcd_un_gate_clock(dev_ctxt); + break; + case VCD_EVT_PWR_DEV_SET_PERFLVL: + set_perf_lvl = dev_ctxt->reqd_perf_lvl > 0 ? 
+ dev_ctxt->reqd_perf_lvl : VCD_MIN_PERF_LEVEL; + rc = vcd_set_perf_level(dev_ctxt, set_perf_lvl, cctxt); + break; + } + return rc; +} + +u32 vcd_client_power_event(struct vcd_dev_ctxt *dev_ctxt, struct vcd_clnt_ctxt + *cctxt, u32 event) +{ + u32 rc = VCD_ERR_FAIL; + + switch (event) { + case VCD_EVT_PWR_CLNT_CMD_BEGIN: + rc = vcd_un_gate_clock(dev_ctxt); + break; + case VCD_EVT_PWR_CLNT_CMD_END: + rc = vcd_gate_clock(dev_ctxt); + break; + case VCD_EVT_PWR_CLNT_CMD_FAIL: + if (!vcd_core_is_busy(dev_ctxt)) + rc = vcd_gate_clock(dev_ctxt); + break; + case VCD_EVT_PWR_CLNT_PAUSE: + case VCD_EVT_PWR_CLNT_LAST_FRAME: + case VCD_EVT_PWR_CLNT_ERRFATAL: + if (!cctxt) + break; + rc = VCD_S_SUCCESS; + if (cctxt->status.req_perf_lvl) { + dev_ctxt->reqd_perf_lvl -= cctxt->reqd_perf_lvl; + cctxt->status.req_perf_lvl = false; + + rc = vcd_set_perf_level(dev_ctxt, + dev_ctxt->reqd_perf_lvl, cctxt); + } + break; + case VCD_EVT_PWR_CLNT_RESUME: + case VCD_EVT_PWR_CLNT_FIRST_FRAME: + if (!cctxt) + break; + rc = VCD_S_SUCCESS; + if (!cctxt->status.req_perf_lvl) { + dev_ctxt->reqd_perf_lvl += cctxt->reqd_perf_lvl; + cctxt->status.req_perf_lvl = true; + + rc = vcd_set_perf_level(dev_ctxt, + dev_ctxt->reqd_perf_lvl, cctxt); + } + break; + } + + return rc; +} + +u32 vcd_enable_clock(struct vcd_dev_ctxt *dev_ctxt, struct vcd_clnt_ctxt *cctxt) +{ + u32 rc = VCD_S_SUCCESS; + u32 set_perf_lvl; + + if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF) { + VCD_MSG_ERROR("vcd_enable_clock(): Already in state " + "VCD_PWRCLK_STATE_OFF\n"); + vcd_assert(); + rc = VCD_ERR_FAIL; + } else if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_NOTCLOCKED) { + + set_perf_lvl = dev_ctxt->reqd_perf_lvl > 0 ? + dev_ctxt->reqd_perf_lvl : VCD_MIN_PERF_LEVEL; + + rc = vcd_set_perf_level(dev_ctxt, set_perf_lvl, cctxt); + + if (!VCD_FAILED(rc)) { + if (res_trk_enable_clocks()) { + dev_ctxt->pwr_clk_state = + VCD_PWRCLK_STATE_ON_CLOCKED; + } + } else { + rc = VCD_ERR_FAIL; + } + + } + + if (!VCD_FAILED(rc)) + dev_ctxt->active_clnts++; + + return rc; +} + +u32 vcd_disable_clock(struct vcd_dev_ctxt *dev_ctxt) +{ + u32 rc = VCD_S_SUCCESS; + + if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF) { + VCD_MSG_ERROR("vcd_disable_clock(): Already in state " + "VCD_PWRCLK_STATE_OFF\n"); + vcd_assert(); + rc = VCD_ERR_FAIL; + } else if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_CLOCKED || + dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_CLOCKGATED) { + dev_ctxt->active_clnts--; + + if (!dev_ctxt->active_clnts) { + if (!res_trk_disable_clocks()) + rc = VCD_ERR_FAIL; + + dev_ctxt->pwr_clk_state = + VCD_PWRCLK_STATE_ON_NOTCLOCKED; + dev_ctxt->curr_perf_lvl = 0; + } + } + + return rc; +} + +u32 vcd_set_perf_level(struct vcd_dev_ctxt *dev_ctxt, u32 perf_lvl, + struct vcd_clnt_ctxt *cctxt) +{ + u32 rc = VCD_S_SUCCESS; + + if (!vcd_core_is_busy(dev_ctxt)) { + if (res_trk_set_perf_level(perf_lvl, &dev_ctxt->curr_perf_lvl, + cctxt)) { + dev_ctxt->set_perf_lvl_pending = false; + } else { + rc = VCD_ERR_FAIL; + dev_ctxt->set_perf_lvl_pending = true; + } + + } else { + dev_ctxt->set_perf_lvl_pending = true; + } + + return rc; +} + +u32 vcd_update_clnt_perf_lvl(struct vcd_clnt_ctxt *cctxt, + struct vcd_property_frame_rate *fps, u32 frm_p_units) +{ + u32 rc = VCD_S_SUCCESS; + struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt; + u32 new_perf_lvl; + + new_perf_lvl = frm_p_units * fps->fps_numerator / fps->fps_denominator; + + if (cctxt->status.req_perf_lvl) { + dev_ctxt->reqd_perf_lvl = dev_ctxt->reqd_perf_lvl - + cctxt->reqd_perf_lvl + new_perf_lvl; + + rc = 
vcd_set_perf_level(cctxt->dev_ctxt, + dev_ctxt->reqd_perf_lvl, cctxt); + } + + cctxt->reqd_perf_lvl = new_perf_lvl; + + return rc; +} + +u32 vcd_gate_clock(struct vcd_dev_ctxt *dev_ctxt) +{ + u32 rc = VCD_S_SUCCESS; + + if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF || + dev_ctxt->pwr_clk_state == + VCD_PWRCLK_STATE_ON_NOTCLOCKED) { + VCD_MSG_ERROR("%s: Clk is Off or Not Clked yet\n", __func__); + vcd_assert(); + return VCD_ERR_FAIL; + } + + if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_CLOCKGATED) + return rc; + + if (res_trk_disable_clocks()) + dev_ctxt->pwr_clk_state = VCD_PWRCLK_STATE_ON_CLOCKGATED; + else + rc = VCD_ERR_FAIL; + + return rc; +} + +u32 vcd_un_gate_clock(struct vcd_dev_ctxt *dev_ctxt) +{ + u32 rc = VCD_S_SUCCESS; + + if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF || + dev_ctxt->pwr_clk_state == + VCD_PWRCLK_STATE_ON_NOTCLOCKED) { + VCD_MSG_ERROR("%s: Clk is Off or Not Clked yet\n", __func__); + vcd_assert(); + return VCD_ERR_FAIL; + } + + if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_CLOCKED) + return rc; + + if (res_trk_enable_clocks()) + dev_ctxt->pwr_clk_state = VCD_PWRCLK_STATE_ON_CLOCKED; + else + rc = VCD_ERR_FAIL; + + return rc; +} diff --git a/drivers/misc/video_core/720p/vcd/vcd_power_sm.h b/drivers/misc/video_core/720p/vcd/vcd_power_sm.h new file mode 100644 index 0000000000000..b2af5ddad8a7a --- /dev/null +++ b/drivers/misc/video_core/720p/vcd/vcd_power_sm.h @@ -0,0 +1,59 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef _VCD_POWERSM_H_ +#define _VCD_POWERSM_H_ + +#define VCD_EVT_PWR_BASE 0x5000 +#define VCD_EVT_PWR_DEV_INIT_BEGIN (VCD_EVT_PWR_BASE + 0x1) +#define VCD_EVT_PWR_DEV_INIT_END (VCD_EVT_PWR_BASE + 0x2) +#define VCD_EVT_PWR_DEV_INIT_FAIL (VCD_EVT_PWR_BASE + 0x3) +#define VCD_EVT_PWR_DEV_TERM_BEGIN (VCD_EVT_PWR_BASE + 0x4) +#define VCD_EVT_PWR_DEV_TERM_END (VCD_EVT_PWR_BASE + 0x5) +#define VCD_EVT_PWR_DEV_TERM_FAIL (VCD_EVT_PWR_BASE + 0x6) +#define VCD_EVT_PWR_DEV_SLEEP_BEGIN (VCD_EVT_PWR_BASE + 0x7) +#define VCD_EVT_PWR_DEV_SLEEP_END (VCD_EVT_PWR_BASE + 0x8) +#define VCD_EVT_PWR_DEV_SET_PERFLVL (VCD_EVT_PWR_BASE + 0x9) +#define VCD_EVT_PWR_DEV_HWTIMEOUT (VCD_EVT_PWR_BASE + 0xa) +#define VCD_EVT_PWR_CLNT_CMD_BEGIN (VCD_EVT_PWR_BASE + 0xb) +#define VCD_EVT_PWR_CLNT_CMD_END (VCD_EVT_PWR_BASE + 0xc) +#define VCD_EVT_PWR_CLNT_CMD_FAIL (VCD_EVT_PWR_BASE + 0xd) +#define VCD_EVT_PWR_CLNT_PAUSE (VCD_EVT_PWR_BASE + 0xe) +#define VCD_EVT_PWR_CLNT_RESUME (VCD_EVT_PWR_BASE + 0xf) +#define VCD_EVT_PWR_CLNT_FIRST_FRAME (VCD_EVT_PWR_BASE + 0x10) +#define VCD_EVT_PWR_CLNT_LAST_FRAME (VCD_EVT_PWR_BASE + 0x11) +#define VCD_EVT_PWR_CLNT_ERRFATAL (VCD_EVT_PWR_BASE + 0x12) + +enum vcd_pwr_clk_state_type { + VCD_PWRCLK_STATE_OFF = 0, + VCD_PWRCLK_STATE_ON_NOTCLOCKED, + VCD_PWRCLK_STATE_ON_CLOCKED, + VCD_PWRCLK_STATE_ON_CLOCKGATED +}; + +#endif diff --git a/drivers/misc/video_core/720p/vcd/vcd_property.h b/drivers/misc/video_core/720p/vcd/vcd_property.h new file mode 100644 index 0000000000000..4df31b672bcad --- /dev/null +++ b/drivers/misc/video_core/720p/vcd/vcd_property.h @@ -0,0 +1,313 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef _VCD_DRIVER_PROPERTY_H_ +#define _VCD_DRIVER_PROPERTY_H_ + +#define VCD_START_BASE 0x0 +#define VCD_I_LIVE (VCD_START_BASE + 0x1) +#define VCD_I_CODEC (VCD_START_BASE + 0x2) +#define VCD_I_FRAME_SIZE (VCD_START_BASE + 0x3) +#define VCD_I_METADATA_ENABLE (VCD_START_BASE + 0x4) +#define VCD_I_METADATA_HEADER (VCD_START_BASE + 0x5) +#define VCD_I_PROFILE (VCD_START_BASE + 0x6) +#define VCD_I_LEVEL (VCD_START_BASE + 0x7) +#define VCD_I_BUFFER_FORMAT (VCD_START_BASE + 0x8) +#define VCD_I_FRAME_RATE (VCD_START_BASE + 0x9) +#define VCD_I_TARGET_BITRATE (VCD_START_BASE + 0xA) +#define VCD_I_MULTI_SLICE (VCD_START_BASE + 0xB) +#define VCD_I_ENTROPY_CTRL (VCD_START_BASE + 0xC) +#define VCD_I_DEBLOCKING (VCD_START_BASE + 0xD) +#define VCD_I_RATE_CONTROL (VCD_START_BASE + 0xE) +#define VCD_I_QP_RANGE (VCD_START_BASE + 0xF) +#define VCD_I_SESSION_QP (VCD_START_BASE + 0x10) +#define VCD_I_INTRA_PERIOD (VCD_START_BASE + 0x11) +#define VCD_I_VOP_TIMING (VCD_START_BASE + 0x12) +#define VCD_I_SHORT_HEADER (VCD_START_BASE + 0x13) +#define VCD_I_SEQ_HEADER (VCD_START_BASE + 0x14) +#define VCD_I_HEADER_EXTENSION (VCD_START_BASE + 0x15) +#define VCD_I_INTRA_REFRESH (VCD_START_BASE + 0x16) +#define VCD_I_POST_FILTER (VCD_START_BASE + 0x17) +#define VCD_I_PROGRESSIVE_ONLY (VCD_START_BASE + 0x18) + +#define VCD_START_REQ (VCD_START_BASE + 0x1000) +#define VCD_I_REQ_IFRAME (VCD_START_REQ + 0x1) + +#define VCD_I_RESERVED_BASE (VCD_START_BASE + 0x10000) + +struct vcd_property_hdr { + u32 id; + size_t sz; +}; + +//TODO: Remove? +struct vcd_property_live { + u32 live; +}; + +enum vcd_codec { + VCD_CODEC_H264 = 0x1, + VCD_CODEC_H263 = 0x2, + VCD_CODEC_MPEG1 = 0x3, + VCD_CODEC_MPEG2 = 0x4, + VCD_CODEC_MPEG4 = 0x5, + VCD_CODEC_DIVX_3 = 0x6, + VCD_CODEC_DIVX_4 = 0x7, + VCD_CODEC_DIVX_5 = 0x8, + VCD_CODEC_DIVX_6 = 0x9, + VCD_CODEC_XVID = 0xA, + VCD_CODEC_VC1 = 0xB, + VCD_CODEC_VC1_RCV = 0xC +}; + +struct vcd_property_codec { + enum vcd_codec codec; +}; + +struct vcd_property_frame_size { + u32 width; + u32 height; + u32 stride; + u32 scan_lines; +}; + + +#define VCD_METADATA_DATANONE 0x001 +#define VCD_METADATA_QCOMFILLER 0x002 +#define VCD_METADATA_QPARRAY 0x004 +#define VCD_METADATA_CONCEALMB 0x008 +#define VCD_METADATA_SEI 0x010 +#define VCD_METADATA_VUI 0x020 +#define VCD_METADATA_VC1 0x040 +#define VCD_METADATA_PASSTHROUGH 0x080 +#define VCD_METADATA_ENC_SLICE 0x100 + +struct vcd_property_meta_data_enable { + u32 meta_data_enable_flag; +}; + +struct vcd_property_metadata_hdr { + u32 meta_data_id_type; + u32 version; + u32 port_index; + u32 type; +}; + +struct vcd_property_frame_rate { + u32 fps_denominator; + u32 fps_numerator; +}; + +struct vcd_property_target_bitrate { + u32 target_bitrate; +}; + +enum vcd_yuv_buffer_format_type { + VCD_BUFFER_FORMAT_NV12 = 0x1, + VCD_BUFFER_FORMAT_TILE_4x2 = 0x2, + VCD_BUFFER_FORMAT_NV12_16M2KA = 0x3 +}; + +struct vcd_property_buffer_format { + enum vcd_yuv_buffer_format_type buffer_format; +}; + +struct vcd_property_post_filter { + u32 post_filter; +}; + +enum vcd_codec_profile_type { + VCD_PROFILE_UNKNOWN = 0x0, + VCD_PROFILE_MPEG4_SP = 0x1, + VCD_PROFILE_MPEG4_ASP = 0x2, + VCD_PROFILE_H264_BASELINE = 0x3, + VCD_PROFILE_H264_MAIN = 0x4, + VCD_PROFILE_H264_HIGH = 0x5, + VCD_PROFILE_H263_BASELINE = 0x6, + VCD_PROFILE_VC1_SIMPLE = 0x7, + VCD_PROFILE_VC1_MAIN = 0x8, + VCD_PROFILE_VC1_ADVANCE = 0x9, + VCD_PROFILE_MPEG2_MAIN = 0xA, + VCD_PROFILE_MPEG2_SIMPLE = 0xB +}; + +struct vcd_property_profile { + enum vcd_codec_profile_type profile; +}; + +enum 
vcd_codec_level_type { + VCD_LEVEL_UNKNOWN = 0x0, + VCD_LEVEL_MPEG4_0 = 0x1, + VCD_LEVEL_MPEG4_0b = 0x2, + VCD_LEVEL_MPEG4_1 = 0x3, + VCD_LEVEL_MPEG4_2 = 0x4, + VCD_LEVEL_MPEG4_3 = 0x5, + VCD_LEVEL_MPEG4_3b = 0x6, + VCD_LEVEL_MPEG4_4 = 0x7, + VCD_LEVEL_MPEG4_4a = 0x8, + VCD_LEVEL_MPEG4_5 = 0x9, + VCD_LEVEL_MPEG4_6 = 0xA, + VCD_LEVEL_MPEG4_7 = 0xB, + VCD_LEVEL_MPEG4_X = 0xC, + VCD_LEVEL_H264_1 = 0x10, + VCD_LEVEL_H264_1b = 0x11, + VCD_LEVEL_H264_1p1 = 0x12, + VCD_LEVEL_H264_1p2 = 0x13, + VCD_LEVEL_H264_1p3 = 0x14, + VCD_LEVEL_H264_2 = 0x15, + VCD_LEVEL_H264_2p1 = 0x16, + VCD_LEVEL_H264_2p2 = 0x17, + VCD_LEVEL_H264_3 = 0x18, + VCD_LEVEL_H264_3p1 = 0x19, + VCD_LEVEL_H264_3p2 = 0x1A, + VCD_LEVEL_H264_4 = 0x1B, + VCD_LEVEL_H264_X = 0x1C, + VCD_LEVEL_H263_10 = 0x20, + VCD_LEVEL_H263_20 = 0x21, + VCD_LEVEL_H263_30 = 0x22, + VCD_LEVEL_H263_40 = 0x23, + VCD_LEVEL_H263_45 = 0x24, + VCD_LEVEL_H263_50 = 0x25, + VCD_LEVEL_H263_60 = 0x26, + VCD_LEVEL_H263_70 = 0x27, + VCD_LEVEL_H263_X = 0x28, + VCD_LEVEL_MPEG2_LOW = 0x30, + VCD_LEVEL_MPEG2_MAIN = 0x31, + VCD_LEVEL_MPEG2_HIGH_14 = 0x32, + VCD_LEVEL_MPEG2_HIGH = 0x33, + VCD_LEVEL_MPEG2_X = 0x34, + VCD_LEVEL_VC1_LOW = 0x40, + VCD_LEVEL_VC1_MEDIUM = 0x41, + VCD_LEVEL_VC1_HIGH = 0x42, + VCD_LEVEL_VC1_0 = 0x43, + VCD_LEVEL_VC1_1 = 0x44, + VCD_LEVEL_VC1_2 = 0x45, + VCD_LEVEL_VC1_3 = 0x46, + VCD_LEVEL_VC1_4 = 0x47, + VCD_LEVEL_VC1_X = 0x48 +}; + +struct vcd_property_level { + enum vcd_codec_level_type level; +}; + +enum vcd_m_slice_sel_type { + VCD_MSLICE_OFF = 0x1, + VCD_MSLICE_BY_MB_COUNT = 0x2, + VCD_MSLICE_BY_BYTE_COUNT = 0x3, + VCD_MSLICE_BY_GOB = 0x4 +}; + +struct vcd_property_multi_slice { + enum vcd_m_slice_sel_type m_slice_sel; + u32 m_slice_size; +}; + +enum vcd_entropy_sel_type { + VCD_ENTROPY_SEL_CAVLC = 0x1, + VCD_ENTROPY_SEL_CABAC = 0x2 +}; + +enum vcd_cabac_model_type { + VCD_CABAC_MODEL_NUMBER_0 = 0x1, + VCD_CABAC_MODEL_NUMBER_1 = 0x2, + VCD_CABAC_MODEL_NUMBER_2 = 0x3 +}; + +struct vcd_property_entropy_control { + enum vcd_entropy_sel_type entropy_sel; + enum vcd_cabac_model_type cabac_model; +}; + +enum vcd_db_config_type { + VCD_DB_ALL_BLOCKING_BOUNDARY = 0x1, + VCD_DB_DISABLE = 0x2, + VCD_DB_SKIP_SLICE_BOUNDARY = 0x3 +}; +struct vcd_property_db_config { + enum vcd_db_config_type db_config; + u32 slice_alpha_offset; + u32 slice_beta_offset; +}; + +enum vcd_rate_control_type { + VCD_RATE_CONTROL_OFF = 0x1, + VCD_RATE_CONTROL_VBR_VFR = 0x2, + VCD_RATE_CONTROL_VBR_CFR = 0x3, + VCD_RATE_CONTROL_CBR_VFR = 0x4, + VCD_RATE_CONTROL_CBR_CFR = 0x5 +}; + +struct vcd_property_rate_control { + enum vcd_rate_control_type rate_control; +}; + +struct vcd_property_qp_range { + u32 max_qp; + u32 min_qp; +}; + +struct vcd_property_session_qp { + u32 iframe_qp; + u32 frame_qp; + u32 bframe_qp; +}; + +struct vcd_property_i_period { + u32 frames; + u32 bframes; +}; + +struct vcd_property_vop_timing { + u32 vop_time_resolution; +}; + +struct vcd_property_short_header { + u32 short_header; +}; + +struct vcd_property_intra_refresh_mb_number { + u32 cir_mb_number; +}; + +struct vcd_property_req_i_frame { + u32 req_i_frame; +}; + +struct vcd_frame_rect { + u32 left; + u32 top; + u32 right; + u32 bottom; +}; + +struct vcd_property_dec_output_buffer { + struct vcd_frame_rect disp_frm; +}; + +#endif diff --git a/drivers/misc/video_core/720p/vcd/vcd_status.h b/drivers/misc/video_core/720p/vcd/vcd_status.h new file mode 100644 index 0000000000000..702ed549da753 --- /dev/null +++ b/drivers/misc/video_core/720p/vcd/vcd_status.h @@ -0,0 +1,69 @@ +/* Copyright (c) 2010, Code 
Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef _VCD_ERR_STATUS_H_ +#define _VCD_ERR_STATUS_H_ + +#define VCD_EVT_RESP_BASE 0x1000 +#define VCD_EVT_RESP_OPEN (VCD_EVT_RESP_BASE + 0x1) +#define VCD_EVT_RESP_START (VCD_EVT_RESP_BASE + 0x2) +#define VCD_EVT_RESP_STOP (VCD_EVT_RESP_BASE + 0x3) +#define VCD_EVT_RESP_PAUSE (VCD_EVT_RESP_BASE + 0x4) +#define VCD_EVT_RESP_FLUSH_INPUT_DONE (VCD_EVT_RESP_BASE + 0x5) +#define VCD_EVT_RESP_FLUSH_OUTPUT_DONE (VCD_EVT_RESP_BASE + 0x6) +#define VCD_EVT_RESP_INPUT_FLUSHED (VCD_EVT_RESP_BASE + 0x7) +#define VCD_EVT_RESP_OUTPUT_FLUSHED (VCD_EVT_RESP_BASE + 0x8) +#define VCD_EVT_RESP_INPUT_DONE (VCD_EVT_RESP_BASE + 0x9) +#define VCD_EVT_RESP_OUTPUT_DONE (VCD_EVT_RESP_BASE + 0xa) + +#define VCD_EVT_IND_BASE 0x2000 +#define VCD_EVT_IND_RECONFIG (VCD_EVT_IND_BASE + 0x1) +#define VCD_EVT_IND_HWERRFATAL (VCD_EVT_IND_BASE + 0x2) +#define VCD_EVT_IND_RESOURCES_LOST (VCD_EVT_IND_BASE + 0x3) + +#define VCD_S_SUCCESS 0x0 + +#define VCD_S_ERR_BASE 0x80000000 +#define VCD_ERR_FAIL (VCD_S_ERR_BASE + 0x1) +#define VCD_ERR_ALLOC_FAIL (VCD_S_ERR_BASE + 0x2) +#define VCD_ERR_ILLEGAL_OP (VCD_S_ERR_BASE + 0x3) +#define VCD_ERR_ILLEGAL_PARM (VCD_S_ERR_BASE + 0x4) +#define VCD_ERR_BAD_POINTER (VCD_S_ERR_BASE + 0x5) +#define VCD_ERR_BAD_HANDLE (VCD_S_ERR_BASE + 0x6) +#define VCD_ERR_NOT_SUPPORTED (VCD_S_ERR_BASE + 0x7) +#define VCD_ERR_BAD_STATE (VCD_S_ERR_BASE + 0x8) +#define VCD_ERR_BUSY (VCD_S_ERR_BASE + 0x9) +#define VCD_ERR_MAX_CLIENT (VCD_S_ERR_BASE + 0xa) +#define VCD_ERR_IFRAME_EXPECTED (VCD_S_ERR_BASE + 0xb) +#define VCD_ERR_INTRLCD_FIELD_DROP (VCD_S_ERR_BASE + 0xc) +#define VCD_ERR_HW_FATAL (VCD_S_ERR_BASE + 0xd) +#define VCD_ERR_BITSTREAM_ERR (VCD_S_ERR_BASE + 0xe) +#define VCD_FAILED(rc) ((rc > VCD_S_ERR_BASE) ? 
true : false) + +#endif diff --git a/drivers/misc/video_core/720p/vcd/vcd_sub.c b/drivers/misc/video_core/720p/vcd/vcd_sub.c new file mode 100644 index 0000000000000..a088cbc26709a --- /dev/null +++ b/drivers/misc/video_core/720p/vcd/vcd_sub.c @@ -0,0 +1,2921 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include + +#include "video_core_type.h" +#include "vcd.h" +#include "vdec_internal.h" + +static phys_addr_t vcd_pmem_get_physical(struct video_client_ctx *client_ctx, + void *kern_addr) +{ + phys_addr_t phys_addr; + void __user *user_addr; + int pmem_fd; + struct file *file; + s32 buffer_index = -1; + + if (vid_c_lookup_addr_table(client_ctx, BUFFER_TYPE_INPUT, + false, &user_addr, &kern_addr, &phys_addr, &pmem_fd, + &file, &buffer_index)) { + return phys_addr; + } + if (vid_c_lookup_addr_table(client_ctx, BUFFER_TYPE_OUTPUT, + false, &user_addr, &kern_addr, &phys_addr, &pmem_fd, + &file, &buffer_index)) { + return phys_addr; + } + VCD_MSG_ERROR("Couldn't get physical address"); + return 0; +} + +void vcd_reset_device_channels(struct vcd_dev_ctxt *dev_ctxt) +{ + dev_ctxt->ddl_frame_ch_free = dev_ctxt->ddl_frame_ch_depth; + dev_ctxt->ddl_cmd_ch_free = dev_ctxt->ddl_cmd_ch_depth; + dev_ctxt->ddl_frame_ch_interim = 0; + dev_ctxt->ddl_cmd_ch_interim = 0; +} + +u32 vcd_get_command_channel(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_transc **pp_transc) +{ + u32 result = false; + + *pp_transc = NULL; + + if (dev_ctxt->ddl_cmd_ch_free > 0) { + if (dev_ctxt->ddl_cmd_concurrency) { + --dev_ctxt->ddl_cmd_ch_free; + result = true; + } else if ((dev_ctxt->ddl_frame_ch_free + + dev_ctxt->ddl_frame_ch_interim) == + dev_ctxt->ddl_frame_ch_depth) { + --dev_ctxt->ddl_cmd_ch_free; + result = true; + } + } + + if (result) { + *pp_transc = vcd_get_free_trans_tbl_entry(dev_ctxt); + + if (!*pp_transc) { + result = false; + + vcd_release_command_channel(dev_ctxt, *pp_transc); + } + + } + return result; +} + +u32 vcd_get_command_channel_in_loop(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_transc **pp_transc) +{ + u32 result = false; + + *pp_transc = NULL; + + if (dev_ctxt->ddl_cmd_ch_interim > 0) { + if (dev_ctxt->ddl_cmd_concurrency) { + --dev_ctxt->ddl_cmd_ch_interim; + result = true; + } else if ((dev_ctxt->ddl_frame_ch_free + + dev_ctxt->ddl_frame_ch_interim) + == dev_ctxt->ddl_frame_ch_depth) { + --dev_ctxt->ddl_cmd_ch_interim; + result = true; + } + } else { + result = vcd_get_command_channel(dev_ctxt, pp_transc); + } + + if (result && !*pp_transc) { + *pp_transc = vcd_get_free_trans_tbl_entry(dev_ctxt); + + if (!*pp_transc) { + result = false; + + ++dev_ctxt->ddl_cmd_ch_interim; + } + } + + return result; +} + +void vcd_mark_command_channel(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_transc *transc) +{ + ++dev_ctxt->ddl_cmd_ch_interim; + + vcd_release_trans_tbl_entry(transc); + if 
(dev_ctxt->ddl_cmd_ch_interim + dev_ctxt->ddl_cmd_ch_free > + dev_ctxt->ddl_cmd_ch_depth) { + VCD_MSG_ERROR("\n Command channel access counters messed up"); + vcd_assert(); + } +} + +void vcd_release_command_channel(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_transc *transc) +{ + ++dev_ctxt->ddl_cmd_ch_free; + + vcd_release_trans_tbl_entry(transc); + if (dev_ctxt->ddl_cmd_ch_interim + dev_ctxt->ddl_cmd_ch_free > + dev_ctxt->ddl_cmd_ch_depth) { + VCD_MSG_ERROR("\n Command channel access counters messed up"); + vcd_assert(); + } +} + +void vcd_release_multiple_command_channels(struct vcd_dev_ctxt *dev_ctxt, + u32 channels) +{ + dev_ctxt->ddl_cmd_ch_free += channels; + + if (dev_ctxt->ddl_cmd_ch_interim + dev_ctxt->ddl_cmd_ch_free > + dev_ctxt->ddl_cmd_ch_depth) { + VCD_MSG_ERROR("\n Command channel access counters messed up"); + vcd_assert(); + } +} + +void vcd_release_interim_command_channels(struct vcd_dev_ctxt *dev_ctxt) +{ + dev_ctxt->ddl_cmd_ch_free += dev_ctxt->ddl_cmd_ch_interim; + dev_ctxt->ddl_cmd_ch_interim = 0; + + if (dev_ctxt->ddl_cmd_ch_interim + dev_ctxt->ddl_cmd_ch_free > + dev_ctxt->ddl_cmd_ch_depth) { + VCD_MSG_ERROR("\n Command channel access counters messed up"); + vcd_assert(); + } +} + +u32 vcd_get_frame_channel(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_transc **pp_transc) +{ + u32 result = false; + + if (dev_ctxt->ddl_frame_ch_free > 0) { + if (dev_ctxt->ddl_cmd_concurrency) { + --dev_ctxt->ddl_frame_ch_free; + result = true; + } else if ((dev_ctxt->ddl_cmd_ch_free + + dev_ctxt->ddl_cmd_ch_interim) + == dev_ctxt->ddl_cmd_ch_depth) { + --dev_ctxt->ddl_frame_ch_free; + result = true; + } + } + + if (result) { + *pp_transc = vcd_get_free_trans_tbl_entry(dev_ctxt); + + if (!*pp_transc) { + result = false; + + vcd_release_frame_channel(dev_ctxt, *pp_transc); + } else { + (*pp_transc)->type = VCD_CMD_CODE_FRAME; + } + + } + + return result; +} + +u32 vcd_get_frame_channel_in_loop(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_transc **pp_transc) +{ + u32 result = false; + + *pp_transc = NULL; + + if (dev_ctxt->ddl_frame_ch_interim > 0) { + if (dev_ctxt->ddl_cmd_concurrency) { + --dev_ctxt->ddl_frame_ch_interim; + result = true; + } else if ((dev_ctxt->ddl_cmd_ch_free + + dev_ctxt->ddl_cmd_ch_interim) == + dev_ctxt->ddl_cmd_ch_depth) { + --dev_ctxt->ddl_frame_ch_interim; + result = true; + } + } else { + result = vcd_get_frame_channel(dev_ctxt, pp_transc); + } + + if (result && !*pp_transc) { + *pp_transc = vcd_get_free_trans_tbl_entry(dev_ctxt); + + if (!*pp_transc) { + result = false; + VCD_MSG_FATAL("\n%s: All transactions are busy;" + "Couldnt find free one\n", __func__); + ++dev_ctxt->ddl_frame_ch_interim; + } + + } + + return result; +} + +void vcd_mark_frame_channel(struct vcd_dev_ctxt *dev_ctxt) +{ + ++dev_ctxt->ddl_frame_ch_interim; + + if (dev_ctxt->ddl_frame_ch_interim + dev_ctxt->ddl_frame_ch_free > + dev_ctxt->ddl_cmd_ch_depth) { + VCD_MSG_FATAL("Frame channel access counters messed up"); + vcd_assert(); + } +} + +void vcd_release_frame_channel(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_transc *transc) +{ + ++dev_ctxt->ddl_frame_ch_free; + + vcd_release_trans_tbl_entry(transc); + + if (dev_ctxt->ddl_frame_ch_interim + dev_ctxt->ddl_frame_ch_free > + dev_ctxt->ddl_cmd_ch_depth) { + VCD_MSG_FATAL("Frame channel access counters messed up"); + vcd_assert(); + } +} + +void vcd_release_multiple_frame_channels(struct vcd_dev_ctxt + *dev_ctxt, u32 channels) +{ + dev_ctxt->ddl_frame_ch_free += channels; + + if (dev_ctxt->ddl_frame_ch_interim + dev_ctxt->ddl_frame_ch_free 
> + dev_ctxt->ddl_frame_ch_depth) { + VCD_MSG_FATAL("Frame channel access counters messed up"); + vcd_assert(); + } +} + +void vcd_release_interim_frame_channels(struct vcd_dev_ctxt + *dev_ctxt) +{ + dev_ctxt->ddl_frame_ch_free += dev_ctxt->ddl_frame_ch_interim; + dev_ctxt->ddl_frame_ch_interim = 0; + + if (dev_ctxt->ddl_frame_ch_free > dev_ctxt->ddl_cmd_ch_depth) { + VCD_MSG_FATAL("Frame channel access counters messed up"); + vcd_assert(); + } +} + +u32 vcd_core_is_busy(struct vcd_dev_ctxt *dev_ctxt) +{ + if (((dev_ctxt->ddl_cmd_ch_free + dev_ctxt->ddl_cmd_ch_interim) != + dev_ctxt->ddl_cmd_ch_depth) || + ((dev_ctxt->ddl_frame_ch_free + + dev_ctxt->ddl_frame_ch_interim) != + dev_ctxt->ddl_frame_ch_depth)) { + return true; + } else { + return false; + } +} + +void vcd_device_timer_start(struct vcd_dev_ctxt *dev_ctxt) +{ + if (dev_ctxt->config.pf_timer_start) + dev_ctxt->config.pf_timer_start(dev_ctxt->hw_timer_handle, + dev_ctxt->hw_time_out); +} + +void vcd_device_timer_stop(struct vcd_dev_ctxt *dev_ctxt) +{ + if (dev_ctxt->config.pf_timer_stop) + dev_ctxt->config.pf_timer_stop(dev_ctxt->hw_timer_handle); +} + + +u32 vcd_common_allocate_set_buffer(struct vcd_clnt_ctxt *cctxt, + enum vcd_buffer_type buffer, size_t sz, + struct vcd_buffer_pool **pp_buf_pool) +{ + u32 rc = VCD_S_SUCCESS; + struct vcd_buffer_requirement buf_req; + struct vcd_property_hdr prop_hdr; + struct vcd_buffer_pool *buf_pool; + + if (buffer == VCD_BUFFER_INPUT) { + prop_hdr.id = DDL_I_INPUT_BUF_REQ; + buf_pool = &cctxt->in_buf_pool; + } else if (buffer == VCD_BUFFER_OUTPUT) { + prop_hdr.id = DDL_I_OUTPUT_BUF_REQ; + buf_pool = &cctxt->out_buf_pool; + } else { + rc = VCD_ERR_ILLEGAL_PARM; + } + + VCD_FAILED_RETURN(rc, "Invalid buffer type provided"); + + *pp_buf_pool = buf_pool; + + if (buf_pool->count > 0 && buf_pool->validated == buf_pool->count) { + VCD_MSG_ERROR("Buffer pool is full"); + + return VCD_ERR_FAIL; + } + + if (!buf_pool->entries) { + prop_hdr.sz = sizeof(buf_req); + rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr, &buf_req); + + if (!VCD_FAILED(rc)) + rc = vcd_alloc_buffer_pool_entries(buf_pool, &buf_req); + else + VCD_MSG_ERROR("rc = 0x%x. 
Failed ddl_get_property", rc); + } + + if (!VCD_FAILED(rc)) { + if (buf_pool->buf_req.size > sz) { + VCD_MSG_ERROR("required buffer size %u allocated size " + "%u", buf_pool->buf_req.size, sz); + rc = VCD_ERR_ILLEGAL_PARM; + } + } + + return rc; +} + +u32 vcd_set_buffer_internal(struct vcd_clnt_ctxt *cctxt, + struct vcd_buffer_pool *buf_pool, void *buf, size_t sz) +{ + struct vcd_buffer_entry *buf_entry; + + buf_entry = vcd_find_buffer_pool_entry(buf_pool, buf); + if (buf_entry) { + VCD_MSG_ERROR("This buffer address already exists"); + return VCD_ERR_ILLEGAL_OP; + } + + if (!IS_ALIGNED((unsigned long)buf, buf_pool->buf_req.align)) { + VCD_MSG_ERROR("Provided addr is not aligned"); + return VCD_ERR_BAD_POINTER; + } + + buf_entry = vcd_get_free_buffer_pool_entry(buf_pool); + if (!buf_entry) { + VCD_MSG_ERROR("Can't allocate buffer pool is full"); + return VCD_ERR_FAIL; + } + + printk("npelly adding %p to buf_pool %p\n", buf, buf_entry); + buf_entry->virt_addr = buf; + + buf_entry->phys_addr = vcd_pmem_get_physical(cctxt->client_data, buf); + + if (!buf_entry->phys_addr) { + VCD_MSG_ERROR("Couldn't get physical address"); + return VCD_ERR_BAD_POINTER; + } + + if (!IS_ALIGNED((unsigned long)buf_entry->phys_addr, + buf_pool->buf_req.align)) { + VCD_MSG_ERROR("Physical addr is not aligned"); + return VCD_ERR_BAD_POINTER; + } + + buf_entry->size = sz; + buf_entry->frame.alloc_len = sz; + buf_entry->allocated = false; + + buf_entry->frame.virt_addr = buf_entry->virt_addr; + buf_entry->frame.phys_addr = buf_entry->phys_addr; + + buf_pool->validated++; + + return VCD_S_SUCCESS; + +} + +u32 vcd_allocate_buffer_internal(struct vcd_clnt_ctxt *cctxt, + struct vcd_buffer_pool *buf_pool, size_t buf_size, void **virt_addr, + phys_addr_t *phys_addr) +{ + struct vcd_buffer_entry *buf_entry; + struct vcd_buffer_requirement *buf_req; +// u32 addr; +// int rc = 0; + + buf_entry = vcd_get_free_buffer_pool_entry(buf_pool); + if (!buf_entry) { + VCD_MSG_ERROR("Can't allocate buffer pool is full"); + return VCD_ERR_FAIL; + } + + buf_req = &buf_pool->buf_req; + + //TODO strip align crap +// buf_size += buf_req->align; + + buf_entry->buffer.virt_addr = dma_alloc_coherent(NULL, buf_size, + &buf_entry->buffer.phys_addr, GFP_KERNEL); + if (!buf_entry->buffer.virt_addr) { + VCD_MSG_ERROR("Buffer allocation failed"); + return VCD_ERR_ALLOC_FAIL; + } + + buf_entry->buffer.size = buf_size; + buf_entry->allocated = true; + + buf_entry->frame.alloc_len = buf_entry->buffer.size; + buf_entry->frame.virt_addr = buf_entry->buffer.virt_addr; + buf_entry->frame.phys_addr = buf_entry->buffer.phys_addr; + + *virt_addr = buf_entry->buffer.virt_addr; + *phys_addr = buf_entry->buffer.phys_addr; + + buf_pool->allocated++; + buf_pool->validated++; + + return VCD_S_SUCCESS; +} + +u32 vcd_free_one_buffer_internal(struct vcd_clnt_ctxt *cctxt, + enum vcd_buffer_type vcd_buffer_type, u8 *buffer) +{ + struct vcd_buffer_pool *buf_pool; + u32 rc = VCD_S_SUCCESS; + struct vcd_buffer_entry *buf_entry; + + if (vcd_buffer_type == VCD_BUFFER_INPUT) + buf_pool = &cctxt->in_buf_pool; + else if (vcd_buffer_type == VCD_BUFFER_OUTPUT) + buf_pool = &cctxt->out_buf_pool; + else + rc = VCD_ERR_ILLEGAL_PARM; + + VCD_FAILED_RETURN(rc, "Invalid buffer type provided"); + + buf_entry = vcd_find_buffer_pool_entry(buf_pool, buffer); + if (!buf_entry) { + VCD_MSG_ERROR("Buffer addr %p not found. 
Can't free buffer", + buffer); + + return VCD_ERR_ILLEGAL_PARM; + } + if (buf_entry->in_use) { + VCD_MSG_ERROR("\n Buffer is in use and is not flushed"); + return VCD_ERR_ILLEGAL_OP; + } + + VCD_MSG_LOW("Freeing buffer %p. Allocated %d", buf_entry->virt_addr, + buf_entry->allocated); + + if (buf_entry->allocated) { + dma_free_coherent(NULL, buf_entry->size, buf_entry->virt_addr, + buf_entry->phys_addr); + buf_entry->virt_addr = NULL; + buf_pool->allocated--; + + } + + memset(buf_entry, 0, sizeof(struct vcd_buffer_entry)); + + buf_pool->validated--; + + return VCD_S_SUCCESS; +} + +u32 vcd_free_buffers_internal(struct vcd_clnt_ctxt *cctxt, + struct vcd_buffer_pool *buf_pool) +{ + u32 rc = VCD_S_SUCCESS; + u32 i; + + VCD_MSG_LOW("vcd_free_buffers_internal:"); + + if (!buf_pool->entries) + return rc; + + for (i = 1; i <= buf_pool->count; i++) { + struct vcd_buffer_entry *b = &buf_pool->entries[i]; + if (!b->valid || !b->allocated) + continue; + dma_free_coherent(NULL, b->size, b->virt_addr, b->phys_addr); + } + + vcd_reset_buffer_pool_for_reuse(buf_pool); + + return rc; +} + +u32 vcd_alloc_buffer_pool_entries(struct vcd_buffer_pool *buf_pool, + struct vcd_buffer_requirement *buf_req) +{ + + VCD_MSG_LOW("vcd_alloc_buffer_pool_entries:"); + + buf_pool->buf_req = *buf_req; + + buf_pool->count = buf_req->actual_count; + buf_pool->entries = kzalloc(sizeof(struct vcd_buffer_entry) * + (buf_pool->count + 1), GFP_KERNEL); + + if (!buf_pool->entries) { + VCD_MSG_ERROR("Buf_pool entries alloc failed"); + return VCD_ERR_ALLOC_FAIL; + } + + buf_pool->queue = kzalloc(sizeof(struct vcd_buffer_entry *) * + buf_pool->count, GFP_KERNEL); + + if (!buf_pool->queue) { + VCD_MSG_ERROR("Buf_pool queue alloc failed"); + kfree(buf_pool->entries); + return VCD_ERR_ALLOC_FAIL; + } + + buf_pool->entries[0].valid = true; + + buf_pool->q_head = 0; + buf_pool->q_tail = (u16) (buf_pool->count - 1); + buf_pool->q_len = 0; + + buf_pool->validated = 0; + buf_pool->allocated = 0; + buf_pool->in_use = 0; + + return VCD_S_SUCCESS; +} + +void vcd_free_buffer_pool_entries(struct vcd_buffer_pool *buf_pool) +{ + VCD_MSG_LOW("vcd_free_buffer_pool_entries:"); + + kfree(buf_pool->entries); + kfree(buf_pool->queue); + + memset(buf_pool, 0, sizeof(struct vcd_buffer_pool)); +} + +void vcd_flush_in_use_buffer_pool_entries(struct vcd_clnt_ctxt *cctxt, + struct vcd_buffer_pool *buf_pool, u32 event) +{ + u32 i; + VCD_MSG_LOW("vcd_flush_buffer_pool_entries: event=0x%x", event); + + if (!buf_pool->entries) + return; + + for (i = 0; i <= buf_pool->count; i++) { + if (buf_pool->entries[i].virt_addr && + buf_pool->entries[i].in_use) { + cctxt->callback(event, VCD_S_SUCCESS, + &buf_pool->entries[i].frame, + sizeof(struct vcd_frame_data), cctxt, + cctxt->client_data); + buf_pool->entries[i].in_use = false; + VCD_BUFFERPOOL_INUSE_DECREMENT(buf_pool->in_use); + } + } +} + + +void vcd_reset_buffer_pool_for_reuse(struct vcd_buffer_pool *buf_pool) +{ + VCD_MSG_LOW("vcd_reset_buffer_pool_for_reuse:"); + + memset(&buf_pool->entries[1], 0, sizeof(struct vcd_buffer_entry) * + buf_pool->count); + memset(buf_pool->queue, 0, sizeof(struct vcd_buffer_entry *) * + buf_pool->count); + + buf_pool->q_head = 0; + buf_pool->q_tail = (u16) (buf_pool->count - 1); + buf_pool->q_len = 0; + + buf_pool->validated = 0; + buf_pool->allocated = 0; + buf_pool->in_use = 0; + +} + +struct vcd_buffer_entry *vcd_get_free_buffer_pool_entry(struct vcd_buffer_pool + *pool) +{ + int i; + for (i = 1; i <= pool->count; i++) { + if (!pool->entries[i].valid) { + pool->entries[i].valid = 
true; + return &pool->entries[i]; + } + } + return NULL; +} + +struct vcd_buffer_entry *vcd_find_buffer_pool_entry(struct vcd_buffer_pool + *pool, void *virt_addr) +{ + int i; + for (i = 0; i <= pool->count; i++) + if (pool->entries[i].virt_addr == virt_addr) + return &pool->entries[i]; + return NULL; +} + +u32 vcd_buffer_pool_entry_en_q(struct vcd_buffer_pool *pool, + struct vcd_buffer_entry *entry) +{ + u16 i; + u16 q_cntr; + u32 found = false; + + if (pool->q_len == pool->count) + return false; + + for (i = 0, q_cntr = pool->q_head; !found && i < pool->q_len; + i++, q_cntr = (q_cntr + 1) % pool->count) { + if (pool->queue[q_cntr] == entry) + found = true; + } + + if (found) { + VCD_MSG_HIGH("this output buffer is already present in queue"); + VCD_MSG_HIGH("virt_addr %p phys_addr %x", entry->virt_addr, + entry->phys_addr); + return false; + } + + pool->q_tail = (pool->q_tail + 1) % pool->count; + pool->q_len++; + pool->queue[pool->q_tail] = entry; + + return true; +} + +struct vcd_buffer_entry *vcd_buffer_pool_entry_de_q(struct vcd_buffer_pool + *pool) +{ + struct vcd_buffer_entry *entry; + + if (!pool || !pool->q_len) + return NULL; + + entry = pool->queue[pool->q_head]; + pool->q_head = (pool->q_head + 1) % pool->count; + pool->q_len--; + + return entry; +} + +void vcd_flush_output_buffers(struct vcd_clnt_ctxt *cctxt) +{ + struct vcd_buffer_pool *buf_pool; + struct vcd_buffer_entry *buf_entry; + u32 count = 0; + struct vcd_property_hdr prop_hdr; + + VCD_MSG_LOW("vcd_flush_output_buffers:"); + + buf_pool = &cctxt->out_buf_pool; + + buf_entry = vcd_buffer_pool_entry_de_q(buf_pool); + while (buf_entry) { + if (!cctxt->decoding || buf_entry->in_use) { + buf_entry->frame.data_len = 0; + + cctxt->callback(VCD_EVT_RESP_OUTPUT_FLUSHED, + VCD_S_SUCCESS, &buf_entry->frame, + sizeof(struct vcd_frame_data), + cctxt, cctxt->client_data); + + buf_entry->in_use = false; + + count++; + } + + buf_entry = vcd_buffer_pool_entry_de_q(buf_pool); + } + buf_pool->in_use = 0; + + if (cctxt->sched_clnt_valid && count > 0) { + VCD_MSG_LOW("Updating scheduler O tkns = %u", count); + + sched_update_client_o_tkn(cctxt->dev_ctxt->sched_hdl, + cctxt->sched_clnt_hdl, false, + count * cctxt->sched_o_tkn_per_ip_frm); + } + + if (cctxt->ddl_hdl_valid && cctxt->decoding) { + prop_hdr.id = DDL_I_REQ_OUTPUT_FLUSH; + prop_hdr.sz = sizeof(u32); + count = 0x1; + + ddl_set_property(cctxt->ddl_handle, &prop_hdr, &count); + } +} + +u32 vcd_flush_buffers(struct vcd_clnt_ctxt *cctxt, u32 mode) +{ + struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt; + u32 rc = VCD_S_SUCCESS; + struct vcd_buffer_entry *buf_entry; + + VCD_MSG_LOW("vcd_flush_buffers:"); + + if (mode > VCD_FLUSH_ALL || !(mode & VCD_FLUSH_ALL)) { + VCD_MSG_ERROR("Invalid flush mode %d", mode); + + return VCD_ERR_ILLEGAL_PARM; + } + + VCD_MSG_MED("Flush mode %d requested", mode); + + if ((mode & VCD_FLUSH_INPUT) && cctxt->sched_clnt_valid) { + rc = vcd_map_sched_status(sched_flush_client_buffer( + dev_ctxt->sched_hdl, cctxt->sched_clnt_hdl, + (void **)&buf_entry)); + + while (!VCD_FAILED(rc) && rc != VCD_S_SCHED_QEMPTY && + buf_entry) { + if (buf_entry->virt_addr) { + cctxt->callback(VCD_EVT_RESP_INPUT_FLUSHED, + VCD_S_SUCCESS, &buf_entry->frame, + sizeof(struct vcd_frame_data), cctxt, + cctxt->client_data); + } + + buf_entry->in_use = false; + VCD_BUFFERPOOL_INUSE_DECREMENT( + cctxt->in_buf_pool.in_use); + buf_entry = NULL; + rc = vcd_map_sched_status(sched_flush_client_buffer( + dev_ctxt->sched_hdl, cctxt->sched_clnt_hdl, + (void **)&buf_entry)); + } + + } + 
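+	/* If the scheduler flush itself failed, propagate the error before
+	 * touching output buffers or the client's flush-mode bookkeeping. */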
VCD_FAILED_RETURN(rc, "Failed: sched_flush_client_buffer"); + + if (cctxt->status.frame_submitted > 0) { + cctxt->status.flush_mode |= mode; + } else { + if (mode & VCD_FLUSH_OUTPUT) { + vcd_flush_output_buffers(cctxt); + vcd_release_all_clnt_frm_transc(cctxt); + } + + } + + return VCD_S_SUCCESS; +} + +void vcd_flush_buffers_in_err_fatal(struct vcd_clnt_ctxt *cctxt) +{ + VCD_MSG_LOW("\n vcd_flush_buffers_in_err_fatal:"); + vcd_flush_buffers(cctxt, VCD_FLUSH_ALL); + vcd_flush_in_use_buffer_pool_entries(cctxt, &cctxt->in_buf_pool, + VCD_EVT_RESP_INPUT_FLUSHED); + vcd_flush_in_use_buffer_pool_entries(cctxt, &cctxt->out_buf_pool, + VCD_EVT_RESP_OUTPUT_FLUSHED); + cctxt->status.flush_mode = VCD_FLUSH_ALL; + vcd_send_flush_done(cctxt, VCD_S_SUCCESS); +} + +u32 vcd_init_client_context(struct vcd_clnt_ctxt *cctxt) +{ + u32 rc; + + VCD_MSG_LOW("vcd_init_client_context:"); + + rc = ddl_open(&cctxt->ddl_handle, cctxt->decoding); + + VCD_FAILED_RETURN(rc, "Failed: ddl_open"); + cctxt->ddl_hdl_valid = true; + + cctxt->clnt_state.state = VCD_CLIENT_STATE_OPEN; + cctxt->clnt_state.state_table = vcd_get_client_state_table( + VCD_CLIENT_STATE_OPEN); + + cctxt->signature = VCD_SIGNATURE; + cctxt->live = true; + + cctxt->cmd_q.pending_cmd = VCD_CMD_NONE; + + return rc; +} + +void vcd_destroy_client_context(struct vcd_clnt_ctxt *cctxt) +{ + struct vcd_dev_ctxt *dev_ctxt; + struct vcd_clnt_ctxt *client; + u32 rc = VCD_S_SUCCESS; + int idx; + + VCD_MSG_LOW("vcd_destroy_client_context:"); + + dev_ctxt = cctxt->dev_ctxt; + + if (cctxt == dev_ctxt->cctxt_list_head) { + VCD_MSG_MED("Clnt list head clnt being removed"); + + dev_ctxt->cctxt_list_head = cctxt->next; + } else { + client = dev_ctxt->cctxt_list_head; + while (client && cctxt != client->next) + client = client->next; + + if (client) + client->next = cctxt->next; + + if (!client) { + rc = VCD_ERR_FAIL; + + VCD_MSG_ERROR("Client not found in client list"); + } + } + + if (VCD_FAILED(rc)) + return; + + if (cctxt->sched_clnt_valid) { + rc = VCD_S_SUCCESS; + while (!VCD_FAILED(rc) && rc != VCD_S_SCHED_QEMPTY) { + + rc = vcd_map_sched_status(sched_flush_client_buffer( + dev_ctxt->sched_hdl, cctxt->sched_clnt_hdl, + (void *)&idx)); + if (VCD_FAILED(rc)) + VCD_MSG_ERROR("\n Failed: " + "sched_flush_client_buffer"); + } + + rc = vcd_map_sched_status(sched_remove_client( + dev_ctxt->sched_hdl, cctxt->sched_clnt_hdl)); + if (VCD_FAILED(rc)) + VCD_MSG_ERROR("\n Failed: sched_remove_client"); + + cctxt->sched_clnt_valid = false; + } + + if (cctxt->seq_hdr.addr) { + dma_free_coherent(NULL, cctxt->seq_hdr.sz, cctxt->seq_hdr.addr, + cctxt->seq_hdr_phys_addr); + cctxt->seq_hdr.addr = NULL; + } + + vcd_free_buffers_internal(cctxt, &cctxt->in_buf_pool); + vcd_free_buffers_internal(cctxt, &cctxt->out_buf_pool); + vcd_free_buffer_pool_entries(&cctxt->in_buf_pool); + vcd_free_buffer_pool_entries(&cctxt->out_buf_pool); + vcd_release_all_clnt_transc(cctxt); + + if (cctxt->ddl_hdl_valid) { + ddl_close(&cctxt->ddl_handle); + cctxt->ddl_hdl_valid = false; + } + kfree(cctxt); +} + +u32 vcd_check_for_client_context(struct vcd_dev_ctxt *dev_ctxt, s32 driver_id) +{ + struct vcd_clnt_ctxt *client; + + client = dev_ctxt->cctxt_list_head; + while (client && client->driver_id != driver_id) + client = client->next; + + if (!client) + return false; + else + return true; +} + +u32 vcd_validate_driver_handle(struct vcd_dev_ctxt *dev_ctxt, s32 driver_handle) +{ + driver_handle--; + + if (driver_handle < 0 || driver_handle >= VCD_DRIVER_INSTANCE_MAX || + !dev_ctxt->driver_ids[driver_handle]) { + 
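+		/* Handle index is out of range or was never allocated. */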
return false; + } else { + return true; + } +} + +u32 vcd_client_cmd_en_q(struct vcd_clnt_ctxt *cctxt, + enum vcd_command_type command) +{ + u32 result; + + if (cctxt->cmd_q.pending_cmd == VCD_CMD_NONE) { + cctxt->cmd_q.pending_cmd = command; + result = true; + } else { + result = false; + } + + return result; +} + +void vcd_client_cmd_flush_and_en_q(struct vcd_clnt_ctxt *cctxt, + enum vcd_command_type command) +{ + cctxt->cmd_q.pending_cmd = command; +} + +u32 vcd_client_cmd_de_q(struct vcd_clnt_ctxt *cctxt, + enum vcd_command_type *command) +{ + if (cctxt->cmd_q.pending_cmd == VCD_CMD_NONE) + return false; + + *command = cctxt->cmd_q.pending_cmd; + cctxt->cmd_q.pending_cmd = VCD_CMD_NONE; + + return true; +} + +u32 vcd_get_next_queued_client_cmd(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_clnt_ctxt **cctxt, enum vcd_command_type *command) +{ + struct vcd_clnt_ctxt *client = dev_ctxt->cctxt_list_head; + u32 result = false; + + while (client && !result) { + *cctxt = client; + result = vcd_client_cmd_de_q(client, command); + client = client->next; + } + return result; +} + +u32 vcd_map_sched_status(enum sched_status sched_status) +{ + u32 rc = VCD_S_SUCCESS; + + switch (sched_status) { + + case SCHED_S_OK: + rc = VCD_S_SUCCESS; + break; + + case SCHED_S_EOF: + rc = VCD_S_SCHED_EOS; + break; + + case SCHED_S_QEMPTY: + rc = VCD_S_SCHED_QEMPTY; + break; + + case SCHED_S_QFULL: + rc = VCD_S_SCHED_QFULL; + break; + + default: + rc = VCD_ERR_FAIL; + break; + } + + return rc; +} + +u32 vcd_submit_cmd_sess_start(struct vcd_transc *transc) +{ + u32 rc; + struct vcd_phys_sequence_hdr seq_hdr; + + VCD_MSG_LOW("vcd_submit_cmd_sess_start:"); + + if (transc->cctxt->decoding) { + + if (transc->cctxt->seq_hdr.addr) { + seq_hdr.sz = transc->cctxt->seq_hdr.sz; + seq_hdr.addr = transc->cctxt->seq_hdr_phys_addr; + + rc = ddl_decode_start(transc->cctxt->ddl_handle, + &seq_hdr, (void *)transc); + } else { + rc = ddl_decode_start(transc->cctxt->ddl_handle, NULL, + (void *)transc); + } + + } else { + rc = ddl_encode_start(transc->cctxt->ddl_handle, + (void *)transc); + } + if (!VCD_FAILED(rc)) { + transc->cctxt->status.cmd_submitted++; + vcd_device_timer_start(transc->cctxt->dev_ctxt); + } else + VCD_MSG_ERROR("rc = 0x%x. Failed: ddl start", rc); + + return rc; +} + +u32 vcd_submit_cmd_sess_end(struct vcd_transc *transc) +{ + u32 rc; + + VCD_MSG_LOW("vcd_submit_cmd_sess_end:"); + + if (transc->cctxt->decoding) { + rc = ddl_decode_end(transc->cctxt->ddl_handle, + (void *)transc); + } else { + rc = ddl_encode_end(transc->cctxt->ddl_handle, + (void *)transc); + } + if (!VCD_FAILED(rc)) { + transc->cctxt->status.cmd_submitted++; + vcd_device_timer_start(transc->cctxt->dev_ctxt); + } else + VCD_MSG_ERROR("rc = 0x%x. 
Failed: ddl end", rc); + + return rc; +} + +void vcd_submit_cmd_client_close(struct vcd_clnt_ctxt *cctxt) +{ + ddl_close(&cctxt->ddl_handle); + cctxt->ddl_hdl_valid = false; + cctxt->status.cleaning_up = false; + if (cctxt->status.close_pending) { + vcd_destroy_client_context(cctxt); + vcd_handle_for_last_clnt_close(cctxt->dev_ctxt, true); + } +} + +u32 vcd_submit_command_in_continue(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_transc *transc) +{ + struct vcd_property_hdr prop_hdr; + struct vcd_clnt_ctxt *client = NULL; + enum vcd_command_type cmd = VCD_CMD_NONE; + u32 rc = VCD_ERR_FAIL; + u32 result = false; + u32 flush = 0; + u32 event = 0; + + VCD_MSG_LOW("\n vcd_submit_command_in_continue:"); + + while (1) { + result = vcd_get_next_queued_client_cmd(dev_ctxt, &client, + &cmd); + + if (!result) + break; + + transc->type = cmd; + transc->cctxt = client; + + switch (cmd) { + case VCD_CMD_CODEC_START: + rc = vcd_submit_cmd_sess_start(transc); + event = VCD_EVT_RESP_START; + break; + case VCD_CMD_CODEC_STOP: + rc = vcd_submit_cmd_sess_end(transc); + event = VCD_EVT_RESP_STOP; + break; + case VCD_CMD_OUTPUT_FLUSH: + prop_hdr.id = DDL_I_REQ_OUTPUT_FLUSH; + prop_hdr.sz = sizeof(u32); + flush = 0x1; + ddl_set_property(client->ddl_handle, &prop_hdr, &flush); + vcd_release_command_channel(dev_ctxt, transc); + rc = VCD_S_SUCCESS; + break; + case VCD_CMD_CLIENT_CLOSE: + vcd_submit_cmd_client_close(client); + vcd_release_command_channel(dev_ctxt, transc); + rc = VCD_S_SUCCESS; + break; + default: + VCD_MSG_ERROR("\n vcd_submit_command: Unknown" + "command %d", (int)cmd); + vcd_assert(); + break; + } + + if (!VCD_FAILED(rc)) { + break; + } else { + VCD_MSG_ERROR("vcd_submit_command %d: failed 0x%x", + cmd, rc); + client->callback(event, rc, NULL, 0, client, + client->client_data); + } + } + return result; +} + +u32 vcd_schedule_frame(struct vcd_dev_ctxt *dev_ctxt, struct vcd_clnt_ctxt + **pp_cctxt, struct vcd_buffer_entry **pp_ip_buf_entry) +{ + u32 rc = VCD_S_SUCCESS; + VCD_MSG_LOW("vcd_schedule_frame:"); + + if (!dev_ctxt->cctxt_list_head) { + VCD_MSG_HIGH("Client list empty"); + return false; + } + + rc = vcd_map_sched_status(sched_de_queue_frame(dev_ctxt->sched_hdl, + (void **) pp_ip_buf_entry, (void **) pp_cctxt)); + if (VCD_FAILED(rc)) { + VCD_MSG_FATAL("vcd_submit_frame: sched_de_queue_frame" + "failed 0x%x", rc); + return false; + } + + if (rc == VCD_S_SCHED_QEMPTY) { + VCD_MSG_HIGH("No frame available. Sched queues are empty"); + return false; + } + + if (!*pp_cctxt || !*pp_ip_buf_entry) { + VCD_MSG_FATAL("Sched returned invalid values. 
ctxt=%p," + "ipbuf=%p", *pp_cctxt, *pp_ip_buf_entry); + return false; + } + + if (rc == VCD_S_SCHED_EOS) + (*pp_ip_buf_entry)->frame.flags |= VCD_FRAME_FLAG_EOS; + + return true; +} + +void vcd_try_submit_frame(struct vcd_dev_ctxt *dev_ctxt) +{ + struct vcd_transc *transc; + u32 rc = VCD_S_SUCCESS; + struct vcd_clnt_ctxt *cctxt = NULL; + struct vcd_buffer_entry *ip_buf_entry = NULL; + u32 result = false; + + VCD_MSG_LOW("vcd_try_submit_frame:"); + + if (!vcd_get_frame_channel(dev_ctxt, &transc)) + return; + + if (!vcd_schedule_frame(dev_ctxt, &cctxt, &ip_buf_entry)) { + vcd_release_frame_channel(dev_ctxt, transc); + return; + } + + rc = vcd_power_event(dev_ctxt, cctxt, VCD_EVT_PWR_CLNT_CMD_BEGIN); + + if (!VCD_FAILED(rc)) { + transc->cctxt = cctxt; + transc->ip_buf_entry = ip_buf_entry; + + result = vcd_submit_frame(dev_ctxt, transc); + } else { + VCD_MSG_ERROR("Failed: VCD_EVT_PWR_CLNT_CMD_BEGIN"); + + vcd_requeue_input_frame(dev_ctxt, cctxt, ip_buf_entry); + + vcd_map_sched_status(sched_update_client_o_tkn( + dev_ctxt->sched_hdl, cctxt->sched_clnt_hdl, + true, cctxt->sched_o_tkn_per_ip_frm)); + } + + if (!result) { + vcd_release_frame_channel(dev_ctxt, transc); + vcd_power_event(dev_ctxt, cctxt, VCD_EVT_PWR_CLNT_CMD_FAIL); + } +} + +u32 vcd_submit_frame(struct vcd_dev_ctxt *dev_ctxt, struct vcd_transc *transc) +{ + struct vcd_clnt_ctxt *cctxt = NULL; + struct vcd_frame_data *ip_frm_entry; + struct vcd_buffer_entry *op_buf_entry = NULL; + u32 rc = VCD_S_SUCCESS; + u32 evcode = 0; + struct ddl_frame_data_tag ddl_ip_frm; + struct ddl_frame_data_tag ddl_op_frm; + + VCD_MSG_LOW("vcd_submit_frame:"); + cctxt = transc->cctxt; + ip_frm_entry = &transc->ip_buf_entry->frame; + + transc->op_buf_entry = op_buf_entry; + transc->ip_frm_tag = ip_frm_entry->ip_frm_tag; + transc->time_stamp = ip_frm_entry->time_stamp; + ip_frm_entry->ip_frm_tag = (u32) transc; + memset(&ddl_ip_frm, 0, sizeof(ddl_ip_frm)); + memset(&ddl_op_frm, 0, sizeof(ddl_op_frm)); + if (cctxt->decoding) { + evcode = CLIENT_STATE_EVENT_NUMBER(pf_decode_frame); + ddl_ip_frm.vcd_frm = *ip_frm_entry; + rc = ddl_decode_frame(cctxt->ddl_handle, &ddl_ip_frm, + (void *) transc); + } else { + op_buf_entry = vcd_buffer_pool_entry_de_q(&cctxt->out_buf_pool); + if (!op_buf_entry) { + VCD_MSG_ERROR("Sched provided frame when no" + "op buffer was present"); + rc = VCD_ERR_FAIL; + } else { + op_buf_entry->in_use = true; + cctxt->out_buf_pool.in_use++; + ddl_ip_frm.vcd_frm = *ip_frm_entry; + ddl_ip_frm.frm_delta = vcd_calculate_frame_delta(cctxt, + ip_frm_entry); + + ddl_op_frm.vcd_frm = op_buf_entry->frame; + + evcode = CLIENT_STATE_EVENT_NUMBER(pf_encode_frame); + + rc = ddl_encode_frame(cctxt->ddl_handle, &ddl_ip_frm, + &ddl_op_frm, (void *) transc); + } + } + ip_frm_entry->ip_frm_tag = transc->ip_frm_tag; + if (!VCD_FAILED(rc)) { + vcd_device_timer_start(dev_ctxt); + cctxt->status.frame_submitted++; + if (ip_frm_entry->flags & VCD_FRAME_FLAG_EOS) + vcd_do_client_state_transition(cctxt, + VCD_CLIENT_STATE_EOS, evcode); + } else { + VCD_MSG_ERROR("Frame submission failed. 
rc = 0x%x", rc); + vcd_handle_submit_frame_failed(dev_ctxt, transc); + } + return true; +} + +u32 vcd_try_submit_frame_in_continue(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_transc *transc) +{ + struct vcd_clnt_ctxt *cctxt = NULL; + struct vcd_buffer_entry *ip_buf_entry = NULL; + + VCD_MSG_LOW("vcd_try_submit_frame_in_continue:"); + + if (!vcd_schedule_frame(dev_ctxt, &cctxt, &ip_buf_entry)) + return false; + + transc->cctxt = cctxt; + transc->ip_buf_entry = ip_buf_entry; + + return vcd_submit_frame(dev_ctxt, transc); +} + +u32 vcd_process_cmd_sess_start(struct vcd_clnt_ctxt *cctxt) +{ + struct vcd_transc *transc; + u32 rc = VCD_S_SUCCESS; + + VCD_MSG_LOW("vcd_process_cmd_sess_start:"); + if (vcd_get_command_channel(cctxt->dev_ctxt, &transc)) { + rc = vcd_power_event(cctxt->dev_ctxt, cctxt, + VCD_EVT_PWR_CLNT_CMD_BEGIN); + + if (!VCD_FAILED(rc)) { + transc->type = VCD_CMD_CODEC_START; + transc->cctxt = cctxt; + rc = vcd_submit_cmd_sess_start(transc); + } else { + VCD_MSG_ERROR("Failed: VCD_EVT_PWR_CLNT_CMD_BEGIN"); + } + + if (VCD_FAILED(rc)) { + vcd_release_command_channel(cctxt->dev_ctxt, + transc); + } + } else { + u32 result; + + result = vcd_client_cmd_en_q(cctxt, VCD_CMD_CODEC_START); + if (!result) { + rc = VCD_ERR_BUSY; + VCD_MSG_ERROR("%s(): vcd_client_cmd_en_q() " + "failed\n", __func__); + vcd_assert(); + } + } + + if (VCD_FAILED(rc)) { + vcd_power_event(cctxt->dev_ctxt, cctxt, + VCD_EVT_PWR_CLNT_CMD_FAIL); + } + + return rc; +} + +void vcd_send_frame_done_in_eos(struct vcd_clnt_ctxt *cctxt, + struct vcd_frame_data *input_frame, u32 valid_opbuf) +{ + VCD_MSG_LOW("vcd_send_frame_done_in_eos:"); + + if (!input_frame->virt_addr && !valid_opbuf) { + VCD_MSG_MED("Sending NULL output with EOS"); + + cctxt->out_buf_pool.entries[0].frame.flags = VCD_FRAME_FLAG_EOS; + cctxt->out_buf_pool.entries[0].frame.data_len = 0; + cctxt->out_buf_pool.entries[0].frame.time_stamp = + input_frame->time_stamp; + cctxt->out_buf_pool.entries[0].frame.ip_frm_tag = + input_frame->ip_frm_tag; + + cctxt->callback(VCD_EVT_RESP_OUTPUT_DONE, VCD_S_SUCCESS, + &cctxt->out_buf_pool.entries[0].frame, + sizeof(struct vcd_frame_data), cctxt, + cctxt->client_data); + + memset(&cctxt->out_buf_pool.entries[0].frame, 0, + sizeof(struct vcd_frame_data)); + } else if (!input_frame->data_len) { + if (cctxt->decoding) + vcd_send_frame_done_in_eos_for_dec(cctxt, input_frame); + else + vcd_send_frame_done_in_eos_for_enc(cctxt, input_frame); + } +} + +void vcd_send_frame_done_in_eos_for_dec(struct vcd_clnt_ctxt *cctxt, + struct vcd_frame_data *input_frame) +{ + struct vcd_buffer_entry *buf_entry; + struct vcd_property_hdr prop_hdr; + u32 rc; + struct ddl_frame_data_tag ddl_frm; + + prop_hdr.id = DDL_I_DPB_RETRIEVE; + prop_hdr.sz = sizeof(struct ddl_frame_data_tag); + memset(&ddl_frm, 0, sizeof(ddl_frm)); + rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr, &ddl_frm); + + if (VCD_FAILED(rc) || !ddl_frm.vcd_frm.virt_addr) { + cctxt->status.eos_trig_ip_frm = *input_frame; + cctxt->status.eos_wait_for_op_buf = true; + + return; + } + + buf_entry = vcd_find_buffer_pool_entry(&cctxt->out_buf_pool, + ddl_frm.vcd_frm.virt_addr); + if (!buf_entry) { + VCD_MSG_ERROR("Unrecognized buffer address provided %p", + ddl_frm.vcd_frm.virt_addr); + vcd_assert(); + } else { + vcd_map_sched_status(sched_update_client_o_tkn( + cctxt->dev_ctxt->sched_hdl, cctxt->sched_clnt_hdl,\ + false, cctxt->sched_o_tkn_per_ip_frm)); + + VCD_MSG_MED("Sending non-NULL output with EOS"); + + buf_entry->frame.data_len = 0; + buf_entry->frame.offset = 0; + 
buf_entry->frame.flags |= VCD_FRAME_FLAG_EOS; + buf_entry->frame.ip_frm_tag = input_frame->ip_frm_tag; + buf_entry->frame.time_stamp = input_frame->time_stamp; + + cctxt->callback(VCD_EVT_RESP_OUTPUT_DONE, VCD_S_SUCCESS, + &buf_entry->frame, sizeof(struct vcd_frame_data), + cctxt, cctxt->client_data); + + buf_entry->in_use = false; + VCD_BUFFERPOOL_INUSE_DECREMENT(cctxt->out_buf_pool.in_use); + } +} + +void vcd_send_frame_done_in_eos_for_enc(struct vcd_clnt_ctxt *cctxt, + struct vcd_frame_data *input_frame) +{ + struct vcd_buffer_entry *op_buf_entry; + + if (!cctxt->out_buf_pool.q_len) { + cctxt->status.eos_trig_ip_frm = *input_frame; + + cctxt->status.eos_wait_for_op_buf = true; + + return; + } + + op_buf_entry = vcd_buffer_pool_entry_de_q(&cctxt->out_buf_pool); + if (!op_buf_entry) { + VCD_MSG_ERROR("%s(): vcd_buffer_pool_entry_de_q() " + "failed\n", __func__); + vcd_assert(); + } else { + vcd_map_sched_status(sched_update_client_o_tkn( + cctxt->dev_ctxt->sched_hdl, cctxt->sched_clnt_hdl, + false, cctxt->sched_o_tkn_per_ip_frm)); + + VCD_MSG_MED("Sending non-NULL output with EOS"); + + op_buf_entry->frame.data_len = 0; + op_buf_entry->frame.flags |= VCD_FRAME_FLAG_EOS; + op_buf_entry->frame.ip_frm_tag = input_frame->ip_frm_tag; + op_buf_entry->frame.time_stamp = input_frame->time_stamp; + + cctxt->callback(VCD_EVT_RESP_OUTPUT_DONE, VCD_S_SUCCESS, + &op_buf_entry->frame, sizeof(struct vcd_frame_data), + cctxt, cctxt->client_data); + } +} + +u32 vcd_handle_recvd_eos(struct vcd_clnt_ctxt *cctxt, + struct vcd_frame_data *input_frame, u32 *pb_eos_handled) +{ + union sched_value_type sched_val; + u32 rc; + + VCD_MSG_LOW("vcd_handle_recvd_eos:"); + + *pb_eos_handled = false; + + if (input_frame->virt_addr && input_frame->data_len) + return VCD_S_SUCCESS; + + input_frame->data_len = 0; + + rc = vcd_map_sched_status(sched_get_client_param( + cctxt->dev_ctxt->sched_hdl, cctxt->sched_clnt_hdl, + SCHED_I_CLNT_CURRQLEN, &sched_val)); + + VCD_FAILED_RETURN(rc, "Failed: sched_get_client_param"); + + if (sched_val.un_value > 0) { + rc = vcd_map_sched_status(sched_mark_client_eof( + cctxt->dev_ctxt->sched_hdl, cctxt->sched_clnt_hdl)); + + if (!VCD_FAILED(rc)) { + *pb_eos_handled = true; + } else { + VCD_MSG_ERROR("rc = 0x%x. 
Failed: " + "sched_mark_client_eof", rc); + } + + } else if (cctxt->decoding && !input_frame->virt_addr) { + rc = vcd_map_sched_status(sched_update_client_o_tkn( + cctxt->dev_ctxt->sched_hdl, cctxt->sched_clnt_hdl, true, + cctxt->sched_o_tkn_per_ip_frm)); + } else if (!cctxt->decoding) { + + vcd_send_frame_done_in_eos(cctxt, input_frame, false); + + if (cctxt->status.eos_wait_for_op_buf) { + vcd_do_client_state_transition(cctxt, + VCD_CLIENT_STATE_EOS, + CLIENT_STATE_EVENT_NUMBER(pf_encode_frame)); + } + + *pb_eos_handled = true; + + } + + if (*pb_eos_handled && input_frame->virt_addr && + !input_frame->data_len) { + cctxt->callback(VCD_EVT_RESP_INPUT_DONE, VCD_S_SUCCESS, + input_frame, sizeof(struct vcd_frame_data), cctxt, + cctxt->client_data); + } + return rc; +} + +u32 vcd_handle_first_decode_frame(struct vcd_clnt_ctxt *cctxt) +{ + struct ddl_property_dec_pic_buffers dpb; + struct vcd_property_hdr prop_hdr; + u32 rc; + u16 i; + u16 q_cntr; + struct ddl_frame_data_tag *frm_entry; + struct ddl_frame_data_tag ddl_frm; + struct vcd_buffer_pool *out_buf_pool; + + VCD_MSG_LOW("vcd_handle_first_decode_frame:"); + + if (!cctxt->in_buf_pool.entries || !cctxt->out_buf_pool.entries || + cctxt->in_buf_pool.validated != + cctxt->in_buf_pool.count || + cctxt->out_buf_pool.validated != + cctxt->out_buf_pool.count) { + VCD_MSG_ERROR("Buffer pool is not completely setup yet"); + + return VCD_ERR_BAD_STATE; + } + + rc = vcd_add_client_to_sched(cctxt); + + VCD_FAILED_RETURN(rc, "Failed: vcd_add_client_to_sched"); + + prop_hdr.id = DDL_I_DPB; + prop_hdr.sz = sizeof(dpb); + + out_buf_pool = &cctxt->out_buf_pool; + + frm_entry = kmalloc(sizeof(struct ddl_frame_data_tag) * + out_buf_pool->count, GFP_KERNEL); + if (!frm_entry) { + VCD_MSG_ERROR("Memory allocation failure"); + return VCD_ERR_ALLOC_FAIL; + } + + for (i = 1; i <= out_buf_pool->count; i++) + frm_entry[i - 1].vcd_frm = out_buf_pool->entries[i].frame; + + dpb.dec_pic_buffers = frm_entry; + dpb.no_of_dec_pic_buf = out_buf_pool->count; + rc = ddl_set_property(cctxt->ddl_handle, &prop_hdr, &dpb); + + kfree(frm_entry); + + VCD_FAILED_RETURN(rc, "Failed: DDL set DDL_I_DPB"); + + if (out_buf_pool->q_len > 0) { + prop_hdr.id = DDL_I_DPB_RELEASE; + prop_hdr.sz = sizeof(struct ddl_frame_data_tag); + + for (i = 0, q_cntr = out_buf_pool->q_head; !VCD_FAILED(rc) && + i < out_buf_pool->q_len; i++, + q_cntr = (q_cntr + 1) % out_buf_pool->count) { + + ddl_frm.vcd_frm = out_buf_pool->queue[q_cntr]->frame; + + rc = ddl_set_property(cctxt->ddl_handle, &prop_hdr, + &ddl_frm); + + if (VCD_FAILED(rc)) { + VCD_MSG_ERROR + ("Error returning output buffer to HW"); + + out_buf_pool->queue[q_cntr]->in_use = false; + } else { + out_buf_pool->queue[q_cntr]->in_use = true; + out_buf_pool->in_use++; + } + } + + if (VCD_FAILED(rc)) + return rc; + rc = vcd_map_sched_status(sched_update_client_o_tkn( + cctxt->dev_ctxt->sched_hdl, cctxt->sched_clnt_hdl, true, + cctxt->sched_o_tkn_per_ip_frm * out_buf_pool->q_len)); + } + return rc; +} + +u32 vcd_setup_with_ddl_capabilities(struct vcd_dev_ctxt *dev_ctxt) +{ + struct vcd_property_hdr prop_hdr; + struct ddl_property_capability capability; + u32 rc = VCD_S_SUCCESS; + + VCD_MSG_LOW("vcd_setup_with_ddl_capabilities:"); + + if (dev_ctxt->ddl_cmd_ch_depth) + goto out; + + prop_hdr.id = DDL_I_CAPABILITY; + prop_hdr.sz = sizeof(capability); + + /* + * Since this is underlying core's property we don't need a + * ddl client handle. 
+ */ + rc = ddl_get_property(NULL, &prop_hdr, &capability); + + if (VCD_FAILED(rc)) + goto out; + + /* + ** Allocate the transaction table. + */ + dev_ctxt->trans_tbl_size = VCD_MAX_CLIENT_TRANSACTIONS * + capability.max_num_client + capability.general_command_depth; + + dev_ctxt->trans_tbl = kzalloc(sizeof(struct vcd_transc) * + dev_ctxt->trans_tbl_size, GFP_KERNEL); + if (!dev_ctxt->trans_tbl) { + VCD_MSG_ERROR("Transaction table alloc failed"); + rc = VCD_ERR_ALLOC_FAIL; + goto out; + } + + /* + ** Set the command/frame depth + */ + dev_ctxt->ddl_cmd_concurrency = !capability.exclusive; + dev_ctxt->ddl_frame_ch_depth = capability.frame_command_depth; + dev_ctxt->ddl_cmd_ch_depth = capability.general_command_depth; + + vcd_reset_device_channels(dev_ctxt); + + dev_ctxt->hw_time_out = capability.ddl_time_out_in_ms; + +out: + return rc; +} + +struct vcd_transc *vcd_get_free_trans_tbl_entry(struct vcd_dev_ctxt *dev_ctxt) +{ + u8 i; + + if (!dev_ctxt->trans_tbl) + return NULL; + + i = 0; + while (i < dev_ctxt->trans_tbl_size && dev_ctxt->trans_tbl[i].in_use) + i++; + + if (i == dev_ctxt->trans_tbl_size) { + return NULL; + } else { + memset(&dev_ctxt->trans_tbl[i], 0, sizeof(struct vcd_transc)); + + dev_ctxt->trans_tbl[i].in_use = true; + + return &dev_ctxt->trans_tbl[i]; + } +} + +void vcd_release_trans_tbl_entry(struct vcd_transc *trans_entry) +{ + if (trans_entry) + trans_entry->in_use = false; +} + +u32 vcd_add_client_to_sched(struct vcd_clnt_ctxt *cctxt) +{ + struct vcd_property_hdr prop_hdr; + struct sched_client_init_param sched_input_init; + u32 rc, seqhdr_present = 0;; + + if (cctxt->sched_clnt_valid) { + VCD_MSG_HIGH("Schedulder client is already added "); + return VCD_S_SUCCESS; + } + + prop_hdr.id = DDL_I_FRAME_PROC_UNITS; + prop_hdr.sz = sizeof(cctxt->frm_p_units); + rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr, + &cctxt->frm_p_units); + VCD_FAILED_RETURN(rc, "Failed: Get DDL_I_FRAME_PROC_UNITS"); + + if (cctxt->decoding) { + cctxt->frm_rate.fps_numerator = VCD_DEC_INITIAL_FRAME_RATE; + cctxt->frm_rate.fps_denominator = 1; + + sched_input_init.o_tkn_per_ip_frm = + VCD_SCHEDULER_DEC_DFLT_OTKN_PERFRM; + cctxt->sched_o_tkn_per_ip_frm = + VCD_SCHEDULER_DEC_DFLT_OTKN_PERFRM; + + sched_input_init.o_tkn_max = cctxt->sched_o_tkn_per_ip_frm * + cctxt->out_buf_pool.count+1; + } else { + sched_input_init.o_tkn_per_ip_frm = + VCD_SCHEDULER_ENC_DFLT_OTKN_PERFRM; + cctxt->sched_o_tkn_per_ip_frm = + VCD_SCHEDULER_ENC_DFLT_OTKN_PERFRM; + prop_hdr.id = DDL_I_SEQHDR_PRESENT; + prop_hdr.sz = sizeof(seqhdr_present); + rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr, + &seqhdr_present); + if (!VCD_FAILED(rc)) { + if (seqhdr_present == 0x1) { + VCD_MSG_MED("Sequence hdr present"); + sched_input_init.o_tkn_per_ip_frm++; + } + sched_input_init.o_tkn_max = cctxt->out_buf_pool.count; + prop_hdr.id = VCD_I_FRAME_RATE; + prop_hdr.sz = sizeof(cctxt->frm_rate); + rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr, + &cctxt->frm_rate); + } + } + + VCD_FAILED_RETURN(rc, "Failed: DDL get VCD_I_FRAME_RATE"); + + if (cctxt->live) + sched_input_init.client_ctgy = SCHED_CLNT_RT_NOBUFF; + else + sched_input_init.client_ctgy = SCHED_CLNT_NONRT; + + sched_input_init.max_queue_len = max(cctxt->in_buf_pool.count, + VCD_MAX_SCHEDULER_QUEUE_SIZE(cctxt->frm_rate.fps_numerator, + cctxt->frm_rate.fps_denominator)); + cctxt->reqd_perf_lvl = cctxt->frm_p_units * + cctxt->frm_rate.fps_numerator / cctxt->frm_rate.fps_denominator; + + sched_input_init.frm_rate.numer = cctxt->frm_rate.fps_numerator; + 
sched_input_init.frm_rate.denom = cctxt->frm_rate.fps_denominator; + sched_input_init.tkn_per_frm = cctxt->frm_p_units; + sched_input_init.alloc_p_tkn_rate = cctxt->reqd_perf_lvl; + + sched_input_init.o_tkn_init = 0; + + sched_input_init.client_data = cctxt; + + rc = vcd_map_sched_status(sched_add_client(cctxt->dev_ctxt->sched_hdl, + &sched_input_init, &cctxt->sched_clnt_hdl)); + + if (!VCD_FAILED(rc)) + cctxt->sched_clnt_valid = true; + + return rc; +} + +u32 vcd_handle_input_done(struct vcd_clnt_ctxt *cctxt, void *payload, u32 event, + u32 status) +{ + struct vcd_transc *transc; + struct ddl_frame_data_tag *frame = (struct ddl_frame_data_tag *)payload; + u32 rc; + + if (!cctxt->status.frame_submitted && !cctxt->status.frame_delayed) { + VCD_MSG_ERROR("Input done was not expected"); + vcd_assert(); + + return VCD_ERR_BAD_STATE; + } + + rc = vcd_validate_io_done_pyld(payload, status); + VCD_FAILED_RETURN(rc, "Bad input done payload"); + + transc = (struct vcd_transc *)frame->vcd_frm.ip_frm_tag; + + if (transc->ip_buf_entry->frame.virt_addr != frame->vcd_frm.virt_addr || + !transc->ip_buf_entry->in_use) { + VCD_MSG_ERROR("Bad frm transaction state"); + vcd_assert(); + } + + frame->vcd_frm.ip_frm_tag = transc->ip_frm_tag; + + cctxt->callback(event, status, &frame->vcd_frm, + sizeof(struct vcd_frame_data), cctxt, cctxt->client_data); + + transc->frame_type = frame->vcd_frm.frame_type; + + transc->ip_buf_entry->in_use = false; + VCD_BUFFERPOOL_INUSE_DECREMENT(cctxt->in_buf_pool.in_use); + transc->ip_buf_entry = NULL; + transc->input_done = true; + + if (transc->input_done && transc->frame_done) + transc->in_use = false; + + if (VCD_FAILED(status)) { + VCD_MSG_ERROR("INPUT_DONE returned err = 0x%x", status); + vcd_handle_input_done_failed(cctxt, transc); + } + + if (cctxt->status.frame_submitted > 0) + cctxt->status.frame_submitted--; + else + cctxt->status.frame_delayed--; + + if (!VCD_FAILED(status) && cctxt->decoding) { + if (frame->vcd_frm.interlaced) + vcd_handle_input_done_for_interlacing(cctxt); + if (frame->frm_trans_end) + vcd_handle_input_done_with_trans_end(cctxt); + } + + return VCD_S_SUCCESS; +} + +void vcd_handle_input_done_in_eos(struct vcd_clnt_ctxt *cctxt, void *payload, + u32 status) +{ + struct vcd_transc *transc; + struct ddl_frame_data_tag *frame = (struct ddl_frame_data_tag *)payload; + + if (VCD_FAILED(vcd_validate_io_done_pyld(payload, status))) + return; + + transc = (struct vcd_transc *)frame->vcd_frm.ip_frm_tag; + + vcd_handle_input_done(cctxt, payload, VCD_EVT_RESP_INPUT_DONE, status); + + if ((frame->vcd_frm.flags & VCD_FRAME_FLAG_EOS)) { + VCD_MSG_HIGH("Got input done for EOS initiator"); + transc->input_done = false; + transc->in_use = true; + } +} + +u32 vcd_validate_io_done_pyld(void *payload, u32 status) +{ + struct ddl_frame_data_tag *frame = (struct ddl_frame_data_tag *)payload; + + if (!frame) { + VCD_MSG_ERROR("Bad payload from DDL"); + vcd_assert(); + + return VCD_ERR_BAD_POINTER; + } + + if (!frame->vcd_frm.ip_frm_tag || frame->vcd_frm.ip_frm_tag == + VCD_FRAMETAG_INVALID) { + VCD_MSG_ERROR("bad input frame tag"); + vcd_assert(); + return VCD_ERR_BAD_POINTER; + } + + if (!frame->vcd_frm.virt_addr && status != VCD_ERR_INTRLCD_FIELD_DROP) + return VCD_ERR_BAD_POINTER; + + return VCD_S_SUCCESS; +} + +void vcd_handle_input_done_failed(struct vcd_clnt_ctxt *cctxt, + struct vcd_transc *transc) +{ + if (cctxt->decoding) { + vcd_map_sched_status(sched_update_client_o_tkn( + cctxt->dev_ctxt->sched_hdl, cctxt->sched_clnt_hdl, true, + 
cctxt->sched_o_tkn_per_ip_frm)); + + transc->in_use = false; + } +} + +void vcd_handle_input_done_for_interlacing(struct vcd_clnt_ctxt *cctxt) +{ + u32 rc; + + cctxt->status.int_field_cnt++; + + if (cctxt->status.int_field_cnt == 1) { + rc = vcd_map_sched_status(sched_update_client_o_tkn( + cctxt->dev_ctxt->sched_hdl, cctxt->sched_clnt_hdl, true, + cctxt->sched_o_tkn_per_ip_frm)); + + if (VCD_FAILED(rc)) + VCD_MSG_ERROR("sched_update_client_o_tkn failed"); + } else if (cctxt->status.int_field_cnt == VCD_DEC_NUM_INTERLACED_FIELDS) + cctxt->status.int_field_cnt = 0; +} + +void vcd_handle_input_done_with_trans_end(struct vcd_clnt_ctxt *cctxt) +{ + u32 rc; + union sched_value_type sched_val; + if (!cctxt->decoding) + return; + + if (cctxt->out_buf_pool.in_use < cctxt->out_buf_pool.buf_req.min_count) + return; + + rc = vcd_map_sched_status(sched_get_client_param( + cctxt->dev_ctxt->sched_hdl, cctxt->sched_clnt_hdl, + SCHED_I_CLNT_OTKNCURRENT, &sched_val)); + + if (VCD_FAILED(rc)) { + VCD_MSG_ERROR("sched_get_client_param:OTKNCURRENT failed"); + return; + } + + if (!sched_val.un_value) { + VCD_MSG_MED("All output buffers with core are pending display"); + + rc = vcd_map_sched_status(sched_update_client_o_tkn( + cctxt->dev_ctxt->sched_hdl, cctxt->sched_clnt_hdl, true, + cctxt->sched_o_tkn_per_ip_frm)); + + if (VCD_FAILED(rc)) + VCD_MSG_ERROR("sched_update_client_o_tkn failed"); + } +} + +u32 vcd_handle_output_required(struct vcd_clnt_ctxt *cctxt, void *payload, + u32 status) +{ + struct vcd_transc *transc; + struct ddl_frame_data_tag *frame = (struct ddl_frame_data_tag *)payload; + u32 rc; + + if (!cctxt->status.frame_submitted && !cctxt->status.frame_delayed) { + VCD_MSG_ERROR("\n Input done was not expected"); + return VCD_ERR_BAD_STATE; + } + + rc = vcd_validate_io_done_pyld(payload, status); + VCD_FAILED_RETURN(rc, "\n Bad input done payload"); + + transc = (struct vcd_transc *)frame->vcd_frm.ip_frm_tag; + + if (transc->ip_buf_entry->frame.virt_addr != frame->vcd_frm.virt_addr || + !transc->ip_buf_entry->in_use) { + VCD_MSG_ERROR("\n Bad frm transaction state"); + return VCD_ERR_BAD_STATE; + } + + rc = vcd_map_sched_status(sched_re_queue_frame( + cctxt->dev_ctxt->sched_hdl, cctxt->sched_clnt_hdl, + (void *) transc->ip_buf_entry)); + + VCD_FAILED_RETURN(rc, "Failed: sched_queue_frame"); + + if (transc->ip_buf_entry->frame.flags & VCD_FRAME_FLAG_EOS) { + rc = vcd_map_sched_status(sched_mark_client_eof( + cctxt->dev_ctxt->sched_hdl, + cctxt->sched_clnt_hdl)); + } + + VCD_FAILED_RETURN(rc, "Failed: sched_mark_client_eof"); + + transc->ip_buf_entry = NULL; + transc->in_use = false; + frame->frm_trans_end = true; + + if (VCD_FAILED(status)) + VCD_MSG_ERROR("\n OUTPUT_REQ returned err = 0x%x", status); + + if (cctxt->status.frame_submitted > 0) + cctxt->status.frame_submitted--; + else + cctxt->status.frame_delayed--; + + if (!VCD_FAILED(status) && cctxt->decoding && + frame->vcd_frm.interlaced) { + if (cctxt->status.int_field_cnt > 0) + VCD_MSG_ERROR("\n Not expected: OUTPUT_REQ" + "for 2nd interlace field"); + } + + return VCD_S_SUCCESS; +} + +u32 vcd_handle_output_required_in_flushing(struct vcd_clnt_ctxt *cctxt, + void *payload) +{ + u32 rc; + struct vcd_transc *transc; + + rc = vcd_validate_io_done_pyld(payload, VCD_S_SUCCESS); + VCD_FAILED_RETURN(rc, "Bad input done payload"); + + transc = (struct vcd_transc *) (((struct ddl_frame_data_tag *)payload)-> + vcd_frm.ip_frm_tag); + + ((struct ddl_frame_data_tag *)payload)->vcd_frm.interlaced = false; + + rc = vcd_handle_input_done(cctxt, payload, 
VCD_EVT_RESP_INPUT_FLUSHED, + VCD_S_SUCCESS); + + transc->in_use = false; + ((struct ddl_frame_data_tag *)payload)->frm_trans_end = true; + + return rc; +} + +u32 vcd_handle_frame_done(struct vcd_clnt_ctxt *cctxt, void *payload, u32 event, + u32 status) +{ + struct vcd_buffer_entry *op_buf_entry; + struct ddl_frame_data_tag *op_frm = (struct ddl_frame_data_tag *) + payload; + struct vcd_transc *transc; + u32 rc; + + rc = vcd_validate_io_done_pyld(payload, status); + VCD_FAILED_RETURN(rc, "Bad payload recvd"); + + transc = (struct vcd_transc *)op_frm->vcd_frm.ip_frm_tag; + + if (op_frm->vcd_frm.virt_addr) { + + if (!transc->op_buf_entry) { + op_buf_entry = vcd_find_buffer_pool_entry( + &cctxt->out_buf_pool, op_frm->vcd_frm.virt_addr); + } else { + op_buf_entry = transc->op_buf_entry; + } + + if (!op_buf_entry) { + VCD_MSG_ERROR("Invalid output buffer returned" + "from DDL"); + vcd_assert(); + rc = VCD_ERR_BAD_POINTER; + } else if (!op_buf_entry->in_use) { + VCD_MSG_ERROR("Bad output buffer %p recv from DDL", + op_buf_entry->frame.virt_addr); + vcd_assert(); + rc = VCD_ERR_BAD_POINTER; + } else { + op_buf_entry->in_use = false; + VCD_BUFFERPOOL_INUSE_DECREMENT( + cctxt->out_buf_pool.in_use); + VCD_MSG_LOW("outBufPool.InUse = %d", + cctxt->out_buf_pool.in_use); + } + } + VCD_FAILED_RETURN(rc, "Bad output buffer pointer"); + op_frm->vcd_frm.time_stamp = transc->time_stamp; + op_frm->vcd_frm.ip_frm_tag = transc->ip_frm_tag; + op_frm->vcd_frm.frame_type = transc->frame_type; + + transc->frame_done = true; + + if (transc->input_done && transc->frame_done) + transc->in_use = false; + + if (status == VCD_ERR_INTRLCD_FIELD_DROP || + (op_frm->intrlcd_ip_frm_tag != VCD_FRAMETAG_INVALID && + op_frm->intrlcd_ip_frm_tag)) { + vcd_handle_frame_done_for_interlacing(cctxt, transc, op_frm, + status); + } + + if (status != VCD_ERR_INTRLCD_FIELD_DROP) { + cctxt->callback(event, status, &op_frm->vcd_frm, + sizeof(struct vcd_frame_data), cctxt, + cctxt->client_data); + } + return VCD_S_SUCCESS; +} + +void vcd_handle_frame_done_in_eos(struct vcd_clnt_ctxt *cctxt, void *payload, + u32 status) +{ + struct ddl_frame_data_tag *frame = (struct ddl_frame_data_tag *)payload; + + VCD_MSG_LOW("vcd_handle_frame_done_in_eos:"); + + if (VCD_FAILED(vcd_validate_io_done_pyld(payload, status))) + return; + + if (cctxt->status.eos_prev_valid) { + vcd_handle_frame_done(cctxt, + (void *)&cctxt->status.eos_prev_op_frm, + VCD_EVT_RESP_OUTPUT_DONE, status); + } + + cctxt->status.eos_prev_op_frm = *frame; + cctxt->status.eos_prev_valid = true; +} + +void vcd_handle_frame_done_for_interlacing(struct vcd_clnt_ctxt *cctxt, + struct vcd_transc *transc_ip1, struct ddl_frame_data_tag *op_frm, + u32 status) +{ + struct vcd_transc *transc_ip2 = (struct vcd_transc *) + op_frm->intrlcd_ip_frm_tag; + + if (status == VCD_ERR_INTRLCD_FIELD_DROP) { + cctxt->status.int_field_cnt = 0; + return; + } + + op_frm->intrlcd_ip_frm_tag = transc_ip2->ip_frm_tag; + + transc_ip2->frame_done = true; + + if (transc_ip2->input_done && transc_ip2->frame_done) + transc_ip2->in_use = false; + + if (!transc_ip1->frame_type || !transc_ip2->frame_type) { + VCD_MSG_ERROR("DDL didn't provided frame type"); + return; + } +} + +u32 vcd_handle_first_frame_done(struct vcd_clnt_ctxt *cctxt, void *payload) +{ + if (!cctxt->decoding) + return vcd_handle_first_encode_frame_done(cctxt, payload); + + return VCD_S_SUCCESS; +} + +u32 vcd_handle_first_encode_frame_done(struct vcd_clnt_ctxt *cctxt, + void *payload) +{ + struct vcd_buffer_entry *buf_entry; + struct vcd_frame_data 
*frm_entry; + u32 rc, seqhdr_present; + struct vcd_property_hdr prop_hdr; + struct vcd_sequence_hdr seq_hdr; + struct vcd_property_codec codec; + union sched_value_type sched_val; + struct vcd_transc *transc; + struct ddl_frame_data_tag *payload_frm = (struct ddl_frame_data_tag *) + payload; + VCD_MSG_LOW("vcd_handle_first_encode_frame_done:"); + + rc = vcd_validate_io_done_pyld(payload, VCD_S_SUCCESS); + VCD_FAILED_RETURN(rc, "Validate frame done payload failed"); + + transc = (struct vcd_transc *)payload_frm->vcd_frm.ip_frm_tag; + + prop_hdr.id = DDL_I_SEQHDR_PRESENT; + prop_hdr.sz = sizeof(seqhdr_present); + rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr, &seqhdr_present); + VCD_FAILED_RETURN(rc, "Failed: DDL_I_SEQHDR_PRESENT"); + if (!seqhdr_present) + return VCD_S_SUCCESS; + + buf_entry = vcd_buffer_pool_entry_de_q(&cctxt->out_buf_pool); + + if (!buf_entry) { + VCD_MSG_ERROR("Sched provided frame when 2nd op buffer " + "was unavailable"); + + rc = VCD_ERR_FAIL; + vcd_assert(); + return rc; + } + + frm_entry = &buf_entry->frame; + prop_hdr.id = VCD_I_CODEC; + prop_hdr.sz = sizeof(struct vcd_property_codec); + + rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr, &codec); + if (VCD_FAILED(rc)) { + VCD_MSG_ERROR("rc = 0x%x. Failed: ddl_get_property:VCD_I_CODEC", + rc); + goto out; + } + + if (codec.codec != VCD_CODEC_H263) { + prop_hdr.id = VCD_I_SEQ_HEADER; + prop_hdr.sz = sizeof(struct vcd_sequence_hdr); + + seq_hdr.addr = frm_entry->virt_addr; + seq_hdr.sz = buf_entry->size; + + rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr, &seq_hdr); + if (VCD_FAILED(rc)) { + VCD_MSG_ERROR("rc = 0x%x. Failed: " + "ddl_get_property:VCD_I_SEQ_HEADER", rc); + goto out; + } + } else { + VCD_MSG_LOW("Codec Type is H.263\n"); + } + + sched_val.un_value = VCD_SCHEDULER_ENC_DFLT_OTKN_PERFRM; + + rc = vcd_map_sched_status(sched_set_client_param( + cctxt->dev_ctxt->sched_hdl, cctxt->sched_clnt_hdl, + SCHED_I_CLNT_OTKNPERIPFRM, &sched_val)); + if (VCD_FAILED(rc)) { + VCD_MSG_ERROR("rc = 0x%x.Failed: sched_set_client_param", rc); + goto out; + } + + frm_entry->data_len = seq_hdr.sz; + frm_entry->time_stamp = transc->time_stamp; + frm_entry->ip_frm_tag = transc->ip_frm_tag; + frm_entry->flags |= VCD_FRAME_FLAG_CODECCONFIG; + + cctxt->callback(VCD_EVT_RESP_OUTPUT_DONE, VCD_S_SUCCESS, frm_entry, + sizeof(struct vcd_frame_data), cctxt, cctxt->client_data); + +out: + if (VCD_FAILED(rc)) + vcd_buffer_pool_entry_en_q(&cctxt->out_buf_pool, buf_entry); + + return rc; +} + +void vcd_handle_eos_trans_end(struct vcd_clnt_ctxt *cctxt) +{ + if (cctxt->status.eos_prev_valid) { + vcd_handle_frame_done(cctxt, + (void *)&cctxt->status.eos_prev_op_frm, + VCD_EVT_RESP_OUTPUT_DONE, VCD_S_SUCCESS); + + cctxt->status.eos_prev_valid = false; + } + + if (cctxt->status.flush_mode) + vcd_process_pending_flush_in_eos(cctxt); + + if (cctxt->status.stop_pending) + vcd_process_pending_stop_in_eos(cctxt); + else { + vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_RUN, + CLIENT_STATE_EVENT_NUMBER(pf_clnt_cb)); + } +} + +void vcd_handle_eos_done(struct vcd_clnt_ctxt *cctxt, struct vcd_transc *transc, + u32 status) +{ + struct vcd_frame_data vcd_frm; + VCD_MSG_LOW("vcd_handle_eos_done:"); + + if (VCD_FAILED(status)) + VCD_MSG_ERROR("EOS DONE returned error = 0x%x", status); + + if (cctxt->status.eos_prev_valid) { + cctxt->status.eos_prev_op_frm.vcd_frm.flags |= + VCD_FRAME_FLAG_EOS; + + vcd_handle_frame_done(cctxt, + (void *)&cctxt->status.eos_prev_op_frm, + VCD_EVT_RESP_OUTPUT_DONE, VCD_S_SUCCESS); + + 
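+		/* cached output frame has been returned to the client; invalidate it */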
cctxt->status.eos_prev_valid = false; + } else { + if (transc->ip_buf_entry) { + transc->ip_buf_entry->frame.ip_frm_tag = + transc->ip_frm_tag; + + vcd_send_frame_done_in_eos(cctxt, + &transc->ip_buf_entry->frame, false); + } else { + memset(&vcd_frm, 0, sizeof(struct vcd_frame_data)); + vcd_frm.ip_frm_tag = transc->ip_frm_tag; + vcd_frm.time_stamp = transc->time_stamp; + vcd_frm.flags = VCD_FRAME_FLAG_EOS; + vcd_send_frame_done_in_eos(cctxt, &vcd_frm, true); + } + } + if (transc->ip_buf_entry) { + if (transc->ip_buf_entry->frame.virt_addr) { + transc->ip_buf_entry->frame.ip_frm_tag = + transc->ip_frm_tag; + + cctxt->callback(VCD_EVT_RESP_INPUT_DONE, + VCD_S_SUCCESS, &transc->ip_buf_entry->frame, + sizeof(struct vcd_frame_data), cctxt, + cctxt->client_data); + } + transc->ip_buf_entry->in_use = false; + VCD_BUFFERPOOL_INUSE_DECREMENT(cctxt->in_buf_pool.in_use); + transc->ip_buf_entry = NULL; + cctxt->status.frame_submitted--; + } + + transc->in_use = false; + vcd_mark_frame_channel(cctxt->dev_ctxt); + if (cctxt->status.flush_mode) + vcd_process_pending_flush_in_eos(cctxt); + + if (cctxt->status.stop_pending) { + vcd_process_pending_stop_in_eos(cctxt); + } else if (!cctxt->status.eos_wait_for_op_buf) { + vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_RUN, + CLIENT_STATE_EVENT_NUMBER(pf_clnt_cb)); + } +} + +void vcd_handle_start_done(struct vcd_clnt_ctxt *cctxt, + struct vcd_transc *transc, u32 status) +{ + cctxt->status.cmd_submitted--; + vcd_mark_command_channel(cctxt->dev_ctxt, transc); + + if (!VCD_FAILED(status)) { + cctxt->callback(VCD_EVT_RESP_START, status, NULL, 0, cctxt, + cctxt->client_data); + + vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_RUN, + CLIENT_STATE_EVENT_NUMBER(pf_clnt_cb)); + } else { + VCD_MSG_ERROR("ddl callback returned failure.status = 0x%x", + status); + vcd_handle_err_in_starting(cctxt, status); + } +} + +void vcd_handle_stop_done(struct vcd_clnt_ctxt *cctxt, + struct vcd_transc *transc, u32 status) +{ + u32 rc = VCD_S_SUCCESS; + u32 seq_hdrpresent = 0; + union sched_value_type sched_val; + struct vcd_property_hdr prop_hdr; + VCD_MSG_LOW("vcd_handle_stop_done:"); + cctxt->status.cmd_submitted--; + vcd_mark_command_channel(cctxt->dev_ctxt, transc); + + if (VCD_FAILED(status)) { + VCD_MSG_FATAL("STOP_DONE returned error = 0x%x", status); + status = VCD_ERR_HW_FATAL; + vcd_handle_device_err_fatal(cctxt->dev_ctxt, cctxt); + vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_INVALID, + CLIENT_STATE_EVENT_NUMBER(pf_clnt_cb)); + goto out; + } + + if (!cctxt->decoding) { + prop_hdr.id = DDL_I_SEQHDR_PRESENT; + prop_hdr.sz = sizeof(seq_hdrpresent); + rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr, + &seq_hdrpresent); + if (VCD_FAILED(rc)) { + VCD_MSG_ERROR("Failed: DDL Get DDL_I_SEQHDR_PRESENT %d", + rc); + goto open_out; + } + if (seq_hdrpresent == 0x1) { + sched_val.un_value = VCD_SCHEDULER_ENC_DFLT_OTKN_PERFRM + + 1; + + rc = vcd_map_sched_status(sched_set_client_param( + cctxt->dev_ctxt->sched_hdl, + cctxt->sched_clnt_hdl, + SCHED_I_CLNT_OTKNPERIPFRM, &sched_val)); + if (VCD_FAILED(rc)) + VCD_MSG_ERROR("Failed: sched_set_client_param " + "%d", rc); + } + } +open_out: + vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_OPEN, + CLIENT_STATE_EVENT_NUMBER(pf_clnt_cb)); + +out: + cctxt->callback(VCD_EVT_RESP_STOP, status, NULL, 0, cctxt, + cctxt->client_data); + + memset(&cctxt->status, 0, sizeof(struct vcd_clnt_status)); +} + +void vcd_handle_stop_done_in_starting(struct vcd_clnt_ctxt *cctxt, + struct vcd_transc *transc, u32 status) +{ + 
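+	/*
+	 * Completion of the CODEC_STOP queued while a START was failing
+	 * (see vcd_handle_err_in_starting()): on success the original START
+	 * error saved in status.last_err is reported back to the client and
+	 * the client is returned to the OPEN state; a failed STOP escalates
+	 * to the HW-fatal error path instead.
+	 */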
VCD_MSG_LOW("vcd_handle_stop_done_in_starting:"); + cctxt->status.cmd_submitted--; + vcd_mark_command_channel(cctxt->dev_ctxt, transc); + if (!VCD_FAILED(status)) { + cctxt->callback(VCD_EVT_RESP_START, cctxt->status.last_err, + NULL, 0, cctxt, cctxt->client_data); + vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_OPEN, + CLIENT_STATE_EVENT_NUMBER(pf_clnt_cb)); + } else { + VCD_MSG_FATAL("VCD Cleanup: STOP_DONE returned error " + "= 0x%x", status); + vcd_handle_err_fatal(cctxt, VCD_EVT_RESP_START, + VCD_ERR_HW_FATAL); + } +} + +void vcd_handle_stop_done_in_invalid(struct vcd_clnt_ctxt *cctxt, u32 status) +{ + u32 rc; + VCD_MSG_LOW("vcd_handle_stop_done_in_invalid:"); + if (!VCD_FAILED(status)) { + vcd_client_cmd_flush_and_en_q(cctxt, VCD_CMD_CLIENT_CLOSE); + if (cctxt->status.frame_submitted) { + vcd_release_multiple_frame_channels(cctxt->dev_ctxt, + cctxt->status.frame_submitted); + + cctxt->status.frame_submitted = 0; + cctxt->status.frame_delayed = 0; + } + if (cctxt->status.cmd_submitted) { + vcd_release_multiple_command_channels(cctxt->dev_ctxt, + cctxt->status.cmd_submitted); + cctxt->status.cmd_submitted = 0; + } + } else { + VCD_MSG_FATAL("VCD Cleanup: STOP_DONE returned error " + "= 0x%x", status); + vcd_handle_device_err_fatal(cctxt->dev_ctxt, cctxt); + cctxt->status.cleaning_up = false; + } + vcd_flush_buffers_in_err_fatal(cctxt); + VCD_MSG_HIGH("VCD cleanup: All buffers are returned"); + if (cctxt->status.stop_pending) { + cctxt->callback(VCD_EVT_RESP_STOP, VCD_S_SUCCESS, NULL, 0, + cctxt, cctxt->client_data); + cctxt->status.stop_pending = false; + } + rc = vcd_power_event(cctxt->dev_ctxt, cctxt, VCD_EVT_PWR_CLNT_ERRFATAL); + if (VCD_FAILED(rc)) + VCD_MSG_ERROR("VCD_EVT_PWR_CLNT_ERRFATAL failed"); + if (!cctxt->status.cleaning_up && + cctxt->status.close_pending) { + vcd_destroy_client_context(cctxt); + vcd_handle_for_last_clnt_close(cctxt->dev_ctxt, false); + } +} + +u32 vcd_handle_input_frame(struct vcd_clnt_ctxt *cctxt, + struct vcd_frame_data *input_frame) +{ + struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt; + struct vcd_buffer_entry *buf_entry; + struct vcd_frame_data *frm_entry; + u32 rc = VCD_S_SUCCESS; + u32 eos_handled = false; + + VCD_MSG_LOW("vcd_handle_input_frame:"); + + VCD_MSG_LOW("input buffer: addr=(0x%p), size=(%d), len=(%d)", + input_frame->virt_addr, input_frame->alloc_len, + input_frame->data_len); + + if ((!input_frame->virt_addr || !input_frame->data_len) && + !(input_frame->flags & VCD_FRAME_FLAG_EOS)) { + VCD_MSG_ERROR("Bad frame ptr/len/EOS combination"); + + return VCD_ERR_ILLEGAL_PARM; + } + + if (!cctxt->status.b1st_frame_recvd) { + if (cctxt->decoding) + rc = vcd_handle_first_decode_frame(cctxt); + + if (!VCD_FAILED(rc)) { + cctxt->status.first_ts = input_frame->time_stamp; + cctxt->status.prev_ts = cctxt->status.first_ts; + + cctxt->status.b1st_frame_recvd = true; + + vcd_power_event(cctxt->dev_ctxt, cctxt, + VCD_EVT_PWR_CLNT_FIRST_FRAME); + } + } + VCD_FAILED_RETURN(rc, "Failed: Frist frame handling"); + + buf_entry = vcd_find_buffer_pool_entry(&cctxt->in_buf_pool, + input_frame->virt_addr); + if (!buf_entry) { + VCD_MSG_ERROR("Bad buffer addr: %p", input_frame->virt_addr); + return VCD_ERR_FAIL; + } + + if (buf_entry->in_use) { + VCD_MSG_ERROR("An inuse input frame is being re-queued to " + "scheduler"); + return VCD_ERR_FAIL; + } + + if (input_frame->alloc_len > buf_entry->size) { + VCD_MSG_ERROR("Bad buffer Alloc_len %d, Actual size=%d", + input_frame->alloc_len, buf_entry->size); + + return VCD_ERR_ILLEGAL_PARM; + } + + frm_entry = 
&buf_entry->frame; + + *frm_entry = *input_frame; + frm_entry->phys_addr = buf_entry->phys_addr; + + if (input_frame->flags & VCD_FRAME_FLAG_EOS) + rc = vcd_handle_recvd_eos(cctxt, input_frame, &eos_handled); + + if (VCD_FAILED(rc) || eos_handled) { + VCD_MSG_HIGH("rc = 0x%x, eos_handled = %d", rc, eos_handled); + + return rc; + } + + rc = vcd_map_sched_status(sched_queue_frame(dev_ctxt->sched_hdl, + cctxt->sched_clnt_hdl, (void *)buf_entry)); + + VCD_FAILED_RETURN(rc, "Failed: sched_queue_frame"); + + buf_entry->in_use = true; + cctxt->in_buf_pool.in_use++; + if (input_frame->flags & VCD_FRAME_FLAG_EOS) { + rc = vcd_map_sched_status(sched_mark_client_eof( + cctxt->dev_ctxt->sched_hdl, cctxt->sched_clnt_hdl)); + } + + VCD_FAILED_RETURN(rc, "Failed: sched_mark_client_eof"); + + vcd_try_submit_frame(dev_ctxt); + return rc; +} + +void vcd_release_all_clnt_frm_transc(struct vcd_clnt_ctxt *cctxt) +{ + struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt; + u8 i; + + VCD_MSG_LOW("vcd_release_all_clnt_frm_transc:"); + + for (i = 0; i < dev_ctxt->trans_tbl_size; i++) { + if (dev_ctxt->trans_tbl[i].in_use && + cctxt == dev_ctxt->trans_tbl[i].cctxt && + dev_ctxt->trans_tbl[i].type == + VCD_CMD_CODE_FRAME) { + vcd_release_trans_tbl_entry(&dev_ctxt->trans_tbl[i]); + } + } +} + +void vcd_release_all_clnt_def_frm_transc(struct vcd_clnt_ctxt *cctxt) +{ + struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt; + u8 i; + + VCD_MSG_LOW("vcd_release_all_clnt_def_frm_transc:"); + + for (i = 0; i < dev_ctxt->trans_tbl_size; i++) { + if (dev_ctxt->trans_tbl[i].in_use && + cctxt == dev_ctxt->trans_tbl[i].cctxt && + dev_ctxt->trans_tbl[i].type == VCD_CMD_NONE) { + vcd_release_trans_tbl_entry(&dev_ctxt->trans_tbl[i]); + } + } +} + +void vcd_release_all_clnt_transc(struct vcd_clnt_ctxt *cctxt) +{ + struct vcd_dev_ctxt *dev_ctxt = cctxt->dev_ctxt; + u8 i; + + VCD_MSG_LOW("vcd_release_all_clnt_def_frm_transc:"); + + for (i = 0; i < dev_ctxt->trans_tbl_size; i++) { + if (dev_ctxt->trans_tbl[i].in_use && + cctxt == dev_ctxt->trans_tbl[i].cctxt) { + vcd_release_trans_tbl_entry(&dev_ctxt->trans_tbl[i]); + } + } +} + +void vcd_send_flush_done(struct vcd_clnt_ctxt *cctxt, u32 status) +{ + VCD_MSG_LOW("vcd_send_flush_done:"); + + if (cctxt->status.flush_mode & VCD_FLUSH_INPUT) { + cctxt->callback(VCD_EVT_RESP_FLUSH_INPUT_DONE, status, NULL, 0, + cctxt, cctxt->client_data); + cctxt->status.flush_mode &= ~VCD_FLUSH_INPUT; + } + + if (cctxt->status.flush_mode & VCD_FLUSH_OUTPUT) { + cctxt->callback(VCD_EVT_RESP_FLUSH_OUTPUT_DONE, status, NULL, 0, + cctxt, cctxt->client_data); + cctxt->status.flush_mode &= ~VCD_FLUSH_OUTPUT; + } +} + +u32 vcd_store_seq_hdr(struct vcd_clnt_ctxt *cctxt, + struct vcd_sequence_hdr *seq_hdr) +{ +// u32 rc; +// struct vcd_property_hdr prop_hdr; +// u32 align; +// u32 addr; +// int ret = 0; + + if (!seq_hdr->sz || !seq_hdr->addr) { + VCD_MSG_ERROR("Bad seq hdr"); + return VCD_ERR_BAD_POINTER; + } + + if (cctxt->seq_hdr.addr) { + VCD_MSG_HIGH("Old seq hdr detected"); + + dma_free_coherent(NULL, cctxt->seq_hdr.sz + + VCD_SEQ_HDR_PADDING_BYTES, cctxt->seq_hdr.addr, + cctxt->seq_hdr_phys_addr); + cctxt->seq_hdr.addr = NULL; + } + + cctxt->seq_hdr.sz = seq_hdr->sz; + + //TODO strip out all this alignment crap? 
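+	/*
+	 * The disabled block below queried DDL_I_SEQHDR_ALIGN_BYTES for the
+	 * required sequence-header alignment and appears to be kept only for
+	 * reference (note it references prop_hdr.size rather than the .sz
+	 * member used elsewhere, so it would not build as-is).  The
+	 * allocation further down simply over-allocates by
+	 * VCD_SEQ_HDR_PADDING_BYTES instead.
+	 */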
+#if 0 + prop_hdr.id = DDL_I_SEQHDR_ALIGN_BYTES; + prop_hdr.size = sizeof(u32); + + rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr, &align); + + VCD_FAILED_RETURN(rc, + "Failed: ddl_get_property DDL_I_SEQHDR_ALIGN_BYTES"); + + VCD_MSG_MED("Seq hdr alignment bytes = %d", align); +#endif + + cctxt->seq_hdr.addr = dma_alloc_coherent(NULL, + cctxt->seq_hdr.sz + VCD_SEQ_HDR_PADDING_BYTES, + &cctxt->seq_hdr_phys_addr, GFP_KERNEL); + if (!cctxt->seq_hdr.addr) { + VCD_MSG_ERROR("Seq hdr allocation failed"); + return VCD_ERR_ALLOC_FAIL; + } + + memset(cctxt->seq_hdr.addr, 0, + cctxt->seq_hdr.sz + VCD_SEQ_HDR_PADDING_BYTES); + memcpy(cctxt->seq_hdr.addr, seq_hdr->addr, seq_hdr->sz); + + return VCD_S_SUCCESS; +} + +u32 vcd_set_frame_rate(struct vcd_clnt_ctxt *cctxt, + struct vcd_property_frame_rate *fps) +{ + union sched_value_type sched_val; + u32 rc; + + sched_val.frm_rate.numer = fps->fps_numerator; + sched_val.frm_rate.denom = fps->fps_denominator; + cctxt->frm_rate = *fps; + + rc = vcd_map_sched_status(sched_set_client_param( + cctxt->dev_ctxt->sched_hdl, cctxt->sched_clnt_hdl, + SCHED_I_CLNT_FRAMERATE, &sched_val)); + if (VCD_FAILED(rc)) { + VCD_MSG_ERROR("rc = 0x%x. Failed: Set SCHED_I_CLNT_FRAMERATE", + rc); + } + + rc = vcd_update_clnt_perf_lvl(cctxt, &cctxt->frm_rate, + cctxt->frm_p_units); + if (VCD_FAILED(rc)) { + VCD_MSG_ERROR("rc = 0x%x. Failed: vcd_update_clnt_perf_lvl", + rc); + } + + sched_val.un_value = cctxt->reqd_perf_lvl; + + rc = vcd_map_sched_status(sched_set_client_param( + cctxt->dev_ctxt->sched_hdl, cctxt->sched_clnt_hdl, + SCHED_I_CLNT_PTKNRATE, &sched_val)); + + if (VCD_FAILED(rc)) { + VCD_MSG_ERROR("rc = 0x%x. Failed: Set SCHED_I_CLNT_PTKNRATE", + rc); + } + + return VCD_S_SUCCESS; +} + +u32 vcd_set_frame_size(struct vcd_clnt_ctxt *cctxt, + struct vcd_property_frame_size *frm_size) +{ + struct vcd_property_hdr prop_hdr; + union sched_value_type sched_val; + u32 rc; + u32 frm_p_units; + frm_size = NULL; + + prop_hdr.id = DDL_I_FRAME_PROC_UNITS; + prop_hdr.sz = sizeof(frm_p_units); + rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr, &frm_p_units); + + VCD_FAILED_RETURN(rc, "Failed: Get DDL_I_FRAME_PROC_UNITS"); + + cctxt->frm_p_units = sched_val.un_value = frm_p_units; + + rc = vcd_map_sched_status(sched_set_client_param( + cctxt->dev_ctxt->sched_hdl, cctxt->sched_clnt_hdl, + SCHED_I_CLNT_PTKNPERFRM, &sched_val)); + + if (VCD_FAILED(rc)) { + VCD_MSG_ERROR("rc = 0x%x. Failed: Set SCHED_I_CLNT_PTKNPERFRM", + rc); + } + + rc = vcd_update_clnt_perf_lvl(cctxt, &cctxt->frm_rate, frm_p_units); + + if (VCD_FAILED(rc)) { + VCD_MSG_ERROR("rc = 0x%x. Failed: vcd_update_clnt_perf_lvl", + rc); + } + + sched_val.un_value = cctxt->reqd_perf_lvl; + + rc = vcd_map_sched_status(sched_set_client_param( + cctxt->dev_ctxt->sched_hdl, cctxt->sched_clnt_hdl, + SCHED_I_CLNT_PTKNRATE, &sched_val)); + + if (VCD_FAILED(rc)) { + VCD_MSG_ERROR("rc = 0x%x. Failed: Set SCHED_I_CLNT_PTKNRATE", + rc); + } + + return VCD_S_SUCCESS; +} + +void vcd_process_pending_flush_in_eos(struct vcd_clnt_ctxt *cctxt) +{ + u32 rc = VCD_S_SUCCESS; + + VCD_MSG_HIGH("Buffer flush is pending"); + + rc = vcd_flush_buffers(cctxt, cctxt->status.flush_mode); + + if (VCD_FAILED(rc)) + VCD_MSG_ERROR("rc = 0x%x. 
Failed: vcd_flush_buffers", rc); + + cctxt->status.eos_wait_for_op_buf = false; + + vcd_send_flush_done(cctxt, VCD_S_SUCCESS); +} + +void vcd_process_pending_stop_in_eos(struct vcd_clnt_ctxt *cctxt) +{ + u32 rc = VCD_S_SUCCESS; + + rc = vcd_flush_buffers(cctxt, VCD_FLUSH_ALL); + + if (VCD_FAILED(rc)) + VCD_MSG_ERROR("rc = 0x%x. Failed: vcd_flush_buffers", rc); + + VCD_MSG_HIGH("All buffers are returned. Enqueuing stop cmd"); + + vcd_client_cmd_flush_and_en_q(cctxt, VCD_CMD_CODEC_STOP); + cctxt->status.stop_pending = false; + + vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_STOPPING, + CLIENT_STATE_EVENT_NUMBER(pf_stop)); +} + +u32 vcd_calculate_frame_delta(struct vcd_clnt_ctxt *cctxt, + struct vcd_frame_data *frame) +{ + u32 frm_delta; + u64 temp, temp1; + + temp = frame->time_stamp - cctxt->status.prev_ts; + + VCD_MSG_LOW("Curr_ts=%lld Prev_ts=%lld Diff=%llu", frame->time_stamp, + cctxt->status.prev_ts, temp); + + temp = temp * cctxt->time_resoln; + temp = (temp + (VCD_TIMESTAMP_RESOLUTION >> 1)); + temp1 = do_div(temp, VCD_TIMESTAMP_RESOLUTION); + frm_delta = temp; + VCD_MSG_LOW("temp1=%lld temp=%lld", temp1, temp); + cctxt->status.time_elapsed += frm_delta; + + temp = ((u64)cctxt->status.time_elapsed * VCD_TIMESTAMP_RESOLUTION); + temp = (temp + (cctxt->time_resoln >> 1)); + temp1 = do_div(temp, cctxt->time_resoln); + + cctxt->status.prev_ts = cctxt->status.first_ts + temp; + + VCD_MSG_LOW("Time_elapsed=%u, Drift=%llu, new Prev_ts=%lld", + cctxt->status.time_elapsed, temp1, cctxt->status.prev_ts); + + return frm_delta; +} + +struct vcd_buffer_entry *vcd_check_fill_output_buffer(struct vcd_clnt_ctxt + *cctxt, struct vcd_frame_data *buffer) +{ + struct vcd_buffer_pool *buf_pool = &cctxt->out_buf_pool; + struct vcd_buffer_entry *buf_entry; + + if (!buf_pool->entries) { + VCD_MSG_ERROR("Buffers not set or allocated yet"); + return NULL; + } + + if (!buffer->virt_addr) { + VCD_MSG_ERROR("NULL buffer address provided"); + return NULL; + } + + buf_entry = vcd_find_buffer_pool_entry(buf_pool, buffer->virt_addr); + if (!buf_entry) { + VCD_MSG_ERROR("Unrecognized buffer address provided %p", + buffer->virt_addr); + return NULL; + } + + if (buf_entry->in_use) { + VCD_MSG_ERROR("An inuse output frame is being provided for " + "reuse"); + return NULL; + } + + if (buffer->alloc_len < buf_pool->buf_req.size || + buffer->alloc_len > buf_entry->size) { + VCD_MSG_ERROR("Bad buffer Alloc_len = %d, Actual size = %d, " + " Min size = %u", buffer->alloc_len, buf_entry->size, + buf_pool->buf_req.size); + return NULL; + } + + return buf_entry; +} + +void vcd_handle_ind_hw_err_fatal(struct vcd_clnt_ctxt *cctxt, u32 event, + u32 status) +{ + if (cctxt->status.frame_submitted) { + cctxt->status.frame_submitted--; + vcd_mark_frame_channel(cctxt->dev_ctxt); + } + vcd_handle_err_fatal(cctxt, event, status); +} + +void vcd_handle_err_fatal(struct vcd_clnt_ctxt *cctxt, u32 event, u32 status) +{ + u32 rc; + VCD_MSG_LOW("vcd_handle_err_fatal: event=%x, err=%x", event, status); + if (!VCD_FAILED_FATAL(status)) + return; + + if (VCD_FAILED_DEVICE_FATAL(status)) { + vcd_clnt_handle_device_err_fatal(cctxt, event); + vcd_handle_device_err_fatal(cctxt->dev_ctxt, cctxt); + } else if (VCD_FAILED_CLIENT_FATAL(status)) { + cctxt->status.last_evt = event; + + if (cctxt->sched_clnt_valid) { + rc = vcd_map_sched_status(sched_suspend_resume_client( + cctxt->dev_ctxt->sched_hdl, + cctxt->sched_clnt_hdl, false)); + if (VCD_FAILED(rc)) { + VCD_MSG_ERROR("Failed: sched_suspend_resume_" + "client rc=0x%x", rc); + } + } + 
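+		/*
+		 * Client-fatal error: scheduling for this client was suspended
+		 * above; report VCD_ERR_HW_FATAL to the client, queue an
+		 * internal CODEC_STOP to clean up the session, and park the
+		 * client in the INVALID state (the eventual STOP_DONE is
+		 * presumably handled by vcd_handle_stop_done_in_invalid()).
+		 */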
cctxt->callback(event, VCD_ERR_HW_FATAL, NULL, 0, cctxt, + cctxt->client_data); + cctxt->status.cleaning_up = true; + vcd_client_cmd_flush_and_en_q(cctxt, VCD_CMD_CODEC_STOP); + vcd_do_client_state_transition(cctxt, VCD_CLIENT_STATE_INVALID, + CLIENT_STATE_EVENT_NUMBER(pf_clnt_cb)); + } +} + +void vcd_handle_err_in_starting(struct vcd_clnt_ctxt *cctxt, u32 status) +{ + VCD_MSG_LOW("\n vcd_handle_err_in_starting:"); + if (VCD_FAILED_FATAL(status)) { + vcd_handle_err_fatal(cctxt, VCD_EVT_RESP_START, status); + } else { + cctxt->status.last_err = status; + VCD_MSG_HIGH("\n VCD cleanup: Enqueuing stop cmd"); + vcd_client_cmd_flush_and_en_q(cctxt, VCD_CMD_CODEC_STOP); + } +} + +void vcd_handle_trans_pending(struct vcd_clnt_ctxt *cctxt) +{ + if (!cctxt->status.frame_submitted) { + VCD_MSG_ERROR("Transaction pending response was not expected"); + vcd_assert(); + return; + } + cctxt->status.frame_submitted--; + cctxt->status.frame_delayed++; + vcd_mark_frame_channel(cctxt->dev_ctxt); +} + +u32 vcd_requeue_input_frame(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_clnt_ctxt *cctxt, struct vcd_buffer_entry *buf_entry) +{ + u32 rc; + rc = vcd_map_sched_status(sched_re_queue_frame(dev_ctxt->sched_hdl, + cctxt->sched_clnt_hdl, (void *) buf_entry)); + + VCD_FAILED_RETURN(rc, "Failed: Sched_ReQueueFrame"); + + if (buf_entry->frame.flags & VCD_FRAME_FLAG_EOS) { + rc = vcd_map_sched_status(sched_mark_client_eof(dev_ctxt-> + sched_hdl, cctxt->sched_clnt_hdl)); + } + + if (VCD_FAILED(rc)) + VCD_MSG_ERROR("rc = 0x%x: Failed: Sched_MarkClientEOF", rc); + + return rc; +} + +void vcd_handle_submit_frame_failed(struct vcd_dev_ctxt *dev_ctxt, + struct vcd_transc *transc) +{ + struct vcd_clnt_ctxt *cctxt = transc->cctxt; + u32 rc; + + vcd_mark_frame_channel(dev_ctxt); + transc->in_use = false; + + vcd_handle_err_fatal(cctxt, VCD_EVT_IND_HWERRFATAL, + VCD_ERR_CLIENT_FATAL); + + if (vcd_get_command_channel(dev_ctxt, &transc)) { + transc->type = VCD_CMD_CODEC_STOP; + transc->cctxt = cctxt; + rc = vcd_submit_cmd_sess_end(transc); + if (VCD_FAILED(rc)) { + vcd_release_command_channel(dev_ctxt, transc); + VCD_MSG_ERROR("rc = 0x%x. Failed: VCD_SubmitCmdSessEnd", + rc); + } + } +} diff --git a/drivers/misc/video_core/720p/vcd/vcd_util.h b/drivers/misc/video_core/720p/vcd/vcd_util.h new file mode 100644 index 0000000000000..2d90bf5bdd36a --- /dev/null +++ b/drivers/misc/video_core/720p/vcd/vcd_util.h @@ -0,0 +1,71 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#ifndef _VCD_UTIL_H_ +#define _VCD_UTIL_H_ + +#include "vcd_api.h" + +#if DEBUG + +//TODO what a load of crap in here +#define VCD_MSG_LOW(xx_fmt...) printk(KERN_INFO "\t* " xx_fmt) +#define VCD_MSG_MED(xx_fmt...) printk(KERN_INFO " * " xx_fmt) +#define VCD_MSG_HIGH(xx_fmt...) printk(KERN_WARNING xx_fmt) + +#else + +#define VCD_MSG_LOW(xx_fmt...) +#define VCD_MSG_MED(xx_fmt...) +#define VCD_MSG_HIGH(xx_fmt...) + +#endif + +#define VCD_MSG_ERROR(xx_fmt...) printk(KERN_ERR "err: " xx_fmt) +#define VCD_MSG_FATAL(xx_fmt...) printk(KERN_ERR " " xx_fmt) + +#define VCD_FAILED_RETURN(rc, xx_fmt...) \ + do { \ + if (VCD_FAILED(rc)) { \ + printk(KERN_ERR xx_fmt); \ + return rc; \ + } \ + } while (0) + +#define VCD_FAILED_DEVICE_FATAL(rc) \ + (rc == VCD_ERR_HW_FATAL ? true : false) +#define VCD_FAILED_CLIENT_FATAL(rc) \ + (rc == VCD_ERR_CLIENT_FATAL ? true : false) + +#define VCD_FAILED_FATAL(rc) \ + ((VCD_FAILED_DEVICE_FATAL(rc) || VCD_FAILED_CLIENT_FATAL(rc)) \ + ? true : false) + +#define vcd_assert() VCD_MSG_FATAL("ASSERT") + +#endif diff --git a/drivers/misc/video_core/720p/vcd/video_core_type.h b/drivers/misc/video_core/720p/vcd/video_core_type.h new file mode 100644 index 0000000000000..febd4cf724caf --- /dev/null +++ b/drivers/misc/video_core/720p/vcd/video_core_type.h @@ -0,0 +1,48 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef VIDEO_CORE_TYPE_H +#define VIDEO_CORE_TYPE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEBUG 1 + +#define USE_RES_TRACKER + +#undef CORE_TIMING_INFO + +#endif diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 73deb479b18cc..78a19abc30b23 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -50,6 +50,13 @@ MODULE_ALIAS("mmc:block"); static DEFINE_MUTEX(block_mutex); +#define INAND_CMD38_ARG_EXT_CSD 113 +#define INAND_CMD38_ARG_ERASE 0x00 +#define INAND_CMD38_ARG_TRIM 0x01 +#define INAND_CMD38_ARG_SECERASE 0x80 +#define INAND_CMD38_ARG_SECTRIM1 0x81 +#define INAND_CMD38_ARG_SECTRIM2 0x88 + /* * The defaults come from config options but can be overriden by module * or bootarg options. @@ -280,6 +287,15 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) else arg = MMC_ERASE_ARG; + if (card->quirks & MMC_QUIRK_INAND_CMD38) { + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + INAND_CMD38_ARG_EXT_CSD, + arg == MMC_TRIM_ARG ? + INAND_CMD38_ARG_TRIM : + INAND_CMD38_ARG_ERASE); + if (err) + goto out; + } err = mmc_erase(card, from, nr, arg); out: spin_lock_irq(&md->lock); @@ -314,9 +330,26 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, else arg = MMC_SECURE_ERASE_ARG; + if (card->quirks & MMC_QUIRK_INAND_CMD38) { + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + INAND_CMD38_ARG_EXT_CSD, + arg == MMC_SECURE_TRIM1_ARG ? + INAND_CMD38_ARG_SECTRIM1 : + INAND_CMD38_ARG_SECERASE); + if (err) + goto out; + } err = mmc_erase(card, from, nr, arg); - if (!err && arg == MMC_SECURE_TRIM1_ARG) + if (!err && arg == MMC_SECURE_TRIM1_ARG) { + if (card->quirks & MMC_QUIRK_INAND_CMD38) { + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + INAND_CMD38_ARG_EXT_CSD, + INAND_CMD38_ARG_SECTRIM2); + if (err) + goto out; + } err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); + } out: spin_lock_irq(&md->lock); __blk_end_request(req, err, blk_rq_bytes(req)); @@ -693,6 +726,13 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card) return 0; } +static const struct mmc_fixup blk_fixups[] = +{ + MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), + MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), + END_FIXUP +}; + static int mmc_blk_probe(struct mmc_card *card) { struct mmc_blk_data *md; @@ -720,6 +760,8 @@ static int mmc_blk_probe(struct mmc_card *card) cap_str, md->read_only ? 
"(ro)" : ""); mmc_set_drvdata(card, md); + mmc_fixup_device(card, blk_fixups); + #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME mmc_set_bus_resume_policy(card->host, 1); #endif diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile index 86b4791193326..639501970b412 100644 --- a/drivers/mmc/core/Makefile +++ b/drivers/mmc/core/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_MMC) += mmc_core.o mmc_core-y := core.o bus.o host.o \ mmc.o mmc_ops.o sd.o sd_ops.o \ sdio.o sdio_ops.o sdio_bus.o \ - sdio_cis.o sdio_io.o sdio_irq.o + sdio_cis.o sdio_io.o sdio_irq.o \ + quirks.o mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index 63667a8f140c4..3c4ebc97402cc 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -39,6 +39,8 @@ static ssize_t mmc_type_show(struct device *dev, return sprintf(buf, "SDIO\n"); case MMC_TYPE_SD_COMBO: return sprintf(buf, "SDcombo\n"); + case MMC_TYPE_SDIO_WIMAX: + return sprintf(buf, "SDwimax\n"); default: return -EFAULT; } @@ -79,6 +81,9 @@ mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env) case MMC_TYPE_SD_COMBO: type = "SDcombo"; break; + case MMC_TYPE_SDIO_WIMAX: + type = "SDwimax"; + break; default: type = NULL; } @@ -120,18 +125,19 @@ static int mmc_bus_remove(struct device *dev) return 0; } -static int mmc_bus_suspend(struct device *dev, pm_message_t state) +static int mmc_bus_pm_suspend(struct device *dev) { struct mmc_driver *drv = to_mmc_driver(dev->driver); struct mmc_card *card = mmc_dev_to_card(dev); int ret = 0; + pm_message_t state = { PM_EVENT_SUSPEND }; if (dev->driver && drv->suspend) ret = drv->suspend(card, state); return ret; } -static int mmc_bus_resume(struct device *dev) +static int mmc_bus_pm_resume(struct device *dev) { struct mmc_driver *drv = to_mmc_driver(dev->driver); struct mmc_card *card = mmc_dev_to_card(dev); @@ -143,7 +149,6 @@ static int mmc_bus_resume(struct device *dev) } #ifdef CONFIG_PM_RUNTIME - static int mmc_runtime_suspend(struct device *dev) { struct mmc_card *card = mmc_dev_to_card(dev); @@ -162,21 +167,13 @@ static int mmc_runtime_idle(struct device *dev) { return pm_runtime_suspend(dev); } +#endif /* CONFIG_PM_RUNTIME */ static const struct dev_pm_ops mmc_bus_pm_ops = { - .runtime_suspend = mmc_runtime_suspend, - .runtime_resume = mmc_runtime_resume, - .runtime_idle = mmc_runtime_idle, + SET_SYSTEM_SLEEP_PM_OPS(mmc_bus_pm_suspend, mmc_bus_pm_resume) + SET_RUNTIME_PM_OPS(mmc_runtime_suspend, mmc_runtime_resume, mmc_runtime_idle) }; -#define MMC_PM_OPS_PTR (&mmc_bus_pm_ops) - -#else /* !CONFIG_PM_RUNTIME */ - -#define MMC_PM_OPS_PTR NULL - -#endif /* !CONFIG_PM_RUNTIME */ - static struct bus_type mmc_bus_type = { .name = "mmc", .dev_attrs = mmc_dev_attrs, @@ -184,9 +181,7 @@ static struct bus_type mmc_bus_type = { .uevent = mmc_bus_uevent, .probe = mmc_bus_probe, .remove = mmc_bus_remove, - .suspend = mmc_bus_suspend, - .resume = mmc_bus_resume, - .pm = MMC_PM_OPS_PTR, + .pm = &mmc_bus_pm_ops, }; int mmc_register_bus(void) @@ -284,6 +279,10 @@ int mmc_add_card(struct mmc_card *card) type = "SD-combo"; if (mmc_card_blockaddr(card)) type = "SDHC-combo"; + break; + case MMC_TYPE_SDIO_WIMAX: + type = "SD-WiMAX"; + break; default: type = "?"; break; diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 63b9c4efd1742..74c7ece9d31ee 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -205,6 +205,10 @@ static void mmc_wait_done(struct mmc_request *mrq) complete(mrq->done_data); } +struct msmsdcc_host; +void 
msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq); +void msmsdcc_stop_data(struct msmsdcc_host *host); + /** * mmc_wait_for_req - start a request and wait for completion * @host: MMC host to start command @@ -216,6 +220,10 @@ static void mmc_wait_done(struct mmc_request *mrq) */ void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq) { +#ifdef CONFIG_WIMAX + int ret = 0; +#endif + DECLARE_COMPLETION_ONSTACK(complete); mrq->done_data = &complete; @@ -223,7 +231,21 @@ void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq) mmc_start_request(host, mrq); +#ifdef CONFIG_WIMAX + ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(5000)); + if (ret <= 0) { + struct msmsdcc_host *msm_host = mmc_priv(host); + printk("[ERR] %s: %s wait_for_completion_timeout!\n", __func__, mmc_hostname(host)); + + msmsdcc_stop_data(msm_host); + + mrq->cmd->error = -ETIMEDOUT; + msmsdcc_request_end(msm_host, mrq); + } +#else wait_for_completion(&complete); +#endif + } EXPORT_SYMBOL(mmc_wait_for_req); @@ -302,6 +324,7 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) unsigned int timeout_us, limit_us; timeout_us = data->timeout_ns / 1000; + if (mmc_host_clk_rate(card->host)) timeout_us += data->timeout_clks * 1000 / (mmc_host_clk_rate(card->host) / 1000); @@ -866,6 +889,11 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc, { int result = 0; int min_uV, max_uV; + int enabled; + + enabled = regulator_is_enabled(supply); + if (enabled < 0) + return enabled; if (vdd_bit) { int tmp; @@ -963,7 +991,7 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing) * If a host does all the power sequencing itself, ignore the * initial MMC_POWER_UP stage. */ -static void mmc_power_up(struct mmc_host *host) +void mmc_power_up(struct mmc_host *host) { int bit; @@ -1003,8 +1031,9 @@ static void mmc_power_up(struct mmc_host *host) */ mmc_delay(10); } +EXPORT_SYMBOL(mmc_power_up); -static void mmc_power_off(struct mmc_host *host) +void mmc_power_off(struct mmc_host *host) { host->ios.clock = 0; host->ios.vdd = 0; @@ -1017,6 +1046,7 @@ static void mmc_power_off(struct mmc_host *host) host->ios.timing = MMC_TIMING_LEGACY; mmc_set_ios(host); } +EXPORT_SYMBOL(mmc_power_off); /* * Cleanup when the last reference to the bus operator is dropped. @@ -1060,9 +1090,10 @@ static inline void mmc_bus_put(struct mmc_host *host) int mmc_resume_bus(struct mmc_host *host) { unsigned long flags; + int err = 0; if (!mmc_bus_needs_resume(host)) - return -EINVAL; + return 0; printk("%s: Starting deferred resume\n", mmc_hostname(host)); spin_lock_irqsave(&host->lock, flags); @@ -1074,15 +1105,20 @@ int mmc_resume_bus(struct mmc_host *host) if (host->bus_ops && !host->bus_dead) { mmc_power_up(host); BUG_ON(!host->bus_ops->resume); - host->bus_ops->resume(host); + err = host->bus_ops->resume(host); + if (err) + goto end; } if (host->bus_ops->detect && !host->bus_dead) host->bus_ops->detect(host); +end: mmc_bus_put(host); - printk("%s: Deferred resume completed\n", mmc_hostname(host)); - return 0; + host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME; + printk(KERN_INFO "%s: Deferred resume %s\n", mmc_hostname(host), + err ? 
"failed" : "completed"); + return err; } EXPORT_SYMBOL(mmc_resume_bus); @@ -1160,6 +1196,26 @@ void mmc_detect_change(struct mmc_host *host, unsigned long delay) EXPORT_SYMBOL(mmc_detect_change); +void mmc_remove_sd_card(struct work_struct *work) +{ + struct mmc_host *host = + container_of(work, struct mmc_host, remove.work); + printk(KERN_INFO "%s: %s\n", mmc_hostname(host), + __func__); + mmc_bus_get(host); + if (host->bus_ops && !host->bus_dead) { + if (host->bus_ops->remove) + host->bus_ops->remove(host); + mmc_claim_host(host); + mmc_detach_bus(host); + mmc_release_host(host); + } + mmc_bus_put(host); + wake_unlock(&mmc_delayed_work_wake_lock); + printk(KERN_INFO "%s: %s exit\n", mmc_hostname(host), + __func__); +} + void mmc_init_erase(struct mmc_card *card) { unsigned int sz; @@ -1745,8 +1801,9 @@ EXPORT_SYMBOL(mmc_card_can_sleep); /** * mmc_suspend_host - suspend a host * @host: mmc host + * @state: suspend mode (PM_SUSPEND_xxx) */ -int mmc_suspend_host(struct mmc_host *host) +int mmc_suspend_host(struct mmc_host *host, pm_message_t state) { int err = 0; @@ -1824,11 +1881,23 @@ int mmc_resume_host(struct mmc_host *host) printk(KERN_WARNING "%s: error %d during resume " "(card was removed?)\n", mmc_hostname(host), err); + if (host->bus_ops->remove) + host->bus_ops->remove(host); + mmc_claim_host(host); + mmc_detach_bus(host); + mmc_release_host(host); + /* no need to bother upper layers */ err = 0; } } mmc_bus_put(host); + /* + * We add a slight delay here so that resume can progress + * in parallel. + */ + mmc_detect_change(host, 1); + return err; } EXPORT_SYMBOL(mmc_resume_host); diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index ca1fdde29df6c..e4266edfc5302 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -54,6 +54,7 @@ static inline void mmc_delay(unsigned int ms) } void mmc_rescan(struct work_struct *work); +void mmc_remove_sd_card(struct work_struct *work); void mmc_start_host(struct mmc_host *host); void mmc_stop_host(struct mmc_host *host); diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 1fce4856c5443..2401d9a807100 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -94,7 +94,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host) spin_unlock_irqrestore(&host->clk_lock, flags); return; } - mutex_lock(&host->clk_gate_mutex); + mmc_claim_host(host); spin_lock_irqsave(&host->clk_lock, flags); if (!host->clk_requests) { spin_unlock_irqrestore(&host->clk_lock, flags); @@ -104,7 +104,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host) pr_debug("%s: gated MCI clock\n", mmc_hostname(host)); } spin_unlock_irqrestore(&host->clk_lock, flags); - mutex_unlock(&host->clk_gate_mutex); + mmc_release_host(host); } /* @@ -130,7 +130,7 @@ void mmc_host_clk_ungate(struct mmc_host *host) { unsigned long flags; - mutex_lock(&host->clk_gate_mutex); + mmc_claim_host(host); spin_lock_irqsave(&host->clk_lock, flags); if (host->clk_gated) { spin_unlock_irqrestore(&host->clk_lock, flags); @@ -140,7 +140,7 @@ void mmc_host_clk_ungate(struct mmc_host *host) } host->clk_requests++; spin_unlock_irqrestore(&host->clk_lock, flags); - mutex_unlock(&host->clk_gate_mutex); + mmc_release_host(host); } /** @@ -218,7 +218,6 @@ static inline void mmc_host_clk_init(struct mmc_host *host) host->clk_gated = false; INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work); spin_lock_init(&host->clk_lock); - mutex_init(&host->clk_gate_mutex); } /** @@ -288,6 +287,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct 
device *dev) spin_lock_init(&host->lock); init_waitqueue_head(&host->wq); INIT_DELAYED_WORK(&host->detect, mmc_rescan); + INIT_DELAYED_WORK(&host->remove, mmc_remove_sd_card); INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable); #ifdef CONFIG_PM host->pm_notify.notifier_call = mmc_pm_notify; diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 83ceeec404f2e..d822ae0232e10 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -253,13 +253,8 @@ static int mmc_read_ext_csd(struct mmc_card *card) ext_csd[EXT_CSD_SEC_CNT + 3] << 24; /* Cards with density > 2GiB are sector addressed */ - if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512) { - unsigned boot_sectors; - /* size is in 256K chunks, i.e. 512 sectors each */ - boot_sectors = ext_csd[EXT_CSD_BOOT_SIZE_MULTI] * 512; - card->ext_csd.sectors -= boot_sectors; + if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512) mmc_card_set_blockaddr(card); - } } switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) { @@ -726,6 +721,25 @@ static int mmc_awake(struct mmc_host *host) return err; } +#ifdef CONFIG_MMC_UNSAFE_RESUME + +static const struct mmc_bus_ops mmc_ops = { + .awake = mmc_awake, + .sleep = mmc_sleep, + .remove = mmc_remove, + .detect = mmc_detect, + .suspend = mmc_suspend, + .resume = mmc_resume, + .power_restore = mmc_power_restore, +}; + +static void mmc_attach_bus_ops(struct mmc_host *host) +{ + mmc_attach_bus(host, &mmc_ops); +} + +#else + static const struct mmc_bus_ops mmc_ops = { .awake = mmc_awake, .sleep = mmc_sleep, @@ -757,6 +771,8 @@ static void mmc_attach_bus_ops(struct mmc_host *host) mmc_attach_bus(host, bus_ops); } +#endif + /* * Starting point for MMC card init. */ diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 60842f878dedb..1a6ac280a1cf2 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -387,6 +387,15 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc) return err; } +/** + * mmc_switch - modify EXT_CSD register + * @card: the MMC card associated with the data transfer + * @set: cmd set values + * @index: EXT_CSD register index + * @value: value to program into EXT_CSD register + * + * Modifies the EXT_CSD register for selected card. + */ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value) { int err; @@ -434,6 +443,8 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value) return 0; } +EXPORT_SYMBOL(mmc_switch); + int mmc_send_status(struct mmc_card *card, u32 *status) { int err; diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h index e6d44b8a18db5..9276946fa5b71 100644 --- a/drivers/mmc/core/mmc_ops.h +++ b/drivers/mmc/core/mmc_ops.h @@ -20,7 +20,6 @@ int mmc_all_send_cid(struct mmc_host *host, u32 *cid); int mmc_set_relative_addr(struct mmc_card *card); int mmc_send_csd(struct mmc_card *card, u32 *csd); int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd); -int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value); int mmc_send_status(struct mmc_card *card, u32 *status); int mmc_send_cid(struct mmc_host *host, u32 *cid); int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp); diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c new file mode 100644 index 0000000000000..981c11343f457 --- /dev/null +++ b/drivers/mmc/core/quirks.c @@ -0,0 +1,48 @@ +/* + * This file contains work-arounds for many known sdio hardware + * bugs. 
+ * + * Copyright (c) 2011 Pierre Tardy + * Inspired from pci fixup code: + * Copyright (c) 1999 Martin Mares + * + */ + +#include +#include +#include + +static const struct mmc_fixup mmc_fixup_methods[] = { + END_FIXUP +}; + +void mmc_fixup_device(struct mmc_card *card, + const struct mmc_fixup *table) +{ + const struct mmc_fixup *f; + u64 rev = cid_rev_card(card); + + /* Non-core specific workarounds. */ + if (!table) + table = mmc_fixup_methods; + + for (f = table; f->vendor_fixup; f++) { + if ((f->manfid == CID_MANFID_ANY + || f->manfid == card->cid.manfid) && + (f->oemid == CID_OEMID_ANY + || f->oemid == card->cid.oemid) && + (f->name == CID_NAME_ANY + || !strcmp(f->name, card->cid.prod_name)) && + (f->cis_vendor == card->cis.vendor + || f->cis_vendor == (u16) SDIO_ANY_ID) && + (f->cis_device == card->cis.device + || f->cis_device == (u16) SDIO_ANY_ID) && + rev >= f->rev_start && + rev <= f->rev_end) { + dev_dbg(&card->dev, "calling %pF\n", f->vendor_fixup); + f->vendor_fixup(card, f->data); + } + } +} +EXPORT_SYMBOL(mmc_fixup_device); + diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 51a9f30c12093..1d19ae12bfbee 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -793,6 +793,23 @@ static int mmc_sd_power_restore(struct mmc_host *host) return ret; } +#ifdef CONFIG_MMC_UNSAFE_RESUME + +static const struct mmc_bus_ops mmc_sd_ops = { + .remove = mmc_sd_remove, + .detect = mmc_sd_detect, + .suspend = mmc_sd_suspend, + .resume = mmc_sd_resume, + .power_restore = mmc_sd_power_restore, +}; + +static void mmc_sd_attach_bus_ops(struct mmc_host *host) +{ + mmc_attach_bus(host, &mmc_sd_ops); +} + +#else + static const struct mmc_bus_ops mmc_sd_ops = { .remove = mmc_sd_remove, .detect = mmc_sd_detect, @@ -820,6 +837,7 @@ static void mmc_sd_attach_bus_ops(struct mmc_host *host) mmc_attach_bus(host, bus_ops); } +#endif /* * Starting point for SD card init. */ diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c index 797cdb5887fd0..7ca5774c28ab5 100644 --- a/drivers/mmc/core/sd_ops.c +++ b/drivers/mmc/core/sd_ops.c @@ -79,6 +79,8 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card, * we cannot use the retries field in mmc_command. 
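The table walked by mmc_fixup_device() matches a card's CID fields (manufacturer ID, OEM ID, product name, revision range), with the *_ANY wildcards standing in for fields a quirk does not care about; the SEM16G/SEM32G entries added to block.c earlier in this series are the in-tree users. The following is a minimal, hypothetical sketch of how another card driver would hook into the same mechanism -- the quirk bit, product name and IDs below are made up for illustration and are not part of the patch:

    /* Illustration only: hypothetical quirk bit and fixup table,
     * mirroring the blk_fixups usage in block.c. */
    #define MMC_QUIRK_EXAMPLE_FOO   (1 << 30)   /* made-up quirk flag */

    static const struct mmc_fixup example_fixups[] = {
        /* MMC_FIXUP(prod_name, manfid, oemid, fixup_fn, data) */
        MMC_FIXUP("ACME16G", 0x13, 0x200, add_quirk, MMC_QUIRK_EXAMPLE_FOO),
        END_FIXUP
    };

    static int example_probe(struct mmc_card *card)
    {
        /* Apply matching quirks once, before the card is used. */
        mmc_fixup_device(card, example_fixups);

        if (card->quirks & MMC_QUIRK_EXAMPLE_FOO)
            pr_info("%s: example quirk applied\n",
                    mmc_hostname(card->host));

        return 0;
    }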
*/ for (i = 0;i <= retries;i++) { + memset(&mrq, 0, sizeof(struct mmc_request)); + err = mmc_app_cmd(host, card); if (err) { /* no point in retrying; no APP commands allowed */ diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index d7892386c9405..882c6075b0d5c 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -154,8 +154,9 @@ static int sdio_enable_wide(struct mmc_card *card) { int ret; u8 ctrl; + unsigned int width = MMC_BUS_WIDTH_4; - if (!(card->host->caps & MMC_CAP_4_BIT_DATA)) + if (!(card->host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) return 0; if (card->cccr.low_speed && !card->cccr.wide_bus) @@ -165,13 +166,23 @@ static int sdio_enable_wide(struct mmc_card *card) if (ret) return ret; + if (card->host->caps & MMC_CAP_8_BIT_DATA) { + width = MMC_BUS_WIDTH_8; + ctrl |= SDIO_BUS_WIDTH_8BIT; + } else { + width = MMC_BUS_WIDTH_4; + ctrl |= SDIO_BUS_WIDTH_4BIT; + } + ctrl |= SDIO_BUS_WIDTH_4BIT; ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL); if (ret) return ret; - return 1; + mmc_set_bus_width(card->host, width); + + return 0; } /* @@ -399,6 +410,14 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, if (err) goto remove; + /* + * Update oldcard with the new RCA received from the SDIO + * device -- we're doing this so that it's updated in the + * "card" struct when oldcard overwrites that later. + */ + if (oldcard) + oldcard->rca = card->rca; + mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL); } @@ -478,6 +497,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, card = oldcard; } + mmc_fixup_device(card, NULL); if (card->type == MMC_TYPE_SD_COMBO) { err = mmc_sd_setup_card(host, card, oldcard != NULL); @@ -516,7 +536,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, /* * Switch to wider bus (if supported). */ - err = sdio_enable_4bit_bus(card); + err = sdio_enable_wide(card); if (err > 0) mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4); else if (err) @@ -789,14 +809,20 @@ int mmc_attach_sdio(struct mmc_host *host) * The number of functions on the card is encoded inside * the ocr. */ - funcs = (ocr & 0x70000000) >> 28; - card->sdio_funcs = 0; + card->sdio_funcs = funcs = (ocr & 0x70000000) >> 28; #ifdef CONFIG_MMC_EMBEDDED_SDIO if (host->embedded_sdio_data.funcs) card->sdio_funcs = funcs = host->embedded_sdio_data.num_funcs; #endif + /* + * If needed, disconnect card detection pull-up resistor. + */ + err = sdio_disable_cd(card); + if (err) + goto remove; + /* * Initialize (but don't add) all present functions. 
*/ @@ -928,10 +954,8 @@ int sdio_reset_comm(struct mmc_card *card) */ mmc_set_clock(host, mmc_sdio_get_max_clock(card)); - err = sdio_enable_4bit_bus(card); - if (err > 0) - mmc_set_bus_width(host, MMC_BUS_WIDTH_4); - else if (err) + err = sdio_enable_wide(card); + if (err) goto err; mmc_release_host(host); diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c index 549a341446462..d3d0e2e3fd5a5 100755 --- a/drivers/mmc/core/sdio_io.c +++ b/drivers/mmc/core/sdio_io.c @@ -692,11 +692,13 @@ void sdio_f0_writeb(struct sdio_func *func, unsigned char b, unsigned int addr, BUG_ON(!func); +#if 0 if ((addr < 0xF0 || addr > 0xFF) && (!mmc_card_lenient_fn0(func->card))) { if (err_ret) *err_ret = -EINVAL; return; } +#endif ret = mmc_io_rw_direct(func->card, 1, 0, addr, b, NULL); if (err_ret) diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c index 153ab977a0136..50a711efddffe 100644 --- a/drivers/mmc/host/msm_sdcc.c +++ b/drivers/mmc/host/msm_sdcc.c @@ -40,36 +40,135 @@ #include #include #include +#include #include #include #include #include +#include #include "msm_sdcc.h" #define DRIVER_NAME "msm-sdcc" +#define DBG(host, fmt, args...) \ + pr_debug("%s: %s: " fmt "\n", mmc_hostname(host->mmc), __func__ , args) + +#define IRQ_DEBUG 0 + +#define DISABLE_SVLTE_BUSCLK_PWRSAVE 1 + +#if defined(CONFIG_DEBUG_FS) +static void msmsdcc_dbg_createhost(struct msmsdcc_host *); +static struct dentry *debugfs_dir; +static struct dentry *debugfs_file; +static int msmsdcc_dbg_init(void); +#endif + #define BUSCLK_PWRSAVE 1 #define BUSCLK_TIMEOUT (HZ) +#define SQN_BUSCLK_TIMEOUT (5 * HZ) + static unsigned int msmsdcc_fmin = 144000; static unsigned int msmsdcc_fmax = 50000000; static unsigned int msmsdcc_4bit = 1; static unsigned int msmsdcc_pwrsave = 1; static unsigned int msmsdcc_piopoll = 1; static unsigned int msmsdcc_sdioirq; +static unsigned long msmsdcc_irqtime; #define PIO_SPINMAX 30 #define CMD_SPINMAX 20 +#define WRITE_WAIT_DAT0_MAX 10 + +#define VERBOSE_COMMAND_TIMEOUTS 1 +#define SDC_CLK_VERBOSE 1 + +#ifdef CONFIG_WIMAX +extern int mmc_wimax_get_status(void); +extern int mmc_wimax_get_busclk_pwrsave(void); +extern void mmc_wimax_enable_host_wakeup(int on); +#else +static int mmc_wimax_get_status(void) { return 0; } +static int mmc_wimax_get_busclk_pwrsave(void) { return 0; } +#endif + +#if IRQ_DEBUG == 1 +static char *irq_status_bits[] = { "cmdcrcfail", "datcrcfail", "cmdtimeout", + "dattimeout", "txunderrun", "rxoverrun", + "cmdrespend", "cmdsent", "dataend", NULL, + "datablkend", "cmdactive", "txactive", + "rxactive", "txhalfempty", "rxhalffull", + "txfifofull", "rxfifofull", "txfifoempty", + "rxfifoempty", "txdataavlbl", "rxdataavlbl", + "sdiointr", "progdone", "atacmdcompl", + "sdiointrope", "ccstimeout", NULL, NULL, + NULL, NULL, NULL }; + +static void +msmsdcc_print_status(struct msmsdcc_host *host, char *hdr, uint32_t status) +{ + int i; + + printk(KERN_DEBUG "%s-%s ", mmc_hostname(host->mmc), hdr); + for (i = 0; i < 32; i++) { + if (status & (1 << i)) + printk("%s ", irq_status_bits[i]); + } + printk("\n"); +} +#endif + +static int is_sd_platform(struct msm_mmc_platform_data *plat) +{ + if (plat->slot_type && *plat->slot_type == MMC_TYPE_SD) + return 1; + + return 0; +} + +#if BUSCLK_PWRSAVE +static int is_wimax_platform(struct msm_mmc_platform_data *plat) +{ + if (plat->slot_type && *plat->slot_type == MMC_TYPE_SDIO_WIMAX) + return 1; + + return 0; +} static inline void msmsdcc_disable_clocks(struct msmsdcc_host *host, int deferr) { + u32 delay = BUSCLK_TIMEOUT; + + 
if (is_wimax_platform(host->plat) && mmc_wimax_get_status()) { + if (host->curr.mrq) { + printk("%s [WiMAX] %s curr.mrq != NULL", __func__, mmc_hostname(host->mmc)); + return; + } + + if (!host->clks_on) { + printk("%s [WiMAX] %s clks_on is OFF", __func__, mmc_hostname(host->mmc)); + return; + } + } + WARN_ON(!host->clks_on); + if (host->curr.mrq) + printk("%s call %s()", mmc_hostname(host->mmc), __func__); + BUG_ON(host->curr.mrq); + if (is_wimax_platform(host->plat) && mmc_wimax_get_status()) { + if (!mmc_wimax_get_busclk_pwrsave()) + return; + else + delay = SQN_BUSCLK_TIMEOUT; + } + if (deferr) { mod_timer(&host->busclk_timer, jiffies + BUSCLK_TIMEOUT); } else { @@ -78,12 +177,52 @@ msmsdcc_disable_clocks(struct msmsdcc_host *host, int deferr) * timer fired */ if (host->clks_on) { +#if SDC_CLK_VERBOSE + if (is_wimax_platform(host->plat)) { +#ifdef CONFIG_WIMAX + mmc_wimax_enable_host_wakeup(1); +#endif + pr_info("%s: Disable clocks\n", mmc_hostname(host->mmc)); + } +#endif clk_disable(host->clk); clk_disable(host->pclk); host->clks_on = 0; } } } +EXPORT_SYMBOL(msmsdcc_disable_clocks); + +int +msmsdcc_get_sdc_clocks(struct msmsdcc_host *host) +{ + if (host->clks_on) + return 1; + else + return 0; +} +EXPORT_SYMBOL(msmsdcc_get_sdc_clocks); + +static void +msmsdcc_busclk_expired(unsigned long _data) +{ + struct msmsdcc_host *host = (struct msmsdcc_host *) _data; + unsigned long flags; + + /* dev_info(mmc_dev(host->mmc), "Bus clock timer expired - S\n"); */ +#if SDC_CLK_VERBOSE + if (is_wimax_platform(host->plat)) { + pr_info("%s: Bus clock timer expired\n", mmc_hostname(host->mmc)); + } +#endif + + spin_lock_irqsave(&host->lock, flags); + if (host->clks_on) + msmsdcc_disable_clocks(host, 0); + spin_unlock_irqrestore(&host->lock, flags); +} + +#endif static inline int msmsdcc_enable_clocks(struct msmsdcc_host *host) @@ -93,6 +232,15 @@ msmsdcc_enable_clocks(struct msmsdcc_host *host) del_timer_sync(&host->busclk_timer); if (!host->clks_on) { +#if SDC_CLK_VERBOSE + if (is_wimax_platform(host->plat)) { + pr_info("%s: Enable clocks\n", mmc_hostname(host->mmc)); + +#ifdef CONFIG_WIMAX + mmc_wimax_enable_host_wakeup(0); +#endif + } +#endif rc = clk_enable(host->pclk); if (rc) return rc; @@ -107,6 +255,19 @@ msmsdcc_enable_clocks(struct msmsdcc_host *host) } return 0; } +EXPORT_SYMBOL(msmsdcc_enable_clocks); + +static char *mmc_type_str(unsigned int slot_type) +{ + switch (slot_type) { + case MMC_TYPE_MMC: return "MMC"; + case MMC_TYPE_SD: return "SD"; + case MMC_TYPE_SDIO: return "SDIO"; + case MMC_TYPE_SD_COMBO: return "SDIO(combo)"; + case MMC_TYPE_SDIO_WIMAX: return "SDIO(WiMAX)"; + default: return "Unknown type"; + } +} static inline unsigned int msmsdcc_readl(struct msmsdcc_host *host, unsigned int reg) @@ -131,7 +292,7 @@ static void msmsdcc_reset_and_restore(struct msmsdcc_host *host) { u32 mci_clk = 0; u32 mci_mask0 = 0; - int ret = 0; + int ret; /* Save the controller state */ mci_clk = readl(host->base + MMCICLOCK); @@ -161,7 +322,7 @@ static void msmsdcc_reset_and_restore(struct msmsdcc_host *host) mmc_hostname(host->mmc), host->clk_rate, ret); } -static void +void msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq) { BUG_ON(host->curr.data); @@ -185,17 +346,30 @@ msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq) mmc_request_done(host->mmc, mrq); spin_lock(&host->lock); } +EXPORT_SYMBOL(msmsdcc_request_end); -static void +void msmsdcc_stop_data(struct msmsdcc_host *host) { host->curr.data = NULL; host->curr.got_dataend = 0; } 
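The clock handling above reduces to one pattern: gating the bus clock is deferred through a one-shot timer armed by msmsdcc_disable_clocks(), and every path that needs the clocks again cancels that timer before ungating (msmsdcc_enable_clocks() calls del_timer_sync() first), with extra guards for the WiMAX slot. Below is a stripped-down sketch of that pattern only -- hypothetical names, clk_enable()/clk_disable() calls elided, and the same init_timer() .data/.function setup assumed as in msmsdcc_probe():

    #include <linux/timer.h>
    #include <linux/spinlock.h>
    #include <linux/jiffies.h>

    /* Sketch only -- not the msmsdcc code itself. */
    struct example_host {
        spinlock_t         lock;
        int                clks_on;
        struct timer_list  busclk_timer;   /* set up with init_timer() */
    };

    static void example_disable_clocks(struct example_host *host, int deferr)
    {
        if (deferr) {
            /* Cheap path: postpone the gate; a new request cancels it. */
            mod_timer(&host->busclk_timer, jiffies + HZ);
        } else if (host->clks_on) {
            /* Timer expiry or suspend: gate for real. */
            /* clk_disable(...) calls go here */
            host->clks_on = 0;
        }
    }

    static void example_enable_clocks(struct example_host *host)
    {
        /* Cancel any pending deferred gate before touching the hardware. */
        del_timer_sync(&host->busclk_timer);
        if (!host->clks_on) {
            /* clk_enable(...) calls go here */
            host->clks_on = 1;
        }
    }

    static void example_busclk_expired(unsigned long data)
    {
        struct example_host *host = (struct example_host *)data;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        if (host->clks_on)
            example_disable_clocks(host, 0);
        spin_unlock_irqrestore(&host->lock, flags);
    }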
+EXPORT_SYMBOL(msmsdcc_stop_data); uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host) { - return host->memres->start + MMCIFIFO; + switch (host->pdev_id) { + case 1: + return MSM_SDC1_PHYS + MMCIFIFO; + case 2: + return MSM_SDC2_PHYS + MMCIFIFO; + case 3: + return MSM_SDC3_PHYS + MMCIFIFO; + case 4: + return MSM_SDC4_PHYS + MMCIFIFO; + } + BUG(); + return 0; } static inline void @@ -391,6 +565,7 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data) host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(host->dma.cmdptr_busaddr); host->dma.hdr.complete_func = msmsdcc_dma_complete_func; + host->dma.hdr.execute_func = NULL; n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents, host->dma.dir); @@ -542,9 +717,11 @@ msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data, msmsdcc_start_command_deferred(host, cmd, &c); host->cmd_c = c; } - msm_dmov_enqueue_cmd(host->dma.channel, &host->dma.hdr); + dsb(); + msm_dmov_enqueue_cmd_ext(host->dma.channel, &host->dma.hdr); if (data->flags & MMC_DATA_WRITE) host->prog_scan = true; + } else { msmsdcc_writel(host, timeout, MMCIDATATIMER); @@ -563,9 +740,27 @@ msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data, static void msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd, u32 c) { + struct mmc_data *mdata = cmd->mrq->data; + if (cmd == cmd->mrq->stop) c |= MCI_CSPM_MCIABORT; + if (is_sd_platform(host->plat) && (cmd->opcode == 12) && + (mdata->flags & MMC_DATA_WRITE) && (mdata->blocks > 64)) { + int i; + unsigned dat0 = 67; + + if (host->plat->dat0_gpio) + dat0 = host->plat->dat0_gpio; + + for (i = 0; i < WRITE_WAIT_DAT0_MAX; i++) { + if (gpio_get_value(dat0)) + break; + else + udelay(300); + } + } + host->stats.cmds++; msmsdcc_start_command_deferred(host, cmd, &c); @@ -605,10 +800,16 @@ msmsdcc_pio_read(struct msmsdcc_host *host, char *buffer, unsigned int remain) { uint32_t *ptr = (uint32_t *) buffer; int count = 0; - - if (remain % 4) - remain = ((remain >> 2) + 1) << 2; - +#ifdef CONFIG_WIMAX + unsigned int val = 0; //For 2 bytes data access and consider normal 4 bytes SDIO alignment + + /* For 2 bytes data access and consider normal 4 bytes SDIO alignment */ + if ( (remain < 4) ) { + val = msmsdcc_readl(host, MMCIFIFO); + memcpy(ptr, &val, remain); + count += remain; + }else +#endif while (msmsdcc_readl(host, MMCISTATUS) & MCI_RXDATAAVLBL) { *ptr = msmsdcc_readl(host, MMCIFIFO + (count % MCI_FIFOSIZE)); ptr++; @@ -628,6 +829,22 @@ msmsdcc_pio_write(struct msmsdcc_host *host, char *buffer, void __iomem *base = host->base; char *ptr = buffer; +#ifdef CONFIG_WIMAX + unsigned int val = 0; //For 2 bytes data access and consider normal 4 bytes SDIO alignment + + /* For 2 bytes data access and consider normal 4 bytes SDIO alignment */ + if ( (remain < 4) ) { + memcpy(&val, ptr, remain); + writel(val, base + MMCIFIFO); + // check the data end + do { + status = readl(base + MMCISTATUS); + }while (!(status & MCI_DATABLOCKEND)); + + return remain; + } else { +#endif + do { unsigned int count, maxcnt, sz; @@ -647,6 +864,9 @@ msmsdcc_pio_write(struct msmsdcc_host *host, char *buffer, } while (status & MCI_TXFIFOHALFEMPTY); return ptr - buffer; +#ifdef CONFIG_WIMAX + } +#endif } static int @@ -667,7 +887,46 @@ msmsdcc_pio_irq(int irq, void *dev_id) struct msmsdcc_host *host = dev_id; uint32_t status; + spin_lock(&host->lock); status = msmsdcc_readl(host, MMCISTATUS); +#if IRQ_DEBUG + msmsdcc_print_status(host, "irq1-r", status); +#endif + + /* Workaround when we 
found sg is NULL (SST) */ + if (host->pio.sg == NULL) { + if (host->pio.sg == NULL) { + printk(KERN_INFO "%s: pio scatter list is null - ", mmc_hostname(host->mmc)); + } + + if (status & MCI_RXACTIVE) { + int read_cnt = 0; + while (msmsdcc_readl(host, MMCISTATUS) & MCI_RXDATAAVLBL) { + msmsdcc_readl(host, MMCIFIFO + (read_cnt % MCI_FIFOSIZE)); + read_cnt += sizeof(uint32_t); + if ((read_cnt) > MCI_FIFOSIZE) + break; + } + msmsdcc_writel(host, MCI_RXDATAAVLBLMASK, MMCIMASK1); + printk("RX\n"); + } + if (status & MCI_TXACTIVE) { + struct mmc_request *mrq; + + msmsdcc_writel(host, 0, MMCIMASK1); + mrq = host->curr.mrq; + if (mrq) { + mrq->data->error = -EIO; + if (mrq->done) + mrq->done(mrq); + host->curr.mrq = NULL; + } + printk("TX\n"); + } + + spin_unlock(&host->lock); + return IRQ_HANDLED; + } do { unsigned long flags; @@ -731,6 +990,7 @@ msmsdcc_pio_irq(int irq, void *dev_id) if (!host->curr.xfer_remain) msmsdcc_writel(host, 0, MMCIMASK1); + spin_unlock(&host->lock); return IRQ_HANDLED; } @@ -833,8 +1093,11 @@ msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status, * Check to see if there is still data to be read, * and simulate a PIO irq. */ - if (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL) + if (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL) { + spin_unlock(&host->lock); msmsdcc_pio_irq(1, host); + spin_lock(&host->lock); + } msmsdcc_stop_data(host); if (!data->error) @@ -860,11 +1123,20 @@ msmsdcc_irq(int irq, void *dev_id) do { status = msmsdcc_readl(host, MMCISTATUS); - status &= msmsdcc_readl(host, MMCIMASK0); + +#if IRQ_DEBUG + msmsdcc_print_status(host, "irq0-r", status); +#endif + status &= (msmsdcc_readl(host, MMCIMASK0) | + MCI_DATABLOCKENDMASK); + msmsdcc_writel(host, status, MMCICLEAR); if (status & MCI_SDIOINTR) status &= ~MCI_SDIOINTR; +#if IRQ_DEBUG + msmsdcc_print_status(host, "irq0-p", status); +#endif if (!status) break; @@ -903,7 +1175,7 @@ msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq) host->stats.reqs++; - if (host->eject) { + if (host->eject || (mmc->card && mmc->card->removed)) { if (mrq->data && !(mrq->data->flags & MMC_DATA_READ)) { mrq->cmd->error = 0; mrq->data->bytes_xfered = mrq->data->blksz * @@ -979,11 +1251,13 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) switch (ios->power_mode) { case MMC_POWER_OFF: + htc_pwrsink_set(PWRSINK_SDCARD, 0); break; case MMC_POWER_UP: pwr |= MCI_PWR_UP; break; case MMC_POWER_ON: + htc_pwrsink_set(PWRSINK_SDCARD, 100); pwr |= MCI_PWR_ON; break; } @@ -1033,6 +1307,8 @@ msmsdcc_check_status(unsigned long data) { struct msmsdcc_host *host = (struct msmsdcc_host *)data; unsigned int status; + unsigned long duration; + int sdcard = is_sd_platform(host->plat); if (!host->plat->status) { mmc_detect_change(host->mmc, 0); @@ -1044,19 +1320,33 @@ msmsdcc_check_status(unsigned long data) if (status ^ host->oldstat) { pr_info("%s: Slot status change detected (%d -> %d)\n", mmc_hostname(host->mmc), host->oldstat, status); - if (status) - mmc_detect_change(host->mmc, (5 * HZ) / 2); - else - mmc_detect_change(host->mmc, 0); + duration = jiffies - msmsdcc_irqtime; + + if (status) { + if (sdcard) { + if (duration < (7 * HZ)) + duration = (7 * HZ) - duration; + else + duration = 10; + } else + duration = (5 * HZ) / 2; + } else + duration = 0; + + mmc_detect_change(host->mmc, duration); + + if (sdcard) + msmsdcc_irqtime = jiffies; } host->oldstat = status; out: if (host->timer.function) + { mod_timer(&host->timer, jiffies + HZ); + } } - static irqreturn_t msmsdcc_platform_status_irq(int irq, void *dev_id) { @@ 
-1077,15 +1367,6 @@ msmsdcc_status_notify_cb(int card_present, void *dev_id) msmsdcc_check_status((unsigned long) host); } -static void -msmsdcc_busclk_expired(unsigned long _data) -{ - struct msmsdcc_host *host = (struct msmsdcc_host *) _data; - - if (host->clks_on) - msmsdcc_disable_clocks(host, 0); -} - static int msmsdcc_init_dma(struct msmsdcc_host *host) { @@ -1113,6 +1394,58 @@ msmsdcc_init_dma(struct msmsdcc_host *host) return 0; } +#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ +static void +do_resume_work(struct work_struct *work) +{ + struct msmsdcc_host *host = + container_of(work, struct msmsdcc_host, resume_task); + struct mmc_host *mmc = host->mmc; + + if (mmc) { + mmc_resume_host(mmc); + if (host->stat_irq) + enable_irq(host->stat_irq); + } +} + +#endif + + +#ifdef CONFIG_HAS_EARLYSUSPEND +static void msmsdcc_early_suspend(struct early_suspend *h) +{ + struct msmsdcc_host *host = + container_of(h, struct msmsdcc_host, early_suspend); + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + host->polling_enabled = host->mmc->caps & MMC_CAP_NEEDS_POLL; + host->mmc->caps &= ~MMC_CAP_NEEDS_POLL; + spin_unlock_irqrestore(&host->lock, flags); + + if (is_wimax_platform(host->plat)) { + if (host->clks_on) { + msmsdcc_disable_clocks(host, 0); + } + } +}; +static void msmsdcc_late_resume(struct early_suspend *h) +{ + struct msmsdcc_host *host = + container_of(h, struct msmsdcc_host, early_suspend); + unsigned long flags; + + if (host->polling_enabled) { + spin_lock_irqsave(&host->lock, flags); + host->mmc->caps |= MMC_CAP_NEEDS_POLL; + mmc_detect_change(host->mmc, 0); + spin_unlock_irqrestore(&host->lock, flags); + } + +}; +#endif + static int msmsdcc_probe(struct platform_device *pdev) { @@ -1185,9 +1518,16 @@ msmsdcc_probe(struct platform_device *pdev) host->dmares = dmares; spin_lock_init(&host->lock); +#ifdef CONFIG_MMC_EMBEDDED_SDIO + if (plat->embedded_sdio) + mmc_set_embedded_sdio_data(mmc, + &plat->embedded_sdio->cis, + &plat->embedded_sdio->cccr, + plat->embedded_sdio->funcs, + plat->embedded_sdio->num_funcs); +#endif tasklet_init(&host->dma_tlet, msmsdcc_dma_complete_tlet, (unsigned long)host); - /* * Setup DMA */ @@ -1247,6 +1587,10 @@ msmsdcc_probe(struct platform_device *pdev) msmsdcc_writel(host, MCI_IRQENABLE, MMCIMASK0); host->saved_irq0mask = MCI_IRQENABLE; + mmc->pm_caps = MMC_PM_KEEP_POWER | MMC_PM_IGNORE_PM_NOTIFY; + if (plat->built_in) + mmc->pm_flags = MMC_PM_KEEP_POWER | MMC_PM_IGNORE_PM_NOTIFY; + /* * Setup card detect change */ @@ -1285,11 +1629,11 @@ msmsdcc_probe(struct platform_device *pdev) host->oldstat = host->plat->status(mmc_dev(host->mmc)); host->eject = !host->oldstat; } - +#if BUSCLK_PWRSAVE init_timer(&host->busclk_timer); host->busclk_timer.data = (unsigned long) host; host->busclk_timer.function = msmsdcc_busclk_expired; - +#endif ret = request_irq(cmd_irqres->start, msmsdcc_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host); if (ret) @@ -1303,12 +1647,23 @@ msmsdcc_probe(struct platform_device *pdev) mmc_set_drvdata(pdev, mmc); mmc_add_host(mmc); +#ifdef CONFIG_HAS_EARLYSUSPEND + host->early_suspend.suspend = msmsdcc_early_suspend; + host->early_suspend.resume = msmsdcc_late_resume; + host->early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB; + register_early_suspend(&host->early_suspend); +#endif + pr_info("%s: Qualcomm MSM SDCC at 0x%016llx irq %d,%d dma %d\n", mmc_hostname(mmc), (unsigned long long)memres->start, (unsigned int) cmd_irqres->start, (unsigned int) host->stat_irq, host->dma.channel); + pr_info("%s: Platform slot type: %s\n", 
mmc_hostname(mmc), + (plat->slot_type) ? mmc_type_str(*plat->slot_type) : "N/A"); pr_info("%s: 4 bit data mode %s\n", mmc_hostname(mmc), (mmc->caps & MMC_CAP_4_BIT_DATA ? "enabled" : "disabled")); + pr_info("%s: 8 bit data mode %s\n", mmc_hostname(mmc), + (mmc->caps & MMC_CAP_8_BIT_DATA ? "enabled" : "disabled")); pr_info("%s: MMC clock %u -> %u Hz, PCLK %u Hz\n", mmc_hostname(mmc), msmsdcc_fmin, msmsdcc_fmax, host->pclk_rate); pr_info("%s: Slot eject status = %d\n", mmc_hostname(mmc), host->eject); @@ -1326,6 +1681,13 @@ msmsdcc_probe(struct platform_device *pdev) if (host->timer.function) pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc)); +#if defined(CONFIG_DEBUG_FS) + msmsdcc_dbg_createhost(host); +#endif + +#if BUSCLK_PWRSAVE + msmsdcc_disable_clocks(host, 1); +#endif return 0; cmd_irq_free: free_irq(cmd_irqres->start, host); @@ -1333,7 +1695,8 @@ msmsdcc_probe(struct platform_device *pdev) if (host->stat_irq) free_irq(host->stat_irq, host); clk_disable: - msmsdcc_disable_clocks(host, 0); + clk_disable(host->clk); + clk_disable(host->pclk); clk_put: clk_put(host->clk); pclk_put: @@ -1368,19 +1731,43 @@ msmsdcc_suspend(struct platform_device *dev, pm_message_t state) struct mmc_host *mmc = mmc_get_drvdata(dev); int rc = 0; +#if SDC_CLK_VERBOSE + printk("%s enter\n", __func__); +#endif + if (mmc) { struct msmsdcc_host *host = mmc_priv(mmc); if (host->stat_irq) disable_irq(host->stat_irq); + if (host->plat->built_in) + mmc->pm_flags |= MMC_PM_KEEP_POWER; + if (mmc->card && mmc->card->type != MMC_TYPE_SDIO) - rc = mmc_suspend_host(mmc); + rc = mmc_suspend_host(mmc, state); if (!rc) msmsdcc_writel(host, 0, MMCIMASK0); - if (host->clks_on) - msmsdcc_disable_clocks(host, 0); +#if BUSCLK_PWRSAVE + del_timer_sync(&host->busclk_timer); +#endif + if (host->clks_on) { +#if SDC_CLK_VERBOSE + if (is_wimax_platform(host->plat)) { + pr_info("%s: Disable clocks in %s\n", mmc_hostname(host->mmc), __func__); + } +#endif + // For suspend case + clk_disable(host->clk); + clk_disable(host->pclk); + host->clks_on = 0; + } } + +#if SDC_CLK_VERBOSE + printk("%s leave\n", __func__); +#endif + return rc; } @@ -1389,6 +1776,10 @@ msmsdcc_resume(struct platform_device *dev) { struct mmc_host *mmc = mmc_get_drvdata(dev); +#if SDC_CLK_VERBOSE + printk("%s enter\n", __func__); +#endif + if (mmc) { struct msmsdcc_host *host = mmc_priv(mmc); @@ -1396,14 +1787,23 @@ msmsdcc_resume(struct platform_device *dev) msmsdcc_writel(host, host->saved_irq0mask, MMCIMASK0); - if (mmc->card && mmc->card->type != MMC_TYPE_SDIO) + if (mmc->card && mmc->card->type != MMC_TYPE_SDIO) { +#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ + schedule_work(&host->resume_task); +#else mmc_resume_host(mmc); +#endif + } + if (host->stat_irq) enable_irq(host->stat_irq); #if BUSCLK_PWRSAVE + if (host->clks_on) msmsdcc_disable_clocks(host, 1); #endif } + + return 0; } #else @@ -1428,10 +1828,128 @@ static int __init msmsdcc_init(void) static void __exit msmsdcc_exit(void) { platform_driver_unregister(&msmsdcc_driver); +#if defined(CONFIG_DEBUG_FS) + debugfs_remove(debugfs_file); + debugfs_remove(debugfs_dir); +#endif } +#ifndef MODULE +static int __init msmsdcc_pwrsave_setup(char *__unused) +{ + msmsdcc_pwrsave = 1; + return 1; +} + +static int __init msmsdcc_nopwrsave_setup(char *__unused) +{ + msmsdcc_pwrsave = 0; + return 1; +} + + +static int __init msmsdcc_fmin_setup(char *str) +{ + unsigned int n; + + if (!get_option(&str, &n)) + return 0; + msmsdcc_fmin = n; + return 1; +} + +static int __init msmsdcc_fmax_setup(char *str) +{ + 
unsigned int n; + + if (!get_option(&str, &n)) + return 0; + msmsdcc_fmax = n; + return 1; +} +#endif + +__setup("msmsdcc_pwrsave", msmsdcc_pwrsave_setup); +__setup("msmsdcc_nopwrsave", msmsdcc_nopwrsave_setup); +__setup("msmsdcc_fmin=", msmsdcc_fmin_setup); +__setup("msmsdcc_fmax=", msmsdcc_fmax_setup); + module_init(msmsdcc_init); module_exit(msmsdcc_exit); MODULE_DESCRIPTION("Qualcomm MSM 7X00A Multimedia Card Interface driver"); MODULE_LICENSE("GPL"); + +#if defined(CONFIG_DEBUG_FS) + +static int +msmsdcc_dbg_state_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t +msmsdcc_dbg_state_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct msmsdcc_host *host = (struct msmsdcc_host *) file->private_data; + char buf[1024]; + int max, i; + + i = 0; + max = sizeof(buf) - 1; + + i += scnprintf(buf + i, max - i, "STAT: %p %p %p\n", host->curr.mrq, + host->curr.cmd, host->curr.data); + if (host->curr.cmd) { + struct mmc_command *cmd = host->curr.cmd; + + i += scnprintf(buf + i, max - i, "CMD : %.8x %.8x %.8x\n", + cmd->opcode, cmd->arg, cmd->flags); + } + if (host->curr.data) { + struct mmc_data *data = host->curr.data; + i += scnprintf(buf + i, max - i, + "DAT0: %.8x %.8x %.8x %.8x %.8x %.8x\n", + data->timeout_ns, data->timeout_clks, + data->blksz, data->blocks, data->error, + data->flags); + i += scnprintf(buf + i, max - i, "DAT1: %.8x %.8x %.8x %p\n", + host->curr.xfer_size, host->curr.xfer_remain, + host->curr.data_xfered, host->dma.sg); + } + + return simple_read_from_buffer(ubuf, count, ppos, buf, i); +} + +static const struct file_operations msmsdcc_dbg_state_ops = { + .read = msmsdcc_dbg_state_read, + .open = msmsdcc_dbg_state_open, +}; + +static void msmsdcc_dbg_createhost(struct msmsdcc_host *host) +{ + if (debugfs_dir) { + debugfs_file = debugfs_create_file(mmc_hostname(host->mmc), + 0644, debugfs_dir, host, + &msmsdcc_dbg_state_ops); + } +} + +static int __init msmsdcc_dbg_init(void) +{ + int err; + + debugfs_dir = debugfs_create_dir("msmsdcc", 0); + if (IS_ERR(debugfs_dir)) { + err = PTR_ERR(debugfs_dir); + debugfs_dir = NULL; + return err; + } + + return 0; +} +device_initcall(msmsdcc_dbg_init); +#endif + diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h index 939557af266d6..de4d95c84c14d 100644 --- a/drivers/mmc/host/msm_sdcc.h +++ b/drivers/mmc/host/msm_sdcc.h @@ -235,8 +235,14 @@ struct msmsdcc_host { struct msmsdcc_pio_data pio; int cmdpoll; struct msmsdcc_stats stats; - +#ifdef CONFIG_HAS_EARLYSUSPEND + struct early_suspend early_suspend; + int polling_enabled; +#endif struct tasklet_struct dma_tlet; +#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ + struct work_struct resume_task; +#endif /* Command parameters */ unsigned int cmd_timeout; unsigned int cmd_pio_irqmask; diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index 0dc905b20eee1..b1b2fc04dd72c 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c @@ -546,6 +546,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = { .driver_data = (kernel_ulong_t)&sdhci_ricoh_mmc, }, + { + .vendor = PCI_VENDOR_ID_RICOH, + .device = 0xe823, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_ricoh_mmc, + }, + { .vendor = PCI_VENDOR_ID_ENE, .device = PCI_DEVICE_ID_ENE_CB712_SD, @@ -953,6 +961,7 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot( host->ioaddr = pci_ioremap_bar(pdev, bar); if (!host->ioaddr) { 
dev_err(&pdev->dev, "failed to remap registers\n"); + ret = -ENOMEM; goto release; } diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 3820adf155d2e..6e8971cbe35d1 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1334,6 +1334,13 @@ static void sdhci_tasklet_finish(unsigned long param) host = (struct sdhci_host*)param; + /* + * If this tasklet gets rescheduled while running, it will + * be run again afterwards but without any active request. + */ + if (!host->mrq) + return; + spin_lock_irqsave(&host->lock, flags); del_timer(&host->timer); @@ -1345,7 +1352,7 @@ static void sdhci_tasklet_finish(unsigned long param) * upon error conditions. */ if (!(host->flags & SDHCI_DEVICE_DEAD) && - (mrq->cmd->error || + ((mrq->cmd && mrq->cmd->error) || (mrq->data && (mrq->data->error || (mrq->data->stop && mrq->data->stop->error))) || (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index 35081ce77fbdd..03fe3e3d3294b 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig @@ -49,6 +49,14 @@ config MTD_MS02NV say M here and read . The module will be called ms02-nv. +config MTD_MSM_NAND + tristate "MSM NAND Device Support" + depends on MTD && ARCH_MSM + select MTD_NAND_IDS + default y + help + Support for some NAND chips connected to the MSM NAND controller. + config MTD_DATAFLASH tristate "Support for AT45xxx DataFlash" depends on SPI_MASTER && EXPERIMENTAL diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile index f3226b1d38fca..fe959e82250bd 100644 --- a/drivers/mtd/devices/Makefile +++ b/drivers/mtd/devices/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_MTD_SLRAM) += slram.o obj-$(CONFIG_MTD_PHRAM) += phram.o obj-$(CONFIG_MTD_PMC551) += pmc551.o obj-$(CONFIG_MTD_MS02NV) += ms02-nv.o +obj-$(CONFIG_MTD_MSM_NAND) += msm_nand.o obj-$(CONFIG_MTD_MTDRAM) += mtdram.o obj-$(CONFIG_MTD_LART) += lart.o obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o diff --git a/drivers/mtd/devices/msm_nand.c b/drivers/mtd/devices/msm_nand.c new file mode 100644 index 0000000000000..0e4c0bffc90c5 --- /dev/null +++ b/drivers/mtd/devices/msm_nand.c @@ -0,0 +1,1778 @@ +/* drivers/mtd/devices/msm_nand.c + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include "msm_nand.h" + +#define MSM_NAND_DMA_BUFFER_SIZE SZ_4K +#define MSM_NAND_DMA_BUFFER_SLOTS \ + (MSM_NAND_DMA_BUFFER_SIZE / (sizeof(((atomic_t *)0)->counter) * 8)) + +#define SUPPORT_WRONG_ECC_CONFIG 1 + +#define MSM_NAND_CFG0_RAW 0xA80420C0 +#define MSM_NAND_CFG1_RAW 0x5045D + +#define VERBOSE 0 + +static struct nand_hw_info *nand_info; +struct nand_hw_info { + uint32_t flash_id; + uint8_t maker_id; + uint8_t maker_name[10]; + uint8_t width; + uint32_t size; + uint32_t block_count; + uint32_t page_count; + uint32_t page_size; +}; + +struct msm_nand_chip { + struct device *dev; + wait_queue_head_t wait_queue; + atomic_t dma_buffer_busy; + unsigned dma_channel; + uint8_t *dma_buffer; + dma_addr_t dma_addr; + unsigned CFG0, CFG1; + unsigned page_shift; + unsigned last_sector; + unsigned last_sectorsz; +#if SUPPORT_WRONG_ECC_CONFIG + uint32_t ecc_buf_cfg; + uint32_t saved_ecc_buf_cfg; +#endif + struct nand_hw_info dev_info; +}; + +#define CFG1_WIDE_FLASH (1U << 1) + +#ifdef CONFIG_ARCH_MSM7X30 +#define BUF_STAT_UNCORRECTABLE (1U << 8) +#define BUF_STAT_NUM_ERRS_MASK (0xf) +#else +#define BUF_STAT_UNCORRECTABLE (1U << 3) +#define BUF_STAT_NUM_ERRS_MASK (0x7) +#endif + + +/* TODO: move datamover code out */ + +#define SRC_CRCI_NAND_CMD CMD_SRC_CRCI(DMOV_NAND_CRCI_CMD) +#define DST_CRCI_NAND_CMD CMD_DST_CRCI(DMOV_NAND_CRCI_CMD) +#define SRC_CRCI_NAND_DATA CMD_SRC_CRCI(DMOV_NAND_CRCI_DATA) +#define DST_CRCI_NAND_DATA CMD_DST_CRCI(DMOV_NAND_CRCI_DATA) + +#define msm_virt_to_dma(chip, vaddr) \ + ((void)(*(vaddr)), (chip)->dma_addr + \ + ((uint8_t *)(vaddr) - (chip)->dma_buffer)) + +/** + * msm_nand_oob_64 - oob info for large (2KB) page + */ +static struct nand_ecclayout msm_nand_oob_64 = { + .oobavail = 16, + .oobfree = { + {30, 16}, + } +}; + +/* + * msm_nand_oob_128 - oob info for 4KB page + */ +static struct nand_ecclayout msm_nand_oob_128 = { + .oobavail = 32, + .oobfree = { + {70, 32}, + } +}; + + +static void *msm_nand_get_dma_buffer(struct msm_nand_chip *chip, size_t size) +{ + unsigned int bitmask, free_bitmask, old_bitmask; + unsigned int need_mask, current_need_mask; + int free_index; + + need_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOTS)) - 1; + bitmask = atomic_read(&chip->dma_buffer_busy); + free_bitmask = ~bitmask; + do { + free_index = __ffs(free_bitmask); + current_need_mask = need_mask << free_index; + if ((bitmask & current_need_mask) == 0) { + old_bitmask = + atomic_cmpxchg(&chip->dma_buffer_busy, + bitmask, + bitmask | current_need_mask); + if (old_bitmask == bitmask) + return chip->dma_buffer + + free_index * MSM_NAND_DMA_BUFFER_SLOTS; + free_bitmask = 0; /* force return */ + } + /* current free range was too small, clear all free bits */ + /* below the top busy bit within current_need_mask */ + free_bitmask &= + ~(~0U >> (32 - fls(bitmask & current_need_mask))); + } while (free_bitmask); + + return NULL; +} + +static void msm_nand_release_dma_buffer(struct msm_nand_chip *chip, + void *buffer, size_t size) +{ + int index; + unsigned int used_mask; + + used_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOTS)) - 1; + index = ((uint8_t *)buffer - chip->dma_buffer) / + MSM_NAND_DMA_BUFFER_SLOTS; + atomic_sub(used_mask << index, &chip->dma_buffer_busy); + + wake_up(&chip->wait_queue); +} + +uint32_t flash_read_id(struct msm_nand_chip *chip) +{ + struct { + dmov_s cmd[5]; + unsigned cmdptr; + 
unsigned data[5]; + } *dma_buffer; + uint32_t rv; + + wait_event(chip->wait_queue, + (dma_buffer = msm_nand_get_dma_buffer( + chip, sizeof(*dma_buffer)))); + + dma_buffer->data[0] = 0 | 4; + dma_buffer->data[1] = MSM_NAND_CMD_FETCH_ID; + dma_buffer->data[2] = 1; + dma_buffer->data[3] = 0xeeeeeeee; + dma_buffer->data[4] = 0xeeeeeeee; + BUILD_BUG_ON(4 != ARRAY_SIZE(dma_buffer->data) - 1); + + dma_buffer->cmd[0].cmd = 0 | CMD_OCB; + dma_buffer->cmd[0].src = msm_virt_to_dma(chip, &dma_buffer->data[0]); + dma_buffer->cmd[0].dst = MSM_NAND_FLASH_CHIP_SELECT; + dma_buffer->cmd[0].len = 4; + + dma_buffer->cmd[1].cmd = DST_CRCI_NAND_CMD; + dma_buffer->cmd[1].src = msm_virt_to_dma(chip, &dma_buffer->data[1]); + dma_buffer->cmd[1].dst = MSM_NAND_FLASH_CMD; + dma_buffer->cmd[1].len = 4; + + dma_buffer->cmd[2].cmd = 0; + dma_buffer->cmd[2].src = msm_virt_to_dma(chip, &dma_buffer->data[2]); + dma_buffer->cmd[2].dst = MSM_NAND_EXEC_CMD; + dma_buffer->cmd[2].len = 4; + + dma_buffer->cmd[3].cmd = SRC_CRCI_NAND_DATA; + dma_buffer->cmd[3].src = MSM_NAND_FLASH_STATUS; + dma_buffer->cmd[3].dst = msm_virt_to_dma(chip, &dma_buffer->data[3]); + dma_buffer->cmd[3].len = 4; + + dma_buffer->cmd[4].cmd = CMD_OCU | CMD_LC; + dma_buffer->cmd[4].src = MSM_NAND_READ_ID; + dma_buffer->cmd[4].dst = msm_virt_to_dma(chip, &dma_buffer->data[4]); + dma_buffer->cmd[4].len = 4; + BUILD_BUG_ON(4 != ARRAY_SIZE(dma_buffer->cmd) - 1); + + dma_buffer->cmdptr = + (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) | CMD_PTR_LP; + + msm_dmov_exec_cmd( + chip->dma_channel, DMOV_CMD_PTR_LIST | + DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr))); + + rv = dma_buffer->data[4]; + pr_info("msn_nand: nandid %x status %x\n", rv, dma_buffer->data[3]); + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer)); + return rv; +} + +int flash_read_config(struct msm_nand_chip *chip) +{ + struct { + dmov_s cmd[2]; + unsigned cmdptr; + unsigned cfg0; + unsigned cfg1; + } *dma_buffer; + + wait_event(chip->wait_queue, + (dma_buffer = msm_nand_get_dma_buffer( + chip, sizeof(*dma_buffer)))); + dma_buffer->cfg0 = 0; + dma_buffer->cfg1 = 0; + + dma_buffer->cmd[0].cmd = CMD_OCB; + dma_buffer->cmd[0].src = MSM_NAND_DEV0_CFG0; + dma_buffer->cmd[0].dst = msm_virt_to_dma(chip, &dma_buffer->cfg0); + dma_buffer->cmd[0].len = 4; + + dma_buffer->cmd[1].cmd = CMD_OCU | CMD_LC; + dma_buffer->cmd[1].src = MSM_NAND_DEV0_CFG1; + dma_buffer->cmd[1].dst = msm_virt_to_dma(chip, &dma_buffer->cfg1); + dma_buffer->cmd[1].len = 4; + BUILD_BUG_ON(1 != ARRAY_SIZE(dma_buffer->cmd) - 1); + + dma_buffer->cmdptr = + (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) | CMD_PTR_LP; + + msm_dmov_exec_cmd( + chip->dma_channel, DMOV_CMD_PTR_LIST | + DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr))); + + chip->CFG0 = dma_buffer->cfg0; + chip->CFG1 = dma_buffer->cfg1; + pr_info("msm_nand: read CFG0 = %x CFG1 = %x\n", chip->CFG0, chip->CFG1); + pr_info("msm_nand: CFG0 cw/page=%d ud_sz=%d ecc_sz=%d spare_sz=%d\n", + (chip->CFG0 >> 6) & 7, (chip->CFG0 >> 9) & 0x3ff, + (chip->CFG0 >> 19) & 15, (chip->CFG0 >> 23) & 15); + + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer)); + + if ((chip->CFG0 == 0) || (chip->CFG1 == 0)) + return -1; + + return 0; +} + +unsigned flash_rd_reg(struct msm_nand_chip *chip, unsigned addr) +{ + struct { + dmov_s cmd; + unsigned cmdptr; + unsigned data; + } *dma_buffer; + unsigned rv; + + wait_event(chip->wait_queue, + (dma_buffer = msm_nand_get_dma_buffer( + chip, sizeof(*dma_buffer)))); + + dma_buffer->cmd.cmd = CMD_LC; + 
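For reference, the MSM_NAND_DMA_BUFFER_SLOTS arithmetic used by msm_nand_get_dma_buffer() and msm_nand_release_dma_buffer() above carves the 4 KB bounce buffer into 32 slots of 128 bytes, one bit per slot in the dma_buffer_busy bitmap; a request claims ceil(size / 128) contiguous bits and the release path clears them again. A tiny stand-alone C check of that slot math (illustrative only, user-space):

    /* Stand-alone illustration of the DMA-buffer slot accounting above. */
    #include <stdio.h>

    #define DMA_BUFFER_SIZE  4096
    #define SLOT_SIZE        (DMA_BUFFER_SIZE / 32)   /* 128 bytes per slot */

    int main(void)
    {
        size_t size = 200;   /* hypothetical dma_buffer request */
        unsigned int slots = (size + SLOT_SIZE - 1) / SLOT_SIZE;
        unsigned int need_mask = (1U << slots) - 1;

        /* 200 bytes -> 2 slots -> mask 0x3, which the allocator shifts to
         * the first free position it finds in the 32-bit busy bitmap. */
        printf("%zu bytes: %u slots, base mask 0x%x\n", size, slots, need_mask);
        return 0;
    }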
dma_buffer->cmd.src = addr; + dma_buffer->cmd.dst = msm_virt_to_dma(chip, &dma_buffer->data); + dma_buffer->cmd.len = 4; + + dma_buffer->cmdptr = + (msm_virt_to_dma(chip, &dma_buffer->cmd) >> 3) | CMD_PTR_LP; + dma_buffer->data = 0xeeeeeeee; + + msm_dmov_exec_cmd( + chip->dma_channel, DMOV_CMD_PTR_LIST | + DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr))); + rv = dma_buffer->data; + + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer)); + + return rv; +} + +void flash_wr_reg(struct msm_nand_chip *chip, unsigned addr, unsigned val) +{ + struct { + dmov_s cmd; + unsigned cmdptr; + unsigned data; + } *dma_buffer; + + wait_event(chip->wait_queue, + (dma_buffer = msm_nand_get_dma_buffer( + chip, sizeof(*dma_buffer)))); + + dma_buffer->cmd.cmd = CMD_LC; + dma_buffer->cmd.src = msm_virt_to_dma(chip, &dma_buffer->data); + dma_buffer->cmd.dst = addr; + dma_buffer->cmd.len = 4; + + dma_buffer->cmdptr = + (msm_virt_to_dma(chip, &dma_buffer->cmd) >> 3) | CMD_PTR_LP; + dma_buffer->data = val; + + msm_dmov_exec_cmd( + chip->dma_channel, DMOV_CMD_PTR_LIST | + DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr))); + + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer)); +} + +static dma_addr_t +msm_nand_dma_map(struct device *dev, void *addr, size_t size, + enum dma_data_direction dir) +{ + struct page *page; + unsigned long offset = (unsigned long)addr & ~PAGE_MASK; + if (virt_addr_valid(addr)) + page = virt_to_page(addr); + else { + if (WARN_ON(size + offset > PAGE_SIZE)) + return ~0; + page = vmalloc_to_page(addr); + } + return dma_map_page(dev, page, offset, size, dir); +} + +static int msm_nand_check_empty(struct mtd_info *mtd, struct mtd_oob_ops *ops, + unsigned long *uncorrected) +{ + unsigned int p, n, end; + uint8_t *datbuf = ops->datbuf; + uint8_t *oobbuf = ops->oobbuf; + size_t oobsize; + int page_count; + + if (ops->mode == MTD_OOB_RAW) + return false; + + page_count = ops->retlen / mtd->writesize; + oobsize = (ops->mode == MTD_OOB_AUTO) ? mtd->oobavail : mtd->oobsize; + + for_each_set_bit(p, uncorrected, page_count) { + if (datbuf) { + datbuf = ops->datbuf + p * mtd->writesize; + for (n = 0; n < mtd->writesize; n++) { + /* empty blocks read 0x54 at these offsets */ + if (datbuf[n] != ((n % 516 == 3) ? 
0x54 : 0xff)) + return false; + } + } + if (oobbuf) { + n = p * oobsize; + end = min(n + oobsize, ops->oobretlen); + for(; n < end; n++) + if (oobbuf[n] != 0xff) + return false; + } + if (ops->datbuf) + for (n = 3; n < mtd->writesize; n+= 516) + datbuf[n] = 0xff; + } + return true; +} + +static int msm_nand_read_oob(struct mtd_info *mtd, loff_t from, + struct mtd_oob_ops *ops) +{ + struct msm_nand_chip *chip = mtd->priv; + + struct { + dmov_s cmd[8 * 5 + 3]; + unsigned cmdptr; + struct { + uint32_t cmd; + uint32_t addr0; + uint32_t addr1; + uint32_t chipsel; + uint32_t cfg0; + uint32_t cfg1; + uint32_t exec; +#if SUPPORT_WRONG_ECC_CONFIG + uint32_t ecccfg; + uint32_t ecccfg_restore; +#endif + struct { + uint32_t flash_status; + uint32_t buffer_status; + } result[8]; + } data; + } *dma_buffer; + dmov_s *cmd; + unsigned n; + unsigned page = from >> chip->page_shift; + uint32_t oob_len = ops->ooblen; + uint32_t sectordatasize; + uint32_t sectoroobsize; + int err, pageerr; + dma_addr_t data_dma_addr = 0; + dma_addr_t oob_dma_addr = 0; + dma_addr_t data_dma_addr_curr = 0; + dma_addr_t oob_dma_addr_curr = 0; + uint32_t oob_col = 0; + unsigned page_count; + unsigned pages_read = 0; + unsigned start_sector = 0; + uint32_t sector_corrected; + uint32_t page_corrected; + uint32_t total_corrected = 0; + uint32_t total_uncorrected = 0; + unsigned long uncorrected_noalloc = 0; + unsigned long *uncorrected = &uncorrected_noalloc; + + if (from & (mtd->writesize - 1)) { + pr_err("%s: unsupported from, 0x%llx\n", + __func__, from); + return -EINVAL; + } + if (ops->mode != MTD_OOB_RAW) { + if (ops->datbuf != NULL && (ops->len % mtd->writesize) != 0) { + /* when ops->datbuf is NULL, ops->len can be ooblen */ + pr_err("%s: unsupported ops->len, %d\n", + __func__, ops->len); + return -EINVAL; + } + } else { + if (ops->datbuf != NULL && + (ops->len % (mtd->writesize + mtd->oobsize)) != 0) { + pr_err("%s: unsupported ops->len," + " %d for MTD_OOB_RAW\n", __func__, ops->len); + return -EINVAL; + } + } + + if (ops->mode != MTD_OOB_RAW && ops->ooblen != 0 && ops->ooboffs != 0) { + pr_err("%s: unsupported ops->ooboffs, %d\n", + __func__, ops->ooboffs); + return -EINVAL; + } + + if (ops->oobbuf && !ops->datbuf && ops->mode == MTD_OOB_AUTO) + start_sector = chip->last_sector; + + if (ops->oobbuf && !ops->datbuf) { + unsigned tmpoobsz = (ops->mode == MTD_OOB_AUTO) ? 
+ mtd->oobavail : mtd->oobsize; + page_count = DIV_ROUND_UP(ops->ooblen, tmpoobsz); + } else if (ops->mode != MTD_OOB_RAW) + page_count = ops->len / mtd->writesize; + else + page_count = ops->len / (mtd->writesize + mtd->oobsize); + +#if 0 /* yaffs reads more oob data than it needs */ + if (ops->ooblen >= sectoroobsize * 4) { + pr_err("%s: unsupported ops->ooblen, %d\n", + __func__, ops->ooblen); + return -EINVAL; + } +#endif + +#if VERBOSE + pr_info("msm_nand_read_oob %llx %p %x %p %x\n", + from, ops->datbuf, ops->len, ops->oobbuf, ops->ooblen); +#endif + if (ops->datbuf) { + /* memset(ops->datbuf, 0x55, ops->len); */ + data_dma_addr_curr = data_dma_addr = + msm_nand_dma_map(chip->dev, ops->datbuf, ops->len, + DMA_FROM_DEVICE); + if (dma_mapping_error(chip->dev, data_dma_addr)) { + pr_err("msm_nand_read_oob: failed to get dma addr " + "for %p\n", ops->datbuf); + return -EIO; + } + } + if (ops->oobbuf) { + memset(ops->oobbuf, 0xff, ops->ooblen); + oob_dma_addr_curr = oob_dma_addr = + msm_nand_dma_map(chip->dev, ops->oobbuf, + ops->ooblen, DMA_BIDIRECTIONAL); + if (dma_mapping_error(chip->dev, oob_dma_addr)) { + pr_err("msm_nand_read_oob: failed to get dma addr " + "for %p\n", ops->oobbuf); + err = -EIO; + goto err_dma_map_oobbuf_failed; + } + } + if (BITS_TO_LONGS(page_count) > 1) { + uncorrected = kzalloc(BITS_TO_LONGS(page_count) * sizeof(long), + GFP_NOIO); + if (!uncorrected) { + err = -ENOMEM; + goto err_alloc_uncorrected_failed; + } + } + + wait_event(chip->wait_queue, + (dma_buffer = msm_nand_get_dma_buffer( + chip, sizeof(*dma_buffer)))); + + oob_col = start_sector * 0x210; + if (chip->CFG1 & CFG1_WIDE_FLASH) + oob_col >>= 1; + + err = 0; + while (page_count-- > 0) { + cmd = dma_buffer->cmd; + + /* CMD / ADDR0 / ADDR1 / CHIPSEL program values */ + if (ops->mode != MTD_OOB_RAW) { + dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ_ECC; + dma_buffer->data.cfg0 = + (chip->CFG0 & ~(7U << 6)) | + ((chip->last_sector - start_sector) << 6); + dma_buffer->data.cfg1 = chip->CFG1; + } else { + dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ; + dma_buffer->data.cfg0 = + (MSM_NAND_CFG0_RAW & ~(7U << 6)) | + (chip->last_sector << 6); + dma_buffer->data.cfg1 = MSM_NAND_CFG1_RAW | + (chip->CFG1 & CFG1_WIDE_FLASH); + } + + dma_buffer->data.addr0 = (page << 16) | oob_col; + /* qc example is (page >> 16) && 0xff !? 
*/ + dma_buffer->data.addr1 = (page >> 16) & 0xff; + /* flash0 + undoc bit */ + dma_buffer->data.chipsel = 0 | 4; + + + /* GO bit for the EXEC register */ + dma_buffer->data.exec = 1; + + + BUILD_BUG_ON(8 != ARRAY_SIZE(dma_buffer->data.result)); + + for (n = start_sector; n <= chip->last_sector; n++) { + /* flash + buffer status return words */ + dma_buffer->data.result[n].flash_status = 0xeeeeeeee; + dma_buffer->data.result[n].buffer_status = 0xeeeeeeee; + + /* block on cmd ready, then + * write CMD / ADDR0 / ADDR1 / CHIPSEL + * regs in a burst + */ + cmd->cmd = DST_CRCI_NAND_CMD; + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd); + cmd->dst = MSM_NAND_FLASH_CMD; + if (n == start_sector) + cmd->len = 16; + else + cmd->len = 4; + cmd++; + + if (n == start_sector) { + cmd->cmd = 0; + cmd->src = msm_virt_to_dma(chip, + &dma_buffer->data.cfg0); + cmd->dst = MSM_NAND_DEV0_CFG0; + cmd->len = 8; + cmd++; +#if SUPPORT_WRONG_ECC_CONFIG + if (chip->saved_ecc_buf_cfg != + chip->ecc_buf_cfg) { + dma_buffer->data.ecccfg = + chip->ecc_buf_cfg; + cmd->cmd = 0; + cmd->src = msm_virt_to_dma(chip, + &dma_buffer->data.ecccfg); + cmd->dst = MSM_NAND_EBI2_ECC_BUF_CFG; + cmd->len = 4; + cmd++; + } +#endif + } + + /* kick the execute register */ + cmd->cmd = 0; + cmd->src = + msm_virt_to_dma(chip, &dma_buffer->data.exec); + cmd->dst = MSM_NAND_EXEC_CMD; + cmd->len = 4; + cmd++; + + /* block on data ready, then + * read the status register + */ + cmd->cmd = SRC_CRCI_NAND_DATA; + cmd->src = MSM_NAND_FLASH_STATUS; + cmd->dst = msm_virt_to_dma(chip, + &dma_buffer->data.result[n]); + /* MSM_NAND_FLASH_STATUS + MSM_NAND_BUFFER_STATUS */ + cmd->len = 8; + cmd++; + + /* read data block + * (only valid if status says success) + */ + if (ops->datbuf) { + if (ops->mode != MTD_OOB_RAW) + sectordatasize = + (n < chip->last_sector) ? 
+ 516 : chip->last_sectorsz; + else + sectordatasize = 528; + + cmd->cmd = 0; + cmd->src = MSM_NAND_FLASH_BUFFER; + cmd->dst = data_dma_addr_curr; + data_dma_addr_curr += sectordatasize; + cmd->len = sectordatasize; + cmd++; + } + + if (ops->oobbuf && (n == chip->last_sector || + ops->mode != MTD_OOB_AUTO)) { + cmd->cmd = 0; + if (n == chip->last_sector) { + cmd->src = MSM_NAND_FLASH_BUFFER + + chip->last_sectorsz; + sectoroobsize = + (chip->last_sector + 1) * 4; + if (ops->mode != MTD_OOB_AUTO) + sectoroobsize += 10; + } else { + cmd->src = MSM_NAND_FLASH_BUFFER + 516; + sectoroobsize = 10; + } + + cmd->dst = oob_dma_addr_curr; + if (sectoroobsize < oob_len) + cmd->len = sectoroobsize; + else + cmd->len = oob_len; + oob_dma_addr_curr += cmd->len; + oob_len -= cmd->len; + if (cmd->len > 0) + cmd++; + } + } +#if SUPPORT_WRONG_ECC_CONFIG + if (chip->saved_ecc_buf_cfg != chip->ecc_buf_cfg) { + dma_buffer->data.ecccfg_restore = + chip->saved_ecc_buf_cfg; + cmd->cmd = 0; + cmd->src = msm_virt_to_dma(chip, + &dma_buffer->data.ecccfg_restore); + cmd->dst = MSM_NAND_EBI2_ECC_BUF_CFG; + cmd->len = 4; + cmd++; + } +#endif + + BUILD_BUG_ON(8 * 5 + 3 != ARRAY_SIZE(dma_buffer->cmd)); + BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd)); + dma_buffer->cmd[0].cmd |= CMD_OCB; + cmd[-1].cmd |= CMD_OCU | CMD_LC; + + dma_buffer->cmdptr = + (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) + | CMD_PTR_LP; + + msm_dmov_exec_cmd( + chip->dma_channel, DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR( + msm_virt_to_dma(chip, &dma_buffer->cmdptr))); + + /* if any of the writes failed (0x10), or there + * was a protection violation (0x100), we lose + */ + pageerr = 0; + page_corrected = 0; + for (n = start_sector; n <= chip->last_sector; n++) { + uint32_t buf_stat = + dma_buffer->data.result[n].buffer_status; + if (buf_stat & BUF_STAT_UNCORRECTABLE) { + total_uncorrected++; + uncorrected[BIT_WORD(pages_read)] |= + BIT_MASK(pages_read); + pageerr = -EBADMSG; + break; + } + if (dma_buffer->data.result[n].flash_status & 0x110) { + pageerr = -EIO; + break; + } + sector_corrected =buf_stat & BUF_STAT_NUM_ERRS_MASK; + page_corrected += sector_corrected; + if (sector_corrected > 1) + pageerr = -EUCLEAN; + } + if ((!pageerr && page_corrected) || pageerr == -EUCLEAN) { + total_corrected += page_corrected; + /* not thread safe */ + mtd->ecc_stats.corrected += page_corrected; + } + if (pageerr && (pageerr != -EUCLEAN || err == 0)) + err = pageerr; + +#if VERBOSE + pr_info("status: %x %x %x %x %x %x %x %x " + "%x %x %x %x %x %x %x %x\n", + dma_buffer->data.result[0].flash_status, + dma_buffer->data.result[0].buffer_status, + dma_buffer->data.result[1].flash_status, + dma_buffer->data.result[1].buffer_status, + dma_buffer->data.result[2].flash_status, + dma_buffer->data.result[2].buffer_status, + dma_buffer->data.result[3].flash_status, + dma_buffer->data.result[3].buffer_status, + dma_buffer->data.result[4].flash_status, + dma_buffer->data.result[4].buffer_status, + dma_buffer->data.result[5].flash_status, + dma_buffer->data.result[5].buffer_status, + dma_buffer->data.result[6].flash_status, + dma_buffer->data.result[6].buffer_status, + dma_buffer->data.result[7].flash_status, + dma_buffer->data.result[7].buffer_status); +#endif + if (err && err != -EUCLEAN && err != -EBADMSG) + break; + pages_read++; + page++; + } + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer)); + +err_alloc_uncorrected_failed: + if (ops->oobbuf) { + dma_unmap_page(chip->dev, oob_dma_addr, + ops->ooblen, DMA_FROM_DEVICE); + } 
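The per-page accounting above follows the usual MTD conventions: corrected bitflips are added to mtd->ecc_stats.corrected (with -EUCLEAN returned once any sector needed more than one correction), while uncorrectable sectors are recorded in the uncorrected bitmap and produce -EBADMSG unless msm_nand_check_empty() later finds the page erased. A minimal caller-side sketch of reaching this path through the read_oob hook the driver registers further down; the mtd pointer, the page-aligned offset page_ofs and the two buffers are illustrative placeholders, not part of the driver:

	/* Illustrative only: one page of data plus its free OOB bytes. */
	struct mtd_oob_ops ops = {
		.mode   = MTD_OOB_AUTO,
		.len    = mtd->writesize,	/* one full page of data */
		.ooblen = mtd->oobavail,	/* free OOB bytes only */
		.datbuf = data_buf,
		.oobbuf = oob_buf,
	};
	int err = mtd->read_oob(mtd, page_ofs, &ops);

	if (err == -EUCLEAN)
		;	/* data is valid, but bitflips had to be corrected */
	else if (err == -EBADMSG)
		;	/* uncorrectable ECC error (and the page is not erased) */

MTD_OOB_PLACE with a NULL oobbuf, as used by msm_nand_read() further down, reduces this to a plain data read.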
+err_dma_map_oobbuf_failed: + if (ops->datbuf) { + dma_unmap_page(chip->dev, data_dma_addr, + ops->len, DMA_FROM_DEVICE); + } + + if (ops->mode != MTD_OOB_RAW) + ops->retlen = mtd->writesize * pages_read; + else + ops->retlen = (mtd->writesize + mtd->oobsize) * + pages_read; + ops->oobretlen = ops->ooblen - oob_len; + + if (err == -EBADMSG && msm_nand_check_empty(mtd, ops, uncorrected)) + err = 0; + else if (total_uncorrected) + mtd->ecc_stats.failed += total_uncorrected; /* not threadsafe */ + if (uncorrected != &uncorrected_noalloc) + kfree(uncorrected); + + if (err) + pr_err("msm_nand_read_oob %llx %x %x failed %d, corrected %d\n", + from, ops->datbuf ? ops->len : 0, ops->ooblen, err, + total_corrected); + return err; +} + +static int +msm_nand_read(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, u_char *buf) +{ + int ret; + struct mtd_oob_ops ops; + + /* printk("msm_nand_read %llx %x\n", from, len); */ + + ops.mode = MTD_OOB_PLACE; + ops.len = len; + ops.retlen = 0; + ops.ooblen = 0; + ops.datbuf = buf; + ops.oobbuf = NULL; + ret = msm_nand_read_oob(mtd, from, &ops); + *retlen = ops.retlen; + return ret; +} + +static int +msm_nand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) +{ + struct msm_nand_chip *chip = mtd->priv; + struct { + dmov_s cmd[8 * 6 + 3]; + unsigned cmdptr; + struct { + uint32_t cmd; + uint32_t addr0; + uint32_t addr1; + uint32_t chipsel; + uint32_t cfg0; + uint32_t cfg1; + uint32_t exec; +#if SUPPORT_WRONG_ECC_CONFIG + uint32_t ecccfg; + uint32_t ecccfg_restore; +#endif + uint32_t flash_status[8]; + uint32_t zeroes; + } data; + } *dma_buffer; + dmov_s *cmd; + unsigned n; + unsigned page = to >> chip->page_shift; + uint32_t oob_len = ops->ooblen; + uint32_t sectordatawritesize; + int err; + dma_addr_t data_dma_addr = 0; + dma_addr_t oob_dma_addr = 0; + dma_addr_t data_dma_addr_curr = 0; + dma_addr_t oob_dma_addr_curr = 0; + unsigned page_count; + unsigned pages_written = 0; + + if (to & (mtd->writesize - 1)) { + pr_err("%s: unsupported to, 0x%llx\n", __func__, to); + return -EINVAL; + } + + if (ops->mode != MTD_OOB_RAW) { + if (ops->ooblen != 0 && ops->mode != MTD_OOB_AUTO) { + pr_err("%s: unsupported ops->mode,%d\n", + __func__, ops->mode); + return -EINVAL; + } + if ((ops->len % mtd->writesize) != 0) { + pr_err("%s: unsupported ops->len, %d\n", + __func__, ops->len); + return -EINVAL; + } + } else { + if ((ops->len % (mtd->writesize + mtd->oobsize)) != 0) { + pr_err("%s: unsupported ops->len, " + "%d for MTD_OOB_RAW mode\n", + __func__, ops->len); + return -EINVAL; + } + } + + if (ops->datbuf == NULL) { + pr_err("%s: unsupported ops->datbuf == NULL\n", __func__); + return -EINVAL; + } +#if 0 /* yaffs writes more oob data than it needs */ + if (ops->ooblen >= sectoroobsize * 4) { + pr_err("%s: unsupported ops->ooblen, %d\n", + __func__, ops->ooblen); + return -EINVAL; + } +#endif + if (ops->mode != MTD_OOB_RAW && ops->ooblen != 0 && ops->ooboffs != 0) { + pr_err("%s: unsupported ops->ooboffs, %d\n", + __func__, ops->ooboffs); + return -EINVAL; + } + + if (ops->datbuf) { + data_dma_addr_curr = data_dma_addr = + msm_nand_dma_map(chip->dev, ops->datbuf, + ops->len, DMA_TO_DEVICE); + if (dma_mapping_error(chip->dev, data_dma_addr)) { + pr_err("msm_nand_write_oob: failed to get dma addr " + "for %p\n", ops->datbuf); + return -EIO; + } + } + if (ops->oobbuf) { + oob_dma_addr_curr = oob_dma_addr = + msm_nand_dma_map(chip->dev, ops->oobbuf, + ops->ooblen, DMA_TO_DEVICE); + if (dma_mapping_error(chip->dev, oob_dma_addr)) { + 
pr_err("msm_nand_write_oob: failed to get dma addr " + "for %p\n", ops->oobbuf); + err = -EIO; + goto err_dma_map_oobbuf_failed; + } + } + if (ops->mode != MTD_OOB_RAW) + page_count = ops->len / mtd->writesize; + else + page_count = ops->len / (mtd->writesize + mtd->oobsize); + + wait_event(chip->wait_queue, (dma_buffer = + msm_nand_get_dma_buffer(chip, sizeof(*dma_buffer)))); + + while (page_count-- > 0) { + cmd = dma_buffer->cmd; + + /* CMD / ADDR0 / ADDR1 / CHIPSEL program values */ + if (ops->mode != MTD_OOB_RAW) { + dma_buffer->data.cfg0 = chip->CFG0; + dma_buffer->data.cfg1 = chip->CFG1; + } else { + dma_buffer->data.cfg0 = + (MSM_NAND_CFG0_RAW & ~(7U << 6)) | + (chip->last_sector << 6); + dma_buffer->data.cfg1 = MSM_NAND_CFG1_RAW | + (chip->CFG1 & CFG1_WIDE_FLASH); + } + + dma_buffer->data.cmd = MSM_NAND_CMD_PRG_PAGE; + dma_buffer->data.addr0 = page << 16; + dma_buffer->data.addr1 = (page >> 16) & 0xff; + dma_buffer->data.chipsel = 0 | 4; /* flash0 + undoc bit */ + dma_buffer->data.zeroes = 0; + + + /* GO bit for the EXEC register */ + dma_buffer->data.exec = 1; + + BUILD_BUG_ON(8 != ARRAY_SIZE(dma_buffer->data.flash_status)); + + for (n = 0; n <= chip->last_sector ; n++) { + /* status return words */ + dma_buffer->data.flash_status[n] = 0xeeeeeeee; + /* block on cmd ready, then + * write CMD / ADDR0 / ADDR1 / CHIPSEL regs in a burst + */ + cmd->cmd = DST_CRCI_NAND_CMD; + cmd->src = + msm_virt_to_dma(chip, &dma_buffer->data.cmd); + cmd->dst = MSM_NAND_FLASH_CMD; + if (n == 0) + cmd->len = 16; + else + cmd->len = 4; + cmd++; + + if (n == 0) { + cmd->cmd = 0; + cmd->src = msm_virt_to_dma(chip, + &dma_buffer->data.cfg0); + cmd->dst = MSM_NAND_DEV0_CFG0; + cmd->len = 8; + cmd++; +#if SUPPORT_WRONG_ECC_CONFIG + if (chip->saved_ecc_buf_cfg != + chip->ecc_buf_cfg) { + dma_buffer->data.ecccfg = + chip->ecc_buf_cfg; + cmd->cmd = 0; + cmd->src = msm_virt_to_dma(chip, + &dma_buffer->data.ecccfg); + cmd->dst = MSM_NAND_EBI2_ECC_BUF_CFG; + cmd->len = 4; + cmd++; + } +#endif + } + + /* write data block */ + if (ops->mode != MTD_OOB_RAW) + sectordatawritesize = (n < chip->last_sector) ? 
+ 516 : chip->last_sectorsz; + else + sectordatawritesize = 528; + + cmd->cmd = 0; + cmd->src = data_dma_addr_curr; + data_dma_addr_curr += sectordatawritesize; + cmd->dst = MSM_NAND_FLASH_BUFFER; + cmd->len = sectordatawritesize; + cmd++; + + if (ops->oobbuf) { + if (n == chip->last_sector) { + cmd->cmd = 0; + cmd->src = oob_dma_addr_curr; + cmd->dst = MSM_NAND_FLASH_BUFFER + + chip->last_sectorsz; + cmd->len = 516 - chip->last_sectorsz; + if (oob_len <= cmd->len) + cmd->len = oob_len; + oob_dma_addr_curr += cmd->len; + oob_len -= cmd->len; + if (cmd->len > 0) + cmd++; + } + if (ops->mode != MTD_OOB_AUTO) { + /* skip ecc bytes in oobbuf */ + if (oob_len < 10) { + oob_dma_addr_curr += 10; + oob_len -= 10; + } else { + oob_dma_addr_curr += oob_len; + oob_len = 0; + } + } + } + + /* kick the execute register */ + cmd->cmd = 0; + cmd->src = + msm_virt_to_dma(chip, &dma_buffer->data.exec); + cmd->dst = MSM_NAND_EXEC_CMD; + cmd->len = 4; + cmd++; + + /* block on data ready, then + * read the status register + */ + cmd->cmd = SRC_CRCI_NAND_DATA; + cmd->src = MSM_NAND_FLASH_STATUS; + cmd->dst = msm_virt_to_dma(chip, + &dma_buffer->data.flash_status[n]); + cmd->len = 4; + cmd++; + + /* clear the status register in case the OP_ERR is set + * due to the write, to work around a h/w bug */ + cmd->cmd = 0; + cmd->src = msm_virt_to_dma(chip, + &dma_buffer->data.zeroes); + cmd->dst = MSM_NAND_FLASH_STATUS; + cmd->len = 4; + cmd++; + } +#if SUPPORT_WRONG_ECC_CONFIG + if (chip->saved_ecc_buf_cfg != chip->ecc_buf_cfg) { + dma_buffer->data.ecccfg_restore = + chip->saved_ecc_buf_cfg; + cmd->cmd = 0; + cmd->src = msm_virt_to_dma(chip, + &dma_buffer->data.ecccfg_restore); + cmd->dst = MSM_NAND_EBI2_ECC_BUF_CFG; + cmd->len = 4; + cmd++; + } +#endif + dma_buffer->cmd[0].cmd |= CMD_OCB; + cmd[-1].cmd |= CMD_OCU | CMD_LC; + BUILD_BUG_ON(8 * 6 + 3 != ARRAY_SIZE(dma_buffer->cmd)); + BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd)); + dma_buffer->cmdptr = + (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) | + CMD_PTR_LP; + + msm_dmov_exec_cmd(chip->dma_channel, + DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR( + msm_virt_to_dma(chip, &dma_buffer->cmdptr))); + + /* if any of the writes failed (0x10), or there was a + * protection violation (0x100), or the program success + * bit (0x80) is unset, we lose + */ + err = 0; + for (n = 0; n <= chip->last_sector ; n++) { + if (dma_buffer->data.flash_status[n] & 0x110) { + if (dma_buffer->data.flash_status[n] & 0x10) + pr_err("msm_nand: critical write error," + " 0x%x(%d)\n", page, n); + err = -EIO; + break; + } + if (!(dma_buffer->data.flash_status[n] & 0x80)) { + err = -EIO; + break; + } + } + +#if VERBOSE + pr_info("write page %d: status: %x %x %x %x %x %x %x %x\n", + page, dma_buffer->data.flash_status[0], + dma_buffer->data.flash_status[1], + dma_buffer->data.flash_status[2], + dma_buffer->data.flash_status[3], + dma_buffer->data.flash_status[4], + dma_buffer->data.flash_status[5], + dma_buffer->data.flash_status[6], + dma_buffer->data.flash_status[7]); +#endif + if (err) + break; + pages_written++; + page++; + } + if (ops->mode != MTD_OOB_RAW) + ops->retlen = mtd->writesize * pages_written; + else + ops->retlen = (mtd->writesize + mtd->oobsize) * pages_written; + + ops->oobretlen = ops->ooblen - oob_len; + + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer)); + + if (ops->oobbuf) + dma_unmap_page(chip->dev, oob_dma_addr, + ops->ooblen, DMA_TO_DEVICE); +err_dma_map_oobbuf_failed: + if (ops->datbuf) + dma_unmap_page(chip->dev, data_dma_addr, + ops->len, 
DMA_TO_DEVICE); + if (err) + pr_err("msm_nand_write_oob %llx %x %x failed %d\n", + to, ops->len, ops->ooblen, err); + return err; +} + +static int msm_nand_write(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) +{ + int ret; + struct mtd_oob_ops ops; + + ops.mode = MTD_OOB_PLACE; + ops.len = len; + ops.retlen = 0; + ops.ooblen = 0; + ops.datbuf = (uint8_t *)buf; + ops.oobbuf = NULL; + ret = msm_nand_write_oob(mtd, to, &ops); + *retlen = ops.retlen; + return ret; +} + +static int +msm_nand_erase(struct mtd_info *mtd, struct erase_info *instr) +{ + int err; + struct msm_nand_chip *chip = mtd->priv; + struct { + dmov_s cmd[5]; + unsigned cmdptr; + unsigned data[9]; + } *dma_buffer; + unsigned page = instr->addr >> chip->page_shift; + + if (instr->addr & (mtd->erasesize - 1)) { + pr_err("%s: unsupported erase address, 0x%llx\n", + __func__, instr->addr); + return -EINVAL; + } + if (instr->len != mtd->erasesize) { + pr_err("%s: unsupported erase len, %lld\n", + __func__, instr->len); + return -EINVAL; + } + + wait_event(chip->wait_queue, + (dma_buffer = msm_nand_get_dma_buffer( + chip, sizeof(*dma_buffer)))); + + dma_buffer->data[0] = MSM_NAND_CMD_BLOCK_ERASE; + dma_buffer->data[1] = page; + dma_buffer->data[2] = 0; + dma_buffer->data[3] = 0 | 4; + dma_buffer->data[4] = 1; + dma_buffer->data[5] = 0xeeeeeeee; + dma_buffer->data[6] = chip->CFG0 & (~(7 << 6)); /* CW_PER_PAGE = 0 */ + dma_buffer->data[7] = chip->CFG1; + dma_buffer->data[8] = 0; + BUILD_BUG_ON(8 != ARRAY_SIZE(dma_buffer->data) - 1); + + dma_buffer->cmd[0].cmd = DST_CRCI_NAND_CMD | CMD_OCB; + dma_buffer->cmd[0].src = msm_virt_to_dma(chip, &dma_buffer->data[0]); + dma_buffer->cmd[0].dst = MSM_NAND_FLASH_CMD; + dma_buffer->cmd[0].len = 16; + + dma_buffer->cmd[1].cmd = 0; + dma_buffer->cmd[1].src = msm_virt_to_dma(chip, &dma_buffer->data[6]); + dma_buffer->cmd[1].dst = MSM_NAND_DEV0_CFG0; + dma_buffer->cmd[1].len = 8; + + dma_buffer->cmd[2].cmd = 0; + dma_buffer->cmd[2].src = msm_virt_to_dma(chip, &dma_buffer->data[4]); + dma_buffer->cmd[2].dst = MSM_NAND_EXEC_CMD; + dma_buffer->cmd[2].len = 4; + + dma_buffer->cmd[3].cmd = SRC_CRCI_NAND_DATA; + dma_buffer->cmd[3].src = MSM_NAND_FLASH_STATUS; + dma_buffer->cmd[3].dst = msm_virt_to_dma(chip, &dma_buffer->data[5]); + dma_buffer->cmd[3].len = 4; + + /* clear the status register in case the OP_ERR is set + * due to the write, to work around a h/w bug */ + dma_buffer->cmd[4].cmd = CMD_OCU | CMD_LC; + dma_buffer->cmd[4].src = msm_virt_to_dma(chip, &dma_buffer->data[8]); + dma_buffer->cmd[4].dst = MSM_NAND_FLASH_STATUS; + dma_buffer->cmd[4].len = 4; + + BUILD_BUG_ON(4 != ARRAY_SIZE(dma_buffer->cmd) - 1); + + dma_buffer->cmdptr = + (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) | CMD_PTR_LP; + + msm_dmov_exec_cmd( + chip->dma_channel, DMOV_CMD_PTR_LIST | + DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr))); + + /* we fail if there was an operation error, a mpu error, or the + * erase success bit was not set. 
+ */ + + if (dma_buffer->data[5] & 0x110 || !(dma_buffer->data[5] & 0x80)) { + if (dma_buffer->data[5] & 0x10) + pr_warning("msm_nand: critical erase error, 0x%llx\n", + instr->addr); + err = -EIO; + } else + err = 0; + + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer)); + if (err) { + pr_err("%s: erase failed, 0x%llx\n", __func__, instr->addr); + instr->fail_addr = instr->addr; + instr->state = MTD_ERASE_FAILED; + } else { + instr->state = MTD_ERASE_DONE; + instr->fail_addr = 0xffffffff; + mtd_erase_callback(instr); + } + return err; +} + +static int +msm_nand_block_isbad(struct mtd_info *mtd, loff_t ofs) +{ + struct msm_nand_chip *chip = mtd->priv; + int ret; + struct { + dmov_s cmd[5]; + unsigned cmdptr; + struct { + uint32_t cmd; + uint32_t addr0; + uint32_t addr1; + uint32_t chipsel; + uint32_t cfg0; + uint32_t cfg1; + uint32_t exec; + uint32_t ecccfg; + struct { + uint32_t flash_status; + uint32_t buffer_status; + } result; + } data; + } *dma_buffer; + dmov_s *cmd; + uint8_t *buf; + unsigned page = ofs >> chip->page_shift; + + /* Check for invalid offset */ + if (ofs > mtd->size) + return -EINVAL; + if (ofs & (mtd->erasesize - 1)) { + pr_err("%s: unsupported block address, 0x%x\n", + __func__, (uint32_t)ofs); + return -EINVAL; + } + + wait_event(chip->wait_queue, + (dma_buffer = msm_nand_get_dma_buffer(chip , + sizeof(*dma_buffer) + 4))); + buf = (uint8_t *)dma_buffer + sizeof(*dma_buffer); + + /* Read 4 bytes starting from the bad block marker location + * in the last code word of the page + */ + + cmd = dma_buffer->cmd; + + dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ; + dma_buffer->data.cfg0 = MSM_NAND_CFG0_RAW & ~(7U << 6); + dma_buffer->data.cfg1 = MSM_NAND_CFG1_RAW | (chip->CFG1 & CFG1_WIDE_FLASH); + + if (chip->CFG1 & CFG1_WIDE_FLASH) + dma_buffer->data.addr0 = (page << 16) | + ((528 * chip->last_sector) >> 1); + else + dma_buffer->data.addr0 = (page << 16) | + (528 * chip->last_sector); + + dma_buffer->data.addr1 = (page >> 16) & 0xff; + dma_buffer->data.chipsel = 0 | 4; + + dma_buffer->data.exec = 1; + + dma_buffer->data.result.flash_status = 0xeeeeeeee; + dma_buffer->data.result.buffer_status = 0xeeeeeeee; + + cmd->cmd = DST_CRCI_NAND_CMD; + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd); + cmd->dst = MSM_NAND_FLASH_CMD; + cmd->len = 16; + cmd++; + + cmd->cmd = 0; + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0); + cmd->dst = MSM_NAND_DEV0_CFG0; + cmd->len = 8; + cmd++; + + cmd->cmd = 0; + cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec); + cmd->dst = MSM_NAND_EXEC_CMD; + cmd->len = 4; + cmd++; + + cmd->cmd = SRC_CRCI_NAND_DATA; + cmd->src = MSM_NAND_FLASH_STATUS; + cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.result); + cmd->len = 8; + cmd++; + + cmd->cmd = 0; + cmd->src = MSM_NAND_FLASH_BUFFER + + (mtd->writesize - 528 * chip->last_sector); + cmd->dst = msm_virt_to_dma(chip, buf); + cmd->len = 4; + cmd++; + + BUILD_BUG_ON(5 != ARRAY_SIZE(dma_buffer->cmd)); + BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd)); + dma_buffer->cmd[0].cmd |= CMD_OCB; + cmd[-1].cmd |= CMD_OCU | CMD_LC; + + dma_buffer->cmdptr = (msm_virt_to_dma(chip, + dma_buffer->cmd) >> 3) | CMD_PTR_LP; + + msm_dmov_exec_cmd(chip->dma_channel, DMOV_CMD_PTR_LIST | + DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr))); + + ret = 0; + if (dma_buffer->data.result.flash_status & 0x110) + ret = -EIO; + + if (!ret) { + /* Check for bad block marker byte */ + if (chip->CFG1 & CFG1_WIDE_FLASH) { + if (buf[0] != 0xFF || buf[1] != 0xFF) + ret = 1; + } 
else { + if (buf[0] != 0xFF) + ret = 1; + } + } + + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer) + 4); + return ret; +} + + +static int +msm_nand_block_markbad(struct mtd_info *mtd, loff_t ofs) +{ + struct mtd_oob_ops ops; + int ret; + uint8_t *buf; + + /* Check for invalid offset */ + if (ofs > mtd->size) + return -EINVAL; + if (ofs & (mtd->erasesize - 1)) { + pr_err("%s: unsupported block address, 0x%x\n", + __func__, (uint32_t)ofs); + return -EINVAL; + } + + /* + Write all 0s to the first page + This will set the BB marker to 0 + */ + + /* Use the already existing zero page */ + buf = page_address(ZERO_PAGE()); + + ops.mode = MTD_OOB_RAW; + ops.len = mtd->writesize + mtd->oobsize; + ops.retlen = 0; + ops.ooblen = 0; + ops.datbuf = buf; + ops.oobbuf = NULL; + ret = msm_nand_write_oob(mtd, ofs, &ops); + + return ret; +} + +/** + * msm_nand_suspend - [MTD Interface] Suspend the msm_nand flash + * @param mtd MTD device structure + */ +static int msm_nand_suspend(struct mtd_info *mtd) +{ + return 0; +} + +/** + * msm_nand_resume - [MTD Interface] Resume the msm_nand flash + * @param mtd MTD device structure + */ +static void msm_nand_resume(struct mtd_info *mtd) +{ +} + +/** + * Export 3 attributes for HTC SSD HW INFO tool + * >info :basic HW spec of this NAND chip + * >vendor :vendor information + * >pagesize:page size, either 2048 or 4096 + */ +static int param_get_vendor_name(char *buffer, struct kernel_param *kp) +{ + return sprintf(buffer, "%s", nand_info->maker_name); +} +module_param_call(vendor, NULL, param_get_vendor_name, NULL, S_IRUGO); + +static int param_get_nand_info(char *buffer, struct kernel_param *kp) +{ + int result = 0; + result += sprintf(buffer, "<< NAND INFO >>\n"); + result += sprintf(buffer + result, "flash id\t =%X\n", + nand_info->flash_id); + result += sprintf(buffer + result, "vendor\t\t =%s\n", + nand_info->maker_name); + result += sprintf(buffer + result, "width\t\t =%d bits\n", + nand_info->width); + result += sprintf(buffer + result, "size\t\t =%d MB\n", + nand_info->size>>20); + result += sprintf(buffer + result, "block count\t =%d\n", + nand_info->block_count); + result += sprintf(buffer + result, "page count\t =%d", + nand_info->page_count); + return result; +} +module_param_call(info, NULL, param_get_nand_info, NULL, S_IRUGO); + +static int param_get_page_size(char *buffer, struct kernel_param *kp) +{ + return sprintf(buffer, "%d", nand_info->page_size); +} +module_param_call(pagesize, NULL, param_get_page_size, NULL, S_IRUGO); + +/** + * msm_nand_scan - [msm_nand Interface] Scan for the msm_nand device + * @param mtd MTD device structure + * @param maxchips Number of chips to scan for + * + * This fills out all the not initialized function pointers + * with the defaults. + * The flash ID is read and the mtd/chip structures are + * filled with the appropriate values. 
+ */ +int msm_nand_scan(struct mtd_info *mtd, int maxchips) +{ + unsigned n; + struct msm_nand_chip *chip = mtd->priv; + uint32_t flash_id; + uint32_t manid; + uint32_t devid; + uint32_t devcfg; + uint32_t busw16; + struct nand_flash_dev *flashdev = NULL; + struct nand_manufacturers *flashman = NULL; + + if (flash_read_config(chip)) { + pr_err("ERRROR: could not save CFG0 & CFG1 state\n"); + return -ENODEV; + } + pr_info("msm_nand: NAND_READ_ID = %x\n", + flash_rd_reg(chip, MSM_NAND_READ_ID)); + flash_wr_reg(chip, MSM_NAND_READ_ID, 0x12345678); + + flash_id = flash_read_id(chip); + manid = flash_id & 0xff; + devid = (flash_id >> 8) & 0xff; + devcfg = (flash_id >> 24) & 0xff; + + for (n = 0; !flashman && nand_manuf_ids[n].id; ++n) + if (nand_manuf_ids[n].id == manid) + flashman = &nand_manuf_ids[n]; + for (n = 0; !flashdev && nand_flash_ids[n].id; ++n) + if (nand_flash_ids[n].id == devid) + flashdev = &nand_flash_ids[n]; + if (!flashdev || !flashman) { + pr_err("ERROR: unknown nand device manuf=%x devid=%x\n", + manid, devid); + return -ENOENT; + } + + if (!flashdev->pagesize) { + mtd->erasesize = (64 * 1024) << ((devcfg >> 4) & 0x3); + mtd->writesize = 1024 << (devcfg & 0x3); + mtd->oobsize = (8 << ((devcfg >> 2) & 1)) * + (mtd->writesize / 512); + busw16 = devcfg & (1 << 6) ? CFG1_WIDE_FLASH : 0; + } else { + mtd->writesize = flashdev->pagesize; + mtd->erasesize = flashdev->erasesize; + mtd->oobsize = flashdev->pagesize / 32; + busw16 = flashdev->options & NAND_BUSWIDTH_16 ? + CFG1_WIDE_FLASH : 0; + } + mtd->size = flashdev->chipsize << 20; + pr_info("msm_nand: manuf %s (0x%x) device 0x%x blocksz %x pagesz %x " + "size %llx\n", flashman->name, flashman->id, flashdev->id, + mtd->erasesize, mtd->writesize, mtd->size); + + if (mtd->writesize == 2048) { + chip->page_shift = 11; + } else if (mtd->writesize == 4096) { + chip->page_shift = 12; + } else { + pr_err("%s: Unsupported page size (%d)\n", __func__, + mtd->writesize); + return -EINVAL; + } + + chip->last_sector = (mtd->writesize / 512) - 1; + chip->last_sectorsz = mtd->writesize - chip->last_sector * 516; + + if (mtd->oobsize == 64) { + mtd->ecclayout = &msm_nand_oob_64; + } else if (mtd->oobsize == 128) { + mtd->ecclayout = &msm_nand_oob_128; + } else { + pr_err("%s: Unsupported oob size (%d)\n", __func__, + mtd->oobsize); + return -EINVAL; + } + mtd->oobavail = mtd->ecclayout->oobavail; + + chip->CFG0 = (chip->last_sector << 6) /* codewords per page */ + | (516 << 9) /* 516 user data bytes */ + | (10 << 19) /* 10 parity bytes */ + | (5 << 27) /* 5 address cycles */ + | (1 << 30) /* Read status before data */ + | (1 << 31) /* Send read cmd */ + /* 0 spare bytes for 16 bit nand or 1 spare bytes for 8 bit */ + | ((busw16 & CFG1_WIDE_FLASH) ? 
(0 << 23) : (1 << 23)); + chip->CFG1 = (0 << 0) /* Enable ecc */ + | (7 << 2) /* 8 recovery cycles */ + | (0 << 5) /* Allow CS deassertion */ + | ((mtd->writesize - (528 * chip->last_sector) + 1) << 6) + /* Bad block marker location */ + | (0 << 16) /* Bad block in user data area */ + | (2 << 17) /* 6 cycle tWB/tRB */ + | (busw16 & CFG1_WIDE_FLASH); /* preserve wide flag */ + + pr_info("msm_nand: save CFG0 = %x CFG1 = %x\n", chip->CFG0, chip->CFG1); + pr_info("msm_nand: CFG0: cw/page=%d ud_sz=%d ecc_sz=%d spare_sz=%d " + "num_addr_cycles=%d\n", (chip->CFG0 >> 6) & 7, + (chip->CFG0 >> 9) & 0x3ff, (chip->CFG0 >> 19) & 15, + (chip->CFG0 >> 23) & 15, (chip->CFG0 >> 27) & 7); + + n = flash_rd_reg(chip, MSM_NAND_DEV_CMD1); + pr_info("msm_nand: DEV_CMD1: %x\n", n); + + n = flash_rd_reg(chip, MSM_NAND_EBI2_ECC_BUF_CFG); + pr_info("msm_nand: NAND_EBI2_ECC_BUF_CFG: %x\n", n); + +#if SUPPORT_WRONG_ECC_CONFIG + chip->ecc_buf_cfg = 0x203; + chip->saved_ecc_buf_cfg = n; +#endif + + /* Fill in remaining MTD driver data */ + mtd->type = MTD_NANDFLASH; + mtd->flags = MTD_CAP_NANDFLASH; + /* mtd->ecctype = MTD_ECC_SW; */ + mtd->erase = msm_nand_erase; + mtd->point = NULL; + mtd->unpoint = NULL; + mtd->read = msm_nand_read; + mtd->write = msm_nand_write; + mtd->read_oob = msm_nand_read_oob; + mtd->write_oob = msm_nand_write_oob; + /* mtd->sync = msm_nand_sync; */ + mtd->lock = NULL; + /* mtd->unlock = msm_nand_unlock; */ + mtd->suspend = msm_nand_suspend; + mtd->resume = msm_nand_resume; + mtd->block_isbad = msm_nand_block_isbad; + mtd->block_markbad = msm_nand_block_markbad; + mtd->owner = THIS_MODULE; + + /* Information provides to HTC SSD HW Info tool */ + nand_info = &chip->dev_info; + nand_info->flash_id = flash_id; + nand_info->maker_id = (flash_id & 0xff); + switch (nand_info->maker_id) { + case 0xec: + strcpy(nand_info->maker_name, "Samsung"); + break; + case 0xad: + strcpy(nand_info->maker_name, "Hynix"); + break; + case 0x2c: + strcpy(nand_info->maker_name, "Micron"); + break; + default: + strcpy(nand_info->maker_name, "Unknown"); + break; + } + nand_info->width = (CFG1_WIDE_FLASH? 
16: 8); + nand_info->size = mtd->size; + nand_info->page_size = mtd->writesize; + nand_info->page_count = mtd->erasesize/mtd->writesize; + nand_info->block_count = mtd->size; + do_div(nand_info->block_count, nand_info->page_size * nand_info->page_count); + + /* Unlock whole block */ + /* msm_nand_unlock_all(mtd); */ + + /* return this->scan_bbt(mtd); */ + return 0; +} +EXPORT_SYMBOL_GPL(msm_nand_scan); + +/** + * msm_nand_release - [msm_nand Interface] Free resources held by the msm_nand device + * @param mtd MTD device structure + */ +void msm_nand_release(struct mtd_info *mtd) +{ + /* struct msm_nand_chip *this = mtd->priv; */ + +#ifdef CONFIG_MTD_PARTITIONS + /* Deregister partitions */ + del_mtd_partitions(mtd); +#endif + /* Deregister the device */ + del_mtd_device(mtd); +} +EXPORT_SYMBOL_GPL(msm_nand_release); + +#ifdef CONFIG_MTD_PARTITIONS +static const char *part_probes[] = { "cmdlinepart", NULL, }; +#endif + +struct msm_nand_info { + struct mtd_info mtd; + struct mtd_partition *parts; + struct msm_nand_chip msm_nand; +}; + +static int __devinit msm_nand_probe(struct platform_device *pdev) +{ + struct msm_nand_info *info; + struct flash_platform_data *pdata = pdev->dev.platform_data; + int err; + int i; + + if (pdev->num_resources != 1) { + pr_err("invalid num_resources"); + return -ENODEV; + } + if (pdev->resource[0].flags != IORESOURCE_DMA) { + pr_err("invalid resource type"); + return -ENODEV; + } + + info = kzalloc(sizeof(struct msm_nand_info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->msm_nand.dev = &pdev->dev; + + init_waitqueue_head(&info->msm_nand.wait_queue); + + info->msm_nand.dma_channel = pdev->resource[0].start; + /* this currently fails if dev is passed in */ + info->msm_nand.dma_buffer = + dma_alloc_coherent(/*dev*/ NULL, MSM_NAND_DMA_BUFFER_SIZE, + &info->msm_nand.dma_addr, GFP_KERNEL); + if (info->msm_nand.dma_buffer == NULL) { + err = -ENOMEM; + goto out_free_info; + } + + pr_info("msm_nand: allocated dma buffer at %p, dma_addr %x\n", + info->msm_nand.dma_buffer, info->msm_nand.dma_addr); + + info->mtd.name = dev_name(&pdev->dev); + info->mtd.priv = &info->msm_nand; + info->mtd.owner = THIS_MODULE; + + if (msm_nand_scan(&info->mtd, 1)) { + err = -ENXIO; + goto out_free_dma_buffer; + } + +#ifdef CONFIG_MTD_PARTITIONS + err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0); + if (err > 0) + add_mtd_partitions(&info->mtd, info->parts, err); + else if (err <= 0 && pdata && pdata->parts) { + for (i = 0; i < pdata->nr_parts; ++i) { + pdata->parts[i].offset *= info->mtd.erasesize; + pdata->parts[i].size *= info->mtd.erasesize; + } + add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts); + } else +#endif + err = add_mtd_device(&info->mtd); + + dev_set_drvdata(&pdev->dev, info); + + return 0; + +out_free_dma_buffer: + dma_free_coherent(/*dev*/ NULL, SZ_4K, info->msm_nand.dma_buffer, + info->msm_nand.dma_addr); +out_free_info: + kfree(info); + + return err; +} + +static int __devexit msm_nand_remove(struct platform_device *pdev) +{ + struct msm_nand_info *info = dev_get_drvdata(&pdev->dev); + + dev_set_drvdata(&pdev->dev, NULL); + + if (info) { +#ifdef CONFIG_MTD_PARTITIONS + if (info->parts) + del_mtd_partitions(&info->mtd); + else +#endif + del_mtd_device(&info->mtd); + + msm_nand_release(&info->mtd); + dma_free_coherent(/*dev*/ NULL, SZ_4K, + info->msm_nand.dma_buffer, + info->msm_nand.dma_addr); + kfree(info); + } + + return 0; +} + +#define DRIVER_NAME "msm_nand" + +static struct platform_driver msm_nand_driver = { + .probe = 
msm_nand_probe, + .remove = __devexit_p(msm_nand_remove), + .driver = { + .name = DRIVER_NAME, + } +}; + +MODULE_ALIAS(DRIVER_NAME); + +static int __init msm_nand_init(void) +{ + return platform_driver_register(&msm_nand_driver); +} + +static void __exit msm_nand_exit(void) +{ + platform_driver_unregister(&msm_nand_driver); +} + +module_init(msm_nand_init); +module_exit(msm_nand_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("msm_nand flash driver code"); diff --git a/drivers/mtd/devices/msm_nand.h b/drivers/mtd/devices/msm_nand.h new file mode 100644 index 0000000000000..18893f182c7cb --- /dev/null +++ b/drivers/mtd/devices/msm_nand.h @@ -0,0 +1,71 @@ +/* drivers/mtd/devices/msm_nand.h + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __DRIVERS_MTD_DEVICES_MSM_NAND_H +#define __DRIVERS_MTD_DEVICES_MSM_NAND_H + +#include + +#define MSM_NAND_REG(off) (MSM_NAND_PHYS + (off)) + +#define MSM_NAND_FLASH_CMD MSM_NAND_REG(0x0000) +#define MSM_NAND_ADDR0 MSM_NAND_REG(0x0004) +#define MSM_NAND_ADDR1 MSM_NAND_REG(0x0008) +#define MSM_NAND_FLASH_CHIP_SELECT MSM_NAND_REG(0x000C) +#define MSM_NAND_EXEC_CMD MSM_NAND_REG(0x0010) +#define MSM_NAND_FLASH_STATUS MSM_NAND_REG(0x0014) +#define MSM_NAND_BUFFER_STATUS MSM_NAND_REG(0x0018) +#define MSM_NAND_DEV0_CFG0 MSM_NAND_REG(0x0020) +#define MSM_NAND_DEV0_CFG1 MSM_NAND_REG(0x0024) +#define MSM_NAND_DEV1_CFG0 MSM_NAND_REG(0x0030) +#define MSM_NAND_DEV1_CFG1 MSM_NAND_REG(0x0034) +#define MSM_NAND_READ_ID MSM_NAND_REG(0x0040) +#define MSM_NAND_READ_STATUS MSM_NAND_REG(0x0044) +#define MSM_NAND_CONFIG_DATA MSM_NAND_REG(0x0050) +#define MSM_NAND_CONFIG MSM_NAND_REG(0x0054) +#define MSM_NAND_CONFIG_MODE MSM_NAND_REG(0x0058) +#define MSM_NAND_CONFIG_STATUS MSM_NAND_REG(0x0060) +#define MSM_NAND_MACRO1_REG MSM_NAND_REG(0x0064) +#define MSM_NAND_XFR_STEP1 MSM_NAND_REG(0x0070) +#define MSM_NAND_XFR_STEP2 MSM_NAND_REG(0x0074) +#define MSM_NAND_XFR_STEP3 MSM_NAND_REG(0x0078) +#define MSM_NAND_XFR_STEP4 MSM_NAND_REG(0x007C) +#define MSM_NAND_XFR_STEP5 MSM_NAND_REG(0x0080) +#define MSM_NAND_XFR_STEP6 MSM_NAND_REG(0x0084) +#define MSM_NAND_XFR_STEP7 MSM_NAND_REG(0x0088) +#define MSM_NAND_DEV_CMD0 MSM_NAND_REG(0x00A0) +#define MSM_NAND_DEV_CMD1 MSM_NAND_REG(0x00A4) +#define MSM_NAND_DEV_CMD2 MSM_NAND_REG(0x00A8) +#define MSM_NAND_DEV_CMD_VLD MSM_NAND_REG(0x00AC) +#define MSM_NAND_EBI2_MISR_SIG_REG MSM_NAND_REG(0x00B0) +#define MSM_NAND_EBI2_ECC_BUF_CFG MSM_NAND_REG(0x00F0) +#define MSM_NAND_FLASH_BUFFER MSM_NAND_REG(0x0100) + +/* device commands */ + +#define MSM_NAND_CMD_SOFT_RESET 0x01 +#define MSM_NAND_CMD_PAGE_READ 0x32 +#define MSM_NAND_CMD_PAGE_READ_ECC 0x33 +#define MSM_NAND_CMD_PAGE_READ_ALL 0x34 +#define MSM_NAND_CMD_SEQ_PAGE_READ 0x15 +#define MSM_NAND_CMD_PRG_PAGE 0x36 +#define MSM_NAND_CMD_PRG_PAGE_ECC 0x37 +#define MSM_NAND_CMD_PRG_PAGE_ALL 0x39 +#define MSM_NAND_CMD_BLOCK_ERASE 0x3A +#define MSM_NAND_CMD_FETCH_ID 0x0B +#define MSM_NAND_CMD_STATUS 0x0C +#define MSM_NAND_CMD_RESET 0x0D + +#endif diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c index 
5f5777bd3f75f..c2c93e9206dbb 100644 --- a/drivers/mtd/mtdconcat.c +++ b/drivers/mtd/mtdconcat.c @@ -319,7 +319,7 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) if (!(mtd->flags & MTD_WRITEABLE)) return -EROFS; - ops->retlen = 0; + ops->retlen = ops->oobretlen = 0; for (i = 0; i < concat->num_subdev; i++) { struct mtd_info *subdev = concat->subdev[i]; @@ -334,7 +334,7 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) devops.len = subdev->size - to; err = subdev->write_oob(subdev, to, &devops); - ops->retlen += devops.retlen; + ops->retlen += devops.oobretlen; if (err) return err; diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 11b95b3e805f0..2a508612ccf25 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -3111,6 +3111,8 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, chip->chip_shift += 32 - 1; } + chip->badblockbits = 8; + /* Set the bad block position */ if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16)) chip->badblockpos = NAND_LARGE_BADBLOCK_POS; diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 773ce72ac77a5..b42d0c1c4c3ee 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1045,12 +1045,12 @@ config SMC911X help This is a driver for SMSC's LAN911x series of Ethernet chipsets including the new LAN9115, LAN9116, LAN9117, and LAN9118. - Say Y if you want it compiled into the kernel, + Say Y if you want it compiled into the kernel, and read the Ethernet-HOWTO, available from . - This driver is also available as a module. The module will be - called smc911x. If you want to compile it as a module, say M + This driver is also available as a module. The module will be + called smc911x. If you want to compile it as a module, say M here and read config SMSC911X @@ -1525,9 +1525,9 @@ config E100 select MII ---help--- This driver supports Intel(R) PRO/100 family of adapters. - To verify that your adapter is supported, find the board ID number - on the adapter. Look for a label that has a barcode and a number - in the format 123456-001 (six digits hyphen three digits). + To verify that your adapter is supported, find the board ID number + on the adapter. Look for a label that has a barcode and a number + in the format 123456-001 (six digits hyphen three digits). Use the above information and the Adapter & Driver ID Guide at: @@ -1539,7 +1539,7 @@ config E100 - More specific information on configuring the driver is in + More specific information on configuring the driver is in . To compile this driver as a module, choose M here. The module @@ -2080,7 +2080,7 @@ config E1000 depends on PCI ---help--- This driver supports Intel(R) PRO/1000 gigabit ethernet family of - adapters. For more information on how to identify your adapter, go + adapters. For more information on how to identify your adapter, go to the Adapter & Driver ID Guide at: @@ -2090,7 +2090,7 @@ config E1000 - More specific information on configuring the driver is in + More specific information on configuring the driver is in . To compile this driver as a module, choose M here. The module @@ -2280,7 +2280,7 @@ config SKGE and related Gigabit Ethernet adapters. It is a new smaller driver with better performance and more complete ethtool support. - It does not support the link failover and network management + It does not support the link failover and network management features that "portable" vendor supplied sk98lin driver does. 
This driver supports adapters based on the original Yukon chipset: @@ -2533,7 +2533,7 @@ config S6GMAC source "drivers/net/stmmac/Kconfig" config PCH_GBE - tristate "PCH Gigabit Ethernet" + tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE" depends on PCI select MII ---help--- @@ -2545,6 +2545,12 @@ config PCH_GBE to Gigabit Ethernet. This driver enables Gigabit Ethernet function. + This driver can also be used for the OKI SEMICONDUCTOR IOH (Input/ + Output Hub), ML7223. + The ML7223 IOH is intended for MP (Media Phone) use. + The ML7223 is a companion chip for the Intel Atom E6xx series. + The ML7223 is fully compatible with the Intel EG20T PCH. + endif # NETDEV_1000 # @@ -2759,7 +2765,7 @@ config IXGB - More specific information on configuring the driver is in + More specific information on configuring the driver is in . To compile this driver as a module, choose M here. The module @@ -3263,9 +3269,9 @@ config PPPOE Support for PPP over Ethernet. This driver requires the latest version of pppd from the CVS - repository at cvs.samba.org. Alternatively, see the + repository at cvs.samba.org. Alternatively, see the RoaringPenguin package () - which contains instruction on how to use this driver (under + which contains instruction on how to use this driver (under the heading "Kernel mode PPPoE"). config PPTP @@ -3404,6 +3410,21 @@ config NETCONSOLE If you want to log kernel messages over the network, enable this. See for details. +config MSM_RMNET + tristate "MSM RMNET Virtual Network Device" + depends on ARCH_MSM + default y + help + Virtual ethernet interface for MSM RMNET transport. + +config MSM_RMNET_DEBUG + bool "MSM RMNET debug interface" + depends on MSM_RMNET + default n + help + Debug stats on wakeup counts. + + config NETCONSOLE_DYNAMIC bool "Dynamic reconfiguration of logging targets" depends on NETCONSOLE && SYSFS && CONFIGFS_FS @@ -3439,4 +3460,13 @@ config VMXNET3 To compile this driver as a module, choose M here: the module will be called vmxnet3. +config GAN_ETH + tristate "Kineto GAN compatibility" + depends on INET + default n + help + A virtual ethernet device that adds/removes IP/UDP headers, + as required by Kineto GAN.
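MSM_RMNET_DEBUG is a pure compile-time switch: when it is set, msm_rmnet.c (added later in this patch) keeps per-device counters of packets that had to wake an otherwise idle link and exports them through sysfs. A reduced sketch of how the pieces connect, assuming the helpers shown in the driver below (rmnet_cause_wakeup() returns 1 only when a packet starts a new activity window); the real call sites live in the transmit/receive handlers beyond this excerpt, so treat the placement as illustrative:

	#ifdef CONFIG_MSM_RMNET_DEBUG
		/* in the tx handler; the rx side does the same with wakeups_rcv */
		if (count_this_packet(skb->data, skb->len))
			p->wakeups_xmit += rmnet_cause_wakeup(p);
	#endif

The corresponding wakeups_xmit/wakeups_rcv sysfs attributes are defined further down in the file.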
+ + endif # NETDEVICES diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 430d42fd53bbd..571b1c652ef76 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -107,6 +107,7 @@ obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o obj-$(CONFIG_RIONET) += rionet.o obj-$(CONFIG_SH_ETH) += sh_eth.o obj-$(CONFIG_STMMAC_ETH) += stmmac/ +obj-$(CONFIG_GAN_ETH) += gan-eth.o # # end link order section @@ -144,7 +145,7 @@ obj-$(CONFIG_NE3210) += ne3210.o 8390.o obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o obj-$(CONFIG_B44) += b44.o obj-$(CONFIG_FORCEDETH) += forcedeth.o -obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o +obj-$(CONFIG_NE_H8300) += ne-h8300.o obj-$(CONFIG_AX88796) += ax88796.o obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o @@ -219,7 +220,7 @@ obj-$(CONFIG_SC92031) += sc92031.o obj-$(CONFIG_LP486E) += lp486e.o obj-$(CONFIG_ETH16I) += eth16i.o -obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o +obj-$(CONFIG_ZORRO8390) += zorro8390.o obj-$(CONFIG_HPLANCE) += hplance.o 7990.o obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o obj-$(CONFIG_EQUALIZER) += eql.o @@ -231,7 +232,7 @@ obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o obj-$(CONFIG_DECLANCE) += declance.o obj-$(CONFIG_ATARILANCE) += atarilance.o obj-$(CONFIG_A2065) += a2065.o -obj-$(CONFIG_HYDRA) += hydra.o 8390.o +obj-$(CONFIG_HYDRA) += hydra.o obj-$(CONFIG_ARIADNE) += ariadne.o obj-$(CONFIG_CS89x0) += cs89x0.o obj-$(CONFIG_MACSONIC) += macsonic.o @@ -294,6 +295,8 @@ obj-$(CONFIG_NETCONSOLE) += netconsole.o obj-$(CONFIG_FS_ENET) += fs_enet/ obj-$(CONFIG_NETXEN_NIC) += netxen/ + +obj-$(CONFIG_MSM_RMNET) += msm_rmnet.o obj-$(CONFIG_NIU) += niu.o obj-$(CONFIG_VIRTIO_NET) += virtio_net.o obj-$(CONFIG_SFC) += sfc/ diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h index 9ab58097fa2e7..dec81102adc29 100644 --- a/drivers/net/atl1c/atl1c.h +++ b/drivers/net/atl1c/atl1c.h @@ -566,9 +566,9 @@ struct atl1c_adapter { #define __AT_TESTING 0x0001 #define __AT_RESETTING 0x0002 #define __AT_DOWN 0x0003 - u8 work_event; -#define ATL1C_WORK_EVENT_RESET 0x01 -#define ATL1C_WORK_EVENT_LINK_CHANGE 0x02 + unsigned long work_event; +#define ATL1C_WORK_EVENT_RESET 0 +#define ATL1C_WORK_EVENT_LINK_CHANGE 1 u32 msg_enable; bool have_msi; diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c index 3824382faecc1..dffc7f72e7d78 100644 --- a/drivers/net/atl1c/atl1c_main.c +++ b/drivers/net/atl1c/atl1c_main.c @@ -325,7 +325,7 @@ static void atl1c_link_chg_event(struct atl1c_adapter *adapter) } } - adapter->work_event |= ATL1C_WORK_EVENT_LINK_CHANGE; + set_bit(ATL1C_WORK_EVENT_LINK_CHANGE, &adapter->work_event); schedule_work(&adapter->common_task); } @@ -337,20 +337,16 @@ static void atl1c_common_task(struct work_struct *work) adapter = container_of(work, struct atl1c_adapter, common_task); netdev = adapter->netdev; - if (adapter->work_event & ATL1C_WORK_EVENT_RESET) { - adapter->work_event &= ~ATL1C_WORK_EVENT_RESET; + if (test_and_clear_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event)) { netif_device_detach(netdev); atl1c_down(adapter); atl1c_up(adapter); netif_device_attach(netdev); - return; } - if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE) { - adapter->work_event &= ~ATL1C_WORK_EVENT_LINK_CHANGE; + if (test_and_clear_bit(ATL1C_WORK_EVENT_LINK_CHANGE, + &adapter->work_event)) atl1c_check_link_status(adapter); - } - return; } @@ -369,7 +365,7 @@ static void atl1c_tx_timeout(struct net_device *netdev) struct atl1c_adapter *adapter = netdev_priv(netdev); /* Do the reset outside of interrupt context */ - adapter->work_event |= 
ATL1C_WORK_EVENT_RESET; + set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event); schedule_work(&adapter->common_task); } diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index a5d5d0b5b1558..28ea364994245 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -1482,8 +1482,11 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best, static int agg_device_up(const struct aggregator *agg) { - return (netif_running(agg->slave->dev) && - netif_carrier_ok(agg->slave->dev)); + struct port *port = agg->lag_ports; + if (!port) + return 0; + return (netif_running(port->slave->dev) && + netif_carrier_ok(port->slave->dev)); } /** diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 5c6fba802f2b7..11ebd8f353caa 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -163,8 +163,6 @@ static int tlb_initialize(struct bonding *bond) struct tlb_client_info *new_hashtbl; int i; - spin_lock_init(&(bond_info->tx_hashtbl_lock)); - new_hashtbl = kzalloc(size, GFP_KERNEL); if (!new_hashtbl) { pr_err("%s: Error: Failed to allocate TLB hash table\n", @@ -764,8 +762,6 @@ static int rlb_initialize(struct bonding *bond) int size = RLB_HASH_TABLE_SIZE * sizeof(struct rlb_client_info); int i; - spin_lock_init(&(bond_info->rx_hashtbl_lock)); - new_hashtbl = kmalloc(size, GFP_KERNEL); if (!new_hashtbl) { pr_err("%s: Error: Failed to allocate RLB hash table\n", diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 163e0b06eaa5d..ac8dce5545a6a 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1441,12 +1441,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) bond_dev->name, slave_dev->name); } - /* bond must be initialized by bond_open() before enslaving */ - if (!(bond_dev->flags & IFF_UP)) { - pr_warning("%s: master_dev is not up in bond_enslave\n", - bond_dev->name); - } - /* already enslaved */ if (slave_dev->flags & IFF_SLAVE) { pr_debug("Error, Device was already enslaved\n"); @@ -5157,9 +5151,19 @@ static int bond_init(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); + struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); pr_debug("Begin bond_init for %s\n", bond_dev->name); + /* + * Initialize locks that may be required during + * en/deslave operations. All of the bond_open work + * (of which this is part) should really be moved to + * a phase prior to dev_open + */ + spin_lock_init(&(bond_info->tx_hashtbl_lock)); + spin_lock_init(&(bond_info->rx_hashtbl_lock)); + bond->wq = create_singlethread_workqueue(bond_dev->name); if (!bond->wq) return -ENOMEM; diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 8fd0174c53804..ddc316500fa8a 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -224,12 +224,6 @@ static ssize_t bonding_store_slaves(struct device *d, struct net_device *dev; struct bonding *bond = to_bond(d); - /* Quick sanity check -- is the bond interface up? 
*/ - if (!(bond->dev->flags & IFF_UP)) { - pr_warning("%s: doing slave updates when interface is down.\n", - bond->dev->name); - } - if (!rtnl_trylock()) return restart_syscall(); diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 0a8de01d52f7a..a616658868d14 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -346,10 +346,10 @@ static void sja1000_rx(struct net_device *dev) | (priv->read_reg(priv, REG_ID2) >> 5); } + cf->can_dlc = get_can_dlc(fi & 0x0F); if (fi & FI_RTR) { id |= CAN_RTR_FLAG; } else { - cf->can_dlc = get_can_dlc(fi & 0x0F); for (i = 0; i < cf->can_dlc; i++) cf->data[i] = priv->read_reg(priv, dreg++); } diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index b423965a78d16..1b49df6b24708 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -583,7 +583,9 @@ static int slcan_open(struct tty_struct *tty) /* Done. We have linked the TTY line to a channel. */ rtnl_unlock(); tty->receive_room = 65536; /* We don't flow control */ - return sl->dev->base_addr; + + /* TTY layer expects 0 on success */ + return 0; err_free_chan: sl->tty = NULL; diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c index 3e2e734fecb73..f3bbdcef338cf 100644 --- a/drivers/net/ehea/ehea_ethtool.c +++ b/drivers/net/ehea/ehea_ethtool.c @@ -55,15 +55,20 @@ static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) cmd->duplex = -1; } - cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full - | SUPPORTED_100baseT_Full | SUPPORTED_100baseT_Half - | SUPPORTED_10baseT_Full | SUPPORTED_10baseT_Half - | SUPPORTED_Autoneg | SUPPORTED_FIBRE); - - cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Autoneg - | ADVERTISED_FIBRE); + if (cmd->speed == SPEED_10000) { + cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); + cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); + cmd->port = PORT_FIBRE; + } else { + cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full + | SUPPORTED_100baseT_Half | SUPPORTED_10baseT_Full + | SUPPORTED_10baseT_Half | SUPPORTED_Autoneg + | SUPPORTED_TP); + cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg + | ADVERTISED_TP); + cmd->port = PORT_TP; + } - cmd->port = PORT_FIBRE; cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE; return 0; diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index f75d3144b8a50..0bd6d30a1ec7c 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -2688,9 +2688,6 @@ static int ehea_open(struct net_device *dev) netif_start_queue(dev); } - init_waitqueue_head(&port->swqe_avail_wq); - init_waitqueue_head(&port->restart_wq); - mutex_unlock(&port->port_lock); return ret; @@ -3273,6 +3270,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, INIT_WORK(&port->reset_task, ehea_reset_port); + init_waitqueue_head(&port->swqe_avail_wq); + init_waitqueue_head(&port->restart_wq); + ret = register_netdev(dev); if (ret) { pr_err("register_netdev failed. 
ret=%d\n", ret); diff --git a/drivers/net/gan-eth.c b/drivers/net/gan-eth.c new file mode 100644 index 0000000000000..84ad68b007c5c --- /dev/null +++ b/drivers/net/gan-eth.c @@ -0,0 +1,321 @@ +/* + * gan-eth.c: "gannet" compatibility for Kineto GAN + * + * Packets received on UDP PORT_RX: + * strip headers, add ETH header and reintroduce + * Packets sent via interface: + * strip ETH header, send to UDP localhost:PORT_TX + * + * And yes, this is a very hackish way to add/strip IP/UDP headers. + * Blame Kineto. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include + +#define MODULE_NAME "ganeth" + +#define PORT_TX 13001 +#define PORT_RX 13010 + +#define IP_DEST INADDR_LOOPBACK + +struct ganeth_priv { + struct socket *tx, *rx; + struct sockaddr_in tx_saddr, rx_saddr; + + struct workqueue_struct *workqueue; + struct work_struct tx_work; + struct sk_buff_head queue; +}; + +static struct net_device *netdev; + +static int ganeth_open(struct net_device *dev) +{ + netif_start_queue(dev); + return 0; +} + +static int ganeth_stop(struct net_device *dev) +{ + netif_stop_queue(dev); + return 0; +} + +static int ganeth_sendmsg(struct net_device *dev, void *buf, int len) { + struct ganeth_priv *priv = netdev_priv(dev); + struct msghdr msg = { + .msg_name = &priv->tx_saddr, + .msg_namelen = sizeof(priv->tx_saddr), + .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT, + }; + struct kvec iov = { + .iov_base = buf, + .iov_len = len, + }; + int err; + + err = kernel_sendmsg(priv->tx, &msg, &iov, 1, len); + if (err >= 0) { + dev->stats.tx_packets++; + dev->stats.tx_bytes += len; + } else { + dev->stats.tx_errors++; + } + + return err; +} + +static void ganeth_tx_work(struct work_struct *work) +{ + struct ganeth_priv *priv = netdev_priv(netdev); + struct sk_buff *skb; + + while ((skb = skb_dequeue(&priv->queue))) { + ganeth_sendmsg(netdev, skb->data + ETH_HLEN, skb->len - ETH_HLEN); + dev_kfree_skb(skb); + } +} + +static netdev_tx_t ganeth_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ganeth_priv *priv = netdev_priv(dev); + + if (skb->protocol != htons(ETH_P_IP)) { + if (skb->protocol == htons(ETH_P_IPV6)) { + /* Silently drop IPV6 */ + goto out_drop; + } + + pr_warning("%s: dropping packet with protocol %d\n", dev->name, ntohs(skb->protocol)); + goto out_tx_err; + } + + if (skb->len < ETH_HLEN) { + pr_err("%s: short packet\n", dev->name); + goto out_tx_err; + } + + skb_queue_tail(&priv->queue, skb); + queue_work(priv->workqueue, &priv->tx_work); + return NETDEV_TX_OK; + +out_tx_err: + dev->stats.tx_errors++; +out_drop: + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +static void ganeth_tx_timeout(struct net_device *dev) +{ + netif_wake_queue(dev); +} + +static void ganeth_rx_data_ready(struct sock *sk, int len) +{ + struct sk_buff *skb, *rxskb; + struct ethhdr *eth; + void *buf; + unsigned int ulen; + int err; + + skb = skb_recv_datagram(sk, 0, 1, &err); + + if (!skb) + goto out_err; + + ulen = skb->len - sizeof(struct udphdr); + + if (!skb_csum_unnecessary(skb)) { + if (udp_lib_checksum_complete(skb)) { + pr_err("%s: checksum error\n", netdev->name); + goto out_err; + } + } + + dst_confirm(skb_dst(skb)); + + rxskb = 
dev_alloc_skb(ulen + ETH_HLEN + NET_IP_ALIGN); + if (!rxskb) { + pr_err("%s: failed to allocate skb\n", netdev->name); + goto out_err; + } + + skb_reserve(rxskb, NET_IP_ALIGN); + + /* Ethernet header */ + eth = (struct ethhdr *)skb_put(rxskb, ETH_HLEN); + memset(eth->h_dest, 0, ETH_ALEN); + memcpy(eth->h_source, netdev->dev_addr, ETH_ALEN); + eth->h_proto = htons(ETH_P_IP); + + /* data */ + buf = skb_put(rxskb, ulen); + memcpy(buf, skb->data + sizeof(struct udphdr), ulen); + + rxskb->dev = netdev; + + skb_reset_mac_header(rxskb); + rxskb->protocol = eth->h_proto; + skb_pull(rxskb, ETH_HLEN); /* Eat ethernet header */ + + rxskb->ip_summed = CHECKSUM_NONE; + rxskb->pkt_type = PACKET_HOST; + + if (netif_rx(rxskb) == NET_RX_DROP) + goto out_err; + + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += rxskb->len; + skb_free_datagram(sk, skb); + return; + +out_err: + netdev->stats.rx_errors++; + netdev->stats.rx_dropped++; + skb_free_datagram(sk, skb); + return; + +} + +static const struct net_device_ops ganeth_netdev_ops = { + .ndo_open = ganeth_open, + .ndo_stop = ganeth_stop, + .ndo_start_xmit = ganeth_start_xmit, + .ndo_tx_timeout = ganeth_tx_timeout, +}; + +static void __init ganeth_setup(struct net_device *dev) +{ + ether_setup(dev); + + dev->netdev_ops = &ganeth_netdev_ops; + + dev->flags |= IFF_NOARP; + dev->features |= NETIF_F_NETNS_LOCAL; + + dev->mtu = 1320; + + dev->watchdog_timeo = msecs_to_jiffies(2000); + + random_ether_addr(dev->dev_addr); +} + +static int __init ganeth_init(void) +{ + struct net_device *dev; + struct ganeth_priv *priv; + int err; + + netdev = dev = alloc_netdev(sizeof(*priv), "gannet%d", ganeth_setup); + + if (!dev) + return -ENOMEM; + + err = register_netdev(dev); + if (err) { + pr_err("%s: error registering netdev\n", __func__); + goto out_free; + } + + priv = netdev_priv(dev); + + priv->workqueue = create_workqueue("gannet"); + INIT_WORK(&priv->tx_work, ganeth_tx_work); + skb_queue_head_init(&priv->queue); + + /* tx */ + err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &priv->tx); + if (err < 0) { + pr_err("%s: error creating tx socket\n", dev->name); + goto out_unregister; + } + + memset(&priv->tx_saddr, 0, sizeof(priv->tx_saddr)); + priv->tx_saddr.sin_family = AF_INET; + priv->tx_saddr.sin_addr.s_addr = htonl(IP_DEST); + priv->tx_saddr.sin_port = htons(PORT_TX); + + /* rx */ + err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &priv->rx); + if (err < 0) { + pr_err("%s: error creating rx socket\n", dev->name); + goto out_release_tx; + } + priv->rx->sk->sk_data_ready = ganeth_rx_data_ready; + + memset(&priv->rx_saddr, 0, sizeof(priv->rx_saddr)); + priv->rx_saddr.sin_family = AF_INET; + priv->rx_saddr.sin_addr.s_addr = htonl(INADDR_ANY); + priv->rx_saddr.sin_port = htons(PORT_RX); + + err = priv->rx->ops->bind( + priv->rx, + (struct sockaddr *)&priv->rx_saddr, + sizeof(priv->rx_saddr) + ); + if (err < 0) { + pr_err("%s: error binding rx socket\n", dev->name); + goto out_release_rx; + } + + return 0; + +out_release_rx: + sock_release(priv->rx); +out_release_tx: + sock_release(priv->tx); +out_unregister: + unregister_netdev(dev); +out_free: + free_netdev(dev); + return err; +} + +static void __exit ganeth_exit(void) +{ + struct ganeth_priv *priv; + + if (!netdev) + return; + + priv = netdev_priv(netdev); + sock_release(priv->rx); + sock_release(priv->tx); + unregister_netdev(netdev); + destroy_workqueue(priv->workqueue); + free_netdev(netdev); + + netdev = NULL; +} + +module_init(ganeth_init); +module_exit(ganeth_exit); + 
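For context, the datagram contract this module assumes is symmetric with a userspace GAN client on loopback: everything the gannet interface transmits arrives as UDP datagrams on 127.0.0.1:13001 (PORT_TX above), and raw IP packets sent to 127.0.0.1:13010 (PORT_RX) are re-framed by ganeth_rx_data_ready() and delivered as received packets on the interface. A standalone userspace sketch of that plumbing, not part of this patch, with error handling omitted and the first registered instance assumed to be gannet0:

	/* Illustrative loopback peer: echoes every packet the gannet
	 * interface transmits straight back into it, which is enough to
	 * verify the UDP plumbing end to end.
	 */
	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <sys/socket.h>
	#include <sys/types.h>
	#include <unistd.h>

	int main(void)
	{
		struct sockaddr_in rx = { 0 }, tx = { 0 };
		char pkt[2048];
		int s = socket(AF_INET, SOCK_DGRAM, 0);

		rx.sin_family = AF_INET;		/* where gan-eth sends (PORT_TX) */
		rx.sin_port = htons(13001);
		rx.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

		tx.sin_family = AF_INET;		/* where gan-eth listens (PORT_RX) */
		tx.sin_port = htons(13010);
		tx.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

		bind(s, (struct sockaddr *)&rx, sizeof(rx));

		for (;;) {
			/* each datagram payload is one raw IP packet from gannet0 */
			ssize_t n = recv(s, pkt, sizeof(pkt), 0);

			if (n <= 0)
				break;
			/* hand it back; it reappears as received traffic on gannet0 */
			sendto(s, pkt, n, 0, (struct sockaddr *)&tx, sizeof(tx));
		}
		close(s);
		return 0;
	}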
+MODULE_AUTHOR("Christopher Lais "); +MODULE_DESCRIPTION("Virtual IP over UDP Ethernet Device"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("gannet"); diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 5ed8f9f9419f1..3da19a5559773 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -950,6 +950,11 @@ static void gfar_detect_errata(struct gfar_private *priv) (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) priv->errata |= GFAR_ERRATA_A002; + /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */ + if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) || + (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020)) + priv->errata |= GFAR_ERRATA_12; + if (priv->errata) dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", priv->errata); @@ -2156,8 +2161,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Set up checksumming */ if (CHECKSUM_PARTIAL == skb->ip_summed) { fcb = gfar_add_fcb(skb); - lstatus |= BD_LFLAG(TXBD_TOE); - gfar_tx_checksum(skb, fcb); + /* as specified by errata */ + if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) + && ((unsigned long)fcb % 0x20) > 0x18)) { + __skb_pull(skb, GMAC_FCB_LEN); + skb_checksum_help(skb); + } else { + lstatus |= BD_LFLAG(TXBD_TOE); + gfar_tx_checksum(skb, fcb); + } } if (vlan_tx_tag_present(skb)) { diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h index 54de4135e932b..ec5d595ce2e26 100644 --- a/drivers/net/gianfar.h +++ b/drivers/net/gianfar.h @@ -1039,6 +1039,7 @@ enum gfar_errata { GFAR_ERRATA_74 = 0x01, GFAR_ERRATA_76 = 0x02, GFAR_ERRATA_A002 = 0x04, + GFAR_ERRATA_12 = 0x08, /* a.k.a errata eTSEC49 */ }; /* Struct stolen almost completely (and shamelessly) from the FCC enet source diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c index c5ef62ceb8403..1cd481c04202f 100644 --- a/drivers/net/hydra.c +++ b/drivers/net/hydra.c @@ -98,15 +98,15 @@ static const struct net_device_ops hydra_netdev_ops = { .ndo_open = hydra_open, .ndo_stop = hydra_close, - .ndo_start_xmit = ei_start_xmit, - .ndo_tx_timeout = ei_tx_timeout, - .ndo_get_stats = ei_get_stats, - .ndo_set_multicast_list = ei_set_multicast_list, + .ndo_start_xmit = __ei_start_xmit, + .ndo_tx_timeout = __ei_tx_timeout, + .ndo_get_stats = __ei_get_stats, + .ndo_set_multicast_list = __ei_set_multicast_list, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = eth_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ei_poll, + .ndo_poll_controller = __ei_poll, #endif }; @@ -125,7 +125,7 @@ static int __devinit hydra_init(struct zorro_dev *z) 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, }; - dev = alloc_ei_netdev(); + dev = ____alloc_ei_netdev(0); if (!dev) return -ENOMEM; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 6ed577b065df1..47fc7d1810bc2 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -586,8 +586,8 @@ static int macvlan_port_create(struct net_device *dev) err = netdev_rx_handler_register(dev, macvlan_handle_frame, port); if (err) kfree(port); - - dev->priv_flags |= IFF_MACVLAN_PORT; + else + dev->priv_flags |= IFF_MACVLAN_PORT; return err; } diff --git a/drivers/net/msm_rmnet.c b/drivers/net/msm_rmnet.c new file mode 100644 index 0000000000000..8a8173aca709a --- /dev/null +++ b/drivers/net/msm_rmnet.c @@ -0,0 +1,466 @@ +/* linux/drivers/net/msm_rmnet.c + * + * Virtual Ethernet Interface for MSM7K Networking + * + * Copyright (C) 2007 Google, Inc. 
+ * Author: Brian Swetland
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include
+#endif
+
+#include
+
+/* XXX should come from smd headers */
+#define SMD_PORT_ETHER0 11
+#define POLL_DELAY 1000000 /* 1 second delay interval */
+
+struct rmnet_private
+{
+	smd_channel_t *ch;
+	struct net_device_stats stats;
+	const char *chname;
+	struct wake_lock wake_lock;
+#ifdef CONFIG_MSM_RMNET_DEBUG
+	ktime_t last_packet;
+	short active_countdown; /* Number of times left to check */
+	short restart_count; /* Number of polls seen so far */
+	unsigned long wakeups_xmit;
+	unsigned long wakeups_rcv;
+	unsigned long timeout_us;
+	unsigned long awake_time_ms;
+	struct delayed_work work;
+#endif
+};
+
+static int count_this_packet(void *_hdr, int len)
+{
+	struct ethhdr *hdr = _hdr;
+
+	if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
+		return 0;
+
+	return 1;
+}
+
+#ifdef CONFIG_MSM_RMNET_DEBUG
+static int in_suspend;
+static unsigned long timeout_us;
+static struct workqueue_struct *rmnet_wq;
+
+static void do_check_active(struct work_struct *work)
+{
+	struct rmnet_private *p =
+		container_of(work, struct rmnet_private, work.work);
+
+	/*
+	 * Soft timers do not wake the cpu from suspend.
+	 * If we are in suspend, do_check_active is only called once at the
+	 * timeout time instead of polling at POLL_DELAY interval. Otherwise the
+	 * cpu will sleep and the timer can fire much later than POLL_DELAY,
+	 * causing a skew in time calculations.
+	 */
+	if (in_suspend) {
+		/*
+		 * Assume the N packets sent during this session are
+		 * uniformly distributed over the timeout window.
+		 */
+		int tmp = p->timeout_us * 2 -
+			(p->timeout_us / (p->active_countdown + 1));
+		tmp /= 1000;
+		p->awake_time_ms += tmp;
+
+		p->active_countdown = p->restart_count = 0;
+		return;
+	}
+
+	/*
+	 * Poll if not in suspend, since this gives more accurate tracking of
+	 * rmnet sessions.
+	 */
+	p->restart_count++;
+	if (--p->active_countdown == 0) {
+		p->awake_time_ms += p->restart_count * POLL_DELAY / 1000;
+		p->restart_count = 0;
+	} else {
+		queue_delayed_work(rmnet_wq, &p->work,
+				usecs_to_jiffies(POLL_DELAY));
+	}
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/*
+ * If early suspend is enabled then we specify two timeout values,
+ * one for screen on (the default) and one for screen off.
+ */
+static unsigned long timeout_suspend_us;
+static struct device *rmnet0;
+
+/* Set timeout in us when the screen is off.
*/ +static ssize_t timeout_suspend_store(struct device *d, struct device_attribute *attr, const char *buf, size_t n) +{ + timeout_suspend_us = simple_strtoul(buf, NULL, 10); + return n; +} + +static ssize_t timeout_suspend_show(struct device *d, + struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%lu\n", (unsigned long) timeout_suspend_us); +} + +static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show, timeout_suspend_store); + +static void rmnet_early_suspend(struct early_suspend *handler) { + if (rmnet0) { + struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0)); + p->timeout_us = timeout_suspend_us; + } + in_suspend = 1; +} + +static void rmnet_late_resume(struct early_suspend *handler) { + if (rmnet0) { + struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0)); + p->timeout_us = timeout_us; + } + in_suspend = 0; +} + +static struct early_suspend rmnet_power_suspend = { + .suspend = rmnet_early_suspend, + .resume = rmnet_late_resume, +}; + +static int __init rmnet_late_init(void) +{ + register_early_suspend(&rmnet_power_suspend); + return 0; +} + +late_initcall(rmnet_late_init); +#endif + +/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */ +static int rmnet_cause_wakeup(struct rmnet_private *p) { + int ret = 0; + ktime_t now; + if (p->timeout_us == 0) /* Check if disabled */ + return 0; + + /* Start timer on a wakeup packet */ + if (p->active_countdown == 0) { + ret = 1; + now = ktime_get_real(); + p->last_packet = now; + if (in_suspend) + queue_delayed_work(rmnet_wq, &p->work, + usecs_to_jiffies(p->timeout_us)); + else + queue_delayed_work(rmnet_wq, &p->work, + usecs_to_jiffies(POLL_DELAY)); + } + + if (in_suspend) + p->active_countdown++; + else + p->active_countdown = p->timeout_us / POLL_DELAY; + + return ret; +} + +static ssize_t wakeups_xmit_show(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct rmnet_private *p = netdev_priv(to_net_dev(d)); + return sprintf(buf, "%lu\n", p->wakeups_xmit); +} + +DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL); + +static ssize_t wakeups_rcv_show(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct rmnet_private *p = netdev_priv(to_net_dev(d)); + return sprintf(buf, "%lu\n", p->wakeups_rcv); +} + +DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL); + +/* Set timeout in us. */ +static ssize_t timeout_store(struct device *d, struct device_attribute *attr, + const char *buf, size_t n) +{ +#ifndef CONFIG_HAS_EARLYSUSPEND + struct rmnet_private *p = netdev_priv(to_net_dev(d)); + p->timeout_us = timeout_us = simple_strtoul(buf, NULL, 10); +#else +/* If using early suspend/resume hooks do not write the value on store. 
*/ + timeout_us = simple_strtoul(buf, NULL, 10); +#endif + return n; +} + +static ssize_t timeout_show(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct rmnet_private *p = netdev_priv(to_net_dev(d)); + p = netdev_priv(to_net_dev(d)); + return sprintf(buf, "%lu\n", timeout_us); +} + +DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store); + +/* Show total radio awake time in ms */ +static ssize_t awake_time_show(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct rmnet_private *p = netdev_priv(to_net_dev(d)); + return sprintf(buf, "%lu\n", p->awake_time_ms); +} +DEVICE_ATTR(awake_time_ms, 0444, awake_time_show, NULL); + +#endif + +/* Called in soft-irq context */ +static void smd_net_data_handler(unsigned long arg) +{ + struct net_device *dev = (struct net_device *) arg; + struct rmnet_private *p = netdev_priv(dev); + struct sk_buff *skb; + void *ptr = 0; + int sz; + + for (;;) { + sz = smd_cur_packet_size(p->ch); + if (sz == 0) break; + if (smd_read_avail(p->ch) < sz) break; + + if (sz > 1514) { + pr_err("rmnet_recv() discarding %d len\n", sz); + ptr = 0; + } else { + skb = dev_alloc_skb(sz + NET_IP_ALIGN); + if (skb == NULL) { + pr_err("rmnet_recv() cannot allocate skb\n"); + } else { + skb->dev = dev; + skb_reserve(skb, NET_IP_ALIGN); + ptr = skb_put(skb, sz); + wake_lock_timeout(&p->wake_lock, HZ / 2); + if (smd_read(p->ch, ptr, sz) != sz) { + pr_err("rmnet_recv() smd lied about avail?!"); + ptr = 0; + dev_kfree_skb_irq(skb); + } else { + skb->protocol = eth_type_trans(skb, dev); + if (count_this_packet(ptr, skb->len)) { +#ifdef CONFIG_MSM_RMNET_DEBUG + p->wakeups_rcv += + rmnet_cause_wakeup(p); +#endif + p->stats.rx_packets++; + p->stats.rx_bytes += skb->len; + } + netif_rx(skb); + } + continue; + } + } + if (smd_read(p->ch, ptr, sz) != sz) + pr_err("rmnet_recv() smd lied about avail?!"); + } +} + +static DECLARE_TASKLET(smd_net_data_tasklet, smd_net_data_handler, 0); + +static void smd_net_notify(void *_dev, unsigned event) +{ + if (event != SMD_EVENT_DATA) + return; + + smd_net_data_tasklet.data = (unsigned long) _dev; + + tasklet_schedule(&smd_net_data_tasklet); +} + +static int rmnet_open(struct net_device *dev) +{ + int r; + struct rmnet_private *p = netdev_priv(dev); + + pr_info("rmnet_open()\n"); + if (!p->ch) { + r = smd_open(p->chname, &p->ch, dev, smd_net_notify); + + if (r < 0) + return -ENODEV; + } + + netif_start_queue(dev); + return 0; +} + +static int rmnet_stop(struct net_device *dev) +{ + pr_info("rmnet_stop()\n"); + netif_stop_queue(dev); + return 0; +} + +static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct rmnet_private *p = netdev_priv(dev); + smd_channel_t *ch = p->ch; + + if (smd_write_atomic(ch, skb->data, skb->len) != skb->len) { + pr_err("rmnet fifo full, dropping packet\n"); + } else { + if (count_this_packet(skb->data, skb->len)) { + p->stats.tx_packets++; + p->stats.tx_bytes += skb->len; +#ifdef CONFIG_MSM_RMNET_DEBUG + p->wakeups_xmit += rmnet_cause_wakeup(p); +#endif + } + } + + dev_kfree_skb_irq(skb); + return 0; +} + +static struct net_device_stats *rmnet_get_stats(struct net_device *dev) +{ + struct rmnet_private *p = netdev_priv(dev); + return &p->stats; +} + +static void rmnet_set_multicast_list(struct net_device *dev) +{ +} + +static void rmnet_tx_timeout(struct net_device *dev) +{ + pr_info("rmnet_tx_timeout()\n"); +} + +static struct net_device_ops rmnet_ops = { + .ndo_open = rmnet_open, + .ndo_stop = rmnet_stop, + .ndo_start_xmit = rmnet_xmit, + .ndo_get_stats = 
rmnet_get_stats, + .ndo_set_multicast_list = rmnet_set_multicast_list, + .ndo_tx_timeout = rmnet_tx_timeout, +}; + +static void __init rmnet_setup(struct net_device *dev) +{ + dev->netdev_ops = &rmnet_ops; + + dev->watchdog_timeo = 20; /* ??? */ + + ether_setup(dev); + + //dev->change_mtu = 0; /* ??? */ + + random_ether_addr(dev->dev_addr); +} + + +static const char *ch_name[3] = { + "SMD_DATA5", + "SMD_DATA6", + "SMD_DATA7", +}; + +static int __init rmnet_init(void) +{ + int ret; + struct device *d; + struct net_device *dev; + struct rmnet_private *p; + unsigned n; + +#ifdef CONFIG_MSM_RMNET_DEBUG + timeout_us = 0; +#ifdef CONFIG_HAS_EARLYSUSPEND + timeout_suspend_us = 0; +#endif +#endif + +#ifdef CONFIG_MSM_RMNET_DEBUG + rmnet_wq = create_workqueue("rmnet"); +#endif + + for (n = 0; n < 3; n++) { + dev = alloc_netdev(sizeof(struct rmnet_private), + "rmnet%d", rmnet_setup); + + if (!dev) + return -ENOMEM; + + d = &(dev->dev); + p = netdev_priv(dev); + p->chname = ch_name[n]; + wake_lock_init(&p->wake_lock, WAKE_LOCK_SUSPEND, ch_name[n]); +#ifdef CONFIG_MSM_RMNET_DEBUG + p->timeout_us = timeout_us; + p->awake_time_ms = p->wakeups_xmit = p->wakeups_rcv = 0; + p->active_countdown = p->restart_count = 0; + INIT_DELAYED_WORK_DEFERRABLE(&p->work, do_check_active); +#endif + + ret = register_netdev(dev); + if (ret) { + free_netdev(dev); + return ret; + } + +#ifdef CONFIG_MSM_RMNET_DEBUG + if (device_create_file(d, &dev_attr_timeout)) + continue; + if (device_create_file(d, &dev_attr_wakeups_xmit)) + continue; + if (device_create_file(d, &dev_attr_wakeups_rcv)) + continue; + if (device_create_file(d, &dev_attr_awake_time_ms)) + continue; +#ifdef CONFIG_HAS_EARLYSUSPEND + if (device_create_file(d, &dev_attr_timeout_suspend)) + continue; + + /* Only care about rmnet0 for suspend/resume tiemout hooks. 
*/ + if (n == 0) + rmnet0 = d; +#endif +#endif + } + return 0; +} + +module_init(rmnet_init); diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index ea5cfe2c3a040..24386a804ea40 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -3645,6 +3645,7 @@ static void myri10ge_free_slices(struct myri10ge_priv *mgp) dma_free_coherent(&pdev->dev, bytes, ss->fw_stats, ss->fw_stats_bus); ss->fw_stats = NULL; + netif_napi_del(&ss->napi); } } kfree(mgp->ss); diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c index 30be8c634ebdd..7298a34bc7951 100644 --- a/drivers/net/ne-h8300.c +++ b/drivers/net/ne-h8300.c @@ -167,7 +167,7 @@ static void cleanup_card(struct net_device *dev) #ifndef MODULE struct net_device * __init ne_probe(int unit) { - struct net_device *dev = alloc_ei_netdev(); + struct net_device *dev = ____alloc_ei_netdev(0); int err; if (!dev) @@ -197,15 +197,15 @@ static const struct net_device_ops ne_netdev_ops = { .ndo_open = ne_open, .ndo_stop = ne_close, - .ndo_start_xmit = ei_start_xmit, - .ndo_tx_timeout = ei_tx_timeout, - .ndo_get_stats = ei_get_stats, - .ndo_set_multicast_list = ei_set_multicast_list, + .ndo_start_xmit = __ei_start_xmit, + .ndo_tx_timeout = __ei_tx_timeout, + .ndo_get_stats = __ei_get_stats, + .ndo_set_multicast_list = __ei_set_multicast_list, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = eth_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ei_poll, + .ndo_poll_controller = __ei_poll, #endif }; @@ -637,7 +637,7 @@ int init_module(void) int err; for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { - struct net_device *dev = alloc_ei_netdev(); + struct net_device *dev = ____alloc_ei_netdev(0); if (!dev) break; if (io[this_dev]) { diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index a11380544e6c5..10f86e0c0d744 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h @@ -174,7 +174,7 @@ #define MAX_NUM_CARDS 4 -#define MAX_BUFFERS_PER_CMD 32 +#define NETXEN_MAX_FRAGS_PER_TX 14 #define MAX_TSO_HEADER_DESC 2 #define MGMT_CMD_DESC_RESV 4 #define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ @@ -558,7 +558,7 @@ struct netxen_recv_crb { */ struct netxen_cmd_buffer { struct sk_buff *skb; - struct netxen_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1]; + struct netxen_skb_frag frag_array[MAX_SKB_FRAGS + 1]; u32 frag_count; }; diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c index 587498e140bb8..3de98cb002ed5 100644 --- a/drivers/net/netxen/netxen_nic_ethtool.c +++ b/drivers/net/netxen/netxen_nic_ethtool.c @@ -901,7 +901,7 @@ static int netxen_nic_set_flags(struct net_device *netdev, u32 data) struct netxen_adapter *adapter = netdev_priv(netdev); int hw_lro; - if (data & ~ETH_FLAG_LRO) + if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO)) return -EINVAL; if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)) diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 33fac32e0d9fd..28139df4734ca 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -1841,6 +1841,8 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) struct cmd_desc_type0 *hwdesc, *first_desc; struct pci_dev *pdev; int i, k; + int delta = 0; + struct skb_frag_struct *frag; u32 producer; int frag_count, no_of_desc; @@ 
-1848,6 +1850,21 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) frag_count = skb_shinfo(skb)->nr_frags + 1; + /* 14 frags supported for normal packet and + * 32 frags supported for TSO packet + */ + if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) { + + for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) { + frag = &skb_shinfo(skb)->frags[i]; + delta += frag->size; + } + + if (!__pskb_pull_tail(skb, delta)) + goto drop_packet; + + frag_count = 1 + skb_shinfo(skb)->nr_frags; + } /* 4 fragments per cmd des */ no_of_desc = (frag_count + 3) >> 2; diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index b99e90aca37dc..d95e527cd3209 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c @@ -34,6 +34,10 @@ const char pch_driver_version[] = DRV_VERSION; #define PCH_GBE_COPYBREAK_DEFAULT 256 #define PCH_GBE_PCI_BAR 1 +/* Macros for ML7223 */ +#define PCI_VENDOR_ID_ROHM 0x10db +#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013 + #define PCH_GBE_TX_WEIGHT 64 #define PCH_GBE_RX_WEIGHT 64 #define PCH_GBE_RX_BUFFER_WRITE 16 @@ -43,8 +47,7 @@ const char pch_driver_version[] = DRV_VERSION; #define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \ PCH_GBE_CHIP_TYPE_INTERNAL | \ - PCH_GBE_RGMII_MODE_RGMII | \ - PCH_GBE_CRS_SEL \ + PCH_GBE_RGMII_MODE_RGMII \ ) /* Ethertype field values */ @@ -1494,12 +1497,11 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, /* Write meta date of skb */ skb_put(skb, length); skb->protocol = eth_type_trans(skb, netdev); - if ((tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) == - PCH_GBE_RXD_ACC_STAT_TCPIPOK) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - } else { + if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) skb->ip_summed = CHECKSUM_NONE; - } + else + skb->ip_summed = CHECKSUM_UNNECESSARY; + napi_gro_receive(&adapter->napi, skb); (*work_done)++; pr_debug("Receive skb->ip_summed: %d length: %d\n", @@ -2420,6 +2422,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = { .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = (0xFFFF00) }, + {.vendor = PCI_VENDOR_ID_ROHM, + .device = PCI_DEVICE_ID_ROHM_ML7223_GBE, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_NETWORK_ETHERNET << 8), + .class_mask = (0xFFFF00) + }, /* required last entry */ {0} }; diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index 78c0e3c9b2b5f..71b1d8fbc301d 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c @@ -317,7 +317,7 @@ static void pppoe_flush_dev(struct net_device *dev) lock_sock(sk); if (po->pppoe_dev == dev && - sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { + sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) { pppox_unbind_sock(sk); sk->sk_state = PPPOX_ZOMBIE; sk->sk_state_change(sk); diff --git a/drivers/net/pppolac.c b/drivers/net/pppolac.c index af3202a920a00..c94b8507d92bb 100644 --- a/drivers/net/pppolac.c +++ b/drivers/net/pppolac.c @@ -15,12 +15,15 @@ */ /* This driver handles L2TP data packets between a UDP socket and a PPP channel. - * To keep things simple, only one session per socket is permitted. Packets are - * sent via the socket, so it must keep connected to the same address. One must - * not set sequencing in ICCN but let LNS controll it. Currently this driver - * only works on IPv4 due to the lack of UDP encapsulation support in IPv6. */ + * The socket must keep connected, and only one session per socket is permitted. + * Sequencing of outgoing packets is controlled by LNS. 
Incoming packets with + * sequences are reordered within a sliding window of one second. Currently + * reordering only happens when a packet is received. It is done for simplicity + * since no additional locks or threads are required. This driver only works on + * IPv4 due to the lack of UDP encapsulation support in IPv6. */ #include +#include #include #include #include @@ -53,14 +56,28 @@ static inline union unaligned *unaligned(void *ptr) return (union unaligned *)ptr; } +struct meta { + __u32 sequence; + __u32 timestamp; +}; + +static inline struct meta *skb_meta(struct sk_buff *skb) +{ + return (struct meta *)skb->cb; +} + +/******************************************************************************/ + static int pppolac_recv_core(struct sock *sk_udp, struct sk_buff *skb) { struct sock *sk = (struct sock *)sk_udp->sk_user_data; struct pppolac_opt *opt = &pppox_sk(sk)->proto.lac; + struct meta *meta = skb_meta(skb); + __u32 now = jiffies; __u8 bits; __u8 *ptr; - /* Drop the packet if it is too short. */ + /* Drop the packet if L2TP header is missing. */ if (skb->len < sizeof(struct udphdr) + 6) goto drop; @@ -99,9 +116,12 @@ static int pppolac_recv_core(struct sock *sk_udp, struct sk_buff *skb) if (unaligned(ptr)->u32 != opt->local) goto drop; - /* Check the sequence if it is present. According to RFC 2661 section - * 5.4, the only thing to do is to update opt->sequencing. */ - opt->sequencing = bits & L2TP_SEQUENCE_BIT; + /* Check the sequence if it is present. */ + if (bits & L2TP_SEQUENCE_BIT) { + meta->sequence = ptr[4] << 8 | ptr[5]; + if ((__s16)(meta->sequence - opt->recv_sequence) < 0) + goto drop; + } /* Skip PPP address and control if they are present. */ if (skb->len >= 2 && skb->data[0] == PPP_ADDR && @@ -112,7 +132,54 @@ static int pppolac_recv_core(struct sock *sk_udp, struct sk_buff *skb) if (skb->len >= 1 && skb->data[0] & 1) skb_push(skb, 1)[0] = 0; - /* Finally, deliver the packet to PPP channel. */ + /* Drop the packet if PPP protocol is missing. */ + if (skb->len < 2) + goto drop; + + /* Perform reordering if sequencing is enabled. */ + atomic_set(&opt->sequencing, bits & L2TP_SEQUENCE_BIT); + if (bits & L2TP_SEQUENCE_BIT) { + struct sk_buff *skb1; + + /* Insert the packet into receive queue in order. */ + skb_set_owner_r(skb, sk); + skb_queue_walk(&sk->sk_receive_queue, skb1) { + struct meta *meta1 = skb_meta(skb1); + __s16 order = meta->sequence - meta1->sequence; + if (order == 0) + goto drop; + if (order < 0) { + meta->timestamp = meta1->timestamp; + skb_insert(skb1, skb, &sk->sk_receive_queue); + skb = NULL; + break; + } + } + if (skb) { + meta->timestamp = now; + skb_queue_tail(&sk->sk_receive_queue, skb); + } + + /* Remove packets from receive queue as long as + * 1. the receive buffer is full, + * 2. they are queued longer than one second, or + * 3. there are no missing packets before them. */ + skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) { + meta = skb_meta(skb); + if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf && + now - meta->timestamp < HZ && + meta->sequence != opt->recv_sequence) + break; + skb_unlink(skb, &sk->sk_receive_queue); + opt->recv_sequence = (__u16)(meta->sequence + 1); + skb_orphan(skb); + ppp_input(&pppox_sk(sk)->chan, skb); + } + return NET_RX_SUCCESS; + } + + /* Flush receive queue if sequencing is disabled. 
*/ + skb_queue_purge(&sk->sk_receive_queue); skb_orphan(skb); ppp_input(&pppox_sk(sk)->chan, skb); return NET_RX_SUCCESS; @@ -163,14 +230,14 @@ static int pppolac_xmit(struct ppp_channel *chan, struct sk_buff *skb) skb->data[1] = PPP_CTRL; /* Install L2TP header. */ - if (opt->sequencing) { + if (atomic_read(&opt->sequencing)) { skb_push(skb, 10); skb->data[0] = L2TP_SEQUENCE_BIT; - skb->data[6] = opt->sequence >> 8; - skb->data[7] = opt->sequence; + skb->data[6] = opt->xmit_sequence >> 8; + skb->data[7] = opt->xmit_sequence; skb->data[8] = 0; skb->data[9] = 0; - opt->sequence++; + opt->xmit_sequence++; } else { skb_push(skb, 6); skb->data[0] = 0; @@ -246,6 +313,7 @@ static int pppolac_connect(struct socket *sock, struct sockaddr *useraddr, po->chan.mtu = PPP_MTU - 80; po->proto.lac.local = unaligned(&addr->local)->u32; po->proto.lac.remote = unaligned(&addr->remote)->u32; + atomic_set(&po->proto.lac.sequencing, 1); po->proto.lac.backlog_rcv = sk_udp->sk_backlog_rcv; error = ppp_register_channel(&po->chan); @@ -283,6 +351,7 @@ static int pppolac_release(struct socket *sock) if (sk->sk_state != PPPOX_NONE) { struct sock *sk_udp = (struct sock *)pppox_sk(sk)->chan.private; lock_sock(sk_udp); + skb_queue_purge(&sk->sk_receive_queue); pppox_unbind_sock(sk); udp_sk(sk_udp)->encap_type = 0; udp_sk(sk_udp)->encap_rcv = NULL; diff --git a/drivers/net/pppopns.c b/drivers/net/pppopns.c index 298097127c90f..fb8198447938b 100644 --- a/drivers/net/pppopns.c +++ b/drivers/net/pppopns.c @@ -16,11 +16,14 @@ /* This driver handles PPTP data packets between a RAW socket and a PPP channel. * The socket is created in the kernel space and connected to the same address - * of the control socket. To keep things simple, packets are always sent with - * sequence but without acknowledgement. This driver should work on both IPv4 - * and IPv6. */ + * of the control socket. Outgoing packets are always sent with sequences but + * without acknowledgements. Incoming packets with sequences are reordered + * within a sliding window of one second. Currently reordering only happens when + * a packet is received. It is done for simplicity since no additional locks or + * threads are required. This driver should work on both IPv4 and IPv6. */ #include +#include #include #include #include @@ -52,21 +55,35 @@ struct header { __u32 sequence; } __attribute__((packed)); +struct meta { + __u32 sequence; + __u32 timestamp; +}; + +static inline struct meta *skb_meta(struct sk_buff *skb) +{ + return (struct meta *)skb->cb; +} + +/******************************************************************************/ + static int pppopns_recv_core(struct sock *sk_raw, struct sk_buff *skb) { struct sock *sk = (struct sock *)sk_raw->sk_user_data; struct pppopns_opt *opt = &pppox_sk(sk)->proto.pns; + struct meta *meta = skb_meta(skb); + __u32 now = jiffies; struct header *hdr; /* Skip transport header */ skb_pull(skb, skb_transport_header(skb) - skb->data); - /* Drop the packet if it is too short. */ + /* Drop the packet if GRE header is missing. */ if (skb->len < GRE_HEADER_SIZE) goto drop; + hdr = (struct header *)skb->data; /* Check the header. */ - hdr = (struct header *)skb->data; if (hdr->type != PPTP_GRE_TYPE || hdr->call != opt->local || (hdr->bits & PPTP_GRE_BITS_MASK) != PPTP_GRE_BITS) goto drop; @@ -81,6 +98,13 @@ static int pppopns_recv_core(struct sock *sk_raw, struct sk_buff *skb) if (skb->len != ntohs(hdr->length)) goto drop; + /* Check the sequence if it is present. 
*/ + if (hdr->bits & PPTP_GRE_SEQ_BIT) { + meta->sequence = ntohl(hdr->sequence); + if ((__s32)(meta->sequence - opt->recv_sequence) < 0) + goto drop; + } + /* Skip PPP address and control if they are present. */ if (skb->len >= 2 && skb->data[0] == PPP_ADDR && skb->data[1] == PPP_CTRL) @@ -90,7 +114,53 @@ static int pppopns_recv_core(struct sock *sk_raw, struct sk_buff *skb) if (skb->len >= 1 && skb->data[0] & 1) skb_push(skb, 1)[0] = 0; - /* Finally, deliver the packet to PPP channel. */ + /* Drop the packet if PPP protocol is missing. */ + if (skb->len < 2) + goto drop; + + /* Perform reordering if sequencing is enabled. */ + if (hdr->bits & PPTP_GRE_SEQ_BIT) { + struct sk_buff *skb1; + + /* Insert the packet into receive queue in order. */ + skb_set_owner_r(skb, sk); + skb_queue_walk(&sk->sk_receive_queue, skb1) { + struct meta *meta1 = skb_meta(skb1); + __s32 order = meta->sequence - meta1->sequence; + if (order == 0) + goto drop; + if (order < 0) { + meta->timestamp = meta1->timestamp; + skb_insert(skb1, skb, &sk->sk_receive_queue); + skb = NULL; + break; + } + } + if (skb) { + meta->timestamp = now; + skb_queue_tail(&sk->sk_receive_queue, skb); + } + + /* Remove packets from receive queue as long as + * 1. the receive buffer is full, + * 2. they are queued longer than one second, or + * 3. there are no missing packets before them. */ + skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) { + meta = skb_meta(skb); + if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf && + now - meta->timestamp < HZ && + meta->sequence != opt->recv_sequence) + break; + skb_unlink(skb, &sk->sk_receive_queue); + opt->recv_sequence = meta->sequence + 1; + skb_orphan(skb); + ppp_input(&pppox_sk(sk)->chan, skb); + } + return NET_RX_SUCCESS; + } + + /* Flush receive queue if sequencing is disabled. */ + skb_queue_purge(&sk->sk_receive_queue); skb_orphan(skb); ppp_input(&pppox_sk(sk)->chan, skb); return NET_RX_SUCCESS; @@ -151,8 +221,8 @@ static int pppopns_xmit(struct ppp_channel *chan, struct sk_buff *skb) hdr->type = PPTP_GRE_TYPE; hdr->length = htons(length); hdr->call = opt->remote; - hdr->sequence = htonl(opt->sequence); - opt->sequence++; + hdr->sequence = htonl(opt->xmit_sequence); + opt->xmit_sequence++; /* Now send the packet via the delivery queue. 
*/ skb_set_owner_w(skb, sk_raw); @@ -261,6 +331,7 @@ static int pppopns_release(struct socket *sock) if (sk->sk_state != PPPOX_NONE) { struct sock *sk_raw = (struct sock *)pppox_sk(sk)->chan.private; lock_sock(sk_raw); + skb_queue_purge(&sk->sk_receive_queue); pppox_unbind_sock(sk); sk_raw->sk_data_ready = pppox_sk(sk)->proto.pns.data_ready; sk_raw->sk_backlog_rcv = pppox_sk(sk)->proto.pns.backlog_rcv; diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h index 44e316fd67b85..0f136ff44b42f 100644 --- a/drivers/net/qlcnic/qlcnic.h +++ b/drivers/net/qlcnic/qlcnic.h @@ -99,6 +99,7 @@ #define TX_UDPV6_PKT 0x0c /* Tx defines */ +#define QLCNIC_MAX_FRAGS_PER_TX 14 #define MAX_TSO_HEADER_DESC 2 #define MGMT_CMD_DESC_RESV 4 #define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c index 4c14510e2a87e..45b2755d6cba3 100644 --- a/drivers/net/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/qlcnic/qlcnic_ethtool.c @@ -1003,7 +1003,7 @@ static int qlcnic_set_flags(struct net_device *netdev, u32 data) struct qlcnic_adapter *adapter = netdev_priv(netdev); int hw_lro; - if (data & ~ETH_FLAG_LRO) + if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO)) return -EINVAL; if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)) diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c index 37c04b4fade3b..92619d752430c 100644 --- a/drivers/net/qlcnic/qlcnic_main.c +++ b/drivers/net/qlcnic/qlcnic_main.c @@ -2099,6 +2099,7 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) struct cmd_desc_type0 *hwdesc, *first_desc; struct pci_dev *pdev; struct ethhdr *phdr; + int delta = 0; int i, k; u32 producer; @@ -2118,6 +2119,19 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) } frag_count = skb_shinfo(skb)->nr_frags + 1; + /* 14 frags supported for normal packet and + * 32 frags supported for TSO packet + */ + if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) { + + for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++) + delta += skb_shinfo(skb)->frags[i].size; + + if (!__pskb_pull_tail(skb, delta)) + goto drop_packet; + + frag_count = 1 + skb_shinfo(skb)->nr_frags; + } /* 4 fragments per cmd des */ no_of_desc = (frag_count + 3) >> 2; diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 39c17cecb8b98..0cdff2baaa359 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c @@ -6726,7 +6726,7 @@ static int s2io_ethtool_set_flags(struct net_device *dev, u32 data) int rc = 0; int changed = 0; - if (data & ~ETH_FLAG_LRO) + if (ethtool_invalid_flags(dev, data, ETH_FLAG_LRO)) return -EINVAL; if (data & ETH_FLAG_LRO) { diff --git a/drivers/net/slip.c b/drivers/net/slip.c index 86cbb9ea2f269..8ec1a9a0bb9ae 100644 --- a/drivers/net/slip.c +++ b/drivers/net/slip.c @@ -853,7 +853,9 @@ static int slip_open(struct tty_struct *tty) /* Done. We have linked the TTY line to a channel. 
*/ rtnl_unlock(); tty->receive_room = 65536; /* We don't flow control */ - return sl->dev->base_addr; + + /* TTY layer expects 0 on success */ + return 0; err_free_bufs: sl_free_bufs(sl); diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index ee747919a766e..0de1589ffb5b1 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h @@ -285,6 +285,20 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r, #define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) #define SMC_IRQ_FLAGS (-1) /* from resource */ +#elif defined(CONFIG_ARCH_MSM) + +#define SMC_CAN_USE_8BIT 0 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 0 +#define SMC_NOWAIT 1 + +#define SMC_inw(a, r) readw((a) + (r)) +#define SMC_outw(v, a, r) writew(v, (a) + (r)) +#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) +#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) + +#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH + #elif defined(CONFIG_MN10300) /* diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 109751bad3bb3..e7ce8afa28bda 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -328,13 +328,13 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id) { static const char ifname[] = "usbpn%d"; const struct usb_cdc_union_desc *union_header = NULL; - const struct usb_cdc_header_desc *phonet_header = NULL; const struct usb_host_interface *data_desc; struct usb_interface *data_intf; struct usb_device *usbdev = interface_to_usbdev(intf); struct net_device *dev; struct usbpn_dev *pnd; u8 *data; + int phonet = 0; int len, err; data = intf->altsetting->extra; @@ -355,10 +355,7 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id) (struct usb_cdc_union_desc *)data; break; case 0xAB: - if (phonet_header || dlen < 5) - break; - phonet_header = - (struct usb_cdc_header_desc *)data; + phonet = 1; break; } } @@ -366,7 +363,7 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id) len -= dlen; } - if (!union_header || !phonet_header) + if (!union_header || !phonet) return -EINVAL; data_intf = usb_ifnum_to_if(usbdev, union_header->bSlaveInterface0); diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 9a60e415d76be..1189d726419e0 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -458,7 +458,7 @@ static const struct driver_info cdc_info = { .manage_power = cdc_manage_power, }; -static const struct driver_info mbm_info = { +static const struct driver_info wwan_info = { .description = "Mobile Broadband Network Device", .flags = FLAG_WWAN, .bind = cdc_bind, @@ -469,6 +469,7 @@ static const struct driver_info mbm_info = { /*-------------------------------------------------------------------------*/ +#define HUAWEI_VENDOR_ID 0x12D1 static const struct usb_device_id products [] = { /* @@ -578,8 +579,17 @@ static const struct usb_device_id products [] = { }, { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), - .driver_info = (unsigned long)&mbm_info, + .driver_info = (unsigned long)&wwan_info, +}, { + /* Various Huawei modems with a network port like the UMG1831 */ + .match_flags = USB_DEVICE_ID_MATCH_VENDOR + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = HUAWEI_VENDOR_ID, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, + .bInterfaceProtocol = 255, + .driver_info = (unsigned long)&wwan_info, }, { }, // END }; diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 
7d42f9a2c0686..81126ff85e057 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -65,6 +65,7 @@ #define IPHETH_USBINTF_PROTO 1 #define IPHETH_BUF_SIZE 1516 +#define IPHETH_IP_ALIGN 2 /* padding at front of URB */ #define IPHETH_TX_TIMEOUT (5 * HZ) #define IPHETH_INTFNUM 2 @@ -202,18 +203,21 @@ static void ipheth_rcvbulk_callback(struct urb *urb) return; } - len = urb->actual_length; - buf = urb->transfer_buffer; + if (urb->actual_length <= IPHETH_IP_ALIGN) { + dev->net->stats.rx_length_errors++; + return; + } + len = urb->actual_length - IPHETH_IP_ALIGN; + buf = urb->transfer_buffer + IPHETH_IP_ALIGN; - skb = dev_alloc_skb(NET_IP_ALIGN + len); + skb = dev_alloc_skb(len); if (!skb) { err("%s: dev_alloc_skb: -ENOMEM", __func__); dev->net->stats.rx_dropped++; return; } - skb_reserve(skb, NET_IP_ALIGN); - memcpy(skb_put(skb, len), buf + NET_IP_ALIGN, len - NET_IP_ALIGN); + memcpy(skb_put(skb, len), buf, len); skb->dev = dev->net; skb->protocol = eth_type_trans(skb, dev->net); diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index cc14b4a75048c..cab2846f8311c 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -178,6 +178,7 @@ static void vmxnet3_process_events(struct vmxnet3_adapter *adapter) { int i; + unsigned long flags; u32 events = le32_to_cpu(adapter->shared->ecr); if (!events) return; @@ -190,10 +191,10 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter) /* Check if there is an error on xmit/recv queues */ if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { - spin_lock(&adapter->cmd_lock); + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_QUEUE_STATUS); - spin_unlock(&adapter->cmd_lock); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); for (i = 0; i < adapter->num_tx_queues; i++) if (adapter->tqd_start[i].status.stopped) @@ -2733,13 +2734,14 @@ static void vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) { u32 cfg; + unsigned long flags; /* intr settings */ - spin_lock(&adapter->cmd_lock); + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_CONF_INTR); cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); - spin_unlock(&adapter->cmd_lock); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); adapter->intr.type = cfg & 0x3; adapter->intr.mask_mode = (cfg >> 2) & 0x3; diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 81254be85b926..976467253d200 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -304,13 +304,16 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data) u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 
0 : 1; unsigned long flags; - if (data & ~ETH_FLAG_LRO) - return -EOPNOTSUPP; + if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO)) + return -EINVAL; if (lro_requested ^ lro_present) { /* toggle the LRO feature*/ netdev->features ^= NETIF_F_LRO; + /* Update private LRO flag */ + adapter->lro = lro_requested; + /* update harware LRO capability accordingly */ if (lro_requested) adapter->shared->devRead.misc.uptFeatures |= diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c index 1dd3a21b3a436..c5eb034107fdd 100644 --- a/drivers/net/vxge/vxge-ethtool.c +++ b/drivers/net/vxge/vxge-ethtool.c @@ -1117,8 +1117,8 @@ static int vxge_set_flags(struct net_device *dev, u32 data) struct vxgedev *vdev = netdev_priv(dev); enum vxge_hw_status status; - if (data & ~ETH_FLAG_RXHASH) - return -EOPNOTSUPP; + if (ethtool_invalid_flags(dev, data, ETH_FLAG_RXHASH)) + return -EINVAL; if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en) return 0; diff --git a/drivers/net/wimax/Kconfig b/drivers/net/wimax/Kconfig index 565018ec1e3ba..58383bf2c9ce5 100644 --- a/drivers/net/wimax/Kconfig +++ b/drivers/net/wimax/Kconfig @@ -11,6 +11,7 @@ if WIMAX menu "WiMAX Wireless Broadband devices" source "drivers/net/wimax/i2400m/Kconfig" +source "drivers/net/wimax/SQN/Kconfig" endmenu diff --git a/drivers/net/wimax/Makefile b/drivers/net/wimax/Makefile index 992bc02bc016f..65c573279170c 100644 --- a/drivers/net/wimax/Makefile +++ b/drivers/net/wimax/Makefile @@ -1,5 +1,8 @@ obj-$(CONFIG_WIMAX_I2400M) += i2400m/ +obj-$(CONFIG_WIMAX_SQN) += SQN/ +obj-$(CONFIG_WIMAX_SQN) += wimaxdbg/ +obj-$(CONFIG_WIMAX_SQN) += wimaxuart/ # (from Sam Ravnborg) force kbuild to create built-in.o obj- := dummy.o diff --git a/drivers/net/wimax/SQN/Kconfig b/drivers/net/wimax/SQN/Kconfig new file mode 100644 index 0000000000000..0e2870ce635c8 --- /dev/null +++ b/drivers/net/wimax/SQN/Kconfig @@ -0,0 +1,12 @@ +config WIMAX_SQN + tristate "WiMAX SQN1210 cards support" + depends on WIMAX && MMC + ---help--- + This module adds support for wimax adapters based on + SQN1120 chipset. + + This driver uses the kernel's wimax subsystem. + + If you choose to build a module, it'll be called sequans_sdio. Say M if + unsure. + diff --git a/drivers/net/wimax/SQN/Makefile b/drivers/net/wimax/SQN/Makefile new file mode 100644 index 0000000000000..88b82cd23454b --- /dev/null +++ b/drivers/net/wimax/SQN/Makefile @@ -0,0 +1,101 @@ +# +# This is part of the Sequans SQN1130 driver. 
+# Copyright 2008 SEQUANS Communications +# Written by Andy Shevchenko , +# Dmitriy Chumak +# + +ifeq ($(KERNELRELEASE),) + +# The following two variables are used to build SDIO +# driver for foreign architecture, for example ARM, +# change them to meet your build environment +SQN_KERNEL_HEADERS_DIR := /usr/src/linux-headers-$(shell uname -r) +SQN_CROSS_COMPILER := arm-eabi- + + +help: + @echo -e "Available targets are: \n\n\ + \thelp -- print this help message\n\n\ + \tsdio_arm_release -- build release SDIO driver for ARM architecture\n\ + \tsdio_arm_debug -- build debug SDIO driver for ARM architecture\n\ + \tsdio -- build SDIO driver for x86 architecture\n\ + \tusb -- build USB driver\n\n\ + \tusb_debian_pkg -- make debian package for USB driver\n\ + \tusb_ubuntu_pkg -- make ubuntu package for USB driver\n\n\ + \tclean -- clean object files and binaries" + +sdio_arm_release: + $(MAKE) -C $(SQN_KERNEL_HEADERS_DIR) \ + ARCH=arm \ + CROSS_COMPILE=$(SQN_CROSS_COMPILER) \ + M=$(shell pwd) \ + CONFIG_USB_SQN='' \ + CONFIG_SDIO_SQN=m \ + SQN_TARGET=CONFIG_SDIO_SQN \ + +sdio_arm_debug: + $(MAKE) -C $(SQN_KERNEL_HEADERS_DIR) \ + ARCH=arm \ + CROSS_COMPILE=$(SQN_CROSS_COMPILER) \ + M=$(shell pwd) \ + CONFIG_USB_SQN='' \ + CONFIG_SDIO_SQN=m \ + SQN_TARGET=CONFIG_SDIO_SQN \ + EXTRA_CFLAGS='-DCONFIG_SDIO_SQN -DDEBUG -Wno-unused-function \ + -DSQN_DEBUG_TRACE -DSQN_DEBUG_DUMP -DSQN_DEBUG_LEVEL_INFO' + +sdio: + $(MAKE) -C $(SQN_KERNEL_HEADERS_DIR) \ + M=$(shell pwd) \ + CONFIG_USB_SQN='' \ + CONFIG_SDIO_SQN=m \ + SQN_TARGET=CONFIG_SDIO_SQN \ + +usb: + $(MAKE) -C $(SQN_KERNEL_HEADERS_DIR) \ + M=$(shell pwd) \ + CONFIG_USB_SQN=m \ + CONFIG_SDIO_SQN='' \ + SQN_TARGET=CONFIG_USB_SQN \ + +usb_debian_pkg: + @./build_deb.sh debian + +usb_ubuntu_pkg: + @./build_deb.sh ubuntu + +clean: + rm -vrf *.o *.ko .*.cmd *.mod.c .*.d modules.order Module.* .tmp_versions *~ + +else # KERNELRELEASE + +SQN_TARGET := SQN_SDIO +SQN_SDIO := m +SQN_USB := + +obj-$(SQN_SDIO) += sequans_sdio.o +obj-$(SQN_USB) += sequans_usb.o + +sequans_sdio-objs := sdio.o sdio-driver.o sdio-fw.o sdio-pm.o thp.o sdio_netlink.o msg.o + +sequans_usb-objs := usb-driver.o thp.o + + +### Compile debug version of the module +#EXTRA_CFLAGS += -D$(SQN_TARGET) -DDEBUG -Wno-unused-function \ + -DSQN_DEBUG_TRACE -DSQN_DEBUG_DUMP -DSQN_DEBUG_LEVEL_INFO + +#EXTRA_CFLAGS += -D$(SQN_TARGET) -DDEBUG -Wno-unused-function \ + -DSQN_DEBUG_TRACE -DSQN_DEBUG_LEVEL_INFO + +#EXTRA_CFLAGS += -D$(SQN_TARGET) -DDEBUG -Wno-unused-function + + +### Compile release version of the module +EXTRA_CFLAGS += -D$(SQN_TARGET) -Wno-unused-function -Wno-unused-label \ + -Wno-unused-variable + +EXTRA_CFLAGS += -DANDROID_KERNEL + +endif diff --git a/drivers/net/wimax/SQN/msg.c b/drivers/net/wimax/SQN/msg.c new file mode 100644 index 0000000000000..5ac66fb67c5a5 --- /dev/null +++ b/drivers/net/wimax/SQN/msg.c @@ -0,0 +1,944 @@ +#include "msg.h" + +#define LARGESTRING 1024 + +#define SPERW (7 * 24 * 3600) +#define SPERD (24 * 3600) +#define SPERH (3600) +#define SPERM (60) + +#define SQN_PRT_MODULE_NAME "wimax_prt" + +void printTime32(u_char *data); + +void sqn_pr_info_dump(char *prefix, unsigned char *data, unsigned int len) { + unsigned int i = 0, pos = 0, temp = 0; + unsigned int width = 16; + unsigned int len_ = (unsigned int)(len); + + char buf[LARGESTRING]; + + int opCode = 0; + int bHandle = 0; + + // sequans_xxx: RX PDU: 0000 ff ff ff ff ff ff 00 1e 90 21 0b d4 08 00 45 00 + // while (i < len_) { // Andrew 0903 + if (i < len_) { + if (i % width == 0) + { + if (len_ >= 40 && 
!bHandle) { // ARP [ + if ( (((unsigned char *)(data))[i+12] == 0x08) && + (((unsigned char *)(data))[i+13] == 0x06) ) { + + bHandle = 1; + + // Opcode + opCode = 0; + if ( (((unsigned char *)(data))[i+20] == 0x00) && + (((unsigned char *)(data))[i+21] == 0x01) ) { + printk(KERN_INFO "%s: [ARP request] - ", SQN_PRT_MODULE_NAME); + opCode = 1; + } + else if ( (((unsigned char *)(data))[i+20] == 0x00) && + (((unsigned char *)(data))[i+21] == 0x02) ) { + printk(KERN_INFO "%s: [ARP reply] - ", SQN_PRT_MODULE_NAME); + opCode = 2; + } + + if (opCode == 1) { // request + printk("Who has "); + for (pos=38; pos<42; pos++) { + if (pos<41) + printk("%d.", ((unsigned char *)(data))[pos]); + else + printk("%d Tell ", ((unsigned char *)(data))[pos]); + } + for (pos=28; pos<32; pos++) { + if (pos<31) + printk("%d.", ((unsigned char *)(data))[pos]); + else + printk("%d, ", ((unsigned char *)(data))[pos]); + } + } + else if (opCode == 2) { // reply + for (pos=28; pos<32; pos++) { + if (pos<31) + printk("%d.", ((unsigned char *)(data))[pos]); + else + printk("%d is at ", ((unsigned char *)(data))[pos]); + } + + for (pos=22; pos<28; pos++) { + if (pos<27) + printk("%02x:", ((unsigned char *)(data))[pos]); + else + printk("%02x, ", ((unsigned char *)(data))[pos]); + } + } + + // Destination MAC + printk("Dst MAC: "); + for (pos=0; pos<6; pos++) { + if (pos<5) + printk("%02x:", ((unsigned char *)(data))[pos]); + else + printk("%02x, ", ((unsigned char *)(data))[pos]); + } + + // Source MAC + printk("Src MAC: "); + for (pos=6; pos<12; pos++) { + if (pos<11) + printk("%02x:", ((unsigned char *)(data))[pos]); + else + printk("%02x\n", ((unsigned char *)(data))[pos]); + } + + } + } // ARP ] + + if (len_ >= 34 && !bHandle) { // ICMP [ + // IP: 0x0800 + if ( (((unsigned char *)(data))[i+12] == 0x08) && + (((unsigned char *)(data))[i+13] == 0x00) && + (((unsigned char *)(data))[i+23] == 0x01) + ) { + bHandle = 1; + if ( (((unsigned char *)(data))[i+34] == 0x00) ) { + printk(KERN_INFO "%s: [ICMP] Echo Reply, ", SQN_PRT_MODULE_NAME); + } + else if ( (((unsigned char *)(data))[i+34] == 0x03) ) { + printk(KERN_INFO "%s: [ICMP] Destination Unreachable, ", SQN_PRT_MODULE_NAME); + } + else if ( (((unsigned char *)(data))[i+34] == 0x05) ) { + printk(KERN_INFO "%s: [ICMP] Redirect, ", SQN_PRT_MODULE_NAME); + } + else if ( (((unsigned char *)(data))[i+34] == 0x08) ) { + printk(KERN_INFO "%s: [ICMP] Echo Request, ", SQN_PRT_MODULE_NAME); + } + else if ( (((unsigned char *)(data))[i+34] == 0x09) ) { + printk(KERN_INFO "%s: [ICMP] Router Adventisement, ", SQN_PRT_MODULE_NAME); + } + + // Source IP + printk("Src IP: "); + for (pos=26; pos<30; pos++) { + if (pos<29) + printk("%d.", ((unsigned char *)(data))[pos]); + else + printk("%d, ", ((unsigned char *)(data))[pos]); + } + + // Destination IP + printk("Dst IP: "); + for (pos=30; pos<34; pos++) { + if (pos<33) + printk("%d.", ((unsigned char *)(data))[pos]); + else + printk("%d\n", ((unsigned char *)(data))[pos]); + } + } + } // ICMP ] + + if (len_ >= 300 && !bHandle) { // DHCP [ + // IP: 0x0800, UDP: 0x11, port: 0x0044, 0x0043 + if ( (((unsigned char *)(data))[i+12] == 0x08) && + (((unsigned char *)(data))[i+13] == 0x00) && + (((unsigned char *)(data))[i+23] == 0x11) && + + ( + (((unsigned char *)(data))[i+34] == 0x00) && + ((((unsigned char *)(data))[i+35] == 0x44) || (((unsigned char *)(data))[i+35] == 0x43)) && + (((unsigned char *)(data))[i+36] == 0x00) && + ((((unsigned char *)(data))[i+37] == 0x43) || (((unsigned char *)(data))[i+37] == 0x44)) + ) + ) { + + bHandle = 1; + 
pos = 282; + + while (pos < len_ && data[pos] != 255) { // while option [ + + switch ( ((unsigned char *)(data))[pos] ) { // Option case [ + + case 0: // pad + break; + + case 1: // Subnetmask + printk(KERN_INFO "%s: Subnet Mask: ", SQN_PRT_MODULE_NAME); + for (temp=0; temp<4; temp++) { + if (temp<3) + printk("%d.", ((unsigned char *)(data))[pos+2+temp]); + else + printk("%d\n", ((unsigned char *)(data))[pos+2+temp]); + } + break; + + case 51: // IP address leasetime + case 58: // T1 + case 59: // T2 + if (((unsigned char *)(data))[pos] == 51) { + printk(KERN_INFO "%s: IP Address Lease Time: ", SQN_PRT_MODULE_NAME); + printTime32(data + pos + 2); + } + + else if (((unsigned char *)(data))[pos] == 58) { + printk(KERN_INFO "%s: Renew Time Value: ", SQN_PRT_MODULE_NAME); + printTime32(data + pos + 2); + } + else if (((unsigned char *)(data))[pos] == 59) { + // printk(KERN_INFO "%s: Option: (59) Rebinding Time Value", SQN_PRT_MODULE_NAME); + // printTime32(data + pos + 2); + } + // printk(KERN_INFO "%s: Length:%d\n", SQN_PRT_MODULE_NAME, (((unsigned char *)(data))[pos+1]) ); + + break; + + case 54: // Server identifier + /* + printk(KERN_INFO "%s: Server Identifier\n", SQN_PRT_MODULE_NAME); + // printk(KERN_INFO "%s: Length:%d\n", SQN_PRT_MODULE_NAME, (((unsigned char *)(data))[pos+1]) ); + printk(KERN_INFO "%s: Server IP: ", SQN_PRT_MODULE_NAME); + for (temp=0; temp<4; temp++) { + if (temp<3) + printk("%d.", ((unsigned char *)(data))[pos+2+temp]); + else + printk("%d\n", ((unsigned char *)(data))[pos+2+temp]); + } + */ + break; + + case 53: // DHCP message type + if ((((unsigned char *)(data))[pos+2]) == 1) { + printk(KERN_INFO "%s: [DHCP Discover]\n", SQN_PRT_MODULE_NAME); + // Source IP + printk(KERN_INFO "%s: Src IP: ", SQN_PRT_MODULE_NAME); + for (temp=26; temp<30; temp++) { + if (temp<29) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Destination IP + printk(KERN_INFO "%s: Dst IP: ", SQN_PRT_MODULE_NAME); + for (temp=30; temp<34; temp++) { + if (temp<33) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Client MAC + printk(KERN_INFO "%s: Client MAC: ", SQN_PRT_MODULE_NAME); + for (temp=70; temp<76; temp++) { + if (temp<75) + printk("%02x:", ((unsigned char *)(data))[temp]); + else + printk("%02x\n", ((unsigned char *)(data))[temp]); + } + } + else if ((((unsigned char *)(data))[pos+2]) == 2) { + printk(KERN_INFO "%s: [DHCP Offer]\n", SQN_PRT_MODULE_NAME); + + // Source IP + printk(KERN_INFO "%s: Src IP: ", SQN_PRT_MODULE_NAME); + for (temp=26; temp<30; temp++) { + if (temp<29) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Destination IP + printk(KERN_INFO "%s: Dst IP: ", SQN_PRT_MODULE_NAME); + for (temp=30; temp<34; temp++) { + if (temp<33) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Your IP + printk(KERN_INFO "%s: Your IP: ", SQN_PRT_MODULE_NAME); + for (temp=58; temp<62; temp++) { + if (temp<61) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Server IP + printk(KERN_INFO "%s: Server IP: ", SQN_PRT_MODULE_NAME); + for (temp=62; temp<66; temp++) { + if (temp<65) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Client MAC + printk(KERN_INFO "%s: Client MAC: ", 
SQN_PRT_MODULE_NAME); + for (temp=70; temp<76; temp++) { + if (temp<75) + printk("%02x:", ((unsigned char *)(data))[temp]); + else + printk("%02x\n", ((unsigned char *)(data))[temp]); + } + } + else if ((((unsigned char *)(data))[pos+2]) == 3) { + printk(KERN_INFO "%s: [DHCP Request]\n", SQN_PRT_MODULE_NAME); + // Source IP + printk(KERN_INFO "%s: Src IP: ", SQN_PRT_MODULE_NAME); + for (temp=26; temp<30; temp++) { + if (temp<29) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Destination IP + printk(KERN_INFO "%s: Dst IP: ", SQN_PRT_MODULE_NAME); + for (temp=30; temp<34; temp++) { + if (temp<33) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Client MAC + printk(KERN_INFO "%s: Client MAC: ", SQN_PRT_MODULE_NAME); + for (temp=70; temp<76; temp++) { + if (temp<75) + printk("%02x:", ((unsigned char *)(data))[temp]); + else + printk("%02x\n", ((unsigned char *)(data))[temp]); + } + } + else if ((((unsigned char *)(data))[pos+2]) == 4) { + printk(KERN_INFO "%s: [DHCP Decline]\n", SQN_PRT_MODULE_NAME); + // Source IP + printk(KERN_INFO "%s: Src IP: ", SQN_PRT_MODULE_NAME); + for (temp=26; temp<30; temp++) { + if (temp<29) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Destination IP + printk(KERN_INFO "%s: Dst IP: ", SQN_PRT_MODULE_NAME); + for (temp=30; temp<34; temp++) { + if (temp<33) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Client IP + printk(KERN_INFO "%s: Client IP: ", SQN_PRT_MODULE_NAME); + for (temp=54; temp<58; temp++) { + if (temp<57) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Your IP + printk(KERN_INFO "%s: Your IP: ", SQN_PRT_MODULE_NAME); + for (temp=58; temp<62; temp++) { + if (temp<61) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Server IP + printk(KERN_INFO "%s: Server IP: ", SQN_PRT_MODULE_NAME); + for (temp=62; temp<66; temp++) { + if (temp<65) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Client MAC + printk(KERN_INFO "%s: Client MAC: ", SQN_PRT_MODULE_NAME); + for (temp=70; temp<76; temp++) { + if (temp<75) + printk("%02x:", ((unsigned char *)(data))[temp]); + else + printk("%02x\n", ((unsigned char *)(data))[temp]); + } + } + else if ((((unsigned char *)(data))[pos+2]) == 5) { + printk(KERN_INFO "%s: [DHCP Ack]\n", SQN_PRT_MODULE_NAME); + // Source IP + printk(KERN_INFO "%s: Src IP: ", SQN_PRT_MODULE_NAME); + for (temp=26; temp<30; temp++) { + if (temp<29) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Destination IP + printk(KERN_INFO "%s: Dst IP: ", SQN_PRT_MODULE_NAME); + for (temp=30; temp<34; temp++) { + if (temp<33) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Client IP + printk(KERN_INFO "%s: Client IP: ", SQN_PRT_MODULE_NAME); + for (temp=54; temp<58; temp++) { + if (temp<57) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Your IP + printk(KERN_INFO "%s: Your IP: ", SQN_PRT_MODULE_NAME); + for (temp=58; temp<62; temp++) { + if (temp<61) + 
printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Server IP + printk(KERN_INFO "%s: Server IP: ", SQN_PRT_MODULE_NAME); + for (temp=62; temp<66; temp++) { + if (temp<65) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Client MAC + printk(KERN_INFO "%s: Client MAC: ", SQN_PRT_MODULE_NAME); + for (temp=70; temp<76; temp++) { + if (temp<75) + printk("%02x:", ((unsigned char *)(data))[temp]); + else + printk("%02x\n", ((unsigned char *)(data))[temp]); + } + } + else if ((((unsigned char *)(data))[pos+2]) == 6) { + printk(KERN_INFO "%s: [DHCP Nack]\n", SQN_PRT_MODULE_NAME); + // Source IP + printk(KERN_INFO "%s: Src IP: ", SQN_PRT_MODULE_NAME); + for (temp=26; temp<30; temp++) { + if (temp<29) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Destination IP + printk(KERN_INFO "%s: Dst IP: ", SQN_PRT_MODULE_NAME); + for (temp=30; temp<34; temp++) { + if (temp<33) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Client IP + printk(KERN_INFO "%s: Client IP: ", SQN_PRT_MODULE_NAME); + for (temp=54; temp<58; temp++) { + if (temp<57) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Your IP + printk(KERN_INFO "%s: Your IP: ", SQN_PRT_MODULE_NAME); + for (temp=58; temp<62; temp++) { + if (temp<61) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Server IP + printk(KERN_INFO "%s: Server IP: ", SQN_PRT_MODULE_NAME); + for (temp=62; temp<66; temp++) { + if (temp<65) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Client MAC + printk(KERN_INFO "%s: Client MAC: ", SQN_PRT_MODULE_NAME); + for (temp=70; temp<76; temp++) { + if (temp<75) + printk("%02x:", ((unsigned char *)(data))[temp]); + else + printk("%02x\n", ((unsigned char *)(data))[temp]); + } + } + else if ((((unsigned char *)(data))[pos+2]) == 7) { + printk(KERN_INFO "%s: [DHCP Release]\n", SQN_PRT_MODULE_NAME); + // Source IP + printk(KERN_INFO "%s: Src IP: ", SQN_PRT_MODULE_NAME); + for (temp=26; temp<30; temp++) { + if (temp<29) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Destination IP + printk(KERN_INFO "%s: Dst IP: ", SQN_PRT_MODULE_NAME); + for (temp=30; temp<34; temp++) { + if (temp<33) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Client IP + printk(KERN_INFO "%s: Client IP: ", SQN_PRT_MODULE_NAME); + for (temp=54; temp<58; temp++) { + if (temp<57) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Your IP + printk(KERN_INFO "%s: Your IP: ", SQN_PRT_MODULE_NAME); + for (temp=58; temp<62; temp++) { + if (temp<61) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Server IP + printk(KERN_INFO "%s: Server IP: ", SQN_PRT_MODULE_NAME); + for (temp=62; temp<66; temp++) { + if (temp<65) + printk("%d.", ((unsigned char *)(data))[temp]); + else + printk("%d\n", ((unsigned char *)(data))[temp]); + } + + // Client MAC + printk(KERN_INFO "%s: Client MAC: ", SQN_PRT_MODULE_NAME); + for (temp=70; 
temp<76; temp++) { + if (temp<75) + printk("%02x:", ((unsigned char *)(data))[temp]); + else + printk("%02x\n", ((unsigned char *)(data))[temp]); + } + } + else { + printk(KERN_INFO "%s: Type: Unknown\n", SQN_PRT_MODULE_NAME); + } + break; + + case 61: // Client identifier + printk(KERN_INFO "%s: Client identifier\n", SQN_PRT_MODULE_NAME); + printk(KERN_INFO "%s: Client MAC: ", SQN_PRT_MODULE_NAME); + for (temp=0; temp<6; temp++) { + if (temp<5) + printk("%02x:", ((unsigned char *)(data))[pos+3+temp]); + else + printk("%02x\n", ((unsigned char *)(data))[pos+3+temp]); + } + break; + + default: + break; + + } // Option case ] + + // This might go wrong if a mallformed packet is received. + // Maybe from a bogus server which is instructed to reply + // with invalid data and thus causing an exploit. + // My head hurts... but I think it's solved by the checking + // for pos= 34 && !bHandle) { // HTTP [ + // IP: 0x0800, TCP: 0x06, port: 0x0050 (80) + if ( (((unsigned char *)(data))[i+12] == 0x08) && + (((unsigned char *)(data))[i+13] == 0x00) && + (((unsigned char *)(data))[i+23] == 0x06) && + ( + ((((unsigned char *)(data))[i+34] == 0x00) && (((unsigned char *)(data))[i+35] == 0x50)) || + ((((unsigned char *)(data))[i+36] == 0x00) && (((unsigned char *)(data))[i+37] == 0x50)) + ) + ) { + bHandle = 1; + printk(KERN_INFO "%s: [HTTP] request, ", SQN_PRT_MODULE_NAME); + + // Source IP + printk("Src IP: "); + for (pos=26; pos<30; pos++) { + if (pos<29) + printk("%d.", ((unsigned char *)(data))[pos]); + else + printk("%d, ", ((unsigned char *)(data))[pos]); + } + + // Destination IP + printk("Dst IP: "); + for (pos=30; pos<34; pos++) { + if (pos<33) + printk("%d.", ((unsigned char *)(data))[pos]); + else + printk("%d\n", ((unsigned char *)(data))[pos]); + } + } + } // HTTP ] + + if (len_ >= 34 && !bHandle) { // DNS [ + // IP: 0x0800, UDP: 0x11, port: 0x0035 + if ( (((unsigned char *)(data))[i+12] == 0x08) && + (((unsigned char *)(data))[i+13] == 0x00) && + (((unsigned char *)(data))[i+23] == 0x11) && + ( + ((((unsigned char *)(data))[i+34] == 0x00) && (((unsigned char *)(data))[i+35] == 0x35)) || + ((((unsigned char *)(data))[i+36] == 0x00) && (((unsigned char *)(data))[i+37] == 0x35)) + ) + ) { + bHandle = 1; + printk(KERN_INFO "%s: [DNS] query, ", SQN_PRT_MODULE_NAME); + + // Source IP + printk("Src IP: "); + for (pos=26; pos<30; pos++) { + if (pos<29) + printk("%d.", ((unsigned char *)(data))[pos]); + else + printk("%d, ", ((unsigned char *)(data))[pos]); + } + + // Destination IP + printk("Dst IP: "); + for (pos=30; pos<34; pos++) { + if (pos<33) + printk("%d.", ((unsigned char *)(data))[pos]); + else + printk("%d\n", ((unsigned char *)(data))[pos]); + } + } + } // DNS ] + + else if (len_ >= 34 && !bHandle) { // NTP [ + // IP: 0x0800, UDP: 0x11, port: 0x007b + if ( (((unsigned char *)(data))[i+12] == 0x08) && + (((unsigned char *)(data))[i+13] == 0x00) && + (((unsigned char *)(data))[i+23] == 0x11) && + (((unsigned char *)(data))[i+34] == 0x00) && + (((unsigned char *)(data))[i+35] == 0x7b) + ) { + bHandle = 1; + printk(KERN_INFO "%s: [NTP] Sync active, ", SQN_PRT_MODULE_NAME); + + // Source IP + printk("Src IP: "); + for (pos=26; pos<30; pos++) { + if (pos<29) + printk("%d.", ((unsigned char *)(data))[pos]); + else + printk("%d, ", ((unsigned char *)(data))[pos]); + } + + // Destination IP + printk("Dst IP: "); + for (pos=30; pos<34; pos++) { + if (pos<33) + printk("%d.", ((unsigned char *)(data))[pos]); + else + printk("%d\n", ((unsigned char *)(data))[pos]); + } + } + } // NTP ] + + if (len_ 
>= 12 && !bHandle) { // IPv6 [ + // IPv6: 0x86DD, UDP: 0x11 + if ( (((unsigned char *)(data))[i+12] == 0x86) && + (((unsigned char *)(data))[i+13] == 0xDD) + ) { + bHandle = 1; + printk(KERN_INFO "%s: [IPv6] Network packets, ", SQN_PRT_MODULE_NAME); + + // Source IP + printk("Dst MAC: "); + for (pos=0; pos<6; pos++) { + if (pos<5) + printk("%d:", ((unsigned char *)(data))[pos]); + else + printk("%d, ", ((unsigned char *)(data))[pos]); + } + + // Destination IP + printk("Src MAC: "); + for (pos=6; pos<12; pos++) { + if (pos<11) + printk("%d:", ((unsigned char *)(data))[pos]); + else + printk("%d\n", ((unsigned char *)(data))[pos]); + } + } + } // IPv6 ] + + if (len_ >= 34 && !bHandle) { // Unknown UDP [ + // IP: 0x0800, UDP: 0x11 + if ( (((unsigned char *)(data))[i+12] == 0x08) && + (((unsigned char *)(data))[i+13] == 0x00) && + (((unsigned char *)(data))[i+23] == 0x11) + ) { + bHandle = 1; + printk(KERN_INFO "%s: [UDP] Network packets, ", SQN_PRT_MODULE_NAME); + + // Source IP + printk("Src IP: "); + for (pos=26; pos<30; pos++) { + if (pos<29) + printk("%d.", ((unsigned char *)(data))[pos]); + else + printk("%d, ", ((unsigned char *)(data))[pos]); + } + + // Destination IP + printk("Dst IP: "); + for (pos=30; pos<34; pos++) { + if (pos<33) + printk("%d.", ((unsigned char *)(data))[pos]); + else + printk("%d, ", ((unsigned char *)(data))[pos]); + } + + printk("Port: %d%d\n", ((unsigned char *)(data))[i+34], ((unsigned char *)(data))[i+35]); + } + } // Unknown ] + + if (len_ >= 34 && !bHandle) { // Unknown TCP [ + // IP: 0x0800, TCP: 0x06 + if ( (((unsigned char *)(data))[i+12] == 0x08) && + (((unsigned char *)(data))[i+13] == 0x00) && + (((unsigned char *)(data))[i+23] == 0x06) + ) { + bHandle = 1; + printk(KERN_INFO "%s: [TCP] Network packets, ", SQN_PRT_MODULE_NAME); + + // Source IP + printk("Src IP: "); + for (pos=26; pos<30; pos++) { + if (pos<29) + printk("%d.", ((unsigned char *)(data))[pos]); + else + printk("%d, ", ((unsigned char *)(data))[pos]); + } + + // Destination IP + printk("Dst IP: "); + for (pos=30; pos<34; pos++) { + if (pos<33) + printk("%d.", ((unsigned char *)(data))[pos]); + else + printk("%d, ", ((unsigned char *)(data))[pos]); + } + + printk("Port: %d%d\n", ((unsigned char *)(data))[i+34], ((unsigned char *)(data))[i+35]); + } + } // Unknown ] + + // Andrew 0903 + // printk(KERN_INFO "%s: %s: %04x ", SQN_PRT_MODULE_NAME, (prefix), i); + } // if (i % width == 0) + + // Andrew 0903 + // printk("%02x ", ((unsigned char *)(data))[i++]); + if ((i % width == 0) || (i == len_)) + printk("\n"); + } +} + + +#define MAX_DUMP_LEN 48 + +void sqn_pr_info_dump_rawdata(char *prefix, unsigned char *data, unsigned int len) { + unsigned int i = 0; + unsigned int width = 16; + unsigned int len_ = (unsigned int)(len); + + if (len_ > MAX_DUMP_LEN) { + len_ = MAX_DUMP_LEN; + } + + // sequans_xxx: RX PDU: 0000 ff ff ff ff ff ff 00 1e 90 21 0b d4 08 00 45 00 + while (i < len_) { + if (i % width == 0) + printk(KERN_INFO "%s: %s: %04x ", SQN_PRT_MODULE_NAME, (prefix), i); + + printk("%02x ", ((unsigned char *)(data))[i++]); + + if ((i % width == 0) || (i == len_)) + printk("\n"); + } +} + +int sqn_filter_packet_check(char *prefix, unsigned char *data, unsigned int len) { + unsigned int i = 0, pos = 0, temp = 0; + unsigned int width = 16; + unsigned int len_ = (unsigned int)(len); + + char buf[LARGESTRING]; + + int bHandle = 0, bFilter = 0; + + if (i < len_) { + // Unblocked list: + if (len_ >= 40 && !bHandle) { // ARP [ + if ( (((unsigned char *)(data))[i+12] == 0x08) && + (((unsigned char 
*)(data))[i+13] == 0x06) ) { + bHandle = 1; + bFilter = 0; + } + } // ARP ] + + if (len_ >= 34 && !bHandle) { // ICMP [ + // IP: 0x0800 + if ( (((unsigned char *)(data))[i+12] == 0x08) && + (((unsigned char *)(data))[i+13] == 0x00) && + (((unsigned char *)(data))[i+23] == 0x01) + ) { + bHandle = 1; + bFilter = 0; + } + } // ICMP ] + + if (len_ >= 300 && !bHandle) { // DHCP [ + // IP: 0x0800, UDP: 0x11, port: 0x0044, 0x0043 + if ( (((unsigned char *)(data))[i+12] == 0x08) && + (((unsigned char *)(data))[i+13] == 0x00) && + (((unsigned char *)(data))[i+23] == 0x11) && + + ( + (((unsigned char *)(data))[i+34] == 0x00) && + ((((unsigned char *)(data))[i+35] == 0x44) || (((unsigned char *)(data))[i+35] == 0x43)) && + (((unsigned char *)(data))[i+36] == 0x00) && + ((((unsigned char *)(data))[i+37] == 0x43) || (((unsigned char *)(data))[i+37] == 0x44)) + ) + ) { + + bHandle = 1; + bFilter = 0; + } + } // DHCP ] + + if (len_ >= 34 && !bHandle) { // HTTP [ + // IP: 0x0800, TCP: 0x06, port: 0x0050 (80) + if ( (((unsigned char *)(data))[i+12] == 0x08) && + (((unsigned char *)(data))[i+13] == 0x00) && + (((unsigned char *)(data))[i+23] == 0x06) && + ( + ((((unsigned char *)(data))[i+34] == 0x00) && (((unsigned char *)(data))[i+35] == 0x50)) || + ((((unsigned char *)(data))[i+36] == 0x00) && (((unsigned char *)(data))[i+37] == 0x50)) + ) + ) { + sqn_pr_info("Drop HTTP packets len:%d\n", len_); + bHandle = 1; + bFilter = 1; + } + } // HTTP ] + + if (len_ >= 34 && !bHandle) { // DNS [ + // IP: 0x0800, UDP: 0x11, port: 0x0035 + if ( (((unsigned char *)(data))[i+12] == 0x08) && + (((unsigned char *)(data))[i+13] == 0x00) && + (((unsigned char *)(data))[i+23] == 0x11) && + ( + ((((unsigned char *)(data))[i+34] == 0x00) && (((unsigned char *)(data))[i+35] == 0x35)) || + ((((unsigned char *)(data))[i+36] == 0x00) && (((unsigned char *)(data))[i+37] == 0x35)) + ) + ) { + bHandle = 1; + bFilter = 0; + } + } // DNS ] + + else if (len_ >= 34 && !bHandle) { // NTP [ + // IP: 0x0800, UDP: 0x11, port: 0x007b + if ( (((unsigned char *)(data))[i+12] == 0x08) && + (((unsigned char *)(data))[i+13] == 0x00) && + (((unsigned char *)(data))[i+23] == 0x11) && + (((unsigned char *)(data))[i+34] == 0x00) && + (((unsigned char *)(data))[i+35] == 0x7b) + ) { + sqn_pr_info("Drop NTP packets len:%d\n", len_); + bHandle = 1; + bFilter = 1; + } + } // NTP ] + + // Block list: + if (len_ >= 12 && !bHandle) { // IPv6 [ + // IPv6: 0x86DD, UDP: 0x11 + if ( (((unsigned char *)(data))[i+12] == 0x86) && + (((unsigned char *)(data))[i+13] == 0xDD) + ) { + sqn_pr_info("Drop IPv6 packets len:%d\n", len_); + bHandle = 1; + bFilter = 1; + } + } // IPv6 ] + + if (len_ >= 34 && !bHandle) { // Unknown UDP [ + // IP: 0x0800, UDP: 0x11 + if ( (((unsigned char *)(data))[i+12] == 0x08) && + (((unsigned char *)(data))[i+13] == 0x00) && + (((unsigned char *)(data))[i+23] == 0x11) + ) { + sqn_pr_info("Drop UDP packets len:%d\n", len_); + bHandle = 1; + bFilter = 1; + } + } // Unknown UDP ] + + if (len_ >= 34 && !bHandle) { // Unknown TCP [ + // IP: 0x0800, TCP: 0x06 + if ( (((unsigned char *)(data))[i+12] == 0x08) && + (((unsigned char *)(data))[i+13] == 0x00) && + (((unsigned char *)(data))[i+23] == 0x06) + ) { + sqn_pr_info("Drop TCP packets len:%d\n", len_); + bHandle = 1; + bFilter = 1; + } + } // Unknown TCP ] + } + + return bFilter; +} + +// print the data as a 32bits time-value +void printTime32(u_char *data) { + int t = (data[0] << 24) + (data[1] << 16) + (data[2] <<8 ) + data[3]; + printk("%d (", t); + if (t > SPERW) { printk("%dw", t / 
(SPERW)); t %= SPERW; } + if (t > SPERD) { printk("%dd", t / (SPERD)); t %= SPERD; } + if (t > SPERH) { printk("%dh", t / (SPERH)); t %= SPERH; } + if (t > SPERM) { printk("%dm", t / (SPERM)); t %= SPERM; } + if (t > 0) printk("%ds", t); + printk(")"); +} diff --git a/drivers/net/wimax/SQN/msg.h b/drivers/net/wimax/SQN/msg.h new file mode 100644 index 0000000000000..10c3d0946bf46 --- /dev/null +++ b/drivers/net/wimax/SQN/msg.h @@ -0,0 +1,124 @@ +/* + * This is part of the Sequans SQN1130 driver. + * Copyright 2008 SEQUANS Communications + * Written by Andy Shevchenko , + * Dmitriy Chumak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ + +#ifndef _SQN_MSG_H +#define _SQN_MSG_H + +#include +#include +#include + +#include "version.h" + +#define sqn_pr(level, fmt, arg...) \ +do { \ + char kthread_name[TASK_COMM_LEN] = { 0 }; \ + /* task_lock(current); */ \ + strncpy(kthread_name, current->comm \ + , sizeof(current->comm)); \ + /* task_unlock(current); */ \ + printk(level "%s: %s: %s: " fmt \ + , SQN_MODULE_NAME, kthread_name \ + , __func__ \ + , ##arg); \ +} while (0) + + +#define SQN_DEBUG_DUMP 1 + +#ifdef SQN_DEBUG_DUMP +#define DEBUG_LEVEL KERN_INFO +#define sqn_pr_dbg_dump(prefix, data, len) \ +do { \ + unsigned int i = 0; \ + unsigned int width = 16; \ + unsigned int len_ = (unsigned int)(len); \ + while (i < len_) { \ + if (i % width == 0) \ + printk(DEBUG_LEVEL "%s: %s: %04x ", \ + SQN_MODULE_NAME, (prefix), i); \ + printk("%02x ", ((unsigned char *)(data))[i++]);\ + if ((i % width == 0) || (i == len_)) \ + printk("\n"); \ + } \ +} while (0) +#else /* !SQN_DEBUG_DUMP */ + +#define sqn_pr_dbg_dump(prefix, data, len) do {} while (0) +#endif /* SQN_DEBUG_DUMP */ + + +#if defined(DEBUG) + +#ifdef SQN_DEBUG_LEVEL_INFO +#define DEBUG_LEVEL KERN_INFO +#else +#define DEBUG_LEVEL KERN_DEBUG +#endif + +#define sqn_pr_dbg(fmt, arg...) sqn_pr(DEBUG_LEVEL, fmt, ##arg) + +#ifdef SQN_DEBUG_TRACE + +#define sqn_pr_enter() sqn_pr_dbg("%s\n", "enter") +#define sqn_pr_leave() sqn_pr_dbg("%s\n", "leave") + +#else /* !SQN_DEBUG_TRACE */ + +#define sqn_pr_enter() do {} while (0) +#define sqn_pr_leave() do {} while (0) + +#endif /* SQN_DEBUG_TRACE */ + +#else /* !DEBUG */ + +#define sqn_pr_dbg(fmt, arg...) do {} while (0) + +#define sqn_pr_enter() do {} while (0) +#define sqn_pr_leave() do {} while (0) + +#endif /* DEBUG */ + + +#define sqn_pr_info(fmt, arg...) \ + pr_info("%s: " fmt, SQN_MODULE_NAME, ##arg) + +#define sqn_pr_warn(fmt, arg...) \ + pr_warning("%s: " fmt, SQN_MODULE_NAME, ##arg) + +#define sqn_pr_err(fmt, arg...) 
\ + pr_err("%s: " fmt, SQN_MODULE_NAME, ##arg) + + +void sqn_pr_info_dump(char *prefix, unsigned char *data, unsigned int len); +void sqn_pr_info_dump_rawdata(char *prefix, unsigned char *data, unsigned int len); +int sqn_filter_packet_check(char *prefix, unsigned char *data, unsigned int len); + +/* +#define sqn_pr_info_dump(prefix, data, len) \ +do { \ + unsigned int i = 0; \ + unsigned int width = 16; \ + unsigned int len_ = (unsigned int)(len); \ + sqn_pr_info_trace(prefix, data, len); \ + while (i < len_) { \ + if (i % width == 0) \ + printk(KERN_INFO "%s: %s: %04x ", \ + SQN_MODULE_NAME, (prefix), i); \ + printk("%02x ", ((unsigned char *)(data))[i++]);\ + if ((i % width == 0) || (i == len_)) \ + printk("\n"); \ + } \ +} while (0) +*/ + +#endif /* _SQN_MSG_H */ diff --git a/drivers/net/wimax/SQN/sdio-driver.c b/drivers/net/wimax/SQN/sdio-driver.c new file mode 100644 index 0000000000000..158eada700bff --- /dev/null +++ b/drivers/net/wimax/SQN/sdio-driver.c @@ -0,0 +1,489 @@ +/* + * This is part of the Sequans SQN1130 driver. + * Copyright 2008 SEQUANS Communications + * Written by Andy Shevchenko , + * Dmitriy Chumak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "sdio-netdev.h" +#include "version.h" +#include "msg.h" +#include "thp.h" +#include "sdio.h" +#include "sdio-pm.h" +#include "sdio-fw.h" +#include "sdio-driver.h" + +#define DRIVER_DEBUG 0 +#define SKB_DEBUG 0 +#define IGNORE_CARRIER_STATE 1 +#define SDIO_CLAIM_HOST_DEBUG 0 + +/*******************************************************************/ +/* Module parameter variables */ +/*******************************************************************/ + +/** firmware_name - specifies the name of firmware binary */ +char *firmware_name = SQN_DEFAULT_FW_NAME; + +/** + * load_firmware - boolean flag, controls whether firmware + * should be loaded or not + */ +int load_firmware = 1; + +bool drop_packet = false; + +module_param(firmware_name, charp, S_IRUGO); +module_param(load_firmware, bool, S_IRUGO); + +struct sqn_private *g_priv = 0; + +//reference sdio-driver.c +extern const uint8_t ss_macaddr[ETH_ALEN]; + +/*******************************************************************/ +/* Network interface functions */ +/*******************************************************************/ + +static int sqn_dev_open(struct net_device *dev) +{ + struct sqn_private *priv = netdev_priv(dev); + + sqn_pr_enter(); + + spin_lock(&priv->drv_lock); + netif_wake_queue(dev); + spin_unlock(&priv->drv_lock); + + sqn_pr_leave(); + return 0; +} + + +static int sqn_dev_stop(struct net_device *dev) +{ + struct sqn_private *priv = netdev_priv(dev); + + sqn_pr_enter(); + + spin_lock(&priv->drv_lock); + netif_stop_queue(dev); + spin_unlock(&priv->drv_lock); + + sqn_pr_leave(); + return 0; +} + + +/*******************************************************************/ +/* TX queue handlers */ +/*******************************************************************/ + +int sqn_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + unsigned long irq_flags = 0; + struct ethhdr *eth; + struct sqn_private *priv = netdev_priv(dev); + +#if DRIVER_DEBUG + printk(KERN_WARNING "sqn_hard_start_xmit \n"); +#endif + + sqn_pr_enter(); + + sqn_pr_dbg("skb->len = %d\n", 
skb->len); + +#if SKB_DEBUG + sqn_pr_info("%s: got skb [0x%p] from kernel, users %d\n", __func__, skb, atomic_read(&skb->users)); +#endif + + spin_lock_irqsave(&priv->drv_lock, irq_flags); + + //HTC code: for DDTM + if(drop_packet){ + eth = (struct ethhdr*) skb->data; + if(memcmp(eth->h_dest, ss_macaddr, ETH_ALEN) != 0){ + sqn_pr_dbg("HTC drop_packet enabled: not THP, drop it\n"); +#if DRIVER_DEBUG + printk(KERN_WARNING "sqn_hard_start_xmit: network packet\n"); +#endif + priv->stats.tx_dropped++; + priv->stats.tx_errors++; + dev_kfree_skb_any(skb); + goto out; + }else{ + sqn_pr_dbg("HTC drop_packet enabled: THP, let it live\n"); +#if DRIVER_DEBUG + printk(KERN_WARNING "sqn_hard_start_xmit: thp packet\n"); +#endif + } + } + + if (priv->removed) { + spin_unlock_irqrestore(&priv->drv_lock, irq_flags); + dev_kfree_skb_any(skb); + return NETDEV_TX_LOCKED; + } + + if (skb->len < 1 || (skb->len > SQN_MAX_PDU_LEN)) { + sqn_pr_dbg("skb length %d not in range (1, %d)\n", skb->len, + SQN_MAX_PDU_LEN); + /* + * We'll never manage to send this one; + * drop it and return 'OK' + */ + priv->stats.tx_dropped++; + priv->stats.tx_errors++; + spin_unlock_irqrestore(&priv->drv_lock, irq_flags); + goto out; + } + + /* netif_stop_queue(priv->dev); */ + + priv->add_skb_to_tx_queue(priv, skb, 1); + + priv->stats.tx_packets++; + priv->stats.tx_bytes += skb->len; + + dev->trans_start = jiffies; + + spin_unlock_irqrestore(&priv->drv_lock, irq_flags); + wake_up_interruptible(&priv->tx_waitq); +out: + sqn_pr_leave(); + return NETDEV_TX_OK; +} + + +static void sqn_tx_timeout(struct net_device *dev) +{ + /* struct sqn_private *priv = netdev_priv(dev); */ + + sqn_pr_enter(); + + sqn_pr_err("TX watch dog timeout\n"); + + sqn_pr_leave(); +} + + +static int sqn_tx_thread(void *data) +{ + struct net_device *dev = (struct net_device *) data; + struct sqn_private *priv = netdev_priv(dev); + int rv = 0; + unsigned long irq_flags = 0; + + sqn_pr_enter(); + + /* + * Set PF_NOFREEZE to prevent kernel to freeze this thread + * when going to suspend. We will manually stop it from + * driver's suspend handler. 
+ */ + current->flags |= PF_NOFREEZE; + + for (;;) { + spin_lock_irqsave(&priv->drv_lock, irq_flags); + + if (!(priv->is_tx_queue_empty(priv)) || priv->removed) + spin_unlock_irqrestore(&priv->drv_lock, irq_flags); + else { + int rv = 0; + sqn_pr_dbg("wait for skb\n"); + spin_unlock_irqrestore(&priv->drv_lock, irq_flags); + + rv = wait_event_interruptible(priv->tx_waitq + , !(priv->is_tx_queue_empty(priv)) + || kthread_should_stop() + || priv->removed); + + /* + * If we've been interrupted by a signal, then we + * should stop a thread + */ + if (0 != rv) { + sqn_pr_dbg("got a signal from kernel %d\n", rv); + break; + } + } + + sqn_pr_dbg("got skb to send, wake up\n"); + + if (kthread_should_stop()) { + sqn_pr_dbg("break from main thread\n"); + break; + } + + spin_lock_irqsave(&priv->drv_lock, irq_flags); + if (priv->removed) { + sqn_pr_dbg("adapter removed; wait to die...\n"); + spin_unlock_irqrestore(&priv->drv_lock, irq_flags); + ssleep(1); + continue; + } + spin_unlock_irqrestore(&priv->drv_lock, irq_flags); + + rv= priv->hw_host_to_card(priv); + if (rv) + sqn_pr_dbg("failed to send PDU: %d\n", rv); + } + + sqn_pr_leave(); + return 0; +} + + +int sqn_start_tx_thread(struct sqn_private *priv) +{ + int rv = 0; + + sqn_pr_enter(); + + priv->tx_thread = kthread_run(sqn_tx_thread, priv->dev, "sqn_tx"); + + if (IS_ERR(priv->tx_thread)) { + sqn_pr_dbg("error creating TX thread.\n"); + rv = 1; + goto out; + } + + sqn_pr_leave(); +out: + return rv; +} + + +int sqn_stop_tx_thread(struct sqn_private *priv) +{ + int rv = 0; + + sqn_pr_enter(); + + kthread_stop(priv->tx_thread); + wake_up_interruptible(&priv->tx_waitq); + + sqn_pr_leave(); + + return rv; +} + + +/*******************************************************************/ +/* RX queue handlers */ +/*******************************************************************/ + +int sqn_rx_process(struct net_device *dev, struct sk_buff *skb) +{ + int rc = 0; + struct sqn_private *priv = netdev_priv(dev); + +#if SDIO_CLAIM_HOST_DEBUG + /* sqn_pr_info("%s+\n", __func__); */ +#endif + +#if DRIVER_DEBUG + printk(KERN_WARNING "sqn_rx_process \n"); +#endif + + sqn_pr_enter(); + + dev->last_rx = jiffies; + skb->protocol = eth_type_trans(skb, dev); + skb->dev = dev; + priv->stats.rx_packets++; + priv->stats.rx_bytes += skb->len; +#if SKB_DEBUG + sqn_pr_info("%s: push skb [0x%p] to kernel, users %d\n", __func__, skb, atomic_read(&skb->users)); +#endif + netif_rx(skb); + /* netif_receive_skb(skb); */ + + sqn_pr_leave(); + +#if SDIO_CLAIM_HOST_DEBUG + /* sqn_pr_info("%s-\n", __func__); */ +#endif + + return rc; +} + + +/*******************************************************************/ +/* Interface statistics */ +/*******************************************************************/ + +static struct net_device_stats *sqn_get_stats(struct net_device *dev) +{ + struct sqn_private *priv = netdev_priv(dev); + + sqn_pr_enter(); + sqn_pr_leave(); + + return &priv->stats; +} + + +/*******************************************************************/ +/* Adding and removing procedures */ +/*******************************************************************/ + + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) +static const struct net_device_ops sqn_netdev_ops = { + .ndo_open = sqn_dev_open + , .ndo_stop = sqn_dev_stop + , .ndo_start_xmit = sqn_hard_start_xmit + , .ndo_validate_addr = eth_validate_addr + , .ndo_tx_timeout = sqn_tx_timeout + , .ndo_get_stats = sqn_get_stats +}; +#endif + + +struct sqn_private *sqn_add_card(void *card, struct device *realdev) +{ + 
struct sqn_private *priv = 0; + u8 dummy_wimax_mac_addr[ETH_ALEN] = { 0x00, 0x16, 0x08, 0x00, 0x06, 0x53 }; + + /* Allocate an Ethernet device and register it */ + struct net_device *dev = alloc_netdev(sizeof(struct sqn_private), "wimax%d", ether_setup); + + sqn_pr_enter(); + + if (!dev) { + sqn_pr_err("init wimaxX device failed\n"); + goto done; + } + + priv = netdev_priv(dev); + g_priv = priv; + memset(priv, 0, sizeof(struct sqn_private)); + + /* + * Use dummy WiMAX mac address for development version (boot from + * flash) of WiMAX SDIO cards. Production cards use mac address from + * firmware which is loaded by driver. Random ethernet address can't be + * used if IPv4 convergence layer is enabled on WiMAX base station. + */ + memcpy(priv->mac_addr, dummy_wimax_mac_addr, ETH_ALEN); + + spin_lock_init(&priv->drv_lock); + + /* Fill the private stucture */ + priv->dev = dev; + priv->card = card; + + /* Setup the OS Interface to our functions */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) + dev->open = sqn_dev_open; + dev->stop = sqn_dev_stop; + dev->hard_start_xmit = sqn_hard_start_xmit; + dev->tx_timeout = sqn_tx_timeout; + dev->get_stats = sqn_get_stats; +#else + dev->netdev_ops = &sqn_netdev_ops; +#endif + + /* TODO: Make multicast possible */ + dev->flags &= ~IFF_MULTICAST; + + //wimax interface mtu must be 1400 (in spec) + dev->mtu = 1400; + SET_NETDEV_DEV(dev, realdev); + +done: + sqn_pr_leave(); + return priv; +} + + +int sqn_remove_card(struct sqn_private *priv) +{ + struct net_device *dev = priv->dev; + unsigned long irq_flags = 0; + + sqn_pr_enter(); + + dev = priv->dev; + + spin_lock_irqsave(&priv->drv_lock, irq_flags); + priv->removed = 1; + priv->dev = NULL; + spin_unlock_irqrestore(&priv->drv_lock, irq_flags); + + /* kthread_stop(priv->tx_thread); */ + /* wake_up_interruptible(&priv->tx_waitq); */ + free_netdev(dev); + + sqn_pr_leave(); + return 0; +} + + +int sqn_start_card(struct sqn_private *priv) +{ + struct net_device *dev = priv->dev; + + sqn_pr_enter(); + + if (register_netdev(dev)) { + sqn_pr_err("cannot register ethX device\n"); + return -1; + } + + sqn_pr_dbg("starting TX thread...\n"); + /* TODO: move waitq initializatio to add_card() */ + init_waitqueue_head(&priv->tx_waitq); + init_waitqueue_head(&priv->rx_waitq); + if (sqn_start_tx_thread(priv)) + goto err_init_adapter; + + sqn_pr_info("%s: Sequans WiMAX adapter\n", dev->name); + +#if IGNORE_CARRIER_STATE + netif_carrier_on(priv->dev); +#else + /* In release version this should be uncommented */ + /* netif_carrier_off(priv->dev); */ +#endif + +done: + sqn_pr_leave(); + return 0; + +err_init_adapter: + /* TODO: Free allocated resources */ + sqn_pr_err("error while init adapter\n"); + free_netdev(dev); + priv = NULL; + + goto done; +} + + +int sqn_stop_card(struct sqn_private *priv) +{ + struct net_device *dev = priv->dev; + + sqn_pr_enter(); + + unregister_netdev(dev); + + sqn_pr_leave(); + return 0; +} diff --git a/drivers/net/wimax/SQN/sdio-driver.h b/drivers/net/wimax/SQN/sdio-driver.h new file mode 100644 index 0000000000000..8c7162741141a --- /dev/null +++ b/drivers/net/wimax/SQN/sdio-driver.h @@ -0,0 +1,31 @@ +/* + * This is part of the Sequans SQN1130 driver. + * Copyright 2008 SEQUANS Communications + * Written by Andy Shevchenko + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. 
+ */ + +#ifndef _SQN_MAIN_H +#define _SQN_MAIN_H + + +extern char *firmware_name; +extern int load_firmware; + + +struct sqn_private *sqn_add_card(void *card, struct device *realdev); +int sqn_remove_card(struct sqn_private *priv); + +int sqn_start_card(struct sqn_private *priv); +int sqn_stop_card(struct sqn_private *priv); + +int sqn_start_tx_thread(struct sqn_private *priv); +int sqn_stop_tx_thread(struct sqn_private *priv); + +int sqn_rx_process(struct net_device *dev, struct sk_buff *skb); + +#endif /* _SQN_MAIN_H */ diff --git a/drivers/net/wimax/SQN/sdio-fw.c b/drivers/net/wimax/SQN/sdio-fw.c new file mode 100644 index 0000000000000..a0fc256846106 --- /dev/null +++ b/drivers/net/wimax/SQN/sdio-fw.c @@ -0,0 +1,712 @@ +/* * This is part of the Sequans SQN1130 driver. + * Copyright 2008 SEQUANS Communications + * Written by Dmitriy Chumak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ + + +#include +#include +#include +#include + +#include "sdio.h" +#include "msg.h" +#include "sdio-sqn.h" +#include "sdio-netdev.h" +#include "version.h" +#include "sdio-fw.h" + +char *fw1130_name = "sqn1130.bin"; +char *fw1210_name = "sqn1210.bin"; + + +/* Tag, Lenght, Value struct */ +struct sqn_tlv { + u32 tag; +#define SWM_INFO_TAG_SQN_ROOT 0x80000000 /* SEQUANS root tag */ +#define SWM_INFO_TAG_SQN_MEMCPY 0x80000005 /* SEQUANS memcpy tag */ +#define SWM_INFO_TAG_SQN_MEMSET 0x80000006 /* SEQUANS memset tag */ +#define SWM_INFO_TAG_SQN_BOOTROM_GROUP 0x80040000 +#define SWM_INFO_TAG_SQN_ID_GROUP 0x80010000 /* SEQUANS identification group tag */ +#define SWM_INFO_TAG_SQN_MAC_ADDRESS 0x80010010 /* SEQUANS mac address tag */ + u32 length; + u8 value[0]; +}; + + +/* body of SWM_INFO_TAG_SQN_MEMCPY tag */ +struct sqn_tag_memcpy { + u32 address; + u32 access_size; + u32 data_size; + u8 data[0]; +}; + + +/* body of SWM_INFO_TAG_SQN_MEMSET tag */ +struct sqn_tag_memset { + u32 address; + u32 access_size; + u32 size; + u8 pattern; +}; + + +#define SQN_1130_SDRAM_BASE 0x00000000 +#define SQN_1130_SDRAM_END 0x03FFFFFF +#define SQN_1130_SDRAMCTL_BASE 0x4B400000 +#define SQN_1130_SDRAMCTL_END 0x4B4003FF + +#define SQN_1210_SDRAM_BASE 0x00000000 +#define SQN_1210_SDRAM_END 0x07FFFFFF +#define SQN_1210_SDRAMCTL_BASE 0x20002000 +#define SQN_1210_SDRAMCTL_END 0x2000207F + +static int is_good_ahb_address(u32 address, enum sqn_card_version card_version) +{ + u32 sdram_base = 0; + u32 sdram_end = 0; + u32 sdram_ctl_base = 0; + u32 sdram_ctl_end = 0; + int status = 0; + + sqn_pr_enter(); + + if (address % 4) + return 0; + + if (SQN_1130 == card_version) { + sqn_pr_dbg("using 1130 AHB address boundaries\n"); + sdram_base = SQN_1130_SDRAM_BASE; + sdram_end = SQN_1130_SDRAM_END; + sdram_ctl_base = SQN_1130_SDRAMCTL_BASE; + sdram_ctl_end = SQN_1130_SDRAMCTL_END; + } else if (SQN_1210 == card_version) { + sqn_pr_dbg("using 1210 AHB address boundaries\n"); + sdram_base = SQN_1210_SDRAM_BASE; + sdram_end = SQN_1210_SDRAM_END; + sdram_ctl_base = SQN_1210_SDRAMCTL_BASE; + sdram_ctl_end = SQN_1210_SDRAMCTL_END; + } else { + sqn_pr_warn("Can't check AHB address because of unknown" + " card version\n"); + status = 0; + goto out; + } + + status = ((sdram_base <= address && address < sdram_end) + || (sdram_ctl_base <= address && address < sdram_ctl_end)); +out: + sqn_pr_leave(); + return status; +} + +// Fix big buffer 
allocation problem during Firmware loading +/** + * sqn_alloc_big_buffer - tries to alloc a big buffer with kmalloc + * @buf: pointer to buffer + * @size: required buffer size + * @gfp_flags: GFP_* flags + * + * Tries to allocate a buffer of requested size with kmalloc. If it fails, + * then decrease buffer size in two times (adjusting the new size to be a + * multiple of 4) and try again. Use 6 retries in case of failures, after + * this give up and try to alloc 4KB buffer if requested size bigger than + * 4KB, otherwise allocate nothing and return 0. + * + * @return a real size of allocated buffer or 0 if allocation failed + * + * Normal: 3912*4kB 4833*8kB 0*16kB 0*32kB 0*64kB 0*128kB 0*256kB 0*512kB 0*1024kB 0*2048kB 0*4096kB = 54312kB + */ + +static size_t sqn_alloc_big_buffer(u8 **buf, size_t size, gfp_t gfp_flags) +{ + size_t real_size = size; + // int retries = 6; + // int retries = 3; + + sqn_pr_enter(); + + /* Try to allocate buffer of requested size, if it failes try to + * allocate a twice smaller buffer. Repeat this number of + * times. */ + /* + do + { + *buf = kmalloc(real_size, gfp_flags); + printk("%s: kmalloc %d in %x trial:%d\n", __func__, real_size, *buf, retries); + + if (!(*buf)) { + printk("%s: kmalloc %d failed, trial:%d\n", __func__, real_size, retries); + // real_size /= 2; + real_size /= 4; + // adjust the size to be a multiple of 4 + real_size += real_size % 4 ? 4 - real_size % 4 : 0; + } + } while (retries-- > 0 && !(*buf)); + */ + + // If all retries failed, then allocate 4KB buffer + if (!(*buf)) { + real_size = 8 * 1024; + if (size >= real_size) { + *buf = kmalloc(real_size, gfp_flags); + // printk("%s: kmalloc %d in %x\n", __func__, real_size, *buf); + + // If it also failed, then just return 0, indicating + // that we failed to alloc buffer + if (!(*buf)) + real_size = 0; + } else { + // We should _not_ return buffer bigger than requested + // real_size = 0; + + // printk("%s: We should _not_ return buffer bigger than requested size:%d real_size:%d\n", __func__, size, real_size); + *buf = kmalloc(size, gfp_flags); + real_size = size; + } + } + + sqn_pr_leave(); + + return real_size; +} + +#define SQN_SDIO_ADA_ADDR 0x00002060 +#define SQN_SDIO_ADA_RDWR 0x00002064 + + +static int write_data(struct sdio_func *func, u32 addr, void *data + , u32 size, u32 access_size) +{ + int rv = 0, retry = 0; + struct sqn_sdio_card *sqn_card = sdio_get_drvdata(func); + + sqn_pr_enter(); + sdio_claim_host(func); + + if (is_good_ahb_address(addr, sqn_card->version) + && 0 == (size % 4) && 4 == access_size) + { + /* write data using AHB */ + u8 *buf = 0; + size_t buf_size = 0; + u32 written_size = 0; + +#ifdef DEBUG + u8 *read_data = 0; +#endif + + sqn_pr_dbg("write data using AHB\n"); + sdio_writel(func, addr, SQN_SDIO_ADA_ADDR, &rv); + if (rv) { + sqn_pr_dbg("can't set SQN_SDIO_ADA_ADDR register\n"); + goto out; + } + sqn_pr_dbg("after SQN_SDIO_ADA_ADDR\n"); + + written_size = 0; + buf_size = sqn_alloc_big_buffer(&buf, size, GFP_KERNEL | GFP_DMA); + if (!buf) { + sqn_pr_err("failed to allocate buffer of %u bytes\n", size); + goto out; + } + + do { + memcpy(buf, data + written_size, buf_size); + rv = sdio_writesb(func, SQN_SDIO_ADA_RDWR, buf, buf_size); + if (rv) { + sqn_pr_dbg("can't write to SQN_SDIO_ADA_RDWR register\n"); + goto out; + } + written_size += buf_size; + if (written_size + buf_size > size) + buf_size = size - written_size; + } while (written_size < size); + kfree(buf); + + /* + * Workaround when sdio_writesb doesn't work because DMA + * alignment + */ + /* + int 
i = 0; + for (; i < size/4; ++i) { + sdio_writel(func, *((u32*)data + i), SQN_SDIO_ADA_RDWR, &rv); + if (rv) { + sqn_pr_dbg("can't write to SQN_SDIO_ADA_RDWR register\n"); + goto out; + } + } + */ + + sqn_pr_dbg("after SQN_SDIO_ADA_RDWR\n"); + + /* ******** only for debugging ******** */ + /* validate written data */ +/* #ifdef DEBUG */ +#if 0 + sqn_pr_dbg("reading data using AHB\n"); + sdio_writel(func, addr, SQN_SDIO_ADA_ADDR, &rv); + if (rv) { + sqn_pr_dbg("can't set SQN_SDIO_ADA_ADDR register\n"); + goto out; + } + sqn_pr_dbg("after SQN_SDIO_ADA_ADDR\n"); + + read_data = kmalloc(size, GFP_KERNEL); + rv = sdio_readsb(func, read_data, SQN_SDIO_ADA_RDWR, size); + if (rv) { + sqn_pr_dbg("can't read from SQN_SDIO_ADA_RDWR register\n"); + kfree(read_data); + goto out; + } + + if (memcmp(data, read_data, size)) + sqn_pr_dbg("WARNING: written data are __not__ equal\n"); + else + sqn_pr_dbg("OK: written data are equal\n"); + + kfree(read_data); +#endif /* DEBUG */ + /* ******** only for debugging ******** */ + + } else if (4 == access_size && size >= 4) { + /* write data using CMD53 */ + sqn_pr_dbg("write data using CMD53\n"); + rv = sdio_memcpy_toio(func, addr, data , size); + } else { + /* write data using CMD52 */ + /* not implemented yet, so we use CMD53 */ + /* rv = sdio_memcpy_toio(func, addr, data , size); */ + int i = 0; + sqn_pr_dbg("write data using CMD52\n"); + for (i = 0; i < size; ++i) { + sdio_writeb(func, *((u8*)data + i), addr + i, &rv); + if (rv) { + sqn_pr_dbg("can't write 1 byte to %xh addr using CMD52\n" + , addr + i); + goto out; + } + } + } + +out: + sdio_release_host(func); + sqn_pr_leave(); + return rv; +} + + +static int sqn_handle_memcpy_tag(struct sdio_func *func + , struct sqn_tag_memcpy * mcpy_tag) +{ + int rv = 0; + + sqn_pr_enter(); + + /* + * Convert values accordingly to platform "endianes" + * (big or little endian) because bootstrapper file + * data is big endian + */ + mcpy_tag->address = be32_to_cpu(mcpy_tag->address); + mcpy_tag->access_size = be32_to_cpu(mcpy_tag->access_size); + mcpy_tag->data_size = be32_to_cpu(mcpy_tag->data_size); + + /* sqn_pr_dbg("----------------------------------------\n"); */ + sqn_pr_dbg("address: 0x%02X access_size: %u data_size: %u\n" + , mcpy_tag->address, mcpy_tag->access_size + , mcpy_tag->data_size); + /* sqn_pr_dbg_dump("|", mcpy_tag->data, mcpy_tag->data_size); */ + + rv = write_data(func, mcpy_tag->address, mcpy_tag->data + , mcpy_tag->data_size, mcpy_tag->access_size); + + sqn_pr_leave(); + return rv; +} + + +static int sqn_handle_memset_tag(struct sdio_func *func + , struct sqn_tag_memset * mset_tag) +{ + int rv = 0; + u8 *buf = 0; + const u32 buf_size = 1024; + u32 left_bytes = 0; + + sqn_pr_enter(); + + /* + * Convert values accordingly to platform "endianes" + * (big or little endian) because bootstrapper file + * data is big endian + */ + mset_tag->address = be32_to_cpu(mset_tag->address); + mset_tag->access_size = be32_to_cpu(mset_tag->access_size); + mset_tag->size = be32_to_cpu(mset_tag->size); + + /* sqn_pr_dbg("----------------------------------------\n"); */ + sqn_pr_dbg("address: 0x%02X access_size: %u size: %u pattern 0x%02X\n" + , mset_tag->address, mset_tag->access_size + , mset_tag->size, mset_tag->pattern); + + buf = kmalloc(buf_size, GFP_KERNEL); + if (0 == buf) + return -ENOMEM; + + memset(buf, mset_tag->pattern, buf_size); + + left_bytes = mset_tag->size; + + while (left_bytes) { + u32 bytes_to_write = min(buf_size, left_bytes); + rv = write_data(func, mset_tag->address, buf, bytes_to_write, + 
mset_tag->access_size); + if (rv) + goto out; + left_bytes -= bytes_to_write; + } + +out: + kfree(buf); + sqn_pr_leave(); + return rv; +} + + +static int char_to_int(u8 c) +{ + int rv = 0; + + if ('0' <= c && c <= '9') { + rv = c - '0'; + } else if ('a' <= c && c <= 'f') { + rv = c - 'a' + 0xA; + } else if ('A' <= c && c <= 'F') { + rv = c - 'A' + 0xA; + } else { + rv = -1; + } + + return rv; +} + + +static int get_mac_addr_from_str(u8 *data, u32 length, u8 *result) +{ + int rv = 0; + int i = 0; + + sqn_pr_enter(); + + if (0 == length) { + rv = -1; + goto out; + } + + /* + * Check if we have delimiters on appropriate places: + * + * X X : X X : X X : X X : X X : X X + * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 + */ + + if ( !( ( ':' == data[2] || '-' == data[2]) + && ( ':' == data[5] || '-' == data[5]) + && ( ':' == data[8] || '-' == data[8]) + && ( ':' == data[11] || '-' == data[11]) + && ( ':' == data[14] || '-' == data[14]) )) + { + sqn_pr_err("can't get mac address from firmware" + " - incorrect mac address\n"); + rv = -1; + goto out; + } + + i = 0; + while (i < length) { + int high = 0; + int low = 0; + + if ((high = char_to_int(data[i])) >= 0 + && (low = char_to_int(data[i + 1])) >= 0) + { + result[i/3] = low; + result[i/3] |= high << 4; + } else { + sqn_pr_err("can't get mac address from firmware" + " - incorrect mac address\n"); + rv = -1; + goto out; + } + + i += 3; + } + +out: + if (length > 0) { + data[length - 1] = 0; + sqn_pr_dbg("mac addr string: %s\n", data); + } + sqn_pr_leave(); + return rv; +} + + +static int sqn_handle_mac_addr_tag(struct sdio_func *func, u8 *data, u32 length) +{ + int rv = 0; + struct sqn_private *priv = + ((struct sqn_sdio_card *)sdio_get_drvdata(func))->priv; + + sqn_pr_enter(); + + /* + * This tag could contain one or two mac addresses in string + * form, delimited by some symbol (space or something else). + * Each mac address written as a string has constant length. + * Thus we can determine the number of mac addresses by the + * length of the tag: + * + * mac addr length in string form: XX:XX:XX:XX:XX:XX = 17 bytes + * tag length: 17 bytes [ + 1 byte + 17 bytes ] + */ + +#define MAC_ADDR_STRING_LEN 17 + + /* + * If we have only one mac addr we should increment it by one + * and use it. + * If we have two mac addresses we should use a second one. 
+ */ + + if (MAC_ADDR_STRING_LEN <= length + && length < 2 * MAC_ADDR_STRING_LEN + 1) + { + sqn_pr_dbg("single mac address\n"); + /* we have only one mac addr */ + get_mac_addr_from_str(data, length, priv->mac_addr); + + // Andrew 0720 + // ++(priv->mac_addr[ETH_ALEN - 1]) + // real MAC: 38:E6:D8:86:00:00 + // hboot will store: 38:E6:D8:85:FF:FF (minus 1) + // sdio need to recovery it by plusing 1: 38:E6:D8:86:00:00 (plus 1) + + if ((++(priv->mac_addr[ETH_ALEN - 1])) == 0x00) + if ((++(priv->mac_addr[ETH_ALEN - 2])) == 0x00) + if ((++(priv->mac_addr[ETH_ALEN - 3])) == 0x00) + if ((++(priv->mac_addr[ETH_ALEN - 4])) == 0x00) + if ((++(priv->mac_addr[ETH_ALEN - 5])) == 0x00) + ++(priv->mac_addr[ETH_ALEN - 6]); + + } + else if (2 * MAC_ADDR_STRING_LEN + 1 == length) { /* we have two macs */ + sqn_pr_dbg("two mac addresses, using second\n"); + get_mac_addr_from_str(data + MAC_ADDR_STRING_LEN + 1 + , length - (MAC_ADDR_STRING_LEN + 1), priv->mac_addr); + } + else { /* incorrect data length */ + sqn_pr_err("can't get mac address from bootloader" + " - incorrect mac address length\n"); + rv = -1; + goto out; + } + + sqn_pr_info("setting MAC address from bootloader: " + "%02x:%02x:%02x:%02x:%02x:%02x\n", priv->mac_addr[0] + , priv->mac_addr[1], priv->mac_addr[2], priv->mac_addr[3] + , priv->mac_addr[4], priv->mac_addr[5]); + +out: + sqn_pr_leave(); + return rv; +} + + +/** sqn_load_bootstrapper - reads a binary boostrapper file, analize it + * and loads data to the card. + * + * Bootstrapper is consists of Tag, Length, Value (TLV) sections. + * Each section starts with 4 bytes tag. Then goes length of data (4 bytes) + * and then the data itself. + * + * All fields of bootstrapper file is in BIG ENDIAN format. + */ +static int sqn_load_bootstrapper(struct sdio_func *func, u8 *data, int size) +{ + struct sqn_tlv *tlv = (struct sqn_tlv*) data; + int rv = 0; + + sqn_pr_enter(); + + while (size > 0) { + /* + * Convert values accordingly to platform "endianes" + * (big or little endian) because bootstrapper file + * data is big endian + */ + tlv->tag = be32_to_cpu(tlv->tag); + tlv->length = be32_to_cpu(tlv->length); + + switch (tlv->tag) { + case SWM_INFO_TAG_SQN_ROOT: + case SWM_INFO_TAG_SQN_BOOTROM_GROUP: + case SWM_INFO_TAG_SQN_ID_GROUP: + /* + * This tag is a "container" tag - it's value field + * contains other tags + */ + + /* sqn_pr_dbg("========================================\n"); */ + sqn_pr_dbg("tag: CONTAINER %x length: %u\n", tlv->tag + , tlv->length); + /* sqn_pr_dbg_dump("|", tlv->value, tlv->length); */ + + /* + * If this is a buggy tag, adjust length to + * the rest of data + */ + if (0 == tlv->length) + tlv->length = size - sizeof(*tlv); + + rv = sqn_load_bootstrapper(func, (u8*) tlv->value + , tlv->length); + if (rv) + goto out; + break; + + case SWM_INFO_TAG_SQN_MEMCPY: + /* sqn_pr_dbg("========================================\n"); */ + sqn_pr_dbg("tag: SWM_INFO_TAG_SQN_MEMCPY length: %u\n" + , tlv->length); + /* sqn_pr_dbg_dump("|", tlv->value, tlv->length); */ + rv = sqn_handle_memcpy_tag(func + , (struct sqn_tag_memcpy*) tlv->value); + if (rv) + goto out; + break; + + case SWM_INFO_TAG_SQN_MEMSET: + /* sqn_pr_dbg("========================================\n"); */ + sqn_pr_dbg("tag: SWM_INFO_TAG_SQN_MEMSET length: %u\n" + , tlv->length); + /* sqn_pr_dbg_dump("|", tlv->value, tlv->length); */ + rv = sqn_handle_memset_tag(func + , (struct sqn_tag_memset*) tlv->value); + if (rv) + goto out; + break; + + case SWM_INFO_TAG_SQN_MAC_ADDRESS: + /* 
sqn_pr_dbg("========================================\n"); */ + sqn_pr_dbg("tag: SWM_INFO_TAG_SQN_MAC_ADDRESS length: %u\n" + , tlv->length); + /* sqn_pr_dbg_dump("|", tlv->value, tlv->length); */ + + rv = sqn_handle_mac_addr_tag(func, tlv->value + , tlv->length); + if (rv) + goto out; + break; + + default: + /* skip all other tags */ + /* sqn_pr_dbg("========================================\n"); */ + sqn_pr_dbg("tag: UNKNOWN %x length: %u\n" + , tlv->tag, tlv->length); + /* sqn_pr_dbg_dump("|", tlv->value, tlv->length); */ + break; + } + + /* increment tlv to point it to the beginning of the next + * sqn_tlv struct and decrement size accordingly + */ + size = (int)(size - (sizeof(*tlv) + tlv->length)); + tlv = (struct sqn_tlv*) ((u8*)tlv + sizeof(*tlv) + tlv->length); + } + + if (0 != size) { + /* something wrong with parsing of tlv values */ + rv = -1; + goto out; + } + +out: + sqn_pr_leave(); + return rv; +} + + +extern char *firmware_name; + +/** sqn_load_firmware - loads firmware to card + * @func: SDIO function, used to transfer data via SDIO interface, + * also used to obtain pointer to device structure. + * + * But now the only work it does - is loading of bootstrapper to card, + * because firmware is supposed to be loaded by a userspace program. + */ +int sqn_load_firmware(struct sdio_func *func) +{ + int rv = 0; + const struct firmware *fw = 0; +//Create a local firmware_name with path to replace original global firmware_name -- Tony Wu. + const char *firmware_name = "../../../data/wimax/Boot.bin"; + + struct sqn_sdio_card *sqn_card = sdio_get_drvdata(func); + + sqn_pr_enter(); + + sqn_pr_info("trying to find bootloader image: \"%s\"\n", firmware_name); + if ((rv = request_firmware(&fw, firmware_name, &func->dev))) + goto out; + + if (SQN_1130 == sqn_card->version) { + sdio_claim_host(func); + + /* properly setup registers for firmware loading */ + sqn_pr_dbg("setting up SQN_H_SDRAM_NO_EMR register\n"); + sdio_writeb(func, 0, SQN_H_SDRAM_NO_EMR, &rv); + if (rv) { + sdio_release_host(func); + goto out; + } + + sqn_pr_dbg("setting up SQN_H_SDRAMCTL_RSTN register\n"); + sdio_writeb(func, 1, SQN_H_SDRAMCTL_RSTN, &rv); + sdio_release_host(func); + if (rv) + goto out; + } + + sqn_pr_info("loading bootloader to the card...\n"); + if ((rv = sqn_load_bootstrapper(func, (u8*) fw->data, fw->size))) + goto out; + + /* boot the card */ + sqn_pr_info("bootting the card...\n"); + sdio_claim_host(func); // by daniel + sdio_writeb(func, 1, SQN_H_CRSTN, &rv); + sdio_release_host(func); // by daniel + if (rv) + goto out; + sqn_pr_info(" done\n"); + +out: + // To avoid kzalloc leakage in /drivers/base/firmware_class.c + if (fw) { + release_firmware(fw); + fw = NULL; + } + + sqn_pr_leave(); + return rv; +} diff --git a/drivers/net/wimax/SQN/sdio-fw.h b/drivers/net/wimax/SQN/sdio-fw.h new file mode 100644 index 0000000000000..3875091a23bc7 --- /dev/null +++ b/drivers/net/wimax/SQN/sdio-fw.h @@ -0,0 +1,24 @@ +/* + * This is part of the Sequans SQN1130 driver. + * Copyright 2008 SEQUANS Communications + * Written by Dmitriy Chumak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. 
+ */ + + +#ifndef _SQN_FIRMWARE_H +#define _SQN_FIRMWARE_H + + +#define SQN_DEFAULT_FW_NAME "sequans_boot.bin" +extern char *fw1130_name; +extern char *fw1210_name; + + +int sqn_load_firmware(struct sdio_func *func); + +#endif /* _SQN_FIRMWARE_H */ diff --git a/drivers/net/wimax/SQN/sdio-netdev.h b/drivers/net/wimax/SQN/sdio-netdev.h new file mode 100644 index 0000000000000..273e8801d3222 --- /dev/null +++ b/drivers/net/wimax/SQN/sdio-netdev.h @@ -0,0 +1,50 @@ +/* + * This is part of the Sequans SQN1130 driver. + * Copyright 2008 SEQUANS Communications + * Written by Andy Shevchenko , + * Dmitriy Chumak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ + +#ifndef _SQN_NETDEV_H +#define _SQN_NETDEV_H + +#include +#include +#include +#include +#include + + +/* TODO: Move to sqn_sdio.c */ +#define SQN_MAX_PDU_LEN 2048 /* Max PDU length */ + + +extern struct ethtool_ops sqn_ethtool_ops; + +struct sqn_private { + spinlock_t drv_lock; + void *card; + struct net_device *dev; + struct net_device_stats stats; + u8 mac_addr[ETH_ALEN]; + struct task_struct *tx_thread; /* Thread to service TX queue */ + wait_queue_head_t tx_waitq; + wait_queue_head_t rx_waitq; + struct work_struct rx_work_struct; + u8 removed; + + + int (*hw_host_to_card) (struct sqn_private *priv); + + void (*add_skb_to_tx_queue) (struct sqn_private *priv + , struct sk_buff *skb, u8 tail); + + int (*is_tx_queue_empty) (struct sqn_private *priv); +}; + +#endif /* _SQN_NETDEV_H */ diff --git a/drivers/net/wimax/SQN/sdio-pm.c b/drivers/net/wimax/SQN/sdio-pm.c new file mode 100644 index 0000000000000..1fa658d09ac7c --- /dev/null +++ b/drivers/net/wimax/SQN/sdio-pm.c @@ -0,0 +1,867 @@ + /* + * This is part of the Sequans SQN1130 driver. + * Copyright 2008 SEQUANS Communications + * Written by Dmitriy Chumak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ + +/* + * This file includes code that is responsible for + * power management and LSP notifications handling. 
+ */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "sdio-netdev.h" +#include "sdio.h" +#include "sdio-pm.h" +#include "msg.h" +#include "thp.h" +#include "sdio-sqn.h" + +#define SDIO_CLAIM_HOST_DEBUG 0 + +#if SDIO_CLAIM_HOST_DEBUG +#define sqn_sdio_claim_host(func) \ +({ \ + struct mmc_host *h = (func)->card->host; \ + sqn_pr_info("%s: claim_host+\n", __func__); \ + sqn_pr_info("%s: BEFORE claim: claimed %d, claim_cnt %d, claimer 0x%p\n" \ + , __func__, h->claimed, h->claim_cnt, h->claimer); \ + sdio_claim_host((func)); \ + sqn_pr_info("%s: AFTER claim: claimed %d, claim_cnt %d, claimer 0x%p\n" \ + , __func__, h->claimed, h->claim_cnt, h->claimer); \ + sqn_pr_info("%s: claim_host-\n", __func__); \ +}) + +#define sqn_sdio_release_host(func) \ +({ \ + struct mmc_host *h = (func)->card->host; \ + sqn_pr_info("%s: release_host+\n", __func__); \ + sqn_pr_info("%s: BEFORE release: claimed %d, claim_cnt %d, claimer 0x%p\n" \ + , __func__, h->claimed, h->claim_cnt, h->claimer); \ + sdio_release_host((func)); \ + sqn_pr_info("%s: AFTER release: claimed %d, claim_cnt %d, claimer 0x%p\n" \ + , __func__, h->claimed, h->claim_cnt, h->claimer); \ + sqn_pr_info("%s: release_host-\n", __func__); \ +}) +#else +#define sqn_sdio_claim_host(func) \ +({ \ + sdio_claim_host((func)); \ +}) + +#define sqn_sdio_release_host(func) \ +({ \ + sdio_release_host((func)); \ +}) +#endif + +#define IGNORE_CARRIER_STATE 1 +extern int mmc_wimax_get_hostwakeup_gpio(void); +extern void mmc_wimax_enable_host_wakeup(int on); + +enum sqn_thsp_service { +#define THSP_LSP_SERVICE_BASE 0x10010000 + THSP_GET_MEDIA_CONNECTION_STATE = THSP_LSP_SERVICE_BASE + , THSP_MEDIA_CONNECTION_STATE_CHANGE + , THSP_SET_POWER_MODE /* deprecated */ + + , THSP_SET_HOST_POWER_MODE + , THSP_SET_HOST_POWER_MODE_ACK + + , THSP_SET_FW_POWER_MODE + , THSP_SET_FW_POWER_MODE_ACK + + , THSP_SQN_STATE_CHANGE + , THSP_SQN_STATE_CHANGE_REPLY + + , THSP_THP_AVAILABLE + , THSP_THP_AVAILABLE_REPLY +}; + + +/* Deprecated */ +/* enum sqn_power_mode { */ + /* SQN_PM_OPERATIONAL */ + /* , SQN_PM_SHUTDOWN */ + /* , SQN_PM_STANDBY */ +/* }; */ + + +enum sqn_host_power_mode { + LSP_HPM_OPERATIONAL + , LSP_HPM_ASLEEP +}; + + +enum sqn_fw_power_mode { + LSP_FPM_OPERATIONAL + , LSP_FPM_SHUTDOWN + , LSP_FPM_STANDBY +}; + + +enum sqn_pm_status { + SQN_PM_STATUS_SUCCES + , SQN_PM_STATUS_CHANGE_IN_PROGRESS + , SQN_PM_STATUS_UNKNOWN +}; + + +enum sqn_thsp_media_connection_state { + THSP_MEDIA_CONNECTION_DISCONNECTED + , THSP_MEDIA_CONNECTION_CONNECTING + , THSP_MEDIA_CONNECTION_CONNECTED + , THSP_MEDIA_CONNECTION_ATTACHED +}; + + +enum sqn_fw_state { + LSP_SQN_ACTIVE + , LSP_SQN_IDLE + , LSP_SQN_DROPPED + , LSP_SQN_REENTRY +}; + + +enum sqn_thp_available_reply { + LSP_THPA_ACK + , LSP_THPA_FINISHED + , LSP_THPA_EXIT +}; + + +struct sqn_eth_header { + u8 dst_addr[ETH_ALEN]; + u8 src_addr[ETH_ALEN]; + u16 len; +}; + + +struct sqn_lsp_header { + u32 id; + union { + u32 tid; + enum sqn_thsp_media_connection_state media_con_state; + + struct { + u32 tid; + enum sqn_host_power_mode mode; + } host_power; + + struct { + u32 tid; + enum sqn_fw_power_mode mode; + } fw_power; + + struct { + u32 tid; + enum sqn_fw_state state; + } fw_state; + + struct { + u32 tid; + enum sqn_thp_available_reply reply; + } thp_avl; + } u; +}; + + +struct sqn_lsp_packet { + struct sqn_thp_header thp_header; + struct sqn_lsp_header lsp_header; +}; + + +static u8 g_lsp_host_mac[] = {0x00, 0x16, 0x08, 0xff, 0x00, 0x06}; 
+static u8 g_lsp_device_mac[] = {0x00, 0x16, 0x08, 0xff, 0x00, 0x05}; + + +/* TODO: add all global variables to private per-card structure */ +static struct sk_buff *g_last_request_skb = 0; +static spinlock_t g_last_request_lock = SPIN_LOCK_UNLOCKED; +static u32 g_last_request_pm = 0; + + +/* TODO: move this to per-card private structure */ +DECLARE_WAIT_QUEUE_HEAD(g_card_sleep_waitq); + +/* Transaction ID for lsp requests */ +static u32 g_tid = 0; +static spinlock_t g_tid_lock = SPIN_LOCK_UNLOCKED; + + +static u32 get_current_tid(void) +{ + u32 tid = 0; + + spin_lock(&g_tid_lock); + tid = g_tid; + spin_unlock(&g_tid_lock); + + return tid; +} + + +static u32 get_next_tid(void) +{ + u32 tid = 0; + + spin_lock(&g_tid_lock); + g_tid += 1; + tid = g_tid; + spin_unlock(&g_tid_lock); + + return tid; +} + + +static void free_last_request(void) +{ + sqn_pr_enter(); + + spin_lock(&g_last_request_lock); + if (0 != g_last_request_skb) { + dev_kfree_skb_any(g_last_request_skb); + g_last_request_skb = 0; + } + spin_unlock(&g_last_request_lock); + + sqn_pr_leave(); +} + + +static struct sk_buff* lsp_to_skb(struct sqn_lsp_packet *lsp_packet) +{ + struct sqn_eth_header eth_header = { + .len = htons(sizeof(struct sqn_lsp_packet)) + }; + + struct sk_buff *skb = + __dev_alloc_skb(sizeof(eth_header) + sizeof(struct sqn_lsp_packet) + , GFP_ATOMIC | GFP_DMA); + + sqn_pr_enter(); + + if (0 == skb) + goto out; + + skb_reserve(skb, 2); + + memcpy(eth_header.dst_addr, g_lsp_device_mac, sizeof(g_lsp_device_mac)); + memcpy(eth_header.src_addr, g_lsp_host_mac, sizeof(g_lsp_host_mac)); + + memcpy(skb->data, ð_header, sizeof(eth_header)); + skb_put(skb, sizeof(eth_header)); + + memcpy(skb->data + skb->len, lsp_packet, sizeof(struct sqn_lsp_packet)); + skb_put(skb, sizeof(struct sqn_lsp_packet)); + + sqn_pr_leave(); + +out: + return skb; + +} + + +static struct sk_buff* construct_lsp_packet(u32 id, u32 param1, u32 param2) +{ + struct sqn_lsp_packet lsp_packet = { + .thp_header = { + .transport_version = 1 + , .flags = 1 + , .seq_number = 0 + , .ack_number = 0 + } + , .lsp_header = { + .id = htonl(id) + } + }; + + struct sk_buff *skb = 0; + + sqn_pr_enter(); + + switch (id) { + case THSP_GET_MEDIA_CONNECTION_STATE: + /* no parameters are needed */ + sqn_pr_dbg("id: THSP_GET_MEDIA_CONNECTION_STATE\n"); + lsp_packet.thp_header.length = + htons(sizeof(struct sqn_lsp_header) - 4); + lsp_packet.thp_header.total_length = + htonl(sizeof(struct sqn_lsp_header) - 4); + break; + case THSP_SET_POWER_MODE: + /* deprecated */ + sqn_pr_dbg("id: THSP_SET_POWER_MODE (deprecated)\n"); + break; + case THSP_SET_HOST_POWER_MODE: + lsp_packet.thp_header.length = + htons(sizeof(struct sqn_lsp_header)); + lsp_packet.thp_header.total_length = + htonl(sizeof(struct sqn_lsp_header)); + lsp_packet.lsp_header.u.host_power.tid = htonl(get_next_tid()); + lsp_packet.lsp_header.u.host_power.mode = htonl(param1); + sqn_pr_dbg("id: THSP_SET_HOST_POWER_MODE, tid: 0x%x, mode: %d\n" + , ntohl(lsp_packet.lsp_header.u.host_power.tid) + , param1); + break; + case THSP_SET_FW_POWER_MODE: + lsp_packet.thp_header.length = + htons(sizeof(struct sqn_lsp_header)); + lsp_packet.thp_header.total_length = + htonl(sizeof(struct sqn_lsp_header)); + lsp_packet.lsp_header.u.fw_power.tid = htonl(get_next_tid()); + lsp_packet.lsp_header.u.fw_power.mode = htonl(param1); + sqn_pr_dbg("id: THSP_SET_FW_POWER_MODE, tid: 0x%x, mode: %d\n" + , htonl(lsp_packet.lsp_header.u.fw_power.tid) + , param1); + break; + case THSP_SQN_STATE_CHANGE_REPLY: + lsp_packet.thp_header.length = + 
htons(sizeof(struct sqn_lsp_header) - 4); + lsp_packet.thp_header.total_length = + htonl(sizeof(struct sqn_lsp_header) - 4); + lsp_packet.lsp_header.u.fw_state.tid = htonl(param1); + sqn_pr_dbg("id: THSP_SQN_STATE_CHANGE_REPLY, tid: %xh\n" + , param1); + break; + case THSP_THP_AVAILABLE_REPLY: + lsp_packet.thp_header.length = + htons(sizeof(struct sqn_lsp_header)); + lsp_packet.thp_header.total_length = + htonl(sizeof(struct sqn_lsp_header)); + lsp_packet.lsp_header.u.thp_avl.tid = htonl(param1); + lsp_packet.lsp_header.u.thp_avl.reply = htonl(param2); + sqn_pr_dbg("id: THSP_THP_AVAILABLE_REPLY, tid: 0x%x, reply: %d\n" + , param1, param2); + break; + default: + sqn_pr_dbg("id: UNKNOWN\n"); + } + + skb = lsp_to_skb(&lsp_packet); + + sqn_pr_leave(); + + return skb; +} + + +int is_lsp_packet(const struct sk_buff *skb) +{ + struct sqn_eth_header *eth_hdr = (struct sqn_eth_header*)skb->data; + + /* sqn_pr_dbg_dump("skb________", skb->data, skb->len); */ + /* sqn_pr_dbg_dump("lsp_dev_mac", g_lsp_device_mac, sizeof(g_lsp_device_mac)); */ + /* sqn_pr_dbg_dump("skb_addr___", eth_hdr->src_addr, sizeof(g_lsp_device_mac)); */ + + return !memcmp(eth_hdr->dst_addr, g_lsp_host_mac + , sizeof(g_lsp_host_mac)); +} + + +static int sqn_set_power_mode_helper(struct sdio_func *func + , enum sqn_thsp_service command_id, u32 pm) +{ + unsigned long irq_flags = 0; + struct sqn_sdio_card *card = sdio_get_drvdata(func); + + sqn_pr_enter(); + + free_last_request(); + + spin_lock(&g_last_request_lock); + + g_last_request_pm = pm; + g_last_request_skb = construct_lsp_packet(command_id, pm, 0); + + if (0 == g_last_request_skb) + return 1; + + netif_stop_queue(card->priv->dev); + + /* + * We can't call sqn_sdio_tx_skb() from here, because we are not in + * process context + */ + skb_queue_tail(&card->tx_queue, g_last_request_skb); + g_last_request_skb = 0; + + spin_unlock(&g_last_request_lock); + + spin_lock_irqsave(&card->priv->drv_lock, irq_flags); + card->pm_complete = 0; + spin_unlock_irqrestore(&card->priv->drv_lock, irq_flags); + + wake_up_interruptible(&card->priv->tx_waitq); + + sqn_pr_leave(); + + return 0; +} + + +static int sqn_set_host_power_mode(struct sdio_func *func, enum sqn_host_power_mode pm) +{ + int rv = 0; + + sqn_pr_enter(); + + rv = sqn_set_power_mode_helper(func, THSP_SET_HOST_POWER_MODE, pm); + + sqn_pr_leave(); + + return rv; +} + + +static int sqn_set_fw_power_mode(struct sdio_func *func, enum sqn_fw_power_mode pm) +{ + int rv = 0; + + sqn_pr_enter(); + + rv = sqn_set_power_mode_helper(func, THSP_SET_FW_POWER_MODE, pm); + + sqn_pr_leave(); + + return rv; +} + + +static void signal_pm_request_completion(struct sqn_private *priv) +{ + struct sqn_sdio_card *card = priv->card; + unsigned long irq_flags = 0; + + sqn_pr_enter(); + + spin_lock_irqsave(&priv->drv_lock, irq_flags); + card->pm_complete = 1; + spin_unlock_irqrestore(&priv->drv_lock, irq_flags); + + wake_up_interruptible(&card->pm_waitq); + + sqn_pr_leave(); +} + + +void signal_card_sleep_completion(struct sqn_private *priv) +{ + struct sqn_sdio_card *card = priv->card; + unsigned long irq_flags = 0; + + sqn_pr_enter(); + + spin_lock_irqsave(&priv->drv_lock, irq_flags); + card->is_card_sleeps = 0; + spin_unlock_irqrestore(&priv->drv_lock, irq_flags); + wake_up_interruptible(&g_card_sleep_waitq); + sqn_pr_dbg("card sleep completion is signaled\n"); + + sqn_pr_leave(); +} + + +int sqn_notify_host_sleep(struct sdio_func *func) +{ + int rv = 0; + unsigned long irq_flags = 0; + u32 timeout = 0; + struct sqn_sdio_card *sqn_card = 
sdio_get_drvdata(func); + + sqn_pr_enter(); + + sqn_card->waiting_pm_notification = 1; + + sqn_pr_info("notify card about host goes to sleep...\n"); + sqn_set_host_power_mode(func, LSP_HPM_ASLEEP); + + timeout = 50; + sqn_pr_info("wait for completion (timeout %u msec)...\n", timeout); + rv = wait_event_interruptible_timeout(sqn_card->pm_waitq + , sqn_card->pm_complete, msecs_to_jiffies(timeout)); + if (-ERESTARTSYS == rv) { + sqn_pr_warn("got a signal from kernel %d\n", rv); + } else if (0 == rv) { + /* a timeout elapsed */ + sqn_pr_warn("timeout elapsed - still no ack from card" + ", assume that card in sleep mode now\n"); + sqn_card->is_card_sleeps = 1; + } else { + /* we got an ack from card */ + sqn_pr_info("card in sleep mode now\n"); + sqn_card->is_card_sleeps = 1; + rv = 0; + } + + sqn_card->pm_complete = 0; + sqn_card->waiting_pm_notification = 0; + + sqn_pr_leave(); + + return rv; +} + + +int sqn_notify_host_wakeup(struct sdio_func *func) +{ + int rv = 0; + + sqn_pr_enter(); + + rv = sqn_wakeup_fw(func); + + sqn_pr_leave(); + + return rv; +}; + + +static void handle_sqn_state_change_msg(struct sqn_private *priv + , struct sqn_lsp_packet *lsp) +{ + struct sqn_sdio_card *card = priv->card; + struct sk_buff *skb_reply = 0; + unsigned long irq_flags = 0; + const int card_state = ntohl(lsp->lsp_header.u.fw_state.state); + + sqn_pr_enter(); + + switch (card_state) { + case LSP_SQN_ACTIVE: + sqn_pr_info("card switched to ACTIVE state\n"); + spin_lock_irqsave(&priv->drv_lock, irq_flags); + card->is_card_sleeps = 0; + spin_unlock_irqrestore(&priv->drv_lock, irq_flags); + break; + case LSP_SQN_IDLE: + sqn_pr_info("card switched to IDLE state\n"); + spin_lock_irqsave(&priv->drv_lock, irq_flags); + card->is_card_sleeps = 1; + spin_unlock_irqrestore(&priv->drv_lock, irq_flags); + break; + case LSP_SQN_DROPPED: + sqn_pr_info("card switched to DROPPED state\n"); + spin_lock_irqsave(&priv->drv_lock, irq_flags); + card->is_card_sleeps = 1; + spin_unlock_irqrestore(&priv->drv_lock, irq_flags); + break; + case LSP_SQN_REENTRY: + sqn_pr_info("card switched to REENTRY state\n"); + spin_lock_irqsave(&priv->drv_lock, irq_flags); + card->is_card_sleeps = 1; + spin_unlock_irqrestore(&priv->drv_lock, irq_flags); + break; + default: + sqn_pr_info("card switched to UNSUPPORTED mode %d/0x%x\n" + , card_state, card_state); + spin_lock_irqsave(&priv->drv_lock, irq_flags); + card->is_card_sleeps = 0; + spin_unlock_irqrestore(&priv->drv_lock, irq_flags); + break; + } + skb_reply = construct_lsp_packet(THSP_SQN_STATE_CHANGE_REPLY + , ntohl(lsp->lsp_header.u.thp_avl.tid), 0); + if (0 != (skb_reply = sqn_sdio_prepare_skb_for_tx(skb_reply))) + sqn_sdio_tx_skb(card, skb_reply, 0); + wake_up_interruptible(&g_card_sleep_waitq); + + sqn_pr_leave(); +} + + +static void handle_thp_avl_msg(struct sqn_private *priv + , struct sqn_lsp_packet *lsp) +{ + struct sqn_sdio_card *card = priv->card; + struct sk_buff *skb_reply = 0; + enum sqn_thp_available_reply thp_rpl; + unsigned long irq_flags = 0; + + sqn_pr_enter(); + + spin_lock_irqsave(&priv->drv_lock, irq_flags); + /* if (card->is_card_sleeps) { */ + if (priv->is_tx_queue_empty(priv)) { + sqn_pr_dbg("TX queue empty, thp_rpl=FINISH\n"); + /* sqn_pr_dbg("card was asleep, thp_rpl=FINISH\n"); */ + thp_rpl = LSP_THPA_FINISHED; + card->is_card_sleeps = 1; + /* } else if (priv->is_tx_queue_empty(priv)) { */ + /* sqn_pr_dbg("card was not asleep and tx_queue is empty, thp_rpl=FINISHED\n"); */ + /* thp_rpl = LSP_THPA_FINISHED; */ + /* card->is_card_sleeps = 1; */ + } else { + /* 
sqn_pr_info("card was not asleep but tx_queue is no empty, thp_rpl=EXIT\n"); */ + sqn_pr_dbg("TX queue not empty, thp_rpl=ACK\n"); + /* sqn_pr_dbg("card was not asleep, thp_rpl=ACK\n"); */ + thp_rpl = LSP_THPA_ACK; + card->is_card_sleeps = 0; + } + spin_unlock_irqrestore(&priv->drv_lock, irq_flags); + skb_reply = construct_lsp_packet(THSP_THP_AVAILABLE_REPLY + , ntohl(lsp->lsp_header.u.thp_avl.tid) + , thp_rpl); + if (0 != (skb_reply = sqn_sdio_prepare_skb_for_tx(skb_reply))) + sqn_sdio_tx_skb(card, skb_reply, 0); + wake_up_interruptible(&g_card_sleep_waitq); + if (netif_queue_stopped(priv->dev)) + netif_wake_queue(priv->dev); + + sqn_pr_leave(); +} + + +int sqn_handle_lsp_packet(struct sqn_private *priv + , struct sk_buff *skb) +{ + struct sqn_sdio_card *card = priv->card; + unsigned long irq_flags = 0; + struct sqn_lsp_packet *lsp_response = (struct sqn_lsp_packet*) + ((u8*)skb->data + sizeof(struct sqn_eth_header)); + + sqn_pr_enter(); + + if (!is_lsp_packet(skb)) { + sqn_pr_dbg("not LSP packet\n"); + sqn_pr_leave(); + return 0; + } + + sqn_pr_dbg("LSP packet\n"); + + switch (ntohl(lsp_response->lsp_header.id)) { + case THSP_GET_MEDIA_CONNECTION_STATE: + sqn_pr_dbg("id: THSP_GET_MEDIA_CONNECTION_STATE state=%xh\n" + , ntohl(lsp_response->lsp_header.u.media_con_state)); + sqn_pr_warn("THSP_GET_MEDIA_CONNECTION_STATE not implemented\n"); + break; + case THSP_MEDIA_CONNECTION_STATE_CHANGE: + sqn_pr_dbg("id: THSP_MEDIA_CONNECTION_STATE_CHANGE state=%xh\n" + , ntohl(lsp_response->lsp_header.u.media_con_state)); + if (THSP_MEDIA_CONNECTION_ATTACHED + == ntohl(lsp_response->lsp_header.u.media_con_state)) + { + +#if IGNORE_CARRIER_STATE + /* netif_carrier_on(priv->dev); */ + sqn_pr_info("WiMAX carrier PRESENT [ignored]\n"); +#else + netif_carrier_on(priv->dev); + sqn_pr_info("WiMAX carrier PRESENT\n"); +#endif + } else { +#if IGNORE_CARRIER_STATE + /* netif_carrier_off(priv->dev); */ + sqn_pr_info("WiMAX carrier LOST [ignored]\n"); +#else + netif_carrier_off(priv->dev); + sqn_pr_info("WiMAX carrier LOST\n"); +#endif + } + break; + case THSP_SET_HOST_POWER_MODE_ACK: + sqn_pr_dbg("id: THSP_SET_HOST_POWER_MODE_ACK tid=0x%x\n" + , ntohl(lsp_response->lsp_header.u.host_power.tid)); + free_last_request(); + spin_lock_irqsave(&priv->drv_lock, irq_flags); + card->is_card_sleeps = 1; + spin_unlock_irqrestore(&priv->drv_lock, irq_flags); + signal_pm_request_completion(priv); + break; + case THSP_SET_FW_POWER_MODE_ACK: + sqn_pr_dbg("id: THSP_SET_FW_POWER_MODE_ACK tid=0x%x\n" + , ntohl(lsp_response->lsp_header.u.fw_power.tid)); + sqn_pr_dbg("THSP_SET_FW_POWER_MODE_ACK not implemented\n"); + break; + case THSP_SQN_STATE_CHANGE: + sqn_pr_dbg("id: THSP_SQN_STATE_CHANGE tid=0x%x, state=%xh\n" + , ntohl(lsp_response->lsp_header.u.fw_state.tid) + , ntohl(lsp_response->lsp_header.u.fw_state.state)); + handle_sqn_state_change_msg(priv, lsp_response); + break; + case THSP_THP_AVAILABLE: + sqn_pr_dbg("id: THSP_THP_AVAILABLE tid=0x%x, reply=%xh\n" + , ntohl(lsp_response->lsp_header.u.thp_avl.tid) + , ntohl(lsp_response->lsp_header.u.thp_avl.reply)); + handle_thp_avl_msg(priv, lsp_response); + break; + default: + sqn_pr_dbg("lsp_id: UNKNOWN=0x%x\n" + , ntohl(lsp_response->lsp_header.id)); + } + + dev_kfree_skb_any(skb); + sqn_pr_leave(); + + return 1; +} + + +int sqn_wakeup_fw(struct sdio_func *func) +{ + int rv = 0; + int ver = 0; + int counter = 0; + + int retry_cnt = 3; + u32 wakeup_delay = 0; + unsigned long timeout = msecs_to_jiffies(800); + + unsigned long irq_flags = 0; + struct sqn_private *priv = ((struct 
sqn_sdio_card *)sdio_get_drvdata(func))->priv; + struct sqn_sdio_card *card = priv->card; + u8 need_to_unlock_wakelock = 0; + + sqn_pr_enter(); + sqn_pr_info("waking up the card...\n"); + + if (!wake_lock_active(&card->wakelock)) { + sqn_pr_dbg("lock wake_lock\n"); + wake_lock(&card->wakelock); + need_to_unlock_wakelock = 1; + } + +retry: + if (priv->removed) + goto out; + + sqn_sdio_claim_host(func); + +#define SDIO_CCCR_CCCR_SDIO_VERSION_VALUE 0x11 + + wakeup_delay = 2; + counter = 5; + do { + ver = sdio_readb(func, SDIO_CCCR_CCCR_SDIO_VERSION, &rv); + // To avoid FW sutck in PLLOFF, SDIO isn't able to wake up it. + mdelay(wakeup_delay); + --counter; + } while((rv || ver != SDIO_CCCR_CCCR_SDIO_VERSION_VALUE) && counter > 0); + + if (rv) { + sqn_pr_err("error when reading SDIO_VERSION\n"); + sqn_sdio_release_host(func); + goto out; + } else { + sqn_pr_dbg("SDIO_VERSION has been read successfully\n"); + } + + sqn_pr_dbg("send wake-up signal to card\n"); + sdio_writeb(func, 1, SQN_SOC_SIGS_LSBS, &rv); + if (rv) + sqn_pr_err("error when writing to SQN_SOC_SIGS_LSBS: %d\n", rv); + + sqn_sdio_release_host(func); + + sqn_pr_info("wait for completion (timeout %d msec)...\n" + , jiffies_to_msecs(timeout)); + + rv = wait_event_interruptible_timeout(g_card_sleep_waitq + , 0 == card->is_card_sleeps || priv->removed, timeout); + + if (priv->removed) + goto out; + + if (-ERESTARTSYS == rv) { + sqn_pr_warn("got a signal from kernel %d\n", rv); + } else if (0 == rv) { + rv = -1; + sqn_pr_err("can't wake up the card - timeout elapsed\n"); + + if (retry_cnt-- > 0 && card->is_card_sleeps) { + sqn_pr_info("retry wake up\n"); + goto retry; + } + sqn_pr_info("giving up to wake up the card\n"); + + spin_lock_irqsave(&priv->drv_lock, irq_flags); + card->is_card_sleeps = 0; + spin_unlock_irqrestore(&priv->drv_lock, irq_flags); + } else { + rv = 0; + sqn_pr_info("card is waked up successfully\n"); + } + +out: + if (need_to_unlock_wakelock && wake_lock_active(&card->wakelock)) { + sqn_pr_dbg("wake_lock is active, release it\n"); + wake_unlock(&card->wakelock); + } + + sqn_pr_leave(); + return rv; +} + +extern void mmc_wimax_enable_host_wakeup(int on); + +static void sqn_handle_android_early_suspend(struct early_suspend *h) +{ + sqn_pr_enter(); + sqn_pr_info("%s: enter\n", __func__); + + mmc_wimax_enable_host_wakeup(1); + + sqn_pr_info("%s: leave\n", __func__); + sqn_pr_leave(); +} + + +static void sqn_handle_android_late_resume(struct early_suspend *h) +{ + sqn_pr_enter(); + sqn_pr_info("%s: enter\n", __func__); + + mmc_wimax_enable_host_wakeup(0); + + sqn_pr_info("%s: leave\n", __func__); + sqn_pr_leave(); +} + + +static struct early_suspend sqn_early_suspend_desc = { + .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + , .suspend = sqn_handle_android_early_suspend + , .resume = sqn_handle_android_late_resume +}; + + +void register_android_earlysuspend(void) +{ + sqn_pr_enter(); + + register_early_suspend(&sqn_early_suspend_desc); + + sqn_pr_leave(); +} + + +void unregister_android_earlysuspend(void) +{ + sqn_pr_enter(); + + unregister_early_suspend(&sqn_early_suspend_desc); + + sqn_pr_leave(); +} diff --git a/drivers/net/wimax/SQN/sdio-pm.h b/drivers/net/wimax/SQN/sdio-pm.h new file mode 100644 index 0000000000000..540acc3b1a791 --- /dev/null +++ b/drivers/net/wimax/SQN/sdio-pm.h @@ -0,0 +1,35 @@ +/* + * This is part of the Sequans SQN1130 driver. 
+ * Copyright 2008 SEQUANS Communications + * Written by Dmitriy Chumak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ + + +#ifndef _SQN_POWER_MANAGEMENT_H +#define _SQN_POWER_MANAGEMENT_H + + +int sqn_notify_host_sleep(struct sdio_func *func); + +int sqn_notify_host_wakeup(struct sdio_func *func); + + +int sqn_handle_lsp_packet(struct sqn_private *priv + , struct sk_buff *skb); + + +int sqn_wakeup_fw(struct sdio_func *func); + + +void signal_card_sleep_completion(struct sqn_private *priv); + +void register_android_earlysuspend(void); + +void unregister_android_earlysuspend(void); + +#endif /* _SQN_POWER_MANAGEMENT_H */ diff --git a/drivers/net/wimax/SQN/sdio-sqn.h b/drivers/net/wimax/SQN/sdio-sqn.h new file mode 100644 index 0000000000000..9a9897f57c595 --- /dev/null +++ b/drivers/net/wimax/SQN/sdio-sqn.h @@ -0,0 +1,167 @@ +/* + * This is part of the Sequans SQN1130 driver. + * Copyright 2008 SEQUANS Communications + * Written by Andy Shevchenko + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ + +#ifndef _SQN_SDIO_H +#define _SQN_SDIO_H + +#include "version.h" + +#include +#include +#include +#include + + +enum sqn_card_version { + SQN_1130 = 1 + , SQN_1210 +}; + + +/* Card private information */ +struct sqn_sdio_card { + struct sqn_private *priv; + struct sdio_func *func; + u8 rstn_wr_fifo_flag; + u8 version; + struct sk_buff_head tx_queue; + struct mutex tx_mutex; +#define TX_QUEUE_MAX_LEN 1000 /* max length to which tx_queue is allowed to grow */ +#define TX_QUEUE_WM_LEN 800 /* length, from which we will continue transmission */ + struct sk_buff_head rx_queue; +#define RX_QUEUE_MAX_LEN 1000 /* max length to which rx_queue is allowed to grow */ +#define RX_QUEUE_WM_LEN 800 /* length, from which we will continue transmission */ + struct mutex rx_mutex; + struct mutex rxq_mutex; + wait_queue_head_t pm_waitq; + struct wake_lock wakelock; + struct timer_list wakelock_timer; + + /* Condition flags for event signaling */ + u8 pm_complete; + u8 it_thread_should_stop; + u8 is_card_sleeps; + u8 waiting_pm_notification; +}; + + +void sqn_sdio_stop_it_thread_from_itself(struct sqn_private *priv); + +struct sk_buff* sqn_sdio_prepare_skb_for_tx(struct sk_buff *skb); + +int sqn_sdio_tx_skb(struct sqn_sdio_card *card, struct sk_buff *skb + , u8 claim_host); + + +#define SQN_SDIO_PDU_MINLEN 2 +#define SQN_SDIO_PDU_MAXLEN 2047 + +/* Product IDs */ +#define SDIO_CMN_CISTPLMID_MANF 0x1002 /* Sequans manufacture ID register */ +#define SDIO_CMN_CISTPLMID_CARD 0x1004 /* Sequans SQN1130 card ID register */ + +#define SDIO_VENDOR_ID_SEQUANS 0x039d /* Sequans manufacture ID */ +#define SDIO_DEVICE_ID_SEQUANS_SQN1130 0x046a /* Sequans SQN1130 card ID */ +#define SDIO_DEVICE_ID_SEQUANS_SQN1210 0x1210 /* Sequans SQN1210-rev2 card ID */ + +#define SDIO_CCCR_CCCR_SDIO_VERSION 0x00 +#define SDIO_CCCR_IO_ABORT 0x06 + + +#define SQN_H_VERSION 0x240C + + +/* FIFO dependent list */ +#define SQN_SDIO_RDWR_BASE 0x2000 +#define SQN_SDIO_RDWR_FIFO(x) (SQN_SDIO_RDWR_BASE + (x)*4) + +#define SQN_SDIO_RDLEN_BASE 0x2002 +#define SQN_SDIO_RDLEN_FIFO(x) (SQN_SDIO_RDLEN_BASE + (x)*4) + +#define 
SQN_SDIO_PCRRT_BASE 0x2010 +#define SQN_SDIO_PCRRT_FIFO(x) (SQN_SDIO_PCRRT_BASE + (x)*2) + +#define SQN_SDIO_PCWRT_BASE 0x2011 +#define SQN_SDIO_PCWRT_FIFO(x) (SQN_SDIO_PCWRT_BASE + (x)*2) + +#define SQN_SDIO_SZ_RD_BASE 0x2018 +#define SQN_SDIO_SZ_RD_FIFO(x) (SQN_SDIO_SZ_RD_BASE + (x)*8) + +#define SQN_SDIO_WM_RD_BASE 0x201a +#define SQN_SDIO_WM_RD_FIFO(x) (SQN_SDIO_WM_RD_BASE + (x)*8) + +#define SQN_SDIO_SZ_WR_BASE 0x201c +#define SQN_SDIO_SZ_WR_FIFO(x) (SQN_SDIO_SZ_WR_BASE + (x)*8) + +#define SQN_SDIO_WM_WR_BASE 0x201e +#define SQN_SDIO_WM_WR_FIFO(x) (SQN_SDIO_WM_WR_BASE + (x)*8) + +#define SQN_SDIO_ESZ_WR_FIFO0 0x2032 /* FIFO0 */ +#define SQN_SDIO_ESZ_WR_FIFO1 0x2036 /* FIFO1 */ +#define SQN_SDIO_ESZ_WR_FIFO2 0x0000 /* No real FIFO */ + +#define SQN_SDIO_RSTN_RD_BASE 0x2038 +#define SQN_SDIO_RSTN_RD_FIFO(x) (SQN_SDIO_RSTN_RD_BASE + (x)*2) + +#define SQN_SDIO_RSTN_WR_BASE 0x2039 +#define SQN_SDIO_RSTN_WR_FIFO(x) (SQN_SDIO_RSTN_WR_BASE + (x)*2) + +#define SQN_SDIO_RD_LEVEL_BASE 0x2048 +#define SQN_SDIO_RD_FIFO_LEVEL(x) (SQN_SDIO_RD_LEVEL_BASE + (x)*4) + +#define SQN_SDIO_WR_LEVEL_BASE 0x204a +#define SQN_SDIO_WR_FIFO_LEVEL(x) (SQN_SDIO_WR_LEVEL_BASE + (x)*4) + +#define SQN_SDIO_RD_BYTESLEFT_BASE 0x2054 +#define SQN_SDIO_RD_FIFO_BYTESLEFT(x) (SQN_SDIO_RD_BYTESLEFT_BASE + (x)*4) + +#define SQN_SDIO_WR_BYTESLEFT_BASE 0x2056 +#define SQN_SDIO_WR_FIFO_BYTESLEFT(x) (SQN_SDIO_WR_BYTESLEFT_BASE + (x)*4) + +/* Interrupt registers */ +#define SQN_SDIO_IT_EN_LSBS 0x2044 +#define SQN_SDIO_IT_EN_MSBS 0x2045 +#define SQN_SDIO_IT_STATUS_LSBS 0x2046 +#define SQN_SDIO_IT_STATUS_MSBS 0x2047 + +/* Firmware loading registers */ +#define SQN_H_GRSTN 0x2400 +#define SQN_H_CRSTN 0x2404 +#define SQN_H_SDRAMCTL_RSTN 0x2414 +#define SQN_H_SDRAM_NO_EMR 0x2415 +#define SQN_H_BOOT_FROM_SPI 0x2411 + + +/* Interrupt flags (LSB) */ +#define SQN_SDIO_IT_WR_FIFO0_WM (1 << 0) +#define SQN_SDIO_IT_RD_FIFO0_WM (1 << 1) +#define SQN_SDIO_IT_WR_FIFO1_WM (1 << 2) +#define SQN_SDIO_IT_RD_FIFO1_WM (1 << 3) +#define SQN_SDIO_IT_WR_FIFO2_WM (1 << 4) +#define SQN_SDIO_IT_RD_FIFO2_WM (1 << 5) +#define SQN_SDIO_IT_RD_EMPTY_WR_FULL (1 << 6) +#define SQN_SDIO_IT_SW_SIGN (1 << 7) + +/* Interrupt flags (MSB) */ +#define SQN_SDIO_IT_WR_FIFO0_FULL_RST (1 << 0) +#define SQN_SDIO_IT_RD_FIFO0_EMPTY_RST (1 << 1) +#define SQN_SDIO_IT_WR_FIFO1_FULL_RST (1 << 2) +#define SQN_SDIO_IT_RD_FIFO1_EMPTY_RST (1 << 3) +#define SQN_SDIO_IT_WR_FIFO2_FULL_RST (1 << 4) +#define SQN_SDIO_IT_RD_FIFO2_EMPTY_RST (1 << 5) +#define SQN_SDIO_IT_RD_BEFORE_RDLEN (1 << 6) +#define SQN_SDIO_IT_UNSUPPORTED_CMD (1 << 7) + +/* Software signaling interrupts */ +#define SQN_SOC_SIGS_LSBS 0x2600 +#define SQN_HTS_SIGS 0x2608 + +#endif /* _SQN_SDIO_H */ diff --git a/drivers/net/wimax/SQN/sdio.c b/drivers/net/wimax/SQN/sdio.c new file mode 100644 index 0000000000000..9fcf59a7028c0 --- /dev/null +++ b/drivers/net/wimax/SQN/sdio.c @@ -0,0 +1,1977 @@ +/* + * This is part of the Sequans SQN1130 driver. + * Copyright 2008 SEQUANS Communications + * Written by Dmitriy Chumak , + * Andy Shevchenko + * + * Inspired by if_sdio.c, Copyright 2007 Pierre Ossman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// GPIO_WAKEUP +#include +#include +#include + +#include "version.h" +#include "msg.h" +#include "sdio-netdev.h" +#include "sdio-sqn.h" +#include "sdio.h" +#include "thp.h" +#include "sdio-driver.h" +#include "sdio-fw.h" +#include "sdio-pm.h" + +#define SKB_DEBUG 0 +#define SDIO_CLK_DEBUG 0 +#define DUMP_NET_PKT 1 +int dump_net_pkt = 0; + +#define RESET_BY_SDIO 0 //Rollback to default disabled HW Reset +#define RESET_BY_WIMAXTRACKER 0 + +#if RESET_BY_WIMAXTRACKER +#include "sdio_netlink.h" +#endif + +#define SDIO_CLAIM_HOST_DEBUG 0 +int claim_host_dbg = 0; + +#if SDIO_CLAIM_HOST_DEBUG +#define sqn_sdio_claim_host(func) \ +({ \ + struct mmc_host *h = (func)->card->host; \ + u8 was_blocked = 0; \ + if (mmc_wimax_get_cliam_host_status()) { \ + if (h->claimed) { \ + was_blocked = 1; \ + sqn_pr_info("%s: claim_host+ current 0x%p\n", __func__, current); \ + sqn_pr_info("%s: will block\n", __func__); \ + sqn_pr_info("%s: BEFORE claim: claimed %d, claim_cnt %d, claimer 0x%p\n" \ + , __func__, h->claimed, h->claim_cnt, h->claimer); \ + } \ + } \ + sdio_claim_host((func)); \ + if (mmc_wimax_get_cliam_host_status()) { \ + if (was_blocked) { \ + sqn_pr_info("%s: AFTER claim: claimed %d, claim_cnt %d, claimer 0x%p\n" \ + , __func__, h->claimed, h->claim_cnt, h->claimer); \ + sqn_pr_info("%s: claim_host- current 0x%p\n", __func__, current); \ + } \ + } \ +}) + +#define sqn_sdio_release_host(func) \ +({ \ + /* struct mmc_host *h = (func)->card->host; */ \ + /* sqn_pr_info("%s: release_host+\n", __func__); */ \ + /* sqn_pr_info("%s: BEFORE release: claimed %d, claim_cnt %d, claimer 0x%p\n" */ \ + /* , __func__, h->claimed, h->claim_cnt, h->claimer); */ \ + sdio_release_host((func)); \ + /* sqn_pr_info("%s: AFTER release: claimed %d, claim_cnt %d, claimer 0x%p\n" */ \ + /* , __func__, h->claimed, h->claim_cnt, h->claimer); */ \ + /* sqn_pr_info("%s: release_host-\n", __func__); */ \ +}) +#else +#define sqn_sdio_claim_host(func) \ +({ \ + sdio_claim_host((func)); \ +}) + +#define sqn_sdio_release_host(func) \ +({ \ + sdio_release_host((func)); \ +}) +#endif + +static const struct sdio_device_id sqn_sdio_ids[] = { + { SDIO_DEVICE(SDIO_VENDOR_ID_SEQUANS, SDIO_DEVICE_ID_SEQUANS_SQN1130) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_SEQUANS, SDIO_DEVICE_ID_SEQUANS_SQN1210) }, + /* { SDIO_DEVICE(SDIO_ANY_ID, SDIO_ANY_ID) }, */ + { 0 }, +}; +MODULE_DEVICE_TABLE(sdio, sqn_sdio_ids); + +// Wakeup interrupt +extern void mmc_wimax_enable_host_wakeup(int on); + +//HTC:WiMax power ON_OFF function and Card detect function +extern int mmc_wimax_power(int on); +extern void mmc_wimax_set_carddetect(int val); +extern int mmc_wimax_uart_switch(int uart); +extern int mmc_wimax_set_status(int on); +extern int mmc_wimax_get_hostwakeup_gpio(void); +extern int mmc_wimax_get_netlog_status(void); +extern int mmc_wimax_get_cliam_host_status(void); +extern int mmc_wimax_set_CMD53_timeout_trigger_counter(int counter); +extern int mmc_wimax_get_CMD53_timeout_trigger_counter(void); +extern int mmc_wimax_get_sdio_hw_reset(void); +extern int mmc_wimax_get_packet_filter(void); + +extern int mmc_wimax_get_netlog_withraw_status(void); +extern int mmc_wimax_get_sdio_interrupt_log(void); + +/*******************************************************************/ +/* TX handlers */ +/*******************************************************************/ + +static void 
sqn_sdio_add_skb_to_tx_queue(struct sqn_private *priv + , struct sk_buff *skb, u8 tail) +{ + struct sqn_sdio_card *card = priv->card; + + sqn_pr_enter(); + + if (tail) + skb_queue_tail(&card->tx_queue, skb); + else + skb_queue_head(&card->tx_queue, skb); + + if (skb_queue_len(&card->tx_queue) > TX_QUEUE_MAX_LEN + && !netif_queue_stopped(priv->dev)) + { + sqn_pr_info("tx_queue len %d, disabling netif_queue\n" + , skb_queue_len(&card->tx_queue)); + netif_stop_queue(priv->dev); + } + + if (!card->waiting_pm_notification + && !wake_lock_active(&card->wakelock)) { + sqn_pr_dbg("lock wake_lock\n"); + wake_lock(&card->wakelock); + } + sqn_pr_leave(); +} + + +static int sqn_sdio_is_tx_queue_empty(struct sqn_private *priv) +{ + int rv = 0; + struct sqn_sdio_card *card = priv->card; + + sqn_pr_enter(); + + rv = skb_queue_empty(&card->tx_queue); + + sqn_pr_leave(); + + return rv; +} + + +static int sqn_sdio_get_rstn_wr_fifo_flag(struct sqn_private *priv) +{ + struct sqn_sdio_card *card = priv->card; + + sqn_pr_enter(); + + if (0 == card->rstn_wr_fifo_flag) { + int rv = 0; + + sqn_sdio_claim_host(card->func); + card->rstn_wr_fifo_flag = sdio_readb(card->func, + SQN_SDIO_RSTN_WR_FIFO(2), &rv); + + sqn_sdio_release_host(card->func); + sqn_pr_dbg("RSTN_WR_FIFO2 = %d\n", card->rstn_wr_fifo_flag); + if (rv) { + sqn_pr_err("sdio_readb(RSTN_WR_FIFO2) - return error\n"); + card->rstn_wr_fifo_flag = 0; + goto out; + } + } + + sqn_pr_leave(); +out: + return card->rstn_wr_fifo_flag; +} + + +static int sqn_sdio_recover_after_cmd53_timeout(struct sqn_sdio_card *card) +{ + int rv = 0; + + sqn_pr_enter(); + + sqn_pr_info("Try to recovery after SDIO timeout error\n"); + + sqn_sdio_claim_host(card->func); + sdio_writeb(card->func, 1 << card->func->num, SDIO_CCCR_IO_ABORT, &rv); + sqn_sdio_release_host(card->func); + if (rv) { + sqn_pr_err("sdio_writeb(SDIO_CCCR_IO_ABORT) - return error %d\n" + , rv); + } + + sqn_pr_leave(); + + return rv; +} + + +/** +* sqn_sdio_cmd52_read_buf - read @size bytes into @buf buffer from +* address @addr using CMD52 +* @card: sqn sdio card structure +* @buf: buffer to return value, should be and address of u16, u32 variable +* @size: size of the @buf / count of bytes to read from @addr +* +* @return error status - 0 if success, !0 otherwise +*/ +static int sqn_sdio_cmd52_read_buf(struct sqn_sdio_card *card, void* buf, int size, int addr) +{ + u8 tmpbuf[4] = { 0xa7, 0xa7, 0xa7, 0xa7 }; + int i = 0; + int rv = 0; + + sqn_pr_enter(); + sqn_pr_info("Trying to read %d bytes from 0x%x address using CMD52\n", size, addr); + + sqn_sdio_claim_host(card->func); + for (i = 0; i < size; i++) { + tmpbuf[i] = sdio_readb(card->func, addr + i, &rv); + if (rv) { + sqn_pr_err("sdio_readb(%x) - return error %d\n", addr + i, rv); + break; + } + } + sqn_sdio_release_host(card->func); + + switch (size) { + case sizeof(u16): + *((u16*)buf) = le16_to_cpup((__le16 *)tmpbuf); + break; + case sizeof(u32): + *((u32*)buf) = le32_to_cpup((__le32 *)tmpbuf); + break; + default: + sqn_pr_err("unsupported buffer size: %d\n", size); + } + + sqn_pr_leave(); + + return rv; +} + + +static int sqn_sdio_dump_registers(struct sqn_sdio_card *card) +{ + u8 b8 = 0; + u16 b16 = 0; + int rv = 0; + + sqn_pr_enter(); + sqn_pr_info("------------------ REG DUMP BEGIN ------------------\n"); + + sqn_sdio_claim_host(card->func); + b8 = sdio_readb(card->func, SQN_SDIO_IT_STATUS_LSBS, &rv); + if (rv) + sqn_pr_err("can't read SDIO_IT_STATUS_LSBS: %d\n", rv); + else + sqn_pr_info("SDIO_IT_STATUS_LSBS: 0x%x\n", b8); + + b8 = 
sdio_readb(card->func, SQN_SDIO_IT_STATUS_MSBS, &rv); + if (rv) + sqn_pr_err("can't read SDIO_IT_STATUS_MSBS: %d\n", rv); + else + sqn_pr_info("SDIO_IT_STATUS_MSBS: 0x%x\n", b8); + + b8 = sdio_readb(card->func, SQN_SDIO_RSTN_WR_FIFO(2), &rv); + if (rv) + sqn_pr_err("can't read SQN_SDIO_RSTN_WR_FIFO2: %d\n", rv); + else + sqn_pr_info("SQN_SDIO_RSTN_WR_FIFO: 0x%x\n", b8); + + b8 = sdio_readb(card->func, SQN_SOC_SIGS_LSBS, &rv); + if (rv) + sqn_pr_err("can't read SQN_SOC_SIGS_LSBS: %d\n", rv); + else + sqn_pr_info("SQN_SOC_SIGS_LSBS: 0x%x\n", b8); + + b8 = sdio_readb(card->func, SQN_HTS_SIGS, &rv); + if (rv) + sqn_pr_err("can't read SQN_HTS_SIGS: %d\n", rv); + else + sqn_pr_info("SQN_HTS_SIGS: 0x%x\n", b8); + + sqn_sdio_release_host(card->func); + + rv = sqn_sdio_cmd52_read_buf(card, &b16, sizeof(b16), SQN_SDIO_WR_FIFO_BYTESLEFT(2)); + if (rv) + sqn_pr_err("can't read SDIO_WR_FIFO_BYTESLEFT2: %d\n", rv); + else + sqn_pr_info("SDIO_WR_FIFO_BYTESLEFT2: 0x%x\n", b16); + + rv = sqn_sdio_cmd52_read_buf(card, &b16, sizeof(b16), SQN_SDIO_WR_FIFO_LEVEL(2)); + if (rv) + sqn_pr_err("can't read SQN_SDIO_WR_FIFO_LEVEL2: %d\n", rv); + else + sqn_pr_info("SQN_SDIO_WR_FIFO_LEVEL2: 0x%x\n", b16); + + rv = sqn_sdio_cmd52_read_buf(card, &b16, sizeof(b16), SQN_SDIO_RD_FIFO_LEVEL(2)); + if (rv) + sqn_pr_err("can't read SQN_SDIO_RD_FIFO_LEVEL2: %d\n", rv); + else + sqn_pr_info("SQN_SDIO_RD_FIFO_LEVEL2: 0x%x\n", b16); + + rv = sqn_sdio_cmd52_read_buf(card, &b16, sizeof(b16), SDIO_CMN_CISTPLMID_MANF); + if (rv) + sqn_pr_err("can't read SDIO_CMN_CISTPLMID_MANF: %d\n", rv); + else + sqn_pr_info("SDIO_CMN_CISTPLMID_MANF: 0x%x\n", b16); + + rv = sqn_sdio_cmd52_read_buf(card, &b16, sizeof(b16), SDIO_CMN_CISTPLMID_CARD); + if (rv) + sqn_pr_err("can't read SDIO_CMN_CISTPLMID_CARD: %d\n", rv); + else + sqn_pr_info("SDIO_CMN_CISTPLMID_CARD: 0x%x\n", b16); + + sqn_pr_info("------------------ REG DUMP END ------------------\n"); + sqn_pr_leave(); + + return rv; +} + + +static int sqn_sdio_get_wr_fifo_level(struct sqn_private *priv) +{ + int level = 0; + int rv = 0; + struct sqn_sdio_card *card = priv->card; + + sqn_pr_enter(); + + sqn_sdio_claim_host(card->func); + /* level = sdio_readw(card->func, SQN_SDIO_WR_FIFO_LEVEL(2), &rv); */ + level = sdio_readl(card->func, 0x2050, &rv); + level = (u32)level >> sizeof(u16); + sqn_sdio_release_host(card->func); + sqn_pr_dbg("SQN_SDIO_WR_FIFO_LEVEL2 = %d\n", level); + if (rv) { + sqn_pr_err("sdio_readw(WR_FIFO_LEVEL2) error %d\r", rv); + level = -1; + if (-ETIMEDOUT == rv) + sqn_pr_info("SDIO CMD53 timeout error\n"); + /* sqn_sdio_recover_after_cmd53_timeout(card); */ + /* sqn_sdio_dump_registers(card); */ + goto out; + } + + sqn_pr_leave(); +out: + return level; +} + +#if DUMP_NET_PKT +uint8_t is_thp_packet(uint8_t *dest_addr); +int is_lsp_packet(const struct sk_buff *skb); +#endif + +struct sk_buff* sqn_sdio_prepare_skb_for_tx(struct sk_buff *skb) +{ +#define PDU_LEN_SIZE 2 +#define CRC_SIZE 4 +#define PAD_TO_VALUE 512 + +#if DUMP_NET_PKT + struct ethhdr *eth = (struct ethhdr *)skb->data; +#endif + + /* + * Calculate padding, to workaround some SDIO controllers we need to pad + * each TX buffer so it size will be a multiple of PAD_TO_VALUE + */ + u32 padding = (skb->len + PDU_LEN_SIZE + CRC_SIZE) % PAD_TO_VALUE ? 
+ PAD_TO_VALUE - (skb->len + PDU_LEN_SIZE + CRC_SIZE) % PAD_TO_VALUE : 0; + + sqn_pr_enter(); + + sqn_pr_dbg("length %d, padding %d\n", skb->len, padding); + + if (skb->len > (SQN_MAX_PDU_LEN - (PDU_LEN_SIZE + CRC_SIZE + padding))) + return 0; + +#if DUMP_NET_PKT + if (mmc_wimax_get_netlog_status()) { + sqn_pr_info("[PRT]-------------------------------------------------------------------\n"); + sqn_pr_info("TX PDU length %d\n", skb->len); + + if (is_thp_packet(eth->h_source)) { + sqn_pr_info("TX THP packet\n"); + } + else if (is_lsp_packet(skb)) { + sqn_pr_info("TX LSP packet\n"); + } + else if (!is_thp_packet(eth->h_source) && !is_lsp_packet(skb)) { + sqn_pr_info_dump("TX PDU", skb->data, skb->len); + } + } + + if (mmc_wimax_get_netlog_withraw_status()) { + // if (!is_thp_packet(eth->h_source) && !is_lsp_packet(skb)) + { + sqn_pr_info("[RAW]-------------------------------------------------------------------\n"); + sqn_pr_info("TX PDU length %d\n", skb->len); + sqn_pr_info_dump_rawdata("TX PDU", skb->data, skb->len); + } + } +#endif + + if (mmc_wimax_get_packet_filter()) { + if (sqn_filter_packet_check("TX PDU", skb->data, skb->len)) { + // sqn_pr_info("Drop TX packets len:%d\n", skb->len); + return 0; + } + } + + /* + * Real size of the PDU is data_len + 2 bytes at begining of PDU + * for pdu_size + 4 bytes at the end of PDU for CRC of data + */ + if (skb_headroom(skb) < PDU_LEN_SIZE || skb_tailroom(skb) < CRC_SIZE + padding) { + struct sk_buff *origin_skb = skb; + gfp_t gfp_mask = GFP_DMA; + if (in_interrupt() || irqs_disabled()) + gfp_mask |= GFP_ATOMIC; + else + gfp_mask |= GFP_KERNEL; + sqn_pr_dbg("relocating TX skb, GFP mask %x\n", gfp_mask); +#if SKB_DEBUG + sqn_pr_info("%s: [0x%p] old before reloc, users %d\n", __func__, origin_skb, atomic_read(&origin_skb->users)); +#endif + skb = skb_copy_expand(skb, PDU_LEN_SIZE, CRC_SIZE + padding + , gfp_mask); +#if SKB_DEBUG + sqn_pr_info("%s: [0x%p] old after reloc, users %d\n", __func__, origin_skb, atomic_read(&origin_skb->users)); +#endif + dev_kfree_skb_any(origin_skb); + if (0 == skb) { + /* An error occured, likely there is no memory to + * expand skb, so we drop it. 
+ */ + return 0; + } +#if SKB_DEBUG + sqn_pr_info("%s: [0x%p] new relocated, users %d\n", __func__, skb, atomic_read(&skb->users)); +#endif + } else { + sqn_pr_dbg("TX skb: headroom = %d tailroom = %d\n" + , skb_headroom(skb), skb_tailroom(skb)); + } + + /* + * Add size of PDU before ethernet frame + * It should be in little endian byte order + */ + *((u8*)skb->data -2) = (skb->len + CRC_SIZE) & 0xff; + *((u8*)skb->data -1) = ((skb->len + CRC_SIZE) >> 8) & 0xff; + skb_push(skb, PDU_LEN_SIZE); + + /* + * Add CRC to the end of ethernet frame + * Now it simply set to 0 + */ + memset(skb->tail, 0, CRC_SIZE); + skb_put(skb, CRC_SIZE + padding); + + sqn_pr_leave(); + + return skb; +} + + +int sqn_sdio_tx_skb(struct sqn_sdio_card *card, struct sk_buff *skb + , u8 claim_host) +{ + int rv = 0; + + sqn_pr_enter(); + + if (claim_host) + sqn_sdio_claim_host(card->func); + + rv = sdio_writesb(card->func, SQN_SDIO_RDWR_FIFO(2), skb->data, + skb->len); + if (rv) { + sqn_pr_err("call to sdio_writesb(RDWR_FIFO2) - return error %d\n", rv); + if (-ETIMEDOUT == rv) { + if (claim_host) { + sqn_sdio_release_host(card->func); + claim_host = 0; + } + sqn_pr_info("SDIO CMD53 timeout error: TX PDU length %d, PDU[0] 0x%x, PDU[1] 0x%x\n" + , skb->len, *((u8*)skb->data), *((u8*)skb->data + 1)); + /* sqn_sdio_dump_registers(card); */ + /* sqn_sdio_recover_after_cmd53_timeout(card); */ + } + goto release; + } +release: + if (claim_host) + sqn_sdio_release_host(card->func); +#if SKB_DEBUG + sqn_pr_info("%s: free skb [0x%p] after tx, users %d\n", __func__, skb, atomic_read(&skb->users)); +#endif + dev_kfree_skb_any(skb); + + sqn_pr_leave(); + return rv; +} + + +static void sqn_sdio_wake_lock_release_timer_fn(unsigned long data) +{ + struct sqn_sdio_card *card = (struct sqn_sdio_card*) data; + + sqn_pr_enter(); + + /* if TX and RX queues are empty, we can releas a wake_lock */ + if (wake_lock_active(&card->wakelock) + && skb_queue_empty(&card->tx_queue) + && skb_queue_empty(&card->rx_queue)) + { + sqn_pr_dbg("wake_lock is active, release it\n"); + + wake_unlock(&card->wakelock); + } + + sqn_pr_leave(); +} + +static void sqn_sdio_release_wake_lock(struct sqn_sdio_card *card) +{ + u32 delay = 0; + + sqn_pr_enter(); + +// #define SQN_WAKE_LOCK_RELEASE_DELAY_SECONDS 5 +#define SQN_WAKE_LOCK_RELEASE_DELAY_SECONDS 1 + + /* if TX and RX queues are empty, we will wait some time before + * doing actual wake_lock release */ + if (wake_lock_active(&card->wakelock) + && skb_queue_empty(&card->tx_queue) + && skb_queue_empty(&card->rx_queue)) + { + sqn_pr_dbg("shedule wake_lock release in %d sec\n" + , SQN_WAKE_LOCK_RELEASE_DELAY_SECONDS); + + delay = jiffies + msecs_to_jiffies( + SQN_WAKE_LOCK_RELEASE_DELAY_SECONDS * MSEC_PER_SEC); + + mod_timer(&card->wakelock_timer, delay); + } + +#undef SQN_WAKE_LOCK_RELEASE_DELAY_SECONDS + sqn_pr_leave(); +} + +static int sqn_sdio_host_to_card(struct sqn_private *priv) +{ + struct sqn_sdio_card *card = priv->card; + struct sk_buff *skb = 0; + unsigned long irq_flags = 0; + int level = 0; + int rv = 0; + u8 need_to_ulock_mutex = 0; + + sqn_pr_enter(); + + if (priv->removed) { + // sqn_pr_warn("%s: card/driver is removed, do nothing\n", __func__); // Andrew 0524 + goto drv_removed; + } + + spin_lock_irqsave(&priv->drv_lock, irq_flags); + if (card->is_card_sleeps) { + spin_unlock_irqrestore(&priv->drv_lock, irq_flags); + /* + * Ignore return value of sqn_wakeup_fw() and try + * to send PDU even if wake up failed + */ + sqn_wakeup_fw(card->func); + } else { + spin_unlock_irqrestore(&priv->drv_lock, 
irq_flags); + } + + if (0 == sqn_sdio_get_rstn_wr_fifo_flag(priv)) { + rv = -1; + goto dequeue_skb; + } + + sqn_pr_dbg("acquire TX mutex\n"); + if (!mutex_trylock(&card->tx_mutex)) { + sqn_pr_dbg("failed to acquire TX mutex, it means we are going" + " to remove a network interface\n"); + need_to_ulock_mutex = 0; + goto out; + } + need_to_ulock_mutex = 1; + + while (!priv->removed && !sqn_sdio_is_tx_queue_empty(priv)) { + skb = skb_dequeue(&card->tx_queue); + if (0 != (skb = sqn_sdio_prepare_skb_for_tx(skb))) { + if (0 == level) { + int count = 20; + while (0 == (level = sqn_sdio_get_wr_fifo_level(priv))) { + if (0 == count--) { + sqn_pr_err("WR_FIFO_LEVEL2 timeout\n"); + rv = -1; + goto free_skb; + } + mdelay(1); + } + + if (level < 0) { + rv = -1; + goto free_skb; + } + } + + sqn_sdio_tx_skb(card, skb, 1); + --level; + + if (!card->waiting_pm_notification + && netif_queue_stopped(priv->dev) + && skb_queue_len(&card->tx_queue) < TX_QUEUE_WM_LEN) + { + sqn_pr_info("tx_queue len %d, enabling netif_queue\n" + , skb_queue_len(&card->tx_queue)); + netif_wake_queue(priv->dev); + } else { + sqn_pr_dbg("tx_queue len %d\n" + , skb_queue_len(&card->tx_queue)); + } + } else { + priv->stats.tx_dropped++; + priv->stats.tx_errors++; + } + } +out: + if (need_to_ulock_mutex && mutex_is_locked(&card->tx_mutex)) { + mutex_unlock(&card->tx_mutex); + sqn_pr_dbg("release TX mutex\n"); + } + + sqn_sdio_release_wake_lock(card); + + if ((0 != rv) || (mmc_wimax_get_CMD53_timeout_trigger_counter())) { + /* + * Failed to send PDU - assume that card was removed or + * crashed/reset so initiate card detection. + */ + + if (mmc_wimax_get_CMD53_timeout_trigger_counter()) { + sqn_pr_info("Force CMD53 timeout to reset SDIO!\n"); + mmc_wimax_set_CMD53_timeout_trigger_counter(mmc_wimax_get_CMD53_timeout_trigger_counter()-1); + } + + // Reset WiMAX chip + if (mmc_wimax_get_sdio_hw_reset()) { // mmc_wimax_get_sdio_hw_reset [ + sqn_pr_info("reset WiMAX chip by SDIO\n"); + + // HW Reset + mmc_wimax_power(0); + mdelay(5); + mmc_wimax_power(1); + // To avoid re-initialized SDIO card failed + priv->removed = 1; + + sqn_pr_err("card seems to be dead/removed - initiate reinitialization\n"); + mmc_detect_change(card->func->card->host, 1); + + // SW Reset + // It could avoid we hang in SDIO CMD53 timeout and recovery wimax again. 
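+ /*
+ * Recovery summary (editor's note, derived from the code around it): a TX
+ * failure, or a CMD53 timeout forced through the trigger counter, is treated
+ * as a dead or removed card. The hardware reset above power-cycles the chip,
+ * marks the driver instance as removed and asks the MMC core to rescan the
+ * slot so the card gets re-probed. The commented-out block below is the
+ * alternative software reset path (write 0 to SQN_H_GRSTN), kept for
+ * reference only.
+ */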
+ /* + netif_carrier_off(priv->dev); + priv->removed = 1; + sqn_sdio_claim_host(card->func); + // software card reset + sdio_writeb(card->func, 0, SQN_H_GRSTN, &rv); + sqn_sdio_release_host(card->func); + mmc_detect_change(card->func->card->host, 0); + */ + } + else + { +#if RESET_BY_WIMAXTRACKER + sqn_pr_info("reset WiMAX chip by WimaxTracker\n"); + udp_broadcast(1,"ResetWimax_BySDIO\n"); +#else + sqn_pr_info("No reset WiMAX chip\n"); +#endif + } // mmc_wimax_get_sdio_hw_reset ] + } +drv_removed: + sqn_pr_leave(); + return rv; + +dequeue_skb: + if (!sqn_sdio_is_tx_queue_empty(priv)) { + sqn_pr_dbg("remove skb from TX queue because of error\n"); + skb = skb_dequeue(&card->tx_queue); + } +free_skb: + sqn_pr_dbg("free TX skb because of error\n"); + dev_kfree_skb_any(skb); + priv->stats.tx_dropped++; + priv->stats.tx_errors++; + goto out; +} + + +/*******************************************************************/ +/* RX handlers */ +/*******************************************************************/ +static void sqn_sdio_process_rx_queue(struct work_struct *work) +{ + struct sqn_private *priv = container_of(work, struct sqn_private + , rx_work_struct); + struct sqn_sdio_card *card = (struct sqn_sdio_card*) priv->card; + struct sk_buff *skb = 0; + u8 need_to_ulock_mutex = 0; + + sqn_pr_enter(); + + sqn_pr_dbg("acquire RXQ mutex\n"); + if (!mutex_trylock(&card->rxq_mutex)) { + sqn_pr_dbg("failed to acquire RXQ mutex, it means we are going" + " to remove a network interface\n"); + need_to_ulock_mutex = 0; + goto out; + } + need_to_ulock_mutex = 1; + + while (!priv->removed && 0 != (skb = skb_dequeue(&card->rx_queue))) { + sqn_rx_process(card->priv->dev, skb); + if (waitqueue_active(&priv->rx_waitq) + && skb_queue_len(&card->rx_queue) < RX_QUEUE_WM_LEN) + { + sqn_pr_info("rx_queue len %d, enabling rx\n" + , skb_queue_len(&card->rx_queue)); + wake_up_interruptible(&priv->rx_waitq); + } + } +out: + if (need_to_ulock_mutex && mutex_is_locked(&card->rxq_mutex)) { + mutex_unlock(&card->rxq_mutex); + sqn_pr_dbg("release RXQ mutex\n"); + } + + sqn_sdio_release_wake_lock(card); + sqn_pr_leave(); +} + + +static int sqn_sdio_card_to_host(struct sqn_sdio_card *card) +{ + u16 level = 0; + int rv = 0; + u8 need_to_ulock_mutex = 0; + + sqn_pr_enter(); + +#if SDIO_CLAIM_HOST_DEBUG + if (mmc_wimax_get_cliam_host_status()) { + sqn_pr_info("%s+\n", __func__); + } +#endif + + if (card->priv->removed) { + // sqn_pr_warn("%s: card/driver is removed, do nothing\n", __func__); // Andrew 0524 + goto drv_removed; + } + + sqn_pr_dbg("acquire RX mutex\n"); + if (!mutex_trylock(&card->rx_mutex)) { + sqn_pr_dbg("failed to acquire RX mutex, it means we are going" + " to remove a network interface\n"); + need_to_ulock_mutex = 0; + goto out; + } + need_to_ulock_mutex = 1; + + /* + * NOTE: call to sqn_sdio_claim_host() is already done + * in sqn_sdio_it_lsb() - our caller + */ +check_level: + /* Find out how many PDUs we have to read */ + level = sdio_readw(card->func, SQN_SDIO_RD_FIFO_LEVEL(2), &rv); + if (rv) { + sqn_pr_err("ERROR reading SDIO_RD_FIFO_LEVEL\n"); + goto out; + } + + if (level == 0) { + sqn_pr_dbg("no more PDUs to read\n"); + if (rv < 0) + sqn_pr_warn("%s: no more PDUs left but status = %d\n", __func__, rv); + goto out; + } + + sqn_pr_dbg("PDUs to read %d\n", level); + + while (!card->priv->removed && level--) { + struct sk_buff *skb = 0; +#if DUMP_NET_PKT + struct ethhdr *eth = 0; +#endif + u16 size = 0; + +#if SDIO_CLAIM_HOST_DEBUG + if (mmc_wimax_get_cliam_host_status()) { + sqn_pr_info("%s: 0\n", 
__func__); + } +#endif + + /* Get the size of PDU */ + size = sdio_readw(card->func, SQN_SDIO_RDLEN_FIFO(2), &rv); + if (rv) { + sqn_pr_err("can't get FIFO read length, status = %d\n", rv); + goto out; + } + sqn_pr_dbg("PDU #%u length %u\n", (u32)level, (u32)size); + + if (size > SQN_SDIO_PDU_MAXLEN || size < 1) { + sqn_pr_err("RX PDU length %u is not correct\n", + (u32)size); + card->priv->stats.rx_length_errors++; + card->priv->stats.rx_errors++; + continue; + } + + skb = __netdev_alloc_skb(card->priv->dev, SQN_SDIO_PDU_MAXLEN + , GFP_ATOMIC | GFP_DMA); + if (0 == skb) { + sqn_pr_err("failed to alloc RX buffer\n"); + rv = -ENOMEM; + goto out; + } +#if SKB_DEBUG + sqn_pr_info("%s: alloc skb [0x%p], users %d\n", __func__, skb, atomic_read(&skb->users)); +#endif + + rv = sdio_readsb(card->func, skb->data, SQN_SDIO_RDWR_FIFO(2), + (int)size); + if (rv) { + sqn_pr_err("RX PDU read failed: %d\n", rv); + continue; + } + skb_put(skb, size); + +#if DUMP_NET_PKT + if (mmc_wimax_get_netlog_status()) { + eth = (struct ethhdr *)skb->data; + + sqn_pr_info("[PRT]-------------------------------------------------------------------\n"); + sqn_pr_info("RX PDU length %d\n", skb->len); + + if (is_thp_packet(eth->h_dest)) { + sqn_pr_info("RX THP packet\n"); + } + else if (is_lsp_packet(skb)) { + sqn_pr_info("RX LSP packet\n"); + } + else if (!is_thp_packet(eth->h_dest) && !is_lsp_packet(skb)) { + sqn_pr_info_dump("RX PDU", skb->data, skb->len); + } + } + + if (mmc_wimax_get_netlog_withraw_status()) { + eth = (struct ethhdr *)skb->data; + // if (!is_thp_packet(eth->h_dest) && !is_lsp_packet(skb)) + { + sqn_pr_info("[RAW]-------------------------------------------------------------------\n"); + sqn_pr_info("RX PDU length %d\n", skb->len); + sqn_pr_info_dump_rawdata("RX PDU", skb->data, skb->len); + } + } +#endif + + if (sqn_handle_lsp_packet(card->priv, skb)) { +#if SDIO_CLAIM_HOST_DEBUG + if (mmc_wimax_get_cliam_host_status()) { + sqn_pr_info("%s: 1\n", __func__); + } +#endif + continue; + } + + /* + * If we have some not LSP PDUs to read, then card is not + * asleep any more, so we should notify waiters about this + */ + +#if SDIO_CLAIM_HOST_DEBUG + if (mmc_wimax_get_cliam_host_status()) { + sqn_pr_info("%s: 2\n", __func__); + } +#endif + + if (card->is_card_sleeps) { + sqn_pr_info("got RX data, card is not asleep\n"); + /* signal_card_sleep_completion(card->priv); */ + card->is_card_sleeps = 0; +#if SDIO_CLAIM_HOST_DEBUG + if (mmc_wimax_get_cliam_host_status()) { + sqn_pr_info("%s: 3\n", __func__); + } +#endif + } + + if (!card->waiting_pm_notification + && !wake_lock_active(&card->wakelock)) + { + sqn_pr_dbg("lock wake_lock\n"); + wake_lock(&card->wakelock); + } + + + /* + * Don't use internal RX queue, because kernel has its own. 
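+ * (sqn_rx_process() is called right below instead of queueing; the
+ * commented-out skb_queue_tail()/rx_work_struct code that follows is the
+ * old internal-queue variant, kept only for reference).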
+ * Just push RX packet directly to kernel + */ + sqn_rx_process(card->priv->dev, skb); + + /* skb_queue_tail(&card->rx_queue, skb); */ + /* if (skb_queue_len(&card->rx_queue) > RX_QUEUE_MAX_LEN) { */ + /* int rv = 0; */ + /* sqn_pr_info("rx_queue len %d, wait untill it'll be processed\n" */ + /* , skb_queue_len(&card->rx_queue)); */ + /* schedule_work(&card->priv->rx_work_struct); */ + /* rv = wait_event_interruptible(card->priv->rx_waitq */ + /* , skb_queue_len(&card->rx_queue) <= RX_QUEUE_WM_LEN); */ + /* |+ */ + /* * If we've been interrupted by a signal, then we */ + /* * should stop and return */ + /* +| */ + /* if (0 != rv) { */ + /* sqn_pr_warn("got a signal from kernel %d\n", rv); */ + /* goto out; */ + /* } */ + /* sqn_pr_info("rx_queue len %d, continue RX PDUs processing\n" */ + /* , skb_queue_len(&card->rx_queue)); */ + /* } */ + } + + /* sqn_pr_dbg("rx_queue len %d\n" */ + /* , skb_queue_len(&card->rx_queue)); */ + + /* schedule_work(&card->priv->rx_work_struct); */ + + sqn_pr_dbg("check is there more PDU to read\n"); + goto check_level; +out: + sqn_sdio_release_wake_lock(card); + if (need_to_ulock_mutex && mutex_is_locked(&card->rx_mutex)) { + mutex_unlock(&card->rx_mutex); + sqn_pr_dbg("release RX mutex\n"); + } + +drv_removed: + +#if SDIO_CLAIM_HOST_DEBUG + if (mmc_wimax_get_cliam_host_status()) { + sqn_pr_info("%s-\n", __func__); + } +#endif + sqn_pr_leave(); + return rv; +} + + +/*******************************************************************/ +/* Interrupt handling */ +/*******************************************************************/ + +static int sqn_sdio_it_lsb(struct sdio_func *func) +{ + struct sqn_sdio_card *card = sdio_get_drvdata(func); + int rc = 0; + u8 status = 0; + unsigned long irq_flags = 0; + u8 is_card_sleeps = 0; + + sqn_pr_enter(); + +#if SDIO_CLAIM_HOST_DEBUG + /* sqn_pr_info("%s+\n", __func__); */ +#endif + + /* NOTE: call of sqn_sdio_claim_host() is already done */ + + /* Read the interrupt status */ + status = sdio_readb(func, SQN_SDIO_IT_STATUS_LSBS, &rc); + if (rc) + goto out; + + sqn_pr_dbg("interrupt(LSB): 0x%02X\n", (unsigned char) status); + + spin_lock_irqsave(&card->priv->drv_lock, irq_flags); + is_card_sleeps = card->is_card_sleeps; + spin_unlock_irqrestore(&card->priv->drv_lock, irq_flags); + + /* Handle interrupt */ + if (status & SQN_SDIO_IT_WR_FIFO2_WM) { + sqn_pr_dbg("skipping FIFO2 write watermark interrupt...\n"); + + /* Clear interrupt flag */ + sdio_writeb(func, SQN_SDIO_IT_WR_FIFO2_WM, + SQN_SDIO_IT_STATUS_LSBS, &rc); + } + + if (status & SQN_SDIO_IT_RD_FIFO2_WM) { + rc = sqn_sdio_card_to_host(card); + if (rc) + sqn_pr_err("can't read data from card, error %d\n", rc); + + /* Clear interrupt flag */ + sdio_writeb(func, SQN_SDIO_IT_RD_FIFO2_WM, + SQN_SDIO_IT_STATUS_LSBS, &rc); + } + +out: + sqn_pr_dbg("returned code: %d\n", rc); + +#if SDIO_CLAIM_HOST_DEBUG + /* sqn_pr_info("%s-\n", __func__); */ +#endif + + sqn_pr_leave(); + return rc; +} + + +static int sqn_sdio_it_msb(struct sdio_func *func) +{ + int rc = 0; + u8 status = 0; + + sqn_pr_enter(); + +#if SDIO_CLAIM_HOST_DEBUG + /* sqn_pr_info("%s+\n", __func__); */ +#endif + + /* Read the interrupt status */ + status = sdio_readb(func, SQN_SDIO_IT_STATUS_MSBS, &rc); + if (rc) + goto out; + + sqn_pr_dbg("interrupt(MSB): 0x%02X\n", (unsigned char) status); + + /* TODO: Handle interrupt */ + sqn_pr_dbg("skipping any interrupt...\n"); + + /* Clear interrupt flag */ + sdio_writeb(func, 0xff, SQN_SDIO_IT_STATUS_MSBS, &rc); + +out: + sqn_pr_dbg("returned code: %d\n", rc); + +#if 
SDIO_CLAIM_HOST_DEBUG + /* sqn_pr_info("%s-\n", __func__); */ +#endif + + sqn_pr_leave(); + return rc; +} + + +/* + * defined in "drivers/mmc/omap2430_hsmmc.c" + * in linux kernel from TI + */ +int sdio_int_enable(int enable, int slot); + + +void sqn_sdio_interrupt(struct sdio_func *func) +{ + unsigned long irq_flags = 0; + u8 is_card_sleeps = 0; + struct sqn_sdio_card *card = sdio_get_drvdata(func); + +#if SDIO_CLAIM_HOST_DEBUG + struct mmc_host *h = (func)->card->host; +#endif + + sqn_pr_enter(); + +#if SDIO_CLAIM_HOST_DEBUG + /* sqn_pr_info("%s+\n", __func__); */ +#endif + +#if SDIO_CLAIM_HOST_DEBUG + if (mmc_wimax_get_cliam_host_status()) { + sqn_pr_info("%s+: mmc_host: claimed %d, claim_cnt %d, claimer 0x%p\n" + , __func__, h->claimed, h->claim_cnt, h->claimer); + } +#endif + + sqn_sdio_it_lsb(func); + + spin_lock_irqsave(&card->priv->drv_lock, irq_flags); + is_card_sleeps = card->is_card_sleeps; + spin_unlock_irqrestore(&card->priv->drv_lock, irq_flags); + + if (!is_card_sleeps) + sqn_sdio_it_msb(func); + +#if SDIO_CLAIM_HOST_DEBUG + if (mmc_wimax_get_cliam_host_status()) { + sqn_pr_info("%s-\n", __func__); + } +#endif + + sqn_pr_leave(); +} + + +static int sqn_sdio_it_enable(struct sdio_func *func) +{ + u8 enable = 0; + int rv = 0; + + sqn_pr_enter(); + sqn_sdio_claim_host(func); + /* enable LSB */ + enable = SQN_SDIO_IT_WR_FIFO2_WM | SQN_SDIO_IT_RD_FIFO2_WM | + SQN_SDIO_IT_SW_SIGN; + + sdio_writeb(func, enable, SQN_SDIO_IT_EN_LSBS, &rv); + sqn_pr_dbg("enabled LSBS interrupt: rv=0x%02X\n", rv); + if (rv) + goto out; + + sqn_pr_dbg("enabled interrupt(LSB): 0x%02X\n", + (unsigned char) enable); + + /* Set RD watermark to enable interrups for RX packets */ + sdio_writew(func, 1, SQN_SDIO_WM_RD_FIFO(2), &rv); + sqn_pr_dbg("enabled rd watermark: rv=%d\n", rv); + if (rv) { + sqn_pr_err("can't enable rd watermark: rv=%d\n", rv); + goto out; + } +out: + sqn_sdio_release_host(func); + sqn_pr_dbg("returned code: %d\n", rv); + sqn_pr_leave(); + return rv; +} + + +static int sqn_sdio_it_disable(struct sdio_func *func) +{ + int rc = 0; + + sqn_pr_enter(); + sqn_sdio_claim_host(func); + + /* disable LSB */ + sdio_writeb(func, 0, SQN_SDIO_IT_EN_LSBS, &rc); + if (rc) + goto out; + sqn_pr_dbg("disabled interrupt(LSB)\n"); + + /* disable MSB */ + sdio_writeb(func, 0, SQN_SDIO_IT_EN_MSBS, &rc); + if (rc) + goto out; + sqn_pr_dbg("disabled interrupt(MSB)\n"); +out: + sqn_pr_dbg("returned code: %d\n", rc); + sqn_sdio_release_host(func); + sqn_pr_leave(); + return rc; +} + + +void sqn_sdio_stop_it_thread_from_itself(struct sqn_private *priv) +{ + struct sqn_sdio_card *card = priv->card; + unsigned long irq_flags = 0; + sqn_pr_enter(); + + spin_lock_irqsave(&priv->drv_lock, irq_flags); + card->it_thread_should_stop = 1; + spin_unlock_irqrestore(&priv->drv_lock, irq_flags); + + sqn_pr_leave(); +} + +/*******************************************************************/ +/* Driver registration */ +/*******************************************************************/ + +#ifdef DEBUG +static void sqn_sdio_debug_test(struct sdio_func *func) +{ + /* int rc = 0; */ + /* int val = 0; */ + + sqn_pr_enter(); + sqn_sdio_claim_host(func); + + /* sqn_pr_dbg("write SQN_SOC_SIGS_LSBS\n"); */ + /* sdio_writeb(func, 1, SQN_SOC_SIGS_LSBS, &rc); */ + /* if (rc) */ + /* sqn_pr_dbg("error when writing to SQN_SOC_SIGS_LSBS: %d\n", rc); */ + +#if 0 + sqn_pr_dbg("readb 0x04\n"); + val = sdio_readb(func, 0x04, &rc); + if (rc) + sqn_pr_dbg("readb 0x04 failed %x\n", rc); + else + sqn_pr_dbg("readb 0x04 = %x\n", val); + + 
sqn_pr_dbg("readb 0x2028\n"); + val = sdio_readb(func, 0x2028, &rc); + if (rc) + sqn_pr_dbg("readb 0x2028 failed %x\n", rc); + else + sqn_pr_dbg("readb 0x2028 = %x\n", val); + + sqn_pr_dbg("readw 0x2028\n"); + val = sdio_readw(func, 0x2028, &rc); + if (rc) + sqn_pr_dbg("readw 0x2028 failed %x\n", rc); + else + sqn_pr_dbg("readw 0x2028 = %x\n", val); + + sqn_pr_dbg("readb RSTN\n"); + val = sdio_readb(func, SQN_SDIO_RSTN_WR_FIFO(2), &rc); + if (rc) + sqn_pr_dbg("readb RSTN failed %x\n", rc); + else + sqn_pr_dbg("readb RSTN = %x\n", val); + + sqn_pr_dbg("readl LEVEL\n"); + val = sdio_readw(func, SQN_SDIO_WR_FIFO_LEVEL(2), &rc); + if (rc) + sqn_pr_dbg("readl LEVEL failed %x\n", rc); + else + sqn_pr_dbg("readl LEVEL = %x\n", val); + + sqn_pr_dbg("readl 0x2060\n"); + val = sdio_readl(func, 0x2060, &rc); + if (rc) + sqn_pr_dbg("readl 0x2060 failed %x\n", rc); + else + sqn_pr_dbg("readl 0x2060 = %x\n", val); + + sqn_pr_dbg("writew SQN_SDIO_WM_RD_FIFO(2)\n"); + sdio_writel(func, 1, SQN_SDIO_WM_RD_FIFO(2), &rc); + if (rc) + sqn_pr_dbg("writel SQN_SDIO_WM_RD_FIFO(2) failed %x\n", rc); + else + sqn_pr_dbg("writel SQN_SDIO_WM_RD_FIFO(2) = %x\n", rc); +#endif + + sqn_sdio_release_host(func); + sqn_pr_leave(); +} + + +static void sqn_sdio_print_debug_info(struct sdio_func *func) +{ + sqn_pr_enter(); + + sqn_pr_info("sdio_func: device[%02x]: %04x:%04x\n", func->class, func->vendor, + func->device); + sqn_pr_info("sdio_func: block size: %d (maximum %d)\n", func->cur_blksize, + func->max_blksize); + sqn_pr_info("sdio_func: func->state: 0x%04x, card->state: 0x%04x\n" + , func->state, func->card->state); + sqn_pr_info("mmc_bus: clock=%u, width=%u, mode=%u, vdd=%u\n" + , func->card->host->ios.clock + , func->card->host->ios.bus_width + , func->card->host->ios.bus_mode + , func->card->host->ios.vdd); + + sqn_pr_dbg("host->caps=%x\n", (u32) func->card->host->caps); + + sqn_pr_leave(); +} +#endif /* DEBUG */ + + +static void sqn_sdio_free_tx_queue(struct sqn_sdio_card *card) +{ + struct sk_buff *skb = 0; + while (0 != (skb = skb_dequeue(&card->tx_queue))) + dev_kfree_skb_any(skb); +} + + +static void sqn_sdio_free_rx_queue(struct sqn_sdio_card *card) +{ + struct sk_buff *skb = 0; + while (0 != (skb = skb_dequeue(&card->rx_queue))) + dev_kfree_skb_any(skb); +} + + +static int check_boot_from_host_mode(struct sdio_func *func) +{ + int rv = 0; + int status = 0; + + sqn_sdio_claim_host(func); + status = sdio_readb(func, SQN_H_BOOT_FROM_SPI, &rv); + sqn_sdio_release_host(func); + + if (rv) + { + sqn_pr_err("can't read boot flags from device"); + return 0; + } + + return !status; +} + + +static int sqn_check_card_id(struct sdio_func *func) +{ + int rv = 0; + unsigned short manf_id = 0; + unsigned short card_id = 0; + + sqn_pr_enter(); + sqn_pr_info("Checking card IDs...\n"); + + manf_id = sdio_readw(func, SDIO_CMN_CISTPLMID_MANF, &rv); + if (rv) { + sqn_pr_err("can't read card manufacturer id\n"); + rv = 0; + goto out; + } + + card_id = sdio_readw(func, SDIO_CMN_CISTPLMID_CARD, &rv); + if (rv) { + sqn_pr_err("can't read card id\n"); + rv = 0; + goto out; + } + + if (manf_id != SDIO_VENDOR_ID_SEQUANS + || card_id != SDIO_DEVICE_ID_SEQUANS_SQN1130) + { + sqn_pr_info("found card with UNSUPPORTED manf_id=%x card_id=%x\n" + , manf_id, card_id); + rv = 0; + } else { + sqn_pr_info("found card with SUPPORTED manf_id=%x card_id=%x\n" + , manf_id, card_id); + rv = 1; + } + +out: + sqn_pr_leave(); + return rv; +} + + +static u8 sqn_get_card_version(struct sdio_func *func) +{ + int rv = 0; + u32 version = 0; + + sqn_pr_enter(); + 
+ switch (func->device) { + case SDIO_DEVICE_ID_SEQUANS_SQN1130: + sqn_pr_info("found SQN1130 card\n"); + /* + * Let bootrom/firmware name to be overridden from userspace as + * a module parameter, so we change it only if it was not + * changed from its default value + */ + if (0 == strcmp(firmware_name, SQN_DEFAULT_FW_NAME)) + firmware_name = fw1130_name; + rv = SQN_1130; + break; + case SDIO_DEVICE_ID_SEQUANS_SQN1210: + sqn_pr_info("found SQN1210 card\n"); + /* + * Let firmware_name to be overridden from userspace as a module + * parameter, so we change firmware_name only if it was not + * changed from its default value + */ + if (0 == strcmp(firmware_name, SQN_DEFAULT_FW_NAME)) + firmware_name = fw1210_name; + rv = SQN_1210; + break; + default: + sqn_pr_info("found UNKNOWN card with vendor_id 0x%x" + " dev_id 0x%x\n", func->vendor, func->device); + rv = 0; + } + +/* Maintain in compilable state but don't use it for now */ +#if 0 +/* + * For production devices this is not needed, we can get a device id + * from sdio_func + */ + sqn_pr_info("Checking card version...\n"); + + sqn_sdio_claim_host(func); + version = sdio_readl(func, SQN_H_VERSION, &rv); + sqn_sdio_release_host(func); + if (rv) { + sqn_pr_err("failed to read card version\n"); + rv = 0; + goto out; + } + +#define SQN1130_MAJOR_VERSION 0x06 +#define SQN12x0_MAJOR_VERSION 0x0a + + if (SQN1130_MAJOR_VERSION == (version & 0xff)) + { + sqn_pr_info("found SQN_1130 card with version id 0x%x\n" + , version); + rv = SQN_1130; + } else if (SQN12x0_MAJOR_VERSION == (version & 0xff)) { + sqn_pr_info("found SQN_1210 card with version id 0x%x\n" + , version); + rv = SQN_1210; + } else { + sqn_pr_info("found UNKNOWN card with version id 0x%x\n" + , version); + rv = 0; + } +#endif + +out: + sqn_pr_leave(); + return rv; +} + + +extern u8 _g_card_sleeps; +extern struct sqn_private *g_priv; +struct msmsdcc_host; + +int msmsdcc_enable_clocks(struct msmsdcc_host *host); +void msmsdcc_disable_clocks(struct msmsdcc_host *host, int deferr); +int msmsdcc_get_sdc_clocks(struct msmsdcc_host *host); +int sqn_sdio_get_sdc_clocks(void); +void sqn_sdio_set_sdc_clocks(int on); +int sqn_sdio_notify_host_wakeup(void); + +static irqreturn_t wimax_wakeup_gpio_irq_handler(int irq, void *dev_id) +{ + struct sqn_sdio_card *card = g_priv->card; + struct msmsdcc_host *msm_host = mmc_priv(card->func->card->host); + + sqn_pr_enter(); + +#if SDIO_CLK_DEBUG + /* Please, don't disable this log, it will be printed not often, only + * once when host is in sleep mode */ + // sqn_pr_info("WiMAX GPIO interrupt\n"); +#endif + + // To avoid flush the logging in kmsg, remove it. 
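+ /*
+ * The wake-up GPIO handler does no SDIO I/O itself: it optionally prints a
+ * rate-limited log line and, if the SDC clocks are currently off, turns
+ * them on with a deferred disable (5 * HZ), keeping the controller clocked
+ * for a while after the wake-up GPIO fires.
+ */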
+ if (mmc_wimax_get_sdio_interrupt_log()) { + if (printk_ratelimit()) + sqn_pr_info("WiMAX GPIO interrupt\n"); + } + + if (!sqn_sdio_get_sdc_clocks()) { + msmsdcc_enable_clocks(msm_host); + msmsdcc_disable_clocks(msm_host, 5 * HZ); + } + + sqn_pr_leave(); + return IRQ_HANDLED; + +} + +int sqn_sdio_notify_host_wakeup(void) +{ + struct sqn_sdio_card *card = g_priv->card; + int rv = 0; + + sqn_pr_enter(); + + rv = sqn_wakeup_fw(card->func); + + sqn_pr_leave(); + + return rv; +} +EXPORT_SYMBOL(sqn_sdio_notify_host_wakeup); + +int sqn_sdio_get_sdc_clocks(void) +{ + struct sqn_sdio_card *card = g_priv->card; + struct msmsdcc_host *host = mmc_priv(card->func->card->host); + + return msmsdcc_get_sdc_clocks(host); +} +EXPORT_SYMBOL(sqn_sdio_get_sdc_clocks); + +void sqn_sdio_set_sdc_clocks(int on) +{ + struct sqn_sdio_card *card = g_priv->card; + struct msmsdcc_host *host = mmc_priv(card->func->card->host); + + sqn_pr_enter(); + + if (on) { + msmsdcc_enable_clocks(host); + } + else { + msmsdcc_disable_clocks(host, 0); + } + + sqn_pr_leave(); +} +EXPORT_SYMBOL(sqn_sdio_set_sdc_clocks); + +int init_thp_handler(struct net_device *dev); +void cleanup_thp_handler(void); + +static int sqn_sdio_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + int rv = 0; + struct sqn_sdio_card *sqn_card = 0; + struct sqn_private *priv = 0; + int counter = 0; + int delay = 0; + + int err; + u32 irq; + u32 req_flags = IRQF_TRIGGER_RISING; + + sqn_pr_enter(); + + sqn_pr_info("module parameters: firmware_name='%s' load_firmware=%d\n" + , firmware_name, load_firmware); + +#ifdef DEBUG + sqn_sdio_print_debug_info(func); + /* sqn_sdio_debug_test(func); */ +#endif + + /* Allocate card's private data storage */ + sqn_card = kzalloc(sizeof(struct sqn_sdio_card), GFP_KERNEL); + if (!sqn_card) { + rv = -ENOMEM; + goto out; + } + + sqn_card->version = sqn_get_card_version(func); + + if (0 == sqn_card->version) { + rv = -EPROTO; + goto free_card; + } + + skb_queue_head_init(&sqn_card->tx_queue); + skb_queue_head_init(&sqn_card->rx_queue); + init_waitqueue_head(&sqn_card->pm_waitq); + mutex_init(&sqn_card->tx_mutex); + mutex_init(&sqn_card->rx_mutex); + mutex_init(&sqn_card->rxq_mutex); + + wake_lock_init(&sqn_card->wakelock, WAKE_LOCK_SUSPEND, "sqnsdio"); + setup_timer(&sqn_card->wakelock_timer + , sqn_sdio_wake_lock_release_timer_fn + , (unsigned long) sqn_card); + + sqn_card->func = func; + + /* Activate SDIO function and register interrupt handler */ + sqn_sdio_claim_host(func); + + rv = sdio_enable_func(func); + if (rv) + goto release; + + rv = sdio_claim_irq(func, sqn_sdio_interrupt); + if (rv) + goto disable; + + sqn_sdio_release_host(func); + + sdio_set_drvdata(func, sqn_card); + priv = sqn_add_card(sqn_card, &func->dev); + if (!priv) { + rv = -ENOMEM; + goto reclaim; + } + + sqn_card->priv = priv; + + INIT_WORK(&priv->rx_work_struct, sqn_sdio_process_rx_queue); + priv->card = sqn_card; + priv->hw_host_to_card = sqn_sdio_host_to_card; + priv->add_skb_to_tx_queue = sqn_sdio_add_skb_to_tx_queue; + priv->is_tx_queue_empty = sqn_sdio_is_tx_queue_empty; + + /* Load firmware if card needs it */ + if (check_boot_from_host_mode(sqn_card->func)) + { + rv = sqn_load_firmware(sqn_card->func); + if (rv) + goto err_activate_card; + } + + memcpy(priv->dev->dev_addr, priv->mac_addr, ETH_ALEN); + + rv = sqn_start_card(priv); + if (rv) + goto err_activate_card; + + /* We need to setup thp_handler now, to catch all THP packets + * as soon as they appear after interrupts are enabled + */ + rv = init_thp_handler(priv->dev); + if 
(rv) + goto unreg_netdev; + + /* Enable interrupts, now everything is set up */ + rv = sqn_sdio_it_enable(sqn_card->func); + if (rv) + goto clean_thp_handler; + + sqn_pr_info("wait until FW is started...\n"); + counter = 20; + delay = 500; + while (0 == sqn_sdio_get_rstn_wr_fifo_flag(priv) && --counter > 0) { + sqn_pr_dbg("FW is not started yet, sleep for %d msecs," + " %d retries left\n" + , delay + , counter); + msleep(delay); + } + + if (0 == sqn_card->rstn_wr_fifo_flag) + sqn_pr_warn("FW is still not started, anyway continue as is...\n"); + + + sqn_pr_info("setup GPIO%d for wakeup form SQN1210\n", mmc_wimax_get_hostwakeup_gpio()); + rv = irq = MSM_GPIO_TO_INT(mmc_wimax_get_hostwakeup_gpio()); //HOST WAKEUP GPIO as wakeup + + if (rv < 0) { + sqn_pr_warn("wimax-gpio to irq failed\n"); + goto disable; + } + + rv = request_irq(irq, wimax_wakeup_gpio_irq_handler, + req_flags, "WiMAX0", sqn_card->priv->dev); // IRQF_TRIGGER_RISING, raising trigger + if (rv) { + sqn_pr_warn("wimax-gpio request_irq failed=%d\n", rv); + goto disable; + } + + sqn_pr_dbg("disable GPIO%d interrupt\n", mmc_wimax_get_hostwakeup_gpio()); + disable_irq(MSM_GPIO_TO_INT(mmc_wimax_get_hostwakeup_gpio())); + + rv = init_thp(priv->dev); + if (rv) + goto clean_thp_handler; + +#ifdef DEBUG + /* sqn_sdio_debug_test(sqn_card->func); */ +#endif + +out: + sqn_pr_dbg("returned code: %d\n", rv); + if (0 == rv) + sqn_pr_info("card initialized successfuly\n"); + sqn_pr_leave(); + return rv; + +clean_thp: + cleanup_thp(); +clean_thp_handler: + cleanup_thp_handler(); +unreg_netdev: + unregister_netdev(priv->dev); +err_activate_card: + flush_scheduled_work(); + free_netdev(priv->dev); +reclaim: + sqn_sdio_claim_host(func); + sdio_release_irq(func); +disable: + sdio_disable_func(func); +release: + sqn_sdio_release_host(func); + sqn_sdio_free_tx_queue(sqn_card); + sqn_sdio_free_rx_queue(sqn_card); + + /* release a wake_lock if it was not done for a some reason */ + if (wake_lock_active(&sqn_card->wakelock)) { + sqn_pr_dbg("wake_lock is active, release it\n"); + wake_unlock(&sqn_card->wakelock); + } + + wake_lock_destroy(&sqn_card->wakelock); + +free_card: + kfree(sqn_card); + + goto out; +} + + +extern wait_queue_head_t g_card_sleep_waitq; + +static void sqn_sdio_remove(struct sdio_func *func) +{ + struct sqn_sdio_card *sqn_card = sdio_get_drvdata(func); + u8 count = 0; + u32 delay = 0; + int rv = 0; + + sqn_pr_enter(); + + sqn_pr_info("free GPIO%d interrupt\n", mmc_wimax_get_hostwakeup_gpio()); + free_irq(MSM_GPIO_TO_INT(mmc_wimax_get_hostwakeup_gpio()),sqn_card->priv->dev); + +#if defined(DEBUG) + sqn_sdio_print_debug_info(func); +#endif + cleanup_thp(); + + /* + * Let all running threads know that we are starting + * a remove procedure + */ + sqn_card->priv->removed = 1; + delay = 1000; + + sqn_sdio_it_disable(sqn_card->func); + wake_up_interruptible(&g_card_sleep_waitq); + + sqn_pr_info("wait until RX is finished\n"); + count = 5; + while (--count && !(rv = mutex_trylock(&sqn_card->rx_mutex))) + mdelay(delay); + if (!rv) + sqn_pr_warn("%s: failed to acquire RX mutex\n", __func__); + + sqn_stop_card(sqn_card->priv); + wake_up_interruptible(&sqn_card->priv->tx_waitq); + kthread_stop(sqn_card->priv->tx_thread); + + sqn_pr_info("wait until TX is finished\n"); + count = 5; + while (--count && !(rv = mutex_trylock(&sqn_card->tx_mutex))) + mdelay(delay); + if (!rv) + sqn_pr_warn("%s: failed to acquire TX mutex\n", __func__); + + sqn_sdio_claim_host(func); + sdio_release_irq(func); + sdio_disable_func(func); + sqn_sdio_release_host(func); + 
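+ /*
+ * Remaining teardown: detach the generic card state, drop any skbs still
+ * queued for TX/RX, stop the wake_lock release timer, release and destroy
+ * the wake_lock, free the private structure and finally ask the MMC core
+ * to re-detect the now empty slot.
+ */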
+ sqn_remove_card(sqn_card->priv); + + sqn_sdio_free_tx_queue(sqn_card); + sqn_sdio_free_rx_queue(sqn_card); + + del_timer_sync(&sqn_card->wakelock_timer); + /* release a wake_lock if it was not done for a some reason */ + if (wake_lock_active(&sqn_card->wakelock)) { + sqn_pr_dbg("wake_lock is active, release it\n"); + wake_unlock(&sqn_card->wakelock); + } + + wake_lock_destroy(&sqn_card->wakelock); + + kfree(sqn_card); + sdio_set_drvdata(func, 0); + + sqn_pr_info("card removed successfuly\n"); + mmc_detect_change(func->card->host, msecs_to_jiffies(500)); + sqn_pr_leave(); +} + +int sqn_sdio_suspend(struct sdio_func *func, pm_message_t msg) +{ + int rv = 0; + /* unsigned long irq_flags = 0; */ + struct sqn_sdio_card *sqn_card = sdio_get_drvdata(func); + + sqn_pr_enter(); + sqn_pr_info("%s: enter\n", __func__); + sqn_pr_dbg("pm_message = %x\n", msg.event); + + WARN(!skb_queue_empty(&sqn_card->tx_queue) + , "BANG!!! TX queue is not empty in suspend(): %d" + , skb_queue_len(&sqn_card->tx_queue)); + + WARN(!skb_queue_empty(&sqn_card->rx_queue) + , "BANG!!! RX queue is not empty in suspend(): %d" + , skb_queue_len(&sqn_card->rx_queue)); + + if (sqn_card->is_card_sleeps) { + sqn_pr_info("card already asleep (pm_message = 0x%x)\n" + , msg.event); + goto out; + } + + /* Do nothing when system goes to power off */ + if (PM_EVENT_SUSPEND != msg.event) { + sqn_pr_warn("Not supported pm_message = %x\n", msg.event); + goto out; + } + + if (sqn_notify_host_sleep(func)) { + sqn_pr_warn("Failed to suspend\n"); + goto out; + } +out: + + mmc_wimax_enable_host_wakeup(1); + + sqn_pr_info("%s: leave\n", __func__); + sqn_pr_leave(); + return rv; +} + + +int sqn_sdio_resume(struct sdio_func *func) +{ + int rv = 0; + struct sqn_sdio_card *sqn_card = sdio_get_drvdata(func); + + sqn_pr_enter(); + sqn_pr_info("%s: enter\n", __func__); + + if (netif_queue_stopped(sqn_card->priv->dev)) { + sqn_pr_dbg("wake netif_queue\n"); + netif_wake_queue(sqn_card->priv->dev); + } + + // Dima: we don't need this, card will be woken up when there will be + // some TX data + /* sqn_notify_host_wakeup(func); */ + + mmc_wimax_enable_host_wakeup(0); + + sqn_pr_info("%s: leave\n", __func__); + sqn_pr_leave(); + return rv; +} + +int sqn_sdio_dump_net_pkt(int on) { + + printk("[SDIO] %s: dump_net_pkt: %d\n", __func__, on); + dump_net_pkt = on; + + return 0; +} + +static struct sdio_driver sqn_sdio_driver = { + .name = SQN_MODULE_NAME + , .id_table = sqn_sdio_ids + , .probe = sqn_sdio_probe + , .remove = sqn_sdio_remove + , .suspend = sqn_sdio_suspend + , .resume = sqn_sdio_resume +}; + + +/*******************************************************************/ +/* Module initialization */ +/*******************************************************************/ + +static int __init sqn_sdio_init_module(void) +{ + int rc = 0; + + sqn_pr_enter(); + + sqn_pr_info("Sequans SDIO WiMAX driver, version %s\n" + , SQN_MODULE_VERSION); + sqn_pr_info("Copyright SEQUANS Communications\n"); + + // printk(KERN_WARNING "------------ %s ------------\n", __FUNCTION__); + mmc_wimax_power(1); + mmc_wimax_set_carddetect(1); + // thp_wimax_uart_switch(1); + mmc_wimax_set_status(1); + dump_net_pkt = mmc_wimax_get_netlog_status(); + claim_host_dbg = mmc_wimax_get_cliam_host_status(); + + + rc = sdio_register_driver(&sqn_sdio_driver); + + register_android_earlysuspend(); + +#if RESET_BY_WIMAXTRACKER + sdio_netlink_register(); +#endif + + sqn_pr_info("Driver has been registered\n"); + + sqn_pr_leave(); + + return rc; +} + +static void __exit sqn_sdio_exit_module(void) 
+{ + sqn_pr_enter(); + + sdio_unregister_driver(&sqn_sdio_driver); + + unregister_android_earlysuspend(); + + sqn_pr_info("Driver has been removed\n"); + + mmc_wimax_set_carddetect(0); + mmc_wimax_power(0); + // thp_wimax_uart_switch(0); + mmc_wimax_set_status(0); + +#if RESET_BY_WIMAXTRACKER + sdio_netlink_deregister(); +#endif + + sqn_pr_leave(); +} + + +module_init(sqn_sdio_init_module); +module_exit(sqn_sdio_exit_module); + + +MODULE_DESCRIPTION("Sequans WiMAX driver for SDIO devices"); +MODULE_AUTHOR("Dmitriy Chumak, Andy Shevchenko"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(SQN_MODULE_VERSION); diff --git a/drivers/net/wimax/SQN/sdio.h b/drivers/net/wimax/SQN/sdio.h new file mode 100644 index 0000000000000..0f8bcd07a22fd --- /dev/null +++ b/drivers/net/wimax/SQN/sdio.h @@ -0,0 +1,20 @@ +/* + * This is part of the Sequans SQN1130 driver. + * Copyright 2008 SEQUANS Communications + * Written by Dmitriy Chumak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ + +#ifndef _SQN_SDIO_WRAPPERS_H +#define _SQN_SDIO_WRAPPERS_H + +#include +#include + +int sqn_sdio_dump_net_pkt(int on); + +#endif /* _SQN_SDIO_WRAPPERS_H */ diff --git a/drivers/net/wimax/SQN/sdio_netlink.c b/drivers/net/wimax/SQN/sdio_netlink.c new file mode 100644 index 0000000000000..46068e4e6ca54 --- /dev/null +++ b/drivers/net/wimax/SQN/sdio_netlink.c @@ -0,0 +1,57 @@ +#include "sdio_netlink.h" + +struct sock *netlink_sock; + +void udp_broadcast(int gid,void *payload) +{ + struct sk_buff *skb; + struct nlmsghdr *nlh; + int size=strlen(payload)+1; + int len = NLMSG_SPACE(size); + void *data; + int ret; + + skb = alloc_skb(len, GFP_KERNEL); + if (!skb) + return; + nlh= NLMSG_PUT(skb, 0, 0, 0, size); + nlh->nlmsg_flags = 0; + data=NLMSG_DATA(nlh); + memcpy(data, payload, size); + NETLINK_CB(skb).pid = 0; /* from kernel */ + NETLINK_CB(skb).dst_group = gid; /* unicast */ + ret=netlink_broadcast(netlink_sock, skb, 0, gid, GFP_KERNEL); + + if (ret <0) + { + printk("[SDIO] %s send failed\n", __func__); + return; + } + return; + +nlmsg_failure: /* Used by NLMSG_PUT */ + if (skb) + kfree_skb(skb); +} + +void MyTimerFunction(unsigned long data) +{ + udp_broadcast(1,"ResetWimax_BySDIO\n"); +} + +void udp_receive(struct sk_buff *skb) +{ +} + +int sdio_netlink_register(void) +{ + netlink_sock = netlink_kernel_create(&init_net, NETLINK_USERSOCK, 0,udp_receive, NULL, THIS_MODULE); + return 0; +} + +void sdio_netlink_deregister(void) +{ + sock_release(netlink_sock->sk_socket); + printk("[SDIO] %s: netlink driver remove successfully\n", __func__); +} + diff --git a/drivers/net/wimax/SQN/sdio_netlink.h b/drivers/net/wimax/SQN/sdio_netlink.h new file mode 100644 index 0000000000000..b8c0c3e1d737e --- /dev/null +++ b/drivers/net/wimax/SQN/sdio_netlink.h @@ -0,0 +1,17 @@ +#ifndef _SDIO_NETLINK_H +#define _SDIO_NETLINK_H + +#include +#include +#include +#include +#include +#include + +void udp_broadcast(int gid,void *payload); +void MyTimerFunction(unsigned long data); +void udp_receive(struct sk_buff *skb); +int sdio_netlink_register(void); +void sdio_netlink_deregister(void); + +#endif diff --git a/drivers/net/wimax/SQN/thp.c b/drivers/net/wimax/SQN/thp.c new file mode 100644 index 0000000000000..2926bc1ab3680 --- /dev/null +++ b/drivers/net/wimax/SQN/thp.c @@ -0,0 +1,769 @@ +/* + * This is part of the Sequans SQN1130 driver. 
+ * Copyright 2009 SEQUANS Communications + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ + + +#include +#include +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) +#include +#endif +#include +#include +#include +#include +#include +#include +#include + + +#include "version.h" +#include "msg.h" +#include "thp.h" + +#include "thp_ioctl.h" + +#define THP_TRACE 0 /* print info messages from THP read/write handlers */ +#define THP_HEADER_DUMP 0 /* verbosely dump header of THP TX/RX packets */ + +#define THP_DEBUG 0 +#define SKB_DEBUG 0 +#define DRVREV SQN_MODULE_VERSION + +extern bool drop_packet; + +static struct mutex thp_lock; + +const uint8_t host_macaddr[ETH_ALEN] = {0x00, 0x16, 0x08, 0xff, 0x00, 0x01}; +const uint8_t ss_macaddr[ETH_ALEN] = {0x00, 0x16, 0x08, 0xff, 0x00, 0x00}; + +extern int sqn_sdio_dump_net_pkt(int on); +extern int mmc_wimax_get_thp_log(void); + +// Queue of packets destined to the Connection Manager +// TODO: check size of the queue, it's should always be one. +struct sk_buff_head to_sqntool_queue; + +DECLARE_WAIT_QUEUE_HEAD(to_sqntool_wait); + +struct net_device *this_device = NULL; + +struct packet_type rx_packet_type = { 0 }; +extern int mmc_wimax_uart_switch(int uart); + +uint8_t is_thp_packet(uint8_t *dest_addr) +{ + return (memcmp(dest_addr, host_macaddr, ETH_ALEN)==0); +} + +inline struct ethhdr *skb2ethhdr(const struct sk_buff *skb) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) + return (struct ethhdr *)skb->mac.raw; +#else + return (struct ethhdr *)skb_mac_header(skb); +#endif +} + + +/* TODO: Fix kernel version */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10) +int thp_handler(struct sk_buff *skb, struct net_device *pDev, struct packet_type *pPt, struct net_device *pOrigDev) +#else +int thp_handler(struct sk_buff *skb, struct net_device *pDev, struct packet_type *pPt) +#endif +{ + struct sk_buff *skb_thp = 0; + struct ethhdr *eth = 0; + + sqn_pr_enter(); + + /* We need only ETH_P_802_2 protocol packets with THP mac address */ + eth = skb2ethhdr(skb); + if(ntohs(skb->protocol) != ETH_P_802_2 || !is_thp_packet(eth->h_dest)) { + //for DDTM, drop all NOT THP packets + if(drop_packet) { + sqn_pr_dbg("HTC CODE: drop packet for DDTM\n"); + skb->pkt_type = PACKET_OTHERHOST; + } + goto not_thp_out; + } + + skb_thp = skb_clone(skb, GFP_ATOMIC); + /* Bugz 22554: strip CRC at the end of packet */ + skb_trim(skb_thp, skb_thp->len - 4); + +#if THP_TRACE + sqn_pr_info("%s: RX packet, len = %d\n", __func__, skb_thp->len); +#endif + sqn_pr_dbg("RX THP packet, length %d\n", skb_thp->len); + skb_queue_tail(&to_sqntool_queue, skb_thp); + + if(skb_queue_len(&to_sqntool_queue) == 256){ + skb_thp = skb_dequeue(&to_sqntool_queue); + kfree_skb(skb_thp); + } + + wake_up_interruptible(&to_sqntool_wait); //Wake up wait queue + +thp_out: + dev_kfree_skb_any(skb); + sqn_pr_leave(); + return NET_RX_DROP; +not_thp_out: + dev_kfree_skb_any(skb); + sqn_pr_leave(); + return NET_RX_SUCCESS; +} + +// Initialization function for THP handler +int init_thp_handler(struct net_device *dev) +{ + sqn_pr_enter(); +#if THP_DEBUG + printk(KERN_WARNING "init_thp_handler +\n"); +#endif + + skb_queue_head_init(&to_sqntool_queue); + + /* Define type of intercepted packets */ + rx_packet_type.type = htons(ETH_P_ALL); /* Intercept all 
packets */ + rx_packet_type.dev = dev; + rx_packet_type.func = thp_handler; /* Network packet handler function */ + + /* Register packet handler */ + dev_add_pack(&rx_packet_type); + +#if THP_DEBUG + printk(KERN_WARNING "init_thp_handler -\n"); +#endif + sqn_pr_leave(); + + return 0; +} + +// Clean up function for THP handler +void cleanup_thp_handler(void) +{ + sqn_pr_enter(); +#if THP_DEBUG + printk(KERN_WARNING "cleanup_thp_handler +\n"); +#endif + + /* unregister packet handler */ + dev_remove_pack(&rx_packet_type); + + if(!skb_queue_empty(&to_sqntool_queue)) + skb_queue_purge(&to_sqntool_queue) ; + +#if THP_DEBUG + printk(KERN_WARNING "cleanup_thp_handler -\n"); +#endif + sqn_pr_leave(); +} + +#define PROC_DIR_NAME "kthp" +#define DRV_REVISION "kthp/drvrev" +#define IFACE_FILENAME "kthp/iface_name" + +char procfs_dir[64] = PROC_DIR_NAME; + +static struct proc_dir_entry* kthp_proc_dir; + +extern struct net_device *this_device; + +/** PROC_FS Read Functions */ + +static int ifacename_read(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int len = 0; + + sqn_pr_enter(); +#if THP_DEBUG + printk(KERN_WARNING "ifacename_read +\n"); +#endif + + if(this_device) + len += sprintf(page, "%s\n", this_device->name); + + *eof = 1; + +#if THP_DEBUG + printk(KERN_WARNING "ifacename_read -\n"); +#endif + sqn_pr_leave(); + + return len; +} + + +static int drvrev_read(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int len = 0; + + sqn_pr_enter(); +#if THP_DEBUG + printk(KERN_WARNING "drvrev_read +\n"); +#endif + + len += sprintf(page, "%s\n", SQN_MODULE_VERSION); + + *eof = 1; + + sqn_pr_leave(); + + return len; +} + +static int install_entry(char *entry_name, read_proc_t* read_func) +{ + struct proc_dir_entry* proc; + + sqn_pr_enter(); + + proc = create_proc_read_entry(entry_name, S_IFREG | S_IRUGO | S_IWUSR + , NULL, (read_proc_t*)read_func, NULL); + + if (proc) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) + proc->owner = THIS_MODULE; +#endif + } else { + printk(KERN_ALERT"/proc/ %s failed", entry_name); + return 1; + } + + sqn_pr_leave(); + + return 0; +} + +int init_procfs_handler(void) +{ + sqn_pr_enter(); + + kthp_proc_dir = proc_mkdir(procfs_dir, NULL); + + if (kthp_proc_dir) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) + kthp_proc_dir->owner = THIS_MODULE; +#endif + } else { + remove_proc_entry(PROC_DIR_NAME, NULL); + return 1; + } + + if(install_entry(IFACE_FILENAME, ifacename_read) || + install_entry(DRV_REVISION, drvrev_read)) + { + return 1; + } + +#if THP_DEBUG + printk(KERN_WARNING "drvrev_read -\n"); +#endif + sqn_pr_leave(); + + return 0; +} + +void cleanup_procfs_handler(void) +{ + sqn_pr_enter(); +#if THP_DEBUG + printk(KERN_WARNING "cleanup_procfs_handler +\n"); +#endif + + remove_proc_entry(IFACE_FILENAME, NULL); + remove_proc_entry(DRV_REVISION, NULL); + + if (kthp_proc_dir) + remove_proc_entry(procfs_dir, NULL); + +#if THP_DEBUG + printk(KERN_WARNING "cleanup_procfs_handler -\n"); +#endif + sqn_pr_leave(); +} + + +#define THP_FILENAME "thp" + + +static dev_t dev_num; +//static int dev_index; +static struct cdev *thp_dev; +static struct class *thp_class; +static struct device *thp_device; + +static uint8_t once_open_flag = 0; + +const char thp_filename[64] = THP_FILENAME; + +/********************File operations*****************************/ +static int thp_open(struct inode*, struct file*); + +static ssize_t thp_release(struct inode*, struct file*); + +static ssize_t thp_read(struct file*, char*, size_t, loff_t*); 
+ +static ssize_t thp_write(struct file *file, const char *buf, + size_t count, loff_t *ppos); + +static unsigned int thp_poll(struct file *filp, poll_table *wait); + +static long thp_ioctl(struct file*, unsigned int, unsigned long); + +struct file_operations thp_fops = +{ + .owner = THIS_MODULE + , .open = thp_open + , .release= thp_release + , .read = thp_read + , .write= thp_write + , .poll = thp_poll + , .unlocked_ioctl = thp_ioctl +}; + +/********************** File Operations BEGIN *****************************/ + +static int thp_open(struct inode * inode, struct file * filp) +{ + mutex_lock(&thp_lock); + + sqn_pr_enter(); +#if THP_DEBUG + printk(KERN_WARNING "thp_open +\n"); +#endif + + // allow multiple open() call for supporting ioctl on HTC Supersonic + /* + if(once_open_flag) + return -EBUSY; + */ + + once_open_flag = 1; + +#if THP_DEBUG + printk(KERN_WARNING "thp_open -\n"); +#endif + sqn_pr_leave(); + + mutex_unlock(&thp_lock); + return 0; +} + +static ssize_t thp_release(struct inode *inode, struct file *filp) +{ + mutex_lock(&thp_lock); + + sqn_pr_enter(); + + once_open_flag = 0; + + if(!skb_queue_empty(&to_sqntool_queue)) + skb_queue_purge(&to_sqntool_queue); + + sqn_pr_leave(); + + mutex_unlock(&thp_lock); + return 0; +} + +static ssize_t thp_read(struct file *filp, char *buf, size_t count, loff_t*ppos) +{ + DECLARE_WAITQUEUE(wait, current); + + struct sk_buff_head *head = &to_sqntool_queue; + struct sk_buff *curr = NULL; + ssize_t retval; + const struct sqn_thp_header *th = 0; + + sqn_pr_enter(); +#if THP_DEBUG + printk(KERN_WARNING "thp_read +\n"); +#endif + + add_wait_queue(&to_sqntool_wait, &wait); + retval = -ERESTARTSYS; + + if(0 == this_device) { + printk(KERN_WARNING "thp_read() device removed\n"); + retval = -EINVAL; + goto out; + } + + while(1) + { + if(!skb_queue_empty(head)) + break; + if(signal_pending(current) || 0 == this_device) { + printk(KERN_WARNING "thp_read() interrupted by signal\n"); + retval = -EINTR; + goto out; + } + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + } + curr = skb_dequeue(head); + + if (count < curr->len) { + printk(KERN_WARNING "%s: userspace buffer is too small (%u bytes)" + " to hold THP packet (%u bytes)\n" + , __func__, count, curr->len); + retval = -EINVAL; + goto free_skb; + } else { + count = curr->len; + } + + if(copy_to_user(buf, curr->data, count)) + { + printk(KERN_ERR "error copying data to user space\n"); + retval = -EFAULT; + goto free_skb; + } + + if (mmc_wimax_get_thp_log()) { + sqn_pr_info("%s: [to_user]: len = %d\n", __func__, count); + th = (struct sqn_thp_header *) curr->data; + sqn_pr_info("%s: PKTLen: %4u | TVer: 0x0%x | Flags: 0x0%x | Len: %4u" + " | SeqNum: %5u | AckNum: %5u | TLen: %5u\n", __func__ + , count + , th->transport_version + , th->flags + , be16_to_cpu(th->length) + , be16_to_cpu(th->seq_number) + , be16_to_cpu(th->ack_number) + , be32_to_cpu(th->total_length)); + sqn_pr_dbg_dump("THP RX:", curr->data, count); + } + +#if SKB_DEBUG + sqn_pr_info("%s: free skb [0x%p], users %d\n", __func__, curr, atomic_read(&curr->users)); +#endif + + retval = (ssize_t)count; +free_skb: + + dev_kfree_skb_any(curr); + +out: + set_current_state(TASK_RUNNING); + remove_wait_queue(&to_sqntool_wait, &wait); + +#if THP_DEBUG + printk(KERN_WARNING "thp_read -\n"); +#endif + sqn_pr_leave(); + + return retval; +} + + +static ssize_t thp_write(struct file *file, const char *buf, + size_t count, loff_t *ppos) +{ + + ssize_t retval = -ENOMEM; + struct sk_buff *skb; + struct ethhdr ethh; + int size = count + ETH_HLEN; + 
const struct sqn_thp_header *th = 0; + + sqn_pr_enter(); +#if THP_DEBUG + printk(KERN_WARNING "thp_write +\n"); +#endif + + if(0 == this_device) + return -ENODEV; + + skb = __dev_alloc_skb(size, GFP_ATOMIC | GFP_DMA); + if(skb == NULL) + return retval; + +#if SKB_DEBUG + sqn_pr_info("%s: [0x%p] alloc skb, users %d\n", __func__, skb, atomic_read(&skb->users)); +#endif + + memcpy(ethh.h_dest, ss_macaddr, ETH_ALEN); + memcpy(ethh.h_source, host_macaddr, ETH_ALEN); + ethh.h_proto = htons(count); + + memcpy(skb->data, ðh, sizeof(struct ethhdr)); + skb_put(skb, sizeof(struct ethhdr)); + + if(copy_from_user(skb->tail, buf, count)) { + dev_kfree_skb_any(skb); + return -EFAULT; + } + skb_put(skb, count); + + if (mmc_wimax_get_thp_log()) { + sqn_pr_info("%s: [from_user]: len = %d\n", __func__, count); + th = (struct sqn_thp_header *) buf; + sqn_pr_info("%s: PKTLen: %4u | TVer: 0x0%x | Flags: 0x0%x | Len: %4u" + " | SeqNum: %5u | AckNum: %5u | TLen: %5u\n", __func__ + , count + , th->transport_version + , th->flags + , be16_to_cpu(th->length) + , be16_to_cpu(th->seq_number) + , be16_to_cpu(th->ack_number) + , be32_to_cpu(th->total_length)); + + sqn_pr_dbg_dump("THP TX:", skb->data, count); + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) + this_device->hard_start_xmit(skb, this_device); +#else + this_device->netdev_ops->ndo_start_xmit(skb, this_device); +#endif + + retval = count; + +#if THP_DEBUG + printk(KERN_WARNING "thp_write -\n"); +#endif + sqn_pr_leave(); + + return retval; +} + + +/* + +*/ +static unsigned int thp_poll(struct file *filp, poll_table *wait) +{ + unsigned int mask = 0; + + sqn_pr_enter(); +#if THP_DEBUG + //printk(KERN_WARNING "thp_poll +\n"); +#endif + + poll_wait(filp, &to_sqntool_wait, wait); + + if (0 == this_device) { + printk(KERN_WARNING "thp_poll() device removed\n"); + mask = POLLERR; + } else if(skb_queue_empty(&to_sqntool_queue)) { + mask = 0; + } else { + mask = (POLLIN | POLLRDNORM); + } + +#if THP_DEBUG + //printk(KERN_WARNING "thp_poll -\n"); +#endif + sqn_pr_leave(); + + return mask; +} + +static long thp_ioctl(struct file* handle, unsigned int cmd, unsigned long arg) +{ +#if THP_DEBUG + printk(KERN_WARNING "thp_ioctl +\n"); +#endif + mutex_lock(&thp_lock); + sqn_pr_enter(); + + switch (cmd) { + case IOCTL_DROP_PACKETS: + printk(KERN_WARNING "IOCTL_DROP_PACKETS arg=%d\n",(int)arg); + if(arg == 1) + drop_packet = true; + else + drop_packet = false; + break; + + case IOCTL_SWITCH_UART: + printk(KERN_WARNING "IOCTL_SWITCH_UART arg=%d\n",(int)arg); + if(arg == 1) + mmc_wimax_uart_switch(2); // Wimax + else + mmc_wimax_uart_switch(0); // USB + break; + + case IOCTL_SWITCH_NETLOG: + printk(KERN_WARNING "IOCTL_SWITCH_NETLOG arg=%d\n",(int)arg); + if(arg == 0) + sqn_sdio_dump_net_pkt(0); // Enable netlog + else + sqn_sdio_dump_net_pkt(1); // Disable netlog + break; + + default: + printk(KERN_WARNING "UNKNOWN OPERATION in thp_ioctl\n"); + return -1; + } + + sqn_pr_leave(); +#if THP_DEBUG + printk(KERN_WARNING "thp_ioctl -\n"); +#endif + + mutex_unlock(&thp_lock); + return 0; +} + +int init_thp_devfile(void) +{ + sqn_pr_enter(); +#if THP_DEBUG + printk(KERN_WARNING "init_thp_devfile +\n"); +#endif + + //Dynamic allocation of device number + if(alloc_chrdev_region(&dev_num, 0, 1, thp_filename)) + return -ENOMEM; + + thp_dev = cdev_alloc(); + if(thp_dev == NULL) + return -ENOMEM; + + thp_dev->ops = &thp_fops; + thp_dev->owner = THIS_MODULE; + + if(cdev_add(thp_dev, dev_num, 1)) + return -ENOMEM; + + thp_class = class_create(THIS_MODULE, thp_filename); + if 
(IS_ERR(thp_class)) + { + printk("class_create error(0x%x)\n",(unsigned int)(thp_class)); + return PTR_ERR(thp_class); + } + + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27) + thp_device = device_create(thp_class, NULL, dev_num, NULL, thp_filename); +#else + thp_device = device_create(thp_class, NULL, dev_num, thp_filename); +#endif + if (IS_ERR(thp_device)) + { + printk("device_create error(0x%x)\n", (unsigned int)(thp_device)); + return PTR_ERR(thp_device); + } + +#if THP_DEBUG + printk(KERN_WARNING "init_thp_devfile -\n"); +#endif + sqn_pr_leave(); + + return 0; +} + +/** + \brief dev-fs cleanup function + + * This function is called in module cleanup function + */ +void cleanup_thp_devfile(void) +{ + sqn_pr_enter(); +#if THP_DEBUG + printk(KERN_WARNING "cleanup_thp_devfile +\n"); +#endif + + /* Unregister entry from /dev */ + device_destroy(thp_class, dev_num); + class_destroy(thp_class); + unregister_chrdev_region(dev_num, 1); + cdev_del(thp_dev); + +#if THP_DEBUG + printk(KERN_WARNING "cleanup_thp_devfile -\n"); +#endif + sqn_pr_leave(); +} +/********************** File Operations END *****************************/ + +int thp_wimax_uart_switch(int on) +{ + printk("%s on%d\n", __func__, on); + + if (on) { + mmc_wimax_uart_switch(2); // Wimax + } + else { + mmc_wimax_uart_switch(0); // USB + } + + return 0; +} + +int init_thp(struct net_device* dev) +{ + sqn_pr_enter(); +#if THP_DEBUG + printk(KERN_WARNING "init_thp +\n"); +#endif + + if (0 == this_device) { + if(init_procfs_handler()) { + return -1; + } + + if(init_thp_devfile()) + return -1; + + /* Don't call init_thp_handler() here, it will be called from + * probe() before interrupts are enabled, to ensure that we will + * catch all THP packets as soon as they appear + */ + /* if (init_thp_handler(dev)) */ + /* return -1; */ + + this_device = dev; + mutex_init(&thp_lock); + sqn_pr_info("KTHP initialized\n"); + } + +#if THP_DEBUG + printk(KERN_WARNING "init_thp -\n"); +#endif + sqn_pr_leave(); + + return 0; +} + + +void cleanup_thp(void) +{ + sqn_pr_enter(); +#if THP_DEBUG + printk(KERN_WARNING "cleanup_thp +\n"); +#endif + + if (this_device) { + cleanup_procfs_handler(); + cleanup_thp_handler(); + cleanup_thp_devfile(); + this_device = 0; + sqn_pr_info("KTHP cleaned up\n"); + } + +#if THP_DEBUG + printk(KERN_WARNING "cleanup_thp -\n"); +#endif + sqn_pr_leave(); +} diff --git a/drivers/net/wimax/SQN/thp.h b/drivers/net/wimax/SQN/thp.h new file mode 100644 index 0000000000000..1079561f6ef2d --- /dev/null +++ b/drivers/net/wimax/SQN/thp.h @@ -0,0 +1,56 @@ +/* + * This is part of the Sequans SQN1130 driver. + * Copyright 2009 SEQUANS Communications + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ + +#ifndef _SQN_THP2_H +#define _SQN_THP2_H + + +struct sqn_thp_header { + /** Transport protocol version - must be 1 for now. */ + u8 transport_version; + + /* Flags Field is used to relay control information between THP peers + * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + * | 0 |DAK| 0 |ACK|EOF|MOF|BOF|NOF| + * + * NOF: No fragmentation + * BOF: Begining of fragmentation + * MOF: Middle of fragmentation + * EOF: End of fragmentation + * ACK: The sender acknowledge the reception of the "AckNumber" + * sequence number. 
DAK: The sender ask the receiver to + * acknowledge the seqence number "seqNumber".*/ + u8 flags; + + /** Length of the transported payload message, (without header). */ + u16 length; + + /** Sequence Number + * Which shall be incremented for each fragment (or no fragmented + * command). */ + u16 seq_number; + + /** Acknowledgment Number + * When ACK=DAK=NAK=0, the ackNumber is equal to the last sequence + * received number. */ + u16 ack_number; + + /** Length of the payload message before fragmentation. + * Note: In case of no fragmentation totalLength is equal to length.*/ + u32 total_length; +}; + + +int init_thp(struct net_device* dev); +int thp_wimax_uart_switch(int on); +void cleanup_thp(void); + +#endif /* _SQN_THP2_H */ + diff --git a/drivers/net/wimax/SQN/thp_ioctl.h b/drivers/net/wimax/SQN/thp_ioctl.h new file mode 100644 index 0000000000000..4d284307439d1 --- /dev/null +++ b/drivers/net/wimax/SQN/thp_ioctl.h @@ -0,0 +1,23 @@ +#ifndef _THP_IOCTL_H_ +#define _THP_IOCTL_H_ + +//ioctl group number +//must be a nonnegative 8-bit number +#define WIMAX_DEV_IOCTLID 'w' + +#define CMD_BASE 65 +//ioctl type within the group +//should be sequentially assigned numbers for each different ioctl operation +//must be a nonnegative 8-bit number +#define CMD_DROP_PACKETS CMD_BASE+0 +#define CMD_SIWTCH_UART CMD_BASE+1 +#define CMD_SIWTCH_NETLOG CMD_BASE+2 + +//write only +//arg=1, drop tx/rx packets +//arg=0, normal mode +#define IOCTL_DROP_PACKETS _IOW(WIMAX_DEV_IOCTLID, CMD_DROP_PACKETS, int) +#define IOCTL_SWITCH_UART _IOW(WIMAX_DEV_IOCTLID, CMD_SIWTCH_UART, int) +#define IOCTL_SWITCH_NETLOG _IOW(WIMAX_DEV_IOCTLID, CMD_SIWTCH_NETLOG, int) + +#endif //_THP_IOCTL_H_ diff --git a/drivers/net/wimax/SQN/version.h b/drivers/net/wimax/SQN/version.h new file mode 100644 index 0000000000000..5c2090b1b2c6d --- /dev/null +++ b/drivers/net/wimax/SQN/version.h @@ -0,0 +1,27 @@ +/* + * This is part of the Sequans SQN1130 driver. + * Copyright 2008 SEQUANS Communications + * Written by Andy Shevchenko + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. 
+ */ + +#ifndef _SQN_VERSION_H +#define _SQN_VERSION_H + +#include + +#ifdef CONFIG_SDIO_SQN +#define SQN_MODULE_NAME "sequans_sdio" +#elif defined (CONFIG_USB_SQN) +#define SQN_MODULE_NAME "sequans_usb" +#else +#define SQN_MODULE_NAME "sequans_xxx" +#endif + +#define SQN_MODULE_VERSION "1.2.153" + +#endif /* _SQN_VERSION_H */ diff --git a/drivers/net/wimax/wimaxdbg/Makefile b/drivers/net/wimax/wimaxdbg/Makefile new file mode 100644 index 0000000000000..3301bf036458b --- /dev/null +++ b/drivers/net/wimax/wimaxdbg/Makefile @@ -0,0 +1,10 @@ +MODULE_NAME = wimaxdbg +KDIR = /lib/modules/$(CURRENT)/build + +obj-m := $(MODULE_NAME).o +all: + @echo "making $(MODULE_NAME)" + @echo "PWD=$(PWD)" + $(MAKE) -C $(KDIR) M=$(PWD) modules +clean: + $(MAKE) -C $(KDIR) M=$(PWD) clean diff --git a/drivers/net/wimax/wimaxdbg/wimaxdbg.c b/drivers/net/wimax/wimaxdbg/wimaxdbg.c new file mode 100644 index 0000000000000..25118e9ec8700 --- /dev/null +++ b/drivers/net/wimax/wimaxdbg/wimaxdbg.c @@ -0,0 +1,192 @@ +#include +#include +#include +#include +#include +#include + +extern int mmc_wimax_set_netlog_status(int on); +extern int mmc_wimax_set_cliam_host_status(int on); +// extern int sqn_sdio_get_sdc_clocks(void); +// extern void sqn_sdio_set_sdc_clocks(int on); +extern int mmc_wimax_set_busclk_pwrsave(int on); +extern int mmc_wimax_set_CMD53_timeout_trigger_counter(int counter); +extern int mmc_wimax_get_CMD53_timeout_trigger_counter(void); + +extern int sqn_sdio_notify_host_wakeup(void); +extern int mmc_wimax_set_thp_log(int on); +extern int mmc_wimax_set_sdio_hw_reset(int on); +extern int mmc_wimax_set_packet_filter(int on); + +extern int mmc_wimax_set_netlog_withraw_status(int on); +extern int mmc_wimax_set_sdio_interrupt_log(int on); + +#define BUF_LEN 100 + +// # insmod wimaxdbg + +static char *wimaxdbg_name=NULL; +module_param(wimaxdbg_name,charp,0); + +static int dbg_para = 0; +static struct proc_dir_entry *wimaxdbg_proc_file; + +ssize_t wimaxdbg_write(struct file *file, const char *buffer, unsigned long count, void *data) +{ + char buf[16]; + unsigned long len = count; + int n; + + printk(KERN_INFO "%d (%s)\n", (int)len, __func__); + + if (len >= sizeof(buf)) + len = sizeof(buf) - 1; + + if (copy_from_user(buf, buffer, len)) + return -EFAULT; + + buf[len] = '\0'; + + n = simple_strtol(buf, NULL, 10); + dbg_para = n; + + printk(KERN_INFO "%s: dbg_parameter:%d\n", __func__, dbg_para); + if (dbg_para < 2) { // 0: netlog off, 1: netlog on + printk(KERN_INFO "%s: mmc_wimax_set_netlog_status:%d\n", __func__, dbg_para); + mmc_wimax_set_netlog_status(dbg_para); + } + else if (dbg_para == 3) { // 3: sdc_clock off + printk(KERN_INFO "%s: sqn_sdio_set_sdc_clocks:0\n", __func__); + // sqn_sdio_set_sdc_clocks(0); // Need to insert sequans_sdio.ko first + } + else if (dbg_para == 4) { // 4: sdc_clock on + printk(KERN_INFO "%s: sqn_sdio_set_sdc_clocks:1\n", __func__); + // sqn_sdio_set_sdc_clocks(1); // Need to insert sequans_sdio.ko first + } + else if (dbg_para == 5) { // 5: claim_host debug on + printk(KERN_INFO "%s: mmc_wimax_set_cliam_host_status:1\n", __func__); + mmc_wimax_set_cliam_host_status(1); + } + else if (dbg_para == 6) { // 6: claim host debug off + printk(KERN_INFO "%s: mmc_wimax_set_cliam_host_status:0\n", __func__); + mmc_wimax_set_cliam_host_status(0); + } + else if (dbg_para == 7) { // 7: Turn off dynamic SDC CLK OFF + printk(KERN_INFO "%s: mmc_wimax_set_busclk_pwrsave:0\n", __func__); + mmc_wimax_set_busclk_pwrsave(0); + } + else if (dbg_para == 8) { // 8: Turn on dynamic SDC CLK OFF + 
printk(KERN_INFO "%s: mmc_wimax_set_busclk_pwrsave:1\n", __func__); + mmc_wimax_set_busclk_pwrsave(1); + } + else if (dbg_para == 9) { // 9: Disable force CMD53 timeout testing + printk(KERN_INFO "%s: mmc_wimax_set_CMD53_timeout_trigger_counter:0\n", __func__); + mmc_wimax_set_CMD53_timeout_trigger_counter(0); + } + else if (dbg_para == 10) { // 10: Force CMD53 timeout testing + printk(KERN_INFO "%s: mmc_wimax_set_CMD53_timeout_trigger_counter:1\n", __func__); + mmc_wimax_set_CMD53_timeout_trigger_counter(5); + } + else if (dbg_para == 11) { // 11: Manually re-send host wakeup + // printk(KERN_INFO "%s: sqn_sdio_notify_host_wakeup\n", __func__); + // sqn_sdio_notify_host_wakeup(); // Need to insert sequans_sdio.ko first + } + else if (dbg_para == 12) { // 12: Disable THP logging + printk(KERN_INFO "%s: mmc_wimax_set_thp_log_status:0\n", __func__); + mmc_wimax_set_thp_log(0); + } + else if (dbg_para == 13) { // 13: Enable THP logging + printk(KERN_INFO "%s: mmc_wimax_set_thp_log_status:1\n", __func__); + mmc_wimax_set_thp_log(1); + } + else if (dbg_para == 14) { // 14: Disable SDIO HW RESET, default is disabled it. + printk(KERN_INFO "%s: mmc_wimax_set_sdio_hw_reset:0\n", __func__); + mmc_wimax_set_sdio_hw_reset(0); + } + else if (dbg_para == 15) { // 15: Enable SDIO HW RESET + printk(KERN_INFO "%s: mmc_wimax_set_sdio_hw_reset:1\n", __func__); + mmc_wimax_set_sdio_hw_reset(1); + } + else if (dbg_para == 16) { // 16: Disable SDIO Packet filter + printk(KERN_INFO "%s: mmc_wimax_set_packet_filter:0\n", __func__); + mmc_wimax_set_packet_filter(0); + } + else if (dbg_para == 17) { // 17: Enable SDIO Packet filter + printk(KERN_INFO "%s: mmc_wimax_set_packet_filter:1\n", __func__); + mmc_wimax_set_packet_filter(1); + } + else if (dbg_para == 18) { // 18: Disable SDIO GPIO interrupt logging + printk(KERN_INFO "%s: mmc_wimax_set_sdio_interrupt_log:0\n", __func__); + mmc_wimax_set_sdio_interrupt_log(0); + } + else if (dbg_para == 19) { // 19: Enable SDIO GPIO interrupt logging + printk(KERN_INFO "%s: mmc_wimax_set_sdio_interrupt_log:1\n", __func__); + mmc_wimax_set_sdio_interrupt_log(1); + } + else if (dbg_para == 20) { // 20: Disable dumping raw data for network packets + printk(KERN_INFO "%s: mmc_wimax_set_netlog_withraw_status:0\n", __func__); + mmc_wimax_set_netlog_withraw_status(0); + } + else if (dbg_para == 21) { // 21: Enable dumping raw data for network packets + printk(KERN_INFO "%s: mmc_wimax_set_netlog_withraw_status:1\n", __func__); + mmc_wimax_set_netlog_withraw_status(1); + } + else { + printk(KERN_INFO "%s: None function:%d\n", __func__, dbg_para); + } + + return (len); +} + +ssize_t wimaxdbg_read(char *buf,char **start,off_t offset,int count,int *eof,void *data) +{ + int len=0; + + if(offset>0) + return 0; + + /* + sprintf(buf,"wimxdbg: %d\nsdcclk:%d\n", dbg_para, sqn_sdio_get_sdc_clocks()); + + for(len=0;lenread_proc = wimaxdbg_read; + wimaxdbg_proc_file->write_proc = wimaxdbg_write; + + dbg_para = 0; + + return 0; +} + +void wimaxdbg_cleanup(void) +{ + printk(KERN_INFO "%s: mimaxdbg_module_claen called. 
Module is now clean\n", __func__); + remove_proc_entry("wimaxdbg",NULL); +} + +module_init(wimaxdbg_init); +module_exit(wimaxdbg_cleanup); + +MODULE_DESCRIPTION("HTC wimaxdbg for SDIO devices"); +MODULE_AUTHOR("HTC"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wimax/wimaxuart/Makefile b/drivers/net/wimax/wimaxuart/Makefile new file mode 100644 index 0000000000000..537f716c2e162 --- /dev/null +++ b/drivers/net/wimax/wimaxuart/Makefile @@ -0,0 +1,10 @@ +MODULE_NAME = wimaxuart +KDIR = /lib/modules/$(CURRENT)/build + +obj-m := $(MODULE_NAME).o +all: + @echo "making $(MODULE_NAME)" + @echo "PWD=$(PWD)" + $(MAKE) -C $(KDIR) M=$(PWD) modules +clean: + $(MAKE) -C $(KDIR) M=$(PWD) clean diff --git a/drivers/net/wimax/wimaxuart/wimaxuart.c b/drivers/net/wimax/wimaxuart/wimaxuart.c new file mode 100644 index 0000000000000..07a2993c5f3ec --- /dev/null +++ b/drivers/net/wimax/wimaxuart/wimaxuart.c @@ -0,0 +1,93 @@ +#include +#include +#include +#include +#include +#include + +#define BUF_LEN 100 + +// # insmod wimaxuart + +static char *wimaxuart_name=NULL; +module_param(wimaxuart_name,charp,0); + +extern int mmc_wimax_uart_switch(int uart); + +static struct proc_dir_entry *wimaxuart_proc_file; +int uart_switch = 0; + +ssize_t wimaxuart_write(struct file *file, const char *buffer, unsigned long count, void *data) +{ + char buf[16]; + unsigned long len = count; + int n; + + printk(KERN_INFO "%d (%s)\n", (int)len, __func__); + + if (len >= sizeof(buf)) + len = sizeof(buf) - 1; + + if (copy_from_user(buf, buffer, len)) + return -EFAULT; + + buf[len] = '\0'; + + n = simple_strtol(buf, NULL, 10); + uart_switch = n; + + printk("%s: uart_switch:%d\n", __func__, uart_switch); + mmc_wimax_uart_switch(uart_switch); + + return (len); +} + +ssize_t wimaxuart_read(char *buf,char **start,off_t offset,int count,int *eof,void *data) +{ + int len=0; + + if(offset>0) + return 0; + + sprintf(buf,"wimaxuart: %d\n", uart_switch); + + for(len=0;lenread_proc = wimaxuart_read; + wimaxuart_proc_file->write_proc = wimaxuart_write; + + uart_switch = 0; + + return 0; +} + +void wimaxuart_cleanup(void) +{ + printk(KERN_INFO "%s: wimaxuart_module_claen called. Module is now clean\n", __func__); + remove_proc_entry("wimaxuart",NULL); +} + +module_init(wimaxuart_init); +module_exit(wimaxuart_cleanup); + +MODULE_DESCRIPTION("HTC wimaxuart for SDIO devices"); +MODULE_AUTHOR("HTC"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index f5fbadaaaf4a9..2c8eca7c7e24c 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -268,10 +268,16 @@ config MWL8K To compile this driver as a module, choose M here: the module will be called mwl8k. If unsure, say N. 
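The wimaxdbg and wimaxuart helpers above are driven entirely by writing a
single decimal value to their proc entries (the write handlers parse it with
simple_strtol). A minimal userspace sketch of how they are exercised,
assuming both modules are loaded so the entries appear as /proc/wimaxdbg and
/proc/wimaxuart:

	/* Illustrative only: exercise the wimaxdbg/wimaxuart proc knobs. */
	#include <stdio.h>

	static int proc_write(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (!f) {
			perror(path);
			return -1;
		}
		fprintf(f, "%s\n", val);
		fclose(f);
		return 0;
	}

	int main(void)
	{
		proc_write("/proc/wimaxdbg", "1");   /* 1: netlog on, 0: off */
		proc_write("/proc/wimaxdbg", "13");  /* 13: THP logging on, 12: off */
		proc_write("/proc/wimaxuart", "2");  /* 2: UART to WiMAX, 0: back to USB */
		return 0;
	}

The value written to /proc/wimaxdbg selects one of the actions listed in
wimaxdbg_write(); the value written to /proc/wimaxuart is passed straight to
mmc_wimax_uart_switch().
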
+config WIFI_CONTROL_FUNC + bool "Enable WiFi control function abstraction" + help + Enables Power/Reset/Carddetect function abstraction + source "drivers/net/wireless/ath/Kconfig" source "drivers/net/wireless/b43/Kconfig" source "drivers/net/wireless/b43legacy/Kconfig" source "drivers/net/wireless/bcm4329/Kconfig" +source "drivers/net/wireless/bcmdhd/Kconfig" source "drivers/net/wireless/hostap/Kconfig" source "drivers/net/wireless/ipw2x00/Kconfig" source "drivers/net/wireless/iwlwifi/Kconfig" diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 8d3feccf712b0..03c4bec05ff4f 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -57,3 +57,4 @@ obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx/ obj-$(CONFIG_IWM) += iwmc3200wifi/ obj-$(CONFIG_BCM4329) += bcm4329/ +obj-$(CONFIG_BCMDHD) += bcmdhd/ diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 4819747fa4c3a..80f0dee83843e 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -652,7 +652,7 @@ static const struct ar9300_eeprom ar9300_x113 = { .regDmn = { LE16(0), LE16(0x1f) }, .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */ .opCapFlags = { - .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A, + .opFlags = AR5416_OPFLAGS_11A, .eepMisc = 0, }, .rfSilent = 0, @@ -922,7 +922,7 @@ static const struct ar9300_eeprom ar9300_x113 = { .db_stage2 = {3, 3, 3}, /* 3 chain */ .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */ .db_stage4 = {3, 3, 3}, /* don't exist for 2G */ - .xpaBiasLvl = 0, + .xpaBiasLvl = 0xf, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */ @@ -3994,6 +3994,16 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray) POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0) ); + /* Write the power for duplicated frames - HT40 */ + + /* dup40_cck (LSB), dup40_ofdm, ext20_cck, ext20_ofdm (MSB) */ + REG_WRITE(ah, 0xa3e0, + POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) | + POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) | + POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) | + POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0) + ); + /* Write the HT20 power per rate set */ /* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */ diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c index 4ceddbbdfcee6..038a0cbfc6e7c 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c @@ -615,7 +615,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs, */ if (rxsp->status11 & AR_CRCErr) rxs->rs_status |= ATH9K_RXERR_CRC; - if (rxsp->status11 & AR_PHYErr) { + else if (rxsp->status11 & AR_PHYErr) { phyerr = MS(rxsp->status11, AR_PHYErrCode); /* * If we reach a point here where AR_PostDelimCRCErr is @@ -638,11 +638,11 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs, rxs->rs_phyerr = phyerr; } - } - if (rxsp->status11 & AR_DecryptCRCErr) + } else if (rxsp->status11 & AR_DecryptCRCErr) rxs->rs_status |= ATH9K_RXERR_DECRYPT; - if (rxsp->status11 & AR_MichaelErr) + else if (rxsp->status11 & AR_MichaelErr) rxs->rs_status |= ATH9K_RXERR_MIC; + if (rxsp->status11 & AR_KeyMiss) rxs->rs_status |= ATH9K_RXERR_DECRYPT; } diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c index b68a1acbddd01..8482eebd11679 100644 --- 
a/drivers/net/wireless/ath/ath9k/calib.c +++ b/drivers/net/wireless/ath/ath9k/calib.c @@ -69,15 +69,21 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah, int16_t *nfarray) { struct ath_common *common = ath9k_hw_common(ah); + struct ieee80211_conf *conf = &common->hw->conf; struct ath_nf_limits *limit; struct ath9k_nfcal_hist *h; bool high_nf_mid = false; + u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask; int i; h = cal->nfCalHist; limit = ath9k_hw_get_nf_limits(ah, ah->curchan); for (i = 0; i < NUM_NF_READINGS; i++) { + if (!(chainmask & (1 << i)) || + ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))) + continue; + h[i].nfCalBuffer[h[i].currIndex] = nfarray[i]; if (++h[i].currIndex >= ATH9K_NF_CAL_HIST_MAX) @@ -225,6 +231,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) int32_t val; u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask; struct ath_common *common = ath9k_hw_common(ah); + struct ieee80211_conf *conf = &common->hw->conf; s16 default_nf = ath9k_hw_get_default_nf(ah, chan); if (ah->caldata) @@ -234,6 +241,9 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) if (chainmask & (1 << i)) { s16 nfval; + if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)) + continue; + if (h) nfval = h[i].privNF; else @@ -293,6 +303,9 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) ENABLE_REGWRITE_BUFFER(ah); for (i = 0; i < NUM_NF_READINGS; i++) { if (chainmask & (1 << i)) { + if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)) + continue; + val = REG_READ(ah, ah->nf_regs[i]); val &= 0xFFFFFE00; val |= (((u32) (-50) << 1) & 0x1ff); diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 9f01e50d5cda7..a3b77ae3827c2 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -495,6 +495,17 @@ static int __ath9k_hw_init(struct ath_hw *ah) if (ah->hw_version.devid == AR5416_AR9100_DEVID) ah->hw_version.macVersion = AR_SREV_VERSION_9100; + /* + * Read back AR_WA into a permanent copy and set bits 14 and 17. + * We need to do this to avoid RMW of this register. We cannot + * read the reg when chip is asleep. + */ + ah->WARegVal = REG_READ(ah, AR_WA); + ah->WARegVal |= (AR_WA_D3_L1_DISABLE | + AR_WA_ASPM_TIMER_BASED_DISABLE); + + ath9k_hw_read_revisions(ah); + if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { ath_err(common, "Couldn't reset chip\n"); return -EIO; @@ -563,14 +574,6 @@ static int __ath9k_hw_init(struct ath_hw *ah) ath9k_hw_init_mode_regs(ah); - /* - * Read back AR_WA into a permanent copy and set bits 14 and 17. - * We need to do this to avoid RMW of this register. We cannot - * read the reg when chip is asleep. 
- */ - ah->WARegVal = REG_READ(ah, AR_WA); - ah->WARegVal |= (AR_WA_D3_L1_DISABLE | - AR_WA_ASPM_TIMER_BASED_DISABLE); if (ah->is_pciexpress) ath9k_hw_configpcipowersave(ah, 0, 0); @@ -1082,8 +1085,6 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah) return false; } - ath9k_hw_read_revisions(ah); - return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM); } @@ -1217,15 +1218,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, ah->txchainmask = common->tx_chainmask; ah->rxchainmask = common->rx_chainmask; - if ((common->bus_ops->ath_bus_type != ATH_USB) && !ah->chip_fullsleep) { - ath9k_hw_abortpcurecv(ah); - if (!ath9k_hw_stopdmarecv(ah)) { - ath_dbg(common, ATH_DBG_XMIT, - "Failed to stop receive dma\n"); - bChannelChange = false; - } - } - if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) return -EIO; diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index 2915b11edefb9..e9fc97d4c9017 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -690,17 +690,23 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY; if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) { + /* + * Treat these errors as mutually exclusive to avoid spurious + * extra error reports from the hardware. If a CRC error is + * reported, then decryption and MIC errors are irrelevant, + * the frame is going to be dropped either way + */ if (ads.ds_rxstatus8 & AR_CRCErr) rs->rs_status |= ATH9K_RXERR_CRC; - if (ads.ds_rxstatus8 & AR_PHYErr) { + else if (ads.ds_rxstatus8 & AR_PHYErr) { rs->rs_status |= ATH9K_RXERR_PHY; phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode); rs->rs_phyerr = phyerr; - } - if (ads.ds_rxstatus8 & AR_DecryptCRCErr) + } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr) rs->rs_status |= ATH9K_RXERR_DECRYPT; - if (ads.ds_rxstatus8 & AR_MichaelErr) + else if (ads.ds_rxstatus8 & AR_MichaelErr) rs->rs_status |= ATH9K_RXERR_MIC; + if (ads.ds_rxstatus8 & AR_KeyMiss) rs->rs_status |= ATH9K_RXERR_DECRYPT; } @@ -770,28 +776,47 @@ void ath9k_hw_abortpcurecv(struct ath_hw *ah) } EXPORT_SYMBOL(ath9k_hw_abortpcurecv); -bool ath9k_hw_stopdmarecv(struct ath_hw *ah) +bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset) { #define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */ #define AH_RX_TIME_QUANTUM 100 /* usec */ struct ath_common *common = ath9k_hw_common(ah); + u32 mac_status, last_mac_status = 0; int i; + /* Enable access to the DMA observation bus */ + REG_WRITE(ah, AR_MACMISC, + ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) | + (AR_MACMISC_MISC_OBS_BUS_1 << + AR_MACMISC_MISC_OBS_BUS_MSB_S))); + REG_WRITE(ah, AR_CR, AR_CR_RXD); /* Wait for rx enable bit to go low */ for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) { if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0) break; + + if (!AR_SREV_9300_20_OR_LATER(ah)) { + mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0; + if (mac_status == 0x1c0 && mac_status == last_mac_status) { + *reset = true; + break; + } + + last_mac_status = mac_status; + } + udelay(AH_TIME_QUANTUM); } if (i == 0) { ath_err(common, - "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n", + "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n", AH_RX_STOP_DMA_TIMEOUT / 1000, REG_READ(ah, AR_CR), - REG_READ(ah, AR_DIAG_SW)); + REG_READ(ah, AR_DIAG_SW), + REG_READ(ah, AR_DMADBG_7)); return false; } else { return true; diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h index 
7512f97e8f49a..d9cc2996107e9 100644 --- a/drivers/net/wireless/ath/ath9k/mac.h +++ b/drivers/net/wireless/ath/ath9k/mac.h @@ -692,7 +692,7 @@ bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set); void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp); void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning); void ath9k_hw_abortpcurecv(struct ath_hw *ah); -bool ath9k_hw_stopdmarecv(struct ath_hw *ah); +bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset); int ath9k_hw_beaconq_setup(struct ath_hw *ah); /* Interrupt Handling */ diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index a09d15f7aa6e0..0848e09954749 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1063,6 +1063,8 @@ static int ath9k_start(struct ieee80211_hw *hw) "Starting driver with initial channel: %d MHz\n", curchan->center_freq); + ath9k_ps_wakeup(sc); + mutex_lock(&sc->mutex); if (ath9k_wiphy_started(sc)) { @@ -1179,6 +1181,8 @@ static int ath9k_start(struct ieee80211_hw *hw) mutex_unlock: mutex_unlock(&sc->mutex); + ath9k_ps_restore(sc); + return r; } diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index b2497b8601e5b..1e0c1e3b514c7 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -439,9 +439,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc) * mode interface or when in monitor mode. AP mode does not need this * since it receives all in-BSS frames anyway. */ - if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) && - (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) || - (sc->sc_ah->is_monitoring)) + if (sc->sc_ah->is_monitoring) rfilt |= ATH9K_RX_FILTER_PROM; if (sc->rx.rxfilter & FIF_CONTROL) @@ -515,12 +513,12 @@ int ath_startrecv(struct ath_softc *sc) bool ath_stoprecv(struct ath_softc *sc) { struct ath_hw *ah = sc->sc_ah; - bool stopped; + bool stopped, reset = false; spin_lock_bh(&sc->rx.rxbuflock); ath9k_hw_abortpcurecv(ah); ath9k_hw_setrxfilter(ah, 0); - stopped = ath9k_hw_stopdmarecv(ah); + stopped = ath9k_hw_stopdmarecv(ah, &reset); if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ath_edma_stop_recv(sc); @@ -535,7 +533,7 @@ bool ath_stoprecv(struct ath_softc *sc) "confusing the DMA engine when we start RX up\n"); ATH_DBG_WARN_ON_ONCE(!stopped); } - return stopped; + return stopped && !reset; } void ath_flushrecv(struct ath_softc *sc) diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 07b7804aec5ba..5c9d83b103f7c 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1699,8 +1699,8 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, u8 tidno; spin_lock_bh(&txctl->txq->axq_lock); - - if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) { + if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an && + ieee80211_is_data_qos(hdr->frame_control)) { tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK; tid = ATH_AN_2_TID(txctl->an, tidno); diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index 248c670fdfbef..5c2cfe6941524 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -195,6 +195,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { {APL9_WORLD, CTL_ETSI, CTL_ETSI}, {APL3_FCCA, CTL_FCC, CTL_FCC}, + {APL7_FCCA, CTL_FCC, CTL_FCC}, {APL1_ETSIC, CTL_FCC, CTL_ETSI}, {APL2_ETSIC, CTL_FCC, CTL_ETSI}, {APL2_APLD, CTL_FCC, NO_CTL}, 
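The noise floor calibration changes in calib.c above key off a six-bit
chainmask built by duplicating ah->rxchainmask into the upper three bits:
NF readings 0..2 hold the per-chain control-channel values and readings 3..5
the extension-channel values, which is why indices at or above
AR5416_MAX_CHAINS are skipped unless the channel is HT40. A small standalone
sketch of the mask arithmetic and the skip test (plain C, not kernel code):

	#include <stdio.h>

	#define NUM_NF_READINGS   6
	#define AR5416_MAX_CHAINS 3

	int main(void)
	{
		unsigned int rxchainmask = 0x5;  /* chains 0 and 2 populated */
		unsigned int chainmask = (rxchainmask << 3) | rxchainmask;  /* 0x2d */
		int is_ht40 = 0;
		int i;

		for (i = 0; i < NUM_NF_READINGS; i++) {
			if (!(chainmask & (1 << i)) ||
			    ((i >= AR5416_MAX_CHAINS) && !is_ht40))
				continue;  /* same skip conditions as the patch */
			printf("reading %d (%s chain %d) used\n", i,
			       i < AR5416_MAX_CHAINS ? "ctl" : "ext",
			       i % AR5416_MAX_CHAINS);
		}
		return 0;
	}

With rxchainmask = 0x5 and a non-HT40 channel this keeps only readings 0 and
2, i.e. the control-channel values of the populated chains.
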
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index 3d5566e7af0ad..ff0f5ba14b2cf 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c @@ -1536,7 +1536,7 @@ static void dma_rx(struct b43_dmaring *ring, int *slot) dmaaddr = meta->dmaaddr; goto drop_recycle_buffer; } - if (unlikely(len > ring->rx_buffersize)) { + if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) { /* The data did not fit into one descriptor buffer * and is split over multiple buffers. * This should never happen, as we try to allocate buffers diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h index a01c2100f1664..e8a80a1251bf6 100644 --- a/drivers/net/wireless/b43/dma.h +++ b/drivers/net/wireless/b43/dma.h @@ -163,7 +163,7 @@ struct b43_dmadesc_generic { /* DMA engine tuning knobs */ #define B43_TXRING_SLOTS 256 #define B43_RXRING_SLOTS 64 -#define B43_DMA0_RX_BUFFERSIZE IEEE80211_MAX_FRAME_LEN +#define B43_DMA0_RX_BUFFERSIZE (B43_DMA0_RX_FRAMEOFFSET + IEEE80211_MAX_FRAME_LEN) /* Pointer poison */ #define B43_DMA_PTR_POISON ((void *)ERR_PTR(-ENOMEM)) diff --git a/drivers/net/wireless/bcm4329/Makefile b/drivers/net/wireless/bcm4329/Makefile index 3f49a643e8ff3..0c84dd57d3642 100644 --- a/drivers/net/wireless/bcm4329/Makefile +++ b/drivers/net/wireless/bcm4329/Makefile @@ -3,11 +3,11 @@ DHDCFLAGS = -DLINUX -DBCMDRIVER -DBCMDONGLEHOST -DDHDTHREAD -DBCMWPA2 \ -DUNRELEASEDCHIP -Dlinux -DDHD_SDALIGN=64 -DMAX_HDR_READ=64 \ -DDHD_FIRSTREAD=64 -DDHD_GPL -DDHD_SCHED -DBDC -DTOE -DDHD_BCMEVENTS \ -DSHOW_EVENTS -DBCMSDIO -DDHD_GPL -DBCMLXSDMMC -DBCMPLATFORM_BUS \ - -Wall -Wstrict-prototypes -Werror -DOOB_INTR_ONLY -DCUSTOMER_HW2 \ + -Wall -Wstrict-prototypes -DOOB_INTR_ONLY -DCUSTOMER_HW2 \ -DDHD_USE_STATIC_BUF -DMMC_SDIO_ABORT -DDHD_DEBUG_TRAP -DSOFTAP \ -DEMBEDDED_PLATFORM -DARP_OFFLOAD_SUPPORT -DPKT_FILTER_SUPPORT \ -DGET_CUSTOM_MAC_ENABLE -DSET_RANDOM_MAC_SOFTAP -DCSCAN -DHW_OOB \ - -DKEEP_ALIVE \ + -DKEEP_ALIVE -DPNO_SUPPORT \ -Idrivers/net/wireless/bcm4329 -Idrivers/net/wireless/bcm4329/include DHDOFILES = dhd_linux.o linux_osl.o bcmutils.o dhd_common.o dhd_custom_gpio.o \ diff --git a/drivers/net/wireless/bcm4329/bcmsdh_linux.c b/drivers/net/wireless/bcm4329/bcmsdh_linux.c index 3b7da4263fd8e..94f19a1c46a4f 100644 --- a/drivers/net/wireless/bcm4329/bcmsdh_linux.c +++ b/drivers/net/wireless/bcm4329/bcmsdh_linux.c @@ -301,7 +301,7 @@ int bcmsdh_remove(struct device *dev) MFREE(osh, sdhc, sizeof(bcmsdh_hc_t)); osl_detach(osh); -#if !defined(BCMLXSDMMC) +#if !defined(BCMLXSDMMC) || defined(OOB_INTR_ONLY) dev_set_drvdata(dev, NULL); #endif /* !defined(BCMLXSDMMC) */ @@ -643,14 +643,30 @@ int bcmsdh_register_oob_intr(void * dhdp) return 0; } +void bcmsdh_set_irq(int flag) +{ + if (sdhcinfo->oob_irq_registered) { + SDLX_MSG(("%s Flag = %d", __FUNCTION__, flag)); + if (flag) { + enable_irq(sdhcinfo->oob_irq); + enable_irq_wake(sdhcinfo->oob_irq); + } else { + disable_irq_wake(sdhcinfo->oob_irq); + disable_irq(sdhcinfo->oob_irq); + } + } +} + void bcmsdh_unregister_oob_intr(void) { SDLX_MSG(("%s: Enter\n", __FUNCTION__)); - set_irq_wake(sdhcinfo->oob_irq, 0); - disable_irq(sdhcinfo->oob_irq); /* just in case.. */ - free_irq(sdhcinfo->oob_irq, NULL); - sdhcinfo->oob_irq_registered = FALSE; + if (sdhcinfo->oob_irq_registered) { + set_irq_wake(sdhcinfo->oob_irq, 0); + disable_irq(sdhcinfo->oob_irq); /* just in case.. 
*/ + free_irq(sdhcinfo->oob_irq, NULL); + sdhcinfo->oob_irq_registered = FALSE; + } } #endif /* defined(OOB_INTR_ONLY) */ /* Module parameters specific to each host-controller driver */ diff --git a/drivers/net/wireless/bcm4329/bcmsdh_sdmmc_linux.c b/drivers/net/wireless/bcm4329/bcmsdh_sdmmc_linux.c index 8992a4267f9f7..5a1a46c93571c 100644 --- a/drivers/net/wireless/bcm4329/bcmsdh_sdmmc_linux.c +++ b/drivers/net/wireless/bcm4329/bcmsdh_sdmmc_linux.c @@ -82,7 +82,6 @@ PBCMSDH_SDMMC_INSTANCE gInstance; extern int bcmsdh_probe(struct device *dev); extern int bcmsdh_remove(struct device *dev); -struct device sdmmc_dev; static int bcmsdh_sdmmc_probe(struct sdio_func *func, const struct sdio_device_id *id) @@ -102,7 +101,7 @@ static int bcmsdh_sdmmc_probe(struct sdio_func *func, if(func->device == 0x4) { /* 4318 */ gInstance->func[2] = NULL; sd_trace(("NIC found, calling bcmsdh_probe...\n")); - ret = bcmsdh_probe(&sdmmc_dev); + ret = bcmsdh_probe(&func->dev); } } @@ -110,7 +109,7 @@ static int bcmsdh_sdmmc_probe(struct sdio_func *func, if (func->num == 2) { sd_trace(("F2 found, calling bcmsdh_probe...\n")); - ret = bcmsdh_probe(&sdmmc_dev); + ret = bcmsdh_probe(&func->dev); } return ret; @@ -126,7 +125,7 @@ static void bcmsdh_sdmmc_remove(struct sdio_func *func) if (func->num == 2) { sd_trace(("F2 found, calling bcmsdh_remove...\n")); - bcmsdh_remove(&sdmmc_dev); + bcmsdh_remove(&func->dev); } } @@ -250,10 +249,8 @@ int sdio_function_init(void) if (!gInstance) return -ENOMEM; - bzero(&sdmmc_dev, sizeof(sdmmc_dev)); error = sdio_register_driver(&bcmsdh_sdmmc_driver); - return error; } @@ -265,7 +262,6 @@ void sdio_function_cleanup(void) { sd_trace(("%s Enter\n", __FUNCTION__)); - sdio_unregister_driver(&bcmsdh_sdmmc_driver); if (gInstance) diff --git a/drivers/net/wireless/bcm4329/bcmspibrcm.c b/drivers/net/wireless/bcm4329/bcmspibrcm.c deleted file mode 100644 index 0f131a40f4b8e..0000000000000 --- a/drivers/net/wireless/bcm4329/bcmspibrcm.c +++ /dev/null @@ -1,1726 +0,0 @@ -/* - * Broadcom BCMSDH to gSPI Protocol Conversion Layer - * - * Copyright (C) 2010, Broadcom Corporation - * All Rights Reserved. - * - * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation; - * the contents of this file may not be disclosed to third parties, copied - * or duplicated in any form, in whole or in part, without the prior - * written permission of Broadcom Corporation. - * - * $Id: bcmspibrcm.c,v 1.11.2.10.2.9.6.11 2009/05/21 13:21:57 Exp $ - */ - -#define HSMODE - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include /* bcmsdh to/from specific controller APIs */ -#include /* ioctl/iovars */ -#include - -#include - - -#include -#include - -#define F0_RESPONSE_DELAY 16 -#define F1_RESPONSE_DELAY 16 -#define F2_RESPONSE_DELAY F0_RESPONSE_DELAY - -#define CMDLEN 4 - -#define DWORDMODE_ON (sd->chip == BCM4329_CHIP_ID) && (sd->chiprev == 2) && (sd->dwordmode == TRUE) - -/* Globals */ -uint sd_msglevel = 0; - -uint sd_hiok = FALSE; /* Use hi-speed mode if available? 
*/ -uint sd_sdmode = SDIOH_MODE_SPI; /* Use SD4 mode by default */ -uint sd_f2_blocksize = 64; /* Default blocksize */ - - -uint sd_divisor = 2; -uint sd_power = 1; /* Default to SD Slot powered ON */ -uint sd_clock = 1; /* Default to SD Clock turned ON */ -uint sd_crc = 0; /* Default to SPI CRC Check turned OFF */ - -uint8 spi_outbuf[SPI_MAX_PKT_LEN]; -uint8 spi_inbuf[SPI_MAX_PKT_LEN]; - -/* 128bytes buffer is enough to clear data-not-available and program response-delay F0 bits - * assuming we will not exceed F0 response delay > 100 bytes at 48MHz. - */ -#define BUF2_PKT_LEN 128 -uint8 spi_outbuf2[BUF2_PKT_LEN]; -uint8 spi_inbuf2[BUF2_PKT_LEN]; - -/* Prototypes */ -static bool bcmspi_test_card(sdioh_info_t *sd); -static bool bcmspi_host_device_init_adapt(sdioh_info_t *sd); -static int bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode); -static int bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg, - uint32 *data, uint32 datalen); -static int bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, - int regsize, uint32 *data); -static int bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, - int regsize, uint32 data); -static int bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr, - uint8 *data); -static int bcmspi_driver_init(sdioh_info_t *sd); -static int bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, - uint32 addr, int nbytes, uint32 *data); -static int bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, - uint32 *data); -static void bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer); -static int bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg); - -/* - * Public entry points & extern's - */ -extern sdioh_info_t * -sdioh_attach(osl_t *osh, void *bar0, uint irq) -{ - sdioh_info_t *sd; - - sd_trace(("%s\n", __FUNCTION__)); - if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) { - sd_err(("%s: out of memory, malloced %d bytes\n", __FUNCTION__, MALLOCED(osh))); - return NULL; - } - bzero((char *)sd, sizeof(sdioh_info_t)); - sd->osh = osh; - if (spi_osinit(sd) != 0) { - sd_err(("%s: spi_osinit() failed\n", __FUNCTION__)); - MFREE(sd->osh, sd, sizeof(sdioh_info_t)); - return NULL; - } - - sd->bar0 = bar0; - sd->irq = irq; - sd->intr_handler = NULL; - sd->intr_handler_arg = NULL; - sd->intr_handler_valid = FALSE; - - /* Set defaults */ - sd->use_client_ints = TRUE; - sd->sd_use_dma = FALSE; /* DMA Not supported */ - - /* Spi device default is 16bit mode, change to 4 when device is changed to 32bit - * mode - */ - sd->wordlen = 2; - - if (!spi_hw_attach(sd)) { - sd_err(("%s: spi_hw_attach() failed\n", __FUNCTION__)); - spi_osfree(sd); - MFREE(sd->osh, sd, sizeof(sdioh_info_t)); - return (NULL); - } - - if (bcmspi_driver_init(sd) != SUCCESS) { - sd_err(("%s: bcmspi_driver_init() failed()\n", __FUNCTION__)); - spi_hw_detach(sd); - spi_osfree(sd); - MFREE(sd->osh, sd, sizeof(sdioh_info_t)); - return (NULL); - } - - if (spi_register_irq(sd, irq) != SUCCESS) { - sd_err(("%s: spi_register_irq() failed for irq = %d\n", __FUNCTION__, irq)); - spi_hw_detach(sd); - spi_osfree(sd); - MFREE(sd->osh, sd, sizeof(sdioh_info_t)); - return (NULL); - } - - sd_trace(("%s: Done\n", __FUNCTION__)); - - return sd; -} - -extern SDIOH_API_RC -sdioh_detach(osl_t *osh, sdioh_info_t *sd) -{ - sd_trace(("%s\n", __FUNCTION__)); - if (sd) { - sd_err(("%s: detaching from hardware\n", __FUNCTION__)); - spi_free_irq(sd->irq, sd); - spi_hw_detach(sd); - spi_osfree(sd); - MFREE(sd->osh, 
sd, sizeof(sdioh_info_t)); - } - return SDIOH_API_RC_SUCCESS; -} - -/* Configure callback to client when we recieve client interrupt */ -extern SDIOH_API_RC -sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh) -{ - sd_trace(("%s: Entering\n", __FUNCTION__)); - sd->intr_handler = fn; - sd->intr_handler_arg = argh; - sd->intr_handler_valid = TRUE; - return SDIOH_API_RC_SUCCESS; -} - -extern SDIOH_API_RC -sdioh_interrupt_deregister(sdioh_info_t *sd) -{ - sd_trace(("%s: Entering\n", __FUNCTION__)); - sd->intr_handler_valid = FALSE; - sd->intr_handler = NULL; - sd->intr_handler_arg = NULL; - return SDIOH_API_RC_SUCCESS; -} - -extern SDIOH_API_RC -sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff) -{ - sd_trace(("%s: Entering\n", __FUNCTION__)); - *onoff = sd->client_intr_enabled; - return SDIOH_API_RC_SUCCESS; -} - -#if defined(DHD_DEBUG) -extern bool -sdioh_interrupt_pending(sdioh_info_t *sd) -{ - return 0; -} -#endif - -extern SDIOH_API_RC -sdioh_query_device(sdioh_info_t *sd) -{ - /* Return a BRCM ID appropriate to the dongle class */ - return (sd->num_funcs > 1) ? BCM4329_D11NDUAL_ID : BCM4318_D11G_ID; -} - -/* Provide dstatus bits of spi-transaction for dhd layers. */ -extern uint32 -sdioh_get_dstatus(sdioh_info_t *sd) -{ - return sd->card_dstatus; -} - -extern void -sdioh_chipinfo(sdioh_info_t *sd, uint32 chip, uint32 chiprev) -{ - sd->chip = chip; - sd->chiprev = chiprev; -} - -extern void -sdioh_dwordmode(sdioh_info_t *sd, bool set) -{ - uint8 reg = 0; - int status; - - if ((status = sdioh_request_byte(sd, SDIOH_READ, SPI_FUNC_0, SPID_STATUS_ENABLE, ®)) != - SUCCESS) { - sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__)); - return; - } - - if (set) { - reg |= DWORD_PKT_LEN_EN; - sd->dwordmode = TRUE; - sd->client_block_size[SPI_FUNC_2] = 4096; /* h2spi's limit is 4KB, we support 8KB */ - } else { - reg &= ~DWORD_PKT_LEN_EN; - sd->dwordmode = FALSE; - sd->client_block_size[SPI_FUNC_2] = 2048; - } - - if ((status = sdioh_request_byte(sd, SDIOH_WRITE, SPI_FUNC_0, SPID_STATUS_ENABLE, ®)) != - SUCCESS) { - sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__)); - return; - } -} - - -uint -sdioh_query_iofnum(sdioh_info_t *sd) -{ - return sd->num_funcs; -} - -/* IOVar table */ -enum { - IOV_MSGLEVEL = 1, - IOV_BLOCKMODE, - IOV_BLOCKSIZE, - IOV_DMA, - IOV_USEINTS, - IOV_NUMINTS, - IOV_NUMLOCALINTS, - IOV_HOSTREG, - IOV_DEVREG, - IOV_DIVISOR, - IOV_SDMODE, - IOV_HISPEED, - IOV_HCIREGS, - IOV_POWER, - IOV_CLOCK, - IOV_SPIERRSTATS, - IOV_RESP_DELAY_ALL -}; - -const bcm_iovar_t sdioh_iovars[] = { - {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 }, - {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */ - {"sd_dma", IOV_DMA, 0, IOVT_BOOL, 0 }, - {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 }, - {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 }, - {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 }, - {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, - {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, - {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 }, - {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 }, - {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 }, - {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100}, - {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0}, - {"spi_errstats", IOV_SPIERRSTATS, 0, IOVT_BUFFER, sizeof(struct spierrstats_t) }, - {"spi_respdelay", IOV_RESP_DELAY_ALL, 0, IOVT_BOOL, 0 }, - {NULL, 0, 0, 0, 0 } -}; - -int -sdioh_iovar_op(sdioh_info_t *si, const char *name, - void *params, int plen, void *arg, int 
len, bool set) -{ - const bcm_iovar_t *vi = NULL; - int bcmerror = 0; - int val_size; - int32 int_val = 0; - bool bool_val; - uint32 actionid; -/* - sdioh_regs_t *regs; -*/ - - ASSERT(name); - ASSERT(len >= 0); - - /* Get must have return space; Set does not take qualifiers */ - ASSERT(set || (arg && len)); - ASSERT(!set || (!params && !plen)); - - sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name)); - - if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) { - bcmerror = BCME_UNSUPPORTED; - goto exit; - } - - if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0) - goto exit; - - /* Set up params so get and set can share the convenience variables */ - if (params == NULL) { - params = arg; - plen = len; - } - - if (vi->type == IOVT_VOID) - val_size = 0; - else if (vi->type == IOVT_BUFFER) - val_size = len; - else - val_size = sizeof(int); - - if (plen >= (int)sizeof(int_val)) - bcopy(params, &int_val, sizeof(int_val)); - - bool_val = (int_val != 0) ? TRUE : FALSE; - - actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); - switch (actionid) { - case IOV_GVAL(IOV_MSGLEVEL): - int_val = (int32)sd_msglevel; - bcopy(&int_val, arg, val_size); - break; - - case IOV_SVAL(IOV_MSGLEVEL): - sd_msglevel = int_val; - break; - - case IOV_GVAL(IOV_BLOCKSIZE): - if ((uint32)int_val > si->num_funcs) { - bcmerror = BCME_BADARG; - break; - } - int_val = (int32)si->client_block_size[int_val]; - bcopy(&int_val, arg, val_size); - break; - - case IOV_GVAL(IOV_DMA): - int_val = (int32)si->sd_use_dma; - bcopy(&int_val, arg, val_size); - break; - - case IOV_SVAL(IOV_DMA): - si->sd_use_dma = (bool)int_val; - break; - - case IOV_GVAL(IOV_USEINTS): - int_val = (int32)si->use_client_ints; - bcopy(&int_val, arg, val_size); - break; - - case IOV_SVAL(IOV_USEINTS): - break; - - case IOV_GVAL(IOV_DIVISOR): - int_val = (uint32)sd_divisor; - bcopy(&int_val, arg, val_size); - break; - - case IOV_SVAL(IOV_DIVISOR): - sd_divisor = int_val; - if (!spi_start_clock(si, (uint16)sd_divisor)) { - sd_err(("%s: set clock failed\n", __FUNCTION__)); - bcmerror = BCME_ERROR; - } - break; - - case IOV_GVAL(IOV_POWER): - int_val = (uint32)sd_power; - bcopy(&int_val, arg, val_size); - break; - - case IOV_SVAL(IOV_POWER): - sd_power = int_val; - break; - - case IOV_GVAL(IOV_CLOCK): - int_val = (uint32)sd_clock; - bcopy(&int_val, arg, val_size); - break; - - case IOV_SVAL(IOV_CLOCK): - sd_clock = int_val; - break; - - case IOV_GVAL(IOV_SDMODE): - int_val = (uint32)sd_sdmode; - bcopy(&int_val, arg, val_size); - break; - - case IOV_SVAL(IOV_SDMODE): - sd_sdmode = int_val; - break; - - case IOV_GVAL(IOV_HISPEED): - int_val = (uint32)sd_hiok; - bcopy(&int_val, arg, val_size); - break; - - case IOV_SVAL(IOV_HISPEED): - sd_hiok = int_val; - - if (!bcmspi_set_highspeed_mode(si, (bool)sd_hiok)) { - sd_err(("%s: Failed changing highspeed mode to %d.\n", - __FUNCTION__, sd_hiok)); - bcmerror = BCME_ERROR; - return ERROR; - } - break; - - case IOV_GVAL(IOV_NUMINTS): - int_val = (int32)si->intrcount; - bcopy(&int_val, arg, val_size); - break; - - case IOV_GVAL(IOV_NUMLOCALINTS): - int_val = (int32)si->local_intrcount; - bcopy(&int_val, arg, val_size); - break; - case IOV_GVAL(IOV_DEVREG): - { - sdreg_t *sd_ptr = (sdreg_t *)params; - uint8 data; - - if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) { - bcmerror = BCME_SDIO_ERROR; - break; - } - - int_val = (int)data; - bcopy(&int_val, arg, sizeof(int_val)); - break; - } - - case IOV_SVAL(IOV_DEVREG): - { - sdreg_t *sd_ptr = (sdreg_t *)params; - uint8 data 
= (uint8)sd_ptr->value; - - if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) { - bcmerror = BCME_SDIO_ERROR; - break; - } - break; - } - - - case IOV_GVAL(IOV_SPIERRSTATS): - { - bcopy(&si->spierrstats, arg, sizeof(struct spierrstats_t)); - break; - } - - case IOV_SVAL(IOV_SPIERRSTATS): - { - bzero(&si->spierrstats, sizeof(struct spierrstats_t)); - break; - } - - case IOV_GVAL(IOV_RESP_DELAY_ALL): - int_val = (int32)si->resp_delay_all; - bcopy(&int_val, arg, val_size); - break; - - case IOV_SVAL(IOV_RESP_DELAY_ALL): - si->resp_delay_all = (bool)int_val; - int_val = STATUS_ENABLE|INTR_WITH_STATUS; - if (si->resp_delay_all) - int_val |= RESP_DELAY_ALL; - else { - if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_RESPONSE_DELAY, 1, - F1_RESPONSE_DELAY) != SUCCESS) { - sd_err(("%s: Unable to set response delay.\n", __FUNCTION__)); - bcmerror = BCME_SDIO_ERROR; - break; - } - } - - if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_STATUS_ENABLE, 1, int_val) - != SUCCESS) { - sd_err(("%s: Unable to set response delay.\n", __FUNCTION__)); - bcmerror = BCME_SDIO_ERROR; - break; - } - break; - - default: - bcmerror = BCME_UNSUPPORTED; - break; - } -exit: - - return bcmerror; -} - -extern SDIOH_API_RC -sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) -{ - SDIOH_API_RC status; - /* No lock needed since sdioh_request_byte does locking */ - status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data); - return status; -} - -extern SDIOH_API_RC -sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) -{ - /* No lock needed since sdioh_request_byte does locking */ - SDIOH_API_RC status; - - if ((fnc_num == SPI_FUNC_1) && (addr == SBSDIO_FUNC1_FRAMECTRL)) { - uint8 dummy_data; - status = sdioh_cfg_read(sd, fnc_num, addr, &dummy_data); - if (status) { - sd_err(("sdioh_cfg_read() failed.\n")); - return status; - } - } - - status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data); - return status; -} - -extern SDIOH_API_RC -sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length) -{ - uint32 count; - int offset; - uint32 cis_byte; - uint16 *cis = (uint16 *)cisd; - uint bar0 = SI_ENUM_BASE; - int status; - uint8 data; - - sd_trace(("%s: Func %d\n", __FUNCTION__, func)); - - spi_lock(sd); - - /* Set sb window address to 0x18000000 */ - data = (bar0 >> 8) & SBSDIO_SBADDRLOW_MASK; - status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, &data); - if (status == SUCCESS) { - data = (bar0 >> 16) & SBSDIO_SBADDRMID_MASK; - status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID, &data); - } else { - sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__)); - spi_unlock(sd); - return (BCME_ERROR); - } - if (status == SUCCESS) { - data = (bar0 >> 24) & SBSDIO_SBADDRHIGH_MASK; - status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH, &data); - } else { - sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__)); - spi_unlock(sd); - return (BCME_ERROR); - } - - offset = CC_OTP; /* OTP offset in chipcommon. 
*/ - for (count = 0; count < length/2; count++) { - if (bcmspi_card_regread (sd, SDIO_FUNC_1, offset, 2, &cis_byte) < 0) { - sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__)); - spi_unlock(sd); - return (BCME_ERROR); - } - - *cis = (uint16)cis_byte; - cis++; - offset += 2; - } - - spi_unlock(sd); - - return (BCME_OK); -} - -extern SDIOH_API_RC -sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte) -{ - int status; - uint32 cmd_arg; - uint32 dstatus; - uint32 data = (uint32)(*byte); - - spi_lock(sd); - - cmd_arg = 0; - cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); - cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ - cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); - cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, rw == SDIOH_READ ? 0 : 1); - cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1); - - sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg)); - sd_trace(("%s: rw=%d, func=%d, regaddr=0x%08x, data=0x%x\n", __FUNCTION__, rw, func, - regaddr, data)); - - if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, - cmd_arg, &data, 1)) != SUCCESS) { - spi_unlock(sd); - return status; - } - - if (rw == SDIOH_READ) - *byte = (uint8)data; - - bcmspi_cmd_getdstatus(sd, &dstatus); - if (dstatus) - sd_trace(("dstatus =0x%x\n", dstatus)); - - spi_unlock(sd); - return SDIOH_API_RC_SUCCESS; -} - -extern SDIOH_API_RC -sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr, - uint32 *word, uint nbytes) -{ - int status; - - spi_lock(sd); - - if (rw == SDIOH_READ) - status = bcmspi_card_regread(sd, func, addr, nbytes, word); - else - status = bcmspi_card_regwrite(sd, func, addr, nbytes, *word); - - spi_unlock(sd); - return (status == SUCCESS ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); -} - -extern SDIOH_API_RC -sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func, - uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt) -{ - int len; - int buflen = (int)buflen_u; - bool fifo = (fix_inc == SDIOH_DATA_FIX); - - spi_lock(sd); - - ASSERT(reg_width == 4); - ASSERT(buflen_u < (1 << 30)); - ASSERT(sd->client_block_size[func]); - - sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n", - __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W', - buflen_u, sd->r_cnt, sd->t_cnt, pkt)); - - /* Break buffer down into blocksize chunks. */ - while (buflen > 0) { - len = MIN(sd->client_block_size[func], buflen); - if (bcmspi_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) { - sd_err(("%s: bcmspi_card_buf %s failed\n", - __FUNCTION__, rw == SDIOH_READ ? "Read" : "Write")); - spi_unlock(sd); - return SDIOH_API_RC_FAIL; - } - buffer += len; - buflen -= len; - if (!fifo) - addr += len; - } - spi_unlock(sd); - return SDIOH_API_RC_SUCCESS; -} - -/* This function allows write to gspi bus when another rd/wr function is deep down the call stack. - * Its main aim is to have simpler spi writes rather than recursive writes. - * e.g. When there is a need to program response delay on the fly after detecting the SPI-func - * this call will allow to program the response delay. 
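 * A minimal illustrative sketch (hypothetical caller, modelled on the
 * data-not-available handling in bcmspi_update_stats() further below):
 *
 *	if (dstatus & STATUS_DATA_NOT_AVAILABLE)
 *		bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG,
 *			DATA_UNAVAILABLE);
 *
 * The write travels over the dedicated spi_outbuf2/spi_inbuf2 buffers, so the
 * buffers of the transfer already in flight are left untouched.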
- */ -static int -bcmspi_card_byterewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 byte) -{ - uint32 cmd_arg; - uint32 datalen = 1; - uint32 hostlen; - - cmd_arg = 0; - - cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1); - cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ - cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); - cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); - cmd_arg = SFIELD(cmd_arg, SPI_LEN, datalen); - - sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg)); - - - /* Set up and issue the SPI command. MSByte goes out on bus first. Increase datalen - * according to the wordlen mode(16/32bit) the device is in. - */ - ASSERT(sd->wordlen == 4 || sd->wordlen == 2); - datalen = ROUNDUP(datalen, sd->wordlen); - - /* Start by copying command in the spi-outbuffer */ - if (sd->wordlen == 4) { /* 32bit spid */ - *(uint32 *)spi_outbuf2 = bcmswap32(cmd_arg); - if (datalen & 0x3) - datalen += (4 - (datalen & 0x3)); - } else if (sd->wordlen == 2) { /* 16bit spid */ - *(uint16 *)spi_outbuf2 = bcmswap16(cmd_arg & 0xffff); - *(uint16 *)&spi_outbuf2[2] = bcmswap16((cmd_arg & 0xffff0000) >> 16); - if (datalen & 0x1) - datalen++; - } else { - sd_err(("%s: Host is %d bit spid, could not create SPI command.\n", - __FUNCTION__, 8 * sd->wordlen)); - return ERROR; - } - - /* for Write, put the data into the output buffer */ - if (datalen != 0) { - if (sd->wordlen == 4) { /* 32bit spid */ - *(uint32 *)&spi_outbuf2[CMDLEN] = bcmswap32(byte); - } else if (sd->wordlen == 2) { /* 16bit spid */ - *(uint16 *)&spi_outbuf2[CMDLEN] = bcmswap16(byte & 0xffff); - *(uint16 *)&spi_outbuf2[CMDLEN + 2] = - bcmswap16((byte & 0xffff0000) >> 16); - } - } - - /* +4 for cmd, +4 for dstatus */ - hostlen = datalen + 8; - hostlen += (4 - (hostlen & 0x3)); - spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, hostlen); - - /* Last 4bytes are dstatus. Device is configured to return status bits. */ - if (sd->wordlen == 4) { /* 32bit spid */ - sd->card_dstatus = bcmswap32(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]); - } else if (sd->wordlen == 2) { /* 16bit spid */ - sd->card_dstatus = (bcmswap16(*(uint16 *)&spi_inbuf2[datalen + CMDLEN ]) | - (bcmswap16(*(uint16 *)&spi_inbuf2[datalen + CMDLEN + 2]) << 16)); - } else { - sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n", - __FUNCTION__, 8 * sd->wordlen)); - return ERROR; - } - - if (sd->card_dstatus) - sd_trace(("dstatus after byte rewrite = 0x%x\n", sd->card_dstatus)); - - return (BCME_OK); -} - -/* Program the response delay corresponding to the spi function */ -static int -bcmspi_prog_resp_delay(sdioh_info_t *sd, int func, uint8 resp_delay) -{ - if (sd->resp_delay_all == FALSE) - return (BCME_OK); - - if (sd->prev_fun == func) - return (BCME_OK); - - if (F0_RESPONSE_DELAY == F1_RESPONSE_DELAY) - return (BCME_OK); - - bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_RESPONSE_DELAY, resp_delay); - - /* Remember function for which to avoid reprogramming resp-delay in next iteration */ - sd->prev_fun = func; - - return (BCME_OK); - -} - -#define GSPI_RESYNC_PATTERN 0x0 - -/* A resync pattern is a 32bit MOSI line with all zeros. Its a special command in gSPI. - * It resets the spi-bkplane logic so that all F1 related ping-pong buffer logic is - * synchronised and all queued resuests are cancelled. - */ -static int -bcmspi_resync_f1(sdioh_info_t *sd) -{ - uint32 cmd_arg = GSPI_RESYNC_PATTERN, data = 0, datalen = 0; - - - /* Set up and issue the SPI command. MSByte goes out on bus first. 
Increase datalen - * according to the wordlen mode(16/32bit) the device is in. - */ - ASSERT(sd->wordlen == 4 || sd->wordlen == 2); - datalen = ROUNDUP(datalen, sd->wordlen); - - /* Start by copying command in the spi-outbuffer */ - *(uint32 *)spi_outbuf2 = cmd_arg; - - /* for Write, put the data into the output buffer */ - *(uint32 *)&spi_outbuf2[CMDLEN] = data; - - /* +4 for cmd, +4 for dstatus */ - spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, datalen + 8); - - /* Last 4bytes are dstatus. Device is configured to return status bits. */ - if (sd->wordlen == 4) { /* 32bit spid */ - sd->card_dstatus = bcmswap32(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]); - } else if (sd->wordlen == 2) { /* 16bit spid */ - sd->card_dstatus = (bcmswap16(*(uint16 *)&spi_inbuf2[datalen + CMDLEN ]) | - (bcmswap16(*(uint16 *)&spi_inbuf2[datalen + CMDLEN + 2]) << 16)); - } else { - sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n", - __FUNCTION__, 8 * sd->wordlen)); - return ERROR; - } - - if (sd->card_dstatus) - sd_trace(("dstatus after resync pattern write = 0x%x\n", sd->card_dstatus)); - - return (BCME_OK); -} - -uint32 dstatus_count = 0; - -static int -bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg) -{ - uint32 dstatus = sd->card_dstatus; - struct spierrstats_t *spierrstats = &sd->spierrstats; - int err = SUCCESS; - - sd_trace(("cmd = 0x%x, dstatus = 0x%x\n", cmd_arg, dstatus)); - - /* Store dstatus of last few gSPI transactions */ - spierrstats->dstatus[dstatus_count % NUM_PREV_TRANSACTIONS] = dstatus; - spierrstats->spicmd[dstatus_count % NUM_PREV_TRANSACTIONS] = cmd_arg; - dstatus_count++; - - if (sd->card_init_done == FALSE) - return err; - - if (dstatus & STATUS_DATA_NOT_AVAILABLE) { - spierrstats->dna++; - sd_trace(("Read data not available on F1 addr = 0x%x\n", - GFIELD(cmd_arg, SPI_REG_ADDR))); - /* Clear dna bit */ - bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, DATA_UNAVAILABLE); - } - - if (dstatus & STATUS_UNDERFLOW) { - spierrstats->rdunderflow++; - sd_err(("FIFO underflow happened due to current F2 read command.\n")); - } - - if (dstatus & STATUS_OVERFLOW) { - spierrstats->wroverflow++; - sd_err(("FIFO overflow happened due to current (F1/F2) write command.\n")); - if ((sd->chip == BCM4329_CHIP_ID) && (sd->chiprev == 0)) { - bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, F1_OVERFLOW); - bcmspi_resync_f1(sd); - sd_err(("Recovering from F1 FIFO overflow.\n")); - } else { - err = ERROR_OF; - } - } - - if (dstatus & STATUS_F2_INTR) { - spierrstats->f2interrupt++; - sd_trace(("Interrupt from F2. SW should clear corresponding IntStatus bits\n")); - } - - if (dstatus & STATUS_F3_INTR) { - spierrstats->f3interrupt++; - sd_err(("Interrupt from F3. SW should clear corresponding IntStatus bits\n")); - } - - if (dstatus & STATUS_HOST_CMD_DATA_ERR) { - spierrstats->hostcmddataerr++; - sd_err(("Error in CMD or Host data, detected by CRC/Checksum (optional)\n")); - } - - if (dstatus & STATUS_F2_PKT_AVAILABLE) { - spierrstats->f2pktavailable++; - sd_trace(("Packet is available/ready in F2 TX FIFO\n")); - sd_trace(("Packet length = %d\n", sd->dwordmode ? 
- ((dstatus & STATUS_F2_PKT_LEN_MASK) >> (STATUS_F2_PKT_LEN_SHIFT - 2)) : - ((dstatus & STATUS_F2_PKT_LEN_MASK) >> STATUS_F2_PKT_LEN_SHIFT))); - } - - if (dstatus & STATUS_F3_PKT_AVAILABLE) { - spierrstats->f3pktavailable++; - sd_err(("Packet is available/ready in F3 TX FIFO\n")); - sd_err(("Packet length = %d\n", - (dstatus & STATUS_F3_PKT_LEN_MASK) >> STATUS_F3_PKT_LEN_SHIFT)); - } - - return err; -} - -extern int -sdioh_abort(sdioh_info_t *sd, uint func) -{ - return 0; -} - -int -sdioh_start(sdioh_info_t *sd, int stage) -{ - return SUCCESS; -} - -int -sdioh_stop(sdioh_info_t *sd) -{ - return SUCCESS; -} - - - -/* - * Private/Static work routines - */ -static int -bcmspi_host_init(sdioh_info_t *sd) -{ - - /* Default power on mode */ - sd->sd_mode = SDIOH_MODE_SPI; - sd->polled_mode = TRUE; - sd->host_init_done = TRUE; - sd->card_init_done = FALSE; - sd->adapter_slot = 1; - - return (SUCCESS); -} - -static int -get_client_blocksize(sdioh_info_t *sd) -{ - uint32 regdata[2]; - int status; - - /* Find F1/F2/F3 max packet size */ - if ((status = bcmspi_card_regread(sd, 0, SPID_F1_INFO_REG, - 8, regdata)) != SUCCESS) { - return status; - } - - sd_trace(("pkt_size regdata[0] = 0x%x, regdata[1] = 0x%x\n", - regdata[0], regdata[1])); - - sd->client_block_size[1] = (regdata[0] & F1_MAX_PKT_SIZE) >> 2; - sd_trace(("Func1 blocksize = %d\n", sd->client_block_size[1])); - ASSERT(sd->client_block_size[1] == BLOCK_SIZE_F1); - - sd->client_block_size[2] = ((regdata[0] >> 16) & F2_MAX_PKT_SIZE) >> 2; - sd_trace(("Func2 blocksize = %d\n", sd->client_block_size[2])); - ASSERT(sd->client_block_size[2] == BLOCK_SIZE_F2); - - sd->client_block_size[3] = (regdata[1] & F3_MAX_PKT_SIZE) >> 2; - sd_trace(("Func3 blocksize = %d\n", sd->client_block_size[3])); - ASSERT(sd->client_block_size[3] == BLOCK_SIZE_F3); - - return 0; -} - -static int -bcmspi_client_init(sdioh_info_t *sd) -{ - uint32 status_en_reg = 0; - sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot)); - -#ifdef HSMODE - if (!spi_start_clock(sd, (uint16)sd_divisor)) { - sd_err(("spi_start_clock failed\n")); - return ERROR; - } -#else - /* Start at ~400KHz clock rate for initialization */ - if (!spi_start_clock(sd, 128)) { - sd_err(("spi_start_clock failed\n")); - return ERROR; - } -#endif /* HSMODE */ - - if (!bcmspi_host_device_init_adapt(sd)) { - sd_err(("bcmspi_host_device_init_adapt failed\n")); - return ERROR; - } - - if (!bcmspi_test_card(sd)) { - sd_err(("bcmspi_test_card failed\n")); - return ERROR; - } - - sd->num_funcs = SPI_MAX_IOFUNCS; - - get_client_blocksize(sd); - - /* Apply resync pattern cmd with all zeros to reset spi-bkplane F1 logic */ - bcmspi_resync_f1(sd); - - sd->dwordmode = FALSE; - - bcmspi_card_regread(sd, 0, SPID_STATUS_ENABLE, 1, &status_en_reg); - - sd_trace(("%s: Enabling interrupt with dstatus \n", __FUNCTION__)); - status_en_reg |= INTR_WITH_STATUS; - - - if (bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_STATUS_ENABLE, 1, - status_en_reg & 0xff) != SUCCESS) { - sd_err(("%s: Unable to set response delay for all fun's.\n", __FUNCTION__)); - return ERROR; - } - - -#ifndef HSMODE - /* After configuring for High-Speed mode, set the desired clock rate. 
*/ - if (!spi_start_clock(sd, 4)) { - sd_err(("spi_start_clock failed\n")); - return ERROR; - } -#endif /* HSMODE */ - - sd->card_init_done = TRUE; - - - return SUCCESS; -} - -static int -bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode) -{ - uint32 regdata; - int status; - - if ((status = bcmspi_card_regread(sd, 0, SPID_CONFIG, - 4, ®data)) != SUCCESS) - return status; - - sd_trace(("In %s spih-ctrl = 0x%x \n", __FUNCTION__, regdata)); - - - if (hsmode == TRUE) { - sd_trace(("Attempting to enable High-Speed mode.\n")); - - if (regdata & HIGH_SPEED_MODE) { - sd_trace(("Device is already in High-Speed mode.\n")); - return status; - } else { - regdata |= HIGH_SPEED_MODE; - sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG)); - if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG, - 4, regdata)) != SUCCESS) { - return status; - } - } - } else { - sd_trace(("Attempting to disable High-Speed mode.\n")); - - if (regdata & HIGH_SPEED_MODE) { - regdata &= ~HIGH_SPEED_MODE; - sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG)); - if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG, - 4, regdata)) != SUCCESS) - return status; - } - else { - sd_trace(("Device is already in Low-Speed mode.\n")); - return status; - } - } - - spi_controller_highspeed_mode(sd, hsmode); - - return TRUE; -} - -#define bcmspi_find_curr_mode(sd) { \ - sd->wordlen = 2; \ - status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, ®data); \ - regdata &= 0xff; \ - if ((regdata == 0xad) || (regdata == 0x5b) || \ - (regdata == 0x5d) || (regdata == 0x5a)) \ - break; \ - sd->wordlen = 4; \ - status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, ®data); \ - regdata &= 0xff; \ - if ((regdata == 0xad) || (regdata == 0x5b) || \ - (regdata == 0x5d) || (regdata == 0x5a)) \ - break; \ - sd_trace(("Silicon testability issue: regdata = 0x%x." \ - " Expected 0xad, 0x5a, 0x5b or 0x5d.\n", regdata)); \ - OSL_DELAY(100000); \ -} - -#define INIT_ADAPT_LOOP 100 - -/* Adapt clock-phase-speed-bitwidth between host and device */ -static bool -bcmspi_host_device_init_adapt(sdioh_info_t *sd) -{ - uint32 wrregdata, regdata = 0; - int status; - int i; - - /* Due to a silicon testability issue, the first command from the Host - * to the device will get corrupted (first bit will be lost). So the - * Host should poll the device with a safe read request. ie: The Host - * should try to read F0 addr 0x14 using the Fixed address mode - * (This will prevent a unintended write command to be detected by device) - */ - for (i = 0; i < INIT_ADAPT_LOOP; i++) { - /* If device was not power-cycled it will stay in 32bit mode with - * response-delay-all bit set. Alternate the iteration so that - * read either with or without response-delay for F0 to succeed. - */ - bcmspi_find_curr_mode(sd); - sd->resp_delay_all = (i & 0x1) ? 
TRUE : FALSE; - - bcmspi_find_curr_mode(sd); - sd->dwordmode = TRUE; - - bcmspi_find_curr_mode(sd); - sd->dwordmode = FALSE; - } - - /* Bail out, device not detected */ - if (i == INIT_ADAPT_LOOP) - return FALSE; - - /* Softreset the spid logic */ - if ((sd->dwordmode) || (sd->wordlen == 4)) { - bcmspi_card_regwrite(sd, 0, SPID_RESET_BP, 1, RESET_ON_WLAN_BP_RESET|RESET_SPI); - bcmspi_card_regread(sd, 0, SPID_RESET_BP, 1, ®data); - sd_trace(("reset reg read = 0x%x\n", regdata)); - sd_trace(("dwordmode = %d, wordlen = %d, resp_delay_all = %d\n", sd->dwordmode, - sd->wordlen, sd->resp_delay_all)); - /* Restore default state after softreset */ - sd->wordlen = 2; - sd->dwordmode = FALSE; - } - - if (sd->wordlen == 4) { - if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, ®data)) != - SUCCESS) - return FALSE; - if (regdata == TEST_RO_DATA_32BIT_LE) { - sd_trace(("Spid is already in 32bit LE mode. Value read = 0x%x\n", - regdata)); - sd_trace(("Spid power was left on.\n")); - } else { - sd_err(("Spid power was left on but signature read failed." - " Value read = 0x%x\n", regdata)); - return FALSE; - } - } else { - sd->wordlen = 2; - -#define CTRL_REG_DEFAULT 0x00010430 /* according to the host m/c */ - - wrregdata = (CTRL_REG_DEFAULT); - sd->resp_delay_all = TRUE; - if (sd->resp_delay_all == TRUE) { - /* Enable response delay for all */ - wrregdata |= (RESP_DELAY_ALL << 16); - /* Program response delay value */ - wrregdata &= 0xffff00ff; - wrregdata |= (F1_RESPONSE_DELAY << 8); - sd->prev_fun = SPI_FUNC_1; - bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata); - } - - if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, ®data)) != SUCCESS) - return FALSE; - sd_trace(("(we are still in 16bit mode) 32bit READ LE regdata = 0x%x\n", regdata)); - -#ifndef HSMODE - wrregdata |= (CLOCK_PHASE | CLOCK_POLARITY); - wrregdata &= ~HIGH_SPEED_MODE; - bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata); -#endif /* HSMODE */ - - for (i = 0; i < INIT_ADAPT_LOOP; i++) { - if ((regdata == 0xfdda7d5b) || (regdata == 0xfdda7d5a)) { - sd_trace(("0xfeedbead was leftshifted by 1-bit.\n")); - if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, - ®data)) != SUCCESS) - return FALSE; - } - OSL_DELAY(1000); - } - - - /* Change to host controller intr-polarity of active-low */ - wrregdata &= ~INTR_POLARITY; - sd_trace(("(we are still in 16bit mode) 32bit Write LE reg-ctrl-data = 0x%x\n", - wrregdata)); - /* Change to 32bit mode */ - wrregdata |= WORD_LENGTH_32; - bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata); - - /* Change command/data packaging in 32bit LE mode */ - sd->wordlen = 4; - - if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, ®data)) != SUCCESS) - return FALSE; - - if (regdata == TEST_RO_DATA_32BIT_LE) { - sd_trace(("Read spid passed. Value read = 0x%x\n", regdata)); - sd_trace(("Spid had power-on cycle OR spi was soft-resetted \n")); - } else { - sd_err(("Stale spid reg values read as it was kept powered. 
Value read =" - "0x%x\n", regdata)); - return FALSE; - } - } - - - return TRUE; -} - -static bool -bcmspi_test_card(sdioh_info_t *sd) -{ - uint32 regdata; - int status; - - if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, ®data)) != SUCCESS) - return FALSE; - - if (regdata == (TEST_RO_DATA_32BIT_LE)) - sd_trace(("32bit LE regdata = 0x%x\n", regdata)); - else { - sd_trace(("Incorrect 32bit LE regdata = 0x%x\n", regdata)); - return FALSE; - } - - -#define RW_PATTERN1 0xA0A1A2A3 -#define RW_PATTERN2 0x4B5B6B7B - - regdata = RW_PATTERN1; - if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS) - return FALSE; - regdata = 0; - if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, ®data)) != SUCCESS) - return FALSE; - if (regdata != RW_PATTERN1) { - sd_err(("Write-Read spid failed. Value wrote = 0x%x, Value read = 0x%x\n", - RW_PATTERN1, regdata)); - return FALSE; - } else - sd_trace(("R/W spid passed. Value read = 0x%x\n", regdata)); - - regdata = RW_PATTERN2; - if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS) - return FALSE; - regdata = 0; - if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, ®data)) != SUCCESS) - return FALSE; - if (regdata != RW_PATTERN2) { - sd_err(("Write-Read spid failed. Value wrote = 0x%x, Value read = 0x%x\n", - RW_PATTERN2, regdata)); - return FALSE; - } else - sd_trace(("R/W spid passed. Value read = 0x%x\n", regdata)); - - return TRUE; -} - -static int -bcmspi_driver_init(sdioh_info_t *sd) -{ - sd_trace(("%s\n", __FUNCTION__)); - if ((bcmspi_host_init(sd)) != SUCCESS) { - return ERROR; - } - - if (bcmspi_client_init(sd) != SUCCESS) { - return ERROR; - } - - return SUCCESS; -} - -/* Read device reg */ -static int -bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data) -{ - int status; - uint32 cmd_arg, dstatus; - - ASSERT(regsize); - - if (func == 2) - sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n")); - - cmd_arg = 0; - cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0); - cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ - cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); - cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); - cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 
0 : regsize); - - sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg)); - sd_trace(("%s: rw=%d, func=%d, regaddr=0x%08x, data=0x%x\n", __FUNCTION__, 0, func, - regaddr, *data)); - - if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize)) - != SUCCESS) - return status; - - bcmspi_cmd_getdstatus(sd, &dstatus); - if (dstatus) - sd_trace(("dstatus =0x%x\n", dstatus)); - - return SUCCESS; -} - -static int -bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data) -{ - - int status; - uint32 cmd_arg; - uint32 dstatus; - - ASSERT(regsize); - - if (func == 2) - sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n")); - - cmd_arg = 0; - cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0); - cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0); /* Fixed access */ - cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); - cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); - cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize); - - sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg)); - - if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize)) - != SUCCESS) - return status; - - sd_trace(("%s: rw=%d, func=%d, regaddr=0x%08x, data=0x%x\n", __FUNCTION__, 0, func, - regaddr, *data)); - - bcmspi_cmd_getdstatus(sd, &dstatus); - sd_trace(("dstatus =0x%x\n", dstatus)); - return SUCCESS; -} - -/* write a device register */ -static int -bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data) -{ - int status; - uint32 cmd_arg, dstatus; - - ASSERT(regsize); - - cmd_arg = 0; - - cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1); - cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ - cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); - cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); - cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 
0 : regsize); - - sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg)); - sd_trace(("%s: rw=%d, func=%d, regaddr=0x%08x, data=0x%x\n", __FUNCTION__, 1, func, - regaddr, data)); - - - if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, regsize)) - != SUCCESS) - return status; - - bcmspi_cmd_getdstatus(sd, &dstatus); - if (dstatus) - sd_trace(("dstatus =0x%x\n", dstatus)); - - return SUCCESS; -} - -/* write a device register - 1 byte */ -static int -bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 *byte) -{ - int status; - uint32 cmd_arg; - uint32 dstatus; - uint32 data = (uint32)(*byte); - - cmd_arg = 0; - cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); - cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ - cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); - cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1); - cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1); - - sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg)); - sd_trace(("%s: func=%d, regaddr=0x%08x, data=0x%x\n", __FUNCTION__, func, - regaddr, data)); - - if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, - cmd_arg, &data, 1)) != SUCCESS) { - return status; - } - - bcmspi_cmd_getdstatus(sd, &dstatus); - if (dstatus) - sd_trace(("dstatus =0x%x\n", dstatus)); - - return SUCCESS; -} - -void -bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer) -{ - *dstatus_buffer = sd->card_dstatus; -} - -/* 'data' is of type uint32 whereas other buffers are of type uint8 */ -static int -bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg, - uint32 *data, uint32 datalen) -{ - uint32 i, j; - uint8 resp_delay = 0; - int err = SUCCESS; - uint32 hostlen; - uint32 spilen = 0; - uint32 dstatus_idx = 0; - uint16 templen, buslen, len, *ptr = NULL; - - sd_trace(("spi cmd = 0x%x\n", cmd_arg)); - - if (DWORDMODE_ON) { - spilen = GFIELD(cmd_arg, SPI_LEN); - if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_0) || - (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_1)) - dstatus_idx = spilen * 3; - - if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) && - (GFIELD(cmd_arg, SPI_RW_FLAG) == 1)) { - spilen = spilen << 2; - dstatus_idx = (spilen % 16) ? (16 - (spilen % 16)) : 0; - /* convert len to mod16 size */ - spilen = ROUNDUP(spilen, 16); - cmd_arg = SFIELD(cmd_arg, SPI_LEN, (spilen >> 2)); - } - } - - /* Set up and issue the SPI command. MSByte goes out on bus first. Increase datalen - * according to the wordlen mode(16/32bit) the device is in. 
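 * (Illustrative arithmetic, assuming the padding logic below: a 1-byte register
 * access grows to 4 bytes in either mode -- 32-bit: 1 + (4 - 1) = 4; 16-bit:
 * 1 -> 2, then ROUNDUP(2, 4) = 4 -- while a 6-byte transfer stays 6 bytes in
 * 16-bit mode but is rounded up to 8 in 32-bit mode.)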
- */ - if (sd->wordlen == 4) { /* 32bit spid */ - *(uint32 *)spi_outbuf = bcmswap32(cmd_arg); - if (datalen & 0x3) - datalen += (4 - (datalen & 0x3)); - } else if (sd->wordlen == 2) { /* 16bit spid */ - *(uint16 *)spi_outbuf = bcmswap16(cmd_arg & 0xffff); - *(uint16 *)&spi_outbuf[2] = bcmswap16((cmd_arg & 0xffff0000) >> 16); - if (datalen & 0x1) - datalen++; - if (datalen < 4) - datalen = ROUNDUP(datalen, 4); - } else { - sd_err(("Host is %d bit spid, could not create SPI command.\n", - 8 * sd->wordlen)); - return ERROR; - } - - /* for Write, put the data into the output buffer */ - if (GFIELD(cmd_arg, SPI_RW_FLAG) == 1) { - /* We send len field of hw-header always a mod16 size, both from host and dongle */ - if (DWORDMODE_ON) { - if (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) { - ptr = (uint16 *)&data[0]; - templen = *ptr; - /* ASSERT(*ptr == ~*(ptr + 1)); */ - templen = ROUNDUP(templen, 16); - *ptr = templen; - sd_trace(("actual tx len = %d\n", (uint16)(~*(ptr+1)))); - } - } - - if (datalen != 0) { - for (i = 0; i < datalen/4; i++) { - if (sd->wordlen == 4) { /* 32bit spid */ - *(uint32 *)&spi_outbuf[i * 4 + CMDLEN] = - bcmswap32(data[i]); - } else if (sd->wordlen == 2) { /* 16bit spid */ - *(uint16 *)&spi_outbuf[i * 4 + CMDLEN] = - bcmswap16(data[i] & 0xffff); - *(uint16 *)&spi_outbuf[i * 4 + CMDLEN + 2] = - bcmswap16((data[i] & 0xffff0000) >> 16); - } - } - } - } - - /* Append resp-delay number of bytes and clock them out for F0/1/2 reads. */ - if (GFIELD(cmd_arg, SPI_RW_FLAG) == 0) { - int func = GFIELD(cmd_arg, SPI_FUNCTION); - switch (func) { - case 0: - resp_delay = sd->resp_delay_all ? F0_RESPONSE_DELAY : 0; - break; - case 1: - resp_delay = F1_RESPONSE_DELAY; - break; - case 2: - resp_delay = sd->resp_delay_all ? F2_RESPONSE_DELAY : 0; - break; - default: - ASSERT(0); - break; - } - /* Program response delay */ - bcmspi_prog_resp_delay(sd, func, resp_delay); - } - - /* +4 for cmd and +4 for dstatus */ - hostlen = datalen + 8 + resp_delay; - hostlen += dstatus_idx; - hostlen += (4 - (hostlen & 0x3)); - spi_sendrecv(sd, spi_outbuf, spi_inbuf, hostlen); - - /* for Read, get the data into the input buffer */ - if (datalen != 0) { - if (GFIELD(cmd_arg, SPI_RW_FLAG) == 0) { /* if read cmd */ - for (j = 0; j < datalen/4; j++) { - if (sd->wordlen == 4) { /* 32bit spid */ - data[j] = bcmswap32(*(uint32 *)&spi_inbuf[j * 4 + - CMDLEN + resp_delay]); - } else if (sd->wordlen == 2) { /* 16bit spid */ - data[j] = (bcmswap16(*(uint16 *)&spi_inbuf[j * 4 + - CMDLEN + resp_delay])) | - ((bcmswap16(*(uint16 *)&spi_inbuf[j * 4 + - CMDLEN + resp_delay + 2])) << 16); - } - } - - if ((DWORDMODE_ON) && (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) { - ptr = (uint16 *)&data[0]; - templen = *ptr; - buslen = len = ~(*(ptr + 1)); - buslen = ROUNDUP(buslen, 16); - /* populate actual len in hw-header */ - if (templen == buslen) - *ptr = len; - } - } - } - - /* Restore back the len field of the hw header */ - if (DWORDMODE_ON) { - if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) && - (GFIELD(cmd_arg, SPI_RW_FLAG) == 1)) { - ptr = (uint16 *)&data[0]; - *ptr = (uint16)(~*(ptr+1)); - } - } - - dstatus_idx += (datalen + CMDLEN + resp_delay); - /* Last 4bytes are dstatus. Device is configured to return status bits. 
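 * For reference, the frame clocked out by this routine is laid out roughly as
 *
 *	[ CMDLEN cmd ][ resp_delay filler, reads only ][ datalen data ][ 4-byte dstatus ]
 *
 * which is why hostlen above is datalen + 8 + resp_delay (plus any dword-mode
 * padding), and why dstatus is read back at offset CMDLEN + resp_delay + datalen.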
*/ - if (sd->wordlen == 4) { /* 32bit spid */ - sd->card_dstatus = bcmswap32(*(uint32 *)&spi_inbuf[dstatus_idx]); - } else if (sd->wordlen == 2) { /* 16bit spid */ - sd->card_dstatus = (bcmswap16(*(uint16 *)&spi_inbuf[dstatus_idx]) | - (bcmswap16(*(uint16 *)&spi_inbuf[dstatus_idx + 2]) << 16)); - } else { - sd_err(("Host is %d bit machine, could not read SPI dstatus.\n", - 8 * sd->wordlen)); - return ERROR; - } - if (sd->card_dstatus == 0xffffffff) { - sd_err(("looks like not a GSPI device or device is not powered.\n")); - } - - err = bcmspi_update_stats(sd, cmd_arg); - - return err; - -} - -static int -bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, - uint32 addr, int nbytes, uint32 *data) -{ - int status; - uint32 cmd_arg; - bool write = rw == SDIOH_READ ? 0 : 1; - uint retries = 0; - - bool enable; - uint32 spilen; - - cmd_arg = 0; - - ASSERT(nbytes); - ASSERT(nbytes <= sd->client_block_size[func]); - - if (write) sd->t_cnt++; else sd->r_cnt++; - - if (func == 2) { - /* Frame len check limited by gSPI. */ - if ((nbytes > 2000) && write) { - sd_trace((">2KB write: F2 wr of %d bytes\n", nbytes)); - } - /* ASSERT(nbytes <= 2048); Fix bigger len gspi issue and uncomment. */ - /* If F2 fifo on device is not ready to receive data, don't do F2 transfer */ - if (write) { - uint32 dstatus; - /* check F2 ready with cached one */ - bcmspi_cmd_getdstatus(sd, &dstatus); - if ((dstatus & STATUS_F2_RX_READY) == 0) { - retries = WAIT_F2RXFIFORDY; - enable = 0; - while (retries-- && !enable) { - OSL_DELAY(WAIT_F2RXFIFORDY_DELAY * 1000); - bcmspi_card_regread(sd, SPI_FUNC_0, SPID_STATUS_REG, 4, - &dstatus); - if (dstatus & STATUS_F2_RX_READY) - enable = TRUE; - } - if (!enable) { - struct spierrstats_t *spierrstats = &sd->spierrstats; - spierrstats->f2rxnotready++; - sd_err(("F2 FIFO is not ready to receive data.\n")); - return ERROR; - } - sd_trace(("No of retries on F2 ready %d\n", - (WAIT_F2RXFIFORDY - retries))); - } - } - } - - /* F2 transfers happen on 0 addr */ - addr = (func == 2) ? 0 : addr; - - /* In pio mode buffer is read using fixed address fifo in func 1 */ - if ((func == 1) && (fifo)) - cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0); - else - cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); - - cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); - cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, addr); - cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, write); - spilen = sd->data_xfer_count = MIN(sd->client_block_size[func], nbytes); - if ((sd->dwordmode == TRUE) && (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) { - /* convert len to mod4 size */ - spilen = spilen + ((spilen & 0x3) ? (4 - (spilen & 0x3)): 0); - cmd_arg = SFIELD(cmd_arg, SPI_LEN, (spilen >> 2)); - } else - cmd_arg = SFIELD(cmd_arg, SPI_LEN, spilen); - - if ((func == 2) && (fifo == 1)) { - sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n", - __FUNCTION__, write ? "Wr" : "Rd", func, "INCR", - addr, nbytes, sd->r_cnt, sd->t_cnt)); - } - - sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg)); - sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n", - __FUNCTION__, write ? "Wd" : "Rd", func, "INCR", - addr, nbytes, sd->r_cnt, sd->t_cnt)); - - - if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, - data, nbytes)) != SUCCESS) { - sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__, - (write ? 
"write" : "read"))); - return status; - } - - /* gSPI expects that hw-header-len is equal to spi-command-len */ - if ((func == 2) && (rw == SDIOH_WRITE) && (sd->dwordmode == FALSE)) { - ASSERT((uint16)sd->data_xfer_count == (uint16)(*data & 0xffff)); - ASSERT((uint16)sd->data_xfer_count == (uint16)(~((*data & 0xffff0000) >> 16))); - } - - if ((nbytes > 2000) && !write) { - sd_trace((">2KB read: F2 rd of %d bytes\n", nbytes)); - } - - return SUCCESS; -} - -/* Reset and re-initialize the device */ -int -sdioh_sdio_reset(sdioh_info_t *si) -{ - si->card_init_done = FALSE; - return bcmspi_client_init(si); -} diff --git a/drivers/net/wireless/bcm4329/dhd.h b/drivers/net/wireless/bcm4329/dhd.h index 1ddf1ff61e705..c18a858e7d33c 100644 --- a/drivers/net/wireless/bcm4329/dhd.h +++ b/drivers/net/wireless/bcm4329/dhd.h @@ -24,7 +24,7 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * - * $Id: dhd.h,v 1.32.4.7.2.4.14.49.4.7 2010/11/12 22:48:36 Exp $ + * $Id: dhd.h,v 1.32.4.7.2.4.14.49.4.9 2011/01/14 22:40:45 Exp $ */ /**************** @@ -44,6 +44,7 @@ #include #include #include +#include #include #include @@ -143,7 +144,7 @@ typedef struct dhd_pub { ulong rx_readahead_cnt; /* Number of packets where header read-ahead was used. */ ulong tx_realloc; /* Number of tx packets we had to realloc for headroom */ - ulong fc_packets; /* Number of flow control pkts recvd */ + ulong fc_packets; /* Number of flow control pkts recvd */ /* Last error return */ int bcmerror; @@ -155,6 +156,7 @@ typedef struct dhd_pub { /* Suspend disable flag and "in suspend" flag */ int suspend_disable_flag; /* "1" to disable all extra powersaving during suspend */ int in_suspend; /* flag set to 1 when early suspend called */ + int hang_was_sent; /* flag that message was send at least once */ #ifdef PNO_SUPPORT int pno_enable; /* pno status : "1" is pno enable */ #endif /* PNO_SUPPORT */ @@ -179,7 +181,7 @@ typedef struct dhd_pub { wait_event_interruptible_timeout(a, FALSE, HZ/100); \ } \ } while (0) - #define DHD_PM_RESUME_WAIT(a) _DHD_PM_RESUME_WAIT(a, 30) + #define DHD_PM_RESUME_WAIT(a) _DHD_PM_RESUME_WAIT(a, 200) #define DHD_PM_RESUME_WAIT_FOREVER(a) _DHD_PM_RESUME_WAIT(a, ~0) #define DHD_PM_RESUME_RETURN_ERROR(a) do { if (dhd_mmc_suspend) return a; } while (0) #define DHD_PM_RESUME_RETURN do { if (dhd_mmc_suspend) return; } while (0) @@ -214,6 +216,20 @@ typedef struct dhd_pub { #define DHD_IF_VIF 0x01 /* Virtual IF (Hidden from user) */ +inline static void NETIF_ADDR_LOCK(struct net_device *dev) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)) + netif_addr_lock_bh(dev); +#endif +} + +inline static void NETIF_ADDR_UNLOCK(struct net_device *dev) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)) + netif_addr_unlock_bh(dev); +#endif +} + /* Wakelock Functions */ extern int dhd_os_wake_lock(dhd_pub_t *pub); extern int dhd_os_wake_unlock(dhd_pub_t *pub); @@ -434,4 +450,16 @@ extern char nv_path[MOD_PARAM_PATHLEN]; extern void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar); extern void dhd_wait_event_wakeup(dhd_pub_t*dhd); +/* dhd_commn arp offload wrapers */ +extern void dhd_arp_cleanup(dhd_pub_t *dhd); +int dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen); +void dhd_arp_offload_add_ip(dhd_pub_t *dhd, u32 ipaddr); + +#define DHD_UNICAST_FILTER_NUM 0 +#define DHD_BROADCAST_FILTER_NUM 1 +#define DHD_MULTICAST4_FILTER_NUM 2 +#define DHD_MULTICAST6_FILTER_NUM 3 +extern int 
net_os_set_packet_filter(struct net_device *dev, int val); +extern int net_os_rxfilter_add_remove(struct net_device *dev, int val, int num); + #endif /* _dhd_h_ */ diff --git a/drivers/net/wireless/bcm4329/dhd_cdc.c b/drivers/net/wireless/bcm4329/dhd_cdc.c index 61f6a6f393a97..4bec0b606dc9d 100644 --- a/drivers/net/wireless/bcm4329/dhd_cdc.c +++ b/drivers/net/wireless/bcm4329/dhd_cdc.c @@ -150,7 +150,8 @@ dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len) memcpy(prot->buf, buf, len); if ((ret = dhdcdc_msg(dhd)) < 0) { - DHD_ERROR(("dhdcdc_query_ioctl: dhdcdc_msg failed w/status %d\n", ret)); + if (!dhd->hang_was_sent) + DHD_ERROR(("dhdcdc_query_ioctl: dhdcdc_msg failed w/status %d\n", ret)); goto done; } @@ -205,6 +206,18 @@ dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len) DHD_TRACE(("%s: Enter\n", __FUNCTION__)); DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len)); + if (dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__)); + return -EIO; + } + + /* don't talk to the dongle if fw is about to be reloaded */ + if (dhd->hang_was_sent) { + DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n", + __FUNCTION__)); + return -EIO; + } + memset(msg, 0, sizeof(cdc_ioctl_t)); msg->cmd = htol32(cmd); @@ -251,7 +264,7 @@ dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len) dhd_prot_t *prot = dhd->prot; int ret = -1; - if (dhd->busstate == DHD_BUS_DOWN) { + if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) { DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__)); return ret; } diff --git a/drivers/net/wireless/bcm4329/dhd_common.c b/drivers/net/wireless/bcm4329/dhd_common.c index 4331d39a68f11..7235a9e56dbaf 100644 --- a/drivers/net/wireless/bcm4329/dhd_common.c +++ b/drivers/net/wireless/bcm4329/dhd_common.c @@ -21,7 +21,7 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: dhd_common.c,v 1.5.6.8.2.6.6.69.4.20 2010/12/20 23:37:28 Exp $ + * $Id: dhd_common.c,v 1.5.6.8.2.6.6.69.4.25 2011-02-11 21:16:02 Exp $ */ #include #include @@ -992,6 +992,9 @@ dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_ wl_pkt_filter_enable_t enable_parm; wl_pkt_filter_enable_t * pkt_filterp; + if (!arg) + return; + if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) { DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); goto fail; @@ -1065,6 +1068,9 @@ dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg) char *arg_save = 0, *arg_org = 0; #define BUF_SIZE 2048 + if (!arg) + return; + if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) { DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); goto fail; @@ -1220,6 +1226,82 @@ dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable) } #endif + +void dhd_arp_cleanup(dhd_pub_t *dhd) +{ +#ifdef ARP_OFFLOAD_SUPPORT + int ret = 0; + int iov_len = 0; + char iovbuf[128]; + + if (dhd == NULL) return; + + dhd_os_proto_block(dhd); + + iov_len = bcm_mkiovar("arp_hostip_clear", 0, 0, iovbuf, sizeof(iovbuf)); + if ((ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, iov_len)) < 0) + DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); + + iov_len = bcm_mkiovar("arp_table_clear", 0, 0, iovbuf, sizeof(iovbuf)); + if ((ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, iov_len)) < 0) + DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); + + dhd_os_proto_unblock(dhd); + +#endif /* ARP_OFFLOAD_SUPPORT */ +} + +void dhd_arp_offload_add_ip(dhd_pub_t *dhd, u32 ipaddr) +{ +#ifdef ARP_OFFLOAD_SUPPORT + int iov_len = 0; + char iovbuf[32]; + int retcode; + + dhd_os_proto_block(dhd); + + iov_len = bcm_mkiovar("arp_hostip", (char *)&ipaddr, 4, iovbuf, sizeof(iovbuf)); + retcode = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, iov_len); + + dhd_os_proto_unblock(dhd); + + if (retcode) + DHD_TRACE(("%s: ARP ip addr add failed, retcode = %d\n", + __FUNCTION__, retcode)); + else + DHD_TRACE(("%s: ARP ipaddr entry added\n", + __FUNCTION__)); +#endif /* ARP_OFFLOAD_SUPPORT */ +} + + +int dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen) +{ +#ifdef ARP_OFFLOAD_SUPPORT + int retcode; + int iov_len = 0; + + if (!buf) + return -1; + + dhd_os_proto_block(dhd); + + iov_len = bcm_mkiovar("arp_hostip", 0, 0, buf, buflen); + retcode = dhdcdc_query_ioctl(dhd, 0, WLC_GET_VAR, buf, buflen); + + dhd_os_proto_unblock(dhd); + + if (retcode) { + DHD_TRACE(("%s: ioctl WLC_GET_VAR error %d\n", + __FUNCTION__, retcode)); + + return -1; + } +#endif /* ARP_OFFLOAD_SUPPORT */ + return 0; +} + + int dhd_preinit_ioctls(dhd_pub_t *dhd) { @@ -1813,6 +1895,41 @@ dhd_iscan_get_partial_result(void *dhdp, uint *scan_count) #endif +/* + * returns = TRUE if associated, FALSE if not associated + */ +bool is_associated(dhd_pub_t *dhd, void *bss_buf) +{ + char bssid[ETHER_ADDR_LEN], zbuf[ETHER_ADDR_LEN]; + int ret = -1; + + bzero(bssid, ETHER_ADDR_LEN); + bzero(zbuf, ETHER_ADDR_LEN); + + ret = dhdcdc_set_ioctl(dhd, 0, WLC_GET_BSSID, (char *)bssid, ETHER_ADDR_LEN); + DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__, ret)); + + if (ret == BCME_NOTASSOCIATED) { + DHD_TRACE(("%s: not associated! 
res:%d\n", __FUNCTION__, ret)); + } + + if (ret < 0) + return FALSE; + + if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) != 0)) { + /* STA is assocoated BSSID is non zero */ + + if (bss_buf) { + /* return bss if caller provided buf */ + memcpy(bss_buf, bssid, ETHER_ADDR_LEN); + } + return TRUE; + } else { + DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__)); + return FALSE; + } +} + /* Function to estimate possible DTIM_SKIP value */ int dhd_get_dtim_skip(dhd_pub_t *dhd) { @@ -1903,6 +2020,15 @@ int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled) return ret; } + memset(iovbuf, 0, sizeof(iovbuf)); + + /* Check if disassoc to enable pno */ + if (pfn_enabled && (is_associated(dhd, NULL) == TRUE)) { + DHD_ERROR(("%s pno enable called in assoc mode ret=%d\n", \ + __FUNCTION__, ret)); + return ret; + } + /* Enable/disable PNO */ if ((ret = bcm_mkiovar("pfn", (char *)&pfn_enabled, 4, iovbuf, sizeof(iovbuf))) > 0) { if ((ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf))) < 0) { @@ -1921,7 +2047,8 @@ int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled) /* Function to execute combined scan */ int -dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid, ushort scan_fr) +dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid, ushort scan_fr, \ + int pno_repeat, int pno_freq_expo_max) { int err = -1; char iovbuf[128]; @@ -1966,12 +2093,23 @@ dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid, ushort scan_fr) pfn_param.version = htod32(PFN_VERSION); pfn_param.flags = htod16((PFN_LIST_ORDER << SORT_CRITERIA_BIT)); + /* check and set extra pno params */ + if ((pno_repeat != 0) || (pno_freq_expo_max != 0)) { + pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT); + pfn_param.repeat_scan = htod32(pno_repeat); + pfn_param.max_freq_adjust = htod32(pno_freq_expo_max); + } + /* set up pno scan fr */ if (scan_fr != 0) pfn_param.scan_freq = htod32(scan_fr); - if (pfn_param.scan_freq > PNO_SCAN_MAX_FW) { - DHD_ERROR(("%s pno freq above %d sec\n", __FUNCTION__, PNO_SCAN_MAX_FW)); + if (pfn_param.scan_freq > PNO_SCAN_MAX_FW_SEC) { + DHD_ERROR(("%s pno freq above %d sec\n", __FUNCTION__, PNO_SCAN_MAX_FW_SEC)); + return err; + } + if (pfn_param.scan_freq < PNO_SCAN_MIN_FW_SEC) { + DHD_ERROR(("%s pno freq less %d sec\n", __FUNCTION__, PNO_SCAN_MIN_FW_SEC)); return err; } @@ -1983,8 +2121,6 @@ dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid, ushort scan_fr) pfn_element.bss_type = htod32(DOT11_BSSTYPE_INFRASTRUCTURE); pfn_element.auth = (DOT11_OPEN_SYSTEM); - pfn_element.wpa_auth = htod32(WPA_AUTH_PFN_ANY); - pfn_element.wsec = htod32(0); pfn_element.infra = htod32(1); memcpy((char *)pfn_element.ssid.SSID, ssids_local[i].SSID, ssids_local[i].SSID_len); @@ -2000,8 +2136,9 @@ dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid, ushort scan_fr) return err; } else - DHD_ERROR(("%s set OK with PNO time=%d\n", __FUNCTION__, \ - pfn_param.scan_freq)); + DHD_ERROR(("%s set OK with PNO time=%d repeat=%d max_adjust=%d\n", \ + __FUNCTION__, pfn_param.scan_freq, \ + pfn_param.repeat_scan, pfn_param.max_freq_adjust)); } else DHD_ERROR(("%s failed err=%d\n", __FUNCTION__, err)); } diff --git a/drivers/net/wireless/bcm4329/dhd_custom_gpio.c b/drivers/net/wireless/bcm4329/dhd_custom_gpio.c index 8c6ec470b8bd2..a1a4297f05162 100644 --- a/drivers/net/wireless/bcm4329/dhd_custom_gpio.c +++ b/drivers/net/wireless/bcm4329/dhd_custom_gpio.c @@ -20,7 +20,7 @@ * software in any way with any other Broadcom software provided under a license * other than 
the GPL, without Broadcom's express prior written consent. * -* $Id: dhd_custom_gpio.c,v 1.1.4.8.4.1 2010/09/02 23:13:16 Exp $ +* $Id: dhd_custom_gpio.c,v 1.1.4.8.4.4 2011/01/20 20:23:09 Exp $ */ @@ -47,6 +47,7 @@ int wifi_set_carddetect(int on); int wifi_set_power(int on, unsigned long msec); int wifi_get_irq_number(unsigned long *irq_flags_ptr); int wifi_get_mac_addr(unsigned char *buf); +void *wifi_get_country_code(char *ccode); #endif #if defined(OOB_INTR_ONLY) diff --git a/drivers/net/wireless/bcm4329/dhd_linux.c b/drivers/net/wireless/bcm4329/dhd_linux.c index f14f5e81b3213..7b13c7668c137 100644 --- a/drivers/net/wireless/bcm4329/dhd_linux.c +++ b/drivers/net/wireless/bcm4329/dhd_linux.c @@ -22,7 +22,7 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * - * $Id: dhd_linux.c,v 1.65.4.9.2.12.2.104.4.35 2010/11/17 03:13:21 Exp $ + * $Id: dhd_linux.c,v 1.65.4.9.2.12.2.104.4.40 2011/02/03 19:55:18 Exp $ */ #ifdef CONFIG_WIFI_CONTROL_FUNC @@ -44,6 +44,8 @@ #include #include #include +#include +#include #include #include @@ -127,6 +129,17 @@ int wifi_get_mac_addr(unsigned char *buf) return -EOPNOTSUPP; } +void *wifi_get_country_code(char *ccode) +{ + DHD_TRACE(("%s\n", __FUNCTION__)); + if (!ccode) + return NULL; + if (wifi_control_data && wifi_control_data->get_country_code) { + return wifi_control_data->get_country_code(ccode); + } + return NULL; +} + static int wifi_probe(struct platform_device *pdev) { struct wifi_platform_data *wifi_ctrl = @@ -157,14 +170,21 @@ static int wifi_remove(struct platform_device *pdev) up(&wifi_control_sem); return 0; } + static int wifi_suspend(struct platform_device *pdev, pm_message_t state) { DHD_TRACE(("##> %s\n", __FUNCTION__)); +#if defined(OOB_INTR_ONLY) + bcmsdh_oob_intr_set(0); +#endif /* (OOB_INTR_ONLY) */ return 0; } static int wifi_resume(struct platform_device *pdev) { DHD_TRACE(("##> %s\n", __FUNCTION__)); +#if defined(OOB_INTR_ONLY) + bcmsdh_oob_intr_set(1); +#endif /* (OOB_INTR_ONLY) */ return 0; } @@ -191,6 +211,12 @@ void wifi_del_dev(void) } #endif /* defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC) */ +static int dhd_device_event(struct notifier_block *this, unsigned long event, + void *ptr); + +static struct notifier_block dhd_notifier = { + .notifier_call = dhd_device_event +}; #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) #include @@ -283,7 +309,6 @@ typedef struct dhd_info { int wl_count; int wl_packet; - int hang_was_sent; /* flag that message was send at least once */ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) struct mutex wl_start_lock; /* mutex when START called to prevent any other Linux calls */ #endif @@ -529,7 +554,7 @@ static void dhd_set_packet_filter(int value, dhd_pub_t *dhd) #if defined(CONFIG_HAS_EARLYSUSPEND) static int dhd_set_suspend(int value, dhd_pub_t *dhd) { - int power_mode = PM_MAX; + int power_mode = PM_FAST; /* wl_pkt_filter_enable_t enable_parm; */ char iovbuf[32]; int bcn_li_dtim = 3; @@ -764,13 +789,13 @@ _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx) ASSERT(dhd && dhd->iflist[ifidx]); dev = dhd->iflist[ifidx]->net; - netif_addr_lock_bh(dev); + NETIF_ADDR_LOCK(dev); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) cnt = netdev_mc_count(dev); #else cnt = dev->mc_count; #endif - netif_addr_unlock_bh(dev); + NETIF_ADDR_UNLOCK(dev); /* Determine initial value of allmulti flag */ allmulti = (dev->flags & IFF_ALLMULTI) ? 
TRUE : FALSE; @@ -790,7 +815,7 @@ _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx) memcpy(bufp, &cnt, sizeof(cnt)); bufp += sizeof(cnt); - netif_addr_lock_bh(dev); + NETIF_ADDR_LOCK(dev); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) netdev_for_each_mc_addr(ha, dev) { if (!cnt) @@ -800,12 +825,12 @@ _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx) cnt--; } #else - for (mclist = dev->mc_list;(mclist && (cnt > 0)); cnt--, mclist = mclist->next) { + for (mclist = dev->mc_list; (mclist && (cnt > 0)); cnt--, mclist = mclist->next) { memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN); bufp += ETHER_ADDR_LEN; } #endif - netif_addr_unlock_bh(dev); + NETIF_ADDR_UNLOCK(dev); memset(&ioc, 0, sizeof(ioc)); ioc.cmd = WLC_SET_VAR; @@ -1484,7 +1509,8 @@ dhd_dpc_thread(void *data) dhd_os_wake_unlock(&dhd->pub); } } else { - dhd_bus_stop(dhd->pub.bus, TRUE); + if (dhd->pub.up) + dhd_bus_stop(dhd->pub.bus, TRUE); dhd_os_wake_unlock(&dhd->pub); } } @@ -1742,6 +1768,14 @@ dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd) dhd_os_wake_lock(&dhd->pub); + /* send to dongle only if we are not waiting for reload already */ + if (dhd->pub.hang_was_sent) { + DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__)); + dhd_os_wake_lock_timeout_enable(&dhd->pub); + dhd_os_wake_unlock(&dhd->pub); + return OSL_ERROR(BCME_DONGLE_DOWN); + } + ifidx = dhd_net2idx(dhd, net); DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd)); @@ -1885,7 +1919,7 @@ dhd_stop(struct net_device *net) #else DHD_ERROR(("BYPASS %s:due to BRCM compilation : under investigation ...\n", __FUNCTION__)); #endif /* !defined(IGNORE_ETH0_DOWN) */ - + dhd->pub.hang_was_sent = 0; OLD_MOD_DEC_USE_COUNT; return 0; } @@ -1905,6 +1939,9 @@ dhd_open(struct net_device *net) ifidx = dhd_net2idx(dhd, net); DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx)); + if (ifidx == DHD_BAD_IF) + return -1; + if ((dhd->iflist[ifidx]) && (dhd->iflist[ifidx]->state == WLC_E_IF_DEL)) { DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__)); return -1; @@ -2005,6 +2042,7 @@ dhd_del_if(dhd_info_t *dhd, int ifidx) up(&dhd->sysioc_sem); } + dhd_pub_t * dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) { @@ -2161,6 +2199,8 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) register_early_suspend(&dhd->early_suspend); #endif + register_inetaddr_notifier(&dhd_notifier); + return &dhd->pub; fail: @@ -2186,12 +2226,15 @@ dhd_bus_start(dhd_pub_t *dhdp) DHD_TRACE(("%s: \n", __FUNCTION__)); + dhd_os_sdlock(dhdp); + /* try to download image and nvram to the dongle */ if (dhd->pub.busstate == DHD_BUS_DOWN) { if (!(dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh, fw_path, nv_path))) { DHD_ERROR(("%s: dhdsdio_probe_download failed. 
firmware = %s nvram = %s\n", __FUNCTION__, fw_path, nv_path)); + dhd_os_sdunlock(dhdp); return -1; } } @@ -2201,8 +2244,9 @@ dhd_bus_start(dhd_pub_t *dhdp) dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms); /* Bring up the bus */ - if ((ret = dhd_bus_init(&dhd->pub, TRUE)) != 0) { + if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) { DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret)); + dhd_os_sdunlock(dhdp); return ret; } #if defined(OOB_INTR_ONLY) @@ -2211,6 +2255,7 @@ dhd_bus_start(dhd_pub_t *dhdp) dhd->wd_timer_valid = FALSE; del_timer_sync(&dhd->timer); DHD_ERROR(("%s Host failed to resgister for OOB\n", __FUNCTION__)); + dhd_os_sdunlock(dhdp); return -ENODEV; } @@ -2223,9 +2268,12 @@ dhd_bus_start(dhd_pub_t *dhdp) dhd->wd_timer_valid = FALSE; del_timer_sync(&dhd->timer); DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__)); + dhd_os_sdunlock(dhdp); return -ENODEV; } + dhd_os_sdunlock(dhdp); + #ifdef EMBEDDED_PLATFORM bcm_mkiovar("event_msgs", dhdp->eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf)); dhdcdc_query_ioctl(dhdp, 0, WLC_GET_VAR, iovbuf, sizeof(iovbuf)); @@ -2257,9 +2305,12 @@ dhd_bus_start(dhd_pub_t *dhdp) /* enable dongle roaming event */ setbit(dhdp->eventmask, WLC_E_ROAM); - dhdp->pktfilter_count = 1; + dhdp->pktfilter_count = 4; /* Setup filter to allow only unicast */ dhdp->pktfilter[0] = "100 0 0 0 0x01 0x00"; + dhdp->pktfilter[1] = NULL; + dhdp->pktfilter[2] = NULL; + dhdp->pktfilter[3] = NULL; #endif /* EMBEDDED_PLATFORM */ /* Bus is ready, do any protocol initialization */ @@ -2313,6 +2364,48 @@ static struct net_device_ops dhd_ops_virt = { }; #endif +static int dhd_device_event(struct notifier_block *this, unsigned long event, + void *ptr) +{ + struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; + dhd_info_t *dhd; + dhd_pub_t *dhd_pub; + + if (!ifa) + return NOTIFY_DONE; + + dhd = *(dhd_info_t **)netdev_priv(ifa->ifa_dev->dev); + dhd_pub = &dhd->pub; + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31)) + if (ifa->ifa_dev->dev->netdev_ops == &dhd_ops_pri) { +#else + if (ifa->ifa_dev->dev->open == &dhd_open) { +#endif + switch (event) { + case NETDEV_UP: + DHD_TRACE(("%s: [%s] Up IP: 0x%x\n", + __FUNCTION__, ifa->ifa_label, ifa->ifa_address)); + + dhd_arp_cleanup(dhd_pub); + break; + + case NETDEV_DOWN: + DHD_TRACE(("%s: [%s] Down IP: 0x%x\n", + __FUNCTION__, ifa->ifa_label, ifa->ifa_address)); + + dhd_arp_cleanup(dhd_pub); + break; + + default: + DHD_TRACE(("%s: [%s] Event: %lu\n", + __FUNCTION__, ifa->ifa_label, event)); + break; + } + } + return NOTIFY_DONE; +} + int dhd_net_attach(dhd_pub_t *dhdp, int ifidx) { @@ -2386,6 +2479,7 @@ dhd_net_attach(dhd_pub_t *dhdp, int ifidx) dhd->pub.mac.octet[0], dhd->pub.mac.octet[1], dhd->pub.mac.octet[2], dhd->pub.mac.octet[3], dhd->pub.mac.octet[4], dhd->pub.mac.octet[5]); + #if defined(CONFIG_WIRELESS_EXT) #if defined(CONFIG_FIRST_SCAN) #ifdef SOFTAP @@ -2451,6 +2545,8 @@ dhd_detach(dhd_pub_t *dhdp) dhd_if_t *ifp; int i; + unregister_inetaddr_notifier(&dhd_notifier); + #if defined(CONFIG_HAS_EARLYSUSPEND) if (dhd->early_suspend.suspend) unregister_early_suspend(&dhd->early_suspend); @@ -2985,6 +3081,35 @@ int net_os_set_dtim_skip(struct net_device *dev, int val) return 0; } +int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + char *filterp = NULL; + int ret = 0; + + if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) + return ret; + if (num >= dhd->pub.pktfilter_count) + return -EINVAL; + if (add_remove) { + switch (num) { + case 
DHD_BROADCAST_FILTER_NUM: + filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF"; + break; + case DHD_MULTICAST4_FILTER_NUM: + filterp = "102 0 0 0 0xFFFFFF 0x01005E"; + break; + case DHD_MULTICAST6_FILTER_NUM: + filterp = "103 0 0 0 0xFFFF 0x3333"; + break; + default: + return -EINVAL; + } + } + dhd->pub.pktfilter[num] = filterp; + return ret; +} + int net_os_set_packet_filter(struct net_device *dev, int val) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); @@ -3038,11 +3163,12 @@ dhd_dev_pno_enable(struct net_device *dev, int pfn_enabled) /* Linux wrapper to call common dhd_pno_set */ int -dhd_dev_pno_set(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid, ushort scan_fr) +dhd_dev_pno_set(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid, + ushort scan_fr, int pno_repeat, int pno_freq_expo_max) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); - return (dhd_pno_set(&dhd->pub, ssids_local, nssid, scan_fr)); + return (dhd_pno_set(&dhd->pub, ssids_local, nssid, scan_fr, pno_repeat, pno_freq_expo_max)); } /* Linux wrapper to get pno status */ @@ -3062,8 +3188,8 @@ int net_os_send_hang_message(struct net_device *dev) int ret = 0; if (dhd) { - if (!dhd->hang_was_sent) { - dhd->hang_was_sent = 1; + if (!dhd->pub.hang_was_sent) { + dhd->pub.hang_was_sent = 1; ret = wl_iw_send_priv_event(dev, "HANG"); } } diff --git a/drivers/net/wireless/bcm4329/dhd_sdio.c b/drivers/net/wireless/bcm4329/dhd_sdio.c index f9b9eceb91c7a..e9093e8106244 100644 --- a/drivers/net/wireless/bcm4329/dhd_sdio.c +++ b/drivers/net/wireless/bcm4329/dhd_sdio.c @@ -146,6 +146,8 @@ DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep); extern int dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len); +extern void bcmsdh_set_irq(int flag); + #ifdef DHD_DEBUG /* Device console log buffer state */ typedef struct dhd_console { @@ -1279,7 +1281,8 @@ dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen) DHD_INFO(("%s: ctrl_frame_stat == FALSE\n", __FUNCTION__)); ret = 0; } else { - DHD_INFO(("%s: ctrl_frame_stat == TRUE\n", __FUNCTION__)); + if (!bus->dhd->hang_was_sent) + DHD_ERROR(("%s: ctrl_frame_stat == TRUE\n", __FUNCTION__)); ret = -1; } } @@ -5749,7 +5752,9 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) /* Expect app to have torn down any connection before calling */ /* Stop the bus, disable F2 */ dhd_bus_stop(bus, FALSE); - +#if defined(OOB_INTR_ONLY) + bcmsdh_set_irq(FALSE); +#endif /* defined(OOB_INTR_ONLY) */ /* Clean tx/rx buffer pointers, detach from the dongle */ dhdsdio_release_dongle(bus, bus->dhd->osh, TRUE); @@ -5785,6 +5790,7 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE); if (bcmerror == BCME_OK) { #if defined(OOB_INTR_ONLY) + bcmsdh_set_irq(TRUE); dhd_enable_oob_intr(bus, TRUE); #endif /* defined(OOB_INTR_ONLY) */ bus->dhd->dongle_reset = FALSE; diff --git a/drivers/net/wireless/bcm4329/include/epivers.h b/drivers/net/wireless/bcm4329/include/epivers.h index 00e3cac14dc5d..cd66a9501cb61 100644 --- a/drivers/net/wireless/bcm4329/include/epivers.h +++ b/drivers/net/wireless/bcm4329/include/epivers.h @@ -33,16 +33,16 @@ #define EPI_RC_NUMBER 248 -#define EPI_INCREMENTAL_NUMBER 20 +#define EPI_INCREMENTAL_NUMBER 23 #define EPI_BUILD_NUMBER 0 -#define EPI_VERSION 4, 218, 248, 20 +#define EPI_VERSION 4, 218, 248, 23 -#define EPI_VERSION_NUM 0x04daf814 +#define EPI_VERSION_NUM 0x04daf817 -#define EPI_VERSION_STR "4.218.248.20" -#define EPI_ROUTER_VERSION_STR "4.219.248.20" +#define EPI_VERSION_STR "4.218.248.23" 
+#define EPI_ROUTER_VERSION_STR "4.219.248.23" #endif diff --git a/drivers/net/wireless/bcm4329/include/wlioctl.h b/drivers/net/wireless/bcm4329/include/wlioctl.h index cd7725a70db41..078484830dc78 100644 --- a/drivers/net/wireless/bcm4329/include/wlioctl.h +++ b/drivers/net/wireless/bcm4329/include/wlioctl.h @@ -24,7 +24,7 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * - * $Id: wlioctl.h,v 1.601.4.15.2.14.2.62.4.1 2010/11/17 03:09:28 Exp $ + * $Id: wlioctl.h,v 1.601.4.15.2.14.2.62.4.3 2011/02/09 23:31:02 Exp $ */ @@ -254,7 +254,6 @@ typedef struct wl_join_params { #define WLC_CNTRY_BUF_SZ 4 - typedef enum sup_auth_status { WLC_SUP_DISCONNECTED = 0, @@ -857,7 +856,7 @@ typedef struct wl_ioctl { #define PM_MAX 1 #define PM_FAST 2 -#define LISTEN_INTERVAL 20 +#define LISTEN_INTERVAL 10 #define INTERFERE_NONE 0 #define NON_WLAN 1 @@ -1309,12 +1308,16 @@ enum { #define ENABLE_BKGRD_SCAN_BIT 2 #define IMMEDIATE_SCAN_BIT 3 #define AUTO_CONNECT_BIT 4 +#define ENABLE_BD_SCAN_BIT 5 +#define ENABLE_ADAPTSCAN_BIT 6 #define SORT_CRITERIA_MASK 0x01 #define AUTO_NET_SWITCH_MASK 0x02 #define ENABLE_BKGRD_SCAN_MASK 0x04 #define IMMEDIATE_SCAN_MASK 0x08 #define AUTO_CONNECT_MASK 0x10 +#define ENABLE_BD_SCAN_MASK 0x20 +#define ENABLE_ADAPTSCAN_MASK 0x40 #define PFN_VERSION 1 @@ -1327,6 +1330,8 @@ typedef struct wl_pfn_param { int32 lost_network_timeout; int16 flags; int16 rssi_margin; + int32 repeat_scan; + int32 max_freq_adjust; } wl_pfn_param_t; typedef struct wl_pfn { @@ -1336,14 +1341,12 @@ typedef struct wl_pfn { int32 auth; uint32 wpa_auth; int32 wsec; -#ifdef WLPFN_AUTO_CONNECT - union { - wl_wsec_key_t sec_key; - wsec_pmk_t wpa_sec_key; - } pfn_security; -#endif } wl_pfn_t; +#define PNO_SCAN_MAX_FW 508*1000 +#define PNO_SCAN_MAX_FW_SEC PNO_SCAN_MAX_FW/1000 +#define PNO_SCAN_MIN_FW_SEC 10 + #define TOE_TX_CSUM_OL 0x00000001 #define TOE_RX_CSUM_OL 0x00000002 diff --git a/drivers/net/wireless/bcm4329/linux_osl.c b/drivers/net/wireless/bcm4329/linux_osl.c index 980416baf918c..c5c94457ce837 100644 --- a/drivers/net/wireless/bcm4329/linux_osl.c +++ b/drivers/net/wireless/bcm4329/linux_osl.c @@ -155,10 +155,9 @@ osl_attach(void *pdev, uint bustype, bool pkttag) gfp_t flags; flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL; - osh = kmalloc(sizeof(osl_t), flags); + osh = kzalloc(sizeof(osl_t), flags); ASSERT(osh); - bzero(osh, sizeof(osl_t)); ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1)); @@ -247,8 +246,10 @@ void* osl_pktget(osl_t *osh, uint len) { struct sk_buff *skb; + gfp_t flags; - if ((skb = dev_alloc_skb(len))) { + flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL; + if ((skb = __dev_alloc_skb(len, flags))) { skb_put(skb, len); skb->priority = 0; diff --git a/drivers/net/wireless/bcm4329/wl_iw.c b/drivers/net/wireless/bcm4329/wl_iw.c index 8460804c945ad..974c80c1e958b 100644 --- a/drivers/net/wireless/bcm4329/wl_iw.c +++ b/drivers/net/wireless/bcm4329/wl_iw.c @@ -21,7 +21,7 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: wl_iw.c,v 1.51.4.9.2.6.4.142.4.69 2010/12/21 03:00:08 Exp $ + * $Id: wl_iw.c,v 1.51.4.9.2.6.4.142.4.78 2011/02/11 21:27:52 Exp $ */ @@ -54,6 +54,7 @@ typedef const struct si_pub si_t; #define WL_INFORM(x) #define WL_WSEC(x) #define WL_SCAN(x) +#define WL_PNO(x) #define WL_TRACE_COEX(x) #include @@ -115,10 +116,6 @@ static int g_onoff = G_WLAN_SET_ON; wl_iw_extra_params_t g_wl_iw_params; static struct mutex wl_cache_lock; -#ifdef CONFIG_US_NON_DFS_CHANNELS_ONLY -static bool use_non_dfs_channels = true; -#endif - extern bool wl_iw_conn_status_str(uint32 event_type, uint32 status, uint32 reason, char* stringBuf, uint buflen); #include @@ -161,12 +158,13 @@ extern int dhd_wait_pend8021x(struct net_device *dev); #endif static void *g_scan = NULL; -static volatile uint g_scan_specified_ssid; -static wlc_ssid_t g_specific_ssid; +static volatile uint g_scan_specified_ssid; +static wlc_ssid_t g_specific_ssid; static wlc_ssid_t g_ssid; -static wl_iw_ss_cache_ctrl_t g_ss_cache_ctrl; +bool btcoex_is_sco_active(struct net_device *dev); +static wl_iw_ss_cache_ctrl_t g_ss_cache_ctrl; #if defined(CONFIG_FIRST_SCAN) static volatile uint g_first_broadcast_scan; static volatile uint g_first_counter_scans; @@ -594,6 +592,36 @@ wl_iw_set_passive_scan( return error; } + +static int +wl_iw_set_txpower( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int error = 0; + char *p = extra; + int txpower = -1; + + txpower = bcm_atoi(extra + strlen(TXPOWER_SET_CMD) + 1); + if ((txpower >= 0) && (txpower <= 127)) { + txpower |= WL_TXPWR_OVERRIDE; + txpower = htod32(txpower); + + error = dev_wlc_intvar_set(dev, "qtxpower", txpower); + p += snprintf(p, MAX_WX_STRING, "OK"); + WL_TRACE(("%s: set TXpower 0x%X is OK\n", __FUNCTION__, txpower)); + } else { + WL_ERROR(("%s: set tx power failed\n", __FUNCTION__)); + p += snprintf(p, MAX_WX_STRING, "FAIL"); + } + + wrqu->data.length = p - extra + 1; + return error; +} + static int wl_iw_get_macaddr( struct net_device *dev, @@ -619,31 +647,31 @@ wl_iw_get_macaddr( return error; } -static int -wl_iw_set_country_code(struct net_device *dev, char *ccode) -{ - char country_code[WLC_CNTRY_BUF_SZ]; - int ret = -1; - - WL_TRACE(("%s\n", __FUNCTION__)); - if (!ccode) - ccode = dhd_bus_country_get(dev); - strncpy(country_code, ccode, sizeof(country_code)); - if (ccode && (country_code[0] != 0)) { -#ifdef CONFIG_US_NON_DFS_CHANNELS_ONLY - if (use_non_dfs_channels && !strncmp(country_code, "US", 2)) - strncpy(country_code, "Q2", WLC_CNTRY_BUF_SZ); - if (!use_non_dfs_channels && !strncmp(country_code, "Q2", 2)) - strncpy(country_code, "US", WLC_CNTRY_BUF_SZ); -#endif - ret = dev_wlc_ioctl(dev, WLC_SET_COUNTRY, &country_code, sizeof(country_code)); - if (ret >= 0) { - WL_TRACE(("%s: set country %s OK\n", __FUNCTION__, country_code)); - dhd_bus_country_set(dev, &country_code[0]); - } - } - return ret; -} +static int +wl_iw_set_country_code(struct net_device *dev, char *ccode) +{ + char country_code[WLC_CNTRY_BUF_SZ]; + int ret = -1; + + WL_TRACE(("%s\n", __FUNCTION__)); + if (!ccode) + ccode = dhd_bus_country_get(dev); + strncpy(country_code, ccode, sizeof(country_code)); + if (ccode && (country_code[0] != 0)) { +#ifdef CONFIG_US_NON_DFS_CHANNELS_ONLY + if (use_non_dfs_channels && !strncmp(country_code, "US", 2)) + strncpy(country_code, "Q2", WLC_CNTRY_BUF_SZ); + if (!use_non_dfs_channels && !strncmp(country_code, "Q2", 2)) + strncpy(country_code, "US", WLC_CNTRY_BUF_SZ); +#endif + ret = dev_wlc_ioctl(dev, WLC_SET_COUNTRY, 
&country_code, sizeof(country_code)); + if (ret >= 0) { + WL_TRACE(("%s: set country %s OK\n", __FUNCTION__, country_code)); + dhd_bus_country_set(dev, &country_code[0]); + } + } + return ret; +} static int wl_iw_set_country( @@ -666,7 +694,7 @@ wl_iw_set_country( country_code_size = strlen(extra) - country_offset; if (country_offset != 0) { - strncpy(country_code, extra + country_offset + 1, + strncpy(country_code, extra + country_offset +1, MIN(country_code_size, sizeof(country_code))); error = wl_iw_set_country_code(dev, country_code); if (error >= 0) { @@ -677,6 +705,7 @@ wl_iw_set_country( } WL_ERROR(("%s: set country %s failed code %d\n", __FUNCTION__, country_code, error)); + p += snprintf(p, MAX_WX_STRING, "FAIL"); exit: @@ -738,26 +767,40 @@ wl_iw_set_power_mode( #endif -static bool btcoex_is_sco_active(struct net_device *dev) +bool btcoex_is_sco_active(struct net_device *dev) { int ioc_res = 0; bool res = false; - int temp = 0; + int sco_id_cnt = 0; + int param27; + int i; + + for (i = 0; i < 12; i++) { + + ioc_res = dev_wlc_intvar_get_reg(dev, "btc_params", 27, ¶m27); - ioc_res = dev_wlc_intvar_get_reg(dev, "btc_params", 4, &temp); + WL_TRACE_COEX(("%s, sample[%d], btc params: 27:%x\n", + __FUNCTION__, i, param27)); - if (ioc_res == 0) { - WL_TRACE_COEX(("%s: read btc_params[4] = %x\n", __FUNCTION__, temp)); + if (ioc_res < 0) { + WL_ERROR(("%s ioc read btc params error\n", __FUNCTION__)); + break; + } - if ((temp > 0xea0) && (temp < 0xed8)) { - WL_TRACE_COEX(("%s: BT SCO/eSCO is ACTIVE\n", __FUNCTION__)); + if ((param27 & 0x6) == 2) { + sco_id_cnt++; + } + + if (sco_id_cnt > 2) { + WL_TRACE_COEX(("%s, sco/esco detected, pkt id_cnt:%d samples:%d\n", + __FUNCTION__, sco_id_cnt, i)); res = true; - } else { - WL_TRACE_COEX(("%s: BT SCO/eSCO is NOT detected\n", __FUNCTION__)); + break; } - } else { - WL_ERROR(("%s ioc read btc params error\n", __FUNCTION__)); + + msleep(5); } + return res; } @@ -1043,21 +1086,6 @@ wl_iw_set_suspend( return ret; } -#ifdef CONFIG_US_NON_DFS_CHANNELS_ONLY -static int -wl_iw_set_dfs_channels( - struct net_device *dev, - struct iw_request_info *info, - union iwreq_data *wrqu, - char *extra -) -{ - use_non_dfs_channels = *(extra + strlen(SETDFSCHANNELS_CMD) + 1) - '0'; - use_non_dfs_channels = (use_non_dfs_channels != 0) ? 
false : true; - wl_iw_set_country_code(dev, NULL); - return 0; -} -#endif int wl_format_ssid(char* ssid_buf, uint8* ssid, int ssid_len) @@ -1343,9 +1371,10 @@ wl_iw_set_pno_set( int nssid = 0; cmd_tlv_t *cmd_tlv_temp; char *str_ptr; - char *str_ptr_end; int tlv_size_left; int pno_time; + int pno_repeat; + int pno_freq_expo_max; #ifdef PNO_SET_DEBUG int i; @@ -1359,6 +1388,10 @@ wl_iw_set_pno_set( 'G', 'O', 'O', 'G', 'T', '1','E', + 'R', + '2', + 'M', + '2', 0x00 }; #endif @@ -1402,6 +1435,7 @@ wl_iw_set_pno_set( cmd_tlv_temp = (cmd_tlv_t *)str_ptr; memset(ssids_local, 0, sizeof(ssids_local)); + pno_repeat = pno_freq_expo_max = 0; if ((cmd_tlv_temp->prefix == PNO_TLV_PREFIX) && \ (cmd_tlv_temp->version == PNO_TLV_VERSION) && \ @@ -1422,9 +1456,28 @@ wl_iw_set_pno_set( goto exit_proc; } str_ptr++; - pno_time = simple_strtoul(str_ptr, &str_ptr_end, 16); - WL_ERROR((" got %d bytes left pno_time %d or %#x\n", \ - tlv_size_left, pno_time, pno_time)); + pno_time = simple_strtoul(str_ptr, &str_ptr, 16); + WL_PNO(("%s: pno_time=%d\n", __FUNCTION__, pno_time)); + + if (str_ptr[0] != 0) { + if ((str_ptr[0] != PNO_TLV_FREQ_REPEAT)) { + WL_ERROR(("%s pno repeat : corrupted field\n", \ + __FUNCTION__)); + goto exit_proc; + } + str_ptr++; + pno_repeat = simple_strtoul(str_ptr, &str_ptr, 16); + WL_PNO(("%s :got pno_repeat=%d\n", __FUNCTION__, pno_repeat)); + if (str_ptr[0] != PNO_TLV_FREQ_EXPO_MAX) { + WL_ERROR(("%s FREQ_EXPO_MAX corrupted field size\n", \ + __FUNCTION__)); + goto exit_proc; + } + str_ptr++; + pno_freq_expo_max = simple_strtoul(str_ptr, &str_ptr, 16); + WL_PNO(("%s: pno_freq_expo_max=%d\n", \ + __FUNCTION__, pno_freq_expo_max)); + } } } else { @@ -1432,7 +1485,7 @@ wl_iw_set_pno_set( goto exit_proc; } - res = dhd_dev_pno_set(dev, ssids_local, nssid, pno_time); + res = dhd_dev_pno_set(dev, ssids_local, nssid, pno_time, pno_repeat, pno_freq_expo_max); exit_proc: net_os_wake_unlock(dev); @@ -1719,6 +1772,79 @@ int hstr_2_buf(const char *txt, u8 *buf, int len) return 0; } +#if defined(SOFTAP) && defined(SOFTAP_TLV_CFG) + +static int wl_iw_softap_cfg_tlv( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int res = -1; + char *str_ptr; + int tlv_size_left; + + +#define SOFTAP_TLV_DEBUG 1 +#ifdef SOFTAP_TLV_DEBUG +char softap_cmd_example[] = { + + 'S', 'O', 'F', 'T', 'A', 'P', 'S', 'E', 'T', ' ', + + SOFTAP_TLV_PREFIX, SOFTAP_TLV_VERSION, + SOFTAP_TLV_SUBVERSION, SOFTAP_TLV_RESERVED, + + TLV_TYPE_SSID, 9, 'B', 'R', 'C', 'M', ',', 'G', 'O', 'O', 'G', + + TLV_TYPE_SECUR, 4, 'O', 'P', 'E', 'N', + + TLV_TYPE_KEY, 4, 0x31, 0x32, 0x33, 0x34, + + TLV_TYPE_CHANNEL, 4, 0x06, 0x00, 0x00, 0x00 +}; +#endif + + +#ifdef SOFTAP_TLV_DEBUG + { + int i; + if (!(extra = kmalloc(sizeof(softap_cmd_example) +10, GFP_KERNEL))) + return -ENOMEM; + memcpy(extra, softap_cmd_example, sizeof(softap_cmd_example)); + wrqu->data.length = sizeof(softap_cmd_example); + print_buf(extra, wrqu->data.length, 16); + for (i = 0; i < wrqu->data.length; i++) + printf("%c ", extra[i]); + printf("\n"); + } +#endif + + WL_ERROR(("\n### %s: info->cmd:%x, info->flags:%x, u.data=0x%p, u.len=%d\n", + __FUNCTION__, info->cmd, info->flags, + wrqu->data.pointer, wrqu->data.length)); + + if (g_onoff == G_WLAN_SET_OFF) { + WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__)); + return -1; + } + + if (wrqu->data.length < (strlen(SOFTAP_SET_CMD) + sizeof(cmd_tlv_t))) { + WL_ERROR(("%s argument=%d less %d\n", __FUNCTION__, + wrqu->data.length, strlen(SOFTAP_SET_CMD) + 
sizeof(cmd_tlv_t))); + return -1; + } + + str_ptr = extra + strlen(SOFTAP_SET_CMD)+1; + tlv_size_left = wrqu->data.length - (strlen(SOFTAP_SET_CMD)+1); + + memset(&my_ap, 0, sizeof(my_ap)); + + return res; +} +#endif + + #ifdef SOFTAP int init_ap_profile_from_string(char *param_str, struct ap_profile *ap_cfg) { @@ -3686,6 +3812,7 @@ wl_iw_handle_scanresults_ies(char **event_p, char *end, wpa_snprintf_hex(buf + 10, 2+1, &(ie->len), 1); wpa_snprintf_hex(buf + 12, 2*ie->len+1, ie->data, ie->len); event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, buf); + kfree(buf); #endif break; } @@ -4784,7 +4911,7 @@ wl_iw_set_power( WL_TRACE(("%s: SIOCSIWPOWER\n", dev->name)); - pm = vwrq->disabled ? PM_OFF : PM_MAX; + pm = vwrq->disabled ? PM_OFF : PM_FAST; pm = htod32(pm); if ((error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm)))) @@ -5727,7 +5854,7 @@ wl_iw_combined_scan_set(struct net_device *dev, wlc_ssid_t* ssids_local, int nss WL_SCAN(("scan_type=%d\n", iscan->iscan_ex_params_p->params.scan_type)); WL_SCAN(("\n###################\n")); } -#endif +#endif if (params_size > WLC_IOCTL_MEDLEN) { WL_ERROR(("Set ISCAN for %s due to params_size=%d \n", \ @@ -5758,7 +5885,7 @@ static int iwpriv_set_cscan(struct net_device *dev, struct iw_request_info *info int nssid = 0; int nchan = 0; - WL_TRACE(("\%s: info->cmd:%x, info->flags:%x, u.data=0x%p, u.len=%d\n", + WL_TRACE(("%s: info->cmd:%x, info->flags:%x, u.data=0x%p, u.len=%d\n", __FUNCTION__, info->cmd, info->flags, wrqu->data.pointer, wrqu->data.length)); @@ -5767,6 +5894,11 @@ static int iwpriv_set_cscan(struct net_device *dev, struct iw_request_info *info return -1; } +#ifdef PNO_SET_DEBUG + wl_iw_set_pno_set(dev, info, wrqu, extra); + return 0; +#endif + if (wrqu->data.length != 0) { char *str_ptr; @@ -6294,16 +6426,16 @@ static int set_ap_cfg(struct net_device *dev, struct ap_profile *ap) } if (strlen(ap->country_code)) { - int error = 0; - if ((error = dev_wlc_ioctl(dev, WLC_SET_COUNTRY, - ap->country_code, sizeof(ap->country_code))) >= 0) { - WL_SOFTAP(("%s: set country %s OK\n", - __FUNCTION__, ap->country_code)); - dhd_bus_country_set(dev, &ap->country_code[0]); - } else { - WL_ERROR(("%s: ERROR:%d setting country %s\n", - __FUNCTION__, error, ap->country_code)); - } + int error = 0; + if ((error = dev_wlc_ioctl(dev, WLC_SET_COUNTRY, + ap->country_code, sizeof(ap->country_code))) >= 0) { + WL_SOFTAP(("%s: set country %s OK\n", + __FUNCTION__, ap->country_code)); + dhd_bus_country_set(dev, &ap->country_code[0]); + } else { + WL_ERROR(("%s: ERROR:%d setting country %s\n", + __FUNCTION__, error, ap->country_code)); + } } else { WL_SOFTAP(("%s: Country code is not specified," " will use Radio's default\n", @@ -7113,10 +7245,8 @@ static int wl_iw_set_priv( ret = wl_iw_set_dtim_skip(dev, info, (union iwreq_data *)dwrq, extra); else if (strnicmp(extra, SETSUSPEND_CMD, strlen(SETSUSPEND_CMD)) == 0) ret = wl_iw_set_suspend(dev, info, (union iwreq_data *)dwrq, extra); -#ifdef CONFIG_US_NON_DFS_CHANNELS_ONLY - else if (strnicmp(extra, SETDFSCHANNELS_CMD, strlen(SETDFSCHANNELS_CMD)) == 0) - ret = wl_iw_set_dfs_channels(dev, info, (union iwreq_data *)dwrq, extra); -#endif + else if (strnicmp(extra, TXPOWER_SET_CMD, strlen(TXPOWER_SET_CMD)) == 0) + ret = wl_iw_set_txpower(dev, info, (union iwreq_data *)dwrq, extra); #if defined(PNO_SUPPORT) else if (strnicmp(extra, PNOSSIDCLR_SET_CMD, strlen(PNOSSIDCLR_SET_CMD)) == 0) ret = wl_iw_set_pno_reset(dev, info, (union iwreq_data *)dwrq, extra); @@ -7126,21 +7256,40 @@ static int wl_iw_set_priv( ret = 
wl_iw_set_pno_enable(dev, info, (union iwreq_data *)dwrq, extra); #endif #if defined(CSCAN) - else if (strnicmp(extra, CSCAN_COMMAND, strlen(CSCAN_COMMAND)) == 0) + else if (strnicmp(extra, CSCAN_COMMAND, strlen(CSCAN_COMMAND)) == 0) ret = wl_iw_set_cscan(dev, info, (union iwreq_data *)dwrq, extra); -#endif +#endif #ifdef CUSTOMER_HW2 else if (strnicmp(extra, "POWERMODE", strlen("POWERMODE")) == 0) ret = wl_iw_set_power_mode(dev, info, (union iwreq_data *)dwrq, extra); - else if (strnicmp(extra, "BTCOEXMODE", strlen("BTCOEXMODE")) == 0) + else if (strnicmp(extra, "BTCOEXMODE", strlen("BTCOEXMODE")) == 0) { + WL_TRACE_COEX(("%s:got Framwrork cmd: 'BTCOEXMODE'\n", __FUNCTION__)); ret = wl_iw_set_btcoex_dhcp(dev, info, (union iwreq_data *)dwrq, extra); + } #else else if (strnicmp(extra, "POWERMODE", strlen("POWERMODE")) == 0) ret = wl_iw_set_btcoex_dhcp(dev, info, (union iwreq_data *)dwrq, extra); #endif else if (strnicmp(extra, "GETPOWER", strlen("GETPOWER")) == 0) ret = wl_iw_get_power_mode(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, RXFILTER_START_CMD, strlen(RXFILTER_START_CMD)) == 0) + ret = net_os_set_packet_filter(dev, 1); + else if (strnicmp(extra, RXFILTER_STOP_CMD, strlen(RXFILTER_STOP_CMD)) == 0) + ret = net_os_set_packet_filter(dev, 0); + else if (strnicmp(extra, RXFILTER_ADD_CMD, strlen(RXFILTER_ADD_CMD)) == 0) { + int filter_num = *(extra + strlen(RXFILTER_ADD_CMD) + 1) - '0'; + ret = net_os_rxfilter_add_remove(dev, TRUE, filter_num); + } + else if (strnicmp(extra, RXFILTER_REMOVE_CMD, strlen(RXFILTER_REMOVE_CMD)) == 0) { + int filter_num = *(extra + strlen(RXFILTER_REMOVE_CMD) + 1) - '0'; + ret = net_os_rxfilter_add_remove(dev, FALSE, filter_num); + } #ifdef SOFTAP +#ifdef SOFTAP_TLV_CFG + else if (strnicmp(extra, SOFTAP_SET_CMD, strlen(SOFTAP_SET_CMD)) == 0) { + wl_iw_softap_cfg_tlv(dev, info, (union iwreq_data *)dwrq, extra); + } +#endif else if (strnicmp(extra, "ASCII_CMD", strlen("ASCII_CMD")) == 0) { wl_iw_process_private_ascii_cmd(dev, info, (union iwreq_data *)dwrq, extra); } else if (strnicmp(extra, "AP_MAC_LIST_SET", strlen("AP_MAC_LIST_SET")) == 0) { @@ -7661,9 +7810,10 @@ wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data) uint32 datalen = ntoh32(e->datalen); uint32 status = ntoh32(e->status); uint32 toto; +#if defined(ROAM_NOT_USED) static uint32 roam_no_success = 0; static bool roam_no_success_send = FALSE; - +#endif memset(&wrqu, 0, sizeof(wrqu)); memset(extra, 0, sizeof(extra)); @@ -7733,11 +7883,24 @@ wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data) cmd = IWEVREGISTERED; break; case WLC_E_ROAM: + if (status != WLC_E_STATUS_SUCCESS) { + WL_ERROR(("ROAMING did not succeeded, keep status Quo\n")); + goto wl_iw_event_end; + } + + memcpy(wrqu.addr.sa_data, &e->addr.octet, ETHER_ADDR_LEN); + wrqu.addr.sa_family = ARPHRD_ETHER; + cmd = SIOCGIWAP; + if (status == WLC_E_STATUS_SUCCESS) { - memcpy(wrqu.addr.sa_data, &e->addr.octet, ETHER_ADDR_LEN); - wrqu.addr.sa_family = ARPHRD_ETHER; - cmd = SIOCGIWAP; + WL_ASSOC(("%s: WLC_E_ROAM: success\n", __FUNCTION__)); +#if defined(ROAM_NOT_USED) + roam_no_success_send = FALSE; + roam_no_success = 0; +#endif + goto wl_iw_event_end; } +#if defined(ROAM_NOT_USED) else if (status == WLC_E_STATUS_NO_NETWORKS) { roam_no_success++; if ((roam_no_success == 5) && (roam_no_success_send == FALSE)) { @@ -7752,6 +7915,7 @@ wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data) goto wl_iw_event_end; } } +#endif break; case WLC_E_DEAUTH_IND: case WLC_E_DISASSOC_IND: @@ 
-7807,8 +7971,10 @@ wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data) wl_iw_send_priv_event(priv_dev, "AP_UP"); } else { WL_TRACE(("STA_LINK_UP\n")); +#if defined(ROAM_NOT_USED) roam_no_success_send = FALSE; roam_no_success = 0; +#endif } #endif WL_TRACE(("Link UP\n")); @@ -7936,7 +8102,6 @@ wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data) #endif #if WIRELESS_EXT > 14 - memset(extra, 0, sizeof(extra)); if (wl_iw_check_conn_fail(e, extra, sizeof(extra))) { cmd = IWEVCUSTOM; diff --git a/drivers/net/wireless/bcm4329/wl_iw.h b/drivers/net/wireless/bcm4329/wl_iw.h index 928291fe589a5..86613e4c84433 100644 --- a/drivers/net/wireless/bcm4329/wl_iw.h +++ b/drivers/net/wireless/bcm4329/wl_iw.h @@ -21,7 +21,7 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * - * $Id: wl_iw.h,v 1.5.34.1.6.36.4.15 2010/11/17 03:13:51 Exp $ + * $Id: wl_iw.h,v 1.5.34.1.6.36.4.18 2011/02/10 19:33:12 Exp $ */ @@ -52,7 +52,11 @@ #define PNOSETUP_SET_CMD "PNOSETUP " #define PNOENABLE_SET_CMD "PNOFORCE" #define PNODEBUG_SET_CMD "PNODEBUG" -#define SETDFSCHANNELS_CMD "SETDFSCHANNELS" +#define TXPOWER_SET_CMD "TXPOWER" +#define RXFILTER_START_CMD "RXFILTER-START" +#define RXFILTER_STOP_CMD "RXFILTER-STOP" +#define RXFILTER_ADD_CMD "RXFILTER-ADD" +#define RXFILTER_REMOVE_CMD "RXFILTER-REMOVE" #define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5] #define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x" @@ -133,13 +137,13 @@ typedef struct wl_iw_ss_cache { } wl_iw_ss_cache_t; typedef struct wl_iw_ss_cache_ctrl { - wl_iw_ss_cache_t *m_cache_head; - int m_link_down; - int m_timer_expired; - char m_active_bssid[ETHER_ADDR_LEN]; - uint m_prev_scan_mode; - uint m_cons_br_scan_cnt; - struct timer_list *m_timer; + wl_iw_ss_cache_t *m_cache_head; + int m_link_down; + int m_timer_expired; + char m_active_bssid[ETHER_ADDR_LEN]; + uint m_prev_scan_mode; + uint m_cons_br_scan_cnt; + struct timer_list *m_timer; } wl_iw_ss_cache_ctrl_t; typedef enum broadcast_first_scan { @@ -165,7 +169,7 @@ struct ap_profile { }; -#define MACLIST_MODE_DISABLED 0 +#define MACLIST_MODE_DISABLED 0 #define MACLIST_MODE_DENY 1 #define MACLIST_MODE_ALLOW 2 struct mflist { @@ -198,7 +202,6 @@ extern int net_os_wake_lock_timeout_enable(struct net_device *dev); extern int net_os_set_suspend_disable(struct net_device *dev, int val); extern int net_os_set_suspend(struct net_device *dev, int val); extern int net_os_set_dtim_skip(struct net_device *dev, int val); -extern int net_os_set_packet_filter(struct net_device *dev, int val); extern void dhd_bus_country_set(struct net_device *dev, char *country_code); extern char *dhd_bus_country_get(struct net_device *dev); extern int dhd_get_dtim_skip(dhd_pub_t *dhd); @@ -221,11 +224,12 @@ extern int dhd_get_dtim_skip(dhd_pub_t *dhd); extern int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled); extern int dhd_pno_clean(dhd_pub_t *dhd); -extern int dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid, ushort scan_fr); +extern int dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid, \ + ushort scan_fr, int pno_repeat, int pno_freq_expo_max); extern int dhd_pno_get_status(dhd_pub_t *dhd); extern int dhd_dev_pno_reset(struct net_device *dev); extern int dhd_dev_pno_set(struct net_device *dev, wlc_ssid_t* ssids_local, \ - int nssid, ushort scan_fr); + int nssid, ushort scan_fr, int pno_repeat, int pno_freq_expo_max); extern int dhd_dev_pno_enable(struct net_device *dev, 
int pfn_enabled); extern int dhd_dev_get_pno_status(struct net_device *dev); @@ -235,8 +239,9 @@ extern int dhd_dev_get_pno_status(struct net_device *dev); #define PNO_TLV_RESERVED '0' #define PNO_TLV_TYPE_SSID_IE 'S' #define PNO_TLV_TYPE_TIME 'T' -#define PNO_EVENT_UP "PNO_EVENT" -#define PNO_SCAN_MAX_FW 508 +#define PNO_TLV_FREQ_REPEAT 'R' +#define PNO_TLV_FREQ_EXPO_MAX 'M' +#define PNO_EVENT_UP "PNO_EVENT" typedef struct cmd_tlv { char prefix; @@ -245,6 +250,19 @@ typedef struct cmd_tlv { char reserved; } cmd_tlv_t; +#ifdef SOFTAP_TLV_CFG +#define SOFTAP_SET_CMD "SOFTAPSET " +#define SOFTAP_TLV_PREFIX 'A' +#define SOFTAP_TLV_VERSION '1' +#define SOFTAP_TLV_SUBVERSION '0' +#define SOFTAP_TLV_RESERVED '0' + +#define TLV_TYPE_SSID 'S' +#define TLV_TYPE_SECUR 'E' +#define TLV_TYPE_KEY 'K' +#define TLV_TYPE_CHANNEL 'C' +#endif + #if defined(CSCAN) typedef struct cscan_tlv { @@ -279,6 +297,6 @@ extern int wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int extern int wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num); -#endif +#endif -#endif +#endif diff --git a/drivers/net/wireless/bcmdhd/Kconfig b/drivers/net/wireless/bcmdhd/Kconfig new file mode 100644 index 0000000000000..8b6b92f624374 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/Kconfig @@ -0,0 +1,54 @@ +config BCMDHD + tristate "Broadcom 4329/30 wireless cards support" + depends on MMC + ---help--- + This module adds support for wireless adapters based on + Broadcom 4329/30 chipset. + + This driver uses the kernel's wireless extensions subsystem. + + If you choose to build a module, it'll be called dhd. Say M if + unsure. + +config BCMDHD_FW_PATH + depends on BCMDHD + string "Firmware path" + default "/system/etc/firmware/fw_bcmdhd.bin" + ---help--- + Path to the firmware file. + +config BCMDHD_NVRAM_PATH + depends on BCMDHD + string "NVRAM path" + default "/system/etc/wifi/bcmdhd.cal" + ---help--- + Path to the calibration file. 
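+	# Note (editorial, assumption): both path options above are plain Kconfig strings,
+	# so the firmware and calibration file locations are expected to be overridden in the
+	# kernel configuration rather than by editing the driver source.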
+ +config BCMDHD_WEXT + bool "Enable WEXT support" + depends on BCMDHD && CFG80211 = n + select WIRELESS_EXT + select WEXT_PRIV + help + Enables WEXT support + +config DHD_USE_STATIC_BUF + bool "Enable memory preallocation" + depends on BCMDHD + default n + ---help--- + Use memory preallocated in platform + +config DHD_USE_SCHED_SCAN + bool "Use CFG80211 sched scan" + depends on BCMDHD && CFG80211 + default n + ---help--- + Use CFG80211 sched scan + +config DHD_ENABLE_P2P + bool "Enable Wifi Direct" + depends on BCMDHD && CFG80211 + default n + ---help--- + Use Enable Wifi Direct diff --git a/drivers/net/wireless/bcmdhd/Makefile b/drivers/net/wireless/bcmdhd/Makefile new file mode 100644 index 0000000000000..eda803e3ca798 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/Makefile @@ -0,0 +1,38 @@ +# bcmdhd +DHDCFLAGS = -Wall -Wstrict-prototypes -Dlinux -DBCMDRIVER \ + -DBCMDONGLEHOST -DUNRELEASEDCHIP -DBCMDMA32 -DWLBTAMP -DBCMFILEIMAGE \ + -DDHDTHREAD -DDHD_GPL -DDHD_SCHED -DDHD_DEBUG -DSDTEST -DBDC -DTOE \ + -DDHD_BCMEVENTS -DSHOW_EVENTS -DDONGLEOVERLAYS -DBCMDBG \ + -DCUSTOMER_HW2 -DCUSTOM_OOB_GPIO_NUM=2 -DOOB_INTR_ONLY -DHW_OOB \ + -DMMC_SDIO_ABORT -DBCMSDIO -DBCMLXSDMMC -DBCMPLATFORM_BUS -DWLP2P \ + -DNEW_COMPAT_WIRELESS -DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT \ + -DKEEP_ALIVE -DCSCAN -DGET_CUSTOM_MAC_ENABLE -DPKT_FILTER_SUPPORT \ + -DEMBEDDED_PLATFORM -DENABLE_INSMOD_NO_FW_LOAD -DPNO_SUPPORT \ + -DSET_RANDOM_MAC_SOFTAP -DWL_CFG80211_STA_EVENT \ + -Idrivers/net/wireless/bcmdhd -Idrivers/net/wireless/bcmdhd/include + +DHDOFILES = aiutils.o bcmsdh_sdmmc_linux.o dhd_linux.o siutils.o bcmutils.o \ + dhd_linux_sched.o bcmwifi.o dhd_sdio.o bcmevent.o dhd_bta.o hndpmu.o \ + bcmsdh.o dhd_cdc.o bcmsdh_linux.o dhd_common.o linux_osl.o \ + bcmsdh_sdmmc.o dhd_custom_gpio.o sbutils.o wldev_common.o wl_android.o dhd_cfg80211.o + +obj-$(CONFIG_BCMDHD) += bcmdhd.o +bcmdhd-objs += $(DHDOFILES) +ifneq ($(CONFIG_WIRELESS_EXT),) +bcmdhd-objs += wl_iw.o +DHDCFLAGS += -DSOFTAP -DWL_WIRELESS_EXT +endif +ifneq ($(CONFIG_CFG80211),) +bcmdhd-objs += wl_cfg80211.o wl_cfgp2p.o wl_linux_mon.o +DHDCFLAGS += -DWL_CFG80211 +endif +ifneq ($(CONFIG_DHD_USE_SCHED_SCAN),) +DHDCFLAGS += -DWL_SCHED_SCAN +endif +ifneq ($(CONFIG_DHD_ENABLE_P2P),) +DHDCFLAGS += -DWL_ENABLE_P2P_IF +endif +EXTRA_CFLAGS = $(DHDCFLAGS) +ifeq ($(CONFIG_BCMDHD),m) +EXTRA_LDFLAGS += --strip-debug +endif diff --git a/drivers/net/wireless/bcmdhd/aiutils.c b/drivers/net/wireless/bcmdhd/aiutils.c new file mode 100644 index 0000000000000..5ca0993c93331 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/aiutils.c @@ -0,0 +1,675 @@ +/* + * Misc utility routines for accessing chip-specific features + * of the SiliconBackplane-based Broadcom chips. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: aiutils.c,v 1.26.2.1 2010-03-09 18:41:21 $ + */ + + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "siutils_priv.h" + + + + + +static uint32 +get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match) +{ + uint32 ent; + uint inv = 0, nom = 0; + + while (TRUE) { + ent = R_REG(si_osh(sih), *eromptr); + (*eromptr)++; + + if (mask == 0) + break; + + if ((ent & ER_VALID) == 0) { + inv++; + continue; + } + + if (ent == (ER_END | ER_VALID)) + break; + + if ((ent & mask) == match) + break; + + nom++; + } + + SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent)); + if (inv + nom) { + SI_VMSG((" after %d invalid and %d non-matching entries\n", inv, nom)); + } + return ent; +} + +static uint32 +get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh, + uint32 *sizel, uint32 *sizeh) +{ + uint32 asd, sz, szd; + + asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID); + if (((asd & ER_TAG1) != ER_ADD) || + (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) || + ((asd & AD_ST_MASK) != st)) { + + (*eromptr)--; + return 0; + } + *addrl = asd & AD_ADDR_MASK; + if (asd & AD_AG32) + *addrh = get_erom_ent(sih, eromptr, 0, 0); + else + *addrh = 0; + *sizeh = 0; + sz = asd & AD_SZ_MASK; + if (sz == AD_SZ_SZD) { + szd = get_erom_ent(sih, eromptr, 0, 0); + *sizel = szd & SD_SZ_MASK; + if (szd & SD_SG32) + *sizeh = get_erom_ent(sih, eromptr, 0, 0); + } else + *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT); + + SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n", + sp, ad, st, *sizeh, *sizel, *addrh, *addrl)); + + return asd; +} + +static void +ai_hwfixup(si_info_t *sii) +{ +} + + +void +ai_scan(si_t *sih, void *regs, uint devid) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc = (chipcregs_t *)regs; + uint32 erombase, *eromptr, *eromlim; + + erombase = R_REG(sii->osh, &cc->eromptr); + + switch (BUSTYPE(sih->bustype)) { + case SI_BUS: + eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE); + break; + + case PCI_BUS: + + sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE); + + + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase); + eromptr = regs; + break; + + case SPI_BUS: + case SDIO_BUS: + eromptr = (uint32 *)(uintptr)erombase; + break; + + case PCMCIA_BUS: + default: + SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n", sih->bustype)); + ASSERT(0); + return; + } + eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32)); + + SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n", + regs, erombase, eromptr, eromlim)); + while (eromptr < eromlim) { + uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp; + uint32 mpd, asd, addrl, addrh, sizel, sizeh; + uint32 *base; + uint i, j, idx; + bool br; + + br = FALSE; + + + cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI); + if (cia == (ER_END | ER_VALID)) { + SI_VMSG(("Found END of erom after %d cores\n", sii->numcores)); + ai_hwfixup(sii); + return; + } + base = eromptr - 1; + cib = get_erom_ent(sih, &eromptr, 0, 0); + + if ((cib & ER_TAG) != ER_CI) { + SI_ERROR(("CIA not followed by CIB\n")); + goto error; + } + + cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT; + mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT; 
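+		/* remaining CIB fields: core revision plus the wrapper/port counts
+		 * (nmw, nsw, nmp, nsp) used below to walk this component's descriptors
+		 */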
+ crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT; + nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT; + nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT; + nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT; + nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT; + + SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, " + "nsw = %d, nmp = %d & nsp = %d\n", + mfg, cid, crev, base, nmw, nsw, nmp, nsp)); + + if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0)) + continue; + if ((nmw + nsw == 0)) { + + if (cid == OOB_ROUTER_CORE_ID) { + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, + &addrl, &addrh, &sizel, &sizeh); + if (asd != 0) { + sii->oob_router = addrl; + } + } + continue; + } + + idx = sii->numcores; + + sii->cia[idx] = cia; + sii->cib[idx] = cib; + sii->coreid[idx] = cid; + + for (i = 0; i < nmp; i++) { + mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID); + if ((mpd & ER_TAG) != ER_MP) { + SI_ERROR(("Not enough MP entries for component 0x%x\n", cid)); + goto error; + } + SI_VMSG((" Master port %d, mp: %d id: %d\n", i, + (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT, + (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT)); + } + + + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh); + if (asd == 0) { + + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh, + &sizel, &sizeh); + if (asd != 0) + br = TRUE; + else + if ((addrh != 0) || (sizeh != 0) || (sizel != SI_CORE_SIZE)) { + SI_ERROR(("First Slave ASD for core 0x%04x malformed " + "(0x%08x)\n", cid, asd)); + goto error; + } + } + sii->coresba[idx] = addrl; + sii->coresba_size[idx] = sizel; + + j = 1; + do { + asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh, + &sizel, &sizeh); + if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) { + sii->coresba2[idx] = addrl; + sii->coresba2_size[idx] = sizel; + } + j++; + } while (asd != 0); + + + for (i = 1; i < nsp; i++) { + j = 0; + do { + asd = get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE, &addrl, &addrh, + &sizel, &sizeh); + } while (asd != 0); + if (j == 0) { + SI_ERROR((" SP %d has no address descriptors\n", i)); + goto error; + } + } + + + for (i = 0; i < nmw; i++) { + asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh, + &sizel, &sizeh); + if (asd == 0) { + SI_ERROR(("Missing descriptor for MW %d\n", i)); + goto error; + } + if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) { + SI_ERROR(("Master wrapper %d is not 4KB\n", i)); + goto error; + } + if (i == 0) + sii->wrapba[idx] = addrl; + } + + + for (i = 0; i < nsw; i++) { + uint fwp = (nsp == 1) ? 
0 : 1; + asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh, + &sizel, &sizeh); + if (asd == 0) { + SI_ERROR(("Missing descriptor for SW %d\n", i)); + goto error; + } + if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) { + SI_ERROR(("Slave wrapper %d is not 4KB\n", i)); + goto error; + } + if ((nmw == 0) && (i == 0)) + sii->wrapba[idx] = addrl; + } + + + if (br) + continue; + + + sii->numcores++; + } + + SI_ERROR(("Reached end of erom without finding END")); + +error: + sii->numcores = 0; + return; +} + + +void * +ai_setcoreidx(si_t *sih, uint coreidx) +{ + si_info_t *sii = SI_INFO(sih); + uint32 addr = sii->coresba[coreidx]; + uint32 wrap = sii->wrapba[coreidx]; + void *regs; + + if (coreidx >= sii->numcores) + return (NULL); + + + ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg)); + + switch (BUSTYPE(sih->bustype)) { + case SI_BUS: + + if (!sii->regs[coreidx]) { + sii->regs[coreidx] = REG_MAP(addr, SI_CORE_SIZE); + ASSERT(GOODREGS(sii->regs[coreidx])); + } + sii->curmap = regs = sii->regs[coreidx]; + if (!sii->wrappers[coreidx]) { + sii->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE); + ASSERT(GOODREGS(sii->wrappers[coreidx])); + } + sii->curwrap = sii->wrappers[coreidx]; + break; + + + case SPI_BUS: + case SDIO_BUS: + sii->curmap = regs = (void *)((uintptr)addr); + sii->curwrap = (void *)((uintptr)wrap); + break; + + case PCMCIA_BUS: + default: + ASSERT(0); + regs = NULL; + break; + } + + sii->curmap = regs; + sii->curidx = coreidx; + + return regs; +} + + +int +ai_numaddrspaces(si_t *sih) +{ + return 2; +} + + +uint32 +ai_addrspace(si_t *sih, uint asidx) +{ + si_info_t *sii; + uint cidx; + + sii = SI_INFO(sih); + cidx = sii->curidx; + + if (asidx == 0) + return sii->coresba[cidx]; + else if (asidx == 1) + return sii->coresba2[cidx]; + else { + SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", + __FUNCTION__, asidx)); + return 0; + } +} + + +uint32 +ai_addrspacesize(si_t *sih, uint asidx) +{ + si_info_t *sii; + uint cidx; + + sii = SI_INFO(sih); + cidx = sii->curidx; + + if (asidx == 0) + return sii->coresba_size[cidx]; + else if (asidx == 1) + return sii->coresba2_size[cidx]; + else { + SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", + __FUNCTION__, asidx)); + return 0; + } +} + +uint +ai_flag(si_t *sih) +{ + si_info_t *sii; + aidmp_t *ai; + + sii = SI_INFO(sih); + ai = sii->curwrap; + + return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f); +} + +void +ai_setint(si_t *sih, int siflag) +{ +} + +uint +ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val) +{ + si_info_t *sii = SI_INFO(sih); + uint32 *map = (uint32 *) sii->curwrap; + + if (mask || val) { + uint32 w = R_REG(sii->osh, map+(offset/4)); + w &= ~mask; + w |= val; + W_REG(sii->osh, map+(offset/4), val); + } + + return (R_REG(sii->osh, map+(offset/4))); +} + +uint +ai_corevendor(si_t *sih) +{ + si_info_t *sii; + uint32 cia; + + sii = SI_INFO(sih); + cia = sii->cia[sii->curidx]; + return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT); +} + +uint +ai_corerev(si_t *sih) +{ + si_info_t *sii; + uint32 cib; + + sii = SI_INFO(sih); + cib = sii->cib[sii->curidx]; + return ((cib & CIB_REV_MASK) >> CIB_REV_SHIFT); +} + +bool +ai_iscoreup(si_t *sih) +{ + si_info_t *sii; + aidmp_t *ai; + + sii = SI_INFO(sih); + ai = sii->curwrap; + + return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) && + ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0)); +} + + +uint +ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, 
uint val) +{ + uint origidx = 0; + uint32 *r = NULL; + uint w; + uint intr_val = 0; + bool fast = FALSE; + si_info_t *sii; + + sii = SI_INFO(sih); + + ASSERT(GOODIDX(coreidx)); + ASSERT(regoff < SI_CORE_SIZE); + ASSERT((val & ~mask) == 0); + + if (coreidx >= SI_MAXCORES) + return 0; + + if (BUSTYPE(sih->bustype) == SI_BUS) { + + fast = TRUE; + + if (!sii->regs[coreidx]) { + sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx], + SI_CORE_SIZE); + ASSERT(GOODREGS(sii->regs[coreidx])); + } + r = (uint32 *)((uchar *)sii->regs[coreidx] + regoff); + } else if (BUSTYPE(sih->bustype) == PCI_BUS) { + + + if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) { + + + fast = TRUE; + r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff); + } else if (sii->pub.buscoreidx == coreidx) { + + fast = TRUE; + if (SI_FAST(sii)) + r = (uint32 *)((char *)sii->curmap + + PCI_16KB0_PCIREGS_OFFSET + regoff); + else + r = (uint32 *)((char *)sii->curmap + + ((regoff >= SBCONFIGOFF) ? + PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + + regoff); + } + } + + if (!fast) { + INTR_OFF(sii, intr_val); + + + origidx = si_coreidx(&sii->pub); + + + r = (uint32*) ((uchar*) ai_setcoreidx(&sii->pub, coreidx) + regoff); + } + ASSERT(r != NULL); + + + if (mask || val) { + w = (R_REG(sii->osh, r) & ~mask) | val; + W_REG(sii->osh, r, w); + } + + + w = R_REG(sii->osh, r); + + if (!fast) { + + if (origidx != coreidx) + ai_setcoreidx(&sii->pub, origidx); + + INTR_RESTORE(sii, intr_val); + } + + return (w); +} + +void +ai_core_disable(si_t *sih, uint32 bits) +{ + si_info_t *sii; + volatile uint32 dummy; + aidmp_t *ai; + + sii = SI_INFO(sih); + + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + + if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) + return; + + W_REG(sii->osh, &ai->ioctrl, bits); + dummy = R_REG(sii->osh, &ai->ioctrl); + OSL_DELAY(10); + + W_REG(sii->osh, &ai->resetctrl, AIRC_RESET); + OSL_DELAY(1); +} + + +void +ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits) +{ + si_info_t *sii; + aidmp_t *ai; + volatile uint32 dummy; + + sii = SI_INFO(sih); + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + + ai_core_disable(sih, (bits | resetbits)); + + + W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN)); + dummy = R_REG(sii->osh, &ai->ioctrl); + W_REG(sii->osh, &ai->resetctrl, 0); + OSL_DELAY(1); + + W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN)); + dummy = R_REG(sii->osh, &ai->ioctrl); + OSL_DELAY(1); +} + + +void +ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii; + aidmp_t *ai; + uint32 w; + + sii = SI_INFO(sih); + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + ASSERT((val & ~mask) == 0); + + if (mask || val) { + w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val); + W_REG(sii->osh, &ai->ioctrl, w); + } +} + +uint32 +ai_core_cflags(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii; + aidmp_t *ai; + uint32 w; + + sii = SI_INFO(sih); + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + ASSERT((val & ~mask) == 0); + + if (mask || val) { + w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val); + W_REG(sii->osh, &ai->ioctrl, w); + } + + return R_REG(sii->osh, &ai->ioctrl); +} + +uint32 +ai_core_sflags(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii; + aidmp_t *ai; + uint32 w; + + sii = SI_INFO(sih); + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + ASSERT((val & ~mask) == 0); + ASSERT((mask & ~SISF_CORE_BITS) == 0); + + if (mask || val) { + w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val); + W_REG(sii->osh, 
&ai->iostatus, w); + } + + return R_REG(sii->osh, &ai->iostatus); +} diff --git a/drivers/net/wireless/bcmdhd/bcmevent.c b/drivers/net/wireless/bcmdhd/bcmevent.c new file mode 100644 index 0000000000000..6a25d9a5a57f2 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmevent.c @@ -0,0 +1,127 @@ +/* + * bcmevent read-only data shared by kernel or app layers + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * $Id: bcmevent.c,v 1.8.2.7 2011-02-01 06:23:39 $ + */ + +#include +#include +#include +#include +#include + +#if WLC_E_LAST != 87 +#error "You need to add an entry to bcmevent_names[] for the new event" +#endif + +const bcmevent_name_t bcmevent_names[] = { + { WLC_E_SET_SSID, "SET_SSID" }, + { WLC_E_JOIN, "JOIN" }, + { WLC_E_START, "START" }, + { WLC_E_AUTH, "AUTH" }, + { WLC_E_AUTH_IND, "AUTH_IND" }, + { WLC_E_DEAUTH, "DEAUTH" }, + { WLC_E_DEAUTH_IND, "DEAUTH_IND" }, + { WLC_E_ASSOC, "ASSOC" }, + { WLC_E_ASSOC_IND, "ASSOC_IND" }, + { WLC_E_REASSOC, "REASSOC" }, + { WLC_E_REASSOC_IND, "REASSOC_IND" }, + { WLC_E_DISASSOC, "DISASSOC" }, + { WLC_E_DISASSOC_IND, "DISASSOC_IND" }, + { WLC_E_QUIET_START, "START_QUIET" }, + { WLC_E_QUIET_END, "END_QUIET" }, + { WLC_E_BEACON_RX, "BEACON_RX" }, + { WLC_E_LINK, "LINK" }, + { WLC_E_MIC_ERROR, "MIC_ERROR" }, + { WLC_E_NDIS_LINK, "NDIS_LINK" }, + { WLC_E_ROAM, "ROAM" }, + { WLC_E_TXFAIL, "TXFAIL" }, + { WLC_E_PMKID_CACHE, "PMKID_CACHE" }, + { WLC_E_RETROGRADE_TSF, "RETROGRADE_TSF" }, + { WLC_E_PRUNE, "PRUNE" }, + { WLC_E_AUTOAUTH, "AUTOAUTH" }, + { WLC_E_EAPOL_MSG, "EAPOL_MSG" }, + { WLC_E_SCAN_COMPLETE, "SCAN_COMPLETE" }, + { WLC_E_ADDTS_IND, "ADDTS_IND" }, + { WLC_E_DELTS_IND, "DELTS_IND" }, + { WLC_E_BCNSENT_IND, "BCNSENT_IND" }, + { WLC_E_BCNRX_MSG, "BCNRX_MSG" }, + { WLC_E_BCNLOST_MSG, "BCNLOST_IND" }, + { WLC_E_ROAM_PREP, "ROAM_PREP" }, + { WLC_E_PFN_NET_FOUND, "PFNFOUND_IND" }, + { WLC_E_PFN_NET_LOST, "PFNLOST_IND" }, +#if defined(IBSS_PEER_DISCOVERY_EVENT) + { WLC_E_IBSS_ASSOC, "IBSS_ASSOC" }, +#endif /* defined(IBSS_PEER_DISCOVERY_EVENT) */ + { WLC_E_RADIO, "RADIO" }, + { WLC_E_PSM_WATCHDOG, "PSM_WATCHDOG" }, + { WLC_E_PROBREQ_MSG, "PROBE_REQ_MSG" }, + { WLC_E_SCAN_CONFIRM_IND, "SCAN_CONFIRM_IND" }, + { WLC_E_PSK_SUP, "PSK_SUP" }, + { WLC_E_COUNTRY_CODE_CHANGED, "CNTRYCODE_IND" }, + { WLC_E_EXCEEDED_MEDIUM_TIME, "EXCEEDED_MEDIUM_TIME" }, + { WLC_E_ICV_ERROR, "ICV_ERROR" }, + { WLC_E_UNICAST_DECODE_ERROR, "UNICAST_DECODE_ERROR" }, + { WLC_E_MULTICAST_DECODE_ERROR, 
"MULTICAST_DECODE_ERROR" }, + { WLC_E_TRACE, "TRACE" }, + { WLC_E_BTA_HCI_EVENT, "BTA_HCI_EVENT" }, + { WLC_E_IF, "IF" }, +#ifdef WLP2P + { WLC_E_P2P_DISC_LISTEN_COMPLETE, "WLC_E_P2P_DISC_LISTEN_COMPLETE" }, +#endif + { WLC_E_RSSI, "RSSI" }, + { WLC_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE" }, + { WLC_E_EXTLOG_MSG, "EXTERNAL LOG MESSAGE" }, +#ifdef WIFI_ACT_FRAME + { WLC_E_ACTION_FRAME, "ACTION_FRAME" }, + { WLC_E_ACTION_FRAME_RX, "ACTION_FRAME_RX" }, + { WLC_E_ACTION_FRAME_COMPLETE, "ACTION_FRAME_COMPLETE" }, +#endif + { WLC_E_ESCAN_RESULT, "WLC_E_ESCAN_RESULT" }, + { WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE, "WLC_E_AF_OFF_CHAN_COMPLETE" }, +#ifdef WLP2P + { WLC_E_PROBRESP_MSG, "PROBE_RESP_MSG" }, + { WLC_E_P2P_PROBREQ_MSG, "P2P PROBE_REQ_MSG" }, +#endif +#ifdef PROP_TXSTATUS + { WLC_E_FIFO_CREDIT_MAP, "FIFO_CREDIT_MAP" }, +#endif + { WLC_E_WAKE_EVENT, "WAKE_EVENT" }, + { WLC_E_DCS_REQUEST, "DCS_REQUEST" }, + { WLC_E_RM_COMPLETE, "RM_COMPLETE" }, +#ifdef WLMEDIA_HTSF + { WLC_E_HTSFSYNC, "HTSF_SYNC_EVENT" }, +#endif + { WLC_E_OVERLAY_REQ, "OVERLAY_REQ_EVENT" }, + { WLC_E_CSA_COMPLETE_IND, "WLC_E_CSA_COMPLETE_IND" }, + { WLC_E_EXCESS_PM_WAKE_EVENT, "EXCESS_PM_WAKE_EVENT" }, + { WLC_E_PFN_SCAN_NONE, "PFN_SCAN_NONE" }, + { WLC_E_PFN_SCAN_ALLGONE, "PFN_SCAN_ALLGONE" }, +#ifdef SOFTAP + { WLC_E_GTK_PLUMBED, "GTK_PLUMBED" }, +#endif + { WLC_E_ASSOC_REQ_IE, "ASSOC_REQ_IE" }, + { WLC_E_ASSOC_RESP_IE, "ASSOC_RESP_IE" } +}; + + +const int bcmevent_names_size = ARRAYSIZE(bcmevent_names); diff --git a/drivers/net/wireless/bcmdhd/bcmsdh.c b/drivers/net/wireless/bcmdhd/bcmsdh.c new file mode 100644 index 0000000000000..89320b6f53d45 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmsdh.c @@ -0,0 +1,691 @@ +/* + * BCMSDH interface glue + * implement bcmsdh API for SDIOH driver + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: bcmsdh.c 344235 2012-07-11 23:47:18Z $ + */ + +/** + * @file bcmsdh.c + */ + +/* ****************** BCMSDH Interface Functions *************************** */ + +#include +#include +#include +#include +#include +#include +#include + +#include /* BRCM API for SDIO clients (such as wl, dhd) */ +#include /* common SDIO/controller interface */ +#include /* SDIO device core hardware definitions. 
*/ + +#include /* SDIO Device and Protocol Specs */ + +#define SDIOH_API_ACCESS_RETRY_LIMIT 2 +const uint bcmsdh_msglevel = BCMSDH_ERROR_VAL; + +/** + * BCMSDH API context + */ +struct bcmsdh_info +{ + bool init_success; /* underlying driver successfully attached */ + void *sdioh; /* handler for sdioh */ + uint32 vendevid; /* Target Vendor and Device ID on SD bus */ + osl_t *osh; + bool regfail; /* Save status of last reg_read/reg_write call */ + uint32 sbwad; /* Save backplane window address */ +}; +/* local copy of bcm sd handler */ +bcmsdh_info_t * l_bcmsdh = NULL; + +#if defined(OOB_INTR_ONLY) && defined(HW_OOB) +extern int +sdioh_enable_hw_oob_intr(void *sdioh, bool enable); + +void +bcmsdh_enable_hw_oob_intr(bcmsdh_info_t *sdh, bool enable) +{ + sdioh_enable_hw_oob_intr(sdh->sdioh, enable); +} +#endif + +/* Attach BCMSDH layer to SDIO Host Controller Driver + * + * @param osh OSL Handle. + * @param cfghdl Configuration Handle. + * @param regsva Virtual address of controller registers. + * @param irq Interrupt number of SDIO controller. + * + * @return bcmsdh_info_t Handle to BCMSDH context. + */ +bcmsdh_info_t * +bcmsdh_attach(osl_t *osh, void *cfghdl, void **regsva, uint irq) +{ + bcmsdh_info_t *bcmsdh; + + if ((bcmsdh = (bcmsdh_info_t *)MALLOC(osh, sizeof(bcmsdh_info_t))) == NULL) { + BCMSDH_ERROR(("bcmsdh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh))); + return NULL; + } + bzero((char *)bcmsdh, sizeof(bcmsdh_info_t)); + + /* save the handler locally */ + l_bcmsdh = bcmsdh; + + if (!(bcmsdh->sdioh = sdioh_attach(osh, cfghdl, irq))) { + bcmsdh_detach(osh, bcmsdh); + return NULL; + } + + bcmsdh->osh = osh; + bcmsdh->init_success = TRUE; + + *regsva = (uint32 *)SI_ENUM_BASE; + + /* Report the BAR, to fix if needed */ + bcmsdh->sbwad = SI_ENUM_BASE; + return bcmsdh; +} + +int +bcmsdh_detach(osl_t *osh, void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + if (bcmsdh != NULL) { + if (bcmsdh->sdioh) { + sdioh_detach(osh, bcmsdh->sdioh); + bcmsdh->sdioh = NULL; + } + MFREE(osh, bcmsdh, sizeof(bcmsdh_info_t)); + } + + l_bcmsdh = NULL; + return 0; +} + +int +bcmsdh_iovar_op(void *sdh, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + return sdioh_iovar_op(bcmsdh->sdioh, name, params, plen, arg, len, set); +} + +bool +bcmsdh_intr_query(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + bool on; + + ASSERT(bcmsdh); + status = sdioh_interrupt_query(bcmsdh->sdioh, &on); + if (SDIOH_API_SUCCESS(status)) + return FALSE; + else + return on; +} + +int +bcmsdh_intr_enable(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + ASSERT(bcmsdh); + + status = sdioh_interrupt_set(bcmsdh->sdioh, TRUE); + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +int +bcmsdh_intr_disable(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + ASSERT(bcmsdh); + + status = sdioh_interrupt_set(bcmsdh->sdioh, FALSE); + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +int +bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + ASSERT(bcmsdh); + + status = sdioh_interrupt_register(bcmsdh->sdioh, fn, argh); + return (SDIOH_API_SUCCESS(status) ? 
0 : BCME_ERROR); +} + +int +bcmsdh_intr_dereg(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + ASSERT(bcmsdh); + + status = sdioh_interrupt_deregister(bcmsdh->sdioh); + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +#if defined(DHD_DEBUG) +bool +bcmsdh_intr_pending(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + ASSERT(sdh); + return sdioh_interrupt_pending(bcmsdh->sdioh); +} +#endif + + +int +bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh) +{ + ASSERT(sdh); + + /* don't support yet */ + return BCME_UNSUPPORTED; +} + +/** + * Read from SDIO Configuration Space + * @param sdh SDIO Host context. + * @param func_num Function number to read from. + * @param addr Address to read from. + * @param err Error return. + * @return value read from SDIO configuration space. + */ +uint8 +bcmsdh_cfg_read(void *sdh, uint fnc_num, uint32 addr, int *err) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + int32 retry = 0; +#endif + uint8 data = 0; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + do { + if (retry) /* wait for 1 ms till bus get settled down */ + OSL_DELAY(1000); +#endif + status = sdioh_cfg_read(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data); +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + } while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT)); +#endif + if (err) + *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__, + fnc_num, addr, data)); + + return data; +} + +void +bcmsdh_cfg_write(void *sdh, uint fnc_num, uint32 addr, uint8 data, int *err) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + int32 retry = 0; +#endif + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + do { + if (retry) /* wait for 1 ms till bus get settled down */ + OSL_DELAY(1000); +#endif + status = sdioh_cfg_write(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data); +#ifdef SDIOH_API_ACCESS_RETRY_LIMIT + } while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT)); +#endif + if (err) + *err = SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR; + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__, + fnc_num, addr, data)); +} + +uint32 +bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + uint32 data = 0; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + + status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_READ, fnc_num, + addr, &data, 4); + + if (err) + *err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, + fnc_num, addr, data)); + + return data; +} + +void +bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + + status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, fnc_num, + addr, &data, 4); + + if (err) + *err = (SDIOH_API_SUCCESS(status) ? 
0 : BCME_SDIO_ERROR); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, fnc_num, + addr, data)); +} + + +int +bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + + uint8 *tmp_buf, *tmp_ptr; + uint8 *ptr; + bool ascii = func & ~0xf; + func &= 0x7; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + ASSERT(cis); + ASSERT(length <= SBSDIO_CIS_SIZE_LIMIT); + + status = sdioh_cis_read(bcmsdh->sdioh, func, cis, length); + + if (ascii) { + /* Move binary bits to tmp and format them into the provided buffer. */ + if ((tmp_buf = (uint8 *)MALLOC(bcmsdh->osh, length)) == NULL) { + BCMSDH_ERROR(("%s: out of memory\n", __FUNCTION__)); + return BCME_NOMEM; + } + bcopy(cis, tmp_buf, length); + for (tmp_ptr = tmp_buf, ptr = cis; ptr < (cis + length - 4); tmp_ptr++) { + ptr += snprintf((char*)ptr, (cis + length - ptr - 4), + "%.2x ", *tmp_ptr & 0xff); + if ((((tmp_ptr - tmp_buf) + 1) & 0xf) == 0) + ptr += snprintf((char *)ptr, (cis + length - ptr -4), "\n"); + } + MFREE(bcmsdh->osh, tmp_buf, length); + } + + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + + +int +bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set) +{ + int err = 0; + uint bar0 = address & ~SBSDIO_SB_OFT_ADDR_MASK; + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + if (bar0 != bcmsdh->sbwad || force_set) { + bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, + (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err); + if (!err) + bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID, + (address >> 16) & SBSDIO_SBADDRMID_MASK, &err); + if (!err) + bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH, + (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err); + + if (!err) + bcmsdh->sbwad = bar0; + else + /* invalidate cached window var */ + bcmsdh->sbwad = 0; + + } + + return err; +} + +uint32 +bcmsdh_reg_read(void *sdh, uint32 addr, uint size) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + uint32 word = 0; + + BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, ", __FUNCTION__, addr)); + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + + if (bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)) + return 0xFFFFFFFF; + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + if (size == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, + SDIOH_READ, SDIO_FUNC_1, addr, &word, size); + + bcmsdh->regfail = !(SDIOH_API_SUCCESS(status)); + + BCMSDH_INFO(("uint32data = 0x%x\n", word)); + + /* if ok, return appropriately masked word */ + if (SDIOH_API_SUCCESS(status)) { + switch (size) { + case sizeof(uint8): + return (word & 0xff); + case sizeof(uint16): + return (word & 0xffff); + case sizeof(uint32): + return word; + default: + bcmsdh->regfail = TRUE; + + } + } + + /* otherwise, bad sdio access or invalid size */ + BCMSDH_ERROR(("%s: error reading addr 0x%04x size %d\n", __FUNCTION__, addr, size)); + return 0xFFFFFFFF; +} + +uint32 +bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + int err = 0; + + BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, uint%ddata = 0x%x\n", + __FUNCTION__, addr, size*8, data)); + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + + if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE))) + return err; + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + if (size == 4) + addr 
|= SBSDIO_SB_ACCESS_2_4B_FLAG; + status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, SDIO_FUNC_1, + addr, &data, size); + bcmsdh->regfail = !(SDIOH_API_SUCCESS(status)); + + if (SDIOH_API_SUCCESS(status)) + return 0; + + BCMSDH_ERROR(("%s: error writing 0x%08x to addr 0x%04x size %d\n", + __FUNCTION__, data, addr, size)); + return 0xFFFFFFFF; +} + +bool +bcmsdh_regfail(void *sdh) +{ + return ((bcmsdh_info_t *)sdh)->regfail; +} + +int +bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, void *pkt, + bcmsdh_cmplt_fn_t complete_fn, void *handle) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + uint incr_fix; + uint width; + int err = 0; + + ASSERT(bcmsdh); + ASSERT(bcmsdh->init_success); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n", + __FUNCTION__, fn, addr, nbytes)); + + /* Async not implemented yet */ + ASSERT(!(flags & SDIO_REQ_ASYNC)); + if (flags & SDIO_REQ_ASYNC) + return BCME_UNSUPPORTED; + + if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE))) + return err; + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + + incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; + width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; + if (width == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix, + SDIOH_READ, fn, addr, width, nbytes, buf, pkt); + + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR); +} + +int +bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, void *pkt, + bcmsdh_cmplt_fn_t complete_fn, void *handle) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + uint incr_fix; + uint width; + int err = 0; + + ASSERT(bcmsdh); + ASSERT(bcmsdh->init_success); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n", + __FUNCTION__, fn, addr, nbytes)); + + /* Async not implemented yet */ + ASSERT(!(flags & SDIO_REQ_ASYNC)); + if (flags & SDIO_REQ_ASYNC) + return BCME_UNSUPPORTED; + + if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE))) + return err; + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + + incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; + width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; + if (width == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix, + SDIOH_WRITE, fn, addr, width, nbytes, buf, pkt); + + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +int +bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + + ASSERT(bcmsdh); + ASSERT(bcmsdh->init_success); + ASSERT((addr & SBSDIO_SBWINDOW_MASK) == 0); + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, SDIOH_DATA_INC, + (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1, + addr, 4, nbytes, buf, NULL); + + return (SDIOH_API_SUCCESS(status) ? 
0 : BCME_ERROR); +} + +int +bcmsdh_abort(void *sdh, uint fn) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_abort(bcmsdh->sdioh, fn); +} + +int +bcmsdh_start(void *sdh, int stage) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_start(bcmsdh->sdioh, stage); +} + +int +bcmsdh_stop(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_stop(bcmsdh->sdioh); +} + +int +bcmsdh_waitlockfree(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + return sdioh_waitlockfree(bcmsdh->sdioh); +} + + +int +bcmsdh_query_device(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + bcmsdh->vendevid = (VENDOR_BROADCOM << 16) | 0; + return (bcmsdh->vendevid); +} + +uint +bcmsdh_query_iofnum(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + return (sdioh_query_iofnum(bcmsdh->sdioh)); +} + +int +bcmsdh_reset(bcmsdh_info_t *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_sdio_reset(bcmsdh->sdioh); +} + +void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh) +{ + ASSERT(sdh); + return sdh->sdioh; +} + +/* Function to pass device-status bits to DHD. */ +uint32 +bcmsdh_get_dstatus(void *sdh) +{ + return 0; +} +uint32 +bcmsdh_cur_sbwad(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + return (bcmsdh->sbwad); +} + +void +bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev) +{ + return; +} + + +int +bcmsdh_sleep(void *sdh, bool enab) +{ +#ifdef SDIOH_SLEEP_ENABLED + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + + return sdioh_sleep(sd, enab); +#else + return BCME_UNSUPPORTED; +#endif +} diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_linux.c b/drivers/net/wireless/bcmdhd/bcmsdh_linux.c new file mode 100644 index 0000000000000..edecb5f2f4c8c --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmsdh_linux.c @@ -0,0 +1,732 @@ +/* + * SDIO access interface for drivers - linux specific (pci only) + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: bcmsdh_linux.c 343302 2012-07-06 13:07:38Z $ + */ + +/** + * @file bcmsdh_linux.c + */ + +#define __UNDEF_NO_VERSION__ + +#include +#include + +#include +#include + +#include +#include +#include +#include + +#if defined(OOB_INTR_ONLY) +#include +extern void dhdsdio_isr(void * args); +#include +#include +#include +#endif /* defined(OOB_INTR_ONLY) */ + +/** + * SDIO Host Controller info + */ +typedef struct bcmsdh_hc bcmsdh_hc_t; + +struct bcmsdh_hc { + bcmsdh_hc_t *next; +#ifdef BCMPLATFORM_BUS + struct device *dev; /* platform device handle */ +#else + struct pci_dev *dev; /* pci device handle */ +#endif /* BCMPLATFORM_BUS */ + osl_t *osh; + void *regs; /* SDIO Host Controller address */ + bcmsdh_info_t *sdh; /* SDIO Host Controller handle */ + void *ch; + unsigned int oob_irq; + unsigned long oob_flags; /* OOB Host specifiction as edge and etc */ + bool oob_irq_registered; + bool oob_irq_enable_flag; +#if defined(OOB_INTR_ONLY) + spinlock_t irq_lock; +#endif +}; +static bcmsdh_hc_t *sdhcinfo = NULL; + +/* driver info, initialized when bcmsdh_register is called */ +static bcmsdh_driver_t drvinfo = {NULL, NULL}; + +/* debugging macros */ +#define SDLX_MSG(x) + +/** + * Checks to see if vendor and device IDs match a supported SDIO Host Controller. + */ +bool +bcmsdh_chipmatch(uint16 vendor, uint16 device) +{ + /* Add other vendors and devices as required */ + +#ifdef BCMSDIOH_STD + /* Check for Arasan host controller */ + if (vendor == VENDOR_SI_IMAGE) { + return (TRUE); + } + /* Check for BRCM 27XX Standard host controller */ + if (device == BCM27XX_SDIOH_ID && vendor == VENDOR_BROADCOM) { + return (TRUE); + } + /* Check for BRCM Standard host controller */ + if (device == SDIOH_FPGA_ID && vendor == VENDOR_BROADCOM) { + return (TRUE); + } + /* Check for TI PCIxx21 Standard host controller */ + if (device == PCIXX21_SDIOH_ID && vendor == VENDOR_TI) { + return (TRUE); + } + if (device == PCIXX21_SDIOH0_ID && vendor == VENDOR_TI) { + return (TRUE); + } + /* Ricoh R5C822 Standard SDIO Host */ + if (device == R5C822_SDIOH_ID && vendor == VENDOR_RICOH) { + return (TRUE); + } + /* JMicron Standard SDIO Host */ + if (device == JMICRON_SDIOH_ID && vendor == VENDOR_JMICRON) { + return (TRUE); + } + +#endif /* BCMSDIOH_STD */ +#ifdef BCMSDIOH_SPI + /* This is the PciSpiHost. 
*/ + if (device == SPIH_FPGA_ID && vendor == VENDOR_BROADCOM) { + printf("Found PCI SPI Host Controller\n"); + return (TRUE); + } + +#endif /* BCMSDIOH_SPI */ + + return (FALSE); +} + +#if defined(BCMPLATFORM_BUS) +#if defined(BCMLXSDMMC) +/* forward declarations */ +int bcmsdh_probe(struct device *dev); +int bcmsdh_remove(struct device *dev); + +EXPORT_SYMBOL(bcmsdh_probe); +EXPORT_SYMBOL(bcmsdh_remove); + +#else +/* forward declarations */ +static int __devinit bcmsdh_probe(struct device *dev); +static int __devexit bcmsdh_remove(struct device *dev); +#endif /* BCMLXSDMMC */ + +#ifndef BCMLXSDMMC +static +#endif /* BCMLXSDMMC */ +int bcmsdh_probe(struct device *dev) +{ + osl_t *osh = NULL; + bcmsdh_hc_t *sdhc = NULL; + ulong regs = 0; + bcmsdh_info_t *sdh = NULL; +#if !defined(BCMLXSDMMC) && defined(BCMPLATFORM_BUS) + struct platform_device *pdev; + struct resource *r; +#endif /* BCMLXSDMMC */ + int irq = 0; + uint32 vendevid; + unsigned long irq_flags = 0; + +#if !defined(BCMLXSDMMC) && defined(BCMPLATFORM_BUS) + pdev = to_platform_device(dev); + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irq = platform_get_irq(pdev, 0); + if (!r || irq == NO_IRQ) + return -ENXIO; +#endif /* BCMLXSDMMC */ + +#if defined(OOB_INTR_ONLY) +#ifdef HW_OOB + irq_flags = + IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE; +#else + irq_flags = IRQF_TRIGGER_FALLING; +#endif /* HW_OOB */ + + /* Get customer specific OOB IRQ parametres: IRQ number as IRQ type */ + irq = dhd_customer_oob_irq_map(&irq_flags); + if (irq < 0) { + SDLX_MSG(("%s: Host irq is not defined\n", __FUNCTION__)); + return 1; + } +#endif /* defined(OOB_INTR_ONLY) */ + /* allocate SDIO Host Controller state info */ + if (!(osh = osl_attach(dev, PCI_BUS, FALSE))) { + SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__)); + goto err; + } + if (!(sdhc = MALLOC(osh, sizeof(bcmsdh_hc_t)))) { + SDLX_MSG(("%s: out of memory, allocated %d bytes\n", + __FUNCTION__, + MALLOCED(osh))); + goto err; + } + bzero(sdhc, sizeof(bcmsdh_hc_t)); + sdhc->osh = osh; + + sdhc->dev = (void *)dev; + +#ifdef BCMLXSDMMC + if (!(sdh = bcmsdh_attach(osh, (void *)0, + (void **)®s, irq))) { + SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__)); + goto err; + } +#else + if (!(sdh = bcmsdh_attach(osh, (void *)r->start, + (void **)®s, irq))) { + SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__)); + goto err; + } +#endif /* BCMLXSDMMC */ + sdhc->sdh = sdh; + sdhc->oob_irq = irq; + sdhc->oob_flags = irq_flags; + sdhc->oob_irq_registered = FALSE; /* to make sure.. 
*/ + sdhc->oob_irq_enable_flag = FALSE; +#if defined(OOB_INTR_ONLY) + spin_lock_init(&sdhc->irq_lock); +#endif + + /* chain SDIO Host Controller info together */ + sdhc->next = sdhcinfo; + sdhcinfo = sdhc; + + /* Read the vendor/device ID from the CIS */ + vendevid = bcmsdh_query_device(sdh); + /* try to attach to the target device */ + if (!(sdhc->ch = drvinfo.attach((vendevid >> 16), + (vendevid & 0xFFFF), 0, 0, 0, 0, + (void *)regs, NULL, sdh))) { + SDLX_MSG(("%s: device attach failed\n", __FUNCTION__)); + goto err; + } + + return 0; + + /* error handling */ +err: + if (sdhc) { + if (sdhc->sdh) + bcmsdh_detach(sdhc->osh, sdhc->sdh); + MFREE(osh, sdhc, sizeof(bcmsdh_hc_t)); + } + if (osh) + osl_detach(osh); + return -ENODEV; +} + +#ifndef BCMLXSDMMC +static +#endif /* BCMLXSDMMC */ +int bcmsdh_remove(struct device *dev) +{ + bcmsdh_hc_t *sdhc, *prev; + osl_t *osh; + + sdhc = sdhcinfo; + drvinfo.detach(sdhc->ch); + bcmsdh_detach(sdhc->osh, sdhc->sdh); + + /* find the SDIO Host Controller state for this pdev and take it out from the list */ + for (sdhc = sdhcinfo, prev = NULL; sdhc; sdhc = sdhc->next) { + if (sdhc->dev == (void *)dev) { + if (prev) + prev->next = sdhc->next; + else + sdhcinfo = NULL; + break; + } + prev = sdhc; + } + if (!sdhc) { + SDLX_MSG(("%s: failed\n", __FUNCTION__)); + return 0; + } + + /* release SDIO Host Controller info */ + osh = sdhc->osh; + MFREE(osh, sdhc, sizeof(bcmsdh_hc_t)); + osl_detach(osh); + +#if !defined(BCMLXSDMMC) || defined(OOB_INTR_ONLY) + dev_set_drvdata(dev, NULL); +#endif /* !defined(BCMLXSDMMC) || defined(OOB_INTR_ONLY) */ + + return 0; +} + +#else /* BCMPLATFORM_BUS */ + +#if !defined(BCMLXSDMMC) +/* forward declarations for PCI probe and remove functions. */ +static int __devinit bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void __devexit bcmsdh_pci_remove(struct pci_dev *pdev); + +/** + * pci id table + */ +static struct pci_device_id bcmsdh_pci_devid[] __devinitdata = { + { vendor: PCI_ANY_ID, + device: PCI_ANY_ID, + subvendor: PCI_ANY_ID, + subdevice: PCI_ANY_ID, + class: 0, + class_mask: 0, + driver_data: 0, + }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, bcmsdh_pci_devid); + +/** + * SDIO Host Controller pci driver info + */ +static struct pci_driver bcmsdh_pci_driver = { + node: {}, + name: "bcmsdh", + id_table: bcmsdh_pci_devid, + probe: bcmsdh_pci_probe, + remove: bcmsdh_pci_remove, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) + save_state: NULL, +#endif + suspend: NULL, + resume: NULL, + }; + + +extern uint sd_pci_slot; /* Force detection to a particular PCI */ + /* slot only . Allows for having multiple */ + /* WL devices at once in a PC */ + /* Only one instance of dhd will be */ + /* usable at a time */ + /* Upper word is bus number, */ + /* lower word is slot number */ + /* Default value of 0xffffffff turns this */ + /* off */ +module_param(sd_pci_slot, uint, 0); + + +/** + * Detect supported SDIO Host Controller and attach if found. + * + * Determine if the device described by pdev is a supported SDIO Host + * Controller. If so, attach to it and attach to the target device. 
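+ *
+ * For example, since sd_pci_slot (see above) packs the forced location as
+ * (bus << 16) | slot, loading the module with sd_pci_slot=0x00010002 would
+ * restrict probing to PCI bus 1, slot 2, while the default 0xffffffff leaves
+ * the filter disabled.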
+ */ +static int __devinit +bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + osl_t *osh = NULL; + bcmsdh_hc_t *sdhc = NULL; + ulong regs; + bcmsdh_info_t *sdh = NULL; + int rc; + + if (sd_pci_slot != 0xFFFFffff) { + if (pdev->bus->number != (sd_pci_slot>>16) || + PCI_SLOT(pdev->devfn) != (sd_pci_slot&0xffff)) { + SDLX_MSG(("%s: %s: bus %X, slot %X, vend %X, dev %X\n", + __FUNCTION__, + bcmsdh_chipmatch(pdev->vendor, pdev->device) + ?"Found compatible SDIOHC" + :"Probing unknown device", + pdev->bus->number, PCI_SLOT(pdev->devfn), pdev->vendor, + pdev->device)); + return -ENODEV; + } + SDLX_MSG(("%s: %s: bus %X, slot %X, vendor %X, device %X (good PCI location)\n", + __FUNCTION__, + bcmsdh_chipmatch(pdev->vendor, pdev->device) + ?"Using compatible SDIOHC" + :"WARNING, forced use of unkown device", + pdev->bus->number, PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device)); + } + + if ((pdev->vendor == VENDOR_TI) && ((pdev->device == PCIXX21_FLASHMEDIA_ID) || + (pdev->device == PCIXX21_FLASHMEDIA0_ID))) { + uint32 config_reg; + + SDLX_MSG(("%s: Disabling TI FlashMedia Controller.\n", __FUNCTION__)); + if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) { + SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__)); + goto err; + } + + config_reg = OSL_PCI_READ_CONFIG(osh, 0x4c, 4); + + /* + * Set MMC_SD_DIS bit in FlashMedia Controller. + * Disbling the SD/MMC Controller in the FlashMedia Controller + * allows the Standard SD Host Controller to take over control + * of the SD Slot. + */ + config_reg |= 0x02; + OSL_PCI_WRITE_CONFIG(osh, 0x4c, 4, config_reg); + osl_detach(osh); + } + /* match this pci device with what we support */ + /* we can't solely rely on this to believe it is our SDIO Host Controller! */ + if (!bcmsdh_chipmatch(pdev->vendor, pdev->device)) { + return -ENODEV; + } + + /* this is a pci device we might support */ + SDLX_MSG(("%s: Found possible SDIO Host Controller: bus %d slot %d func %d irq %d\n", + __FUNCTION__, + pdev->bus->number, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn), pdev->irq)); + + /* use bcmsdh_query_device() to get the vendor ID of the target device so + * it will eventually appear in the Broadcom string on the console + */ + + /* allocate SDIO Host Controller state info */ + if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) { + SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__)); + goto err; + } + if (!(sdhc = MALLOC(osh, sizeof(bcmsdh_hc_t)))) { + SDLX_MSG(("%s: out of memory, allocated %d bytes\n", + __FUNCTION__, + MALLOCED(osh))); + goto err; + } + bzero(sdhc, sizeof(bcmsdh_hc_t)); + sdhc->osh = osh; + + sdhc->dev = pdev; + + /* map to address where host can access */ + pci_set_master(pdev); + rc = pci_enable_device(pdev); + if (rc) { + SDLX_MSG(("%s: Cannot enable PCI device\n", __FUNCTION__)); + goto err; + } + if (!(sdh = bcmsdh_attach(osh, (void *)(uintptr)pci_resource_start(pdev, 0), + (void **)®s, pdev->irq))) { + SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__)); + goto err; + } + + sdhc->sdh = sdh; + + /* try to attach to the target device */ + if (!(sdhc->ch = drvinfo.attach(VENDOR_BROADCOM, /* pdev->vendor, */ + bcmsdh_query_device(sdh) & 0xFFFF, 0, 0, 0, 0, + (void *)regs, NULL, sdh))) { + SDLX_MSG(("%s: device attach failed\n", __FUNCTION__)); + goto err; + } + + /* chain SDIO Host Controller info together */ + sdhc->next = sdhcinfo; + sdhcinfo = sdhc; + + return 0; + + /* error handling */ +err: + if (sdhc) { + if (sdhc->sdh) + bcmsdh_detach(sdhc->osh, sdhc->sdh); + MFREE(osh, sdhc, sizeof(bcmsdh_hc_t)); + } + if (osh) + 
osl_detach(osh); + return -ENODEV; +} + + +/** + * Detach from target devices and SDIO Host Controller + */ +static void __devexit +bcmsdh_pci_remove(struct pci_dev *pdev) +{ + bcmsdh_hc_t *sdhc, *prev; + osl_t *osh; + + /* find the SDIO Host Controller state for this pdev and take it out from the list */ + for (sdhc = sdhcinfo, prev = NULL; sdhc; sdhc = sdhc->next) { + if (sdhc->dev == pdev) { + if (prev) + prev->next = sdhc->next; + else + sdhcinfo = NULL; + break; + } + prev = sdhc; + } + if (!sdhc) + return; + + drvinfo.detach(sdhc->ch); + + bcmsdh_detach(sdhc->osh, sdhc->sdh); + + /* release SDIO Host Controller info */ + osh = sdhc->osh; + MFREE(osh, sdhc, sizeof(bcmsdh_hc_t)); + osl_detach(osh); +} +#endif /* BCMLXSDMMC */ +#endif /* BCMPLATFORM_BUS */ + +extern int sdio_function_init(void); + +int +bcmsdh_register(bcmsdh_driver_t *driver) +{ + int error = 0; + + drvinfo = *driver; + +#if defined(BCMPLATFORM_BUS) + SDLX_MSG(("Linux Kernel SDIO/MMC Driver\n")); + error = sdio_function_init(); + return error; +#endif /* defined(BCMPLATFORM_BUS) */ + +#if !defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) + if (!(error = pci_module_init(&bcmsdh_pci_driver))) + return 0; +#else + if (!(error = pci_register_driver(&bcmsdh_pci_driver))) + return 0; +#endif + + SDLX_MSG(("%s: pci_module_init failed 0x%x\n", __FUNCTION__, error)); +#endif /* BCMPLATFORM_BUS */ + + return error; +} + +extern void sdio_function_cleanup(void); + +void +bcmsdh_unregister(void) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) + if (bcmsdh_pci_driver.node.next) +#endif + +#if defined(BCMLXSDMMC) + sdio_function_cleanup(); +#endif /* BCMLXSDMMC */ + +#if !defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC) + pci_unregister_driver(&bcmsdh_pci_driver); +#endif /* BCMPLATFORM_BUS */ +} + +#if defined(OOB_INTR_ONLY) +void bcmsdh_oob_intr_set(bool enable) +{ + static bool curstate = 1; + unsigned long flags; + + spin_lock_irqsave(&sdhcinfo->irq_lock, flags); + if (curstate != enable) { + if (enable) + enable_irq(sdhcinfo->oob_irq); + else + disable_irq_nosync(sdhcinfo->oob_irq); + curstate = enable; + } + spin_unlock_irqrestore(&sdhcinfo->irq_lock, flags); +} + +static irqreturn_t wlan_oob_irq(int irq, void *dev_id) +{ + dhd_pub_t *dhdp; + + dhdp = (dhd_pub_t *)dev_get_drvdata(sdhcinfo->dev); + + bcmsdh_oob_intr_set(0); + + if (dhdp == NULL) { + SDLX_MSG(("Out of band GPIO interrupt fired way too early\n")); + return IRQ_HANDLED; + } + + dhdsdio_isr((void *)dhdp->bus); + + return IRQ_HANDLED; +} + +int bcmsdh_register_oob_intr(void * dhdp) +{ + int error = 0; + + SDLX_MSG(("%s Enter \n", __FUNCTION__)); + + /* IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE; */ + + dev_set_drvdata(sdhcinfo->dev, dhdp); + + if (!sdhcinfo->oob_irq_registered) { + SDLX_MSG(("%s IRQ=%d Type=%X \n", __FUNCTION__, + (int)sdhcinfo->oob_irq, (int)sdhcinfo->oob_flags)); + /* Refer to customer Host IRQ docs about proper irqflags definition */ + error = request_irq(sdhcinfo->oob_irq, wlan_oob_irq, sdhcinfo->oob_flags, + "bcmsdh_sdmmc", NULL); + if (error) + return -ENODEV; + + enable_irq_wake(sdhcinfo->oob_irq); + sdhcinfo->oob_irq_registered = TRUE; + sdhcinfo->oob_irq_enable_flag = TRUE; + } + + return 0; +} + +void bcmsdh_set_irq(int flag) +{ + if (sdhcinfo->oob_irq_registered && sdhcinfo->oob_irq_enable_flag != flag) { + SDLX_MSG(("%s Flag = %d", __FUNCTION__, flag)); + sdhcinfo->oob_irq_enable_flag = flag; + if (flag) { + enable_irq(sdhcinfo->oob_irq); + 
enable_irq_wake(sdhcinfo->oob_irq); + } else { + disable_irq_wake(sdhcinfo->oob_irq); + disable_irq(sdhcinfo->oob_irq); + } + } +} + +void bcmsdh_unregister_oob_intr(void) +{ + SDLX_MSG(("%s: Enter\n", __FUNCTION__)); + + if (sdhcinfo->oob_irq_registered == TRUE) { + bcmsdh_set_irq(FALSE); + free_irq(sdhcinfo->oob_irq, NULL); + sdhcinfo->oob_irq_registered = FALSE; + } +} +#endif /* defined(OOB_INTR_ONLY) */ + +#if defined(BCMLXSDMMC) +void *bcmsdh_get_drvdata(void) +{ + if (!sdhcinfo) + return NULL; + return dev_get_drvdata(sdhcinfo->dev); +} +#endif + +/* Module parameters specific to each host-controller driver */ + +extern uint sd_msglevel; /* Debug message level */ +module_param(sd_msglevel, uint, 0); + +extern uint sd_power; /* 0 = SD Power OFF, 1 = SD Power ON. */ +module_param(sd_power, uint, 0); + +extern uint sd_clock; /* SD Clock Control, 0 = SD Clock OFF, 1 = SD Clock ON */ +module_param(sd_clock, uint, 0); + +extern uint sd_divisor; /* Divisor (-1 means external clock) */ +module_param(sd_divisor, uint, 0); + +extern uint sd_sdmode; /* Default is SD4, 0=SPI, 1=SD1, 2=SD4 */ +module_param(sd_sdmode, uint, 0); + +extern uint sd_hiok; /* Ok to use hi-speed mode */ +module_param(sd_hiok, uint, 0); + +extern uint sd_f2_blocksize; +module_param(sd_f2_blocksize, int, 0); + +#ifdef BCMSDIOH_STD +extern int sd_uhsimode; +module_param(sd_uhsimode, int, 0); +#endif + +#ifdef BCMSDIOH_TXGLOM +extern uint sd_txglom; +module_param(sd_txglom, uint, 0); +#endif + +#ifdef BCMSDH_MODULE +EXPORT_SYMBOL(bcmsdh_attach); +EXPORT_SYMBOL(bcmsdh_detach); +EXPORT_SYMBOL(bcmsdh_intr_query); +EXPORT_SYMBOL(bcmsdh_intr_enable); +EXPORT_SYMBOL(bcmsdh_intr_disable); +EXPORT_SYMBOL(bcmsdh_intr_reg); +EXPORT_SYMBOL(bcmsdh_intr_dereg); + +#if defined(DHD_DEBUG) +EXPORT_SYMBOL(bcmsdh_intr_pending); +#endif + +EXPORT_SYMBOL(bcmsdh_devremove_reg); +EXPORT_SYMBOL(bcmsdh_cfg_read); +EXPORT_SYMBOL(bcmsdh_cfg_write); +EXPORT_SYMBOL(bcmsdh_cis_read); +EXPORT_SYMBOL(bcmsdh_reg_read); +EXPORT_SYMBOL(bcmsdh_reg_write); +EXPORT_SYMBOL(bcmsdh_regfail); +EXPORT_SYMBOL(bcmsdh_send_buf); +EXPORT_SYMBOL(bcmsdh_recv_buf); + +EXPORT_SYMBOL(bcmsdh_rwdata); +EXPORT_SYMBOL(bcmsdh_abort); +EXPORT_SYMBOL(bcmsdh_query_device); +EXPORT_SYMBOL(bcmsdh_query_iofnum); +EXPORT_SYMBOL(bcmsdh_iovar_op); +EXPORT_SYMBOL(bcmsdh_register); +EXPORT_SYMBOL(bcmsdh_unregister); +EXPORT_SYMBOL(bcmsdh_chipmatch); +EXPORT_SYMBOL(bcmsdh_reset); +EXPORT_SYMBOL(bcmsdh_waitlockfree); + +EXPORT_SYMBOL(bcmsdh_get_dstatus); +EXPORT_SYMBOL(bcmsdh_cfg_read_word); +EXPORT_SYMBOL(bcmsdh_cfg_write_word); +EXPORT_SYMBOL(bcmsdh_cur_sbwad); +EXPORT_SYMBOL(bcmsdh_chipinfo); + +#endif /* BCMSDH_MODULE */ diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c new file mode 100644 index 0000000000000..67c2fbf5ea18c --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c @@ -0,0 +1,1415 @@ +/* + * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * 
distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: bcmsdh_sdmmc.c 344243 2012-07-12 00:06:23Z $ + */ +#include + +#include +#include +#include +#include +#include /* SDIO Device and Protocol Specs */ +#include /* Standard SDIO Host Controller Specification */ +#include /* bcmsdh to/from specific controller APIs */ +#include /* ioctl/iovars */ + +#include +#include +#include +#include + +#include +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) +#include +extern volatile bool dhd_mmc_suspend; +#endif +#include "bcmsdh_sdmmc.h" + +#ifndef BCMSDH_MODULE +extern int sdio_function_init(void); +extern void sdio_function_cleanup(void); +#endif /* BCMSDH_MODULE */ + +#if !defined(OOB_INTR_ONLY) +static void IRQHandler(struct sdio_func *func); +static void IRQHandlerF2(struct sdio_func *func); +#endif /* !defined(OOB_INTR_ONLY) */ +static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr); +extern int sdio_reset_comm(struct mmc_card *card); + +extern PBCMSDH_SDMMC_INSTANCE gInstance; + +uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */ +uint sd_f2_blocksize = 512; /* Default blocksize */ + +uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz */ + +uint sd_power = 1; /* Default to SD Slot powered ON */ +uint sd_clock = 1; /* Default to SD Clock turned ON */ +uint sd_hiok = FALSE; /* Don't use hi-speed mode by default */ +uint sd_msglevel = 0x01; +uint sd_use_dma = TRUE; +DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait); +DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait); +DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait); +DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait); + +#define DMA_ALIGN_MASK 0x03 + +int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data); + +static int +sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd) +{ + int err_ret; + uint32 fbraddr; + uint8 func; + + sd_trace(("%s\n", __FUNCTION__)); + + /* Get the Card's common CIS address */ + sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0); + sd->func_cis_ptr[0] = sd->com_cis_ptr; + sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr)); + + /* Get the Card's function CIS (for each function) */ + for (fbraddr = SDIOD_FBR_STARTADDR, func = 1; + func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) { + sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr); + sd_info(("%s: Function %d CIS Ptr = 0x%x\n", + __FUNCTION__, func, sd->func_cis_ptr[func])); + } + + sd->func_cis_ptr[0] = sd->com_cis_ptr; + sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr)); + + /* Enable Function 1 */ + sdio_claim_host(gInstance->func[1]); + err_ret = sdio_enable_func(gInstance->func[1]); + sdio_release_host(gInstance->func[1]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x", err_ret)); + } + + return FALSE; +} + +/* + * Public entry points & extern's + */ +extern sdioh_info_t * 
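+/*
+ * Overview (a summary of the attach path below): sdioh_attach() requires the
+ * global gInstance to have been populated by the SDMMC probe code, allocates
+ * the sdioh_info_t state, sets a 64-byte block size on F1 and sd_f2_blocksize
+ * (512 by default) on F2, and enables F1 via sdioh_sdmmc_card_enablefuncs().
+ */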
+sdioh_attach(osl_t *osh, void *bar0, uint irq) +{ + sdioh_info_t *sd; + int err_ret; + + sd_trace(("%s\n", __FUNCTION__)); + + if (gInstance == NULL) { + sd_err(("%s: SDIO Device not present\n", __FUNCTION__)); + return NULL; + } + + if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) { + sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh))); + return NULL; + } + bzero((char *)sd, sizeof(sdioh_info_t)); + sd->osh = osh; + if (sdioh_sdmmc_osinit(sd) != 0) { + sd_err(("%s:sdioh_sdmmc_osinit() failed\n", __FUNCTION__)); + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return NULL; + } + + sd->num_funcs = 2; + sd->sd_blockmode = TRUE; + sd->use_client_ints = TRUE; + sd->client_block_size[0] = 64; + sd->use_rxchain = FALSE; + + gInstance->sd = sd; + + /* Claim host controller */ + sdio_claim_host(gInstance->func[1]); + + sd->client_block_size[1] = 64; + err_ret = sdio_set_block_size(gInstance->func[1], 64); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n")); + } + + /* Release host controller F1 */ + sdio_release_host(gInstance->func[1]); + + if (gInstance->func[2]) { + /* Claim host controller F2 */ + sdio_claim_host(gInstance->func[2]); + + sd->client_block_size[2] = sd_f2_blocksize; + err_ret = sdio_set_block_size(gInstance->func[2], sd_f2_blocksize); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d\n", + sd_f2_blocksize)); + } + + /* Release host controller F2 */ + sdio_release_host(gInstance->func[2]); + } + + sdioh_sdmmc_card_enablefuncs(sd); + + sd_trace(("%s: Done\n", __FUNCTION__)); + return sd; +} + + +extern SDIOH_API_RC +sdioh_detach(osl_t *osh, sdioh_info_t *sd) +{ + sd_trace(("%s\n", __FUNCTION__)); + + if (sd) { + + /* Disable Function 2 */ + sdio_claim_host(gInstance->func[2]); + sdio_disable_func(gInstance->func[2]); + sdio_release_host(gInstance->func[2]); + + /* Disable Function 1 */ + if (gInstance->func[1]) { + sdio_claim_host(gInstance->func[1]); + sdio_disable_func(gInstance->func[1]); + sdio_release_host(gInstance->func[1]); + } + + gInstance->func[1] = NULL; + gInstance->func[2] = NULL; + + /* deregister irq */ + sdioh_sdmmc_osfree(sd); + + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + } + return SDIOH_API_RC_SUCCESS; +} + +#if defined(OOB_INTR_ONLY) && defined(HW_OOB) + +extern SDIOH_API_RC +sdioh_enable_func_intr(void) +{ + uint8 reg; + int err; + + if (gInstance->func[0]) { + sdio_claim_host(gInstance->func[0]); + + reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err); + if (err) { + sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err)); + sdio_release_host(gInstance->func[0]); + return SDIOH_API_RC_FAIL; + } + + /* Enable F1 and F2 interrupts, set master enable */ + reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN | INTR_CTL_MASTER_EN); + + sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err); + sdio_release_host(gInstance->func[0]); + + if (err) { + sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err)); + return SDIOH_API_RC_FAIL; + } + } + + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_disable_func_intr(void) +{ + uint8 reg; + int err; + + if (gInstance->func[0]) { + sdio_claim_host(gInstance->func[0]); + reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err); + if (err) { + sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err)); + sdio_release_host(gInstance->func[0]); + return SDIOH_API_RC_FAIL; + } + + reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN); + /* Disable master interrupt 
with the last function interrupt */ + if (!(reg & 0xFE)) + reg = 0; + sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err); + + sdio_release_host(gInstance->func[0]); + if (err) { + sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err)); + return SDIOH_API_RC_FAIL; + } + } + return SDIOH_API_RC_SUCCESS; +} +#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */ + +/* Configure callback to client when we recieve client interrupt */ +extern SDIOH_API_RC +sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); + if (fn == NULL) { + sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } +#if !defined(OOB_INTR_ONLY) + sd->intr_handler = fn; + sd->intr_handler_arg = argh; + sd->intr_handler_valid = TRUE; + + /* register and unmask irq */ + if (gInstance->func[2]) { + sdio_claim_host(gInstance->func[2]); + sdio_claim_irq(gInstance->func[2], IRQHandlerF2); + sdio_release_host(gInstance->func[2]); + } + + if (gInstance->func[1]) { + sdio_claim_host(gInstance->func[1]); + sdio_claim_irq(gInstance->func[1], IRQHandler); + sdio_release_host(gInstance->func[1]); + } +#elif defined(HW_OOB) + sdioh_enable_func_intr(); +#endif /* !defined(OOB_INTR_ONLY) */ + + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_interrupt_deregister(sdioh_info_t *sd) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); + +#if !defined(OOB_INTR_ONLY) + if (gInstance->func[1]) { + /* register and unmask irq */ + sdio_claim_host(gInstance->func[1]); + sdio_release_irq(gInstance->func[1]); + sdio_release_host(gInstance->func[1]); + } + + if (gInstance->func[2]) { + /* Claim host controller F2 */ + sdio_claim_host(gInstance->func[2]); + sdio_release_irq(gInstance->func[2]); + /* Release host controller F2 */ + sdio_release_host(gInstance->func[2]); + } + + sd->intr_handler_valid = FALSE; + sd->intr_handler = NULL; + sd->intr_handler_arg = NULL; +#elif defined(HW_OOB) + sdioh_disable_func_intr(); +#endif /* !defined(OOB_INTR_ONLY) */ + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff) +{ + sd_trace(("%s: Entering\n", __FUNCTION__)); + *onoff = sd->client_intr_enabled; + return SDIOH_API_RC_SUCCESS; +} + +#if defined(DHD_DEBUG) +extern bool +sdioh_interrupt_pending(sdioh_info_t *sd) +{ + return (0); +} +#endif + +uint +sdioh_query_iofnum(sdioh_info_t *sd) +{ + return sd->num_funcs; +} + +/* IOVar table */ +enum { + IOV_MSGLEVEL = 1, + IOV_BLOCKMODE, + IOV_BLOCKSIZE, + IOV_DMA, + IOV_USEINTS, + IOV_NUMINTS, + IOV_NUMLOCALINTS, + IOV_HOSTREG, + IOV_DEVREG, + IOV_DIVISOR, + IOV_SDMODE, + IOV_HISPEED, + IOV_HCIREGS, + IOV_POWER, + IOV_CLOCK, + IOV_RXCHAIN +}; + +const bcm_iovar_t sdioh_iovars[] = { + {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 }, + {"sd_blockmode", IOV_BLOCKMODE, 0, IOVT_BOOL, 0 }, + {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */ + {"sd_dma", IOV_DMA, 0, IOVT_BOOL, 0 }, + {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 }, + {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 }, + {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 }, + {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 }, + {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 }, + {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 }, + {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100}, + {"sd_highspeed", IOV_HISPEED, 0, 
IOVT_UINT32, 0 }, + {"sd_rxchain", IOV_RXCHAIN, 0, IOVT_BOOL, 0 }, + {NULL, 0, 0, 0, 0 } +}; + +int +sdioh_iovar_op(sdioh_info_t *si, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + const bcm_iovar_t *vi = NULL; + int bcmerror = 0; + int val_size; + int32 int_val = 0; + bool bool_val; + uint32 actionid; + + ASSERT(name); + ASSERT(len >= 0); + + /* Get must have return space; Set does not take qualifiers */ + ASSERT(set || (arg && len)); + ASSERT(!set || (!params && !plen)); + + sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name)); + + if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) { + bcmerror = BCME_UNSUPPORTED; + goto exit; + } + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0) + goto exit; + + /* Set up params so get and set can share the convenience variables */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + val_size = sizeof(int); + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + bool_val = (int_val != 0) ? TRUE : FALSE; + BCM_REFERENCE(bool_val); + + actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + switch (actionid) { + case IOV_GVAL(IOV_MSGLEVEL): + int_val = (int32)sd_msglevel; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_MSGLEVEL): + sd_msglevel = int_val; + break; + + case IOV_GVAL(IOV_BLOCKMODE): + int_val = (int32)si->sd_blockmode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_BLOCKMODE): + si->sd_blockmode = (bool)int_val; + /* Haven't figured out how to make non-block mode with DMA */ + break; + + case IOV_GVAL(IOV_BLOCKSIZE): + if ((uint32)int_val > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + int_val = (int32)si->client_block_size[int_val]; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_BLOCKSIZE): + { + uint func = ((uint32)int_val >> 16); + uint blksize = (uint16)int_val; + uint maxsize; + + if (func > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + + switch (func) { + case 0: maxsize = 32; break; + case 1: maxsize = BLOCK_SIZE_4318; break; + case 2: maxsize = BLOCK_SIZE_4328; break; + default: maxsize = 0; + } + if (blksize > maxsize) { + bcmerror = BCME_BADARG; + break; + } + if (!blksize) { + blksize = maxsize; + } + + /* Now set it */ + si->client_block_size[func] = blksize; + + break; + } + + case IOV_GVAL(IOV_RXCHAIN): + int_val = (int32)si->use_rxchain; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_DMA): + int_val = (int32)si->sd_use_dma; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DMA): + si->sd_use_dma = (bool)int_val; + break; + + case IOV_GVAL(IOV_USEINTS): + int_val = (int32)si->use_client_ints; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_USEINTS): + si->use_client_ints = (bool)int_val; + if (si->use_client_ints) + si->intmask |= CLIENT_INTR; + else + si->intmask &= ~CLIENT_INTR; + + break; + + case IOV_GVAL(IOV_DIVISOR): + int_val = (uint32)sd_divisor; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DIVISOR): + sd_divisor = int_val; + break; + + case IOV_GVAL(IOV_POWER): + int_val = (uint32)sd_power; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_POWER): + sd_power = int_val; + break; + + case IOV_GVAL(IOV_CLOCK): + int_val = (uint32)sd_clock; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_CLOCK): + sd_clock = int_val; + 
break; + + case IOV_GVAL(IOV_SDMODE): + int_val = (uint32)sd_sdmode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDMODE): + sd_sdmode = int_val; + break; + + case IOV_GVAL(IOV_HISPEED): + int_val = (uint32)sd_hiok; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_HISPEED): + sd_hiok = int_val; + break; + + case IOV_GVAL(IOV_NUMINTS): + int_val = (int32)si->intrcount; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_NUMLOCALINTS): + int_val = (int32)0; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_HOSTREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + + if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) { + sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset)); + bcmerror = BCME_BADARG; + break; + } + + sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__, + (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32), + sd_ptr->offset)); + if (sd_ptr->offset & 1) + int_val = 8; /* sdioh_sdmmc_rreg8(si, sd_ptr->offset); */ + else if (sd_ptr->offset & 2) + int_val = 16; /* sdioh_sdmmc_rreg16(si, sd_ptr->offset); */ + else + int_val = 32; /* sdioh_sdmmc_rreg(si, sd_ptr->offset); */ + + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_HOSTREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + + if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) { + sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset)); + bcmerror = BCME_BADARG; + break; + } + + sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value, + (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32), + sd_ptr->offset)); + break; + } + + case IOV_GVAL(IOV_DEVREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data = 0; + + if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + + int_val = (int)data; + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_DEVREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data = (uint8)sd_ptr->value; + + if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + break; + } + + default: + bcmerror = BCME_UNSUPPORTED; + break; + } +exit: + + return bcmerror; +} + +#if defined(OOB_INTR_ONLY) && defined(HW_OOB) + +SDIOH_API_RC +sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable) +{ + SDIOH_API_RC status; + uint8 data; + + if (enable) + data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE | SDIO_SEPINT_ACT_HI; + else + data = SDIO_SEPINT_ACT_HI; + + status = sdioh_request_byte(sd, SDIOH_WRITE, 0, SDIOD_CCCR_BRCM_SEPINT, &data); + return status; +} +#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */ + +extern SDIOH_API_RC +sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + SDIOH_API_RC status; + /* No lock needed since sdioh_request_byte does locking */ + status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data); + return status; +} + +extern SDIOH_API_RC +sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + /* No lock needed since sdioh_request_byte does locking */ + SDIOH_API_RC status; + status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data); + return status; +} + +static int +sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr) +{ + /* read 24 bits and return valid 17 bit addr */ + int i; + uint32 scratch, regdata; + uint8 *ptr = (uint8 *)&scratch; + for (i = 0; i < 3; i++) { + if ((sdioh_sdmmc_card_regread (sd, 0, regaddr, 1, ®data)) 
!= SUCCESS) + sd_err(("%s: Can't read!\n", __FUNCTION__)); + + *ptr++ = (uint8) regdata; + regaddr++; + } + + /* Only the lower 17-bits are valid */ + scratch = ltoh32(scratch); + scratch &= 0x0001FFFF; + return (scratch); +} + +extern SDIOH_API_RC +sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length) +{ + uint32 count; + int offset; + uint32 foo; + uint8 *cis = cisd; + + sd_trace(("%s: Func = %d\n", __FUNCTION__, func)); + + if (!sd->func_cis_ptr[func]) { + bzero(cis, length); + sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func)); + return SDIOH_API_RC_FAIL; + } + + sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func])); + + for (count = 0; count < length; count++) { + offset = sd->func_cis_ptr[func] + count; + if (sdioh_sdmmc_card_regread (sd, 0, offset, 1, &foo) < 0) { + sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + *cis = (uint8)(foo & 0xff); + cis++; + } + + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte) +{ + int err_ret; + + sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr)); + + DHD_PM_RESUME_WAIT(sdioh_request_byte_wait); + DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); + if(rw) { /* CMD52 Write */ + if (func == 0) { + /* Can only directly write to some F0 registers. Handle F2 enable + * as a special case. + */ + if (regaddr == SDIOD_CCCR_IOEN) { + if (gInstance->func[2]) { + sdio_claim_host(gInstance->func[2]); + if (*byte & SDIO_FUNC_ENABLE_2) { + /* Enable Function 2 */ + err_ret = sdio_enable_func(gInstance->func[2]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: enable F2 failed:%d", + err_ret)); + } + } else { + /* Disable Function 2 */ + err_ret = sdio_disable_func(gInstance->func[2]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d", + err_ret)); + } + } + sdio_release_host(gInstance->func[2]); + } + } +#if defined(MMC_SDIO_ABORT) + /* to allow abort command through F1 */ + else if (regaddr == SDIOD_CCCR_IOABORT) { + if (gInstance->func[func]) { + sdio_claim_host(gInstance->func[func]); + /* + * this sdio_f0_writeb() can be replaced with another api + * depending upon MMC driver change. 
+ * As of this time, this is temporaray one + */ + sdio_writeb(gInstance->func[func], + *byte, regaddr, &err_ret); + sdio_release_host(gInstance->func[func]); + } + } +#endif /* MMC_SDIO_ABORT */ + else if (regaddr < 0xF0) { + sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr)); + } else { + /* Claim host controller, perform F0 write, and release */ + if (gInstance->func[func]) { + sdio_claim_host(gInstance->func[func]); + sdio_f0_writeb(gInstance->func[func], + *byte, regaddr, &err_ret); + sdio_release_host(gInstance->func[func]); + } + } + } else { + /* Claim host controller, perform Fn write, and release */ + if (gInstance->func[func]) { + sdio_claim_host(gInstance->func[func]); + sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret); + sdio_release_host(gInstance->func[func]); + } + } + } else { /* CMD52 Read */ + /* Claim host controller, perform Fn read, and release */ + if (gInstance->func[func]) { + sdio_claim_host(gInstance->func[func]); + if (func == 0) { + *byte = sdio_f0_readb(gInstance->func[func], regaddr, &err_ret); + } else { + *byte = sdio_readb(gInstance->func[func], regaddr, &err_ret); + } + sdio_release_host(gInstance->func[func]); + } + } + + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n", + rw ? "Write" : "Read", func, regaddr, *byte, err_ret)); + } + + return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); +} + +extern SDIOH_API_RC +sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr, + uint32 *word, uint nbytes) +{ + int err_ret = SDIOH_API_RC_FAIL; + + if (func == 0) { + sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n", + __FUNCTION__, cmd_type, rw, func, addr, nbytes)); + + DHD_PM_RESUME_WAIT(sdioh_request_word_wait); + DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); + /* Claim host controller */ + sdio_claim_host(gInstance->func[func]); + + if(rw) { /* CMD52 Write */ + if (nbytes == 4) { + sdio_writel(gInstance->func[func], *word, addr, &err_ret); + } else if (nbytes == 2) { + sdio_writew(gInstance->func[func], (*word & 0xFFFF), addr, &err_ret); + } else { + sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes)); + } + } else { /* CMD52 Read */ + if (nbytes == 4) { + *word = sdio_readl(gInstance->func[func], addr, &err_ret); + } else if (nbytes == 2) { + *word = sdio_readw(gInstance->func[func], addr, &err_ret) & 0xFFFF; + } else { + sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes)); + } + } + + /* Release host controller */ + sdio_release_host(gInstance->func[func]); + + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Failed to %s word, Err: 0x%08x", + rw ? "Write" : "Read", err_ret)); + } + + return ((err_ret == 0) ? 
SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); +} + +static SDIOH_API_RC +sdioh_request_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func, + uint addr, void *pkt) +{ + bool fifo = (fix_inc == SDIOH_DATA_FIX); + uint32 SGCount = 0; + int err_ret = 0; + void *pnext, *pprev; + uint ttl_len, dma_len, lft_len, xfred_len, pkt_len; + uint blk_num; + struct mmc_request mmc_req; + struct mmc_command mmc_cmd; + struct mmc_data mmc_dat; + + sd_trace(("%s: Enter\n", __FUNCTION__)); + + ASSERT(pkt); + DHD_PM_RESUME_WAIT(sdioh_request_packet_wait); + DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); + + ttl_len = xfred_len = 0; + /* at least 4 bytes alignment of skb buff is guaranteed */ + for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) + ttl_len += PKTLEN(sd->osh, pnext); + + if (!sd->use_rxchain || ttl_len <= sd->client_block_size[func]) { + blk_num = 0; + dma_len = 0; + } else { + blk_num = ttl_len / sd->client_block_size[func]; + dma_len = blk_num * sd->client_block_size[func]; + } + lft_len = ttl_len - dma_len; + + sd_trace(("%s: %s %dB to func%d:%08x, %d blks with DMA, %dB leftover\n", + __FUNCTION__, write ? "W" : "R", + ttl_len, func, addr, blk_num, lft_len)); + + if (0 != dma_len) { + memset(&mmc_req, 0, sizeof(struct mmc_request)); + memset(&mmc_cmd, 0, sizeof(struct mmc_command)); + memset(&mmc_dat, 0, sizeof(struct mmc_data)); + + /* Set up DMA descriptors */ + pprev = pkt; + for (pnext = pkt; + pnext && dma_len; + pnext = PKTNEXT(sd->osh, pnext)) { + pkt_len = PKTLEN(sd->osh, pnext); + + if (dma_len > pkt_len) + dma_len -= pkt_len; + else { + pkt_len = xfred_len = dma_len; + dma_len = 0; + pkt = pnext; + } + + sg_set_buf(&sd->sg_list[SGCount++], + (uint8*)PKTDATA(sd->osh, pnext), + pkt_len); + + if (SGCount >= SDIOH_SDMMC_MAX_SG_ENTRIES) { + sd_err(("%s: sg list entries exceed limit\n", + __FUNCTION__)); + return (SDIOH_API_RC_FAIL); + } + } + + mmc_dat.sg = sd->sg_list; + mmc_dat.sg_len = SGCount; + mmc_dat.blksz = sd->client_block_size[func]; + mmc_dat.blocks = blk_num; + mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ; + + mmc_cmd.opcode = 53; /* SD_IO_RW_EXTENDED */ + mmc_cmd.arg = write ? 1<<31 : 0; + mmc_cmd.arg |= (func & 0x7) << 28; + mmc_cmd.arg |= 1<<27; + mmc_cmd.arg |= fifo ? 0 : 1<<26; + mmc_cmd.arg |= (addr & 0x1FFFF) << 9; + mmc_cmd.arg |= blk_num & 0x1FF; + mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC; + + mmc_req.cmd = &mmc_cmd; + mmc_req.data = &mmc_dat; + + sdio_claim_host(gInstance->func[func]); + mmc_set_data_timeout(&mmc_dat, gInstance->func[func]->card); + mmc_wait_for_req(gInstance->func[func]->card->host, &mmc_req); + sdio_release_host(gInstance->func[func]); + + err_ret = mmc_cmd.error? mmc_cmd.error : mmc_dat.error; + if (0 != err_ret) { + sd_err(("%s:CMD53 %s failed with code %d\n", + __FUNCTION__, + write ? 
"write" : "read", + err_ret)); + sd_err(("%s:Disabling rxchain and fire it with PIO\n", + __FUNCTION__)); + sd->use_rxchain = FALSE; + pkt = pprev; + lft_len = ttl_len; + } else if (!fifo) { + addr = addr + ttl_len - lft_len - dma_len; + } + } + + /* PIO mode */ + if (0 != lft_len) { + /* Claim host controller */ + sdio_claim_host(gInstance->func[func]); + for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) { + uint8 *buf = (uint8*)PKTDATA(sd->osh, pnext) + + xfred_len; + pkt_len = PKTLEN(sd->osh, pnext); + if (0 != xfred_len) { + pkt_len -= xfred_len; + xfred_len = 0; + } + pkt_len = (pkt_len + 3) & 0xFFFFFFFC; +#ifdef CONFIG_MMC_MSM7X00A + if ((pkt_len % 64) == 32) { + sd_trace(("%s: Rounding up TX packet +=32\n", __FUNCTION__)); + pkt_len += 32; + } +#endif /* CONFIG_MMC_MSM7X00A */ + + if ((write) && (!fifo)) + err_ret = sdio_memcpy_toio( + gInstance->func[func], + addr, buf, pkt_len); + else if (write) + err_ret = sdio_memcpy_toio( + gInstance->func[func], + addr, buf, pkt_len); + else if (fifo) + err_ret = sdio_readsb( + gInstance->func[func], + buf, addr, pkt_len); + else + err_ret = sdio_memcpy_fromio( + gInstance->func[func], + buf, addr, pkt_len); + + if (err_ret) + sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=%d\n", + __FUNCTION__, + (write) ? "TX" : "RX", + pnext, SGCount, addr, pkt_len, err_ret)); + else + sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n", + __FUNCTION__, + (write) ? "TX" : "RX", + pnext, SGCount, addr, pkt_len)); + + if (!fifo) + addr += pkt_len; + SGCount ++; + } + sdio_release_host(gInstance->func[func]); + } + + sd_trace(("%s: Exit\n", __FUNCTION__)); + return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); +} + + +/* + * This function takes a buffer or packet, and fixes everything up so that in the + * end, a DMA-able packet is created. + * + * A buffer does not have an associated packet pointer, and may or may not be aligned. + * A packet may consist of a single packet, or a packet chain. If it is a packet chain, + * then all the packets in the chain must be properly aligned. If the packet data is not + * aligned, then there may only be one packet, and in this case, it is copied to a new + * aligned packet. + * + */ +extern SDIOH_API_RC +sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func, + uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt) +{ + SDIOH_API_RC Status; + void *mypkt = NULL; + + sd_trace(("%s: Enter\n", __FUNCTION__)); + + DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait); + DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL); + /* Case 1: we don't have a packet. */ + if (pkt == NULL) { + sd_data(("%s: Creating new %s Packet, len=%d\n", + __FUNCTION__, write ? "TX" : "RX", buflen_u)); +#ifdef CONFIG_DHD_USE_STATIC_BUF + if (!(mypkt = PKTGET_STATIC(sd->osh, buflen_u, write ? TRUE : FALSE))) { +#else + if (!(mypkt = PKTGET(sd->osh, buflen_u, write ? TRUE : FALSE))) { +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + sd_err(("%s: PKTGET failed: len %d\n", + __FUNCTION__, buflen_u)); + return SDIOH_API_RC_FAIL; + } + + /* For a write, copy the buffer data into the packet. */ + if (write) { + bcopy(buffer, PKTDATA(sd->osh, mypkt), buflen_u); + } + + Status = sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt); + + /* For a read, copy the packet data back to the buffer. */ + if (!write) { + bcopy(PKTDATA(sd->osh, mypkt), buffer, buflen_u); + } +#ifdef CONFIG_DHD_USE_STATIC_BUF + PKTFREE_STATIC(sd->osh, mypkt, write ? 
TRUE : FALSE); +#else + PKTFREE(sd->osh, mypkt, write ? TRUE : FALSE); +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + } else if (((uint32)(PKTDATA(sd->osh, pkt)) & DMA_ALIGN_MASK) != 0) { + /* Case 2: We have a packet, but it is unaligned. */ + + /* In this case, we cannot have a chain. */ + ASSERT(PKTNEXT(sd->osh, pkt) == NULL); + + sd_data(("%s: Creating aligned %s Packet, len=%d\n", + __FUNCTION__, write ? "TX" : "RX", PKTLEN(sd->osh, pkt))); +#ifdef CONFIG_DHD_USE_STATIC_BUF + if (!(mypkt = PKTGET_STATIC(sd->osh, PKTLEN(sd->osh, pkt), write ? TRUE : FALSE))) { +#else + if (!(mypkt = PKTGET(sd->osh, PKTLEN(sd->osh, pkt), write ? TRUE : FALSE))) { +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + sd_err(("%s: PKTGET failed: len %d\n", + __FUNCTION__, PKTLEN(sd->osh, pkt))); + return SDIOH_API_RC_FAIL; + } + + /* For a write, copy the buffer data into the packet. */ + if (write) { + bcopy(PKTDATA(sd->osh, pkt), + PKTDATA(sd->osh, mypkt), + PKTLEN(sd->osh, pkt)); + } + + Status = sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt); + + /* For a read, copy the packet data back to the buffer. */ + if (!write) { + bcopy(PKTDATA(sd->osh, mypkt), + PKTDATA(sd->osh, pkt), + PKTLEN(sd->osh, mypkt)); + } +#ifdef CONFIG_DHD_USE_STATIC_BUF + PKTFREE_STATIC(sd->osh, mypkt, write ? TRUE : FALSE); +#else + PKTFREE(sd->osh, mypkt, write ? TRUE : FALSE); +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + } else { /* case 3: We have a packet and it is aligned. */ + sd_data(("%s: Aligned %s Packet, direct DMA\n", + __FUNCTION__, write ? "Tx" : "Rx")); + Status = sdioh_request_packet(sd, fix_inc, write, func, addr, pkt); + } + + return (Status); +} + +/* this function performs "abort" for both of host & device */ +extern int +sdioh_abort(sdioh_info_t *sd, uint func) +{ +#if defined(MMC_SDIO_ABORT) + char t_func = (char) func; +#endif /* defined(MMC_SDIO_ABORT) */ + sd_trace(("%s: Enter\n", __FUNCTION__)); + +#if defined(MMC_SDIO_ABORT) + /* issue abort cmd52 command through F1 */ + sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func); +#endif /* defined(MMC_SDIO_ABORT) */ + + sd_trace(("%s: Exit\n", __FUNCTION__)); + return SDIOH_API_RC_SUCCESS; +} + +/* Reset and re-initialize the device */ +int sdioh_sdio_reset(sdioh_info_t *si) +{ + sd_trace(("%s: Enter\n", __FUNCTION__)); + sd_trace(("%s: Exit\n", __FUNCTION__)); + return SDIOH_API_RC_SUCCESS; +} + +/* Disable device interrupt */ +void +sdioh_sdmmc_devintr_off(sdioh_info_t *sd) +{ + sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints)); + sd->intmask &= ~CLIENT_INTR; +} + +/* Enable device interrupt */ +void +sdioh_sdmmc_devintr_on(sdioh_info_t *sd) +{ + sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints)); + sd->intmask |= CLIENT_INTR; +} + +/* Read client card reg */ +int +sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data) +{ + + if ((func == 0) || (regsize == 1)) { + uint8 temp = 0; + + sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp); + *data = temp; + *data &= 0xff; + sd_data(("%s: byte read data=0x%02x\n", + __FUNCTION__, *data)); + } else { + sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize); + if (regsize == 2) + *data &= 0xffff; + + sd_data(("%s: word read data=0x%08x\n", + __FUNCTION__, *data)); + } + + return SUCCESS; +} + +#if !defined(OOB_INTR_ONLY) +/* bcmsdh_sdmmc interrupt handler */ +static void IRQHandler(struct sdio_func *func) +{ + sdioh_info_t *sd; + + sd_trace(("bcmsdh_sdmmc: ***IRQHandler\n")); + sd = gInstance->sd; + + ASSERT(sd != 
NULL); + sdio_release_host(gInstance->func[0]); + + if (sd->use_client_ints) { + sd->intrcount++; + ASSERT(sd->intr_handler); + ASSERT(sd->intr_handler_arg); + (sd->intr_handler)(sd->intr_handler_arg); + } else { + sd_err(("bcmsdh_sdmmc: ***IRQHandler\n")); + + sd_err(("%s: Not ready for intr: enabled %d, handler %p\n", + __FUNCTION__, sd->client_intr_enabled, sd->intr_handler)); + } + + sdio_claim_host(gInstance->func[0]); +} + +/* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */ +static void IRQHandlerF2(struct sdio_func *func) +{ + sdioh_info_t *sd; + + sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n")); + + sd = gInstance->sd; + + ASSERT(sd != NULL); + BCM_REFERENCE(sd); +} +#endif /* !defined(OOB_INTR_ONLY) */ + +#ifdef NOTUSED +/* Write client card reg */ +static int +sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data) +{ + + if ((func == 0) || (regsize == 1)) { + uint8 temp; + + temp = data & 0xff; + sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp); + sd_data(("%s: byte write data=0x%02x\n", + __FUNCTION__, data)); + } else { + if (regsize == 2) + data &= 0xffff; + + sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, &data, regsize); + + sd_data(("%s: word write data=0x%08x\n", + __FUNCTION__, data)); + } + + return SUCCESS; +} +#endif /* NOTUSED */ + +int +sdioh_start(sdioh_info_t *si, int stage) +{ + int ret; + sdioh_info_t *sd = gInstance->sd; + + /* Need to do this stages as we can't enable the interrupt till + downloading of the firmware is complete, other wise polling + sdio access will come in way + */ + if (gInstance->func[0]) { + if (stage == 0) { + /* Since the power to the chip is killed, we will have + re enumerate the device again. Set the block size + and enable the fucntion 1 for in preparation for + downloading the code + */ + /* sdio_reset_comm() - has been fixed in latest kernel/msm.git for Linux + 2.6.27. 
The implementation prior to that is buggy, and needs broadcom's + patch for it + */ + if ((ret = sdio_reset_comm(gInstance->func[0]->card))) { + sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret)); + return ret; + } + else { + sd->num_funcs = 2; + sd->sd_blockmode = TRUE; + sd->use_client_ints = TRUE; + sd->client_block_size[0] = 64; + + /* Claim host controller */ + sdio_claim_host(gInstance->func[1]); + + sd->client_block_size[1] = 64; + if (sdio_set_block_size(gInstance->func[1], 64)) { + sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n")); + } + + /* Release host controller F1 */ + sdio_release_host(gInstance->func[1]); + + if (gInstance->func[2]) { + /* Claim host controller F2 */ + sdio_claim_host(gInstance->func[2]); + + sd->client_block_size[2] = sd_f2_blocksize; + if (sdio_set_block_size(gInstance->func[2], + sd_f2_blocksize)) { + sd_err(("bcmsdh_sdmmc: Failed to set F2 " + "blocksize to %d\n", sd_f2_blocksize)); + } + + /* Release host controller F2 */ + sdio_release_host(gInstance->func[2]); + } + + sdioh_sdmmc_card_enablefuncs(sd); + } + } else { +#if !defined(OOB_INTR_ONLY) + sdio_claim_host(gInstance->func[0]); + sdio_claim_irq(gInstance->func[2], IRQHandlerF2); + sdio_claim_irq(gInstance->func[1], IRQHandler); + sdio_release_host(gInstance->func[0]); +#else /* defined(OOB_INTR_ONLY) */ +#if defined(HW_OOB) + sdioh_enable_func_intr(); +#endif + bcmsdh_oob_intr_set(TRUE); +#endif /* !defined(OOB_INTR_ONLY) */ + } + } + else + sd_err(("%s Failed\n", __FUNCTION__)); + + return (0); +} + +int +sdioh_stop(sdioh_info_t *si) +{ + /* MSM7201A Android sdio stack has bug with interrupt + So internaly within SDIO stack they are polling + which cause issue when device is turned off. So + unregister interrupt with SDIO stack to stop the + polling + */ + if (gInstance->func[0]) { +#if !defined(OOB_INTR_ONLY) + sdio_claim_host(gInstance->func[0]); + sdio_release_irq(gInstance->func[1]); + sdio_release_irq(gInstance->func[2]); + sdio_release_host(gInstance->func[0]); +#else /* defined(OOB_INTR_ONLY) */ +#if defined(HW_OOB) + sdioh_disable_func_intr(); +#endif + bcmsdh_oob_intr_set(FALSE); +#endif /* !defined(OOB_INTR_ONLY) */ + } + else + sd_err(("%s Failed\n", __FUNCTION__)); + return (0); +} + +int +sdioh_waitlockfree(sdioh_info_t *sd) +{ + return (1); +} diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c new file mode 100644 index 0000000000000..c93e41c6fb40f --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c @@ -0,0 +1,366 @@ +/* + * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: bcmsdh_sdmmc_linux.c 331154 2012-05-04 00:41:40Z $ + */ + +#include +#include +#include /* SDIO Device and Protocol Specs */ +#include /* bcmsdh to/from specific controller APIs */ +#include /* to get msglevel bit values */ + +#include /* request_irq() */ + +#include +#include +#include +#include + +#if !defined(SDIO_VENDOR_ID_BROADCOM) +#define SDIO_VENDOR_ID_BROADCOM 0x02d0 +#endif /* !defined(SDIO_VENDOR_ID_BROADCOM) */ + +#define SDIO_DEVICE_ID_BROADCOM_DEFAULT 0x0000 + +#if !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) +#define SDIO_DEVICE_ID_BROADCOM_4325_SDGWB 0x0492 /* BCM94325SDGWB */ +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) */ +#if !defined(SDIO_DEVICE_ID_BROADCOM_4325) +#define SDIO_DEVICE_ID_BROADCOM_4325 0x0493 +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325) */ +#if !defined(SDIO_DEVICE_ID_BROADCOM_4329) +#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329 +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4329) */ +#if !defined(SDIO_DEVICE_ID_BROADCOM_4319) +#define SDIO_DEVICE_ID_BROADCOM_4319 0x4319 +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4319) */ +#if !defined(SDIO_DEVICE_ID_BROADCOM_4330) +#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330 +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4330) */ +#if !defined(SDIO_DEVICE_ID_BROADCOM_4334) +#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334 +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4334) */ +#if !defined(SDIO_DEVICE_ID_BROADCOM_4324) +#define SDIO_DEVICE_ID_BROADCOM_4324 0x4324 +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4324) */ +#if !defined(SDIO_DEVICE_ID_BROADCOM_43239) +#define SDIO_DEVICE_ID_BROADCOM_43239 43239 +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_43239) */ + +#include + +#include + +#ifdef WL_CFG80211 +extern void wl_cfg80211_set_parent_dev(void *dev); +#endif + +extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd); +extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd); +extern int dhd_os_check_wakelock(void *dhdp); +extern int dhd_os_check_if_up(void *dhdp); +extern void *bcmsdh_get_drvdata(void); + +int sdio_function_init(void); +void sdio_function_cleanup(void); + +#define DESCRIPTION "bcmsdh_sdmmc Driver" +#define AUTHOR "Broadcom Corporation" + +/* module param defaults */ +static int clockoverride = 0; + +module_param(clockoverride, int, 0644); +MODULE_PARM_DESC(clockoverride, "SDIO card clock override"); + +PBCMSDH_SDMMC_INSTANCE gInstance; + +/* Maximum number of bcmsdh_sdmmc devices supported by driver */ +#define BCMSDH_SDMMC_MAX_DEVICES 1 + +extern int bcmsdh_probe(struct device *dev); +extern int bcmsdh_remove(struct device *dev); +extern volatile bool dhd_mmc_suspend; + +static int bcmsdh_sdmmc_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + int ret = 0; + static struct sdio_func sdio_func_0; + sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__)); + sd_trace(("sdio_bcmsdh: func->class=%x\n", func->class)); + sd_trace(("sdio_vendor: 0x%04x\n", func->vendor)); + sd_trace(("sdio_device: 0x%04x\n", func->device)); + sd_trace(("Function#: 0x%04x\n", func->num)); + + if (func->num == 1) { + sdio_func_0.num = 0; + sdio_func_0.card = func->card; + gInstance->func[0] = &sdio_func_0; + if(func->device == 0x4) { /* 4318 */ + gInstance->func[2] = NULL; + sd_trace(("NIC found, calling bcmsdh_probe...\n")); + ret = bcmsdh_probe(&func->dev); + } + } + + 
gInstance->func[func->num] = func; + + if (func->num == 2) { +#ifdef WL_CFG80211 + wl_cfg80211_set_parent_dev(&func->dev); +#endif + sd_trace(("F2 found, calling bcmsdh_probe...\n")); + ret = bcmsdh_probe(&func->dev); + } + + return ret; +} + +static void bcmsdh_sdmmc_remove(struct sdio_func *func) +{ + sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__)); + sd_info(("sdio_bcmsdh: func->class=%x\n", func->class)); + sd_info(("sdio_vendor: 0x%04x\n", func->vendor)); + sd_info(("sdio_device: 0x%04x\n", func->device)); + sd_info(("Function#: 0x%04x\n", func->num)); + + if (func->num == 2) { + sd_trace(("F2 found, calling bcmsdh_remove...\n")); + bcmsdh_remove(&func->dev); + } else if (func->num == 1) { + sdio_claim_host(func); + sdio_disable_func(func); + sdio_release_host(func); + gInstance->func[1] = NULL; + } +} + +/* devices we support, null terminated */ +static const struct sdio_device_id bcmsdh_sdmmc_ids[] = { + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_DEFAULT) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4319) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4324) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43239) }, + { SDIO_DEVICE_CLASS(SDIO_CLASS_NONE) }, + { /* end: all zeroes */ }, +}; + +MODULE_DEVICE_TABLE(sdio, bcmsdh_sdmmc_ids); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) +static int bcmsdh_sdmmc_suspend(struct device *pdev) +{ + struct sdio_func *func = dev_to_sdio_func(pdev); + mmc_pm_flag_t sdio_flags; + int ret; + + if (func->num != 2) + return 0; + + sd_trace(("%s Enter\n", __FUNCTION__)); + + if (dhd_os_check_wakelock(bcmsdh_get_drvdata())) + return -EBUSY; + sdio_flags = sdio_get_host_pm_caps(func); + + if (!(sdio_flags & MMC_PM_KEEP_POWER)) { + sd_err(("%s: can't keep power while host is suspended\n", __FUNCTION__)); + return -EINVAL; + } + + /* keep power while host suspended */ + ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + if (ret) { + sd_err(("%s: error while trying to keep power\n", __FUNCTION__)); + return ret; + } +#if defined(OOB_INTR_ONLY) + bcmsdh_oob_intr_set(0); +#endif /* defined(OOB_INTR_ONLY) */ + dhd_mmc_suspend = TRUE; + smp_mb(); + + return 0; +} + +static int bcmsdh_sdmmc_resume(struct device *pdev) +{ +#if defined(OOB_INTR_ONLY) + struct sdio_func *func = dev_to_sdio_func(pdev); +#endif + sd_trace(("%s Enter\n", __FUNCTION__)); + dhd_mmc_suspend = FALSE; +#if defined(OOB_INTR_ONLY) + if ((func->num == 2) && dhd_os_check_if_up(bcmsdh_get_drvdata())) + bcmsdh_oob_intr_set(1); +#endif /* (OOB_INTR_ONLY) */ + + smp_mb(); + return 0; +} + +static const struct dev_pm_ops bcmsdh_sdmmc_pm_ops = { + .suspend = bcmsdh_sdmmc_suspend, + .resume = bcmsdh_sdmmc_resume, +}; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */ + +static struct sdio_driver bcmsdh_sdmmc_driver = { + .probe = bcmsdh_sdmmc_probe, + .remove = bcmsdh_sdmmc_remove, + .name = "bcmsdh_sdmmc", + .id_table = bcmsdh_sdmmc_ids, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) + .drv = { + .pm = &bcmsdh_sdmmc_pm_ops, + }, +#endif /* 
(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */ +}; + +struct sdos_info { + sdioh_info_t *sd; + spinlock_t lock; +}; + + +int +sdioh_sdmmc_osinit(sdioh_info_t *sd) +{ + struct sdos_info *sdos; + + sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info)); + sd->sdos_info = (void*)sdos; + if (sdos == NULL) + return BCME_NOMEM; + + sdos->sd = sd; + spin_lock_init(&sdos->lock); + return BCME_OK; +} + +void +sdioh_sdmmc_osfree(sdioh_info_t *sd) +{ + struct sdos_info *sdos; + ASSERT(sd && sd->sdos_info); + + sdos = (struct sdos_info *)sd->sdos_info; + MFREE(sd->osh, sdos, sizeof(struct sdos_info)); +} + +/* Interrupt enable/disable */ +SDIOH_API_RC +sdioh_interrupt_set(sdioh_info_t *sd, bool enable) +{ + ulong flags; + struct sdos_info *sdos; + + sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling")); + + sdos = (struct sdos_info *)sd->sdos_info; + ASSERT(sdos); + +#if !defined(OOB_INTR_ONLY) + if (enable && !(sd->intr_handler && sd->intr_handler_arg)) { + sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } +#endif /* !defined(OOB_INTR_ONLY) */ + + /* Ensure atomicity for enable/disable calls */ + spin_lock_irqsave(&sdos->lock, flags); + + sd->client_intr_enabled = enable; + if (enable) { + sdioh_sdmmc_devintr_on(sd); + } else { + sdioh_sdmmc_devintr_off(sd); + } + + spin_unlock_irqrestore(&sdos->lock, flags); + + return SDIOH_API_RC_SUCCESS; +} + + +#ifdef BCMSDH_MODULE +static int __init +bcmsdh_module_init(void) +{ + int error = 0; + sdio_function_init(); + return error; +} + +static void __exit +bcmsdh_module_cleanup(void) +{ + sdio_function_cleanup(); +} + +module_init(bcmsdh_module_init); +module_exit(bcmsdh_module_cleanup); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION(DESCRIPTION); +MODULE_AUTHOR(AUTHOR); + +#endif /* BCMSDH_MODULE */ +/* + * module init +*/ +int sdio_function_init(void) +{ + int error = 0; + sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__)); + + gInstance = kzalloc(sizeof(BCMSDH_SDMMC_INSTANCE), GFP_KERNEL); + if (!gInstance) + return -ENOMEM; + + error = sdio_register_driver(&bcmsdh_sdmmc_driver); + + return error; +} + +/* + * module cleanup +*/ +extern int bcmsdh_remove(struct device *dev); +void sdio_function_cleanup(void) +{ + sd_trace(("%s Enter\n", __FUNCTION__)); + + + sdio_unregister_driver(&bcmsdh_sdmmc_driver); + + if (gInstance) + kfree(gInstance); +} diff --git a/drivers/net/wireless/bcmdhd/bcmutils.c b/drivers/net/wireless/bcmdhd/bcmutils.c new file mode 100644 index 0000000000000..6b578e6536483 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmutils.c @@ -0,0 +1,1965 @@ +/* + * Driver O/S-independent utility routines + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * $Id: bcmutils.c,v 1.277.2.18 2011-01-26 02:32:08 $ + */ + +#include +#include +#include + +#ifdef BCMDRIVER + +#include +#include +#include + +#else /* !BCMDRIVER */ + +#include +#include +#include + +#if defined(BCMEXTSUP) +#include +#endif + + +#endif /* !BCMDRIVER */ + +#include +#include +#include +#include +#include +#include +#include + +void *_bcmutils_dummy_fn = NULL; + +#ifdef BCMDRIVER + + + +/* copy a pkt buffer chain into a buffer */ +uint +pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf) +{ + uint n, ret = 0; + + if (len < 0) + len = 4096; /* "infinite" */ + + /* skip 'offset' bytes */ + for (; p && offset; p = PKTNEXT(osh, p)) { + if (offset < (uint)PKTLEN(osh, p)) + break; + offset -= PKTLEN(osh, p); + } + + if (!p) + return 0; + + /* copy the data */ + for (; p && len; p = PKTNEXT(osh, p)) { + n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len); + bcopy(PKTDATA(osh, p) + offset, buf, n); + buf += n; + len -= n; + ret += n; + offset = 0; + } + + return ret; +} + +/* copy a buffer into a pkt buffer chain */ +uint +pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf) +{ + uint n, ret = 0; + + /* skip 'offset' bytes */ + for (; p && offset; p = PKTNEXT(osh, p)) { + if (offset < (uint)PKTLEN(osh, p)) + break; + offset -= PKTLEN(osh, p); + } + + if (!p) + return 0; + + /* copy the data */ + for (; p && len; p = PKTNEXT(osh, p)) { + n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len); + bcopy(buf, PKTDATA(osh, p) + offset, n); + buf += n; + len -= n; + ret += n; + offset = 0; + } + + return ret; +} + + + +/* return total length of buffer chain */ +uint BCMFASTPATH +pkttotlen(osl_t *osh, void *p) +{ + uint total; + + total = 0; + for (; p; p = PKTNEXT(osh, p)) + total += PKTLEN(osh, p); + return (total); +} + +/* return the last buffer of chained pkt */ +void * +pktlast(osl_t *osh, void *p) +{ + for (; PKTNEXT(osh, p); p = PKTNEXT(osh, p)) + ; + + return (p); +} + +/* count segments of a chained packet */ +uint BCMFASTPATH +pktsegcnt(osl_t *osh, void *p) +{ + uint cnt; + + for (cnt = 0; p; p = PKTNEXT(osh, p)) + cnt++; + + return cnt; +} + + +/* + * osl multiple-precedence packet queue + * hi_prec is always >= the number of the highest non-empty precedence + */ +void * BCMFASTPATH +pktq_penq(struct pktq *pq, int prec, void *p) +{ + struct pktq_prec *q; + + ASSERT(prec >= 0 && prec < pq->num_prec); + ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */ + + ASSERT(!pktq_full(pq)); + ASSERT(!pktq_pfull(pq, prec)); + + q = &pq->q[prec]; + + if (q->head) + PKTSETLINK(q->tail, p); + else + q->head = p; + + q->tail = p; + q->len++; + + pq->len++; + + if (pq->hi_prec < prec) + pq->hi_prec = (uint8)prec; + + return p; +} + +void * BCMFASTPATH +pktq_penq_head(struct pktq *pq, int prec, void *p) +{ + struct pktq_prec *q; + + ASSERT(prec >= 0 && prec < pq->num_prec); + ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */ + + ASSERT(!pktq_full(pq)); + ASSERT(!pktq_pfull(pq, prec)); + + q = &pq->q[prec]; + + if (q->head == NULL) + q->tail = p; + + PKTSETLINK(p, q->head); + q->head = p; + q->len++; + + pq->len++; + + if (pq->hi_prec < prec) + pq->hi_prec = (uint8)prec; + + return p; +} + +void * BCMFASTPATH +pktq_pdeq(struct pktq *pq, int prec) 
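+/* Dequeue and return the packet at the head of precedence 'prec', or NULL if that precedence queue is empty. */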
+{ + struct pktq_prec *q; + void *p; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + return NULL; + + if ((q->head = PKTLINK(p)) == NULL) + q->tail = NULL; + + q->len--; + + pq->len--; + + PKTSETLINK(p, NULL); + + return p; +} + +void * BCMFASTPATH +pktq_pdeq_tail(struct pktq *pq, int prec) +{ + struct pktq_prec *q; + void *p, *prev; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + return NULL; + + for (prev = NULL; p != q->tail; p = PKTLINK(p)) + prev = p; + + if (prev) + PKTSETLINK(prev, NULL); + else + q->head = NULL; + + q->tail = prev; + q->len--; + + pq->len--; + + return p; +} + +void +pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, ifpkt_cb_t fn, int arg) +{ + struct pktq_prec *q; + void *p, *prev = NULL; + + q = &pq->q[prec]; + p = q->head; + while (p) { + if (fn == NULL || (*fn)(p, arg)) { + bool head = (p == q->head); + if (head) + q->head = PKTLINK(p); + else + PKTSETLINK(prev, PKTLINK(p)); + PKTSETLINK(p, NULL); + PKTFREE(osh, p, dir); + q->len--; + pq->len--; + p = (head ? q->head : PKTLINK(prev)); + } else { + prev = p; + p = PKTLINK(p); + } + } + + if (q->head == NULL) { + ASSERT(q->len == 0); + q->tail = NULL; + } +} + +bool BCMFASTPATH +pktq_pdel(struct pktq *pq, void *pktbuf, int prec) +{ + struct pktq_prec *q; + void *p; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + if (!pktbuf) + return FALSE; + + q = &pq->q[prec]; + + if (q->head == pktbuf) { + if ((q->head = PKTLINK(pktbuf)) == NULL) + q->tail = NULL; + } else { + for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p)) + ; + if (p == NULL) + return FALSE; + + PKTSETLINK(p, PKTLINK(pktbuf)); + if (q->tail == pktbuf) + q->tail = p; + } + + q->len--; + pq->len--; + PKTSETLINK(pktbuf, NULL); + return TRUE; +} + +void +pktq_init(struct pktq *pq, int num_prec, int max_len) +{ + int prec; + + ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC); + + /* pq is variable size; only zero out what's requested */ + bzero(pq, OFFSETOF(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec)); + + pq->num_prec = (uint16)num_prec; + + pq->max = (uint16)max_len; + + for (prec = 0; prec < num_prec; prec++) + pq->q[prec].max = pq->max; +} + +void * BCMFASTPATH +pktq_deq(struct pktq *pq, int *prec_out) +{ + struct pktq_prec *q; + void *p; + int prec; + + if (pq->len == 0) + return NULL; + + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) + pq->hi_prec--; + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + return NULL; + + if ((q->head = PKTLINK(p)) == NULL) + q->tail = NULL; + + q->len--; + + pq->len--; + + if (prec_out) + *prec_out = prec; + + PKTSETLINK(p, NULL); + + return p; +} + +void * BCMFASTPATH +pktq_deq_tail(struct pktq *pq, int *prec_out) +{ + struct pktq_prec *q; + void *p, *prev; + int prec; + + if (pq->len == 0) + return NULL; + + for (prec = 0; prec < pq->hi_prec; prec++) + if (pq->q[prec].head) + break; + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + return NULL; + + for (prev = NULL; p != q->tail; p = PKTLINK(p)) + prev = p; + + if (prev) + PKTSETLINK(prev, NULL); + else + q->head = NULL; + + q->tail = prev; + q->len--; + + pq->len--; + + if (prec_out) + *prec_out = prec; + + PKTSETLINK(p, NULL); + + return p; +} + +void * +pktq_peek(struct pktq *pq, int *prec_out) +{ + int prec; + + if (pq->len == 0) + return NULL; + + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) + pq->hi_prec--; + + if (prec_out) + *prec_out = prec; + + return (pq->q[prec].head); +} + +void * 
+pktq_peek_tail(struct pktq *pq, int *prec_out) +{ + int prec; + + if (pq->len == 0) + return NULL; + + for (prec = 0; prec < pq->hi_prec; prec++) + if (pq->q[prec].head) + break; + + if (prec_out) + *prec_out = prec; + + return (pq->q[prec].tail); +} + +void +pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg) +{ + int prec; + for (prec = 0; prec < pq->num_prec; prec++) + pktq_pflush(osh, pq, prec, dir, fn, arg); + if (fn == NULL) + ASSERT(pq->len == 0); +} + +/* Return sum of lengths of a specific set of precedences */ +int +pktq_mlen(struct pktq *pq, uint prec_bmp) +{ + int prec, len; + + len = 0; + + for (prec = 0; prec <= pq->hi_prec; prec++) + if (prec_bmp & (1 << prec)) + len += pq->q[prec].len; + + return len; +} + +/* Priority dequeue from a specific set of precedences */ +void * BCMFASTPATH +pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out) +{ + struct pktq_prec *q; + void *p; + int prec; + + if (pq->len == 0) + return NULL; + + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) + pq->hi_prec--; + + while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL) + if (prec-- == 0) + return NULL; + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + return NULL; + + if ((q->head = PKTLINK(p)) == NULL) + q->tail = NULL; + + q->len--; + + if (prec_out) + *prec_out = prec; + + pq->len--; + + PKTSETLINK(p, NULL); + + return p; +} + +#endif /* BCMDRIVER */ + +const unsigned char bcm_ctype[] = { + + _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 0-7 */ + _BCM_C, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C, + _BCM_C, /* 8-15 */ + _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 16-23 */ + _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 24-31 */ + _BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 32-39 */ + _BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 40-47 */ + _BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D, /* 48-55 */ + _BCM_D,_BCM_D,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 56-63 */ + _BCM_P, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, + _BCM_U|_BCM_X, _BCM_U, /* 64-71 */ + _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 72-79 */ + _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 80-87 */ + _BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 88-95 */ + _BCM_P, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, + _BCM_L|_BCM_X, _BCM_L, /* 96-103 */ + _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 104-111 */ + _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 112-119 */ + _BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_C, /* 120-127 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 128-143 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 144-159 */ + _BCM_S|_BCM_SP, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, + _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 160-175 */ + _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, + _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 176-191 */ + _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, + _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, /* 192-207 */ + _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_P, _BCM_U, _BCM_U, _BCM_U, + _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_L, /* 208-223 */ + _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, 
_BCM_L, _BCM_L, + _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, /* 224-239 */ + _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_P, _BCM_L, _BCM_L, _BCM_L, + _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L /* 240-255 */ +}; + +ulong +bcm_strtoul(char *cp, char **endp, uint base) +{ + ulong result, last_result = 0, value; + bool minus; + + minus = FALSE; + + while (bcm_isspace(*cp)) + cp++; + + if (cp[0] == '+') + cp++; + else if (cp[0] == '-') { + minus = TRUE; + cp++; + } + + if (base == 0) { + if (cp[0] == '0') { + if ((cp[1] == 'x') || (cp[1] == 'X')) { + base = 16; + cp = &cp[2]; + } else { + base = 8; + cp = &cp[1]; + } + } else + base = 10; + } else if (base == 16 && (cp[0] == '0') && ((cp[1] == 'x') || (cp[1] == 'X'))) { + cp = &cp[2]; + } + + result = 0; + + while (bcm_isxdigit(*cp) && + (value = bcm_isdigit(*cp) ? *cp-'0' : bcm_toupper(*cp)-'A'+10) < base) + { + result = result*base + value; + /* Detected overflow */ + if (result < last_result && !minus) + return (ulong)-1; + last_result = result; + cp++; + } + + if (minus) + result = (ulong)(-(long)result); + + if (endp) + *endp = (char *)cp; + + return (result); +} + +int +bcm_atoi(char *s) +{ + return (int)bcm_strtoul(s, NULL, 10); +} + +/* return pointer to location of substring 'needle' in 'haystack' */ +char* +bcmstrstr(char *haystack, char *needle) +{ + int len, nlen; + int i; + + if ((haystack == NULL) || (needle == NULL)) + return (haystack); + + nlen = strlen(needle); + len = strlen(haystack) - nlen + 1; + + for (i = 0; i < len; i++) + if (memcmp(needle, &haystack[i], nlen) == 0) + return (&haystack[i]); + return (NULL); +} + +char* +bcmstrcat(char *dest, const char *src) +{ + char *p; + + p = dest + strlen(dest); + + while ((*p++ = *src++) != '\0') + ; + + return (dest); +} + +char* +bcmstrncat(char *dest, const char *src, uint size) +{ + char *endp; + char *p; + + p = dest + strlen(dest); + endp = p + size; + + while (p != endp && (*p++ = *src++) != '\0') + ; + + return (dest); +} + + +/**************************************************************************** +* Function: bcmstrtok +* +* Purpose: +* Tokenizes a string. This function is conceptually similiar to ANSI C strtok(), +* but allows strToken() to be used by different strings or callers at the same +* time. Each call modifies '*string' by substituting a NULL character for the +* first delimiter that is encountered, and updates 'string' to point to the char +* after the delimiter. Leading delimiters are skipped. +* +* Parameters: +* string (mod) Ptr to string ptr, updated by token. +* delimiters (in) Set of delimiter characters. +* tokdelim (out) Character that delimits the returned token. (May +* be set to NULL if token delimiter is not required). +* +* Returns: Pointer to the next token found. NULL when no more tokens are found. +***************************************************************************** +*/ +char * +bcmstrtok(char **string, const char *delimiters, char *tokdelim) +{ + unsigned char *str; + unsigned long map[8]; + int count; + char *nextoken; + + if (tokdelim != NULL) { + /* Prime the token delimiter */ + *tokdelim = '\0'; + } + + /* Clear control map */ + for (count = 0; count < 8; count++) { + map[count] = 0; + } + + /* Set bits in delimiter table */ + do { + map[*delimiters >> 5] |= (1 << (*delimiters & 31)); + } + while (*delimiters++); + + str = (unsigned char*)*string; + + /* Find beginning of token (skip over leading delimiters). 
Note that + * there is no token iff this loop sets str to point to the terminal + * null (*str == '\0') + */ + while (((map[*str >> 5] & (1 << (*str & 31))) && *str) || (*str == ' ')) { + str++; + } + + nextoken = (char*)str; + + /* Find the end of the token. If it is not the end of the string, + * put a null there. + */ + for (; *str; str++) { + if (map[*str >> 5] & (1 << (*str & 31))) { + if (tokdelim != NULL) { + *tokdelim = *str; + } + + *str++ = '\0'; + break; + } + } + + *string = (char*)str; + + /* Determine if a token has been found. */ + if (nextoken == (char *) str) { + return NULL; + } + else { + return nextoken; + } +} + + +#define xToLower(C) \ + ((C >= 'A' && C <= 'Z') ? (char)((int)C - (int)'A' + (int)'a') : C) + + +/**************************************************************************** +* Function: bcmstricmp +* +* Purpose: Compare to strings case insensitively. +* +* Parameters: s1 (in) First string to compare. +* s2 (in) Second string to compare. +* +* Returns: Return 0 if the two strings are equal, -1 if t1 < t2 and 1 if +* t1 > t2, when ignoring case sensitivity. +***************************************************************************** +*/ +int +bcmstricmp(const char *s1, const char *s2) +{ + char dc, sc; + + while (*s2 && *s1) { + dc = xToLower(*s1); + sc = xToLower(*s2); + if (dc < sc) return -1; + if (dc > sc) return 1; + s1++; + s2++; + } + + if (*s1 && !*s2) return 1; + if (!*s1 && *s2) return -1; + return 0; +} + + +/**************************************************************************** +* Function: bcmstrnicmp +* +* Purpose: Compare to strings case insensitively, upto a max of 'cnt' +* characters. +* +* Parameters: s1 (in) First string to compare. +* s2 (in) Second string to compare. +* cnt (in) Max characters to compare. +* +* Returns: Return 0 if the two strings are equal, -1 if t1 < t2 and 1 if +* t1 > t2, when ignoring case sensitivity. +***************************************************************************** +*/ +int +bcmstrnicmp(const char* s1, const char* s2, int cnt) +{ + char dc, sc; + + while (*s2 && *s1 && cnt) { + dc = xToLower(*s1); + sc = xToLower(*s2); + if (dc < sc) return -1; + if (dc > sc) return 1; + s1++; + s2++; + cnt--; + } + + if (!cnt) return 0; + if (*s1 && !*s2) return 1; + if (!*s1 && *s2) return -1; + return 0; +} + +/* parse a xx:xx:xx:xx:xx:xx format ethernet address */ +int +bcm_ether_atoe(char *p, struct ether_addr *ea) +{ + int i = 0; + + for (;;) { + ea->octet[i++] = (char) bcm_strtoul(p, &p, 16); + if (!*p++ || i == 6) + break; + } + + return (i == 6); +} + + +#if defined(CONFIG_USBRNDIS_RETAIL) || defined(NDIS_MINIPORT_DRIVER) +/* registry routine buffer preparation utility functions: + * parameter order is like strncpy, but returns count + * of bytes copied. 
Minimum bytes copied is null char(1)/wchar(2) + */ +ulong +wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen) +{ + ulong copyct = 1; + ushort i; + + if (abuflen == 0) + return 0; + + /* wbuflen is in bytes */ + wbuflen /= sizeof(ushort); + + for (i = 0; i < wbuflen; ++i) { + if (--abuflen == 0) + break; + *abuf++ = (char) *wbuf++; + ++copyct; + } + *abuf = '\0'; + + return copyct; +} +#endif /* CONFIG_USBRNDIS_RETAIL || NDIS_MINIPORT_DRIVER */ + +char * +bcm_ether_ntoa(const struct ether_addr *ea, char *buf) +{ + static const char template[] = "%02x:%02x:%02x:%02x:%02x:%02x"; + snprintf(buf, 18, template, + ea->octet[0]&0xff, ea->octet[1]&0xff, ea->octet[2]&0xff, + ea->octet[3]&0xff, ea->octet[4]&0xff, ea->octet[5]&0xff); + return (buf); +} + +char * +bcm_ip_ntoa(struct ipv4_addr *ia, char *buf) +{ + snprintf(buf, 16, "%d.%d.%d.%d", + ia->addr[0], ia->addr[1], ia->addr[2], ia->addr[3]); + return (buf); +} + +#ifdef BCMDRIVER + +void +bcm_mdelay(uint ms) +{ + uint i; + + for (i = 0; i < ms; i++) { + OSL_DELAY(1000); + } +} + + + + + +#if defined(DHD_DEBUG) +/* pretty hex print a pkt buffer chain */ +void +prpkt(const char *msg, osl_t *osh, void *p0) +{ + void *p; + + if (msg && (msg[0] != '\0')) + printf("%s:\n", msg); + + for (p = p0; p; p = PKTNEXT(osh, p)) + prhex(NULL, PKTDATA(osh, p), PKTLEN(osh, p)); +} +#endif + +/* Takes an Ethernet frame and sets out-of-bound PKTPRIO. + * Also updates the inplace vlan tag if requested. + * For debugging, it returns an indication of what it did. + */ +uint BCMFASTPATH +pktsetprio(void *pkt, bool update_vtag) +{ + struct ether_header *eh; + struct ethervlan_header *evh; + uint8 *pktdata; + int priority = 0; + int rc = 0; + + pktdata = (uint8 *) PKTDATA(NULL, pkt); + ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16))); + + eh = (struct ether_header *) pktdata; + + if (ntoh16(eh->ether_type) == ETHER_TYPE_8021Q) { + uint16 vlan_tag; + int vlan_prio, dscp_prio = 0; + + evh = (struct ethervlan_header *)eh; + + vlan_tag = ntoh16(evh->vlan_tag); + vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK; + + if (ntoh16(evh->ether_type) == ETHER_TYPE_IP) { + uint8 *ip_body = pktdata + sizeof(struct ethervlan_header); + uint8 tos_tc = IP_TOS46(ip_body); + dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT); + } + + /* DSCP priority gets precedence over 802.1P (vlan tag) */ + if (dscp_prio != 0) { + priority = dscp_prio; + rc |= PKTPRIO_VDSCP; + } else { + priority = vlan_prio; + rc |= PKTPRIO_VLAN; + } + /* + * If the DSCP priority is not the same as the VLAN priority, + * then overwrite the priority field in the vlan tag, with the + * DSCP priority value. 
This is required for Linux APs because + * the VLAN driver on Linux, overwrites the skb->priority field + * with the priority value in the vlan tag + */ + if (update_vtag && (priority != vlan_prio)) { + vlan_tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT); + vlan_tag |= (uint16)priority << VLAN_PRI_SHIFT; + evh->vlan_tag = hton16(vlan_tag); + rc |= PKTPRIO_UPD; + } + } else if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) { + uint8 *ip_body = pktdata + sizeof(struct ether_header); + uint8 tos_tc = IP_TOS46(ip_body); + priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT); + rc |= PKTPRIO_DSCP; + } + + ASSERT(priority >= 0 && priority <= MAXPRIO); + PKTSETPRIO(pkt, priority); + return (rc | priority); +} + + +static char bcm_undeferrstr[32]; +static const char *bcmerrorstrtable[] = BCMERRSTRINGTABLE; + +/* Convert the error codes into related error strings */ +const char * +bcmerrorstr(int bcmerror) +{ + /* check if someone added a bcmerror code but forgot to add errorstring */ + ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1)); + + if (bcmerror > 0 || bcmerror < BCME_LAST) { + snprintf(bcm_undeferrstr, sizeof(bcm_undeferrstr), "Undefined error %d", bcmerror); + return bcm_undeferrstr; + } + + ASSERT(strlen(bcmerrorstrtable[-bcmerror]) < BCME_STRLEN); + + return bcmerrorstrtable[-bcmerror]; +} + + + + +/* iovar table lookup */ +const bcm_iovar_t* +bcm_iovar_lookup(const bcm_iovar_t *table, const char *name) +{ + const bcm_iovar_t *vi; + const char *lookup_name; + + /* skip any ':' delimited option prefixes */ + lookup_name = strrchr(name, ':'); + if (lookup_name != NULL) + lookup_name++; + else + lookup_name = name; + + ASSERT(table != NULL); + + for (vi = table; vi->name; vi++) { + if (!strcmp(vi->name, lookup_name)) + return vi; + } + /* ran to end of table */ + + return NULL; /* var name not found */ +} + +int +bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set) +{ + int bcmerror = 0; + + /* length check on io buf */ + switch (vi->type) { + case IOVT_BOOL: + case IOVT_INT8: + case IOVT_INT16: + case IOVT_INT32: + case IOVT_UINT8: + case IOVT_UINT16: + case IOVT_UINT32: + /* all integers are int32 sized args at the ioctl interface */ + if (len < (int)sizeof(int)) { + bcmerror = BCME_BUFTOOSHORT; + } + break; + + case IOVT_BUFFER: + /* buffer must meet minimum length requirement */ + if (len < vi->minlen) { + bcmerror = BCME_BUFTOOSHORT; + } + break; + + case IOVT_VOID: + if (!set) { + /* Cannot return nil... */ + bcmerror = BCME_UNSUPPORTED; + } else if (len) { + /* Set is an action w/o parameters */ + bcmerror = BCME_BUFTOOLONG; + } + break; + + default: + /* unknown type for length check in iovar info */ + ASSERT(0); + bcmerror = BCME_UNSUPPORTED; + } + + return bcmerror; +} + +#endif /* BCMDRIVER */ + + +/******************************************************************************* + * crc8 + * + * Computes a crc8 over the input data using the polynomial: + * + * x^8 + x^7 +x^6 + x^4 + x^2 + 1 + * + * The caller provides the initial value (either CRC8_INIT_VALUE + * or the previous returned value) to allow for processing of + * discontiguous blocks of data. When generating the CRC the + * caller is responsible for complementing the final return value + * and inserting it into the byte stream. When checking, a final + * return value of CRC8_GOOD_VALUE indicates a valid CRC. 
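+ *
+ * Illustrative generator-side sequence (a sketch only, not code used
+ * elsewhere in this file):
+ *   crc = hndcrc8(buf, len, CRC8_INIT_VALUE);
+ * then complement crc and append it to the byte stream, as described above.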
+ * + * Reference: Dallas Semiconductor Application Note 27 + * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms", + * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd., + * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt + * + * **************************************************************************** + */ + +static const uint8 crc8_table[256] = { + 0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B, + 0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21, + 0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF, + 0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5, + 0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14, + 0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E, + 0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80, + 0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA, + 0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95, + 0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF, + 0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01, + 0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B, + 0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA, + 0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0, + 0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E, + 0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34, + 0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0, + 0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A, + 0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54, + 0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E, + 0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF, + 0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5, + 0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B, + 0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61, + 0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E, + 0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74, + 0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA, + 0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0, + 0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41, + 0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B, + 0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5, + 0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F +}; + +#define CRC_INNER_LOOP(n, c, x) \ + (c) = ((c) >> 8) ^ crc##n##_table[((c) ^ (x)) & 0xff] + +uint8 +hndcrc8( + uint8 *pdata, /* pointer to array of data to process */ + uint nbytes, /* number of input data bytes to process */ + uint8 crc /* either CRC8_INIT_VALUE or previous return value */ +) +{ + /* hard code the crc loop instead of using CRC_INNER_LOOP macro + * to avoid the undefined and unnecessary (uint8 >> 8) operation. + */ + while (nbytes-- > 0) + crc = crc8_table[(crc ^ *pdata++) & 0xff]; + + return crc; +} + +/******************************************************************************* + * crc16 + * + * Computes a crc16 over the input data using the polynomial: + * + * x^16 + x^12 +x^5 + 1 + * + * The caller provides the initial value (either CRC16_INIT_VALUE + * or the previous returned value) to allow for processing of + * discontiguous blocks of data. When generating the CRC the + * caller is responsible for complementing the final return value + * and inserting it into the byte stream. When checking, a final + * return value of CRC16_GOOD_VALUE indicates a valid CRC. 
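+ *
+ * hndcrc16() below is table driven: each input byte costs one lookup in the
+ * 256-entry crc16_table through the CRC_INNER_LOOP macro.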
+ * + * Reference: Dallas Semiconductor Application Note 27 + * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms", + * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd., + * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt + * + * **************************************************************************** + */ + +static const uint16 crc16_table[256] = { + 0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF, + 0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7, + 0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E, + 0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876, + 0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD, + 0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5, + 0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C, + 0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974, + 0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB, + 0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3, + 0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A, + 0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72, + 0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9, + 0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1, + 0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738, + 0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70, + 0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7, + 0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF, + 0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036, + 0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E, + 0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5, + 0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD, + 0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134, + 0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C, + 0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3, + 0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB, + 0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232, + 0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A, + 0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1, + 0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9, + 0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330, + 0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78 +}; + +uint16 +hndcrc16( + uint8 *pdata, /* pointer to array of data to process */ + uint nbytes, /* number of input data bytes to process */ + uint16 crc /* either CRC16_INIT_VALUE or previous return value */ +) +{ + while (nbytes-- > 0) + CRC_INNER_LOOP(16, crc, *pdata++); + return crc; +} + +static const uint32 crc32_table[256] = { + 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, + 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, + 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, + 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, + 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, + 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, + 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, + 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, + 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, + 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, + 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, + 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, + 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, + 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, + 
0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, + 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, + 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, + 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, + 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, + 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, + 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, + 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, + 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, + 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, + 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, + 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, + 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, + 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, + 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, + 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, + 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, + 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, + 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, + 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, + 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, + 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, + 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, + 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, + 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, + 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, + 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, + 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, + 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, + 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, + 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, + 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, + 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, + 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, + 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, + 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, + 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, + 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, + 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, + 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, + 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, + 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, + 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, + 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, + 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, + 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, + 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, + 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, + 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, + 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D +}; + +/* + * crc input is CRC32_INIT_VALUE for a fresh start, or previous return value if + * accumulating over multiple pieces. 
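+ * Illustrative two-piece accumulation (placeholder buffer names, not used
+ * elsewhere in this file):
+ *   crc = hndcrc32(hdr, hdrlen, CRC32_INIT_VALUE);
+ *   crc = hndcrc32(body, bodylen, crc);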
+ */ +uint32 +hndcrc32(uint8 *pdata, uint nbytes, uint32 crc) +{ + uint8 *pend; +#ifdef __mips__ + uint8 tmp[4]; + ulong *tptr = (ulong *)tmp; + + /* in case the beginning of the buffer isn't aligned */ + pend = (uint8 *)((uint)(pdata + 3) & 0xfffffffc); + nbytes -= (pend - pdata); + while (pdata < pend) + CRC_INNER_LOOP(32, crc, *pdata++); + + /* handle bulk of data as 32-bit words */ + pend = pdata + (nbytes & 0xfffffffc); + while (pdata < pend) { + *tptr = *(ulong *)pdata; + pdata += sizeof(ulong *); + CRC_INNER_LOOP(32, crc, tmp[0]); + CRC_INNER_LOOP(32, crc, tmp[1]); + CRC_INNER_LOOP(32, crc, tmp[2]); + CRC_INNER_LOOP(32, crc, tmp[3]); + } + + /* 1-3 bytes at end of buffer */ + pend = pdata + (nbytes & 0x03); + while (pdata < pend) + CRC_INNER_LOOP(32, crc, *pdata++); +#else + pend = pdata + nbytes; + while (pdata < pend) + CRC_INNER_LOOP(32, crc, *pdata++); +#endif /* __mips__ */ + + return crc; +} + +#ifdef notdef +#define CLEN 1499 /* CRC Length */ +#define CBUFSIZ (CLEN+4) +#define CNBUFS 5 /* # of bufs */ + +void +testcrc32(void) +{ + uint j, k, l; + uint8 *buf; + uint len[CNBUFS]; + uint32 crcr; + uint32 crc32tv[CNBUFS] = + {0xd2cb1faa, 0xd385c8fa, 0xf5b4f3f3, 0x55789e20, 0x00343110}; + + ASSERT((buf = MALLOC(CBUFSIZ*CNBUFS)) != NULL); + + /* step through all possible alignments */ + for (l = 0; l <= 4; l++) { + for (j = 0; j < CNBUFS; j++) { + len[j] = CLEN; + for (k = 0; k < len[j]; k++) + *(buf + j*CBUFSIZ + (k+l)) = (j+k) & 0xff; + } + + for (j = 0; j < CNBUFS; j++) { + crcr = crc32(buf + j*CBUFSIZ + l, len[j], CRC32_INIT_VALUE); + ASSERT(crcr == crc32tv[j]); + } + } + + MFREE(buf, CBUFSIZ*CNBUFS); + return; +} +#endif /* notdef */ + +/* + * Advance from the current 1-byte tag/1-byte length/variable-length value + * triple, to the next, returning a pointer to the next. + * If the current or next TLV is invalid (does not fit in given buffer length), + * NULL is returned. + * *buflen is not modified if the TLV elt parameter is invalid, or is decremented + * by the TLV parameter's length if it is valid. + */ +bcm_tlv_t * +bcm_next_tlv(bcm_tlv_t *elt, int *buflen) +{ + int len; + + /* validate current elt */ + if (!bcm_valid_tlv(elt, *buflen)) + return NULL; + + /* advance to next elt */ + len = elt->len; + elt = (bcm_tlv_t*)(elt->data + len); + *buflen -= (2 + len); + + /* validate next elt */ + if (!bcm_valid_tlv(elt, *buflen)) + return NULL; + + return elt; +} + +/* + * Traverse a string of 1-byte tag/1-byte length/variable-length value + * triples, returning a pointer to the substring whose first element + * matches tag + */ +bcm_tlv_t * +bcm_parse_tlvs(void *buf, int buflen, uint key) +{ + bcm_tlv_t *elt; + int totlen; + + elt = (bcm_tlv_t*)buf; + totlen = buflen; + + /* find tagged parameter */ + while (totlen >= 2) { + int len = elt->len; + + /* validate remaining totlen */ + if ((elt->id == key) && (totlen >= (len + 2))) + return (elt); + + elt = (bcm_tlv_t*)((uint8*)elt + (len + 2)); + totlen -= (len + 2); + } + + return NULL; +} + +/* + * Traverse a string of 1-byte tag/1-byte length/variable-length value + * triples, returning a pointer to the substring whose first element + * matches tag. Stop parsing when we see an element whose ID is greater + * than the target key. 
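+ * Example (hypothetical buffer): for elements with IDs 1, 3, 7 in that order,
+ *   bcm_parse_ordered_tlvs(buf, buflen, 3) returns the ID-3 element, while
+ *   bcm_parse_ordered_tlvs(buf, buflen, 2) returns NULL as soon as ID 3 is
+ * seen. The early exit is only correct if the caller guarantees ascending IDs.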
+ */ +bcm_tlv_t * +bcm_parse_ordered_tlvs(void *buf, int buflen, uint key) +{ + bcm_tlv_t *elt; + int totlen; + + elt = (bcm_tlv_t*)buf; + totlen = buflen; + + /* find tagged parameter */ + while (totlen >= 2) { + uint id = elt->id; + int len = elt->len; + + /* Punt if we start seeing IDs > than target key */ + if (id > key) + return (NULL); + + /* validate remaining totlen */ + if ((id == key) && (totlen >= (len + 2))) + return (elt); + + elt = (bcm_tlv_t*)((uint8*)elt + (len + 2)); + totlen -= (len + 2); + } + return NULL; +} + +#if defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || \ + defined(DHD_DEBUG) +int +bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len) +{ + int i; + char* p = buf; + char hexstr[16]; + int slen = 0, nlen = 0; + uint32 bit; + const char* name; + + if (len < 2 || !buf) + return 0; + + buf[0] = '\0'; + + for (i = 0; flags != 0; i++) { + bit = bd[i].bit; + name = bd[i].name; + if (bit == 0 && flags != 0) { + /* print any unnamed bits */ + snprintf(hexstr, 16, "0x%X", flags); + name = hexstr; + flags = 0; /* exit loop */ + } else if ((flags & bit) == 0) + continue; + flags &= ~bit; + nlen = strlen(name); + slen += nlen; + /* count btwn flag space */ + if (flags != 0) + slen += 1; + /* need NULL char as well */ + if (len <= slen) + break; + /* copy NULL char but don't count it */ + strncpy(p, name, nlen + 1); + p += nlen; + /* copy btwn flag space and NULL char */ + if (flags != 0) + p += snprintf(p, 2, " "); + len -= slen; + } + + /* indicate the str was too short */ + if (flags != 0) { + if (len < 2) + p -= 2 - len; /* overwrite last char */ + p += snprintf(p, 2, ">"); + } + + return (int)(p - buf); +} +#endif + +#if defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || \ + defined(DHD_DEBUG) || defined(WLMEDIA_PEAKRATE) +/* print bytes formatted as hex to a string. return the resulting string length */ +int +bcm_format_hex(char *str, const void *bytes, int len) +{ + int i; + char *p = str; + const uint8 *src = (const uint8*)bytes; + + for (i = 0; i < len; i++) { + p += snprintf(p, 3, "%02X", *src); + src++; + } + return (int)(p - str); +} +#endif + +/* pretty hex print a contiguous buffer */ +void +prhex(const char *msg, uchar *buf, uint nbytes) +{ + char line[128], *p; + int len = sizeof(line); + int nchar; + uint i; + + if (msg && (msg[0] != '\0')) + printf("%s:\n", msg); + + p = line; + for (i = 0; i < nbytes; i++) { + if (i % 16 == 0) { + nchar = snprintf(p, len, " %04d: ", i); /* line prefix */ + p += nchar; + len -= nchar; + } + if (len > 0) { + nchar = snprintf(p, len, "%02x ", buf[i]); + p += nchar; + len -= nchar; + } + + if (i % 16 == 15) { + printf("%s\n", line); /* flush line */ + p = line; + len = sizeof(line); + } + } + + /* flush last partial line */ + if (p != line) + printf("%s\n", line); +} + +static const char *crypto_algo_names[] = { + "NONE", + "WEP1", + "TKIP", + "WEP128", + "AES_CCM", + "AES_OCB_MSDU", + "AES_OCB_MPDU", + "NALG" + "UNDEF", + "UNDEF", + "UNDEF", + "UNDEF" +}; + +const char * +bcm_crypto_algo_name(uint algo) +{ + return (algo < ARRAYSIZE(crypto_algo_names)) ? crypto_algo_names[algo] : "ERR"; +} + + +char * +bcm_chipname(uint chipid, char *buf, uint len) +{ + const char *fmt; + + fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? 
"%d" : "%x"; + snprintf(buf, len, fmt, chipid); + return buf; +} + +/* Produce a human-readable string for boardrev */ +char * +bcm_brev_str(uint32 brev, char *buf) +{ + if (brev < 0x100) + snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf); + else + snprintf(buf, 8, "%c%03x", ((brev & 0xf000) == 0x1000) ? 'P' : 'A', brev & 0xfff); + + return (buf); +} + +#define BUFSIZE_TODUMP_ATONCE 512 /* Buffer size */ + +/* dump large strings to console */ +void +printbig(char *buf) +{ + uint len, max_len; + char c; + + len = strlen(buf); + + max_len = BUFSIZE_TODUMP_ATONCE; + + while (len > max_len) { + c = buf[max_len]; + buf[max_len] = '\0'; + printf("%s", buf); + buf[max_len] = c; + + buf += max_len; + len -= max_len; + } + /* print the remaining string */ + printf("%s\n", buf); + return; +} + +/* routine to dump fields in a fileddesc structure */ +uint +bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1, struct fielddesc *fielddesc_array, + char *buf, uint32 bufsize) +{ + uint filled_len; + int len; + struct fielddesc *cur_ptr; + + filled_len = 0; + cur_ptr = fielddesc_array; + + while (bufsize > 1) { + if (cur_ptr->nameandfmt == NULL) + break; + len = snprintf(buf, bufsize, cur_ptr->nameandfmt, + read_rtn(arg0, arg1, cur_ptr->offset)); + /* check for snprintf overflow or error */ + if (len < 0 || (uint32)len >= bufsize) + len = bufsize - 1; + buf += len; + bufsize -= len; + filled_len += len; + cur_ptr++; + } + return filled_len; +} + +uint +bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen) +{ + uint len; + + len = strlen(name) + 1; + + if ((len + datalen) > buflen) + return 0; + + strncpy(buf, name, buflen); + + /* append data onto the end of the name string */ + memcpy(&buf[len], data, datalen); + len += datalen; + + return len; +} + +/* Quarter dBm units to mW + * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153 + * Table is offset so the last entry is largest mW value that fits in + * a uint16. + */ + +#define QDBM_OFFSET 153 /* Offset for first entry */ +#define QDBM_TABLE_LEN 40 /* Table size */ + +/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET. + * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2 + */ +#define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */ + +/* Largest mW value that will round down to the last table entry, + * QDBM_OFFSET + QDBM_TABLE_LEN-1. + * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2. + */ +#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */ + +static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = { +/* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */ +/* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000, +/* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849, +/* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119, +/* 177: */ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811, +/* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096 +}; + +uint16 +bcm_qdbm_to_mw(uint8 qdbm) +{ + uint factor = 1; + int idx = qdbm - QDBM_OFFSET; + + if (idx >= QDBM_TABLE_LEN) { + /* clamp to max uint16 mW value */ + return 0xFFFF; + } + + /* scale the qdBm index up to the range of the table 0-40 + * where an offset of 40 qdBm equals a factor of 10 mW. + */ + while (idx < 0) { + idx += 40; + factor *= 10; + } + + /* return the mW value scaled down to the correct factor of 10, + * adding in factor/2 to get proper rounding. 
+ */ + return ((nqdBm_to_mW_map[idx] + factor/2) / factor); +} + +uint8 +bcm_mw_to_qdbm(uint16 mw) +{ + uint8 qdbm; + int offset; + uint mw_uint = mw; + uint boundary; + + /* handle boundary case */ + if (mw_uint <= 1) + return 0; + + offset = QDBM_OFFSET; + + /* move mw into the range of the table */ + while (mw_uint < QDBM_TABLE_LOW_BOUND) { + mw_uint *= 10; + offset -= 40; + } + + for (qdbm = 0; qdbm < QDBM_TABLE_LEN-1; qdbm++) { + boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm+1] - + nqdBm_to_mW_map[qdbm])/2; + if (mw_uint < boundary) + break; + } + + qdbm += (uint8)offset; + + return (qdbm); +} + + +uint +bcm_bitcount(uint8 *bitmap, uint length) +{ + uint bitcount = 0, i; + uint8 tmp; + for (i = 0; i < length; i++) { + tmp = bitmap[i]; + while (tmp) { + bitcount++; + tmp &= (tmp - 1); + } + } + return bitcount; +} + +#ifdef BCMDRIVER + +/* Initialization of bcmstrbuf structure */ +void +bcm_binit(struct bcmstrbuf *b, char *buf, uint size) +{ + b->origsize = b->size = size; + b->origbuf = b->buf = buf; +} + +/* Buffer sprintf wrapper to guard against buffer overflow */ +int +bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...) +{ + va_list ap; + int r; + + va_start(ap, fmt); + r = vsnprintf(b->buf, b->size, fmt, ap); + + /* Non Ansi C99 compliant returns -1, + * Ansi compliant return r >= b->size, + * bcmstdlib returns 0, handle all + */ + if ((r == -1) || (r >= (int)b->size) || (r == 0)) { + b->size = 0; + } else { + b->size -= r; + b->buf += r; + } + + va_end(ap); + + return r; +} + +void +bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount) +{ + int i; + + for (i = 0; i < num_bytes; i++) { + num[i] += amount; + if (num[i] >= amount) + break; + amount = 1; + } +} + +int +bcm_cmp_bytes(uchar *arg1, uchar *arg2, uint8 nbytes) +{ + int i; + + for (i = nbytes - 1; i >= 0; i--) { + if (arg1[i] != arg2[i]) + return (arg1[i] - arg2[i]); + } + return 0; +} + +void +bcm_print_bytes(char *name, const uchar *data, int len) +{ + int i; + int per_line = 0; + + printf("%s: %d \n", name ? name : "", len); + for (i = 0; i < len; i++) { + printf("%02x ", *data++); + per_line++; + if (per_line == 16) { + per_line = 0; + printf("\n"); + } + } + printf("\n"); +} +#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \ + defined(WLMSG_PRPKT) || defined(WLMSG_WSEC) +#define SSID_FMT_BUF_LEN ((4 * DOT11_MAX_SSID_LEN) + 1) + +int +bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len) +{ + uint i, c; + char *p = buf; + char *endp = buf + SSID_FMT_BUF_LEN; + + if (ssid_len > DOT11_MAX_SSID_LEN) ssid_len = DOT11_MAX_SSID_LEN; + + for (i = 0; i < ssid_len; i++) { + c = (uint)ssid[i]; + if (c == '\\') { + *p++ = '\\'; + *p++ = '\\'; + } else if (bcm_isprint((uchar)c)) { + *p++ = (char)c; + } else { + p += snprintf(p, (endp - p), "\\x%02X", c); + } + } + *p = '\0'; + ASSERT(p < endp); + + return (int)(p - buf); +} +#endif + +#endif /* BCMDRIVER */ + +/* + * ProcessVars:Takes a buffer of "=\n" lines read from a file and ending in a NUL. + * also accepts nvram files which are already in the format of =\0\=\0 + * Removes carriage returns, empty lines, comment lines, and converts newlines to NULs. + * Shortens buffer as needed and pads with NULs. End of buffer is marked by two NULs. 
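+ * Example: "# note\r\nname=value\r\n\nkey=1\n" becomes "name=value\0key=1\0"
+ * in place, with further NUL padding so the returned length is a multiple of
+ * four (when that padding still fits within the original buffer length).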
+*/ + +unsigned int +process_nvram_vars(char *varbuf, unsigned int len) +{ + char *dp; + bool findNewline; + int column; + unsigned int buf_len, n; + unsigned int pad = 0; + + dp = varbuf; + + findNewline = FALSE; + column = 0; + + for (n = 0; n < len; n++) { + if (varbuf[n] == '\r') + continue; + if (findNewline && varbuf[n] != '\n') + continue; + findNewline = FALSE; + if (varbuf[n] == '#') { + findNewline = TRUE; + continue; + } + if (varbuf[n] == '\n') { + if (column == 0) + continue; + *dp++ = 0; + column = 0; + continue; + } + *dp++ = varbuf[n]; + column++; + } + buf_len = (unsigned int)(dp - varbuf); + if (buf_len % 4) { + pad = 4 - buf_len % 4; + if (pad && (buf_len + pad <= len)) { + buf_len += pad; + } + } + + while (dp < varbuf + n) + *dp++ = 0; + + return buf_len; +} diff --git a/drivers/net/wireless/bcmdhd/bcmwifi.c b/drivers/net/wireless/bcmdhd/bcmwifi.c new file mode 100644 index 0000000000000..70722170bdfdc --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmwifi.c @@ -0,0 +1,274 @@ +/* + * Misc utility routines used by kernel or app-level. + * Contents are wifi-specific, used by any kernel or app-level + * software that might want wifi things as it grows. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * $Id: bcmwifi.c,v 1.31.8.1 2010-08-03 17:47:05 Exp $ + */ + + +#include + +#ifdef BCMDRIVER +#include +#include +#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base)) +#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c)) +#else +#include +#include +#include +#ifndef ASSERT +#define ASSERT(exp) +#endif +#endif +#include + +#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL)) +#include +#endif + + + + + +char * +wf_chspec_ntoa(chanspec_t chspec, char *buf) +{ + const char *band, *bw, *sb; + uint channel; + + band = ""; + bw = ""; + sb = ""; + channel = CHSPEC_CHANNEL(chspec); + + if ((CHSPEC_IS2G(chspec) && channel > CH_MAX_2G_CHANNEL) || + (CHSPEC_IS5G(chspec) && channel <= CH_MAX_2G_CHANNEL)) + band = (CHSPEC_IS2G(chspec)) ? 
"b" : "a"; + if (CHSPEC_IS40(chspec)) { + if (CHSPEC_SB_UPPER(chspec)) { + sb = "u"; + channel += CH_10MHZ_APART; + } else { + sb = "l"; + channel -= CH_10MHZ_APART; + } + } else if (CHSPEC_IS10(chspec)) { + bw = "n"; + } + + + snprintf(buf, 6, "%d%s%s%s", channel, band, bw, sb); + return (buf); +} + + +chanspec_t +wf_chspec_aton(char *a) +{ + char *endp = NULL; + uint channel, band, bw, ctl_sb; + char c; + + channel = strtoul(a, &endp, 10); + + + if (endp == a) + return 0; + + if (channel > MAXCHANNEL) + return 0; + + band = ((channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G); + bw = WL_CHANSPEC_BW_20; + ctl_sb = WL_CHANSPEC_CTL_SB_NONE; + + a = endp; + + c = tolower(a[0]); + if (c == '\0') + goto done; + + + if (c == 'a' || c == 'b') { + band = (c == 'a') ? WL_CHANSPEC_BAND_5G : WL_CHANSPEC_BAND_2G; + a++; + c = tolower(a[0]); + if (c == '\0') + goto done; + } + + + if (c == 'n') { + bw = WL_CHANSPEC_BW_10; + } else if (c == 'l') { + bw = WL_CHANSPEC_BW_40; + ctl_sb = WL_CHANSPEC_CTL_SB_LOWER; + + if (channel <= (MAXCHANNEL - CH_20MHZ_APART)) + channel += CH_10MHZ_APART; + else + return 0; + } else if (c == 'u') { + bw = WL_CHANSPEC_BW_40; + ctl_sb = WL_CHANSPEC_CTL_SB_UPPER; + + if (channel > CH_20MHZ_APART) + channel -= CH_10MHZ_APART; + else + return 0; + } else { + return 0; + } + +done: + return (channel | band | bw | ctl_sb); +} + + +bool +wf_chspec_malformed(chanspec_t chanspec) +{ + + if (!CHSPEC_IS5G(chanspec) && !CHSPEC_IS2G(chanspec)) + return TRUE; + + if (!CHSPEC_IS40(chanspec) && !CHSPEC_IS20(chanspec)) + return TRUE; + + + if (CHSPEC_IS20_UNCOND(chanspec)) { + if (!CHSPEC_SB_NONE(chanspec)) + return TRUE; + } else { + if (!CHSPEC_SB_UPPER(chanspec) && !CHSPEC_SB_LOWER(chanspec)) + return TRUE; + } + + return FALSE; +} + + +uint8 +wf_chspec_ctlchan(chanspec_t chspec) +{ + uint8 ctl_chan; + + + if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_NONE) { + return CHSPEC_CHANNEL(chspec); + } else { + + ASSERT(CHSPEC_BW(chspec) == WL_CHANSPEC_BW_40); + + if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_UPPER) { + + ctl_chan = UPPER_20_SB(CHSPEC_CHANNEL(chspec)); + } else { + ASSERT(CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_LOWER); + + ctl_chan = LOWER_20_SB(CHSPEC_CHANNEL(chspec)); + } + } + + return ctl_chan; +} + +chanspec_t +wf_chspec_ctlchspec(chanspec_t chspec) +{ + chanspec_t ctl_chspec = 0; + uint8 channel; + + ASSERT(!wf_chspec_malformed(chspec)); + + + if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_NONE) { + return chspec; + } else { + if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_UPPER) { + channel = UPPER_20_SB(CHSPEC_CHANNEL(chspec)); + } else { + channel = LOWER_20_SB(CHSPEC_CHANNEL(chspec)); + } + ctl_chspec = channel | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE; + ctl_chspec |= CHSPEC_BAND(chspec); + } + return ctl_chspec; +} + + +int +wf_mhz2channel(uint freq, uint start_factor) +{ + int ch = -1; + uint base; + int offset; + + + if (start_factor == 0) { + if (freq >= 2400 && freq <= 2500) + start_factor = WF_CHAN_FACTOR_2_4_G; + else if (freq >= 5000 && freq <= 6000) + start_factor = WF_CHAN_FACTOR_5_G; + } + + if (freq == 2484 && start_factor == WF_CHAN_FACTOR_2_4_G) + return 14; + + base = start_factor / 2; + + + if ((freq < base) || (freq > base + 1000)) + return -1; + + offset = freq - base; + ch = offset / 5; + + + if (offset != (ch * 5)) + return -1; + + + if (start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 13)) + return -1; + + return ch; +} + + +int +wf_channel2mhz(uint ch, uint start_factor) +{ + int freq; + + if 
((start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 14)) || + (ch > 200)) + freq = -1; + else if ((start_factor == WF_CHAN_FACTOR_2_4_G) && (ch == 14)) + freq = 2484; + else + freq = ch * 5 + start_factor / 2; + + return freq; +} diff --git a/drivers/net/wireless/bcmdhd/dhd.h b/drivers/net/wireless/bcmdhd/dhd.h new file mode 100644 index 0000000000000..6106555f13146 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd.h @@ -0,0 +1,765 @@ +/* + * Header file describing the internal (inter-module) DHD interfaces. + * + * Provides type definitions and function prototypes used to link the + * DHD OS, bus, and protocol modules. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: dhd.h 344123 2012-07-11 09:33:49Z $ + */ + +/**************** + * Common types * + */ + +#ifndef _dhd_h_ +#define _dhd_h_ + +#if defined(CHROMIUMOS_COMPAT_WIRELESS) +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_HAS_WAKELOCK) +#include +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_HAS_WAKELOCK) */ + +/* The kernel threading is sdio-specific */ +struct task_struct; +struct sched_param; +int setScheduler(struct task_struct *p, int policy, struct sched_param *param); + +#define ALL_INTERFACES 0xff + +#include + + +/* Forward decls */ +struct dhd_bus; +struct dhd_prot; +struct dhd_info; +struct dhd_cmn; + +/* The level of bus communication with the dongle */ +enum dhd_bus_state { + DHD_BUS_DOWN, /* Not ready for frame transfers */ + DHD_BUS_LOAD, /* Download access only (CPU reset) */ + DHD_BUS_DATA /* Ready for frame transfers */ +}; + +/* Firmware requested operation mode */ +#define STA_MASK 0x0001 +#define HOSTAPD_MASK 0x0002 +#define WFD_MASK 0x0004 +#define SOFTAP_FW_MASK 0x0008 +#define P2P_GO_ENABLED 0x0010 +#define P2P_GC_ENABLED 0x0020 +#define CONCURENT_MASK 0x00F0 + +#define MANUFACTRING_FW "WLTEST" + +/* max sequential rxcntl timeouts to set HANG event */ +#define MAX_CNTL_TIMEOUT 2 + +#define DHD_SCAN_ACTIVE_TIME 40 /* ms : Embedded default Active setting from DHD Driver */ +#define DHD_SCAN_PASSIVE_TIME 130 /* ms: Embedded default Passive setting from DHD Driver */ + +#define DHD_BEACON_TIMEOUT_NORMAL 4 +#define DHD_BEACON_TIMEOUT_HIGH 10 + +enum dhd_bus_wake_state { + WAKE_LOCK_OFF, + WAKE_LOCK_PRIV, + WAKE_LOCK_DPC, + WAKE_LOCK_IOCTL, + 
WAKE_LOCK_DOWNLOAD, + WAKE_LOCK_TMOUT, + WAKE_LOCK_WATCHDOG, + WAKE_LOCK_LINK_DOWN_TMOUT, + WAKE_LOCK_PNO_FIND_TMOUT, + WAKE_LOCK_SOFTAP_SET, + WAKE_LOCK_SOFTAP_STOP, + WAKE_LOCK_SOFTAP_START, + WAKE_LOCK_SOFTAP_THREAD, + WAKE_LOCK_MAX +}; + +enum dhd_prealloc_index { + DHD_PREALLOC_PROT = 0, + DHD_PREALLOC_RXBUF, + DHD_PREALLOC_DATABUF, + DHD_PREALLOC_OSL_BUF +}; + +typedef enum { + DHD_IF_NONE = 0, + DHD_IF_ADD, + DHD_IF_DEL, + DHD_IF_CHANGE, + DHD_IF_DELETING +} dhd_if_state_t; + +#if defined(CONFIG_DHD_USE_STATIC_BUF) + +uint8* dhd_os_prealloc(void *osh, int section, uint size); +void dhd_os_prefree(void *osh, void *addr, uint size); +#define DHD_OS_PREALLOC(osh, section, size) dhd_os_prealloc(osh, section, size) +#define DHD_OS_PREFREE(osh, addr, size) dhd_os_prefree(osh, addr, size) + +#else + +#define DHD_OS_PREALLOC(osh, section, size) MALLOC(osh, size) +#define DHD_OS_PREFREE(osh, addr, size) MFREE(osh, addr, size) + +#endif /* defined(CONFIG_DHD_USE_STATIC_BUF) */ + +/* Packet alignment for most efficient SDIO (can change based on platform) */ +#ifndef DHD_SDALIGN +#define DHD_SDALIGN 32 +#endif + +/* Common structure for module and instance linkage */ +typedef struct dhd_pub { + /* Linkage ponters */ + osl_t *osh; /* OSL handle */ + struct dhd_bus *bus; /* Bus module handle */ + struct dhd_prot *prot; /* Protocol module handle */ + struct dhd_info *info; /* Info module handle */ + struct dhd_cmn *cmn; /* dhd_common module handle */ + + /* Internal dhd items */ + bool up; /* Driver up/down (to OS) */ + bool txoff; /* Transmit flow-controlled */ + bool dongle_reset; /* TRUE = DEVRESET put dongle into reset */ + enum dhd_bus_state busstate; + uint hdrlen; /* Total DHD header length (proto + bus) */ + uint maxctl; /* Max size rxctl request from proto to bus */ + uint rxsz; /* Rx buffer size bus module should use */ + uint8 wme_dp; /* wme discard priority */ + + /* Dongle media info */ + bool iswl; /* Dongle-resident driver is wl */ + ulong drv_version; /* Version of dongle-resident driver */ + struct ether_addr mac; /* MAC address obtained from dongle */ + dngl_stats_t dstats; /* Stats for dongle-based data */ + + /* Additional stats for the bus level */ + ulong tx_packets; /* Data packets sent to dongle */ + ulong tx_multicast; /* Multicast data packets sent to dongle */ + ulong tx_errors; /* Errors in sending data to dongle */ + ulong tx_ctlpkts; /* Control packets sent to dongle */ + ulong tx_ctlerrs; /* Errors sending control frames to dongle */ + ulong rx_packets; /* Packets sent up the network interface */ + ulong rx_multicast; /* Multicast packets sent up the network interface */ + ulong rx_errors; /* Errors processing rx data packets */ + ulong rx_ctlpkts; /* Control frames processed from dongle */ + ulong rx_ctlerrs; /* Errors in processing rx control frames */ + ulong rx_dropped; /* Packets dropped locally (no memory) */ + ulong rx_flushed; /* Packets flushed due to unscheduled sendup thread */ + ulong wd_dpc_sched; /* Number of times dhd dpc scheduled by watchdog timer */ + + ulong rx_readahead_cnt; /* Number of packets where header read-ahead was used. 
*/ + ulong tx_realloc; /* Number of tx packets we had to realloc for headroom */ + ulong fc_packets; /* Number of flow control pkts recvd */ + + /* Last error return */ + int bcmerror; + uint tickcnt; + + /* Last error from dongle */ + int dongle_error; + + /* Suspend disable flag and "in suspend" flag */ + int suspend_disable_flag; /* "1" to disable all extra powersaving during suspend */ + int in_suspend; /* flag set to 1 when early suspend called */ +#ifdef PNO_SUPPORT + int pno_enable; /* pno status : "1" is pno enable */ +#endif /* PNO_SUPPORT */ + int dtim_skip; /* dtim skip , default 0 means wake each dtim */ + + /* Pkt filter defination */ + char * pktfilter[100]; + int pktfilter_count; + + wl_country_t dhd_cspec; /* Current Locale info */ + char eventmask[WL_EVENTING_MASK_LEN]; + int op_mode; /* STA, HostAPD, WFD, SoftAP */ + +/* Set this to 1 to use a seperate interface (p2p0) for p2p operations. + * For ICS MR1 releases it should be disable to be compatable with ICS MR1 Framework + * see target dhd-cdc-sdmmc-panda-cfg80211-icsmr1-gpl-debug in Makefile + */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 + struct mutex wl_start_stop_lock; /* lock/unlock for Android start/stop */ + struct mutex wl_softap_lock; /* lock/unlock for any SoftAP/STA settings */ +#endif + + uint16 maxdatablks; +#ifdef PROP_TXSTATUS + int wlfc_enabled; + void* wlfc_state; +#endif + bool dongle_isolation; + int hang_was_sent; + int rxcnt_timeout; /* counter rxcnt timeout to send HANG */ + int txcnt_timeout; /* counter txcnt timeout to send HANG */ +#ifdef WLMEDIA_HTSF + uint8 htsfdlystat_sz; /* Size of delay stats, max 255B */ +#endif +} dhd_pub_t; + +typedef struct dhd_cmn { + osl_t *osh; /* OSL handle */ + dhd_pub_t *dhd; +} dhd_cmn_t; + + + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) + + #define DHD_PM_RESUME_WAIT_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a); + #define _DHD_PM_RESUME_WAIT(a, b) do {\ + int retry = 0; \ + SMP_RD_BARRIER_DEPENDS(); \ + while (dhd_mmc_suspend && retry++ != b) { \ + SMP_RD_BARRIER_DEPENDS(); \ + wait_event_interruptible_timeout(a, !dhd_mmc_suspend, 1); \ + } \ + } while (0) + #define DHD_PM_RESUME_WAIT(a) _DHD_PM_RESUME_WAIT(a, 200) + #define DHD_PM_RESUME_WAIT_FOREVER(a) _DHD_PM_RESUME_WAIT(a, ~0) + #define DHD_PM_RESUME_RETURN_ERROR(a) do { if (dhd_mmc_suspend) return a; } while (0) + #define DHD_PM_RESUME_RETURN do { if (dhd_mmc_suspend) return; } while (0) + + #define DHD_SPINWAIT_SLEEP_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a); + #define SPINWAIT_SLEEP(a, exp, us) do { \ + uint countdown = (us) + 9999; \ + while ((exp) && (countdown >= 10000)) { \ + wait_event_interruptible_timeout(a, FALSE, 1); \ + countdown -= 10000; \ + } \ + } while (0) + + #else + + #define DHD_PM_RESUME_WAIT_INIT(a) + #define DHD_PM_RESUME_WAIT(a) + #define DHD_PM_RESUME_WAIT_FOREVER(a) + #define DHD_PM_RESUME_RETURN_ERROR(a) + #define DHD_PM_RESUME_RETURN + + #define DHD_SPINWAIT_SLEEP_INIT(a) + #define SPINWAIT_SLEEP(a, exp, us) do { \ + uint countdown = (us) + 9; \ + while ((exp) && (countdown >= 10)) { \ + OSL_DELAY(10); \ + countdown -= 10; \ + } \ + } while (0) + + #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */ +#ifndef DHDTHREAD +#undef SPINWAIT_SLEEP +#define SPINWAIT_SLEEP(a, exp, us) SPINWAIT(exp, us) +#endif /* DHDTHREAD */ +#define DHD_IF_VIF 0x01 /* Virtual IF (Hidden from user) */ + +unsigned long dhd_os_spin_lock(dhd_pub_t *pub); +void dhd_os_spin_unlock(dhd_pub_t *pub, unsigned long flags); + +/* Wakelock 
Functions */ +extern int dhd_os_wake_lock(dhd_pub_t *pub); +extern int dhd_os_wake_unlock(dhd_pub_t *pub); +extern int dhd_os_wake_lock_timeout(dhd_pub_t *pub); +extern int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val); +extern int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val); + +inline static void MUTEX_LOCK_SOFTAP_SET_INIT(dhd_pub_t * dhdp) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 + mutex_init(&dhdp->wl_softap_lock); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ +} + +inline static void MUTEX_LOCK_SOFTAP_SET(dhd_pub_t * dhdp) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 + mutex_lock(&dhdp->wl_softap_lock); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ +} + +inline static void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t * dhdp) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 + mutex_unlock(&dhdp->wl_softap_lock); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ +} + +#define DHD_OS_WAKE_LOCK(pub) dhd_os_wake_lock(pub) +#define DHD_OS_WAKE_UNLOCK(pub) dhd_os_wake_unlock(pub) +#define DHD_OS_WAKE_LOCK_TIMEOUT(pub) dhd_os_wake_lock_timeout(pub) +#define DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(pub, val) dhd_os_wake_lock_rx_timeout_enable(pub, val) +#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(pub, val) dhd_os_wake_lock_ctrl_timeout_enable(pub, val) +#define DHD_PACKET_TIMEOUT_MS 1000 +#define DHD_EVENT_TIMEOUT_MS 1500 + +/* interface operations (register, remove) should be atomic, use this lock to prevent race + * condition among wifi on/off and interface operation functions + */ +void dhd_net_if_lock(struct net_device *dev); +void dhd_net_if_unlock(struct net_device *dev); + +typedef struct dhd_if_event { + uint8 ifidx; + uint8 action; + uint8 flags; + uint8 bssidx; + uint8 is_AP; +} dhd_if_event_t; + +typedef enum dhd_attach_states +{ + DHD_ATTACH_STATE_INIT = 0x0, + DHD_ATTACH_STATE_NET_ALLOC = 0x1, + DHD_ATTACH_STATE_DHD_ALLOC = 0x2, + DHD_ATTACH_STATE_ADD_IF = 0x4, + DHD_ATTACH_STATE_PROT_ATTACH = 0x8, + DHD_ATTACH_STATE_WL_ATTACH = 0x10, + DHD_ATTACH_STATE_THREADS_CREATED = 0x20, + DHD_ATTACH_STATE_WAKELOCKS_INIT = 0x40, + DHD_ATTACH_STATE_CFG80211 = 0x80, + DHD_ATTACH_STATE_EARLYSUSPEND_DONE = 0x100, + DHD_ATTACH_STATE_DONE = 0x200 +} dhd_attach_states_t; + +/* Value -1 means we are unsuccessful in creating the kthread. */ +#define DHD_PID_KT_INVALID -1 +/* Value -2 means we are unsuccessful in both creating the kthread and tasklet */ +#define DHD_PID_KT_TL_INVALID -2 + +/* + * Exported from dhd OS modules (dhd_linux/dhd_ndis) + */ + +/* To allow osl_attach/detach calls from os-independent modules */ +osl_t *dhd_osl_attach(void *pdev, uint bustype); +void dhd_osl_detach(osl_t *osh); + +/* Indication from bus module regarding presence/insertion of dongle. + * Return dhd_pub_t pointer, used as handle to OS module in later calls. + * Returned structure should have bus and prot pointers filled in. + * bus_hdrlen specifies required headroom for bus module header. 
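+ * A rough sketch of the expected call order from a bus module (not a strict
+ * contract): dhd_attach() on dongle discovery, then dhd_bus_start() and
+ * dhd_net_attach() to bring the interface up, and dhd_detach()/dhd_free()
+ * on removal.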
+ */ +extern dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen); +#if defined(WLP2P) && defined(WL_CFG80211) +/* To allow attach/detach calls corresponding to p2p0 interface */ +extern int dhd_attach_p2p(dhd_pub_t *); +extern int dhd_detach_p2p(dhd_pub_t *); +#endif /* WLP2P && WL_CFG80211 */ +extern int dhd_net_attach(dhd_pub_t *dhdp, int idx); + +/* Indication from bus module regarding removal/absence of dongle */ +extern void dhd_detach(dhd_pub_t *dhdp); +extern void dhd_free(dhd_pub_t *dhdp); + +/* Indication from bus module to change flow-control state */ +extern void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool on); + +extern bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec); + +/* Receive frame for delivery to OS. Callee disposes of rxp. */ +extern void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *rxp, int numpkt, uint8 chan); + +/* Return pointer to interface name */ +extern char *dhd_ifname(dhd_pub_t *dhdp, int idx); + +/* Request scheduling of the bus dpc */ +extern void dhd_sched_dpc(dhd_pub_t *dhdp); + +/* Notify tx completion */ +extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success); + +/* OS independent layer functions */ +extern int dhd_os_proto_block(dhd_pub_t * pub); +extern int dhd_os_proto_unblock(dhd_pub_t * pub); +extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition, bool * pending); +extern int dhd_os_ioctl_resp_wake(dhd_pub_t * pub); +extern unsigned int dhd_os_get_ioctl_resp_timeout(void); +extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec); +extern void * dhd_os_open_image(char * filename); +extern int dhd_os_get_image_block(char * buf, int len, void * image); +extern void dhd_os_close_image(void * image); +extern void dhd_os_wd_timer(void *bus, uint wdtick); +extern void dhd_os_sdlock(dhd_pub_t * pub); +extern void dhd_os_sdunlock(dhd_pub_t * pub); +extern void dhd_os_sdlock_txq(dhd_pub_t * pub); +extern void dhd_os_sdunlock_txq(dhd_pub_t * pub); +extern void dhd_os_sdlock_rxq(dhd_pub_t * pub); +extern void dhd_os_sdunlock_rxq(dhd_pub_t * pub); +extern void dhd_os_sdlock_sndup_rxq(dhd_pub_t * pub); +extern void dhd_customer_gpio_wlan_ctrl(int onoff); +extern int dhd_custom_get_mac_address(unsigned char *buf); +extern void dhd_os_sdunlock_sndup_rxq(dhd_pub_t * pub); +extern void dhd_os_sdlock_eventq(dhd_pub_t * pub); +extern void dhd_os_sdunlock_eventq(dhd_pub_t * pub); +extern bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret); +extern int dhd_os_send_hang_message(dhd_pub_t *dhdp); +extern int net_os_send_hang_message(struct net_device *dev); +extern void dhd_set_version_info(dhd_pub_t *pub, char *fw); + +#ifdef PNO_SUPPORT +extern int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled); +extern int dhd_pno_clean(dhd_pub_t *dhd); +extern int dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid, + ushort scan_fr, int pno_repeat, int pno_freq_expo_max); +extern int dhd_pno_set_ex(dhd_pub_t *dhd, wl_pfn_t* ssidnet, int nssid, + ushort pno_interval, int pno_repeat, int pno_expo_max, int pno_lost_time); +extern int dhd_pno_get_status(dhd_pub_t *dhd); +extern int dhd_dev_pno_reset(struct net_device *dev); +extern int dhd_dev_pno_set(struct net_device *dev, wlc_ssid_t* ssids_local, + int nssid, ushort scan_fr, int pno_repeat, int pno_freq_expo_max); +extern int dhd_dev_pno_set_ex(struct net_device *dev, wl_pfn_t* ssidnet, int nssid, + ushort pno_interval, int pno_repeat, int pno_expo_max, int pno_lost_time); +extern int dhd_dev_pno_enable(struct net_device 
*dev, int pfn_enabled); +extern int dhd_dev_get_pno_status(struct net_device *dev); +#endif /* PNO_SUPPORT */ + +#define DHD_UNICAST_FILTER_NUM 0 +#define DHD_BROADCAST_FILTER_NUM 1 +#define DHD_MULTICAST4_FILTER_NUM 2 +#define DHD_MULTICAST6_FILTER_NUM 3 +#define DHD_MDNS_FILTER_NUM 4 +extern int dhd_os_set_packet_filter(dhd_pub_t *dhdp, int val); +extern int net_os_set_packet_filter(struct net_device *dev, int val); +extern int net_os_rxfilter_add_remove(struct net_device *dev, int val, int num); + +extern int dhd_get_dtim_skip(dhd_pub_t *dhd); +extern bool dhd_check_ap_wfd_mode_set(dhd_pub_t *dhd); + +#ifdef DHD_DEBUG +extern int write_to_file(dhd_pub_t *dhd, uint8 *buf, int size); +#endif /* DHD_DEBUG */ +#if defined(OOB_INTR_ONLY) +extern int dhd_customer_oob_irq_map(unsigned long *irq_flags_ptr); +#endif /* defined(OOB_INTR_ONLY) */ +extern void dhd_os_sdtxlock(dhd_pub_t * pub); +extern void dhd_os_sdtxunlock(dhd_pub_t * pub); + +typedef struct { + uint32 limit; /* Expiration time (usec) */ + uint32 increment; /* Current expiration increment (usec) */ + uint32 elapsed; /* Current elapsed time (usec) */ + uint32 tick; /* O/S tick time (usec) */ +} dhd_timeout_t; + +extern void dhd_timeout_start(dhd_timeout_t *tmo, uint usec); +extern int dhd_timeout_expired(dhd_timeout_t *tmo); + +extern int dhd_ifname2idx(struct dhd_info *dhd, char *name); +extern int dhd_net2idx(struct dhd_info *dhd, struct net_device *net); +extern struct net_device * dhd_idx2net(void *pub, int ifidx); +extern int wl_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata, + wl_event_msg_t *, void **data_ptr); +extern void wl_event_to_host_order(wl_event_msg_t * evt); + +extern int dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len); +extern int dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, + int ifindex); + +extern struct dhd_cmn *dhd_common_init(osl_t *osh); +extern void dhd_common_deinit(dhd_pub_t *dhd_pub, dhd_cmn_t *sa_cmn); + +extern int dhd_do_driver_init(struct net_device *net); +extern int dhd_add_if(struct dhd_info *dhd, int ifidx, void *handle, + char *name, uint8 *mac_addr, uint32 flags, uint8 bssidx); +extern void dhd_del_if(struct dhd_info *dhd, int ifidx); + +extern void dhd_vif_add(struct dhd_info *dhd, int ifidx, char * name); +extern void dhd_vif_del(struct dhd_info *dhd, int ifidx); + +extern void dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx); +extern void dhd_vif_sendup(struct dhd_info *dhd, int ifidx, uchar *cp, int len); + + +/* Send packet to dongle via data channel */ +extern int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pkt); + +/* send up locally generated event */ +extern void dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data); +/* Send event to host */ +extern void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data); +extern int dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag); +extern uint dhd_bus_status(dhd_pub_t *dhdp); +extern int dhd_bus_start(dhd_pub_t *dhdp); +extern int dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size); +extern void dhd_print_buf(void *pbuf, int len, int bytes_per_line); +extern bool dhd_is_associated(dhd_pub_t *dhd, void *bss_buf, int *retval); +extern uint dhd_bus_chip_id(dhd_pub_t *dhdp); +extern uint dhd_bus_chiprev_id(dhd_pub_t *dhdp); +extern uint dhd_bus_chippkg_id(dhd_pub_t *dhdp); + +#if defined(KEEP_ALIVE) +extern int dhd_keep_alive_onoff(dhd_pub_t *dhd); +#endif /* KEEP_ALIVE */ + +#ifdef 
ARP_OFFLOAD_SUPPORT +extern void dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode); +extern void dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable); +#endif /* ARP_OFFLOAD_SUPPORT */ + + +typedef enum cust_gpio_modes { + WLAN_RESET_ON, + WLAN_RESET_OFF, + WLAN_POWER_ON, + WLAN_POWER_OFF +} cust_gpio_modes_t; + +extern int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag); +extern int wl_iw_send_priv_event(struct net_device *dev, char *flag); +/* + * Insmod parameters for debug/test + */ + +/* Watchdog timer interval */ +extern uint dhd_watchdog_ms; + +#if defined(DHD_DEBUG) +/* Console output poll interval */ +extern uint dhd_console_ms; +extern uint wl_msg_level; +#endif /* defined(DHD_DEBUG) */ + +/* Use interrupts */ +extern uint dhd_intr; + +/* Use polling */ +extern uint dhd_poll; + +/* ARP offload agent mode */ +extern uint dhd_arp_mode; + +/* ARP offload enable */ +extern uint dhd_arp_enable; + +/* Pkt filte enable control */ +extern uint dhd_pkt_filter_enable; + +/* Pkt filter init setup */ +extern uint dhd_pkt_filter_init; + +/* Pkt filter mode control */ +extern uint dhd_master_mode; + +/* Roaming mode control */ +extern uint dhd_roam_disable; + +/* Roaming mode control */ +extern uint dhd_radio_up; + +/* Initial idletime ticks (may be -1 for immediate idle, 0 for no idle) */ +extern int dhd_idletime; +#define DHD_IDLETIME_TICKS 1 + +/* SDIO Drive Strength */ +extern uint dhd_sdiod_drive_strength; + +/* Override to force tx queueing all the time */ +extern uint dhd_force_tx_queueing; +/* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */ +#define KEEP_ALIVE_PERIOD 55000 +#define NULL_PKT_STR "null_pkt" + +#ifdef SDTEST +/* Echo packet generator (SDIO), pkts/s */ +extern uint dhd_pktgen; + +/* Echo packet len (0 => sawtooth, max 1800) */ +extern uint dhd_pktgen_len; +#define MAX_PKTGEN_LEN 1800 +#endif + + +/* optionally set by a module_param_string() */ +#define MOD_PARAM_PATHLEN 2048 +extern char fw_path[MOD_PARAM_PATHLEN]; +extern char nv_path[MOD_PARAM_PATHLEN]; + +#define MOD_PARAM_INFOLEN 512 + +#ifdef SOFTAP +extern char fw_path2[MOD_PARAM_PATHLEN]; +#endif + +/* Flag to indicate if we should download firmware on driver load */ +extern uint dhd_download_fw_on_driverload; + +/* For supporting multiple interfaces */ +#define DHD_MAX_IFS 16 +#define DHD_DEL_IF -0xe +#define DHD_BAD_IF -0xf + +#ifdef PROP_TXSTATUS +/* Please be mindful that total pkttag space is 32 octets only */ +typedef struct dhd_pkttag { + /* + b[11 ] - 1 = this packet was sent in response to one time packet request, + do not increment credit on status for this one. [WLFC_CTL_TYPE_MAC_REQUEST_PACKET]. + b[10 ] - 1 = signal-only-packet to firmware [i.e. nothing to piggyback on] + b[9 ] - 1 = packet is host->firmware (transmit direction) + - 0 = packet received from firmware (firmware->host) + b[8 ] - 1 = packet was sent due to credit_request (pspoll), + packet does not count against FIFO credit. + - 0 = normal transaction, packet counts against FIFO credit + b[7 ] - 1 = AP, 0 = STA + b[6:4] - AC FIFO number + b[3:0] - interface index + */ + uint16 if_flags; + /* destination MAC address for this packet so that not every + module needs to open the packet to find this + */ + uint8 dstn_ether[ETHER_ADDR_LEN]; + /* + This 32-bit goes from host to device for every packet. 
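+	(This tag, like the if_flags bitfield above, is meant to be accessed
+	through the DHD_PKTTAG_* macros defined below, e.g. DHD_PKTTAG_SET_H2DTAG()
+	and DHD_PKTTAG_H2DTAG(), rather than by open-coding shifts and masks.)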
+ */ + uint32 htod_tag; + /* bus specific stuff */ + union { + struct { + void* stuff; + uint32 thing1; + uint32 thing2; + } sd; + struct { + void* bus; + void* urb; + } usb; + } bus_specific; +} dhd_pkttag_t; + +#define DHD_PKTTAG_SET_H2DTAG(tag, h2dvalue) ((dhd_pkttag_t*)(tag))->htod_tag = (h2dvalue) +#define DHD_PKTTAG_H2DTAG(tag) (((dhd_pkttag_t*)(tag))->htod_tag) + +#define DHD_PKTTAG_IFMASK 0xf +#define DHD_PKTTAG_IFTYPE_MASK 0x1 +#define DHD_PKTTAG_IFTYPE_SHIFT 7 +#define DHD_PKTTAG_FIFO_MASK 0x7 +#define DHD_PKTTAG_FIFO_SHIFT 4 + +#define DHD_PKTTAG_SIGNALONLY_MASK 0x1 +#define DHD_PKTTAG_SIGNALONLY_SHIFT 10 + +#define DHD_PKTTAG_ONETIMEPKTRQST_MASK 0x1 +#define DHD_PKTTAG_ONETIMEPKTRQST_SHIFT 11 + +#define DHD_PKTTAG_PKTDIR_MASK 0x1 +#define DHD_PKTTAG_PKTDIR_SHIFT 9 + +#define DHD_PKTTAG_CREDITCHECK_MASK 0x1 +#define DHD_PKTTAG_CREDITCHECK_SHIFT 8 + +#define DHD_PKTTAG_INVALID_FIFOID 0x7 + +#define DHD_PKTTAG_SETFIFO(tag, fifo) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_FIFO_MASK << DHD_PKTTAG_FIFO_SHIFT)) | \ + (((fifo) & DHD_PKTTAG_FIFO_MASK) << DHD_PKTTAG_FIFO_SHIFT) +#define DHD_PKTTAG_FIFO(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_FIFO_SHIFT) & DHD_PKTTAG_FIFO_MASK) + +#define DHD_PKTTAG_SETIF(tag, if) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & ~DHD_PKTTAG_IFMASK) | ((if) & DHD_PKTTAG_IFMASK) +#define DHD_PKTTAG_IF(tag) (((dhd_pkttag_t*)(tag))->if_flags & DHD_PKTTAG_IFMASK) + +#define DHD_PKTTAG_SETIFTYPE(tag, isAP) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_IFTYPE_MASK << DHD_PKTTAG_IFTYPE_SHIFT)) | \ + (((isAP) & DHD_PKTTAG_IFTYPE_MASK) << DHD_PKTTAG_IFTYPE_SHIFT) +#define DHD_PKTTAG_IFTYPE(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_IFTYPE_SHIFT) & DHD_PKTTAG_IFTYPE_MASK) + +#define DHD_PKTTAG_SETCREDITCHECK(tag, check) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_CREDITCHECK_MASK << DHD_PKTTAG_CREDITCHECK_SHIFT)) | \ + (((check) & DHD_PKTTAG_CREDITCHECK_MASK) << DHD_PKTTAG_CREDITCHECK_SHIFT) +#define DHD_PKTTAG_CREDITCHECK(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_CREDITCHECK_SHIFT) & DHD_PKTTAG_CREDITCHECK_MASK) + +#define DHD_PKTTAG_SETPKTDIR(tag, dir) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_PKTDIR_MASK << DHD_PKTTAG_PKTDIR_SHIFT)) | \ + (((dir) & DHD_PKTTAG_PKTDIR_MASK) << DHD_PKTTAG_PKTDIR_SHIFT) +#define DHD_PKTTAG_PKTDIR(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_PKTDIR_SHIFT) & DHD_PKTTAG_PKTDIR_MASK) + +#define DHD_PKTTAG_SETSIGNALONLY(tag, signalonly) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_SIGNALONLY_MASK << DHD_PKTTAG_SIGNALONLY_SHIFT)) | \ + (((signalonly) & DHD_PKTTAG_SIGNALONLY_MASK) << DHD_PKTTAG_SIGNALONLY_SHIFT) +#define DHD_PKTTAG_SIGNALONLY(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_SIGNALONLY_SHIFT) & DHD_PKTTAG_SIGNALONLY_MASK) + +#define DHD_PKTTAG_SETONETIMEPKTRQST(tag) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_ONETIMEPKTRQST_MASK << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)) | \ + (1 << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT) +#define DHD_PKTTAG_ONETIMEPKTRQST(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_ONETIMEPKTRQST_SHIFT) & DHD_PKTTAG_ONETIMEPKTRQST_MASK) + +#define DHD_PKTTAG_SETDSTN(tag, dstn_MAC_ea) memcpy(((dhd_pkttag_t*)((tag)))->dstn_ether, \ + 
(dstn_MAC_ea), ETHER_ADDR_LEN) +#define DHD_PKTTAG_DSTN(tag) ((dhd_pkttag_t*)(tag))->dstn_ether + +typedef int (*f_commitpkt_t)(void* ctx, void* p); + +#ifdef PROP_TXSTATUS_DEBUG +#define DHD_WLFC_CTRINC_MAC_CLOSE(entry) do { (entry)->closed_ct++; } while (0) +#define DHD_WLFC_CTRINC_MAC_OPEN(entry) do { (entry)->opened_ct++; } while (0) +#else +#define DHD_WLFC_CTRINC_MAC_CLOSE(entry) do {} while (0) +#define DHD_WLFC_CTRINC_MAC_OPEN(entry) do {} while (0) +#endif + +#endif /* PROP_TXSTATUS */ + +extern void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar); +extern void dhd_wait_event_wakeup(dhd_pub_t*dhd); + +#ifdef ARP_OFFLOAD_SUPPORT +#define MAX_IPV4_ENTRIES 8 +/* dhd_commn arp offload wrapers */ +void dhd_aoe_hostip_clr(dhd_pub_t *dhd); +void dhd_aoe_arp_clr(dhd_pub_t *dhd); +int dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen); +void dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr); +#endif /* ARP_OFFLOAD_SUPPORT */ + +#endif /* _dhd_h_ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_bta.c b/drivers/net/wireless/bcmdhd/dhd_bta.c new file mode 100644 index 0000000000000..6b782ea4a4d2a --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_bta.c @@ -0,0 +1,335 @@ +/* + * BT-AMP support routines + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: dhd_bta.c,v 1.10.4.2 2010-12-22 23:47:23 Exp $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + + +#ifdef SEND_HCI_CMD_VIA_IOCTL +#define BTA_HCI_CMD_MAX_LEN HCI_CMD_PREAMBLE_SIZE + HCI_CMD_DATA_SIZE + +/* Send HCI cmd via wl iovar HCI_cmd to the dongle. 
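+ * (This variant packs the command with bcm_mkiovar("HCI_cmd", ...) and issues
+ * it as a WLC_SET_VAR ioctl; it is only built when SEND_HCI_CMD_VIA_IOCTL is
+ * defined, otherwise the data-channel path further below is used.)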
*/ +int +dhd_bta_docmd(dhd_pub_t *pub, void *cmd_buf, uint cmd_len) +{ + amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)cmd_buf; + uint8 buf[BTA_HCI_CMD_MAX_LEN + 16]; + uint len = sizeof(buf); + wl_ioctl_t ioc; + + if (cmd_len < HCI_CMD_PREAMBLE_SIZE) + return BCME_BADLEN; + + if ((uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE > cmd_len) + return BCME_BADLEN; + + len = bcm_mkiovar("HCI_cmd", + (char *)cmd, (uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE, (char *)buf, len); + + + memset(&ioc, 0, sizeof(ioc)); + + ioc.cmd = WLC_SET_VAR; + ioc.buf = buf; + ioc.len = len; + ioc.set = TRUE; + + return dhd_wl_ioctl(pub, &ioc, ioc.buf, ioc.len); +} +#else /* !SEND_HCI_CMD_VIA_IOCTL */ + +static void +dhd_bta_flush_hcidata(dhd_pub_t *pub, uint16 llh) +{ + int prec; + struct pktq *q; + uint count = 0; + + q = dhd_bus_txq(pub->bus); + if (q == NULL) + return; + + DHD_BTA(("dhd: flushing HCI ACL data for logical link %u...\n", llh)); + + dhd_os_sdlock_txq(pub); + + /* Walk through the txq and toss all HCI ACL data packets */ + PKTQ_PREC_ITER(q, prec) { + void *head_pkt = NULL; + + while (pktq_ppeek(q, prec) != head_pkt) { + void *pkt = pktq_pdeq(q, prec); + int ifidx; + + PKTPULL(pub->osh, pkt, dhd_bus_hdrlen(pub->bus)); + dhd_prot_hdrpull(pub, &ifidx, pkt); + + if (PKTLEN(pub->osh, pkt) >= RFC1042_HDR_LEN) { + struct ether_header *eh = + (struct ether_header *)PKTDATA(pub->osh, pkt); + + if (ntoh16(eh->ether_type) < ETHER_TYPE_MIN) { + struct dot11_llc_snap_header *lsh = + (struct dot11_llc_snap_header *)&eh[1]; + + if (bcmp(lsh, BT_SIG_SNAP_MPROT, + DOT11_LLC_SNAP_HDR_LEN - 2) == 0 && + ntoh16(lsh->type) == BTA_PROT_L2CAP) { + amp_hci_ACL_data_t *ACL_data = + (amp_hci_ACL_data_t *)&lsh[1]; + uint16 handle = ltoh16(ACL_data->handle); + + if (HCI_ACL_DATA_HANDLE(handle) == llh) { + PKTFREE(pub->osh, pkt, TRUE); + count ++; + continue; + } + } + } + } + + dhd_prot_hdrpush(pub, ifidx, pkt); + PKTPUSH(pub->osh, pkt, dhd_bus_hdrlen(pub->bus)); + + if (head_pkt == NULL) + head_pkt = pkt; + pktq_penq(q, prec, pkt); + } + } + + dhd_os_sdunlock_txq(pub); + + DHD_BTA(("dhd: flushed %u packet(s) for logical link %u...\n", count, llh)); +} + +/* Handle HCI cmd locally. + * Return 0: continue to send the cmd across SDIO + * < 0: stop, fail + * > 0: stop, succuess + */ +static int +_dhd_bta_docmd(dhd_pub_t *pub, amp_hci_cmd_t *cmd) +{ + int status = 0; + + switch (ltoh16_ua((uint8 *)&cmd->opcode)) { + case HCI_Enhanced_Flush: { + eflush_cmd_parms_t *cmdparms = (eflush_cmd_parms_t *)cmd->parms; + dhd_bta_flush_hcidata(pub, ltoh16_ua(cmdparms->llh)); + break; + } + default: + break; + } + + return status; +} + +/* Send HCI cmd encapsulated in BT-SIG frame via data channel to the dongle. 
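+ * (The command is prefixed with an Ethernet header addressed to a locally
+ * administered MAC and a BT-SIG LLC/SNAP header, as built below, so it rides
+ * the normal dhd_sendpkt() data path rather than an ioctl.)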
*/ +int +dhd_bta_docmd(dhd_pub_t *pub, void *cmd_buf, uint cmd_len) +{ + amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)cmd_buf; + struct ether_header *eh; + struct dot11_llc_snap_header *lsh; + osl_t *osh = pub->osh; + uint len; + void *p; + int status; + + if (cmd_len < HCI_CMD_PREAMBLE_SIZE) { + DHD_ERROR(("dhd_bta_docmd: short command, cmd_len %u\n", cmd_len)); + return BCME_BADLEN; + } + + if ((len = (uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE) > cmd_len) { + DHD_ERROR(("dhd_bta_docmd: malformed command, len %u cmd_len %u\n", + len, cmd_len)); + /* return BCME_BADLEN; */ + } + + p = PKTGET(osh, pub->hdrlen + RFC1042_HDR_LEN + len, TRUE); + if (p == NULL) { + DHD_ERROR(("dhd_bta_docmd: out of memory\n")); + return BCME_NOMEM; + } + + + /* intercept and handle the HCI cmd locally */ + if ((status = _dhd_bta_docmd(pub, cmd)) > 0) + return 0; + else if (status < 0) + return status; + + /* copy in HCI cmd */ + PKTPULL(osh, p, pub->hdrlen + RFC1042_HDR_LEN); + bcopy(cmd, PKTDATA(osh, p), len); + + /* copy in partial Ethernet header with BT-SIG LLC/SNAP header */ + PKTPUSH(osh, p, RFC1042_HDR_LEN); + eh = (struct ether_header *)PKTDATA(osh, p); + bzero(eh->ether_dhost, ETHER_ADDR_LEN); + ETHER_SET_LOCALADDR(eh->ether_dhost); + bcopy(&pub->mac, eh->ether_shost, ETHER_ADDR_LEN); + eh->ether_type = hton16(len + DOT11_LLC_SNAP_HDR_LEN); + lsh = (struct dot11_llc_snap_header *)&eh[1]; + bcopy(BT_SIG_SNAP_MPROT, lsh, DOT11_LLC_SNAP_HDR_LEN - 2); + lsh->type = 0; + + return dhd_sendpkt(pub, 0, p); +} +#endif /* !SEND_HCI_CMD_VIA_IOCTL */ + +/* Send HCI ACL data to dongle via data channel */ +int +dhd_bta_tx_hcidata(dhd_pub_t *pub, void *data_buf, uint data_len) +{ + amp_hci_ACL_data_t *data = (amp_hci_ACL_data_t *)data_buf; + struct ether_header *eh; + struct dot11_llc_snap_header *lsh; + osl_t *osh = pub->osh; + uint len; + void *p; + + if (data_len < HCI_ACL_DATA_PREAMBLE_SIZE) { + DHD_ERROR(("dhd_bta_tx_hcidata: short data_buf, data_len %u\n", data_len)); + return BCME_BADLEN; + } + + if ((len = (uint)ltoh16(data->dlen) + HCI_ACL_DATA_PREAMBLE_SIZE) > data_len) { + DHD_ERROR(("dhd_bta_tx_hcidata: malformed hci data, len %u data_len %u\n", + len, data_len)); + /* return BCME_BADLEN; */ + } + + p = PKTGET(osh, pub->hdrlen + RFC1042_HDR_LEN + len, TRUE); + if (p == NULL) { + DHD_ERROR(("dhd_bta_tx_hcidata: out of memory\n")); + return BCME_NOMEM; + } + + + /* copy in HCI ACL data header and HCI ACL data */ + PKTPULL(osh, p, pub->hdrlen + RFC1042_HDR_LEN); + bcopy(data, PKTDATA(osh, p), len); + + /* copy in partial Ethernet header with BT-SIG LLC/SNAP header */ + PKTPUSH(osh, p, RFC1042_HDR_LEN); + eh = (struct ether_header *)PKTDATA(osh, p); + bzero(eh->ether_dhost, ETHER_ADDR_LEN); + bcopy(&pub->mac, eh->ether_shost, ETHER_ADDR_LEN); + eh->ether_type = hton16(len + DOT11_LLC_SNAP_HDR_LEN); + lsh = (struct dot11_llc_snap_header *)&eh[1]; + bcopy(BT_SIG_SNAP_MPROT, lsh, DOT11_LLC_SNAP_HDR_LEN - 2); + lsh->type = HTON16(BTA_PROT_L2CAP); + + return dhd_sendpkt(pub, 0, p); +} + +/* txcomplete callback */ +void +dhd_bta_tx_hcidata_complete(dhd_pub_t *dhdp, void *txp, bool success) +{ + uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, txp); + amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)(pktdata + RFC1042_HDR_LEN); + uint16 handle = ltoh16(ACL_data->handle); + uint16 llh = HCI_ACL_DATA_HANDLE(handle); + + wl_event_msg_t event; + uint8 data[HCI_EVT_PREAMBLE_SIZE + sizeof(num_completed_data_blocks_evt_parms_t)]; + amp_hci_event_t *evt; + num_completed_data_blocks_evt_parms_t *parms; + + uint16 len = 
HCI_EVT_PREAMBLE_SIZE + sizeof(num_completed_data_blocks_evt_parms_t); + + /* update the event struct */ + memset(&event, 0, sizeof(event)); + event.version = hton16(BCM_EVENT_MSG_VERSION); + event.event_type = hton32(WLC_E_BTA_HCI_EVENT); + event.status = 0; + event.reason = 0; + event.auth_type = 0; + event.datalen = hton32(len); + event.flags = 0; + + /* generate Number of Completed Blocks event */ + evt = (amp_hci_event_t *)data; + evt->ecode = HCI_Number_of_Completed_Data_Blocks; + evt->plen = sizeof(num_completed_data_blocks_evt_parms_t); + + parms = (num_completed_data_blocks_evt_parms_t *)evt->parms; + htol16_ua_store(dhdp->maxdatablks, (uint8 *)&parms->num_blocks); + parms->num_handles = 1; + htol16_ua_store(llh, (uint8 *)&parms->completed[0].handle); + parms->completed[0].pkts = 1; + parms->completed[0].blocks = 1; + + dhd_sendup_event_common(dhdp, &event, data); +} + +/* event callback */ +void +dhd_bta_doevt(dhd_pub_t *dhdp, void *data_buf, uint data_len) +{ + amp_hci_event_t *evt = (amp_hci_event_t *)data_buf; + + switch (evt->ecode) { + case HCI_Command_Complete: { + cmd_complete_parms_t *parms = (cmd_complete_parms_t *)evt->parms; + switch (ltoh16_ua((uint8 *)&parms->opcode)) { + case HCI_Read_Data_Block_Size: { + read_data_block_size_evt_parms_t *parms2 = + (read_data_block_size_evt_parms_t *)parms->parms; + dhdp->maxdatablks = ltoh16_ua((uint8 *)&parms2->data_block_num); + break; + } + } + break; + } + + case HCI_Flush_Occurred: { + flush_occurred_evt_parms_t *evt_parms = (flush_occurred_evt_parms_t *)evt->parms; + dhd_bta_flush_hcidata(dhdp, ltoh16_ua((uint8 *)&evt_parms->handle)); + break; + } + default: + break; + } +} diff --git a/drivers/net/wireless/bcmdhd/dhd_bta.h b/drivers/net/wireless/bcmdhd/dhd_bta.h new file mode 100644 index 0000000000000..07d9cebb883a4 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_bta.h @@ -0,0 +1,39 @@ +/* + * BT-AMP support routines + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: dhd_bta.h,v 1.2 2009-02-26 22:35:56 Exp $ + */ +#ifndef __dhd_bta_h__ +#define __dhd_bta_h__ + +struct dhd_pub; + +extern int dhd_bta_docmd(struct dhd_pub *pub, void *cmd_buf, uint cmd_len); + +extern void dhd_bta_doevt(struct dhd_pub *pub, void *data_buf, uint data_len); + +extern int dhd_bta_tx_hcidata(struct dhd_pub *pub, void *data_buf, uint data_len); +extern void dhd_bta_tx_hcidata_complete(struct dhd_pub *dhdp, void *txp, bool success); + + +#endif /* __dhd_bta_h__ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_bus.h b/drivers/net/wireless/bcmdhd/dhd_bus.h new file mode 100644 index 0000000000000..bccb8b6603f81 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_bus.h @@ -0,0 +1,99 @@ +/* + * Header file describing the internal (inter-module) DHD interfaces. + * + * Provides type definitions and function prototypes used to link the + * DHD OS, bus, and protocol modules. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: dhd_bus.h,v 1.14.28.1 2010-12-23 01:13:17 Exp $ + */ + +#ifndef _dhd_bus_h_ +#define _dhd_bus_h_ + +/* + * Exported from dhd bus module (dhd_usb, dhd_sdio) + */ + +/* Indicate (dis)interest in finding dongles. */ +extern int dhd_bus_register(void); +extern void dhd_bus_unregister(void); + +/* Download firmware image and nvram image */ +extern bool dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, + char *fw_path, char *nv_path); + +/* Stop bus module: clear pending frames, disable data flow */ +extern void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex); + +/* Initialize bus module: prepare for communication w/dongle */ +extern int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex); + +/* Get the Bus Idle Time */ +extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int *idletime); + +/* Set the Bus Idle Time*/ +extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time); +/* Send a data frame to the dongle. Callee disposes of txp. */ +extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp); + +/* Send/receive a control message to/from the dongle. + * Expects caller to enforce a single outstanding transaction. 
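+ * In this driver that serialization is provided by the CDC protocol
+ * layer, which refuses to start a new ioctl while one is still pending
+ * (see dhd_prot_ioctl() in dhd_cdc.c).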
+ */ +extern int dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen); +extern int dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen); + +/* Watchdog timer function */ +extern bool dhd_bus_watchdog(dhd_pub_t *dhd); +extern void dhd_disable_intr(dhd_pub_t *dhd); + +#if defined(DHD_DEBUG) +/* Device console input function */ +extern int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen); +#endif /* defined(DHD_DEBUG) */ + +/* Deferred processing for the bus, return TRUE requests reschedule */ +extern bool dhd_bus_dpc(struct dhd_bus *bus); +extern void dhd_bus_isr(bool * InterruptRecognized, bool * QueueMiniportHandleInterrupt, void *arg); + + +/* Check for and handle local prot-specific iovar commands */ +extern int dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set); + +/* Add bus dump output to a buffer */ +extern void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); + +/* Clear any bus counters */ +extern void dhd_bus_clearcounts(dhd_pub_t *dhdp); + +/* return the dongle chipid */ +extern uint dhd_bus_chip(struct dhd_bus *bus); + +/* Set user-specified nvram parameters. */ +extern void dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params); + +extern void *dhd_bus_pub(struct dhd_bus *bus); +extern void *dhd_bus_txq(struct dhd_bus *bus); +extern uint dhd_bus_hdrlen(struct dhd_bus *bus); + +#endif /* _dhd_bus_h_ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_cdc.c b/drivers/net/wireless/bcmdhd/dhd_cdc.c new file mode 100644 index 0000000000000..f16d81c9e198b --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_cdc.c @@ -0,0 +1,2534 @@ +/* + * DHD Protocol Module for CDC and BDC. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: dhd_cdc.c 324280 2012-03-28 19:01:17Z $ + * + * BDC is like CDC, except it includes a header for data packets to convey + * packet priority over the bus, and flags (e.g. to indicate checksum status + * for dongle offload.) + */ + +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + + +#ifdef PROP_TXSTATUS +#include +#include +#endif + + +#define RETRIES 2 /* # of retries to retrieve matching ioctl response */ +#define BUS_HEADER_LEN (16+DHD_SDALIGN) /* Must be at least SDPCM_RESERVE + * defined in dhd_sdio.c (amount of header tha might be added) + * plus any space that might be needed for alignment padding. 
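+ * dhd_prot_t places a bus_header[] of this size immediately in front of
+ * the CDC message, giving the bus layer headroom to prepend its own
+ * header in place:  [ bus_header | cdc_ioctl_t msg | buf ]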
+ */ +#define ROUND_UP_MARGIN 2048 /* Biggest SDIO block size possible for + * round off at the end of buffer + */ + +#define BUS_RETRIES 1 /* # of retries before aborting a bus tx operation */ + +#ifdef PROP_TXSTATUS +typedef struct dhd_wlfc_commit_info { + uint8 needs_hdr; + uint8 ac_fifo_credit_spent; + ewlfc_packet_state_t pkt_type; + wlfc_mac_descriptor_t* mac_entry; + void* p; +} dhd_wlfc_commit_info_t; +#endif /* PROP_TXSTATUS */ + +typedef struct dhd_prot { + uint16 reqid; + uint8 pending; + uint32 lastcmd; + uint8 bus_header[BUS_HEADER_LEN]; + cdc_ioctl_t msg; + unsigned char buf[WLC_IOCTL_MAXLEN + ROUND_UP_MARGIN]; +} dhd_prot_t; + +extern int dhd_dbus_txdata(dhd_pub_t *dhdp, void *pktbuf); + +static int +dhdcdc_msg(dhd_pub_t *dhd) +{ + int err = 0; + dhd_prot_t *prot = dhd->prot; + int len = ltoh32(prot->msg.len) + sizeof(cdc_ioctl_t); + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + DHD_OS_WAKE_LOCK(dhd); + + /* NOTE : cdc->msg.len holds the desired length of the buffer to be + * returned. Only up to CDC_MAX_MSG_SIZE of this buffer area + * is actually sent to the dongle + */ + if (len > CDC_MAX_MSG_SIZE) + len = CDC_MAX_MSG_SIZE; + + /* Send request */ + err = dhd_bus_txctl(dhd->bus, (uchar*)&prot->msg, len); + + DHD_OS_WAKE_UNLOCK(dhd); + return err; +} + +static int +dhdcdc_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len) +{ + int ret; + int cdc_len = len+sizeof(cdc_ioctl_t); + dhd_prot_t *prot = dhd->prot; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + do { + ret = dhd_bus_rxctl(dhd->bus, (uchar*)&prot->msg, cdc_len); + if (ret < 0) + break; + } while (CDC_IOC_ID(ltoh32(prot->msg.flags)) != id); + + return ret; +} + +static int +dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action) +{ + dhd_prot_t *prot = dhd->prot; + cdc_ioctl_t *msg = &prot->msg; + void *info; + int ret = 0, retries = 0; + uint32 id, flags = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len)); + + + /* Respond "bcmerror" and "bcmerrorstr" with local cache */ + if (cmd == WLC_GET_VAR && buf) + { + if (!strcmp((char *)buf, "bcmerrorstr")) + { + strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN); + goto done; + } + else if (!strcmp((char *)buf, "bcmerror")) + { + *(int *)buf = dhd->dongle_error; + goto done; + } + } + + memset(msg, 0, sizeof(cdc_ioctl_t)); + + msg->cmd = htol32(cmd); + msg->len = htol32(len); + msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT); + CDC_SET_IF_IDX(msg, ifidx); + /* add additional action bits */ + action &= WL_IOCTL_ACTION_MASK; + msg->flags |= (action << CDCF_IOC_ACTION_SHIFT); + msg->flags = htol32(msg->flags); + + if (buf) + memcpy(prot->buf, buf, len); + + if ((ret = dhdcdc_msg(dhd)) < 0) { + if (!dhd->hang_was_sent) + DHD_ERROR(("dhdcdc_query_ioctl: dhdcdc_msg failed w/status %d\n", ret)); + goto done; + } + +retry: + /* wait for interrupt and get first fragment */ + if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0) + goto done; + + flags = ltoh32(msg->flags); + id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT; + + if ((id < prot->reqid) && (++retries < RETRIES)) + goto retry; + if (id != prot->reqid) { + DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n", + dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid)); + ret = -EINVAL; + goto done; + } + + /* Check info buffer */ + info = (void*)&msg[1]; + + /* Copy info buffer */ + if (buf) + { + if (ret < (int)len) + len = ret; + memcpy(buf, info, len); + } + + /* Check the ERROR flag */ + if (flags & 
CDCF_IOC_ERROR) + { + ret = ltoh32(msg->status); + /* Cache error from dongle */ + dhd->dongle_error = ret; + } + +done: + return ret; +} + +static int +dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action) +{ + dhd_prot_t *prot = dhd->prot; + cdc_ioctl_t *msg = &prot->msg; + int ret = 0; + uint32 flags, id; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len)); + + if (dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__)); + return -EIO; + } + + /* don't talk to the dongle if fw is about to be reloaded */ + if (dhd->hang_was_sent) { + DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n", + __FUNCTION__)); + return -EIO; + } + + memset(msg, 0, sizeof(cdc_ioctl_t)); + + msg->cmd = htol32(cmd); + msg->len = htol32(len); + msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT); + CDC_SET_IF_IDX(msg, ifidx); + /* add additional action bits */ + action &= WL_IOCTL_ACTION_MASK; + msg->flags |= (action << CDCF_IOC_ACTION_SHIFT) | CDCF_IOC_SET; + msg->flags = htol32(msg->flags); + + if (buf) + memcpy(prot->buf, buf, len); + + if ((ret = dhdcdc_msg(dhd)) < 0) { + DHD_ERROR(("%s: dhdcdc_msg failed w/status %d\n", __FUNCTION__, ret)); + goto done; + } + + if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0) + goto done; + + flags = ltoh32(msg->flags); + id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT; + + if (id != prot->reqid) { + DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n", + dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid)); + ret = -EINVAL; + goto done; + } + + /* Check the ERROR flag */ + if (flags & CDCF_IOC_ERROR) + { + ret = ltoh32(msg->status); + /* Cache error from dongle */ + dhd->dongle_error = ret; + } + +done: + return ret; +} + + +int +dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len) +{ + dhd_prot_t *prot = dhd->prot; + int ret = -1; + uint8 action; + + if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) { + DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__)); + goto done; + } + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(len <= WLC_IOCTL_MAXLEN); + + if (len > WLC_IOCTL_MAXLEN) + goto done; + + if (prot->pending == TRUE) { + DHD_ERROR(("CDC packet is pending!!!! 
cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n", + ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd, + (unsigned long)prot->lastcmd)); + if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) { + DHD_TRACE(("iovar cmd=%s\n", (char*)buf)); + } + goto done; + } + + prot->pending = TRUE; + prot->lastcmd = ioc->cmd; + action = ioc->set; + if (action & WL_IOCTL_ACTION_SET) + ret = dhdcdc_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); + else { + ret = dhdcdc_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); + if (ret > 0) + ioc->used = ret - sizeof(cdc_ioctl_t); + } + + /* Too many programs assume ioctl() returns 0 on success */ + if (ret >= 0) + ret = 0; + else { + cdc_ioctl_t *msg = &prot->msg; + ioc->needed = ltoh32(msg->len); /* len == needed when set/query fails from dongle */ + } + + /* Intercept the wme_dp ioctl here */ + if ((!ret) && (ioc->cmd == WLC_SET_VAR) && (!strcmp(buf, "wme_dp"))) { + int slen, val = 0; + + slen = strlen("wme_dp") + 1; + if (len >= (int)(slen + sizeof(int))) + bcopy(((char *)buf + slen), &val, sizeof(int)); + dhd->wme_dp = (uint8) ltoh32(val); + } + + prot->pending = FALSE; + +done: + return ret; +} + +int +dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + return BCME_UNSUPPORTED; +} + +#ifdef PROP_TXSTATUS +void +dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + int i; + uint8* ea; + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*) + dhdp->wlfc_state; + wlfc_hanger_t* h; + wlfc_mac_descriptor_t* mac_table; + wlfc_mac_descriptor_t* interfaces; + char* iftypes[] = {"STA", "AP", "WDS", "p2pGO", "p2pCL"}; + + if (wlfc == NULL) { + bcm_bprintf(strbuf, "wlfc not initialized yet\n"); + return; + } + h = (wlfc_hanger_t*)wlfc->hanger; + if (h == NULL) { + bcm_bprintf(strbuf, "wlfc-hanger not initialized yet\n"); + } + + mac_table = wlfc->destination_entries.nodes; + interfaces = wlfc->destination_entries.interfaces; + bcm_bprintf(strbuf, "---- wlfc stats ----\n"); + if (h) { + bcm_bprintf(strbuf, "wlfc hanger (pushed,popped,f_push," + "f_pop,f_slot, pending) = (%d,%d,%d,%d,%d,%d)\n", + h->pushed, + h->popped, + h->failed_to_push, + h->failed_to_pop, + h->failed_slotfind, + (h->pushed - h->popped)); + } + + bcm_bprintf(strbuf, "wlfc fail(tlv,credit_rqst,mac_update,psmode_update), " + "(dq_full,sendq_full, rollback_fail) = (%d,%d,%d,%d), (%d,%d,%d)\n", + wlfc->stats.tlv_parse_failed, + wlfc->stats.credit_request_failed, + wlfc->stats.mac_update_failed, + wlfc->stats.psmode_update_failed, + wlfc->stats.delayq_full_error, + wlfc->stats.sendq_full_error, + wlfc->stats.rollback_failed); + + bcm_bprintf(strbuf, "SENDQ (len,credit,sent) " + "(AC0[%d,%d,%d],AC1[%d,%d,%d],AC2[%d,%d,%d],AC3[%d,%d,%d],BC_MC[%d,%d,%d])\n", + wlfc->SENDQ.q[0].len, wlfc->FIFO_credit[0], wlfc->stats.sendq_pkts[0], + wlfc->SENDQ.q[1].len, wlfc->FIFO_credit[1], wlfc->stats.sendq_pkts[1], + wlfc->SENDQ.q[2].len, wlfc->FIFO_credit[2], wlfc->stats.sendq_pkts[2], + wlfc->SENDQ.q[3].len, wlfc->FIFO_credit[3], wlfc->stats.sendq_pkts[3], + wlfc->SENDQ.q[4].len, wlfc->FIFO_credit[4], wlfc->stats.sendq_pkts[4]); + +#ifdef PROP_TXSTATUS_DEBUG + bcm_bprintf(strbuf, "SENDQ dropped: AC[0-3]:(%d,%d,%d,%d), (bcmc,atim):(%d,%d)\n", + wlfc->stats.dropped_qfull[0], wlfc->stats.dropped_qfull[1], + wlfc->stats.dropped_qfull[2], wlfc->stats.dropped_qfull[3], + wlfc->stats.dropped_qfull[4], wlfc->stats.dropped_qfull[5]); +#endif + + bcm_bprintf(strbuf, "\n"); + for (i = 0; i < WLFC_MAX_IFNUM; i++) { + if (interfaces[i].occupied) { + char* 
iftype_desc; + + if (interfaces[i].iftype > WLC_E_IF_ROLE_P2P_CLIENT) + iftype_desc = "stats.latency_sample_count) { + moving_samples = sizeof(wlfc->stats.deltas)/sizeof(uint32); + + for (i = 0; i < moving_samples; i++) + moving_avg += wlfc->stats.deltas[i]; + moving_avg /= moving_samples; + + avg = (100 * wlfc->stats.total_status_latency) / + wlfc->stats.latency_sample_count; + bcm_bprintf(strbuf, "txstatus latency (average, last, moving[%d]) = " + "(%d.%d, %03d, %03d)\n", + moving_samples, avg/100, (avg - (avg/100)*100), + wlfc->stats.latency_most_recent, + moving_avg); + } + } + + bcm_bprintf(strbuf, "wlfc- fifo[0-5] credit stats: sent = (%d,%d,%d,%d,%d,%d), " + "back = (%d,%d,%d,%d,%d,%d)\n", + wlfc->stats.fifo_credits_sent[0], + wlfc->stats.fifo_credits_sent[1], + wlfc->stats.fifo_credits_sent[2], + wlfc->stats.fifo_credits_sent[3], + wlfc->stats.fifo_credits_sent[4], + wlfc->stats.fifo_credits_sent[5], + + wlfc->stats.fifo_credits_back[0], + wlfc->stats.fifo_credits_back[1], + wlfc->stats.fifo_credits_back[2], + wlfc->stats.fifo_credits_back[3], + wlfc->stats.fifo_credits_back[4], + wlfc->stats.fifo_credits_back[5]); + { + uint32 fifo_cr_sent = 0; + uint32 fifo_cr_acked = 0; + uint32 request_cr_sent = 0; + uint32 request_cr_ack = 0; + uint32 bc_mc_cr_ack = 0; + + for (i = 0; i < sizeof(wlfc->stats.fifo_credits_sent)/sizeof(uint32); i++) { + fifo_cr_sent += wlfc->stats.fifo_credits_sent[i]; + } + + for (i = 0; i < sizeof(wlfc->stats.fifo_credits_back)/sizeof(uint32); i++) { + fifo_cr_acked += wlfc->stats.fifo_credits_back[i]; + } + + for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) { + if (wlfc->destination_entries.nodes[i].occupied) { + request_cr_sent += + wlfc->destination_entries.nodes[i].dstncredit_sent_packets; + } + } + for (i = 0; i < WLFC_MAX_IFNUM; i++) { + if (wlfc->destination_entries.interfaces[i].occupied) { + request_cr_sent += + wlfc->destination_entries.interfaces[i].dstncredit_sent_packets; + } + } + for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) { + if (wlfc->destination_entries.nodes[i].occupied) { + request_cr_ack += + wlfc->destination_entries.nodes[i].dstncredit_acks; + } + } + for (i = 0; i < WLFC_MAX_IFNUM; i++) { + if (wlfc->destination_entries.interfaces[i].occupied) { + request_cr_ack += + wlfc->destination_entries.interfaces[i].dstncredit_acks; + } + } + bcm_bprintf(strbuf, "wlfc- (sent, status) => pq(%d,%d), vq(%d,%d)," + "other:%d, bc_mc:%d, signal-only, (sent,freed): (%d,%d)", + fifo_cr_sent, fifo_cr_acked, + request_cr_sent, request_cr_ack, + wlfc->destination_entries.other.dstncredit_acks, + bc_mc_cr_ack, + wlfc->stats.signal_only_pkts_sent, wlfc->stats.signal_only_pkts_freed); + } +#endif /* PROP_TXSTATUS_DEBUG */ + bcm_bprintf(strbuf, "\n"); + bcm_bprintf(strbuf, "wlfc- pkt((in,2bus,txstats,hdrpull),(dropped,hdr_only,wlc_tossed)" + "(freed,free_err,rollback)) = " + "((%d,%d,%d,%d),(%d,%d,%d),(%d,%d,%d))\n", + wlfc->stats.pktin, + wlfc->stats.pkt2bus, + wlfc->stats.txstatus_in, + wlfc->stats.dhd_hdrpulls, + + wlfc->stats.pktdropped, + wlfc->stats.wlfc_header_only_pkt, + wlfc->stats.wlc_tossed_pkts, + + wlfc->stats.pkt_freed, + wlfc->stats.pkt_free_err, wlfc->stats.rollback); + + bcm_bprintf(strbuf, "wlfc- suppress((d11,wlc,err),enq(d11,wl,hq,mac?),retx(d11,wlc,hq)) = " + "((%d,%d,%d),(%d,%d,%d,%d),(%d,%d,%d))\n", + + wlfc->stats.d11_suppress, + wlfc->stats.wl_suppress, + wlfc->stats.bad_suppress, + + wlfc->stats.psq_d11sup_enq, + wlfc->stats.psq_wlsup_enq, + wlfc->stats.psq_hostq_enq, + wlfc->stats.mac_handle_notfound, + + wlfc->stats.psq_d11sup_retx, 
+ wlfc->stats.psq_wlsup_retx, + wlfc->stats.psq_hostq_retx); + return; +} + +/* Create a place to store all packet pointers submitted to the firmware until + a status comes back, suppress or otherwise. + + hang-er: noun, a contrivance on which things are hung, as a hook. +*/ +static void* +dhd_wlfc_hanger_create(osl_t *osh, int max_items) +{ + int i; + wlfc_hanger_t* hanger; + + /* allow only up to a specific size for now */ + ASSERT(max_items == WLFC_HANGER_MAXITEMS); + + if ((hanger = (wlfc_hanger_t*)MALLOC(osh, WLFC_HANGER_SIZE(max_items))) == NULL) + return NULL; + + memset(hanger, 0, WLFC_HANGER_SIZE(max_items)); + hanger->max_items = max_items; + + for (i = 0; i < hanger->max_items; i++) { + hanger->items[i].state = WLFC_HANGER_ITEM_STATE_FREE; + } + return hanger; +} + +static int +dhd_wlfc_hanger_delete(osl_t *osh, void* hanger) +{ + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + if (h) { + MFREE(osh, h, WLFC_HANGER_SIZE(h->max_items)); + return BCME_OK; + } + return BCME_BADARG; +} + +static uint16 +dhd_wlfc_hanger_get_free_slot(void* hanger) +{ + int i; + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + if (h) { + for (i = 0; i < h->max_items; i++) { + if (h->items[i].state == WLFC_HANGER_ITEM_STATE_FREE) + return (uint16)i; + } + h->failed_slotfind++; + } + return WLFC_HANGER_MAXITEMS; +} + +static int +dhd_wlfc_hanger_pushpkt(void* hanger, void* pkt, uint32 slot_id) +{ + int rc = BCME_OK; + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + if (h && (slot_id < WLFC_HANGER_MAXITEMS)) { + if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_FREE) { + h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE; + h->items[slot_id].pkt = pkt; + h->items[slot_id].identifier = slot_id; + h->pushed++; + } + else { + h->failed_to_push++; + rc = BCME_NOTFOUND; + } + } + else + rc = BCME_BADARG; + return rc; +} + +static int +dhd_wlfc_hanger_poppkt(void* hanger, uint32 slot_id, void** pktout, int remove_from_hanger) +{ + int rc = BCME_OK; + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + /* this packet was not pushed at the time it went to the firmware */ + if (slot_id == WLFC_HANGER_MAXITEMS) + return BCME_NOTFOUND; + + if (h) { + if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE) { + *pktout = h->items[slot_id].pkt; + if (remove_from_hanger) { + h->items[slot_id].state = + WLFC_HANGER_ITEM_STATE_FREE; + h->items[slot_id].pkt = NULL; + h->items[slot_id].identifier = 0; + h->popped++; + } + } + else { + h->failed_to_pop++; + rc = BCME_NOTFOUND; + } + } + else + rc = BCME_BADARG; + return rc; +} + +static int +_dhd_wlfc_pushheader(athost_wl_status_info_t* ctx, void* p, bool tim_signal, + uint8 tim_bmp, uint8 mac_handle, uint32 htodtag) +{ + uint32 wl_pktinfo = 0; + uint8* wlh; + uint8 dataOffset; + uint8 fillers; + uint8 tim_signal_len = 0; + + struct bdc_header *h; + + if (tim_signal) { + tim_signal_len = 1 + 1 + WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP; + } + + /* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */ + dataOffset = WLFC_CTL_VALUE_LEN_PKTTAG + 2 + tim_signal_len; + fillers = ROUNDUP(dataOffset, 4) - dataOffset; + dataOffset += fillers; + + PKTPUSH(ctx->osh, p, dataOffset); + wlh = (uint8*) PKTDATA(ctx->osh, p); + + wl_pktinfo = htol32(htodtag); + + wlh[0] = WLFC_CTL_TYPE_PKTTAG; + wlh[1] = WLFC_CTL_VALUE_LEN_PKTTAG; + memcpy(&wlh[2], &wl_pktinfo, sizeof(uint32)); + + if (tim_signal_len) { + wlh[dataOffset - fillers - tim_signal_len ] = + WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP; + wlh[dataOffset - fillers - tim_signal_len + 1] = + WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP; + 
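+		/* TLV value bytes: destination mac handle followed by the
+		 * per-AC pending-traffic bitmap
+		 */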
wlh[dataOffset - fillers - tim_signal_len + 2] = mac_handle; + wlh[dataOffset - fillers - tim_signal_len + 3] = tim_bmp; + } + if (fillers) + memset(&wlh[dataOffset - fillers], WLFC_CTL_TYPE_FILLER, fillers); + + PKTPUSH(ctx->osh, p, BDC_HEADER_LEN); + h = (struct bdc_header *)PKTDATA(ctx->osh, p); + h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT); + if (PKTSUMNEEDED(p)) + h->flags |= BDC_FLAG_SUM_NEEDED; + + + h->priority = (PKTPRIO(p) & BDC_PRIORITY_MASK); + h->flags2 = 0; + h->dataOffset = dataOffset >> 2; + BDC_SET_IF_IDX(h, DHD_PKTTAG_IF(PKTTAG(p))); + return BCME_OK; +} + +static int +_dhd_wlfc_pullheader(athost_wl_status_info_t* ctx, void* pktbuf) +{ + struct bdc_header *h; + + if (PKTLEN(ctx->osh, pktbuf) < BDC_HEADER_LEN) { + WLFC_DBGMESG(("%s: rx data too short (%d < %d)\n", __FUNCTION__, + PKTLEN(ctx->osh, pktbuf), BDC_HEADER_LEN)); + return BCME_ERROR; + } + h = (struct bdc_header *)PKTDATA(ctx->osh, pktbuf); + + /* pull BDC header */ + PKTPULL(ctx->osh, pktbuf, BDC_HEADER_LEN); + /* pull wl-header */ + PKTPULL(ctx->osh, pktbuf, (h->dataOffset << 2)); + return BCME_OK; +} + +static wlfc_mac_descriptor_t* +_dhd_wlfc_find_table_entry(athost_wl_status_info_t* ctx, void* p) +{ + int i; + wlfc_mac_descriptor_t* table = ctx->destination_entries.nodes; + uint8 ifid = DHD_PKTTAG_IF(PKTTAG(p)); + uint8* dstn = DHD_PKTTAG_DSTN(PKTTAG(p)); + + /* no lookup necessary, only if this packet belongs to STA interface */ + if (((ctx->destination_entries.interfaces[ifid].iftype == WLC_E_IF_ROLE_STA) || + ETHER_ISMULTI(dstn) || + (ctx->destination_entries.interfaces[ifid].iftype == WLC_E_IF_ROLE_P2P_CLIENT)) && + (ctx->destination_entries.interfaces[ifid].occupied)) { + return &ctx->destination_entries.interfaces[ifid]; + } + + for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) { + if (table[i].occupied) { + if (table[i].interface_id == ifid) { + if (!memcmp(table[i].ea, dstn, ETHER_ADDR_LEN)) + return &table[i]; + } + } + } + return &ctx->destination_entries.other; +} + +static int +_dhd_wlfc_rollback_packet_toq(athost_wl_status_info_t* ctx, + void* p, ewlfc_packet_state_t pkt_type, uint32 hslot) +{ + /* + put the packet back to the head of queue + + - a packet from send-q will need to go back to send-q and not delay-q + since that will change the order of packets. + - suppressed packet goes back to suppress sub-queue + - pull out the header, if new or delayed packet + + Note: hslot is used only when header removal is done. 
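+	   This rollback is typically reached when the bus commit (fcommit)
+	   fails, e.g. under bus flow control, so the packet can be
+	   resubmitted later without reordering.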
+ */ + wlfc_mac_descriptor_t* entry; + void* pktout; + int rc = BCME_OK; + int prec; + + entry = _dhd_wlfc_find_table_entry(ctx, p); + prec = DHD_PKTTAG_FIFO(PKTTAG(p)); + if (entry != NULL) { + if (pkt_type == eWLFC_PKTTYPE_SUPPRESSED) { + /* wl-header is saved for suppressed packets */ + if (WLFC_PKTQ_PENQ_HEAD(&entry->psq, ((prec << 1) + 1), p) == NULL) { + WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + rc = BCME_ERROR; + } + } + else { + /* remove header first */ + _dhd_wlfc_pullheader(ctx, p); + + if (pkt_type == eWLFC_PKTTYPE_DELAYED) { + /* delay-q packets are going to delay-q */ + if (WLFC_PKTQ_PENQ_HEAD(&entry->psq, (prec << 1), p) == NULL) { + WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + rc = BCME_ERROR; + } + } + else { + /* these are going to SENDQ */ + if (WLFC_PKTQ_PENQ_HEAD(&ctx->SENDQ, prec, p) == NULL) { + WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + rc = BCME_ERROR; + } + } + /* free the hanger slot */ + dhd_wlfc_hanger_poppkt(ctx->hanger, hslot, &pktout, 1); + + /* decrement sequence count */ + WLFC_DECR_SEQCOUNT(entry, prec); + } + /* + if this packet did not count against FIFO credit, it must have + taken a requested_credit from the firmware (for pspoll etc.) + */ + if (!DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) { + entry->requested_credit++; + } + } + else { + WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + rc = BCME_ERROR; + } + if (rc != BCME_OK) + ctx->stats.rollback_failed++; + else + ctx->stats.rollback++; + + return rc; +} + +static void +_dhd_wlfc_flow_control_check(athost_wl_status_info_t* ctx, struct pktq* pq, uint8 if_id) +{ + if ((pq->len <= WLFC_FLOWCONTROL_LOWATER) && (ctx->hostif_flow_state[if_id] == ON)) { + /* start traffic */ + ctx->hostif_flow_state[if_id] = OFF; + /* + WLFC_DBGMESG(("qlen:%02d, if:%02d, ->OFF, start traffic %s()\n", + pq->len, if_id, __FUNCTION__)); + */ + WLFC_DBGMESG(("F")); + /* dhd_txflowcontrol(ctx->dhdp, if_id, OFF); */ + ctx->toggle_host_if = 0; + } + if ((pq->len >= WLFC_FLOWCONTROL_HIWATER) && (ctx->hostif_flow_state[if_id] == OFF)) { + /* stop traffic */ + ctx->hostif_flow_state[if_id] = ON; + /* + WLFC_DBGMESG(("qlen:%02d, if:%02d, ->ON, stop traffic %s()\n", + pq->len, if_id, __FUNCTION__)); + */ + WLFC_DBGMESG(("N")); + /* dhd_txflowcontrol(ctx->dhdp, if_id, ON); */ + ctx->host_ifidx = if_id; + ctx->toggle_host_if = 1; + } + return; +} + +static int +_dhd_wlfc_send_signalonly_packet(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry, + uint8 ta_bmp) +{ + int rc = BCME_OK; + void* p = NULL; + int dummylen = ((dhd_pub_t *)ctx->dhdp)->hdrlen+ 12; + + /* allocate a dummy packet */ + p = PKTGET(ctx->osh, dummylen, TRUE); + if (p) { + PKTPULL(ctx->osh, p, dummylen); + DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), 0); + _dhd_wlfc_pushheader(ctx, p, TRUE, ta_bmp, entry->mac_handle, 0); + DHD_PKTTAG_SETSIGNALONLY(PKTTAG(p), 1); +#ifdef PROP_TXSTATUS_DEBUG + ctx->stats.signal_only_pkts_sent++; +#endif + rc = dhd_bus_txdata(((dhd_pub_t *)ctx->dhdp)->bus, p); + if (rc != BCME_OK) { + PKTFREE(ctx->osh, p, TRUE); + } + } + else { + DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n", + __FUNCTION__, dummylen)); + rc = BCME_NOMEM; + } + return rc; +} + +/* Return TRUE if traffic availability changed */ +static bool +_dhd_wlfc_traffic_pending_check(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry, + int prec) +{ + bool rc = FALSE; + + if (entry->state == WLFC_STATE_CLOSE) { + if ((pktq_plen(&entry->psq, (prec << 1)) == 0) && + (pktq_plen(&entry->psq, ((prec << 1) + 1)) == 
0)) { + + if (entry->traffic_pending_bmp & NBITVAL(prec)) { + rc = TRUE; + entry->traffic_pending_bmp = + entry->traffic_pending_bmp & ~ NBITVAL(prec); + } + } + else { + if (!(entry->traffic_pending_bmp & NBITVAL(prec))) { + rc = TRUE; + entry->traffic_pending_bmp = + entry->traffic_pending_bmp | NBITVAL(prec); + } + } + } + if (rc) { + /* request a TIM update to firmware at the next piggyback opportunity */ + if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp) { + entry->send_tim_signal = 1; + _dhd_wlfc_send_signalonly_packet(ctx, entry, entry->traffic_pending_bmp); + entry->traffic_lastreported_bmp = entry->traffic_pending_bmp; + entry->send_tim_signal = 0; + } + else { + rc = FALSE; + } + } + return rc; +} + +static int +_dhd_wlfc_enque_suppressed(athost_wl_status_info_t* ctx, int prec, void* p) +{ + wlfc_mac_descriptor_t* entry; + + entry = _dhd_wlfc_find_table_entry(ctx, p); + if (entry == NULL) { + WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_NOTFOUND; + } + /* + - suppressed packets go to sub_queue[2*prec + 1] AND + - delayed packets go to sub_queue[2*prec + 0] to ensure + order of delivery. + */ + if (WLFC_PKTQ_PENQ(&entry->psq, ((prec << 1) + 1), p) == NULL) { + ctx->stats.delayq_full_error++; + /* WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); */ + WLFC_DBGMESG(("s")); + return BCME_ERROR; + } + /* A packet has been pushed, update traffic availability bitmap, if applicable */ + _dhd_wlfc_traffic_pending_check(ctx, entry, prec); + _dhd_wlfc_flow_control_check(ctx, &entry->psq, DHD_PKTTAG_IF(PKTTAG(p))); + return BCME_OK; +} + +static int +_dhd_wlfc_pretx_pktprocess(athost_wl_status_info_t* ctx, + wlfc_mac_descriptor_t* entry, void* p, int header_needed, uint32* slot) +{ + int rc = BCME_OK; + int hslot = WLFC_HANGER_MAXITEMS; + bool send_tim_update = FALSE; + uint32 htod = 0; + uint8 free_ctr; + + *slot = hslot; + + if (entry == NULL) { + entry = _dhd_wlfc_find_table_entry(ctx, p); + } + + if (entry == NULL) { + WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_ERROR; + } + if (entry->send_tim_signal) { + send_tim_update = TRUE; + entry->send_tim_signal = 0; + entry->traffic_lastreported_bmp = entry->traffic_pending_bmp; + } + if (header_needed) { + hslot = dhd_wlfc_hanger_get_free_slot(ctx->hanger); + free_ctr = WLFC_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p))); + DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), htod); + } + else { + hslot = WLFC_PKTID_HSLOT_GET(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + free_ctr = WLFC_PKTID_FREERUNCTR_GET(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + } + WLFC_PKTID_HSLOT_SET(htod, hslot); + WLFC_PKTID_FREERUNCTR_SET(htod, free_ctr); + DHD_PKTTAG_SETPKTDIR(PKTTAG(p), 1); + WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST); + WL_TXSTATUS_SET_FIFO(htod, DHD_PKTTAG_FIFO(PKTTAG(p))); + WLFC_PKTFLAG_SET_GENERATION(htod, entry->generation); + + if (!DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) { + /* + Indicate that this packet is being sent in response to an + explicit request from the firmware side. + */ + WLFC_PKTFLAG_SET_PKTREQUESTED(htod); + } + else { + WLFC_PKTFLAG_CLR_PKTREQUESTED(htod); + } + if (header_needed) { + rc = _dhd_wlfc_pushheader(ctx, p, send_tim_update, + entry->traffic_lastreported_bmp, entry->mac_handle, htod); + if (rc == BCME_OK) { + DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), htod); + /* + a new header was created for this packet. + push to hanger slot and scrub q. Since bus + send succeeded, increment seq number as well. 
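+			   The hanger slot index is encoded in the htod tag, so the
+			   txstatus the firmware later returns for this packet can
+			   locate it again (see dhd_wlfc_txstatus_update).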
+ */ + rc = dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot); + if (rc == BCME_OK) { + /* increment free running sequence count */ + WLFC_INCR_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p))); +#ifdef PROP_TXSTATUS_DEBUG + ((wlfc_hanger_t*)(ctx->hanger))->items[hslot].push_time = + OSL_SYSUPTIME(); +#endif + } + else { + WLFC_DBGMESG(("%s() hanger_pushpkt() failed, rc: %d\n", + __FUNCTION__, rc)); + } + } + } + else { + /* remove old header */ + _dhd_wlfc_pullheader(ctx, p); + + hslot = WLFC_PKTID_HSLOT_GET(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + free_ctr = WLFC_PKTID_FREERUNCTR_GET(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + /* push new header */ + _dhd_wlfc_pushheader(ctx, p, send_tim_update, + entry->traffic_lastreported_bmp, entry->mac_handle, htod); + } + *slot = hslot; + return rc; +} + +static int +_dhd_wlfc_is_destination_closed(athost_wl_status_info_t* ctx, + wlfc_mac_descriptor_t* entry, int prec) +{ + if (ctx->destination_entries.interfaces[entry->interface_id].iftype == + WLC_E_IF_ROLE_P2P_GO) { + /* - destination interface is of type p2p GO. + For a p2pGO interface, if the destination is OPEN but the interface is + CLOSEd, do not send traffic. But if the dstn is CLOSEd while there is + destination-specific-credit left send packets. This is because the + firmware storing the destination-specific-requested packet in queue. + */ + if ((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) && + (entry->requested_packet == 0)) + return 1; + } + /* AP, p2p_go -> unicast desc entry, STA/p2p_cl -> interface desc. entry */ + if (((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) && + (entry->requested_packet == 0)) || + (!(entry->ac_bitmap & (1 << prec)))) + return 1; + + return 0; +} + +static void* +_dhd_wlfc_deque_delayedq(athost_wl_status_info_t* ctx, + int prec, uint8* ac_credit_spent, uint8* needs_hdr, wlfc_mac_descriptor_t** entry_out) +{ + wlfc_mac_descriptor_t* entry; + wlfc_mac_descriptor_t* table; + uint8 token_pos; + int total_entries; + void* p = NULL; + int pout; + int i; + + *entry_out = NULL; + token_pos = ctx->token_pos[prec]; + /* most cases a packet will count against FIFO credit */ + *ac_credit_spent = 1; + *needs_hdr = 1; + + /* search all entries, include nodes as well as interfaces */ + table = (wlfc_mac_descriptor_t*)&ctx->destination_entries; + total_entries = sizeof(ctx->destination_entries)/sizeof(wlfc_mac_descriptor_t); + + for (i = 0; i < total_entries; i++) { + entry = &table[(token_pos + i) % total_entries]; + if (entry->occupied) { + if (!_dhd_wlfc_is_destination_closed(ctx, entry, prec)) { + p = pktq_mdeq(&entry->psq, + /* higher precedence will be picked up first, + i.e. suppressed packets before delayed ones + */ + (NBITVAL((prec << 1) + 1) | NBITVAL((prec << 1))), + &pout); + if (p != NULL) { + /* did the packet come from suppress sub-queue? */ + if (pout == ((prec << 1) + 1)) { + /* + this packet was suppressed and was sent on the bus + previously; this already has a header + */ + *needs_hdr = 0; + } + if (entry->requested_credit > 0) { + entry->requested_credit--; +#ifdef PROP_TXSTATUS_DEBUG + entry->dstncredit_sent_packets++; +#endif + /* + if the packet was pulled out while destination is in + closed state but had a non-zero packets requested, + then this should not count against the FIFO credit. + That is due to the fact that the firmware will + most likely hold onto this packet until a suitable + time later to push it to the appropriate AC FIFO. 
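+				   In other words, packets sent against a destination-specific
+				   request are accounted for by the firmware itself rather than
+				   by the shared per-AC FIFO credit pool.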
+ */ + if (entry->state == WLFC_STATE_CLOSE) + *ac_credit_spent = 0; + } + else if (entry->requested_packet > 0) { + entry->requested_packet--; + DHD_PKTTAG_SETONETIMEPKTRQST(PKTTAG(p)); + if (entry->state == WLFC_STATE_CLOSE) + *ac_credit_spent = 0; + } + /* move token to ensure fair round-robin */ + ctx->token_pos[prec] = + (token_pos + i + 1) % total_entries; + *entry_out = entry; + _dhd_wlfc_flow_control_check(ctx, &entry->psq, + DHD_PKTTAG_IF(PKTTAG(p))); + /* + A packet has been picked up, update traffic + availability bitmap, if applicable + */ + _dhd_wlfc_traffic_pending_check(ctx, entry, prec); + return p; + } + } + } + } + return NULL; +} + +static void* +_dhd_wlfc_deque_sendq(athost_wl_status_info_t* ctx, int prec, uint8* ac_credit_spent) +{ + wlfc_mac_descriptor_t* entry; + void* p; + + /* most cases a packet will count against FIFO credit */ + *ac_credit_spent = 1; + + p = pktq_pdeq(&ctx->SENDQ, prec); + if (p != NULL) { + if (ETHER_ISMULTI(DHD_PKTTAG_DSTN(PKTTAG(p)))) + /* bc/mc packets do not have a delay queue */ + return p; + + entry = _dhd_wlfc_find_table_entry(ctx, p); + + if (entry == NULL) { + WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return p; + } + + while ((p != NULL) && _dhd_wlfc_is_destination_closed(ctx, entry, prec)) { + /* + - suppressed packets go to sub_queue[2*prec + 1] AND + - delayed packets go to sub_queue[2*prec + 0] to ensure + order of delivery. + */ + if (WLFC_PKTQ_PENQ(&entry->psq, (prec << 1), p) == NULL) { + WLFC_DBGMESG(("D")); + /* dhd_txcomplete(ctx->dhdp, p, FALSE); */ + PKTFREE(ctx->osh, p, TRUE); + ctx->stats.delayq_full_error++; + } + /* + A packet has been pushed, update traffic availability bitmap, + if applicable + */ + _dhd_wlfc_traffic_pending_check(ctx, entry, prec); + _dhd_wlfc_flow_control_check(ctx, &entry->psq, DHD_PKTTAG_IF(PKTTAG(p))); + p = pktq_pdeq(&ctx->SENDQ, prec); + if (p == NULL) + break; + + entry = _dhd_wlfc_find_table_entry(ctx, p); + + if ((entry == NULL) || (ETHER_ISMULTI(DHD_PKTTAG_DSTN(PKTTAG(p))))) { + return p; + } + } + if (p) { + if (entry->requested_packet == 0) { + if (entry->requested_credit > 0) + entry->requested_credit--; + } + else { + entry->requested_packet--; + DHD_PKTTAG_SETONETIMEPKTRQST(PKTTAG(p)); + } + if (entry->state == WLFC_STATE_CLOSE) + *ac_credit_spent = 0; +#ifdef PROP_TXSTATUS_DEBUG + entry->dstncredit_sent_packets++; +#endif + } + if (p) + _dhd_wlfc_flow_control_check(ctx, &ctx->SENDQ, DHD_PKTTAG_IF(PKTTAG(p))); + } + return p; +} + +static int +_dhd_wlfc_mac_entry_update(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry, + ewlfc_mac_entry_action_t action, uint8 ifid, uint8 iftype, uint8* ea) +{ + int rc = BCME_OK; + + if (action == eWLFC_MAC_ENTRY_ACTION_ADD) { + entry->occupied = 1; + entry->state = WLFC_STATE_OPEN; + entry->requested_credit = 0; + entry->interface_id = ifid; + entry->iftype = iftype; + entry->ac_bitmap = 0xff; /* update this when handling APSD */ + /* for an interface entry we may not care about the MAC address */ + if (ea != NULL) + memcpy(&entry->ea[0], ea, ETHER_ADDR_LEN); + pktq_init(&entry->psq, WLFC_PSQ_PREC_COUNT, WLFC_PSQ_LEN); + } + else if (action == eWLFC_MAC_ENTRY_ACTION_DEL) { + entry->occupied = 0; + entry->state = WLFC_STATE_CLOSE; + entry->requested_credit = 0; + /* enable after packets are queued-deqeued properly. 
+ pktq_flush(dhd->osh, &entry->psq, FALSE, NULL, 0); + */ + } + return rc; +} + +int +_dhd_wlfc_borrow_credit(athost_wl_status_info_t* ctx, uint8 available_credit_map, int borrower_ac) +{ + int lender_ac; + int rc = BCME_ERROR; + + if (ctx == NULL || available_credit_map == 0) { + WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + /* Borrow from lowest priority available AC (including BC/MC credits) */ + for (lender_ac = 0; lender_ac <= AC_COUNT; lender_ac++) { + if ((available_credit_map && (1 << lender_ac)) && + (ctx->FIFO_credit[lender_ac] > 0)) { + ctx->credits_borrowed[borrower_ac][lender_ac]++; + ctx->FIFO_credit[lender_ac]--; + rc = BCME_OK; + break; + } + } + + return rc; +} + +int +dhd_wlfc_interface_entry_update(void* state, + ewlfc_mac_entry_action_t action, uint8 ifid, uint8 iftype, uint8* ea) +{ + athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state; + wlfc_mac_descriptor_t* entry; + + if (ifid >= WLFC_MAX_IFNUM) + return BCME_BADARG; + + entry = &ctx->destination_entries.interfaces[ifid]; + return _dhd_wlfc_mac_entry_update(ctx, entry, action, ifid, iftype, ea); +} + +int +dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits) +{ + athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state; + + /* update the AC FIFO credit map */ + ctx->FIFO_credit[0] = credits[0]; + ctx->FIFO_credit[1] = credits[1]; + ctx->FIFO_credit[2] = credits[2]; + ctx->FIFO_credit[3] = credits[3]; + /* credit for bc/mc packets */ + ctx->FIFO_credit[4] = credits[4]; + /* credit for ATIM FIFO is not used yet. */ + ctx->FIFO_credit[5] = 0; + return BCME_OK; +} + +int +dhd_wlfc_enque_sendq(void* state, int prec, void* p) +{ + athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state; + + if ((state == NULL) || + /* prec = AC_COUNT is used for bc/mc queue */ + (prec > AC_COUNT) || + (p == NULL)) { + WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + if (FALSE == dhd_prec_enq(ctx->dhdp, &ctx->SENDQ, p, prec)) { + ctx->stats.sendq_full_error++; + /* + WLFC_DBGMESG(("Error: %s():%d, qlen:%d\n", + __FUNCTION__, __LINE__, ctx->SENDQ.len)); + */ + WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, prec); + WLFC_DBGMESG(("Q")); + PKTFREE(ctx->osh, p, TRUE); + return BCME_ERROR; + } + ctx->stats.pktin++; + /* _dhd_wlfc_flow_control_check(ctx, &ctx->SENDQ, DHD_PKTTAG_IF(PKTTAG(p))); */ + return BCME_OK; +} + +int +_dhd_wlfc_handle_packet_commit(athost_wl_status_info_t* ctx, int ac, + dhd_wlfc_commit_info_t *commit_info, f_commitpkt_t fcommit, void* commit_ctx) +{ + uint32 hslot; + int rc; + + /* + if ac_fifo_credit_spent = 0 + + This packet will not count against the FIFO credit. + To ensure the txstatus corresponding to this packet + does not provide an implied credit (default behavior) + mark the packet accordingly. + + if ac_fifo_credit_spent = 1 + + This is a normal packet and it counts against the FIFO + credit count. + */ + DHD_PKTTAG_SETCREDITCHECK(PKTTAG(commit_info->p), commit_info->ac_fifo_credit_spent); + rc = _dhd_wlfc_pretx_pktprocess(ctx, commit_info->mac_entry, commit_info->p, + commit_info->needs_hdr, &hslot); + + if (rc == BCME_OK) + rc = fcommit(commit_ctx, commit_info->p); + else + ctx->stats.generic_error++; + + if (rc == BCME_OK) { + ctx->stats.pkt2bus++; + if (commit_info->ac_fifo_credit_spent) { + ctx->stats.sendq_pkts[ac]++; + WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac); + } + } + else { + /* + bus commit has failed, rollback. 
+ - remove wl-header for a delayed packet + - save wl-header header for suppressed packets + */ + rc = _dhd_wlfc_rollback_packet_toq(ctx, commit_info->p, + (commit_info->pkt_type), hslot); + if (rc != BCME_OK) + ctx->stats.rollback_failed++; + + rc = BCME_ERROR; + } + + return rc; +} + +int +dhd_wlfc_commit_packets(void* state, f_commitpkt_t fcommit, void* commit_ctx) +{ + int ac; + int credit; + int rc; + dhd_wlfc_commit_info_t commit_info; + athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state; + int credit_count = 0; + int bus_retry_count = 0; + uint8 ac_available = 0; /* Bitmask for 4 ACs + BC/MC */ + + if ((state == NULL) || + (fcommit == NULL)) { + WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + memset(&commit_info, 0, sizeof(commit_info)); + + /* + Commit packets for regular AC traffic. Higher priority first. + First, use up FIFO credits available to each AC. Based on distribution + and credits left, borrow from other ACs as applicable + + -NOTE: + If the bus between the host and firmware is overwhelmed by the + traffic from host, it is possible that higher priority traffic + starves the lower priority queue. If that occurs often, we may + have to employ weighted round-robin or ucode scheme to avoid + low priority packet starvation. + */ + + for (ac = AC_COUNT; ac >= 0; ac--) { + + int initial_credit_count = ctx->FIFO_credit[ac]; + + for (credit = 0; credit < ctx->FIFO_credit[ac];) { + commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac, + &(commit_info.ac_fifo_credit_spent), + &(commit_info.needs_hdr), + &(commit_info.mac_entry)); + + if (commit_info.p == NULL) + break; + + commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED : + eWLFC_PKTTYPE_SUPPRESSED; + + rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info, + fcommit, commit_ctx); + + /* Bus commits may fail (e.g. flow control); abort after retries */ + if (rc == BCME_OK) { + if (commit_info.ac_fifo_credit_spent) { + credit++; + } + } + else { + bus_retry_count++; + if (bus_retry_count >= BUS_RETRIES) { + DHD_ERROR(("dhd_wlfc_commit_packets(): bus error\n")); + ctx->FIFO_credit[ac] -= credit; + return rc; + } + } + } + + ctx->FIFO_credit[ac] -= credit; + + /* packets from SENDQ are fresh and they'd need header and have no MAC entry */ + commit_info.needs_hdr = 1; + commit_info.mac_entry = NULL; + commit_info.pkt_type = eWLFC_PKTTYPE_NEW; + + for (credit = 0; credit < ctx->FIFO_credit[ac];) { + commit_info.p = _dhd_wlfc_deque_sendq(ctx, ac, + &(commit_info.ac_fifo_credit_spent)); + if (commit_info.p == NULL) + break; + + rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info, + fcommit, commit_ctx); + + /* Bus commits may fail (e.g. 
flow control); abort after retries */ + if (rc == BCME_OK) { + if (commit_info.ac_fifo_credit_spent) { + credit++; + } + } + else { + bus_retry_count++; + if (bus_retry_count >= BUS_RETRIES) { + DHD_ERROR(("dhd_wlfc_commit_packets(): bus error\n")); + ctx->FIFO_credit[ac] -= credit; + return rc; + } + } + } + + ctx->FIFO_credit[ac] -= credit; + + /* If no credits were used, the queue is idle and can be re-used + Note that resv credits cannot be borrowed + */ + if (initial_credit_count == ctx->FIFO_credit[ac]) { + ac_available |= (1 << ac); + credit_count += ctx->FIFO_credit[ac]; + } + } + + /* We borrow only for AC_BE and only if no other traffic seen for DEFER_PERIOD + + Note that (ac_available & WLFC_AC_BE_TRAFFIC_ONLY) is done to: + a) ignore BC/MC for deferring borrow + b) ignore AC_BE being available along with other ACs + (this should happen only for pure BC/MC traffic) + + i.e. AC_VI, AC_VO, AC_BK all MUST be available (i.e. no traffic) and + we do not care if AC_BE and BC/MC are available or not + */ + if ((ac_available & WLFC_AC_BE_TRAFFIC_ONLY) == WLFC_AC_BE_TRAFFIC_ONLY) { + + if (ctx->allow_credit_borrow) { + ac = 1; /* Set ac to AC_BE and borrow credits */ + } + else { + int delta; + int curr_t = OSL_SYSUPTIME(); + + if (curr_t > ctx->borrow_defer_timestamp) + delta = curr_t - ctx->borrow_defer_timestamp; + else + delta = 0xffffffff + curr_t - ctx->borrow_defer_timestamp; + + if (delta >= WLFC_BORROW_DEFER_PERIOD_MS) { + /* Reset borrow but defer to next iteration (defensive borrowing) */ + ctx->allow_credit_borrow = TRUE; + ctx->borrow_defer_timestamp = 0; + } + return BCME_OK; + } + } + else { + /* If we have multiple AC traffic, turn off borrowing, mark time and bail out */ + ctx->allow_credit_borrow = FALSE; + ctx->borrow_defer_timestamp = OSL_SYSUPTIME(); + return BCME_OK; + } + + /* At this point, borrow all credits only for "ac" (which should be set above to AC_BE) + Generically use "ac" only in case we extend to all ACs in future + */ + for (; (credit_count > 0);) { + + commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac, + &(commit_info.ac_fifo_credit_spent), + &(commit_info.needs_hdr), + &(commit_info.mac_entry)); + if (commit_info.p == NULL) + break; + + commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED : + eWLFC_PKTTYPE_SUPPRESSED; + + rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info, + fcommit, commit_ctx); + + /* Bus commits may fail (e.g. flow control); abort after retries */ + if (rc == BCME_OK) { + if (commit_info.ac_fifo_credit_spent) { + (void) _dhd_wlfc_borrow_credit(ctx, ac_available, ac); + credit_count--; + } + } + else { + bus_retry_count++; + if (bus_retry_count >= BUS_RETRIES) { + DHD_ERROR(("dhd_wlfc_commit_packets(): bus error\n")); + return rc; + } + } + } + + /* packets from SENDQ are fresh and they'd need header and have no MAC entry */ + commit_info.needs_hdr = 1; + commit_info.mac_entry = NULL; + commit_info.pkt_type = eWLFC_PKTTYPE_NEW; + + for (; (credit_count > 0);) { + + commit_info.p = _dhd_wlfc_deque_sendq(ctx, ac, + &(commit_info.ac_fifo_credit_spent)); + if (commit_info.p == NULL) + break; + + rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info, + fcommit, commit_ctx); + + /* Bus commits may fail (e.g. 
flow control); abort after retries */ + if (rc == BCME_OK) { + if (commit_info.ac_fifo_credit_spent) { + (void) _dhd_wlfc_borrow_credit(ctx, ac_available, ac); + credit_count--; + } + } + else { + bus_retry_count++; + if (bus_retry_count >= BUS_RETRIES) { + DHD_ERROR(("dhd_wlfc_commit_packets(): bus error\n")); + return rc; + } + } + } + + return BCME_OK; +} + +static uint8 +dhd_wlfc_find_mac_desc_id_from_mac(dhd_pub_t *dhdp, uint8* ea) +{ + wlfc_mac_descriptor_t* table = + ((athost_wl_status_info_t*)dhdp->wlfc_state)->destination_entries.nodes; + uint8 table_index; + + if (ea != NULL) { + for (table_index = 0; table_index < WLFC_MAC_DESC_TABLE_SIZE; table_index++) { + if ((memcmp(ea, &table[table_index].ea[0], ETHER_ADDR_LEN) == 0) && + table[table_index].occupied) + return table_index; + } + } + return WLFC_MAC_DESC_ID_INVALID; +} + +void +dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success) +{ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*) + dhd->wlfc_state; + void* p; + int fifo_id; + + if (DHD_PKTTAG_SIGNALONLY(PKTTAG(txp))) { +#ifdef PROP_TXSTATUS_DEBUG + wlfc->stats.signal_only_pkts_freed++; +#endif + /* is this a signal-only packet? */ + PKTFREE(wlfc->osh, txp, TRUE); + return; + } + if (!success) { + WLFC_DBGMESG(("At: %s():%d, bus_complete() failure for %p, htod_tag:0x%08x\n", + __FUNCTION__, __LINE__, txp, DHD_PKTTAG_H2DTAG(PKTTAG(txp)))); + dhd_wlfc_hanger_poppkt(wlfc->hanger, WLFC_PKTID_HSLOT_GET(DHD_PKTTAG_H2DTAG + (PKTTAG(txp))), &p, 1); + + /* indicate failure and free the packet */ + dhd_txcomplete(dhd, txp, FALSE); + + /* return the credit, if necessary */ + if (DHD_PKTTAG_CREDITCHECK(PKTTAG(txp))) { + int lender, credit_returned = 0; /* Note that borrower is fifo_id */ + + fifo_id = DHD_PKTTAG_FIFO(PKTTAG(txp)); + + /* Return credits to highest priority lender first */ + for (lender = AC_COUNT; lender >= 0; lender--) { + if (wlfc->credits_borrowed[fifo_id][lender] > 0) { + wlfc->FIFO_credit[lender]++; + wlfc->credits_borrowed[fifo_id][lender]--; + credit_returned = 1; + break; + } + } + + if (!credit_returned) { + wlfc->FIFO_credit[fifo_id]++; + } + } + + PKTFREE(wlfc->osh, txp, TRUE); + } + return; +} + +/* Handle discard or suppress indication */ +static int +dhd_wlfc_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info) +{ + uint8 status_flag; + uint32 status; + int ret; + int remove_from_hanger = 1; + void* pktbuf; + uint8 fifo_id; + wlfc_mac_descriptor_t* entry = NULL; + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*) + dhd->wlfc_state; + + memcpy(&status, pkt_info, sizeof(uint32)); + status_flag = WL_TXSTATUS_GET_FLAGS(status); + wlfc->stats.txstatus_in++; + + if (status_flag == WLFC_CTL_PKTFLAG_DISCARD) { + wlfc->stats.pkt_freed++; + } + + else if (status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) { + wlfc->stats.d11_suppress++; + remove_from_hanger = 0; + } + + else if (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS) { + wlfc->stats.wl_suppress++; + remove_from_hanger = 0; + } + + else if (status_flag == WLFC_CTL_PKTFLAG_TOSSED_BYWLC) { + wlfc->stats.wlc_tossed_pkts++; + } + + ret = dhd_wlfc_hanger_poppkt(wlfc->hanger, + WLFC_PKTID_HSLOT_GET(status), &pktbuf, remove_from_hanger); + if (ret != BCME_OK) { + /* do something */ + return ret; + } + + if (!remove_from_hanger) { + /* this packet was suppressed */ + + entry = _dhd_wlfc_find_table_entry(wlfc, pktbuf); + entry->generation = WLFC_PKTID_GEN(status); + } + +#ifdef PROP_TXSTATUS_DEBUG + { + uint32 new_t = OSL_SYSUPTIME(); + uint32 old_t; + uint32 delta; + old_t = 
((wlfc_hanger_t*)(wlfc->hanger))->items[ + WLFC_PKTID_HSLOT_GET(status)].push_time; + + + wlfc->stats.latency_sample_count++; + if (new_t > old_t) + delta = new_t - old_t; + else + delta = 0xffffffff + new_t - old_t; + wlfc->stats.total_status_latency += delta; + wlfc->stats.latency_most_recent = delta; + + wlfc->stats.deltas[wlfc->stats.idx_delta++] = delta; + if (wlfc->stats.idx_delta == sizeof(wlfc->stats.deltas)/sizeof(uint32)) + wlfc->stats.idx_delta = 0; + } +#endif /* PROP_TXSTATUS_DEBUG */ + + fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pktbuf)); + + /* pick up the implicit credit from this packet */ + if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pktbuf))) { + if (wlfc->proptxstatus_mode == WLFC_FCMODE_IMPLIED_CREDIT) { + + int lender, credit_returned = 0; /* Note that borrower is fifo_id */ + + /* Return credits to highest priority lender first */ + for (lender = AC_COUNT; lender >= 0; lender--) { + if (wlfc->credits_borrowed[fifo_id][lender] > 0) { + wlfc->FIFO_credit[lender]++; + wlfc->credits_borrowed[fifo_id][lender]--; + credit_returned = 1; + break; + } + } + + if (!credit_returned) { + wlfc->FIFO_credit[fifo_id]++; + } + } + } + else { + /* + if this packet did not count against FIFO credit, it must have + taken a requested_credit from the destination entry (for pspoll etc.) + */ + if (!entry) { + + entry = _dhd_wlfc_find_table_entry(wlfc, pktbuf); + } + if (!DHD_PKTTAG_ONETIMEPKTRQST(PKTTAG(pktbuf))) + entry->requested_credit++; +#ifdef PROP_TXSTATUS_DEBUG + entry->dstncredit_acks++; +#endif + } + if ((status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) || + (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS)) { + ret = _dhd_wlfc_enque_suppressed(wlfc, fifo_id, pktbuf); + if (ret != BCME_OK) { + /* delay q is full, drop this packet */ + dhd_wlfc_hanger_poppkt(wlfc->hanger, WLFC_PKTID_HSLOT_GET(status), + &pktbuf, 1); + + /* indicate failure and free the packet */ + dhd_txcomplete(dhd, pktbuf, FALSE); + PKTFREE(wlfc->osh, pktbuf, TRUE); + } + } + else { + dhd_txcomplete(dhd, pktbuf, TRUE); + /* free the packet */ + PKTFREE(wlfc->osh, pktbuf, TRUE); + } + return BCME_OK; +} + +static int +dhd_wlfc_fifocreditback_indicate(dhd_pub_t *dhd, uint8* credits) +{ + int i; + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*) + dhd->wlfc_state; + for (i = 0; i < WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK; i++) { +#ifdef PROP_TXSTATUS_DEBUG + wlfc->stats.fifo_credits_back[i] += credits[i]; +#endif + /* update FIFO credits */ + if (wlfc->proptxstatus_mode == WLFC_FCMODE_EXPLICIT_CREDIT) + { + int lender; /* Note that borrower is i */ + + /* Return credits to highest priority lender first */ + for (lender = AC_COUNT; (lender >= 0) && (credits[i] > 0); lender--) { + if (wlfc->credits_borrowed[i][lender] > 0) { + if (credits[i] >= wlfc->credits_borrowed[i][lender]) { + credits[i] -= wlfc->credits_borrowed[i][lender]; + wlfc->FIFO_credit[lender] += + wlfc->credits_borrowed[i][lender]; + wlfc->credits_borrowed[i][lender] = 0; + } + else { + wlfc->credits_borrowed[i][lender] -= credits[i]; + wlfc->FIFO_credit[lender] += credits[i]; + credits[i] = 0; + } + } + } + + /* If we have more credits left over, these must belong to the AC */ + if (credits[i] > 0) { + wlfc->FIFO_credit[i] += credits[i]; + } + } + } + + return BCME_OK; +} + +static int +dhd_wlfc_rssi_indicate(dhd_pub_t *dhd, uint8* rssi) +{ + (void)dhd; + (void)rssi; + return BCME_OK; +} + +static int +dhd_wlfc_mac_table_update(dhd_pub_t *dhd, uint8* value, uint8 type) +{ + int rc; + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*) + dhd->wlfc_state; + 
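+	/* MACDESC ADD/DEL TLV value layout: value[0] = mac handle (lookup
+	 * index embedded), value[1] = interface id, value[2..7] = MAC address
+	 */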
wlfc_mac_descriptor_t* table; + uint8 existing_index; + uint8 table_index; + uint8 ifid; + uint8* ea; + + WLFC_DBGMESG(("%s(), mac [%02x:%02x:%02x:%02x:%02x:%02x],%s,idx:%d,id:0x%02x\n", + __FUNCTION__, value[2], value[3], value[4], value[5], value[6], value[7], + ((type == WLFC_CTL_TYPE_MACDESC_ADD) ? "ADD":"DEL"), + WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]), value[0])); + + table = wlfc->destination_entries.nodes; + table_index = WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]); + ifid = value[1]; + ea = &value[2]; + + if (type == WLFC_CTL_TYPE_MACDESC_ADD) { + existing_index = dhd_wlfc_find_mac_desc_id_from_mac(dhd, &value[2]); + if (existing_index == WLFC_MAC_DESC_ID_INVALID) { + /* this MAC entry does not exist, create one */ + if (!table[table_index].occupied) { + table[table_index].mac_handle = value[0]; + rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index], + eWLFC_MAC_ENTRY_ACTION_ADD, ifid, + wlfc->destination_entries.interfaces[ifid].iftype, + ea); + } + else { + /* the space should have been empty, but it's not */ + wlfc->stats.mac_update_failed++; + } + } + else { + /* + there is an existing entry, move it to new index + if necessary. + */ + if (existing_index != table_index) { + /* if we already have an entry, free the old one */ + table[existing_index].occupied = 0; + table[existing_index].state = WLFC_STATE_CLOSE; + table[existing_index].requested_credit = 0; + table[existing_index].interface_id = 0; + } + } + } + if (type == WLFC_CTL_TYPE_MACDESC_DEL) { + if (table[table_index].occupied) { + rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index], + eWLFC_MAC_ENTRY_ACTION_DEL, ifid, + wlfc->destination_entries.interfaces[ifid].iftype, + ea); + } + else { + /* the space should have been occupied, but it's not */ + wlfc->stats.mac_update_failed++; + } + } + return BCME_OK; +} + +static int +dhd_wlfc_psmode_update(dhd_pub_t *dhd, uint8* value, uint8 type) +{ + /* Handle PS on/off indication */ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*) + dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + wlfc_mac_descriptor_t* desc; + uint8 mac_handle = value[0]; + int i; + + table = wlfc->destination_entries.nodes; + desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)]; + if (desc->occupied) { + /* a fresh PS mode should wipe old ps credits? */ + desc->requested_credit = 0; + if (type == WLFC_CTL_TYPE_MAC_OPEN) { + desc->state = WLFC_STATE_OPEN; + DHD_WLFC_CTRINC_MAC_OPEN(desc); + } + else { + desc->state = WLFC_STATE_CLOSE; + DHD_WLFC_CTRINC_MAC_CLOSE(desc); + /* + Indicate to firmware if there is any traffic pending. 
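+			   Each AC is re-checked and, when its pending state changed, a
+			   signal-only packet carrying the updated TIM bitmap is sent
+			   (see _dhd_wlfc_traffic_pending_check).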
+ */ + for (i = AC_BE; i < AC_COUNT; i++) { + _dhd_wlfc_traffic_pending_check(wlfc, desc, i); + } + } + } + else { + wlfc->stats.psmode_update_failed++; + } + return BCME_OK; +} + +static int +dhd_wlfc_interface_update(dhd_pub_t *dhd, uint8* value, uint8 type) +{ + /* Handle PS on/off indication */ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*) + dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + uint8 if_id = value[0]; + + if (if_id < WLFC_MAX_IFNUM) { + table = wlfc->destination_entries.interfaces; + if (table[if_id].occupied) { + if (type == WLFC_CTL_TYPE_INTERFACE_OPEN) { + table[if_id].state = WLFC_STATE_OPEN; + /* WLFC_DBGMESG(("INTERFACE[%d] OPEN\n", if_id)); */ + } + else { + table[if_id].state = WLFC_STATE_CLOSE; + /* WLFC_DBGMESG(("INTERFACE[%d] CLOSE\n", if_id)); */ + } + return BCME_OK; + } + } + wlfc->stats.interface_update_failed++; + + return BCME_OK; +} + +static int +dhd_wlfc_credit_request(dhd_pub_t *dhd, uint8* value) +{ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*) + dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + wlfc_mac_descriptor_t* desc; + uint8 mac_handle; + uint8 credit; + + table = wlfc->destination_entries.nodes; + mac_handle = value[1]; + credit = value[0]; + + desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)]; + if (desc->occupied) { + desc->requested_credit = credit; + + desc->ac_bitmap = value[2]; + } + else { + wlfc->stats.credit_request_failed++; + } + return BCME_OK; +} + +static int +dhd_wlfc_packet_request(dhd_pub_t *dhd, uint8* value) +{ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*) + dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + wlfc_mac_descriptor_t* desc; + uint8 mac_handle; + uint8 packet_count; + + table = wlfc->destination_entries.nodes; + mac_handle = value[1]; + packet_count = value[0]; + + desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)]; + if (desc->occupied) { + desc->requested_packet = packet_count; + + desc->ac_bitmap = value[2]; + } + else { + wlfc->stats.packet_request_failed++; + } + return BCME_OK; +} + +static int +dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len) +{ + uint8 type, len; + uint8* value; + uint8* tmpbuf; + uint16 remainder = tlv_hdr_len; + uint16 processed = 0; + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*) + dhd->wlfc_state; + tmpbuf = (uint8*)PKTDATA(dhd->osh, pktbuf); + if (remainder) { + while ((processed < (WLFC_MAX_PENDING_DATALEN * 2)) && (remainder > 0)) { + type = tmpbuf[processed]; + if (type == WLFC_CTL_TYPE_FILLER) { + remainder -= 1; + processed += 1; + continue; + } + + len = tmpbuf[processed + 1]; + value = &tmpbuf[processed + 2]; + + if (remainder < (2 + len)) + break; + + remainder -= 2 + len; + processed += 2 + len; + if (type == WLFC_CTL_TYPE_TXSTATUS) + dhd_wlfc_txstatus_update(dhd, value); + + else if (type == WLFC_CTL_TYPE_FIFO_CREDITBACK) + dhd_wlfc_fifocreditback_indicate(dhd, value); + + else if (type == WLFC_CTL_TYPE_RSSI) + dhd_wlfc_rssi_indicate(dhd, value); + + else if (type == WLFC_CTL_TYPE_MAC_REQUEST_CREDIT) + dhd_wlfc_credit_request(dhd, value); + + else if (type == WLFC_CTL_TYPE_MAC_REQUEST_PACKET) + dhd_wlfc_packet_request(dhd, value); + + else if ((type == WLFC_CTL_TYPE_MAC_OPEN) || + (type == WLFC_CTL_TYPE_MAC_CLOSE)) + dhd_wlfc_psmode_update(dhd, value, type); + + else if ((type == WLFC_CTL_TYPE_MACDESC_ADD) || + (type == WLFC_CTL_TYPE_MACDESC_DEL)) + dhd_wlfc_mac_table_update(dhd, value, type); + + else if ((type == WLFC_CTL_TYPE_INTERFACE_OPEN) || + (type == 
WLFC_CTL_TYPE_INTERFACE_CLOSE)) { + dhd_wlfc_interface_update(dhd, value, type); + } + } + if (remainder != 0) { + /* trouble..., something is not right */ + wlfc->stats.tlv_parse_failed++; + } + } + return BCME_OK; +} + +int +dhd_wlfc_init(dhd_pub_t *dhd) +{ + char iovbuf[12]; /* Room for "tlv" + '\0' + parameter */ + /* enable all signals & indicate host proptxstatus logic is active */ + uint32 tlv = dhd->wlfc_enabled? + WLFC_FLAGS_RSSI_SIGNALS | + WLFC_FLAGS_XONXOFF_SIGNALS | + WLFC_FLAGS_CREDIT_STATUS_SIGNALS | + WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE : 0; + + dhd->wlfc_state = NULL; + + /* + try to enable/disable signaling by sending "tlv" iovar. if that fails, + fallback to no flow control? Print a message for now. + */ + + /* enable proptxtstatus signaling by default */ + bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf)); + if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) { + DHD_ERROR(("dhd_wlfc_init(): failed to enable/disable bdcv2 tlv signaling\n")); + } + else { + /* + Leaving the message for now, it should be removed after a while; once + the tlv situation is stable. + */ + DHD_ERROR(("dhd_wlfc_init(): successfully %s bdcv2 tlv signaling, %d\n", + dhd->wlfc_enabled?"enabled":"disabled", tlv)); + } + return BCME_OK; +} + +int +dhd_wlfc_enable(dhd_pub_t *dhd) +{ + int i; + athost_wl_status_info_t* wlfc; + + if (!dhd->wlfc_enabled || dhd->wlfc_state) + return BCME_OK; + + /* allocate space to track txstatus propagated from firmware */ + dhd->wlfc_state = MALLOC(dhd->osh, sizeof(athost_wl_status_info_t)); + if (dhd->wlfc_state == NULL) + return BCME_NOMEM; + + /* initialize state space */ + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + memset(wlfc, 0, sizeof(athost_wl_status_info_t)); + + /* remember osh & dhdp */ + wlfc->osh = dhd->osh; + wlfc->dhdp = dhd; + + wlfc->hanger = + dhd_wlfc_hanger_create(dhd->osh, WLFC_HANGER_MAXITEMS); + if (wlfc->hanger == NULL) { + MFREE(dhd->osh, dhd->wlfc_state, sizeof(athost_wl_status_info_t)); + dhd->wlfc_state = NULL; + return BCME_NOMEM; + } + + /* initialize all interfaces to accept traffic */ + for (i = 0; i < WLFC_MAX_IFNUM; i++) { + wlfc->hostif_flow_state[i] = OFF; + } + + /* + create the SENDQ containing + sub-queues for all AC precedences + 1 for bc/mc traffic + */ + pktq_init(&wlfc->SENDQ, (AC_COUNT + 1), WLFC_SENDQ_LEN); + + wlfc->destination_entries.other.state = WLFC_STATE_OPEN; + /* bc/mc FIFO is always open [credit aside], i.e. 
b[5] */ + wlfc->destination_entries.other.ac_bitmap = 0x1f; + wlfc->destination_entries.other.interface_id = 0; + + wlfc->proptxstatus_mode = WLFC_FCMODE_EXPLICIT_CREDIT; + + wlfc->allow_credit_borrow = TRUE; + wlfc->borrow_defer_timestamp = 0; + + return BCME_OK; +} + +/* release all packet resources */ +void +dhd_wlfc_cleanup(dhd_pub_t *dhd) +{ + int i; + int total_entries; + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*) + dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + wlfc_hanger_t* h; + + if (dhd->wlfc_state == NULL) + return; + + total_entries = sizeof(wlfc->destination_entries)/sizeof(wlfc_mac_descriptor_t); + /* search all entries, include nodes as well as interfaces */ + table = (wlfc_mac_descriptor_t*)&wlfc->destination_entries; + + for (i = 0; i < total_entries; i++) { + if (table[i].occupied) { + if (table[i].psq.len) { + WLFC_DBGMESG(("%s(): DELAYQ[%d].len = %d\n", + __FUNCTION__, i, table[i].psq.len)); + /* release packets held in DELAYQ */ + pktq_flush(wlfc->osh, &table[i].psq, TRUE, NULL, 0); + } + table[i].occupied = 0; + } + } + /* release packets held in SENDQ */ + if (wlfc->SENDQ.len) + pktq_flush(wlfc->osh, &wlfc->SENDQ, TRUE, NULL, 0); + /* any in the hanger? */ + h = (wlfc_hanger_t*)wlfc->hanger; + for (i = 0; i < h->max_items; i++) { + if (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) { + PKTFREE(wlfc->osh, h->items[i].pkt, TRUE); + } + } + return; +} + +void +dhd_wlfc_deinit(dhd_pub_t *dhd) +{ + /* cleanup all psq related resources */ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*) + dhd->wlfc_state; + + if (dhd->wlfc_state == NULL) + return; + +#ifdef PROP_TXSTATUS_DEBUG + { + int i; + wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger; + for (i = 0; i < h->max_items; i++) { + if (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) { + WLFC_DBGMESG(("%s() pkt[%d] = 0x%p, FIFO_credit_used:%d\n", + __FUNCTION__, i, h->items[i].pkt, + DHD_PKTTAG_CREDITCHECK(PKTTAG(h->items[i].pkt)))); + } + } + } +#endif + /* delete hanger */ + dhd_wlfc_hanger_delete(dhd->osh, wlfc->hanger); + + /* free top structure */ + MFREE(dhd->osh, dhd->wlfc_state, sizeof(athost_wl_status_info_t)); + dhd->wlfc_state = NULL; + return; +} +#endif /* PROP_TXSTATUS */ + +void +dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + bcm_bprintf(strbuf, "Protocol CDC: reqid %d\n", dhdp->prot->reqid); +#ifdef PROP_TXSTATUS + if (dhdp->wlfc_state) + dhd_wlfc_dump(dhdp, strbuf); +#endif +} + +void +dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *pktbuf) +{ +#ifdef BDC + struct bdc_header *h; +#endif /* BDC */ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef BDC + /* Push BDC header used to convey priority for buses that don't */ + + PKTPUSH(dhd->osh, pktbuf, BDC_HEADER_LEN); + + h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf); + + h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT); + if (PKTSUMNEEDED(pktbuf)) + h->flags |= BDC_FLAG_SUM_NEEDED; + + + h->priority = (PKTPRIO(pktbuf) & BDC_PRIORITY_MASK); + h->flags2 = 0; + h->dataOffset = 0; +#endif /* BDC */ + BDC_SET_IF_IDX(h, ifidx); +} + +int +dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pktbuf) +{ +#ifdef BDC + struct bdc_header *h; +#endif + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef BDC + /* Pop BDC header used to convey priority for buses that don't */ + + if (PKTLEN(dhd->osh, pktbuf) < BDC_HEADER_LEN) { + DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__, + PKTLEN(dhd->osh, pktbuf), BDC_HEADER_LEN)); + return BCME_ERROR; + } + + h = (struct bdc_header *)PKTDATA(dhd->osh, 
pktbuf); + + if ((*ifidx = BDC_GET_IF_IDX(h)) >= DHD_MAX_IFS) { + DHD_ERROR(("%s: rx data ifnum out of range (%d)\n", + __FUNCTION__, *ifidx)); + return BCME_ERROR; + } + + if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) != BDC_PROTO_VER) { + DHD_ERROR(("%s: non-BDC packet received, flags = 0x%x\n", + dhd_ifname(dhd, *ifidx), h->flags)); + if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) == BDC_PROTO_VER_1) + h->dataOffset = 0; + else + return BCME_ERROR; + } + + if (h->flags & BDC_FLAG_SUM_GOOD) { + DHD_INFO(("%s: BDC packet received with good rx-csum, flags 0x%x\n", + dhd_ifname(dhd, *ifidx), h->flags)); + PKTSETSUMGOOD(pktbuf, TRUE); + } + + PKTSETPRIO(pktbuf, (h->priority & BDC_PRIORITY_MASK)); + PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN); +#endif /* BDC */ + + if (PKTLEN(dhd->osh, pktbuf) < (uint32) (h->dataOffset << 2)) { + DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__, + PKTLEN(dhd->osh, pktbuf), (h->dataOffset * 4))); + return BCME_ERROR; + } + +#ifdef PROP_TXSTATUS + if (dhd->wlfc_state && + ((athost_wl_status_info_t*)dhd->wlfc_state)->proptxstatus_mode + != WLFC_FCMODE_NONE && + (!DHD_PKTTAG_PKTDIR(PKTTAG(pktbuf)))) { + /* + - parse txstatus only for packets that came from the firmware + */ + dhd_os_wlfc_block(dhd); + dhd_wlfc_parse_header_info(dhd, pktbuf, (h->dataOffset << 2)); + ((athost_wl_status_info_t*)dhd->wlfc_state)->stats.dhd_hdrpulls++; + dhd_wlfc_commit_packets(dhd->wlfc_state, (f_commitpkt_t)dhd_bus_txdata, + (void *)dhd->bus); + dhd_os_wlfc_unblock(dhd); + } +#endif /* PROP_TXSTATUS */ + PKTPULL(dhd->osh, pktbuf, (h->dataOffset << 2)); + return 0; +} + +int +dhd_prot_attach(dhd_pub_t *dhd) +{ + dhd_prot_t *cdc; + + if (!(cdc = (dhd_prot_t *)DHD_OS_PREALLOC(dhd->osh, DHD_PREALLOC_PROT, + sizeof(dhd_prot_t)))) { + DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + goto fail; + } + memset(cdc, 0, sizeof(dhd_prot_t)); + + /* ensure that the msg buf directly follows the cdc msg struct */ + if ((uintptr)(&cdc->msg + 1) != (uintptr)cdc->buf) { + DHD_ERROR(("dhd_prot_t is not correctly defined\n")); + goto fail; + } + + dhd->prot = cdc; +#ifdef BDC + dhd->hdrlen += BDC_HEADER_LEN; +#endif + dhd->maxctl = WLC_IOCTL_MAXLEN + sizeof(cdc_ioctl_t) + ROUND_UP_MARGIN; + return 0; + +fail: +#ifndef CONFIG_DHD_USE_STATIC_BUF + if (cdc != NULL) + MFREE(dhd->osh, cdc, sizeof(dhd_prot_t)); +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + return BCME_NOMEM; +} + +/* ~NOTE~ What if another thread is waiting on the semaphore? Holding it? 
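+   Presumably the caller is expected to have quiesced the ioctl path first;
+   neither dhd_wlfc_deinit() nor the MFREE below takes dhd_os_proto_block().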
*/ +void +dhd_prot_detach(dhd_pub_t *dhd) +{ +#ifdef PROP_TXSTATUS + dhd_wlfc_deinit(dhd); +#endif +#ifndef CONFIG_DHD_USE_STATIC_BUF + MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t)); +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + dhd->prot = NULL; +} + +void +dhd_prot_dstats(dhd_pub_t *dhd) +{ + /* No stats from dongle added yet, copy bus stats */ + dhd->dstats.tx_packets = dhd->tx_packets; + dhd->dstats.tx_errors = dhd->tx_errors; + dhd->dstats.rx_packets = dhd->rx_packets; + dhd->dstats.rx_errors = dhd->rx_errors; + dhd->dstats.rx_dropped = dhd->rx_dropped; + dhd->dstats.multicast = dhd->rx_multicast; + return; +} + +int +dhd_prot_init(dhd_pub_t *dhd) +{ + int ret = 0; + wlc_rev_info_t revinfo; + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + + /* Get the device rev info */ + memset(&revinfo, 0, sizeof(revinfo)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0); + if (ret < 0) + goto done; + + +#ifdef PROP_TXSTATUS + ret = dhd_wlfc_init(dhd); +#endif + +#if defined(WL_CFG80211) + if (dhd_download_fw_on_driverload) +#endif /* defined(WL_CFG80211) */ + ret = dhd_preinit_ioctls(dhd); + + /* Always assumes wl for now */ + dhd->iswl = TRUE; + +done: + return ret; +} + +void +dhd_prot_stop(dhd_pub_t *dhd) +{ + /* Nothing to do for CDC */ +} diff --git a/drivers/net/wireless/bcmdhd/dhd_cfg80211.c b/drivers/net/wireless/bcmdhd/dhd_cfg80211.c new file mode 100644 index 0000000000000..351c372ffa817 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_cfg80211.c @@ -0,0 +1,660 @@ +/* + * Linux cfg80211 driver - Dongle Host Driver (DHD) related + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: wl_cfg80211.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $ + */ + +#include + +#include +#include +#include +#include +extern struct wl_priv *wlcfg_drv_priv; +static int dhd_dongle_up = FALSE; + +#include +#include +#include +#include +#include + +static s32 wl_dongle_up(struct net_device *ndev, u32 up); + +/** + * Function implementations + */ + +s32 dhd_cfg80211_init(struct wl_priv *wl) +{ + dhd_dongle_up = FALSE; + return 0; +} + +s32 dhd_cfg80211_deinit(struct wl_priv *wl) +{ + dhd_dongle_up = FALSE; + return 0; +} + +s32 dhd_cfg80211_get_opmode(struct wl_priv *wl) +{ + dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub); + return dhd->op_mode; +} + +s32 dhd_cfg80211_down(struct wl_priv *wl) +{ + dhd_dongle_up = FALSE; + return 0; +} + +/* + * dhd_cfg80211_set_p2p_info : gets called when GO or GC created + */ +s32 dhd_cfg80211_set_p2p_info(struct wl_priv *wl, int val) +{ + dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub); + int bcn_timeout = DHD_BEACON_TIMEOUT_HIGH; + char iovbuf[30]; + + dhd->op_mode |= val; + WL_ERR(("Set : op_mode=%d\n", dhd->op_mode)); + +#ifdef ARP_OFFLOAD_SUPPORT + /* IF P2P is enabled, disable arpoe */ + dhd_arp_offload_set(dhd, 0); + dhd_arp_offload_enable(dhd, false); +#endif /* ARP_OFFLOAD_SUPPORT */ + /* diable all filtering in p2p mode */ + dhd_os_set_packet_filter(dhd, 0); + + /* Setup timeout if Beacons are lost and roam is off to report link down */ + bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + + + return 0; +} + +/* + * dhd_cfg80211_clean_p2p_info : gets called when GO or GC terminated + */ +s32 dhd_cfg80211_clean_p2p_info(struct wl_priv *wl) +{ + dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub); + int bcn_timeout = DHD_BEACON_TIMEOUT_NORMAL; + char iovbuf[30]; + + dhd->op_mode &= ~CONCURENT_MASK; + WL_ERR(("Clean : op_mode=%d\n", dhd->op_mode)); + +#ifdef ARP_OFFLOAD_SUPPORT + /* IF P2P is disabled, enable arpoe back for STA mode. 
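+	   This undoes dhd_cfg80211_set_p2p_info() above, which disabled ARP
+	   offload and packet filtering while a GO/GC was active.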
*/ + dhd_arp_offload_set(dhd, dhd_arp_mode); + dhd_arp_offload_enable(dhd, true); +#endif /* ARP_OFFLOAD_SUPPORT */ + dhd_os_set_packet_filter(dhd, 1); + + /* Setup timeout if Beacons are lost and roam is off to report link down */ + bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + + return 0; +} + +static s32 wl_dongle_up(struct net_device *ndev, u32 up) +{ + s32 err = 0; + + err = wldev_ioctl(ndev, WLC_UP, &up, sizeof(up), true); + if (unlikely(err)) { + WL_ERR(("WLC_UP error (%d)\n", err)); + } + return err; +} + +s32 dhd_config_dongle(struct wl_priv *wl, bool need_lock) +{ +#ifndef DHD_SDALIGN +#define DHD_SDALIGN 32 +#endif + struct net_device *ndev; + s32 err = 0; + + WL_TRACE(("In\n")); + if (dhd_dongle_up) { + WL_ERR(("Dongle is already up\n")); + return err; + } + + ndev = wl_to_prmry_ndev(wl); + + if (need_lock) + rtnl_lock(); + + err = wl_dongle_up(ndev, 0); + if (unlikely(err)) { + WL_ERR(("wl_dongle_up failed\n")); + goto default_conf_out; + } + dhd_dongle_up = true; + +default_conf_out: + if (need_lock) + rtnl_unlock(); + return err; + +} + + +/* TODO: clean up the BT-Coex code, it still have some legacy ioctl/iovar functions */ +#define COEX_DHCP + +#if defined(COEX_DHCP) + +/* use New SCO/eSCO smart YG suppression */ +#define BT_DHCP_eSCO_FIX +/* this flag boost wifi pkt priority to max, caution: -not fair to sco */ +#define BT_DHCP_USE_FLAGS +/* T1 start SCO/ESCo priority suppression */ +#define BT_DHCP_OPPR_WIN_TIME 2500 +/* T2 turn off SCO/SCO supperesion is (timeout) */ +#define BT_DHCP_FLAG_FORCE_TIME 5500 + +enum wl_cfg80211_btcoex_status { + BT_DHCP_IDLE, + BT_DHCP_START, + BT_DHCP_OPPR_WIN, + BT_DHCP_FLAG_FORCE_TIMEOUT +}; + +/* + * get named driver variable to uint register value and return error indication + * calling example: dev_wlc_intvar_get_reg(dev, "btc_params",66, ®_value) + */ +static int +dev_wlc_intvar_get_reg(struct net_device *dev, char *name, + uint reg, int *retval) +{ + union { + char buf[WLC_IOCTL_SMLEN]; + int val; + } var; + int error; + + bcm_mkiovar(name, (char *)(®), sizeof(reg), + (char *)(&var), sizeof(var.buf)); + error = wldev_ioctl(dev, WLC_GET_VAR, (char *)(&var), sizeof(var.buf), false); + + *retval = dtoh32(var.val); + return (error); +} + +static int +dev_wlc_bufvar_set(struct net_device *dev, char *name, char *buf, int len) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) + char ioctlbuf_local[1024]; +#else + static char ioctlbuf_local[1024]; +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */ + + bcm_mkiovar(name, buf, len, ioctlbuf_local, sizeof(ioctlbuf_local)); + + return (wldev_ioctl(dev, WLC_SET_VAR, ioctlbuf_local, sizeof(ioctlbuf_local), true)); +} +/* +get named driver variable to uint register value and return error indication +calling example: dev_wlc_intvar_set_reg(dev, "btc_params",66, value) +*/ +static int +dev_wlc_intvar_set_reg(struct net_device *dev, char *name, char *addr, char * val) +{ + char reg_addr[8]; + + memset(reg_addr, 0, sizeof(reg_addr)); + memcpy((char *)®_addr[0], (char *)addr, 4); + memcpy((char *)®_addr[4], (char *)val, 4); + + return (dev_wlc_bufvar_set(dev, name, (char *)®_addr[0], sizeof(reg_addr))); +} + +static bool btcoex_is_sco_active(struct net_device *dev) +{ + int ioc_res = 0; + bool res = FALSE; + int sco_id_cnt = 0; + int param27; + int i; + + for (i = 0; i < 12; i++) { + + ioc_res = dev_wlc_intvar_get_reg(dev, "btc_params", 27, ¶m27); + + WL_TRACE(("%s, sample[%d], btc 
params: 27:%x\n", + __FUNCTION__, i, param27)); + + if (ioc_res < 0) { + WL_ERR(("%s ioc read btc params error\n", __FUNCTION__)); + break; + } + + if ((param27 & 0x6) == 2) { /* count both sco & esco */ + sco_id_cnt++; + } + + if (sco_id_cnt > 2) { + WL_TRACE(("%s, sco/esco detected, pkt id_cnt:%d samples:%d\n", + __FUNCTION__, sco_id_cnt, i)); + res = TRUE; + break; + } + + msleep(5); + } + + return res; +} + +#if defined(BT_DHCP_eSCO_FIX) +/* Enhanced BT COEX settings for eSCO compatibility during DHCP window */ +static int set_btc_esco_params(struct net_device *dev, bool trump_sco) +{ + static bool saved_status = FALSE; + + char buf_reg50va_dhcp_on[8] = + { 50, 00, 00, 00, 0x22, 0x80, 0x00, 0x00 }; + char buf_reg51va_dhcp_on[8] = + { 51, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 }; + char buf_reg64va_dhcp_on[8] = + { 64, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 }; + char buf_reg65va_dhcp_on[8] = + { 65, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 }; + char buf_reg71va_dhcp_on[8] = + { 71, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 }; + uint32 regaddr; + static uint32 saved_reg50; + static uint32 saved_reg51; + static uint32 saved_reg64; + static uint32 saved_reg65; + static uint32 saved_reg71; + + if (trump_sco) { + /* this should reduce eSCO agressive retransmit + * w/o breaking it + */ + + /* 1st save current */ + WL_TRACE(("Do new SCO/eSCO coex algo {save &" + "override}\n")); + if ((!dev_wlc_intvar_get_reg(dev, "btc_params", 50, &saved_reg50)) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 51, &saved_reg51)) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 64, &saved_reg64)) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 65, &saved_reg65)) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 71, &saved_reg71))) { + saved_status = TRUE; + WL_TRACE(("%s saved bt_params[50,51,64,65,71]:" + "0x%x 0x%x 0x%x 0x%x 0x%x\n", + __FUNCTION__, saved_reg50, saved_reg51, + saved_reg64, saved_reg65, saved_reg71)); + } else { + WL_ERR((":%s: save btc_params failed\n", + __FUNCTION__)); + saved_status = FALSE; + return -1; + } + + WL_TRACE(("override with [50,51,64,65,71]:" + "0x%x 0x%x 0x%x 0x%x 0x%x\n", + *(u32 *)(buf_reg50va_dhcp_on+4), + *(u32 *)(buf_reg51va_dhcp_on+4), + *(u32 *)(buf_reg64va_dhcp_on+4), + *(u32 *)(buf_reg65va_dhcp_on+4), + *(u32 *)(buf_reg71va_dhcp_on+4))); + + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg50va_dhcp_on[0], 8); + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg51va_dhcp_on[0], 8); + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg64va_dhcp_on[0], 8); + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg65va_dhcp_on[0], 8); + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg71va_dhcp_on[0], 8); + + saved_status = TRUE; + } else if (saved_status) { + /* restore previously saved bt params */ + WL_TRACE(("Do new SCO/eSCO coex algo {save &" + "override}\n")); + + regaddr = 50; + dev_wlc_intvar_set_reg(dev, "btc_params", + (char *)®addr, (char *)&saved_reg50); + regaddr = 51; + dev_wlc_intvar_set_reg(dev, "btc_params", + (char *)®addr, (char *)&saved_reg51); + regaddr = 64; + dev_wlc_intvar_set_reg(dev, "btc_params", + (char *)®addr, (char *)&saved_reg64); + regaddr = 65; + dev_wlc_intvar_set_reg(dev, "btc_params", + (char *)®addr, (char *)&saved_reg65); + regaddr = 71; + dev_wlc_intvar_set_reg(dev, "btc_params", + (char *)®addr, (char *)&saved_reg71); + + WL_TRACE(("restore bt_params[50,51,64,65,71]:" + "0x%x 0x%x 0x%x 0x%x 0x%x\n", + saved_reg50, saved_reg51, saved_reg64, + saved_reg65, saved_reg71)); + + saved_status = FALSE; + } else { + WL_ERR((":%s att 
to restore not saved BTCOEX params\n", + __FUNCTION__)); + return -1; + } + return 0; +} +#endif /* BT_DHCP_eSCO_FIX */ + +static void +wl_cfg80211_bt_setflag(struct net_device *dev, bool set) +{ +#if defined(BT_DHCP_USE_FLAGS) + char buf_flag7_dhcp_on[8] = { 7, 00, 00, 00, 0x1, 0x0, 0x00, 0x00 }; + char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00}; +#endif + + +#if defined(BT_DHCP_eSCO_FIX) + /* set = 1, save & turn on 0 - off & restore prev settings */ + set_btc_esco_params(dev, set); +#endif + +#if defined(BT_DHCP_USE_FLAGS) + WL_TRACE(("WI-FI priority boost via bt flags, set:%d\n", set)); + if (set == TRUE) + /* Forcing bt_flag7 */ + dev_wlc_bufvar_set(dev, "btc_flags", + (char *)&buf_flag7_dhcp_on[0], + sizeof(buf_flag7_dhcp_on)); + else + /* Restoring default bt flag7 */ + dev_wlc_bufvar_set(dev, "btc_flags", + (char *)&buf_flag7_default[0], + sizeof(buf_flag7_default)); +#endif +} + +static void wl_cfg80211_bt_timerfunc(ulong data) +{ + struct btcoex_info *bt_local = (struct btcoex_info *)data; + WL_TRACE(("%s\n", __FUNCTION__)); + bt_local->timer_on = 0; + schedule_work(&bt_local->work); +} + +static void wl_cfg80211_bt_handler(struct work_struct *work) +{ + struct btcoex_info *btcx_inf; + + btcx_inf = container_of(work, struct btcoex_info, work); + + if (btcx_inf->timer_on) { + btcx_inf->timer_on = 0; + del_timer_sync(&btcx_inf->timer); + } + + switch (btcx_inf->bt_state) { + case BT_DHCP_START: + /* DHCP started + * provide OPPORTUNITY window to get DHCP address + */ + WL_TRACE(("%s bt_dhcp stm: started \n", + __FUNCTION__)); + btcx_inf->bt_state = BT_DHCP_OPPR_WIN; + mod_timer(&btcx_inf->timer, + jiffies + msecs_to_jiffies(BT_DHCP_OPPR_WIN_TIME)); + btcx_inf->timer_on = 1; + break; + + case BT_DHCP_OPPR_WIN: + if (btcx_inf->dhcp_done) { + WL_TRACE(("%s DHCP Done before T1 expiration\n", + __FUNCTION__)); + goto btc_coex_idle; + } + + /* DHCP is not over yet, start lowering BT priority + * enforce btc_params + flags if necessary + */ + WL_TRACE(("%s DHCP T1:%d expired\n", __FUNCTION__, + BT_DHCP_OPPR_WIN_TIME)); + if (btcx_inf->dev) + wl_cfg80211_bt_setflag(btcx_inf->dev, TRUE); + btcx_inf->bt_state = BT_DHCP_FLAG_FORCE_TIMEOUT; + mod_timer(&btcx_inf->timer, + jiffies + msecs_to_jiffies(BT_DHCP_FLAG_FORCE_TIME)); + btcx_inf->timer_on = 1; + break; + + case BT_DHCP_FLAG_FORCE_TIMEOUT: + if (btcx_inf->dhcp_done) { + WL_TRACE(("%s DHCP Done before T2 expiration\n", + __FUNCTION__)); + } else { + /* Noo dhcp during T1+T2, restore BT priority */ + WL_TRACE(("%s DHCP wait interval T2:%d" + "msec expired\n", __FUNCTION__, + BT_DHCP_FLAG_FORCE_TIME)); + } + + /* Restoring default bt priority */ + if (btcx_inf->dev) + wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE); +btc_coex_idle: + btcx_inf->bt_state = BT_DHCP_IDLE; + btcx_inf->timer_on = 0; + break; + + default: + WL_ERR(("%s error g_status=%d !!!\n", __FUNCTION__, + btcx_inf->bt_state)); + if (btcx_inf->dev) + wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE); + btcx_inf->bt_state = BT_DHCP_IDLE; + btcx_inf->timer_on = 0; + break; + } + + net_os_wake_unlock(btcx_inf->dev); +} + +int wl_cfg80211_btcoex_init(struct wl_priv *wl) +{ + struct btcoex_info *btco_inf = NULL; + + btco_inf = kmalloc(sizeof(struct btcoex_info), GFP_KERNEL); + if (!btco_inf) + return -ENOMEM; + + btco_inf->bt_state = BT_DHCP_IDLE; + btco_inf->ts_dhcp_start = 0; + btco_inf->ts_dhcp_ok = 0; + /* Set up timer for BT */ + btco_inf->timer_ms = 10; + init_timer(&btco_inf->timer); + btco_inf->timer.data = (ulong)btco_inf; + btco_inf->timer.function = 
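+	/*
+	 * The timer fires wl_cfg80211_bt_timerfunc(), which schedules
+	 * wl_cfg80211_bt_handler() above. Summary of that state machine:
+	 * BT_DHCP_START arms T1 and enters BT_DHCP_OPPR_WIN; if DHCP has not
+	 * finished when T1 (BT_DHCP_OPPR_WIN_TIME) expires, BT flags are
+	 * forced and T2 (BT_DHCP_FLAG_FORCE_TIME) is armed; when T2 expires
+	 * the flags are restored and the state returns to BT_DHCP_IDLE.
+	 */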
wl_cfg80211_bt_timerfunc; + + btco_inf->dev = wl->wdev->netdev; + + INIT_WORK(&btco_inf->work, wl_cfg80211_bt_handler); + + wl->btcoex_info = btco_inf; + return 0; +} + +void wl_cfg80211_btcoex_deinit(struct wl_priv *wl) +{ + if (!wl->btcoex_info) + return; + + if (!wl->btcoex_info->timer_on) { + wl->btcoex_info->timer_on = 0; + del_timer_sync(&wl->btcoex_info->timer); + } + + cancel_work_sync(&wl->btcoex_info->work); + + kfree(wl->btcoex_info); + wl->btcoex_info = NULL; +} + +int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, char *command) +{ + + struct wl_priv *wl = wlcfg_drv_priv; + char powermode_val = 0; + char buf_reg66va_dhcp_on[8] = { 66, 00, 00, 00, 0x10, 0x27, 0x00, 0x00 }; + char buf_reg41va_dhcp_on[8] = { 41, 00, 00, 00, 0x33, 0x00, 0x00, 0x00 }; + char buf_reg68va_dhcp_on[8] = { 68, 00, 00, 00, 0x90, 0x01, 0x00, 0x00 }; + + uint32 regaddr; + static uint32 saved_reg66; + static uint32 saved_reg41; + static uint32 saved_reg68; + static bool saved_status = FALSE; + +#ifdef COEX_DHCP + char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00}; + struct btcoex_info *btco_inf = wl->btcoex_info; +#endif /* COEX_DHCP */ + + /* Figure out powermode 1 or o command */ + strncpy((char *)&powermode_val, command + strlen("BTCOEXMODE") +1, 1); + + if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) { + + WL_TRACE(("%s: DHCP session starts\n", __FUNCTION__)); + + /* Retrieve and saved orig regs value */ + if ((saved_status == FALSE) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 66, &saved_reg66)) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 41, &saved_reg41)) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 68, &saved_reg68))) { + saved_status = TRUE; + WL_TRACE(("Saved 0x%x 0x%x 0x%x\n", + saved_reg66, saved_reg41, saved_reg68)); + + /* Disable PM mode during dhpc session */ + + /* Disable PM mode during dhpc session */ +#ifdef COEX_DHCP + /* Start BT timer only for SCO connection */ + if (btcoex_is_sco_active(dev)) { + /* btc_params 66 */ + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg66va_dhcp_on[0], + sizeof(buf_reg66va_dhcp_on)); + /* btc_params 41 0x33 */ + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg41va_dhcp_on[0], + sizeof(buf_reg41va_dhcp_on)); + /* btc_params 68 0x190 */ + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg68va_dhcp_on[0], + sizeof(buf_reg68va_dhcp_on)); + saved_status = TRUE; + + btco_inf->bt_state = BT_DHCP_START; + btco_inf->timer_on = 1; + mod_timer(&btco_inf->timer, btco_inf->timer.expires); + WL_TRACE(("%s enable BT DHCP Timer\n", + __FUNCTION__)); + } +#endif /* COEX_DHCP */ + } + else if (saved_status == TRUE) { + WL_ERR(("%s was called w/o DHCP OFF. 
Continue\n", __FUNCTION__)); + } + } + else if (strnicmp((char *)&powermode_val, "2", strlen("2")) == 0) { + + + /* Restoring PM mode */ + +#ifdef COEX_DHCP + /* Stop any bt timer because DHCP session is done */ + WL_TRACE(("%s disable BT DHCP Timer\n", __FUNCTION__)); + if (btco_inf->timer_on) { + btco_inf->timer_on = 0; + del_timer_sync(&btco_inf->timer); + + if (btco_inf->bt_state != BT_DHCP_IDLE) { + /* need to restore original btc flags & extra btc params */ + WL_TRACE(("%s bt->bt_state:%d\n", + __FUNCTION__, btco_inf->bt_state)); + /* wake up btcoex thread to restore btlags+params */ + schedule_work(&btco_inf->work); + } + } + + /* Restoring btc_flag paramter anyway */ + if (saved_status == TRUE) + dev_wlc_bufvar_set(dev, "btc_flags", + (char *)&buf_flag7_default[0], sizeof(buf_flag7_default)); +#endif /* COEX_DHCP */ + + /* Restore original values */ + if (saved_status == TRUE) { + regaddr = 66; + dev_wlc_intvar_set_reg(dev, "btc_params", + (char *)®addr, (char *)&saved_reg66); + regaddr = 41; + dev_wlc_intvar_set_reg(dev, "btc_params", + (char *)®addr, (char *)&saved_reg41); + regaddr = 68; + dev_wlc_intvar_set_reg(dev, "btc_params", + (char *)®addr, (char *)&saved_reg68); + + WL_TRACE(("restore regs {66,41,68} <- 0x%x 0x%x 0x%x\n", + saved_reg66, saved_reg41, saved_reg68)); + } + saved_status = FALSE; + + } + else { + WL_ERR(("%s Unkwown yet power setting, ignored\n", + __FUNCTION__)); + } + + snprintf(command, 3, "OK"); + + return (strlen("OK")); +} +#endif diff --git a/drivers/net/wireless/bcmdhd/dhd_cfg80211.h b/drivers/net/wireless/bcmdhd/dhd_cfg80211.h new file mode 100644 index 0000000000000..ced46dbdb96c2 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_cfg80211.h @@ -0,0 +1,45 @@ +/* + * Linux cfg80211 driver - Dongle Host Driver (DHD) related + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: wl_cfg80211.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $ + */ + + +#ifndef __DHD_CFG80211__ +#define __DHD_CFG80211__ + +#include +#include + +s32 dhd_cfg80211_init(struct wl_priv *wl); +s32 dhd_cfg80211_deinit(struct wl_priv *wl); +s32 dhd_cfg80211_get_opmode(struct wl_priv *wl); +s32 dhd_cfg80211_down(struct wl_priv *wl); +s32 dhd_cfg80211_set_p2p_info(struct wl_priv *wl, int val); +s32 dhd_cfg80211_clean_p2p_info(struct wl_priv *wl); +s32 dhd_config_dongle(struct wl_priv *wl, bool need_lock); + +int wl_cfg80211_btcoex_init(struct wl_priv *wl); +void wl_cfg80211_btcoex_deinit(struct wl_priv *wl); + +#endif /* __DHD_CFG80211__ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_common.c b/drivers/net/wireless/bcmdhd/dhd_common.c new file mode 100644 index 0000000000000..d5af27f40b76a --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_common.c @@ -0,0 +1,2451 @@ +/* + * Broadcom Dongle Host Driver (DHD), common DHD core. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: dhd_common.c 331276 2012-05-04 08:05:57Z $ + */ +#include +#include + +#include +#include + +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#ifdef WL_CFG80211 +#include +#endif +#include +#include +#ifdef SET_RANDOM_MAC_SOFTAP +#include +#include +#endif + +#ifdef PROP_TXSTATUS +#include +#include +#endif + + +#ifdef WLMEDIA_HTSF +extern void htsf_update(struct dhd_info *dhd, void *data); +#endif +int dhd_msg_level = DHD_ERROR_VAL; + + +#include + +char fw_path[MOD_PARAM_PATHLEN]; +char nv_path[MOD_PARAM_PATHLEN]; + +#ifdef SOFTAP +char fw_path2[MOD_PARAM_PATHLEN]; +extern bool softap_enabled; +#endif + +/* Last connection success/failure status */ +uint32 dhd_conn_event; +uint32 dhd_conn_status; +uint32 dhd_conn_reason; + +#define htod32(i) i +#define htod16(i) i +#define dtoh32(i) i +#define dtoh16(i) i +extern int dhd_iscan_request(void * dhdp, uint16 action); +extern void dhd_ind_scan_confirm(void *h, bool status); +extern int dhd_iscan_in_progress(void *h); +void dhd_iscan_lock(void); +void dhd_iscan_unlock(void); +extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx); +bool ap_cfg_running = FALSE; +bool ap_fw_loaded = FALSE; + + +#ifdef DHD_DEBUG +const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR "\nCompiled on " + __DATE__ " at " __TIME__; +#else +const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR; +#endif + +void dhd_set_timer(void *bus, uint wdtick); + +/* IOVar table */ +enum { + IOV_VERSION = 1, + IOV_MSGLEVEL, + IOV_BCMERRORSTR, + IOV_BCMERROR, + IOV_WDTICK, + IOV_DUMP, + IOV_CLEARCOUNTS, + IOV_LOGDUMP, + IOV_LOGCAL, + IOV_LOGSTAMP, + IOV_GPIOOB, + IOV_IOCTLTIMEOUT, + IOV_HCI_CMD, /* HCI command */ + IOV_HCI_ACL_DATA, /* HCI data packet */ +#if defined(DHD_DEBUG) + IOV_CONS, + IOV_DCONSOLE_POLL, +#endif /* defined(DHD_DEBUG) */ +#ifdef PROP_TXSTATUS + IOV_PROPTXSTATUS_ENABLE, + IOV_PROPTXSTATUS_MODE, +#endif + IOV_BUS_TYPE, +#ifdef WLMEDIA_HTSF + IOV_WLPKTDLYSTAT_SZ, +#endif + IOV_CHANGEMTU, + IOV_LAST +}; + +const bcm_iovar_t dhd_iovars[] = { + {"version", IOV_VERSION, 0, IOVT_BUFFER, sizeof(dhd_version) }, +#ifdef DHD_DEBUG + {"msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 }, +#endif /* DHD_DEBUG */ + {"bcmerrorstr", IOV_BCMERRORSTR, 0, IOVT_BUFFER, BCME_STRLEN }, + {"bcmerror", IOV_BCMERROR, 0, IOVT_INT8, 0 }, + {"wdtick", IOV_WDTICK, 0, IOVT_UINT32, 0 }, + {"dump", IOV_DUMP, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN }, +#ifdef DHD_DEBUG + {"cons", IOV_CONS, 0, IOVT_BUFFER, 0 }, + {"dconpoll", IOV_DCONSOLE_POLL, 0, IOVT_UINT32, 0 }, +#endif + {"clearcounts", IOV_CLEARCOUNTS, 0, IOVT_VOID, 0 }, + {"gpioob", IOV_GPIOOB, 0, IOVT_UINT32, 0 }, + {"ioctl_timeout", IOV_IOCTLTIMEOUT, 0, IOVT_UINT32, 0 }, + {"HCI_cmd", IOV_HCI_CMD, 0, IOVT_BUFFER, 0}, + {"HCI_ACL_data", IOV_HCI_ACL_DATA, 0, IOVT_BUFFER, 0}, +#ifdef PROP_TXSTATUS + {"proptx", IOV_PROPTXSTATUS_ENABLE, 0, IOVT_UINT32, 0 }, + /* + set the proptxtstatus operation mode: + 0 - Do not do any proptxtstatus flow control + 1 - Use implied credit from a packet status + 2 - Use explicit credit + */ + {"ptxmode", IOV_PROPTXSTATUS_MODE, 0, IOVT_UINT32, 0 }, +#endif + {"bustype", IOV_BUS_TYPE, 0, IOVT_UINT32, 0}, +#ifdef WLMEDIA_HTSF + {"pktdlystatsz", IOV_WLPKTDLYSTAT_SZ, 0, IOVT_UINT8, 0 }, +#endif + {"changemtu", IOV_CHANGEMTU, 0, IOVT_UINT32, 0 }, + {NULL, 0, 0, 0, 0 } +}; + +struct dhd_cmn * +dhd_common_init(osl_t *osh) +{ + dhd_cmn_t *cmn; + + /* Init global variables at run-time, not as part of the declaration. 
+ * This is required to support init/de-init of the driver. Initialization + * of globals as part of the declaration results in non-deterministic + * behavior since the value of the globals may be different on the + * first time that the driver is initialized vs subsequent initializations. + */ + /* Allocate private bus interface state */ + if (!(cmn = MALLOC(osh, sizeof(dhd_cmn_t)))) { + DHD_ERROR(("%s: MALLOC failed\n", __FUNCTION__)); + return NULL; + } + memset(cmn, 0, sizeof(dhd_cmn_t)); + cmn->osh = osh; + +#ifdef CONFIG_BCMDHD_FW_PATH + bcm_strncpy_s(fw_path, sizeof(fw_path), CONFIG_BCMDHD_FW_PATH, MOD_PARAM_PATHLEN-1); +#else /* CONFIG_BCMDHD_FW_PATH */ + fw_path[0] = '\0'; +#endif /* CONFIG_BCMDHD_FW_PATH */ +#ifdef CONFIG_BCMDHD_NVRAM_PATH + bcm_strncpy_s(nv_path, sizeof(nv_path), CONFIG_BCMDHD_NVRAM_PATH, MOD_PARAM_PATHLEN-1); +#else /* CONFIG_BCMDHD_NVRAM_PATH */ + nv_path[0] = '\0'; +#endif /* CONFIG_BCMDHD_NVRAM_PATH */ +#ifdef SOFTAP + fw_path2[0] = '\0'; +#endif + return cmn; +} + +void +dhd_common_deinit(dhd_pub_t *dhd_pub, dhd_cmn_t *sa_cmn) +{ + osl_t *osh; + dhd_cmn_t *cmn; + + if (dhd_pub != NULL) + cmn = dhd_pub->cmn; + else + cmn = sa_cmn; + + if (!cmn) + return; + + osh = cmn->osh; + + if (dhd_pub != NULL) + dhd_pub->cmn = NULL; + MFREE(osh, cmn, sizeof(dhd_cmn_t)); +} + +static int +dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen) +{ + char eabuf[ETHER_ADDR_STR_LEN]; + + struct bcmstrbuf b; + struct bcmstrbuf *strbuf = &b; + + bcm_binit(strbuf, buf, buflen); + + /* Base DHD info */ + bcm_bprintf(strbuf, "%s\n", dhd_version); + bcm_bprintf(strbuf, "\n"); + bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n", + dhdp->up, dhdp->txoff, dhdp->busstate); + bcm_bprintf(strbuf, "pub.hdrlen %d pub.maxctl %d pub.rxsz %d\n", + dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz); + bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac %s\n", + dhdp->iswl, dhdp->drv_version, bcm_ether_ntoa(&dhdp->mac, eabuf)); + bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %d\n", dhdp->bcmerror, dhdp->tickcnt); + + bcm_bprintf(strbuf, "dongle stats:\n"); + bcm_bprintf(strbuf, "tx_packets %ld tx_bytes %ld tx_errors %ld tx_dropped %ld\n", + dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes, + dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped); + bcm_bprintf(strbuf, "rx_packets %ld rx_bytes %ld rx_errors %ld rx_dropped %ld\n", + dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes, + dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped); + bcm_bprintf(strbuf, "multicast %ld\n", dhdp->dstats.multicast); + + bcm_bprintf(strbuf, "bus stats:\n"); + bcm_bprintf(strbuf, "tx_packets %ld tx_multicast %ld tx_errors %ld\n", + dhdp->tx_packets, dhdp->tx_multicast, dhdp->tx_errors); + bcm_bprintf(strbuf, "tx_ctlpkts %ld tx_ctlerrs %ld\n", + dhdp->tx_ctlpkts, dhdp->tx_ctlerrs); + bcm_bprintf(strbuf, "rx_packets %ld rx_multicast %ld rx_errors %ld \n", + dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors); + bcm_bprintf(strbuf, "rx_ctlpkts %ld rx_ctlerrs %ld rx_dropped %ld\n", + dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped); + bcm_bprintf(strbuf, "rx_readahead_cnt %ld tx_realloc %ld\n", + dhdp->rx_readahead_cnt, dhdp->tx_realloc); + bcm_bprintf(strbuf, "\n"); + + /* Add any prot info */ + dhd_prot_dump(dhdp, strbuf); + bcm_bprintf(strbuf, "\n"); + + /* Add any bus info */ + dhd_bus_dump(dhdp, strbuf); + + return (!strbuf->size ? 
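+	    /* !size presumably means bcm_bprintf() exhausted the caller's buffer */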
BCME_BUFTOOSHORT : 0); +} + +int +dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifindex) +{ + wl_ioctl_t ioc; + + ioc.cmd = cmd; + ioc.buf = arg; + ioc.len = len; + ioc.set = set; + + return dhd_wl_ioctl(dhd_pub, ifindex, &ioc, arg, len); +} + + +int +dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len) +{ + int ret; + + dhd_os_proto_block(dhd_pub); + + ret = dhd_prot_ioctl(dhd_pub, ifindex, ioc, buf, len); + if (ret) + dhd_os_check_hang(dhd_pub, ifindex, ret); + + dhd_os_proto_unblock(dhd_pub); + return ret; +} + +static int +dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name, + void *params, int plen, void *arg, int len, int val_size) +{ + int bcmerror = 0; + int32 int_val = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__, actionid, name)); + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0) + goto exit; + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + switch (actionid) { + case IOV_GVAL(IOV_VERSION): + /* Need to have checked buffer length */ + bcm_strncpy_s((char*)arg, len, dhd_version, len); + break; + + case IOV_GVAL(IOV_MSGLEVEL): + int_val = (int32)dhd_msg_level; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_MSGLEVEL): + dhd_msg_level = int_val; +#ifdef WL_CFG80211 + /* Enable DHD and WL logs in oneshot */ + if (dhd_msg_level & DHD_WL_VAL) + wl_cfg80211_enable_trace(dhd_msg_level); +#endif + break; + case IOV_GVAL(IOV_BCMERRORSTR): + bcm_strncpy_s((char *)arg, len, bcmerrorstr(dhd_pub->bcmerror), BCME_STRLEN); + ((char *)arg)[BCME_STRLEN - 1] = 0x00; + break; + + case IOV_GVAL(IOV_BCMERROR): + int_val = (int32)dhd_pub->bcmerror; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_WDTICK): + int_val = (int32)dhd_watchdog_ms; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_WDTICK): + if (!dhd_pub->up) { + bcmerror = BCME_NOTUP; + break; + } + dhd_os_wd_timer(dhd_pub, (uint)int_val); + break; + + case IOV_GVAL(IOV_DUMP): + bcmerror = dhd_dump(dhd_pub, arg, len); + break; + +#ifdef DHD_DEBUG + case IOV_GVAL(IOV_DCONSOLE_POLL): + int_val = (int32)dhd_console_ms; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DCONSOLE_POLL): + dhd_console_ms = (uint)int_val; + break; + + case IOV_SVAL(IOV_CONS): + if (len > 0) + bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1); + break; +#endif /* DHD_DEBUG */ + + case IOV_SVAL(IOV_CLEARCOUNTS): + dhd_pub->tx_packets = dhd_pub->rx_packets = 0; + dhd_pub->tx_errors = dhd_pub->rx_errors = 0; + dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0; + dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0; + dhd_pub->rx_dropped = 0; + dhd_pub->rx_readahead_cnt = 0; + dhd_pub->tx_realloc = 0; + dhd_pub->wd_dpc_sched = 0; + memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats)); + dhd_bus_clearcounts(dhd_pub); +#ifdef PROP_TXSTATUS + /* clear proptxstatus related counters */ + if (dhd_pub->wlfc_state) { + athost_wl_status_info_t *wlfc = + (athost_wl_status_info_t*)dhd_pub->wlfc_state; + wlfc_hanger_t* hanger; + + memset(&wlfc->stats, 0, sizeof(athost_wl_stat_counters_t)); + + hanger = (wlfc_hanger_t*)wlfc->hanger; + hanger->pushed = 0; + hanger->popped = 0; + hanger->failed_slotfind = 0; + hanger->failed_to_pop = 0; + hanger->failed_to_push = 0; + } +#endif /* PROP_TXSTATUS */ + break; + + + case IOV_GVAL(IOV_IOCTLTIMEOUT): { + int_val = (int32)dhd_os_get_ioctl_resp_timeout(); + 
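+		/* GET: hand the current ioctl response timeout back in the
+		 * caller-supplied buffer, as with the other integer iovars. */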
bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_IOCTLTIMEOUT): { + if (int_val <= 0) + bcmerror = BCME_BADARG; + else + dhd_os_set_ioctl_resp_timeout((unsigned int)int_val); + break; + } + + case IOV_SVAL(IOV_HCI_CMD): { + amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)arg; + + /* sanity check: command preamble present */ + if (len < HCI_CMD_PREAMBLE_SIZE) + return BCME_BUFTOOSHORT; + + /* sanity check: command parameters are present */ + if (len < (int)(HCI_CMD_PREAMBLE_SIZE + cmd->plen)) + return BCME_BUFTOOSHORT; + + dhd_bta_docmd(dhd_pub, cmd, len); + break; + } + + case IOV_SVAL(IOV_HCI_ACL_DATA): { + amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)arg; + + /* sanity check: HCI header present */ + if (len < HCI_ACL_DATA_PREAMBLE_SIZE) + return BCME_BUFTOOSHORT; + + /* sanity check: ACL data is present */ + if (len < (int)(HCI_ACL_DATA_PREAMBLE_SIZE + ACL_data->dlen)) + return BCME_BUFTOOSHORT; + + dhd_bta_tx_hcidata(dhd_pub, ACL_data, len); + break; + } + +#ifdef PROP_TXSTATUS + case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE): + int_val = dhd_pub->wlfc_enabled? 1 : 0; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE): + dhd_pub->wlfc_enabled = int_val? 1 : 0; + break; + + case IOV_GVAL(IOV_PROPTXSTATUS_MODE): { + athost_wl_status_info_t *wlfc = + (athost_wl_status_info_t*)dhd_pub->wlfc_state; + int_val = dhd_pub->wlfc_state ? (int32)wlfc->proptxstatus_mode : 0; + bcopy(&int_val, arg, val_size); + break; + } + + case IOV_SVAL(IOV_PROPTXSTATUS_MODE): + if (dhd_pub->wlfc_state) { + athost_wl_status_info_t *wlfc = + (athost_wl_status_info_t*)dhd_pub->wlfc_state; + wlfc->proptxstatus_mode = int_val & 0xff; + } + break; +#endif /* PROP_TXSTATUS */ + + case IOV_GVAL(IOV_BUS_TYPE): + /* The dhd application queries the driver to check if its usb or sdio. */ +#ifdef BCMDHDUSB + int_val = BUS_TYPE_USB; +#endif + int_val = BUS_TYPE_SDIO; + bcopy(&int_val, arg, val_size); + break; + + +#ifdef WLMEDIA_HTSF + case IOV_GVAL(IOV_WLPKTDLYSTAT_SZ): + int_val = dhd_pub->htsfdlystat_sz; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_WLPKTDLYSTAT_SZ): + dhd_pub->htsfdlystat_sz = int_val & 0xff; + printf("Setting tsfdlystat_sz:%d\n", dhd_pub->htsfdlystat_sz); + break; +#endif + case IOV_SVAL(IOV_CHANGEMTU): + int_val &= 0xffff; + bcmerror = dhd_change_mtu(dhd_pub, int_val, 0); + break; + + default: + bcmerror = BCME_UNSUPPORTED; + break; + } + +exit: + DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__, actionid, bcmerror)); + return bcmerror; +} + +/* Store the status of a connection attempt for later retrieval by an iovar */ +void +dhd_store_conn_status(uint32 event, uint32 status, uint32 reason) +{ + /* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID + * because an encryption/rsn mismatch results in both events, and + * the important information is in the WLC_E_PRUNE. 
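+	 * For example, an RSN mismatch produces WLC_E_PRUNE (with the mismatch
+	 * reason) followed by WLC_E_SET_SSID with WLC_E_STATUS_FAIL; keeping
+	 * the PRUNE preserves the more useful reason code.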
+ */ + if (!(event == WLC_E_SET_SSID && status == WLC_E_STATUS_FAIL && + dhd_conn_event == WLC_E_PRUNE)) { + dhd_conn_event = event; + dhd_conn_status = status; + dhd_conn_reason = reason; + } +} + +bool +dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec) +{ + void *p; + int eprec = -1; /* precedence to evict from */ + bool discard_oldest; + + /* Fast case, precedence queue is not full and we are also not + * exceeding total queue length + */ + if (!pktq_pfull(q, prec) && !pktq_full(q)) { + pktq_penq(q, prec, pkt); + return TRUE; + } + + /* Determine precedence from which to evict packet, if any */ + if (pktq_pfull(q, prec)) + eprec = prec; + else if (pktq_full(q)) { + p = pktq_peek_tail(q, &eprec); + ASSERT(p); + if (eprec > prec || eprec < 0) + return FALSE; + } + + /* Evict if needed */ + if (eprec >= 0) { + /* Detect queueing to unconfigured precedence */ + ASSERT(!pktq_pempty(q, eprec)); + discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec); + if (eprec == prec && !discard_oldest) + return FALSE; /* refuse newer (incoming) packet */ + /* Evict packet according to discard policy */ + p = discard_oldest ? pktq_pdeq(q, eprec) : pktq_pdeq_tail(q, eprec); + ASSERT(p); + + PKTFREE(dhdp->osh, p, TRUE); + } + + /* Enqueue */ + p = pktq_penq(q, prec, pkt); + ASSERT(p); + + return TRUE; +} + +static int +dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + int bcmerror = 0; + int val_size; + const bcm_iovar_t *vi = NULL; + uint32 actionid; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(name); + ASSERT(len >= 0); + + /* Get MUST have return space */ + ASSERT(set || (arg && len)); + + /* Set does NOT take qualifiers */ + ASSERT(!set || (!params && !plen)); + + if ((vi = bcm_iovar_lookup(dhd_iovars, name)) == NULL) { + bcmerror = BCME_UNSUPPORTED; + goto exit; + } + + DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__, + name, (set ? "set" : "get"), len, plen)); + + /* set up 'params' pointer in case this is a set command so that + * the convenience int and bool code can be common to set and get + */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + /* all other types are integer sized */ + val_size = sizeof(int); + + actionid = set ? 
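+	    /* encode the set/get direction into the action id that
+	     * dhd_doiovar() switches on */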
IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + + bcmerror = dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len, val_size); + +exit: + return bcmerror; +} + +int +dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen) +{ + int bcmerror = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (!buf) { + return BCME_BADARG; + } + + switch (ioc->cmd) { + case DHD_GET_MAGIC: + if (buflen < sizeof(int)) + bcmerror = BCME_BUFTOOSHORT; + else + *(int*)buf = DHD_IOCTL_MAGIC; + break; + + case DHD_GET_VERSION: + if (buflen < sizeof(int)) + bcmerror = -BCME_BUFTOOSHORT; + else + *(int*)buf = DHD_IOCTL_VERSION; + break; + + case DHD_GET_VAR: + case DHD_SET_VAR: { + char *arg; + uint arglen; + + /* scan past the name to any arguments */ + for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--) + ; + + if (*arg) { + bcmerror = BCME_BUFTOOSHORT; + break; + } + + /* account for the NUL terminator */ + arg++, arglen--; + + /* call with the appropriate arguments */ + if (ioc->cmd == DHD_GET_VAR) + bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen, + buf, buflen, IOV_GET); + else + bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0, arg, arglen, IOV_SET); + if (bcmerror != BCME_UNSUPPORTED) + break; + + /* not in generic table, try protocol module */ + if (ioc->cmd == DHD_GET_VAR) + bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg, + arglen, buf, buflen, IOV_GET); + else + bcmerror = dhd_prot_iovar_op(dhd_pub, buf, + NULL, 0, arg, arglen, IOV_SET); + if (bcmerror != BCME_UNSUPPORTED) + break; + + /* if still not found, try bus module */ + if (ioc->cmd == DHD_GET_VAR) { + bcmerror = dhd_bus_iovar_op(dhd_pub, buf, + arg, arglen, buf, buflen, IOV_GET); + } else { + bcmerror = dhd_bus_iovar_op(dhd_pub, buf, + NULL, 0, arg, arglen, IOV_SET); + } + + break; + } + + default: + bcmerror = BCME_UNSUPPORTED; + } + + return bcmerror; +} + +#ifdef SHOW_EVENTS +static void +wl_show_host_event(wl_event_msg_t *event, void *event_data) +{ + uint i, status, reason; + bool group = FALSE, flush_txq = FALSE, link = FALSE; + const char *auth_str; + const char *event_name; + uchar *buf; + char err_msg[256], eabuf[ETHER_ADDR_STR_LEN]; + uint event_type, flags, auth_type, datalen; + + event_type = ntoh32(event->event_type); + flags = ntoh16(event->flags); + status = ntoh32(event->status); + reason = ntoh32(event->reason); + auth_type = ntoh32(event->auth_type); + datalen = ntoh32(event->datalen); + + /* debug dump of event messages */ + sprintf(eabuf, "%02x:%02x:%02x:%02x:%02x:%02x", + (uchar)event->addr.octet[0]&0xff, + (uchar)event->addr.octet[1]&0xff, + (uchar)event->addr.octet[2]&0xff, + (uchar)event->addr.octet[3]&0xff, + (uchar)event->addr.octet[4]&0xff, + (uchar)event->addr.octet[5]&0xff); + + event_name = "UNKNOWN"; + for (i = 0; i < (uint)bcmevent_names_size; i++) + if (bcmevent_names[i].event == event_type) + event_name = bcmevent_names[i].name; + + if (flags & WLC_EVENT_MSG_LINK) + link = TRUE; + if (flags & WLC_EVENT_MSG_GROUP) + group = TRUE; + if (flags & WLC_EVENT_MSG_FLUSHTXQ) + flush_txq = TRUE; + + switch (event_type) { + case WLC_E_START: + case WLC_E_DEAUTH: + case WLC_E_DISASSOC: + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + break; + + case WLC_E_ASSOC_IND: + case WLC_E_REASSOC_IND: + + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + break; + + case WLC_E_ASSOC: + case WLC_E_REASSOC: + if (status == WLC_E_STATUS_SUCCESS) { + DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name, eabuf)); + } else if (status == WLC_E_STATUS_TIMEOUT) { + 
DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf)); + } else if (status == WLC_E_STATUS_FAIL) { + DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, reason %d\n", + event_name, eabuf, (int)reason)); + } else { + DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n", + event_name, eabuf, (int)status)); + } + break; + + case WLC_E_DEAUTH_IND: + case WLC_E_DISASSOC_IND: + DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name, eabuf, (int)reason)); + break; + + case WLC_E_AUTH: + case WLC_E_AUTH_IND: + if (auth_type == DOT11_OPEN_SYSTEM) + auth_str = "Open System"; + else if (auth_type == DOT11_SHARED_KEY) + auth_str = "Shared Key"; + else { + sprintf(err_msg, "AUTH unknown: %d", (int)auth_type); + auth_str = err_msg; + } + if (event_type == WLC_E_AUTH_IND) { + DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str)); + } else if (status == WLC_E_STATUS_SUCCESS) { + DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n", + event_name, eabuf, auth_str)); + } else if (status == WLC_E_STATUS_TIMEOUT) { + DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n", + event_name, eabuf, auth_str)); + } else if (status == WLC_E_STATUS_FAIL) { + DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n", + event_name, eabuf, auth_str, (int)reason)); + } + + break; + + case WLC_E_JOIN: + case WLC_E_ROAM: + case WLC_E_SET_SSID: + if (status == WLC_E_STATUS_SUCCESS) { + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + } else if (status == WLC_E_STATUS_FAIL) { + DHD_EVENT(("MACEVENT: %s, failed\n", event_name)); + } else if (status == WLC_E_STATUS_NO_NETWORKS) { + DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name)); + } else { + DHD_EVENT(("MACEVENT: %s, unexpected status %d\n", + event_name, (int)status)); + } + break; + + case WLC_E_BEACON_RX: + if (status == WLC_E_STATUS_SUCCESS) { + DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name)); + } else if (status == WLC_E_STATUS_FAIL) { + DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name)); + } else { + DHD_EVENT(("MACEVENT: %s, status %d\n", event_name, status)); + } + break; + + case WLC_E_LINK: + DHD_EVENT(("MACEVENT: %s %s\n", event_name, link?"UP":"DOWN")); + break; + + case WLC_E_MIC_ERROR: + DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n", + event_name, eabuf, group, flush_txq)); + break; + + case WLC_E_ICV_ERROR: + case WLC_E_UNICAST_DECODE_ERROR: + case WLC_E_MULTICAST_DECODE_ERROR: + DHD_EVENT(("MACEVENT: %s, MAC %s\n", + event_name, eabuf)); + break; + + case WLC_E_TXFAIL: + DHD_EVENT(("MACEVENT: %s, RA %s\n", event_name, eabuf)); + break; + + case WLC_E_SCAN_COMPLETE: + case WLC_E_ASSOC_REQ_IE: + case WLC_E_ASSOC_RESP_IE: + case WLC_E_PMKID_CACHE: + DHD_EVENT(("MACEVENT: %s\n", event_name)); + break; + + case WLC_E_PFN_NET_FOUND: + case WLC_E_PFN_NET_LOST: + case WLC_E_PFN_SCAN_COMPLETE: + case WLC_E_PFN_SCAN_NONE: + case WLC_E_PFN_SCAN_ALLGONE: + DHD_EVENT(("PNOEVENT: %s\n", event_name)); + break; + + case WLC_E_PSK_SUP: + case WLC_E_PRUNE: + DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n", + event_name, (int)status, (int)reason)); + break; + +#ifdef WIFI_ACT_FRAME + case WLC_E_ACTION_FRAME: + DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name, eabuf)); + break; +#endif /* WIFI_ACT_FRAME */ + + case WLC_E_TRACE: { + static uint32 seqnum_prev = 0; + msgtrace_hdr_t hdr; + uint32 nblost; + char *s, *p; + + buf = (uchar *) event_data; + memcpy(&hdr, buf, MSGTRACE_HDRLEN); + + if (hdr.version != MSGTRACE_VERSION) { + printf("\nMACEVENT: %s [unsupported version --> " + "dhd version:%d dongle version:%d]\n", + 
event_name, MSGTRACE_VERSION, hdr.version); + /* Reset datalen to avoid display below */ + datalen = 0; + break; + } + + /* There are 2 bytes available at the end of data */ + buf[MSGTRACE_HDRLEN + ntoh16(hdr.len)] = '\0'; + + if (ntoh32(hdr.discarded_bytes) || ntoh32(hdr.discarded_printf)) { + printf("\nWLC_E_TRACE: [Discarded traces in dongle -->" + "discarded_bytes %d discarded_printf %d]\n", + ntoh32(hdr.discarded_bytes), ntoh32(hdr.discarded_printf)); + } + + nblost = ntoh32(hdr.seqnum) - seqnum_prev - 1; + if (nblost > 0) { + printf("\nWLC_E_TRACE: [Event lost --> seqnum %d nblost %d\n", + ntoh32(hdr.seqnum), nblost); + } + seqnum_prev = ntoh32(hdr.seqnum); + + /* Display the trace buffer. Advance from \n to \n to avoid display big + * printf (issue with Linux printk ) + */ + p = (char *)&buf[MSGTRACE_HDRLEN]; + while ((s = strstr(p, "\n")) != NULL) { + *s = '\0'; + printf("%s\n", p); + p = s+1; + } + printf("%s\n", p); + + /* Reset datalen to avoid display below */ + datalen = 0; + break; + } + + + case WLC_E_RSSI: + DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data)))); + break; + + default: + DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n", + event_name, event_type, eabuf, (int)status, (int)reason, + (int)auth_type)); + break; + } + + /* show any appended data */ + if (datalen) { + buf = (uchar *) event_data; + DHD_EVENT((" data (%d) : ", datalen)); + for (i = 0; i < datalen; i++) + DHD_EVENT((" 0x%02x ", *buf++)); + DHD_EVENT(("\n")); + } +} +#endif /* SHOW_EVENTS */ + +int +wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, + wl_event_msg_t *event, void **data_ptr) +{ + /* check whether packet is a BRCM event pkt */ + bcm_event_t *pvt_data = (bcm_event_t *)pktdata; + uint8 *event_data; + uint32 type, status, reason, datalen; + uint16 flags; + int evlen; + + if (bcmp(BRCM_OUI, &pvt_data->bcm_hdr.oui[0], DOT11_OUI_LEN)) { + DHD_ERROR(("%s: mismatched OUI, bailing\n", __FUNCTION__)); + return (BCME_ERROR); + } + + /* BRCM event pkt may be unaligned - use xxx_ua to load user_subtype. */ + if (ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype) != BCMILCP_BCM_SUBTYPE_EVENT) { + DHD_ERROR(("%s: mismatched subtype, bailing\n", __FUNCTION__)); + return (BCME_ERROR); + } + + *data_ptr = &pvt_data[1]; + event_data = *data_ptr; + + /* memcpy since BRCM event pkt may be unaligned. */ + memcpy(event, &pvt_data->event, sizeof(wl_event_msg_t)); + + type = ntoh32_ua((void *)&event->event_type); + flags = ntoh16_ua((void *)&event->flags); + status = ntoh32_ua((void *)&event->status); + reason = ntoh32_ua((void *)&event->reason); + datalen = ntoh32_ua((void *)&event->datalen); + evlen = datalen + sizeof(bcm_event_t); + + DHD_TRACE(("RX: event_type:%d flags:%d status:%d reason:%d \n", + type, flags, status, reason)); + + switch (type) { +#ifdef PROP_TXSTATUS + case WLC_E_FIFO_CREDIT_MAP: + dhd_wlfc_event(dhd_pub->info); + dhd_wlfc_FIFOcreditmap_event(dhd_pub->info, event_data); + WLFC_DBGMESG(("WLC_E_FIFO_CREDIT_MAP:(AC0,AC1,AC2,AC3),(BC_MC),(OTHER): " + "(%d,%d,%d,%d),(%d),(%d)\n", event_data[0], event_data[1], + event_data[2], + event_data[3], event_data[4], event_data[5])); + break; +#endif + + case WLC_E_IF: + { + dhd_if_event_t *ifevent = (dhd_if_event_t *)event_data; +#ifdef PROP_TXSTATUS + { + uint8* ea = pvt_data->eth.ether_dhost; + WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, " + "[%02x:%02x:%02x:%02x:%02x:%02x]\n", + ifevent->ifidx, + ((ifevent->action == WLC_E_IF_ADD) ? "ADD":"DEL"), + ((ifevent->is_AP == 0) ? 
"STA":"AP "), + ea[0], ea[1], ea[2], ea[3], ea[4], ea[5])); + (void)ea; + + dhd_wlfc_interface_event(dhd_pub->info, + ((ifevent->action == WLC_E_IF_ADD) ? + eWLFC_MAC_ENTRY_ACTION_ADD : eWLFC_MAC_ENTRY_ACTION_DEL), + ifevent->ifidx, ifevent->is_AP, ea); + + /* dhd already has created an interface by default, for 0 */ + if (ifevent->ifidx == 0) + break; + } +#endif /* PROP_TXSTATUS */ + +#ifdef WL_CFG80211 + if (wl_cfg80211_is_progress_ifchange()) { + DHD_ERROR(("%s: ifidx %d for %s action %d\n", + __FUNCTION__, ifevent->ifidx, + event->ifname, ifevent->action)); + if (ifevent->action == WLC_E_IF_ADD) + wl_cfg80211_notify_ifchange(); + return (BCME_OK); + } +#endif /* WL_CFG80211 */ + if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS) { + if (ifevent->action == WLC_E_IF_ADD) { + if (dhd_add_if(dhd_pub->info, ifevent->ifidx, + NULL, event->ifname, + event->addr.octet, + ifevent->flags, ifevent->bssidx)) { + DHD_ERROR(("%s: dhd_add_if failed!!" + " ifidx: %d for %s\n", + __FUNCTION__, + ifevent->ifidx, + event->ifname)); + return (BCME_ERROR); + } + } + else + dhd_del_if(dhd_pub->info, ifevent->ifidx); + } else { +#ifndef PROP_TXSTATUS + DHD_ERROR(("%s: Invalid ifidx %d for %s\n", + __FUNCTION__, ifevent->ifidx, event->ifname)); +#endif /* !PROP_TXSTATUS */ + } + } + /* send up the if event: btamp user needs it */ + *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname); + /* push up to external supp/auth */ + dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx); + break; + + +#ifdef WLMEDIA_HTSF + case WLC_E_HTSFSYNC: + htsf_update(dhd_pub->info, event_data); + break; +#endif /* WLMEDIA_HTSF */ + case WLC_E_NDIS_LINK: { + uint32 temp = hton32(WLC_E_LINK); + + memcpy((void *)(&pvt_data->event.event_type), &temp, + sizeof(pvt_data->event.event_type)); + } + /* These are what external supplicant/authenticator wants */ + /* fall through */ + case WLC_E_LINK: + case WLC_E_DEAUTH: + case WLC_E_DEAUTH_IND: + case WLC_E_DISASSOC: + case WLC_E_DISASSOC_IND: + DHD_EVENT(("%s: Link event %d, flags %x, status %x\n", + __FUNCTION__, type, flags, status)); + /* fall through */ + default: + *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname); + /* push up to external supp/auth */ + dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx); + DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n", + __FUNCTION__, type, flags, status)); + + /* put it back to WLC_E_NDIS_LINK */ + if (type == WLC_E_NDIS_LINK) { + uint32 temp; + + temp = ntoh32_ua((void *)&event->event_type); + DHD_TRACE(("Converted to WLC_E_LINK type %d\n", temp)); + + temp = ntoh32(WLC_E_NDIS_LINK); + memcpy((void *)(&pvt_data->event.event_type), &temp, + sizeof(pvt_data->event.event_type)); + } + break; + } + +#ifdef SHOW_EVENTS + wl_show_host_event(event, (void *)event_data); +#endif /* SHOW_EVENTS */ + + return (BCME_OK); +} + +void +wl_event_to_host_order(wl_event_msg_t * evt) +{ + /* Event struct members passed from dongle to host are stored in network + * byte order. Convert all members to host-order. 
+ */ + evt->event_type = ntoh32(evt->event_type); + evt->flags = ntoh16(evt->flags); + evt->status = ntoh32(evt->status); + evt->reason = ntoh32(evt->reason); + evt->auth_type = ntoh32(evt->auth_type); + evt->datalen = ntoh32(evt->datalen); + evt->version = ntoh16(evt->version); +} + +void +dhd_print_buf(void *pbuf, int len, int bytes_per_line) +{ +#ifdef DHD_DEBUG + int i, j = 0; + unsigned char *buf = pbuf; + + if (bytes_per_line == 0) { + bytes_per_line = len; + } + + for (i = 0; i < len; i++) { + printf("%2.2x", *buf++); + j++; + if (j == bytes_per_line) { + printf("\n"); + j = 0; + } else { + printf(":"); + } + } + printf("\n"); +#endif /* DHD_DEBUG */ +} + +#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base)) + +/* Convert user's input in hex pattern to byte-size mask */ +static int +wl_pattern_atoh(char *src, char *dst) +{ + int i; + if (strncmp(src, "0x", 2) != 0 && + strncmp(src, "0X", 2) != 0) { + DHD_ERROR(("Mask invalid format. Needs to start with 0x\n")); + return -1; + } + src = src + 2; /* Skip past 0x */ + if (strlen(src) % 2 != 0) { + DHD_ERROR(("Mask invalid format. Needs to be of even length\n")); + return -1; + } + for (i = 0; *src != '\0'; i++) { + char num[3]; + bcm_strncpy_s(num, sizeof(num), src, 2); + num[2] = '\0'; + dst[i] = (uint8)strtoul(num, NULL, 16); + src += 2; + } + return i; +} + +void +dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode) +{ + char *argv[8]; + int i = 0; + const char *str; + int buf_len; + int str_len; + char *arg_save = 0, *arg_org = 0; + int rc; + char buf[128]; + wl_pkt_filter_enable_t enable_parm; + wl_pkt_filter_enable_t * pkt_filterp; + + if (!arg) + return; + + if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) { + DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + goto fail; + } + arg_org = arg_save; + memcpy(arg_save, arg, strlen(arg) + 1); + + argv[i] = bcmstrtok(&arg_save, " ", 0); + + i = 0; + if (argv[i] == NULL) { + DHD_ERROR(("No args provided\n")); + goto fail; + } + + str = "pkt_filter_enable"; + str_len = strlen(str); + bcm_strncpy_s(buf, sizeof(buf), str, str_len); + buf[str_len] = '\0'; + buf_len = str_len + 1; + + pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1); + + /* Parse packet filter id. */ + enable_parm.id = htod32(strtoul(argv[i], NULL, 0)); + + /* Parse enable/disable value. */ + enable_parm.enable = htod32(enable); + + buf_len += sizeof(enable_parm); + memcpy((char *)pkt_filterp, + &enable_parm, + sizeof(enable_parm)); + + /* Enable/disable the specified filter. */ + rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0); + rc = rc >= 0 ? 0 : rc; + if (rc) + DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n", + __FUNCTION__, arg, rc)); + else + DHD_TRACE(("%s: successfully added pktfilter %s\n", + __FUNCTION__, arg)); + + /* Contorl the master mode */ + bcm_mkiovar("pkt_filter_mode", (char *)&master_mode, 4, buf, sizeof(buf)); + rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0); + rc = rc >= 0 ? 
0 : rc; + if (rc) + DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n", + __FUNCTION__, arg, rc)); + +fail: + if (arg_org) + MFREE(dhd->osh, arg_org, strlen(arg) + 1); +} + +void +dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg) +{ + const char *str; + wl_pkt_filter_t pkt_filter; + wl_pkt_filter_t *pkt_filterp; + int buf_len; + int str_len; + int rc; + uint32 mask_size; + uint32 pattern_size; + char *argv[8], * buf = 0; + int i = 0; + char *arg_save = 0, *arg_org = 0; +#define BUF_SIZE 2048 + + if (!arg) + return; + + if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) { + DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + goto fail; + } + + arg_org = arg_save; + + if (!(buf = MALLOC(dhd->osh, BUF_SIZE))) { + DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + goto fail; + } + + memcpy(arg_save, arg, strlen(arg) + 1); + + if (strlen(arg) > BUF_SIZE) { + DHD_ERROR(("Not enough buffer %d < %d\n", (int)strlen(arg), (int)sizeof(buf))); + goto fail; + } + + argv[i] = bcmstrtok(&arg_save, " ", 0); + while (argv[i++]) + argv[i] = bcmstrtok(&arg_save, " ", 0); + + i = 0; + if (argv[i] == NULL) { + DHD_ERROR(("No args provided\n")); + goto fail; + } + + str = "pkt_filter_add"; + str_len = strlen(str); + bcm_strncpy_s(buf, BUF_SIZE, str, str_len); + buf[ str_len ] = '\0'; + buf_len = str_len + 1; + + pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1); + + /* Parse packet filter id. */ + pkt_filter.id = htod32(strtoul(argv[i], NULL, 0)); + + if (argv[++i] == NULL) { + DHD_ERROR(("Polarity not provided\n")); + goto fail; + } + + /* Parse filter polarity. */ + pkt_filter.negate_match = htod32(strtoul(argv[i], NULL, 0)); + + if (argv[++i] == NULL) { + DHD_ERROR(("Filter type not provided\n")); + goto fail; + } + + /* Parse filter type. */ + pkt_filter.type = htod32(strtoul(argv[i], NULL, 0)); + + if (argv[++i] == NULL) { + DHD_ERROR(("Offset not provided\n")); + goto fail; + } + + /* Parse pattern filter offset. */ + pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0)); + + if (argv[++i] == NULL) { + DHD_ERROR(("Bitmask not provided\n")); + goto fail; + } + + /* Parse pattern filter mask. */ + mask_size = + htod32(wl_pattern_atoh(argv[i], (char *) pkt_filterp->u.pattern.mask_and_pattern)); + + if (argv[++i] == NULL) { + DHD_ERROR(("Pattern not provided\n")); + goto fail; + } + + /* Parse pattern filter pattern. */ + pattern_size = + htod32(wl_pattern_atoh(argv[i], + (char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size])); + + if (mask_size != pattern_size) { + DHD_ERROR(("Mask and pattern not the same size\n")); + goto fail; + } + + pkt_filter.u.pattern.size_bytes = mask_size; + buf_len += WL_PKT_FILTER_FIXED_LEN; + buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size); + + /* Keep-alive attributes are set in local variable (keep_alive_pkt), and + ** then memcpy'ed into buffer (keep_alive_pktp) since there is no + ** guarantee that the buffer is properly aligned. + */ + memcpy((char *)pkt_filterp, + &pkt_filter, + WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN); + + rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0); + rc = rc >= 0 ? 
0 : rc; + + if (rc) + DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n", + __FUNCTION__, arg, rc)); + else + DHD_TRACE(("%s: successfully added pktfilter %s\n", + __FUNCTION__, arg)); + +fail: + if (arg_org) + MFREE(dhd->osh, arg_org, strlen(arg) + 1); + + if (buf) + MFREE(dhd->osh, buf, BUF_SIZE); +} + +/* ========================== */ +/* ==== ARP OFFLOAD SUPPORT = */ +/* ========================== */ +#ifdef ARP_OFFLOAD_SUPPORT +void +dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode) +{ + char iovbuf[32]; + int retcode; + + bcm_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf)); + retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + retcode = retcode >= 0 ? 0 : retcode; + if (retcode) + DHD_TRACE(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n", + __FUNCTION__, arp_mode, retcode)); + else + DHD_TRACE(("%s: successfully set ARP offload mode to 0x%x\n", + __FUNCTION__, arp_mode)); +} + +void +dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable) +{ + char iovbuf[32]; + int retcode; + + bcm_mkiovar("arpoe", (char *)&arp_enable, 4, iovbuf, sizeof(iovbuf)); + retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + retcode = retcode >= 0 ? 0 : retcode; + if (retcode) + DHD_TRACE(("%s: failed to enabe ARP offload to %d, retcode = %d\n", + __FUNCTION__, arp_enable, retcode)); + else + DHD_TRACE(("%s: successfully enabed ARP offload to %d\n", + __FUNCTION__, arp_enable)); +} + +void +dhd_aoe_arp_clr(dhd_pub_t *dhd) +{ + int ret = 0; + int iov_len = 0; + char iovbuf[128]; + + if (dhd == NULL) return; + + iov_len = bcm_mkiovar("arp_table_clear", 0, 0, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0) < 0)) + DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); +} + +void +dhd_aoe_hostip_clr(dhd_pub_t *dhd) +{ + int ret = 0; + int iov_len = 0; + char iovbuf[128]; + + if (dhd == NULL) return; + + iov_len = bcm_mkiovar("arp_hostip_clear", 0, 0, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0)) < 0) + DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); +} + +void +dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr) +{ + int iov_len = 0; + char iovbuf[32]; + int retcode; + + iov_len = bcm_mkiovar("arp_hostip", (char *)&ipaddr, 4, iovbuf, sizeof(iovbuf)); + retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0); + + if (retcode) + DHD_TRACE(("%s: ARP ip addr add failed, retcode = %d\n", + __FUNCTION__, retcode)); + else + DHD_TRACE(("%s: sARP H ipaddr entry added \n", + __FUNCTION__)); +} + +int +dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen) +{ + int retcode, i; + int iov_len = 0; + uint32 *ptr32 = buf; + bool clr_bottom = FALSE; + + if (!buf) + return -1; + + iov_len = bcm_mkiovar("arp_hostip", 0, 0, buf, buflen); + retcode = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, buflen, FALSE, 0); + + if (retcode) { + DHD_TRACE(("%s: ioctl WLC_GET_VAR error %d\n", + __FUNCTION__, retcode)); + + return -1; + } + + /* clean up the buf, ascii reminder */ + for (i = 0; i < MAX_IPV4_ENTRIES; i++) { + if (!clr_bottom) { + if (*ptr32 == 0) + clr_bottom = TRUE; + } else { + *ptr32 = 0; + } + ptr32++; + } + + return 0; +} +#endif /* ARP_OFFLOAD_SUPPORT */ + +/* send up locally generated event */ +void +dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data) +{ + switch (ntoh32(event->event_type)) { + case WLC_E_BTA_HCI_EVENT: + break; + default: + break; + } + + /* Call 
per-port handler. */ + dhd_sendup_event(dhdp, event, data); +} + +#ifdef SIMPLE_ISCAN + +uint iscan_thread_id = 0; +iscan_buf_t * iscan_chain = 0; + +iscan_buf_t * +dhd_iscan_allocate_buf(dhd_pub_t *dhd, iscan_buf_t **iscanbuf) +{ + iscan_buf_t *iscanbuf_alloc = 0; + iscan_buf_t *iscanbuf_head; + + DHD_ISCAN(("%s: Entered\n", __FUNCTION__)); + dhd_iscan_lock(); + + iscanbuf_alloc = (iscan_buf_t*)MALLOC(dhd->osh, sizeof(iscan_buf_t)); + if (iscanbuf_alloc == NULL) + goto fail; + + iscanbuf_alloc->next = NULL; + iscanbuf_head = *iscanbuf; + + DHD_ISCAN(("%s: addr of allocated node = 0x%X" + "addr of iscanbuf_head = 0x%X dhd = 0x%X\n", + __FUNCTION__, iscanbuf_alloc, iscanbuf_head, dhd)); + + if (iscanbuf_head == NULL) { + *iscanbuf = iscanbuf_alloc; + DHD_ISCAN(("%s: Head is allocated\n", __FUNCTION__)); + goto fail; + } + + while (iscanbuf_head->next) + iscanbuf_head = iscanbuf_head->next; + + iscanbuf_head->next = iscanbuf_alloc; + +fail: + dhd_iscan_unlock(); + return iscanbuf_alloc; +} + +void +dhd_iscan_free_buf(void *dhdp, iscan_buf_t *iscan_delete) +{ + iscan_buf_t *iscanbuf_free = 0; + iscan_buf_t *iscanbuf_prv = 0; + iscan_buf_t *iscanbuf_cur; + dhd_pub_t *dhd = dhd_bus_pub(dhdp); + DHD_ISCAN(("%s: Entered\n", __FUNCTION__)); + + dhd_iscan_lock(); + + iscanbuf_cur = iscan_chain; + + /* If iscan_delete is null then delete the entire + * chain or else delete specific one provided + */ + if (!iscan_delete) { + while (iscanbuf_cur) { + iscanbuf_free = iscanbuf_cur; + iscanbuf_cur = iscanbuf_cur->next; + iscanbuf_free->next = 0; + MFREE(dhd->osh, iscanbuf_free, sizeof(iscan_buf_t)); + } + iscan_chain = 0; + } else { + while (iscanbuf_cur) { + if (iscanbuf_cur == iscan_delete) + break; + iscanbuf_prv = iscanbuf_cur; + iscanbuf_cur = iscanbuf_cur->next; + } + if (iscanbuf_prv) + iscanbuf_prv->next = iscan_delete->next; + + iscan_delete->next = 0; + MFREE(dhd->osh, iscan_delete, sizeof(iscan_buf_t)); + + if (!iscanbuf_prv) + iscan_chain = 0; + } + dhd_iscan_unlock(); +} + +iscan_buf_t * +dhd_iscan_result_buf(void) +{ + return iscan_chain; +} + +int +dhd_iscan_issue_request(void * dhdp, wl_iscan_params_t *pParams, uint32 size) +{ + int rc = -1; + dhd_pub_t *dhd = dhd_bus_pub(dhdp); + char *buf; + char iovar[] = "iscan"; + uint32 allocSize = 0; + wl_ioctl_t ioctl; + + if (pParams) { + allocSize = (size + strlen(iovar) + 1); + if ((allocSize < size) || (allocSize < strlen(iovar))) + { + DHD_ERROR(("%s: overflow - allocation size too large %d < %d + %d!\n", + __FUNCTION__, allocSize, size, strlen(iovar))); + goto cleanUp; + } + buf = MALLOC(dhd->osh, allocSize); + + if (buf == NULL) + { + DHD_ERROR(("%s: malloc of size %d failed!\n", __FUNCTION__, allocSize)); + goto cleanUp; + } + ioctl.cmd = WLC_SET_VAR; + bcm_mkiovar(iovar, (char *)pParams, size, buf, allocSize); + rc = dhd_wl_ioctl(dhd, 0, &ioctl, buf, allocSize); + } + +cleanUp: + if (buf) { + MFREE(dhd->osh, buf, allocSize); + } + + return rc; +} + +static int +dhd_iscan_get_partial_result(void *dhdp, uint *scan_count) +{ + wl_iscan_results_t *list_buf; + wl_iscan_results_t list; + wl_scan_results_t *results; + iscan_buf_t *iscan_cur; + int status = -1; + dhd_pub_t *dhd = dhd_bus_pub(dhdp); + int rc; + wl_ioctl_t ioctl; + + DHD_ISCAN(("%s: Enter\n", __FUNCTION__)); + + iscan_cur = dhd_iscan_allocate_buf(dhd, &iscan_chain); + if (!iscan_cur) { + DHD_ERROR(("%s: Failed to allocate node\n", __FUNCTION__)); + dhd_iscan_free_buf(dhdp, 0); + dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT); + dhd_ind_scan_confirm(dhdp, FALSE); + goto fail; + } + + 
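+	/*
+	 * The request built below uses the usual Broadcom iovar layout produced by
+	 * bcm_mkiovar(): the NUL-terminated iovar name ("iscanresults") sits at the
+	 * start of iscan_buf and is immediately followed by the wl_iscan_results_t
+	 * parameter block. The same buffer is then handed to the WLC_GET_VAR ioctl
+	 * so the dongle can overwrite it with the partial scan results parsed below.
+	 */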
dhd_iscan_lock(); + + memset(iscan_cur->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN); + list_buf = (wl_iscan_results_t*)iscan_cur->iscan_buf; + results = &list_buf->results; + results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE; + results->version = 0; + results->count = 0; + + memset(&list, 0, sizeof(list)); + list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN); + bcm_mkiovar("iscanresults", (char *)&list, WL_ISCAN_RESULTS_FIXED_SIZE, + iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN); + ioctl.cmd = WLC_GET_VAR; + ioctl.set = FALSE; + rc = dhd_wl_ioctl(dhd, 0, &ioctl, iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN); + + results->buflen = dtoh32(results->buflen); + results->version = dtoh32(results->version); + *scan_count = results->count = dtoh32(results->count); + status = dtoh32(list_buf->status); + DHD_ISCAN(("%s: Got %d resuls status = (%x)\n", __FUNCTION__, results->count, status)); + + dhd_iscan_unlock(); + + if (!(*scan_count)) { + /* TODO: race condition when FLUSH already called */ + dhd_iscan_free_buf(dhdp, 0); + } +fail: + return status; +} + +#endif /* SIMPLE_ISCAN */ + +/* + * returns = TRUE if associated, FALSE if not associated + * third paramter retval can return error from error + */ +bool dhd_is_associated(dhd_pub_t *dhd, void *bss_buf, int *retval) +{ + char bssid[6], zbuf[6]; + int ret; + + bzero(bssid, 6); + bzero(zbuf, 6); + + ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, (char *)&bssid, ETHER_ADDR_LEN, FALSE, 0); + DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__, ret)); + + if (retval) + *retval = ret; + + if (ret == BCME_NOTASSOCIATED) { + DHD_TRACE(("%s: not associated! res:%d\n", __FUNCTION__, ret)); + } + + if (ret < 0) + return FALSE; + + if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) != 0)) { + /* STA is assocoated BSSID is non zero */ + + if (bss_buf) { + /* return bss if caller provided buf */ + memcpy(bss_buf, bssid, ETHER_ADDR_LEN); + } + return TRUE; + } else { + DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__)); + return FALSE; + } +} + +/* Function to estimate possible DTIM_SKIP value */ +int +dhd_get_dtim_skip(dhd_pub_t *dhd) +{ + int bcn_li_dtim; + int ret = -1; + int dtim_assoc = 0; + + if ((dhd->dtim_skip == 0) || (dhd->dtim_skip == 1)) + bcn_li_dtim = 3; + else + bcn_li_dtim = dhd->dtim_skip; + + /* Check if associated */ + if (dhd_is_associated(dhd, NULL, NULL) == FALSE) { + DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret)); + goto exit; + } + + /* if assoc grab ap's dtim value */ + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD, + &dtim_assoc, sizeof(dtim_assoc), FALSE, 0)) < 0) { + DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); + goto exit; + } + + DHD_ERROR(("%s bcn_li_dtim=%d DTIM=%d Listen=%d\n", + __FUNCTION__, bcn_li_dtim, dtim_assoc, LISTEN_INTERVAL)); + + /* if not assocated just eixt */ + if (dtim_assoc == 0) { + goto exit; + } + + /* check if sta listen interval fits into AP dtim */ + if (dtim_assoc > LISTEN_INTERVAL) { + /* AP DTIM to big for our Listen Interval : no dtim skiping */ + bcn_li_dtim = 1; + DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n", + __FUNCTION__, dtim_assoc, LISTEN_INTERVAL)); + goto exit; + } + + if ((bcn_li_dtim * dtim_assoc) > LISTEN_INTERVAL) { + /* Round up dtim_skip to fit into STAs Listen Interval */ + bcn_li_dtim = (int)(LISTEN_INTERVAL / dtim_assoc); + DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim)); + } + +exit: + return bcn_li_dtim; +} + +/* Check if HostAPD or WFD mode setup */ +bool dhd_check_ap_wfd_mode_set(dhd_pub_t *dhd) +{ +#ifdef WL_CFG80211 +#ifndef 
WL_ENABLE_P2P_IF + /* To be back compatble with ICS MR1 release where p2p interface + * disable but wlan0 used for p2p + */ + if (((dhd->op_mode & HOSTAPD_MASK) == HOSTAPD_MASK) || + ((dhd->op_mode & WFD_MASK) == WFD_MASK)) { + return TRUE; + } + else +#else + /* concurent mode with p2p interface for wfd and wlan0 for sta */ + if (((dhd->op_mode & P2P_GO_ENABLED) == P2P_GO_ENABLED) || + ((dhd->op_mode & P2P_GC_ENABLED) == P2P_GC_ENABLED)) { + DHD_ERROR(("%s P2P enabled for mode=%d\n", __FUNCTION__, dhd->op_mode)); + return TRUE; + } + else +#endif /* WL_ENABLE_P2P_IF */ +#endif /* WL_CFG80211 */ + return FALSE; +} + +#ifdef PNO_SUPPORT +int +dhd_pno_clean(dhd_pub_t *dhd) +{ + char iovbuf[128]; + int pfn_enabled = 0; + int iov_len = 0; + int ret; + + /* Disable pfn */ + iov_len = bcm_mkiovar("pfn", (char *)&pfn_enabled, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) >= 0) { + /* clear pfn */ + iov_len = bcm_mkiovar("pfnclear", 0, 0, iovbuf, sizeof(iovbuf)); + if (iov_len) { + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + iov_len, TRUE, 0)) < 0) { + DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); + } + } + else { + ret = -1; + DHD_ERROR(("%s failed code %d\n", __FUNCTION__, iov_len)); + } + } + else + DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); + + return ret; +} + +int +dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled) +{ + char iovbuf[128]; + int ret = -1; + + if ((!dhd) && ((pfn_enabled != 0) || (pfn_enabled != 1))) { + DHD_ERROR(("%s error exit\n", __FUNCTION__)); + return ret; + } + + + memset(iovbuf, 0, sizeof(iovbuf)); + +#ifndef WL_SCHED_SCAN + if (dhd_check_ap_wfd_mode_set(dhd) == TRUE) + return (ret); + + if ((pfn_enabled) && (dhd_is_associated(dhd, NULL, NULL) == TRUE)) { + DHD_ERROR(("%s pno is NOT enable : called in assoc mode , ignore\n", __FUNCTION__)); + return ret; + } +#endif /* !WL_SCHED_SCAN */ + + /* Enable/disable PNO */ + if ((ret = bcm_mkiovar("pfn", (char *)&pfn_enabled, 4, iovbuf, sizeof(iovbuf))) > 0) { + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s failed for error=%d\n", __FUNCTION__, ret)); + return ret; + } + else { + dhd->pno_enable = pfn_enabled; + DHD_TRACE(("%s set pno as %s\n", + __FUNCTION__, dhd->pno_enable ? 
"Enable" : "Disable")); + } + } + else DHD_ERROR(("%s failed err=%d\n", __FUNCTION__, ret)); + + return ret; +} + +/* Function to execute combined scan */ +int +dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid, ushort scan_fr, + int pno_repeat, int pno_freq_expo_max) +{ + int err = -1; + char iovbuf[128]; + int k, i; + wl_pfn_param_t pfn_param; + wl_pfn_t pfn_element; + uint len = 0; + + DHD_TRACE(("%s nssid=%d nchan=%d\n", __FUNCTION__, nssid, scan_fr)); + + if ((!dhd) && (!ssids_local)) { + DHD_ERROR(("%s error exit\n", __FUNCTION__)); + err = -1; + return err; + } +#ifndef WL_SCHED_SCAN + if (dhd_check_ap_wfd_mode_set(dhd) == TRUE) + return (err); +#endif /* !WL_SCHED_SCAN */ + + /* Check for broadcast ssid */ + for (k = 0; k < nssid; k++) { + if (!ssids_local[k].SSID_len) { + DHD_ERROR(("%d: Broadcast SSID is ilegal for PNO setting\n", k)); + return err; + } + } +/* #define PNO_DUMP 1 */ +#ifdef PNO_DUMP + { + int j; + for (j = 0; j < nssid; j++) { + DHD_ERROR(("%d: scan for %s size =%d\n", j, + ssids_local[j].SSID, ssids_local[j].SSID_len)); + } + } +#endif /* PNO_DUMP */ + + /* clean up everything */ + if ((err = dhd_pno_clean(dhd)) < 0) { + DHD_ERROR(("%s failed error=%d\n", __FUNCTION__, err)); + return err; + } + memset(iovbuf, 0, sizeof(iovbuf)); + memset(&pfn_param, 0, sizeof(pfn_param)); + memset(&pfn_element, 0, sizeof(pfn_element)); + + /* set pfn parameters */ + pfn_param.version = htod32(PFN_VERSION); + pfn_param.flags = htod16((PFN_LIST_ORDER << SORT_CRITERIA_BIT)); + + /* check and set extra pno params */ + if ((pno_repeat != 0) || (pno_freq_expo_max != 0)) { + pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT); + pfn_param.repeat = (uchar) (pno_repeat); + pfn_param.exp = (uchar) (pno_freq_expo_max); + } + /* set up pno scan fr */ + if (scan_fr != 0) + pfn_param.scan_freq = htod32(scan_fr); + + if (pfn_param.scan_freq > PNO_SCAN_MAX_FW_SEC) { + DHD_ERROR(("%s pno freq above %d sec\n", __FUNCTION__, PNO_SCAN_MAX_FW_SEC)); + return err; + } + if (pfn_param.scan_freq < PNO_SCAN_MIN_FW_SEC) { + DHD_ERROR(("%s pno freq less %d sec\n", __FUNCTION__, PNO_SCAN_MIN_FW_SEC)); + return err; + } + + len = bcm_mkiovar("pfn_set", (char *)&pfn_param, sizeof(pfn_param), iovbuf, sizeof(iovbuf)); + if ((err = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0)) < 0) { + DHD_ERROR(("%s pfn_set failed for error=%d\n", + __FUNCTION__, err)); + return err; + } + + /* set all pfn ssid */ + for (i = 0; i < nssid; i++) { + + pfn_element.infra = htod32(DOT11_BSSTYPE_INFRASTRUCTURE); + pfn_element.auth = (DOT11_OPEN_SYSTEM); + pfn_element.wpa_auth = htod32(WPA_AUTH_PFN_ANY); + pfn_element.wsec = htod32(0); + pfn_element.infra = htod32(1); + pfn_element.flags = htod32(ENABLE << WL_PFN_HIDDEN_BIT); + memcpy((char *)pfn_element.ssid.SSID, ssids_local[i].SSID, ssids_local[i].SSID_len); + pfn_element.ssid.SSID_len = ssids_local[i].SSID_len; + + if ((len = + bcm_mkiovar("pfn_add", (char *)&pfn_element, + sizeof(pfn_element), iovbuf, sizeof(iovbuf))) > 0) { + if ((err = + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0)) < 0) { + DHD_ERROR(("%s failed for i=%d error=%d\n", + __FUNCTION__, i, err)); + return err; + } + else + DHD_TRACE(("%s set OK with PNO time=%d repeat=%d max_adjust=%d\n", + __FUNCTION__, pfn_param.scan_freq, + pfn_param.repeat, pfn_param.exp)); + } + else DHD_ERROR(("%s failed err=%d\n", __FUNCTION__, err)); + } + + /* Enable PNO */ + /* dhd_pno_enable(dhd, 1); */ + return err; +} + +int +dhd_pno_set_ex(dhd_pub_t *dhd, wl_pfn_t* ssidnet, int nssid, 
ushort pno_interval, + int pno_repeat, int pno_expo_max, int pno_lost_time) +{ + int err = -1; + char iovbuf[128]; + int k, i; + wl_pfn_param_t pfn_param; + wl_pfn_t pfn_element; + uint len = 0; + + DHD_TRACE(("%s nssid=%d pno_interval=%d\n", __FUNCTION__, nssid, pno_interval)); + + if ((!dhd) && (!ssidnet)) { + DHD_ERROR(("%s error exit\n", __FUNCTION__)); + err = -1; + return err; + } + + if (dhd_check_ap_wfd_mode_set(dhd) == TRUE) + return (err); + + /* Check for broadcast ssid */ + for (k = 0; k < nssid; k++) { + if (!ssidnet[k].ssid.SSID_len) { + DHD_ERROR(("%d: Broadcast SSID is ilegal for PNO setting\n", k)); + return err; + } + } +/* #define PNO_DUMP 1 */ +#ifdef PNO_DUMP + { + int j; + for (j = 0; j < nssid; j++) { + DHD_ERROR(("%d: scan for %s size =%d\n", j, + ssidnet[j].ssid.SSID, ssidnet[j].ssid.SSID_len)); + } + } +#endif /* PNO_DUMP */ + + /* clean up everything */ + if ((err = dhd_pno_clean(dhd)) < 0) { + DHD_ERROR(("%s failed error=%d\n", __FUNCTION__, err)); + return err; + } + memset(iovbuf, 0, sizeof(iovbuf)); + memset(&pfn_param, 0, sizeof(pfn_param)); + memset(&pfn_element, 0, sizeof(pfn_element)); + + /* set pfn parameters */ + pfn_param.version = htod32(PFN_VERSION); + pfn_param.flags = htod16((PFN_LIST_ORDER << SORT_CRITERIA_BIT)); + + /* check and set extra pno params */ + if ((pno_repeat != 0) || (pno_expo_max != 0)) { + pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT); + pfn_param.repeat = (uchar) (pno_repeat); + pfn_param.exp = (uchar) (pno_expo_max); + } + + /* set up pno scan fr */ + if (pno_interval != 0) + pfn_param.scan_freq = htod32(pno_interval); + + if (pfn_param.scan_freq > PNO_SCAN_MAX_FW_SEC) { + DHD_ERROR(("%s pno freq above %d sec\n", __FUNCTION__, PNO_SCAN_MAX_FW_SEC)); + return err; + } + if (pfn_param.scan_freq < PNO_SCAN_MIN_FW_SEC) { + DHD_ERROR(("%s pno freq less %d sec\n", __FUNCTION__, PNO_SCAN_MIN_FW_SEC)); + return err; + } + + /* network lost time */ + pfn_param.lost_network_timeout = htod32(pno_lost_time); + + len = bcm_mkiovar("pfn_set", (char *)&pfn_param, sizeof(pfn_param), iovbuf, sizeof(iovbuf)); + if ((err = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0)) < 0) { + DHD_ERROR(("%s pfn_set failed for error=%d\n", + __FUNCTION__, err)); + return err; + } else { + DHD_TRACE(("%s pfn_set OK with PNO time=%d repeat=%d max_adjust=%d\n", + __FUNCTION__, pfn_param.scan_freq, + pfn_param.repeat, pfn_param.exp)); + } + + /* set all pfn ssid */ + for (i = 0; i < nssid; i++) { + pfn_element.flags = htod32(ssidnet[i].flags); + pfn_element.infra = htod32(ssidnet[i].infra); + pfn_element.auth = htod32(ssidnet[i].auth); + pfn_element.wpa_auth = htod32(ssidnet[i].wpa_auth); + pfn_element.wsec = htod32(ssidnet[i].wsec); + + memcpy((char *)pfn_element.ssid.SSID, ssidnet[i].ssid.SSID, ssidnet[i].ssid.SSID_len); + pfn_element.ssid.SSID_len = htod32(ssidnet[i].ssid.SSID_len); + + if ((len = + bcm_mkiovar("pfn_add", (char *)&pfn_element, + sizeof(pfn_element), iovbuf, sizeof(iovbuf))) > 0) { + if ((err = + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0)) < 0) { + DHD_ERROR(("%s pfn_add failed with ssidnet[%d] error=%d\n", + __FUNCTION__, i, err)); + return err; + } else { + DHD_TRACE(("%s pfn_add OK with ssidnet[%d]\n", __FUNCTION__, i)); + } + } else { + DHD_ERROR(("%s bcm_mkiovar failed with ssidnet[%d]\n", __FUNCTION__, i)); + } + } + + return err; +} + +int +dhd_pno_get_status(dhd_pub_t *dhd) +{ + int ret = -1; + + if (!dhd) + return ret; + else + return (dhd->pno_enable); +} + +#endif /* PNO_SUPPORT */ + +#if 
defined(KEEP_ALIVE) +int dhd_keep_alive_onoff(dhd_pub_t *dhd) +{ + char buf[256]; + const char *str; + wl_mkeep_alive_pkt_t mkeep_alive_pkt; + wl_mkeep_alive_pkt_t *mkeep_alive_pktp; + int buf_len; + int str_len; + int res = -1; + + if (dhd_check_ap_wfd_mode_set(dhd) == TRUE) + return (res); + + DHD_TRACE(("%s execution\n", __FUNCTION__)); + + str = "mkeep_alive"; + str_len = strlen(str); + strncpy(buf, str, str_len); + buf[ str_len ] = '\0'; + mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1); + mkeep_alive_pkt.period_msec = KEEP_ALIVE_PERIOD; + buf_len = str_len + 1; + mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION); + mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN); + /* Setup keep alive zero for null packet generation */ + mkeep_alive_pkt.keep_alive_id = 0; + mkeep_alive_pkt.len_bytes = 0; + buf_len += WL_MKEEP_ALIVE_FIXED_LEN; + /* Keep-alive attributes are set in local variable (mkeep_alive_pkt), and + * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no + * guarantee that the buffer is properly aligned. + */ + memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN); + + res = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0); + + return res; +} +#endif /* defined(KEEP_ALIVE) */ + +/* Android ComboSCAN support */ + +/* + * data parsing from ComboScan tlv list +*/ +int +wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, const char token, + int input_size, int *bytes_left) +{ + char* str = *list_str; + uint16 short_temp; + uint32 int_temp; + + if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) { + DHD_ERROR(("%s error paramters\n", __FUNCTION__)); + return -1; + } + + /* Clean all dest bytes */ + memset(dst, 0, dst_size); + while (*bytes_left > 0) { + + if (str[0] != token) { + DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n", + __FUNCTION__, token, str[0], *bytes_left)); + return -1; + } + + *bytes_left -= 1; + str += 1; + + if (input_size == 1) { + memcpy(dst, str, input_size); + } + else if (input_size == 2) { + memcpy(dst, (char *)htod16(memcpy(&short_temp, str, input_size)), + input_size); + } + else if (input_size == 4) { + memcpy(dst, (char *)htod32(memcpy(&int_temp, str, input_size)), + input_size); + } + + *bytes_left -= input_size; + str += input_size; + *list_str = str; + return 1; + } + return 1; +} + +/* + * channel list parsing from cscan tlv list +*/ +int +wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list, + int channel_num, int *bytes_left) +{ + char* str = *list_str; + int idx = 0; + + if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) { + DHD_ERROR(("%s error paramters\n", __FUNCTION__)); + return -1; + } + + while (*bytes_left > 0) { + + if (str[0] != CSCAN_TLV_TYPE_CHANNEL_IE) { + *list_str = str; + DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx, *bytes_left, str[0])); + return idx; + } + /* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */ + *bytes_left -= 1; + str += 1; + + if (str[0] == 0) { + /* All channels */ + channel_list[idx] = 0x0; + } + else { + channel_list[idx] = (uint16)str[0]; + DHD_TRACE(("%s channel=%d \n", __FUNCTION__, channel_list[idx])); + } + *bytes_left -= 1; + str += 1; + + if (idx++ > 255) { + DHD_ERROR(("%s Too many channels \n", __FUNCTION__)); + return -1; + } + } + + *list_str = str; + return idx; +} + +/* + * SSIDs list parsing from cscan tlv list + */ +int +wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid, int max, int *bytes_left) +{ + char* 
str; + int idx = 0; + + if ((list_str == NULL) || (*list_str == NULL) || (*bytes_left < 0)) { + DHD_ERROR(("%s error paramters\n", __FUNCTION__)); + return -1; + } + str = *list_str; + while (*bytes_left > 0) { + + if (str[0] != CSCAN_TLV_TYPE_SSID_IE) { + *list_str = str; + DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0])); + return idx; + } + + /* Get proper CSCAN_TLV_TYPE_SSID_IE */ + *bytes_left -= 1; + str += 1; + + if (str[0] == 0) { + /* Broadcast SSID */ + ssid[idx].SSID_len = 0; + memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN); + *bytes_left -= 1; + str += 1; + + DHD_TRACE(("BROADCAST SCAN left=%d\n", *bytes_left)); + } + else if (str[0] <= DOT11_MAX_SSID_LEN) { + /* Get proper SSID size */ + ssid[idx].SSID_len = str[0]; + *bytes_left -= 1; + str += 1; + + /* Get SSID */ + if (ssid[idx].SSID_len > *bytes_left) { + DHD_ERROR(("%s out of memory range len=%d but left=%d\n", + __FUNCTION__, ssid[idx].SSID_len, *bytes_left)); + return -1; + } + + memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len); + + *bytes_left -= ssid[idx].SSID_len; + str += ssid[idx].SSID_len; + + DHD_TRACE(("%s :size=%d left=%d\n", + (char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left)); + } + else { + DHD_ERROR(("### SSID size more that %d\n", str[0])); + return -1; + } + + if (idx++ > max) { + DHD_ERROR(("%s number of SSIDs more that %d\n", __FUNCTION__, idx)); + return -1; + } + } + + *list_str = str; + return idx; +} + +/* Parse a comma-separated list from list_str into ssid array, starting + * at index idx. Max specifies size of the ssid array. Parses ssids + * and returns updated idx; if idx >= max not all fit, the excess have + * not been copied. Returns -1 on empty string, or on ssid too long. + */ +int +wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max) +{ + char* str, *ptr; + + if ((list_str == NULL) || (*list_str == NULL)) + return -1; + + for (str = *list_str; str != NULL; str = ptr) { + + /* check for next TAG */ + if (!strncmp(str, GET_CHANNEL, strlen(GET_CHANNEL))) { + *list_str = str + strlen(GET_CHANNEL); + return idx; + } + + if ((ptr = strchr(str, ',')) != NULL) { + *ptr++ = '\0'; + } + + if (strlen(str) > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("ssid <%s> exceeds %d\n", str, DOT11_MAX_SSID_LEN)); + return -1; + } + + if (strlen(str) == 0) + ssid[idx].SSID_len = 0; + + if (idx < max) { + bcm_strcpy_s((char*)ssid[idx].SSID, sizeof(ssid[idx].SSID), str); + ssid[idx].SSID_len = strlen(str); + } + idx++; + } + return idx; +} + +/* + * Parse channel list from iwpriv CSCAN + */ +int +wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num) +{ + int num; + int val; + char* str; + char* endptr = NULL; + + if ((list_str == NULL)||(*list_str == NULL)) + return -1; + + str = *list_str; + num = 0; + while (strncmp(str, GET_NPROBE, strlen(GET_NPROBE))) { + val = (int)strtoul(str, &endptr, 0); + if (endptr == str) { + printf("could not parse channel number starting at" + " substring \"%s\" in list:\n%s\n", + str, *list_str); + return -1; + } + str = endptr + strspn(endptr, " ,"); + + if (num == channel_num) { + DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n", + channel_num, *list_str)); + return -1; + } + + channel_list[num++] = (uint16)val; + } + *list_str = str; + return num; +} diff --git a/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c b/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c new file mode 100644 index 0000000000000..de519a57bf8c0 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c @@ -0,0 
+1,293 @@ +/* +* Customer code to add GPIO control during WLAN start/stop +* Copyright (C) 1999-2011, Broadcom Corporation +* +* Unless you and Broadcom execute a separate written software license +* agreement governing use of this software, this software is licensed to you +* under the terms of the GNU General Public License version 2 (the "GPL"), +* available at http://www.broadcom.com/licenses/GPLv2.php, with the +* following added to such license: +* +* As a special exception, the copyright holders of this software give you +* permission to link this software with independent modules, and to copy and +* distribute the resulting executable under terms of your choice, provided that +* you also meet, for each linked independent module, the terms and conditions of +* the license of that module. An independent module is a module which is not +* derived from this software. The special exception does not apply to any +* modifications of the software. +* +* Notwithstanding the above, under no circumstances may you combine this +* software in any way with any other Broadcom software provided under a license +* other than the GPL, without Broadcom's express prior written consent. +* +* $Id: dhd_custom_gpio.c 339054 2012-06-15 04:56:55Z $ +*/ + +#include +#include +#include +#include + +#include +#include + +#include +#include + +#define WL_ERROR(x) printf x +#define WL_TRACE(x) + +#ifdef CUSTOMER_HW +extern void bcm_wlan_power_off(int); +extern void bcm_wlan_power_on(int); +#endif /* CUSTOMER_HW */ +#if defined(CUSTOMER_HW2) +#ifdef CONFIG_WIFI_CONTROL_FUNC +int wifi_set_power(int on, unsigned long msec); +int wifi_get_irq_number(unsigned long *irq_flags_ptr); +int wifi_get_mac_addr(unsigned char *buf); +void *wifi_get_country_code(char *ccode); +#else +int wifi_set_power(int on, unsigned long msec) { return -1; } +int wifi_get_irq_number(unsigned long *irq_flags_ptr) { return -1; } +int wifi_get_mac_addr(unsigned char *buf) { return -1; } +void *wifi_get_country_code(char *ccode) { return NULL; } +#endif /* CONFIG_WIFI_CONTROL_FUNC */ +#endif /* CUSTOMER_HW2 */ + +#if defined(OOB_INTR_ONLY) + +#if defined(BCMLXSDMMC) +extern int sdioh_mmc_irq(int irq); +#endif /* (BCMLXSDMMC) */ + +#ifdef CUSTOMER_HW3 +#include +#endif + +/* Customer specific Host GPIO defintion */ +static int dhd_oob_gpio_num = -1; + +module_param(dhd_oob_gpio_num, int, 0644); +MODULE_PARM_DESC(dhd_oob_gpio_num, "DHD oob gpio number"); + +/* This function will return: + * 1) return : Host gpio interrupt number per customer platform + * 2) irq_flags_ptr : Type of Host interrupt as Level or Edge + * + * NOTE : + * Customer should check his platform definitions + * and his Host Interrupt spec + * to figure out the proper setting for his platform. + * Broadcom provides just reference settings as example. 
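+ * For example, the CUSTOMER_HW3 path below claims the configured GPIO with
+ * gpio_request(), maps it to an interrupt number with gpio_to_irq() and
+ * switches the pin to input, while the CUSTOMER_HW2 path instead asks the
+ * board support code for the number via wifi_get_irq_number().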
+ * + */ +int dhd_customer_oob_irq_map(unsigned long *irq_flags_ptr) +{ + int host_oob_irq = 0; + +#ifdef CUSTOMER_HW2 + host_oob_irq = wifi_get_irq_number(irq_flags_ptr); + +#else +#if defined(CUSTOM_OOB_GPIO_NUM) + if (dhd_oob_gpio_num < 0) { + dhd_oob_gpio_num = CUSTOM_OOB_GPIO_NUM; + } +#endif /* CUSTOMER_HW2 */ + + if (dhd_oob_gpio_num < 0) { + WL_ERROR(("%s: ERROR customer specific Host GPIO is NOT defined\n", + __FUNCTION__)); + return (dhd_oob_gpio_num); + } + + WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n", + __FUNCTION__, dhd_oob_gpio_num)); + +#if defined CUSTOMER_HW + host_oob_irq = MSM_GPIO_TO_INT(dhd_oob_gpio_num); +#elif defined CUSTOMER_HW3 + gpio_request(dhd_oob_gpio_num, "oob irq"); + host_oob_irq = gpio_to_irq(dhd_oob_gpio_num); + gpio_direction_input(dhd_oob_gpio_num); +#endif /* CUSTOMER_HW */ +#endif /* CUSTOMER_HW2 */ + + return (host_oob_irq); +} +#endif /* defined(OOB_INTR_ONLY) */ + +/* Customer function to control hw specific wlan gpios */ +void +dhd_customer_gpio_wlan_ctrl(int onoff) +{ + switch (onoff) { + case WLAN_RESET_OFF: + WL_TRACE(("%s: call customer specific GPIO to insert WLAN RESET\n", + __FUNCTION__)); +#ifdef CUSTOMER_HW + bcm_wlan_power_off(2); +#endif /* CUSTOMER_HW */ +#ifdef CUSTOMER_HW2 + wifi_set_power(0, 0); +#endif + WL_ERROR(("=========== WLAN placed in RESET ========\n")); + break; + + case WLAN_RESET_ON: + WL_TRACE(("%s: callc customer specific GPIO to remove WLAN RESET\n", + __FUNCTION__)); +#ifdef CUSTOMER_HW + bcm_wlan_power_on(2); +#endif /* CUSTOMER_HW */ +#ifdef CUSTOMER_HW2 + wifi_set_power(1, 0); +#endif + WL_ERROR(("=========== WLAN going back to live ========\n")); + break; + + case WLAN_POWER_OFF: + WL_TRACE(("%s: call customer specific GPIO to turn off WL_REG_ON\n", + __FUNCTION__)); +#ifdef CUSTOMER_HW + bcm_wlan_power_off(1); +#endif /* CUSTOMER_HW */ + break; + + case WLAN_POWER_ON: + WL_TRACE(("%s: call customer specific GPIO to turn on WL_REG_ON\n", + __FUNCTION__)); +#ifdef CUSTOMER_HW + bcm_wlan_power_on(1); + /* Lets customer power to get stable */ + OSL_DELAY(200); +#endif /* CUSTOMER_HW */ + break; + } +} + +#ifdef GET_CUSTOM_MAC_ENABLE +/* Function to get custom MAC address */ +int +dhd_custom_get_mac_address(unsigned char *buf) +{ + int ret = 0; + + WL_TRACE(("%s Enter\n", __FUNCTION__)); + if (!buf) + return -EINVAL; + + /* Customer access to MAC address stored outside of DHD driver */ +#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)) + ret = wifi_get_mac_addr(buf); +#endif + +#ifdef EXAMPLE_GET_MAC + /* EXAMPLE code */ + { + struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}}; + bcopy((char *)&ea_example, buf, sizeof(struct ether_addr)); + } +#endif /* EXAMPLE_GET_MAC */ + + return ret; +} +#endif /* GET_CUSTOM_MAC_ENABLE */ + +/* Customized Locale table : OPTIONAL feature */ +const struct cntry_locales_custom translate_custom_table[] = { +/* Table should be filled out based on custom platform regulatory requirement */ +#ifdef EXAMPLE_TABLE + {"", "XY", 4}, /* Universal if Country code is unknown or empty */ + {"US", "US", 69}, /* input ISO "US" to : US regrev 69 */ + {"CA", "US", 69}, /* input ISO "CA" to : US regrev 69 */ + {"EU", "EU", 5}, /* European union countries to : EU regrev 05 */ + {"AT", "EU", 5}, + {"BE", "EU", 5}, + {"BG", "EU", 5}, + {"CY", "EU", 5}, + {"CZ", "EU", 5}, + {"DK", "EU", 5}, + {"EE", "EU", 5}, + {"FI", "EU", 5}, + {"FR", "EU", 5}, + {"DE", "EU", 5}, + {"GR", "EU", 5}, + {"HU", "EU", 5}, + {"IE", "EU", 5}, + {"IT", "EU", 5}, 
+ {"LV", "EU", 5}, + {"LI", "EU", 5}, + {"LT", "EU", 5}, + {"LU", "EU", 5}, + {"MT", "EU", 5}, + {"NL", "EU", 5}, + {"PL", "EU", 5}, + {"PT", "EU", 5}, + {"RO", "EU", 5}, + {"SK", "EU", 5}, + {"SI", "EU", 5}, + {"ES", "EU", 5}, + {"SE", "EU", 5}, + {"GB", "EU", 5}, + {"KR", "XY", 3}, + {"AU", "XY", 3}, + {"CN", "XY", 3}, /* input ISO "CN" to : XY regrev 03 */ + {"TW", "XY", 3}, + {"AR", "XY", 3}, + {"MX", "XY", 3}, + {"IL", "IL", 0}, + {"CH", "CH", 0}, + {"TR", "TR", 0}, + {"NO", "NO", 0}, +#endif /* EXMAPLE_TABLE */ +}; + + +/* Customized Locale convertor +* input : ISO 3166-1 country abbreviation +* output: customized cspec +*/ +void get_customized_country_code(char *country_iso_code, wl_country_t *cspec) +{ +#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) + + struct cntry_locales_custom *cloc_ptr; + + if (!cspec) + return; + + cloc_ptr = wifi_get_country_code(country_iso_code); + if (cloc_ptr) { + strlcpy(cspec->ccode, cloc_ptr->custom_locale, WLC_CNTRY_BUF_SZ); + cspec->rev = cloc_ptr->custom_locale_rev; + } + return; +#else + int size, i; + + size = ARRAYSIZE(translate_custom_table); + + if (cspec == 0) + return; + + if (size == 0) + return; + + for (i = 0; i < size; i++) { + if (strcmp(country_iso_code, translate_custom_table[i].iso_abbrev) == 0) { + memcpy(cspec->ccode, + translate_custom_table[i].custom_locale, WLC_CNTRY_BUF_SZ); + cspec->rev = translate_custom_table[i].custom_locale_rev; + return; + } + } +#ifdef EXAMPLE_TABLE + /* if no country code matched return first universal code from translate_custom_table */ + memcpy(cspec->ccode, translate_custom_table[0].custom_locale, WLC_CNTRY_BUF_SZ); + cspec->rev = translate_custom_table[0].custom_locale_rev; +#endif /* EXMAPLE_TABLE */ + return; +#endif /* defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */ +} diff --git a/drivers/net/wireless/bcmdhd/dhd_dbg.h b/drivers/net/wireless/bcmdhd/dhd_dbg.h new file mode 100644 index 0000000000000..01be6a1f056f5 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_dbg.h @@ -0,0 +1,105 @@ +/* + * Debug/trace/assert driver definitions for Dongle Host Driver. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: dhd_dbg.h 285933 2011-09-23 21:45:31Z $ + */ + +#ifndef _dhd_dbg_ +#define _dhd_dbg_ + +#if defined(DHD_DEBUG) + +#define DHD_ERROR(args) do {if ((dhd_msg_level & DHD_ERROR_VAL) && (net_ratelimit())) \ + printf args;} while (0) +#define DHD_TRACE(args) do {if (dhd_msg_level & DHD_TRACE_VAL) printf args;} while (0) +#define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0) +#define DHD_DATA(args) do {if (dhd_msg_level & DHD_DATA_VAL) printf args;} while (0) +#define DHD_CTL(args) do {if (dhd_msg_level & DHD_CTL_VAL) printf args;} while (0) +#define DHD_TIMER(args) do {if (dhd_msg_level & DHD_TIMER_VAL) printf args;} while (0) +#define DHD_HDRS(args) do {if (dhd_msg_level & DHD_HDRS_VAL) printf args;} while (0) +#define DHD_BYTES(args) do {if (dhd_msg_level & DHD_BYTES_VAL) printf args;} while (0) +#define DHD_INTR(args) do {if (dhd_msg_level & DHD_INTR_VAL) printf args;} while (0) +#define DHD_GLOM(args) do {if (dhd_msg_level & DHD_GLOM_VAL) printf args;} while (0) +#define DHD_EVENT(args) do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0) +#define DHD_BTA(args) do {if (dhd_msg_level & DHD_BTA_VAL) printf args;} while (0) +#define DHD_ISCAN(args) do {if (dhd_msg_level & DHD_ISCAN_VAL) printf args;} while (0) +#define DHD_ARPOE(args) do {if (dhd_msg_level & DHD_ARPOE_VAL) printf args;} while (0) + +#define DHD_ERROR_ON() (dhd_msg_level & DHD_ERROR_VAL) +#define DHD_TRACE_ON() (dhd_msg_level & DHD_TRACE_VAL) +#define DHD_INFO_ON() (dhd_msg_level & DHD_INFO_VAL) +#define DHD_DATA_ON() (dhd_msg_level & DHD_DATA_VAL) +#define DHD_CTL_ON() (dhd_msg_level & DHD_CTL_VAL) +#define DHD_TIMER_ON() (dhd_msg_level & DHD_TIMER_VAL) +#define DHD_HDRS_ON() (dhd_msg_level & DHD_HDRS_VAL) +#define DHD_BYTES_ON() (dhd_msg_level & DHD_BYTES_VAL) +#define DHD_INTR_ON() (dhd_msg_level & DHD_INTR_VAL) +#define DHD_GLOM_ON() (dhd_msg_level & DHD_GLOM_VAL) +#define DHD_EVENT_ON() (dhd_msg_level & DHD_EVENT_VAL) +#define DHD_BTA_ON() (dhd_msg_level & DHD_BTA_VAL) +#define DHD_ISCAN_ON() (dhd_msg_level & DHD_ISCAN_VAL) +#define DHD_ARPOE_ON() (dhd_msg_level & DHD_ARPOE_VAL) + +#else /* defined(BCMDBG) || defined(DHD_DEBUG) */ + +#define DHD_ERROR(args) do {if (net_ratelimit()) printf args;} while (0) +#define DHD_TRACE(args) +#define DHD_INFO(args) +#define DHD_DATA(args) +#define DHD_CTL(args) +#define DHD_TIMER(args) +#define DHD_HDRS(args) +#define DHD_BYTES(args) +#define DHD_INTR(args) +#define DHD_GLOM(args) +#define DHD_EVENT(args) +#define DHD_BTA(args) +#define DHD_ISCAN(args) +#define DHD_ARPOE(args) + +#define DHD_ERROR_ON() 0 +#define DHD_TRACE_ON() 0 +#define DHD_INFO_ON() 0 +#define DHD_DATA_ON() 0 +#define DHD_CTL_ON() 0 +#define DHD_TIMER_ON() 0 +#define DHD_HDRS_ON() 0 +#define DHD_BYTES_ON() 0 +#define DHD_INTR_ON() 0 +#define DHD_GLOM_ON() 0 +#define DHD_EVENT_ON() 0 +#define DHD_BTA_ON() 0 +#define DHD_ISCAN_ON() 0 +#define DHD_ARPOE_ON() 0 +#endif + +#define DHD_LOG(args) + +#define DHD_BLOG(cp, size) +#define DHD_NONE(args) +extern int dhd_msg_level; + +/* Defines msg bits */ +#include + +#endif /* _dhd_dbg_ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_linux.c b/drivers/net/wireless/bcmdhd/dhd_linux.c new file mode 100644 index 0000000000000..8aa7ed9f43231 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_linux.c @@ -0,0 +1,5405 @@ +/* + * Broadcom Dongle Host Driver (DHD), Linux-specific network interface + * Basically selected code segments from usb-cdc.c and usb-rndis.c + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * 
Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: dhd_linux.c 344350 2012-07-12 08:35:03Z $ + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_HAS_WAKELOCK +#include +#endif +#ifdef WL_CFG80211 +#include +#endif + +#include +#include +#include + +#ifdef WLMEDIA_HTSF +#include +#include + +#define HTSF_MINLEN 200 /* min. packet length to timestamp */ +#define HTSF_BUS_DELAY 150 /* assume a fix propagation in us */ +#define TSMAX 1000 /* max no. of timing record kept */ +#define NUMBIN 34 + +static uint32 tsidx = 0; +static uint32 htsf_seqnum = 0; +uint32 tsfsync; +struct timeval tsync; +static uint32 tsport = 5010; + +typedef struct histo_ { + uint32 bin[NUMBIN]; +} histo_t; + +#if !ISPOWEROF2(DHD_SDALIGN) +#error DHD_SDALIGN is not a power of 2! 
+#endif + +static histo_t vi_d1, vi_d2, vi_d3, vi_d4; +#endif /* WLMEDIA_HTSF */ + +#if defined(SOFTAP) +extern bool ap_cfg_running; +extern bool ap_fw_loaded; +#endif + +/* enable HOSTIP cache update from the host side when an eth0:N is up */ +#define AOE_IP_ALIAS_SUPPORT 1 + +#ifdef PROP_TXSTATUS +#include +#include +#endif + +#include + +#ifdef ARP_OFFLOAD_SUPPORT +void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add); +static int dhd_device_event(struct notifier_block *this, + unsigned long event, + void *ptr); + +static struct notifier_block dhd_notifier = { + .notifier_call = dhd_device_event +}; +#endif /* ARP_OFFLOAD_SUPPORT */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) +#include +volatile bool dhd_mmc_suspend = FALSE; +DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */ + +#if defined(OOB_INTR_ONLY) +extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable); +#endif /* defined(OOB_INTR_ONLY) */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) +static void dhd_hang_process(struct work_struct *work); +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +MODULE_LICENSE("GPL v2"); +#endif /* LinuxVer */ + +#include + +#ifndef PROP_TXSTATUS +#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen) +#else +#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128) +#endif + +#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) +const char * +print_tainted() +{ + return ""; +} +#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */ + +/* Linux wireless extension support */ +#if defined(WL_WIRELESS_EXT) +#include +extern wl_iw_extra_params_t g_wl_iw_params; +#endif /* defined(WL_WIRELESS_EXT) */ + +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) +#include +#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */ +extern int dhd_get_dtim_skip(dhd_pub_t *dhd); + +#ifdef PKT_FILTER_SUPPORT +extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg); +extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode); +#endif + +/* Interface control information */ +typedef struct dhd_if { + struct dhd_info *info; /* back pointer to dhd_info */ + /* OS/stack specifics */ + struct net_device *net; + struct net_device_stats stats; + int idx; /* iface idx in dongle */ + dhd_if_state_t state; /* interface state */ + uint subunit; /* subunit */ + uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */ + bool attached; /* Delayed attachment when unset */ + bool txflowcontrol; /* Per interface flow control indicator */ + char name[IFNAMSIZ+1]; /* linux interface name */ + uint8 bssidx; /* bsscfg index for the interface */ + bool set_multicast; +} dhd_if_t; + +#ifdef WLMEDIA_HTSF +typedef struct { + uint32 low; + uint32 high; +} tsf_t; + +typedef struct { + uint32 last_cycle; + uint32 last_sec; + uint32 last_tsf; + uint32 coef; /* scaling factor */ + uint32 coefdec1; /* first decimal */ + uint32 coefdec2; /* second decimal */ +} htsf_t; + +typedef struct { + uint32 t1; + uint32 t2; + uint32 t3; + uint32 t4; +} tstamp_t; + +static tstamp_t ts[TSMAX]; +static tstamp_t maxdelayts; +static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0; + +#endif /* WLMEDIA_HTSF */ + +/* Local private structure (extension of pub) */ +typedef struct dhd_info { +#if defined(WL_WIRELESS_EXT) + wl_iw_t iw; /* wireless extensions state (must be 
first) */ +#endif /* defined(WL_WIRELESS_EXT) */ + + dhd_pub_t pub; + + /* For supporting multiple interfaces */ + dhd_if_t *iflist[DHD_MAX_IFS]; + + struct semaphore proto_sem; +#ifdef PROP_TXSTATUS + spinlock_t wlfc_spinlock; +#endif /* PROP_TXSTATUS */ +#ifdef WLMEDIA_HTSF + htsf_t htsf; +#endif + wait_queue_head_t ioctl_resp_wait; + struct timer_list timer; + bool wd_timer_valid; + struct tasklet_struct tasklet; + spinlock_t sdlock; + spinlock_t txqlock; + spinlock_t dhd_lock; +#ifdef DHDTHREAD + /* Thread based operation */ + bool threads_only; + struct semaphore sdsem; + + tsk_ctl_t thr_dpc_ctl; + tsk_ctl_t thr_wdt_ctl; + +#else + bool dhd_tasklet_create; +#endif /* DHDTHREAD */ + tsk_ctl_t thr_sysioc_ctl; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + struct work_struct work_hang; +#endif + + /* Wakelocks */ +#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + struct wake_lock wl_wifi; /* Wifi wakelock */ + struct wake_lock wl_rxwake; /* Wifi rx wakelock */ + struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */ +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + /* net_device interface lock, prevent race conditions among net_dev interface + * calls and wifi_on or wifi_off + */ + struct mutex dhd_net_if_mutex; + struct mutex dhd_suspend_mutex; +#endif + spinlock_t wakelock_spinlock; + int wakelock_counter; + int wakelock_rx_timeout_enable; + int wakelock_ctrl_timeout_enable; + + /* Thread to issue ioctl for multicast */ + bool set_macaddress; + struct ether_addr macvalue; + wait_queue_head_t ctrl_wait; + atomic_t pend_8021x_cnt; + dhd_attach_states_t dhd_state; + +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) + struct early_suspend early_suspend; +#endif /* CONFIG_HAS_EARLYSUSPEND */ + +#ifdef ARP_OFFLOAD_SUPPORT + u32 pend_ipaddr; +#endif /* ARP_OFFLOAD_SUPPORT */ +} dhd_info_t; + +/* Definitions to provide path to the firmware and nvram + * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt" + */ +char firmware_path[MOD_PARAM_PATHLEN]; +char nvram_path[MOD_PARAM_PATHLEN]; + +/* load firmware and/or nvram values from the filesystem */ +module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660); +module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0); + +char info_string[MOD_PARAM_INFOLEN]; +module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444); + +int op_mode = 0; +module_param(op_mode, int, 0644); +extern int wl_control_wl_start(struct net_device *dev); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) +struct semaphore dhd_registration_sem; +#define DHD_REGISTRATION_TIMEOUT 12000 /* msec : allowed time to finished dhd registration */ +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ + +/* Spawn a thread for system ioctls (set mac, set mcast) */ +uint dhd_sysioc = TRUE; +module_param(dhd_sysioc, uint, 0); + +/* Error bits */ +module_param(dhd_msg_level, int, 0); + +/* Watchdog interval */ +uint dhd_watchdog_ms = 10; +module_param(dhd_watchdog_ms, uint, 0); + +#if defined(DHD_DEBUG) +/* Console poll interval */ +uint dhd_console_ms = 0; +module_param(dhd_console_ms, uint, 0644); +#endif /* defined(DHD_DEBUG) */ + +/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */ +uint dhd_arp_mode = 0xb; +module_param(dhd_arp_mode, uint, 0); + +/* ARP offload enable */ +uint dhd_arp_enable = TRUE; +module_param(dhd_arp_enable, uint, 0); + +/* Global Pkt filter enable control */ +uint dhd_pkt_filter_enable = 
TRUE; +module_param(dhd_pkt_filter_enable, uint, 0); + +/* Pkt filter init setup */ +uint dhd_pkt_filter_init = 0; +module_param(dhd_pkt_filter_init, uint, 0); + +/* Pkt filter mode control */ +uint dhd_master_mode = TRUE; +module_param(dhd_master_mode, uint, 0); + +#ifdef DHDTHREAD +/* Watchdog thread priority, -1 to use kernel timer */ +int dhd_watchdog_prio = 0; +module_param(dhd_watchdog_prio, int, 0); + +/* DPC thread priority, -1 to use tasklet */ +int dhd_dpc_prio = 1; +module_param(dhd_dpc_prio, int, 0); + +extern int dhd_dongle_memsize; +module_param(dhd_dongle_memsize, int, 0); +#endif /* DHDTHREAD */ +/* Control fw roaming */ +uint dhd_roam_disable = 0; + +/* Control radio state */ +uint dhd_radio_up = 1; + +/* Network inteface name */ +char iface_name[IFNAMSIZ] = {'\0'}; +module_param_string(iface_name, iface_name, IFNAMSIZ, 0); + +/* The following are specific to the SDIO dongle */ + +/* IOCTL response timeout */ +int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT; + +/* Idle timeout for backplane clock */ +int dhd_idletime = DHD_IDLETIME_TICKS; +module_param(dhd_idletime, int, 0); + +/* Use polling */ +uint dhd_poll = FALSE; +module_param(dhd_poll, uint, 0); + +/* Use interrupts */ +uint dhd_intr = TRUE; +module_param(dhd_intr, uint, 0); + +/* SDIO Drive Strength (in milliamps) */ +uint dhd_sdiod_drive_strength = 6; +module_param(dhd_sdiod_drive_strength, uint, 0); + +/* Tx/Rx bounds */ +extern uint dhd_txbound; +extern uint dhd_rxbound; +module_param(dhd_txbound, uint, 0); +module_param(dhd_rxbound, uint, 0); + +/* Deferred transmits */ +extern uint dhd_deferred_tx; +module_param(dhd_deferred_tx, uint, 0); + +#ifdef BCMDBGFS +extern void dhd_dbg_init(dhd_pub_t *dhdp); +extern void dhd_dbg_remove(void); +#endif /* BCMDBGFS */ + + + +#ifdef SDTEST +/* Echo packet generator (pkts/s) */ +uint dhd_pktgen = 0; +module_param(dhd_pktgen, uint, 0); + +/* Echo packet len (0 => sawtooth, max 2040) */ +uint dhd_pktgen_len = 0; +module_param(dhd_pktgen_len, uint, 0); +#endif /* SDTEST */ + +/* Version string to report */ +#ifdef DHD_DEBUG +#ifndef SRCBASE +#define SRCBASE "drivers/net/wireless/bcmdhd" +#endif +#define DHD_COMPILED "\nCompiled in " SRCBASE +#else +#define DHD_COMPILED +#endif /* DHD_DEBUG */ + +static char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR +#ifdef DHD_DEBUG +"\nCompiled in " SRCBASE " on " __DATE__ " at " __TIME__ +#endif +; +static void dhd_net_if_lock_local(dhd_info_t *dhd); +static void dhd_net_if_unlock_local(dhd_info_t *dhd); +static void dhd_suspend_lock(dhd_pub_t *dhdp); +static void dhd_suspend_unlock(dhd_pub_t *dhdp); +#if !defined(AP) && defined(WLP2P) && defined(WL_ENABLE_P2P_IF) +static u32 dhd_concurrent_fw(dhd_pub_t *dhd); +#endif + +#ifdef WLMEDIA_HTSF +void htsf_update(dhd_info_t *dhd, void *data); +tsf_t prev_tsf, cur_tsf; + +uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx); +static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx); +static void dhd_dump_latency(void); +static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf); +static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf); +static void dhd_dump_htsfhisto(histo_t *his, char *s); +#endif /* WLMEDIA_HTSF */ + +/* Monitor interface */ +int dhd_monitor_init(void *dhd_pub); +int dhd_monitor_uninit(void); + + +#if defined(WL_WIRELESS_EXT) +struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev); +#endif /* defined(WL_WIRELESS_EXT) */ + +static void dhd_dpc(ulong data); +/* forward decl */ +extern int dhd_wait_pend8021x(struct net_device *dev); + +#ifdef 
TOE +#ifndef BDC +#error TOE requires BDC +#endif /* !BDC */ +static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol); +static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol); +#endif /* TOE */ + +static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata, + wl_event_msg_t *event_ptr, void **data_ptr); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) +static int dhd_sleep_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored) +{ + int ret = NOTIFY_DONE; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)) + switch (action) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + dhd_mmc_suspend = TRUE; + ret = NOTIFY_OK; + break; + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + dhd_mmc_suspend = FALSE; + ret = NOTIFY_OK; + break; + } + smp_mb(); +#endif + return ret; +} + +static struct notifier_block dhd_sleep_pm_notifier = { + .notifier_call = dhd_sleep_pm_callback, + .priority = 10 +}; +extern int register_pm_notifier(struct notifier_block *nb); +extern int unregister_pm_notifier(struct notifier_block *nb); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */ + +static void dhd_set_packet_filter(int value, dhd_pub_t *dhd) +{ +#ifdef PKT_FILTER_SUPPORT + DHD_TRACE(("%s: %d\n", __FUNCTION__, value)); + /* 1 - Enable packet filter, only allow unicast packet to send up */ + /* 0 - Disable packet filter */ + if (dhd_pkt_filter_enable && (!value || + (dhd_check_ap_wfd_mode_set(dhd) == FALSE))) { + int i; + + for (i = 0; i < dhd->pktfilter_count; i++) { + dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]); + dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i], + value, dhd_master_mode); + } + } +#endif +} + +static int dhd_set_suspend(int value, dhd_pub_t *dhd) +{ + int power_mode = PM_MAX; + /* wl_pkt_filter_enable_t enable_parm; */ + char iovbuf[32]; + int bcn_li_dtim = 3; + uint roamvar = 1; + + DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n", + __FUNCTION__, value, dhd->in_suspend)); + + dhd_suspend_lock(dhd); + if (dhd && dhd->up) { + if (value && dhd->in_suspend) { + + /* Kernel suspended */ + DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__)); + + dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, + sizeof(power_mode), TRUE, 0); + + /* Enable packet filter, only allow unicast packet to send up */ + dhd_set_packet_filter(1, dhd); + + /* If DTIM skip is set up as default, force it to wake + * each third DTIM for better power savings. Note that + * one side effect is a chance to miss BC/MC packet. 
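+			 * bcn_li_dtim is the DTIM listen interval handed to the firmware:
+			 * dhd_get_dtim_skip() below chooses the value, and the pre-suspend
+			 * setting kept in dhd->dtim_skip is written back in the resume
+			 * branch further down.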
+ */ + bcn_li_dtim = dhd_get_dtim_skip(dhd); + bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim, + 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + + /* Disable firmware roaming during suspend */ + bcm_mkiovar("roam_off", (char *)&roamvar, 4, + iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + } else { + + /* Kernel resumed */ + DHD_TRACE(("%s: Remove extra suspend setting \n", __FUNCTION__)); + + power_mode = PM_FAST; + dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, + sizeof(power_mode), TRUE, 0); + + /* disable pkt filter */ + dhd_set_packet_filter(0, dhd); + + /* restore pre-suspend setting for dtim_skip */ + bcm_mkiovar("bcn_li_dtim", (char *)&dhd->dtim_skip, + 4, iovbuf, sizeof(iovbuf)); + + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + roamvar = dhd_roam_disable; + bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, + sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + } + } + dhd_suspend_unlock(dhd); + return 0; +} + +static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force) +{ + dhd_pub_t *dhdp = &dhd->pub; + int ret = 0; + + DHD_OS_WAKE_LOCK(dhdp); + /* Set flag when early suspend was called */ + dhdp->in_suspend = val; + if ((force || !dhdp->suspend_disable_flag) && + (dhd_check_ap_wfd_mode_set(dhdp) == FALSE)) { + ret = dhd_set_suspend(val, dhdp); + } + DHD_OS_WAKE_UNLOCK(dhdp); + return ret; +} + +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) +static void dhd_early_suspend(struct early_suspend *h) +{ + struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend); + + DHD_TRACE(("%s: enter\n", __FUNCTION__)); + + if (dhd) + dhd_suspend_resume_helper(dhd, 1, 0); +} + +static void dhd_late_resume(struct early_suspend *h) +{ + struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend); + + DHD_TRACE(("%s: enter\n", __FUNCTION__)); + + if (dhd) + dhd_suspend_resume_helper(dhd, 0, 0); +} +#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */ + +/* + * Generalized timeout mechanism. Uses spin sleep with exponential back-off until + * the sleep time reaches one jiffy, then switches over to task delay. 
Usage: + * + * dhd_timeout_start(&tmo, usec); + * while (!dhd_timeout_expired(&tmo)) + * if (poll_something()) + * break; + * if (dhd_timeout_expired(&tmo)) + * fatal(); + */ + +void +dhd_timeout_start(dhd_timeout_t *tmo, uint usec) +{ + tmo->limit = usec; + tmo->increment = 0; + tmo->elapsed = 0; + tmo->tick = jiffies_to_usecs(1); +} + +int +dhd_timeout_expired(dhd_timeout_t *tmo) +{ + /* Does nothing the first call */ + if (tmo->increment == 0) { + tmo->increment = 1; + return 0; + } + + if (tmo->elapsed >= tmo->limit) + return 1; + + /* Add the delay that's about to take place */ + tmo->elapsed += tmo->increment; + + if (tmo->increment < tmo->tick) { + OSL_DELAY(tmo->increment); + tmo->increment *= 2; + if (tmo->increment > tmo->tick) + tmo->increment = tmo->tick; + } else { + wait_queue_head_t delay_wait; + DECLARE_WAITQUEUE(wait, current); + init_waitqueue_head(&delay_wait); + add_wait_queue(&delay_wait, &wait); + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(1); + remove_wait_queue(&delay_wait, &wait); + set_current_state(TASK_RUNNING); + } + + return 0; +} + +int +dhd_net2idx(dhd_info_t *dhd, struct net_device *net) +{ + int i = 0; + + ASSERT(dhd); + while (i < DHD_MAX_IFS) { + if (dhd->iflist[i] && (dhd->iflist[i]->net == net)) + return i; + i++; + } + + return DHD_BAD_IF; +} + +struct net_device * dhd_idx2net(void *pub, int ifidx) +{ + struct dhd_pub *dhd_pub = (struct dhd_pub *)pub; + struct dhd_info *dhd_info; + + if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS) + return NULL; + dhd_info = dhd_pub->info; + if (dhd_info && dhd_info->iflist[ifidx]) + return dhd_info->iflist[ifidx]->net; + return NULL; +} + +int +dhd_ifname2idx(dhd_info_t *dhd, char *name) +{ + int i = DHD_MAX_IFS; + + ASSERT(dhd); + + if (name == NULL || *name == '\0') + return 0; + + while (--i > 0) + if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ)) + break; + + DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name)); + + return i; /* default - the primary interface */ +} + +char * +dhd_ifname(dhd_pub_t *dhdp, int ifidx) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + + ASSERT(dhd); + + if (ifidx < 0 || ifidx >= DHD_MAX_IFS) { + DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx)); + return ""; + } + + if (dhd->iflist[ifidx] == NULL) { + DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx)); + return ""; + } + + if (dhd->iflist[ifidx]->net) + return dhd->iflist[ifidx]->net->name; + + return ""; +} + +uint8 * +dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx) +{ + int i; + dhd_info_t *dhd = (dhd_info_t *)dhdp; + + ASSERT(dhd); + for (i = 0; i < DHD_MAX_IFS; i++) + if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx) + return dhd->iflist[i]->mac_addr; + + return NULL; +} + + +static void +_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx) +{ + struct net_device *dev; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) + struct netdev_hw_addr *ha; +#else + struct dev_mc_list *mclist; +#endif + uint32 allmulti, cnt; + + wl_ioctl_t ioc; + char *buf, *bufp; + uint buflen; + int ret; + + ASSERT(dhd && dhd->iflist[ifidx]); + dev = dhd->iflist[ifidx]->net; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + netif_addr_lock_bh(dev); +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) + cnt = netdev_mc_count(dev); +#else + cnt = dev->mc_count; +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + netif_addr_unlock_bh(dev); +#endif + + /* Determine initial value of allmulti flag */ + allmulti = (dev->flags & IFF_ALLMULTI) ? 
TRUE : FALSE; + + /* Send down the multicast list first. */ + + + buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN); + if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) { + DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n", + dhd_ifname(&dhd->pub, ifidx), cnt)); + return; + } + + strcpy(bufp, "mcast_list"); + bufp += strlen("mcast_list") + 1; + + cnt = htol32(cnt); + memcpy(bufp, &cnt, sizeof(cnt)); + bufp += sizeof(cnt); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + netif_addr_lock_bh(dev); +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) + netdev_for_each_mc_addr(ha, dev) { + if (!cnt) + break; + memcpy(bufp, ha->addr, ETHER_ADDR_LEN); + bufp += ETHER_ADDR_LEN; + cnt--; + } +#else + for (mclist = dev->mc_list; (mclist && (cnt > 0)); cnt--, mclist = mclist->next) { + memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN); + bufp += ETHER_ADDR_LEN; + } +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + netif_addr_unlock_bh(dev); +#endif + + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = WLC_SET_VAR; + ioc.buf = buf; + ioc.len = buflen; + ioc.set = TRUE; + + ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); + if (ret < 0) { + DHD_ERROR(("%s: set mcast_list failed, cnt %d\n", + dhd_ifname(&dhd->pub, ifidx), cnt)); + allmulti = cnt ? TRUE : allmulti; + } + + MFREE(dhd->pub.osh, buf, buflen); + + /* Now send the allmulti setting. This is based on the setting in the + * net_device flags, but might be modified above to be turned on if we + * were trying to set some addresses and dongle rejected it... + */ + + buflen = sizeof("allmulti") + sizeof(allmulti); + if (!(buf = MALLOC(dhd->pub.osh, buflen))) { + DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx))); + return; + } + allmulti = htol32(allmulti); + + if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) { + DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n", + dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen)); + MFREE(dhd->pub.osh, buf, buflen); + return; + } + + + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = WLC_SET_VAR; + ioc.buf = buf; + ioc.len = buflen; + ioc.set = TRUE; + + ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); + if (ret < 0) { + DHD_ERROR(("%s: set allmulti %d failed\n", + dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti))); + } + + MFREE(dhd->pub.osh, buf, buflen); + + /* Finally, pick up the PROMISC flag as well, like the NIC driver does */ + + allmulti = (dev->flags & IFF_PROMISC) ? 
TRUE : FALSE; + allmulti = htol32(allmulti); + + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = WLC_SET_PROMISC; + ioc.buf = &allmulti; + ioc.len = sizeof(allmulti); + ioc.set = TRUE; + + ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); + if (ret < 0) { + DHD_ERROR(("%s: set promisc %d failed\n", + dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti))); + } +} + +static int +_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, struct ether_addr *addr) +{ + char buf[32]; + wl_ioctl_t ioc; + int ret; + + if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) { + DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx))); + return -1; + } + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = WLC_SET_VAR; + ioc.buf = buf; + ioc.len = 32; + ioc.set = TRUE; + + ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); + if (ret < 0) { + DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx))); + } else { + memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN); + memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN); + } + + return ret; +} + +#ifdef SOFTAP +extern struct net_device *ap_net_dev; +extern tsk_ctl_t ap_eth_ctl; /* ap netdev heper thread ctl */ +#endif + +static void +dhd_op_if(dhd_if_t *ifp) +{ + dhd_info_t *dhd; + int ret = 0, err = 0; +#ifdef SOFTAP + unsigned long flags; +#endif + + if (!ifp || !ifp->info || !ifp->idx) + return; + ASSERT(ifp && ifp->info && ifp->idx); /* Virtual interfaces only */ + dhd = ifp->info; + + DHD_TRACE(("%s: idx %d, state %d\n", __FUNCTION__, ifp->idx, ifp->state)); + +#ifdef WL_CFG80211 + if (wl_cfg80211_is_progress_ifchange()) + return; + +#endif + switch (ifp->state) { + case DHD_IF_ADD: + /* + * Delete the existing interface before overwriting it + * in case we missed the WLC_E_IF_DEL event. + */ + if (ifp->net != NULL) { + DHD_ERROR(("%s: ERROR: netdev:%s already exists, try free & unregister \n", + __FUNCTION__, ifp->net->name)); + netif_stop_queue(ifp->net); + unregister_netdev(ifp->net); + free_netdev(ifp->net); + } + /* Allocate etherdev, including space for private structure */ + if (!(ifp->net = alloc_etherdev(sizeof(dhd)))) { + DHD_ERROR(("%s: OOM - alloc_etherdev\n", __FUNCTION__)); + ret = -ENOMEM; + } + if (ret == 0) { + strncpy(ifp->net->name, ifp->name, IFNAMSIZ); + ifp->net->name[IFNAMSIZ - 1] = '\0'; + memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd)); +#ifdef WL_CFG80211 + if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) + if (!wl_cfg80211_notify_ifadd(ifp->net, ifp->idx, ifp->bssidx, + (void*)dhd_net_attach)) { + ifp->state = DHD_IF_NONE; + return; + } +#endif + if ((err = dhd_net_attach(&dhd->pub, ifp->idx)) != 0) { + DHD_ERROR(("%s: dhd_net_attach failed, err %d\n", + __FUNCTION__, err)); + ret = -EOPNOTSUPP; + } else { +#if defined(SOFTAP) + if (ap_fw_loaded && !(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) { + /* semaphore that the soft AP CODE waits on */ + flags = dhd_os_spin_lock(&dhd->pub); + + /* save ptr to wl0.1 netdev for use in wl_iw.c */ + ap_net_dev = ifp->net; + /* signal to the SOFTAP 'sleeper' thread, wl0.1 is ready */ + up(&ap_eth_ctl.sema); + dhd_os_spin_unlock(&dhd->pub, flags); + } +#endif + DHD_TRACE(("\n ==== pid:%x, net_device for if:%s created ===\n\n", + current->pid, ifp->net->name)); + ifp->state = DHD_IF_NONE; + } + } + break; + case DHD_IF_DEL: + /* Make sure that we don't enter again here if .. 
*/ + /* dhd_op_if is called again from some other context */ + ifp->state = DHD_IF_DELETING; + if (ifp->net != NULL) { + DHD_TRACE(("\n%s: got 'DHD_IF_DEL' state\n", __FUNCTION__)); +#ifdef WL_CFG80211 + if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) { + wl_cfg80211_ifdel_ops(ifp->net); + } +#endif + netif_stop_queue(ifp->net); + unregister_netdev(ifp->net); + ret = DHD_DEL_IF; /* Make sure the free_netdev() is called */ + +#ifdef WL_CFG80211 + if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) { + wl_cfg80211_notify_ifdel(); + } +#endif + } + break; + case DHD_IF_DELETING: + break; + default: + DHD_ERROR(("%s: bad op %d\n", __FUNCTION__, ifp->state)); + ASSERT(!ifp->state); + break; + } + + if (ret < 0) { + ifp->set_multicast = FALSE; + if (ifp->net) { + free_netdev(ifp->net); + ifp->net = NULL; + } + dhd->iflist[ifp->idx] = NULL; +#ifdef SOFTAP + flags = dhd_os_spin_lock(&dhd->pub); + if (ifp->net == ap_net_dev) + ap_net_dev = NULL; /* NULL SOFTAP global wl0.1 as well */ + dhd_os_spin_unlock(&dhd->pub, flags); +#endif /* SOFTAP */ + MFREE(dhd->pub.osh, ifp, sizeof(*ifp)); + } +} + +static int +_dhd_sysioc_thread(void *data) +{ + tsk_ctl_t *tsk = (tsk_ctl_t *)data; + dhd_info_t *dhd = (dhd_info_t *)tsk->parent; + + + int i; +#ifdef SOFTAP + bool in_ap = FALSE; + unsigned long flags; +#endif + + DAEMONIZE("dhd_sysioc"); + + complete(&tsk->completed); + + while (down_interruptible(&tsk->sema) == 0) { + + SMP_RD_BARRIER_DEPENDS(); + if (tsk->terminated) { + break; + } + + dhd_net_if_lock_local(dhd); + DHD_OS_WAKE_LOCK(&dhd->pub); + + for (i = 0; i < DHD_MAX_IFS; i++) { + if (dhd->iflist[i]) { + DHD_TRACE(("%s: interface %d\n", __FUNCTION__, i)); +#ifdef SOFTAP + flags = dhd_os_spin_lock(&dhd->pub); + in_ap = (ap_net_dev != NULL); + dhd_os_spin_unlock(&dhd->pub, flags); +#endif /* SOFTAP */ + if (dhd->iflist[i] && dhd->iflist[i]->state) + dhd_op_if(dhd->iflist[i]); + + if (dhd->iflist[i] == NULL) { + DHD_TRACE(("\n\n %s: interface %d just been removed," + "!\n\n", __FUNCTION__, i)); + continue; + } +#ifdef SOFTAP + if (in_ap && dhd->set_macaddress) { + DHD_TRACE(("attempt to set MAC for %s in AP Mode," + "blocked. \n", dhd->iflist[i]->net->name)); + dhd->set_macaddress = FALSE; + continue; + } + + if (in_ap && dhd->iflist[i]->set_multicast) { + DHD_TRACE(("attempt to set MULTICAST list for %s" + "in AP Mode, blocked. 
\n", dhd->iflist[i]->net->name)); + dhd->iflist[i]->set_multicast = FALSE; + continue; + } +#endif /* SOFTAP */ + if (dhd->iflist[i]->set_multicast) { + dhd->iflist[i]->set_multicast = FALSE; + _dhd_set_multicast_list(dhd, i); + } + if (dhd->set_macaddress) { + dhd->set_macaddress = FALSE; + _dhd_set_mac_address(dhd, i, &dhd->macvalue); + } + } + } + + DHD_OS_WAKE_UNLOCK(&dhd->pub); + dhd_net_if_unlock_local(dhd); + } + DHD_TRACE(("%s: stopped\n", __FUNCTION__)); + complete_and_exit(&tsk->completed, 0); +} + +static int +dhd_set_mac_address(struct net_device *dev, void *addr) +{ + int ret = 0; + + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + struct sockaddr *sa = (struct sockaddr *)addr; + int ifidx; + + ifidx = dhd_net2idx(dhd, dev); + if (ifidx == DHD_BAD_IF) + return -1; + + ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0); + memcpy(&dhd->macvalue, sa->sa_data, ETHER_ADDR_LEN); + dhd->set_macaddress = TRUE; + up(&dhd->thr_sysioc_ctl.sema); + + return ret; +} + +static void +dhd_set_multicast_list(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + int ifidx; + + ifidx = dhd_net2idx(dhd, dev); + if (ifidx == DHD_BAD_IF) + return; + + ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0); + dhd->iflist[ifidx]->set_multicast = TRUE; + up(&dhd->thr_sysioc_ctl.sema); +} + +#ifdef PROP_TXSTATUS +int +dhd_os_wlfc_block(dhd_pub_t *pub) +{ + dhd_info_t *di = (dhd_info_t *)(pub->info); + ASSERT(di != NULL); + + spin_lock_bh(&di->wlfc_spinlock); + return 1; +} + +int +dhd_os_wlfc_unblock(dhd_pub_t *pub) +{ + dhd_info_t *di = (dhd_info_t *)(pub->info); + ASSERT(di != NULL); + spin_unlock_bh(&di->wlfc_spinlock); + return 1; +} + +const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 }; +uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 }; +#define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]] + +#endif /* PROP_TXSTATUS */ +int +dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) +{ + int ret; + dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); + struct ether_header *eh = NULL; + + /* Reject if down */ + if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) { + /* free the packet here since the caller won't */ + PKTFREE(dhdp->osh, pktbuf, TRUE); + return -ENODEV; + } + + /* Update multicast statistic */ + if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) { + uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf); + eh = (struct ether_header *)pktdata; + + if (ETHER_ISMULTI(eh->ether_dhost)) + dhdp->tx_multicast++; + if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) + atomic_inc(&dhd->pend_8021x_cnt); + } else { + PKTFREE(dhd->pub.osh, pktbuf, TRUE); + return BCME_ERROR; + } + + /* Look into the packet and update the packet priority */ + if (PKTPRIO(pktbuf) == 0) + pktsetprio(pktbuf, FALSE); + +#ifdef PROP_TXSTATUS + if (dhdp->wlfc_state) { + /* store the interface ID */ + DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx); + + /* store destination MAC in the tag as well */ + DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost); + + /* decide which FIFO this packet belongs to */ + if (ETHER_ISMULTI(eh->ether_dhost)) + /* one additional queue index (highest AC + 1) is used for bc/mc queue */ + DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT); + else + DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf))); + } else +#endif /* PROP_TXSTATUS */ + /* If the protocol uses a data header, apply it */ + dhd_prot_hdrpush(dhdp, ifidx, pktbuf); + + /* Use bus module to send data frame */ +#ifdef WLMEDIA_HTSF + dhd_htsf_addtxts(dhdp, pktbuf); +#endif +#ifdef PROP_TXSTATUS + if (dhdp->wlfc_state && 
((athost_wl_status_info_t*)dhdp->wlfc_state)->proptxstatus_mode + != WLFC_FCMODE_NONE) { + dhd_os_wlfc_block(dhdp); + ret = dhd_wlfc_enque_sendq(dhdp->wlfc_state, DHD_PKTTAG_FIFO(PKTTAG(pktbuf)), + pktbuf); + dhd_wlfc_commit_packets(dhdp->wlfc_state, (f_commitpkt_t)dhd_bus_txdata, + dhdp->bus); + if (((athost_wl_status_info_t*)dhdp->wlfc_state)->toggle_host_if) { + ((athost_wl_status_info_t*)dhdp->wlfc_state)->toggle_host_if = 0; + } + dhd_os_wlfc_unblock(dhdp); + } + else + /* non-proptxstatus way */ + ret = dhd_bus_txdata(dhdp->bus, pktbuf); +#else + ret = dhd_bus_txdata(dhdp->bus, pktbuf); +#endif /* PROP_TXSTATUS */ + + + return ret; +} + +int +dhd_start_xmit(struct sk_buff *skb, struct net_device *net) +{ + int ret; + void *pktbuf; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net); + int ifidx; +#ifdef WLMEDIA_HTSF + uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz; +#else + uint8 htsfdlystat_sz = 0; +#endif + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + DHD_OS_WAKE_LOCK(&dhd->pub); + + /* Reject if down */ + if (!dhd->pub.up || (dhd->pub.busstate == DHD_BUS_DOWN)) { + DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n", + __FUNCTION__, dhd->pub.up, dhd->pub.busstate)); + netif_stop_queue(net); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + /* Send Event when bus down detected during data session */ + if (dhd->pub.busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__)); + net_os_send_hang_message(net); + } +#endif + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return -ENODEV; + } + + ifidx = dhd_net2idx(dhd, net); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx)); + netif_stop_queue(net); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return -ENODEV; + } + + /* Make sure there's enough room for any header */ + + if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) { + struct sk_buff *skb2; + + DHD_INFO(("%s: insufficient headroom\n", + dhd_ifname(&dhd->pub, ifidx))); + dhd->pub.tx_realloc++; + + skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz); + + dev_kfree_skb(skb); + if ((skb = skb2) == NULL) { + DHD_ERROR(("%s: skb_realloc_headroom failed\n", + dhd_ifname(&dhd->pub, ifidx))); + ret = -ENOMEM; + goto done; + } + } + + /* Convert to packet */ + if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) { + DHD_ERROR(("%s: PKTFRMNATIVE failed\n", + dhd_ifname(&dhd->pub, ifidx))); + dev_kfree_skb_any(skb); + ret = -ENOMEM; + goto done; + } +#ifdef WLMEDIA_HTSF + if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) { + uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf); + struct ether_header *eh = (struct ether_header *)pktdata; + + if (!ETHER_ISMULTI(eh->ether_dhost) && + (ntoh16(eh->ether_type) == ETHER_TYPE_IP)) { + eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS); + } + } +#endif + + ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf); + + +done: + if (ret) + dhd->pub.dstats.tx_dropped++; + else + dhd->pub.tx_packets++; + + DHD_OS_WAKE_UNLOCK(&dhd->pub); + + /* Return ok: we always eat the packet */ + return 0; +} + +void +dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state) +{ + struct net_device *net; + dhd_info_t *dhd = dhdp->info; + int i; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + dhdp->txoff = state; + ASSERT(dhd); + + if (ifidx == ALL_INTERFACES) { + /* Flow control on all active interfaces */ + for (i = 0; i < DHD_MAX_IFS; i++) { + if (dhd->iflist[i]) { + net = dhd->iflist[i]->net; + if (state == ON) + netif_stop_queue(net); + else + netif_wake_queue(net); + } + } + } + else 
{ + if (dhd->iflist[ifidx]) { + net = dhd->iflist[ifidx]->net; + if (state == ON) + netif_stop_queue(net); + else + netif_wake_queue(net); + } + } +} + +void +dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + struct sk_buff *skb; + uchar *eth; + uint len; + void *data, *pnext = NULL, *save_pktbuf; + int i; + dhd_if_t *ifp; + wl_event_msg_t event; + int tout_rx = 0; + int tout_ctrl = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + save_pktbuf = pktbuf; + + for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) { + struct ether_header *eh; + struct dot11_llc_snap_header *lsh; + + ifp = dhd->iflist[ifidx]; + if (ifp == NULL) { + DHD_ERROR(("%s: ifp is NULL. drop packet\n", + __FUNCTION__)); + PKTFREE(dhdp->osh, pktbuf, TRUE); + continue; + } +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) + /* Dropping packets before registering net device to avoid kernel panic */ + if (!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || + !dhd->pub.up) { + DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n", + __FUNCTION__)); + PKTFREE(dhdp->osh, pktbuf, TRUE); + continue; + } +#endif + + pnext = PKTNEXT(dhdp->osh, pktbuf); + PKTSETNEXT(wl->sh.osh, pktbuf, NULL); + + eh = (struct ether_header *)PKTDATA(wl->sh.osh, pktbuf); + lsh = (struct dot11_llc_snap_header *)&eh[1]; + + if ((ntoh16(eh->ether_type) < ETHER_TYPE_MIN) && + (PKTLEN(wl->sh.osh, pktbuf) >= RFC1042_HDR_LEN) && + bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 && + lsh->type == HTON16(BTA_PROT_L2CAP)) { + amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *) + ((uint8 *)eh + RFC1042_HDR_LEN); + ACL_data = NULL; + } + +#ifdef PROP_TXSTATUS + if (dhdp->wlfc_state && PKTLEN(wl->sh.osh, pktbuf) == 0) { + /* WLFC may send header only packet when + there is an urgent message but no packet to + piggy-back on + */ + ((athost_wl_status_info_t*)dhdp->wlfc_state)->stats.wlfc_header_only_pkt++; + PKTFREE(dhdp->osh, pktbuf, TRUE); + DHD_TRACE(("RX: wlfc header \n")); + continue; + } +#endif + + skb = PKTTONATIVE(dhdp->osh, pktbuf); + + /* Get the protocol, maintain skb around eth_type_trans() + * The main reason for this hack is for the limitation of + * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len' + * to perform skb_pull inside vs ETH_HLEN. Since to avoid + * coping of the packet coming from the network stack to add + * BDC, Hardware header etc, during network interface registration + * we set the 'net->hard_header_len' to ETH_HLEN + extra space required + * for BDC, Hardware header etc. 
and not just the ETH_HLEN + */ + eth = skb->data; + len = skb->len; + + ifp = dhd->iflist[ifidx]; + if (ifp == NULL) + ifp = dhd->iflist[0]; + + ASSERT(ifp); + skb->dev = ifp->net; + skb->protocol = eth_type_trans(skb, skb->dev); + + if (skb->pkt_type == PACKET_MULTICAST) { + dhd->pub.rx_multicast++; + } + + skb->data = eth; + skb->len = len; + +#ifdef WLMEDIA_HTSF + dhd_htsf_addrxts(dhdp, pktbuf); +#endif + /* Strip header, count, deliver upward */ + skb_pull(skb, ETH_HLEN); + + /* Process special event packets and then discard them */ + if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) { + dhd_wl_host_event(dhd, &ifidx, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) + skb->mac_header, +#else + skb->mac.raw, +#endif + &event, + &data); + + wl_event_to_host_order(&event); + if (!tout_ctrl) + tout_ctrl = DHD_PACKET_TIMEOUT_MS; + if (event.event_type == WLC_E_BTA_HCI_EVENT) { + dhd_bta_doevt(dhdp, data, event.datalen); + } +#ifdef PNO_SUPPORT + if (event.event_type == WLC_E_PFN_NET_FOUND) { + tout_ctrl *= 2; + } +#endif /* PNO_SUPPORT */ + } else { + tout_rx = DHD_PACKET_TIMEOUT_MS; + } + + ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]); + if (dhd->iflist[ifidx] && !dhd->iflist[ifidx]->state) + ifp = dhd->iflist[ifidx]; + + if (ifp->net) + ifp->net->last_rx = jiffies; + + dhdp->dstats.rx_bytes += skb->len; + dhdp->rx_packets++; /* Local count */ + + if (in_interrupt()) { + netif_rx(skb); + } else { + /* If the receive is not processed inside an ISR, + * the softirqd must be woken explicitly to service + * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled + * by netif_rx_ni(), but in earlier kernels, we need + * to do it manually. + */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) + netif_rx_ni(skb); +#else + ulong flags; + netif_rx(skb); + local_irq_save(flags); + RAISE_RX_SOFTIRQ(); + local_irq_restore(flags); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */ + } + } + + DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx); + DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl); +} + +void +dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx) +{ + /* Linux version has nothing to do */ + return; +} + +void +dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success) +{ + uint ifidx; + dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); + struct ether_header *eh; + uint16 type; + uint len; + + dhd_prot_hdrpull(dhdp, &ifidx, txp); + + eh = (struct ether_header *)PKTDATA(dhdp->osh, txp); + type = ntoh16(eh->ether_type); + + if (type == ETHER_TYPE_802_1X) + atomic_dec(&dhd->pend_8021x_cnt); + + /* Crack open the packet and check to see if it is BT HCI ACL data packet. + * If yes generate packet completion event. 
+ */ + len = PKTLEN(dhdp->osh, txp); + + /* Generate ACL data tx completion event locally to avoid SDIO bus transaction */ + if ((type < ETHER_TYPE_MIN) && (len >= RFC1042_HDR_LEN)) { + struct dot11_llc_snap_header *lsh = (struct dot11_llc_snap_header *)&eh[1]; + + if (bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 && + ntoh16(lsh->type) == BTA_PROT_L2CAP) { + + dhd_bta_tx_hcidata_complete(dhdp, txp, success); + } + } +} + +static struct net_device_stats * +dhd_get_stats(struct net_device *net) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net); + dhd_if_t *ifp; + int ifidx; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ifidx = dhd_net2idx(dhd, net); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__)); + return NULL; + } + + ifp = dhd->iflist[ifidx]; + ASSERT(dhd && ifp); + + if (dhd->pub.up) { + /* Use the protocol to get dongle stats */ + dhd_prot_dstats(&dhd->pub); + } + + /* Copy dongle stats to net device stats */ + ifp->stats.rx_packets = dhd->pub.dstats.rx_packets; + ifp->stats.tx_packets = dhd->pub.dstats.tx_packets; + ifp->stats.rx_bytes = dhd->pub.dstats.rx_bytes; + ifp->stats.tx_bytes = dhd->pub.dstats.tx_bytes; + ifp->stats.rx_errors = dhd->pub.dstats.rx_errors; + ifp->stats.tx_errors = dhd->pub.dstats.tx_errors; + ifp->stats.rx_dropped = dhd->pub.dstats.rx_dropped; + ifp->stats.tx_dropped = dhd->pub.dstats.tx_dropped; + ifp->stats.multicast = dhd->pub.dstats.multicast; + + return &ifp->stats; +} + +#ifdef DHDTHREAD +static int +dhd_watchdog_thread(void *data) +{ + tsk_ctl_t *tsk = (tsk_ctl_t *)data; + dhd_info_t *dhd = (dhd_info_t *)tsk->parent; + /* This thread doesn't need any user-level access, + * so get rid of all our resources + */ + if (dhd_watchdog_prio > 0) { + struct sched_param param; + param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)? 
+			dhd_watchdog_prio:(MAX_RT_PRIO-1);
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+
+	DAEMONIZE("dhd_watchdog");
+
+	/* Run until signal received */
+	complete(&tsk->completed);
+
+	while (1)
+		if (down_interruptible (&tsk->sema) == 0) {
+			unsigned long flags;
+
+			SMP_RD_BARRIER_DEPENDS();
+			if (tsk->terminated) {
+				break;
+			}
+
+			dhd_os_sdlock(&dhd->pub);
+			if (dhd->pub.dongle_reset == FALSE) {
+				DHD_TIMER(("%s:\n", __FUNCTION__));
+
+				/* Call the bus module watchdog */
+				dhd_bus_watchdog(&dhd->pub);
+
+				flags = dhd_os_spin_lock(&dhd->pub);
+				/* Count the tick for reference */
+				dhd->pub.tickcnt++;
+				/* Reschedule the watchdog */
+				if (dhd->wd_timer_valid)
+					mod_timer(&dhd->timer,
+						jiffies + msecs_to_jiffies(dhd_watchdog_ms));
+				dhd_os_spin_unlock(&dhd->pub, flags);
+			}
+			dhd_os_sdunlock(&dhd->pub);
+			DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		} else {
+			break;
+		}
+
+	complete_and_exit(&tsk->completed, 0);
+}
+#endif /* DHDTHREAD */
+
+static void dhd_watchdog(ulong data)
+{
+	dhd_info_t *dhd = (dhd_info_t *)data;
+	unsigned long flags;
+
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	if (dhd->pub.dongle_reset) {
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+		return;
+	}
+
+#ifdef DHDTHREAD
+	if (dhd->thr_wdt_ctl.thr_pid >= 0) {
+		up(&dhd->thr_wdt_ctl.sema);
+		return;
+	}
+#endif /* DHDTHREAD */
+
+	dhd_os_sdlock(&dhd->pub);
+	/* Call the bus module watchdog */
+	dhd_bus_watchdog(&dhd->pub);
+
+	flags = dhd_os_spin_lock(&dhd->pub);
+	/* Count the tick for reference */
+	dhd->pub.tickcnt++;
+
+	/* Reschedule the watchdog */
+	if (dhd->wd_timer_valid)
+		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
+	dhd_os_spin_unlock(&dhd->pub, flags);
+	dhd_os_sdunlock(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+}
+
+#ifdef DHDTHREAD
+static int
+dhd_dpc_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+
+	/* This thread doesn't need any user-level access,
+	 * so get rid of all our resources
+	 */
+	if (dhd_dpc_prio > 0)
+	{
+		struct sched_param param;
+		param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+
+	DAEMONIZE("dhd_dpc");
+	/* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below */
+
+	/* signal: thread has started */
+	complete(&tsk->completed);
+
+	/* Run until signal received */
+	while (1) {
+		if (down_interruptible(&tsk->sema) == 0) {
+
+			SMP_RD_BARRIER_DEPENDS();
+			if (tsk->terminated) {
+				break;
+			}
+
+			/* Call bus dpc unless it indicated down (then clean stop) */
+			if (dhd->pub.busstate != DHD_BUS_DOWN) {
+				if (dhd_bus_dpc(dhd->pub.bus)) {
+					up(&tsk->sema);
+				}
+				else {
+					DHD_OS_WAKE_UNLOCK(&dhd->pub);
+				}
+			} else {
+				if (dhd->pub.up)
+					dhd_bus_stop(dhd->pub.bus, TRUE);
+				DHD_OS_WAKE_UNLOCK(&dhd->pub);
+			}
+		}
+		else
+			break;
+	}
+
+	complete_and_exit(&tsk->completed, 0);
+}
+#endif /* DHDTHREAD */
+
+static void
+dhd_dpc(ulong data)
+{
+	dhd_info_t *dhd;
+
+	dhd = (dhd_info_t *)data;
+
+	/* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
+	 * down below, wake lock is set,
+	 * the tasklet is initialized in dhd_attach()
+	 */
+	/* Call bus dpc unless it indicated down (then clean stop) */
+	if (dhd->pub.busstate != DHD_BUS_DOWN) {
+		if (dhd_bus_dpc(dhd->pub.bus))
+			tasklet_schedule(&dhd->tasklet);
+		else
+			DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	} else {
+		dhd_bus_stop(dhd->pub.bus, TRUE);
+		DHD_OS_WAKE_UNLOCK(&dhd->pub);
+	}
+}
+
+void
+dhd_sched_dpc(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
+	DHD_OS_WAKE_LOCK(dhdp);
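+	/* Prefer the dedicated DPC kthread when one exists (thr_pid >= 0);
+	 * otherwise fall back to the tasklet.  Whichever path runs is
+	 * responsible for dropping the wake lock taken above once the DPC
+	 * work completes.
+	 */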
+#ifdef DHDTHREAD + if (dhd->thr_dpc_ctl.thr_pid >= 0) { + up(&dhd->thr_dpc_ctl.sema); + return; + } +#endif /* DHDTHREAD */ + + tasklet_schedule(&dhd->tasklet); +} + +#ifdef TOE +/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */ +static int +dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol) +{ + wl_ioctl_t ioc; + char buf[32]; + int ret; + + memset(&ioc, 0, sizeof(ioc)); + + ioc.cmd = WLC_GET_VAR; + ioc.buf = buf; + ioc.len = (uint)sizeof(buf); + ioc.set = FALSE; + + strcpy(buf, "toe_ol"); + if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) { + /* Check for older dongle image that doesn't support toe_ol */ + if (ret == -EIO) { + DHD_ERROR(("%s: toe not supported by device\n", + dhd_ifname(&dhd->pub, ifidx))); + return -EOPNOTSUPP; + } + + DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret)); + return ret; + } + + memcpy(toe_ol, buf, sizeof(uint32)); + return 0; +} + +/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */ +static int +dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol) +{ + wl_ioctl_t ioc; + char buf[32]; + int toe, ret; + + memset(&ioc, 0, sizeof(ioc)); + + ioc.cmd = WLC_SET_VAR; + ioc.buf = buf; + ioc.len = (uint)sizeof(buf); + ioc.set = TRUE; + + /* Set toe_ol as requested */ + + strcpy(buf, "toe_ol"); + memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32)); + + if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) { + DHD_ERROR(("%s: could not set toe_ol: ret=%d\n", + dhd_ifname(&dhd->pub, ifidx), ret)); + return ret; + } + + /* Enable toe globally only if any components are enabled. */ + + toe = (toe_ol != 0); + + strcpy(buf, "toe"); + memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32)); + + if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) { + DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret)); + return ret; + } + + return 0; +} +#endif /* TOE */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) +static void +dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net); + + sprintf(info->driver, "wl"); + sprintf(info->version, "%lu", dhd->pub.drv_version); +} + +struct ethtool_ops dhd_ethtool_ops = { + .get_drvinfo = dhd_ethtool_get_drvinfo +}; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */ + + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) +static int +dhd_ethtool(dhd_info_t *dhd, void *uaddr) +{ + struct ethtool_drvinfo info; + char drvname[sizeof(info.driver)]; + uint32 cmd; +#ifdef TOE + struct ethtool_value edata; + uint32 toe_cmpnt, csum_dir; + int ret; +#endif + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* all ethtool calls start with a cmd word */ + if (copy_from_user(&cmd, uaddr, sizeof (uint32))) + return -EFAULT; + + switch (cmd) { + case ETHTOOL_GDRVINFO: + /* Copy out any request driver name */ + if (copy_from_user(&info, uaddr, sizeof(info))) + return -EFAULT; + strncpy(drvname, info.driver, sizeof(info.driver)); + drvname[sizeof(info.driver)-1] = '\0'; + + /* clear struct for return */ + memset(&info, 0, sizeof(info)); + info.cmd = cmd; + + /* if dhd requested, identify ourselves */ + if (strcmp(drvname, "?dhd") == 0) { + sprintf(info.driver, "dhd"); + strcpy(info.version, EPI_VERSION_STR); + } + + /* otherwise, require dongle to be up */ + else if (!dhd->pub.up) { + DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__)); + return -ENODEV; + } + + /* 
finally, report dongle driver type */ + else if (dhd->pub.iswl) + sprintf(info.driver, "wl"); + else + sprintf(info.driver, "xx"); + + sprintf(info.version, "%lu", dhd->pub.drv_version); + if (copy_to_user(uaddr, &info, sizeof(info))) + return -EFAULT; + DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__, + (int)sizeof(drvname), drvname, info.driver)); + break; + +#ifdef TOE + /* Get toe offload components from dongle */ + case ETHTOOL_GRXCSUM: + case ETHTOOL_GTXCSUM: + if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0) + return ret; + + csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL; + + edata.cmd = cmd; + edata.data = (toe_cmpnt & csum_dir) ? 1 : 0; + + if (copy_to_user(uaddr, &edata, sizeof(edata))) + return -EFAULT; + break; + + /* Set toe offload components in dongle */ + case ETHTOOL_SRXCSUM: + case ETHTOOL_STXCSUM: + if (copy_from_user(&edata, uaddr, sizeof(edata))) + return -EFAULT; + + /* Read the current settings, update and write back */ + if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0) + return ret; + + csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL; + + if (edata.data != 0) + toe_cmpnt |= csum_dir; + else + toe_cmpnt &= ~csum_dir; + + if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0) + return ret; + + /* If setting TX checksum mode, tell Linux the new mode */ + if (cmd == ETHTOOL_STXCSUM) { + if (edata.data) + dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM; + else + dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM; + } + + break; +#endif /* TOE */ + + default: + return -EOPNOTSUPP; + } + + return 0; +} +#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */ + +static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + if (!dhdp) + return FALSE; + if ((error == -ETIMEDOUT) || ((dhdp->busstate == DHD_BUS_DOWN) && + (!dhdp->dongle_reset))) { + DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__, + dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate)); + net_os_send_hang_message(net); + return TRUE; + } +#endif + return FALSE; +} + +static int +dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net); + dhd_ioctl_t ioc; + int bcmerror = 0; + int buflen = 0; + void *buf = NULL; + uint driver = 0; + int ifidx; + int ret; + + DHD_OS_WAKE_LOCK(&dhd->pub); + + /* send to dongle only if we are not waiting for reload already */ + if (dhd->pub.hang_was_sent) { + DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__)); + DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return OSL_ERROR(BCME_DONGLE_DOWN); + } + + ifidx = dhd_net2idx(dhd, net); + DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd)); + + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s: BAD IF\n", __FUNCTION__)); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return -1; + } + +#if defined(WL_WIRELESS_EXT) + /* linux wireless extensions */ + if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) { + /* may recurse, do NOT lock */ + ret = wl_iw_ioctl(net, ifr, cmd); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return ret; + } +#endif /* defined(WL_WIRELESS_EXT) */ + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) + if (cmd == SIOCETHTOOL) { + ret = dhd_ethtool(dhd, (void*)ifr->ifr_data); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return ret; + } +#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */ + + if (cmd == SIOCDEVPRIVATE+1) { + ret = 
wl_android_priv_cmd(net, ifr, cmd); + dhd_check_hang(net, &dhd->pub, ret); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return ret; + } + + if (cmd != SIOCDEVPRIVATE) { + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return -EOPNOTSUPP; + } + + memset(&ioc, 0, sizeof(ioc)); + + /* Copy the ioc control structure part of ioctl request */ + if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) { + bcmerror = -BCME_BADADDR; + goto done; + } + + /* Copy out any buffer passed */ + if (ioc.buf) { + buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN); + /* optimization for direct ioctl calls from kernel */ + /* + if (segment_eq(get_fs(), KERNEL_DS)) { + buf = ioc.buf; + } else { + */ + { + if (!(buf = (char*)MALLOC(dhd->pub.osh, buflen))) { + bcmerror = -BCME_NOMEM; + goto done; + } + if (copy_from_user(buf, ioc.buf, buflen)) { + bcmerror = -BCME_BADADDR; + goto done; + } + } + } + + /* To differentiate between wl and dhd read 4 more byes */ + if ((copy_from_user(&driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t), + sizeof(uint)) != 0)) { + bcmerror = -BCME_BADADDR; + goto done; + } + + if (!capable(CAP_NET_ADMIN)) { + bcmerror = -BCME_EPERM; + goto done; + } + + /* check for local dhd ioctl and handle it */ + if (driver == DHD_IOCTL_MAGIC) { + bcmerror = dhd_ioctl((void *)&dhd->pub, &ioc, buf, buflen); + if (bcmerror) + dhd->pub.bcmerror = bcmerror; + goto done; + } + + /* send to dongle (must be up, and wl). */ + if (dhd->pub.busstate != DHD_BUS_DATA) { + bcmerror = BCME_DONGLE_DOWN; + goto done; + } + + if (!dhd->pub.iswl) { + bcmerror = BCME_DONGLE_DOWN; + goto done; + } + + /* + * Flush the TX queue if required for proper message serialization: + * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to + * prevent M4 encryption and + * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to + * prevent disassoc frame being sent before WPS-DONE frame. 
+ */ + if (ioc.cmd == WLC_SET_KEY || + (ioc.cmd == WLC_SET_VAR && ioc.buf != NULL && + strncmp("wsec_key", ioc.buf, 9) == 0) || + (ioc.cmd == WLC_SET_VAR && ioc.buf != NULL && + strncmp("bsscfg:wsec_key", ioc.buf, 15) == 0) || + ioc.cmd == WLC_DISASSOC) + dhd_wait_pend8021x(net); + +#ifdef WLMEDIA_HTSF + if (ioc.buf) { + /* short cut wl ioctl calls here */ + if (strcmp("htsf", ioc.buf) == 0) { + dhd_ioctl_htsf_get(dhd, 0); + return BCME_OK; + } + + if (strcmp("htsflate", ioc.buf) == 0) { + if (ioc.set) { + memset(ts, 0, sizeof(tstamp_t)*TSMAX); + memset(&maxdelayts, 0, sizeof(tstamp_t)); + maxdelay = 0; + tspktcnt = 0; + maxdelaypktno = 0; + memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN); + memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN); + memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN); + memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN); + } else { + dhd_dump_latency(); + } + return BCME_OK; + } + if (strcmp("htsfclear", ioc.buf) == 0) { + memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN); + memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN); + memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN); + memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN); + htsf_seqnum = 0; + return BCME_OK; + } + if (strcmp("htsfhis", ioc.buf) == 0) { + dhd_dump_htsfhisto(&vi_d1, "H to D"); + dhd_dump_htsfhisto(&vi_d2, "D to D"); + dhd_dump_htsfhisto(&vi_d3, "D to H"); + dhd_dump_htsfhisto(&vi_d4, "H to H"); + return BCME_OK; + } + if (strcmp("tsport", ioc.buf) == 0) { + if (ioc.set) { + memcpy(&tsport, ioc.buf + 7, 4); + } else { + DHD_ERROR(("current timestamp port: %d \n", tsport)); + } + return BCME_OK; + } + } +#endif /* WLMEDIA_HTSF */ + + bcmerror = dhd_wl_ioctl(&dhd->pub, ifidx, (wl_ioctl_t *)&ioc, buf, buflen); + +done: + dhd_check_hang(net, &dhd->pub, bcmerror); + + if (!bcmerror && buf && ioc.buf) { + if (copy_to_user(ioc.buf, buf, buflen)) + bcmerror = -EFAULT; + } + + if (buf) + MFREE(dhd->pub.osh, buf, buflen); + + DHD_OS_WAKE_UNLOCK(&dhd->pub); + + return OSL_ERROR(bcmerror); +} + +#ifdef WL_CFG80211 +static int +dhd_cleanup_virt_ifaces(dhd_info_t *dhd) +{ + int i = 1; /* Leave ifidx 0 [Primary Interface] */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + int rollback_lock = FALSE; +#endif + + DHD_TRACE(("%s: Enter \n", __func__)); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + /* release lock for unregister_netdev */ + if (rtnl_is_locked()) { + rtnl_unlock(); + rollback_lock = TRUE; + } +#endif + + for (i = 1; i < DHD_MAX_IFS; i++) { + dhd_net_if_lock_local(dhd); + if (dhd->iflist[i]) { + DHD_TRACE(("Deleting IF: %d \n", i)); + if ((dhd->iflist[i]->state != DHD_IF_DEL) && + (dhd->iflist[i]->state != DHD_IF_DELETING)) { + dhd->iflist[i]->state = DHD_IF_DEL; + dhd->iflist[i]->idx = i; + dhd_op_if(dhd->iflist[i]); + } + } + dhd_net_if_unlock_local(dhd); + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + if (rollback_lock) + rtnl_lock(); +#endif + + return 0; +} +#endif /* WL_CFG80211 */ + +static int +dhd_stop(struct net_device *net) +{ + int ifidx = 0; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net); + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_TRACE(("%s: Enter %p\n", __FUNCTION__, net)); + if (dhd->pub.up == 0) { + goto exit; + } + ifidx = dhd_net2idx(dhd, net); + +#ifdef WL_CFG80211 + if (ifidx == 0) { + wl_cfg80211_down(NULL); + + /* + * For CFG80211: Clean up all the left over virtual interfaces + * when the primary Interface is brought down. 
[ifconfig wlan0 down] + */ + if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) && + (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) { + dhd_cleanup_virt_ifaces(dhd); + } + } +#endif + +#ifdef PROP_TXSTATUS + dhd_wlfc_cleanup(&dhd->pub); +#endif + /* Set state and stop OS transmissions */ + dhd->pub.up = 0; + netif_stop_queue(net); + + /* Stop the protocol module */ + dhd_prot_stop(&dhd->pub); + + OLD_MOD_DEC_USE_COUNT; +exit: +#if defined(WL_CFG80211) + if (ifidx == 0 && !dhd_download_fw_on_driverload) + wl_android_wifi_off(net); +#endif + dhd->pub.rxcnt_timeout = 0; + dhd->pub.txcnt_timeout = 0; + + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return 0; +} + +static int +dhd_open(struct net_device *net) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net); + +#ifdef TOE + uint32 toe_ol; +#endif + int ifidx; + int32 ret = 0; + + DHD_OS_WAKE_LOCK(&dhd->pub); + /* Update FW path if it was changed */ + if ((firmware_path != NULL) && (firmware_path[0] != '\0')) { + if (firmware_path[strlen(firmware_path)-1] == '\n') + firmware_path[strlen(firmware_path)-1] = '\0'; + strcpy(fw_path, firmware_path); + firmware_path[0] = '\0'; + } + + dhd->pub.hang_was_sent = 0; +#if !defined(WL_CFG80211) + /* + * Force start if ifconfig_up gets called before START command + * We keep WEXT's wl_control_wl_start to provide backward compatibility + * This should be removed in the future + */ + ret = wl_control_wl_start(net); + if (ret != 0) { + DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret)); + ret = -1; + goto exit; + } +#endif + + ifidx = dhd_net2idx(dhd, net); + DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx)); + + if (ifidx < 0) { + DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__)); + ret = -1; + goto exit; + } + + if (!dhd->iflist[ifidx] || dhd->iflist[ifidx]->state == DHD_IF_DEL) { + DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__)); + ret = -1; + goto exit; + } + + if (ifidx == 0) { + atomic_set(&dhd->pend_8021x_cnt, 0); +#if defined(WL_CFG80211) + DHD_ERROR(("\n%s\n", dhd_version)); + if (!dhd_download_fw_on_driverload) { + ret = wl_android_wifi_on(net); + if (ret != 0) { + DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret)); + ret = -1; + goto exit; + } + } +#endif /* defined(WL_CFG80211) */ + + if (dhd->pub.busstate != DHD_BUS_DATA) { + + /* try to bring up bus */ + if ((ret = dhd_bus_start(&dhd->pub)) != 0) { + DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret)); + ret = -1; + goto exit; + } + + } + + /* dhd_prot_init has been called in dhd_bus_start or wl_android_wifi_on */ + memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN); + +#ifdef TOE + /* Get current TOE mode from dongle */ + if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) + dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM; + else + dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM; +#endif /* TOE */ + +#if defined(WL_CFG80211) + if (unlikely(wl_cfg80211_up(NULL))) { + DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__)); + ret = -1; + goto exit; + } +#endif /* WL_CFG80211 */ + } + + /* Allow transmit calls */ + netif_start_queue(net); + dhd->pub.up = 1; + +#ifdef BCMDBGFS + dhd_dbg_init(&dhd->pub); +#endif + + OLD_MOD_INC_USE_COUNT; +exit: + if (ret) + dhd_stop(net); + + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return ret; +} + +int dhd_do_driver_init(struct net_device *net) +{ + dhd_info_t *dhd = NULL; + + if (!net) { + DHD_ERROR(("Primary Interface not initialized \n")); + return -EINVAL; + } + + dhd = *(dhd_info_t **)netdev_priv(net); + + /* If 
driver is already initialized, do nothing + */ + if (dhd->pub.busstate == DHD_BUS_DATA) { + DHD_TRACE(("Driver already Inititalized. Nothing to do")); + return 0; + } + + if (dhd_open(net) < 0) { + DHD_ERROR(("Driver Init Failed \n")); + return -1; + } + + return 0; +} + +osl_t * +dhd_osl_attach(void *pdev, uint bustype) +{ + return osl_attach(pdev, bustype, TRUE); +} + +void +dhd_osl_detach(osl_t *osh) +{ + if (MALLOCED(osh)) { + DHD_ERROR(("%s: MEMORY LEAK %d bytes\n", __FUNCTION__, MALLOCED(osh))); + } + osl_detach(osh); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + up(&dhd_registration_sem); +#endif +} + +int +dhd_add_if(dhd_info_t *dhd, int ifidx, void *handle, char *name, + uint8 *mac_addr, uint32 flags, uint8 bssidx) +{ + dhd_if_t *ifp; + + DHD_TRACE(("%s: idx %d, handle->%p\n", __FUNCTION__, ifidx, handle)); + + ASSERT(dhd && (ifidx < DHD_MAX_IFS)); + + ifp = dhd->iflist[ifidx]; + if (ifp != NULL) { + if (ifp->net != NULL) { + netif_stop_queue(ifp->net); + unregister_netdev(ifp->net); + free_netdev(ifp->net); + } + } else + if ((ifp = MALLOC(dhd->pub.osh, sizeof(dhd_if_t))) == NULL) { + DHD_ERROR(("%s: OOM - dhd_if_t\n", __FUNCTION__)); + return -ENOMEM; + } + + memset(ifp, 0, sizeof(dhd_if_t)); + ifp->info = dhd; + dhd->iflist[ifidx] = ifp; + strncpy(ifp->name, name, IFNAMSIZ); + ifp->name[IFNAMSIZ] = '\0'; + if (mac_addr != NULL) + memcpy(&ifp->mac_addr, mac_addr, ETHER_ADDR_LEN); + + if (handle == NULL) { + ifp->state = DHD_IF_ADD; + ifp->idx = ifidx; + ifp->bssidx = bssidx; + ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0); + up(&dhd->thr_sysioc_ctl.sema); + } else + ifp->net = (struct net_device *)handle; + + return 0; +} + +void +dhd_del_if(dhd_info_t *dhd, int ifidx) +{ + dhd_if_t *ifp; + + DHD_TRACE(("%s: idx %d\n", __FUNCTION__, ifidx)); + + ASSERT(dhd && ifidx && (ifidx < DHD_MAX_IFS)); + ifp = dhd->iflist[ifidx]; + if (!ifp) { + DHD_ERROR(("%s: Null interface\n", __FUNCTION__)); + return; + } + + ifp->state = DHD_IF_DEL; + ifp->idx = ifidx; + ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0); + up(&dhd->thr_sysioc_ctl.sema); +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) +static struct net_device_ops dhd_ops_pri = { + .ndo_open = dhd_open, + .ndo_stop = dhd_stop, + .ndo_get_stats = dhd_get_stats, + .ndo_do_ioctl = dhd_ioctl_entry, + .ndo_start_xmit = dhd_start_xmit, + .ndo_set_mac_address = dhd_set_mac_address, + .ndo_set_multicast_list = dhd_set_multicast_list, +}; + +static struct net_device_ops dhd_ops_virt = { + .ndo_get_stats = dhd_get_stats, + .ndo_do_ioctl = dhd_ioctl_entry, + .ndo_start_xmit = dhd_start_xmit, + .ndo_set_mac_address = dhd_set_mac_address, + .ndo_set_multicast_list = dhd_set_multicast_list, +}; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */ + +dhd_pub_t * +dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) +{ + dhd_info_t *dhd = NULL; + struct net_device *net = NULL; + + dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT; + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* updates firmware nvram path if it was provided as module parameters */ + if ((firmware_path != NULL) && (firmware_path[0] != '\0')) + strcpy(fw_path, firmware_path); + if ((nvram_path != NULL) && (nvram_path[0] != '\0')) + strcpy(nv_path, nvram_path); + + /* Allocate etherdev, including space for private structure */ + if (!(net = alloc_etherdev(sizeof(dhd)))) { + DHD_ERROR(("%s: OOM - alloc_etherdev\n", __FUNCTION__)); + goto fail; + } + dhd_state |= DHD_ATTACH_STATE_NET_ALLOC; + + /* Allocate primary dhd_info */ + if (!(dhd = MALLOC(osh, 
sizeof(dhd_info_t)))) { + DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__)); + goto fail; + } + memset(dhd, 0, sizeof(dhd_info_t)); + +#ifdef DHDTHREAD + dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID; + dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID; +#else + dhd->dhd_tasklet_create = FALSE; +#endif /* DHDTHREAD */ + dhd->thr_sysioc_ctl.thr_pid = DHD_PID_KT_INVALID; + dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC; + + /* + * Save the dhd_info into the priv + */ + memcpy((void *)netdev_priv(net), &dhd, sizeof(dhd)); + dhd->pub.osh = osh; + + /* Link to info module */ + dhd->pub.info = dhd; + /* Link to bus module */ + dhd->pub.bus = bus; + dhd->pub.hdrlen = bus_hdrlen; + + /* Set network interface name if it was provided as module parameter */ + if (iface_name[0]) { + int len; + char ch; + strncpy(net->name, iface_name, IFNAMSIZ); + net->name[IFNAMSIZ - 1] = 0; + len = strlen(net->name); + ch = net->name[len - 1]; + if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2)) + strcat(net->name, "%d"); + } + + sema_init(&dhd->proto_sem, 1); +#ifdef DHDTHREAD + sema_init(&dhd->sdsem, 1); +#endif + +#ifdef PROP_TXSTATUS + spin_lock_init(&dhd->wlfc_spinlock); + dhd->pub.wlfc_enabled = TRUE; +#endif /* PROP_TXSTATUS */ + + /* Initialize other structure content */ + init_waitqueue_head(&dhd->ioctl_resp_wait); + init_waitqueue_head(&dhd->ctrl_wait); + + /* Initialize the spinlocks */ + spin_lock_init(&dhd->sdlock); + spin_lock_init(&dhd->txqlock); + spin_lock_init(&dhd->dhd_lock); + + + if (dhd_add_if(dhd, 0, (void *)net, net->name, NULL, 0, 0) == DHD_BAD_IF) + goto fail; + dhd_state |= DHD_ATTACH_STATE_ADD_IF; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) + net->open = NULL; +#else + net->netdev_ops = NULL; +#endif + + /* Initialize Wakelock stuff */ + spin_lock_init(&dhd->wakelock_spinlock); + dhd->wakelock_counter = 0; + dhd->wakelock_rx_timeout_enable = 0; + dhd->wakelock_ctrl_timeout_enable = 0; +#ifdef CONFIG_HAS_WAKELOCK + wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake"); + wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake"); + wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake"); +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_init(&dhd->dhd_net_if_mutex); + mutex_init(&dhd->dhd_suspend_mutex); +#endif + dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT; + + /* Attach and link in the protocol */ + if (dhd_prot_attach(&dhd->pub) != 0) { + DHD_ERROR(("dhd_prot_attach failed\n")); + goto fail; + } + dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH; + +#ifdef WL_CFG80211 + /* Attach and link in the cfg80211 */ + if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) { + DHD_ERROR(("wl_cfg80211_attach failed\n")); + goto fail; + } + + dhd_monitor_init(&dhd->pub); + dhd_state |= DHD_ATTACH_STATE_CFG80211; +#endif +#if defined(WL_WIRELESS_EXT) + /* Attach and link in the iw */ + if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) { + if (wl_iw_attach(net, (void *)&dhd->pub) != 0) { + DHD_ERROR(("wl_iw_attach failed\n")); + goto fail; + } + dhd_state |= DHD_ATTACH_STATE_WL_ATTACH; + } +#endif /* defined(WL_WIRELESS_EXT) */ + + + /* Set up the watchdog timer */ + init_timer(&dhd->timer); + dhd->timer.data = (ulong)dhd; + dhd->timer.function = dhd_watchdog; + +#ifdef DHDTHREAD + /* Initialize thread based operation and lock */ + if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0)) { + dhd->threads_only = TRUE; + } + else { + dhd->threads_only = FALSE; + } + + if (dhd_dpc_prio >= 0) { + /* Initialize watchdog thread */ + PROC_START(dhd_watchdog_thread, 
dhd, &dhd->thr_wdt_ctl, 0); + } else { + dhd->thr_wdt_ctl.thr_pid = -1; + } + + /* Set up the bottom half handler */ + if (dhd_dpc_prio >= 0) { + /* Initialize DPC thread */ + PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0); + } else { + /* use tasklet for dpc */ + tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd); + dhd->thr_dpc_ctl.thr_pid = -1; + } +#else + /* Set up the bottom half handler */ + tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd); + dhd->dhd_tasklet_create = TRUE; +#endif /* DHDTHREAD */ + + if (dhd_sysioc) { + PROC_START(_dhd_sysioc_thread, dhd, &dhd->thr_sysioc_ctl, 0); + } else { + dhd->thr_sysioc_ctl.thr_pid = -1; + } + dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + INIT_WORK(&dhd->work_hang, dhd_hang_process); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ + /* + * Save the dhd_info into the priv + */ + memcpy(netdev_priv(net), &dhd, sizeof(dhd)); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) + register_pm_notifier(&dhd_sleep_pm_notifier); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */ + +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) + dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20; + dhd->early_suspend.suspend = dhd_early_suspend; + dhd->early_suspend.resume = dhd_late_resume; + register_early_suspend(&dhd->early_suspend); + dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE; +#endif + +#ifdef ARP_OFFLOAD_SUPPORT + dhd->pend_ipaddr = 0; + register_inetaddr_notifier(&dhd_notifier); +#endif /* ARP_OFFLOAD_SUPPORT */ + + dhd_state |= DHD_ATTACH_STATE_DONE; + dhd->dhd_state = dhd_state; + return &dhd->pub; + +fail: + if (dhd_state < DHD_ATTACH_STATE_DHD_ALLOC) { + if (net) free_netdev(net); + } else { + DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n", + __FUNCTION__, dhd_state, &dhd->pub)); + dhd->dhd_state = dhd_state; + dhd_detach(&dhd->pub); + dhd_free(&dhd->pub); + } + + return NULL; +} + +int +dhd_bus_start(dhd_pub_t *dhdp) +{ + int ret = -1; + dhd_info_t *dhd = (dhd_info_t*)dhdp->info; + unsigned long flags; + + ASSERT(dhd); + + DHD_TRACE(("Enter %s:\n", __FUNCTION__)); + +#ifdef DHDTHREAD + if (dhd->threads_only) + dhd_os_sdlock(dhdp); +#endif /* DHDTHREAD */ + + /* try to download image and nvram to the dongle */ + if ((dhd->pub.busstate == DHD_BUS_DOWN) && + (fw_path != NULL) && (fw_path[0] != '\0') && + (nv_path != NULL) && (nv_path[0] != '\0')) { + /* wake lock moved to dhdsdio_download_firmware */ + if (!(dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh, + fw_path, nv_path))) { + DHD_ERROR(("%s: dhdsdio_probe_download failed. 
firmware = %s nvram = %s\n", + __FUNCTION__, fw_path, nv_path)); +#ifdef DHDTHREAD + if (dhd->threads_only) + dhd_os_sdunlock(dhdp); +#endif /* DHDTHREAD */ + return -1; + } + } + if (dhd->pub.busstate != DHD_BUS_LOAD) { +#ifdef DHDTHREAD + if (dhd->threads_only) + dhd_os_sdunlock(dhdp); +#endif /* DHDTHREAD */ + return -ENETDOWN; + } + + /* Start the watchdog timer */ + dhd->pub.tickcnt = 0; + dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms); + + /* Bring up the bus */ + if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) { + + DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret)); +#ifdef DHDTHREAD + if (dhd->threads_only) + dhd_os_sdunlock(dhdp); +#endif /* DHDTHREAD */ + return ret; + } +#if defined(OOB_INTR_ONLY) + /* Host registration for OOB interrupt */ + if (bcmsdh_register_oob_intr(dhdp)) { + /* deactivate timer and wait for the handler to finish */ + + flags = dhd_os_spin_lock(&dhd->pub); + dhd->wd_timer_valid = FALSE; + dhd_os_spin_unlock(&dhd->pub, flags); + del_timer_sync(&dhd->timer); + + DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__)); +#ifdef DHDTHREAD + if (dhd->threads_only) + dhd_os_sdunlock(dhdp); +#endif /* DHDTHREAD */ + return -ENODEV; + } + + /* Enable oob at firmware */ + dhd_enable_oob_intr(dhd->pub.bus, TRUE); +#endif /* defined(OOB_INTR_ONLY) */ + + /* If bus is not ready, can't come up */ + if (dhd->pub.busstate != DHD_BUS_DATA) { + flags = dhd_os_spin_lock(&dhd->pub); + dhd->wd_timer_valid = FALSE; + dhd_os_spin_unlock(&dhd->pub, flags); + del_timer_sync(&dhd->timer); + DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__)); +#ifdef DHDTHREAD + if (dhd->threads_only) + dhd_os_sdunlock(dhdp); +#endif /* DHDTHREAD */ + return -ENODEV; + } + +#ifdef DHDTHREAD + if (dhd->threads_only) + dhd_os_sdunlock(dhdp); +#endif /* DHDTHREAD */ + +#ifdef READ_MACADDR + dhd_read_macaddr(dhd); +#endif + + /* Bus is ready, do any protocol initialization */ + if ((ret = dhd_prot_init(&dhd->pub)) < 0) + return ret; + +#ifdef WRITE_MACADDR + dhd_write_macaddr(dhd->pub.mac.octet); +#endif + +#ifdef ARP_OFFLOAD_SUPPORT + if (dhd->pend_ipaddr) { +#ifdef AOE_IP_ALIAS_SUPPORT + aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE); +#endif /* AOE_IP_ALIAS_SUPPORT */ + dhd->pend_ipaddr = 0; + } +#endif /* ARP_OFFLOAD_SUPPORT */ + + return 0; +} + +#if !defined(AP) && defined(WLP2P) && defined(WL_ENABLE_P2P_IF) +/* For Android ICS MR2 release, the concurrent mode is enabled by default and the firmware + * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA + * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware + * would still be named as fw_bcmdhd_apsta. + */ +static u32 +dhd_concurrent_fw(dhd_pub_t *dhd) +{ + int ret = 0; + char buf[WLC_IOCTL_SMLEN]; + + if ((!op_mode) && (strstr(fw_path, "_p2p") == NULL) && + (strstr(fw_path, "_apsta") == NULL)) { + /* Given path is for the STA firmware. Check whether P2P support is present in + * the firmware. If so, set mode as P2P (concurrent support). 
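+	 * The return value, as consumed by the caller below, is: 1 if the STA
+	 * firmware reports P2P support, 0 if the check does not apply or no
+	 * support was found, and a negative error code if the "p2p" iovar
+	 * query itself fails.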
+ */ + memset(buf, 0, sizeof(buf)); + bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), + FALSE, 0)) < 0) { + DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret)); + } else if (buf[0] == 1) { + DHD_TRACE(("%s: P2P is supported\n", __FUNCTION__)); + return 1; + } + } + return ret; +} +#endif + +/* + * dhd_preinit_ioctls makes special pre-setting in the firmware before radio turns on + * returns : 0 if all settings passed or negative value if anything failed +*/ +int +dhd_preinit_ioctls(dhd_pub_t *dhd) +{ + int ret = 0; + char eventmask[WL_EVENTING_MASK_LEN]; + char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */ +#if !defined(WL_CFG80211) + uint up = 0; +#endif /* defined(WL_CFG80211) */ + uint power_mode = PM_FAST; + uint32 dongle_align = DHD_SDALIGN; + uint32 glom = 0; + uint bcn_timeout = DHD_BEACON_TIMEOUT_NORMAL; + + uint retry_max = 3; +#if defined(ARP_OFFLOAD_SUPPORT) + int arpoe = 1; +#endif +#if defined(KEEP_ALIVE) + int res; +#endif /* defined(KEEP_ALIVE) */ + int scan_assoc_time = DHD_SCAN_ACTIVE_TIME; + int scan_unassoc_time = 40; + int scan_passive_time = DHD_SCAN_PASSIVE_TIME; + char buf[WLC_IOCTL_SMLEN]; + char *ptr; + uint32 listen_interval = LISTEN_INTERVAL; /* Default Listen Interval in Beacons */ + uint16 chipID; +#if defined(SOFTAP) + uint dtim = 1; +#endif +#if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211)) + uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */ +#endif +#if defined(AP) || defined(WLP2P) + uint32 apsta = 1; /* Enable APSTA mode */ +#endif /* defined(AP) || defined(WLP2P) */ +#ifdef GET_CUSTOM_MAC_ENABLE + struct ether_addr ea_addr; +#endif /* GET_CUSTOM_MAC_ENABLE */ + DHD_TRACE(("Enter %s\n", __FUNCTION__)); + dhd->op_mode = 0; +#ifdef GET_CUSTOM_MAC_ENABLE + ret = dhd_custom_get_mac_address(ea_addr.octet); + if (!ret) { + memset(buf, 0, sizeof(buf)); + bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0); + if (ret < 0) { + DHD_ERROR(("%s: can't set custom MAC address , error=%d\n", __FUNCTION__, ret)); + return BCME_NOTUP; + } + memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN); + } else { +#endif /* GET_CUSTOM_MAC_ENABLE */ + /* Get the default device MAC address directly from firmware */ + memset(buf, 0, sizeof(buf)); + bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), + FALSE, 0)) < 0) { + DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret)); + return BCME_NOTUP; + } + /* Update public MAC address after reading from Firmware */ + memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN); +#ifdef GET_CUSTOM_MAC_ENABLE + } +#endif /* GET_CUSTOM_MAC_ENABLE */ + +#ifdef SET_RANDOM_MAC_SOFTAP + if ((!op_mode && strstr(fw_path, "_apsta") != NULL) || (op_mode == HOSTAPD_MASK)) { + uint rand_mac; + + srandom32((uint)jiffies); + rand_mac = random32(); + iovbuf[0] = 0x02; /* locally administered bit */ + iovbuf[1] = 0x1A; + iovbuf[2] = 0x11; + iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0; + iovbuf[4] = (unsigned char)(rand_mac >> 8); + iovbuf[5] = (unsigned char)(rand_mac >> 16); + + bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0); + if (ret < 0) { + DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret)); + } else + 
memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN); + } +#endif /* SET_RANDOM_MAC_SOFTAP */ + + DHD_TRACE(("Firmware = %s\n", fw_path)); + +#if !defined(AP) && defined(WLP2P) + /* Check if firmware with WFD support used */ +#if defined(WL_ENABLE_P2P_IF) + if ((ret = dhd_concurrent_fw(dhd)) < 0) { + DHD_ERROR(("%s error : firmware can't support p2p mode\n", __FUNCTION__)); + goto done; + } +#endif /* (WL_ENABLE_P2P_IF) */ + + if ((!op_mode && strstr(fw_path, "_p2p") != NULL) +#if defined(WL_ENABLE_P2P_IF) + || (op_mode == WFD_MASK) || (dhd_concurrent_fw(dhd) == 1) +#endif + ) { + bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s APSTA setting failed ret= %d\n", __FUNCTION__, ret)); + } else { + dhd->op_mode |= WFD_MASK; +#if !defined(WL_ENABLE_P2P_IF) + /* ICS back capability : disable any packet filtering for p2p only mode */ + dhd_pkt_filter_enable = FALSE; +#endif /*!defined(WL_ENABLE_P2P_IF) */ + } + } +#endif + +#if !defined(AP) && defined(WL_CFG80211) + /* Check if firmware with HostAPD support used */ + if ((!op_mode && strstr(fw_path, "_apsta") != NULL) || (op_mode == HOSTAPD_MASK)) { + /* Disable A-band for HostAPD */ + uint band = WLC_BAND_2G; + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_BAND, (char *)&band, sizeof(band), + TRUE, 0)) < 0) { + DHD_ERROR(("%s:set band failed error (%d)\n", __FUNCTION__, ret)); + } + + /* Turn off wme if we are having only g ONLY firmware */ + bcm_mkiovar("nmode", 0, 0, buf, sizeof(buf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), + FALSE, 0)) < 0) { + DHD_ERROR(("%s:get nmode failed error (%d)\n", __FUNCTION__, ret)); + } + else { + DHD_TRACE(("%s:get nmode returned %d\n", __FUNCTION__,buf[0])); + } + if (buf[0] == 0) { + int wme = 0; + bcm_mkiovar("wme", (char *)&wme, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s set wme for HostAPD failed %d\n", __FUNCTION__, ret)); + } + else { + DHD_TRACE(("%s set wme succeeded for g ONLY firmware\n", __FUNCTION__)); + } + } + /* Turn off MPC in AP mode */ + bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s mpc for HostAPD failed %d\n", __FUNCTION__, ret)); + } else { + dhd->op_mode |= HOSTAPD_MASK; +#if defined(ARP_OFFLOAD_SUPPORT) + arpoe = 0; +#endif /* (ARP_OFFLOAD_SUPPORT) */ + /* disable any filtering for SoftAP mode */ + dhd_pkt_filter_enable = FALSE; + } + } +#endif + +#if !defined(WL_ENABLE_P2P_IF) + /* ICS mode setting for sta */ + if ((dhd->op_mode != WFD_MASK) && (dhd->op_mode != HOSTAPD_MASK)) { + /* STA only operation mode */ + dhd->op_mode |= STA_MASK; + dhd_pkt_filter_enable = TRUE; + } +#endif /* !defined(WL_ENABLE_P2P_IF) */ + + DHD_ERROR(("Firmware up: op_mode=%d, " + "Broadcom Dongle Host Driver mac=%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", + dhd->op_mode, + dhd->mac.octet[0], dhd->mac.octet[1], dhd->mac.octet[2], + dhd->mac.octet[3], dhd->mac.octet[4], dhd->mac.octet[5])); + + /* Set Country code */ + if (dhd->dhd_cspec.ccode[0] != 0) { + bcm_mkiovar("country", (char *)&dhd->dhd_cspec, + sizeof(wl_country_t), iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) + DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__)); + } + + /* Set Listen Interval */ + bcm_mkiovar("assoc_listen", (char 
*)&listen_interval, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) + DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret)); + + /* Set PowerSave mode */ + dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0); + + /* Match Host and Dongle rx alignment */ + bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + + /* disable glom option for some chips */ + chipID = (uint16)dhd_bus_chip_id(dhd); + if ((chipID == BCM4330_CHIP_ID) || (chipID == BCM4329_CHIP_ID)) { + DHD_INFO(("%s disable glom for chipID=0x%X\n", __FUNCTION__, chipID)); + bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + } + + /* Setup timeout if Beacons are lost and roam is off to report link down */ + bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + /* Setup assoc_retry_max count to reconnect target AP in dongle */ + bcm_mkiovar("assoc_retry_max", (char *)&retry_max, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + +#if defined(AP) && !defined(WLP2P) + /* Turn off MPC in AP mode */ + bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* defined(AP) && !defined(WLP2P) */ + +#if defined(SOFTAP) + if (ap_fw_loaded == TRUE) { + dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0); + } +#endif + +#if defined(KEEP_ALIVE) + /* Set Keep Alive : be sure to use FW with -keepalive */ +#if defined(SOFTAP) + if (ap_fw_loaded == FALSE) +#endif + if ((res = dhd_keep_alive_onoff(dhd)) < 0) + DHD_ERROR(("%s set keeplive failed %d\n", + __FUNCTION__, res)); +#endif /* defined(KEEP_ALIVE) */ + + /* Read event_msgs mask */ + bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) { + DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret)); + goto done; + } + bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN); + + /* Setup event_msgs */ + setbit(eventmask, WLC_E_SET_SSID); + setbit(eventmask, WLC_E_PRUNE); + setbit(eventmask, WLC_E_AUTH); + setbit(eventmask, WLC_E_REASSOC); + setbit(eventmask, WLC_E_REASSOC_IND); + setbit(eventmask, WLC_E_DEAUTH); + setbit(eventmask, WLC_E_DEAUTH_IND); + setbit(eventmask, WLC_E_DISASSOC_IND); + setbit(eventmask, WLC_E_DISASSOC); + setbit(eventmask, WLC_E_JOIN); + setbit(eventmask, WLC_E_ASSOC_IND); + setbit(eventmask, WLC_E_PSK_SUP); + setbit(eventmask, WLC_E_LINK); + setbit(eventmask, WLC_E_NDIS_LINK); + setbit(eventmask, WLC_E_MIC_ERROR); + setbit(eventmask, WLC_E_ASSOC_REQ_IE); + setbit(eventmask, WLC_E_ASSOC_RESP_IE); + setbit(eventmask, WLC_E_PMKID_CACHE); + setbit(eventmask, WLC_E_TXFAIL); + setbit(eventmask, WLC_E_JOIN_START); + setbit(eventmask, WLC_E_SCAN_COMPLETE); +#ifdef WLMEDIA_HTSF + setbit(eventmask, WLC_E_HTSFSYNC); +#endif /* WLMEDIA_HTSF */ +#ifdef PNO_SUPPORT + setbit(eventmask, WLC_E_PFN_NET_FOUND); +#endif /* PNO_SUPPORT */ + /* enable dongle roaming event */ + setbit(eventmask, 
WLC_E_ROAM); +#ifdef WL_CFG80211 + setbit(eventmask, WLC_E_ESCAN_RESULT); + if ((dhd->op_mode & WFD_MASK) == WFD_MASK) { + setbit(eventmask, WLC_E_ACTION_FRAME_RX); + setbit(eventmask, WLC_E_ACTION_FRAME_COMPLETE); + setbit(eventmask, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE); + setbit(eventmask, WLC_E_P2P_PROBREQ_MSG); + setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE); + } +#endif /* WL_CFG80211 */ + + /* Write updated Event mask */ + bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret)); + goto done; + } + + dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time, + sizeof(scan_assoc_time), TRUE, 0); + dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time, + sizeof(scan_unassoc_time), TRUE, 0); + dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time, + sizeof(scan_passive_time), TRUE, 0); + +#ifdef ARP_OFFLOAD_SUPPORT + /* Set and enable ARP offload feature for STA only */ +#if defined(SOFTAP) + if (arpoe && !ap_fw_loaded) { +#else + if (arpoe) { +#endif + dhd_arp_offload_set(dhd, dhd_arp_mode); + dhd_arp_offload_enable(dhd, arpoe); + } else { + dhd_arp_offload_set(dhd, 0); + dhd_arp_offload_enable(dhd, FALSE); + } +#endif /* ARP_OFFLOAD_SUPPORT */ + +#ifdef PKT_FILTER_SUPPORT + /* Setup defintions for pktfilter , enable in suspend */ + dhd->pktfilter_count = 5; + /* Setup filter to allow only unicast */ + dhd->pktfilter[0] = "100 0 0 0 0x01 0x00"; + dhd->pktfilter[1] = NULL; + dhd->pktfilter[2] = NULL; + dhd->pktfilter[3] = NULL; + /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */ + dhd->pktfilter[4] = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB"; +#if defined(SOFTAP) + if (ap_fw_loaded) { + int i; + for (i = 0; i < dhd->pktfilter_count; i++) { + dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i], + 0, dhd_master_mode); + } + } +#endif /* defined(SOFTAP) */ +#endif /* PKT_FILTER_SUPPORT */ + +#if !defined(WL_CFG80211) + /* Force STA UP */ + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&up, sizeof(up), TRUE, 0)) < 0) { + DHD_ERROR(("%s Setting WL UP failed %d\n", __FUNCTION__, ret)); + goto done; + } +#endif + /* query for 'ver' to get version info from firmware */ + memset(buf, 0, sizeof(buf)); + ptr = buf; + bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0) + DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret)); + else { + bcmstrtok(&ptr, "\n", 0); + /* Print fw version info */ + DHD_ERROR(("Firmware version = %s\n", buf)); + + dhd_set_version_info(dhd, buf); + + DHD_BLOG(buf, strlen(buf) + 1); + DHD_BLOG(dhd_version, strlen(dhd_version) + 1); + + /* Check and adjust IOCTL response timeout for Manufactring firmware */ + if (strstr(buf, MANUFACTRING_FW) != NULL) { + dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT * 10); + DHD_ERROR(("%s : adjust IOCTL response time for Manufactring Firmware\n", __FUNCTION__)); + } + } + +done: + return ret; +} + + +int +dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set) +{ + char buf[strlen(name) + 1 + cmd_len]; + int len = sizeof(buf); + wl_ioctl_t ioc; + int ret; + + len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len); + + memset(&ioc, 0, sizeof(ioc)); + + ioc.cmd = set? 
WLC_SET_VAR : WLC_GET_VAR; + ioc.buf = buf; + ioc.len = len; + ioc.set = TRUE; + + ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len); + if (!set && ret >= 0) + memcpy(cmd_buf, buf, cmd_len); + + return ret; +} + +int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx) +{ + struct dhd_info *dhd = dhdp->info; + struct net_device *dev = NULL; + + ASSERT(dhd && dhd->iflist[ifidx]); + dev = dhd->iflist[ifidx]->net; + ASSERT(dev); + + if (netif_running(dev)) { + DHD_ERROR(("%s: Must be down to change its MTU", dev->name)); + return BCME_NOTDOWN; + } + +#define DHD_MIN_MTU 1500 +#define DHD_MAX_MTU 1752 + + if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) { + DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu)); + return BCME_BADARG; + } + + dev->mtu = new_mtu; + return 0; +} + +#ifdef ARP_OFFLOAD_SUPPORT +/* add or remove AOE host ip(s) (up to 8 IPs on the interface) */ +void +aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add) +{ + u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */ + int i; + int ret; + + bzero(ipv4_buf, sizeof(ipv4_buf)); + + /* display what we've got */ + ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf)); + DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__)); +#ifdef AOE_DBG + dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */ +#endif + /* now we saved hoste_ip table, clr it in the dongle AOE */ + dhd_aoe_hostip_clr(dhd_pub); + + if (ret) { + DHD_ERROR(("%s failed\n", __FUNCTION__)); + return; + } + + for (i = 0; i < MAX_IPV4_ENTRIES; i++) { + if (add && (ipv4_buf[i] == 0)) { + ipv4_buf[i] = ipa; + add = FALSE; /* added ipa to local table */ + DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n", + __FUNCTION__, i)); + } else if (ipv4_buf[i] == ipa) { + ipv4_buf[i] = 0; + DHD_ARPOE(("%s: removed IP:%x from temp table %d\n", + __FUNCTION__, ipa, i)); + } + + if (ipv4_buf[i] != 0) { + /* add back host_ip entries from our local cache */ + dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i]); + DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n", + __FUNCTION__, ipv4_buf[i], i)); + } + } +#ifdef AOE_DBG + /* see the resulting hostip table */ + dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf)); + DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__)); + dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */ +#endif +} + +static int dhd_device_event(struct notifier_block *this, + unsigned long event, + void *ptr) +{ + struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; + + dhd_info_t *dhd; + dhd_pub_t *dhd_pub; + + if (!ifa) + return NOTIFY_DONE; + + dhd = *(dhd_info_t **)netdev_priv(ifa->ifa_dev->dev); + dhd_pub = &dhd->pub; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) + if (ifa->ifa_dev->dev->netdev_ops == &dhd_ops_pri) { +#else + if (ifa->ifa_dev->dev) { +#endif + switch (event) { + case NETDEV_UP: + DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n", + __FUNCTION__, ifa->ifa_label, ifa->ifa_address)); + + if (dhd->pub.busstate != DHD_BUS_DATA) { + DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__)); + if (dhd->pend_ipaddr) { + DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n", + __FUNCTION__, dhd->pend_ipaddr)); + } + dhd->pend_ipaddr = ifa->ifa_address; + break; + } + +#ifdef AOE_IP_ALIAS_SUPPORT + if (ifa->ifa_label[strlen(ifa->ifa_label)-2] == 0x3a) { + DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n", + __FUNCTION__)); + aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE); + } + else + aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, 
TRUE); +#endif + break; + + case NETDEV_DOWN: + DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n", + __FUNCTION__, ifa->ifa_label, ifa->ifa_address)); + dhd->pend_ipaddr = 0; +#ifdef AOE_IP_ALIAS_SUPPORT + if (!(ifa->ifa_label[strlen(ifa->ifa_label)-2] == 0x3a)) { + DHD_ARPOE(("%s: primary interface is down, AOE clr all\n", + __FUNCTION__)); + dhd_aoe_hostip_clr(&dhd->pub); + dhd_aoe_arp_clr(&dhd->pub); + } else + aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE); +#else + dhd_aoe_hostip_clr(&dhd->pub); + dhd_aoe_arp_clr(&dhd->pub); +#endif + break; + + default: + DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n", + __func__, ifa->ifa_label, event)); + break; + } + } + return NOTIFY_DONE; +} +#endif /* ARP_OFFLOAD_SUPPORT */ + +int +dhd_net_attach(dhd_pub_t *dhdp, int ifidx) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + struct net_device *net = NULL; + int err = 0; + uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 }; + + DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx)); + + ASSERT(dhd && dhd->iflist[ifidx]); + + net = dhd->iflist[ifidx]->net; + ASSERT(net); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) + ASSERT(!net->open); + net->get_stats = dhd_get_stats; + net->do_ioctl = dhd_ioctl_entry; + net->hard_start_xmit = dhd_start_xmit; + net->set_mac_address = dhd_set_mac_address; + net->set_multicast_list = dhd_set_multicast_list; + net->open = net->stop = NULL; +#else + ASSERT(!net->netdev_ops); + net->netdev_ops = &dhd_ops_virt; +#endif + + /* Ok, link into the network layer... */ + if (ifidx == 0) { + /* + * device functions for the primary interface only + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) + net->open = dhd_open; + net->stop = dhd_stop; +#else + net->netdev_ops = &dhd_ops_pri; +#endif + } else { + /* + * We have to use the primary MAC for virtual interfaces + */ + memcpy(temp_addr, dhd->iflist[ifidx]->mac_addr, ETHER_ADDR_LEN); + /* + * Android sets the locally administered bit to indicate that this is a + * portable hotspot. This will not work in simultaneous AP/STA mode, + * nor with P2P. Need to set the Donlge's MAC address, and then use that. 
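+			 * As a workaround, if the virtual interface would otherwise reuse
+			 * the primary interface's MAC address, the code below sets the
+			 * locally administered bit (0x02) in the first octet so the two
+			 * addresses remain distinct.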
+ */ + if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr, + ETHER_ADDR_LEN)) { + DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n", + __func__, net->name)); + temp_addr[0] |= 0x02; + } + } + + net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) + net->ethtool_ops = &dhd_ethtool_ops; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */ + +#if defined(WL_WIRELESS_EXT) +#if WIRELESS_EXT < 19 + net->get_wireless_stats = dhd_get_wireless_stats; +#endif /* WIRELESS_EXT < 19 */ +#if WIRELESS_EXT > 12 + net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def; +#endif /* WIRELESS_EXT > 12 */ +#endif /* defined(WL_WIRELESS_EXT) */ + + dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net); + + memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN); + + if ((err = register_netdev(net)) != 0) { + DHD_ERROR(("couldn't register the net device, err %d\n", err)); + goto fail; + } + printf("Broadcom Dongle Host Driver: register interface [%s]" + " MAC: %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", + net->name, + net->dev_addr[0], net->dev_addr[1], net->dev_addr[2], + net->dev_addr[3], net->dev_addr[4], net->dev_addr[5]); + +#if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211) + wl_iw_iscan_set_scan_broadcast_prep(net, 1); +#endif + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + if (ifidx == 0) { + up(&dhd_registration_sem); + } +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ + return 0; + +fail: +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) + net->open = NULL; +#else + net->netdev_ops = NULL; +#endif + return err; +} + +void +dhd_bus_detach(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (dhdp) { + dhd = (dhd_info_t *)dhdp->info; + if (dhd) { + + /* + * In case of Android cfg80211 driver, the bus is down in dhd_stop, + * calling stop again will cuase SD read/write errors. 
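+			 * For that reason the protocol and bus modules are only stopped
+			 * here when the bus state is not already DHD_BUS_DOWN.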
+ */ + if (dhd->pub.busstate != DHD_BUS_DOWN) { + /* Stop the protocol module */ + dhd_prot_stop(&dhd->pub); + + /* Stop the bus module */ + dhd_bus_stop(dhd->pub.bus, TRUE); + } + +#if defined(OOB_INTR_ONLY) + bcmsdh_unregister_oob_intr(); +#endif /* defined(OOB_INTR_ONLY) */ + } + } +} + + +void dhd_detach(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + unsigned long flags; + int timer_valid = FALSE; + + if (!dhdp) + return; + + dhd = (dhd_info_t *)dhdp->info; + if (!dhd) + return; + + DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state)); + + if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) { + /* Give sufficient time for threads to start running in case + * dhd_attach() has failed + */ + osl_delay(1000*100); + } + +#ifdef ARP_OFFLOAD_SUPPORT + unregister_inetaddr_notifier(&dhd_notifier); +#endif /* ARP_OFFLOAD_SUPPORT */ + +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) + if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) { + if (dhd->early_suspend.suspend) + unregister_early_suspend(&dhd->early_suspend); + } +#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + cancel_work_sync(&dhd->work_hang); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ + +#if defined(WL_WIRELESS_EXT) + if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) { + /* Detatch and unlink in the iw */ + wl_iw_detach(); + } +#endif /* defined(WL_WIRELESS_EXT) */ + + if (dhd->thr_sysioc_ctl.thr_pid >= 0) { + PROC_STOP(&dhd->thr_sysioc_ctl); + } + + /* delete all interfaces, start with virtual */ + if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) { + int i = 1; + dhd_if_t *ifp; + + /* Cleanup virtual interfaces */ + for (i = 1; i < DHD_MAX_IFS; i++) { + dhd_net_if_lock_local(dhd); + if (dhd->iflist[i]) { + dhd->iflist[i]->state = DHD_IF_DEL; + dhd->iflist[i]->idx = i; + dhd_op_if(dhd->iflist[i]); + } + dhd_net_if_unlock_local(dhd); + } + /* delete primary interface 0 */ + ifp = dhd->iflist[0]; + ASSERT(ifp); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) + if (ifp->net->open) +#else + if (ifp->net->netdev_ops == &dhd_ops_pri) +#endif + { + if (ifp->net) { + unregister_netdev(ifp->net); + free_netdev(ifp->net); + ifp->net = NULL; + } + MFREE(dhd->pub.osh, ifp, sizeof(*ifp)); + dhd->iflist[0] = NULL; + } + } + + /* Clear the watchdog timer */ + flags = dhd_os_spin_lock(&dhd->pub); + timer_valid = dhd->wd_timer_valid; + dhd->wd_timer_valid = FALSE; + dhd_os_spin_unlock(&dhd->pub, flags); + if (timer_valid) + del_timer_sync(&dhd->timer); + + if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) { +#ifdef DHDTHREAD + if (dhd->thr_wdt_ctl.thr_pid >= 0) { + PROC_STOP(&dhd->thr_wdt_ctl); + } + + if (dhd->thr_dpc_ctl.thr_pid >= 0) { + PROC_STOP(&dhd->thr_dpc_ctl); + } + else +#endif /* DHDTHREAD */ + tasklet_kill(&dhd->tasklet); + } + if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) { + dhd_bus_detach(dhdp); + + if (dhdp->prot) + dhd_prot_detach(dhdp); + } + +#ifdef WL_CFG80211 + if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) { + wl_cfg80211_detach(NULL); + dhd_monitor_uninit(); + } +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) + unregister_pm_notifier(&dhd_sleep_pm_notifier); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */ + + if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) { +#ifdef CONFIG_HAS_WAKELOCK + wake_lock_destroy(&dhd->wl_wifi); + wake_lock_destroy(&dhd->wl_rxwake); + wake_lock_destroy(&dhd->wl_ctrlwake); +#endif + } +} + + 
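+/*
+ * dhd_free releases only the dhd_info allocation itself. dhd_detach is
+ * expected to have run first so that the watchdog timer, worker threads,
+ * wake locks and registered net devices are already torn down.
+ */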
+void +dhd_free(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (dhdp) { + dhd = (dhd_info_t *)dhdp->info; + if (dhd) + MFREE(dhd->pub.osh, dhd, sizeof(*dhd)); + } +} + +static void __exit +dhd_module_cleanup(void) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + dhd_bus_unregister(); + +#if defined(CONFIG_WIFI_CONTROL_FUNC) + wl_android_wifictrl_func_del(); +#endif /* CONFIG_WIFI_CONTROL_FUNC */ + wl_android_exit(); + + /* Call customer gpio to turn off power with WL_REG_ON signal */ + dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF); +} + +static int __init +dhd_module_init(void) +{ + int error = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + wl_android_init(); + +#ifdef DHDTHREAD + /* Sanity check on the module parameters */ + do { + /* Both watchdog and DPC as tasklets are ok */ + if ((dhd_watchdog_prio < 0) && (dhd_dpc_prio < 0)) + break; + + /* If both watchdog and DPC are threads, TX must be deferred */ + if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0) && dhd_deferred_tx) + break; + + DHD_ERROR(("Invalid module parameters.\n")); + return -EINVAL; + } while (0); +#endif /* DHDTHREAD */ + + /* Call customer gpio to turn on power with WL_REG_ON signal */ + dhd_customer_gpio_wlan_ctrl(WLAN_POWER_ON); + +#if defined(CONFIG_WIFI_CONTROL_FUNC) + if (wl_android_wifictrl_func_add() < 0) + goto fail_1; +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + sema_init(&dhd_registration_sem, 0); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ + error = dhd_bus_register(); + + if (!error) + printf("\n%s\n", dhd_version); + else { + DHD_ERROR(("%s: sdio_register_driver failed\n", __FUNCTION__)); + goto fail_1; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + /* + * Wait till MMC sdio_register_driver callback called and made driver attach. 
+ * It's needed to make sync up exit from dhd insmod and + * Kernel MMC sdio device callback registration + */ + if (down_timeout(&dhd_registration_sem, msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT)) != 0) { + error = -ENODEV; + DHD_ERROR(("%s: sdio_register_driver timeout\n", __FUNCTION__)); + goto fail_2; + } +#endif +#if defined(WL_CFG80211) + wl_android_post_init(); +#endif /* defined(WL_CFG80211) */ + + return error; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) +fail_2: + dhd_bus_unregister(); +#endif +fail_1: +#if defined(CONFIG_WIFI_CONTROL_FUNC) + wl_android_wifictrl_func_del(); +#endif + + /* Call customer gpio to turn off power with WL_REG_ON signal */ + dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF); + + return error; +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) +late_initcall(dhd_module_init); +#else +module_init(dhd_module_init); +#endif +module_exit(dhd_module_cleanup); + +/* + * OS specific functions required to implement DHD driver in OS independent way + */ +int +dhd_os_proto_block(dhd_pub_t *pub) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + down(&dhd->proto_sem); + return 1; + } + + return 0; +} + +int +dhd_os_proto_unblock(dhd_pub_t *pub) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + up(&dhd->proto_sem); + return 1; + } + + return 0; +} + +unsigned int +dhd_os_get_ioctl_resp_timeout(void) +{ + return ((unsigned int)dhd_ioctl_timeout_msec); +} + +void +dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec) +{ + dhd_ioctl_timeout_msec = (int)timeout_msec; +} + +int +dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool *pending) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + int timeout; + + /* Convert timeout in millsecond to jiffies */ + timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec); + + timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout); + return timeout; +} + +int +dhd_os_ioctl_resp_wake(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (waitqueue_active(&dhd->ioctl_resp_wait)) { + wake_up(&dhd->ioctl_resp_wait); + } + + return 0; +} + +void +dhd_os_wd_timer(void *bus, uint wdtick) +{ + dhd_pub_t *pub = bus; + dhd_info_t *dhd = (dhd_info_t *)pub->info; + unsigned long flags; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + flags = dhd_os_spin_lock(pub); + + /* don't start the wd until fw is loaded */ + if (pub->busstate == DHD_BUS_DOWN) { + dhd_os_spin_unlock(pub, flags); + return; + } + + /* Totally stop the timer */ + if (!wdtick && dhd->wd_timer_valid == TRUE) { + dhd->wd_timer_valid = FALSE; + dhd_os_spin_unlock(pub, flags); +#ifdef DHDTHREAD + del_timer_sync(&dhd->timer); +#else + del_timer(&dhd->timer); +#endif /* DHDTHREAD */ + return; + } + + if (wdtick) { + dhd_watchdog_ms = (uint)wdtick; + /* Re arm the timer, at last watchdog period */ + mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms)); + dhd->wd_timer_valid = TRUE; + } + dhd_os_spin_unlock(pub, flags); +} + +void * +dhd_os_open_image(char *filename) +{ + struct file *fp; + + fp = filp_open(filename, O_RDONLY, 0); + /* + * 2.6.11 (FC4) supports filp_open() but later revs don't? + * Alternative: + * fp = open_namei(AT_FDCWD, filename, O_RD, 0); + * ??? 
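+	 * On failure filp_open returns an ERR_PTR() value; it is mapped to
+	 * NULL below so that callers only need a NULL check.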
+ */ + if (IS_ERR(fp)) + fp = NULL; + + return fp; +} + +int +dhd_os_get_image_block(char *buf, int len, void *image) +{ + struct file *fp = (struct file *)image; + int rdlen; + + if (!image) + return 0; + + rdlen = kernel_read(fp, fp->f_pos, buf, len); + if (rdlen > 0) + fp->f_pos += rdlen; + + return rdlen; +} + +void +dhd_os_close_image(void *image) +{ + if (image) + filp_close((struct file *)image, NULL); +} + + +void +dhd_os_sdlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + +#ifdef DHDTHREAD + if (dhd->threads_only) + down(&dhd->sdsem); + else +#endif /* DHDTHREAD */ + spin_lock_bh(&dhd->sdlock); +} + +void +dhd_os_sdunlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + +#ifdef DHDTHREAD + if (dhd->threads_only) + up(&dhd->sdsem); + else +#endif /* DHDTHREAD */ + spin_unlock_bh(&dhd->sdlock); +} + +void +dhd_os_sdlock_txq(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + spin_lock_bh(&dhd->txqlock); +} + +void +dhd_os_sdunlock_txq(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + spin_unlock_bh(&dhd->txqlock); +} + +void +dhd_os_sdlock_rxq(dhd_pub_t *pub) +{ +} + +void +dhd_os_sdunlock_rxq(dhd_pub_t *pub) +{ +} + +void +dhd_os_sdtxlock(dhd_pub_t *pub) +{ + dhd_os_sdlock(pub); +} + +void +dhd_os_sdtxunlock(dhd_pub_t *pub) +{ + dhd_os_sdunlock(pub); +} + +#if defined(CONFIG_DHD_USE_STATIC_BUF) +uint8* dhd_os_prealloc(void *osh, int section, uint size) +{ + return (uint8*)wl_android_prealloc(section, size); +} + +void dhd_os_prefree(void *osh, void *addr, uint size) +{ +} +#endif /* defined(CONFIG_DHD_USE_STATIC_BUF) */ + +#if defined(WL_WIRELESS_EXT) +struct iw_statistics * +dhd_get_wireless_stats(struct net_device *dev) +{ + int res = 0; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + if (!dhd->pub.up) { + return NULL; + } + + res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats); + + if (res == 0) + return &dhd->iw.wstats; + else + return NULL; +} +#endif /* defined(WL_WIRELESS_EXT) */ + +static int +dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata, + wl_event_msg_t *event, void **data) +{ + int bcmerror = 0; + ASSERT(dhd != NULL); + + bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data); + if (bcmerror != BCME_OK) + return (bcmerror); + +#if defined(WL_WIRELESS_EXT) + if (event->bsscfgidx == 0) { + /* + * Wireless ext is on primary interface only + */ + + ASSERT(dhd->iflist[*ifidx] != NULL); + ASSERT(dhd->iflist[*ifidx]->net != NULL); + + if (dhd->iflist[*ifidx]->net) { + wl_iw_event(dhd->iflist[*ifidx]->net, event, *data); + } + } +#endif /* defined(WL_WIRELESS_EXT) */ + +#ifdef WL_CFG80211 + if ((ntoh32(event->event_type) == WLC_E_IF) && + (((dhd_if_event_t *)*data)->action == WLC_E_IF_ADD)) + /* If ADD_IF has been called directly by wl utility then we + * should not report this. 
In case if ADD_IF was called from + * CFG stack, then too this event need not be reported back + */ + return (BCME_OK); + if ((wl_cfg80211_is_progress_ifchange() || + wl_cfg80211_is_progress_ifadd()) && (*ifidx != 0)) { + /* + * If IF_ADD/CHANGE operation is going on, + * discard any event received on the virtual I/F + */ + return (BCME_OK); + } + + ASSERT(dhd->iflist[*ifidx] != NULL); + ASSERT(dhd->iflist[*ifidx]->net != NULL); + if (dhd->iflist[*ifidx]->net) { + wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data); + } +#endif /* defined(WL_CFG80211) */ + + return (bcmerror); +} + +/* send up locally generated event */ +void +dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data) +{ + switch (ntoh32(event->event_type)) { + /* Send up locally generated AMP HCI Events */ + case WLC_E_BTA_HCI_EVENT: { + struct sk_buff *p, *skb; + bcm_event_t *msg; + wl_event_msg_t *p_bcm_event; + char *ptr; + uint32 len; + uint32 pktlen; + dhd_if_t *ifp; + dhd_info_t *dhd; + uchar *eth; + int ifidx; + + len = ntoh32(event->datalen); + pktlen = sizeof(bcm_event_t) + len + 2; + dhd = dhdp->info; + ifidx = dhd_ifname2idx(dhd, event->ifname); + + if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) { + ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32))); + + msg = (bcm_event_t *) PKTDATA(dhdp->osh, p); + + bcopy(&dhdp->mac, &msg->eth.ether_dhost, ETHER_ADDR_LEN); + bcopy(&dhdp->mac, &msg->eth.ether_shost, ETHER_ADDR_LEN); + ETHER_TOGGLE_LOCALADDR(&msg->eth.ether_shost); + + msg->eth.ether_type = hton16(ETHER_TYPE_BRCM); + + /* BCM Vendor specific header... */ + msg->bcm_hdr.subtype = hton16(BCMILCP_SUBTYPE_VENDOR_LONG); + msg->bcm_hdr.version = BCMILCP_BCM_SUBTYPEHDR_VERSION; + bcopy(BRCM_OUI, &msg->bcm_hdr.oui[0], DOT11_OUI_LEN); + + /* vendor spec header length + pvt data length (private indication + * hdr + actual message itself) + */ + msg->bcm_hdr.length = hton16(BCMILCP_BCM_SUBTYPEHDR_MINLENGTH + + BCM_MSG_LEN + sizeof(wl_event_msg_t) + (uint16)len); + msg->bcm_hdr.usr_subtype = hton16(BCMILCP_BCM_SUBTYPE_EVENT); + + PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2)); + + /* copy wl_event_msg_t into sk_buf */ + + /* pointer to wl_event_msg_t in sk_buf */ + p_bcm_event = &msg->event; + bcopy(event, p_bcm_event, sizeof(wl_event_msg_t)); + + /* copy hci event into sk_buf */ + bcopy(data, (p_bcm_event + 1), len); + + msg->bcm_hdr.length = hton16(sizeof(wl_event_msg_t) + + ntoh16(msg->bcm_hdr.length)); + PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2)); + + ptr = (char *)(msg + 1); + /* Last 2 bytes of the message are 0x00 0x00 to signal that there + * are no ethertypes which are following this + */ + ptr[len+0] = 0x00; + ptr[len+1] = 0x00; + + skb = PKTTONATIVE(dhdp->osh, p); + eth = skb->data; + len = skb->len; + + ifp = dhd->iflist[ifidx]; + if (ifp == NULL) + ifp = dhd->iflist[0]; + + ASSERT(ifp); + skb->dev = ifp->net; + skb->protocol = eth_type_trans(skb, skb->dev); + + skb->data = eth; + skb->len = len; + + /* Strip header, count, deliver upward */ + skb_pull(skb, ETH_HLEN); + + /* Send the packet */ + if (in_interrupt()) { + netif_rx(skb); + } else { + netif_rx_ni(skb); + } + } + else { + /* Could not allocate a sk_buf */ + DHD_ERROR(("%s: unable to alloc sk_buf", __FUNCTION__)); + } + break; + } /* case WLC_E_BTA_HCI_EVENT */ + + default: + break; + } +} + +void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + struct dhd_info *dhdinfo = dhd->info; + int timeout = msecs_to_jiffies(2000); + 
dhd_os_sdunlock(dhd); + wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout); + dhd_os_sdlock(dhd); +#endif + return; +} + +void dhd_wait_event_wakeup(dhd_pub_t *dhd) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + struct dhd_info *dhdinfo = dhd->info; + if (waitqueue_active(&dhdinfo->ctrl_wait)) + wake_up(&dhdinfo->ctrl_wait); +#endif + return; +} + +int +dhd_dev_reset(struct net_device *dev, uint8 flag) +{ + int ret; + + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + ret = dhd_bus_devreset(&dhd->pub, flag); + if (ret) { + DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret)); + return ret; + } + + return ret; +} + +int net_os_set_suspend_disable(struct net_device *dev, int val) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + int ret = 0; + + if (dhd) { + ret = dhd->pub.suspend_disable_flag; + dhd->pub.suspend_disable_flag = val; + } + return ret; +} + +int net_os_set_suspend(struct net_device *dev, int val, int force) +{ + int ret = 0; + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + if (dhd) { +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) + ret = dhd_set_suspend(val, &dhd->pub); +#else + ret = dhd_suspend_resume_helper(dhd, val, force); +#endif + } + return ret; +} + +int net_os_set_dtim_skip(struct net_device *dev, int val) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + if (dhd) + dhd->pub.dtim_skip = val; + + return 0; +} + +int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + char *filterp = NULL; + int ret = 0; + + if (!dhd || (num == DHD_UNICAST_FILTER_NUM) || + (num == DHD_MDNS_FILTER_NUM)) + return ret; + if (num >= dhd->pub.pktfilter_count) + return -EINVAL; + if (add_remove) { + switch (num) { + case DHD_BROADCAST_FILTER_NUM: + filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF"; + break; + case DHD_MULTICAST4_FILTER_NUM: + filterp = "102 0 0 0 0xFFFFFF 0x01005E"; + break; + case DHD_MULTICAST6_FILTER_NUM: + filterp = "103 0 0 0 0xFFFF 0x3333"; + break; + default: + return -EINVAL; + } + } + dhd->pub.pktfilter[num] = filterp; + return ret; +} + +int dhd_os_set_packet_filter(dhd_pub_t *dhdp, int val) +{ + int ret = 0; + + /* Packet filtering is set only if we still in early-suspend and + * we need either to turn it ON or turn it OFF + * We can always turn it OFF in case of early-suspend, but we turn it + * back ON only if suspend_disable_flag was not set + */ + if (dhdp && dhdp->up) { + if (dhdp->in_suspend) { + if (!val || (val && !dhdp->suspend_disable_flag)) + dhd_set_packet_filter(val, dhdp); + } + } + return ret; + +} + +int net_os_set_packet_filter(struct net_device *dev, int val) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return dhd_os_set_packet_filter(&dhd->pub, val); +} + +int +dhd_dev_init_ioctl(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return dhd_preinit_ioctls(&dhd->pub); +} + +#ifdef PNO_SUPPORT +/* Linux wrapper to call common dhd_pno_clean */ +int +dhd_dev_pno_reset(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_clean(&dhd->pub)); +} + + +/* Linux wrapper to call common dhd_pno_enable */ +int +dhd_dev_pno_enable(struct net_device *dev, int pfn_enabled) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_enable(&dhd->pub, pfn_enabled)); +} + + +/* Linux wrapper to call common dhd_pno_set */ +int +dhd_dev_pno_set(struct net_device *dev, 
wlc_ssid_t* ssids_local, int nssid, + ushort scan_fr, int pno_repeat, int pno_freq_expo_max) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_set(&dhd->pub, ssids_local, nssid, scan_fr, pno_repeat, pno_freq_expo_max)); +} + +/* Linux wrapper to call common dhd_pno_set_ex */ +int +dhd_dev_pno_set_ex(struct net_device *dev, wl_pfn_t* ssidnet, int nssid, + ushort pno_interval, int pno_repeat, int pno_expo_max, int pno_lost_time) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_set_ex(&dhd->pub, ssidnet, nssid, + pno_interval, pno_repeat, pno_expo_max, pno_lost_time)); +} + +/* Linux wrapper to get pno status */ +int +dhd_dev_get_pno_status(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_get_status(&dhd->pub)); +} + +#endif /* PNO_SUPPORT */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) +static void dhd_hang_process(struct work_struct *work) +{ + dhd_info_t *dhd; + struct net_device *dev; + + dhd = (dhd_info_t *)container_of(work, dhd_info_t, work_hang); + dev = dhd->iflist[0]->net; + + if (dev) { + rtnl_lock(); + dev_close(dev); + rtnl_unlock(); +#if defined(WL_WIRELESS_EXT) + wl_iw_send_priv_event(dev, "HANG"); +#endif +#if defined(WL_CFG80211) + wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED); +#endif + } +} +#endif + +int dhd_os_send_hang_message(dhd_pub_t *dhdp) +{ + int ret = 0; + + if (dhdp) { + if (!dhdp->hang_was_sent) { + dhdp->hang_was_sent = 1; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + schedule_work(&dhdp->info->work_hang); +#endif + } + } + return ret; +} + +int net_os_send_hang_message(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + int ret = 0; + + if (dhd) + ret = dhd_os_send_hang_message(&dhd->pub); + + return ret; +} + +void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + if (dhd && dhd->pub.up) { + memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t)); +#ifdef WL_CFG80211 + wl_update_wiphybands(NULL); +#endif + } +} + +void dhd_bus_band_set(struct net_device *dev, uint band) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + if (dhd && dhd->pub.up) { +#ifdef WL_CFG80211 + wl_update_wiphybands(NULL); +#endif + } +} + +void dhd_net_if_lock(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + dhd_net_if_lock_local(dhd); +} + +void dhd_net_if_unlock(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + dhd_net_if_unlock_local(dhd); +} + +static void dhd_net_if_lock_local(dhd_info_t *dhd) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + if (dhd) + mutex_lock(&dhd->dhd_net_if_mutex); +#endif +} + +static void dhd_net_if_unlock_local(dhd_info_t *dhd) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + if (dhd) + mutex_unlock(&dhd->dhd_net_if_mutex); +#endif +} + +static void dhd_suspend_lock(dhd_pub_t *pub) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + if (dhd) + mutex_lock(&dhd->dhd_suspend_mutex); +#endif +} + +static void dhd_suspend_unlock(dhd_pub_t *pub) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + if (dhd) + mutex_unlock(&dhd->dhd_suspend_mutex); +#endif +} + +unsigned long dhd_os_spin_lock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags = 0; + + if (dhd) + spin_lock_irqsave(&dhd->dhd_lock, 
flags); + + return flags; +} + +void dhd_os_spin_unlock(dhd_pub_t *pub, unsigned long flags) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) + spin_unlock_irqrestore(&dhd->dhd_lock, flags); +} + +static int +dhd_get_pend_8021x_cnt(dhd_info_t *dhd) +{ + return (atomic_read(&dhd->pend_8021x_cnt)); +} + +#define MAX_WAIT_FOR_8021X_TX 10 + +int +dhd_wait_pend8021x(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + int timeout = msecs_to_jiffies(10); + int ntimes = MAX_WAIT_FOR_8021X_TX; + int pend = dhd_get_pend_8021x_cnt(dhd); + + while (ntimes && pend) { + if (pend) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(timeout); + set_current_state(TASK_RUNNING); + ntimes--; + } + pend = dhd_get_pend_8021x_cnt(dhd); + } + return pend; +} + +#ifdef DHD_DEBUG +int +write_to_file(dhd_pub_t *dhd, uint8 *buf, int size) +{ + int ret = 0; + struct file *fp; + mm_segment_t old_fs; + loff_t pos = 0; + + /* change to KERNEL_DS address limit */ + old_fs = get_fs(); + set_fs(KERNEL_DS); + + /* open file to write */ + fp = filp_open("/tmp/mem_dump", O_WRONLY|O_CREAT, 0640); + if (!fp) { + printf("%s: open file error\n", __FUNCTION__); + ret = -1; + goto exit; + } + + /* Write buf to file */ + fp->f_op->write(fp, buf, size, &pos); + +exit: + /* free buf before return */ + MFREE(dhd->osh, buf, size); + /* close file before return */ + if (fp) + filp_close(fp, current->files); + /* restore previous address limit */ + set_fs(old_fs); + + return ret; +} +#endif /* DHD_DEBUG */ + +int dhd_os_wake_lock_timeout(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ? 
+ dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable; +#ifdef CONFIG_HAS_WAKELOCK + if (dhd->wakelock_rx_timeout_enable) + wake_lock_timeout(&dhd->wl_rxwake, + msecs_to_jiffies(dhd->wakelock_rx_timeout_enable)); + if (dhd->wakelock_ctrl_timeout_enable) + wake_lock_timeout(&dhd->wl_ctrlwake, + msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable)); +#endif + dhd->wakelock_rx_timeout_enable = 0; + dhd->wakelock_ctrl_timeout_enable = 0; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return ret; +} + +int net_os_wake_lock_timeout(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + int ret = 0; + + if (dhd) + ret = dhd_os_wake_lock_timeout(&dhd->pub); + return ret; +} + +int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + if (val > dhd->wakelock_rx_timeout_enable) + dhd->wakelock_rx_timeout_enable = val; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return 0; +} + +int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + if (val > dhd->wakelock_ctrl_timeout_enable) + dhd->wakelock_ctrl_timeout_enable = val; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return 0; +} + +int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + int ret = 0; + + if (dhd) + ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val); + return ret; +} + +int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + int ret = 0; + + if (dhd) + ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val); + return ret; +} + +int dhd_os_wake_lock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); +#ifdef CONFIG_HAS_WAKELOCK + if (!dhd->wakelock_counter) + wake_lock(&dhd->wl_wifi); +#endif + dhd->wakelock_counter++; + ret = dhd->wakelock_counter; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return ret; +} + +int net_os_wake_lock(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + int ret = 0; + + if (dhd) + ret = dhd_os_wake_lock(&dhd->pub); + return ret; +} + +int dhd_os_wake_unlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + dhd_os_wake_lock_timeout(pub); + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + if (dhd->wakelock_counter) { + dhd->wakelock_counter--; +#ifdef CONFIG_HAS_WAKELOCK + if (!dhd->wakelock_counter) + wake_unlock(&dhd->wl_wifi); +#endif + ret = dhd->wakelock_counter; + } + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return ret; +} + +int dhd_os_check_wakelock(void *dhdp) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_pub_t *pub = (dhd_pub_t *)dhdp; + dhd_info_t *dhd; + + if (!pub) + return 0; + dhd = (dhd_info_t *)(pub->info); + + if (dhd && wake_lock_active(&dhd->wl_wifi)) + return 1; +#endif + return 0; +} + +int net_os_wake_unlock(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + int ret = 0; + + if (dhd) + ret = dhd_os_wake_unlock(&dhd->pub); + return ret; +} + +int 
dhd_os_check_if_up(void *dhdp) +{ + dhd_pub_t *pub = (dhd_pub_t *)dhdp; + + if (!pub) + return 0; + return pub->up; +} + +void dhd_set_version_info(dhd_pub_t *dhdp, char *fw) +{ + int i; + + i = snprintf(info_string, sizeof(info_string), + " Driver: %s\n Firmware: %s ", EPI_VERSION_STR, fw); + + if (!dhdp) + return; + i = snprintf(&info_string[i], sizeof(info_string) - i, + "\n Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp), + dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp)); +} + +int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd) +{ + int ifidx; + int ret = 0; + dhd_info_t *dhd = NULL; + + if (!net || !netdev_priv(net)) { + DHD_ERROR(("%s invalid parameter\n", __FUNCTION__)); + return -EINVAL; + } + + dhd = *(dhd_info_t **)netdev_priv(net); + ifidx = dhd_net2idx(dhd, net); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s bad ifidx\n", __FUNCTION__)); + return -ENODEV; + } + + DHD_OS_WAKE_LOCK(&dhd->pub); + ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len); + dhd_check_hang(net, &dhd->pub, ret); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + + return ret; +} + +bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret) +{ + struct net_device *net; + + net = dhd_idx2net(dhdp, ifidx); + return dhd_check_hang(net, dhdp, ret); +} + +#ifdef PROP_TXSTATUS +extern int dhd_wlfc_interface_entry_update(void* state, ewlfc_mac_entry_action_t action, uint8 ifid, + uint8 iftype, uint8* ea); +extern int dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits); + +int dhd_wlfc_interface_event(struct dhd_info *dhd, + ewlfc_mac_entry_action_t action, uint8 ifid, uint8 iftype, uint8* ea) +{ + if (dhd->pub.wlfc_state == NULL) + return BCME_OK; + + return dhd_wlfc_interface_entry_update(dhd->pub.wlfc_state, action, ifid, iftype, ea); +} + +int dhd_wlfc_FIFOcreditmap_event(struct dhd_info *dhd, uint8* event_data) +{ + if (dhd->pub.wlfc_state == NULL) + return BCME_OK; + + return dhd_wlfc_FIFOcreditmap_update(dhd->pub.wlfc_state, event_data); +} + +int dhd_wlfc_event(struct dhd_info *dhd) +{ + return dhd_wlfc_enable(&dhd->pub); +} +#endif /* PROP_TXSTATUS */ + +#ifdef BCMDBGFS + +#include + +extern uint32 dhd_readregl(void *bp, uint32 addr); +extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data); + +typedef struct dhd_dbgfs { + struct dentry *debugfs_dir; + struct dentry *debugfs_mem; + dhd_pub_t *dhdp; + uint32 size; +} dhd_dbgfs_t; + +dhd_dbgfs_t g_dbgfs; + +static int +dhd_dbg_state_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t +dhd_dbg_state_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + ssize_t rval; + uint32 tmp; + loff_t pos = *ppos; + size_t ret; + + if (pos < 0) + return -EINVAL; + if (pos >= g_dbgfs.size || !count) + return 0; + if (count > g_dbgfs.size - pos) + count = g_dbgfs.size - pos; + + /* Basically enforce aligned 4 byte reads. 
It's up to the user to work out the details */ + tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3)); + + ret = copy_to_user(ubuf, &tmp, 4); + if (ret == count) + return -EFAULT; + + count -= ret; + *ppos = pos + count; + rval = count; + + return rval; +} + + +static ssize_t +dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) +{ + loff_t pos = *ppos; + size_t ret; + uint32 buf; + + if (pos < 0) + return -EINVAL; + if (pos >= g_dbgfs.size || !count) + return 0; + if (count > g_dbgfs.size - pos) + count = g_dbgfs.size - pos; + + ret = copy_from_user(&buf, ubuf, sizeof(uint32)); + if (ret == count) + return -EFAULT; + + /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */ + dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf); + + return count; +} + + +loff_t +dhd_debugfs_lseek(struct file *file, loff_t off, int whence) +{ + loff_t pos = -1; + + switch (whence) { + case 0: + pos = off; + break; + case 1: + pos = file->f_pos + off; + break; + case 2: + pos = g_dbgfs.size - off; + } + return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos); +} + +static const struct file_operations dhd_dbg_state_ops = { + .read = dhd_dbg_state_read, + .write = dhd_debugfs_write, + .open = dhd_dbg_state_open, + .llseek = dhd_debugfs_lseek +}; + +static void dhd_dbg_create(void) +{ + if (g_dbgfs.debugfs_dir) { + g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir, + NULL, &dhd_dbg_state_ops); + } +} + +void dhd_dbg_init(dhd_pub_t *dhdp) +{ + int err; + + g_dbgfs.dhdp = dhdp; + g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */ + + g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0); + if (IS_ERR(g_dbgfs.debugfs_dir)) { + err = PTR_ERR(g_dbgfs.debugfs_dir); + g_dbgfs.debugfs_dir = NULL; + return; + } + + dhd_dbg_create(); + + return; +} + +void dhd_dbg_remove(void) +{ + debugfs_remove(g_dbgfs.debugfs_mem); + debugfs_remove(g_dbgfs.debugfs_dir); + + bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs)); + +} +#endif /* ifdef BCMDBGFS */ + +#ifdef WLMEDIA_HTSF + +static +void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf) +{ + dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); + struct sk_buff *skb; + uint32 htsf = 0; + uint16 dport = 0, oldmagic = 0xACAC; + char *p1; + htsfts_t ts; + + /* timestamp packet */ + + p1 = (char*) PKTDATA(dhdp->osh, pktbuf); + + if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) { +/* memcpy(&proto, p1+26, 4); */ + memcpy(&dport, p1+40, 2); +/* proto = ((ntoh32(proto))>> 16) & 0xFF; */ + dport = ntoh16(dport); + } + + /* timestamp only if icmp or udb iperf with port 5555 */ +/* if (proto == 17 && dport == tsport) { */ + if (dport >= tsport && dport <= tsport + 20) { + + skb = (struct sk_buff *) pktbuf; + + htsf = dhd_get_htsf(dhd, 0); + memset(skb->data + 44, 0, 2); /* clear checksum */ + memcpy(skb->data+82, &oldmagic, 2); + memcpy(skb->data+84, &htsf, 4); + + memset(&ts, 0, sizeof(htsfts_t)); + ts.magic = HTSFMAGIC; + ts.prio = PKTPRIO(pktbuf); + ts.seqnum = htsf_seqnum++; + ts.c10 = get_cycles(); + ts.t10 = htsf; + ts.endmagic = HTSFENDMAGIC; + + memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts)); + } +} + +static void dhd_dump_htsfhisto(histo_t *his, char *s) +{ + int pktcnt = 0, curval = 0, i; + for (i = 0; i < (NUMBIN-2); i++) { + curval += 500; + printf("%d ", his->bin[i]); + pktcnt += his->bin[i]; + } + printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt, + his->bin[NUMBIN-1], s); +} + +static +void sorttobin(int value, histo_t 
*histo) +{ + int i, binval = 0; + + if (value < 0) { + histo->bin[NUMBIN-1]++; + return; + } + if (value > histo->bin[NUMBIN-2]) /* store the max value */ + histo->bin[NUMBIN-2] = value; + + for (i = 0; i < (NUMBIN-2); i++) { + binval += 500; /* 500 ms bins */ + if (value <= binval) { + histo->bin[i]++; + return; + } + } + histo->bin[NUMBIN-3]++; +} + +static +void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + struct sk_buff *skb; + char *p1; + uint16 old_magic; + int d1, d2, d3, end2end; + htsfts_t *htsf_ts; + uint32 htsf; + + skb = PKTTONATIVE(dhdp->osh, pktbuf); + p1 = (char*)PKTDATA(dhdp->osh, pktbuf); + + if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) { + memcpy(&old_magic, p1+78, 2); + htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4); + } + else + return; + + if (htsf_ts->magic == HTSFMAGIC) { + htsf_ts->tE0 = dhd_get_htsf(dhd, 0); + htsf_ts->cE0 = get_cycles(); + } + + if (old_magic == 0xACAC) { + + tspktcnt++; + htsf = dhd_get_htsf(dhd, 0); + memcpy(skb->data+92, &htsf, sizeof(uint32)); + + memcpy(&ts[tsidx].t1, skb->data+80, 16); + + d1 = ts[tsidx].t2 - ts[tsidx].t1; + d2 = ts[tsidx].t3 - ts[tsidx].t2; + d3 = ts[tsidx].t4 - ts[tsidx].t3; + end2end = ts[tsidx].t4 - ts[tsidx].t1; + + sorttobin(d1, &vi_d1); + sorttobin(d2, &vi_d2); + sorttobin(d3, &vi_d3); + sorttobin(end2end, &vi_d4); + + if (end2end > 0 && end2end > maxdelay) { + maxdelay = end2end; + maxdelaypktno = tspktcnt; + memcpy(&maxdelayts, &ts[tsidx], 16); + } + if (++tsidx >= TSMAX) + tsidx = 0; + } +} + +uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx) +{ + uint32 htsf = 0, cur_cycle, delta, delta_us; + uint32 factor, baseval, baseval2; + cycles_t t; + + t = get_cycles(); + cur_cycle = t; + + if (cur_cycle > dhd->htsf.last_cycle) + delta = cur_cycle - dhd->htsf.last_cycle; + else { + delta = cur_cycle + (0xFFFFFFFF - dhd->htsf.last_cycle); + } + + delta = delta >> 4; + + if (dhd->htsf.coef) { + /* times ten to get the first digit */ + factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1); + baseval = (delta*10)/factor; + baseval2 = (delta*10)/(factor+1); + delta_us = (baseval - (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10); + htsf = (delta_us << 4) + dhd->htsf.last_tsf + HTSF_BUS_DELAY; + } + else { + DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n")); + } + + return htsf; +} + +static void dhd_dump_latency(void) +{ + int i, max = 0; + int d1, d2, d3, d4, d5; + + printf("T1 T2 T3 T4 d1 d2 t4-t1 i \n"); + for (i = 0; i < TSMAX; i++) { + d1 = ts[i].t2 - ts[i].t1; + d2 = ts[i].t3 - ts[i].t2; + d3 = ts[i].t4 - ts[i].t3; + d4 = ts[i].t4 - ts[i].t1; + d5 = ts[max].t4-ts[max].t1; + if (d4 > d5 && d4 > 0) { + max = i; + } + printf("%08X %08X %08X %08X \t%d %d %d %d i=%d\n", + ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4, + d1, d2, d3, d4, i); + } + + printf("current idx = %d \n", tsidx); + + printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt); + printf("%08X %08X %08X %08X \t%d %d %d %d\n", + maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4, + maxdelayts.t2 - maxdelayts.t1, + maxdelayts.t3 - maxdelayts.t2, + maxdelayts.t4 - maxdelayts.t3, + maxdelayts.t4 - maxdelayts.t1); +} + + +static int +dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx) +{ + wl_ioctl_t ioc; + char buf[32]; + int ret; + uint32 s1, s2; + + struct tsf { + uint32 low; + uint32 high; + } tsf_buf; + + memset(&ioc, 0, sizeof(ioc)); + memset(&tsf_buf, 0, sizeof(tsf_buf)); + + ioc.cmd = WLC_GET_VAR; + ioc.buf = buf; + ioc.len = (uint)sizeof(buf); + ioc.set = FALSE; + + strcpy(buf, "tsf"); 
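+ /* s1 and s2 bracket the "tsf" iovar below: the host timestamp is read
+ * just before and just after the ioctl, so the printed delta
+ * (s2 - tsf_buf.low) shows how closely dhd_get_htsf() tracks the TSF
+ * reported by the dongle across the round trip.
+ */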
+ s1 = dhd_get_htsf(dhd, 0); + if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) { + if (ret == -EIO) { + DHD_ERROR(("%s: tsf is not supported by device\n", + dhd_ifname(&dhd->pub, ifidx))); + return -EOPNOTSUPP; + } + return ret; + } + s2 = dhd_get_htsf(dhd, 0); + + memcpy(&tsf_buf, buf, sizeof(tsf_buf)); + printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ", + tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1, + dhd->htsf.coefdec2, s2-tsf_buf.low); + printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle); + return 0; +} + +void htsf_update(dhd_info_t *dhd, void *data) +{ + static ulong cur_cycle = 0, prev_cycle = 0; + uint32 htsf, tsf_delta = 0; + uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp; + ulong b, a; + cycles_t t; + + /* cycles_t in include/mips/timex.h */ + + t = get_cycles(); + + prev_cycle = cur_cycle; + cur_cycle = t; + + if (cur_cycle > prev_cycle) + cyc_delta = cur_cycle - prev_cycle; + else { + b = cur_cycle; + a = prev_cycle; + cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle); + } + + if (data == NULL) + printf(" tsf update data pointer is null \n"); + + memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t)); + memcpy(&cur_tsf, data, sizeof(tsf_t)); + + if (cur_tsf.low == 0) { + DHD_INFO((" ---- 0 TSF, do not update, return\n")); + return; + } + + if (cur_tsf.low > prev_tsf.low) + tsf_delta = (cur_tsf.low - prev_tsf.low); + else { + DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n", + cur_tsf.low, prev_tsf.low)); + if (cur_tsf.high > prev_tsf.high) { + tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low); + DHD_INFO((" ---- Wrap around tsf counter adjusted TSF=%08X\n", tsf_delta)); + } + else + return; /* do not update */ + } + + if (tsf_delta) { + hfactor = cyc_delta / tsf_delta; + tmp = (cyc_delta - (hfactor * tsf_delta))*10; + dec1 = tmp/tsf_delta; + dec2 = ((tmp - dec1*tsf_delta)*10) / tsf_delta; + tmp = (tmp - (dec1*tsf_delta))*10; + dec3 = ((tmp - dec2*tsf_delta)*10) / tsf_delta; + + if (dec3 > 4) { + if (dec2 == 9) { + dec2 = 0; + if (dec1 == 9) { + dec1 = 0; + hfactor++; + } + else { + dec1++; + } + } + else + dec2++; + } + } + + if (hfactor) { + htsf = ((cyc_delta * 10) / (hfactor*10+dec1)) + prev_tsf.low; + dhd->htsf.coef = hfactor; + dhd->htsf.last_cycle = cur_cycle; + dhd->htsf.last_tsf = cur_tsf.low; + dhd->htsf.coefdec1 = dec1; + dhd->htsf.coefdec2 = dec2; + } + else { + htsf = prev_tsf.low; + } +} + +#endif /* WLMEDIA_HTSF */ diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_sched.c b/drivers/net/wireless/bcmdhd/dhd_linux_sched.c new file mode 100644 index 0000000000000..aadd122f5b07f --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_linux_sched.c @@ -0,0 +1,39 @@ +/* + * Expose some of the kernel scheduler routines + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: dhd_linux_sched.c,v 1.3 2009-04-10 04:14:49 Exp $ + */ +#include +#include +#include +#include +#include + +int setScheduler(struct task_struct *p, int policy, struct sched_param *param) +{ + int rc = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + rc = sched_setscheduler(p, policy, param); +#endif /* LinuxVer */ + return rc; +} diff --git a/drivers/net/wireless/bcmdhd/dhd_proto.h b/drivers/net/wireless/bcmdhd/dhd_proto.h new file mode 100644 index 0000000000000..bb1d7365ea941 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_proto.h @@ -0,0 +1,105 @@ +/* + * Header file describing the internal (inter-module) DHD interfaces. + * + * Provides type definitions and function prototypes used to link the + * DHD OS, bus, and protocol modules. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: dhd_proto.h,v 1.8.10.6 2010-12-22 23:47:24 Exp $ + */ + +#ifndef _dhd_proto_h_ +#define _dhd_proto_h_ + +#include +#include + +#ifndef IOCTL_RESP_TIMEOUT +#define IOCTL_RESP_TIMEOUT 2000 /* In milli second */ +#endif + +/* + * Exported from the dhd protocol module (dhd_cdc, dhd_rndis) + */ + +/* Linkage, sets prot link and updates hdrlen in pub */ +extern int dhd_prot_attach(dhd_pub_t *dhdp); + +/* Unlink, frees allocated protocol memory (including dhd_prot) */ +extern void dhd_prot_detach(dhd_pub_t *dhdp); + +/* Initialize protocol: sync w/dongle state. + * Sets dongle media info (iswl, drv_version, mac address). + */ +extern int dhd_prot_init(dhd_pub_t *dhdp); + +/* Stop protocol: sync w/dongle state. */ +extern void dhd_prot_stop(dhd_pub_t *dhdp); + +/* Add any protocol-specific data header. + * Caller must reserve prot_hdrlen prepend space. + */ +extern void dhd_prot_hdrpush(dhd_pub_t *, int ifidx, void *txp); + +/* Remove any protocol-specific data header. 
*/ +extern int dhd_prot_hdrpull(dhd_pub_t *, int *ifidx, void *rxp); + +/* Use protocol to issue ioctl to dongle */ +extern int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len); + +/* Handles a protocol control response asynchronously */ +extern int dhd_prot_ctl_complete(dhd_pub_t *dhd); + +/* Check for and handle local prot-specific iovar commands */ +extern int dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set); + +/* Add prot dump output to a buffer */ +extern void dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); + +/* Update local copy of dongle statistics */ +extern void dhd_prot_dstats(dhd_pub_t *dhdp); + +extern int dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen); + +extern int dhd_preinit_ioctls(dhd_pub_t *dhd); + +#ifdef PROP_TXSTATUS +extern int dhd_wlfc_enque_sendq(void* state, int prec, void* p); +extern int dhd_wlfc_commit_packets(void* state, f_commitpkt_t fcommit, void* commit_ctx); +extern void dhd_wlfc_cleanup(dhd_pub_t *dhd); +#endif /* PROP_TXSTATUS */ + +/******************************** + * For version-string expansion * + */ +#if defined(BDC) +#define DHD_PROTOCOL "bdc" +#elif defined(CDC) +#define DHD_PROTOCOL "cdc" +#elif defined(RNDIS) +#define DHD_PROTOCOL "rndis" +#else +#define DHD_PROTOCOL "unknown" +#endif /* proto */ + +#endif /* _dhd_proto_h_ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_sdio.c b/drivers/net/wireless/bcmdhd/dhd_sdio.c new file mode 100644 index 0000000000000..ed3da83604b5c --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_sdio.c @@ -0,0 +1,6355 @@ +/* + * DHD Bus Module for SDIO + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: dhd_sdio.c 338148 2012-06-11 20:35:45Z $ + */ + +#include +#include +#include + +#ifdef BCMEMBEDIMAGE +#include BCMEMBEDIMAGE +#endif /* BCMEMBEDIMAGE */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#if defined(DHD_DEBUG) +#include +#include +#endif /* defined(DHD_DEBUG) */ +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#ifndef DHDSDIO_MEM_DUMP_FNAME +#define DHDSDIO_MEM_DUMP_FNAME "mem_dump" +#endif + +#define QLEN 256 /* bulk rx and tx queue lengths */ +#define FCHI (QLEN - 10) +#define FCLOW (FCHI / 2) +#define PRIOMASK 7 + +#define TXRETRIES 2 /* # of retries for tx frames */ + +#define DHD_RXBOUND 50 /* Default for max rx frames in one scheduling */ + +#define DHD_TXBOUND 20 /* Default for max tx frames in one scheduling */ + +#define DHD_TXMINMAX 1 /* Max tx frames if rx still pending */ + +#define MEMBLOCK 2048 /* Block size used for downloading of dongle image */ +#define MAX_NVRAMBUF_SIZE 4096 /* max nvram buf size */ +#define MAX_DATA_BUF (32 * 1024) /* Must be large enough to hold biggest possible glom */ + +#ifndef DHD_FIRSTREAD +#define DHD_FIRSTREAD 32 +#endif +#if !ISPOWEROF2(DHD_FIRSTREAD) +#error DHD_FIRSTREAD is not a power of 2! +#endif + +/* Total length of frame header for dongle protocol */ +#define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN) +#ifdef SDTEST +#define SDPCM_RESERVE (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN) +#else +#define SDPCM_RESERVE (SDPCM_HDRLEN + DHD_SDALIGN) +#endif + +/* Space for header read, limit for data packets */ +#ifndef MAX_HDR_READ +#define MAX_HDR_READ 32 +#endif +#if !ISPOWEROF2(MAX_HDR_READ) +#error MAX_HDR_READ is not a power of 2! +#endif + +#define MAX_RX_DATASZ 2048 + +/* Maximum milliseconds to wait for F2 to come up */ +#define DHD_WAIT_F2RDY 3000 + +/* Bump up limit on waiting for HT to account for first startup; + * if the image is doing a CRC calculation before programming the PMU + * for HT availability, it could take a couple hundred ms more, so + * max out at a 1 second (1000000us). + */ +#if (PMU_MAX_TRANSITION_DLY <= 1000000) +#undef PMU_MAX_TRANSITION_DLY +#define PMU_MAX_TRANSITION_DLY 1000000 +#endif + +/* Value for ChipClockCSR during initial setup */ +#define DHD_INIT_CLKCTL1 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ) +#define DHD_INIT_CLKCTL2 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP) + +/* Flags for SDH calls */ +#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED) + +/* Packet free applicable unconditionally for sdio and sdspi. Conditional if + * bufpool was present for gspi bus. 
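+ * In practice PKTFREE2() below skips the free only when the bus is gSPI
+ * and no local buffer pool is in use; on SDIO and SDSPI the packet is
+ * always freed.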
+ */ +#define PKTFREE2() if ((bus->bus != SPI_BUS) || bus->usebufpool) \ + PKTFREE(bus->dhd->osh, pkt, FALSE); +DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep); +#if defined(OOB_INTR_ONLY) +extern void bcmsdh_set_irq(int flag); +#endif /* defined(OOB_INTR_ONLY) */ +#ifdef PROP_TXSTATUS +extern void dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success); +#endif + +#ifdef DHD_DEBUG +/* Device console log buffer state */ +#define CONSOLE_LINE_MAX 192 +#define CONSOLE_BUFFER_MAX 2024 +typedef struct dhd_console { + uint count; /* Poll interval msec counter */ + uint log_addr; /* Log struct address (fixed) */ + hndrte_log_t log; /* Log struct (host copy) */ + uint bufsize; /* Size of log buffer */ + uint8 *buf; /* Log buffer (host copy) */ + uint last; /* Last buffer read index */ +} dhd_console_t; +#endif /* DHD_DEBUG */ + +/* Private data for SDIO bus interaction */ +typedef struct dhd_bus { + dhd_pub_t *dhd; + + bcmsdh_info_t *sdh; /* Handle for BCMSDH calls */ + si_t *sih; /* Handle for SI calls */ + char *vars; /* Variables (from CIS and/or other) */ + uint varsz; /* Size of variables buffer */ + uint32 sbaddr; /* Current SB window pointer (-1, invalid) */ + + sdpcmd_regs_t *regs; /* Registers for SDIO core */ + uint sdpcmrev; /* SDIO core revision */ + uint armrev; /* CPU core revision */ + uint ramrev; /* SOCRAM core revision */ + uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */ + uint32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */ + + uint32 bus; /* gSPI or SDIO bus */ + uint32 hostintmask; /* Copy of Host Interrupt Mask */ + uint32 intstatus; /* Intstatus bits (events) pending */ + bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */ + bool fcstate; /* State of dongle flow-control */ + + uint16 cl_devid; /* cached devid for dhdsdio_probe_attach() */ + char *fw_path; /* module_param: path to firmware image */ + char *nv_path; /* module_param: path to nvram vars file */ + const char *nvram_params; /* user specified nvram params. 
*/ + + uint blocksize; /* Block size of SDIO transfers */ + uint roundup; /* Max roundup limit */ + + struct pktq txq; /* Queue length used for flow-control */ + uint8 flowcontrol; /* per prio flow control bitmask */ + uint8 tx_seq; /* Transmit sequence number (next) */ + uint8 tx_max; /* Maximum transmit sequence allowed */ + + uint8 hdrbuf[MAX_HDR_READ + DHD_SDALIGN]; + uint8 *rxhdr; /* Header of current rx frame (in hdrbuf) */ + uint16 nextlen; /* Next Read Len from last header */ + uint8 rx_seq; /* Receive sequence number (expected) */ + bool rxskip; /* Skip receive (awaiting NAK ACK) */ + + void *glomd; /* Packet containing glomming descriptor */ + void *glom; /* Packet chain for glommed superframe */ + uint glomerr; /* Glom packet read errors */ + + uint8 *rxbuf; /* Buffer for receiving control packets */ + uint rxblen; /* Allocated length of rxbuf */ + uint8 *rxctl; /* Aligned pointer into rxbuf */ + uint8 *databuf; /* Buffer for receiving big glom packet */ + uint8 *dataptr; /* Aligned pointer into databuf */ + uint rxlen; /* Length of valid data in buffer */ + + uint8 sdpcm_ver; /* Bus protocol reported by dongle */ + + bool intr; /* Use interrupts */ + bool poll; /* Use polling */ + bool ipend; /* Device interrupt is pending */ + bool intdis; /* Interrupts disabled by isr */ + uint intrcount; /* Count of device interrupt callbacks */ + uint lastintrs; /* Count as of last watchdog timer */ + uint spurious; /* Count of spurious interrupts */ + uint pollrate; /* Ticks between device polls */ + uint polltick; /* Tick counter */ + uint pollcnt; /* Count of active polls */ + +#ifdef DHD_DEBUG + dhd_console_t console; /* Console output polling support */ + uint console_addr; /* Console address from shared struct */ +#endif /* DHD_DEBUG */ + + uint regfails; /* Count of R_REG/W_REG failures */ + + uint clkstate; /* State of sd and backplane clock(s) */ + bool activity; /* Activity flag for clock down */ + int32 idletime; /* Control for activity timeout */ + int32 idlecount; /* Activity timeout counter */ + int32 idleclock; /* How to set bus driver when idle */ + int32 sd_divisor; /* Speed control to bus driver */ + int32 sd_mode; /* Mode control to bus driver */ + int32 sd_rxchain; /* If bcmsdh api accepts PKT chains */ + bool use_rxchain; /* If dhd should use PKT chains */ + bool sleeping; /* Is SDIO bus sleeping? 
*/ + bool rxflow_mode; /* Rx flow control mode */ + bool rxflow; /* Is rx flow control on */ + uint prev_rxlim_hit; /* Is prev rx limit exceeded (per dpc schedule) */ + bool alp_only; /* Don't use HT clock (ALP only) */ + /* Field to decide if rx of control frames happen in rxbuf or lb-pool */ + bool usebufpool; + +#ifdef SDTEST + /* external loopback */ + bool ext_loop; + uint8 loopid; + + /* pktgen configuration */ + uint pktgen_freq; /* Ticks between bursts */ + uint pktgen_count; /* Packets to send each burst */ + uint pktgen_print; /* Bursts between count displays */ + uint pktgen_total; /* Stop after this many */ + uint pktgen_minlen; /* Minimum packet data len */ + uint pktgen_maxlen; /* Maximum packet data len */ + uint pktgen_mode; /* Configured mode: tx, rx, or echo */ + uint pktgen_stop; /* Number of tx failures causing stop */ + + /* active pktgen fields */ + uint pktgen_tick; /* Tick counter for bursts */ + uint pktgen_ptick; /* Burst counter for printing */ + uint pktgen_sent; /* Number of test packets generated */ + uint pktgen_rcvd; /* Number of test packets received */ + uint pktgen_fail; /* Number of failed send attempts */ + uint16 pktgen_len; /* Length of next packet to send */ +#define PKTGEN_RCV_IDLE (0) +#define PKTGEN_RCV_ONGOING (1) + uint16 pktgen_rcv_state; /* receive state */ + uint pktgen_rcvd_rcvsession; /* test pkts rcvd per rcv session. */ +#endif /* SDTEST */ + + /* Some additional counters */ + uint tx_sderrs; /* Count of tx attempts with sd errors */ + uint fcqueued; /* Tx packets that got queued */ + uint rxrtx; /* Count of rtx requests (NAK to dongle) */ + uint rx_toolong; /* Receive frames too long to receive */ + uint rxc_errors; /* SDIO errors when reading control frames */ + uint rx_hdrfail; /* SDIO errors on header reads */ + uint rx_badhdr; /* Bad received headers (roosync?) 
*/ + uint rx_badseq; /* Mismatched rx sequence number */ + uint fc_rcvd; /* Number of flow-control events received */ + uint fc_xoff; /* Number which turned on flow-control */ + uint fc_xon; /* Number which turned off flow-control */ + uint rxglomfail; /* Failed deglom attempts */ + uint rxglomframes; /* Number of glom frames (superframes) */ + uint rxglompkts; /* Number of packets from glom frames */ + uint f2rxhdrs; /* Number of header reads */ + uint f2rxdata; /* Number of frame data reads */ + uint f2txdata; /* Number of f2 frame writes */ + uint f1regdata; /* Number of f1 register accesses */ + + uint8 *ctrl_frame_buf; + uint32 ctrl_frame_len; + bool ctrl_frame_stat; + uint32 rxint_mode; /* rx interrupt mode */ +} dhd_bus_t; + +/* clkstate */ +#define CLK_NONE 0 +#define CLK_SDONLY 1 +#define CLK_PENDING 2 /* Not used yet */ +#define CLK_AVAIL 3 + +#define DHD_NOPMU(dhd) (FALSE) + +#ifdef DHD_DEBUG +static int qcount[NUMPRIO]; +static int tx_packets[NUMPRIO]; +#endif /* DHD_DEBUG */ + +/* Deferred transmit */ +const uint dhd_deferred_tx = 1; + +extern uint dhd_watchdog_ms; +extern void dhd_os_wd_timer(void *bus, uint wdtick); + +/* Tx/Rx bounds */ +uint dhd_txbound; +uint dhd_rxbound; +uint dhd_txminmax = DHD_TXMINMAX; + +/* override the RAM size if possible */ +#define DONGLE_MIN_MEMSIZE (128 *1024) +int dhd_dongle_memsize; + +static bool dhd_doflow; +static bool dhd_alignctl; + +static bool sd1idle; + +static bool retrydata; +#define RETRYCHAN(chan) (((chan) == SDPCM_EVENT_CHANNEL) || retrydata) + +static const uint watermark = 8; +static const uint firstread = DHD_FIRSTREAD; + +#define HDATLEN (firstread - (SDPCM_HDRLEN)) + +/* Retry count for register access failures */ +static const uint retry_limit = 2; + +/* Force even SD lengths (some host controllers mess up on odd bytes) */ +static bool forcealign; + +/* Flag to indicate if we should download firmware on driver load */ +uint dhd_download_fw_on_driverload = TRUE; + +#define ALIGNMENT 4 + +#if defined(OOB_INTR_ONLY) && defined(HW_OOB) +extern void bcmsdh_enable_hw_oob_intr(void *sdh, bool enable); +#endif + +#if defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) +#error OOB_INTR_ONLY is NOT working with SDIO_ISR_THREAD +#endif /* defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) */ +#define PKTALIGN(osh, p, len, align) \ + do { \ + uint datalign; \ + datalign = (uintptr)PKTDATA((osh), (p)); \ + datalign = ROUNDUP(datalign, (align)) - datalign; \ + ASSERT(datalign < (align)); \ + ASSERT(PKTLEN((osh), (p)) >= ((len) + datalign)); \ + if (datalign) \ + PKTPULL((osh), (p), datalign); \ + PKTSETLEN((osh), (p), (len)); \ + } while (0) + +/* Limit on rounding up frames */ +static const uint max_roundup = 512; + +/* Try doing readahead */ +static bool dhd_readahead; + +/* To check if there's window offered */ +#define DATAOK(bus) \ + (((uint8)(bus->tx_max - bus->tx_seq) > 1) && \ + (((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0)) + +/* To check if there's window offered for ctrl frame */ +#define TXCTLOK(bus) \ + (((uint8)(bus->tx_max - bus->tx_seq) != 0) && \ + (((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0)) + +/* Macros to get register read/write status */ +/* NOTE: these assume a local dhdsdio_bus_t *bus! 
*/ +#define R_SDREG(regvar, regaddr, retryvar) \ +do { \ + retryvar = 0; \ + do { \ + regvar = R_REG(bus->dhd->osh, regaddr); \ + } while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \ + if (retryvar) { \ + bus->regfails += (retryvar-1); \ + if (retryvar > retry_limit) { \ + DHD_ERROR(("%s: FAILED" #regvar "READ, LINE %d\n", \ + __FUNCTION__, __LINE__)); \ + regvar = 0; \ + } \ + } \ +} while (0) + +#define W_SDREG(regval, regaddr, retryvar) \ +do { \ + retryvar = 0; \ + do { \ + W_REG(bus->dhd->osh, regaddr, regval); \ + } while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \ + if (retryvar) { \ + bus->regfails += (retryvar-1); \ + if (retryvar > retry_limit) \ + DHD_ERROR(("%s: FAILED REGISTER WRITE, LINE %d\n", \ + __FUNCTION__, __LINE__)); \ + } \ +} while (0) + +#define BUS_WAKE(bus) \ + do { \ + if ((bus)->sleeping) \ + dhdsdio_bussleep((bus), FALSE); \ + } while (0); + +/* + * pktavail interrupts from dongle to host can be managed in 3 different ways + * whenever there is a packet available in dongle to transmit to host. + * + * Mode 0: Dongle writes the software host mailbox and host is interrupted. + * Mode 1: (sdiod core rev >= 4) + * Device sets a new bit in the intstatus whenever there is a packet + * available in fifo. Host can't clear this specific status bit until all the + * packets are read from the FIFO. No need to ack dongle intstatus. + * Mode 2: (sdiod core rev >= 4) + * Device sets a bit in the intstatus, and host acks this by writing + * one to this bit. Dongle won't generate anymore packet interrupts + * until host reads all the packets from the dongle and reads a zero to + * figure that there are no more packets. No need to disable host ints. + * Need to ack the intstatus. + */ + +#define SDIO_DEVICE_HMB_RXINT 0 /* default old way */ +#define SDIO_DEVICE_RXDATAINT_MODE_0 1 /* from sdiod rev 4 */ +#define SDIO_DEVICE_RXDATAINT_MODE_1 2 /* from sdiod rev 4 */ + + +#define FRAME_AVAIL_MASK(bus) \ + ((bus->rxint_mode == SDIO_DEVICE_HMB_RXINT) ? 
I_HMB_FRAME_IND : I_XMTDATA_AVAIL) + +#define DHD_BUS SDIO_BUS + +#define PKT_AVAILABLE(bus, intstatus) ((intstatus) & (FRAME_AVAIL_MASK(bus))) + +#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE) + +#define GSPI_PR55150_BAILOUT + + +#ifdef SDTEST +static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq); +static void dhdsdio_sdtest_set(dhd_bus_t *bus, uint8 count); +#endif + +#ifdef DHD_DEBUG +static int dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size); +static int dhd_serialconsole(dhd_bus_t *bus, bool get, bool enable, int *bcmerror); +#endif /* DHD_DEBUG */ + +static int dhdsdio_download_state(dhd_bus_t *bus, bool enter); + +static void dhdsdio_release(dhd_bus_t *bus, osl_t *osh); +static void dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh); +static void dhdsdio_disconnect(void *ptr); +static bool dhdsdio_chipmatch(uint16 chipid); +static bool dhdsdio_probe_attach(dhd_bus_t *bus, osl_t *osh, void *sdh, + void * regsva, uint16 devid); +static bool dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh); +static bool dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh); +static void dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, + bool reset_flag); + +static void dhd_dongle_setmemsize(struct dhd_bus *bus, int mem_size); +static int dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, + void *pkt, bcmsdh_cmplt_fn_t complete, void *handle); +static int dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, + void *pkt, bcmsdh_cmplt_fn_t complete, void *handle); + +static bool dhdsdio_download_firmware(dhd_bus_t *bus, osl_t *osh, void *sdh); +static int _dhdsdio_download_firmware(dhd_bus_t *bus); + +static int dhdsdio_download_code_file(dhd_bus_t *bus, char *image_path); +static int dhdsdio_download_nvram(dhd_bus_t *bus); +#ifdef BCMEMBEDIMAGE +static int dhdsdio_download_code_array(dhd_bus_t *bus); +#endif + +#ifdef WLMEDIA_HTSF +#include +extern uint32 dhd_get_htsf(void *dhd, int ifidx); +#endif /* WLMEDIA_HTSF */ + +static void +dhd_dongle_setmemsize(struct dhd_bus *bus, int mem_size) +{ + int32 min_size = DONGLE_MIN_MEMSIZE; + /* Restrict the memsize to user specified limit */ + DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n", + dhd_dongle_memsize, min_size)); + if ((dhd_dongle_memsize > min_size) && + (dhd_dongle_memsize < (int32)bus->orig_ramsize)) + bus->ramsize = dhd_dongle_memsize; +} + +static int +dhdsdio_set_siaddr_window(dhd_bus_t *bus, uint32 address) +{ + int err = 0; + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, + (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err); + if (!err) + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID, + (address >> 16) & SBSDIO_SBADDRMID_MASK, &err); + if (!err) + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH, + (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err); + return err; +} + + +/* Turn backplane clock on or off */ +static int +dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok) +{ +#define HT_AVAIL_ERROR_MAX 10 + static int ht_avail_error = 0; + int err; + uint8 clkctl, clkreq, devctl; + bcmsdh_info_t *sdh; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#if defined(OOB_INTR_ONLY) + pendok = FALSE; +#endif + clkctl = 0; + sdh = bus->sdh; + + if (on) { + /* Request HT Avail */ + clkreq = bus->alp_only ? 
SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ; + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err); + if (err) { + ht_avail_error++; + if (ht_avail_error < HT_AVAIL_ERROR_MAX) { + DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err)); + } else { + if (ht_avail_error == HT_AVAIL_ERROR_MAX) + dhd_os_send_hang_message(bus->dhd); + } + return BCME_ERROR; + } else { + ht_avail_error = 0; + } + + if (pendok && + ((bus->sih->buscoretype == PCMCIA_CORE_ID) && (bus->sih->buscorerev == 9))) { + uint32 dummy, retries; + R_SDREG(dummy, &bus->regs->clockctlstatus, retries); + } + + /* Check current status */ + clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + if (err) { + DHD_ERROR(("%s: HT Avail read error: %d\n", __FUNCTION__, err)); + return BCME_ERROR; + } + + /* Go to pending and await interrupt if appropriate */ + if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) { + /* Allow only clock-available interrupt */ + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + if (err) { + DHD_ERROR(("%s: Devctl access error setting CA: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + + devctl |= SBSDIO_DEVCTL_CA_INT_ONLY; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err); + DHD_INFO(("CLKCTL: set PENDING\n")); + bus->clkstate = CLK_PENDING; + return BCME_OK; + } else if (bus->clkstate == CLK_PENDING) { + /* Cancel CA-only interrupt filter */ + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err); + } + + /* Otherwise, wait here (polling) for HT Avail */ + if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) { + SPINWAIT_SLEEP(sdioh_spinwait_sleep, + ((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, &err)), + !SBSDIO_CLKAV(clkctl, bus->alp_only)), PMU_MAX_TRANSITION_DLY); + } + if (err) { + DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err)); + return BCME_ERROR; + } + if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) { + DHD_ERROR(("%s: HT Avail timeout (%d): clkctl 0x%02x\n", + __FUNCTION__, PMU_MAX_TRANSITION_DLY, clkctl)); + return BCME_ERROR; + } + + + /* Mark clock available */ + bus->clkstate = CLK_AVAIL; + DHD_INFO(("CLKCTL: turned ON\n")); + +#if defined(DHD_DEBUG) + if (bus->alp_only == TRUE) { +#if !defined(BCMLXSDMMC) + if (!SBSDIO_ALPONLY(clkctl)) { + DHD_ERROR(("%s: HT Clock, when ALP Only\n", __FUNCTION__)); + } +#endif /* !defined(BCMLXSDMMC) */ + } else { + if (SBSDIO_ALPONLY(clkctl)) { + DHD_ERROR(("%s: HT Clock should be on.\n", __FUNCTION__)); + } + } +#endif /* defined (DHD_DEBUG) */ + + bus->activity = TRUE; + } else { + clkreq = 0; + + if (bus->clkstate == CLK_PENDING) { + /* Cancel CA-only interrupt filter */ + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err); + } + + bus->clkstate = CLK_SDONLY; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err); + DHD_INFO(("CLKCTL: turned OFF\n")); + if (err) { + DHD_ERROR(("%s: Failed access turning clock off: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } + return BCME_OK; +} + +/* Change idle/active SD state */ +static int +dhdsdio_sdclk(dhd_bus_t *bus, bool on) +{ + int err; + int32 iovalue; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (on) { + if (bus->idleclock == DHD_IDLE_STOP) { + /* Turn on clock and restore mode */ + 
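/* bus->idleclock selects the idle policy: DHD_IDLE_STOP gates the SD
+ * clock entirely (sd_clock is re-enabled and sd_mode restored below),
+ * DHD_IDLE_ACTIVE leaves the clock untouched, and any other value is
+ * used as an alternate sd_divisor while the bus is idle. */
+ 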
iovalue = 1; + err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error enabling sd_clock: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + + iovalue = bus->sd_mode; + err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error changing sd_mode: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } else if (bus->idleclock != DHD_IDLE_ACTIVE) { + /* Restore clock speed */ + iovalue = bus->sd_divisor; + err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error restoring sd_divisor: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } + bus->clkstate = CLK_SDONLY; + } else { + /* Stop or slow the SD clock itself */ + if ((bus->sd_divisor == -1) || (bus->sd_mode == -1)) { + DHD_TRACE(("%s: can't idle clock, divisor %d mode %d\n", + __FUNCTION__, bus->sd_divisor, bus->sd_mode)); + return BCME_ERROR; + } + if (bus->idleclock == DHD_IDLE_STOP) { + if (sd1idle) { + /* Change to SD1 mode and turn off clock */ + iovalue = 1; + err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error changing sd_clock: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } + + iovalue = 0; + err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error disabling sd_clock: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } else if (bus->idleclock != DHD_IDLE_ACTIVE) { + /* Set divisor to idle value */ + iovalue = bus->idleclock; + err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error changing sd_divisor: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } + bus->clkstate = CLK_NONE; + } + + return BCME_OK; +} + +/* Transition SD and backplane clock readiness */ +static int +dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok) +{ + int ret = BCME_OK; +#ifdef DHD_DEBUG + uint oldstate = bus->clkstate; +#endif /* DHD_DEBUG */ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Early exit if we're already there */ + if (bus->clkstate == target) { + if (target == CLK_AVAIL) { + dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms); + bus->activity = TRUE; + } + return ret; + } + + switch (target) { + case CLK_AVAIL: + /* Make sure SD clock is available */ + if (bus->clkstate == CLK_NONE) + dhdsdio_sdclk(bus, TRUE); + /* Now request HT Avail on the backplane */ + ret = dhdsdio_htclk(bus, TRUE, pendok); + if (ret == BCME_OK) { + dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms); + bus->activity = TRUE; + } + break; + + case CLK_SDONLY: + /* Remove HT request, or bring up SD clock */ + if (bus->clkstate == CLK_NONE) + ret = dhdsdio_sdclk(bus, TRUE); + else if (bus->clkstate == CLK_AVAIL) + ret = dhdsdio_htclk(bus, FALSE, FALSE); + else + DHD_ERROR(("dhdsdio_clkctl: request for %d -> %d\n", + bus->clkstate, target)); + if (ret == BCME_OK) { + dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms); + } + break; + + case CLK_NONE: + /* Make sure to remove HT request */ + if (bus->clkstate == CLK_AVAIL) + ret = dhdsdio_htclk(bus, FALSE, FALSE); + /* Now remove the SD clock */ + ret = dhdsdio_sdclk(bus, FALSE); +#ifdef DHD_DEBUG + if (dhd_console_ms == 0) +#endif /* DHD_DEBUG */ + if (bus->poll == 0) + dhd_os_wd_timer(bus->dhd, 0); + break; + } +#ifdef DHD_DEBUG + DHD_INFO(("dhdsdio_clkctl: %d -> %d\n", oldstate, 
bus->clkstate)); +#endif /* DHD_DEBUG */ + + return ret; +} + +static int +dhdsdio_bussleep(dhd_bus_t *bus, bool sleep) +{ + bcmsdh_info_t *sdh = bus->sdh; + sdpcmd_regs_t *regs = bus->regs; + uint retries = 0; + + DHD_INFO(("dhdsdio_bussleep: request %s (currently %s)\n", + (sleep ? "SLEEP" : "WAKE"), + (bus->sleeping ? "SLEEP" : "WAKE"))); + + /* Done if we're already in the requested state */ + if (sleep == bus->sleeping) + return BCME_OK; + + /* Going to sleep: set the alarm and turn off the lights... */ + if (sleep) { + /* Don't sleep if something is pending */ + if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq)) + return BCME_BUSY; + + + /* Disable SDIO interrupts (no longer interested) */ + bcmsdh_intr_disable(bus->sdh); + + /* Make sure the controller has the bus up */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Tell device to start using OOB wakeup */ + W_SDREG(SMB_USE_OOB, ®s->tosbmailbox, retries); + if (retries > retry_limit) + DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n")); + + /* Turn off our contribution to the HT clock request */ + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + SBSDIO_FORCE_HW_CLKREQ_OFF, NULL); + + /* Isolate the bus */ + if (bus->sih->chip != BCM4329_CHIP_ID && bus->sih->chip != BCM4319_CHIP_ID) { + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, + SBSDIO_DEVCTL_PADS_ISO, NULL); + } + + /* Change state */ + bus->sleeping = TRUE; + + } else { + /* Waking up: bus power up is ok, set local state */ + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + 0, NULL); + + /* Force pad isolation off if possible (in case power never toggled) */ + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, 0, NULL); + + + /* Make sure the controller has the bus up */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Send misc interrupt to indicate OOB not needed */ + W_SDREG(0, ®s->tosbmailboxdata, retries); + if (retries <= retry_limit) + W_SDREG(SMB_DEV_INT, ®s->tosbmailbox, retries); + + if (retries > retry_limit) + DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n")); + + /* Make sure we have SD bus access */ + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + + /* Change state */ + bus->sleeping = FALSE; + + /* Enable interrupts again */ + if (bus->intr && (bus->dhd->busstate == DHD_BUS_DATA)) { + bus->intdis = FALSE; + bcmsdh_intr_enable(bus->sdh); + } + } + + return BCME_OK; +} + +#if defined(OOB_INTR_ONLY) +void +dhd_enable_oob_intr(struct dhd_bus *bus, bool enable) +{ +#if defined(HW_OOB) + bcmsdh_enable_hw_oob_intr(bus->sdh, enable); +#else + sdpcmd_regs_t *regs = bus->regs; + uint retries = 0; + + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + if (enable == TRUE) { + + /* Tell device to start using OOB wakeup */ + W_SDREG(SMB_USE_OOB, ®s->tosbmailbox, retries); + if (retries > retry_limit) + DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n")); + + } else { + /* Send misc interrupt to indicate OOB not needed */ + W_SDREG(0, ®s->tosbmailboxdata, retries); + if (retries <= retry_limit) + W_SDREG(SMB_DEV_INT, ®s->tosbmailbox, retries); + } + + /* Turn off our contribution to the HT clock request */ + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); +#endif /* !defined(HW_OOB) */ +} +#endif /* defined(OOB_INTR_ONLY) */ + +/* Writes a HW/SW header into the packet and sends it. 
*/ +/* Assumes: (a) header space already there, (b) caller holds lock */ +static int +dhdsdio_txpkt(dhd_bus_t *bus, void *pkt, uint chan, bool free_pkt) +{ + int ret; + osl_t *osh; + uint8 *frame; + uint16 len, pad1 = 0; + uint32 swheader; + uint retries = 0; + bcmsdh_info_t *sdh; + void *new; + int i; +#ifdef WLMEDIA_HTSF + char *p; + htsfts_t *htsf_ts; +#endif + + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + sdh = bus->sdh; + osh = bus->dhd->osh; + + if (bus->dhd->dongle_reset) { + ret = BCME_NOTREADY; + goto done; + } + + frame = (uint8*)PKTDATA(osh, pkt); + +#ifdef WLMEDIA_HTSF + if (PKTLEN(osh, pkt) >= 100) { + p = PKTDATA(osh, pkt); + htsf_ts = (htsfts_t*) (p + HTSF_HOSTOFFSET + 12); + if (htsf_ts->magic == HTSFMAGIC) { + htsf_ts->c20 = get_cycles(); + htsf_ts->t20 = dhd_get_htsf(bus->dhd->info, 0); + } + } +#endif /* WLMEDIA_HTSF */ + + /* Add alignment padding, allocate new packet if needed */ + if ((pad1 = ((uintptr)frame % DHD_SDALIGN))) { + if (PKTHEADROOM(osh, pkt) < pad1) { + DHD_INFO(("%s: insufficient headroom %d for %d pad1\n", + __FUNCTION__, (int)PKTHEADROOM(osh, pkt), pad1)); + bus->dhd->tx_realloc++; + new = PKTGET(osh, (PKTLEN(osh, pkt) + DHD_SDALIGN), TRUE); + if (!new) { + DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n", + __FUNCTION__, PKTLEN(osh, pkt) + DHD_SDALIGN)); + ret = BCME_NOMEM; + goto done; + } + + PKTALIGN(osh, new, PKTLEN(osh, pkt), DHD_SDALIGN); + bcopy(PKTDATA(osh, pkt), PKTDATA(osh, new), PKTLEN(osh, pkt)); + if (free_pkt) + PKTFREE(osh, pkt, TRUE); + /* free the pkt if canned one is not used */ + free_pkt = TRUE; + pkt = new; + frame = (uint8*)PKTDATA(osh, pkt); + ASSERT(((uintptr)frame % DHD_SDALIGN) == 0); + pad1 = 0; + } else { + PKTPUSH(osh, pkt, pad1); + frame = (uint8*)PKTDATA(osh, pkt); + + ASSERT((pad1 + SDPCM_HDRLEN) <= (int) PKTLEN(osh, pkt)); + bzero(frame, pad1 + SDPCM_HDRLEN); + } + } + ASSERT(pad1 < DHD_SDALIGN); + + /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */ + len = (uint16)PKTLEN(osh, pkt); + *(uint16*)frame = htol16(len); + *(((uint16*)frame) + 1) = htol16(~len); + + /* Software tag: channel, sequence number, data offset */ + swheader = ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) | bus->tx_seq | + (((pad1 + SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK); + htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN); + htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader)); + +#ifdef DHD_DEBUG + if (PKTPRIO(pkt) < ARRAYSIZE(tx_packets)) { + tx_packets[PKTPRIO(pkt)]++; + } + if (DHD_BYTES_ON() && + (((DHD_CTL_ON() && (chan == SDPCM_CONTROL_CHANNEL)) || + (DHD_DATA_ON() && (chan != SDPCM_CONTROL_CHANNEL))))) { + prhex("Tx Frame", frame, len); + } else if (DHD_HDRS_ON()) { + prhex("TxHdr", frame, MIN(len, 16)); + } +#endif + + /* Raise len to next SDIO block to eliminate tail command */ + if (bus->roundup && bus->blocksize && (len > bus->blocksize)) { + uint16 pad2 = bus->blocksize - (len % bus->blocksize); + if ((pad2 <= bus->roundup) && (pad2 < bus->blocksize)) +#ifdef NOTUSED + if (pad2 <= PKTTAILROOM(osh, pkt)) +#endif /* NOTUSED */ + len += pad2; + } else if (len % DHD_SDALIGN) { + len += DHD_SDALIGN - (len % DHD_SDALIGN); + } + + /* Some controllers have trouble with odd bytes -- round to even */ + if (forcealign && (len & (ALIGNMENT - 1))) { +#ifdef NOTUSED + if (PKTTAILROOM(osh, pkt)) +#endif + len = ROUNDUP(len, ALIGNMENT); +#ifdef NOTUSED + else + DHD_ERROR(("%s: sending unrounded %d-byte packet\n", __FUNCTION__, len)); +#endif + } + + do { + ret = 
dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + frame, len, pkt, NULL, NULL); + bus->f2txdata++; + ASSERT(ret != BCME_PENDING); + + if (ret < 0) { + /* On failure, abort the command and terminate the frame */ + DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n", + __FUNCTION__, ret)); + bus->tx_sderrs++; + + bcmsdh_abort(sdh, SDIO_FUNC_2); + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, + SFC_WF_TERM, NULL); + bus->f1regdata++; + + for (i = 0; i < 3; i++) { + uint8 hi, lo; + hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_WFRAMEBCHI, NULL); + lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_WFRAMEBCLO, NULL); + bus->f1regdata += 2; + if ((hi == 0) && (lo == 0)) + break; + } + + } + if (ret == 0) { + bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; + } + } while ((ret < 0) && retrydata && retries++ < TXRETRIES); + +done: + /* restore pkt buffer pointer before calling tx complete routine */ + PKTPULL(osh, pkt, SDPCM_HDRLEN + pad1); +#ifdef PROP_TXSTATUS + if (bus->dhd->wlfc_state) { + dhd_os_sdunlock(bus->dhd); + dhd_wlfc_txcomplete(bus->dhd, pkt, ret == 0); + dhd_os_sdlock(bus->dhd); + } else { +#endif /* PROP_TXSTATUS */ + dhd_txcomplete(bus->dhd, pkt, ret != 0); + if (free_pkt) + PKTFREE(osh, pkt, TRUE); + +#ifdef PROP_TXSTATUS + } +#endif + return ret; +} + +int +dhd_bus_txdata(struct dhd_bus *bus, void *pkt) +{ + int ret = BCME_ERROR; + osl_t *osh; + uint datalen, prec; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + osh = bus->dhd->osh; + datalen = PKTLEN(osh, pkt); + +#ifdef SDTEST + /* Push the test header if doing loopback */ + if (bus->ext_loop) { + uint8* data; + PKTPUSH(osh, pkt, SDPCM_TEST_HDRLEN); + data = PKTDATA(osh, pkt); + *data++ = SDPCM_TEST_ECHOREQ; + *data++ = (uint8)bus->loopid++; + *data++ = (datalen >> 0); + *data++ = (datalen >> 8); + datalen += SDPCM_TEST_HDRLEN; + } +#endif /* SDTEST */ + + /* Add space for the header */ + PKTPUSH(osh, pkt, SDPCM_HDRLEN); + ASSERT(ISALIGNED((uintptr)PKTDATA(osh, pkt), 2)); + + prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK)); +#ifndef DHDTHREAD + /* Lock: we're about to use shared data/code (and SDIO) */ + dhd_os_sdlock(bus->dhd); +#endif /* DHDTHREAD */ + + /* Check for existing queue, current flow-control, pending event, or pending clock */ + if (dhd_deferred_tx || bus->fcstate || pktq_len(&bus->txq) || bus->dpc_sched || + (!DATAOK(bus)) || (bus->flowcontrol & NBITVAL(prec)) || + (bus->clkstate != CLK_AVAIL)) { + DHD_TRACE(("%s: deferring pktq len %d\n", __FUNCTION__, + pktq_len(&bus->txq))); + bus->fcqueued++; + + /* Priority based enq */ + dhd_os_sdlock_txq(bus->dhd); + if (dhd_prec_enq(bus->dhd, &bus->txq, pkt, prec) == FALSE) { + PKTPULL(osh, pkt, SDPCM_HDRLEN); +#ifndef DHDTHREAD + /* Need to also release txqlock before releasing sdlock. + * This thread still has txqlock and releases sdlock. + * Deadlock happens when dpc() grabs sdlock first then + * attempts to grab txqlock. 
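+ * In other words the lock order is sdlock, then txqlock: this path has
+ * to drop txqlock as well before giving up sdlock, since re-taking
+ * sdlock while still holding txqlock would deadlock against dpc().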
+ */ + dhd_os_sdunlock_txq(bus->dhd); + dhd_os_sdunlock(bus->dhd); +#endif +#ifdef PROP_TXSTATUS + if (bus->dhd->wlfc_state) + dhd_wlfc_txcomplete(bus->dhd, pkt, FALSE); + else +#endif + dhd_txcomplete(bus->dhd, pkt, FALSE); +#ifndef DHDTHREAD + dhd_os_sdlock(bus->dhd); + dhd_os_sdlock_txq(bus->dhd); +#endif +#ifdef PROP_TXSTATUS + /* let the caller decide whether to free the packet */ + if (!bus->dhd->wlfc_state) +#endif + PKTFREE(osh, pkt, TRUE); + ret = BCME_NORESOURCE; + } + else + ret = BCME_OK; + dhd_os_sdunlock_txq(bus->dhd); + + if ((pktq_len(&bus->txq) >= FCHI) && dhd_doflow) + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON); + +#ifdef DHD_DEBUG + if (pktq_plen(&bus->txq, prec) > qcount[prec]) + qcount[prec] = pktq_plen(&bus->txq, prec); +#endif + /* Schedule DPC if needed to send queued packet(s) */ + if (dhd_deferred_tx && !bus->dpc_sched) { + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + } + } else { +#ifdef DHDTHREAD + /* Lock: we're about to use shared data/code (and SDIO) */ + dhd_os_sdlock(bus->dhd); +#endif /* DHDTHREAD */ + + /* Otherwise, send it now */ + BUS_WAKE(bus); + /* Make sure back plane ht clk is on, no pending allowed */ + dhdsdio_clkctl(bus, CLK_AVAIL, TRUE); +#ifndef SDTEST + ret = dhdsdio_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, TRUE); +#else + ret = dhdsdio_txpkt(bus, pkt, + (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL), TRUE); +#endif + if (ret) + bus->dhd->tx_errors++; + else + bus->dhd->dstats.tx_bytes += datalen; + + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, TRUE); + } + +#ifdef DHDTHREAD + dhd_os_sdunlock(bus->dhd); +#endif /* DHDTHREAD */ + } + +#ifndef DHDTHREAD + dhd_os_sdunlock(bus->dhd); +#endif /* DHDTHREAD */ + + return ret; +} + +static uint +dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes) +{ + void *pkt; + uint32 intstatus = 0; + uint retries = 0; + int ret = 0, prec_out; + uint cnt = 0; + uint datalen; + uint8 tx_prec_map; + + dhd_pub_t *dhd = bus->dhd; + sdpcmd_regs_t *regs = bus->regs; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + tx_prec_map = ~bus->flowcontrol; + + /* Send frames until the limit or some other event */ + for (cnt = 0; (cnt < maxframes) && DATAOK(bus); cnt++) { + dhd_os_sdlock_txq(bus->dhd); + if ((pkt = pktq_mdeq(&bus->txq, tx_prec_map, &prec_out)) == NULL) { + dhd_os_sdunlock_txq(bus->dhd); + break; + } + dhd_os_sdunlock_txq(bus->dhd); + datalen = PKTLEN(bus->dhd->osh, pkt) - SDPCM_HDRLEN; + +#ifndef SDTEST + ret = dhdsdio_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, TRUE); +#else + ret = dhdsdio_txpkt(bus, pkt, + (bus->ext_loop ? 
SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL), TRUE);
+#endif
+		if (ret)
+			bus->dhd->tx_errors++;
+		else
+			bus->dhd->dstats.tx_bytes += datalen;
+
+		/* In poll mode, need to check for other events */
+		if (!bus->intr && cnt)
+		{
+			/* Check device status, signal pending interrupt */
+			R_SDREG(intstatus, &regs->intstatus, retries);
+			bus->f2txdata++;
+			if (bcmsdh_regfail(bus->sdh))
+				break;
+			if (intstatus & bus->hostintmask)
+				bus->ipend = TRUE;
+		}
+	}
+
+	/* Deflow-control stack if needed */
+	if (dhd_doflow && dhd->up && (dhd->busstate == DHD_BUS_DATA) &&
+	    dhd->txoff && (pktq_len(&bus->txq) < FCLOW))
+		dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
+
+	return cnt;
+}
+
+int
+dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+	uint8 *frame;
+	uint16 len;
+	uint32 swheader;
+	uint retries = 0;
+	bcmsdh_info_t *sdh = bus->sdh;
+	uint8 doff = 0;
+	int ret = -1;
+	int i;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus->dhd->dongle_reset)
+		return -EIO;
+
+	/* Back the pointer to make room for bus header */
+	frame = msg - SDPCM_HDRLEN;
+	len = (msglen += SDPCM_HDRLEN);
+
+	/* Add alignment padding (optional for ctl frames) */
+	if (dhd_alignctl) {
+		if ((doff = ((uintptr)frame % DHD_SDALIGN))) {
+			frame -= doff;
+			len += doff;
+			msglen += doff;
+			bzero(frame, doff + SDPCM_HDRLEN);
+		}
+		ASSERT(doff < DHD_SDALIGN);
+	}
+	doff += SDPCM_HDRLEN;
+
+	/* Round send length to next SDIO block */
+	if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+		uint16 pad = bus->blocksize - (len % bus->blocksize);
+		if ((pad <= bus->roundup) && (pad < bus->blocksize))
+			len += pad;
+	} else if (len % DHD_SDALIGN) {
+		len += DHD_SDALIGN - (len % DHD_SDALIGN);
+	}
+
+	/* Satisfy length-alignment requirements */
+	if (forcealign && (len & (ALIGNMENT - 1)))
+		len = ROUNDUP(len, ALIGNMENT);
+
+	ASSERT(ISALIGNED((uintptr)frame, 2));
+
+
+	/* Need to lock here to protect txseq and SDIO tx calls */
+	dhd_os_sdlock(bus->dhd);
+
+	BUS_WAKE(bus);
+
+	/* Make sure backplane clock is on */
+	dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+	/* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
+	*(uint16*)frame = htol16((uint16)msglen);
+	*(((uint16*)frame) + 1) = htol16(~msglen);
+
+	/* Software tag: channel, sequence number, data offset */
+	swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+	        | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+	htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
+	htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
+
+	if (!TXCTLOK(bus)) {
+		DHD_INFO(("%s: No bus credit bus->tx_max %d, bus->tx_seq %d\n",
+			__FUNCTION__, bus->tx_max, bus->tx_seq));
+		bus->ctrl_frame_stat = TRUE;
+		/* Send from dpc */
+		bus->ctrl_frame_buf = frame;
+		bus->ctrl_frame_len = len;
+		dhd_wait_for_event(bus->dhd, &bus->ctrl_frame_stat);
+		if (bus->ctrl_frame_stat == FALSE) {
+			DHD_INFO(("%s: ctrl_frame_stat == FALSE\n", __FUNCTION__));
+			ret = 0;
+		} else {
+			bus->dhd->txcnt_timeout++;
+			if (!bus->dhd->hang_was_sent)
+				DHD_ERROR(("%s: ctrl_frame_stat == TRUE txcnt_timeout=%d\n",
+					__FUNCTION__, bus->dhd->txcnt_timeout));
+			ret = -1;
+			bus->ctrl_frame_stat = FALSE;
+			goto done;
+		}
+	}
+
+	bus->dhd->txcnt_timeout = 0;
+
+	if (ret == -1) {
+#ifdef DHD_DEBUG
+		if (DHD_BYTES_ON() && DHD_CTL_ON()) {
+			prhex("Tx Frame", frame, len);
+		} else if (DHD_HDRS_ON()) {
+			prhex("TxHdr", frame, MIN(len, 16));
+		}
+#endif
+
+		do {
+			ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+			                          frame, len, NULL, 
NULL, NULL); + ASSERT(ret != BCME_PENDING); + + if (ret < 0) { + /* On failure, abort the command and terminate the frame */ + DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n", + __FUNCTION__, ret)); + bus->tx_sderrs++; + + bcmsdh_abort(sdh, SDIO_FUNC_2); + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, + SFC_WF_TERM, NULL); + bus->f1regdata++; + + for (i = 0; i < 3; i++) { + uint8 hi, lo; + hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_WFRAMEBCHI, NULL); + lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_WFRAMEBCLO, NULL); + bus->f1regdata += 2; + if ((hi == 0) && (lo == 0)) + break; + } + + } + if (ret == 0) { + bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; + } + } while ((ret < 0) && retries++ < TXRETRIES); + } + +done: + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, TRUE); + } + + dhd_os_sdunlock(bus->dhd); + + if (ret) + bus->dhd->tx_ctlerrs++; + else + bus->dhd->tx_ctlpkts++; + + if (bus->dhd->txcnt_timeout >= MAX_CNTL_TIMEOUT) + return -ETIMEDOUT; + + return ret ? -EIO : 0; +} + +int +dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen) +{ + int timeleft; + uint rxlen = 0; + bool pending = FALSE; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd->dongle_reset) + return -EIO; + + /* Wait until control frame is available */ + timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, &pending); + + dhd_os_sdlock(bus->dhd); + rxlen = bus->rxlen; + bcopy(bus->rxctl, msg, MIN(msglen, rxlen)); + bus->rxlen = 0; + dhd_os_sdunlock(bus->dhd); + + if (rxlen) { + DHD_CTL(("%s: resumed on rxctl frame, got %d expected %d\n", + __FUNCTION__, rxlen, msglen)); + } else if (timeleft == 0) { + DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__)); +#ifdef DHD_DEBUG + dhd_os_sdlock(bus->dhd); + dhdsdio_checkdied(bus, NULL, 0); + dhd_os_sdunlock(bus->dhd); +#endif /* DHD_DEBUG */ + } else if (pending == TRUE) { + /* signal pending */ + DHD_ERROR(("%s: signal pending\n", __FUNCTION__)); + return -EINTR; + } else { + DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__)); +#ifdef DHD_DEBUG + dhd_os_sdlock(bus->dhd); + dhdsdio_checkdied(bus, NULL, 0); + dhd_os_sdunlock(bus->dhd); +#endif /* DHD_DEBUG */ + } + if (timeleft == 0) { + bus->dhd->rxcnt_timeout++; + DHD_ERROR(("%s: rxcnt_timeout=%d\n", __FUNCTION__, bus->dhd->rxcnt_timeout)); + } + else + bus->dhd->rxcnt_timeout = 0; + + if (rxlen) + bus->dhd->rx_ctlpkts++; + else + bus->dhd->rx_ctlerrs++; + + if (bus->dhd->rxcnt_timeout >= MAX_CNTL_TIMEOUT) + return -ETIMEDOUT; + + return rxlen ? 
(int)rxlen : -EIO; +} + +/* IOVar table */ +enum { + IOV_INTR = 1, + IOV_POLLRATE, + IOV_SDREG, + IOV_SBREG, + IOV_SDCIS, + IOV_MEMBYTES, + IOV_MEMSIZE, +#ifdef DHD_DEBUG + IOV_CHECKDIED, + IOV_SERIALCONS, +#endif /* DHD_DEBUG */ + IOV_DOWNLOAD, + IOV_SOCRAM_STATE, + IOV_FORCEEVEN, + IOV_SDIOD_DRIVE, + IOV_READAHEAD, + IOV_SDRXCHAIN, + IOV_ALIGNCTL, + IOV_SDALIGN, + IOV_DEVRESET, + IOV_CPU, +#ifdef SDTEST + IOV_PKTGEN, + IOV_EXTLOOP, +#endif /* SDTEST */ + IOV_SPROM, + IOV_TXBOUND, + IOV_RXBOUND, + IOV_TXMINMAX, + IOV_IDLETIME, + IOV_IDLECLOCK, + IOV_SD1IDLE, + IOV_SLEEP, + IOV_DONGLEISOLATION, + IOV_VARS, +#ifdef SOFTAP + IOV_FWPATH +#endif +}; + +const bcm_iovar_t dhdsdio_iovars[] = { + {"intr", IOV_INTR, 0, IOVT_BOOL, 0 }, + {"sleep", IOV_SLEEP, 0, IOVT_BOOL, 0 }, + {"pollrate", IOV_POLLRATE, 0, IOVT_UINT32, 0 }, + {"idletime", IOV_IDLETIME, 0, IOVT_INT32, 0 }, + {"idleclock", IOV_IDLECLOCK, 0, IOVT_INT32, 0 }, + {"sd1idle", IOV_SD1IDLE, 0, IOVT_BOOL, 0 }, + {"membytes", IOV_MEMBYTES, 0, IOVT_BUFFER, 2 * sizeof(int) }, + {"memsize", IOV_MEMSIZE, 0, IOVT_UINT32, 0 }, + {"download", IOV_DOWNLOAD, 0, IOVT_BOOL, 0 }, + {"socram_state", IOV_SOCRAM_STATE, 0, IOVT_BOOL, 0 }, + {"vars", IOV_VARS, 0, IOVT_BUFFER, 0 }, + {"sdiod_drive", IOV_SDIOD_DRIVE, 0, IOVT_UINT32, 0 }, + {"readahead", IOV_READAHEAD, 0, IOVT_BOOL, 0 }, + {"sdrxchain", IOV_SDRXCHAIN, 0, IOVT_BOOL, 0 }, + {"alignctl", IOV_ALIGNCTL, 0, IOVT_BOOL, 0 }, + {"sdalign", IOV_SDALIGN, 0, IOVT_BOOL, 0 }, + {"devreset", IOV_DEVRESET, 0, IOVT_BOOL, 0 }, +#ifdef DHD_DEBUG + {"sdreg", IOV_SDREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sbreg", IOV_SBREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_cis", IOV_SDCIS, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN }, + {"forcealign", IOV_FORCEEVEN, 0, IOVT_BOOL, 0 }, + {"txbound", IOV_TXBOUND, 0, IOVT_UINT32, 0 }, + {"rxbound", IOV_RXBOUND, 0, IOVT_UINT32, 0 }, + {"txminmax", IOV_TXMINMAX, 0, IOVT_UINT32, 0 }, + {"cpu", IOV_CPU, 0, IOVT_BOOL, 0 }, +#ifdef DHD_DEBUG + {"checkdied", IOV_CHECKDIED, 0, IOVT_BUFFER, 0 }, + {"serial", IOV_SERIALCONS, 0, IOVT_UINT32, 0 }, +#endif /* DHD_DEBUG */ +#endif /* DHD_DEBUG */ +#ifdef SDTEST + {"extloop", IOV_EXTLOOP, 0, IOVT_BOOL, 0 }, + {"pktgen", IOV_PKTGEN, 0, IOVT_BUFFER, sizeof(dhd_pktgen_t) }, +#endif /* SDTEST */ + {"dngl_isolation", IOV_DONGLEISOLATION, 0, IOVT_UINT32, 0 }, +#ifdef SOFTAP + {"fwpath", IOV_FWPATH, 0, IOVT_BUFFER, 0 }, +#endif + {NULL, 0, 0, 0, 0 } +}; + +static void +dhd_dump_pct(struct bcmstrbuf *strbuf, char *desc, uint num, uint div) +{ + uint q1, q2; + + if (!div) { + bcm_bprintf(strbuf, "%s N/A", desc); + } else { + q1 = num / div; + q2 = (100 * (num - (q1 * div))) / div; + bcm_bprintf(strbuf, "%s %d.%02d", desc, q1, q2); + } +} + +void +dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + dhd_bus_t *bus = dhdp->bus; + + bcm_bprintf(strbuf, "Bus SDIO structure:\n"); + bcm_bprintf(strbuf, "hostintmask 0x%08x intstatus 0x%08x sdpcm_ver %d\n", + bus->hostintmask, bus->intstatus, bus->sdpcm_ver); + bcm_bprintf(strbuf, "fcstate %d qlen %d tx_seq %d, max %d, rxskip %d rxlen %d rx_seq %d\n", + bus->fcstate, pktq_len(&bus->txq), bus->tx_seq, bus->tx_max, bus->rxskip, + bus->rxlen, bus->rx_seq); + bcm_bprintf(strbuf, "intr %d intrcount %d lastintrs %d spurious %d\n", + bus->intr, bus->intrcount, bus->lastintrs, bus->spurious); + bcm_bprintf(strbuf, "pollrate %d pollcnt %d regfails %d\n", + bus->pollrate, bus->pollcnt, bus->regfails); + + bcm_bprintf(strbuf, "\nAdditional counters:\n"); + bcm_bprintf(strbuf, "tx_sderrs %d fcqueued %d rxrtx %d 
rx_toolong %d rxc_errors %d\n", + bus->tx_sderrs, bus->fcqueued, bus->rxrtx, bus->rx_toolong, + bus->rxc_errors); + bcm_bprintf(strbuf, "rx_hdrfail %d badhdr %d badseq %d\n", + bus->rx_hdrfail, bus->rx_badhdr, bus->rx_badseq); + bcm_bprintf(strbuf, "fc_rcvd %d, fc_xoff %d, fc_xon %d\n", + bus->fc_rcvd, bus->fc_xoff, bus->fc_xon); + bcm_bprintf(strbuf, "rxglomfail %d, rxglomframes %d, rxglompkts %d\n", + bus->rxglomfail, bus->rxglomframes, bus->rxglompkts); + bcm_bprintf(strbuf, "f2rx (hdrs/data) %d (%d/%d), f2tx %d f1regs %d\n", + (bus->f2rxhdrs + bus->f2rxdata), bus->f2rxhdrs, bus->f2rxdata, + bus->f2txdata, bus->f1regdata); + { + dhd_dump_pct(strbuf, "\nRx: pkts/f2rd", bus->dhd->rx_packets, + (bus->f2rxhdrs + bus->f2rxdata)); + dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->rx_packets, bus->f1regdata); + dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->rx_packets, + (bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata)); + dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->rx_packets, bus->intrcount); + bcm_bprintf(strbuf, "\n"); + + dhd_dump_pct(strbuf, "Rx: glom pct", (100 * bus->rxglompkts), + bus->dhd->rx_packets); + dhd_dump_pct(strbuf, ", pkts/glom", bus->rxglompkts, bus->rxglomframes); + bcm_bprintf(strbuf, "\n"); + + dhd_dump_pct(strbuf, "Tx: pkts/f2wr", bus->dhd->tx_packets, bus->f2txdata); + dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->tx_packets, bus->f1regdata); + dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->tx_packets, + (bus->f2txdata + bus->f1regdata)); + dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->tx_packets, bus->intrcount); + bcm_bprintf(strbuf, "\n"); + + dhd_dump_pct(strbuf, "Total: pkts/f2rw", + (bus->dhd->tx_packets + bus->dhd->rx_packets), + (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata)); + dhd_dump_pct(strbuf, ", pkts/f1sd", + (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->f1regdata); + dhd_dump_pct(strbuf, ", pkts/sd", + (bus->dhd->tx_packets + bus->dhd->rx_packets), + (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata)); + dhd_dump_pct(strbuf, ", pkts/int", + (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->intrcount); + bcm_bprintf(strbuf, "\n\n"); + } + +#ifdef SDTEST + if (bus->pktgen_count) { + bcm_bprintf(strbuf, "pktgen config and count:\n"); + bcm_bprintf(strbuf, "freq %d count %d print %d total %d min %d len %d\n", + bus->pktgen_freq, bus->pktgen_count, bus->pktgen_print, + bus->pktgen_total, bus->pktgen_minlen, bus->pktgen_maxlen); + bcm_bprintf(strbuf, "send attempts %d rcvd %d fail %d\n", + bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail); + } +#endif /* SDTEST */ +#ifdef DHD_DEBUG + bcm_bprintf(strbuf, "dpc_sched %d host interrupt%spending\n", + bus->dpc_sched, (bcmsdh_intr_pending(bus->sdh) ? 
" " : " not ")); + bcm_bprintf(strbuf, "blocksize %d roundup %d\n", bus->blocksize, bus->roundup); +#endif /* DHD_DEBUG */ + bcm_bprintf(strbuf, "clkstate %d activity %d idletime %d idlecount %d sleeping %d\n", + bus->clkstate, bus->activity, bus->idletime, bus->idlecount, bus->sleeping); +} + +void +dhd_bus_clearcounts(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus; + + bus->intrcount = bus->lastintrs = bus->spurious = bus->regfails = 0; + bus->rxrtx = bus->rx_toolong = bus->rxc_errors = 0; + bus->rx_hdrfail = bus->rx_badhdr = bus->rx_badseq = 0; + bus->tx_sderrs = bus->fc_rcvd = bus->fc_xoff = bus->fc_xon = 0; + bus->rxglomfail = bus->rxglomframes = bus->rxglompkts = 0; + bus->f2rxhdrs = bus->f2rxdata = bus->f2txdata = bus->f1regdata = 0; +} + +#ifdef SDTEST +static int +dhdsdio_pktgen_get(dhd_bus_t *bus, uint8 *arg) +{ + dhd_pktgen_t pktgen; + + pktgen.version = DHD_PKTGEN_VERSION; + pktgen.freq = bus->pktgen_freq; + pktgen.count = bus->pktgen_count; + pktgen.print = bus->pktgen_print; + pktgen.total = bus->pktgen_total; + pktgen.minlen = bus->pktgen_minlen; + pktgen.maxlen = bus->pktgen_maxlen; + pktgen.numsent = bus->pktgen_sent; + pktgen.numrcvd = bus->pktgen_rcvd; + pktgen.numfail = bus->pktgen_fail; + pktgen.mode = bus->pktgen_mode; + pktgen.stop = bus->pktgen_stop; + + bcopy(&pktgen, arg, sizeof(pktgen)); + + return 0; +} + +static int +dhdsdio_pktgen_set(dhd_bus_t *bus, uint8 *arg) +{ + dhd_pktgen_t pktgen; + uint oldcnt, oldmode; + + bcopy(arg, &pktgen, sizeof(pktgen)); + if (pktgen.version != DHD_PKTGEN_VERSION) + return BCME_BADARG; + + oldcnt = bus->pktgen_count; + oldmode = bus->pktgen_mode; + + bus->pktgen_freq = pktgen.freq; + bus->pktgen_count = pktgen.count; + bus->pktgen_print = pktgen.print; + bus->pktgen_total = pktgen.total; + bus->pktgen_minlen = pktgen.minlen; + bus->pktgen_maxlen = pktgen.maxlen; + bus->pktgen_mode = pktgen.mode; + bus->pktgen_stop = pktgen.stop; + + bus->pktgen_tick = bus->pktgen_ptick = 0; + bus->pktgen_len = MAX(bus->pktgen_len, bus->pktgen_minlen); + bus->pktgen_len = MIN(bus->pktgen_len, bus->pktgen_maxlen); + + /* Clear counts for a new pktgen (mode change, or was stopped) */ + if (bus->pktgen_count && (!oldcnt || oldmode != bus->pktgen_mode)) + bus->pktgen_sent = bus->pktgen_rcvd = bus->pktgen_fail = 0; + + return 0; +} +#endif /* SDTEST */ + +static int +dhdsdio_membytes(dhd_bus_t *bus, bool write, uint32 address, uint8 *data, uint size) +{ + int bcmerror = 0; + uint32 sdaddr; + uint dsize; + + /* Determine initial transfer parameters */ + sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK; + if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK) + dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr); + else + dsize = size; + + /* Set the backplane window to include the start address */ + if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) { + DHD_ERROR(("%s: window change failed\n", __FUNCTION__)); + goto xfer_done; + } + + /* Do the transfer(s) */ + while (size) { + DHD_INFO(("%s: %s %d bytes at offset 0x%08x in window 0x%08x\n", + __FUNCTION__, (write ? 
"write" : "read"), dsize, sdaddr, + (address & SBSDIO_SBWINDOW_MASK))); + if ((bcmerror = bcmsdh_rwdata(bus->sdh, write, sdaddr, data, dsize))) { + DHD_ERROR(("%s: membytes transfer failed\n", __FUNCTION__)); + break; + } + + /* Adjust for next transfer (if any) */ + if ((size -= dsize)) { + data += dsize; + address += dsize; + if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) { + DHD_ERROR(("%s: window change failed\n", __FUNCTION__)); + break; + } + sdaddr = 0; + dsize = MIN(SBSDIO_SB_OFT_ADDR_LIMIT, size); + } + + } + +xfer_done: + /* Return the window to backplane enumeration space for core access */ + if (dhdsdio_set_siaddr_window(bus, bcmsdh_cur_sbwad(bus->sdh))) { + DHD_ERROR(("%s: FAILED to set window back to 0x%x\n", __FUNCTION__, + bcmsdh_cur_sbwad(bus->sdh))); + } + + return bcmerror; +} + +#ifdef DHD_DEBUG +static int +dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh) +{ + uint32 addr; + int rv; + + /* Read last word in memory to determine address of sdpcm_shared structure */ + if ((rv = dhdsdio_membytes(bus, FALSE, bus->ramsize - 4, (uint8 *)&addr, 4)) < 0) + return rv; + + addr = ltoh32(addr); + + DHD_INFO(("sdpcm_shared address 0x%08X\n", addr)); + + /* + * Check if addr is valid. + * NVRAM length at the end of memory should have been overwritten. + */ + if (addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff)) { + DHD_ERROR(("%s: address (0x%08x) of sdpcm_shared invalid\n", __FUNCTION__, addr)); + return BCME_ERROR; + } + + /* Read hndrte_shared structure */ + if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)sh, sizeof(sdpcm_shared_t))) < 0) + return rv; + + /* Endianness */ + sh->flags = ltoh32(sh->flags); + sh->trap_addr = ltoh32(sh->trap_addr); + sh->assert_exp_addr = ltoh32(sh->assert_exp_addr); + sh->assert_file_addr = ltoh32(sh->assert_file_addr); + sh->assert_line = ltoh32(sh->assert_line); + sh->console_addr = ltoh32(sh->console_addr); + sh->msgtrace_addr = ltoh32(sh->msgtrace_addr); + + if ((sh->flags & SDPCM_SHARED_VERSION_MASK) == 3 && SDPCM_SHARED_VERSION == 1) + return BCME_OK; + + if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) { + DHD_ERROR(("%s: sdpcm_shared version %d in dhd " + "is different than sdpcm_shared version %d in dongle\n", + __FUNCTION__, SDPCM_SHARED_VERSION, + sh->flags & SDPCM_SHARED_VERSION_MASK)); + return BCME_ERROR; + } + + return BCME_OK; +} + + +static int +dhdsdio_readconsole(dhd_bus_t *bus) +{ + dhd_console_t *c = &bus->console; + uint8 line[CONSOLE_LINE_MAX], ch; + uint32 n, idx, addr; + int rv; + + /* Don't do anything until FWREADY updates console address */ + if (bus->console_addr == 0) + return 0; + + /* Read console log struct */ + addr = bus->console_addr + OFFSETOF(hndrte_cons_t, log); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0) + return rv; + + /* Allocate console buffer (one time only) */ + if (c->buf == NULL) { + c->bufsize = ltoh32(c->log.buf_size); + if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL) + return BCME_NOMEM; + } + + idx = ltoh32(c->log.idx); + + /* Protect against corrupt value */ + if (idx > c->bufsize) + return BCME_ERROR; + + /* Skip reading the console buffer if the index pointer has not moved */ + if (idx == c->last) + return BCME_OK; + + /* Read the console buffer */ + addr = ltoh32(c->log.buf); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0) + return rv; + + while (c->last != idx) { + for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) { + if (c->last == idx) { + /* This would output a partial 
line. Instead, back up + * the buffer pointer and output this line next time around. + */ + if (c->last >= n) + c->last -= n; + else + c->last = c->bufsize - n; + goto break2; + } + ch = c->buf[c->last]; + c->last = (c->last + 1) % c->bufsize; + if (ch == '\n') + break; + line[n] = ch; + } + + if (n > 0) { + if (line[n - 1] == '\r') + n--; + line[n] = 0; + printf("CONSOLE: %s\n", line); + } + } +break2: + + return BCME_OK; +} + +static int +dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size) +{ + int bcmerror = 0; + uint msize = 512; + char *mbuffer = NULL; + char *console_buffer = NULL; + uint maxstrlen = 256; + char *str = NULL; + trap_t tr; + sdpcm_shared_t sdpcm_shared; + struct bcmstrbuf strbuf; + uint32 console_ptr, console_size, console_index; + uint8 line[CONSOLE_LINE_MAX], ch; + uint32 n, i, addr; + int rv; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (data == NULL) { + /* + * Called after a rx ctrl timeout. "data" is NULL. + * allocate memory to trace the trap or assert. + */ + size = msize; + mbuffer = data = MALLOC(bus->dhd->osh, msize); + if (mbuffer == NULL) { + DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize)); + bcmerror = BCME_NOMEM; + goto done; + } + } + + if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) { + DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen)); + bcmerror = BCME_NOMEM; + goto done; + } + + if ((bcmerror = dhdsdio_readshared(bus, &sdpcm_shared)) < 0) + goto done; + + bcm_binit(&strbuf, data, size); + + bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n", + sdpcm_shared.msgtrace_addr, sdpcm_shared.console_addr); + + if ((sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) == 0) { + /* NOTE: Misspelled assert is intentional - DO NOT FIX. + * (Avoids conflict with real asserts for programmatic parsing of output.) + */ + bcm_bprintf(&strbuf, "Assrt not built in dongle\n"); + } + + if ((sdpcm_shared.flags & (SDPCM_SHARED_ASSERT|SDPCM_SHARED_TRAP)) == 0) { + /* NOTE: Misspelled assert is intentional - DO NOT FIX. + * (Avoids conflict with real asserts for programmatic parsing of output.) 
+ */ + bcm_bprintf(&strbuf, "No trap%s in dongle", + (sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) + ?"/assrt" :""); + } else { + if (sdpcm_shared.flags & SDPCM_SHARED_ASSERT) { + /* Download assert */ + bcm_bprintf(&strbuf, "Dongle assert"); + if (sdpcm_shared.assert_exp_addr != 0) { + str[0] = '\0'; + if ((bcmerror = dhdsdio_membytes(bus, FALSE, + sdpcm_shared.assert_exp_addr, + (uint8 *)str, maxstrlen)) < 0) + goto done; + + str[maxstrlen - 1] = '\0'; + bcm_bprintf(&strbuf, " expr \"%s\"", str); + } + + if (sdpcm_shared.assert_file_addr != 0) { + str[0] = '\0'; + if ((bcmerror = dhdsdio_membytes(bus, FALSE, + sdpcm_shared.assert_file_addr, + (uint8 *)str, maxstrlen)) < 0) + goto done; + + str[maxstrlen - 1] = '\0'; + bcm_bprintf(&strbuf, " file \"%s\"", str); + } + + bcm_bprintf(&strbuf, " line %d ", sdpcm_shared.assert_line); + } + + if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) { + if ((bcmerror = dhdsdio_membytes(bus, FALSE, + sdpcm_shared.trap_addr, + (uint8*)&tr, sizeof(trap_t))) < 0) + goto done; + + bcm_bprintf(&strbuf, + "Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x," + "lp 0x%x, rpc 0x%x Trap offset 0x%x, " + "r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, " + "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n", + ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr), + ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc), + ltoh32(sdpcm_shared.trap_addr), + ltoh32(tr.r0), ltoh32(tr.r1), ltoh32(tr.r2), ltoh32(tr.r3), + ltoh32(tr.r4), ltoh32(tr.r5), ltoh32(tr.r6), ltoh32(tr.r7)); + + addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, + (uint8 *)&console_ptr, sizeof(console_ptr))) < 0) + goto printbuf; + + addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log.buf_size); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, + (uint8 *)&console_size, sizeof(console_size))) < 0) + goto printbuf; + + addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log.idx); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, + (uint8 *)&console_index, sizeof(console_index))) < 0) + goto printbuf; + + console_ptr = ltoh32(console_ptr); + console_size = ltoh32(console_size); + console_index = ltoh32(console_index); + + if (console_size > CONSOLE_BUFFER_MAX || + !(console_buffer = MALLOC(bus->dhd->osh, console_size))) + goto printbuf; + + if ((rv = dhdsdio_membytes(bus, FALSE, console_ptr, + (uint8 *)console_buffer, console_size)) < 0) + goto printbuf; + + for (i = 0, n = 0; i < console_size; i += n + 1) { + for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) { + ch = console_buffer[(console_index + i + n) % console_size]; + if (ch == '\n') + break; + line[n] = ch; + } + + + if (n > 0) { + if (line[n - 1] == '\r') + n--; + line[n] = 0; + /* Don't use DHD_ERROR macro since we print + * a lot of information quickly. 
The macro + * will truncate a lot of the printfs + */ + + if (dhd_msg_level & DHD_ERROR_VAL) { + printf("CONSOLE: %s\n", line); + DHD_BLOG(line, strlen(line) + 1); + } + } + } + } + } + +printbuf: + if (sdpcm_shared.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP)) { + DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf)); + } + + +done: + if (mbuffer) + MFREE(bus->dhd->osh, mbuffer, msize); + if (str) + MFREE(bus->dhd->osh, str, maxstrlen); + if (console_buffer) + MFREE(bus->dhd->osh, console_buffer, console_size); + + return bcmerror; +} +#endif /* #ifdef DHD_DEBUG */ + + +int +dhdsdio_downloadvars(dhd_bus_t *bus, void *arg, int len) +{ + int bcmerror = BCME_OK; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Basic sanity checks */ + if (bus->dhd->up) { + bcmerror = BCME_NOTDOWN; + goto err; + } + if (!len) { + bcmerror = BCME_BUFTOOSHORT; + goto err; + } + + /* Free the old ones and replace with passed variables */ + if (bus->vars) + MFREE(bus->dhd->osh, bus->vars, bus->varsz); + + bus->vars = MALLOC(bus->dhd->osh, len); + bus->varsz = bus->vars ? len : 0; + if (bus->vars == NULL) { + bcmerror = BCME_NOMEM; + goto err; + } + + /* Copy the passed variables, which should include the terminating double-null */ + bcopy(arg, bus->vars, bus->varsz); +err: + return bcmerror; +} + +#ifdef DHD_DEBUG + +#define CC_PLL_CHIPCTRL_SERIAL_ENAB (1 << 24) +static int +dhd_serialconsole(dhd_bus_t *bus, bool set, bool enable, int *bcmerror) +{ + int int_val; + uint32 addr, data; + + + addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr); + data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data); + *bcmerror = 0; + + bcmsdh_reg_write(bus->sdh, addr, 4, 1); + if (bcmsdh_regfail(bus->sdh)) { + *bcmerror = BCME_SDIO_ERROR; + return -1; + } + int_val = bcmsdh_reg_read(bus->sdh, data, 4); + if (bcmsdh_regfail(bus->sdh)) { + *bcmerror = BCME_SDIO_ERROR; + return -1; + } + if (!set) + return (int_val & CC_PLL_CHIPCTRL_SERIAL_ENAB); + if (enable) + int_val |= CC_PLL_CHIPCTRL_SERIAL_ENAB; + else + int_val &= ~CC_PLL_CHIPCTRL_SERIAL_ENAB; + bcmsdh_reg_write(bus->sdh, data, 4, int_val); + if (bcmsdh_regfail(bus->sdh)) { + *bcmerror = BCME_SDIO_ERROR; + return -1; + } + if (bus->sih->chip == BCM4330_CHIP_ID) { + uint32 chipcontrol; + addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol); + chipcontrol = bcmsdh_reg_read(bus->sdh, addr, 4); + chipcontrol &= ~0x8; + if (enable) { + chipcontrol |= 0x8; + chipcontrol &= ~0x3; + } + bcmsdh_reg_write(bus->sdh, addr, 4, chipcontrol); + } + + return (int_val & CC_PLL_CHIPCTRL_SERIAL_ENAB); +} +#endif + +static int +dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name, + void *params, int plen, void *arg, int len, int val_size) +{ + int bcmerror = 0; + int32 int_val = 0; + bool bool_val = 0; + + DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n", + __FUNCTION__, actionid, name, params, plen, arg, len, val_size)); + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0) + goto exit; + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + bool_val = (int_val != 0) ? TRUE : FALSE; + + + /* Some ioctls use the bus */ + dhd_os_sdlock(bus->dhd); + + /* Check if dongle is in reset. 
If so, only allow DEVRESET iovars */ + if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) || + actionid == IOV_GVAL(IOV_DEVRESET))) { + bcmerror = BCME_NOTREADY; + goto exit; + } + + /* Handle sleep stuff before any clock mucking */ + if (vi->varid == IOV_SLEEP) { + if (IOV_ISSET(actionid)) { + bcmerror = dhdsdio_bussleep(bus, bool_val); + } else { + int_val = (int32)bus->sleeping; + bcopy(&int_val, arg, val_size); + } + goto exit; + } + + /* Request clock to allow SDIO accesses */ + if (!bus->dhd->dongle_reset) { + BUS_WAKE(bus); + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + } + + switch (actionid) { + case IOV_GVAL(IOV_INTR): + int_val = (int32)bus->intr; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_INTR): + bus->intr = bool_val; + bus->intdis = FALSE; + if (bus->dhd->up) { + if (bus->intr) { + DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__)); + bcmsdh_intr_enable(bus->sdh); + } else { + DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__)); + bcmsdh_intr_disable(bus->sdh); + } + } + break; + + case IOV_GVAL(IOV_POLLRATE): + int_val = (int32)bus->pollrate; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_POLLRATE): + bus->pollrate = (uint)int_val; + bus->poll = (bus->pollrate != 0); + break; + + case IOV_GVAL(IOV_IDLETIME): + int_val = bus->idletime; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_IDLETIME): + if ((int_val < 0) && (int_val != DHD_IDLE_IMMEDIATE)) { + bcmerror = BCME_BADARG; + } else { + bus->idletime = int_val; + } + break; + + case IOV_GVAL(IOV_IDLECLOCK): + int_val = (int32)bus->idleclock; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_IDLECLOCK): + bus->idleclock = int_val; + break; + + case IOV_GVAL(IOV_SD1IDLE): + int_val = (int32)sd1idle; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SD1IDLE): + sd1idle = bool_val; + break; + + + case IOV_SVAL(IOV_MEMBYTES): + case IOV_GVAL(IOV_MEMBYTES): + { + uint32 address; + uint size, dsize; + uint8 *data; + + bool set = (actionid == IOV_SVAL(IOV_MEMBYTES)); + + ASSERT(plen >= 2*sizeof(int)); + + address = (uint32)int_val; + bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val)); + size = (uint)int_val; + + /* Do some validation */ + dsize = set ? plen - (2 * sizeof(int)) : len; + if (dsize < size) { + DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n", + __FUNCTION__, (set ? "set" : "get"), address, size, dsize)); + bcmerror = BCME_BADARG; + break; + } + + DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n", __FUNCTION__, + (set ? 
"write" : "read"), size, address)); + + /* If we know about SOCRAM, check for a fit */ + if ((bus->orig_ramsize) && + ((address > bus->orig_ramsize) || (address + size > bus->orig_ramsize))) + { + uint8 enable, protect; + si_socdevram(bus->sih, FALSE, &enable, &protect); + if (!enable || protect) { + DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n", + __FUNCTION__, bus->orig_ramsize, size, address)); + DHD_ERROR(("%s: socram enable %d, protect %d\n", + __FUNCTION__, enable, protect)); + bcmerror = BCME_BADARG; + break; + } + if (enable && (bus->sih->chip == BCM4330_CHIP_ID)) { + uint32 devramsize = si_socdevram_size(bus->sih); + if ((address < SOCDEVRAM_4330_ARM_ADDR) || + (address + size > (SOCDEVRAM_4330_ARM_ADDR + devramsize))) { + DHD_ERROR(("%s: bad address 0x%08x, size 0x%08x\n", + __FUNCTION__, address, size)); + DHD_ERROR(("%s: socram range 0x%08x,size 0x%08x\n", + __FUNCTION__, SOCDEVRAM_4330_ARM_ADDR, devramsize)); + bcmerror = BCME_BADARG; + break; + } + /* move it such that address is real now */ + address -= SOCDEVRAM_4330_ARM_ADDR; + address += SOCDEVRAM_4330_BP_ADDR; + DHD_INFO(("%s: Request to %s %d bytes @ Mapped address 0x%08x\n", + __FUNCTION__, (set ? "write" : "read"), size, address)); + } + } + + /* Generate the actual data pointer */ + data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg; + + /* Call to do the transfer */ + bcmerror = dhdsdio_membytes(bus, set, address, data, size); + + break; + } + + case IOV_GVAL(IOV_MEMSIZE): + int_val = (int32)bus->ramsize; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_SDIOD_DRIVE): + int_val = (int32)dhd_sdiod_drive_strength; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDIOD_DRIVE): + dhd_sdiod_drive_strength = int_val; + si_sdiod_drive_strength_init(bus->sih, bus->dhd->osh, dhd_sdiod_drive_strength); + break; + + case IOV_SVAL(IOV_DOWNLOAD): + bcmerror = dhdsdio_download_state(bus, bool_val); + break; + + case IOV_SVAL(IOV_SOCRAM_STATE): + bcmerror = dhdsdio_download_state(bus, bool_val); + break; + + case IOV_SVAL(IOV_VARS): + bcmerror = dhdsdio_downloadvars(bus, arg, len); + break; + + case IOV_GVAL(IOV_READAHEAD): + int_val = (int32)dhd_readahead; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_READAHEAD): + if (bool_val && !dhd_readahead) + bus->nextlen = 0; + dhd_readahead = bool_val; + break; + + case IOV_GVAL(IOV_SDRXCHAIN): + int_val = (int32)bus->use_rxchain; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDRXCHAIN): + if (bool_val && !bus->sd_rxchain) + bcmerror = BCME_UNSUPPORTED; + else + bus->use_rxchain = bool_val; + break; + case IOV_GVAL(IOV_ALIGNCTL): + int_val = (int32)dhd_alignctl; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_ALIGNCTL): + dhd_alignctl = bool_val; + break; + + case IOV_GVAL(IOV_SDALIGN): + int_val = DHD_SDALIGN; + bcopy(&int_val, arg, val_size); + break; + +#ifdef DHD_DEBUG + case IOV_GVAL(IOV_VARS): + if (bus->varsz < (uint)len) + bcopy(bus->vars, arg, bus->varsz); + else + bcmerror = BCME_BUFTOOSHORT; + break; +#endif /* DHD_DEBUG */ + +#ifdef DHD_DEBUG + case IOV_GVAL(IOV_SDREG): + { + sdreg_t *sd_ptr; + uint32 addr, size; + + sd_ptr = (sdreg_t *)params; + + addr = (uintptr)bus->regs + sd_ptr->offset; + size = sd_ptr->func; + int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size); + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + bcopy(&int_val, arg, sizeof(int32)); + break; + } + + case IOV_SVAL(IOV_SDREG): + { + sdreg_t *sd_ptr; + uint32 addr, size; + + 
sd_ptr = (sdreg_t *)params; + + addr = (uintptr)bus->regs + sd_ptr->offset; + size = sd_ptr->func; + bcmsdh_reg_write(bus->sdh, addr, size, sd_ptr->value); + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + break; + } + + /* Same as above, but offset is not backplane (not SDIO core) */ + case IOV_GVAL(IOV_SBREG): + { + sdreg_t sdreg; + uint32 addr, size; + + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = SI_ENUM_BASE + sdreg.offset; + size = sdreg.func; + int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size); + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + bcopy(&int_val, arg, sizeof(int32)); + break; + } + + case IOV_SVAL(IOV_SBREG): + { + sdreg_t sdreg; + uint32 addr, size; + + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = SI_ENUM_BASE + sdreg.offset; + size = sdreg.func; + bcmsdh_reg_write(bus->sdh, addr, size, sdreg.value); + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + break; + } + + case IOV_GVAL(IOV_SDCIS): + { + *(char *)arg = 0; + + bcmstrcat(arg, "\nFunc 0\n"); + bcmsdh_cis_read(bus->sdh, 0x10, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT); + bcmstrcat(arg, "\nFunc 1\n"); + bcmsdh_cis_read(bus->sdh, 0x11, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT); + bcmstrcat(arg, "\nFunc 2\n"); + bcmsdh_cis_read(bus->sdh, 0x12, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT); + break; + } + + case IOV_GVAL(IOV_FORCEEVEN): + int_val = (int32)forcealign; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_FORCEEVEN): + forcealign = bool_val; + break; + + case IOV_GVAL(IOV_TXBOUND): + int_val = (int32)dhd_txbound; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_TXBOUND): + dhd_txbound = (uint)int_val; + break; + + case IOV_GVAL(IOV_RXBOUND): + int_val = (int32)dhd_rxbound; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_RXBOUND): + dhd_rxbound = (uint)int_val; + break; + + case IOV_GVAL(IOV_TXMINMAX): + int_val = (int32)dhd_txminmax; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_TXMINMAX): + dhd_txminmax = (uint)int_val; + break; + + case IOV_GVAL(IOV_SERIALCONS): + int_val = dhd_serialconsole(bus, FALSE, 0, &bcmerror); + if (bcmerror != 0) + break; + + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SERIALCONS): + dhd_serialconsole(bus, TRUE, bool_val, &bcmerror); + break; + + + +#endif /* DHD_DEBUG */ + + +#ifdef SDTEST + case IOV_GVAL(IOV_EXTLOOP): + int_val = (int32)bus->ext_loop; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_EXTLOOP): + bus->ext_loop = bool_val; + break; + + case IOV_GVAL(IOV_PKTGEN): + bcmerror = dhdsdio_pktgen_get(bus, arg); + break; + + case IOV_SVAL(IOV_PKTGEN): + bcmerror = dhdsdio_pktgen_set(bus, arg); + break; +#endif /* SDTEST */ + + + case IOV_GVAL(IOV_DONGLEISOLATION): + int_val = bus->dhd->dongle_isolation; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DONGLEISOLATION): + bus->dhd->dongle_isolation = bool_val; + break; + + case IOV_SVAL(IOV_DEVRESET): + DHD_TRACE(("%s: Called set IOV_DEVRESET=%d dongle_reset=%d busstate=%d\n", + __FUNCTION__, bool_val, bus->dhd->dongle_reset, + bus->dhd->busstate)); + + ASSERT(bus->dhd->osh); + /* ASSERT(bus->cl_devid); */ + + dhd_bus_devreset(bus->dhd, (uint8)bool_val); + + break; +#ifdef SOFTAP + case IOV_GVAL(IOV_FWPATH): + { + uint32 fw_path_len; + + fw_path_len = strlen(bus->fw_path); + DHD_INFO(("[softap] get fwpath, l=%d\n", len)); + + if (fw_path_len > len-1) { + bcmerror = BCME_BUFTOOSHORT; + break; + } + + if (fw_path_len) { + 
bcopy(bus->fw_path, arg, fw_path_len); + ((uchar*)arg)[fw_path_len] = 0; + } + break; + } + + case IOV_SVAL(IOV_FWPATH): + DHD_INFO(("[softap] set fwpath, idx=%d\n", int_val)); + + switch (int_val) { + case 1: + bus->fw_path = fw_path; /* ordinary one */ + break; + case 2: + bus->fw_path = fw_path2; + break; + default: + bcmerror = BCME_BADARG; + break; + } + + DHD_INFO(("[softap] new fw path: %s\n", (bus->fw_path[0] ? bus->fw_path : "NULL"))); + break; + +#endif /* SOFTAP */ + case IOV_GVAL(IOV_DEVRESET): + DHD_TRACE(("%s: Called get IOV_DEVRESET\n", __FUNCTION__)); + + /* Get its status */ + int_val = (bool) bus->dhd->dongle_reset; + bcopy(&int_val, arg, val_size); + + break; + + default: + bcmerror = BCME_UNSUPPORTED; + break; + } + +exit: + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, TRUE); + } + + dhd_os_sdunlock(bus->dhd); + + if (actionid == IOV_SVAL(IOV_DEVRESET) && bool_val == FALSE) + dhd_preinit_ioctls((dhd_pub_t *) bus->dhd); + + return bcmerror; +} + +static int +dhdsdio_write_vars(dhd_bus_t *bus) +{ + int bcmerror = 0; + uint32 varsize; + uint32 varaddr; + uint8 *vbuffer; + uint32 varsizew; +#ifdef DHD_DEBUG + uint8 *nvram_ularray; +#endif /* DHD_DEBUG */ + + /* Even if there are no vars are to be written, we still need to set the ramsize. */ + varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0; + varaddr = (bus->ramsize - 4) - varsize; + + if (bus->vars) { + if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 7)) { + if (((varaddr & 0x3C) == 0x3C) && (varsize > 4)) { + DHD_ERROR(("PR85623WAR in place\n")); + varsize += 4; + varaddr -= 4; + } + } + + vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize); + if (!vbuffer) + return BCME_NOMEM; + + bzero(vbuffer, varsize); + bcopy(bus->vars, vbuffer, bus->varsz); + + /* Write the vars list */ + bcmerror = dhdsdio_membytes(bus, TRUE, varaddr, vbuffer, varsize); +#ifdef DHD_DEBUG + /* Verify NVRAM bytes */ + DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize)); + nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize); + if (!nvram_ularray) + return BCME_NOMEM; + + /* Upload image to verify downloaded contents. */ + memset(nvram_ularray, 0xaa, varsize); + + /* Read the vars list to temp buffer for comparison */ + bcmerror = dhdsdio_membytes(bus, FALSE, varaddr, nvram_ularray, varsize); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n", + __FUNCTION__, bcmerror, varsize, varaddr)); + } + /* Compare the org NVRAM with the one read from RAM */ + if (memcmp(vbuffer, nvram_ularray, varsize)) { + DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__)); + } else + DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n", + __FUNCTION__)); + + MFREE(bus->dhd->osh, nvram_ularray, varsize); +#endif /* DHD_DEBUG */ + + MFREE(bus->dhd->osh, vbuffer, varsize); + } + + /* adjust to the user specified RAM */ + DHD_INFO(("Physical memory size: %d, usable memory size: %d\n", + bus->orig_ramsize, bus->ramsize)); + DHD_INFO(("Vars are at %d, orig varsize is %d\n", + varaddr, varsize)); + varsize = ((bus->orig_ramsize - 4) - varaddr); + + /* + * Determine the length token: + * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits. 
+ */ + if (bcmerror) { + varsizew = 0; + } else { + varsizew = varsize / 4; + varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF); + varsizew = htol32(varsizew); + } + + DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew)); + + /* Write the length token to the last word */ + bcmerror = dhdsdio_membytes(bus, TRUE, (bus->orig_ramsize - 4), + (uint8*)&varsizew, 4); + + return bcmerror; +} + +static int +dhdsdio_download_state(dhd_bus_t *bus, bool enter) +{ + uint retries; + int bcmerror = 0; + + if (!bus->sih) + return BCME_ERROR; + + /* To enter download state, disable ARM and reset SOCRAM. + * To exit download state, simply reset ARM (default is RAM boot). + */ + if (enter) { + bus->alp_only = TRUE; + + if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) && + !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + si_core_disable(bus->sih, 0); + if (bcmsdh_regfail(bus->sdh)) { + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + + if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + si_core_reset(bus->sih, 0, 0); + if (bcmsdh_regfail(bus->sdh)) { + DHD_ERROR(("%s: Failure trying reset SOCRAM core?\n", __FUNCTION__)); + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + + /* Clear the top bit of memory */ + if (bus->ramsize) { + uint32 zeros = 0; + if (dhdsdio_membytes(bus, TRUE, bus->ramsize - 4, (uint8*)&zeros, 4) < 0) { + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + } + } else { + if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + if (!si_iscoreup(bus->sih)) { + DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + if ((bcmerror = dhdsdio_write_vars(bus))) { + DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__)); + goto fail; + } + + if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) && + !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) { + DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries); + + + if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) && + !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + si_core_reset(bus->sih, 0, 0); + if (bcmsdh_regfail(bus->sdh)) { + DHD_ERROR(("%s: Failure trying to reset ARM core?\n", __FUNCTION__)); + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + + /* Allow HT Clock now that the ARM is running. 
*/ + bus->alp_only = FALSE; + + bus->dhd->busstate = DHD_BUS_LOAD; + } + +fail: + /* Always return to SDIOD core */ + if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) + si_setcore(bus->sih, SDIOD_CORE_ID, 0); + + return bcmerror; +} + +int +dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + dhd_bus_t *bus = dhdp->bus; + const bcm_iovar_t *vi = NULL; + int bcmerror = 0; + int val_size; + uint32 actionid; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(name); + ASSERT(len >= 0); + + /* Get MUST have return space */ + ASSERT(set || (arg && len)); + + /* Set does NOT take qualifiers */ + ASSERT(!set || (!params && !plen)); + + /* Look up var locally; if not found pass to host driver */ + if ((vi = bcm_iovar_lookup(dhdsdio_iovars, name)) == NULL) { + dhd_os_sdlock(bus->dhd); + + BUS_WAKE(bus); + + /* Turn on clock in case SD command needs backplane */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + bcmerror = bcmsdh_iovar_op(bus->sdh, name, params, plen, arg, len, set); + + /* Check for bus configuration changes of interest */ + + /* If it was divisor change, read the new one */ + if (set && strcmp(name, "sd_divisor") == 0) { + if (bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0, + &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) { + bus->sd_divisor = -1; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name)); + } else { + DHD_INFO(("%s: noted %s update, value now %d\n", + __FUNCTION__, name, bus->sd_divisor)); + } + } + /* If it was a mode change, read the new one */ + if (set && strcmp(name, "sd_mode") == 0) { + if (bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0, + &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) { + bus->sd_mode = -1; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name)); + } else { + DHD_INFO(("%s: noted %s update, value now %d\n", + __FUNCTION__, name, bus->sd_mode)); + } + } + /* Similar check for blocksize change */ + if (set && strcmp(name, "sd_blocksize") == 0) { + int32 fnum = 2; + if (bcmsdh_iovar_op(bus->sdh, "sd_blocksize", &fnum, sizeof(int32), + &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) { + bus->blocksize = 0; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize")); + } else { + DHD_INFO(("%s: noted %s update, value now %d\n", + __FUNCTION__, "sd_blocksize", bus->blocksize)); + } + } + bus->roundup = MIN(max_roundup, bus->blocksize); + + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, TRUE); + } + + dhd_os_sdunlock(bus->dhd); + goto exit; + } + + DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__, + name, (set ? "set" : "get"), len, plen)); + + /* set up 'params' pointer in case this is a set command so that + * the convenience int and bool code can be common to set and get + */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + /* all other types are integer sized */ + val_size = sizeof(int); + + actionid = set ? 
IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + bcmerror = dhdsdio_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size); + +exit: + return bcmerror; +} + +void +dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex) +{ + osl_t *osh; + uint32 local_hostintmask; + uint8 saveclk; + uint retries; + int err; + if (!bus->dhd) + return; + + osh = bus->dhd->osh; + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + bcmsdh_waitlockfree(NULL); + + if (enforce_mutex) + dhd_os_sdlock(bus->dhd); + + BUS_WAKE(bus); + + /* Change our idea of bus state */ + bus->dhd->busstate = DHD_BUS_DOWN; + + /* Enable clock for device interrupts */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Disable and clear interrupts at the chip level also */ + W_SDREG(0, &bus->regs->hostintmask, retries); + local_hostintmask = bus->hostintmask; + bus->hostintmask = 0; + + /* Force clocks on backplane to be sure F2 interrupt propagates */ + saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + if (!err) { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + (saveclk | SBSDIO_FORCE_HT), &err); + } + if (err) { + DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err)); + } + + /* Turn off the bus (F2), free any pending packets */ + DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__)); + bcmsdh_intr_disable(bus->sdh); + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL); + + /* Clear any pending interrupts now that F2 is disabled */ + W_SDREG(local_hostintmask, &bus->regs->intstatus, retries); + + /* Turn off the backplane clock (only) */ + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + + /* Clear the data packet queues */ + pktq_flush(osh, &bus->txq, TRUE, NULL, 0); + + /* Clear any held glomming stuff */ + if (bus->glomd) + PKTFREE(osh, bus->glomd, FALSE); + + if (bus->glom) + PKTFREE(osh, bus->glom, FALSE); + + bus->glom = bus->glomd = NULL; + + /* Clear rx control and wake any waiters */ + bus->rxlen = 0; + dhd_os_ioctl_resp_wake(bus->dhd); + + /* Reset some F2 state stuff */ + bus->rxskip = FALSE; + bus->tx_seq = bus->rx_seq = 0; + + /* Set to a safe default. It gets updated when we + * receive a packet from the fw but when we reset, + * we need a safe default to be able to send the + * initial mac address. + */ + bus->tx_max = 4; + + if (enforce_mutex) + dhd_os_sdunlock(bus->dhd); +} + + +int +dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) +{ + dhd_bus_t *bus = dhdp->bus; + dhd_timeout_t tmo; + uint retries = 0; + uint8 ready, enable; + int err, ret = 0; + uint8 saveclk; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(bus->dhd); + if (!bus->dhd) + return 0; + + if (enforce_mutex) + dhd_os_sdlock(bus->dhd); + + /* Make sure backplane clock is on, needed to generate F2 interrupt */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + if (bus->clkstate != CLK_AVAIL) { + DHD_ERROR(("%s: clock state is wrong. 
state = %d\n", __FUNCTION__, bus->clkstate)); + goto exit; + } + + + /* Force clocks on backplane to be sure F2 interrupt propagates */ + saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + if (!err) { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + (saveclk | SBSDIO_FORCE_HT), &err); + } + if (err) { + DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err)); + goto exit; + } + + /* Enable function 2 (frame transfers) */ + W_SDREG((SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT), + &bus->regs->tosbmailboxdata, retries); + enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2); + + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL); + + /* Give the dongle some time to do its thing and set IOR2 */ + dhd_timeout_start(&tmo, DHD_WAIT_F2RDY * 1000); + + ready = 0; + do { + ready = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL); + } while (ready != enable && !dhd_timeout_expired(&tmo)); + + DHD_INFO(("%s: enable 0x%02x, ready 0x%02x (waited %uus)\n", + __FUNCTION__, enable, ready, tmo.elapsed)); + + + /* If F2 successfully enabled, set core and enable interrupts */ + if (ready == enable) { + /* Make sure we're talking to the core. */ + if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0))) + bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0); + ASSERT(bus->regs != NULL); + + /* Set up the interrupt mask and enable interrupts */ + bus->hostintmask = HOSTINTMASK; + /* corerev 4 could use the newer interrupt logic to detect the frames */ + if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 4) && + (bus->rxint_mode != SDIO_DEVICE_HMB_RXINT)) { + bus->hostintmask &= ~I_HMB_FRAME_IND; + bus->hostintmask |= I_XMTDATA_AVAIL; + } + W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries); + + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, (uint8)watermark, &err); + + /* Set bus state according to enable result */ + dhdp->busstate = DHD_BUS_DATA; + + /* bcmsdh_intr_unmask(bus->sdh); */ + + bus->intdis = FALSE; + if (bus->intr) { + DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__)); + bcmsdh_intr_enable(bus->sdh); + } else { + DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__)); + bcmsdh_intr_disable(bus->sdh); + } + + } + + + else { + /* Disable F2 again */ + enable = SDIO_FUNC_ENABLE_1; + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL); + } + + /* Restore previous clock setting */ + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err); + + + /* If we didn't come up, turn off backplane clock */ + if (dhdp->busstate != DHD_BUS_DATA) + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + +exit: + if (enforce_mutex) + dhd_os_sdunlock(bus->dhd); + + return ret; +} + +static void +dhdsdio_rxfail(dhd_bus_t *bus, bool abort, bool rtx) +{ + bcmsdh_info_t *sdh = bus->sdh; + sdpcmd_regs_t *regs = bus->regs; + uint retries = 0; + uint16 lastrbc; + uint8 hi, lo; + int err; + + DHD_ERROR(("%s: %sterminate frame%s\n", __FUNCTION__, + (abort ? "abort command, " : ""), (rtx ? 
", send NAK" : ""))); + + if (abort) { + bcmsdh_abort(sdh, SDIO_FUNC_2); + } + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM, &err); + bus->f1regdata++; + + /* Wait until the packet has been flushed (device/FIFO stable) */ + for (lastrbc = retries = 0xffff; retries > 0; retries--) { + hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCHI, NULL); + lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCLO, NULL); + bus->f1regdata += 2; + + if ((hi == 0) && (lo == 0)) + break; + + if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) { + DHD_ERROR(("%s: count growing: last 0x%04x now 0x%04x\n", + __FUNCTION__, lastrbc, ((hi << 8) + lo))); + } + lastrbc = (hi << 8) + lo; + } + + if (!retries) { + DHD_ERROR(("%s: count never zeroed: last 0x%04x\n", __FUNCTION__, lastrbc)); + } else { + DHD_INFO(("%s: flush took %d iterations\n", __FUNCTION__, (0xffff - retries))); + } + + if (rtx) { + bus->rxrtx++; + W_SDREG(SMB_NAK, ®s->tosbmailbox, retries); + bus->f1regdata++; + if (retries <= retry_limit) { + bus->rxskip = TRUE; + } + } + + /* Clear partial in any case */ + bus->nextlen = 0; + + /* If we can't reach the device, signal failure */ + if (err || bcmsdh_regfail(sdh)) + bus->dhd->busstate = DHD_BUS_DOWN; +} + +static void +dhdsdio_read_control(dhd_bus_t *bus, uint8 *hdr, uint len, uint doff) +{ + bcmsdh_info_t *sdh = bus->sdh; + uint rdlen, pad; + + int sdret; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Control data already received in aligned rxctl */ + if ((bus->bus == SPI_BUS) && (!bus->usebufpool)) + goto gotpkt; + + ASSERT(bus->rxbuf); + /* Set rxctl for frame (w/optional alignment) */ + bus->rxctl = bus->rxbuf; + if (dhd_alignctl) { + bus->rxctl += firstread; + if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN))) + bus->rxctl += (DHD_SDALIGN - pad); + bus->rxctl -= firstread; + } + ASSERT(bus->rxctl >= bus->rxbuf); + + /* Copy the already-read portion over */ + bcopy(hdr, bus->rxctl, firstread); + if (len <= firstread) + goto gotpkt; + + /* Copy the full data pkt in gSPI case and process ioctl. 
*/ + if (bus->bus == SPI_BUS) { + bcopy(hdr, bus->rxctl, len); + goto gotpkt; + } + + /* Raise rdlen to next SDIO block to avoid tail command */ + rdlen = len - firstread; + if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) { + pad = bus->blocksize - (rdlen % bus->blocksize); + if ((pad <= bus->roundup) && (pad < bus->blocksize) && + ((len + pad) < bus->dhd->maxctl)) + rdlen += pad; + } else if (rdlen % DHD_SDALIGN) { + rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN); + } + + /* Satisfy length-alignment requirements */ + if (forcealign && (rdlen & (ALIGNMENT - 1))) + rdlen = ROUNDUP(rdlen, ALIGNMENT); + + /* Drop if the read is too big or it exceeds our maximum */ + if ((rdlen + firstread) > bus->dhd->maxctl) { + DHD_ERROR(("%s: %d-byte control read exceeds %d-byte buffer\n", + __FUNCTION__, rdlen, bus->dhd->maxctl)); + bus->dhd->rx_errors++; + dhdsdio_rxfail(bus, FALSE, FALSE); + goto done; + } + + if ((len - doff) > bus->dhd->maxctl) { + DHD_ERROR(("%s: %d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n", + __FUNCTION__, len, (len - doff), bus->dhd->maxctl)); + bus->dhd->rx_errors++; bus->rx_toolong++; + dhdsdio_rxfail(bus, FALSE, FALSE); + goto done; + } + + + /* Read remainder of frame body into the rxctl buffer */ + sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + (bus->rxctl + firstread), rdlen, NULL, NULL, NULL); + bus->f2rxdata++; + ASSERT(sdret != BCME_PENDING); + + /* Control frame failures need retransmission */ + if (sdret < 0) { + DHD_ERROR(("%s: read %d control bytes failed: %d\n", __FUNCTION__, rdlen, sdret)); + bus->rxc_errors++; /* dhd.rx_ctlerrs is higher level */ + dhdsdio_rxfail(bus, TRUE, TRUE); + goto done; + } + +gotpkt: + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_CTL_ON()) { + prhex("RxCtrl", bus->rxctl, len); + } +#endif + + /* Point to valid data and indicate its length */ + bus->rxctl += doff; + bus->rxlen = len - doff; + +done: + /* Awake any waiters */ + dhd_os_ioctl_resp_wake(bus->dhd); +} + +static uint8 +dhdsdio_rxglom(dhd_bus_t *bus, uint8 rxseq) +{ + uint16 dlen, totlen; + uint8 *dptr, num = 0; + + uint16 sublen, check; + void *pfirst, *plast, *pnext, *save_pfirst; + osl_t *osh = bus->dhd->osh; + + int errcode; + uint8 chan, seq, doff, sfdoff; + uint8 txmax; + + int ifidx = 0; + bool usechain = bus->use_rxchain; + + /* If packets, issue read(s) and send up packet chain */ + /* Return sequence numbers consumed? 
*/ + + DHD_TRACE(("dhdsdio_rxglom: start: glomd %p glom %p\n", bus->glomd, bus->glom)); + + /* If there's a descriptor, generate the packet chain */ + if (bus->glomd) { + dhd_os_sdlock_rxq(bus->dhd); + + pfirst = plast = pnext = NULL; + dlen = (uint16)PKTLEN(osh, bus->glomd); + dptr = PKTDATA(osh, bus->glomd); + if (!dlen || (dlen & 1)) { + DHD_ERROR(("%s: bad glomd len (%d), ignore descriptor\n", + __FUNCTION__, dlen)); + dlen = 0; + } + + for (totlen = num = 0; dlen; num++) { + /* Get (and move past) next length */ + sublen = ltoh16_ua(dptr); + dlen -= sizeof(uint16); + dptr += sizeof(uint16); + if ((sublen < SDPCM_HDRLEN) || + ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) { + DHD_ERROR(("%s: descriptor len %d bad: %d\n", + __FUNCTION__, num, sublen)); + pnext = NULL; + break; + } + if (sublen % DHD_SDALIGN) { + DHD_ERROR(("%s: sublen %d not a multiple of %d\n", + __FUNCTION__, sublen, DHD_SDALIGN)); + usechain = FALSE; + } + totlen += sublen; + + /* For last frame, adjust read len so total is a block multiple */ + if (!dlen) { + sublen += (ROUNDUP(totlen, bus->blocksize) - totlen); + totlen = ROUNDUP(totlen, bus->blocksize); + } + + /* Allocate/chain packet for next subframe */ + if ((pnext = PKTGET(osh, sublen + DHD_SDALIGN, FALSE)) == NULL) { + DHD_ERROR(("%s: PKTGET failed, num %d len %d\n", + __FUNCTION__, num, sublen)); + break; + } + ASSERT(!PKTLINK(pnext)); + if (!pfirst) { + ASSERT(!plast); + pfirst = plast = pnext; + } else { + ASSERT(plast); + PKTSETNEXT(osh, plast, pnext); + plast = pnext; + } + + /* Adhere to start alignment requirements */ + PKTALIGN(osh, pnext, sublen, DHD_SDALIGN); + } + + /* If all allocations succeeded, save packet chain in bus structure */ + if (pnext) { + DHD_GLOM(("%s: allocated %d-byte packet chain for %d subframes\n", + __FUNCTION__, totlen, num)); + if (DHD_GLOM_ON() && bus->nextlen) { + if (totlen != bus->nextlen) { + DHD_GLOM(("%s: glomdesc mismatch: nextlen %d glomdesc %d " + "rxseq %d\n", __FUNCTION__, bus->nextlen, + totlen, rxseq)); + } + } + bus->glom = pfirst; + pfirst = pnext = NULL; + } else { + if (pfirst) + PKTFREE(osh, pfirst, FALSE); + bus->glom = NULL; + num = 0; + } + + /* Done with descriptor packet */ + PKTFREE(osh, bus->glomd, FALSE); + bus->glomd = NULL; + bus->nextlen = 0; + + dhd_os_sdunlock_rxq(bus->dhd); + } + + /* Ok -- either we just generated a packet chain, or had one from before */ + if (bus->glom) { + if (DHD_GLOM_ON()) { + DHD_GLOM(("%s: attempt superframe read, packet chain:\n", __FUNCTION__)); + for (pnext = bus->glom; pnext; pnext = PKTNEXT(osh, pnext)) { + DHD_GLOM((" %p: %p len 0x%04x (%d)\n", + pnext, (uint8*)PKTDATA(osh, pnext), + PKTLEN(osh, pnext), PKTLEN(osh, pnext))); + } + } + + pfirst = bus->glom; + dlen = (uint16)pkttotlen(osh, pfirst); + + /* Do an SDIO read for the superframe. Configurable iovar to + * read directly into the chained packet, or allocate a large + * packet and and copy into the chain. 
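+	 * When chaining is not available, the whole superframe is read into the
+	 * preallocated bus->dataptr buffer and then copied into the packet
+	 * chain with pktfrombuf().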
+ */ + if (usechain) { + errcode = dhd_bcmsdh_recv_buf(bus, + bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2, + F2SYNC, (uint8*)PKTDATA(osh, pfirst), + dlen, pfirst, NULL, NULL); + } else if (bus->dataptr) { + errcode = dhd_bcmsdh_recv_buf(bus, + bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2, + F2SYNC, bus->dataptr, + dlen, NULL, NULL, NULL); + sublen = (uint16)pktfrombuf(osh, pfirst, 0, dlen, bus->dataptr); + if (sublen != dlen) { + DHD_ERROR(("%s: FAILED TO COPY, dlen %d sublen %d\n", + __FUNCTION__, dlen, sublen)); + errcode = -1; + } + pnext = NULL; + } else { + DHD_ERROR(("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n", dlen)); + errcode = -1; + } + bus->f2rxdata++; + ASSERT(errcode != BCME_PENDING); + + /* On failure, kill the superframe, allow a couple retries */ + if (errcode < 0) { + DHD_ERROR(("%s: glom read of %d bytes failed: %d\n", + __FUNCTION__, dlen, errcode)); + bus->dhd->rx_errors++; + + if (bus->glomerr++ < 3) { + dhdsdio_rxfail(bus, TRUE, TRUE); + } else { + bus->glomerr = 0; + dhdsdio_rxfail(bus, TRUE, FALSE); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(osh, bus->glom, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + bus->rxglomfail++; + bus->glom = NULL; + } + return 0; + } + +#ifdef DHD_DEBUG + if (DHD_GLOM_ON()) { + prhex("SUPERFRAME", PKTDATA(osh, pfirst), + MIN(PKTLEN(osh, pfirst), 48)); + } +#endif + + + /* Validate the superframe header */ + dptr = (uint8 *)PKTDATA(osh, pfirst); + sublen = ltoh16_ua(dptr); + check = ltoh16_ua(dptr + sizeof(uint16)); + + chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]); + seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]); + bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET]; + if ((bus->nextlen << 4) > MAX_RX_DATASZ) { + DHD_INFO(("%s: got frame w/nextlen too large (%d) seq %d\n", + __FUNCTION__, bus->nextlen, seq)); + bus->nextlen = 0; + } + doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); + txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); + + errcode = 0; + if ((uint16)~(sublen^check)) { + DHD_ERROR(("%s (superframe): HW hdr error: len/check 0x%04x/0x%04x\n", + __FUNCTION__, sublen, check)); + errcode = -1; + } else if (ROUNDUP(sublen, bus->blocksize) != dlen) { + DHD_ERROR(("%s (superframe): len 0x%04x, rounded 0x%04x, expect 0x%04x\n", + __FUNCTION__, sublen, ROUNDUP(sublen, bus->blocksize), dlen)); + errcode = -1; + } else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) != SDPCM_GLOM_CHANNEL) { + DHD_ERROR(("%s (superframe): bad channel %d\n", __FUNCTION__, + SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]))); + errcode = -1; + } else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) { + DHD_ERROR(("%s (superframe): got second descriptor?\n", __FUNCTION__)); + errcode = -1; + } else if ((doff < SDPCM_HDRLEN) || + (doff > (PKTLEN(osh, pfirst) - SDPCM_HDRLEN))) { + DHD_ERROR(("%s (superframe): Bad data offset %d: HW %d pkt %d min %d\n", + __FUNCTION__, doff, sublen, PKTLEN(osh, pfirst), SDPCM_HDRLEN)); + errcode = -1; + } + + /* Check sequence number of superframe SW header */ + if (rxseq != seq) { + DHD_INFO(("%s: (superframe) rx_seq %d, expected %d\n", + __FUNCTION__, seq, rxseq)); + bus->rx_badseq++; + rxseq = seq; + } + + /* Check window for sanity */ + if ((uint8)(txmax - bus->tx_seq) > 0x40) { + DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n", + __FUNCTION__, txmax, bus->tx_seq)); + txmax = bus->tx_max; + } + bus->tx_max = txmax; + + /* Remove superframe header, remember offset */ + PKTPULL(osh, pfirst, doff); + sfdoff = doff; + + /* Validate all the subframe headers */ + for (num = 0, pnext = 
pfirst; pnext && !errcode; + num++, pnext = PKTNEXT(osh, pnext)) { + dptr = (uint8 *)PKTDATA(osh, pnext); + dlen = (uint16)PKTLEN(osh, pnext); + sublen = ltoh16_ua(dptr); + check = ltoh16_ua(dptr + sizeof(uint16)); + chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]); + doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); +#ifdef DHD_DEBUG + if (DHD_GLOM_ON()) { + prhex("subframe", dptr, 32); + } +#endif + + if ((uint16)~(sublen^check)) { + DHD_ERROR(("%s (subframe %d): HW hdr error: " + "len/check 0x%04x/0x%04x\n", + __FUNCTION__, num, sublen, check)); + errcode = -1; + } else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) { + DHD_ERROR(("%s (subframe %d): length mismatch: " + "len 0x%04x, expect 0x%04x\n", + __FUNCTION__, num, sublen, dlen)); + errcode = -1; + } else if ((chan != SDPCM_DATA_CHANNEL) && + (chan != SDPCM_EVENT_CHANNEL)) { + DHD_ERROR(("%s (subframe %d): bad channel %d\n", + __FUNCTION__, num, chan)); + errcode = -1; + } else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) { + DHD_ERROR(("%s (subframe %d): Bad data offset %d: HW %d min %d\n", + __FUNCTION__, num, doff, sublen, SDPCM_HDRLEN)); + errcode = -1; + } + } + + if (errcode) { + /* Terminate frame on error, request a couple retries */ + if (bus->glomerr++ < 3) { + /* Restore superframe header space */ + PKTPUSH(osh, pfirst, sfdoff); + dhdsdio_rxfail(bus, TRUE, TRUE); + } else { + bus->glomerr = 0; + dhdsdio_rxfail(bus, TRUE, FALSE); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(osh, bus->glom, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + bus->rxglomfail++; + bus->glom = NULL; + } + bus->nextlen = 0; + return 0; + } + + /* Basic SD framing looks ok - process each packet (header) */ + save_pfirst = pfirst; + bus->glom = NULL; + plast = NULL; + + dhd_os_sdlock_rxq(bus->dhd); + for (num = 0; pfirst; rxseq++, pfirst = pnext) { + pnext = PKTNEXT(osh, pfirst); + PKTSETNEXT(osh, pfirst, NULL); + + dptr = (uint8 *)PKTDATA(osh, pfirst); + sublen = ltoh16_ua(dptr); + chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]); + seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]); + doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); + + DHD_GLOM(("%s: Get subframe %d, %p(%p/%d), sublen %d chan %d seq %d\n", + __FUNCTION__, num, pfirst, PKTDATA(osh, pfirst), + PKTLEN(osh, pfirst), sublen, chan, seq)); + + ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL)); + + if (rxseq != seq) { + DHD_GLOM(("%s: rx_seq %d, expected %d\n", + __FUNCTION__, seq, rxseq)); + bus->rx_badseq++; + rxseq = seq; + } + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_DATA_ON()) { + prhex("Rx Subframe Data", dptr, dlen); + } +#endif + + PKTSETLEN(osh, pfirst, sublen); + PKTPULL(osh, pfirst, doff); + + if (PKTLEN(osh, pfirst) == 0) { + PKTFREE(bus->dhd->osh, pfirst, FALSE); + if (plast) { + PKTSETNEXT(osh, plast, pnext); + } else { + ASSERT(save_pfirst == pfirst); + save_pfirst = pnext; + } + continue; + } else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pfirst) != 0) { + DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__)); + bus->dhd->rx_errors++; + PKTFREE(osh, pfirst, FALSE); + if (plast) { + PKTSETNEXT(osh, plast, pnext); + } else { + ASSERT(save_pfirst == pfirst); + save_pfirst = pnext; + } + continue; + } + + /* this packet will go up, link back into chain and count it */ + PKTSETNEXT(osh, pfirst, pnext); + plast = pfirst; + num++; + +#ifdef DHD_DEBUG + if (DHD_GLOM_ON()) { + DHD_GLOM(("%s subframe %d to stack, %p(%p/%d) nxt/lnk %p/%p\n", + __FUNCTION__, num, pfirst, + PKTDATA(osh, pfirst), PKTLEN(osh, pfirst), + PKTNEXT(osh, pfirst), 
PKTLINK(pfirst))); + prhex("", (uint8 *)PKTDATA(osh, pfirst), + MIN(PKTLEN(osh, pfirst), 32)); + } +#endif /* DHD_DEBUG */ + } + dhd_os_sdunlock_rxq(bus->dhd); + if (num) { + dhd_os_sdunlock(bus->dhd); + dhd_rx_frame(bus->dhd, ifidx, save_pfirst, num, 0); + dhd_os_sdlock(bus->dhd); + } + + bus->rxglomframes++; + bus->rxglompkts += num; + } + return num; +} + +/* Return TRUE if there may be more frames to read */ +static uint +dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished) +{ + osl_t *osh = bus->dhd->osh; + bcmsdh_info_t *sdh = bus->sdh; + + uint16 len, check; /* Extracted hardware header fields */ + uint8 chan, seq, doff; /* Extracted software header fields */ + uint8 fcbits; /* Extracted fcbits from software header */ + uint8 delta; + + void *pkt; /* Packet for event or data frames */ + uint16 pad; /* Number of pad bytes to read */ + uint16 rdlen; /* Total number of bytes to read */ + uint8 rxseq; /* Next sequence number to expect */ + uint rxleft = 0; /* Remaining number of frames allowed */ + int sdret; /* Return code from bcmsdh calls */ + uint8 txmax; /* Maximum tx sequence offered */ + bool len_consistent; /* Result of comparing readahead len and len from hw-hdr */ + uint8 *rxbuf; + int ifidx = 0; + uint rxcount = 0; /* Total frames read */ + +#if defined(DHD_DEBUG) || defined(SDTEST) + bool sdtest = FALSE; /* To limit message spew from test mode */ +#endif + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(maxframes); + +#ifdef SDTEST + /* Allow pktgen to override maxframes */ + if (bus->pktgen_count && (bus->pktgen_mode == DHD_PKTGEN_RECV)) { + maxframes = bus->pktgen_count; + sdtest = TRUE; + } +#endif + + /* Not finished unless we encounter no more frames indication */ + *finished = FALSE; + + + for (rxseq = bus->rx_seq, rxleft = maxframes; + !bus->rxskip && rxleft && bus->dhd->busstate != DHD_BUS_DOWN; + rxseq++, rxleft--) { + +#ifdef DHDTHREAD + /* tx more to improve rx performance */ + if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate && + pktq_mlen(&bus->txq, ~bus->flowcontrol) && DATAOK(bus)) { + dhdsdio_sendfromq(bus, dhd_txbound); + } +#endif /* DHDTHREAD */ + + /* Handle glomming separately */ + if (bus->glom || bus->glomd) { + uint8 cnt; + DHD_GLOM(("%s: calling rxglom: glomd %p, glom %p\n", + __FUNCTION__, bus->glomd, bus->glom)); + cnt = dhdsdio_rxglom(bus, rxseq); + DHD_GLOM(("%s: rxglom returned %d\n", __FUNCTION__, cnt)); + rxseq += cnt - 1; + rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1; + continue; + } + + /* Try doing single read if we can */ + if (dhd_readahead && bus->nextlen) { + uint16 nextlen = bus->nextlen; + bus->nextlen = 0; + + if (bus->bus == SPI_BUS) { + rdlen = len = nextlen; + } + else { + rdlen = len = nextlen << 4; + + /* Pad read to blocksize for efficiency */ + if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) { + pad = bus->blocksize - (rdlen % bus->blocksize); + if ((pad <= bus->roundup) && (pad < bus->blocksize) && + ((rdlen + pad + firstread) < MAX_RX_DATASZ)) + rdlen += pad; + } else if (rdlen % DHD_SDALIGN) { + rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN); + } + } + + /* We use bus->rxctl buffer in WinXP for initial control pkt receives. + * Later we use buffer-poll for data as well as control packets. + * This is required because dhd receives full frame in gSPI unlike SDIO. + * After the frame is received we have to distinguish whether it is data + * or non-data frame. 
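+			 * The distinction is made below from the channel field of the
+			 * software header: SDPCM_CONTROL_CHANNEL frames go to
+			 * dhdsdio_read_control(), all other channels continue down the
+			 * normal delivery path.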
+ */ + /* Allocate a packet buffer */ + dhd_os_sdlock_rxq(bus->dhd); + if (!(pkt = PKTGET(osh, rdlen + DHD_SDALIGN, FALSE))) { + if (bus->bus == SPI_BUS) { + bus->usebufpool = FALSE; + bus->rxctl = bus->rxbuf; + if (dhd_alignctl) { + bus->rxctl += firstread; + if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN))) + bus->rxctl += (DHD_SDALIGN - pad); + bus->rxctl -= firstread; + } + ASSERT(bus->rxctl >= bus->rxbuf); + rxbuf = bus->rxctl; + /* Read the entire frame */ + sdret = dhd_bcmsdh_recv_buf(bus, + bcmsdh_cur_sbwad(sdh), + SDIO_FUNC_2, + F2SYNC, rxbuf, rdlen, + NULL, NULL, NULL); + bus->f2rxdata++; + ASSERT(sdret != BCME_PENDING); + + + /* Control frame failures need retransmission */ + if (sdret < 0) { + DHD_ERROR(("%s: read %d control bytes failed: %d\n", + __FUNCTION__, rdlen, sdret)); + /* dhd.rx_ctlerrs is higher level */ + bus->rxc_errors++; + dhd_os_sdunlock_rxq(bus->dhd); + dhdsdio_rxfail(bus, TRUE, + (bus->bus == SPI_BUS) ? FALSE : TRUE); + continue; + } + } else { + /* Give up on data, request rtx of events */ + DHD_ERROR(("%s (nextlen): PKTGET failed: len %d rdlen %d " + "expected rxseq %d\n", + __FUNCTION__, len, rdlen, rxseq)); + /* Just go try again w/normal header read */ + dhd_os_sdunlock_rxq(bus->dhd); + continue; + } + } else { + if (bus->bus == SPI_BUS) + bus->usebufpool = TRUE; + + ASSERT(!PKTLINK(pkt)); + PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN); + rxbuf = (uint8 *)PKTDATA(osh, pkt); + /* Read the entire frame */ + sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), + SDIO_FUNC_2, + F2SYNC, rxbuf, rdlen, + pkt, NULL, NULL); + bus->f2rxdata++; + ASSERT(sdret != BCME_PENDING); + + if (sdret < 0) { + DHD_ERROR(("%s (nextlen): read %d bytes failed: %d\n", + __FUNCTION__, rdlen, sdret)); + PKTFREE(bus->dhd->osh, pkt, FALSE); + bus->dhd->rx_errors++; + dhd_os_sdunlock_rxq(bus->dhd); + /* Force retry w/normal header read. Don't attempt NAK for + * gSPI + */ + dhdsdio_rxfail(bus, TRUE, + (bus->bus == SPI_BUS) ? 
FALSE : TRUE); + continue; + } + } + dhd_os_sdunlock_rxq(bus->dhd); + + /* Now check the header */ + bcopy(rxbuf, bus->rxhdr, SDPCM_HDRLEN); + + /* Extract hardware header fields */ + len = ltoh16_ua(bus->rxhdr); + check = ltoh16_ua(bus->rxhdr + sizeof(uint16)); + + /* All zeros means readahead info was bad */ + if (!(len|check)) { + DHD_INFO(("%s (nextlen): read zeros in HW header???\n", + __FUNCTION__)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + GSPI_PR55150_BAILOUT; + continue; + } + + /* Validate check bytes */ + if ((uint16)~(len^check)) { + DHD_ERROR(("%s (nextlen): HW hdr error: nextlen/len/check" + " 0x%04x/0x%04x/0x%04x\n", __FUNCTION__, nextlen, + len, check)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + bus->rx_badhdr++; + dhdsdio_rxfail(bus, FALSE, FALSE); + GSPI_PR55150_BAILOUT; + continue; + } + + /* Validate frame length */ + if (len < SDPCM_HDRLEN) { + DHD_ERROR(("%s (nextlen): HW hdr length invalid: %d\n", + __FUNCTION__, len)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + GSPI_PR55150_BAILOUT; + continue; + } + + /* Check for consistency with readahead info */ + len_consistent = (nextlen != (ROUNDUP(len, 16) >> 4)); + if (len_consistent) { + /* Mismatch, force retry w/normal header (may be >4K) */ + DHD_ERROR(("%s (nextlen): mismatch, nextlen %d len %d rnd %d; " + "expected rxseq %d\n", + __FUNCTION__, nextlen, len, ROUNDUP(len, 16), rxseq)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + dhdsdio_rxfail(bus, TRUE, (bus->bus == SPI_BUS) ? FALSE : TRUE); + GSPI_PR55150_BAILOUT; + continue; + } + + + /* Extract software header fields */ + chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + + bus->nextlen = + bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET]; + if ((bus->nextlen << 4) > MAX_RX_DATASZ) { + DHD_INFO(("%s (nextlen): got frame w/nextlen too large" + " (%d), seq %d\n", __FUNCTION__, bus->nextlen, + seq)); + bus->nextlen = 0; + } + + bus->dhd->rx_readahead_cnt ++; + /* Handle Flow Control */ + fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + + delta = 0; + if (~bus->flowcontrol & fcbits) { + bus->fc_xoff++; + delta = 1; + } + if (bus->flowcontrol & ~fcbits) { + bus->fc_xon++; + delta = 1; + } + + if (delta) { + bus->fc_rcvd++; + bus->flowcontrol = fcbits; + } + + /* Check and update sequence number */ + if (rxseq != seq) { + DHD_INFO(("%s (nextlen): rx_seq %d, expected %d\n", + __FUNCTION__, seq, rxseq)); + bus->rx_badseq++; + rxseq = seq; + } + + /* Check window for sanity */ + if ((uint8)(txmax - bus->tx_seq) > 0x40) { + DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n", + __FUNCTION__, txmax, bus->tx_seq)); + txmax = bus->tx_max; + } + bus->tx_max = txmax; + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_DATA_ON()) { + prhex("Rx Data", rxbuf, len); + } else if (DHD_HDRS_ON()) { + prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN); + } +#endif + + if (chan == SDPCM_CONTROL_CHANNEL) { + if (bus->bus == SPI_BUS) { + dhdsdio_read_control(bus, rxbuf, len, doff); + if (bus->usebufpool) { + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(bus->dhd->osh, pkt, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + } + continue; + } else { + DHD_ERROR(("%s (nextlen): readahead on control" + " packet %d?\n", __FUNCTION__, seq)); + /* Force 
retry w/normal header read */ + bus->nextlen = 0; + dhdsdio_rxfail(bus, FALSE, TRUE); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + continue; + } + } + + if ((bus->bus == SPI_BUS) && !bus->usebufpool) { + DHD_ERROR(("Received %d bytes on %d channel. Running out of " + "rx pktbuf's or not yet malloced.\n", len, chan)); + continue; + } + + /* Validate data offset */ + if ((doff < SDPCM_HDRLEN) || (doff > len)) { + DHD_ERROR(("%s (nextlen): bad data offset %d: HW len %d min %d\n", + __FUNCTION__, doff, len, SDPCM_HDRLEN)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + ASSERT(0); + dhdsdio_rxfail(bus, FALSE, FALSE); + continue; + } + + /* All done with this one -- now deliver the packet */ + goto deliver; + } + /* gSPI frames should not be handled in fractions */ + if (bus->bus == SPI_BUS) { + break; + } + + /* Read frame header (hardware and software) */ + sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + bus->rxhdr, firstread, NULL, NULL, NULL); + bus->f2rxhdrs++; + ASSERT(sdret != BCME_PENDING); + + if (sdret < 0) { + DHD_ERROR(("%s: RXHEADER FAILED: %d\n", __FUNCTION__, sdret)); + bus->rx_hdrfail++; + dhdsdio_rxfail(bus, TRUE, TRUE); + continue; + } + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() || DHD_HDRS_ON()) { + prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN); + } +#endif + + /* Extract hardware header fields */ + len = ltoh16_ua(bus->rxhdr); + check = ltoh16_ua(bus->rxhdr + sizeof(uint16)); + + /* All zeros means no more frames */ + if (!(len|check)) { + *finished = TRUE; + break; + } + + /* Validate check bytes */ + if ((uint16)~(len^check)) { + DHD_ERROR(("%s: HW hdr error: len/check 0x%04x/0x%04x\n", + __FUNCTION__, len, check)); + bus->rx_badhdr++; + dhdsdio_rxfail(bus, FALSE, FALSE); + continue; + } + + /* Validate frame length */ + if (len < SDPCM_HDRLEN) { + DHD_ERROR(("%s: HW hdr length invalid: %d\n", __FUNCTION__, len)); + continue; + } + + /* Extract software header fields */ + chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + + /* Validate data offset */ + if ((doff < SDPCM_HDRLEN) || (doff > len)) { + DHD_ERROR(("%s: Bad data offset %d: HW len %d, min %d seq %d\n", + __FUNCTION__, doff, len, SDPCM_HDRLEN, seq)); + bus->rx_badhdr++; + ASSERT(0); + dhdsdio_rxfail(bus, FALSE, FALSE); + continue; + } + + /* Save the readahead length if there is one */ + bus->nextlen = bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET]; + if ((bus->nextlen << 4) > MAX_RX_DATASZ) { + DHD_INFO(("%s (nextlen): got frame w/nextlen too large (%d), seq %d\n", + __FUNCTION__, bus->nextlen, seq)); + bus->nextlen = 0; + } + + /* Handle Flow Control */ + fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + + delta = 0; + if (~bus->flowcontrol & fcbits) { + bus->fc_xoff++; + delta = 1; + } + if (bus->flowcontrol & ~fcbits) { + bus->fc_xon++; + delta = 1; + } + + if (delta) { + bus->fc_rcvd++; + bus->flowcontrol = fcbits; + } + + /* Check and update sequence number */ + if (rxseq != seq) { + DHD_INFO(("%s: rx_seq %d, expected %d\n", __FUNCTION__, seq, rxseq)); + bus->rx_badseq++; + rxseq = seq; + } + + /* Check window for sanity */ + if ((uint8)(txmax - bus->tx_seq) > 0x40) { + DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n", + __FUNCTION__, txmax, bus->tx_seq)); + txmax = bus->tx_max; + } + 
bus->tx_max = txmax; + + /* Call a separate function for control frames */ + if (chan == SDPCM_CONTROL_CHANNEL) { + dhdsdio_read_control(bus, bus->rxhdr, len, doff); + continue; + } + + ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL) || + (chan == SDPCM_TEST_CHANNEL) || (chan == SDPCM_GLOM_CHANNEL)); + + /* Length to read */ + rdlen = (len > firstread) ? (len - firstread) : 0; + + /* May pad read to blocksize for efficiency */ + if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) { + pad = bus->blocksize - (rdlen % bus->blocksize); + if ((pad <= bus->roundup) && (pad < bus->blocksize) && + ((rdlen + pad + firstread) < MAX_RX_DATASZ)) + rdlen += pad; + } else if (rdlen % DHD_SDALIGN) { + rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN); + } + + /* Satisfy length-alignment requirements */ + if (forcealign && (rdlen & (ALIGNMENT - 1))) + rdlen = ROUNDUP(rdlen, ALIGNMENT); + + if ((rdlen + firstread) > MAX_RX_DATASZ) { + /* Too long -- skip this frame */ + DHD_ERROR(("%s: too long: len %d rdlen %d\n", __FUNCTION__, len, rdlen)); + bus->dhd->rx_errors++; bus->rx_toolong++; + dhdsdio_rxfail(bus, FALSE, FALSE); + continue; + } + + dhd_os_sdlock_rxq(bus->dhd); + if (!(pkt = PKTGET(osh, (rdlen + firstread + DHD_SDALIGN), FALSE))) { + /* Give up on data, request rtx of events */ + DHD_ERROR(("%s: PKTGET failed: rdlen %d chan %d\n", + __FUNCTION__, rdlen, chan)); + bus->dhd->rx_dropped++; + dhd_os_sdunlock_rxq(bus->dhd); + dhdsdio_rxfail(bus, FALSE, RETRYCHAN(chan)); + continue; + } + dhd_os_sdunlock_rxq(bus->dhd); + + ASSERT(!PKTLINK(pkt)); + + /* Leave room for what we already read, and align remainder */ + ASSERT(firstread < (PKTLEN(osh, pkt))); + PKTPULL(osh, pkt, firstread); + PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN); + + /* Read the remaining frame data */ + sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + ((uint8 *)PKTDATA(osh, pkt)), rdlen, pkt, NULL, NULL); + bus->f2rxdata++; + ASSERT(sdret != BCME_PENDING); + + if (sdret < 0) { + DHD_ERROR(("%s: read %d %s bytes failed: %d\n", __FUNCTION__, rdlen, + ((chan == SDPCM_EVENT_CHANNEL) ? "event" : + ((chan == SDPCM_DATA_CHANNEL) ? 
"data" : "test")), sdret)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(bus->dhd->osh, pkt, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + bus->dhd->rx_errors++; + dhdsdio_rxfail(bus, TRUE, RETRYCHAN(chan)); + continue; + } + + /* Copy the already-read portion */ + PKTPUSH(osh, pkt, firstread); + bcopy(bus->rxhdr, PKTDATA(osh, pkt), firstread); + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_DATA_ON()) { + prhex("Rx Data", PKTDATA(osh, pkt), len); + } +#endif + +deliver: + /* Save superframe descriptor and allocate packet frame */ + if (chan == SDPCM_GLOM_CHANNEL) { + if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) { + DHD_GLOM(("%s: got glom descriptor, %d bytes:\n", + __FUNCTION__, len)); +#ifdef DHD_DEBUG + if (DHD_GLOM_ON()) { + prhex("Glom Data", PKTDATA(osh, pkt), len); + } +#endif + PKTSETLEN(osh, pkt, len); + ASSERT(doff == SDPCM_HDRLEN); + PKTPULL(osh, pkt, SDPCM_HDRLEN); + bus->glomd = pkt; + } else { + DHD_ERROR(("%s: glom superframe w/o descriptor!\n", __FUNCTION__)); + dhdsdio_rxfail(bus, FALSE, FALSE); + } + continue; + } + + /* Fill in packet len and prio, deliver upward */ + PKTSETLEN(osh, pkt, len); + PKTPULL(osh, pkt, doff); + +#ifdef SDTEST + /* Test channel packets are processed separately */ + if (chan == SDPCM_TEST_CHANNEL) { + dhdsdio_testrcv(bus, pkt, seq); + continue; + } +#endif /* SDTEST */ + + if (PKTLEN(osh, pkt) == 0) { + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(bus->dhd->osh, pkt, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + continue; + } else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pkt) != 0) { + DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(bus->dhd->osh, pkt, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + bus->dhd->rx_errors++; + continue; + } + + + /* Unlock during rx call */ + dhd_os_sdunlock(bus->dhd); + dhd_rx_frame(bus->dhd, ifidx, pkt, 1, chan); + dhd_os_sdlock(bus->dhd); + } + rxcount = maxframes - rxleft; +#ifdef DHD_DEBUG + /* Message if we hit the limit */ + if (!rxleft && !sdtest) + DHD_DATA(("%s: hit rx limit of %d frames\n", __FUNCTION__, maxframes)); + else +#endif /* DHD_DEBUG */ + DHD_DATA(("%s: processed %d frames\n", __FUNCTION__, rxcount)); + /* Back off rxseq if awaiting rtx, update rx_seq */ + if (bus->rxskip) + rxseq--; + bus->rx_seq = rxseq; + + return rxcount; +} + +static uint32 +dhdsdio_hostmail(dhd_bus_t *bus) +{ + sdpcmd_regs_t *regs = bus->regs; + uint32 intstatus = 0; + uint32 hmb_data; + uint8 fcbits; + uint retries = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Read mailbox data and ack that we did so */ + R_SDREG(hmb_data, ®s->tohostmailboxdata, retries); + if (retries <= retry_limit) + W_SDREG(SMB_INT_ACK, ®s->tosbmailbox, retries); + bus->f1regdata += 2; + + /* Dongle recomposed rx frames, accept them again */ + if (hmb_data & HMB_DATA_NAKHANDLED) { + DHD_INFO(("Dongle reports NAK handled, expect rtx of %d\n", bus->rx_seq)); + if (!bus->rxskip) { + DHD_ERROR(("%s: unexpected NAKHANDLED!\n", __FUNCTION__)); + } + bus->rxskip = FALSE; + intstatus |= FRAME_AVAIL_MASK(bus); + } + + /* + * DEVREADY does not occur with gSPI. 
+ */ + if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) { + bus->sdpcm_ver = (hmb_data & HMB_DATA_VERSION_MASK) >> HMB_DATA_VERSION_SHIFT; + if (bus->sdpcm_ver != SDPCM_PROT_VERSION) + DHD_ERROR(("Version mismatch, dongle reports %d, expecting %d\n", + bus->sdpcm_ver, SDPCM_PROT_VERSION)); + else + DHD_INFO(("Dongle ready, protocol version %d\n", bus->sdpcm_ver)); + /* make sure for the SDIO_DEVICE_RXDATAINT_MODE_1 corecontrol is proper */ + if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) && + (bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_1)) { + uint32 val; + + val = R_REG(bus->dhd->osh, &bus->regs->corecontrol); + val &= ~CC_XMTDATAAVAIL_MODE; + val |= CC_XMTDATAAVAIL_CTRL; + W_REG(bus->dhd->osh, &bus->regs->corecontrol, val); + + val = R_REG(bus->dhd->osh, &bus->regs->corecontrol); + } + +#ifdef DHD_DEBUG + /* Retrieve console state address now that firmware should have updated it */ + { + sdpcm_shared_t shared; + if (dhdsdio_readshared(bus, &shared) == 0) + bus->console_addr = shared.console_addr; + } +#endif /* DHD_DEBUG */ + } + + /* + * Flow Control has been moved into the RX headers and this out of band + * method isn't used any more. Leave this here for possibly remaining backward + * compatible with older dongles + */ + if (hmb_data & HMB_DATA_FC) { + fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >> HMB_DATA_FCDATA_SHIFT; + + if (fcbits & ~bus->flowcontrol) + bus->fc_xoff++; + if (bus->flowcontrol & ~fcbits) + bus->fc_xon++; + + bus->fc_rcvd++; + bus->flowcontrol = fcbits; + } + +#ifdef DHD_DEBUG + /* At least print a message if FW halted */ + if (hmb_data & HMB_DATA_FWHALT) { + DHD_ERROR(("INTERNAL ERROR: FIRMWARE HALTED\n")); + dhdsdio_checkdied(bus, NULL, 0); + } +#endif /* DHD_DEBUG */ + + /* Shouldn't be any others */ + if (hmb_data & ~(HMB_DATA_DEVREADY | + HMB_DATA_FWHALT | + HMB_DATA_NAKHANDLED | + HMB_DATA_FC | + HMB_DATA_FWREADY | + HMB_DATA_FCDATA_MASK | + HMB_DATA_VERSION_MASK)) { + DHD_ERROR(("Unknown mailbox data content: 0x%02x\n", hmb_data)); + } + + return intstatus; +} + +static bool +dhdsdio_dpc(dhd_bus_t *bus) +{ + bcmsdh_info_t *sdh = bus->sdh; + sdpcmd_regs_t *regs = bus->regs; + uint32 intstatus, newstatus = 0; + uint retries = 0; + uint rxlimit = dhd_rxbound; /* Rx frames to read before resched */ + uint txlimit = dhd_txbound; /* Tx frames to send before resched */ + uint framecnt = 0; /* Temporary counter of tx/rx frames */ + bool rxdone = TRUE; /* Flag for no more read data */ + bool resched = FALSE; /* Flag indicating resched wanted */ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__)); + bus->intstatus = 0; + return 0; + } + + /* Start with leftover status bits */ + intstatus = bus->intstatus; + + dhd_os_sdlock(bus->dhd); + + /* If waiting for HTAVAIL, check status */ + if (bus->clkstate == CLK_PENDING) { + int err; + uint8 clkctl, devctl = 0; + +#ifdef DHD_DEBUG + /* Check for inconsistent device control */ + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + if (err) { + DHD_ERROR(("%s: error reading DEVCTL: %d\n", __FUNCTION__, err)); + bus->dhd->busstate = DHD_BUS_DOWN; + } else { + ASSERT(devctl & SBSDIO_DEVCTL_CA_INT_ONLY); + } +#endif /* DHD_DEBUG */ + + /* Read CSR, if clock on switch to AVAIL, else ignore */ + clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + if (err) { + DHD_ERROR(("%s: error reading CSR: %d\n", __FUNCTION__, err)); + bus->dhd->busstate = DHD_BUS_DOWN; + } + + 
DHD_INFO(("DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n", devctl, clkctl)); + + if (SBSDIO_HTAV(clkctl)) { + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + if (err) { + DHD_ERROR(("%s: error reading DEVCTL: %d\n", + __FUNCTION__, err)); + bus->dhd->busstate = DHD_BUS_DOWN; + } + devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err); + if (err) { + DHD_ERROR(("%s: error writing DEVCTL: %d\n", + __FUNCTION__, err)); + bus->dhd->busstate = DHD_BUS_DOWN; + } + bus->clkstate = CLK_AVAIL; + } else { + goto clkwait; + } + } + + BUS_WAKE(bus); + + /* Make sure backplane clock is on */ + dhdsdio_clkctl(bus, CLK_AVAIL, TRUE); + if (bus->clkstate != CLK_AVAIL) + goto clkwait; + + /* Pending interrupt indicates new device status */ + if (bus->ipend) { + bus->ipend = FALSE; + R_SDREG(newstatus, ®s->intstatus, retries); + bus->f1regdata++; + if (bcmsdh_regfail(bus->sdh)) + newstatus = 0; + newstatus &= bus->hostintmask; + bus->fcstate = !!(newstatus & I_HMB_FC_STATE); + if (newstatus) { + bus->f1regdata++; + if ((bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_0) && + (newstatus == I_XMTDATA_AVAIL)) { + } + else + W_SDREG(newstatus, ®s->intstatus, retries); + } + } + + /* Merge new bits with previous */ + intstatus |= newstatus; + bus->intstatus = 0; + + /* Handle flow-control change: read new state in case our ack + * crossed another change interrupt. If change still set, assume + * FC ON for safety, let next loop through do the debounce. + */ + if (intstatus & I_HMB_FC_CHANGE) { + intstatus &= ~I_HMB_FC_CHANGE; + W_SDREG(I_HMB_FC_CHANGE, ®s->intstatus, retries); + R_SDREG(newstatus, ®s->intstatus, retries); + bus->f1regdata += 2; + bus->fcstate = !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)); + intstatus |= (newstatus & bus->hostintmask); + } + + /* Just being here means nothing more to do for chipactive */ + if (intstatus & I_CHIPACTIVE) { + /* ASSERT(bus->clkstate == CLK_AVAIL); */ + intstatus &= ~I_CHIPACTIVE; + } + + /* Handle host mailbox indication */ + if (intstatus & I_HMB_HOST_INT) { + intstatus &= ~I_HMB_HOST_INT; + intstatus |= dhdsdio_hostmail(bus); + } + + /* Generally don't ask for these, can get CRC errors... */ + if (intstatus & I_WR_OOSYNC) { + DHD_ERROR(("Dongle reports WR_OOSYNC\n")); + intstatus &= ~I_WR_OOSYNC; + } + + if (intstatus & I_RD_OOSYNC) { + DHD_ERROR(("Dongle reports RD_OOSYNC\n")); + intstatus &= ~I_RD_OOSYNC; + } + + if (intstatus & I_SBINT) { + DHD_ERROR(("Dongle reports SBINT\n")); + intstatus &= ~I_SBINT; + } + + /* Would be active due to wake-wlan in gSPI */ + if (intstatus & I_CHIPACTIVE) { + DHD_INFO(("Dongle reports CHIPACTIVE\n")); + intstatus &= ~I_CHIPACTIVE; + } + + /* Ignore frame indications if rxskip is set */ + if (bus->rxskip) { + intstatus &= ~FRAME_AVAIL_MASK(bus); + } + + /* On frame indication, read available frames */ + if (PKT_AVAILABLE(bus, intstatus)) { + framecnt = dhdsdio_readframes(bus, rxlimit, &rxdone); + if (rxdone || bus->rxskip) + intstatus &= ~FRAME_AVAIL_MASK(bus); + rxlimit -= MIN(framecnt, rxlimit); + } + + /* Keep still-pending events for next scheduling */ + bus->intstatus = intstatus; + +clkwait: + /* Re-enable interrupts to detect new device events (mailbox, rx frame) + * or clock availability. (Allows tx loop to check ipend if desired.) + * (Unless register access seems hosed, as we may not be able to ACK...) 
+ */ + if (bus->intr && bus->intdis && !bcmsdh_regfail(sdh)) { + DHD_INTR(("%s: enable SDIO interrupts, rxdone %d framecnt %d\n", + __FUNCTION__, rxdone, framecnt)); + bus->intdis = FALSE; +#if defined(OOB_INTR_ONLY) + bcmsdh_oob_intr_set(1); +#endif /* (OOB_INTR_ONLY) */ + bcmsdh_intr_enable(sdh); + } + +#if defined(OOB_INTR_ONLY) && !defined(HW_OOB) + /* In case of SW-OOB(using edge trigger), + * Check interrupt status in the dongle again after enable irq on the host. + * and rechedule dpc if interrupt is pended in the dongle. + * There is a chance to miss OOB interrupt while irq is disabled on the host. + * No need to do this with HW-OOB(level trigger) + */ + R_SDREG(newstatus, ®s->intstatus, retries); + if (bcmsdh_regfail(bus->sdh)) + newstatus = 0; + if (newstatus & bus->hostintmask) { + bus->ipend = TRUE; + resched = TRUE; + } +#endif /* defined(OOB_INTR_ONLY) && !defined(HW_OOB) */ + + if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL)) { + int ret, i; + uint8* frame_seq = bus->ctrl_frame_buf + SDPCM_FRAMETAG_LEN; + + if (*frame_seq != bus->tx_seq) { + DHD_INFO(("%s IOCTL frame seq lag detected!" + " frm_seq:%d != bus->tx_seq:%d, corrected\n", + __FUNCTION__, *frame_seq, bus->tx_seq)); + *frame_seq = bus->tx_seq; + } + + ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + (uint8 *)bus->ctrl_frame_buf, (uint32)bus->ctrl_frame_len, + NULL, NULL, NULL); + ASSERT(ret != BCME_PENDING); + + if (ret < 0) { + /* On failure, abort the command and terminate the frame */ + DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n", + __FUNCTION__, ret)); + bus->tx_sderrs++; + + bcmsdh_abort(sdh, SDIO_FUNC_2); + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, + SFC_WF_TERM, NULL); + bus->f1regdata++; + + for (i = 0; i < 3; i++) { + uint8 hi, lo; + hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_WFRAMEBCHI, NULL); + lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_WFRAMEBCLO, NULL); + bus->f1regdata += 2; + if ((hi == 0) && (lo == 0)) + break; + } + } + if (ret == 0) { + bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; + } + + bus->ctrl_frame_stat = FALSE; + dhd_wait_event_wakeup(bus->dhd); + } + /* Send queued frames (limit 1 if rx may still be pending) */ + else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate && + pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit && DATAOK(bus)) { + framecnt = rxdone ? txlimit : MIN(txlimit, dhd_txminmax); + framecnt = dhdsdio_sendfromq(bus, framecnt); + txlimit -= framecnt; + } + /* Resched the DPC if ctrl cmd is pending on bus credit */ + if (bus->ctrl_frame_stat) + resched = TRUE; + + /* Resched if events or tx frames are pending, else await next interrupt */ + /* On failed register access, all bets are off: no resched or interrupts */ + if ((bus->dhd->busstate == DHD_BUS_DOWN) || bcmsdh_regfail(sdh)) { + DHD_ERROR(("%s: failed backplane access over SDIO, halting operation %d \n", + __FUNCTION__, bcmsdh_regfail(sdh))); + bus->dhd->busstate = DHD_BUS_DOWN; + bus->intstatus = 0; + } else if (bus->clkstate == CLK_PENDING) { + /* Awaiting I_CHIPACTIVE; don't resched */ + } else if (bus->intstatus || bus->ipend || + (!bus->fcstate && pktq_mlen(&bus->txq, ~bus->flowcontrol) && DATAOK(bus)) || + PKT_AVAILABLE(bus, bus->intstatus)) { /* Read multiple frames */ + resched = TRUE; + } + + bus->dpc_sched = resched; + + /* If we're done for now, turn off clock request. 
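+	 * Only when idletime is DHD_IDLE_IMMEDIATE; otherwise the watchdog
+	 * timer drops the clock after bus->idletime idle ticks.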
*/ + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && (bus->clkstate != CLK_PENDING)) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } + + dhd_os_sdunlock(bus->dhd); + return resched; +} + +bool +dhd_bus_dpc(struct dhd_bus *bus) +{ + bool resched; + + /* Call the DPC directly. */ + DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__)); + resched = dhdsdio_dpc(bus); + + return resched; +} + +void +dhdsdio_isr(void *arg) +{ + dhd_bus_t *bus = (dhd_bus_t*)arg; + bcmsdh_info_t *sdh; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (!bus) { + DHD_ERROR(("%s : bus is null pointer , exit \n", __FUNCTION__)); + return; + } + sdh = bus->sdh; + + if (bus->dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__)); + return; + } + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Count the interrupt call */ + bus->intrcount++; + bus->ipend = TRUE; + + /* Shouldn't get this interrupt if we're sleeping? */ + if (bus->sleeping) { + DHD_ERROR(("INTERRUPT WHILE SLEEPING??\n")); + return; + } + + /* Disable additional interrupts (is this needed now)? */ + if (bus->intr) { + DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__)); + } else { + DHD_ERROR(("dhdsdio_isr() w/o interrupt configured!\n")); + } + + bcmsdh_intr_disable(sdh); + bus->intdis = TRUE; + +#if defined(SDIO_ISR_THREAD) + DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__)); + DHD_OS_WAKE_LOCK(bus->dhd); + while (dhdsdio_dpc(bus)); + DHD_OS_WAKE_UNLOCK(bus->dhd); +#else + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); +#endif + +} + +#ifdef SDTEST +static void +dhdsdio_pktgen_init(dhd_bus_t *bus) +{ + /* Default to specified length, or full range */ + if (dhd_pktgen_len) { + bus->pktgen_maxlen = MIN(dhd_pktgen_len, MAX_PKTGEN_LEN); + bus->pktgen_minlen = bus->pktgen_maxlen; + } else { + bus->pktgen_maxlen = MAX_PKTGEN_LEN; + bus->pktgen_minlen = 0; + } + bus->pktgen_len = (uint16)bus->pktgen_minlen; + + /* Default to per-watchdog burst with 10s print time */ + bus->pktgen_freq = 1; + bus->pktgen_print = 10000 / dhd_watchdog_ms; + bus->pktgen_count = (dhd_pktgen * dhd_watchdog_ms + 999) / 1000; + + /* Default to echo mode */ + bus->pktgen_mode = DHD_PKTGEN_ECHO; + bus->pktgen_stop = 1; +} + +static void +dhdsdio_pktgen(dhd_bus_t *bus) +{ + void *pkt; + uint8 *data; + uint pktcount; + uint fillbyte; + osl_t *osh = bus->dhd->osh; + uint16 len; + + /* Display current count if appropriate */ + if (bus->pktgen_print && (++bus->pktgen_ptick >= bus->pktgen_print)) { + bus->pktgen_ptick = 0; + printf("%s: send attempts %d rcvd %d\n", + __FUNCTION__, bus->pktgen_sent, bus->pktgen_rcvd); + } + + /* For recv mode, just make sure dongle has started sending */ + if (bus->pktgen_mode == DHD_PKTGEN_RECV) { + if (bus->pktgen_rcv_state == PKTGEN_RCV_IDLE) { + bus->pktgen_rcv_state = PKTGEN_RCV_ONGOING; + dhdsdio_sdtest_set(bus, (uint8)bus->pktgen_total); + } + return; + } + + /* Otherwise, generate or request the specified number of packets */ + for (pktcount = 0; pktcount < bus->pktgen_count; pktcount++) { + /* Stop if total has been reached */ + if (bus->pktgen_total && (bus->pktgen_sent >= bus->pktgen_total)) { + bus->pktgen_count = 0; + break; + } + + /* Allocate an appropriate-sized packet */ + len = bus->pktgen_len; + if (!(pkt = PKTGET(osh, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN), + TRUE))) {; + DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__)); + break; + } + PKTALIGN(osh, pkt, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN); + data = 
(uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN; + + /* Write test header cmd and extra based on mode */ + switch (bus->pktgen_mode) { + case DHD_PKTGEN_ECHO: + *data++ = SDPCM_TEST_ECHOREQ; + *data++ = (uint8)bus->pktgen_sent; + break; + + case DHD_PKTGEN_SEND: + *data++ = SDPCM_TEST_DISCARD; + *data++ = (uint8)bus->pktgen_sent; + break; + + case DHD_PKTGEN_RXBURST: + *data++ = SDPCM_TEST_BURST; + *data++ = (uint8)bus->pktgen_count; + break; + + default: + DHD_ERROR(("Unrecognized pktgen mode %d\n", bus->pktgen_mode)); + PKTFREE(osh, pkt, TRUE); + bus->pktgen_count = 0; + return; + } + + /* Write test header length field */ + *data++ = (len >> 0); + *data++ = (len >> 8); + + /* Then fill in the remainder -- N/A for burst, but who cares... */ + for (fillbyte = 0; fillbyte < len; fillbyte++) + *data++ = SDPCM_TEST_FILL(fillbyte, (uint8)bus->pktgen_sent); + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_DATA_ON()) { + data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN; + prhex("dhdsdio_pktgen: Tx Data", data, PKTLEN(osh, pkt) - SDPCM_HDRLEN); + } +#endif + + /* Send it */ + if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE)) { + bus->pktgen_fail++; + if (bus->pktgen_stop && bus->pktgen_stop == bus->pktgen_fail) + bus->pktgen_count = 0; + } + bus->pktgen_sent++; + + /* Bump length if not fixed, wrap at max */ + if (++bus->pktgen_len > bus->pktgen_maxlen) + bus->pktgen_len = (uint16)bus->pktgen_minlen; + + /* Special case for burst mode: just send one request! */ + if (bus->pktgen_mode == DHD_PKTGEN_RXBURST) + break; + } +} + +static void +dhdsdio_sdtest_set(dhd_bus_t *bus, uint8 count) +{ + void *pkt; + uint8 *data; + osl_t *osh = bus->dhd->osh; + + /* Allocate the packet */ + if (!(pkt = PKTGET(osh, SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN, TRUE))) { + DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__)); + return; + } + PKTALIGN(osh, pkt, (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN); + data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN; + + /* Fill in the test header */ + *data++ = SDPCM_TEST_SEND; + *data++ = count; + *data++ = (bus->pktgen_maxlen >> 0); + *data++ = (bus->pktgen_maxlen >> 8); + + /* Send it */ + if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE)) + bus->pktgen_fail++; +} + + +static void +dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq) +{ + osl_t *osh = bus->dhd->osh; + uint8 *data; + uint pktlen; + + uint8 cmd; + uint8 extra; + uint16 len; + uint16 offset; + + /* Check for min length */ + if ((pktlen = PKTLEN(osh, pkt)) < SDPCM_TEST_HDRLEN) { + DHD_ERROR(("dhdsdio_restrcv: toss runt frame, pktlen %d\n", pktlen)); + PKTFREE(osh, pkt, FALSE); + return; + } + + /* Extract header fields */ + data = PKTDATA(osh, pkt); + cmd = *data++; + extra = *data++; + len = *data++; len += *data++ << 8; + DHD_TRACE(("%s:cmd:%d, xtra:%d,len:%d\n", __FUNCTION__, cmd, extra, len)); + /* Check length for relevant commands */ + if (cmd == SDPCM_TEST_DISCARD || cmd == SDPCM_TEST_ECHOREQ || cmd == SDPCM_TEST_ECHORSP) { + if (pktlen != len + SDPCM_TEST_HDRLEN) { + DHD_ERROR(("dhdsdio_testrcv: frame length mismatch, pktlen %d seq %d" + " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len)); + PKTFREE(osh, pkt, FALSE); + return; + } + } + + /* Process as per command */ + switch (cmd) { + case SDPCM_TEST_ECHOREQ: + /* Rx->Tx turnaround ok (even on NDIS w/current implementation) */ + *(uint8 *)(PKTDATA(osh, pkt)) = SDPCM_TEST_ECHORSP; + if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE) == 0) { + bus->pktgen_sent++; + } else { + bus->pktgen_fail++; + PKTFREE(osh, pkt, FALSE); + } + 
bus->pktgen_rcvd++; + break; + + case SDPCM_TEST_ECHORSP: + if (bus->ext_loop) { + PKTFREE(osh, pkt, FALSE); + bus->pktgen_rcvd++; + break; + } + + for (offset = 0; offset < len; offset++, data++) { + if (*data != SDPCM_TEST_FILL(offset, extra)) { + DHD_ERROR(("dhdsdio_testrcv: echo data mismatch: " + "offset %d (len %d) expect 0x%02x rcvd 0x%02x\n", + offset, len, SDPCM_TEST_FILL(offset, extra), *data)); + break; + } + } + PKTFREE(osh, pkt, FALSE); + bus->pktgen_rcvd++; + break; + + case SDPCM_TEST_DISCARD: + { + int i = 0; + uint8 *prn = data; + uint8 testval = extra; + for (i = 0; i < len; i++) { + if (*prn != testval) { + DHD_ERROR(("DIErr@Pkt#:%d,Ix:%d, expected:0x%x, got:0x%x\n", + i, bus->pktgen_rcvd_rcvsession, testval, *prn)); + prn++; testval++; + } + } + } + PKTFREE(osh, pkt, FALSE); + bus->pktgen_rcvd++; + break; + + case SDPCM_TEST_BURST: + case SDPCM_TEST_SEND: + default: + DHD_INFO(("dhdsdio_testrcv: unsupported or unknown command, pktlen %d seq %d" + " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len)); + PKTFREE(osh, pkt, FALSE); + break; + } + + /* For recv mode, stop at limit (and tell dongle to stop sending) */ + if (bus->pktgen_mode == DHD_PKTGEN_RECV) { + if (bus->pktgen_rcv_state != PKTGEN_RCV_IDLE) { + bus->pktgen_rcvd_rcvsession++; + + if (bus->pktgen_total && + (bus->pktgen_rcvd_rcvsession >= bus->pktgen_total)) { + bus->pktgen_count = 0; + DHD_ERROR(("Pktgen:rcv test complete!\n")); + bus->pktgen_rcv_state = PKTGEN_RCV_IDLE; + dhdsdio_sdtest_set(bus, FALSE); + bus->pktgen_rcvd_rcvsession = 0; + } + } + } +} +#endif /* SDTEST */ + +extern void +dhd_disable_intr(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus; + bus = dhdp->bus; + bcmsdh_intr_disable(bus->sdh); +} + +extern bool +dhd_bus_watchdog(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus; + + DHD_TIMER(("%s: Enter\n", __FUNCTION__)); + + bus = dhdp->bus; + + if (bus->dhd->dongle_reset) + return FALSE; + + /* Ignore the timer if simulating bus down */ + if (bus->sleeping) + return FALSE; + + if (dhdp->busstate == DHD_BUS_DOWN) + return FALSE; + + /* Poll period: check device if appropriate. 
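+	 * Polling only kicks in when interrupts are disabled or appear to have
+	 * stopped arriving; bus->pollrate watchdog ticks elapse between checks.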
*/ + if (bus->poll && (++bus->polltick >= bus->pollrate)) { + uint32 intstatus = 0; + + /* Reset poll tick */ + bus->polltick = 0; + + /* Check device if no interrupts */ + if (!bus->intr || (bus->intrcount == bus->lastintrs)) { + + if (!bus->dpc_sched) { + uint8 devpend; + devpend = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, + SDIOD_CCCR_INTPEND, NULL); + intstatus = devpend & (INTR_STATUS_FUNC1 | INTR_STATUS_FUNC2); + } + + /* If there is something, make like the ISR and schedule the DPC */ + if (intstatus) { + bus->pollcnt++; + bus->ipend = TRUE; + if (bus->intr) { + bcmsdh_intr_disable(bus->sdh); + } + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + + } + } + + /* Update interrupt tracking */ + bus->lastintrs = bus->intrcount; + } + +#ifdef DHD_DEBUG + /* Poll for console output periodically */ + if (dhdp->busstate == DHD_BUS_DATA && dhd_console_ms != 0) { + bus->console.count += dhd_watchdog_ms; + if (bus->console.count >= dhd_console_ms) { + bus->console.count -= dhd_console_ms; + /* Make sure backplane clock is on */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + if (dhdsdio_readconsole(bus) < 0) + dhd_console_ms = 0; /* On error, stop trying */ + } + } +#endif /* DHD_DEBUG */ + +#ifdef SDTEST + /* Generate packets if configured */ + if (bus->pktgen_count && (++bus->pktgen_tick >= bus->pktgen_freq)) { + /* Make sure backplane clock is on */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + bus->pktgen_tick = 0; + dhdsdio_pktgen(bus); + } +#endif + + /* On idle timeout clear activity flag and/or turn off clock */ + if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) { + if (++bus->idlecount >= bus->idletime) { + bus->idlecount = 0; + if (bus->activity) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } + } + } + + return bus->ipend; +} + +#ifdef DHD_DEBUG +extern int +dhd_bus_console_in(dhd_pub_t *dhdp, uchar *msg, uint msglen) +{ + dhd_bus_t *bus = dhdp->bus; + uint32 addr, val; + int rv; + void *pkt; + + /* Address could be zero if CONSOLE := 0 in dongle Makefile */ + if (bus->console_addr == 0) + return BCME_UNSUPPORTED; + + /* Exclusive bus access */ + dhd_os_sdlock(bus->dhd); + + /* Don't allow input if dongle is in reset */ + if (bus->dhd->dongle_reset) { + dhd_os_sdunlock(bus->dhd); + return BCME_NOTREADY; + } + + /* Request clock to allow SDIO accesses */ + BUS_WAKE(bus); + /* No pend allowed since txpkt is called later, ht clk has to be on */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Zero cbuf_index */ + addr = bus->console_addr + OFFSETOF(hndrte_cons_t, cbuf_idx); + val = htol32(0); + if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0) + goto done; + + /* Write message into cbuf */ + addr = bus->console_addr + OFFSETOF(hndrte_cons_t, cbuf); + if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0) + goto done; + + /* Write length into vcons_in */ + addr = bus->console_addr + OFFSETOF(hndrte_cons_t, vcons_in); + val = htol32(msglen); + if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0) + goto done; + + /* Bump dongle by sending an empty packet on the event channel. + * sdpcm_sendup (RX) checks for virtual console input. 
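+	 * The dummy packet carries no console data itself; transmitting it just
+	 * exercises the dongle's receive path so it notices the vcons_in length
+	 * written above.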
+ */ + if ((pkt = PKTGET(bus->dhd->osh, 4 + SDPCM_RESERVE, TRUE)) != NULL) + dhdsdio_txpkt(bus, pkt, SDPCM_EVENT_CHANNEL, TRUE); + +done: + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, TRUE); + } + + dhd_os_sdunlock(bus->dhd); + + return rv; +} +#endif /* DHD_DEBUG */ + +#ifdef DHD_DEBUG +static void +dhd_dump_cis(uint fn, uint8 *cis) +{ + uint byte, tag, tdata; + DHD_INFO(("Function %d CIS:\n", fn)); + + for (tdata = byte = 0; byte < SBSDIO_CIS_SIZE_LIMIT; byte++) { + if ((byte % 16) == 0) + DHD_INFO((" ")); + DHD_INFO(("%02x ", cis[byte])); + if ((byte % 16) == 15) + DHD_INFO(("\n")); + if (!tdata--) { + tag = cis[byte]; + if (tag == 0xff) + break; + else if (!tag) + tdata = 0; + else if ((byte + 1) < SBSDIO_CIS_SIZE_LIMIT) + tdata = cis[byte + 1] + 1; + else + DHD_INFO(("]")); + } + } + if ((byte % 16) != 15) + DHD_INFO(("\n")); +} +#endif /* DHD_DEBUG */ + +static bool +dhdsdio_chipmatch(uint16 chipid) +{ + if (chipid == BCM4325_CHIP_ID) + return TRUE; + if (chipid == BCM4329_CHIP_ID) + return TRUE; + if (chipid == BCM4315_CHIP_ID) + return TRUE; + if (chipid == BCM4319_CHIP_ID) + return TRUE; + if (chipid == BCM4330_CHIP_ID) + return TRUE; + if (chipid == BCM43239_CHIP_ID) + return TRUE; + if (chipid == BCM4336_CHIP_ID) + return TRUE; + if (chipid == BCM43237_CHIP_ID) + return TRUE; + if (chipid == BCM43362_CHIP_ID) + return TRUE; + + return FALSE; +} + +static void * +dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot, + uint16 func, uint bustype, void *regsva, osl_t * osh, void *sdh) +{ + int ret; + dhd_bus_t *bus; + dhd_cmn_t *cmn; +#ifdef GET_CUSTOM_MAC_ENABLE + struct ether_addr ea_addr; +#endif /* GET_CUSTOM_MAC_ENABLE */ +#ifdef PROP_TXSTATUS + uint up = 0; +#endif + + /* Init global variables at run-time, not as part of the declaration. + * This is required to support init/de-init of the driver. Initialization + * of globals as part of the declaration results in non-deterministic + * behavior since the value of the globals may be different on the + * first time that the driver is initialized vs subsequent initializations. + */ + dhd_txbound = DHD_TXBOUND; + dhd_rxbound = DHD_RXBOUND; + dhd_alignctl = TRUE; + sd1idle = TRUE; + dhd_readahead = TRUE; + retrydata = FALSE; + dhd_doflow = FALSE; + dhd_dongle_memsize = 0; + dhd_txminmax = DHD_TXMINMAX; + + forcealign = TRUE; + + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + DHD_INFO(("%s: venid 0x%04x devid 0x%04x\n", __FUNCTION__, venid, devid)); + + /* We make assumptions about address window mappings */ + ASSERT((uintptr)regsva == SI_ENUM_BASE); + + /* BCMSDH passes venid and devid based on CIS parsing -- but low-power start + * means early parse could fail, so here we should get either an ID + * we recognize OR (-1) indicating we must request power first. 
+ */ + /* Check the Vendor ID */ + switch (venid) { + case 0x0000: + case VENDOR_BROADCOM: + break; + default: + DHD_ERROR(("%s: unknown vendor: 0x%04x\n", + __FUNCTION__, venid)); + return NULL; + } + + /* Check the Device ID and make sure it's one that we support */ + switch (devid) { + case BCM4325_D11DUAL_ID: /* 4325 802.11a/g id */ + case BCM4325_D11G_ID: /* 4325 802.11g 2.4Ghz band id */ + case BCM4325_D11A_ID: /* 4325 802.11a 5Ghz band id */ + DHD_INFO(("%s: found 4325 Dongle\n", __FUNCTION__)); + break; + case BCM4329_D11N_ID: /* 4329 802.11n dualband device */ + case BCM4329_D11N2G_ID: /* 4329 802.11n 2.4G device */ + case BCM4329_D11N5G_ID: /* 4329 802.11n 5G device */ + case 0x4329: + DHD_INFO(("%s: found 4329 Dongle\n", __FUNCTION__)); + break; + case BCM4315_D11DUAL_ID: /* 4315 802.11a/g id */ + case BCM4315_D11G_ID: /* 4315 802.11g id */ + case BCM4315_D11A_ID: /* 4315 802.11a id */ + DHD_INFO(("%s: found 4315 Dongle\n", __FUNCTION__)); + break; + case BCM4319_D11N_ID: /* 4319 802.11n id */ + case BCM4319_D11N2G_ID: /* 4319 802.11n2g id */ + case BCM4319_D11N5G_ID: /* 4319 802.11n5g id */ + DHD_INFO(("%s: found 4319 Dongle\n", __FUNCTION__)); + break; + case 0: + DHD_INFO(("%s: allow device id 0, will check chip internals\n", + __FUNCTION__)); + break; + + default: + DHD_ERROR(("%s: skipping 0x%04x/0x%04x, not a dongle\n", + __FUNCTION__, venid, devid)); + return NULL; + } + + if (osh == NULL) { + /* Ask the OS interface part for an OSL handle */ + if (!(osh = dhd_osl_attach(sdh, DHD_BUS))) { + DHD_ERROR(("%s: osl_attach failed!\n", __FUNCTION__)); + return NULL; + } + } + + /* Allocate private bus interface state */ + if (!(bus = MALLOC(osh, sizeof(dhd_bus_t)))) { + DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__)); + goto fail; + } + bzero(bus, sizeof(dhd_bus_t)); + bus->sdh = sdh; + bus->cl_devid = (uint16)devid; + bus->bus = DHD_BUS; + bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1; + bus->usebufpool = FALSE; /* Use bufpool if allocated, else use locally malloced rxbuf */ + + /* attach the common module */ + if (!(cmn = dhd_common_init(osh))) { + DHD_ERROR(("%s: dhd_common_init failed\n", __FUNCTION__)); + goto fail; + } + + /* attempt to attach to the dongle */ + if (!(dhdsdio_probe_attach(bus, osh, sdh, regsva, devid))) { + DHD_ERROR(("%s: dhdsdio_probe_attach failed\n", __FUNCTION__)); + dhd_common_deinit(NULL, cmn); + goto fail; + } + + /* Attach to the dhd/OS/network interface */ + if (!(bus->dhd = dhd_attach(osh, bus, SDPCM_RESERVE))) { + DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__)); + goto fail; + } + + bus->dhd->cmn = cmn; + cmn->dhd = bus->dhd; + + /* Allocate buffers */ + if (!(dhdsdio_probe_malloc(bus, osh, sdh))) { + DHD_ERROR(("%s: dhdsdio_probe_malloc failed\n", __FUNCTION__)); + goto fail; + } + + if (!(dhdsdio_probe_init(bus, osh, sdh))) { + DHD_ERROR(("%s: dhdsdio_probe_init failed\n", __FUNCTION__)); + goto fail; + } + + if (bus->intr) { + /* Register interrupt callback, but mask it (not operational yet). 
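+		 * The registered handler is dhdsdio_isr(), which itself disables
+		 * further SDIO interrupts and schedules (or, with SDIO_ISR_THREAD,
+		 * directly runs) the DPC.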
*/ + DHD_INTR(("%s: disable SDIO interrupts (not interested yet)\n", __FUNCTION__)); + bcmsdh_intr_disable(sdh); + if ((ret = bcmsdh_intr_reg(sdh, dhdsdio_isr, bus)) != 0) { + DHD_ERROR(("%s: FAILED: bcmsdh_intr_reg returned %d\n", + __FUNCTION__, ret)); + goto fail; + } + DHD_INTR(("%s: registered SDIO interrupt function ok\n", __FUNCTION__)); + } else { + DHD_INFO(("%s: SDIO interrupt function is NOT registered due to polling mode\n", + __FUNCTION__)); + } + + DHD_INFO(("%s: completed!!\n", __FUNCTION__)); + +#ifdef GET_CUSTOM_MAC_ENABLE + /* Read MAC address from external customer place */ + memset(&ea_addr, 0, sizeof(ea_addr)); + ret = dhd_custom_get_mac_address(ea_addr.octet); + if (!ret) { + memcpy(bus->dhd->mac.octet, (void *)&ea_addr, ETHER_ADDR_LEN); + } +#endif /* GET_CUSTOM_MAC_ENABLE */ + + /* if firmware path present try to download and bring up bus */ + if (dhd_download_fw_on_driverload && (ret = dhd_bus_start(bus->dhd)) != 0) { + DHD_ERROR(("%s: dhd_bus_start failed\n", __FUNCTION__)); + if (ret == BCME_NOTUP) + goto fail; + } + + /* Ok, have the per-port tell the stack we're open for business */ + if (dhd_net_attach(bus->dhd, 0) != 0) { + DHD_ERROR(("%s: Net attach failed!!\n", __FUNCTION__)); + goto fail; + } + +#ifdef PROP_TXSTATUS + if (dhd_download_fw_on_driverload) + dhd_wl_ioctl_cmd(bus->dhd, WLC_UP, (char *)&up, sizeof(up), TRUE, 0); +#endif + return bus; + +fail: + dhdsdio_release(bus, osh); + return NULL; +} + +static bool +dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva, + uint16 devid) +{ + int err = 0; + uint8 clkctl = 0; + + bus->alp_only = TRUE; + + /* Return the window to backplane enumeration space for core access */ + if (dhdsdio_set_siaddr_window(bus, SI_ENUM_BASE)) { + DHD_ERROR(("%s: FAILED to return to SI_ENUM_BASE\n", __FUNCTION__)); + } + +#ifdef DHD_DEBUG + DHD_ERROR(("F1 signature read @0x18000000=0x%4x\n", + bcmsdh_reg_read(bus->sdh, SI_ENUM_BASE, 4))); + +#endif /* DHD_DEBUG */ + + + /* Force PLL off until si_attach() programs PLL control regs */ + + + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, DHD_INIT_CLKCTL1, &err); + if (!err) + clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + + if (err || ((clkctl & ~SBSDIO_AVBITS) != DHD_INIT_CLKCTL1)) { + DHD_ERROR(("dhdsdio_probe: ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n", + err, DHD_INIT_CLKCTL1, clkctl)); + goto fail; + } + + +#ifdef DHD_DEBUG + if (DHD_INFO_ON()) { + uint fn, numfn; + uint8 *cis[SDIOD_MAX_IOFUNCS]; + int err = 0; + + numfn = bcmsdh_query_iofnum(sdh); + ASSERT(numfn <= SDIOD_MAX_IOFUNCS); + + /* Make sure ALP is available before trying to read CIS */ + SPINWAIT(((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, NULL)), + !SBSDIO_ALPAV(clkctl)), PMU_MAX_TRANSITION_DLY); + + /* Now request ALP be put on the bus */ + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + DHD_INIT_CLKCTL2, &err); + OSL_DELAY(65); + + for (fn = 0; fn <= numfn; fn++) { + if (!(cis[fn] = MALLOC(osh, SBSDIO_CIS_SIZE_LIMIT))) { + DHD_INFO(("dhdsdio_probe: fn %d cis malloc failed\n", fn)); + break; + } + bzero(cis[fn], SBSDIO_CIS_SIZE_LIMIT); + + if ((err = bcmsdh_cis_read(sdh, fn, cis[fn], SBSDIO_CIS_SIZE_LIMIT))) { + DHD_INFO(("dhdsdio_probe: fn %d cis read err %d\n", fn, err)); + MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT); + break; + } + dhd_dump_cis(fn, cis[fn]); + } + + while (fn-- > 0) { + ASSERT(cis[fn]); + MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT); + } + + if (err) { + 
DHD_ERROR(("dhdsdio_probe: failure reading or parsing CIS\n")); + goto fail; + } + } +#endif /* DHD_DEBUG */ + + /* si_attach() will provide an SI handle and scan the backplane */ + if (!(bus->sih = si_attach((uint)devid, osh, regsva, DHD_BUS, sdh, + &bus->vars, &bus->varsz))) { + DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__)); + goto fail; + } + + bcmsdh_chipinfo(sdh, bus->sih->chip, bus->sih->chiprev); + + if (!dhdsdio_chipmatch((uint16)bus->sih->chip)) { + DHD_ERROR(("%s: unsupported chip: 0x%04x\n", + __FUNCTION__, bus->sih->chip)); + goto fail; + } + + + si_sdiod_drive_strength_init(bus->sih, osh, dhd_sdiod_drive_strength); + + + /* Get info on the ARM and SOCRAM cores... */ + if (!DHD_NOPMU(bus)) { + if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) || + (si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) { + bus->armrev = si_corerev(bus->sih); + } else { + DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__)); + goto fail; + } + if (!(bus->orig_ramsize = si_socram_size(bus->sih))) { + DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__)); + goto fail; + } + bus->ramsize = bus->orig_ramsize; + if (dhd_dongle_memsize) + dhd_dongle_setmemsize(bus, dhd_dongle_memsize); + + DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d)\n", + bus->ramsize, bus->orig_ramsize)); + } + + /* ...but normally deal with the SDPCMDEV core */ + if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) && + !(bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0))) { + DHD_ERROR(("%s: failed to find SDIODEV core!\n", __FUNCTION__)); + goto fail; + } + bus->sdpcmrev = si_corerev(bus->sih); + + /* Set core control so an SDIO reset does a backplane reset */ + OR_REG(osh, &bus->regs->corecontrol, CC_BPRESEN); + bus->rxint_mode = SDIO_DEVICE_HMB_RXINT; + + if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) && + (bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_1)) + { + uint32 val; + + val = R_REG(osh, &bus->regs->corecontrol); + val &= ~CC_XMTDATAAVAIL_MODE; + val |= CC_XMTDATAAVAIL_CTRL; + W_REG(osh, &bus->regs->corecontrol, val); + } + + + pktq_init(&bus->txq, (PRIOMASK + 1), QLEN); + + /* Locate an appropriately-aligned portion of hdrbuf */ + bus->rxhdr = (uint8 *)ROUNDUP((uintptr)&bus->hdrbuf[0], DHD_SDALIGN); + + /* Set the poll and/or interrupt flags */ + bus->intr = (bool)dhd_intr; + if ((bus->poll = (bool)dhd_poll)) + bus->pollrate = 1; + + return TRUE; + +fail: + if (bus->sih != NULL) { + si_detach(bus->sih); + bus->sih = NULL; + } + return FALSE; +} + +static bool +dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd->maxctl) { + bus->rxblen = ROUNDUP((bus->dhd->maxctl + SDPCM_HDRLEN), ALIGNMENT) + DHD_SDALIGN; + if (!(bus->rxbuf = DHD_OS_PREALLOC(osh, DHD_PREALLOC_RXBUF, bus->rxblen))) { + DHD_ERROR(("%s: MALLOC of %d-byte rxbuf failed\n", + __FUNCTION__, bus->rxblen)); + goto fail; + } + } + /* Allocate buffer to receive glomed packet */ + if (!(bus->databuf = DHD_OS_PREALLOC(osh, DHD_PREALLOC_DATABUF, MAX_DATA_BUF))) { + DHD_ERROR(("%s: MALLOC of %d-byte databuf failed\n", + __FUNCTION__, MAX_DATA_BUF)); + /* release rxbuf which was already located as above */ + if (!bus->rxblen) + DHD_OS_PREFREE(osh, bus->rxbuf, bus->rxblen); + goto fail; + } + + /* Align the buffer */ + if ((uintptr)bus->databuf % DHD_SDALIGN) + bus->dataptr = bus->databuf + (DHD_SDALIGN - ((uintptr)bus->databuf % DHD_SDALIGN)); + else + bus->dataptr = bus->databuf; + + return TRUE; + +fail: + return FALSE; +} + +static bool 
+dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh) +{ + int32 fnum; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef SDTEST + dhdsdio_pktgen_init(bus); +#endif /* SDTEST */ + + /* Disable F2 to clear any intermediate frame state on the dongle */ + bcmsdh_cfg_write(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL); + + bus->dhd->busstate = DHD_BUS_DOWN; + bus->sleeping = FALSE; + bus->rxflow = FALSE; + bus->prev_rxlim_hit = 0; + + + /* Done with backplane-dependent accesses, can drop clock... */ + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL); + + /* ...and initialize clock/power states */ + bus->clkstate = CLK_SDONLY; + bus->idletime = (int32)dhd_idletime; + bus->idleclock = DHD_IDLE_ACTIVE; + + /* Query the SD clock speed */ + if (bcmsdh_iovar_op(sdh, "sd_divisor", NULL, 0, + &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) { + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_divisor")); + bus->sd_divisor = -1; + } else { + DHD_INFO(("%s: Initial value for %s is %d\n", + __FUNCTION__, "sd_divisor", bus->sd_divisor)); + } + + /* Query the SD bus mode */ + if (bcmsdh_iovar_op(sdh, "sd_mode", NULL, 0, + &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) { + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_mode")); + bus->sd_mode = -1; + } else { + DHD_INFO(("%s: Initial value for %s is %d\n", + __FUNCTION__, "sd_mode", bus->sd_mode)); + } + + /* Query the F2 block size, set roundup accordingly */ + fnum = 2; + if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fnum, sizeof(int32), + &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) { + bus->blocksize = 0; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize")); + } else { + DHD_INFO(("%s: Initial value for %s is %d\n", + __FUNCTION__, "sd_blocksize", bus->blocksize)); + } + bus->roundup = MIN(max_roundup, bus->blocksize); + + /* Query if bus module supports packet chaining, default to use if supported */ + if (bcmsdh_iovar_op(sdh, "sd_rxchain", NULL, 0, + &bus->sd_rxchain, sizeof(int32), FALSE) != BCME_OK) { + bus->sd_rxchain = FALSE; + } else { + DHD_INFO(("%s: bus module (through bcmsdh API) %s chaining\n", + __FUNCTION__, (bus->sd_rxchain ? 
"supports" : "does not support"))); + } + bus->use_rxchain = (bool)bus->sd_rxchain; + + return TRUE; +} + +bool +dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, + char *pfw_path, char *pnv_path) +{ + bool ret; + bus->fw_path = pfw_path; + bus->nv_path = pnv_path; + + ret = dhdsdio_download_firmware(bus, osh, bus->sdh); + + + return ret; +} + +static bool +dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh) +{ + bool ret; + + /* Download the firmware */ + DHD_OS_WAKE_LOCK(bus->dhd); + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + ret = _dhdsdio_download_firmware(bus) == 0; + + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + DHD_OS_WAKE_UNLOCK(bus->dhd); + return ret; +} + +/* Detach and free everything */ +static void +dhdsdio_release(dhd_bus_t *bus, osl_t *osh) +{ + bool dongle_isolation = FALSE; + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus) { + ASSERT(osh); + + /* De-register interrupt handler */ + bcmsdh_intr_disable(bus->sdh); + bcmsdh_intr_dereg(bus->sdh); + + if (bus->dhd) { + dhd_common_deinit(bus->dhd, NULL); + dongle_isolation = bus->dhd->dongle_isolation; + dhd_detach(bus->dhd); + dhdsdio_release_dongle(bus, osh, dongle_isolation, TRUE); + dhd_free(bus->dhd); + bus->dhd = NULL; + } + + dhdsdio_release_malloc(bus, osh); + +#ifdef DHD_DEBUG + if (bus->console.buf != NULL) + MFREE(osh, bus->console.buf, bus->console.bufsize); +#endif + + MFREE(osh, bus, sizeof(dhd_bus_t)); + } + + if (osh) + dhd_osl_detach(osh); + + DHD_TRACE(("%s: Disconnected\n", __FUNCTION__)); +} + +static void +dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd && bus->dhd->dongle_reset) + return; + + if (bus->rxbuf) { +#ifndef CONFIG_DHD_USE_STATIC_BUF + MFREE(osh, bus->rxbuf, bus->rxblen); +#endif + bus->rxctl = bus->rxbuf = NULL; + bus->rxlen = 0; + } + + if (bus->databuf) { +#ifndef CONFIG_DHD_USE_STATIC_BUF + MFREE(osh, bus->databuf, MAX_DATA_BUF); +#endif + bus->databuf = NULL; + } + + if (bus->vars && bus->varsz) { + MFREE(osh, bus->vars, bus->varsz); + bus->vars = NULL; + } + +} + + +static void +dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag) +{ + DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__, + bus->dhd, bus->dhd->dongle_reset)); + + if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) + return; + + if (bus->sih) { + if (bus->dhd) { + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + } +#if !defined(BCMLXSDMMC) + if (dongle_isolation == FALSE) + si_watchdog(bus->sih, 4); +#endif /* !defined(BCMLXSDMMC) */ + if (bus->dhd) { + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } + si_detach(bus->sih); + bus->sih = NULL; + if (bus->vars && bus->varsz) + MFREE(osh, bus->vars, bus->varsz); + bus->vars = NULL; + } + + DHD_TRACE(("%s: Disconnected\n", __FUNCTION__)); +} + +static void +dhdsdio_disconnect(void *ptr) +{ + dhd_bus_t *bus = (dhd_bus_t *)ptr; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus) { + ASSERT(bus->dhd); + dhdsdio_release(bus, bus->dhd->osh); + } + + DHD_TRACE(("%s: Disconnected\n", __FUNCTION__)); +} + + +/* Register/Unregister functions are called by the main DHD entry + * point (e.g. module insertion) to link with the bus driver, in + * order to look for or await the device. 
+ */ + +static bcmsdh_driver_t dhd_sdio = { + dhdsdio_probe, + dhdsdio_disconnect +}; + +int +dhd_bus_register(void) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + return bcmsdh_register(&dhd_sdio); +} + +void +dhd_bus_unregister(void) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + bcmsdh_unregister(); +} + +#ifdef BCMEMBEDIMAGE +static int +dhdsdio_download_code_array(struct dhd_bus *bus) +{ + int bcmerror = -1; + int offset = 0; + unsigned char *ularray = NULL; + + DHD_INFO(("%s: download embedded firmware...\n", __FUNCTION__)); + + /* Download image */ + while ((offset + MEMBLOCK) < sizeof(dlarray)) { + bcmerror = dhdsdio_membytes(bus, TRUE, offset, + (uint8 *) (dlarray + offset), MEMBLOCK); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + + offset += MEMBLOCK; + } + + if (offset < sizeof(dlarray)) { + bcmerror = dhdsdio_membytes(bus, TRUE, offset, + (uint8 *) (dlarray + offset), sizeof(dlarray) - offset); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset)); + goto err; + } + } + +#ifdef DHD_DEBUG + /* Upload and compare the downloaded code */ + { + ularray = MALLOC(bus->dhd->osh, bus->ramsize); + /* Upload image to verify downloaded contents. */ + offset = 0; + memset(ularray, 0xaa, bus->ramsize); + while ((offset + MEMBLOCK) < sizeof(dlarray)) { + bcmerror = dhdsdio_membytes(bus, FALSE, offset, ularray + offset, MEMBLOCK); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + + offset += MEMBLOCK; + } + + if (offset < sizeof(dlarray)) { + bcmerror = dhdsdio_membytes(bus, FALSE, offset, + ularray + offset, sizeof(dlarray) - offset); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset)); + goto err; + } + } + + if (memcmp(dlarray, ularray, sizeof(dlarray))) { + DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n", + __FUNCTION__, dlimagename, dlimagever, dlimagedate)); + goto err; + } else + DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n", + __FUNCTION__, dlimagename, dlimagever, dlimagedate)); + + } +#endif /* DHD_DEBUG */ + +err: + if (ularray) + MFREE(bus->dhd->osh, ularray, bus->ramsize); + return bcmerror; +} +#endif /* BCMEMBEDIMAGE */ + +static int +dhdsdio_download_code_file(struct dhd_bus *bus, char *pfw_path) +{ + int bcmerror = -1; + int offset = 0; + uint len; + void *image = NULL; + uint8 *memblock = NULL, *memptr; + + DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, pfw_path)); + + image = dhd_os_open_image(pfw_path); + if (image == NULL) + goto err; + + memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN); + if (memblock == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK)); + goto err; + } + if ((uint32)(uintptr)memblock % DHD_SDALIGN) + memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN)); + + /* Download image */ + while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, image))) { + bcmerror = dhdsdio_membytes(bus, TRUE, offset, memptr, len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + + offset += MEMBLOCK; + } + +err: + if (memblock) + MFREE(bus->dhd->osh, memblock, MEMBLOCK + 
DHD_SDALIGN); + + if (image) + dhd_os_close_image(image); + + return bcmerror; +} + +/* + EXAMPLE: nvram_array + nvram_arry format: + name=value + Use carriage return at the end of each assignment, and an empty string with + carriage return at the end of array. + + For example: + unsigned char nvram_array[] = {"name1=value1\n", "name2=value2\n", "\n"}; + Hex values start with 0x, and mac addr format: xx:xx:xx:xx:xx:xx. + + Search "EXAMPLE: nvram_array" to see how the array is activated. +*/ + +void +dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params) +{ + bus->nvram_params = nvram_params; +} + +static int +dhdsdio_download_nvram(struct dhd_bus *bus) +{ + int bcmerror = -1; + uint len; + void * image = NULL; + char * memblock = NULL; + char *bufp; + char *pnv_path; + bool nvram_file_exists; + + pnv_path = bus->nv_path; + + nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0')); + if (!nvram_file_exists && (bus->nvram_params == NULL)) + return (0); + + if (nvram_file_exists) { + image = dhd_os_open_image(pnv_path); + if (image == NULL) + goto err; + } + + memblock = MALLOC(bus->dhd->osh, MAX_NVRAMBUF_SIZE); + if (memblock == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", + __FUNCTION__, MAX_NVRAMBUF_SIZE)); + goto err; + } + + /* Download variables */ + if (nvram_file_exists) { + len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image); + } + else { + len = strlen(bus->nvram_params); + ASSERT(len <= MAX_NVRAMBUF_SIZE); + memcpy(memblock, bus->nvram_params, len); + } + if (len > 0 && len < MAX_NVRAMBUF_SIZE) { + bufp = (char *)memblock; + bufp[len] = 0; + len = process_nvram_vars(bufp, len); + if (len % 4) { + len += 4 - (len % 4); + } + bufp += len; + *bufp++ = 0; + if (len) + bcmerror = dhdsdio_downloadvars(bus, memblock, len + 1); + if (bcmerror) { + DHD_ERROR(("%s: error downloading vars: %d\n", + __FUNCTION__, bcmerror)); + } + } + else { + DHD_ERROR(("%s: error reading nvram file: %d\n", + __FUNCTION__, len)); + bcmerror = BCME_SDIO_ERROR; + } + +err: + if (memblock) + MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE); + + if (image) + dhd_os_close_image(image); + + return bcmerror; +} + +static int +_dhdsdio_download_firmware(struct dhd_bus *bus) +{ + int bcmerror = -1; + + bool embed = FALSE; /* download embedded firmware */ + bool dlok = FALSE; /* download firmware succeeded */ + + /* Out immediately if no image to download */ + if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) { +#ifdef BCMEMBEDIMAGE + embed = TRUE; +#else + return 0; +#endif + } + + /* Keep arm in reset */ + if (dhdsdio_download_state(bus, TRUE)) { + DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__)); + goto err; + } + + /* External image takes precedence if specified */ + if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) { + if (dhdsdio_download_code_file(bus, bus->fw_path)) { + DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__)); +#ifdef BCMEMBEDIMAGE + embed = TRUE; +#else + goto err; +#endif + } + else { + embed = FALSE; + dlok = TRUE; + } + } +#ifdef BCMEMBEDIMAGE + if (embed) { + if (dhdsdio_download_code_array(bus)) { + DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__)); + goto err; + } + else { + dlok = TRUE; + } + } +#endif + if (!dlok) { + DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__)); + goto err; + } + + /* EXAMPLE: nvram_array */ + /* If a valid nvram_arry is specified as above, it can be passed down to dongle */ + /* dhd_bus_set_nvram_params(bus, (char 
*)&nvram_array); */ + + /* External nvram takes precedence if specified */ + if (dhdsdio_download_nvram(bus)) { + DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__)); + goto err; + } + + /* Take arm out of reset */ + if (dhdsdio_download_state(bus, FALSE)) { + DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__)); + goto err; + } + + bcmerror = 0; + +err: + return bcmerror; +} + +static int +dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes, + void *pkt, bcmsdh_cmplt_fn_t complete, void *handle) +{ + int status; + + status = bcmsdh_recv_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete, handle); + + return status; +} + +static int +dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes, + void *pkt, bcmsdh_cmplt_fn_t complete, void *handle) +{ + return (bcmsdh_send_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete, handle)); +} + +uint +dhd_bus_chip(struct dhd_bus *bus) +{ + ASSERT(bus); + ASSERT(bus->sih != NULL); + return bus->sih->chip; +} + +void * +dhd_bus_pub(struct dhd_bus *bus) +{ + ASSERT(bus); + return bus->dhd; +} + +void * +dhd_bus_txq(struct dhd_bus *bus) +{ + return &bus->txq; +} + +uint +dhd_bus_hdrlen(struct dhd_bus *bus) +{ + return SDPCM_HDRLEN; +} + +int +dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) +{ + int bcmerror = 0; + dhd_bus_t *bus; + + bus = dhdp->bus; + + if (flag == TRUE) { + if (!bus->dhd->dongle_reset) { + dhd_os_sdlock(dhdp); + dhd_os_wd_timer(dhdp, 0); +#if !defined(IGNORE_ETH0_DOWN) + /* Force flow control as protection when stop come before ifconfig_down */ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON); +#endif /* !defined(IGNORE_ETH0_DOWN) */ + /* Expect app to have torn down any connection before calling */ + /* Stop the bus, disable F2 */ + dhd_bus_stop(bus, FALSE); + +#if defined(OOB_INTR_ONLY) + /* Clean up any pending IRQ */ + bcmsdh_set_irq(FALSE); +#endif /* defined(OOB_INTR_ONLY) */ + + /* Clean tx/rx buffer pointers, detach from the dongle */ + dhdsdio_release_dongle(bus, bus->dhd->osh, TRUE, TRUE); + + bus->dhd->dongle_reset = TRUE; + bus->dhd->up = FALSE; + dhd_os_sdunlock(dhdp); + DHD_TRACE(("%s: WLAN OFF DONE\n", __FUNCTION__)); + /* App can now remove power from device */ + } else + bcmerror = BCME_SDIO_ERROR; + } else { + /* App must have restored power to device before calling */ + + DHD_TRACE(("\n\n%s: == WLAN ON ==\n", __FUNCTION__)); + + if (bus->dhd->dongle_reset) { + /* Turn on WLAN */ +#ifdef DHDTHREAD + dhd_os_sdlock(dhdp); +#endif /* DHDTHREAD */ + /* Reset SD client */ + bcmsdh_reset(bus->sdh); + + /* Attempt to re-attach & download */ + if (dhdsdio_probe_attach(bus, bus->dhd->osh, bus->sdh, + (uint32 *)SI_ENUM_BASE, + bus->cl_devid)) { + /* Attempt to download binary to the dongle */ + if (dhdsdio_probe_init(bus, bus->dhd->osh, bus->sdh) && + dhdsdio_download_firmware(bus, bus->dhd->osh, bus->sdh)) { + + /* Re-init bus, enable F2 transfer */ + bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE); + if (bcmerror == BCME_OK) { +#if defined(OOB_INTR_ONLY) + bcmsdh_set_irq(TRUE); + dhd_enable_oob_intr(bus, TRUE); +#endif /* defined(OOB_INTR_ONLY) */ + + bus->dhd->dongle_reset = FALSE; + bus->dhd->up = TRUE; + +#if !defined(IGNORE_ETH0_DOWN) + /* Restore flow control */ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF); +#endif + dhd_os_wd_timer(dhdp, dhd_watchdog_ms); + + DHD_TRACE(("%s: WLAN ON DONE\n", __FUNCTION__)); + } else { + dhd_bus_stop(bus, FALSE); + dhdsdio_release_dongle(bus, 
bus->dhd->osh, + TRUE, FALSE); + } + } else + bcmerror = BCME_SDIO_ERROR; + } else + bcmerror = BCME_SDIO_ERROR; + +#ifdef DHDTHREAD + dhd_os_sdunlock(dhdp); +#endif /* DHDTHREAD */ + } else { + bcmerror = BCME_SDIO_ERROR; + DHD_INFO(("%s called when dongle is not in reset\n", + __FUNCTION__)); + DHD_INFO(("Will call dhd_bus_start instead\n")); + sdioh_start(NULL, 1); + if ((bcmerror = dhd_bus_start(dhdp)) != 0) + DHD_ERROR(("%s: dhd_bus_start fail with %d\n", + __FUNCTION__, bcmerror)); + } + } + return bcmerror; +} + +/* Get Chip ID version */ +uint dhd_bus_chip_id(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + + return bus->sih->chip; +} + +/* Get Chip Rev ID version */ +uint dhd_bus_chiprev_id(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + + return bus->sih->chiprev; +} + +/* Get Chip Pkg ID version */ +uint dhd_bus_chippkg_id(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + + return bus->sih->chippkg; +} + +int +dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size) +{ + dhd_bus_t *bus; + + bus = dhdp->bus; + return dhdsdio_membytes(bus, set, address, data, size); +} diff --git a/drivers/net/wireless/bcmdhd/dhd_wlfc.h b/drivers/net/wireless/bcmdhd/dhd_wlfc.h new file mode 100644 index 0000000000000..c4d251806d780 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_wlfc.h @@ -0,0 +1,276 @@ +/* +* Copyright (C) 1999-2011, Broadcom Corporation +* +* Unless you and Broadcom execute a separate written software license +* agreement governing use of this software, this software is licensed to you +* under the terms of the GNU General Public License version 2 (the "GPL"), +* available at http://www.broadcom.com/licenses/GPLv2.php, with the +* following added to such license: +* +* As a special exception, the copyright holders of this software give you +* permission to link this software with independent modules, and to copy and +* distribute the resulting executable under terms of your choice, provided that +* you also meet, for each linked independent module, the terms and conditions of +* the license of that module. An independent module is a module which is not +* derived from this software. The special exception does not apply to any +* modifications of the software. +* +* Notwithstanding the above, under no circumstances may you combine this +* software in any way with any other Broadcom software provided under a license +* other than the GPL, without Broadcom's express prior written consent. 
+* $Id: dhd_wlfc.h 286994 2011-09-29 21:27:44Z $ +* +*/ +#ifndef __wlfc_host_driver_definitions_h__ +#define __wlfc_host_driver_definitions_h__ + +/* 16 bits will provide an absolute max of 65536 slots */ +#define WLFC_HANGER_MAXITEMS 1024 + +#define WLFC_HANGER_ITEM_STATE_FREE 1 +#define WLFC_HANGER_ITEM_STATE_INUSE 2 + +#define WLFC_PKTID_HSLOT_MASK 0xffff /* allow 16 bits only */ +#define WLFC_PKTID_HSLOT_SHIFT 8 + +/* x -> TXSTATUS TAG to/from firmware */ +#define WLFC_PKTID_HSLOT_GET(x) \ + (((x) >> WLFC_PKTID_HSLOT_SHIFT) & WLFC_PKTID_HSLOT_MASK) +#define WLFC_PKTID_HSLOT_SET(var, slot) \ + ((var) = ((var) & ~(WLFC_PKTID_HSLOT_MASK << WLFC_PKTID_HSLOT_SHIFT)) | \ + (((slot) & WLFC_PKTID_HSLOT_MASK) << WLFC_PKTID_HSLOT_SHIFT)) + +#define WLFC_PKTID_FREERUNCTR_MASK 0xff + +#define WLFC_PKTID_FREERUNCTR_GET(x) ((x) & WLFC_PKTID_FREERUNCTR_MASK) +#define WLFC_PKTID_FREERUNCTR_SET(var, ctr) \ + ((var) = (((var) & ~WLFC_PKTID_FREERUNCTR_MASK) | \ + (((ctr) & WLFC_PKTID_FREERUNCTR_MASK)))) + +#define WLFC_PKTQ_PENQ(pq, prec, p) ((pktq_full((pq)) || pktq_pfull((pq), (prec)))? \ + NULL : pktq_penq((pq), (prec), (p))) +#define WLFC_PKTQ_PENQ_HEAD(pq, prec, p) ((pktq_full((pq)) || pktq_pfull((pq), (prec))) ? \ + NULL : pktq_penq_head((pq), (prec), (p))) + +typedef enum ewlfc_packet_state { + eWLFC_PKTTYPE_NEW, + eWLFC_PKTTYPE_DELAYED, + eWLFC_PKTTYPE_SUPPRESSED, + eWLFC_PKTTYPE_MAX +} ewlfc_packet_state_t; + +typedef enum ewlfc_mac_entry_action { + eWLFC_MAC_ENTRY_ACTION_ADD, + eWLFC_MAC_ENTRY_ACTION_DEL, + eWLFC_MAC_ENTRY_ACTION_MAX +} ewlfc_mac_entry_action_t; + +typedef struct wlfc_hanger_item { + uint8 state; + uint8 pad[3]; + uint32 identifier; + void* pkt; +#ifdef PROP_TXSTATUS_DEBUG + uint32 push_time; +#endif +} wlfc_hanger_item_t; + +typedef struct wlfc_hanger { + int max_items; + uint32 pushed; + uint32 popped; + uint32 failed_to_push; + uint32 failed_to_pop; + uint32 failed_slotfind; + wlfc_hanger_item_t items[1]; +} wlfc_hanger_t; + +#define WLFC_HANGER_SIZE(n) ((sizeof(wlfc_hanger_t) - \ + sizeof(wlfc_hanger_item_t)) + ((n)*sizeof(wlfc_hanger_item_t))) + +#define WLFC_STATE_OPEN 1 +#define WLFC_STATE_CLOSE 2 + +#define WLFC_PSQ_PREC_COUNT ((AC_COUNT + 1) * 2) /* 2 for each AC traffic and bc/mc */ +#define WLFC_PSQ_LEN 64 +#define WLFC_SENDQ_LEN 256 + +#define WLFC_FLOWCONTROL_DELTA 8 +#define WLFC_FLOWCONTROL_HIWATER (WLFC_PSQ_LEN - WLFC_FLOWCONTROL_DELTA) +#define WLFC_FLOWCONTROL_LOWATER (WLFC_FLOWCONTROL_HIWATER - WLFC_FLOWCONTROL_DELTA) + +typedef struct wlfc_mac_descriptor { + uint8 occupied; + uint8 interface_id; + uint8 iftype; + uint8 state; + uint8 ac_bitmap; /* for APSD */ + uint8 requested_credit; + uint8 requested_packet; + uint8 ea[ETHER_ADDR_LEN]; + /* + maintain (MAC,AC) based seq count for + packets going to the device. As well as bc/mc. 
+ */ + uint8 seq[AC_COUNT + 1]; + uint8 generation; + struct pktq psq; + /* The AC pending bitmap that was reported to the fw at last change */ + uint8 traffic_lastreported_bmp; + /* The new AC pending bitmap */ + uint8 traffic_pending_bmp; + /* 1= send on next opportunity */ + uint8 send_tim_signal; + uint8 mac_handle; +#ifdef PROP_TXSTATUS_DEBUG + uint32 dstncredit_sent_packets; + uint32 dstncredit_acks; + uint32 opened_ct; + uint32 closed_ct; +#endif +} wlfc_mac_descriptor_t; + +#define WLFC_DECR_SEQCOUNT(entry, prec) do { if (entry->seq[(prec)] == 0) {\ + entry->seq[prec] = 0xff; } else entry->seq[prec]--;} while (0) + +#define WLFC_INCR_SEQCOUNT(entry, prec) entry->seq[(prec)]++ +#define WLFC_SEQCOUNT(entry, prec) entry->seq[(prec)] + +typedef struct athost_wl_stat_counters { + uint32 pktin; + uint32 pkt2bus; + uint32 pktdropped; + uint32 tlv_parse_failed; + uint32 rollback; + uint32 rollback_failed; + uint32 sendq_full_error; + uint32 delayq_full_error; + uint32 credit_request_failed; + uint32 packet_request_failed; + uint32 mac_update_failed; + uint32 psmode_update_failed; + uint32 interface_update_failed; + uint32 wlfc_header_only_pkt; + uint32 txstatus_in; + uint32 d11_suppress; + uint32 wl_suppress; + uint32 bad_suppress; + uint32 pkt_freed; + uint32 pkt_free_err; + uint32 psq_wlsup_retx; + uint32 psq_wlsup_enq; + uint32 psq_d11sup_retx; + uint32 psq_d11sup_enq; + uint32 psq_hostq_retx; + uint32 psq_hostq_enq; + uint32 mac_handle_notfound; + uint32 wlc_tossed_pkts; + uint32 dhd_hdrpulls; + uint32 generic_error; + /* an extra one for bc/mc traffic */ + uint32 sendq_pkts[AC_COUNT + 1]; +#ifdef PROP_TXSTATUS_DEBUG + /* all pkt2bus -> txstatus latency accumulated */ + uint32 latency_sample_count; + uint32 total_status_latency; + uint32 latency_most_recent; + int idx_delta; + uint32 deltas[10]; + uint32 fifo_credits_sent[6]; + uint32 fifo_credits_back[6]; + uint32 dropped_qfull[6]; + uint32 signal_only_pkts_sent; + uint32 signal_only_pkts_freed; +#endif +} athost_wl_stat_counters_t; + +#ifdef PROP_TXSTATUS_DEBUG +#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do { \ + (ctx)->stats.fifo_credits_sent[(ac)]++;} while (0) +#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do { \ + (ctx)->stats.fifo_credits_back[(ac)]++;} while (0) +#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do { \ + (ctx)->stats.dropped_qfull[(ac)]++;} while (0) +#else +#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do {} while (0) +#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do {} while (0) +#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do {} while (0) +#endif + +#define WLFC_FCMODE_NONE 0 +#define WLFC_FCMODE_IMPLIED_CREDIT 1 +#define WLFC_FCMODE_EXPLICIT_CREDIT 2 + +/* How long to defer borrowing in milliseconds */ +#define WLFC_BORROW_DEFER_PERIOD_MS 100 + +/* Mask to represent available ACs (note: BC/MC is ignored */ +#define WLFC_AC_MASK 0xF + +/* Mask to check for only on-going AC_BE traffic */ +#define WLFC_AC_BE_TRAFFIC_ONLY 0xD + +typedef struct athost_wl_status_info { + uint8 last_seqid_to_wlc; + + /* OSL handle */ + osl_t* osh; + /* dhd pub */ + void* dhdp; + + /* stats */ + athost_wl_stat_counters_t stats; + + /* the additional ones are for bc/mc and ATIM FIFO */ + int FIFO_credit[AC_COUNT + 2]; + + /* Credit borrow counts for each FIFO from each of the other FIFOs */ + int credits_borrowed[AC_COUNT + 2][AC_COUNT + 2]; + + struct pktq SENDQ; + + /* packet hanger and MAC->handle lookup table */ + void* hanger; + struct { + /* table for individual nodes */ + wlfc_mac_descriptor_t 
nodes[WLFC_MAC_DESC_TABLE_SIZE]; + /* table for interfaces */ + wlfc_mac_descriptor_t interfaces[WLFC_MAX_IFNUM]; + /* OS may send packets to unknown (unassociated) destinations */ + /* A place holder for bc/mc and packets to unknown destinations */ + wlfc_mac_descriptor_t other; + } destination_entries; + /* token position for different priority packets */ + uint8 token_pos[AC_COUNT+1]; + /* ON/OFF state for flow control to the host network interface */ + uint8 hostif_flow_state[WLFC_MAX_IFNUM]; + uint8 host_ifidx; + /* to flow control an OS interface */ + uint8 toggle_host_if; + + /* + Mode in which the dhd flow control shall operate. Must be set before + traffic starts to the device. + 0 - Do not do any proptxtstatus flow control + 1 - Use implied credit from a packet status + 2 - Use explicit credit + */ + uint8 proptxstatus_mode; + + /* To borrow credits */ + uint8 allow_credit_borrow; + + /* Timestamp to compute how long to defer borrowing for */ + uint32 borrow_defer_timestamp; + +} athost_wl_status_info_t; + +int dhd_wlfc_enable(dhd_pub_t *dhd); +int dhd_wlfc_interface_event(struct dhd_info *, + ewlfc_mac_entry_action_t action, uint8 ifid, uint8 iftype, uint8* ea); +int dhd_wlfc_FIFOcreditmap_event(struct dhd_info *dhd, uint8* event_data); +int dhd_wlfc_event(struct dhd_info *dhd); +int dhd_os_wlfc_block(dhd_pub_t *pub); +int dhd_os_wlfc_unblock(dhd_pub_t *pub); + +#endif /* __wlfc_host_driver_definitions_h__ */ diff --git a/drivers/net/wireless/bcmdhd/dngl_stats.h b/drivers/net/wireless/bcmdhd/dngl_stats.h new file mode 100644 index 0000000000000..9cdf718b39906 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dngl_stats.h @@ -0,0 +1,43 @@ +/* + * Common stats definitions for clients of dongle + * ports + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: dngl_stats.h,v 1.5 2008-06-02 16:56:20 Exp $ + */ + +#ifndef _dngl_stats_h_ +#define _dngl_stats_h_ + +typedef struct { + unsigned long rx_packets; /* total packets received */ + unsigned long tx_packets; /* total packets transmitted */ + unsigned long rx_bytes; /* total bytes received */ + unsigned long tx_bytes; /* total bytes transmitted */ + unsigned long rx_errors; /* bad packets received */ + unsigned long tx_errors; /* packet transmit problems */ + unsigned long rx_dropped; /* packets dropped by dongle */ + unsigned long tx_dropped; /* packets dropped by dongle */ + unsigned long multicast; /* multicast packets received */ +} dngl_stats_t; + +#endif /* _dngl_stats_h_ */ diff --git a/drivers/net/wireless/bcmdhd/dngl_wlhdr.h b/drivers/net/wireless/bcmdhd/dngl_wlhdr.h new file mode 100644 index 0000000000000..8b39b9ecb5847 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dngl_wlhdr.h @@ -0,0 +1,40 @@ +/* + * Dongle WL Header definitions + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: dngl_wlhdr.h,v 1.1 2009-01-08 01:21:12 Exp $ + */ + +#ifndef _dngl_wlhdr_h_ +#define _dngl_wlhdr_h_ + +typedef struct wl_header { + uint8 type; /* Header type */ + uint8 version; /* Header version */ + int8 rssi; /* RSSI */ + uint8 pad; /* Unused */ +} wl_header_t; + +#define WL_HEADER_LEN sizeof(wl_header_t) +#define WL_HEADER_TYPE 0 +#define WL_HEADER_VER 1 +#endif /* _dngl_wlhdr_h_ */ diff --git a/drivers/net/wireless/bcmdhd/hndpmu.c b/drivers/net/wireless/bcmdhd/hndpmu.c new file mode 100644 index 0000000000000..0e493343c8069 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/hndpmu.c @@ -0,0 +1,196 @@ +/* + * Misc utility routines for accessing PMU corerev specific features + * of the SiliconBackplane-based Broadcom chips. 
+ * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: hndpmu.c,v 1.228.2.56 2011-02-11 22:49:07 $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PMU_ERROR(args) + +#define PMU_MSG(args) + +/* To check in verbose debugging messages not intended + * to be on except on private builds. + */ +#define PMU_NONE(args) + + +/* SDIO Pad drive strength to select value mappings. + * The last strength value in each table must be 0 (the tri-state value). + */ +typedef struct { + uint8 strength; /* Pad Drive Strength in mA */ + uint8 sel; /* Chip-specific select value */ +} sdiod_drive_str_t; + +/* SDIO Drive Strength to sel value table for PMU Rev 1 */ +static const sdiod_drive_str_t sdiod_drive_strength_tab1[] = { + {4, 0x2}, + {2, 0x3}, + {1, 0x0}, + {0, 0x0} }; + +/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */ +static const sdiod_drive_str_t sdiod_drive_strength_tab2[] = { + {12, 0x7}, + {10, 0x6}, + {8, 0x5}, + {6, 0x4}, + {4, 0x2}, + {2, 0x1}, + {0, 0x0} }; + +/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */ +static const sdiod_drive_str_t sdiod_drive_strength_tab3[] = { + {32, 0x7}, + {26, 0x6}, + {22, 0x5}, + {16, 0x4}, + {12, 0x3}, + {8, 0x2}, + {4, 0x1}, + {0, 0x0} }; + +/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8v) */ +static const sdiod_drive_str_t sdiod_drive_strength_tab4_1v8[] = { + {32, 0x6}, + {26, 0x7}, + {22, 0x4}, + {16, 0x5}, + {12, 0x2}, + {8, 0x3}, + {4, 0x0}, + {0, 0x1} }; + +/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.2v) */ + +/* SDIO Drive Strength to sel value table for PMU Rev 11 (2.5v) */ + +/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */ +static const sdiod_drive_str_t sdiod_drive_strength_tab5_1v8[] = { + {6, 0x7}, + {5, 0x6}, + {4, 0x5}, + {3, 0x4}, + {2, 0x2}, + {1, 0x1}, + {0, 0x0} }; + +/* SDIO Drive Strength to sel value table for PMU Rev 13 (3.3v) */ + + +#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu)) + +void +si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength) +{ + chipcregs_t *cc; + uint origidx, intr_val = 0; + sdiod_drive_str_t *str_tab = NULL; + uint32 str_mask = 0; + uint32 str_shift = 0; + + if (!(sih->cccaps & CC_CAP_PMU)) { + return; + } + + /* Remember original core before switch to chipc */ + cc = (chipcregs_t *) si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val); + + switch 
(SDIOD_DRVSTR_KEY(sih->chip, sih->pmurev)) { + case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1): + str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab1; + str_mask = 0x30000000; + str_shift = 28; + break; + case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2): + case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3): + case SDIOD_DRVSTR_KEY(BCM4315_CHIP_ID, 4): + str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab2; + str_mask = 0x00003800; + str_shift = 11; + break; + case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8): + case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 11): + if (sih->pmurev == 8) { + str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab3; + } + else if (sih->pmurev == 11) { + str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab4_1v8; + } + str_mask = 0x00003800; + str_shift = 11; + break; + case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12): + str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab4_1v8; + str_mask = 0x00003800; + str_shift = 11; + break; + case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13): + str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab5_1v8; + str_mask = 0x00003800; + str_shift = 11; + break; + default: + PMU_MSG(("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n", + bcm_chipname(sih->chip, chn, 8), sih->chiprev, sih->pmurev)); + + break; + } + + if (str_tab != NULL) { + uint32 cc_data_temp; + int i; + + /* Pick the lowest available drive strength equal or greater than the + * requested strength. Drive strength of 0 requests tri-state. + */ + for (i = 0; drivestrength < str_tab[i].strength; i++) + ; + + if (i > 0 && drivestrength > str_tab[i].strength) + i--; + + W_REG(osh, &cc->chipcontrol_addr, 1); + cc_data_temp = R_REG(osh, &cc->chipcontrol_data); + cc_data_temp &= ~str_mask; + cc_data_temp |= str_tab[i].sel << str_shift; + W_REG(osh, &cc->chipcontrol_data, cc_data_temp); + + PMU_MSG(("SDIO: %dmA drive strength requested; set to %dmA\n", + drivestrength, str_tab[i].strength)); + } + + /* Return to original core */ + si_restore_core(sih, origidx, intr_val); +} diff --git a/drivers/net/wireless/bcmdhd/include/Makefile b/drivers/net/wireless/bcmdhd/include/Makefile new file mode 100644 index 0000000000000..67c4906f58897 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/Makefile @@ -0,0 +1,53 @@ +#!/bin/bash +# +# This script serves following purpose: +# +# 1. It generates native version information by querying +# automerger maintained database to see where src/include +# came from +# 2. For select components, as listed in compvers.sh +# it generates component version files +# +# Copyright 2005, Broadcom, Inc. +# +# $Id: Makefile 241702 2011-02-19 00:41:03Z $ +# + +SRCBASE := .. 
+
+TARGETS := epivers.h
+
+ifdef VERBOSE
+export VERBOSE
+endif
+
+all release: epivers compvers
+
+# Generate epivers.h for native branch version
+epivers:
+	bash epivers.sh
+
+# Generate component version files for select components, if any
+compvers:
+	@if [ -s "compvers.sh" ]; then \
+		echo "Generating component versions, if any"; \
+		bash compvers.sh; \
+	else \
+		echo "Skipping component version generation"; \
+	fi
+
+# Clean generated component version files
+clean_compvers:
+	@if [ -s "compvers.sh" ]; then \
+		echo "bash compvers.sh clean"; \
+		bash compvers.sh clean; \
+	else \
+		echo "Skipping component version clean"; \
+	fi
+
+clean:
+	rm -f $(TARGETS) *.prev
+
+clean_all: clean clean_compvers
+
+.PHONY: all release clean epivers compvers clean_compvers
diff --git a/drivers/net/wireless/bcmdhd/include/aidmp.h b/drivers/net/wireless/bcmdhd/include/aidmp.h
new file mode 100644
index 0000000000000..b993a033abc20
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/aidmp.h
@@ -0,0 +1,377 @@
+/*
+ * Broadcom AMBA Interconnect definitions.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * + * $Id: aidmp.h 277737 2011-08-16 17:54:59Z $ + */ + + +#ifndef _AIDMP_H +#define _AIDMP_H + + +#define MFGID_ARM 0x43b +#define MFGID_BRCM 0x4bf +#define MFGID_MIPS 0x4a7 + + +#define CC_SIM 0 +#define CC_EROM 1 +#define CC_CORESIGHT 9 +#define CC_VERIF 0xb +#define CC_OPTIMO 0xd +#define CC_GEN 0xe +#define CC_PRIMECELL 0xf + + +#define ER_EROMENTRY 0x000 +#define ER_REMAPCONTROL 0xe00 +#define ER_REMAPSELECT 0xe04 +#define ER_MASTERSELECT 0xe10 +#define ER_ITCR 0xf00 +#define ER_ITIP 0xf04 + + +#define ER_TAG 0xe +#define ER_TAG1 0x6 +#define ER_VALID 1 +#define ER_CI 0 +#define ER_MP 2 +#define ER_ADD 4 +#define ER_END 0xe +#define ER_BAD 0xffffffff + + +#define CIA_MFG_MASK 0xfff00000 +#define CIA_MFG_SHIFT 20 +#define CIA_CID_MASK 0x000fff00 +#define CIA_CID_SHIFT 8 +#define CIA_CCL_MASK 0x000000f0 +#define CIA_CCL_SHIFT 4 + + +#define CIB_REV_MASK 0xff000000 +#define CIB_REV_SHIFT 24 +#define CIB_NSW_MASK 0x00f80000 +#define CIB_NSW_SHIFT 19 +#define CIB_NMW_MASK 0x0007c000 +#define CIB_NMW_SHIFT 14 +#define CIB_NSP_MASK 0x00003e00 +#define CIB_NSP_SHIFT 9 +#define CIB_NMP_MASK 0x000001f0 +#define CIB_NMP_SHIFT 4 + + +#define MPD_MUI_MASK 0x0000ff00 +#define MPD_MUI_SHIFT 8 +#define MPD_MP_MASK 0x000000f0 +#define MPD_MP_SHIFT 4 + + +#define AD_ADDR_MASK 0xfffff000 +#define AD_SP_MASK 0x00000f00 +#define AD_SP_SHIFT 8 +#define AD_ST_MASK 0x000000c0 +#define AD_ST_SHIFT 6 +#define AD_ST_SLAVE 0x00000000 +#define AD_ST_BRIDGE 0x00000040 +#define AD_ST_SWRAP 0x00000080 +#define AD_ST_MWRAP 0x000000c0 +#define AD_SZ_MASK 0x00000030 +#define AD_SZ_SHIFT 4 +#define AD_SZ_4K 0x00000000 +#define AD_SZ_8K 0x00000010 +#define AD_SZ_16K 0x00000020 +#define AD_SZ_SZD 0x00000030 +#define AD_AG32 0x00000008 +#define AD_ADDR_ALIGN 0x00000fff +#define AD_SZ_BASE 0x00001000 + + +#define SD_SZ_MASK 0xfffff000 +#define SD_SG32 0x00000008 +#define SD_SZ_ALIGN 0x00000fff + + +#ifndef _LANGUAGE_ASSEMBLY + +typedef volatile struct _aidmp { + uint32 oobselina30; + uint32 oobselina74; + uint32 PAD[6]; + uint32 oobselinb30; + uint32 oobselinb74; + uint32 PAD[6]; + uint32 oobselinc30; + uint32 oobselinc74; + uint32 PAD[6]; + uint32 oobselind30; + uint32 oobselind74; + uint32 PAD[38]; + uint32 oobselouta30; + uint32 oobselouta74; + uint32 PAD[6]; + uint32 oobseloutb30; + uint32 oobseloutb74; + uint32 PAD[6]; + uint32 oobseloutc30; + uint32 oobseloutc74; + uint32 PAD[6]; + uint32 oobseloutd30; + uint32 oobseloutd74; + uint32 PAD[38]; + uint32 oobsynca; + uint32 oobseloutaen; + uint32 PAD[6]; + uint32 oobsyncb; + uint32 oobseloutben; + uint32 PAD[6]; + uint32 oobsyncc; + uint32 oobseloutcen; + uint32 PAD[6]; + uint32 oobsyncd; + uint32 oobseloutden; + uint32 PAD[38]; + uint32 oobaextwidth; + uint32 oobainwidth; + uint32 oobaoutwidth; + uint32 PAD[5]; + uint32 oobbextwidth; + uint32 oobbinwidth; + uint32 oobboutwidth; + uint32 PAD[5]; + uint32 oobcextwidth; + uint32 oobcinwidth; + uint32 oobcoutwidth; + uint32 PAD[5]; + uint32 oobdextwidth; + uint32 oobdinwidth; + uint32 oobdoutwidth; + uint32 PAD[37]; + uint32 ioctrlset; + uint32 ioctrlclear; + uint32 ioctrl; + uint32 PAD[61]; + uint32 iostatus; + uint32 PAD[127]; + uint32 ioctrlwidth; + uint32 iostatuswidth; + uint32 PAD[62]; + uint32 resetctrl; + uint32 resetstatus; + uint32 resetreadid; + uint32 resetwriteid; + uint32 PAD[60]; + uint32 errlogctrl; + uint32 errlogdone; + uint32 errlogstatus; + uint32 errlogaddrlo; + uint32 errlogaddrhi; + uint32 errlogid; + uint32 errloguser; + uint32 errlogflags; + uint32 PAD[56]; + uint32 intstatus; + uint32 PAD[127]; + 
uint32 config; + uint32 PAD[63]; + uint32 itcr; + uint32 PAD[3]; + uint32 itipooba; + uint32 itipoobb; + uint32 itipoobc; + uint32 itipoobd; + uint32 PAD[4]; + uint32 itipoobaout; + uint32 itipoobbout; + uint32 itipoobcout; + uint32 itipoobdout; + uint32 PAD[4]; + uint32 itopooba; + uint32 itopoobb; + uint32 itopoobc; + uint32 itopoobd; + uint32 PAD[4]; + uint32 itopoobain; + uint32 itopoobbin; + uint32 itopoobcin; + uint32 itopoobdin; + uint32 PAD[4]; + uint32 itopreset; + uint32 PAD[15]; + uint32 peripherialid4; + uint32 peripherialid5; + uint32 peripherialid6; + uint32 peripherialid7; + uint32 peripherialid0; + uint32 peripherialid1; + uint32 peripherialid2; + uint32 peripherialid3; + uint32 componentid0; + uint32 componentid1; + uint32 componentid2; + uint32 componentid3; +} aidmp_t; + +#endif + + +#define OOB_BUSCONFIG 0x020 +#define OOB_STATUSA 0x100 +#define OOB_STATUSB 0x104 +#define OOB_STATUSC 0x108 +#define OOB_STATUSD 0x10c +#define OOB_ENABLEA0 0x200 +#define OOB_ENABLEA1 0x204 +#define OOB_ENABLEA2 0x208 +#define OOB_ENABLEA3 0x20c +#define OOB_ENABLEB0 0x280 +#define OOB_ENABLEB1 0x284 +#define OOB_ENABLEB2 0x288 +#define OOB_ENABLEB3 0x28c +#define OOB_ENABLEC0 0x300 +#define OOB_ENABLEC1 0x304 +#define OOB_ENABLEC2 0x308 +#define OOB_ENABLEC3 0x30c +#define OOB_ENABLED0 0x380 +#define OOB_ENABLED1 0x384 +#define OOB_ENABLED2 0x388 +#define OOB_ENABLED3 0x38c +#define OOB_ITCR 0xf00 +#define OOB_ITIPOOBA 0xf10 +#define OOB_ITIPOOBB 0xf14 +#define OOB_ITIPOOBC 0xf18 +#define OOB_ITIPOOBD 0xf1c +#define OOB_ITOPOOBA 0xf30 +#define OOB_ITOPOOBB 0xf34 +#define OOB_ITOPOOBC 0xf38 +#define OOB_ITOPOOBD 0xf3c + + +#define AI_OOBSELINA30 0x000 +#define AI_OOBSELINA74 0x004 +#define AI_OOBSELINB30 0x020 +#define AI_OOBSELINB74 0x024 +#define AI_OOBSELINC30 0x040 +#define AI_OOBSELINC74 0x044 +#define AI_OOBSELIND30 0x060 +#define AI_OOBSELIND74 0x064 +#define AI_OOBSELOUTA30 0x100 +#define AI_OOBSELOUTA74 0x104 +#define AI_OOBSELOUTB30 0x120 +#define AI_OOBSELOUTB74 0x124 +#define AI_OOBSELOUTC30 0x140 +#define AI_OOBSELOUTC74 0x144 +#define AI_OOBSELOUTD30 0x160 +#define AI_OOBSELOUTD74 0x164 +#define AI_OOBSYNCA 0x200 +#define AI_OOBSELOUTAEN 0x204 +#define AI_OOBSYNCB 0x220 +#define AI_OOBSELOUTBEN 0x224 +#define AI_OOBSYNCC 0x240 +#define AI_OOBSELOUTCEN 0x244 +#define AI_OOBSYNCD 0x260 +#define AI_OOBSELOUTDEN 0x264 +#define AI_OOBAEXTWIDTH 0x300 +#define AI_OOBAINWIDTH 0x304 +#define AI_OOBAOUTWIDTH 0x308 +#define AI_OOBBEXTWIDTH 0x320 +#define AI_OOBBINWIDTH 0x324 +#define AI_OOBBOUTWIDTH 0x328 +#define AI_OOBCEXTWIDTH 0x340 +#define AI_OOBCINWIDTH 0x344 +#define AI_OOBCOUTWIDTH 0x348 +#define AI_OOBDEXTWIDTH 0x360 +#define AI_OOBDINWIDTH 0x364 +#define AI_OOBDOUTWIDTH 0x368 + + +#define AI_IOCTRLSET 0x400 +#define AI_IOCTRLCLEAR 0x404 +#define AI_IOCTRL 0x408 +#define AI_IOSTATUS 0x500 +#define AI_RESETCTRL 0x800 +#define AI_RESETSTATUS 0x804 + + +#define AI_IOCTRLWIDTH 0x700 +#define AI_IOSTATUSWIDTH 0x704 + +#define AI_RESETREADID 0x808 +#define AI_RESETWRITEID 0x80c +#define AI_ERRLOGCTRL 0xa00 +#define AI_ERRLOGDONE 0xa04 +#define AI_ERRLOGSTATUS 0xa08 +#define AI_ERRLOGADDRLO 0xa0c +#define AI_ERRLOGADDRHI 0xa10 +#define AI_ERRLOGID 0xa14 +#define AI_ERRLOGUSER 0xa18 +#define AI_ERRLOGFLAGS 0xa1c +#define AI_INTSTATUS 0xa00 +#define AI_CONFIG 0xe00 +#define AI_ITCR 0xf00 +#define AI_ITIPOOBA 0xf10 +#define AI_ITIPOOBB 0xf14 +#define AI_ITIPOOBC 0xf18 +#define AI_ITIPOOBD 0xf1c +#define AI_ITIPOOBAOUT 0xf30 +#define AI_ITIPOOBBOUT 0xf34 +#define AI_ITIPOOBCOUT 0xf38 
+#define AI_ITIPOOBDOUT 0xf3c +#define AI_ITOPOOBA 0xf50 +#define AI_ITOPOOBB 0xf54 +#define AI_ITOPOOBC 0xf58 +#define AI_ITOPOOBD 0xf5c +#define AI_ITOPOOBAIN 0xf70 +#define AI_ITOPOOBBIN 0xf74 +#define AI_ITOPOOBCIN 0xf78 +#define AI_ITOPOOBDIN 0xf7c +#define AI_ITOPRESET 0xf90 +#define AI_PERIPHERIALID4 0xfd0 +#define AI_PERIPHERIALID5 0xfd4 +#define AI_PERIPHERIALID6 0xfd8 +#define AI_PERIPHERIALID7 0xfdc +#define AI_PERIPHERIALID0 0xfe0 +#define AI_PERIPHERIALID1 0xfe4 +#define AI_PERIPHERIALID2 0xfe8 +#define AI_PERIPHERIALID3 0xfec +#define AI_COMPONENTID0 0xff0 +#define AI_COMPONENTID1 0xff4 +#define AI_COMPONENTID2 0xff8 +#define AI_COMPONENTID3 0xffc + + +#define AIRC_RESET 1 + + +#define AICFG_OOB 0x00000020 +#define AICFG_IOS 0x00000010 +#define AICFG_IOC 0x00000008 +#define AICFG_TO 0x00000004 +#define AICFG_ERRL 0x00000002 +#define AICFG_RST 0x00000001 + + +#define OOB_SEL_OUTEN_B_5 15 +#define OOB_SEL_OUTEN_B_6 23 + +#endif diff --git a/drivers/net/wireless/bcmdhd/include/bcmcdc.h b/drivers/net/wireless/bcmdhd/include/bcmcdc.h new file mode 100644 index 0000000000000..77a20f87b7eaf --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmcdc.h @@ -0,0 +1,121 @@ +/* + * CDC network driver ioctl/indication encoding + * Broadcom 802.11abg Networking Device Driver + * + * Definitions subject to change without notice. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: bcmcdc.h 277737 2011-08-16 17:54:59Z $ + */ + +#ifndef _bcmcdc_h_ +#define _bcmcdc_h_ +#include + +typedef struct cdc_ioctl { + uint32 cmd; + uint32 len; + uint32 flags; + uint32 status; +} cdc_ioctl_t; + + +#define CDC_MAX_MSG_SIZE ETHER_MAX_LEN + + +#define CDCL_IOC_OUTLEN_MASK 0x0000FFFF + +#define CDCL_IOC_OUTLEN_SHIFT 0 +#define CDCL_IOC_INLEN_MASK 0xFFFF0000 +#define CDCL_IOC_INLEN_SHIFT 16 + + +#define CDCF_IOC_ERROR 0x01 +#define CDCF_IOC_SET 0x02 +#define CDCF_IOC_OVL_IDX_MASK 0x3c +#define CDCF_IOC_OVL_RSV 0x40 +#define CDCF_IOC_OVL 0x80 +#define CDCF_IOC_ACTION_MASK 0xfe +#define CDCF_IOC_ACTION_SHIFT 1 +#define CDCF_IOC_IF_MASK 0xF000 +#define CDCF_IOC_IF_SHIFT 12 +#define CDCF_IOC_ID_MASK 0xFFFF0000 +#define CDCF_IOC_ID_SHIFT 16 + +#define CDC_IOC_IF_IDX(flags) (((flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT) +#define CDC_IOC_ID(flags) (((flags) & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT) + +#define CDC_GET_IF_IDX(hdr) \ + ((int)((((hdr)->flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT)) +#define CDC_SET_IF_IDX(hdr, idx) \ + ((hdr)->flags = (((hdr)->flags & ~CDCF_IOC_IF_MASK) | ((idx) << CDCF_IOC_IF_SHIFT))) + + + +#define BDC_HEADER_LEN 4 + +#define BDC_PROTO_VER_1 1 +#define BDC_PROTO_VER 2 + +#define BDC_FLAG_VER_MASK 0xf0 +#define BDC_FLAG_VER_SHIFT 4 + +#define BDC_FLAG__UNUSED 0x03 +#define BDC_FLAG_SUM_GOOD 0x04 +#define BDC_FLAG_SUM_NEEDED 0x08 + +#define BDC_PRIORITY_MASK 0x7 + +#define BDC_FLAG2_FC_FLAG 0x10 + +#define BDC_PRIORITY_FC_SHIFT 4 + +#define BDC_FLAG2_IF_MASK 0x0f +#define BDC_FLAG2_IF_SHIFT 0 +#define BDC_FLAG2_PAD_MASK 0xf0 +#define BDC_FLAG_PAD_MASK 0x03 +#define BDC_FLAG2_PAD_SHIFT 2 +#define BDC_FLAG_PAD_SHIFT 0 +#define BDC_FLAG2_PAD_IDX 0x3c +#define BDC_FLAG_PAD_IDX 0x03 +#define BDC_GET_PAD_LEN(hdr) \ + ((int)(((((hdr)->flags2) & BDC_FLAG2_PAD_MASK) >> BDC_FLAG2_PAD_SHIFT) | \ + ((((hdr)->flags) & BDC_FLAG_PAD_MASK) >> BDC_FLAG_PAD_SHIFT))) +#define BDC_SET_PAD_LEN(hdr, idx) \ + ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_PAD_MASK) | \ + (((idx) & BDC_FLAG2_PAD_IDX) << BDC_FLAG2_PAD_SHIFT))); \ + ((hdr)->flags = (((hdr)->flags & ~BDC_FLAG_PAD_MASK) | \ + (((idx) & BDC_FLAG_PAD_IDX) << BDC_FLAG_PAD_SHIFT))) + +#define BDC_GET_IF_IDX(hdr) \ + ((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT)) +#define BDC_SET_IF_IDX(hdr, idx) \ + ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | ((idx) << BDC_FLAG2_IF_SHIFT))) + +struct bdc_header { + uint8 flags; + uint8 priority; + uint8 flags2; + uint8 dataOffset; +}; + +#endif diff --git a/drivers/net/wireless/bcmdhd/include/bcmdefs.h b/drivers/net/wireless/bcmdhd/include/bcmdefs.h new file mode 100644 index 0000000000000..17cc0e955f62a --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmdefs.h @@ -0,0 +1,231 @@ +/* + * Misc system wide definitions + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the 
license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: bcmdefs.h 279282 2011-08-23 22:44:02Z $ + */ + + +#ifndef _bcmdefs_h_ +#define _bcmdefs_h_ + + + + +#define BCM_REFERENCE(data) ((void)(data)) + + + +#define bcmreclaimed 0 +#define _data _data +#define _fn _fn +#define BCMPREATTACHDATA(_data) _data +#define BCMPREATTACHFN(_fn) _fn +#define _data _data +#define _fn _fn +#define _fn _fn +#define BCMNMIATTACHFN(_fn) _fn +#define BCMNMIATTACHDATA(_data) _data +#define BCMOVERLAY0DATA(_sym) _sym +#define BCMOVERLAY0FN(_fn) _fn +#define BCMOVERLAY1DATA(_sym) _sym +#define BCMOVERLAY1FN(_fn) _fn +#define BCMOVERLAYERRFN(_fn) _fn +#define CONST const +#define BCMFASTPATH + + + + +#define _data _data +#define BCMROMDAT_NAME(_data) _data +#define _fn _fn +#define _fn _fn +#define STATIC static +#define BCMROMDAT_ARYSIZ(data) ARRAYSIZE(data) +#define BCMROMDAT_SIZEOF(data) sizeof(data) +#define BCMROMDAT_APATCH(data) +#define BCMROMDAT_SPATCH(data) + + + +#define OVERLAY_INLINE +#define OSTATIC static +#define BCMOVERLAYDATA(_ovly, _sym) _sym +#define BCMOVERLAYFN(_ovly, _fn) _fn +#define BCMOVERLAYERRFN(_fn) _fn +#define BCMROMOVERLAYDATA(_ovly, _data) _data +#define BCMROMOVERLAYFN(_ovly, _fn) _fn +#define BCMATTACHOVERLAYDATA(_ovly, _sym) _sym +#define BCMATTACHOVERLAYFN(_ovly, _fn) _fn +#define BCMINITOVERLAYDATA(_ovly, _sym) _sym +#define BCMINITOVERLAYFN(_ovly, _fn) _fn +#define BCMUNINITOVERLAYFN(_ovly, _fn) _fn + + + +#define SI_BUS 0 +#define PCI_BUS 1 +#define PCMCIA_BUS 2 +#define SDIO_BUS 3 +#define JTAG_BUS 4 +#define USB_BUS 5 +#define SPI_BUS 6 +#define RPC_BUS 7 + + +#ifdef BCMBUSTYPE +#define BUSTYPE(bus) (BCMBUSTYPE) +#else +#define BUSTYPE(bus) (bus) +#endif + + +#ifdef BCMCHIPTYPE +#define CHIPTYPE(bus) (BCMCHIPTYPE) +#else +#define CHIPTYPE(bus) (bus) +#endif + + + +#if defined(BCMSPROMBUS) +#define SPROMBUS (BCMSPROMBUS) +#elif defined(SI_PCMCIA_SROM) +#define SPROMBUS (PCMCIA_BUS) +#else +#define SPROMBUS (PCI_BUS) +#endif + + +#ifdef BCMCHIPID +#define CHIPID(chip) (BCMCHIPID) +#else +#define CHIPID(chip) (chip) +#endif + +#ifdef BCMCHIPREV +#define CHIPREV(rev) (BCMCHIPREV) +#else +#define CHIPREV(rev) (rev) +#endif + + +#define DMADDR_MASK_32 0x0 +#define DMADDR_MASK_30 0xc0000000 +#define DMADDR_MASK_0 0xffffffff + +#define DMADDRWIDTH_30 30 +#define DMADDRWIDTH_32 32 +#define DMADDRWIDTH_63 63 +#define DMADDRWIDTH_64 64 + +#ifdef BCMDMA64OSL +typedef struct { + uint32 loaddr; + uint32 hiaddr; +} dma64addr_t; + +typedef dma64addr_t dmaaddr_t; +#define PHYSADDRHI(_pa) ((_pa).hiaddr) +#define PHYSADDRHISET(_pa, _val) \ + do { \ + (_pa).hiaddr = (_val); \ + } while (0) +#define PHYSADDRLO(_pa) ((_pa).loaddr) +#define PHYSADDRLOSET(_pa, _val) \ + do { \ + (_pa).loaddr = (_val); \ + } while (0) + +#else +typedef unsigned long dmaaddr_t; +#define PHYSADDRHI(_pa) (0) +#define PHYSADDRHISET(_pa, _val) +#define PHYSADDRLO(_pa) ((_pa)) +#define PHYSADDRLOSET(_pa, _val) \ + do { \ + (_pa) = (_val); \ + } while (0) +#endif + + +typedef struct { + dmaaddr_t addr; + uint32 length; +} hnddma_seg_t; + +#define MAX_DMA_SEGS 4 + + +typedef struct { + void *oshdmah; + uint origsize; + uint nsegs; + hnddma_seg_t 
segs[MAX_DMA_SEGS]; +} hnddma_seg_map_t; + + + + +#if defined(BCM_RPC_NOCOPY) || defined(BCM_RCP_TXNOCOPY) + +#define BCMEXTRAHDROOM 220 +#else +#define BCMEXTRAHDROOM 172 +#endif + + +#define BCMDONGLEHDRSZ 12 +#define BCMDONGLEPADSZ 16 + +#define BCMDONGLEOVERHEAD (BCMDONGLEHDRSZ + BCMDONGLEPADSZ) + + +#if defined(BCMASSERT_LOG) +#define BCMASSERT_SUPPORT +#endif + + +#define BITFIELD_MASK(width) \ + (((unsigned)1 << (width)) - 1) +#define GFIELD(val, field) \ + (((val) >> field ## _S) & field ## _M) +#define SFIELD(val, field, bits) \ + (((val) & (~(field ## _M << field ## _S))) | \ + ((unsigned)(bits) << field ## _S)) + + +#ifdef BCMSMALL +#undef BCMSPACE +#define bcmspace FALSE +#else +#define BCMSPACE +#define bcmspace TRUE +#endif + + +#define MAXSZ_NVRAM_VARS 4096 + +#define LOCATOR_EXTERN static + +#endif diff --git a/drivers/net/wireless/bcmdhd/include/bcmdevs.h b/drivers/net/wireless/bcmdhd/include/bcmdevs.h new file mode 100644 index 0000000000000..cdfc5fe6c8fda --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmdevs.h @@ -0,0 +1,747 @@ +/* + * Broadcom device-specific manifest constants. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: bcmdevs.h 332966 2012-05-11 22:40:21Z $ + */ + + +#ifndef _BCMDEVS_H +#define _BCMDEVS_H + + +#define VENDOR_EPIGRAM 0xfeda +#define VENDOR_BROADCOM 0x14e4 +#define VENDOR_3COM 0x10b7 +#define VENDOR_NETGEAR 0x1385 +#define VENDOR_DIAMOND 0x1092 +#define VENDOR_INTEL 0x8086 +#define VENDOR_DELL 0x1028 +#define VENDOR_HP 0x103c +#define VENDOR_HP_COMPAQ 0x0e11 +#define VENDOR_APPLE 0x106b +#define VENDOR_SI_IMAGE 0x1095 +#define VENDOR_BUFFALO 0x1154 +#define VENDOR_TI 0x104c +#define VENDOR_RICOH 0x1180 +#define VENDOR_JMICRON 0x197b + + + +#define VENDOR_BROADCOM_PCMCIA 0x02d0 + + +#define VENDOR_BROADCOM_SDIO 0x00BF + + +#define BCM_DNGL_VID 0x0a5c +#define BCM_DNGL_BL_PID_4328 0xbd12 +#define BCM_DNGL_BL_PID_4322 0xbd13 +#define BCM_DNGL_BL_PID_4319 0xbd16 +#define BCM_DNGL_BL_PID_43236 0xbd17 +#define BCM_DNGL_BL_PID_4332 0xbd18 +#define BCM_DNGL_BL_PID_4330 0xbd19 +#define BCM_DNGL_BL_PID_43239 0xbd1b +#define BCM_DNGL_BDC_PID 0x0bdc +#define BCM_DNGL_JTAG_PID 0x4a44 +#define BCM_DNGL_BL_PID_4324 0xbd1c + + +#define BCM_HWUSB_PID_43239 43239 + + +#define BCM4210_DEVICE_ID 0x1072 +#define BCM4230_DEVICE_ID 0x1086 +#define BCM4401_ENET_ID 0x170c +#define BCM3352_DEVICE_ID 0x3352 +#define BCM3360_DEVICE_ID 0x3360 +#define BCM4211_DEVICE_ID 0x4211 +#define BCM4231_DEVICE_ID 0x4231 +#define BCM4303_D11B_ID 0x4303 +#define BCM4311_D11G_ID 0x4311 +#define BCM4311_D11DUAL_ID 0x4312 +#define BCM4311_D11A_ID 0x4313 +#define BCM4328_D11DUAL_ID 0x4314 +#define BCM4328_D11G_ID 0x4315 +#define BCM4328_D11A_ID 0x4316 +#define BCM4318_D11G_ID 0x4318 +#define BCM4318_D11DUAL_ID 0x4319 +#define BCM4318_D11A_ID 0x431a +#define BCM4325_D11DUAL_ID 0x431b +#define BCM4325_D11G_ID 0x431c +#define BCM4325_D11A_ID 0x431d +#define BCM4306_D11G_ID 0x4320 +#define BCM4306_D11A_ID 0x4321 +#define BCM4306_UART_ID 0x4322 +#define BCM4306_V90_ID 0x4323 +#define BCM4306_D11DUAL_ID 0x4324 +#define BCM4306_D11G_ID2 0x4325 +#define BCM4321_D11N_ID 0x4328 +#define BCM4321_D11N2G_ID 0x4329 +#define BCM4321_D11N5G_ID 0x432a +#define BCM4322_D11N_ID 0x432b +#define BCM4322_D11N2G_ID 0x432c +#define BCM4322_D11N5G_ID 0x432d +#define BCM4329_D11N_ID 0x432e +#define BCM4329_D11N2G_ID 0x432f +#define BCM4329_D11N5G_ID 0x4330 +#define BCM4315_D11DUAL_ID 0x4334 +#define BCM4315_D11G_ID 0x4335 +#define BCM4315_D11A_ID 0x4336 +#define BCM4319_D11N_ID 0x4337 +#define BCM4319_D11N2G_ID 0x4338 +#define BCM4319_D11N5G_ID 0x4339 +#define BCM43231_D11N2G_ID 0x4340 +#define BCM43221_D11N2G_ID 0x4341 +#define BCM43222_D11N_ID 0x4350 +#define BCM43222_D11N2G_ID 0x4351 +#define BCM43222_D11N5G_ID 0x4352 +#define BCM43224_D11N_ID 0x4353 +#define BCM43224_D11N_ID_VEN1 0x0576 +#define BCM43226_D11N_ID 0x4354 +#define BCM43236_D11N_ID 0x4346 +#define BCM43236_D11N2G_ID 0x4347 +#define BCM43236_D11N5G_ID 0x4348 +#define BCM43225_D11N2G_ID 0x4357 +#define BCM43421_D11N_ID 0xA99D +#define BCM4313_D11N2G_ID 0x4727 +#define BCM4330_D11N_ID 0x4360 +#define BCM4330_D11N2G_ID 0x4361 +#define BCM4330_D11N5G_ID 0x4362 +#define BCM4336_D11N_ID 0x4343 +#define BCM6362_D11N_ID 0x435f +#define BCM4331_D11N_ID 0x4331 +#define BCM4331_D11N2G_ID 0x4332 +#define BCM4331_D11N5G_ID 0x4333 +#define BCM43237_D11N_ID 0x4355 +#define BCM43237_D11N5G_ID 0x4356 +#define BCM43227_D11N2G_ID 0x4358 +#define BCM43228_D11N_ID 0x4359 +#define BCM43228_D11N5G_ID 0x435a +#define BCM43362_D11N_ID 0x4363 +#define BCM43239_D11N_ID 0x4370 +#define BCM4324_D11N_ID 0x4374 +#define BCM43217_D11N2G_ID 0x43a9 +#define BCM43131_D11N2G_ID 0x43aa + +#define BCM4314_D11N2G_ID 
0x4364 +#define BCM43142_D11N2G_ID 0x4365 + +#define BCMGPRS_UART_ID 0x4333 +#define BCMGPRS2_UART_ID 0x4344 +#define FPGA_JTAGM_ID 0x43f0 +#define BCM_JTAGM_ID 0x43f1 +#define SDIOH_FPGA_ID 0x43f2 +#define BCM_SDIOH_ID 0x43f3 +#define SDIOD_FPGA_ID 0x43f4 +#define SPIH_FPGA_ID 0x43f5 +#define BCM_SPIH_ID 0x43f6 +#define MIMO_FPGA_ID 0x43f8 +#define BCM_JTAGM2_ID 0x43f9 +#define SDHCI_FPGA_ID 0x43fa +#define BCM4402_ENET_ID 0x4402 +#define BCM4402_V90_ID 0x4403 +#define BCM4410_DEVICE_ID 0x4410 +#define BCM4412_DEVICE_ID 0x4412 +#define BCM4430_DEVICE_ID 0x4430 +#define BCM4432_DEVICE_ID 0x4432 +#define BCM4704_ENET_ID 0x4706 +#define BCM4710_DEVICE_ID 0x4710 +#define BCM47XX_AUDIO_ID 0x4711 +#define BCM47XX_V90_ID 0x4712 +#define BCM47XX_ENET_ID 0x4713 +#define BCM47XX_EXT_ID 0x4714 +#define BCM47XX_GMAC_ID 0x4715 +#define BCM47XX_USBH_ID 0x4716 +#define BCM47XX_USBD_ID 0x4717 +#define BCM47XX_IPSEC_ID 0x4718 +#define BCM47XX_ROBO_ID 0x4719 +#define BCM47XX_USB20H_ID 0x471a +#define BCM47XX_USB20D_ID 0x471b +#define BCM47XX_ATA100_ID 0x471d +#define BCM47XX_SATAXOR_ID 0x471e +#define BCM47XX_GIGETH_ID 0x471f +#define BCM4712_MIPS_ID 0x4720 +#define BCM4716_DEVICE_ID 0x4722 +#define BCM47XX_SMBUS_EMU_ID 0x47fe +#define BCM47XX_XOR_EMU_ID 0x47ff +#define EPI41210_DEVICE_ID 0xa0fa +#define EPI41230_DEVICE_ID 0xa10e +#define JINVANI_SDIOH_ID 0x4743 +#define BCM27XX_SDIOH_ID 0x2702 +#define PCIXX21_FLASHMEDIA_ID 0x803b +#define PCIXX21_SDIOH_ID 0x803c +#define R5C822_SDIOH_ID 0x0822 +#define JMICRON_SDIOH_ID 0x2381 + + +#define BCM4306_CHIP_ID 0x4306 +#define BCM4311_CHIP_ID 0x4311 +#define BCM43111_CHIP_ID 43111 +#define BCM43112_CHIP_ID 43112 +#define BCM4312_CHIP_ID 0x4312 +#define BCM4313_CHIP_ID 0x4313 +#define BCM43131_CHIP_ID 43131 +#define BCM4315_CHIP_ID 0x4315 +#define BCM4318_CHIP_ID 0x4318 +#define BCM4319_CHIP_ID 0x4319 +#define BCM4320_CHIP_ID 0x4320 +#define BCM4321_CHIP_ID 0x4321 +#define BCM43217_CHIP_ID 43217 +#define BCM4322_CHIP_ID 0x4322 +#define BCM43221_CHIP_ID 43221 +#define BCM43222_CHIP_ID 43222 +#define BCM43224_CHIP_ID 43224 +#define BCM43225_CHIP_ID 43225 +#define BCM43227_CHIP_ID 43227 +#define BCM43228_CHIP_ID 43228 +#define BCM43226_CHIP_ID 43226 +#define BCM43231_CHIP_ID 43231 +#define BCM43234_CHIP_ID 43234 +#define BCM43235_CHIP_ID 43235 +#define BCM43236_CHIP_ID 43236 +#define BCM43237_CHIP_ID 43237 +#define BCM43238_CHIP_ID 43238 +#define BCM43239_CHIP_ID 43239 +#define BCM43420_CHIP_ID 43420 +#define BCM43421_CHIP_ID 43421 +#define BCM43428_CHIP_ID 43428 +#define BCM43431_CHIP_ID 43431 +#define BCM4325_CHIP_ID 0x4325 +#define BCM4328_CHIP_ID 0x4328 +#define BCM4329_CHIP_ID 0x4329 +#define BCM4331_CHIP_ID 0x4331 +#define BCM4336_CHIP_ID 0x4336 +#define BCM43362_CHIP_ID 43362 +#define BCM4330_CHIP_ID 0x4330 +#define BCM6362_CHIP_ID 0x6362 +#define BCM4314_CHIP_ID 0x4314 +#define BCM43142_CHIP_ID 43142 +#define BCM4324_CHIP_ID 0x4324 + +#define BCM4342_CHIP_ID 4342 +#define BCM4402_CHIP_ID 0x4402 +#define BCM4704_CHIP_ID 0x4704 +#define BCM4710_CHIP_ID 0x4710 +#define BCM4712_CHIP_ID 0x4712 +#define BCM4716_CHIP_ID 0x4716 +#define BCM47162_CHIP_ID 47162 +#define BCM4748_CHIP_ID 0x4748 +#define BCM4749_CHIP_ID 0x4749 +#define BCM4785_CHIP_ID 0x4785 +#define BCM5350_CHIP_ID 0x5350 +#define BCM5352_CHIP_ID 0x5352 +#define BCM5354_CHIP_ID 0x5354 +#define BCM5365_CHIP_ID 0x5365 +#define BCM5356_CHIP_ID 0x5356 +#define BCM5357_CHIP_ID 0x5357 +#define BCM53572_CHIP_ID 53572 + + +#define BCM4303_PKG_ID 2 +#define BCM4309_PKG_ID 1 +#define BCM4712LARGE_PKG_ID 0 
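+/*
+ * Illustrative note (not part of the original header): the chip and package
+ * IDs above are normally tested through the CHIPID()/CHIPREV() wrappers from
+ * bcmdefs.h, so a build pinned to a single chip (BCMCHIPID defined) collapses
+ * the comparison to a compile-time constant and dead branches are dropped.
+ * The sih handle and its chip/chiprev fields are assumed here.
+ *
+ *	if (CHIPID(sih->chip) == BCM4329_CHIP_ID && CHIPREV(sih->chiprev) >= 2) {
+ *		... 4329 rev >= 2 specific setup ...
+ *	}
+ */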
+#define BCM4712SMALL_PKG_ID 1 +#define BCM4712MID_PKG_ID 2 +#define BCM4328USBD11G_PKG_ID 2 +#define BCM4328USBDUAL_PKG_ID 3 +#define BCM4328SDIOD11G_PKG_ID 4 +#define BCM4328SDIODUAL_PKG_ID 5 +#define BCM4329_289PIN_PKG_ID 0 +#define BCM4329_182PIN_PKG_ID 1 +#define BCM5354E_PKG_ID 1 +#define BCM4716_PKG_ID 8 +#define BCM4717_PKG_ID 9 +#define BCM4718_PKG_ID 10 +#define BCM5356_PKG_NONMODE 1 +#define BCM5358U_PKG_ID 8 +#define BCM5358_PKG_ID 9 +#define BCM47186_PKG_ID 10 +#define BCM5357_PKG_ID 11 +#define BCM5356U_PKG_ID 12 +#define BCM53572_PKG_ID 8 +#define BCM47188_PKG_ID 9 +#define BCM4331TT_PKG_ID 8 +#define BCM4331TN_PKG_ID 9 +#define BCM4331TNA0_PKG_ID 0xb + + +#define HDLSIM5350_PKG_ID 1 +#define HDLSIM_PKG_ID 14 +#define HWSIM_PKG_ID 15 +#define BCM43224_FAB_CSM 0x8 +#define BCM43224_FAB_SMIC 0xa +#define BCM4336_WLBGA_PKG_ID 0x8 +#define BCM4330_WLBGA_PKG_ID 0x0 +#define BCM4314PCIE_ARM_PKG_ID (8 | 0) +#define BCM4314SDIO_PKG_ID (8 | 1) +#define BCM4314PCIE_PKG_ID (8 | 2) +#define BCM4314SDIO_ARM_PKG_ID (8 | 3) +#define BCM4314SDIO_FPBGA_PKG_ID (8 | 4) +#define BCM4314DEV_PKG_ID (8 | 6) + +#define PCIXX21_FLASHMEDIA0_ID 0x8033 +#define PCIXX21_SDIOH0_ID 0x8034 + + +#define BFL_BTC2WIRE 0x00000001 +#define BFL_BTCOEX 0x00000001 +#define BFL_PACTRL 0x00000002 +#define BFL_AIRLINEMODE 0x00000004 +#define BFL_ADCDIV 0x00000008 +#define BFL_ENETROBO 0x00000010 +#define BFL_NOPLLDOWN 0x00000020 +#define BFL_CCKHIPWR 0x00000040 +#define BFL_ENETADM 0x00000080 +#define BFL_ENETVLAN 0x00000100 +#ifdef WLAFTERBURNER +#define BFL_AFTERBURNER 0x00000200 +#endif +#define BFL_NOPCI 0x00000400 +#define BFL_FEM 0x00000800 +#define BFL_EXTLNA 0x00001000 +#define BFL_HGPA 0x00002000 +#define BFL_BTC2WIRE_ALTGPIO 0x00004000 +#define BFL_ALTIQ 0x00008000 +#define BFL_NOPA 0x00010000 +#define BFL_RSSIINV 0x00020000 +#define BFL_PAREF 0x00040000 +#define BFL_3TSWITCH 0x00080000 +#define BFL_PHASESHIFT 0x00100000 +#define BFL_BUCKBOOST 0x00200000 +#define BFL_FEM_BT 0x00400000 +#define BFL_NOCBUCK 0x00800000 +#define BFL_CCKFAVOREVM 0x01000000 +#define BFL_PALDO 0x02000000 +#define BFL_LNLDO2_2P5 0x04000000 +#define BFL_FASTPWR 0x08000000 +#define BFL_UCPWRCTL_MININDX 0x08000000 +#define BFL_EXTLNA_5GHz 0x10000000 +#define BFL_TRSW_1by2 0x20000000 +#define BFL_LO_TRSW_R_5GHz 0x40000000 +#define BFL_ELNA_GAINDEF 0x80000000 +#define BFL_EXTLNA_TX 0x20000000 + + +#define BFL2_RXBB_INT_REG_DIS 0x00000001 +#define BFL2_APLL_WAR 0x00000002 +#define BFL2_TXPWRCTRL_EN 0x00000004 +#define BFL2_2X4_DIV 0x00000008 +#define BFL2_5G_PWRGAIN 0x00000010 +#define BFL2_PCIEWAR_OVR 0x00000020 +#define BFL2_CAESERS_BRD 0x00000040 +#define BFL2_BTC3WIRE 0x00000080 +#define BFL2_BTCLEGACY 0x00000080 +#define BFL2_SKWRKFEM_BRD 0x00000100 +#define BFL2_SPUR_WAR 0x00000200 +#define BFL2_GPLL_WAR 0x00000400 +#define BFL2_TRISTATE_LED 0x00000800 +#define BFL2_SINGLEANT_CCK 0x00001000 +#define BFL2_2G_SPUR_WAR 0x00002000 +#define BFL2_BPHY_ALL_TXCORES 0x00004000 +#define BFL2_FCC_BANDEDGE_WAR 0x00008000 +#define BFL2_GPLL_WAR2 0x00010000 +#define BFL2_IPALVLSHIFT_3P3 0x00020000 +#define BFL2_INTERNDET_TXIQCAL 0x00040000 +#define BFL2_XTALBUFOUTEN 0x00080000 +#define BFL2_ANAPACTRL_2G 0x00100000 +#define BFL2_ANAPACTRL_5G 0x00200000 +#define BFL2_ELNACTRL_TRSW_2G 0x00400000 +#define BFL2_BT_SHARE_ANT0 0x00800000 +#define BFL2_TEMPSENSE_HIGHER 0x01000000 +#define BFL2_BTC3WIREONLY 0x02000000 +#define BFL2_PWR_NOMINAL 0x04000000 +#define BFL2_EXTLNA_TX 0x08000000 + +#define BFL2_4313_RADIOREG 0x10000000 +#define 
BFL2_SECI_LOPWR_DIS 0x20000000 + + + +#define BOARD_GPIO_BTC3W_IN 0x850 +#define BOARD_GPIO_BTC3W_OUT 0x020 +#define BOARD_GPIO_BTCMOD_IN 0x010 +#define BOARD_GPIO_BTCMOD_OUT 0x020 +#define BOARD_GPIO_BTC_IN 0x080 +#define BOARD_GPIO_BTC_OUT 0x100 +#define BOARD_GPIO_PACTRL 0x200 +#define BOARD_GPIO_12 0x1000 +#define BOARD_GPIO_13 0x2000 +#define BOARD_GPIO_BTC4_IN 0x0800 +#define BOARD_GPIO_BTC4_BT 0x2000 +#define BOARD_GPIO_BTC4_STAT 0x4000 +#define BOARD_GPIO_BTC4_WLAN 0x8000 +#define BOARD_GPIO_1_WLAN_PWR 0x2 +#define BOARD_GPIO_4_WLAN_PWR 0x10 + +#define GPIO_BTC4W_OUT_4312 0x010 +#define GPIO_BTC4W_OUT_43224 0x020 +#define GPIO_BTC4W_OUT_43224_SHARED 0x0e0 +#define GPIO_BTC4W_OUT_43225 0x0e0 +#define GPIO_BTC4W_OUT_43421 0x020 +#define GPIO_BTC4W_OUT_4313 0x060 + +#define PCI_CFG_GPIO_SCS 0x10 +#define PCI_CFG_GPIO_HWRAD 0x20 +#define PCI_CFG_GPIO_XTAL 0x40 +#define PCI_CFG_GPIO_PLL 0x80 + + +#define PLL_DELAY 150 +#define FREF_DELAY 200 +#define MIN_SLOW_CLK 32 +#define XTAL_ON_DELAY 1000 + + +#define BU4710_BOARD 0x0400 +#define VSIM4710_BOARD 0x0401 +#define QT4710_BOARD 0x0402 + +#define BU4309_BOARD 0x040a +#define BCM94309CB_BOARD 0x040b +#define BCM94309MP_BOARD 0x040c +#define BCM4309AP_BOARD 0x040d + +#define BCM94302MP_BOARD 0x040e + +#define BU4306_BOARD 0x0416 +#define BCM94306CB_BOARD 0x0417 +#define BCM94306MP_BOARD 0x0418 + +#define BCM94710D_BOARD 0x041a +#define BCM94710R1_BOARD 0x041b +#define BCM94710R4_BOARD 0x041c +#define BCM94710AP_BOARD 0x041d + +#define BU2050_BOARD 0x041f + +#define BCM94306P50_BOARD 0x0420 + +#define BCM94309G_BOARD 0x0421 + +#define BU4704_BOARD 0x0423 +#define BU4702_BOARD 0x0424 + +#define BCM94306PC_BOARD 0x0425 + +#define MPSG4306_BOARD 0x0427 + +#define BCM94702MN_BOARD 0x0428 + + +#define BCM94702CPCI_BOARD 0x0429 + + +#define BCM95380RR_BOARD 0x042a + + +#define BCM94306CBSG_BOARD 0x042b + + +#define PCSG94306_BOARD 0x042d + + +#define BU4704SD_BOARD 0x042e + + +#define BCM94704AGR_BOARD 0x042f + + +#define BCM94308MP_BOARD 0x0430 + + +#define BCM94306GPRS_BOARD 0x0432 + + +#define BU5365_FPGA_BOARD 0x0433 + +#define BU4712_BOARD 0x0444 +#define BU4712SD_BOARD 0x045d +#define BU4712L_BOARD 0x045f + + +#define BCM94712AP_BOARD 0x0445 +#define BCM94712P_BOARD 0x0446 + + +#define BU4318_BOARD 0x0447 +#define CB4318_BOARD 0x0448 +#define MPG4318_BOARD 0x0449 +#define MP4318_BOARD 0x044a +#define SD4318_BOARD 0x044b + + +#define BCM94313BU_BOARD 0x050f +#define BCM94313HM_BOARD 0x0510 +#define BCM94313EPA_BOARD 0x0511 +#define BCM94313HMG_BOARD 0x051C + + +#define BCM96338_BOARD 0x6338 +#define BCM96348_BOARD 0x6348 +#define BCM96358_BOARD 0x6358 +#define BCM96368_BOARD 0x6368 + + +#define BCM94306P_BOARD 0x044c + + +#define BCM94303MP_BOARD 0x044e + + +#define BCM94306MPSGH_BOARD 0x044f + + +#define BCM94306MPM 0x0450 +#define BCM94306MPL 0x0453 + + +#define BCM94712AGR_BOARD 0x0451 + + +#define PC4303_BOARD 0x0454 + + +#define BCM95350K_BOARD 0x0455 + + +#define BCM95350R_BOARD 0x0456 + + +#define BCM94306MPLNA_BOARD 0x0457 + + +#define BU4320_BOARD 0x0458 +#define BU4320S_BOARD 0x0459 +#define BCM94320PH_BOARD 0x045a + + +#define BCM94306MPH_BOARD 0x045b + + +#define BCM94306PCIV_BOARD 0x045c + +#define BU4712SD_BOARD 0x045d + +#define BCM94320PFLSH_BOARD 0x045e + +#define BU4712L_BOARD 0x045f +#define BCM94712LGR_BOARD 0x0460 +#define BCM94320R_BOARD 0x0461 + +#define BU5352_BOARD 0x0462 + +#define BCM94318MPGH_BOARD 0x0463 + +#define BU4311_BOARD 0x0464 +#define BCM94311MC_BOARD 0x0465 +#define BCM94311MCAG_BOARD 0x0466 + +#define 
BCM95352GR_BOARD 0x0467 + + +#define BCM95351AGR_BOARD 0x0470 + + +#define BCM94704MPCB_BOARD 0x0472 + + +#define BU4785_BOARD 0x0478 + + +#define BU4321_BOARD 0x046b +#define BU4321E_BOARD 0x047c +#define MP4321_BOARD 0x046c +#define CB2_4321_BOARD 0x046d +#define CB2_4321_AG_BOARD 0x0066 +#define MC4321_BOARD 0x046e + + +#define BU4328_BOARD 0x0481 +#define BCM4328SDG_BOARD 0x0482 +#define BCM4328SDAG_BOARD 0x0483 +#define BCM4328UG_BOARD 0x0484 +#define BCM4328UAG_BOARD 0x0485 +#define BCM4328PC_BOARD 0x0486 +#define BCM4328CF_BOARD 0x0487 + + +#define BCM94325DEVBU_BOARD 0x0490 +#define BCM94325BGABU_BOARD 0x0491 + +#define BCM94325SDGWB_BOARD 0x0492 + +#define BCM94325SDGMDL_BOARD 0x04aa +#define BCM94325SDGMDL2_BOARD 0x04c6 +#define BCM94325SDGMDL3_BOARD 0x04c9 + +#define BCM94325SDABGWBA_BOARD 0x04e1 + + +#define BCM94322MC_SSID 0x04a4 +#define BCM94322USB_SSID 0x04a8 +#define BCM94322HM_SSID 0x04b0 +#define BCM94322USB2D_SSID 0x04bf + + +#define BCM4312MCGSG_BOARD 0x04b5 + + +#define BCM94315DEVBU_SSID 0x04c2 +#define BCM94315USBGP_SSID 0x04c7 +#define BCM94315BGABU_SSID 0x04ca +#define BCM94315USBGP41_SSID 0x04cb + + +#define BCM94319DEVBU_SSID 0X04e5 +#define BCM94319USB_SSID 0X04e6 +#define BCM94319SD_SSID 0X04e7 + + +#define BCM94716NR2_SSID 0x04cd + + +#define BCM94319DEVBU_SSID 0X04e5 +#define BCM94319USBNP4L_SSID 0X04e6 +#define BCM94319WLUSBN4L_SSID 0X04e7 +#define BCM94319SDG_SSID 0X04ea +#define BCM94319LCUSBSDN4L_SSID 0X04eb +#define BCM94319USBB_SSID 0x04ee +#define BCM94319LCSDN4L_SSID 0X0507 +#define BCM94319LSUSBN4L_SSID 0X0508 +#define BCM94319SDNA4L_SSID 0X0517 +#define BCM94319SDELNA4L_SSID 0X0518 +#define BCM94319SDELNA6L_SSID 0X0539 +#define BCM94319ARCADYAN_SSID 0X0546 +#define BCM94319WINDSOR_SSID 0x0561 +#define BCM94319MLAP_SSID 0x0562 +#define BCM94319SDNA_SSID 0x058b +#define BCM94319BHEMU3_SSID 0x0563 +#define BCM94319SDHMB_SSID 0x058c +#define BCM94319SDBREF_SSID 0x05a1 +#define BCM94319USBSDB_SSID 0x05a2 + + + +#define BCM94329AGB_SSID 0X04b9 +#define BCM94329TDKMDL1_SSID 0X04ba +#define BCM94329TDKMDL11_SSID 0X04fc +#define BCM94329OLYMPICN18_SSID 0X04fd +#define BCM94329OLYMPICN90_SSID 0X04fe +#define BCM94329OLYMPICN90U_SSID 0X050c +#define BCM94329OLYMPICN90M_SSID 0X050b +#define BCM94329AGBF_SSID 0X04ff +#define BCM94329OLYMPICX17_SSID 0X0504 +#define BCM94329OLYMPICX17M_SSID 0X050a +#define BCM94329OLYMPICX17U_SSID 0X0509 +#define BCM94329OLYMPICUNO_SSID 0X0564 +#define BCM94329MOTOROLA_SSID 0X0565 +#define BCM94329OLYMPICLOCO_SSID 0X0568 + +#define BCM94336SD_WLBGABU_SSID 0x0511 +#define BCM94336SD_WLBGAREF_SSID 0x0519 +#define BCM94336SDGP_SSID 0x0538 +#define BCM94336SDG_SSID 0x0519 +#define BCM94336SDGN_SSID 0x0538 +#define BCM94336SDGFC_SSID 0x056B + + +#define BCM94330SDG_SSID 0x0528 +#define BCM94330SD_FCBGABU_SSID 0x052e +#define BCM94330SD_WLBGABU_SSID 0x052f +#define BCM94330SD_FCBGA_SSID 0x0530 +#define BCM94330FCSDAGB_SSID 0x0532 +#define BCM94330OLYMPICAMG_SSID 0x0549 +#define BCM94330OLYMPICAMGEPA_SSID 0x054F +#define BCM94330OLYMPICUNO3_SSID 0x0551 +#define BCM94330WLSDAGB_SSID 0x0547 +#define BCM94330CSPSDAGBB_SSID 0x054A + + +#define BCM943224X21 0x056e +#define BCM943224X21_FCC 0x00d1 + + +#define BCM943228BU8_SSID 0x0540 +#define BCM943228BU9_SSID 0x0541 +#define BCM943228BU_SSID 0x0542 +#define BCM943227HM4L_SSID 0x0543 +#define BCM943227HMB_SSID 0x0544 +#define BCM943228HM4L_SSID 0x0545 +#define BCM943228SD_SSID 0x0573 + + +#define BCM943239MOD_SSID 0x05ac +#define BCM943239REF_SSID 0x05aa + + +#define BCM94331X19 0x00D6 
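+/*
+ * Illustrative note (not part of the original header): the BFL_ and BFL2_
+ * bits defined earlier describe board wiring and are normally sourced from
+ * the "boardflags"/"boardflags2" SROM/NVRAM words, then tested bit by bit;
+ * the boardflags variable below is an assumed local holding that word.
+ *
+ *	if (boardflags & BFL_EXTLNA)
+ *		... board has an external LNA, adjust rx gain handling ...
+ *	if (boardflags & BFL_NOPLLDOWN)
+ *		... never power the PLL down on this board ...
+ */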
+#define BCM94331PCIEBT3Ax_SSID 0x00E4 +#define BCM94331X12_2G_SSID 0x00EC +#define BCM94331X12_5G_SSID 0x00ED +#define BCM94331X29B 0x00EF +#define BCM94331BU_SSID 0x0523 +#define BCM94331S9BU_SSID 0x0524 +#define BCM94331MC_SSID 0x0525 +#define BCM94331MCI_SSID 0x0526 +#define BCM94331PCIEBT4_SSID 0x0527 +#define BCM94331HM_SSID 0x0574 +#define BCM94331PCIEDUAL_SSID 0x059B +#define BCM94331MCH5_SSID 0x05A9 +#define BCM94331PCIEDUALV2_SSID 0x05B7 +#define BCM94331CS_SSID 0x05C6 +#define BCM94331CSAX_SSID 0x00EF + + +#define BCM953572BU_SSID 0x058D +#define BCM953572NR2_SSID 0x058E +#define BCM947188NR2_SSID 0x058F +#define BCM953572SDRNR2_SSID 0x0590 + + +#define BCM943236OLYMPICSULLEY_SSID 0x594 +#define BCM943236PREPROTOBLU2O3_SSID 0x5b9 +#define BCM943236USBELNA_SSID 0x5f8 + + +#define GPIO_NUMPINS 32 + + +#define RDL_RAM_BASE_4319 0x60000000 +#define RDL_RAM_BASE_4329 0x60000000 +#define RDL_RAM_SIZE_4319 0x48000 +#define RDL_RAM_SIZE_4329 0x48000 +#define RDL_RAM_SIZE_43236 0x70000 +#define RDL_RAM_BASE_43236 0x60000000 +#define RDL_RAM_SIZE_4328 0x60000 +#define RDL_RAM_BASE_4328 0x80000000 +#define RDL_RAM_SIZE_4322 0x60000 +#define RDL_RAM_BASE_4322 0x60000000 + + +#define MUXENAB_UART 0x00000001 +#define MUXENAB_GPIO 0x00000002 +#define MUXENAB_ERCX 0x00000004 +#define MUXENAB_JTAG 0x00000008 +#define MUXENAB_HOST_WAKE 0x00000010 + +#endif diff --git a/drivers/net/wireless/bcmdhd/include/bcmendian.h b/drivers/net/wireless/bcmdhd/include/bcmendian.h new file mode 100644 index 0000000000000..f3356a724b44e --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmendian.h @@ -0,0 +1,279 @@ +/* + * Byte order utilities + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: bcmendian.h 277737 2011-08-16 17:54:59Z $ + * + * This file by default provides proper behavior on little-endian architectures. + * On big-endian architectures, IL_BIGENDIAN should be defined. 
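+ *
+ * In the default (little-endian) configuration the hton16/hton32 and
+ * ntoh16/ntoh32 macros byte-swap, while ltoh16/ltoh32 and htol16/htol32 are
+ * identity operations. The _ua ("unaligned access") variants below assemble
+ * and store values a byte at a time, so they are safe for pointers that are
+ * not naturally aligned.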
+ */ + + +#ifndef _BCMENDIAN_H_ +#define _BCMENDIAN_H_ + +#include + + +#define BCMSWAP16(val) \ + ((uint16)((((uint16)(val) & (uint16)0x00ffU) << 8) | \ + (((uint16)(val) & (uint16)0xff00U) >> 8))) + + +#define BCMSWAP32(val) \ + ((uint32)((((uint32)(val) & (uint32)0x000000ffU) << 24) | \ + (((uint32)(val) & (uint32)0x0000ff00U) << 8) | \ + (((uint32)(val) & (uint32)0x00ff0000U) >> 8) | \ + (((uint32)(val) & (uint32)0xff000000U) >> 24))) + + +#define BCMSWAP32BY16(val) \ + ((uint32)((((uint32)(val) & (uint32)0x0000ffffU) << 16) | \ + (((uint32)(val) & (uint32)0xffff0000U) >> 16))) + + +#ifndef hton16 +#define HTON16(i) BCMSWAP16(i) +#define hton16(i) bcmswap16(i) +#define HTON32(i) BCMSWAP32(i) +#define hton32(i) bcmswap32(i) +#define NTOH16(i) BCMSWAP16(i) +#define ntoh16(i) bcmswap16(i) +#define NTOH32(i) BCMSWAP32(i) +#define ntoh32(i) bcmswap32(i) +#define LTOH16(i) (i) +#define ltoh16(i) (i) +#define LTOH32(i) (i) +#define ltoh32(i) (i) +#define HTOL16(i) (i) +#define htol16(i) (i) +#define HTOL32(i) (i) +#define htol32(i) (i) +#endif + +#define ltoh16_buf(buf, i) +#define htol16_buf(buf, i) + + +#define load32_ua(a) ltoh32_ua(a) +#define store32_ua(a, v) htol32_ua_store(v, a) +#define load16_ua(a) ltoh16_ua(a) +#define store16_ua(a, v) htol16_ua_store(v, a) + +#define _LTOH16_UA(cp) ((cp)[0] | ((cp)[1] << 8)) +#define _LTOH32_UA(cp) ((cp)[0] | ((cp)[1] << 8) | ((cp)[2] << 16) | ((cp)[3] << 24)) +#define _NTOH16_UA(cp) (((cp)[0] << 8) | (cp)[1]) +#define _NTOH32_UA(cp) (((cp)[0] << 24) | ((cp)[1] << 16) | ((cp)[2] << 8) | (cp)[3]) + +#define ltoh_ua(ptr) \ + (sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \ + sizeof(*(ptr)) == sizeof(uint16) ? _LTOH16_UA((const uint8 *)(ptr)) : \ + sizeof(*(ptr)) == sizeof(uint32) ? _LTOH32_UA((const uint8 *)(ptr)) : \ + *(uint8 *)0) + +#define ntoh_ua(ptr) \ + (sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \ + sizeof(*(ptr)) == sizeof(uint16) ? _NTOH16_UA((const uint8 *)(ptr)) : \ + sizeof(*(ptr)) == sizeof(uint32) ? 
_NTOH32_UA((const uint8 *)(ptr)) : \ + *(uint8 *)0) + +#ifdef __GNUC__ + + + +#define bcmswap16(val) ({ \ + uint16 _val = (val); \ + BCMSWAP16(_val); \ +}) + +#define bcmswap32(val) ({ \ + uint32 _val = (val); \ + BCMSWAP32(_val); \ +}) + +#define bcmswap32by16(val) ({ \ + uint32 _val = (val); \ + BCMSWAP32BY16(_val); \ +}) + +#define bcmswap16_buf(buf, len) ({ \ + uint16 *_buf = (uint16 *)(buf); \ + uint _wds = (len) / 2; \ + while (_wds--) { \ + *_buf = bcmswap16(*_buf); \ + _buf++; \ + } \ +}) + +#define htol16_ua_store(val, bytes) ({ \ + uint16 _val = (val); \ + uint8 *_bytes = (uint8 *)(bytes); \ + _bytes[0] = _val & 0xff; \ + _bytes[1] = _val >> 8; \ +}) + +#define htol32_ua_store(val, bytes) ({ \ + uint32 _val = (val); \ + uint8 *_bytes = (uint8 *)(bytes); \ + _bytes[0] = _val & 0xff; \ + _bytes[1] = (_val >> 8) & 0xff; \ + _bytes[2] = (_val >> 16) & 0xff; \ + _bytes[3] = _val >> 24; \ +}) + +#define hton16_ua_store(val, bytes) ({ \ + uint16 _val = (val); \ + uint8 *_bytes = (uint8 *)(bytes); \ + _bytes[0] = _val >> 8; \ + _bytes[1] = _val & 0xff; \ +}) + +#define hton32_ua_store(val, bytes) ({ \ + uint32 _val = (val); \ + uint8 *_bytes = (uint8 *)(bytes); \ + _bytes[0] = _val >> 24; \ + _bytes[1] = (_val >> 16) & 0xff; \ + _bytes[2] = (_val >> 8) & 0xff; \ + _bytes[3] = _val & 0xff; \ +}) + +#define ltoh16_ua(bytes) ({ \ + const uint8 *_bytes = (const uint8 *)(bytes); \ + _LTOH16_UA(_bytes); \ +}) + +#define ltoh32_ua(bytes) ({ \ + const uint8 *_bytes = (const uint8 *)(bytes); \ + _LTOH32_UA(_bytes); \ +}) + +#define ntoh16_ua(bytes) ({ \ + const uint8 *_bytes = (const uint8 *)(bytes); \ + _NTOH16_UA(_bytes); \ +}) + +#define ntoh32_ua(bytes) ({ \ + const uint8 *_bytes = (const uint8 *)(bytes); \ + _NTOH32_UA(_bytes); \ +}) + +#else + + +static INLINE uint16 +bcmswap16(uint16 val) +{ + return BCMSWAP16(val); +} + +static INLINE uint32 +bcmswap32(uint32 val) +{ + return BCMSWAP32(val); +} + +static INLINE uint32 +bcmswap32by16(uint32 val) +{ + return BCMSWAP32BY16(val); +} + + + + +static INLINE void +bcmswap16_buf(uint16 *buf, uint len) +{ + len = len / 2; + + while (len--) { + *buf = bcmswap16(*buf); + buf++; + } +} + + +static INLINE void +htol16_ua_store(uint16 val, uint8 *bytes) +{ + bytes[0] = val & 0xff; + bytes[1] = val >> 8; +} + + +static INLINE void +htol32_ua_store(uint32 val, uint8 *bytes) +{ + bytes[0] = val & 0xff; + bytes[1] = (val >> 8) & 0xff; + bytes[2] = (val >> 16) & 0xff; + bytes[3] = val >> 24; +} + + +static INLINE void +hton16_ua_store(uint16 val, uint8 *bytes) +{ + bytes[0] = val >> 8; + bytes[1] = val & 0xff; +} + + +static INLINE void +hton32_ua_store(uint32 val, uint8 *bytes) +{ + bytes[0] = val >> 24; + bytes[1] = (val >> 16) & 0xff; + bytes[2] = (val >> 8) & 0xff; + bytes[3] = val & 0xff; +} + + +static INLINE uint16 +ltoh16_ua(const void *bytes) +{ + return _LTOH16_UA((const uint8 *)bytes); +} + + +static INLINE uint32 +ltoh32_ua(const void *bytes) +{ + return _LTOH32_UA((const uint8 *)bytes); +} + + +static INLINE uint16 +ntoh16_ua(const void *bytes) +{ + return _NTOH16_UA((const uint8 *)bytes); +} + + +static INLINE uint32 +ntoh32_ua(const void *bytes) +{ + return _NTOH32_UA((const uint8 *)bytes); +} + +#endif +#endif diff --git a/drivers/net/wireless/bcmdhd/include/bcmpcispi.h b/drivers/net/wireless/bcmdhd/include/bcmpcispi.h new file mode 100644 index 0000000000000..51e0427e7f605 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmpcispi.h @@ -0,0 +1,181 @@ +/* + * Broadcom PCI-SPI Host Controller Register Definitions + * + * Copyright (C) 
1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: bcmpcispi.h 277737 2011-08-16 17:54:59Z $ + */ +#ifndef _BCM_PCI_SPI_H +#define _BCM_PCI_SPI_H + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + + +typedef volatile struct { + uint32 spih_ctrl; /* 0x00 SPI Control Register */ + uint32 spih_stat; /* 0x04 SPI Status Register */ + uint32 spih_data; /* 0x08 SPI Data Register, 32-bits wide */ + uint32 spih_ext; /* 0x0C SPI Extension Register */ + uint32 PAD[4]; /* 0x10-0x1F PADDING */ + + uint32 spih_gpio_ctrl; /* 0x20 SPI GPIO Control Register */ + uint32 spih_gpio_data; /* 0x24 SPI GPIO Data Register */ + uint32 PAD[6]; /* 0x28-0x3F PADDING */ + + uint32 spih_int_edge; /* 0x40 SPI Interrupt Edge Register (0=Level, 1=Edge) */ + uint32 spih_int_pol; /* 0x44 SPI Interrupt Polarity Register (0=Active Low, */ + /* 1=Active High) */ + uint32 spih_int_mask; /* 0x48 SPI Interrupt Mask */ + uint32 spih_int_status; /* 0x4C SPI Interrupt Status */ + uint32 PAD[4]; /* 0x50-0x5F PADDING */ + + uint32 spih_hex_disp; /* 0x60 SPI 4-digit hex display value */ + uint32 spih_current_ma; /* 0x64 SPI SD card current consumption in mA */ + uint32 PAD[1]; /* 0x68 PADDING */ + uint32 spih_disp_sel; /* 0x6c SPI 4-digit hex display mode select (1=current) */ + uint32 PAD[4]; /* 0x70-0x7F PADDING */ + uint32 PAD[8]; /* 0x80-0x9F PADDING */ + uint32 PAD[8]; /* 0xA0-0xBF PADDING */ + uint32 spih_pll_ctrl; /* 0xC0 PLL Control Register */ + uint32 spih_pll_status; /* 0xC4 PLL Status Register */ + uint32 spih_xtal_freq; /* 0xC8 External Clock Frequency in units of 10000Hz */ + uint32 spih_clk_count; /* 0xCC External Clock Count Register */ + +} spih_regs_t; + +typedef volatile struct { + uint32 cfg_space[0x40]; /* 0x000-0x0FF PCI Configuration Space (Read Only) */ + uint32 P_IMG_CTRL0; /* 0x100 PCI Image0 Control Register */ + + uint32 P_BA0; /* 0x104 32 R/W PCI Image0 Base Address register */ + uint32 P_AM0; /* 0x108 32 R/W PCI Image0 Address Mask register */ + uint32 P_TA0; /* 0x10C 32 R/W PCI Image0 Translation Address register */ + uint32 P_IMG_CTRL1; /* 0x110 32 R/W PCI Image1 Control register */ + uint32 P_BA1; /* 0x114 32 R/W PCI Image1 Base Address register */ + uint32 P_AM1; /* 0x118 32 R/W PCI Image1 Address Mask register */ + uint32 P_TA1; /* 0x11C 32 R/W PCI Image1 Translation Address register */ + uint32 
P_IMG_CTRL2; /* 0x120 32 R/W PCI Image2 Control register */ + uint32 P_BA2; /* 0x124 32 R/W PCI Image2 Base Address register */ + uint32 P_AM2; /* 0x128 32 R/W PCI Image2 Address Mask register */ + uint32 P_TA2; /* 0x12C 32 R/W PCI Image2 Translation Address register */ + uint32 P_IMG_CTRL3; /* 0x130 32 R/W PCI Image3 Control register */ + uint32 P_BA3; /* 0x134 32 R/W PCI Image3 Base Address register */ + uint32 P_AM3; /* 0x138 32 R/W PCI Image3 Address Mask register */ + uint32 P_TA3; /* 0x13C 32 R/W PCI Image3 Translation Address register */ + uint32 P_IMG_CTRL4; /* 0x140 32 R/W PCI Image4 Control register */ + uint32 P_BA4; /* 0x144 32 R/W PCI Image4 Base Address register */ + uint32 P_AM4; /* 0x148 32 R/W PCI Image4 Address Mask register */ + uint32 P_TA4; /* 0x14C 32 R/W PCI Image4 Translation Address register */ + uint32 P_IMG_CTRL5; /* 0x150 32 R/W PCI Image5 Control register */ + uint32 P_BA5; /* 0x154 32 R/W PCI Image5 Base Address register */ + uint32 P_AM5; /* 0x158 32 R/W PCI Image5 Address Mask register */ + uint32 P_TA5; /* 0x15C 32 R/W PCI Image5 Translation Address register */ + uint32 P_ERR_CS; /* 0x160 32 R/W PCI Error Control and Status register */ + uint32 P_ERR_ADDR; /* 0x164 32 R PCI Erroneous Address register */ + uint32 P_ERR_DATA; /* 0x168 32 R PCI Erroneous Data register */ + + uint32 PAD[5]; /* 0x16C-0x17F PADDING */ + + uint32 WB_CONF_SPC_BAR; /* 0x180 32 R WISHBONE Configuration Space Base Address */ + uint32 W_IMG_CTRL1; /* 0x184 32 R/W WISHBONE Image1 Control register */ + uint32 W_BA1; /* 0x188 32 R/W WISHBONE Image1 Base Address register */ + uint32 W_AM1; /* 0x18C 32 R/W WISHBONE Image1 Address Mask register */ + uint32 W_TA1; /* 0x190 32 R/W WISHBONE Image1 Translation Address reg */ + uint32 W_IMG_CTRL2; /* 0x194 32 R/W WISHBONE Image2 Control register */ + uint32 W_BA2; /* 0x198 32 R/W WISHBONE Image2 Base Address register */ + uint32 W_AM2; /* 0x19C 32 R/W WISHBONE Image2 Address Mask register */ + uint32 W_TA2; /* 0x1A0 32 R/W WISHBONE Image2 Translation Address reg */ + uint32 W_IMG_CTRL3; /* 0x1A4 32 R/W WISHBONE Image3 Control register */ + uint32 W_BA3; /* 0x1A8 32 R/W WISHBONE Image3 Base Address register */ + uint32 W_AM3; /* 0x1AC 32 R/W WISHBONE Image3 Address Mask register */ + uint32 W_TA3; /* 0x1B0 32 R/W WISHBONE Image3 Translation Address reg */ + uint32 W_IMG_CTRL4; /* 0x1B4 32 R/W WISHBONE Image4 Control register */ + uint32 W_BA4; /* 0x1B8 32 R/W WISHBONE Image4 Base Address register */ + uint32 W_AM4; /* 0x1BC 32 R/W WISHBONE Image4 Address Mask register */ + uint32 W_TA4; /* 0x1C0 32 R/W WISHBONE Image4 Translation Address reg */ + uint32 W_IMG_CTRL5; /* 0x1C4 32 R/W WISHBONE Image5 Control register */ + uint32 W_BA5; /* 0x1C8 32 R/W WISHBONE Image5 Base Address register */ + uint32 W_AM5; /* 0x1CC 32 R/W WISHBONE Image5 Address Mask register */ + uint32 W_TA5; /* 0x1D0 32 R/W WISHBONE Image5 Translation Address reg */ + uint32 W_ERR_CS; /* 0x1D4 32 R/W WISHBONE Error Control and Status reg */ + uint32 W_ERR_ADDR; /* 0x1D8 32 R WISHBONE Erroneous Address register */ + uint32 W_ERR_DATA; /* 0x1DC 32 R WISHBONE Erroneous Data register */ + uint32 CNF_ADDR; /* 0x1E0 32 R/W Configuration Cycle register */ + uint32 CNF_DATA; /* 0x1E4 32 R/W Configuration Cycle Generation Data reg */ + + uint32 INT_ACK; /* 0x1E8 32 R Interrupt Acknowledge register */ + uint32 ICR; /* 0x1EC 32 R/W Interrupt Control register */ + uint32 ISR; /* 0x1F0 32 R/W Interrupt Status register */ +} spih_pciregs_t; + +/* + * PCI Core interrupt enable and status bit 
definitions. + */ + +/* PCI Core ICR Register bit definitions */ +#define PCI_INT_PROP_EN (1 << 0) /* Interrupt Propagation Enable */ +#define PCI_WB_ERR_INT_EN (1 << 1) /* Wishbone Error Interrupt Enable */ +#define PCI_PCI_ERR_INT_EN (1 << 2) /* PCI Error Interrupt Enable */ +#define PCI_PAR_ERR_INT_EN (1 << 3) /* Parity Error Interrupt Enable */ +#define PCI_SYS_ERR_INT_EN (1 << 4) /* System Error Interrupt Enable */ +#define PCI_SOFTWARE_RESET (1U << 31) /* Software reset of the PCI Core. */ + + +/* PCI Core ISR Register bit definitions */ +#define PCI_INT_PROP_ST (1 << 0) /* Interrupt Propagation Status */ +#define PCI_WB_ERR_INT_ST (1 << 1) /* Wishbone Error Interrupt Status */ +#define PCI_PCI_ERR_INT_ST (1 << 2) /* PCI Error Interrupt Status */ +#define PCI_PAR_ERR_INT_ST (1 << 3) /* Parity Error Interrupt Status */ +#define PCI_SYS_ERR_INT_ST (1 << 4) /* System Error Interrupt Status */ + + +/* Registers on the Wishbone bus */ +#define SPIH_CTLR_INTR (1 << 0) /* SPI Host Controller Core Interrupt */ +#define SPIH_DEV_INTR (1 << 1) /* SPI Device Interrupt */ +#define SPIH_WFIFO_INTR (1 << 2) /* SPI Tx FIFO Empty Intr (FPGA Rev >= 8) */ + +/* GPIO Bit definitions */ +#define SPIH_CS (1 << 0) /* SPI Chip Select (active low) */ +#define SPIH_SLOT_POWER (1 << 1) /* SD Card Slot Power Enable */ +#define SPIH_CARD_DETECT (1 << 2) /* SD Card Detect */ + +/* SPI Status Register Bit definitions */ +#define SPIH_STATE_MASK 0x30 /* SPI Transfer State Machine state mask */ +#define SPIH_STATE_SHIFT 4 /* SPI Transfer State Machine state shift */ +#define SPIH_WFFULL (1 << 3) /* SPI Write FIFO Full */ +#define SPIH_WFEMPTY (1 << 2) /* SPI Write FIFO Empty */ +#define SPIH_RFFULL (1 << 1) /* SPI Read FIFO Full */ +#define SPIH_RFEMPTY (1 << 0) /* SPI Read FIFO Empty */ + +#define SPIH_EXT_CLK (1U << 31) /* Use External Clock as PLL Clock source. */ + +#define SPIH_PLL_NO_CLK (1 << 1) /* Set to 1 if the PLL's input clock is lost. */ +#define SPIH_PLL_LOCKED (1 << 3) /* Set to 1 when the PLL is locked. */ + +/* Spin bit loop bound check */ +#define SPI_SPIN_BOUND 0xf4240 /* 1 million */ + +#endif /* _BCM_PCI_SPI_H */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmperf.h b/drivers/net/wireless/bcmdhd/include/bcmperf.h new file mode 100644 index 0000000000000..a503edbd62261 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmperf.h @@ -0,0 +1,36 @@ +/* + * Performance counters software interface. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: bcmperf.h 277737 2011-08-16 17:54:59Z $ + */ +/* essai */ +#ifndef _BCMPERF_H_ +#define _BCMPERF_H_ +/* get cache hits and misses */ +#define BCMPERF_ENABLE_INSTRCOUNT() +#define BCMPERF_ENABLE_ICACHE_MISS() +#define BCMPERF_ENABLE_ICACHE_HIT() +#define BCMPERF_GETICACHE_MISS(x) ((x) = 0) +#define BCMPERF_GETICACHE_HIT(x) ((x) = 0) +#define BCMPERF_GETINSTRCOUNT(x) ((x) = 0) +#endif /* _BCMPERF_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdbus.h b/drivers/net/wireless/bcmdhd/include/bcmsdbus.h new file mode 100644 index 0000000000000..21a58b473e913 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmsdbus.h @@ -0,0 +1,128 @@ +/* + * Definitions for API from sdio common code (bcmsdh) to individual + * host controller drivers. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: bcmsdbus.h 300017 2011-12-01 20:30:27Z $ + */ + +#ifndef _sdio_api_h_ +#define _sdio_api_h_ + + +#define SDIOH_API_RC_SUCCESS (0x00) +#define SDIOH_API_RC_FAIL (0x01) +#define SDIOH_API_SUCCESS(status) (status == 0) + +#define SDIOH_READ 0 /* Read request */ +#define SDIOH_WRITE 1 /* Write request */ + +#define SDIOH_DATA_FIX 0 /* Fixed addressing */ +#define SDIOH_DATA_INC 1 /* Incremental addressing */ + +#define SDIOH_CMD_TYPE_NORMAL 0 /* Normal command */ +#define SDIOH_CMD_TYPE_APPEND 1 /* Append command */ +#define SDIOH_CMD_TYPE_CUTTHRU 2 /* Cut-through command */ + +#define SDIOH_DATA_PIO 0 /* PIO mode */ +#define SDIOH_DATA_DMA 1 /* DMA mode */ + + +typedef int SDIOH_API_RC; + +/* SDio Host structure */ +typedef struct sdioh_info sdioh_info_t; + +/* callback function, taking one arg */ +typedef void (*sdioh_cb_fn_t)(void *); + +/* attach, return handler on success, NULL if failed. + * The handler shall be provided by all subsequent calls. 
No local cache + * cfghdl points to the starting address of pci device mapped memory + */ +extern sdioh_info_t * sdioh_attach(osl_t *osh, void *cfghdl, uint irq); +extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *si); +extern SDIOH_API_RC sdioh_interrupt_register(sdioh_info_t *si, sdioh_cb_fn_t fn, void *argh); +extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *si); + +/* query whether SD interrupt is enabled or not */ +extern SDIOH_API_RC sdioh_interrupt_query(sdioh_info_t *si, bool *onoff); + +/* enable or disable SD interrupt */ +extern SDIOH_API_RC sdioh_interrupt_set(sdioh_info_t *si, bool enable_disable); + +#if defined(DHD_DEBUG) +extern bool sdioh_interrupt_pending(sdioh_info_t *si); +#endif + +/* read or write one byte using cmd52 */ +extern SDIOH_API_RC sdioh_request_byte(sdioh_info_t *si, uint rw, uint fnc, uint addr, uint8 *byte); + +/* read or write 2/4 bytes using cmd53 */ +extern SDIOH_API_RC sdioh_request_word(sdioh_info_t *si, uint cmd_type, uint rw, uint fnc, + uint addr, uint32 *word, uint nbyte); + +/* read or write any buffer using cmd53 */ +extern SDIOH_API_RC sdioh_request_buffer(sdioh_info_t *si, uint pio_dma, uint fix_inc, + uint rw, uint fnc_num, uint32 addr, uint regwidth, uint32 buflen, uint8 *buffer, + void *pkt); + +/* get cis data */ +extern SDIOH_API_RC sdioh_cis_read(sdioh_info_t *si, uint fuc, uint8 *cis, uint32 length); + +extern SDIOH_API_RC sdioh_cfg_read(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data); +extern SDIOH_API_RC sdioh_cfg_write(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data); + +/* query number of io functions */ +extern uint sdioh_query_iofnum(sdioh_info_t *si); + +/* handle iovars */ +extern int sdioh_iovar_op(sdioh_info_t *si, const char *name, + void *params, int plen, void *arg, int len, bool set); + +/* Issue abort to the specified function and clear controller as needed */ +extern int sdioh_abort(sdioh_info_t *si, uint fnc); + +/* Start and Stop SDIO without re-enumerating the SD card. 
*/ +extern int sdioh_start(sdioh_info_t *si, int stage); +extern int sdioh_stop(sdioh_info_t *si); + +/* Wait system lock free */ +extern int sdioh_waitlockfree(sdioh_info_t *si); + +/* Reset and re-initialize the device */ +extern int sdioh_sdio_reset(sdioh_info_t *si); + +/* Helper function */ +void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh); + + + +extern SDIOH_API_RC sdioh_sleep(sdioh_info_t *si, bool enab); + +/* GPIO support */ +extern SDIOH_API_RC sdioh_gpio_init(sdioh_info_t *sd); +extern bool sdioh_gpioin(sdioh_info_t *sd, uint32 gpio); +extern SDIOH_API_RC sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio); +extern SDIOH_API_RC sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab); + +#endif /* _sdio_api_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdh.h b/drivers/net/wireless/bcmdhd/include/bcmsdh.h new file mode 100644 index 0000000000000..def3c0269279b --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmsdh.h @@ -0,0 +1,219 @@ +/* + * SDIO host client driver interface of Broadcom HNBU + * export functions to client drivers + * abstract OS and BUS specific details of SDIO + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: bcmsdh.h 300017 2011-12-01 20:30:27Z $ + */ + +#ifndef _bcmsdh_h_ +#define _bcmsdh_h_ + +#define BCMSDH_ERROR_VAL 0x0001 /* Error */ +#define BCMSDH_INFO_VAL 0x0002 /* Info */ +extern const uint bcmsdh_msglevel; + +#define BCMSDH_ERROR(x) +#define BCMSDH_INFO(x) + +/* forward declarations */ +typedef struct bcmsdh_info bcmsdh_info_t; +typedef void (*bcmsdh_cb_fn_t)(void *); + +/* Attach and build an interface to the underlying SD host driver. + * - Allocates resources (structs, arrays, mem, OS handles, etc) needed by bcmsdh. + * - Returns the bcmsdh handle and virtual address base for register access. + * The returned handle should be used in all subsequent calls, but the bcmsh + * implementation may maintain a single "default" handle (e.g. the first or + * most recent one) to enable single-instance implementations to pass NULL. 
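+ *
+ * Illustrative attach sequence (osh, cfghdl and irq are assumed to come from
+ * the host platform glue):
+ *
+ *	void *regsva;
+ *	bcmsdh_info_t *sdh = bcmsdh_attach(osh, cfghdl, &regsva, irq);
+ *	if (sdh == NULL)
+ *		goto fail;
+ *
+ * regsva then holds the base used for the backplane addresses passed to
+ * bcmsdh_reg_read() and bcmsdh_reg_write() below.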
+ */ +extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *cfghdl, void **regsva, uint irq); + +/* Detach - freeup resources allocated in attach */ +extern int bcmsdh_detach(osl_t *osh, void *sdh); + +/* Query if SD device interrupts are enabled */ +extern bool bcmsdh_intr_query(void *sdh); + +/* Enable/disable SD interrupt */ +extern int bcmsdh_intr_enable(void *sdh); +extern int bcmsdh_intr_disable(void *sdh); + +/* Register/deregister device interrupt handler. */ +extern int bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh); +extern int bcmsdh_intr_dereg(void *sdh); + +#if defined(DHD_DEBUG) +/* Query pending interrupt status from the host controller */ +extern bool bcmsdh_intr_pending(void *sdh); +#endif + +#ifdef BCMLXSDMMC +extern int bcmsdh_claim_host_and_lock(void *sdh); +extern int bcmsdh_release_host_and_unlock(void *sdh); +#endif /* BCMLXSDMMC */ + +/* Register a callback to be called if and when bcmsdh detects + * device removal. No-op in the case of non-removable/hardwired devices. + */ +extern int bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh); + +/* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface). + * fn: function number + * addr: unmodified SDIO-space address + * data: data byte to write + * err: pointer to error code (or NULL) + */ +extern uint8 bcmsdh_cfg_read(void *sdh, uint func, uint32 addr, int *err); +extern void bcmsdh_cfg_write(void *sdh, uint func, uint32 addr, uint8 data, int *err); + +/* Read/Write 4bytes from/to cfg space */ +extern uint32 bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err); +extern void bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err); + +/* Read CIS content for specified function. + * fn: function whose CIS is being requested (0 is common CIS) + * cis: pointer to memory location to place results + * length: number of bytes to read + * Internally, this routine uses the values from the cis base regs (0x9-0xB) + * to form an SDIO-space address to read the data from. + */ +extern int bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length); + +/* Synchronous access to device (client) core registers via CMD53 to F1. + * addr: backplane address (i.e. >= regsva from attach) + * size: register width in bytes (2 or 4) + * data: data for register write + */ +extern uint32 bcmsdh_reg_read(void *sdh, uint32 addr, uint size); +extern uint32 bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data); + +/* Indicate if last reg read/write failed */ +extern bool bcmsdh_regfail(void *sdh); + +/* Buffer transfer to/from device (client) core via cmd53. + * fn: function number + * addr: backplane address (i.e. >= regsva from attach) + * flags: backplane width, address increment, sync/async + * buf: pointer to memory data buffer + * nbytes: number of bytes to transfer to/from buf + * pkt: pointer to packet associated with buf (if any) + * complete: callback function for command completion (async only) + * handle: handle for completion callback (first arg in callback) + * Returns 0 or error code. + * NOTE: Async operation is not currently supported. 
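+ *
+ * Illustrative synchronous receive into a flat buffer (function number 2 and
+ * the 4-byte incrementing access are assumptions for the example; complete
+ * and handle are only used for async requests, and pkt may be NULL when buf
+ * alone describes the data):
+ *
+ *	err = bcmsdh_recv_buf(sdh, addr, 2, SDIO_REQ_4BYTE,
+ *	                      buf, nbytes, NULL, NULL, NULL);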
+ */ +typedef void (*bcmsdh_cmplt_fn_t)(void *handle, int status, bool sync_waiting); +extern int bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, void *pkt, + bcmsdh_cmplt_fn_t complete, void *handle); +extern int bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, void *pkt, + bcmsdh_cmplt_fn_t complete, void *handle); + +/* Flags bits */ +#define SDIO_REQ_4BYTE 0x1 /* Four-byte target (backplane) width (vs. two-byte) */ +#define SDIO_REQ_FIXED 0x2 /* Fixed address (FIFO) (vs. incrementing address) */ +#define SDIO_REQ_ASYNC 0x4 /* Async request (vs. sync request) */ + +/* Pending (non-error) return code */ +#define BCME_PENDING 1 + +/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only). + * rw: read or write (0/1) + * addr: direct SDIO address + * buf: pointer to memory data buffer + * nbytes: number of bytes to transfer to/from buf + * Returns 0 or error code. + */ +extern int bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes); + +/* Issue an abort to the specified function */ +extern int bcmsdh_abort(void *sdh, uint fn); + +/* Start SDIO Host Controller communication */ +extern int bcmsdh_start(void *sdh, int stage); + +/* Stop SDIO Host Controller communication */ +extern int bcmsdh_stop(void *sdh); + +/* Wait system lock free */ +extern int bcmsdh_waitlockfree(void *sdh); + +/* Returns the "Device ID" of target device on the SDIO bus. */ +extern int bcmsdh_query_device(void *sdh); + +/* Returns the number of IO functions reported by the device */ +extern uint bcmsdh_query_iofnum(void *sdh); + +/* Miscellaneous knob tweaker. */ +extern int bcmsdh_iovar_op(void *sdh, const char *name, + void *params, int plen, void *arg, int len, bool set); + +/* Reset and reinitialize the device */ +extern int bcmsdh_reset(bcmsdh_info_t *sdh); + +/* helper functions */ + +extern void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh); + +/* callback functions */ +typedef struct { + /* attach to device */ + void *(*attach)(uint16 vend_id, uint16 dev_id, uint16 bus, uint16 slot, + uint16 func, uint bustype, void * regsva, osl_t * osh, + void * param); + /* detach from device */ + void (*detach)(void *ch); +} bcmsdh_driver_t; + +/* platform specific/high level functions */ +extern int bcmsdh_register(bcmsdh_driver_t *driver); +extern void bcmsdh_unregister(void); +extern bool bcmsdh_chipmatch(uint16 vendor, uint16 device); +extern void bcmsdh_device_remove(void * sdh); + +#if defined(OOB_INTR_ONLY) +extern int bcmsdh_register_oob_intr(void * dhdp); +extern void bcmsdh_unregister_oob_intr(void); +extern void bcmsdh_oob_intr_set(bool enable); +#endif /* defined(OOB_INTR_ONLY) */ +/* Function to pass device-status bits to DHD. 
*/ +extern uint32 bcmsdh_get_dstatus(void *sdh); + +/* Function to return current window addr */ +extern uint32 bcmsdh_cur_sbwad(void *sdh); + +/* Function to pass chipid and rev to lower layers for controlling pr's */ +extern void bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev); + + +extern int bcmsdh_sleep(void *sdh, bool enab); + +/* GPIO support */ +extern int bcmsdh_gpio_init(void *sd); +extern bool bcmsdh_gpioin(void *sd, uint32 gpio); +extern int bcmsdh_gpioouten(void *sd, uint32 gpio); +extern int bcmsdh_gpioout(void *sd, uint32 gpio, bool enab); + +#endif /* _bcmsdh_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h b/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h new file mode 100644 index 0000000000000..db8ea596304c9 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h @@ -0,0 +1,123 @@ +/* + * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: bcmsdh_sdmmc.h 314048 2012-02-09 20:31:56Z $ + */ + +#ifndef __BCMSDH_SDMMC_H__ +#define __BCMSDH_SDMMC_H__ + +#define sd_err(x) +#define sd_trace(x) +#define sd_info(x) +#define sd_debug(x) +#define sd_data(x) +#define sd_ctrl(x) + +#define sd_sync_dma(sd, read, nbytes) +#define sd_init_dma(sd) +#define sd_ack_intr(sd) +#define sd_wakeup(sd); + +/* Allocate/init/free per-OS private data */ +extern int sdioh_sdmmc_osinit(sdioh_info_t *sd); +extern void sdioh_sdmmc_osfree(sdioh_info_t *sd); + +#define sd_log(x) + +#define SDIOH_ASSERT(exp) \ + do { if (!(exp)) \ + printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \ + } while (0) + +#define BLOCK_SIZE_4318 64 +#define BLOCK_SIZE_4328 512 + +/* internal return code */ +#define SUCCESS 0 +#define ERROR 1 + +/* private bus modes */ +#define SDIOH_MODE_SD4 2 +#define CLIENT_INTR 0x100 /* Get rid of this! 
*/ + +struct sdioh_info { + osl_t *osh; /* osh handler */ + bool client_intr_enabled; /* interrupt connnected flag */ + bool intr_handler_valid; /* client driver interrupt handler valid */ + sdioh_cb_fn_t intr_handler; /* registered interrupt handler */ + void *intr_handler_arg; /* argument to call interrupt handler */ + uint16 intmask; /* Current active interrupts */ + void *sdos_info; /* Pointer to per-OS private data */ + + uint irq; /* Client irq */ + int intrcount; /* Client interrupts */ + + bool sd_use_dma; /* DMA on CMD53 */ + bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */ + /* Must be on for sd_multiblock to be effective */ + bool use_client_ints; /* If this is false, make sure to restore */ + int sd_mode; /* SD1/SD4/SPI */ + int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */ + uint8 num_funcs; /* Supported funcs on client */ + uint32 com_cis_ptr; + uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS]; + +#define SDIOH_SDMMC_MAX_SG_ENTRIES 32 + struct scatterlist sg_list[SDIOH_SDMMC_MAX_SG_ENTRIES]; + bool use_rxchain; +}; + +/************************************************************ + * Internal interfaces: per-port references into bcmsdh_sdmmc.c + */ + +/* Global message bits */ +extern uint sd_msglevel; + +/* OS-independent interrupt handler */ +extern bool check_client_intr(sdioh_info_t *sd); + +/* Core interrupt enable/disable of device interrupts */ +extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd); +extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd); + + +/************************************************************** + * Internal interfaces: bcmsdh_sdmmc.c references to per-port code + */ + +/* Register mapping routines */ +extern uint32 *sdioh_sdmmc_reg_map(osl_t *osh, int32 addr, int size); +extern void sdioh_sdmmc_reg_unmap(osl_t *osh, int32 addr, int size); + +/* Interrupt (de)registration routines */ +extern int sdioh_sdmmc_register_irq(sdioh_info_t *sd, uint irq); +extern void sdioh_sdmmc_free_irq(uint irq, sdioh_info_t *sd); + +typedef struct _BCMSDH_SDMMC_INSTANCE { + sdioh_info_t *sd; + struct sdio_func *func[SDIOD_MAX_IOFUNCS]; +} BCMSDH_SDMMC_INSTANCE, *PBCMSDH_SDMMC_INSTANCE; + +#endif /* __BCMSDH_SDMMC_H__ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h b/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h new file mode 100644 index 0000000000000..1b9d39fee8fc7 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h @@ -0,0 +1,274 @@ +/* + * Broadcom SDIO/PCMCIA + * Software-specific definitions shared between device and host side + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: bcmsdpcm.h 277737 2011-08-16 17:54:59Z $ + */ + +#ifndef _bcmsdpcm_h_ +#define _bcmsdpcm_h_ + +/* + * Software allocation of To SB Mailbox resources + */ + +/* intstatus bits */ +#define I_SMB_NAK I_SMB_SW0 /* To SB Mailbox Frame NAK */ +#define I_SMB_INT_ACK I_SMB_SW1 /* To SB Mailbox Host Interrupt ACK */ +#define I_SMB_USE_OOB I_SMB_SW2 /* To SB Mailbox Use OOB Wakeup */ +#define I_SMB_DEV_INT I_SMB_SW3 /* To SB Mailbox Miscellaneous Interrupt */ + +#define I_TOSBMAIL (I_SMB_NAK | I_SMB_INT_ACK | I_SMB_USE_OOB | I_SMB_DEV_INT) + +/* tosbmailbox bits corresponding to intstatus bits */ +#define SMB_NAK (1 << 0) /* To SB Mailbox Frame NAK */ +#define SMB_INT_ACK (1 << 1) /* To SB Mailbox Host Interrupt ACK */ +#define SMB_USE_OOB (1 << 2) /* To SB Mailbox Use OOB Wakeup */ +#define SMB_DEV_INT (1 << 3) /* To SB Mailbox Miscellaneous Interrupt */ +#define SMB_MASK 0x0000000f /* To SB Mailbox Mask */ + +/* tosbmailboxdata */ +#define SMB_DATA_VERSION_MASK 0x00ff0000 /* host protocol version (sent with F2 enable) */ +#define SMB_DATA_VERSION_SHIFT 16 /* host protocol version (sent with F2 enable) */ + +/* + * Software allocation of To Host Mailbox resources + */ + +/* intstatus bits */ +#define I_HMB_FC_STATE I_HMB_SW0 /* To Host Mailbox Flow Control State */ +#define I_HMB_FC_CHANGE I_HMB_SW1 /* To Host Mailbox Flow Control State Changed */ +#define I_HMB_FRAME_IND I_HMB_SW2 /* To Host Mailbox Frame Indication */ +#define I_HMB_HOST_INT I_HMB_SW3 /* To Host Mailbox Miscellaneous Interrupt */ + +#define I_TOHOSTMAIL (I_HMB_FC_CHANGE | I_HMB_FRAME_IND | I_HMB_HOST_INT) + +/* tohostmailbox bits corresponding to intstatus bits */ +#define HMB_FC_ON (1 << 0) /* To Host Mailbox Flow Control State */ +#define HMB_FC_CHANGE (1 << 1) /* To Host Mailbox Flow Control State Changed */ +#define HMB_FRAME_IND (1 << 2) /* To Host Mailbox Frame Indication */ +#define HMB_HOST_INT (1 << 3) /* To Host Mailbox Miscellaneous Interrupt */ +#define HMB_MASK 0x0000000f /* To Host Mailbox Mask */ + +/* tohostmailboxdata */ +#define HMB_DATA_NAKHANDLED 0x01 /* we're ready to retransmit NAK'd frame to host */ +#define HMB_DATA_DEVREADY 0x02 /* we're ready to to talk to host after enable */ +#define HMB_DATA_FC 0x04 /* per prio flowcontrol update flag to host */ +#define HMB_DATA_FWREADY 0x08 /* firmware is ready for protocol activity */ +#define HMB_DATA_FWHALT 0x10 /* firmware has halted operation */ + +#define HMB_DATA_FCDATA_MASK 0xff000000 /* per prio flowcontrol data */ +#define HMB_DATA_FCDATA_SHIFT 24 /* per prio flowcontrol data */ + +#define HMB_DATA_VERSION_MASK 0x00ff0000 /* device protocol version (with devready) */ +#define HMB_DATA_VERSION_SHIFT 16 /* device protocol version (with devready) */ + +/* + * Software-defined protocol header + */ + +/* Current protocol version */ +#define SDPCM_PROT_VERSION 4 + +/* SW frame header */ +#define SDPCM_SEQUENCE_MASK 0x000000ff /* Sequence Number Mask */ +#define SDPCM_PACKET_SEQUENCE(p) (((uint8 *)p)[0] & 0xff) /* p starts w/SW Header */ + +#define SDPCM_CHANNEL_MASK 0x00000f00 /* Channel Number Mask */ +#define SDPCM_CHANNEL_SHIFT 8 /* Channel Number Shift */ +#define SDPCM_PACKET_CHANNEL(p) (((uint8 *)p)[1] & 0x0f) /* p starts w/SW Header */ + +#define SDPCM_FLAGS_MASK 0x0000f000 /* Mask of flag bits */ +#define 
SDPCM_FLAGS_SHIFT 12 /* Flag bits shift */ +#define SDPCM_PACKET_FLAGS(p) ((((uint8 *)p)[1] & 0xf0) >> 4) /* p starts w/SW Header */ + +/* Next Read Len: lookahead length of next frame, in 16-byte units (rounded up) */ +#define SDPCM_NEXTLEN_MASK 0x00ff0000 /* Next Read Len Mask */ +#define SDPCM_NEXTLEN_SHIFT 16 /* Next Read Len Shift */ +#define SDPCM_NEXTLEN_VALUE(p) ((((uint8 *)p)[2] & 0xff) << 4) /* p starts w/SW Header */ +#define SDPCM_NEXTLEN_OFFSET 2 + +/* Data Offset from SOF (HW Tag, SW Tag, Pad) */ +#define SDPCM_DOFFSET_OFFSET 3 /* Data Offset */ +#define SDPCM_DOFFSET_VALUE(p) (((uint8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff) +#define SDPCM_DOFFSET_MASK 0xff000000 +#define SDPCM_DOFFSET_SHIFT 24 + +#define SDPCM_FCMASK_OFFSET 4 /* Flow control */ +#define SDPCM_FCMASK_VALUE(p) (((uint8 *)p)[SDPCM_FCMASK_OFFSET ] & 0xff) +#define SDPCM_WINDOW_OFFSET 5 /* Credit based fc */ +#define SDPCM_WINDOW_VALUE(p) (((uint8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff) +#define SDPCM_VERSION_OFFSET 6 /* Version # */ +#define SDPCM_VERSION_VALUE(p) (((uint8 *)p)[SDPCM_VERSION_OFFSET] & 0xff) +#define SDPCM_UNUSED_OFFSET 7 /* Spare */ +#define SDPCM_UNUSED_VALUE(p) (((uint8 *)p)[SDPCM_UNUSED_OFFSET] & 0xff) + +#define SDPCM_SWHEADER_LEN 8 /* SW header is 64 bits */ + +/* logical channel numbers */ +#define SDPCM_CONTROL_CHANNEL 0 /* Control Request/Response Channel Id */ +#define SDPCM_EVENT_CHANNEL 1 /* Asyc Event Indication Channel Id */ +#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv Channel Id */ +#define SDPCM_GLOM_CHANNEL 3 /* For coalesced packets (superframes) */ +#define SDPCM_TEST_CHANNEL 15 /* Reserved for test/debug packets */ +#define SDPCM_MAX_CHANNEL 15 + +#define SDPCM_SEQUENCE_WRAP 256 /* wrap-around val for eight-bit frame seq number */ + +#define SDPCM_FLAG_RESVD0 0x01 +#define SDPCM_FLAG_RESVD1 0x02 +#define SDPCM_FLAG_GSPI_TXENAB 0x04 +#define SDPCM_FLAG_GLOMDESC 0x08 /* Superframe descriptor mask */ + +/* For GLOM_CHANNEL frames, use a flag to indicate descriptor frame */ +#define SDPCM_GLOMDESC_FLAG (SDPCM_FLAG_GLOMDESC << SDPCM_FLAGS_SHIFT) + +#define SDPCM_GLOMDESC(p) (((uint8 *)p)[1] & 0x80) + +/* For TEST_CHANNEL packets, define another 4-byte header */ +#define SDPCM_TEST_HDRLEN 4 /* Generally: Cmd(1), Ext(1), Len(2); + * Semantics of Ext byte depend on command. + * Len is current or requested frame length, not + * including test header; sent little-endian. + */ +#define SDPCM_TEST_DISCARD 0x01 /* Receiver discards. Ext is a pattern id. */ +#define SDPCM_TEST_ECHOREQ 0x02 /* Echo request. Ext is a pattern id. */ +#define SDPCM_TEST_ECHORSP 0x03 /* Echo response. Ext is a pattern id. */ +#define SDPCM_TEST_BURST 0x04 /* Receiver to send a burst. Ext is a frame count */ +#define SDPCM_TEST_SEND 0x05 /* Receiver sets send mode. 
Ext is boolean on/off */ + +/* Handy macro for filling in datagen packets with a pattern */ +#define SDPCM_TEST_FILL(byteno, id) ((uint8)(id + byteno)) + +/* + * Software counters (first part matches hardware counters) + */ + +typedef volatile struct { + uint32 cmd52rd; /* Cmd52RdCount, SDIO: cmd52 reads */ + uint32 cmd52wr; /* Cmd52WrCount, SDIO: cmd52 writes */ + uint32 cmd53rd; /* Cmd53RdCount, SDIO: cmd53 reads */ + uint32 cmd53wr; /* Cmd53WrCount, SDIO: cmd53 writes */ + uint32 abort; /* AbortCount, SDIO: aborts */ + uint32 datacrcerror; /* DataCrcErrorCount, SDIO: frames w/CRC error */ + uint32 rdoutofsync; /* RdOutOfSyncCount, SDIO/PCMCIA: Rd Frm out of sync */ + uint32 wroutofsync; /* RdOutOfSyncCount, SDIO/PCMCIA: Wr Frm out of sync */ + uint32 writebusy; /* WriteBusyCount, SDIO: device asserted "busy" */ + uint32 readwait; /* ReadWaitCount, SDIO: no data ready for a read cmd */ + uint32 readterm; /* ReadTermCount, SDIO: read frame termination cmds */ + uint32 writeterm; /* WriteTermCount, SDIO: write frames termination cmds */ + uint32 rxdescuflo; /* receive descriptor underflows */ + uint32 rxfifooflo; /* receive fifo overflows */ + uint32 txfifouflo; /* transmit fifo underflows */ + uint32 runt; /* runt (too short) frames recv'd from bus */ + uint32 badlen; /* frame's rxh len does not match its hw tag len */ + uint32 badcksum; /* frame's hw tag chksum doesn't agree with len value */ + uint32 seqbreak; /* break in sequence # space from one rx frame to the next */ + uint32 rxfcrc; /* frame rx header indicates crc error */ + uint32 rxfwoos; /* frame rx header indicates write out of sync */ + uint32 rxfwft; /* frame rx header indicates write frame termination */ + uint32 rxfabort; /* frame rx header indicates frame aborted */ + uint32 woosint; /* write out of sync interrupt */ + uint32 roosint; /* read out of sync interrupt */ + uint32 rftermint; /* read frame terminate interrupt */ + uint32 wftermint; /* write frame terminate interrupt */ +} sdpcmd_cnt_t; + +/* + * Register Access Macros + */ + +#define SDIODREV_IS(var, val) ((var) == (val)) +#define SDIODREV_GE(var, val) ((var) >= (val)) +#define SDIODREV_GT(var, val) ((var) > (val)) +#define SDIODREV_LT(var, val) ((var) < (val)) +#define SDIODREV_LE(var, val) ((var) <= (val)) + +#define SDIODDMAREG32(h, dir, chnl) \ + ((dir) == DMA_TX ? \ + (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].xmt) : \ + (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].rcv)) + +#define SDIODDMAREG64(h, dir, chnl) \ + ((dir) == DMA_TX ? \ + (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].xmt) : \ + (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].rcv)) + +#define SDIODDMAREG(h, dir, chnl) \ + (SDIODREV_LT((h)->corerev, 1) ? \ + SDIODDMAREG32((h), (dir), (chnl)) : \ + SDIODDMAREG64((h), (dir), (chnl))) + +#define PCMDDMAREG(h, dir, chnl) \ + ((dir) == DMA_TX ? \ + (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.xmt) : \ + (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.rcv)) + +#define SDPCMDMAREG(h, dir, chnl, coreid) \ + ((coreid) == SDIOD_CORE_ID ? \ + SDIODDMAREG(h, dir, chnl) : \ + PCMDDMAREG(h, dir, chnl)) + +#define SDIODFIFOREG(h, corerev) \ + (SDIODREV_LT((corerev), 1) ? \ + ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod32.dmafifo)) : \ + ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod64.dmafifo))) + +#define PCMDFIFOREG(h) \ + ((dma32diag_t *)(uintptr)&((h)->regs->dma.pcm32.dmafifo)) + +#define SDPCMFIFOREG(h, coreid, corerev) \ + ((coreid) == SDIOD_CORE_ID ? 
\ + SDIODFIFOREG(h, corerev) : \ + PCMDFIFOREG(h)) + +/* + * Shared structure between dongle and the host. + * The structure contains pointers to trap or assert information. + */ +#define SDPCM_SHARED_VERSION 0x0001 +#define SDPCM_SHARED_VERSION_MASK 0x00FF +#define SDPCM_SHARED_ASSERT_BUILT 0x0100 +#define SDPCM_SHARED_ASSERT 0x0200 +#define SDPCM_SHARED_TRAP 0x0400 +#define SDPCM_SHARED_IN_BRPT 0x0800 +#define SDPCM_SHARED_SET_BRPT 0x1000 +#define SDPCM_SHARED_PENDING_BRPT 0x2000 + +typedef struct { + uint32 flags; + uint32 trap_addr; + uint32 assert_exp_addr; + uint32 assert_file_addr; + uint32 assert_line; + uint32 console_addr; /* Address of hndrte_cons_t */ + uint32 msgtrace_addr; + uint32 brpt_addr; +} sdpcm_shared_t; + +extern sdpcm_shared_t sdpcm_shared; + +/* Function can be used to notify host of FW halt */ +extern void sdpcmd_fwhalt(void); + +#endif /* _bcmsdpcm_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdspi.h b/drivers/net/wireless/bcmdhd/include/bcmsdspi.h new file mode 100644 index 0000000000000..a62bee42b2ba8 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmsdspi.h @@ -0,0 +1,135 @@ +/* + * SD-SPI Protocol Conversion - BCMSDH->SPI Translation Layer + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: bcmsdspi.h 277737 2011-08-16 17:54:59Z $ + */ +#ifndef _BCM_SD_SPI_H +#define _BCM_SD_SPI_H + +/* global msglevel for debug messages - bitvals come from sdiovar.h */ + +#define sd_err(x) +#define sd_trace(x) +#define sd_info(x) +#define sd_debug(x) +#define sd_data(x) +#define sd_ctrl(x) + +#define sd_log(x) + +#define SDIOH_ASSERT(exp) \ + do { if (!(exp)) \ + printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \ + } while (0) + +#define BLOCK_SIZE_4318 64 +#define BLOCK_SIZE_4328 512 + +/* internal return code */ +#define SUCCESS 0 +#undef ERROR +#define ERROR 1 + +/* private bus modes */ +#define SDIOH_MODE_SPI 0 + +#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */ +#define USE_MULTIBLOCK 0x4 + +struct sdioh_info { + uint cfg_bar; /* pci cfg address for bar */ + uint32 caps; /* cached value of capabilities reg */ + uint bar0; /* BAR0 for PCI Device */ + osl_t *osh; /* osh handler */ + void *controller; /* Pointer to SPI Controller's private data struct */ + + uint lockcount; /* nest count of sdspi_lock() calls */ + bool client_intr_enabled; /* interrupt connnected flag */ + bool intr_handler_valid; /* client driver interrupt handler valid */ + sdioh_cb_fn_t intr_handler; /* registered interrupt handler */ + void *intr_handler_arg; /* argument to call interrupt handler */ + bool initialized; /* card initialized */ + uint32 target_dev; /* Target device ID */ + uint32 intmask; /* Current active interrupts */ + void *sdos_info; /* Pointer to per-OS private data */ + + uint32 controller_type; /* Host controller type */ + uint8 version; /* Host Controller Spec Compliance Version */ + uint irq; /* Client irq */ + uint32 intrcount; /* Client interrupts */ + uint32 local_intrcount; /* Controller interrupts */ + bool host_init_done; /* Controller initted */ + bool card_init_done; /* Client SDIO interface initted */ + bool polled_mode; /* polling for command completion */ + + bool sd_use_dma; /* DMA on CMD53 */ + bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */ + /* Must be on for sd_multiblock to be effective */ + bool use_client_ints; /* If this is false, make sure to restore */ + bool got_hcint; /* Host Controller interrupt. 
*/ + /* polling hack in wl_linux.c:wl_timer() */ + int adapter_slot; /* Maybe dealing with multiple slots/controllers */ + int sd_mode; /* SD1/SD4/SPI */ + int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */ + uint32 data_xfer_count; /* Current register transfer size */ + uint32 cmd53_wr_data; /* Used to pass CMD53 write data */ + uint32 card_response; /* Used to pass back response status byte */ + uint32 card_rsp_data; /* Used to pass back response data word */ + uint16 card_rca; /* Current Address */ + uint8 num_funcs; /* Supported funcs on client */ + uint32 com_cis_ptr; + uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS]; + void *dma_buf; + ulong dma_phys; + int r_cnt; /* rx count */ + int t_cnt; /* tx_count */ +}; + +/************************************************************ + * Internal interfaces: per-port references into bcmsdspi.c + */ + +/* Global message bits */ +extern uint sd_msglevel; + +/************************************************************** + * Internal interfaces: bcmsdspi.c references to per-port code + */ + +/* Register mapping routines */ +extern uint32 *spi_reg_map(osl_t *osh, uintptr addr, int size); +extern void spi_reg_unmap(osl_t *osh, uintptr addr, int size); + +/* Interrupt (de)registration routines */ +extern int spi_register_irq(sdioh_info_t *sd, uint irq); +extern void spi_free_irq(uint irq, sdioh_info_t *sd); + +/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */ +extern void spi_lock(sdioh_info_t *sd); +extern void spi_unlock(sdioh_info_t *sd); + +/* Allocate/init/free per-OS private data */ +extern int spi_osinit(sdioh_info_t *sd); +extern void spi_osfree(sdioh_info_t *sd); + +#endif /* _BCM_SD_SPI_H */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdstd.h b/drivers/net/wireless/bcmdhd/include/bcmsdstd.h new file mode 100644 index 0000000000000..c7382540b84fa --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmsdstd.h @@ -0,0 +1,248 @@ +/* + * 'Standard' SDIO HOST CONTROLLER driver + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: bcmsdstd.h 324819 2012-03-30 12:15:19Z $ + */ +#ifndef _BCM_SD_STD_H +#define _BCM_SD_STD_H + +/* global msglevel for debug messages - bitvals come from sdiovar.h */ +#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0) +#define sd_trace(x) +#define sd_info(x) +#define sd_debug(x) +#define sd_data(x) +#define sd_ctrl(x) +#define sd_dma(x) + +#define sd_sync_dma(sd, read, nbytes) +#define sd_init_dma(sd) +#define sd_ack_intr(sd) +#define sd_wakeup(sd); +/* Allocate/init/free per-OS private data */ +extern int sdstd_osinit(sdioh_info_t *sd); +extern void sdstd_osfree(sdioh_info_t *sd); + +#define sd_log(x) + +#define SDIOH_ASSERT(exp) \ + do { if (!(exp)) \ + printf("!!!ASSERT fail: file %s lines %d", __FILE__, __LINE__); \ + } while (0) + +#define BLOCK_SIZE_4318 64 +#define BLOCK_SIZE_4328 512 + +/* internal return code */ +#define SUCCESS 0 +#define ERROR 1 + +/* private bus modes */ +#define SDIOH_MODE_SPI 0 +#define SDIOH_MODE_SD1 1 +#define SDIOH_MODE_SD4 2 + +#define MAX_SLOTS 6 /* For PCI: Only 6 BAR entries => 6 slots */ +#define SDIOH_REG_WINSZ 0x100 /* Number of registers in Standard Host Controller */ + +#define SDIOH_TYPE_ARASAN_HDK 1 +#define SDIOH_TYPE_BCM27XX 2 +#define SDIOH_TYPE_TI_PCIXX21 4 /* TI PCIxx21 Standard Host Controller */ +#define SDIOH_TYPE_RICOH_R5C822 5 /* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter */ +#define SDIOH_TYPE_JMICRON 6 /* JMicron Standard SDIO Host Controller */ + +/* For linux, allow yielding for dongle */ +#define BCMSDYIELD + +/* Expected card status value for CMD7 */ +#define SDIOH_CMD7_EXP_STATUS 0x00001E00 + +#define RETRIES_LARGE 100000 +#define sdstd_os_yield(sd) do {} while (0) +#define RETRIES_SMALL 100 + + +#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */ +#define USE_MULTIBLOCK 0x4 + +#define USE_FIFO 0x8 /* Fifo vs non-fifo */ + +#define CLIENT_INTR 0x100 /* Get rid of this! */ + +#define HC_INTR_RETUNING 0x1000 + + +struct sdioh_info { + uint cfg_bar; /* pci cfg address for bar */ + uint32 caps; /* cached value of capabilities reg */ + uint32 curr_caps; /* max current capabilities reg */ + + osl_t *osh; /* osh handler */ + volatile char *mem_space; /* pci device memory va */ + uint lockcount; /* nest count of sdstd_lock() calls */ + bool client_intr_enabled; /* interrupt connnected flag */ + bool intr_handler_valid; /* client driver interrupt handler valid */ + sdioh_cb_fn_t intr_handler; /* registered interrupt handler */ + void *intr_handler_arg; /* argument to call interrupt handler */ + bool initialized; /* card initialized */ + uint target_dev; /* Target device ID */ + uint16 intmask; /* Current active interrupts */ + void *sdos_info; /* Pointer to per-OS private data */ + + uint32 controller_type; /* Host controller type */ + uint8 version; /* Host Controller Spec Compliance Version */ + uint irq; /* Client irq */ + int intrcount; /* Client interrupts */ + int local_intrcount; /* Controller interrupts */ + bool host_init_done; /* Controller initted */ + bool card_init_done; /* Client SDIO interface initted */ + bool polled_mode; /* polling for command completion */ + + bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. 
*/ + /* Must be on for sd_multiblock to be effective */ + bool use_client_ints; /* If this is false, make sure to restore */ + /* polling hack in wl_linux.c:wl_timer() */ + int adapter_slot; /* Maybe dealing with multiple slots/controllers */ + int sd_mode; /* SD1/SD4/SPI */ + int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */ + uint32 data_xfer_count; /* Current transfer */ + uint16 card_rca; /* Current Address */ + int8 sd_dma_mode; /* DMA Mode (PIO, SDMA, ... ADMA2) on CMD53 */ + uint8 num_funcs; /* Supported funcs on client */ + uint32 com_cis_ptr; + uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS]; + void *dma_buf; /* DMA Buffer virtual address */ + ulong dma_phys; /* DMA Buffer physical address */ + void *adma2_dscr_buf; /* ADMA2 Descriptor Buffer virtual address */ + ulong adma2_dscr_phys; /* ADMA2 Descriptor Buffer physical address */ + + /* adjustments needed to make the dma align properly */ + void *dma_start_buf; + ulong dma_start_phys; + uint alloced_dma_size; + void *adma2_dscr_start_buf; + ulong adma2_dscr_start_phys; + uint alloced_adma2_dscr_size; + + int r_cnt; /* rx count */ + int t_cnt; /* tx_count */ + bool got_hcint; /* local interrupt flag */ + uint16 last_intrstatus; /* to cache intrstatus */ + int host_UHSISupported; /* whether UHSI is supported for HC. */ + int card_UHSI_voltage_Supported; /* whether UHSI is supported for + * Card in terms of Voltage [1.8 or 3.3]. + */ + int global_UHSI_Supp; /* type of UHSI support in both host and card. + * HOST_SDR_UNSUPP: capabilities not supported/matched + * HOST_SDR_12_25: SDR12 and SDR25 supported + * HOST_SDR_50_104_DDR: one of SDR50/SDR104 or DDR50 supptd + */ + int sd3_dat_state; /* data transfer state used for retuning check */ + int sd3_tun_state; /* tuning state used for retuning check */ + bool sd3_tuning_reqd; /* tuning requirement parameter */ + uint32 caps3; /* cached value of 32 MSbits capabilities reg (SDIO 3.0) */ +}; + +#define DMA_MODE_NONE 0 +#define DMA_MODE_SDMA 1 +#define DMA_MODE_ADMA1 2 +#define DMA_MODE_ADMA2 3 +#define DMA_MODE_ADMA2_64 4 +#define DMA_MODE_AUTO -1 + +#define USE_DMA(sd) ((bool)((sd->sd_dma_mode > 0) ? 
TRUE : FALSE)) + +/* States for Tuning and corr data */ +#define TUNING_IDLE 0 +#define TUNING_START 1 +#define TUNING_START_AFTER_DAT 2 +#define TUNING_ONGOING 3 + +#define DATA_TRANSFER_IDLE 0 +#define DATA_TRANSFER_ONGOING 1 +#define CHECK_TUNING_PRE_DATA 1 +#define CHECK_TUNING_POST_DATA 2 + + +/************************************************************ + * Internal interfaces: per-port references into bcmsdstd.c + */ + +/* Global message bits */ +extern uint sd_msglevel; + +/* OS-independent interrupt handler */ +extern bool check_client_intr(sdioh_info_t *sd); + +/* Core interrupt enable/disable of device interrupts */ +extern void sdstd_devintr_on(sdioh_info_t *sd); +extern void sdstd_devintr_off(sdioh_info_t *sd); + +/* Enable/disable interrupts for local controller events */ +extern void sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err); +extern void sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err); + +/* Wait for specified interrupt and error bits to be set */ +extern void sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err); + + +/************************************************************** + * Internal interfaces: bcmsdstd.c references to per-port code + */ + +/* Register mapping routines */ +extern uint32 *sdstd_reg_map(osl_t *osh, int32 addr, int size); +extern void sdstd_reg_unmap(osl_t *osh, int32 addr, int size); + +/* Interrupt (de)registration routines */ +extern int sdstd_register_irq(sdioh_info_t *sd, uint irq); +extern void sdstd_free_irq(uint irq, sdioh_info_t *sd); + +/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */ +extern void sdstd_lock(sdioh_info_t *sd); +extern void sdstd_unlock(sdioh_info_t *sd); +extern void sdstd_waitlockfree(sdioh_info_t *sd); + +/* OS-specific wait-for-interrupt-or-status */ +extern int sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool yield, uint16 *bits); + +/* used by bcmsdstd_linux [implemented in sdstd] */ +extern void sdstd_3_enable_retuning_int(sdioh_info_t *sd); +extern void sdstd_3_disable_retuning_int(sdioh_info_t *sd); +extern bool sdstd_3_is_retuning_int_set(sdioh_info_t *sd); +extern void sdstd_3_check_and_do_tuning(sdioh_info_t *sd, int tuning_param); +extern bool sdstd_3_check_and_set_retuning(sdioh_info_t *sd); +extern int sdstd_3_get_tune_state(sdioh_info_t *sd); +extern int sdstd_3_get_data_state(sdioh_info_t *sd); +extern void sdstd_3_set_tune_state(sdioh_info_t *sd, int state); +extern void sdstd_3_set_data_state(sdioh_info_t *sd, int state); +extern uint8 sdstd_3_get_tuning_exp(sdioh_info_t *sd); +extern uint32 sdstd_3_get_uhsi_clkmode(sdioh_info_t *sd); +extern int sdstd_3_clk_tuning(sdioh_info_t *sd, uint32 sd3ClkMode); + +/* used by sdstd [implemented in bcmsdstd_linux/ndis] */ +extern void sdstd_3_start_tuning(sdioh_info_t *sd); +extern void sdstd_3_osinit_tuning(sdioh_info_t *sd); +extern void sdstd_3_osclean_tuning(sdioh_info_t *sd); + +#endif /* _BCM_SD_STD_H */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmspi.h b/drivers/net/wireless/bcmdhd/include/bcmspi.h new file mode 100644 index 0000000000000..34a02d00c6bd0 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmspi.h @@ -0,0 +1,40 @@ +/* + * Broadcom SPI Low-Level Hardware Driver API + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at 
http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: bcmspi.h 277737 2011-08-16 17:54:59Z $ + */ +#ifndef _BCM_SPI_H +#define _BCM_SPI_H + +extern void spi_devintr_off(sdioh_info_t *sd); +extern void spi_devintr_on(sdioh_info_t *sd); +extern bool spi_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor); +extern bool spi_controller_highspeed_mode(sdioh_info_t *sd, bool hsmode); +extern bool spi_check_client_intr(sdioh_info_t *sd, int *is_dev_intr); +extern bool spi_hw_attach(sdioh_info_t *sd); +extern bool spi_hw_detach(sdioh_info_t *sd); +extern void spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen); +extern void spi_spinbits(sdioh_info_t *sd); +extern void spi_waitbits(sdioh_info_t *sd, bool yield); + +#endif /* _BCM_SPI_H */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmutils.h b/drivers/net/wireless/bcmdhd/include/bcmutils.h new file mode 100644 index 0000000000000..6849c26da8372 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmutils.h @@ -0,0 +1,720 @@ +/* + * Misc useful os-independent macros and functions. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: bcmutils.h 294991 2011-11-09 00:17:28Z $ + */ + + +#ifndef _bcmutils_h_ +#define _bcmutils_h_ + +#define bcm_strcpy_s(dst, noOfElements, src) strcpy((dst), (src)) +#define bcm_strncpy_s(dst, noOfElements, src, count) strncpy((dst), (src), (count)) +#define bcm_strcat_s(dst, noOfElements, src) strcat((dst), (src)) + +#ifdef __cplusplus +extern "C" { +#endif + + +#define _BCM_U 0x01 +#define _BCM_L 0x02 +#define _BCM_D 0x04 +#define _BCM_C 0x08 +#define _BCM_P 0x10 +#define _BCM_S 0x20 +#define _BCM_X 0x40 +#define _BCM_SP 0x80 + +extern const unsigned char bcm_ctype[]; +#define bcm_ismask(x) (bcm_ctype[(int)(unsigned char)(x)]) + +#define bcm_isalnum(c) ((bcm_ismask(c)&(_BCM_U|_BCM_L|_BCM_D)) != 0) +#define bcm_isalpha(c) ((bcm_ismask(c)&(_BCM_U|_BCM_L)) != 0) +#define bcm_iscntrl(c) ((bcm_ismask(c)&(_BCM_C)) != 0) +#define bcm_isdigit(c) ((bcm_ismask(c)&(_BCM_D)) != 0) +#define bcm_isgraph(c) ((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D)) != 0) +#define bcm_islower(c) ((bcm_ismask(c)&(_BCM_L)) != 0) +#define bcm_isprint(c) ((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D|_BCM_SP)) != 0) +#define bcm_ispunct(c) ((bcm_ismask(c)&(_BCM_P)) != 0) +#define bcm_isspace(c) ((bcm_ismask(c)&(_BCM_S)) != 0) +#define bcm_isupper(c) ((bcm_ismask(c)&(_BCM_U)) != 0) +#define bcm_isxdigit(c) ((bcm_ismask(c)&(_BCM_D|_BCM_X)) != 0) +#define bcm_tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c)) +#define bcm_toupper(c) (bcm_islower((c)) ? ((c) + 'A' - 'a') : (c)) + + + +struct bcmstrbuf { + char *buf; + unsigned int size; + char *origbuf; + unsigned int origsize; +}; + + +#ifdef BCMDRIVER +#include + +#define GPIO_PIN_NOTDEFINED 0x20 + + +#define SPINWAIT(exp, us) { \ + uint countdown = (us) + 9; \ + while ((exp) && (countdown >= 10)) {\ + OSL_DELAY(10); \ + countdown -= 10; \ + } \ +} + + +#ifndef PKTQ_LEN_DEFAULT +#define PKTQ_LEN_DEFAULT 128 +#endif +#ifndef PKTQ_MAX_PREC +#define PKTQ_MAX_PREC 16 +#endif + +typedef struct pktq_prec { + void *head; + void *tail; + uint16 len; + uint16 max; +} pktq_prec_t; + + + +struct pktq { + uint16 num_prec; + uint16 hi_prec; + uint16 max; + uint16 len; + + struct pktq_prec q[PKTQ_MAX_PREC]; +}; + + +struct spktq { + uint16 num_prec; + uint16 hi_prec; + uint16 max; + uint16 len; + + struct pktq_prec q[1]; +}; + +#define PKTQ_PREC_ITER(pq, prec) for (prec = (pq)->num_prec - 1; prec >= 0; prec--) + + +typedef bool (*ifpkt_cb_t)(void*, int); + +#ifdef BCMPKTPOOL +#define POOL_ENAB(pool) ((pool) && (pool)->inited) +#if defined(BCM4329C0) +#define SHARED_POOL (pktpool_shared_ptr) +#else +#define SHARED_POOL (pktpool_shared) +#endif +#else +#define POOL_ENAB(bus) 0 +#define SHARED_POOL ((struct pktpool *)NULL) +#endif + +#ifndef PKTPOOL_LEN_MAX +#define PKTPOOL_LEN_MAX 40 +#endif +#define PKTPOOL_CB_MAX 3 + +struct pktpool; +typedef void (*pktpool_cb_t)(struct pktpool *pool, void *arg); +typedef struct { + pktpool_cb_t cb; + void *arg; +} pktpool_cbinfo_t; + +#ifdef BCMDBG_POOL + +#define POOL_IDLE 0 +#define POOL_RXFILL 1 +#define POOL_RXDH 2 +#define POOL_RXD11 3 +#define POOL_TXDH 4 +#define POOL_TXD11 5 +#define POOL_AMPDU 6 +#define POOL_TXENQ 7 + +typedef struct { + void *p; + uint32 cycles; + uint32 dur; +} pktpool_dbg_t; + +typedef struct { + uint8 txdh; + uint8 txd11; + uint8 enq; + uint8 rxdh; + uint8 rxd11; + uint8 rxfill; + uint8 idle; +} pktpool_stats_t; +#endif + +typedef struct pktpool { + bool inited; + uint16 r; + uint16 w; + uint16 len; + uint16 maxlen; + uint16 plen; + bool istx; + bool empty; + uint8 cbtoggle; + uint8 cbcnt; + uint8 ecbcnt; 
+ bool emptycb_disable; + pktpool_cbinfo_t cbs[PKTPOOL_CB_MAX]; + pktpool_cbinfo_t ecbs[PKTPOOL_CB_MAX]; + void *q[PKTPOOL_LEN_MAX + 1]; + +#ifdef BCMDBG_POOL + uint8 dbg_cbcnt; + pktpool_cbinfo_t dbg_cbs[PKTPOOL_CB_MAX]; + uint16 dbg_qlen; + pktpool_dbg_t dbg_q[PKTPOOL_LEN_MAX + 1]; +#endif +} pktpool_t; + +#if defined(BCM4329C0) +extern pktpool_t *pktpool_shared_ptr; +#else +extern pktpool_t *pktpool_shared; +#endif + +extern int pktpool_init(osl_t *osh, pktpool_t *pktp, int *pktplen, int plen, bool istx); +extern int pktpool_deinit(osl_t *osh, pktpool_t *pktp); +extern int pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal); +extern void* pktpool_get(pktpool_t *pktp); +extern void pktpool_free(pktpool_t *pktp, void *p); +extern int pktpool_add(pktpool_t *pktp, void *p); +extern uint16 pktpool_avail(pktpool_t *pktp); +extern int pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg); +extern int pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg); +extern int pktpool_setmaxlen(pktpool_t *pktp, uint16 maxlen); +extern int pktpool_setmaxlen_strict(osl_t *osh, pktpool_t *pktp, uint16 maxlen); +extern void pktpool_emptycb_disable(pktpool_t *pktp, bool disable); +extern bool pktpool_emptycb_disabled(pktpool_t *pktp); + +#define POOLPTR(pp) ((pktpool_t *)(pp)) +#define pktpool_len(pp) (POOLPTR(pp)->len - 1) +#define pktpool_plen(pp) (POOLPTR(pp)->plen) +#define pktpool_maxlen(pp) (POOLPTR(pp)->maxlen) + +#ifdef BCMDBG_POOL +extern int pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg); +extern int pktpool_start_trigger(pktpool_t *pktp, void *p); +extern int pktpool_dbg_dump(pktpool_t *pktp); +extern int pktpool_dbg_notify(pktpool_t *pktp); +extern int pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats); +#endif + + + +struct ether_addr; + +extern int ether_isbcast(const void *ea); +extern int ether_isnulladdr(const void *ea); + + + +#define pktq_psetmax(pq, prec, _max) ((pq)->q[prec].max = (_max)) +#define pktq_plen(pq, prec) ((pq)->q[prec].len) +#define pktq_pavail(pq, prec) ((pq)->q[prec].max - (pq)->q[prec].len) +#define pktq_pfull(pq, prec) ((pq)->q[prec].len >= (pq)->q[prec].max) +#define pktq_pempty(pq, prec) ((pq)->q[prec].len == 0) + +#define pktq_ppeek(pq, prec) ((pq)->q[prec].head) +#define pktq_ppeek_tail(pq, prec) ((pq)->q[prec].tail) + +extern void *pktq_penq(struct pktq *pq, int prec, void *p); +extern void *pktq_penq_head(struct pktq *pq, int prec, void *p); +extern void *pktq_pdeq(struct pktq *pq, int prec); +extern void *pktq_pdeq_tail(struct pktq *pq, int prec); + +extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, + ifpkt_cb_t fn, int arg); + +extern bool pktq_pdel(struct pktq *pq, void *p, int prec); + + + +extern int pktq_mlen(struct pktq *pq, uint prec_bmp); +extern void *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out); + + + +#define pktq_len(pq) ((int)(pq)->len) +#define pktq_max(pq) ((int)(pq)->max) +#define pktq_avail(pq) ((int)((pq)->max - (pq)->len)) +#define pktq_full(pq) ((pq)->len >= (pq)->max) +#define pktq_empty(pq) ((pq)->len == 0) + + +#define pktenq(pq, p) pktq_penq(((struct pktq *)pq), 0, (p)) +#define pktenq_head(pq, p) pktq_penq_head(((struct pktq *)pq), 0, (p)) +#define pktdeq(pq) pktq_pdeq(((struct pktq *)pq), 0) +#define pktdeq_tail(pq) pktq_pdeq_tail(((struct pktq *)pq), 0) +#define pktqinit(pq, len) pktq_init(((struct pktq *)pq), 1, len) + +extern void pktq_init(struct pktq *pq, int num_prec, int max_len); + +extern void *pktq_deq(struct pktq *pq, int *prec_out); 
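/*
 * Editorial illustration (not part of the Broadcom patch): a minimal sketch
 * of how the precedence-queue API declared in this header is typically
 * driven by a bus layer.  The OSL handle "osh", the packet handle "pkt" and
 * the PKTFREE() macro are assumed to be provided by the driver's OSL layer;
 * the function name and error handling here are illustrative only.
 */
static void example_pktq_usage(osl_t *osh, void *pkt)
{
	struct pktq txq;
	int prec_out;
	void *p;

	/* One sub-queue per precedence level, each bounded by PKTQ_LEN_DEFAULT */
	pktq_init(&txq, PKTQ_MAX_PREC, PKTQ_LEN_DEFAULT);

	/* Enqueue at precedence 0 only if that precedence still has room */
	if (!pktq_pfull(&txq, 0))
		pktq_penq(&txq, 0, pkt);

	/* Dequeue across all precedences (bitmap 0xffff), highest non-empty first */
	while ((p = pktq_mdeq(&txq, 0xffff, &prec_out)) != NULL)
		PKTFREE(osh, p, TRUE);

	/* Release anything still queued; no per-interface filter callback */
	pktq_flush(osh, &txq, TRUE, NULL, 0);
}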
+extern void *pktq_deq_tail(struct pktq *pq, int *prec_out); +extern void *pktq_peek(struct pktq *pq, int *prec_out); +extern void *pktq_peek_tail(struct pktq *pq, int *prec_out); +extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg); + + + +extern uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf); +extern uint pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf); +extern uint pkttotlen(osl_t *osh, void *p); +extern void *pktlast(osl_t *osh, void *p); +extern uint pktsegcnt(osl_t *osh, void *p); + + +extern uint pktsetprio(void *pkt, bool update_vtag); +#define PKTPRIO_VDSCP 0x100 +#define PKTPRIO_VLAN 0x200 +#define PKTPRIO_UPD 0x400 +#define PKTPRIO_DSCP 0x800 + + +extern int bcm_atoi(char *s); +extern ulong bcm_strtoul(char *cp, char **endp, uint base); +extern char *bcmstrstr(char *haystack, char *needle); +extern char *bcmstrcat(char *dest, const char *src); +extern char *bcmstrncat(char *dest, const char *src, uint size); +extern ulong wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen); +char* bcmstrtok(char **string, const char *delimiters, char *tokdelim); +int bcmstricmp(const char *s1, const char *s2); +int bcmstrnicmp(const char* s1, const char* s2, int cnt); + + + +extern char *bcm_ether_ntoa(const struct ether_addr *ea, char *buf); +extern int bcm_ether_atoe(char *p, struct ether_addr *ea); + + +struct ipv4_addr; +extern char *bcm_ip_ntoa(struct ipv4_addr *ia, char *buf); + + +extern void bcm_mdelay(uint ms); + +#define NVRAM_RECLAIM_CHECK(name) + +extern char *getvar(char *vars, const char *name); +extern int getintvar(char *vars, const char *name); +extern int getintvararray(char *vars, const char *name, int index); +extern int getintvararraysize(char *vars, const char *name); +extern uint getgpiopin(char *vars, char *pin_name, uint def_pin); +#define bcm_perf_enable() +#define bcmstats(fmt) +#define bcmlog(fmt, a1, a2) +#define bcmdumplog(buf, size) *buf = '\0' +#define bcmdumplogent(buf, idx) -1 + +#define bcmtslog(tstamp, fmt, a1, a2) +#define bcmprinttslogs() +#define bcmprinttstamp(us) + +extern char *bcm_nvram_vars(uint *length); +extern int bcm_nvram_cache(void *sih); + + + + +typedef struct bcm_iovar { + const char *name; + uint16 varid; + uint16 flags; + uint16 type; + uint16 minlen; +} bcm_iovar_t; + + + + +#define IOV_GET 0 +#define IOV_SET 1 + + +#define IOV_GVAL(id) ((id)*2) +#define IOV_SVAL(id) (((id)*2)+IOV_SET) +#define IOV_ISSET(actionid) ((actionid & IOV_SET) == IOV_SET) +#define IOV_ID(actionid) (actionid >> 1) + + + +extern const bcm_iovar_t *bcm_iovar_lookup(const bcm_iovar_t *table, const char *name); +extern int bcm_iovar_lencheck(const bcm_iovar_t *table, void *arg, int len, bool set); +#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \ + defined(WLMSG_PRPKT) || defined(WLMSG_WSEC) +extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len); +#endif +#endif + + +#define IOVT_VOID 0 +#define IOVT_BOOL 1 +#define IOVT_INT8 2 +#define IOVT_UINT8 3 +#define IOVT_INT16 4 +#define IOVT_UINT16 5 +#define IOVT_INT32 6 +#define IOVT_UINT32 7 +#define IOVT_BUFFER 8 +#define BCM_IOVT_VALID(type) (((unsigned int)(type)) <= IOVT_BUFFER) + + +#define BCM_IOV_TYPE_INIT { \ + "void", \ + "bool", \ + "int8", \ + "uint8", \ + "int16", \ + "uint16", \ + "int32", \ + "uint32", \ + "buffer", \ + "" } + +#define BCM_IOVT_IS_INT(type) (\ + (type == IOVT_BOOL) || \ + (type == IOVT_INT8) || \ + (type == IOVT_UINT8) || \ + (type == IOVT_INT16) || \ + 
(type == IOVT_UINT16) || \ + (type == IOVT_INT32) || \ + (type == IOVT_UINT32)) + + + +#define BCME_STRLEN 64 +#define VALID_BCMERROR(e) ((e <= 0) && (e >= BCME_LAST)) + + + + +#define BCME_OK 0 +#define BCME_ERROR -1 +#define BCME_BADARG -2 +#define BCME_BADOPTION -3 +#define BCME_NOTUP -4 +#define BCME_NOTDOWN -5 +#define BCME_NOTAP -6 +#define BCME_NOTSTA -7 +#define BCME_BADKEYIDX -8 +#define BCME_RADIOOFF -9 +#define BCME_NOTBANDLOCKED -10 +#define BCME_NOCLK -11 +#define BCME_BADRATESET -12 +#define BCME_BADBAND -13 +#define BCME_BUFTOOSHORT -14 +#define BCME_BUFTOOLONG -15 +#define BCME_BUSY -16 +#define BCME_NOTASSOCIATED -17 +#define BCME_BADSSIDLEN -18 +#define BCME_OUTOFRANGECHAN -19 +#define BCME_BADCHAN -20 +#define BCME_BADADDR -21 +#define BCME_NORESOURCE -22 +#define BCME_UNSUPPORTED -23 +#define BCME_BADLEN -24 +#define BCME_NOTREADY -25 +#define BCME_EPERM -26 +#define BCME_NOMEM -27 +#define BCME_ASSOCIATED -28 +#define BCME_RANGE -29 +#define BCME_NOTFOUND -30 +#define BCME_WME_NOT_ENABLED -31 +#define BCME_TSPEC_NOTFOUND -32 +#define BCME_ACM_NOTSUPPORTED -33 +#define BCME_NOT_WME_ASSOCIATION -34 +#define BCME_SDIO_ERROR -35 +#define BCME_DONGLE_DOWN -36 +#define BCME_VERSION -37 +#define BCME_TXFAIL -38 +#define BCME_RXFAIL -39 +#define BCME_NODEVICE -40 +#define BCME_NMODE_DISABLED -41 +#define BCME_NONRESIDENT -42 +#define BCME_LAST BCME_NONRESIDENT + + +#define BCMERRSTRINGTABLE { \ + "OK", \ + "Undefined error", \ + "Bad Argument", \ + "Bad Option", \ + "Not up", \ + "Not down", \ + "Not AP", \ + "Not STA", \ + "Bad Key Index", \ + "Radio Off", \ + "Not band locked", \ + "No clock", \ + "Bad Rate valueset", \ + "Bad Band", \ + "Buffer too short", \ + "Buffer too long", \ + "Busy", \ + "Not Associated", \ + "Bad SSID len", \ + "Out of Range Channel", \ + "Bad Channel", \ + "Bad Address", \ + "Not Enough Resources", \ + "Unsupported", \ + "Bad length", \ + "Not Ready", \ + "Not Permitted", \ + "No Memory", \ + "Associated", \ + "Not In Range", \ + "Not Found", \ + "WME Not Enabled", \ + "TSPEC Not Found", \ + "ACM Not Supported", \ + "Not WME Association", \ + "SDIO Bus Error", \ + "Dongle Not Accessible", \ + "Incorrect version", \ + "TX Failure", \ + "RX Failure", \ + "Device Not Present", \ + "NMODE Disabled", \ + "Nonresident overlay access", \ +} + +#ifndef ABS +#define ABS(a) (((a) < 0)?-(a):(a)) +#endif + +#ifndef MIN +#define MIN(a, b) (((a) < (b))?(a):(b)) +#endif + +#ifndef MAX +#define MAX(a, b) (((a) > (b))?(a):(b)) +#endif + +#define CEIL(x, y) (((x) + ((y)-1)) / (y)) +#define ROUNDUP(x, y) ((((x)+((y)-1))/(y))*(y)) +#define ISALIGNED(a, x) (((uintptr)(a) & ((x)-1)) == 0) +#define ALIGN_ADDR(addr, boundary) (void *)(((uintptr)(addr) + (boundary) - 1) \ + & ~((boundary) - 1)) +#define ISPOWEROF2(x) ((((x)-1)&(x)) == 0) +#define VALID_MASK(mask) !((mask) & ((mask) + 1)) + +#ifndef OFFSETOF +#ifdef __ARMCC_VERSION + +#include +#define OFFSETOF(type, member) offsetof(type, member) +#else +#define OFFSETOF(type, member) ((uint)(uintptr)&((type *)0)->member) +#endif +#endif + +#ifndef ARRAYSIZE +#define ARRAYSIZE(a) (sizeof(a)/sizeof(a[0])) +#endif + + +extern void *_bcmutils_dummy_fn; +#define REFERENCE_FUNCTION(f) (_bcmutils_dummy_fn = (void *)(f)) + + +#ifndef setbit +#ifndef NBBY +#define NBBY 8 +#endif +#define setbit(a, i) (((uint8 *)a)[(i)/NBBY] |= 1<<((i)%NBBY)) +#define clrbit(a, i) (((uint8 *)a)[(i)/NBBY] &= ~(1<<((i)%NBBY))) +#define isset(a, i) (((const uint8 *)a)[(i)/NBBY] & (1<<((i)%NBBY))) +#define isclr(a, i) ((((const uint8 *)a)[(i)/NBBY] & 
(1<<((i)%NBBY))) == 0) +#endif + +#define NBITS(type) (sizeof(type) * 8) +#define NBITVAL(nbits) (1 << (nbits)) +#define MAXBITVAL(nbits) ((1 << (nbits)) - 1) +#define NBITMASK(nbits) MAXBITVAL(nbits) +#define MAXNBVAL(nbyte) MAXBITVAL((nbyte) * 8) + + +#define MUX(pred, true, false) ((pred) ? (true) : (false)) + + +#define MODDEC(x, bound) MUX((x) == 0, (bound) - 1, (x) - 1) +#define MODINC(x, bound) MUX((x) == (bound) - 1, 0, (x) + 1) + + +#define MODDEC_POW2(x, bound) (((x) - 1) & ((bound) - 1)) +#define MODINC_POW2(x, bound) (((x) + 1) & ((bound) - 1)) + + +#define MODADD(x, y, bound) \ + MUX((x) + (y) >= (bound), (x) + (y) - (bound), (x) + (y)) +#define MODSUB(x, y, bound) \ + MUX(((int)(x)) - ((int)(y)) < 0, (x) - (y) + (bound), (x) - (y)) + + +#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1)) +#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1)) + + +#define CRC8_INIT_VALUE 0xff +#define CRC8_GOOD_VALUE 0x9f +#define CRC16_INIT_VALUE 0xffff +#define CRC16_GOOD_VALUE 0xf0b8 +#define CRC32_INIT_VALUE 0xffffffff +#define CRC32_GOOD_VALUE 0xdebb20e3 + + +typedef struct bcm_bit_desc { + uint32 bit; + const char* name; +} bcm_bit_desc_t; + + +typedef struct bcm_tlv { + uint8 id; + uint8 len; + uint8 data[1]; +} bcm_tlv_t; + + +#define bcm_valid_tlv(elt, buflen) ((buflen) >= 2 && (int)(buflen) >= (int)(2 + (elt)->len)) + + +#define ETHER_ADDR_STR_LEN 18 + + + +static INLINE void +xor_128bit_block(const uint8 *src1, const uint8 *src2, uint8 *dst) +{ + if ( +#ifdef __i386__ + 1 || +#endif + (((uintptr)src1 | (uintptr)src2 | (uintptr)dst) & 3) == 0) { + + + ((uint32 *)dst)[0] = ((const uint32 *)src1)[0] ^ ((const uint32 *)src2)[0]; + ((uint32 *)dst)[1] = ((const uint32 *)src1)[1] ^ ((const uint32 *)src2)[1]; + ((uint32 *)dst)[2] = ((const uint32 *)src1)[2] ^ ((const uint32 *)src2)[2]; + ((uint32 *)dst)[3] = ((const uint32 *)src1)[3] ^ ((const uint32 *)src2)[3]; + } else { + + int k; + for (k = 0; k < 16; k++) + dst[k] = src1[k] ^ src2[k]; + } +} + + + +extern uint8 hndcrc8(uint8 *p, uint nbytes, uint8 crc); +extern uint16 hndcrc16(uint8 *p, uint nbytes, uint16 crc); +extern uint32 hndcrc32(uint8 *p, uint nbytes, uint32 crc); + +#if defined(DHD_DEBUG) || defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || \ + defined(WLMSG_ASSOC) +extern int bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len); +#endif + +#if defined(DHD_DEBUG) || defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || \ + defined(WLMSG_ASSOC) || defined(WLMEDIA_PEAKRATE) +extern int bcm_format_hex(char *str, const void *bytes, int len); +#endif + +extern const char *bcm_crypto_algo_name(uint algo); +extern char *bcm_chipname(uint chipid, char *buf, uint len); +extern char *bcm_brev_str(uint32 brev, char *buf); +extern void printbig(char *buf); +extern void prhex(const char *msg, uchar *buf, uint len); + + +extern bcm_tlv_t *bcm_next_tlv(bcm_tlv_t *elt, int *buflen); +extern bcm_tlv_t *bcm_parse_tlvs(void *buf, int buflen, uint key); +extern bcm_tlv_t *bcm_parse_ordered_tlvs(void *buf, int buflen, uint key); + + +extern const char *bcmerrorstr(int bcmerror); + + +typedef uint32 mbool; +#define mboolset(mb, bit) ((mb) |= (bit)) +#define mboolclr(mb, bit) ((mb) &= ~(bit)) +#define mboolisset(mb, bit) (((mb) & (bit)) != 0) +#define mboolmaskset(mb, mask, val) ((mb) = (((mb) & ~(mask)) | (val))) + + +extern uint16 bcm_qdbm_to_mw(uint8 qdbm); +extern uint8 bcm_mw_to_qdbm(uint16 mw); + + +struct fielddesc { + const char *nameandfmt; + uint32 offset; + uint32 len; +}; + +extern void 
bcm_binit(struct bcmstrbuf *b, char *buf, uint size); +extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...); +extern void bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount); +extern int bcm_cmp_bytes(uchar *arg1, uchar *arg2, uint8 nbytes); +extern void bcm_print_bytes(char *name, const uchar *cdata, int len); + +typedef uint32 (*bcmutl_rdreg_rtn)(void *arg0, uint arg1, uint32 offset); +extern uint bcmdumpfields(bcmutl_rdreg_rtn func_ptr, void *arg0, uint arg1, struct fielddesc *str, + char *buf, uint32 bufsize); + +extern uint bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint len); +extern uint bcm_bitcount(uint8 *bitmap, uint bytelength); + + + +#define SSID_FMT_BUF_LEN ((4 * DOT11_MAX_SSID_LEN) + 1) + +unsigned int process_nvram_vars(char *varbuf, unsigned int len); + +#ifdef __cplusplus + } +#endif + +#endif diff --git a/drivers/net/wireless/bcmdhd/include/bcmwifi.h b/drivers/net/wireless/bcmdhd/include/bcmwifi.h new file mode 100644 index 0000000000000..e5207e9c40861 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmwifi.h @@ -0,0 +1,165 @@ +/* + * Misc utility routines for WL and Apps + * This header file housing the define and function prototype use by + * both the wl driver, tools & Apps. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: bcmwifi.h 277737 2011-08-16 17:54:59Z $ + */ + + +#ifndef _bcmwifi_h_ +#define _bcmwifi_h_ + + + +typedef uint16 chanspec_t; + + +#define CH_UPPER_SB 0x01 +#define CH_LOWER_SB 0x02 +#define CH_EWA_VALID 0x04 +#define CH_20MHZ_APART 4 +#define CH_10MHZ_APART 2 +#define CH_5MHZ_APART 1 +#define CH_MAX_2G_CHANNEL 14 +#define WLC_MAX_2G_CHANNEL CH_MAX_2G_CHANNEL +#define MAXCHANNEL 224 + +#define WL_CHANSPEC_CHAN_MASK 0x00ff +#define WL_CHANSPEC_CHAN_SHIFT 0 + +#define WL_CHANSPEC_CTL_SB_MASK 0x0300 +#define WL_CHANSPEC_CTL_SB_SHIFT 8 +#define WL_CHANSPEC_CTL_SB_LOWER 0x0100 +#define WL_CHANSPEC_CTL_SB_UPPER 0x0200 +#define WL_CHANSPEC_CTL_SB_NONE 0x0300 + +#define WL_CHANSPEC_BW_MASK 0x0C00 +#define WL_CHANSPEC_BW_SHIFT 10 +#define WL_CHANSPEC_BW_10 0x0400 +#define WL_CHANSPEC_BW_20 0x0800 +#define WL_CHANSPEC_BW_40 0x0C00 + +#define WL_CHANSPEC_BAND_MASK 0xf000 +#define WL_CHANSPEC_BAND_SHIFT 12 +#define WL_CHANSPEC_BAND_5G 0x1000 +#define WL_CHANSPEC_BAND_2G 0x2000 +#define INVCHANSPEC 255 + + +#define WF_CHAN_FACTOR_2_4_G 4814 +#define WF_CHAN_FACTOR_5_G 10000 +#define WF_CHAN_FACTOR_4_G 8000 + + +#define LOWER_20_SB(channel) (((channel) > CH_10MHZ_APART) ? ((channel) - CH_10MHZ_APART) : 0) +#define UPPER_20_SB(channel) (((channel) < (MAXCHANNEL - CH_10MHZ_APART)) ? \ + ((channel) + CH_10MHZ_APART) : 0) +#define CHSPEC_WLCBANDUNIT(chspec) (CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : BAND_2G_INDEX) +#define CH20MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \ + WL_CHANSPEC_CTL_SB_NONE | (((channel) <= CH_MAX_2G_CHANNEL) ? \ + WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G)) +#define NEXT_20MHZ_CHAN(channel) (((channel) < (MAXCHANNEL - CH_20MHZ_APART)) ? \ + ((channel) + CH_20MHZ_APART) : 0) +#define CH40MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \ + ((channel) | (ctlsb) | WL_CHANSPEC_BW_40 | \ + ((channel) <= CH_MAX_2G_CHANNEL ? WL_CHANSPEC_BAND_2G : \ + WL_CHANSPEC_BAND_5G)) +#define CHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK)) +#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK) + + +#define CHSPEC_CTL_SB(chspec) (chspec & WL_CHANSPEC_CTL_SB_MASK) +#define CHSPEC_BW(chspec) (chspec & WL_CHANSPEC_BW_MASK) + +#ifdef WL11N_20MHZONLY + +#define CHSPEC_IS10(chspec) 0 +#define CHSPEC_IS20(chspec) 1 +#ifndef CHSPEC_IS40 +#define CHSPEC_IS40(chspec) 0 +#endif + +#else + +#define CHSPEC_IS10(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10) +#define CHSPEC_IS20(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20) +#ifndef CHSPEC_IS40 +#define CHSPEC_IS40(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40) +#endif + +#endif + +#define CHSPEC_IS20_UNCOND(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20) + +#define CHSPEC_IS5G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G) +#define CHSPEC_IS2G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G) +#define CHSPEC_SB_NONE(chspec) (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_NONE) +#define CHSPEC_SB_UPPER(chspec) (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER) +#define CHSPEC_SB_LOWER(chspec) (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER) +#define CHSPEC_CTL_CHAN(chspec) ((CHSPEC_SB_LOWER(chspec)) ? \ + (LOWER_20_SB(((chspec) & WL_CHANSPEC_CHAN_MASK))) : \ + (UPPER_20_SB(((chspec) & WL_CHANSPEC_CHAN_MASK)))) +#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS5G(chspec) ? 
WLC_BAND_5G : WLC_BAND_2G) + +#define CHANSPEC_STR_LEN 8 + + +#define WLC_MAXRATE 108 +#define WLC_RATE_1M 2 +#define WLC_RATE_2M 4 +#define WLC_RATE_5M5 11 +#define WLC_RATE_11M 22 +#define WLC_RATE_6M 12 +#define WLC_RATE_9M 18 +#define WLC_RATE_12M 24 +#define WLC_RATE_18M 36 +#define WLC_RATE_24M 48 +#define WLC_RATE_36M 72 +#define WLC_RATE_48M 96 +#define WLC_RATE_54M 108 + +#define WLC_2G_25MHZ_OFFSET 5 + + +extern char * wf_chspec_ntoa(chanspec_t chspec, char *buf); + + +extern chanspec_t wf_chspec_aton(char *a); + + +extern bool wf_chspec_malformed(chanspec_t chanspec); + + +extern uint8 wf_chspec_ctlchan(chanspec_t chspec); + + +extern chanspec_t wf_chspec_ctlchspec(chanspec_t chspec); + + +extern int wf_mhz2channel(uint freq, uint start_factor); + + +extern int wf_channel2mhz(uint channel, uint start_factor); + +#endif diff --git a/drivers/net/wireless/bcmdhd/include/dhdioctl.h b/drivers/net/wireless/bcmdhd/include/dhdioctl.h new file mode 100644 index 0000000000000..175ff8545a0ce --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/dhdioctl.h @@ -0,0 +1,131 @@ +/* + * Definitions for ioctls to access DHD iovars. + * Based on wlioctl.h (for Broadcom 802.11abg driver). + * (Moves towards generic ioctls for BCM drivers/iovars.) + * + * Definitions subject to change without notice. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: dhdioctl.h 323572 2012-03-26 06:28:14Z $ + */ + +#ifndef _dhdioctl_h_ +#define _dhdioctl_h_ + +#include + + +/* require default structure packing */ +#define BWL_DEFAULT_PACKING +#include + + +/* Linux network driver ioctl encoding */ +typedef struct dhd_ioctl { + uint cmd; /* common ioctl definition */ + void *buf; /* pointer to user buffer */ + uint len; /* length of user buffer */ + bool set; /* get or set request (optional) */ + uint used; /* bytes read or written (optional) */ + uint needed; /* bytes needed (optional) */ + uint driver; /* to identify target driver */ +} dhd_ioctl_t; + +/* Underlying BUS definition */ +enum { + BUS_TYPE_USB = 0, /* for USB dongles */ + BUS_TYPE_SDIO /* for SDIO dongles */ +}; + +/* per-driver magic numbers */ +#define DHD_IOCTL_MAGIC 0x00444944 + +/* bump this number if you change the ioctl interface */ +#define DHD_IOCTL_VERSION 1 + +#define DHD_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */ +#define DHD_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */ + +/* common ioctl definitions */ +#define DHD_GET_MAGIC 0 +#define DHD_GET_VERSION 1 +#define DHD_GET_VAR 2 +#define DHD_SET_VAR 3 + +/* message levels */ +#define DHD_ERROR_VAL 0x0001 +#define DHD_TRACE_VAL 0x0002 +#define DHD_INFO_VAL 0x0004 +#define DHD_DATA_VAL 0x0008 +#define DHD_CTL_VAL 0x0010 +#define DHD_TIMER_VAL 0x0020 +#define DHD_HDRS_VAL 0x0040 +#define DHD_BYTES_VAL 0x0080 +#define DHD_INTR_VAL 0x0100 +#define DHD_LOG_VAL 0x0200 +#define DHD_GLOM_VAL 0x0400 +#define DHD_EVENT_VAL 0x0800 +#define DHD_BTA_VAL 0x1000 +#define DHD_ISCAN_VAL 0x2000 +#define DHD_ARPOE_VAL 0x4000 +#define DHD_REORDER_VAL 0x8000 +#define DHD_WL_VAL 0x10000 + +#ifdef SDTEST +/* For pktgen iovar */ +typedef struct dhd_pktgen { + uint version; /* To allow structure change tracking */ + uint freq; /* Max ticks between tx/rx attempts */ + uint count; /* Test packets to send/rcv each attempt */ + uint print; /* Print counts every attempts */ + uint total; /* Total packets (or bursts) */ + uint minlen; /* Minimum length of packets to send */ + uint maxlen; /* Maximum length of packets to send */ + uint numsent; /* Count of test packets sent */ + uint numrcvd; /* Count of test packets received */ + uint numfail; /* Count of test send failures */ + uint mode; /* Test mode (type of test packets) */ + uint stop; /* Stop after this many tx failures */ +} dhd_pktgen_t; + +/* Version in case structure changes */ +#define DHD_PKTGEN_VERSION 2 + +/* Type of test packets to use */ +#define DHD_PKTGEN_ECHO 1 /* Send echo requests */ +#define DHD_PKTGEN_SEND 2 /* Send discard packets */ +#define DHD_PKTGEN_RXBURST 3 /* Request dongle send N packets */ +#define DHD_PKTGEN_RECV 4 /* Continuous rx from continuous tx dongle */ +#endif /* SDTEST */ + +/* Enter idle immediately (no timeout) */ +#define DHD_IDLE_IMMEDIATE (-1) + +/* Values for idleclock iovar: other values are the sd_divisor to use when idle */ +#define DHD_IDLE_ACTIVE 0 /* Do not request any SD clock change when idle */ +#define DHD_IDLE_STOP (-1) /* Request SD clock be stopped (and use SD1 mode) */ + + +/* require default structure packing */ +#include + +#endif /* _dhdioctl_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/epivers.h b/drivers/net/wireless/bcmdhd/include/epivers.h new file mode 100644 index 0000000000000..19da8f28bc7e8 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/epivers.h @@ -0,0 +1,49 @@ +/* + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written 
software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: epivers.h.in 277737 2011-08-16 17:54:59Z $ + * +*/ + + +#ifndef _epivers_h_ +#define _epivers_h_ + +#define EPI_MAJOR_VERSION 5 + +#define EPI_MINOR_VERSION 90 + +#define EPI_RC_NUMBER 195 + +#define EPI_INCREMENTAL_NUMBER 98 + +#define EPI_BUILD_NUMBER 0 + +#define EPI_VERSION 5, 90, 195, 98 + +#define EPI_VERSION_NUM 0x055ac362 + +#define EPI_VERSION_DEV 5.90.195 + + +#define EPI_VERSION_STR "5.90.195.98" + +#endif diff --git a/drivers/net/wireless/bcmdhd/include/hndpmu.h b/drivers/net/wireless/bcmdhd/include/hndpmu.h new file mode 100644 index 0000000000000..9bfc8c9275a9b --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/hndpmu.h @@ -0,0 +1,37 @@ +/* + * HND SiliconBackplane PMU support. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
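/*
 * Editorial note (illustrative only): EPI_VERSION_NUM in epivers.h above is
 * the four version components packed one per byte:
 *
 *   (5 << 24) | (90 << 16) | (195 << 8) | 98
 *   = 0x05000000 | 0x005a0000 | 0x0000c300 | 0x00000062
 *   = 0x055ac362
 *
 * which matches the EPI_VERSION_NUM define and the "5.90.195.98" version
 * string. A hypothetical compile-time check of that relationship:
 */
#define EPI_PACK_VERSION(maj, min, rc, inc) \
	(((maj) << 24) | ((min) << 16) | ((rc) << 8) | (inc))
/* e.g. BUILD_BUG_ON(EPI_PACK_VERSION(5, 90, 195, 98) != EPI_VERSION_NUM); */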
+ * + * $Id: hndpmu.h 335486 2012-05-28 09:47:55Z $ + */ + +#ifndef _hndpmu_h_ +#define _hndpmu_h_ + + +extern void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on); +extern void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength); + +extern void si_pmu_set_otp_wr_volts(si_t *sih); +extern void si_pmu_set_otp_rd_volts(si_t *sih); + +#endif /* _hndpmu_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/hndrte_armtrap.h b/drivers/net/wireless/bcmdhd/include/hndrte_armtrap.h new file mode 100644 index 0000000000000..7d862c4deb21f --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/hndrte_armtrap.h @@ -0,0 +1,88 @@ +/* + * HNDRTE arm trap handling. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: hndrte_armtrap.h 277737 2011-08-16 17:54:59Z $ + */ + +#ifndef _hndrte_armtrap_h +#define _hndrte_armtrap_h + + +/* ARM trap handling */ + +/* Trap types defined by ARM (see arminc.h) */ + +/* Trap locations in lo memory */ +#define TRAP_STRIDE 4 +#define FIRST_TRAP TR_RST +#define LAST_TRAP (TR_FIQ * TRAP_STRIDE) + +#if defined(__ARM_ARCH_4T__) +#define MAX_TRAP_TYPE (TR_FIQ + 1) +#elif defined(__ARM_ARCH_7M__) +#define MAX_TRAP_TYPE (TR_ISR + ARMCM3_NUMINTS) +#endif /* __ARM_ARCH_7M__ */ + +/* The trap structure is defined here as offsets for assembly */ +#define TR_TYPE 0x00 +#define TR_EPC 0x04 +#define TR_CPSR 0x08 +#define TR_SPSR 0x0c +#define TR_REGS 0x10 +#define TR_REG(n) (TR_REGS + (n) * 4) +#define TR_SP TR_REG(13) +#define TR_LR TR_REG(14) +#define TR_PC TR_REG(15) + +#define TRAP_T_SIZE 80 + +#ifndef _LANGUAGE_ASSEMBLY + +#include + +typedef struct _trap_struct { + uint32 type; + uint32 epc; + uint32 cpsr; + uint32 spsr; + uint32 r0; + uint32 r1; + uint32 r2; + uint32 r3; + uint32 r4; + uint32 r5; + uint32 r6; + uint32 r7; + uint32 r8; + uint32 r9; + uint32 r10; + uint32 r11; + uint32 r12; + uint32 r13; + uint32 r14; + uint32 pc; +} trap_t; + +#endif /* !_LANGUAGE_ASSEMBLY */ + +#endif /* _hndrte_armtrap_h */ diff --git a/drivers/net/wireless/bcmdhd/include/hndrte_cons.h b/drivers/net/wireless/bcmdhd/include/hndrte_cons.h new file mode 100644 index 0000000000000..859ddc8953a87 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/hndrte_cons.h @@ -0,0 +1,68 @@ +/* + * Console support for hndrte. 
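/*
 * Editorial note: the assembly offsets and the C structure in
 * hndrte_armtrap.h above describe the same layout, which can be checked by
 * arithmetic:
 *
 *   TR_REG(n) = TR_REGS + 4*n = 0x10 + 4*n, so TR_SP = 0x44, TR_LR = 0x48
 *   and TR_PC = 0x4c; trap_t has 20 uint32 members (type, epc, cpsr, spsr,
 *   r0..r14, pc), i.e. 20 * 4 = 80 bytes = TRAP_T_SIZE.
 *
 * Hypothetical consistency checks (not present in the original header):
 *
 *   BUILD_BUG_ON(sizeof(trap_t) != TRAP_T_SIZE);
 *   BUILD_BUG_ON(offsetof(trap_t, pc) != TR_PC);
 */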
+ * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: hndrte_cons.h 277737 2011-08-16 17:54:59Z $ + */ + +#ifndef _HNDRTE_CONS_H +#define _HNDRTE_CONS_H + +#include + +#define CBUF_LEN (128) + +#define LOG_BUF_LEN 1024 + +typedef struct { + uint32 buf; /* Can't be pointer on (64-bit) hosts */ + uint buf_size; + uint idx; + char *_buf_compat; /* redundant pointer for backward compat. */ +} hndrte_log_t; + +typedef struct { + /* Virtual UART + * When there is no UART (e.g. Quickturn), the host should write a complete + * input line directly into cbuf and then write the length into vcons_in. + * This may also be used when there is a real UART (at risk of conflicting with + * the real UART). vcons_out is currently unused. + */ + volatile uint vcons_in; + volatile uint vcons_out; + + /* Output (logging) buffer + * Console output is written to a ring buffer log_buf at index log_idx. + * The host may read the output when it sees log_idx advance. + * Output will be lost if the output wraps around faster than the host polls. + */ + hndrte_log_t log; + + /* Console input line buffer + * Characters are read one at a time into cbuf until is received, then + * the buffer is processed as a command line. Also used for virtual UART. + */ + uint cbuf_idx; + char cbuf[CBUF_LEN]; +} hndrte_cons_t; + +#endif /* _HNDRTE_CONS_H */ diff --git a/drivers/net/wireless/bcmdhd/include/hndsoc.h b/drivers/net/wireless/bcmdhd/include/hndsoc.h new file mode 100644 index 0000000000000..34f927c6af806 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/hndsoc.h @@ -0,0 +1,207 @@ +/* + * Broadcom HND chip & on-chip-interconnect-related definitions. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
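/*
 * Editorial note: a minimal host-side sketch of how the hndrte console
 * structures above are meant to be consumed. The dongle exposes a
 * hndrte_cons_t in its memory; the host re-reads it and copies out any new
 * bytes from the log ring whenever log.idx has advanced. mem_read() is a
 * hypothetical bus accessor (SDIO/USB memory read) used only for
 * illustration; the real driver uses its own equivalent.
 */
static int mem_read(uint32 addr, uint8 *buf, uint len);	/* hypothetical */

static int console_poll(uint32 cons_addr, hndrte_cons_t *c, uint *last_idx,
	char *out, uint out_len)
{
	uint idx, n = 0;

	/* refresh the console header from dongle memory */
	if (mem_read(cons_addr, (uint8 *)c, sizeof(*c)) != 0)
		return -1;

	idx = c->log.idx;
	if (idx >= c->log.buf_size)	/* index must stay inside the ring */
		return -1;

	/* copy the bytes written since the last poll, following the ring */
	while (*last_idx != idx && n < out_len) {
		uint8 ch;

		if (mem_read(c->log.buf + *last_idx, &ch, 1) != 0)
			return -1;
		out[n++] = (char)ch;
		*last_idx = (*last_idx + 1) % c->log.buf_size;
	}

	return (int)n;
}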
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: hndsoc.h 277737 2011-08-16 17:54:59Z $ + */ + +#ifndef _HNDSOC_H +#define _HNDSOC_H + +/* Include the soci specific files */ +#include +#include + +/* + * SOC Interconnect Address Map. + * All regions may not exist on all chips. + */ +#define SI_SDRAM_BASE 0x00000000 /* Physical SDRAM */ +#define SI_PCI_MEM 0x08000000 /* Host Mode sb2pcitranslation0 (64 MB) */ +#define SI_PCI_MEM_SZ (64 * 1024 * 1024) +#define SI_PCI_CFG 0x0c000000 /* Host Mode sb2pcitranslation1 (64 MB) */ +#define SI_SDRAM_SWAPPED 0x10000000 /* Byteswapped Physical SDRAM */ +#define SI_SDRAM_R2 0x80000000 /* Region 2 for sdram (512 MB) */ + +#define SI_ENUM_BASE 0x18000000 /* Enumeration space base */ + +#define SI_WRAP_BASE 0x18100000 /* Wrapper space base */ +#define SI_CORE_SIZE 0x1000 /* each core gets 4Kbytes for registers */ +#define SI_MAXCORES 16 /* Max cores (this is arbitrary, for software + * convenience and could be changed if we + * make any larger chips + */ + +#define SI_FASTRAM 0x19000000 /* On-chip RAM on chips that also have DDR */ +#define SI_FASTRAM_SWAPPED 0x19800000 + +#define SI_FLASH2 0x1c000000 /* Flash Region 2 (region 1 shadowed here) */ +#define SI_FLASH2_SZ 0x02000000 /* Size of Flash Region 2 */ +#define SI_ARMCM3_ROM 0x1e000000 /* ARM Cortex-M3 ROM */ +#define SI_FLASH1 0x1fc00000 /* MIPS Flash Region 1 */ +#define SI_FLASH1_SZ 0x00400000 /* MIPS Size of Flash Region 1 */ +#define SI_ARM7S_ROM 0x20000000 /* ARM7TDMI-S ROM */ +#define SI_ARMCM3_SRAM2 0x60000000 /* ARM Cortex-M3 SRAM Region 2 */ +#define SI_ARM7S_SRAM2 0x80000000 /* ARM7TDMI-S SRAM Region 2 */ +#define SI_ARM_FLASH1 0xffff0000 /* ARM Flash Region 1 */ +#define SI_ARM_FLASH1_SZ 0x00010000 /* ARM Size of Flash Region 1 */ + +#define SI_PCI_DMA 0x40000000 /* Client Mode sb2pcitranslation2 (1 GB) */ +#define SI_PCI_DMA2 0x80000000 /* Client Mode sb2pcitranslation2 (1 GB) */ +#define SI_PCI_DMA_SZ 0x40000000 /* Client Mode sb2pcitranslation2 size in bytes */ +#define SI_PCIE_DMA_L32 0x00000000 /* PCIE Client Mode sb2pcitranslation2 + * (2 ZettaBytes), low 32 bits + */ +#define SI_PCIE_DMA_H32 0x80000000 /* PCIE Client Mode sb2pcitranslation2 + * (2 ZettaBytes), high 32 bits + */ + +/* core codes */ +#define NODEV_CORE_ID 0x700 /* Invalid coreid */ +#define CC_CORE_ID 0x800 /* chipcommon core */ +#define ILINE20_CORE_ID 0x801 /* iline20 core */ +#define SRAM_CORE_ID 0x802 /* sram core */ +#define SDRAM_CORE_ID 0x803 /* sdram core */ +#define PCI_CORE_ID 0x804 /* pci core */ +#define MIPS_CORE_ID 0x805 /* mips core */ +#define ENET_CORE_ID 0x806 /* enet mac core */ +#define CODEC_CORE_ID 0x807 /* v90 codec core */ +#define USB_CORE_ID 0x808 /* usb 1.1 host/device core */ +#define ADSL_CORE_ID 0x809 /* ADSL core */ +#define ILINE100_CORE_ID 0x80a /* iline100 core */ +#define IPSEC_CORE_ID 0x80b /* ipsec core */ +#define UTOPIA_CORE_ID 0x80c /* utopia core */ +#define PCMCIA_CORE_ID 0x80d /* pcmcia core */ +#define SOCRAM_CORE_ID 0x80e /* internal memory core */ +#define MEMC_CORE_ID 0x80f /* memc sdram core */ +#define OFDM_CORE_ID 0x810 /* OFDM phy core */ +#define EXTIF_CORE_ID 0x811 /* external interface core */ +#define D11_CORE_ID 
0x812 /* 802.11 MAC core */ +#define APHY_CORE_ID 0x813 /* 802.11a phy core */ +#define BPHY_CORE_ID 0x814 /* 802.11b phy core */ +#define GPHY_CORE_ID 0x815 /* 802.11g phy core */ +#define MIPS33_CORE_ID 0x816 /* mips3302 core */ +#define USB11H_CORE_ID 0x817 /* usb 1.1 host core */ +#define USB11D_CORE_ID 0x818 /* usb 1.1 device core */ +#define USB20H_CORE_ID 0x819 /* usb 2.0 host core */ +#define USB20D_CORE_ID 0x81a /* usb 2.0 device core */ +#define SDIOH_CORE_ID 0x81b /* sdio host core */ +#define ROBO_CORE_ID 0x81c /* roboswitch core */ +#define ATA100_CORE_ID 0x81d /* parallel ATA core */ +#define SATAXOR_CORE_ID 0x81e /* serial ATA & XOR DMA core */ +#define GIGETH_CORE_ID 0x81f /* gigabit ethernet core */ +#define PCIE_CORE_ID 0x820 /* pci express core */ +#define NPHY_CORE_ID 0x821 /* 802.11n 2x2 phy core */ +#define SRAMC_CORE_ID 0x822 /* SRAM controller core */ +#define MINIMAC_CORE_ID 0x823 /* MINI MAC/phy core */ +#define ARM11_CORE_ID 0x824 /* ARM 1176 core */ +#define ARM7S_CORE_ID 0x825 /* ARM7tdmi-s core */ +#define LPPHY_CORE_ID 0x826 /* 802.11a/b/g phy core */ +#define PMU_CORE_ID 0x827 /* PMU core */ +#define SSNPHY_CORE_ID 0x828 /* 802.11n single-stream phy core */ +#define SDIOD_CORE_ID 0x829 /* SDIO device core */ +#define ARMCM3_CORE_ID 0x82a /* ARM Cortex M3 core */ +#define HTPHY_CORE_ID 0x82b /* 802.11n 4x4 phy core */ +#define MIPS74K_CORE_ID 0x82c /* mips 74k core */ +#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */ +#define DMEMC_CORE_ID 0x82e /* DDR1/2 memory controller core */ +#define PCIERC_CORE_ID 0x82f /* PCIE Root Complex core */ +#define OCP_CORE_ID 0x830 /* OCP2OCP bridge core */ +#define SC_CORE_ID 0x831 /* shared common core */ +#define AHB_CORE_ID 0x832 /* OCP2AHB bridge core */ +#define SPIH_CORE_ID 0x833 /* SPI host core */ +#define I2S_CORE_ID 0x834 /* I2S core */ +#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */ +#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */ +#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */ +#define DEF_AI_COMP 0xfff /* Default component, in ai chips it maps all + * unused address ranges + */ + +/* There are TWO constants on all HND chips: SI_ENUM_BASE above, + * and chipcommon being the first core: + */ +#define SI_CC_IDX 0 + +/* SOC Interconnect types (aka chip types) */ +#define SOCI_SB 0 +#define SOCI_AI 1 +#define SOCI_UBUS 2 + +/* Common core control flags */ +#define SICF_BIST_EN 0x8000 +#define SICF_PME_EN 0x4000 +#define SICF_CORE_BITS 0x3ffc +#define SICF_FGC 0x0002 +#define SICF_CLOCK_EN 0x0001 + +/* Common core status flags */ +#define SISF_BIST_DONE 0x8000 +#define SISF_BIST_ERROR 0x4000 +#define SISF_GATED_CLK 0x2000 +#define SISF_DMA64 0x1000 +#define SISF_CORE_BITS 0x0fff + +/* A register that is common to all cores to + * communicate w/PMU regarding clock control. 
+ */ +#define SI_CLK_CTL_ST 0x1e0 /* clock control and status */ + +/* clk_ctl_st register */ +#define CCS_FORCEALP 0x00000001 /* force ALP request */ +#define CCS_FORCEHT 0x00000002 /* force HT request */ +#define CCS_FORCEILP 0x00000004 /* force ILP request */ +#define CCS_ALPAREQ 0x00000008 /* ALP Avail Request */ +#define CCS_HTAREQ 0x00000010 /* HT Avail Request */ +#define CCS_FORCEHWREQOFF 0x00000020 /* Force HW Clock Request Off */ +#define CCS_ERSRC_REQ_MASK 0x00000700 /* external resource requests */ +#define CCS_ERSRC_REQ_SHIFT 8 +#define CCS_ALPAVAIL 0x00010000 /* ALP is available */ +#define CCS_HTAVAIL 0x00020000 /* HT is available */ +#define CCS_BP_ON_APL 0x00040000 /* RO: Backplane is running on ALP clock */ +#define CCS_BP_ON_HT 0x00080000 /* RO: Backplane is running on HT clock */ +#define CCS_ERSRC_STS_MASK 0x07000000 /* external resource status */ +#define CCS_ERSRC_STS_SHIFT 24 + +#define CCS0_HTAVAIL 0x00010000 /* HT avail in chipc and pcmcia on 4328a0 */ +#define CCS0_ALPAVAIL 0x00020000 /* ALP avail in chipc and pcmcia on 4328a0 */ + +/* Not really related to SOC Interconnect, but a couple of software + * conventions for the use the flash space: + */ + +/* Minumum amount of flash we support */ +#define FLASH_MIN 0x00020000 /* Minimum flash size */ + +/* A boot/binary may have an embedded block that describes its size */ +#define BISZ_OFFSET 0x3e0 /* At this offset into the binary */ +#define BISZ_MAGIC 0x4249535a /* Marked with this value: 'BISZ' */ +#define BISZ_MAGIC_IDX 0 /* Word 0: magic */ +#define BISZ_TXTST_IDX 1 /* 1: text start */ +#define BISZ_TXTEND_IDX 2 /* 2: text end */ +#define BISZ_DATAST_IDX 3 /* 3: data start */ +#define BISZ_DATAEND_IDX 4 /* 4: data end */ +#define BISZ_BSSST_IDX 5 /* 5: bss start */ +#define BISZ_BSSEND_IDX 6 /* 6: bss end */ +#define BISZ_SIZE 7 /* descriptor size in 32-bit integers */ + +#endif /* _HNDSOC_H */ diff --git a/drivers/net/wireless/bcmdhd/include/htsf.h b/drivers/net/wireless/bcmdhd/include/htsf.h new file mode 100644 index 0000000000000..d875edb816c9c --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/htsf.h @@ -0,0 +1,74 @@ +/* + * Time stamps for latency measurements + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
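/*
 * Editorial note: an illustrative sketch (not from the original sources) of
 * reading the BISZ descriptor defined at the end of hndsoc.h above out of a
 * firmware image to recover its text/data/bss extents. 'image' is assumed
 * to point at the start of the word-aligned binary.
 */
static int bisz_parse(const uint32 *image,
	uint32 *text_start, uint32 *text_end,
	uint32 *data_start, uint32 *data_end,
	uint32 *bss_start, uint32 *bss_end)
{
	/* the descriptor sits BISZ_OFFSET bytes into the binary */
	const uint32 *bisz = image + (BISZ_OFFSET / sizeof(uint32));

	if (bisz[BISZ_MAGIC_IDX] != BISZ_MAGIC)
		return -1;	/* no descriptor embedded in this image */

	*text_start = bisz[BISZ_TXTST_IDX];
	*text_end   = bisz[BISZ_TXTEND_IDX];
	*data_start = bisz[BISZ_DATAST_IDX];
	*data_end   = bisz[BISZ_DATAEND_IDX];
	*bss_start  = bisz[BISZ_BSSST_IDX];
	*bss_end    = bisz[BISZ_BSSEND_IDX];

	return 0;
}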
+ * + * $Id: htsf.h 277737 2011-08-16 17:54:59Z $ + */ +#ifndef _HTSF_H_ +#define _HTSF_H_ + +#define HTSFMAGIC 0xCDCDABAB /* in network order for tcpdump */ +#define HTSFENDMAGIC 0xEFEFABAB /* to distinguish from RT2 magic */ +#define HTSF_HOSTOFFSET 102 +#define HTSF_DNGLOFFSET HTSF_HOSTOFFSET - 4 +#define HTSF_DNGLOFFSET2 HTSF_HOSTOFFSET + 106 +#define HTSF_MIN_PKTLEN 200 +#define ETHER_TYPE_BRCM_PKTDLYSTATS 0x886d + +typedef enum htsfts_type { + T10, + T20, + T30, + T40, + T50, + T60, + T70, + T80, + T90, + TA0, + TE0 +} htsf_timestamp_t; + +typedef struct { + uint32 magic; + uint32 prio; + uint32 seqnum; + uint32 misc; + uint32 c10; + uint32 t10; + uint32 c20; + uint32 t20; + uint32 t30; + uint32 t40; + uint32 t50; + uint32 t60; + uint32 t70; + uint32 t80; + uint32 t90; + uint32 cA0; + uint32 tA0; + uint32 cE0; + uint32 tE0; + uint32 endmagic; +} htsfts_t; + +#endif /* _HTSF_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/linux_osl.h b/drivers/net/wireless/bcmdhd/include/linux_osl.h new file mode 100644 index 0000000000000..7f92966d977e4 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/linux_osl.h @@ -0,0 +1,431 @@ +/* + * Linux OS Independent Layer + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: linux_osl.h 301794 2011-12-08 20:41:35Z $ + */ + + +#ifndef _linux_osl_h_ +#define _linux_osl_h_ + +#include + + +extern void * osl_os_open_image(char * filename); +extern int osl_os_get_image_block(char * buf, int len, void * image); +extern void osl_os_close_image(void * image); + + +#ifdef BCMDRIVER + + +extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag); +extern void osl_detach(osl_t *osh); + + +extern uint32 g_assert_type; + + +#if defined(BCMASSERT_LOG) + #define ASSERT(exp) \ + do { if (!(exp)) osl_assert(#exp, __FILE__, __LINE__); } while (0) +extern void osl_assert(char *exp, char *file, int line); +#else + #ifdef __GNUC__ + #define GCC_VERSION \ + (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) + #if GCC_VERSION > 30100 + #define ASSERT(exp) do {} while (0) + #else + + #define ASSERT(exp) + #endif + #endif +#endif + + +#define OSL_DELAY(usec) osl_delay(usec) +extern void osl_delay(uint usec); + +#define OSL_PCMCIA_READ_ATTR(osh, offset, buf, size) \ + osl_pcmcia_read_attr((osh), (offset), (buf), (size)) +#define OSL_PCMCIA_WRITE_ATTR(osh, offset, buf, size) \ + osl_pcmcia_write_attr((osh), (offset), (buf), (size)) +extern void osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size); +extern void osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size); + + +#define OSL_PCI_READ_CONFIG(osh, offset, size) \ + osl_pci_read_config((osh), (offset), (size)) +#define OSL_PCI_WRITE_CONFIG(osh, offset, size, val) \ + osl_pci_write_config((osh), (offset), (size), (val)) +extern uint32 osl_pci_read_config(osl_t *osh, uint offset, uint size); +extern void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val); + + +#define OSL_PCI_BUS(osh) osl_pci_bus(osh) +#define OSL_PCI_SLOT(osh) osl_pci_slot(osh) +extern uint osl_pci_bus(osl_t *osh); +extern uint osl_pci_slot(osl_t *osh); + + +typedef struct { + bool pkttag; + uint pktalloced; + bool mmbus; + pktfree_cb_fn_t tx_fn; + void *tx_ctx; +} osl_pubinfo_t; + +#define PKTFREESETCB(osh, _tx_fn, _tx_ctx) \ + do { \ + ((osl_pubinfo_t*)osh)->tx_fn = _tx_fn; \ + ((osl_pubinfo_t*)osh)->tx_ctx = _tx_ctx; \ + } while (0) + + + +#define BUS_SWAP32(v) (v) + + #define MALLOC(osh, size) osl_malloc((osh), (size)) + #define MFREE(osh, addr, size) osl_mfree((osh), (addr), (size)) + #define MALLOCED(osh) osl_malloced((osh)) + extern void *osl_malloc(osl_t *osh, uint size); + extern void osl_mfree(osl_t *osh, void *addr, uint size); + extern uint osl_malloced(osl_t *osh); + +#define NATIVE_MALLOC(osh, size) kmalloc(size, GFP_ATOMIC) +#define NATIVE_MFREE(osh, addr, size) kfree(addr) + +#define MALLOC_FAILED(osh) osl_malloc_failed((osh)) +extern uint osl_malloc_failed(osl_t *osh); + + +#define DMA_CONSISTENT_ALIGN osl_dma_consistent_align() +#define DMA_ALLOC_CONSISTENT(osh, size, align, tot, pap, dmah) \ + osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap)) +#define DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \ + osl_dma_free_consistent((osh), (void*)(va), (size), (pa)) +extern uint osl_dma_consistent_align(void); +extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align, uint *tot, ulong *pap); +extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa); + + +#define DMA_TX 1 +#define DMA_RX 2 + + +#define DMA_MAP(osh, va, size, direction, p, dmah) \ + osl_dma_map((osh), (va), (size), (direction)) +#define DMA_UNMAP(osh, pa, size, direction, p, dmah) \ + osl_dma_unmap((osh), (pa), (size), (direction)) +extern uint osl_dma_map(osl_t *osh, 
void *va, uint size, int direction); +extern void osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction); + + +#define OSL_DMADDRWIDTH(osh, addrwidth) do {} while (0) + + + #include + #define OSL_WRITE_REG(osh, r, v) (bcmsdh_reg_write(NULL, (uintptr)(r), sizeof(*(r)), (v))) + #define OSL_READ_REG(osh, r) (bcmsdh_reg_read(NULL, (uintptr)(r), sizeof(*(r)))) + + #define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t*)(osh))->mmbus) \ + mmap_op else bus_op + #define SELECT_BUS_READ(osh, mmap_op, bus_op) (((osl_pubinfo_t*)(osh))->mmbus) ? \ + mmap_op : bus_op + +#define OSL_ERROR(bcmerror) osl_error(bcmerror) +extern int osl_error(int bcmerror); + + +#define PKTBUFSZ 2048 + + + +#define OSL_SYSUPTIME() ((uint32)jiffies_to_msecs(jiffies)) +#define printf(fmt, args...) printk(fmt , ## args) +#include +#include + +#define bcopy(src, dst, len) memcpy((dst), (src), (len)) +#define bcmp(b1, b2, len) memcmp((b1), (b2), (len)) +#define bzero(b, len) memset((b), '\0', (len)) + + + +#ifndef __mips__ +#define R_REG(osh, r) (\ + SELECT_BUS_READ(osh, sizeof(*(r)) == sizeof(uint8) ? readb((volatile uint8*)(r)) : \ + sizeof(*(r)) == sizeof(uint16) ? readw((volatile uint16*)(r)) : \ + readl((volatile uint32*)(r)), OSL_READ_REG(osh, r)) \ +) +#else +#define R_REG(osh, r) (\ + SELECT_BUS_READ(osh, \ + ({ \ + __typeof(*(r)) __osl_v; \ + __asm__ __volatile__("sync"); \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): __osl_v = \ + readb((volatile uint8*)(r)); break; \ + case sizeof(uint16): __osl_v = \ + readw((volatile uint16*)(r)); break; \ + case sizeof(uint32): __osl_v = \ + readl((volatile uint32*)(r)); break; \ + } \ + __asm__ __volatile__("sync"); \ + __osl_v; \ + }), \ + ({ \ + __typeof(*(r)) __osl_v; \ + __asm__ __volatile__("sync"); \ + __osl_v = OSL_READ_REG(osh, r); \ + __asm__ __volatile__("sync"); \ + __osl_v; \ + })) \ +) +#endif + +#define W_REG(osh, r, v) do { \ + SELECT_BUS_WRITE(osh, \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \ + case sizeof(uint16): writew((uint16)(v), (volatile uint16*)(r)); break; \ + case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \ + }, \ + (OSL_WRITE_REG(osh, r, v))); \ + } while (0) + + +#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v)) +#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v)) + + +#define bcopy(src, dst, len) memcpy((dst), (src), (len)) +#define bcmp(b1, b2, len) memcmp((b1), (b2), (len)) +#define bzero(b, len) memset((b), '\0', (len)) + + +#ifdef __mips__ +#include +#define OSL_UNCACHED(va) ((void *)KSEG1ADDR((va))) +#define OSL_CACHED(va) ((void *)KSEG0ADDR((va))) +#else +#define OSL_UNCACHED(va) ((void *)va) +#define OSL_CACHED(va) ((void *)va) +#endif + + +#if defined(__i386__) +#define OSL_GETCYCLES(x) rdtscl((x)) +#else +#define OSL_GETCYCLES(x) ((x) = 0) +#endif + + +#define BUSPROBE(val, addr) ({ (val) = R_REG(NULL, (addr)); 0; }) + + +#if !defined(CONFIG_MMC_MSM7X00A) +#define REG_MAP(pa, size) ioremap_nocache((unsigned long)(pa), (unsigned long)(size)) +#else +#define REG_MAP(pa, size) (void *)(0) +#endif +#define REG_UNMAP(va) iounmap((va)) + + +#define R_SM(r) *(r) +#define W_SM(r, v) (*(r) = (v)) +#define BZERO_SM(r, len) memset((r), '\0', (len)) + + +#include + + +#define PKTGET(osh, len, send) osl_pktget((osh), (len)) +#define PKTDUP(osh, skb) osl_pktdup((osh), (skb)) +#define PKTLIST_DUMP(osh, buf) +#define PKTDBG_TRACE(osh, pkt, bit) +#define PKTFREE(osh, skb, send) osl_pktfree((osh), (skb), (send)) +#ifdef 
CONFIG_DHD_USE_STATIC_BUF +#define PKTGET_STATIC(osh, len, send) osl_pktget_static((osh), (len)) +#define PKTFREE_STATIC(osh, skb, send) osl_pktfree_static((osh), (skb), (send)) +#endif +#define PKTDATA(osh, skb) (((struct sk_buff*)(skb))->data) +#define PKTLEN(osh, skb) (((struct sk_buff*)(skb))->len) +#define PKTHEADROOM(osh, skb) (PKTDATA(osh, skb)-(((struct sk_buff*)(skb))->head)) +#define PKTTAILROOM(osh, skb) ((((struct sk_buff*)(skb))->end)-(((struct sk_buff*)(skb))->tail)) +#define PKTNEXT(osh, skb) (((struct sk_buff*)(skb))->next) +#define PKTSETNEXT(osh, skb, x) (((struct sk_buff*)(skb))->next = (struct sk_buff*)(x)) +#define PKTSETLEN(osh, skb, len) __skb_trim((struct sk_buff*)(skb), (len)) +#define PKTPUSH(osh, skb, bytes) skb_push((struct sk_buff*)(skb), (bytes)) +#define PKTPULL(osh, skb, bytes) skb_pull((struct sk_buff*)(skb), (bytes)) +#define PKTTAG(skb) ((void*)(((struct sk_buff*)(skb))->cb)) +#define PKTALLOCED(osh) ((osl_pubinfo_t *)(osh))->pktalloced +#define PKTSETPOOL(osh, skb, x, y) do {} while (0) +#define PKTPOOL(osh, skb) FALSE +#define PKTSHRINK(osh, m) (m) + +#ifdef CTFPOOL +#define CTFPOOL_REFILL_THRESH 3 +typedef struct ctfpool { + void *head; + spinlock_t lock; + uint max_obj; + uint curr_obj; + uint obj_size; + uint refills; + uint fast_allocs; + uint fast_frees; + uint slow_allocs; +} ctfpool_t; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) +#define FASTBUF (1 << 4) +#define CTFBUF (1 << 5) +#define PKTSETFAST(osh, skb) ((((struct sk_buff*)(skb))->mac_len) |= FASTBUF) +#define PKTCLRFAST(osh, skb) ((((struct sk_buff*)(skb))->mac_len) &= (~FASTBUF)) +#define PKTSETCTF(osh, skb) ((((struct sk_buff*)(skb))->mac_len) |= CTFBUF) +#define PKTCLRCTF(osh, skb) ((((struct sk_buff*)(skb))->mac_len) &= (~CTFBUF)) +#define PKTISFAST(osh, skb) ((((struct sk_buff*)(skb))->mac_len) & FASTBUF) +#define PKTISCTF(osh, skb) ((((struct sk_buff*)(skb))->mac_len) & CTFBUF) +#define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->mac_len) +#else +#define FASTBUF (1 << 0) +#define CTFBUF (1 << 1) +#define PKTSETFAST(osh, skb) ((((struct sk_buff*)(skb))->__unused) |= FASTBUF) +#define PKTCLRFAST(osh, skb) ((((struct sk_buff*)(skb))->__unused) &= (~FASTBUF)) +#define PKTSETCTF(osh, skb) ((((struct sk_buff*)(skb))->__unused) |= CTFBUF) +#define PKTCLRCTF(osh, skb) ((((struct sk_buff*)(skb))->__unused) &= (~CTFBUF)) +#define PKTISFAST(osh, skb) ((((struct sk_buff*)(skb))->__unused) & FASTBUF) +#define PKTISCTF(osh, skb) ((((struct sk_buff*)(skb))->__unused) & CTFBUF) +#define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->__unused) +#endif + +#define CTFPOOLPTR(osh, skb) (((struct sk_buff*)(skb))->sk) +#define CTFPOOLHEAD(osh, skb) (((ctfpool_t *)((struct sk_buff*)(skb))->sk)->head) + +extern void *osl_ctfpool_add(osl_t *osh); +extern void osl_ctfpool_replenish(osl_t *osh, uint thresh); +extern int32 osl_ctfpool_init(osl_t *osh, uint numobj, uint size); +extern void osl_ctfpool_cleanup(osl_t *osh); +extern void osl_ctfpool_stats(osl_t *osh, void *b); +#endif + +#ifdef HNDCTF +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) +#define SKIPCT (1 << 6) +#define PKTSETSKIPCT(osh, skb) (((struct sk_buff*)(skb))->mac_len |= SKIPCT) +#define PKTCLRSKIPCT(osh, skb) (((struct sk_buff*)(skb))->mac_len &= (~SKIPCT)) +#define PKTSKIPCT(osh, skb) (((struct sk_buff*)(skb))->mac_len & SKIPCT) +#else +#define SKIPCT (1 << 2) +#define PKTSETSKIPCT(osh, skb) (((struct sk_buff*)(skb))->__unused |= SKIPCT) +#define PKTCLRSKIPCT(osh, skb) (((struct sk_buff*)(skb))->__unused &= (~SKIPCT)) +#define 
PKTSKIPCT(osh, skb) (((struct sk_buff*)(skb))->__unused & SKIPCT) +#endif +#else +#define PKTSETSKIPCT(osh, skb) +#define PKTCLRSKIPCT(osh, skb) +#define PKTSKIPCT(osh, skb) +#endif + +extern void osl_pktfree(osl_t *osh, void *skb, bool send); +extern void *osl_pktget_static(osl_t *osh, uint len); +extern void osl_pktfree_static(osl_t *osh, void *skb, bool send); + +extern void *osl_pktget(osl_t *osh, uint len); +extern void *osl_pktdup(osl_t *osh, void *skb); + + +static INLINE void * +osl_pkt_frmnative(osl_pubinfo_t *osh, void *pkt) +{ + struct sk_buff *nskb; + + if (osh->pkttag) + bzero((void*)((struct sk_buff*)pkt)->cb, OSL_PKTTAG_SZ); + + + for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) { + osh->pktalloced++; + } + + return (void *)pkt; +} +#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_pubinfo_t *)osh), (struct sk_buff*)(skb)) + + +static INLINE struct sk_buff * +osl_pkt_tonative(osl_pubinfo_t *osh, void *pkt) +{ + struct sk_buff *nskb; + + if (osh->pkttag) + bzero(((struct sk_buff*)pkt)->cb, OSL_PKTTAG_SZ); + + + for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) { + osh->pktalloced--; + } + + return (struct sk_buff *)pkt; +} +#define PKTTONATIVE(osh, pkt) osl_pkt_tonative((osl_pubinfo_t *)(osh), (pkt)) + +#define PKTLINK(skb) (((struct sk_buff*)(skb))->prev) +#define PKTSETLINK(skb, x) (((struct sk_buff*)(skb))->prev = (struct sk_buff*)(x)) +#define PKTPRIO(skb) (((struct sk_buff*)(skb))->priority) +#define PKTSETPRIO(skb, x) (((struct sk_buff*)(skb))->priority = (x)) +#define PKTSUMNEEDED(skb) (((struct sk_buff*)(skb))->ip_summed == CHECKSUM_HW) +#define PKTSETSUMGOOD(skb, x) (((struct sk_buff*)(skb))->ip_summed = \ + ((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE)) + +#define PKTSHARED(skb) (((struct sk_buff*)(skb))->cloned) + + + +#else + + + + #define ASSERT(exp) do {} while (0) + + +#define MALLOC(o, l) malloc(l) +#define MFREE(o, p, l) free(p) +#include + + +#include + + +#include + + +extern void bcopy(const void *src, void *dst, size_t len); +extern int bcmp(const void *b1, const void *b2, size_t len); +extern void bzero(void *b, size_t len); +#endif + +#endif diff --git a/drivers/net/wireless/bcmdhd/include/linuxver.h b/drivers/net/wireless/bcmdhd/include/linuxver.h new file mode 100644 index 0000000000000..54d88ee923b26 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/linuxver.h @@ -0,0 +1,614 @@ +/* + * Linux-specific abstractions to gain some independence from linux kernel versions. + * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
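/*
 * Editorial note: a minimal sketch (not from the original sources) of the
 * intended shape of the packet API defined in linux_osl.h above: allocate
 * with PKTGET, fill through PKTDATA/PKTSETLEN, release with PKTFREE. 'osh'
 * is an osl_t obtained from osl_attach(); error handling is reduced to the
 * bare minimum and no real transmit path is involved.
 */
static int osl_pkt_example(osl_t *osh, const uint8 *payload, uint len)
{
	void *pkt = PKTGET(osh, len, TRUE);	/* TRUE: packet is for tx */

	if (pkt == NULL)
		return -1;

	bcopy(payload, PKTDATA(osh, pkt), len);
	PKTSETLEN(osh, pkt, len);

	/* a real driver would hand 'pkt' to its bus/tx code here; */
	/* this sketch simply releases it again */
	PKTFREE(osh, pkt, TRUE);

	return 0;
}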
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: linuxver.h 312264 2012-02-02 00:49:43Z $ + */ + + +#ifndef _linuxver_h_ +#define _linuxver_h_ + +#include +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +#include +#else +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33)) +#include +#else +#include +#endif +#endif +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)) + +#ifdef __UNDEF_NO_VERSION__ +#undef __NO_VERSION__ +#else +#define __NO_VERSION__ +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0) +#define module_param(_name_, _type_, _perm_) MODULE_PARM(_name_, "i") +#define module_param_string(_name_, _string_, _size_, _perm_) \ + MODULE_PARM(_string_, "c" __MODULE_STRING(_size_)) +#endif + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9)) +#include +#else +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) +#include +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) +#undef IP_TOS +#endif +#include + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41)) +#include +#else +#include +#ifndef work_struct +#define work_struct tq_struct +#endif +#ifndef INIT_WORK +#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data)) +#endif +#ifndef schedule_work +#define schedule_work(_work) schedule_task((_work)) +#endif +#ifndef flush_scheduled_work +#define flush_scheduled_work() flush_scheduled_tasks() +#endif +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) +#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func) +#else +#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func, _work) +typedef void (*work_func_t)(void *work); +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) + +#ifndef IRQ_NONE +typedef void irqreturn_t; +#define IRQ_NONE +#define IRQ_HANDLED +#define IRQ_RETVAL(x) +#endif +#else +typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs); +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) +#define IRQF_SHARED SA_SHIRQ +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17) +#ifdef CONFIG_NET_RADIO +#define CONFIG_WIRELESS_EXT +#endif +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) +#define MOD_INC_USE_COUNT +#define MOD_DEC_USE_COUNT +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) +#include +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) +#include +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) +#include +#else +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14) +#include +#endif +#endif + + +#ifndef __exit +#define __exit +#endif +#ifndef __devexit +#define __devexit +#endif +#ifndef __devinit +#define __devinit __init +#endif +#ifndef __devinitdata +#define __devinitdata +#endif +#ifndef __devexit_p +#define __devexit_p(x) x +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)) + +#define pci_get_drvdata(dev) (dev)->sysdata +#define pci_set_drvdata(dev, value) (dev)->sysdata = (value) + + + +struct pci_device_id { + unsigned int vendor, device; + unsigned int subvendor, subdevice; + unsigned int class, class_mask; + unsigned long driver_data; +}; + +struct pci_driver { + struct list_head node; + char *name; + const struct pci_device_id *id_table; + int (*probe)(struct pci_dev *dev, + const struct 
pci_device_id *id); + void (*remove)(struct pci_dev *dev); + void (*suspend)(struct pci_dev *dev); + void (*resume)(struct pci_dev *dev); +}; + +#define MODULE_DEVICE_TABLE(type, name) +#define PCI_ANY_ID (~0) + + +#define pci_module_init pci_register_driver +extern int pci_register_driver(struct pci_driver *drv); +extern void pci_unregister_driver(struct pci_driver *drv); + +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)) +#define pci_module_init pci_register_driver +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18)) +#ifdef MODULE +#define module_init(x) int init_module(void) { return x(); } +#define module_exit(x) void cleanup_module(void) { x(); } +#else +#define module_init(x) __initcall(x); +#define module_exit(x) __exitcall(x); +#endif +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) +#define WL_USE_NETDEV_OPS +#else +#undef WL_USE_NETDEV_OPS +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL_INPUT) +#define WL_CONFIG_RFKILL_INPUT +#else +#undef WL_CONFIG_RFKILL_INPUT +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48)) +#define list_for_each(pos, head) \ + for (pos = (head)->next; pos != (head); pos = pos->next) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13)) +#define pci_resource_start(dev, bar) ((dev)->base_address[(bar)]) +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44)) +#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23)) +#define pci_enable_device(dev) do { } while (0) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14)) +#define net_device device +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42)) + + + +#ifndef PCI_DMA_TODEVICE +#define PCI_DMA_TODEVICE 1 +#define PCI_DMA_FROMDEVICE 2 +#endif + +typedef u32 dma_addr_t; + + +static inline int get_order(unsigned long size) +{ + int order; + + size = (size-1) >> (PAGE_SHIFT-1); + order = -1; + do { + size >>= 1; + order++; + } while (size); + return order; +} + +static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, + dma_addr_t *dma_handle) +{ + void *ret; + int gfp = GFP_ATOMIC | GFP_DMA; + + ret = (void *)__get_free_pages(gfp, get_order(size)); + + if (ret != NULL) { + memset(ret, 0, size); + *dma_handle = virt_to_bus(ret); + } + return ret; +} +static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size, + void *vaddr, dma_addr_t dma_handle) +{ + free_pages((unsigned long)vaddr, get_order(size)); +} +#define pci_map_single(cookie, address, size, dir) virt_to_bus(address) +#define pci_unmap_single(cookie, address, size, dir) + +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43)) + +#define dev_kfree_skb_any(a) dev_kfree_skb(a) +#define netif_down(dev) do { (dev)->start = 0; } while (0) + + +#ifndef _COMPAT_NETDEVICE_H + + + +#define dev_kfree_skb_irq(a) dev_kfree_skb(a) +#define netif_wake_queue(dev) \ + do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0) +#define netif_stop_queue(dev) set_bit(0, &(dev)->tbusy) + +static inline void netif_start_queue(struct net_device *dev) +{ + dev->tbusy = 0; + dev->interrupt = 0; + dev->start = 1; +} + +#define netif_queue_stopped(dev) (dev)->tbusy +#define netif_running(dev) (dev)->start + +#endif + +#define netif_device_attach(dev) netif_start_queue(dev) +#define netif_device_detach(dev) netif_stop_queue(dev) + + +#define tasklet_struct tq_struct +static inline void tasklet_schedule(struct tasklet_struct *tasklet) +{ + 
queue_task(tasklet, &tq_immediate); + mark_bh(IMMEDIATE_BH); +} + +static inline void tasklet_init(struct tasklet_struct *tasklet, + void (*func)(unsigned long), + unsigned long data) +{ + tasklet->next = NULL; + tasklet->sync = 0; + tasklet->routine = (void (*)(void *))func; + tasklet->data = (void *)data; +} +#define tasklet_kill(tasklet) { do {} while (0); } + + +#define del_timer_sync(timer) del_timer(timer) + +#else + +#define netif_down(dev) + +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3)) + + +#define PREPARE_TQUEUE(_tq, _routine, _data) \ + do { \ + (_tq)->routine = _routine; \ + (_tq)->data = _data; \ + } while (0) + + +#define INIT_TQUEUE(_tq, _routine, _data) \ + do { \ + INIT_LIST_HEAD(&(_tq)->list); \ + (_tq)->sync = 0; \ + PREPARE_TQUEUE((_tq), (_routine), (_data)); \ + } while (0) + +#endif + + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9) +#define PCI_SAVE_STATE(a, b) pci_save_state(a) +#define PCI_RESTORE_STATE(a, b) pci_restore_state(a) +#else +#define PCI_SAVE_STATE(a, b) pci_save_state(a, b) +#define PCI_RESTORE_STATE(a, b) pci_restore_state(a, b) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6)) +static inline int +pci_save_state(struct pci_dev *dev, u32 *buffer) +{ + int i; + if (buffer) { + for (i = 0; i < 16; i++) + pci_read_config_dword(dev, i * 4, &buffer[i]); + } + return 0; +} + +static inline int +pci_restore_state(struct pci_dev *dev, u32 *buffer) +{ + int i; + + if (buffer) { + for (i = 0; i < 16; i++) + pci_write_config_dword(dev, i * 4, buffer[i]); + } + + else { + for (i = 0; i < 6; i ++) + pci_write_config_dword(dev, + PCI_BASE_ADDRESS_0 + (i * 4), + pci_resource_start(dev, i)); + pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); + } + return 0; +} +#endif + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19)) +#define read_c0_count() read_32bit_cp0_register(CP0_COUNT) +#endif + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) +#ifndef SET_MODULE_OWNER +#define SET_MODULE_OWNER(dev) do {} while (0) +#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT +#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT +#else +#define OLD_MOD_INC_USE_COUNT do {} while (0) +#define OLD_MOD_DEC_USE_COUNT do {} while (0) +#endif +#else +#ifndef SET_MODULE_OWNER +#define SET_MODULE_OWNER(dev) do {} while (0) +#endif +#ifndef MOD_INC_USE_COUNT +#define MOD_INC_USE_COUNT do {} while (0) +#endif +#ifndef MOD_DEC_USE_COUNT +#define MOD_DEC_USE_COUNT do {} while (0) +#endif +#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT +#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT +#endif + +#ifndef SET_NETDEV_DEV +#define SET_NETDEV_DEV(net, pdev) do {} while (0) +#endif + +#ifndef HAVE_FREE_NETDEV +#define free_netdev(dev) kfree(dev) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) + +#define af_packet_priv data +#endif + + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11) +#define DRV_SUSPEND_STATE_TYPE pm_message_t +#else +#define DRV_SUSPEND_STATE_TYPE uint32 +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) +#define CHECKSUM_HW CHECKSUM_PARTIAL +#endif + +typedef struct { + void *parent; + struct task_struct *p_task; + long thr_pid; + int prio; + struct semaphore sema; + bool terminated; + struct completion completed; +} tsk_ctl_t; + + + + +#ifdef DHD_DEBUG +#define DBG_THR(x) printk x +#else +#define DBG_THR(x) +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define SMP_RD_BARRIER_DEPENDS(x) smp_read_barrier_depends(x) +#else +#define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x) +#endif + + +#define 
PROC_START(thread_func, owner, tsk_ctl, flags) \ +{ \ + sema_init(&((tsk_ctl)->sema), 0); \ + init_completion(&((tsk_ctl)->completed)); \ + (tsk_ctl)->parent = owner; \ + (tsk_ctl)->terminated = FALSE; \ + (tsk_ctl)->thr_pid = kernel_thread(thread_func, tsk_ctl, flags); \ + if ((tsk_ctl)->thr_pid > 0) \ + wait_for_completion(&((tsk_ctl)->completed)); \ + DBG_THR(("%s thr:%lx started\n", __FUNCTION__, (tsk_ctl)->thr_pid)); \ +} + +#define PROC_STOP(tsk_ctl) \ +{ \ + (tsk_ctl)->terminated = TRUE; \ + smp_wmb(); \ + up(&((tsk_ctl)->sema)); \ + wait_for_completion(&((tsk_ctl)->completed)); \ + DBG_THR(("%s thr:%lx terminated OK\n", __FUNCTION__, (tsk_ctl)->thr_pid)); \ + (tsk_ctl)->thr_pid = -1; \ +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define DAEMONIZE(a) daemonize(a); \ + allow_signal(SIGKILL); \ + allow_signal(SIGTERM); +#else /* Linux 2.4 (w/o preemption patch) */ +#define RAISE_RX_SOFTIRQ() \ + cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ) +#define DAEMONIZE(a) daemonize(); \ + do { if (a) \ + strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \ + } while (0); +#endif /* LINUX_VERSION_CODE */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define BLOCKABLE() (!in_atomic()) +#else +#define BLOCKABLE() (!in_interrupt()) +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) +#define KILL_PROC(nr, sig) \ +{ \ +struct task_struct *tsk; \ +struct pid *pid; \ +pid = find_get_pid((pid_t)nr); \ +tsk = pid_task(pid, PIDTYPE_PID); \ +if (tsk) send_sig(sig, tsk, 1); \ +} +#else +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \ + KERNEL_VERSION(2, 6, 30)) +#define KILL_PROC(pid, sig) \ +{ \ + struct task_struct *tsk; \ + tsk = find_task_by_vpid(pid); \ + if (tsk) send_sig(sig, tsk, 1); \ +} +#else +#define KILL_PROC(pid, sig) \ +{ \ + kill_proc(pid, sig, 1); \ +} +#endif +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#include +#include +#else +#include + +#define __wait_event_interruptible_timeout(wq, condition, ret) \ +do { \ + wait_queue_t __wait; \ + init_waitqueue_entry(&__wait, current); \ + \ + add_wait_queue(&wq, &__wait); \ + for (;;) { \ + set_current_state(TASK_INTERRUPTIBLE); \ + if (condition) \ + break; \ + if (!signal_pending(current)) { \ + ret = schedule_timeout(ret); \ + if (!ret) \ + break; \ + continue; \ + } \ + ret = -ERESTARTSYS; \ + break; \ + } \ + current->state = TASK_RUNNING; \ + remove_wait_queue(&wq, &__wait); \ +} while (0) + +#define wait_event_interruptible_timeout(wq, condition, timeout) \ +({ \ + long __ret = timeout; \ + if (!(condition)) \ + __wait_event_interruptible_timeout(wq, condition, __ret); \ + __ret; \ +}) + +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) +#define WL_DEV_IF(dev) ((wl_if_t*)netdev_priv(dev)) +#else +#define WL_DEV_IF(dev) ((wl_if_t*)(dev)->priv) +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) +#define WL_ISR(i, d, p) wl_isr((i), (d)) +#else +#define WL_ISR(i, d, p) wl_isr((i), (d), (p)) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +#define netdev_priv(dev) dev->priv +#endif + +#endif diff --git a/drivers/net/wireless/bcmdhd/include/miniopt.h b/drivers/net/wireless/bcmdhd/include/miniopt.h new file mode 100644 index 0000000000000..77eace6252d70 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/miniopt.h @@ -0,0 +1,77 @@ +/* + * Command line options parser. 
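/*
 * Editorial note: a hypothetical thread body showing the shape that the
 * PROC_START/PROC_STOP macros above expect. The thread signals 'completed'
 * once it is running, sleeps on 'sema' until it is given work, and
 * completes again on exit so that PROC_STOP can wait for it. This is a
 * sketch of the convention, not code taken from the driver.
 */
static int example_thread(void *data)
{
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;

	DAEMONIZE("example_thread");
	complete(&tsk->completed);		/* PROC_START waits for this */

	while (down_interruptible(&tsk->sema) == 0) {
		SMP_RD_BARRIER_DEPENDS();
		if (tsk->terminated)		/* set by PROC_STOP before up() */
			break;
		/* ... perform one unit of work here ... */
	}

	complete_and_exit(&tsk->completed, 0);	/* PROC_STOP waits for this */
}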
+ * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * $Id: miniopt.h 277737 2011-08-16 17:54:59Z $ + */ + + +#ifndef MINI_OPT_H +#define MINI_OPT_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* ---- Include Files ---------------------------------------------------- */ +/* ---- Constants and Types ---------------------------------------------- */ + +#define MINIOPT_MAXKEY 128 /* Max options */ +typedef struct miniopt { + + /* These are persistent after miniopt_init() */ + const char* name; /* name for prompt in error strings */ + const char* flags; /* option chars that take no args */ + bool longflags; /* long options may be flags */ + bool opt_end; /* at end of options (passed a "--") */ + + /* These are per-call to miniopt() */ + + int consumed; /* number of argv entries cosumed in + * the most recent call to miniopt() + */ + bool positional; + bool good_int; /* 'val' member is the result of a sucessful + * strtol conversion of the option value + */ + char opt; + char key[MINIOPT_MAXKEY]; + char* valstr; /* positional param, or value for the option, + * or null if the option had + * no accompanying value + */ + uint uval; /* strtol translation of valstr */ + int val; /* strtol translation of valstr */ +} miniopt_t; + +void miniopt_init(miniopt_t *t, const char* name, const char* flags, bool longflags); +int miniopt(miniopt_t *t, char **argv); + + +/* ---- Variable Externs ------------------------------------------------- */ +/* ---- Function Prototypes ---------------------------------------------- */ + + +#ifdef __cplusplus + } +#endif + +#endif /* MINI_OPT_H */ diff --git a/drivers/net/wireless/bcmdhd/include/msgtrace.h b/drivers/net/wireless/bcmdhd/include/msgtrace.h new file mode 100644 index 0000000000000..088f1e845a432 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/msgtrace.h @@ -0,0 +1,74 @@ +/* + * Trace messages sent over HBUS + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting 
executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: msgtrace.h 277737 2011-08-16 17:54:59Z $ + */ + +#ifndef _MSGTRACE_H +#define _MSGTRACE_H + +#ifndef _TYPEDEFS_H_ +#include +#endif + + +/* This marks the start of a packed structure section. */ +#include + +#define MSGTRACE_VERSION 1 + +/* Message trace header */ +typedef BWL_PRE_PACKED_STRUCT struct msgtrace_hdr { + uint8 version; + uint8 spare; + uint16 len; /* Len of the trace */ + uint32 seqnum; /* Sequence number of message. Useful if the messsage has been lost + * because of DMA error or a bus reset (ex: SDIO Func2) + */ + uint32 discarded_bytes; /* Number of discarded bytes because of trace overflow */ + uint32 discarded_printf; /* Number of discarded printf because of trace overflow */ +} BWL_POST_PACKED_STRUCT msgtrace_hdr_t; + +#define MSGTRACE_HDRLEN sizeof(msgtrace_hdr_t) + +/* The hbus driver generates traces when sending a trace message. This causes endless traces. + * This flag must be set to TRUE in any hbus traces. The flag is reset in the function msgtrace_put. + * This prevents endless traces but generates hasardous lost of traces only in bus device code. + * It is recommendat to set this flag in macro SD_TRACE but not in SD_ERROR for avoiding missing + * hbus error traces. hbus error trace should not generates endless traces. + */ +extern bool msgtrace_hbus_trace; + +typedef void (*msgtrace_func_send_t)(void *hdl1, void *hdl2, uint8 *hdr, + uint16 hdrlen, uint8 *buf, uint16 buflen); +extern void msgtrace_start(void); +extern void msgtrace_stop(void); +extern void msgtrace_sent(void); +extern void msgtrace_put(char *buf, int count); +extern void msgtrace_init(void *hdl1, void *hdl2, msgtrace_func_send_t func_send); +extern bool msgtrace_event_enabled(void); + +/* This marks the end of a packed structure section. */ +#include + +#endif /* _MSGTRACE_H */ diff --git a/drivers/net/wireless/bcmdhd/include/osl.h b/drivers/net/wireless/bcmdhd/include/osl.h new file mode 100644 index 0000000000000..b8cc2569f5065 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/osl.h @@ -0,0 +1,66 @@ +/* + * OS Abstraction Layer + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
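/*
 * Editorial note: an illustrative host-side sketch (not part of msgtrace.h)
 * of consuming one received trace message. The msgtrace_hdr_t defined above
 * precedes the trace text in the buffer; gaps in seqnum indicate messages
 * lost to DMA errors or a bus reset. Any endianness conversion of the
 * header fields is omitted here for brevity.
 */
static void msgtrace_dump(const uint8 *buf, uint16 buflen, uint32 *last_seq)
{
	msgtrace_hdr_t hdr;

	if (buflen < MSGTRACE_HDRLEN)
		return;

	bcopy(buf, &hdr, MSGTRACE_HDRLEN);

	if (hdr.version != MSGTRACE_VERSION)
		return;			/* unknown header layout, ignore */

	if (hdr.seqnum != *last_seq + 1)
		printf("msgtrace: messages lost before seq %u\n", hdr.seqnum);
	*last_seq = hdr.seqnum;

	/* the trace text follows immediately after the header */
	printf("%.*s", (int)(buflen - MSGTRACE_HDRLEN),
		(const char *)(buf + MSGTRACE_HDRLEN));
}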
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: osl.h 277737 2011-08-16 17:54:59Z $
+ */
+
+
+#ifndef _osl_h_
+#define _osl_h_
+
+
+typedef struct osl_info osl_t;
+typedef struct osl_dmainfo osldma_t;
+
+#define OSL_PKTTAG_SZ 32
+
+
+typedef void (*pktfree_cb_fn_t)(void *ctx, void *pkt, unsigned int status);
+
+
+#include
+
+#ifndef PKTDBG_TRACE
+#define PKTDBG_TRACE(osh, pkt, bit)
+#endif
+
+
+
+#define SET_REG(osh, r, mask, val) W_REG((osh), (r), ((R_REG((osh), r) & ~(mask)) | (val)))
+
+#ifndef AND_REG
+#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v))
+#endif
+
+#ifndef OR_REG
+#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v))
+#endif
+
+#if !defined(OSL_SYSUPTIME)
+#define OSL_SYSUPTIME() (0)
+#define OSL_SYSUPTIME_SUPPORT FALSE
+#else
+#define OSL_SYSUPTIME_SUPPORT TRUE
+#endif
+
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/packed_section_end.h b/drivers/net/wireless/bcmdhd/include/packed_section_end.h
new file mode 100644
index 0000000000000..71f8b2e13b3bf
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/packed_section_end.h
@@ -0,0 +1,54 @@
+/*
+ * Declare directives for structure packing. No padding will be provided
+ * between the members of packed structures, and therefore, there is no
+ * guarantee that structure members will be aligned.
+ *
+ * Declaring packed structures is compiler specific. In order to handle all
+ * cases, packed structures should be declared as:
+ *
+ * #include <packed_section_start.h>
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ * some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include <packed_section_end.h>
+ *
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: packed_section_end.h 277737 2011-08-16 17:54:59Z $
+ */
+
+
+
+
+#ifdef BWL_PACKED_SECTION
+ #undef BWL_PACKED_SECTION
+#else
+ #error "BWL_PACKED_SECTION is NOT defined!"
+#endif
+
+
+
+
+
+#undef BWL_PRE_PACKED_STRUCT
+#undef BWL_POST_PACKED_STRUCT
diff --git a/drivers/net/wireless/bcmdhd/include/packed_section_start.h b/drivers/net/wireless/bcmdhd/include/packed_section_start.h
new file mode 100644
index 0000000000000..afc2ba32fd937
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/packed_section_start.h
@@ -0,0 +1,61 @@
+/*
+ * Declare directives for structure packing. No padding will be provided
+ * between the members of packed structures, and therefore, there is no
+ * guarantee that structure members will be aligned.
+ *
+ * Declaring packed structures is compiler specific. In order to handle all
+ * cases, packed structures should be declared as:
+ *
+ * #include <packed_section_start.h>
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ * some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include <packed_section_end.h>
+ *
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * $Id: packed_section_start.h 277737 2011-08-16 17:54:59Z $
+ */
+
+
+
+
+#ifdef BWL_PACKED_SECTION
+ #error "BWL_PACKED_SECTION is already defined!"
+#else
+ #define BWL_PACKED_SECTION
+#endif
+
+
+
+
+
+#if defined(__GNUC__)
+ #define BWL_PRE_PACKED_STRUCT
+ #define BWL_POST_PACKED_STRUCT __attribute__ ((packed))
+#elif defined(__CC_ARM)
+ #define BWL_PRE_PACKED_STRUCT __packed
+ #define BWL_POST_PACKED_STRUCT
+#else
+ #error "Unknown compiler!"
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/pcicfg.h b/drivers/net/wireless/bcmdhd/include/pcicfg.h
new file mode 100644
index 0000000000000..66199431fb92f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/pcicfg.h
@@ -0,0 +1,78 @@
+/*
+ * pcicfg.h: PCI configuration constants and structures.
+ *
+ * Copyright (C) 1999-2011, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * + * $Id: pcicfg.h 277737 2011-08-16 17:54:59Z $ + */ + + +#ifndef _h_pcicfg_ +#define _h_pcicfg_ + + +#define PCI_CFG_VID 0 +#define PCI_CFG_CMD 4 +#define PCI_CFG_REV 8 +#define PCI_CFG_BAR0 0x10 +#define PCI_CFG_BAR1 0x14 +#define PCI_BAR0_WIN 0x80 +#define PCI_INT_STATUS 0x90 +#define PCI_INT_MASK 0x94 + +#define PCIE_EXTCFG_OFFSET 0x100 +#define PCI_SPROM_CONTROL 0x88 +#define PCI_BAR1_CONTROL 0x8c +#define PCI_TO_SB_MB 0x98 +#define PCI_BACKPLANE_ADDR 0xa0 +#define PCI_BACKPLANE_DATA 0xa4 +#define PCI_CLK_CTL_ST 0xa8 +#define PCI_BAR0_WIN2 0xac +#define PCI_GPIO_IN 0xb0 +#define PCI_GPIO_OUT 0xb4 +#define PCI_GPIO_OUTEN 0xb8 + +#define PCI_BAR0_SHADOW_OFFSET (2 * 1024) +#define PCI_BAR0_SPROM_OFFSET (4 * 1024) +#define PCI_BAR0_PCIREGS_OFFSET (6 * 1024) +#define PCI_BAR0_PCISBR_OFFSET (4 * 1024) + +#define PCI_BAR0_WINSZ (16 * 1024) + + +#define PCI_16KB0_PCIREGS_OFFSET (8 * 1024) +#define PCI_16KB0_CCREGS_OFFSET (12 * 1024) +#define PCI_16KBB0_WINSZ (16 * 1024) + + +#define PCI_16KB0_WIN2_OFFSET (4 * 1024) + + + +#define SPROM_SZ_MSK 0x02 +#define SPROM_LOCKED 0x08 +#define SPROM_BLANK 0x04 +#define SPROM_WRITEEN 0x10 +#define SPROM_BOOTROM_WE 0x20 +#define SPROM_BACKPLANE_EN 0x40 +#define SPROM_OTPIN_USE 0x80 + +#endif diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11.h b/drivers/net/wireless/bcmdhd/include/proto/802.11.h new file mode 100644 index 0000000000000..fd69aac413098 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/802.11.h @@ -0,0 +1,2032 @@ +/* + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * Fundamental types and constants relating to 802.11 + * + * $Id: 802.11.h 304058 2011-12-21 00:39:12Z $ + */ + + +#ifndef _802_11_H_ +#define _802_11_H_ + +#ifndef _TYPEDEFS_H_ +#include +#endif + +#ifndef _NET_ETHERNET_H_ +#include +#endif + +#include + + +#include + + +#define DOT11_TU_TO_US 1024 + + +#define DOT11_A3_HDR_LEN 24 +#define DOT11_A4_HDR_LEN 30 +#define DOT11_MAC_HDR_LEN DOT11_A3_HDR_LEN +#define DOT11_FCS_LEN 4 +#define DOT11_ICV_LEN 4 +#define DOT11_ICV_AES_LEN 8 +#define DOT11_QOS_LEN 2 +#define DOT11_HTC_LEN 4 + +#define DOT11_KEY_INDEX_SHIFT 6 +#define DOT11_IV_LEN 4 +#define DOT11_IV_TKIP_LEN 8 +#define DOT11_IV_AES_OCB_LEN 4 +#define DOT11_IV_AES_CCM_LEN 8 +#define DOT11_IV_MAX_LEN 8 + + +#define DOT11_MAX_MPDU_BODY_LEN 2304 + +#define DOT11_MAX_MPDU_LEN (DOT11_A4_HDR_LEN + \ + DOT11_QOS_LEN + \ + DOT11_IV_AES_CCM_LEN + \ + DOT11_MAX_MPDU_BODY_LEN + \ + DOT11_ICV_LEN + \ + DOT11_FCS_LEN) + +#define DOT11_MAX_SSID_LEN 32 + + +#define DOT11_DEFAULT_RTS_LEN 2347 +#define DOT11_MAX_RTS_LEN 2347 + + +#define DOT11_MIN_FRAG_LEN 256 +#define DOT11_MAX_FRAG_LEN 2346 +#define DOT11_DEFAULT_FRAG_LEN 2346 + + +#define DOT11_MIN_BEACON_PERIOD 1 +#define DOT11_MAX_BEACON_PERIOD 0xFFFF + + +#define DOT11_MIN_DTIM_PERIOD 1 +#define DOT11_MAX_DTIM_PERIOD 0xFF + + +#define DOT11_LLC_SNAP_HDR_LEN 8 +#define DOT11_OUI_LEN 3 +BWL_PRE_PACKED_STRUCT struct dot11_llc_snap_header { + uint8 dsap; + uint8 ssap; + uint8 ctl; + uint8 oui[DOT11_OUI_LEN]; + uint16 type; +} BWL_POST_PACKED_STRUCT; + + +#define RFC1042_HDR_LEN (ETHER_HDR_LEN + DOT11_LLC_SNAP_HDR_LEN) + + + +BWL_PRE_PACKED_STRUCT struct dot11_header { + uint16 fc; + uint16 durid; + struct ether_addr a1; + struct ether_addr a2; + struct ether_addr a3; + uint16 seq; + struct ether_addr a4; +} BWL_POST_PACKED_STRUCT; + + + +BWL_PRE_PACKED_STRUCT struct dot11_rts_frame { + uint16 fc; + uint16 durid; + struct ether_addr ra; + struct ether_addr ta; +} BWL_POST_PACKED_STRUCT; +#define DOT11_RTS_LEN 16 + +BWL_PRE_PACKED_STRUCT struct dot11_cts_frame { + uint16 fc; + uint16 durid; + struct ether_addr ra; +} BWL_POST_PACKED_STRUCT; +#define DOT11_CTS_LEN 10 + +BWL_PRE_PACKED_STRUCT struct dot11_ack_frame { + uint16 fc; + uint16 durid; + struct ether_addr ra; +} BWL_POST_PACKED_STRUCT; +#define DOT11_ACK_LEN 10 + +BWL_PRE_PACKED_STRUCT struct dot11_ps_poll_frame { + uint16 fc; + uint16 durid; + struct ether_addr bssid; + struct ether_addr ta; +} BWL_POST_PACKED_STRUCT; +#define DOT11_PS_POLL_LEN 16 + +BWL_PRE_PACKED_STRUCT struct dot11_cf_end_frame { + uint16 fc; + uint16 durid; + struct ether_addr ra; + struct ether_addr bssid; +} BWL_POST_PACKED_STRUCT; +#define DOT11_CS_END_LEN 16 + + +BWL_PRE_PACKED_STRUCT struct dot11_action_wifi_vendor_specific { + uint8 category; + uint8 OUI[3]; + uint8 type; + uint8 subtype; + uint8 data[1040]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_action_wifi_vendor_specific dot11_action_wifi_vendor_specific_t; + + +BWL_PRE_PACKED_STRUCT struct dot11_action_vs_frmhdr { + uint8 category; + uint8 OUI[3]; + uint8 type; + uint8 subtype; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_action_vs_frmhdr dot11_action_vs_frmhdr_t; +#define DOT11_ACTION_VS_HDR_LEN 6 + +#define BCM_ACTION_OUI_BYTE0 0x00 +#define BCM_ACTION_OUI_BYTE1 0x90 +#define BCM_ACTION_OUI_BYTE2 0x4c + + +#define DOT11_BA_CTL_POLICY_NORMAL 0x0000 +#define DOT11_BA_CTL_POLICY_NOACK 0x0001 +#define DOT11_BA_CTL_POLICY_MASK 0x0001 + +#define DOT11_BA_CTL_MTID 0x0002 +#define DOT11_BA_CTL_COMPRESSED 0x0004 + +#define 
DOT11_BA_CTL_NUMMSDU_MASK 0x0FC0 +#define DOT11_BA_CTL_NUMMSDU_SHIFT 6 + +#define DOT11_BA_CTL_TID_MASK 0xF000 +#define DOT11_BA_CTL_TID_SHIFT 12 + + +BWL_PRE_PACKED_STRUCT struct dot11_ctl_header { + uint16 fc; + uint16 durid; + struct ether_addr ra; + struct ether_addr ta; +} BWL_POST_PACKED_STRUCT; +#define DOT11_CTL_HDR_LEN 16 + + +BWL_PRE_PACKED_STRUCT struct dot11_bar { + uint16 bar_control; + uint16 seqnum; +} BWL_POST_PACKED_STRUCT; +#define DOT11_BAR_LEN 4 + +#define DOT11_BA_BITMAP_LEN 128 +#define DOT11_BA_CMP_BITMAP_LEN 8 + +BWL_PRE_PACKED_STRUCT struct dot11_ba { + uint16 ba_control; + uint16 seqnum; + uint8 bitmap[DOT11_BA_BITMAP_LEN]; +} BWL_POST_PACKED_STRUCT; +#define DOT11_BA_LEN 4 + + +BWL_PRE_PACKED_STRUCT struct dot11_management_header { + uint16 fc; + uint16 durid; + struct ether_addr da; + struct ether_addr sa; + struct ether_addr bssid; + uint16 seq; +} BWL_POST_PACKED_STRUCT; +#define DOT11_MGMT_HDR_LEN 24 + + + +BWL_PRE_PACKED_STRUCT struct dot11_bcn_prb { + uint32 timestamp[2]; + uint16 beacon_interval; + uint16 capability; +} BWL_POST_PACKED_STRUCT; +#define DOT11_BCN_PRB_LEN 12 +#define DOT11_BCN_PRB_FIXED_LEN 12 + +BWL_PRE_PACKED_STRUCT struct dot11_auth { + uint16 alg; + uint16 seq; + uint16 status; +} BWL_POST_PACKED_STRUCT; +#define DOT11_AUTH_FIXED_LEN 6 + +BWL_PRE_PACKED_STRUCT struct dot11_assoc_req { + uint16 capability; + uint16 listen; +} BWL_POST_PACKED_STRUCT; +#define DOT11_ASSOC_REQ_FIXED_LEN 4 + +BWL_PRE_PACKED_STRUCT struct dot11_reassoc_req { + uint16 capability; + uint16 listen; + struct ether_addr ap; +} BWL_POST_PACKED_STRUCT; +#define DOT11_REASSOC_REQ_FIXED_LEN 10 + +BWL_PRE_PACKED_STRUCT struct dot11_assoc_resp { + uint16 capability; + uint16 status; + uint16 aid; +} BWL_POST_PACKED_STRUCT; +#define DOT11_ASSOC_RESP_FIXED_LEN 6 + +BWL_PRE_PACKED_STRUCT struct dot11_action_measure { + uint8 category; + uint8 action; + uint8 token; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +#define DOT11_ACTION_MEASURE_LEN 3 + +BWL_PRE_PACKED_STRUCT struct dot11_action_ht_ch_width { + uint8 category; + uint8 action; + uint8 ch_width; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11_action_ht_mimops { + uint8 category; + uint8 action; + uint8 control; +} BWL_POST_PACKED_STRUCT; + +#define SM_PWRSAVE_ENABLE 1 +#define SM_PWRSAVE_MODE 2 + + +BWL_PRE_PACKED_STRUCT struct dot11_power_cnst { + uint8 id; + uint8 len; + uint8 power; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_power_cnst dot11_power_cnst_t; + +BWL_PRE_PACKED_STRUCT struct dot11_power_cap { + uint8 min; + uint8 max; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_power_cap dot11_power_cap_t; + +BWL_PRE_PACKED_STRUCT struct dot11_tpc_rep { + uint8 id; + uint8 len; + uint8 tx_pwr; + uint8 margin; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tpc_rep dot11_tpc_rep_t; +#define DOT11_MNG_IE_TPC_REPORT_LEN 2 + +BWL_PRE_PACKED_STRUCT struct dot11_supp_channels { + uint8 id; + uint8 len; + uint8 first_channel; + uint8 num_channels; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_supp_channels dot11_supp_channels_t; + + +BWL_PRE_PACKED_STRUCT struct dot11_extch { + uint8 id; + uint8 len; + uint8 extch; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_extch dot11_extch_ie_t; + +BWL_PRE_PACKED_STRUCT struct dot11_brcm_extch { + uint8 id; + uint8 len; + uint8 oui[3]; + uint8 type; + uint8 extch; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_brcm_extch dot11_brcm_extch_ie_t; + +#define BRCM_EXTCH_IE_LEN 5 +#define BRCM_EXTCH_IE_TYPE 53 +#define DOT11_EXTCH_IE_LEN 1 +#define 
DOT11_EXT_CH_MASK 0x03 +#define DOT11_EXT_CH_UPPER 0x01 +#define DOT11_EXT_CH_LOWER 0x03 +#define DOT11_EXT_CH_NONE 0x00 + +BWL_PRE_PACKED_STRUCT struct dot11_action_frmhdr { + uint8 category; + uint8 action; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +#define DOT11_ACTION_FRMHDR_LEN 2 + + +BWL_PRE_PACKED_STRUCT struct dot11_channel_switch { + uint8 id; + uint8 len; + uint8 mode; + uint8 channel; + uint8 count; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_channel_switch dot11_chan_switch_ie_t; + +#define DOT11_SWITCH_IE_LEN 3 + +#define DOT11_CSA_MODE_ADVISORY 0 +#define DOT11_CSA_MODE_NO_TX 1 + +BWL_PRE_PACKED_STRUCT struct dot11_action_switch_channel { + uint8 category; + uint8 action; + dot11_chan_switch_ie_t chan_switch_ie; + dot11_brcm_extch_ie_t extch_ie; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11_csa_body { + uint8 mode; + uint8 reg; + uint8 channel; + uint8 count; +} BWL_POST_PACKED_STRUCT; + + +BWL_PRE_PACKED_STRUCT struct dot11_ext_csa { + uint8 id; + uint8 len; + struct dot11_csa_body b; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ext_csa dot11_ext_csa_ie_t; +#define DOT11_EXT_CSA_IE_LEN 4 + +BWL_PRE_PACKED_STRUCT struct dot11_action_ext_csa { + uint8 category; + uint8 action; + dot11_ext_csa_ie_t chan_switch_ie; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11y_action_ext_csa { + uint8 category; + uint8 action; + struct dot11_csa_body b; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11_obss_coex { + uint8 id; + uint8 len; + uint8 info; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_obss_coex dot11_obss_coex_t; +#define DOT11_OBSS_COEXINFO_LEN 1 + +#define DOT11_OBSS_COEX_INFO_REQ 0x01 +#define DOT11_OBSS_COEX_40MHZ_INTOLERANT 0x02 +#define DOT11_OBSS_COEX_20MHZ_WIDTH_REQ 0x04 + +BWL_PRE_PACKED_STRUCT struct dot11_obss_chanlist { + uint8 id; + uint8 len; + uint8 regclass; + uint8 chanlist[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_obss_chanlist dot11_obss_chanlist_t; +#define DOT11_OBSS_CHANLIST_FIXED_LEN 1 + +BWL_PRE_PACKED_STRUCT struct dot11_extcap_ie { + uint8 id; + uint8 len; + uint8 cap[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_extcap_ie dot11_extcap_ie_t; +#define DOT11_EXTCAP_LEN 1 +#define DOT11_EXTCAP_LEN_TDLS 5 + +BWL_PRE_PACKED_STRUCT struct dot11_extcap { + uint8 extcap[DOT11_EXTCAP_LEN_TDLS]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_extcap dot11_extcap_t; + + +#define TDLS_CAP_TDLS 37 +#define TDLS_CAP_PU_BUFFER_STA 28 +#define TDLS_CAP_PEER_PSM 20 +#define TDLS_CAP_CH_SW 30 +#define TDLS_CAP_PROH 38 +#define TDLS_CAP_CH_SW_PROH 39 + +#define TDLS_CAP_MAX_BIT 39 + + + +#define DOT11_MEASURE_TYPE_BASIC 0 +#define DOT11_MEASURE_TYPE_CCA 1 +#define DOT11_MEASURE_TYPE_RPI 2 +#define DOT11_MEASURE_TYPE_CHLOAD 3 +#define DOT11_MEASURE_TYPE_NOISE 4 +#define DOT11_MEASURE_TYPE_BEACON 5 +#define DOT11_MEASURE_TYPE_FRAME 6 +#define DOT11_MEASURE_TYPE_STATS 7 +#define DOT11_MEASURE_TYPE_LCI 8 +#define DOT11_MEASURE_TYPE_TXSTREAM 9 +#define DOT11_MEASURE_TYPE_PAUSE 255 + + +#define DOT11_MEASURE_MODE_PARALLEL (1<<0) +#define DOT11_MEASURE_MODE_ENABLE (1<<1) +#define DOT11_MEASURE_MODE_REQUEST (1<<2) +#define DOT11_MEASURE_MODE_REPORT (1<<3) +#define DOT11_MEASURE_MODE_DUR (1<<4) + +#define DOT11_MEASURE_MODE_LATE (1<<0) +#define DOT11_MEASURE_MODE_INCAPABLE (1<<1) +#define DOT11_MEASURE_MODE_REFUSED (1<<2) + +#define DOT11_MEASURE_BASIC_MAP_BSS ((uint8)(1<<0)) +#define DOT11_MEASURE_BASIC_MAP_OFDM ((uint8)(1<<1)) +#define DOT11_MEASURE_BASIC_MAP_UKNOWN ((uint8)(1<<2)) +#define 
DOT11_MEASURE_BASIC_MAP_RADAR ((uint8)(1<<3)) +#define DOT11_MEASURE_BASIC_MAP_UNMEAS ((uint8)(1<<4)) + +BWL_PRE_PACKED_STRUCT struct dot11_meas_req { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 channel; + uint8 start_time[8]; + uint16 duration; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_meas_req dot11_meas_req_t; +#define DOT11_MNG_IE_MREQ_LEN 14 + +#define DOT11_MNG_IE_MREQ_FIXED_LEN 3 + +BWL_PRE_PACKED_STRUCT struct dot11_meas_rep { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + BWL_PRE_PACKED_STRUCT union + { + BWL_PRE_PACKED_STRUCT struct { + uint8 channel; + uint8 start_time[8]; + uint16 duration; + uint8 map; + } BWL_POST_PACKED_STRUCT basic; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT rep; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_meas_rep dot11_meas_rep_t; + + +#define DOT11_MNG_IE_MREP_FIXED_LEN 3 + +BWL_PRE_PACKED_STRUCT struct dot11_meas_rep_basic { + uint8 channel; + uint8 start_time[8]; + uint16 duration; + uint8 map; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_meas_rep_basic dot11_meas_rep_basic_t; +#define DOT11_MEASURE_BASIC_REP_LEN 12 + +BWL_PRE_PACKED_STRUCT struct dot11_quiet { + uint8 id; + uint8 len; + uint8 count; + uint8 period; + uint16 duration; + uint16 offset; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_quiet dot11_quiet_t; + +BWL_PRE_PACKED_STRUCT struct chan_map_tuple { + uint8 channel; + uint8 map; +} BWL_POST_PACKED_STRUCT; +typedef struct chan_map_tuple chan_map_tuple_t; + +BWL_PRE_PACKED_STRUCT struct dot11_ibss_dfs { + uint8 id; + uint8 len; + uint8 eaddr[ETHER_ADDR_LEN]; + uint8 interval; + chan_map_tuple_t map[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ibss_dfs dot11_ibss_dfs_t; + + +#define WME_OUI "\x00\x50\xf2" +#define WME_OUI_LEN 3 +#define WME_OUI_TYPE 2 +#define WME_VER 1 +#define WME_TYPE 2 +#define WME_SUBTYPE_IE 0 +#define WME_SUBTYPE_PARAM_IE 1 +#define WME_SUBTYPE_TSPEC 2 +#define WME_VERSION_LEN 1 +#define WME_PARAMETER_IE_LEN 24 + + + +#define AC_BE 0 +#define AC_BK 1 +#define AC_VI 2 +#define AC_VO 3 +#define AC_COUNT 4 + +typedef uint8 ac_bitmap_t; + +#define AC_BITMAP_NONE 0x0 +#define AC_BITMAP_ALL 0xf +#define AC_BITMAP_TST(ab, ac) (((ab) & (1 << (ac))) != 0) +#define AC_BITMAP_SET(ab, ac) (((ab) |= (1 << (ac)))) +#define AC_BITMAP_RESET(ab, ac) (((ab) &= ~(1 << (ac)))) + + +BWL_PRE_PACKED_STRUCT struct wme_ie { + uint8 oui[3]; + uint8 type; + uint8 subtype; + uint8 version; + uint8 qosinfo; +} BWL_POST_PACKED_STRUCT; +typedef struct wme_ie wme_ie_t; +#define WME_IE_LEN 7 + +BWL_PRE_PACKED_STRUCT struct edcf_acparam { + uint8 ACI; + uint8 ECW; + uint16 TXOP; +} BWL_POST_PACKED_STRUCT; +typedef struct edcf_acparam edcf_acparam_t; + + +BWL_PRE_PACKED_STRUCT struct wme_param_ie { + uint8 oui[3]; + uint8 type; + uint8 subtype; + uint8 version; + uint8 qosinfo; + uint8 rsvd; + edcf_acparam_t acparam[AC_COUNT]; +} BWL_POST_PACKED_STRUCT; +typedef struct wme_param_ie wme_param_ie_t; +#define WME_PARAM_IE_LEN 24 + + +#define WME_QI_AP_APSD_MASK 0x80 +#define WME_QI_AP_APSD_SHIFT 7 +#define WME_QI_AP_COUNT_MASK 0x0f +#define WME_QI_AP_COUNT_SHIFT 0 + + +#define WME_QI_STA_MAXSPLEN_MASK 0x60 +#define WME_QI_STA_MAXSPLEN_SHIFT 5 +#define WME_QI_STA_APSD_ALL_MASK 0xf +#define WME_QI_STA_APSD_ALL_SHIFT 0 +#define WME_QI_STA_APSD_BE_MASK 0x8 +#define WME_QI_STA_APSD_BE_SHIFT 3 +#define WME_QI_STA_APSD_BK_MASK 0x4 +#define WME_QI_STA_APSD_BK_SHIFT 2 +#define WME_QI_STA_APSD_VI_MASK 0x2 +#define WME_QI_STA_APSD_VI_SHIFT 1 +#define WME_QI_STA_APSD_VO_MASK 0x1 
+#define WME_QI_STA_APSD_VO_SHIFT 0 + + +#define EDCF_AIFSN_MIN 1 +#define EDCF_AIFSN_MAX 15 +#define EDCF_AIFSN_MASK 0x0f +#define EDCF_ACM_MASK 0x10 +#define EDCF_ACI_MASK 0x60 +#define EDCF_ACI_SHIFT 5 +#define EDCF_AIFSN_SHIFT 12 + + +#define EDCF_ECW_MIN 0 +#define EDCF_ECW_MAX 15 +#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1) +#define EDCF_ECWMIN_MASK 0x0f +#define EDCF_ECWMAX_MASK 0xf0 +#define EDCF_ECWMAX_SHIFT 4 + + +#define EDCF_TXOP_MIN 0 +#define EDCF_TXOP_MAX 65535 +#define EDCF_TXOP2USEC(txop) ((txop) << 5) + + +#define NON_EDCF_AC_BE_ACI_STA 0x02 + + +#define EDCF_AC_BE_ACI_STA 0x03 +#define EDCF_AC_BE_ECW_STA 0xA4 +#define EDCF_AC_BE_TXOP_STA 0x0000 +#define EDCF_AC_BK_ACI_STA 0x27 +#define EDCF_AC_BK_ECW_STA 0xA4 +#define EDCF_AC_BK_TXOP_STA 0x0000 +#define EDCF_AC_VI_ACI_STA 0x42 +#define EDCF_AC_VI_ECW_STA 0x43 +#define EDCF_AC_VI_TXOP_STA 0x005e +#define EDCF_AC_VO_ACI_STA 0x62 +#define EDCF_AC_VO_ECW_STA 0x32 +#define EDCF_AC_VO_TXOP_STA 0x002f + + +#define EDCF_AC_BE_ACI_AP 0x03 +#define EDCF_AC_BE_ECW_AP 0x64 +#define EDCF_AC_BE_TXOP_AP 0x0000 +#define EDCF_AC_BK_ACI_AP 0x27 +#define EDCF_AC_BK_ECW_AP 0xA4 +#define EDCF_AC_BK_TXOP_AP 0x0000 +#define EDCF_AC_VI_ACI_AP 0x41 +#define EDCF_AC_VI_ECW_AP 0x43 +#define EDCF_AC_VI_TXOP_AP 0x005e +#define EDCF_AC_VO_ACI_AP 0x61 +#define EDCF_AC_VO_ECW_AP 0x32 +#define EDCF_AC_VO_TXOP_AP 0x002f + + +BWL_PRE_PACKED_STRUCT struct edca_param_ie { + uint8 qosinfo; + uint8 rsvd; + edcf_acparam_t acparam[AC_COUNT]; +} BWL_POST_PACKED_STRUCT; +typedef struct edca_param_ie edca_param_ie_t; +#define EDCA_PARAM_IE_LEN 18 + + +BWL_PRE_PACKED_STRUCT struct qos_cap_ie { + uint8 qosinfo; +} BWL_POST_PACKED_STRUCT; +typedef struct qos_cap_ie qos_cap_ie_t; + +BWL_PRE_PACKED_STRUCT struct dot11_qbss_load_ie { + uint8 id; + uint8 length; + uint16 station_count; + uint8 channel_utilization; + uint16 aac; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_qbss_load_ie dot11_qbss_load_ie_t; + + +#define FIXED_MSDU_SIZE 0x8000 +#define MSDU_SIZE_MASK 0x7fff + + + +#define INTEGER_SHIFT 13 +#define FRACTION_MASK 0x1FFF + + +BWL_PRE_PACKED_STRUCT struct dot11_management_notification { + uint8 category; + uint8 action; + uint8 token; + uint8 status; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +#define DOT11_MGMT_NOTIFICATION_LEN 4 + + +BWL_PRE_PACKED_STRUCT struct ti_ie { + uint8 ti_type; + uint32 ti_val; +} BWL_POST_PACKED_STRUCT; +typedef struct ti_ie ti_ie_t; +#define TI_TYPE_REASSOC_DEADLINE 1 +#define TI_TYPE_KEY_LIFETIME 2 + + +#define WME_ADDTS_REQUEST 0 +#define WME_ADDTS_RESPONSE 1 +#define WME_DELTS_REQUEST 2 + + +#define WME_ADMISSION_ACCEPTED 0 +#define WME_INVALID_PARAMETERS 1 +#define WME_ADMISSION_REFUSED 3 + + +#define BCN_PRB_SSID(body) ((char*)(body) + DOT11_BCN_PRB_LEN) + + +#define DOT11_OPEN_SYSTEM 0 +#define DOT11_SHARED_KEY 1 +#define DOT11_FAST_BSS 2 +#define DOT11_CHALLENGE_LEN 128 + + +#define FC_PVER_MASK 0x3 +#define FC_PVER_SHIFT 0 +#define FC_TYPE_MASK 0xC +#define FC_TYPE_SHIFT 2 +#define FC_SUBTYPE_MASK 0xF0 +#define FC_SUBTYPE_SHIFT 4 +#define FC_TODS 0x100 +#define FC_TODS_SHIFT 8 +#define FC_FROMDS 0x200 +#define FC_FROMDS_SHIFT 9 +#define FC_MOREFRAG 0x400 +#define FC_MOREFRAG_SHIFT 10 +#define FC_RETRY 0x800 +#define FC_RETRY_SHIFT 11 +#define FC_PM 0x1000 +#define FC_PM_SHIFT 12 +#define FC_MOREDATA 0x2000 +#define FC_MOREDATA_SHIFT 13 +#define FC_WEP 0x4000 +#define FC_WEP_SHIFT 14 +#define FC_ORDER 0x8000 +#define FC_ORDER_SHIFT 15 + + +#define SEQNUM_SHIFT 4 +#define SEQNUM_MAX 0x1000 +#define FRAGNUM_MASK 0xF + + 
+ + +#define FC_TYPE_MNG 0 +#define FC_TYPE_CTL 1 +#define FC_TYPE_DATA 2 + + +#define FC_SUBTYPE_ASSOC_REQ 0 +#define FC_SUBTYPE_ASSOC_RESP 1 +#define FC_SUBTYPE_REASSOC_REQ 2 +#define FC_SUBTYPE_REASSOC_RESP 3 +#define FC_SUBTYPE_PROBE_REQ 4 +#define FC_SUBTYPE_PROBE_RESP 5 +#define FC_SUBTYPE_BEACON 8 +#define FC_SUBTYPE_ATIM 9 +#define FC_SUBTYPE_DISASSOC 10 +#define FC_SUBTYPE_AUTH 11 +#define FC_SUBTYPE_DEAUTH 12 +#define FC_SUBTYPE_ACTION 13 +#define FC_SUBTYPE_ACTION_NOACK 14 + + +#define FC_SUBTYPE_CTL_WRAPPER 7 +#define FC_SUBTYPE_BLOCKACK_REQ 8 +#define FC_SUBTYPE_BLOCKACK 9 +#define FC_SUBTYPE_PS_POLL 10 +#define FC_SUBTYPE_RTS 11 +#define FC_SUBTYPE_CTS 12 +#define FC_SUBTYPE_ACK 13 +#define FC_SUBTYPE_CF_END 14 +#define FC_SUBTYPE_CF_END_ACK 15 + + +#define FC_SUBTYPE_DATA 0 +#define FC_SUBTYPE_DATA_CF_ACK 1 +#define FC_SUBTYPE_DATA_CF_POLL 2 +#define FC_SUBTYPE_DATA_CF_ACK_POLL 3 +#define FC_SUBTYPE_NULL 4 +#define FC_SUBTYPE_CF_ACK 5 +#define FC_SUBTYPE_CF_POLL 6 +#define FC_SUBTYPE_CF_ACK_POLL 7 +#define FC_SUBTYPE_QOS_DATA 8 +#define FC_SUBTYPE_QOS_DATA_CF_ACK 9 +#define FC_SUBTYPE_QOS_DATA_CF_POLL 10 +#define FC_SUBTYPE_QOS_DATA_CF_ACK_POLL 11 +#define FC_SUBTYPE_QOS_NULL 12 +#define FC_SUBTYPE_QOS_CF_POLL 14 +#define FC_SUBTYPE_QOS_CF_ACK_POLL 15 + + +#define FC_SUBTYPE_ANY_QOS(s) (((s) & 8) != 0) +#define FC_SUBTYPE_ANY_NULL(s) (((s) & 4) != 0) +#define FC_SUBTYPE_ANY_CF_POLL(s) (((s) & 2) != 0) +#define FC_SUBTYPE_ANY_CF_ACK(s) (((s) & 1) != 0) + + +#define FC_KIND_MASK (FC_TYPE_MASK | FC_SUBTYPE_MASK) + +#define FC_KIND(t, s) (((t) << FC_TYPE_SHIFT) | ((s) << FC_SUBTYPE_SHIFT)) + +#define FC_SUBTYPE(fc) (((fc) & FC_SUBTYPE_MASK) >> FC_SUBTYPE_SHIFT) +#define FC_TYPE(fc) (((fc) & FC_TYPE_MASK) >> FC_TYPE_SHIFT) + +#define FC_ASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_REQ) +#define FC_ASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_RESP) +#define FC_REASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_REQ) +#define FC_REASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_RESP) +#define FC_PROBE_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_REQ) +#define FC_PROBE_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_RESP) +#define FC_BEACON FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_BEACON) +#define FC_DISASSOC FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DISASSOC) +#define FC_AUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_AUTH) +#define FC_DEAUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DEAUTH) +#define FC_ACTION FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION) +#define FC_ACTION_NOACK FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION_NOACK) + +#define FC_CTL_WRAPPER FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTL_WRAPPER) +#define FC_BLOCKACK_REQ FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK_REQ) +#define FC_BLOCKACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK) +#define FC_PS_POLL FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_PS_POLL) +#define FC_RTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_RTS) +#define FC_CTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTS) +#define FC_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_ACK) +#define FC_CF_END FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END) +#define FC_CF_END_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END_ACK) + +#define FC_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA) +#define FC_NULL_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_NULL) +#define FC_DATA_CF_ACK FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA_CF_ACK) +#define FC_QOS_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_DATA) +#define FC_QOS_NULL FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_NULL) + + + + +#define QOS_PRIO_SHIFT 0 +#define QOS_PRIO_MASK 0x0007 +#define QOS_PRIO(qos) (((qos) & QOS_PRIO_MASK) >> QOS_PRIO_SHIFT) + + +#define 
QOS_TID_SHIFT 0 +#define QOS_TID_MASK 0x000f +#define QOS_TID(qos) (((qos) & QOS_TID_MASK) >> QOS_TID_SHIFT) + + +#define QOS_EOSP_SHIFT 4 +#define QOS_EOSP_MASK 0x0010 +#define QOS_EOSP(qos) (((qos) & QOS_EOSP_MASK) >> QOS_EOSP_SHIFT) + + +#define QOS_ACK_NORMAL_ACK 0 +#define QOS_ACK_NO_ACK 1 +#define QOS_ACK_NO_EXP_ACK 2 +#define QOS_ACK_BLOCK_ACK 3 +#define QOS_ACK_SHIFT 5 +#define QOS_ACK_MASK 0x0060 +#define QOS_ACK(qos) (((qos) & QOS_ACK_MASK) >> QOS_ACK_SHIFT) + + +#define QOS_AMSDU_SHIFT 7 +#define QOS_AMSDU_MASK 0x0080 + + + + + + +#define DOT11_MNG_AUTH_ALGO_LEN 2 +#define DOT11_MNG_AUTH_SEQ_LEN 2 +#define DOT11_MNG_BEACON_INT_LEN 2 +#define DOT11_MNG_CAP_LEN 2 +#define DOT11_MNG_AP_ADDR_LEN 6 +#define DOT11_MNG_LISTEN_INT_LEN 2 +#define DOT11_MNG_REASON_LEN 2 +#define DOT11_MNG_AID_LEN 2 +#define DOT11_MNG_STATUS_LEN 2 +#define DOT11_MNG_TIMESTAMP_LEN 8 + + +#define DOT11_AID_MASK 0x3fff + + +#define DOT11_RC_RESERVED 0 +#define DOT11_RC_UNSPECIFIED 1 +#define DOT11_RC_AUTH_INVAL 2 +#define DOT11_RC_DEAUTH_LEAVING 3 +#define DOT11_RC_INACTIVITY 4 +#define DOT11_RC_BUSY 5 +#define DOT11_RC_INVAL_CLASS_2 6 +#define DOT11_RC_INVAL_CLASS_3 7 +#define DOT11_RC_DISASSOC_LEAVING 8 +#define DOT11_RC_NOT_AUTH 9 +#define DOT11_RC_BAD_PC 10 +#define DOT11_RC_BAD_CHANNELS 11 + + + +#define DOT11_RC_UNSPECIFIED_QOS 32 +#define DOT11_RC_INSUFFCIENT_BW 33 +#define DOT11_RC_EXCESSIVE_FRAMES 34 +#define DOT11_RC_TX_OUTSIDE_TXOP 35 +#define DOT11_RC_LEAVING_QBSS 36 +#define DOT11_RC_BAD_MECHANISM 37 +#define DOT11_RC_SETUP_NEEDED 38 +#define DOT11_RC_TIMEOUT 39 + +#define DOT11_RC_MAX 23 + +#define DOT11_RC_TDLS_PEER_UNREACH 25 +#define DOT11_RC_TDLS_DOWN_UNSPECIFIED 26 + + +#define DOT11_SC_SUCCESS 0 +#define DOT11_SC_FAILURE 1 +#define DOT11_SC_TDLS_WAKEUP_SCH_ALT 2 + +#define DOT11_SC_TDLS_WAKEUP_SCH_REJ 3 +#define DOT11_SC_TDLS_SEC_DISABLED 5 +#define DOT11_SC_LIFETIME_REJ 6 +#define DOT11_SC_NOT_SAME_BSS 7 +#define DOT11_SC_CAP_MISMATCH 10 +#define DOT11_SC_REASSOC_FAIL 11 +#define DOT11_SC_ASSOC_FAIL 12 +#define DOT11_SC_AUTH_MISMATCH 13 +#define DOT11_SC_AUTH_SEQ 14 +#define DOT11_SC_AUTH_CHALLENGE_FAIL 15 +#define DOT11_SC_AUTH_TIMEOUT 16 +#define DOT11_SC_ASSOC_BUSY_FAIL 17 +#define DOT11_SC_ASSOC_RATE_MISMATCH 18 +#define DOT11_SC_ASSOC_SHORT_REQUIRED 19 +#define DOT11_SC_ASSOC_PBCC_REQUIRED 20 +#define DOT11_SC_ASSOC_AGILITY_REQUIRED 21 +#define DOT11_SC_ASSOC_SPECTRUM_REQUIRED 22 +#define DOT11_SC_ASSOC_BAD_POWER_CAP 23 +#define DOT11_SC_ASSOC_BAD_SUP_CHANNELS 24 +#define DOT11_SC_ASSOC_SHORTSLOT_REQUIRED 25 +#define DOT11_SC_ASSOC_ERPBCC_REQUIRED 26 +#define DOT11_SC_ASSOC_DSSOFDM_REQUIRED 27 +#define DOT11_SC_ASSOC_R0KH_UNREACHABLE 28 +#define DOT11_SC_ASSOC_TRY_LATER 30 +#define DOT11_SC_ASSOC_MFP_VIOLATION 31 + +#define DOT11_SC_DECLINED 37 +#define DOT11_SC_INVALID_PARAMS 38 +#define DOT11_SC_INVALID_PAIRWISE_CIPHER 42 +#define DOT11_SC_INVALID_AKMP 43 +#define DOT11_SC_INVALID_RSNIE_CAP 45 +#define DOT11_SC_INVALID_PMKID 53 +#define DOT11_SC_INVALID_MDID 54 +#define DOT11_SC_INVALID_FTIE 55 + +#define DOT11_SC_UNEXP_MSG 70 +#define DOT11_SC_INVALID_SNONCE 71 +#define DOT11_SC_INVALID_RSNIE 72 + +#define DOT11_MNG_DS_PARAM_LEN 1 +#define DOT11_MNG_IBSS_PARAM_LEN 2 + + +#define DOT11_MNG_TIM_FIXED_LEN 3 +#define DOT11_MNG_TIM_DTIM_COUNT 0 +#define DOT11_MNG_TIM_DTIM_PERIOD 1 +#define DOT11_MNG_TIM_BITMAP_CTL 2 +#define DOT11_MNG_TIM_PVB 3 + + +#define TLV_TAG_OFF 0 +#define TLV_LEN_OFF 1 +#define TLV_HDR_LEN 2 +#define TLV_BODY_OFF 2 + + +#define DOT11_MNG_SSID_ID 0 +#define 
DOT11_MNG_RATES_ID 1 +#define DOT11_MNG_FH_PARMS_ID 2 +#define DOT11_MNG_DS_PARMS_ID 3 +#define DOT11_MNG_CF_PARMS_ID 4 +#define DOT11_MNG_TIM_ID 5 +#define DOT11_MNG_IBSS_PARMS_ID 6 +#define DOT11_MNG_COUNTRY_ID 7 +#define DOT11_MNG_HOPPING_PARMS_ID 8 +#define DOT11_MNG_HOPPING_TABLE_ID 9 +#define DOT11_MNG_REQUEST_ID 10 +#define DOT11_MNG_QBSS_LOAD_ID 11 +#define DOT11_MNG_EDCA_PARAM_ID 12 +#define DOT11_MNG_CHALLENGE_ID 16 +#define DOT11_MNG_PWR_CONSTRAINT_ID 32 +#define DOT11_MNG_PWR_CAP_ID 33 +#define DOT11_MNG_TPC_REQUEST_ID 34 +#define DOT11_MNG_TPC_REPORT_ID 35 +#define DOT11_MNG_SUPP_CHANNELS_ID 36 +#define DOT11_MNG_CHANNEL_SWITCH_ID 37 +#define DOT11_MNG_MEASURE_REQUEST_ID 38 +#define DOT11_MNG_MEASURE_REPORT_ID 39 +#define DOT11_MNG_QUIET_ID 40 +#define DOT11_MNG_IBSS_DFS_ID 41 +#define DOT11_MNG_ERP_ID 42 +#define DOT11_MNG_TS_DELAY_ID 43 +#define DOT11_MNG_HT_CAP 45 +#define DOT11_MNG_QOS_CAP_ID 46 +#define DOT11_MNG_NONERP_ID 47 +#define DOT11_MNG_RSN_ID 48 +#define DOT11_MNG_EXT_RATES_ID 50 +#define DOT11_MNG_AP_CHREP_ID 51 +#define DOT11_MNG_NBR_REP_ID 52 +#define DOT11_MNG_MDIE_ID 54 +#define DOT11_MNG_FTIE_ID 55 +#define DOT11_MNG_FT_TI_ID 56 +#define DOT11_MNG_RDE_ID 57 +#define DOT11_MNG_REGCLASS_ID 59 +#define DOT11_MNG_EXT_CSA_ID 60 +#define DOT11_MNG_HT_ADD 61 +#define DOT11_MNG_EXT_CHANNEL_OFFSET 62 + + +#define DOT11_MNG_RRM_CAP_ID 70 +#define DOT11_MNG_HT_BSS_COEXINFO_ID 72 +#define DOT11_MNG_HT_BSS_CHANNEL_REPORT_ID 73 +#define DOT11_MNG_HT_OBSS_ID 74 +#define DOT11_MNG_CHANNEL_USAGE 97 +#define DOT11_MNG_LINK_IDENTIFIER_ID 101 +#define DOT11_MNG_WAKEUP_SCHEDULE_ID 102 +#define DOT11_MNG_CHANNEL_SWITCH_TIMING_ID 104 +#define DOT11_MNG_PTI_CONTROL_ID 105 +#define DOT11_MNG_PU_BUFFER_STATUS_ID 106 +#define DOT11_MNG_EXT_CAP_ID 127 +#define DOT11_MNG_WPA_ID 221 +#define DOT11_MNG_PROPR_ID 221 + +#define DOT11_MNG_VS_ID 221 + + +#define DOT11_RATE_BASIC 0x80 +#define DOT11_RATE_MASK 0x7F + + +#define DOT11_MNG_ERP_LEN 1 +#define DOT11_MNG_NONERP_PRESENT 0x01 +#define DOT11_MNG_USE_PROTECTION 0x02 +#define DOT11_MNG_BARKER_PREAMBLE 0x04 + +#define DOT11_MGN_TS_DELAY_LEN 4 +#define TS_DELAY_FIELD_SIZE 4 + + +#define DOT11_CAP_ESS 0x0001 +#define DOT11_CAP_IBSS 0x0002 +#define DOT11_CAP_POLLABLE 0x0004 +#define DOT11_CAP_POLL_RQ 0x0008 +#define DOT11_CAP_PRIVACY 0x0010 +#define DOT11_CAP_SHORT 0x0020 +#define DOT11_CAP_PBCC 0x0040 +#define DOT11_CAP_AGILITY 0x0080 +#define DOT11_CAP_SPECTRUM 0x0100 +#define DOT11_CAP_SHORTSLOT 0x0400 +#define DOT11_CAP_RRM 0x1000 +#define DOT11_CAP_CCK_OFDM 0x2000 + + +#define DOT11_OBSS_COEX_MNG_SUPPORT 0x01 + + +#define DOT11_ACTION_HDR_LEN 2 +#define DOT11_ACTION_CAT_OFF 0 +#define DOT11_ACTION_ACT_OFF 1 + + +#define DOT11_ACTION_CAT_ERR_MASK 0x80 +#define DOT11_ACTION_CAT_MASK 0x7F +#define DOT11_ACTION_CAT_SPECT_MNG 0 +#define DOT11_ACTION_CAT_QOS 1 +#define DOT11_ACTION_CAT_DLS 2 +#define DOT11_ACTION_CAT_BLOCKACK 3 +#define DOT11_ACTION_CAT_PUBLIC 4 +#define DOT11_ACTION_CAT_RRM 5 +#define DOT11_ACTION_CAT_FBT 6 +#define DOT11_ACTION_CAT_HT 7 +#if defined(MFP) || defined(WLFBT) || defined(WLWNM) +#define DOT11_ACTION_CAT_SA_QUERY 8 +#define DOT11_ACTION_CAT_PDPA 9 +#define DOT11_ACTION_CAT_BSSMGMT 10 +#define DOT11_ACTION_NOTIFICATION 17 +#define DOT11_ACTION_CAT_VSP 126 +#endif +#define DOT11_ACTION_NOTIFICATION 17 +#define DOT11_ACTION_CAT_VS 127 + + +#define DOT11_SM_ACTION_M_REQ 0 +#define DOT11_SM_ACTION_M_REP 1 +#define DOT11_SM_ACTION_TPC_REQ 2 +#define DOT11_SM_ACTION_TPC_REP 3 +#define DOT11_SM_ACTION_CHANNEL_SWITCH 4 
+#define DOT11_SM_ACTION_EXT_CSA 5 + + +#define DOT11_ACTION_ID_HT_CH_WIDTH 0 +#define DOT11_ACTION_ID_HT_MIMO_PS 1 + + +#define DOT11_PUB_ACTION_BSS_COEX_MNG 0 +#define DOT11_PUB_ACTION_CHANNEL_SWITCH 4 + + +#define DOT11_BA_ACTION_ADDBA_REQ 0 +#define DOT11_BA_ACTION_ADDBA_RESP 1 +#define DOT11_BA_ACTION_DELBA 2 + + +#define DOT11_ADDBA_PARAM_AMSDU_SUP 0x0001 +#define DOT11_ADDBA_PARAM_POLICY_MASK 0x0002 +#define DOT11_ADDBA_PARAM_POLICY_SHIFT 1 +#define DOT11_ADDBA_PARAM_TID_MASK 0x003c +#define DOT11_ADDBA_PARAM_TID_SHIFT 2 +#define DOT11_ADDBA_PARAM_BSIZE_MASK 0xffc0 +#define DOT11_ADDBA_PARAM_BSIZE_SHIFT 6 + +#define DOT11_ADDBA_POLICY_DELAYED 0 +#define DOT11_ADDBA_POLICY_IMMEDIATE 1 + + +#define DOT11_FT_ACTION_FT_RESERVED 0 +#define DOT11_FT_ACTION_FT_REQ 1 +#define DOT11_FT_ACTION_FT_RES 2 +#define DOT11_FT_ACTION_FT_CON 3 +#define DOT11_FT_ACTION_FT_ACK 4 + + + +#define DOT11_WNM_ACTION_EVENT_REQ 0 +#define DOT11_WNM_ACTION_EVENT_REP 1 +#define DOT11_WNM_ACTION_DIAG_REQ 2 +#define DOT11_WNM_ACTION_DIAG_REP 3 +#define DOT11_WNM_ACTION_LOC_CFG_REQ 4 +#define DOT11_WNM_ACTION_LOC_RFG_RESP 5 +#define DOT11_WNM_ACTION_BSS_TRANS_QURY 6 +#define DOT11_WNM_ACTION_BSS_TRANS_REQ 7 +#define DOT11_WNM_ACTION_BSS_TRANS_RESP 8 +#define DOT11_WNM_ACTION_FMS_REQ 9 +#define DOT11_WNM_ACTION_FMS_RESP 10 +#define DOT11_WNM_ACTION_COL_INTRFRNCE_REQ 11 +#define DOT11_WNM_ACTION_COL_INTRFRNCE_REP 12 +#define DOT11_WNM_ACTION_TFS_REQ 13 +#define DOT11_WNM_ACTION_TFS_RESP 14 +#define DOT11_WNM_ACTION_TFS_NOTIFY 15 +#define DOT11_WNM_ACTION_WNM_SLEEP_REQ 16 +#define DOT11_WNM_ACTION_WNM_SLEEP_RESP 17 +#define DOT11_WNM_ACTION_TIM_BCAST_REQ 18 +#define DOT11_WNM_ACTION_TIM_BCAST_RESP 19 +#define DOT11_WNM_ACTION_QOS_TRFC_CAP_UPD 20 +#define DOT11_WNM_ACTION_CHAN_USAGE_REQ 21 +#define DOT11_WNM_ACTION_CHAN_USAGE_RESP 22 +#define DOT11_WNM_ACTION_DMS_REQ 23 +#define DOT11_WNM_ACTION_DMS_RESP 24 +#define DOT11_WNM_ACTION_TMNG_MEASUR_REQ 25 +#define DOT11_WNM_ACTION_NOTFCTN_REQ 26 +#define DOT11_WNM_ACTION_NOTFCTN_RES 27 + + + +BWL_PRE_PACKED_STRUCT struct dot11_bss_trans_query { + uint8 category; + uint8 action; + uint8 token; + uint8 reason; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_bss_trans_query dot11_bss_trans_query_t; +#define DOT11_BSS_TRANS_QUERY_LEN 4 + + +BWL_PRE_PACKED_STRUCT struct dot11_bss_trans_req { + uint8 category; + uint8 action; + uint8 token; + uint8 reqmode; + uint16 disassoc_tmr; + uint8 validity_intrvl; + uint8 data[1]; + +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_bss_trans_req dot11_bss_trans_req_t; +#define DOT11_BSS_TRANS_REQ_LEN 7 + +#define DOT11_BSS_TERM_DUR_LEN 12 + + + +#define DOT11_BSS_TRNS_REQMODE_PREF_LIST_INCL 0x01 +#define DOT11_BSS_TRNS_REQMODE_ABRIDGED 0x02 +#define DOT11_BSS_TRNS_REQMODE_DISASSOC_IMMINENT 0x04 +#define DOT11_BSS_TRNS_REQMODE_BSS_TERM_INCL 0x08 +#define DOT11_BSS_TRNS_REQMODE_ESS_DISASSOC_IMNT 0x10 + + + +BWL_PRE_PACKED_STRUCT struct dot11_bss_trans_res { + uint8 category; + uint8 action; + uint8 token; + uint8 status; + uint8 term_delay; + uint8 data[1]; + +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_bss_trans_res dot11_bss_trans_res_t; +#define DOT11_BSS_TRANS_RES_LEN 5 + + +#define DOT11_BSS_TRNS_RES_STATUS_ACCEPT 0 +#define DOT11_BSS_TRNS_RES_STATUS_REJECT 1 +#define DOT11_BSS_TRNS_RES_STATUS_REJ_INSUFF_BCN 2 +#define DOT11_BSS_TRNS_RES_STATUS_REJ_INSUFF_CAP 3 +#define DOT11_BSS_TRNS_RES_STATUS_REJ_TERM_UNDESIRED 4 +#define DOT11_BSS_TRNS_RES_STATUS_REJ_TERM_DELAY_REQ 5 +#define 
DOT11_BSS_TRNS_RES_STATUS_REJ_BSS_LIST_PROVIDED 6 +#define DOT11_BSS_TRNS_RES_STATUS_REJ_NO_SUITABLE_BSS 7 +#define DOT11_BSS_TRNS_RES_STATUS_REJ_LEAVING_ESS 8 + + + +#define DOT11_NBR_RPRT_BSSID_INFO_REACHABILTY 0x0003 +#define DOT11_NBR_RPRT_BSSID_INFO_SEC 0x0004 +#define DOT11_NBR_RPRT_BSSID_INFO_KEY_SCOPE 0x0008 +#define DOT11_NBR_RPRT_BSSID_INFO_CAP 0x03f0 + +#define DOT11_NBR_RPRT_BSSID_INFO_CAP_SPEC_MGMT 0x0010 +#define DOT11_NBR_RPRT_BSSID_INFO_CAP_QOS 0x0020 +#define DOT11_NBR_RPRT_BSSID_INFO_CAP_APSD 0x0040 +#define DOT11_NBR_RPRT_BSSID_INFO_CAP_RDIO_MSMT 0x0080 +#define DOT11_NBR_RPRT_BSSID_INFO_CAP_DEL_BA 0x0100 +#define DOT11_NBR_RPRT_BSSID_INFO_CAP_IMM_BA 0x0200 + + +#define DOT11_NBR_RPRT_SUBELEM_BSS_CANDDT_PREF_ID 3 +BWL_PRE_PACKED_STRUCT struct dot11_addba_req { + uint8 category; + uint8 action; + uint8 token; + uint16 addba_param_set; + uint16 timeout; + uint16 start_seqnum; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_addba_req dot11_addba_req_t; +#define DOT11_ADDBA_REQ_LEN 9 + +BWL_PRE_PACKED_STRUCT struct dot11_addba_resp { + uint8 category; + uint8 action; + uint8 token; + uint16 status; + uint16 addba_param_set; + uint16 timeout; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_addba_resp dot11_addba_resp_t; +#define DOT11_ADDBA_RESP_LEN 9 + + +#define DOT11_DELBA_PARAM_INIT_MASK 0x0800 +#define DOT11_DELBA_PARAM_INIT_SHIFT 11 +#define DOT11_DELBA_PARAM_TID_MASK 0xf000 +#define DOT11_DELBA_PARAM_TID_SHIFT 12 + +BWL_PRE_PACKED_STRUCT struct dot11_delba { + uint8 category; + uint8 action; + uint16 delba_param_set; + uint16 reason; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_delba dot11_delba_t; +#define DOT11_DELBA_LEN 6 + + +#define SA_QUERY_REQUEST 0 +#define SA_QUERY_RESPONSE 1 + + + + +BWL_PRE_PACKED_STRUCT struct dot11_ft_req { + uint8 category; + uint8 action; + uint8 sta_addr[ETHER_ADDR_LEN]; + uint8 tgt_ap_addr[ETHER_ADDR_LEN]; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ft_req dot11_ft_req_t; +#define DOT11_FT_REQ_FIXED_LEN 14 + + +BWL_PRE_PACKED_STRUCT struct dot11_ft_res { + uint8 category; + uint8 action; + uint8 sta_addr[ETHER_ADDR_LEN]; + uint8 tgt_ap_addr[ETHER_ADDR_LEN]; + uint16 status; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ft_res dot11_ft_res_t; +#define DOT11_FT_RES_FIXED_LEN 16 + + +BWL_PRE_PACKED_STRUCT struct dot11_rde_ie { + uint8 id; + uint8 length; + uint8 rde_id; + uint8 rd_count; + uint16 status; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rde_ie dot11_rde_ie_t; + + +#define DOT11_MNG_RDE_IE_LEN sizeof(dot11_rde_ie_t) + + + + + +#define DOT11_RRM_CAP_LEN 5 +BWL_PRE_PACKED_STRUCT struct dot11_rrm_cap_ie { + uint8 cap[DOT11_RRM_CAP_LEN]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rrm_cap_ie dot11_rrm_cap_ie_t; + + +#define DOT11_RRM_CAP_LINK 0 +#define DOT11_RRM_CAP_NEIGHBOR_REPORT 1 +#define DOT11_RRM_CAP_PARALLEL 2 +#define DOT11_RRM_CAP_REPEATED 3 +#define DOT11_RRM_CAP_BCN_PASSIVE 4 +#define DOT11_RRM_CAP_BCN_ACTIVE 5 +#define DOT11_RRM_CAP_BCN_TABLE 6 +#define DOT11_RRM_CAP_BCN_REP_COND 7 +#define DOT11_RRM_CAP_AP_CHANREP 16 + + + +#define DOT11_EXT_CAP_LEN 4 +BWL_PRE_PACKED_STRUCT struct dot11_ext_cap_ie { + uint8 cap[DOT11_EXT_CAP_LEN]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ext_cap_ie dot11_ext_cap_ie_t; + + +#define DOT11_EXT_CAP_BSS_TRANSITION_MGMT 19 + + +#define DOT11_OP_CLASS_NONE 255 + +BWL_PRE_PACKED_STRUCT struct do11_ap_chrep { + uint8 id; + uint8 len; + uint8 reg; + uint8 chanlist[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct do11_ap_chrep 
dot11_ap_chrep_t; + + +#define DOT11_RM_ACTION_RM_REQ 0 +#define DOT11_RM_ACTION_RM_REP 1 +#define DOT11_RM_ACTION_LM_REQ 2 +#define DOT11_RM_ACTION_LM_REP 3 +#define DOT11_RM_ACTION_NR_REQ 4 +#define DOT11_RM_ACTION_NR_REP 5 + + +BWL_PRE_PACKED_STRUCT struct dot11_rm_action { + uint8 category; + uint8 action; + uint8 token; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rm_action dot11_rm_action_t; +#define DOT11_RM_ACTION_LEN 3 + +BWL_PRE_PACKED_STRUCT struct dot11_rmreq { + uint8 category; + uint8 action; + uint8 token; + uint16 reps; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq dot11_rmreq_t; +#define DOT11_RMREQ_LEN 5 + +BWL_PRE_PACKED_STRUCT struct dot11_rm_ie { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rm_ie dot11_rm_ie_t; +#define DOT11_RM_IE_LEN 5 + + +#define DOT11_RMREQ_MODE_PARALLEL 1 +#define DOT11_RMREQ_MODE_ENABLE 2 +#define DOT11_RMREQ_MODE_REQUEST 4 +#define DOT11_RMREQ_MODE_REPORT 8 +#define DOT11_RMREQ_MODE_DURMAND 0x10 + + +#define DOT11_RMREP_MODE_LATE 1 +#define DOT11_RMREP_MODE_INCAPABLE 2 +#define DOT11_RMREP_MODE_REFUSED 4 + +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_bcn { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 reg; + uint8 channel; + uint16 interval; + uint16 duration; + uint8 bcn_mode; + struct ether_addr bssid; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_bcn dot11_rmreq_bcn_t; +#define DOT11_RMREQ_BCN_LEN 18 + +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn { + uint8 reg; + uint8 channel; + uint32 starttime[2]; + uint16 duration; + uint8 frame_info; + uint8 rcpi; + uint8 rsni; + struct ether_addr bssid; + uint8 antenna_id; + uint32 parent_tsf; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_bcn dot11_rmrep_bcn_t; +#define DOT11_RMREP_BCN_LEN 26 + + +#define DOT11_RMREQ_BCN_PASSIVE 0 +#define DOT11_RMREQ_BCN_ACTIVE 1 +#define DOT11_RMREQ_BCN_TABLE 2 + + +#define DOT11_RMREQ_BCN_SSID_ID 0 +#define DOT11_RMREQ_BCN_REPINFO_ID 1 +#define DOT11_RMREQ_BCN_REPDET_ID 2 +#define DOT11_RMREQ_BCN_REQUEST_ID 10 +#define DOT11_RMREQ_BCN_APCHREP_ID DOT11_MNG_AP_CHREP_ID + + +#define DOT11_RMREQ_BCN_REPDET_FIXED 0 +#define DOT11_RMREQ_BCN_REPDET_REQUEST 1 +#define DOT11_RMREQ_BCN_REPDET_ALL 2 + + +#define DOT11_RMREP_BCN_FRM_BODY 1 + + +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_nbr { + struct ether_addr bssid; + uint32 bssid_info; + uint8 reg; + uint8 channel; + uint8 phytype; + uchar sub_elements[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_nbr dot11_rmrep_nbr_t; +#define DOT11_RMREP_NBR_LEN 13 + + +#define DOT11_BSSTYPE_INFRASTRUCTURE 0 +#define DOT11_BSSTYPE_INDEPENDENT 1 +#define DOT11_BSSTYPE_ANY 2 +#define DOT11_SCANTYPE_ACTIVE 0 +#define DOT11_SCANTYPE_PASSIVE 1 + + +BWL_PRE_PACKED_STRUCT struct dot11_lmreq { + uint8 category; + uint8 action; + uint8 token; + uint8 txpwr; + uint8 maxtxpwr; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_lmreq dot11_lmreq_t; +#define DOT11_LMREQ_LEN 5 + +BWL_PRE_PACKED_STRUCT struct dot11_lmrep { + uint8 category; + uint8 action; + uint8 token; + dot11_tpc_rep_t tpc; + uint8 rxant; + uint8 txant; + uint8 rcpi; + uint8 rsni; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_lmrep dot11_lmrep_t; +#define DOT11_LMREP_LEN 11 + + +#define PREN_PREAMBLE 24 +#define PREN_MM_EXT 12 +#define PREN_PREAMBLE_EXT 4 + + +#define RIFS_11N_TIME 2 + + + +#define HT_SIG1_MCS_MASK 0x00007F +#define HT_SIG1_CBW 0x000080 +#define HT_SIG1_HT_LENGTH 0xFFFF00 + + +#define HT_SIG2_SMOOTHING 
0x000001 +#define HT_SIG2_NOT_SOUNDING 0x000002 +#define HT_SIG2_RESERVED 0x000004 +#define HT_SIG2_AGGREGATION 0x000008 +#define HT_SIG2_STBC_MASK 0x000030 +#define HT_SIG2_STBC_SHIFT 4 +#define HT_SIG2_FEC_CODING 0x000040 +#define HT_SIG2_SHORT_GI 0x000080 +#define HT_SIG2_ESS_MASK 0x000300 +#define HT_SIG2_ESS_SHIFT 8 +#define HT_SIG2_CRC 0x03FC00 +#define HT_SIG2_TAIL 0x1C0000 + + +#define APHY_SLOT_TIME 9 +#define APHY_SIFS_TIME 16 +#define APHY_DIFS_TIME (APHY_SIFS_TIME + (2 * APHY_SLOT_TIME)) +#define APHY_PREAMBLE_TIME 16 +#define APHY_SIGNAL_TIME 4 +#define APHY_SYMBOL_TIME 4 +#define APHY_SERVICE_NBITS 16 +#define APHY_TAIL_NBITS 6 +#define APHY_CWMIN 15 + + +#define BPHY_SLOT_TIME 20 +#define BPHY_SIFS_TIME 10 +#define BPHY_DIFS_TIME 50 +#define BPHY_PLCP_TIME 192 +#define BPHY_PLCP_SHORT_TIME 96 +#define BPHY_CWMIN 31 + + +#define DOT11_OFDM_SIGNAL_EXTENSION 6 + +#define PHY_CWMAX 1023 + +#define DOT11_MAXNUMFRAGS 16 + + +typedef struct d11cnt { + uint32 txfrag; + uint32 txmulti; + uint32 txfail; + uint32 txretry; + uint32 txretrie; + uint32 rxdup; + uint32 txrts; + uint32 txnocts; + uint32 txnoack; + uint32 rxfrag; + uint32 rxmulti; + uint32 rxcrc; + uint32 txfrmsnt; + uint32 rxundec; +} d11cnt_t; + + +#define BRCM_PROP_OUI "\x00\x90\x4C" + + + +#define BRCM_OUI "\x00\x10\x18" + + +BWL_PRE_PACKED_STRUCT struct brcm_ie { + uint8 id; + uint8 len; + uint8 oui[3]; + uint8 ver; + uint8 assoc; + uint8 flags; + uint8 flags1; + uint16 amsdu_mtu_pref; +} BWL_POST_PACKED_STRUCT; +typedef struct brcm_ie brcm_ie_t; +#define BRCM_IE_LEN 11 +#define BRCM_IE_VER 2 +#define BRCM_IE_LEGACY_AES_VER 1 + + +#ifdef WLAFTERBURNER +#define BRF_ABCAP 0x1 +#define BRF_ABRQRD 0x2 +#define BRF_ABCOUNTER_MASK 0xf0 +#define BRF_ABCOUNTER_SHIFT 4 +#endif +#define BRF_LZWDS 0x4 +#define BRF_BLOCKACK 0x8 + + +#define BRF1_AMSDU 0x1 +#define BRF1_WMEPS 0x4 +#define BRF1_PSOFIX 0x8 +#define BRF1_RX_LARGE_AGG 0x10 +#define BRF1_SOFTAP 0x40 + +#ifdef WLAFTERBURNER +#define AB_WDS_TIMEOUT_MAX 15 +#define AB_WDS_TIMEOUT_MIN 1 +#endif + +#define AB_GUARDCOUNT 10 + + +BWL_PRE_PACKED_STRUCT struct vndr_ie { + uchar id; + uchar len; + uchar oui [3]; + uchar data [1]; +} BWL_POST_PACKED_STRUCT; +typedef struct vndr_ie vndr_ie_t; + +#define VNDR_IE_HDR_LEN 2 +#define VNDR_IE_MIN_LEN 3 +#define VNDR_IE_MAX_LEN 256 + + +#define MCSSET_LEN 16 +#define MAX_MCS_NUM (128) + +BWL_PRE_PACKED_STRUCT struct ht_cap_ie { + uint16 cap; + uint8 params; + uint8 supp_mcs[MCSSET_LEN]; + uint16 ext_htcap; + uint32 txbf_cap; + uint8 as_cap; +} BWL_POST_PACKED_STRUCT; +typedef struct ht_cap_ie ht_cap_ie_t; + + + +BWL_PRE_PACKED_STRUCT struct ht_prop_cap_ie { + uint8 id; + uint8 len; + uint8 oui[3]; + uint8 type; + ht_cap_ie_t cap_ie; +} BWL_POST_PACKED_STRUCT; +typedef struct ht_prop_cap_ie ht_prop_cap_ie_t; + +#define HT_PROP_IE_OVERHEAD 4 +#define HT_CAP_IE_LEN 26 +#define HT_CAP_IE_TYPE 51 + +#define HT_CAP_LDPC_CODING 0x0001 +#define HT_CAP_40MHZ 0x0002 +#define HT_CAP_MIMO_PS_MASK 0x000C +#define HT_CAP_MIMO_PS_SHIFT 0x0002 +#define HT_CAP_MIMO_PS_OFF 0x0003 +#define HT_CAP_MIMO_PS_RTS 0x0001 +#define HT_CAP_MIMO_PS_ON 0x0000 +#define HT_CAP_GF 0x0010 +#define HT_CAP_SHORT_GI_20 0x0020 +#define HT_CAP_SHORT_GI_40 0x0040 +#define HT_CAP_TX_STBC 0x0080 +#define HT_CAP_RX_STBC_MASK 0x0300 +#define HT_CAP_RX_STBC_SHIFT 8 +#define HT_CAP_DELAYED_BA 0x0400 +#define HT_CAP_MAX_AMSDU 0x0800 +#define HT_CAP_DSSS_CCK 0x1000 +#define HT_CAP_PSMP 0x2000 +#define HT_CAP_40MHZ_INTOLERANT 0x4000 +#define HT_CAP_LSIG_TXOP 0x8000 + +#define 
HT_CAP_RX_STBC_NO 0x0 +#define HT_CAP_RX_STBC_ONE_STREAM 0x1 +#define HT_CAP_RX_STBC_TWO_STREAM 0x2 +#define HT_CAP_RX_STBC_THREE_STREAM 0x3 + +#define HT_MAX_AMSDU 7935 +#define HT_MIN_AMSDU 3835 + +#define HT_PARAMS_RX_FACTOR_MASK 0x03 +#define HT_PARAMS_DENSITY_MASK 0x1C +#define HT_PARAMS_DENSITY_SHIFT 2 + + +#define AMPDU_MAX_MPDU_DENSITY 7 +#define AMPDU_RX_FACTOR_8K 0 +#define AMPDU_RX_FACTOR_16K 1 +#define AMPDU_RX_FACTOR_32K 2 +#define AMPDU_RX_FACTOR_64K 3 +#define AMPDU_RX_FACTOR_BASE 8*1024 + +#define AMPDU_DELIMITER_LEN 4 +#define AMPDU_DELIMITER_LEN_MAX 63 + +BWL_PRE_PACKED_STRUCT struct ht_add_ie { + uint8 ctl_ch; + uint8 byte1; + uint16 opmode; + uint16 misc_bits; + uint8 basic_mcs[MCSSET_LEN]; +} BWL_POST_PACKED_STRUCT; +typedef struct ht_add_ie ht_add_ie_t; + + + +BWL_PRE_PACKED_STRUCT struct ht_prop_add_ie { + uint8 id; + uint8 len; + uint8 oui[3]; + uint8 type; + ht_add_ie_t add_ie; +} BWL_POST_PACKED_STRUCT; +typedef struct ht_prop_add_ie ht_prop_add_ie_t; + +#define HT_ADD_IE_LEN 22 +#define HT_ADD_IE_TYPE 52 + + +#define HT_BW_ANY 0x04 +#define HT_RIFS_PERMITTED 0x08 + + +#define HT_OPMODE_MASK 0x0003 +#define HT_OPMODE_SHIFT 0 +#define HT_OPMODE_PURE 0x0000 +#define HT_OPMODE_OPTIONAL 0x0001 +#define HT_OPMODE_HT20IN40 0x0002 +#define HT_OPMODE_MIXED 0x0003 +#define HT_OPMODE_NONGF 0x0004 +#define DOT11N_TXBURST 0x0008 +#define DOT11N_OBSS_NONHT 0x0010 + + +#define HT_BASIC_STBC_MCS 0x007f +#define HT_DUAL_STBC_PROT 0x0080 +#define HT_SECOND_BCN 0x0100 +#define HT_LSIG_TXOP 0x0200 +#define HT_PCO_ACTIVE 0x0400 +#define HT_PCO_PHASE 0x0800 + + +#define DOT11N_2G_TXBURST_LIMIT 6160 +#define DOT11N_5G_TXBURST_LIMIT 3080 + + +#define GET_HT_OPMODE(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \ + >> HT_OPMODE_SHIFT) +#define HT_MIXEDMODE_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \ + == HT_OPMODE_MIXED) +#define HT_HT20_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \ + == HT_OPMODE_HT20IN40) +#define HT_OPTIONAL_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \ + == HT_OPMODE_OPTIONAL) +#define HT_USE_PROTECTION(add_ie) (HT_HT20_PRESENT((add_ie)) || \ + HT_MIXEDMODE_PRESENT((add_ie))) +#define HT_NONGF_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_NONGF) \ + == HT_OPMODE_NONGF) +#define DOT11N_TXBURST_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & DOT11N_TXBURST) \ + == DOT11N_TXBURST) +#define DOT11N_OBSS_NONHT_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & DOT11N_OBSS_NONHT) \ + == DOT11N_OBSS_NONHT) + +BWL_PRE_PACKED_STRUCT struct obss_params { + uint16 passive_dwell; + uint16 active_dwell; + uint16 bss_widthscan_interval; + uint16 passive_total; + uint16 active_total; + uint16 chanwidth_transition_dly; + uint16 activity_threshold; +} BWL_POST_PACKED_STRUCT; +typedef struct obss_params obss_params_t; + +BWL_PRE_PACKED_STRUCT struct dot11_obss_ie { + uint8 id; + uint8 len; + obss_params_t obss_params; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_obss_ie dot11_obss_ie_t; +#define DOT11_OBSS_SCAN_IE_LEN sizeof(obss_params_t) + + +#define HT_CTRL_LA_TRQ 0x00000002 +#define HT_CTRL_LA_MAI 0x0000003C +#define HT_CTRL_LA_MAI_SHIFT 2 +#define HT_CTRL_LA_MAI_MRQ 0x00000004 +#define HT_CTRL_LA_MAI_MSI 0x00000038 +#define HT_CTRL_LA_MFSI 0x000001C0 +#define HT_CTRL_LA_MFSI_SHIFT 6 +#define HT_CTRL_LA_MFB_ASELC 0x0000FE00 +#define HT_CTRL_LA_MFB_ASELC_SH 9 +#define HT_CTRL_LA_ASELC_CMD 0x00000C00 +#define HT_CTRL_LA_ASELC_DATA 0x0000F000 +#define HT_CTRL_CAL_POS 0x00030000 +#define HT_CTRL_CAL_SEQ 
0x000C0000 +#define HT_CTRL_CSI_STEERING 0x00C00000 +#define HT_CTRL_CSI_STEER_SHIFT 22 +#define HT_CTRL_CSI_STEER_NFB 0 +#define HT_CTRL_CSI_STEER_CSI 1 +#define HT_CTRL_CSI_STEER_NCOM 2 +#define HT_CTRL_CSI_STEER_COM 3 +#define HT_CTRL_NDP_ANNOUNCE 0x01000000 +#define HT_CTRL_AC_CONSTRAINT 0x40000000 +#define HT_CTRL_RDG_MOREPPDU 0x80000000 + +#define HT_OPMODE_OPTIONAL 0x0001 +#define HT_OPMODE_HT20IN40 0x0002 +#define HT_OPMODE_MIXED 0x0003 +#define HT_OPMODE_NONGF 0x0004 +#define DOT11N_TXBURST 0x0008 +#define DOT11N_OBSS_NONHT 0x0010 + + + +#define WPA_OUI "\x00\x50\xF2" +#define WPA_OUI_LEN 3 +#define WPA_OUI_TYPE 1 +#define WPA_VERSION 1 +#define WPA2_OUI "\x00\x0F\xAC" +#define WPA2_OUI_LEN 3 +#define WPA2_VERSION 1 +#define WPA2_VERSION_LEN 2 + + +#define WPS_OUI "\x00\x50\xF2" +#define WPS_OUI_LEN 3 +#define WPS_OUI_TYPE 4 + + +#define WFA_OUI "\x50\x6F\x9A" +#define WFA_OUI_LEN 3 + +#define WFA_OUI_TYPE_WPA 1 +#define WFA_OUI_TYPE_WPS 4 +#define WFA_OUI_TYPE_TPC 8 +#define WFA_OUI_TYPE_P2P 9 + + +#define RSN_AKM_NONE 0 +#define RSN_AKM_UNSPECIFIED 1 +#define RSN_AKM_PSK 2 +#define RSN_AKM_FBT_1X 3 +#define RSN_AKM_FBT_PSK 4 +#define RSN_AKM_MFP_1X 5 +#define RSN_AKM_MFP_PSK 6 +#define RSN_AKM_TPK 7 + + +#define DOT11_MAX_DEFAULT_KEYS 4 +#define DOT11_MAX_KEY_SIZE 32 +#define DOT11_MAX_IV_SIZE 16 +#define DOT11_EXT_IV_FLAG (1<<5) +#define DOT11_WPA_KEY_RSC_LEN 8 + +#define WEP1_KEY_SIZE 5 +#define WEP1_KEY_HEX_SIZE 10 +#define WEP128_KEY_SIZE 13 +#define WEP128_KEY_HEX_SIZE 26 +#define TKIP_MIC_SIZE 8 +#define TKIP_EOM_SIZE 7 +#define TKIP_EOM_FLAG 0x5a +#define TKIP_KEY_SIZE 32 +#define TKIP_MIC_AUTH_TX 16 +#define TKIP_MIC_AUTH_RX 24 +#define TKIP_MIC_SUP_RX TKIP_MIC_AUTH_TX +#define TKIP_MIC_SUP_TX TKIP_MIC_AUTH_RX +#define AES_KEY_SIZE 16 +#define AES_MIC_SIZE 8 + + +#define WCN_OUI "\x00\x50\xf2" +#define WCN_TYPE 4 + + + + + +BWL_PRE_PACKED_STRUCT struct dot11_mdid_ie { + uint8 id; + uint8 len; + uint16 mdid; + uint8 cap; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_mdid_ie dot11_mdid_ie_t; + +#define FBT_MDID_CAP_OVERDS 0x01 +#define FBT_MDID_CAP_RRP 0x02 + + +BWL_PRE_PACKED_STRUCT struct dot11_ft_ie { + uint8 id; + uint8 len; + uint16 mic_control; + uint8 mic[16]; + uint8 anonce[32]; + uint8 snonce[32]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ft_ie dot11_ft_ie_t; + + +BWL_PRE_PACKED_STRUCT struct dot11_gtk_ie { + uint8 id; + uint8 len; + uint16 key_info; + uint8 key_len; + uint8 rsc[8]; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_gtk_ie dot11_gtk_ie_t; + +#define BSSID_INVALID "\x00\x00\x00\x00\x00\x00" +#define BSSID_BROADCAST "\xFF\xFF\xFF\xFF\xFF\xFF" + + + +BWL_PRE_PACKED_STRUCT struct link_id_ie { + uint8 id; + uint8 len; + struct ether_addr bssid; + struct ether_addr tdls_init_mac; + struct ether_addr tdls_resp_mac; +} BWL_POST_PACKED_STRUCT; +typedef struct link_id_ie link_id_ie_t; +#define TDLS_LINK_ID_IE_LEN 18 + + +BWL_PRE_PACKED_STRUCT struct wakeup_sch_ie { + uint8 id; + uint8 len; + uint32 offset; + uint32 interval; + uint32 awake_win_slots; + uint32 max_wake_win; + uint16 idle_cnt; +} BWL_POST_PACKED_STRUCT; +typedef struct wakeup_sch_ie wakeup_sch_ie_t; +#define TDLS_WAKEUP_SCH_IE_LEN 18 + + +BWL_PRE_PACKED_STRUCT struct channel_switch_timing_ie { + uint8 id; + uint8 len; + uint16 switch_time; + uint16 switch_timeout; +} BWL_POST_PACKED_STRUCT; +typedef struct channel_switch_timing_ie channel_switch_timing_ie_t; +#define TDLS_CHANNEL_SWITCH_TIMING_IE_LEN 4 + + +BWL_PRE_PACKED_STRUCT struct pti_control_ie { + uint8 id; + 
uint8 len; + uint8 tid; + uint16 seq_control; +} BWL_POST_PACKED_STRUCT; +typedef struct pti_control_ie pti_control_ie_t; +#define TDLS_PTI_CONTROL_IE_LEN 3 + + +BWL_PRE_PACKED_STRUCT struct pu_buffer_status_ie { + uint8 id; + uint8 len; + uint8 status; +} BWL_POST_PACKED_STRUCT; +typedef struct pu_buffer_status_ie pu_buffer_status_ie_t; +#define TDLS_PU_BUFFER_STATUS_IE_LEN 1 +#define TDLS_PU_BUFFER_STATUS_AC_BK 1 +#define TDLS_PU_BUFFER_STATUS_AC_BE 2 +#define TDLS_PU_BUFFER_STATUS_AC_VI 4 +#define TDLS_PU_BUFFER_STATUS_AC_VO 8 + + +#include + +#endif diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h b/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h new file mode 100644 index 0000000000000..cbdd05e624bcc --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h @@ -0,0 +1,45 @@ +/* + * BT-AMP (BlueTooth Alternate Mac and Phy) 802.11 PAL (Protocol Adaptation Layer) + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: 802.11_bta.h 277737 2011-08-16 17:54:59Z $ +*/ + +#ifndef _802_11_BTA_H_ +#define _802_11_BTA_H_ + +#define BT_SIG_SNAP_MPROT "\xAA\xAA\x03\x00\x19\x58" + +/* BT-AMP 802.11 PAL Protocols */ +#define BTA_PROT_L2CAP 1 +#define BTA_PROT_ACTIVITY_REPORT 2 +#define BTA_PROT_SECURITY 3 +#define BTA_PROT_LINK_SUPERVISION_REQUEST 4 +#define BTA_PROT_LINK_SUPERVISION_REPLY 5 + +/* BT-AMP 802.11 PAL AMP_ASSOC Type IDs */ +#define BTA_TYPE_ID_MAC_ADDRESS 1 +#define BTA_TYPE_ID_PREFERRED_CHANNELS 2 +#define BTA_TYPE_ID_CONNECTED_CHANNELS 3 +#define BTA_TYPE_ID_CAPABILITIES 4 +#define BTA_TYPE_ID_VERSION 5 +#endif /* _802_11_bta_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11e.h b/drivers/net/wireless/bcmdhd/include/proto/802.11e.h new file mode 100644 index 0000000000000..0e070a475b64c --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/802.11e.h @@ -0,0 +1,131 @@ +/* + * 802.11e protocol header file + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: 802.11e.h 277737 2011-08-16 17:54:59Z $ + */ + +#ifndef _802_11e_H_ +#define _802_11e_H_ + +#ifndef _TYPEDEFS_H_ +#include +#endif + +/* This marks the start of a packed structure section. 
*/ +#include + + +/* WME Traffic Specification (TSPEC) element */ +#define WME_TSPEC_HDR_LEN 2 /* WME TSPEC header length */ +#define WME_TSPEC_BODY_OFF 2 /* WME TSPEC body offset */ + +#define WME_CATEGORY_CODE_OFFSET 0 /* WME Category code offset */ +#define WME_ACTION_CODE_OFFSET 1 /* WME Action code offset */ +#define WME_TOKEN_CODE_OFFSET 2 /* WME Token code offset */ +#define WME_STATUS_CODE_OFFSET 3 /* WME Status code offset */ + +BWL_PRE_PACKED_STRUCT struct tsinfo { + uint8 octets[3]; +} BWL_POST_PACKED_STRUCT; + +typedef struct tsinfo tsinfo_t; + +/* 802.11e TSPEC IE */ +typedef BWL_PRE_PACKED_STRUCT struct tspec { + uint8 oui[DOT11_OUI_LEN]; /* WME_OUI */ + uint8 type; /* WME_TYPE */ + uint8 subtype; /* WME_SUBTYPE_TSPEC */ + uint8 version; /* WME_VERSION */ + tsinfo_t tsinfo; /* TS Info bit field */ + uint16 nom_msdu_size; /* (Nominal or fixed) MSDU Size (bytes) */ + uint16 max_msdu_size; /* Maximum MSDU Size (bytes) */ + uint32 min_srv_interval; /* Minimum Service Interval (us) */ + uint32 max_srv_interval; /* Maximum Service Interval (us) */ + uint32 inactivity_interval; /* Inactivity Interval (us) */ + uint32 suspension_interval; /* Suspension Interval (us) */ + uint32 srv_start_time; /* Service Start Time (us) */ + uint32 min_data_rate; /* Minimum Data Rate (bps) */ + uint32 mean_data_rate; /* Mean Data Rate (bps) */ + uint32 peak_data_rate; /* Peak Data Rate (bps) */ + uint32 max_burst_size; /* Maximum Burst Size (bytes) */ + uint32 delay_bound; /* Delay Bound (us) */ + uint32 min_phy_rate; /* Minimum PHY Rate (bps) */ + uint16 surplus_bw; /* Surplus Bandwidth Allowance (range 1.0-8.0) */ + uint16 medium_time; /* Medium Time (32 us/s periods) */ +} BWL_POST_PACKED_STRUCT tspec_t; + +#define WME_TSPEC_LEN (sizeof(tspec_t)) /* not including 2-bytes of header */ + +/* ts_info */ +/* 802.1D priority is duplicated - bits 13-11 AND bits 3-1 */ +#define TS_INFO_TID_SHIFT 1 /* TS info. TID shift */ +#define TS_INFO_TID_MASK (0xf << TS_INFO_TID_SHIFT) /* TS info. TID mask */ +#define TS_INFO_CONTENTION_SHIFT 7 /* TS info. contention shift */ +#define TS_INFO_CONTENTION_MASK (0x1 << TS_INFO_CONTENTION_SHIFT) /* TS info. contention mask */ +#define TS_INFO_DIRECTION_SHIFT 5 /* TS info. direction shift */ +#define TS_INFO_DIRECTION_MASK (0x3 << TS_INFO_DIRECTION_SHIFT) /* TS info. direction mask */ +#define TS_INFO_PSB_SHIFT 2 /* TS info. PSB bit Shift */ +#define TS_INFO_PSB_MASK (1 << TS_INFO_PSB_SHIFT) /* TS info. PSB mask */ +#define TS_INFO_UPLINK (0 << TS_INFO_DIRECTION_SHIFT) /* TS info. uplink */ +#define TS_INFO_DOWNLINK (1 << TS_INFO_DIRECTION_SHIFT) /* TS info. downlink */ +#define TS_INFO_BIDIRECTIONAL (3 << TS_INFO_DIRECTION_SHIFT) /* TS info. bidirectional */ +#define TS_INFO_USER_PRIO_SHIFT 3 /* TS info. user priority shift */ +/* TS info. 
user priority mask */ +#define TS_INFO_USER_PRIO_MASK (0x7 << TS_INFO_USER_PRIO_SHIFT) + +/* Macro to get/set bit(s) field in TSINFO */ +#define WLC_CAC_GET_TID(pt) ((((pt).octets[0]) & TS_INFO_TID_MASK) >> TS_INFO_TID_SHIFT) +#define WLC_CAC_GET_DIR(pt) ((((pt).octets[0]) & \ + TS_INFO_DIRECTION_MASK) >> TS_INFO_DIRECTION_SHIFT) +#define WLC_CAC_GET_PSB(pt) ((((pt).octets[1]) & TS_INFO_PSB_MASK) >> TS_INFO_PSB_SHIFT) +#define WLC_CAC_GET_USER_PRIO(pt) ((((pt).octets[1]) & \ + TS_INFO_USER_PRIO_MASK) >> TS_INFO_USER_PRIO_SHIFT) + +#define WLC_CAC_SET_TID(pt, id) ((((pt).octets[0]) & (~TS_INFO_TID_MASK)) | \ + ((id) << TS_INFO_TID_SHIFT)) +#define WLC_CAC_SET_USER_PRIO(pt, prio) ((((pt).octets[0]) & (~TS_INFO_USER_PRIO_MASK)) | \ + ((prio) << TS_INFO_USER_PRIO_SHIFT)) + +/* 802.11e QBSS Load IE */ +#define QBSS_LOAD_IE_LEN 5 /* QBSS Load IE length */ +#define QBSS_LOAD_AAC_OFF 3 /* AAC offset in IE */ + +#define CAC_ADDTS_RESP_TIMEOUT 300 /* default ADDTS response timeout in ms */ + +/* 802.11e ADDTS status code */ +#define DOT11E_STATUS_ADMISSION_ACCEPTED 0 /* TSPEC Admission accepted status */ +#define DOT11E_STATUS_ADDTS_INVALID_PARAM 1 /* TSPEC invalid parameter status */ +#define DOT11E_STATUS_ADDTS_REFUSED_NSBW 3 /* ADDTS refused (non-sufficient BW) */ +#define DOT11E_STATUS_ADDTS_REFUSED_AWHILE 47 /* ADDTS refused but could retry later */ + +/* 802.11e DELTS status code */ +#define DOT11E_STATUS_QSTA_LEAVE_QBSS 36 /* STA leave QBSS */ +#define DOT11E_STATUS_END_TS 37 /* END TS */ +#define DOT11E_STATUS_UNKNOWN_TS 38 /* UNKNOWN TS */ +#define DOT11E_STATUS_QSTA_REQ_TIMEOUT 39 /* STA ADDTS request timeout */ + + +/* This marks the end of a packed structure section. */ +#include + +#endif /* _802_11e_CAC_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.1d.h b/drivers/net/wireless/bcmdhd/include/proto/802.1d.h new file mode 100644 index 0000000000000..c7e07bd5e7c3f --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/802.1d.h @@ -0,0 +1,49 @@ +/* + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * Fundamental types and constants relating to 802.1D + * + * $Id: 802.1d.h 277737 2011-08-16 17:54:59Z $ + */ + + +#ifndef _802_1_D_ +#define _802_1_D_ + + +#define PRIO_8021D_NONE 2 +#define PRIO_8021D_BK 1 +#define PRIO_8021D_BE 0 +#define PRIO_8021D_EE 3 +#define PRIO_8021D_CL 4 +#define PRIO_8021D_VI 5 +#define PRIO_8021D_VO 6 +#define PRIO_8021D_NC 7 +#define MAXPRIO 7 +#define NUMPRIO (MAXPRIO + 1) + +#define ALLPRIO -1 + + +#define PRIO2PREC(prio) \ + (((prio) == PRIO_8021D_NONE || (prio) == PRIO_8021D_BE) ? ((prio^2)) : (prio)) + +#endif diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h b/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h new file mode 100644 index 0000000000000..0f75d3c8f1d60 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h @@ -0,0 +1,83 @@ +/* + * Broadcom Ethernettype protocol definitions + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: bcmeth.h 277737 2011-08-16 17:54:59Z $ + */ + + + + +#ifndef _BCMETH_H_ +#define _BCMETH_H_ + +#ifndef _TYPEDEFS_H_ +#include +#endif + + +#include + + + + + + + +#define BCMILCP_SUBTYPE_RATE 1 +#define BCMILCP_SUBTYPE_LINK 2 +#define BCMILCP_SUBTYPE_CSA 3 +#define BCMILCP_SUBTYPE_LARQ 4 +#define BCMILCP_SUBTYPE_VENDOR 5 +#define BCMILCP_SUBTYPE_FLH 17 + +#define BCMILCP_SUBTYPE_VENDOR_LONG 32769 +#define BCMILCP_SUBTYPE_CERT 32770 +#define BCMILCP_SUBTYPE_SES 32771 + + +#define BCMILCP_BCM_SUBTYPE_RESERVED 0 +#define BCMILCP_BCM_SUBTYPE_EVENT 1 +#define BCMILCP_BCM_SUBTYPE_SES 2 + + +#define BCMILCP_BCM_SUBTYPE_DPT 4 + +#define BCMILCP_BCM_SUBTYPEHDR_MINLENGTH 8 +#define BCMILCP_BCM_SUBTYPEHDR_VERSION 0 + + +typedef BWL_PRE_PACKED_STRUCT struct bcmeth_hdr +{ + uint16 subtype; + uint16 length; + uint8 version; + uint8 oui[3]; + + uint16 usr_subtype; +} BWL_POST_PACKED_STRUCT bcmeth_hdr_t; + + + +#include + +#endif diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h b/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h new file mode 100644 index 0000000000000..e8c2387dd10bb --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h @@ -0,0 +1,317 @@ +/* + * Broadcom Event protocol definitions + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * Dependencies: proto/bcmeth.h + * + * $Id: bcmevent.h 288077 2011-10-06 00:08:47Z $ + * + */ + + + + +#ifndef _BCMEVENT_H_ +#define _BCMEVENT_H_ + +#ifndef _TYPEDEFS_H_ +#include +#endif + + +#include + +#define BCM_EVENT_MSG_VERSION 2 +#define BCM_MSG_IFNAME_MAX 16 + + +#define WLC_EVENT_MSG_LINK 0x01 +#define WLC_EVENT_MSG_FLUSHTXQ 0x02 +#define WLC_EVENT_MSG_GROUP 0x04 +#define WLC_EVENT_MSG_UNKBSS 0x08 +#define WLC_EVENT_MSG_UNKIF 0x10 + + + + +typedef BWL_PRE_PACKED_STRUCT struct +{ + uint16 version; + uint16 flags; + uint32 event_type; + uint32 status; + uint32 reason; + uint32 auth_type; + uint32 datalen; + struct ether_addr addr; + char ifname[BCM_MSG_IFNAME_MAX]; +} BWL_POST_PACKED_STRUCT wl_event_msg_v1_t; + + +typedef BWL_PRE_PACKED_STRUCT struct +{ + uint16 version; + uint16 flags; + uint32 event_type; + uint32 status; + uint32 reason; + uint32 auth_type; + uint32 datalen; + struct ether_addr addr; + char ifname[BCM_MSG_IFNAME_MAX]; + uint8 ifidx; + uint8 bsscfgidx; +} BWL_POST_PACKED_STRUCT wl_event_msg_t; + + +typedef BWL_PRE_PACKED_STRUCT struct bcm_event { + struct ether_header eth; + bcmeth_hdr_t bcm_hdr; + wl_event_msg_t event; + +} BWL_POST_PACKED_STRUCT bcm_event_t; + +#define BCM_MSG_LEN (sizeof(bcm_event_t) - sizeof(bcmeth_hdr_t) - sizeof(struct ether_header)) + + +#define WLC_E_SET_SSID 0 +#define WLC_E_JOIN 1 +#define WLC_E_START 2 +#define WLC_E_AUTH 3 +#define WLC_E_AUTH_IND 4 +#define WLC_E_DEAUTH 5 +#define WLC_E_DEAUTH_IND 6 +#define WLC_E_ASSOC 7 +#define WLC_E_ASSOC_IND 8 +#define WLC_E_REASSOC 9 +#define WLC_E_REASSOC_IND 10 +#define WLC_E_DISASSOC 11 +#define WLC_E_DISASSOC_IND 12 +#define WLC_E_QUIET_START 13 +#define WLC_E_QUIET_END 14 +#define WLC_E_BEACON_RX 15 +#define WLC_E_LINK 16 +#define WLC_E_MIC_ERROR 17 +#define WLC_E_NDIS_LINK 18 +#define WLC_E_ROAM 19 +#define WLC_E_TXFAIL 20 +#define WLC_E_PMKID_CACHE 21 +#define WLC_E_RETROGRADE_TSF 22 +#define WLC_E_PRUNE 23 +#define WLC_E_AUTOAUTH 24 +#define WLC_E_EAPOL_MSG 25 +#define WLC_E_SCAN_COMPLETE 26 +#define WLC_E_ADDTS_IND 27 +#define WLC_E_DELTS_IND 28 +#define WLC_E_BCNSENT_IND 29 +#define WLC_E_BCNRX_MSG 30 +#define WLC_E_BCNLOST_MSG 31 +#define WLC_E_ROAM_PREP 32 +#define WLC_E_PFN_NET_FOUND 33 +#define WLC_E_PFN_NET_LOST 34 +#define WLC_E_RESET_COMPLETE 35 +#define WLC_E_JOIN_START 36 +#define WLC_E_ROAM_START 37 +#define WLC_E_ASSOC_START 38 +#define WLC_E_IBSS_ASSOC 39 +#define WLC_E_RADIO 40 +#define WLC_E_PSM_WATCHDOG 41 +#define WLC_E_PROBREQ_MSG 44 +#define WLC_E_SCAN_CONFIRM_IND 45 +#define WLC_E_PSK_SUP 46 +#define WLC_E_COUNTRY_CODE_CHANGED 47 +#define WLC_E_EXCEEDED_MEDIUM_TIME 48 +#define WLC_E_ICV_ERROR 49 +#define WLC_E_UNICAST_DECODE_ERROR 50 +#define WLC_E_MULTICAST_DECODE_ERROR 51 +#define WLC_E_TRACE 52 +#define WLC_E_BTA_HCI_EVENT 53 +#define WLC_E_IF 54 +#ifdef WLP2P +#define WLC_E_P2P_DISC_LISTEN_COMPLETE 55 +#endif +#define WLC_E_RSSI 56 +#define WLC_E_PFN_SCAN_COMPLETE 57 +#define WLC_E_EXTLOG_MSG 58 +#define WLC_E_ACTION_FRAME 59 +#define WLC_E_ACTION_FRAME_COMPLETE 60 +#define WLC_E_PRE_ASSOC_IND 61 +#define WLC_E_PRE_REASSOC_IND 62 +#define WLC_E_CHANNEL_ADOPTED 63 +#define WLC_E_AP_STARTED 64 +#define WLC_E_DFS_AP_STOP 65 +#define WLC_E_DFS_AP_RESUME 66 +#define WLC_E_WAI_STA_EVENT 67 +#define WLC_E_WAI_MSG 68 +#define WLC_E_ESCAN_RESULT 69 +#define WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE 70 +#if defined(WLP2P) +#define WLC_E_PROBRESP_MSG 71 +#define WLC_E_P2P_PROBREQ_MSG 72 +#endif +#define WLC_E_DCS_REQUEST 73 + +#define WLC_E_FIFO_CREDIT_MAP 74 + +#define 
WLC_E_ACTION_FRAME_RX 75 +#define WLC_E_WAKE_EVENT 76 +#define WLC_E_RM_COMPLETE 77 +#define WLC_E_HTSFSYNC 78 +#define WLC_E_OVERLAY_REQ 79 +#define WLC_E_CSA_COMPLETE_IND 80 +#define WLC_E_EXCESS_PM_WAKE_EVENT 81 +#define WLC_E_PFN_SCAN_NONE 82 +#define WLC_E_PFN_SCAN_ALLGONE 83 +#define WLC_E_GTK_PLUMBED 84 +#define WLC_E_ASSOC_REQ_IE 85 +#define WLC_E_ASSOC_RESP_IE 86 +#define WLC_E_LAST 87 + + + +typedef struct { + uint event; + const char *name; +} bcmevent_name_t; + +extern const bcmevent_name_t bcmevent_names[]; +extern const int bcmevent_names_size; + + +#define WLC_E_STATUS_SUCCESS 0 +#define WLC_E_STATUS_FAIL 1 +#define WLC_E_STATUS_TIMEOUT 2 +#define WLC_E_STATUS_NO_NETWORKS 3 +#define WLC_E_STATUS_ABORT 4 +#define WLC_E_STATUS_NO_ACK 5 +#define WLC_E_STATUS_UNSOLICITED 6 +#define WLC_E_STATUS_ATTEMPT 7 +#define WLC_E_STATUS_PARTIAL 8 +#define WLC_E_STATUS_NEWSCAN 9 +#define WLC_E_STATUS_NEWASSOC 10 +#define WLC_E_STATUS_11HQUIET 11 +#define WLC_E_STATUS_SUPPRESS 12 +#define WLC_E_STATUS_NOCHANS 13 +#define WLC_E_STATUS_CS_ABORT 15 +#define WLC_E_STATUS_ERROR 16 + + +#define WLC_E_REASON_INITIAL_ASSOC 0 +#define WLC_E_REASON_LOW_RSSI 1 +#define WLC_E_REASON_DEAUTH 2 +#define WLC_E_REASON_DISASSOC 3 +#define WLC_E_REASON_BCNS_LOST 4 +#define WLC_E_REASON_MINTXRATE 9 +#define WLC_E_REASON_TXFAIL 10 + + +#define WLC_E_REASON_FAST_ROAM_FAILED 5 +#define WLC_E_REASON_DIRECTED_ROAM 6 +#define WLC_E_REASON_TSPEC_REJECTED 7 +#define WLC_E_REASON_BETTER_AP 8 + +#define WLC_E_REASON_REQUESTED_ROAM 11 + + +#define WLC_E_PRUNE_ENCR_MISMATCH 1 +#define WLC_E_PRUNE_BCAST_BSSID 2 +#define WLC_E_PRUNE_MAC_DENY 3 +#define WLC_E_PRUNE_MAC_NA 4 +#define WLC_E_PRUNE_REG_PASSV 5 +#define WLC_E_PRUNE_SPCT_MGMT 6 +#define WLC_E_PRUNE_RADAR 7 +#define WLC_E_RSN_MISMATCH 8 +#define WLC_E_PRUNE_NO_COMMON_RATES 9 +#define WLC_E_PRUNE_BASIC_RATES 10 +#define WLC_E_PRUNE_CIPHER_NA 12 +#define WLC_E_PRUNE_KNOWN_STA 13 +#define WLC_E_PRUNE_WDS_PEER 15 +#define WLC_E_PRUNE_QBSS_LOAD 16 +#define WLC_E_PRUNE_HOME_AP 17 + + +#define WLC_E_SUP_OTHER 0 +#define WLC_E_SUP_DECRYPT_KEY_DATA 1 +#define WLC_E_SUP_BAD_UCAST_WEP128 2 +#define WLC_E_SUP_BAD_UCAST_WEP40 3 +#define WLC_E_SUP_UNSUP_KEY_LEN 4 +#define WLC_E_SUP_PW_KEY_CIPHER 5 +#define WLC_E_SUP_MSG3_TOO_MANY_IE 6 +#define WLC_E_SUP_MSG3_IE_MISMATCH 7 +#define WLC_E_SUP_NO_INSTALL_FLAG 8 +#define WLC_E_SUP_MSG3_NO_GTK 9 +#define WLC_E_SUP_GRP_KEY_CIPHER 10 +#define WLC_E_SUP_GRP_MSG1_NO_GTK 11 +#define WLC_E_SUP_GTK_DECRYPT_FAIL 12 +#define WLC_E_SUP_SEND_FAIL 13 +#define WLC_E_SUP_DEAUTH 14 +#define WLC_E_SUP_WPA_PSK_TMO 15 + + + +typedef BWL_PRE_PACKED_STRUCT struct wl_event_rx_frame_data { + uint16 version; + uint16 channel; + int32 rssi; + uint32 mactime; + uint32 rate; +} BWL_POST_PACKED_STRUCT wl_event_rx_frame_data_t; + +#define BCM_RX_FRAME_DATA_VERSION 1 + + +typedef struct wl_event_data_if { + uint8 ifidx; + uint8 opcode; + uint8 reserved; + uint8 bssidx; + uint8 role; +} wl_event_data_if_t; + + +#define WLC_E_IF_ADD 1 +#define WLC_E_IF_DEL 2 +#define WLC_E_IF_CHANGE 3 + + +#define WLC_E_IF_ROLE_STA 0 +#define WLC_E_IF_ROLE_AP 1 +#define WLC_E_IF_ROLE_WDS 2 +#define WLC_E_IF_ROLE_P2P_GO 3 +#define WLC_E_IF_ROLE_P2P_CLIENT 4 +#define WLC_E_IF_ROLE_BTA_CREATOR 5 +#define WLC_E_IF_ROLE_BTA_ACCEPTOR 6 + + +#define WLC_E_LINK_BCN_LOSS 1 +#define WLC_E_LINK_DISASSOC 2 +#define WLC_E_LINK_ASSOC_REC 3 +#define WLC_E_LINK_BSSCFG_DIS 4 + + +#define WLC_E_OVL_DOWNLOAD 0 +#define WLC_E_OVL_UPDATE_IND 1 + + +#include + +#endif diff --git 
a/drivers/net/wireless/bcmdhd/include/proto/bcmip.h b/drivers/net/wireless/bcmdhd/include/proto/bcmip.h new file mode 100644 index 0000000000000..55eff247c492e --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/bcmip.h @@ -0,0 +1,154 @@ +/* + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * Fundamental constants relating to IP Protocol + * + * $Id: bcmip.h 277737 2011-08-16 17:54:59Z $ + */ + + +#ifndef _bcmip_h_ +#define _bcmip_h_ + +#ifndef _TYPEDEFS_H_ +#include +#endif + + +#include + + + +#define IP_VER_OFFSET 0x0 +#define IP_VER_MASK 0xf0 +#define IP_VER_SHIFT 4 +#define IP_VER_4 4 +#define IP_VER_6 6 + +#define IP_VER(ip_body) \ + ((((uint8 *)(ip_body))[IP_VER_OFFSET] & IP_VER_MASK) >> IP_VER_SHIFT) + +#define IP_PROT_ICMP 0x1 +#define IP_PROT_TCP 0x6 +#define IP_PROT_UDP 0x11 + + +#define IPV4_VER_HL_OFFSET 0 +#define IPV4_TOS_OFFSET 1 +#define IPV4_PKTLEN_OFFSET 2 +#define IPV4_PKTFLAG_OFFSET 6 +#define IPV4_PROT_OFFSET 9 +#define IPV4_CHKSUM_OFFSET 10 +#define IPV4_SRC_IP_OFFSET 12 +#define IPV4_DEST_IP_OFFSET 16 +#define IPV4_OPTIONS_OFFSET 20 + + +#define IPV4_VER_MASK 0xf0 +#define IPV4_VER_SHIFT 4 + +#define IPV4_HLEN_MASK 0x0f +#define IPV4_HLEN(ipv4_body) (4 * (((uint8 *)(ipv4_body))[IPV4_VER_HL_OFFSET] & IPV4_HLEN_MASK)) + +#define IPV4_ADDR_LEN 4 + +#define IPV4_ADDR_NULL(a) ((((uint8 *)(a))[0] | ((uint8 *)(a))[1] | \ + ((uint8 *)(a))[2] | ((uint8 *)(a))[3]) == 0) + +#define IPV4_ADDR_BCAST(a) ((((uint8 *)(a))[0] & ((uint8 *)(a))[1] & \ + ((uint8 *)(a))[2] & ((uint8 *)(a))[3]) == 0xff) + +#define IPV4_TOS_DSCP_MASK 0xfc +#define IPV4_TOS_DSCP_SHIFT 2 + +#define IPV4_TOS(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_TOS_OFFSET]) + +#define IPV4_TOS_PREC_MASK 0xe0 +#define IPV4_TOS_PREC_SHIFT 5 + +#define IPV4_TOS_LOWDELAY 0x10 +#define IPV4_TOS_THROUGHPUT 0x8 +#define IPV4_TOS_RELIABILITY 0x4 + +#define IPV4_PROT(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_PROT_OFFSET]) + +#define IPV4_FRAG_RESV 0x8000 +#define IPV4_FRAG_DONT 0x4000 +#define IPV4_FRAG_MORE 0x2000 +#define IPV4_FRAG_OFFSET_MASK 0x1fff + +#define IPV4_ADDR_STR_LEN 16 + + +BWL_PRE_PACKED_STRUCT struct ipv4_addr { + uint8 addr[IPV4_ADDR_LEN]; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct ipv4_hdr { + uint8 version_ihl; + uint8 tos; + uint16 tot_len; + uint16 id; + uint16 frag; + uint8 ttl; + uint8 prot; + uint16 hdr_chksum; + uint8 src_ip[IPV4_ADDR_LEN]; + uint8 dst_ip[IPV4_ADDR_LEN]; +} BWL_POST_PACKED_STRUCT; + + 
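/*
 * Illustrative usage sketch (not part of the original bcmip.h patch): how the
 * IPv4 accessor macros defined above are typically applied to a raw IP header
 * buffer. The helper name ip_hdr_example_fields() and its output parameters
 * are hypothetical; only the IP_VER / IPV4_* macros come from this header.
 */
static inline void ip_hdr_example_fields(const uint8 *ip_body,
	uint8 *dscp, uint8 *prot, uint32 *hdr_len)
{
	if (IP_VER(ip_body) == IP_VER_4) {
		/* DSCP occupies the upper six bits of the TOS octet */
		*dscp = (IPV4_TOS(ip_body) & IPV4_TOS_DSCP_MASK) >> IPV4_TOS_DSCP_SHIFT;
		/* Protocol field, e.g. IP_PROT_TCP or IP_PROT_UDP */
		*prot = IPV4_PROT(ip_body);
		/* IHL is stored in 32-bit words; IPV4_HLEN() converts it to bytes */
		*hdr_len = IPV4_HLEN(ip_body);
	} else {
		/* Not IPv4; the IPv6 accessors below would apply instead */
		*dscp = 0;
		*prot = 0;
		*hdr_len = 0;
	}
}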
+#define IPV6_PAYLOAD_LEN_OFFSET 4 +#define IPV6_NEXT_HDR_OFFSET 6 +#define IPV6_HOP_LIMIT_OFFSET 7 +#define IPV6_SRC_IP_OFFSET 8 +#define IPV6_DEST_IP_OFFSET 24 + + +#define IPV6_TRAFFIC_CLASS(ipv6_body) \ + (((((uint8 *)(ipv6_body))[0] & 0x0f) << 4) | \ + ((((uint8 *)(ipv6_body))[1] & 0xf0) >> 4)) + +#define IPV6_FLOW_LABEL(ipv6_body) \ + (((((uint8 *)(ipv6_body))[1] & 0x0f) << 16) | \ + (((uint8 *)(ipv6_body))[2] << 8) | \ + (((uint8 *)(ipv6_body))[3])) + +#define IPV6_PAYLOAD_LEN(ipv6_body) \ + ((((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 0] << 8) | \ + ((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 1]) + +#define IPV6_NEXT_HDR(ipv6_body) \ + (((uint8 *)(ipv6_body))[IPV6_NEXT_HDR_OFFSET]) + +#define IPV6_PROT(ipv6_body) IPV6_NEXT_HDR(ipv6_body) + +#define IPV6_ADDR_LEN 16 + + +#define IP_TOS46(ip_body) \ + (IP_VER(ip_body) == IP_VER_4 ? IPV4_TOS(ip_body) : \ + IP_VER(ip_body) == IP_VER_6 ? IPV6_TRAFFIC_CLASS(ip_body) : 0) + + +#include + +#endif diff --git a/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h b/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h new file mode 100644 index 0000000000000..91ab4fe538f21 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h @@ -0,0 +1,442 @@ +/* + * BT-AMP (BlueTooth Alternate Mac and Phy) HCI (Host/Controller Interface) + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: bt_amp_hci.h 277737 2011-08-16 17:54:59Z $ +*/ + +#ifndef _bt_amp_hci_h +#define _bt_amp_hci_h + +/* This marks the start of a packed structure section. 
*/ +#include + + +/* AMP HCI CMD packet format */ +typedef BWL_PRE_PACKED_STRUCT struct amp_hci_cmd { + uint16 opcode; + uint8 plen; + uint8 parms[1]; +} BWL_POST_PACKED_STRUCT amp_hci_cmd_t; + +#define HCI_CMD_PREAMBLE_SIZE OFFSETOF(amp_hci_cmd_t, parms) +#define HCI_CMD_DATA_SIZE 255 + +/* AMP HCI CMD opcode layout */ +#define HCI_CMD_OPCODE(ogf, ocf) ((((ogf) & 0x3F) << 10) | ((ocf) & 0x03FF)) +#define HCI_CMD_OGF(opcode) ((uint8)(((opcode) >> 10) & 0x3F)) +#define HCI_CMD_OCF(opcode) ((opcode) & 0x03FF) + +/* AMP HCI command opcodes */ +#define HCI_Read_Failed_Contact_Counter HCI_CMD_OPCODE(0x05, 0x0001) +#define HCI_Reset_Failed_Contact_Counter HCI_CMD_OPCODE(0x05, 0x0002) +#define HCI_Read_Link_Quality HCI_CMD_OPCODE(0x05, 0x0003) +#define HCI_Read_Local_AMP_Info HCI_CMD_OPCODE(0x05, 0x0009) +#define HCI_Read_Local_AMP_ASSOC HCI_CMD_OPCODE(0x05, 0x000A) +#define HCI_Write_Remote_AMP_ASSOC HCI_CMD_OPCODE(0x05, 0x000B) +#define HCI_Create_Physical_Link HCI_CMD_OPCODE(0x01, 0x0035) +#define HCI_Accept_Physical_Link_Request HCI_CMD_OPCODE(0x01, 0x0036) +#define HCI_Disconnect_Physical_Link HCI_CMD_OPCODE(0x01, 0x0037) +#define HCI_Create_Logical_Link HCI_CMD_OPCODE(0x01, 0x0038) +#define HCI_Accept_Logical_Link HCI_CMD_OPCODE(0x01, 0x0039) +#define HCI_Disconnect_Logical_Link HCI_CMD_OPCODE(0x01, 0x003A) +#define HCI_Logical_Link_Cancel HCI_CMD_OPCODE(0x01, 0x003B) +#define HCI_Flow_Spec_Modify HCI_CMD_OPCODE(0x01, 0x003C) +#define HCI_Write_Flow_Control_Mode HCI_CMD_OPCODE(0x01, 0x0067) +#define HCI_Read_Best_Effort_Flush_Timeout HCI_CMD_OPCODE(0x01, 0x0069) +#define HCI_Write_Best_Effort_Flush_Timeout HCI_CMD_OPCODE(0x01, 0x006A) +#define HCI_Short_Range_Mode HCI_CMD_OPCODE(0x01, 0x006B) +#define HCI_Reset HCI_CMD_OPCODE(0x03, 0x0003) +#define HCI_Read_Connection_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0015) +#define HCI_Write_Connection_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0016) +#define HCI_Read_Link_Supervision_Timeout HCI_CMD_OPCODE(0x03, 0x0036) +#define HCI_Write_Link_Supervision_Timeout HCI_CMD_OPCODE(0x03, 0x0037) +#define HCI_Enhanced_Flush HCI_CMD_OPCODE(0x03, 0x005F) +#define HCI_Read_Logical_Link_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0061) +#define HCI_Write_Logical_Link_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0062) +#define HCI_Set_Event_Mask_Page_2 HCI_CMD_OPCODE(0x03, 0x0063) +#define HCI_Read_Location_Data_Command HCI_CMD_OPCODE(0x03, 0x0064) +#define HCI_Write_Location_Data_Command HCI_CMD_OPCODE(0x03, 0x0065) +#define HCI_Read_Local_Version_Info HCI_CMD_OPCODE(0x04, 0x0001) +#define HCI_Read_Local_Supported_Commands HCI_CMD_OPCODE(0x04, 0x0002) +#define HCI_Read_Buffer_Size HCI_CMD_OPCODE(0x04, 0x0005) +#define HCI_Read_Data_Block_Size HCI_CMD_OPCODE(0x04, 0x000A) + +/* AMP HCI command parameters */ +typedef BWL_PRE_PACKED_STRUCT struct read_local_cmd_parms { + uint8 plh; + uint8 offset[2]; /* length so far */ + uint8 max_remote[2]; +} BWL_POST_PACKED_STRUCT read_local_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct write_remote_cmd_parms { + uint8 plh; + uint8 offset[2]; + uint8 len[2]; + uint8 frag[1]; +} BWL_POST_PACKED_STRUCT write_remote_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct phy_link_cmd_parms { + uint8 plh; + uint8 key_length; + uint8 key_type; + uint8 key[1]; +} BWL_POST_PACKED_STRUCT phy_link_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct dis_phy_link_cmd_parms { + uint8 plh; + uint8 reason; +} BWL_POST_PACKED_STRUCT dis_phy_link_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct log_link_cmd_parms { + uint8 plh; + uint8 txflow[16]; + 
uint8 rxflow[16]; +} BWL_POST_PACKED_STRUCT log_link_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct ext_flow_spec { + uint8 id; + uint8 service_type; + uint8 max_sdu[2]; + uint8 sdu_ia_time[4]; + uint8 access_latency[4]; + uint8 flush_timeout[4]; +} BWL_POST_PACKED_STRUCT ext_flow_spec_t; + +typedef BWL_PRE_PACKED_STRUCT struct log_link_cancel_cmd_parms { + uint8 plh; + uint8 tx_fs_ID; +} BWL_POST_PACKED_STRUCT log_link_cancel_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct flow_spec_mod_cmd_parms { + uint8 llh[2]; + uint8 txflow[16]; + uint8 rxflow[16]; +} BWL_POST_PACKED_STRUCT flow_spec_mod_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct plh_pad { + uint8 plh; + uint8 pad; +} BWL_POST_PACKED_STRUCT plh_pad_t; + +typedef BWL_PRE_PACKED_STRUCT union hci_handle { + uint16 bredr; + plh_pad_t amp; +} BWL_POST_PACKED_STRUCT hci_handle_t; + +typedef BWL_PRE_PACKED_STRUCT struct ls_to_cmd_parms { + hci_handle_t handle; + uint8 timeout[2]; +} BWL_POST_PACKED_STRUCT ls_to_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct befto_cmd_parms { + uint8 llh[2]; + uint8 befto[4]; +} BWL_POST_PACKED_STRUCT befto_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct srm_cmd_parms { + uint8 plh; + uint8 srm; +} BWL_POST_PACKED_STRUCT srm_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct ld_cmd_parms { + uint8 ld_aware; + uint8 ld[2]; + uint8 ld_opts; + uint8 l_opts; +} BWL_POST_PACKED_STRUCT ld_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct eflush_cmd_parms { + uint8 llh[2]; + uint8 packet_type; +} BWL_POST_PACKED_STRUCT eflush_cmd_parms_t; + +/* Generic AMP extended flow spec service types */ +#define EFS_SVCTYPE_NO_TRAFFIC 0 +#define EFS_SVCTYPE_BEST_EFFORT 1 +#define EFS_SVCTYPE_GUARANTEED 2 + +/* AMP HCI event packet format */ +typedef BWL_PRE_PACKED_STRUCT struct amp_hci_event { + uint8 ecode; + uint8 plen; + uint8 parms[1]; +} BWL_POST_PACKED_STRUCT amp_hci_event_t; + +#define HCI_EVT_PREAMBLE_SIZE OFFSETOF(amp_hci_event_t, parms) + +/* AMP HCI event codes */ +#define HCI_Command_Complete 0x0E +#define HCI_Command_Status 0x0F +#define HCI_Flush_Occurred 0x11 +#define HCI_Enhanced_Flush_Complete 0x39 +#define HCI_Physical_Link_Complete 0x40 +#define HCI_Channel_Select 0x41 +#define HCI_Disconnect_Physical_Link_Complete 0x42 +#define HCI_Logical_Link_Complete 0x45 +#define HCI_Disconnect_Logical_Link_Complete 0x46 +#define HCI_Flow_Spec_Modify_Complete 0x47 +#define HCI_Number_of_Completed_Data_Blocks 0x48 +#define HCI_Short_Range_Mode_Change_Complete 0x4C +#define HCI_Status_Change_Event 0x4D +#define HCI_Vendor_Specific 0xFF + +/* AMP HCI event mask bit positions */ +#define HCI_Physical_Link_Complete_Event_Mask 0x0001 +#define HCI_Channel_Select_Event_Mask 0x0002 +#define HCI_Disconnect_Physical_Link_Complete_Event_Mask 0x0004 +#define HCI_Logical_Link_Complete_Event_Mask 0x0020 +#define HCI_Disconnect_Logical_Link_Complete_Event_Mask 0x0040 +#define HCI_Flow_Spec_Modify_Complete_Event_Mask 0x0080 +#define HCI_Number_of_Completed_Data_Blocks_Event_Mask 0x0100 +#define HCI_Short_Range_Mode_Change_Complete_Event_Mask 0x1000 +#define HCI_Status_Change_Event_Mask 0x2000 +#define HCI_All_Event_Mask 0x31e7 + +/* AMP HCI event parameters */ +typedef BWL_PRE_PACKED_STRUCT struct cmd_status_parms { + uint8 status; + uint8 cmdpkts; + uint16 opcode; +} BWL_POST_PACKED_STRUCT cmd_status_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct cmd_complete_parms { + uint8 cmdpkts; + uint16 opcode; + uint8 parms[1]; +} BWL_POST_PACKED_STRUCT cmd_complete_parms_t; + +typedef BWL_PRE_PACKED_STRUCT 
struct flush_occurred_evt_parms { + uint16 handle; +} BWL_POST_PACKED_STRUCT flush_occurred_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct write_remote_evt_parms { + uint8 status; + uint8 plh; +} BWL_POST_PACKED_STRUCT write_remote_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct read_local_evt_parms { + uint8 status; + uint8 plh; + uint16 len; + uint8 frag[1]; +} BWL_POST_PACKED_STRUCT read_local_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct read_local_info_evt_parms { + uint8 status; + uint8 AMP_status; + uint32 bandwidth; + uint32 gbandwidth; + uint32 latency; + uint32 PDU_size; + uint8 ctrl_type; + uint16 PAL_cap; + uint16 AMP_ASSOC_len; + uint32 max_flush_timeout; + uint32 be_flush_timeout; +} BWL_POST_PACKED_STRUCT read_local_info_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct log_link_evt_parms { + uint8 status; + uint16 llh; + uint8 plh; + uint8 tx_fs_ID; +} BWL_POST_PACKED_STRUCT log_link_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct disc_log_link_evt_parms { + uint8 status; + uint16 llh; + uint8 reason; +} BWL_POST_PACKED_STRUCT disc_log_link_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct log_link_cancel_evt_parms { + uint8 status; + uint8 plh; + uint8 tx_fs_ID; +} BWL_POST_PACKED_STRUCT log_link_cancel_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct flow_spec_mod_evt_parms { + uint8 status; + uint16 llh; +} BWL_POST_PACKED_STRUCT flow_spec_mod_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct phy_link_evt_parms { + uint8 status; + uint8 plh; +} BWL_POST_PACKED_STRUCT phy_link_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct dis_phy_link_evt_parms { + uint8 status; + uint8 plh; + uint8 reason; +} BWL_POST_PACKED_STRUCT dis_phy_link_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct read_ls_to_evt_parms { + uint8 status; + hci_handle_t handle; + uint16 timeout; +} BWL_POST_PACKED_STRUCT read_ls_to_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct read_lla_ca_to_evt_parms { + uint8 status; + uint16 timeout; +} BWL_POST_PACKED_STRUCT read_lla_ca_to_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct read_data_block_size_evt_parms { + uint8 status; + uint16 ACL_pkt_len; + uint16 data_block_len; + uint16 data_block_num; +} BWL_POST_PACKED_STRUCT read_data_block_size_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct data_blocks { + uint16 handle; + uint16 pkts; + uint16 blocks; +} BWL_POST_PACKED_STRUCT data_blocks_t; + +typedef BWL_PRE_PACKED_STRUCT struct num_completed_data_blocks_evt_parms { + uint16 num_blocks; + uint8 num_handles; + data_blocks_t completed[1]; +} BWL_POST_PACKED_STRUCT num_completed_data_blocks_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct befto_evt_parms { + uint8 status; + uint32 befto; +} BWL_POST_PACKED_STRUCT befto_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct srm_evt_parms { + uint8 status; + uint8 plh; + uint8 srm; +} BWL_POST_PACKED_STRUCT srm_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct contact_counter_evt_parms { + uint8 status; + uint8 llh[2]; + uint16 counter; +} BWL_POST_PACKED_STRUCT contact_counter_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct contact_counter_reset_evt_parms { + uint8 status; + uint8 llh[2]; +} BWL_POST_PACKED_STRUCT contact_counter_reset_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct read_linkq_evt_parms { + uint8 status; + hci_handle_t handle; + uint8 link_quality; +} BWL_POST_PACKED_STRUCT read_linkq_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct ld_evt_parms { + uint8 status; + uint8 ld_aware; + uint8 ld[2]; + uint8 ld_opts; + uint8 
l_opts; +} BWL_POST_PACKED_STRUCT ld_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct eflush_complete_evt_parms { + uint16 handle; +} BWL_POST_PACKED_STRUCT eflush_complete_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct vendor_specific_evt_parms { + uint8 len; + uint8 parms[1]; +} BWL_POST_PACKED_STRUCT vendor_specific_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct local_version_info_evt_parms { + uint8 status; + uint8 hci_version; + uint16 hci_revision; + uint8 pal_version; + uint16 mfg_name; + uint16 pal_subversion; +} BWL_POST_PACKED_STRUCT local_version_info_evt_parms_t; + +#define MAX_SUPPORTED_CMD_BYTE 64 +typedef BWL_PRE_PACKED_STRUCT struct local_supported_cmd_evt_parms { + uint8 status; + uint8 cmd[MAX_SUPPORTED_CMD_BYTE]; +} BWL_POST_PACKED_STRUCT local_supported_cmd_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct status_change_evt_parms { + uint8 status; + uint8 amp_status; +} BWL_POST_PACKED_STRUCT status_change_evt_parms_t; + +/* AMP HCI error codes */ +#define HCI_SUCCESS 0x00 +#define HCI_ERR_ILLEGAL_COMMAND 0x01 +#define HCI_ERR_NO_CONNECTION 0x02 +#define HCI_ERR_MEMORY_FULL 0x07 +#define HCI_ERR_CONNECTION_TIMEOUT 0x08 +#define HCI_ERR_MAX_NUM_OF_CONNECTIONS 0x09 +#define HCI_ERR_CONNECTION_EXISTS 0x0B +#define HCI_ERR_CONNECTION_DISALLOWED 0x0C +#define HCI_ERR_CONNECTION_ACCEPT_TIMEOUT 0x10 +#define HCI_ERR_UNSUPPORTED_VALUE 0x11 +#define HCI_ERR_ILLEGAL_PARAMETER_FMT 0x12 +#define HCI_ERR_CONN_TERM_BY_LOCAL_HOST 0x16 +#define HCI_ERR_UNSPECIFIED 0x1F +#define HCI_ERR_UNIT_KEY_USED 0x26 +#define HCI_ERR_QOS_REJECTED 0x2D +#define HCI_ERR_PARAM_OUT_OF_RANGE 0x30 +#define HCI_ERR_NO_SUITABLE_CHANNEL 0x39 +#define HCI_ERR_CHANNEL_MOVE 0xFF + +/* AMP HCI ACL Data packet format */ +typedef BWL_PRE_PACKED_STRUCT struct amp_hci_ACL_data { + uint16 handle; /* 12-bit connection handle + 2-bit PB and 2-bit BC flags */ + uint16 dlen; /* data total length */ + uint8 data[1]; +} BWL_POST_PACKED_STRUCT amp_hci_ACL_data_t; + +#define HCI_ACL_DATA_PREAMBLE_SIZE OFFSETOF(amp_hci_ACL_data_t, data) + +#define HCI_ACL_DATA_BC_FLAGS (0x0 << 14) +#define HCI_ACL_DATA_PB_FLAGS (0x3 << 12) + +#define HCI_ACL_DATA_HANDLE(handle) ((handle) & 0x0fff) +#define HCI_ACL_DATA_FLAGS(handle) ((handle) >> 12) + +/* AMP Activity Report packet formats */ +typedef BWL_PRE_PACKED_STRUCT struct amp_hci_activity_report { + uint8 ScheduleKnown; + uint8 NumReports; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT amp_hci_activity_report_t; + +typedef BWL_PRE_PACKED_STRUCT struct amp_hci_activity_report_triple { + uint32 StartTime; + uint32 Duration; + uint32 Periodicity; +} BWL_POST_PACKED_STRUCT amp_hci_activity_report_triple_t; + +#define HCI_AR_SCHEDULE_KNOWN 0x01 + + +/* This marks the end of a packed structure section. */ +#include + +#endif /* _bt_amp_hci_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/eapol.h b/drivers/net/wireless/bcmdhd/include/proto/eapol.h new file mode 100644 index 0000000000000..92634c1221a64 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/eapol.h @@ -0,0 +1,173 @@ +/* + * 802.1x EAPOL definitions + * + * See + * IEEE Std 802.1X-2001 + * IEEE 802.1X RADIUS Usage Guidelines + * + * Copyright (C) 2002 Broadcom Corporation + * + * $Id: eapol.h 277737 2011-08-16 17:54:59Z $ + */ + +#ifndef _eapol_h_ +#define _eapol_h_ + +#ifndef _TYPEDEFS_H_ +#include +#endif + +/* This marks the start of a packed structure section. 
*/ +#include + +#include + +/* EAPOL for 802.3/Ethernet */ +typedef struct { + struct ether_header eth; /* 802.3/Ethernet header */ + unsigned char version; /* EAPOL protocol version */ + unsigned char type; /* EAPOL type */ + unsigned short length; /* Length of body */ + unsigned char body[1]; /* Body (optional) */ +} eapol_header_t; + +#define EAPOL_HEADER_LEN 18 + +/* EAPOL version */ +#define WPA2_EAPOL_VERSION 2 +#define WPA_EAPOL_VERSION 1 +#define LEAP_EAPOL_VERSION 1 +#define SES_EAPOL_VERSION 1 + +/* EAPOL types */ +#define EAP_PACKET 0 +#define EAPOL_START 1 +#define EAPOL_LOGOFF 2 +#define EAPOL_KEY 3 +#define EAPOL_ASF 4 + +/* EAPOL-Key types */ +#define EAPOL_RC4_KEY 1 +#define EAPOL_WPA2_KEY 2 /* 802.11i/WPA2 */ +#define EAPOL_WPA_KEY 254 /* WPA */ + +/* RC4 EAPOL-Key header field sizes */ +#define EAPOL_KEY_REPLAY_LEN 8 +#define EAPOL_KEY_IV_LEN 16 +#define EAPOL_KEY_SIG_LEN 16 + +/* RC4 EAPOL-Key */ +typedef BWL_PRE_PACKED_STRUCT struct { + unsigned char type; /* Key Descriptor Type */ + unsigned short length; /* Key Length (unaligned) */ + unsigned char replay[EAPOL_KEY_REPLAY_LEN]; /* Replay Counter */ + unsigned char iv[EAPOL_KEY_IV_LEN]; /* Key IV */ + unsigned char index; /* Key Flags & Index */ + unsigned char signature[EAPOL_KEY_SIG_LEN]; /* Key Signature */ + unsigned char key[1]; /* Key (optional) */ +} BWL_POST_PACKED_STRUCT eapol_key_header_t; + +#define EAPOL_KEY_HEADER_LEN 44 + +/* RC4 EAPOL-Key flags */ +#define EAPOL_KEY_FLAGS_MASK 0x80 +#define EAPOL_KEY_BROADCAST 0 +#define EAPOL_KEY_UNICAST 0x80 + +/* RC4 EAPOL-Key index */ +#define EAPOL_KEY_INDEX_MASK 0x7f + +/* WPA/802.11i/WPA2 EAPOL-Key header field sizes */ +#define EAPOL_WPA_KEY_REPLAY_LEN 8 +#define EAPOL_WPA_KEY_NONCE_LEN 32 +#define EAPOL_WPA_KEY_IV_LEN 16 +#define EAPOL_WPA_KEY_RSC_LEN 8 +#define EAPOL_WPA_KEY_ID_LEN 8 +#define EAPOL_WPA_KEY_MIC_LEN 16 +#define EAPOL_WPA_KEY_DATA_LEN (EAPOL_WPA_MAX_KEY_SIZE + AKW_BLOCK_LEN) +#define EAPOL_WPA_MAX_KEY_SIZE 32 + +/* WPA EAPOL-Key */ +typedef BWL_PRE_PACKED_STRUCT struct { + unsigned char type; /* Key Descriptor Type */ + unsigned short key_info; /* Key Information (unaligned) */ + unsigned short key_len; /* Key Length (unaligned) */ + unsigned char replay[EAPOL_WPA_KEY_REPLAY_LEN]; /* Replay Counter */ + unsigned char nonce[EAPOL_WPA_KEY_NONCE_LEN]; /* Nonce */ + unsigned char iv[EAPOL_WPA_KEY_IV_LEN]; /* Key IV */ + unsigned char rsc[EAPOL_WPA_KEY_RSC_LEN]; /* Key RSC */ + unsigned char id[EAPOL_WPA_KEY_ID_LEN]; /* WPA:Key ID, 802.11i/WPA2: Reserved */ + unsigned char mic[EAPOL_WPA_KEY_MIC_LEN]; /* Key MIC */ + unsigned short data_len; /* Key Data Length */ + unsigned char data[EAPOL_WPA_KEY_DATA_LEN]; /* Key data */ +} BWL_POST_PACKED_STRUCT eapol_wpa_key_header_t; + +#define EAPOL_WPA_KEY_LEN 95 + +/* WPA/802.11i/WPA2 KEY KEY_INFO bits */ +#define WPA_KEY_DESC_V1 0x01 +#define WPA_KEY_DESC_V2 0x02 +#define WPA_KEY_DESC_V3 0x03 +#define WPA_KEY_PAIRWISE 0x08 +#define WPA_KEY_INSTALL 0x40 +#define WPA_KEY_ACK 0x80 +#define WPA_KEY_MIC 0x100 +#define WPA_KEY_SECURE 0x200 +#define WPA_KEY_ERROR 0x400 +#define WPA_KEY_REQ 0x800 + +/* WPA-only KEY KEY_INFO bits */ +#define WPA_KEY_INDEX_0 0x00 +#define WPA_KEY_INDEX_1 0x10 +#define WPA_KEY_INDEX_2 0x20 +#define WPA_KEY_INDEX_3 0x30 +#define WPA_KEY_INDEX_MASK 0x30 +#define WPA_KEY_INDEX_SHIFT 0x04 + +/* 802.11i/WPA2-only KEY KEY_INFO bits */ +#define WPA_KEY_ENCRYPTED_DATA 0x1000 + +/* Key Data encapsulation */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 type; + uint8 length; + uint8 oui[3]; + 
uint8 subtype; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT eapol_wpa2_encap_data_t; + +#define EAPOL_WPA2_ENCAP_DATA_HDR_LEN 6 + +#define WPA2_KEY_DATA_SUBTYPE_GTK 1 +#define WPA2_KEY_DATA_SUBTYPE_STAKEY 2 +#define WPA2_KEY_DATA_SUBTYPE_MAC 3 +#define WPA2_KEY_DATA_SUBTYPE_PMKID 4 + +/* GTK encapsulation */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 flags; + uint8 reserved; + uint8 gtk[EAPOL_WPA_MAX_KEY_SIZE]; +} BWL_POST_PACKED_STRUCT eapol_wpa2_key_gtk_encap_t; + +#define EAPOL_WPA2_KEY_GTK_ENCAP_HDR_LEN 2 + +#define WPA2_GTK_INDEX_MASK 0x03 +#define WPA2_GTK_INDEX_SHIFT 0x00 + +#define WPA2_GTK_TRANSMIT 0x04 + +/* STAKey encapsulation */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 reserved[2]; + uint8 mac[ETHER_ADDR_LEN]; + uint8 stakey[EAPOL_WPA_MAX_KEY_SIZE]; +} BWL_POST_PACKED_STRUCT eapol_wpa2_key_stakey_encap_t; + +#define WPA2_KEY_DATA_PAD 0xdd + + +/* This marks the end of a packed structure section. */ +#include + +#endif /* _eapol_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/ethernet.h b/drivers/net/wireless/bcmdhd/include/proto/ethernet.h new file mode 100644 index 0000000000000..20865dc5a2318 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/ethernet.h @@ -0,0 +1,163 @@ +/* + * From FreeBSD 2.2.7: Fundamental constants relating to ethernet. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: ethernet.h 285437 2011-09-21 22:16:56Z $ + */ + + +#ifndef _NET_ETHERNET_H_ +#define _NET_ETHERNET_H_ + +#ifndef _TYPEDEFS_H_ +#include "typedefs.h" +#endif + + +#include + + + +#define ETHER_ADDR_LEN 6 + + +#define ETHER_TYPE_LEN 2 + + +#define ETHER_CRC_LEN 4 + + +#define ETHER_HDR_LEN (ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN) + + +#define ETHER_MIN_LEN 64 + + +#define ETHER_MIN_DATA 46 + + +#define ETHER_MAX_LEN 1518 + + +#define ETHER_MAX_DATA 1500 + + +#define ETHER_TYPE_MIN 0x0600 +#define ETHER_TYPE_IP 0x0800 +#define ETHER_TYPE_ARP 0x0806 +#define ETHER_TYPE_8021Q 0x8100 +#define ETHER_TYPE_BRCM 0x886c +#define ETHER_TYPE_802_1X 0x888e +#define ETHER_TYPE_802_1X_PREAUTH 0x88c7 +#define ETHER_TYPE_WAI 0x88b4 +#define ETHER_TYPE_89_0D 0x890d + + + +#define ETHER_BRCM_SUBTYPE_LEN 4 +#define ETHER_BRCM_CRAM 1 + + +#define ETHER_DEST_OFFSET (0 * ETHER_ADDR_LEN) +#define ETHER_SRC_OFFSET (1 * ETHER_ADDR_LEN) +#define ETHER_TYPE_OFFSET (2 * ETHER_ADDR_LEN) + + +#define ETHER_IS_VALID_LEN(foo) \ + ((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN) + +#define ETHER_FILL_MCAST_ADDR_FROM_IP(ea, mgrp_ip) { \ + ((uint8 *)ea)[0] = 0x01; \ + ((uint8 *)ea)[1] = 0x00; \ + ((uint8 *)ea)[2] = 0x5e; \ + ((uint8 *)ea)[3] = ((mgrp_ip) >> 16) & 0x7f; \ + ((uint8 *)ea)[4] = ((mgrp_ip) >> 8) & 0xff; \ + ((uint8 *)ea)[5] = ((mgrp_ip) >> 0) & 0xff; \ +} + +#ifndef __INCif_etherh + +BWL_PRE_PACKED_STRUCT struct ether_header { + uint8 ether_dhost[ETHER_ADDR_LEN]; + uint8 ether_shost[ETHER_ADDR_LEN]; + uint16 ether_type; +} BWL_POST_PACKED_STRUCT; + + +BWL_PRE_PACKED_STRUCT struct ether_addr { + uint8 octet[ETHER_ADDR_LEN]; +} BWL_POST_PACKED_STRUCT; +#endif + + +#define ETHER_SET_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] | 2)) +#define ETHER_IS_LOCALADDR(ea) (((uint8 *)(ea))[0] & 2) +#define ETHER_CLR_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & 0xd)) +#define ETHER_TOGGLE_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] ^ 2)) + + +#define ETHER_SET_UNICAST(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & ~1)) + + +#define ETHER_ISMULTI(ea) (((const uint8 *)(ea))[0] & 1) + + + +#define ether_cmp(a, b) (!(((short*)a)[0] == ((short*)b)[0]) | \ + !(((short*)a)[1] == ((short*)b)[1]) | \ + !(((short*)a)[2] == ((short*)b)[2])) + + +#define ether_copy(s, d) { \ + ((short*)d)[0] = ((short*)s)[0]; \ + ((short*)d)[1] = ((short*)s)[1]; \ + ((short*)d)[2] = ((short*)s)[2]; } + + +static const struct ether_addr ether_bcast = {{255, 255, 255, 255, 255, 255}}; +static const struct ether_addr ether_null = {{0, 0, 0, 0, 0, 0}}; + +#define ETHER_ISBCAST(ea) ((((uint8 *)(ea))[0] & \ + ((uint8 *)(ea))[1] & \ + ((uint8 *)(ea))[2] & \ + ((uint8 *)(ea))[3] & \ + ((uint8 *)(ea))[4] & \ + ((uint8 *)(ea))[5]) == 0xff) +#define ETHER_ISNULLADDR(ea) ((((uint8 *)(ea))[0] | \ + ((uint8 *)(ea))[1] | \ + ((uint8 *)(ea))[2] | \ + ((uint8 *)(ea))[3] | \ + ((uint8 *)(ea))[4] | \ + ((uint8 *)(ea))[5]) == 0) + + +#define ETHER_MOVE_HDR(d, s) \ +do { \ + struct ether_header t; \ + t = *(struct ether_header *)(s); \ + *(struct ether_header *)(d) = t; \ +} while (0) + + +#include + +#endif diff --git a/drivers/net/wireless/bcmdhd/include/proto/p2p.h b/drivers/net/wireless/bcmdhd/include/proto/p2p.h new file mode 100644 index 0000000000000..d2bf3f20688c5 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/p2p.h @@ -0,0 +1,512 @@ +/* + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this 
software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * Fundamental types and constants relating to WFA P2P (aka WiFi Direct) + * + * $Id: p2p.h 277737 2011-08-16 17:54:59Z $ + */ + +#ifndef _P2P_H_ +#define _P2P_H_ + +#ifndef _TYPEDEFS_H_ +#include +#endif +#include +#include + +/* This marks the start of a packed structure section. */ +#include + + +/* WiFi P2P OUI values */ +#define P2P_OUI WFA_OUI /* WiFi P2P OUI */ +#define P2P_VER WFA_OUI_TYPE_P2P /* P2P version: 9=WiFi P2P v1.0 */ + +#define P2P_IE_ID 0xdd /* P2P IE element ID */ + +/* WiFi P2P IE */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_ie { + uint8 id; /* IE ID: 0xDD */ + uint8 len; /* IE length */ + uint8 OUI[3]; /* WiFi P2P specific OUI: P2P_OUI */ + uint8 oui_type; /* Identifies P2P version: P2P_VER */ + uint8 subelts[1]; /* variable length subelements */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_ie wifi_p2p_ie_t; + +#define P2P_IE_FIXED_LEN 6 + +#define P2P_ATTR_ID_OFF 0 +#define P2P_ATTR_LEN_OFF 1 +#define P2P_ATTR_DATA_OFF 3 + +#define P2P_ATTR_HDR_LEN 3 /* ID + 2-byte length field spec 1.02 */ + +/* P2P IE Subelement IDs from WiFi P2P Technical Spec 1.00 */ +#define P2P_SEID_STATUS 0 /* Status */ +#define P2P_SEID_MINOR_RC 1 /* Minor Reason Code */ +#define P2P_SEID_P2P_INFO 2 /* P2P Capability (capabilities info) */ +#define P2P_SEID_DEV_ID 3 /* P2P Device ID */ +#define P2P_SEID_INTENT 4 /* Group Owner Intent */ +#define P2P_SEID_CFG_TIMEOUT 5 /* Configuration Timeout */ +#define P2P_SEID_CHANNEL 6 /* Channel */ +#define P2P_SEID_GRP_BSSID 7 /* P2P Group BSSID */ +#define P2P_SEID_XT_TIMING 8 /* Extended Listen Timing */ +#define P2P_SEID_INTINTADDR 9 /* Intended P2P Interface Address */ +#define P2P_SEID_P2P_MGBTY 10 /* P2P Manageability */ +#define P2P_SEID_CHAN_LIST 11 /* Channel List */ +#define P2P_SEID_ABSENCE 12 /* Notice of Absence */ +#define P2P_SEID_DEV_INFO 13 /* Device Info */ +#define P2P_SEID_GROUP_INFO 14 /* Group Info */ +#define P2P_SEID_GROUP_ID 15 /* Group ID */ +#define P2P_SEID_P2P_IF 16 /* P2P Interface */ +#define P2P_SEID_VNDR 221 /* Vendor-specific subelement */ + +#define P2P_SE_VS_ID_SERVICES 0x1b /* BRCM proprietary subel: L2 Services */ + + +/* WiFi P2P IE subelement: P2P Capability (capabilities info) */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_info_se_s { + uint8 eltId; /* SE ID: P2P_SEID_P2P_INFO */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 dev; /* Device Capability Bitmap */ + uint8 group; /* Group Capability Bitmap */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_info_se_s wifi_p2p_info_se_t; + +/* P2P Capability subelement's Device 
Capability Bitmap bit values */ +#define P2P_CAPSE_DEV_SERVICE_DIS 0x1 /* Service Discovery */ +#define P2P_CAPSE_DEV_CLIENT_DIS 0x2 /* Client Discoverability */ +#define P2P_CAPSE_DEV_CONCURRENT 0x4 /* Concurrent Operation */ +#define P2P_CAPSE_DEV_INFRA_MAN 0x8 /* P2P Infrastructure Managed */ +#define P2P_CAPSE_DEV_LIMIT 0x10 /* P2P Device Limit */ +#define P2P_CAPSE_INVITE_PROC 0x20 /* P2P Invitation Procedure */ + +/* P2P Capability subelement's Group Capability Bitmap bit values */ +#define P2P_CAPSE_GRP_OWNER 0x1 /* P2P Group Owner */ +#define P2P_CAPSE_PERSIST_GRP 0x2 /* Persistent P2P Group */ +#define P2P_CAPSE_GRP_LIMIT 0x4 /* P2P Group Limit */ +#define P2P_CAPSE_GRP_INTRA_BSS 0x8 /* Intra-BSS Distribution */ +#define P2P_CAPSE_GRP_X_CONNECT 0x10 /* Cross Connection */ +#define P2P_CAPSE_GRP_PERSISTENT 0x20 /* Persistent Reconnect */ +#define P2P_CAPSE_GRP_FORMATION 0x40 /* Group Formation */ + + +/* WiFi P2P IE subelement: Group Owner Intent */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_intent_se_s { + uint8 eltId; /* SE ID: P2P_SEID_INTENT */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 intent; /* Intent Value 0...15 (0=legacy 15=master only) */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_intent_se_s wifi_p2p_intent_se_t; + +/* WiFi P2P IE subelement: Configuration Timeout */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_cfg_tmo_se_s { + uint8 eltId; /* SE ID: P2P_SEID_CFG_TIMEOUT */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 go_tmo; /* GO config timeout in units of 10 ms */ + uint8 client_tmo; /* Client config timeout in units of 10 ms */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_cfg_tmo_se_s wifi_p2p_cfg_tmo_se_t; + + +/* WiFi P2P IE subelement: Status */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_status_se_s { + uint8 eltId; /* SE ID: P2P_SEID_STATUS */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 status; /* Status Code: P2P_STATSE_* */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_status_se_s wifi_p2p_status_se_t; + +/* Status subelement Status Code definitions */ +#define P2P_STATSE_SUCCESS 0 + /* Success */ +#define P2P_STATSE_FAIL_INFO_CURR_UNAVAIL 1 + /* Failed, information currently unavailable */ +#define P2P_STATSE_PASSED_UP P2P_STATSE_FAIL_INFO_CURR_UNAVAIL + /* Old name for above in P2P spec 1.08 and older */ +#define P2P_STATSE_FAIL_INCOMPAT_PARAMS 2 + /* Failed, incompatible parameters */ +#define P2P_STATSE_FAIL_LIMIT_REACHED 3 + /* Failed, limit reached */ +#define P2P_STATSE_FAIL_INVALID_PARAMS 4 + /* Failed, invalid parameters */ +#define P2P_STATSE_FAIL_UNABLE_TO_ACCOM 5 + /* Failed, unable to accomodate request */ +#define P2P_STATSE_FAIL_PROTO_ERROR 6 + /* Failed, previous protocol error or disruptive behaviour */ +#define P2P_STATSE_FAIL_NO_COMMON_CHAN 7 + /* Failed, no common channels */ +#define P2P_STATSE_FAIL_UNKNOWN_GROUP 8 + /* Failed, unknown P2P Group */ +#define P2P_STATSE_FAIL_INTENT 9 + /* Failed, both peers indicated Intent 15 in GO Negotiation */ +#define P2P_STATSE_FAIL_INCOMPAT_PROVIS 10 + /* Failed, incompatible provisioning method */ +#define P2P_STATSE_FAIL_USER_REJECT 11 + /* Failed, rejected by user */ + +/* WiFi P2P IE attribute: Extended Listen Timing */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_ext_se_s { + uint8 eltId; /* ID: P2P_SEID_EXT_TIMING */ + uint8 len[2]; /* length not including eltId, len fields */ + uint8 avail[2]; /* availibility period */ + uint8 interval[2]; /* availibility interval */ +} BWL_POST_PACKED_STRUCT; +typedef struct 
wifi_p2p_ext_se_s wifi_p2p_ext_se_t; + +#define P2P_EXT_MIN 10 /* minimum 10ms */ + +/* WiFi P2P IE subelement: Intended P2P Interface Address */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_intintad_se_s { + uint8 eltId; /* SE ID: P2P_SEID_INTINTADDR */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 mac[6]; /* intended P2P interface MAC address */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_intintad_se_s wifi_p2p_intintad_se_t; + +/* WiFi P2P IE subelement: Channel */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_channel_se_s { + uint8 eltId; /* SE ID: P2P_SEID_STATUS */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 band; /* Regulatory Class (band) */ + uint8 channel; /* Channel */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_channel_se_s wifi_p2p_channel_se_t; + + +/* Channel Entry structure within the Channel List SE */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_entry_s { + uint8 band; /* Regulatory Class (band) */ + uint8 num_channels; /* # of channels in the channel list */ + uint8 channels[WL_NUMCHANNELS]; /* Channel List */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_chanlist_entry_s wifi_p2p_chanlist_entry_t; +#define WIFI_P2P_CHANLIST_SE_MAX_ENTRIES 2 + +/* WiFi P2P IE subelement: Channel List */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_se_s { + uint8 eltId; /* SE ID: P2P_SEID_STATUS */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 country[3]; /* Country String */ + uint8 num_entries; /* # of channel entries */ + wifi_p2p_chanlist_entry_t entries[WIFI_P2P_CHANLIST_SE_MAX_ENTRIES]; + /* Channel Entry List */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_chanlist_se_s wifi_p2p_chanlist_se_t; + +/* WiFi P2P IE's Device Info subelement */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_devinfo_se_s { + uint8 eltId; /* SE ID: P2P_SEID_DEVINFO */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 mac[6]; /* P2P Device MAC address */ + uint16 wps_cfg_meths; /* Config Methods: reg_prototlv.h WPS_CONFMET_* */ + uint8 pri_devtype[8]; /* Primary Device Type */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_devinfo_se_s wifi_p2p_devinfo_se_t; + +#define P2P_DEV_TYPE_LEN 8 + +/* WiFi P2P IE's Group Info subelement Client Info Descriptor */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_cid_fixed_s { + uint8 len; + uint8 devaddr[ETHER_ADDR_LEN]; /* P2P Device Address */ + uint8 ifaddr[ETHER_ADDR_LEN]; /* P2P Interface Address */ + uint8 devcap; /* Device Capability */ + uint8 cfg_meths[2]; /* Config Methods: reg_prototlv.h WPS_CONFMET_* */ + uint8 pridt[P2P_DEV_TYPE_LEN]; /* Primary Device Type */ + uint8 secdts; /* Number of Secondary Device Types */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_cid_fixed_s wifi_p2p_cid_fixed_t; + +/* WiFi P2P IE's Device ID subelement */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_devid_se_s { + uint8 eltId; + uint8 len[2]; + struct ether_addr addr; /* P2P Device MAC address */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_devid_se_s wifi_p2p_devid_se_t; + +/* WiFi P2P IE subelement: P2P Manageability */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_mgbt_se_s { + uint8 eltId; /* SE ID: P2P_SEID_P2P_MGBTY */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 mg_bitmap; /* manageability bitmap */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_mgbt_se_s wifi_p2p_mgbt_se_t; +/* mg_bitmap field bit values */ +#define P2P_MGBTSE_P2PDEVMGMT_FLAG 0x1 /* AP supports Managed P2P Device */ + +/* WiFi P2P IE subelement: Group Info */ 
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grpinfo_se_s { + uint8 eltId; /* SE ID: P2P_SEID_GROUP_INFO */ + uint8 len[2]; /* SE length not including eltId, len fields */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_grpinfo_se_s wifi_p2p_grpinfo_se_t; + + +/* WiFi P2P Action Frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_action_frame { + uint8 category; /* P2P_AF_CATEGORY */ + uint8 OUI[3]; /* OUI - P2P_OUI */ + uint8 type; /* OUI Type - P2P_VER */ + uint8 subtype; /* OUI Subtype - P2P_AF_* */ + uint8 dialog_token; /* nonzero, identifies req/resp transaction */ + uint8 elts[1]; /* Variable length information elements. Max size = + * ACTION_FRAME_SIZE - sizeof(this structure) - 1 + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_action_frame wifi_p2p_action_frame_t; +#define P2P_AF_CATEGORY 0x7f + +#define P2P_AF_FIXED_LEN 7 + +/* WiFi P2P Action Frame OUI Subtypes */ +#define P2P_AF_NOTICE_OF_ABSENCE 0 /* Notice of Absence */ +#define P2P_AF_PRESENCE_REQ 1 /* P2P Presence Request */ +#define P2P_AF_PRESENCE_RSP 2 /* P2P Presence Response */ +#define P2P_AF_GO_DISC_REQ 3 /* GO Discoverability Request */ + + +/* WiFi P2P Public Action Frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_pub_act_frame { + uint8 category; /* P2P_PUB_AF_CATEGORY */ + uint8 action; /* P2P_PUB_AF_ACTION */ + uint8 oui[3]; /* P2P_OUI */ + uint8 oui_type; /* OUI type - P2P_VER */ + uint8 subtype; /* OUI subtype - P2P_TYPE_* */ + uint8 dialog_token; /* nonzero, identifies req/rsp transaction */ + uint8 elts[1]; /* Variable length information elements. Max size = + * ACTION_FRAME_SIZE - sizeof(this structure) - 1 + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_pub_act_frame wifi_p2p_pub_act_frame_t; +#define P2P_PUB_AF_FIXED_LEN 8 +#define P2P_PUB_AF_CATEGORY 0x04 +#define P2P_PUB_AF_ACTION 0x09 + +/* WiFi P2P Public Action Frame OUI Subtypes */ +#define P2P_PAF_GON_REQ 0 /* Group Owner Negotiation Req */ +#define P2P_PAF_GON_RSP 1 /* Group Owner Negotiation Rsp */ +#define P2P_PAF_GON_CONF 2 /* Group Owner Negotiation Confirm */ +#define P2P_PAF_INVITE_REQ 3 /* P2P Invitation Request */ +#define P2P_PAF_INVITE_RSP 4 /* P2P Invitation Response */ +#define P2P_PAF_DEVDIS_REQ 5 /* Device Discoverability Request */ +#define P2P_PAF_DEVDIS_RSP 6 /* Device Discoverability Response */ +#define P2P_PAF_PROVDIS_REQ 7 /* Provision Discovery Request */ +#define P2P_PAF_PROVDIS_RSP 8 /* Provision Discovery Response */ + +/* TODO: Stop using these obsolete aliases for P2P_PAF_GON_* */ +#define P2P_TYPE_MNREQ P2P_PAF_GON_REQ +#define P2P_TYPE_MNRSP P2P_PAF_GON_RSP +#define P2P_TYPE_MNCONF P2P_PAF_GON_CONF + +/* WiFi P2P IE subelement: Notice of Absence */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_desc { + uint8 cnt_type; /* Count/Type */ + uint32 duration; /* Duration */ + uint32 interval; /* Interval */ + uint32 start; /* Start Time */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_noa_desc wifi_p2p_noa_desc_t; + +BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_se { + uint8 eltId; /* Subelement ID */ + uint8 len[2]; /* Length */ + uint8 index; /* Index */ + uint8 ops_ctw_parms; /* CTWindow and OppPS Parameters */ + wifi_p2p_noa_desc_t desc[1]; /* Notice of Absence Descriptor(s) */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_noa_se wifi_p2p_noa_se_t; + +#define P2P_NOA_SE_FIXED_LEN 5 + +/* cnt_type field values */ +#define P2P_NOA_DESC_CNT_RESERVED 0 /* reserved and should not be used */ +#define P2P_NOA_DESC_CNT_REPEAT 255 /* continuous schedule */ +#define P2P_NOA_DESC_TYPE_PREFERRED 1 /* preferred values */ 
+#define P2P_NOA_DESC_TYPE_ACCEPTABLE 2 /* acceptable limits */ + +/* ctw_ops_parms field values */ +#define P2P_NOA_CTW_MASK 0x7f +#define P2P_NOA_OPS_MASK 0x80 +#define P2P_NOA_OPS_SHIFT 7 + +#define P2P_CTW_MIN 10 /* minimum 10TU */ + +/* + * P2P Service Discovery related + */ +#define P2PSD_ACTION_CATEGORY 0x04 + /* Public action frame */ +#define P2PSD_ACTION_ID_GAS_IREQ 0x0a + /* Action value for GAS Initial Request AF */ +#define P2PSD_ACTION_ID_GAS_IRESP 0x0b + /* Action value for GAS Initial Response AF */ +#define P2PSD_ACTION_ID_GAS_CREQ 0x0c + /* Action value for GAS Comeback Request AF */ +#define P2PSD_ACTION_ID_GAS_CRESP 0x0d + /* Action value for GAS Comeback Response AF */ +#define P2PSD_AD_EID 0x6c + /* Advertisement Protocol IE ID */ +#define P2PSD_ADP_TUPLE_QLMT_PAMEBI 0x00 + /* Query Response Length Limit 7 bits plus PAME-BI 1 bit */ +#define P2PSD_ADP_PROTO_ID 0x00 + /* Advertisement Protocol ID. Always 0 for P2P SD */ +#define P2PSD_GAS_OUI P2P_OUI + /* WFA OUI */ +#define P2PSD_GAS_OUI_SUBTYPE P2P_VER + /* OUI Subtype for GAS IE */ +#define P2PSD_GAS_NQP_INFOID 0xDDDD + /* NQP Query Info ID: 56797 */ +#define P2PSD_GAS_COMEBACKDEALY 0x00 + /* Not used in the Native GAS protocol */ + +/* Service Protocol Type */ +typedef enum p2psd_svc_protype { + SVC_RPOTYPE_ALL = 0, + SVC_RPOTYPE_BONJOUR = 1, + SVC_RPOTYPE_UPNP = 2, + SVC_RPOTYPE_WSD = 3, + SVC_RPOTYPE_VENDOR = 255 +} p2psd_svc_protype_t; + +/* Service Discovery response status code */ +typedef enum { + P2PSD_RESP_STATUS_SUCCESS = 0, + P2PSD_RESP_STATUS_PROTYPE_NA = 1, + P2PSD_RESP_STATUS_DATA_NA = 2, + P2PSD_RESP_STATUS_BAD_REQUEST = 3 +} p2psd_resp_status_t; + +/* Advertisement Protocol IE tuple field */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_tpl { + uint8 llm_pamebi; /* Query Response Length Limit bit 0-6, set to 0 plus + * Pre-Associated Message Exchange BSSID Independent bit 7, set to 0 + */ + uint8 adp_id; /* Advertisement Protocol ID: 0 for NQP Native Query Protocol */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_adp_tpl wifi_p2psd_adp_tpl_t; + +/* Advertisement Protocol IE */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_ie { + uint8 id; /* IE ID: 0x6c - 108 */ + uint8 len; /* IE length */ + wifi_p2psd_adp_tpl_t adp_tpl; /* Advertisement Protocol Tuple field. 
Only one + * tuple is defined for P2P Service Discovery + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_adp_ie wifi_p2psd_adp_ie_t; + +/* NQP Vendor-specific Content */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_nqp_query_vsc { + uint8 oui_subtype; /* OUI Subtype: 0x09 */ + uint16 svc_updi; /* Service Update Indicator */ + uint8 svc_tlvs[1]; /* wifi_p2psd_qreq_tlv_t type for service request, + * wifi_p2psd_qresp_tlv_t type for service response + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_nqp_query_vsc wifi_p2psd_nqp_query_vsc_t; + +/* Service Request TLV */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_tlv { + uint16 len; /* Length: 5 plus size of Query Data */ + uint8 svc_prot; /* Service Protocol Type */ + uint8 svc_tscid; /* Service Transaction ID */ + uint8 query_data[1]; /* Query Data, passed in from above Layer 2 */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_qreq_tlv wifi_p2psd_qreq_tlv_t; + +/* Query Request Frame, defined in generic format, instead of NQP specific */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_frame { + uint16 info_id; /* Info ID: 0xDDDD */ + uint16 len; /* Length of service request TLV, 5 plus the size of request data */ + uint8 oui[3]; /* WFA OUI: 0x0050F2 */ + uint8 qreq_vsc[1]; /* Vendor-specific Content: wifi_p2psd_nqp_query_vsc_t type for NQP */ + +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_qreq_frame wifi_p2psd_qreq_frame_t; + +/* GAS Initial Request AF body, "elts" in wifi_p2p_pub_act_frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_ireq_frame { + wifi_p2psd_adp_ie_t adp_ie; /* Advertisement Protocol IE */ + uint16 qreq_len; /* Query Request Length */ + uint8 qreq_frm[1]; /* Query Request Frame wifi_p2psd_qreq_frame_t */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_gas_ireq_frame wifi_p2psd_gas_ireq_frame_t; + +/* Service Response TLV */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_tlv { + uint16 len; /* Length: 5 plus size of Query Data */ + uint8 svc_prot; /* Service Protocol Type */ + uint8 svc_tscid; /* Service Transaction ID */ + uint8 status; /* Value defined in Table 57 of P2P spec. 
*/ + uint8 query_data[1]; /* Response Data, passed in from above Layer 2 */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_qresp_tlv wifi_p2psd_qresp_tlv_t; + +/* Query Response Frame, defined in generic format, instead of NQP specific */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_frame { + uint16 info_id; /* Info ID: 0xDDDD */ + uint16 len; /* Length of service response TLV, 6 plus the size of resp data */ + uint8 oui[3]; /* WFA OUI: 0x0050F2 */ + uint8 qresp_vsc[1]; /* Vendor-specific Content: wifi_p2psd_qresp_tlv_t type for NQP */ + +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_qresp_frame wifi_p2psd_qresp_frame_t; + +/* GAS Initial Response AF body, "elts" in wifi_p2p_pub_act_frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_iresp_frame { + uint16 status; /* Value defined in Table 7-23 of IEEE P802.11u */ + uint16 cb_delay; /* GAS Comeback Delay */ + wifi_p2psd_adp_ie_t adp_ie; /* Advertisement Protocol IE */ + uint16 qresp_len; /* Query Response Length */ + uint8 qresp_frm[1]; /* Query Response Frame wifi_p2psd_qresp_frame_t */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_gas_iresp_frame wifi_p2psd_gas_iresp_frame_t; + +/* GAS Comeback Response AF body, "elts" in wifi_p2p_pub_act_frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_cresp_frame { + uint16 status; /* Value defined in Table 7-23 of IEEE P802.11u */ + uint8 fragment_id; /* Fragmentation ID */ + uint16 cb_delay; /* GAS Comeback Delay */ + wifi_p2psd_adp_ie_t adp_ie; /* Advertisement Protocol IE */ + uint16 qresp_len; /* Query Response Length */ + uint8 qresp_frm[1]; /* Query Response Frame wifi_p2psd_qresp_frame_t */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_gas_cresp_frame wifi_p2psd_gas_cresp_frame_t; + +/* Wi-Fi GAS Public Action Frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_pub_act_frame { + uint8 category; /* 0x04 Public Action Frame */ + uint8 action; /* 0x6c Advertisement Protocol */ + uint8 dialog_token; /* nonzero, identifies req/rsp transaction */ + uint8 query_data[1]; /* Query Data. wifi_p2psd_gas_ireq_frame_t + * or wifi_p2psd_gas_iresp_frame_t format + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_gas_pub_act_frame wifi_p2psd_gas_pub_act_frame_t; + +/* This marks the end of a packed structure section. */ +#include + +#endif /* _P2P_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/sdspi.h b/drivers/net/wireless/bcmdhd/include/proto/sdspi.h new file mode 100644 index 0000000000000..7353ff0d7c73b --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/sdspi.h @@ -0,0 +1,76 @@ +/* + * SD-SPI Protocol Standard + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: sdspi.h 277737 2011-08-16 17:54:59Z $ + */ + +#ifndef _SD_SPI_H +#define _SD_SPI_H + +#define SPI_START_M BITFIELD_MASK(1) /* Bit [31] - Start Bit */ +#define SPI_START_S 31 +#define SPI_DIR_M BITFIELD_MASK(1) /* Bit [30] - Direction */ +#define SPI_DIR_S 30 +#define SPI_CMD_INDEX_M BITFIELD_MASK(6) /* Bits [29:24] - Command number */ +#define SPI_CMD_INDEX_S 24 +#define SPI_RW_M BITFIELD_MASK(1) /* Bit [23] - Read=0, Write=1 */ +#define SPI_RW_S 23 +#define SPI_FUNC_M BITFIELD_MASK(3) /* Bits [22:20] - Function Number */ +#define SPI_FUNC_S 20 +#define SPI_RAW_M BITFIELD_MASK(1) /* Bit [19] - Read After Wr */ +#define SPI_RAW_S 19 +#define SPI_STUFF_M BITFIELD_MASK(1) /* Bit [18] - Stuff bit */ +#define SPI_STUFF_S 18 +#define SPI_BLKMODE_M BITFIELD_MASK(1) /* Bit [19] - Blockmode 1=blk */ +#define SPI_BLKMODE_S 19 +#define SPI_OPCODE_M BITFIELD_MASK(1) /* Bit [18] - OP Code */ +#define SPI_OPCODE_S 18 +#define SPI_ADDR_M BITFIELD_MASK(17) /* Bits [17:1] - Address */ +#define SPI_ADDR_S 1 +#define SPI_STUFF0_M BITFIELD_MASK(1) /* Bit [0] - Stuff bit */ +#define SPI_STUFF0_S 0 + +#define SPI_RSP_START_M BITFIELD_MASK(1) /* Bit [7] - Start Bit (always 0) */ +#define SPI_RSP_START_S 7 +#define SPI_RSP_PARAM_ERR_M BITFIELD_MASK(1) /* Bit [6] - Parameter Error */ +#define SPI_RSP_PARAM_ERR_S 6 +#define SPI_RSP_RFU5_M BITFIELD_MASK(1) /* Bit [5] - RFU (Always 0) */ +#define SPI_RSP_RFU5_S 5 +#define SPI_RSP_FUNC_ERR_M BITFIELD_MASK(1) /* Bit [4] - Function number error */ +#define SPI_RSP_FUNC_ERR_S 4 +#define SPI_RSP_CRC_ERR_M BITFIELD_MASK(1) /* Bit [3] - COM CRC Error */ +#define SPI_RSP_CRC_ERR_S 3 +#define SPI_RSP_ILL_CMD_M BITFIELD_MASK(1) /* Bit [2] - Illegal Command error */ +#define SPI_RSP_ILL_CMD_S 2 +#define SPI_RSP_RFU1_M BITFIELD_MASK(1) /* Bit [1] - RFU (Always 0) */ +#define SPI_RSP_RFU1_S 1 +#define SPI_RSP_IDLE_M BITFIELD_MASK(1) /* Bit [0] - In idle state */ +#define SPI_RSP_IDLE_S 0 + +/* SD-SPI Protocol Definitions */ +#define SDSPI_COMMAND_LEN 6 /* Number of bytes in an SD command */ +#define SDSPI_START_BLOCK 0xFE /* SD Start Block Token */ +#define SDSPI_IDLE_PAD 0xFF /* SD-SPI idle value for MOSI */ +#define SDSPI_START_BIT_MASK 0x80 + +#endif /* _SD_SPI_H */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/vlan.h b/drivers/net/wireless/bcmdhd/include/proto/vlan.h new file mode 100644 index 0000000000000..27f005537604a --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/vlan.h @@ -0,0 +1,70 @@ +/* + * 802.1Q VLAN protocol definitions + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: vlan.h 277737 2011-08-16 17:54:59Z $ + */ + + +#ifndef _vlan_h_ +#define _vlan_h_ + +#ifndef _TYPEDEFS_H_ +#include +#endif + + +#include + +#define VLAN_VID_MASK 0xfff +#define VLAN_CFI_SHIFT 12 +#define VLAN_PRI_SHIFT 13 + +#define VLAN_PRI_MASK 7 + +#define VLAN_TAG_LEN 4 +#define VLAN_TAG_OFFSET (2 * ETHER_ADDR_LEN) + +#define VLAN_TPID 0x8100 + +struct ethervlan_header { + uint8 ether_dhost[ETHER_ADDR_LEN]; + uint8 ether_shost[ETHER_ADDR_LEN]; + uint16 vlan_type; + uint16 vlan_tag; + uint16 ether_type; +}; + +#define ETHERVLAN_HDR_LEN (ETHER_HDR_LEN + VLAN_TAG_LEN) + + + +#include + +#define ETHERVLAN_MOVE_HDR(d, s) \ +do { \ + struct ethervlan_header t; \ + t = *(struct ethervlan_header *)(s); \ + *(struct ethervlan_header *)(d) = t; \ +} while (0) + +#endif diff --git a/drivers/net/wireless/bcmdhd/include/proto/wpa.h b/drivers/net/wireless/bcmdhd/include/proto/wpa.h new file mode 100644 index 0000000000000..7361cbf20b065 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/wpa.h @@ -0,0 +1,168 @@ +/* + * Fundamental types and constants relating to WPA + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: wpa.h 285437 2011-09-21 22:16:56Z $ + */ + + +#ifndef _proto_wpa_h_ +#define _proto_wpa_h_ + +#include +#include + + + +#include + + + + +#define DOT11_RC_INVALID_WPA_IE 13 +#define DOT11_RC_MIC_FAILURE 14 +#define DOT11_RC_4WH_TIMEOUT 15 +#define DOT11_RC_GTK_UPDATE_TIMEOUT 16 +#define DOT11_RC_WPA_IE_MISMATCH 17 +#define DOT11_RC_INVALID_MC_CIPHER 18 +#define DOT11_RC_INVALID_UC_CIPHER 19 +#define DOT11_RC_INVALID_AKMP 20 +#define DOT11_RC_BAD_WPA_VERSION 21 +#define DOT11_RC_INVALID_WPA_CAP 22 +#define DOT11_RC_8021X_AUTH_FAIL 23 + +#define WPA2_PMKID_LEN 16 + + +typedef BWL_PRE_PACKED_STRUCT struct +{ + uint8 tag; + uint8 length; + uint8 oui[3]; + uint8 oui_type; + BWL_PRE_PACKED_STRUCT struct { + uint8 low; + uint8 high; + } BWL_POST_PACKED_STRUCT version; +} BWL_POST_PACKED_STRUCT wpa_ie_fixed_t; +#define WPA_IE_OUITYPE_LEN 4 +#define WPA_IE_FIXED_LEN 8 +#define WPA_IE_TAG_FIXED_LEN 6 + +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 tag; + uint8 length; + BWL_PRE_PACKED_STRUCT struct { + uint8 low; + uint8 high; + } BWL_POST_PACKED_STRUCT version; +} BWL_POST_PACKED_STRUCT wpa_rsn_ie_fixed_t; +#define WPA_RSN_IE_FIXED_LEN 4 +#define WPA_RSN_IE_TAG_FIXED_LEN 2 +typedef uint8 wpa_pmkid_t[WPA2_PMKID_LEN]; + + +typedef BWL_PRE_PACKED_STRUCT struct +{ + uint8 oui[3]; + uint8 type; +} BWL_POST_PACKED_STRUCT wpa_suite_t, wpa_suite_mcast_t; +#define WPA_SUITE_LEN 4 + + +typedef BWL_PRE_PACKED_STRUCT struct +{ + BWL_PRE_PACKED_STRUCT struct { + uint8 low; + uint8 high; + } BWL_POST_PACKED_STRUCT count; + wpa_suite_t list[1]; +} BWL_POST_PACKED_STRUCT wpa_suite_ucast_t, wpa_suite_auth_key_mgmt_t; +#define WPA_IE_SUITE_COUNT_LEN 2 +typedef BWL_PRE_PACKED_STRUCT struct +{ + BWL_PRE_PACKED_STRUCT struct { + uint8 low; + uint8 high; + } BWL_POST_PACKED_STRUCT count; + wpa_pmkid_t list[1]; +} BWL_POST_PACKED_STRUCT wpa_pmkid_list_t; + + +#define WPA_CIPHER_NONE 0 +#define WPA_CIPHER_WEP_40 1 +#define WPA_CIPHER_TKIP 2 +#define WPA_CIPHER_AES_OCB 3 +#define WPA_CIPHER_AES_CCM 4 +#define WPA_CIPHER_WEP_104 5 +#define WPA_CIPHER_BIP 6 +#define WPA_CIPHER_TPK 7 + + +#define IS_WPA_CIPHER(cipher) ((cipher) == WPA_CIPHER_NONE || \ + (cipher) == WPA_CIPHER_WEP_40 || \ + (cipher) == WPA_CIPHER_WEP_104 || \ + (cipher) == WPA_CIPHER_TKIP || \ + (cipher) == WPA_CIPHER_AES_OCB || \ + (cipher) == WPA_CIPHER_AES_CCM || \ + (cipher) == WPA_CIPHER_TPK) + + + +#define WPA_TKIP_CM_DETECT 60 +#define WPA_TKIP_CM_BLOCK 60 + + +#define RSN_CAP_LEN 2 + + +#define RSN_CAP_PREAUTH 0x0001 +#define RSN_CAP_NOPAIRWISE 0x0002 +#define RSN_CAP_PTK_REPLAY_CNTR_MASK 0x000C +#define RSN_CAP_PTK_REPLAY_CNTR_SHIFT 2 +#define RSN_CAP_GTK_REPLAY_CNTR_MASK 0x0030 +#define RSN_CAP_GTK_REPLAY_CNTR_SHIFT 4 +#define RSN_CAP_1_REPLAY_CNTR 0 +#define RSN_CAP_2_REPLAY_CNTRS 1 +#define RSN_CAP_4_REPLAY_CNTRS 2 +#define RSN_CAP_16_REPLAY_CNTRS 3 + + +#define WPA_CAP_4_REPLAY_CNTRS RSN_CAP_4_REPLAY_CNTRS +#define WPA_CAP_16_REPLAY_CNTRS RSN_CAP_16_REPLAY_CNTRS +#define WPA_CAP_REPLAY_CNTR_SHIFT RSN_CAP_PTK_REPLAY_CNTR_SHIFT +#define WPA_CAP_REPLAY_CNTR_MASK RSN_CAP_PTK_REPLAY_CNTR_MASK + + +#define WPA_CAP_PEER_KEY_ENABLE (0x1 << 1) + + +#define WPA_CAP_LEN RSN_CAP_LEN +#define WPA_PMKID_CNT_LEN 2 + +#define WPA_CAP_WPA2_PREAUTH RSN_CAP_PREAUTH + + + +#include + +#endif diff --git a/drivers/net/wireless/bcmdhd/include/sbchipc.h b/drivers/net/wireless/bcmdhd/include/sbchipc.h new file mode 100644 index 0000000000000..78ced30c502b0 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/sbchipc.h @@ -0,0 +1,1783 @@ +/* + * 
SiliconBackplane Chipcommon core hardware definitions. + * + * The chipcommon core provides chip identification, SB control, + * JTAG, 0/1/2 UARTs, clock frequency control, a watchdog interrupt timer, + * GPIO interface, extbus, and support for serial and parallel flashes. + * + * $Id: sbchipc.h 343982 2012-07-11 00:29:37Z $ + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + */ + + +#ifndef _SBCHIPC_H +#define _SBCHIPC_H + +#ifndef _LANGUAGE_ASSEMBLY + + +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif + +typedef struct eci_prerev35 { + uint32 eci_output; + uint32 eci_control; + uint32 eci_inputlo; + uint32 eci_inputmi; + uint32 eci_inputhi; + uint32 eci_inputintpolaritylo; + uint32 eci_inputintpolaritymi; + uint32 eci_inputintpolarityhi; + uint32 eci_intmasklo; + uint32 eci_intmaskmi; + uint32 eci_intmaskhi; + uint32 eci_eventlo; + uint32 eci_eventmi; + uint32 eci_eventhi; + uint32 eci_eventmasklo; + uint32 eci_eventmaskmi; + uint32 eci_eventmaskhi; + uint32 PAD[3]; +} eci_prerev35_t; + +typedef struct eci_rev35 { + uint32 eci_outputlo; + uint32 eci_outputhi; + uint32 eci_controllo; + uint32 eci_controlhi; + uint32 eci_inputlo; + uint32 eci_inputhi; + uint32 eci_inputintpolaritylo; + uint32 eci_inputintpolarityhi; + uint32 eci_intmasklo; + uint32 eci_intmaskhi; + uint32 eci_eventlo; + uint32 eci_eventhi; + uint32 eci_eventmasklo; + uint32 eci_eventmaskhi; + uint32 eci_auxtx; + uint32 eci_auxrx; + uint32 eci_datatag; + uint32 eci_uartescvalue; + uint32 eci_autobaudctr; + uint32 eci_uartfifolevel; +} eci_rev35_t; + +typedef volatile struct { + uint32 chipid; + uint32 capabilities; + uint32 corecontrol; + uint32 bist; + + + uint32 otpstatus; + uint32 otpcontrol; + uint32 otpprog; + uint32 otplayout; + + + uint32 intstatus; + uint32 intmask; + + + uint32 chipcontrol; + uint32 chipstatus; + + + uint32 jtagcmd; + uint32 jtagir; + uint32 jtagdr; + uint32 jtagctrl; + + + uint32 flashcontrol; + uint32 flashaddress; + uint32 flashdata; + uint32 PAD[1]; + + + uint32 broadcastaddress; + uint32 broadcastdata; + + + uint32 gpiopullup; + uint32 gpiopulldown; + uint32 gpioin; + uint32 gpioout; + uint32 gpioouten; + uint32 gpiocontrol; + uint32 gpiointpolarity; + uint32 gpiointmask; + + + uint32 gpioevent; + uint32 gpioeventintmask; + + + uint32 watchdog; + + + uint32 gpioeventintpolarity; + + + uint32 gpiotimerval; + uint32 gpiotimeroutmask; + + + uint32 
clockcontrol_n; + uint32 clockcontrol_sb; + uint32 clockcontrol_pci; + uint32 clockcontrol_m2; + uint32 clockcontrol_m3; + uint32 clkdiv; + uint32 gpiodebugsel; + uint32 capabilities_ext; + + + uint32 pll_on_delay; + uint32 fref_sel_delay; + uint32 slow_clk_ctl; + uint32 PAD; + + + uint32 system_clk_ctl; + uint32 clkstatestretch; + uint32 PAD[2]; + + + uint32 bp_addrlow; + uint32 bp_addrhigh; + uint32 bp_data; + uint32 PAD; + uint32 bp_indaccess; + + uint32 gsioctrl; + uint32 gsioaddress; + uint32 gsiodata; + + + uint32 clkdiv2; + uint32 PAD[2]; + + + uint32 eromptr; + + + uint32 pcmcia_config; + uint32 pcmcia_memwait; + uint32 pcmcia_attrwait; + uint32 pcmcia_iowait; + uint32 ide_config; + uint32 ide_memwait; + uint32 ide_attrwait; + uint32 ide_iowait; + uint32 prog_config; + uint32 prog_waitcount; + uint32 flash_config; + uint32 flash_waitcount; + uint32 SECI_config; + uint32 SECI_status; + uint32 SECI_statusmask; + uint32 SECI_rxnibchanged; + + uint32 PAD[20]; + + + uint32 sromcontrol; + uint32 sromaddress; + uint32 sromdata; + uint32 PAD[9]; + uint32 seci_uart_data; + uint32 seci_uart_bauddiv; + uint32 seci_uart_fcr; + uint32 seci_uart_lcr; + uint32 seci_uart_mcr; + uint32 seci_uart_lsr; + uint32 seci_uart_msr; + uint32 seci_uart_baudadj; + + uint32 clk_ctl_st; + uint32 hw_war; + uint32 PAD[70]; + + + uint8 uart0data; + uint8 uart0imr; + uint8 uart0fcr; + uint8 uart0lcr; + uint8 uart0mcr; + uint8 uart0lsr; + uint8 uart0msr; + uint8 uart0scratch; + uint8 PAD[248]; + + uint8 uart1data; + uint8 uart1imr; + uint8 uart1fcr; + uint8 uart1lcr; + uint8 uart1mcr; + uint8 uart1lsr; + uint8 uart1msr; + uint8 uart1scratch; + uint32 PAD[126]; + + + + uint32 pmucontrol; + uint32 pmucapabilities; + uint32 pmustatus; + uint32 res_state; + uint32 res_pending; + uint32 pmutimer; + uint32 min_res_mask; + uint32 max_res_mask; + uint32 res_table_sel; + uint32 res_dep_mask; + uint32 res_updn_timer; + uint32 res_timer; + uint32 clkstretch; + uint32 pmuwatchdog; + uint32 gpiosel; + uint32 gpioenable; + uint32 res_req_timer_sel; + uint32 res_req_timer; + uint32 res_req_mask; + uint32 PAD; + uint32 chipcontrol_addr; + uint32 chipcontrol_data; + uint32 regcontrol_addr; + uint32 regcontrol_data; + uint32 pllcontrol_addr; + uint32 pllcontrol_data; + uint32 pmustrapopt; + uint32 pmu_xtalfreq; + uint32 PAD[100]; + uint16 sromotp[768]; +} chipcregs_t; + +#endif + + +#define CC_CHIPID 0 +#define CC_CAPABILITIES 4 +#define CC_CHIPST 0x2c +#define CC_EROMPTR 0xfc + + +#define CC_OTPST 0x10 +#define CC_JTAGCMD 0x30 +#define CC_JTAGIR 0x34 +#define CC_JTAGDR 0x38 +#define CC_JTAGCTRL 0x3c +#define CC_GPIOPU 0x58 +#define CC_GPIOPD 0x5c +#define CC_GPIOIN 0x60 +#define CC_GPIOOUT 0x64 +#define CC_GPIOOUTEN 0x68 +#define CC_GPIOCTRL 0x6c +#define CC_GPIOPOL 0x70 +#define CC_GPIOINTM 0x74 +#define CC_WATCHDOG 0x80 +#define CC_CLKC_N 0x90 +#define CC_CLKC_M0 0x94 +#define CC_CLKC_M1 0x98 +#define CC_CLKC_M2 0x9c +#define CC_CLKC_M3 0xa0 +#define CC_CLKDIV 0xa4 +#define CC_SYS_CLK_CTL 0xc0 +#define CC_CLK_CTL_ST SI_CLK_CTL_ST +#define PMU_CTL 0x600 +#define PMU_CAP 0x604 +#define PMU_ST 0x608 +#define PMU_RES_STATE 0x60c +#define PMU_TIMER 0x614 +#define PMU_MIN_RES_MASK 0x618 +#define PMU_MAX_RES_MASK 0x61c +#define CC_CHIPCTL_ADDR 0x650 +#define CC_CHIPCTL_DATA 0x654 +#define PMU_REG_CONTROL_ADDR 0x658 +#define PMU_REG_CONTROL_DATA 0x65C +#define PMU_PLL_CONTROL_ADDR 0x660 +#define PMU_PLL_CONTROL_DATA 0x664 +#define CC_SROM_OTP 0x800 + + +#define CID_ID_MASK 0x0000ffff +#define CID_REV_MASK 0x000f0000 +#define CID_REV_SHIFT 
16 +#define CID_PKG_MASK 0x00f00000 +#define CID_PKG_SHIFT 20 +#define CID_CC_MASK 0x0f000000 +#define CID_CC_SHIFT 24 +#define CID_TYPE_MASK 0xf0000000 +#define CID_TYPE_SHIFT 28 + + +#define CC_CAP_UARTS_MASK 0x00000003 +#define CC_CAP_MIPSEB 0x00000004 +#define CC_CAP_UCLKSEL 0x00000018 +#define CC_CAP_UINTCLK 0x00000008 +#define CC_CAP_UARTGPIO 0x00000020 +#define CC_CAP_EXTBUS_MASK 0x000000c0 +#define CC_CAP_EXTBUS_NONE 0x00000000 +#define CC_CAP_EXTBUS_FULL 0x00000040 +#define CC_CAP_EXTBUS_PROG 0x00000080 +#define CC_CAP_FLASH_MASK 0x00000700 +#define CC_CAP_PLL_MASK 0x00038000 +#define CC_CAP_PWR_CTL 0x00040000 +#define CC_CAP_OTPSIZE 0x00380000 +#define CC_CAP_OTPSIZE_SHIFT 19 +#define CC_CAP_OTPSIZE_BASE 5 +#define CC_CAP_JTAGP 0x00400000 +#define CC_CAP_ROM 0x00800000 +#define CC_CAP_BKPLN64 0x08000000 +#define CC_CAP_PMU 0x10000000 +#define CC_CAP_ECI 0x20000000 +#define CC_CAP_SROM 0x40000000 +#define CC_CAP_NFLASH 0x80000000 + +#define CC_CAP2_SECI 0x00000001 +#define CC_CAP2_GSIO 0x00000002 + + +#define CC_CAP_EXT_SECI_PRESENT 0x00000001 + + +#define PLL_NONE 0x00000000 +#define PLL_TYPE1 0x00010000 +#define PLL_TYPE2 0x00020000 +#define PLL_TYPE3 0x00030000 +#define PLL_TYPE4 0x00008000 +#define PLL_TYPE5 0x00018000 +#define PLL_TYPE6 0x00028000 +#define PLL_TYPE7 0x00038000 + + +#define ILP_CLOCK 32000 + + +#define ALP_CLOCK 20000000 + + +#define HT_CLOCK 80000000 + + +#define CC_UARTCLKO 0x00000001 +#define CC_SE 0x00000002 +#define CC_ASYNCGPIO 0x00000004 +#define CC_UARTCLKEN 0x00000008 + + +#define CHIPCTRL_4321A0_DEFAULT 0x3a4 +#define CHIPCTRL_4321A1_DEFAULT 0x0a4 +#define CHIPCTRL_4321_PLL_DOWN 0x800000 + + +#define OTPS_OL_MASK 0x000000ff +#define OTPS_OL_MFG 0x00000001 +#define OTPS_OL_OR1 0x00000002 +#define OTPS_OL_OR2 0x00000004 +#define OTPS_OL_GU 0x00000008 +#define OTPS_GUP_MASK 0x00000f00 +#define OTPS_GUP_SHIFT 8 +#define OTPS_GUP_HW 0x00000100 +#define OTPS_GUP_SW 0x00000200 +#define OTPS_GUP_CI 0x00000400 +#define OTPS_GUP_FUSE 0x00000800 +#define OTPS_READY 0x00001000 +#define OTPS_RV(x) (1 << (16 + (x))) +#define OTPS_RV_MASK 0x0fff0000 + + +#define OTPC_PROGSEL 0x00000001 +#define OTPC_PCOUNT_MASK 0x0000000e +#define OTPC_PCOUNT_SHIFT 1 +#define OTPC_VSEL_MASK 0x000000f0 +#define OTPC_VSEL_SHIFT 4 +#define OTPC_TMM_MASK 0x00000700 +#define OTPC_TMM_SHIFT 8 +#define OTPC_ODM 0x00000800 +#define OTPC_PROGEN 0x80000000 + + +#define OTPP_COL_MASK 0x000000ff +#define OTPP_COL_SHIFT 0 +#define OTPP_ROW_MASK 0x0000ff00 +#define OTPP_ROW_SHIFT 8 +#define OTPP_OC_MASK 0x0f000000 +#define OTPP_OC_SHIFT 24 +#define OTPP_READERR 0x10000000 +#define OTPP_VALUE_MASK 0x20000000 +#define OTPP_VALUE_SHIFT 29 +#define OTPP_START_BUSY 0x80000000 +#define OTPP_READ 0x40000000 + + +#define OTP_CISFORMAT_NEW 0x80000000 + + +#define OTPPOC_READ 0 +#define OTPPOC_BIT_PROG 1 +#define OTPPOC_VERIFY 3 +#define OTPPOC_INIT 4 +#define OTPPOC_SET 5 +#define OTPPOC_RESET 6 +#define OTPPOC_OCST 7 +#define OTPPOC_ROW_LOCK 8 +#define OTPPOC_PRESCN_TEST 9 + + + +#define JTAGM_CREV_OLD 10 +#define JTAGM_CREV_IRP 22 +#define JTAGM_CREV_RTI 28 + + +#define JCMD_START 0x80000000 +#define JCMD_BUSY 0x80000000 +#define JCMD_STATE_MASK 0x60000000 +#define JCMD_STATE_TLR 0x00000000 +#define JCMD_STATE_PIR 0x20000000 +#define JCMD_STATE_PDR 0x40000000 +#define JCMD_STATE_RTI 0x60000000 +#define JCMD0_ACC_MASK 0x0000f000 +#define JCMD0_ACC_IRDR 0x00000000 +#define JCMD0_ACC_DR 0x00001000 +#define JCMD0_ACC_IR 0x00002000 +#define JCMD0_ACC_RESET 0x00003000 +#define JCMD0_ACC_IRPDR 0x00004000 
+#define JCMD0_ACC_PDR 0x00005000 +#define JCMD0_IRW_MASK 0x00000f00 +#define JCMD_ACC_MASK 0x000f0000 +#define JCMD_ACC_IRDR 0x00000000 +#define JCMD_ACC_DR 0x00010000 +#define JCMD_ACC_IR 0x00020000 +#define JCMD_ACC_RESET 0x00030000 +#define JCMD_ACC_IRPDR 0x00040000 +#define JCMD_ACC_PDR 0x00050000 +#define JCMD_ACC_PIR 0x00060000 +#define JCMD_ACC_IRDR_I 0x00070000 +#define JCMD_ACC_DR_I 0x00080000 +#define JCMD_IRW_MASK 0x00001f00 +#define JCMD_IRW_SHIFT 8 +#define JCMD_DRW_MASK 0x0000003f + + +#define JCTRL_FORCE_CLK 4 +#define JCTRL_EXT_EN 2 +#define JCTRL_EN 1 + + +#define CLKD_SFLASH 0x0f000000 +#define CLKD_SFLASH_SHIFT 24 +#define CLKD_OTP 0x000f0000 +#define CLKD_OTP_SHIFT 16 +#define CLKD_JTAG 0x00000f00 +#define CLKD_JTAG_SHIFT 8 +#define CLKD_UART 0x000000ff + +#define CLKD2_SROM 0x00000003 + + +#define CI_GPIO 0x00000001 +#define CI_EI 0x00000002 +#define CI_TEMP 0x00000004 +#define CI_SIRQ 0x00000008 +#define CI_ECI 0x00000010 +#define CI_PMU 0x00000020 +#define CI_UART 0x00000040 +#define CI_WDRESET 0x80000000 + + +#define SCC_SS_MASK 0x00000007 +#define SCC_SS_LPO 0x00000000 +#define SCC_SS_XTAL 0x00000001 +#define SCC_SS_PCI 0x00000002 +#define SCC_LF 0x00000200 +#define SCC_LP 0x00000400 +#define SCC_FS 0x00000800 +#define SCC_IP 0x00001000 +#define SCC_XC 0x00002000 +#define SCC_XP 0x00004000 +#define SCC_CD_MASK 0xffff0000 +#define SCC_CD_SHIFT 16 + + +#define SYCC_IE 0x00000001 +#define SYCC_AE 0x00000002 +#define SYCC_FP 0x00000004 +#define SYCC_AR 0x00000008 +#define SYCC_HR 0x00000010 +#define SYCC_CD_MASK 0xffff0000 +#define SYCC_CD_SHIFT 16 + + +#define BPIA_BYTEEN 0x0000000f +#define BPIA_SZ1 0x00000001 +#define BPIA_SZ2 0x00000003 +#define BPIA_SZ4 0x00000007 +#define BPIA_SZ8 0x0000000f +#define BPIA_WRITE 0x00000100 +#define BPIA_START 0x00000200 +#define BPIA_BUSY 0x00000200 +#define BPIA_ERROR 0x00000400 + + +#define CF_EN 0x00000001 +#define CF_EM_MASK 0x0000000e +#define CF_EM_SHIFT 1 +#define CF_EM_FLASH 0 +#define CF_EM_SYNC 2 +#define CF_EM_PCMCIA 4 +#define CF_DS 0x00000010 +#define CF_BS 0x00000020 +#define CF_CD_MASK 0x000000c0 +#define CF_CD_SHIFT 6 +#define CF_CD_DIV2 0x00000000 +#define CF_CD_DIV3 0x00000040 +#define CF_CD_DIV4 0x00000080 +#define CF_CE 0x00000100 +#define CF_SB 0x00000200 + + +#define PM_W0_MASK 0x0000003f +#define PM_W1_MASK 0x00001f00 +#define PM_W1_SHIFT 8 +#define PM_W2_MASK 0x001f0000 +#define PM_W2_SHIFT 16 +#define PM_W3_MASK 0x1f000000 +#define PM_W3_SHIFT 24 + + +#define PA_W0_MASK 0x0000003f +#define PA_W1_MASK 0x00001f00 +#define PA_W1_SHIFT 8 +#define PA_W2_MASK 0x001f0000 +#define PA_W2_SHIFT 16 +#define PA_W3_MASK 0x1f000000 +#define PA_W3_SHIFT 24 + + +#define PI_W0_MASK 0x0000003f +#define PI_W1_MASK 0x00001f00 +#define PI_W1_SHIFT 8 +#define PI_W2_MASK 0x001f0000 +#define PI_W2_SHIFT 16 +#define PI_W3_MASK 0x1f000000 +#define PI_W3_SHIFT 24 + + +#define PW_W0_MASK 0x0000001f +#define PW_W1_MASK 0x00001f00 +#define PW_W1_SHIFT 8 +#define PW_W2_MASK 0x001f0000 +#define PW_W2_SHIFT 16 +#define PW_W3_MASK 0x1f000000 +#define PW_W3_SHIFT 24 + +#define PW_W0 0x0000000c +#define PW_W1 0x00000a00 +#define PW_W2 0x00020000 +#define PW_W3 0x01000000 + + +#define FW_W0_MASK 0x0000003f +#define FW_W1_MASK 0x00001f00 +#define FW_W1_SHIFT 8 +#define FW_W2_MASK 0x001f0000 +#define FW_W2_SHIFT 16 +#define FW_W3_MASK 0x1f000000 +#define FW_W3_SHIFT 24 + + +#define SRC_START 0x80000000 +#define SRC_BUSY 0x80000000 +#define SRC_OPCODE 0x60000000 +#define SRC_OP_READ 0x00000000 +#define SRC_OP_WRITE 0x20000000 +#define 
SRC_OP_WRDIS 0x40000000 +#define SRC_OP_WREN 0x60000000 +#define SRC_OTPSEL 0x00000010 +#define SRC_LOCK 0x00000008 +#define SRC_SIZE_MASK 0x00000006 +#define SRC_SIZE_1K 0x00000000 +#define SRC_SIZE_4K 0x00000002 +#define SRC_SIZE_16K 0x00000004 +#define SRC_SIZE_SHIFT 1 +#define SRC_PRESENT 0x00000001 + + +#define PCTL_ILP_DIV_MASK 0xffff0000 +#define PCTL_ILP_DIV_SHIFT 16 +#define PCTL_PLL_PLLCTL_UPD 0x00000400 +#define PCTL_NOILP_ON_WAIT 0x00000200 +#define PCTL_HT_REQ_EN 0x00000100 +#define PCTL_ALP_REQ_EN 0x00000080 +#define PCTL_XTALFREQ_MASK 0x0000007c +#define PCTL_XTALFREQ_SHIFT 2 +#define PCTL_ILP_DIV_EN 0x00000002 +#define PCTL_LPO_SEL 0x00000001 + + +#define CSTRETCH_HT 0xffff0000 +#define CSTRETCH_ALP 0x0000ffff + + +#define GPIO_ONTIME_SHIFT 16 + + +#define CN_N1_MASK 0x3f +#define CN_N2_MASK 0x3f00 +#define CN_N2_SHIFT 8 +#define CN_PLLC_MASK 0xf0000 +#define CN_PLLC_SHIFT 16 + + +#define CC_M1_MASK 0x3f +#define CC_M2_MASK 0x3f00 +#define CC_M2_SHIFT 8 +#define CC_M3_MASK 0x3f0000 +#define CC_M3_SHIFT 16 +#define CC_MC_MASK 0x1f000000 +#define CC_MC_SHIFT 24 + + +#define CC_F6_2 0x02 +#define CC_F6_3 0x03 +#define CC_F6_4 0x05 +#define CC_F6_5 0x09 +#define CC_F6_6 0x11 +#define CC_F6_7 0x21 + +#define CC_F5_BIAS 5 + +#define CC_MC_BYPASS 0x08 +#define CC_MC_M1 0x04 +#define CC_MC_M1M2 0x02 +#define CC_MC_M1M2M3 0x01 +#define CC_MC_M1M3 0x11 + + +#define CC_T2_BIAS 2 +#define CC_T2M2_BIAS 3 + +#define CC_T2MC_M1BYP 1 +#define CC_T2MC_M2BYP 2 +#define CC_T2MC_M3BYP 4 + + +#define CC_T6_MMASK 1 +#define CC_T6_M0 120000000 +#define CC_T6_M1 100000000 +#define SB2MIPS_T6(sb) (2 * (sb)) + + +#define CC_CLOCK_BASE1 24000000 +#define CC_CLOCK_BASE2 12500000 + + +#define CLKC_5350_N 0x0311 +#define CLKC_5350_M 0x04020009 + + +#define FLASH_NONE 0x000 +#define SFLASH_ST 0x100 +#define SFLASH_AT 0x200 +#define PFLASH 0x700 + + +#define CC_CFG_EN 0x0001 +#define CC_CFG_EM_MASK 0x000e +#define CC_CFG_EM_ASYNC 0x0000 +#define CC_CFG_EM_SYNC 0x0002 +#define CC_CFG_EM_PCMCIA 0x0004 +#define CC_CFG_EM_IDE 0x0006 +#define CC_CFG_DS 0x0010 +#define CC_CFG_CD_MASK 0x00e0 +#define CC_CFG_CE 0x0100 +#define CC_CFG_SB 0x0200 +#define CC_CFG_IS 0x0400 + + +#define CC_EB_BASE 0x1a000000 +#define CC_EB_PCMCIA_MEM 0x1a000000 +#define CC_EB_PCMCIA_IO 0x1a200000 +#define CC_EB_PCMCIA_CFG 0x1a400000 +#define CC_EB_IDE 0x1a800000 +#define CC_EB_PCMCIA1_MEM 0x1a800000 +#define CC_EB_PCMCIA1_IO 0x1aa00000 +#define CC_EB_PCMCIA1_CFG 0x1ac00000 +#define CC_EB_PROGIF 0x1b000000 + + + +#define SFLASH_OPCODE 0x000000ff +#define SFLASH_ACTION 0x00000700 +#define SFLASH_CS_ACTIVE 0x00001000 +#define SFLASH_START 0x80000000 +#define SFLASH_BUSY SFLASH_START + + +#define SFLASH_ACT_OPONLY 0x0000 +#define SFLASH_ACT_OP1D 0x0100 +#define SFLASH_ACT_OP3A 0x0200 +#define SFLASH_ACT_OP3A1D 0x0300 +#define SFLASH_ACT_OP3A4D 0x0400 +#define SFLASH_ACT_OP3A4X4D 0x0500 +#define SFLASH_ACT_OP3A1X4D 0x0700 + + +#define SFLASH_ST_WREN 0x0006 +#define SFLASH_ST_WRDIS 0x0004 +#define SFLASH_ST_RDSR 0x0105 +#define SFLASH_ST_WRSR 0x0101 +#define SFLASH_ST_READ 0x0303 +#define SFLASH_ST_PP 0x0302 +#define SFLASH_ST_SE 0x02d8 +#define SFLASH_ST_BE 0x00c7 +#define SFLASH_ST_DP 0x00b9 +#define SFLASH_ST_RES 0x03ab +#define SFLASH_ST_CSA 0x1000 +#define SFLASH_ST_SSE 0x0220 + + +#define SFLASH_ST_WIP 0x01 +#define SFLASH_ST_WEL 0x02 +#define SFLASH_ST_BP_MASK 0x1c +#define SFLASH_ST_BP_SHIFT 2 +#define SFLASH_ST_SRWD 0x80 + + +#define SFLASH_AT_READ 0x07e8 +#define SFLASH_AT_PAGE_READ 0x07d2 +#define SFLASH_AT_BUF1_READ +#define 
SFLASH_AT_BUF2_READ +#define SFLASH_AT_STATUS 0x01d7 +#define SFLASH_AT_BUF1_WRITE 0x0384 +#define SFLASH_AT_BUF2_WRITE 0x0387 +#define SFLASH_AT_BUF1_ERASE_PROGRAM 0x0283 +#define SFLASH_AT_BUF2_ERASE_PROGRAM 0x0286 +#define SFLASH_AT_BUF1_PROGRAM 0x0288 +#define SFLASH_AT_BUF2_PROGRAM 0x0289 +#define SFLASH_AT_PAGE_ERASE 0x0281 +#define SFLASH_AT_BLOCK_ERASE 0x0250 +#define SFLASH_AT_BUF1_WRITE_ERASE_PROGRAM 0x0382 +#define SFLASH_AT_BUF2_WRITE_ERASE_PROGRAM 0x0385 +#define SFLASH_AT_BUF1_LOAD 0x0253 +#define SFLASH_AT_BUF2_LOAD 0x0255 +#define SFLASH_AT_BUF1_COMPARE 0x0260 +#define SFLASH_AT_BUF2_COMPARE 0x0261 +#define SFLASH_AT_BUF1_REPROGRAM 0x0258 +#define SFLASH_AT_BUF2_REPROGRAM 0x0259 + + +#define SFLASH_AT_READY 0x80 +#define SFLASH_AT_MISMATCH 0x40 +#define SFLASH_AT_ID_MASK 0x38 +#define SFLASH_AT_ID_SHIFT 3 + + +#define GSIO_START 0x80000000 +#define GSIO_BUSY GSIO_START + + + +#define UART_RX 0 +#define UART_TX 0 +#define UART_DLL 0 +#define UART_IER 1 +#define UART_DLM 1 +#define UART_IIR 2 +#define UART_FCR 2 +#define UART_LCR 3 +#define UART_MCR 4 +#define UART_LSR 5 +#define UART_MSR 6 +#define UART_SCR 7 +#define UART_LCR_DLAB 0x80 +#define UART_LCR_WLEN8 0x03 +#define UART_MCR_OUT2 0x08 +#define UART_MCR_LOOP 0x10 +#define UART_LSR_RX_FIFO 0x80 +#define UART_LSR_TDHR 0x40 +#define UART_LSR_THRE 0x20 +#define UART_LSR_BREAK 0x10 +#define UART_LSR_FRAMING 0x08 +#define UART_LSR_PARITY 0x04 +#define UART_LSR_OVERRUN 0x02 +#define UART_LSR_RXRDY 0x01 +#define UART_FCR_FIFO_ENABLE 1 + + +#define UART_IIR_FIFO_MASK 0xc0 +#define UART_IIR_INT_MASK 0xf +#define UART_IIR_MDM_CHG 0x0 +#define UART_IIR_NOINT 0x1 +#define UART_IIR_THRE 0x2 +#define UART_IIR_RCVD_DATA 0x4 +#define UART_IIR_RCVR_STATUS 0x6 +#define UART_IIR_CHAR_TIME 0xc + + +#define UART_IER_EDSSI 8 +#define UART_IER_ELSI 4 +#define UART_IER_ETBEI 2 +#define UART_IER_ERBFI 1 + + +#define PST_EXTLPOAVAIL 0x0100 +#define PST_WDRESET 0x0080 +#define PST_INTPEND 0x0040 +#define PST_SBCLKST 0x0030 +#define PST_SBCLKST_ILP 0x0010 +#define PST_SBCLKST_ALP 0x0020 +#define PST_SBCLKST_HT 0x0030 +#define PST_ALPAVAIL 0x0008 +#define PST_HTAVAIL 0x0004 +#define PST_RESINIT 0x0003 + + +#define PCAP_REV_MASK 0x000000ff +#define PCAP_RC_MASK 0x00001f00 +#define PCAP_RC_SHIFT 8 +#define PCAP_TC_MASK 0x0001e000 +#define PCAP_TC_SHIFT 13 +#define PCAP_PC_MASK 0x001e0000 +#define PCAP_PC_SHIFT 17 +#define PCAP_VC_MASK 0x01e00000 +#define PCAP_VC_SHIFT 21 +#define PCAP_CC_MASK 0x1e000000 +#define PCAP_CC_SHIFT 25 +#define PCAP5_PC_MASK 0x003e0000 +#define PCAP5_PC_SHIFT 17 +#define PCAP5_VC_MASK 0x07c00000 +#define PCAP5_VC_SHIFT 22 +#define PCAP5_CC_MASK 0xf8000000 +#define PCAP5_CC_SHIFT 27 + + + +#define PRRT_TIME_MASK 0x03ff +#define PRRT_INTEN 0x0400 +#define PRRT_REQ_ACTIVE 0x0800 +#define PRRT_ALP_REQ 0x1000 +#define PRRT_HT_REQ 0x2000 + + +#define PMURES_BIT(bit) (1 << (bit)) + + +#define PMURES_MAX_RESNUM 30 + + +#define PMU_CHIPCTL0 0 + + +#define PMU_CC1_CLKREQ_TYPE_SHIFT 19 +#define PMU_CC1_CLKREQ_TYPE_MASK (1 << PMU_CC1_CLKREQ_TYPE_SHIFT) + +#define CLKREQ_TYPE_CONFIG_OPENDRAIN 0 +#define CLKREQ_TYPE_CONFIG_PUSHPULL 1 + + +#define PMU_CHIPCTL1 1 +#define PMU_CC1_RXC_DLL_BYPASS 0x00010000 + +#define PMU_CC1_IF_TYPE_MASK 0x00000030 +#define PMU_CC1_IF_TYPE_RMII 0x00000000 +#define PMU_CC1_IF_TYPE_MII 0x00000010 +#define PMU_CC1_IF_TYPE_RGMII 0x00000020 + +#define PMU_CC1_SW_TYPE_MASK 0x000000c0 +#define PMU_CC1_SW_TYPE_EPHY 0x00000000 +#define PMU_CC1_SW_TYPE_EPHYMII 0x00000040 +#define PMU_CC1_SW_TYPE_EPHYRMII 0x00000080 
+#define PMU_CC1_SW_TYPE_RGMII 0x000000c0 + + + + + +#define PMU0_PLL0_PLLCTL0 0 +#define PMU0_PLL0_PC0_PDIV_MASK 1 +#define PMU0_PLL0_PC0_PDIV_FREQ 25000 +#define PMU0_PLL0_PC0_DIV_ARM_MASK 0x00000038 +#define PMU0_PLL0_PC0_DIV_ARM_SHIFT 3 +#define PMU0_PLL0_PC0_DIV_ARM_BASE 8 + + +#define PMU0_PLL0_PC0_DIV_ARM_110MHZ 0 +#define PMU0_PLL0_PC0_DIV_ARM_97_7MHZ 1 +#define PMU0_PLL0_PC0_DIV_ARM_88MHZ 2 +#define PMU0_PLL0_PC0_DIV_ARM_80MHZ 3 +#define PMU0_PLL0_PC0_DIV_ARM_73_3MHZ 4 +#define PMU0_PLL0_PC0_DIV_ARM_67_7MHZ 5 +#define PMU0_PLL0_PC0_DIV_ARM_62_9MHZ 6 +#define PMU0_PLL0_PC0_DIV_ARM_58_6MHZ 7 + + +#define PMU0_PLL0_PLLCTL1 1 +#define PMU0_PLL0_PC1_WILD_INT_MASK 0xf0000000 +#define PMU0_PLL0_PC1_WILD_INT_SHIFT 28 +#define PMU0_PLL0_PC1_WILD_FRAC_MASK 0x0fffff00 +#define PMU0_PLL0_PC1_WILD_FRAC_SHIFT 8 +#define PMU0_PLL0_PC1_STOP_MOD 0x00000040 + + +#define PMU0_PLL0_PLLCTL2 2 +#define PMU0_PLL0_PC2_WILD_INT_MASK 0xf +#define PMU0_PLL0_PC2_WILD_INT_SHIFT 4 + + + +#define PMU1_PLL0_PLLCTL0 0 +#define PMU1_PLL0_PC0_P1DIV_MASK 0x00f00000 +#define PMU1_PLL0_PC0_P1DIV_SHIFT 20 +#define PMU1_PLL0_PC0_P2DIV_MASK 0x0f000000 +#define PMU1_PLL0_PC0_P2DIV_SHIFT 24 + + +#define PMU1_PLL0_PLLCTL1 1 +#define PMU1_PLL0_PC1_M1DIV_MASK 0x000000ff +#define PMU1_PLL0_PC1_M1DIV_SHIFT 0 +#define PMU1_PLL0_PC1_M2DIV_MASK 0x0000ff00 +#define PMU1_PLL0_PC1_M2DIV_SHIFT 8 +#define PMU1_PLL0_PC1_M3DIV_MASK 0x00ff0000 +#define PMU1_PLL0_PC1_M3DIV_SHIFT 16 +#define PMU1_PLL0_PC1_M4DIV_MASK 0xff000000 +#define PMU1_PLL0_PC1_M4DIV_SHIFT 24 +#define PMU1_PLL0_PC1_M4DIV_BY_9 9 +#define PMU1_PLL0_PC1_M4DIV_BY_18 0x12 +#define PMU1_PLL0_PC1_M4DIV_BY_36 0x24 + +#define DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT 8 +#define DOT11MAC_880MHZ_CLK_DIVISOR_MASK (0xFF << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT) +#define DOT11MAC_880MHZ_CLK_DIVISOR_VAL (0xE << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT) + + +#define PMU1_PLL0_PLLCTL2 2 +#define PMU1_PLL0_PC2_M5DIV_MASK 0x000000ff +#define PMU1_PLL0_PC2_M5DIV_SHIFT 0 +#define PMU1_PLL0_PC2_M5DIV_BY_12 0xc +#define PMU1_PLL0_PC2_M5DIV_BY_18 0x12 +#define PMU1_PLL0_PC2_M5DIV_BY_36 0x24 +#define PMU1_PLL0_PC2_M6DIV_MASK 0x0000ff00 +#define PMU1_PLL0_PC2_M6DIV_SHIFT 8 +#define PMU1_PLL0_PC2_M6DIV_BY_18 0x12 +#define PMU1_PLL0_PC2_M6DIV_BY_36 0x24 +#define PMU1_PLL0_PC2_NDIV_MODE_MASK 0x000e0000 +#define PMU1_PLL0_PC2_NDIV_MODE_SHIFT 17 +#define PMU1_PLL0_PC2_NDIV_MODE_MASH 1 +#define PMU1_PLL0_PC2_NDIV_MODE_MFB 2 +#define PMU1_PLL0_PC2_NDIV_INT_MASK 0x1ff00000 +#define PMU1_PLL0_PC2_NDIV_INT_SHIFT 20 + + +#define PMU1_PLL0_PLLCTL3 3 +#define PMU1_PLL0_PC3_NDIV_FRAC_MASK 0x00ffffff +#define PMU1_PLL0_PC3_NDIV_FRAC_SHIFT 0 + + +#define PMU1_PLL0_PLLCTL4 4 + + +#define PMU1_PLL0_PLLCTL5 5 +#define PMU1_PLL0_PC5_CLK_DRV_MASK 0xffffff00 +#define PMU1_PLL0_PC5_CLK_DRV_SHIFT 8 + + +#define PMU2_PHY_PLL_PLLCTL 4 +#define PMU2_SI_PLL_PLLCTL 10 + + + + +#define PMU2_PLL_PLLCTL0 0 +#define PMU2_PLL_PC0_P1DIV_MASK 0x00f00000 +#define PMU2_PLL_PC0_P1DIV_SHIFT 20 +#define PMU2_PLL_PC0_P2DIV_MASK 0x0f000000 +#define PMU2_PLL_PC0_P2DIV_SHIFT 24 + + +#define PMU2_PLL_PLLCTL1 1 +#define PMU2_PLL_PC1_M1DIV_MASK 0x000000ff +#define PMU2_PLL_PC1_M1DIV_SHIFT 0 +#define PMU2_PLL_PC1_M2DIV_MASK 0x0000ff00 +#define PMU2_PLL_PC1_M2DIV_SHIFT 8 +#define PMU2_PLL_PC1_M3DIV_MASK 0x00ff0000 +#define PMU2_PLL_PC1_M3DIV_SHIFT 16 +#define PMU2_PLL_PC1_M4DIV_MASK 0xff000000 +#define PMU2_PLL_PC1_M4DIV_SHIFT 24 + + +#define PMU2_PLL_PLLCTL2 2 +#define PMU2_PLL_PC2_M5DIV_MASK 0x000000ff +#define PMU2_PLL_PC2_M5DIV_SHIFT 0 +#define 
PMU2_PLL_PC2_M6DIV_MASK 0x0000ff00 +#define PMU2_PLL_PC2_M6DIV_SHIFT 8 +#define PMU2_PLL_PC2_NDIV_MODE_MASK 0x000e0000 +#define PMU2_PLL_PC2_NDIV_MODE_SHIFT 17 +#define PMU2_PLL_PC2_NDIV_INT_MASK 0x1ff00000 +#define PMU2_PLL_PC2_NDIV_INT_SHIFT 20 + + +#define PMU2_PLL_PLLCTL3 3 +#define PMU2_PLL_PC3_NDIV_FRAC_MASK 0x00ffffff +#define PMU2_PLL_PC3_NDIV_FRAC_SHIFT 0 + + +#define PMU2_PLL_PLLCTL4 4 + + +#define PMU2_PLL_PLLCTL5 5 +#define PMU2_PLL_PC5_CLKDRIVE_CH1_MASK 0x00000f00 +#define PMU2_PLL_PC5_CLKDRIVE_CH1_SHIFT 8 +#define PMU2_PLL_PC5_CLKDRIVE_CH2_MASK 0x0000f000 +#define PMU2_PLL_PC5_CLKDRIVE_CH2_SHIFT 12 +#define PMU2_PLL_PC5_CLKDRIVE_CH3_MASK 0x000f0000 +#define PMU2_PLL_PC5_CLKDRIVE_CH3_SHIFT 16 +#define PMU2_PLL_PC5_CLKDRIVE_CH4_MASK 0x00f00000 +#define PMU2_PLL_PC5_CLKDRIVE_CH4_SHIFT 20 +#define PMU2_PLL_PC5_CLKDRIVE_CH5_MASK 0x0f000000 +#define PMU2_PLL_PC5_CLKDRIVE_CH5_SHIFT 24 +#define PMU2_PLL_PC5_CLKDRIVE_CH6_MASK 0xf0000000 +#define PMU2_PLL_PC5_CLKDRIVE_CH6_SHIFT 28 + + +#define PMU5_PLL_P1P2_OFF 0 +#define PMU5_PLL_P1_MASK 0x0f000000 +#define PMU5_PLL_P1_SHIFT 24 +#define PMU5_PLL_P2_MASK 0x00f00000 +#define PMU5_PLL_P2_SHIFT 20 +#define PMU5_PLL_M14_OFF 1 +#define PMU5_PLL_MDIV_MASK 0x000000ff +#define PMU5_PLL_MDIV_WIDTH 8 +#define PMU5_PLL_NM5_OFF 2 +#define PMU5_PLL_NDIV_MASK 0xfff00000 +#define PMU5_PLL_NDIV_SHIFT 20 +#define PMU5_PLL_NDIV_MODE_MASK 0x000e0000 +#define PMU5_PLL_NDIV_MODE_SHIFT 17 +#define PMU5_PLL_FMAB_OFF 3 +#define PMU5_PLL_MRAT_MASK 0xf0000000 +#define PMU5_PLL_MRAT_SHIFT 28 +#define PMU5_PLL_ABRAT_MASK 0x08000000 +#define PMU5_PLL_ABRAT_SHIFT 27 +#define PMU5_PLL_FDIV_MASK 0x07ffffff +#define PMU5_PLL_PLLCTL_OFF 4 +#define PMU5_PLL_PCHI_OFF 5 +#define PMU5_PLL_PCHI_MASK 0x0000003f + + +#define PMU_XTALFREQ_REG_ILPCTR_MASK 0x00001FFF +#define PMU_XTALFREQ_REG_MEASURE_MASK 0x80000000 +#define PMU_XTALFREQ_REG_MEASURE_SHIFT 31 + + +#define PMU5_MAINPLL_CPU 1 +#define PMU5_MAINPLL_MEM 2 +#define PMU5_MAINPLL_SI 3 + +#define PMU7_PLL_PLLCTL7 7 +#define PMU7_PLL_CTL7_M4DIV_MASK 0xff000000 +#define PMU7_PLL_CTL7_M4DIV_SHIFT 24 +#define PMU7_PLL_CTL7_M4DIV_BY_6 6 +#define PMU7_PLL_CTL7_M4DIV_BY_12 0xc +#define PMU7_PLL_CTL7_M4DIV_BY_24 0x18 +#define PMU7_PLL_PLLCTL8 8 +#define PMU7_PLL_CTL8_M5DIV_MASK 0x000000ff +#define PMU7_PLL_CTL8_M5DIV_SHIFT 0 +#define PMU7_PLL_CTL8_M5DIV_BY_8 8 +#define PMU7_PLL_CTL8_M5DIV_BY_12 0xc +#define PMU7_PLL_CTL8_M5DIV_BY_24 0x18 +#define PMU7_PLL_CTL8_M6DIV_MASK 0x0000ff00 +#define PMU7_PLL_CTL8_M6DIV_SHIFT 8 +#define PMU7_PLL_CTL8_M6DIV_BY_12 0xc +#define PMU7_PLL_CTL8_M6DIV_BY_24 0x18 +#define PMU7_PLL_PLLCTL11 11 +#define PMU7_PLL_PLLCTL11_MASK 0xffffff00 +#define PMU7_PLL_PLLCTL11_VAL 0x22222200 + + +#define PMU4716_MAINPLL_PLL0 12 + + +#define PMU5356_MAINPLL_PLL0 0 +#define PMU5357_MAINPLL_PLL0 0 + + +#define RES4716_PROC_PLL_ON 0x00000040 +#define RES4716_PROC_HT_AVAIL 0x00000080 + + +#define CCTRL_471X_I2S_PINS_ENABLE 0x0080 + + + +#define CCTRL_5357_I2S_PINS_ENABLE 0x00040000 +#define CCTRL_5357_I2CSPI_PINS_ENABLE 0x00080000 + + +#define RES5354_EXT_SWITCHER_PWM 0 +#define RES5354_BB_SWITCHER_PWM 1 +#define RES5354_BB_SWITCHER_BURST 2 +#define RES5354_BB_EXT_SWITCHER_BURST 3 +#define RES5354_ILP_REQUEST 4 +#define RES5354_RADIO_SWITCHER_PWM 5 +#define RES5354_RADIO_SWITCHER_BURST 6 +#define RES5354_ROM_SWITCH 7 +#define RES5354_PA_REF_LDO 8 +#define RES5354_RADIO_LDO 9 +#define RES5354_AFE_LDO 10 +#define RES5354_PLL_LDO 11 +#define RES5354_BG_FILTBYP 12 +#define RES5354_TX_FILTBYP 13 +#define 
RES5354_RX_FILTBYP 14 +#define RES5354_XTAL_PU 15 +#define RES5354_XTAL_EN 16 +#define RES5354_BB_PLL_FILTBYP 17 +#define RES5354_RF_PLL_FILTBYP 18 +#define RES5354_BB_PLL_PU 19 + + +#define CCTRL5357_EXTPA (1<<14) +#define CCTRL5357_ANT_MUX_2o3 (1<<15) + + +#define RES4328_EXT_SWITCHER_PWM 0 +#define RES4328_BB_SWITCHER_PWM 1 +#define RES4328_BB_SWITCHER_BURST 2 +#define RES4328_BB_EXT_SWITCHER_BURST 3 +#define RES4328_ILP_REQUEST 4 +#define RES4328_RADIO_SWITCHER_PWM 5 +#define RES4328_RADIO_SWITCHER_BURST 6 +#define RES4328_ROM_SWITCH 7 +#define RES4328_PA_REF_LDO 8 +#define RES4328_RADIO_LDO 9 +#define RES4328_AFE_LDO 10 +#define RES4328_PLL_LDO 11 +#define RES4328_BG_FILTBYP 12 +#define RES4328_TX_FILTBYP 13 +#define RES4328_RX_FILTBYP 14 +#define RES4328_XTAL_PU 15 +#define RES4328_XTAL_EN 16 +#define RES4328_BB_PLL_FILTBYP 17 +#define RES4328_RF_PLL_FILTBYP 18 +#define RES4328_BB_PLL_PU 19 + + +#define RES4325_BUCK_BOOST_BURST 0 +#define RES4325_CBUCK_BURST 1 +#define RES4325_CBUCK_PWM 2 +#define RES4325_CLDO_CBUCK_BURST 3 +#define RES4325_CLDO_CBUCK_PWM 4 +#define RES4325_BUCK_BOOST_PWM 5 +#define RES4325_ILP_REQUEST 6 +#define RES4325_ABUCK_BURST 7 +#define RES4325_ABUCK_PWM 8 +#define RES4325_LNLDO1_PU 9 +#define RES4325_OTP_PU 10 +#define RES4325_LNLDO3_PU 11 +#define RES4325_LNLDO4_PU 12 +#define RES4325_XTAL_PU 13 +#define RES4325_ALP_AVAIL 14 +#define RES4325_RX_PWRSW_PU 15 +#define RES4325_TX_PWRSW_PU 16 +#define RES4325_RFPLL_PWRSW_PU 17 +#define RES4325_LOGEN_PWRSW_PU 18 +#define RES4325_AFE_PWRSW_PU 19 +#define RES4325_BBPLL_PWRSW_PU 20 +#define RES4325_HT_AVAIL 21 + + +#define RES4325B0_CBUCK_LPOM 1 +#define RES4325B0_CBUCK_BURST 2 +#define RES4325B0_CBUCK_PWM 3 +#define RES4325B0_CLDO_PU 4 + + +#define RES4325C1_LNLDO2_PU 12 + + +#define CST4325_SPROM_OTP_SEL_MASK 0x00000003 +#define CST4325_DEFCIS_SEL 0 +#define CST4325_SPROM_SEL 1 +#define CST4325_OTP_SEL 2 +#define CST4325_OTP_PWRDN 3 +#define CST4325_SDIO_USB_MODE_MASK 0x00000004 +#define CST4325_SDIO_USB_MODE_SHIFT 2 +#define CST4325_RCAL_VALID_MASK 0x00000008 +#define CST4325_RCAL_VALID_SHIFT 3 +#define CST4325_RCAL_VALUE_MASK 0x000001f0 +#define CST4325_RCAL_VALUE_SHIFT 4 +#define CST4325_PMUTOP_2B_MASK 0x00000200 +#define CST4325_PMUTOP_2B_SHIFT 9 + +#define RES4329_RESERVED0 0 +#define RES4329_CBUCK_LPOM 1 +#define RES4329_CBUCK_BURST 2 +#define RES4329_CBUCK_PWM 3 +#define RES4329_CLDO_PU 4 +#define RES4329_PALDO_PU 5 +#define RES4329_ILP_REQUEST 6 +#define RES4329_RESERVED7 7 +#define RES4329_RESERVED8 8 +#define RES4329_LNLDO1_PU 9 +#define RES4329_OTP_PU 10 +#define RES4329_RESERVED11 11 +#define RES4329_LNLDO2_PU 12 +#define RES4329_XTAL_PU 13 +#define RES4329_ALP_AVAIL 14 +#define RES4329_RX_PWRSW_PU 15 +#define RES4329_TX_PWRSW_PU 16 +#define RES4329_RFPLL_PWRSW_PU 17 +#define RES4329_LOGEN_PWRSW_PU 18 +#define RES4329_AFE_PWRSW_PU 19 +#define RES4329_BBPLL_PWRSW_PU 20 +#define RES4329_HT_AVAIL 21 + +#define CST4329_SPROM_OTP_SEL_MASK 0x00000003 +#define CST4329_DEFCIS_SEL 0 +#define CST4329_SPROM_SEL 1 +#define CST4329_OTP_SEL 2 +#define CST4329_OTP_PWRDN 3 +#define CST4329_SPI_SDIO_MODE_MASK 0x00000004 +#define CST4329_SPI_SDIO_MODE_SHIFT 2 + + +#define CST4312_SPROM_OTP_SEL_MASK 0x00000003 +#define CST4312_DEFCIS_SEL 0 +#define CST4312_SPROM_SEL 1 +#define CST4312_OTP_SEL 2 +#define CST4312_OTP_BAD 3 + + +#define RES4312_SWITCHER_BURST 0 +#define RES4312_SWITCHER_PWM 1 +#define RES4312_PA_REF_LDO 2 +#define RES4312_CORE_LDO_BURST 3 +#define RES4312_CORE_LDO_PWM 4 +#define RES4312_RADIO_LDO 5 +#define 
RES4312_ILP_REQUEST 6 +#define RES4312_BG_FILTBYP 7 +#define RES4312_TX_FILTBYP 8 +#define RES4312_RX_FILTBYP 9 +#define RES4312_XTAL_PU 10 +#define RES4312_ALP_AVAIL 11 +#define RES4312_BB_PLL_FILTBYP 12 +#define RES4312_RF_PLL_FILTBYP 13 +#define RES4312_HT_AVAIL 14 + + +#define RES4322_RF_LDO 0 +#define RES4322_ILP_REQUEST 1 +#define RES4322_XTAL_PU 2 +#define RES4322_ALP_AVAIL 3 +#define RES4322_SI_PLL_ON 4 +#define RES4322_HT_SI_AVAIL 5 +#define RES4322_PHY_PLL_ON 6 +#define RES4322_HT_PHY_AVAIL 7 +#define RES4322_OTP_PU 8 + + +#define CST4322_XTAL_FREQ_20_40MHZ 0x00000020 +#define CST4322_SPROM_OTP_SEL_MASK 0x000000c0 +#define CST4322_SPROM_OTP_SEL_SHIFT 6 +#define CST4322_NO_SPROM_OTP 0 +#define CST4322_SPROM_PRESENT 1 +#define CST4322_OTP_PRESENT 2 +#define CST4322_PCI_OR_USB 0x00000100 +#define CST4322_BOOT_MASK 0x00000600 +#define CST4322_BOOT_SHIFT 9 +#define CST4322_BOOT_FROM_SRAM 0 +#define CST4322_BOOT_FROM_ROM 1 +#define CST4322_BOOT_FROM_FLASH 2 +#define CST4322_BOOT_FROM_INVALID 3 +#define CST4322_ILP_DIV_EN 0x00000800 +#define CST4322_FLASH_TYPE_MASK 0x00001000 +#define CST4322_FLASH_TYPE_SHIFT 12 +#define CST4322_FLASH_TYPE_SHIFT_ST 0 +#define CST4322_FLASH_TYPE_SHIFT_ATMEL 1 +#define CST4322_ARM_TAP_SEL 0x00002000 +#define CST4322_RES_INIT_MODE_MASK 0x0000c000 +#define CST4322_RES_INIT_MODE_SHIFT 14 +#define CST4322_RES_INIT_MODE_ILPAVAIL 0 +#define CST4322_RES_INIT_MODE_ILPREQ 1 +#define CST4322_RES_INIT_MODE_ALPAVAIL 2 +#define CST4322_RES_INIT_MODE_HTAVAIL 3 +#define CST4322_PCIPLLCLK_GATING 0x00010000 +#define CST4322_CLK_SWITCH_PCI_TO_ALP 0x00020000 +#define CST4322_PCI_CARDBUS_MODE 0x00040000 + + +#define CCTRL43224_GPIO_TOGGLE 0x8000 +#define CCTRL_43224A0_12MA_LED_DRIVE 0x00F000F0 +#define CCTRL_43224B0_12MA_LED_DRIVE 0xF0 + + +#define RES43236_REGULATOR 0 +#define RES43236_ILP_REQUEST 1 +#define RES43236_XTAL_PU 2 +#define RES43236_ALP_AVAIL 3 +#define RES43236_SI_PLL_ON 4 +#define RES43236_HT_SI_AVAIL 5 + + +#define CCTRL43236_BT_COEXIST (1<<0) +#define CCTRL43236_SECI (1<<1) +#define CCTRL43236_EXT_LNA (1<<2) +#define CCTRL43236_ANT_MUX_2o3 (1<<3) +#define CCTRL43236_GSIO (1<<4) + + +#define CST43236_SFLASH_MASK 0x00000040 +#define CST43236_OTP_SEL_MASK 0x00000080 +#define CST43236_OTP_SEL_SHIFT 7 +#define CST43236_HSIC_MASK 0x00000100 +#define CST43236_BP_CLK 0x00000200 +#define CST43236_BOOT_MASK 0x00001800 +#define CST43236_BOOT_SHIFT 11 +#define CST43236_BOOT_FROM_SRAM 0 +#define CST43236_BOOT_FROM_ROM 1 +#define CST43236_BOOT_FROM_FLASH 2 +#define CST43236_BOOT_FROM_INVALID 3 + + +#define RES43237_REGULATOR 0 +#define RES43237_ILP_REQUEST 1 +#define RES43237_XTAL_PU 2 +#define RES43237_ALP_AVAIL 3 +#define RES43237_SI_PLL_ON 4 +#define RES43237_HT_SI_AVAIL 5 + + +#define CCTRL43237_BT_COEXIST (1<<0) +#define CCTRL43237_SECI (1<<1) +#define CCTRL43237_EXT_LNA (1<<2) +#define CCTRL43237_ANT_MUX_2o3 (1<<3) +#define CCTRL43237_GSIO (1<<4) + + +#define CST43237_SFLASH_MASK 0x00000040 +#define CST43237_OTP_SEL_MASK 0x00000080 +#define CST43237_OTP_SEL_SHIFT 7 +#define CST43237_HSIC_MASK 0x00000100 +#define CST43237_BP_CLK 0x00000200 +#define CST43237_BOOT_MASK 0x00001800 +#define CST43237_BOOT_SHIFT 11 +#define CST43237_BOOT_FROM_SRAM 0 +#define CST43237_BOOT_FROM_ROM 1 +#define CST43237_BOOT_FROM_FLASH 2 +#define CST43237_BOOT_FROM_INVALID 3 + + +#define RES43239_CBUCK_LPOM 0 +#define RES43239_CBUCK_BURST 1 +#define RES43239_CBUCK_LP_PWM 2 +#define RES43239_CBUCK_PWM 3 +#define RES43239_CLDO_PU 4 +#define RES43239_DIS_INT_RESET_PD 5 +#define 
RES43239_ILP_REQUEST 6 +#define RES43239_LNLDO_PU 7 +#define RES43239_LDO3P3_PU 8 +#define RES43239_OTP_PU 9 +#define RES43239_XTAL_PU 10 +#define RES43239_ALP_AVAIL 11 +#define RES43239_RADIO_PU 12 +#define RES43239_MACPHY_CLKAVAIL 23 +#define RES43239_HT_AVAIL 24 +#define RES43239_XOLDO_PU 25 +#define RES43239_WL_XTAL_CTL_SEL 26 +#define RES43239_SR_CLK_STABLE 27 +#define RES43239_SR_SAVE_RESTORE 28 +#define RES43239_SR_PHY_PIC 29 +#define RES43239_SR_PHY_PWR_SW 30 + + +#define CST43239_SPROM_MASK 0x00000002 +#define CST43239_SFLASH_MASK 0x00000004 +#define CST43239_RES_INIT_MODE_SHIFT 7 +#define CST43239_RES_INIT_MODE_MASK 0x000001f0 +#define CST43239_CHIPMODE_SDIOD(cs) ((cs) & (1 << 15)) +#define CST43239_CHIPMODE_USB20D(cs) (~(cs) & (1 << 15)) +#define CST43239_CHIPMODE_SDIO(cs) (((cs) & (1 << 0)) == 0) +#define CST43239_CHIPMODE_GSPI(cs) (((cs) & (1 << 0)) == (1 << 0)) + + +#define CCTRL43239_XTAL_STRENGTH(ctl) ((ctl & 0x3F) << 12) + + +#define RES4331_REGULATOR 0 +#define RES4331_ILP_REQUEST 1 +#define RES4331_XTAL_PU 2 +#define RES4331_ALP_AVAIL 3 +#define RES4331_SI_PLL_ON 4 +#define RES4331_HT_SI_AVAIL 5 + + +#define CCTRL4331_BT_COEXIST (1<<0) +#define CCTRL4331_SECI (1<<1) +#define CCTRL4331_EXT_LNA_G (1<<2) +#define CCTRL4331_SPROM_GPIO13_15 (1<<3) +#define CCTRL4331_EXTPA_EN (1<<4) +#define CCTRL4331_GPIOCLK_ON_SPROMCS (1<<5) +#define CCTRL4331_PCIE_MDIO_ON_SPROMCS (1<<6) +#define CCTRL4331_EXTPA_ON_GPIO2_5 (1<<7) +#define CCTRL4331_OVR_PIPEAUXCLKEN (1<<8) +#define CCTRL4331_OVR_PIPEAUXPWRDOWN (1<<9) +#define CCTRL4331_PCIE_AUXCLKEN (1<<10) +#define CCTRL4331_PCIE_PIPE_PLLDOWN (1<<11) +#define CCTRL4331_EXTPA_EN2 (1<<12) +#define CCTRL4331_EXT_LNA_A (1<<13) +#define CCTRL4331_BT_SHD0_ON_GPIO4 (1<<16) +#define CCTRL4331_BT_SHD1_ON_GPIO5 (1<<17) +#define CCTRL4331_EXTPA_ANA_EN (1<<24) + + +#define CST4331_XTAL_FREQ 0x00000001 +#define CST4331_SPROM_OTP_SEL_MASK 0x00000006 +#define CST4331_SPROM_OTP_SEL_SHIFT 1 +#define CST4331_SPROM_PRESENT 0x00000002 +#define CST4331_OTP_PRESENT 0x00000004 +#define CST4331_LDO_RF 0x00000008 +#define CST4331_LDO_PAR 0x00000010 + + +#define RES4315_CBUCK_LPOM 1 +#define RES4315_CBUCK_BURST 2 +#define RES4315_CBUCK_PWM 3 +#define RES4315_CLDO_PU 4 +#define RES4315_PALDO_PU 5 +#define RES4315_ILP_REQUEST 6 +#define RES4315_LNLDO1_PU 9 +#define RES4315_OTP_PU 10 +#define RES4315_LNLDO2_PU 12 +#define RES4315_XTAL_PU 13 +#define RES4315_ALP_AVAIL 14 +#define RES4315_RX_PWRSW_PU 15 +#define RES4315_TX_PWRSW_PU 16 +#define RES4315_RFPLL_PWRSW_PU 17 +#define RES4315_LOGEN_PWRSW_PU 18 +#define RES4315_AFE_PWRSW_PU 19 +#define RES4315_BBPLL_PWRSW_PU 20 +#define RES4315_HT_AVAIL 21 + + +#define CST4315_SPROM_OTP_SEL_MASK 0x00000003 +#define CST4315_DEFCIS_SEL 0x00000000 +#define CST4315_SPROM_SEL 0x00000001 +#define CST4315_OTP_SEL 0x00000002 +#define CST4315_OTP_PWRDN 0x00000003 +#define CST4315_SDIO_MODE 0x00000004 +#define CST4315_RCAL_VALID 0x00000008 +#define CST4315_RCAL_VALUE_MASK 0x000001f0 +#define CST4315_RCAL_VALUE_SHIFT 4 +#define CST4315_PALDO_EXTPNP 0x00000200 +#define CST4315_CBUCK_MODE_MASK 0x00000c00 +#define CST4315_CBUCK_MODE_BURST 0x00000400 +#define CST4315_CBUCK_MODE_LPBURST 0x00000c00 + + +#define RES4319_CBUCK_LPOM 1 +#define RES4319_CBUCK_BURST 2 +#define RES4319_CBUCK_PWM 3 +#define RES4319_CLDO_PU 4 +#define RES4319_PALDO_PU 5 +#define RES4319_ILP_REQUEST 6 +#define RES4319_LNLDO1_PU 9 +#define RES4319_OTP_PU 10 +#define RES4319_LNLDO2_PU 12 +#define RES4319_XTAL_PU 13 +#define RES4319_ALP_AVAIL 14 +#define RES4319_RX_PWRSW_PU 
15 +#define RES4319_TX_PWRSW_PU 16 +#define RES4319_RFPLL_PWRSW_PU 17 +#define RES4319_LOGEN_PWRSW_PU 18 +#define RES4319_AFE_PWRSW_PU 19 +#define RES4319_BBPLL_PWRSW_PU 20 +#define RES4319_HT_AVAIL 21 + + +#define CST4319_SPI_CPULESSUSB 0x00000001 +#define CST4319_SPI_CLK_POL 0x00000002 +#define CST4319_SPI_CLK_PH 0x00000008 +#define CST4319_SPROM_OTP_SEL_MASK 0x000000c0 +#define CST4319_SPROM_OTP_SEL_SHIFT 6 +#define CST4319_DEFCIS_SEL 0x00000000 +#define CST4319_SPROM_SEL 0x00000040 +#define CST4319_OTP_SEL 0x00000080 +#define CST4319_OTP_PWRDN 0x000000c0 +#define CST4319_SDIO_USB_MODE 0x00000100 +#define CST4319_REMAP_SEL_MASK 0x00000600 +#define CST4319_ILPDIV_EN 0x00000800 +#define CST4319_XTAL_PD_POL 0x00001000 +#define CST4319_LPO_SEL 0x00002000 +#define CST4319_RES_INIT_MODE 0x0000c000 +#define CST4319_PALDO_EXTPNP 0x00010000 +#define CST4319_CBUCK_MODE_MASK 0x00060000 +#define CST4319_CBUCK_MODE_BURST 0x00020000 +#define CST4319_CBUCK_MODE_LPBURST 0x00060000 +#define CST4319_RCAL_VALID 0x01000000 +#define CST4319_RCAL_VALUE_MASK 0x3e000000 +#define CST4319_RCAL_VALUE_SHIFT 25 + +#define PMU1_PLL0_CHIPCTL0 0 +#define PMU1_PLL0_CHIPCTL1 1 +#define PMU1_PLL0_CHIPCTL2 2 +#define CCTL_4319USB_XTAL_SEL_MASK 0x00180000 +#define CCTL_4319USB_XTAL_SEL_SHIFT 19 +#define CCTL_4319USB_48MHZ_PLL_SEL 1 +#define CCTL_4319USB_24MHZ_PLL_SEL 2 + + +#define RES4336_CBUCK_LPOM 0 +#define RES4336_CBUCK_BURST 1 +#define RES4336_CBUCK_LP_PWM 2 +#define RES4336_CBUCK_PWM 3 +#define RES4336_CLDO_PU 4 +#define RES4336_DIS_INT_RESET_PD 5 +#define RES4336_ILP_REQUEST 6 +#define RES4336_LNLDO_PU 7 +#define RES4336_LDO3P3_PU 8 +#define RES4336_OTP_PU 9 +#define RES4336_XTAL_PU 10 +#define RES4336_ALP_AVAIL 11 +#define RES4336_RADIO_PU 12 +#define RES4336_BG_PU 13 +#define RES4336_VREG1p4_PU_PU 14 +#define RES4336_AFE_PWRSW_PU 15 +#define RES4336_RX_PWRSW_PU 16 +#define RES4336_TX_PWRSW_PU 17 +#define RES4336_BB_PWRSW_PU 18 +#define RES4336_SYNTH_PWRSW_PU 19 +#define RES4336_MISC_PWRSW_PU 20 +#define RES4336_LOGEN_PWRSW_PU 21 +#define RES4336_BBPLL_PWRSW_PU 22 +#define RES4336_MACPHY_CLKAVAIL 23 +#define RES4336_HT_AVAIL 24 +#define RES4336_RSVD 25 + + +#define CST4336_SPI_MODE_MASK 0x00000001 +#define CST4336_SPROM_PRESENT 0x00000002 +#define CST4336_OTP_PRESENT 0x00000004 +#define CST4336_ARMREMAP_0 0x00000008 +#define CST4336_ILPDIV_EN_MASK 0x00000010 +#define CST4336_ILPDIV_EN_SHIFT 4 +#define CST4336_XTAL_PD_POL_MASK 0x00000020 +#define CST4336_XTAL_PD_POL_SHIFT 5 +#define CST4336_LPO_SEL_MASK 0x00000040 +#define CST4336_LPO_SEL_SHIFT 6 +#define CST4336_RES_INIT_MODE_MASK 0x00000180 +#define CST4336_RES_INIT_MODE_SHIFT 7 +#define CST4336_CBUCK_MODE_MASK 0x00000600 +#define CST4336_CBUCK_MODE_SHIFT 9 + + +#define PCTL_4336_SERIAL_ENAB (1 << 24) + + +#define RES4330_CBUCK_LPOM 0 +#define RES4330_CBUCK_BURST 1 +#define RES4330_CBUCK_LP_PWM 2 +#define RES4330_CBUCK_PWM 3 +#define RES4330_CLDO_PU 4 +#define RES4330_DIS_INT_RESET_PD 5 +#define RES4330_ILP_REQUEST 6 +#define RES4330_LNLDO_PU 7 +#define RES4330_LDO3P3_PU 8 +#define RES4330_OTP_PU 9 +#define RES4330_XTAL_PU 10 +#define RES4330_ALP_AVAIL 11 +#define RES4330_RADIO_PU 12 +#define RES4330_BG_PU 13 +#define RES4330_VREG1p4_PU_PU 14 +#define RES4330_AFE_PWRSW_PU 15 +#define RES4330_RX_PWRSW_PU 16 +#define RES4330_TX_PWRSW_PU 17 +#define RES4330_BB_PWRSW_PU 18 +#define RES4330_SYNTH_PWRSW_PU 19 +#define RES4330_MISC_PWRSW_PU 20 +#define RES4330_LOGEN_PWRSW_PU 21 +#define RES4330_BBPLL_PWRSW_PU 22 +#define RES4330_MACPHY_CLKAVAIL 23 +#define 
RES4330_HT_AVAIL 24 +#define RES4330_5gRX_PWRSW_PU 25 +#define RES4330_5gTX_PWRSW_PU 26 +#define RES4330_5g_LOGEN_PWRSW_PU 27 + + +#define CST4330_CHIPMODE_SDIOD(cs) (((cs) & 0x7) < 6) +#define CST4330_CHIPMODE_USB20D(cs) (((cs) & 0x7) >= 6) +#define CST4330_CHIPMODE_SDIO(cs) (((cs) & 0x4) == 0) +#define CST4330_CHIPMODE_GSPI(cs) (((cs) & 0x6) == 4) +#define CST4330_CHIPMODE_USB(cs) (((cs) & 0x7) == 6) +#define CST4330_CHIPMODE_USBDA(cs) (((cs) & 0x7) == 7) +#define CST4330_OTP_PRESENT 0x00000010 +#define CST4330_LPO_AUTODET_EN 0x00000020 +#define CST4330_ARMREMAP_0 0x00000040 +#define CST4330_SPROM_PRESENT 0x00000080 +#define CST4330_ILPDIV_EN 0x00000100 +#define CST4330_LPO_SEL 0x00000200 +#define CST4330_RES_INIT_MODE_SHIFT 10 +#define CST4330_RES_INIT_MODE_MASK 0x00000c00 +#define CST4330_CBUCK_MODE_SHIFT 12 +#define CST4330_CBUCK_MODE_MASK 0x00003000 +#define CST4330_CBUCK_POWER_OK 0x00004000 +#define CST4330_BB_PLL_LOCKED 0x00008000 +#define SOCDEVRAM_4330_BP_ADDR 0x1E000000 +#define SOCDEVRAM_4330_ARM_ADDR 0x00800000 + + +#define PCTL_4330_SERIAL_ENAB (1 << 24) + + +#define CCTRL_4330_GPIO_SEL 0x00000001 +#define CCTRL_4330_ERCX_SEL 0x00000002 +#define CCTRL_4330_SDIO_HOST_WAKE 0x00000004 +#define CCTRL_4330_JTAG_DISABLE 0x00000008 + + +#define CCTRL_43239_GPIO_SEL 0x00000002 +#define CCTRL_43239_SDIO_HOST_WAKE 0x00000004 + +#define RES4313_BB_PU_RSRC 0 +#define RES4313_ILP_REQ_RSRC 1 +#define RES4313_XTAL_PU_RSRC 2 +#define RES4313_ALP_AVAIL_RSRC 3 +#define RES4313_RADIO_PU_RSRC 4 +#define RES4313_BG_PU_RSRC 5 +#define RES4313_VREG1P4_PU_RSRC 6 +#define RES4313_AFE_PWRSW_RSRC 7 +#define RES4313_RX_PWRSW_RSRC 8 +#define RES4313_TX_PWRSW_RSRC 9 +#define RES4313_BB_PWRSW_RSRC 10 +#define RES4313_SYNTH_PWRSW_RSRC 11 +#define RES4313_MISC_PWRSW_RSRC 12 +#define RES4313_BB_PLL_PWRSW_RSRC 13 +#define RES4313_HT_AVAIL_RSRC 14 +#define RES4313_MACPHY_CLK_AVAIL_RSRC 15 + + +#define CST4313_SPROM_PRESENT 1 +#define CST4313_OTP_PRESENT 2 +#define CST4313_SPROM_OTP_SEL_MASK 0x00000002 +#define CST4313_SPROM_OTP_SEL_SHIFT 0 + + +#define CCTRL_4313_12MA_LED_DRIVE 0x00000007 + + +#define RES43228_NOT_USED 0 +#define RES43228_ILP_REQUEST 1 +#define RES43228_XTAL_PU 2 +#define RES43228_ALP_AVAIL 3 +#define RES43228_PLL_EN 4 +#define RES43228_HT_PHY_AVAIL 5 + + +#define CST43228_ILP_DIV_EN 0x1 +#define CST43228_OTP_PRESENT 0x2 +#define CST43228_SERDES_REFCLK_PADSEL 0x4 +#define CST43228_SDIO_MODE 0x8 +#define CST43228_SDIO_OTP_PRESENT 0x10 +#define CST43228_SDIO_RESET 0x20 + + +#define PMU_MAX_TRANSITION_DLY 15000 + + +#define PMURES_UP_TRANSITION 2 + + + +#define SECI_MODE_UART 0x0 +#define SECI_MODE_SECI 0x1 +#define SECI_MODE_LEGACY_3WIRE_BT 0x2 +#define SECI_MODE_LEGACY_3WIRE_WLAN 0x3 +#define SECI_MODE_HALF_SECI 0x4 + +#define SECI_RESET (1 << 0) +#define SECI_RESET_BAR_UART (1 << 1) +#define SECI_ENAB_SECI_ECI (1 << 2) +#define SECI_ENAB_SECIOUT_DIS (1 << 3) +#define SECI_MODE_MASK 0x7 +#define SECI_MODE_SHIFT 4 +#define SECI_UPD_SECI (1 << 7) + +#define SECI_SLIP_ESC_CHAR 0xDB +#define SECI_SIGNOFF_0 SECI_SLIP_ESC_CHAR +#define SECI_SIGNOFF_1 0 +#define SECI_REFRESH_REQ 0xDA + + +#define CLKCTL_STS_SECI_CLK_REQ (1 << 8) +#define CLKCTL_STS_SECI_CLK_AVAIL (1 << 24) + +#define SECI_UART_MSR_CTS_STATE (1 << 0) +#define SECI_UART_MSR_RTS_STATE (1 << 1) +#define SECI_UART_SECI_IN_STATE (1 << 2) +#define SECI_UART_SECI_IN2_STATE (1 << 3) + + +#define SECI_UART_LCR_STOP_BITS (1 << 0) +#define SECI_UART_LCR_PARITY_EN (1 << 1) +#define SECI_UART_LCR_PARITY (1 << 2) +#define SECI_UART_LCR_RX_EN (1 << 3) 
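The SECI mode field and enable bits above are packed into a single control word; the following is only an illustrative sketch of how they might be combined (the helper name and the choice of full-SECI mode are the editor's assumptions, not part of this header, and uint32 comes from the driver's typedefs.h):

/* Sketch: compose a SECI control word that enables the SECI/ECI block and
 * selects full SECI mode, assuming the mode field occupies the bits covered
 * by SECI_MODE_MASK shifted by SECI_MODE_SHIFT. */
static inline uint32 seci_ctrl_word(void)
{
	uint32 val = 0;

	val |= SECI_ENAB_SECI_ECI;                                   /* enable SECI/ECI block */
	val |= (SECI_MODE_SECI & SECI_MODE_MASK) << SECI_MODE_SHIFT; /* mode = full SECI */
	val |= SECI_UPD_SECI;                                        /* latch the new mode */
	return val;
}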
+#define SECI_UART_LCR_LBRK_CTRL (1 << 4) +#define SECI_UART_LCR_TXO_EN (1 << 5) +#define SECI_UART_LCR_RTSO_EN (1 << 6) +#define SECI_UART_LCR_SLIPMODE_EN (1 << 7) +#define SECI_UART_LCR_RXCRC_CHK (1 << 8) +#define SECI_UART_LCR_TXCRC_INV (1 << 9) +#define SECI_UART_LCR_TXCRC_LSBF (1 << 10) +#define SECI_UART_LCR_TXCRC_EN (1 << 11) + +#define SECI_UART_MCR_TX_EN (1 << 0) +#define SECI_UART_MCR_PRTS (1 << 1) +#define SECI_UART_MCR_SWFLCTRL_EN (1 << 2) +#define SECI_UART_MCR_HIGHRATE_EN (1 << 3) +#define SECI_UART_MCR_LOOPBK_EN (1 << 4) +#define SECI_UART_MCR_AUTO_RTS (1 << 5) +#define SECI_UART_MCR_AUTO_TX_DIS (1 << 6) +#define SECI_UART_MCR_BAUD_ADJ_EN (1 << 7) +#define SECI_UART_MCR_XONOFF_RPT (1 << 9) + + + + +#define ECI_BW_20 0x0 +#define ECI_BW_25 0x1 +#define ECI_BW_30 0x2 +#define ECI_BW_35 0x3 +#define ECI_BW_40 0x4 +#define ECI_BW_45 0x5 +#define ECI_BW_50 0x6 +#define ECI_BW_ALL 0x7 + + +#define WLAN_NUM_ANT1 TXANT_0 +#define WLAN_NUM_ANT2 TXANT_1 + +#endif diff --git a/drivers/net/wireless/bcmdhd/include/sbconfig.h b/drivers/net/wireless/bcmdhd/include/sbconfig.h new file mode 100644 index 0000000000000..f45351a586cb1 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/sbconfig.h @@ -0,0 +1,276 @@ +/* + * Broadcom SiliconBackplane hardware register definitions. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: sbconfig.h 277737 2011-08-16 17:54:59Z $ + */ + + +#ifndef _SBCONFIG_H +#define _SBCONFIG_H + + +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif + + +#define SB_BUS_SIZE 0x10000 +#define SB_BUS_BASE(b) (SI_ENUM_BASE + (b) * SB_BUS_SIZE) +#define SB_BUS_MAXCORES (SB_BUS_SIZE / SI_CORE_SIZE) + + +#define SBCONFIGOFF 0xf00 +#define SBCONFIGSIZE 256 + +#define SBIPSFLAG 0x08 +#define SBTPSFLAG 0x18 +#define SBTMERRLOGA 0x48 +#define SBTMERRLOG 0x50 +#define SBADMATCH3 0x60 +#define SBADMATCH2 0x68 +#define SBADMATCH1 0x70 +#define SBIMSTATE 0x90 +#define SBINTVEC 0x94 +#define SBTMSTATELOW 0x98 +#define SBTMSTATEHIGH 0x9c +#define SBBWA0 0xa0 +#define SBIMCONFIGLOW 0xa8 +#define SBIMCONFIGHIGH 0xac +#define SBADMATCH0 0xb0 +#define SBTMCONFIGLOW 0xb8 +#define SBTMCONFIGHIGH 0xbc +#define SBBCONFIG 0xc0 +#define SBBSTATE 0xc8 +#define SBACTCNFG 0xd8 +#define SBFLAGST 0xe8 +#define SBIDLOW 0xf8 +#define SBIDHIGH 0xfc + + + +#define SBIMERRLOGA 0xea8 +#define SBIMERRLOG 0xeb0 +#define SBTMPORTCONNID0 0xed8 +#define SBTMPORTLOCK0 0xef8 + +#ifndef _LANGUAGE_ASSEMBLY + +typedef volatile struct _sbconfig { + uint32 PAD[2]; + uint32 sbipsflag; + uint32 PAD[3]; + uint32 sbtpsflag; + uint32 PAD[11]; + uint32 sbtmerrloga; + uint32 PAD; + uint32 sbtmerrlog; + uint32 PAD[3]; + uint32 sbadmatch3; + uint32 PAD; + uint32 sbadmatch2; + uint32 PAD; + uint32 sbadmatch1; + uint32 PAD[7]; + uint32 sbimstate; + uint32 sbintvec; + uint32 sbtmstatelow; + uint32 sbtmstatehigh; + uint32 sbbwa0; + uint32 PAD; + uint32 sbimconfiglow; + uint32 sbimconfighigh; + uint32 sbadmatch0; + uint32 PAD; + uint32 sbtmconfiglow; + uint32 sbtmconfighigh; + uint32 sbbconfig; + uint32 PAD; + uint32 sbbstate; + uint32 PAD[3]; + uint32 sbactcnfg; + uint32 PAD[3]; + uint32 sbflagst; + uint32 PAD[3]; + uint32 sbidlow; + uint32 sbidhigh; +} sbconfig_t; + +#endif + + +#define SBIPS_INT1_MASK 0x3f +#define SBIPS_INT1_SHIFT 0 +#define SBIPS_INT2_MASK 0x3f00 +#define SBIPS_INT2_SHIFT 8 +#define SBIPS_INT3_MASK 0x3f0000 +#define SBIPS_INT3_SHIFT 16 +#define SBIPS_INT4_MASK 0x3f000000 +#define SBIPS_INT4_SHIFT 24 + + +#define SBTPS_NUM0_MASK 0x3f +#define SBTPS_F0EN0 0x40 + + +#define SBTMEL_CM 0x00000007 +#define SBTMEL_CI 0x0000ff00 +#define SBTMEL_EC 0x0f000000 +#define SBTMEL_ME 0x80000000 + + +#define SBIM_PC 0xf +#define SBIM_AP_MASK 0x30 +#define SBIM_AP_BOTH 0x00 +#define SBIM_AP_TS 0x10 +#define SBIM_AP_TK 0x20 +#define SBIM_AP_RSV 0x30 +#define SBIM_IBE 0x20000 +#define SBIM_TO 0x40000 +#define SBIM_BY 0x01800000 +#define SBIM_RJ 0x02000000 + + +#define SBTML_RESET 0x0001 +#define SBTML_REJ_MASK 0x0006 +#define SBTML_REJ 0x0002 +#define SBTML_TMPREJ 0x0004 + +#define SBTML_SICF_SHIFT 16 + + +#define SBTMH_SERR 0x0001 +#define SBTMH_INT 0x0002 +#define SBTMH_BUSY 0x0004 +#define SBTMH_TO 0x0020 + +#define SBTMH_SISF_SHIFT 16 + + +#define SBBWA_TAB0_MASK 0xffff +#define SBBWA_TAB1_MASK 0xffff +#define SBBWA_TAB1_SHIFT 16 + + +#define SBIMCL_STO_MASK 0x7 +#define SBIMCL_RTO_MASK 0x70 +#define SBIMCL_RTO_SHIFT 4 +#define SBIMCL_CID_MASK 0xff0000 +#define SBIMCL_CID_SHIFT 16 + + +#define SBIMCH_IEM_MASK 0xc +#define SBIMCH_TEM_MASK 0x30 +#define SBIMCH_TEM_SHIFT 4 +#define SBIMCH_BEM_MASK 0xc0 +#define SBIMCH_BEM_SHIFT 6 + + +#define SBAM_TYPE_MASK 0x3 +#define SBAM_AD64 0x4 +#define SBAM_ADINT0_MASK 0xf8 +#define SBAM_ADINT0_SHIFT 3 +#define SBAM_ADINT1_MASK 0x1f8 +#define SBAM_ADINT1_SHIFT 3 +#define SBAM_ADINT2_MASK 0x1f8 +#define SBAM_ADINT2_SHIFT 3 
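The Sonics config registers above occupy the last SBCONFIGSIZE bytes of a core's register window; as a minimal sketch (assuming the caller already has the core's window mapped, and that uint8/uint32 come from the driver's typedefs.h), the reset bit can be tested like this:

/* Sketch: overlay sbconfig_t at SBCONFIGOFF inside an already-mapped core
 * register window and report whether the core is currently held in reset. */
static inline int sb_core_in_reset(volatile void *core_regs)
{
	volatile sbconfig_t *sb =
	    (volatile sbconfig_t *)((volatile uint8 *)core_regs + SBCONFIGOFF);

	return (sb->sbtmstatelow & SBTML_RESET) != 0;
}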
+#define SBAM_ADEN 0x400 +#define SBAM_ADNEG 0x800 +#define SBAM_BASE0_MASK 0xffffff00 +#define SBAM_BASE0_SHIFT 8 +#define SBAM_BASE1_MASK 0xfffff000 +#define SBAM_BASE1_SHIFT 12 +#define SBAM_BASE2_MASK 0xffff0000 +#define SBAM_BASE2_SHIFT 16 + + +#define SBTMCL_CD_MASK 0xff +#define SBTMCL_CO_MASK 0xf800 +#define SBTMCL_CO_SHIFT 11 +#define SBTMCL_IF_MASK 0xfc0000 +#define SBTMCL_IF_SHIFT 18 +#define SBTMCL_IM_MASK 0x3000000 +#define SBTMCL_IM_SHIFT 24 + + +#define SBTMCH_BM_MASK 0x3 +#define SBTMCH_RM_MASK 0x3 +#define SBTMCH_RM_SHIFT 2 +#define SBTMCH_SM_MASK 0x30 +#define SBTMCH_SM_SHIFT 4 +#define SBTMCH_EM_MASK 0x300 +#define SBTMCH_EM_SHIFT 8 +#define SBTMCH_IM_MASK 0xc00 +#define SBTMCH_IM_SHIFT 10 + + +#define SBBC_LAT_MASK 0x3 +#define SBBC_MAX0_MASK 0xf0000 +#define SBBC_MAX0_SHIFT 16 +#define SBBC_MAX1_MASK 0xf00000 +#define SBBC_MAX1_SHIFT 20 + + +#define SBBS_SRD 0x1 +#define SBBS_HRD 0x2 + + +#define SBIDL_CS_MASK 0x3 +#define SBIDL_AR_MASK 0x38 +#define SBIDL_AR_SHIFT 3 +#define SBIDL_SYNCH 0x40 +#define SBIDL_INIT 0x80 +#define SBIDL_MINLAT_MASK 0xf00 +#define SBIDL_MINLAT_SHIFT 8 +#define SBIDL_MAXLAT 0xf000 +#define SBIDL_MAXLAT_SHIFT 12 +#define SBIDL_FIRST 0x10000 +#define SBIDL_CW_MASK 0xc0000 +#define SBIDL_CW_SHIFT 18 +#define SBIDL_TP_MASK 0xf00000 +#define SBIDL_TP_SHIFT 20 +#define SBIDL_IP_MASK 0xf000000 +#define SBIDL_IP_SHIFT 24 +#define SBIDL_RV_MASK 0xf0000000 +#define SBIDL_RV_SHIFT 28 +#define SBIDL_RV_2_2 0x00000000 +#define SBIDL_RV_2_3 0x10000000 + + +#define SBIDH_RC_MASK 0x000f +#define SBIDH_RCE_MASK 0x7000 +#define SBIDH_RCE_SHIFT 8 +#define SBCOREREV(sbidh) \ + ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK)) +#define SBIDH_CC_MASK 0x8ff0 +#define SBIDH_CC_SHIFT 4 +#define SBIDH_VC_MASK 0xffff0000 +#define SBIDH_VC_SHIFT 16 + +#define SB_COMMIT 0xfd8 + + +#define SB_VEND_BCM 0x4243 + +#endif diff --git a/drivers/net/wireless/bcmdhd/include/sbhnddma.h b/drivers/net/wireless/bcmdhd/include/sbhnddma.h new file mode 100644 index 0000000000000..77c413f75f0d4 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/sbhnddma.h @@ -0,0 +1,327 @@ +/* + * Generic Broadcom Home Networking Division (HND) DMA engine HW interface + * This supports the following chips: BCM42xx, 44xx, 47xx . + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: sbhnddma.h 278779 2011-08-19 22:07:18Z $ + */ + + +#ifndef _sbhnddma_h_ +#define _sbhnddma_h_ + + + + + + + +typedef volatile struct { + uint32 control; + uint32 addr; + uint32 ptr; + uint32 status; +} dma32regs_t; + +typedef volatile struct { + dma32regs_t xmt; + dma32regs_t rcv; +} dma32regp_t; + +typedef volatile struct { + uint32 fifoaddr; + uint32 fifodatalow; + uint32 fifodatahigh; + uint32 pad; +} dma32diag_t; + + +typedef volatile struct { + uint32 ctrl; + uint32 addr; +} dma32dd_t; + + +#define D32RINGALIGN_BITS 12 +#define D32MAXRINGSZ (1 << D32RINGALIGN_BITS) +#define D32RINGALIGN (1 << D32RINGALIGN_BITS) + +#define D32MAXDD (D32MAXRINGSZ / sizeof (dma32dd_t)) + + +#define XC_XE ((uint32)1 << 0) +#define XC_SE ((uint32)1 << 1) +#define XC_LE ((uint32)1 << 2) +#define XC_FL ((uint32)1 << 4) +#define XC_PD ((uint32)1 << 11) +#define XC_AE ((uint32)3 << 16) +#define XC_AE_SHIFT 16 +#define XC_BL_MASK 0x001C0000 +#define XC_BL_SHIFT 18 + + +#define XP_LD_MASK 0xfff + + +#define XS_CD_MASK 0x0fff +#define XS_XS_MASK 0xf000 +#define XS_XS_SHIFT 12 +#define XS_XS_DISABLED 0x0000 +#define XS_XS_ACTIVE 0x1000 +#define XS_XS_IDLE 0x2000 +#define XS_XS_STOPPED 0x3000 +#define XS_XS_SUSP 0x4000 +#define XS_XE_MASK 0xf0000 +#define XS_XE_SHIFT 16 +#define XS_XE_NOERR 0x00000 +#define XS_XE_DPE 0x10000 +#define XS_XE_DFU 0x20000 +#define XS_XE_BEBR 0x30000 +#define XS_XE_BEDA 0x40000 +#define XS_AD_MASK 0xfff00000 +#define XS_AD_SHIFT 20 + + +#define RC_RE ((uint32)1 << 0) +#define RC_RO_MASK 0xfe +#define RC_RO_SHIFT 1 +#define RC_FM ((uint32)1 << 8) +#define RC_SH ((uint32)1 << 9) +#define RC_OC ((uint32)1 << 10) +#define RC_PD ((uint32)1 << 11) +#define RC_AE ((uint32)3 << 16) +#define RC_AE_SHIFT 16 +#define RC_BL_MASK 0x001C0000 +#define RC_BL_SHIFT 18 + + +#define RP_LD_MASK 0xfff + + +#define RS_CD_MASK 0x0fff +#define RS_RS_MASK 0xf000 +#define RS_RS_SHIFT 12 +#define RS_RS_DISABLED 0x0000 +#define RS_RS_ACTIVE 0x1000 +#define RS_RS_IDLE 0x2000 +#define RS_RS_STOPPED 0x3000 +#define RS_RE_MASK 0xf0000 +#define RS_RE_SHIFT 16 +#define RS_RE_NOERR 0x00000 +#define RS_RE_DPE 0x10000 +#define RS_RE_DFO 0x20000 +#define RS_RE_BEBW 0x30000 +#define RS_RE_BEDA 0x40000 +#define RS_AD_MASK 0xfff00000 +#define RS_AD_SHIFT 20 + + +#define FA_OFF_MASK 0xffff +#define FA_SEL_MASK 0xf0000 +#define FA_SEL_SHIFT 16 +#define FA_SEL_XDD 0x00000 +#define FA_SEL_XDP 0x10000 +#define FA_SEL_RDD 0x40000 +#define FA_SEL_RDP 0x50000 +#define FA_SEL_XFD 0x80000 +#define FA_SEL_XFP 0x90000 +#define FA_SEL_RFD 0xc0000 +#define FA_SEL_RFP 0xd0000 +#define FA_SEL_RSD 0xe0000 +#define FA_SEL_RSP 0xf0000 + + +#define CTRL_BC_MASK 0x00001fff +#define CTRL_AE ((uint32)3 << 16) +#define CTRL_AE_SHIFT 16 +#define CTRL_PARITY ((uint32)3 << 18) +#define CTRL_EOT ((uint32)1 << 28) +#define CTRL_IOC ((uint32)1 << 29) +#define CTRL_EOF ((uint32)1 << 30) +#define CTRL_SOF ((uint32)1 << 31) + + +#define CTRL_CORE_MASK 0x0ff00000 + + + + +typedef volatile struct { + uint32 control; + uint32 ptr; + uint32 addrlow; + uint32 addrhigh; + uint32 status0; + uint32 status1; +} dma64regs_t; + +typedef volatile struct { + dma64regs_t tx; + dma64regs_t rx; +} dma64regp_t; + +typedef volatile struct { + uint32 fifoaddr; + uint32 fifodatalow; + uint32 fifodatahigh; + uint32 pad; +} dma64diag_t; + + +typedef volatile struct { + uint32 ctrl1; + uint32 ctrl2; + uint32 addrlow; + uint32 addrhigh; +} dma64dd_t; + + +#define D64RINGALIGN_BITS 13 +#define D64MAXRINGSZ (1 << D64RINGALIGN_BITS) +#define D64RINGALIGN (1 << 
D64RINGALIGN_BITS) + +#define D64MAXDD (D64MAXRINGSZ / sizeof (dma64dd_t)) + + +#define D64_DEF_USBBURSTLEN 2 +#define D64_DEF_SDIOBURSTLEN 1 + + +#define D64_XC_XE 0x00000001 +#define D64_XC_SE 0x00000002 +#define D64_XC_LE 0x00000004 +#define D64_XC_FL 0x00000010 +#define D64_XC_PD 0x00000800 +#define D64_XC_AE 0x00030000 +#define D64_XC_AE_SHIFT 16 +#define D64_XC_BL_MASK 0x001C0000 +#define D64_XC_BL_SHIFT 18 + + +#define D64_XP_LD_MASK 0x00001fff + + +#define D64_XS0_CD_MASK 0x00001fff +#define D64_XS0_XS_MASK 0xf0000000 +#define D64_XS0_XS_SHIFT 28 +#define D64_XS0_XS_DISABLED 0x00000000 +#define D64_XS0_XS_ACTIVE 0x10000000 +#define D64_XS0_XS_IDLE 0x20000000 +#define D64_XS0_XS_STOPPED 0x30000000 +#define D64_XS0_XS_SUSP 0x40000000 + +#define D64_XS1_AD_MASK 0x00001fff +#define D64_XS1_XE_MASK 0xf0000000 +#define D64_XS1_XE_SHIFT 28 +#define D64_XS1_XE_NOERR 0x00000000 +#define D64_XS1_XE_DPE 0x10000000 +#define D64_XS1_XE_DFU 0x20000000 +#define D64_XS1_XE_DTE 0x30000000 +#define D64_XS1_XE_DESRE 0x40000000 +#define D64_XS1_XE_COREE 0x50000000 + + +#define D64_RC_RE 0x00000001 +#define D64_RC_RO_MASK 0x000000fe +#define D64_RC_RO_SHIFT 1 +#define D64_RC_FM 0x00000100 +#define D64_RC_SH 0x00000200 +#define D64_RC_OC 0x00000400 +#define D64_RC_PD 0x00000800 +#define D64_RC_AE 0x00030000 +#define D64_RC_AE_SHIFT 16 +#define D64_RC_BL_MASK 0x001C0000 +#define D64_RC_BL_SHIFT 18 + + +#define DMA_CTRL_PEN (1 << 0) +#define DMA_CTRL_ROC (1 << 1) +#define DMA_CTRL_RXMULTI (1 << 2) +#define DMA_CTRL_UNFRAMED (1 << 3) +#define DMA_CTRL_USB_BOUNDRY4KB_WAR (1 << 4) + + +#define D64_RP_LD_MASK 0x00001fff + + +#define D64_RS0_CD_MASK 0x00001fff +#define D64_RS0_RS_MASK 0xf0000000 +#define D64_RS0_RS_SHIFT 28 +#define D64_RS0_RS_DISABLED 0x00000000 +#define D64_RS0_RS_ACTIVE 0x10000000 +#define D64_RS0_RS_IDLE 0x20000000 +#define D64_RS0_RS_STOPPED 0x30000000 +#define D64_RS0_RS_SUSP 0x40000000 + +#define D64_RS1_AD_MASK 0x0001ffff +#define D64_RS1_RE_MASK 0xf0000000 +#define D64_RS1_RE_SHIFT 28 +#define D64_RS1_RE_NOERR 0x00000000 +#define D64_RS1_RE_DPO 0x10000000 +#define D64_RS1_RE_DFU 0x20000000 +#define D64_RS1_RE_DTE 0x30000000 +#define D64_RS1_RE_DESRE 0x40000000 +#define D64_RS1_RE_COREE 0x50000000 + + +#define D64_FA_OFF_MASK 0xffff +#define D64_FA_SEL_MASK 0xf0000 +#define D64_FA_SEL_SHIFT 16 +#define D64_FA_SEL_XDD 0x00000 +#define D64_FA_SEL_XDP 0x10000 +#define D64_FA_SEL_RDD 0x40000 +#define D64_FA_SEL_RDP 0x50000 +#define D64_FA_SEL_XFD 0x80000 +#define D64_FA_SEL_XFP 0x90000 +#define D64_FA_SEL_RFD 0xc0000 +#define D64_FA_SEL_RFP 0xd0000 +#define D64_FA_SEL_RSD 0xe0000 +#define D64_FA_SEL_RSP 0xf0000 + + +#define D64_CTRL_COREFLAGS 0x0ff00000 +#define D64_CTRL1_EOT ((uint32)1 << 28) +#define D64_CTRL1_IOC ((uint32)1 << 29) +#define D64_CTRL1_EOF ((uint32)1 << 30) +#define D64_CTRL1_SOF ((uint32)1 << 31) + + +#define D64_CTRL2_BC_MASK 0x00007fff +#define D64_CTRL2_AE 0x00030000 +#define D64_CTRL2_AE_SHIFT 16 +#define D64_CTRL2_PARITY 0x00040000 + + +#define D64_CTRL_CORE_MASK 0x0ff00000 + +#define D64_RX_FRM_STS_LEN 0x0000ffff +#define D64_RX_FRM_STS_OVFL 0x00800000 +#define D64_RX_FRM_STS_DSCRCNT 0x0f000000 +#define D64_RX_FRM_STS_DATATYPE 0xf0000000 + + +typedef volatile struct { + uint16 len; + uint16 flags; +} dma_rxh_t; + +#endif diff --git a/drivers/net/wireless/bcmdhd/include/sbpcmcia.h b/drivers/net/wireless/bcmdhd/include/sbpcmcia.h new file mode 100644 index 0000000000000..d84f69ab5617a --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/sbpcmcia.h @@ -0,0 +1,109 @@ 
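Before moving on to the PCMCIA core definitions, a minimal sketch of how the 64-bit DMA descriptor flags from sbhnddma.h fit together when posting a single-buffer frame (ring management, address extension and parity handling are deliberately omitted, and the helper name is the editor's):

/* Sketch: fill one dma64dd_t for a frame held in a single buffer.
 * pa_low/pa_high are the buffer's physical address halves, len its length. */
static inline void dma64_fill_dd(volatile dma64dd_t *dd,
                                 uint32 pa_low, uint32 pa_high, uint32 len)
{
	dd->addrlow  = pa_low;
	dd->addrhigh = pa_high;
	dd->ctrl2    = len & D64_CTRL2_BC_MASK;       /* byte count */
	dd->ctrl1    = D64_CTRL1_SOF | D64_CTRL1_EOF  /* whole frame in one buffer */
	             | D64_CTRL1_IOC;                 /* interrupt on completion */
}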
+/* + * BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: sbpcmcia.h 277737 2011-08-16 17:54:59Z $ + */ + + +#ifndef _SBPCMCIA_H +#define _SBPCMCIA_H + + + + +#define PCMCIA_FCR (0x700 / 2) + +#define FCR0_OFF 0 +#define FCR1_OFF (0x40 / 2) +#define FCR2_OFF (0x80 / 2) +#define FCR3_OFF (0xc0 / 2) + +#define PCMCIA_FCR0 (0x700 / 2) +#define PCMCIA_FCR1 (0x740 / 2) +#define PCMCIA_FCR2 (0x780 / 2) +#define PCMCIA_FCR3 (0x7c0 / 2) + + + +#define PCMCIA_COR 0 + +#define COR_RST 0x80 +#define COR_LEV 0x40 +#define COR_IRQEN 0x04 +#define COR_BLREN 0x01 +#define COR_FUNEN 0x01 + + +#define PCICIA_FCSR (2 / 2) +#define PCICIA_PRR (4 / 2) +#define PCICIA_SCR (6 / 2) +#define PCICIA_ESR (8 / 2) + + +#define PCM_MEMOFF 0x0000 +#define F0_MEMOFF 0x1000 +#define F1_MEMOFF 0x2000 +#define F2_MEMOFF 0x3000 +#define F3_MEMOFF 0x4000 + + +#define MEM_ADDR0 (0x728 / 2) +#define MEM_ADDR1 (0x72a / 2) +#define MEM_ADDR2 (0x72c / 2) + + +#define PCMCIA_ADDR0 (0x072e / 2) +#define PCMCIA_ADDR1 (0x0730 / 2) +#define PCMCIA_ADDR2 (0x0732 / 2) + +#define MEM_SEG (0x0734 / 2) +#define SROM_CS (0x0736 / 2) +#define SROM_DATAL (0x0738 / 2) +#define SROM_DATAH (0x073a / 2) +#define SROM_ADDRL (0x073c / 2) +#define SROM_ADDRH (0x073e / 2) +#define SROM_INFO2 (0x0772 / 2) +#define SROM_INFO (0x07be / 2) + + +#define SROM_IDLE 0 +#define SROM_WRITE 1 +#define SROM_READ 2 +#define SROM_WEN 4 +#define SROM_WDS 7 +#define SROM_DONE 8 + + +#define SRI_SZ_MASK 0x03 +#define SRI_BLANK 0x04 +#define SRI_OTP 0x80 + + + +#define SBTML_INT_ACK 0x40000 +#define SBTML_INT_EN 0x20000 + + +#define SBTMH_INT_STATUS 0x40000 + +#endif diff --git a/drivers/net/wireless/bcmdhd/include/sbsdio.h b/drivers/net/wireless/bcmdhd/include/sbsdio.h new file mode 100644 index 0000000000000..7aaeb73f01003 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/sbsdio.h @@ -0,0 +1,166 @@ +/* + * SDIO device core hardware definitions. + * sdio is a portion of the pcmcia core in core rev 3 - rev 8 + * + * SDIO core support 1bit, 4 bit SDIO mode as well as SPI mode. 
+ * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: sbsdio.h 277737 2011-08-16 17:54:59Z $ + */ + +#ifndef _SBSDIO_H +#define _SBSDIO_H + +#define SBSDIO_NUM_FUNCTION 3 /* as of sdiod rev 0, supports 3 functions */ + +/* function 1 miscellaneous registers */ +#define SBSDIO_SPROM_CS 0x10000 /* sprom command and status */ +#define SBSDIO_SPROM_INFO 0x10001 /* sprom info register */ +#define SBSDIO_SPROM_DATA_LOW 0x10002 /* sprom indirect access data byte 0 */ +#define SBSDIO_SPROM_DATA_HIGH 0x10003 /* sprom indirect access data byte 1 */ +#define SBSDIO_SPROM_ADDR_LOW 0x10004 /* sprom indirect access addr byte 0 */ +#define SBSDIO_SPROM_ADDR_HIGH 0x10005 /* sprom indirect access addr byte 0 */ +#define SBSDIO_CHIP_CTRL_DATA 0x10006 /* xtal_pu (gpio) output */ +#define SBSDIO_CHIP_CTRL_EN 0x10007 /* xtal_pu (gpio) enable */ +#define SBSDIO_WATERMARK 0x10008 /* rev < 7, watermark for sdio device */ +#define SBSDIO_DEVICE_CTL 0x10009 /* control busy signal generation */ + +/* registers introduced in rev 8, some content (mask/bits) defs in sbsdpcmdev.h */ +#define SBSDIO_FUNC1_SBADDRLOW 0x1000A /* SB Address Window Low (b15) */ +#define SBSDIO_FUNC1_SBADDRMID 0x1000B /* SB Address Window Mid (b23:b16) */ +#define SBSDIO_FUNC1_SBADDRHIGH 0x1000C /* SB Address Window High (b31:b24) */ +#define SBSDIO_FUNC1_FRAMECTRL 0x1000D /* Frame Control (frame term/abort) */ +#define SBSDIO_FUNC1_CHIPCLKCSR 0x1000E /* ChipClockCSR (ALP/HT ctl/status) */ +#define SBSDIO_FUNC1_SDIOPULLUP 0x1000F /* SdioPullUp (on cmd, d0-d2) */ +#define SBSDIO_FUNC1_WFRAMEBCLO 0x10019 /* Write Frame Byte Count Low */ +#define SBSDIO_FUNC1_WFRAMEBCHI 0x1001A /* Write Frame Byte Count High */ +#define SBSDIO_FUNC1_RFRAMEBCLO 0x1001B /* Read Frame Byte Count Low */ +#define SBSDIO_FUNC1_RFRAMEBCHI 0x1001C /* Read Frame Byte Count High */ + +#define SBSDIO_FUNC1_MISC_REG_START 0x10000 /* f1 misc register start */ +#define SBSDIO_FUNC1_MISC_REG_LIMIT 0x1001C /* f1 misc register end */ + +/* SBSDIO_SPROM_CS */ +#define SBSDIO_SPROM_IDLE 0 +#define SBSDIO_SPROM_WRITE 1 +#define SBSDIO_SPROM_READ 2 +#define SBSDIO_SPROM_WEN 4 +#define SBSDIO_SPROM_WDS 7 +#define SBSDIO_SPROM_DONE 8 + +/* SBSDIO_SPROM_INFO */ +#define SROM_SZ_MASK 0x03 /* SROM size, 1: 4k, 2: 16k */ +#define SROM_BLANK 0x04 /* depreciated in corerev 6 */ +#define SROM_OTP 0x80 /* OTP present */ + +/* SBSDIO_CHIP_CTRL */ +#define SBSDIO_CHIP_CTRL_XTAL 0x01 /* or'd with onchip xtal_pu, + * 
1: power on oscillator + * (for 4318 only) + */ +/* SBSDIO_WATERMARK */ +#define SBSDIO_WATERMARK_MASK 0x7f /* number of words - 1 for sd device + * to wait before sending data to host + */ + +/* SBSDIO_DEVICE_CTL */ +#define SBSDIO_DEVCTL_SETBUSY 0x01 /* 1: device will assert busy signal when + * receiving CMD53 + */ +#define SBSDIO_DEVCTL_SPI_INTR_SYNC 0x02 /* 1: assertion of sdio interrupt is + * synchronous to the sdio clock + */ +#define SBSDIO_DEVCTL_CA_INT_ONLY 0x04 /* 1: mask all interrupts to host + * except the chipActive (rev 8) + */ +#define SBSDIO_DEVCTL_PADS_ISO 0x08 /* 1: isolate internal sdio signals, put + * external pads in tri-state; requires + * sdio bus power cycle to clear (rev 9) + */ +#define SBSDIO_DEVCTL_SB_RST_CTL 0x30 /* Force SD->SB reset mapping (rev 11) */ +#define SBSDIO_DEVCTL_RST_CORECTL 0x00 /* Determined by CoreControl bit */ +#define SBSDIO_DEVCTL_RST_BPRESET 0x10 /* Force backplane reset */ +#define SBSDIO_DEVCTL_RST_NOBPRESET 0x20 /* Force no backplane reset */ + + +/* SBSDIO_FUNC1_CHIPCLKCSR */ +#define SBSDIO_FORCE_ALP 0x01 /* Force ALP request to backplane */ +#define SBSDIO_FORCE_HT 0x02 /* Force HT request to backplane */ +#define SBSDIO_FORCE_ILP 0x04 /* Force ILP request to backplane */ +#define SBSDIO_ALP_AVAIL_REQ 0x08 /* Make ALP ready (power up xtal) */ +#define SBSDIO_HT_AVAIL_REQ 0x10 /* Make HT ready (power up PLL) */ +#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20 /* Squelch clock requests from HW */ +#define SBSDIO_ALP_AVAIL 0x40 /* Status: ALP is ready */ +#define SBSDIO_HT_AVAIL 0x80 /* Status: HT is ready */ +/* In rev8, actual avail bits followed original docs */ +#define SBSDIO_Rev8_HT_AVAIL 0x40 +#define SBSDIO_Rev8_ALP_AVAIL 0x80 + +#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL) +#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS) +#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS) +#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval)) +#define SBSDIO_CLKAV(regval, alponly) (SBSDIO_ALPAV(regval) && \ + (alponly ? 
1 : SBSDIO_HTAV(regval))) + +/* SBSDIO_FUNC1_SDIOPULLUP */ +#define SBSDIO_PULLUP_D0 0x01 /* Enable D0/MISO pullup */ +#define SBSDIO_PULLUP_D1 0x02 /* Enable D1/INT# pullup */ +#define SBSDIO_PULLUP_D2 0x04 /* Enable D2 pullup */ +#define SBSDIO_PULLUP_CMD 0x08 /* Enable CMD/MOSI pullup */ +#define SBSDIO_PULLUP_ALL 0x0f /* All valid bits */ + +/* function 1 OCP space */ +#define SBSDIO_SB_OFT_ADDR_MASK 0x07FFF /* sb offset addr is <= 15 bits, 32k */ +#define SBSDIO_SB_OFT_ADDR_LIMIT 0x08000 +#define SBSDIO_SB_ACCESS_2_4B_FLAG 0x08000 /* with b15, maps to 32-bit SB access */ + +/* some duplication with sbsdpcmdev.h here */ +/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */ +#define SBSDIO_SBADDRLOW_MASK 0x80 /* Valid bits in SBADDRLOW */ +#define SBSDIO_SBADDRMID_MASK 0xff /* Valid bits in SBADDRMID */ +#define SBSDIO_SBADDRHIGH_MASK 0xffU /* Valid bits in SBADDRHIGH */ +#define SBSDIO_SBWINDOW_MASK 0xffff8000 /* Address bits from SBADDR regs */ + +/* direct(mapped) cis space */ +#define SBSDIO_CIS_BASE_COMMON 0x1000 /* MAPPED common CIS address */ +#define SBSDIO_CIS_SIZE_LIMIT 0x200 /* maximum bytes in one CIS */ +#define SBSDIO_OTP_CIS_SIZE_LIMIT 0x078 /* maximum bytes OTP CIS */ + +#define SBSDIO_CIS_OFT_ADDR_MASK 0x1FFFF /* cis offset addr is < 17 bits */ + +#define SBSDIO_CIS_MANFID_TUPLE_LEN 6 /* manfid tuple length, include tuple, + * link bytes + */ + +/* indirect cis access (in sprom) */ +#define SBSDIO_SPROM_CIS_OFFSET 0x8 /* 8 control bytes first, CIS starts from + * 8th byte + */ + +#define SBSDIO_BYTEMODE_DATALEN_MAX 64 /* sdio byte mode: maximum length of one + * data comamnd + */ + +#define SBSDIO_CORE_ADDR_MASK 0x1FFFF /* sdio core function one address mask */ + +#endif /* _SBSDIO_H */ diff --git a/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h b/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h new file mode 100644 index 0000000000000..e5176483e9a1d --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h @@ -0,0 +1,293 @@ +/* + * Broadcom SiliconBackplane SDIO/PCMCIA hardware-specific + * device core support + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: sbsdpcmdev.h 282638 2011-09-08 21:18:10Z $ + */ + +#ifndef _sbsdpcmdev_h_ +#define _sbsdpcmdev_h_ + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + + +typedef volatile struct { + dma64regs_t xmt; /* dma tx */ + uint32 PAD[2]; + dma64regs_t rcv; /* dma rx */ + uint32 PAD[2]; +} dma64p_t; + +/* dma64 sdiod corerev >= 1 */ +typedef volatile struct { + dma64p_t dma64regs[2]; + dma64diag_t dmafifo; /* DMA Diagnostic Regs, 0x280-0x28c */ + uint32 PAD[92]; +} sdiodma64_t; + +/* dma32 sdiod corerev == 0 */ +typedef volatile struct { + dma32regp_t dma32regs[2]; /* dma tx & rx, 0x200-0x23c */ + dma32diag_t dmafifo; /* DMA Diagnostic Regs, 0x240-0x24c */ + uint32 PAD[108]; +} sdiodma32_t; + +/* dma32 regs for pcmcia core */ +typedef volatile struct { + dma32regp_t dmaregs; /* DMA Regs, 0x200-0x21c, rev8 */ + dma32diag_t dmafifo; /* DMA Diagnostic Regs, 0x220-0x22c */ + uint32 PAD[116]; +} pcmdma32_t; + +/* core registers */ +typedef volatile struct { + uint32 corecontrol; /* CoreControl, 0x000, rev8 */ + uint32 corestatus; /* CoreStatus, 0x004, rev8 */ + uint32 PAD[1]; + uint32 biststatus; /* BistStatus, 0x00c, rev8 */ + + /* PCMCIA access */ + uint16 pcmciamesportaladdr; /* PcmciaMesPortalAddr, 0x010, rev8 */ + uint16 PAD[1]; + uint16 pcmciamesportalmask; /* PcmciaMesPortalMask, 0x014, rev8 */ + uint16 PAD[1]; + uint16 pcmciawrframebc; /* PcmciaWrFrameBC, 0x018, rev8 */ + uint16 PAD[1]; + uint16 pcmciaunderflowtimer; /* PcmciaUnderflowTimer, 0x01c, rev8 */ + uint16 PAD[1]; + + /* interrupt */ + uint32 intstatus; /* IntStatus, 0x020, rev8 */ + uint32 hostintmask; /* IntHostMask, 0x024, rev8 */ + uint32 intmask; /* IntSbMask, 0x028, rev8 */ + uint32 sbintstatus; /* SBIntStatus, 0x02c, rev8 */ + uint32 sbintmask; /* SBIntMask, 0x030, rev8 */ + uint32 funcintmask; /* SDIO Function Interrupt Mask, SDIO rev4 */ + uint32 PAD[2]; + uint32 tosbmailbox; /* ToSBMailbox, 0x040, rev8 */ + uint32 tohostmailbox; /* ToHostMailbox, 0x044, rev8 */ + uint32 tosbmailboxdata; /* ToSbMailboxData, 0x048, rev8 */ + uint32 tohostmailboxdata; /* ToHostMailboxData, 0x04c, rev8 */ + + /* synchronized access to registers in SDIO clock domain */ + uint32 sdioaccess; /* SdioAccess, 0x050, rev8 */ + uint32 PAD[3]; + + /* PCMCIA frame control */ + uint8 pcmciaframectrl; /* pcmciaFrameCtrl, 0x060, rev8 */ + uint8 PAD[3]; + uint8 pcmciawatermark; /* pcmciaWaterMark, 0x064, rev8 */ + uint8 PAD[155]; + + /* interrupt batching control */ + uint32 intrcvlazy; /* IntRcvLazy, 0x100, rev8 */ + uint32 PAD[3]; + + /* counters */ + uint32 cmd52rd; /* Cmd52RdCount, 0x110, rev8, SDIO: cmd52 reads */ + uint32 cmd52wr; /* Cmd52WrCount, 0x114, rev8, SDIO: cmd52 writes */ + uint32 cmd53rd; /* Cmd53RdCount, 0x118, rev8, SDIO: cmd53 reads */ + uint32 cmd53wr; /* Cmd53WrCount, 0x11c, rev8, SDIO: cmd53 writes */ + uint32 abort; /* AbortCount, 0x120, rev8, SDIO: aborts */ + uint32 datacrcerror; /* DataCrcErrorCount, 0x124, rev8, SDIO: frames w/bad CRC */ + uint32 rdoutofsync; /* RdOutOfSyncCount, 0x128, rev8, SDIO/PCMCIA: Rd Frm OOS */ + uint32 wroutofsync; /* RdOutOfSyncCount, 0x12c, rev8, SDIO/PCMCIA: Wr Frm OOS */ + uint32 writebusy; /* WriteBusyCount, 0x130, rev8, SDIO: dev asserted "busy" */ + uint32 readwait; /* ReadWaitCount, 0x134, rev8, SDIO: read: no data avail */ + uint32 readterm; /* ReadTermCount, 0x138, rev8, SDIO: rd frm terminates */ + uint32 writeterm; /* WriteTermCount, 0x13c, rev8, SDIO: 
wr frm terminates */ + uint32 PAD[40]; + uint32 clockctlstatus; /* ClockCtlStatus, 0x1e0, rev8 */ + uint32 PAD[7]; + + /* DMA engines */ + volatile union { + pcmdma32_t pcm32; + sdiodma32_t sdiod32; + sdiodma64_t sdiod64; + } dma; + + /* SDIO/PCMCIA CIS region */ + char cis[512]; /* 512 byte CIS, 0x400-0x5ff, rev6 */ + + /* PCMCIA function control registers */ + char pcmciafcr[256]; /* PCMCIA FCR, 0x600-6ff, rev6 */ + uint16 PAD[55]; + + /* PCMCIA backplane access */ + uint16 backplanecsr; /* BackplaneCSR, 0x76E, rev6 */ + uint16 backplaneaddr0; /* BackplaneAddr0, 0x770, rev6 */ + uint16 backplaneaddr1; /* BackplaneAddr1, 0x772, rev6 */ + uint16 backplaneaddr2; /* BackplaneAddr2, 0x774, rev6 */ + uint16 backplaneaddr3; /* BackplaneAddr3, 0x776, rev6 */ + uint16 backplanedata0; /* BackplaneData0, 0x778, rev6 */ + uint16 backplanedata1; /* BackplaneData1, 0x77a, rev6 */ + uint16 backplanedata2; /* BackplaneData2, 0x77c, rev6 */ + uint16 backplanedata3; /* BackplaneData3, 0x77e, rev6 */ + uint16 PAD[31]; + + /* sprom "size" & "blank" info */ + uint16 spromstatus; /* SPROMStatus, 0x7BE, rev2 */ + uint32 PAD[464]; + + /* Sonics SiliconBackplane registers */ + sbconfig_t sbconfig; /* SbConfig Regs, 0xf00-0xfff, rev8 */ +} sdpcmd_regs_t; + +/* corecontrol */ +#define CC_CISRDY (1 << 0) /* CIS Ready */ +#define CC_BPRESEN (1 << 1) /* CCCR RES signal causes backplane reset */ +#define CC_F2RDY (1 << 2) /* set CCCR IOR2 bit */ +#define CC_CLRPADSISO (1 << 3) /* clear SDIO pads isolation bit (rev 11) */ +#define CC_XMTDATAAVAIL_MODE (1 << 4) /* data avail generates an interrupt */ +#define CC_XMTDATAAVAIL_CTRL (1 << 5) /* data avail interrupt ctrl */ + +/* corestatus */ +#define CS_PCMCIAMODE (1 << 0) /* Device Mode; 0=SDIO, 1=PCMCIA */ +#define CS_SMARTDEV (1 << 1) /* 1=smartDev enabled */ +#define CS_F2ENABLED (1 << 2) /* 1=host has enabled the device */ + +#define PCMCIA_MES_PA_MASK 0x7fff /* PCMCIA Message Portal Address Mask */ +#define PCMCIA_MES_PM_MASK 0x7fff /* PCMCIA Message Portal Mask Mask */ +#define PCMCIA_WFBC_MASK 0xffff /* PCMCIA Write Frame Byte Count Mask */ +#define PCMCIA_UT_MASK 0x07ff /* PCMCIA Underflow Timer Mask */ + +/* intstatus */ +#define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */ +#define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */ +#define I_SMB_SW2 (1 << 2) /* To SB Mail S/W interrupt 2 */ +#define I_SMB_SW3 (1 << 3) /* To SB Mail S/W interrupt 3 */ +#define I_SMB_SW_MASK 0x0000000f /* To SB Mail S/W interrupts mask */ +#define I_SMB_SW_SHIFT 0 /* To SB Mail S/W interrupts shift */ +#define I_HMB_SW0 (1 << 4) /* To Host Mail S/W interrupt 0 */ +#define I_HMB_SW1 (1 << 5) /* To Host Mail S/W interrupt 1 */ +#define I_HMB_SW2 (1 << 6) /* To Host Mail S/W interrupt 2 */ +#define I_HMB_SW3 (1 << 7) /* To Host Mail S/W interrupt 3 */ +#define I_HMB_SW_MASK 0x000000f0 /* To Host Mail S/W interrupts mask */ +#define I_HMB_SW_SHIFT 4 /* To Host Mail S/W interrupts shift */ +#define I_WR_OOSYNC (1 << 8) /* Write Frame Out Of Sync */ +#define I_RD_OOSYNC (1 << 9) /* Read Frame Out Of Sync */ +#define I_PC (1 << 10) /* descriptor error */ +#define I_PD (1 << 11) /* data error */ +#define I_DE (1 << 12) /* Descriptor protocol Error */ +#define I_RU (1 << 13) /* Receive descriptor Underflow */ +#define I_RO (1 << 14) /* Receive fifo Overflow */ +#define I_XU (1 << 15) /* Transmit fifo Underflow */ +#define I_RI (1 << 16) /* Receive Interrupt */ +#define I_BUSPWR (1 << 17) /* SDIO Bus Power Change (rev 9) */ +#define I_XMTDATA_AVAIL (1 << 23) /* bits in fifo */ 
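The SDPCM interrupt bits defined so far pack the mailbox software interrupts into 4-bit fields of intstatus; purely as a sketch (the helper names are the editor's, not part of this header), a snapshot of that register can be decoded like this:

/* Sketch: extract the ToHost mailbox software-interrupt field from an
 * intstatus snapshot, and check for read/write frame out-of-sync errors. */
static inline uint32 sdpcm_hmb_sw_ints(uint32 intstatus)
{
	return (intstatus & I_HMB_SW_MASK) >> I_HMB_SW_SHIFT;
}

static inline int sdpcm_frame_oosync(uint32 intstatus)
{
	return (intstatus & (I_WR_OOSYNC | I_RD_OOSYNC)) != 0;
}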
+#define I_XI (1 << 24) /* Transmit Interrupt */ +#define I_RF_TERM (1 << 25) /* Read Frame Terminate */ +#define I_WF_TERM (1 << 26) /* Write Frame Terminate */ +#define I_PCMCIA_XU (1 << 27) /* PCMCIA Transmit FIFO Underflow */ +#define I_SBINT (1 << 28) /* sbintstatus Interrupt */ +#define I_CHIPACTIVE (1 << 29) /* chip transitioned from doze to active state */ +#define I_SRESET (1 << 30) /* CCCR RES interrupt */ +#define I_IOE2 (1U << 31) /* CCCR IOE2 Bit Changed */ +#define I_ERRORS (I_PC | I_PD | I_DE | I_RU | I_RO | I_XU) /* DMA Errors */ +#define I_DMA (I_RI | I_XI | I_ERRORS) + +/* sbintstatus */ +#define I_SB_SERR (1 << 8) /* Backplane SError (write) */ +#define I_SB_RESPERR (1 << 9) /* Backplane Response Error (read) */ +#define I_SB_SPROMERR (1 << 10) /* Error accessing the sprom */ + +/* sdioaccess */ +#define SDA_DATA_MASK 0x000000ff /* Read/Write Data Mask */ +#define SDA_ADDR_MASK 0x000fff00 /* Read/Write Address Mask */ +#define SDA_ADDR_SHIFT 8 /* Read/Write Address Shift */ +#define SDA_WRITE 0x01000000 /* Write bit */ +#define SDA_READ 0x00000000 /* Write bit cleared for Read */ +#define SDA_BUSY 0x80000000 /* Busy bit */ + +/* sdioaccess-accessible register address spaces */ +#define SDA_CCCR_SPACE 0x000 /* sdioAccess CCCR register space */ +#define SDA_F1_FBR_SPACE 0x100 /* sdioAccess F1 FBR register space */ +#define SDA_F2_FBR_SPACE 0x200 /* sdioAccess F2 FBR register space */ +#define SDA_F1_REG_SPACE 0x300 /* sdioAccess F1 core-specific register space */ + +/* SDA_F1_REG_SPACE sdioaccess-accessible F1 reg space register offsets */ +#define SDA_CHIPCONTROLDATA 0x006 /* ChipControlData */ +#define SDA_CHIPCONTROLENAB 0x007 /* ChipControlEnable */ +#define SDA_F2WATERMARK 0x008 /* Function 2 Watermark */ +#define SDA_DEVICECONTROL 0x009 /* DeviceControl */ +#define SDA_SBADDRLOW 0x00a /* SbAddrLow */ +#define SDA_SBADDRMID 0x00b /* SbAddrMid */ +#define SDA_SBADDRHIGH 0x00c /* SbAddrHigh */ +#define SDA_FRAMECTRL 0x00d /* FrameCtrl */ +#define SDA_CHIPCLOCKCSR 0x00e /* ChipClockCSR */ +#define SDA_SDIOPULLUP 0x00f /* SdioPullUp */ +#define SDA_SDIOWRFRAMEBCLOW 0x019 /* SdioWrFrameBCLow */ +#define SDA_SDIOWRFRAMEBCHIGH 0x01a /* SdioWrFrameBCHigh */ +#define SDA_SDIORDFRAMEBCLOW 0x01b /* SdioRdFrameBCLow */ +#define SDA_SDIORDFRAMEBCHIGH 0x01c /* SdioRdFrameBCHigh */ + +/* SDA_F2WATERMARK */ +#define SDA_F2WATERMARK_MASK 0x7f /* F2Watermark Mask */ + +/* SDA_SBADDRLOW */ +#define SDA_SBADDRLOW_MASK 0x80 /* SbAddrLow Mask */ + +/* SDA_SBADDRMID */ +#define SDA_SBADDRMID_MASK 0xff /* SbAddrMid Mask */ + +/* SDA_SBADDRHIGH */ +#define SDA_SBADDRHIGH_MASK 0xff /* SbAddrHigh Mask */ + +/* SDA_FRAMECTRL */ +#define SFC_RF_TERM (1 << 0) /* Read Frame Terminate */ +#define SFC_WF_TERM (1 << 1) /* Write Frame Terminate */ +#define SFC_CRC4WOOS (1 << 2) /* HW reports CRC error for write out of sync */ +#define SFC_ABORTALL (1 << 3) /* Abort cancels all in-progress frames */ + +/* pcmciaframectrl */ +#define PFC_RF_TERM (1 << 0) /* Read Frame Terminate */ +#define PFC_WF_TERM (1 << 1) /* Write Frame Terminate */ + +/* intrcvlazy */ +#define IRL_TO_MASK 0x00ffffff /* timeout */ +#define IRL_FC_MASK 0xff000000 /* frame count */ +#define IRL_FC_SHIFT 24 /* frame count */ + +/* rx header */ +typedef volatile struct { + uint16 len; + uint16 flags; +} sdpcmd_rxh_t; + +/* rx header flags */ +#define RXF_CRC 0x0001 /* CRC error detected */ +#define RXF_WOOS 0x0002 /* write frame out of sync */ +#define RXF_WF_TERM 0x0004 /* write frame terminated */ +#define RXF_ABORT 0x0008 /* write 
frame aborted */ +#define RXF_DISCARD (RXF_CRC | RXF_WOOS | RXF_WF_TERM | RXF_ABORT) /* bad frame */ + +/* HW frame tag */ +#define SDPCM_FRAMETAG_LEN 4 /* HW frametag: 2 bytes len, 2 bytes check val */ + +#endif /* _sbsdpcmdev_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/sbsocram.h b/drivers/net/wireless/bcmdhd/include/sbsocram.h new file mode 100644 index 0000000000000..45c4dc208bda3 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/sbsocram.h @@ -0,0 +1,186 @@ +/* + * BCM47XX Sonics SiliconBackplane embedded ram core + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: sbsocram.h 277737 2011-08-16 17:54:59Z $ + */ + + +#ifndef _SBSOCRAM_H +#define _SBSOCRAM_H + +#ifndef _LANGUAGE_ASSEMBLY + + +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif + + +typedef volatile struct sbsocramregs { + uint32 coreinfo; + uint32 bwalloc; + uint32 extracoreinfo; + uint32 biststat; + uint32 bankidx; + uint32 standbyctrl; + + uint32 errlogstatus; + uint32 errlogaddr; + + uint32 cambankidx; + uint32 cambankstandbyctrl; + uint32 cambankpatchctrl; + uint32 cambankpatchtblbaseaddr; + uint32 cambankcmdreg; + uint32 cambankdatareg; + uint32 cambankmaskreg; + uint32 PAD[1]; + uint32 bankinfo; + uint32 PAD[15]; + uint32 extmemconfig; + uint32 extmemparitycsr; + uint32 extmemparityerrdata; + uint32 extmemparityerrcnt; + uint32 extmemwrctrlandsize; + uint32 PAD[84]; + uint32 workaround; + uint32 pwrctl; + uint32 PAD[133]; + uint32 sr_control; + uint32 sr_status; + uint32 sr_address; + uint32 sr_data; +} sbsocramregs_t; + +#endif + + +#define SR_COREINFO 0x00 +#define SR_BWALLOC 0x04 +#define SR_BISTSTAT 0x0c +#define SR_BANKINDEX 0x10 +#define SR_BANKSTBYCTL 0x14 +#define SR_PWRCTL 0x1e8 + + +#define SRCI_PT_MASK 0x00070000 +#define SRCI_PT_SHIFT 16 + +#define SRCI_PT_OCP_OCP 0 +#define SRCI_PT_AXI_OCP 1 +#define SRCI_PT_ARM7AHB_OCP 2 +#define SRCI_PT_CM3AHB_OCP 3 +#define SRCI_PT_AXI_AXI 4 +#define SRCI_PT_AHB_AXI 5 + +#define SRCI_LSS_MASK 0x00f00000 +#define SRCI_LSS_SHIFT 20 +#define SRCI_LRS_MASK 0x0f000000 +#define SRCI_LRS_SHIFT 24 + + +#define SRCI_MS0_MASK 0xf +#define SR_MS0_BASE 16 + + +#define SRCI_ROMNB_MASK 0xf000 +#define SRCI_ROMNB_SHIFT 12 +#define SRCI_ROMBSZ_MASK 0xf00 +#define SRCI_ROMBSZ_SHIFT 8 +#define SRCI_SRNB_MASK 0xf0 +#define SRCI_SRNB_SHIFT 4 +#define SRCI_SRBSZ_MASK 0xf +#define SRCI_SRBSZ_SHIFT 0 + +#define SR_BSZ_BASE 
14 + + +#define SRSC_SBYOVR_MASK 0x80000000 +#define SRSC_SBYOVR_SHIFT 31 +#define SRSC_SBYOVRVAL_MASK 0x60000000 +#define SRSC_SBYOVRVAL_SHIFT 29 +#define SRSC_SBYEN_MASK 0x01000000 +#define SRSC_SBYEN_SHIFT 24 + + +#define SRPC_PMU_STBYDIS_MASK 0x00000010 +#define SRPC_PMU_STBYDIS_SHIFT 4 +#define SRPC_STBYOVRVAL_MASK 0x00000008 +#define SRPC_STBYOVRVAL_SHIFT 3 +#define SRPC_STBYOVR_MASK 0x00000007 +#define SRPC_STBYOVR_SHIFT 0 + + +#define SRECC_NUM_BANKS_MASK 0x000000F0 +#define SRECC_NUM_BANKS_SHIFT 4 +#define SRECC_BANKSIZE_MASK 0x0000000F +#define SRECC_BANKSIZE_SHIFT 0 + +#define SRECC_BANKSIZE(value) (1 << (value)) + + +#define SRCBPC_PATCHENABLE 0x80000000 + +#define SRP_ADDRESS 0x0001FFFC +#define SRP_VALID 0x8000 + + +#define SRCMD_WRITE 0x00020000 +#define SRCMD_READ 0x00010000 +#define SRCMD_DONE 0x80000000 + +#define SRCMD_DONE_DLY 1000 + + +#define SOCRAM_BANKINFO_SZMASK 0x3f +#define SOCRAM_BANKIDX_ROM_MASK 0x100 + +#define SOCRAM_BANKIDX_MEMTYPE_SHIFT 8 + +#define SOCRAM_MEMTYPE_RAM 0 +#define SOCRAM_MEMTYPE_R0M 1 +#define SOCRAM_MEMTYPE_DEVRAM 2 + +#define SOCRAM_BANKINFO_REG 0x40 +#define SOCRAM_BANKIDX_REG 0x10 +#define SOCRAM_BANKINFO_STDBY_MASK 0x400 +#define SOCRAM_BANKINFO_STDBY_TIMER 0x800 + + +#define SOCRAM_BANKINFO_DEVRAMSEL_SHIFT 13 +#define SOCRAM_BANKINFO_DEVRAMSEL_MASK 0x2000 +#define SOCRAM_BANKINFO_DEVRAMPRO_SHIFT 14 +#define SOCRAM_BANKINFO_DEVRAMPRO_MASK 0x4000 + + +#define SOCRAM_DEVRAMBANK_MASK 0xF000 +#define SOCRAM_DEVRAMBANK_SHIFT 12 + + +#define SOCRAM_BANKINFO_SZBASE 8192 +#define SOCRAM_BANKSIZE_SHIFT 13 + + +#endif diff --git a/drivers/net/wireless/bcmdhd/include/sdio.h b/drivers/net/wireless/bcmdhd/include/sdio.h new file mode 100644 index 0000000000000..c8ac7b773fb9a --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/sdio.h @@ -0,0 +1,612 @@ +/* + * SDIO spec header file + * Protocol and standard (common) device definitions + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: sdio.h 277737 2011-08-16 17:54:59Z $ + */ + +#ifndef _SDIO_H +#define _SDIO_H + + +/* CCCR structure for function 0 */ +typedef volatile struct { + uint8 cccr_sdio_rev; /* RO, cccr and sdio revision */ + uint8 sd_rev; /* RO, sd spec revision */ + uint8 io_en; /* I/O enable */ + uint8 io_rdy; /* I/O ready reg */ + uint8 intr_ctl; /* Master and per function interrupt enable control */ + uint8 intr_status; /* RO, interrupt pending status */ + uint8 io_abort; /* read/write abort or reset all functions */ + uint8 bus_inter; /* bus interface control */ + uint8 capability; /* RO, card capability */ + + uint8 cis_base_low; /* 0x9 RO, common CIS base address, LSB */ + uint8 cis_base_mid; + uint8 cis_base_high; /* 0xB RO, common CIS base address, MSB */ + + /* suspend/resume registers */ + uint8 bus_suspend; /* 0xC */ + uint8 func_select; /* 0xD */ + uint8 exec_flag; /* 0xE */ + uint8 ready_flag; /* 0xF */ + + uint8 fn0_blk_size[2]; /* 0x10(LSB), 0x11(MSB) */ + + uint8 power_control; /* 0x12 (SDIO version 1.10) */ + + uint8 speed_control; /* 0x13 */ +} sdio_regs_t; + +/* SDIO Device CCCR offsets */ +#define SDIOD_CCCR_REV 0x00 +#define SDIOD_CCCR_SDREV 0x01 +#define SDIOD_CCCR_IOEN 0x02 +#define SDIOD_CCCR_IORDY 0x03 +#define SDIOD_CCCR_INTEN 0x04 +#define SDIOD_CCCR_INTPEND 0x05 +#define SDIOD_CCCR_IOABORT 0x06 +#define SDIOD_CCCR_BICTRL 0x07 +#define SDIOD_CCCR_CAPABLITIES 0x08 +#define SDIOD_CCCR_CISPTR_0 0x09 +#define SDIOD_CCCR_CISPTR_1 0x0A +#define SDIOD_CCCR_CISPTR_2 0x0B +#define SDIOD_CCCR_BUSSUSP 0x0C +#define SDIOD_CCCR_FUNCSEL 0x0D +#define SDIOD_CCCR_EXECFLAGS 0x0E +#define SDIOD_CCCR_RDYFLAGS 0x0F +#define SDIOD_CCCR_BLKSIZE_0 0x10 +#define SDIOD_CCCR_BLKSIZE_1 0x11 +#define SDIOD_CCCR_POWER_CONTROL 0x12 +#define SDIOD_CCCR_SPEED_CONTROL 0x13 +#define SDIOD_CCCR_UHSI_SUPPORT 0x14 +#define SDIOD_CCCR_DRIVER_STRENGTH 0x15 +#define SDIOD_CCCR_INTR_EXTN 0x16 + +/* Broadcom extensions (corerev >= 1) */ +#define SDIOD_CCCR_BRCM_SEPINT 0xf2 + +/* cccr_sdio_rev */ +#define SDIO_REV_SDIOID_MASK 0xf0 /* SDIO spec revision number */ +#define SDIO_REV_CCCRID_MASK 0x0f /* CCCR format version number */ + +/* sd_rev */ +#define SD_REV_PHY_MASK 0x0f /* SD format version number */ + +/* io_en */ +#define SDIO_FUNC_ENABLE_1 0x02 /* function 1 I/O enable */ +#define SDIO_FUNC_ENABLE_2 0x04 /* function 2 I/O enable */ + +/* io_rdys */ +#define SDIO_FUNC_READY_1 0x02 /* function 1 I/O ready */ +#define SDIO_FUNC_READY_2 0x04 /* function 2 I/O ready */ + +/* intr_ctl */ +#define INTR_CTL_MASTER_EN 0x1 /* interrupt enable master */ +#define INTR_CTL_FUNC1_EN 0x2 /* interrupt enable for function 1 */ +#define INTR_CTL_FUNC2_EN 0x4 /* interrupt enable for function 2 */ + +/* intr_status */ +#define INTR_STATUS_FUNC1 0x2 /* interrupt pending for function 1 */ +#define INTR_STATUS_FUNC2 0x4 /* interrupt pending for function 2 */ + +/* io_abort */ +#define IO_ABORT_RESET_ALL 0x08 /* I/O card reset */ +#define IO_ABORT_FUNC_MASK 0x07 /* abort selction: function x */ + +/* bus_inter */ +#define BUS_CARD_DETECT_DIS 0x80 /* Card Detect disable */ +#define BUS_SPI_CONT_INTR_CAP 0x40 /* support continuous SPI interrupt */ +#define BUS_SPI_CONT_INTR_EN 0x20 /* continuous SPI interrupt enable */ +#define BUS_SD_DATA_WIDTH_MASK 0x03 /* bus width mask */ +#define BUS_SD_DATA_WIDTH_4BIT 0x02 /* bus width 4-bit mode */ +#define BUS_SD_DATA_WIDTH_1BIT 0x00 /* bus width 1-bit mode */ + +/* capability */ +#define SDIO_CAP_4BLS 0x80 /* 4-bit support for low speed card */ +#define SDIO_CAP_LSC 0x40 /* low speed card 
*/ +#define SDIO_CAP_E4MI 0x20 /* enable interrupt between block of data in 4-bit mode */ +#define SDIO_CAP_S4MI 0x10 /* support interrupt between block of data in 4-bit mode */ +#define SDIO_CAP_SBS 0x08 /* support suspend/resume */ +#define SDIO_CAP_SRW 0x04 /* support read wait */ +#define SDIO_CAP_SMB 0x02 /* support multi-block transfer */ +#define SDIO_CAP_SDC 0x01 /* Support Direct commands during multi-byte transfer */ + +/* power_control */ +#define SDIO_POWER_SMPC 0x01 /* supports master power control (RO) */ +#define SDIO_POWER_EMPC 0x02 /* enable master power control (allow > 200mA) (RW) */ + +/* speed_control (control device entry into high-speed clocking mode) */ +#define SDIO_SPEED_SHS 0x01 /* supports high-speed [clocking] mode (RO) */ +#define SDIO_SPEED_EHS 0x02 /* enable high-speed [clocking] mode (RW) */ + +/* for setting bus speed in card: 0x13h */ +#define SDIO_BUS_SPEED_UHSISEL_M BITFIELD_MASK(3) +#define SDIO_BUS_SPEED_UHSISEL_S 1 + +/* for getting bus speed cap in card: 0x14h */ +#define SDIO_BUS_SPEED_UHSICAP_M BITFIELD_MASK(3) +#define SDIO_BUS_SPEED_UHSICAP_S 0 + +/* for getting driver type CAP in card: 0x15h */ +#define SDIO_BUS_DRVR_TYPE_CAP_M BITFIELD_MASK(3) +#define SDIO_BUS_DRVR_TYPE_CAP_S 0 + +/* for setting driver type selection in card: 0x15h */ +#define SDIO_BUS_DRVR_TYPE_SEL_M BITFIELD_MASK(2) +#define SDIO_BUS_DRVR_TYPE_SEL_S 4 + +/* for getting async int support in card: 0x16h */ +#define SDIO_BUS_ASYNCINT_CAP_M BITFIELD_MASK(1) +#define SDIO_BUS_ASYNCINT_CAP_S 0 + +/* for setting async int selection in card: 0x16h */ +#define SDIO_BUS_ASYNCINT_SEL_M BITFIELD_MASK(1) +#define SDIO_BUS_ASYNCINT_SEL_S 1 + +/* brcm sepint */ +#define SDIO_SEPINT_MASK 0x01 /* route sdpcmdev intr onto separate pad (chip-specific) */ +#define SDIO_SEPINT_OE 0x02 /* 1 asserts output enable for above pad */ +#define SDIO_SEPINT_ACT_HI 0x04 /* use active high interrupt level instead of active low */ + +/* FBR structure for function 1-7, FBR addresses and register offsets */ +typedef volatile struct { + uint8 devctr; /* device interface, CSA control */ + uint8 ext_dev; /* extended standard I/O device type code */ + uint8 pwr_sel; /* power selection support */ + uint8 PAD[6]; /* reserved */ + + uint8 cis_low; /* CIS LSB */ + uint8 cis_mid; + uint8 cis_high; /* CIS MSB */ + uint8 csa_low; /* code storage area, LSB */ + uint8 csa_mid; + uint8 csa_high; /* code storage area, MSB */ + uint8 csa_dat_win; /* data access window to function */ + + uint8 fnx_blk_size[2]; /* block size, little endian */ +} sdio_fbr_t; + +/* Maximum number of I/O funcs */ +#define SDIOD_MAX_IOFUNCS 7 + +/* SDIO Device FBR Start Address */ +#define SDIOD_FBR_STARTADDR 0x100 + +/* SDIO Device FBR Size */ +#define SDIOD_FBR_SIZE 0x100 + +/* Macro to calculate FBR register base */ +#define SDIOD_FBR_BASE(n) ((n) * 0x100) + +/* Function register offsets */ +#define SDIOD_FBR_DEVCTR 0x00 /* basic info for function */ +#define SDIOD_FBR_EXT_DEV 0x01 /* extended I/O device code */ +#define SDIOD_FBR_PWR_SEL 0x02 /* power selection bits */ + +/* SDIO Function CIS ptr offset */ +#define SDIOD_FBR_CISPTR_0 0x09 +#define SDIOD_FBR_CISPTR_1 0x0A +#define SDIOD_FBR_CISPTR_2 0x0B + +/* Code Storage Area pointer */ +#define SDIOD_FBR_CSA_ADDR_0 0x0C +#define SDIOD_FBR_CSA_ADDR_1 0x0D +#define SDIOD_FBR_CSA_ADDR_2 0x0E +#define SDIOD_FBR_CSA_DATA 0x0F + +/* SDIO Function I/O Block Size */ +#define SDIOD_FBR_BLKSIZE_0 0x10 +#define SDIOD_FBR_BLKSIZE_1 0x11 + +/* devctr */ +#define SDIOD_FBR_DEVCTR_DIC 0x0f /* device 
interface code */ +#define SDIOD_FBR_DECVTR_CSA 0x40 /* CSA support flag */ +#define SDIOD_FBR_DEVCTR_CSA_EN 0x80 /* CSA enabled */ +/* interface codes */ +#define SDIOD_DIC_NONE 0 /* SDIO standard interface is not supported */ +#define SDIOD_DIC_UART 1 +#define SDIOD_DIC_BLUETOOTH_A 2 +#define SDIOD_DIC_BLUETOOTH_B 3 +#define SDIOD_DIC_GPS 4 +#define SDIOD_DIC_CAMERA 5 +#define SDIOD_DIC_PHS 6 +#define SDIOD_DIC_WLAN 7 +#define SDIOD_DIC_EXT 0xf /* extended device interface, read ext_dev register */ + +/* pwr_sel */ +#define SDIOD_PWR_SEL_SPS 0x01 /* supports power selection */ +#define SDIOD_PWR_SEL_EPS 0x02 /* enable power selection (low-current mode) */ + +/* misc defines */ +#define SDIO_FUNC_0 0 +#define SDIO_FUNC_1 1 +#define SDIO_FUNC_2 2 +#define SDIO_FUNC_3 3 +#define SDIO_FUNC_4 4 +#define SDIO_FUNC_5 5 +#define SDIO_FUNC_6 6 +#define SDIO_FUNC_7 7 + +#define SD_CARD_TYPE_UNKNOWN 0 /* bad type or unrecognized */ +#define SD_CARD_TYPE_IO 1 /* IO only card */ +#define SD_CARD_TYPE_MEMORY 2 /* memory only card */ +#define SD_CARD_TYPE_COMBO 3 /* IO and memory combo card */ + +#define SDIO_MAX_BLOCK_SIZE 2048 /* maximum block size for block mode operation */ +#define SDIO_MIN_BLOCK_SIZE 1 /* minimum block size for block mode operation */ + +/* Card registers: status bit position */ +#define CARDREG_STATUS_BIT_OUTOFRANGE 31 +#define CARDREG_STATUS_BIT_COMCRCERROR 23 +#define CARDREG_STATUS_BIT_ILLEGALCOMMAND 22 +#define CARDREG_STATUS_BIT_ERROR 19 +#define CARDREG_STATUS_BIT_IOCURRENTSTATE3 12 +#define CARDREG_STATUS_BIT_IOCURRENTSTATE2 11 +#define CARDREG_STATUS_BIT_IOCURRENTSTATE1 10 +#define CARDREG_STATUS_BIT_IOCURRENTSTATE0 9 +#define CARDREG_STATUS_BIT_FUN_NUM_ERROR 4 + + + +#define SD_CMD_GO_IDLE_STATE 0 /* mandatory for SDIO */ +#define SD_CMD_SEND_OPCOND 1 +#define SD_CMD_MMC_SET_RCA 3 +#define SD_CMD_IO_SEND_OP_COND 5 /* mandatory for SDIO */ +#define SD_CMD_SELECT_DESELECT_CARD 7 +#define SD_CMD_SEND_CSD 9 +#define SD_CMD_SEND_CID 10 +#define SD_CMD_STOP_TRANSMISSION 12 +#define SD_CMD_SEND_STATUS 13 +#define SD_CMD_GO_INACTIVE_STATE 15 +#define SD_CMD_SET_BLOCKLEN 16 +#define SD_CMD_READ_SINGLE_BLOCK 17 +#define SD_CMD_READ_MULTIPLE_BLOCK 18 +#define SD_CMD_WRITE_BLOCK 24 +#define SD_CMD_WRITE_MULTIPLE_BLOCK 25 +#define SD_CMD_PROGRAM_CSD 27 +#define SD_CMD_SET_WRITE_PROT 28 +#define SD_CMD_CLR_WRITE_PROT 29 +#define SD_CMD_SEND_WRITE_PROT 30 +#define SD_CMD_ERASE_WR_BLK_START 32 +#define SD_CMD_ERASE_WR_BLK_END 33 +#define SD_CMD_ERASE 38 +#define SD_CMD_LOCK_UNLOCK 42 +#define SD_CMD_IO_RW_DIRECT 52 /* mandatory for SDIO */ +#define SD_CMD_IO_RW_EXTENDED 53 /* mandatory for SDIO */ +#define SD_CMD_APP_CMD 55 +#define SD_CMD_GEN_CMD 56 +#define SD_CMD_READ_OCR 58 +#define SD_CMD_CRC_ON_OFF 59 /* mandatory for SDIO */ +#define SD_ACMD_SD_STATUS 13 +#define SD_ACMD_SEND_NUM_WR_BLOCKS 22 +#define SD_ACMD_SET_WR_BLOCK_ERASE_CNT 23 +#define SD_ACMD_SD_SEND_OP_COND 41 +#define SD_ACMD_SET_CLR_CARD_DETECT 42 +#define SD_ACMD_SEND_SCR 51 + +/* argument for SD_CMD_IO_RW_DIRECT and SD_CMD_IO_RW_EXTENDED */ +#define SD_IO_OP_READ 0 /* Read_Write: Read */ +#define SD_IO_OP_WRITE 1 /* Read_Write: Write */ +#define SD_IO_RW_NORMAL 0 /* no RAW */ +#define SD_IO_RW_RAW 1 /* RAW */ +#define SD_IO_BYTE_MODE 0 /* Byte Mode */ +#define SD_IO_BLOCK_MODE 1 /* BlockMode */ +#define SD_IO_FIXED_ADDRESS 0 /* fix Address */ +#define SD_IO_INCREMENT_ADDRESS 1 /* IncrementAddress */ + +/* build SD_CMD_IO_RW_DIRECT Argument */ +#define SDIO_IO_RW_DIRECT_ARG(rw, raw, func, addr, data) \ + ((((rw) & 
1) << 31) | (((func) & 0x7) << 28) | (((raw) & 1) << 27) | \ + (((addr) & 0x1FFFF) << 9) | ((data) & 0xFF)) + +/* build SD_CMD_IO_RW_EXTENDED Argument */ +#define SDIO_IO_RW_EXTENDED_ARG(rw, blk, func, addr, inc_addr, count) \ + ((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((blk) & 1) << 27) | \ + (((inc_addr) & 1) << 26) | (((addr) & 0x1FFFF) << 9) | ((count) & 0x1FF)) + +/* SDIO response parameters */ +#define SD_RSP_NO_NONE 0 +#define SD_RSP_NO_1 1 +#define SD_RSP_NO_2 2 +#define SD_RSP_NO_3 3 +#define SD_RSP_NO_4 4 +#define SD_RSP_NO_5 5 +#define SD_RSP_NO_6 6 + + /* Modified R6 response (to CMD3) */ +#define SD_RSP_MR6_COM_CRC_ERROR 0x8000 +#define SD_RSP_MR6_ILLEGAL_COMMAND 0x4000 +#define SD_RSP_MR6_ERROR 0x2000 + + /* Modified R1 in R4 Response (to CMD5) */ +#define SD_RSP_MR1_SBIT 0x80 +#define SD_RSP_MR1_PARAMETER_ERROR 0x40 +#define SD_RSP_MR1_RFU5 0x20 +#define SD_RSP_MR1_FUNC_NUM_ERROR 0x10 +#define SD_RSP_MR1_COM_CRC_ERROR 0x08 +#define SD_RSP_MR1_ILLEGAL_COMMAND 0x04 +#define SD_RSP_MR1_RFU1 0x02 +#define SD_RSP_MR1_IDLE_STATE 0x01 + + /* R5 response (to CMD52 and CMD53) */ +#define SD_RSP_R5_COM_CRC_ERROR 0x80 +#define SD_RSP_R5_ILLEGAL_COMMAND 0x40 +#define SD_RSP_R5_IO_CURRENTSTATE1 0x20 +#define SD_RSP_R5_IO_CURRENTSTATE0 0x10 +#define SD_RSP_R5_ERROR 0x08 +#define SD_RSP_R5_RFU 0x04 +#define SD_RSP_R5_FUNC_NUM_ERROR 0x02 +#define SD_RSP_R5_OUT_OF_RANGE 0x01 + +#define SD_RSP_R5_ERRBITS 0xCB + + +/* ------------------------------------------------ + * SDIO Commands and responses + * + * I/O only commands are: + * CMD0, CMD3, CMD5, CMD7, CMD14, CMD15, CMD52, CMD53 + * ------------------------------------------------ + */ + +/* SDIO Commands */ +#define SDIOH_CMD_0 0 +#define SDIOH_CMD_3 3 +#define SDIOH_CMD_5 5 +#define SDIOH_CMD_7 7 +#define SDIOH_CMD_11 11 +#define SDIOH_CMD_14 14 +#define SDIOH_CMD_15 15 +#define SDIOH_CMD_19 19 +#define SDIOH_CMD_52 52 +#define SDIOH_CMD_53 53 +#define SDIOH_CMD_59 59 + +/* SDIO Command Responses */ +#define SDIOH_RSP_NONE 0 +#define SDIOH_RSP_R1 1 +#define SDIOH_RSP_R2 2 +#define SDIOH_RSP_R3 3 +#define SDIOH_RSP_R4 4 +#define SDIOH_RSP_R5 5 +#define SDIOH_RSP_R6 6 + +/* + * SDIO Response Error flags + */ +#define SDIOH_RSP5_ERROR_FLAGS 0xCB + +/* ------------------------------------------------ + * SDIO Command structures. 
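For each argument field, the *_M macro below is its width mask (built with BITFIELD_MASK) and the matching *_S macro is its bit position within the 32-bit command argument. As an illustration that is not part of the original file, a single-byte CMD52 read argument for function 1 could be assembled with the earlier helper as SDIO_IO_RW_DIRECT_ARG(SD_IO_OP_READ, SD_IO_RW_NORMAL, SDIO_FUNC_1, regaddr, 0), where regaddr stands in for some register offset.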
I/O only commands are: + * + * CMD0, CMD3, CMD5, CMD7, CMD15, CMD52, CMD53 + * ------------------------------------------------ + */ + +#define CMD5_OCR_M BITFIELD_MASK(24) +#define CMD5_OCR_S 0 + +#define CMD5_S18R_M BITFIELD_MASK(1) +#define CMD5_S18R_S 24 + +#define CMD7_RCA_M BITFIELD_MASK(16) +#define CMD7_RCA_S 16 + +#define CMD14_RCA_M BITFIELD_MASK(16) +#define CMD14_RCA_S 16 +#define CMD14_SLEEP_M BITFIELD_MASK(1) +#define CMD14_SLEEP_S 15 + +#define CMD_15_RCA_M BITFIELD_MASK(16) +#define CMD_15_RCA_S 16 + +#define CMD52_DATA_M BITFIELD_MASK(8) /* Bits [7:0] - Write Data/Stuff bits of CMD52 + */ +#define CMD52_DATA_S 0 +#define CMD52_REG_ADDR_M BITFIELD_MASK(17) /* Bits [25:9] - register address */ +#define CMD52_REG_ADDR_S 9 +#define CMD52_RAW_M BITFIELD_MASK(1) /* Bit 27 - Read after Write flag */ +#define CMD52_RAW_S 27 +#define CMD52_FUNCTION_M BITFIELD_MASK(3) /* Bits [30:28] - Function number */ +#define CMD52_FUNCTION_S 28 +#define CMD52_RW_FLAG_M BITFIELD_MASK(1) /* Bit 31 - R/W flag */ +#define CMD52_RW_FLAG_S 31 + + +#define CMD53_BYTE_BLK_CNT_M BITFIELD_MASK(9) /* Bits [8:0] - Byte/Block Count of CMD53 */ +#define CMD53_BYTE_BLK_CNT_S 0 +#define CMD53_REG_ADDR_M BITFIELD_MASK(17) /* Bits [25:9] - register address */ +#define CMD53_REG_ADDR_S 9 +#define CMD53_OP_CODE_M BITFIELD_MASK(1) /* Bit 26 - R/W Operation Code */ +#define CMD53_OP_CODE_S 26 +#define CMD53_BLK_MODE_M BITFIELD_MASK(1) /* Bit 27 - Block Mode */ +#define CMD53_BLK_MODE_S 27 +#define CMD53_FUNCTION_M BITFIELD_MASK(3) /* Bits [30:28] - Function number */ +#define CMD53_FUNCTION_S 28 +#define CMD53_RW_FLAG_M BITFIELD_MASK(1) /* Bit 31 - R/W flag */ +#define CMD53_RW_FLAG_S 31 + +/* ------------------------------------------------------ + * SDIO Command Response structures for SD1 and SD4 modes + * ----------------------------------------------------- + */ +#define RSP4_IO_OCR_M BITFIELD_MASK(24) /* Bits [23:0] - Card's OCR Bits [23:0] */ +#define RSP4_IO_OCR_S 0 + +#define RSP4_S18A_M BITFIELD_MASK(1) /* Bits [23:0] - Card's OCR Bits [23:0] */ +#define RSP4_S18A_S 24 + +#define RSP4_STUFF_M BITFIELD_MASK(3) /* Bits [26:24] - Stuff bits */ +#define RSP4_STUFF_S 24 +#define RSP4_MEM_PRESENT_M BITFIELD_MASK(1) /* Bit 27 - Memory present */ +#define RSP4_MEM_PRESENT_S 27 +#define RSP4_NUM_FUNCS_M BITFIELD_MASK(3) /* Bits [30:28] - Number of I/O funcs */ +#define RSP4_NUM_FUNCS_S 28 +#define RSP4_CARD_READY_M BITFIELD_MASK(1) /* Bit 31 - SDIO card ready */ +#define RSP4_CARD_READY_S 31 + +#define RSP6_STATUS_M BITFIELD_MASK(16) /* Bits [15:0] - Card status bits [19,22,23,12:0] + */ +#define RSP6_STATUS_S 0 +#define RSP6_IO_RCA_M BITFIELD_MASK(16) /* Bits [31:16] - RCA bits[31-16] */ +#define RSP6_IO_RCA_S 16 + +#define RSP1_AKE_SEQ_ERROR_M BITFIELD_MASK(1) /* Bit 3 - Authentication seq error */ +#define RSP1_AKE_SEQ_ERROR_S 3 +#define RSP1_APP_CMD_M BITFIELD_MASK(1) /* Bit 5 - Card expects ACMD */ +#define RSP1_APP_CMD_S 5 +#define RSP1_READY_FOR_DATA_M BITFIELD_MASK(1) /* Bit 8 - Ready for data (buff empty) */ +#define RSP1_READY_FOR_DATA_S 8 +#define RSP1_CURR_STATE_M BITFIELD_MASK(4) /* Bits [12:9] - State of card + * when Cmd was received + */ +#define RSP1_CURR_STATE_S 9 +#define RSP1_EARSE_RESET_M BITFIELD_MASK(1) /* Bit 13 - Erase seq cleared */ +#define RSP1_EARSE_RESET_S 13 +#define RSP1_CARD_ECC_DISABLE_M BITFIELD_MASK(1) /* Bit 14 - Card ECC disabled */ +#define RSP1_CARD_ECC_DISABLE_S 14 +#define RSP1_WP_ERASE_SKIP_M BITFIELD_MASK(1) /* Bit 15 - Partial blocks erased due to W/P */ +#define 
RSP1_WP_ERASE_SKIP_S 15 +#define RSP1_CID_CSD_OVERW_M BITFIELD_MASK(1) /* Bit 16 - Illegal write to CID or R/O bits + * of CSD + */ +#define RSP1_CID_CSD_OVERW_S 16 +#define RSP1_ERROR_M BITFIELD_MASK(1) /* Bit 19 - General/Unknown error */ +#define RSP1_ERROR_S 19 +#define RSP1_CC_ERROR_M BITFIELD_MASK(1) /* Bit 20 - Internal Card Control error */ +#define RSP1_CC_ERROR_S 20 +#define RSP1_CARD_ECC_FAILED_M BITFIELD_MASK(1) /* Bit 21 - Card internal ECC failed + * to correct data + */ +#define RSP1_CARD_ECC_FAILED_S 21 +#define RSP1_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 22 - Cmd not legal for the card state */ +#define RSP1_ILLEGAL_CMD_S 22 +#define RSP1_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 23 - CRC check of previous command failed + */ +#define RSP1_COM_CRC_ERROR_S 23 +#define RSP1_LOCK_UNLOCK_FAIL_M BITFIELD_MASK(1) /* Bit 24 - Card lock-unlock Cmd Seq error */ +#define RSP1_LOCK_UNLOCK_FAIL_S 24 +#define RSP1_CARD_LOCKED_M BITFIELD_MASK(1) /* Bit 25 - Card locked by the host */ +#define RSP1_CARD_LOCKED_S 25 +#define RSP1_WP_VIOLATION_M BITFIELD_MASK(1) /* Bit 26 - Attempt to program + * write-protected blocks + */ +#define RSP1_WP_VIOLATION_S 26 +#define RSP1_ERASE_PARAM_M BITFIELD_MASK(1) /* Bit 27 - Invalid erase blocks */ +#define RSP1_ERASE_PARAM_S 27 +#define RSP1_ERASE_SEQ_ERR_M BITFIELD_MASK(1) /* Bit 28 - Erase Cmd seq error */ +#define RSP1_ERASE_SEQ_ERR_S 28 +#define RSP1_BLK_LEN_ERR_M BITFIELD_MASK(1) /* Bit 29 - Block length error */ +#define RSP1_BLK_LEN_ERR_S 29 +#define RSP1_ADDR_ERR_M BITFIELD_MASK(1) /* Bit 30 - Misaligned address */ +#define RSP1_ADDR_ERR_S 30 +#define RSP1_OUT_OF_RANGE_M BITFIELD_MASK(1) /* Bit 31 - Cmd arg was out of range */ +#define RSP1_OUT_OF_RANGE_S 31 + + +#define RSP5_DATA_M BITFIELD_MASK(8) /* Bits [0:7] - data */ +#define RSP5_DATA_S 0 +#define RSP5_FLAGS_M BITFIELD_MASK(8) /* Bit [15:8] - Rsp flags */ +#define RSP5_FLAGS_S 8 +#define RSP5_STUFF_M BITFIELD_MASK(16) /* Bits [31:16] - Stuff bits */ +#define RSP5_STUFF_S 16 + +/* ---------------------------------------------- + * SDIO Command Response structures for SPI mode + * ---------------------------------------------- + */ +#define SPIRSP4_IO_OCR_M BITFIELD_MASK(16) /* Bits [15:0] - Card's OCR Bits [23:8] */ +#define SPIRSP4_IO_OCR_S 0 +#define SPIRSP4_STUFF_M BITFIELD_MASK(3) /* Bits [18:16] - Stuff bits */ +#define SPIRSP4_STUFF_S 16 +#define SPIRSP4_MEM_PRESENT_M BITFIELD_MASK(1) /* Bit 19 - Memory present */ +#define SPIRSP4_MEM_PRESENT_S 19 +#define SPIRSP4_NUM_FUNCS_M BITFIELD_MASK(3) /* Bits [22:20] - Number of I/O funcs */ +#define SPIRSP4_NUM_FUNCS_S 20 +#define SPIRSP4_CARD_READY_M BITFIELD_MASK(1) /* Bit 23 - SDIO card ready */ +#define SPIRSP4_CARD_READY_S 23 +#define SPIRSP4_IDLE_STATE_M BITFIELD_MASK(1) /* Bit 24 - idle state */ +#define SPIRSP4_IDLE_STATE_S 24 +#define SPIRSP4_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 26 - Illegal Cmd error */ +#define SPIRSP4_ILLEGAL_CMD_S 26 +#define SPIRSP4_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 27 - COM CRC error */ +#define SPIRSP4_COM_CRC_ERROR_S 27 +#define SPIRSP4_FUNC_NUM_ERROR_M BITFIELD_MASK(1) /* Bit 28 - Function number error + */ +#define SPIRSP4_FUNC_NUM_ERROR_S 28 +#define SPIRSP4_PARAM_ERROR_M BITFIELD_MASK(1) /* Bit 30 - Parameter Error Bit */ +#define SPIRSP4_PARAM_ERROR_S 30 +#define SPIRSP4_START_BIT_M BITFIELD_MASK(1) /* Bit 31 - Start Bit */ +#define SPIRSP4_START_BIT_S 31 + +#define SPIRSP5_DATA_M BITFIELD_MASK(8) /* Bits [23:16] - R/W Data */ +#define SPIRSP5_DATA_S 16 +#define SPIRSP5_IDLE_STATE_M 
BITFIELD_MASK(1) /* Bit 24 - Idle state */ +#define SPIRSP5_IDLE_STATE_S 24 +#define SPIRSP5_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 26 - Illegal Cmd error */ +#define SPIRSP5_ILLEGAL_CMD_S 26 +#define SPIRSP5_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 27 - COM CRC error */ +#define SPIRSP5_COM_CRC_ERROR_S 27 +#define SPIRSP5_FUNC_NUM_ERROR_M BITFIELD_MASK(1) /* Bit 28 - Function number error + */ +#define SPIRSP5_FUNC_NUM_ERROR_S 28 +#define SPIRSP5_PARAM_ERROR_M BITFIELD_MASK(1) /* Bit 30 - Parameter Error Bit */ +#define SPIRSP5_PARAM_ERROR_S 30 +#define SPIRSP5_START_BIT_M BITFIELD_MASK(1) /* Bit 31 - Start Bit */ +#define SPIRSP5_START_BIT_S 31 + +/* RSP6 card status format; Pg 68 Physical Layer spec v 1.10 */ +#define RSP6STAT_AKE_SEQ_ERROR_M BITFIELD_MASK(1) /* Bit 3 - Authentication seq error + */ +#define RSP6STAT_AKE_SEQ_ERROR_S 3 +#define RSP6STAT_APP_CMD_M BITFIELD_MASK(1) /* Bit 5 - Card expects ACMD */ +#define RSP6STAT_APP_CMD_S 5 +#define RSP6STAT_READY_FOR_DATA_M BITFIELD_MASK(1) /* Bit 8 - Ready for data + * (buff empty) + */ +#define RSP6STAT_READY_FOR_DATA_S 8 +#define RSP6STAT_CURR_STATE_M BITFIELD_MASK(4) /* Bits [12:9] - Card state at + * Cmd reception + */ +#define RSP6STAT_CURR_STATE_S 9 +#define RSP6STAT_ERROR_M BITFIELD_MASK(1) /* Bit 13 - General/Unknown error Bit 19 + */ +#define RSP6STAT_ERROR_S 13 +#define RSP6STAT_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 14 - Illegal cmd for + * card state Bit 22 + */ +#define RSP6STAT_ILLEGAL_CMD_S 14 +#define RSP6STAT_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 15 - CRC previous command + * failed Bit 23 + */ +#define RSP6STAT_COM_CRC_ERROR_S 15 + +#define SDIOH_XFER_TYPE_READ SD_IO_OP_READ +#define SDIOH_XFER_TYPE_WRITE SD_IO_OP_WRITE + +/* command issue options */ +#define CMD_OPTION_DEFAULT 0 +#define CMD_OPTION_TUNING 1 + +#endif /* _SDIO_H */ diff --git a/drivers/net/wireless/bcmdhd/include/sdioh.h b/drivers/net/wireless/bcmdhd/include/sdioh.h new file mode 100644 index 0000000000000..1d820d1569e73 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/sdioh.h @@ -0,0 +1,446 @@ +/* + * SDIO Host Controller Spec header file + * Register map and definitions for the Standard Host Controller + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: sdioh.h 345478 2012-07-18 06:45:15Z $ + */ + +#ifndef _SDIOH_H +#define _SDIOH_H + +#define SD_SysAddr 0x000 +#define SD_BlockSize 0x004 +#define SD_BlockCount 0x006 +#define SD_Arg0 0x008 +#define SD_Arg1 0x00A +#define SD_TransferMode 0x00C +#define SD_Command 0x00E +#define SD_Response0 0x010 +#define SD_Response1 0x012 +#define SD_Response2 0x014 +#define SD_Response3 0x016 +#define SD_Response4 0x018 +#define SD_Response5 0x01A +#define SD_Response6 0x01C +#define SD_Response7 0x01E +#define SD_BufferDataPort0 0x020 +#define SD_BufferDataPort1 0x022 +#define SD_PresentState 0x024 +#define SD_HostCntrl 0x028 +#define SD_PwrCntrl 0x029 +#define SD_BlockGapCntrl 0x02A +#define SD_WakeupCntrl 0x02B +#define SD_ClockCntrl 0x02C +#define SD_TimeoutCntrl 0x02E +#define SD_SoftwareReset 0x02F +#define SD_IntrStatus 0x030 +#define SD_ErrorIntrStatus 0x032 +#define SD_IntrStatusEnable 0x034 +#define SD_ErrorIntrStatusEnable 0x036 +#define SD_IntrSignalEnable 0x038 +#define SD_ErrorIntrSignalEnable 0x03A +#define SD_CMD12ErrorStatus 0x03C +#define SD_Capabilities 0x040 +#define SD_Capabilities3 0x044 +#define SD_MaxCurCap 0x048 +#define SD_MaxCurCap_Reserved 0x04C +#define SD_ADMA_ErrStatus 0x054 +#define SD_ADMA_SysAddr 0x58 +#define SD_SlotInterruptStatus 0x0FC +#define SD_HostControllerVersion 0x0FE +#define SD_GPIO_Reg 0x100 +#define SD_GPIO_OE 0x104 +#define SD_GPIO_Enable 0x108 + + +/* SD specific registers in PCI config space */ +#define SD_SlotInfo 0x40 + +/* HC 3.0 specific registers and offsets */ +#define SD3_HostCntrl2 0x03E +/* preset regsstart and count */ +#define SD3_PresetValStart 0x060 +#define SD3_PresetValCount 8 +/* preset-indiv regs */ +#define SD3_PresetVal_init 0x060 +#define SD3_PresetVal_default 0x062 +#define SD3_PresetVal_HS 0x064 +#define SD3_PresetVal_SDR12 0x066 +#define SD3_PresetVal_SDR25 0x068 +#define SD3_PresetVal_SDR50 0x06a +#define SD3_PresetVal_SDR104 0x06c +#define SD3_PresetVal_DDR50 0x06e +/* SDIO3.0 Revx specific Registers */ +#define SD3_Tuning_Info_Register 0x0EC +#define SD3_WL_BT_reset_register 0x0F0 + + +/* preset value indices */ +#define SD3_PRESETVAL_INITIAL_IX 0 +#define SD3_PRESETVAL_DESPEED_IX 1 +#define SD3_PRESETVAL_HISPEED_IX 2 +#define SD3_PRESETVAL_SDR12_IX 3 +#define SD3_PRESETVAL_SDR25_IX 4 +#define SD3_PRESETVAL_SDR50_IX 5 +#define SD3_PRESETVAL_SDR104_IX 6 +#define SD3_PRESETVAL_DDR50_IX 7 + +/* SD_Capabilities reg (0x040) */ +#define CAP_TO_CLKFREQ_M BITFIELD_MASK(6) +#define CAP_TO_CLKFREQ_S 0 +#define CAP_TO_CLKUNIT_M BITFIELD_MASK(1) +#define CAP_TO_CLKUNIT_S 7 +/* Note: for sdio-2.0 case, this mask has to be 6 bits, but msb 2 + bits are reserved. 
going ahead with 8 bits, as it is req for 3.0 +*/ +#define CAP_BASECLK_M BITFIELD_MASK(8) +#define CAP_BASECLK_S 8 +#define CAP_MAXBLOCK_M BITFIELD_MASK(2) +#define CAP_MAXBLOCK_S 16 +#define CAP_ADMA2_M BITFIELD_MASK(1) +#define CAP_ADMA2_S 19 +#define CAP_ADMA1_M BITFIELD_MASK(1) +#define CAP_ADMA1_S 20 +#define CAP_HIGHSPEED_M BITFIELD_MASK(1) +#define CAP_HIGHSPEED_S 21 +#define CAP_DMA_M BITFIELD_MASK(1) +#define CAP_DMA_S 22 +#define CAP_SUSPEND_M BITFIELD_MASK(1) +#define CAP_SUSPEND_S 23 +#define CAP_VOLT_3_3_M BITFIELD_MASK(1) +#define CAP_VOLT_3_3_S 24 +#define CAP_VOLT_3_0_M BITFIELD_MASK(1) +#define CAP_VOLT_3_0_S 25 +#define CAP_VOLT_1_8_M BITFIELD_MASK(1) +#define CAP_VOLT_1_8_S 26 +#define CAP_64BIT_HOST_M BITFIELD_MASK(1) +#define CAP_64BIT_HOST_S 28 + +#define SDIO_OCR_READ_FAIL (2) + + +#define CAP_ASYNCINT_SUP_M BITFIELD_MASK(1) +#define CAP_ASYNCINT_SUP_S 29 + +#define CAP_SLOTTYPE_M BITFIELD_MASK(2) +#define CAP_SLOTTYPE_S 30 + +#define CAP3_MSBits_OFFSET (32) +/* note: following are caps MSB32 bits. + So the bits start from 0, instead of 32. that is why + CAP3_MSBits_OFFSET is subtracted. +*/ +#define CAP3_SDR50_SUP_M BITFIELD_MASK(1) +#define CAP3_SDR50_SUP_S (32 - CAP3_MSBits_OFFSET) + +#define CAP3_SDR104_SUP_M BITFIELD_MASK(1) +#define CAP3_SDR104_SUP_S (33 - CAP3_MSBits_OFFSET) + +#define CAP3_DDR50_SUP_M BITFIELD_MASK(1) +#define CAP3_DDR50_SUP_S (34 - CAP3_MSBits_OFFSET) + +/* for knowing the clk caps in a single read */ +#define CAP3_30CLKCAP_M BITFIELD_MASK(3) +#define CAP3_30CLKCAP_S (32 - CAP3_MSBits_OFFSET) + +#define CAP3_DRIVTYPE_A_M BITFIELD_MASK(1) +#define CAP3_DRIVTYPE_A_S (36 - CAP3_MSBits_OFFSET) + +#define CAP3_DRIVTYPE_C_M BITFIELD_MASK(1) +#define CAP3_DRIVTYPE_C_S (37 - CAP3_MSBits_OFFSET) + +#define CAP3_DRIVTYPE_D_M BITFIELD_MASK(1) +#define CAP3_DRIVTYPE_D_S (38 - CAP3_MSBits_OFFSET) + +#define CAP3_RETUNING_TC_M BITFIELD_MASK(4) +#define CAP3_RETUNING_TC_S (40 - CAP3_MSBits_OFFSET) + +#define CAP3_TUNING_SDR50_M BITFIELD_MASK(1) +#define CAP3_TUNING_SDR50_S (45 - CAP3_MSBits_OFFSET) + +#define CAP3_RETUNING_MODES_M BITFIELD_MASK(2) +#define CAP3_RETUNING_MODES_S (46 - CAP3_MSBits_OFFSET) + +#define CAP3_CLK_MULT_M BITFIELD_MASK(8) +#define CAP3_CLK_MULT_S (48 - CAP3_MSBits_OFFSET) + +#define PRESET_DRIVR_SELECT_M BITFIELD_MASK(2) +#define PRESET_DRIVR_SELECT_S 14 + +#define PRESET_CLK_DIV_M BITFIELD_MASK(10) +#define PRESET_CLK_DIV_S 0 + +/* SD_MaxCurCap reg (0x048) */ +#define CAP_CURR_3_3_M BITFIELD_MASK(8) +#define CAP_CURR_3_3_S 0 +#define CAP_CURR_3_0_M BITFIELD_MASK(8) +#define CAP_CURR_3_0_S 8 +#define CAP_CURR_1_8_M BITFIELD_MASK(8) +#define CAP_CURR_1_8_S 16 + +/* SD_SysAddr: Offset 0x0000, Size 4 bytes */ + +/* SD_BlockSize: Offset 0x004, Size 2 bytes */ +#define BLKSZ_BLKSZ_M BITFIELD_MASK(12) +#define BLKSZ_BLKSZ_S 0 +#define BLKSZ_BNDRY_M BITFIELD_MASK(3) +#define BLKSZ_BNDRY_S 12 + +/* SD_BlockCount: Offset 0x006, size 2 bytes */ + +/* SD_Arg0: Offset 0x008, size = 4 bytes */ +/* SD_TransferMode Offset 0x00C, size = 2 bytes */ +#define XFER_DMA_ENABLE_M BITFIELD_MASK(1) +#define XFER_DMA_ENABLE_S 0 +#define XFER_BLK_COUNT_EN_M BITFIELD_MASK(1) +#define XFER_BLK_COUNT_EN_S 1 +#define XFER_CMD_12_EN_M BITFIELD_MASK(1) +#define XFER_CMD_12_EN_S 2 +#define XFER_DATA_DIRECTION_M BITFIELD_MASK(1) +#define XFER_DATA_DIRECTION_S 4 +#define XFER_MULTI_BLOCK_M BITFIELD_MASK(1) +#define XFER_MULTI_BLOCK_S 5 + +/* SD_Command: Offset 0x00E, size = 2 bytes */ +/* resp_type field */ +#define RESP_TYPE_NONE 0 +#define RESP_TYPE_136 1 +#define 
RESP_TYPE_48 2 +#define RESP_TYPE_48_BUSY 3 +/* type field */ +#define CMD_TYPE_NORMAL 0 +#define CMD_TYPE_SUSPEND 1 +#define CMD_TYPE_RESUME 2 +#define CMD_TYPE_ABORT 3 + +#define CMD_RESP_TYPE_M BITFIELD_MASK(2) /* Bits [0-1] - Response type */ +#define CMD_RESP_TYPE_S 0 +#define CMD_CRC_EN_M BITFIELD_MASK(1) /* Bit 3 - CRC enable */ +#define CMD_CRC_EN_S 3 +#define CMD_INDEX_EN_M BITFIELD_MASK(1) /* Bit 4 - Enable index checking */ +#define CMD_INDEX_EN_S 4 +#define CMD_DATA_EN_M BITFIELD_MASK(1) /* Bit 5 - Using DAT line */ +#define CMD_DATA_EN_S 5 +#define CMD_TYPE_M BITFIELD_MASK(2) /* Bit [6-7] - Normal, abort, resume, etc + */ +#define CMD_TYPE_S 6 +#define CMD_INDEX_M BITFIELD_MASK(6) /* Bits [8-13] - Command number */ +#define CMD_INDEX_S 8 + +/* SD_BufferDataPort0 : Offset 0x020, size = 2 or 4 bytes */ +/* SD_BufferDataPort1 : Offset 0x022, size = 2 bytes */ +/* SD_PresentState : Offset 0x024, size = 4 bytes */ +#define PRES_CMD_INHIBIT_M BITFIELD_MASK(1) /* Bit 0 May use CMD */ +#define PRES_CMD_INHIBIT_S 0 +#define PRES_DAT_INHIBIT_M BITFIELD_MASK(1) /* Bit 1 May use DAT */ +#define PRES_DAT_INHIBIT_S 1 +#define PRES_DAT_BUSY_M BITFIELD_MASK(1) /* Bit 2 DAT is busy */ +#define PRES_DAT_BUSY_S 2 +#define PRES_PRESENT_RSVD_M BITFIELD_MASK(5) /* Bit [3-7] rsvd */ +#define PRES_PRESENT_RSVD_S 3 +#define PRES_WRITE_ACTIVE_M BITFIELD_MASK(1) /* Bit 8 Write is active */ +#define PRES_WRITE_ACTIVE_S 8 +#define PRES_READ_ACTIVE_M BITFIELD_MASK(1) /* Bit 9 Read is active */ +#define PRES_READ_ACTIVE_S 9 +#define PRES_WRITE_DATA_RDY_M BITFIELD_MASK(1) /* Bit 10 Write buf is avail */ +#define PRES_WRITE_DATA_RDY_S 10 +#define PRES_READ_DATA_RDY_M BITFIELD_MASK(1) /* Bit 11 Read buf data avail */ +#define PRES_READ_DATA_RDY_S 11 +#define PRES_CARD_PRESENT_M BITFIELD_MASK(1) /* Bit 16 Card present - debounced */ +#define PRES_CARD_PRESENT_S 16 +#define PRES_CARD_STABLE_M BITFIELD_MASK(1) /* Bit 17 Debugging */ +#define PRES_CARD_STABLE_S 17 +#define PRES_CARD_PRESENT_RAW_M BITFIELD_MASK(1) /* Bit 18 Not debounced */ +#define PRES_CARD_PRESENT_RAW_S 18 +#define PRES_WRITE_ENABLED_M BITFIELD_MASK(1) /* Bit 19 Write protected? 
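Illustrative note, not part of the original header: assuming the usual GFIELD helper from bcmdefs.h, where GFIELD(val, field) expands to ((val >> field##_S) & field##_M), this flag would be read as GFIELD(present_state, PRES_WRITE_ENABLED), with present_state being a hypothetical 32-bit read of the SD_PresentState register.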
*/ +#define PRES_WRITE_ENABLED_S 19 +#define PRES_DAT_SIGNAL_M BITFIELD_MASK(4) /* Bit [20-23] Debugging */ +#define PRES_DAT_SIGNAL_S 20 +#define PRES_CMD_SIGNAL_M BITFIELD_MASK(1) /* Bit 24 Debugging */ +#define PRES_CMD_SIGNAL_S 24 + +/* SD_HostCntrl: Offset 0x028, size = 1 bytes */ +#define HOST_LED_M BITFIELD_MASK(1) /* Bit 0 LED On/Off */ +#define HOST_LED_S 0 +#define HOST_DATA_WIDTH_M BITFIELD_MASK(1) /* Bit 1 4 bit enable */ +#define HOST_DATA_WIDTH_S 1 +#define HOST_HI_SPEED_EN_M BITFIELD_MASK(1) /* Bit 2 High speed vs low speed */ +#define HOST_DMA_SEL_S 3 +#define HOST_DMA_SEL_M BITFIELD_MASK(2) /* Bit 4:3 DMA Select */ +#define HOST_HI_SPEED_EN_S 2 + +/* Host Control2: */ +#define HOSTCtrl2_PRESVAL_EN_M BITFIELD_MASK(1) /* 1 bit */ +#define HOSTCtrl2_PRESVAL_EN_S 15 /* bit# */ + +#define HOSTCtrl2_ASYINT_EN_M BITFIELD_MASK(1) /* 1 bit */ +#define HOSTCtrl2_ASYINT_EN_S 14 /* bit# */ + +#define HOSTCtrl2_SAMPCLK_SEL_M BITFIELD_MASK(1) /* 1 bit */ +#define HOSTCtrl2_SAMPCLK_SEL_S 7 /* bit# */ + +#define HOSTCtrl2_EXEC_TUNING_M BITFIELD_MASK(1) /* 1 bit */ +#define HOSTCtrl2_EXEC_TUNING_S 6 /* bit# */ + +#define HOSTCtrl2_DRIVSTRENGTH_SEL_M BITFIELD_MASK(2) /* 2 bit */ +#define HOSTCtrl2_DRIVSTRENGTH_SEL_S 4 /* bit# */ + +#define HOSTCtrl2_1_8SIG_EN_M BITFIELD_MASK(1) /* 1 bit */ +#define HOSTCtrl2_1_8SIG_EN_S 3 /* bit# */ + +#define HOSTCtrl2_UHSMODE_SEL_M BITFIELD_MASK(3) /* 3 bit */ +#define HOSTCtrl2_UHSMODE_SEL_S 0 /* bit# */ + +#define HOST_CONTR_VER_2 (1) +#define HOST_CONTR_VER_3 (2) + +/* misc defines */ +#define SD1_MODE 0x1 /* SD Host Cntrlr Spec */ +#define SD4_MODE 0x2 /* SD Host Cntrlr Spec */ + +/* SD_PwrCntrl: Offset 0x029, size = 1 bytes */ +#define PWR_BUS_EN_M BITFIELD_MASK(1) /* Bit 0 Power the bus */ +#define PWR_BUS_EN_S 0 +#define PWR_VOLTS_M BITFIELD_MASK(3) /* Bit [1-3] Voltage Select */ +#define PWR_VOLTS_S 1 + +/* SD_SoftwareReset: Offset 0x02F, size = 1 byte */ +#define SW_RESET_ALL_M BITFIELD_MASK(1) /* Bit 0 Reset All */ +#define SW_RESET_ALL_S 0 +#define SW_RESET_CMD_M BITFIELD_MASK(1) /* Bit 1 CMD Line Reset */ +#define SW_RESET_CMD_S 1 +#define SW_RESET_DAT_M BITFIELD_MASK(1) /* Bit 2 DAT Line Reset */ +#define SW_RESET_DAT_S 2 + +/* SD_IntrStatus: Offset 0x030, size = 2 bytes */ +/* Defs also serve SD_IntrStatusEnable and SD_IntrSignalEnable */ +#define INTSTAT_CMD_COMPLETE_M BITFIELD_MASK(1) /* Bit 0 */ +#define INTSTAT_CMD_COMPLETE_S 0 +#define INTSTAT_XFER_COMPLETE_M BITFIELD_MASK(1) +#define INTSTAT_XFER_COMPLETE_S 1 +#define INTSTAT_BLOCK_GAP_EVENT_M BITFIELD_MASK(1) +#define INTSTAT_BLOCK_GAP_EVENT_S 2 +#define INTSTAT_DMA_INT_M BITFIELD_MASK(1) +#define INTSTAT_DMA_INT_S 3 +#define INTSTAT_BUF_WRITE_READY_M BITFIELD_MASK(1) +#define INTSTAT_BUF_WRITE_READY_S 4 +#define INTSTAT_BUF_READ_READY_M BITFIELD_MASK(1) +#define INTSTAT_BUF_READ_READY_S 5 +#define INTSTAT_CARD_INSERTION_M BITFIELD_MASK(1) +#define INTSTAT_CARD_INSERTION_S 6 +#define INTSTAT_CARD_REMOVAL_M BITFIELD_MASK(1) +#define INTSTAT_CARD_REMOVAL_S 7 +#define INTSTAT_CARD_INT_M BITFIELD_MASK(1) +#define INTSTAT_CARD_INT_S 8 +#define INTSTAT_RETUNING_INT_M BITFIELD_MASK(1) /* Bit 12 */ +#define INTSTAT_RETUNING_INT_S 12 +#define INTSTAT_ERROR_INT_M BITFIELD_MASK(1) /* Bit 15 */ +#define INTSTAT_ERROR_INT_S 15 + +/* SD_ErrorIntrStatus: Offset 0x032, size = 2 bytes */ +/* Defs also serve SD_ErrorIntrStatusEnable and SD_ErrorIntrSignalEnable */ +#define ERRINT_CMD_TIMEOUT_M BITFIELD_MASK(1) +#define ERRINT_CMD_TIMEOUT_S 0 +#define ERRINT_CMD_CRC_M BITFIELD_MASK(1) +#define 
ERRINT_CMD_CRC_S 1 +#define ERRINT_CMD_ENDBIT_M BITFIELD_MASK(1) +#define ERRINT_CMD_ENDBIT_S 2 +#define ERRINT_CMD_INDEX_M BITFIELD_MASK(1) +#define ERRINT_CMD_INDEX_S 3 +#define ERRINT_DATA_TIMEOUT_M BITFIELD_MASK(1) +#define ERRINT_DATA_TIMEOUT_S 4 +#define ERRINT_DATA_CRC_M BITFIELD_MASK(1) +#define ERRINT_DATA_CRC_S 5 +#define ERRINT_DATA_ENDBIT_M BITFIELD_MASK(1) +#define ERRINT_DATA_ENDBIT_S 6 +#define ERRINT_CURRENT_LIMIT_M BITFIELD_MASK(1) +#define ERRINT_CURRENT_LIMIT_S 7 +#define ERRINT_AUTO_CMD12_M BITFIELD_MASK(1) +#define ERRINT_AUTO_CMD12_S 8 +#define ERRINT_VENDOR_M BITFIELD_MASK(4) +#define ERRINT_VENDOR_S 12 +#define ERRINT_ADMA_M BITFIELD_MASK(1) +#define ERRINT_ADMA_S 9 + +/* Also provide definitions in "normal" form to allow combined masks */ +#define ERRINT_CMD_TIMEOUT_BIT 0x0001 +#define ERRINT_CMD_CRC_BIT 0x0002 +#define ERRINT_CMD_ENDBIT_BIT 0x0004 +#define ERRINT_CMD_INDEX_BIT 0x0008 +#define ERRINT_DATA_TIMEOUT_BIT 0x0010 +#define ERRINT_DATA_CRC_BIT 0x0020 +#define ERRINT_DATA_ENDBIT_BIT 0x0040 +#define ERRINT_CURRENT_LIMIT_BIT 0x0080 +#define ERRINT_AUTO_CMD12_BIT 0x0100 +#define ERRINT_ADMA_BIT 0x0200 + +/* Masks to select CMD vs. DATA errors */ +#define ERRINT_CMD_ERRS (ERRINT_CMD_TIMEOUT_BIT | ERRINT_CMD_CRC_BIT |\ + ERRINT_CMD_ENDBIT_BIT | ERRINT_CMD_INDEX_BIT) +#define ERRINT_DATA_ERRS (ERRINT_DATA_TIMEOUT_BIT | ERRINT_DATA_CRC_BIT |\ + ERRINT_DATA_ENDBIT_BIT | ERRINT_ADMA_BIT) +#define ERRINT_TRANSFER_ERRS (ERRINT_CMD_ERRS | ERRINT_DATA_ERRS) + +/* SD_WakeupCntr_BlockGapCntrl : Offset 0x02A , size = bytes */ +/* SD_ClockCntrl : Offset 0x02C , size = bytes */ +/* SD_SoftwareReset_TimeoutCntrl : Offset 0x02E , size = bytes */ +/* SD_IntrStatus : Offset 0x030 , size = bytes */ +/* SD_ErrorIntrStatus : Offset 0x032 , size = bytes */ +/* SD_IntrStatusEnable : Offset 0x034 , size = bytes */ +/* SD_ErrorIntrStatusEnable : Offset 0x036 , size = bytes */ +/* SD_IntrSignalEnable : Offset 0x038 , size = bytes */ +/* SD_ErrorIntrSignalEnable : Offset 0x03A , size = bytes */ +/* SD_CMD12ErrorStatus : Offset 0x03C , size = bytes */ +/* SD_Capabilities : Offset 0x040 , size = bytes */ +/* SD_MaxCurCap : Offset 0x048 , size = bytes */ +/* SD_MaxCurCap_Reserved: Offset 0x04C , size = bytes */ +/* SD_SlotInterruptStatus: Offset 0x0FC , size = bytes */ +/* SD_HostControllerVersion : Offset 0x0FE , size = bytes */ + +/* SDIO Host Control Register DMA Mode Definitions */ +#define SDIOH_SDMA_MODE 0 +#define SDIOH_ADMA1_MODE 1 +#define SDIOH_ADMA2_MODE 2 +#define SDIOH_ADMA2_64_MODE 3 + +#define ADMA2_ATTRIBUTE_VALID (1 << 0) /* ADMA Descriptor line valid */ +#define ADMA2_ATTRIBUTE_END (1 << 1) /* End of Descriptor */ +#define ADMA2_ATTRIBUTE_INT (1 << 2) /* Interrupt when line is done */ +#define ADMA2_ATTRIBUTE_ACT_NOP (0 << 4) /* Skip current line, go to next. */ +#define ADMA2_ATTRIBUTE_ACT_RSV (1 << 4) /* Same as NOP */ +#define ADMA1_ATTRIBUTE_ACT_SET (1 << 4) /* ADMA1 Only - set transfer length */ +#define ADMA2_ATTRIBUTE_ACT_TRAN (2 << 4) /* Transfer Data of one descriptor line. 
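A minimal sketch, not taken from the original source: for a single 512-byte data transfer the final adma2_dscr_32b_t entry (defined below) would typically be filled as len_attr = (512 << 16) | ADMA2_ATTRIBUTE_ACT_TRAN | ADMA2_ATTRIBUTE_END | ADMA2_ATTRIBUTE_VALID, with phys_addr set to the DMA address of the buffer; the 16-bit length occupies the upper half of len_attr and the attribute bits the lower half.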
*/ +#define ADMA2_ATTRIBUTE_ACT_LINK (3 << 4) /* Link Descriptor */ + +/* ADMA2 Descriptor Table Entry for 32-bit Address */ +typedef struct adma2_dscr_32b { + uint32 len_attr; + uint32 phys_addr; +} adma2_dscr_32b_t; + +/* ADMA1 Descriptor Table Entry */ +typedef struct adma1_dscr { + uint32 phys_addr_attr; +} adma1_dscr_t; + +#endif /* _SDIOH_H */ diff --git a/drivers/net/wireless/bcmdhd/include/sdiovar.h b/drivers/net/wireless/bcmdhd/include/sdiovar.h new file mode 100644 index 0000000000000..55a3d3490c306 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/sdiovar.h @@ -0,0 +1,58 @@ +/* + * Structure used by apps whose drivers access SDIO drivers. + * Pulled out separately so dhdu and wlu can both use it. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: sdiovar.h 277737 2011-08-16 17:54:59Z $ + */ + +#ifndef _sdiovar_h_ +#define _sdiovar_h_ + +#include + +/* require default structure packing */ +#define BWL_DEFAULT_PACKING +#include + +typedef struct sdreg { + int func; + int offset; + int value; +} sdreg_t; + +/* Common msglevel constants */ +#define SDH_ERROR_VAL 0x0001 /* Error */ +#define SDH_TRACE_VAL 0x0002 /* Trace */ +#define SDH_INFO_VAL 0x0004 /* Info */ +#define SDH_DEBUG_VAL 0x0008 /* Debug */ +#define SDH_DATA_VAL 0x0010 /* Data */ +#define SDH_CTRL_VAL 0x0020 /* Control Regs */ +#define SDH_LOG_VAL 0x0040 /* Enable bcmlog */ +#define SDH_DMA_VAL 0x0080 /* DMA */ + +#define NUM_PREV_TRANSACTIONS 16 + + +#include + +#endif /* _sdiovar_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/siutils.h b/drivers/net/wireless/bcmdhd/include/siutils.h new file mode 100644 index 0000000000000..4e7aeb71cb027 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/siutils.h @@ -0,0 +1,277 @@ +/* + * Misc utility routines for accessing the SOC Interconnects + * of Broadcom HNBU chips. 
+ * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: siutils.h 335486 2012-05-28 09:47:55Z $ + */ + + +#ifndef _siutils_h_ +#define _siutils_h_ + + +struct si_pub { + uint socitype; + + uint bustype; + uint buscoretype; + uint buscorerev; + uint buscoreidx; + int ccrev; + uint32 cccaps; + uint32 cccaps_ext; + int pmurev; + uint32 pmucaps; + uint boardtype; + uint boardvendor; + uint boardflags; + uint boardflags2; + uint chip; + uint chiprev; + uint chippkg; + uint32 chipst; + bool issim; + uint socirev; + bool pci_pr32414; + +}; + + +typedef const struct si_pub si_t; + + + +#define SI_OSH NULL + +#define BADIDX (SI_MAXCORES + 1) + + +#define XTAL 0x1 +#define PLL 0x2 + + +#define CLK_FAST 0 +#define CLK_DYNAMIC 2 + + +#define GPIO_DRV_PRIORITY 0 +#define GPIO_APP_PRIORITY 1 +#define GPIO_HI_PRIORITY 2 + + +#define GPIO_PULLUP 0 +#define GPIO_PULLDN 1 + + +#define GPIO_REGEVT 0 +#define GPIO_REGEVT_INTMSK 1 +#define GPIO_REGEVT_INTPOL 2 + + +#define SI_DEVPATH_BUFSZ 16 + + +#define SI_DOATTACH 1 +#define SI_PCIDOWN 2 +#define SI_PCIUP 3 + +#define ISSIM_ENAB(sih) 0 + + +#if defined(BCMPMUCTL) +#define PMUCTL_ENAB(sih) (BCMPMUCTL) +#else +#define PMUCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PMU) +#endif + + +#if defined(BCMPMUCTL) && BCMPMUCTL +#define CCCTL_ENAB(sih) (0) +#define CCPLL_ENAB(sih) (0) +#else +#define CCCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PWR_CTL) +#define CCPLL_ENAB(sih) ((sih)->cccaps & CC_CAP_PLL_MASK) +#endif + +typedef void (*gpio_handler_t)(uint32 stat, void *arg); + + + +extern si_t *si_attach(uint pcidev, osl_t *osh, void *regs, uint bustype, + void *sdh, char **vars, uint *varsz); +extern si_t *si_kattach(osl_t *osh); +extern void si_detach(si_t *sih); +extern bool si_pci_war16165(si_t *sih); + +extern uint si_corelist(si_t *sih, uint coreid[]); +extern uint si_coreid(si_t *sih); +extern uint si_flag(si_t *sih); +extern uint si_intflag(si_t *sih); +extern uint si_coreidx(si_t *sih); +extern uint si_coreunit(si_t *sih); +extern uint si_corevendor(si_t *sih); +extern uint si_corerev(si_t *sih); +extern void *si_osh(si_t *sih); +extern void si_setosh(si_t *sih, osl_t *osh); +extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val); +extern void *si_coreregs(si_t *sih); +extern uint si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val); +extern uint32 si_core_cflags(si_t *sih, uint32 mask, uint32 val); +extern void si_core_cflags_wo(si_t *sih, uint32 mask, 
uint32 val); +extern uint32 si_core_sflags(si_t *sih, uint32 mask, uint32 val); +extern bool si_iscoreup(si_t *sih); +extern uint si_findcoreidx(si_t *sih, uint coreid, uint coreunit); +extern void *si_setcoreidx(si_t *sih, uint coreidx); +extern void *si_setcore(si_t *sih, uint coreid, uint coreunit); +extern void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val); +extern void si_restore_core(si_t *sih, uint coreid, uint intr_val); +extern int si_numaddrspaces(si_t *sih); +extern uint32 si_addrspace(si_t *sih, uint asidx); +extern uint32 si_addrspacesize(si_t *sih, uint asidx); +extern int si_corebist(si_t *sih); +extern void si_core_reset(si_t *sih, uint32 bits, uint32 resetbits); +extern void si_core_disable(si_t *sih, uint32 bits); +extern uint32 si_clock_rate(uint32 pll_type, uint32 n, uint32 m); +extern bool si_read_pmu_autopll(si_t *sih); +extern uint32 si_clock(si_t *sih); +extern uint32 si_alp_clock(si_t *sih); +extern uint32 si_ilp_clock(si_t *sih); +extern void si_pci_setup(si_t *sih, uint coremask); +extern void si_pcmcia_init(si_t *sih); +extern void si_setint(si_t *sih, int siflag); +extern bool si_backplane64(si_t *sih); +extern void si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn, + void *intrsenabled_fn, void *intr_arg); +extern void si_deregister_intr_callback(si_t *sih); +extern void si_clkctl_init(si_t *sih); +extern uint16 si_clkctl_fast_pwrup_delay(si_t *sih); +extern bool si_clkctl_cc(si_t *sih, uint mode); +extern int si_clkctl_xtal(si_t *sih, uint what, bool on); +extern uint32 si_gpiotimerval(si_t *sih, uint32 mask, uint32 val); +extern void si_btcgpiowar(si_t *sih); +extern bool si_deviceremoved(si_t *sih); +extern uint32 si_socram_size(si_t *sih); +extern uint32 si_socdevram_size(si_t *sih); +extern void si_socdevram(si_t *sih, bool set, uint8 *ennable, uint8 *protect); +extern bool si_socdevram_pkg(si_t *sih); + +extern void si_watchdog(si_t *sih, uint ticks); +extern void si_watchdog_ms(si_t *sih, uint32 ms); +extern void *si_gpiosetcore(si_t *sih); +extern uint32 si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioin(si_t *sih); +extern uint32 si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioled(si_t *sih, uint32 mask, uint32 val); +extern uint32 si_gpioreserve(si_t *sih, uint32 gpio_num, uint8 priority); +extern uint32 si_gpiorelease(si_t *sih, uint32 gpio_num, uint8 priority); +extern uint32 si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val); +extern uint32 si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val); +extern uint32 si_gpio_int_enable(si_t *sih, bool enable); + + +extern void *si_gpio_handler_register(si_t *sih, uint32 e, bool lev, gpio_handler_t cb, void *arg); +extern void si_gpio_handler_unregister(si_t *sih, void* gpioh); +extern void si_gpio_handler_process(si_t *sih); + + +extern bool si_pci_pmecap(si_t *sih); +struct osl_info; +extern bool si_pci_fastpmecap(struct osl_info *osh); +extern bool si_pci_pmestat(si_t *sih); +extern void si_pci_pmeclr(si_t *sih); +extern void si_pci_pmeen(si_t *sih); +extern uint si_pcie_readreg(void *sih, uint addrtype, uint offset); + +extern void si_sdio_init(si_t *sih); + +extern uint16 si_d11_devid(si_t *sih); +extern 
int si_corepciid(si_t *sih, uint func, uint16 *pcivendor, uint16 *pcidevice, + uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif, uint8 *pciheader); + +#define si_eci(sih) 0 +#define si_eci_init(sih) (0) +#define si_eci_notify_bt(sih, type, val) (0) +#define si_seci(sih) 0 +static INLINE void * si_seci_init(si_t *sih, uint8 use_seci) {return NULL;} +#define si_seci_down(sih) do { } while (0) + + +extern bool si_is_otp_disabled(si_t *sih); +extern bool si_is_otp_powered(si_t *sih); +extern void si_otp_power(si_t *sih, bool on); +extern void si_set_otp_wr_volts(si_t *sih); +extern void si_set_otp_rd_volts(si_t *sih); + + +extern bool si_is_sprom_available(si_t *sih); +extern bool si_is_sprom_enabled(si_t *sih); +extern void si_sprom_enable(si_t *sih, bool enable); + + +extern int si_cis_source(si_t *sih); +#define CIS_DEFAULT 0 +#define CIS_SROM 1 +#define CIS_OTP 2 + + +#define DEFAULT_FAB 0x0 +#define CSM_FAB7 0x1 +#define TSMC_FAB12 0x2 +#define SMIC_FAB4 0x3 +extern int si_otp_fabid(si_t *sih, uint16 *fabid, bool rw); +extern uint16 si_fabid(si_t *sih); + + +extern int si_devpath(si_t *sih, char *path, int size); + +extern char *si_getdevpathvar(si_t *sih, const char *name); +extern int si_getdevpathintvar(si_t *sih, const char *name); + + +extern uint8 si_pcieclkreq(si_t *sih, uint32 mask, uint32 val); +extern uint32 si_pcielcreg(si_t *sih, uint32 mask, uint32 val); +extern void si_war42780_clkreq(si_t *sih, bool clkreq); +extern void si_pci_sleep(si_t *sih); +extern void si_pci_down(si_t *sih); +extern void si_pci_up(si_t *sih); +extern void si_pcie_war_ovr_update(si_t *sih, uint8 aspm); +extern void si_pcie_extendL1timer(si_t *sih, bool extend); +extern int si_pci_fixcfg(si_t *sih); +extern uint si_pll_reset(si_t *sih); + + + +extern bool si_taclear(si_t *sih, bool details); + + + +extern uint32 si_pciereg(si_t *sih, uint32 offset, uint32 mask, uint32 val, uint type); +extern uint32 si_pcieserdesreg(si_t *sih, uint32 mdioslave, uint32 offset, uint32 mask, uint32 val); + +char *si_getnvramflvar(si_t *sih, const char *name); + + +#endif diff --git a/drivers/net/wireless/bcmdhd/include/trxhdr.h b/drivers/net/wireless/bcmdhd/include/trxhdr.h new file mode 100644 index 0000000000000..b52fb15ba5c08 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/trxhdr.h @@ -0,0 +1,53 @@ +/* + * TRX image file header format. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: trxhdr.h 286295 2011-09-27 06:39:43Z $ + */ + +#ifndef _TRX_HDR_H_ +#define _TRX_HDR_H_ + +#include + +#define TRX_MAGIC 0x30524448 /* "HDR0" */ +#define TRX_VERSION 1 /* Version 1 */ +#define TRX_MAX_LEN 0x3B0000 /* Max length */ +#define TRX_NO_HEADER 1 /* Do not write TRX header */ +#define TRX_GZ_FILES 0x2 /* Contains up to TRX_MAX_OFFSET individual gzip files */ +#define TRX_OVERLAYS 0x4 /* Contains an overlay header after the trx header */ +#define TRX_MAX_OFFSET 3 /* Max number of individual files */ +#define TRX_UNCOMP_IMAGE 0x20 /* Trx contains uncompressed rtecdc.bin image */ +#define TRX_ROMSIM_IMAGE 0x10 /* Trx contains ROM simulation image */ + +struct trx_header { + uint32 magic; /* "HDR0" */ + uint32 len; /* Length of file including header */ + uint32 crc32; /* 32-bit CRC from flag_version to end of file */ + uint32 flag_version; /* 0:15 flags, 16:31 version */ + uint32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of header */ +}; + +/* Compatibility */ +typedef struct trx_header TRXHDR, *PTRXHDR; + +#endif /* _TRX_HDR_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/typedefs.h b/drivers/net/wireless/bcmdhd/include/typedefs.h new file mode 100644 index 0000000000000..d0902fe808915 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/typedefs.h @@ -0,0 +1,312 @@ +/* + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * $Id: typedefs.h 290055 2011-10-15 21:26:26Z $ + */ + + +#ifndef _TYPEDEFS_H_ +#define _TYPEDEFS_H_ + +#ifdef SITE_TYPEDEFS + + + +#include "site_typedefs.h" + +#else + + + +#ifdef __cplusplus + +#define TYPEDEF_BOOL +#ifndef FALSE +#define FALSE false +#endif +#ifndef TRUE +#define TRUE true +#endif + +#else + + +#endif + +#if defined(__x86_64__) +#define TYPEDEF_UINTPTR +typedef unsigned long long int uintptr; +#endif + + + + + +#if defined(_NEED_SIZE_T_) +typedef long unsigned int size_t; +#endif + + + + + +#if defined(__sparc__) +#define TYPEDEF_ULONG +#endif + + + +#if !defined(LINUX_HYBRID) || defined(LINUX_PORT) +#define TYPEDEF_UINT +#ifndef TARGETENV_android +#define TYPEDEF_USHORT +#define TYPEDEF_ULONG +#endif +#ifdef __KERNEL__ +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)) +#define TYPEDEF_BOOL +#endif + +#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18)) +#include +#ifdef noinline_for_stack +#define TYPEDEF_BOOL +#endif +#endif +#endif +#endif + + + + + +#if defined(__GNUC__) && defined(__STRICT_ANSI__) +#define TYPEDEF_INT64 +#define TYPEDEF_UINT64 +#endif + + +#if defined(__ICL) + +#define TYPEDEF_INT64 + +#if defined(__STDC__) +#define TYPEDEF_UINT64 +#endif + +#endif + +#if !defined(__DJGPP__) + + +#if defined(__KERNEL__) + + +#if !defined(LINUX_HYBRID) || defined(LINUX_PORT) +#include +#endif + +#else + + +#include + +#endif + +#endif + + + + +#define USE_TYPEDEF_DEFAULTS + +#endif + + + + +#ifdef USE_TYPEDEF_DEFAULTS +#undef USE_TYPEDEF_DEFAULTS + +#ifndef TYPEDEF_BOOL +typedef unsigned char bool; +#endif + + + +#ifndef TYPEDEF_UCHAR +typedef unsigned char uchar; +#endif + +#ifndef TYPEDEF_USHORT +typedef unsigned short ushort; +#endif + +#ifndef TYPEDEF_UINT +typedef unsigned int uint; +#endif + +#ifndef TYPEDEF_ULONG +typedef unsigned long ulong; +#endif + + + +#ifndef TYPEDEF_UINT8 +typedef unsigned char uint8; +#endif + +#ifndef TYPEDEF_UINT16 +typedef unsigned short uint16; +#endif + +#ifndef TYPEDEF_UINT32 +typedef unsigned int uint32; +#endif + +#ifndef TYPEDEF_UINT64 +typedef unsigned long long uint64; +#endif + +#ifndef TYPEDEF_UINTPTR +typedef unsigned int uintptr; +#endif + +#ifndef TYPEDEF_INT8 +typedef signed char int8; +#endif + +#ifndef TYPEDEF_INT16 +typedef signed short int16; +#endif + +#ifndef TYPEDEF_INT32 +typedef signed int int32; +#endif + +#ifndef TYPEDEF_INT64 +typedef signed long long int64; +#endif + + + +#ifndef TYPEDEF_FLOAT32 +typedef float float32; +#endif + +#ifndef TYPEDEF_FLOAT64 +typedef double float64; +#endif + + + +#ifndef TYPEDEF_FLOAT_T + +#if defined(FLOAT32) +typedef float32 float_t; +#else +typedef float64 float_t; +#endif + +#endif + + + +#ifndef FALSE +#define FALSE 0 +#endif + +#ifndef TRUE +#define TRUE 1 +#endif + +#ifndef NULL +#define NULL 0 +#endif + +#ifndef OFF +#define OFF 0 +#endif + +#ifndef ON +#define ON 1 +#endif + +#define AUTO (-1) + + + +#ifndef PTRSZ +#define PTRSZ sizeof(char*) +#endif + + + +#if defined(__GNUC__) + #define BWL_COMPILER_GNU +#elif defined(__CC_ARM) && __CC_ARM + #define BWL_COMPILER_ARMCC +#else + #error "Unknown compiler!" 
+#endif + + +#ifndef INLINE + #if defined(BWL_COMPILER_MICROSOFT) + #define INLINE __inline + #elif defined(BWL_COMPILER_GNU) + #define INLINE __inline__ + #elif defined(BWL_COMPILER_ARMCC) + #define INLINE __inline + #else + #define INLINE + #endif +#endif + +#undef TYPEDEF_BOOL +#undef TYPEDEF_UCHAR +#undef TYPEDEF_USHORT +#undef TYPEDEF_UINT +#undef TYPEDEF_ULONG +#undef TYPEDEF_UINT8 +#undef TYPEDEF_UINT16 +#undef TYPEDEF_UINT32 +#undef TYPEDEF_UINT64 +#undef TYPEDEF_UINTPTR +#undef TYPEDEF_INT8 +#undef TYPEDEF_INT16 +#undef TYPEDEF_INT32 +#undef TYPEDEF_INT64 +#undef TYPEDEF_FLOAT32 +#undef TYPEDEF_FLOAT64 +#undef TYPEDEF_FLOAT_T + +#endif + + +#define UNUSED_PARAMETER(x) (void)(x) + + +#define DISCARD_QUAL(ptr, type) ((type *)(uintptr)(ptr)) + + +#include +#endif diff --git a/drivers/net/wireless/bcmdhd/include/wlfc_proto.h b/drivers/net/wireless/bcmdhd/include/wlfc_proto.h new file mode 100644 index 0000000000000..d37105165bab2 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/wlfc_proto.h @@ -0,0 +1,198 @@ +/* +* Copyright (C) 1999-2011, Broadcom Corporation +* +* Unless you and Broadcom execute a separate written software license +* agreement governing use of this software, this software is licensed to you +* under the terms of the GNU General Public License version 2 (the "GPL"), +* available at http://www.broadcom.com/licenses/GPLv2.php, with the +* following added to such license: +* +* As a special exception, the copyright holders of this software give you +* permission to link this software with independent modules, and to copy and +* distribute the resulting executable under terms of your choice, provided that +* you also meet, for each linked independent module, the terms and conditions of +* the license of that module. An independent module is a module which is not +* derived from this software. The special exception does not apply to any +* modifications of the software. +* +* Notwithstanding the above, under no circumstances may you combine this +* software in any way with any other Broadcom software provided under a license +* other than the GPL, without Broadcom's express prior written consent. +* $Id: wlfc_proto.h 277737 2011-08-16 17:54:59Z $ +* +*/ +#ifndef __wlfc_proto_definitions_h__ +#define __wlfc_proto_definitions_h__ + + /* Use TLV to convey WLFC information. + --------------------------------------------------------------------------- + | Type | Len | value | Description + --------------------------------------------------------------------------- + | 1 | 1 | (handle) | MAC OPEN + --------------------------------------------------------------------------- + | 2 | 1 | (handle) | MAC CLOSE + --------------------------------------------------------------------------- + | 3 | 2 | (count, handle, prec_bmp)| Set the credit depth for a MAC dstn + --------------------------------------------------------------------------- + | 4 | 4 | see pkttag comments | TXSTATUS + --------------------------------------------------------------------------- + | 5 | 4 | see pkttag comments | PKKTTAG [host->firmware] + --------------------------------------------------------------------------- + | 6 | 8 | (handle, ifid, MAC) | MAC ADD + --------------------------------------------------------------------------- + | 7 | 8 | (handle, ifid, MAC) | MAC DEL + --------------------------------------------------------------------------- + | 8 | 1 | (rssi) | RSSI - RSSI value for the packet. 
+ --------------------------------------------------------------------------- + | 9 | 1 | (interface ID) | Interface OPEN + --------------------------------------------------------------------------- + | 10 | 1 | (interface ID) | Interface CLOSE + --------------------------------------------------------------------------- + | 11 | 8 | fifo credit returns map | FIFO credits back to the host + | | | | + | | | | -------------------------------------- + | | | | | ac0 | ac1 | ac2 | ac3 | bcmc | atim | + | | | | -------------------------------------- + | | | | + --------------------------------------------------------------------------- + | 12 | 2 | MAC handle, | Host provides a bitmap of pending + | | | AC[0-3] traffic bitmap | unicast traffic for MAC-handle dstn. + | | | | [host->firmware] + --------------------------------------------------------------------------- + | 13 | 3 | (count, handle, prec_bmp)| One time request for packet to a specific + | | | | MAC destination. + --------------------------------------------------------------------------- + | 255 | N/A | N/A | FILLER - This is a special type + | | | | that has no length or value. + | | | | Typically used for padding. + --------------------------------------------------------------------------- + */ + +#define WLFC_CTL_TYPE_MAC_OPEN 1 +#define WLFC_CTL_TYPE_MAC_CLOSE 2 +#define WLFC_CTL_TYPE_MAC_REQUEST_CREDIT 3 +#define WLFC_CTL_TYPE_TXSTATUS 4 +#define WLFC_CTL_TYPE_PKTTAG 5 + +#define WLFC_CTL_TYPE_MACDESC_ADD 6 +#define WLFC_CTL_TYPE_MACDESC_DEL 7 +#define WLFC_CTL_TYPE_RSSI 8 + +#define WLFC_CTL_TYPE_INTERFACE_OPEN 9 +#define WLFC_CTL_TYPE_INTERFACE_CLOSE 10 + +#define WLFC_CTL_TYPE_FIFO_CREDITBACK 11 + +#define WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP 12 +#define WLFC_CTL_TYPE_MAC_REQUEST_PACKET 13 + +#define WLFC_CTL_TYPE_FILLER 255 + +#define WLFC_CTL_VALUE_LEN_MACDESC 8 /* handle, interface, MAC */ + +#define WLFC_CTL_VALUE_LEN_MAC 1 /* MAC-handle */ +#define WLFC_CTL_VALUE_LEN_RSSI 1 + +#define WLFC_CTL_VALUE_LEN_INTERFACE 1 +#define WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP 2 + +#define WLFC_CTL_VALUE_LEN_TXSTATUS 4 +#define WLFC_CTL_VALUE_LEN_PKTTAG 4 + +/* enough space to host all 4 ACs, bc/mc and atim fifo credit */ +#define WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK 6 + +#define WLFC_CTL_VALUE_LEN_REQUEST_CREDIT 3 /* credit, MAC-handle, prec_bitmap */ +#define WLFC_CTL_VALUE_LEN_REQUEST_PACKET 3 /* credit, MAC-handle, prec_bitmap */ + + + +#define WLFC_PKTID_GEN_MASK 0x80000000 +#define WLFC_PKTID_GEN_SHIFT 31 + +#define WLFC_PKTID_GEN(x) (((x) & WLFC_PKTID_GEN_MASK) >> WLFC_PKTID_GEN_SHIFT) +#define WLFC_PKTID_SETGEN(x, gen) (x) = ((x) & ~WLFC_PKTID_GEN_MASK) | \ + (((gen) << WLFC_PKTID_GEN_SHIFT) & WLFC_PKTID_GEN_MASK) + +#define WLFC_PKTFLAG_PKTFROMHOST 0x01 +#define WLFC_PKTFLAG_PKT_REQUESTED 0x02 + +#define WL_TXSTATUS_FLAGS_MASK 0xf /* allow 4 bits only */ +#define WL_TXSTATUS_FLAGS_SHIFT 27 + +#define WL_TXSTATUS_SET_FLAGS(x, flags) ((x) = \ + ((x) & ~(WL_TXSTATUS_FLAGS_MASK << WL_TXSTATUS_FLAGS_SHIFT)) | \ + (((flags) & WL_TXSTATUS_FLAGS_MASK) << WL_TXSTATUS_FLAGS_SHIFT)) +#define WL_TXSTATUS_GET_FLAGS(x) (((x) >> WL_TXSTATUS_FLAGS_SHIFT) & \ + WL_TXSTATUS_FLAGS_MASK) + +#define WL_TXSTATUS_FIFO_MASK 0x7 /* allow 3 bits for FIFO ID */ +#define WL_TXSTATUS_FIFO_SHIFT 24 + +#define WL_TXSTATUS_SET_FIFO(x, flags) ((x) = \ + ((x) & ~(WL_TXSTATUS_FIFO_MASK << WL_TXSTATUS_FIFO_SHIFT)) | \ + (((flags) & WL_TXSTATUS_FIFO_MASK) << WL_TXSTATUS_FIFO_SHIFT)) +#define WL_TXSTATUS_GET_FIFO(x) (((x) >> WL_TXSTATUS_FIFO_SHIFT) & 
WL_TXSTATUS_FIFO_MASK) + +#define WL_TXSTATUS_PKTID_MASK 0xffffff /* allow 24 bits */ +#define WL_TXSTATUS_SET_PKTID(x, num) ((x) = \ + ((x) & ~WL_TXSTATUS_PKTID_MASK) | (num)) +#define WL_TXSTATUS_GET_PKTID(x) ((x) & WL_TXSTATUS_PKTID_MASK) + +/* 32 STA should be enough??, 6 bits; Must be power of 2 */ +#define WLFC_MAC_DESC_TABLE_SIZE 32 +#define WLFC_MAX_IFNUM 16 +#define WLFC_MAC_DESC_ID_INVALID 0xff + +/* b[7:5] -reuse guard, b[4:0] -value */ +#define WLFC_MAC_DESC_GET_LOOKUP_INDEX(x) ((x) & 0x1f) + +#define WLFC_PKTFLAG_SET_PKTREQUESTED(x) (x) |= \ + (WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT) + +#define WLFC_PKTFLAG_CLR_PKTREQUESTED(x) (x) &= \ + ~(WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT) + +#define WL_TXSTATUS_GENERATION_MASK 1 +#define WL_TXSTATUS_GENERATION_SHIFT 31 + +#define WLFC_PKTFLAG_SET_GENERATION(x, gen) ((x) = \ + ((x) & ~(WL_TXSTATUS_GENERATION_MASK << WL_TXSTATUS_GENERATION_SHIFT)) | \ + (((gen) & WL_TXSTATUS_GENERATION_MASK) << WL_TXSTATUS_GENERATION_SHIFT)) + +#define WLFC_PKTFLAG_GENERATION(x) (((x) >> WL_TXSTATUS_GENERATION_SHIFT) & \ + WL_TXSTATUS_GENERATION_MASK) + +#define WLFC_MAX_PENDING_DATALEN 120 + +/* host is free to discard the packet */ +#define WLFC_CTL_PKTFLAG_DISCARD 0 +/* D11 suppressed a packet */ +#define WLFC_CTL_PKTFLAG_D11SUPPRESS 1 +/* WL firmware suppressed a packet because MAC is + already in PSMode (short time window) +*/ +#define WLFC_CTL_PKTFLAG_WLSUPPRESS 2 +/* Firmware tossed this packet */ +#define WLFC_CTL_PKTFLAG_TOSSED_BYWLC 3 + +#define WLFC_D11_STATUS_INTERPRET(txs) ((((txs)->status & TX_STATUS_SUPR_MASK) >> \ + TX_STATUS_SUPR_SHIFT)) ? WLFC_CTL_PKTFLAG_D11SUPPRESS : WLFC_CTL_PKTFLAG_DISCARD + +#ifdef PROP_TXSTATUS_DEBUG +#define WLFC_DBGMESG(x) printf x +/* wlfc-breadcrumb */ +#define WLFC_BREADCRUMB(x) do {if ((x) == NULL) \ + {printf("WLFC: %s():%d:caller:%p\n", \ + __FUNCTION__, __LINE__, __builtin_return_address(0));}} while (0) +#define WLFC_PRINTMAC(banner, ea) do {printf("%s MAC: [%02x:%02x:%02x:%02x:%02x:%02x]\n", \ + banner, ea[0], ea[1], ea[2], ea[3], ea[4], ea[5]); } while (0) +#define WLFC_WHEREIS(s) printf("WLFC: at %s():%d, %s\n", __FUNCTION__, __LINE__, (s)) +#else +#define WLFC_DBGMESG(x) +#define WLFC_BREADCRUMB(x) +#define WLFC_PRINTMAC(banner, ea) +#define WLFC_WHEREIS(s) +#endif + +#endif /* __wlfc_proto_definitions_h__ */ diff --git a/drivers/net/wireless/bcmdhd/include/wlioctl.h b/drivers/net/wireless/bcmdhd/include/wlioctl.h new file mode 100644 index 0000000000000..e543bfa6d1759 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/wlioctl.h @@ -0,0 +1,2812 @@ +/* + * Custom OID/ioctl definitions for + * Broadcom 802.11abg Networking Device Driver + * + * Definitions subject to change without notice. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: wlioctl.h 331292 2012-05-04 09:04:23Z $ + */ + + +#ifndef _wlioctl_h_ +#define _wlioctl_h_ + +#include +#include +#include +#include +#include +#include + +#include + +#ifndef INTF_NAME_SIZ +#define INTF_NAME_SIZ 16 +#endif + + +typedef struct remote_ioctl { + cdc_ioctl_t msg; + uint data_len; + char intf_name[INTF_NAME_SIZ]; +} rem_ioctl_t; +#define REMOTE_SIZE sizeof(rem_ioctl_t) + +#define ACTION_FRAME_SIZE 1040 + +typedef struct wl_action_frame { + struct ether_addr da; + uint16 len; + uint32 packetId; + uint8 data[ACTION_FRAME_SIZE]; +} wl_action_frame_t; + +#define WL_WIFI_ACTION_FRAME_SIZE sizeof(struct wl_action_frame) + +typedef struct ssid_info +{ + uint8 ssid_len; + uint8 ssid[32]; +} ssid_info_t; + +typedef struct wl_af_params { + uint32 channel; + int32 dwell_time; + struct ether_addr BSSID; + wl_action_frame_t action_frame; +} wl_af_params_t; + +#define WL_WIFI_AF_PARAMS_SIZE sizeof(struct wl_af_params) + + +#define BWL_DEFAULT_PACKING +#include + + + + + +#define LEGACY2_WL_BSS_INFO_VERSION 108 + + +typedef struct wl_bss_info_108 { + uint32 version; + uint32 length; + struct ether_addr BSSID; + uint16 beacon_period; + uint16 capability; + uint8 SSID_len; + uint8 SSID[32]; + struct { + uint count; + uint8 rates[16]; + } rateset; + chanspec_t chanspec; + uint16 atim_window; + uint8 dtim_period; + int16 RSSI; + int8 phy_noise; + + uint8 n_cap; + uint32 nbss_cap; + uint8 ctl_ch; + uint32 reserved32[1]; + uint8 flags; + uint8 reserved[3]; + uint8 basic_mcs[MCSSET_LEN]; + + uint16 ie_offset; + uint32 ie_length; + + +} wl_bss_info_108_t; + +#define WL_BSS_INFO_VERSION 109 + + +typedef struct wl_bss_info { + uint32 version; + uint32 length; + struct ether_addr BSSID; + uint16 beacon_period; + uint16 capability; + uint8 SSID_len; + uint8 SSID[32]; + struct { + uint count; + uint8 rates[16]; + } rateset; + chanspec_t chanspec; + uint16 atim_window; + uint8 dtim_period; + int16 RSSI; + int8 phy_noise; + + uint8 n_cap; + uint32 nbss_cap; + uint8 ctl_ch; + uint32 reserved32[1]; + uint8 flags; + uint8 reserved[3]; + uint8 basic_mcs[MCSSET_LEN]; + + uint16 ie_offset; + uint32 ie_length; + int16 SNR; + + +} wl_bss_info_t; + +typedef struct wl_bsscfg { + uint32 wsec; + uint32 WPA_auth; + uint32 wsec_index; + uint32 associated; + uint32 BSS; + uint32 phytest_on; + struct ether_addr prev_BSSID; + struct ether_addr BSSID; +} wl_bsscfg_t; + +typedef struct wl_bss_config { + uint32 atim_window; + uint32 beacon_period; + uint32 chanspec; +} wl_bss_config_t; + + +typedef struct wlc_ssid { + uint32 SSID_len; + uchar SSID[32]; +} wlc_ssid_t; + +#define WL_BSS_FLAGS_FROM_BEACON 0x01 + +#define WL_BSSTYPE_INFRA 1 +#define WL_BSSTYPE_INDEP 0 +#define WL_BSSTYPE_ANY 2 + + +#define WL_SCANFLAGS_PASSIVE 0x01 +#define WL_SCANFLAGS_RESERVED 0x02 +#define WL_SCANFLAGS_PROHIBITED 0x04 + +#define WL_SCAN_PARAMS_SSID_MAX 10 + +typedef struct wl_scan_params { + wlc_ssid_t ssid; + struct ether_addr bssid; + int8 bss_type; + uint8 scan_type; + int32 nprobes; + int32 active_time; + int32 passive_time; + int32 home_time; + int32 channel_num; + uint16 channel_list[1]; +} wl_scan_params_t; + + +#define 
WL_SCAN_PARAMS_FIXED_SIZE 64 + + +#define WL_SCAN_PARAMS_COUNT_MASK 0x0000ffff +#define WL_SCAN_PARAMS_NSSID_SHIFT 16 + +#define WL_SCAN_ACTION_START 1 +#define WL_SCAN_ACTION_CONTINUE 2 +#define WL_SCAN_ACTION_ABORT 3 + +#define ISCAN_REQ_VERSION 1 + + +typedef struct wl_iscan_params { + uint32 version; + uint16 action; + uint16 scan_duration; + wl_scan_params_t params; +} wl_iscan_params_t; + + +#define WL_ISCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_iscan_params_t, params) + sizeof(wlc_ssid_t)) + +typedef struct wl_scan_results { + uint32 buflen; + uint32 version; + uint32 count; + wl_bss_info_t bss_info[1]; +} wl_scan_results_t; + + +#define WL_SCAN_RESULTS_FIXED_SIZE (sizeof(wl_scan_results_t) - sizeof(wl_bss_info_t)) + + +#define WL_SCAN_RESULTS_SUCCESS 0 +#define WL_SCAN_RESULTS_PARTIAL 1 +#define WL_SCAN_RESULTS_PENDING 2 +#define WL_SCAN_RESULTS_ABORTED 3 +#define WL_SCAN_RESULTS_NO_MEM 4 + + +#define DNGL_RXCTXT_SIZE 45 + +#if defined(SIMPLE_ISCAN) +#define ISCAN_RETRY_CNT 5 +#define ISCAN_STATE_IDLE 0 +#define ISCAN_STATE_SCANING 1 +#define ISCAN_STATE_PENDING 2 + + +#define WLC_IW_ISCAN_MAXLEN 2048 +typedef struct iscan_buf { + struct iscan_buf * next; + char iscan_buf[WLC_IW_ISCAN_MAXLEN]; +} iscan_buf_t; +#endif + +#define ESCAN_REQ_VERSION 1 + +typedef struct wl_escan_params { + uint32 version; + uint16 action; + uint16 sync_id; + wl_scan_params_t params; +} wl_escan_params_t; + +#define WL_ESCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_escan_params_t, params) + sizeof(wlc_ssid_t)) + +typedef struct wl_escan_result { + uint32 buflen; + uint32 version; + uint16 sync_id; + uint16 bss_count; + wl_bss_info_t bss_info[1]; +} wl_escan_result_t; + +#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(wl_escan_result_t) - sizeof(wl_bss_info_t)) + + +typedef struct wl_iscan_results { + uint32 status; + wl_scan_results_t results; +} wl_iscan_results_t; + + +#define WL_ISCAN_RESULTS_FIXED_SIZE \ + (WL_SCAN_RESULTS_FIXED_SIZE + OFFSETOF(wl_iscan_results_t, results)) + +typedef struct wl_probe_params { + wlc_ssid_t ssid; + struct ether_addr bssid; + struct ether_addr mac; +} wl_probe_params_t; + +#define WL_NUMRATES 16 +typedef struct wl_rateset { + uint32 count; + uint8 rates[WL_NUMRATES]; +} wl_rateset_t; + +typedef struct wl_rateset_args { + uint32 count; + uint8 rates[WL_NUMRATES]; + uint8 mcs[MCSSET_LEN]; +} wl_rateset_args_t; + + +typedef struct wl_uint32_list { + + uint32 count; + + uint32 element[1]; +} wl_uint32_list_t; + + +typedef struct wl_assoc_params { + struct ether_addr bssid; + uint16 bssid_cnt; + int32 chanspec_num; + chanspec_t chanspec_list[1]; +} wl_assoc_params_t; +#define WL_ASSOC_PARAMS_FIXED_SIZE (sizeof(wl_assoc_params_t) - sizeof(chanspec_t)) + + +typedef wl_assoc_params_t wl_reassoc_params_t; +#define WL_REASSOC_PARAMS_FIXED_SIZE WL_ASSOC_PARAMS_FIXED_SIZE + + +typedef wl_assoc_params_t wl_join_assoc_params_t; +#define WL_JOIN_ASSOC_PARAMS_FIXED_SIZE WL_ASSOC_PARAMS_FIXED_SIZE + + +typedef struct wl_join_params { + wlc_ssid_t ssid; + wl_assoc_params_t params; +} wl_join_params_t; +#define WL_JOIN_PARAMS_FIXED_SIZE (sizeof(wl_join_params_t) - sizeof(chanspec_t)) + + +typedef struct wl_join_scan_params { + uint8 scan_type; + int32 nprobes; + int32 active_time; + int32 passive_time; + int32 home_time; +} wl_join_scan_params_t; + + +typedef struct wl_extjoin_params { + wlc_ssid_t ssid; + wl_join_scan_params_t scan; + wl_join_assoc_params_t assoc; +} wl_extjoin_params_t; +#define WL_EXTJOIN_PARAMS_FIXED_SIZE (sizeof(wl_extjoin_params_t) - sizeof(chanspec_t)) + +typedef struct { + uint32 num; 
+ chanspec_t list[1]; +} chanspec_list_t; + + +#define NRATE_MCS_INUSE 0x00000080 +#define NRATE_RATE_MASK 0x0000007f +#define NRATE_STF_MASK 0x0000ff00 +#define NRATE_STF_SHIFT 8 +#define NRATE_OVERRIDE 0x80000000 +#define NRATE_OVERRIDE_MCS_ONLY 0x40000000 +#define NRATE_SGI_MASK 0x00800000 +#define NRATE_SGI_SHIFT 23 +#define NRATE_LDPC_CODING 0x00400000 +#define NRATE_LDPC_SHIFT 22 +#define NRATE_BCMC_OVERRIDE 0x00200000 +#define NRATE_BCMC_SHIFT 21 + +#define NRATE_STF_SISO 0 +#define NRATE_STF_CDD 1 +#define NRATE_STF_STBC 2 +#define NRATE_STF_SDM 3 + +#define ANTENNA_NUM_1 1 +#define ANTENNA_NUM_2 2 +#define ANTENNA_NUM_3 3 +#define ANTENNA_NUM_4 4 + +#define ANT_SELCFG_AUTO 0x80 +#define ANT_SELCFG_MASK 0x33 +#define ANT_SELCFG_MAX 4 +#define ANT_SELCFG_TX_UNICAST 0 +#define ANT_SELCFG_RX_UNICAST 1 +#define ANT_SELCFG_TX_DEF 2 +#define ANT_SELCFG_RX_DEF 3 + +#define MAX_STREAMS_SUPPORTED 4 + +typedef struct { + uint8 ant_config[ANT_SELCFG_MAX]; + uint8 num_antcfg; +} wlc_antselcfg_t; + +#define HIGHEST_SINGLE_STREAM_MCS 7 + +#define MAX_CCA_CHANNELS 38 +#define MAX_CCA_SECS 60 + +#define IBSS_MED 15 +#define IBSS_HI 25 +#define OBSS_MED 12 +#define OBSS_HI 25 +#define INTERFER_MED 5 +#define INTERFER_HI 10 + +#define CCA_FLAG_2G_ONLY 0x01 +#define CCA_FLAG_5G_ONLY 0x02 +#define CCA_FLAG_IGNORE_DURATION 0x04 +#define CCA_FLAGS_PREFER_1_6_11 0x10 +#define CCA_FLAG_IGNORE_INTERFER 0x20 + +#define CCA_ERRNO_BAND 1 +#define CCA_ERRNO_DURATION 2 +#define CCA_ERRNO_PREF_CHAN 3 +#define CCA_ERRNO_INTERFER 4 +#define CCA_ERRNO_TOO_FEW 5 + +typedef struct { + uint32 duration; + uint32 congest_ibss; + + uint32 congest_obss; + uint32 interference; + uint32 timestamp; +} cca_congest_t; + +typedef struct { + chanspec_t chanspec; + uint8 num_secs; + cca_congest_t secs[1]; +} cca_congest_channel_req_t; + +#define WLC_CNTRY_BUF_SZ 4 + +typedef struct wl_country { + char country_abbrev[WLC_CNTRY_BUF_SZ]; + int32 rev; + char ccode[WLC_CNTRY_BUF_SZ]; +} wl_country_t; + +typedef struct wl_channels_in_country { + uint32 buflen; + uint32 band; + char country_abbrev[WLC_CNTRY_BUF_SZ]; + uint32 count; + uint32 channel[1]; +} wl_channels_in_country_t; + +typedef struct wl_country_list { + uint32 buflen; + uint32 band_set; + uint32 band; + uint32 count; + char country_abbrev[1]; +} wl_country_list_t; + +#define WL_NUM_RPI_BINS 8 +#define WL_RM_TYPE_BASIC 1 +#define WL_RM_TYPE_CCA 2 +#define WL_RM_TYPE_RPI 3 + +#define WL_RM_FLAG_PARALLEL (1<<0) + +#define WL_RM_FLAG_LATE (1<<1) +#define WL_RM_FLAG_INCAPABLE (1<<2) +#define WL_RM_FLAG_REFUSED (1<<3) + +typedef struct wl_rm_req_elt { + int8 type; + int8 flags; + chanspec_t chanspec; + uint32 token; + uint32 tsf_h; + uint32 tsf_l; + uint32 dur; +} wl_rm_req_elt_t; + +typedef struct wl_rm_req { + uint32 token; + uint32 count; + void *cb; + void *cb_arg; + wl_rm_req_elt_t req[1]; +} wl_rm_req_t; +#define WL_RM_REQ_FIXED_LEN OFFSETOF(wl_rm_req_t, req) + +typedef struct wl_rm_rep_elt { + int8 type; + int8 flags; + chanspec_t chanspec; + uint32 token; + uint32 tsf_h; + uint32 tsf_l; + uint32 dur; + uint32 len; + uint8 data[1]; +} wl_rm_rep_elt_t; +#define WL_RM_REP_ELT_FIXED_LEN 24 + +#define WL_RPI_REP_BIN_NUM 8 +typedef struct wl_rm_rpi_rep { + uint8 rpi[WL_RPI_REP_BIN_NUM]; + int8 rpi_max[WL_RPI_REP_BIN_NUM]; +} wl_rm_rpi_rep_t; + +typedef struct wl_rm_rep { + uint32 token; + uint32 len; + wl_rm_rep_elt_t rep[1]; +} wl_rm_rep_t; +#define WL_RM_REP_FIXED_LEN 8 + + +typedef enum sup_auth_status { + + WLC_SUP_DISCONNECTED = 0, + WLC_SUP_CONNECTING, + 
WLC_SUP_IDREQUIRED, + WLC_SUP_AUTHENTICATING, + WLC_SUP_AUTHENTICATED, + WLC_SUP_KEYXCHANGE, + WLC_SUP_KEYED, + WLC_SUP_TIMEOUT, + WLC_SUP_LAST_BASIC_STATE, + + + + WLC_SUP_KEYXCHANGE_WAIT_M1 = WLC_SUP_AUTHENTICATED, + + WLC_SUP_KEYXCHANGE_PREP_M2 = WLC_SUP_KEYXCHANGE, + + WLC_SUP_KEYXCHANGE_WAIT_M3 = WLC_SUP_LAST_BASIC_STATE, + WLC_SUP_KEYXCHANGE_PREP_M4, + WLC_SUP_KEYXCHANGE_WAIT_G1, + WLC_SUP_KEYXCHANGE_PREP_G2 +} sup_auth_status_t; + + +#define CRYPTO_ALGO_OFF 0 +#define CRYPTO_ALGO_WEP1 1 +#define CRYPTO_ALGO_TKIP 2 +#define CRYPTO_ALGO_WEP128 3 +#define CRYPTO_ALGO_AES_CCM 4 +#define CRYPTO_ALGO_AES_OCB_MSDU 5 +#define CRYPTO_ALGO_AES_OCB_MPDU 6 +#define CRYPTO_ALGO_NALG 7 +#define CRYPTO_ALGO_PMK 12 + +#define WSEC_GEN_MIC_ERROR 0x0001 +#define WSEC_GEN_REPLAY 0x0002 +#define WSEC_GEN_ICV_ERROR 0x0004 + +#define WL_SOFT_KEY (1 << 0) +#define WL_PRIMARY_KEY (1 << 1) +#define WL_KF_RES_4 (1 << 4) +#define WL_KF_RES_5 (1 << 5) +#define WL_IBSS_PEER_GROUP_KEY (1 << 6) + +typedef struct wl_wsec_key { + uint32 index; + uint32 len; + uint8 data[DOT11_MAX_KEY_SIZE]; + uint32 pad_1[18]; + uint32 algo; + uint32 flags; + uint32 pad_2[2]; + int pad_3; + int iv_initialized; + int pad_4; + + struct { + uint32 hi; + uint16 lo; + } rxiv; + uint32 pad_5[2]; + struct ether_addr ea; +} wl_wsec_key_t; + +#define WSEC_MIN_PSK_LEN 8 +#define WSEC_MAX_PSK_LEN 64 + + +#define WSEC_PASSPHRASE (1<<0) + + +typedef struct { + ushort key_len; + ushort flags; + uint8 key[WSEC_MAX_PSK_LEN]; +} wsec_pmk_t; + + +#define WEP_ENABLED 0x0001 +#define TKIP_ENABLED 0x0002 +#define AES_ENABLED 0x0004 +#define WSEC_SWFLAG 0x0008 +#define SES_OW_ENABLED 0x0040 + + +#define WPA_AUTH_DISABLED 0x0000 +#define WPA_AUTH_NONE 0x0001 +#define WPA_AUTH_UNSPECIFIED 0x0002 +#define WPA_AUTH_PSK 0x0004 + +#define WPA2_AUTH_UNSPECIFIED 0x0040 +#define WPA2_AUTH_PSK 0x0080 +#define BRCM_AUTH_PSK 0x0100 +#define BRCM_AUTH_DPT 0x0200 +#define WPA2_AUTH_MFP 0x1000 +#define WPA2_AUTH_TPK 0x2000 +#define WPA2_AUTH_FT 0x4000 + + +#define MAXPMKID 16 + +typedef struct _pmkid { + struct ether_addr BSSID; + uint8 PMKID[WPA2_PMKID_LEN]; +} pmkid_t; + +typedef struct _pmkid_list { + uint32 npmkid; + pmkid_t pmkid[1]; +} pmkid_list_t; + +typedef struct _pmkid_cand { + struct ether_addr BSSID; + uint8 preauth; +} pmkid_cand_t; + +typedef struct _pmkid_cand_list { + uint32 npmkid_cand; + pmkid_cand_t pmkid_cand[1]; +} pmkid_cand_list_t; + +typedef struct wl_assoc_info { + uint32 req_len; + uint32 resp_len; + uint32 flags; + struct dot11_assoc_req req; + struct ether_addr reassoc_bssid; + struct dot11_assoc_resp resp; +} wl_assoc_info_t; + + +#define WLC_ASSOC_REQ_IS_REASSOC 0x01 + + +typedef struct { + uint16 ver; + uint16 len; + uint16 cap; + uint32 flags; + uint32 idle; + struct ether_addr ea; + wl_rateset_t rateset; + uint32 in; + uint32 listen_interval_inms; + uint32 tx_pkts; + uint32 tx_failures; + uint32 rx_ucast_pkts; + uint32 rx_mcast_pkts; + uint32 tx_rate; + uint32 rx_rate; + uint32 rx_decrypt_succeeds; + uint32 rx_decrypt_failures; +} sta_info_t; + +#define WL_OLD_STAINFO_SIZE OFFSETOF(sta_info_t, tx_pkts) + +#define WL_STA_VER 3 + + +#define WL_STA_BRCM 0x1 +#define WL_STA_WME 0x2 +#define WL_STA_ABCAP 0x4 +#define WL_STA_AUTHE 0x8 +#define WL_STA_ASSOC 0x10 +#define WL_STA_AUTHO 0x20 +#define WL_STA_WDS 0x40 +#define WL_STA_WDS_LINKUP 0x80 +#define WL_STA_PS 0x100 +#define WL_STA_APSD_BE 0x200 +#define WL_STA_APSD_BK 0x400 +#define WL_STA_APSD_VI 0x800 +#define WL_STA_APSD_VO 0x1000 +#define WL_STA_N_CAP 0x2000 +#define WL_STA_SCBSTATS 
0x4000 + +#define WL_WDS_LINKUP WL_STA_WDS_LINKUP + + +#define WLC_TXFILTER_OVERRIDE_DISABLED 0 +#define WLC_TXFILTER_OVERRIDE_ENABLED 1 + + +typedef struct { + uint32 val; + struct ether_addr ea; +} scb_val_t; + + +typedef struct { + uint32 code; + scb_val_t ioctl_args; +} authops_t; + + +typedef struct channel_info { + int hw_channel; + int target_channel; + int scan_channel; +} channel_info_t; + + +struct maclist { + uint count; + struct ether_addr ea[1]; +}; + + +typedef struct get_pktcnt { + uint rx_good_pkt; + uint rx_bad_pkt; + uint tx_good_pkt; + uint tx_bad_pkt; + uint rx_ocast_good_pkt; +} get_pktcnt_t; + +#define WL_IOCTL_ACTION_GET 0x0 +#define WL_IOCTL_ACTION_SET 0x1 +#define WL_IOCTL_ACTION_OVL_IDX_MASK 0x1e +#define WL_IOCTL_ACTION_OVL_RSV 0x20 +#define WL_IOCTL_ACTION_OVL 0x40 +#define WL_IOCTL_ACTION_MASK 0x7e +#define WL_IOCTL_ACTION_OVL_SHIFT 1 + + +typedef struct wl_ioctl { + uint cmd; + void *buf; + uint len; + uint8 set; + uint used; + uint needed; +} wl_ioctl_t; + + +#define ioctl_subtype set +#define ioctl_pid used +#define ioctl_status needed + + +typedef struct wlc_rev_info { + uint vendorid; + uint deviceid; + uint radiorev; + uint chiprev; + uint corerev; + uint boardid; + uint boardvendor; + uint boardrev; + uint driverrev; + uint ucoderev; + uint bus; + uint chipnum; + uint phytype; + uint phyrev; + uint anarev; + uint chippkg; +} wlc_rev_info_t; + +#define WL_REV_INFO_LEGACY_LENGTH 48 + +#define WL_BRAND_MAX 10 +typedef struct wl_instance_info { + uint instance; + char brand[WL_BRAND_MAX]; +} wl_instance_info_t; + + +typedef struct wl_txfifo_sz { + uint16 magic; + uint16 fifo; + uint16 size; +} wl_txfifo_sz_t; + +#define WL_TXFIFO_SZ_MAGIC 0xa5a5 + + + +#define WLC_IOV_NAME_LEN 30 +typedef struct wlc_iov_trx_s { + uint8 module; + uint8 type; + char name[WLC_IOV_NAME_LEN]; +} wlc_iov_trx_t; + + +#define WLC_IOCTL_MAGIC 0x14e46c77 + + +#define WLC_IOCTL_VERSION 1 + +#define WLC_IOCTL_MAXLEN 8192 +#define WLC_IOCTL_SMLEN 256 +#define WLC_IOCTL_MEDLEN 1536 +#ifdef WLC_HIGH_ONLY +#define WLC_SAMPLECOLLECT_MAXLEN 1024 +#define WLC_SAMPLECOLLECT_MAXLEN_LCN40 1024 +#else +#if defined(LCNCONF) || defined(LCN40CONF) +#define WLC_SAMPLECOLLECT_MAXLEN 8192 +#else +#define WLC_SAMPLECOLLECT_MAXLEN 10240 +#endif +#define WLC_SAMPLECOLLECT_MAXLEN_LCN40 8192 +#endif + + +#define WLC_GET_MAGIC 0 +#define WLC_GET_VERSION 1 +#define WLC_UP 2 +#define WLC_DOWN 3 +#define WLC_GET_LOOP 4 +#define WLC_SET_LOOP 5 +#define WLC_DUMP 6 +#define WLC_GET_MSGLEVEL 7 +#define WLC_SET_MSGLEVEL 8 +#define WLC_GET_PROMISC 9 +#define WLC_SET_PROMISC 10 +#define WLC_OVERLAY_IOCTL 11 +#define WLC_GET_RATE 12 + +#define WLC_GET_INSTANCE 14 + + + + +#define WLC_GET_INFRA 19 +#define WLC_SET_INFRA 20 +#define WLC_GET_AUTH 21 +#define WLC_SET_AUTH 22 +#define WLC_GET_BSSID 23 +#define WLC_SET_BSSID 24 +#define WLC_GET_SSID 25 +#define WLC_SET_SSID 26 +#define WLC_RESTART 27 +#define WLC_TERMINATED 28 + +#define WLC_GET_CHANNEL 29 +#define WLC_SET_CHANNEL 30 +#define WLC_GET_SRL 31 +#define WLC_SET_SRL 32 +#define WLC_GET_LRL 33 +#define WLC_SET_LRL 34 +#define WLC_GET_PLCPHDR 35 +#define WLC_SET_PLCPHDR 36 +#define WLC_GET_RADIO 37 +#define WLC_SET_RADIO 38 +#define WLC_GET_PHYTYPE 39 +#define WLC_DUMP_RATE 40 +#define WLC_SET_RATE_PARAMS 41 +#define WLC_GET_FIXRATE 42 +#define WLC_SET_FIXRATE 43 + + +#define WLC_GET_KEY 44 +#define WLC_SET_KEY 45 +#define WLC_GET_REGULATORY 46 +#define WLC_SET_REGULATORY 47 +#define WLC_GET_PASSIVE_SCAN 48 +#define WLC_SET_PASSIVE_SCAN 49 +#define WLC_SCAN 50 
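/*
 * Illustrative sketch, not part of the original patch: one plausible way a
 * user-space tool could drive the GET-style command codes defined here
 * through the wl_ioctl_t structure above.  Only wl_ioctl_t, WLC_GET_MAGIC,
 * WLC_GET_VERSION and WLC_IOCTL_MAGIC come from this header; the transport
 * (a driver-private ioctl, SIOCDEVPRIVATE, carried in ifr_data on "wlan0"),
 * the helper name wl_do_ioctl() and the local stand-in type definitions are
 * assumptions made for the sake of a self-contained example.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <net/if.h>

/* Minimal stand-ins for the header definitions used below. */
typedef struct wl_ioctl {
	unsigned int cmd;	/* WLC_GET_xxx / WLC_SET_xxx command code */
	void *buf;		/* caller-supplied buffer for the payload */
	unsigned int len;	/* length of buf in bytes */
	unsigned char set;	/* WL_IOCTL_ACTION_GET (0) or _SET (1) */
	unsigned int used;
	unsigned int needed;
} wl_ioctl_t;

#define WLC_GET_MAGIC   0
#define WLC_GET_VERSION 1
#define WLC_IOCTL_MAGIC 0x14e46c77

/* Hypothetical helper: hand one wl_ioctl_t request to the driver. */
static int wl_do_ioctl(const char *ifname, unsigned int cmd, void *buf,
		       unsigned int len, int set)
{
	struct ifreq ifr;
	wl_ioctl_t ioc;
	int s, ret;

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = cmd;
	ioc.buf = buf;
	ioc.len = len;
	ioc.set = set ? 1 : 0;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ioc;

	s = socket(AF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		return -1;
	ret = ioctl(s, SIOCDEVPRIVATE, &ifr);	/* assumed transport */
	close(s);
	return ret;
}

int main(void)
{
	int magic = 0, version = 0;

	/* Sanity-check that the interface really speaks this ioctl dialect. */
	if (wl_do_ioctl("wlan0", WLC_GET_MAGIC, &magic, sizeof(magic), 0) == 0 &&
	    magic == WLC_IOCTL_MAGIC &&
	    wl_do_ioctl("wlan0", WLC_GET_VERSION, &version, sizeof(version), 0) == 0)
		printf("wl ioctl interface, version %d\n", version);
	else
		printf("not a wl interface (or the assumed transport differs)\n");
	return 0;
}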
+#define WLC_SCAN_RESULTS 51 +#define WLC_DISASSOC 52 +#define WLC_REASSOC 53 +#define WLC_GET_ROAM_TRIGGER 54 +#define WLC_SET_ROAM_TRIGGER 55 +#define WLC_GET_ROAM_DELTA 56 +#define WLC_SET_ROAM_DELTA 57 +#define WLC_GET_ROAM_SCAN_PERIOD 58 +#define WLC_SET_ROAM_SCAN_PERIOD 59 +#define WLC_EVM 60 +#define WLC_GET_TXANT 61 +#define WLC_SET_TXANT 62 +#define WLC_GET_ANTDIV 63 +#define WLC_SET_ANTDIV 64 + + +#define WLC_GET_CLOSED 67 +#define WLC_SET_CLOSED 68 +#define WLC_GET_MACLIST 69 +#define WLC_SET_MACLIST 70 +#define WLC_GET_RATESET 71 +#define WLC_SET_RATESET 72 + +#define WLC_LONGTRAIN 74 +#define WLC_GET_BCNPRD 75 +#define WLC_SET_BCNPRD 76 +#define WLC_GET_DTIMPRD 77 +#define WLC_SET_DTIMPRD 78 +#define WLC_GET_SROM 79 +#define WLC_SET_SROM 80 +#define WLC_GET_WEP_RESTRICT 81 +#define WLC_SET_WEP_RESTRICT 82 +#define WLC_GET_COUNTRY 83 +#define WLC_SET_COUNTRY 84 +#define WLC_GET_PM 85 +#define WLC_SET_PM 86 +#define WLC_GET_WAKE 87 +#define WLC_SET_WAKE 88 + +#define WLC_GET_FORCELINK 90 +#define WLC_SET_FORCELINK 91 +#define WLC_FREQ_ACCURACY 92 +#define WLC_CARRIER_SUPPRESS 93 +#define WLC_GET_PHYREG 94 +#define WLC_SET_PHYREG 95 +#define WLC_GET_RADIOREG 96 +#define WLC_SET_RADIOREG 97 +#define WLC_GET_REVINFO 98 +#define WLC_GET_UCANTDIV 99 +#define WLC_SET_UCANTDIV 100 +#define WLC_R_REG 101 +#define WLC_W_REG 102 + + +#define WLC_GET_MACMODE 105 +#define WLC_SET_MACMODE 106 +#define WLC_GET_MONITOR 107 +#define WLC_SET_MONITOR 108 +#define WLC_GET_GMODE 109 +#define WLC_SET_GMODE 110 +#define WLC_GET_LEGACY_ERP 111 +#define WLC_SET_LEGACY_ERP 112 +#define WLC_GET_RX_ANT 113 +#define WLC_GET_CURR_RATESET 114 +#define WLC_GET_SCANSUPPRESS 115 +#define WLC_SET_SCANSUPPRESS 116 +#define WLC_GET_AP 117 +#define WLC_SET_AP 118 +#define WLC_GET_EAP_RESTRICT 119 +#define WLC_SET_EAP_RESTRICT 120 +#define WLC_SCB_AUTHORIZE 121 +#define WLC_SCB_DEAUTHORIZE 122 +#define WLC_GET_WDSLIST 123 +#define WLC_SET_WDSLIST 124 +#define WLC_GET_ATIM 125 +#define WLC_SET_ATIM 126 +#define WLC_GET_RSSI 127 +#define WLC_GET_PHYANTDIV 128 +#define WLC_SET_PHYANTDIV 129 +#define WLC_AP_RX_ONLY 130 +#define WLC_GET_TX_PATH_PWR 131 +#define WLC_SET_TX_PATH_PWR 132 +#define WLC_GET_WSEC 133 +#define WLC_SET_WSEC 134 +#define WLC_GET_PHY_NOISE 135 +#define WLC_GET_BSS_INFO 136 +#define WLC_GET_PKTCNTS 137 +#define WLC_GET_LAZYWDS 138 +#define WLC_SET_LAZYWDS 139 +#define WLC_GET_BANDLIST 140 +#define WLC_GET_BAND 141 +#define WLC_SET_BAND 142 +#define WLC_SCB_DEAUTHENTICATE 143 +#define WLC_GET_SHORTSLOT 144 +#define WLC_GET_SHORTSLOT_OVERRIDE 145 +#define WLC_SET_SHORTSLOT_OVERRIDE 146 +#define WLC_GET_SHORTSLOT_RESTRICT 147 +#define WLC_SET_SHORTSLOT_RESTRICT 148 +#define WLC_GET_GMODE_PROTECTION 149 +#define WLC_GET_GMODE_PROTECTION_OVERRIDE 150 +#define WLC_SET_GMODE_PROTECTION_OVERRIDE 151 +#define WLC_UPGRADE 152 + + +#define WLC_GET_IGNORE_BCNS 155 +#define WLC_SET_IGNORE_BCNS 156 +#define WLC_GET_SCB_TIMEOUT 157 +#define WLC_SET_SCB_TIMEOUT 158 +#define WLC_GET_ASSOCLIST 159 +#define WLC_GET_CLK 160 +#define WLC_SET_CLK 161 +#define WLC_GET_UP 162 +#define WLC_OUT 163 +#define WLC_GET_WPA_AUTH 164 +#define WLC_SET_WPA_AUTH 165 +#define WLC_GET_UCFLAGS 166 +#define WLC_SET_UCFLAGS 167 +#define WLC_GET_PWRIDX 168 +#define WLC_SET_PWRIDX 169 +#define WLC_GET_TSSI 170 +#define WLC_GET_SUP_RATESET_OVERRIDE 171 +#define WLC_SET_SUP_RATESET_OVERRIDE 172 + + + + + +#define WLC_GET_PROTECTION_CONTROL 178 +#define WLC_SET_PROTECTION_CONTROL 179 +#define WLC_GET_PHYLIST 180 +#define WLC_ENCRYPT_STRENGTH 
181 +#define WLC_DECRYPT_STATUS 182 +#define WLC_GET_KEY_SEQ 183 +#define WLC_GET_SCAN_CHANNEL_TIME 184 +#define WLC_SET_SCAN_CHANNEL_TIME 185 +#define WLC_GET_SCAN_UNASSOC_TIME 186 +#define WLC_SET_SCAN_UNASSOC_TIME 187 +#define WLC_GET_SCAN_HOME_TIME 188 +#define WLC_SET_SCAN_HOME_TIME 189 +#define WLC_GET_SCAN_NPROBES 190 +#define WLC_SET_SCAN_NPROBES 191 +#define WLC_GET_PRB_RESP_TIMEOUT 192 +#define WLC_SET_PRB_RESP_TIMEOUT 193 +#define WLC_GET_ATTEN 194 +#define WLC_SET_ATTEN 195 +#define WLC_GET_SHMEM 196 +#define WLC_SET_SHMEM 197 + + +#define WLC_SET_WSEC_TEST 200 +#define WLC_SCB_DEAUTHENTICATE_FOR_REASON 201 +#define WLC_TKIP_COUNTERMEASURES 202 +#define WLC_GET_PIOMODE 203 +#define WLC_SET_PIOMODE 204 +#define WLC_SET_ASSOC_PREFER 205 +#define WLC_GET_ASSOC_PREFER 206 +#define WLC_SET_ROAM_PREFER 207 +#define WLC_GET_ROAM_PREFER 208 +#define WLC_SET_LED 209 +#define WLC_GET_LED 210 +#define WLC_GET_INTERFERENCE_MODE 211 +#define WLC_SET_INTERFERENCE_MODE 212 +#define WLC_GET_CHANNEL_QA 213 +#define WLC_START_CHANNEL_QA 214 +#define WLC_GET_CHANNEL_SEL 215 +#define WLC_START_CHANNEL_SEL 216 +#define WLC_GET_VALID_CHANNELS 217 +#define WLC_GET_FAKEFRAG 218 +#define WLC_SET_FAKEFRAG 219 +#define WLC_GET_PWROUT_PERCENTAGE 220 +#define WLC_SET_PWROUT_PERCENTAGE 221 +#define WLC_SET_BAD_FRAME_PREEMPT 222 +#define WLC_GET_BAD_FRAME_PREEMPT 223 +#define WLC_SET_LEAP_LIST 224 +#define WLC_GET_LEAP_LIST 225 +#define WLC_GET_CWMIN 226 +#define WLC_SET_CWMIN 227 +#define WLC_GET_CWMAX 228 +#define WLC_SET_CWMAX 229 +#define WLC_GET_WET 230 +#define WLC_SET_WET 231 +#define WLC_GET_PUB 232 + + +#define WLC_GET_KEY_PRIMARY 235 +#define WLC_SET_KEY_PRIMARY 236 + +#define WLC_GET_ACI_ARGS 238 +#define WLC_SET_ACI_ARGS 239 +#define WLC_UNSET_CALLBACK 240 +#define WLC_SET_CALLBACK 241 +#define WLC_GET_RADAR 242 +#define WLC_SET_RADAR 243 +#define WLC_SET_SPECT_MANAGMENT 244 +#define WLC_GET_SPECT_MANAGMENT 245 +#define WLC_WDS_GET_REMOTE_HWADDR 246 +#define WLC_WDS_GET_WPA_SUP 247 +#define WLC_SET_CS_SCAN_TIMER 248 +#define WLC_GET_CS_SCAN_TIMER 249 +#define WLC_MEASURE_REQUEST 250 +#define WLC_INIT 251 +#define WLC_SEND_QUIET 252 +#define WLC_KEEPALIVE 253 +#define WLC_SEND_PWR_CONSTRAINT 254 +#define WLC_UPGRADE_STATUS 255 +#define WLC_CURRENT_PWR 256 +#define WLC_GET_SCAN_PASSIVE_TIME 257 +#define WLC_SET_SCAN_PASSIVE_TIME 258 +#define WLC_LEGACY_LINK_BEHAVIOR 259 +#define WLC_GET_CHANNELS_IN_COUNTRY 260 +#define WLC_GET_COUNTRY_LIST 261 +#define WLC_GET_VAR 262 +#define WLC_SET_VAR 263 +#define WLC_NVRAM_GET 264 +#define WLC_NVRAM_SET 265 +#define WLC_NVRAM_DUMP 266 +#define WLC_REBOOT 267 +#define WLC_SET_WSEC_PMK 268 +#define WLC_GET_AUTH_MODE 269 +#define WLC_SET_AUTH_MODE 270 +#define WLC_GET_WAKEENTRY 271 +#define WLC_SET_WAKEENTRY 272 +#define WLC_NDCONFIG_ITEM 273 +#define WLC_NVOTPW 274 +#define WLC_OTPW 275 +#define WLC_IOV_BLOCK_GET 276 +#define WLC_IOV_MODULES_GET 277 +#define WLC_SOFT_RESET 278 +#define WLC_GET_ALLOW_MODE 279 +#define WLC_SET_ALLOW_MODE 280 +#define WLC_GET_DESIRED_BSSID 281 +#define WLC_SET_DESIRED_BSSID 282 +#define WLC_DISASSOC_MYAP 283 +#define WLC_GET_NBANDS 284 +#define WLC_GET_BANDSTATES 285 +#define WLC_GET_WLC_BSS_INFO 286 +#define WLC_GET_ASSOC_INFO 287 +#define WLC_GET_OID_PHY 288 +#define WLC_SET_OID_PHY 289 +#define WLC_SET_ASSOC_TIME 290 +#define WLC_GET_DESIRED_SSID 291 +#define WLC_GET_CHANSPEC 292 +#define WLC_GET_ASSOC_STATE 293 +#define WLC_SET_PHY_STATE 294 +#define WLC_GET_SCAN_PENDING 295 +#define WLC_GET_SCANREQ_PENDING 296 +#define 
WLC_GET_PREV_ROAM_REASON 297 +#define WLC_SET_PREV_ROAM_REASON 298 +#define WLC_GET_BANDSTATES_PI 299 +#define WLC_GET_PHY_STATE 300 +#define WLC_GET_BSS_WPA_RSN 301 +#define WLC_GET_BSS_WPA2_RSN 302 +#define WLC_GET_BSS_BCN_TS 303 +#define WLC_GET_INT_DISASSOC 304 +#define WLC_SET_NUM_PEERS 305 +#define WLC_GET_NUM_BSS 306 +#define WLC_NPHY_SAMPLE_COLLECT 307 +#define WLC_UM_PRIV 308 +#define WLC_GET_CMD 309 + +#define WLC_SET_INTERFERENCE_OVERRIDE_MODE 311 +#define WLC_GET_INTERFERENCE_OVERRIDE_MODE 312 +#define WLC_GET_WAI_RESTRICT 313 +#define WLC_SET_WAI_RESTRICT 314 +#define WLC_SET_WAI_REKEY 315 +#define WLC_SET_PEAKRATE 316 +#define WLC_GET_PEAKRATE 317 +#define WLC_LAST 318 + +#ifndef EPICTRL_COOKIE +#define EPICTRL_COOKIE 0xABADCEDE +#endif + + +#define CMN_IOCTL_OFF 0x180 + + + + +#define WL_OID_BASE 0xFFE41420 + + +#define OID_WL_GETINSTANCE (WL_OID_BASE + WLC_GET_INSTANCE) +#define OID_WL_GET_FORCELINK (WL_OID_BASE + WLC_GET_FORCELINK) +#define OID_WL_SET_FORCELINK (WL_OID_BASE + WLC_SET_FORCELINK) +#define OID_WL_ENCRYPT_STRENGTH (WL_OID_BASE + WLC_ENCRYPT_STRENGTH) +#define OID_WL_DECRYPT_STATUS (WL_OID_BASE + WLC_DECRYPT_STATUS) +#define OID_LEGACY_LINK_BEHAVIOR (WL_OID_BASE + WLC_LEGACY_LINK_BEHAVIOR) +#define OID_WL_NDCONFIG_ITEM (WL_OID_BASE + WLC_NDCONFIG_ITEM) + + +#define OID_STA_CHANSPEC (WL_OID_BASE + WLC_GET_CHANSPEC) +#define OID_STA_NBANDS (WL_OID_BASE + WLC_GET_NBANDS) +#define OID_STA_GET_PHY (WL_OID_BASE + WLC_GET_OID_PHY) +#define OID_STA_SET_PHY (WL_OID_BASE + WLC_SET_OID_PHY) +#define OID_STA_ASSOC_TIME (WL_OID_BASE + WLC_SET_ASSOC_TIME) +#define OID_STA_DESIRED_SSID (WL_OID_BASE + WLC_GET_DESIRED_SSID) +#define OID_STA_SET_PHY_STATE (WL_OID_BASE + WLC_SET_PHY_STATE) +#define OID_STA_SCAN_PENDING (WL_OID_BASE + WLC_GET_SCAN_PENDING) +#define OID_STA_SCANREQ_PENDING (WL_OID_BASE + WLC_GET_SCANREQ_PENDING) +#define OID_STA_GET_ROAM_REASON (WL_OID_BASE + WLC_GET_PREV_ROAM_REASON) +#define OID_STA_SET_ROAM_REASON (WL_OID_BASE + WLC_SET_PREV_ROAM_REASON) +#define OID_STA_GET_PHY_STATE (WL_OID_BASE + WLC_GET_PHY_STATE) +#define OID_STA_INT_DISASSOC (WL_OID_BASE + WLC_GET_INT_DISASSOC) +#define OID_STA_SET_NUM_PEERS (WL_OID_BASE + WLC_SET_NUM_PEERS) +#define OID_STA_GET_NUM_BSS (WL_OID_BASE + WLC_GET_NUM_BSS) + +#define WL_DECRYPT_STATUS_SUCCESS 1 +#define WL_DECRYPT_STATUS_FAILURE 2 +#define WL_DECRYPT_STATUS_UNKNOWN 3 + + +#define WLC_UPGRADE_SUCCESS 0 +#define WLC_UPGRADE_PENDING 1 + +#ifdef CONFIG_USBRNDIS_RETAIL + +typedef struct { + char *name; + void *param; +} ndconfig_item_t; +#endif + + + +#define WL_AUTH_OPEN_SYSTEM 0 +#define WL_AUTH_SHARED_KEY 1 +#define WL_AUTH_OPEN_SHARED 2 + + +#define WL_RADIO_SW_DISABLE (1<<0) +#define WL_RADIO_HW_DISABLE (1<<1) +#define WL_RADIO_MPC_DISABLE (1<<2) +#define WL_RADIO_COUNTRY_DISABLE (1<<3) + +#define WL_SPURAVOID_OFF 0 +#define WL_SPURAVOID_ON1 1 +#define WL_SPURAVOID_ON2 2 + + +#define WL_TXPWR_OVERRIDE (1U<<31) +#define WL_TXPWR_NEG (1U<<30) + +#define WL_PHY_PAVARS_LEN 6 + +#define WL_PHY_PAVARS2_NUM 3 +#define WL_PHY_PAVAR_VER 1 +typedef struct wl_pavars2 { + uint16 ver; + uint16 len; + uint16 inuse; + uint16 phy_type; + uint16 bandrange; + uint16 chain; + uint16 inpa[WL_PHY_PAVARS2_NUM]; +} wl_pavars2_t; + +typedef struct wl_po { + uint16 phy_type; + uint16 band; + uint16 cckpo; + uint32 ofdmpo; + uint16 mcspo[8]; +} wl_po_t; + + +#define WLC_TXPWR_MAX (127) + + +#define WL_DIAG_INTERRUPT 1 +#define WL_DIAG_LOOPBACK 2 +#define WL_DIAG_MEMORY 3 +#define WL_DIAG_LED 4 +#define WL_DIAG_REG 5 +#define 
WL_DIAG_SROM 6 +#define WL_DIAG_DMA 7 + +#define WL_DIAGERR_SUCCESS 0 +#define WL_DIAGERR_FAIL_TO_RUN 1 +#define WL_DIAGERR_NOT_SUPPORTED 2 +#define WL_DIAGERR_INTERRUPT_FAIL 3 +#define WL_DIAGERR_LOOPBACK_FAIL 4 +#define WL_DIAGERR_SROM_FAIL 5 +#define WL_DIAGERR_SROM_BADCRC 6 +#define WL_DIAGERR_REG_FAIL 7 +#define WL_DIAGERR_MEMORY_FAIL 8 +#define WL_DIAGERR_NOMEM 9 +#define WL_DIAGERR_DMA_FAIL 10 + +#define WL_DIAGERR_MEMORY_TIMEOUT 11 +#define WL_DIAGERR_MEMORY_BADPATTERN 12 + + +#define WLC_BAND_AUTO 0 +#define WLC_BAND_5G 1 +#define WLC_BAND_2G 2 +#define WLC_BAND_ALL 3 + + +#define WL_CHAN_FREQ_RANGE_2G 0 +#define WL_CHAN_FREQ_RANGE_5GL 1 +#define WL_CHAN_FREQ_RANGE_5GM 2 +#define WL_CHAN_FREQ_RANGE_5GH 3 + +#define WL_CHAN_FREQ_RANGE_5GLL_5BAND 4 +#define WL_CHAN_FREQ_RANGE_5GLH_5BAND 5 +#define WL_CHAN_FREQ_RANGE_5GML_5BAND 6 +#define WL_CHAN_FREQ_RANGE_5GMH_5BAND 7 +#define WL_CHAN_FREQ_RANGE_5GH_5BAND 8 + +#define WL_CHAN_FREQ_RANGE_5G_BAND0 1 +#define WL_CHAN_FREQ_RANGE_5G_BAND1 2 +#define WL_CHAN_FREQ_RANGE_5G_BAND2 3 +#define WL_CHAN_FREQ_RANGE_5G_BAND3 4 + + +#define WLC_PHY_TYPE_A 0 +#define WLC_PHY_TYPE_B 1 +#define WLC_PHY_TYPE_G 2 +#define WLC_PHY_TYPE_N 4 +#define WLC_PHY_TYPE_LP 5 +#define WLC_PHY_TYPE_SSN 6 +#define WLC_PHY_TYPE_HT 7 +#define WLC_PHY_TYPE_LCN 8 +#define WLC_PHY_TYPE_NULL 0xf + + +#define WLC_MACMODE_DISABLED 0 +#define WLC_MACMODE_DENY 1 +#define WLC_MACMODE_ALLOW 2 + + +#define GMODE_LEGACY_B 0 +#define GMODE_AUTO 1 +#define GMODE_ONLY 2 +#define GMODE_B_DEFERRED 3 +#define GMODE_PERFORMANCE 4 +#define GMODE_LRS 5 +#define GMODE_MAX 6 + + +#define WLC_PLCP_AUTO -1 +#define WLC_PLCP_SHORT 0 +#define WLC_PLCP_LONG 1 + + +#define WLC_PROTECTION_AUTO -1 +#define WLC_PROTECTION_OFF 0 +#define WLC_PROTECTION_ON 1 +#define WLC_PROTECTION_MMHDR_ONLY 2 +#define WLC_PROTECTION_CTS_ONLY 3 + + +#define WLC_PROTECTION_CTL_OFF 0 +#define WLC_PROTECTION_CTL_LOCAL 1 +#define WLC_PROTECTION_CTL_OVERLAP 2 + + +#define WLC_N_PROTECTION_OFF 0 +#define WLC_N_PROTECTION_OPTIONAL 1 +#define WLC_N_PROTECTION_20IN40 2 +#define WLC_N_PROTECTION_MIXEDMODE 3 + + +#define WLC_N_PREAMBLE_MIXEDMODE 0 +#define WLC_N_PREAMBLE_GF 1 +#define WLC_N_PREAMBLE_GF_BRCM 2 + + +#define WLC_N_BW_20ALL 0 +#define WLC_N_BW_40ALL 1 +#define WLC_N_BW_20IN2G_40IN5G 2 + + +#define WLC_N_TXRX_CHAIN0 0 +#define WLC_N_TXRX_CHAIN1 1 + + +#define WLC_N_SGI_20 0x01 +#define WLC_N_SGI_40 0x02 + + +#define PM_OFF 0 +#define PM_MAX 1 +#define PM_FAST 2 + +#define LISTEN_INTERVAL 10 + +#define INTERFERE_OVRRIDE_OFF -1 +#define INTERFERE_NONE 0 +#define NON_WLAN 1 +#define WLAN_MANUAL 2 +#define WLAN_AUTO 3 +#define WLAN_AUTO_W_NOISE 4 +#define AUTO_ACTIVE (1 << 7) + +typedef struct wl_aci_args { + int enter_aci_thresh; + int exit_aci_thresh; + int usec_spin; + int glitch_delay; + uint16 nphy_adcpwr_enter_thresh; + uint16 nphy_adcpwr_exit_thresh; + uint16 nphy_repeat_ctr; + uint16 nphy_num_samples; + uint16 nphy_undetect_window_sz; + uint16 nphy_b_energy_lo_aci; + uint16 nphy_b_energy_md_aci; + uint16 nphy_b_energy_hi_aci; + uint16 nphy_noise_noassoc_glitch_th_up; + uint16 nphy_noise_noassoc_glitch_th_dn; + uint16 nphy_noise_assoc_glitch_th_up; + uint16 nphy_noise_assoc_glitch_th_dn; + uint16 nphy_noise_assoc_aci_glitch_th_up; + uint16 nphy_noise_assoc_aci_glitch_th_dn; + uint16 nphy_noise_assoc_enter_th; + uint16 nphy_noise_noassoc_enter_th; + uint16 nphy_noise_assoc_rx_glitch_badplcp_enter_th; + uint16 nphy_noise_noassoc_crsidx_incr; + uint16 nphy_noise_assoc_crsidx_incr; + uint16 nphy_noise_crsidx_decr; 
+} wl_aci_args_t; + +#define TRIGGER_NOW 0 +#define TRIGGER_CRS 0x01 +#define TRIGGER_CRSDEASSERT 0x02 +#define TRIGGER_GOODFCS 0x04 +#define TRIGGER_BADFCS 0x08 +#define TRIGGER_BADPLCP 0x10 +#define TRIGGER_CRSGLITCH 0x20 +#define WL_ACI_ARGS_LEGACY_LENGTH 16 +#define WL_SAMPLECOLLECT_T_VERSION 2 +typedef struct wl_samplecollect_args { + + uint8 coll_us; + int cores; + + uint16 version; + uint16 length; + int8 trigger; + uint16 timeout; + uint16 mode; + uint32 pre_dur; + uint32 post_dur; + uint8 gpio_sel; + bool downsamp; + bool be_deaf; + bool agc; + bool filter; + + uint8 trigger_state; + uint8 module_sel1; + uint8 module_sel2; + uint16 nsamps; +} wl_samplecollect_args_t; + +#define WL_SAMPLEDATA_HEADER_TYPE 1 +#define WL_SAMPLEDATA_HEADER_SIZE 80 +#define WL_SAMPLEDATA_TYPE 2 +#define WL_SAMPLEDATA_SEQ 0xff +#define WL_SAMPLEDATA_MORE_DATA 0x100 +#define WL_SAMPLEDATA_T_VERSION 1 + +#define WL_SAMPLEDATA_T_VERSION_SPEC_AN 2 + +typedef struct wl_sampledata { + uint16 version; + uint16 size; + uint16 tag; + uint16 length; + uint32 flag; +} wl_sampledata_t; + + +#define WL_CHAN_VALID_HW (1 << 0) +#define WL_CHAN_VALID_SW (1 << 1) +#define WL_CHAN_BAND_5G (1 << 2) +#define WL_CHAN_RADAR (1 << 3) +#define WL_CHAN_INACTIVE (1 << 4) +#define WL_CHAN_PASSIVE (1 << 5) +#define WL_CHAN_RESTRICTED (1 << 6) + + +#define WL_ERROR_VAL 0x00000001 +#define WL_TRACE_VAL 0x00000002 +#define WL_PRHDRS_VAL 0x00000004 +#define WL_PRPKT_VAL 0x00000008 +#define WL_INFORM_VAL 0x00000010 +#define WL_TMP_VAL 0x00000020 +#define WL_OID_VAL 0x00000040 +#define WL_RATE_VAL 0x00000080 +#define WL_ASSOC_VAL 0x00000100 +#define WL_PRUSR_VAL 0x00000200 +#define WL_PS_VAL 0x00000400 +#define WL_TXPWR_VAL 0x00000800 +#define WL_PORT_VAL 0x00001000 +#define WL_DUAL_VAL 0x00002000 +#define WL_WSEC_VAL 0x00004000 +#define WL_WSEC_DUMP_VAL 0x00008000 +#define WL_LOG_VAL 0x00010000 +#define WL_NRSSI_VAL 0x00020000 +#define WL_LOFT_VAL 0x00040000 +#define WL_REGULATORY_VAL 0x00080000 +#define WL_PHYCAL_VAL 0x00100000 +#define WL_RADAR_VAL 0x00200000 +#define WL_MPC_VAL 0x00400000 +#define WL_APSTA_VAL 0x00800000 +#define WL_DFS_VAL 0x01000000 +#define WL_BA_VAL 0x02000000 +#define WL_ACI_VAL 0x04000000 +#define WL_MBSS_VAL 0x04000000 +#define WL_CAC_VAL 0x08000000 +#define WL_AMSDU_VAL 0x10000000 +#define WL_AMPDU_VAL 0x20000000 +#define WL_FFPLD_VAL 0x40000000 + + +#define WL_DPT_VAL 0x00000001 +#define WL_SCAN_VAL 0x00000002 +#define WL_WOWL_VAL 0x00000004 +#define WL_COEX_VAL 0x00000008 +#define WL_RTDC_VAL 0x00000010 +#define WL_PROTO_VAL 0x00000020 +#define WL_BTA_VAL 0x00000040 +#define WL_CHANINT_VAL 0x00000080 +#define WL_THERMAL_VAL 0x00000100 +#define WL_P2P_VAL 0x00000200 +#define WL_TXRX_VAL 0x00000400 +#define WL_MCHAN_VAL 0x00000800 +#define WL_TDLS_VAL 0x00001000 + + +#define WL_LED_NUMGPIO 16 + + +#define WL_LED_OFF 0 +#define WL_LED_ON 1 +#define WL_LED_ACTIVITY 2 +#define WL_LED_RADIO 3 +#define WL_LED_ARADIO 4 +#define WL_LED_BRADIO 5 +#define WL_LED_BGMODE 6 +#define WL_LED_WI1 7 +#define WL_LED_WI2 8 +#define WL_LED_WI3 9 +#define WL_LED_ASSOC 10 +#define WL_LED_INACTIVE 11 +#define WL_LED_ASSOCACT 12 +#define WL_LED_WI4 13 +#define WL_LED_WI5 14 +#define WL_LED_BLINKSLOW 15 +#define WL_LED_BLINKMED 16 +#define WL_LED_BLINKFAST 17 +#define WL_LED_BLINKCUSTOM 18 +#define WL_LED_BLINKPERIODIC 19 +#define WL_LED_ASSOC_WITH_SEC 20 + +#define WL_LED_START_OFF 21 +#define WL_LED_NUMBEHAVIOR 22 + + +#define WL_LED_BEH_MASK 0x7f +#define WL_LED_AL_MASK 0x80 + + +#define WL_NUMCHANNELS 64 +#define WL_NUMCHANSPECS 
100 + + +#define WL_WDS_WPA_ROLE_AUTH 0 +#define WL_WDS_WPA_ROLE_SUP 1 +#define WL_WDS_WPA_ROLE_AUTO 255 + + +#define WL_EVENTING_MASK_LEN 16 + + + + +#define WL_JOIN_PREF_RSSI 1 +#define WL_JOIN_PREF_WPA 2 +#define WL_JOIN_PREF_BAND 3 +#define WL_JOIN_PREF_RSSI_DELTA 4 +#define WL_JOIN_PREF_TRANS_PREF 5 + + +#define WLJP_BAND_ASSOC_PREF 255 + + +#define WL_WPA_ACP_MCS_ANY "\x00\x00\x00\x00" + +struct tsinfo_arg { + uint8 octets[3]; +}; + +#define NFIFO 6 + +#define WL_CNT_T_VERSION 6 + +typedef struct { + uint16 version; + uint16 length; + + + uint32 txframe; + uint32 txbyte; + uint32 txretrans; + uint32 txerror; + uint32 txctl; + uint32 txprshort; + uint32 txserr; + uint32 txnobuf; + uint32 txnoassoc; + uint32 txrunt; + uint32 txchit; + uint32 txcmiss; + + + uint32 txuflo; + uint32 txphyerr; + uint32 txphycrs; + + + uint32 rxframe; + uint32 rxbyte; + uint32 rxerror; + uint32 rxctl; + uint32 rxnobuf; + uint32 rxnondata; + uint32 rxbadds; + uint32 rxbadcm; + uint32 rxfragerr; + uint32 rxrunt; + uint32 rxgiant; + uint32 rxnoscb; + uint32 rxbadproto; + uint32 rxbadsrcmac; + uint32 rxbadda; + uint32 rxfilter; + + + uint32 rxoflo; + uint32 rxuflo[NFIFO]; + + uint32 d11cnt_txrts_off; + uint32 d11cnt_rxcrc_off; + uint32 d11cnt_txnocts_off; + + + uint32 dmade; + uint32 dmada; + uint32 dmape; + uint32 reset; + uint32 tbtt; + uint32 txdmawar; + uint32 pkt_callback_reg_fail; + + + uint32 txallfrm; + uint32 txrtsfrm; + uint32 txctsfrm; + uint32 txackfrm; + uint32 txdnlfrm; + uint32 txbcnfrm; + uint32 txfunfl[8]; + uint32 txtplunfl; + uint32 txphyerror; + uint32 rxfrmtoolong; + uint32 rxfrmtooshrt; + uint32 rxinvmachdr; + uint32 rxbadfcs; + uint32 rxbadplcp; + uint32 rxcrsglitch; + uint32 rxstrt; + uint32 rxdfrmucastmbss; + uint32 rxmfrmucastmbss; + uint32 rxcfrmucast; + uint32 rxrtsucast; + uint32 rxctsucast; + uint32 rxackucast; + uint32 rxdfrmocast; + uint32 rxmfrmocast; + uint32 rxcfrmocast; + uint32 rxrtsocast; + uint32 rxctsocast; + uint32 rxdfrmmcast; + uint32 rxmfrmmcast; + uint32 rxcfrmmcast; + uint32 rxbeaconmbss; + uint32 rxdfrmucastobss; + uint32 rxbeaconobss; + uint32 rxrsptmout; + uint32 bcntxcancl; + uint32 rxf0ovfl; + uint32 rxf1ovfl; + uint32 rxf2ovfl; + uint32 txsfovfl; + uint32 pmqovfl; + uint32 rxcgprqfrm; + uint32 rxcgprsqovfl; + uint32 txcgprsfail; + uint32 txcgprssuc; + uint32 prs_timeout; + uint32 rxnack; + uint32 frmscons; + uint32 txnack; + uint32 txglitch_nack; + uint32 txburst; + + + uint32 txfrag; + uint32 txmulti; + uint32 txfail; + uint32 txretry; + uint32 txretrie; + uint32 rxdup; + uint32 txrts; + uint32 txnocts; + uint32 txnoack; + uint32 rxfrag; + uint32 rxmulti; + uint32 rxcrc; + uint32 txfrmsnt; + uint32 rxundec; + + + uint32 tkipmicfaill; + uint32 tkipcntrmsr; + uint32 tkipreplay; + uint32 ccmpfmterr; + uint32 ccmpreplay; + uint32 ccmpundec; + uint32 fourwayfail; + uint32 wepundec; + uint32 wepicverr; + uint32 decsuccess; + uint32 tkipicverr; + uint32 wepexcluded; + + uint32 rxundec_mcst; + + + uint32 tkipmicfaill_mcst; + uint32 tkipcntrmsr_mcst; + uint32 tkipreplay_mcst; + uint32 ccmpfmterr_mcst; + uint32 ccmpreplay_mcst; + uint32 ccmpundec_mcst; + uint32 fourwayfail_mcst; + uint32 wepundec_mcst; + uint32 wepicverr_mcst; + uint32 decsuccess_mcst; + uint32 tkipicverr_mcst; + uint32 wepexcluded_mcst; + + uint32 txchanrej; + uint32 txexptime; + uint32 psmwds; + uint32 phywatchdog; + + + uint32 prq_entries_handled; + uint32 prq_undirected_entries; + uint32 prq_bad_entries; + uint32 atim_suppress_count; + uint32 bcn_template_not_ready; + uint32 
bcn_template_not_ready_done; + uint32 late_tbtt_dpc; + + + uint32 rx1mbps; + uint32 rx2mbps; + uint32 rx5mbps5; + uint32 rx6mbps; + uint32 rx9mbps; + uint32 rx11mbps; + uint32 rx12mbps; + uint32 rx18mbps; + uint32 rx24mbps; + uint32 rx36mbps; + uint32 rx48mbps; + uint32 rx54mbps; + uint32 rx108mbps; + uint32 rx162mbps; + uint32 rx216mbps; + uint32 rx270mbps; + uint32 rx324mbps; + uint32 rx378mbps; + uint32 rx432mbps; + uint32 rx486mbps; + uint32 rx540mbps; + + + uint32 pktengrxducast; + uint32 pktengrxdmcast; + + uint32 rfdisable; + uint32 bphy_rxcrsglitch; + + uint32 txmpdu_sgi; + uint32 rxmpdu_sgi; + uint32 txmpdu_stbc; + uint32 rxmpdu_stbc; +} wl_cnt_t; + + +#define WL_WME_CNT_VERSION 1 + +typedef struct { + uint32 packets; + uint32 bytes; +} wl_traffic_stats_t; + +typedef struct { + uint16 version; + uint16 length; + + wl_traffic_stats_t tx[AC_COUNT]; + wl_traffic_stats_t tx_failed[AC_COUNT]; + wl_traffic_stats_t rx[AC_COUNT]; + wl_traffic_stats_t rx_failed[AC_COUNT]; + + wl_traffic_stats_t forward[AC_COUNT]; + + wl_traffic_stats_t tx_expired[AC_COUNT]; + +} wl_wme_cnt_t; + +struct wl_msglevel2 { + uint32 low; + uint32 high; +}; + +typedef struct wl_mkeep_alive_pkt { + uint16 version; + uint16 length; + uint32 period_msec; + uint16 len_bytes; + uint8 keep_alive_id; + uint8 data[1]; +} wl_mkeep_alive_pkt_t; + +#define WL_MKEEP_ALIVE_VERSION 1 +#define WL_MKEEP_ALIVE_FIXED_LEN OFFSETOF(wl_mkeep_alive_pkt_t, data) +#define WL_MKEEP_ALIVE_PRECISION 500 + + + +#define WLC_ROAM_TRIGGER_DEFAULT 0 +#define WLC_ROAM_TRIGGER_BANDWIDTH 1 +#define WLC_ROAM_TRIGGER_DISTANCE 2 +#define WLC_ROAM_TRIGGER_AUTO 3 +#define WLC_ROAM_TRIGGER_MAX_VALUE 3 + + +#define WPA_AUTH_PFN_ANY 0xffffffff + +enum { + PFN_LIST_ORDER, + PFN_RSSI +}; + +enum { + DISABLE, + ENABLE +}; + +enum { + OFF_ADAPT, + SMART_ADAPT, + STRICT_ADAPT, + SLOW_ADAPT +}; + +#define SORT_CRITERIA_BIT 0 +#define AUTO_NET_SWITCH_BIT 1 +#define ENABLE_BKGRD_SCAN_BIT 2 +#define IMMEDIATE_SCAN_BIT 3 +#define AUTO_CONNECT_BIT 4 +#define ENABLE_BD_SCAN_BIT 5 +#define ENABLE_ADAPTSCAN_BIT 6 +#define IMMEDIATE_EVENT_BIT 8 + +#define SORT_CRITERIA_MASK 0x0001 +#define AUTO_NET_SWITCH_MASK 0x0002 +#define ENABLE_BKGRD_SCAN_MASK 0x0004 +#define IMMEDIATE_SCAN_MASK 0x0008 +#define AUTO_CONNECT_MASK 0x0010 +#define ENABLE_BD_SCAN_MASK 0x0020 +#define ENABLE_ADAPTSCAN_MASK 0x00c0 +#define IMMEDIATE_EVENT_MASK 0x0100 + +#define PFN_VERSION 2 +#define PFN_SCANRESULT_VERSION 1 +#define MAX_PFN_LIST_COUNT 16 + +#define PFN_COMPLETE 1 +#define PFN_INCOMPLETE 0 + +#define DEFAULT_BESTN 2 +#define DEFAULT_MSCAN 0 +#define DEFAULT_REPEAT 10 +#define DEFAULT_EXP 2 + + +typedef struct wl_pfn_subnet_info { + struct ether_addr BSSID; + uint8 channel; + uint8 SSID_len; + uint8 SSID[32]; +} wl_pfn_subnet_info_t; + +typedef struct wl_pfn_net_info { + wl_pfn_subnet_info_t pfnsubnet; + int16 RSSI; + uint16 timestamp; +} wl_pfn_net_info_t; + +typedef struct wl_pfn_scanresults { + uint32 version; + uint32 status; + uint32 count; + wl_pfn_net_info_t netinfo[1]; +} wl_pfn_scanresults_t; + + +typedef struct wl_pfn_param { + int32 version; + int32 scan_freq; + int32 lost_network_timeout; + int16 flags; + int16 rssi_margin; + uint8 bestn; + uint8 mscan; + uint8 repeat; + uint8 exp; + int32 slow_freq; +} wl_pfn_param_t; + +typedef struct wl_pfn_bssid { + struct ether_addr macaddr; + + uint16 flags; +} wl_pfn_bssid_t; +#define WL_PFN_SUPPRESSFOUND_MASK 0x08 +#define WL_PFN_SUPPRESSLOST_MASK 0x10 + +typedef struct wl_pfn_cfg { + uint32 reporttype; + int32 channel_num; + uint16 
channel_list[WL_NUMCHANNELS]; +} wl_pfn_cfg_t; +#define WL_PFN_REPORT_ALLNET 0 +#define WL_PFN_REPORT_SSIDNET 1 +#define WL_PFN_REPORT_BSSIDNET 2 + +typedef struct wl_pfn { + wlc_ssid_t ssid; + int32 flags; + int32 infra; + int32 auth; + int32 wpa_auth; + int32 wsec; +} wl_pfn_t; +#define WL_PFN_HIDDEN_BIT 2 +#define PNO_SCAN_MAX_FW 508*1000 +#define PNO_SCAN_MAX_FW_SEC PNO_SCAN_MAX_FW/1000 +#define PNO_SCAN_MIN_FW_SEC 10 +#define WL_PFN_HIDDEN_MASK 0x4 + + +#define TOE_TX_CSUM_OL 0x00000001 +#define TOE_RX_CSUM_OL 0x00000002 + + +#define TOE_ERRTEST_TX_CSUM 0x00000001 +#define TOE_ERRTEST_RX_CSUM 0x00000002 +#define TOE_ERRTEST_RX_CSUM2 0x00000004 + +struct toe_ol_stats_t { + + uint32 tx_summed; + + + uint32 tx_iph_fill; + uint32 tx_tcp_fill; + uint32 tx_udp_fill; + uint32 tx_icmp_fill; + + + uint32 rx_iph_good; + uint32 rx_iph_bad; + uint32 rx_tcp_good; + uint32 rx_tcp_bad; + uint32 rx_udp_good; + uint32 rx_udp_bad; + uint32 rx_icmp_good; + uint32 rx_icmp_bad; + + + uint32 tx_tcp_errinj; + uint32 tx_udp_errinj; + uint32 tx_icmp_errinj; + + + uint32 rx_tcp_errinj; + uint32 rx_udp_errinj; + uint32 rx_icmp_errinj; +}; + + +#define ARP_OL_AGENT 0x00000001 +#define ARP_OL_SNOOP 0x00000002 +#define ARP_OL_HOST_AUTO_REPLY 0x00000004 +#define ARP_OL_PEER_AUTO_REPLY 0x00000008 + + +#define ARP_ERRTEST_REPLY_PEER 0x1 +#define ARP_ERRTEST_REPLY_HOST 0x2 + +#define ARP_MULTIHOMING_MAX 8 + + +struct arp_ol_stats_t { + uint32 host_ip_entries; + uint32 host_ip_overflow; + + uint32 arp_table_entries; + uint32 arp_table_overflow; + + uint32 host_request; + uint32 host_reply; + uint32 host_service; + + uint32 peer_request; + uint32 peer_request_drop; + uint32 peer_reply; + uint32 peer_reply_drop; + uint32 peer_service; +}; + + + + +typedef struct wl_keep_alive_pkt { + uint32 period_msec; + uint16 len_bytes; + uint8 data[1]; +} wl_keep_alive_pkt_t; + +#define WL_KEEP_ALIVE_FIXED_LEN OFFSETOF(wl_keep_alive_pkt_t, data) + + + + +#define MAX_WAKE_PACKET_BYTES 128 + + +typedef struct pm_wake_packet { + uint32 status; + uint32 pattern_id; + uint32 original_packet_size; + uint32 saved_packet_size; + uchar packet[MAX_WAKE_PACKET_BYTES]; +} pm_wake_packet_t; + + + +#define PKT_FILTER_MODE_FORWARD_ON_MATCH 1 + +#define PKT_FILTER_MODE_DISABLE 2 + +#define PKT_FILTER_MODE_PKT_CACHE_ON_MATCH 4 + +#define PKT_FILTER_MODE_PKT_FORWARD_OFF_DEFAULT 8 + + +typedef enum wl_pkt_filter_type { + WL_PKT_FILTER_TYPE_PATTERN_MATCH, + WL_PKT_FILTER_TYPE_MAGIC_PATTERN_MATCH +} wl_pkt_filter_type_t; + +#define WL_PKT_FILTER_TYPE wl_pkt_filter_type_t + + +typedef struct wl_pkt_filter_pattern { + uint32 offset; + uint32 size_bytes; + uint8 mask_and_pattern[1]; +} wl_pkt_filter_pattern_t; + + +typedef struct wl_pkt_filter { + uint32 id; + uint32 type; + uint32 negate_match; + union { + wl_pkt_filter_pattern_t pattern; + } u; +} wl_pkt_filter_t; + +#define WL_PKT_FILTER_FIXED_LEN OFFSETOF(wl_pkt_filter_t, u) +#define WL_PKT_FILTER_PATTERN_FIXED_LEN OFFSETOF(wl_pkt_filter_pattern_t, mask_and_pattern) + + +typedef struct wl_pkt_filter_enable { + uint32 id; + uint32 enable; +} wl_pkt_filter_enable_t; + + +typedef struct wl_pkt_filter_list { + uint32 num; + wl_pkt_filter_t filter[1]; +} wl_pkt_filter_list_t; + +#define WL_PKT_FILTER_LIST_FIXED_LEN OFFSETOF(wl_pkt_filter_list_t, filter) + + +typedef struct wl_pkt_filter_stats { + uint32 num_pkts_matched; + uint32 num_pkts_forwarded; + uint32 num_pkts_discarded; +} wl_pkt_filter_stats_t; + + +typedef struct wl_seq_cmd_ioctl { + uint32 cmd; + uint32 len; +} wl_seq_cmd_ioctl_t; + +#define 
WL_SEQ_CMD_ALIGN_BYTES 4 + + +#define WL_SEQ_CMDS_GET_IOCTL_FILTER(cmd) \ + (((cmd) == WLC_GET_MAGIC) || \ + ((cmd) == WLC_GET_VERSION) || \ + ((cmd) == WLC_GET_AP) || \ + ((cmd) == WLC_GET_INSTANCE)) + + + +#define WL_PKTENG_PER_TX_START 0x01 +#define WL_PKTENG_PER_TX_STOP 0x02 +#define WL_PKTENG_PER_RX_START 0x04 +#define WL_PKTENG_PER_RX_WITH_ACK_START 0x05 +#define WL_PKTENG_PER_TX_WITH_ACK_START 0x06 +#define WL_PKTENG_PER_RX_STOP 0x08 +#define WL_PKTENG_PER_MASK 0xff + +#define WL_PKTENG_SYNCHRONOUS 0x100 + +typedef struct wl_pkteng { + uint32 flags; + uint32 delay; + uint32 nframes; + uint32 length; + uint8 seqno; + struct ether_addr dest; + struct ether_addr src; +} wl_pkteng_t; + +#define NUM_80211b_RATES 4 +#define NUM_80211ag_RATES 8 +#define NUM_80211n_RATES 32 +#define NUM_80211_RATES (NUM_80211b_RATES+NUM_80211ag_RATES+NUM_80211n_RATES) +typedef struct wl_pkteng_stats { + uint32 lostfrmcnt; + int32 rssi; + int32 snr; + uint16 rxpktcnt[NUM_80211_RATES+1]; +} wl_pkteng_stats_t; + + +#define WL_WOWL_MAGIC (1 << 0) +#define WL_WOWL_NET (1 << 1) +#define WL_WOWL_DIS (1 << 2) +#define WL_WOWL_RETR (1 << 3) +#define WL_WOWL_BCN (1 << 4) +#define WL_WOWL_TST (1 << 5) +#define WL_WOWL_M1 (1 << 6) +#define WL_WOWL_EAPID (1 << 7) +#define WL_WOWL_KEYROT (1 << 14) +#define WL_WOWL_BCAST (1 << 15) + +#define MAGIC_PKT_MINLEN 102 + +typedef struct { + uint masksize; + uint offset; + uint patternoffset; + uint patternsize; + ulong id; + + +} wl_wowl_pattern_t; + +typedef struct { + uint count; + wl_wowl_pattern_t pattern[1]; +} wl_wowl_pattern_list_t; + +typedef struct { + uint8 pci_wakeind; + uint16 ucode_wakeind; +} wl_wowl_wakeind_t; + + +typedef struct wl_txrate_class { + uint8 init_rate; + uint8 min_rate; + uint8 max_rate; +} wl_txrate_class_t; + + + + +#define WLC_OBSS_SCAN_PASSIVE_DWELL_DEFAULT 20 +#define WLC_OBSS_SCAN_PASSIVE_DWELL_MIN 5 +#define WLC_OBSS_SCAN_PASSIVE_DWELL_MAX 1000 +#define WLC_OBSS_SCAN_ACTIVE_DWELL_DEFAULT 10 +#define WLC_OBSS_SCAN_ACTIVE_DWELL_MIN 10 +#define WLC_OBSS_SCAN_ACTIVE_DWELL_MAX 1000 +#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_DEFAULT 300 +#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MIN 10 +#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MAX 900 +#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_DEFAULT 5 +#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MIN 5 +#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MAX 100 +#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_DEFAULT 200 +#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MIN 200 +#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MAX 10000 +#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_DEFAULT 20 +#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MIN 20 +#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MAX 10000 +#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_DEFAULT 25 +#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MIN 0 +#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MAX 100 + + +typedef struct wl_obss_scan_arg { + int16 passive_dwell; + int16 active_dwell; + int16 bss_widthscan_interval; + int16 passive_total; + int16 active_total; + int16 chanwidth_transition_delay; + int16 activity_threshold; +} wl_obss_scan_arg_t; + +#define WL_OBSS_SCAN_PARAM_LEN sizeof(wl_obss_scan_arg_t) +#define WL_MIN_NUM_OBSS_SCAN_ARG 7 + +#define WL_COEX_INFO_MASK 0x07 +#define WL_COEX_INFO_REQ 0x01 +#define WL_COEX_40MHZ_INTOLERANT 0x02 +#define WL_COEX_WIDTH20 0x04 + +#define WLC_RSSI_INVALID 0 + +#define MAX_RSSI_LEVELS 8 + + +typedef struct wl_rssi_event { + uint32 rate_limit_msec; + uint8 num_rssi_levels; + int8 rssi_levels[MAX_RSSI_LEVELS]; +} 
wl_rssi_event_t; + +typedef struct wl_action_obss_coex_req { + uint8 info; + uint8 num; + uint8 ch_list[1]; +} wl_action_obss_coex_req_t; + + +#define EXTLOG_CUR_VER 0x0100 + +#define MAX_ARGSTR_LEN 18 + + +#define LOG_MODULE_COMMON 0x0001 +#define LOG_MODULE_ASSOC 0x0002 +#define LOG_MODULE_EVENT 0x0004 +#define LOG_MODULE_MAX 3 + + +#define WL_LOG_LEVEL_DISABLE 0 +#define WL_LOG_LEVEL_ERR 1 +#define WL_LOG_LEVEL_WARN 2 +#define WL_LOG_LEVEL_INFO 3 +#define WL_LOG_LEVEL_MAX WL_LOG_LEVEL_INFO + + +#define LOG_FLAG_EVENT 1 + + +#define LOG_ARGTYPE_NULL 0 +#define LOG_ARGTYPE_STR 1 +#define LOG_ARGTYPE_INT 2 +#define LOG_ARGTYPE_INT_STR 3 +#define LOG_ARGTYPE_STR_INT 4 + +typedef struct wlc_extlog_cfg { + int max_number; + uint16 module; + uint8 level; + uint8 flag; + uint16 version; +} wlc_extlog_cfg_t; + +typedef struct log_record { + uint32 time; + uint16 module; + uint16 id; + uint8 level; + uint8 sub_unit; + uint8 seq_num; + int32 arg; + char str[MAX_ARGSTR_LEN]; +} log_record_t; + +typedef struct wlc_extlog_req { + uint32 from_last; + uint32 num; +} wlc_extlog_req_t; + +typedef struct wlc_extlog_results { + uint16 version; + uint16 record_len; + uint32 num; + log_record_t logs[1]; +} wlc_extlog_results_t; + +typedef struct log_idstr { + uint16 id; + uint16 flag; + uint8 arg_type; + const char *fmt_str; +} log_idstr_t; + +#define FMTSTRF_USER 1 + + +typedef enum { + FMTSTR_DRIVER_UP_ID = 0, + FMTSTR_DRIVER_DOWN_ID = 1, + FMTSTR_SUSPEND_MAC_FAIL_ID = 2, + FMTSTR_NO_PROGRESS_ID = 3, + FMTSTR_RFDISABLE_ID = 4, + FMTSTR_REG_PRINT_ID = 5, + FMTSTR_EXPTIME_ID = 6, + FMTSTR_JOIN_START_ID = 7, + FMTSTR_JOIN_COMPLETE_ID = 8, + FMTSTR_NO_NETWORKS_ID = 9, + FMTSTR_SECURITY_MISMATCH_ID = 10, + FMTSTR_RATE_MISMATCH_ID = 11, + FMTSTR_AP_PRUNED_ID = 12, + FMTSTR_KEY_INSERTED_ID = 13, + FMTSTR_DEAUTH_ID = 14, + FMTSTR_DISASSOC_ID = 15, + FMTSTR_LINK_UP_ID = 16, + FMTSTR_LINK_DOWN_ID = 17, + FMTSTR_RADIO_HW_OFF_ID = 18, + FMTSTR_RADIO_HW_ON_ID = 19, + FMTSTR_EVENT_DESC_ID = 20, + FMTSTR_PNP_SET_POWER_ID = 21, + FMTSTR_RADIO_SW_OFF_ID = 22, + FMTSTR_RADIO_SW_ON_ID = 23, + FMTSTR_PWD_MISMATCH_ID = 24, + FMTSTR_FATAL_ERROR_ID = 25, + FMTSTR_AUTH_FAIL_ID = 26, + FMTSTR_ASSOC_FAIL_ID = 27, + FMTSTR_IBSS_FAIL_ID = 28, + FMTSTR_EXTAP_FAIL_ID = 29, + FMTSTR_MAX_ID +} log_fmtstr_id_t; + +#ifdef DONGLEOVERLAYS +typedef struct { + uint32 flags_idx; + uint32 offset; + uint32 len; + +} wl_ioctl_overlay_t; + +#define OVERLAY_IDX_MASK 0x000000ff +#define OVERLAY_IDX_SHIFT 0 +#define OVERLAY_FLAGS_MASK 0xffffff00 +#define OVERLAY_FLAGS_SHIFT 8 + +#define OVERLAY_FLAG_POSTLOAD 0x100 + +#define OVERLAY_FLAG_DEFER_DL 0x200 + +#define OVERLAY_FLAG_PRESLEEP 0x400 + +#define OVERLAY_DOWNLOAD_CHUNKSIZE 1024 +#endif + + +#include + + +#include + +#define VNDR_IE_CMD_LEN 4 + + +#define VNDR_IE_BEACON_FLAG 0x1 +#define VNDR_IE_PRBRSP_FLAG 0x2 +#define VNDR_IE_ASSOCRSP_FLAG 0x4 +#define VNDR_IE_AUTHRSP_FLAG 0x8 +#define VNDR_IE_PRBREQ_FLAG 0x10 +#define VNDR_IE_ASSOCREQ_FLAG 0x20 +#define VNDR_IE_CUSTOM_FLAG 0x100 + +#define VNDR_IE_INFO_HDR_LEN (sizeof(uint32)) + +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 pktflag; + vndr_ie_t vndr_ie_data; +} BWL_POST_PACKED_STRUCT vndr_ie_info_t; + +typedef BWL_PRE_PACKED_STRUCT struct { + int iecount; + vndr_ie_info_t vndr_ie_list[1]; +} BWL_POST_PACKED_STRUCT vndr_ie_buf_t; + +typedef BWL_PRE_PACKED_STRUCT struct { + char cmd[VNDR_IE_CMD_LEN]; + vndr_ie_buf_t vndr_ie_buffer; +} BWL_POST_PACKED_STRUCT vndr_ie_setbuf_t; + + + +typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_hdr { + 
struct ether_addr staAddr; + uint16 ieLen; +} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_hdr_t; + +typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_data { + sta_prbreq_wps_ie_hdr_t hdr; + uint8 ieData[1]; +} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_list { + uint32 totLen; + uint8 ieDataList[1]; +} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_list_t; + + +#ifdef WLMEDIA_TXFAILEVENT +typedef BWL_PRE_PACKED_STRUCT struct { + char dest[ETHER_ADDR_LEN]; + uint8 prio; + uint8 flags; + uint32 tsf_l; + uint32 tsf_h; + uint16 rates; + uint16 txstatus; +} BWL_POST_PACKED_STRUCT txfailinfo_t; +#endif + +#include + + +#define ASSERTLOG_CUR_VER 0x0100 +#define MAX_ASSRTSTR_LEN 64 + +typedef struct assert_record { + uint32 time; + uint8 seq_num; + char str[MAX_ASSRTSTR_LEN]; +} assert_record_t; + +typedef struct assertlog_results { + uint16 version; + uint16 record_len; + uint32 num; + assert_record_t logs[1]; +} assertlog_results_t; + +#define LOGRRC_FIX_LEN 8 +#define IOBUF_ALLOWED_NUM_OF_LOGREC(type, len) ((len - LOGRRC_FIX_LEN)/sizeof(type)) + + + + + +#define CHANIM_DISABLE 0 +#define CHANIM_DETECT 1 +#define CHANIM_ACT 2 +#define CHANIM_MODE_MAX 2 + + +#define APCS_IOCTL 1 +#define APCS_CHANIM 2 +#define APCS_CSTIMER 3 +#define APCS_BTA 4 + + +#define CHANIM_ACS_RECORD 10 + + +typedef struct { + bool valid; + uint8 trigger; + chanspec_t selected_chspc; + uint32 glitch_cnt; + uint8 ccastats; + uint timestamp; +} chanim_acs_record_t; + +typedef struct { + chanim_acs_record_t acs_record[CHANIM_ACS_RECORD]; + uint8 count; + uint timestamp; +} wl_acs_record_t; + + + +#define SMFS_VERSION 1 + +typedef struct wl_smfs_elem { + uint32 count; + uint16 code; +} wl_smfs_elem_t; + +typedef struct wl_smf_stats { + uint32 version; + uint16 length; + uint8 type; + uint8 codetype; + uint32 ignored_cnt; + uint32 malformed_cnt; + uint32 count_total; + wl_smfs_elem_t elem[1]; +} wl_smf_stats_t; + +#define WL_SMFSTATS_FIXED_LEN OFFSETOF(wl_smf_stats_t, elem); + +enum { + SMFS_CODETYPE_SC, + SMFS_CODETYPE_RC +}; + + +#define SMFS_CODE_MALFORMED 0xFFFE +#define SMFS_CODE_IGNORED 0xFFFD + +typedef enum smfs_type { + SMFS_TYPE_AUTH, + SMFS_TYPE_ASSOC, + SMFS_TYPE_REASSOC, + SMFS_TYPE_DISASSOC_TX, + SMFS_TYPE_DISASSOC_RX, + SMFS_TYPE_DEAUTH_TX, + SMFS_TYPE_DEAUTH_RX, + SMFS_TYPE_MAX +} smfs_type_t; + +#ifdef PHYMON + +#define PHYMON_VERSION 1 + +typedef struct wl_phycal_core_state { + + int16 tx_iqlocal_a; + int16 tx_iqlocal_b; + int8 tx_iqlocal_ci; + int8 tx_iqlocal_cq; + int8 tx_iqlocal_di; + int8 tx_iqlocal_dq; + int8 tx_iqlocal_ei; + int8 tx_iqlocal_eq; + int8 tx_iqlocal_fi; + int8 tx_iqlocal_fq; + + + int16 rx_iqcal_a; + int16 rx_iqcal_b; + + uint8 tx_iqlocal_pwridx; + uint32 papd_epsilon_table[64]; + int16 papd_epsilon_offset; + uint8 curr_tx_pwrindex; + int8 idle_tssi; + int8 est_tx_pwr; + int8 est_rx_pwr; + uint16 rx_gaininfo; + uint16 init_gaincode; + int8 estirr_tx; + int8 estirr_rx; + +} wl_phycal_core_state_t; + +typedef struct wl_phycal_state { + int version; + int8 num_phy_cores; + int8 curr_temperature; + chanspec_t chspec; + bool aci_state; + uint16 crsminpower; + uint16 crsminpowerl; + uint16 crsminpoweru; + wl_phycal_core_state_t phycal_core[1]; +} wl_phycal_state_t; + +#define WL_PHYCAL_STAT_FIXED_LEN OFFSETOF(wl_phycal_state_t, phycal_core) +#endif + +#ifdef WLDSTA +typedef struct wl_dsta_if { + struct ether_addr addr; +} wl_dsta_if_t; +#endif + +#ifdef WLP2P + +typedef struct wl_p2p_disc_st { + uint8 state; + chanspec_t chspec; + uint16 
dwell; +} wl_p2p_disc_st_t; + + +#define WL_P2P_DISC_ST_SCAN 0 +#define WL_P2P_DISC_ST_LISTEN 1 +#define WL_P2P_DISC_ST_SEARCH 2 + + +typedef struct wl_p2p_scan { + uint8 type; + uint8 reserved[3]; + +} wl_p2p_scan_t; + + +typedef struct wl_p2p_if { + struct ether_addr addr; + uint8 type; + chanspec_t chspec; +} wl_p2p_if_t; + + +#define WL_P2P_IF_CLIENT 0 +#define WL_P2P_IF_GO 1 +#define WL_P2P_IF_DYNBCN_GO 2 +#define WL_P2P_IF_DEV 3 + + +typedef struct wl_p2p_ifq { + uint bsscfgidx; + char ifname[BCM_MSG_IFNAME_MAX]; +} wl_p2p_ifq_t; + + +typedef struct wl_p2p_ops { + uint8 ops; + uint8 ctw; +} wl_p2p_ops_t; + + +typedef struct wl_p2p_sched_desc { + uint32 start; + uint32 interval; + uint32 duration; + uint32 count; +} wl_p2p_sched_desc_t; + + +#define WL_P2P_SCHED_RSVD 0 +#define WL_P2P_SCHED_REPEAT 255 + +typedef struct wl_p2p_sched { + uint8 type; + uint8 action; + uint8 option; + wl_p2p_sched_desc_t desc[1]; +} wl_p2p_sched_t; +#define WL_P2P_SCHED_FIXED_LEN 3 + + +#define WL_P2P_SCHED_TYPE_ABS 0 +#define WL_P2P_SCHED_TYPE_REQ_ABS 1 + + +#define WL_P2P_SCHED_ACTION_NONE 0 +#define WL_P2P_SCHED_ACTION_DOZE 1 + +#define WL_P2P_SCHED_ACTION_GOOFF 2 + +#define WL_P2P_SCHED_ACTION_RESET 255 + + +#define WL_P2P_SCHED_OPTION_NORMAL 0 +#define WL_P2P_SCHED_OPTION_BCNPCT 1 + +#define WL_P2P_SCHED_OPTION_TSFOFS 2 + + +#define WL_P2P_FEAT_GO_CSA (1 << 0) +#define WL_P2P_FEAT_GO_NOLEGACY (1 << 1) +#define WL_P2P_FEAT_RESTRICT_DEV_RESP (1 << 2) +#endif + + +#define BCM_ACTION_RFAWARE 0x77 +#define BCM_ACTION_RFAWARE_DCS 0x01 + + + +#define WL_11N_2x2 1 +#define WL_11N_3x3 3 +#define WL_11N_4x4 4 + + +#define WLFEATURE_DISABLE_11N 0x00000001 +#define WLFEATURE_DISABLE_11N_STBC_TX 0x00000002 +#define WLFEATURE_DISABLE_11N_STBC_RX 0x00000004 +#define WLFEATURE_DISABLE_11N_SGI_TX 0x00000008 +#define WLFEATURE_DISABLE_11N_SGI_RX 0x00000010 +#define WLFEATURE_DISABLE_11N_AMPDU_TX 0x00000020 +#define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040 +#define WLFEATURE_DISABLE_11N_GF 0x00000080 + + +#define LQ_IDX_LAST 3 +#define MCS_INDEX_SIZE 33 + +#define LQ_IDX_MIN 0 +#define LQ_IDX_MAX 1 +#define LQ_IDX_AVG 2 +#define LQ_IDX_SUM 2 +#define LQ_IDX_LAST 3 +#define LQ_STOP_MONITOR 0 +#define LQ_START_MONITOR 1 + +#define LINKQUAL_V1 0x01 + +struct wl_lq { + int32 enable; + int32 rssi[LQ_IDX_LAST]; + int32 rssicnt; + int32 snr[LQ_IDX_LAST]; + uint32 nsamples; + uint8 isvalid; + uint8 version; +}; + +typedef struct wl_lq wl_lq_t; +typedef struct wl_lq wl_lq_stats_t; + +typedef struct { + struct ether_addr ea; + uint8 ac_cat; + uint8 num_pkts; +} wl_mac_ratehisto_cmd_t; + + +typedef struct { + uint32 rate[WLC_MAXRATE + 1]; + uint32 mcs_index[MCS_INDEX_SIZE]; + uint32 tsf_timer[2][2]; +} wl_mac_ratehisto_res_t; + +#ifdef PROP_TXSTATUS + + +#define WLFC_FLAGS_RSSI_SIGNALS 1 + + +#define WLFC_FLAGS_XONXOFF_SIGNALS 2 + + +#define WLFC_FLAGS_CREDIT_STATUS_SIGNALS 4 + +#define WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE 8 +#define WLFC_FLAGS_PSQ_GENERATIONFSM_ENABLE 16 +#define WLFC_FLAGS_PSQ_ZERO_BUFFER_ENABLE 32 +#endif + +#define BTA_STATE_LOG_SZ 64 + + +enum { + HCIReset = 1, + HCIReadLocalAMPInfo, + HCIReadLocalAMPASSOC, + HCIWriteRemoteAMPASSOC, + HCICreatePhysicalLink, + HCIAcceptPhysicalLinkRequest, + HCIDisconnectPhysicalLink, + HCICreateLogicalLink, + HCIAcceptLogicalLink, + HCIDisconnectLogicalLink, + HCILogicalLinkCancel, + HCIAmpStateChange, + HCIWriteLogicalLinkAcceptTimeout +}; + +typedef struct flush_txfifo { + uint32 txfifobmp; + uint32 hwtxfifoflush; + struct ether_addr ea; +} flush_txfifo_t; + +#define 
CHANNEL_5G_LOW_START 36 +#define CHANNEL_5G_MID_START 52 +#define CHANNEL_5G_HIGH_START 100 +#define CHANNEL_5G_UPPER_START 149 + +enum { + SPATIAL_MODE_2G_IDX = 0, + SPATIAL_MODE_5G_LOW_IDX, + SPATIAL_MODE_5G_MID_IDX, + SPATIAL_MODE_5G_HIGH_IDX, + SPATIAL_MODE_5G_UPPER_IDX, + SPATIAL_MODE_MAX_IDX +}; + +#endif diff --git a/drivers/net/wireless/bcmdhd/linux_osl.c b/drivers/net/wireless/bcmdhd/linux_osl.c new file mode 100644 index 0000000000000..4ef7bf7b24dc7 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/linux_osl.c @@ -0,0 +1,924 @@ +/* + * Linux OS Independent Layer + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: linux_osl.c,v 1.168.2.7 2011-01-27 17:01:13 $ + */ + + +#define LINUX_PORT + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef BCMASSERT_LOG +#include +#endif + +#include + +#define PCI_CFG_RETRY 10 + +#define OS_HANDLE_MAGIC 0x1234abcd +#define BCM_MEM_FILENAME_LEN 24 + +#ifdef CONFIG_DHD_USE_STATIC_BUF +#define STATIC_BUF_MAX_NUM 16 +#define STATIC_BUF_SIZE (PAGE_SIZE * 2) +#define STATIC_BUF_TOTAL_LEN (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE) + +typedef struct bcm_static_buf { + struct semaphore static_sem; + unsigned char *buf_ptr; + unsigned char buf_use[STATIC_BUF_MAX_NUM]; +} bcm_static_buf_t; + +static bcm_static_buf_t *bcm_static_buf = 0; + +#define STATIC_PKT_MAX_NUM 8 + +typedef struct bcm_static_pkt { + struct sk_buff *skb_4k[STATIC_PKT_MAX_NUM]; + struct sk_buff *skb_8k[STATIC_PKT_MAX_NUM]; + struct semaphore osl_pkt_sem; + unsigned char pkt_use[STATIC_PKT_MAX_NUM * 2]; +} bcm_static_pkt_t; + +static bcm_static_pkt_t *bcm_static_skb = 0; +#endif + +typedef struct bcm_mem_link { + struct bcm_mem_link *prev; + struct bcm_mem_link *next; + uint size; + int line; + char file[BCM_MEM_FILENAME_LEN]; +} bcm_mem_link_t; + +struct osl_info { + osl_pubinfo_t pub; +#ifdef CTFPOOL + ctfpool_t *ctfpool; +#endif + uint magic; + void *pdev; + atomic_t malloced; + uint failed; + uint bustype; + bcm_mem_link_t *dbgmem_list; +}; + + + + +uint32 g_assert_type = FALSE; + +static int16 linuxbcmerrormap[] = +{ 0, + -EINVAL, + -EINVAL, + -EINVAL, + -EINVAL, + -EINVAL, + -EINVAL, + -EINVAL, + -EINVAL, + -EINVAL, + -EINVAL, + -EINVAL, + -EINVAL, + -EINVAL, + -E2BIG, + -E2BIG, + -EBUSY, + -EINVAL, + -EINVAL, + -EINVAL, + -EINVAL, + -EFAULT, + -ENOMEM, + -EOPNOTSUPP, + -EMSGSIZE, + -EINVAL, + -EPERM, + -ENOMEM, + -EINVAL, + -ERANGE, + -EINVAL, + -EINVAL, + -EINVAL, + 
-EINVAL, + -EINVAL, + -EIO, + -ENODEV, + -EINVAL, + -EIO, + -EIO, + -ENODEV, + -EINVAL, + -ENODATA, + + + +#if BCME_LAST != -42 +#error "You need to add a OS error translation in the linuxbcmerrormap \ + for new error code defined in bcmutils.h" +#endif +}; + + +int +osl_error(int bcmerror) +{ + if (bcmerror > 0) + bcmerror = 0; + else if (bcmerror < BCME_LAST) + bcmerror = BCME_ERROR; + + + return linuxbcmerrormap[-bcmerror]; +} + +extern uint8* dhd_os_prealloc(void *osh, int section, int size); + +osl_t * +osl_attach(void *pdev, uint bustype, bool pkttag) +{ + osl_t *osh; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + gfp_t flags; + + flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL; + osh = kmalloc(sizeof(osl_t), flags); +#else + osh = kmalloc(sizeof(osl_t), GFP_ATOMIC); +#endif + ASSERT(osh); + + bzero(osh, sizeof(osl_t)); + + + ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1)); + + osh->magic = OS_HANDLE_MAGIC; + atomic_set(&osh->malloced, 0); + osh->failed = 0; + osh->dbgmem_list = NULL; + osh->pdev = pdev; + osh->pub.pkttag = pkttag; + osh->bustype = bustype; + + switch (bustype) { + case PCI_BUS: + case SI_BUS: + case PCMCIA_BUS: + osh->pub.mmbus = TRUE; + break; + case JTAG_BUS: + case SDIO_BUS: + case USB_BUS: + case SPI_BUS: + case RPC_BUS: + osh->pub.mmbus = FALSE; + break; + default: + ASSERT(FALSE); + break; + } + +#if defined(CONFIG_DHD_USE_STATIC_BUF) + if (!bcm_static_buf) { + if (!(bcm_static_buf = (bcm_static_buf_t *)dhd_os_prealloc(osh, 3, STATIC_BUF_SIZE+ + STATIC_BUF_TOTAL_LEN))) { + printk("can not alloc static buf!\n"); + } + else + printk("alloc static buf at %x!\n", (unsigned int)bcm_static_buf); + + sema_init(&bcm_static_buf->static_sem, 1); + + bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE; + } + + if (!bcm_static_skb) { + int i; + void *skb_buff_ptr = 0; + bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048); + skb_buff_ptr = dhd_os_prealloc(osh, 4, 0); + + bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) * 16); + for (i = 0; i < STATIC_PKT_MAX_NUM * 2; i++) + bcm_static_skb->pkt_use[i] = 0; + + sema_init(&bcm_static_skb->osl_pkt_sem, 1); + } +#endif + + return osh; +} + +void +osl_detach(osl_t *osh) +{ + if (osh == NULL) + return; + + ASSERT(osh->magic == OS_HANDLE_MAGIC); + kfree(osh); +} + +static struct sk_buff *osl_alloc_skb(unsigned int len) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) + gfp_t flags = (in_atomic()) ? 
GFP_ATOMIC : GFP_KERNEL; + + return __dev_alloc_skb(len, flags); +#else + return dev_alloc_skb(len); +#endif +} + +#ifdef CTFPOOL + +void * +osl_ctfpool_add(osl_t *osh) +{ + struct sk_buff *skb; + + if ((osh == NULL) || (osh->ctfpool == NULL)) + return NULL; + + spin_lock_bh(&osh->ctfpool->lock); + ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj); + + + if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) { + spin_unlock_bh(&osh->ctfpool->lock); + return NULL; + } + + + skb = osl_alloc_skb(osh->ctfpool->obj_size); + if (skb == NULL) { + printf("%s: skb alloc of len %d failed\n", __FUNCTION__, + osh->ctfpool->obj_size); + spin_unlock_bh(&osh->ctfpool->lock); + return NULL; + } + + + skb->next = (struct sk_buff *)osh->ctfpool->head; + osh->ctfpool->head = skb; + osh->ctfpool->fast_frees++; + osh->ctfpool->curr_obj++; + + + CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool; + + + PKTFAST(osh, skb) = FASTBUF; + + spin_unlock_bh(&osh->ctfpool->lock); + + return skb; +} + + +void +osl_ctfpool_replenish(osl_t *osh, uint thresh) +{ + if ((osh == NULL) || (osh->ctfpool == NULL)) + return; + + + while ((osh->ctfpool->refills > 0) && (thresh--)) { + osl_ctfpool_add(osh); + osh->ctfpool->refills--; + } +} + + +int32 +osl_ctfpool_init(osl_t *osh, uint numobj, uint size) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + gfp_t flags; + + flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL; + osh->ctfpool = kmalloc(sizeof(ctfpool_t), flags); +#else + osh->ctfpool = kmalloc(sizeof(ctfpool_t), GFP_ATOMIC); +#endif + ASSERT(osh->ctfpool); + bzero(osh->ctfpool, sizeof(ctfpool_t)); + + osh->ctfpool->max_obj = numobj; + osh->ctfpool->obj_size = size; + + spin_lock_init(&osh->ctfpool->lock); + + while (numobj--) { + if (!osl_ctfpool_add(osh)) + return -1; + osh->ctfpool->fast_frees--; + } + + return 0; +} + + +void +osl_ctfpool_cleanup(osl_t *osh) +{ + struct sk_buff *skb, *nskb; + + if ((osh == NULL) || (osh->ctfpool == NULL)) + return; + + spin_lock_bh(&osh->ctfpool->lock); + + skb = osh->ctfpool->head; + + while (skb != NULL) { + nskb = skb->next; + dev_kfree_skb(skb); + skb = nskb; + osh->ctfpool->curr_obj--; + } + + ASSERT(osh->ctfpool->curr_obj == 0); + osh->ctfpool->head = NULL; + spin_unlock_bh(&osh->ctfpool->lock); + + kfree(osh->ctfpool); + osh->ctfpool = NULL; +} + +void +osl_ctfpool_stats(osl_t *osh, void *b) +{ + struct bcmstrbuf *bb; + + if ((osh == NULL) || (osh->ctfpool == NULL)) + return; + +#ifdef CONFIG_DHD_USE_STATIC_BUF + if (bcm_static_buf) { + bcm_static_buf = 0; + } + if (bcm_static_skb) { + bcm_static_skb = 0; + } +#endif + + bb = b; + + ASSERT((osh != NULL) && (bb != NULL)); + + bcm_bprintf(bb, "max_obj %d obj_size %d curr_obj %d refills %d\n", + osh->ctfpool->max_obj, osh->ctfpool->obj_size, + osh->ctfpool->curr_obj, osh->ctfpool->refills); + bcm_bprintf(bb, "fast_allocs %d fast_frees %d slow_allocs %d\n", + osh->ctfpool->fast_allocs, osh->ctfpool->fast_frees, + osh->ctfpool->slow_allocs); +} + +static inline struct sk_buff * +osl_pktfastget(osl_t *osh, uint len) +{ + struct sk_buff *skb; + + + if (osh->ctfpool == NULL) + return NULL; + + spin_lock_bh(&osh->ctfpool->lock); + if (osh->ctfpool->head == NULL) { + ASSERT(osh->ctfpool->curr_obj == 0); + osh->ctfpool->slow_allocs++; + spin_unlock_bh(&osh->ctfpool->lock); + return NULL; + } + + ASSERT(len <= osh->ctfpool->obj_size); + + + skb = (struct sk_buff *)osh->ctfpool->head; + osh->ctfpool->head = (void *)skb->next; + + osh->ctfpool->fast_allocs++; + osh->ctfpool->curr_obj--; + ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock 
*)osh->ctfpool->head); + spin_unlock_bh(&osh->ctfpool->lock); + + + skb->next = skb->prev = NULL; + skb->data = skb->head + 16; + skb->tail = skb->head + 16; + + skb->len = 0; + skb->cloned = 0; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14) + skb->list = NULL; +#endif + atomic_set(&skb->users, 1); + + return skb; +} +#endif + + +void * BCMFASTPATH +osl_pktget(osl_t *osh, uint len) +{ + struct sk_buff *skb; + +#ifdef CTFPOOL + skb = osl_pktfastget(osh, len); + if ((skb != NULL) || ((skb = osl_alloc_skb(len)) != NULL)) { +#else + if ((skb = osl_alloc_skb(len))) { +#endif + skb_put(skb, len); + skb->priority = 0; + + osh->pub.pktalloced++; + } + + return ((void*) skb); +} + +#ifdef CTFPOOL +static inline void +osl_pktfastfree(osl_t *osh, struct sk_buff *skb) +{ + ctfpool_t *ctfpool; + + ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb); + ASSERT(ctfpool != NULL); + + + spin_lock_bh(&ctfpool->lock); + skb->next = (struct sk_buff *)ctfpool->head; + ctfpool->head = (void *)skb; + + ctfpool->fast_frees++; + ctfpool->curr_obj++; + + ASSERT(ctfpool->curr_obj <= ctfpool->max_obj); + spin_unlock_bh(&ctfpool->lock); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14) + skb->tstamp.tv.sec = 0; +#else + skb->stamp.tv_sec = 0; +#endif + + + skb->dev = NULL; + skb->dst = NULL; + memset(skb->cb, 0, sizeof(skb->cb)); + skb->ip_summed = 0; + skb->destructor = NULL; +} +#endif + + +void BCMFASTPATH +osl_pktfree(osl_t *osh, void *p, bool send) +{ + struct sk_buff *skb, *nskb; + + skb = (struct sk_buff*) p; + + if (send && osh->pub.tx_fn) + osh->pub.tx_fn(osh->pub.tx_ctx, p, 0); + + + while (skb) { + nskb = skb->next; + skb->next = NULL; + + +#ifdef CTFPOOL + if (PKTISFAST(osh, skb)) + osl_pktfastfree(osh, skb); + else { +#else + { +#endif + + if (skb->destructor) + + dev_kfree_skb_any(skb); + else + + dev_kfree_skb(skb); + } + + osh->pub.pktalloced--; + + skb = nskb; + } +} + +#ifdef CONFIG_DHD_USE_STATIC_BUF +void * +osl_pktget_static(osl_t *osh, uint len) +{ + int i; + struct sk_buff *skb; + + if (!bcm_static_skb || (len > (PAGE_SIZE * 2))) { + printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len); + return osl_pktget(osh, len); + } + + down(&bcm_static_skb->osl_pkt_sem); + + if (len <= PAGE_SIZE) { + for (i = 0; i < STATIC_PKT_MAX_NUM; i++) { + if (bcm_static_skb->pkt_use[i] == 0) + break; + } + + if (i != STATIC_PKT_MAX_NUM) { + bcm_static_skb->pkt_use[i] = 1; + skb = bcm_static_skb->skb_4k[i]; + skb->tail = skb->data + len; + skb->len = len; + up(&bcm_static_skb->osl_pkt_sem); + return skb; + } + } + + + for (i = 0; i < STATIC_PKT_MAX_NUM; i++) { + if (bcm_static_skb->pkt_use[i+STATIC_PKT_MAX_NUM] == 0) + break; + } + + if (i != STATIC_PKT_MAX_NUM) { + bcm_static_skb->pkt_use[i+STATIC_PKT_MAX_NUM] = 1; + skb = bcm_static_skb->skb_8k[i]; + skb->tail = skb->data + len; + skb->len = len; + up(&bcm_static_skb->osl_pkt_sem); + return skb; + } + + up(&bcm_static_skb->osl_pkt_sem); + printk("%s: all static pkt in use!\n", __FUNCTION__); + return osl_pktget(osh, len); +} + +void +osl_pktfree_static(osl_t *osh, void *p, bool send) +{ + int i; + + if (!bcm_static_skb) { + osl_pktfree(osh, p, send); + return; + } + + down(&bcm_static_skb->osl_pkt_sem); + for (i = 0; i < STATIC_PKT_MAX_NUM; i++) { + if (p == bcm_static_skb->skb_4k[i]) { + bcm_static_skb->pkt_use[i] = 0; + up(&bcm_static_skb->osl_pkt_sem); + return; + } + } + + for (i = 0; i < STATIC_PKT_MAX_NUM; i++) { + if (p == bcm_static_skb->skb_8k[i]) { + bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 0; + 
up(&bcm_static_skb->osl_pkt_sem); + return; + } + } + up(&bcm_static_skb->osl_pkt_sem); + + osl_pktfree(osh, p, send); + return; +} +#endif + +uint32 +osl_pci_read_config(osl_t *osh, uint offset, uint size) +{ + uint val = 0; + uint retry = PCI_CFG_RETRY; + + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + + + ASSERT(size == 4); + + do { + pci_read_config_dword(osh->pdev, offset, &val); + if (val != 0xffffffff) + break; + } while (retry--); + + + return (val); +} + +void +osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val) +{ + uint retry = PCI_CFG_RETRY; + + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + + + ASSERT(size == 4); + + do { + pci_write_config_dword(osh->pdev, offset, val); + if (offset != PCI_BAR0_WIN) + break; + if (osl_pci_read_config(osh, offset, size) == val) + break; + } while (retry--); + +} + + +uint +osl_pci_bus(osl_t *osh) +{ + ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); + + return ((struct pci_dev *)osh->pdev)->bus->number; +} + + +uint +osl_pci_slot(osl_t *osh) +{ + ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); + + return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn); +} + +static void +osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write) +{ +} + +void +osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size) +{ + osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE); +} + +void +osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size) +{ + osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE); +} + +void * +osl_malloc(osl_t *osh, uint size) +{ + void *addr; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + gfp_t flags; + + + if (osh) + ASSERT(osh->magic == OS_HANDLE_MAGIC); + + flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL; + if ((addr = kmalloc(size, flags)) == NULL) { +#else + if ((addr = kmalloc(size, GFP_ATOMIC)) == NULL) { +#endif + if (osh) + osh->failed++; + return (NULL); + } + if (osh) + atomic_add(size, &osh->malloced); + + return (addr); +} + +void +osl_mfree(osl_t *osh, void *addr, uint size) +{ + if (osh) { + ASSERT(osh->magic == OS_HANDLE_MAGIC); + atomic_sub(size, &osh->malloced); + } + kfree(addr); +} + +uint +osl_malloced(osl_t *osh) +{ + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + return (atomic_read(&osh->malloced)); +} + +uint +osl_malloc_failed(osl_t *osh) +{ + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + return (osh->failed); +} + + + +uint +osl_dma_consistent_align(void) +{ + return (PAGE_SIZE); +} + +void* +osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, ulong *pap) +{ + uint16 align = (1 << align_bits); + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + + if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align)) + size += align; + *alloced = size; + + return (pci_alloc_consistent(osh->pdev, size, (dma_addr_t*)pap)); +} + +void +osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa) +{ + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + + pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa); +} + +uint BCMFASTPATH +osl_dma_map(osl_t *osh, void *va, uint size, int direction) +{ + int dir; + + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE; + return (pci_map_single(osh->pdev, va, size, dir)); +} + +void BCMFASTPATH +osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction) +{ + int dir; + + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + dir = (direction == DMA_TX)? 
PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE; + pci_unmap_single(osh->pdev, (uint32)pa, size, dir); +} + +#if defined(BCMASSERT_LOG) +void +osl_assert(char *exp, char *file, int line) +{ + char tempbuf[256]; + char *basename; + + basename = strrchr(file, '/'); + + if (basename) + basename++; + + if (!basename) + basename = file; + +#ifdef BCMASSERT_LOG + snprintf(tempbuf, 64, "\"%s\": file \"%s\", line %d\n", + exp, basename, line); + + bcm_assert_log(tempbuf); +#endif + + +} +#endif + +void +osl_delay(uint usec) +{ + uint d; + + while (usec > 0) { + d = MIN(usec, 1000); + udelay(d); + usec -= d; + } +} + + + +void * +osl_pktdup(osl_t *osh, void *skb) +{ + void * p; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + gfp_t flags; + + flags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL; + if ((p = skb_clone((struct sk_buff *)skb, flags)) == NULL) +#else + if ((p = skb_clone((struct sk_buff*)skb, GFP_ATOMIC)) == NULL) +#endif + return NULL; + +#ifdef CTFPOOL + if (PKTISFAST(osh, skb)) { + ctfpool_t *ctfpool; + + + ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb); + ASSERT(ctfpool != NULL); + PKTCLRFAST(osh, p); + PKTCLRFAST(osh, skb); + ctfpool->refills++; + } +#endif + + + if (osh->pub.pkttag) + bzero((void*)((struct sk_buff *)p)->cb, OSL_PKTTAG_SZ); + + + osh->pub.pktalloced++; + return (p); +} + + + + + + + +void * +osl_os_open_image(char *filename) +{ + struct file *fp; + + fp = filp_open(filename, O_RDONLY, 0); + + if (IS_ERR(fp)) + fp = NULL; + + return fp; +} + +int +osl_os_get_image_block(char *buf, int len, void *image) +{ + struct file *fp = (struct file *)image; + int rdlen; + + if (!image) + return 0; + + rdlen = kernel_read(fp, fp->f_pos, buf, len); + if (rdlen > 0) + fp->f_pos += rdlen; + + return rdlen; +} + +void +osl_os_close_image(void *image) +{ + if (image) + filp_close((struct file *)image, NULL); +} diff --git a/drivers/net/wireless/bcmdhd/sbutils.c b/drivers/net/wireless/bcmdhd/sbutils.c new file mode 100644 index 0000000000000..02d1bc0a79d1a --- /dev/null +++ b/drivers/net/wireless/bcmdhd/sbutils.c @@ -0,0 +1,992 @@ +/* + * Misc utility routines for accessing chip-specific features + * of the SiliconBackplane-based Broadcom chips. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: sbutils.c,v 1.687.2.1 2010-11-29 20:21:56 Exp $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "siutils_priv.h" + + +/* local prototypes */ +static uint _sb_coreidx(si_info_t *sii, uint32 sba); +static uint _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, + uint ncores); +static uint32 _sb_coresba(si_info_t *sii); +static void *_sb_setcoreidx(si_info_t *sii, uint coreidx); + +#define SET_SBREG(sii, r, mask, val) \ + W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val))) +#define REGS2SB(va) (sbconfig_t*) ((int8*)(va) + SBCONFIGOFF) + +/* sonicsrev */ +#define SONICS_2_2 (SBIDL_RV_2_2 >> SBIDL_RV_SHIFT) +#define SONICS_2_3 (SBIDL_RV_2_3 >> SBIDL_RV_SHIFT) + +#define R_SBREG(sii, sbr) sb_read_sbreg((sii), (sbr)) +#define W_SBREG(sii, sbr, v) sb_write_sbreg((sii), (sbr), (v)) +#define AND_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v))) +#define OR_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v))) + +static uint32 +sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr) +{ + uint8 tmp; + uint32 val, intr_val = 0; + + + /* + * compact flash only has 11 bits address, while we needs 12 bits address. + * MEM_SEG will be OR'd with other 11 bits address in hardware, + * so we program MEM_SEG with 12th bit when necessary(access sb regsiters). + * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special + */ + if (PCMCIA(sii)) { + INTR_OFF(sii, intr_val); + tmp = 1; + OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1); + sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */ + } + + val = R_REG(sii->osh, sbr); + + if (PCMCIA(sii)) { + tmp = 0; + OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1); + INTR_RESTORE(sii, intr_val); + } + + return (val); +} + +static void +sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v) +{ + uint8 tmp; + volatile uint32 dummy; + uint32 intr_val = 0; + + + /* + * compact flash only has 11 bits address, while we needs 12 bits address. + * MEM_SEG will be OR'd with other 11 bits address in hardware, + * so we program MEM_SEG with 12th bit when necessary(access sb regsiters). 
+ * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special + */ + if (PCMCIA(sii)) { + INTR_OFF(sii, intr_val); + tmp = 1; + OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1); + sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */ + } + + if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) { + dummy = R_REG(sii->osh, sbr); + W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff)); + dummy = R_REG(sii->osh, sbr); + W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff)); + } else + W_REG(sii->osh, sbr, v); + + if (PCMCIA(sii)) { + tmp = 0; + OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1); + INTR_RESTORE(sii, intr_val); + } +} + +uint +sb_coreid(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT); +} + +uint +sb_intflag(si_t *sih) +{ + si_info_t *sii; + void *corereg; + sbconfig_t *sb; + uint origidx, intflag, intr_val = 0; + + sii = SI_INFO(sih); + + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + corereg = si_setcore(sih, CC_CORE_ID, 0); + ASSERT(corereg != NULL); + sb = REGS2SB(corereg); + intflag = R_SBREG(sii, &sb->sbflagst); + sb_setcoreidx(sih, origidx); + INTR_RESTORE(sii, intr_val); + + return intflag; +} + +uint +sb_flag(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK; +} + +void +sb_setint(si_t *sih, int siflag) +{ + si_info_t *sii; + sbconfig_t *sb; + uint32 vec; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + if (siflag == -1) + vec = 0; + else + vec = 1 << siflag; + W_SBREG(sii, &sb->sbintvec, vec); +} + +/* return core index of the core with address 'sba' */ +static uint +_sb_coreidx(si_info_t *sii, uint32 sba) +{ + uint i; + + for (i = 0; i < sii->numcores; i ++) + if (sba == sii->coresba[i]) + return i; + return BADIDX; +} + +/* return core address of the current core */ +static uint32 +_sb_coresba(si_info_t *sii) +{ + uint32 sbaddr; + + + switch (BUSTYPE(sii->pub.bustype)) { + case SI_BUS: { + sbconfig_t *sb = REGS2SB(sii->curmap); + sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0)); + break; + } + + case PCI_BUS: + sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32)); + break; + + case PCMCIA_BUS: { + uint8 tmp = 0; + OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1); + sbaddr = (uint32)tmp << 12; + OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1); + sbaddr |= (uint32)tmp << 16; + OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1); + sbaddr |= (uint32)tmp << 24; + break; + } + + case SPI_BUS: + case SDIO_BUS: + sbaddr = (uint32)(uintptr)sii->curmap; + break; + + + default: + sbaddr = BADCOREADDR; + break; + } + + return sbaddr; +} + +uint +sb_corevendor(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT); +} + +uint +sb_corerev(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + uint sbidh; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + sbidh = R_SBREG(sii, &sb->sbidhigh); + + return (SBCOREREV(sbidh)); +} + +/* set core-specific control flags */ +void +sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii; + sbconfig_t *sb; + uint32 w; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + ASSERT((val & ~mask) == 0); + + /* mask and set */ + w = (R_SBREG(sii, &sb->sbtmstatelow) & 
~(mask << SBTML_SICF_SHIFT)) | + (val << SBTML_SICF_SHIFT); + W_SBREG(sii, &sb->sbtmstatelow, w); +} + +/* set/clear core-specific control flags */ +uint32 +sb_core_cflags(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii; + sbconfig_t *sb; + uint32 w; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + ASSERT((val & ~mask) == 0); + + /* mask and set */ + if (mask || val) { + w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) | + (val << SBTML_SICF_SHIFT); + W_SBREG(sii, &sb->sbtmstatelow, w); + } + + /* return the new value + * for write operation, the following readback ensures the completion of write opration. + */ + return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT); +} + +/* set/clear core-specific status flags */ +uint32 +sb_core_sflags(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii; + sbconfig_t *sb; + uint32 w; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + ASSERT((val & ~mask) == 0); + ASSERT((mask & ~SISF_CORE_BITS) == 0); + + /* mask and set */ + if (mask || val) { + w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) | + (val << SBTMH_SISF_SHIFT); + W_SBREG(sii, &sb->sbtmstatehigh, w); + } + + /* return the new value */ + return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT); +} + +bool +sb_iscoreup(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + return ((R_SBREG(sii, &sb->sbtmstatelow) & + (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) == + (SICF_CLOCK_EN << SBTML_SICF_SHIFT)); +} + +/* + * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation, + * switch back to the original core, and return the new value. + * + * When using the silicon backplane, no fidleing with interrupts or core switches are needed. + * + * Also, when using pci/pcie, we can optimize away the core switching for pci registers + * and (on newer pci cores) chipcommon registers. + */ +uint +sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) +{ + uint origidx = 0; + uint32 *r = NULL; + uint w; + uint intr_val = 0; + bool fast = FALSE; + si_info_t *sii; + + sii = SI_INFO(sih); + + ASSERT(GOODIDX(coreidx)); + ASSERT(regoff < SI_CORE_SIZE); + ASSERT((val & ~mask) == 0); + + if (coreidx >= SI_MAXCORES) + return 0; + + if (BUSTYPE(sii->pub.bustype) == SI_BUS) { + /* If internal bus, we can always get at everything */ + fast = TRUE; + /* map if does not exist */ + if (!sii->regs[coreidx]) { + sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx], + SI_CORE_SIZE); + ASSERT(GOODREGS(sii->regs[coreidx])); + } + r = (uint32 *)((uchar *)sii->regs[coreidx] + regoff); + } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */ + + if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) { + /* Chipc registers are mapped at 12KB */ + + fast = TRUE; + r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff); + } else if (sii->pub.buscoreidx == coreidx) { + /* pci registers are at either in the last 2KB of an 8KB window + * or, in pcie and pci rev 13 at 8KB + */ + fast = TRUE; + if (SI_FAST(sii)) + r = (uint32 *)((char *)sii->curmap + + PCI_16KB0_PCIREGS_OFFSET + regoff); + else + r = (uint32 *)((char *)sii->curmap + + ((regoff >= SBCONFIGOFF) ? 
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + + regoff); + } + } + + if (!fast) { + INTR_OFF(sii, intr_val); + + /* save current core index */ + origidx = si_coreidx(&sii->pub); + + /* switch core */ + r = (uint32*) ((uchar*)sb_setcoreidx(&sii->pub, coreidx) + regoff); + } + ASSERT(r != NULL); + + /* mask and set */ + if (mask || val) { + if (regoff >= SBCONFIGOFF) { + w = (R_SBREG(sii, r) & ~mask) | val; + W_SBREG(sii, r, w); + } else { + w = (R_REG(sii->osh, r) & ~mask) | val; + W_REG(sii->osh, r, w); + } + } + + /* readback */ + if (regoff >= SBCONFIGOFF) + w = R_SBREG(sii, r); + else { + if ((CHIPID(sii->pub.chip) == BCM5354_CHIP_ID) && + (coreidx == SI_CC_IDX) && + (regoff == OFFSETOF(chipcregs_t, watchdog))) { + w = val; + } else + w = R_REG(sii->osh, r); + } + + if (!fast) { + /* restore core index */ + if (origidx != coreidx) + sb_setcoreidx(&sii->pub, origidx); + + INTR_RESTORE(sii, intr_val); + } + + return (w); +} + +/* Scan the enumeration space to find all cores starting from the given + * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba' + * is the default core address at chip POR time and 'regs' is the virtual + * address that the default core is mapped at. 'ncores' is the number of + * cores expected on bus 'sbba'. It returns the total number of cores + * starting from bus 'sbba', inclusive. + */ +#define SB_MAXBUSES 2 +static uint +_sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, uint numcores) +{ + uint next; + uint ncc = 0; + uint i; + + if (bus >= SB_MAXBUSES) { + SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus)); + return 0; + } + SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores)); + + /* Scan all cores on the bus starting from core 0. + * Core addresses must be contiguous on each bus. + */ + for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) { + sii->coresba[next] = sbba + (i * SI_CORE_SIZE); + + /* keep and reuse the initial register mapping */ + if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (sii->coresba[next] == sba)) { + SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next)); + sii->regs[next] = regs; + } + + /* change core to 'next' and read its coreid */ + sii->curmap = _sb_setcoreidx(sii, next); + sii->curidx = next; + + sii->coreid[next] = sb_coreid(&sii->pub); + + /* core specific processing... */ + /* chipc provides # cores */ + if (sii->coreid[next] == CC_CORE_ID) { + chipcregs_t *cc = (chipcregs_t *)sii->curmap; + uint32 ccrev = sb_corerev(&sii->pub); + + /* determine numcores - this is the total # cores in the chip */ + if (((ccrev == 4) || (ccrev >= 6))) + numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >> + CID_CC_SHIFT; + else { + /* Older chips */ + uint chip = CHIPID(sii->pub.chip); + + if (chip == BCM4306_CHIP_ID) /* < 4306c0 */ + numcores = 6; + else if (chip == BCM4704_CHIP_ID) + numcores = 9; + else if (chip == BCM5365_CHIP_ID) + numcores = 7; + else { + SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n", + chip)); + ASSERT(0); + numcores = 1; + } + } + SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores, + sii->pub.issim ? 
"QT" : "")); + } + /* scan bridged SB(s) and add results to the end of the list */ + else if (sii->coreid[next] == OCP_CORE_ID) { + sbconfig_t *sb = REGS2SB(sii->curmap); + uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1); + uint nsbcc; + + sii->numcores = next + 1; + + if ((nsbba & 0xfff00000) != SI_ENUM_BASE) + continue; + nsbba &= 0xfffff000; + if (_sb_coreidx(sii, nsbba) != BADIDX) + continue; + + nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16; + nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc); + if (sbba == SI_ENUM_BASE) + numcores -= nsbcc; + ncc += nsbcc; + } + } + + SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba)); + + sii->numcores = i + ncc; + return sii->numcores; +} + +/* scan the sb enumerated space to identify all cores */ +void +sb_scan(si_t *sih, void *regs, uint devid) +{ + si_info_t *sii; + uint32 origsba; + sbconfig_t *sb; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT; + + /* Save the current core info and validate it later till we know + * for sure what is good and what is bad. + */ + origsba = _sb_coresba(sii); + + /* scan all SB(s) starting from SI_ENUM_BASE */ + sii->numcores = _sb_scan(sii, origsba, regs, 0, SI_ENUM_BASE, 1); +} + +/* + * This function changes logical "focus" to the indicated core; + * must be called with interrupts off. + * Moreover, callers should keep interrupts off during switching out of and back to d11 core + */ +void * +sb_setcoreidx(si_t *sih, uint coreidx) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + + if (coreidx >= sii->numcores) + return (NULL); + + /* + * If the user has provided an interrupt mask enabled function, + * then assert interrupts are disabled before switching the core. + */ + ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg)); + + sii->curmap = _sb_setcoreidx(sii, coreidx); + sii->curidx = coreidx; + + return (sii->curmap); +} + +/* This function changes the logical "focus" to the indicated core. + * Return the current core's virtual address. 
+ */ +static void * +_sb_setcoreidx(si_info_t *sii, uint coreidx) +{ + uint32 sbaddr = sii->coresba[coreidx]; + void *regs; + + switch (BUSTYPE(sii->pub.bustype)) { + case SI_BUS: + /* map new one */ + if (!sii->regs[coreidx]) { + sii->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE); + ASSERT(GOODREGS(sii->regs[coreidx])); + } + regs = sii->regs[coreidx]; + break; + + case PCI_BUS: + /* point bar0 window */ + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr); + regs = sii->curmap; + break; + + case PCMCIA_BUS: { + uint8 tmp = (sbaddr >> 12) & 0x0f; + OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1); + tmp = (sbaddr >> 16) & 0xff; + OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1); + tmp = (sbaddr >> 24) & 0xff; + OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1); + regs = sii->curmap; + break; + } + case SPI_BUS: + case SDIO_BUS: + /* map new one */ + if (!sii->regs[coreidx]) { + sii->regs[coreidx] = (void *)(uintptr)sbaddr; + ASSERT(GOODREGS(sii->regs[coreidx])); + } + regs = sii->regs[coreidx]; + break; + + + default: + ASSERT(0); + regs = NULL; + break; + } + + return regs; +} + +/* Return the address of sbadmatch0/1/2/3 register */ +static volatile uint32 * +sb_admatch(si_info_t *sii, uint asidx) +{ + sbconfig_t *sb; + volatile uint32 *addrm; + + sb = REGS2SB(sii->curmap); + + switch (asidx) { + case 0: + addrm = &sb->sbadmatch0; + break; + + case 1: + addrm = &sb->sbadmatch1; + break; + + case 2: + addrm = &sb->sbadmatch2; + break; + + case 3: + addrm = &sb->sbadmatch3; + break; + + default: + SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx)); + return 0; + } + + return (addrm); +} + +/* Return the number of address spaces in current core */ +int +sb_numaddrspaces(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + /* + 1 because of enumeration space */ + return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1; +} + +/* Return the address of the nth address space in the current core */ +uint32 +sb_addrspace(si_t *sih, uint asidx) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + + return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx)))); +} + +/* Return the size of the nth address space in the current core */ +uint32 +sb_addrspacesize(si_t *sih, uint asidx) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + + return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx)))); +} + + +/* do buffered registers update */ +void +sb_commit(si_t *sih) +{ + si_info_t *sii; + uint origidx; + uint intr_val = 0; + + sii = SI_INFO(sih); + + origidx = sii->curidx; + ASSERT(GOODIDX(origidx)); + + INTR_OFF(sii, intr_val); + + /* switch over to chipcommon core if there is one, else use pci */ + if (sii->pub.ccrev != NOREV) { + chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); + ASSERT(ccregs != NULL); + + /* do the buffer registers update */ + W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT); + W_REG(sii->osh, &ccregs->broadcastdata, 0x0); + } else + ASSERT(0); + + /* restore core index */ + sb_setcoreidx(sih, origidx); + INTR_RESTORE(sii, intr_val); +} + +void +sb_core_disable(si_t *sih, uint32 bits) +{ + si_info_t *sii; + volatile uint32 dummy; + sbconfig_t *sb; + + sii = SI_INFO(sih); + + ASSERT(GOODREGS(sii->curmap)); + sb = REGS2SB(sii->curmap); + + /* if core is already in reset, just return */ + if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET) + return; + + /* if clocks are not enabled, put into reset and return */ + if ((R_SBREG(sii, &sb->sbtmstatelow) & 
(SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0) + goto disable; + + /* set target reject and spin until busy is clear (preserve core-specific bits) */ + OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ); + dummy = R_SBREG(sii, &sb->sbtmstatelow); + OSL_DELAY(1); + SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000); + if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY) + SI_ERROR(("%s: target state still busy\n", __FUNCTION__)); + + if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) { + OR_SBREG(sii, &sb->sbimstate, SBIM_RJ); + dummy = R_SBREG(sii, &sb->sbimstate); + OSL_DELAY(1); + SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000); + } + + /* set reset and reject while enabling the clocks */ + W_SBREG(sii, &sb->sbtmstatelow, + (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) | + SBTML_REJ | SBTML_RESET)); + dummy = R_SBREG(sii, &sb->sbtmstatelow); + OSL_DELAY(10); + + /* don't forget to clear the initiator reject bit */ + if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) + AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ); + +disable: + /* leave reset and reject asserted */ + W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET)); + OSL_DELAY(1); +} + +/* reset and re-enable a core + * inputs: + * bits - core specific bits that are set during and after reset sequence + * resetbits - core specific bits that are set only during reset sequence + */ +void +sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits) +{ + si_info_t *sii; + sbconfig_t *sb; + volatile uint32 dummy; + + sii = SI_INFO(sih); + ASSERT(GOODREGS(sii->curmap)); + sb = REGS2SB(sii->curmap); + + /* + * Must do the disable sequence first to work for arbitrary current core state. + */ + sb_core_disable(sih, (bits | resetbits)); + + /* + * Now do the initialization sequence. + */ + + /* set reset while enabling the clock and forcing them on throughout the core */ + W_SBREG(sii, &sb->sbtmstatelow, + (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) | + SBTML_RESET)); + dummy = R_SBREG(sii, &sb->sbtmstatelow); + OSL_DELAY(1); + + if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) { + W_SBREG(sii, &sb->sbtmstatehigh, 0); + } + if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) { + AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO)); + } + + /* clear reset and allow it to propagate throughout the core */ + W_SBREG(sii, &sb->sbtmstatelow, + ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT)); + dummy = R_SBREG(sii, &sb->sbtmstatelow); + OSL_DELAY(1); + + /* leave clock enabled */ + W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT)); + dummy = R_SBREG(sii, &sb->sbtmstatelow); + OSL_DELAY(1); +} + +/* + * Set the initiator timeout for the "master core". + * The master core is defined to be the core in control + * of the chip and so it issues accesses to non-memory + * locations (Because of dma *any* core can access memeory). + * + * The routine uses the bus to decide who is the master: + * SI_BUS => mips + * JTAG_BUS => chipc + * PCI_BUS => pci or pcie + * PCMCIA_BUS => pcmcia + * SDIO_BUS => pcmcia + * + * This routine exists so callers can disable initiator + * timeouts so accesses to very slow devices like otp + * won't cause an abort. The routine allows arbitrary + * settings of the service and request timeouts, though. + * + * Returns the timeout state before changing it or -1 + * on error. 
+ */ + +#define TO_MASK (SBIMCL_RTO_MASK | SBIMCL_STO_MASK) + +uint32 +sb_set_initiator_to(si_t *sih, uint32 to, uint idx) +{ + si_info_t *sii; + uint origidx; + uint intr_val = 0; + uint32 tmp, ret = 0xffffffff; + sbconfig_t *sb; + + sii = SI_INFO(sih); + + if ((to & ~TO_MASK) != 0) + return ret; + + /* Figure out the master core */ + if (idx == BADIDX) { + switch (BUSTYPE(sii->pub.bustype)) { + case PCI_BUS: + idx = sii->pub.buscoreidx; + break; + case JTAG_BUS: + idx = SI_CC_IDX; + break; + case PCMCIA_BUS: + case SDIO_BUS: + idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0); + break; + case SI_BUS: + idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0); + break; + default: + ASSERT(0); + } + if (idx == BADIDX) + return ret; + } + + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + sb = REGS2SB(sb_setcoreidx(sih, idx)); + + tmp = R_SBREG(sii, &sb->sbimconfiglow); + ret = tmp & TO_MASK; + W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to); + + sb_commit(sih); + sb_setcoreidx(sih, origidx); + INTR_RESTORE(sii, intr_val); + return ret; +} + +uint32 +sb_base(uint32 admatch) +{ + uint32 base; + uint type; + + type = admatch & SBAM_TYPE_MASK; + ASSERT(type < 3); + + base = 0; + + if (type == 0) { + base = admatch & SBAM_BASE0_MASK; + } else if (type == 1) { + ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */ + base = admatch & SBAM_BASE1_MASK; + } else if (type == 2) { + ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */ + base = admatch & SBAM_BASE2_MASK; + } + + return (base); +} + +uint32 +sb_size(uint32 admatch) +{ + uint32 size; + uint type; + + type = admatch & SBAM_TYPE_MASK; + ASSERT(type < 3); + + size = 0; + + if (type == 0) { + size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1); + } else if (type == 1) { + ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */ + size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1); + } else if (type == 2) { + ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */ + size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1); + } + + return (size); +} diff --git a/drivers/net/wireless/bcmdhd/siutils.c b/drivers/net/wireless/bcmdhd/siutils.c new file mode 100644 index 0000000000000..a655ac4ef1411 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/siutils.c @@ -0,0 +1,1915 @@ +/* + * Misc utility routines for accessing chip-specific features + * of the SiliconBackplane-based Broadcom chips. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: siutils.c,v 1.813.2.36 2011-02-10 23:43:55 $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "siutils_priv.h" + +/* local prototypes */ +static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, + uint bustype, void *sdh, char **vars, uint *varsz); +static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh); +static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, + uint *origidx, void *regs); + + +/* global variable to indicate reservation/release of gpio's */ +static uint32 si_gpioreservation = 0; + +/* global flag to prevent shared resources from being initialized multiple times in si_attach() */ + +/* + * Allocate a si handle. + * devid - pci device id (used to determine chip#) + * osh - opaque OS handle + * regs - virtual address of initial core registers + * bustype - pci/pcmcia/sb/sdio/etc + * vars - pointer to a pointer area for "environment" variables + * varsz - pointer to int to return the size of the vars + */ +si_t * +si_attach(uint devid, osl_t *osh, void *regs, + uint bustype, void *sdh, char **vars, uint *varsz) +{ + si_info_t *sii; + + /* alloc si_info_t */ + if ((sii = MALLOC(osh, sizeof (si_info_t))) == NULL) { + SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh))); + return (NULL); + } + + if (si_doattach(sii, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) { + MFREE(osh, sii, sizeof(si_info_t)); + return (NULL); + } + sii->vars = vars ? *vars : NULL; + sii->varsz = varsz ? *varsz : 0; + + return (si_t *)sii; +} + +/* global kernel resource */ +static si_info_t ksii; + +static uint32 wd_msticks; /* watchdog timer ticks normalized to ms */ + +/* generic kernel variant of si_attach() */ +si_t * +si_kattach(osl_t *osh) +{ + static bool ksii_attached = FALSE; + + if (!ksii_attached) { + void *regs; + regs = REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE); + + if (si_doattach(&ksii, BCM4710_DEVICE_ID, osh, regs, + SI_BUS, NULL, + osh != SI_OSH ? &ksii.vars : NULL, + osh != SI_OSH ? &ksii.varsz : NULL) == NULL) { + SI_ERROR(("si_kattach: si_doattach failed\n")); + REG_UNMAP(regs); + return NULL; + } + REG_UNMAP(regs); + + /* save ticks normalized to ms for si_watchdog_ms() */ + if (PMUCTL_ENAB(&ksii.pub)) { + /* based on 32KHz ILP clock */ + wd_msticks = 32; + } else { + wd_msticks = ALP_CLOCK / 1000; + } + + ksii_attached = TRUE; + SI_MSG(("si_kattach done. 
ccrev = %d, wd_msticks = %d\n", + ksii.pub.ccrev, wd_msticks)); + } + + return &ksii.pub; +} + + +static bool +si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh) +{ + /* need to set memseg flag for CF card first before any sb registers access */ + if (BUSTYPE(bustype) == PCMCIA_BUS) + sii->memseg = TRUE; + + + if (BUSTYPE(bustype) == SDIO_BUS) { + int err; + uint8 clkset; + + /* Try forcing SDIO core to do ALPAvail request only */ + clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err); + if (!err) { + uint8 clkval; + + /* If register supported, wait for ALPAvail and then force ALP */ + clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, NULL); + if ((clkval & ~SBSDIO_AVBITS) == clkset) { + SPINWAIT(((clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, NULL)), !SBSDIO_ALPAV(clkval)), + PMU_MAX_TRANSITION_DLY); + if (!SBSDIO_ALPAV(clkval)) { + SI_ERROR(("timeout on ALPAV wait, clkval 0x%02x\n", + clkval)); + return FALSE; + } + clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + clkset, &err); + OSL_DELAY(65); + } + } + + /* Also, disable the extra SDIO pull-ups */ + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL); + } + + + return TRUE; +} + +static bool +si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, + uint *origidx, void *regs) +{ + bool pci, pcie; + uint i; + uint pciidx, pcieidx, pcirev, pcierev; + + cc = si_setcoreidx(&sii->pub, SI_CC_IDX); + ASSERT((uintptr)cc); + + /* get chipcommon rev */ + sii->pub.ccrev = (int)si_corerev(&sii->pub); + + /* get chipcommon chipstatus */ + if (sii->pub.ccrev >= 11) + sii->pub.chipst = R_REG(sii->osh, &cc->chipstatus); + + /* get chipcommon capabilites */ + sii->pub.cccaps = R_REG(sii->osh, &cc->capabilities); + /* get chipcommon extended capabilities */ + + if (sii->pub.ccrev >= 35) + sii->pub.cccaps_ext = R_REG(sii->osh, &cc->capabilities_ext); + + /* get pmu rev and caps */ + if (sii->pub.cccaps & CC_CAP_PMU) { + sii->pub.pmucaps = R_REG(sii->osh, &cc->pmucapabilities); + sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK; + } + + SI_MSG(("Chipc: rev %d, caps 0x%x, chipst 0x%x pmurev %d, pmucaps 0x%x\n", + sii->pub.ccrev, sii->pub.cccaps, sii->pub.chipst, sii->pub.pmurev, + sii->pub.pmucaps)); + + /* figure out bus/orignal core idx */ + sii->pub.buscoretype = NODEV_CORE_ID; + sii->pub.buscorerev = (uint)NOREV; + sii->pub.buscoreidx = BADIDX; + + pci = pcie = FALSE; + pcirev = pcierev = (uint)NOREV; + pciidx = pcieidx = BADIDX; + + for (i = 0; i < sii->numcores; i++) { + uint cid, crev; + + si_setcoreidx(&sii->pub, i); + cid = si_coreid(&sii->pub); + crev = si_corerev(&sii->pub); + + /* Display cores found */ + SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n", + i, cid, crev, sii->coresba[i], sii->regs[i])); + + if (BUSTYPE(bustype) == PCI_BUS) { + if (cid == PCI_CORE_ID) { + pciidx = i; + pcirev = crev; + pci = TRUE; + } else if (cid == PCIE_CORE_ID) { + pcieidx = i; + pcierev = crev; + pcie = TRUE; + } + } else if ((BUSTYPE(bustype) == PCMCIA_BUS) && + (cid == PCMCIA_CORE_ID)) { + sii->pub.buscorerev = crev; + sii->pub.buscoretype = cid; + sii->pub.buscoreidx = i; + } + else if (((BUSTYPE(bustype) == SDIO_BUS) || + (BUSTYPE(bustype) == SPI_BUS)) && + ((cid == PCMCIA_CORE_ID) || + (cid == SDIOD_CORE_ID))) { + sii->pub.buscorerev = crev; + sii->pub.buscoretype = cid; + sii->pub.buscoreidx 
= i; + } + + /* find the core idx before entering this func. */ + if ((savewin && (savewin == sii->coresba[i])) || + (regs == sii->regs[i])) + *origidx = i; + } + + if (pci) { + sii->pub.buscoretype = PCI_CORE_ID; + sii->pub.buscorerev = pcirev; + sii->pub.buscoreidx = pciidx; + } else if (pcie) { + sii->pub.buscoretype = PCIE_CORE_ID; + sii->pub.buscorerev = pcierev; + sii->pub.buscoreidx = pcieidx; + } + + SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx, sii->pub.buscoretype, + sii->pub.buscorerev)); + + if (BUSTYPE(sii->pub.bustype) == SI_BUS && (CHIPID(sii->pub.chip) == BCM4712_CHIP_ID) && + (sii->pub.chippkg != BCM4712LARGE_PKG_ID) && (CHIPREV(sii->pub.chiprev) <= 3)) + OR_REG(sii->osh, &cc->slow_clk_ctl, SCC_SS_XTAL); + + + /* Make sure any on-chip ARM is off (in case strapping is wrong), or downloaded code was + * already running. + */ + if ((BUSTYPE(bustype) == SDIO_BUS) || (BUSTYPE(bustype) == SPI_BUS)) { + if (si_setcore(&sii->pub, ARM7S_CORE_ID, 0) || + si_setcore(&sii->pub, ARMCM3_CORE_ID, 0)) + si_core_disable(&sii->pub, 0); + } + + /* return to the original core */ + si_setcoreidx(&sii->pub, *origidx); + + return TRUE; +} + + + +static si_info_t * +si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, + uint bustype, void *sdh, char **vars, uint *varsz) +{ + struct si_pub *sih = &sii->pub; + uint32 w, savewin; + chipcregs_t *cc; + char *pvars = NULL; + uint origidx; + + ASSERT(GOODREGS(regs)); + + bzero((uchar*)sii, sizeof(si_info_t)); + + savewin = 0; + + sih->buscoreidx = BADIDX; + + sii->curmap = regs; + sii->sdh = sdh; + sii->osh = osh; + + + + /* find Chipcommon address */ + if (bustype == PCI_BUS) { + savewin = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32)); + if (!GOODCOREADDR(savewin, SI_ENUM_BASE)) + savewin = SI_ENUM_BASE; + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE); + cc = (chipcregs_t *)regs; + } else if ((bustype == SDIO_BUS) || (bustype == SPI_BUS)) { + cc = (chipcregs_t *)sii->curmap; + } else { + cc = (chipcregs_t *)REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE); + } + + sih->bustype = bustype; + if (bustype != BUSTYPE(bustype)) { + SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n", + bustype, BUSTYPE(bustype))); + return NULL; + } + + /* bus/core/clk setup for register access */ + if (!si_buscore_prep(sii, bustype, devid, sdh)) { + SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n", bustype)); + return NULL; + } + + /* ChipID recognition. + * We assume we can read chipid at offset 0 from the regs arg. + * If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon), + * some way of recognizing them needs to be added here. 
+ */ + w = R_REG(osh, &cc->chipid); + sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT; + /* Might as wll fill in chip id rev & pkg */ + sih->chip = w & CID_ID_MASK; + sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT; + sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT; + if (CHIPID(sih->chip) == BCM4322_CHIP_ID && (((sih->chipst & CST4322_SPROM_OTP_SEL_MASK) + >> CST4322_SPROM_OTP_SEL_SHIFT) == (CST4322_OTP_PRESENT | + CST4322_SPROM_PRESENT))) { + SI_ERROR(("%s: Invalid setting: both SPROM and OTP strapped.\n", __FUNCTION__)); + return NULL; + } + +#if defined(HW_OOB) + if (CHIPID(sih->chip) == BCM43362_CHIP_ID) { + uint32 gpiocontrol, addr; + addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, gpiocontrol); + gpiocontrol = bcmsdh_reg_read(sdh, addr, 4); + gpiocontrol |= 0x2; + bcmsdh_reg_write(sdh, addr, 4, gpiocontrol); + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10005, 0xf, NULL); + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10006, 0x0, NULL); + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10007, 0x2, NULL); + } +#endif + + if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && (sih->chiprev == 0) && + (sih->chippkg != BCM4329_289PIN_PKG_ID)) { + sih->chippkg = BCM4329_182PIN_PKG_ID; + } + + sih->issim = IS_SIM(sih->chippkg); + + /* scan for cores */ + if (CHIPTYPE(sii->pub.socitype) == SOCI_SB) { + SI_MSG(("Found chip type SB (0x%08x)\n", w)); + sb_scan(&sii->pub, regs, devid); + } else if (CHIPTYPE(sii->pub.socitype) == SOCI_AI) { + SI_MSG(("Found chip type AI (0x%08x)\n", w)); + /* pass chipc address instead of original core base */ + ai_scan(&sii->pub, (void *)(uintptr)cc, devid); + } else if (CHIPTYPE(sii->pub.socitype) == SOCI_UBUS) { + SI_MSG(("Found chip type UBUS (0x%08x), chip id = 0x%4x\n", w, sih->chip)); + /* pass chipc address instead of original core base */ + ub_scan(&sii->pub, (void *)(uintptr)cc, devid); + } else { + SI_ERROR(("Found chip of unknown type (0x%08x)\n", w)); + return NULL; + } + /* no cores found, bail out */ + if (sii->numcores == 0) { + SI_ERROR(("si_doattach: could not find any cores\n")); + return NULL; + } + /* bus/core/clk setup */ + origidx = SI_CC_IDX; + if (!si_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) { + SI_ERROR(("si_doattach: si_buscore_setup failed\n")); + goto exit; + } + + /* assume current core is CC */ + if ((sii->pub.ccrev == 0x25) && ((CHIPID(sih->chip) == BCM43234_CHIP_ID || + CHIPID(sih->chip) == BCM43235_CHIP_ID || + CHIPID(sih->chip) == BCM43236_CHIP_ID || + CHIPID(sih->chip) == BCM43238_CHIP_ID) && + (CHIPREV(sii->pub.chiprev) == 0))) { + + if ((cc->chipstatus & CST43236_BP_CLK) != 0) { + uint clkdiv; + clkdiv = R_REG(osh, &cc->clkdiv); + /* otp_clk_div is even number, 120/14 < 9mhz */ + clkdiv = (clkdiv & ~CLKD_OTP) | (14 << CLKD_OTP_SHIFT); + W_REG(osh, &cc->clkdiv, clkdiv); + SI_ERROR(("%s: set clkdiv to %x\n", __FUNCTION__, clkdiv)); + } + OSL_DELAY(10); + } + + + pvars = NULL; + + + + if (sii->pub.ccrev >= 20) { + cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); + ASSERT(cc != NULL); + W_REG(osh, &cc->gpiopullup, 0); + W_REG(osh, &cc->gpiopulldown, 0); + si_setcoreidx(sih, origidx); + } + + + + + return (sii); + +exit: + + return NULL; +} + +/* may be called with core in reset */ +void +si_detach(si_t *sih) +{ + si_info_t *sii; + uint idx; + + + sii = SI_INFO(sih); + + if (sii == NULL) + return; + + if (BUSTYPE(sih->bustype) == SI_BUS) + for (idx = 0; idx < SI_MAXCORES; idx++) + if (sii->regs[idx]) { + REG_UNMAP(sii->regs[idx]); + sii->regs[idx] = NULL; + } + + + +#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS) + if (sii != &ksii) 
+#endif /* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */ + MFREE(sii->osh, sii, sizeof(si_info_t)); +} + +void * +si_osh(si_t *sih) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + return sii->osh; +} + +void +si_setosh(si_t *sih, osl_t *osh) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + if (sii->osh != NULL) { + SI_ERROR(("osh is already set....\n")); + ASSERT(!sii->osh); + } + sii->osh = osh; +} + +/* register driver interrupt disabling and restoring callback functions */ +void +si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn, + void *intrsenabled_fn, void *intr_arg) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + sii->intr_arg = intr_arg; + sii->intrsoff_fn = (si_intrsoff_t)intrsoff_fn; + sii->intrsrestore_fn = (si_intrsrestore_t)intrsrestore_fn; + sii->intrsenabled_fn = (si_intrsenabled_t)intrsenabled_fn; + /* save current core id. when this function called, the current core + * must be the core which provides driver functions(il, et, wl, etc.) + */ + sii->dev_coreid = sii->coreid[sii->curidx]; +} + +void +si_deregister_intr_callback(si_t *sih) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + sii->intrsoff_fn = NULL; +} + +uint +si_intflag(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_intflag(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return R_REG(sii->osh, ((uint32 *)(uintptr) + (sii->oob_router + OOB_STATUSA))); + else { + ASSERT(0); + return 0; + } +} + +uint +si_flag(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_flag(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return ai_flag(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_flag(sih); + else { + ASSERT(0); + return 0; + } +} + +void +si_setint(si_t *sih, int siflag) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + sb_setint(sih, siflag); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + ai_setint(sih, siflag); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + ub_setint(sih, siflag); + else + ASSERT(0); +} + +uint +si_coreid(si_t *sih) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + return sii->coreid[sii->curidx]; +} + +uint +si_coreidx(si_t *sih) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + return sii->curidx; +} + +/* return the core-type instantiation # of the current core */ +uint +si_coreunit(si_t *sih) +{ + si_info_t *sii; + uint idx; + uint coreid; + uint coreunit; + uint i; + + sii = SI_INFO(sih); + coreunit = 0; + + idx = sii->curidx; + + ASSERT(GOODREGS(sii->curmap)); + coreid = si_coreid(sih); + + /* count the cores of our type */ + for (i = 0; i < idx; i++) + if (sii->coreid[i] == coreid) + coreunit++; + + return (coreunit); +} + +uint +si_corevendor(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_corevendor(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return ai_corevendor(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_corevendor(sih); + else { + ASSERT(0); + return 0; + } +} + +bool +si_backplane64(si_t *sih) +{ + return ((sih->cccaps & CC_CAP_BKPLN64) != 0); +} + +uint +si_corerev(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_corerev(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return ai_corerev(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_corerev(sih); + else { + ASSERT(0); + return 0; + } +} + +/* return index of coreid or BADIDX if not found */ +uint +si_findcoreidx(si_t *sih, uint coreid, uint coreunit) +{ + si_info_t *sii; + uint found; + uint i; + + sii = SI_INFO(sih); + + 
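+	/* Walk the discovered core list, counting cores that match coreid
+	 * until the requested coreunit instance is reached.
+	 */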
found = 0; + + for (i = 0; i < sii->numcores; i++) + if (sii->coreid[i] == coreid) { + if (found == coreunit) + return (i); + found++; + } + + return (BADIDX); +} + +/* return list of found cores */ +uint +si_corelist(si_t *sih, uint coreid[]) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + + bcopy((uchar*)sii->coreid, (uchar*)coreid, (sii->numcores * sizeof(uint))); + return (sii->numcores); +} + +/* return current register mapping */ +void * +si_coreregs(si_t *sih) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + ASSERT(GOODREGS(sii->curmap)); + + return (sii->curmap); +} + +/* + * This function changes logical "focus" to the indicated core; + * must be called with interrupts off. + * Moreover, callers should keep interrupts off during switching out of and back to d11 core + */ +void * +si_setcore(si_t *sih, uint coreid, uint coreunit) +{ + uint idx; + + idx = si_findcoreidx(sih, coreid, coreunit); + if (!GOODIDX(idx)) + return (NULL); + + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_setcoreidx(sih, idx); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return ai_setcoreidx(sih, idx); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_setcoreidx(sih, idx); + else { + ASSERT(0); + return NULL; + } +} + +void * +si_setcoreidx(si_t *sih, uint coreidx) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_setcoreidx(sih, coreidx); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return ai_setcoreidx(sih, coreidx); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_setcoreidx(sih, coreidx); + else { + ASSERT(0); + return NULL; + } +} + +/* Turn off interrupt as required by sb_setcore, before switch core */ +void * +si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val) +{ + void *cc; + si_info_t *sii; + + sii = SI_INFO(sih); + + if (SI_FAST(sii)) { + /* Overloading the origidx variable to remember the coreid, + * this works because the core ids cannot be confused with + * core indices. 
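+	 *
+	 * Typical caller pattern (illustrative sketch only; regs, origidx and
+	 * intr_val are the caller's locals):
+	 *
+	 *	regs = si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
+	 *	... access the core registers through regs ...
+	 *	si_restore_core(sih, origidx, intr_val);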
+ */ + *origidx = coreid; + if (coreid == CC_CORE_ID) + return (void *)CCREGS_FAST(sii); + else if (coreid == sih->buscoretype) + return (void *)PCIEREGS(sii); + } + INTR_OFF(sii, *intr_val); + *origidx = sii->curidx; + cc = si_setcore(sih, coreid, 0); + ASSERT(cc != NULL); + + return cc; +} + +/* restore coreidx and restore interrupt */ +void +si_restore_core(si_t *sih, uint coreid, uint intr_val) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + if (SI_FAST(sii) && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype))) + return; + + si_setcoreidx(sih, coreid); + INTR_RESTORE(sii, intr_val); +} + +int +si_numaddrspaces(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_numaddrspaces(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return ai_numaddrspaces(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_numaddrspaces(sih); + else { + ASSERT(0); + return 0; + } +} + +uint32 +si_addrspace(si_t *sih, uint asidx) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_addrspace(sih, asidx); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return ai_addrspace(sih, asidx); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_addrspace(sih, asidx); + else { + ASSERT(0); + return 0; + } +} + +uint32 +si_addrspacesize(si_t *sih, uint asidx) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_addrspacesize(sih, asidx); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return ai_addrspacesize(sih, asidx); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_addrspacesize(sih, asidx); + else { + ASSERT(0); + return 0; + } +} + +uint32 +si_core_cflags(si_t *sih, uint32 mask, uint32 val) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_core_cflags(sih, mask, val); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return ai_core_cflags(sih, mask, val); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_core_cflags(sih, mask, val); + else { + ASSERT(0); + return 0; + } +} + +void +si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + sb_core_cflags_wo(sih, mask, val); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + ai_core_cflags_wo(sih, mask, val); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + ub_core_cflags_wo(sih, mask, val); + else + ASSERT(0); +} + +uint32 +si_core_sflags(si_t *sih, uint32 mask, uint32 val) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_core_sflags(sih, mask, val); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return ai_core_sflags(sih, mask, val); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_core_sflags(sih, mask, val); + else { + ASSERT(0); + return 0; + } +} + +bool +si_iscoreup(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_iscoreup(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return ai_iscoreup(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_iscoreup(sih); + else { + ASSERT(0); + return FALSE; + } +} + +uint +si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val) +{ + /* only for AI back plane chips */ + if (CHIPTYPE(sih->socitype) == SOCI_AI) + return (ai_wrap_reg(sih, offset, mask, val)); + return 0; +} + +uint +si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_corereg(sih, coreidx, regoff, mask, val); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + return ai_corereg(sih, coreidx, regoff, mask, val); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_corereg(sih, coreidx, 
regoff, mask, val); + else { + ASSERT(0); + return 0; + } +} + +void +si_core_disable(si_t *sih, uint32 bits) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + sb_core_disable(sih, bits); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + ai_core_disable(sih, bits); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + ub_core_disable(sih, bits); +} + +void +si_core_reset(si_t *sih, uint32 bits, uint32 resetbits) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + sb_core_reset(sih, bits, resetbits); + else if (CHIPTYPE(sih->socitype) == SOCI_AI) + ai_core_reset(sih, bits, resetbits); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + ub_core_reset(sih, bits, resetbits); +} + +/* Run bist on current core. Caller needs to take care of core-specific bist hazards */ +int +si_corebist(si_t *sih) +{ + uint32 cflags; + int result = 0; + + /* Read core control flags */ + cflags = si_core_cflags(sih, 0, 0); + + /* Set bist & fgc */ + si_core_cflags(sih, ~0, (SICF_BIST_EN | SICF_FGC)); + + /* Wait for bist done */ + SPINWAIT(((si_core_sflags(sih, 0, 0) & SISF_BIST_DONE) == 0), 100000); + + if (si_core_sflags(sih, 0, 0) & SISF_BIST_ERROR) + result = BCME_ERROR; + + /* Reset core control flags */ + si_core_cflags(sih, 0xffff, cflags); + + return result; +} + +static uint32 +factor6(uint32 x) +{ + switch (x) { + case CC_F6_2: return 2; + case CC_F6_3: return 3; + case CC_F6_4: return 4; + case CC_F6_5: return 5; + case CC_F6_6: return 6; + case CC_F6_7: return 7; + default: return 0; + } +} + +/* calculate the speed the SI would run at given a set of clockcontrol values */ +uint32 +si_clock_rate(uint32 pll_type, uint32 n, uint32 m) +{ + uint32 n1, n2, clock, m1, m2, m3, mc; + + n1 = n & CN_N1_MASK; + n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT; + + if (pll_type == PLL_TYPE6) { + if (m & CC_T6_MMASK) + return CC_T6_M1; + else + return CC_T6_M0; + } else if ((pll_type == PLL_TYPE1) || + (pll_type == PLL_TYPE3) || + (pll_type == PLL_TYPE4) || + (pll_type == PLL_TYPE7)) { + n1 = factor6(n1); + n2 += CC_F5_BIAS; + } else if (pll_type == PLL_TYPE2) { + n1 += CC_T2_BIAS; + n2 += CC_T2_BIAS; + ASSERT((n1 >= 2) && (n1 <= 7)); + ASSERT((n2 >= 5) && (n2 <= 23)); + } else if (pll_type == PLL_TYPE5) { + return (100000000); + } else + ASSERT(0); + /* PLL types 3 and 7 use BASE2 (25Mhz) */ + if ((pll_type == PLL_TYPE3) || + (pll_type == PLL_TYPE7)) { + clock = CC_CLOCK_BASE2 * n1 * n2; + } else + clock = CC_CLOCK_BASE1 * n1 * n2; + + if (clock == 0) + return 0; + + m1 = m & CC_M1_MASK; + m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT; + m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT; + mc = (m & CC_MC_MASK) >> CC_MC_SHIFT; + + if ((pll_type == PLL_TYPE1) || + (pll_type == PLL_TYPE3) || + (pll_type == PLL_TYPE4) || + (pll_type == PLL_TYPE7)) { + m1 = factor6(m1); + if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3)) + m2 += CC_F5_BIAS; + else + m2 = factor6(m2); + m3 = factor6(m3); + + switch (mc) { + case CC_MC_BYPASS: return (clock); + case CC_MC_M1: return (clock / m1); + case CC_MC_M1M2: return (clock / (m1 * m2)); + case CC_MC_M1M2M3: return (clock / (m1 * m2 * m3)); + case CC_MC_M1M3: return (clock / (m1 * m3)); + default: return (0); + } + } else { + ASSERT(pll_type == PLL_TYPE2); + + m1 += CC_T2_BIAS; + m2 += CC_T2M2_BIAS; + m3 += CC_T2_BIAS; + ASSERT((m1 >= 2) && (m1 <= 7)); + ASSERT((m2 >= 3) && (m2 <= 10)); + ASSERT((m3 >= 2) && (m3 <= 7)); + + if ((mc & CC_T2MC_M1BYP) == 0) + clock /= m1; + if ((mc & CC_T2MC_M2BYP) == 0) + clock /= m2; + if ((mc & CC_T2MC_M3BYP) == 0) + clock /= m3; + + return (clock); + } +} + + +/* set chip watchdog reset 
timer to fire in 'ticks' */ +void +si_watchdog(si_t *sih, uint ticks) +{ + uint nb, maxt; + + if (PMUCTL_ENAB(sih)) { + + if ((CHIPID(sih->chip) == BCM4319_CHIP_ID) && + (CHIPREV(sih->chiprev) == 0) && (ticks != 0)) { + si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), ~0, 0x2); + si_setcore(sih, USB20D_CORE_ID, 0); + si_core_disable(sih, 1); + si_setcore(sih, CC_CORE_ID, 0); + } + + nb = (sih->ccrev < 26) ? 16 : ((sih->ccrev >= 37) ? 32 : 24); + /* The mips compiler uses the sllv instruction, + * so we specially handle the 32-bit case. + */ + if (nb == 32) + maxt = 0xffffffff; + else + maxt = ((1 << nb) - 1); + + if (ticks == 1) + ticks = 2; + else if (ticks > maxt) + ticks = maxt; + + si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, pmuwatchdog), ~0, ticks); + } else { + maxt = (1 << 28) - 1; + if (ticks > maxt) + ticks = maxt; + + si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks); + } +} + +/* trigger watchdog reset after ms milliseconds */ +void +si_watchdog_ms(si_t *sih, uint32 ms) +{ + si_watchdog(sih, wd_msticks * ms); +} + + + + +/* return the slow clock source - LPO, XTAL, or PCI */ +static uint +si_slowclk_src(si_info_t *sii) +{ + chipcregs_t *cc; + + ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID); + + if (sii->pub.ccrev < 6) { + if ((BUSTYPE(sii->pub.bustype) == PCI_BUS) && + (OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(uint32)) & + PCI_CFG_GPIO_SCS)) + return (SCC_SS_PCI); + else + return (SCC_SS_XTAL); + } else if (sii->pub.ccrev < 10) { + cc = (chipcregs_t *)si_setcoreidx(&sii->pub, sii->curidx); + return (R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_SS_MASK); + } else /* Insta-clock */ + return (SCC_SS_XTAL); +} + +/* return the ILP (slowclock) min or max frequency */ +static uint +si_slowclk_freq(si_info_t *sii, bool max_freq, chipcregs_t *cc) +{ + uint32 slowclk; + uint div; + + ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID); + + /* shouldn't be here unless we've established the chip has dynamic clk control */ + ASSERT(R_REG(sii->osh, &cc->capabilities) & CC_CAP_PWR_CTL); + + slowclk = si_slowclk_src(sii); + if (sii->pub.ccrev < 6) { + if (slowclk == SCC_SS_PCI) + return (max_freq ? (PCIMAXFREQ / 64) : (PCIMINFREQ / 64)); + else + return (max_freq ? (XTALMAXFREQ / 32) : (XTALMINFREQ / 32)); + } else if (sii->pub.ccrev < 10) { + div = 4 * + (((R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHIFT) + 1); + if (slowclk == SCC_SS_LPO) + return (max_freq ? LPOMAXFREQ : LPOMINFREQ); + else if (slowclk == SCC_SS_XTAL) + return (max_freq ? (XTALMAXFREQ / div) : (XTALMINFREQ / div)); + else if (slowclk == SCC_SS_PCI) + return (max_freq ? (PCIMAXFREQ / div) : (PCIMINFREQ / div)); + else + ASSERT(0); + } else { + /* Chipc rev 10 is InstaClock */ + div = R_REG(sii->osh, &cc->system_clk_ctl) >> SYCC_CD_SHIFT; + div = 4 * (div + 1); + return (max_freq ? XTALMAXFREQ : (XTALMINFREQ / div)); + } + return (0); +} + +static void +si_clkctl_setdelay(si_info_t *sii, void *chipcregs) +{ + chipcregs_t *cc = (chipcregs_t *)chipcregs; + uint slowmaxfreq, pll_delay, slowclk; + uint pll_on_delay, fref_sel_delay; + + pll_delay = PLL_DELAY; + + /* If the slow clock is not sourced by the xtal then add the xtal_on_delay + * since the xtal will also be powered down by dynamic clk control logic. + */ + + slowclk = si_slowclk_src(sii); + if (slowclk != SCC_SS_XTAL) + pll_delay += XTAL_ON_DELAY; + + /* Starting with 4318 it is ILP that is used for the delays */ + slowmaxfreq = si_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? 
FALSE : TRUE, cc); + + pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000; + fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000; + + W_REG(sii->osh, &cc->pll_on_delay, pll_on_delay); + W_REG(sii->osh, &cc->fref_sel_delay, fref_sel_delay); +} + +/* initialize power control delay registers */ +void +si_clkctl_init(si_t *sih) +{ + si_info_t *sii; + uint origidx = 0; + chipcregs_t *cc; + bool fast; + + if (!CCCTL_ENAB(sih)) + return; + + sii = SI_INFO(sih); + fast = SI_FAST(sii); + if (!fast) { + origidx = sii->curidx; + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) + return; + } else if ((cc = (chipcregs_t *)CCREGS_FAST(sii)) == NULL) + return; + ASSERT(cc != NULL); + + /* set all Instaclk chip ILP to 1 MHz */ + if (sih->ccrev >= 10) + SET_REG(sii->osh, &cc->system_clk_ctl, SYCC_CD_MASK, + (ILP_DIV_1MHZ << SYCC_CD_SHIFT)); + + si_clkctl_setdelay(sii, (void *)(uintptr)cc); + + if (!fast) + si_setcoreidx(sih, origidx); +} + +/* change logical "focus" to the gpio core for optimized access */ +void * +si_gpiosetcore(si_t *sih) +{ + return (si_setcoreidx(sih, SI_CC_IDX)); +} + +/* mask&set gpiocontrol bits */ +uint32 +si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority) +{ + uint regoff; + + regoff = 0; + + /* gpios could be shared on router platforms + * ignore reservation if it's high priority (e.g., test apps) + */ + if ((priority != GPIO_HI_PRIORITY) && + (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { + mask = priority ? (si_gpioreservation & mask) : + ((si_gpioreservation | mask) & ~(si_gpioreservation)); + val &= mask; + } + + regoff = OFFSETOF(chipcregs_t, gpiocontrol); + return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); +} + +/* mask&set gpio output enable bits */ +uint32 +si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority) +{ + uint regoff; + + regoff = 0; + + /* gpios could be shared on router platforms + * ignore reservation if it's high priority (e.g., test apps) + */ + if ((priority != GPIO_HI_PRIORITY) && + (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { + mask = priority ? (si_gpioreservation & mask) : + ((si_gpioreservation | mask) & ~(si_gpioreservation)); + val &= mask; + } + + regoff = OFFSETOF(chipcregs_t, gpioouten); + return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); +} + +/* mask&set gpio output bits */ +uint32 +si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority) +{ + uint regoff; + + regoff = 0; + + /* gpios could be shared on router platforms + * ignore reservation if it's high priority (e.g., test apps) + */ + if ((priority != GPIO_HI_PRIORITY) && + (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { + mask = priority ? 
(si_gpioreservation & mask) : + ((si_gpioreservation | mask) & ~(si_gpioreservation)); + val &= mask; + } + + regoff = OFFSETOF(chipcregs_t, gpioout); + return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); +} + +/* reserve one gpio */ +uint32 +si_gpioreserve(si_t *sih, uint32 gpio_bitmask, uint8 priority) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + + /* only cores on SI_BUS share GPIO's and only applcation users need to + * reserve/release GPIO + */ + if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) { + ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority)); + return 0xffffffff; + } + /* make sure only one bit is set */ + if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) { + ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1))); + return 0xffffffff; + } + + /* already reserved */ + if (si_gpioreservation & gpio_bitmask) + return 0xffffffff; + /* set reservation */ + si_gpioreservation |= gpio_bitmask; + + return si_gpioreservation; +} + +/* release one gpio */ +/* + * releasing the gpio doesn't change the current value on the GPIO last write value + * persists till some one overwrites it + */ + +uint32 +si_gpiorelease(si_t *sih, uint32 gpio_bitmask, uint8 priority) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + + /* only cores on SI_BUS share GPIO's and only applcation users need to + * reserve/release GPIO + */ + if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) { + ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority)); + return 0xffffffff; + } + /* make sure only one bit is set */ + if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) { + ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1))); + return 0xffffffff; + } + + /* already released */ + if (!(si_gpioreservation & gpio_bitmask)) + return 0xffffffff; + + /* clear reservation */ + si_gpioreservation &= ~gpio_bitmask; + + return si_gpioreservation; +} + +/* return the current gpioin register value */ +uint32 +si_gpioin(si_t *sih) +{ + si_info_t *sii; + uint regoff; + + sii = SI_INFO(sih); + regoff = 0; + + regoff = OFFSETOF(chipcregs_t, gpioin); + return (si_corereg(sih, SI_CC_IDX, regoff, 0, 0)); +} + +/* mask&set gpio interrupt polarity bits */ +uint32 +si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority) +{ + si_info_t *sii; + uint regoff; + + sii = SI_INFO(sih); + regoff = 0; + + /* gpios could be shared on router platforms */ + if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { + mask = priority ? (si_gpioreservation & mask) : + ((si_gpioreservation | mask) & ~(si_gpioreservation)); + val &= mask; + } + + regoff = OFFSETOF(chipcregs_t, gpiointpolarity); + return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); +} + +/* mask&set gpio interrupt mask bits */ +uint32 +si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority) +{ + si_info_t *sii; + uint regoff; + + sii = SI_INFO(sih); + regoff = 0; + + /* gpios could be shared on router platforms */ + if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { + mask = priority ? 
(si_gpioreservation & mask) : + ((si_gpioreservation | mask) & ~(si_gpioreservation)); + val &= mask; + } + + regoff = OFFSETOF(chipcregs_t, gpiointmask); + return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); +} + +/* assign the gpio to an led */ +uint32 +si_gpioled(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + if (sih->ccrev < 16) + return 0xffffffff; + + /* gpio led powersave reg */ + return (si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask, val)); +} + +/* mask&set gpio timer val */ +uint32 +si_gpiotimerval(si_t *sih, uint32 mask, uint32 gpiotimerval) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + + if (sih->ccrev < 16) + return 0xffffffff; + + return (si_corereg(sih, SI_CC_IDX, + OFFSETOF(chipcregs_t, gpiotimerval), mask, gpiotimerval)); +} + +uint32 +si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val) +{ + si_info_t *sii; + uint offs; + + sii = SI_INFO(sih); + if (sih->ccrev < 20) + return 0xffffffff; + + offs = (updown ? OFFSETOF(chipcregs_t, gpiopulldown) : OFFSETOF(chipcregs_t, gpiopullup)); + return (si_corereg(sih, SI_CC_IDX, offs, mask, val)); +} + +uint32 +si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val) +{ + si_info_t *sii; + uint offs; + + sii = SI_INFO(sih); + if (sih->ccrev < 11) + return 0xffffffff; + + if (regtype == GPIO_REGEVT) + offs = OFFSETOF(chipcregs_t, gpioevent); + else if (regtype == GPIO_REGEVT_INTMSK) + offs = OFFSETOF(chipcregs_t, gpioeventintmask); + else if (regtype == GPIO_REGEVT_INTPOL) + offs = OFFSETOF(chipcregs_t, gpioeventintpolarity); + else + return 0xffffffff; + + return (si_corereg(sih, SI_CC_IDX, offs, mask, val)); +} + +void * +si_gpio_handler_register(si_t *sih, uint32 event, + bool level, gpio_handler_t cb, void *arg) +{ + si_info_t *sii; + gpioh_item_t *gi; + + ASSERT(event); + ASSERT(cb != NULL); + + sii = SI_INFO(sih); + if (sih->ccrev < 11) + return NULL; + + if ((gi = MALLOC(sii->osh, sizeof(gpioh_item_t))) == NULL) + return NULL; + + bzero(gi, sizeof(gpioh_item_t)); + gi->event = event; + gi->handler = cb; + gi->arg = arg; + gi->level = level; + + gi->next = sii->gpioh_head; + sii->gpioh_head = gi; + + return (void *)(gi); +} + +void +si_gpio_handler_unregister(si_t *sih, void *gpioh) +{ + si_info_t *sii; + gpioh_item_t *p, *n; + + sii = SI_INFO(sih); + if (sih->ccrev < 11) + return; + + ASSERT(sii->gpioh_head != NULL); + if ((void*)sii->gpioh_head == gpioh) { + sii->gpioh_head = sii->gpioh_head->next; + MFREE(sii->osh, gpioh, sizeof(gpioh_item_t)); + return; + } else { + p = sii->gpioh_head; + n = p->next; + while (n) { + if ((void*)n == gpioh) { + p->next = n->next; + MFREE(sii->osh, gpioh, sizeof(gpioh_item_t)); + return; + } + p = n; + n = n->next; + } + } + + ASSERT(0); /* Not found in list */ +} + +void +si_gpio_handler_process(si_t *sih) +{ + si_info_t *sii; + gpioh_item_t *h; + uint32 level = si_gpioin(sih); + uint32 levelp = si_gpiointpolarity(sih, 0, 0, 0); + uint32 edge = si_gpioevent(sih, GPIO_REGEVT, 0, 0); + uint32 edgep = si_gpioevent(sih, GPIO_REGEVT_INTPOL, 0, 0); + + sii = SI_INFO(sih); + for (h = sii->gpioh_head; h != NULL; h = h->next) { + if (h->handler) { + uint32 status = (h->level ? level : edge) & h->event; + uint32 polarity = (h->level ? 
levelp : edgep) & h->event; + + /* polarity bitval is opposite of status bitval */ + if (status ^ polarity) + h->handler(status, h->arg); + } + } + + si_gpioevent(sih, GPIO_REGEVT, edge, edge); /* clear edge-trigger status */ +} + +uint32 +si_gpio_int_enable(si_t *sih, bool enable) +{ + si_info_t *sii; + uint offs; + + sii = SI_INFO(sih); + if (sih->ccrev < 11) + return 0xffffffff; + + offs = OFFSETOF(chipcregs_t, intmask); + return (si_corereg(sih, SI_CC_IDX, offs, CI_GPIO, (enable ? CI_GPIO : 0))); +} + + +/* Return the size of the specified SOCRAM bank */ +static uint +socram_banksize(si_info_t *sii, sbsocramregs_t *regs, uint8 index, uint8 mem_type) +{ + uint banksize, bankinfo; + uint bankidx = index | (mem_type << SOCRAM_BANKIDX_MEMTYPE_SHIFT); + + ASSERT(mem_type <= SOCRAM_MEMTYPE_DEVRAM); + + W_REG(sii->osh, ®s->bankidx, bankidx); + bankinfo = R_REG(sii->osh, ®s->bankinfo); + banksize = SOCRAM_BANKINFO_SZBASE * ((bankinfo & SOCRAM_BANKINFO_SZMASK) + 1); + return banksize; +} + +void +si_socdevram(si_t *sih, bool set, uint8 *enable, uint8 *protect) +{ + si_info_t *sii; + uint origidx; + uint intr_val = 0; + sbsocramregs_t *regs; + bool wasup; + uint corerev; + + sii = SI_INFO(sih); + + /* Block ints and save current core */ + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + if (!set) + *enable = *protect = 0; + + /* Switch to SOCRAM core */ + if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0))) + goto done; + + /* Get info for determining size */ + if (!(wasup = si_iscoreup(sih))) + si_core_reset(sih, 0, 0); + + corerev = si_corerev(sih); + if (corerev >= 10) { + uint32 extcinfo; + uint8 nb; + uint8 i; + uint32 bankidx, bankinfo; + + extcinfo = R_REG(sii->osh, ®s->extracoreinfo); + nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT); + for (i = 0; i < nb; i++) { + bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT); + W_REG(sii->osh, ®s->bankidx, bankidx); + bankinfo = R_REG(sii->osh, ®s->bankinfo); + if (set) { + bankinfo &= ~SOCRAM_BANKINFO_DEVRAMSEL_MASK; + bankinfo &= ~SOCRAM_BANKINFO_DEVRAMPRO_MASK; + if (*enable) { + bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMSEL_SHIFT); + if (*protect) + bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMPRO_SHIFT); + } + W_REG(sii->osh, ®s->bankinfo, bankinfo); + } + else if (i == 0) { + if (bankinfo & SOCRAM_BANKINFO_DEVRAMSEL_MASK) { + *enable = 1; + if (bankinfo & SOCRAM_BANKINFO_DEVRAMPRO_MASK) + *protect = 1; + } + } + } + } + + /* Return to previous state and core */ + if (!wasup) + si_core_disable(sih, 0); + si_setcoreidx(sih, origidx); + +done: + INTR_RESTORE(sii, intr_val); +} + +bool +si_socdevram_pkg(si_t *sih) +{ + if (si_socdevram_size(sih) > 0) + return TRUE; + else + return FALSE; +} + +uint32 +si_socdevram_size(si_t *sih) +{ + si_info_t *sii; + uint origidx; + uint intr_val = 0; + uint32 memsize = 0; + sbsocramregs_t *regs; + bool wasup; + uint corerev; + + sii = SI_INFO(sih); + + /* Block ints and save current core */ + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + /* Switch to SOCRAM core */ + if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0))) + goto done; + + /* Get info for determining size */ + if (!(wasup = si_iscoreup(sih))) + si_core_reset(sih, 0, 0); + + corerev = si_corerev(sih); + if (corerev >= 10) { + uint32 extcinfo; + uint8 nb; + uint8 i; + + extcinfo = R_REG(sii->osh, ®s->extracoreinfo); + nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT)); + for (i = 0; i < nb; i++) + memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_DEVRAM); + } + + /* Return 
to previous state and core */ + if (!wasup) + si_core_disable(sih, 0); + si_setcoreidx(sih, origidx); + +done: + INTR_RESTORE(sii, intr_val); + + return memsize; +} + +/* Return the RAM size of the SOCRAM core */ +uint32 +si_socram_size(si_t *sih) +{ + si_info_t *sii; + uint origidx; + uint intr_val = 0; + + sbsocramregs_t *regs; + bool wasup; + uint corerev; + uint32 coreinfo; + uint memsize = 0; + + sii = SI_INFO(sih); + + /* Block ints and save current core */ + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + /* Switch to SOCRAM core */ + if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0))) + goto done; + + /* Get info for determining size */ + if (!(wasup = si_iscoreup(sih))) + si_core_reset(sih, 0, 0); + corerev = si_corerev(sih); + coreinfo = R_REG(sii->osh, ®s->coreinfo); + + /* Calculate size from coreinfo based on rev */ + if (corerev == 0) + memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK)); + else if (corerev < 3) { + memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK)); + memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; + } else if ((corerev <= 7) || (corerev == 12)) { + uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; + uint bsz = (coreinfo & SRCI_SRBSZ_MASK); + uint lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT; + if (lss != 0) + nb --; + memsize = nb * (1 << (bsz + SR_BSZ_BASE)); + if (lss != 0) + memsize += (1 << ((lss - 1) + SR_BSZ_BASE)); + } else { + uint8 i; + uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; + for (i = 0; i < nb; i++) + memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM); + } + + /* Return to previous state and core */ + if (!wasup) + si_core_disable(sih, 0); + si_setcoreidx(sih, origidx); + +done: + INTR_RESTORE(sii, intr_val); + + return memsize; +} + + +void +si_btcgpiowar(si_t *sih) +{ + si_info_t *sii; + uint origidx; + uint intr_val = 0; + chipcregs_t *cc; + + sii = SI_INFO(sih); + + /* Make sure that there is ChipCommon core present && + * UART_TX is strapped to 1 + */ + if (!(sih->cccaps & CC_CAP_UARTGPIO)) + return; + + /* si_corereg cannot be used as we have to guarantee 8-bit read/writes */ + INTR_OFF(sii, intr_val); + + origidx = si_coreidx(sih); + + cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); + ASSERT(cc != NULL); + + W_REG(sii->osh, &cc->uart0mcr, R_REG(sii->osh, &cc->uart0mcr) | 0x04); + + /* restore the original index */ + si_setcoreidx(sih, origidx); + + INTR_RESTORE(sii, intr_val); +} + +uint +si_pll_reset(si_t *sih) +{ + uint err = 0; + + return (err); +} + +/* check if the device is removed */ +bool +si_deviceremoved(si_t *sih) +{ + uint32 w; + si_info_t *sii; + + sii = SI_INFO(sih); + + switch (BUSTYPE(sih->bustype)) { + case PCI_BUS: + ASSERT(sii->osh != NULL); + w = OSL_PCI_READ_CONFIG(sii->osh, PCI_CFG_VID, sizeof(uint32)); + if ((w & 0xFFFF) != VENDOR_BROADCOM) + return TRUE; + break; + } + return FALSE; +} + +bool +si_is_sprom_available(si_t *sih) +{ + if (sih->ccrev >= 31) { + si_info_t *sii; + uint origidx; + chipcregs_t *cc; + uint32 sromctrl; + + if ((sih->cccaps & CC_CAP_SROM) == 0) + return FALSE; + + sii = SI_INFO(sih); + origidx = sii->curidx; + cc = si_setcoreidx(sih, SI_CC_IDX); + sromctrl = R_REG(sii->osh, &cc->sromcontrol); + si_setcoreidx(sih, origidx); + return (sromctrl & SRC_PRESENT); + } + + switch (CHIPID(sih->chip)) { + case BCM4312_CHIP_ID: + return ((sih->chipst & CST4312_SPROM_OTP_SEL_MASK) != CST4312_OTP_SEL); + case BCM4325_CHIP_ID: + return (sih->chipst & CST4325_SPROM_SEL) != 0; + case BCM4322_CHIP_ID: + case BCM43221_CHIP_ID: + case 
BCM43231_CHIP_ID: + case BCM43222_CHIP_ID: + case BCM43111_CHIP_ID: + case BCM43112_CHIP_ID: + case BCM4342_CHIP_ID: + { + uint32 spromotp; + spromotp = (sih->chipst & CST4322_SPROM_OTP_SEL_MASK) >> + CST4322_SPROM_OTP_SEL_SHIFT; + return (spromotp & CST4322_SPROM_PRESENT) != 0; + } + case BCM4329_CHIP_ID: + return (sih->chipst & CST4329_SPROM_SEL) != 0; + case BCM4315_CHIP_ID: + return (sih->chipst & CST4315_SPROM_SEL) != 0; + case BCM4319_CHIP_ID: + return (sih->chipst & CST4319_SPROM_SEL) != 0; + + case BCM4336_CHIP_ID: + case BCM43362_CHIP_ID: + return (sih->chipst & CST4336_SPROM_PRESENT) != 0; + + case BCM4330_CHIP_ID: + return (sih->chipst & CST4330_SPROM_PRESENT) != 0; + case BCM4313_CHIP_ID: + return (sih->chipst & CST4313_SPROM_PRESENT) != 0; + case BCM43239_CHIP_ID: + return ((sih->chipst & CST43239_SPROM_MASK) && + !(sih->chipst & CST43239_SFLASH_MASK)); + default: + return TRUE; + } +} diff --git a/drivers/net/wireless/bcmdhd/siutils_priv.h b/drivers/net/wireless/bcmdhd/siutils_priv.h new file mode 100644 index 0000000000000..d80246e01d1b6 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/siutils_priv.h @@ -0,0 +1,235 @@ +/* + * Include file private to the SOC Interconnect support files. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: siutils_priv.h,v 1.17.4.3 2010-10-25 16:56:56 Exp $ + */ + +#ifndef _siutils_priv_h_ +#define _siutils_priv_h_ + +#define SI_ERROR(args) + +#define SI_MSG(args) + +/* Define SI_VMSG to printf for verbose debugging, but don't check it in */ +#define SI_VMSG(args) + +#define IS_SIM(chippkg) ((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID)) + +typedef uint32 (*si_intrsoff_t)(void *intr_arg); +typedef void (*si_intrsrestore_t)(void *intr_arg, uint32 arg); +typedef bool (*si_intrsenabled_t)(void *intr_arg); + +typedef struct gpioh_item { + void *arg; + bool level; + gpio_handler_t handler; + uint32 event; + struct gpioh_item *next; +} gpioh_item_t; + +/* misc si info needed by some of the routines */ +typedef struct si_info { + struct si_pub pub; /* back plane public state (must be first field) */ + + void *osh; /* osl os handle */ + void *sdh; /* bcmsdh handle */ + + uint dev_coreid; /* the core provides driver functions */ + void *intr_arg; /* interrupt callback function arg */ + si_intrsoff_t intrsoff_fn; /* turns chip interrupts off */ + si_intrsrestore_t intrsrestore_fn; /* restore chip interrupts */ + si_intrsenabled_t intrsenabled_fn; /* check if interrupts are enabled */ + + void *pch; /* PCI/E core handle */ + + gpioh_item_t *gpioh_head; /* GPIO event handlers list */ + + bool memseg; /* flag to toggle MEM_SEG register */ + + char *vars; + uint varsz; + + void *curmap; /* current regs va */ + void *regs[SI_MAXCORES]; /* other regs va */ + + uint curidx; /* current core index */ + uint numcores; /* # discovered cores */ + uint coreid[SI_MAXCORES]; /* id of each core */ + uint32 coresba[SI_MAXCORES]; /* backplane address of each core */ + void *regs2[SI_MAXCORES]; /* va of each core second register set (usbh20) */ + uint32 coresba2[SI_MAXCORES]; /* address of each core second register set (usbh20) */ + uint32 coresba_size[SI_MAXCORES]; /* backplane address space size */ + uint32 coresba2_size[SI_MAXCORES]; /* second address space size */ + + void *curwrap; /* current wrapper va */ + void *wrappers[SI_MAXCORES]; /* other cores wrapper va */ + uint32 wrapba[SI_MAXCORES]; /* address of controlling wrapper */ + + uint32 cia[SI_MAXCORES]; /* erom cia entry for each core */ + uint32 cib[SI_MAXCORES]; /* erom cia entry for each core */ + uint32 oob_router; /* oob router registers for axi */ +} si_info_t; + +#define SI_INFO(sih) (si_info_t *)(uintptr)sih + +#define GOODCOREADDR(x, b) (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \ + ISALIGNED((x), SI_CORE_SIZE)) +#define GOODREGS(regs) ((regs) != NULL && ISALIGNED((uintptr)(regs), SI_CORE_SIZE)) +#define BADCOREADDR 0 +#define GOODIDX(idx) (((uint)idx) < SI_MAXCORES) +#define NOREV -1 /* Invalid rev */ + +#define PCI(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \ + ((si)->pub.buscoretype == PCI_CORE_ID)) +#define PCIE(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \ + ((si)->pub.buscoretype == PCIE_CORE_ID)) +#define PCMCIA(si) ((BUSTYPE((si)->pub.bustype) == PCMCIA_BUS) && ((si)->memseg == TRUE)) + +/* Newer chips can access PCI/PCIE and CC core without requiring to change + * PCI BAR0 WIN + */ +#define SI_FAST(si) (((si)->pub.buscoretype == PCIE_CORE_ID) || \ + (((si)->pub.buscoretype == PCI_CORE_ID) && (si)->pub.buscorerev >= 13)) + +#define PCIEREGS(si) (((char *)((si)->curmap) + PCI_16KB0_PCIREGS_OFFSET)) +#define CCREGS_FAST(si) (((char *)((si)->curmap) + PCI_16KB0_CCREGS_OFFSET)) + +/* + * Macros to disable/restore function core(D11, ENET, ILINE20, etc) interrupts before/ + * after core switching 
to avoid invalid register accesss inside ISR. + */ +#define INTR_OFF(si, intr_val) \ + if ((si)->intrsoff_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) { \ + intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); } +#define INTR_RESTORE(si, intr_val) \ + if ((si)->intrsrestore_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) { \ + (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); } + +/* dynamic clock control defines */ +#define LPOMINFREQ 25000 /* low power oscillator min */ +#define LPOMAXFREQ 43000 /* low power oscillator max */ +#define XTALMINFREQ 19800000 /* 20 MHz - 1% */ +#define XTALMAXFREQ 20200000 /* 20 MHz + 1% */ +#define PCIMINFREQ 25000000 /* 25 MHz */ +#define PCIMAXFREQ 34000000 /* 33 MHz + fudge */ + +#define ILP_DIV_5MHZ 0 /* ILP = 5 MHz */ +#define ILP_DIV_1MHZ 4 /* ILP = 1 MHz */ + +#define PCI_FORCEHT(si) \ + (((PCIE(si)) && (si->pub.chip == BCM4311_CHIP_ID) && ((si->pub.chiprev <= 1))) || \ + ((PCI(si) || PCIE(si)) && (si->pub.chip == BCM4321_CHIP_ID)) || \ + (PCIE(si) && (si->pub.chip == BCM4716_CHIP_ID))) + +/* GPIO Based LED powersave defines */ +#define DEFAULT_GPIO_ONTIME 10 /* Default: 10% on */ +#define DEFAULT_GPIO_OFFTIME 90 /* Default: 10% on */ + +#ifndef DEFAULT_GPIOTIMERVAL +#define DEFAULT_GPIOTIMERVAL ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME) +#endif + +/* Silicon Backplane externs */ +extern void sb_scan(si_t *sih, void *regs, uint devid); +extern uint sb_coreid(si_t *sih); +extern uint sb_intflag(si_t *sih); +extern uint sb_flag(si_t *sih); +extern void sb_setint(si_t *sih, int siflag); +extern uint sb_corevendor(si_t *sih); +extern uint sb_corerev(si_t *sih); +extern uint sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val); +extern bool sb_iscoreup(si_t *sih); +extern void *sb_setcoreidx(si_t *sih, uint coreidx); +extern uint32 sb_core_cflags(si_t *sih, uint32 mask, uint32 val); +extern void sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val); +extern uint32 sb_core_sflags(si_t *sih, uint32 mask, uint32 val); +extern void sb_commit(si_t *sih); +extern uint32 sb_base(uint32 admatch); +extern uint32 sb_size(uint32 admatch); +extern void sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits); +extern void sb_core_disable(si_t *sih, uint32 bits); +extern uint32 sb_addrspace(si_t *sih, uint asidx); +extern uint32 sb_addrspacesize(si_t *sih, uint asidx); +extern int sb_numaddrspaces(si_t *sih); + +extern uint32 sb_set_initiator_to(si_t *sih, uint32 to, uint idx); + +extern bool sb_taclear(si_t *sih, bool details); + + +/* Wake-on-wireless-LAN (WOWL) */ +extern bool sb_pci_pmecap(si_t *sih); +struct osl_info; +extern bool sb_pci_fastpmecap(struct osl_info *osh); +extern bool sb_pci_pmeclr(si_t *sih); +extern void sb_pci_pmeen(si_t *sih); +extern uint sb_pcie_readreg(void *sih, uint addrtype, uint offset); + +/* AMBA Interconnect exported externs */ +extern si_t *ai_attach(uint pcidev, osl_t *osh, void *regs, uint bustype, + void *sdh, char **vars, uint *varsz); +extern si_t *ai_kattach(osl_t *osh); +extern void ai_scan(si_t *sih, void *regs, uint devid); + +extern uint ai_flag(si_t *sih); +extern void ai_setint(si_t *sih, int siflag); +extern uint ai_coreidx(si_t *sih); +extern uint ai_corevendor(si_t *sih); +extern uint ai_corerev(si_t *sih); +extern bool ai_iscoreup(si_t *sih); +extern void *ai_setcoreidx(si_t *sih, uint coreidx); +extern uint32 ai_core_cflags(si_t *sih, uint32 mask, uint32 val); +extern void ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val); +extern uint32 ai_core_sflags(si_t 
*sih, uint32 mask, uint32 val); +extern uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val); +extern void ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits); +extern void ai_core_disable(si_t *sih, uint32 bits); +extern int ai_numaddrspaces(si_t *sih); +extern uint32 ai_addrspace(si_t *sih, uint asidx); +extern uint32 ai_addrspacesize(si_t *sih, uint asidx); +extern uint ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val); + + + +#define ub_scan(a, b, c) do {} while (0) +#define ub_flag(a) (0) +#define ub_setint(a, b) do {} while (0) +#define ub_coreidx(a) (0) +#define ub_corevendor(a) (0) +#define ub_corerev(a) (0) +#define ub_iscoreup(a) (0) +#define ub_setcoreidx(a, b) (0) +#define ub_core_cflags(a, b, c) (0) +#define ub_core_cflags_wo(a, b, c) do {} while (0) +#define ub_core_sflags(a, b, c) (0) +#define ub_corereg(a, b, c, d, e) (0) +#define ub_core_reset(a, b, c) do {} while (0) +#define ub_core_disable(a, b) do {} while (0) +#define ub_numaddrspaces(a) (0) +#define ub_addrspace(a, b) (0) +#define ub_addrspacesize(a, b) (0) +#define ub_view(a, b) do {} while (0) +#define ub_dumpregs(a, b) do {} while (0) + +#endif /* _siutils_priv_h_ */ diff --git a/drivers/net/wireless/bcmdhd/uamp_api.h b/drivers/net/wireless/bcmdhd/uamp_api.h new file mode 100644 index 0000000000000..c51c68cd0eede --- /dev/null +++ b/drivers/net/wireless/bcmdhd/uamp_api.h @@ -0,0 +1,176 @@ +/* + * Name: uamp_api.h + * + * Description: Universal AMP API + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: uamp_api.h,v 1.2.8.1 2011-02-05 00:16:14 Exp $ + * + */ +#ifndef UAMP_API_H +#define UAMP_API_H + + +#include "typedefs.h" + + +/***************************************************************************** +** Constant and Type Definitions +****************************************************************************** +*/ + +#define BT_API + +/* Types. 
*/ +typedef bool BOOLEAN; +typedef uint8 UINT8; +typedef uint16 UINT16; + + +/* UAMP identifiers */ +#define UAMP_ID_1 1 +#define UAMP_ID_2 2 +typedef UINT8 tUAMP_ID; + +/* UAMP event ids (used by UAMP_CBACK) */ +#define UAMP_EVT_RX_READY 0 /* Data from AMP controller is ready to be read */ +#define UAMP_EVT_CTLR_REMOVED 1 /* Controller removed */ +#define UAMP_EVT_CTLR_READY 2 /* Controller added/ready */ +typedef UINT8 tUAMP_EVT; + + +/* UAMP Channels */ +#define UAMP_CH_HCI_CMD 0 /* HCI Command channel */ +#define UAMP_CH_HCI_EVT 1 /* HCI Event channel */ +#define UAMP_CH_HCI_DATA 2 /* HCI ACL Data channel */ +typedef UINT8 tUAMP_CH; + +/* tUAMP_EVT_DATA: union for event-specific data, used by UAMP_CBACK */ +typedef union { + tUAMP_CH channel; /* UAMP_EVT_RX_READY: channel for which rx occured */ +} tUAMP_EVT_DATA; + + +/***************************************************************************** +** +** Function: UAMP_CBACK +** +** Description: Callback for events. Register callback using UAMP_Init. +** +** Parameters amp_id: AMP device identifier that generated the event +** amp_evt: event id +** p_amp_evt_data: pointer to event-specific data +** +****************************************************************************** +*/ +typedef void (*tUAMP_CBACK)(tUAMP_ID amp_id, tUAMP_EVT amp_evt, tUAMP_EVT_DATA *p_amp_evt_data); + +/***************************************************************************** +** external function declarations +****************************************************************************** +*/ +#ifdef __cplusplus +extern "C" +{ +#endif + +/***************************************************************************** +** +** Function: UAMP_Init +** +** Description: Initialize UAMP driver +** +** Parameters p_cback: Callback function for UAMP event notification +** +****************************************************************************** +*/ +BT_API BOOLEAN UAMP_Init(tUAMP_CBACK p_cback); + + +/***************************************************************************** +** +** Function: UAMP_Open +** +** Description: Open connection to local AMP device. +** +** Parameters app_id: Application specific AMP identifer. This value +** will be included in AMP messages sent to the +** BTU task, to identify source of the message +** +****************************************************************************** +*/ +BT_API BOOLEAN UAMP_Open(tUAMP_ID amp_id); + +/***************************************************************************** +** +** Function: UAMP_Close +** +** Description: Close connection to local AMP device. +** +** Parameters app_id: Application specific AMP identifer. +** +****************************************************************************** +*/ +BT_API void UAMP_Close(tUAMP_ID amp_id); + + +/***************************************************************************** +** +** Function: UAMP_Write +** +** Description: Send buffer to AMP device. Frees GKI buffer when done. +** +** +** Parameters: app_id: AMP identifer. +** p_buf: pointer to buffer to write +** num_bytes: number of bytes to write +** channel: UAMP_CH_HCI_ACL, or UAMP_CH_HCI_CMD +** +** Returns: number of bytes written +** +****************************************************************************** +*/ +BT_API UINT16 UAMP_Write(tUAMP_ID amp_id, UINT8 *p_buf, UINT16 num_bytes, tUAMP_CH channel); + +/***************************************************************************** +** +** Function: UAMP_Read +** +** Description: Read incoming data from AMP. 
Call after receiving a +** UAMP_EVT_RX_READY callback event. +** +** Parameters: app_id: AMP identifer. +** p_buf: pointer to buffer for holding incoming AMP data +** buf_size: size of p_buf +** channel: UAMP_CH_HCI_ACL, or UAMP_CH_HCI_EVT +** +** Returns: number of bytes read +** +****************************************************************************** +*/ +BT_API UINT16 UAMP_Read(tUAMP_ID amp_id, UINT8 *p_buf, UINT16 buf_size, tUAMP_CH channel); + +#ifdef __cplusplus +} +#endif + +#endif /* UAMP_API_H */ diff --git a/drivers/net/wireless/bcmdhd/wl_android.c b/drivers/net/wireless/bcmdhd/wl_android.c new file mode 100644 index 0000000000000..4fcdcd32a3ff8 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_android.c @@ -0,0 +1,858 @@ +/* + * Linux cfg80211 driver - Android related functions + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
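A minimal caller sketch for the UAMP API declared above (illustrative only; the callback, buffer, and HCI_Reset payload here are hypothetical examples, not part of the driver):

	#include "uamp_api.h"

	/* Hypothetical event callback: drain whichever channel reported data. */
	static void example_uamp_cback(tUAMP_ID amp_id, tUAMP_EVT amp_evt, tUAMP_EVT_DATA *p_evt_data)
	{
		UINT8 rx_buf[1024];

		if (amp_evt == UAMP_EVT_RX_READY)
			UAMP_Read(amp_id, rx_buf, sizeof(rx_buf), p_evt_data->channel);
	}

	static void example_uamp_usage(void)
	{
		/* Example HCI command payload (HCI_Reset: opcode 0x0c03, no parameters) */
		UINT8 hci_reset[] = { 0x03, 0x0c, 0x00 };

		if (UAMP_Init(example_uamp_cback) && UAMP_Open(UAMP_ID_1))
			UAMP_Write(UAMP_ID_1, hci_reset, sizeof(hci_reset), UAMP_CH_HCI_CMD);
	}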
+ * + * $Id: wl_android.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $ + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef WL_CFG80211 +#include +#endif +#if defined(CONFIG_WIFI_CONTROL_FUNC) +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)) +#include +#else +#include +#endif +#endif /* CONFIG_WIFI_CONTROL_FUNC */ + +/* + * Android private command strings, PLEASE define new private commands here + * so they can be updated easily in the future (if needed) + */ + +#define CMD_START "START" +#define CMD_STOP "STOP" +#define CMD_SCAN_ACTIVE "SCAN-ACTIVE" +#define CMD_SCAN_PASSIVE "SCAN-PASSIVE" +#define CMD_RSSI "RSSI" +#define CMD_LINKSPEED "LINKSPEED" +#define CMD_RXFILTER_START "RXFILTER-START" +#define CMD_RXFILTER_STOP "RXFILTER-STOP" +#define CMD_RXFILTER_ADD "RXFILTER-ADD" +#define CMD_RXFILTER_REMOVE "RXFILTER-REMOVE" +#define CMD_BTCOEXSCAN_START "BTCOEXSCAN-START" +#define CMD_BTCOEXSCAN_STOP "BTCOEXSCAN-STOP" +#define CMD_BTCOEXMODE "BTCOEXMODE" +#define CMD_SETSUSPENDOPT "SETSUSPENDOPT" +#define CMD_SETSUSPENDMODE "SETSUSPENDMODE" +#define CMD_P2P_DEV_ADDR "P2P_DEV_ADDR" +#define CMD_SETFWPATH "SETFWPATH" +#define CMD_SETBAND "SETBAND" +#define CMD_GETBAND "GETBAND" +#define CMD_COUNTRY "COUNTRY" +#define CMD_P2P_SET_NOA "P2P_SET_NOA" +#if !defined WL_ENABLE_P2P_IF +#define CMD_P2P_GET_NOA "P2P_GET_NOA" +#endif +#define CMD_P2P_SET_PS "P2P_SET_PS" +#define CMD_SET_AP_WPS_P2P_IE "SET_AP_WPS_P2P_IE" + + +#ifdef PNO_SUPPORT +#define CMD_PNOSSIDCLR_SET "PNOSSIDCLR" +#define CMD_PNOSETUP_SET "PNOSETUP " +#define CMD_PNOENABLE_SET "PNOFORCE" +#define CMD_PNODEBUG_SET "PNODEBUG" + +#define PNO_TLV_PREFIX 'S' +#define PNO_TLV_VERSION '1' +#define PNO_TLV_SUBVERSION '2' +#define PNO_TLV_RESERVED '0' +#define PNO_TLV_TYPE_SSID_IE 'S' +#define PNO_TLV_TYPE_TIME 'T' +#define PNO_TLV_FREQ_REPEAT 'R' +#define PNO_TLV_FREQ_EXPO_MAX 'M' + +typedef struct cmd_tlv { + char prefix; + char version; + char subver; + char reserved; +} cmd_tlv_t; +#endif /* PNO_SUPPORT */ + +typedef struct android_wifi_priv_cmd { + char *buf; + int used_len; + int total_len; +} android_wifi_priv_cmd; + +/** + * Extern function declarations (TODO: move them to dhd_linux.h) + */ +void dhd_customer_gpio_wlan_ctrl(int onoff); +uint dhd_dev_reset(struct net_device *dev, uint8 flag); +int dhd_dev_init_ioctl(struct net_device *dev); +#ifdef WL_CFG80211 +int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr); +int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, char *command); +#else +int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr) +{ return 0; } +int wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len) +{ return 0; } +int wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len) +{ return 0; } +int wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len) +{ return 0; } +#endif +extern int dhd_os_check_if_up(void *dhdp); +extern void *bcmsdh_get_drvdata(void); + +extern bool ap_fw_loaded; +#ifdef CUSTOMER_HW2 +extern char iface_name[IFNAMSIZ]; +#endif + +/** + * Local (static) functions and variables + */ + +/* Initialize g_wifi_on to 1 so dhd_bus_start will be called for the first + * time (only) in dhd_open, subsequential wifi on will be handled by + * wl_android_wifi_on + */ +static int g_wifi_on = TRUE; + +/** + * Local (static) function definitions + */ +static int wl_android_get_link_speed(struct net_device *net, char *command, 
int total_len) +{ + int link_speed; + int bytes_written; + int error; + + error = wldev_get_link_speed(net, &link_speed); + if (error) + return -1; + + /* Convert Kbps to Android Mbps */ + link_speed = link_speed / 1000; + bytes_written = snprintf(command, total_len, "LinkSpeed %d", link_speed); + DHD_INFO(("%s: command result is %s\n", __FUNCTION__, command)); + return bytes_written; +} + +static int wl_android_get_rssi(struct net_device *net, char *command, int total_len) +{ + wlc_ssid_t ssid = {0}; + int rssi; + int bytes_written = 0; + int error; + + error = wldev_get_rssi(net, &rssi); + if (error) + return -1; + + error = wldev_get_ssid(net, &ssid); + if (error) + return -1; + if ((ssid.SSID_len == 0) || (ssid.SSID_len > DOT11_MAX_SSID_LEN)) { + DHD_ERROR(("%s: wldev_get_ssid failed\n", __FUNCTION__)); + } else { + memcpy(command, ssid.SSID, ssid.SSID_len); + bytes_written = ssid.SSID_len; + } + bytes_written += snprintf(&command[bytes_written], total_len, " rssi %d", rssi); + DHD_INFO(("%s: command result is %s (%d)\n", __FUNCTION__, command, bytes_written)); + return bytes_written; +} + +static int wl_android_set_suspendopt(struct net_device *dev, char *command, int total_len) +{ + int suspend_flag; + int ret_now; + int ret = 0; + + suspend_flag = *(command + strlen(CMD_SETSUSPENDOPT) + 1) - '0'; + + if (suspend_flag != 0) + suspend_flag = 1; + ret_now = net_os_set_suspend_disable(dev, suspend_flag); + + if (ret_now != suspend_flag) { + if (!(ret = net_os_set_suspend(dev, ret_now, 1))) + DHD_INFO(("%s: Suspend Flag %d -> %d\n", + __FUNCTION__, ret_now, suspend_flag)); + else + DHD_ERROR(("%s: failed %d\n", __FUNCTION__, ret)); + } + return ret; +} + +static int wl_android_set_suspendmode(struct net_device *dev, char *command, int total_len) +{ + int ret = 0; + +#if !defined(CONFIG_HAS_EARLYSUSPEND) || !defined(DHD_USE_EARLYSUSPEND) + int suspend_flag; + + suspend_flag = *(command + strlen(CMD_SETSUSPENDMODE) + 1) - '0'; + + if (suspend_flag != 0) + suspend_flag = 1; + + if (!(ret = net_os_set_suspend(dev, suspend_flag, 0))) + DHD_INFO(("%s: Suspend Mode %d\n",__FUNCTION__,suspend_flag)); + else + DHD_ERROR(("%s: failed %d\n",__FUNCTION__,ret)); +#endif + return ret; +} + +static int wl_android_get_band(struct net_device *dev, char *command, int total_len) +{ + uint band; + int bytes_written; + int error; + + error = wldev_get_band(dev, &band); + if (error) + return -1; + bytes_written = snprintf(command, total_len, "Band %d", band); + return bytes_written; +} + +#if defined(PNO_SUPPORT) && !defined(WL_SCHED_SCAN) +static int wl_android_set_pno_setup(struct net_device *dev, char *command, int total_len) +{ + wlc_ssid_t ssids_local[MAX_PFN_LIST_COUNT]; + int res = -1; + int nssid = 0; + cmd_tlv_t *cmd_tlv_temp; + char *str_ptr; + int tlv_size_left; + int pno_time = 0; + int pno_repeat = 0; + int pno_freq_expo_max = 0; + +#ifdef PNO_SET_DEBUG + int i; + char pno_in_example[] = { + 'P', 'N', 'O', 'S', 'E', 'T', 'U', 'P', ' ', + 'S', '1', '2', '0', + 'S', + 0x05, + 'd', 'l', 'i', 'n', 'k', + 'S', + 0x04, + 'G', 'O', 'O', 'G', + 'T', + '0', 'B', + 'R', + '2', + 'M', + '2', + 0x00 + }; +#endif /* PNO_SET_DEBUG */ + + DHD_INFO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len)); + + if (total_len < (strlen(CMD_PNOSETUP_SET) + sizeof(cmd_tlv_t))) { + DHD_ERROR(("%s argument=%d less min size\n", __FUNCTION__, total_len)); + goto exit_proc; + } + +#ifdef PNO_SET_DEBUG + memcpy(command, pno_in_example, sizeof(pno_in_example)); + for (i = 0; i < sizeof(pno_in_example); i++) + 
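+		/*
+		 * Dump the canned request byte by byte so the raw TLV layout is
+		 * visible in the log.  Decoded, pno_in_example above reads:
+		 * "PNOSETUP " + 'S','1','2','0' (prefix/version/subver/reserved),
+		 * then 'S',0x05,"dlink" and 'S',0x04,"GOOG" (two SSID records),
+		 * 'T',"0B" (scan period, parsed as hex = 11),
+		 * 'R',"2" (repeat) and 'M',"2" (frequency backoff exponent max).
+		 */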
printf("%02X ", command[i]); + printf("\n"); + total_len = sizeof(pno_in_example); +#endif + + str_ptr = command + strlen(CMD_PNOSETUP_SET); + tlv_size_left = total_len - strlen(CMD_PNOSETUP_SET); + + cmd_tlv_temp = (cmd_tlv_t *)str_ptr; + memset(ssids_local, 0, sizeof(ssids_local)); + + if ((cmd_tlv_temp->prefix == PNO_TLV_PREFIX) && + (cmd_tlv_temp->version == PNO_TLV_VERSION) && + (cmd_tlv_temp->subver == PNO_TLV_SUBVERSION)) { + + str_ptr += sizeof(cmd_tlv_t); + tlv_size_left -= sizeof(cmd_tlv_t); + + if ((nssid = wl_iw_parse_ssid_list_tlv(&str_ptr, ssids_local, + MAX_PFN_LIST_COUNT, &tlv_size_left)) <= 0) { + DHD_ERROR(("SSID is not presented or corrupted ret=%d\n", nssid)); + goto exit_proc; + } else { + if ((str_ptr[0] != PNO_TLV_TYPE_TIME) || (tlv_size_left <= 1)) { + DHD_ERROR(("%s scan duration corrupted field size %d\n", + __FUNCTION__, tlv_size_left)); + goto exit_proc; + } + str_ptr++; + pno_time = simple_strtoul(str_ptr, &str_ptr, 16); + DHD_INFO(("%s: pno_time=%d\n", __FUNCTION__, pno_time)); + + if (str_ptr[0] != 0) { + if ((str_ptr[0] != PNO_TLV_FREQ_REPEAT)) { + DHD_ERROR(("%s pno repeat : corrupted field\n", + __FUNCTION__)); + goto exit_proc; + } + str_ptr++; + pno_repeat = simple_strtoul(str_ptr, &str_ptr, 16); + DHD_INFO(("%s :got pno_repeat=%d\n", __FUNCTION__, pno_repeat)); + if (str_ptr[0] != PNO_TLV_FREQ_EXPO_MAX) { + DHD_ERROR(("%s FREQ_EXPO_MAX corrupted field size\n", + __FUNCTION__)); + goto exit_proc; + } + str_ptr++; + pno_freq_expo_max = simple_strtoul(str_ptr, &str_ptr, 16); + DHD_INFO(("%s: pno_freq_expo_max=%d\n", + __FUNCTION__, pno_freq_expo_max)); + } + } + } else { + DHD_ERROR(("%s get wrong TLV command\n", __FUNCTION__)); + goto exit_proc; + } + + res = dhd_dev_pno_set(dev, ssids_local, nssid, pno_time, pno_repeat, pno_freq_expo_max); + +exit_proc: + return res; +} +#endif /* PNO_SUPPORT && !WL_SCHED_SCAN */ + +static int wl_android_get_p2p_dev_addr(struct net_device *ndev, char *command, int total_len) +{ + int ret; + int bytes_written = 0; + + ret = wl_cfg80211_get_p2p_dev_addr(ndev, (struct ether_addr*)command); + if (ret) + return 0; + bytes_written = sizeof(struct ether_addr); + return bytes_written; +} + +/** + * Global function definitions (declared in wl_android.h) + */ + +int wl_android_wifi_on(struct net_device *dev) +{ + int ret = 0; + + printk("%s in\n", __FUNCTION__); + if (!dev) { + DHD_ERROR(("%s: dev is null\n", __FUNCTION__)); + return -EINVAL; + } + + dhd_net_if_lock(dev); + if (!g_wifi_on) { + dhd_customer_gpio_wlan_ctrl(WLAN_RESET_ON); + sdioh_start(NULL, 0); + ret = dhd_dev_reset(dev, FALSE); + sdioh_start(NULL, 1); + if (!ret) { + if (dhd_dev_init_ioctl(dev) < 0) + ret = -EFAULT; + } + g_wifi_on = 1; + } + dhd_net_if_unlock(dev); + + return ret; +} + +int wl_android_wifi_off(struct net_device *dev) +{ + int ret = 0; + + printk("%s in\n", __FUNCTION__); + if (!dev) { + DHD_TRACE(("%s: dev is null\n", __FUNCTION__)); + return -EINVAL; + } + + dhd_net_if_lock(dev); + if (g_wifi_on) { + ret = dhd_dev_reset(dev, TRUE); + sdioh_stop(NULL); + dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF); + g_wifi_on = 0; + } + dhd_net_if_unlock(dev); + + return ret; +} + +static int wl_android_set_fwpath(struct net_device *net, char *command, int total_len) +{ + if ((strlen(command) - strlen(CMD_SETFWPATH)) > MOD_PARAM_PATHLEN) + return -1; + bcm_strncpy_s(fw_path, sizeof(fw_path), + command + strlen(CMD_SETFWPATH) + 1, MOD_PARAM_PATHLEN - 1); + if (strstr(fw_path, "apsta") != NULL) { + DHD_INFO(("GOT APSTA FIRMWARE\n")); + ap_fw_loaded = TRUE; + } 
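+	/*
+	 * Firmware builds that support concurrent AP+STA operation are
+	 * conventionally named with an "apsta" suffix (for example
+	 * "fw_bcmdhd_apsta.bin"); any other image is treated as STA-only
+	 * firmware and the else branch below clears ap_fw_loaded.
+	 */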
else { + DHD_INFO(("GOT STA FIRMWARE\n")); + ap_fw_loaded = FALSE; + } + return 0; +} + +int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd) +{ + int ret = 0; + char *command = NULL; + int bytes_written = 0; + android_wifi_priv_cmd priv_cmd; + + net_os_wake_lock(net); + + if (!ifr->ifr_data) { + ret = -EINVAL; + goto exit; + } + if (copy_from_user(&priv_cmd, ifr->ifr_data, sizeof(android_wifi_priv_cmd))) { + ret = -EFAULT; + goto exit; + } + command = kmalloc(priv_cmd.total_len, GFP_KERNEL); + if (!command) + { + DHD_ERROR(("%s: failed to allocate memory\n", __FUNCTION__)); + ret = -ENOMEM; + goto exit; + } + if (copy_from_user(command, priv_cmd.buf, priv_cmd.total_len)) { + ret = -EFAULT; + goto exit; + } + + DHD_INFO(("%s: Android private cmd \"%s\" on %s\n", __FUNCTION__, command, ifr->ifr_name)); + + if (strnicmp(command, CMD_START, strlen(CMD_START)) == 0) { + DHD_INFO(("%s, Received regular START command\n", __FUNCTION__)); + bytes_written = wl_android_wifi_on(net); + } + else if (strnicmp(command, CMD_SETFWPATH, strlen(CMD_SETFWPATH)) == 0) { + bytes_written = wl_android_set_fwpath(net, command, priv_cmd.total_len); + } + + if (!g_wifi_on) { + DHD_ERROR(("%s: Ignore private cmd \"%s\" - iface %s is down\n", + __FUNCTION__, command, ifr->ifr_name)); + ret = 0; + goto exit; + } + + if (strnicmp(command, CMD_STOP, strlen(CMD_STOP)) == 0) { + bytes_written = wl_android_wifi_off(net); + } + else if (strnicmp(command, CMD_SCAN_ACTIVE, strlen(CMD_SCAN_ACTIVE)) == 0) { + /* TBD: SCAN-ACTIVE */ + } + else if (strnicmp(command, CMD_SCAN_PASSIVE, strlen(CMD_SCAN_PASSIVE)) == 0) { + /* TBD: SCAN-PASSIVE */ + } + else if (strnicmp(command, CMD_RSSI, strlen(CMD_RSSI)) == 0) { + bytes_written = wl_android_get_rssi(net, command, priv_cmd.total_len); + } + else if (strnicmp(command, CMD_LINKSPEED, strlen(CMD_LINKSPEED)) == 0) { + bytes_written = wl_android_get_link_speed(net, command, priv_cmd.total_len); + } + else if (strnicmp(command, CMD_RXFILTER_START, strlen(CMD_RXFILTER_START)) == 0) { + bytes_written = net_os_set_packet_filter(net, 1); + } + else if (strnicmp(command, CMD_RXFILTER_STOP, strlen(CMD_RXFILTER_STOP)) == 0) { + bytes_written = net_os_set_packet_filter(net, 0); + } + else if (strnicmp(command, CMD_RXFILTER_ADD, strlen(CMD_RXFILTER_ADD)) == 0) { + int filter_num = *(command + strlen(CMD_RXFILTER_ADD) + 1) - '0'; + bytes_written = net_os_rxfilter_add_remove(net, TRUE, filter_num); + } + else if (strnicmp(command, CMD_RXFILTER_REMOVE, strlen(CMD_RXFILTER_REMOVE)) == 0) { + int filter_num = *(command + strlen(CMD_RXFILTER_REMOVE) + 1) - '0'; + bytes_written = net_os_rxfilter_add_remove(net, FALSE, filter_num); + } + else if (strnicmp(command, CMD_BTCOEXSCAN_START, strlen(CMD_BTCOEXSCAN_START)) == 0) { + /* TBD: BTCOEXSCAN-START */ + } + else if (strnicmp(command, CMD_BTCOEXSCAN_STOP, strlen(CMD_BTCOEXSCAN_STOP)) == 0) { + /* TBD: BTCOEXSCAN-STOP */ + } + else if (strnicmp(command, CMD_BTCOEXMODE, strlen(CMD_BTCOEXMODE)) == 0) { + uint mode = *(command + strlen(CMD_BTCOEXMODE) + 1) - '0'; + + if (mode == 1) + net_os_set_packet_filter(net, 0); /* DHCP starts */ + else + net_os_set_packet_filter(net, 1); /* DHCP ends */ +#ifdef WL_CFG80211 + bytes_written = wl_cfg80211_set_btcoex_dhcp(net, command); +#endif + } + else if (strnicmp(command, CMD_SETSUSPENDOPT, strlen(CMD_SETSUSPENDOPT)) == 0) { + bytes_written = wl_android_set_suspendopt(net, command, priv_cmd.total_len); + } + else if (strnicmp(command, CMD_SETSUSPENDMODE, strlen(CMD_SETSUSPENDMODE)) == 0) { + 
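+		/*
+		 * Expected syntax: "SETSUSPENDMODE <0|1>", a single ASCII digit
+		 * one byte past the command name, which is exactly how
+		 * wl_android_set_suspendmode() parses it above.
+		 */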
bytes_written = wl_android_set_suspendmode(net, command, priv_cmd.total_len); + } + else if (strnicmp(command, CMD_SETBAND, strlen(CMD_SETBAND)) == 0) { + uint band = *(command + strlen(CMD_SETBAND) + 1) - '0'; + bytes_written = wldev_set_band(net, band); + } + else if (strnicmp(command, CMD_GETBAND, strlen(CMD_GETBAND)) == 0) { + bytes_written = wl_android_get_band(net, command, priv_cmd.total_len); + } + else if (strnicmp(command, CMD_COUNTRY, strlen(CMD_COUNTRY)) == 0) { + char *country_code = command + strlen(CMD_COUNTRY) + 1; + bytes_written = wldev_set_country(net, country_code); + } +#if defined(PNO_SUPPORT) && !defined(WL_SCHED_SCAN) + else if (strnicmp(command, CMD_PNOSSIDCLR_SET, strlen(CMD_PNOSSIDCLR_SET)) == 0) { + bytes_written = dhd_dev_pno_reset(net); + } + else if (strnicmp(command, CMD_PNOSETUP_SET, strlen(CMD_PNOSETUP_SET)) == 0) { + bytes_written = wl_android_set_pno_setup(net, command, priv_cmd.total_len); + } + else if (strnicmp(command, CMD_PNOENABLE_SET, strlen(CMD_PNOENABLE_SET)) == 0) { + uint pfn_enabled = *(command + strlen(CMD_PNOENABLE_SET) + 1) - '0'; + bytes_written = dhd_dev_pno_enable(net, pfn_enabled); + } +#endif + else if (strnicmp(command, CMD_P2P_DEV_ADDR, strlen(CMD_P2P_DEV_ADDR)) == 0) { + bytes_written = wl_android_get_p2p_dev_addr(net, command, priv_cmd.total_len); + } + else if (strnicmp(command, CMD_P2P_SET_NOA, strlen(CMD_P2P_SET_NOA)) == 0) { + int skip = strlen(CMD_P2P_SET_NOA) + 1; + bytes_written = wl_cfg80211_set_p2p_noa(net, command + skip, + priv_cmd.total_len - skip); + } +#if !defined WL_ENABLE_P2P_IF + else if (strnicmp(command, CMD_P2P_GET_NOA, strlen(CMD_P2P_GET_NOA)) == 0) { + bytes_written = wl_cfg80211_get_p2p_noa(net, command, priv_cmd.total_len); + } +#endif + else if (strnicmp(command, CMD_P2P_SET_PS, strlen(CMD_P2P_SET_PS)) == 0) { + int skip = strlen(CMD_P2P_SET_PS) + 1; + bytes_written = wl_cfg80211_set_p2p_ps(net, command + skip, + priv_cmd.total_len - skip); + } +#ifdef WL_CFG80211 + else if (strnicmp(command, CMD_SET_AP_WPS_P2P_IE, + strlen(CMD_SET_AP_WPS_P2P_IE)) == 0) { + int skip = strlen(CMD_SET_AP_WPS_P2P_IE) + 3; + bytes_written = wl_cfg80211_set_wps_p2p_ie(net, command + skip, + priv_cmd.total_len - skip, *(command + skip - 2) - '0'); + } +#endif /* WL_CFG80211 */ + else { + DHD_ERROR(("Unknown PRIVATE command %s - ignored\n", command)); + snprintf(command, 3, "OK"); + bytes_written = strlen("OK"); + } + + if (bytes_written >= 0) { + if ((bytes_written == 0) && (priv_cmd.total_len > 0)) + command[0] = '\0'; + if (bytes_written >= priv_cmd.total_len) { + DHD_ERROR(("%s: bytes_written = %d\n", __FUNCTION__, bytes_written)); + bytes_written = priv_cmd.total_len; + } else { + bytes_written++; + } + priv_cmd.used_len = bytes_written; + if (copy_to_user(priv_cmd.buf, command, bytes_written)) { + DHD_ERROR(("%s: failed to copy data to user buffer\n", __FUNCTION__)); + ret = -EFAULT; + } + } + else { + ret = bytes_written; + } + +exit: + net_os_wake_unlock(net); + if (command) { + kfree(command); + } + + return ret; +} + +int wl_android_init(void) +{ + int ret = 0; + + dhd_msg_level |= DHD_ERROR_VAL; +#ifdef ENABLE_INSMOD_NO_FW_LOAD + dhd_download_fw_on_driverload = FALSE; +#endif /* ENABLE_INSMOD_NO_FW_LOAD */ +#ifdef CUSTOMER_HW2 + if (!iface_name[0]) { + memset(iface_name, 0, IFNAMSIZ); + bcm_strncpy_s(iface_name, IFNAMSIZ, "wlan", IFNAMSIZ); + } +#endif /* CUSTOMER_HW2 */ + return ret; +} + +int wl_android_exit(void) +{ + int ret = 0; + + return ret; +} + +void wl_android_post_init(void) +{ + if 
(!dhd_download_fw_on_driverload) { + /* Call customer gpio to turn off power with WL_REG_ON signal */ + dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF); + g_wifi_on = 0; + } +} +/** + * Functions for Android WiFi card detection + */ +#if defined(CONFIG_WIFI_CONTROL_FUNC) + +static int g_wifidev_registered = 0; +static struct semaphore wifi_control_sem; +static struct wifi_platform_data *wifi_control_data = NULL; +static struct resource *wifi_irqres = NULL; + +static int wifi_add_dev(void); +static void wifi_del_dev(void); + +int wl_android_wifictrl_func_add(void) +{ + int ret = 0; + sema_init(&wifi_control_sem, 0); + + ret = wifi_add_dev(); + if (ret) { + DHD_ERROR(("%s: platform_driver_register failed\n", __FUNCTION__)); + return ret; + } + g_wifidev_registered = 1; + + /* Waiting callback after platform_driver_register is done or exit with error */ + if (down_timeout(&wifi_control_sem, msecs_to_jiffies(1000)) != 0) { + ret = -EINVAL; + DHD_ERROR(("%s: platform_driver_register timeout\n", __FUNCTION__)); + } + + return ret; +} + +void wl_android_wifictrl_func_del(void) +{ + if (g_wifidev_registered) + { + wifi_del_dev(); + g_wifidev_registered = 0; + } +} + +void* wl_android_prealloc(int section, unsigned long size) +{ + void *alloc_ptr = NULL; + if (wifi_control_data && wifi_control_data->mem_prealloc) { + alloc_ptr = wifi_control_data->mem_prealloc(section, size); + if (alloc_ptr) { + DHD_INFO(("success alloc section %d\n", section)); + if (size != 0L) + bzero(alloc_ptr, size); + return alloc_ptr; + } + } + + DHD_ERROR(("can't alloc section %d\n", section)); + return NULL; +} + +int wifi_get_irq_number(unsigned long *irq_flags_ptr) +{ + if (wifi_irqres) { + *irq_flags_ptr = wifi_irqres->flags & IRQF_TRIGGER_MASK; + return (int)wifi_irqres->start; + } +#ifdef CUSTOM_OOB_GPIO_NUM + return CUSTOM_OOB_GPIO_NUM; +#else + return -1; +#endif +} + +int wifi_set_power(int on, unsigned long msec) +{ + DHD_ERROR(("%s = %d\n", __FUNCTION__, on)); + if (wifi_control_data && wifi_control_data->set_power) { + wifi_control_data->set_power(on); + } + if (msec) + msleep(msec); + return 0; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)) +int wifi_get_mac_addr(unsigned char *buf) +{ + DHD_ERROR(("%s\n", __FUNCTION__)); + if (!buf) + return -EINVAL; + if (wifi_control_data && wifi_control_data->get_mac_addr) { + return wifi_control_data->get_mac_addr(buf); + } + return -EOPNOTSUPP; +} +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) +void *wifi_get_country_code(char *ccode) +{ + DHD_TRACE(("%s\n", __FUNCTION__)); + if (!ccode) + return NULL; + if (wifi_control_data && wifi_control_data->get_country_code) { + return wifi_control_data->get_country_code(ccode); + } + return NULL; +} +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */ + +static int wifi_set_carddetect(int on) +{ + DHD_ERROR(("%s = %d\n", __FUNCTION__, on)); + if (wifi_control_data && wifi_control_data->set_carddetect) { + wifi_control_data->set_carddetect(on); + } + return 0; +} + +static int wifi_probe(struct platform_device *pdev) +{ + struct wifi_platform_data *wifi_ctrl = + (struct wifi_platform_data *)(pdev->dev.platform_data); + + DHD_ERROR(("## %s\n", __FUNCTION__)); + wifi_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcmdhd_wlan_irq"); + if (wifi_irqres == NULL) + wifi_irqres = platform_get_resource_byname(pdev, + IORESOURCE_IRQ, "bcm4329_wlan_irq"); + wifi_control_data = wifi_ctrl; + + wifi_set_power(1, 0); /* Power On */ + 
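+	/*
+	 * Probe sequence: the platform set_power() callback above has raised
+	 * the WLAN enable rail (WL_REG_ON); set_carddetect() below asks the
+	 * platform code to (re)detect the card, typically by forcing an
+	 * MMC/SDIO bus rescan, and up() then releases
+	 * wl_android_wifictrl_func_add(), which blocks on wifi_control_sem
+	 * with a one second timeout.
+	 */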
wifi_set_carddetect(1); /* CardDetect (0->1) */ + + up(&wifi_control_sem); + return 0; +} + +static int wifi_remove(struct platform_device *pdev) +{ + struct wifi_platform_data *wifi_ctrl = + (struct wifi_platform_data *)(pdev->dev.platform_data); + + DHD_ERROR(("## %s\n", __FUNCTION__)); + wifi_control_data = wifi_ctrl; + + wifi_set_power(0, 0); /* Power Off */ + wifi_set_carddetect(0); /* CardDetect (1->0) */ + + up(&wifi_control_sem); + return 0; +} + +static int wifi_suspend(struct platform_device *pdev, pm_message_t state) +{ + DHD_TRACE(("##> %s\n", __FUNCTION__)); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY) + bcmsdh_oob_intr_set(0); +#endif + return 0; +} + +static int wifi_resume(struct platform_device *pdev) +{ + DHD_TRACE(("##> %s\n", __FUNCTION__)); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY) + if (dhd_os_check_if_up(bcmsdh_get_drvdata())) + bcmsdh_oob_intr_set(1); +#endif + return 0; +} + +static struct platform_driver wifi_device = { + .probe = wifi_probe, + .remove = wifi_remove, + .suspend = wifi_suspend, + .resume = wifi_resume, + .driver = { + .name = "bcmdhd_wlan", + } +}; + +static struct platform_driver wifi_device_legacy = { + .probe = wifi_probe, + .remove = wifi_remove, + .suspend = wifi_suspend, + .resume = wifi_resume, + .driver = { + .name = "bcm4329_wlan", + } +}; + +static int wifi_add_dev(void) +{ + DHD_TRACE(("## Calling platform_driver_register\n")); + platform_driver_register(&wifi_device); + platform_driver_register(&wifi_device_legacy); + return 0; +} + +static void wifi_del_dev(void) +{ + DHD_TRACE(("## Unregister platform_driver_register\n")); + platform_driver_unregister(&wifi_device); + platform_driver_unregister(&wifi_device_legacy); +} +#endif /* defined(CONFIG_WIFI_CONTROL_FUNC) */ diff --git a/drivers/net/wireless/bcmdhd/wl_android.h b/drivers/net/wireless/bcmdhd/wl_android.h new file mode 100644 index 0000000000000..3983306cfe38e --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_android.h @@ -0,0 +1,57 @@ +/* + * Linux cfg80211 driver - Android related functions + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: wl_android.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $ + */ + +#include +#include +#include + +/** + * Android platform dependent functions, feel free to add Android specific functions here + * (save the macros in dhd). 
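+ *
+ * These entry points are driven from dhd_linux.c: wl_android_priv_cmd() sits
+ * behind the net_device ioctl path used for Android "DRIVER" private
+ * commands.  For illustration only (the exact ioctl number and routing live
+ * in dhd_linux.c, not here), a userspace caller looks roughly like:
+ *
+ *	int sock = socket(AF_INET, SOCK_DGRAM, 0);
+ *	char buf[64] = "RSSI";
+ *	android_wifi_priv_cmd priv = { buf, 0, sizeof(buf) };
+ *	struct ifreq ifr;
+ *	memset(&ifr, 0, sizeof(ifr));
+ *	strncpy(ifr.ifr_name, "wlan0", IFNAMSIZ - 1);
+ *	ifr.ifr_data = (void *)&priv;
+ *	ioctl(sock, SIOCDEVPRIVATE + 1, &ifr);
+ *
+ * (userspace needs its own copy of the android_wifi_priv_cmd layout, and the
+ * SIOCDEVPRIVATE offset above is an assumption).  On success the reply
+ * string is copied back into buf.
+ *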
Please do NOT declare functions that are NOT exposed to dhd + * or cfg, define them as static in wl_android.c + */ + +/** + * wl_android_init will be called from module init function (dhd_module_init now), similarly + * wl_android_exit will be called from module exit function (dhd_module_cleanup now) + */ +int wl_android_init(void); +int wl_android_exit(void); +void wl_android_post_init(void); +int wl_android_wifi_on(struct net_device *dev); +int wl_android_wifi_off(struct net_device *dev); +int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd); + +#if defined(CONFIG_WIFI_CONTROL_FUNC) +int wl_android_wifictrl_func_add(void); +void wl_android_wifictrl_func_del(void); +void* wl_android_prealloc(int section, unsigned long size); + +int wifi_get_irq_number(unsigned long *irq_flags_ptr); +int wifi_set_power(int on, unsigned long msec); +int wifi_get_mac_addr(unsigned char *buf); +void *wifi_get_country_code(char *ccode); +#endif /* CONFIG_WIFI_CONTROL_FUNC */ diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.c b/drivers/net/wireless/bcmdhd/wl_cfg80211.c new file mode 100644 index 0000000000000..04689546ac8d2 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_cfg80211.c @@ -0,0 +1,7827 @@ +/* + * Linux cfg80211 driver + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: wl_cfg80211.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $ + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +static struct device *cfg80211_parent_dev = NULL; +static int vsdb_supported = 0; +struct wl_priv *wlcfg_drv_priv = NULL; + +u32 wl_dbg_level = WL_DBG_ERR; + +#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5] +#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x" +#define MAX_WAIT_TIME 1500 +#define WL_SCAN_ACTIVE_TIME 40 +#define WL_SCAN_PASSIVE_TIME 130 +#define WL_FRAME_LEN 300 +#define WL_SCAN_BUSY_MAX 8 + +#define DNGL_FUNC(func, parameters) func parameters; +#define COEX_DHCP + + +/* This is to override regulatory domains defined in cfg80211 module (reg.c) + * By default world regulatory domain defined in reg.c puts the flags NL80211_RRF_PASSIVE_SCAN + * and NL80211_RRF_NO_IBSS for 5GHz channels (for 36..48 and 149..165). 
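+ * (For reference, each REG_RULE() entry below takes: start MHz, end MHz,
+ * max bandwidth MHz, max antenna gain dBi, max EIRP dBm, flags.)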
+ * With respect to these flags, wpa_supplicant doesn't start p2p operations on 5GHz channels. + * All the chnages in world regulatory domain are to be done here. + */ +static const struct ieee80211_regdomain brcm_regdom = { + .n_reg_rules = 4, + .alpha2 = "99", + .reg_rules = { + /* IEEE 802.11b/g, channels 1..11 */ + REG_RULE(2412-10, 2472+10, 40, 6, 20, 0), + /* IEEE 802.11b/g, channels 12..13. No HT40 + * channel fits here. + */ + /* If any */ + /* + * IEEE 802.11 channel 14 - is for JP only, + * we need cfg80211 to allow it (reg_flags = 0); so that + * hostapd could request auto channel by sending down ch 14 + */ + REG_RULE(2484-10, 2484+10, 20, 6, 20, + NL80211_RRF_PASSIVE_SCAN | + NL80211_RRF_NO_IBSS | + NL80211_RRF_NO_OFDM), + /* IEEE 802.11a, channel 36..64 */ + REG_RULE(5150-10, 5350+10, 40, 6, 20, 0), + /* IEEE 802.11a, channel 100..165 */ + REG_RULE(5470-10, 5850+10, 40, 6, 20, 0), } +}; + + +/* Data Element Definitions */ +#define WPS_ID_CONFIG_METHODS 0x1008 +#define WPS_ID_REQ_TYPE 0x103A +#define WPS_ID_DEVICE_NAME 0x1011 +#define WPS_ID_VERSION 0x104A +#define WPS_ID_DEVICE_PWD_ID 0x1012 +#define WPS_ID_REQ_DEV_TYPE 0x106A +#define WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS 0x1053 +#define WPS_ID_PRIM_DEV_TYPE 0x1054 + +/* Device Password ID */ +#define DEV_PW_DEFAULT 0x0000 +#define DEV_PW_USER_SPECIFIED 0x0001, +#define DEV_PW_MACHINE_SPECIFIED 0x0002 +#define DEV_PW_REKEY 0x0003 +#define DEV_PW_PUSHBUTTON 0x0004 +#define DEV_PW_REGISTRAR_SPECIFIED 0x0005 + +/* Config Methods */ +#define WPS_CONFIG_USBA 0x0001 +#define WPS_CONFIG_ETHERNET 0x0002 +#define WPS_CONFIG_LABEL 0x0004 +#define WPS_CONFIG_DISPLAY 0x0008 +#define WPS_CONFIG_EXT_NFC_TOKEN 0x0010 +#define WPS_CONFIG_INT_NFC_TOKEN 0x0020 +#define WPS_CONFIG_NFC_INTERFACE 0x0040 +#define WPS_CONFIG_PUSHBUTTON 0x0080 +#define WPS_CONFIG_KEYPAD 0x0100 +#define WPS_CONFIG_VIRT_PUSHBUTTON 0x0280 +#define WPS_CONFIG_PHY_PUSHBUTTON 0x0480 +#define WPS_CONFIG_VIRT_DISPLAY 0x2008 +#define WPS_CONFIG_PHY_DISPLAY 0x4008 + +/* + * cfg80211_ops api/callback list + */ +static s32 wl_frame_get_mgmt(u16 fc, const struct ether_addr *da, + const struct ether_addr *sa, const struct ether_addr *bssid, + u8 **pheader, u32 *body_len, u8 *pbody); +static s32 __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_scan_request *request, + struct cfg80211_ssid *this_ssid); +static s32 wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_scan_request *request); +static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed); +static s32 wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_ibss_params *params); +static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy, + struct net_device *dev); +static s32 wl_cfg80211_get_station(struct wiphy *wiphy, + struct net_device *dev, u8 *mac, + struct station_info *sinfo); +static s32 wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, + struct net_device *dev, bool enabled, + s32 timeout); +static int wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_connect_params *sme); +static s32 wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, + u16 reason_code); +static s32 wl_cfg80211_set_tx_power(struct wiphy *wiphy, + enum nl80211_tx_power_setting type, + s32 dbm); +static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm); +static s32 wl_cfg80211_config_default_key(struct wiphy *wiphy, + struct net_device *dev, + u8 key_idx, bool unicast, bool multicast); 
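+/*
+ * The static callbacks declared in this block are gathered further down in
+ * this file (outside this hunk) into the driver's struct cfg80211_ops and
+ * handed to wiphy_new()/wiphy_register() from wl_setup_wiphy().  Roughly:
+ *
+ *	static struct cfg80211_ops wl_cfg80211_ops = {
+ *		.scan		= wl_cfg80211_scan,
+ *		.connect	= wl_cfg80211_connect,
+ *		.disconnect	= wl_cfg80211_disconnect,
+ *		.add_key	= wl_cfg80211_add_key,
+ *		.del_key	= wl_cfg80211_del_key,
+ *		.set_power_mgmt	= wl_cfg80211_set_power_mgmt,
+ *		...
+ *	};
+ *
+ * (Field names are the standard cfg80211 ones; the exact initializer lives
+ * in the part of the file not shown here.)
+ */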
+static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev, + u8 key_idx, bool pairwise, const u8 *mac_addr, + struct key_params *params); +static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev, + u8 key_idx, bool pairwise, const u8 *mac_addr); +static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev, + u8 key_idx, bool pairwise, const u8 *mac_addr, + void *cookie, void (*callback) (void *cookie, + struct key_params *params)); +static s32 wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy, + struct net_device *dev, u8 key_idx); +static s32 wl_cfg80211_resume(struct wiphy *wiphy); +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39) +static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow); +#else +static s32 wl_cfg80211_suspend(struct wiphy *wiphy); +#endif +static s32 wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_pmksa *pmksa); +static s32 wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_pmksa *pmksa); +static s32 wl_cfg80211_flush_pmksa(struct wiphy *wiphy, + struct net_device *dev); +static s32 wl_notify_escan_complete(struct wl_priv *wl, + struct net_device *ndev, bool aborted, bool fw_abort); +/* + * event & event Q handlers for cfg80211 interfaces + */ +static s32 wl_create_event_handler(struct wl_priv *wl); +static void wl_destroy_event_handler(struct wl_priv *wl); +static s32 wl_event_handler(void *data); +static void wl_init_eq(struct wl_priv *wl); +static void wl_flush_eq(struct wl_priv *wl); +static unsigned long wl_lock_eq(struct wl_priv *wl); +static void wl_unlock_eq(struct wl_priv *wl, unsigned long flags); +static void wl_init_eq_lock(struct wl_priv *wl); +static void wl_init_event_handler(struct wl_priv *wl); +static struct wl_event_q *wl_deq_event(struct wl_priv *wl); +static s32 wl_enq_event(struct wl_priv *wl, struct net_device *ndev, u32 type, + const wl_event_msg_t *msg, void *data); +static void wl_put_event(struct wl_event_q *e); +static void wl_wakeup_event(struct wl_priv *wl); +static s32 wl_notify_connect_status_ap(struct wl_priv *wl, struct net_device *ndev, + const wl_event_msg_t *e, void *data); +static s32 wl_notify_connect_status(struct wl_priv *wl, + struct net_device *ndev, + const wl_event_msg_t *e, void *data); +static s32 wl_notify_roaming_status(struct wl_priv *wl, + struct net_device *ndev, + const wl_event_msg_t *e, void *data); +static s32 wl_notify_scan_status(struct wl_priv *wl, struct net_device *ndev, + const wl_event_msg_t *e, void *data); +static s32 wl_bss_connect_done(struct wl_priv *wl, struct net_device *ndev, + const wl_event_msg_t *e, void *data, bool completed); +static s32 wl_ibss_join_done(struct wl_priv *wl, struct net_device *ndev, + const wl_event_msg_t *e, void *data, bool completed); +static s32 wl_bss_roaming_done(struct wl_priv *wl, struct net_device *ndev, + const wl_event_msg_t *e, void *data); +static s32 wl_notify_mic_status(struct wl_priv *wl, struct net_device *ndev, + const wl_event_msg_t *e, void *data); +#ifdef WL_SCHED_SCAN +static s32 +wl_notify_sched_scan_results(struct wl_priv *wl, struct net_device *ndev, + const wl_event_msg_t *e, void *data); +#endif /* WL_SCHED_SCAN */ +#ifdef PNO_SUPPORT +static s32 wl_notify_pfn_status(struct wl_priv *wl, struct net_device *ndev, + const wl_event_msg_t *e, void *data); +#endif /* PNO_SUPPORT */ +/* + * register/deregister parent device + */ +static void wl_cfg80211_clear_parent_dev(void); + +/* + * cfg80211 
set_wiphy_params utilities + */ +static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold); +static s32 wl_set_rts(struct net_device *dev, u32 frag_threshold); +static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l); + +/* + * wl profile utilities + */ +static s32 wl_update_prof(struct wl_priv *wl, struct net_device *ndev, + const wl_event_msg_t *e, void *data, s32 item); +static void *wl_read_prof(struct wl_priv *wl, struct net_device *ndev, s32 item); +static void wl_init_prof(struct wl_priv *wl, struct net_device *ndev); + +/* + * cfg80211 connect utilites + */ +static s32 wl_set_wpa_version(struct net_device *dev, + struct cfg80211_connect_params *sme); +static s32 wl_set_auth_type(struct net_device *dev, + struct cfg80211_connect_params *sme); +static s32 wl_set_set_cipher(struct net_device *dev, + struct cfg80211_connect_params *sme); +static s32 wl_set_key_mgmt(struct net_device *dev, + struct cfg80211_connect_params *sme); +static s32 wl_set_set_sharedkey(struct net_device *dev, + struct cfg80211_connect_params *sme); +static s32 wl_get_assoc_ies(struct wl_priv *wl, struct net_device *ndev); +static void wl_ch_to_chanspec(int ch, + struct wl_join_params *join_params, size_t *join_params_size); + +/* + * information element utilities + */ +static void wl_rst_ie(struct wl_priv *wl); +static __used s32 wl_add_ie(struct wl_priv *wl, u8 t, u8 l, u8 *v); +static s32 wl_mrg_ie(struct wl_priv *wl, u8 *ie_stream, u16 ie_size); +static s32 wl_cp_ie(struct wl_priv *wl, u8 *dst, u16 dst_size); +static u32 wl_get_ielen(struct wl_priv *wl); + + +static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *dev); +static void wl_free_wdev(struct wl_priv *wl); + +static s32 wl_inform_bss(struct wl_priv *wl); +static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi); +static s32 wl_inform_ibss(struct wl_priv *wl, const u8 *bssid); +static s32 wl_update_bss_info(struct wl_priv *wl, struct net_device *ndev); +static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy); + +static s32 wl_add_keyext(struct wiphy *wiphy, struct net_device *dev, + u8 key_idx, const u8 *mac_addr, + struct key_params *params); +/* + * key indianess swap utilities + */ +static void swap_key_from_BE(struct wl_wsec_key *key); +static void swap_key_to_BE(struct wl_wsec_key *key); + +/* + * wl_priv memory init/deinit utilities + */ +static s32 wl_init_priv_mem(struct wl_priv *wl); +static void wl_deinit_priv_mem(struct wl_priv *wl); + +static void wl_delay(u32 ms); + +/* + * ibss mode utilities + */ +static bool wl_is_ibssmode(struct wl_priv *wl, struct net_device *ndev); +static __used bool wl_is_ibssstarter(struct wl_priv *wl); + +/* + * link up/down , default configuration utilities + */ +static s32 __wl_cfg80211_up(struct wl_priv *wl); +static s32 __wl_cfg80211_down(struct wl_priv *wl); +static s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add); +static bool wl_is_linkdown(struct wl_priv *wl, const wl_event_msg_t *e); +static bool wl_is_linkup(struct wl_priv *wl, const wl_event_msg_t *e, struct net_device *ndev); +static bool wl_is_nonetwork(struct wl_priv *wl, const wl_event_msg_t *e); +static void wl_link_up(struct wl_priv *wl); +static void wl_link_down(struct wl_priv *wl); +static s32 wl_config_ifmode(struct wl_priv *wl, struct net_device *ndev, s32 iftype); +static void wl_init_conf(struct wl_conf *conf); + +/* + * iscan handler + */ +static void wl_iscan_timer(unsigned long data); +static void wl_term_iscan(struct wl_priv *wl); +static s32 
wl_init_scan(struct wl_priv *wl); +static s32 wl_iscan_thread(void *data); +static s32 wl_run_iscan(struct wl_iscan_ctrl *iscan, struct cfg80211_scan_request *request, + u16 action); +static s32 wl_do_iscan(struct wl_priv *wl, struct cfg80211_scan_request *request); +static s32 wl_wakeup_iscan(struct wl_iscan_ctrl *iscan); +static s32 wl_invoke_iscan(struct wl_priv *wl); +static s32 wl_get_iscan_results(struct wl_iscan_ctrl *iscan, u32 *status, + struct wl_scan_results **bss_list); +static void wl_notify_iscan_complete(struct wl_iscan_ctrl *iscan, bool aborted); +static void wl_init_iscan_handler(struct wl_iscan_ctrl *iscan); +static s32 wl_iscan_done(struct wl_priv *wl); +static s32 wl_iscan_pending(struct wl_priv *wl); +static s32 wl_iscan_inprogress(struct wl_priv *wl); +static s32 wl_iscan_aborted(struct wl_priv *wl); +static void wl_scan_timeout_process(struct work_struct *work); + +/* + * find most significant bit set + */ +static __used u32 wl_find_msb(u16 bit16); + +/* + * rfkill support + */ +static int wl_setup_rfkill(struct wl_priv *wl, bool setup); +static int wl_rfkill_set(void *data, bool blocked); + +static wl_scan_params_t *wl_cfg80211_scan_alloc_params(int channel, + int nprobes, int *out_params_size); +static void get_primary_mac(struct wl_priv *wl, struct ether_addr *mac); + +/* + * Some external functions, TODO: move them to dhd_linux.h + */ +int dhd_add_monitor(char *name, struct net_device **new_ndev); +int dhd_del_monitor(struct net_device *ndev); +int dhd_monitor_init(void *dhd_pub); +int dhd_monitor_uninit(void); +int dhd_start_xmit(struct sk_buff *skb, struct net_device *net); + +#define CHECK_SYS_UP(wlpriv) \ +do { \ + struct net_device *ndev = wl_to_prmry_ndev(wlpriv); \ + if (unlikely(!wl_get_drv_status(wlpriv, READY, ndev))) { \ + WL_INFO(("device is not ready\n")); \ + return -EIO; \ + } \ +} while (0) + + +#define IS_WPA_AKM(akm) ((akm) == RSN_AKM_NONE || \ + (akm) == RSN_AKM_UNSPECIFIED || \ + (akm) == RSN_AKM_PSK) + + +extern int dhd_wait_pend8021x(struct net_device *dev); + +#if (WL_DBG_LEVEL > 0) +#define WL_DBG_ESTR_MAX 50 +static s8 wl_dbg_estr[][WL_DBG_ESTR_MAX] = { + "SET_SSID", "JOIN", "START", "AUTH", "AUTH_IND", + "DEAUTH", "DEAUTH_IND", "ASSOC", "ASSOC_IND", "REASSOC", + "REASSOC_IND", "DISASSOC", "DISASSOC_IND", "QUIET_START", "QUIET_END", + "BEACON_RX", "LINK", "MIC_ERROR", "NDIS_LINK", "ROAM", + "TXFAIL", "PMKID_CACHE", "RETROGRADE_TSF", "PRUNE", "AUTOAUTH", + "EAPOL_MSG", "SCAN_COMPLETE", "ADDTS_IND", "DELTS_IND", "BCNSENT_IND", + "BCNRX_MSG", "BCNLOST_MSG", "ROAM_PREP", "PFN_NET_FOUND", + "PFN_NET_LOST", + "RESET_COMPLETE", "JOIN_START", "ROAM_START", "ASSOC_START", + "IBSS_ASSOC", + "RADIO", "PSM_WATCHDOG", "WLC_E_CCX_ASSOC_START", "WLC_E_CCX_ASSOC_ABORT", + "PROBREQ_MSG", + "SCAN_CONFIRM_IND", "PSK_SUP", "COUNTRY_CODE_CHANGED", + "EXCEEDED_MEDIUM_TIME", "ICV_ERROR", + "UNICAST_DECODE_ERROR", "MULTICAST_DECODE_ERROR", "TRACE", + "WLC_E_BTA_HCI_EVENT", "IF", "WLC_E_P2P_DISC_LISTEN_COMPLETE", + "RSSI", "PFN_SCAN_COMPLETE", "WLC_E_EXTLOG_MSG", + "ACTION_FRAME", "ACTION_FRAME_COMPLETE", "WLC_E_PRE_ASSOC_IND", + "WLC_E_PRE_REASSOC_IND", "WLC_E_CHANNEL_ADOPTED", "WLC_E_AP_STARTED", + "WLC_E_DFS_AP_STOP", "WLC_E_DFS_AP_RESUME", "WLC_E_WAI_STA_EVENT", + "WLC_E_WAI_MSG", "WLC_E_ESCAN_RESULT", "WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE", + "WLC_E_PROBRESP_MSG", "WLC_E_P2P_PROBREQ_MSG", "WLC_E_DCS_REQUEST", "WLC_E_FIFO_CREDIT_MAP", + "WLC_E_ACTION_FRAME_RX", "WLC_E_WAKE_EVENT", "WLC_E_RM_COMPLETE" +}; +#endif /* WL_DBG_LEVEL */ + +#define CHAN2G(_channel, 
_freq, _flags) { \ + .band = IEEE80211_BAND_2GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_channel), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +#define CHAN5G(_channel, _flags) { \ + .band = IEEE80211_BAND_5GHZ, \ + .center_freq = 5000 + (5 * (_channel)), \ + .hw_value = (_channel), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +#define RATE_TO_BASE100KBPS(rate) (((rate) * 10) / 2) +#define RATETAB_ENT(_rateid, _flags) \ + { \ + .bitrate = RATE_TO_BASE100KBPS(_rateid), \ + .hw_value = (_rateid), \ + .flags = (_flags), \ + } + +static struct ieee80211_rate __wl_rates[] = { + RATETAB_ENT(WLC_RATE_1M, 0), + RATETAB_ENT(WLC_RATE_2M, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(WLC_RATE_5M5, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(WLC_RATE_11M, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(WLC_RATE_6M, 0), + RATETAB_ENT(WLC_RATE_9M, 0), + RATETAB_ENT(WLC_RATE_12M, 0), + RATETAB_ENT(WLC_RATE_18M, 0), + RATETAB_ENT(WLC_RATE_24M, 0), + RATETAB_ENT(WLC_RATE_36M, 0), + RATETAB_ENT(WLC_RATE_48M, 0), + RATETAB_ENT(WLC_RATE_54M, 0) +}; + +#define wl_a_rates (__wl_rates + 4) +#define wl_a_rates_size 8 +#define wl_g_rates (__wl_rates + 0) +#define wl_g_rates_size 12 + +static struct ieee80211_channel __wl_2ghz_channels[] = { + CHAN2G(1, 2412, 0), + CHAN2G(2, 2417, 0), + CHAN2G(3, 2422, 0), + CHAN2G(4, 2427, 0), + CHAN2G(5, 2432, 0), + CHAN2G(6, 2437, 0), + CHAN2G(7, 2442, 0), + CHAN2G(8, 2447, 0), + CHAN2G(9, 2452, 0), + CHAN2G(10, 2457, 0), + CHAN2G(11, 2462, 0), + CHAN2G(12, 2467, 0), + CHAN2G(13, 2472, 0), + CHAN2G(14, 2484, 0) +}; + +static struct ieee80211_channel __wl_5ghz_a_channels[] = { + CHAN5G(34, 0), CHAN5G(36, 0), + CHAN5G(38, 0), CHAN5G(40, 0), + CHAN5G(42, 0), CHAN5G(44, 0), + CHAN5G(46, 0), CHAN5G(48, 0), + CHAN5G(52, 0), CHAN5G(56, 0), + CHAN5G(60, 0), CHAN5G(64, 0), + CHAN5G(100, 0), CHAN5G(104, 0), + CHAN5G(108, 0), CHAN5G(112, 0), + CHAN5G(116, 0), CHAN5G(120, 0), + CHAN5G(124, 0), CHAN5G(128, 0), + CHAN5G(132, 0), CHAN5G(136, 0), + CHAN5G(140, 0), CHAN5G(149, 0), + CHAN5G(153, 0), CHAN5G(157, 0), + CHAN5G(161, 0), CHAN5G(165, 0) +}; + +static struct ieee80211_supported_band __wl_band_2ghz = { + .band = IEEE80211_BAND_2GHZ, + .channels = __wl_2ghz_channels, + .n_channels = ARRAY_SIZE(__wl_2ghz_channels), + .bitrates = wl_g_rates, + .n_bitrates = wl_g_rates_size +}; + +static struct ieee80211_supported_band __wl_band_5ghz_a = { + .band = IEEE80211_BAND_5GHZ, + .channels = __wl_5ghz_a_channels, + .n_channels = ARRAY_SIZE(__wl_5ghz_a_channels), + .bitrates = wl_a_rates, + .n_bitrates = wl_a_rates_size +}; + +static const u32 __wl_cipher_suites[] = { + WLAN_CIPHER_SUITE_WEP40, + WLAN_CIPHER_SUITE_WEP104, + WLAN_CIPHER_SUITE_TKIP, + WLAN_CIPHER_SUITE_CCMP, + WLAN_CIPHER_SUITE_AES_CMAC, +}; + +/* There isn't a lot of sense in it, but you can transmit anything you like */ +static const struct ieee80211_txrx_stypes +wl_cfg80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = { + [NL80211_IFTYPE_ADHOC] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) + }, + [NL80211_IFTYPE_STATION] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + }, + [NL80211_IFTYPE_AP] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | + BIT(IEEE80211_STYPE_DISASSOC >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4) | + BIT(IEEE80211_STYPE_ACTION >> 4) + }, + 
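+	/*
+	 * cfg80211 interprets .tx/.rx as bitmaps indexed by the management
+	 * frame subtype shifted right by four: IEEE80211_STYPE_PROBE_REQ
+	 * (0x0040) lands on bit 4, IEEE80211_STYPE_ACTION (0x00d0) on bit 13,
+	 * and so on.  .tx = 0xffff therefore permits every subtype.
+	 */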
[NL80211_IFTYPE_AP_VLAN] = { + /* copy AP */ + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | + BIT(IEEE80211_STYPE_DISASSOC >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4) | + BIT(IEEE80211_STYPE_ACTION >> 4) + }, + [NL80211_IFTYPE_P2P_CLIENT] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + }, + [NL80211_IFTYPE_P2P_GO] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | + BIT(IEEE80211_STYPE_DISASSOC >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4) | + BIT(IEEE80211_STYPE_ACTION >> 4) + } +}; + +static void swap_key_from_BE(struct wl_wsec_key *key) +{ + key->index = htod32(key->index); + key->len = htod32(key->len); + key->algo = htod32(key->algo); + key->flags = htod32(key->flags); + key->rxiv.hi = htod32(key->rxiv.hi); + key->rxiv.lo = htod16(key->rxiv.lo); + key->iv_initialized = htod32(key->iv_initialized); +} + +static void swap_key_to_BE(struct wl_wsec_key *key) +{ + key->index = dtoh32(key->index); + key->len = dtoh32(key->len); + key->algo = dtoh32(key->algo); + key->flags = dtoh32(key->flags); + key->rxiv.hi = dtoh32(key->rxiv.hi); + key->rxiv.lo = dtoh16(key->rxiv.lo); + key->iv_initialized = dtoh32(key->iv_initialized); +} + +/* For debug: Dump the contents of the encoded wps ie buffe */ +static void +wl_validate_wps_ie(char *wps_ie, bool *pbc) +{ + #define WPS_IE_FIXED_LEN 6 + u16 len = (u16) wps_ie[TLV_LEN_OFF]; + u8 *subel = wps_ie+ WPS_IE_FIXED_LEN; + u16 subelt_id; + u16 subelt_len; + u16 val; + u8 *valptr = (uint8*) &val; + + WL_DBG(("wps_ie len=%d\n", len)); + + len -= 4; /* for the WPS IE's OUI, oui_type fields */ + + while (len >= 4) { /* must have attr id, attr len fields */ + valptr[0] = *subel++; + valptr[1] = *subel++; + subelt_id = HTON16(val); + + valptr[0] = *subel++; + valptr[1] = *subel++; + subelt_len = HTON16(val); + + len -= 4; /* for the attr id, attr len fields */ + len -= subelt_len; /* for the remaining fields in this attribute */ + WL_DBG((" subel=%p, subelt_id=0x%x subelt_len=%u\n", + subel, subelt_id, subelt_len)); + + if (subelt_id == WPS_ID_VERSION) { + WL_DBG((" attr WPS_ID_VERSION: %u\n", *subel)); + } else if (subelt_id == WPS_ID_REQ_TYPE) { + WL_DBG((" attr WPS_ID_REQ_TYPE: %u\n", *subel)); + } else if (subelt_id == WPS_ID_CONFIG_METHODS) { + valptr[0] = *subel; + valptr[1] = *(subel + 1); + WL_DBG((" attr WPS_ID_CONFIG_METHODS: %x\n", HTON16(val))); + } else if (subelt_id == WPS_ID_DEVICE_NAME) { + char devname[100]; + memcpy(devname, subel, subelt_len); + devname[subelt_len] = '\0'; + WL_DBG((" attr WPS_ID_DEVICE_NAME: %s (len %u)\n", + devname, subelt_len)); + } else if (subelt_id == WPS_ID_DEVICE_PWD_ID) { + valptr[0] = *subel; + valptr[1] = *(subel + 1); + WL_DBG((" attr WPS_ID_DEVICE_PWD_ID: %u\n", HTON16(val))); + *pbc = (HTON16(val) == DEV_PW_PUSHBUTTON) ? 
true : false; + } else if (subelt_id == WPS_ID_PRIM_DEV_TYPE) { + valptr[0] = *subel; + valptr[1] = *(subel + 1); + WL_DBG((" attr WPS_ID_PRIM_DEV_TYPE: cat=%u \n", HTON16(val))); + valptr[0] = *(subel + 6); + valptr[1] = *(subel + 7); + WL_DBG((" attr WPS_ID_PRIM_DEV_TYPE: subcat=%u\n", HTON16(val))); + } else if (subelt_id == WPS_ID_REQ_DEV_TYPE) { + valptr[0] = *subel; + valptr[1] = *(subel + 1); + WL_DBG((" attr WPS_ID_REQ_DEV_TYPE: cat=%u\n", HTON16(val))); + valptr[0] = *(subel + 6); + valptr[1] = *(subel + 7); + WL_DBG((" attr WPS_ID_REQ_DEV_TYPE: subcat=%u\n", HTON16(val))); + } else if (subelt_id == WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS) { + valptr[0] = *subel; + valptr[1] = *(subel + 1); + WL_DBG((" attr WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS" + ": cat=%u\n", HTON16(val))); + } else { + WL_DBG((" unknown attr 0x%x\n", subelt_id)); + } + + subel += subelt_len; + } +} + +static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy) +{ + if (vsdb_supported) { + return wf_chspec_aton(WL_P2P_TEMP_CHAN); + } + else { + chanspec_t chspec; + int err = 0; + struct wl_priv *wl = wiphy_priv(wiphy); + struct net_device *dev = wl_to_prmry_ndev(wl); + struct ether_addr bssid; + struct wl_bss_info *bss = NULL; + if ((err = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, sizeof(bssid), false))) { + /* STA interface is not associated. So start the new interface on a temp + * channel . Later proper channel will be applied by the above framework + * via set_channel (cfg80211 API). + */ + WL_DBG(("Not associated. Return a temp channel. \n")); + return wf_chspec_aton(WL_P2P_TEMP_CHAN); + } + + + *(u32 *) wl->extra_buf = htod32(WL_EXTRA_BUF_MAX); + if ((err = wldev_ioctl(dev, WLC_GET_BSS_INFO, wl->extra_buf, + WL_EXTRA_BUF_MAX, false))) { + WL_ERR(("Failed to get associated bss info, use temp channel \n")); + chspec = wf_chspec_aton(WL_P2P_TEMP_CHAN); + } + else { + bss = (struct wl_bss_info *) (wl->extra_buf + 4); + chspec = bss->chanspec; + WL_DBG(("Valid BSS Found. 
chanspec:%d \n", bss->chanspec)); + } + return chspec; + } +} + +static struct net_device* wl_cfg80211_add_monitor_if(char *name) +{ + int ret = 0; + struct net_device* ndev = NULL; + + ret = dhd_add_monitor(name, &ndev); + WL_INFO(("wl_cfg80211_add_monitor_if net device returned: 0x%p\n", ndev)); + return ndev; +} + +static struct net_device * +wl_cfg80211_add_virtual_iface(struct wiphy *wiphy, char *name, + enum nl80211_iftype type, u32 *flags, + struct vif_params *params) +{ + s32 err; + s32 timeout = -1; + s32 wlif_type = -1; + s32 mode = 0; +#if defined(WL_ENABLE_P2P_IF) + s32 dhd_mode = 0; +#endif /* (WL_ENABLE_P2P_IF) */ + chanspec_t chspec; + struct wl_priv *wl = wiphy_priv(wiphy); + struct net_device *_ndev; + struct ether_addr primary_mac; + int (*net_attach)(void *dhdp, int ifidx); + bool rollback_lock = false; + + /* Use primary I/F for sending cmds down to firmware */ + _ndev = wl_to_prmry_ndev(wl); + + WL_DBG(("if name: %s, type: %d\n", name, type)); + switch (type) { + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_WDS: + case NL80211_IFTYPE_MESH_POINT: + WL_ERR(("Unsupported interface type\n")); + mode = WL_MODE_IBSS; + return NULL; + case NL80211_IFTYPE_MONITOR: + return wl_cfg80211_add_monitor_if(name); + case NL80211_IFTYPE_P2P_CLIENT: + case NL80211_IFTYPE_STATION: + wlif_type = WL_P2P_IF_CLIENT; + mode = WL_MODE_BSS; + break; + case NL80211_IFTYPE_P2P_GO: + case NL80211_IFTYPE_AP: + wlif_type = WL_P2P_IF_GO; + mode = WL_MODE_AP; + break; + default: + WL_ERR(("Unsupported interface type\n")); + return NULL; + break; + } + + if (!name) { + WL_ERR(("name is NULL\n")); + return NULL; + } + if (wl->p2p_supported && (wlif_type != -1)) { + if (wl_get_p2p_status(wl, IF_DELETING)) { + /* wait till IF_DEL is complete + * release the lock for the unregister to proceed + */ + if (rtnl_is_locked()) { + rtnl_unlock(); + rollback_lock = true; + } + WL_INFO(("%s: Released the lock and wait till IF_DEL is complete\n", + __func__)); + timeout = wait_event_interruptible_timeout(wl->netif_change_event, + (wl_get_p2p_status(wl, IF_DELETING) == false), + msecs_to_jiffies(MAX_WAIT_TIME)); + + /* put back the rtnl_lock again */ + if (rollback_lock) { + rtnl_lock(); + rollback_lock = false; + } + if (timeout > 0) { + WL_ERR(("IF DEL is Success\n")); + + } else { + WL_ERR(("timeount < 0, return -EAGAIN\n")); + return ERR_PTR(-EAGAIN); + } + /* It should be now be safe to put this check here since we are sure + * by now netdev_notifier (unregister) would have been called */ + if (wl->iface_cnt == IFACE_MAX_CNT) + return ERR_PTR(-ENOMEM); + } + if (wl->p2p && !wl->p2p->on && strstr(name, WL_P2P_INTERFACE_PREFIX)) { + p2p_on(wl) = true; + wl_cfgp2p_set_firm_p2p(wl); + wl_cfgp2p_init_discovery(wl); + get_primary_mac(wl, &primary_mac); + wl_cfgp2p_generate_bss_mac(&primary_mac, + &wl->p2p->dev_addr, &wl->p2p->int_addr); + } + + memset(wl->p2p->vir_ifname, 0, IFNAMSIZ); + strncpy(wl->p2p->vir_ifname, name, IFNAMSIZ - 1); + + wldev_iovar_setint(_ndev, "mpc", 0); + wl_notify_escan_complete(wl, _ndev, true, true); + /* In concurrency case, STA may be already associated in a particular channel. + * so retrieve the current channel of primary interface and then start the virtual + * interface on that. 
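+	 * wl_cfg80211_get_shared_freq() (defined above) returns the chanspec of
+	 * the BSS the primary STA is associated with, and falls back to the
+	 * temporary channel WL_P2P_TEMP_CHAN when the STA is not associated or
+	 * when vsdb_supported is set.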
+ */ + chspec = wl_cfg80211_get_shared_freq(wiphy); + + /* For P2P mode, use P2P-specific driver features to create the + * bss: "wl p2p_ifadd" + */ + wl_set_p2p_status(wl, IF_ADD); + err = wl_cfgp2p_ifadd(wl, &wl->p2p->int_addr, htod32(wlif_type), chspec); + + if (unlikely(err)) { + WL_ERR((" virtual iface add failed (%d) \n", err)); + return ERR_PTR(-ENOMEM); + } + + timeout = wait_event_interruptible_timeout(wl->netif_change_event, + (wl_get_p2p_status(wl, IF_ADD) == false), + msecs_to_jiffies(MAX_WAIT_TIME)); + if (timeout > 0 && (!wl_get_p2p_status(wl, IF_ADD))) { + + struct wireless_dev *vwdev; + vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL); + if (unlikely(!vwdev)) { + WL_ERR(("Could not allocate wireless device\n")); + return ERR_PTR(-ENOMEM); + } + vwdev->wiphy = wl->wdev->wiphy; + WL_INFO((" virtual interface(%s) is created memalloc done \n", + wl->p2p->vir_ifname)); + vwdev->iftype = type; + _ndev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION); + _ndev->ieee80211_ptr = vwdev; + SET_NETDEV_DEV(_ndev, wiphy_dev(vwdev->wiphy)); + vwdev->netdev = _ndev; + wl_set_drv_status(wl, READY, _ndev); + wl->p2p->vif_created = true; + wl_set_mode_by_netdev(wl, _ndev, mode); + net_attach = wl_to_p2p_bss_private(wl, P2PAPI_BSSCFG_CONNECTION); + if (rtnl_is_locked()) { + rtnl_unlock(); + rollback_lock = true; + } + if (net_attach && !net_attach(wl->pub, _ndev->ifindex)) { + wl_alloc_netinfo(wl, _ndev, vwdev, mode); + WL_ERR((" virtual interface(%s) is " + "created net attach done\n", wl->p2p->vir_ifname)); +#if defined(WL_ENABLE_P2P_IF) + if (type == NL80211_IFTYPE_P2P_CLIENT) + dhd_mode = P2P_GC_ENABLED; + else if (type == NL80211_IFTYPE_P2P_GO) + dhd_mode = P2P_GO_ENABLED; + DNGL_FUNC(dhd_cfg80211_set_p2p_info, (wl, dhd_mode)); +#endif /* (WL_ENABLE_P2P_IF) */ + /* Start the P2P I/F with PM disabled. Enable PM from + * the framework + */ + if ((type == NL80211_IFTYPE_P2P_CLIENT) || ( + type == NL80211_IFTYPE_P2P_GO)) + vwdev->ps = NL80211_PS_DISABLED; + } else { + /* put back the rtnl_lock again */ + if (rollback_lock) + rtnl_lock(); + goto fail; + } + /* put back the rtnl_lock again */ + if (rollback_lock) + rtnl_lock(); + return _ndev; + + } else { + wl_clr_p2p_status(wl, IF_ADD); + WL_ERR((" virtual interface(%s) is not created \n", wl->p2p->vir_ifname)); + memset(wl->p2p->vir_ifname, '\0', IFNAMSIZ); + wl->p2p->vif_created = false; + } + } +fail: + return ERR_PTR(-ENODEV); +} + +static s32 +wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, struct net_device *dev) +{ + struct ether_addr p2p_mac; + struct wl_priv *wl = wiphy_priv(wiphy); + s32 timeout = -1; + s32 ret = 0; + WL_DBG(("Enter\n")); + + if (wl->p2p_net == dev) { + /* Since there is no ifidx corresponding to p2p0, cmds to + * firmware should be routed through primary I/F + */ + dev = wl_to_prmry_ndev(wl); + } + + if (wl->p2p_supported) { + memcpy(p2p_mac.octet, wl->p2p->int_addr.octet, ETHER_ADDR_LEN); + + /* Clear GO_NEG_PHASE bit to take care of GO-NEG-FAIL cases + */ + WL_DBG(("P2P: GO_NEG_PHASE status cleared ")); + wl_clr_p2p_status(wl, GO_NEG_PHASE); + if (wl->p2p->vif_created) { + if (wl_get_drv_status(wl, SCANNING, dev)) { + wl_notify_escan_complete(wl, dev, true, true); + } + wldev_iovar_setint(dev, "mpc", 1); + wl_set_p2p_status(wl, IF_DELETING); + ret = wl_cfgp2p_ifdel(wl, &p2p_mac); + /* Firmware could not delete the interface so we will not get WLC_E_IF + * event for cleaning the dhd virtual nw interace + * So lets do it here. 
Failures from fw will ensure the application to do + * ifconfig down and up sequnce, which will reload the fw + * however we should cleanup the linux network virtual interfaces + */ + /* Request framework to RESET and clean up */ + if (ret) { + struct net_device *ndev = wl_to_prmry_ndev(wl); + WL_ERR(("Firmware returned an error (%d) from p2p_ifdel" + "HANG Notification sent to %s\n", ret, ndev->name)); + wl_cfg80211_hang(ndev, WLAN_REASON_UNSPECIFIED); + } + + /* Wait for any pending scan req to get aborted from the sysioc context */ + timeout = wait_event_interruptible_timeout(wl->netif_change_event, + (wl->p2p->vif_created == false), + msecs_to_jiffies(MAX_WAIT_TIME)); + if (timeout > 0 && (wl->p2p->vif_created == false)) { + WL_DBG(("IFDEL operation done\n")); +#if defined(WL_ENABLE_P2P_IF) + DNGL_FUNC(dhd_cfg80211_clean_p2p_info, (wl)); +#endif /* (WL_ENABLE_P2P_IF)) */ + } else { + WL_ERR(("IFDEL didn't complete properly\n")); + } + ret = dhd_del_monitor(dev); + } + } + return ret; +} + +static s32 +wl_cfg80211_change_virtual_iface(struct wiphy *wiphy, struct net_device *ndev, + enum nl80211_iftype type, u32 *flags, + struct vif_params *params) +{ + s32 ap = 0; + s32 infra = 0; + s32 err = BCME_OK; + s32 timeout = -1; + s32 wlif_type; + s32 mode = 0; + chanspec_t chspec; + struct wl_priv *wl = wiphy_priv(wiphy); + + WL_DBG(("Enter type %d\n", type)); + switch (type) { + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_WDS: + case NL80211_IFTYPE_MESH_POINT: + ap = 1; + WL_ERR(("type (%d) : currently we do not support this type\n", + type)); + break; + case NL80211_IFTYPE_ADHOC: + mode = WL_MODE_IBSS; + break; + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + mode = WL_MODE_BSS; + infra = 1; + break; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_P2P_GO: + mode = WL_MODE_AP; + ap = 1; + break; + default: + return -EINVAL; + } + WL_DBG(("%s : ap (%d), infra (%d), iftype: (%d)\n", ndev->name, ap, infra, type)); + + if (ap) { + wl_set_mode_by_netdev(wl, ndev, mode); + if (wl->p2p_supported && wl->p2p->vif_created) { + WL_DBG(("p2p_vif_created (%d) p2p_on (%d)\n", wl->p2p->vif_created, + p2p_on(wl))); + wldev_iovar_setint(ndev, "mpc", 0); + wl_notify_escan_complete(wl, ndev, true, true); + + /* In concurrency case, STA may be already associated in a particular + * channel. so retrieve the current channel of primary interface and + * then start the virtual interface on that. 
+ */ + chspec = wl_cfg80211_get_shared_freq(wiphy); + + wlif_type = WL_P2P_IF_GO; + WL_ERR(("%s : ap (%d), infra (%d), iftype: (%d)\n", + ndev->name, ap, infra, type)); + wl_set_p2p_status(wl, IF_CHANGING); + wl_clr_p2p_status(wl, IF_CHANGED); + err = wl_cfgp2p_ifchange(wl, &wl->p2p->int_addr, htod32(wlif_type), chspec); + timeout = wait_event_interruptible_timeout(wl->netif_change_event, + (wl_get_p2p_status(wl, IF_CHANGED) == true), + msecs_to_jiffies(MAX_WAIT_TIME)); + wl_set_mode_by_netdev(wl, ndev, mode); + wl_clr_p2p_status(wl, IF_CHANGING); + wl_clr_p2p_status(wl, IF_CHANGED); + } else if (ndev == wl_to_prmry_ndev(wl) && + !wl_get_drv_status(wl, AP_CREATED, ndev)) { + wl_set_drv_status(wl, AP_CREATING, ndev); + if (!wl->ap_info && + !(wl->ap_info = kzalloc(sizeof(struct ap_info), GFP_KERNEL))) { + WL_ERR(("struct ap_saved_ie allocation failed\n")); + return -ENOMEM; + } + } else { + WL_ERR(("Cannot change the interface for GO or SOFTAP\n")); + return -EINVAL; + } + } else { + infra = htod32(infra); + err = wldev_ioctl(ndev, WLC_SET_INFRA, &infra, sizeof(s32), true); + if (err) { + WL_ERR(("WLC_SET_INFRA error (%d)\n", err)); + return -EAGAIN; + } + wl_set_mode_by_netdev(wl, ndev, mode); + } + + ndev->ieee80211_ptr->iftype = type; + return 0; +} + +s32 +wl_cfg80211_notify_ifadd(struct net_device *ndev, s32 idx, s32 bssidx, + void* _net_attach) +{ + struct wl_priv *wl = wlcfg_drv_priv; + s32 ret = BCME_OK; + WL_DBG(("Enter")); + if (!ndev) { + WL_ERR(("net is NULL\n")); + return 0; + } + if (wl->p2p_supported && wl_get_p2p_status(wl, IF_ADD)) { + WL_DBG(("IF_ADD event called from dongle, old interface name: %s," + "new name: %s\n", ndev->name, wl->p2p->vir_ifname)); + /* Assign the net device to CONNECT BSSCFG */ + strncpy(ndev->name, wl->p2p->vir_ifname, IFNAMSIZ - 1); + wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION) = ndev; + wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_CONNECTION) = bssidx; + wl_to_p2p_bss_private(wl, P2PAPI_BSSCFG_CONNECTION) = _net_attach; + ndev->ifindex = idx; + wl_clr_p2p_status(wl, IF_ADD); + + wake_up_interruptible(&wl->netif_change_event); + } else { + ret = BCME_NOTREADY; + } + return ret; +} + +s32 +wl_cfg80211_notify_ifdel(void) +{ + struct wl_priv *wl = wlcfg_drv_priv; + + WL_DBG(("Enter \n")); + wl_clr_p2p_status(wl, IF_DELETING); + wake_up_interruptible(&wl->netif_change_event); + return 0; +} + +s32 +wl_cfg80211_ifdel_ops(struct net_device *ndev) +{ + struct wl_priv *wl = wlcfg_drv_priv; + bool rollback_lock = false; + s32 index = 0; + + if (!ndev || !ndev->name) { + WL_ERR(("net is NULL\n")); + return 0; + } + + if (p2p_is_on(wl) && wl->p2p->vif_created && + wl_get_p2p_status(wl, IF_DELETING)) { + if (wl->scan_request && + (wl->escan_info.ndev == ndev)) { + /* Abort any pending scan requests */ + wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE; + if (!rtnl_is_locked()) { + rtnl_lock(); + rollback_lock = true; + } + WL_DBG(("ESCAN COMPLETED\n")); + wl_notify_escan_complete(wl, ndev, true, false); + if (rollback_lock) + rtnl_unlock(); + } + WL_ERR(("IF_DEL event called from dongle, net %x, vif name: %s\n", + (unsigned int)ndev, wl->p2p->vir_ifname)); + + memset(wl->p2p->vir_ifname, '\0', IFNAMSIZ); + index = wl_cfgp2p_find_idx(wl, ndev); + wl_to_p2p_bss_ndev(wl, index) = NULL; + wl_to_p2p_bss_bssidx(wl, index) = 0; + wl->p2p->vif_created = false; + wl_cfgp2p_clear_management_ie(wl, + index); + WL_DBG(("index : %d\n", index)); + + } + + /* Wake up any waiting thread */ + wake_up_interruptible(&wl->netif_change_event); + + return 0; +} + +s32 
+wl_cfg80211_is_progress_ifadd(void) +{ + s32 is_progress = 0; + struct wl_priv *wl = wlcfg_drv_priv; + if (wl_get_p2p_status(wl, IF_ADD)) + is_progress = 1; + return is_progress; +} + +s32 +wl_cfg80211_is_progress_ifchange(void) +{ + s32 is_progress = 0; + struct wl_priv *wl = wlcfg_drv_priv; + if (wl_get_p2p_status(wl, IF_CHANGING)) + is_progress = 1; + return is_progress; +} + + +s32 +wl_cfg80211_notify_ifchange(void) +{ + struct wl_priv *wl = wlcfg_drv_priv; + if (wl_get_p2p_status(wl, IF_CHANGING)) { + wl_set_p2p_status(wl, IF_CHANGED); + wake_up_interruptible(&wl->netif_change_event); + } + return 0; +} + +static void wl_scan_prep(struct wl_scan_params *params, struct cfg80211_scan_request *request) +{ + u32 n_ssids; + u32 n_channels; + u16 channel; + chanspec_t chanspec; + s32 i, offset; + char *ptr; + wlc_ssid_t ssid; + + memcpy(¶ms->bssid, ðer_bcast, ETHER_ADDR_LEN); + params->bss_type = DOT11_BSSTYPE_ANY; + params->scan_type = 0; + params->nprobes = -1; + params->active_time = -1; + params->passive_time = -1; + params->home_time = -1; + params->channel_num = 0; + memset(¶ms->ssid, 0, sizeof(wlc_ssid_t)); + + WL_SCAN(("Preparing Scan request\n")); + WL_SCAN(("nprobes=%d\n", params->nprobes)); + WL_SCAN(("active_time=%d\n", params->active_time)); + WL_SCAN(("passive_time=%d\n", params->passive_time)); + WL_SCAN(("home_time=%d\n", params->home_time)); + WL_SCAN(("scan_type=%d\n", params->scan_type)); + + params->nprobes = htod32(params->nprobes); + params->active_time = htod32(params->active_time); + params->passive_time = htod32(params->passive_time); + params->home_time = htod32(params->home_time); + + /* if request is null just exit so it will be all channel broadcast scan */ + if (!request) + return; + + n_ssids = request->n_ssids; + n_channels = request->n_channels; + + /* Copy channel array if applicable */ + WL_SCAN(("### List of channelspecs to scan ###\n")); + if (n_channels > 0) { + for (i = 0; i < n_channels; i++) { + chanspec = 0; + channel = ieee80211_frequency_to_channel(request->channels[i]->center_freq); + if (request->channels[i]->band == IEEE80211_BAND_2GHZ) + chanspec |= WL_CHANSPEC_BAND_2G; + else + chanspec |= WL_CHANSPEC_BAND_5G; + + if (request->channels[i]->flags & IEEE80211_CHAN_NO_HT40) { + chanspec |= WL_CHANSPEC_BW_20; + chanspec |= WL_CHANSPEC_CTL_SB_NONE; + } else { + chanspec |= WL_CHANSPEC_BW_40; + if (request->channels[i]->flags & IEEE80211_CHAN_NO_HT40PLUS) + chanspec |= WL_CHANSPEC_CTL_SB_LOWER; + else + chanspec |= WL_CHANSPEC_CTL_SB_UPPER; + } + + params->channel_list[i] = channel; + params->channel_list[i] &= WL_CHANSPEC_CHAN_MASK; + params->channel_list[i] |= chanspec; + WL_SCAN(("Chan : %d, Channel spec: %x \n", + channel, params->channel_list[i])); + params->channel_list[i] = htod16(params->channel_list[i]); + } + } else { + WL_SCAN(("Scanning all channels\n")); + } + + /* Copy ssid array if applicable */ + WL_SCAN(("### List of SSIDs to scan ###\n")); + if (n_ssids > 0) { + offset = offsetof(wl_scan_params_t, channel_list) + n_channels * sizeof(u16); + offset = roundup(offset, sizeof(u32)); + ptr = (char*)params + offset; + for (i = 0; i < n_ssids; i++) { + memset(&ssid, 0, sizeof(wlc_ssid_t)); + ssid.SSID_len = request->ssids[i].ssid_len; + memcpy(ssid.SSID, request->ssids[i].ssid, ssid.SSID_len); + if (!ssid.SSID_len) + WL_SCAN(("%d: Broadcast scan\n", i)); + else + WL_SCAN(("%d: scan for %s size =%d\n", i, + ssid.SSID, ssid.SSID_len)); + memcpy(ptr, &ssid, sizeof(wlc_ssid_t)); + ptr += sizeof(wlc_ssid_t); + } + } else { + 
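+		/* No SSIDs supplied: params->ssid stays zeroed, so the firmware
+		 * performs a broadcast scan.
+		 */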
WL_SCAN(("Broadcast scan\n")); + } + /* Adding mask to channel numbers */ + params->channel_num = + htod32((n_ssids << WL_SCAN_PARAMS_NSSID_SHIFT) | + (n_channels & WL_SCAN_PARAMS_COUNT_MASK)); +} + +static s32 +wl_run_iscan(struct wl_iscan_ctrl *iscan, struct cfg80211_scan_request *request, u16 action) +{ + u32 n_channels; + u32 n_ssids; + s32 params_size = + (WL_SCAN_PARAMS_FIXED_SIZE + offsetof(wl_iscan_params_t, params)); + struct wl_iscan_params *params; + s32 err = 0; + + if (request != NULL) { + n_channels = request->n_channels; + n_ssids = request->n_ssids; + /* Allocate space for populating ssids in wl_iscan_params struct */ + if (n_channels % 2) + /* If n_channels is odd, add a padd of u16 */ + params_size += sizeof(u16) * (n_channels + 1); + else + params_size += sizeof(u16) * n_channels; + + /* Allocate space for populating ssids in wl_iscan_params struct */ + params_size += sizeof(struct wlc_ssid) * n_ssids; + } + params = (struct wl_iscan_params *)kzalloc(params_size, GFP_KERNEL); + if (!params) { + return -ENOMEM; + } + + wl_scan_prep(¶ms->params, request); + + params->version = htod32(ISCAN_REQ_VERSION); + params->action = htod16(action); + params->scan_duration = htod16(0); + + if (params_size + sizeof("iscan") >= WLC_IOCTL_MEDLEN) { + WL_ERR(("ioctl buffer length is not sufficient\n")); + err = -ENOMEM; + goto done; + } + err = wldev_iovar_setbuf(iscan->dev, "iscan", params, params_size, + iscan->ioctl_buf, WLC_IOCTL_MEDLEN, NULL); + if (unlikely(err)) { + if (err == -EBUSY) { + WL_ERR(("system busy : iscan canceled\n")); + } else { + WL_ERR(("error (%d)\n", err)); + } + } +done: + kfree(params); + return err; +} + +static s32 wl_do_iscan(struct wl_priv *wl, struct cfg80211_scan_request *request) +{ + struct wl_iscan_ctrl *iscan = wl_to_iscan(wl); + struct net_device *ndev = wl_to_prmry_ndev(wl); + s32 passive_scan; + s32 err = 0; + + iscan->state = WL_ISCAN_STATE_SCANING; + + passive_scan = wl->active_scan ? 
0 : 1; + err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN, + &passive_scan, sizeof(passive_scan), false); + if (unlikely(err)) { + WL_DBG(("error (%d)\n", err)); + return err; + } + wl->iscan_kickstart = true; + wl_run_iscan(iscan, request, WL_SCAN_ACTION_START); + mod_timer(&iscan->timer, jiffies + msecs_to_jiffies(iscan->timer_ms)); + iscan->timer_on = 1; + + return err; +} + +static s32 +wl_get_valid_channels(struct net_device *ndev, u8 *valid_chan_list, s32 size) +{ + wl_uint32_list_t *list; + s32 err = BCME_OK; + if (valid_chan_list == NULL || size <= 0) + return -ENOMEM; + + memset(valid_chan_list, 0, size); + list = (wl_uint32_list_t *)(void *) valid_chan_list; + list->count = htod32(WL_NUMCHANNELS); + err = wldev_ioctl(ndev, WLC_GET_VALID_CHANNELS, valid_chan_list, size, false); + if (err != 0) { + WL_ERR(("get channels failed with %d\n", err)); + } + + return err; +} + +static s32 +wl_run_escan(struct wl_priv *wl, struct net_device *ndev, + struct cfg80211_scan_request *request, uint16 action) +{ + s32 err = BCME_OK; + u32 n_channels; + u32 n_ssids; + s32 params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_escan_params_t, params)); + wl_escan_params_t *params = NULL; + struct cfg80211_scan_request *scan_request = wl->scan_request; + u8 chan_buf[sizeof(u32)*(WL_NUMCHANNELS + 1)]; + u32 num_chans = 0; + s32 channel; + s32 n_valid_chan; + s32 search_state = WL_P2P_DISC_ST_SCAN; + u32 i, j, n_nodfs = 0; + u16 *default_chan_list = NULL; + wl_uint32_list_t *list; + struct net_device *dev = NULL; + WL_DBG(("Enter \n")); + + + if (!wl->p2p_supported || ((ndev == wl_to_prmry_ndev(wl)) && + !p2p_scan(wl))) { + /* LEGACY SCAN TRIGGER */ + WL_SCAN((" LEGACY E-SCAN START\n")); + + if (request != NULL) { + n_channels = request->n_channels; + n_ssids = request->n_ssids; + /* Allocate space for populating ssids in wl_iscan_params struct */ + if (n_channels % 2) + /* If n_channels is odd, add a padd of u16 */ + params_size += sizeof(u16) * (n_channels + 1); + else + params_size += sizeof(u16) * n_channels; + + /* Allocate space for populating ssids in wl_iscan_params struct */ + params_size += sizeof(struct wlc_ssid) * n_ssids; + } + params = (wl_escan_params_t *) kzalloc(params_size, GFP_KERNEL); + if (params == NULL) { + err = -ENOMEM; + goto exit; + } + + wl_scan_prep(¶ms->params, request); + params->version = htod32(ESCAN_REQ_VERSION); + params->action = htod16(action); + params->sync_id = htod16(0x1234); + if (params_size + sizeof("escan") >= WLC_IOCTL_MEDLEN) { + WL_ERR(("ioctl buffer length not sufficient\n")); + kfree(params); + err = -ENOMEM; + goto exit; + } + err = wldev_iovar_setbuf(ndev, "escan", params, params_size, + wl->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL); + if (unlikely(err)) + WL_ERR((" Escan set error (%d)\n", err)); + kfree(params); + } + else if (p2p_is_on(wl) && p2p_scan(wl)) { + /* P2P SCAN TRIGGER */ + s32 _freq = 0; + n_nodfs = 0; + if (scan_request && scan_request->n_channels) { + num_chans = scan_request->n_channels; + WL_SCAN((" chann number : %d\n", num_chans)); + default_chan_list = kzalloc(num_chans * sizeof(*default_chan_list), + GFP_KERNEL); + if (default_chan_list == NULL) { + WL_ERR(("channel list allocation failed \n")); + err = -ENOMEM; + goto exit; + } + if (!wl_get_valid_channels(ndev, chan_buf, sizeof(chan_buf))) { + list = (wl_uint32_list_t *) chan_buf; + n_valid_chan = dtoh32(list->count); + for (i = 0; i < num_chans; i++) + { + _freq = scan_request->channels[i]->center_freq; + channel = ieee80211_frequency_to_channel(_freq); + /* remove DFS channels 
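+					 * (channels 52-140 are skipped; the rest are kept only if
+					 * they appear in the firmware's valid-channel list)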
*/ + if (channel < 52 || channel > 140) { + for (j = 0; j < n_valid_chan; j++) { + /* allows only supported channel on + * current reguatory + */ + if (channel == (dtoh32(list->element[j]))) + default_chan_list[n_nodfs++] = + channel; + } + } + + } + } + if (num_chans == 3 && ( + (default_chan_list[0] == SOCIAL_CHAN_1) && + (default_chan_list[1] == SOCIAL_CHAN_2) && + (default_chan_list[2] == SOCIAL_CHAN_3))) { + /* SOCIAL CHANNELS 1, 6, 11 */ + search_state = WL_P2P_DISC_ST_SEARCH; + WL_INFO(("P2P SEARCH PHASE START \n")); + } else if ((dev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION)) && + (wl_get_mode_by_netdev(wl, dev) == WL_MODE_AP)) { + /* If you are already a GO, then do SEARCH only */ + WL_INFO(("Already a GO. Do SEARCH Only")); + search_state = WL_P2P_DISC_ST_SEARCH; + num_chans = n_nodfs; + + } else { + WL_INFO(("P2P SCAN STATE START \n")); + num_chans = n_nodfs; + } + + } + err = wl_cfgp2p_escan(wl, ndev, wl->active_scan, num_chans, default_chan_list, + search_state, action, + wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE)); + kfree(default_chan_list); + } +exit: + if (unlikely(err)) { + WL_ERR(("error (%d)\n", err)); + } + return err; +} + + +static s32 +wl_do_escan(struct wl_priv *wl, struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_scan_request *request) +{ + s32 err = BCME_OK; + s32 passive_scan; + wl_scan_results_t *results; + WL_SCAN(("Enter \n")); + + mutex_lock(&wl->usr_sync); + wl->escan_info.ndev = ndev; + wl->escan_info.wiphy = wiphy; + wl->escan_info.escan_state = WL_ESCAN_STATE_SCANING; + passive_scan = wl->active_scan ? 0 : 1; + err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN, + &passive_scan, sizeof(passive_scan), false); + if (unlikely(err)) { + WL_ERR(("error (%d)\n", err)); + goto exit; + } + results = (wl_scan_results_t *) wl->escan_info.escan_buf; + results->version = 0; + results->count = 0; + results->buflen = WL_SCAN_RESULTS_FIXED_SIZE; + + err = wl_run_escan(wl, ndev, request, WL_SCAN_ACTION_START); +exit: + mutex_unlock(&wl->usr_sync); + return err; +} + +static s32 +__wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_scan_request *request, + struct cfg80211_ssid *this_ssid) +{ + struct wl_priv *wl = wiphy_priv(wiphy); + struct cfg80211_ssid *ssids; + struct wl_scan_req *sr = wl_to_sr(wl); + struct ether_addr primary_mac; + wpa_ie_fixed_t *wps_ie; + s32 passive_scan; + bool iscan_req; + bool escan_req = false; + bool p2p_ssid; + s32 err = 0; + s32 i; + u32 wpsie_len = 0; + u8 wpsie[IE_MAX_LEN]; + + /* If scan req comes for p2p0, send it over primary I/F + * Scan results will be delivered corresponding to cfg80211_scan_request + */ + if (ndev == wl->p2p_net) { + ndev = wl_to_prmry_ndev(wl); + } + + WL_DBG(("Enter wiphy (%p)\n", wiphy)); + if (wl_get_drv_status_all(wl, SCANNING)) { + WL_ERR(("Scanning already\n")); + return -EAGAIN; + } + if (wl_get_drv_status(wl, SCAN_ABORTING, ndev)) { + WL_ERR(("Scanning being aborted\n")); + return -EAGAIN; + } + if (request && request->n_ssids > WL_SCAN_PARAMS_SSID_MAX) { + WL_ERR(("request null or n_ssids > WL_SCAN_PARAMS_SSID_MAX\n")); + return -EOPNOTSUPP; + } + + /* Arm scan timeout timer */ + mod_timer(&wl->scan_timeout, jiffies + msecs_to_jiffies(WL_SCAN_TIMER_INTERVAL_MS)); + iscan_req = false; + if (request) { /* scan bss */ + ssids = request->ssids; + if (wl->iscan_on && (!ssids || !ssids->ssid_len || request->n_ssids != 1)) { + iscan_req = true; + } else if (wl->escan_on) { + escan_req = true; + p2p_ssid = false; + for (i = 0; i < request->n_ssids; i++) { + if 
(ssids[i].ssid_len && IS_P2P_SSID(ssids[i].ssid)) { + p2p_ssid = true; + break; + } + } + if (p2p_ssid) { + if (wl->p2p_supported) { + /* p2p scan trigger */ + if (p2p_on(wl) == false) { + /* p2p on at the first time */ + p2p_on(wl) = true; + wl_cfgp2p_set_firm_p2p(wl); + get_primary_mac(wl, &primary_mac); + wl_cfgp2p_generate_bss_mac(&primary_mac, + &wl->p2p->dev_addr, &wl->p2p->int_addr); + } + wl_clr_p2p_status(wl, GO_NEG_PHASE); + WL_DBG(("P2P: GO_NEG_PHASE status cleared \n")); + p2p_scan(wl) = true; + } + } else { + /* legacy scan trigger + * So, we have to disable p2p discovery if p2p discovery is on + */ + if (wl->p2p_supported) { + p2p_scan(wl) = false; + /* If Netdevice is not equals to primary and p2p is on + * , we will do p2p scan using P2PAPI_BSSCFG_DEVICE. + */ + if (p2p_on(wl) && (ndev != wl_to_prmry_ndev(wl))) + p2p_scan(wl) = true; + + if (p2p_scan(wl) == false) { + if (wl_get_p2p_status(wl, DISCOVERY_ON)) { + err = wl_cfgp2p_discover_enable_search(wl, + false); + if (unlikely(err)) { + goto scan_out; + } + + } + } + } + if (!wl->p2p_supported || !p2p_scan(wl)) { + if (ndev == wl_to_prmry_ndev(wl)) { + /* find the WPSIE */ + memset(wpsie, 0, sizeof(wpsie)); + if ((wps_ie = wl_cfgp2p_find_wpsie( + (u8 *)request->ie, + request->ie_len)) != NULL) { + wpsie_len = + wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN; + memcpy(wpsie, wps_ie, wpsie_len); + } else { + wpsie_len = 0; + } + if (wpsie_len > 0) { + err = wl_cfgp2p_set_management_ie(wl, + ndev, -1, VNDR_IE_PRBREQ_FLAG, + wpsie, wpsie_len); + if (unlikely(err)) { + goto scan_out; + } + } + } + } + } + } + } else { /* scan in ibss */ + /* we don't do iscan in ibss */ + ssids = this_ssid; + } + wl->scan_request = request; + wl_set_drv_status(wl, SCANNING, ndev); + if (iscan_req) { + err = wl_do_iscan(wl, request); + if (likely(!err)) + return err; + else + goto scan_out; + } else if (escan_req) { + if (wl->p2p_supported) { + if (p2p_on(wl) && p2p_scan(wl)) { + + err = wl_cfgp2p_enable_discovery(wl, ndev, + request->ie, request->ie_len); + + if (unlikely(err)) { + goto scan_out; + } + } + } + err = wl_do_escan(wl, wiphy, ndev, request); + if (likely(!err)) + return err; + else + goto scan_out; + + + } else { + memset(&sr->ssid, 0, sizeof(sr->ssid)); + sr->ssid.SSID_len = + min_t(u8, sizeof(sr->ssid.SSID), ssids->ssid_len); + if (sr->ssid.SSID_len) { + memcpy(sr->ssid.SSID, ssids->ssid, sr->ssid.SSID_len); + sr->ssid.SSID_len = htod32(sr->ssid.SSID_len); + WL_SCAN(("Specific scan ssid=\"%s\" len=%d\n", + sr->ssid.SSID, sr->ssid.SSID_len)); + } else { + WL_SCAN(("Broadcast scan\n")); + } + WL_SCAN(("sr->ssid.SSID_len (%d)\n", sr->ssid.SSID_len)); + passive_scan = wl->active_scan ? 
0 : 1; + err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN, + &passive_scan, sizeof(passive_scan), false); + if (unlikely(err)) { + WL_SCAN(("WLC_SET_PASSIVE_SCAN error (%d)\n", err)); + goto scan_out; + } + err = wldev_ioctl(ndev, WLC_SCAN, &sr->ssid, + sizeof(sr->ssid), false); + if (err) { + if (err == -EBUSY) { + WL_ERR(("system busy : scan for \"%s\" " + "canceled\n", sr->ssid.SSID)); + } else { + WL_ERR(("WLC_SCAN error (%d)\n", err)); + } + goto scan_out; + } + } + + return 0; + +scan_out: + wl_clr_drv_status(wl, SCANNING, ndev); + if (timer_pending(&wl->scan_timeout)) + del_timer_sync(&wl->scan_timeout); + wl->scan_request = NULL; + return err; +} + +static s32 +wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_scan_request *request) +{ + s32 err = 0; + struct wl_priv *wl = wiphy_priv(wiphy); + + WL_DBG(("Enter \n")); + CHECK_SYS_UP(wl); + + err = __wl_cfg80211_scan(wiphy, ndev, request, NULL); + if (unlikely(err)) { + WL_ERR(("scan error (%d)\n", err)); + if (err == BCME_BUSY) { + wl->scan_busy_count++; + if (wl->scan_busy_count > WL_SCAN_BUSY_MAX) { + wl->scan_busy_count = 0; + WL_ERR(("Continuous scan failures!! Exercising FW hang recovery\n")); + net_os_send_hang_message(ndev); + } + } + return err; + } + + return err; +} + +static s32 wl_set_rts(struct net_device *dev, u32 rts_threshold) +{ + s32 err = 0; + + err = wldev_iovar_setint(dev, "rtsthresh", rts_threshold); + if (unlikely(err)) { + WL_ERR(("Error (%d)\n", err)); + return err; + } + return err; +} + +static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold) +{ + s32 err = 0; + + err = wldev_iovar_setint_bsscfg(dev, "fragthresh", frag_threshold, 0); + if (unlikely(err)) { + WL_ERR(("Error (%d)\n", err)); + return err; + } + return err; +} + +static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l) +{ + s32 err = 0; + u32 cmd = (l ? 
WLC_SET_LRL : WLC_SET_SRL); + + retry = htod32(retry); + err = wldev_ioctl(dev, cmd, &retry, sizeof(retry), false); + if (unlikely(err)) { + WL_ERR(("cmd (%d) , error (%d)\n", cmd, err)); + return err; + } + return err; +} + +static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) +{ + struct wl_priv *wl = (struct wl_priv *)wiphy_priv(wiphy); + struct net_device *ndev = wl_to_prmry_ndev(wl); + s32 err = 0; + + CHECK_SYS_UP(wl); + WL_DBG(("Enter\n")); + if (changed & WIPHY_PARAM_RTS_THRESHOLD && + (wl->conf->rts_threshold != wiphy->rts_threshold)) { + wl->conf->rts_threshold = wiphy->rts_threshold; + err = wl_set_rts(ndev, wl->conf->rts_threshold); + if (!err) + return err; + } + if (changed & WIPHY_PARAM_FRAG_THRESHOLD && + (wl->conf->frag_threshold != wiphy->frag_threshold)) { + wl->conf->frag_threshold = wiphy->frag_threshold; + err = wl_set_frag(ndev, wl->conf->frag_threshold); + if (!err) + return err; + } + if (changed & WIPHY_PARAM_RETRY_LONG && + (wl->conf->retry_long != wiphy->retry_long)) { + wl->conf->retry_long = wiphy->retry_long; + err = wl_set_retry(ndev, wl->conf->retry_long, true); + if (!err) + return err; + } + if (changed & WIPHY_PARAM_RETRY_SHORT && + (wl->conf->retry_short != wiphy->retry_short)) { + wl->conf->retry_short = wiphy->retry_short; + err = wl_set_retry(ndev, wl->conf->retry_short, false); + if (!err) { + return err; + } + } + return err; +} + +static s32 +wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_ibss_params *params) +{ + struct wl_priv *wl = wiphy_priv(wiphy); + struct wl_join_params join_params; + struct wlc_ssid ssid; + struct ether_addr bssid; + size_t join_params_size = 0; + s32 wsec = 0; + s32 bcnprd; + s32 err = 0; + + WL_TRACE(("In\n")); + CHECK_SYS_UP(wl); + + /* + * Cancel ongoing scan to sync up with sme state machine of cfg80211. 
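+	 * (the pending scan request, if any, is completed via
+	 * wl_notify_escan_complete() before the IBSS join is issued)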
+ */ + if (wl->scan_request) { + wl_notify_escan_complete(wl, dev, true, true); + } + /* Clean BSSID */ + bzero(&bssid, sizeof(bssid)); + wl_update_prof(wl, dev, NULL, (void *)&bssid, WL_PROF_BSSID); + + if (params->ssid) + WL_INFO(("SSID: %s\n", params->ssid)); + else { + WL_ERR(("SSID: NULL, Not supported\n")); + err = -EOPNOTSUPP; + goto CleanUp; + } + + if (params->bssid) + WL_INFO(("BSSID: %02X:%02X:%02X:%02X:%02X:%02X\n", + params->bssid[0], params->bssid[1], params->bssid[2], + params->bssid[3], params->bssid[4], params->bssid[5])); + + if (params->channel) + WL_INFO(("channel: %d\n", params->channel->center_freq)); + + if (params->channel_fixed) + WL_INFO(("fixed channel required\n")); + + if (params->ie && params->ie_len) + WL_INFO(("ie len: %d\n", params->ie_len)); + + if (params->beacon_interval) + WL_INFO(("beacon interval: %d\n", params->beacon_interval)); + + if (params->basic_rates) + WL_INFO(("basic rates: %08X\n", params->basic_rates)); + + if (params->privacy) + WL_INFO(("privacy required\n")); + + wl_set_drv_status(wl, CONNECTING, dev); + + /* Configure Privacy for starter */ + if (params->privacy) + wsec |= WEP_ENABLED; + + err = wldev_iovar_setint(dev, "wsec", wsec); + if (err) { + WL_ERR(("wsec failed (%d)\n", err)); + goto CleanUp; + } + + err = wldev_iovar_setint(dev, "auth", WL_AUTH_OPEN_SYSTEM); + if (err) { + WL_ERR(("auth failed (%d)\n", err)); + goto CleanUp; + } + + err = wldev_iovar_setint(dev, "wpa_auth", 0); + if (err) { + WL_ERR(("wpa_auth failed (%d)\n", err)); + goto CleanUp; + } + + /* Configure Beacon Interval for starter */ + if (params->beacon_interval) + bcnprd = params->beacon_interval; + else + bcnprd = 100; + + bcnprd = htod32(bcnprd); + err = wldev_ioctl(dev, WLC_SET_BCNPRD, &bcnprd, sizeof(bcnprd), true); + if (err) { + WL_ERR(("WLC_SET_BCNPRD failed (%d)\n", err)); + goto CleanUp; + } + + /* Configure required join parameter */ + memset(&join_params, 0, sizeof(struct wl_join_params)); + + /* SSID */ + memset(&ssid, 0, sizeof(struct wlc_ssid)); + ssid.SSID_len = MIN(params->ssid_len, 32); + join_params.ssid.SSID_len = htod32(ssid.SSID_len); + memcpy(ssid.SSID, params->ssid, ssid.SSID_len); + memcpy(join_params.ssid.SSID, params->ssid, ssid.SSID_len); + join_params_size = sizeof(join_params.ssid); + + wl_update_prof(wl, dev, NULL, &ssid, WL_PROF_SSID); + + /* BSSID */ + if (params->bssid) { + memcpy(&join_params.params.bssid, params->bssid, ETHER_ADDR_LEN); + join_params_size = sizeof(join_params.ssid) + + WL_ASSOC_PARAMS_FIXED_SIZE; + + wl_update_prof(wl, dev, NULL, params->bssid, WL_PROF_BSSID); + } else { + memcpy(&join_params.params.bssid, ðer_bcast, ETHER_ADDR_LEN); + } + + /* Channel */ + if (params->channel) { + u32 target_channel; + + target_channel = ieee80211_frequency_to_channel( + params->channel->center_freq); + if (params->channel_fixed) { + /* adding chanspec */ + wl_ch_to_chanspec(target_channel, + &join_params, &join_params_size); + } + + /* set channel for starter */ + target_channel = htod32(target_channel); + err = wldev_ioctl(dev, WLC_SET_CHANNEL, + &target_channel, sizeof(target_channel), true); + if (err) { + WL_ERR(("WLC_SET_CHANNEL failed (%d)\n", err)); + goto CleanUp; + } + } + + wl->ibss_starter = false; + + err = wldev_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size, true); + if (err) { + WL_ERR(("WLC_SET_SSID failed (%d)\n", err)); + goto CleanUp; + } + +CleanUp: + + if (err) + wl_clr_drv_status(wl, CONNECTING, dev); + + WL_TRACE(("Exit\n")); + return err; +} + +static s32 wl_cfg80211_leave_ibss(struct wiphy 
*wiphy, struct net_device *dev) +{ + struct wl_priv *wl = wiphy_priv(wiphy); + scb_val_t scbval; + bool act = false; + s32 err = 0; + u8 *curbssid; + + WL_TRACE(("Enter\n")); + + CHECK_SYS_UP(wl); + act = *(bool *) wl_read_prof(wl, dev, WL_PROF_ACT); + curbssid = wl_read_prof(wl, dev, WL_PROF_BSSID); + if (act) { + /* + * Cancel ongoing scan to sync up with sme state machine of cfg80211. + */ + if (wl->scan_request) { + wl_notify_escan_complete(wl, dev, true, true); + } + wl_set_drv_status(wl, DISCONNECTING, dev); + scbval.val = DOT11_RC_DISASSOC_LEAVING; + memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN); + scbval.val = htod32(scbval.val); + err = wldev_ioctl(dev, WLC_DISASSOC, &scbval, + sizeof(scb_val_t), true); + if (unlikely(err)) { + wl_clr_drv_status(wl, DISCONNECTING, dev); + WL_ERR(("error (%d)\n", err)); + return err; + } + } + + WL_TRACE(("Exit\n")); + return err; +} + +static s32 +wl_set_wpa_version(struct net_device *dev, struct cfg80211_connect_params *sme) +{ + struct wl_priv *wl = wlcfg_drv_priv; + struct wl_security *sec; + s32 val = 0; + s32 err = 0; + s32 bssidx = wl_cfgp2p_find_idx(wl, dev); + + if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) + val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED; + else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2) + val = WPA2_AUTH_PSK| WPA2_AUTH_UNSPECIFIED; + else + val = WPA_AUTH_DISABLED; + + if (is_wps_conn(sme)) + val = WPA_AUTH_DISABLED; + + WL_DBG(("setting wpa_auth to 0x%0x\n", val)); + err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx); + if (unlikely(err)) { + WL_ERR(("set wpa_auth failed (%d)\n", err)); + return err; + } + sec = wl_read_prof(wl, dev, WL_PROF_SEC); + sec->wpa_versions = sme->crypto.wpa_versions; + return err; +} + +static s32 +wl_set_auth_type(struct net_device *dev, struct cfg80211_connect_params *sme) +{ + struct wl_priv *wl = wlcfg_drv_priv; + struct wl_security *sec; + s32 val = 0; + s32 err = 0; + s32 bssidx = wl_cfgp2p_find_idx(wl, dev); + switch (sme->auth_type) { + case NL80211_AUTHTYPE_OPEN_SYSTEM: + val = WL_AUTH_OPEN_SYSTEM; + WL_DBG(("open system\n")); + break; + case NL80211_AUTHTYPE_SHARED_KEY: + val = WL_AUTH_SHARED_KEY; + WL_DBG(("shared key\n")); + break; + case NL80211_AUTHTYPE_AUTOMATIC: + val = WL_AUTH_OPEN_SHARED; + WL_DBG(("automatic\n")); + break; + case NL80211_AUTHTYPE_NETWORK_EAP: + WL_DBG(("network eap\n")); + default: + val = WL_AUTH_OPEN_SHARED; + WL_ERR(("invalid auth type (%d)\n", sme->auth_type)); + break; + } + + err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx); + if (unlikely(err)) { + WL_ERR(("set auth failed (%d)\n", err)); + return err; + } + sec = wl_read_prof(wl, dev, WL_PROF_SEC); + sec->auth_type = sme->auth_type; + return err; +} + +static s32 +wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme) +{ + struct wl_priv *wl = wlcfg_drv_priv; + struct wl_security *sec; + s32 pval = 0; + s32 gval = 0; + s32 err = 0; + s32 bssidx = wl_cfgp2p_find_idx(wl, dev); + + if (sme->crypto.n_ciphers_pairwise) { + switch (sme->crypto.ciphers_pairwise[0]) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + pval = WEP_ENABLED; + break; + case WLAN_CIPHER_SUITE_TKIP: + pval = TKIP_ENABLED; + break; + case WLAN_CIPHER_SUITE_CCMP: + pval = AES_ENABLED; + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + pval = AES_ENABLED; + break; + default: + WL_ERR(("invalid cipher pairwise (%d)\n", + sme->crypto.ciphers_pairwise[0])); + return -EINVAL; + } + } + if (sme->crypto.cipher_group) { + switch (sme->crypto.cipher_group) { + case 
WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + gval = WEP_ENABLED; + break; + case WLAN_CIPHER_SUITE_TKIP: + gval = TKIP_ENABLED; + break; + case WLAN_CIPHER_SUITE_CCMP: + gval = AES_ENABLED; + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + gval = AES_ENABLED; + break; + default: + WL_ERR(("invalid cipher group (%d)\n", + sme->crypto.cipher_group)); + return -EINVAL; + } + } + + WL_DBG(("pval (%d) gval (%d)\n", pval, gval)); + + if (is_wps_conn(sme)) { + if (sme->privacy) + err = wldev_iovar_setint_bsscfg(dev, "wsec", 4, bssidx); + else + /* WPS-2.0 allowes no security */ + err = wldev_iovar_setint_bsscfg(dev, "wsec", 0, bssidx); + } else { + WL_DBG((" NO, is_wps_conn, Set pval | gval to WSEC")); + err = wldev_iovar_setint_bsscfg(dev, "wsec", + pval | gval, bssidx); + } + if (unlikely(err)) { + WL_ERR(("error (%d)\n", err)); + return err; + } + + sec = wl_read_prof(wl, dev, WL_PROF_SEC); + sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0]; + sec->cipher_group = sme->crypto.cipher_group; + + return err; +} + +static s32 +wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme) +{ + struct wl_priv *wl = wlcfg_drv_priv; + struct wl_security *sec; + s32 val = 0; + s32 err = 0; + s32 bssidx = wl_cfgp2p_find_idx(wl, dev); + + if (sme->crypto.n_akm_suites) { + err = wldev_iovar_getint(dev, "wpa_auth", &val); + if (unlikely(err)) { + WL_ERR(("could not get wpa_auth (%d)\n", err)); + return err; + } + if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) { + switch (sme->crypto.akm_suites[0]) { + case WLAN_AKM_SUITE_8021X: + val = WPA_AUTH_UNSPECIFIED; + break; + case WLAN_AKM_SUITE_PSK: + val = WPA_AUTH_PSK; + break; + default: + WL_ERR(("invalid cipher group (%d)\n", + sme->crypto.cipher_group)); + return -EINVAL; + } + } else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) { + switch (sme->crypto.akm_suites[0]) { + case WLAN_AKM_SUITE_8021X: + val = WPA2_AUTH_UNSPECIFIED; + break; + case WLAN_AKM_SUITE_PSK: + val = WPA2_AUTH_PSK; + break; + default: + WL_ERR(("invalid cipher group (%d)\n", + sme->crypto.cipher_group)); + return -EINVAL; + } + } + WL_DBG(("setting wpa_auth to %d\n", val)); + + err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx); + if (unlikely(err)) { + WL_ERR(("could not set wpa_auth (%d)\n", err)); + return err; + } + } + sec = wl_read_prof(wl, dev, WL_PROF_SEC); + sec->wpa_auth = sme->crypto.akm_suites[0]; + + return err; +} + +static s32 +wl_set_set_sharedkey(struct net_device *dev, + struct cfg80211_connect_params *sme) +{ + struct wl_priv *wl = wlcfg_drv_priv; + struct wl_security *sec; + struct wl_wsec_key key; + s32 val; + s32 err = 0; + s32 bssidx = wl_cfgp2p_find_idx(wl, dev); + + WL_DBG(("key len (%d)\n", sme->key_len)); + if (sme->key_len) { + sec = wl_read_prof(wl, dev, WL_PROF_SEC); + WL_DBG(("wpa_versions 0x%x cipher_pairwise 0x%x\n", + sec->wpa_versions, sec->cipher_pairwise)); + if (!(sec->wpa_versions & (NL80211_WPA_VERSION_1 | + NL80211_WPA_VERSION_2)) && + (sec->cipher_pairwise & (WLAN_CIPHER_SUITE_WEP40 | + WLAN_CIPHER_SUITE_WEP104))) + { + memset(&key, 0, sizeof(key)); + key.len = (u32) sme->key_len; + key.index = (u32) sme->key_idx; + if (unlikely(key.len > sizeof(key.data))) { + WL_ERR(("Too long key length (%u)\n", key.len)); + return -EINVAL; + } + memcpy(key.data, sme->key, key.len); + key.flags = WL_PRIMARY_KEY; + switch (sec->cipher_pairwise) { + case WLAN_CIPHER_SUITE_WEP40: + key.algo = CRYPTO_ALGO_WEP1; + break; + case WLAN_CIPHER_SUITE_WEP104: + key.algo = CRYPTO_ALGO_WEP128; + break; + default: + 
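+				/* only WEP40/WEP104 shared keys can be plumbed here */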
WL_ERR(("Invalid algorithm (%d)\n", + sme->crypto.ciphers_pairwise[0])); + return -EINVAL; + } + /* Set the new key/index */ + WL_DBG(("key length (%d) key index (%d) algo (%d)\n", + key.len, key.index, key.algo)); + WL_DBG(("key \"%s\"\n", key.data)); + swap_key_from_BE(&key); + err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), + wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync); + if (unlikely(err)) { + WL_ERR(("WLC_SET_KEY error (%d)\n", err)); + return err; + } + if (sec->auth_type == NL80211_AUTHTYPE_SHARED_KEY) { + WL_DBG(("set auth_type to shared key\n")); + val = WL_AUTH_SHARED_KEY; /* shared key */ + err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx); + if (unlikely(err)) { + WL_ERR(("set auth failed (%d)\n", err)); + return err; + } + } + } + } + return err; +} + +static s32 +wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_connect_params *sme) +{ + struct wl_priv *wl = wiphy_priv(wiphy); + struct ieee80211_channel *chan = sme->channel; + wl_extjoin_params_t *ext_join_params; + struct wl_join_params join_params; + size_t join_params_size; + s32 err = 0; + wpa_ie_fixed_t *wpa_ie; + wpa_ie_fixed_t *wps_ie; + bcm_tlv_t *wpa2_ie; + u8* wpaie = 0; + u32 wpaie_len = 0; + u32 wpsie_len = 0; + u32 chan_cnt = 0; + u8 wpsie[IE_MAX_LEN]; + struct ether_addr bssid; + + WL_DBG(("In\n")); + CHECK_SYS_UP(wl); + + /* + * Cancel ongoing scan to sync up with sme state machine of cfg80211. + */ + if (wl->scan_request) { + wl_notify_escan_complete(wl, dev, true, true); + } + /* Clean BSSID */ + bzero(&bssid, sizeof(bssid)); + wl_update_prof(wl, dev, NULL, (void *)&bssid, WL_PROF_BSSID); + + if (IS_P2P_SSID(sme->ssid) && (dev != wl_to_prmry_ndev(wl))) { + /* we only allow to connect using virtual interface in case of P2P */ + if (p2p_is_on(wl) && is_wps_conn(sme)) { + WL_DBG(("ASSOC1 p2p index : %d sme->ie_len %d\n", + wl_cfgp2p_find_idx(wl, dev), sme->ie_len)); + /* Have to apply WPS IE + P2P IE in assoc req frame */ + wl_cfgp2p_set_management_ie(wl, dev, + wl_cfgp2p_find_idx(wl, dev), VNDR_IE_PRBREQ_FLAG, + wl_to_p2p_bss_saved_ie(wl, P2PAPI_BSSCFG_DEVICE).p2p_probe_req_ie, + wl_to_p2p_bss_saved_ie(wl, + P2PAPI_BSSCFG_DEVICE).p2p_probe_req_ie_len); + wl_cfgp2p_set_management_ie(wl, dev, wl_cfgp2p_find_idx(wl, dev), + VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len); + } else if (p2p_is_on(wl) && (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)) { + /* This is the connect req after WPS is done [credentials exchanged] + * currently identified with WPA_VERSION_2 . + * Update the previously set IEs with + * the newly received IEs from Supplicant. This will remove the WPS IE from + * the Assoc Req. + */ + WL_DBG(("ASSOC2 p2p index : %d sme->ie_len %d\n", + wl_cfgp2p_find_idx(wl, dev), sme->ie_len)); + wl_cfgp2p_set_management_ie(wl, dev, + wl_cfgp2p_find_idx(wl, dev), VNDR_IE_PRBREQ_FLAG, + sme->ie, sme->ie_len); + wl_cfgp2p_set_management_ie(wl, dev, wl_cfgp2p_find_idx(wl, dev), + VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len); + } + + } else if (dev == wl_to_prmry_ndev(wl)) { + /* find the RSN_IE */ + if ((wpa2_ie = bcm_parse_tlvs((u8 *)sme->ie, sme->ie_len, + DOT11_MNG_RSN_ID)) != NULL) { + WL_DBG((" WPA2 IE is found\n")); + } + /* find the WPA_IE */ + if ((wpa_ie = wl_cfgp2p_find_wpaie((u8 *)sme->ie, + sme->ie_len)) != NULL) { + WL_DBG((" WPA IE is found\n")); + } + if (wpa_ie != NULL || wpa2_ie != NULL) { + wpaie = (wpa_ie != NULL) ? (u8 *)wpa_ie : (u8 *)wpa2_ie; + wpaie_len = (wpa_ie != NULL) ? 
wpa_ie->length : wpa2_ie->len; + wpaie_len += WPA_RSN_IE_TAG_FIXED_LEN; + wldev_iovar_setbuf(dev, "wpaie", wpaie, wpaie_len, + wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync); + } else { + wldev_iovar_setbuf(dev, "wpaie", NULL, 0, + wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync); + } + + /* find the WPSIE */ + memset(wpsie, 0, sizeof(wpsie)); + if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)sme->ie, + sme->ie_len)) != NULL) { + wpsie_len = wps_ie->length +WPA_RSN_IE_TAG_FIXED_LEN; + memcpy(wpsie, wps_ie, wpsie_len); + } else { + wpsie_len = 0; + } + err = wl_cfgp2p_set_management_ie(wl, dev, -1, + VNDR_IE_ASSOCREQ_FLAG, wpsie, wpsie_len); + if (unlikely(err)) { + return err; + } + } + if (unlikely(!sme->ssid)) { + WL_ERR(("Invalid ssid\n")); + return -EOPNOTSUPP; + } + if (chan) { + wl->channel = ieee80211_frequency_to_channel(chan->center_freq); + chan_cnt = 1; + WL_DBG(("channel (%d), center_req (%d)\n", wl->channel, + chan->center_freq)); + } else + wl->channel = 0; + WL_DBG(("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len)); + err = wl_set_wpa_version(dev, sme); + if (unlikely(err)) { + WL_ERR(("Invalid wpa_version\n")); + return err; + } + + err = wl_set_auth_type(dev, sme); + if (unlikely(err)) { + WL_ERR(("Invalid auth type\n")); + return err; + } + + err = wl_set_set_cipher(dev, sme); + if (unlikely(err)) { + WL_ERR(("Invalid ciper\n")); + return err; + } + + err = wl_set_key_mgmt(dev, sme); + if (unlikely(err)) { + WL_ERR(("Invalid key mgmt\n")); + return err; + } + + err = wl_set_set_sharedkey(dev, sme); + if (unlikely(err)) { + WL_ERR(("Invalid shared key\n")); + return err; + } + + /* + * Join with specific BSSID and cached SSID + * If SSID is zero join based on BSSID only + */ + join_params_size = WL_EXTJOIN_PARAMS_FIXED_SIZE + + chan_cnt * sizeof(chanspec_t); + ext_join_params = (wl_extjoin_params_t*)kzalloc(join_params_size, GFP_KERNEL); + if (ext_join_params == NULL) { + err = -ENOMEM; + wl_clr_drv_status(wl, CONNECTING, dev); + goto exit; + } + ext_join_params->ssid.SSID_len = min(sizeof(ext_join_params->ssid.SSID), sme->ssid_len); + memcpy(&ext_join_params->ssid.SSID, sme->ssid, ext_join_params->ssid.SSID_len); + ext_join_params->ssid.SSID_len = htod32(ext_join_params->ssid.SSID_len); + /* Set up join scan parameters */ + ext_join_params->scan.scan_type = -1; + ext_join_params->scan.nprobes = 2; + /* increate dwell time to receive probe response or detect Beacon + * from target AP at a noisy air only during connect command + */ + ext_join_params->scan.active_time = WL_SCAN_ACTIVE_TIME*3; + ext_join_params->scan.passive_time = WL_SCAN_PASSIVE_TIME*3; + ext_join_params->scan.home_time = -1; + + if (sme->bssid) + memcpy(&ext_join_params->assoc.bssid, sme->bssid, ETH_ALEN); + else + memcpy(&ext_join_params->assoc.bssid, ðer_bcast, ETH_ALEN); + ext_join_params->assoc.chanspec_num = chan_cnt; + if (chan_cnt) { + u16 channel, band, bw, ctl_sb; + chanspec_t chspec; + channel = wl->channel; + band = (channel <= CH_MAX_2G_CHANNEL) ? 
WL_CHANSPEC_BAND_2G + : WL_CHANSPEC_BAND_5G; + bw = WL_CHANSPEC_BW_20; + ctl_sb = WL_CHANSPEC_CTL_SB_NONE; + chspec = (channel | band | bw | ctl_sb); + ext_join_params->assoc.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK; + ext_join_params->assoc.chanspec_list[0] |= chspec; + ext_join_params->assoc.chanspec_list[0] = + htodchanspec(ext_join_params->assoc.chanspec_list[0]); + } + ext_join_params->assoc.chanspec_num = htod32(ext_join_params->assoc.chanspec_num); + if (ext_join_params->ssid.SSID_len < IEEE80211_MAX_SSID_LEN) { + WL_INFO(("ssid \"%s\", len (%d)\n", ext_join_params->ssid.SSID, + ext_join_params->ssid.SSID_len)); + } + wl_set_drv_status(wl, CONNECTING, dev); + err = wldev_iovar_setbuf_bsscfg(dev, "join", ext_join_params, join_params_size, + wl->ioctl_buf, WLC_IOCTL_MAXLEN, wl_cfgp2p_find_idx(wl, dev), &wl->ioctl_buf_sync); + kfree(ext_join_params); + if (err) { + wl_clr_drv_status(wl, CONNECTING, dev); + if (err == BCME_UNSUPPORTED) { + WL_DBG(("join iovar is not supported\n")); + goto set_ssid; + } else + WL_ERR(("error (%d)\n", err)); + } else + goto exit; + +set_ssid: + memset(&join_params, 0, sizeof(join_params)); + join_params_size = sizeof(join_params.ssid); + + join_params.ssid.SSID_len = min(sizeof(join_params.ssid.SSID), sme->ssid_len); + memcpy(&join_params.ssid.SSID, sme->ssid, join_params.ssid.SSID_len); + join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len); + wl_update_prof(wl, dev, NULL, &join_params.ssid, WL_PROF_SSID); + if (sme->bssid) + memcpy(&join_params.params.bssid, sme->bssid, ETH_ALEN); + else + memcpy(&join_params.params.bssid, ðer_bcast, ETH_ALEN); + + wl_ch_to_chanspec(wl->channel, &join_params, &join_params_size); + WL_DBG(("join_param_size %d\n", join_params_size)); + + if (join_params.ssid.SSID_len < IEEE80211_MAX_SSID_LEN) { + WL_INFO(("ssid \"%s\", len (%d)\n", join_params.ssid.SSID, + join_params.ssid.SSID_len)); + } + wl_set_drv_status(wl, CONNECTING, dev); + err = wldev_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size, true); + if (err) { + WL_ERR(("error (%d)\n", err)); + wl_clr_drv_status(wl, CONNECTING, dev); + } +exit: + return err; +} + +static s32 +wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, + u16 reason_code) +{ + struct wl_priv *wl = wiphy_priv(wiphy); + scb_val_t scbval; + bool act = false; + s32 err = 0; + u8 *curbssid; + WL_ERR(("Reason %d\n", reason_code)); + CHECK_SYS_UP(wl); + act = *(bool *) wl_read_prof(wl, dev, WL_PROF_ACT); + curbssid = wl_read_prof(wl, dev, WL_PROF_BSSID); + if (act) { + /* + * Cancel ongoing scan to sync up with sme state machine of cfg80211. 
+ */ + if (wl->scan_request) { + wl_notify_escan_complete(wl, dev, true, true); + } + wl_set_drv_status(wl, DISCONNECTING, dev); + scbval.val = reason_code; + memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN); + scbval.val = htod32(scbval.val); + err = wldev_ioctl(dev, WLC_DISASSOC, &scbval, + sizeof(scb_val_t), true); + if (unlikely(err)) { + wl_clr_drv_status(wl, DISCONNECTING, dev); + WL_ERR(("error (%d)\n", err)); + return err; + } + } + + return err; +} + +static s32 +wl_cfg80211_set_tx_power(struct wiphy *wiphy, + enum nl80211_tx_power_setting type, s32 dbm) +{ + + struct wl_priv *wl = wiphy_priv(wiphy); + struct net_device *ndev = wl_to_prmry_ndev(wl); + u16 txpwrmw; + s32 err = 0; + s32 disable = 0; + + CHECK_SYS_UP(wl); + switch (type) { + case NL80211_TX_POWER_AUTOMATIC: + break; + case NL80211_TX_POWER_LIMITED: + if (dbm < 0) { + WL_ERR(("TX_POWER_LIMITTED - dbm is negative\n")); + return -EINVAL; + } + break; + case NL80211_TX_POWER_FIXED: + if (dbm < 0) { + WL_ERR(("TX_POWER_FIXED - dbm is negative..\n")); + return -EINVAL; + } + break; + } + /* Make sure radio is off or on as far as software is concerned */ + disable = WL_RADIO_SW_DISABLE << 16; + disable = htod32(disable); + err = wldev_ioctl(ndev, WLC_SET_RADIO, &disable, sizeof(disable), true); + if (unlikely(err)) { + WL_ERR(("WLC_SET_RADIO error (%d)\n", err)); + return err; + } + + if (dbm > 0xffff) + txpwrmw = 0xffff; + else + txpwrmw = (u16) dbm; + err = wldev_iovar_setint(ndev, "qtxpower", + (s32) (bcm_mw_to_qdbm(txpwrmw))); + if (unlikely(err)) { + WL_ERR(("qtxpower error (%d)\n", err)); + return err; + } + wl->conf->tx_power = dbm; + + return err; +} + +static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm) +{ + struct wl_priv *wl = wiphy_priv(wiphy); + struct net_device *ndev = wl_to_prmry_ndev(wl); + s32 txpwrdbm; + u8 result; + s32 err = 0; + + CHECK_SYS_UP(wl); + err = wldev_iovar_getint(ndev, "qtxpower", &txpwrdbm); + if (unlikely(err)) { + WL_ERR(("error (%d)\n", err)); + return err; + } + result = (u8) (txpwrdbm & ~WL_TXPWR_OVERRIDE); + *dbm = (s32) bcm_qdbm_to_mw(result); + + return err; +} + +static s32 +wl_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *dev, + u8 key_idx, bool unicast, bool multicast) +{ + struct wl_priv *wl = wiphy_priv(wiphy); + u32 index; + s32 wsec; + s32 err = 0; + s32 bssidx = wl_cfgp2p_find_idx(wl, dev); + + WL_DBG(("key index (%d)\n", key_idx)); + CHECK_SYS_UP(wl); + err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx); + if (unlikely(err)) { + WL_ERR(("WLC_GET_WSEC error (%d)\n", err)); + return err; + } + if (wsec & WEP_ENABLED) { + /* Just select a new current key */ + index = (u32) key_idx; + index = htod32(index); + err = wldev_ioctl(dev, WLC_SET_KEY_PRIMARY, &index, + sizeof(index), true); + if (unlikely(err)) { + WL_ERR(("error (%d)\n", err)); + } + } + return err; +} + +static s32 +wl_add_keyext(struct wiphy *wiphy, struct net_device *dev, + u8 key_idx, const u8 *mac_addr, struct key_params *params) +{ + struct wl_priv *wl = wiphy_priv(wiphy); + struct wl_wsec_key key; + s32 err = 0; + s32 bssidx = wl_cfgp2p_find_idx(wl, dev); + s32 mode = wl_get_mode_by_netdev(wl, dev); + memset(&key, 0, sizeof(key)); + key.index = (u32) key_idx; + + if (!ETHER_ISMULTI(mac_addr)) + memcpy((char *)&key.ea, (void *)mac_addr, ETHER_ADDR_LEN); + key.len = (u32) params->key_len; + + /* check for key index change */ + if (key.len == 0) { + /* key delete */ + swap_key_from_BE(&key); + wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), + wl->ioctl_buf, 
WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync); + if (unlikely(err)) { + WL_ERR(("key delete error (%d)\n", err)); + return err; + } + } else { + if (key.len > sizeof(key.data)) { + WL_ERR(("Invalid key length (%d)\n", key.len)); + return -EINVAL; + } + WL_DBG(("Setting the key index %d\n", key.index)); + memcpy(key.data, params->key, key.len); + + if ((mode == WL_MODE_BSS) && + (params->cipher == WLAN_CIPHER_SUITE_TKIP)) { + u8 keybuf[8]; + memcpy(keybuf, &key.data[24], sizeof(keybuf)); + memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); + memcpy(&key.data[16], keybuf, sizeof(keybuf)); + } + + /* if IW_ENCODE_EXT_RX_SEQ_VALID set */ + if (params->seq && params->seq_len == 6) { + /* rx iv */ + u8 *ivptr; + ivptr = (u8 *) params->seq; + key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) | + (ivptr[3] << 8) | ivptr[2]; + key.rxiv.lo = (ivptr[1] << 8) | ivptr[0]; + key.iv_initialized = true; + } + + switch (params->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + key.algo = CRYPTO_ALGO_WEP1; + WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n")); + break; + case WLAN_CIPHER_SUITE_WEP104: + key.algo = CRYPTO_ALGO_WEP128; + WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n")); + break; + case WLAN_CIPHER_SUITE_TKIP: + key.algo = CRYPTO_ALGO_TKIP; + WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n")); + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + key.algo = CRYPTO_ALGO_AES_CCM; + WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n")); + break; + case WLAN_CIPHER_SUITE_CCMP: + key.algo = CRYPTO_ALGO_AES_CCM; + WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n")); + break; + default: + WL_ERR(("Invalid cipher (0x%x)\n", params->cipher)); + return -EINVAL; + } + swap_key_from_BE(&key); + wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), + wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync); + if (unlikely(err)) { + WL_ERR(("WLC_SET_KEY error (%d)\n", err)); + return err; + } + } + return err; +} + +static s32 +wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev, + u8 key_idx, bool pairwise, const u8 *mac_addr, + struct key_params *params) +{ + struct wl_wsec_key key; + s32 val = 0; + s32 wsec = 0; + s32 err = 0; + u8 keybuf[8]; + s32 bssidx = 0; + struct wl_priv *wl = wiphy_priv(wiphy); + s32 mode = wl_get_mode_by_netdev(wl, dev); + WL_DBG(("key index (%d)\n", key_idx)); + CHECK_SYS_UP(wl); + + bssidx = wl_cfgp2p_find_idx(wl, dev); + + if (mac_addr) { + wl_add_keyext(wiphy, dev, key_idx, mac_addr, params); + goto exit; + } + memset(&key, 0, sizeof(key)); + + key.len = (u32) params->key_len; + key.index = (u32) key_idx; + + if (unlikely(key.len > sizeof(key.data))) { + WL_ERR(("Too long key length (%u)\n", key.len)); + return -EINVAL; + } + memcpy(key.data, params->key, key.len); + + key.flags = WL_PRIMARY_KEY; + switch (params->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + key.algo = CRYPTO_ALGO_WEP1; + val = WEP_ENABLED; + WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n")); + break; + case WLAN_CIPHER_SUITE_WEP104: + key.algo = CRYPTO_ALGO_WEP128; + val = WEP_ENABLED; + WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n")); + break; + case WLAN_CIPHER_SUITE_TKIP: + key.algo = CRYPTO_ALGO_TKIP; + val = TKIP_ENABLED; + /* wpa_supplicant switches the third and fourth quarters of the TKIP key */ + if (mode == WL_MODE_BSS) { + bcopy(&key.data[24], keybuf, sizeof(keybuf)); + bcopy(&key.data[16], &key.data[24], sizeof(keybuf)); + bcopy(keybuf, &key.data[16], sizeof(keybuf)); + } + WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n")); + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + key.algo = CRYPTO_ALGO_AES_CCM; + val = AES_ENABLED; + WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n")); + break; + case 
WLAN_CIPHER_SUITE_CCMP: + key.algo = CRYPTO_ALGO_AES_CCM; + val = AES_ENABLED; + WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n")); + break; + default: + WL_ERR(("Invalid cipher (0x%x)\n", params->cipher)); + return -EINVAL; + } + + /* Set the new key/index */ + swap_key_from_BE(&key); + err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), wl->ioctl_buf, + WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync); + if (unlikely(err)) { + WL_ERR(("WLC_SET_KEY error (%d)\n", err)); + return err; + } + +exit: + err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx); + if (unlikely(err)) { + WL_ERR(("get wsec error (%d)\n", err)); + return err; + } + + wsec |= val; + err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx); + if (unlikely(err)) { + WL_ERR(("set wsec error (%d)\n", err)); + return err; + } + + return err; +} + +static s32 +wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev, + u8 key_idx, bool pairwise, const u8 *mac_addr) +{ + struct wl_wsec_key key; + struct wl_priv *wl = wiphy_priv(wiphy); + s32 err = 0; + s32 bssidx = wl_cfgp2p_find_idx(wl, dev); + + WL_DBG(("Enter\n")); + CHECK_SYS_UP(wl); + memset(&key, 0, sizeof(key)); + + key.flags = WL_PRIMARY_KEY; + key.algo = CRYPTO_ALGO_OFF; + key.index = (u32) key_idx; + + WL_DBG(("key index (%d)\n", key_idx)); + /* Set the new key/index */ + swap_key_from_BE(&key); + wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), wl->ioctl_buf, + WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync); + if (unlikely(err)) { + if (err == -EINVAL) { + if (key.index >= DOT11_MAX_DEFAULT_KEYS) { + /* we ignore this key index in this case */ + WL_DBG(("invalid key index (%d)\n", key_idx)); + } + } else { + WL_ERR(("WLC_SET_KEY error (%d)\n", err)); + } + return err; + } + return err; +} + +static s32 +wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev, + u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie, + void (*callback) (void *cookie, struct key_params * params)) +{ + struct key_params params; + struct wl_wsec_key key; + struct wl_priv *wl = wiphy_priv(wiphy); + struct wl_security *sec; + s32 wsec; + s32 err = 0; + s32 bssidx = wl_cfgp2p_find_idx(wl, dev); + + WL_DBG(("key index (%d)\n", key_idx)); + CHECK_SYS_UP(wl); + memset(&key, 0, sizeof(key)); + key.index = key_idx; + swap_key_to_BE(&key); + memset(¶ms, 0, sizeof(params)); + params.key_len = (u8) min_t(u8, DOT11_MAX_KEY_SIZE, key.len); + memcpy(params.key, key.data, params.key_len); + + wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx); + if (unlikely(err)) { + WL_ERR(("WLC_GET_WSEC error (%d)\n", err)); + return err; + } + switch (wsec & ~SES_OW_ENABLED) { + case WEP_ENABLED: + sec = wl_read_prof(wl, dev, WL_PROF_SEC); + if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) { + params.cipher = WLAN_CIPHER_SUITE_WEP40; + WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n")); + } else if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP104) { + params.cipher = WLAN_CIPHER_SUITE_WEP104; + WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n")); + } + break; + case TKIP_ENABLED: + params.cipher = WLAN_CIPHER_SUITE_TKIP; + WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n")); + break; + case AES_ENABLED: + params.cipher = WLAN_CIPHER_SUITE_AES_CMAC; + WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n")); + break; + default: + WL_ERR(("Invalid algo (0x%x)\n", wsec)); + return -EINVAL; + } + + callback(cookie, ¶ms); + return err; +} + +static s32 +wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy, + struct net_device *dev, u8 key_idx) +{ + WL_INFO(("Not supported\n")); + return -EOPNOTSUPP; +} + +static s32 
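+/* cfg80211 get_station handler: reports per-STA idle/connected time in AP
+ * mode, and rate/RSSI/packet counters for the current association in STA mode.
+ */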
+wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev, + u8 *mac, struct station_info *sinfo) +{ + struct wl_priv *wl = wiphy_priv(wiphy); + scb_val_t scb_val; + s32 rssi; + s32 rate; + s32 err = 0; + sta_info_t *sta; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) + s8 eabuf[ETHER_ADDR_STR_LEN]; +#endif + dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub); + + CHECK_SYS_UP(wl); + if (wl_get_mode_by_netdev(wl, dev) == WL_MODE_AP) { + err = wldev_iovar_getbuf(dev, "sta_info", (struct ether_addr *)mac, + ETHER_ADDR_LEN, wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync); + if (err < 0) { + WL_ERR(("GET STA INFO failed, %d\n", err)); + return err; + } + sinfo->filled = STATION_INFO_INACTIVE_TIME; + sta = (sta_info_t *)wl->ioctl_buf; + sta->len = dtoh16(sta->len); + sta->cap = dtoh16(sta->cap); + sta->flags = dtoh32(sta->flags); + sta->idle = dtoh32(sta->idle); + sta->in = dtoh32(sta->in); + sinfo->inactive_time = sta->idle * 1000; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) + if (sta->flags & WL_STA_ASSOC) { + sinfo->filled |= STATION_INFO_CONNECTED_TIME; + sinfo->connected_time = sta->in; + } + WL_INFO(("STA %s : idle time : %d sec, connected time :%d ms\n", + bcm_ether_ntoa((const struct ether_addr *)mac, eabuf), sinfo->inactive_time, + sta->idle * 1000)); +#endif + } else if (wl_get_mode_by_netdev(wl, dev) == WL_MODE_BSS) { + get_pktcnt_t pktcnt; + u8 *curmacp = wl_read_prof(wl, dev, WL_PROF_BSSID); + err = -ENODEV; + if (!wl_get_drv_status(wl, CONNECTED, dev) || + (dhd_is_associated(dhd, NULL, &err) == FALSE)) { + WL_ERR(("NOT assoc: %d\n", err)); + goto get_station_err; + } + if (memcmp(mac, curmacp, ETHER_ADDR_LEN)) { + WL_ERR(("Wrong Mac address: "MACSTR" != "MACSTR"\n", + MAC2STR(mac), MAC2STR(curmacp))); + } + + /* Report the current tx rate */ + err = wldev_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate), false); + if (err) { + WL_ERR(("Could not get rate (%d)\n", err)); + } else { + rate = dtoh32(rate); + sinfo->filled |= STATION_INFO_TX_BITRATE; + sinfo->txrate.legacy = rate * 5; + WL_DBG(("Rate %d Mbps\n", (rate / 2))); + } + + memset(&scb_val, 0, sizeof(scb_val)); + scb_val.val = 0; + err = wldev_ioctl(dev, WLC_GET_RSSI, &scb_val, + sizeof(scb_val_t), false); + if (err) { + WL_ERR(("Could not get rssi (%d)\n", err)); + goto get_station_err; + } + rssi = dtoh32(scb_val.val); + sinfo->filled |= STATION_INFO_SIGNAL; + sinfo->signal = rssi; + WL_DBG(("RSSI %d dBm\n", rssi)); + + err = wldev_ioctl(dev, WLC_GET_PKTCNTS, &pktcnt, + sizeof(pktcnt), false); + if (!err) { + sinfo->filled |= (STATION_INFO_RX_PACKETS | + STATION_INFO_RX_DROP_MISC | + STATION_INFO_TX_PACKETS | + STATION_INFO_TX_FAILED); + sinfo->rx_packets = pktcnt.rx_good_pkt; + sinfo->rx_dropped_misc = pktcnt.rx_bad_pkt; + sinfo->tx_packets = pktcnt.tx_good_pkt; + sinfo->tx_failed = pktcnt.tx_bad_pkt; + } + +get_station_err: + if (err && (err != -ETIMEDOUT) && (err != -EIO)) { + /* Disconnect due to zero BSSID or error to get RSSI */ + WL_ERR(("force cfg80211_disconnected: %d\n", err)); + wl_clr_drv_status(wl, CONNECTED, dev); + cfg80211_disconnected(dev, 0, NULL, 0, GFP_KERNEL); + wl_link_down(wl); + } + } + + return err; +} + +static s32 +wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, + bool enabled, s32 timeout) +{ + s32 pm; + s32 err = 0; + struct wl_priv *wl = wiphy_priv(wiphy); + dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub); + + CHECK_SYS_UP(wl); + + WL_DBG(("Enter : power save %s\n", (enabled ? "enable" : "disable"))); + if (wl->p2p_net == dev) { + return err; + } + + pm = enabled ? 
((dhd->in_suspend) ? PM_MAX : PM_FAST) : PM_OFF; + pm = htod32(pm); + err = wldev_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), true); + if (unlikely(err)) { + if (err == -ENODEV) + WL_DBG(("net_device is not ready yet\n")); + else + WL_ERR(("error (%d)\n", err)); + return err; + } + WL_DBG(("power save %s\n", (pm ? "enabled" : "disabled"))); + return err; +} + +static __used u32 wl_find_msb(u16 bit16) +{ + u32 ret = 0; + + if (bit16 & 0xff00) { + ret += 8; + bit16 >>= 8; + } + + if (bit16 & 0xf0) { + ret += 4; + bit16 >>= 4; + } + + if (bit16 & 0xc) { + ret += 2; + bit16 >>= 2; + } + + if (bit16 & 2) + ret += bit16 & 2; + else if (bit16) + ret += bit16; + + return ret; +} + +static s32 wl_cfg80211_resume(struct wiphy *wiphy) +{ + struct wl_priv *wl = wiphy_priv(wiphy); + struct net_device *ndev = wl_to_prmry_ndev(wl); + s32 err = 0; + + if (unlikely(!wl_get_drv_status(wl, READY, ndev))) { + WL_INFO(("device is not ready\n")); + return 0; + } + + wl_invoke_iscan(wl); + + return err; +} + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39) +static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow) +#else +static s32 wl_cfg80211_suspend(struct wiphy *wiphy) +#endif +{ +#ifdef DHD_CLEAR_ON_SUSPEND + struct wl_priv *wl = wiphy_priv(wiphy); + struct net_info *iter, *next; + struct net_device *ndev = wl_to_prmry_ndev(wl); + unsigned long flags; + + if (unlikely(!wl_get_drv_status(wl, READY, ndev))) { + WL_INFO(("device is not ready : status (%d)\n", + (int)wl->status)); + return 0; + } + for_each_ndev(wl, iter, next) + wl_set_drv_status(wl, SCAN_ABORTING, iter->ndev); + wl_term_iscan(wl); + spin_lock_irqsave(&wl->cfgdrv_lock, flags); + if (wl->scan_request) { + cfg80211_scan_done(wl->scan_request, true); + wl->scan_request = NULL; + } + for_each_ndev(wl, iter, next) { + wl_clr_drv_status(wl, SCANNING, iter->ndev); + wl_clr_drv_status(wl, SCAN_ABORTING, iter->ndev); + } + spin_unlock_irqrestore(&wl->cfgdrv_lock, flags); + for_each_ndev(wl, iter, next) { + if (wl_get_drv_status(wl, CONNECTING, iter->ndev)) { + wl_bss_connect_done(wl, iter->ndev, NULL, NULL, false); + } + } +#endif /* DHD_CLEAR_ON_SUSPEND */ + return 0; +} + +static s32 +wl_update_pmklist(struct net_device *dev, struct wl_pmk_list *pmk_list, + s32 err) +{ + int i, j; + struct wl_priv *wl = wlcfg_drv_priv; + struct net_device *primary_dev = wl_to_prmry_ndev(wl); + + if (!pmk_list) { + printk("pmk_list is NULL\n"); + return -EINVAL; + } + /* pmk list is supported only for STA interface i.e. 
primary interface + * Refer code wlc_bsscfg.c->wlc_bsscfg_sta_init + */ + if (primary_dev != dev) { + WL_INFO(("Not supporting Flushing pmklist on virtual" + " interfaces than primary interface\n")); + return err; + } + + WL_DBG(("No of elements %d\n", pmk_list->pmkids.npmkid)); + for (i = 0; i < pmk_list->pmkids.npmkid; i++) { + WL_DBG(("PMKID[%d]: %pM =\n", i, + &pmk_list->pmkids.pmkid[i].BSSID)); + for (j = 0; j < WPA2_PMKID_LEN; j++) { + WL_DBG(("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j])); + } + } + if (likely(!err)) { + err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmk_list, + sizeof(*pmk_list), wl->ioctl_buf, WLC_IOCTL_MAXLEN, NULL); + } + + return err; +} + +static s32 +wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_pmksa *pmksa) +{ + struct wl_priv *wl = wiphy_priv(wiphy); + s32 err = 0; + int i; + + CHECK_SYS_UP(wl); + for (i = 0; i < wl->pmk_list->pmkids.npmkid; i++) + if (!memcmp(pmksa->bssid, &wl->pmk_list->pmkids.pmkid[i].BSSID, + ETHER_ADDR_LEN)) + break; + if (i < WL_NUM_PMKIDS_MAX) { + memcpy(&wl->pmk_list->pmkids.pmkid[i].BSSID, pmksa->bssid, + ETHER_ADDR_LEN); + memcpy(&wl->pmk_list->pmkids.pmkid[i].PMKID, pmksa->pmkid, + WPA2_PMKID_LEN); + if (i == wl->pmk_list->pmkids.npmkid) + wl->pmk_list->pmkids.npmkid++; + } else { + err = -EINVAL; + } + WL_DBG(("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n", + &wl->pmk_list->pmkids.pmkid[wl->pmk_list->pmkids.npmkid - 1].BSSID)); + for (i = 0; i < WPA2_PMKID_LEN; i++) { + WL_DBG(("%02x\n", + wl->pmk_list->pmkids.pmkid[wl->pmk_list->pmkids.npmkid - 1]. + PMKID[i])); + } + + err = wl_update_pmklist(dev, wl->pmk_list, err); + + return err; +} + +static s32 +wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_pmksa *pmksa) +{ + struct wl_priv *wl = wiphy_priv(wiphy); + struct _pmkid_list pmkid; + s32 err = 0; + int i; + + CHECK_SYS_UP(wl); + memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETHER_ADDR_LEN); + memcpy(&pmkid.pmkid[0].PMKID, pmksa->pmkid, WPA2_PMKID_LEN); + + WL_DBG(("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n", + &pmkid.pmkid[0].BSSID)); + for (i = 0; i < WPA2_PMKID_LEN; i++) { + WL_DBG(("%02x\n", pmkid.pmkid[0].PMKID[i])); + } + + for (i = 0; i < wl->pmk_list->pmkids.npmkid; i++) + if (!memcmp + (pmksa->bssid, &wl->pmk_list->pmkids.pmkid[i].BSSID, + ETHER_ADDR_LEN)) + break; + + if ((wl->pmk_list->pmkids.npmkid > 0) && + (i < wl->pmk_list->pmkids.npmkid)) { + memset(&wl->pmk_list->pmkids.pmkid[i], 0, sizeof(pmkid_t)); + for (; i < (wl->pmk_list->pmkids.npmkid - 1); i++) { + memcpy(&wl->pmk_list->pmkids.pmkid[i].BSSID, + &wl->pmk_list->pmkids.pmkid[i + 1].BSSID, + ETHER_ADDR_LEN); + memcpy(&wl->pmk_list->pmkids.pmkid[i].PMKID, + &wl->pmk_list->pmkids.pmkid[i + 1].PMKID, + WPA2_PMKID_LEN); + } + wl->pmk_list->pmkids.npmkid--; + } else { + err = -EINVAL; + } + + err = wl_update_pmklist(dev, wl->pmk_list, err); + + return err; + +} + +static s32 +wl_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *dev) +{ + struct wl_priv *wl = wiphy_priv(wiphy); + s32 err = 0; + CHECK_SYS_UP(wl); + memset(wl->pmk_list, 0, sizeof(*wl->pmk_list)); + err = wl_update_pmklist(dev, wl->pmk_list, err); + return err; + +} + +static wl_scan_params_t * +wl_cfg80211_scan_alloc_params(int channel, int nprobes, int *out_params_size) +{ + wl_scan_params_t *params; + int params_size; + int num_chans; + + *out_params_size = 0; + + /* Our scan params only need space for 1 channel and 0 ssids */ + params_size = WL_SCAN_PARAMS_FIXED_SIZE + 1 * sizeof(uint16); + params = 
(wl_scan_params_t*) kzalloc(params_size, GFP_KERNEL);
+	if (params == NULL) {
+		WL_ERR(("%s: mem alloc failed (%d bytes)\n", __func__, params_size));
+		return params;
+	}
+	memset(params, 0, params_size);
+	params->nprobes = nprobes;
+
+	num_chans = (channel == 0) ? 0 : 1;
+
+	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+	params->bss_type = DOT11_BSSTYPE_ANY;
+	params->scan_type = DOT11_SCANTYPE_ACTIVE;
+	params->nprobes = htod32(1);
+	params->active_time = htod32(-1);
+	params->passive_time = htod32(-1);
+	params->home_time = htod32(10);
+	params->channel_list[0] = htodchanspec(channel);
+
+	/* Our scan params have 1 channel and 0 ssids */
+	params->channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+		(num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+
+	*out_params_size = params_size;	/* rtn size to the caller */
+	return params;
+}
+
+static s32
+wl_cfg80211_remain_on_channel(struct wiphy *wiphy, struct net_device *dev,
+	struct ieee80211_channel * channel,
+	enum nl80211_channel_type channel_type,
+	unsigned int duration, u64 *cookie)
+{
+	s32 target_channel;
+	u32 id;
+	struct ether_addr primary_mac;
+	struct net_device *ndev = NULL;
+
+	s32 err = BCME_OK;
+	struct wl_priv *wl = wiphy_priv(wiphy);
+	WL_DBG(("Enter, netdev_ifidx: %d \n", dev->ifindex));
+
+	if (wl->p2p_net == dev) {
+		ndev = wl_to_prmry_ndev(wl);
+	} else {
+		ndev = dev;
+	}
+
+	if (wl_get_drv_status(wl, SCANNING, ndev)) {
+		wl_notify_escan_complete(wl, ndev, true, true);
+	}
+	target_channel = ieee80211_frequency_to_channel(channel->center_freq);
+	memcpy(&wl->remain_on_chan, channel, sizeof(struct ieee80211_channel));
+	wl->remain_on_chan_type = channel_type;
+	id = ++wl->last_roc_id;
+	if (id == 0)
+		id = ++wl->last_roc_id;
+	*cookie = id;
+	cfg80211_ready_on_channel(dev, *cookie, channel,
+		channel_type, duration, GFP_KERNEL);
+	if (wl->p2p && !wl->p2p->on) {
+		get_primary_mac(wl, &primary_mac);
+		wl_cfgp2p_generate_bss_mac(&primary_mac, &wl->p2p->dev_addr, &wl->p2p->int_addr);
+
+		/* In case of p2p_listen command, supplicant send remain_on_channel
+		 * without turning on P2P
+		 */
+
+		p2p_on(wl) = true;
+		err = wl_cfgp2p_enable_discovery(wl, ndev, NULL, 0);
+
+		if (unlikely(err)) {
+			goto exit;
+		}
+	}
+	if (p2p_is_on(wl))
+		wl_cfgp2p_discover_listen(wl, target_channel, duration);
+
+
+exit:
+	return err;
+}
+
+static s32
+wl_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy, struct net_device *dev,
+	u64 cookie)
+{
+	s32 err = 0;
+	WL_DBG((" enter ) netdev_ifidx: %d \n", dev->ifindex));
+	return err;
+}
+
+static s32
+wl_cfg80211_send_pending_tx_act_frm(struct wl_priv *wl)
+{
+	wl_af_params_t *tx_act_frm;
+	struct net_device *dev = wl->afx_hdl->dev;
+	if (!p2p_is_on(wl))
+		return -1;
+
+	if (dev == wl->p2p_net) {
+		dev = wl_to_prmry_ndev(wl);
+	}
+
+	tx_act_frm = wl->afx_hdl->pending_tx_act_frm;
+	WL_DBG(("Sending the action frame\n"));
+	wl->afx_hdl->pending_tx_act_frm = NULL;
+	if (tx_act_frm != NULL) {
+		/* Suspend P2P discovery's search-listen to prevent it from
+		 * starting a scan or changing the channel.
+		 */
+		wl_clr_drv_status(wl, SENDING_ACT_FRM, wl->afx_hdl->dev);
+		wl_clr_drv_status(wl, SCANNING, wl->afx_hdl->dev);
+		wl_notify_escan_complete(wl, dev, true, true);
+		wl_cfgp2p_discover_enable_search(wl, false);
+		tx_act_frm->channel = wl->afx_hdl->peer_chan;
+		wl->afx_hdl->ack_recv = (wl_cfgp2p_tx_action_frame(wl, dev,
+			tx_act_frm, wl->afx_hdl->bssidx)) ?
false : true; + } + return 0; +} +static void +wl_cfg80211_afx_handler(struct work_struct *work) +{ + + struct afx_hdl *afx_instance; + struct wl_priv *wl = wlcfg_drv_priv; + afx_instance = container_of(work, struct afx_hdl, work); + if (afx_instance != NULL) { + wl_cfgp2p_act_frm_search(wl, wl->afx_hdl->dev, + wl->afx_hdl->bssidx, 0); + } +} + +static bool +wl_cfg80211_send_at_common_channel(struct wl_priv *wl, + struct net_device *dev, + wl_af_params_t *af_params) +{ + WL_DBG((" enter ) \n")); + /* initialize afx_hdl */ + wl->afx_hdl->pending_tx_act_frm = af_params; + wl->afx_hdl->bssidx = wl_cfgp2p_find_idx(wl, dev); + wl->afx_hdl->dev = dev; + wl->afx_hdl->retry = 0; + wl->afx_hdl->peer_chan = WL_INVALID; + wl->afx_hdl->ack_recv = false; + memcpy(wl->afx_hdl->pending_tx_dst_addr.octet, + af_params->action_frame.da.octet, + sizeof(wl->afx_hdl->pending_tx_dst_addr.octet)); + /* Loop to wait until we have sent the pending tx action frame or the + * pending action frame tx is cancelled. + */ + while ((wl->afx_hdl->retry < WL_CHANNEL_SYNC_RETRY) && + (wl->afx_hdl->peer_chan == WL_INVALID)) { + wl_set_drv_status(wl, SENDING_ACT_FRM, dev); + wl_set_drv_status(wl, SCANNING, dev); + WL_DBG(("Scheduling the action frame for sending.. retry %d\n", + wl->afx_hdl->retry)); + /* Do find_peer_for_action */ + schedule_work(&wl->afx_hdl->work); + wait_for_completion(&wl->act_frm_scan); + wl->afx_hdl->retry++; + } + if (wl->afx_hdl->peer_chan != WL_INVALID) + wl_cfg80211_send_pending_tx_act_frm(wl); + else { + WL_ERR(("Couldn't find the peer " MACSTR " after %d retries\n", + MAC2STR(wl->afx_hdl->pending_tx_dst_addr.octet), wl->afx_hdl->retry)); + } + wl->afx_hdl->dev = NULL; + wl->afx_hdl->bssidx = WL_INVALID; + wl_clr_drv_status(wl, SENDING_ACT_FRM, dev); + if (wl->afx_hdl->ack_recv) + return true; /* ACK */ + else + return false; /* NO ACK */ +} + +static s32 +wl_cfg80211_mgmt_tx(struct wiphy *wiphy, struct net_device *ndev, + struct ieee80211_channel *channel, bool offchan, + enum nl80211_channel_type channel_type, + bool channel_type_valid, unsigned int wait, + const u8* buf, size_t len, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0) + bool no_cck, +#endif + u64 *cookie) +{ + wl_action_frame_t *action_frame; + wl_af_params_t *af_params; + wifi_p2p_ie_t *p2p_ie; + wpa_ie_fixed_t *wps_ie; + scb_val_t scb_val; + wifi_wfd_ie_t *wfd_ie; + const struct ieee80211_mgmt *mgmt; + struct wl_priv *wl = wiphy_priv(wiphy); + struct net_device *dev = NULL; + s32 err = BCME_OK; + s32 bssidx = 0; + u32 p2pie_len = 0; + u32 wpsie_len = 0; + u32 wfdie_len = 0; + u32 id; + u32 retry = 0; + bool ack = false; + wifi_p2p_pub_act_frame_t *act_frm = NULL; + wifi_p2p_action_frame_t *p2p_act_frm = NULL; + wifi_p2psd_gas_pub_act_frame_t *sd_act_frm = NULL; + s8 eabuf[ETHER_ADDR_STR_LEN]; + int retry_cnt = 0; + + WL_DBG(("Enter \n")); + + if (ndev == wl->p2p_net) { + dev = wl_to_prmry_ndev(wl); + } else { + /* If TX req is for any valid ifidx. Use as is */ + dev = ndev; + } + + /* find bssidx based on ndev */ + bssidx = wl_cfgp2p_find_idx(wl, dev); + if (bssidx == -1) { + + WL_ERR(("Can not find the bssidx for dev( %p )\n", dev)); + return -ENODEV; + } + if (p2p_is_on(wl)) { + /* Suspend P2P discovery search-listen to prevent it from changing the + * channel. 
+ */ + if ((err = wl_cfgp2p_discover_enable_search(wl, false)) < 0) { + WL_ERR(("Can not disable discovery mode\n")); + return -EFAULT; + } + } + *cookie = 0; + id = wl->send_action_id++; + if (id == 0) + id = wl->send_action_id++; + *cookie = id; + mgmt = (const struct ieee80211_mgmt *)buf; + if (ieee80211_is_mgmt(mgmt->frame_control)) { + if (ieee80211_is_probe_resp(mgmt->frame_control)) { + s32 ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN; + s32 ie_len = len - ie_offset; + if ((p2p_ie = wl_cfgp2p_find_p2pie((u8 *)(buf + ie_offset), ie_len)) + != NULL) { + /* Total length of P2P Information Element */ + p2pie_len = p2p_ie->len + sizeof(p2p_ie->len) + sizeof(p2p_ie->id); + } + if ((wfd_ie = wl_cfgp2p_find_wfdie((u8 *)(buf + ie_offset), ie_len)) + != NULL) { + /* Total length of WFD Information Element */ + wfdie_len = wfd_ie->len + sizeof(wfd_ie->len) + sizeof(wfd_ie->id); + } + if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)(buf + ie_offset), ie_len)) + != NULL) { + /* Order of Vendor IE is 1) WPS IE + + * 2) P2P IE created by supplicant + * So, it is ok to find start address of WPS IE + * to save IEs + */ + wpsie_len = wps_ie->length + sizeof(wps_ie->length) + + sizeof(wps_ie->tag); + wl_cfgp2p_set_management_ie(wl, dev, bssidx, + VNDR_IE_PRBRSP_FLAG, + (u8 *)wps_ie, wpsie_len + p2pie_len+ wfdie_len); + } + cfg80211_mgmt_tx_status(ndev, *cookie, buf, len, true, GFP_KERNEL); + goto exit; + } else if (ieee80211_is_disassoc(mgmt->frame_control) || + ieee80211_is_deauth(mgmt->frame_control)) { + memcpy(scb_val.ea.octet, mgmt->da, ETH_ALEN); + scb_val.val = mgmt->u.disassoc.reason_code; + wldev_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scb_val, + sizeof(scb_val_t), true); + WL_DBG(("Disconnect STA : %s scb_val.val %d\n", + bcm_ether_ntoa((const struct ether_addr *)mgmt->da, eabuf), + scb_val.val)); + /* Wait for the deauth event to come, supplicant will do the + * delete iface immediately and we will have problem in sending + * deauth frame if we delete the bss in firmware + */ + wl_delay(400); + cfg80211_mgmt_tx_status(ndev, *cookie, buf, len, true, GFP_KERNEL); + goto exit; + + } else if (ieee80211_is_action(mgmt->frame_control)) { + /* Abort the dwell time of any previous off-channel + * action frame that may be still in effect. Sending + * off-channel action frames relies on the driver's + * scan engine. If a previous off-channel action frame + * tx is still in progress (including the dwell time), + * then this new action frame will not be sent out. 
+ */ + wl_notify_escan_complete(wl, dev, true, true); + + } + + } else { + WL_ERR(("Driver only allows MGMT packet type\n")); + goto exit; + } + + af_params = (wl_af_params_t *) kzalloc(WL_WIFI_AF_PARAMS_SIZE, GFP_KERNEL); + + if (af_params == NULL) + { + WL_ERR(("unable to allocate frame\n")); + return -ENOMEM; + } + + action_frame = &af_params->action_frame; + + /* Add the packet Id */ + action_frame->packetId = *cookie; + WL_DBG(("action frame %d\n", action_frame->packetId)); + /* Add BSSID */ + memcpy(&action_frame->da, &mgmt->da[0], ETHER_ADDR_LEN); + memcpy(&af_params->BSSID, &mgmt->bssid[0], ETHER_ADDR_LEN); + + /* Add the length exepted for 802.11 header */ + action_frame->len = len - DOT11_MGMT_HDR_LEN; + WL_DBG(("action_frame->len: %d\n", action_frame->len)); + + /* Add the channel */ + af_params->channel = + ieee80211_frequency_to_channel(channel->center_freq); + + if (channel->band == IEEE80211_BAND_5GHZ) { + WL_DBG(("5GHz channel %d", af_params->channel)); + err = wldev_ioctl(dev, WLC_SET_CHANNEL, + &af_params->channel, sizeof(af_params->channel), true); + if (err < 0) { + WL_ERR(("WLC_SET_CHANNEL error %d\n", err)); + } + } + + /* Add the dwell time + * Dwell time to stay off-channel to wait for a response action frame + * after transmitting an GO Negotiation action frame + */ + af_params->dwell_time = WL_DWELL_TIME; + + memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN], action_frame->len); + if (wl_cfgp2p_is_pub_action(action_frame->data, action_frame->len)) { + act_frm = (wifi_p2p_pub_act_frame_t *) (action_frame->data); + WL_DBG(("P2P PUB action_frame->len: %d chan %d category %d subtype %d\n", + action_frame->len, af_params->channel, + act_frm->category, act_frm->subtype)); + if (act_frm && ((act_frm->subtype == P2P_PAF_GON_REQ) || + (act_frm->subtype == P2P_PAF_GON_RSP) || + (act_frm->subtype == P2P_PAF_GON_CONF) || + (act_frm->subtype == P2P_PAF_PROVDIS_REQ))) { + wldev_iovar_setint(dev, "mpc", 0); + } + + if (act_frm->subtype == P2P_PAF_GON_REQ) { + WL_DBG(("P2P: GO_NEG_PHASE status set \n")); + wl_set_p2p_status(wl, GO_NEG_PHASE); + } else if (act_frm->subtype == P2P_PAF_GON_CONF) { + /* If we reached till GO Neg confirmation + * reset the filter + */ + WL_DBG(("P2P: GO_NEG_PHASE status cleared \n")); + wl_clr_p2p_status(wl, GO_NEG_PHASE); + } + + if (act_frm->subtype == P2P_PAF_GON_RSP) + retry_cnt = 1; + else retry_cnt = WL_ACT_FRAME_RETRY; + + if (act_frm && act_frm->subtype == P2P_PAF_DEVDIS_REQ) { + af_params->dwell_time = WL_LONG_DWELL_TIME; + } else if (act_frm && + (act_frm->subtype == P2P_PAF_PROVDIS_REQ || + act_frm->subtype == P2P_PAF_PROVDIS_RSP || + act_frm->subtype == P2P_PAF_GON_RSP)) { + af_params->dwell_time = WL_MED_DWELL_TIME; + } + } else if (wl_cfgp2p_is_p2p_action(action_frame->data, action_frame->len)) { + p2p_act_frm = (wifi_p2p_action_frame_t *) (action_frame->data); + WL_DBG(("P2P action_frame->len: %d chan %d category %d subtype %d\n", + action_frame->len, af_params->channel, + p2p_act_frm->category, p2p_act_frm->subtype)); + } else if (wl_cfgp2p_is_gas_action(action_frame->data, action_frame->len)) { + sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *) (action_frame->data); + WL_DBG(("Service Discovery action_frame->len: %d chan %d category %d action %d\n", + action_frame->len, af_params->channel, + sd_act_frm->category, sd_act_frm->action)); + af_params->dwell_time = WL_MED_DWELL_TIME; + retry_cnt = WL_ACT_FRAME_RETRY; + } + wl_cfgp2p_print_actframe(true, action_frame->data, action_frame->len); + /* + * To make sure to send successfully 
action frame, we have to turn off mpc + */ + if (IS_P2P_SOCIAL(af_params->channel) && + (IS_P2P_PUB_ACT_REQ(act_frm, &act_frm->elts[0], action_frame->len) || + IS_GAS_REQ(sd_act_frm, action_frame->len)) && + wl_to_p2p_bss_saved_ie(wl, P2PAPI_BSSCFG_DEVICE).p2p_probe_req_ie_len) { + /* channel offload require P2P IE for Probe request + * otherwise, we will use wl_cfgp2p_tx_action_frame directly. + * channel offload for action request frame + */ + + /* channel offload for action request frame */ + ack = wl_cfg80211_send_at_common_channel(wl, dev, af_params); + /* We need to retry Service discovery frames as they don't get retried immediately by supplicant*/ + if ((!ack) && (IS_GAS_REQ(sd_act_frm, action_frame->len))) { + for (retry = 1; retry < retry_cnt; retry++) { + WL_DBG(("Service Discovery action_frame retry %d len: %d chan %d category %d action %d\n", + retry, action_frame->len, af_params->channel, + sd_act_frm->category, sd_act_frm->action)); + ack = (wl_cfgp2p_tx_action_frame(wl, dev, + af_params, bssidx)) ? false : true; + if (ack) + break; + } + } + } else { + ack = (wl_cfgp2p_tx_action_frame(wl, dev, af_params, bssidx)) ? false : true; + if (!ack) { + for (retry = 1; retry < retry_cnt; retry++) { + ack = (wl_cfgp2p_tx_action_frame(wl, dev, + af_params, bssidx)) ? false : true; + if (ack) + break; + } + } + + } + cfg80211_mgmt_tx_status(ndev, *cookie, buf, len, ack, GFP_KERNEL); + if (act_frm && act_frm->subtype == P2P_PAF_GON_CONF) { + wldev_iovar_setint(dev, "mpc", 1); + } + kfree(af_params); +exit: + return err; +} + + +static void +wl_cfg80211_mgmt_frame_register(struct wiphy *wiphy, struct net_device *dev, + u16 frame_type, bool reg) +{ + + WL_DBG(("%s: frame_type: %x, reg: %d\n", __func__, frame_type, reg)); + + if (frame_type != (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ)) + return; + + return; +} + + +static s32 +wl_cfg80211_change_bss(struct wiphy *wiphy, + struct net_device *dev, + struct bss_parameters *params) +{ + if (params->use_cts_prot >= 0) { + } + + if (params->use_short_preamble >= 0) { + } + + if (params->use_short_slot_time >= 0) { + } + + if (params->basic_rates) { + } + + if (params->ap_isolate >= 0) { + } + + if (params->ht_opmode >= 0) { + } + + return 0; +} + +static s32 +wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type) +{ + s32 channel; + s32 err = BCME_OK; + struct wl_priv *wl = wiphy_priv(wiphy); + + if (wl->p2p_net == dev) { + dev = wl_to_prmry_ndev(wl); + } + channel = ieee80211_frequency_to_channel(chan->center_freq); + + if (wl_get_drv_status(wl, AP_CREATING, dev)) { + WL_TRACE(("<0> %s: as!!! 
in AP creating mode, save chan num:%d\n", + __FUNCTION__, channel)); + wl->hostapd_chan = channel; + if (channel == 14) + return err; /* hostapd requested ch auto-select, will be done later */ + } + + WL_DBG(("netdev_ifidx(%d), chan_type(%d) target channel(%d) \n", + dev->ifindex, channel_type, channel)); + err = wldev_ioctl(dev, WLC_SET_CHANNEL, &channel, sizeof(channel), true); + if (err < 0) { + WL_ERR(("WLC_SET_CHANNEL error %d chip may not be supporting this channel\n", err)); + } + return err; +} + +static s32 +wl_validate_wpa2ie(struct net_device *dev, bcm_tlv_t *wpa2ie, s32 bssidx) +{ + s32 len = 0; + s32 err = BCME_OK; + u16 auth = WL_AUTH_OPEN_SYSTEM; /* d11 open authentication */ + u32 wsec; + u32 pval = 0; + u32 gval = 0; + u32 wpa_auth = 0; + u8* tmp; + wpa_suite_mcast_t *mcast; + wpa_suite_ucast_t *ucast; + wpa_suite_auth_key_mgmt_t *mgmt; + if (wpa2ie == NULL) + goto exit; + + WL_DBG(("Enter \n")); + len = wpa2ie->len; + /* check the mcast cipher */ + mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN]; + tmp = mcast->oui; + switch (tmp[DOT11_OUI_LEN]) { + case WPA_CIPHER_NONE: + gval = 0; + break; + case WPA_CIPHER_WEP_40: + case WPA_CIPHER_WEP_104: + gval = WEP_ENABLED; + break; + case WPA_CIPHER_TKIP: + gval = TKIP_ENABLED; + break; + case WPA_CIPHER_AES_CCM: + gval = AES_ENABLED; + break; + default: + WL_ERR(("No Security Info\n")); + break; + } + len -= WPA_SUITE_LEN; + /* check the unicast cipher */ + ucast = (wpa_suite_ucast_t *)&mcast[1]; + ltoh16_ua(&ucast->count); + tmp = ucast->list[0].oui; + switch (tmp[DOT11_OUI_LEN]) { + case WPA_CIPHER_NONE: + pval = 0; + break; + case WPA_CIPHER_WEP_40: + case WPA_CIPHER_WEP_104: + pval = WEP_ENABLED; + break; + case WPA_CIPHER_TKIP: + pval = TKIP_ENABLED; + break; + case WPA_CIPHER_AES_CCM: + pval = AES_ENABLED; + break; + default: + WL_ERR(("No Security Info\n")); + } + /* FOR WPS , set SEC_OW_ENABLED */ + wsec = (pval | gval | SES_OW_ENABLED); + /* check the AKM */ + mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[1]; + ltoh16_ua(&mgmt->count); + tmp = (u8 *)&mgmt->list[0]; + switch (tmp[DOT11_OUI_LEN]) { + case RSN_AKM_NONE: + wpa_auth = WPA_AUTH_NONE; + break; + case RSN_AKM_UNSPECIFIED: + wpa_auth = WPA2_AUTH_UNSPECIFIED; + break; + case RSN_AKM_PSK: + wpa_auth = WPA2_AUTH_PSK; + break; + default: + WL_ERR(("No Key Mgmt Info\n")); + } + /* set auth */ + err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx); + if (err < 0) { + WL_ERR(("auth error %d\n", err)); + return BCME_ERROR; + } + /* set wsec */ + err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx); + if (err < 0) { + WL_ERR(("wsec error %d\n", err)); + return BCME_ERROR; + } + /* set upper-layer auth */ + err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx); + if (err < 0) { + WL_ERR(("wpa_auth error %d\n", err)); + return BCME_ERROR; + } +exit: + return 0; +} + +static s32 +wl_validate_wpaie(struct net_device *dev, wpa_ie_fixed_t *wpaie, s32 bssidx) +{ + wpa_suite_mcast_t *mcast; + wpa_suite_ucast_t *ucast; + wpa_suite_auth_key_mgmt_t *mgmt; + u16 auth = WL_AUTH_OPEN_SYSTEM; /* d11 open authentication */ + u16 count; + s32 err = BCME_OK; + s32 len = 0; + u32 i; + u32 wsec; + u32 pval = 0; + u32 gval = 0; + u32 wpa_auth = 0; + u32 tmp = 0; + + if (wpaie == NULL) + goto exit; + WL_DBG(("Enter \n")); + len = wpaie->length; /* value length */ + len -= WPA_IE_TAG_FIXED_LEN; + /* check for multicast cipher suite */ + if (len < WPA_SUITE_LEN) { + WL_INFO(("no multicast cipher suite\n")); + goto exit; + } + + /* pick up multicast 
cipher */ + mcast = (wpa_suite_mcast_t *)&wpaie[1]; + len -= WPA_SUITE_LEN; + if (!bcmp(mcast->oui, WPA_OUI, WPA_OUI_LEN)) { + if (IS_WPA_CIPHER(mcast->type)) { + tmp = 0; + switch (mcast->type) { + case WPA_CIPHER_NONE: + tmp = 0; + break; + case WPA_CIPHER_WEP_40: + case WPA_CIPHER_WEP_104: + tmp = WEP_ENABLED; + break; + case WPA_CIPHER_TKIP: + tmp = TKIP_ENABLED; + break; + case WPA_CIPHER_AES_CCM: + tmp = AES_ENABLED; + break; + default: + WL_ERR(("No Security Info\n")); + } + gval |= tmp; + } + } + /* Check for unicast suite(s) */ + if (len < WPA_IE_SUITE_COUNT_LEN) { + WL_INFO(("no unicast suite\n")); + goto exit; + } + /* walk thru unicast cipher list and pick up what we recognize */ + ucast = (wpa_suite_ucast_t *)&mcast[1]; + count = ltoh16_ua(&ucast->count); + len -= WPA_IE_SUITE_COUNT_LEN; + for (i = 0; i < count && len >= WPA_SUITE_LEN; + i++, len -= WPA_SUITE_LEN) { + if (!bcmp(ucast->list[i].oui, WPA_OUI, WPA_OUI_LEN)) { + if (IS_WPA_CIPHER(ucast->list[i].type)) { + tmp = 0; + switch (ucast->list[i].type) { + case WPA_CIPHER_NONE: + tmp = 0; + break; + case WPA_CIPHER_WEP_40: + case WPA_CIPHER_WEP_104: + tmp = WEP_ENABLED; + break; + case WPA_CIPHER_TKIP: + tmp = TKIP_ENABLED; + break; + case WPA_CIPHER_AES_CCM: + tmp = AES_ENABLED; + break; + default: + WL_ERR(("No Security Info\n")); + } + pval |= tmp; + } + } + } + len -= (count - i) * WPA_SUITE_LEN; + /* Check for auth key management suite(s) */ + if (len < WPA_IE_SUITE_COUNT_LEN) { + WL_INFO((" no auth key mgmt suite\n")); + goto exit; + } + /* walk thru auth management suite list and pick up what we recognize */ + mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[count]; + count = ltoh16_ua(&mgmt->count); + len -= WPA_IE_SUITE_COUNT_LEN; + for (i = 0; i < count && len >= WPA_SUITE_LEN; + i++, len -= WPA_SUITE_LEN) { + if (!bcmp(mgmt->list[i].oui, WPA_OUI, WPA_OUI_LEN)) { + if (IS_WPA_AKM(mgmt->list[i].type)) { + tmp = 0; + switch (mgmt->list[i].type) { + case RSN_AKM_NONE: + tmp = WPA_AUTH_NONE; + break; + case RSN_AKM_UNSPECIFIED: + tmp = WPA_AUTH_UNSPECIFIED; + break; + case RSN_AKM_PSK: + tmp = WPA_AUTH_PSK; + break; + default: + WL_ERR(("No Key Mgmt Info\n")); + } + wpa_auth |= tmp; + } + } + + } + /* FOR WPS , set SEC_OW_ENABLED */ + wsec = (pval | gval | SES_OW_ENABLED); + /* set auth */ + err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx); + if (err < 0) { + WL_ERR(("auth error %d\n", err)); + return BCME_ERROR; + } + /* set wsec */ + err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx); + if (err < 0) { + WL_ERR(("wsec error %d\n", err)); + return BCME_ERROR; + } + /* set upper-layer auth */ + err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx); + if (err < 0) { + WL_ERR(("wpa_auth error %d\n", err)); + return BCME_ERROR; + } +exit: + return 0; +} + +static s32 +wl_cfg80211_add_set_beacon(struct wiphy *wiphy, struct net_device *dev, + struct beacon_parameters *info) +{ + s32 err = BCME_OK; + bcm_tlv_t *ssid_ie; + wlc_ssid_t ssid; + struct wl_priv *wl = wiphy_priv(wiphy); + struct wl_join_params join_params; + wpa_ie_fixed_t *wps_ie; + wpa_ie_fixed_t *wpa_ie; + bcm_tlv_t *wpa2_ie; + wifi_p2p_ie_t *p2p_ie; + wifi_wfd_ie_t *wfd_ie; + bool is_bssup = false; + bool update_bss = false; + bool pbc = false; + u16 wpsie_len = 0; + u16 p2pie_len = 0; + u32 wfdie_len = 0; + u8 beacon_ie[IE_MAX_LEN]; + s32 ie_offset = 0; + s32 bssidx = 0; + s32 infra = 1; + s32 join_params_size = 0; + s32 ap = 0; + WL_DBG(("interval (%d) dtim_period (%d) head_len (%d) tail_len (%d)\n", + info->interval, 
info->dtim_period, info->head_len, info->tail_len)); + + if (wl->p2p_net == dev) { + dev = wl_to_prmry_ndev(wl); + } + + bssidx = wl_cfgp2p_find_idx(wl, dev); + if (p2p_is_on(wl) && + (bssidx == wl_to_p2p_bss_bssidx(wl, + P2PAPI_BSSCFG_CONNECTION))) { + memset(beacon_ie, 0, sizeof(beacon_ie)); + /* We don't need to set beacon for P2P_GO, + * but need to parse ssid from beacon_parameters + * because there is no way to set ssid + */ + ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN; + /* find the SSID */ + if ((ssid_ie = bcm_parse_tlvs((u8 *)&info->head[ie_offset], + info->head_len - ie_offset, + DOT11_MNG_SSID_ID)) != NULL) { + memcpy(wl->p2p->ssid.SSID, ssid_ie->data, ssid_ie->len); + wl->p2p->ssid.SSID_len = ssid_ie->len; + WL_DBG(("SSID (%s) in Head \n", ssid_ie->data)); + + } else { + WL_ERR(("No SSID in beacon \n")); + } + + /* find the WPSIE */ + if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)info->tail, info->tail_len)) != NULL) { + wpsie_len = wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN; + /* + * Should be compared with saved ie before saving it + */ + wl_validate_wps_ie((char *) wps_ie, &pbc); + memcpy(beacon_ie, wps_ie, wpsie_len); + } else { + WL_ERR(("No WPSIE in beacon \n")); + } + + + /* find the P2PIE */ + if ((p2p_ie = wl_cfgp2p_find_p2pie((u8 *)info->tail, info->tail_len)) != NULL) { + /* Total length of P2P Information Element */ + p2pie_len = p2p_ie->len + sizeof(p2p_ie->len) + sizeof(p2p_ie->id); + memcpy(&beacon_ie[wpsie_len], p2p_ie, p2pie_len); + + } else { + WL_ERR(("No P2PIE in beacon \n")); + } + /* find the WFD IEs */ + if ((wfd_ie = wl_cfgp2p_find_wfdie((u8 *)info->tail, info->tail_len)) != NULL) { + /* Total length of P2P Information Element */ + wfdie_len = wfd_ie->len + sizeof(wfd_ie->len) + sizeof(wfd_ie->id); + if ((wpsie_len + p2pie_len + wfdie_len) < IE_MAX_LEN) { + memcpy(&beacon_ie[wpsie_len + p2pie_len], wfd_ie, wfdie_len); + } else { + WL_ERR(("Found WFD IE but there is no space, (%d)(%d)(%d)\n", + wpsie_len, p2pie_len, wfdie_len)); + wfdie_len = 0; + } + } else { + WL_ERR(("No WFDIE in beacon \n")); + } + /* add WLC_E_PROBREQ_MSG event to respose probe_request from STA */ + wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, pbc); + wl_cfgp2p_set_management_ie(wl, dev, bssidx, VNDR_IE_BEACON_FLAG, + beacon_ie, wpsie_len + p2pie_len + wfdie_len); + + /* find the RSN_IE */ + if ((wpa2_ie = bcm_parse_tlvs((u8 *)info->tail, info->tail_len, + DOT11_MNG_RSN_ID)) != NULL) { + WL_DBG((" WPA2 IE is found\n")); + } + is_bssup = wl_cfgp2p_bss_isup(dev, bssidx); + + if (!is_bssup && (wpa2_ie != NULL)) { + wldev_iovar_setint(dev, "mpc", 0); + if ((err = wl_validate_wpa2ie(dev, wpa2_ie, bssidx)) < 0) { + WL_ERR(("WPA2 IE parsing error")); + goto exit; + } + err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true); + if (err < 0) { + WL_ERR(("SET INFRA error %d\n", err)); + goto exit; + } + err = wldev_iovar_setbuf_bsscfg(dev, "ssid", &wl->p2p->ssid, + sizeof(wl->p2p->ssid), wl->ioctl_buf, WLC_IOCTL_MAXLEN, + bssidx, &wl->ioctl_buf_sync); + if (err < 0) { + WL_ERR(("GO SSID setting error %d\n", err)); + goto exit; + } + if ((err = wl_cfgp2p_bss(wl, dev, bssidx, 1)) < 0) { + WL_ERR(("GO Bring up error %d\n", err)); + goto exit; + } + } + } else if (wl_get_drv_status(wl, AP_CREATING, dev)) { + ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN; + ap = 1; + /* find the SSID */ + if ((ssid_ie = bcm_parse_tlvs((u8 *)&info->head[ie_offset], + info->head_len - ie_offset, + DOT11_MNG_SSID_ID)) != NULL) { + memset(&ssid, 0, sizeof(wlc_ssid_t)); + memcpy(ssid.SSID, 
ssid_ie->data, ssid_ie->len); + WL_DBG(("SSID is (%s) in Head \n", ssid.SSID)); + ssid.SSID_len = ssid_ie->len; + wldev_iovar_setint(dev, "mpc", 0); + wldev_ioctl(dev, WLC_DOWN, &ap, sizeof(s32), true); + wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true); + if ((err = wldev_ioctl(dev, WLC_SET_AP, &ap, sizeof(s32), true)) < 0) { + WL_ERR(("setting AP mode failed %d \n", err)); + return err; + } + + /* if requested, do softap ch autoselect */ + if (wl->hostapd_chan == 14) { + int auto_chan; + if ((err = wldev_get_auto_channel(dev, &auto_chan)) != 0) { + WL_ERR(("softap: auto chan select failed," + " will use ch 6\n")); + auto_chan = 6; + } else { + printf("<0>softap: got auto ch:%d\n", auto_chan); + } + err = wldev_ioctl(dev, WLC_SET_CHANNEL, + &auto_chan, sizeof(auto_chan), true); + if (err < 0) { + WL_ERR(("softap: WLC_SET_CHANNEL error %d chip" + " may not be supporting this channel\n", err)); + return err; + } + } + + /* find the RSN_IE */ + if ((wpa2_ie = bcm_parse_tlvs((u8 *)info->tail, info->tail_len, + DOT11_MNG_RSN_ID)) != NULL) { + WL_DBG((" WPA2 IE is found\n")); + } + /* find the WPA_IE */ + if ((wpa_ie = wl_cfgp2p_find_wpaie((u8 *)info->tail, + info->tail_len)) != NULL) { + WL_DBG((" WPA IE is found\n")); + } + if ((wpa_ie != NULL || wpa2_ie != NULL)) { + if (wl_validate_wpa2ie(dev, wpa2_ie, bssidx) < 0 || + wl_validate_wpaie(dev, wpa_ie, bssidx) < 0) { + wl->ap_info->security_mode = false; + return BCME_ERROR; + } + wl->ap_info->security_mode = true; + if (wl->ap_info->rsn_ie) { + kfree(wl->ap_info->rsn_ie); + wl->ap_info->rsn_ie = NULL; + } + if (wl->ap_info->wpa_ie) { + kfree(wl->ap_info->wpa_ie); + wl->ap_info->wpa_ie = NULL; + } + if (wl->ap_info->wps_ie) { + kfree(wl->ap_info->wps_ie); + wl->ap_info->wps_ie = NULL; + } + if (wpa_ie != NULL) { + /* WPAIE */ + wl->ap_info->rsn_ie = NULL; + wl->ap_info->wpa_ie = kmemdup(wpa_ie, + wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN, + GFP_KERNEL); + } else { + /* RSNIE */ + wl->ap_info->wpa_ie = NULL; + wl->ap_info->rsn_ie = kmemdup(wpa2_ie, + wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN, + GFP_KERNEL); + } + } else + wl->ap_info->security_mode = false; + /* find the WPSIE */ + if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)info->tail, + info->tail_len)) != NULL) { + wpsie_len = wps_ie->length +WPA_RSN_IE_TAG_FIXED_LEN; + /* + * Should be compared with saved ie before saving it + */ + wl_validate_wps_ie((char *) wps_ie, &pbc); + memcpy(beacon_ie, wps_ie, wpsie_len); + wl_cfgp2p_set_management_ie(wl, dev, bssidx, VNDR_IE_BEACON_FLAG, + beacon_ie, wpsie_len); + wl->ap_info->wps_ie = kmemdup(wps_ie, wpsie_len, GFP_KERNEL); + /* add WLC_E_PROBREQ_MSG event to respose probe_request from STA */ + wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, pbc); + } else { + WL_DBG(("No WPSIE in beacon \n")); + } + if (info->interval) { + if ((err = wldev_ioctl(dev, WLC_SET_BCNPRD, + &info->interval, sizeof(s32), true)) < 0) { + WL_ERR(("Beacon Interval Set Error, %d\n", err)); + return err; + } + } + if (info->dtim_period) { + if ((err = wldev_ioctl(dev, WLC_SET_DTIMPRD, + &info->dtim_period, sizeof(s32), true)) < 0) { + WL_ERR(("DTIM Interval Set Error, %d\n", err)); + return err; + } + } + err = wldev_ioctl(dev, WLC_UP, &ap, sizeof(s32), true); + if (unlikely(err)) { + WL_ERR(("WLC_UP error (%d)\n", err)); + return err; + } + memset(&join_params, 0, sizeof(join_params)); + /* join parameters starts with ssid */ + join_params_size = sizeof(join_params.ssid); + memcpy(join_params.ssid.SSID, ssid.SSID, ssid.SSID_len); + join_params.ssid.SSID_len = 
htod32(ssid.SSID_len); + /* create softap */ + if ((err = wldev_ioctl(dev, WLC_SET_SSID, &join_params, + join_params_size, true)) == 0) { + wl_clr_drv_status(wl, AP_CREATING, dev); + wl_set_drv_status(wl, AP_CREATED, dev); + } + } + } else if (wl_get_drv_status(wl, AP_CREATED, dev)) { + ap = 1; + /* find the WPSIE */ + if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)info->tail, info->tail_len)) != NULL) { + wpsie_len = wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN; + /* + * Should be compared with saved ie before saving it + */ + wl_validate_wps_ie((char *) wps_ie, &pbc); + memcpy(beacon_ie, wps_ie, wpsie_len); + wl_cfgp2p_set_management_ie(wl, dev, bssidx, VNDR_IE_BEACON_FLAG, + beacon_ie, wpsie_len); + if (wl->ap_info->wps_ie && + memcmp(wl->ap_info->wps_ie, wps_ie, wpsie_len)) { + WL_DBG((" WPS IE is changed\n")); + kfree(wl->ap_info->wps_ie); + wl->ap_info->wps_ie = kmemdup(wps_ie, wpsie_len, GFP_KERNEL); + /* add WLC_E_PROBREQ_MSG event to respose probe_request from STA */ + wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, pbc); + } else if (wl->ap_info->wps_ie == NULL) { + WL_DBG((" WPS IE is added\n")); + wl->ap_info->wps_ie = kmemdup(wps_ie, wpsie_len, GFP_KERNEL); + /* add WLC_E_PROBREQ_MSG event to respose probe_request from STA */ + wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, pbc); + } + /* find the RSN_IE */ + if ((wpa2_ie = bcm_parse_tlvs((u8 *)info->tail, info->tail_len, + DOT11_MNG_RSN_ID)) != NULL) { + WL_DBG((" WPA2 IE is found\n")); + } + /* find the WPA_IE */ + if ((wpa_ie = wl_cfgp2p_find_wpaie((u8 *)info->tail, + info->tail_len)) != NULL) { + WL_DBG((" WPA IE is found\n")); + } + if ((wpa_ie != NULL || wpa2_ie != NULL)) { + if (!wl->ap_info->security_mode) { + /* change from open mode to security mode */ + update_bss = true; + if (wpa_ie != NULL) { + wl->ap_info->wpa_ie = kmemdup(wpa_ie, + wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN, + GFP_KERNEL); + } else { + wl->ap_info->rsn_ie = kmemdup(wpa2_ie, + wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN, + GFP_KERNEL); + } + } else if (wl->ap_info->wpa_ie) { + /* change from WPA mode to WPA2 mode */ + if (wpa2_ie != NULL) { + update_bss = true; + kfree(wl->ap_info->wpa_ie); + wl->ap_info->rsn_ie = kmemdup(wpa2_ie, + wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN, + GFP_KERNEL); + wl->ap_info->wpa_ie = NULL; + } + else if (memcmp(wl->ap_info->wpa_ie, + wpa_ie, wpa_ie->length + + WPA_RSN_IE_TAG_FIXED_LEN)) { + kfree(wl->ap_info->wpa_ie); + update_bss = true; + wl->ap_info->wpa_ie = kmemdup(wpa_ie, + wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN, + GFP_KERNEL); + wl->ap_info->rsn_ie = NULL; + } + } else { + /* change from WPA2 mode to WPA mode */ + if (wpa_ie != NULL) { + update_bss = true; + kfree(wl->ap_info->rsn_ie); + wl->ap_info->rsn_ie = NULL; + wl->ap_info->wpa_ie = kmemdup(wpa_ie, + wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN, + GFP_KERNEL); + } else if (memcmp(wl->ap_info->rsn_ie, + wpa2_ie, wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN)) { + update_bss = true; + kfree(wl->ap_info->rsn_ie); + wl->ap_info->rsn_ie = kmemdup(wpa2_ie, + wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN, + GFP_KERNEL); + wl->ap_info->wpa_ie = NULL; + } + } + if (update_bss) { + wl->ap_info->security_mode = true; + wl_cfgp2p_bss(wl, dev, bssidx, 0); + if (wl_validate_wpa2ie(dev, wpa2_ie, bssidx) < 0 || + wl_validate_wpaie(dev, wpa_ie, bssidx) < 0) { + return BCME_ERROR; + } + wl_cfgp2p_bss(wl, dev, bssidx, 1); + } + } + } else { + WL_ERR(("No WPSIE in beacon \n")); + } + } +exit: + if (err) + wldev_iovar_setint(dev, "mpc", 1); + return err; +} + +#ifdef WL_SCHED_SCAN +#define PNO_TIME 30 
+#define PNO_REPEAT 4
+#define PNO_FREQ_EXPO_MAX 2
+int wl_cfg80211_sched_scan_start(struct wiphy *wiphy,
+	struct net_device *dev,
+	struct cfg80211_sched_scan_request *request)
+{
+	ushort pno_time = PNO_TIME;
+	int pno_repeat = PNO_REPEAT;
+	int pno_freq_expo_max = PNO_FREQ_EXPO_MAX;
+	wlc_ssid_t ssids_local[MAX_PFN_LIST_COUNT];
+	struct wl_priv *wl = wiphy_priv(wiphy);
+	struct cfg80211_ssid *ssid = NULL;
+	int ssid_count = 0;
+	int i;
+	int ret = 0;
+
+	WL_DBG(("Enter n_match_sets:%d n_ssids:%d \n",
+		request->n_match_sets, request->n_ssids));
+	WL_DBG(("ssids:%d pno_time:%d pno_repeat:%d pno_freq:%d \n",
+		request->n_ssids, pno_time, pno_repeat, pno_freq_expo_max));
+
+#if defined(WL_ENABLE_P2P_IF)
+	/* While GO is operational, PNO is not supported */
+	if (dhd_cfg80211_get_opmode(wl) & P2P_GO_ENABLED) {
+		WL_DBG(("PNO not enabled! op_mode: P2P GO"));
+		return -1;
+	}
+#endif
+
+	if (!request || !request->n_ssids || !request->n_match_sets) {
+		WL_ERR(("Invalid sched scan req!! n_ssids:%d \n", request->n_ssids));
+		return -EINVAL;
+	}
+
+	memset(&ssids_local, 0, sizeof(ssids_local));
+
+	if (request->n_match_sets > 0) {
+		for (i = 0; i < request->n_match_sets; i++) {
+			ssid = &request->match_sets[i].ssid;
+			memcpy(ssids_local[i].SSID, ssid->ssid, ssid->ssid_len);
+			ssids_local[i].SSID_len = ssid->ssid_len;
+			WL_DBG((">>> PNO filter set for ssid (%s) \n", ssid->ssid));
+			ssid_count++;
+		}
+	}
+
+	if (request->n_ssids > 0) {
+		for (i = 0; i < request->n_ssids; i++) {
+			/* Active scan req for ssids */
+			WL_DBG((">>> Active scan req for ssid (%s) \n", request->ssids[i].ssid));
+
+			/* match_set ssids is a superset of the n_ssids list, so we need
+			 * not add these sets separately
+			 */
+		}
+	}
+
+	if (ssid_count) {
+		if ((ret = dhd_dev_pno_set(dev, ssids_local, request->n_match_sets,
+			pno_time, pno_repeat, pno_freq_expo_max)) < 0) {
+			WL_ERR(("PNO setup failed!! ret=%d \n", ret));
+			return -EINVAL;
+		}
+
+		/* Enable the PNO */
+		if (dhd_dev_pno_enable(dev, 1) < 0) {
+			WL_ERR(("PNO enable failed!! 
ret=%d \n", ret));
+			return -EINVAL;
+		}
+		wl->sched_scan_req = request;
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev)
+{
+	struct wl_priv *wl = wiphy_priv(wiphy);
+
+	WL_DBG(("Enter \n"));
+
+	if (dhd_dev_pno_enable(dev, 0) < 0)
+		WL_ERR(("PNO disable failed"));
+
+	if (dhd_dev_pno_reset(dev) < 0)
+		WL_ERR(("PNO reset failed"));
+
+	if (wl->scan_request && wl->sched_scan_running) {
+		wl_notify_escan_complete(wl, dev, true, true);
+	}
+
+	wl->sched_scan_req = NULL;
+	wl->sched_scan_running = FALSE;
+
+	return 0;
+}
+#endif /* WL_SCHED_SCAN */
+
+static struct cfg80211_ops wl_cfg80211_ops = {
+	.add_virtual_intf = wl_cfg80211_add_virtual_iface,
+	.del_virtual_intf = wl_cfg80211_del_virtual_iface,
+	.change_virtual_intf = wl_cfg80211_change_virtual_iface,
+	.scan = wl_cfg80211_scan,
+	.set_wiphy_params = wl_cfg80211_set_wiphy_params,
+	.join_ibss = wl_cfg80211_join_ibss,
+	.leave_ibss = wl_cfg80211_leave_ibss,
+	.get_station = wl_cfg80211_get_station,
+	.set_tx_power = wl_cfg80211_set_tx_power,
+	.get_tx_power = wl_cfg80211_get_tx_power,
+	.add_key = wl_cfg80211_add_key,
+	.del_key = wl_cfg80211_del_key,
+	.get_key = wl_cfg80211_get_key,
+	.set_default_key = wl_cfg80211_config_default_key,
+	.set_default_mgmt_key = wl_cfg80211_config_default_mgmt_key,
+	.set_power_mgmt = wl_cfg80211_set_power_mgmt,
+	.connect = wl_cfg80211_connect,
+	.disconnect = wl_cfg80211_disconnect,
+	.suspend = wl_cfg80211_suspend,
+	.resume = wl_cfg80211_resume,
+	.set_pmksa = wl_cfg80211_set_pmksa,
+	.del_pmksa = wl_cfg80211_del_pmksa,
+	.flush_pmksa = wl_cfg80211_flush_pmksa,
+	.remain_on_channel = wl_cfg80211_remain_on_channel,
+	.cancel_remain_on_channel = wl_cfg80211_cancel_remain_on_channel,
+	.mgmt_tx = wl_cfg80211_mgmt_tx,
+	.mgmt_frame_register = wl_cfg80211_mgmt_frame_register,
+	.change_bss = wl_cfg80211_change_bss,
+	.set_channel = wl_cfg80211_set_channel,
+	.set_beacon = wl_cfg80211_add_set_beacon,
+	.add_beacon = wl_cfg80211_add_set_beacon,
+#ifdef WL_SCHED_SCAN
+	.sched_scan_start = wl_cfg80211_sched_scan_start,
+	.sched_scan_stop = wl_cfg80211_sched_scan_stop,
+#endif /* WL_SCHED_SCAN */
+};
+
+s32 wl_mode_to_nl80211_iftype(s32 mode)
+{
+	s32 err = 0;
+
+	switch (mode) {
+	case WL_MODE_BSS:
+		return NL80211_IFTYPE_STATION;
+	case WL_MODE_IBSS:
+		return NL80211_IFTYPE_ADHOC;
+	case WL_MODE_AP:
+		return NL80211_IFTYPE_AP;
+	default:
+		return NL80211_IFTYPE_UNSPECIFIED;
+	}
+
+	return err;
+}
+
+static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev)
+{
+	s32 err = 0;
+	wdev->wiphy =
+		wiphy_new(&wl_cfg80211_ops, sizeof(struct wl_priv));
+	if (unlikely(!wdev->wiphy)) {
+		WL_ERR(("Could not allocate wiphy device\n"));
+		err = -ENOMEM;
+		return err;
+	}
+	set_wiphy_dev(wdev->wiphy, sdiofunc_dev);
+	wdev->wiphy->max_scan_ie_len = WL_SCAN_IE_LEN_MAX;
+	/* Report how many SSIDs Driver can support per Scan request */
+	wdev->wiphy->max_scan_ssids = WL_SCAN_PARAMS_SSID_MAX;
+	wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
+#ifdef WL_SCHED_SCAN
+	wdev->wiphy->max_sched_scan_ssids = MAX_PFN_LIST_COUNT;
+	wdev->wiphy->max_match_sets = MAX_PFN_LIST_COUNT;
+	wdev->wiphy->max_sched_scan_ie_len = WL_SCAN_IE_LEN_MAX;
+	wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+#endif /* WL_SCHED_SCAN */
+	wdev->wiphy->interface_modes =
+		BIT(NL80211_IFTYPE_STATION)
+		| BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MONITOR);
+
+	wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = 
&__wl_band_2ghz;
+	/* wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a; - set in runtime */
+	wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+	wdev->wiphy->cipher_suites = __wl_cipher_suites;
+	wdev->wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
+	wdev->wiphy->max_remain_on_channel_duration = 5000;
+	wdev->wiphy->mgmt_stypes = wl_cfg80211_default_mgmt_stypes;
+#ifndef WL_POWERSAVE_DISABLED
+	wdev->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+#else
+	wdev->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+#endif /* !WL_POWERSAVE_DISABLED */
+	wdev->wiphy->flags |= WIPHY_FLAG_NETNS_OK |
+		WIPHY_FLAG_4ADDR_AP |
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)
+		WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS |
+#endif
+		WIPHY_FLAG_4ADDR_STATION;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+	wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
+#endif
+	WL_DBG(("Registering custom regulatory\n"));
+	wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
+	wiphy_apply_custom_regulatory(wdev->wiphy, &brcm_regdom);
+	/* Now we can register wiphy with cfg80211 module */
+	err = wiphy_register(wdev->wiphy);
+	if (unlikely(err < 0)) {
+		WL_ERR(("Could not register wiphy device (%d)\n", err));
+		wiphy_free(wdev->wiphy);
+	}
+	return err;
+}
+
+static void wl_free_wdev(struct wl_priv *wl)
+{
+	struct wireless_dev *wdev = wl->wdev;
+	struct wiphy *wiphy;
+	if (!wdev) {
+		WL_ERR(("wdev is invalid\n"));
+		return;
+	}
+	wiphy = wdev->wiphy;
+	wiphy_unregister(wdev->wiphy);
+	wdev->wiphy->dev.parent = NULL;
+
+	wl_delete_all_netinfo(wl);
+	wiphy_free(wiphy);
+	/* PLEASE do NOT call any function after wiphy_free, the driver's private structure "wl",
+	 * which is the private part of wiphy, has been freed in wiphy_free !!!!!!!!!!!
+	 */
+}
+
+static s32 wl_inform_bss(struct wl_priv *wl)
+{
+	struct wl_scan_results *bss_list;
+	struct wl_bss_info *bi = NULL;	/* must be initialized */
+	s32 err = 0;
+	s32 i;
+
+	bss_list = wl->bss_list;
+	WL_DBG(("scanned AP count (%d)\n", bss_list->count));
+	bi = next_bss(bss_list, bi);
+	for_each_bss(bss_list, bi, i) {
+		err = wl_inform_single_bss(wl, bi);
+		if (unlikely(err))
+			break;
+	}
+	return err;
+}
+
+static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi)
+{
+	struct wiphy *wiphy = wiphy_from_scan(wl);
+	struct ieee80211_mgmt *mgmt;
+	struct ieee80211_channel *channel;
+	struct ieee80211_supported_band *band;
+	struct wl_cfg80211_bss_info *notif_bss_info;
+	struct wl_scan_req *sr = wl_to_sr(wl);
+	struct beacon_proberesp *beacon_proberesp;
+	struct cfg80211_bss *cbss = NULL;
+	s32 mgmt_type;
+	s32 signal;
+	u32 freq;
+	s32 err = 0;
+
+	if (unlikely(dtoh32(bi->length) > WL_BSS_INFO_MAX)) {
+		WL_DBG(("Beacon is larger than buffer. Discarding\n"));
+		return err;
+	}
+	notif_bss_info = kzalloc(sizeof(*notif_bss_info) + sizeof(*mgmt)
+		- sizeof(u8) + WL_BSS_INFO_MAX, GFP_KERNEL);
+	if (unlikely(!notif_bss_info)) {
+		WL_ERR(("notif_bss_info alloc failed\n"));
+		return -ENOMEM;
+	}
+	mgmt = (struct ieee80211_mgmt *)notif_bss_info->frame_buf;
+	notif_bss_info->channel =
+		bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(bi->chanspec);
+
+	if (notif_bss_info->channel <= CH_MAX_2G_CHANNEL)
+		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	else
+		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+	if (!band) {
+		WL_ERR(("No valid band"));
+		kfree(notif_bss_info);
+		return -EINVAL;
+	}
+	notif_bss_info->rssi = dtoh16(bi->RSSI);
+	memcpy(mgmt->bssid, &bi->BSSID, ETHER_ADDR_LEN);
+	mgmt_type = (bi->flags & WL_BSS_FLAGS_FROM_BEACON) ? 
+ IEEE80211_STYPE_BEACON : IEEE80211_STYPE_PROBE_RESP; + + if (!memcmp(bi->SSID, sr->ssid.SSID, bi->SSID_len)) { + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | mgmt_type); + } + beacon_proberesp = wl->active_scan ? + (struct beacon_proberesp *)&mgmt->u.probe_resp : + (struct beacon_proberesp *)&mgmt->u.beacon; + beacon_proberesp->timestamp = 0; + beacon_proberesp->beacon_int = cpu_to_le16(bi->beacon_period); + beacon_proberesp->capab_info = cpu_to_le16(bi->capability); + wl_rst_ie(wl); + + wl_mrg_ie(wl, ((u8 *) bi) + bi->ie_offset, bi->ie_length); + wl_cp_ie(wl, beacon_proberesp->variable, WL_BSS_INFO_MAX - + offsetof(struct wl_cfg80211_bss_info, frame_buf)); + notif_bss_info->frame_len = offsetof(struct ieee80211_mgmt, + u.beacon.variable) + wl_get_ielen(wl); +#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS) + freq = ieee80211_channel_to_frequency(notif_bss_info->channel); +#else + freq = ieee80211_channel_to_frequency(notif_bss_info->channel, band->band); +#endif + channel = ieee80211_get_channel(wiphy, freq); + if (!channel) { + WL_ERR(("No valid channel: %u\n", freq)); + kfree(notif_bss_info); + return -EINVAL; + } + + WL_DBG(("SSID : \"%s\", rssi %d, channel %d, capability : 0x04%x, bssid %pM" + "mgmt_type %d frame_len %d\n", bi->SSID, + notif_bss_info->rssi, notif_bss_info->channel, + mgmt->u.beacon.capab_info, &bi->BSSID, mgmt_type, + notif_bss_info->frame_len)); + + signal = notif_bss_info->rssi * 100; + + if (!mgmt->u.probe_resp.timestamp) { + struct timespec ts; + + get_monotonic_boottime(&ts); + mgmt->u.probe_resp.timestamp = ((u64)ts.tv_sec * 1000000) + + ts.tv_nsec / 1000; + } + + cbss = cfg80211_inform_bss_frame(wiphy, channel, mgmt, + le16_to_cpu(notif_bss_info->frame_len), signal, GFP_KERNEL); + if (unlikely(!cbss)) { + WL_ERR(("cfg80211_inform_bss_frame error\n")); + kfree(notif_bss_info); + return -EINVAL; + } + + cfg80211_put_bss(cbss); + kfree(notif_bss_info); + + return err; +} + +static s32 wl_inform_ibss(struct wl_priv *wl, const u8 *bssid) +{ + struct net_device *ndev = wl_to_prmry_ndev(wl); + struct wiphy *wiphy = wl_to_wiphy(wl); + struct wl_bss_info *bi = NULL; + struct ieee80211_channel *notify_channel; + struct ieee80211_supported_band *band; + struct cfg80211_bss *bss; + s32 err = 0; + u16 channel; + u32 freq; + u32 wsec = 0; + u16 notify_capability; + u16 notify_interval; + u8 *notify_ie; + size_t notify_ielen; + s32 notify_signal; + + WL_TRACE(("Enter\n")); + + if (wl->scan_request) { + wl_notify_escan_complete(wl, ndev, true, true); + } + + mutex_lock(&wl->usr_sync); + + *(u32 *)wl->extra_buf = htod32(WL_EXTRA_BUF_MAX); + err = wldev_ioctl(ndev, WLC_GET_BSS_INFO, wl->extra_buf, + WL_EXTRA_BUF_MAX, false); + if (err) { + WL_ERR(("Failed to get bss info for IBSS\n")); + err = -EIO; + goto CleanUp; + } + bi = (struct wl_bss_info *)(wl->extra_buf + 4); + + if (memcmp(bssid, &bi->BSSID, ETHER_ADDR_LEN)) { + WL_ERR(("BSSID mismatch: Inform %02x:%02x:%02x:%02x:%02x:%02x," + "%02x:%02x:%02x:%02x:%02x:%02x\n", + bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], bssid[5], + bi->BSSID.octet[0], bi->BSSID.octet[1], bi->BSSID.octet[2], + bi->BSSID.octet[3], bi->BSSID.octet[4], + bi->BSSID.octet[5])); + err = -EINVAL; + goto CleanUp; + } + + err = wldev_iovar_getint(ndev, "wsec", &wsec); + if (err) { + WL_ERR(("wsec failed: %d\n", err)); + err = -EIO; + goto CleanUp; + } + + channel = bi->ctl_ch ? 
bi->ctl_ch : + CHSPEC_CHANNEL(dtohchanspec(bi->chanspec)); + if (channel <= CH_MAX_2G_CHANNEL) + band = wiphy->bands[IEEE80211_BAND_2GHZ]; + else + band = wiphy->bands[IEEE80211_BAND_5GHZ]; + +#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS) + freq = ieee80211_channel_to_frequency(channel); + (void)band->band; +#else + freq = ieee80211_channel_to_frequency(channel, band->band); +#endif + notify_channel = ieee80211_get_channel(wiphy, freq); + + notify_capability = dtoh16(bi->capability); + notify_interval = dtoh16(bi->beacon_period); + notify_ie = (u8 *)bi + dtoh16(bi->ie_offset); + notify_ielen = dtoh32(bi->ie_length); + notify_signal = (int16)dtoh16(bi->RSSI) * 100; + + if (wl->p2p_supported) { + notify_capability |= DOT11_CAP_IBSS; + if (wsec) + notify_capability |= DOT11_CAP_PRIVACY; + } + + WL_DBG(("BSSID %02x:%02x:%02x:%02x:%02x:%02x", + bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], bssid[5])); + WL_INFO(("channel: %d(%d)\n", channel, freq)); + WL_INFO(("capability: %X\n", notify_capability)); + WL_INFO(("beacon interval: %d ms\n", notify_interval)); + WL_INFO(("signal: %d dBm\n", notify_signal)); + WL_INFO(("ie_len: %d\n", notify_ielen)); + bss = cfg80211_inform_bss(wiphy, notify_channel, bssid, 0, + notify_capability, notify_interval, + notify_ie, notify_ielen, notify_signal, GFP_KERNEL); + if (!bss) { + WL_ERR(("cfg80211_inform_bss() Failed\n")); + err = -ENOMEM; + goto CleanUp; + } + + cfg80211_put_bss(bss); + err = 0; + +CleanUp: + + mutex_unlock(&wl->usr_sync); + + WL_TRACE(("Exit\n")); + return err; +} + +static bool wl_is_linkup(struct wl_priv *wl, const wl_event_msg_t *e, struct net_device *ndev) +{ + u32 event = ntoh32(e->event_type); + u32 status = ntoh32(e->status); + u16 flags = ntoh16(e->flags); + + WL_DBG(("event %d, status %d flags %x\n", event, status, flags)); + if (event == WLC_E_SET_SSID) { + if (status == WLC_E_STATUS_SUCCESS) { + return true; + } + } else if (event == WLC_E_LINK) { + if (flags & WLC_EVENT_MSG_LINK) + if (!wl_is_ibssmode(wl, ndev)) + return true; + } + + WL_DBG(("wl_is_linkup false\n")); + return false; +} + +static bool wl_is_linkdown(struct wl_priv *wl, const wl_event_msg_t *e) +{ + u32 event = ntoh32(e->event_type); + u16 flags = ntoh16(e->flags); + + if (event == WLC_E_DEAUTH_IND || + event == WLC_E_DISASSOC_IND || + event == WLC_E_DISASSOC || + event == WLC_E_DEAUTH) { + return true; + } else if (event == WLC_E_LINK) { + if (!(flags & WLC_EVENT_MSG_LINK)) + return true; + } + + return false; +} + +static bool wl_is_nonetwork(struct wl_priv *wl, const wl_event_msg_t *e) +{ + u32 event = ntoh32(e->event_type); + u32 status = ntoh32(e->status); + + if (event == WLC_E_LINK && status == WLC_E_STATUS_NO_NETWORKS) + return true; + if (event == WLC_E_SET_SSID && status != WLC_E_STATUS_SUCCESS) + return true; + + return false; +} + +/* The mainline kernel >= 3.2.0 has support for indicating new/del station + * to AP/P2P GO via events. If this change is backported to kernel for which + * this driver is being built, then define WL_CFG80211_STA_EVENT. You + * should use this new/del sta event mechanism for BRCM supplicant >= 22. 
+ */ +static s32 +wl_notify_connect_status_ap(struct wl_priv *wl, struct net_device *ndev, + const wl_event_msg_t *e, void *data) +{ + s32 err = 0; + u32 event = ntoh32(e->event_type); + u32 reason = ntoh32(e->reason); + u32 len = ntoh32(e->datalen); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT) + bool isfree = false; + u8 *mgmt_frame; + u8 bsscfgidx = e->bsscfgidx; + s32 freq; + s32 channel; + u8 body[WL_FRAME_LEN]; + u16 fc = 0; + struct ieee80211_supported_band *band; + struct ether_addr da; + struct ether_addr bssid; + struct wiphy *wiphy = wl_to_wiphy(wl); + channel_info_t ci; +#else + struct station_info sinfo; +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !WL_CFG80211_STA_EVENT */ + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT) + memset(body, 0, sizeof(body)); + memset(&bssid, 0, ETHER_ADDR_LEN); + WL_DBG(("Enter event %d ndev %p\n", event, ndev)); + if (wl_get_mode_by_netdev(wl, ndev) == WL_INVALID) + return WL_INVALID; + + if (len > WL_FRAME_LEN) { + WL_ERR(("Received frame length %d from dongle is greater than" + " allocated body buffer len %d", len, WL_FRAME_LEN)); + goto exit; + } + memcpy(body, data, len); + wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr", + NULL, 0, wl->ioctl_buf, WLC_IOCTL_MAXLEN, bsscfgidx, &wl->ioctl_buf_sync); + memcpy(da.octet, wl->ioctl_buf, ETHER_ADDR_LEN); + err = wldev_ioctl(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false); + switch (event) { + case WLC_E_ASSOC_IND: + fc = FC_ASSOC_REQ; + break; + case WLC_E_REASSOC_IND: + fc = FC_REASSOC_REQ; + break; + case WLC_E_DISASSOC_IND: + fc = FC_DISASSOC; + break; + case WLC_E_DEAUTH_IND: + fc = FC_DISASSOC; + break; + case WLC_E_DEAUTH: + fc = FC_DISASSOC; + break; + default: + fc = 0; + goto exit; + } + if ((err = wldev_ioctl(ndev, WLC_GET_CHANNEL, &ci, sizeof(ci), false))) + return err; + + channel = dtoh32(ci.hw_channel); + if (channel <= CH_MAX_2G_CHANNEL) + band = wiphy->bands[IEEE80211_BAND_2GHZ]; + else + band = wiphy->bands[IEEE80211_BAND_5GHZ]; + if (!band) { + WL_ERR(("No valid band")); + return -EINVAL; + } +#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS) + freq = ieee80211_channel_to_frequency(channel); +#else + freq = ieee80211_channel_to_frequency(channel, band->band); +#endif + + err = wl_frame_get_mgmt(fc, &da, &e->addr, &bssid, + &mgmt_frame, &len, body); + if (err < 0) + goto exit; + isfree = true; + + if (event == WLC_E_ASSOC_IND && reason == DOT11_SC_SUCCESS) { + cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC); + } else if (event == WLC_E_DISASSOC_IND) { + cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC); + } else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) { + cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC); + } + +exit: + if (isfree) + kfree(mgmt_frame); + return err; +#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0) && !WL_CFG80211_STA_EVENT */ + sinfo.filled = 0; + if (((event == WLC_E_ASSOC_IND) || (event == WLC_E_REASSOC_IND)) && + reason == DOT11_SC_SUCCESS) { + sinfo.filled = STATION_INFO_ASSOC_REQ_IES; + if (!data) { + WL_ERR(("No IEs present in ASSOC/REASSOC_IND")); + return -EINVAL; + } + sinfo.assoc_req_ies = data; + sinfo.assoc_req_ies_len = len; + cfg80211_new_sta(ndev, e->addr.octet, &sinfo, GFP_ATOMIC); + } else if (event == WLC_E_DISASSOC_IND) { + cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC); + } else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) { + 
cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC); + } +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0) && !WL_CFG80211_STA_EVENT */ + return err; +} + +static s32 +wl_notify_connect_status(struct wl_priv *wl, struct net_device *ndev, + const wl_event_msg_t *e, void *data) +{ + bool act; + s32 err = 0; + u32 event = ntoh32(e->event_type); + u32 reason; + + if (wl_get_mode_by_netdev(wl, ndev) == WL_MODE_AP) { + wl_notify_connect_status_ap(wl, ndev, e, data); + } else { + WL_DBG(("wl_notify_connect_status : event %d status : %d ndev %p\n", + ntoh32(e->event_type), ntoh32(e->status), ndev)); + if((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DISASSOC_IND)) { + reason = ntoh32(e->reason); + wl->deauth_reason = reason; + WL_ERR(("Received %s event with reason code: %d\n", + (event == WLC_E_DEAUTH_IND)? + "WLC_E_DEAUTH_IND":"WLC_E_DISASSOC_IND", reason)); + } + if (wl_is_linkup(wl, e, ndev)) { + wl_link_up(wl); + act = true; + wl_update_prof(wl, ndev, e, &act, WL_PROF_ACT); + wl_update_prof(wl, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID); + wl->deauth_reason = 0; + if (wl_is_ibssmode(wl, ndev)) { + wl_ibss_join_done(wl, ndev, e, data, true); + WL_DBG(("wl_ibss_join_done succeeded\n")); + } else { + if (!wl_get_drv_status(wl, DISCONNECTING, ndev)) { + printk("wl_bss_connect_done succeeded\n"); + wl_bss_connect_done(wl, ndev, e, data, true); + WL_DBG(("joined in BSS network \"%s\"\n", + ((struct wlc_ssid *) + wl_read_prof(wl, ndev, WL_PROF_SSID))->SSID)); + } + } + } else if (wl_is_linkdown(wl, e)) { + if (wl->scan_request) { + if (wl->escan_on) { + wl_notify_escan_complete(wl, ndev, true, true); + } else { + del_timer_sync(&wl->scan_timeout); + wl_iscan_aborted(wl); + } + } + if (wl_get_drv_status(wl, CONNECTED, ndev)) { + scb_val_t scbval; + u8 *curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID); + wl_clr_drv_status(wl, CONNECTED, ndev); + if (! 
wl_get_drv_status(wl, DISCONNECTING, ndev)) { + /* To make sure disconnect, explictly send dissassoc + * for BSSID 00:00:00:00:00:00 issue + */ + scbval.val = WLAN_REASON_DEAUTH_LEAVING; + + memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN); + scbval.val = htod32(scbval.val); + wldev_ioctl(ndev, WLC_DISASSOC, &scbval, + sizeof(scb_val_t), true); + WL_ERR(("link down, calling cfg80211_disconnected" + " with deauth_reason:%d\n", wl->deauth_reason)); + if (!wl_is_ibssmode(wl, ndev)) + cfg80211_disconnected(ndev, wl->deauth_reason, + NULL, 0, GFP_KERNEL); + wl_link_down(wl); + wl_init_prof(wl, ndev); + } + } + else if (wl_get_drv_status(wl, CONNECTING, ndev)) { + printk("link down, during connecting\n"); + if (wl_is_ibssmode(wl, ndev)) + wl_ibss_join_done(wl, ndev, e, data, false); + else + wl_bss_connect_done(wl, ndev, e, data, false); + } + wl_clr_drv_status(wl, DISCONNECTING, ndev); + + } else if (wl_is_nonetwork(wl, e)) { + printk("connect failed event=%d e->status 0x%x\n", + event, (int)ntoh32(e->status)); + /* Clean up any pending scan request */ + if (wl->scan_request) { + if (wl->escan_on) { + wl_notify_escan_complete(wl, ndev, true, true); + } else { + del_timer_sync(&wl->scan_timeout); + wl_iscan_aborted(wl); + } + } + if (wl_get_drv_status(wl, CONNECTING, ndev)) + wl_bss_connect_done(wl, ndev, e, data, false); + } else { + printk("%s nothing\n", __FUNCTION__); + } + } + return err; +} + +static s32 +wl_notify_roaming_status(struct wl_priv *wl, struct net_device *ndev, + const wl_event_msg_t *e, void *data) +{ + bool act; + s32 err = 0; + u32 event = be32_to_cpu(e->event_type); + u32 status = be32_to_cpu(e->status); + WL_DBG(("Enter \n")); + if (event == WLC_E_ROAM && status == WLC_E_STATUS_SUCCESS) { + if (wl_get_drv_status(wl, CONNECTED, ndev)) + wl_bss_roaming_done(wl, ndev, e, data); + else + wl_bss_connect_done(wl, ndev, e, data, true); + act = true; + wl_update_prof(wl, ndev, e, &act, WL_PROF_ACT); + wl_update_prof(wl, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID); + } + return err; +} + +static s32 wl_get_assoc_ies(struct wl_priv *wl, struct net_device *ndev) +{ + wl_assoc_info_t assoc_info; + struct wl_connect_info *conn_info = wl_to_conn(wl); + s32 err = 0; + + WL_DBG(("Enter \n")); + err = wldev_iovar_getbuf(ndev, "assoc_info", NULL, 0, wl->extra_buf, + WL_ASSOC_INFO_MAX, NULL); + if (unlikely(err)) { + WL_ERR(("could not get assoc info (%d)\n", err)); + return err; + } + memcpy(&assoc_info, wl->extra_buf, sizeof(wl_assoc_info_t)); + assoc_info.req_len = htod32(assoc_info.req_len); + assoc_info.resp_len = htod32(assoc_info.resp_len); + assoc_info.flags = htod32(assoc_info.flags); + if (conn_info->req_ie_len) { + conn_info->req_ie_len = 0; + bzero(conn_info->req_ie, sizeof(conn_info->req_ie)); + } + if (conn_info->resp_ie_len) { + conn_info->resp_ie_len = 0; + bzero(conn_info->resp_ie, sizeof(conn_info->resp_ie)); + } + if (assoc_info.req_len) { + err = wldev_iovar_getbuf(ndev, "assoc_req_ies", NULL, 0, wl->extra_buf, + WL_ASSOC_INFO_MAX, NULL); + if (unlikely(err)) { + WL_ERR(("could not get assoc req (%d)\n", err)); + return err; + } + conn_info->req_ie_len = assoc_info.req_len - sizeof(struct dot11_assoc_req); + if (assoc_info.flags & WLC_ASSOC_REQ_IS_REASSOC) { + conn_info->req_ie_len -= ETHER_ADDR_LEN; + } + if (conn_info->req_ie_len <= MAX_REQ_LINE) + memcpy(conn_info->req_ie, wl->extra_buf, conn_info->req_ie_len); + else { + WL_ERR(("%s IE size %d above max %d size \n", + __FUNCTION__, conn_info->req_ie_len, MAX_REQ_LINE)); + return err; + } + } else { + 
conn_info->req_ie_len = 0; + } + if (assoc_info.resp_len) { + err = wldev_iovar_getbuf(ndev, "assoc_resp_ies", NULL, 0, wl->extra_buf, + WL_ASSOC_INFO_MAX, NULL); + if (unlikely(err)) { + WL_ERR(("could not get assoc resp (%d)\n", err)); + return err; + } + conn_info->resp_ie_len = assoc_info.resp_len -sizeof(struct dot11_assoc_resp); + if (conn_info->resp_ie_len <= MAX_REQ_LINE) + memcpy(conn_info->resp_ie, wl->extra_buf, conn_info->resp_ie_len); + else { + WL_ERR(("%s IE size %d above max %d size \n", + __FUNCTION__, conn_info->resp_ie_len, MAX_REQ_LINE)); + return err; + } + } else { + conn_info->resp_ie_len = 0; + } + WL_DBG(("req len (%d) resp len (%d)\n", conn_info->req_ie_len, + conn_info->resp_ie_len)); + + return err; +} + +static void wl_ch_to_chanspec(int ch, struct wl_join_params *join_params, + size_t *join_params_size) +{ + chanspec_t chanspec = 0; + + if (ch != 0) { + join_params->params.chanspec_num = 1; + join_params->params.chanspec_list[0] = ch; + + if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL) + chanspec |= WL_CHANSPEC_BAND_2G; + else + chanspec |= WL_CHANSPEC_BAND_5G; + + chanspec |= WL_CHANSPEC_BW_20; + chanspec |= WL_CHANSPEC_CTL_SB_NONE; + + *join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE + + join_params->params.chanspec_num * sizeof(chanspec_t); + + join_params->params.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK; + join_params->params.chanspec_list[0] |= chanspec; + join_params->params.chanspec_list[0] = + htodchanspec(join_params->params.chanspec_list[0]); + + join_params->params.chanspec_num = + htod32(join_params->params.chanspec_num); + + WL_DBG(("%s join_params->params.chanspec_list[0]= %X\n", + __FUNCTION__, join_params->params.chanspec_list[0])); + + } +} + +static s32 wl_update_bss_info(struct wl_priv *wl, struct net_device *ndev) +{ + struct cfg80211_bss *bss; + struct wl_bss_info *bi; + struct wlc_ssid *ssid; + struct bcm_tlv *tim; + s32 beacon_interval; + s32 dtim_period; + size_t ie_len; + u8 *ie; + u8 *curbssid; + s32 err = 0; + struct wiphy *wiphy; + + wiphy = wl_to_wiphy(wl); + + if (wl_is_ibssmode(wl, ndev)) + return err; + + ssid = (struct wlc_ssid *)wl_read_prof(wl, ndev, WL_PROF_SSID); + curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID); + bss = cfg80211_get_bss(wiphy, NULL, curbssid, + ssid->SSID, ssid->SSID_len, WLAN_CAPABILITY_ESS, + WLAN_CAPABILITY_ESS); + + mutex_lock(&wl->usr_sync); + if (!bss) { + WL_DBG(("Could not find the AP\n")); + *(u32 *) wl->extra_buf = htod32(WL_EXTRA_BUF_MAX); + err = wldev_ioctl(ndev, WLC_GET_BSS_INFO, + wl->extra_buf, WL_EXTRA_BUF_MAX, false); + if (unlikely(err)) { + WL_ERR(("Could not get bss info %d\n", err)); + goto update_bss_info_out; + } + bi = (struct wl_bss_info *)(wl->extra_buf + 4); + if (memcmp(bi->BSSID.octet, curbssid, ETHER_ADDR_LEN)) { + err = -EIO; + goto update_bss_info_out; + } + err = wl_inform_single_bss(wl, bi); + if (unlikely(err)) + goto update_bss_info_out; + + ie = ((u8 *)bi) + bi->ie_offset; + ie_len = bi->ie_length; + beacon_interval = cpu_to_le16(bi->beacon_period); + } else { + WL_DBG(("Found the AP in the list - BSSID %pM\n", bss->bssid)); + ie = bss->information_elements; + ie_len = bss->len_information_elements; + beacon_interval = bss->beacon_interval; + cfg80211_put_bss(bss); + } + + tim = bcm_parse_tlvs(ie, ie_len, WLAN_EID_TIM); + if (tim) { + dtim_period = tim->data[1]; + } else { + /* + * active scan was done so we could not get dtim + * information out of probe response. + * so we speficially query dtim information. 
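+ * The WLC_GET_DTIMPRD ioctl below queries the DTIM period directly from the firmware.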
+ */ + err = wldev_ioctl(ndev, WLC_GET_DTIMPRD, + &dtim_period, sizeof(dtim_period), false); + if (unlikely(err)) { + WL_ERR(("WLC_GET_DTIMPRD error (%d)\n", err)); + goto update_bss_info_out; + } + } + + wl_update_prof(wl, ndev, NULL, &beacon_interval, WL_PROF_BEACONINT); + wl_update_prof(wl, ndev, NULL, &dtim_period, WL_PROF_DTIMPERIOD); + +update_bss_info_out: + mutex_unlock(&wl->usr_sync); + return err; +} + +static s32 +wl_bss_roaming_done(struct wl_priv *wl, struct net_device *ndev, + const wl_event_msg_t *e, void *data) +{ + struct wl_connect_info *conn_info = wl_to_conn(wl); + s32 err = 0; + u8 *curbssid; + + wl_get_assoc_ies(wl, ndev); + wl_update_prof(wl, ndev, NULL, (void *)(e->addr.octet), WL_PROF_BSSID); + curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID); + wl_update_bss_info(wl, ndev); + wl_update_pmklist(ndev, wl->pmk_list, err); + cfg80211_roamed(ndev, +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39) + NULL, +#endif + curbssid, + conn_info->req_ie, conn_info->req_ie_len, + conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL); + WL_DBG(("Report roaming result\n")); + + wl_set_drv_status(wl, CONNECTED, ndev); + + return err; +} + +static s32 +wl_bss_connect_done(struct wl_priv *wl, struct net_device *ndev, + const wl_event_msg_t *e, void *data, bool completed) +{ + struct wl_connect_info *conn_info = wl_to_conn(wl); + s32 err = 0; + u8 *curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID); + + WL_DBG((" enter\n")); + if (wl->scan_request) { + wl_notify_escan_complete(wl, ndev, true, true); + } + if (wl_get_drv_status(wl, CONNECTING, ndev)) { + wl_clr_drv_status(wl, CONNECTING, ndev); + if (completed) { + wl_get_assoc_ies(wl, ndev); + wl_update_prof(wl, ndev, NULL, (void *)(e->addr.octet), WL_PROF_BSSID); + curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID); + wl_update_bss_info(wl, ndev); + wl_update_pmklist(ndev, wl->pmk_list, err); + wl_set_drv_status(wl, CONNECTED, ndev); + } + cfg80211_connect_result(ndev, + curbssid, + conn_info->req_ie, + conn_info->req_ie_len, + conn_info->resp_ie, + conn_info->resp_ie_len, + completed ? 
WLAN_STATUS_SUCCESS : WLAN_STATUS_AUTH_TIMEOUT, + GFP_KERNEL); + if (completed) + WL_INFO(("Report connect result - connection succeeded\n")); + else + WL_ERR(("Report connect result - connection failed\n")); + } + return err; +} + +static s32 +wl_ibss_join_done(struct wl_priv *wl, struct net_device *ndev, + const wl_event_msg_t *e, void *data, bool completed) +{ + s32 err = 0; + + WL_TRACE(("Enter\n")); + + if (wl->scan_request) { + wl_notify_escan_complete(wl, ndev, true, true); + } + if (wl_get_drv_status(wl, CONNECTING, ndev)) { + wl_clr_drv_status(wl, CONNECTING, ndev); + if (completed) { + err = wl_inform_ibss(wl, (u8 *)&e->addr); + if (err) { + WL_ERR(("wl_inform_ibss() failed: %d\n", err)); + } + wl_set_drv_status(wl, CONNECTED, ndev); + + cfg80211_ibss_joined(ndev, (u8 *)&e->addr, GFP_KERNEL); + WL_DBG(("cfg80211_ibss_joined() called with valid BSSID\n")); + } + } + + WL_TRACE(("Exit\n")); + return err; +} + +static s32 +wl_notify_mic_status(struct wl_priv *wl, struct net_device *ndev, + const wl_event_msg_t *e, void *data) +{ + u16 flags = ntoh16(e->flags); + enum nl80211_key_type key_type; + + mutex_lock(&wl->usr_sync); + if (flags & WLC_EVENT_MSG_GROUP) + key_type = NL80211_KEYTYPE_GROUP; + else + key_type = NL80211_KEYTYPE_PAIRWISE; + + cfg80211_michael_mic_failure(ndev, (u8 *)&e->addr, key_type, -1, + NULL, GFP_KERNEL); + mutex_unlock(&wl->usr_sync); + + return 0; +} + +#ifdef PNO_SUPPORT +static s32 +wl_notify_pfn_status(struct wl_priv *wl, struct net_device *ndev, + const wl_event_msg_t *e, void *data) +{ + WL_ERR((" PNO Event\n")); + + mutex_lock(&wl->usr_sync); +#ifndef WL_SCHED_SCAN + /* TODO: Use cfg80211_sched_scan_results(wiphy); */ + cfg80211_disconnected(ndev, 0, NULL, 0, GFP_KERNEL); +#else + /* If cfg80211 scheduled scan is supported, report the pno results via sched + * scan results + */ + wl_notify_sched_scan_results(wl, ndev, e, data); +#endif /* WL_SCHED_SCAN */ + mutex_unlock(&wl->usr_sync); + return 0; +} +#endif /* PNO_SUPPORT */ + +static s32 +wl_notify_scan_status(struct wl_priv *wl, struct net_device *ndev, + const wl_event_msg_t *e, void *data) +{ + struct channel_info channel_inform; + struct wl_scan_results *bss_list; + u32 len = WL_SCAN_BUF_MAX; + s32 err = 0; + unsigned long flags; + + WL_DBG(("Enter \n")); + if (!wl_get_drv_status(wl, SCANNING, ndev)) { + WL_ERR(("scan is not ready \n")); + return err; + } + if (wl->iscan_on && wl->iscan_kickstart) + return wl_wakeup_iscan(wl_to_iscan(wl)); + + mutex_lock(&wl->usr_sync); + wl_clr_drv_status(wl, SCANNING, ndev); + err = wldev_ioctl(ndev, WLC_GET_CHANNEL, &channel_inform, + sizeof(channel_inform), false); + if (unlikely(err)) { + WL_ERR(("scan busy (%d)\n", err)); + goto scan_done_out; + } + channel_inform.scan_channel = dtoh32(channel_inform.scan_channel); + if (unlikely(channel_inform.scan_channel)) { + + WL_DBG(("channel_inform.scan_channel (%d)\n", + channel_inform.scan_channel)); + } + wl->bss_list = wl->scan_results; + bss_list = wl->bss_list; + memset(bss_list, 0, len); + bss_list->buflen = htod32(len); + err = wldev_ioctl(ndev, WLC_SCAN_RESULTS, bss_list, len, false); + if (unlikely(err)) { + WL_ERR(("%s Scan_results error (%d)\n", ndev->name, err)); + err = -EINVAL; + goto scan_done_out; + } + bss_list->buflen = dtoh32(bss_list->buflen); + bss_list->version = dtoh32(bss_list->version); + bss_list->count = dtoh32(bss_list->count); + + err = wl_inform_bss(wl); + +scan_done_out: + del_timer_sync(&wl->scan_timeout); + spin_lock_irqsave(&wl->cfgdrv_lock, flags); + if (wl->scan_request) { + 
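+ /* Complete the pending cfg80211 scan request while cfgdrv_lock is held. */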
WL_DBG(("cfg80211_scan_done\n")); + cfg80211_scan_done(wl->scan_request, false); + wl->scan_request = NULL; + } + spin_unlock_irqrestore(&wl->cfgdrv_lock, flags); + mutex_unlock(&wl->usr_sync); + return err; +} +static s32 +wl_frame_get_mgmt(u16 fc, const struct ether_addr *da, + const struct ether_addr *sa, const struct ether_addr *bssid, + u8 **pheader, u32 *body_len, u8 *pbody) +{ + struct dot11_management_header *hdr; + u32 totlen = 0; + s32 err = 0; + u8 *offset; + u32 prebody_len = *body_len; + switch (fc) { + case FC_ASSOC_REQ: + /* capability , listen interval */ + totlen = DOT11_ASSOC_REQ_FIXED_LEN; + *body_len += DOT11_ASSOC_REQ_FIXED_LEN; + break; + + case FC_REASSOC_REQ: + /* capability, listen inteval, ap address */ + totlen = DOT11_REASSOC_REQ_FIXED_LEN; + *body_len += DOT11_REASSOC_REQ_FIXED_LEN; + break; + } + totlen += DOT11_MGMT_HDR_LEN + prebody_len; + *pheader = kzalloc(totlen, GFP_KERNEL); + if (*pheader == NULL) { + WL_ERR(("memory alloc failed \n")); + return -ENOMEM; + } + hdr = (struct dot11_management_header *) (*pheader); + hdr->fc = htol16(fc); + hdr->durid = 0; + hdr->seq = 0; + offset = (u8*)(hdr + 1) + (totlen - DOT11_MGMT_HDR_LEN - prebody_len); + bcopy((const char*)da, (u8*)&hdr->da, ETHER_ADDR_LEN); + bcopy((const char*)sa, (u8*)&hdr->sa, ETHER_ADDR_LEN); + bcopy((const char*)bssid, (u8*)&hdr->bssid, ETHER_ADDR_LEN); + bcopy((const char*)pbody, offset, prebody_len); + *body_len = totlen; + return err; +} +static s32 +wl_notify_rx_mgmt_frame(struct wl_priv *wl, struct net_device *ndev, + const wl_event_msg_t *e, void *data) +{ + struct ieee80211_supported_band *band; + struct wiphy *wiphy = wl_to_wiphy(wl); + struct ether_addr da; + struct ether_addr bssid; + bool isfree = false; + s32 err = 0; + s32 freq; + struct net_device *dev = NULL; + wifi_p2p_pub_act_frame_t *act_frm = NULL; + wifi_p2p_action_frame_t *p2p_act_frm = NULL; + wifi_p2psd_gas_pub_act_frame_t *sd_act_frm = NULL; + wl_event_rx_frame_data_t *rxframe = + (wl_event_rx_frame_data_t*)data; + u32 event = ntoh32(e->event_type); + u8 *mgmt_frame; + u8 bsscfgidx = e->bsscfgidx; + u32 mgmt_frame_len = ntoh32(e->datalen) - sizeof(wl_event_rx_frame_data_t); + u16 channel = ((ntoh16(rxframe->channel) & WL_CHANSPEC_CHAN_MASK)); + + memset(&bssid, 0, ETHER_ADDR_LEN); + + if (wl->p2p_net == ndev) { + dev = wl_to_prmry_ndev(wl); + } else { + dev = ndev; + } + + if (channel <= CH_MAX_2G_CHANNEL) + band = wiphy->bands[IEEE80211_BAND_2GHZ]; + else + band = wiphy->bands[IEEE80211_BAND_5GHZ]; + if (!band) { + WL_ERR(("No valid band")); + return -EINVAL; + } + + if ((event == WLC_E_P2P_PROBREQ_MSG) && + wl->p2p && wl_get_p2p_status(wl, GO_NEG_PHASE)) { + WL_DBG(("Filtering P2P probe_req while being in GO-Neg state\n")); + goto exit; + } + +#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS) + freq = ieee80211_channel_to_frequency(channel); +#else + freq = ieee80211_channel_to_frequency(channel, band->band); +#endif + if (event == WLC_E_ACTION_FRAME_RX) { + wldev_iovar_getbuf_bsscfg(dev, "cur_etheraddr", + NULL, 0, wl->ioctl_buf, WLC_IOCTL_MAXLEN, bsscfgidx, &wl->ioctl_buf_sync); + + wldev_ioctl(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false); + memcpy(da.octet, wl->ioctl_buf, ETHER_ADDR_LEN); + err = wl_frame_get_mgmt(FC_ACTION, &da, &e->addr, &bssid, + &mgmt_frame, &mgmt_frame_len, + (u8 *)((wl_event_rx_frame_data_t *)rxframe + 1)); + if (err < 0) { + WL_ERR(("%s: Error in receiving action frame len %d channel %d freq %d\n", + __func__, mgmt_frame_len, channel, freq)); + goto 
exit; + } + isfree = true; + if (wl_cfgp2p_is_pub_action(&mgmt_frame[DOT11_MGMT_HDR_LEN], + mgmt_frame_len - DOT11_MGMT_HDR_LEN)) { + act_frm = (wifi_p2p_pub_act_frame_t *) + (&mgmt_frame[DOT11_MGMT_HDR_LEN]); + } else if (wl_cfgp2p_is_p2p_action(&mgmt_frame[DOT11_MGMT_HDR_LEN], + mgmt_frame_len - DOT11_MGMT_HDR_LEN)) { + p2p_act_frm = (wifi_p2p_action_frame_t *) + (&mgmt_frame[DOT11_MGMT_HDR_LEN]); + (void) p2p_act_frm; + } else if (wl_cfgp2p_is_gas_action(&mgmt_frame[DOT11_MGMT_HDR_LEN], + mgmt_frame_len - DOT11_MGMT_HDR_LEN)) { + sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *) + (&mgmt_frame[DOT11_MGMT_HDR_LEN]); + (void) sd_act_frm; + } + wl_cfgp2p_print_actframe(false, &mgmt_frame[DOT11_MGMT_HDR_LEN], + mgmt_frame_len - DOT11_MGMT_HDR_LEN); + /* + * After complete GO Negotiation, roll back to mpc mode + */ + if (act_frm && ((act_frm->subtype == P2P_PAF_GON_CONF) || + (act_frm->subtype == P2P_PAF_PROVDIS_RSP))) { + wldev_iovar_setint(dev, "mpc", 1); + } + + if (act_frm && (act_frm->subtype == P2P_PAF_GON_CONF)) { + WL_DBG(("P2P: GO_NEG_PHASE status cleared \n")); + wl_clr_p2p_status(wl, GO_NEG_PHASE); + } + + if (act_frm && (act_frm->subtype == P2P_PAF_GON_RSP)) { + /* Cancel the dwell time of req frame */ + WL_DBG(("P2P: Received GO NEG Resp frame, cancelling the dwell time\n")); + wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0, + wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE)); + } + } else { + mgmt_frame = (u8 *)((wl_event_rx_frame_data_t *)rxframe + 1); + } + + cfg80211_rx_mgmt(ndev, freq, mgmt_frame, mgmt_frame_len, GFP_ATOMIC); + + WL_DBG(("%s: mgmt_frame_len (%d) , e->datalen (%d), channel (%d), freq (%d)\n", __func__, + mgmt_frame_len, ntoh32(e->datalen), channel, freq)); + + if (isfree) + kfree(mgmt_frame); +exit: + return 0; +} + +#ifdef WL_SCHED_SCAN +/* If target scan is not reliable, set the below define to "1" to do a + * full escan + */ +#define FULL_ESCAN_ON_PFN_NET_FOUND 0 +static s32 +wl_notify_sched_scan_results(struct wl_priv *wl, struct net_device *ndev, + const wl_event_msg_t *e, void *data) +{ + wl_pfn_net_info_t *netinfo, *pnetinfo; + struct cfg80211_scan_request request; + struct wiphy *wiphy = wl_to_wiphy(wl); + int err = 0; + struct cfg80211_ssid ssid[MAX_PFN_LIST_COUNT]; + struct ieee80211_channel *channel = NULL; + int channel_req = 0; + int band = 0; + struct wl_pfn_scanresults *pfn_result = (struct wl_pfn_scanresults *)data; + + WL_DBG(("Enter\n")); + + if (e->event_type == WLC_E_PFN_NET_LOST) { + WL_DBG(("PFN NET LOST event. Do Nothing \n")); + return 0; + } + WL_DBG(("PFN NET FOUND event. count:%d \n", pfn_result->count)); + if (pfn_result->count > 0) { + int i; + + memset(&request, 0x00, sizeof(struct cfg80211_scan_request)); + memset(&ssid, 0x00, sizeof(ssid)); + request.wiphy = wiphy; + + pnetinfo = (wl_pfn_net_info_t *)(data + sizeof(wl_pfn_scanresults_t) + - sizeof(wl_pfn_net_info_t)); + channel = (struct ieee80211_channel *)kzalloc( + (sizeof(struct ieee80211_channel) * MAX_PFN_LIST_COUNT), + GFP_KERNEL); + if (!channel) { + WL_ERR(("No memory")); + err = -ENOMEM; + goto out_err; + } + + for (i = 0; i < pfn_result->count; i++) { + netinfo = &pnetinfo[i]; + if (!netinfo) { + WL_ERR(("Invalid netinfo ptr. 
index:%d", i)); + err = -EINVAL; + goto out_err; + } + WL_DBG(("SSID:%s Channel:%d \n", + netinfo->pfnsubnet.SSID, netinfo->pfnsubnet.channel)); + /* PFN result doesn't have all the info which are required by the supplicant + * (For e.g IEs) Do a target Escan so that sched scan results are reported + * via wl_inform_single_bss in the required format. Escan does require the + * scan request in the form of cfg80211_scan_request. For timebeing, create + * cfg80211_scan_request one out of the received PNO event. + */ + memcpy(ssid[i].ssid, netinfo->pfnsubnet.SSID, + netinfo->pfnsubnet.SSID_len); + ssid[i].ssid_len = netinfo->pfnsubnet.SSID_len; + request.n_ssids++; + + channel_req = netinfo->pfnsubnet.channel; + band = (channel_req <= CH_MAX_2G_CHANNEL) ? NL80211_BAND_2GHZ + : NL80211_BAND_5GHZ; + channel[i].center_freq = ieee80211_channel_to_frequency(channel_req, band); + channel[i].band = band; + channel[i].flags |= IEEE80211_CHAN_NO_HT40; + request.channels[i] = &channel[i]; + request.n_channels++; + } + + /* assign parsed ssid array */ + if (request.n_ssids) + request.ssids = &ssid[0]; + + if (wl_get_drv_status_all(wl, SCANNING)) { + /* Abort any on-going scan */ + wl_notify_escan_complete(wl, ndev, true, true); + } + + if (wl_get_p2p_status(wl, DISCOVERY_ON)) { + err = wl_cfgp2p_discover_enable_search(wl, false); + if (unlikely(err)) { + wl_clr_drv_status(wl, SCANNING, ndev); + goto out_err; + } + } + + wl_set_drv_status(wl, SCANNING, ndev); +#if FULL_ESCAN_ON_PFN_NET_FOUND + err = wl_do_escan(wl, wiphy, ndev, NULL); +#else + err = wl_do_escan(wl, wiphy, ndev, &request); +#endif + if (err) { + wl_clr_drv_status(wl, SCANNING, ndev); + goto out_err; + } + wl->sched_scan_running = TRUE; + } + else { + WL_ERR(("FALSE PNO Event. (pfn_count == 0) \n")); + } +out_err: + if (channel) + kfree(channel); + return err; +} +#endif /* WL_SCHED_SCAN */ + +static void wl_init_conf(struct wl_conf *conf) +{ + WL_DBG(("Enter \n")); + conf->frag_threshold = (u32)-1; + conf->rts_threshold = (u32)-1; + conf->retry_short = (u32)-1; + conf->retry_long = (u32)-1; + conf->tx_power = -1; +} + +static void wl_init_prof(struct wl_priv *wl, struct net_device *ndev) +{ + unsigned long flags; + struct wl_profile *profile = wl_get_profile_by_netdev(wl, ndev); + + spin_lock_irqsave(&wl->cfgdrv_lock, flags); + memset(profile, 0, sizeof(struct wl_profile)); + spin_unlock_irqrestore(&wl->cfgdrv_lock, flags); +} + +static void wl_init_event_handler(struct wl_priv *wl) +{ + memset(wl->evt_handler, 0, sizeof(wl->evt_handler)); + + wl->evt_handler[WLC_E_SCAN_COMPLETE] = wl_notify_scan_status; + wl->evt_handler[WLC_E_LINK] = wl_notify_connect_status; + wl->evt_handler[WLC_E_DEAUTH_IND] = wl_notify_connect_status; + wl->evt_handler[WLC_E_DEAUTH] = wl_notify_connect_status; + wl->evt_handler[WLC_E_DISASSOC_IND] = wl_notify_connect_status; + wl->evt_handler[WLC_E_ASSOC_IND] = wl_notify_connect_status; + wl->evt_handler[WLC_E_REASSOC_IND] = wl_notify_connect_status; + wl->evt_handler[WLC_E_ROAM] = wl_notify_roaming_status; + wl->evt_handler[WLC_E_MIC_ERROR] = wl_notify_mic_status; + wl->evt_handler[WLC_E_SET_SSID] = wl_notify_connect_status; + wl->evt_handler[WLC_E_ACTION_FRAME_RX] = wl_notify_rx_mgmt_frame; + wl->evt_handler[WLC_E_PROBREQ_MSG] = wl_notify_rx_mgmt_frame; + wl->evt_handler[WLC_E_P2P_PROBREQ_MSG] = wl_notify_rx_mgmt_frame; + wl->evt_handler[WLC_E_P2P_DISC_LISTEN_COMPLETE] = wl_cfgp2p_listen_complete; + wl->evt_handler[WLC_E_ACTION_FRAME_COMPLETE] = wl_cfgp2p_action_tx_complete; + 
wl->evt_handler[WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE] = wl_cfgp2p_action_tx_complete; +#ifdef PNO_SUPPORT + wl->evt_handler[WLC_E_PFN_NET_FOUND] = wl_notify_pfn_status; +#endif /* PNO_SUPPORT */ +} + +static s32 wl_init_priv_mem(struct wl_priv *wl) +{ + WL_DBG(("Enter \n")); + wl->scan_results = (void *)kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL); + if (unlikely(!wl->scan_results)) { + WL_ERR(("Scan results alloc failed\n")); + goto init_priv_mem_out; + } + wl->conf = (void *)kzalloc(sizeof(*wl->conf), GFP_KERNEL); + if (unlikely(!wl->conf)) { + WL_ERR(("wl_conf alloc failed\n")); + goto init_priv_mem_out; + } + wl->scan_req_int = + (void *)kzalloc(sizeof(*wl->scan_req_int), GFP_KERNEL); + if (unlikely(!wl->scan_req_int)) { + WL_ERR(("Scan req alloc failed\n")); + goto init_priv_mem_out; + } + wl->ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL); + if (unlikely(!wl->ioctl_buf)) { + WL_ERR(("Ioctl buf alloc failed\n")); + goto init_priv_mem_out; + } + wl->escan_ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL); + if (unlikely(!wl->escan_ioctl_buf)) { + WL_ERR(("Ioctl buf alloc failed\n")); + goto init_priv_mem_out; + } + wl->extra_buf = (void *)kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL); + if (unlikely(!wl->extra_buf)) { + WL_ERR(("Extra buf alloc failed\n")); + goto init_priv_mem_out; + } + wl->iscan = (void *)kzalloc(sizeof(*wl->iscan), GFP_KERNEL); + if (unlikely(!wl->iscan)) { + WL_ERR(("Iscan buf alloc failed\n")); + goto init_priv_mem_out; + } + wl->pmk_list = (void *)kzalloc(sizeof(*wl->pmk_list), GFP_KERNEL); + if (unlikely(!wl->pmk_list)) { + WL_ERR(("pmk list alloc failed\n")); + goto init_priv_mem_out; + } + wl->sta_info = (void *)kzalloc(sizeof(*wl->sta_info), GFP_KERNEL); + if (unlikely(!wl->sta_info)) { + WL_ERR(("sta info alloc failed\n")); + goto init_priv_mem_out; + } + wl->afx_hdl = (void *)kzalloc(sizeof(*wl->afx_hdl), GFP_KERNEL); + if (unlikely(!wl->afx_hdl)) { + WL_ERR(("afx hdl alloc failed\n")); + goto init_priv_mem_out; + } else { + init_completion(&wl->act_frm_scan); + INIT_WORK(&wl->afx_hdl->work, wl_cfg80211_afx_handler); + } + return 0; + +init_priv_mem_out: + wl_deinit_priv_mem(wl); + + return -ENOMEM; +} + +static void wl_deinit_priv_mem(struct wl_priv *wl) +{ + kfree(wl->scan_results); + wl->scan_results = NULL; + kfree(wl->conf); + wl->conf = NULL; + kfree(wl->scan_req_int); + wl->scan_req_int = NULL; + kfree(wl->ioctl_buf); + wl->ioctl_buf = NULL; + kfree(wl->escan_ioctl_buf); + wl->escan_ioctl_buf = NULL; + kfree(wl->extra_buf); + wl->extra_buf = NULL; + kfree(wl->iscan); + wl->iscan = NULL; + kfree(wl->pmk_list); + wl->pmk_list = NULL; + kfree(wl->sta_info); + wl->sta_info = NULL; + if (wl->afx_hdl) { + cancel_work_sync(&wl->afx_hdl->work); + kfree(wl->afx_hdl); + wl->afx_hdl = NULL; + } + + if (wl->ap_info) { + kfree(wl->ap_info->wpa_ie); + kfree(wl->ap_info->rsn_ie); + kfree(wl->ap_info->wps_ie); + kfree(wl->ap_info); + wl->ap_info = NULL; + } +} + +static s32 wl_create_event_handler(struct wl_priv *wl) +{ + int ret = 0; + WL_DBG(("Enter \n")); + + /* Do not use DHD in cfg driver */ + wl->event_tsk.thr_pid = -1; + PROC_START(wl_event_handler, wl, &wl->event_tsk, 0); + if (wl->event_tsk.thr_pid < 0) + ret = -ENOMEM; + return ret; +} + +static void wl_destroy_event_handler(struct wl_priv *wl) +{ + if (wl->event_tsk.thr_pid >= 0) + PROC_STOP(&wl->event_tsk); +} + +static void wl_term_iscan(struct wl_priv *wl) +{ + struct wl_iscan_ctrl *iscan = wl_to_iscan(wl); + WL_TRACE(("In\n")); + if (wl->iscan_on && iscan->tsk) { + iscan->state = 
WL_ISCAN_STATE_IDLE; + WL_INFO(("SIGTERM\n")); + send_sig(SIGTERM, iscan->tsk, 1); + WL_DBG(("kthread_stop\n")); + kthread_stop(iscan->tsk); + iscan->tsk = NULL; + } +} + +static void wl_notify_iscan_complete(struct wl_iscan_ctrl *iscan, bool aborted) +{ + struct wl_priv *wl = iscan_to_wl(iscan); + struct net_device *ndev = wl_to_prmry_ndev(wl); + unsigned long flags; + + WL_DBG(("Enter \n")); + if(!aborted) + wl->scan_busy_count = 0; + + if (!wl_get_drv_status(wl, SCANNING, ndev)) { + wl_clr_drv_status(wl, SCANNING, ndev); + WL_ERR(("Scan complete while device not scanning\n")); + return; + } + spin_lock_irqsave(&wl->cfgdrv_lock, flags); + wl_clr_drv_status(wl, SCANNING, ndev); + if (likely(wl->scan_request)) { + cfg80211_scan_done(wl->scan_request, aborted); + wl->scan_request = NULL; + } + spin_unlock_irqrestore(&wl->cfgdrv_lock, flags); + wl->iscan_kickstart = false; +} + +static s32 wl_wakeup_iscan(struct wl_iscan_ctrl *iscan) +{ + if (likely(iscan->state != WL_ISCAN_STATE_IDLE)) { + WL_DBG(("wake up iscan\n")); + up(&iscan->sync); + return 0; + } + + return -EIO; +} + +static s32 +wl_get_iscan_results(struct wl_iscan_ctrl *iscan, u32 *status, + struct wl_scan_results **bss_list) +{ + struct wl_iscan_results list; + struct wl_scan_results *results; + struct wl_iscan_results *list_buf; + s32 err = 0; + + WL_DBG(("Enter \n")); + memset(iscan->scan_buf, 0, WL_ISCAN_BUF_MAX); + list_buf = (struct wl_iscan_results *)iscan->scan_buf; + results = &list_buf->results; + results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE; + results->version = 0; + results->count = 0; + + memset(&list, 0, sizeof(list)); + list.results.buflen = htod32(WL_ISCAN_BUF_MAX); + err = wldev_iovar_getbuf(iscan->dev, "iscanresults", &list, + WL_ISCAN_RESULTS_FIXED_SIZE, iscan->scan_buf, + WL_ISCAN_BUF_MAX, NULL); + if (unlikely(err)) { + WL_ERR(("error (%d)\n", err)); + return err; + } + results->buflen = dtoh32(results->buflen); + results->version = dtoh32(results->version); + results->count = dtoh32(results->count); + WL_DBG(("results->count = %d\n", results->count)); + WL_DBG(("results->buflen = %d\n", results->buflen)); + *status = dtoh32(list_buf->status); + *bss_list = results; + + return err; +} + +static s32 wl_iscan_done(struct wl_priv *wl) +{ + struct wl_iscan_ctrl *iscan = wl->iscan; + s32 err = 0; + + iscan->state = WL_ISCAN_STATE_IDLE; + mutex_lock(&wl->usr_sync); + wl_inform_bss(wl); + wl_notify_iscan_complete(iscan, false); + mutex_unlock(&wl->usr_sync); + + return err; +} + +static s32 wl_iscan_pending(struct wl_priv *wl) +{ + struct wl_iscan_ctrl *iscan = wl->iscan; + s32 err = 0; + + /* Reschedule the timer */ + mod_timer(&iscan->timer, jiffies + msecs_to_jiffies(iscan->timer_ms)); + iscan->timer_on = 1; + + return err; +} + +static s32 wl_iscan_inprogress(struct wl_priv *wl) +{ + struct wl_iscan_ctrl *iscan = wl->iscan; + s32 err = 0; + + mutex_lock(&wl->usr_sync); + wl_inform_bss(wl); + wl_run_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE); + mutex_unlock(&wl->usr_sync); + /* Reschedule the timer */ + mod_timer(&iscan->timer, jiffies + msecs_to_jiffies(iscan->timer_ms)); + iscan->timer_on = 1; + + return err; +} + +static s32 wl_iscan_aborted(struct wl_priv *wl) +{ + struct wl_iscan_ctrl *iscan = wl->iscan; + s32 err = 0; + + iscan->state = WL_ISCAN_STATE_IDLE; + mutex_lock(&wl->usr_sync); + wl_notify_iscan_complete(iscan, true); + mutex_unlock(&wl->usr_sync); + + return err; +} + +static s32 wl_iscan_thread(void *data) +{ + struct wl_iscan_ctrl *iscan = (struct wl_iscan_ctrl *)data; + struct wl_priv *wl = 
iscan_to_wl(iscan); + u32 status; + int err = 0; + + allow_signal(SIGTERM); + status = WL_SCAN_RESULTS_PARTIAL; + while (likely(!down_interruptible(&iscan->sync))) { + if (kthread_should_stop()) + break; + if (iscan->timer_on) { + del_timer_sync(&iscan->timer); + iscan->timer_on = 0; + } + mutex_lock(&wl->usr_sync); + err = wl_get_iscan_results(iscan, &status, &wl->bss_list); + if (unlikely(err)) { + status = WL_SCAN_RESULTS_ABORTED; + WL_ERR(("Abort iscan\n")); + } + mutex_unlock(&wl->usr_sync); + iscan->iscan_handler[status] (wl); + } + if (iscan->timer_on) { + del_timer_sync(&iscan->timer); + iscan->timer_on = 0; + } + WL_DBG(("%s was terminated\n", __func__)); + + return 0; +} + +static void wl_scan_timeout(unsigned long data) +{ + struct wl_priv *wl = (struct wl_priv *)data; + + schedule_work(&wl->work_scan_timeout); +} + +static void wl_scan_timeout_process(struct work_struct *work) +{ + struct wl_priv *wl; + + wl = (wl_priv_t *)container_of(work, wl_priv_t, work_scan_timeout); + + if (wl->scan_request) { + WL_ERR(("timer expired\n")); + if (wl->escan_on) + wl_notify_escan_complete(wl, wl->escan_info.ndev, true, true); + else + wl_notify_iscan_complete(wl_to_iscan(wl), true); + } + + /* Assume FW is in bad state if there are continuous scan timeouts */ + wl->scan_busy_count++; + if (wl->scan_busy_count > WL_SCAN_BUSY_MAX) { + wl->scan_busy_count = 0; + WL_ERR(("Continuous scan timeouts!! Exercising FW hang recovery\n")); + net_os_send_hang_message(wl->escan_info.ndev); + } +} + +static void wl_iscan_timer(unsigned long data) +{ + struct wl_iscan_ctrl *iscan = (struct wl_iscan_ctrl *)data; + + if (iscan) { + iscan->timer_on = 0; + WL_DBG(("timer expired\n")); + wl_wakeup_iscan(iscan); + } +} + +static s32 wl_invoke_iscan(struct wl_priv *wl) +{ + struct wl_iscan_ctrl *iscan = wl_to_iscan(wl); + int err = 0; + + if (wl->iscan_on && !iscan->tsk) { + iscan->state = WL_ISCAN_STATE_IDLE; + sema_init(&iscan->sync, 0); + iscan->tsk = kthread_run(wl_iscan_thread, iscan, "wl_iscan"); + if (IS_ERR(iscan->tsk)) { + WL_ERR(("Could not create iscan thread\n")); + iscan->tsk = NULL; + return -ENOMEM; + } + } + + return err; +} + +static void wl_init_iscan_handler(struct wl_iscan_ctrl *iscan) +{ + memset(iscan->iscan_handler, 0, sizeof(iscan->iscan_handler)); + iscan->iscan_handler[WL_SCAN_RESULTS_SUCCESS] = wl_iscan_done; + iscan->iscan_handler[WL_SCAN_RESULTS_PARTIAL] = wl_iscan_inprogress; + iscan->iscan_handler[WL_SCAN_RESULTS_PENDING] = wl_iscan_pending; + iscan->iscan_handler[WL_SCAN_RESULTS_ABORTED] = wl_iscan_aborted; + iscan->iscan_handler[WL_SCAN_RESULTS_NO_MEM] = wl_iscan_aborted; +} + +static s32 +wl_cfg80211_netdev_notifier_call(struct notifier_block * nb, + unsigned long state, + void *ndev) +{ + struct net_device *dev = ndev; + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct wl_priv *wl = wlcfg_drv_priv; + + WL_DBG(("Enter \n")); + if (!wdev || !wl || dev == wl_to_prmry_ndev(wl)) + return NOTIFY_DONE; + switch (state) { + case NETDEV_UNREGISTER: + /* after calling list_del_rcu(&wdev->list) */ + wl_dealloc_netinfo(wl, ndev); + break; + case NETDEV_GOING_DOWN: + /* At NETDEV_DOWN state, wdev_cleanup_work work will be called. + * In front of door, the function checks + * whether current scan is working or not. + * If the scanning is still working, wdev_cleanup_work call WARN_ON and + * make the scan done forcibly. 
+ */ + if (wl_get_drv_status(wl, SCANNING, dev)) { + if (wl->escan_on) { + wl_notify_escan_complete(wl, dev, true, true); + } + } + break; + } + return NOTIFY_DONE; +} +static struct notifier_block wl_cfg80211_netdev_notifier = { + .notifier_call = wl_cfg80211_netdev_notifier_call, +}; + +static s32 wl_notify_escan_complete(struct wl_priv *wl, + struct net_device *ndev, + bool aborted, bool fw_abort) +{ + wl_scan_params_t *params = NULL; + s32 params_size = 0; + s32 err = BCME_OK; + unsigned long flags; + struct net_device *dev; + + WL_DBG(("Enter \n")); + + if(!aborted) + wl->scan_busy_count = 0; + + if (wl->scan_request) { + if (wl->scan_request->dev == wl->p2p_net) + dev = wl_to_prmry_ndev(wl); + else + dev = wl->scan_request->dev; + } + else { + WL_ERR(("wl->scan_request is NULL may be internal scan." + "doing scan_abort for ndev %p primary %p p2p_net %p", + ndev, wl_to_prmry_ndev(wl), wl->p2p_net)); + dev = ndev; + } + if (fw_abort && !in_atomic()) { + /* Our scan params only need space for 1 channel and 0 ssids */ + params = wl_cfg80211_scan_alloc_params(-1, 0, ¶ms_size); + if (params == NULL) { + WL_ERR(("scan params allocation failed \n")); + err = -ENOMEM; + } else { + /* Do a scan abort to stop the driver's scan engine */ + err = wldev_ioctl(dev, WLC_SCAN, params, params_size, true); + if (err < 0) { + WL_ERR(("scan abort failed \n")); + } + } + } + if (timer_pending(&wl->scan_timeout)) + del_timer_sync(&wl->scan_timeout); + spin_lock_irqsave(&wl->cfgdrv_lock, flags); + +#ifdef WL_SCHED_SCAN + if (wl->sched_scan_req && !wl->scan_request) { + WL_DBG((" REPORTING SCHED SCAN RESULTS \n")); + if (aborted) + cfg80211_sched_scan_stopped(wl->sched_scan_req->wiphy); + else + cfg80211_sched_scan_results(wl->sched_scan_req->wiphy); + wl->sched_scan_running = FALSE; + wl->sched_scan_req = NULL; + } +#endif /* WL_SCHED_SCAN */ + + if (likely(wl->scan_request)) { + cfg80211_scan_done(wl->scan_request, aborted); + wl->scan_request = NULL; + } + if (p2p_is_on(wl)) + wl_clr_p2p_status(wl, SCANNING); + wl_clr_drv_status(wl, SCANNING, dev); + spin_unlock_irqrestore(&wl->cfgdrv_lock, flags); + if (params) + kfree(params); + + return err; +} + +static s32 wl_escan_handler(struct wl_priv *wl, + struct net_device *ndev, + const wl_event_msg_t *e, void *data) +{ + s32 err = BCME_OK; + s32 status = ntoh32(e->status); + wl_bss_info_t *bi; + wl_escan_result_t *escan_result; + wl_bss_info_t *bss = NULL; + wl_scan_results_t *list; + u32 bi_length; + u32 i; + wifi_p2p_ie_t * p2p_ie; + u8 *p2p_dev_addr = NULL; + + WL_DBG((" enter event type : %d, status : %d \n", + ntoh32(e->event_type), ntoh32(e->status))); + + mutex_lock(&wl->usr_sync); + /* P2P SCAN is coming from primary interface */ + if (wl_get_p2p_status(wl, SCANNING)) { + if (wl_get_drv_status_all(wl, SENDING_ACT_FRM)) + ndev = wl->afx_hdl->dev; + else + ndev = wl->escan_info.ndev; + + } + if (!ndev || !wl->escan_on || + !wl_get_drv_status(wl, SCANNING, ndev)) { + WL_ERR(("escan is not ready ndev %p wl->escan_on %d drv_status 0x%x\n", + ndev, wl->escan_on, wl_get_drv_status(wl, SCANNING, ndev))); + goto exit; + } + + if (status == WLC_E_STATUS_PARTIAL) { + WL_INFO(("WLC_E_STATUS_PARTIAL \n")); + escan_result = (wl_escan_result_t *) data; + if (!escan_result) { + WL_ERR(("Invalid escan result (NULL pointer)\n")); + goto exit; + } + if (dtoh16(escan_result->bss_count) != 1) { + WL_ERR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count)); + goto exit; + } + bi = escan_result->bss_info; + if (!bi) { + WL_ERR(("Invalid escan bss info (NULL 
pointer)\n")); + goto exit; + } + bi_length = dtoh32(bi->length); + if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) { + WL_ERR(("Invalid bss_info length %d: ignoring\n", bi_length)); + goto exit; + } + + if (!(wl_to_wiphy(wl)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) { + if (dtoh16(bi->capability) & DOT11_CAP_IBSS) { + WL_DBG(("Ignoring IBSS result\n")); + goto exit; + } + } + + if (wl_get_drv_status_all(wl, SENDING_ACT_FRM)) { + p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length); + if (p2p_dev_addr && !memcmp(p2p_dev_addr, + wl->afx_hdl->pending_tx_dst_addr.octet, ETHER_ADDR_LEN)) { + s32 channel = CHSPEC_CHANNEL(dtohchanspec(bi->chanspec)); + WL_DBG(("ACTION FRAME SCAN : Peer " MACSTR " found, channel : %d\n", + MAC2STR(wl->afx_hdl->pending_tx_dst_addr.octet), channel)); + wl_clr_p2p_status(wl, SCANNING); + wl->afx_hdl->peer_chan = channel; + complete(&wl->act_frm_scan); + goto exit; + } + + } else { + list = (wl_scan_results_t *)wl->escan_info.escan_buf; + if (bi_length > ESCAN_BUF_SIZE - list->buflen) { + WL_ERR(("Buffer is too small: ignoring\n")); + goto exit; + } +#if defined(WLP2P) && defined(WL_ENABLE_P2P_IF) + if (wl->p2p_net && wl->scan_request && + wl->scan_request->dev == wl->p2p_net) { +#else + if (p2p_is_on(wl) && p2p_scan(wl)) { +#endif + /* p2p scan && allow only probe response */ + if (bi->flags & WL_BSS_FLAGS_FROM_BEACON) + goto exit; + if ((p2p_ie = wl_cfgp2p_find_p2pie(((u8 *) bi) + bi->ie_offset, + bi->ie_length)) == NULL) { + WL_ERR(("Couldn't find P2PIE in probe" + " response/beacon\n")); + goto exit; + } + } +#define WLC_BSS_RSSI_ON_CHANNEL 0x0002 + for (i = 0; i < list->count; i++) { + bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length)) + : list->bss_info; + + if (!bcmp(&bi->BSSID, &bss->BSSID, ETHER_ADDR_LEN) && + CHSPEC_BAND(bi->chanspec) == CHSPEC_BAND(bss->chanspec) && + bi->SSID_len == bss->SSID_len && + !bcmp(bi->SSID, bss->SSID, bi->SSID_len)) { + if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) == + (bi->flags & WLC_BSS_RSSI_ON_CHANNEL)) { + /* preserve max RSSI if the measurements are + * both on-channel or both off-channel + */ + bss->RSSI = MAX(bss->RSSI, bi->RSSI); + } else if ((bss->flags & WLC_BSS_RSSI_ON_CHANNEL) && + (bi->flags & WLC_BSS_RSSI_ON_CHANNEL) == 0) { + /* preserve the on-channel rssi measurement + * if the new measurement is off channel + */ + bss->RSSI = bi->RSSI; + bss->flags |= WLC_BSS_RSSI_ON_CHANNEL; + } + goto exit; + } + } + memcpy(&(wl->escan_info.escan_buf[list->buflen]), bi, bi_length); + list->version = dtoh32(bi->version); + list->buflen += bi_length; + list->count++; + } + + } + else if (status == WLC_E_STATUS_SUCCESS) { + wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE; + if (wl_get_drv_status_all(wl, SENDING_ACT_FRM)) { + WL_INFO(("ACTION FRAME SCAN DONE\n")); + wl_clr_p2p_status(wl, SCANNING); + wl_clr_drv_status(wl, SCANNING, wl->afx_hdl->dev); + if (wl->afx_hdl->peer_chan == WL_INVALID) + complete(&wl->act_frm_scan); + } else if ((likely(wl->scan_request)) || (wl->sched_scan_running)) { + WL_INFO(("ESCAN COMPLETED\n")); + wl->bss_list = (wl_scan_results_t *)wl->escan_info.escan_buf; + wl_inform_bss(wl); + wl_notify_escan_complete(wl, ndev, false, false); + } + } + else if (status == WLC_E_STATUS_ABORT) { + wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE; + if (wl_get_drv_status_all(wl, SENDING_ACT_FRM)) { + WL_INFO(("ACTION FRAME SCAN DONE\n")); + wl_clr_drv_status(wl, SCANNING, wl->afx_hdl->dev); + wl_clr_p2p_status(wl, SCANNING); + if (wl->afx_hdl->peer_chan == 
WL_INVALID) + complete(&wl->act_frm_scan); + } else if ((likely(wl->scan_request)) || (wl->sched_scan_running)) { + WL_INFO(("ESCAN ABORTED\n")); + wl->bss_list = (wl_scan_results_t *)wl->escan_info.escan_buf; + wl_inform_bss(wl); + wl_notify_escan_complete(wl, ndev, true, false); + } + } + else if (status == WLC_E_STATUS_NEWSCAN) { + /* Do Nothing. Ignore this event */ + } + else { + WL_ERR(("unexpected Escan Event %d : abort\n", status)); + wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE; + if (wl_get_drv_status_all(wl, SENDING_ACT_FRM)) { + WL_INFO(("ACTION FRAME SCAN DONE\n")); + wl_clr_p2p_status(wl, SCANNING); + wl_clr_drv_status(wl, SCANNING, wl->afx_hdl->dev); + if (wl->afx_hdl->peer_chan == WL_INVALID) + complete(&wl->act_frm_scan); + } else if ((likely(wl->scan_request)) || (wl->sched_scan_running)) { + wl->bss_list = (wl_scan_results_t *)wl->escan_info.escan_buf; + wl_inform_bss(wl); + wl_notify_escan_complete(wl, ndev, true, false); + } + } +exit: + mutex_unlock(&wl->usr_sync); + return err; +} + +static s32 wl_init_scan(struct wl_priv *wl) +{ + struct wl_iscan_ctrl *iscan = wl_to_iscan(wl); + int err = 0; + + if (wl->iscan_on) { + iscan->dev = wl_to_prmry_ndev(wl); + iscan->state = WL_ISCAN_STATE_IDLE; + wl_init_iscan_handler(iscan); + iscan->timer_ms = WL_ISCAN_TIMER_INTERVAL_MS; + init_timer(&iscan->timer); + iscan->timer.data = (unsigned long) iscan; + iscan->timer.function = wl_iscan_timer; + sema_init(&iscan->sync, 0); + iscan->tsk = kthread_run(wl_iscan_thread, iscan, "wl_iscan"); + if (IS_ERR(iscan->tsk)) { + WL_ERR(("Could not create iscan thread\n")); + iscan->tsk = NULL; + return -ENOMEM; + } + iscan->data = wl; + } else if (wl->escan_on) { + wl->evt_handler[WLC_E_ESCAN_RESULT] = wl_escan_handler; + wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE; + } + /* Init scan_timeout timer */ + init_timer(&wl->scan_timeout); + wl->scan_timeout.data = (unsigned long) wl; + wl->scan_timeout.function = wl_scan_timeout; + + return err; +} + +static s32 wl_init_priv(struct wl_priv *wl) +{ + struct wiphy *wiphy = wl_to_wiphy(wl); + struct net_device *ndev = wl_to_prmry_ndev(wl); + s32 err = 0; + + wl->scan_request = NULL; + wl->pwr_save = !!(wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT); + wl->iscan_on = false; + wl->escan_on = true; + wl->roam_on = false; + wl->iscan_kickstart = false; + wl->active_scan = true; + wl->rf_blocked = false; + wl->deauth_reason = 0; + spin_lock_init(&wl->cfgdrv_lock); + mutex_init(&wl->ioctl_buf_sync); + init_waitqueue_head(&wl->netif_change_event); + wl_init_eq(wl); + err = wl_init_priv_mem(wl); + if (err) + return err; + if (wl_create_event_handler(wl)) + return -ENOMEM; + wl_init_event_handler(wl); + mutex_init(&wl->usr_sync); + INIT_WORK(&wl->work_scan_timeout, wl_scan_timeout_process); + err = wl_init_scan(wl); + if (err) + return err; + wl_init_conf(wl->conf); + wl_init_prof(wl, ndev); + wl_link_down(wl); + DNGL_FUNC(dhd_cfg80211_init, (wl)); + + return err; +} + +static void wl_deinit_priv(struct wl_priv *wl) +{ + DNGL_FUNC(dhd_cfg80211_deinit, (wl)); + wl_destroy_event_handler(wl); + wl_flush_eq(wl); + wl_link_down(wl); + del_timer_sync(&wl->scan_timeout); + wl_term_iscan(wl); + cancel_work_sync(&wl->work_scan_timeout); + wl_deinit_priv_mem(wl); + unregister_netdevice_notifier(&wl_cfg80211_netdev_notifier); +} + +#if defined(WLP2P) && defined(WL_ENABLE_P2P_IF) +static s32 wl_cfg80211_attach_p2p(void) +{ + struct wl_priv *wl = wlcfg_drv_priv; + + WL_TRACE(("Enter \n")); + + if (wl_cfgp2p_register_ndev(wl) < 0) { + WL_ERR(("%s: P2P attach failed. 
\n", __func__)); + return -ENODEV; + } + + return 0; +} + +static s32 wl_cfg80211_detach_p2p(void) +{ + struct wl_priv *wl = wlcfg_drv_priv; + struct wireless_dev *wdev = wl->p2p_wdev; + + WL_DBG(("Enter \n")); + if (!wdev || !wl) { + WL_ERR(("Invalid Ptr\n")); + return -EINVAL; + } + + wl_cfgp2p_unregister_ndev(wl); + + wl->p2p_wdev = NULL; + wl->p2p_net = NULL; + WL_DBG(("Freeing 0x%08x \n", (unsigned int)wdev)); + kfree(wdev); + + return 0; +} +#endif /* defined(WLP2P) && defined(WL_ENABLE_P2P_IF) */ + +s32 wl_cfg80211_attach_post(struct net_device *ndev) +{ + struct wl_priv * wl = NULL; + s32 err = 0; + WL_TRACE(("In\n")); + if (unlikely(!ndev)) { + WL_ERR(("ndev is invaild\n")); + return -ENODEV; + } + wl = wlcfg_drv_priv; + if (wl && !wl_get_drv_status(wl, READY, ndev)) { + if (wl->wdev && + wl_cfgp2p_supported(wl, ndev)) { +#if !defined(WL_ENABLE_P2P_IF) + wl->wdev->wiphy->interface_modes |= + (BIT(NL80211_IFTYPE_P2P_CLIENT)| + BIT(NL80211_IFTYPE_P2P_GO)); +#endif + if ((err = wl_cfgp2p_init_priv(wl)) != 0) + goto fail; + +#if defined(WLP2P) && defined(WL_ENABLE_P2P_IF) + if (wl->p2p_net) { + /* Update MAC addr for p2p0 interface here. */ + memcpy(wl->p2p_net->dev_addr, ndev->dev_addr, ETH_ALEN); + wl->p2p_net->dev_addr[0] |= 0x02; + printk("%s: p2p_dev_addr="MACSTR "\n", + wl->p2p_net->name, MAC2STR(wl->p2p_net->dev_addr)); + } else { + WL_ERR(("p2p_net not yet populated." + " Couldn't update the MAC Address for p2p0 \n")); + return -ENODEV; + } +#endif /* defined(WLP2P) && (WL_ENABLE_P2P_IF) */ + + wl->p2p_supported = true; + } + } else + return -ENODEV; + wl_set_drv_status(wl, READY, ndev); +fail: + return err; +} + +s32 wl_cfg80211_attach(struct net_device *ndev, void *data) +{ + struct wireless_dev *wdev; + struct wl_priv *wl; + s32 err = 0; + struct device *dev; + + WL_TRACE(("In\n")); + if (!ndev) { + WL_ERR(("ndev is invaild\n")); + return -ENODEV; + } + WL_DBG(("func %p\n", wl_cfg80211_get_parent_dev())); + dev = wl_cfg80211_get_parent_dev(); + + wdev = kzalloc(sizeof(*wdev), GFP_KERNEL); + if (unlikely(!wdev)) { + WL_ERR(("Could not allocate wireless device\n")); + return -ENOMEM; + } + err = wl_setup_wiphy(wdev, dev); + if (unlikely(err)) { + kfree(wdev); + return -ENOMEM; + } + wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS); + wl = (struct wl_priv *)wiphy_priv(wdev->wiphy); + wl->wdev = wdev; + wl->pub = data; + INIT_LIST_HEAD(&wl->net_list); + ndev->ieee80211_ptr = wdev; + SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); + wdev->netdev = ndev; + err = wl_alloc_netinfo(wl, ndev, wdev, WL_MODE_BSS); + if (err) { + WL_ERR(("Failed to alloc net_info (%d)\n", err)); + goto cfg80211_attach_out; + } + err = wl_init_priv(wl); + if (err) { + WL_ERR(("Failed to init iwm_priv (%d)\n", err)); + goto cfg80211_attach_out; + } + + err = wl_setup_rfkill(wl, TRUE); + if (err) { + WL_ERR(("Failed to setup rfkill %d\n", err)); + goto cfg80211_attach_out; + } + err = register_netdevice_notifier(&wl_cfg80211_netdev_notifier); + if (err) { + WL_ERR(("Failed to register notifierl %d\n", err)); + goto cfg80211_attach_out; + } +#if defined(COEX_DHCP) + if (wl_cfg80211_btcoex_init(wl)) + goto cfg80211_attach_out; +#endif + + wlcfg_drv_priv = wl; + +#if defined(WLP2P) && defined(WL_ENABLE_P2P_IF) + err = wl_cfg80211_attach_p2p(); + if (err) + goto cfg80211_attach_out; +#endif + + return err; + +cfg80211_attach_out: + err = wl_setup_rfkill(wl, FALSE); + wl_free_wdev(wl); + return err; +} + +void wl_cfg80211_detach(void *para) +{ + struct wl_priv *wl; + + wl = wlcfg_drv_priv; + + 
WL_TRACE(("In\n")); + +#if defined(COEX_DHCP) + wl_cfg80211_btcoex_deinit(wl); +#endif + +#if defined(WLP2P) && defined(WL_ENABLE_P2P_IF) + wl_cfg80211_detach_p2p(); +#endif + wl_setup_rfkill(wl, FALSE); + if (wl->p2p_supported) + wl_cfgp2p_deinit_priv(wl); + wl_deinit_priv(wl); + wlcfg_drv_priv = NULL; + wl_cfg80211_clear_parent_dev(); + wl_free_wdev(wl); + /* PLEASE do NOT call any function after wl_free_wdev, the driver's private structure "wl", + * which is the private part of wiphy, has been freed in wl_free_wdev !!!!!!!!!!! + */ +} + +static void wl_wakeup_event(struct wl_priv *wl) +{ + if (wl->event_tsk.thr_pid >= 0) { + DHD_OS_WAKE_LOCK(wl->pub); + up(&wl->event_tsk.sema); + } +} + +static int wl_is_p2p_event(struct wl_event_q *e) +{ + switch (e->etype) { + /* We have to seperate out the P2P events received + * on primary interface so that it can be send up + * via p2p0 interface. + */ + case WLC_E_P2P_PROBREQ_MSG: + case WLC_E_P2P_DISC_LISTEN_COMPLETE: + case WLC_E_ACTION_FRAME_RX: + case WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE: + case WLC_E_ACTION_FRAME_COMPLETE: + + if (e->emsg.ifidx != 0) { + WL_TRACE(("P2P Event on Virtual I/F (ifidx:%d) \n", + e->emsg.ifidx)); + /* We are only bothered about the P2P events received + * on primary interface. For rest of them return false + * so that it is sent over the interface corresponding + * to the ifidx. + */ + return FALSE; + } else { + WL_TRACE(("P2P Event on Primary I/F (ifidx:%d)." + " Sent it to p2p0 \n", e->emsg.ifidx)); + return TRUE; + } + break; + + default: + WL_TRACE(("NON-P2P Event %d on ifidx (ifidx:%d) \n", + e->etype, e->emsg.ifidx)); + return FALSE; + } +} + +static s32 wl_event_handler(void *data) +{ + struct net_device *netdev; + struct wl_priv *wl = NULL; + struct wl_event_q *e; + tsk_ctl_t *tsk = (tsk_ctl_t *)data; + + wl = (struct wl_priv *)tsk->parent; + DAEMONIZE("dhd_cfg80211_event"); + complete(&tsk->completed); + + while (down_interruptible (&tsk->sema) == 0) { + SMP_RD_BARRIER_DEPENDS(); + if (tsk->terminated) + break; + while ((e = wl_deq_event(wl))) { + WL_DBG(("event type (%d), if idx: %d\n", e->etype, e->emsg.ifidx)); + /* All P2P device address related events comes on primary interface since + * there is no corresponding bsscfg for P2P interface. Map it to p2p0 + * interface. + */ + if ((wl_is_p2p_event(e) == TRUE) && (wl->p2p_net)) { + netdev = wl->p2p_net; + } else { + netdev = dhd_idx2net((struct dhd_pub *)(wl->pub), e->emsg.ifidx); + } + if (!netdev) + netdev = wl_to_prmry_ndev(wl); + if (e->etype < WLC_E_LAST && wl->evt_handler[e->etype]) { + wl->evt_handler[e->etype] (wl, netdev, &e->emsg, e->edata); + } else { + WL_DBG(("Unknown Event (%d): ignoring\n", e->etype)); + } + wl_put_event(e); + } + DHD_OS_WAKE_UNLOCK(wl->pub); + } + WL_ERR(("%s was terminated\n", __func__)); + complete_and_exit(&tsk->completed, 0); + return 0; +} + +void +wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t * e, void *data) +{ + u32 event_type = ntoh32(e->event_type); + struct wl_priv *wl = wlcfg_drv_priv; + +#if (WL_DBG_LEVEL > 0) + s8 *estr = (event_type <= sizeof(wl_dbg_estr) / WL_DBG_ESTR_MAX - 1) ? 
+ wl_dbg_estr[event_type] : (s8 *) "Unknown"; + WL_DBG(("event_type (%d):" "WLC_E_" "%s\n", event_type, estr)); +#endif /* (WL_DBG_LEVEL > 0) */ + + if (likely(!wl_enq_event(wl, ndev, event_type, e, data))) + wl_wakeup_event(wl); +} + +static void wl_init_eq(struct wl_priv *wl) +{ + wl_init_eq_lock(wl); + INIT_LIST_HEAD(&wl->eq_list); +} + +static void wl_flush_eq(struct wl_priv *wl) +{ + struct wl_event_q *e; + unsigned long flags; + + flags = wl_lock_eq(wl); + while (!list_empty(&wl->eq_list)) { + e = list_first_entry(&wl->eq_list, struct wl_event_q, eq_list); + list_del(&e->eq_list); + kfree(e); + } + wl_unlock_eq(wl, flags); +} + +/* +* retrieve first queued event from head +*/ + +static struct wl_event_q *wl_deq_event(struct wl_priv *wl) +{ + struct wl_event_q *e = NULL; + unsigned long flags; + + flags = wl_lock_eq(wl); + if (likely(!list_empty(&wl->eq_list))) { + e = list_first_entry(&wl->eq_list, struct wl_event_q, eq_list); + list_del(&e->eq_list); + } + wl_unlock_eq(wl, flags); + + return e; +} + +/* + * push event to tail of the queue + */ + +static s32 +wl_enq_event(struct wl_priv *wl, struct net_device *ndev, u32 event, const wl_event_msg_t *msg, + void *data) +{ + struct wl_event_q *e; + s32 err = 0; + uint32 evtq_size; + uint32 data_len; + unsigned long flags; + gfp_t aflags; + + data_len = 0; + if (data) + data_len = ntoh32(msg->datalen); + evtq_size = sizeof(struct wl_event_q) + data_len; + aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL; + e = kzalloc(evtq_size, aflags); + if (unlikely(!e)) { + WL_ERR(("event alloc failed\n")); + return -ENOMEM; + } + e->etype = event; + memcpy(&e->emsg, msg, sizeof(wl_event_msg_t)); + if (data) + memcpy(e->edata, data, data_len); + flags = wl_lock_eq(wl); + list_add_tail(&e->eq_list, &wl->eq_list); + wl_unlock_eq(wl, flags); + + return err; +} + +static void wl_put_event(struct wl_event_q *e) +{ + kfree(e); +} + +static s32 wl_config_ifmode(struct wl_priv *wl, struct net_device *ndev, s32 iftype) +{ + s32 infra = 0; + s32 err = 0; + s32 mode = 0; + switch (iftype) { + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_WDS: + WL_ERR(("type (%d) : currently we do not support this mode\n", + iftype)); + err = -EINVAL; + return err; + case NL80211_IFTYPE_ADHOC: + mode = WL_MODE_IBSS; + break; + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + mode = WL_MODE_BSS; + infra = 1; + break; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_P2P_GO: + mode = WL_MODE_AP; + infra = 1; + break; + default: + err = -EINVAL; + WL_ERR(("invalid type (%d)\n", iftype)); + return err; + } + infra = htod32(infra); + err = wldev_ioctl(ndev, WLC_SET_INFRA, &infra, sizeof(infra), true); + if (unlikely(err)) { + WL_ERR(("WLC_SET_INFRA error (%d)\n", err)); + return err; + } + + wl_set_mode_by_netdev(wl, ndev, mode); + + return 0; +} + +static s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add) +{ + s8 iovbuf[WL_EVENTING_MASK_LEN + 12]; + + s8 eventmask[WL_EVENTING_MASK_LEN]; + s32 err = 0; + + /* Setup event_msgs */ + bcm_mkiovar("event_msgs", NULL, 0, iovbuf, + sizeof(iovbuf)); + err = wldev_ioctl(ndev, WLC_GET_VAR, iovbuf, sizeof(iovbuf), false); + if (unlikely(err)) { + WL_ERR(("Get event_msgs error (%d)\n", err)); + goto eventmsg_out; + } + memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN); + if (add) { + setbit(eventmask, event); + } else { + clrbit(eventmask, event); + } + bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, + sizeof(iovbuf)); + err = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), true); 
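+ /* The event mask is read-modify-written: fetched via WLC_GET_VAR above, the requested bit set or cleared, then written back with WLC_SET_VAR. */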
+ if (unlikely(err)) { + WL_ERR(("Set event_msgs error (%d)\n", err)); + goto eventmsg_out; + } + +eventmsg_out: + return err; + +} + +static int wl_construct_reginfo(struct wl_priv *wl, s32 bw_cap) +{ + struct net_device *dev = wl_to_prmry_ndev(wl); + struct ieee80211_channel *band_chan_arr = NULL; + wl_uint32_list_t *list; + u32 i, j, index, n_2g, n_5g, band, channel, array_size; + u32 *n_cnt = NULL; + chanspec_t c = 0; + s32 err = BCME_OK; + bool update; + bool ht40_allowed; + u8 *pbuf = NULL; + +#define LOCAL_BUF_LEN 1024 + pbuf = kzalloc(LOCAL_BUF_LEN, GFP_KERNEL); + if (pbuf == NULL) { + WL_ERR(("failed to allocate local buf\n")); + return -ENOMEM; + } + + list = (wl_uint32_list_t *)(void *)pbuf; + list->count = htod32(WL_NUMCHANSPECS); + + err = wldev_iovar_getbuf_bsscfg(dev, "chanspecs", NULL, + 0, pbuf, LOCAL_BUF_LEN, 0, &wl->ioctl_buf_sync); + if (err != 0) { + WL_ERR(("get chanspecs failed with %d\n", err)); + kfree(pbuf); + return err; + } +#undef LOCAL_BUF_LEN + + band = array_size = n_2g = n_5g = 0; + for (i = 0; i < dtoh32(list->count); i++) { + index = 0; + update = FALSE; + ht40_allowed = FALSE; + c = (chanspec_t)dtoh32(list->element[i]); + channel = CHSPEC_CHANNEL(c); + if (CHSPEC_IS40(c)) { + if (CHSPEC_SB_UPPER(c)) + channel += CH_10MHZ_APART; + else + channel -= CH_10MHZ_APART; + } + + if (CHSPEC_IS2G(c) && channel <= CH_MAX_2G_CHANNEL) { + band_chan_arr = __wl_2ghz_channels; + array_size = ARRAYSIZE(__wl_2ghz_channels); + n_cnt = &n_2g; + band = IEEE80211_BAND_2GHZ; + ht40_allowed = (bw_cap == WLC_N_BW_40ALL) ? TRUE : FALSE; + } else if (CHSPEC_IS5G(c) && channel > CH_MAX_2G_CHANNEL) { + band_chan_arr = __wl_5ghz_a_channels; + array_size = ARRAYSIZE(__wl_5ghz_a_channels); + n_cnt = &n_5g; + band = IEEE80211_BAND_5GHZ; + ht40_allowed = (bw_cap == WLC_N_BW_20ALL) ? 
FALSE : TRUE; + } + else { + WL_ERR(("Invalid Channel received %x\n", channel)); + continue; + } + + for (j = 0; (j < *n_cnt && (*n_cnt < array_size)); j++) { + if (band_chan_arr[j].hw_value == channel) { + update = TRUE; + break; + } + } + + if (update) + index = j; + else + index = *n_cnt; + + if (index < array_size) { +#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS) + band_chan_arr[index].center_freq = + ieee80211_channel_to_frequency(channel); +#else + band_chan_arr[index].center_freq = + ieee80211_channel_to_frequency(channel, band); +#endif + band_chan_arr[index].hw_value = channel; + + if (CHSPEC_IS40(c) && ht40_allowed) { + u32 ht40_flag = band_chan_arr[index].flags & IEEE80211_CHAN_NO_HT40; + if (CHSPEC_SB_UPPER(c)) { + if (ht40_flag == IEEE80211_CHAN_NO_HT40) + band_chan_arr[index].flags &= ~IEEE80211_CHAN_NO_HT40; + band_chan_arr[index].flags |= IEEE80211_CHAN_NO_HT40PLUS; + } else { + band_chan_arr[index].flags &= ~IEEE80211_CHAN_NO_HT40; + if (ht40_flag == IEEE80211_CHAN_NO_HT40) + band_chan_arr[index].flags |= IEEE80211_CHAN_NO_HT40MINUS; + } + } else { + band_chan_arr[index].flags = IEEE80211_CHAN_NO_HT40; + if (band == IEEE80211_BAND_2GHZ) + channel |= WL_CHANSPEC_BAND_2G; + else + channel |= WL_CHANSPEC_BAND_5G; + err = wldev_iovar_getint(dev, "per_chan_info", &channel); + if (!err) { + if (channel & WL_CHAN_RADAR) { + band_chan_arr[index].flags |= IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS; + } + if (channel & WL_CHAN_PASSIVE) { + band_chan_arr[index].flags |= IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS; + } + } + } + + if (!update) + (*n_cnt)++; + } + } + + __wl_band_2ghz.n_channels = n_2g; + __wl_band_5ghz_a.n_channels = n_5g; + + kfree(pbuf); + return err; +} + +s32 wl_update_wiphybands(struct wl_priv *wl) +{ + struct wiphy *wiphy; + struct net_device *dev; + u32 bandlist[3]; + u32 nband = 0; + u32 i = 0; + s32 err = 0; + int nmode = 0; + int bw_cap = 0; + int index = 0; + bool rollback_lock = false; + + WL_DBG(("Entry")); + + if (wl == NULL) { + wl = wlcfg_drv_priv; + mutex_lock(&wl->usr_sync); + rollback_lock = true; + } + dev = wl_to_prmry_ndev(wl); + + memset(bandlist, 0, sizeof(bandlist)); + err = wldev_ioctl(dev, WLC_GET_BANDLIST, bandlist, + sizeof(bandlist), false); + if (unlikely(err)) { + WL_ERR(("error read bandlist (%d)\n", err)); + goto end_bands; + } + wiphy = wl_to_wiphy(wl); + nband = bandlist[0]; + wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz; + wiphy->bands[IEEE80211_BAND_5GHZ] = NULL; + + err = wldev_iovar_getint(dev, "nmode", &nmode); + if (unlikely(err)) { + WL_ERR(("error reading nmode (%d)\n", err)); + } else { + /* For nmodeonly check bw cap */ + err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap); + if (unlikely(err)) { + WL_ERR(("error get mimo_bw_cap (%d)\n", err)); + } + } + + err = wl_construct_reginfo(wl, bw_cap); + if (err) { + WL_ERR(("wl_construct_reginfo() fails err=%d\n", err)); + if (err != BCME_UNSUPPORTED) + goto end_bands; + /* Ignore error if "chanspecs" command is not supported */ + err = 0; + } + for (i = 1; i <= nband && i < sizeof(bandlist)/sizeof(u32); i++) { + index = -1; + if (bandlist[i] == WLC_BAND_5G && __wl_band_5ghz_a.n_channels > 0) { + wiphy->bands[IEEE80211_BAND_5GHZ] = + &__wl_band_5ghz_a; + index = IEEE80211_BAND_5GHZ; + if (bw_cap == WLC_N_BW_40ALL || bw_cap == WLC_N_BW_20IN2G_40IN5G) + wiphy->bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; + } else if (bandlist[i] == WLC_BAND_2G && __wl_band_2ghz.n_channels > 0) { + wiphy->bands[IEEE80211_BAND_2GHZ] = 
+ &__wl_band_2ghz; + index = IEEE80211_BAND_2GHZ; + if (bw_cap == WLC_N_BW_40ALL) + wiphy->bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; + } + if ((index >= 0) && nmode) { + wiphy->bands[index]->ht_cap.cap |= + IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_DSSSCCK40; + wiphy->bands[index]->ht_cap.ht_supported = TRUE; + wiphy->bands[index]->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; + wiphy->bands[index]->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16; + } + } + + wiphy_apply_custom_regulatory(wiphy, &brcm_regdom); + +end_bands: + if (rollback_lock) + mutex_unlock(&wl->usr_sync); + return err; +} + +static s32 __wl_cfg80211_up(struct wl_priv *wl) +{ + s32 err = 0; + struct net_device *ndev = wl_to_prmry_ndev(wl); + struct wireless_dev *wdev = ndev->ieee80211_ptr; + + WL_DBG(("In\n")); + + err = dhd_config_dongle(wl, false); + if (unlikely(err)) + return err; + + err = wl_config_ifmode(wl, ndev, wdev->iftype); + if (unlikely(err && err != -EINPROGRESS)) { + WL_ERR(("wl_config_ifmode failed\n")); + } + err = wl_update_wiphybands(wl); + if (unlikely(err)) { + WL_ERR(("wl_update_wiphybands failed\n")); + } + + err = dhd_monitor_init(wl->pub); + err = wl_invoke_iscan(wl); + wl_set_drv_status(wl, READY, ndev); + return err; +} + +static s32 __wl_cfg80211_down(struct wl_priv *wl) +{ + s32 err = 0; + unsigned long flags; + struct net_info *iter, *next; + struct net_device *ndev = wl_to_prmry_ndev(wl); +#ifdef WL_ENABLE_P2P_IF + struct wiphy *wiphy = wl_to_prmry_ndev(wl)->ieee80211_ptr->wiphy; + struct net_device *p2p_net = wl->p2p_net; +#endif + + WL_DBG(("In\n")); + /* Check if cfg80211 interface is already down */ + if (!wl_get_drv_status(wl, READY, ndev)) + return err; /* it is even not ready */ + for_each_ndev(wl, iter, next) + wl_set_drv_status(wl, SCAN_ABORTING, iter->ndev); + + wl_term_iscan(wl); + spin_lock_irqsave(&wl->cfgdrv_lock, flags); + if (wl->scan_request) { + cfg80211_scan_done(wl->scan_request, true); + wl->scan_request = NULL; + } + for_each_ndev(wl, iter, next) { + wl_clr_drv_status(wl, READY, iter->ndev); + wl_clr_drv_status(wl, SCANNING, iter->ndev); + wl_clr_drv_status(wl, SCAN_ABORTING, iter->ndev); + wl_clr_drv_status(wl, CONNECTING, iter->ndev); + wl_clr_drv_status(wl, CONNECTED, iter->ndev); + wl_clr_drv_status(wl, DISCONNECTING, iter->ndev); + wl_clr_drv_status(wl, AP_CREATED, iter->ndev); + wl_clr_drv_status(wl, AP_CREATING, iter->ndev); + } + wl_to_prmry_ndev(wl)->ieee80211_ptr->iftype = + NL80211_IFTYPE_STATION; +#ifdef WL_ENABLE_P2P_IF + wiphy->interface_modes = (wiphy->interface_modes) + & (~(BIT(NL80211_IFTYPE_P2P_CLIENT)| + BIT(NL80211_IFTYPE_P2P_GO))); + if ((p2p_net) && (p2p_net->flags & IFF_UP)) { + /* p2p0 interface is still UP. 
Bring it down */ + p2p_net->flags &= ~IFF_UP; + } +#endif /* WL_ENABLE_P2P_IF */ + spin_unlock_irqrestore(&wl->cfgdrv_lock, flags); + + DNGL_FUNC(dhd_cfg80211_down, (wl)); + wl_flush_eq(wl); + wl_link_down(wl); + if (wl->p2p_supported) + wl_cfgp2p_down(wl); + dhd_monitor_uninit(); + + return err; +} + +s32 wl_cfg80211_up(void *para) +{ + struct wl_priv *wl; + s32 err = 0; + + WL_DBG(("In\n")); + wl = wlcfg_drv_priv; + mutex_lock(&wl->usr_sync); + wl_cfg80211_attach_post(wl_to_prmry_ndev(wl)); + err = __wl_cfg80211_up(wl); + if (err) + WL_ERR(("__wl_cfg80211_up failed\n")); + mutex_unlock(&wl->usr_sync); + return err; +} + +/* Private Event to Supplicant with indication that chip hangs */ +int wl_cfg80211_hang(struct net_device *dev, u16 reason) +{ + struct wl_priv *wl; + wl = wlcfg_drv_priv; + + WL_ERR(("In : chip crash eventing\n")); + cfg80211_disconnected(dev, reason, NULL, 0, GFP_KERNEL); + if (wl != NULL) { + wl_link_down(wl); + } + return 0; +} + +s32 wl_cfg80211_down(void *para) +{ + struct wl_priv *wl; + s32 err = 0; + + WL_DBG(("In\n")); + wl = wlcfg_drv_priv; + mutex_lock(&wl->usr_sync); + err = __wl_cfg80211_down(wl); + mutex_unlock(&wl->usr_sync); + + return err; +} + +static void *wl_read_prof(struct wl_priv *wl, struct net_device *ndev, s32 item) +{ + unsigned long flags; + void *rptr = NULL; + struct wl_profile *profile = wl_get_profile_by_netdev(wl, ndev); + + if (!profile) + return NULL; + spin_lock_irqsave(&wl->cfgdrv_lock, flags); + switch (item) { + case WL_PROF_SEC: + rptr = &profile->sec; + break; + case WL_PROF_ACT: + rptr = &profile->active; + break; + case WL_PROF_BSSID: + rptr = profile->bssid; + break; + case WL_PROF_SSID: + rptr = &profile->ssid; + break; + } + spin_unlock_irqrestore(&wl->cfgdrv_lock, flags); + if (!rptr) + WL_ERR(("invalid item (%d)\n", item)); + return rptr; +} + +static s32 +wl_update_prof(struct wl_priv *wl, struct net_device *ndev, + const wl_event_msg_t *e, void *data, s32 item) +{ + s32 err = 0; + struct wlc_ssid *ssid; + unsigned long flags; + struct wl_profile *profile = wl_get_profile_by_netdev(wl, ndev); + + if (!profile) + return WL_INVALID; + spin_lock_irqsave(&wl->cfgdrv_lock, flags); + switch (item) { + case WL_PROF_SSID: + ssid = (wlc_ssid_t *) data; + memset(profile->ssid.SSID, 0, + sizeof(profile->ssid.SSID)); + memcpy(profile->ssid.SSID, ssid->SSID, ssid->SSID_len); + profile->ssid.SSID_len = ssid->SSID_len; + break; + case WL_PROF_BSSID: + if (data) + memcpy(profile->bssid, data, ETHER_ADDR_LEN); + else + memset(profile->bssid, 0, ETHER_ADDR_LEN); + break; + case WL_PROF_SEC: + memcpy(&profile->sec, data, sizeof(profile->sec)); + break; + case WL_PROF_ACT: + profile->active = *(bool *)data; + break; + case WL_PROF_BEACONINT: + profile->beacon_interval = *(u16 *)data; + break; + case WL_PROF_DTIMPERIOD: + profile->dtim_period = *(u8 *)data; + break; + default: + WL_ERR(("unsupported item (%d)\n", item)); + err = -EOPNOTSUPP; + break; + } + spin_unlock_irqrestore(&wl->cfgdrv_lock, flags); + return err; +} + +void wl_cfg80211_dbg_level(u32 level) +{ + /* + * prohibit to change debug level + * by insmod parameter. 
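+	 * The level argument is therefore ignored and the assignment
+	 * below stays commented out.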
+ * eventually debug level will be configured + * in compile time by using CONFIG_XXX + */ + /* wl_dbg_level = level; */ +} + +static bool wl_is_ibssmode(struct wl_priv *wl, struct net_device *ndev) +{ + return wl_get_mode_by_netdev(wl, ndev) == WL_MODE_IBSS; +} + +static __used bool wl_is_ibssstarter(struct wl_priv *wl) +{ + return wl->ibss_starter; +} + +static void wl_rst_ie(struct wl_priv *wl) +{ + struct wl_ie *ie = wl_to_ie(wl); + + ie->offset = 0; +} + +static __used s32 wl_add_ie(struct wl_priv *wl, u8 t, u8 l, u8 *v) +{ + struct wl_ie *ie = wl_to_ie(wl); + s32 err = 0; + + if (unlikely(ie->offset + l + 2 > WL_TLV_INFO_MAX)) { + WL_ERR(("ei crosses buffer boundary\n")); + return -ENOSPC; + } + ie->buf[ie->offset] = t; + ie->buf[ie->offset + 1] = l; + memcpy(&ie->buf[ie->offset + 2], v, l); + ie->offset += l + 2; + + return err; +} + +static s32 wl_mrg_ie(struct wl_priv *wl, u8 *ie_stream, u16 ie_size) +{ + struct wl_ie *ie = wl_to_ie(wl); + s32 err = 0; + + if (unlikely(ie->offset + ie_size > WL_TLV_INFO_MAX)) { + WL_ERR(("ei_stream crosses buffer boundary\n")); + return -ENOSPC; + } + memcpy(&ie->buf[ie->offset], ie_stream, ie_size); + ie->offset += ie_size; + + return err; +} + +static s32 wl_cp_ie(struct wl_priv *wl, u8 *dst, u16 dst_size) +{ + struct wl_ie *ie = wl_to_ie(wl); + s32 err = 0; + + if (unlikely(ie->offset > dst_size)) { + WL_ERR(("dst_size is not enough\n")); + return -ENOSPC; + } + memcpy(dst, &ie->buf[0], ie->offset); + + return err; +} + +static u32 wl_get_ielen(struct wl_priv *wl) +{ + struct wl_ie *ie = wl_to_ie(wl); + + return ie->offset; +} + +static void wl_link_up(struct wl_priv *wl) +{ + wl->link_up = true; +} + +static void wl_link_down(struct wl_priv *wl) +{ + struct wl_connect_info *conn_info = wl_to_conn(wl); + + WL_DBG(("In\n")); + wl->link_up = false; + conn_info->req_ie_len = 0; + conn_info->resp_ie_len = 0; +} + +static unsigned long wl_lock_eq(struct wl_priv *wl) +{ + unsigned long flags; + + spin_lock_irqsave(&wl->eq_lock, flags); + return flags; +} + +static void wl_unlock_eq(struct wl_priv *wl, unsigned long flags) +{ + spin_unlock_irqrestore(&wl->eq_lock, flags); +} + +static void wl_init_eq_lock(struct wl_priv *wl) +{ + spin_lock_init(&wl->eq_lock); +} + +static void wl_delay(u32 ms) +{ + if (in_atomic() || (ms < jiffies_to_msecs(1))) { + mdelay(ms); + } else { + msleep(ms); + } +} + +s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr) +{ + struct wl_priv *wl = wlcfg_drv_priv; + struct ether_addr p2pif_addr; + struct ether_addr primary_mac; + + if (!wl->p2p) + return -1; + if (!p2p_is_on(wl)) { + get_primary_mac(wl, &primary_mac); + wl_cfgp2p_generate_bss_mac(&primary_mac, p2pdev_addr, &p2pif_addr); + } else { + memcpy(p2pdev_addr->octet, + wl->p2p->dev_addr.octet, ETHER_ADDR_LEN); + } + + return 0; +} +s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len) +{ + struct wl_priv *wl; + + wl = wlcfg_drv_priv; + + return wl_cfgp2p_set_p2p_noa(wl, net, buf, len); +} + +s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len) +{ + struct wl_priv *wl; + wl = wlcfg_drv_priv; + + return wl_cfgp2p_get_p2p_noa(wl, net, buf, len); +} + +s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len) +{ + struct wl_priv *wl; + wl = wlcfg_drv_priv; + + return wl_cfgp2p_set_p2p_ps(wl, net, buf, len); +} + +s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len, + enum wl_management_type type) +{ + struct wl_priv *wl; + struct net_device *ndev = NULL; + struct 
ether_addr primary_mac; + s32 ret = 0; + s32 bssidx = 0; + s32 pktflag = 0; + wl = wlcfg_drv_priv; + + if (wl_get_drv_status(wl, AP_CREATING, net) || + wl_get_drv_status(wl, AP_CREATED, net)) { + ndev = net; + bssidx = 0; + } else if (wl->p2p) { + if (net == wl->p2p_net) { + net = wl_to_prmry_ndev(wl); + } + + if (!wl->p2p->on) { + get_primary_mac(wl, &primary_mac); + wl_cfgp2p_generate_bss_mac(&primary_mac, &wl->p2p->dev_addr, + &wl->p2p->int_addr); + /* In case of p2p_listen command, supplicant send remain_on_channel + * without turning on P2P + */ + p2p_on(wl) = true; + ret = wl_cfgp2p_enable_discovery(wl, ndev, NULL, 0); + + if (unlikely(ret)) { + goto exit; + } + } + if (net != wl_to_prmry_ndev(wl)) { + if (wl_get_mode_by_netdev(wl, net) == WL_MODE_AP) { + ndev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION); + bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_CONNECTION); + } + } else { + ndev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY); + bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE); + } + } + if (ndev != NULL) { + switch (type) { + case WL_BEACON: + pktflag = VNDR_IE_BEACON_FLAG; + break; + case WL_PROBE_RESP: + pktflag = VNDR_IE_PRBRSP_FLAG; + break; + case WL_ASSOC_RESP: + pktflag = VNDR_IE_ASSOCRSP_FLAG; + break; + } + if (pktflag) + ret = wl_cfgp2p_set_management_ie(wl, ndev, bssidx, pktflag, buf, len); + } +exit: + return ret; +} + +static const struct rfkill_ops wl_rfkill_ops = { + .set_block = wl_rfkill_set +}; + +static int wl_rfkill_set(void *data, bool blocked) +{ + struct wl_priv *wl = (struct wl_priv *)data; + + WL_DBG(("Enter \n")); + WL_DBG(("RF %s\n", blocked ? "blocked" : "unblocked")); + + if (!wl) + return -EINVAL; + + wl->rf_blocked = blocked; + + return 0; +} + +static int wl_setup_rfkill(struct wl_priv *wl, bool setup) +{ + s32 err = 0; + + WL_DBG(("Enter \n")); + if (!wl) + return -EINVAL; + if (setup) { + wl->rfkill = rfkill_alloc("brcmfmac-wifi", + wl_cfg80211_get_parent_dev(), + RFKILL_TYPE_WLAN, &wl_rfkill_ops, (void *)wl); + + if (!wl->rfkill) { + err = -ENOMEM; + goto err_out; + } + + err = rfkill_register(wl->rfkill); + + if (err) + rfkill_destroy(wl->rfkill); + } else { + if (!wl->rfkill) { + err = -ENOMEM; + goto err_out; + } + + rfkill_unregister(wl->rfkill); + rfkill_destroy(wl->rfkill); + } + +err_out: + return err; +} + +struct device *wl_cfg80211_get_parent_dev(void) +{ + return cfg80211_parent_dev; +} + +void wl_cfg80211_set_parent_dev(void *dev) +{ + cfg80211_parent_dev = dev; +} + +static void wl_cfg80211_clear_parent_dev(void) +{ + cfg80211_parent_dev = NULL; +} + +static void get_primary_mac(struct wl_priv *wl, struct ether_addr *mac) +{ + wldev_iovar_getbuf_bsscfg(wl_to_prmry_ndev(wl), "cur_etheraddr", NULL, + 0, wl->ioctl_buf, WLC_IOCTL_MAXLEN, 0, &wl->ioctl_buf_sync); + memcpy(mac->octet, wl->ioctl_buf, ETHER_ADDR_LEN); +} + +int wl_cfg80211_do_driver_init(struct net_device *net) +{ + struct wl_priv *wl = *(struct wl_priv **)netdev_priv(net); + + if (!wl || !wl->wdev) + return -EINVAL; + + if (dhd_do_driver_init(wl->wdev->netdev) < 0) + return -1; + + return 0; +} + +void wl_cfg80211_enable_trace(int level) +{ + wl_dbg_level |= WL_DBG_DBG; +} diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.h b/drivers/net/wireless/bcmdhd/wl_cfg80211.h new file mode 100644 index 0000000000000..974770e0e899d --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_cfg80211.h @@ -0,0 +1,687 @@ +/* + * Linux cfg80211 driver + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software 
license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: wl_cfg80211.h,v 1.1.4.1.2.8 2011/02/09 01:37:52 Exp $ + */ + +#ifndef _wl_cfg80211_h_ +#define _wl_cfg80211_h_ + +#include +#include +#include +#include +#include +#include +#include + +#include + +struct wl_conf; +struct wl_iface; +struct wl_priv; +struct wl_security; +struct wl_ibss; + + +#define htod32(i) i +#define htod16(i) i +#define dtoh32(i) i +#define dtoh16(i) i +#define htodchanspec(i) i +#define dtohchanspec(i) i + +#define WL_DBG_NONE 0 +#define WL_DBG_TRACE (1 << 4) +#define WL_DBG_SCAN (1 << 3) +#define WL_DBG_DBG (1 << 2) +#define WL_DBG_INFO (1 << 1) +#define WL_DBG_ERR (1 << 0) + +/* 0 invalidates all debug messages. default is 1 */ +#define WL_DBG_LEVEL 0xFF + +#if defined(DHD_DEBUG) +#define WL_ERR(args) \ +do { \ + if (wl_dbg_level & WL_DBG_ERR) { \ + printk(KERN_ERR "CFG80211-ERROR) %s : ", __func__); \ + printk args; \ + } \ +} while (0) +#else /* defined(DHD_DEBUG) */ +#define WL_ERR(args) \ +do { \ + if ((wl_dbg_level & WL_DBG_ERR) && net_ratelimit()) { \ + printk(KERN_INFO "CFG80211-ERROR) %s : ", __func__); \ + printk args; \ + } \ +} while (0) +#endif /* defined(DHD_DEBUG) */ + +#ifdef WL_INFO +#undef WL_INFO +#endif +#define WL_INFO(args) \ +do { \ + if (wl_dbg_level & WL_DBG_INFO) { \ + printk(KERN_ERR "CFG80211-INFO) %s : ", __func__); \ + printk args; \ + } \ +} while (0) +#ifdef WL_SCAN +#undef WL_SCAN +#endif +#define WL_SCAN(args) \ +do { \ + if (wl_dbg_level & WL_DBG_SCAN) { \ + printk(KERN_ERR "CFG80211-SCAN) %s :", __func__); \ + printk args; \ + } \ +} while (0) +#ifdef WL_TRACE +#undef WL_TRACE +#endif +#define WL_TRACE(args) \ +do { \ + if (wl_dbg_level & WL_DBG_TRACE) { \ + printk(KERN_ERR "CFG80211-TRACE) %s :", __func__); \ + printk args; \ + } \ +} while (0) +#if (WL_DBG_LEVEL > 0) +#define WL_DBG(args) \ +do { \ + if (wl_dbg_level & WL_DBG_DBG) { \ + printk(KERN_ERR "CFG80211-DEBUG) %s :", __func__); \ + printk args; \ + } \ +} while (0) +#else /* !(WL_DBG_LEVEL > 0) */ +#define WL_DBG(args) +#endif /* (WL_DBG_LEVEL > 0) */ + + +#define WL_SCAN_RETRY_MAX 3 +#define WL_NUM_PMKIDS_MAX MAXPMKID +#define WL_SCAN_BUF_MAX (1024 * 8) +#define WL_TLV_INFO_MAX 1024 +#define WL_SCAN_IE_LEN_MAX 2048 +#define WL_BSS_INFO_MAX 2048 +#define WL_ASSOC_INFO_MAX 512 +#define WL_IOCTL_LEN_MAX 1024 +#define WL_EXTRA_BUF_MAX 2048 +#define WL_ISCAN_BUF_MAX 2048 +#define WL_ISCAN_TIMER_INTERVAL_MS 3000 +#define WL_SCAN_ERSULTS_LAST (WL_SCAN_RESULTS_NO_MEM+1) +#define WL_AP_MAX 256 +#define WL_FILE_NAME_MAX 256 +#define WL_DWELL_TIME 200 +#define 
WL_MED_DWELL_TIME 400 +#define WL_LONG_DWELL_TIME 1000 +#define IFACE_MAX_CNT 2 + +#define WL_SCAN_TIMER_INTERVAL_MS 8000 /* Scan timeout */ +#define WL_CHANNEL_SYNC_RETRY 3 +#define WL_ACT_FRAME_RETRY 4 + +#define WL_INVALID -1 + + +/* Bring down SCB Timeout to 20secs from 60secs default */ +#ifndef WL_SCB_TIMEOUT +#define WL_SCB_TIMEOUT 20 +#endif + +/* driver status */ +enum wl_status { + WL_STATUS_READY = 0, + WL_STATUS_SCANNING, + WL_STATUS_SCAN_ABORTING, + WL_STATUS_CONNECTING, + WL_STATUS_CONNECTED, + WL_STATUS_DISCONNECTING, + WL_STATUS_AP_CREATING, + WL_STATUS_AP_CREATED, + WL_STATUS_SENDING_ACT_FRM +}; + +/* wi-fi mode */ +enum wl_mode { + WL_MODE_BSS, + WL_MODE_IBSS, + WL_MODE_AP +}; + +/* driver profile list */ +enum wl_prof_list { + WL_PROF_MODE, + WL_PROF_SSID, + WL_PROF_SEC, + WL_PROF_IBSS, + WL_PROF_BAND, + WL_PROF_BSSID, + WL_PROF_ACT, + WL_PROF_BEACONINT, + WL_PROF_DTIMPERIOD +}; + +/* driver iscan state */ +enum wl_iscan_state { + WL_ISCAN_STATE_IDLE, + WL_ISCAN_STATE_SCANING +}; + +/* donlge escan state */ +enum wl_escan_state { + WL_ESCAN_STATE_IDLE, + WL_ESCAN_STATE_SCANING +}; +/* fw downloading status */ +enum wl_fw_status { + WL_FW_LOADING_DONE, + WL_NVRAM_LOADING_DONE +}; + +enum wl_management_type { + WL_BEACON = 0x1, + WL_PROBE_RESP = 0x2, + WL_ASSOC_RESP = 0x4 +}; +/* beacon / probe_response */ +struct beacon_proberesp { + __le64 timestamp; + __le16 beacon_int; + __le16 capab_info; + u8 variable[0]; +} __attribute__ ((packed)); + +/* driver configuration */ +struct wl_conf { + u32 frag_threshold; + u32 rts_threshold; + u32 retry_short; + u32 retry_long; + s32 tx_power; + struct ieee80211_channel channel; +}; + +typedef s32(*EVENT_HANDLER) (struct wl_priv *wl, + struct net_device *ndev, const wl_event_msg_t *e, void *data); + +/* bss inform structure for cfg80211 interface */ +struct wl_cfg80211_bss_info { + u16 band; + u16 channel; + s16 rssi; + u16 frame_len; + u8 frame_buf[1]; +}; + +/* basic structure of scan request */ +struct wl_scan_req { + struct wlc_ssid ssid; +}; + +/* basic structure of information element */ +struct wl_ie { + u16 offset; + u8 buf[WL_TLV_INFO_MAX]; +}; + +/* event queue for cfg80211 main event */ +struct wl_event_q { + struct list_head eq_list; + u32 etype; + wl_event_msg_t emsg; + s8 edata[1]; +}; + +/* security information with currently associated ap */ +struct wl_security { + u32 wpa_versions; + u32 auth_type; + u32 cipher_pairwise; + u32 cipher_group; + u32 wpa_auth; +}; + +/* ibss information for currently joined ibss network */ +struct wl_ibss { + u8 beacon_interval; /* in millisecond */ + u8 atim; /* in millisecond */ + s8 join_only; + u8 band; + u8 channel; +}; + +/* wl driver profile */ +struct wl_profile { + u32 mode; + s32 band; + struct wlc_ssid ssid; + struct wl_security sec; + struct wl_ibss ibss; + u8 bssid[ETHER_ADDR_LEN]; + u16 beacon_interval; + u8 dtim_period; + bool active; +}; + +struct net_info { + struct net_device *ndev; + struct wireless_dev *wdev; + struct wl_profile profile; + s32 mode; + unsigned long sme_state; + struct list_head list; /* list of all net_info structure */ +}; +typedef s32(*ISCAN_HANDLER) (struct wl_priv *wl); + +/* iscan controller */ +struct wl_iscan_ctrl { + struct net_device *dev; + struct timer_list timer; + u32 timer_ms; + u32 timer_on; + s32 state; + struct task_struct *tsk; + struct semaphore sync; + ISCAN_HANDLER iscan_handler[WL_SCAN_ERSULTS_LAST]; + void *data; + s8 ioctl_buf[WLC_IOCTL_SMLEN]; + s8 scan_buf[WL_ISCAN_BUF_MAX]; +}; + +/* association inform */ +#define MAX_REQ_LINE 
1024 +struct wl_connect_info { + u8 req_ie[MAX_REQ_LINE]; + s32 req_ie_len; + u8 resp_ie[MAX_REQ_LINE]; + s32 resp_ie_len; +}; + +/* firmware /nvram downloading controller */ +struct wl_fw_ctrl { + const struct firmware *fw_entry; + unsigned long status; + u32 ptr; + s8 fw_name[WL_FILE_NAME_MAX]; + s8 nvram_name[WL_FILE_NAME_MAX]; +}; + +/* assoc ie length */ +struct wl_assoc_ielen { + u32 req_len; + u32 resp_len; +}; + +/* wpa2 pmk list */ +struct wl_pmk_list { + pmkid_list_t pmkids; + pmkid_t foo[MAXPMKID - 1]; +}; + + +#define ESCAN_BUF_SIZE (64 * 1024) + +struct escan_info { + u32 escan_state; + u8 escan_buf[ESCAN_BUF_SIZE]; + struct wiphy *wiphy; + struct net_device *ndev; +}; + +struct ap_info { +/* Structure to hold WPS, WPA IEs for a AP */ + u8 probe_res_ie[IE_MAX_LEN]; + u8 beacon_ie[IE_MAX_LEN]; + u32 probe_res_ie_len; + u32 beacon_ie_len; + u8 *wpa_ie; + u8 *rsn_ie; + u8 *wps_ie; + bool security_mode; +}; +struct btcoex_info { + struct timer_list timer; + u32 timer_ms; + u32 timer_on; + u32 ts_dhcp_start; /* ms ts ecord time stats */ + u32 ts_dhcp_ok; /* ms ts ecord time stats */ + bool dhcp_done; /* flag, indicates that host done with + * dhcp before t1/t2 expiration + */ + s32 bt_state; + struct work_struct work; + struct net_device *dev; +}; + +struct sta_info { + /* Structure to hold WPS IE for a STA */ + u8 probe_req_ie[IE_MAX_LEN]; + u8 assoc_req_ie[IE_MAX_LEN]; + u32 probe_req_ie_len; + u32 assoc_req_ie_len; +}; + +struct afx_hdl { + wl_af_params_t *pending_tx_act_frm; + struct ether_addr pending_tx_dst_addr; + struct net_device *dev; + struct work_struct work; + u32 bssidx; + u32 retry; + s32 peer_chan; + bool ack_recv; +}; + +/* private data of cfg80211 interface */ +typedef struct wl_priv { + struct wireless_dev *wdev; /* representing wl cfg80211 device */ + + struct wireless_dev *p2p_wdev; /* representing wl cfg80211 device for P2P */ + struct net_device *p2p_net; /* reference to p2p0 interface */ + + struct wl_conf *conf; + struct cfg80211_scan_request *scan_request; /* scan request object */ + EVENT_HANDLER evt_handler[WLC_E_LAST]; + struct list_head eq_list; /* used for event queue */ + struct list_head net_list; /* used for struct net_info */ + spinlock_t eq_lock; /* for event queue synchronization */ + spinlock_t cfgdrv_lock; /* to protect scan status (and others if needed) */ + struct completion act_frm_scan; + struct mutex usr_sync; /* maily for up/down synchronization */ + struct wl_scan_results *bss_list; + struct wl_scan_results *scan_results; + + /* scan request object for internal purpose */ + struct wl_scan_req *scan_req_int; + /* information element object for internal purpose */ + struct wl_ie ie; + struct wl_iscan_ctrl *iscan; /* iscan controller */ + + /* association information container */ + struct wl_connect_info conn_info; + + struct wl_pmk_list *pmk_list; /* wpa2 pmk list */ + tsk_ctl_t event_tsk; /* task of main event handler thread */ + void *pub; + u32 iface_cnt; + u32 channel; /* current channel */ + bool iscan_on; /* iscan on/off switch */ + bool iscan_kickstart; /* indicate iscan already started */ + bool escan_on; /* escan on/off switch */ + struct escan_info escan_info; /* escan information */ + bool active_scan; /* current scan mode */ + bool ibss_starter; /* indicates this sta is ibss starter */ + bool link_up; /* link/connection up flag */ + + /* indicate whether chip to support power save mode */ + bool pwr_save; + bool roam_on; /* on/off switch for self-roaming */ + bool scan_tried; /* indicates if first scan attempted */ + u8 
*ioctl_buf; /* ioctl buffer */ + struct mutex ioctl_buf_sync; + u8 *escan_ioctl_buf; + u8 *extra_buf; /* maily to grab assoc information */ + struct dentry *debugfsdir; + struct rfkill *rfkill; + bool rf_blocked; + struct ieee80211_channel remain_on_chan; + enum nl80211_channel_type remain_on_chan_type; + u64 send_action_id; + u64 last_roc_id; + wait_queue_head_t netif_change_event; + struct afx_hdl *afx_hdl; + struct ap_info *ap_info; + struct sta_info *sta_info; + struct p2p_info *p2p; + bool p2p_supported; + struct btcoex_info *btcoex_info; + struct timer_list scan_timeout; /* Timer for catch scan event timeout */ +#ifdef WL_SCHED_SCAN + struct cfg80211_sched_scan_request *sched_scan_req; /* scheduled scan req */ +#endif /* WL_SCHED_SCAN */ + bool sched_scan_running; /* scheduled scan req status */ + u16 hostapd_chan; /* remember chan requested by framework for hostapd */ + u16 deauth_reason; /* Place holder to save deauth/disassoc reasons */ + u16 scan_busy_count; + struct work_struct work_scan_timeout; +} wl_priv_t; + + +static inline struct wl_bss_info *next_bss(struct wl_scan_results *list, struct wl_bss_info *bss) +{ + return bss = bss ? + (struct wl_bss_info *)((uintptr) bss + dtoh32(bss->length)) : list->bss_info; +} + +static inline s32 +wl_alloc_netinfo(struct wl_priv *wl, struct net_device *ndev, + struct wireless_dev * wdev, s32 mode) +{ + struct net_info *_net_info; + s32 err = 0; + if (wl->iface_cnt == IFACE_MAX_CNT) + return -ENOMEM; + _net_info = kzalloc(sizeof(struct net_info), GFP_KERNEL); + if (!_net_info) + err = -ENOMEM; + else { + _net_info->mode = mode; + _net_info->ndev = ndev; + _net_info->wdev = wdev; + wl->iface_cnt++; + list_add(&_net_info->list, &wl->net_list); + } + return err; +} + +static inline void +wl_dealloc_netinfo(struct wl_priv *wl, struct net_device *ndev) +{ + struct net_info *_net_info, *next; + + list_for_each_entry_safe(_net_info, next, &wl->net_list, list) { + if (ndev && (_net_info->ndev == ndev)) { + list_del(&_net_info->list); + wl->iface_cnt--; + if (_net_info->wdev) { + kfree(_net_info->wdev); + ndev->ieee80211_ptr = NULL; + } + kfree(_net_info); + } + } +} + +static inline void +wl_delete_all_netinfo(struct wl_priv *wl) +{ + struct net_info *_net_info, *next; + + list_for_each_entry_safe(_net_info, next, &wl->net_list, list) { + list_del(&_net_info->list); + if (_net_info->wdev) + kfree(_net_info->wdev); + kfree(_net_info); + } + wl->iface_cnt = 0; +} + +static inline bool +wl_get_status_all(struct wl_priv *wl, s32 status) + +{ + struct net_info *_net_info, *next; + u32 cnt = 0; + list_for_each_entry_safe(_net_info, next, &wl->net_list, list) { + if (_net_info->ndev && + test_bit(status, &_net_info->sme_state)) + cnt++; + } + return cnt? 
true: false; +} + +static inline void +wl_set_status_by_netdev(struct wl_priv *wl, s32 status, + struct net_device *ndev, u32 op) +{ + + struct net_info *_net_info, *next; + + list_for_each_entry_safe(_net_info, next, &wl->net_list, list) { + if (ndev && (_net_info->ndev == ndev)) { + switch (op) { + case 1: + set_bit(status, &_net_info->sme_state); + break; + case 2: + clear_bit(status, &_net_info->sme_state); + break; + case 4: + change_bit(status, &_net_info->sme_state); + break; + } + } + + } +} + +static inline u32 +wl_get_status_by_netdev(struct wl_priv *wl, s32 status, + struct net_device *ndev) +{ + struct net_info *_net_info, *next; + + list_for_each_entry_safe(_net_info, next, &wl->net_list, list) { + if (ndev && (_net_info->ndev == ndev)) + return test_bit(status, &_net_info->sme_state); + } + return 0; +} + +static inline s32 +wl_get_mode_by_netdev(struct wl_priv *wl, struct net_device *ndev) +{ + struct net_info *_net_info, *next; + + list_for_each_entry_safe(_net_info, next, &wl->net_list, list) { + if (ndev && (_net_info->ndev == ndev)) + return _net_info->mode; + } + return -1; +} + +static inline void +wl_set_mode_by_netdev(struct wl_priv *wl, struct net_device *ndev, + s32 mode) +{ + struct net_info *_net_info, *next; + + list_for_each_entry_safe(_net_info, next, &wl->net_list, list) { + if (ndev && (_net_info->ndev == ndev)) + _net_info->mode = mode; + } +} + +static inline struct wl_profile * +wl_get_profile_by_netdev(struct wl_priv *wl, struct net_device *ndev) +{ + struct net_info *_net_info, *next; + + list_for_each_entry_safe(_net_info, next, &wl->net_list, list) { + if (ndev && (_net_info->ndev == ndev)) + return &_net_info->profile; + } + return NULL; +} +#define wl_to_wiphy(w) (w->wdev->wiphy) +#define wl_to_prmry_ndev(w) (w->wdev->netdev) +#define ndev_to_wl(n) (wdev_to_wl(n->ieee80211_ptr)) +#define wl_to_sr(w) (w->scan_req_int) +#define wl_to_ie(w) (&w->ie) +#define iscan_to_wl(i) ((struct wl_priv *)(i->data)) +#define wl_to_iscan(w) (w->iscan) +#define wl_to_conn(w) (&w->conn_info) +#define wiphy_from_scan(w) (w->escan_info.wiphy) +#define wl_get_drv_status_all(wl, stat) \ + (wl_get_status_all(wl, WL_STATUS_ ## stat)) +#define wl_get_drv_status(wl, stat, ndev) \ + (wl_get_status_by_netdev(wl, WL_STATUS_ ## stat, ndev)) +#define wl_set_drv_status(wl, stat, ndev) \ + (wl_set_status_by_netdev(wl, WL_STATUS_ ## stat, ndev, 1)) +#define wl_clr_drv_status(wl, stat, ndev) \ + (wl_set_status_by_netdev(wl, WL_STATUS_ ## stat, ndev, 2)) +#define wl_chg_drv_status(wl, stat, ndev) \ + (wl_set_status_by_netdev(wl, WL_STATUS_ ## stat, ndev, 4)) + +#define for_each_bss(list, bss, __i) \ + for (__i = 0; __i < list->count && __i < WL_AP_MAX; __i++, bss = next_bss(list, bss)) + +#define for_each_ndev(wl, iter, next) \ + list_for_each_entry_safe(iter, next, &wl->net_list, list) + + +/* In case of WPS from wpa_supplicant, pairwise siute and group suite is 0. 
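+ * The is_wps_conn() macro below therefore checks for a WPS IE in the
+ * connect request together with an empty pairwise/group cipher list.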
+ * In addtion to that, wpa_version is WPA_VERSION_1 + */ +#define is_wps_conn(_sme) \ + ((wl_cfgp2p_find_wpsie((u8 *)_sme->ie, _sme->ie_len) != NULL) && \ + (!_sme->crypto.n_ciphers_pairwise) && \ + (!_sme->crypto.cipher_group)) +extern s32 wl_cfg80211_attach(struct net_device *ndev, void *data); +extern s32 wl_cfg80211_attach_post(struct net_device *ndev); +extern void wl_cfg80211_detach(void *para); + +extern void wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t *e, + void *data); +void wl_cfg80211_set_parent_dev(void *dev); +struct device *wl_cfg80211_get_parent_dev(void); + +extern s32 wl_cfg80211_up(void *para); +extern s32 wl_cfg80211_down(void *para); +extern s32 wl_cfg80211_notify_ifadd(struct net_device *ndev, s32 idx, s32 bssidx, + void* _net_attach); +extern s32 wl_cfg80211_ifdel_ops(struct net_device *net); +extern s32 wl_cfg80211_notify_ifdel(void); +extern s32 wl_cfg80211_is_progress_ifadd(void); +extern s32 wl_cfg80211_is_progress_ifchange(void); +extern s32 wl_cfg80211_is_progress_ifadd(void); +extern s32 wl_cfg80211_notify_ifchange(void); +extern void wl_cfg80211_dbg_level(u32 level); +extern s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr); +extern s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len); +extern s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len); +extern s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len, + enum wl_management_type type); +extern s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len); +extern int wl_cfg80211_hang(struct net_device *dev, u16 reason); +extern s32 wl_mode_to_nl80211_iftype(s32 mode); +int wl_cfg80211_do_driver_init(struct net_device *net); +void wl_cfg80211_enable_trace(int level); +extern s32 wl_update_wiphybands(struct wl_priv *wl); +extern s32 wl_cfg80211_if_is_group_owner(void); +#endif /* _wl_cfg80211_h_ */ diff --git a/drivers/net/wireless/bcmdhd/wl_cfgp2p.c b/drivers/net/wireless/bcmdhd/wl_cfgp2p.c new file mode 100644 index 0000000000000..f06ae50b12ab5 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_cfgp2p.c @@ -0,0 +1,2020 @@ +/* + * Linux cfgp2p driver + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: wl_cfgp2p.c,v 1.1.4.1.2.14 2011-02-09 01:40:07 $ + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +static s8 scanparambuf[WLC_IOCTL_SMLEN]; + +static bool +wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type); + +static s32 +wl_cfgp2p_vndr_ie(struct wl_priv *wl, struct net_device *ndev, s32 bssidx, s32 pktflag, + s8 *oui, s32 ie_id, s8 *data, s32 data_len, s32 delete); + +static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev); +static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd); +static int wl_cfgp2p_if_open(struct net_device *net); +static int wl_cfgp2p_if_stop(struct net_device *net); +static s32 wl_cfgp2p_cancel_listen(struct wl_priv *wl, struct net_device *ndev, + bool notify); + +static const struct net_device_ops wl_cfgp2p_if_ops = { + .ndo_open = wl_cfgp2p_if_open, + .ndo_stop = wl_cfgp2p_if_stop, + .ndo_do_ioctl = wl_cfgp2p_do_ioctl, + .ndo_start_xmit = wl_cfgp2p_start_xmit, +}; + +bool wl_cfgp2p_is_pub_action(void *frame, u32 frame_len) +{ + wifi_p2p_pub_act_frame_t *pact_frm; + + if (frame == NULL) + return false; + pact_frm = (wifi_p2p_pub_act_frame_t *)frame; + if (frame_len < sizeof(wifi_p2p_pub_act_frame_t) -1) + return false; + + if (pact_frm->category == P2P_PUB_AF_CATEGORY && + pact_frm->action == P2P_PUB_AF_ACTION && + pact_frm->oui_type == P2P_VER && + memcmp(pact_frm->oui, P2P_OUI, sizeof(pact_frm->oui)) == 0) { + return true; + } + + return false; +} + +bool wl_cfgp2p_is_p2p_action(void *frame, u32 frame_len) +{ + wifi_p2p_action_frame_t *act_frm; + + if (frame == NULL) + return false; + act_frm = (wifi_p2p_action_frame_t *)frame; + if (frame_len < sizeof(wifi_p2p_action_frame_t) -1) + return false; + + if (act_frm->category == P2P_AF_CATEGORY && + act_frm->type == P2P_VER && + memcmp(act_frm->OUI, P2P_OUI, DOT11_OUI_LEN) == 0) { + return true; + } + + return false; +} + +bool wl_cfgp2p_is_gas_action(void *frame, u32 frame_len) +{ + + wifi_p2psd_gas_pub_act_frame_t *sd_act_frm; + + if (frame == NULL) + return false; + + sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame; + if (frame_len < sizeof(wifi_p2psd_gas_pub_act_frame_t) - 1) + return false; + if (sd_act_frm->category != P2PSD_ACTION_CATEGORY) + return false; + + if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ || + sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP || + sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ || + sd_act_frm->action == P2PSD_ACTION_ID_GAS_CRESP) + return true; + else + return false; + +} + +void wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len) +{ + wifi_p2p_pub_act_frame_t *pact_frm; + wifi_p2p_action_frame_t *act_frm; + wifi_p2psd_gas_pub_act_frame_t *sd_act_frm; + if (!frame || frame_len <= 2) + return; + + if (wl_cfgp2p_is_pub_action(frame, frame_len)) { + pact_frm = (wifi_p2p_pub_act_frame_t *)frame; + switch (pact_frm->subtype) { + case P2P_PAF_GON_REQ: + CFGP2P_DBG(("%s P2P Group Owner Negotiation Req Frame\n", + (tx)? "TX": "RX")); + break; + case P2P_PAF_GON_RSP: + CFGP2P_DBG(("%s P2P Group Owner Negotiation Rsp Frame\n", + (tx)? "TX": "RX")); + break; + case P2P_PAF_GON_CONF: + CFGP2P_DBG(("%s P2P Group Owner Negotiation Confirm Frame\n", + (tx)? "TX": "RX")); + break; + case P2P_PAF_INVITE_REQ: + CFGP2P_DBG(("%s P2P Invitation Request Frame\n", + (tx)? 
"TX": "RX")); + break; + case P2P_PAF_INVITE_RSP: + CFGP2P_DBG(("%s P2P Invitation Response Frame\n", + (tx)? "TX": "RX")); + break; + case P2P_PAF_DEVDIS_REQ: + CFGP2P_DBG(("%s P2P Device Discoverability Request Frame\n", + (tx)? "TX": "RX")); + break; + case P2P_PAF_DEVDIS_RSP: + CFGP2P_DBG(("%s P2P Device Discoverability Response Frame\n", + (tx)? "TX": "RX")); + break; + case P2P_PAF_PROVDIS_REQ: + CFGP2P_DBG(("%s P2P Provision Discovery Request Frame\n", + (tx)? "TX": "RX")); + break; + case P2P_PAF_PROVDIS_RSP: + CFGP2P_DBG(("%s P2P Provision Discovery Response Frame\n", + (tx)? "TX": "RX")); + break; + default: + CFGP2P_DBG(("%s Unknown P2P Public Action Frame\n", + (tx)? "TX": "RX")); + + } + + } else if (wl_cfgp2p_is_p2p_action(frame, frame_len)) { + act_frm = (wifi_p2p_action_frame_t *)frame; + switch (act_frm->subtype) { + case P2P_AF_NOTICE_OF_ABSENCE: + CFGP2P_DBG(("%s P2P Notice of Absence Frame\n", + (tx)? "TX": "RX")); + break; + case P2P_AF_PRESENCE_REQ: + CFGP2P_DBG(("%s P2P Presence Request Frame\n", + (tx)? "TX": "RX")); + break; + case P2P_AF_PRESENCE_RSP: + CFGP2P_DBG(("%s P2P Presence Response Frame\n", + (tx)? "TX": "RX")); + break; + case P2P_AF_GO_DISC_REQ: + CFGP2P_DBG(("%s P2P Discoverability Request Frame\n", + (tx)? "TX": "RX")); + break; + default: + CFGP2P_DBG(("%s Unknown P2P Action Frame\n", + (tx)? "TX": "RX")); + } + + } else if (wl_cfgp2p_is_gas_action(frame, frame_len)) { + sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame; + switch (sd_act_frm->action) { + case P2PSD_ACTION_ID_GAS_IREQ: + CFGP2P_DBG(("%s P2P GAS Initial Request\n", + (tx)? "TX" : "RX")); + break; + case P2PSD_ACTION_ID_GAS_IRESP: + CFGP2P_DBG(("%s P2P GAS Initial Response\n", + (tx)? "TX" : "RX")); + break; + case P2PSD_ACTION_ID_GAS_CREQ: + CFGP2P_DBG(("%s P2P GAS Comback Request\n", + (tx)? "TX" : "RX")); + break; + case P2PSD_ACTION_ID_GAS_CRESP: + CFGP2P_DBG(("%s P2P GAS Comback Response\n", + (tx)? "TX" : "RX")); + break; + default: + CFGP2P_DBG(("%s Unknown P2P GAS Frame\n", + (tx)? 
"TX" : "RX")); + } + } +} + +/* + * Initialize variables related to P2P + * + */ +s32 +wl_cfgp2p_init_priv(struct wl_priv *wl) +{ + if (!(wl->p2p = kzalloc(sizeof(struct p2p_info), GFP_KERNEL))) { + CFGP2P_ERR(("struct p2p_info allocation failed\n")); + return -ENOMEM; + } +#define INIT_IE(IE_TYPE, BSS_TYPE) \ + do { \ + memset(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \ + sizeof(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \ + wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \ + } while (0); + + INIT_IE(probe_req, P2PAPI_BSSCFG_PRIMARY); + INIT_IE(probe_res, P2PAPI_BSSCFG_PRIMARY); + INIT_IE(assoc_req, P2PAPI_BSSCFG_PRIMARY); + INIT_IE(assoc_res, P2PAPI_BSSCFG_PRIMARY); + INIT_IE(beacon, P2PAPI_BSSCFG_PRIMARY); + INIT_IE(probe_req, P2PAPI_BSSCFG_DEVICE); + INIT_IE(probe_res, P2PAPI_BSSCFG_DEVICE); + INIT_IE(assoc_req, P2PAPI_BSSCFG_DEVICE); + INIT_IE(assoc_res, P2PAPI_BSSCFG_DEVICE); + INIT_IE(beacon, P2PAPI_BSSCFG_DEVICE); + INIT_IE(probe_req, P2PAPI_BSSCFG_CONNECTION); + INIT_IE(probe_res, P2PAPI_BSSCFG_CONNECTION); + INIT_IE(assoc_req, P2PAPI_BSSCFG_CONNECTION); + INIT_IE(assoc_res, P2PAPI_BSSCFG_CONNECTION); + INIT_IE(beacon, P2PAPI_BSSCFG_CONNECTION); +#undef INIT_IE + wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY) = wl_to_prmry_ndev(wl); + wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_PRIMARY) = 0; + wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = NULL; + wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = 0; + wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION) = NULL; + wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_CONNECTION) = 0; + return BCME_OK; + +} +/* + * Deinitialize variables related to P2P + * + */ +void +wl_cfgp2p_deinit_priv(struct wl_priv *wl) +{ + CFGP2P_DBG(("In\n")); + + if (wl->p2p) { + kfree(wl->p2p); + wl->p2p = NULL; + } + wl->p2p_supported = 0; +} +/* + * Set P2P functions into firmware + */ +s32 +wl_cfgp2p_set_firm_p2p(struct wl_priv *wl) +{ + struct net_device *ndev = wl_to_prmry_ndev(wl); + struct ether_addr null_eth_addr = { { 0, 0, 0, 0, 0, 0 } }; + s32 ret = BCME_OK; + s32 val = 0; + /* Do we have to check whether APSTA is enabled or not ? */ + wldev_iovar_getint(ndev, "apsta", &val); + if (val == 0) { + val = 1; + wldev_ioctl(ndev, WLC_DOWN, &val, sizeof(s32), true); + wldev_iovar_setint(ndev, "apsta", val); + wldev_ioctl(ndev, WLC_UP, &val, sizeof(s32), true); + } + val = 1; + /* Disable firmware roaming for P2P */ + wldev_iovar_setint(ndev, "roam_off", val); + /* In case of COB type, firmware has default mac address + * After Initializing firmware, we have to set current mac address to + * firmware for P2P device address + */ + ret = wldev_iovar_setbuf_bsscfg(ndev, "p2p_da_override", &null_eth_addr, + sizeof(null_eth_addr), wl->ioctl_buf, WLC_IOCTL_MAXLEN, 0, &wl->ioctl_buf_sync); + if (ret && ret != BCME_UNSUPPORTED) { + CFGP2P_ERR(("failed to update device address ret %d\n", ret)); + } + return ret; +} + +/* Create a new P2P BSS. + * Parameters: + * @mac : MAC address of the BSS to create + * @if_type : interface type: WL_P2P_IF_GO or WL_P2P_IF_CLIENT + * @chspec : chspec to use if creating a GO BSS. + * Returns 0 if success. 
+ */ +s32 +wl_cfgp2p_ifadd(struct wl_priv *wl, struct ether_addr *mac, u8 if_type, + chanspec_t chspec) +{ + wl_p2p_if_t ifreq; + s32 err; + struct net_device *ndev = wl_to_prmry_ndev(wl); + u32 scb_timeout = WL_SCB_TIMEOUT; + + ifreq.type = if_type; + ifreq.chspec = chspec; + memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet)); + + CFGP2P_DBG(("---wl p2p_ifadd %02x:%02x:%02x:%02x:%02x:%02x %s %u\n", + ifreq.addr.octet[0], ifreq.addr.octet[1], ifreq.addr.octet[2], + ifreq.addr.octet[3], ifreq.addr.octet[4], ifreq.addr.octet[5], + (if_type == WL_P2P_IF_GO) ? "go" : "client", + (chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT)); + + err = wldev_iovar_setbuf(ndev, "p2p_ifadd", &ifreq, sizeof(ifreq), + wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync); + + if (unlikely(err < 0)) { + printk("'wl p2p_ifadd' error %d\n", err); + } else if (if_type == WL_P2P_IF_GO) { + err = wldev_ioctl(ndev, WLC_SET_SCB_TIMEOUT, &scb_timeout, sizeof(u32), true); + if (unlikely(err < 0)) + printk("'wl scb_timeout' error %d\n", err); + } + + return err; +} + +/* Delete a P2P BSS. + * Parameters: + * @mac : MAC address of the BSS to create + * Returns 0 if success. + */ +s32 +wl_cfgp2p_ifdel(struct wl_priv *wl, struct ether_addr *mac) +{ + s32 ret; + struct net_device *netdev = wl_to_prmry_ndev(wl); + + CFGP2P_INFO(("------primary idx %d : wl p2p_ifdel %02x:%02x:%02x:%02x:%02x:%02x\n", + netdev->ifindex, mac->octet[0], mac->octet[1], mac->octet[2], + mac->octet[3], mac->octet[4], mac->octet[5])); + ret = wldev_iovar_setbuf(netdev, "p2p_ifdel", mac, sizeof(*mac), + wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync); + if (unlikely(ret < 0)) { + printk("'wl p2p_ifdel' error %d\n", ret); + } + return ret; +} + +/* Change a P2P Role. + * Parameters: + * @mac : MAC address of the BSS to change a role + * Returns 0 if success. + */ +s32 +wl_cfgp2p_ifchange(struct wl_priv *wl, struct ether_addr *mac, u8 if_type, + chanspec_t chspec) +{ + wl_p2p_if_t ifreq; + s32 err; + u32 scb_timeout = WL_SCB_TIMEOUT; + struct net_device *netdev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION); + + ifreq.type = if_type; + ifreq.chspec = chspec; + memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet)); + + CFGP2P_INFO(("---wl p2p_ifchange %02x:%02x:%02x:%02x:%02x:%02x %s %u\n", + ifreq.addr.octet[0], ifreq.addr.octet[1], ifreq.addr.octet[2], + ifreq.addr.octet[3], ifreq.addr.octet[4], ifreq.addr.octet[5], + (if_type == WL_P2P_IF_GO) ? "go" : "client", + (chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT)); + + err = wldev_iovar_setbuf(netdev, "p2p_ifupd", &ifreq, sizeof(ifreq), + wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync); + + if (unlikely(err < 0)) { + printk("'wl p2p_ifupd' error %d\n", err); + } else if (if_type == WL_P2P_IF_GO) { + err = wldev_ioctl(netdev, WLC_SET_SCB_TIMEOUT, &scb_timeout, sizeof(u32), true); + if (unlikely(err < 0)) + printk("'wl scb_timeout' error %d\n", err); + } + return err; +} + + +/* Get the index of a created P2P BSS. + * Parameters: + * @mac : MAC address of the created BSS + * @index : output: index of created BSS + * Returns 0 if success. 
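+ *
+ * The index is read back with the "p2p_if" iovar on the primary
+ * interface. Illustrative use (a sketch; it assumes the BSS was created
+ * with the interface address kept in wl->p2p->int_addr):
+ *
+ *   s32 cfg_idx = 0;
+ *   if (wl_cfgp2p_ifidx(wl, &wl->p2p->int_addr, &cfg_idx) == BCME_OK)
+ *           CFGP2P_INFO(("new BSS is at bsscfg index %d\n", cfg_idx));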
+ */ +s32 +wl_cfgp2p_ifidx(struct wl_priv *wl, struct ether_addr *mac, s32 *index) +{ + s32 ret; + u8 getbuf[64]; + struct net_device *dev = wl_to_prmry_ndev(wl); + + CFGP2P_INFO(("---wl p2p_if %02x:%02x:%02x:%02x:%02x:%02x\n", + mac->octet[0], mac->octet[1], mac->octet[2], + mac->octet[3], mac->octet[4], mac->octet[5])); + + ret = wldev_iovar_getbuf_bsscfg(dev, "p2p_if", mac, sizeof(*mac), getbuf, + sizeof(getbuf), wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_PRIMARY), NULL); + + if (ret == 0) { + memcpy(index, getbuf, sizeof(index)); + CFGP2P_INFO(("---wl p2p_if ==> %d\n", *index)); + } + + return ret; +} + +static s32 +wl_cfgp2p_set_discovery(struct wl_priv *wl, s32 on) +{ + s32 ret = BCME_OK; + struct net_device *ndev = wl_to_prmry_ndev(wl); + CFGP2P_DBG(("enter\n")); + + ret = wldev_iovar_setint(ndev, "p2p_disc", on); + + if (unlikely(ret < 0)) { + CFGP2P_ERR(("p2p_disc %d error %d\n", on, ret)); + } + + return ret; +} + +/* Set the WL driver's P2P mode. + * Parameters : + * @mode : is one of WL_P2P_DISC_ST_{SCAN,LISTEN,SEARCH}. + * @channel : the channel to listen + * @listen_ms : the time (milli seconds) to wait + * @bssidx : bss index for BSSCFG + * Returns 0 if success + */ + +s32 +wl_cfgp2p_set_p2p_mode(struct wl_priv *wl, u8 mode, u32 channel, u16 listen_ms, int bssidx) +{ + wl_p2p_disc_st_t discovery_mode; + s32 ret; + struct net_device *dev; + CFGP2P_DBG(("enter\n")); + + if (unlikely(bssidx >= P2PAPI_BSSCFG_MAX)) { + CFGP2P_ERR((" %d index out of range\n", bssidx)); + return -1; + } + + dev = wl_to_p2p_bss_ndev(wl, bssidx); + if (unlikely(dev == NULL)) { + CFGP2P_ERR(("bssidx %d is not assigned\n", bssidx)); + return BCME_NOTFOUND; + } + + /* Put the WL driver into P2P Listen Mode to respond to P2P probe reqs */ + discovery_mode.state = mode; + discovery_mode.chspec = CH20MHZ_CHSPEC(channel); + discovery_mode.dwell = listen_ms; + ret = wldev_iovar_setbuf_bsscfg(dev, "p2p_state", &discovery_mode, + sizeof(discovery_mode), wl->ioctl_buf, WLC_IOCTL_MAXLEN, + bssidx, &wl->ioctl_buf_sync); + + return ret; +} + +/* Get the index of the P2P Discovery BSS */ +static s32 +wl_cfgp2p_get_disc_idx(struct wl_priv *wl, s32 *index) +{ + s32 ret; + struct net_device *dev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY); + + ret = wldev_iovar_getint(dev, "p2p_dev", index); + CFGP2P_INFO(("p2p_dev bsscfg_idx=%d ret=%d\n", *index, ret)); + + if (unlikely(ret < 0)) { + CFGP2P_ERR(("'p2p_dev' error %d\n", ret)); + return ret; + } + return ret; +} + +s32 +wl_cfgp2p_init_discovery(struct wl_priv *wl) +{ + + s32 index = 0; + s32 ret = BCME_OK; + + CFGP2P_DBG(("enter\n")); + + if (wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) != 0) { + CFGP2P_ERR(("do nothing, already initialized\n")); + return ret; + } + + ret = wl_cfgp2p_set_discovery(wl, 1); + if (ret < 0) { + CFGP2P_ERR(("set discover error\n")); + return ret; + } + /* Enable P2P Discovery in the WL Driver */ + ret = wl_cfgp2p_get_disc_idx(wl, &index); + + if (ret < 0) { + return ret; + } + wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = + wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY); + wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = index; + + /* Set the initial discovery state to SCAN */ + ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0, + wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE)); + + if (unlikely(ret != 0)) { + CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n")); + wl_cfgp2p_set_discovery(wl, 0); + wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = 0; + wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = NULL; + return 0; + } + return 
ret; +} + +/* Deinitialize P2P Discovery + * Parameters : + * @wl : wl_private data + * Returns 0 if succes + */ +static s32 +wl_cfgp2p_deinit_discovery(struct wl_priv *wl) +{ + s32 ret = BCME_OK; + CFGP2P_DBG(("enter\n")); + + if (wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) == 0) { + CFGP2P_ERR(("do nothing, not initialized\n")); + return -1; + } + /* Set the discovery state to SCAN */ + ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0, + wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE)); + /* Disable P2P discovery in the WL driver (deletes the discovery BSSCFG) */ + ret = wl_cfgp2p_set_discovery(wl, 0); + + /* Clear our saved WPS and P2P IEs for the discovery BSS. The driver + * deleted these IEs when wl_cfgp2p_set_discovery() deleted the discovery + * BSS. + */ + + /* Clear the saved bsscfg index of the discovery BSSCFG to indicate we + * have no discovery BSS. + */ + wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = 0; + wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = NULL; + + return ret; + +} +/* Enable P2P Discovery + * Parameters: + * @wl : wl_private data + * @ie : probe request ie (WPS IE + P2P IE) + * @ie_len : probe request ie length + * Returns 0 if success. + */ +s32 +wl_cfgp2p_enable_discovery(struct wl_priv *wl, struct net_device *dev, + const u8 *ie, u32 ie_len) +{ + s32 ret = BCME_OK; + if (wl_get_p2p_status(wl, DISCOVERY_ON)) { + CFGP2P_INFO((" DISCOVERY is already initialized, we have nothing to do\n")); + goto set_ie; + } + + wl_set_p2p_status(wl, DISCOVERY_ON); + + CFGP2P_DBG(("enter\n")); + + ret = wl_cfgp2p_init_discovery(wl); + if (unlikely(ret < 0)) { + CFGP2P_ERR((" init discovery error %d\n", ret)); + goto exit; + } + /* Set wsec to any non-zero value in the discovery bsscfg to ensure our + * P2P probe responses have the privacy bit set in the 802.11 WPA IE. + * Some peer devices may not initiate WPS with us if this bit is not set. + */ + ret = wldev_iovar_setint_bsscfg(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE), + "wsec", AES_ENABLED, wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE)); + if (unlikely(ret < 0)) { + CFGP2P_ERR((" wsec error %d\n", ret)); + } +set_ie: + ret = wl_cfgp2p_set_management_ie(wl, dev, + wl_cfgp2p_find_idx(wl, dev), + VNDR_IE_PRBREQ_FLAG, ie, ie_len); + + if (unlikely(ret < 0)) { + CFGP2P_ERR(("set probreq ie occurs error %d\n", ret)); + goto exit; + } +exit: + return ret; +} + +/* Disable P2P Discovery + * Parameters: + * @wl : wl_private_data + * Returns 0 if success. + */ +s32 +wl_cfgp2p_disable_discovery(struct wl_priv *wl) +{ + s32 ret = BCME_OK; + CFGP2P_DBG((" enter\n")); + wl_clr_p2p_status(wl, DISCOVERY_ON); + + if (wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) == 0) { + CFGP2P_ERR((" do nothing, not initialized\n")); + goto exit; + } + + ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0, + wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE)); + + if (unlikely(ret < 0)) { + + CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n")); + } + /* Do a scan abort to stop the driver's scan engine in case it is still + * waiting out an action frame tx dwell time. 
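+ * Note that the abort itself is currently compiled out: the
+ * p2pwlu_scan_abort() call below sits under #ifdef NOT_YET, so with that
+ * symbol undefined the teardown continues directly to
+ * wl_cfgp2p_deinit_discovery().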
+ */ +#ifdef NOT_YET + if (wl_get_p2p_status(wl, SCANNING)) { + p2pwlu_scan_abort(hdl, FALSE); + } +#endif + wl_clr_p2p_status(wl, DISCOVERY_ON); + ret = wl_cfgp2p_deinit_discovery(wl); + +exit: + return ret; +} + +s32 +wl_cfgp2p_escan(struct wl_priv *wl, struct net_device *dev, u16 active, + u32 num_chans, u16 *channels, + s32 search_state, u16 action, u32 bssidx) +{ + s32 ret = BCME_OK; + s32 memsize; + s32 eparams_size; + u32 i; + s8 *memblk; + wl_p2p_scan_t *p2p_params; + wl_escan_params_t *eparams; + wlc_ssid_t ssid; + /* Scan parameters */ +#define P2PAPI_SCAN_NPROBES 1 +#define P2PAPI_SCAN_DWELL_TIME_MS 50 +#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 40 +#define P2PAPI_SCAN_HOME_TIME_MS 60 + struct net_device *pri_dev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY); + wl_set_p2p_status(wl, SCANNING); + /* Allocate scan params which need space for 3 channels and 0 ssids */ + eparams_size = (WL_SCAN_PARAMS_FIXED_SIZE + + OFFSETOF(wl_escan_params_t, params)) + + num_chans * sizeof(eparams->params.channel_list[0]); + + memsize = sizeof(wl_p2p_scan_t) + eparams_size; + memblk = scanparambuf; + if (memsize > sizeof(scanparambuf)) { + CFGP2P_ERR((" scanpar buf too small (%u > %u)\n", + memsize, sizeof(scanparambuf))); + return -1; + } + memset(memblk, 0, memsize); + memset(wl->ioctl_buf, 0, WLC_IOCTL_MAXLEN); + if (search_state == WL_P2P_DISC_ST_SEARCH) { + /* + * If we in SEARCH STATE, we don't need to set SSID explictly + * because dongle use P2P WILDCARD internally by default + */ + wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SEARCH, 0, 0, bssidx); + ssid.SSID_len = htod32(0); + + } else if (search_state == WL_P2P_DISC_ST_SCAN) { + /* SCAN STATE 802.11 SCAN + * WFD Supplicant has p2p_find command with (type=progressive, type= full) + * So if P2P_find command with type=progressive, + * we have to set ssid to P2P WILDCARD because + * we just do broadcast scan unless setting SSID + */ + strcpy(ssid.SSID, WL_P2P_WILDCARD_SSID); + ssid.SSID_len = htod32(WL_P2P_WILDCARD_SSID_LEN); + wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0, bssidx); + } + + + /* Fill in the P2P scan structure at the start of the iovar param block */ + p2p_params = (wl_p2p_scan_t*) memblk; + p2p_params->type = 'E'; + /* Fill in the Scan structure that follows the P2P scan structure */ + eparams = (wl_escan_params_t*) (p2p_params + 1); + eparams->params.bss_type = DOT11_BSSTYPE_ANY; + if (active) + eparams->params.scan_type = DOT11_SCANTYPE_ACTIVE; + else + eparams->params.scan_type = DOT11_SCANTYPE_PASSIVE; + + memcpy(&eparams->params.bssid, ðer_bcast, ETHER_ADDR_LEN); + if (ssid.SSID_len) + memcpy(&eparams->params.ssid, &ssid, sizeof(wlc_ssid_t)); + + eparams->params.nprobes = htod32(P2PAPI_SCAN_NPROBES); + eparams->params.home_time = htod32(P2PAPI_SCAN_HOME_TIME_MS); + if (wl_get_drv_status_all(wl, CONNECTED)) + eparams->params.active_time = htod32(-1); + else if (num_chans == 3) + eparams->params.active_time = htod32(P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS); + else + eparams->params.active_time = htod32(P2PAPI_SCAN_DWELL_TIME_MS); + eparams->params.passive_time = htod32(-1); + eparams->params.channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) | + (num_chans & WL_SCAN_PARAMS_COUNT_MASK)); + + for (i = 0; i < num_chans; i++) { + eparams->params.channel_list[i] = htodchanspec(channels[i]); + } + eparams->version = htod32(ESCAN_REQ_VERSION); + eparams->action = htod16(action); + eparams->sync_id = htod16(0x1234); + CFGP2P_INFO(("SCAN CHANNELS : ")); + + for (i = 0; i < num_chans; i++) { + if (i == 0) CFGP2P_INFO(("%d", 
channels[i])); + else CFGP2P_INFO((",%d", channels[i])); + } + + CFGP2P_INFO(("\n")); + + ret = wldev_iovar_setbuf_bsscfg(pri_dev, "p2p_scan", + memblk, memsize, wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync); + return ret; +} + +/* search function to reach at common channel to send action frame + * Parameters: + * @wl : wl_private data + * @ndev : net device for bssidx + * @bssidx : bssidx for BSS + * Returns 0 if success. + */ +s32 +wl_cfgp2p_act_frm_search(struct wl_priv *wl, struct net_device *ndev, + s32 bssidx, s32 channel) +{ + s32 ret = 0; + u32 chan_cnt = 0; + u16 *default_chan_list = NULL; + if (!p2p_is_on(wl)) + return -BCME_ERROR; + CFGP2P_ERR((" Enter\n")); + if (bssidx == P2PAPI_BSSCFG_PRIMARY) + bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE); + if (channel) + chan_cnt = 1; + else + chan_cnt = SOCIAL_CHAN_CNT; + default_chan_list = kzalloc(chan_cnt * sizeof(*default_chan_list), GFP_KERNEL); + if (default_chan_list == NULL) { + CFGP2P_ERR(("channel list allocation failed \n")); + ret = -ENOMEM; + goto exit; + } + if (channel) { + default_chan_list[0] = channel; + } else { + default_chan_list[0] = SOCIAL_CHAN_1; + default_chan_list[1] = SOCIAL_CHAN_2; + default_chan_list[2] = SOCIAL_CHAN_3; + } + ret = wl_cfgp2p_escan(wl, ndev, true, SOCIAL_CHAN_CNT, + default_chan_list, WL_P2P_DISC_ST_SEARCH, + WL_SCAN_ACTION_START, bssidx); + kfree(default_chan_list); +exit: + return ret; +} + +/* Check whether pointed-to IE looks like WPA. */ +#define wl_cfgp2p_is_wpa_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \ + (const uint8 *)WPS_OUI, WPS_OUI_LEN, WPA_OUI_TYPE) +/* Check whether pointed-to IE looks like WPS. */ +#define wl_cfgp2p_is_wps_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \ + (const uint8 *)WPS_OUI, WPS_OUI_LEN, WPS_OUI_TYPE) +/* Check whether the given IE looks like WFA P2P IE. */ +#define wl_cfgp2p_is_p2p_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \ + (const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_P2P) + /* Check whether the given IE looks like WFA WFDisplay IE. */ +#define WFA_OUI_TYPE_WFD 0x0a /* WiFi Display OUI TYPE */ +#define wl_cfgp2p_is_wfd_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \ + (const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_WFD) + +/* Delete and Set a management vndr ie to firmware + * Parameters: + * @wl : wl_private data + * @ndev : net device for bssidx + * @bssidx : bssidx for BSS + * @pktflag : packet flag for IE (VNDR_IE_PRBREQ_FLAG,VNDR_IE_PRBRSP_FLAG, VNDR_IE_ASSOCRSP_FLAG, + * VNDR_IE_ASSOCREQ_FLAG) + * @ie : VNDR IE (such as P2P IE , WPS IE) + * @ie_len : VNDR IE Length + * Returns 0 if success. 
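+ *
+ * Illustrative call (a sketch; wps_ie / wps_ie_len stand for a WPS IE
+ * blob already built by the caller and are not defined in this file):
+ *
+ *   ret = wl_cfgp2p_set_management_ie(wl, ndev,
+ *           wl_cfgp2p_find_idx(wl, ndev), VNDR_IE_PRBREQ_FLAG,
+ *           wps_ie, wps_ie_len);
+ *
+ * Any IE previously saved for the same (bssidx, pktflag) slot is first
+ * deleted from the firmware via the "vndr_ie" iovar before the new one
+ * is added.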
+ */ + +s32 +wl_cfgp2p_set_management_ie(struct wl_priv *wl, struct net_device *ndev, s32 bssidx, + s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len) +{ + /* Vendor-specific Information Element ID */ +#define VNDR_SPEC_ELEMENT_ID 0xdd + s32 ret = BCME_OK; + u32 pos; + u8 *ie_buf; + u8 *mgmt_ie_buf = NULL; + u32 mgmt_ie_buf_len = 0; + u32 *mgmt_ie_len = 0; + u8 ie_id, ie_len; + u8 delete = 0; +#define IE_TYPE(type, bsstype) (wl_to_p2p_bss_saved_ie(wl, bsstype).p2p_ ## type ## _ie) +#define IE_TYPE_LEN(type, bsstype) (wl_to_p2p_bss_saved_ie(wl, bsstype).p2p_ ## type ## _ie_len) + if (p2p_is_on(wl) && bssidx != -1) { + if (bssidx == P2PAPI_BSSCFG_PRIMARY) + bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE); + switch (pktflag) { + case VNDR_IE_PRBREQ_FLAG : + mgmt_ie_buf = IE_TYPE(probe_req, bssidx); + mgmt_ie_len = &IE_TYPE_LEN(probe_req, bssidx); + mgmt_ie_buf_len = sizeof(IE_TYPE(probe_req, bssidx)); + break; + case VNDR_IE_PRBRSP_FLAG : + mgmt_ie_buf = IE_TYPE(probe_res, bssidx); + mgmt_ie_len = &IE_TYPE_LEN(probe_res, bssidx); + mgmt_ie_buf_len = sizeof(IE_TYPE(probe_res, bssidx)); + break; + case VNDR_IE_ASSOCREQ_FLAG : + mgmt_ie_buf = IE_TYPE(assoc_req, bssidx); + mgmt_ie_len = &IE_TYPE_LEN(assoc_req, bssidx); + mgmt_ie_buf_len = sizeof(IE_TYPE(assoc_req, bssidx)); + break; + case VNDR_IE_ASSOCRSP_FLAG : + mgmt_ie_buf = IE_TYPE(assoc_res, bssidx); + mgmt_ie_len = &IE_TYPE_LEN(assoc_res, bssidx); + mgmt_ie_buf_len = sizeof(IE_TYPE(assoc_res, bssidx)); + break; + case VNDR_IE_BEACON_FLAG : + mgmt_ie_buf = IE_TYPE(beacon, bssidx); + mgmt_ie_len = &IE_TYPE_LEN(beacon, bssidx); + mgmt_ie_buf_len = sizeof(IE_TYPE(beacon, bssidx)); + break; + default: + mgmt_ie_buf = NULL; + mgmt_ie_len = NULL; + CFGP2P_ERR(("not suitable type\n")); + return -1; + } + } else if (wl_get_mode_by_netdev(wl, ndev) == WL_MODE_AP) { + switch (pktflag) { + case VNDR_IE_PRBRSP_FLAG : + mgmt_ie_buf = wl->ap_info->probe_res_ie; + mgmt_ie_len = &wl->ap_info->probe_res_ie_len; + mgmt_ie_buf_len = sizeof(wl->ap_info->probe_res_ie); + break; + case VNDR_IE_BEACON_FLAG : + mgmt_ie_buf = wl->ap_info->beacon_ie; + mgmt_ie_len = &wl->ap_info->beacon_ie_len; + mgmt_ie_buf_len = sizeof(wl->ap_info->beacon_ie); + break; + default: + mgmt_ie_buf = NULL; + mgmt_ie_len = NULL; + CFGP2P_ERR(("not suitable type\n")); + return -1; + } + bssidx = 0; + } else if (bssidx == -1 && wl_get_mode_by_netdev(wl, ndev) == WL_MODE_BSS) { + switch (pktflag) { + case VNDR_IE_PRBREQ_FLAG : + mgmt_ie_buf = wl->sta_info->probe_req_ie; + mgmt_ie_len = &wl->sta_info->probe_req_ie_len; + mgmt_ie_buf_len = sizeof(wl->sta_info->probe_req_ie); + break; + case VNDR_IE_ASSOCREQ_FLAG : + mgmt_ie_buf = wl->sta_info->assoc_req_ie; + mgmt_ie_len = &wl->sta_info->assoc_req_ie_len; + mgmt_ie_buf_len = sizeof(wl->sta_info->assoc_req_ie); + break; + default: + mgmt_ie_buf = NULL; + mgmt_ie_len = NULL; + CFGP2P_ERR(("not suitable type\n")); + return -1; + } + bssidx = 0; + } else { + CFGP2P_ERR(("not suitable type\n")); + return -1; + } + + if (vndr_ie_len > mgmt_ie_buf_len) { + CFGP2P_ERR(("extra IE size too big\n")); + ret = -ENOMEM; + } else { + if (mgmt_ie_buf != NULL) { + if (vndr_ie_len && (vndr_ie_len == *mgmt_ie_len) && + (memcmp(mgmt_ie_buf, vndr_ie, vndr_ie_len) == 0)) { + CFGP2P_INFO(("Previous mgmt IE is equals to current IE")); + goto exit; + } + pos = 0; + delete = 1; + ie_buf = (u8 *) mgmt_ie_buf; + while (pos < *mgmt_ie_len) { + ie_id = ie_buf[pos++]; + ie_len = ie_buf[pos++]; + if ((ie_id == DOT11_MNG_VS_ID) && + 
(wl_cfgp2p_is_wps_ie(&ie_buf[pos-2], NULL, 0) || + wl_cfgp2p_is_p2p_ie(&ie_buf[pos-2], NULL, 0) || + wl_cfgp2p_is_wfd_ie(&ie_buf[pos-2], NULL, 0))) { + CFGP2P_INFO(("DELELED ID : %d, Len : %d , OUI :" + "%02x:%02x:%02x\n", ie_id, ie_len, ie_buf[pos], + ie_buf[pos+1], ie_buf[pos+2])); + ret = wl_cfgp2p_vndr_ie(wl, ndev, bssidx, pktflag, + ie_buf+pos, VNDR_SPEC_ELEMENT_ID, ie_buf+pos+3, + ie_len-3, delete); + } + pos += ie_len; + } + + } + *mgmt_ie_len = 0; + /* Add if there is any extra IE */ + if (vndr_ie && vndr_ie_len) { + /* save the current IE in wl struct */ + memcpy(mgmt_ie_buf, vndr_ie, vndr_ie_len); + *mgmt_ie_len = vndr_ie_len; + pos = 0; + ie_buf = (u8 *) vndr_ie; + delete = 0; + while (pos < vndr_ie_len) { + ie_id = ie_buf[pos++]; + ie_len = ie_buf[pos++]; + if ((ie_id == DOT11_MNG_VS_ID) && + (wl_cfgp2p_is_wps_ie(&ie_buf[pos-2], NULL, 0) || + wl_cfgp2p_is_p2p_ie(&ie_buf[pos-2], NULL, 0) || + wl_cfgp2p_is_wfd_ie(&ie_buf[pos-2], NULL, 0))) { + CFGP2P_INFO(("ADDED ID : %d, Len : %d , OUI :" + "%02x:%02x:%02x\n", ie_id, ie_len, ie_buf[pos], + ie_buf[pos+1], ie_buf[pos+2])); + ret = wl_cfgp2p_vndr_ie(wl, ndev, bssidx, pktflag, + ie_buf+pos, VNDR_SPEC_ELEMENT_ID, ie_buf+pos+3, + ie_len-3, delete); + } + pos += ie_len; + } + } + } +#undef IE_TYPE +#undef IE_TYPE_LEN +exit: + return ret; +} + +/* Clear the manament IE buffer of BSSCFG + * Parameters: + * @wl : wl_private data + * @bssidx : bssidx for BSS + * + * Returns 0 if success. + */ +s32 +wl_cfgp2p_clear_management_ie(struct wl_priv *wl, s32 bssidx) +{ +#define INIT_IE(IE_TYPE, BSS_TYPE) \ + do { \ + memset(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \ + sizeof(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \ + wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \ + } while (0); + if (bssidx < 0) { + CFGP2P_ERR(("invalid bssidx\n")); + return BCME_BADARG; + } + INIT_IE(probe_req, bssidx); + INIT_IE(probe_res, bssidx); + INIT_IE(assoc_req, bssidx); + INIT_IE(assoc_res, bssidx); + INIT_IE(beacon, bssidx); + return BCME_OK; +} + + +/* Is any of the tlvs the expected entry? If + * not update the tlvs buffer pointer/length. 
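The add/delete loops above and the wl_cfgp2p_is_*_ie macros both rely on the same two ideas: IEs are laid out as (id, len, body) and a vendor IE is recognised by its OUI plus OUI type. A standalone sketch of that walk and match, with a made-up OUI and hypothetical helper names:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define VNDR_IE_ID 0xdd   /* vendor-specific element ID, as used above */

/* Does this vendor IE carry the given OUI and OUI type?
 * ie points at the element ID byte; ie[1] is the body length. */
static int ie_matches(const uint8_t *ie, const uint8_t *oui, size_t oui_len,
                      uint8_t type)
{
    return ie[0] == VNDR_IE_ID &&
           ie[1] >= oui_len + 1 &&
           memcmp(&ie[2], oui, oui_len) == 0 &&
           ie[2 + oui_len] == type;
}

/* Walk a buffer of (id, len, body) elements, as the add/delete loops above do. */
static void walk_ies(const uint8_t *buf, size_t len,
                     const uint8_t *oui, size_t oui_len, uint8_t type)
{
    size_t pos = 0;

    while (pos + 2 <= len) {
        uint8_t id = buf[pos];
        uint8_t elen = buf[pos + 1];

        if (pos + 2 + elen > len)
            break;                          /* truncated element */
        if (ie_matches(&buf[pos], oui, oui_len, type))
            printf("match: id=0x%02x len=%u\n", id, elen);
        pos += 2 + elen;                    /* next element */
    }
}

int main(void)
{
    /* Example only: a vendor IE with a made-up OUI aa:bb:cc and type 0x09. */
    const uint8_t oui[3] = { 0xaa, 0xbb, 0xcc };
    const uint8_t ies[] = { 0x00, 0x03, 'f', 'o', 'o',          /* SSID */
                            0xdd, 0x05, 0xaa, 0xbb, 0xcc, 0x09, 0x00 };

    walk_ies(ies, sizeof(ies), oui, sizeof(oui), 0x09);
    return 0;
}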
+ */ +static bool +wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type) +{ + /* If the contents match the OUI and the type */ + if (ie[TLV_LEN_OFF] >= oui_len + 1 && + !bcmp(&ie[TLV_BODY_OFF], oui, oui_len) && + type == ie[TLV_BODY_OFF + oui_len]) { + return TRUE; + } + + if (tlvs == NULL) + return FALSE; + /* point to the next ie */ + ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN; + /* calculate the length of the rest of the buffer */ + *tlvs_len -= (int)(ie - *tlvs); + /* update the pointer to the start of the buffer */ + *tlvs = ie; + + return FALSE; +} + +wpa_ie_fixed_t * +wl_cfgp2p_find_wpaie(u8 *parse, u32 len) +{ + bcm_tlv_t *ie; + + while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) { + if (wl_cfgp2p_is_wpa_ie((u8*)ie, &parse, &len)) { + return (wpa_ie_fixed_t *)ie; + } + } + return NULL; +} + +wpa_ie_fixed_t * +wl_cfgp2p_find_wpsie(u8 *parse, u32 len) +{ + bcm_tlv_t *ie; + + while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) { + if (wl_cfgp2p_is_wps_ie((u8*)ie, &parse, &len)) { + return (wpa_ie_fixed_t *)ie; + } + } + return NULL; +} + +wifi_p2p_ie_t * +wl_cfgp2p_find_p2pie(u8 *parse, u32 len) +{ + bcm_tlv_t *ie; + + while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) { + if (wl_cfgp2p_is_p2p_ie((uint8*)ie, &parse, &len)) { + return (wifi_p2p_ie_t *)ie; + } + } + return NULL; +} + +wifi_wfd_ie_t * +wl_cfgp2p_find_wfdie(u8 *parse, u32 len) +{ + bcm_tlv_t *ie; + + while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) { + if (wl_cfgp2p_is_wfd_ie((uint8*)ie, &parse, &len)) { + return (wifi_wfd_ie_t *)ie; + } + } + return NULL; +} + +static s32 +wl_cfgp2p_vndr_ie(struct wl_priv *wl, struct net_device *ndev, s32 bssidx, s32 pktflag, + s8 *oui, s32 ie_id, s8 *data, s32 data_len, s32 delete) +{ + s32 err = BCME_OK; + s32 buf_len; + s32 iecount; + + vndr_ie_setbuf_t *ie_setbuf; + + /* Validate the pktflag parameter */ + if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG | + VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG | + VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG))) { + CFGP2P_ERR(("p2pwl_vndr_ie: Invalid packet flag 0x%x\n", pktflag)); + return -1; + } + + buf_len = sizeof(vndr_ie_setbuf_t) + data_len - 1; + ie_setbuf = (vndr_ie_setbuf_t *) kzalloc(buf_len, GFP_KERNEL); + + CFGP2P_INFO((" ie_id : %02x, data length : %d\n", ie_id, data_len)); + if (!ie_setbuf) { + + CFGP2P_ERR(("Error allocating buffer for IE\n")); + return -ENOMEM; + } + if (delete) + strcpy(ie_setbuf->cmd, "del"); + else + strcpy(ie_setbuf->cmd, "add"); + /* Buffer contains only 1 IE */ + iecount = htod32(1); + memcpy((void *)&ie_setbuf->vndr_ie_buffer.iecount, &iecount, sizeof(int)); + pktflag = htod32(pktflag); + memcpy((void *)&ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].pktflag, + &pktflag, sizeof(uint32)); + ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = ie_id; + ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len + = (uchar)(data_len + VNDR_IE_MIN_LEN); + memcpy(ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui, oui, 3); + memcpy(ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.data, data, data_len); + err = wldev_iovar_setbuf_bsscfg(ndev, "vndr_ie", ie_setbuf, buf_len, + wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync); + + CFGP2P_INFO(("vndr_ie iovar returns %d\n", err)); + kfree(ie_setbuf); + return err; +} + +/* + * Search the bssidx based on dev argument + * Parameters: + * @wl : wl_private data + * @ndev : net device to search bssidx + * Returns bssidx for ndev + */ +s32 
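wl_cfgp2p_vndr_ie above packs a single "add" or "del" request for the firmware's vndr_ie iovar. The sketch below mirrors only the fields the function actually fills (cmd, iecount, pktflag, id, len, oui, data) in a simplified, hypothetical struct; the real vndr_ie_setbuf_t layout comes from the Broadcom headers and is assumed, not reproduced, here. The 3 added to data_len models the OUI; in the driver this is VNDR_IE_MIN_LEN, whose exact value is taken from the Broadcom headers.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Simplified stand-in for vndr_ie_setbuf_t: only the fields set above. */
struct vndr_ie_req {
    char     cmd[4];       /* "add" or "del" */
    uint32_t iecount;      /* always 1 in the code above */
    uint32_t pktflag;      /* e.g. probe request / probe response flag */
    uint8_t  id;           /* element ID, 0xdd for vendor-specific */
    uint8_t  len;          /* 3-byte OUI plus data length */
    uint8_t  oui[3];
    uint8_t  data[64];     /* IE body after the OUI; caller keeps data_len <= 64 */
};

static void fill_vndr_ie_req(struct vndr_ie_req *req, int del,
                             uint32_t pktflag, const uint8_t oui[3],
                             const uint8_t *data, uint8_t data_len)
{
    memset(req, 0, sizeof(*req));
    strcpy(req->cmd, del ? "del" : "add");
    req->iecount = 1;
    req->pktflag = pktflag;
    req->id = 0xdd;
    req->len = (uint8_t)(data_len + 3);   /* body = 3-byte OUI + data */
    memcpy(req->oui, oui, 3);
    memcpy(req->data, data, data_len);
}

int main(void)
{
    struct vndr_ie_req req;
    const uint8_t oui[3] = { 0xaa, 0xbb, 0xcc };   /* example OUI */
    const uint8_t body[] = { 0x09, 0x01, 0x02 };   /* example IE body */

    fill_vndr_ie_req(&req, 0, 0x10 /* example flag */, oui, body, sizeof(body));
    printf("cmd=%s iecount=%u len=%u\n", req.cmd, req.iecount, req.len);
    return 0;
}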
+wl_cfgp2p_find_idx(struct wl_priv *wl, struct net_device *ndev) +{ + u32 i; + s32 index = -1; + + if (ndev == NULL) { + CFGP2P_ERR((" ndev is NULL\n")); + goto exit; + } + if (!wl->p2p_supported) { + return P2PAPI_BSSCFG_PRIMARY; + } + for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) { + if (ndev == wl_to_p2p_bss_ndev(wl, i)) { + index = wl_to_p2p_bss_bssidx(wl, i); + break; + } + } + if (index == -1) + return P2PAPI_BSSCFG_PRIMARY; +exit: + return index; +} +/* + * Callback function for WLC_E_P2P_DISC_LISTEN_COMPLETE + */ +s32 +wl_cfgp2p_listen_complete(struct wl_priv *wl, struct net_device *ndev, + const wl_event_msg_t *e, void *data) +{ + s32 ret = BCME_OK; + + CFGP2P_DBG((" Enter\n")); + + /* If p2p_info is de-initialized, do nothing */ + if (!wl->p2p) + return ret; + + if (wl_get_p2p_status(wl, LISTEN_EXPIRED) == 0) { + wl_set_p2p_status(wl, LISTEN_EXPIRED); + if (timer_pending(&wl->p2p->listen_timer)) { + del_timer_sync(&wl->p2p->listen_timer); + } + cfg80211_remain_on_channel_expired(ndev, wl->last_roc_id, &wl->remain_on_chan, + wl->remain_on_chan_type, GFP_KERNEL); + } else + wl_clr_p2p_status(wl, LISTEN_EXPIRED); + + return ret; + +} + +/* + * Timer expire callback function for LISTEN + * We can't report cfg80211_remain_on_channel_expired from Timer ISR context, + * so lets do it from thread context. + */ +static void +wl_cfgp2p_listen_expired(unsigned long data) +{ + wl_event_msg_t msg; + struct wl_priv *wl = (struct wl_priv *) data; + + CFGP2P_DBG((" Enter\n")); + memset(&msg, 0, sizeof(wl_event_msg_t)); + msg.event_type = hton32(WLC_E_P2P_DISC_LISTEN_COMPLETE); + wl_cfg80211_event(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE), &msg, NULL); +} + +/* + * Routine for cancelling the P2P LISTEN + */ +static s32 +wl_cfgp2p_cancel_listen(struct wl_priv *wl, struct net_device *ndev, + bool notify) +{ + WL_DBG(("Enter \n")); + + /* Irrespective of whether timer is running or not, reset + * the LISTEN state. + */ + wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0, + wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE)); + + if (timer_pending(&wl->p2p->listen_timer)) { + del_timer_sync(&wl->p2p->listen_timer); + + if (notify) + cfg80211_remain_on_channel_expired(ndev, wl->last_roc_id, + &wl->remain_on_chan, wl->remain_on_chan_type, GFP_KERNEL); + } + + + return 0; +} + +/* + * Do a P2P Listen on the given channel for the given duration. + * A listen consists of sitting idle and responding to P2P probe requests + * with a P2P probe response. + * + * This fn assumes dongle p2p device discovery is already enabled. 
+ * Parameters : + * @wl : wl_private data + * @channel : channel to listen + * @duration_ms : the time (milli seconds) to wait + */ +s32 +wl_cfgp2p_discover_listen(struct wl_priv *wl, s32 channel, u32 duration_ms) +{ +#define INIT_TIMER(timer, func, duration, extra_delay) \ + do { \ + init_timer(timer); \ + timer->function = func; \ + timer->expires = jiffies + msecs_to_jiffies(duration + extra_delay); \ + timer->data = (unsigned long) wl; \ + add_timer(timer); \ + } while (0); + + s32 ret = BCME_OK; + struct timer_list *_timer; + CFGP2P_DBG((" Enter Channel : %d, Duration : %d\n", channel, duration_ms)); + if (unlikely(wl_get_p2p_status(wl, DISCOVERY_ON) == 0)) { + + CFGP2P_ERR((" Discovery is not set, so we have noting to do\n")); + + ret = BCME_NOTREADY; + goto exit; + } + if (timer_pending(&wl->p2p->listen_timer)) { + CFGP2P_DBG(("previous LISTEN is not completed yet\n")); + goto exit; + + } else + wl_clr_p2p_status(wl, LISTEN_EXPIRED); + + wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_LISTEN, channel, (u16) duration_ms, + wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE)); + _timer = &wl->p2p->listen_timer; + + /* We will wait to receive WLC_E_P2P_DISC_LISTEN_COMPLETE from dongle , + * otherwise we will wait up to duration_ms + 200ms + */ + INIT_TIMER(_timer, wl_cfgp2p_listen_expired, duration_ms, 200); + +#undef INIT_TIMER +exit: + return ret; +} + + +s32 +wl_cfgp2p_discover_enable_search(struct wl_priv *wl, u8 enable) +{ + s32 ret = BCME_OK; + CFGP2P_DBG((" Enter\n")); + if (!wl_get_p2p_status(wl, DISCOVERY_ON)) { + + CFGP2P_DBG((" do nothing, discovery is off\n")); + return ret; + } + if (wl_get_p2p_status(wl, SEARCH_ENABLED) == enable) { + CFGP2P_DBG(("already : %d\n", enable)); + return ret; + } + + wl_chg_p2p_status(wl, SEARCH_ENABLED); + /* When disabling Search, reset the WL driver's p2p discovery state to + * WL_P2P_DISC_ST_SCAN. + */ + if (!enable) { + wl_clr_p2p_status(wl, SCANNING); + ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0, + wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE)); + } + + return ret; +} + +/* + * Callback function for WLC_E_ACTION_FRAME_COMPLETE, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE + */ +s32 +wl_cfgp2p_action_tx_complete(struct wl_priv *wl, struct net_device *ndev, + const wl_event_msg_t *e, void *data) +{ + s32 ret = BCME_OK; + u32 event_type = ntoh32(e->event_type); + u32 status = ntoh32(e->status); + CFGP2P_DBG((" Enter\n")); + if (event_type == WLC_E_ACTION_FRAME_COMPLETE) { + + CFGP2P_INFO((" WLC_E_ACTION_FRAME_COMPLETE is received : %d\n", status)); + if (status == WLC_E_STATUS_SUCCESS) { + wl_set_p2p_status(wl, ACTION_TX_COMPLETED); + } + else { + wl_set_p2p_status(wl, ACTION_TX_NOACK); + CFGP2P_ERR(("WLC_E_ACTION_FRAME_COMPLETE : NO ACK\n")); + } + } else { + CFGP2P_INFO((" WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE is received," + "status : %d\n", status)); + wake_up_interruptible(&wl->netif_change_event); + } + return ret; +} +/* Send an action frame immediately without doing channel synchronization. + * + * This function does not wait for a completion event before returning. + * The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action + * frame is transmitted. + * The WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE event will be received when an + * 802.11 ack has been received for the sent action frame. 
+ */ +s32 +wl_cfgp2p_tx_action_frame(struct wl_priv *wl, struct net_device *dev, + wl_af_params_t *af_params, s32 bssidx) +{ + s32 ret = BCME_OK; + s32 timeout = 0; + + + CFGP2P_INFO(("\n")); + CFGP2P_INFO(("channel : %u , dwell time : %u\n", + af_params->channel, af_params->dwell_time)); + + wl_clr_p2p_status(wl, ACTION_TX_COMPLETED); + wl_clr_p2p_status(wl, ACTION_TX_NOACK); +#define MAX_WAIT_TIME 2000 + if (bssidx == P2PAPI_BSSCFG_PRIMARY) + bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE); + + ret = wldev_iovar_setbuf_bsscfg(dev, "actframe", af_params, sizeof(*af_params), + wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync); + + if (ret < 0) { + CFGP2P_ERR((" sending action frame is failed\n")); + goto exit; + } + timeout = wait_event_interruptible_timeout(wl->netif_change_event, + (wl_get_p2p_status(wl, ACTION_TX_COMPLETED) || wl_get_p2p_status(wl, ACTION_TX_NOACK)), + msecs_to_jiffies(MAX_WAIT_TIME)); + + if (timeout > 0 && wl_get_p2p_status(wl, ACTION_TX_COMPLETED)) { + CFGP2P_INFO(("tx action frame operation is completed\n")); + ret = BCME_OK; + } else { + ret = BCME_ERROR; + CFGP2P_INFO(("tx action frame operation is failed\n")); + } +exit: + CFGP2P_INFO((" via act frame iovar : status = %d\n", ret)); +#undef MAX_WAIT_TIME + return ret; +} + +/* Generate our P2P Device Address and P2P Interface Address from our primary + * MAC address. + */ +void +wl_cfgp2p_generate_bss_mac(struct ether_addr *primary_addr, + struct ether_addr *out_dev_addr, struct ether_addr *out_int_addr) +{ + memset(out_dev_addr, 0, sizeof(*out_dev_addr)); + memset(out_int_addr, 0, sizeof(*out_int_addr)); + + /* Generate the P2P Device Address. This consists of the device's + * primary MAC address with the locally administered bit set. + */ + memcpy(out_dev_addr, primary_addr, sizeof(*out_dev_addr)); + out_dev_addr->octet[0] |= 0x02; + + /* Generate the P2P Interface Address. If the discovery and connection + * BSSCFGs need to simultaneously co-exist, then this address must be + * different from the P2P Device Address. + */ + memcpy(out_int_addr, out_dev_addr, sizeof(*out_int_addr)); + out_int_addr->octet[4] ^= 0x80; + +} + +/* P2P IF Address change to Virtual Interface MAC Address */ +void +wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id) +{ + wifi_p2p_ie_t *ie = (wifi_p2p_ie_t*) buf; + u16 len = ie->len; + u8 *subel; + u8 subelt_id; + u16 subelt_len; + CFGP2P_DBG((" Enter\n")); + + /* Point subel to the P2P IE's subelt field. + * Subtract the preceding fields (id, len, OUI, oui_type) from the length. 
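wl_cfgp2p_generate_bss_mac above derives both P2P addresses from the primary MAC: the Device Address by setting the locally administered bit, the Interface Address by additionally flipping a bit in octet 4 so the two can coexist. A standalone sketch of the same derivation, using the dummy MAC from wl_cfgp2p_register_ndev below as the example input:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN 6

static void p2p_derive_addrs(const uint8_t primary[ETH_ALEN],
                             uint8_t dev_addr[ETH_ALEN],
                             uint8_t int_addr[ETH_ALEN])
{
    /* Device Address: primary MAC with the locally administered bit set. */
    memcpy(dev_addr, primary, ETH_ALEN);
    dev_addr[0] |= 0x02;

    /* Interface Address: must differ from the Device Address when the
     * discovery and connection BSSCFGs coexist, hence the bit flip. */
    memcpy(int_addr, dev_addr, ETH_ALEN);
    int_addr[4] ^= 0x80;
}

int main(void)
{
    const uint8_t primary[ETH_ALEN] = { 0x00, 0x90, 0x4c, 0x33, 0x22, 0x11 };
    uint8_t dev[ETH_ALEN], intf[ETH_ALEN];
    int i;

    p2p_derive_addrs(primary, dev, intf);
    for (i = 0; i < ETH_ALEN; i++)
        printf("%02x%s", dev[i], i == ETH_ALEN - 1 ? "  <- device address\n" : ":");
    for (i = 0; i < ETH_ALEN; i++)
        printf("%02x%s", intf[i], i == ETH_ALEN - 1 ? "  <- interface address\n" : ":");
    return 0;
}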
+ */ + subel = ie->subelts; + len -= 4; /* exclude OUI + OUI_TYPE */ + + while (len >= 3) { + /* attribute id */ + subelt_id = *subel; + subel += 1; + len -= 1; + + /* 2-byte little endian */ + subelt_len = *subel++; + subelt_len |= *subel++ << 8; + + len -= 2; + len -= subelt_len; /* for the remaining subelt fields */ + + if (subelt_id == element_id) { + if (subelt_id == P2P_SEID_INTINTADDR) { + memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN); + CFGP2P_INFO(("Intended P2P Interface Address ATTR FOUND\n")); + } else if (subelt_id == P2P_SEID_DEV_ID) { + memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN); + CFGP2P_INFO(("Device ID ATTR FOUND\n")); + } else if (subelt_id == P2P_SEID_DEV_INFO) { + memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN); + CFGP2P_INFO(("Device INFO ATTR FOUND\n")); + } else if (subelt_id == P2P_SEID_GROUP_ID) { + memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN); + CFGP2P_INFO(("GROUP ID ATTR FOUND\n")); + } return; + } else { + CFGP2P_DBG(("OTHER id : %d\n", subelt_id)); + } + subel += subelt_len; + } +} +/* + * Check if a BSS is up. + * This is a common implementation called by most OSL implementations of + * p2posl_bss_isup(). DO NOT call this function directly from the + * common code -- call p2posl_bss_isup() instead to allow the OSL to + * override the common implementation if necessary. + */ +bool +wl_cfgp2p_bss_isup(struct net_device *ndev, int bsscfg_idx) +{ + s32 result, val; + bool isup = false; + s8 getbuf[64]; + + /* Check if the BSS is up */ + *(int*)getbuf = -1; + result = wldev_iovar_getbuf_bsscfg(ndev, "bss", &bsscfg_idx, + sizeof(bsscfg_idx), getbuf, sizeof(getbuf), 0, NULL); + if (result != 0) { + CFGP2P_ERR(("'wl bss -C %d' failed: %d\n", bsscfg_idx, result)); + CFGP2P_ERR(("NOTE: this ioctl error is normal " + "when the BSS has not been created yet.\n")); + } else { + val = *(int*)getbuf; + val = dtoh32(val); + CFGP2P_INFO(("---wl bss -C %d ==> %d\n", bsscfg_idx, val)); + isup = (val ? TRUE : FALSE); + } + return isup; +} + + +/* Bring up or down a BSS */ +s32 +wl_cfgp2p_bss(struct wl_priv *wl, struct net_device *ndev, s32 bsscfg_idx, s32 up) +{ + s32 ret = BCME_OK; + s32 val = up ? 1 : 0; + + struct { + s32 cfg; + s32 val; + } bss_setbuf; + + bss_setbuf.cfg = htod32(bsscfg_idx); + bss_setbuf.val = htod32(val); + CFGP2P_INFO(("---wl bss -C %d %s\n", bsscfg_idx, up ? "up" : "down")); + ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf), + wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync); + + if (ret != 0) { + CFGP2P_ERR(("'bss %d' failed with %d\n", up, ret)); + } + + return ret; +} + +/* Check if 'p2p' is supported in the driver */ +s32 +wl_cfgp2p_supported(struct wl_priv *wl, struct net_device *ndev) +{ + s32 ret = BCME_OK; + s32 p2p_supported = 0; + ret = wldev_iovar_getint(ndev, "p2p", + &p2p_supported); + if (ret < 0) { + CFGP2P_ERR(("wl p2p error %d\n", ret)); + return 0; + } + if (p2p_supported == 1) { + CFGP2P_INFO(("p2p is supported\n")); + } else { + CFGP2P_INFO(("p2p is unsupported\n")); + p2p_supported = 0; + } + return p2p_supported; +} + +/* Cleanup P2P resources */ +s32 +wl_cfgp2p_down(struct wl_priv *wl) +{ + + wl_cfgp2p_cancel_listen(wl, + wl->p2p_net ? 
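wl_cfg80211_change_ifaddr and wl_cfgp2p_retreive_p2pattrib both walk P2P attributes laid out as a 1-byte ID, a 2-byte little-endian length and the body. A bounds-checked standalone sketch of that lookup (the attribute IDs in the example are arbitrary):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Find a P2P attribute (subelement) by ID inside an attribute buffer:
 * each entry is 1-byte id, 2-byte little-endian length, then the body.
 * Returns a pointer to the body, or NULL if not found. */
static const uint8_t *find_p2p_attr(const uint8_t *buf, size_t len,
                                    uint8_t wanted_id, uint16_t *out_len)
{
    size_t pos = 0;

    while (pos + 3 <= len) {
        uint8_t id = buf[pos];
        uint16_t alen = (uint16_t)(buf[pos + 1] | (buf[pos + 2] << 8));

        if (pos + 3 + alen > len)
            break;                      /* truncated attribute */
        if (id == wanted_id) {
            *out_len = alen;
            return &buf[pos + 3];
        }
        pos += 3 + alen;                /* go to next subelement */
    }
    return NULL;
}

int main(void)
{
    /* Example buffer: attribute 0x02 (2 bytes) followed by attribute 0x0f (1 byte). */
    const uint8_t attrs[] = { 0x02, 0x02, 0x00, 0xaa, 0xbb,
                              0x0f, 0x01, 0x00, 0xcc };
    uint16_t alen;
    const uint8_t *body = find_p2p_attr(attrs, sizeof(attrs), 0x0f, &alen);

    if (body)
        printf("found attribute 0x0f, len=%u, first byte=0x%02x\n", alen, body[0]);
    return 0;
}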
wl->p2p_net : wl_to_prmry_ndev(wl), TRUE); + + wl_cfgp2p_deinit_priv(wl); + return 0; +} + +s32 +wl_cfgp2p_set_p2p_noa(struct wl_priv *wl, struct net_device *ndev, char* buf, int len) +{ + s32 ret = -1; + int count, start, duration; + wl_p2p_sched_t dongle_noa; + + CFGP2P_DBG((" Enter\n")); + + memset(&dongle_noa, 0, sizeof(dongle_noa)); + + if (wl->p2p && wl->p2p->vif_created) { + + wl->p2p->noa.desc[0].start = 0; + + sscanf(buf, "%d %d %d", &count, &start, &duration); + CFGP2P_DBG(("set_p2p_noa count %d start %d duration %d\n", + count, start, duration)); + if (count != -1) + wl->p2p->noa.desc[0].count = count; + + /* supplicant gives interval as start */ + if (start != -1) + wl->p2p->noa.desc[0].interval = start; + + if (duration != -1) + wl->p2p->noa.desc[0].duration = duration; + + if (wl->p2p->noa.desc[0].count != 255) { + wl->p2p->noa.desc[0].start = 200; + dongle_noa.type = WL_P2P_SCHED_TYPE_REQ_ABS; + dongle_noa.action = WL_P2P_SCHED_ACTION_GOOFF; + dongle_noa.option = WL_P2P_SCHED_OPTION_TSFOFS; + } + else { + /* Continuous NoA interval. */ + dongle_noa.action = WL_P2P_SCHED_ACTION_NONE; + dongle_noa.type = WL_P2P_SCHED_TYPE_ABS; + if ((wl->p2p->noa.desc[0].interval == 102) || + (wl->p2p->noa.desc[0].interval == 100)) { + wl->p2p->noa.desc[0].start = 100 - + wl->p2p->noa.desc[0].duration; + dongle_noa.option = WL_P2P_SCHED_OPTION_BCNPCT; + } + else { + dongle_noa.option = WL_P2P_SCHED_OPTION_NORMAL; + } + } + /* Put the noa descriptor in dongle format for dongle */ + dongle_noa.desc[0].count = htod32(wl->p2p->noa.desc[0].count); + if (dongle_noa.option == WL_P2P_SCHED_OPTION_BCNPCT) { + dongle_noa.desc[0].start = htod32(wl->p2p->noa.desc[0].start); + dongle_noa.desc[0].duration = htod32(wl->p2p->noa.desc[0].duration); + } + else { + dongle_noa.desc[0].start = htod32(wl->p2p->noa.desc[0].start*1000); + dongle_noa.desc[0].duration = htod32(wl->p2p->noa.desc[0].duration*1000); + } + dongle_noa.desc[0].interval = htod32(wl->p2p->noa.desc[0].interval*1000); + + ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION), + "p2p_noa", &dongle_noa, sizeof(dongle_noa), wl->ioctl_buf, WLC_IOCTL_MAXLEN, + &wl->ioctl_buf_sync); + + if (ret < 0) { + CFGP2P_ERR(("fw set p2p_noa failed %d\n", ret)); + } + } + else { + CFGP2P_ERR(("ERROR: set_noa in non-p2p mode\n")); + } + return ret; +} + +s32 +wl_cfgp2p_get_p2p_noa(struct wl_priv *wl, struct net_device *ndev, char* buf, int buf_len) +{ + wifi_p2p_noa_desc_t *noa_desc; + int len = 0, i; + char _buf[200]; + + CFGP2P_DBG((" Enter\n")); + buf[0] = '\0'; + if (wl->p2p && wl->p2p->vif_created) { + if (wl->p2p->noa.desc[0].count || wl->p2p->ops.ops) { + _buf[0] = 1; /* noa index */ + _buf[1] = (wl->p2p->ops.ops ? 
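wl_cfgp2p_set_p2p_noa keeps the supplicant-supplied count/start/duration/interval in host units and converts them for the dongle, scaling everything except the beacon-percentage start/duration by 1000. The sketch below models only that conversion with a hypothetical noa_desc struct; the x1000 factor is taken from the code above and presumably turns milliseconds into microseconds, which is an assumption:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical mirror of the four NoA descriptor fields set above. */
struct noa_desc {
    uint32_t count;
    uint32_t start;
    uint32_t duration;
    uint32_t interval;
};

/* Convert a host-side descriptor for the dongle. 'bcn_pct' mirrors the
 * WL_P2P_SCHED_OPTION_BCNPCT branch, where start/duration stay unscaled. */
static struct noa_desc noa_to_dongle(const struct noa_desc *host, int bcn_pct)
{
    struct noa_desc d;

    d.count = host->count;
    if (bcn_pct) {
        d.start = host->start;
        d.duration = host->duration;
    } else {
        d.start = host->start * 1000;
        d.duration = host->duration * 1000;
    }
    d.interval = host->interval * 1000;
    return d;   /* the driver additionally applies htod32() per field */
}

int main(void)
{
    struct noa_desc host = { .count = 10, .start = 200,
                             .duration = 50, .interval = 100 };
    struct noa_desc d = noa_to_dongle(&host, 0);

    printf("count=%u start=%u duration=%u interval=%u\n",
           d.count, d.start, d.duration, d.interval);
    return 0;
}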
0x80: 0) | + (wl->p2p->ops.ctw & 0x7f); /* ops + ctw */ + len += 2; + if (wl->p2p->noa.desc[0].count) { + noa_desc = (wifi_p2p_noa_desc_t*)&_buf[len]; + noa_desc->cnt_type = wl->p2p->noa.desc[0].count; + noa_desc->duration = wl->p2p->noa.desc[0].duration; + noa_desc->interval = wl->p2p->noa.desc[0].interval; + noa_desc->start = wl->p2p->noa.desc[0].start; + len += sizeof(wifi_p2p_noa_desc_t); + } + if (buf_len <= len * 2) { + CFGP2P_ERR(("ERROR: buf_len %d in not enough for" + "returning noa in string format\n", buf_len)); + return -1; + } + /* We have to convert the buffer data into ASCII strings */ + for (i = 0; i < len; i++) { + sprintf(buf, "%02x", _buf[i]); + buf += 2; + } + buf[i*2] = '\0'; + } + } + else { + CFGP2P_ERR(("ERROR: get_noa in non-p2p mode\n")); + return -1; + } + return len * 2; +} + +s32 +wl_cfgp2p_set_p2p_ps(struct wl_priv *wl, struct net_device *ndev, char* buf, int len) +{ + int ps, ctw; + int ret = -1; + s32 legacy_ps; + + CFGP2P_DBG((" Enter\n")); + if (wl->p2p && wl->p2p->vif_created) { + sscanf(buf, "%d %d %d", &legacy_ps, &ps, &ctw); + CFGP2P_DBG((" Enter legacy_ps %d ps %d ctw %d\n", legacy_ps, ps, ctw)); + if (ctw != -1) { + wl->p2p->ops.ctw = ctw; + ret = 0; + } + if (ps != -1) { + wl->p2p->ops.ops = ps; + ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION), + "p2p_ops", &wl->p2p->ops, sizeof(wl->p2p->ops), + wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync); + if (ret < 0) { + CFGP2P_ERR(("fw set p2p_ops failed %d\n", ret)); + } + } + + if (legacy_ps != -1) { + s32 pm = legacy_ps ? PM_MAX : PM_OFF; + ret = wldev_ioctl(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION), + WLC_SET_PM, &pm, sizeof(pm), true); + if (unlikely(ret)) { + CFGP2P_ERR(("error (%d)\n", ret)); + } + } + } + else { + CFGP2P_ERR(("ERROR: set_p2p_ps in non-p2p mode\n")); + ret = -1; + } + return ret; +} + +u8 * +wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id) +{ + wifi_p2p_ie_t *ie = NULL; + u16 len = 0; + u8 *subel; + u8 subelt_id; + u16 subelt_len; + + if (!buf) { + WL_ERR(("P2P IE not present")); + return 0; + } + + ie = (wifi_p2p_ie_t*) buf; + len = ie->len; + + /* Point subel to the P2P IE's subelt field. + * Subtract the preceding fields (id, len, OUI, oui_type) from the length. 
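wl_cfgp2p_get_p2p_noa returns the NoA index, the ops/CTWindow byte and the optional descriptor to user space as an ASCII hex string, after checking that the output buffer can hold two characters per byte. A standalone sketch of that encoding step:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Encode 'len' bytes as lowercase hex into 'out'. Returns the number of
 * characters written (excluding the terminator), or -1 if 'out' is too small. */
static int to_hex(const uint8_t *in, size_t len, char *out, size_t out_len)
{
    size_t i;

    if (out_len < len * 2 + 1)
        return -1;
    for (i = 0; i < len; i++)
        sprintf(out + i * 2, "%02x", in[i]);
    out[len * 2] = '\0';
    return (int)(len * 2);
}

int main(void)
{
    const uint8_t noa_blob[] = { 0x01, 0x80, 0x0a };   /* index, ops|ctw, ... */
    char str[16];

    if (to_hex(noa_blob, sizeof(noa_blob), str, sizeof(str)) > 0)
        printf("%s\n", str);   /* prints 01800a */
    return 0;
}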
+ */ + subel = ie->subelts; + len -= 4; /* exclude OUI + OUI_TYPE */ + + while (len >= 3) { + /* attribute id */ + subelt_id = *subel; + subel += 1; + len -= 1; + + /* 2-byte little endian */ + subelt_len = *subel++; + subelt_len |= *subel++ << 8; + + len -= 2; + len -= subelt_len; /* for the remaining subelt fields */ + + if (subelt_id == element_id) { + /* This will point to start of subelement attrib after + * attribute id & len + */ + return subel; + } + + /* Go to next subelement */ + subel += subelt_len; + } + + /* Not Found */ + return NULL; +} + +#define P2P_GROUP_CAPAB_GO_BIT 0x01 +u8 * +wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length) +{ + wifi_p2p_ie_t * p2p_ie = NULL; + u8 *capability = NULL; + bool p2p_go = 0; + u8 *ptr = NULL; + + if (!(p2p_ie = wl_cfgp2p_find_p2pie(((u8 *) bi) + bi->ie_offset, bi->ie_length))) { + WL_ERR(("P2P IE not found")); + return NULL; + } + + if (!(capability = wl_cfgp2p_retreive_p2pattrib(p2p_ie, P2P_SEID_P2P_INFO))) { + WL_ERR(("P2P Capability attribute not found")); + return NULL; + } + + /* Check Group capability for Group Owner bit */ + p2p_go = capability[1] & P2P_GROUP_CAPAB_GO_BIT; + if (!p2p_go) { + return bi->BSSID.octet; + } + + /* In probe responses, DEVICE INFO attribute will be present */ + if (!(ptr = wl_cfgp2p_retreive_p2pattrib(p2p_ie, P2P_SEID_DEV_INFO))) { + /* If DEVICE_INFO is not found, this might be a beacon frame. + * check for DEVICE_ID in the beacon frame. + */ + ptr = wl_cfgp2p_retreive_p2pattrib(p2p_ie, P2P_SEID_DEV_ID); + } + + if (!ptr) + WL_ERR((" Both DEVICE_ID & DEVICE_INFO attribute not present in P2P IE ")); + + return ptr; +} + +s32 +wl_cfgp2p_register_ndev(struct wl_priv *wl) +{ + int ret = 0; + struct net_device* net = NULL; + struct wireless_dev *wdev; + uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x33, 0x22, 0x11 }; + + /* Allocate etherdev, including space for private structure */ + if (!(net = alloc_etherdev(sizeof(wl)))) { + CFGP2P_ERR(("%s: OOM - alloc_etherdev\n", __FUNCTION__)); + goto fail; + } + + strcpy(net->name, "p2p%d"); + net->name[IFNAMSIZ - 1] = '\0'; + + /* Copy the reference to wl_priv */ + memcpy((void *)netdev_priv(net), &wl, sizeof(wl)); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) + ASSERT(!net->open); + net->do_ioctl = wl_cfgp2p_do_ioctl; + net->hard_start_xmit = wl_cfgp2p_start_xmit; + net->open = wl_cfgp2p_if_open; + net->stop = wl_cfgp2p_if_stop; +#else + ASSERT(!net->netdev_ops); + net->netdev_ops = &wl_cfgp2p_if_ops; +#endif + + /* Register with a dummy MAC addr */ + memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN); + + wdev = kzalloc(sizeof(*wdev), GFP_KERNEL); + if (unlikely(!wdev)) { + WL_ERR(("Could not allocate wireless device\n")); + return -ENOMEM; + } + + wdev->wiphy = wl->wdev->wiphy; + + wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS); + + net->ieee80211_ptr = wdev; + + SET_NETDEV_DEV(net, wiphy_dev(wdev->wiphy)); + + /* Associate p2p0 network interface with new wdev */ + wdev->netdev = net; + + /* store p2p net ptr for further reference. Note that iflist won't have this + * entry as there corresponding firmware interface is a "Hidden" interface. 
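wl_cfgp2p_retreive_p2p_dev_addr above only needs the DEVICE_INFO or DEVICE_ID attribute when the peer is a group owner; otherwise the BSSID already is the device address. A small sketch of that decision, assuming the usual layout of the P2P Capability attribute body (device capability byte followed by group capability byte), which matches the capability[1] access above; the capability values in the example are arbitrary:

#include <stdio.h>
#include <stdint.h>

#define P2P_GROUP_CAPAB_GO_BIT 0x01

/* Where does the peer's P2P Device Address come from?
 * 'capability' points at the 2-byte body of the P2P Capability attribute. */
static const char *dev_addr_source(const uint8_t capability[2])
{
    if (!(capability[1] & P2P_GROUP_CAPAB_GO_BIT))
        return "BSSID";
    return "DEVICE_INFO (probe response) or DEVICE_ID (beacon) attribute";
}

int main(void)
{
    const uint8_t non_go[2] = { 0x25, 0x00 };
    const uint8_t go[2]     = { 0x25, 0x2b };   /* group capability has the GO bit set */

    printf("non-GO peer: %s\n", dev_addr_source(non_go));
    printf("GO peer:     %s\n", dev_addr_source(go));
    return 0;
}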
+ */ + if (wl->p2p_net) { + CFGP2P_ERR(("p2p_net defined already.\n")); + return -EINVAL; + } else { + wl->p2p_wdev = wdev; + wl->p2p_net = net; + } + + ret = register_netdev(net); + if (ret) { + CFGP2P_ERR((" register_netdevice failed (%d)\n", ret)); + goto fail; + } + + printk("%s: P2P Interface Registered\n", net->name); + + return ret; +fail: + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) + net->open = NULL; +#else + net->netdev_ops = NULL; +#endif + + if (net) { + unregister_netdev(net); + free_netdev(net); + } + + return -ENODEV; +} + +s32 +wl_cfgp2p_unregister_ndev(struct wl_priv *wl) +{ + + if (!wl || !wl->p2p_net) { + CFGP2P_ERR(("Invalid Ptr\n")); + return -EINVAL; + } + + unregister_netdev(wl->p2p_net); + free_netdev(wl->p2p_net); + + return 0; +} + +static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + CFGP2P_DBG(("(%s) is not used for data operations. Droping the packet. \n", ndev->name)); + return 0; +} + +static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd) +{ + int ret = 0; + struct wl_priv *wl = *(struct wl_priv **)netdev_priv(net); + struct net_device *ndev = wl_to_prmry_ndev(wl); + + /* There is no ifidx corresponding to p2p0 in our firmware. So we should + * not Handle any IOCTL cmds on p2p0 other than ANDROID PRIVATE CMDs. + * For Android PRIV CMD handling map it to primary I/F + */ + if (cmd == SIOCDEVPRIVATE+1) { + ret = wl_android_priv_cmd(ndev, ifr, cmd); + + } else { + CFGP2P_ERR(("%s: IOCTL req 0x%x on p2p0 I/F. Ignoring. \n", + __FUNCTION__, cmd)); + return -1; + } + + return ret; +} + +static int wl_cfgp2p_if_open(struct net_device *net) +{ + struct wireless_dev *wdev = net->ieee80211_ptr; + + if (!wdev) + return -EINVAL; + + /* If suppose F/W download (ifconfig wlan0 up) hasn't been done by now, + * do it here. This will make sure that in concurrent mode, supplicant + * is not dependent on a particular order of interface initialization. + * i.e you may give wpa_supp -iwlan0 -N -ip2p0 or wpa_supp -ip2p0 -N + * -iwlan0. + */ + wl_cfg80211_do_driver_init(net); + + wdev->wiphy->interface_modes |= (BIT(NL80211_IFTYPE_P2P_CLIENT) + | BIT(NL80211_IFTYPE_P2P_GO)); + + return 0; +} + +static int wl_cfgp2p_if_stop(struct net_device *net) +{ + struct wireless_dev *wdev = net->ieee80211_ptr; + + if (!wdev) + return -EINVAL; + + wdev->wiphy->interface_modes = (wdev->wiphy->interface_modes) + & (~(BIT(NL80211_IFTYPE_P2P_CLIENT)| + BIT(NL80211_IFTYPE_P2P_GO))); + return 0; +} diff --git a/drivers/net/wireless/bcmdhd/wl_cfgp2p.h b/drivers/net/wireless/bcmdhd/wl_cfgp2p.h new file mode 100644 index 0000000000000..03a645aea31a5 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_cfgp2p.h @@ -0,0 +1,288 @@ +/* + * Linux cfgp2p driver + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: wl_cfgp2p.h,v 1.1.4.1.2.8 2011/02/09 01:37:52 Exp $ + */ +#ifndef _wl_cfgp2p_h_ +#define _wl_cfgp2p_h_ +#include +#include + +struct wl_priv; +extern u32 wl_dbg_level; + +typedef struct wifi_p2p_ie wifi_wfd_ie_t; + +/* Enumeration of the usages of the BSSCFGs used by the P2P Library. Do not + * confuse this with a bsscfg index. This value is an index into the + * saved_ie[] array of structures which in turn contains a bsscfg index field. + */ +typedef enum { + P2PAPI_BSSCFG_PRIMARY, /* maps to driver's primary bsscfg */ + P2PAPI_BSSCFG_DEVICE, /* maps to driver's P2P device discovery bsscfg */ + P2PAPI_BSSCFG_CONNECTION, /* maps to driver's P2P connection bsscfg */ + P2PAPI_BSSCFG_MAX +} p2p_bsscfg_type_t; + +#define IE_MAX_LEN 512 +/* Structure to hold all saved P2P and WPS IEs for a BSSCFG */ +struct p2p_saved_ie { + u8 p2p_probe_req_ie[IE_MAX_LEN]; + u8 p2p_probe_res_ie[IE_MAX_LEN]; + u8 p2p_assoc_req_ie[IE_MAX_LEN]; + u8 p2p_assoc_res_ie[IE_MAX_LEN]; + u8 p2p_beacon_ie[IE_MAX_LEN]; + u32 p2p_probe_req_ie_len; + u32 p2p_probe_res_ie_len; + u32 p2p_assoc_req_ie_len; + u32 p2p_assoc_res_ie_len; + u32 p2p_beacon_ie_len; +}; + +struct p2p_bss { + u32 bssidx; + struct net_device *dev; + struct p2p_saved_ie saved_ie; + void *private_data; +}; + +struct p2p_info { + bool on; /* p2p on/off switch */ + bool scan; + bool vif_created; + s8 vir_ifname[IFNAMSIZ]; + unsigned long status; + struct ether_addr dev_addr; + struct ether_addr int_addr; + struct p2p_bss bss_idx[P2PAPI_BSSCFG_MAX]; + struct timer_list listen_timer; + wl_p2p_sched_t noa; + wl_p2p_ops_t ops; + wlc_ssid_t ssid; +}; + +/* dongle status */ +enum wl_cfgp2p_status { + WLP2P_STATUS_DISCOVERY_ON = 0, + WLP2P_STATUS_SEARCH_ENABLED, + WLP2P_STATUS_IF_ADD, + WLP2P_STATUS_IF_DEL, + WLP2P_STATUS_IF_DELETING, + WLP2P_STATUS_IF_CHANGING, + WLP2P_STATUS_IF_CHANGED, + WLP2P_STATUS_LISTEN_EXPIRED, + WLP2P_STATUS_ACTION_TX_COMPLETED, + WLP2P_STATUS_ACTION_TX_NOACK, + WLP2P_STATUS_SCANNING, + WLP2P_STATUS_GO_NEG_PHASE +}; + + +#define wl_to_p2p_bss_ndev(w, type) ((wl)->p2p->bss_idx[type].dev) +#define wl_to_p2p_bss_bssidx(w, type) ((wl)->p2p->bss_idx[type].bssidx) +#define wl_to_p2p_bss_saved_ie(w, type) ((wl)->p2p->bss_idx[type].saved_ie) +#define wl_to_p2p_bss_private(w, type) ((wl)->p2p->bss_idx[type].private_data) +#define wl_to_p2p_bss(wl, type) ((wl)->p2p->bss_idx[type]) +#define wl_get_p2p_status(wl, stat) ((!(wl)->p2p_supported) ? 0:test_bit(WLP2P_STATUS_ ## stat, \ + &(wl)->p2p->status)) +#define wl_set_p2p_status(wl, stat) ((!(wl)->p2p_supported) ? 0:set_bit(WLP2P_STATUS_ ## stat, \ + &(wl)->p2p->status)) +#define wl_clr_p2p_status(wl, stat) ((!(wl)->p2p_supported) ? 0:clear_bit(WLP2P_STATUS_ ## stat, \ + &(wl)->p2p->status)) +#define wl_chg_p2p_status(wl, stat) ((!(wl)->p2p_supported) ? 
0:change_bit(WLP2P_STATUS_ ## stat, \ + &(wl)->p2p->status)) +#define p2p_on(wl) ((wl)->p2p->on) +#define p2p_scan(wl) ((wl)->p2p->scan) +#define p2p_is_on(wl) ((wl)->p2p && (wl)->p2p->on) + +/* dword align allocation */ +#define WLC_IOCTL_MAXLEN 8192 +#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5] +#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x" + +#define CFGP2P_ERR(args) \ + do { \ + if (wl_dbg_level & WL_DBG_ERR) { \ + printk(KERN_ERR "CFGP2P-ERROR) %s : ", __func__); \ + printk args; \ + } \ + } while (0) +#define CFGP2P_INFO(args) \ + do { \ + if (wl_dbg_level & WL_DBG_INFO) { \ + printk(KERN_ERR "CFGP2P-INFO) %s : ", __func__); \ + printk args; \ + } \ + } while (0) +#define CFGP2P_DBG(args) \ + do { \ + if (wl_dbg_level & WL_DBG_DBG) { \ + printk(KERN_ERR "CFGP2P-DEBUG) %s :", __func__); \ + printk args; \ + } \ + } while (0) + +extern bool +wl_cfgp2p_is_pub_action(void *frame, u32 frame_len); +extern bool +wl_cfgp2p_is_p2p_action(void *frame, u32 frame_len); +extern bool +wl_cfgp2p_is_gas_action(void *frame, u32 frame_len); +extern void +wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len); +extern s32 +wl_cfgp2p_init_priv(struct wl_priv *wl); +extern void +wl_cfgp2p_deinit_priv(struct wl_priv *wl); +extern s32 +wl_cfgp2p_set_firm_p2p(struct wl_priv *wl); +extern s32 +wl_cfgp2p_set_p2p_mode(struct wl_priv *wl, u8 mode, + u32 channel, u16 listen_ms, int bssidx); +extern s32 +wl_cfgp2p_ifadd(struct wl_priv *wl, struct ether_addr *mac, u8 if_type, + chanspec_t chspec); +extern s32 +wl_cfgp2p_ifdel(struct wl_priv *wl, struct ether_addr *mac); +extern s32 +wl_cfgp2p_ifchange(struct wl_priv *wl, struct ether_addr *mac, u8 if_type, chanspec_t chspec); + +extern s32 +wl_cfgp2p_ifidx(struct wl_priv *wl, struct ether_addr *mac, s32 *index); + +extern s32 +wl_cfgp2p_init_discovery(struct wl_priv *wl); +extern s32 +wl_cfgp2p_enable_discovery(struct wl_priv *wl, struct net_device *dev, const u8 *ie, u32 ie_len); +extern s32 +wl_cfgp2p_disable_discovery(struct wl_priv *wl); +extern s32 +wl_cfgp2p_escan(struct wl_priv *wl, struct net_device *dev, u16 active, u32 num_chans, + u16 *channels, + s32 search_state, u16 action, u32 bssidx); + +extern s32 +wl_cfgp2p_act_frm_search(struct wl_priv *wl, struct net_device *ndev, + s32 bssidx, s32 channel); + +extern wpa_ie_fixed_t * +wl_cfgp2p_find_wpaie(u8 *parse, u32 len); + +extern wpa_ie_fixed_t * +wl_cfgp2p_find_wpsie(u8 *parse, u32 len); + +extern wifi_p2p_ie_t * +wl_cfgp2p_find_p2pie(u8 *parse, u32 len); + +extern wifi_wfd_ie_t * +wl_cfgp2p_find_wfdie(u8 *parse, u32 len); + +extern s32 +wl_cfgp2p_set_management_ie(struct wl_priv *wl, struct net_device *ndev, s32 bssidx, + s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len); +extern s32 +wl_cfgp2p_clear_management_ie(struct wl_priv *wl, s32 bssidx); + +extern s32 +wl_cfgp2p_find_idx(struct wl_priv *wl, struct net_device *ndev); + + +extern s32 +wl_cfgp2p_listen_complete(struct wl_priv *wl, struct net_device *ndev, + const wl_event_msg_t *e, void *data); +extern s32 +wl_cfgp2p_discover_listen(struct wl_priv *wl, s32 channel, u32 duration_ms); + +extern s32 +wl_cfgp2p_discover_enable_search(struct wl_priv *wl, u8 enable); + +extern s32 +wl_cfgp2p_action_tx_complete(struct wl_priv *wl, struct net_device *ndev, + const wl_event_msg_t *e, void *data); +extern s32 +wl_cfgp2p_tx_action_frame(struct wl_priv *wl, struct net_device *dev, + wl_af_params_t *af_params, s32 bssidx); + +extern void +wl_cfgp2p_generate_bss_mac(struct ether_addr *primary_addr, struct ether_addr *out_dev_addr, + 
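The wl_*_p2p_status macros above keep the whole discovery state in one unsigned long and manipulate it with the kernel's atomic bit helpers. A user-space sketch of the same idea with plain masks (not atomic, and only a subset of the enum is mirrored):

#include <stdio.h>

/* Subset of the wl_cfgp2p_status bit positions used above. */
enum {
    ST_DISCOVERY_ON = 0,
    ST_SEARCH_ENABLED,
    ST_LISTEN_EXPIRED,
    ST_ACTION_TX_COMPLETED,
};

#define ST_SET(word, bit)   ((word) |=  (1UL << (bit)))   /* like set_bit()    */
#define ST_CLR(word, bit)   ((word) &= ~(1UL << (bit)))   /* like clear_bit()  */
#define ST_CHG(word, bit)   ((word) ^=  (1UL << (bit)))   /* like change_bit() */
#define ST_TEST(word, bit)  (((word) >> (bit)) & 1UL)     /* like test_bit()   */

int main(void)
{
    unsigned long status = 0;

    ST_SET(status, ST_DISCOVERY_ON);
    ST_CHG(status, ST_SEARCH_ENABLED);      /* toggle, as wl_chg_p2p_status() does */
    printf("discovery=%lu search=%lu listen_expired=%lu\n",
           ST_TEST(status, ST_DISCOVERY_ON),
           ST_TEST(status, ST_SEARCH_ENABLED),
           ST_TEST(status, ST_LISTEN_EXPIRED));
    ST_CLR(status, ST_DISCOVERY_ON);
    return 0;
}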
struct ether_addr *out_int_addr); + +extern void +wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id); +extern bool +wl_cfgp2p_bss_isup(struct net_device *ndev, int bsscfg_idx); + +extern s32 +wl_cfgp2p_bss(struct wl_priv *wl, struct net_device *ndev, s32 bsscfg_idx, s32 up); + + +extern s32 +wl_cfgp2p_supported(struct wl_priv *wl, struct net_device *ndev); + +extern s32 +wl_cfgp2p_down(struct wl_priv *wl); + +extern s32 +wl_cfgp2p_set_p2p_noa(struct wl_priv *wl, struct net_device *ndev, char* buf, int len); + +extern s32 +wl_cfgp2p_get_p2p_noa(struct wl_priv *wl, struct net_device *ndev, char* buf, int len); + +extern s32 +wl_cfgp2p_set_p2p_ps(struct wl_priv *wl, struct net_device *ndev, char* buf, int len); + +extern u8 * +wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id); + +extern u8 * +wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length); + +extern s32 +wl_cfgp2p_register_ndev(struct wl_priv *wl); + +extern s32 +wl_cfgp2p_unregister_ndev(struct wl_priv *wl); + +/* WiFi Direct */ +#define SOCIAL_CHAN_1 1 +#define SOCIAL_CHAN_2 6 +#define SOCIAL_CHAN_3 11 +#define SOCIAL_CHAN_CNT 3 +#define WL_P2P_WILDCARD_SSID "DIRECT-" +#define WL_P2P_WILDCARD_SSID_LEN 7 +#define WL_P2P_INTERFACE_PREFIX "p2p" +#define WL_P2P_TEMP_CHAN "11" + +/* If the provision discovery is for JOIN operations, then we need not do an internal scan to find GO */ +#define IS_PROV_DISC_WITHOUT_GROUP_ID(p2p_ie, len) (wl_cfgp2p_retreive_p2pattrib(p2p_ie, P2P_SEID_GROUP_ID) == NULL ) + +#define IS_GAS_REQ(frame, len) (wl_cfgp2p_is_gas_action(frame, len) && \ + ((frame->action == P2PSD_ACTION_ID_GAS_IREQ) || \ + (frame->action == P2PSD_ACTION_ID_GAS_CREQ))) +#define IS_P2P_PUB_ACT_REQ(frame, p2p_ie, len) (wl_cfgp2p_is_pub_action(frame, len) && \ + ((frame->subtype == P2P_PAF_GON_REQ) || \ + (frame->subtype == P2P_PAF_INVITE_REQ) || \ + ((frame->subtype == P2P_PAF_PROVDIS_REQ) && IS_PROV_DISC_WITHOUT_GROUP_ID(p2p_ie, len)))) +#define IS_P2P_SOCIAL(ch) ((ch == SOCIAL_CHAN_1) || (ch == SOCIAL_CHAN_2) || (ch == SOCIAL_CHAN_3)) +#define IS_P2P_SSID(ssid) (memcmp(ssid, WL_P2P_WILDCARD_SSID, WL_P2P_WILDCARD_SSID_LEN) == 0) +#endif /* _wl_cfgp2p_h_ */ diff --git a/drivers/net/wireless/bcmdhd/wl_dbg.h b/drivers/net/wireless/bcmdhd/wl_dbg.h new file mode 100644 index 0000000000000..0b99557cbe8d4 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_dbg.h @@ -0,0 +1,49 @@ +/* + * Minimal debug/trace/assert driver definitions for + * Broadcom 802.11 Networking Adapter. + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
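IS_P2P_SSID and IS_P2P_SOCIAL above recognise a P2P network purely by the "DIRECT-" SSID prefix and the 1/6/11 social channels. A standalone sketch of both checks:

#include <stdio.h>
#include <string.h>

#define WL_P2P_WILDCARD_SSID     "DIRECT-"
#define WL_P2P_WILDCARD_SSID_LEN 7

/* A P2P network is identified by its SSID prefix, as in IS_P2P_SSID(). */
static int is_p2p_ssid(const char *ssid, size_t ssid_len)
{
    return ssid_len >= WL_P2P_WILDCARD_SSID_LEN &&
           memcmp(ssid, WL_P2P_WILDCARD_SSID, WL_P2P_WILDCARD_SSID_LEN) == 0;
}

/* Social channels, as in IS_P2P_SOCIAL(). */
static int is_p2p_social(int ch)
{
    return ch == 1 || ch == 6 || ch == 11;
}

int main(void)
{
    const char *a = "DIRECT-ab-printer";
    const char *b = "HomeNetwork";

    printf("%s -> %d, %s -> %d, ch6 social=%d\n",
           a, is_p2p_ssid(a, strlen(a)),
           b, is_p2p_ssid(b, strlen(b)),
           is_p2p_social(6));
    return 0;
}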
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: wl_dbg.h,v 1.115.6.3 2010-12-15 21:42:23 Exp $ + */ + + + +#ifndef _wl_dbg_h_ +#define _wl_dbg_h_ + + +extern uint32 wl_msg_level; +extern uint32 wl_msg_level2; + +#define WL_PRINT(args) printf args + + + +#define WL_NONE(args) + +#define WL_ERROR(args) +#define WL_TRACE(args) + + +extern uint32 wl_msg_level; +extern uint32 wl_msg_level2; +#endif diff --git a/drivers/net/wireless/bcmdhd/wl_iw.c b/drivers/net/wireless/bcmdhd/wl_iw.c new file mode 100644 index 0000000000000..d60c21c03671b --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_iw.c @@ -0,0 +1,8894 @@ +/* + * Linux Wireless Extensions support + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: wl_iw.c,v 1.132.2.18 2011-02-05 01:44:47 $ + */ + +#include + +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include +#include + +typedef void wlc_info_t; +typedef void wl_info_t; +typedef const struct si_pub si_t; +#include + +#include +#include +#include +#define WL_ERROR(x) printf x +#define WL_TRACE(x) +#define WL_ASSOC(x) +#define WL_INFORM(x) +#define WL_WSEC(x) +#define WL_SCAN(x) + + +#ifdef PNO_SET_DEBUG +#define WL_PNO(x) printf x +#else +#define WL_PNO(x) +#endif + + +#define JF2MS jiffies_to_msecs(jiffies) + +#ifdef COEX_DBG +#define WL_TRACE_COEX(x) printf("TS:%lu ", JF2MS); \ + printf x +#else +#define WL_TRACE_COEX(x) +#endif + +#ifdef SCAN_DBG +#define WL_TRACE_SCAN(x) printf("TS:%lu ", JF2MS); \ + printf x +#else +#define WL_TRACE_SCAN(x) +#endif + + +#include + + + + +#define IW_WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED)) + +#include + +#define WL_IW_USE_ISCAN 1 +#define ENABLE_ACTIVE_PASSIVE_SCAN_SUPPRESS 1 + +#ifdef OEM_CHROMIUMOS +bool g_set_essid_before_scan = TRUE; +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 + struct mutex g_wl_ss_scan_lock; +#endif + +#if defined(SOFTAP) +#define WL_SOFTAP(x) +static struct net_device *priv_dev; +extern bool ap_cfg_running; +extern bool ap_fw_loaded; +struct net_device *ap_net_dev = NULL; +tsk_ctl_t ap_eth_ctl; +static int wl_iw_set_ap_security(struct net_device *dev, struct ap_profile *ap); +static int wl_iw_softap_deassoc_stations(struct net_device *dev, u8 *mac); +#endif + + +#define WL_IW_IOCTL_CALL(func_call) \ + do { \ + func_call; \ + } while (0) + +#define RETURN_IF_EXTRA_NULL(extra) \ + if (!extra) { \ + WL_ERROR(("%s: error : extra is null pointer\n", __FUNCTION__)); \ + return -EINVAL; \ + } + +static int g_onoff = G_WLAN_SET_ON; +wl_iw_extra_params_t g_wl_iw_params; + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 + +static struct mutex wl_cache_lock; +static struct mutex wl_softap_lock; + +#define DHD_OS_MUTEX_INIT(a) mutex_init(a) +#define DHD_OS_MUTEX_LOCK(a) mutex_lock(a) +#define DHD_OS_MUTEX_UNLOCK(a) mutex_unlock(a) + +#else + +#define DHD_OS_MUTEX_INIT(a) +#define DHD_OS_MUTEX_LOCK(a) +#define DHD_OS_MUTEX_UNLOCK(a) + +#endif + +#include +extern void dhd_customer_gpio_wlan_ctrl(int onoff); +extern uint dhd_dev_reset(struct net_device *dev, uint8 flag); +extern int dhd_dev_init_ioctl(struct net_device *dev); + +uint wl_msg_level = WL_ERROR_VAL; + +#define MAX_WLIW_IOCTL_LEN 1024 + + +#define htod32(i) i +#define htod16(i) i +#define dtoh32(i) i +#define dtoh16(i) i +#define htodchanspec(i) i +#define dtohchanspec(i) i + +extern struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev); +extern int dhd_wait_pend8021x(struct net_device *dev); + +#if WIRELESS_EXT < 19 +#define IW_IOCTL_IDX(cmd) ((cmd) - SIOCIWFIRST) +#define IW_EVENT_IDX(cmd) ((cmd) - IWEVFIRST) +#endif + +static void *g_scan = NULL; +static volatile uint g_scan_specified_ssid; +static wlc_ssid_t g_specific_ssid; + +static wlc_ssid_t g_ssid; + +#ifdef CONFIG_WPS2 +static char *g_wps_probe_req_ie; +static int g_wps_probe_req_ie_len; +#endif + +bool btcoex_is_sco_active(struct net_device *dev); +static wl_iw_ss_cache_ctrl_t g_ss_cache_ctrl; +#if defined(CONFIG_FIRST_SCAN) +static volatile uint g_first_broadcast_scan; +static volatile uint g_first_counter_scans; +#define MAX_ALLOWED_BLOCK_SCAN_FROM_FIRST_SCAN 3 +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define DAEMONIZE(a) daemonize(a); \ + 
allow_signal(SIGKILL); \ + allow_signal(SIGTERM); +#else +#define RAISE_RX_SOFTIRQ() \ + cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ) +#define DAEMONIZE(a) daemonize(); \ + do { if (a) \ + strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \ + } while (0); +#endif + +#if defined(WL_IW_USE_ISCAN) +#if !defined(CSCAN) +static void wl_iw_free_ss_cache(void); +static int wl_iw_run_ss_cache_timer(int kick_off); +#endif +#if defined(CONFIG_FIRST_SCAN) +int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag); +#endif +static int dev_wlc_bufvar_set(struct net_device *dev, char *name, char *buf, int len); +#define ISCAN_STATE_IDLE 0 +#define ISCAN_STATE_SCANING 1 + + +#define WLC_IW_ISCAN_MAXLEN 2048 +typedef struct iscan_buf { + struct iscan_buf * next; + char iscan_buf[WLC_IW_ISCAN_MAXLEN]; +} iscan_buf_t; + +typedef struct iscan_info { + struct net_device *dev; + struct timer_list timer; + uint32 timer_ms; + uint32 timer_on; + int iscan_state; + iscan_buf_t * list_hdr; + iscan_buf_t * list_cur; + + + tsk_ctl_t tsk_ctl; + + uint32 scan_flag; +#if defined CSCAN + char ioctlbuf[WLC_IOCTL_MEDLEN]; +#else + char ioctlbuf[WLC_IOCTL_SMLEN]; +#endif + + wl_iscan_params_t *iscan_ex_params_p; + int iscan_ex_param_size; +} iscan_info_t; + + + +#define COEX_DHCP 1 +#ifdef COEX_DHCP + +#define BT_DHCP_eSCO_FIX +#define BT_DHCP_USE_FLAGS +#define BT_DHCP_OPPORTUNITY_WINDOW_TIME 2500 +#define BT_DHCP_FLAG_FORCE_TIME 5500 + + + +static int wl_iw_set_btcoex_dhcp( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +); + +static void wl_iw_bt_flag_set(struct net_device *dev, bool set); +static void wl_iw_bt_release(void); + +typedef enum bt_coex_status { + BT_DHCP_IDLE = 0, + BT_DHCP_START, + BT_DHCP_OPPORTUNITY_WINDOW, + BT_DHCP_FLAG_FORCE_TIMEOUT +} coex_status_t; + + +typedef struct bt_info { + struct net_device *dev; + struct timer_list timer; + uint32 timer_ms; + uint32 timer_on; + uint32 ts_dhcp_start; + uint32 ts_dhcp_ok; + bool dhcp_done; + int bt_state; + + + tsk_ctl_t tsk_ctl; + +} bt_info_t; + +bt_info_t *g_bt = NULL; +static void wl_iw_bt_timerfunc(ulong data); +#endif +iscan_info_t *g_iscan = NULL; +void dhd_print_buf(void *pbuf, int len, int bytes_per_line); +static void wl_iw_timerfunc(ulong data); +static void wl_iw_set_event_mask(struct net_device *dev); +static int +wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action); +#endif + +static int +wl_iw_set_scan( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +); + +#ifndef CSCAN +static int +wl_iw_get_scan( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +); + +static uint +wl_iw_get_scan_prep( + wl_scan_results_t *list, + struct iw_request_info *info, + char *extra, + short max_size +); +#endif + +static void +swap_key_from_BE( + wl_wsec_key_t *key +) +{ + key->index = htod32(key->index); + key->len = htod32(key->len); + key->algo = htod32(key->algo); + key->flags = htod32(key->flags); + key->rxiv.hi = htod32(key->rxiv.hi); + key->rxiv.lo = htod16(key->rxiv.lo); + key->iv_initialized = htod32(key->iv_initialized); +} + +static void +swap_key_to_BE( + wl_wsec_key_t *key +) +{ + key->index = dtoh32(key->index); + key->len = dtoh32(key->len); + key->algo = dtoh32(key->algo); + key->flags = dtoh32(key->flags); + key->rxiv.hi = dtoh32(key->rxiv.hi); + key->rxiv.lo = dtoh16(key->rxiv.lo); + key->iv_initialized = dtoh32(key->iv_initialized); +} + 
+static int +dev_wlc_ioctl( + struct net_device *dev, + int cmd, + void *arg, + int len +) +{ + struct ifreq ifr; + wl_ioctl_t ioc; + mm_segment_t fs; + int ret = -EINVAL; + + if (!dev) { + WL_ERROR(("%s: dev is null\n", __FUNCTION__)); + return ret; + } + + net_os_wake_lock(dev); + + WL_INFORM(("%s, PID:%x: send Local IOCTL -> dhd: cmd:0x%x, buf:%p, len:%d ,\n", + __FUNCTION__, current->pid, cmd, arg, len)); + + if (g_onoff == G_WLAN_SET_ON) { + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = cmd; + ioc.buf = arg; + ioc.len = len; + + strcpy(ifr.ifr_name, dev->name); + ifr.ifr_data = (caddr_t) &ioc; + + + ret = dev_open(dev); + if (ret) { + WL_ERROR(("%s: Error dev_open: %d\n", __func__, ret)); + net_os_wake_unlock(dev); + return ret; + } + + fs = get_fs(); + set_fs(get_ds()); +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31) + ret = dev->do_ioctl(dev, &ifr, SIOCDEVPRIVATE); +#else + ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, SIOCDEVPRIVATE); +#endif + set_fs(fs); + } + else { + WL_TRACE(("%s: call after driver stop : ignored\n", __FUNCTION__)); + } + + net_os_wake_unlock(dev); + + return ret; +} + + +static int +dev_wlc_intvar_get_reg( + struct net_device *dev, + char *name, + uint reg, + int *retval) +{ + union { + char buf[WLC_IOCTL_SMLEN]; + int val; + } var; + int error; + + uint len; + len = bcm_mkiovar(name, (char *)(®), sizeof(reg), (char *)(&var), sizeof(var.buf)); + ASSERT(len); + error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)&var, len); + + *retval = dtoh32(var.val); + return (error); +} + + +static int +dev_wlc_intvar_set_reg( + struct net_device *dev, + char *name, + char *addr, + char * val) +{ + char reg_addr[8]; + + memset(reg_addr, 0, sizeof(reg_addr)); + memcpy((char *)®_addr[0], (char *)addr, 4); + memcpy((char *)®_addr[4], (char *)val, 4); + + return (dev_wlc_bufvar_set(dev, name, (char *)®_addr[0], sizeof(reg_addr))); +} + + + + +static int +dev_wlc_intvar_set( + struct net_device *dev, + char *name, + int val) +{ + char buf[WLC_IOCTL_SMLEN]; + uint len; + + val = htod32(val); + len = bcm_mkiovar(name, (char *)(&val), sizeof(val), buf, sizeof(buf)); + ASSERT(len); + + return (dev_wlc_ioctl(dev, WLC_SET_VAR, buf, len)); +} + +#if defined(WL_IW_USE_ISCAN) +static int +dev_iw_iovar_setbuf( + struct net_device *dev, + char *iovar, + void *param, + int paramlen, + void *bufptr, + int buflen) +{ + int iolen; + + iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen); + ASSERT(iolen); + + if (iolen == 0) + return 0; + + return (dev_wlc_ioctl(dev, WLC_SET_VAR, bufptr, iolen)); +} + +static int +dev_iw_iovar_getbuf( + struct net_device *dev, + char *iovar, + void *param, + int paramlen, + void *bufptr, + int buflen) +{ + int iolen; + + iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen); + ASSERT(iolen); + + return (dev_wlc_ioctl(dev, WLC_GET_VAR, bufptr, buflen)); +} +#endif + + +#if WIRELESS_EXT > 17 +static int +dev_wlc_bufvar_set( + struct net_device *dev, + char *name, + char *buf, int len) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) + char ioctlbuf[MAX_WLIW_IOCTL_LEN]; +#else + static char ioctlbuf[MAX_WLIW_IOCTL_LEN]; +#endif + uint buflen; + + buflen = bcm_mkiovar(name, buf, len, ioctlbuf, sizeof(ioctlbuf)); + ASSERT(buflen); + + return (dev_wlc_ioctl(dev, WLC_SET_VAR, ioctlbuf, buflen)); +} +#endif + + +static int +dev_wlc_bufvar_get( + struct net_device *dev, + char *name, + char *buf, int buflen) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) + char ioctlbuf[MAX_WLIW_IOCTL_LEN]; +#else + static char ioctlbuf[MAX_WLIW_IOCTL_LEN]; 
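All of these helpers hand WLC_GET_VAR/WLC_SET_VAR a buffer built by bcm_mkiovar(): conventionally the iovar name including its NUL terminator followed by the raw parameter bytes, with 0 returned when the buffer is too small (hence the ASSERTs above). The sketch below models that packing; the authoritative implementation lives in the Broadcom bcmutils code, so treat the layout here as an assumption:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Pack "name\0" + data into buf, returning the total length or 0 if the
 * buffer is too small. */
static unsigned mk_iovar(const char *name, const void *data, unsigned datalen,
                         void *buf, unsigned buflen)
{
    unsigned namelen = (unsigned)strlen(name) + 1;   /* include the NUL */

    if (namelen + datalen > buflen)
        return 0;
    memcpy(buf, name, namelen);
    memcpy((uint8_t *)buf + namelen, data, datalen);
    return namelen + datalen;
}

int main(void)
{
    char buf[64];
    int val = 1;                   /* e.g. an integer iovar value */
    unsigned len = mk_iovar("mpc", &val, sizeof(val), buf, sizeof(buf));

    printf("iovar buffer length = %u\n", len);   /* 4 (name+NUL) + 4 (int) = 8 */
    return 0;
}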
+#endif + int error; + uint len; + + len = bcm_mkiovar(name, NULL, 0, ioctlbuf, sizeof(ioctlbuf)); + ASSERT(len); + error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)ioctlbuf, MAX_WLIW_IOCTL_LEN); + if (!error) + bcopy(ioctlbuf, buf, buflen); + + return (error); +} + + + +static int +dev_wlc_intvar_get( + struct net_device *dev, + char *name, + int *retval) +{ + union { + char buf[WLC_IOCTL_SMLEN]; + int val; + } var; + int error; + + uint len; + uint data_null; + + len = bcm_mkiovar(name, (char *)(&data_null), 0, (char *)(&var), sizeof(var.buf)); + ASSERT(len); + error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)&var, len); + + *retval = dtoh32(var.val); + + return (error); +} + + +#if WIRELESS_EXT > 12 +static int +wl_iw_set_active_scan( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int as = 0; + int error = 0; + char *p = extra; + +#if defined(WL_IW_USE_ISCAN) + if (g_iscan->iscan_state == ISCAN_STATE_IDLE) +#endif + error = dev_wlc_ioctl(dev, WLC_SET_PASSIVE_SCAN, &as, sizeof(as)); +#if defined(WL_IW_USE_ISCAN) + else + g_iscan->scan_flag = as; +#endif + p += snprintf(p, MAX_WX_STRING, "OK"); + + wrqu->data.length = p - extra + 1; + return error; +} + +static int +wl_iw_set_passive_scan( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int ps = 1; + int error = 0; + char *p = extra; + +#if defined(WL_IW_USE_ISCAN) + if (g_iscan->iscan_state == ISCAN_STATE_IDLE) { +#endif + + + if (g_scan_specified_ssid == 0) { + error = dev_wlc_ioctl(dev, WLC_SET_PASSIVE_SCAN, &ps, sizeof(ps)); + } +#if defined(WL_IW_USE_ISCAN) + } + else + g_iscan->scan_flag = ps; +#endif + + p += snprintf(p, MAX_WX_STRING, "OK"); + + wrqu->data.length = p - extra + 1; + return error; +} + + +static int +wl_iw_set_txpower( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int error = 0; + char *p = extra; + int txpower = -1; + + txpower = bcm_atoi(extra + strlen(TXPOWER_SET_CMD) + 1); + if ((txpower >= 0) && (txpower <= 127)) + { + txpower |= WL_TXPWR_OVERRIDE; + txpower = htod32(txpower); + + error = dev_wlc_intvar_set(dev, "qtxpower", txpower); + p += snprintf(p, MAX_WX_STRING, "OK"); + WL_TRACE(("%s: set TXpower 0x%X is OK\n", __FUNCTION__, txpower)); + } else { + WL_ERROR(("%s: set tx power failed\n", __FUNCTION__)); + p += snprintf(p, MAX_WX_STRING, "FAIL"); + } + + wrqu->data.length = p - extra + 1; + return error; +} + +static int +wl_iw_get_macaddr( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int error; + char buf[128]; + struct ether_addr *id; + char *p = extra; + + + strcpy(buf, "cur_etheraddr"); + error = dev_wlc_ioctl(dev, WLC_GET_VAR, buf, sizeof(buf)); + id = (struct ether_addr *) buf; + p += snprintf(p, MAX_WX_STRING, "Macaddr = %02X:%02X:%02X:%02X:%02X:%02X\n", + id->octet[0], id->octet[1], id->octet[2], + id->octet[3], id->octet[4], id->octet[5]); + wrqu->data.length = p - extra + 1; + + return error; +} + + + +static int +wl_iw_set_country( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + char country_code[WLC_CNTRY_BUF_SZ]; + int error = 0; + char *p = extra; + int country_offset; + int country_code_size; + wl_country_t cspec = {{0}, 0, {0}}; + char smbuf[WLC_IOCTL_SMLEN]; + scb_val_t scbval; + + cspec.rev = -1; + memset(country_code, 0, sizeof(country_code)); + memset(smbuf, 0, sizeof(smbuf)); + + + country_offset = 
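The private command handlers above all reply the same way: format "OK", "FAIL" or a value into the extra buffer and report the string length plus the terminating NUL through wrqu->data.length. A minimal sketch of that convention with a hypothetical wext_reply() helper:

#include <stdio.h>
#include <string.h>

#define MAX_WX_STRING 80

/* Mirror of the reply pattern used by the handlers above: write the status
 * into the 'extra' buffer and return the length to report back to user
 * space (p - extra + 1, i.e. including the terminating NUL). */
static int wext_reply(char *extra, const char *status)
{
    char *p = extra;

    p += snprintf(p, MAX_WX_STRING, "%s", status);
    return (int)(p - extra) + 1;
}

int main(void)
{
    char extra[MAX_WX_STRING];
    int len = wext_reply(extra, "OK");

    printf("reply \"%s\", data.length=%d\n", extra, len);
    return 0;
}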
strcspn(extra, " "); + country_code_size = strlen(extra) - country_offset; + + + if (country_offset != 0) { + strncpy(country_code, extra + country_offset +1, + MIN(country_code_size, sizeof(country_code))); + + + bzero(&scbval, sizeof(scb_val_t)); + if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)))) { + WL_ERROR(("%s: set country failed due to Disassoc error\n", __FUNCTION__)); + goto exit_failed; + } + + memcpy(cspec.country_abbrev, country_code, WLC_CNTRY_BUF_SZ); + memcpy(cspec.ccode, country_code, WLC_CNTRY_BUF_SZ); + + get_customized_country_code((char *)&cspec.country_abbrev, &cspec); + + + if ((error = dev_iw_iovar_setbuf(dev, "country", &cspec, + sizeof(cspec), smbuf, sizeof(smbuf))) >= 0) { + p += snprintf(p, MAX_WX_STRING, "OK"); + WL_ERROR(("%s: set country for %s as %s rev %d is OK\n", + __FUNCTION__, country_code, cspec.ccode, cspec.rev)); + dhd_bus_country_set(dev, &cspec); + goto exit; + } + } + + WL_ERROR(("%s: set country for %s as %s rev %d failed\n", + __FUNCTION__, country_code, cspec.ccode, cspec.rev)); + +exit_failed: + p += snprintf(p, MAX_WX_STRING, "FAIL"); + +exit: + wrqu->data.length = p - extra + 1; + return error; +} + +static int +wl_iw_set_power_mode( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int error = 0; + char *p = extra; + static int pm = PM_FAST; + int pm_local = PM_OFF; + char powermode_val = 0; + + WL_TRACE_COEX(("%s: DHCP session cmd:%s\n", __FUNCTION__, extra)); + + strncpy((char *)&powermode_val, extra + strlen("POWERMODE") +1, 1); + + if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) { + + WL_TRACE(("%s: DHCP session starts\n", __FUNCTION__)); + + dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm)); + dev_wlc_ioctl(dev, WLC_SET_PM, &pm_local, sizeof(pm_local)); + + + net_os_set_packet_filter(dev, 0); + +#ifdef COEX_DHCP + g_bt->ts_dhcp_start = JF2MS; + g_bt->dhcp_done = FALSE; + WL_TRACE_COEX(("%s: DHCP start, pm:%d changed to pm:%d\n", + __FUNCTION__, pm, pm_local)); + +#endif + } else if (strnicmp((char *)&powermode_val, "0", strlen("0")) == 0) { + + + dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm)); + + + net_os_set_packet_filter(dev, 1); + +#ifdef COEX_DHCP + g_bt->dhcp_done = TRUE; + g_bt->ts_dhcp_ok = JF2MS; + WL_TRACE_COEX(("%s: DHCP done for:%d ms, restored pm:%d\n", + __FUNCTION__, (g_bt->ts_dhcp_ok - g_bt->ts_dhcp_start), pm)); +#endif + + } else { + WL_ERROR(("%s Unkwown yet power setting, ignored\n", + __FUNCTION__)); + } + + p += snprintf(p, MAX_WX_STRING, "OK"); + + wrqu->data.length = p - extra + 1; + + return error; +} + + +bool btcoex_is_sco_active(struct net_device *dev) +{ + int ioc_res = 0; + bool res = FALSE; + int sco_id_cnt = 0; + int param27; + int i; + + for (i = 0; i < 12; i++) { + + ioc_res = dev_wlc_intvar_get_reg(dev, "btc_params", 27, ¶m27); + + WL_TRACE_COEX(("%s, sample[%d], btc params: 27:%x\n", + __FUNCTION__, i, param27)); + + if (ioc_res < 0) { + WL_ERROR(("%s ioc read btc params error\n", __FUNCTION__)); + break; + } + + if ((param27 & 0x6) == 2) { + sco_id_cnt++; + } + + if (sco_id_cnt > 2) { + WL_TRACE_COEX(("%s, sco/esco detected, pkt id_cnt:%d samples:%d\n", + __FUNCTION__, sco_id_cnt, i)); + res = TRUE; + break; + } + + msleep(5); + } + + return res; +} + +#if defined(BT_DHCP_eSCO_FIX) + +static int set_btc_esco_params(struct net_device *dev, bool trump_sco) +{ + static bool saved_status = FALSE; + + char buf_reg50va_dhcp_on[8] = { 50, 00, 00, 00, 0x22, 0x80, 0x00, 0x00 }; + char buf_reg51va_dhcp_on[8] = { 51, 
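btcoex_is_sco_active above samples BT coex parameter 27 up to twelve times, 5 ms apart, and declares an SCO/eSCO link active once more than two samples have (value & 0x6) == 2. The threshold logic in isolation, with the register reads replaced by a caller-supplied sample array:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Same threshold as above: SCO/eSCO is considered active once more than
 * two samples show (param27 & 0x6) == 2. */
static int sco_active_from_samples(const uint32_t *samples, size_t n)
{
    size_t i;
    int sco_id_cnt = 0;

    for (i = 0; i < n; i++) {
        if ((samples[i] & 0x6) == 2)
            sco_id_cnt++;
        if (sco_id_cnt > 2)
            return 1;
    }
    return 0;
}

int main(void)
{
    const uint32_t quiet[12] = { 0 };
    const uint32_t busy[12]  = { 2, 0, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0 };

    printf("quiet=%d busy=%d\n",
           sco_active_from_samples(quiet, 12),
           sco_active_from_samples(busy, 12));
    return 0;
}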
+ char buf_reg64va_dhcp_on[8] = { 64, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ char buf_reg65va_dhcp_on[8] = { 65, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ char buf_reg71va_dhcp_on[8] = { 71, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+
+ uint32 regaddr;
+ static uint32 saved_reg50;
+ static uint32 saved_reg51;
+ static uint32 saved_reg64;
+ static uint32 saved_reg65;
+ static uint32 saved_reg71;
+
+ if (trump_sco) {
+
+ /* Save the current btc_params 50/51/64/65/71, then apply the SCO/eSCO override values */
+ WL_TRACE_COEX(("Do new SCO/eSCO coex algo {save & override} \n"));
+
+
+ if ((!dev_wlc_intvar_get_reg(dev, "btc_params", 50, &saved_reg50)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 51, &saved_reg51)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 64, &saved_reg64)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 65, &saved_reg65)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 71, &saved_reg71))) {
+
+ saved_status = TRUE;
+ WL_TRACE_COEX(("%s saved bt_params[50,51,64,65,71]:"
+ " 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ __FUNCTION__, saved_reg50, saved_reg51,
+ saved_reg64, saved_reg65, saved_reg71));
+
+ } else {
+ WL_ERROR((":%s: save btc_params failed\n",
+ __FUNCTION__));
+ saved_status = FALSE;
+ return -1;
+ }
+
+ WL_TRACE_COEX(("override with [50,51,64,65,71]:"
+ " 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ *(u32 *)(buf_reg50va_dhcp_on+4),
+ *(u32 *)(buf_reg51va_dhcp_on+4),
+ *(u32 *)(buf_reg64va_dhcp_on+4),
+ *(u32 *)(buf_reg65va_dhcp_on+4),
+ *(u32 *)(buf_reg71va_dhcp_on+4)));
+
+ dev_wlc_bufvar_set(dev, "btc_params", (char *)&buf_reg50va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params", (char *)&buf_reg51va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params", (char *)&buf_reg64va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params", (char *)&buf_reg65va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params", (char *)&buf_reg71va_dhcp_on[0], 8);
+
+ saved_status = TRUE;
+
+ } else if (saved_status) {
+
+ /* Restore the btc_params values saved above */
+ WL_TRACE_COEX(("Do new SCO/eSCO coex algo {restore} \n"));
+
+ regaddr = 50;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg50);
+ regaddr = 51;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg51);
+ regaddr = 64;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg64);
+ regaddr = 65;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg65);
+ regaddr = 71;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg71);
+
+ WL_TRACE_COEX(("restore bt_params[50,51,64,65,71]: 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ saved_reg50, saved_reg51, saved_reg64,
+ saved_reg65, saved_reg71));
+
+ saved_status = FALSE;
+ } else {
+ WL_ERROR((":%s attempt to restore BTCOEX params that were not saved\n",
+ __FUNCTION__));
+ return -1;
+ }
+ return 0;
+}
+#endif
+
+
+static int
+wl_iw_get_power_mode(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error = 0;
+ int pm_local;
+ char *p = extra;
+
+ error = dev_wlc_ioctl(dev, WLC_GET_PM, &pm_local, sizeof(pm_local));
+ if (!error) {
+ WL_TRACE(("%s: Powermode = %d\n", __func__, pm_local));
+ if (pm_local == PM_OFF)
+ pm_local = 1;
+ else
+ pm_local = 0;
+ p += snprintf(p, MAX_WX_STRING, "powermode = %d", pm_local);
+ }
+ else {
+ WL_TRACE(("%s: Error = %d\n", __func__, error));
+ p += snprintf(p, MAX_WX_STRING, "FAIL");
+ }
+ wrqu->data.length = p - extra + 1;
+ return error;
+}
+
+static int
+wl_iw_set_btcoex_dhcp(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
+{
+ int error = 0;
+ char *p = extra;
+ char powermode_val = 0;
+ char buf_reg66va_dhcp_on[8] = { 66, 00, 00, 00, 0x10, 0x27, 0x00, 0x00 };
+ char buf_reg41va_dhcp_on[8] = { 41, 00, 00, 00, 0x33, 0x00, 0x00, 0x00 };
+ char buf_reg68va_dhcp_on[8] = { 68, 00, 00, 00, 0x90, 0x01, 0x00, 0x00 };
+
+ uint32 regaddr;
+ static uint32 saved_reg66;
+ static uint32 saved_reg41;
+ static uint32 saved_reg68;
+ static bool saved_status = FALSE;
+
+#ifdef COEX_DHCP
+ char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+#endif
+
+
+ strncpy((char *)&powermode_val, extra + strlen("BTCOEXMODE") +1, 1);
+
+ if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) {
+
+ WL_TRACE(("%s: DHCP session starts\n", __FUNCTION__));
+
+ /* Save btc_params 66/41/68 once, then apply DHCP-time coex overrides if a SCO/eSCO link is active */
+ if ((saved_status == FALSE) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 66, &saved_reg66)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 41, &saved_reg41)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 68, &saved_reg68))) {
+ saved_status = TRUE;
+ WL_TRACE(("Saved 0x%x 0x%x 0x%x\n",
+ saved_reg66, saved_reg41, saved_reg68));
+
+
+
+
+#ifdef COEX_DHCP
+
+ if (btcoex_is_sco_active(dev)) {
+
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg66va_dhcp_on[0],
+ sizeof(buf_reg66va_dhcp_on));
+
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg41va_dhcp_on[0],
+ sizeof(buf_reg41va_dhcp_on));
+
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg68va_dhcp_on[0],
+ sizeof(buf_reg68va_dhcp_on));
+ saved_status = TRUE;
+
+ g_bt->bt_state = BT_DHCP_START;
+ g_bt->timer_on = 1;
+ mod_timer(&g_bt->timer, g_bt->timer.expires);
+ WL_TRACE_COEX(("%s enable BT DHCP Timer\n",
+ __FUNCTION__));
+ }
+#endif
+ }
+ else if (saved_status == TRUE) {
+ WL_ERROR(("%s was called w/o DHCP OFF. Continue\n", __FUNCTION__));
+ }
+ }
+ else if (strnicmp((char *)&powermode_val, "2", strlen("2")) == 0) {
+
+ /* DHCP done: stop the BT-DHCP coex timer and restore the saved btc_params */
+
+
+#ifdef COEX_DHCP
+
+ WL_TRACE(("%s disable BT DHCP Timer\n", __FUNCTION__));
+ if (g_bt->timer_on) {
+ g_bt->timer_on = 0;
+ del_timer_sync(&g_bt->timer);
+
+ if (g_bt->bt_state != BT_DHCP_IDLE) {
+
+ WL_TRACE_COEX(("%s bt->bt_state:%d\n",
+ __FUNCTION__, g_bt->bt_state));
+
+ up(&g_bt->tsk_ctl.sema);
+ }
+ }
+
+
+ if (saved_status == TRUE)
+ dev_wlc_bufvar_set(dev, "btc_flags",
+ (char *)&buf_flag7_default[0], sizeof(buf_flag7_default));
+#endif
+
+
+ if (saved_status == TRUE) {
+ regaddr = 66;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg66);
+ regaddr = 41;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg41);
+ regaddr = 68;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg68);
+
+ WL_TRACE_COEX(("restore regs {66,41,68} <- 0x%x 0x%x 0x%x\n",
+ saved_reg66, saved_reg41, saved_reg68));
+ }
+ saved_status = FALSE;
+
+ }
+ else {
+ WL_ERROR(("%s Unknown power setting, ignored\n",
+ __FUNCTION__));
+ }
+
+ p += snprintf(p, MAX_WX_STRING, "OK");
+
+ wrqu->data.length = p - extra + 1;
+
+ return error;
+}
+
+static int
+wl_iw_set_suspend_opt(
+struct net_device *dev,
+struct iw_request_info *info,
+union iwreq_data *wrqu,
+char *extra
+)
+{
+ int suspend_flag;
+ int ret_now;
+ int ret = 0;
+
+ suspend_flag = *(extra + strlen(SETSUSPENDOPT_CMD) + 1) - '0';
+
+ if (suspend_flag != 0)
+ suspend_flag = 1;
+
+ ret_now = net_os_set_suspend_disable(dev, suspend_flag);
+
+ if (ret_now != suspend_flag) {
+ if (!(ret = net_os_set_suspend(dev, ret_now, 1)))
+ WL_ERROR(("%s: Suspend Flag %d -> %d\n",
+ __FUNCTION__, ret_now, suspend_flag));
+ else
+ WL_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
%d\n", __FUNCTION__, ret)); + } + + return ret; +} + +static int +wl_iw_set_suspend_mode( +struct net_device *dev, +struct iw_request_info *info, +union iwreq_data *wrqu, +char *extra +) +{ + int ret = 0; + +#if !defined(CONFIG_HAS_EARLYSUSPEND) || !defined(DHD_USE_EARLYSUSPEND) + int suspend_flag; + + suspend_flag = *(extra + strlen(SETSUSPENDMODE_CMD) + 1) - '0'; + + if (suspend_flag != 0) + suspend_flag = 1; + + if (!(ret = net_os_set_suspend(dev, suspend_flag, 0))) + WL_ERROR(("%s: Suspend Mode %d\n",__FUNCTION__,suspend_flag)); + else + WL_ERROR(("%s: failed %d\n", __FUNCTION__, ret)); +#endif + return ret; +} + +static int +wl_format_ssid(char* ssid_buf, uint8* ssid, int ssid_len) +{ + int i, c; + char *p = ssid_buf; + + if (ssid_len > 32) ssid_len = 32; + + for (i = 0; i < ssid_len; i++) { + c = (int)ssid[i]; + if (c == '\\') { + *p++ = '\\'; + *p++ = '\\'; + } else if (isprint((uchar)c)) { + *p++ = (char)c; + } else { + p += sprintf(p, "\\x%02X", c); + } + } + *p = '\0'; + + return p - ssid_buf; +} + +static int +wl_iw_get_link_speed( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int error = 0; + char *p = extra; + static int link_speed; + + + net_os_wake_lock(dev); + if (g_onoff == G_WLAN_SET_ON) { + error = dev_wlc_ioctl(dev, WLC_GET_RATE, &link_speed, sizeof(link_speed)); + link_speed *= 500000; + } + + p += snprintf(p, MAX_WX_STRING, "LinkSpeed %d", link_speed/1000000); + + wrqu->data.length = p - extra + 1; + + net_os_wake_unlock(dev); + return error; +} + + +static int +wl_iw_get_dtim_skip( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int error = -1; + char *p = extra; + char iovbuf[32]; + + net_os_wake_lock(dev); + if (g_onoff == G_WLAN_SET_ON) { + + memset(iovbuf, 0, sizeof(iovbuf)); + strcpy(iovbuf, "bcn_li_dtim"); + + if ((error = dev_wlc_ioctl(dev, WLC_GET_VAR, + &iovbuf, sizeof(iovbuf))) >= 0) { + + p += snprintf(p, MAX_WX_STRING, "Dtim_skip %d", iovbuf[0]); + WL_TRACE(("%s: get dtim_skip = %d\n", __FUNCTION__, iovbuf[0])); + wrqu->data.length = p - extra + 1; + } + else + WL_ERROR(("%s: get dtim_skip failed code %d\n", + __FUNCTION__, error)); + } + net_os_wake_unlock(dev); + return error; +} + + +static int +wl_iw_set_dtim_skip( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int error = -1; + char *p = extra; + int bcn_li_dtim; + char iovbuf[32]; + + net_os_wake_lock(dev); + if (g_onoff == G_WLAN_SET_ON) { + + bcn_li_dtim = htod32((uint)*(extra + strlen(DTIM_SKIP_SET_CMD) + 1) - '0'); + + if ((bcn_li_dtim >= 0) || ((bcn_li_dtim <= 5))) { + + memset(iovbuf, 0, sizeof(iovbuf)); + bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim, + 4, iovbuf, sizeof(iovbuf)); + + if ((error = dev_wlc_ioctl(dev, WLC_SET_VAR, + &iovbuf, sizeof(iovbuf))) >= 0) { + p += snprintf(p, MAX_WX_STRING, "OK"); + + + net_os_set_dtim_skip(dev, bcn_li_dtim); + + WL_TRACE(("%s: set dtim_skip %d OK\n", __FUNCTION__, + bcn_li_dtim)); + goto exit; + } + else WL_ERROR(("%s: set dtim_skip %d failed code %d\n", + __FUNCTION__, bcn_li_dtim, error)); + } + else WL_ERROR(("%s Incorrect dtim_skip setting %d, ignored\n", + __FUNCTION__, bcn_li_dtim)); + } + + p += snprintf(p, MAX_WX_STRING, "FAIL"); + +exit: + wrqu->data.length = p - extra + 1; + net_os_wake_unlock(dev); + return error; +} + + +static int +wl_iw_get_band( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int error = 
-1; + char *p = extra; + static int band; + + net_os_wake_lock(dev); + + if (g_onoff == G_WLAN_SET_ON) { + error = dev_wlc_ioctl(dev, WLC_GET_BAND, &band, sizeof(band)); + + p += snprintf(p, MAX_WX_STRING, "Band %d", band); + + wrqu->data.length = p - extra + 1; + } + + net_os_wake_unlock(dev); + return error; +} + + +static int +wl_iw_set_band( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int error = -1; + char *p = extra; + uint band; + + net_os_wake_lock(dev); + + if (g_onoff == G_WLAN_SET_ON) { + + band = htod32((uint)*(extra + strlen(BAND_SET_CMD) + 1) - '0'); + + if ((band == WLC_BAND_AUTO) || (band == WLC_BAND_5G) || (band == WLC_BAND_2G)) { + + if ((error = dev_wlc_ioctl(dev, WLC_SET_BAND, + &band, sizeof(band))) >= 0) { + p += snprintf(p, MAX_WX_STRING, "OK"); + WL_TRACE(("%s: set band %d OK\n", __FUNCTION__, band)); + goto exit; + } else { + WL_ERROR(("%s: set band %d failed code %d\n", __FUNCTION__, + band, error)); + } + } else { + WL_ERROR(("%s Incorrect band setting %d, ignored\n", __FUNCTION__, band)); + } + } + + p += snprintf(p, MAX_WX_STRING, "FAIL"); + +exit: + wrqu->data.length = p - extra + 1; + net_os_wake_unlock(dev); + return error; +} + +#ifdef PNO_SUPPORT + +static int +wl_iw_set_pno_reset( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int error = -1; + char *p = extra; + + net_os_wake_lock(dev); + if ((g_onoff == G_WLAN_SET_ON) && (dev != NULL)) { + + if ((error = dhd_dev_pno_reset(dev)) >= 0) { + p += snprintf(p, MAX_WX_STRING, "OK"); + WL_TRACE(("%s: set OK\n", __FUNCTION__)); + goto exit; + } + else WL_ERROR(("%s: failed code %d\n", __FUNCTION__, error)); + } + + p += snprintf(p, MAX_WX_STRING, "FAIL"); + +exit: + wrqu->data.length = p - extra + 1; + net_os_wake_unlock(dev); + return error; +} + + + +static int +wl_iw_set_pno_enable( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int error = -1; + char *p = extra; + int pfn_enabled; + + net_os_wake_lock(dev); + pfn_enabled = htod32((uint)*(extra + strlen(PNOENABLE_SET_CMD) + 1) - '0'); + + if ((g_onoff == G_WLAN_SET_ON) && (dev != NULL)) { + + if ((error = dhd_dev_pno_enable(dev, pfn_enabled)) >= 0) { + p += snprintf(p, MAX_WX_STRING, "OK"); + WL_TRACE(("%s: set OK\n", __FUNCTION__)); + goto exit; + } + else WL_ERROR(("%s: failed code %d\n", __FUNCTION__, error)); + } + + p += snprintf(p, MAX_WX_STRING, "FAIL"); + +exit: + wrqu->data.length = p - extra + 1; + net_os_wake_unlock(dev); + return error; +} + + + +static int +wl_iw_set_pno_set( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int res = -1; + wlc_ssid_t ssids_local[MAX_PFN_LIST_COUNT]; + int nssid = 0; + cmd_tlv_t *cmd_tlv_temp; + char *str_ptr; + int tlv_size_left; + int pno_time; + int pno_repeat; + int pno_freq_expo_max; +#ifdef PNO_SET_DEBUG + int i; + char pno_in_example[] = { + 'P', 'N', 'O', 'S', 'E', 'T', 'U', 'P', ' ', + 'S', '1', '2', '0', + 'S', + 0x04, + 'B', 'R', 'C', 'M', + 'S', + 0x04, + 'G', 'O', 'O', 'G', + 'T', + '1', 'E', + 'R', + '2', + 'M', + '2', + 0x00 + }; +#endif + + net_os_wake_lock(dev); + WL_ERROR(("\n### %s: info->cmd:%x, info->flags:%x, u.data=0x%p, u.len=%d\n", + __FUNCTION__, info->cmd, info->flags, + wrqu->data.pointer, wrqu->data.length)); + + if (g_onoff == G_WLAN_SET_OFF) { + WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__)); + goto exit_proc; + } + + if 
(wrqu->data.length < (strlen(PNOSETUP_SET_CMD) + sizeof(cmd_tlv_t))) { + WL_ERROR(("%s argument=%d less %d\n", __FUNCTION__, + wrqu->data.length, (int)(strlen(PNOSETUP_SET_CMD) + sizeof(cmd_tlv_t)))); + goto exit_proc; + } + +#ifdef PNO_SET_DEBUG + if (!(extra = kmalloc(sizeof(pno_in_example) +100, GFP_KERNEL))) { + res = -ENOMEM; + goto exit_proc; + } + memcpy(extra, pno_in_example, sizeof(pno_in_example)); + wrqu->data.length = sizeof(pno_in_example); + for (i = 0; i < wrqu->data.length; i++) + printf("%02X ", extra[i]); + printf("\n"); +#endif + + str_ptr = extra; +#ifdef PNO_SET_DEBUG + str_ptr += strlen("PNOSETUP "); + tlv_size_left = wrqu->data.length - strlen("PNOSETUP "); +#else + str_ptr += strlen(PNOSETUP_SET_CMD); + tlv_size_left = wrqu->data.length - strlen(PNOSETUP_SET_CMD); +#endif + + cmd_tlv_temp = (cmd_tlv_t *)str_ptr; + memset(ssids_local, 0, sizeof(ssids_local)); + pno_repeat = pno_freq_expo_max = 0; + + if ((cmd_tlv_temp->prefix == PNO_TLV_PREFIX) && + (cmd_tlv_temp->version == PNO_TLV_VERSION) && + (cmd_tlv_temp->subver == PNO_TLV_SUBVERSION)) + { + str_ptr += sizeof(cmd_tlv_t); + tlv_size_left -= sizeof(cmd_tlv_t); + + + if ((nssid = wl_iw_parse_ssid_list_tlv(&str_ptr, ssids_local, + MAX_PFN_LIST_COUNT, + &tlv_size_left)) <= 0) { + WL_ERROR(("SSID is not presented or corrupted ret=%d\n", nssid)); + goto exit_proc; + } + else { + if ((str_ptr[0] != PNO_TLV_TYPE_TIME) || (tlv_size_left <= 1)) { + WL_ERROR(("%s scan duration corrupted field size %d\n", + __FUNCTION__, tlv_size_left)); + goto exit_proc; + } + str_ptr++; + pno_time = simple_strtoul(str_ptr, &str_ptr, 16); + WL_PNO(("%s: pno_time=%d\n", __FUNCTION__, pno_time)); + + + if (str_ptr[0] != 0) { + if ((str_ptr[0] != PNO_TLV_FREQ_REPEAT)) { + WL_ERROR(("%s pno repeat : corrupted field\n", + __FUNCTION__)); + goto exit_proc; + } + str_ptr++; + pno_repeat = simple_strtoul(str_ptr, &str_ptr, 16); + WL_PNO(("%s :got pno_repeat=%d\n", __FUNCTION__, pno_repeat)); + if (str_ptr[0] != PNO_TLV_FREQ_EXPO_MAX) { + WL_ERROR(("%s FREQ_EXPO_MAX corrupted field size\n", + __FUNCTION__)); + goto exit_proc; + } + str_ptr++; + pno_freq_expo_max = simple_strtoul(str_ptr, &str_ptr, 16); + WL_PNO(("%s: pno_freq_expo_max=%d\n", + __FUNCTION__, pno_freq_expo_max)); + } + } + } + else { + WL_ERROR(("%s get wrong TLV command\n", __FUNCTION__)); + goto exit_proc; + } + + + res = dhd_dev_pno_set(dev, ssids_local, nssid, pno_time, pno_repeat, pno_freq_expo_max); + +exit_proc: + net_os_wake_unlock(dev); + return res; +} + +static int +wl_iw_set_pno_setadd( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int ret = -1; + char *tmp_ptr; + int size, tmp_size; + + net_os_wake_lock(dev); + WL_ERROR(("\n### %s: info->cmd:%x, info->flags:%x, u.data=0x%p, u.len=%d\n", + __FUNCTION__, info->cmd, info->flags, + wrqu->data.pointer, wrqu->data.length)); + + if (g_onoff == G_WLAN_SET_OFF) { + WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__)); + goto exit_proc; + } + + if (wrqu->data.length <= strlen(PNOSETADD_SET_CMD) + sizeof(cmd_tlv_t)) { + WL_ERROR(("%s argument=%d less than %d\n", __FUNCTION__, + wrqu->data.length, (int)(strlen(PNOSETADD_SET_CMD) + sizeof(cmd_tlv_t)))); + goto exit_proc; + } + + + bcopy(PNOSETUP_SET_CMD, extra, strlen(PNOSETUP_SET_CMD)); + + tmp_ptr = extra + strlen(PNOSETUP_SET_CMD); + size = wrqu->data.length - strlen(PNOSETUP_SET_CMD); + tmp_size = size; + + while (*tmp_ptr && tmp_size > 0) { + if ((*tmp_ptr == 'S') && (size - tmp_size) >= sizeof(cmd_tlv_t)) 
{ + *(tmp_ptr + 1) = ((*(tmp_ptr + 1) - '0') << 4) + (*(tmp_ptr + 2) - '0'); + memmove(tmp_ptr + 2, tmp_ptr + 3, tmp_size - 3); + tmp_size -= 2 + *(tmp_ptr + 1); + tmp_ptr += 2 + *(tmp_ptr + 1); + size--; + } else { + tmp_ptr++; + tmp_size--; + } + } + + wrqu->data.length = strlen(PNOSETUP_SET_CMD) + size; + ret = wl_iw_set_pno_set(dev, info, wrqu, extra); + +exit_proc: + net_os_wake_unlock(dev); + return ret; + +} +#endif + +static int +wl_iw_get_rssi( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + static int rssi = 0; + static wlc_ssid_t ssid = {0}; + int error = 0; + char *p = extra; + static char ssidbuf[SSID_FMT_BUF_LEN]; + scb_val_t scb_val; + + net_os_wake_lock(dev); + + bzero(&scb_val, sizeof(scb_val_t)); + + if (g_onoff == G_WLAN_SET_ON) { + error = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t)); + if (error) { + WL_ERROR(("%s: Fails %d\n", __FUNCTION__, error)); + net_os_wake_unlock(dev); + return error; + } + rssi = dtoh32(scb_val.val); + + error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)); + if (!error) { + ssid.SSID_len = dtoh32(ssid.SSID_len); + wl_format_ssid(ssidbuf, ssid.SSID, dtoh32(ssid.SSID_len)); + } + } + + p += snprintf(p, MAX_WX_STRING, "%s rssi %d ", ssidbuf, rssi); + wrqu->data.length = p - extra + 1; + + net_os_wake_unlock(dev); + return error; +} + +int +wl_iw_send_priv_event( + struct net_device *dev, + char *flag +) +{ + union iwreq_data wrqu; + char extra[IW_CUSTOM_MAX + 1]; + int cmd; + + cmd = IWEVCUSTOM; + memset(&wrqu, 0, sizeof(wrqu)); + if (strlen(flag) > sizeof(extra)) + return -1; + + strcpy(extra, flag); + wrqu.data.length = strlen(extra); + wireless_send_event(dev, cmd, &wrqu, extra); + net_os_wake_lock_ctrl_timeout_enable(dev, DHD_EVENT_TIMEOUT_MS); + WL_TRACE(("Send IWEVCUSTOM Event as %s\n", extra)); + + return 0; +} + + +int +wl_control_wl_start(struct net_device *dev) +{ + wl_iw_t *iw; + int ret = 0; + + WL_TRACE(("Enter %s \n", __FUNCTION__)); + + if (!dev) { + WL_ERROR(("%s: dev is null\n", __FUNCTION__)); + return -1; + } + + iw = *(wl_iw_t **)netdev_priv(dev); + + if (!iw) { + WL_ERROR(("%s: wl is null\n", __FUNCTION__)); + return -1; + } + + dhd_net_if_lock(dev); + + if (g_onoff == G_WLAN_SET_OFF) { + dhd_customer_gpio_wlan_ctrl(WLAN_RESET_ON); + +#if defined(BCMLXSDMMC) + sdioh_start(NULL, 0); +#endif + + ret = dhd_dev_reset(dev, 0); + +#if defined(BCMLXSDMMC) + sdioh_start(NULL, 1); +#endif + if (!ret) + dhd_dev_init_ioctl(dev); + + g_onoff = G_WLAN_SET_ON; + } + WL_TRACE(("Exited %s\n", __FUNCTION__)); + + dhd_net_if_unlock(dev); + return ret; +} + + +static int +wl_iw_control_wl_off( + struct net_device *dev, + struct iw_request_info *info +) +{ + wl_iw_t *iw; + int ret = 0; + + WL_TRACE(("Enter %s\n", __FUNCTION__)); + + if (!dev) { + WL_ERROR(("%s: dev is null\n", __FUNCTION__)); + return -1; + } + + iw = *(wl_iw_t **)netdev_priv(dev); + + if (!iw) { + WL_ERROR(("%s: wl is null\n", __FUNCTION__)); + return -1; + } + + dhd_net_if_lock(dev); + +#ifdef SOFTAP + ap_cfg_running = FALSE; +#endif + + if (g_onoff == G_WLAN_SET_ON) { + g_onoff = G_WLAN_SET_OFF; + +#if defined(WL_IW_USE_ISCAN) + g_iscan->iscan_state = ISCAN_STATE_IDLE; +#endif + + ret = dhd_dev_reset(dev, 1); + +#if defined(WL_IW_USE_ISCAN) +#if !defined(CSCAN) + + wl_iw_free_ss_cache(); + wl_iw_run_ss_cache_timer(0); + + g_ss_cache_ctrl.m_link_down = 1; +#endif + memset(g_scan, 0, G_SCAN_RESULTS); + g_scan_specified_ssid = 0; +#if defined(CONFIG_FIRST_SCAN) + + g_first_broadcast_scan = 
BROADCAST_SCAN_FIRST_IDLE; + g_first_counter_scans = 0; +#endif +#endif + +#if defined(BCMLXSDMMC) + sdioh_stop(NULL); +#endif + + dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF); + + wl_iw_send_priv_event(dev, "STOP"); + } + + dhd_net_if_unlock(dev); + + WL_TRACE(("Exited %s\n", __FUNCTION__)); + + return ret; +} + +static int +wl_iw_control_wl_on( + struct net_device *dev, + struct iw_request_info *info +) +{ + int ret = 0; + + WL_TRACE(("Enter %s \n", __FUNCTION__)); + + ret = wl_control_wl_start(dev); + + wl_iw_send_priv_event(dev, "START"); + +#ifdef SOFTAP + if (!ap_fw_loaded) { + wl_iw_iscan_set_scan_broadcast_prep(dev, 0); + } +#else + wl_iw_iscan_set_scan_broadcast_prep(dev, 0); +#endif + + WL_TRACE(("Exited %s\n", __FUNCTION__)); + + return ret; +} + +#ifdef SOFTAP +static struct ap_profile my_ap; +static int set_ap_cfg(struct net_device *dev, struct ap_profile *ap); +static int get_assoc_sta_list(struct net_device *dev, char *buf, int len); +static int set_ap_mac_list(struct net_device *dev, void *buf); + +#define PTYPE_STRING 0 +#define PTYPE_INTDEC 1 +#define PTYPE_INTHEX 2 +#define PTYPE_STR_HEX 3 + +static int get_parameter_from_string( + char **str_ptr, const char *token, int param_type, void *dst, int param_max_len); + +static int +hex2num(char c) +{ + if (c >= '0' && c <= '9') + return c - '0'; + if (c >= 'a' && c <= 'f') + return c - 'a' + 10; + if (c >= 'A' && c <= 'F') + return c - 'A' + 10; + return -1; +} + + + +static int +hstr_2_buf(const char *txt, u8 *buf, int len) +{ + int i; + + for (i = 0; i < len; i++) { + int a, b; + + a = hex2num(*txt++); + if (a < 0) + return -1; + b = hex2num(*txt++); + if (b < 0) + return -1; + *buf++ = (a << 4) | b; + } + + return 0; +} + + + +static int +init_ap_profile_from_string(char *param_str, struct ap_profile *ap_cfg) +{ + char *str_ptr = param_str; + char sub_cmd[16]; + int ret = 0; + + memset(sub_cmd, 0, sizeof(sub_cmd)); + memset(ap_cfg, 0, sizeof(struct ap_profile)); + + + if (get_parameter_from_string(&str_ptr, "ASCII_CMD=", + PTYPE_STRING, sub_cmd, SSID_LEN) != 0) { + return -1; + } + if (strncmp(sub_cmd, "AP_CFG", 6)) { + WL_ERROR(("ERROR: sub_cmd:%s != 'AP_CFG'!\n", sub_cmd)); + return -1; + } + + + + ret = get_parameter_from_string(&str_ptr, "SSID=", PTYPE_STRING, ap_cfg->ssid, SSID_LEN); + + ret |= get_parameter_from_string(&str_ptr, "SEC=", PTYPE_STRING, ap_cfg->sec, SEC_LEN); + + ret |= get_parameter_from_string(&str_ptr, "KEY=", PTYPE_STRING, ap_cfg->key, KEY_LEN); + + ret |= get_parameter_from_string(&str_ptr, "CHANNEL=", PTYPE_INTDEC, &ap_cfg->channel, 5); + + + get_parameter_from_string(&str_ptr, "PREAMBLE=", PTYPE_INTDEC, &ap_cfg->preamble, 5); + + + get_parameter_from_string(&str_ptr, "MAX_SCB=", PTYPE_INTDEC, &ap_cfg->max_scb, 5); + + + get_parameter_from_string(&str_ptr, "HIDDEN=", + PTYPE_INTDEC, &ap_cfg->closednet, 5); + + + get_parameter_from_string(&str_ptr, "COUNTRY=", + PTYPE_STRING, &ap_cfg->country_code, 3); + + return ret; +} +#endif + + + +#ifdef SOFTAP +static int +iwpriv_set_ap_config(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *ext) +{ + int res = 0; + char *extra = NULL; + struct ap_profile *ap_cfg = &my_ap; + + WL_TRACE(("> Got IWPRIV SET_AP IOCTL: info->cmd:%x, info->flags:%x, u.data:%p, u.len:%d\n", + info->cmd, info->flags, + wrqu->data.pointer, wrqu->data.length)); + + if (!ap_fw_loaded) { + WL_ERROR(("Can't execute %s(), SOFTAP fw is not Loaded\n", + __FUNCTION__)); + return -1; + } + + if (wrqu->data.length != 0) { + + char *str_ptr; + + if 
(!(extra = kmalloc(wrqu->data.length+1, GFP_KERNEL))) + return -ENOMEM; + + if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) { + kfree(extra); + return -EFAULT; + } + + extra[wrqu->data.length] = 0; + WL_SOFTAP((" Got str param in iw_point:\n %s\n", extra)); + + memset(ap_cfg, 0, sizeof(struct ap_profile)); + + + + str_ptr = extra; + + if ((res = init_ap_profile_from_string(extra, ap_cfg)) < 0) { + WL_ERROR(("%s failed to parse %d\n", __FUNCTION__, res)); + kfree(extra); + return -1; + } + + } else { + + WL_ERROR(("IWPRIV argument len = 0 \n")); + return -1; + } + + if ((res = set_ap_cfg(dev, ap_cfg)) < 0) + WL_ERROR(("%s failed to set_ap_cfg %d\n", __FUNCTION__, res)); + + kfree(extra); + + return res; +} +#endif + + + +#ifdef SOFTAP +static int iwpriv_get_assoc_list(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *p_iwrq, + char *extra) +{ + int i, ret = 0; + char mac_buf[256]; + struct maclist *sta_maclist = (struct maclist *)mac_buf; + + char mac_lst[384]; + char *p_mac_str; + char *p_mac_str_end; + wl_iw_t *iw; + + if ((!dev) || (!extra)) { + + return -EINVAL; + } + + + iw = *(wl_iw_t **)netdev_priv(dev); + + net_os_wake_lock(dev); + DHD_OS_MUTEX_LOCK(&wl_softap_lock); + + WL_TRACE(("\n %s: IWPRIV IOCTL: cmd:%hx, flags:%hx, extra:%p, iwp.len:%d," + "iwp.len:%p, iwp.flags:%x \n", __FUNCTION__, info->cmd, info->flags, + extra, p_iwrq->data.length, p_iwrq->data.pointer, p_iwrq->data.flags)); + + + memset(sta_maclist, 0, sizeof(mac_buf)); + + sta_maclist->count = 8; + + WL_SOFTAP(("%s: net device:%s, buf_sz:%d\n", + __FUNCTION__, dev->name, sizeof(mac_buf))); + + if ((ret = get_assoc_sta_list(dev, mac_buf, sizeof(mac_buf))) < 0) { + WL_ERROR(("%s: sta list ioctl error:%d\n", + __FUNCTION__, ret)); + goto func_exit; + } + + WL_SOFTAP(("%s: got %d stations\n", __FUNCTION__, + sta_maclist->count)); + + + + memset(mac_lst, 0, sizeof(mac_lst)); + p_mac_str = mac_lst; + p_mac_str_end = &mac_lst[sizeof(mac_lst)-1]; + + for (i = 0; i < 8; i++) { + struct ether_addr * id = &sta_maclist->ea[i]; + if (!ETHER_ISNULLADDR(id->octet)) { + scb_val_t scb_val; + int rssi = 0; + bzero(&scb_val, sizeof(scb_val_t)); + + + if ((p_mac_str_end - p_mac_str) <= 36) { + WL_ERROR(("%s: mac list buf is < 36 for item[%i] item\n", + __FUNCTION__, i)); + break; + } + + p_mac_str += snprintf(p_mac_str, MAX_WX_STRING, + "\nMac[%d]=%02X:%02X:%02X:%02X:%02X:%02X,", i, + id->octet[0], id->octet[1], id->octet[2], + id->octet[3], id->octet[4], id->octet[5]); + + + bcopy(id->octet, &scb_val.ea, 6); + ret = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t)); + if (ret < 0) { + snprintf(p_mac_str, MAX_WX_STRING, "RSSI:ERR"); + WL_ERROR(("%s: RSSI ioctl error:%d\n", + __FUNCTION__, ret)); + break; + } + + rssi = dtoh32(scb_val.val); + p_mac_str += snprintf(p_mac_str, MAX_WX_STRING, + "RSSI:%d", rssi); + } + } + + p_iwrq->data.length = strlen(mac_lst)+1; + + WL_SOFTAP(("%s: data to user:\n%s\n usr_ptr:%p\n", __FUNCTION__, + mac_lst, p_iwrq->data.pointer)); + + if (p_iwrq->data.length) { + bcopy(mac_lst, extra, p_iwrq->data.length); + } + +func_exit: + + DHD_OS_MUTEX_UNLOCK(&wl_softap_lock); + net_os_wake_unlock(dev); + + WL_SOFTAP(("%s: Exited\n", __FUNCTION__)); + return ret; +} +#endif + + +#ifdef SOFTAP + +#define MAC_FILT_MAX 8 +static int iwpriv_set_mac_filters(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *ext) +{ + int i, ret = -1; + char * extra = NULL; + int mac_cnt = 0; + int mac_mode = 0; + struct ether_addr *p_ea; + struct 
mac_list_set mflist_set; + + WL_SOFTAP((">>> Got IWPRIV SET_MAC_FILTER IOCTL: info->cmd:%x," + "info->flags:%x, u.data:%p, u.len:%d\n", + info->cmd, info->flags, + wrqu->data.pointer, wrqu->data.length)); + + if (wrqu->data.length != 0) { + + char *str_ptr; + + if (!(extra = kmalloc(wrqu->data.length+1, GFP_KERNEL))) + return -ENOMEM; + + if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) { + kfree(extra); + return -EFAULT; + } + + extra[wrqu->data.length] = 0; + WL_SOFTAP((" Got parameter string in iw_point:\n %s \n", extra)); + + memset(&mflist_set, 0, sizeof(mflist_set)); + + + str_ptr = extra; + + + + if (get_parameter_from_string(&str_ptr, "MAC_MODE=", + PTYPE_INTDEC, &mac_mode, 4) != 0) { + WL_ERROR(("ERROR: 'MAC_MODE=' token is missing\n")); + goto exit_proc; + } + + p_ea = &mflist_set.mac_list.ea[0]; + + if (get_parameter_from_string(&str_ptr, "MAC_CNT=", + PTYPE_INTDEC, &mac_cnt, 4) != 0) { + WL_ERROR(("ERROR: 'MAC_CNT=' token param is missing \n")); + goto exit_proc; + } + + if (mac_cnt > MAC_FILT_MAX) { + WL_ERROR(("ERROR: number of MAC filters > MAX\n")); + goto exit_proc; + } + + for (i=0; i< mac_cnt; i++) + if (get_parameter_from_string(&str_ptr, "MAC=", + PTYPE_STR_HEX, &p_ea[i], 12) != 0) { + WL_ERROR(("ERROR: MAC_filter[%d] is missing !\n", i)); + goto exit_proc; + } + + WL_SOFTAP(("MAC_MODE=:%d, MAC_CNT=%d, MACs:..\n", mac_mode, mac_cnt)); + for (i = 0; i < mac_cnt; i++) { + WL_SOFTAP(("mac_filt[%d]:", i)); + dhd_print_buf(&p_ea[i], 6, 0); + } + + + mflist_set.mode = mac_mode; + mflist_set.mac_list.count = mac_cnt; + set_ap_mac_list(dev, &mflist_set); + + + wrqu->data.pointer = NULL; + wrqu->data.length = 0; + ret = 0; + + } else { + + WL_ERROR(("IWPRIV argument len is 0\n")); + return -1; + } + + exit_proc: + kfree(extra); + return ret; +} +#endif + + +#ifdef SOFTAP + +static int iwpriv_set_ap_sta_disassoc(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *ext) +{ + int res = 0; + char sta_mac[6] = {0, 0, 0, 0, 0, 0}; + char cmd_buf[256]; + char *str_ptr = cmd_buf; + + WL_SOFTAP((">>%s called\n args: info->cmd:%x," + " info->flags:%x, u.data.p:%p, u.data.len:%d\n", + __FUNCTION__, info->cmd, info->flags, + wrqu->data.pointer, wrqu->data.length)); + + if (wrqu->data.length != 0) { + + if (copy_from_user(cmd_buf, wrqu->data.pointer, wrqu->data.length)) { + return -EFAULT; + } + + if (get_parameter_from_string(&str_ptr, + "MAC=", PTYPE_STR_HEX, sta_mac, 12) == 0) { + res = wl_iw_softap_deassoc_stations(dev, sta_mac); + } else { + WL_ERROR(("ERROR: STA_MAC= token not found\n")); + } + } + + return res; +} +#endif + +#endif + +#if WIRELESS_EXT < 13 +struct iw_request_info +{ + __u16 cmd; + __u16 flags; +}; + +typedef int (*iw_handler)(struct net_device *dev, + struct iw_request_info *info, + void *wrqu, + char *extra); +#endif + +static int +wl_iw_config_commit( + struct net_device *dev, + struct iw_request_info *info, + void *zwrq, + char *extra +) +{ + wlc_ssid_t ssid; + int error; + struct sockaddr bssid; + + WL_TRACE(("%s: SIOCSIWCOMMIT\n", dev->name)); + + if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)))) + return error; + + ssid.SSID_len = dtoh32(ssid.SSID_len); + + if (!ssid.SSID_len) + return 0; + + bzero(&bssid, sizeof(struct sockaddr)); + if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, &bssid, ETHER_ADDR_LEN))) { + WL_ERROR(("%s: WLC_REASSOC to %s failed \n", __FUNCTION__, ssid.SSID)); + return error; + } + + return 0; +} + +static int +wl_iw_get_name( + struct net_device *dev, + struct 
iw_request_info *info, + char *cwrq, + char *extra +) +{ + WL_TRACE(("%s: SIOCGIWNAME\n", dev->name)); + + strcpy(cwrq, "IEEE 802.11-DS"); + + return 0; +} + +static int +wl_iw_set_freq( + struct net_device *dev, + struct iw_request_info *info, + struct iw_freq *fwrq, + char *extra +) +{ + int error, chan; + uint sf = 0; + + WL_TRACE(("%s %s: SIOCSIWFREQ\n", __FUNCTION__, dev->name)); + +#if defined(SOFTAP) + if (ap_cfg_running) { + WL_TRACE(("%s:>> not executed, 'SOFT_AP is active' \n", __FUNCTION__)); + return 0; + } +#endif + + + if (fwrq->e == 0 && fwrq->m < MAXCHANNEL) { + chan = fwrq->m; + } + + else { + + if (fwrq->e >= 6) { + fwrq->e -= 6; + while (fwrq->e--) + fwrq->m *= 10; + } else if (fwrq->e < 6) { + while (fwrq->e++ < 6) + fwrq->m /= 10; + } + + if (fwrq->m > 4000 && fwrq->m < 5000) + sf = WF_CHAN_FACTOR_4_G; + + chan = wf_mhz2channel(fwrq->m, sf); + } + + chan = htod32(chan); + + if ((error = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &chan, sizeof(chan)))) + return error; + + g_wl_iw_params.target_channel = chan; + + + return -EINPROGRESS; +} + +static int +wl_iw_get_freq( + struct net_device *dev, + struct iw_request_info *info, + struct iw_freq *fwrq, + char *extra +) +{ + channel_info_t ci; + int error; + + WL_TRACE(("%s: SIOCGIWFREQ\n", dev->name)); + + if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci)))) + return error; + + + fwrq->m = dtoh32(ci.hw_channel); + fwrq->e = dtoh32(0); + return 0; +} + +static int +wl_iw_set_mode( + struct net_device *dev, + struct iw_request_info *info, + __u32 *uwrq, + char *extra +) +{ + int infra = 0, ap = 0, error = 0; + + WL_TRACE(("%s: SIOCSIWMODE\n", dev->name)); + + switch (*uwrq) { + case IW_MODE_MASTER: + infra = ap = 1; + break; + case IW_MODE_ADHOC: + case IW_MODE_AUTO: + break; + case IW_MODE_INFRA: + infra = 1; + break; + default: + return -EINVAL; + } + infra = htod32(infra); + ap = htod32(ap); + + if ((error = dev_wlc_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(infra))) || + (error = dev_wlc_ioctl(dev, WLC_SET_AP, &ap, sizeof(ap)))) + return error; + + + return -EINPROGRESS; +} + +static int +wl_iw_get_mode( + struct net_device *dev, + struct iw_request_info *info, + __u32 *uwrq, + char *extra +) +{ + int error, infra = 0, ap = 0; + + WL_TRACE(("%s: SIOCGIWMODE\n", dev->name)); + + if ((error = dev_wlc_ioctl(dev, WLC_GET_INFRA, &infra, sizeof(infra))) || + (error = dev_wlc_ioctl(dev, WLC_GET_AP, &ap, sizeof(ap)))) + return error; + + infra = dtoh32(infra); + ap = dtoh32(ap); + *uwrq = infra ? ap ? 
IW_MODE_MASTER : IW_MODE_INFRA : IW_MODE_ADHOC; + + return 0; +} + +static int +wl_iw_get_range( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + struct iw_range *range = (struct iw_range *) extra; + wl_uint32_list_t *list; + wl_rateset_t rateset; + int8 *channels; + int error, i, k; + uint sf, ch; + + int phytype; + int bw_cap = 0, sgi_tx = 0, nmode = 0; + channel_info_t ci; + uint8 nrate_list2copy = 0; + uint16 nrate_list[4][8] = { {13, 26, 39, 52, 78, 104, 117, 130}, + {14, 29, 43, 58, 87, 116, 130, 144}, + {27, 54, 81, 108, 162, 216, 243, 270}, + {30, 60, 90, 120, 180, 240, 270, 300}}; + + WL_TRACE(("%s: SIOCGIWRANGE\n", dev->name)); + + if (!extra) + return -EINVAL; + + channels = kmalloc((MAXCHANNEL+1)*4, GFP_KERNEL); + if (!channels) { + WL_ERROR(("Could not alloc channels\n")); + return -ENOMEM; + } + list = (wl_uint32_list_t *)channels; + + dwrq->length = sizeof(struct iw_range); + memset(range, 0, sizeof(*range)); + + + range->min_nwid = range->max_nwid = 0; + + + list->count = htod32(MAXCHANNEL); + if ((error = dev_wlc_ioctl(dev, WLC_GET_VALID_CHANNELS, channels, (MAXCHANNEL+1)*4))) { + kfree(channels); + return error; + } + for (i = 0; i < dtoh32(list->count) && i < IW_MAX_FREQUENCIES; i++) { + range->freq[i].i = dtoh32(list->element[i]); + + ch = dtoh32(list->element[i]); + if (ch <= CH_MAX_2G_CHANNEL) + sf = WF_CHAN_FACTOR_2_4_G; + else + sf = WF_CHAN_FACTOR_5_G; + + range->freq[i].m = wf_channel2mhz(ch, sf); + range->freq[i].e = 6; + } + range->num_frequency = range->num_channels = i; + + + range->max_qual.qual = 5; + + range->max_qual.level = 0x100 - 200; + + range->max_qual.noise = 0x100 - 200; + + range->sensitivity = 65535; + +#if WIRELESS_EXT > 11 + + range->avg_qual.qual = 3; + + range->avg_qual.level = 0x100 + WL_IW_RSSI_GOOD; + + range->avg_qual.noise = 0x100 - 75; +#endif + + + if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset)))) { + kfree(channels); + return error; + } + rateset.count = dtoh32(rateset.count); + range->num_bitrates = rateset.count; + for (i = 0; i < rateset.count && i < IW_MAX_BITRATES; i++) + range->bitrate[i] = (rateset.rates[i]& 0x7f) * 500000; + dev_wlc_intvar_get(dev, "nmode", &nmode); + dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype)); + + if (nmode == 1 && phytype == WLC_PHY_TYPE_SSN) { + dev_wlc_intvar_get(dev, "mimo_bw_cap", &bw_cap); + dev_wlc_intvar_get(dev, "sgi_tx", &sgi_tx); + dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(channel_info_t)); + ci.hw_channel = dtoh32(ci.hw_channel); + + if (bw_cap == 0 || + (bw_cap == 2 && ci.hw_channel <= 14)) { + if (sgi_tx == 0) + nrate_list2copy = 0; + else + nrate_list2copy = 1; + } + if (bw_cap == 1 || + (bw_cap == 2 && ci.hw_channel >= 36)) { + if (sgi_tx == 0) + nrate_list2copy = 2; + else + nrate_list2copy = 3; + } + range->num_bitrates += 8; + for (k = 0; i < range->num_bitrates; k++, i++) { + + range->bitrate[i] = (nrate_list[nrate_list2copy][k]) * 500000; + } + } + + + if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &i, sizeof(i)))) { + kfree(channels); + return error; + } + i = dtoh32(i); + if (i == WLC_PHY_TYPE_A) + range->throughput = 24000000; + else + range->throughput = 1500000; + + + range->min_rts = 0; + range->max_rts = 2347; + range->min_frag = 256; + range->max_frag = 2346; + + range->max_encoding_tokens = DOT11_MAX_DEFAULT_KEYS; + range->num_encoding_sizes = 4; + range->encoding_size[0] = WEP1_KEY_SIZE; + range->encoding_size[1] = WEP128_KEY_SIZE; +#if WIRELESS_EXT > 17 + 
range->encoding_size[2] = TKIP_KEY_SIZE; +#else + range->encoding_size[2] = 0; +#endif + range->encoding_size[3] = AES_KEY_SIZE; + + + range->min_pmp = 0; + range->max_pmp = 0; + range->min_pmt = 0; + range->max_pmt = 0; + range->pmp_flags = 0; + range->pm_capa = 0; + + + range->num_txpower = 2; + range->txpower[0] = 1; + range->txpower[1] = 255; + range->txpower_capa = IW_TXPOW_MWATT; + +#if WIRELESS_EXT > 10 + range->we_version_compiled = WIRELESS_EXT; + range->we_version_source = 19; + + + range->retry_capa = IW_RETRY_LIMIT; + range->retry_flags = IW_RETRY_LIMIT; + range->r_time_flags = 0; + + range->min_retry = 1; + range->max_retry = 255; + + range->min_r_time = 0; + range->max_r_time = 0; +#endif + +#if WIRELESS_EXT > 17 + range->enc_capa = IW_ENC_CAPA_WPA; + range->enc_capa |= IW_ENC_CAPA_CIPHER_TKIP; + range->enc_capa |= IW_ENC_CAPA_CIPHER_CCMP; + range->enc_capa |= IW_ENC_CAPA_WPA2; + + + IW_EVENT_CAPA_SET_KERNEL(range->event_capa); + + IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP); + IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN); + IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP); + IW_EVENT_CAPA_SET(range->event_capa, IWEVMICHAELMICFAILURE); + IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCREQIE); + IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCRESPIE); + IW_EVENT_CAPA_SET(range->event_capa, IWEVPMKIDCAND); +#endif + + kfree(channels); + + return 0; +} + +static int +rssi_to_qual(int rssi) +{ + if (rssi <= WL_IW_RSSI_NO_SIGNAL) + return 0; + else if (rssi <= WL_IW_RSSI_VERY_LOW) + return 1; + else if (rssi <= WL_IW_RSSI_LOW) + return 2; + else if (rssi <= WL_IW_RSSI_GOOD) + return 3; + else if (rssi <= WL_IW_RSSI_VERY_GOOD) + return 4; + else + return 5; +} + +static int +wl_iw_set_spy( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_iw_t *iw = NETDEV_PRIV(dev); + struct sockaddr *addr = (struct sockaddr *) extra; + int i; + + WL_TRACE(("%s: SIOCSIWSPY\n", dev->name)); + + if (!extra) + return -EINVAL; + + iw->spy_num = MIN(ARRAYSIZE(iw->spy_addr), dwrq->length); + for (i = 0; i < iw->spy_num; i++) + memcpy(&iw->spy_addr[i], addr[i].sa_data, ETHER_ADDR_LEN); + memset(iw->spy_qual, 0, sizeof(iw->spy_qual)); + + return 0; +} + +static int +wl_iw_get_spy( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_iw_t *iw = NETDEV_PRIV(dev); + struct sockaddr *addr = (struct sockaddr *) extra; + struct iw_quality *qual = (struct iw_quality *) &addr[iw->spy_num]; + int i; + + WL_TRACE(("%s: SIOCGIWSPY\n", dev->name)); + + if (!extra) + return -EINVAL; + + dwrq->length = iw->spy_num; + for (i = 0; i < iw->spy_num; i++) { + memcpy(addr[i].sa_data, &iw->spy_addr[i], ETHER_ADDR_LEN); + addr[i].sa_family = AF_UNIX; + memcpy(&qual[i], &iw->spy_qual[i], sizeof(struct iw_quality)); + iw->spy_qual[i].updated = 0; + } + + return 0; +} + + +static int +wl_iw_ch_to_chanspec(int ch, wl_join_params_t *join_params, int *join_params_size) +{ + chanspec_t chanspec = 0; + + if (ch != 0) { + + join_params->params.chanspec_num = 1; + join_params->params.chanspec_list[0] = ch; + + if (join_params->params.chanspec_list[0]) + chanspec |= WL_CHANSPEC_BAND_2G; + else + chanspec |= WL_CHANSPEC_BAND_5G; + + chanspec |= WL_CHANSPEC_BW_20; + chanspec |= WL_CHANSPEC_CTL_SB_NONE; + + + *join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE + + join_params->params.chanspec_num * sizeof(chanspec_t); + + + join_params->params.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK; + join_params->params.chanspec_list[0] |= 
chanspec; + join_params->params.chanspec_list[0] = + htodchanspec(join_params->params.chanspec_list[0]); + + join_params->params.chanspec_num = htod32(join_params->params.chanspec_num); + + WL_TRACE(("%s join_params->params.chanspec_list[0]= %X\n", + __FUNCTION__, join_params->params.chanspec_list[0])); + } + return 1; +} + +static int +wl_iw_set_wap( + struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *awrq, + char *extra +) +{ + int error = -EINVAL; + wl_join_params_t join_params; + int join_params_size; + + WL_TRACE(("%s: SIOCSIWAP\n", dev->name)); + + if (awrq->sa_family != ARPHRD_ETHER) { + WL_ERROR(("Invalid Header...sa_family\n")); + return -EINVAL; + } + + + if (ETHER_ISBCAST(awrq->sa_data) || ETHER_ISNULLADDR(awrq->sa_data)) { + scb_val_t scbval; + + bzero(&scbval, sizeof(scb_val_t)); + + (void) dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)); + return 0; + } + + + + memset(&join_params, 0, sizeof(join_params)); + join_params_size = sizeof(join_params.ssid); + + memcpy(join_params.ssid.SSID, g_ssid.SSID, g_ssid.SSID_len); + join_params.ssid.SSID_len = htod32(g_ssid.SSID_len); + memcpy(&join_params.params.bssid, awrq->sa_data, ETHER_ADDR_LEN); + + + + WL_TRACE(("%s target_channel=%d\n", __FUNCTION__, g_wl_iw_params.target_channel)); + wl_iw_ch_to_chanspec(g_wl_iw_params.target_channel, &join_params, &join_params_size); + + if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size))) { + WL_ERROR(("%s Invalid ioctl data=%d\n", __FUNCTION__, error)); + return error; + } + + if (g_ssid.SSID_len) { + WL_TRACE(("%s: join SSID=%s BSSID="MACSTR" ch=%d\n", __FUNCTION__, + g_ssid.SSID, MAC2STR((u8 *)awrq->sa_data), + g_wl_iw_params.target_channel)); + } + + + memset(&g_ssid, 0, sizeof(g_ssid)); + return 0; +} + +static int +wl_iw_get_wap( + struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *awrq, + char *extra +) +{ + WL_TRACE(("%s: SIOCGIWAP\n", dev->name)); + + awrq->sa_family = ARPHRD_ETHER; + memset(awrq->sa_data, 0, ETHER_ADDR_LEN); + + + (void) dev_wlc_ioctl(dev, WLC_GET_BSSID, awrq->sa_data, ETHER_ADDR_LEN); + + return 0; +} + +#if WIRELESS_EXT > 17 +static int +wl_iw_mlme( + struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *awrq, + char *extra +) +{ + struct iw_mlme *mlme; + scb_val_t scbval; + int error = -EINVAL; + + WL_TRACE(("%s: SIOCSIWMLME DISASSOC/DEAUTH\n", dev->name)); + + mlme = (struct iw_mlme *)extra; + if (mlme == NULL) { + WL_ERROR(("Invalid ioctl data.\n")); + return error; + } + + scbval.val = mlme->reason_code; + bcopy(&mlme->addr.sa_data, &scbval.ea, ETHER_ADDR_LEN); + + if (mlme->cmd == IW_MLME_DISASSOC) { + scbval.val = htod32(scbval.val); + error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)); + } + else if (mlme->cmd == IW_MLME_DEAUTH) { + scbval.val = htod32(scbval.val); + error = dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scbval, + sizeof(scb_val_t)); + } + else { + WL_ERROR(("Invalid ioctl data.\n")); + return error; + } + + return error; +} +#endif + +#ifndef WL_IW_USE_ISCAN +static int +wl_iw_get_aplist( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_scan_results_t *list; + struct sockaddr *addr = (struct sockaddr *) extra; + struct iw_quality qual[IW_MAX_AP]; + wl_bss_info_t *bi = NULL; + int error, i; + uint buflen = dwrq->length; + + WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name)); + + if (!extra) + return -EINVAL; + + + list = kmalloc(buflen, GFP_KERNEL); + if 
(!list) + return -ENOMEM; + memset(list, 0, buflen); + list->buflen = htod32(buflen); + if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) { + WL_ERROR(("%d: Scan results error %d\n", __LINE__, error)); + kfree(list); + return error; + } + list->buflen = dtoh32(list->buflen); + list->version = dtoh32(list->version); + list->count = dtoh32(list->count); + if (list->version != WL_BSS_INFO_VERSION) { + WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n", + __FUNCTION__, list->version)); + kfree(list); + return -EINVAL; + } + + for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) { + bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info; + ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list + + buflen)); + + + if (!(dtoh16(bi->capability) & DOT11_CAP_ESS)) + continue; + + + memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN); + addr[dwrq->length].sa_family = ARPHRD_ETHER; + qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI)); + qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI); + qual[dwrq->length].noise = 0x100 + bi->phy_noise; + + +#if WIRELESS_EXT > 18 + qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; +#else + qual[dwrq->length].updated = 7; +#endif + + dwrq->length++; + } + + kfree(list); + + if (dwrq->length) { + memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length); + + dwrq->flags = 1; + } + + return 0; +} +#endif + +#ifdef WL_IW_USE_ISCAN +static int +wl_iw_iscan_get_aplist( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_scan_results_t *list; + iscan_buf_t * buf; + iscan_info_t *iscan = g_iscan; + + struct sockaddr *addr = (struct sockaddr *) extra; + struct iw_quality qual[IW_MAX_AP]; + wl_bss_info_t *bi = NULL; + int i; + + WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name)); + + if (!extra) + return -EINVAL; + + if ((!iscan) || (iscan->tsk_ctl.thr_pid < 0)) { + WL_ERROR(("%s error\n", __FUNCTION__)); + return 0; + } + + buf = iscan->list_hdr; + + while (buf) { + list = &((wl_iscan_results_t*)buf->iscan_buf)->results; + if (list->version != WL_BSS_INFO_VERSION) { + WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n", + __FUNCTION__, list->version)); + return -EINVAL; + } + + bi = NULL; + for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) { + bi = bi ? 
(wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length))
+ : list->bss_info;
+ ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list +
+ WLC_IW_ISCAN_MAXLEN));
+
+
+ if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+ continue;
+
+
+ memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+ addr[dwrq->length].sa_family = ARPHRD_ETHER;
+ qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI));
+ qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI);
+ qual[dwrq->length].noise = 0x100 + bi->phy_noise;
+
+
+#if WIRELESS_EXT > 18
+ qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+#else
+ qual[dwrq->length].updated = 7;
+#endif
+
+ dwrq->length++;
+ }
+ buf = buf->next;
+ }
+ if (dwrq->length) {
+ memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length);
+
+ dwrq->flags = 1;
+ }
+
+ return 0;
+}
+
+static int
+wl_iw_iscan_prep(wl_scan_params_t *params, wlc_ssid_t *ssid)
+{
+ int err = 0;
+
+ memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+ params->bss_type = DOT11_BSSTYPE_ANY;
+ params->scan_type = 0;
+ params->nprobes = -1;
+ params->active_time = -1;
+ params->passive_time = -1;
+ params->home_time = -1;
+ params->channel_num = 0;
+
+#if defined(CONFIG_FIRST_SCAN)
+
+ if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_STARTED)
+ params->passive_time = 30;
+#endif
+ params->nprobes = htod32(params->nprobes);
+ params->active_time = htod32(params->active_time);
+ params->passive_time = htod32(params->passive_time);
+ params->home_time = htod32(params->home_time);
+ if (ssid && ssid->SSID_len)
+ memcpy(&params->ssid, ssid, sizeof(wlc_ssid_t));
+
+ return err;
+}
+
+static int
+wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action)
+{
+ int err = 0;
+
+ iscan->iscan_ex_params_p->version = htod32(ISCAN_REQ_VERSION);
+ iscan->iscan_ex_params_p->action = htod16(action);
+ iscan->iscan_ex_params_p->scan_duration = htod16(0);
+
+ WL_SCAN(("%s : nprobes=%d\n", __FUNCTION__, iscan->iscan_ex_params_p->params.nprobes));
+ WL_SCAN(("active_time=%d\n", iscan->iscan_ex_params_p->params.active_time));
+ WL_SCAN(("passive_time=%d\n", iscan->iscan_ex_params_p->params.passive_time));
+ WL_SCAN(("home_time=%d\n", iscan->iscan_ex_params_p->params.home_time));
+ WL_SCAN(("scan_type=%d\n", iscan->iscan_ex_params_p->params.scan_type));
+ WL_SCAN(("bss_type=%d\n", iscan->iscan_ex_params_p->params.bss_type));
+
+ if ((dev_iw_iovar_setbuf(iscan->dev, "iscan", iscan->iscan_ex_params_p,
+ iscan->iscan_ex_param_size, iscan->ioctlbuf, sizeof(iscan->ioctlbuf)))) {
+ WL_ERROR(("Set ISCAN for %s failed with %d\n", __FUNCTION__, err));
+ err = -1;
+ }
+
+ return err;
+}
+
+static void
+wl_iw_timerfunc(ulong data)
+{
+ iscan_info_t *iscan = (iscan_info_t *)data;
+ if (iscan) {
+ iscan->timer_on = 0;
+ if (iscan->iscan_state != ISCAN_STATE_IDLE) {
+ WL_TRACE(("timer trigger\n"));
+ up(&iscan->tsk_ctl.sema);
+ }
+ }
+}
+
+static void
+wl_iw_set_event_mask(struct net_device *dev)
+{
+ char eventmask[WL_EVENTING_MASK_LEN];
+ char iovbuf[WL_EVENTING_MASK_LEN + 12];
+
+ dev_iw_iovar_getbuf(dev, "event_msgs", "", 0, iovbuf, sizeof(iovbuf));
+ bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
+ setbit(eventmask, WLC_E_SCAN_COMPLETE);
+ dev_iw_iovar_setbuf(dev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN,
+ iovbuf, sizeof(iovbuf));
+}
+
+static uint32
+wl_iw_iscan_get(iscan_info_t *iscan)
+{
+ iscan_buf_t * buf;
+ iscan_buf_t * ptr;
+ wl_iscan_results_t * list_buf;
+ wl_iscan_results_t list;
+ wl_scan_results_t *results;
+ uint32 status;
+ int res = 0;
+
+ DHD_OS_MUTEX_LOCK(&wl_cache_lock);
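+ /* Reuse the next buffer from the previously allocated results list when one is
+  * available; otherwise a new iscan_buf_t is allocated below and linked at the
+  * tail of iscan->list_hdr so partial iscan results accumulate in order. */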
+ if (iscan->list_cur) { + buf = iscan->list_cur; + iscan->list_cur = buf->next; + } + else { + buf = kmalloc(sizeof(iscan_buf_t), GFP_KERNEL); + if (!buf) { + WL_ERROR(("%s can't alloc iscan_buf_t : going to abort currect iscan\n", + __FUNCTION__)); + DHD_OS_MUTEX_UNLOCK(&wl_cache_lock); + return WL_SCAN_RESULTS_NO_MEM; + } + buf->next = NULL; + if (!iscan->list_hdr) + iscan->list_hdr = buf; + else { + ptr = iscan->list_hdr; + while (ptr->next) { + ptr = ptr->next; + } + ptr->next = buf; + } + } + memset(buf->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN); + list_buf = (wl_iscan_results_t*)buf->iscan_buf; + results = &list_buf->results; + results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE; + results->version = 0; + results->count = 0; + + memset(&list, 0, sizeof(list)); + list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN); + res = dev_iw_iovar_getbuf( + iscan->dev, + "iscanresults", + &list, + WL_ISCAN_RESULTS_FIXED_SIZE, + buf->iscan_buf, + WLC_IW_ISCAN_MAXLEN); + if (res == 0) { + results->buflen = dtoh32(results->buflen); + results->version = dtoh32(results->version); + results->count = dtoh32(results->count); + WL_TRACE(("results->count = %d\n", results->count)); + WL_TRACE(("results->buflen = %d\n", results->buflen)); + status = dtoh32(list_buf->status); + } else { + WL_ERROR(("%s returns error %d\n", __FUNCTION__, res)); + + status = WL_SCAN_RESULTS_NO_MEM; + } + DHD_OS_MUTEX_UNLOCK(&wl_cache_lock); + return status; +} + +static void +wl_iw_force_specific_scan(iscan_info_t *iscan) +{ + WL_TRACE(("%s force Specific SCAN for %s\n", __FUNCTION__, g_specific_ssid.SSID)); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_lock(); +#endif + + (void) dev_wlc_ioctl(iscan->dev, WLC_SCAN, &g_specific_ssid, sizeof(g_specific_ssid)); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_unlock(); +#endif +} + +static void +wl_iw_send_scan_complete(iscan_info_t *iscan) +{ + union iwreq_data wrqu; + + memset(&wrqu, 0, sizeof(wrqu)); + + + wireless_send_event(iscan->dev, SIOCGIWSCAN, &wrqu, NULL); +#if defined(CONFIG_FIRST_SCAN) + if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_STARTED) + g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_READY; +#endif + WL_TRACE(("Send Event ISCAN complete\n")); +} + +static int +_iscan_sysioc_thread(void *data) +{ + uint32 status; + + tsk_ctl_t *tsk_ctl = (tsk_ctl_t *)data; + iscan_info_t *iscan = (iscan_info_t *) tsk_ctl->parent; + + + static bool iscan_pass_abort = FALSE; + + DAEMONIZE("iscan_sysioc"); + + status = WL_SCAN_RESULTS_PARTIAL; + + + complete(&tsk_ctl->completed); + + while (down_interruptible(&tsk_ctl->sema) == 0) { + + SMP_RD_BARRIER_DEPENDS(); + if (tsk_ctl->terminated) { + break; + } +#if defined(SOFTAP) + + if (ap_cfg_running) { + WL_TRACE(("%s skipping SCAN ops in AP mode !!!\n", __FUNCTION__)); + net_os_wake_unlock(iscan->dev); + continue; + } +#endif + + if (iscan->timer_on) { + + iscan->timer_on = 0; + del_timer_sync(&iscan->timer); + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_lock(); +#endif + status = wl_iw_iscan_get(iscan); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_unlock(); +#endif + + if (g_scan_specified_ssid && (iscan_pass_abort == TRUE)) { + WL_TRACE(("%s Get results from specific scan status=%d\n", __FUNCTION__, status)); + wl_iw_send_scan_complete(iscan); + iscan_pass_abort = FALSE; + status = -1; + } + + switch (status) { + case WL_SCAN_RESULTS_PARTIAL: + WL_TRACE(("iscanresults incomplete\n")); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_lock(); +#endif + + 
wl_iw_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_unlock(); +#endif + + mod_timer(&iscan->timer, jiffies + msecs_to_jiffies(iscan->timer_ms)); + iscan->timer_on = 1; + break; + case WL_SCAN_RESULTS_SUCCESS: + WL_TRACE(("iscanresults complete\n")); + iscan->iscan_state = ISCAN_STATE_IDLE; + wl_iw_send_scan_complete(iscan); + break; + case WL_SCAN_RESULTS_PENDING: + WL_TRACE(("iscanresults pending\n")); + + mod_timer(&iscan->timer, jiffies + msecs_to_jiffies(iscan->timer_ms)); + iscan->timer_on = 1; + break; + case WL_SCAN_RESULTS_ABORTED: + WL_TRACE(("iscanresults aborted\n")); + iscan->iscan_state = ISCAN_STATE_IDLE; + if (g_scan_specified_ssid == 0) + wl_iw_send_scan_complete(iscan); + else { + iscan_pass_abort = TRUE; + wl_iw_force_specific_scan(iscan); + } + break; + case WL_SCAN_RESULTS_NO_MEM: + WL_TRACE(("iscanresults can't alloc memory: skip\n")); + iscan->iscan_state = ISCAN_STATE_IDLE; + break; + default: + WL_TRACE(("iscanresults returned unknown status %d\n", status)); + break; + } + + net_os_wake_unlock(iscan->dev); + } + + if (iscan->timer_on) { + iscan->timer_on = 0; + del_timer_sync(&iscan->timer); + } + complete_and_exit(&tsk_ctl->completed, 0); +} +#endif + +#if !defined(CSCAN) + +static void +wl_iw_set_ss_cache_timer_flag(void) +{ + g_ss_cache_ctrl.m_timer_expired = 1; + WL_TRACE(("%s called\n", __FUNCTION__)); +} + + +static int +wl_iw_init_ss_cache_ctrl(void) +{ + WL_TRACE(("%s :\n", __FUNCTION__)); + g_ss_cache_ctrl.m_prev_scan_mode = 0; + g_ss_cache_ctrl.m_cons_br_scan_cnt = 0; + g_ss_cache_ctrl.m_cache_head = NULL; + g_ss_cache_ctrl.m_link_down = 0; + g_ss_cache_ctrl.m_timer_expired = 0; + memset(g_ss_cache_ctrl.m_active_bssid, 0, ETHER_ADDR_LEN); + + g_ss_cache_ctrl.m_timer = kmalloc(sizeof(struct timer_list), GFP_KERNEL); + if (!g_ss_cache_ctrl.m_timer) { + return -ENOMEM; + } + g_ss_cache_ctrl.m_timer->function = (void *)wl_iw_set_ss_cache_timer_flag; + init_timer(g_ss_cache_ctrl.m_timer); + + return 0; +} + + + +static void +wl_iw_free_ss_cache(void) +{ + wl_iw_ss_cache_t *node, *cur; + wl_iw_ss_cache_t **spec_scan_head; + + WL_TRACE(("%s called\n", __FUNCTION__)); + + DHD_OS_MUTEX_LOCK(&wl_cache_lock); + spec_scan_head = &g_ss_cache_ctrl.m_cache_head; + node = *spec_scan_head; + + for (;node;) { + WL_TRACE(("%s : SSID - %s\n", __FUNCTION__, node->bss_info->SSID)); + cur = node; + node = cur->next; + kfree(cur); + } + *spec_scan_head = NULL; + DHD_OS_MUTEX_UNLOCK(&wl_cache_lock); +} + + + +static int +wl_iw_run_ss_cache_timer(int kick_off) +{ + struct timer_list **timer; + + timer = &g_ss_cache_ctrl.m_timer; + + if (*timer) { + if (kick_off) { +#ifdef CONFIG_PRESCANNED + (*timer)->expires = jiffies + msecs_to_jiffies(70000); +#else + (*timer)->expires = jiffies + msecs_to_jiffies(30000); +#endif + add_timer(*timer); + WL_TRACE(("%s : timer starts \n", __FUNCTION__)); + } else { + del_timer_sync(*timer); + WL_TRACE(("%s : timer stops \n", __FUNCTION__)); + } + } + + return 0; +} + + +static void +wl_iw_release_ss_cache_ctrl(void) +{ + WL_TRACE(("%s :\n", __FUNCTION__)); + wl_iw_free_ss_cache(); + wl_iw_run_ss_cache_timer(0); + if (g_ss_cache_ctrl.m_timer) { + kfree(g_ss_cache_ctrl.m_timer); + } +} + + + +static void +wl_iw_reset_ss_cache(void) +{ + wl_iw_ss_cache_t *node, *prev, *cur; + wl_iw_ss_cache_t **spec_scan_head; + + DHD_OS_MUTEX_LOCK(&wl_cache_lock); + spec_scan_head = &g_ss_cache_ctrl.m_cache_head; + node = *spec_scan_head; + prev = node; + + for (;node;) { + WL_TRACE(("%s : node SSID %s \n", 
__FUNCTION__, node->bss_info->SSID)); + if (!node->dirty) { + cur = node; + if (cur == *spec_scan_head) { + *spec_scan_head = cur->next; + prev = *spec_scan_head; + } + else { + prev->next = cur->next; + } + node = cur->next; + + WL_TRACE(("%s : Del node : SSID %s\n", __FUNCTION__, cur->bss_info->SSID)); + kfree(cur); + continue; + } + + node->dirty = 0; + prev = node; + node = node->next; + } + DHD_OS_MUTEX_UNLOCK(&wl_cache_lock); +} + + +static int +wl_iw_add_bss_to_ss_cache(wl_scan_results_t *ss_list) +{ + + wl_iw_ss_cache_t *node, *prev, *leaf; + wl_iw_ss_cache_t **spec_scan_head; + wl_bss_info_t *bi = NULL; + int i; + + + if (!ss_list->count) { + return 0; + } + + DHD_OS_MUTEX_LOCK(&wl_cache_lock); + spec_scan_head = &g_ss_cache_ctrl.m_cache_head; + + for (i = 0; i < ss_list->count; i++) { + + node = *spec_scan_head; + prev = node; + + bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : ss_list->bss_info; + + WL_TRACE(("%s : find %d with specific SSID %s\n", __FUNCTION__, i, bi->SSID)); + for (;node;) { + if (!memcmp(&node->bss_info->BSSID, &bi->BSSID, ETHER_ADDR_LEN)) { + + WL_TRACE(("dirty marked : SSID %s\n", bi->SSID)); + node->dirty = 1; + break; + } + prev = node; + node = node->next; + } + + if (node) { + continue; + } + + leaf = kmalloc(bi->length + WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN, GFP_KERNEL); + if (!leaf) { + WL_ERROR(("Memory alloc failure %d\n", + bi->length + WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN)); + DHD_OS_MUTEX_UNLOCK(&wl_cache_lock); + return -ENOMEM; + } + + memcpy(leaf->bss_info, bi, bi->length); + leaf->next = NULL; + leaf->dirty = 1; + leaf->count = 1; + leaf->version = ss_list->version; + + if (!prev) { + *spec_scan_head = leaf; + } + else { + prev->next = leaf; + } + } + DHD_OS_MUTEX_UNLOCK(&wl_cache_lock); + return 0; +} + + +static int +wl_iw_merge_scan_cache(struct iw_request_info *info, char *extra, uint buflen_from_user, +__u16 *merged_len) +{ + wl_iw_ss_cache_t *node; + wl_scan_results_t *list_merge; + + DHD_OS_MUTEX_LOCK(&wl_cache_lock); + node = g_ss_cache_ctrl.m_cache_head; + for (;node;) { + list_merge = (wl_scan_results_t *)&node->buflen; + WL_TRACE(("%s: Cached Specific APs list=%d\n", __FUNCTION__, list_merge->count)); + if (buflen_from_user - *merged_len > 0) { + *merged_len += (__u16) wl_iw_get_scan_prep(list_merge, info, + extra + *merged_len, buflen_from_user - *merged_len); + } + else { + WL_TRACE(("%s: exit with break\n", __FUNCTION__)); + break; + } + node = node->next; + } + DHD_OS_MUTEX_UNLOCK(&wl_cache_lock); + return 0; +} + + +static int +wl_iw_delete_bss_from_ss_cache(void *addr) +{ + + wl_iw_ss_cache_t *node, *prev; + wl_iw_ss_cache_t **spec_scan_head; + + DHD_OS_MUTEX_LOCK(&wl_cache_lock); + spec_scan_head = &g_ss_cache_ctrl.m_cache_head; + node = *spec_scan_head; + prev = node; + for (;node;) { + if (!memcmp(&node->bss_info->BSSID, addr, ETHER_ADDR_LEN)) { + if (node == *spec_scan_head) { + *spec_scan_head = node->next; + } + else { + prev->next = node->next; + } + + WL_TRACE(("%s : Del node : %s\n", __FUNCTION__, node->bss_info->SSID)); + kfree(node); + break; + } + + prev = node; + node = node->next; + } + + memset(addr, 0, ETHER_ADDR_LEN); + DHD_OS_MUTEX_UNLOCK(&wl_cache_lock); + return 0; +} + +#endif + +static int +wl_iw_set_scan( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int error; + WL_TRACE(("\n:%s dev:%s: SIOCSIWSCAN : SCAN\n", __FUNCTION__, dev->name)); + +#ifdef OEM_CHROMIUMOS + g_set_essid_before_scan = FALSE; +#endif + +#if defined(CSCAN) + 
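+	/* With CSCAN builds, scans are driven through the private combined-scan
+	 * (CSCAN) command handled by wl_iw_set_cscan() below, so the generic
+	 * SIOCSIWSCAN request is rejected here.
+	 */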
WL_ERROR(("%s: Scan from SIOCGIWSCAN not supported\n", __FUNCTION__)); + return -EINVAL; +#endif + +#if defined(SOFTAP) + + if (ap_cfg_running) { + WL_TRACE(("\n>%s: Not executed, reason -'SOFTAP is active'\n", __FUNCTION__)); + return 0; + } +#endif + + + if (g_onoff == G_WLAN_SET_OFF) + return 0; + + + memset(&g_specific_ssid, 0, sizeof(g_specific_ssid)); +#ifndef WL_IW_USE_ISCAN + + g_scan_specified_ssid = 0; +#endif + +#if WIRELESS_EXT > 17 + + if (wrqu->data.length == sizeof(struct iw_scan_req)) { + if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { + struct iw_scan_req *req = (struct iw_scan_req *)extra; +#if defined(CONFIG_FIRST_SCAN) + if (g_first_broadcast_scan != BROADCAST_SCAN_FIRST_RESULT_CONSUMED) { + + WL_TRACE(("%s Ignoring SC %s first BC is not done = %d\n", + __FUNCTION__, req->essid, + g_first_broadcast_scan)); + return -EBUSY; + } +#endif + if (g_scan_specified_ssid) { + WL_TRACE(("%s Specific SCAN is not done ignore scan for = %s \n", + __FUNCTION__, req->essid)); + + return -EBUSY; + } + else { + g_specific_ssid.SSID_len = MIN(sizeof(g_specific_ssid.SSID), + req->essid_len); + memcpy(g_specific_ssid.SSID, req->essid, g_specific_ssid.SSID_len); + g_specific_ssid.SSID_len = htod32(g_specific_ssid.SSID_len); + g_scan_specified_ssid = 1; + WL_TRACE(("### Specific scan ssid=%s len=%d\n", + g_specific_ssid.SSID, g_specific_ssid.SSID_len)); + } + } + } +#endif + + if ((error = dev_wlc_ioctl(dev, WLC_SCAN, &g_specific_ssid, sizeof(g_specific_ssid)))) { + WL_TRACE(("#### Set SCAN for %s failed with %d\n", g_specific_ssid.SSID, error)); + + g_scan_specified_ssid = 0; + return -EBUSY; + } + + return 0; +} + +#ifdef WL_IW_USE_ISCAN +int +wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag) +{ + wlc_ssid_t ssid; + iscan_info_t *iscan = g_iscan; + +#if defined(CONFIG_FIRST_SCAN) + + if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_IDLE) { + g_first_broadcast_scan = BROADCAST_SCAN_FIRST_STARTED; + WL_TRACE(("%s: First Brodcast scan was forced\n", __FUNCTION__)); + } + else if (g_first_broadcast_scan == BROADCAST_SCAN_FIRST_STARTED) { + WL_TRACE(("%s: ignore ISCAN request first BS is not done yet\n", __FUNCTION__)); + return 0; + } +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + if (flag) + rtnl_lock(); +#endif + + dev_wlc_ioctl(dev, WLC_SET_PASSIVE_SCAN, &iscan->scan_flag, sizeof(iscan->scan_flag)); + wl_iw_set_event_mask(dev); + + WL_TRACE(("+++: Set Broadcast ISCAN\n")); + + memset(&ssid, 0, sizeof(ssid)); + + iscan->list_cur = iscan->list_hdr; + iscan->iscan_state = ISCAN_STATE_SCANING; + + memset(&iscan->iscan_ex_params_p->params, 0, iscan->iscan_ex_param_size); + wl_iw_iscan_prep(&iscan->iscan_ex_params_p->params, &ssid); + wl_iw_iscan(iscan, &ssid, WL_SCAN_ACTION_START); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + if (flag) + rtnl_unlock(); +#endif + + mod_timer(&iscan->timer, jiffies + msecs_to_jiffies(iscan->timer_ms)); + + iscan->timer_on = 1; + + return 0; +} + +static int +wl_iw_iscan_set_scan( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + wlc_ssid_t ssid; + iscan_info_t *iscan = g_iscan; + int ret = 0; + + WL_TRACE_SCAN(("%s: SIOCSIWSCAN : ISCAN\n", dev->name)); + +#if defined(CSCAN) + WL_ERROR(("%s: Scan from SIOCGIWSCAN not supported\n", __FUNCTION__)); + return -EINVAL; +#endif + + net_os_wake_lock(dev); + + +#if defined(SOFTAP) + if (ap_cfg_running) { + WL_TRACE(("\n>%s: Not executed, reason -'SOFTAP is active'\n", __FUNCTION__)); + goto set_scan_end; + } +#endif 
+ + if (g_onoff == G_WLAN_SET_OFF) { + WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__)); + goto set_scan_end; + } + +#ifdef PNO_SUPPORT + + if (dhd_dev_get_pno_status(dev)) { + WL_ERROR(("%s: Scan called when PNO is active\n", __FUNCTION__)); + } +#endif + + + if ((!iscan) || (iscan->tsk_ctl.thr_pid < 0)) { + WL_ERROR(("%s error \n", __FUNCTION__)); + goto set_scan_end; + } + + if (g_scan_specified_ssid) { + WL_TRACE(("%s Specific SCAN already running ignoring BC scan\n", + __FUNCTION__)); + ret = EBUSY; + goto set_scan_end; + } + + + memset(&ssid, 0, sizeof(ssid)); + +#if WIRELESS_EXT > 17 + + if (wrqu->data.length == sizeof(struct iw_scan_req)) { + if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { + int as = 0; + struct iw_scan_req *req = (struct iw_scan_req *)extra; + + ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len); + memcpy(ssid.SSID, req->essid, ssid.SSID_len); + ssid.SSID_len = htod32(ssid.SSID_len); + dev_wlc_ioctl(dev, WLC_SET_PASSIVE_SCAN, &as, sizeof(as)); + wl_iw_set_event_mask(dev); + ret = wl_iw_set_scan(dev, info, wrqu, extra); + goto set_scan_end; + } + else { + g_scan_specified_ssid = 0; + + if (iscan->iscan_state == ISCAN_STATE_SCANING) { + WL_TRACE(("%s ISCAN already in progress \n", __FUNCTION__)); + goto set_scan_end; + } + } + } +#endif + +#if defined(CONFIG_FIRST_SCAN) && !defined(CSCAN) + if (g_first_broadcast_scan < BROADCAST_SCAN_FIRST_RESULT_CONSUMED) { + if (++g_first_counter_scans == MAX_ALLOWED_BLOCK_SCAN_FROM_FIRST_SCAN) { + + WL_ERROR(("%s Clean up First scan flag which is %d\n", + __FUNCTION__, g_first_broadcast_scan)); + g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_CONSUMED; + } + else { + WL_ERROR(("%s Ignoring Broadcast Scan:First Scan is not done yet %d\n", + __FUNCTION__, g_first_counter_scans)); + ret = -EBUSY; + goto set_scan_end; + } + } +#endif + + wl_iw_iscan_set_scan_broadcast_prep(dev, 0); + +set_scan_end: + net_os_wake_unlock(dev); + return ret; +} +#endif + +#if WIRELESS_EXT > 17 +static bool +ie_is_wpa_ie(uint8 **wpaie, uint8 **tlvs, int *tlvs_len) +{ + + + uint8 *ie = *wpaie; + + + if ((ie[1] >= 6) && + !bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x01"), 4)) { + return TRUE; + } + + + ie += ie[1] + 2; + + *tlvs_len -= (int)(ie - *tlvs); + + *tlvs = ie; + return FALSE; +} + +static bool +ie_is_wps_ie(uint8 **wpsie, uint8 **tlvs, int *tlvs_len) +{ + + + uint8 *ie = *wpsie; + + + if ((ie[1] >= 4) && + !bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x04"), 4)) { + return TRUE; + } + + + ie += ie[1] + 2; + + *tlvs_len -= (int)(ie - *tlvs); + + *tlvs = ie; + return FALSE; +} +#endif + + +static int +wl_iw_handle_scanresults_ies(char **event_p, char *end, + struct iw_request_info *info, wl_bss_info_t *bi) +{ +#if WIRELESS_EXT > 17 + struct iw_event iwe; + char *event; + + event = *event_p; + if (bi->ie_length) { + + bcm_tlv_t *ie; + uint8 *ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t); + int ptr_len = bi->ie_length; + + if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_RSN_ID))) { + iwe.cmd = IWEVGENIE; + iwe.u.data.length = ie->len + 2; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); + } + ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t); + + while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) { + + if (ie_is_wps_ie(((uint8 **)&ie), &ptr, &ptr_len)) { + iwe.cmd = IWEVGENIE; + iwe.u.data.length = ie->len + 2; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); + break; + } + } + + ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t); + ptr_len = bi->ie_length; + while ((ie = 
bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) { + if (ie_is_wpa_ie(((uint8 **)&ie), &ptr, &ptr_len)) { + iwe.cmd = IWEVGENIE; + iwe.u.data.length = ie->len + 2; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); + break; + } + } + + *event_p = event; + } +#endif + + return 0; +} + +#ifndef CSCAN +static uint +wl_iw_get_scan_prep( + wl_scan_results_t *list, + struct iw_request_info *info, + char *extra, + short max_size) +{ + int i, j; + struct iw_event iwe; + wl_bss_info_t *bi = NULL; + char *event = extra, *end = extra + max_size - WE_ADD_EVENT_FIX, *value; + int ret = 0; + + if (!list) { + WL_ERROR(("%s: Null list pointer", __FUNCTION__)); + return ret; + } + + + + for (i = 0; i < list->count && i < IW_MAX_AP; i++) { + if (list->version != WL_BSS_INFO_VERSION) { + WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n", + __FUNCTION__, list->version)); + return ret; + } + + bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info; + + WL_TRACE(("%s : %s\n", __FUNCTION__, bi->SSID)); + + + iwe.cmd = SIOCGIWAP; + iwe.u.ap_addr.sa_family = ARPHRD_ETHER; + memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN); + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN); + + iwe.u.data.length = dtoh32(bi->SSID_len); + iwe.cmd = SIOCGIWESSID; + iwe.u.data.flags = 1; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID); + + + if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) { + iwe.cmd = SIOCGIWMODE; + if (dtoh16(bi->capability) & DOT11_CAP_ESS) + iwe.u.mode = IW_MODE_INFRA; + else + iwe.u.mode = IW_MODE_ADHOC; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN); + } + + + iwe.cmd = SIOCGIWFREQ; + iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec), + CHSPEC_CHANNEL(bi->chanspec) <= CH_MAX_2G_CHANNEL ? 
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G); + iwe.u.freq.e = 6; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN); + + + iwe.cmd = IWEVQUAL; + iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI)); + iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI); + iwe.u.qual.noise = 0x100 + bi->phy_noise; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN); + + + wl_iw_handle_scanresults_ies(&event, end, info, bi); + + + iwe.cmd = SIOCGIWENCODE; + if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY) + iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; + else + iwe.u.data.flags = IW_ENCODE_DISABLED; + iwe.u.data.length = 0; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event); + + + if (bi->rateset.count) { + if (((event -extra) + IW_EV_LCP_LEN) <= (uintptr)end) { + value = event + IW_EV_LCP_LEN; + iwe.cmd = SIOCGIWRATE; + + iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; + for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) { + iwe.u.bitrate.value = + (bi->rateset.rates[j] & 0x7f) * 500000; + value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe, + IW_EV_PARAM_LEN); + } + event = value; + } + } + } + + if ((ret = (event - extra)) < 0) { + WL_ERROR(("==> Wrong size\n")); + ret = 0; + } + + WL_TRACE(("%s: size=%d bytes prepared \n", __FUNCTION__, (unsigned int)(event - extra))); + return (uint)ret; +} + +static int +wl_iw_get_scan( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + channel_info_t ci; + wl_scan_results_t *list_merge; + wl_scan_results_t *list = (wl_scan_results_t *) g_scan; + int error; + uint buflen_from_user = dwrq->length; + uint len = G_SCAN_RESULTS; + __u16 len_ret = 0; +#if !defined(CSCAN) + __u16 merged_len = 0; +#endif +#if defined(WL_IW_USE_ISCAN) + iscan_info_t *iscan = g_iscan; + iscan_buf_t * p_buf; +#if !defined(CSCAN) + uint32 counter = 0; +#endif +#endif + + WL_TRACE(("%s: buflen_from_user %d: \n", dev->name, buflen_from_user)); + + if (!extra) { + WL_TRACE(("%s: wl_iw_get_scan return -EINVAL\n", dev->name)); + return -EINVAL; + } + + + if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci)))) + return error; + ci.scan_channel = dtoh32(ci.scan_channel); + if (ci.scan_channel) + return -EAGAIN; + +#if !defined(CSCAN) + if (g_ss_cache_ctrl.m_timer_expired) { + wl_iw_free_ss_cache(); + g_ss_cache_ctrl.m_timer_expired ^= 1; + } + if ((!g_scan_specified_ssid && g_ss_cache_ctrl.m_prev_scan_mode) || + g_ss_cache_ctrl.m_cons_br_scan_cnt > 4) { + g_ss_cache_ctrl.m_cons_br_scan_cnt = 0; + + wl_iw_reset_ss_cache(); + } + g_ss_cache_ctrl.m_prev_scan_mode = g_scan_specified_ssid; + if (g_scan_specified_ssid) { + g_ss_cache_ctrl.m_cons_br_scan_cnt = 0; + } + else { + g_ss_cache_ctrl.m_cons_br_scan_cnt++; + } +#endif + + + + if (g_scan_specified_ssid) { + + list = kmalloc(len, GFP_KERNEL); + if (!list) { + WL_TRACE(("%s: wl_iw_get_scan return -ENOMEM\n", dev->name)); + g_scan_specified_ssid = 0; + return -ENOMEM; + } + } + + memset(list, 0, len); + list->buflen = htod32(len); + if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, len))) { + WL_ERROR(("%s: %s : Scan_results ERROR %d\n", dev->name, __FUNCTION__, error)); + dwrq->length = len; + if (g_scan_specified_ssid) { + g_scan_specified_ssid = 0; + kfree(list); + } + return 0; + } + list->buflen = dtoh32(list->buflen); + list->version = dtoh32(list->version); + list->count = dtoh32(list->count); + + + if (list->version != WL_BSS_INFO_VERSION) { + WL_ERROR(("%s : list->version %d != 
WL_BSS_INFO_VERSION\n", + __FUNCTION__, list->version)); + if (g_scan_specified_ssid) { + g_scan_specified_ssid = 0; + kfree(list); + } + return -EINVAL; + } + +#if !defined(CSCAN) + if (g_scan_specified_ssid) { + + wl_iw_add_bss_to_ss_cache(list); + kfree(list); + } +#endif + +#if !defined(CSCAN) + DHD_OS_MUTEX_LOCK(&wl_cache_lock); +#if defined(WL_IW_USE_ISCAN) + if (g_scan_specified_ssid) + WL_TRACE(("%s: Specified scan APs from scan=%d\n", __FUNCTION__, list->count)); + p_buf = iscan->list_hdr; + + while (p_buf != iscan->list_cur) { + list_merge = &((wl_iscan_results_t*)p_buf->iscan_buf)->results; + WL_TRACE(("%s: Bcast APs list=%d\n", __FUNCTION__, list_merge->count)); + counter += list_merge->count; + if (list_merge->count > 0) + len_ret += (__u16) wl_iw_get_scan_prep(list_merge, info, + extra+len_ret, buflen_from_user -len_ret); + p_buf = p_buf->next; + } + WL_TRACE(("%s merged with total Bcast APs=%d\n", __FUNCTION__, counter)); +#else + list_merge = (wl_scan_results_t *) g_scan; + len_ret = (__u16) wl_iw_get_scan_prep(list_merge, info, extra, buflen_from_user); +#endif + DHD_OS_MUTEX_UNLOCK(&wl_cache_lock); + if (g_ss_cache_ctrl.m_link_down) { + + wl_iw_delete_bss_from_ss_cache(g_ss_cache_ctrl.m_active_bssid); + } + + wl_iw_merge_scan_cache(info, extra+len_ret, buflen_from_user-len_ret, &merged_len); + len_ret += merged_len; + wl_iw_run_ss_cache_timer(0); + wl_iw_run_ss_cache_timer(1); +#else + + + if (g_scan_specified_ssid) { + WL_TRACE(("%s: Specified scan APs in the list =%d\n", __FUNCTION__, list->count)); + len_ret = (__u16) wl_iw_get_scan_prep(list, info, extra, buflen_from_user); + kfree(list); + +#if defined(WL_IW_USE_ISCAN) + p_buf = iscan->list_hdr; + + while (p_buf != iscan->list_cur) { + list_merge = &((wl_iscan_results_t*)p_buf->iscan_buf)->results; + WL_TRACE(("%s: Bcast APs list=%d\n", __FUNCTION__, list_merge->count)); + if (list_merge->count > 0) + len_ret += (__u16) wl_iw_get_scan_prep(list_merge, info, + extra+len_ret, buflen_from_user -len_ret); + p_buf = p_buf->next; + } +#else + list_merge = (wl_scan_results_t *) g_scan; + WL_TRACE(("%s: Bcast APs list=%d\n", __FUNCTION__, list_merge->count)); + if (list_merge->count > 0) + len_ret += (__u16) wl_iw_get_scan_prep(list_merge, info, extra+len_ret, + buflen_from_user -len_ret); +#endif + } + else { + list = (wl_scan_results_t *) g_scan; + len_ret = (__u16) wl_iw_get_scan_prep(list, info, extra, buflen_from_user); + } +#endif + +#if defined(WL_IW_USE_ISCAN) + + g_scan_specified_ssid = 0; +#endif + + if ((len_ret + WE_ADD_EVENT_FIX) < buflen_from_user) + len = len_ret; + + dwrq->length = len; + dwrq->flags = 0; + + WL_TRACE(("%s return to WE %d bytes APs=%d\n", __FUNCTION__, dwrq->length, list->count)); + return 0; +} +#endif + +#if defined(WL_IW_USE_ISCAN) +static int +wl_iw_iscan_get_scan( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_scan_results_t *list; + struct iw_event iwe; + wl_bss_info_t *bi = NULL; + int ii, j; + int apcnt; + char *event = extra, *end = extra + dwrq->length, *value; + iscan_info_t *iscan = g_iscan; + iscan_buf_t * p_buf; + uint32 counter = 0; + uint8 channel; +#if !defined(CSCAN) + __u16 merged_len = 0; + uint buflen_from_user = dwrq->length; +#endif + + WL_TRACE(("%s %s buflen_from_user %d:\n", dev->name, __FUNCTION__, dwrq->length)); + +#if defined(SOFTAP) + if (ap_cfg_running) { + WL_TRACE(("%s: Not executed, reason -'SOFTAP is active'\n", __FUNCTION__)); + return -EINVAL; + } +#endif + + if (!extra) { + WL_TRACE(("%s: 
INVALID SIOCGIWSCAN GET bad parameter\n", dev->name)); + return -EINVAL; + } + +#if defined(CONFIG_FIRST_SCAN) + if (g_first_broadcast_scan < BROADCAST_SCAN_FIRST_RESULT_READY) { + WL_TRACE(("%s %s: first ISCAN results are NOT ready yet \n", + dev->name, __FUNCTION__)); + return -EAGAIN; + } +#endif + + if ((!iscan) || (iscan->tsk_ctl.thr_pid < 0)) { + WL_ERROR(("%ssysioc_pid\n", __FUNCTION__)); + return EAGAIN; + } + + + +#if !defined(CSCAN) + if (g_ss_cache_ctrl.m_timer_expired) { + wl_iw_free_ss_cache(); + g_ss_cache_ctrl.m_timer_expired ^= 1; + } + if (g_scan_specified_ssid) { + return wl_iw_get_scan(dev, info, dwrq, extra); + } + else { + if (g_ss_cache_ctrl.m_link_down) { + + wl_iw_delete_bss_from_ss_cache(g_ss_cache_ctrl.m_active_bssid); + } + if (g_ss_cache_ctrl.m_prev_scan_mode || g_ss_cache_ctrl.m_cons_br_scan_cnt > 4) { + g_ss_cache_ctrl.m_cons_br_scan_cnt = 0; + + wl_iw_reset_ss_cache(); + } + g_ss_cache_ctrl.m_prev_scan_mode = g_scan_specified_ssid; + g_ss_cache_ctrl.m_cons_br_scan_cnt++; + } +#endif + + WL_TRACE(("%s: SIOCGIWSCAN GET broadcast results\n", dev->name)); + apcnt = 0; + p_buf = iscan->list_hdr; + + while (p_buf != iscan->list_cur) { + list = &((wl_iscan_results_t*)p_buf->iscan_buf)->results; + + counter += list->count; + + if (list->version != WL_BSS_INFO_VERSION) { + WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n", + __FUNCTION__, list->version)); + return -EINVAL; + } + + bi = NULL; + for (ii = 0; ii < list->count && apcnt < IW_MAX_AP; apcnt++, ii++) { + bi = (bi ? + (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : + list->bss_info); + ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list + + WLC_IW_ISCAN_MAXLEN)); + + + if (event + ETHER_ADDR_LEN + bi->SSID_len + + IW_EV_UINT_LEN + IW_EV_FREQ_LEN + IW_EV_QUAL_LEN >= end) + return -E2BIG; + + iwe.cmd = SIOCGIWAP; + iwe.u.ap_addr.sa_family = ARPHRD_ETHER; + memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN); + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN); + + + iwe.u.data.length = dtoh32(bi->SSID_len); + iwe.cmd = SIOCGIWESSID; + iwe.u.data.flags = 1; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID); + + + if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) { + iwe.cmd = SIOCGIWMODE; + if (dtoh16(bi->capability) & DOT11_CAP_ESS) + iwe.u.mode = IW_MODE_INFRA; + else + iwe.u.mode = IW_MODE_ADHOC; + event = IWE_STREAM_ADD_EVENT(info, event, end, + &iwe, IW_EV_UINT_LEN); + } + + + iwe.cmd = SIOCGIWFREQ; + channel = (bi->ctl_ch == 0) ? CHSPEC_CHANNEL(bi->chanspec) : bi->ctl_ch; + iwe.u.freq.m = wf_channel2mhz(channel, + channel <= CH_MAX_2G_CHANNEL ? 
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G); + iwe.u.freq.e = 6; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN); + + + iwe.cmd = IWEVQUAL; + iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI)); + iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI); + iwe.u.qual.noise = 0x100 + bi->phy_noise; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN); + + + wl_iw_handle_scanresults_ies(&event, end, info, bi); + + + iwe.cmd = SIOCGIWENCODE; + if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY) + iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; + else + iwe.u.data.flags = IW_ENCODE_DISABLED; + iwe.u.data.length = 0; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event); + + + if (bi->rateset.count) { + if (event + IW_MAX_BITRATES*IW_EV_PARAM_LEN >= end) + return -E2BIG; + + value = event + IW_EV_LCP_LEN; + iwe.cmd = SIOCGIWRATE; + + iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; + for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) { + iwe.u.bitrate.value = + (bi->rateset.rates[j] & 0x7f) * 500000; + value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe, + IW_EV_PARAM_LEN); + } + event = value; + } + } + p_buf = p_buf->next; + } + + dwrq->length = event - extra; + dwrq->flags = 0; + +#if !defined(CSCAN) + + wl_iw_merge_scan_cache(info, event, buflen_from_user - dwrq->length, &merged_len); + dwrq->length += merged_len; + wl_iw_run_ss_cache_timer(0); + wl_iw_run_ss_cache_timer(1); +#endif + +#if defined(CONFIG_FIRST_SCAN) + g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_CONSUMED; +#endif + + WL_TRACE(("%s return to WE %d bytes APs=%d\n", __FUNCTION__, dwrq->length, counter)); + + return 0; +} +#endif + +#define WL_JOIN_PARAMS_MAX 1600 +#ifdef CONFIG_PRESCANNED +static int +check_prescan(wl_join_params_t *join_params, int *join_params_size) +{ + int cnt = 0; + int indx = 0; + wl_iw_ss_cache_t *node = NULL; + wl_bss_info_t *bi = NULL; + iscan_info_t *iscan = g_iscan; + iscan_buf_t * buf; + wl_scan_results_t *list; + char *destbuf; + + buf = iscan->list_hdr; + + while (buf) { + list = &((wl_iscan_results_t*)buf->iscan_buf)->results; + bi = NULL; + for (indx = 0; indx < list->count; indx++) { + bi = bi ? 
(wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length))
+				: list->bss_info;
+			if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
+				continue;
+			if ((dtoh32(bi->SSID_len) != join_params->ssid.SSID_len) ||
+				memcmp(bi->SSID, join_params->ssid.SSID,
+				join_params->ssid.SSID_len))
+				continue;
+			memcpy(&join_params->params.chanspec_list[cnt],
+				&bi->chanspec, sizeof(chanspec_t));
+			WL_ERROR(("iscan : chanspec :%d, count %d \n", bi->chanspec, cnt));
+			cnt++;
+		}
+		buf = buf->next;
+	}
+
+	if (!cnt) {
+		MUTEX_LOCK_WL_SCAN_SET();
+		node = g_ss_cache_ctrl.m_cache_head;
+		for (; node; ) {
+			if (!memcmp(&node->bss_info->SSID, join_params->ssid.SSID,
+				join_params->ssid.SSID_len)) {
+				memcpy(&join_params->params.chanspec_list[cnt],
+					&node->bss_info->chanspec, sizeof(chanspec_t));
+				WL_ERROR(("cache_scan : chanspec :%d, count %d \n",
+					(int)node->bss_info->chanspec, cnt));
+				cnt++;
+			}
+			node = node->next;
+		}
+		MUTEX_UNLOCK_WL_SCAN_SET();
+	}
+
+	if (!cnt) {
+		return 0;
+	}
+
+	destbuf = (char *)&join_params->params.chanspec_list[cnt];
+	*join_params_size = destbuf - (char*)join_params;
+	join_params->ssid.SSID_len = htod32(g_ssid.SSID_len);
+	memcpy(&(join_params->params.bssid), &ether_bcast, ETHER_ADDR_LEN);
+	join_params->params.chanspec_num = htod32(cnt);
+
+	if ((*join_params_size) > WL_JOIN_PARAMS_MAX) {
+		WL_ERROR(("can't fit bssids for all %d APs found\n", cnt));
+		/* join_params is owned and freed by the caller, which keeps using
+		 * it after a zero return, so do not free it here.
+		 */
+		return 0;
+	}
+
+	WL_ERROR(("Passing %d channel/bssid pairs.\n", cnt));
+	return cnt;
+}
+#endif
+
+static int
+wl_iw_set_essid(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_point *dwrq,
+	char *extra
+)
+{
+	int error;
+	wl_join_params_t *join_params;
+	int join_params_size;
+
+	WL_TRACE(("%s: SIOCSIWESSID\n", dev->name));
+
+	RETURN_IF_EXTRA_NULL(extra);
+
+#ifdef OEM_CHROMIUMOS
+	if (g_set_essid_before_scan)
+		return -EAGAIN;
+#endif
+	if (!(join_params = kmalloc(WL_JOIN_PARAMS_MAX, GFP_KERNEL))) {
+		WL_ERROR(("allocation of %d bytes for join_params failed\n", WL_JOIN_PARAMS_MAX));
+		return -ENOMEM;
+	}
+
+	memset(join_params, 0, WL_JOIN_PARAMS_MAX);
+
+	memset(&g_ssid, 0, sizeof(g_ssid));
+
+	if (dwrq->length && extra) {
+#if WIRELESS_EXT > 20
+		g_ssid.SSID_len = MIN(sizeof(g_ssid.SSID), dwrq->length);
+#else
+		g_ssid.SSID_len = MIN(sizeof(g_ssid.SSID), dwrq->length-1);
+#endif
+		memcpy(g_ssid.SSID, extra, g_ssid.SSID_len);
+
+#ifdef CONFIG_PRESCANNED
+		memcpy(join_params->ssid.SSID, g_ssid.SSID, g_ssid.SSID_len);
+		join_params->ssid.SSID_len = g_ssid.SSID_len;
+
+		if (check_prescan(join_params, &join_params_size)) {
+			if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID,
+				join_params, join_params_size))) {
+				WL_ERROR(("Invalid ioctl data=%d\n", error));
+				kfree(join_params);
+				return error;
+			}
+			kfree(join_params);
+			return 0;
+		} else {
+			WL_ERROR(("No match found, trying to join on the specified channel\n"));
+		}
+#endif
+	} else {
+		g_ssid.SSID_len = 0;
+	}
+	g_ssid.SSID_len = htod32(g_ssid.SSID_len);
+
+	memset(join_params, 0, sizeof(*join_params));
+	join_params_size = sizeof(join_params->ssid);
+
+	memcpy(join_params->ssid.SSID, g_ssid.SSID, g_ssid.SSID_len);
+	join_params->ssid.SSID_len = htod32(g_ssid.SSID_len);
+	memcpy(&(join_params->params.bssid), &ether_bcast, ETHER_ADDR_LEN);
+
+	wl_iw_ch_to_chanspec(g_wl_iw_params.target_channel, join_params, &join_params_size);
+
+	if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, join_params, join_params_size))) {
+		WL_ERROR(("Invalid ioctl data=%d\n", error));
+		kfree(join_params);	/* don't leak join_params on the error path */
+		return error;
+	}
+
+	if (g_ssid.SSID_len) {
+		WL_ERROR(("%s: join SSID=%s ch=%d\n",
__FUNCTION__, + g_ssid.SSID, g_wl_iw_params.target_channel)); + } + kfree(join_params); + return 0; +} + +static int +wl_iw_get_essid( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wlc_ssid_t ssid; + int error; + + WL_TRACE(("%s: SIOCGIWESSID\n", dev->name)); + + if (!extra) + return -EINVAL; + + if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)))) { + WL_ERROR(("Error getting the SSID\n")); + return error; + } + + ssid.SSID_len = dtoh32(ssid.SSID_len); + + + memcpy(extra, ssid.SSID, ssid.SSID_len); + + dwrq->length = ssid.SSID_len; + + dwrq->flags = 1; + + return 0; +} + +static int +wl_iw_set_nick( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_iw_t *iw = NETDEV_PRIV(dev); + + WL_TRACE(("%s: SIOCSIWNICKN\n", dev->name)); + + if (!extra) + return -EINVAL; + + + if (dwrq->length > sizeof(iw->nickname)) + return -E2BIG; + + memcpy(iw->nickname, extra, dwrq->length); + iw->nickname[dwrq->length - 1] = '\0'; + + return 0; +} + +static int +wl_iw_get_nick( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_iw_t *iw = NETDEV_PRIV(dev); + + WL_TRACE(("%s: SIOCGIWNICKN\n", dev->name)); + + if (!extra) + return -EINVAL; + + strcpy(extra, iw->nickname); + dwrq->length = strlen(extra) + 1; + + return 0; +} + +static int +wl_iw_set_rate( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + wl_rateset_t rateset; + int error, rate, i, error_bg, error_a; + + WL_TRACE(("%s: SIOCSIWRATE\n", dev->name)); + + + if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset)))) + return error; + + rateset.count = dtoh32(rateset.count); + + if (vwrq->value < 0) { + + rate = rateset.rates[rateset.count - 1] & 0x7f; + } else if (vwrq->value < rateset.count) { + + rate = rateset.rates[vwrq->value] & 0x7f; + } else { + + rate = vwrq->value / 500000; + } + + if (vwrq->fixed) { + + error_bg = dev_wlc_intvar_set(dev, "bg_rate", rate); + error_a = dev_wlc_intvar_set(dev, "a_rate", rate); + + if (error_bg && error_a) + return (error_bg | error_a); + } else { + + + error_bg = dev_wlc_intvar_set(dev, "bg_rate", 0); + + error_a = dev_wlc_intvar_set(dev, "a_rate", 0); + + if (error_bg && error_a) + return (error_bg | error_a); + + + for (i = 0; i < rateset.count; i++) + if ((rateset.rates[i] & 0x7f) > rate) + break; + rateset.count = htod32(i); + + + if ((error = dev_wlc_ioctl(dev, WLC_SET_RATESET, &rateset, sizeof(rateset)))) + return error; + } + + return 0; +} + +static int +wl_iw_get_rate( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, rate; + + WL_TRACE(("%s: SIOCGIWRATE\n", dev->name)); + + + if ((error = dev_wlc_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate)))) + return error; + rate = dtoh32(rate); + vwrq->value = rate * 500000; + + return 0; +} + +static int +wl_iw_set_rts( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, rts; + + WL_TRACE(("%s: SIOCSIWRTS\n", dev->name)); + + if (vwrq->disabled) + rts = DOT11_DEFAULT_RTS_LEN; + else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_RTS_LEN) + return -EINVAL; + else + rts = vwrq->value; + + if ((error = dev_wlc_intvar_set(dev, "rtsthresh", rts))) + return error; + + return 0; +} + +static int +wl_iw_get_rts( + struct net_device *dev, + struct iw_request_info *info, + struct 
iw_param *vwrq, + char *extra +) +{ + int error, rts; + + WL_TRACE(("%s: SIOCGIWRTS\n", dev->name)); + + if ((error = dev_wlc_intvar_get(dev, "rtsthresh", &rts))) + return error; + + vwrq->value = rts; + vwrq->disabled = (rts >= DOT11_DEFAULT_RTS_LEN); + vwrq->fixed = 1; + + return 0; +} + +static int +wl_iw_set_frag( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, frag; + + WL_TRACE(("%s: SIOCSIWFRAG\n", dev->name)); + + if (vwrq->disabled) + frag = DOT11_DEFAULT_FRAG_LEN; + else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_FRAG_LEN) + return -EINVAL; + else + frag = vwrq->value; + + if ((error = dev_wlc_intvar_set(dev, "fragthresh", frag))) + return error; + + return 0; +} + +static int +wl_iw_get_frag( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, fragthreshold; + + WL_TRACE(("%s: SIOCGIWFRAG\n", dev->name)); + + if ((error = dev_wlc_intvar_get(dev, "fragthresh", &fragthreshold))) + return error; + + vwrq->value = fragthreshold; + vwrq->disabled = (fragthreshold >= DOT11_DEFAULT_FRAG_LEN); + vwrq->fixed = 1; + + return 0; +} + +static int +wl_iw_set_txpow( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, disable; + uint16 txpwrmw; + WL_TRACE(("%s: SIOCSIWTXPOW\n", dev->name)); + + + disable = vwrq->disabled ? WL_RADIO_SW_DISABLE : 0; + disable += WL_RADIO_SW_DISABLE << 16; + + disable = htod32(disable); + if ((error = dev_wlc_ioctl(dev, WLC_SET_RADIO, &disable, sizeof(disable)))) + return error; + + + if (disable & WL_RADIO_SW_DISABLE) + return 0; + + + if (!(vwrq->flags & IW_TXPOW_MWATT)) + return -EINVAL; + + + if (vwrq->value < 0) + return 0; + + if (vwrq->value > 0xffff) txpwrmw = 0xffff; + else txpwrmw = (uint16)vwrq->value; + + + error = dev_wlc_intvar_set(dev, "qtxpower", (int)(bcm_mw_to_qdbm(txpwrmw))); + return error; +} + +static int +wl_iw_get_txpow( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, disable, txpwrdbm; + uint8 result; + + WL_TRACE(("%s: SIOCGIWTXPOW\n", dev->name)); + + if ((error = dev_wlc_ioctl(dev, WLC_GET_RADIO, &disable, sizeof(disable))) || + (error = dev_wlc_intvar_get(dev, "qtxpower", &txpwrdbm))) + return error; + + disable = dtoh32(disable); + result = (uint8)(txpwrdbm & ~WL_TXPWR_OVERRIDE); + vwrq->value = (int32)bcm_qdbm_to_mw(result); + vwrq->fixed = 0; + vwrq->disabled = (disable & (WL_RADIO_SW_DISABLE | WL_RADIO_HW_DISABLE)) ? 
1 : 0; + vwrq->flags = IW_TXPOW_MWATT; + + return 0; +} + +#if WIRELESS_EXT > 10 +static int +wl_iw_set_retry( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, lrl, srl; + + WL_TRACE(("%s: SIOCSIWRETRY\n", dev->name)); + + + if (vwrq->disabled || (vwrq->flags & IW_RETRY_LIFETIME)) + return -EINVAL; + + + if (vwrq->flags & IW_RETRY_LIMIT) { + + +#if WIRELESS_EXT > 20 + if ((vwrq->flags & IW_RETRY_LONG) ||(vwrq->flags & IW_RETRY_MAX) || + !((vwrq->flags & IW_RETRY_SHORT) || (vwrq->flags & IW_RETRY_MIN))) { +#else + if ((vwrq->flags & IW_RETRY_MAX) || !(vwrq->flags & IW_RETRY_MIN)) { +#endif + lrl = htod32(vwrq->value); + if ((error = dev_wlc_ioctl(dev, WLC_SET_LRL, &lrl, sizeof(lrl)))) + return error; + } + + +#if WIRELESS_EXT > 20 + if ((vwrq->flags & IW_RETRY_SHORT) ||(vwrq->flags & IW_RETRY_MIN) || + !((vwrq->flags & IW_RETRY_LONG) || (vwrq->flags & IW_RETRY_MAX))) { +#else + if ((vwrq->flags & IW_RETRY_MIN) || !(vwrq->flags & IW_RETRY_MAX)) { +#endif + srl = htod32(vwrq->value); + if ((error = dev_wlc_ioctl(dev, WLC_SET_SRL, &srl, sizeof(srl)))) + return error; + } + } + return 0; +} + +static int +wl_iw_get_retry( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, lrl, srl; + + WL_TRACE(("%s: SIOCGIWRETRY\n", dev->name)); + + vwrq->disabled = 0; + + + if ((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) + return -EINVAL; + + + if ((error = dev_wlc_ioctl(dev, WLC_GET_LRL, &lrl, sizeof(lrl))) || + (error = dev_wlc_ioctl(dev, WLC_GET_SRL, &srl, sizeof(srl)))) + return error; + + lrl = dtoh32(lrl); + srl = dtoh32(srl); + + + if (vwrq->flags & IW_RETRY_MAX) { + vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX; + vwrq->value = lrl; + } else { + vwrq->flags = IW_RETRY_LIMIT; + vwrq->value = srl; + if (srl != lrl) + vwrq->flags |= IW_RETRY_MIN; + } + + return 0; +} +#endif + +static int +wl_iw_set_encode( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_wsec_key_t key; + int error, val, wsec; + + WL_TRACE(("%s: SIOCSIWENCODE index %d, len %d, flags %04x (%s%s%s%s%s)\n", + dev->name, dwrq->flags & IW_ENCODE_INDEX, dwrq->length, dwrq->flags, + dwrq->flags & IW_ENCODE_NOKEY ? "NOKEY" : "", + dwrq->flags & IW_ENCODE_DISABLED ? " DISABLED" : "", + dwrq->flags & IW_ENCODE_RESTRICTED ? " RESTRICTED" : "", + dwrq->flags & IW_ENCODE_OPEN ? " OPEN" : "", + dwrq->flags & IW_ENCODE_TEMP ? 
" TEMP" : "")); + + memset(&key, 0, sizeof(key)); + + if ((dwrq->flags & IW_ENCODE_INDEX) == 0) { + + for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) { + val = htod32(key.index); + if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val)))) + return error; + val = dtoh32(val); + if (val) + break; + } + + if (key.index == DOT11_MAX_DEFAULT_KEYS) + key.index = 0; + } else { + key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1; + if (key.index >= DOT11_MAX_DEFAULT_KEYS) + return -EINVAL; + } + + + if (!extra || !dwrq->length || (dwrq->flags & IW_ENCODE_NOKEY)) { + + val = htod32(key.index); + if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY, &val, sizeof(val)))) + return error; + } else { + key.len = dwrq->length; + + if (dwrq->length > sizeof(key.data)) + return -EINVAL; + + memcpy(key.data, extra, dwrq->length); + + key.flags = WL_PRIMARY_KEY; + switch (key.len) { + case WEP1_KEY_SIZE: + key.algo = CRYPTO_ALGO_WEP1; + break; + case WEP128_KEY_SIZE: + key.algo = CRYPTO_ALGO_WEP128; + break; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14) + case TKIP_KEY_SIZE: + key.algo = CRYPTO_ALGO_TKIP; + break; +#endif + case AES_KEY_SIZE: + key.algo = CRYPTO_ALGO_AES_CCM; + break; + default: + return -EINVAL; + } + + + swap_key_from_BE(&key); + if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key)))) + return error; + } + + + val = (dwrq->flags & IW_ENCODE_DISABLED) ? 0 : WEP_ENABLED; + + if ((error = dev_wlc_intvar_get(dev, "wsec", &wsec))) + return error; + + wsec &= ~(WEP_ENABLED); + wsec |= val; + + if ((error = dev_wlc_intvar_set(dev, "wsec", wsec))) + return error; + + + val = (dwrq->flags & IW_ENCODE_RESTRICTED) ? 1 : 0; + val = htod32(val); + if ((error = dev_wlc_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val)))) + return error; + + return 0; +} + +static int +wl_iw_get_encode( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_wsec_key_t key; + int error, val, wsec, auth; + + WL_TRACE(("%s: SIOCGIWENCODE\n", dev->name)); + + + bzero(&key, sizeof(wl_wsec_key_t)); + + if ((dwrq->flags & IW_ENCODE_INDEX) == 0) { + + for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) { + val = key.index; + if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val)))) + return error; + val = dtoh32(val); + if (val) + break; + } + } else + key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1; + + if (key.index >= DOT11_MAX_DEFAULT_KEYS) + key.index = 0; + + + + if ((error = dev_wlc_ioctl(dev, WLC_GET_WSEC, &wsec, sizeof(wsec))) || + (error = dev_wlc_ioctl(dev, WLC_GET_AUTH, &auth, sizeof(auth)))) + return error; + + swap_key_to_BE(&key); + + wsec = dtoh32(wsec); + auth = dtoh32(auth); + + dwrq->length = MIN(DOT11_MAX_KEY_SIZE, key.len); + + + dwrq->flags = key.index + 1; + if (!(wsec & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))) { + + dwrq->flags |= IW_ENCODE_DISABLED; + } + if (auth) { + + dwrq->flags |= IW_ENCODE_RESTRICTED; + } + + + if (dwrq->length && extra) + memcpy(extra, key.data, dwrq->length); + + return 0; +} + +static int +wl_iw_set_power( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, pm; + + WL_TRACE(("%s: SIOCSIWPOWER\n", dev->name)); + + pm = vwrq->disabled ? 
PM_OFF : PM_MAX; + + pm = htod32(pm); + if ((error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm)))) + return error; + + return 0; +} + +static int +wl_iw_get_power( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, pm; + + WL_TRACE(("%s: SIOCGIWPOWER\n", dev->name)); + + if ((error = dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm)))) + return error; + + pm = dtoh32(pm); + vwrq->disabled = pm ? 0 : 1; + vwrq->flags = IW_POWER_ALL_R; + + return 0; +} + +#if WIRELESS_EXT > 17 +static int +wl_iw_set_wpaie( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *iwp, + char *extra +) +{ + + WL_TRACE(("%s: SIOCSIWGENIE\n", dev->name)); + + RETURN_IF_EXTRA_NULL(extra); + +#ifdef DHD_DEBUG + { + int i; + + for (i = 0; i < iwp->length; i++) + WL_TRACE(("%02X ", extra[i])); + WL_TRACE(("\n")); + } +#endif + + dev_wlc_bufvar_set(dev, "wpaie", extra, iwp->length); + + return 0; +} + +static int +wl_iw_get_wpaie( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *iwp, + char *extra +) +{ + WL_TRACE(("%s: SIOCGIWGENIE\n", dev->name)); + iwp->length = 64; + dev_wlc_bufvar_get(dev, "wpaie", extra, iwp->length); + return 0; +} + +static int +wl_iw_set_encodeext( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_wsec_key_t key; + int error; + struct iw_encode_ext *iwe; + + WL_TRACE(("%s: SIOCSIWENCODEEXT\n", dev->name)); + + RETURN_IF_EXTRA_NULL(extra); + + memset(&key, 0, sizeof(key)); + iwe = (struct iw_encode_ext *)extra; + + + if (dwrq->flags & IW_ENCODE_DISABLED) { + + } + + + key.index = 0; + if (dwrq->flags & IW_ENCODE_INDEX) + key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1; + + key.len = iwe->key_len; + + + if (!ETHER_ISMULTI(iwe->addr.sa_data)) + bcopy((void *)&iwe->addr.sa_data, (char *)&key.ea, ETHER_ADDR_LEN); + + + if (key.len == 0) { + if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { + WL_WSEC(("Changing the the primary Key to %d\n", key.index)); + + key.index = htod32(key.index); + error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY, + &key.index, sizeof(key.index)); + if (error) + return error; + } + + else { + swap_key_from_BE(&key); + dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key)); + } + } + else { + if (iwe->key_len > sizeof(key.data)) + return -EINVAL; + + WL_WSEC(("Setting the key index %d\n", key.index)); + if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { + WL_WSEC(("key is a Primary Key\n")); + key.flags = WL_PRIMARY_KEY; + } + + bcopy((void *)iwe->key, key.data, iwe->key_len); + + if (iwe->alg == IW_ENCODE_ALG_TKIP) { + uint8 keybuf[8]; + bcopy(&key.data[24], keybuf, sizeof(keybuf)); + bcopy(&key.data[16], &key.data[24], sizeof(keybuf)); + bcopy(keybuf, &key.data[16], sizeof(keybuf)); + } + + + if (iwe->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) { + uchar *ivptr; + ivptr = (uchar *)iwe->rx_seq; + key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) | + (ivptr[3] << 8) | ivptr[2]; + key.rxiv.lo = (ivptr[1] << 8) | ivptr[0]; + key.iv_initialized = TRUE; + } + + switch (iwe->alg) { + case IW_ENCODE_ALG_NONE: + key.algo = CRYPTO_ALGO_OFF; + break; + case IW_ENCODE_ALG_WEP: + if (iwe->key_len == WEP1_KEY_SIZE) + key.algo = CRYPTO_ALGO_WEP1; + else + key.algo = CRYPTO_ALGO_WEP128; + break; + case IW_ENCODE_ALG_TKIP: + key.algo = CRYPTO_ALGO_TKIP; + break; + case IW_ENCODE_ALG_CCMP: + key.algo = CRYPTO_ALGO_AES_CCM; + break; + default: + break; + } + swap_key_from_BE(&key); + + dhd_wait_pend8021x(dev); + + error = 
dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key)); + if (error) + return error; + } + return 0; +} + +#if WIRELESS_EXT > 17 +struct { + pmkid_list_t pmkids; + pmkid_t foo[MAXPMKID-1]; +} pmkid_list; + +static int +wl_iw_set_pmksa( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + struct iw_pmksa *iwpmksa; + uint i; + int ret = 0; + char eabuf[ETHER_ADDR_STR_LEN]; + pmkid_t * pmkid_array = pmkid_list.pmkids.pmkid; + + WL_WSEC(("%s: SIOCSIWPMKSA\n", dev->name)); + + RETURN_IF_EXTRA_NULL(extra); + + iwpmksa = (struct iw_pmksa *)extra; + bzero((char *)eabuf, ETHER_ADDR_STR_LEN); + + if (iwpmksa->cmd == IW_PMKSA_FLUSH) { + WL_WSEC(("wl_iw_set_pmksa - IW_PMKSA_FLUSH\n")); + bzero((char *)&pmkid_list, sizeof(pmkid_list)); + } + + else if (iwpmksa->cmd == IW_PMKSA_REMOVE) { + { + pmkid_list_t pmkid, *pmkidptr; + uint j; + pmkidptr = &pmkid; + + bcopy(&iwpmksa->bssid.sa_data[0], &pmkidptr->pmkid[0].BSSID, + ETHER_ADDR_LEN); + bcopy(&iwpmksa->pmkid[0], &pmkidptr->pmkid[0].PMKID, WPA2_PMKID_LEN); + + WL_WSEC(("wl_iw_set_pmksa,IW_PMKSA_REMOVE - PMKID: %s = ", + bcm_ether_ntoa(&pmkidptr->pmkid[0].BSSID, + eabuf))); + for (j = 0; j < WPA2_PMKID_LEN; j++) + WL_WSEC(("%02x ", pmkidptr->pmkid[0].PMKID[j])); + WL_WSEC(("\n")); + } + + for (i = 0; i < pmkid_list.pmkids.npmkid; i++) + if (!bcmp(&iwpmksa->bssid.sa_data[0], &pmkid_array[i].BSSID, + ETHER_ADDR_LEN)) + break; + + if ((pmkid_list.pmkids.npmkid > 0) && (i < pmkid_list.pmkids.npmkid)) { + bzero(&pmkid_array[i], sizeof(pmkid_t)); + for (; i < (pmkid_list.pmkids.npmkid - 1); i++) { + bcopy(&pmkid_array[i+1].BSSID, + &pmkid_array[i].BSSID, + ETHER_ADDR_LEN); + bcopy(&pmkid_array[i+1].PMKID, + &pmkid_array[i].PMKID, + WPA2_PMKID_LEN); + } + pmkid_list.pmkids.npmkid--; + } + else + ret = -EINVAL; + } + + else if (iwpmksa->cmd == IW_PMKSA_ADD) { + for (i = 0; i < pmkid_list.pmkids.npmkid; i++) + if (!bcmp(&iwpmksa->bssid.sa_data[0], &pmkid_array[i].BSSID, + ETHER_ADDR_LEN)) + break; + if (i < MAXPMKID) { + bcopy(&iwpmksa->bssid.sa_data[0], + &pmkid_array[i].BSSID, + ETHER_ADDR_LEN); + bcopy(&iwpmksa->pmkid[0], &pmkid_array[i].PMKID, + WPA2_PMKID_LEN); + if (i == pmkid_list.pmkids.npmkid) + pmkid_list.pmkids.npmkid++; + } + else + ret = -EINVAL; + + { + uint j; + uint k; + k = pmkid_list.pmkids.npmkid; + WL_WSEC(("wl_iw_set_pmksa,IW_PMKSA_ADD - PMKID: %s = ", + bcm_ether_ntoa(&pmkid_array[k].BSSID, + eabuf))); + for (j = 0; j < WPA2_PMKID_LEN; j++) + WL_WSEC(("%02x ", pmkid_array[k].PMKID[j])); + WL_WSEC(("\n")); + } + } + WL_WSEC(("PRINTING pmkid LIST - No of elements %d", pmkid_list.pmkids.npmkid)); + for (i = 0; i < pmkid_list.pmkids.npmkid; i++) { + uint j; + WL_WSEC(("\nPMKID[%d]: %s = ", i, + bcm_ether_ntoa(&pmkid_array[i].BSSID, + eabuf))); + for (j = 0; j < WPA2_PMKID_LEN; j++) + WL_WSEC(("%02x ", pmkid_array[i].PMKID[j])); + } + WL_WSEC(("\n")); + + if (!ret) + ret = dev_wlc_bufvar_set(dev, "pmkid_info", (char *)&pmkid_list, + sizeof(pmkid_list)); + return ret; +} +#endif + +static int +wl_iw_get_encodeext( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + WL_TRACE(("%s: SIOCGIWENCODEEXT\n", dev->name)); + return 0; +} + + +static uint32 +wl_iw_create_wpaauth_wsec(struct net_device *dev) +{ + wl_iw_t *iw = NETDEV_PRIV(dev); + uint32 wsec; + + + if (iw->pcipher & (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104)) + wsec = WEP_ENABLED; + else if (iw->pcipher & IW_AUTH_CIPHER_TKIP) + wsec = TKIP_ENABLED; + else if (iw->pcipher & 
IW_AUTH_CIPHER_CCMP) + wsec = AES_ENABLED; + else + wsec = 0; + + + if (iw->gcipher & (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104)) + wsec |= WEP_ENABLED; + else if (iw->gcipher & IW_AUTH_CIPHER_TKIP) + wsec |= TKIP_ENABLED; + else if (iw->gcipher & IW_AUTH_CIPHER_CCMP) + wsec |= AES_ENABLED; + + + if (wsec == 0 && iw->privacy_invoked) + wsec = WEP_ENABLED; + + WL_INFORM(("%s: returning wsec of %d\n", __FUNCTION__, wsec)); + + return wsec; +} + +static int +wl_iw_set_wpaauth( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error = 0; + int paramid; + int paramval; + int val = 0; + wl_iw_t *iw = NETDEV_PRIV(dev); + + paramid = vwrq->flags & IW_AUTH_INDEX; + paramval = vwrq->value; + + WL_TRACE(("%s: SIOCSIWAUTH, %s(%d), paramval = 0x%0x\n", + dev->name, + paramid == IW_AUTH_WPA_VERSION ? "IW_AUTH_WPA_VERSION" : + paramid == IW_AUTH_CIPHER_PAIRWISE ? "IW_AUTH_CIPHER_PAIRWISE" : + paramid == IW_AUTH_CIPHER_GROUP ? "IW_AUTH_CIPHER_GROUP" : + paramid == IW_AUTH_KEY_MGMT ? "IW_AUTH_KEY_MGMT" : + paramid == IW_AUTH_TKIP_COUNTERMEASURES ? "IW_AUTH_TKIP_COUNTERMEASURES" : + paramid == IW_AUTH_DROP_UNENCRYPTED ? "IW_AUTH_DROP_UNENCRYPTED" : + paramid == IW_AUTH_80211_AUTH_ALG ? "IW_AUTH_80211_AUTH_ALG" : + paramid == IW_AUTH_WPA_ENABLED ? "IW_AUTH_WPA_ENABLED" : + paramid == IW_AUTH_RX_UNENCRYPTED_EAPOL ? "IW_AUTH_RX_UNENCRYPTED_EAPOL" : + paramid == IW_AUTH_ROAMING_CONTROL ? "IW_AUTH_ROAMING_CONTROL" : + paramid == IW_AUTH_PRIVACY_INVOKED ? "IW_AUTH_PRIVACY_INVOKED" : + "UNKNOWN", + paramid, paramval)); + +#if defined(SOFTAP) + if (ap_cfg_running) { + WL_TRACE(("%s: Not executed, reason -'SOFTAP is active'\n", __FUNCTION__)); + return 0; + } +#endif + + switch (paramid) { + case IW_AUTH_WPA_VERSION: + + if (paramval & IW_AUTH_WPA_VERSION_DISABLED) + val = WPA_AUTH_DISABLED; + else if (paramval & (IW_AUTH_WPA_VERSION_WPA)) + val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED; + else if (paramval & IW_AUTH_WPA_VERSION_WPA2) + val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED; + WL_ERROR(("%s: %d: setting wpa_auth to 0x%0x\n", __FUNCTION__, __LINE__, val)); + if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val))) + return error; + break; + + case IW_AUTH_CIPHER_PAIRWISE: + iw->pcipher = paramval; + val = wl_iw_create_wpaauth_wsec(dev); + if ((error = dev_wlc_intvar_set(dev, "wsec", val))) + return error; + break; + + case IW_AUTH_CIPHER_GROUP: + iw->gcipher = paramval; + val = wl_iw_create_wpaauth_wsec(dev); + if ((error = dev_wlc_intvar_set(dev, "wsec", val))) + return error; + break; + + case IW_AUTH_KEY_MGMT: + if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val))) + return error; + + if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) { + if (paramval & IW_AUTH_KEY_MGMT_PSK) + val = WPA_AUTH_PSK; + else + val = WPA_AUTH_UNSPECIFIED; + if (paramval & 0x04) + val |= WPA2_AUTH_FT; + } + else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) { + if (paramval & IW_AUTH_KEY_MGMT_PSK) + val = WPA2_AUTH_PSK; + else + val = WPA2_AUTH_UNSPECIFIED; + if (paramval & 0x04) + val |= WPA2_AUTH_FT; + } + + else if (paramval & IW_AUTH_KEY_MGMT_PSK) { + if (iw->wpaversion == IW_AUTH_WPA_VERSION_WPA) + val = WPA_AUTH_PSK; + else if (iw->wpaversion == IW_AUTH_WPA_VERSION_WPA2) + val = WPA2_AUTH_PSK; + else + val = WPA_AUTH_DISABLED; + } else if (paramval & IW_AUTH_KEY_MGMT_802_1X) { + if (iw->wpaversion == IW_AUTH_WPA_VERSION_WPA) + val = WPA_AUTH_UNSPECIFIED; + else if (iw->wpaversion == IW_AUTH_WPA_VERSION_WPA2) + val = WPA2_AUTH_UNSPECIFIED; + else + val = 
WPA_AUTH_DISABLED;
+		}
+		else
+			val = WPA_AUTH_DISABLED;
+
+		WL_INFORM(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
+		if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
+			return error;
+		break;
+
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		dev_wlc_bufvar_set(dev, "tkip_countermeasures", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_80211_AUTH_ALG:
+		WL_INFORM(("Setting the D11auth %d\n", paramval));
+		if (paramval == IW_AUTH_ALG_OPEN_SYSTEM)
+			val = 0;
+		else if (paramval == IW_AUTH_ALG_SHARED_KEY)
+			val = 1;
+		else if (paramval == (IW_AUTH_ALG_OPEN_SYSTEM | IW_AUTH_ALG_SHARED_KEY))
+			val = 2;
+		else
+			error = 1;
+		if (!error && (error = dev_wlc_intvar_set(dev, "auth", val)))
+			return error;
+		break;
+
+	case IW_AUTH_WPA_ENABLED:
+		if (paramval == 0) {
+			iw->privacy_invoked = 0;
+			iw->pcipher = 0;
+			iw->gcipher = 0;
+			val = wl_iw_create_wpaauth_wsec(dev);
+			if ((error = dev_wlc_intvar_set(dev, "wsec", val)))
+				return error;
+			WL_INFORM(("%s: %d: setting wpa_auth to %d, wsec to %d\n",
+				__FUNCTION__, __LINE__, paramval, val));
+			dev_wlc_intvar_set(dev, "wpa_auth", paramval);
+			return error;
+		}
+		break;
+
+	case IW_AUTH_DROP_UNENCRYPTED:
+		if ((error = dev_wlc_intvar_set(dev, "wsec_restrict", paramval)))
+			return error;
+		break;
+
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		dev_wlc_bufvar_set(dev, "rx_unencrypted_eapol", (char *)&paramval, 1);
+		break;
+
+#if WIRELESS_EXT > 17
+	case IW_AUTH_ROAMING_CONTROL:
+		WL_INFORM(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+		break;
+
+	case IW_AUTH_PRIVACY_INVOKED:
+		iw->privacy_invoked = paramval;
+		val = wl_iw_create_wpaauth_wsec(dev);
+		if ((error = dev_wlc_intvar_set(dev, "wsec", val)))
+			return error;
+		break;
+
+#endif
+	default:
+		break;
+	}
+	return 0;
+}
+#define VAL_PSK(_val) (((_val) & WPA_AUTH_PSK) || ((_val) & WPA2_AUTH_PSK))
+
+static int
+wl_iw_get_wpaauth(
+	struct net_device *dev,
+	struct iw_request_info *info,
+	struct iw_param *vwrq,
+	char *extra
+)
+{
+	int error;
+	int paramid;
+	int paramval = 0;
+	int val;
+	wl_iw_t *iw = NETDEV_PRIV(dev);
+
+	WL_TRACE(("%s: SIOCGIWAUTH\n", dev->name));
+
+	paramid = vwrq->flags & IW_AUTH_INDEX;
+
+	switch (paramid) {
+	case IW_AUTH_WPA_VERSION:
+		paramval = iw->wpaversion;
+		break;
+
+	case IW_AUTH_CIPHER_PAIRWISE:
+		paramval = iw->pcipher;
+		break;
+
+	case IW_AUTH_CIPHER_GROUP:
+		paramval = iw->gcipher;
+		break;
+
+	case IW_AUTH_KEY_MGMT:
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+			return error;
+		if (VAL_PSK(val))
+			paramval = IW_AUTH_KEY_MGMT_PSK;
+		else
+			paramval = IW_AUTH_KEY_MGMT_802_1X;
+		break;
+
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		dev_wlc_bufvar_get(dev, "tkip_countermeasures", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_DROP_UNENCRYPTED:
+		dev_wlc_intvar_get(dev, "wsec_restrict", &paramval);
+		break;
+
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		dev_wlc_bufvar_get(dev, "rx_unencrypted_eapol", (char *)&paramval, 1);
+		break;
+
+	case IW_AUTH_80211_AUTH_ALG:
+		if ((error = dev_wlc_intvar_get(dev, "auth", &val)))
+			return error;
+		if (!val)
+			paramval = IW_AUTH_ALG_OPEN_SYSTEM;
+		else
+			paramval = IW_AUTH_ALG_SHARED_KEY;
+		break;
+
+	case IW_AUTH_WPA_ENABLED:
+		if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+			return error;
+		if (val)
+			paramval = TRUE;
+		else
+			paramval = FALSE;
+		break;
+
+#if WIRELESS_EXT > 17
+	case IW_AUTH_ROAMING_CONTROL:
+		WL_ERROR(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+		break;
+
+	case IW_AUTH_PRIVACY_INVOKED:
+		paramval = iw->privacy_invoked;
+		break;
+
+#endif
+	}
+	vwrq->value = paramval;
+	return
0; +} +#endif + + +#ifdef SOFTAP + +static int ap_macmode = MACLIST_MODE_DISABLED; +static struct mflist ap_black_list; + +static int +wl_iw_parse_wep(char *keystr, wl_wsec_key_t *key) +{ + char hex[] = "XX"; + unsigned char *data = key->data; + + switch (strlen(keystr)) { + case 5: + case 13: + case 16: + key->len = strlen(keystr); + memcpy(data, keystr, key->len + 1); + break; + case 12: + case 28: + case 34: + case 66: + + if (!strnicmp(keystr, "0x", 2)) + keystr += 2; + else + return -1; + + case 10: + case 26: + case 32: + case 64: + key->len = strlen(keystr) / 2; + while (*keystr) { + strncpy(hex, keystr, 2); + *data++ = (char) bcm_strtoul(hex, NULL, 16); + keystr += 2; + } + break; + default: + return -1; + } + + switch (key->len) { + case 5: + key->algo = CRYPTO_ALGO_WEP1; + break; + case 13: + key->algo = CRYPTO_ALGO_WEP128; + break; + case 16: + + key->algo = CRYPTO_ALGO_AES_CCM; + break; + case 32: + key->algo = CRYPTO_ALGO_TKIP; + break; + default: + return -1; + } + + + key->flags |= WL_PRIMARY_KEY; + + return 0; +} + +#ifdef EXT_WPA_CRYPTO +#define SHA1HashSize 20 +extern void pbkdf2_sha1(const char *passphrase, const char *ssid, size_t ssid_len, + int iterations, u8 *buf, size_t buflen); + +#else + +#define SHA1HashSize 20 +static int +pbkdf2_sha1(const char *passphrase, const char *ssid, size_t ssid_len, + int iterations, u8 *buf, size_t buflen) +{ + WL_ERROR(("WARNING: %s is not implemented !!!\n", __FUNCTION__)); + return -1; +} + +#endif + + +static int +dev_iw_write_cfg1_bss_var(struct net_device *dev, int val) +{ + struct { + int cfg; + int val; + } bss_setbuf; + + int bss_set_res; + char smbuf[WLC_IOCTL_SMLEN]; + memset(smbuf, 0, sizeof(smbuf)); + + bss_setbuf.cfg = 1; + bss_setbuf.val = val; + + bss_set_res = dev_iw_iovar_setbuf(dev, "bss", + &bss_setbuf, sizeof(bss_setbuf), smbuf, sizeof(smbuf)); + WL_TRACE(("%s: bss_set_result:%d set with %d\n", __FUNCTION__, bss_set_res, val)); + + return bss_set_res; +} + + + +#ifndef AP_ONLY +static int +wl_bssiovar_mkbuf( + const char *iovar, + int bssidx, + void *param, + int paramlen, + void *bufptr, + int buflen, + int *perr) +{ + const char *prefix = "bsscfg:"; + int8* p; + uint prefixlen; + uint namelen; + uint iolen; + + prefixlen = strlen(prefix); + namelen = strlen(iovar) + 1; + iolen = prefixlen + namelen + sizeof(int) + paramlen; + + + if (buflen < 0 || iolen > (uint)buflen) { + *perr = BCME_BUFTOOSHORT; + return 0; + } + + p = (int8*)bufptr; + + + memcpy(p, prefix, prefixlen); + p += prefixlen; + + + memcpy(p, iovar, namelen); + p += namelen; + + + bssidx = htod32(bssidx); + memcpy(p, &bssidx, sizeof(int32)); + p += sizeof(int32); + + + if (paramlen) + memcpy(p, param, paramlen); + + *perr = 0; + return iolen; +} +#endif + + + + +#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base)) + + +#if defined(CSCAN) + + + +static int +wl_iw_combined_scan_set(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid, int nchan) +{ + int params_size = WL_SCAN_PARAMS_FIXED_SIZE + WL_NUMCHANNELS * sizeof(uint16); + int err = 0; + char *p; + int i; + iscan_info_t *iscan = g_iscan; + + WL_TRACE(("%s nssid=%d nchan=%d\n", __FUNCTION__, nssid, nchan)); + + if ((!dev) && (!g_iscan) && (!iscan->iscan_ex_params_p)) { + WL_ERROR(("%s error exit\n", __FUNCTION__)); + err = -1; + goto exit; + } + +#ifdef PNO_SUPPORT + + if (dhd_dev_get_pno_status(dev)) { + WL_ERROR(("%s: Scan called when PNO is active\n", __FUNCTION__)); + } +#endif + + params_size += WL_SCAN_PARAMS_SSID_MAX * sizeof(wlc_ssid_t); + + + if (nssid > 0) { 
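+		/* Additional SSIDs are packed right after the channel list,
+		 * rounded up to a 32-bit boundary within the scan params buffer.
+		 */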
+ i = OFFSETOF(wl_scan_params_t, channel_list) + nchan * sizeof(uint16); + i = ROUNDUP(i, sizeof(uint32)); + if (i + nssid * sizeof(wlc_ssid_t) > params_size) { + printf("additional ssids exceed params_size\n"); + err = -1; + goto exit; + } + + p = ((char*)&iscan->iscan_ex_params_p->params) + i; + memcpy(p, ssids_local, nssid * sizeof(wlc_ssid_t)); + p += nssid * sizeof(wlc_ssid_t); + } else { + p = (char*)iscan->iscan_ex_params_p->params.channel_list + nchan * sizeof(uint16); + } + + + iscan->iscan_ex_params_p->params.channel_num = + htod32((nssid << WL_SCAN_PARAMS_NSSID_SHIFT) | + (nchan & WL_SCAN_PARAMS_COUNT_MASK)); + + nssid = (uint) + ((iscan->iscan_ex_params_p->params.channel_num >> WL_SCAN_PARAMS_NSSID_SHIFT) & + WL_SCAN_PARAMS_COUNT_MASK); + + + params_size = (int) (p - (char*)iscan->iscan_ex_params_p + nssid * sizeof(wlc_ssid_t)); + iscan->iscan_ex_param_size = params_size; + + iscan->list_cur = iscan->list_hdr; + iscan->iscan_state = ISCAN_STATE_SCANING; + wl_iw_set_event_mask(dev); + mod_timer(&iscan->timer, jiffies + msecs_to_jiffies(iscan->timer_ms)); + + iscan->timer_on = 1; + +#ifdef SCAN_DUMP + { + int i; + WL_SCAN(("\n### List of SSIDs to scan ###\n")); + for (i = 0; i < nssid; i++) { + if (!ssids_local[i].SSID_len) + WL_SCAN(("%d: Broadcast scan\n", i)); + else + WL_SCAN(("%d: scan for %s size =%d\n", i, + ssids_local[i].SSID, ssids_local[i].SSID_len)); + } + WL_SCAN(("### List of channels to scan ###\n")); + for (i = 0; i < nchan; i++) + { + WL_SCAN(("%d ", iscan->iscan_ex_params_p->params.channel_list[i])); + } + WL_SCAN(("\nnprobes=%d\n", iscan->iscan_ex_params_p->params.nprobes)); + WL_SCAN(("active_time=%d\n", iscan->iscan_ex_params_p->params.active_time)); + WL_SCAN(("passive_time=%d\n", iscan->iscan_ex_params_p->params.passive_time)); + WL_SCAN(("home_time=%d\n", iscan->iscan_ex_params_p->params.home_time)); + WL_SCAN(("scan_type=%d\n", iscan->iscan_ex_params_p->params.scan_type)); + WL_SCAN(("\n###################\n")); + } +#endif + + if (params_size > WLC_IOCTL_MEDLEN) { + WL_ERROR(("Set ISCAN for %s due to params_size=%d \n", + __FUNCTION__, params_size)); + err = -1; + } + + if ((err = dev_iw_iovar_setbuf(dev, "iscan", iscan->iscan_ex_params_p, + iscan->iscan_ex_param_size, + iscan->ioctlbuf, sizeof(iscan->ioctlbuf)))) { + WL_TRACE(("Set ISCAN for %s failed with %d\n", __FUNCTION__, err)); + err = -1; + } + +exit: + return err; +} + + +static int +iwpriv_set_cscan(struct net_device *dev, struct iw_request_info *info, + union iwreq_data *wrqu, char *ext) +{ + int res; + char *extra = NULL; + iscan_info_t *iscan = g_iscan; + wlc_ssid_t ssids_local[WL_SCAN_PARAMS_SSID_MAX]; + int nssid = 0; + int nchan = 0; + char *str_ptr; + + WL_TRACE(("%s: info->cmd:%x, info->flags:%x, u.data=0x%p, u.len=%d\n", + __FUNCTION__, info->cmd, info->flags, + wrqu->data.pointer, wrqu->data.length)); + + if (g_onoff == G_WLAN_SET_OFF) { + WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__)); + return -ENODEV; + } + + if (wrqu->data.length == 0) { + WL_ERROR(("IWPRIV argument len = 0\n")); + return -EINVAL; + } + + if (!iscan->iscan_ex_params_p) { + return -EFAULT; + } + + if (!(extra = kmalloc(wrqu->data.length+1, GFP_KERNEL))) + return -ENOMEM; + + if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) { + res = -EFAULT; + goto exit_proc; + } + + extra[wrqu->data.length] = 0; + WL_ERROR(("Got str param in iw_point:\n %s\n", extra)); + + str_ptr = extra; + + + if (strncmp(str_ptr, GET_SSID, strlen(GET_SSID))) { + WL_ERROR(("%s Error: extracting SSID='' 
string\n", __FUNCTION__)); + res = -EINVAL; + goto exit_proc; + } + + str_ptr += strlen(GET_SSID); + nssid = wl_iw_parse_ssid_list(&str_ptr, ssids_local, nssid, + WL_SCAN_PARAMS_SSID_MAX); + if (nssid == -1) { + WL_ERROR(("%s wrong ssid list", __FUNCTION__)); + res = -EINVAL; + goto exit_proc; + } + + memset(iscan->iscan_ex_params_p, 0, iscan->iscan_ex_param_size); + ASSERT(iscan->iscan_ex_param_size < WLC_IOCTL_MAXLEN); + + + wl_iw_iscan_prep(&iscan->iscan_ex_params_p->params, NULL); + iscan->iscan_ex_params_p->version = htod32(ISCAN_REQ_VERSION); + iscan->iscan_ex_params_p->action = htod16(WL_SCAN_ACTION_START); + iscan->iscan_ex_params_p->scan_duration = htod16(0); + + + if ((nchan = wl_iw_parse_channel_list(&str_ptr, + &iscan->iscan_ex_params_p->params.channel_list[0], + WL_NUMCHANNELS)) == -1) { + WL_ERROR(("%s missing channel list\n", __FUNCTION__)); + res = -EINVAL; + goto exit_proc; + } + + + get_parameter_from_string(&str_ptr, + GET_NPROBE, PTYPE_INTDEC, + &iscan->iscan_ex_params_p->params.nprobes, 2); + + get_parameter_from_string(&str_ptr, GET_ACTIVE_ASSOC_DWELL, PTYPE_INTDEC, + &iscan->iscan_ex_params_p->params.active_time, 4); + + get_parameter_from_string(&str_ptr, GET_PASSIVE_ASSOC_DWELL, PTYPE_INTDEC, + &iscan->iscan_ex_params_p->params.passive_time, 4); + + get_parameter_from_string(&str_ptr, GET_HOME_DWELL, PTYPE_INTDEC, + &iscan->iscan_ex_params_p->params.home_time, 4); + + get_parameter_from_string(&str_ptr, GET_SCAN_TYPE, PTYPE_INTDEC, + &iscan->iscan_ex_params_p->params.scan_type, 1); + + + res = wl_iw_combined_scan_set(dev, ssids_local, nssid, nchan); + +exit_proc: + kfree(extra); + + return res; +} + + +static int +wl_iw_set_cscan( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int res = -1; + iscan_info_t *iscan = g_iscan; + wlc_ssid_t ssids_local[WL_SCAN_PARAMS_SSID_MAX]; + int nssid = 0; + int nchan = 0; + cscan_tlv_t *cscan_tlv_temp; + char type; + char *str_ptr; + int tlv_size_left; +#ifdef TLV_DEBUG + int i; + char tlv_in_example[] = { + 'C', 'S', 'C', 'A', 'N', ' ', + 0x53, 0x01, 0x00, 0x00, + 'S', + 0x00, + 'S', + 0x04, + 'B', 'R', 'C', 'M', + 'C', + 0x06, + 'P', + 0x94, + 0x11, + 'T', + 0x01 + }; +#endif + + WL_TRACE(("\n### %s: info->cmd:%x, info->flags:%x, u.data=0x%p, u.len=%d\n", + __FUNCTION__, info->cmd, info->flags, + wrqu->data.pointer, wrqu->data.length)); + + net_os_wake_lock(dev); + + if (g_onoff == G_WLAN_SET_OFF) { + WL_TRACE(("%s: driver is not up yet after START\n", __FUNCTION__)); + return -1; + } + + if (wrqu->data.length < (strlen(CSCAN_COMMAND) + sizeof(cscan_tlv_t))) { + WL_ERROR(("%s argument=%d less %d\n", __FUNCTION__, + wrqu->data.length, (int)(strlen(CSCAN_COMMAND) + sizeof(cscan_tlv_t)))); + return -1; + } + +#ifdef TLV_DEBUG + memcpy(extra, tlv_in_example, sizeof(tlv_in_example)); + wrqu->data.length = sizeof(tlv_in_example); + for (i = 0; i < wrqu->data.length; i++) + printf("%02X ", extra[i]); + printf("\n"); +#endif + + str_ptr = extra; + str_ptr += strlen(CSCAN_COMMAND); + tlv_size_left = wrqu->data.length - strlen(CSCAN_COMMAND); + + cscan_tlv_temp = (cscan_tlv_t *)str_ptr; + memset(ssids_local, 0, sizeof(ssids_local)); + + if ((cscan_tlv_temp->prefix == CSCAN_TLV_PREFIX) && + (cscan_tlv_temp->version == CSCAN_TLV_VERSION) && + (cscan_tlv_temp->subver == CSCAN_TLV_SUBVERSION)) + { + str_ptr += sizeof(cscan_tlv_t); + tlv_size_left -= sizeof(cscan_tlv_t); + + + if ((nssid = wl_iw_parse_ssid_list_tlv(&str_ptr, ssids_local, + WL_SCAN_PARAMS_SSID_MAX, &tlv_size_left)) <= 
0) { + WL_ERROR(("SSID is not presented or corrupted ret=%d\n", nssid)); + goto exit_proc; + } + else { + + memset(iscan->iscan_ex_params_p, 0, iscan->iscan_ex_param_size); + + + wl_iw_iscan_prep(&iscan->iscan_ex_params_p->params, NULL); + iscan->iscan_ex_params_p->version = htod32(ISCAN_REQ_VERSION); + iscan->iscan_ex_params_p->action = htod16(WL_SCAN_ACTION_START); + iscan->iscan_ex_params_p->scan_duration = htod16(0); + + + while (tlv_size_left > 0) + { + type = str_ptr[0]; + switch (type) { + case CSCAN_TLV_TYPE_CHANNEL_IE: + + if ((nchan = wl_iw_parse_channel_list_tlv(&str_ptr, + &iscan->iscan_ex_params_p->params.channel_list[0], + WL_NUMCHANNELS, &tlv_size_left)) == -1) { + WL_ERROR(("%s missing channel list\n", + __FUNCTION__)); + goto exit_proc; + } + break; + case CSCAN_TLV_TYPE_NPROBE_IE: + if ((res = wl_iw_parse_data_tlv(&str_ptr, + &iscan->iscan_ex_params_p->params.nprobes, + sizeof(iscan->iscan_ex_params_p->params.nprobes), + type, sizeof(char), &tlv_size_left)) == -1) { + WL_ERROR(("%s return %d\n", + __FUNCTION__, res)); + goto exit_proc; + } + break; + case CSCAN_TLV_TYPE_ACTIVE_IE: + if ((res = wl_iw_parse_data_tlv(&str_ptr, + &iscan->iscan_ex_params_p->params.active_time, + sizeof(iscan->iscan_ex_params_p->params.active_time), + type, sizeof(short), &tlv_size_left)) == -1) { + WL_ERROR(("%s return %d\n", + __FUNCTION__, res)); + goto exit_proc; + } + break; + case CSCAN_TLV_TYPE_PASSIVE_IE: + if ((res = wl_iw_parse_data_tlv(&str_ptr, + &iscan->iscan_ex_params_p->params.passive_time, + sizeof(iscan->iscan_ex_params_p->params.passive_time), + type, sizeof(short), &tlv_size_left)) == -1) { + WL_ERROR(("%s return %d\n", + __FUNCTION__, res)); + goto exit_proc; + } + break; + case CSCAN_TLV_TYPE_HOME_IE: + if ((res = wl_iw_parse_data_tlv(&str_ptr, + &iscan->iscan_ex_params_p->params.home_time, + sizeof(iscan->iscan_ex_params_p->params.home_time), + type, sizeof(short), &tlv_size_left)) == -1) { + WL_ERROR(("%s return %d\n", + __FUNCTION__, res)); + goto exit_proc; + } + break; + case CSCAN_TLV_TYPE_STYPE_IE: + if ((res = wl_iw_parse_data_tlv(&str_ptr, + &iscan->iscan_ex_params_p->params.scan_type, + sizeof(iscan->iscan_ex_params_p->params.scan_type), + type, sizeof(char), &tlv_size_left)) == -1) { + WL_ERROR(("%s return %d\n", + __FUNCTION__, res)); + goto exit_proc; + } + break; + + default : + WL_ERROR(("%s get unkwown type %X\n", + __FUNCTION__, type)); + goto exit_proc; + break; + } + } + } + } + else { + WL_ERROR(("%s get wrong TLV command\n", __FUNCTION__)); + goto exit_proc; + } + +#if defined(CONFIG_FIRST_SCAN) + if (g_first_broadcast_scan < BROADCAST_SCAN_FIRST_RESULT_CONSUMED) { + if (++g_first_counter_scans == MAX_ALLOWED_BLOCK_SCAN_FROM_FIRST_SCAN) { + + WL_ERROR(("%s Clean up First scan flag which is %d\n", + __FUNCTION__, g_first_broadcast_scan)); + g_first_broadcast_scan = BROADCAST_SCAN_FIRST_RESULT_CONSUMED; + } + else { + WL_ERROR(("%s Ignoring CSCAN : First Scan is not done yet %d\n", + __FUNCTION__, g_first_counter_scans)); + return -EBUSY; + } + } +#endif + + + res = wl_iw_combined_scan_set(dev, ssids_local, nssid, nchan); + +exit_proc: + net_os_wake_unlock(dev); + return res; +} + +#endif + +#ifdef CONFIG_WPS2 +static int +wl_iw_del_wps_probe_req_ie( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int ret; + vndr_ie_setbuf_t *ie_delbuf; + + if (g_wps_probe_req_ie) { + ie_delbuf = (vndr_ie_setbuf_t *)(g_wps_probe_req_ie + strlen("vndr_ie ")); + strncpy(ie_delbuf->cmd, "del", 3); + ie_delbuf->cmd[3] 
= '\0'; + + ret = dev_wlc_ioctl(dev, WLC_SET_VAR, g_wps_probe_req_ie, g_wps_probe_req_ie_len); + if (ret) { + WL_ERROR(("ioctl failed %d \n", ret)); + } + + kfree(g_wps_probe_req_ie); + g_wps_probe_req_ie = NULL; + g_wps_probe_req_ie_len = 0; + } + + return 0; +} + +static int +wl_iw_add_wps_probe_req_ie( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + char *str_ptr = NULL; + char *bufptr = NULL; + uint buflen, datalen, iecount, pktflag, iolen, total_len; + int ret = 0; + vndr_ie_setbuf_t *ie_setbuf = NULL; + + if (!g_wps_probe_req_ie) { + ret = -1; + str_ptr = extra; + str_ptr += WPS_PROBE_REQ_IE_CMD_LENGTH; + datalen = wrqu->data.length - WPS_PROBE_REQ_IE_CMD_LENGTH; + + + + buflen = sizeof(vndr_ie_setbuf_t) + datalen - sizeof(vndr_ie_t); + ie_setbuf = (vndr_ie_setbuf_t *)kmalloc(buflen, GFP_KERNEL); + if (!ie_setbuf) { + WL_ERROR(("memory alloc failure ie_setbuf\n")); + return ret; + } + + memset(ie_setbuf, 0x00, buflen); + + + strncpy(ie_setbuf->cmd, "add", VNDR_IE_CMD_LEN - 1); + ie_setbuf->cmd[VNDR_IE_CMD_LEN - 1] = '\0'; + + + iecount = htod32(1); + memcpy((void *)&ie_setbuf->vndr_ie_buffer.iecount, &iecount, sizeof(int)); + + + pktflag = 0x10; + memcpy((void *)&ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].pktflag, + &pktflag, sizeof(uint32)); + + memcpy((void *)&ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data, + str_ptr, datalen); + + total_len = strlen("vndr_ie ") + buflen; + bufptr = (char *)kmalloc(total_len, GFP_KERNEL); + if (!bufptr) { + WL_ERROR(("memory alloc failure bufptr\n")); + goto fail; + } + + iolen = bcm_mkiovar("vndr_ie", (char *)ie_setbuf, buflen, bufptr, total_len); + if (iolen == 0) { + WL_ERROR(("Buffer length is illegal\n")); + goto fail2; + } + + ret = dev_wlc_ioctl(dev, WLC_SET_VAR, bufptr, iolen); + if (ret) { + WL_ERROR(("ioctl failed\n")); + goto fail2; + } + + g_wps_probe_req_ie = (char *)kmalloc(iolen, GFP_KERNEL); + if (!g_wps_probe_req_ie) { + WL_ERROR(("memory alloc failure g_wps_probe_req_ie\n")); + goto fail2; + } + + memcpy(g_wps_probe_req_ie, bufptr, iolen); + g_wps_probe_req_ie_len = iolen; + } + +fail2: + if (bufptr) { + kfree(bufptr); + bufptr = NULL; + } +fail: + if (ie_setbuf) { + kfree(ie_setbuf); + ie_setbuf = NULL; + } + return ret; +} +#endif + + +#ifdef SOFTAP +#ifndef AP_ONLY + + +static int +thr_wait_for_2nd_eth_dev(void *data) +{ + wl_iw_t *iw; + int ret = 0; + unsigned long flags = 0; + + tsk_ctl_t *tsk_ctl = (tsk_ctl_t *)data; + struct net_device *dev = (struct net_device *)tsk_ctl->parent; + iw = *(wl_iw_t **)netdev_priv(dev); + + DAEMONIZE("wl0_eth_wthread"); + + + WL_SOFTAP(("\n>%s threda started:, PID:%x\n", __FUNCTION__, current->pid)); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + if (!iw) { + WL_ERROR(("%s: dev is null\n", __FUNCTION__)); + tsk_ctl->thr_pid = -1; + complete(&tsk_ctl->completed); + return -1; + } + DHD_OS_WAKE_LOCK(iw->pub); + complete(&tsk_ctl->completed); + if (down_timeout(&tsk_ctl->sema, msecs_to_jiffies(1000)) != 0) { +#else + if (down_interruptible(&tsk_ctl->sema) != 0) { +#endif + WL_ERROR(("\n%s: sap_eth_sema timeout \n", __FUNCTION__)); + ret = -1; + goto fail; + } + + SMP_RD_BARRIER_DEPENDS(); + if (tsk_ctl->terminated) { + ret = -1; + goto fail; + } + + flags = dhd_os_spin_lock(iw->pub); + if (!ap_net_dev) { + WL_ERROR((" ap_net_dev is null !!!")); + ret = -1; + dhd_os_spin_unlock(iw->pub, flags); + goto fail; + } + + WL_SOFTAP(("\n>%s: Thread:'softap ethdev IF:%s is detected!'\n\n", + __FUNCTION__, ap_net_dev->name)); + + 
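+ /* Second ethernet interface is up: mark SoftAP as running and tell userspace that AP_SET_CFG succeeded */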
ap_cfg_running = TRUE; + + dhd_os_spin_unlock(iw->pub, flags); + bcm_mdelay(500); + + + wl_iw_send_priv_event(priv_dev, "AP_SET_CFG_OK"); + +fail: + + DHD_OS_WAKE_UNLOCK(iw->pub); + + WL_SOFTAP(("\n>%s, thread completed\n", __FUNCTION__)); + + complete_and_exit(&tsk_ctl->completed, 0); + return ret; +} +#endif +#ifndef AP_ONLY +static int last_auto_channel = 6; +#endif + +static int +get_softap_auto_channel(struct net_device *dev, struct ap_profile *ap) +{ + int chosen = 0; + wl_uint32_list_t request; + int retry = 0; + int updown = 0; + int ret = 0; + wlc_ssid_t null_ssid; + int res = 0; +#ifndef AP_ONLY + int iolen = 0; + int mkvar_err = 0; + int bsscfg_index = 1; + char buf[WLC_IOCTL_SMLEN]; +#endif + WL_SOFTAP(("Enter %s\n", __FUNCTION__)); + +#ifndef AP_ONLY + if (ap_cfg_running) { + ap->channel = last_auto_channel; + return res; + } +#endif + + memset(&null_ssid, 0, sizeof(wlc_ssid_t)); + res |= dev_wlc_ioctl(dev, WLC_UP, &updown, sizeof(updown)); + +#ifdef AP_ONLY + res |= dev_wlc_ioctl(dev, WLC_SET_SSID, &null_ssid, sizeof(null_ssid)); +#else + + iolen = wl_bssiovar_mkbuf("ssid", bsscfg_index, (char *)(&null_ssid), + null_ssid.SSID_len+4, buf, sizeof(buf), &mkvar_err); + ASSERT(iolen); + res |= dev_wlc_ioctl(dev, WLC_SET_VAR, buf, iolen); + +#endif + + request.count = htod32(0); + ret = dev_wlc_ioctl(dev, WLC_START_CHANNEL_SEL, &request, sizeof(request)); + if (ret < 0) { + WL_ERROR(("can't start auto channel scan\n")); + goto fail; + } + + get_channel_retry: + bcm_mdelay(350); + + ret = dev_wlc_ioctl(dev, WLC_GET_CHANNEL_SEL, &chosen, sizeof(chosen)); + if (ret < 0 || dtoh32(chosen) == 0) { + if (retry++ < 15) { + goto get_channel_retry; + } else { + if (ret < 0) { + WL_ERROR(("can't get auto channel sel, err = %d, " + "chosen = 0x%04X\n", ret, (uint16)chosen)); + goto fail; + } else { + ap->channel = (uint16)last_auto_channel; + WL_ERROR(("auto channel sel timed out. 
we get channel %d\n", + ap->channel)); + } + } + } + + if (chosen) { + ap->channel = (uint16)chosen & 0x00FF; + WL_SOFTAP(("%s: Got auto channel = %d, attempt:%d\n", + __FUNCTION__, ap->channel, retry)); + } + + if ((res = dev_wlc_ioctl(dev, WLC_DOWN, &updown, sizeof(updown))) < 0) { + WL_ERROR(("%s fail to set up err =%d\n", __FUNCTION__, res)); + goto fail; + } + +#ifndef AP_ONLY + if (!res || !ret) + last_auto_channel = ap->channel; +#endif + +fail : + if (ret < 0) { + WL_TRACE(("%s: return value %d\n", __FUNCTION__, ret)); + return ret; + } + return res; +} + + +static int +set_ap_cfg(struct net_device *dev, struct ap_profile *ap) +{ + int updown = 0; + int channel = 0; + + wlc_ssid_t ap_ssid; + int max_assoc = 8; + + int res = 0; + int apsta_var = 0; +#ifndef AP_ONLY + int mpc = 0; + int iolen = 0; + int mkvar_err = 0; + int bsscfg_index = 1; + char buf[WLC_IOCTL_SMLEN]; +#endif + + if (!dev) { + WL_ERROR(("%s: dev is null\n", __FUNCTION__)); + return -1; + } + + net_os_wake_lock(dev); + DHD_OS_MUTEX_LOCK(&wl_softap_lock); + + WL_SOFTAP(("wl_iw: set ap profile:\n")); + WL_SOFTAP((" ssid = '%s'\n", ap->ssid)); + WL_SOFTAP((" security = '%s'\n", ap->sec)); + if (ap->key[0] != '\0') + WL_SOFTAP((" key = '%s'\n", ap->key)); + WL_SOFTAP((" channel = %d\n", ap->channel)); + WL_SOFTAP((" max scb = %d\n", ap->max_scb)); + +#ifdef AP_ONLY + if (ap_cfg_running) { + wl_iw_softap_deassoc_stations(dev, NULL); + ap_cfg_running = FALSE; + } +#endif + + + if (ap_cfg_running == FALSE) { + +#ifndef AP_ONLY + + + sema_init(&ap_eth_ctl.sema, 0); + + mpc = 0; + if ((res = dev_wlc_intvar_set(dev, "mpc", mpc))) { + WL_ERROR(("%s fail to set mpc\n", __FUNCTION__)); + goto fail; + } +#endif + + updown = 0; + if ((res = dev_wlc_ioctl(dev, WLC_DOWN, &updown, sizeof(updown)))) { + WL_ERROR(("%s fail to set updown\n", __FUNCTION__)); + goto fail; + } + +#ifdef AP_ONLY + + apsta_var = 0; + if ((res = dev_wlc_ioctl(dev, WLC_SET_AP, &apsta_var, sizeof(apsta_var)))) { + WL_ERROR(("%s fail to set apsta_var 0\n", __FUNCTION__)); + goto fail; + } + apsta_var = 1; + if ((res = dev_wlc_ioctl(dev, WLC_SET_AP, &apsta_var, sizeof(apsta_var)))) { + WL_ERROR(("%s fail to set apsta_var 1\n", __FUNCTION__)); + goto fail; + } + res = dev_wlc_ioctl(dev, WLC_GET_AP, &apsta_var, sizeof(apsta_var)); +#else + + apsta_var = 1; + iolen = wl_bssiovar_mkbuf("apsta", + bsscfg_index, &apsta_var, sizeof(apsta_var)+4, + buf, sizeof(buf), &mkvar_err); + ASSERT(iolen); + if ((res = dev_wlc_ioctl(dev, WLC_SET_VAR, buf, iolen)) < 0) { + WL_ERROR(("%s fail to set apsta \n", __FUNCTION__)); + goto fail; + } + WL_TRACE(("\n>in %s: apsta set result: %d \n", __FUNCTION__, res)); + + + mpc = 0; + if ((res = dev_wlc_intvar_set(dev, "mpc", mpc))) { + WL_ERROR(("%s fail to set mpc\n", __FUNCTION__)); + goto fail; + } + + +#endif + + updown = 1; + if ((res = dev_wlc_ioctl(dev, WLC_UP, &updown, sizeof(updown))) < 0) { + WL_ERROR(("%s fail to set apsta \n", __FUNCTION__)); + goto fail; + } + + } else { + + if (!ap_net_dev) { + WL_ERROR(("%s: ap_net_dev is null\n", __FUNCTION__)); + goto fail; + } + + res = wl_iw_softap_deassoc_stations(ap_net_dev, NULL); + + + if ((res = dev_iw_write_cfg1_bss_var(dev, 0)) < 0) { + WL_ERROR(("%s fail to set bss down\n", __FUNCTION__)); + goto fail; + } + } + + + if (strlen(ap->country_code)) { + WL_ERROR(("%s: Igonored: Country MUST be specified" + "COUNTRY command with \n", __FUNCTION__)); + } else { + WL_SOFTAP(("%s: Country code is not specified," + " will use Radio's default\n", + __FUNCTION__)); + + } + iolen = 
wl_bssiovar_mkbuf("closednet", + bsscfg_index, &ap->closednet, sizeof(ap->closednet)+4, + buf, sizeof(buf), &mkvar_err); + ASSERT(iolen); + if ((res = dev_wlc_ioctl(dev, WLC_SET_VAR, buf, iolen)) < 0) { + WL_ERROR(("%s failed to set 'closednet'for apsta \n", __FUNCTION__)); + goto fail; + } + + + if ((ap->channel == 0) && (get_softap_auto_channel(dev, ap) < 0)) { + ap->channel = 1; + WL_ERROR(("%s auto channel failed, use channel=%d\n", + __FUNCTION__, ap->channel)); + } + + channel = ap->channel; + if ((res = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &channel, sizeof(channel)))) { + WL_ERROR(("%s fail to set channel\n", __FUNCTION__)); + } + + + if (ap_cfg_running == FALSE) { + updown = 0; + if ((res = dev_wlc_ioctl(dev, WLC_UP, &updown, sizeof(updown)))) { + WL_ERROR(("%s fail to set up\n", __FUNCTION__)); + goto fail; + } + } + + max_assoc = ap->max_scb; + if ((res = dev_wlc_intvar_set(dev, "maxassoc", max_assoc))) { + WL_ERROR(("%s fail to set maxassoc\n", __FUNCTION__)); + goto fail; + } + + ap_ssid.SSID_len = strlen(ap->ssid); + strncpy(ap_ssid.SSID, ap->ssid, ap_ssid.SSID_len); + + +#ifdef AP_ONLY + if ((res = wl_iw_set_ap_security(dev, &my_ap)) != 0) { + WL_ERROR(("ERROR:%d in:%s, wl_iw_set_ap_security is skipped\n", + res, __FUNCTION__)); + goto fail; + } + wl_iw_send_priv_event(dev, "ASCII_CMD=AP_BSS_START"); + ap_cfg_running = TRUE; +#else + + iolen = wl_bssiovar_mkbuf("ssid", bsscfg_index, (char *)(&ap_ssid), + ap_ssid.SSID_len+4, buf, sizeof(buf), &mkvar_err); + ASSERT(iolen); + if ((res = dev_wlc_ioctl(dev, WLC_SET_VAR, buf, iolen)) != 0) { + WL_ERROR(("ERROR:%d in:%s, Security & BSS reconfiguration is skipped\n", + res, __FUNCTION__)); + goto fail; + } + if (ap_cfg_running == FALSE) { + + PROC_START(thr_wait_for_2nd_eth_dev, dev, &ap_eth_ctl, 0); + } else { + ap_eth_ctl.thr_pid = -1; + + if (ap_net_dev == NULL) { + WL_ERROR(("%s ERROR: ap_net_dev is NULL !!!\n", __FUNCTION__)); + goto fail; + } + + WL_ERROR(("%s: %s Configure security & restart AP bss \n", + __FUNCTION__, ap_net_dev->name)); + + + if ((res = wl_iw_set_ap_security(ap_net_dev, &my_ap)) < 0) { + WL_ERROR(("%s fail to set security : %d\n", __FUNCTION__, res)); + goto fail; + } + + + if ((res = dev_iw_write_cfg1_bss_var(dev, 1)) < 0) { + WL_ERROR(("%s fail to set bss up\n", __FUNCTION__)); + goto fail; + } + } +#endif +fail: + WL_SOFTAP(("%s exit with %d\n", __FUNCTION__, res)); + + DHD_OS_MUTEX_UNLOCK(&wl_softap_lock); + net_os_wake_unlock(dev); + + return res; +} +#endif + + + +static int +wl_iw_set_ap_security(struct net_device *dev, struct ap_profile *ap) +{ + int wsec = 0; + int wpa_auth = 0; + int res = 0; + int i; + char *ptr; +#ifdef AP_ONLY + int mpc = 0; + wlc_ssid_t ap_ssid; +#endif + wl_wsec_key_t key; + + WL_SOFTAP(("\nsetting SOFTAP security mode:\n")); + WL_SOFTAP(("wl_iw: set ap profile:\n")); + WL_SOFTAP((" ssid = '%s'\n", ap->ssid)); + WL_SOFTAP((" security = '%s'\n", ap->sec)); + if (ap->key[0] != '\0') + WL_SOFTAP((" key = '%s'\n", ap->key)); + WL_SOFTAP((" channel = %d\n", ap->channel)); + WL_SOFTAP((" max scb = %d\n", ap->max_scb)); + + + if (strnicmp(ap->sec, "open", strlen("open")) == 0) { + + + wsec = 0; + res = dev_wlc_intvar_set(dev, "wsec", wsec); + wpa_auth = WPA_AUTH_DISABLED; + res |= dev_wlc_intvar_set(dev, "wpa_auth", wpa_auth); + + WL_SOFTAP(("=====================\n")); + WL_SOFTAP((" wsec & wpa_auth set 'OPEN', result:&d %d\n", res)); + WL_SOFTAP(("=====================\n")); + + } else if (strnicmp(ap->sec, "wep", strlen("wep")) == 0) { + + + memset(&key, 0, sizeof(key)); + + wsec = 
WEP_ENABLED; + res = dev_wlc_intvar_set(dev, "wsec", wsec); + + key.index = 0; + if (wl_iw_parse_wep(ap->key, &key)) { + WL_SOFTAP(("wep key parse err!\n")); + return -1; + } + + key.index = htod32(key.index); + key.len = htod32(key.len); + key.algo = htod32(key.algo); + key.flags = htod32(key.flags); + + res |= dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key)); + + wpa_auth = WPA_AUTH_DISABLED; + res |= dev_wlc_intvar_set(dev, "wpa_auth", wpa_auth); + + WL_SOFTAP(("=====================\n")); + WL_SOFTAP((" wsec & auth set 'WEP', result:&d %d\n", res)); + WL_SOFTAP(("=====================\n")); + + } else if (strnicmp(ap->sec, "wpa2-psk", strlen("wpa2-psk")) == 0) { + + + + wsec_pmk_t psk; + size_t key_len; + + wsec = AES_ENABLED; + dev_wlc_intvar_set(dev, "wsec", wsec); + + key_len = strlen(ap->key); + if (key_len < WSEC_MIN_PSK_LEN || key_len > WSEC_MAX_PSK_LEN) { + WL_SOFTAP(("passphrase must be between %d and %d characters long\n", + WSEC_MIN_PSK_LEN, WSEC_MAX_PSK_LEN)); + return -1; + } + + + if (key_len < WSEC_MAX_PSK_LEN) { + unsigned char output[2*SHA1HashSize]; + char key_str_buf[WSEC_MAX_PSK_LEN+1]; + + + memset(output, 0, sizeof(output)); + pbkdf2_sha1(ap->key, ap->ssid, strlen(ap->ssid), 4096, output, 32); + + ptr = key_str_buf; + for (i = 0; i < (WSEC_MAX_PSK_LEN/8); i++) { + + sprintf(ptr, "%02x%02x%02x%02x", (uint)output[i*4], + (uint)output[i*4+1], (uint)output[i*4+2], + (uint)output[i*4+3]); + ptr += 8; + } + WL_SOFTAP(("%s: passphase = %s\n", __FUNCTION__, key_str_buf)); + + psk.key_len = htod16((ushort)WSEC_MAX_PSK_LEN); + memcpy(psk.key, key_str_buf, psk.key_len); + } else { + psk.key_len = htod16((ushort) key_len); + memcpy(psk.key, ap->key, key_len); + } + psk.flags = htod16(WSEC_PASSPHRASE); + dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &psk, sizeof(psk)); + + wpa_auth = WPA2_AUTH_PSK; + dev_wlc_intvar_set(dev, "wpa_auth", wpa_auth); + + } else if (strnicmp(ap->sec, "wpa-psk", strlen("wpa-psk")) == 0) { + + + wsec_pmk_t psk; + size_t key_len; + + wsec = TKIP_ENABLED; + res = dev_wlc_intvar_set(dev, "wsec", wsec); + + key_len = strlen(ap->key); + if (key_len < WSEC_MIN_PSK_LEN || key_len > WSEC_MAX_PSK_LEN) { + WL_SOFTAP(("passphrase must be between %d and %d characters long\n", + WSEC_MIN_PSK_LEN, WSEC_MAX_PSK_LEN)); + return -1; + } + + + if (key_len < WSEC_MAX_PSK_LEN) { + unsigned char output[2*SHA1HashSize]; + char key_str_buf[WSEC_MAX_PSK_LEN+1]; + bzero(output, 2*SHA1HashSize); + + WL_SOFTAP(("%s: do passhash...\n", __FUNCTION__)); + + pbkdf2_sha1(ap->key, ap->ssid, strlen(ap->ssid), 4096, output, 32); + + ptr = key_str_buf; + for (i = 0; i < (WSEC_MAX_PSK_LEN/8); i++) { + WL_SOFTAP(("[%02d]: %08x\n", i, *((unsigned int*)&output[i*4]))); + + sprintf(ptr, "%02x%02x%02x%02x", (uint)output[i*4], + (uint)output[i*4+1], (uint)output[i*4+2], + (uint)output[i*4+3]); + ptr += 8; + } + printk("%s: passphase = %s\n", __FUNCTION__, key_str_buf); + + psk.key_len = htod16((ushort)WSEC_MAX_PSK_LEN); + memcpy(psk.key, key_str_buf, psk.key_len); + } else { + psk.key_len = htod16((ushort) key_len); + memcpy(psk.key, ap->key, key_len); + } + + psk.flags = htod16(WSEC_PASSPHRASE); + res |= dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &psk, sizeof(psk)); + + wpa_auth = WPA_AUTH_PSK; + res |= dev_wlc_intvar_set(dev, "wpa_auth", wpa_auth); + + WL_SOFTAP((" wsec & auth set 'wpa-psk' (TKIP), result:&d %d\n", res)); + } + +#ifdef AP_ONLY + ap_ssid.SSID_len = strlen(ap->ssid); + strncpy(ap_ssid.SSID, ap->ssid, ap_ssid.SSID_len); + res |= dev_wlc_ioctl(dev, WLC_SET_SSID, &ap_ssid, sizeof(ap_ssid)); + 
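+ /* AP_ONLY build: keep mpc disabled and re-plumb the WEP key now that the SSID has been set */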
mpc = 0; + res |= dev_wlc_intvar_set(dev, "mpc", mpc); + if (strnicmp(ap->sec, "wep", strlen("wep")) == 0) { + res |= dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key)); + } +#endif + return res; +} + + + +static int +get_parameter_from_string( + char **str_ptr, const char *token, + int param_type, void *dst, int param_max_len) +{ + char int_str[7] = "0"; + int parm_str_len; + char *param_str_begin; + char *param_str_end; + char *orig_str = *str_ptr; + + if ((*str_ptr) && !strncmp(*str_ptr, token, strlen(token))) { + + strsep(str_ptr, "=,"); + param_str_begin = *str_ptr; + strsep(str_ptr, "=,"); + + if (*str_ptr == NULL) { + + parm_str_len = strlen(param_str_begin); + } else { + param_str_end = *str_ptr-1; + parm_str_len = param_str_end - param_str_begin; + } + + WL_TRACE((" 'token:%s', len:%d, ", token, parm_str_len)); + + if (parm_str_len > param_max_len) { + WL_ERROR((" WARNING: extracted param len:%d is > MAX:%d\n", + parm_str_len, param_max_len)); + + parm_str_len = param_max_len; + } + + switch (param_type) { + + case PTYPE_INTDEC: { + + int *pdst_int = dst; + char *eptr; + + if (parm_str_len > sizeof(int_str)) + parm_str_len = sizeof(int_str); + + memcpy(int_str, param_str_begin, parm_str_len); + + *pdst_int = simple_strtoul(int_str, &eptr, 10); + + WL_TRACE((" written as integer:%d\n", *pdst_int)); + } + break; + case PTYPE_STR_HEX: { + u8 *buf = dst; + + param_max_len = param_max_len >> 1; + hstr_2_buf(param_str_begin, buf, param_max_len); + dhd_print_buf(buf, param_max_len, 0); + } + break; + default: + + memcpy(dst, param_str_begin, parm_str_len); + *((char *)dst + parm_str_len) = 0; + WL_ERROR((" written as a string:%s\n", (char *)dst)); + break; + + } + + return 0; + } else { + WL_ERROR(("\n %s: ERROR: can't find token:%s in str:%s \n", + __FUNCTION__, token, orig_str)); + + return -1; + } +} + +static int wl_iw_softap_deassoc_stations(struct net_device *dev, u8 *mac) +{ + int i; + int res = 0; + char mac_buf[128] = {0}; + char z_mac[6] = {0, 0, 0, 0, 0, 0}; + char *sta_mac; + struct maclist *assoc_maclist = (struct maclist *) mac_buf; + bool deauth_all = FALSE; + + + if (mac == NULL) { + deauth_all = TRUE; + sta_mac = z_mac; + } else { + sta_mac = mac; + } + + memset(assoc_maclist, 0, sizeof(mac_buf)); + assoc_maclist->count = 8; + + res = dev_wlc_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist, 128); + if (res != 0) { + WL_SOFTAP(("%s: Error:%d Couldn't get ASSOC List\n", __FUNCTION__, res)); + return res; + } + + if (assoc_maclist->count) + for (i = 0; i < assoc_maclist->count; i++) { + scb_val_t scbval; + scbval.val = htod32(1); + + bcopy(&assoc_maclist->ea[i], &scbval.ea, ETHER_ADDR_LEN); + + if (deauth_all || (memcmp(&scbval.ea, sta_mac, ETHER_ADDR_LEN) == 0)) { + + WL_SOFTAP(("%s, deauth STA:%d \n", __FUNCTION__, i)); + res |= dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, + &scbval, sizeof(scb_val_t)); + } + } else WL_SOFTAP(("%s: No Stations \n", __FUNCTION__)); + + if (res != 0) { + WL_ERROR(("%s: Error:%d\n", __FUNCTION__, res)); + } else if (assoc_maclist->count) { + + bcm_mdelay(200); + } + return res; +} + + + +static int +iwpriv_softap_stop(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *ext) +{ + int res = 0; + + WL_SOFTAP(("got iwpriv AP_BSS_STOP \n")); + + if ((!dev) && (!ap_net_dev)) { + WL_ERROR(("%s: dev is null\n", __FUNCTION__)); + return res; + } + + net_os_wake_lock(dev); + DHD_OS_MUTEX_LOCK(&wl_softap_lock); + + if ((ap_cfg_running == TRUE)) { +#ifdef AP_ONLY + wl_iw_softap_deassoc_stations(dev, NULL); 
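+ /* AP_ONLY build: stations are deauthenticated on the primary interface itself */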
+#else + wl_iw_softap_deassoc_stations(ap_net_dev, NULL); + if ((res = dev_iw_write_cfg1_bss_var(dev, 2)) < 0) + WL_ERROR(("%s failed to del BSS err = %d", __FUNCTION__, res)); +#endif + + + bcm_mdelay(100); + + wrqu->data.length = 0; + ap_cfg_running = FALSE; + } else + WL_ERROR(("%s: was called when SoftAP is OFF : move on\n", __FUNCTION__)); + + WL_SOFTAP(("%s Done with %d\n", __FUNCTION__, res)); + DHD_OS_MUTEX_UNLOCK(&wl_softap_lock); + net_os_wake_unlock(dev); + + return res; +} + + + +static int +iwpriv_fw_reload(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *ext) +{ + int ret = -1; + char extra[256]; + char *fwstr = fw_path ; + + WL_SOFTAP(("current firmware_path[]=%s\n", fwstr)); + + WL_TRACE((">Got FW_RELOAD cmd:" + "info->cmd:%x, info->flags:%x, u.data:%p, u.len:%d, " + "fw_path:%p, len:%d \n", + info->cmd, info->flags, + wrqu->data.pointer, wrqu->data.length, fwstr, strlen(fwstr))); + + if ((wrqu->data.length > 4) && (wrqu->data.length < sizeof(extra))) { + char *str_ptr; + + if (copy_from_user(extra, wrqu->data.pointer, wrqu->data.length)) { + ret = -EFAULT; + goto exit_proc; + } + + + extra[wrqu->data.length] = 8; + str_ptr = extra; + + if (get_parameter_from_string(&str_ptr, + "FW_PATH=", PTYPE_STRING, fwstr, 255) != 0) { + WL_ERROR(("Error: extracting FW_PATH='' string\n")); + goto exit_proc; + } + + if (strstr(fwstr, "apsta") != NULL) { + WL_SOFTAP(("GOT APSTA FIRMWARE\n")); + ap_fw_loaded = TRUE; + } else { + WL_SOFTAP(("GOT STA FIRMWARE\n")); + ap_fw_loaded = FALSE; + } + + WL_SOFTAP(("SET firmware_path[]=%s , str_p:%p\n", fwstr, fwstr)); + ret = 0; + } else { + WL_ERROR(("Error: ivalid param len:%d\n", wrqu->data.length)); + } + +exit_proc: + return ret; +} + +#ifdef SOFTAP + +static int +iwpriv_wpasupp_loop_tst(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *ext) +{ + int res = 0; + char *params = NULL; + + WL_TRACE((">Got IWPRIV wp_supp loopback cmd test:" + "info->cmd:%x, info->flags:%x, u.data:%p, u.len:%d\n", + info->cmd, info->flags, + wrqu->data.pointer, wrqu->data.length)); + + if (wrqu->data.length != 0) { + + if (!(params = kmalloc(wrqu->data.length+1, GFP_KERNEL))) + return -ENOMEM; + + + if (copy_from_user(params, wrqu->data.pointer, wrqu->data.length)) { + kfree(params); + return -EFAULT; + } + + params[wrqu->data.length] = 0; + WL_SOFTAP(("\n>> copied from user:\n %s\n", params)); + } else { + WL_ERROR(("ERROR param length is 0\n")); + return -EFAULT; + } + + + res = wl_iw_send_priv_event(dev, params); + kfree(params); + + return res; +} +#endif + + +static int +iwpriv_en_ap_bss( + struct net_device *dev, + struct iw_request_info *info, + void *wrqu, + char *extra) +{ + int res = 0; + + if (!dev) { + WL_ERROR(("%s: dev is null\n", __FUNCTION__)); + return -1; + } + + net_os_wake_lock(dev); + DHD_OS_MUTEX_LOCK(&wl_softap_lock); + + WL_TRACE(("%s: rcvd IWPRIV IOCTL: for dev:%s\n", __FUNCTION__, dev->name)); + + +#ifndef AP_ONLY + if ((res = wl_iw_set_ap_security(dev, &my_ap)) != 0) { + WL_ERROR((" %s ERROR setting SOFTAP security in :%d\n", __FUNCTION__, res)); + } + else { + + if ((res = dev_iw_write_cfg1_bss_var(dev, 1)) < 0) + WL_ERROR(("%s fail to set bss up err=%d\n", __FUNCTION__, res)); + else + + bcm_mdelay(100); + } + +#endif + WL_SOFTAP(("%s done with res %d \n", __FUNCTION__, res)); + + DHD_OS_MUTEX_UNLOCK(&wl_softap_lock); + net_os_wake_unlock(dev); + + return res; +} + +static int +get_assoc_sta_list(struct net_device *dev, char *buf, int len) +{ + + 
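+ /* Thin wrapper: pass the caller's buffer straight to the WLC_GET_ASSOCLIST ioctl */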
WL_TRACE(("%s: dev_wlc_ioctl(dev:%p, cmd:%d, buf:%p, len:%d)\n", + __FUNCTION__, dev, WLC_GET_ASSOCLIST, buf, len)); + + return dev_wlc_ioctl(dev, WLC_GET_ASSOCLIST, buf, len); + +} + + +void check_error(int res, const char *msg, const char *func, int line) +{ + if (res != 0) + WL_ERROR(("%s, %d function:%s, line:%d\n", msg, res, func, line)); +} + +static int +set_ap_mac_list(struct net_device *dev, void *buf) +{ + struct mac_list_set *mac_list_set = (struct mac_list_set *)buf; + struct maclist *maclist = (struct maclist *)&mac_list_set->mac_list; + int length; + int i; + int mac_mode = mac_list_set->mode; + int ioc_res = 0; + ap_macmode = mac_list_set->mode; + + + bzero(&ap_black_list, sizeof(struct mflist)); + + if (mac_mode == MACLIST_MODE_DISABLED) { + + ioc_res = dev_wlc_ioctl(dev, WLC_SET_MACMODE, &mac_mode, sizeof(mac_mode)); + check_error(ioc_res, "ioctl ERROR:", __FUNCTION__, __LINE__); + WL_SOFTAP(("%s: MAC filtering disabled\n", __FUNCTION__)); + } else { + + scb_val_t scbval; + char mac_buf[256] = {0}; + struct maclist *assoc_maclist = (struct maclist *) mac_buf; + + + bcopy(maclist, &ap_black_list, sizeof(ap_black_list)); + + + ioc_res = dev_wlc_ioctl(dev, WLC_SET_MACMODE, &mac_mode, sizeof(mac_mode)); + check_error(ioc_res, "ioctl ERROR:", __FUNCTION__, __LINE__); + + + length = sizeof(maclist->count) + maclist->count*ETHER_ADDR_LEN; + dev_wlc_ioctl(dev, WLC_SET_MACLIST, maclist, length); + + WL_SOFTAP(("%s: applied MAC List, mode:%d, length %d:\n", + __FUNCTION__, mac_mode, length)); + + for (i = 0; i < maclist->count; i++) + WL_SOFTAP(("mac %d: %02X:%02X:%02X:%02X:%02X:%02X\n", + i, maclist->ea[i].octet[0], maclist->ea[i].octet[1], + maclist->ea[i].octet[2], + maclist->ea[i].octet[3], maclist->ea[i].octet[4], + maclist->ea[i].octet[5])); + + + assoc_maclist->count = 8; + ioc_res = dev_wlc_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist, 256); + check_error(ioc_res, "ioctl ERROR:", __FUNCTION__, __LINE__); + WL_SOFTAP((" Cur assoc clients:%d\n", assoc_maclist->count)); + + + if (assoc_maclist->count) + for (i = 0; i < assoc_maclist->count; i++) { + int j; + bool assoc_mac_matched = FALSE; + + WL_SOFTAP(("\n Cheking assoc STA: ")); + dhd_print_buf(&assoc_maclist->ea[i], 6, 7); + WL_SOFTAP(("with the b/w list:")); + + for (j = 0; j < maclist->count; j++) + if (!bcmp(&assoc_maclist->ea[i], &maclist->ea[j], + ETHER_ADDR_LEN)) { + + assoc_mac_matched = TRUE; + break; + } + + + if (((mac_mode == MACLIST_MODE_ALLOW) && !assoc_mac_matched) || + ((mac_mode == MACLIST_MODE_DENY) && assoc_mac_matched)) { + + WL_SOFTAP(("b-match or w-mismatch," + " do deauth/disassoc \n")); + scbval.val = htod32(1); + bcopy(&assoc_maclist->ea[i], &scbval.ea, + ETHER_ADDR_LEN); + ioc_res = dev_wlc_ioctl(dev, + WLC_SCB_DEAUTHENTICATE_FOR_REASON, + &scbval, sizeof(scb_val_t)); + check_error(ioc_res, + "ioctl ERROR:", + __FUNCTION__, __LINE__); + + } else { + WL_SOFTAP((" no b/w list hits, let it be\n")); + } + } else { + WL_SOFTAP(("No ASSOC CLIENTS\n")); + } + + } + + WL_SOFTAP(("%s iocres:%d\n", __FUNCTION__, ioc_res)); + return ioc_res; +} +#endif + + + +#ifdef SOFTAP +#define PARAM_OFFSET PROFILE_OFFSET + +static int +wl_iw_process_private_ascii_cmd( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *dwrq, + char *cmd_str) +{ + int ret = 0; + char *sub_cmd = cmd_str + PROFILE_OFFSET + strlen("ASCII_CMD="); + + WL_SOFTAP(("\n %s: ASCII_CMD: offs_0:%s, offset_32:\n'%s'\n", + __FUNCTION__, cmd_str, cmd_str + PROFILE_OFFSET)); + + if (strnicmp(sub_cmd, "AP_CFG", strlen("AP_CFG")) == 
0) { + + WL_SOFTAP((" AP_CFG \n")); + + + if (init_ap_profile_from_string(cmd_str+PROFILE_OFFSET, &my_ap) != 0) { + WL_ERROR(("ERROR: SoftAP CFG prams !\n")); + ret = -1; + } else { + ret = set_ap_cfg(dev, &my_ap); + } + + } else if (strnicmp(sub_cmd, "AP_BSS_START", strlen("AP_BSS_START")) == 0) { + + WL_SOFTAP(("\n SOFTAP - ENABLE BSS \n")); + + + WL_SOFTAP(("\n!!! got 'WL_AP_EN_BSS' from WPA supplicant, dev:%s\n", dev->name)); + +#ifndef AP_ONLY + if (ap_net_dev == NULL) { + printf("\n ERROR: SOFTAP net_dev* is NULL !!!\n"); + } else { + + if ((ret = iwpriv_en_ap_bss(ap_net_dev, info, dwrq, cmd_str)) < 0) + WL_ERROR(("%s line %d fail to set bss up\n", + __FUNCTION__, __LINE__)); + } +#else + if ((ret = iwpriv_en_ap_bss(dev, info, dwrq, cmd_str)) < 0) + WL_ERROR(("%s line %d fail to set bss up\n", + __FUNCTION__, __LINE__)); +#endif + } else if (strnicmp(sub_cmd, "ASSOC_LST", strlen("ASSOC_LST")) == 0) { + + + + } else if (strnicmp(sub_cmd, "AP_BSS_STOP", strlen("AP_BSS_STOP")) == 0) { + + WL_SOFTAP((" \n temp DOWN SOFTAP\n")); +#ifndef AP_ONLY + if ((ret = dev_iw_write_cfg1_bss_var(dev, 0)) < 0) { + WL_ERROR(("%s line %d fail to set bss down\n", + __FUNCTION__, __LINE__)); + } +#endif + } + + return ret; + +} +#endif + + +static int +wl_iw_set_priv( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *ext +) +{ + int ret = 0; + char * extra; + + if (!(extra = kmalloc(dwrq->length, GFP_KERNEL))) + return -ENOMEM; + + if (copy_from_user(extra, dwrq->pointer, dwrq->length)) { + kfree(extra); + return -EFAULT; + } + + WL_TRACE(("%s: SIOCSIWPRIV request %s, info->cmd:%x, info->flags:%d\n dwrq->length:%d\n", + dev->name, extra, info->cmd, info->flags, dwrq->length)); + + + + net_os_wake_lock(dev); + + if (dwrq->length && extra) { + if (strnicmp(extra, "START", strlen("START")) == 0) { + wl_iw_control_wl_on(dev, info); + WL_TRACE(("%s, Received regular START command\n", __FUNCTION__)); + } + + if (g_onoff == G_WLAN_SET_OFF) { + WL_TRACE(("%s, missing START, Fail\n", __FUNCTION__)); + kfree(extra); + net_os_wake_unlock(dev); + return -EFAULT; + } + + if (strnicmp(extra, "SCAN-ACTIVE", strlen("SCAN-ACTIVE")) == 0) { +#ifdef ENABLE_ACTIVE_PASSIVE_SCAN_SUPPRESS + WL_TRACE(("%s: active scan setting suppressed\n", dev->name)); +#else + ret = wl_iw_set_active_scan(dev, info, (union iwreq_data *)dwrq, extra); +#endif + } + else if (strnicmp(extra, "SCAN-PASSIVE", strlen("SCAN-PASSIVE")) == 0) +#ifdef ENABLE_ACTIVE_PASSIVE_SCAN_SUPPRESS + WL_TRACE(("%s: passive scan setting suppressed\n", dev->name)); +#else + ret = wl_iw_set_passive_scan(dev, info, (union iwreq_data *)dwrq, extra); +#endif + else if (strnicmp(extra, "RSSI", strlen("RSSI")) == 0) + ret = wl_iw_get_rssi(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, "LINKSPEED", strlen("LINKSPEED")) == 0) + ret = wl_iw_get_link_speed(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, "MACADDR", strlen("MACADDR")) == 0) + ret = wl_iw_get_macaddr(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, "COUNTRY", strlen("COUNTRY")) == 0) + ret = wl_iw_set_country(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, "STOP", strlen("STOP")) == 0) + ret = wl_iw_control_wl_off(dev, info); + else if (strnicmp(extra, BAND_GET_CMD, strlen(BAND_GET_CMD)) == 0) + ret = wl_iw_get_band(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, BAND_SET_CMD, strlen(BAND_SET_CMD)) == 0) + ret = wl_iw_set_band(dev, info, (union iwreq_data 
*)dwrq, extra); + else if (strnicmp(extra, DTIM_SKIP_GET_CMD, strlen(DTIM_SKIP_GET_CMD)) == 0) + ret = wl_iw_get_dtim_skip(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, DTIM_SKIP_SET_CMD, strlen(DTIM_SKIP_SET_CMD)) == 0) + ret = wl_iw_set_dtim_skip(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, SETSUSPENDOPT_CMD, strlen(SETSUSPENDOPT_CMD)) == 0) + ret = wl_iw_set_suspend_opt(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, SETSUSPENDMODE_CMD, strlen(SETSUSPENDMODE_CMD)) == 0) + ret = wl_iw_set_suspend_mode(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, TXPOWER_SET_CMD, strlen(TXPOWER_SET_CMD)) == 0) + ret = wl_iw_set_txpower(dev, info, (union iwreq_data *)dwrq, extra); +#if defined(PNO_SUPPORT) + else if (strnicmp(extra, PNOSSIDCLR_SET_CMD, strlen(PNOSSIDCLR_SET_CMD)) == 0) + ret = wl_iw_set_pno_reset(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, PNOSETUP_SET_CMD, strlen(PNOSETUP_SET_CMD)) == 0) + ret = wl_iw_set_pno_set(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, PNOSETADD_SET_CMD, strlen(PNOSETADD_SET_CMD)) == 0) + ret = wl_iw_set_pno_setadd(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, PNOENABLE_SET_CMD, strlen(PNOENABLE_SET_CMD)) == 0) + ret = wl_iw_set_pno_enable(dev, info, (union iwreq_data *)dwrq, extra); +#endif +#if defined(CSCAN) + + else if (strnicmp(extra, CSCAN_COMMAND, strlen(CSCAN_COMMAND)) == 0) + ret = wl_iw_set_cscan(dev, info, (union iwreq_data *)dwrq, extra); +#endif +#ifdef CONFIG_WPS2 + else if (strnicmp(extra, WPS_ADD_PROBE_REQ_IE_CMD, + strlen(WPS_ADD_PROBE_REQ_IE_CMD)) == 0) + ret = wl_iw_add_wps_probe_req_ie(dev, info, + (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, WPS_DEL_PROBE_REQ_IE_CMD, + strlen(WPS_DEL_PROBE_REQ_IE_CMD)) == 0) + ret = wl_iw_del_wps_probe_req_ie(dev, info, + (union iwreq_data *)dwrq, extra); +#endif + else if (strnicmp(extra, "POWERMODE", strlen("POWERMODE")) == 0) + ret = wl_iw_set_power_mode(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, "BTCOEXMODE", strlen("BTCOEXMODE")) == 0) + ret = wl_iw_set_btcoex_dhcp(dev, info, (union iwreq_data *)dwrq, extra); + else if (strnicmp(extra, "GETPOWER", strlen("GETPOWER")) == 0) + ret = wl_iw_get_power_mode(dev, info, (union iwreq_data *)dwrq, extra); +#ifdef SOFTAP + else if (strnicmp(extra, "ASCII_CMD", strlen("ASCII_CMD")) == 0) { + wl_iw_process_private_ascii_cmd(dev, info, (union iwreq_data *)dwrq, extra); + } + else if (strnicmp(extra, "AP_MAC_LIST_SET", strlen("AP_MAC_LIST_SET")) == 0) { + WL_SOFTAP(("penguin, set AP_MAC_LIST_SET\n")); + set_ap_mac_list(dev, (extra + PROFILE_OFFSET)); + } +#endif + else { + WL_ERROR(("Unknown PRIVATE command %s - ignored\n", extra)); + snprintf(extra, MAX_WX_STRING, "OK"); + dwrq->length = strlen("OK") + 1; + } + } + + net_os_wake_unlock(dev); + + if (extra) { + if (copy_to_user(dwrq->pointer, extra, dwrq->length)) { + kfree(extra); + return -EFAULT; + } + + kfree(extra); + } + + return ret; +} + +static const iw_handler wl_iw_handler[] = +{ + (iw_handler) wl_iw_config_commit, + (iw_handler) wl_iw_get_name, + (iw_handler) NULL, + (iw_handler) NULL, + (iw_handler) wl_iw_set_freq, + (iw_handler) wl_iw_get_freq, + (iw_handler) wl_iw_set_mode, + (iw_handler) wl_iw_get_mode, + (iw_handler) NULL, + (iw_handler) NULL, + (iw_handler) NULL, + (iw_handler) wl_iw_get_range, + (iw_handler) wl_iw_set_priv, + (iw_handler) NULL, + (iw_handler) NULL, + 
(iw_handler) NULL, + (iw_handler) wl_iw_set_spy, + (iw_handler) wl_iw_get_spy, + (iw_handler) NULL, + (iw_handler) NULL, + (iw_handler) wl_iw_set_wap, + (iw_handler) wl_iw_get_wap, +#if WIRELESS_EXT > 17 + (iw_handler) wl_iw_mlme, +#else + (iw_handler) NULL, +#endif +#if defined(WL_IW_USE_ISCAN) + (iw_handler) wl_iw_iscan_get_aplist, +#else + (iw_handler) wl_iw_get_aplist, +#endif +#if WIRELESS_EXT > 13 +#if defined(WL_IW_USE_ISCAN) + (iw_handler) wl_iw_iscan_set_scan, + (iw_handler) wl_iw_iscan_get_scan, +#else + (iw_handler) wl_iw_set_scan, + (iw_handler) wl_iw_get_scan, +#endif +#else + (iw_handler) NULL, + (iw_handler) NULL, +#endif + (iw_handler) wl_iw_set_essid, + (iw_handler) wl_iw_get_essid, + (iw_handler) wl_iw_set_nick, + (iw_handler) wl_iw_get_nick, + (iw_handler) NULL, + (iw_handler) NULL, + (iw_handler) wl_iw_set_rate, + (iw_handler) wl_iw_get_rate, + (iw_handler) wl_iw_set_rts, + (iw_handler) wl_iw_get_rts, + (iw_handler) wl_iw_set_frag, + (iw_handler) wl_iw_get_frag, + (iw_handler) wl_iw_set_txpow, + (iw_handler) wl_iw_get_txpow, +#if WIRELESS_EXT > 10 + (iw_handler) wl_iw_set_retry, + (iw_handler) wl_iw_get_retry, +#endif + (iw_handler) wl_iw_set_encode, + (iw_handler) wl_iw_get_encode, + (iw_handler) wl_iw_set_power, + (iw_handler) wl_iw_get_power, +#if WIRELESS_EXT > 17 + (iw_handler) NULL, + (iw_handler) NULL, + (iw_handler) wl_iw_set_wpaie, + (iw_handler) wl_iw_get_wpaie, + (iw_handler) wl_iw_set_wpaauth, + (iw_handler) wl_iw_get_wpaauth, + (iw_handler) wl_iw_set_encodeext, + (iw_handler) wl_iw_get_encodeext, + (iw_handler) wl_iw_set_pmksa, +#endif +}; + +#if WIRELESS_EXT > 12 +static const iw_handler wl_iw_priv_handler[] = { + NULL, + (iw_handler)wl_iw_set_active_scan, + NULL, + (iw_handler)wl_iw_get_rssi, + NULL, + (iw_handler)wl_iw_set_passive_scan, + NULL, + (iw_handler)wl_iw_get_link_speed, + NULL, + (iw_handler)wl_iw_get_macaddr, + NULL, + (iw_handler)wl_iw_control_wl_off, + NULL, + (iw_handler)wl_iw_control_wl_on, +#ifdef SOFTAP + + + NULL, + (iw_handler)iwpriv_set_ap_config, + + + + NULL, + (iw_handler)iwpriv_get_assoc_list, + + + NULL, + (iw_handler)iwpriv_set_mac_filters, + + + NULL, + (iw_handler)iwpriv_en_ap_bss, + + + NULL, + (iw_handler)iwpriv_wpasupp_loop_tst, + + NULL, + (iw_handler)iwpriv_softap_stop, + + NULL, + (iw_handler)iwpriv_fw_reload, + NULL, + (iw_handler)iwpriv_set_ap_sta_disassoc, +#endif +#if defined(CSCAN) + + NULL, + (iw_handler)iwpriv_set_cscan +#endif +}; + +static const struct iw_priv_args wl_iw_priv_args[] = +{ + { + WL_IW_SET_ACTIVE_SCAN, + 0, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, + "SCAN-ACTIVE" + }, + { + WL_IW_GET_RSSI, + 0, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, + "RSSI" + }, + { + WL_IW_SET_PASSIVE_SCAN, + 0, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, + "SCAN-PASSIVE" + }, + { + WL_IW_GET_LINK_SPEED, + 0, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, + "LINKSPEED" + }, + { + WL_IW_GET_CURR_MACADDR, + 0, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, + "Macaddr" + }, + { + WL_IW_SET_STOP, + 0, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, + "STOP" + }, + { + WL_IW_SET_START, + 0, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, + "START" + }, + +#ifdef SOFTAP + + + { + WL_SET_AP_CFG, + IW_PRIV_TYPE_CHAR | 256, + 0, + "AP_SET_CFG" + }, + + { + WL_AP_STA_LIST, + IW_PRIV_TYPE_CHAR | 0, + IW_PRIV_TYPE_CHAR | 1024, + "AP_GET_STA_LIST" + }, + + { + WL_AP_MAC_FLTR, + IW_PRIV_TYPE_CHAR | 256, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0, + 
"AP_SET_MAC_FLTR" + }, + + { + WL_AP_BSS_START, + 0, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, + "AP_BSS_START" + }, + + { + AP_LPB_CMD, + IW_PRIV_TYPE_CHAR | 256, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0, + "AP_LPB_CMD" + }, + + { + WL_AP_STOP, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0, + "AP_BSS_STOP" + }, + { + WL_FW_RELOAD, + IW_PRIV_TYPE_CHAR | 256, + IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 0, + "WL_FW_RELOAD" + }, +#endif +#if defined(CSCAN) + { + WL_COMBO_SCAN, + IW_PRIV_TYPE_CHAR | 1024, + 0, + "CSCAN" + }, +#endif + }; + +const struct iw_handler_def wl_iw_handler_def = +{ + .num_standard = ARRAYSIZE(wl_iw_handler), + .standard = (iw_handler *) wl_iw_handler, + .num_private = ARRAYSIZE(wl_iw_priv_handler), + .num_private_args = ARRAY_SIZE(wl_iw_priv_args), + .private = (iw_handler *)wl_iw_priv_handler, + .private_args = (void *) wl_iw_priv_args, + +#if WIRELESS_EXT >= 19 + get_wireless_stats: dhd_get_wireless_stats, +#endif + }; +#endif + + + +int +wl_iw_ioctl( + struct net_device *dev, + struct ifreq *rq, + int cmd +) +{ + struct iwreq *wrq = (struct iwreq *) rq; + struct iw_request_info info; + iw_handler handler; + char *extra = NULL; + size_t token_size = 1; + int max_tokens = 0, ret = 0; + + net_os_wake_lock(dev); + + WL_TRACE(("\n%s, cmd:%x called via dhd->do_ioctl()entry point\n", __FUNCTION__, cmd)); + if (cmd < SIOCIWFIRST || + IW_IOCTL_IDX(cmd) >= ARRAYSIZE(wl_iw_handler) || + !(handler = wl_iw_handler[IW_IOCTL_IDX(cmd)])) { + WL_ERROR(("%s: error in cmd=%x : not supported\n", __FUNCTION__, cmd)); + net_os_wake_unlock(dev); + return -EOPNOTSUPP; + } + + switch (cmd) { + + case SIOCSIWESSID: + case SIOCGIWESSID: + case SIOCSIWNICKN: + case SIOCGIWNICKN: + max_tokens = IW_ESSID_MAX_SIZE + 1; + break; + + case SIOCSIWENCODE: + case SIOCGIWENCODE: +#if WIRELESS_EXT > 17 + case SIOCSIWENCODEEXT: + case SIOCGIWENCODEEXT: +#endif + max_tokens = wrq->u.data.length; + break; + + case SIOCGIWRANGE: + + max_tokens = sizeof(struct iw_range) + 500; + break; + + case SIOCGIWAPLIST: + token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality); + max_tokens = IW_MAX_AP; + break; + +#if WIRELESS_EXT > 13 + case SIOCGIWSCAN: +#if defined(WL_IW_USE_ISCAN) + if (g_iscan) + max_tokens = wrq->u.data.length; + else +#endif + max_tokens = IW_SCAN_MAX_DATA; + break; +#endif + + case SIOCSIWSPY: + token_size = sizeof(struct sockaddr); + max_tokens = IW_MAX_SPY; + break; + + case SIOCGIWSPY: + token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality); + max_tokens = IW_MAX_SPY; + break; + +#if WIRELESS_EXT > 17 + case SIOCSIWPMKSA: + case SIOCSIWGENIE: +#endif + case SIOCSIWPRIV: + max_tokens = wrq->u.data.length; + break; + } + + if (max_tokens && wrq->u.data.pointer) { + if (wrq->u.data.length > max_tokens) { + WL_ERROR(("%s: error in cmd=%x wrq->u.data.length=%d > max_tokens=%d\n", + __FUNCTION__, cmd, wrq->u.data.length, max_tokens)); + ret = -E2BIG; + goto wl_iw_ioctl_done; + } + if (!(extra = kmalloc(max_tokens * token_size, GFP_KERNEL))) { + ret = -ENOMEM; + goto wl_iw_ioctl_done; + } + + if (copy_from_user(extra, wrq->u.data.pointer, wrq->u.data.length * token_size)) { + kfree(extra); + ret = -EFAULT; + goto wl_iw_ioctl_done; + } + } + + info.cmd = cmd; + info.flags = 0; + + ret = handler(dev, &info, &wrq->u, extra); + + if (extra) { + if (copy_to_user(wrq->u.data.pointer, extra, wrq->u.data.length * token_size)) { + kfree(extra); + ret = -EFAULT; + goto wl_iw_ioctl_done; + } + + kfree(extra); + } + 
+wl_iw_ioctl_done: + + net_os_wake_unlock(dev); + + return ret; +} + + +static bool +wl_iw_conn_status_str(uint32 event_type, uint32 status, uint32 reason, + char* stringBuf, uint buflen) +{ + typedef struct conn_fail_event_map_t { + uint32 inEvent; + uint32 inStatus; + uint32 inReason; + const char* outName; + const char* outCause; + } conn_fail_event_map_t; + + +#define WL_IW_DONT_CARE 9999 + const conn_fail_event_map_t event_map [] = { + + + {WLC_E_SET_SSID, WLC_E_STATUS_SUCCESS, WL_IW_DONT_CARE, + "Conn", "Success"}, + {WLC_E_SET_SSID, WLC_E_STATUS_NO_NETWORKS, WL_IW_DONT_CARE, + "Conn", "NoNetworks"}, + {WLC_E_SET_SSID, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE, + "Conn", "ConfigMismatch"}, + {WLC_E_PRUNE, WL_IW_DONT_CARE, WLC_E_PRUNE_ENCR_MISMATCH, + "Conn", "EncrypMismatch"}, + {WLC_E_PRUNE, WL_IW_DONT_CARE, WLC_E_RSN_MISMATCH, + "Conn", "RsnMismatch"}, + {WLC_E_AUTH, WLC_E_STATUS_TIMEOUT, WL_IW_DONT_CARE, + "Conn", "AuthTimeout"}, + {WLC_E_AUTH, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE, + "Conn", "AuthFail"}, + {WLC_E_AUTH, WLC_E_STATUS_NO_ACK, WL_IW_DONT_CARE, + "Conn", "AuthNoAck"}, + {WLC_E_REASSOC, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE, + "Conn", "ReassocFail"}, + {WLC_E_REASSOC, WLC_E_STATUS_TIMEOUT, WL_IW_DONT_CARE, + "Conn", "ReassocTimeout"}, + {WLC_E_REASSOC, WLC_E_STATUS_ABORT, WL_IW_DONT_CARE, + "Conn", "ReassocAbort"}, + {WLC_E_PSK_SUP, WLC_SUP_KEYED, WL_IW_DONT_CARE, + "Sup", "ConnSuccess"}, + {WLC_E_PSK_SUP, WL_IW_DONT_CARE, WL_IW_DONT_CARE, + "Sup", "WpaHandshakeFail"}, + {WLC_E_DEAUTH_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE, + "Conn", "Deauth"}, + {WLC_E_DISASSOC_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE, + "Conn", "DisassocInd"}, + {WLC_E_DISASSOC, WL_IW_DONT_CARE, WL_IW_DONT_CARE, + "Conn", "Disassoc"} + }; + + const char* name = ""; + const char* cause = NULL; + int i; + + + for (i = 0; i < sizeof(event_map)/sizeof(event_map[0]); i++) { + const conn_fail_event_map_t* row = &event_map[i]; + if (row->inEvent == event_type && + (row->inStatus == status || row->inStatus == WL_IW_DONT_CARE) && + (row->inReason == reason || row->inReason == WL_IW_DONT_CARE)) { + name = row->outName; + cause = row->outCause; + break; + } + } + + + if (cause) { + memset(stringBuf, 0, buflen); + snprintf(stringBuf, buflen, "%s %s %02d %02d", + name, cause, status, reason); + WL_INFORM(("Connection status: %s\n", stringBuf)); + return TRUE; + } else { + return FALSE; + } +} + +#if WIRELESS_EXT > 14 + +static bool +wl_iw_check_conn_fail(wl_event_msg_t *e, char* stringBuf, uint buflen) +{ + uint32 event = ntoh32(e->event_type); + uint32 status = ntoh32(e->status); + uint32 reason = ntoh32(e->reason); + + if (wl_iw_conn_status_str(event, status, reason, stringBuf, buflen)) { + return TRUE; + } + else + return FALSE; +} +#endif + +#ifndef IW_CUSTOM_MAX +#define IW_CUSTOM_MAX 256 +#endif + +void +wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data) +{ +#if WIRELESS_EXT > 13 + union iwreq_data wrqu; + char extra[IW_CUSTOM_MAX + 1]; + int cmd = 0; + uint32 event_type = ntoh32(e->event_type); + uint16 flags = ntoh16(e->flags); + uint32 datalen = ntoh32(e->datalen); + uint32 status = ntoh32(e->status); + uint32 toto; + memset(&wrqu, 0, sizeof(wrqu)); + memset(extra, 0, sizeof(extra)); + + if (!dev) { + WL_ERROR(("%s: dev is null\n", __FUNCTION__)); + return; + } + + net_os_wake_lock(dev); + + WL_TRACE(("%s: dev=%s event=%d \n", __FUNCTION__, dev->name, event_type)); + + + switch (event_type) { +#if defined(SOFTAP) + case WLC_E_PRUNE: + if (ap_cfg_running) { + char *macaddr = (char *)&e->addr; + 
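+ /* With MAC filtering enabled, pruned stations that are not in the stored list are reported via a STA_BLOCK event */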
WL_SOFTAP(("PRUNE received, %02X:%02X:%02X:%02X:%02X:%02X!\n", + macaddr[0], macaddr[1], macaddr[2], macaddr[3], + macaddr[4], macaddr[5])); + + + if (ap_macmode) + { + int i; + for (i = 0; i < ap_black_list.count; i++) { + if (!bcmp(macaddr, &ap_black_list.ea[i], + sizeof(struct ether_addr))) { + WL_SOFTAP(("mac in black list, ignore it\n")); + break; + } + } + + if (i == ap_black_list.count) { + + char mac_buf[32] = {0}; + sprintf(mac_buf, "STA_BLOCK %02X:%02X:%02X:%02X:%02X:%02X", + macaddr[0], macaddr[1], macaddr[2], + macaddr[3], macaddr[4], macaddr[5]); + wl_iw_send_priv_event(priv_dev, mac_buf); + } + } + } + break; +#endif + case WLC_E_TXFAIL: + cmd = IWEVTXDROP; + memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN); + wrqu.addr.sa_family = ARPHRD_ETHER; + break; +#if WIRELESS_EXT > 14 + case WLC_E_JOIN: + case WLC_E_ASSOC_IND: + case WLC_E_REASSOC_IND: +#if defined(SOFTAP) + WL_SOFTAP(("STA connect received %d\n", event_type)); + if (ap_cfg_running) { + wl_iw_send_priv_event(priv_dev, "STA_JOIN"); + goto wl_iw_event_end; + } +#endif + memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN); + wrqu.addr.sa_family = ARPHRD_ETHER; + cmd = IWEVREGISTERED; + break; + case WLC_E_ROAM: + if (status == WLC_E_STATUS_SUCCESS) { + WL_ASSOC((" WLC_E_ROAM : success \n")); + goto wl_iw_event_end; + } + break; + + case WLC_E_DEAUTH_IND: + case WLC_E_DISASSOC_IND: +#if defined(SOFTAP) + WL_SOFTAP(("STA disconnect received %d\n", event_type)); + if (ap_cfg_running) { + wl_iw_send_priv_event(priv_dev, "STA_LEAVE"); + goto wl_iw_event_end; + } +#endif + cmd = SIOCGIWAP; + bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN); + wrqu.addr.sa_family = ARPHRD_ETHER; + bzero(&extra, ETHER_ADDR_LEN); + break; + case WLC_E_LINK: + case WLC_E_NDIS_LINK: + cmd = SIOCGIWAP; + if (!(flags & WLC_EVENT_MSG_LINK)) { + + +#ifdef SOFTAP +#ifdef AP_ONLY + if (ap_cfg_running) { +#else + if (ap_cfg_running && !strncmp(dev->name, "wl0.1", 5)) { +#endif + + WL_SOFTAP(("AP DOWN %d\n", event_type)); + wl_iw_send_priv_event(priv_dev, "AP_DOWN"); + } else { + WL_TRACE(("STA_Link Down\n")); + g_ss_cache_ctrl.m_link_down = 1; + } +#else + g_ss_cache_ctrl.m_link_down = 1; +#endif + WL_TRACE(("Link Down\n")); + + bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN); + bzero(&extra, ETHER_ADDR_LEN); + } + else { + + memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN); + g_ss_cache_ctrl.m_link_down = 0; + + memcpy(g_ss_cache_ctrl.m_active_bssid, &e->addr, ETHER_ADDR_LEN); +#ifdef SOFTAP + +#ifdef AP_ONLY + if (ap_cfg_running) { +#else + if (ap_cfg_running && !strncmp(dev->name, "wl0.1", 5)) { +#endif + + WL_SOFTAP(("AP UP %d\n", event_type)); + wl_iw_send_priv_event(priv_dev, "AP_UP"); + } else { + WL_TRACE(("STA_LINK_UP\n")); + } +#else +#endif + WL_TRACE(("Link UP\n")); + + } + wrqu.addr.sa_family = ARPHRD_ETHER; + break; + case WLC_E_ACTION_FRAME: + cmd = IWEVCUSTOM; + if (datalen + 1 <= sizeof(extra)) { + wrqu.data.length = datalen + 1; + extra[0] = WLC_E_ACTION_FRAME; + memcpy(&extra[1], data, datalen); + WL_TRACE(("WLC_E_ACTION_FRAME len %d \n", wrqu.data.length)); + } + break; + + case WLC_E_ACTION_FRAME_COMPLETE: + cmd = IWEVCUSTOM; + memcpy(&toto, data, 4); + if (sizeof(status) + 1 <= sizeof(extra)) { + wrqu.data.length = sizeof(status) + 1; + extra[0] = WLC_E_ACTION_FRAME_COMPLETE; + memcpy(&extra[1], &status, sizeof(status)); + printf("wl_iw_event status %d PacketId %d \n", status, toto); + printf("WLC_E_ACTION_FRAME_COMPLETE len %d \n", wrqu.data.length); + } + break; +#endif +#if WIRELESS_EXT > 17 + case WLC_E_MIC_ERROR: { + struct 
iw_michaelmicfailure *micerrevt = (struct iw_michaelmicfailure *)&extra; + cmd = IWEVMICHAELMICFAILURE; + wrqu.data.length = sizeof(struct iw_michaelmicfailure); + if (flags & WLC_EVENT_MSG_GROUP) + micerrevt->flags |= IW_MICFAILURE_GROUP; + else + micerrevt->flags |= IW_MICFAILURE_PAIRWISE; + memcpy(micerrevt->src_addr.sa_data, &e->addr, ETHER_ADDR_LEN); + micerrevt->src_addr.sa_family = ARPHRD_ETHER; + + break; + } + + case WLC_E_ASSOC_REQ_IE: + cmd = IWEVASSOCREQIE; + wrqu.data.length = datalen; + if (datalen < sizeof(extra)) + memcpy(extra, data, datalen); + break; + + case WLC_E_ASSOC_RESP_IE: + cmd = IWEVASSOCRESPIE; + wrqu.data.length = datalen; + if (datalen < sizeof(extra)) + memcpy(extra, data, datalen); + break; + + case WLC_E_PMKID_CACHE: { + if (data) + { + struct iw_pmkid_cand *iwpmkidcand = (struct iw_pmkid_cand *)&extra; + pmkid_cand_list_t *pmkcandlist; + pmkid_cand_t *pmkidcand; + int count; + + cmd = IWEVPMKIDCAND; + pmkcandlist = data; + count = ntoh32_ua((uint8 *)&pmkcandlist->npmkid_cand); + ASSERT(count >= 0); + wrqu.data.length = sizeof(struct iw_pmkid_cand); + pmkidcand = pmkcandlist->pmkid_cand; + while (count) { + bzero(iwpmkidcand, sizeof(struct iw_pmkid_cand)); + if (pmkidcand->preauth) + iwpmkidcand->flags |= IW_PMKID_CAND_PREAUTH; + bcopy(&pmkidcand->BSSID, &iwpmkidcand->bssid.sa_data, + ETHER_ADDR_LEN); + wireless_send_event(dev, cmd, &wrqu, extra); + pmkidcand++; + count--; + } + } + goto wl_iw_event_end; + } +#endif + + case WLC_E_SCAN_COMPLETE: +#if defined(WL_IW_USE_ISCAN) + if (!g_iscan) { + WL_ERROR(("Event WLC_E_SCAN_COMPLETE on g_iscan NULL!")); + goto wl_iw_event_end; + } + + if ((g_iscan) && (g_iscan->tsk_ctl.thr_pid >= 0) && + (g_iscan->iscan_state != ISCAN_STATE_IDLE)) + { + up(&g_iscan->tsk_ctl.sema); + } else { + cmd = SIOCGIWSCAN; + wrqu.data.length = strlen(extra); + WL_TRACE(("Event WLC_E_SCAN_COMPLETE from specific scan %d\n", + g_iscan->iscan_state)); + } +#else + cmd = SIOCGIWSCAN; + wrqu.data.length = strlen(extra); + WL_TRACE(("Event WLC_E_SCAN_COMPLETE\n")); +#endif + break; + + + case WLC_E_PFN_NET_FOUND: + { + wl_pfn_net_info_t *netinfo; + netinfo = (wl_pfn_net_info_t *)(data + sizeof(wl_pfn_scanresults_t) - + sizeof(wl_pfn_net_info_t)); + WL_ERROR(("%s Event WLC_E_PFN_NET_FOUND, send %s up : find %s len=%d\n", + __FUNCTION__, PNO_EVENT_UP, netinfo->pfnsubnet.SSID, + netinfo->pfnsubnet.SSID_len)); + cmd = IWEVCUSTOM; + memset(&wrqu, 0, sizeof(wrqu)); + strcpy(extra, PNO_EVENT_UP); + wrqu.data.length = strlen(extra); + } + break; + + default: + + WL_TRACE(("Unknown Event %d: ignoring\n", event_type)); + break; + } + if (cmd) { + if (cmd == SIOCGIWSCAN) + wireless_send_event(dev, cmd, &wrqu, NULL); + else + wireless_send_event(dev, cmd, &wrqu, extra); + } + +#if WIRELESS_EXT > 14 + + memset(extra, 0, sizeof(extra)); + if (wl_iw_check_conn_fail(e, extra, sizeof(extra))) { + cmd = IWEVCUSTOM; + wrqu.data.length = strlen(extra); + wireless_send_event(dev, cmd, &wrqu, extra); + } +#endif + + goto wl_iw_event_end; +wl_iw_event_end: + + net_os_wake_unlock(dev); +#endif +} + +int +wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats) +{ + int res = 0; + wl_cnt_t cnt; + int phy_noise; + int rssi; + scb_val_t scb_val; + + phy_noise = 0; + if ((res = dev_wlc_ioctl(dev, WLC_GET_PHY_NOISE, &phy_noise, sizeof(phy_noise)))) + goto done; + + phy_noise = dtoh32(phy_noise); + WL_TRACE(("wl_iw_get_wireless_stats phy noise=%d\n", phy_noise)); + + bzero(&scb_val, sizeof(scb_val_t)); + if ((res = dev_wlc_ioctl(dev, WLC_GET_RSSI, 
&scb_val, sizeof(scb_val_t)))) + goto done; + + rssi = dtoh32(scb_val.val); + WL_TRACE(("wl_iw_get_wireless_stats rssi=%d\n", rssi)); + if (rssi <= WL_IW_RSSI_NO_SIGNAL) + wstats->qual.qual = 0; + else if (rssi <= WL_IW_RSSI_VERY_LOW) + wstats->qual.qual = 1; + else if (rssi <= WL_IW_RSSI_LOW) + wstats->qual.qual = 2; + else if (rssi <= WL_IW_RSSI_GOOD) + wstats->qual.qual = 3; + else if (rssi <= WL_IW_RSSI_VERY_GOOD) + wstats->qual.qual = 4; + else + wstats->qual.qual = 5; + + + wstats->qual.level = 0x100 + rssi; + wstats->qual.noise = 0x100 + phy_noise; +#if WIRELESS_EXT > 18 + wstats->qual.updated |= (IW_QUAL_ALL_UPDATED | IW_QUAL_DBM); +#else + wstats->qual.updated |= 7; +#endif + +#if WIRELESS_EXT > 11 + WL_TRACE(("wl_iw_get_wireless_stats counters=%d\n", (int)sizeof(wl_cnt_t))); + + memset(&cnt, 0, sizeof(wl_cnt_t)); + res = dev_wlc_bufvar_get(dev, "counters", (char *)&cnt, sizeof(wl_cnt_t)); + if (res) + { + WL_ERROR(("wl_iw_get_wireless_stats counters failed error=%d\n", res)); + goto done; + } + + cnt.version = dtoh16(cnt.version); + if (cnt.version != WL_CNT_T_VERSION) { + WL_TRACE(("\tIncorrect version of counters struct: expected %d; got %d\n", + WL_CNT_T_VERSION, cnt.version)); + goto done; + } + + wstats->discard.nwid = 0; + wstats->discard.code = dtoh32(cnt.rxundec); + wstats->discard.fragment = dtoh32(cnt.rxfragerr); + wstats->discard.retries = dtoh32(cnt.txfail); + wstats->discard.misc = dtoh32(cnt.rxrunt) + dtoh32(cnt.rxgiant); + wstats->miss.beacon = 0; + + WL_TRACE(("wl_iw_get_wireless_stats counters txframe=%d txbyte=%d\n", + dtoh32(cnt.txframe), dtoh32(cnt.txbyte))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxfrmtoolong=%d\n", dtoh32(cnt.rxfrmtoolong))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxbadplcp=%d\n", dtoh32(cnt.rxbadplcp))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxundec=%d\n", dtoh32(cnt.rxundec))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxfragerr=%d\n", dtoh32(cnt.rxfragerr))); + WL_TRACE(("wl_iw_get_wireless_stats counters txfail=%d\n", dtoh32(cnt.txfail))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxrunt=%d\n", dtoh32(cnt.rxrunt))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxgiant=%d\n", dtoh32(cnt.rxgiant))); + +#endif + +done: + return res; +} +#if defined(COEX_DHCP) +static void +wl_iw_bt_flag_set( + struct net_device *dev, + bool set) +{ +#if defined(BT_DHCP_USE_FLAGS) + char buf_flag7_dhcp_on[8] = { 7, 00, 00, 00, 0x1, 0x0, 0x00, 0x00 }; + char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00}; +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_lock(); +#endif + + +#if defined(BT_DHCP_eSCO_FIX) + + set_btc_esco_params(dev, set); +#endif + + +#if defined(BT_DHCP_USE_FLAGS) + WL_TRACE_COEX(("WI-FI priority boost via bt flags, set:%d\n", set)); + if (set == TRUE) { + + dev_wlc_bufvar_set(dev, "btc_flags", + (char *)&buf_flag7_dhcp_on[0], sizeof(buf_flag7_dhcp_on)); + } + else { + + dev_wlc_bufvar_set(dev, "btc_flags", + (char *)&buf_flag7_default[0], sizeof(buf_flag7_default)); + } +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_unlock(); +#endif +} + +static void +wl_iw_bt_timerfunc(ulong data) +{ + bt_info_t *bt_local = (bt_info_t *)data; + bt_local->timer_on = 0; + WL_TRACE(("%s\n", __FUNCTION__)); + + up(&bt_local->tsk_ctl.sema); +} + +static int +_bt_dhcp_sysioc_thread(void *data) +{ + tsk_ctl_t *tsk_ctl = (tsk_ctl_t *)data; + + DAEMONIZE("dhcp_sysioc"); + + complete(&tsk_ctl->completed); + + while (down_interruptible(&tsk_ctl->sema) == 0) { + + 
SMP_RD_BARRIER_DEPENDS(); + if (tsk_ctl->terminated) { + break; + } + + if (g_bt->timer_on) { + g_bt->timer_on = 0; + del_timer_sync(&g_bt->timer); + } + + switch (g_bt->bt_state) { + case BT_DHCP_START: + + WL_TRACE_COEX(("%s bt_dhcp stm: started \n", __FUNCTION__)); + g_bt->bt_state = BT_DHCP_OPPORTUNITY_WINDOW; + mod_timer(&g_bt->timer, + jiffies + msecs_to_jiffies(BT_DHCP_OPPORTUNITY_WINDOW_TIME)); + g_bt->timer_on = 1; + break; + + case BT_DHCP_OPPORTUNITY_WINDOW: + if (g_bt->dhcp_done) { + WL_TRACE_COEX(("%s DHCP Done before T1 expiration\n", + __FUNCTION__)); + goto btc_coex_idle; + } + + + WL_TRACE_COEX(("%s DHCP T1:%d expired\n", + __FUNCTION__, BT_DHCP_OPPORTUNITY_WINDOW_TIME)); + + if (g_bt->dev) wl_iw_bt_flag_set(g_bt->dev, TRUE); + g_bt->bt_state = BT_DHCP_FLAG_FORCE_TIMEOUT; + mod_timer(&g_bt->timer, jiffies + msecs_to_jiffies(BT_DHCP_FLAG_FORCE_TIME)); + g_bt->timer_on = 1; + break; + + case BT_DHCP_FLAG_FORCE_TIMEOUT: + if (g_bt->dhcp_done) { + WL_TRACE_COEX(("%s DHCP Done before T2 expiration\n", + __FUNCTION__)); + } else { + + WL_TRACE_COEX(("%s DHCP wait interval T2:%d msec expired\n", + __FUNCTION__, BT_DHCP_FLAG_FORCE_TIME)); + } + + + if (g_bt->dev) wl_iw_bt_flag_set(g_bt->dev, FALSE); + btc_coex_idle: + g_bt->bt_state = BT_DHCP_IDLE; + g_bt->timer_on = 0; + break; + + default: + WL_ERROR(("%s error g_status=%d !!!\n", __FUNCTION__, + g_bt->bt_state)); + if (g_bt->dev) wl_iw_bt_flag_set(g_bt->dev, FALSE); + g_bt->bt_state = BT_DHCP_IDLE; + g_bt->timer_on = 0; + break; + } + + net_os_wake_unlock(g_bt->dev); + } + + if (g_bt->timer_on) { + g_bt->timer_on = 0; + del_timer_sync(&g_bt->timer); + } + complete_and_exit(&tsk_ctl->completed, 0); +} + +static void +wl_iw_bt_release(void) +{ + bt_info_t *bt_local = g_bt; + + if (!bt_local) { + return; + } + + if (bt_local->tsk_ctl.thr_pid >= 0) { + PROC_STOP(&bt_local->tsk_ctl); + } + kfree(bt_local); + g_bt = NULL; +} + +static int +wl_iw_bt_init(struct net_device *dev) +{ + bt_info_t *bt_dhcp = NULL; + + bt_dhcp = kmalloc(sizeof(bt_info_t), GFP_KERNEL); + if (!bt_dhcp) + return -ENOMEM; + + memset(bt_dhcp, 0, sizeof(bt_info_t)); + + g_bt = bt_dhcp; + bt_dhcp->dev = dev; + bt_dhcp->bt_state = BT_DHCP_IDLE; + + + bt_dhcp->timer_ms = 10; + init_timer(&bt_dhcp->timer); + bt_dhcp->timer.data = (ulong)bt_dhcp; + bt_dhcp->timer.function = wl_iw_bt_timerfunc; + bt_dhcp->ts_dhcp_start = 0; + bt_dhcp->ts_dhcp_ok = 0; + + PROC_START(_bt_dhcp_sysioc_thread, bt_dhcp, &bt_dhcp->tsk_ctl, 0); + if (bt_dhcp->tsk_ctl.thr_pid < 0) { + WL_ERROR(("Failed in %s\n", __FUNCTION__)); + return -ENOMEM; + } + + return 0; +} +#endif + +int +wl_iw_attach(struct net_device *dev, void * dhdp) +{ +#if defined(WL_IW_USE_ISCAN) + int params_size = 0; +#endif + wl_iw_t *iw; +#if defined(WL_IW_USE_ISCAN) + iscan_info_t *iscan = NULL; +#endif + + DHD_OS_MUTEX_INIT(&wl_cache_lock); + DHD_OS_MUTEX_INIT(&wl_softap_lock); + +#if defined(WL_IW_USE_ISCAN) + if (!dev) + return 0; + + + memset(&g_wl_iw_params, 0, sizeof(wl_iw_extra_params_t)); + + +#ifdef CSCAN + params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_iscan_params_t, params)) + + (WL_NUMCHANNELS * sizeof(uint16)) + WL_SCAN_PARAMS_SSID_MAX * sizeof(wlc_ssid_t); +#else + params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_iscan_params_t, params)); +#endif + iscan = kmalloc(sizeof(iscan_info_t), GFP_KERNEL); + if (!iscan) + return -ENOMEM; + memset(iscan, 0, sizeof(iscan_info_t)); + + + iscan->iscan_ex_params_p = (wl_iscan_params_t*)kmalloc(params_size, GFP_KERNEL); + if (!iscan->iscan_ex_params_p) { + 
kfree(iscan); + return -ENOMEM; + } + iscan->iscan_ex_param_size = params_size; + + + g_iscan = iscan; + iscan->dev = dev; + iscan->iscan_state = ISCAN_STATE_IDLE; + +#if defined(CONFIG_FIRST_SCAN) + g_first_broadcast_scan = BROADCAST_SCAN_FIRST_IDLE; + g_first_counter_scans = 0; + g_iscan->scan_flag = 0; +#endif + +#ifdef CONFIG_WPS2 + g_wps_probe_req_ie = NULL; + g_wps_probe_req_ie_len = 0; +#endif + + iscan->timer_ms = 8000; + init_timer(&iscan->timer); + iscan->timer.data = (ulong)iscan; + iscan->timer.function = wl_iw_timerfunc; + + PROC_START(_iscan_sysioc_thread, iscan, &iscan->tsk_ctl, 0); + if (iscan->tsk_ctl.thr_pid < 0) + return -ENOMEM; +#endif + + iw = *(wl_iw_t **)netdev_priv(dev); + iw->pub = (dhd_pub_t *)dhdp; +#ifdef SOFTAP + priv_dev = dev; +#endif + g_scan = NULL; + + + g_scan = (void *)kmalloc(G_SCAN_RESULTS, GFP_KERNEL); + if (!g_scan) + return -ENOMEM; + + memset(g_scan, 0, G_SCAN_RESULTS); + g_scan_specified_ssid = 0; + +#if !defined(CSCAN) + + wl_iw_init_ss_cache_ctrl(); +#endif +#ifdef COEX_DHCP + + wl_iw_bt_init(dev); +#endif + + + return 0; +} + +void +wl_iw_detach(void) +{ +#if defined(WL_IW_USE_ISCAN) + iscan_buf_t *buf; + iscan_info_t *iscan = g_iscan; + + if (!iscan) + return; + if (iscan->tsk_ctl.thr_pid >= 0) { + PROC_STOP(&iscan->tsk_ctl); + } + DHD_OS_MUTEX_LOCK(&wl_cache_lock); + while (iscan->list_hdr) { + buf = iscan->list_hdr->next; + kfree(iscan->list_hdr); + iscan->list_hdr = buf; + } + kfree(iscan->iscan_ex_params_p); + kfree(iscan); + g_iscan = NULL; + DHD_OS_MUTEX_UNLOCK(&wl_cache_lock); +#endif + + if (g_scan) + kfree(g_scan); + + g_scan = NULL; +#ifdef CONFIG_WPS2 + + if (g_wps_probe_req_ie) { + kfree(g_wps_probe_req_ie); + g_wps_probe_req_ie = NULL; + g_wps_probe_req_ie_len = 0; + } +#endif +#if !defined(CSCAN) + wl_iw_release_ss_cache_ctrl(); +#endif +#ifdef COEX_DHCP + wl_iw_bt_release(); +#endif + +#ifdef SOFTAP + if (ap_cfg_running) { + WL_TRACE(("\n%s AP is going down\n", __FUNCTION__)); + + wl_iw_send_priv_event(priv_dev, "AP_DOWN"); + } +#endif + +} diff --git a/drivers/net/wireless/bcmdhd/wl_iw.h b/drivers/net/wireless/bcmdhd/wl_iw.h new file mode 100644 index 0000000000000..9cdb53dfef8c2 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_iw.h @@ -0,0 +1,319 @@ +/* + * Linux Wireless Extensions support + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * $Id: wl_iw.h,v 1.15.80.6 2010-12-23 01:13:23 $ + */ + + +#ifndef _wl_iw_h_ +#define _wl_iw_h_ + +#include + +#include +#include +#include + +#define WL_SCAN_PARAMS_SSID_MAX 10 +#define GET_SSID "SSID=" +#define GET_CHANNEL "CH=" +#define GET_NPROBE "NPROBE=" +#define GET_ACTIVE_ASSOC_DWELL "ACTIVE=" +#define GET_PASSIVE_ASSOC_DWELL "PASSIVE=" +#define GET_HOME_DWELL "HOME=" +#define GET_SCAN_TYPE "TYPE=" + +#define BAND_GET_CMD "GETBAND" +#define BAND_SET_CMD "SETBAND" +#define DTIM_SKIP_GET_CMD "DTIMSKIPGET" +#define DTIM_SKIP_SET_CMD "DTIMSKIPSET" +#define SETSUSPENDOPT_CMD "SETSUSPENDOPT" +#define SETSUSPENDMODE_CMD "SETSUSPENDMODE" +#define PNOSSIDCLR_SET_CMD "PNOSSIDCLR" + +#define PNOSETUP_SET_CMD "PNOSETUP " +#define PNOSETADD_SET_CMD "PNOSETADD" +#define PNOENABLE_SET_CMD "PNOFORCE" +#define PNODEBUG_SET_CMD "PNODEBUG" +#define TXPOWER_SET_CMD "TXPOWER" + +#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5] +#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x" + + +typedef struct wl_iw_extra_params { + int target_channel; +} wl_iw_extra_params_t; + +struct cntry_locales_custom { + char iso_abbrev[WLC_CNTRY_BUF_SZ]; + char custom_locale[WLC_CNTRY_BUF_SZ]; + int32 custom_locale_rev; +}; + + +#define WL_IW_RSSI_MINVAL -200 +#define WL_IW_RSSI_NO_SIGNAL -91 +#define WL_IW_RSSI_VERY_LOW -80 +#define WL_IW_RSSI_LOW -70 +#define WL_IW_RSSI_GOOD -68 +#define WL_IW_RSSI_VERY_GOOD -58 +#define WL_IW_RSSI_EXCELLENT -57 +#define WL_IW_RSSI_INVALID 0 +#define MAX_WX_STRING 80 +#define isprint(c) bcm_isprint(c) +#define WL_IW_SET_ACTIVE_SCAN (SIOCIWFIRSTPRIV+1) +#define WL_IW_GET_RSSI (SIOCIWFIRSTPRIV+3) +#define WL_IW_SET_PASSIVE_SCAN (SIOCIWFIRSTPRIV+5) +#define WL_IW_GET_LINK_SPEED (SIOCIWFIRSTPRIV+7) +#define WL_IW_GET_CURR_MACADDR (SIOCIWFIRSTPRIV+9) +#define WL_IW_SET_STOP (SIOCIWFIRSTPRIV+11) +#define WL_IW_SET_START (SIOCIWFIRSTPRIV+13) + + +#define WL_SET_AP_CFG (SIOCIWFIRSTPRIV+15) +#define WL_AP_STA_LIST (SIOCIWFIRSTPRIV+17) +#define WL_AP_MAC_FLTR (SIOCIWFIRSTPRIV+19) +#define WL_AP_BSS_START (SIOCIWFIRSTPRIV+21) +#define AP_LPB_CMD (SIOCIWFIRSTPRIV+23) +#define WL_AP_STOP (SIOCIWFIRSTPRIV+25) +#define WL_FW_RELOAD (SIOCIWFIRSTPRIV+27) +#define WL_AP_STA_DISASSOC (SIOCIWFIRSTPRIV+29) +#define WL_COMBO_SCAN (SIOCIWFIRSTPRIV+31) + + +#define G_SCAN_RESULTS 8*1024 +#define WE_ADD_EVENT_FIX 0x80 +#define G_WLAN_SET_ON 0 +#define G_WLAN_SET_OFF 1 + +#define CHECK_EXTRA_FOR_NULL(extra) \ +if (!extra) { \ + WL_ERROR(("%s: error : extra is null pointer\n", __FUNCTION__)); \ + return -EINVAL; \ +} + +typedef struct wl_iw { + char nickname[IW_ESSID_MAX_SIZE]; + + struct iw_statistics wstats; + + int spy_num; + int wpaversion; + int pcipher; + int gcipher; + int privacy_invoked; + + struct ether_addr spy_addr[IW_MAX_SPY]; + struct iw_quality spy_qual[IW_MAX_SPY]; + void *wlinfo; + dhd_pub_t * pub; +} wl_iw_t; + +int wl_control_wl_start(struct net_device *dev); +#define WLC_IW_SS_CACHE_MAXLEN 2048 +#define WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN 32 +#define WLC_IW_BSS_INFO_MAXLEN \ + (WLC_IW_SS_CACHE_MAXLEN - WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN) + +typedef struct wl_iw_ss_cache { + struct wl_iw_ss_cache *next; + int dirty; + uint32 buflen; + uint32 version; + uint32 count; + wl_bss_info_t bss_info[1]; +} wl_iw_ss_cache_t; + +typedef struct wl_iw_ss_cache_ctrl { + wl_iw_ss_cache_t *m_cache_head; + int m_link_down; + int m_timer_expired; + char m_active_bssid[ETHER_ADDR_LEN]; + uint m_prev_scan_mode; + uint m_cons_br_scan_cnt; + struct timer_list *m_timer; +} wl_iw_ss_cache_ctrl_t; + 
+typedef enum broadcast_first_scan { + BROADCAST_SCAN_FIRST_IDLE = 0, + BROADCAST_SCAN_FIRST_STARTED, + BROADCAST_SCAN_FIRST_RESULT_READY, + BROADCAST_SCAN_FIRST_RESULT_CONSUMED +} broadcast_first_scan_t; +#ifdef SOFTAP +#define SSID_LEN 33 +#define SEC_LEN 16 +#define KEY_LEN 65 +#define PROFILE_OFFSET 32 +struct ap_profile { + uint8 ssid[SSID_LEN]; + uint8 sec[SEC_LEN]; + uint8 key[KEY_LEN]; + uint32 channel; + uint32 preamble; + uint32 max_scb; + uint32 closednet; + char country_code[WLC_CNTRY_BUF_SZ]; +}; + + +#define MACLIST_MODE_DISABLED 0 +#define MACLIST_MODE_DENY 1 +#define MACLIST_MODE_ALLOW 2 +struct mflist { + uint count; + struct ether_addr ea[16]; +}; +struct mac_list_set { + uint32 mode; + struct mflist mac_list; +}; +#endif + +#if WIRELESS_EXT > 12 +#include +extern const struct iw_handler_def wl_iw_handler_def; +#endif + +extern int wl_iw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +extern void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data); +extern int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats); +int wl_iw_attach(struct net_device *dev, void * dhdp); +void wl_iw_detach(void); + +extern int net_os_wake_lock(struct net_device *dev); +extern int net_os_wake_unlock(struct net_device *dev); +extern int net_os_wake_lock_timeout(struct net_device *dev); +extern int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val); +extern int net_os_set_suspend_disable(struct net_device *dev, int val); +extern int net_os_set_suspend(struct net_device *dev, int val, int force); +extern int net_os_set_dtim_skip(struct net_device *dev, int val); +extern void get_customized_country_code(char *country_iso_code, wl_country_t *cspec); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) +#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \ + iwe_stream_add_event(info, stream, ends, iwe, extra) +#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \ + iwe_stream_add_value(info, event, value, ends, iwe, event_len) +#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \ + iwe_stream_add_point(info, stream, ends, iwe, extra) +#else +#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \ + iwe_stream_add_event(stream, ends, iwe, extra) +#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \ + iwe_stream_add_value(event, value, ends, iwe, event_len) +#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \ + iwe_stream_add_point(stream, ends, iwe, extra) +#endif + +extern int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled); +extern int dhd_pno_clean(dhd_pub_t *dhd); +extern int dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid, + ushort scan_fr, int pno_repeat, int pno_freq_expo_max); +extern int dhd_pno_get_status(dhd_pub_t *dhd); +extern int dhd_dev_pno_reset(struct net_device *dev); +extern int dhd_dev_pno_set(struct net_device *dev, wlc_ssid_t* ssids_local, + int nssid, ushort scan_fr, int pno_repeat, int pno_freq_expo_max); +extern int dhd_dev_pno_enable(struct net_device *dev, int pfn_enabled); +extern int dhd_dev_get_pno_status(struct net_device *dev); +extern int dhd_get_dtim_skip(dhd_pub_t *dhd); + +void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec); + +#define PNO_TLV_PREFIX 'S' +#define PNO_TLV_VERSION '1' +#define PNO_TLV_SUBVERSION '2' +#define PNO_TLV_RESERVED '0' +#define PNO_TLV_TYPE_SSID_IE 'S' +#define PNO_TLV_TYPE_TIME 'T' +#define PNO_TLV_FREQ_REPEAT 'R' +#define PNO_TLV_FREQ_EXPO_MAX 'M' +#define 
PNO_EVENT_UP "PNO_EVENT" + +typedef struct cmd_tlv { + char prefix; + char version; + char subver; + char reserved; +} cmd_tlv_t; + + + + +typedef struct cscan_tlv { + char prefix; + char version; + char subver; + char reserved; +} cscan_tlv_t; + +#define CSCAN_COMMAND "CSCAN " +#define CSCAN_TLV_PREFIX 'S' +#define CSCAN_TLV_VERSION 1 +#define CSCAN_TLV_SUBVERSION 0 +#define CSCAN_TLV_TYPE_SSID_IE 'S' +#define CSCAN_TLV_TYPE_CHANNEL_IE 'C' +#define CSCAN_TLV_TYPE_NPROBE_IE 'N' +#define CSCAN_TLV_TYPE_ACTIVE_IE 'A' +#define CSCAN_TLV_TYPE_PASSIVE_IE 'P' +#define CSCAN_TLV_TYPE_HOME_IE 'H' +#define CSCAN_TLV_TYPE_STYPE_IE 'T' + +#ifdef SOFTAP_TLV_CFG + +#define SOFTAP_SET_CMD "SOFTAPSET " +#define SOFTAP_TLV_PREFIX 'A' +#define SOFTAP_TLV_VERSION '1' +#define SOFTAP_TLV_SUBVERSION '0' +#define SOFTAP_TLV_RESERVED '0' + +#define TLV_TYPE_SSID 'S' +#define TLV_TYPE_SECUR 'E' +#define TLV_TYPE_KEY 'K' +#define TLV_TYPE_CHANNEL 'C' +#endif + +extern int wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list, + int channel_num, int *bytes_left); + +extern int wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, + const char token, int input_size, int *bytes_left); + +extern int wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid, + int max, int *bytes_left); + +extern int wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max); + +extern int wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num); + + +#define NETDEV_PRIV(dev) (*(wl_iw_t **)netdev_priv(dev)) + +#ifdef CONFIG_WPS2 +#define WPS_ADD_PROBE_REQ_IE_CMD "ADD_WPS_PROBE_REQ_IE " +#define WPS_DEL_PROBE_REQ_IE_CMD "DEL_WPS_PROBE_REQ_IE " +#define WPS_PROBE_REQ_IE_CMD_LENGTH 21 +#endif + +#endif diff --git a/drivers/net/wireless/bcmdhd/wl_linux_mon.c b/drivers/net/wireless/bcmdhd/wl_linux_mon.c new file mode 100644 index 0000000000000..f44b4b04bb964 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_linux_mon.c @@ -0,0 +1,409 @@ +/* + * Broadcom Dongle Host Driver (DHD), Linux monitor network interface + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ *
+ * $Id: wl_linux_mon.c 303266 2011-12-16 00:15:23Z $
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+typedef enum monitor_states
+{
+	MONITOR_STATE_DEINIT = 0x0,
+	MONITOR_STATE_INIT = 0x1,
+	MONITOR_STATE_INTERFACE_ADDED = 0x2,
+	MONITOR_STATE_INTERFACE_DELETED = 0x4
+} monitor_states_t;
+extern int dhd_start_xmit(struct sk_buff *skb, struct net_device *net);
+
+/**
+ * Local declarations and definitions (not exposed)
+ */
+#define MON_PRINT(format, ...) printk("DHD-MON: %s " format, __func__, ##__VA_ARGS__)
+#define MON_TRACE MON_PRINT
+
+typedef struct monitor_interface {
+	int radiotap_enabled;
+	struct net_device* real_ndev;	/* The real interface that the monitor is on */
+	struct net_device* mon_ndev;
+} monitor_interface;
+
+typedef struct dhd_linux_monitor {
+	void *dhd_pub;
+	monitor_states_t monitor_state;
+	monitor_interface mon_if[DHD_MAX_IFS];
+	struct mutex lock;		/* lock to protect mon_if */
+} dhd_linux_monitor_t;
+
+static dhd_linux_monitor_t g_monitor;
+
+static struct net_device* lookup_real_netdev(char *name);
+static monitor_interface* ndev_to_monif(struct net_device *ndev);
+static int dhd_mon_if_open(struct net_device *ndev);
+static int dhd_mon_if_stop(struct net_device *ndev);
+static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+static void dhd_mon_if_set_multicast_list(struct net_device *ndev);
+static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr);
+
+static const struct net_device_ops dhd_mon_if_ops = {
+	.ndo_open		= dhd_mon_if_open,
+	.ndo_stop		= dhd_mon_if_stop,
+	.ndo_start_xmit		= dhd_mon_if_subif_start_xmit,
+	.ndo_set_multicast_list = dhd_mon_if_set_multicast_list,
+	.ndo_set_mac_address 	= dhd_mon_if_change_mac,
+};
+
+/**
+ * Local static function definitions
+ */
+
+/* Look up dhd's net device table to find a match (e.g. interface "eth0" is a match for "mon.eth0",
+ * "p2p-eth0-0" is a match for "mon.p2p-eth0-0")
+ */
+static struct net_device* lookup_real_netdev(char *name)
+{
+	int i;
+	int len = 0;
+	int last_name_len = 0;
+	struct net_device *ndev;
+	struct net_device *ndev_found = NULL;
+
+	/* We need to find interface "p2p-p2p-0" corresponding to monitor interface "mon-p2p-0".
+	 * Once the mon iface name reaches IFNAMSIZ, it is reset to p2p0-0 and the corresponding mon
+	 * iface would be mon-p2p0-0.
+	 */
+	for (i = 0; i < DHD_MAX_IFS; i++) {
+		ndev = dhd_idx2net(g_monitor.dhd_pub, i);
+
+		/* Skip "p2p" and look for "-p2p0-x" in the monitor interface name. If it
+		 * matches, then this netdev is the corresponding real_netdev.
+		 */
+		if (ndev && strstr(ndev->name, "p2p-p2p0")) {
+			len = strlen("p2p");
+		} else {
+		/* if p2p- is not present, then the IFNAMSIZ limit has been reached and the name
+		 * would have been reset.
In this case, look for p2p0-x in mon-p2p0-x
+		 */
+			len = 0;
+		}
+		if (ndev && strstr(name, (ndev->name + len))) {
+			if (strlen(ndev->name) > last_name_len) {
+				ndev_found = ndev;
+				last_name_len = strlen(ndev->name);
+			}
+		}
+	}
+
+	return ndev_found;
+}
+
+static monitor_interface* ndev_to_monif(struct net_device *ndev)
+{
+	int i;
+
+	for (i = 0; i < DHD_MAX_IFS; i++) {
+		if (g_monitor.mon_if[i].mon_ndev == ndev)
+			return &g_monitor.mon_if[i];
+	}
+
+	return NULL;
+}
+
+static int dhd_mon_if_open(struct net_device *ndev)
+{
+	int ret = 0;
+
+	MON_PRINT("enter\n");
+	return ret;
+}
+
+static int dhd_mon_if_stop(struct net_device *ndev)
+{
+	int ret = 0;
+
+	MON_PRINT("enter\n");
+	return ret;
+}
+
+static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	int ret = 0;
+	int rtap_len;
+	int qos_len = 0;
+	int dot11_hdr_len = 24;
+	int snap_len = 6;
+	unsigned char *pdata;
+	unsigned short frame_ctl;
+	unsigned char src_mac_addr[6];
+	unsigned char dst_mac_addr[6];
+	struct ieee80211_hdr *dot11_hdr;
+	struct ieee80211_radiotap_header *rtap_hdr;
+	monitor_interface* mon_if;
+
+	MON_PRINT("enter\n");
+
+	mon_if = ndev_to_monif(ndev);
+	if (mon_if == NULL || mon_if->real_ndev == NULL) {
+		MON_PRINT(" cannot find matched net dev, skip the packet\n");
+		goto fail;
+	}
+
+	if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
+		goto fail;
+
+	rtap_hdr = (struct ieee80211_radiotap_header *)skb->data;
+	if (unlikely(rtap_hdr->it_version))
+		goto fail;
+
+	rtap_len = ieee80211_get_radiotap_len(skb->data);
+	if (unlikely(skb->len < rtap_len))
+		goto fail;
+
+	MON_PRINT("radiotap len (should be 14): %d\n", rtap_len);
+
+	/* Skip the radiotap header */
+	skb_pull(skb, rtap_len);
+
+	dot11_hdr = (struct ieee80211_hdr *)skb->data;
+	frame_ctl = le16_to_cpu(dot11_hdr->frame_control);
+	/* Only process 802.11 data frames */
+	if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) {
+		/* Check if the QoS bit is set */
+		if (dot11_hdr->frame_control & 0x0080)
+			qos_len = 2;
+		/* Check if this is a Wireless Distribution System (WDS) frame
+		 * which has 4 MAC addresses
+		 */
+		if ((dot11_hdr->frame_control & 0x0300) == 0x0300)
+			dot11_hdr_len += 6;
+
+		memcpy(dst_mac_addr, dot11_hdr->addr1, sizeof(dst_mac_addr));
+		memcpy(src_mac_addr, dot11_hdr->addr2, sizeof(src_mac_addr));
+
+		/* Skip the 802.11 header, QoS (if any) and SNAP, but leave space
+		 * for the two MAC addresses
+		 */
+		skb_pull(skb, dot11_hdr_len + qos_len + snap_len - sizeof(src_mac_addr) * 2);
+		pdata = (unsigned char*)skb->data;
+		memcpy(pdata, dst_mac_addr, sizeof(dst_mac_addr));
+		memcpy(pdata + sizeof(dst_mac_addr), src_mac_addr, sizeof(src_mac_addr));
+
+		MON_PRINT("if name: %s, matched if name %s\n", ndev->name, mon_if->real_ndev->name);
+
+		/* Use the real net device to transmit the packet */
+		ret = dhd_start_xmit(skb, mon_if->real_ndev);
+
+		return ret;
+	}
+fail:
+	dev_kfree_skb(skb);
+	return 0;
+}
+
+static void dhd_mon_if_set_multicast_list(struct net_device *ndev)
+{
+	monitor_interface* mon_if;
+
+	mon_if = ndev_to_monif(ndev);
+	if (mon_if == NULL || mon_if->real_ndev == NULL) {
+		MON_PRINT(" cannot find matched net dev, skip the packet\n");
+	} else {
+		MON_PRINT("enter, if name: %s, matched if name %s\n",
+		ndev->name, mon_if->real_ndev->name);
+	}
+}
+
+static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr)
+{
+	int ret = 0;
+	monitor_interface* mon_if;
+
+	mon_if = ndev_to_monif(ndev);
+	if (mon_if == NULL || mon_if->real_ndev == NULL) {
+		MON_PRINT(" cannot find matched net dev, skip the 
packet\n"); + } else { + MON_PRINT("enter, if name: %s, matched if name %s\n", + ndev->name, mon_if->real_ndev->name); + } + return ret; +} + +/** + * Global function definitions (declared in dhd_linux_mon.h) + */ + +int dhd_add_monitor(char *name, struct net_device **new_ndev) +{ + int i; + int idx = -1; + int ret = 0; + struct net_device* ndev = NULL; + dhd_linux_monitor_t **dhd_mon; + + mutex_lock(&g_monitor.lock); + + MON_TRACE("enter, if name: %s\n", name); + if (!name || !new_ndev) { + MON_PRINT("invalid parameters\n"); + ret = -EINVAL; + goto out; + } + + /* + * Find a vacancy + */ + for (i = 0; i < DHD_MAX_IFS; i++) + if (g_monitor.mon_if[i].mon_ndev == NULL) { + idx = i; + break; + } + if (idx == -1) { + MON_PRINT("exceeds maximum interfaces\n"); + ret = -EFAULT; + goto out; + } + + ndev = alloc_etherdev(sizeof(dhd_linux_monitor_t*)); + if (!ndev) { + MON_PRINT("failed to allocate memory\n"); + ret = -ENOMEM; + goto out; + } + + ndev->type = ARPHRD_IEEE80211_RADIOTAP; + strncpy(ndev->name, name, IFNAMSIZ); + ndev->name[IFNAMSIZ - 1] = 0; + ndev->netdev_ops = &dhd_mon_if_ops; + + ret = register_netdevice(ndev); + if (ret) { + MON_PRINT(" register_netdevice failed (%d)\n", ret); + goto out; + } + + *new_ndev = ndev; + g_monitor.mon_if[idx].radiotap_enabled = TRUE; + g_monitor.mon_if[idx].mon_ndev = ndev; + g_monitor.mon_if[idx].real_ndev = lookup_real_netdev(name); + dhd_mon = (dhd_linux_monitor_t **)netdev_priv(ndev); + *dhd_mon = &g_monitor; + g_monitor.monitor_state = MONITOR_STATE_INTERFACE_ADDED; + MON_PRINT("net device returned: 0x%p\n", ndev); + MON_PRINT("found a matched net device, name %s\n", g_monitor.mon_if[idx].real_ndev->name); + +out: + if (ret && ndev) + free_netdev(ndev); + + mutex_unlock(&g_monitor.lock); + return ret; + +} + +int dhd_del_monitor(struct net_device *ndev) +{ + int i; + bool rollback_lock = false; + if (!ndev) + return -EINVAL; + mutex_lock(&g_monitor.lock); + for (i = 0; i < DHD_MAX_IFS; i++) { + if (g_monitor.mon_if[i].mon_ndev == ndev || + g_monitor.mon_if[i].real_ndev == ndev) { + g_monitor.mon_if[i].real_ndev = NULL; + if (rtnl_is_locked()) { + rtnl_unlock(); + rollback_lock = true; + } + unregister_netdev(g_monitor.mon_if[i].mon_ndev); + free_netdev(g_monitor.mon_if[i].mon_ndev); + g_monitor.mon_if[i].mon_ndev = NULL; + g_monitor.monitor_state = MONITOR_STATE_INTERFACE_DELETED; + break; + } + } + if (rollback_lock) { + rtnl_lock(); + rollback_lock = false; + } + + if (g_monitor.monitor_state != + MONITOR_STATE_INTERFACE_DELETED) + MON_PRINT("interface not found in monitor IF array, is this a monitor IF? 
0x%p\n", + ndev); + mutex_unlock(&g_monitor.lock); + + return 0; +} + +int dhd_monitor_init(void *dhd_pub) +{ + if (g_monitor.monitor_state == MONITOR_STATE_DEINIT) { + g_monitor.dhd_pub = dhd_pub; + mutex_init(&g_monitor.lock); + g_monitor.monitor_state = MONITOR_STATE_INIT; + } + return 0; +} + +int dhd_monitor_uninit(void) +{ + int i; + struct net_device *ndev; + bool rollback_lock = false; + mutex_lock(&g_monitor.lock); + if (g_monitor.monitor_state != MONITOR_STATE_DEINIT) { + for (i = 0; i < DHD_MAX_IFS; i++) { + ndev = g_monitor.mon_if[i].mon_ndev; + if (ndev) { + if (rtnl_is_locked()) { + rtnl_unlock(); + rollback_lock = true; + } + unregister_netdev(ndev); + free_netdev(ndev); + g_monitor.mon_if[i].real_ndev = NULL; + g_monitor.mon_if[i].mon_ndev = NULL; + if (rollback_lock) { + rtnl_lock(); + rollback_lock = false; + } + } + } + g_monitor.monitor_state = MONITOR_STATE_DEINIT; + } + mutex_unlock(&g_monitor.lock); + return 0; +} diff --git a/drivers/net/wireless/bcmdhd/wldev_common.c b/drivers/net/wireless/bcmdhd/wldev_common.c new file mode 100644 index 0000000000000..7bea3dcfa7163 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wldev_common.c @@ -0,0 +1,424 @@ +/* + * Common function shared by Linux WEXT, cfg80211 and p2p drivers + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: wldev_common.c,v 1.1.4.1.2.14 2011-02-09 01:40:07 $ + */ + +#include +#include +#include +#include + +#include +#include + +#define htod32(i) i +#define htod16(i) i +#define dtoh32(i) i +#define dtoh16(i) i +#define htodchanspec(i) i +#define dtohchanspec(i) i + +#define WLDEV_ERROR(args) \ + do { \ + printk(KERN_ERR "WLDEV-ERROR) %s : ", __func__); \ + printk args; \ + } while (0) + +extern int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd); + +s32 wldev_ioctl( + struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set) +{ + s32 ret = 0; + struct wl_ioctl ioc; + + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = cmd; + ioc.buf = arg; + ioc.len = len; + ioc.set = set; + + ret = dhd_ioctl_entry_local(dev, &ioc, cmd); + return ret; +} + +/* Format a iovar buffer, not bsscfg indexed. The bsscfg index will be + * taken care of in dhd_ioctl_entry. 
Internal use only, not exposed to + * wl_iw, wl_cfg80211 and wl_cfgp2p + */ +static s32 wldev_mkiovar( + s8 *iovar_name, s8 *param, s32 paramlen, + s8 *iovar_buf, u32 buflen) +{ + s32 iolen = 0; + + iolen = bcm_mkiovar(iovar_name, param, paramlen, iovar_buf, buflen); + return iolen; +} + +s32 wldev_iovar_getbuf( + struct net_device *dev, s8 *iovar_name, + void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync) +{ + s32 ret = 0; + s32 iovar_len = 0; + if (buf_sync) { + mutex_lock(buf_sync); + } + iovar_len = wldev_mkiovar(iovar_name, param, paramlen, buf, buflen); + ret = wldev_ioctl(dev, WLC_GET_VAR, buf, buflen, FALSE); + if (buf_sync) + mutex_unlock(buf_sync); + return ret; +} + + +s32 wldev_iovar_setbuf( + struct net_device *dev, s8 *iovar_name, + void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync) +{ + s32 ret = 0; + s32 iovar_len; + if (buf_sync) { + mutex_lock(buf_sync); + } + iovar_len = wldev_mkiovar(iovar_name, param, paramlen, buf, buflen); + ret = wldev_ioctl(dev, WLC_SET_VAR, buf, iovar_len, TRUE); + if (buf_sync) + mutex_unlock(buf_sync); + return ret; +} + +s32 wldev_iovar_setint( + struct net_device *dev, s8 *iovar, s32 val) +{ + s8 iovar_buf[WLC_IOCTL_SMLEN]; + + val = htod32(val); + memset(iovar_buf, 0, sizeof(iovar_buf)); + return wldev_iovar_setbuf(dev, iovar, &val, sizeof(val), iovar_buf, + sizeof(iovar_buf), NULL); +} + + +s32 wldev_iovar_getint( + struct net_device *dev, s8 *iovar, s32 *pval) +{ + s8 iovar_buf[WLC_IOCTL_SMLEN]; + s32 err; + + memset(iovar_buf, 0, sizeof(iovar_buf)); + err = wldev_iovar_getbuf(dev, iovar, pval, sizeof(*pval), iovar_buf, + sizeof(iovar_buf), NULL); + if (err == 0) + { + memcpy(pval, iovar_buf, sizeof(*pval)); + *pval = dtoh32(*pval); + } + return err; +} + +/** Format a bsscfg indexed iovar buffer. The bsscfg index will be + * taken care of in dhd_ioctl_entry. 
Internal use only, not exposed to + * wl_iw, wl_cfg80211 and wl_cfgp2p + */ +s32 wldev_mkiovar_bsscfg( + const s8 *iovar_name, s8 *param, s32 paramlen, + s8 *iovar_buf, s32 buflen, s32 bssidx) +{ + const s8 *prefix = "bsscfg:"; + s8 *p; + u32 prefixlen; + u32 namelen; + u32 iolen; + + if (bssidx == 0) { + return wldev_mkiovar((s8*)iovar_name, (s8 *)param, paramlen, + (s8 *) iovar_buf, buflen); + } + + prefixlen = (u32) strlen(prefix); /* lengh of bsscfg prefix */ + namelen = (u32) strlen(iovar_name) + 1; /* lengh of iovar name + null */ + iolen = prefixlen + namelen + sizeof(u32) + paramlen; + + if (buflen < 0 || iolen > (u32)buflen) + { + WLDEV_ERROR(("%s: buffer is too short\n", __FUNCTION__)); + return BCME_BUFTOOSHORT; + } + + p = (s8 *)iovar_buf; + + /* copy prefix, no null */ + memcpy(p, prefix, prefixlen); + p += prefixlen; + + /* copy iovar name including null */ + memcpy(p, iovar_name, namelen); + p += namelen; + + /* bss config index as first param */ + bssidx = htod32(bssidx); + memcpy(p, &bssidx, sizeof(u32)); + p += sizeof(u32); + + /* parameter buffer follows */ + if (paramlen) + memcpy(p, param, paramlen); + + return iolen; + +} + +s32 wldev_iovar_getbuf_bsscfg( + struct net_device *dev, s8 *iovar_name, + void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync) +{ + s32 ret = 0; + s32 iovar_len = 0; + if (buf_sync) { + mutex_lock(buf_sync); + } + iovar_len = wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx); + ret = wldev_ioctl(dev, WLC_GET_VAR, buf, buflen, FALSE); + if (buf_sync) { + mutex_unlock(buf_sync); + } + return ret; + +} + +s32 wldev_iovar_setbuf_bsscfg( + struct net_device *dev, s8 *iovar_name, + void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync) +{ + s32 ret = 0; + s32 iovar_len; + if (buf_sync) { + mutex_lock(buf_sync); + } + iovar_len = wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx); + + ret = wldev_ioctl(dev, WLC_SET_VAR, buf, iovar_len, TRUE); + if (buf_sync) { + mutex_unlock(buf_sync); + } + return ret; +} + +s32 wldev_iovar_setint_bsscfg( + struct net_device *dev, s8 *iovar, s32 val, s32 bssidx) +{ + s8 iovar_buf[WLC_IOCTL_SMLEN]; + + val = htod32(val); + memset(iovar_buf, 0, sizeof(iovar_buf)); + return wldev_iovar_setbuf_bsscfg(dev, iovar, &val, sizeof(val), iovar_buf, + sizeof(iovar_buf), bssidx, NULL); +} + + +s32 wldev_iovar_getint_bsscfg( + struct net_device *dev, s8 *iovar, s32 *pval, s32 bssidx) +{ + s8 iovar_buf[WLC_IOCTL_SMLEN]; + s32 err; + + memset(iovar_buf, 0, sizeof(iovar_buf)); + err = wldev_iovar_getbuf_bsscfg(dev, iovar, pval, sizeof(*pval), iovar_buf, + sizeof(iovar_buf), bssidx, NULL); + if (err == 0) + { + memcpy(pval, iovar_buf, sizeof(*pval)); + *pval = dtoh32(*pval); + } + return err; +} + +int wldev_get_link_speed( + struct net_device *dev, int *plink_speed) +{ + int error; + + if (!plink_speed) + return -ENOMEM; + error = wldev_ioctl(dev, WLC_GET_RATE, plink_speed, sizeof(int), 0); + if (unlikely(error)) + return error; + + /* Convert internal 500Kbps to Kbps */ + *plink_speed *= 500; + return error; +} + +int wldev_get_rssi( + struct net_device *dev, int *prssi) +{ + scb_val_t scb_val; + int error; + + if (!prssi) + return -ENOMEM; + bzero(&scb_val, sizeof(scb_val_t)); + + error = wldev_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t), 0); + if (unlikely(error)) + return error; + + *prssi = dtoh32(scb_val.val); + return error; +} + +int wldev_get_ssid( + struct net_device *dev, wlc_ssid_t *pssid) +{ + 
int error; + + if (!pssid) + return -ENOMEM; + error = wldev_ioctl(dev, WLC_GET_SSID, pssid, sizeof(wlc_ssid_t), 0); + if (unlikely(error)) + return error; + pssid->SSID_len = dtoh32(pssid->SSID_len); + return error; +} + +int wldev_get_band( + struct net_device *dev, uint *pband) +{ + int error; + + error = wldev_ioctl(dev, WLC_GET_BAND, pband, sizeof(uint), 0); + return error; +} + +int wldev_set_band( + struct net_device *dev, uint band) +{ + int error = -1; + + if ((band == WLC_BAND_AUTO) || (band == WLC_BAND_5G) || (band == WLC_BAND_2G)) { + error = wldev_ioctl(dev, WLC_SET_BAND, &band, sizeof(band), 1); + if (!error) + dhd_bus_band_set(dev, band); + } + return error; +} + +int wldev_set_country( + struct net_device *dev, char *country_code) +{ + int error = -1; + wl_country_t cspec = {{0}, 0, {0}}; + scb_val_t scbval; + char smbuf[WLC_IOCTL_SMLEN]; + + if (!country_code) { + WLDEV_ERROR(("%s: set country failed for %s\n", + __FUNCTION__, country_code)); + return error; + } + + error = wldev_iovar_getbuf(dev, "country", &cspec, sizeof(cspec), + smbuf, sizeof(smbuf), NULL); + if (error < 0) + WLDEV_ERROR(("%s: get country failed = %d\n", __FUNCTION__, error)); + + if ((error < 0) || + (strncmp(country_code, smbuf, WLC_CNTRY_BUF_SZ) != 0)) { + bzero(&scbval, sizeof(scb_val_t)); + error = wldev_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t), 1); + if (error < 0) { + WLDEV_ERROR(("%s: set country failed due to Disassoc error %d\n", + __FUNCTION__, error)); + return error; + } + + cspec.rev = -1; + memcpy(cspec.country_abbrev, country_code, WLC_CNTRY_BUF_SZ); + memcpy(cspec.ccode, country_code, WLC_CNTRY_BUF_SZ); + get_customized_country_code((char *)&cspec.country_abbrev, &cspec); + error = wldev_iovar_setbuf(dev, "country", &cspec, sizeof(cspec), + smbuf, sizeof(smbuf), NULL); + if (error < 0) { + WLDEV_ERROR(("%s: set country for %s as %s rev %d failed\n", + __FUNCTION__, country_code, cspec.ccode, cspec.rev)); + return error; + } + dhd_bus_country_set(dev, &cspec); + WLDEV_ERROR(("%s: set country for %s as %s rev %d\n", + __FUNCTION__, country_code, cspec.ccode, cspec.rev)); + } + return 0; +} + +/* + * softap channel autoselect + */ +int wldev_get_auto_channel(struct net_device *dev, int *chan) +{ + int chosen = 0; + wl_uint32_list_t request; + int retry = 0; + int updown = 0; + int ret = 0; + wlc_ssid_t null_ssid; + + memset(&null_ssid, 0, sizeof(wlc_ssid_t)); + ret |= wldev_ioctl(dev, WLC_UP, &updown, sizeof(updown), true); + + ret |= wldev_ioctl(dev, WLC_SET_SSID, &null_ssid, sizeof(null_ssid), true); + + request.count = htod32(0); + ret = wldev_ioctl(dev, WLC_START_CHANNEL_SEL, &request, sizeof(request), true); + if (ret < 0) { + WLDEV_ERROR(("can't start auto channel scan:%d\n", ret)); + goto fail; + } + + while (retry++ < 15) { + + bcm_mdelay(350); + + ret = wldev_ioctl(dev, WLC_GET_CHANNEL_SEL, &chosen, sizeof(chosen), false); + + if ((ret == 0) && (dtoh32(chosen) != 0)) { + *chan = (uint16)chosen & 0x00FF; /* covert chanspec --> chan number */ + printf("%s: Got channel = %d, attempt:%d\n", + __FUNCTION__, *chan, retry); + break; + } + } + + if ((ret = wldev_ioctl(dev, WLC_DOWN, &updown, sizeof(updown), true)) < 0) { + WLDEV_ERROR(("%s fail to WLC_DOWN ioctl err =%d\n", __FUNCTION__, ret)); + goto fail; + } + +fail : + if (ret < 0) { + WLDEV_ERROR(("%s: return value %d\n", __FUNCTION__, ret)); + } + return ret; +} diff --git a/drivers/net/wireless/bcmdhd/wldev_common.h b/drivers/net/wireless/bcmdhd/wldev_common.h new file mode 100644 index 0000000000000..dd3c899d12ca9 --- 
/dev/null +++ b/drivers/net/wireless/bcmdhd/wldev_common.h @@ -0,0 +1,113 @@ +/* + * Common function shared by Linux WEXT, cfg80211 and p2p drivers + * + * Copyright (C) 1999-2011, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * $Id: wldev_common.h,v 1.1.4.1.2.14 2011-02-09 01:40:07 $ + */ +#ifndef __WLDEV_COMMON_H__ +#define __WLDEV_COMMON_H__ + +#include + +/* wl_dev_ioctl - get/set IOCTLs, will call net_device's do_ioctl (or + * netdev_ops->ndo_do_ioctl in new kernels) + * @dev: the net_device handle + */ +s32 wldev_ioctl( + struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set); + +/** Retrieve named IOVARs, this function calls wl_dev_ioctl with + * WLC_GET_VAR IOCTL code + */ +s32 wldev_iovar_getbuf( + struct net_device *dev, s8 *iovar_name, + void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync); + +/** Set named IOVARs, this function calls wl_dev_ioctl with + * WLC_SET_VAR IOCTL code + */ +s32 wldev_iovar_setbuf( + struct net_device *dev, s8 *iovar_name, + void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync); + +s32 wldev_iovar_setint( + struct net_device *dev, s8 *iovar, s32 val); + +s32 wldev_iovar_getint( + struct net_device *dev, s8 *iovar, s32 *pval); + +/** The following function can be implemented if there is a need for bsscfg + * indexed IOVARs + */ + +s32 wldev_mkiovar_bsscfg( + const s8 *iovar_name, s8 *param, s32 paramlen, + s8 *iovar_buf, s32 buflen, s32 bssidx); + +/** Retrieve named and bsscfg indexed IOVARs, this function calls wl_dev_ioctl with + * WLC_GET_VAR IOCTL code + */ +s32 wldev_iovar_getbuf_bsscfg( + struct net_device *dev, s8 *iovar_name, void *param, s32 paramlen, + void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync); + +/** Set named and bsscfg indexed IOVARs, this function calls wl_dev_ioctl with + * WLC_SET_VAR IOCTL code + */ +s32 wldev_iovar_setbuf_bsscfg( + struct net_device *dev, s8 *iovar_name, void *param, s32 paramlen, + void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync); + +s32 wldev_iovar_getint_bsscfg( + struct net_device *dev, s8 *iovar, s32 *pval, s32 bssidx); + +s32 wldev_iovar_setint_bsscfg( + struct net_device *dev, s8 *iovar, s32 val, s32 bssidx); + +extern void get_customized_country_code(char *country_iso_code, wl_country_t *cspec); +extern void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec); +extern void dhd_bus_band_set(struct net_device *dev, uint band); +extern int wldev_set_country(struct 
net_device *dev, char *country_code); +extern int net_os_wake_lock(struct net_device *dev); +extern int net_os_wake_unlock(struct net_device *dev); +extern int net_os_wake_lock_timeout(struct net_device *dev); +extern int net_os_wake_lock_timeout_enable(struct net_device *dev, int val); +extern int net_os_set_dtim_skip(struct net_device *dev, int val); +extern int net_os_set_suspend_disable(struct net_device *dev, int val); +extern int net_os_set_suspend(struct net_device *dev, int val, int force); +extern int wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid, + int max, int *bytes_left); + +/* Get the link speed from dongle, speed is in kpbs */ +int wldev_get_link_speed(struct net_device *dev, int *plink_speed); + +int wldev_get_rssi(struct net_device *dev, int *prssi); + +int wldev_get_ssid(struct net_device *dev, wlc_ssid_t *pssid); + +int wldev_get_band(struct net_device *dev, uint *pband); + +int wldev_set_band(struct net_device *dev, uint band); + +int wldev_get_auto_channel(struct net_device *dev, int *chan); + +#endif /* __WLDEV_COMMON_H__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h index 65b5834da28c7..c2dd4cdeb0d6f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h +++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h @@ -74,8 +74,6 @@ /* RSSI to dBm */ #define IWL39_RSSI_OFFSET 95 -#define IWL_DEFAULT_TX_POWER 0x0F - /* * EEPROM related constants, enums, and structures. */ diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index 39b6f16c87fae..4e7b58bb1a1b5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c @@ -1823,7 +1823,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) /* If we issue a new RXON command which required a tune then we must * send a new TXPOWER command or we won't be able to Tx any frames */ - rc = priv->cfg->ops->lib->send_tx_power(priv); + rc = iwl_set_tx_power(priv, priv->tx_power_next, true); if (rc) { IWL_ERR(priv, "Error setting Tx power (%d).\n", rc); return rc; diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c index 91a9f52534699..992caa0231d06 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c @@ -1571,7 +1571,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c /* If we issue a new RXON command which required a tune then we must * send a new TXPOWER command or we won't be able to Tx any frames */ - ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true); + ret = iwl_set_tx_power(priv, priv->tx_power_next, true); if (ret) { IWL_ERR(priv, "Error sending TX power (%d)\n", ret); return ret; diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c index 3dee87e8f55db..d9c87b3227eb3 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c @@ -604,6 +604,7 @@ const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv, struct iwl_mod_params iwlagn_mod_params = { .amsdu_size_8K = 1, .restart_fw = 1, + .plcp_check = true, /* the rest are 0 by default */ }; diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c index 6d140bd532918..ee802fe0e4c45 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c @@ -288,10 +288,9 @@ int iwlagn_commit_rxon(struct 
iwl_priv *priv, struct iwl_rxon_context *ctx) * If we issue a new RXON command which required a tune then we must * send a new TXPOWER command or we won't be able to Tx any frames. * - * FIXME: which RXON requires a tune? Can we optimise this out in - * some cases? + * It's expected we set power here if channel is changing. */ - ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true); + ret = iwl_set_tx_power(priv, priv->tx_power_next, true); if (ret) { IWL_ERR(priv, "Error sending TX power (%d)\n", ret); return ret; diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c index 24a11b8f73bc1..c13542b495fed 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c @@ -561,12 +561,17 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) hdr_len = ieee80211_hdrlen(fc); - /* Find index into station table for destination station */ - sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta); - if (sta_id == IWL_INVALID_STATION) { - IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", - hdr->addr1); - goto drop_unlock; + /* For management frames use broadcast id to do not break aggregation */ + if (!ieee80211_is_data(fc)) + sta_id = ctx->bcast_sta_id; + else { + /* Find index into station table for destination station */ + sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta); + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", + hdr->addr1); + goto drop_unlock; + } } IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); @@ -1207,12 +1212,16 @@ int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { tx_info = &txq->txb[txq->q.read_ptr]; - iwlagn_tx_status(priv, tx_info, - txq_id >= IWLAGN_FIRST_AMPDU_QUEUE); + + if (WARN_ON_ONCE(tx_info->skb == NULL)) + continue; hdr = (struct ieee80211_hdr *)tx_info->skb->data; - if (hdr && ieee80211_is_data_qos(hdr->frame_control)) + if (ieee80211_is_data_qos(hdr->frame_control)) nfreed++; + + iwlagn_tx_status(priv, tx_info, + txq_id >= IWLAGN_FIRST_AMPDU_QUEUE); tx_info->skb = NULL; if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl) diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index c1cfd9952e520..be076439cc91e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -3841,12 +3841,6 @@ static int iwl_init_drv(struct iwl_priv *priv) priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF; } - /* Set the tx_power_user_lmt to the lowest power level - * this value will get overwritten by channel max power avg - * from eeprom */ - priv->tx_power_user_lmt = IWLAGN_TX_POWER_TARGET_POWER_MIN; - priv->tx_power_next = IWLAGN_TX_POWER_TARGET_POWER_MIN; - ret = iwl_init_channel_map(priv); if (ret) { IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); @@ -4592,3 +4586,9 @@ MODULE_PARM_DESC(antenna_coupling, module_param_named(bt_ch_inhibition, iwlagn_bt_ch_announce, bool, S_IRUGO); MODULE_PARM_DESC(bt_ch_inhibition, "Disable BT channel inhibition (default: enable)"); + +module_param_named(plcp_check, iwlagn_mod_params.plcp_check, bool, S_IRUGO); +MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])"); + +module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO); +MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])"); diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c 
b/drivers/net/wireless/iwlwifi/iwl-core.c index efbde1f1a8bfc..c4c8417153dc0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c @@ -168,6 +168,7 @@ int iwlcore_init_geos(struct iwl_priv *priv) struct ieee80211_channel *geo_ch; struct ieee80211_rate *rates; int i = 0; + s8 max_tx_power = 0; if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates || priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) { @@ -244,8 +245,8 @@ int iwlcore_init_geos(struct iwl_priv *priv) geo_ch->flags |= ch->ht40_extension_channel; - if (ch->max_power_avg > priv->tx_power_device_lmt) - priv->tx_power_device_lmt = ch->max_power_avg; + if (ch->max_power_avg > max_tx_power) + max_tx_power = ch->max_power_avg; } else { geo_ch->flags |= IEEE80211_CHAN_DISABLED; } @@ -258,6 +259,10 @@ int iwlcore_init_geos(struct iwl_priv *priv) geo_ch->flags); } + priv->tx_power_device_lmt = max_tx_power; + priv->tx_power_user_lmt = max_tx_power; + priv->tx_power_next = max_tx_power; + if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) && priv->cfg->sku & IWL_SKU_A) { IWL_INFO(priv, "Incorrectly detected BG card as ABG. " @@ -1161,6 +1166,8 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) { int ret; s8 prev_tx_power; + bool defer; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; lockdep_assert_held(&priv->mutex); @@ -1188,10 +1195,15 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) if (!iwl_is_ready_rf(priv)) return -EIO; - /* scan complete use tx_power_next, need to be updated */ + /* scan complete and commit_rxon use tx_power_next value, + * it always need to be updated for newest request */ priv->tx_power_next = tx_power; - if (test_bit(STATUS_SCANNING, &priv->status) && !force) { - IWL_DEBUG_INFO(priv, "Deferring tx power set while scanning\n"); + + /* do not set tx power when scanning or channel changing */ + defer = test_bit(STATUS_SCANNING, &priv->status) || + memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)); + if (defer && !force) { + IWL_DEBUG_INFO(priv, "Deferring tx power set\n"); return 0; } @@ -1867,6 +1879,15 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mutex_lock(&priv->mutex); + if (!ctx->vif || !iwl_is_ready_rf(priv)) { + /* + * Huh? But wait ... this can maybe happen when + * we're in the middle of a firmware restart! 
+ */ + err = -EBUSY; + goto out; + } + interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes; if (!(interface_modes & BIT(newtype))) { @@ -1894,6 +1915,7 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, /* success */ iwl_teardown_interface(priv, vif, true); vif->type = newtype; + vif->p2p = newp2p; err = iwl_setup_interface(priv, ctx); WARN_ON(err); /* diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h index a3474376fdbc7..5c0d5f72cbac5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/drivers/net/wireless/iwlwifi/iwl-core.h @@ -263,6 +263,8 @@ struct iwl_mod_params { int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */ int antenna; /* def: 0 = both antennas (use diversity) */ int restart_fw; /* def: 1 = restart firmware */ + bool plcp_check; /* def: true = enable plcp health check */ + bool ack_check; /* def: false = disable ack health check */ }; /* diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h index 8dda67850af45..e4872b13f93f0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h @@ -1604,21 +1604,24 @@ iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif) ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \ if (priv->valid_contexts & BIT(ctx->ctxid)) -static inline int iwl_is_associated(struct iwl_priv *priv, - enum iwl_rxon_context_id ctxid) +static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx) { - return (priv->contexts[ctxid].active.filter_flags & - RXON_FILTER_ASSOC_MSK) ? 1 : 0; + return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0; } -static inline int iwl_is_any_associated(struct iwl_priv *priv) +static inline int iwl_is_associated(struct iwl_priv *priv, + enum iwl_rxon_context_id ctxid) { - return iwl_is_associated(priv, IWL_RXON_CTX_BSS); + return iwl_is_associated_ctx(&priv->contexts[ctxid]); } -static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx) +static inline int iwl_is_any_associated(struct iwl_priv *priv) { - return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0; + struct iwl_rxon_context *ctx; + for_each_context(priv, ctx) + if (iwl_is_associated_ctx(ctx)) + return true; + return false; } static inline int is_channel_valid(const struct iwl_channel_info *ch_info) diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c index 358cfd7e5af19..8b3c12753f9e8 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c @@ -724,13 +724,6 @@ int iwl_init_channel_map(struct iwl_priv *priv) flags & EEPROM_CHANNEL_RADAR)) ? 
"" : "not "); - /* Set the tx_power_user_lmt to the highest power - * supported by any channel */ - if (eeprom_ch_info[ch].max_power_avg > - priv->tx_power_user_lmt) - priv->tx_power_user_lmt = - eeprom_ch_info[ch].max_power_avg; - ch_info++; } } diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h index 9e6f31355eee8..c0cd307dc2e82 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h @@ -241,7 +241,7 @@ struct iwl_eeprom_enhanced_txpwr { /* 6x00 Specific */ #define EEPROM_6000_TX_POWER_VERSION (4) -#define EEPROM_6000_EEPROM_VERSION (0x434) +#define EEPROM_6000_EEPROM_VERSION (0x423) /* 6x50 Specific */ #define EEPROM_6050_TX_POWER_VERSION (4) diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.c b/drivers/net/wireless/iwlwifi/iwl-legacy.c index bb1a742a98a0e..7f4905bc16316 100644 --- a/drivers/net/wireless/iwlwifi/iwl-legacy.c +++ b/drivers/net/wireless/iwlwifi/iwl-legacy.c @@ -123,6 +123,13 @@ int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed) goto set_ch_out; } + if (priv->iw_mode == NL80211_IFTYPE_ADHOC && + !is_channel_ibss(ch_info)) { + IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n"); + ret = -EINVAL; + goto set_ch_out; + } + spin_lock_irqsave(&priv->lock, flags); for_each_context(priv, ctx) { diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c index 87a6fd84d4d25..b7076173436ac 100644 --- a/drivers/net/wireless/iwlwifi/iwl-rx.c +++ b/drivers/net/wireless/iwlwifi/iwl-rx.c @@ -234,10 +234,13 @@ EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif); void iwl_recover_from_statistics(struct iwl_priv *priv, struct iwl_rx_packet *pkt) { + const struct iwl_mod_params *mod_params = priv->cfg->mod_params; + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) return; if (iwl_is_any_associated(priv)) { - if (priv->cfg->ops->lib->check_ack_health) { + if (mod_params->ack_check && + priv->cfg->ops->lib->check_ack_health) { if (!priv->cfg->ops->lib->check_ack_health( priv, pkt)) { /* @@ -250,7 +253,8 @@ void iwl_recover_from_statistics(struct iwl_priv *priv, return; } } - if (priv->cfg->ops->lib->check_plcp_health) { + if (mod_params->plcp_check && + priv->cfg->ops->lib->check_plcp_health) { if (!priv->cfg->ops->lib->check_plcp_health( priv, pkt)) { /* diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 371abbf60eac4..64917edc9f5f3 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -94,6 +94,7 @@ MODULE_LICENSE("GPL"); struct iwl_mod_params iwl3945_mod_params = { .sw_crypto = 1, .restart_fw = 1, + .disable_hw_scan = 1, /* the rest are 0 by default */ }; @@ -3858,10 +3859,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv) priv->force_reset[IWL_FW_RESET].reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD; - - priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER; - priv->tx_power_next = IWL_DEFAULT_TX_POWER; - if (eeprom->version < EEPROM_3945_EEPROM_VERSION) { IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n", eeprom->version); @@ -3995,8 +3992,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e * "the hard way", rather than using device's scan. 
*/ if (iwl3945_mod_params.disable_hw_scan) { - dev_printk(KERN_DEBUG, &(pdev->dev), - "sw scan support is deprecated\n"); + IWL_DEBUG_INFO(priv, "Disabling hw_scan\n"); iwl3945_hw_ops.hw_scan = NULL; } @@ -4318,8 +4314,7 @@ MODULE_PARM_DESC(debug, "debug output mask"); #endif module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan, int, S_IRUGO); -MODULE_PARM_DESC(disable_hw_scan, - "disable hardware scanning (default 0) (deprecated)"); +MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)"); module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, S_IRUGO); MODULE_PARM_DESC(fw_restart3945, "restart firmware in case of error"); diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c index 78c4da150a745..b9b0a0cec7966 100644 --- a/drivers/net/wireless/libertas/cmd.c +++ b/drivers/net/wireless/libertas/cmd.c @@ -1335,8 +1335,8 @@ int lbs_execute_next_command(struct lbs_private *priv) cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) { lbs_deb_host( "EXEC_NEXT_CMD: ignore ENTER_PS cmd\n"); - list_del(&cmdnode->list); spin_lock_irqsave(&priv->driver_lock, flags); + list_del(&cmdnode->list); lbs_complete_command(priv, cmdnode, 0); spin_unlock_irqrestore(&priv->driver_lock, flags); @@ -1348,8 +1348,8 @@ int lbs_execute_next_command(struct lbs_private *priv) (priv->psstate == PS_STATE_PRE_SLEEP)) { lbs_deb_host( "EXEC_NEXT_CMD: ignore EXIT_PS cmd in sleep\n"); - list_del(&cmdnode->list); spin_lock_irqsave(&priv->driver_lock, flags); + list_del(&cmdnode->list); lbs_complete_command(priv, cmdnode, 0); spin_unlock_irqrestore(&priv->driver_lock, flags); priv->needtowakeup = 1; @@ -1362,7 +1362,9 @@ int lbs_execute_next_command(struct lbs_private *priv) "EXEC_NEXT_CMD: sending EXIT_PS\n"); } } + spin_lock_irqsave(&priv->driver_lock, flags); list_del(&cmdnode->list); + spin_unlock_irqrestore(&priv->driver_lock, flags); lbs_deb_host("EXEC_NEXT_CMD: sending command 0x%04x\n", le16_to_cpu(cmd->command)); lbs_submit_command(priv, cmdnode); diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index 9b344a921e742..a8f3bc740dfaf 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c @@ -56,6 +56,7 @@ static struct usb_device_id p54u_table[] __devinitdata = { {USB_DEVICE(0x0846, 0x4210)}, /* Netgear WG121 the second ? 
*/ {USB_DEVICE(0x0846, 0x4220)}, /* Netgear WG111 */ {USB_DEVICE(0x09aa, 0x1000)}, /* Spinnaker Proto board */ + {USB_DEVICE(0x0bf8, 0x1007)}, /* Fujitsu E-5400 USB */ {USB_DEVICE(0x0cde, 0x0006)}, /* Medion 40900, Roper Europe */ {USB_DEVICE(0x0db0, 0x6826)}, /* MSI UB54G (MS-6826) */ {USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */ @@ -68,6 +69,7 @@ static struct usb_device_id p54u_table[] __devinitdata = { {USB_DEVICE(0x1915, 0x2235)}, /* Linksys WUSB54G Portable OEM */ {USB_DEVICE(0x2001, 0x3701)}, /* DLink DWL-G120 Spinnaker */ {USB_DEVICE(0x2001, 0x3703)}, /* DLink DWL-G122 */ + {USB_DEVICE(0x2001, 0x3762)}, /* Conceptronic C54U */ {USB_DEVICE(0x5041, 0x2234)}, /* Linksys WUSB54G */ {USB_DEVICE(0x5041, 0x2235)}, /* Linksys WUSB54G Portable */ @@ -80,6 +82,7 @@ static struct usb_device_id p54u_table[] __devinitdata = { {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ + {USB_DEVICE(0x083a, 0xc501)}, /* Zoom Wireless-G 4410 */ {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */ {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */ {USB_DEVICE(0x0915, 0x2000)}, /* Cohiba Proto board */ diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c index f618b9623e5a6..2cfdd3890bd17 100644 --- a/drivers/net/wireless/p54/txrx.c +++ b/drivers/net/wireless/p54/txrx.c @@ -705,7 +705,7 @@ int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb) struct p54_tx_info *p54info; struct p54_hdr *hdr; struct p54_tx_data *txhdr; - unsigned int padding, len, extra_len; + unsigned int padding, len, extra_len = 0; int i, j, ridx; u16 hdr_flags = 0, aid = 0; u8 rate, queue = 0, crypt_offset = 0; diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 54917a2813981..e2a528da36417 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -2810,10 +2810,7 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev) rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, ®); rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); - rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_DMA_BUSY, 0); rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0); - rt2x00_set_field32(®, WPDMA_GLO_CFG_RX_DMA_BUSY, 0); - rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1); rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); /* Wait for DMA, ignore error */ @@ -2823,9 +2820,6 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev) rt2x00_set_field32(®, MAC_SYS_CTRL_ENABLE_TX, 0); rt2x00_set_field32(®, MAC_SYS_CTRL_ENABLE_RX, 0); rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); - - rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0); - rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0); } EXPORT_SYMBOL_GPL(rt2800_disable_radio); diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index 3b3f1e45ab3e5..37a38b5a2a8f7 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c @@ -475,39 +475,23 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev) static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev) { - u32 reg; - - rt2800_disable_radio(rt2x00dev); - - rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001280); - - rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, ®); - rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX0, 1); - rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX1, 
1); - rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX2, 1); - rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX3, 1); - rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX4, 1); - rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX5, 1); - rt2x00_set_field32(®, WPDMA_RST_IDX_DRX_IDX0, 1); - rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg); - - rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f); - rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00); + if (rt2x00_is_soc(rt2x00dev)) { + rt2800_disable_radio(rt2x00dev); + rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0); + rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0); + } } static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { - /* - * Always put the device to sleep (even when we intend to wakeup!) - * if the device is booting and wasn't asleep it will return - * failure when attempting to wakeup. - */ - rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0xff, 2); - if (state == STATE_AWAKE) { - rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0); + rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0x02); rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKUP); + } else if (state == STATE_SLEEP) { + rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, 0xffffffff); + rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, 0xffffffff); + rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0x01, 0xff, 0x01); } return 0; diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index 9597a03242cce..2b77a291aa970 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -1031,8 +1031,10 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev) * Stop all work. */ cancel_work_sync(&rt2x00dev->intf_work); - cancel_work_sync(&rt2x00dev->rxdone_work); - cancel_work_sync(&rt2x00dev->txdone_work); + if (rt2x00_is_usb(rt2x00dev)) { + cancel_work_sync(&rt2x00dev->rxdone_work); + cancel_work_sync(&rt2x00dev->txdone_work); + } /* * Free the tx status fifo. diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c index 6b82cac37ee33..2bb5297655cee 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/dev.c +++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c @@ -871,23 +871,35 @@ static void rtl8187_work(struct work_struct *work) /* The RTL8187 returns the retry count through register 0xFFFA. In * addition, it appears to be a cumulative retry count, not the * value for the current TX packet. When multiple TX entries are - * queued, the retry count will be valid for the last one in the queue. - * The "error" should not matter for purposes of rate setting. */ + * waiting in the queue, the retry count will be the total for all. + * The "error" may matter for purposes of rate setting, but there is + * no other choice with this hardware. 
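+ * The delta since the last pass is therefore split evenly across all
+ * queued status entries (avg_retry below), and if the resulting count
+ * exceeds RETRY_COUNT the frame is reported without the ACK flag.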
+ */ struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv, work.work); struct ieee80211_tx_info *info; struct ieee80211_hw *dev = priv->dev; static u16 retry; u16 tmp; + u16 avg_retry; + int length; mutex_lock(&priv->conf_mutex); tmp = rtl818x_ioread16(priv, (__le16 *)0xFFFA); + length = skb_queue_len(&priv->b_tx_status.queue); + if (unlikely(!length)) + length = 1; + if (unlikely(tmp < retry)) + tmp = retry; + avg_retry = (tmp - retry) / length; while (skb_queue_len(&priv->b_tx_status.queue) > 0) { struct sk_buff *old_skb; old_skb = skb_dequeue(&priv->b_tx_status.queue); info = IEEE80211_SKB_CB(old_skb); - info->status.rates[0].count = tmp - retry + 1; + info->status.rates[0].count = avg_retry + 1; + if (info->status.rates[0].count > RETRY_COUNT) + info->flags &= ~IEEE80211_TX_STAT_ACK; ieee80211_tx_status_irqsafe(dev, old_skb); } retry = tmp; @@ -933,8 +945,8 @@ static int rtl8187_start(struct ieee80211_hw *dev) rtl818x_iowrite32(priv, &priv->map->TX_CONF, RTL818X_TX_CONF_HW_SEQNUM | RTL818X_TX_CONF_DISREQQSIZE | - (7 << 8 /* short retry limit */) | - (7 << 0 /* long retry limit */) | + (RETRY_COUNT << 8 /* short retry limit */) | + (RETRY_COUNT << 0 /* long retry limit */) | (7 << 21 /* MAX TX DMA */)); rtl8187_init_urbs(dev); rtl8187b_init_status_urb(dev); @@ -1378,6 +1390,9 @@ static int __devinit rtl8187_probe(struct usb_interface *intf, dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_RX_INCLUDES_FCS; + /* Initialize rate-control variables */ + dev->max_rates = 1; + dev->max_rate_tries = RETRY_COUNT; eeprom.data = dev; eeprom.register_read = rtl8187_eeprom_register_read; diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h index 0d7b1423f77b4..f1cc90751dbf5 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h +++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h @@ -35,6 +35,8 @@ #define RFKILL_MASK_8187_89_97 0x2 #define RFKILL_MASK_8198 0x4 +#define RETRY_COUNT 7 + struct rtl8187_rx_info { struct urb *urb; struct ieee80211_hw *dev; diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c index e64403b6896d7..6ec06a4a4c6df 100644 --- a/drivers/net/wireless/wl12xx/testmode.c +++ b/drivers/net/wireless/wl12xx/testmode.c @@ -204,7 +204,10 @@ static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[]) kfree(wl->nvs); - wl->nvs = kzalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL); + if (len != sizeof(struct wl1271_nvs_file)) + return -EINVAL; + + wl->nvs = kzalloc(len, GFP_KERNEL); if (!wl->nvs) { wl1271_error("could not allocate memory for the nvs file"); ret = -ENOMEM; diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c index b78a38d9172a5..8c7c522a056ac 100644 --- a/drivers/net/zorro8390.c +++ b/drivers/net/zorro8390.c @@ -126,7 +126,7 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z, board = z->resource.start; ioaddr = board+cards[i].offset; - dev = alloc_ei_netdev(); + dev = ____alloc_ei_netdev(0); if (!dev) return -ENOMEM; if (!request_mem_region(ioaddr, NE_IO_EXTENT*2, DRV_NAME)) { @@ -146,15 +146,15 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z, static const struct net_device_ops zorro8390_netdev_ops = { .ndo_open = zorro8390_open, .ndo_stop = zorro8390_close, - .ndo_start_xmit = ei_start_xmit, - .ndo_tx_timeout = ei_tx_timeout, - .ndo_get_stats = ei_get_stats, - .ndo_set_multicast_list = ei_set_multicast_list, + .ndo_start_xmit = __ei_start_xmit, + .ndo_tx_timeout = 
__ei_tx_timeout, + .ndo_get_stats = __ei_get_stats, + .ndo_set_multicast_list = __ei_set_multicast_list, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = eth_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ei_poll, + .ndo_poll_controller = __ei_poll, #endif }; diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index cb23aa2ebf96b..e610cfe4f07bc 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -212,6 +212,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) pdev = pci_get_slot(pbus, PCI_DEVFN(device, function)); if (pdev) { + pdev->current_state = PCI_D0; slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON); pci_dev_put(pdev); } diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 4789f8e8bf7ad..5dc5d3e3508e8 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -1835,7 +1835,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) ret = iommu_attach_domain(domain, iommu); if (ret) { - domain_exit(domain); + free_domain_mem(domain); goto error; } @@ -3260,9 +3260,15 @@ static int device_notifier(struct notifier_block *nb, if (!domain) return 0; - if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) + if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) { domain_remove_one_dev_info(domain, pdev); + if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) && + !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) && + list_empty(&domain->devices)) + domain_exit(domain); + } + return 0; } @@ -3411,6 +3417,11 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, domain->iommu_count--; domain_update_iommu_cap(domain); spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags); + + spin_lock_irqsave(&iommu->lock, tmp_flags); + clear_bit(domain->id, iommu->domain_ids); + iommu->domains[domain->id] = NULL; + spin_unlock_irqrestore(&iommu->lock, tmp_flags); } spin_unlock_irqrestore(&device_domain_lock, flags); @@ -3627,9 +3638,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, pte = dmar_domain->pgd; if (dma_pte_present(pte)) { - free_pgtable_page(dmar_domain->pgd); dmar_domain->pgd = (struct dma_pte *) phys_to_virt(dma_pte_addr(pte)); + free_pgtable_page(pte); } dmar_domain->agaw--; } diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index ea25e5bfcf238..c85438a367d53 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -1088,7 +1088,7 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev) attr->write = write_vpd_attr; retval = sysfs_create_bin_file(&dev->dev.kobj, attr); if (retval) { - kfree(dev->vpd->attr); + kfree(attr); return retval; } dev->vpd->attr = attr; diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 3188cd96b3386..bbdb4fd85b9cf 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -69,6 +69,7 @@ struct pcie_link_state { }; static int aspm_disabled, aspm_force, aspm_clear_state; +static bool aspm_support_enabled = true; static DEFINE_MUTEX(aspm_lock); static LIST_HEAD(link_list); @@ -896,6 +897,7 @@ static int __init pcie_aspm_disable(char *str) { if (!strcmp(str, "off")) { aspm_disabled = 1; + aspm_support_enabled = false; printk(KERN_INFO "PCIe ASPM is disabled\n"); } else if (!strcmp(str, "force")) { aspm_force = 1; @@ -930,3 +932,8 @@ int pcie_aspm_enabled(void) } EXPORT_SYMBOL(pcie_aspm_enabled); +bool pcie_aspm_support_enabled(void) +{ + return 
aspm_support_enabled; +} +EXPORT_SYMBOL(pcie_aspm_support_enabled); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 53a786fd0d40c..a1e4f6156913f 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -533,6 +533,17 @@ static void __devinit quirk_piix4_acpi(struct pci_dev *dev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, quirk_piix4_acpi); +#define ICH_PMBASE 0x40 +#define ICH_ACPI_CNTL 0x44 +#define ICH4_ACPI_EN 0x10 +#define ICH6_ACPI_EN 0x80 +#define ICH4_GPIOBASE 0x58 +#define ICH4_GPIO_CNTL 0x5c +#define ICH4_GPIO_EN 0x10 +#define ICH6_GPIOBASE 0x48 +#define ICH6_GPIO_CNTL 0x4c +#define ICH6_GPIO_EN 0x10 + /* * ICH4, ICH4-M, ICH5, ICH5-M ACPI: Three IO regions pointed to by longwords at * 0x40 (128 bytes of ACPI, GPIO & TCO registers) @@ -541,12 +552,33 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, qui static void __devinit quirk_ich4_lpc_acpi(struct pci_dev *dev) { u32 region; + u8 enable; - pci_read_config_dword(dev, 0x40, ®ion); - quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH4 ACPI/GPIO/TCO"); + /* + * The check for PCIBIOS_MIN_IO is to ensure we won't create a conflict + * with low legacy (and fixed) ports. We don't know the decoding + * priority and can't tell whether the legacy device or the one created + * here is really at that address. This happens on boards with broken + * BIOSes. + */ + + pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable); + if (enable & ICH4_ACPI_EN) { + pci_read_config_dword(dev, ICH_PMBASE, ®ion); + region &= PCI_BASE_ADDRESS_IO_MASK; + if (region >= PCIBIOS_MIN_IO) + quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, + "ICH4 ACPI/GPIO/TCO"); + } - pci_read_config_dword(dev, 0x58, ®ion); - quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH4 GPIO"); + pci_read_config_byte(dev, ICH4_GPIO_CNTL, &enable); + if (enable & ICH4_GPIO_EN) { + pci_read_config_dword(dev, ICH4_GPIOBASE, ®ion); + region &= PCI_BASE_ADDRESS_IO_MASK; + if (region >= PCIBIOS_MIN_IO) + quirk_io_region(dev, region, 64, + PCI_BRIDGE_RESOURCES + 1, "ICH4 GPIO"); + } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi); @@ -562,12 +594,25 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, qui static void __devinit ich6_lpc_acpi_gpio(struct pci_dev *dev) { u32 region; + u8 enable; - pci_read_config_dword(dev, 0x40, ®ion); - quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH6 ACPI/GPIO/TCO"); + pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable); + if (enable & ICH6_ACPI_EN) { + pci_read_config_dword(dev, ICH_PMBASE, ®ion); + region &= PCI_BASE_ADDRESS_IO_MASK; + if (region >= PCIBIOS_MIN_IO) + quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, + "ICH6 ACPI/GPIO/TCO"); + } - pci_read_config_dword(dev, 0x48, ®ion); - quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO"); + pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable); + if (enable & ICH4_GPIO_EN) { + pci_read_config_dword(dev, ICH6_GPIOBASE, ®ion); + region &= PCI_BASE_ADDRESS_IO_MASK; + if (region >= PCIBIOS_MIN_IO) + quirk_io_region(dev, region, 64, + PCI_BRIDGE_RESOURCES + 1, "ICH6 GPIO"); + } } static void __devinit ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize) @@ -2618,58 +2663,6 @@ 
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375, #endif /* CONFIG_PCI_MSI */ -#ifdef CONFIG_PCI_IOV - -/* - * For Intel 82576 SR-IOV NIC, if BIOS doesn't allocate resources for the - * SR-IOV BARs, zero the Flash BAR and program the SR-IOV BARs to use the - * old Flash Memory Space. - */ -static void __devinit quirk_i82576_sriov(struct pci_dev *dev) -{ - int pos, flags; - u32 bar, start, size; - - if (PAGE_SIZE > 0x10000) - return; - - flags = pci_resource_flags(dev, 0); - if ((flags & PCI_BASE_ADDRESS_SPACE) != - PCI_BASE_ADDRESS_SPACE_MEMORY || - (flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) != - PCI_BASE_ADDRESS_MEM_TYPE_32) - return; - - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); - if (!pos) - return; - - pci_read_config_dword(dev, pos + PCI_SRIOV_BAR, &bar); - if (bar & PCI_BASE_ADDRESS_MEM_MASK) - return; - - start = pci_resource_start(dev, 1); - size = pci_resource_len(dev, 1); - if (!start || size != 0x400000 || start & (size - 1)) - return; - - pci_resource_flags(dev, 1) = 0; - pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, 0); - pci_write_config_dword(dev, pos + PCI_SRIOV_BAR, start); - pci_write_config_dword(dev, pos + PCI_SRIOV_BAR + 12, start + size / 2); - - dev_info(&dev->dev, "use Flash Memory Space for SR-IOV BARs\n"); -} -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10c9, quirk_i82576_sriov); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e6, quirk_i82576_sriov); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1518, quirk_i82576_sriov); - -#endif /* CONFIG_PCI_IOV */ - /* Allow manual resource allocation for PCI hotplug bridges * via pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. 
For * some PCI-PCI hotplug bridges, like PLX 6254 (former HINT HB6), @@ -2791,6 +2784,16 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors); #endif +static void __devinit fixup_ti816x_class(struct pci_dev* dev) +{ + /* TI 816x devices do not have class code set when in PCIe boot mode */ + if (dev->class == PCI_CLASS_NOT_DEFINED) { + dev_info(&dev->dev, "Setting PCI class for 816x PCIe device\n"); + dev->class = PCI_CLASS_MULTIMEDIA_VIDEO; + } +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_TI, 0xb800, fixup_ti816x_class); + static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end) { diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index 38b34a73866a8..fa54ba70657ae 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -222,6 +222,7 @@ struct acer_debug { static struct rfkill *wireless_rfkill; static struct rfkill *bluetooth_rfkill; static struct rfkill *threeg_rfkill; +static bool rfkill_inited; /* Each low-level interface must define at least some of the following */ struct wmi_interface { @@ -1161,9 +1162,13 @@ static int acer_rfkill_set(void *data, bool blocked) { acpi_status status; u32 cap = (unsigned long)data; - status = set_u32(!blocked, cap); - if (ACPI_FAILURE(status)) - return -ENODEV; + + if (rfkill_inited) { + status = set_u32(!blocked, cap); + if (ACPI_FAILURE(status)) + return -ENODEV; + } + return 0; } @@ -1187,14 +1192,16 @@ static struct rfkill *acer_rfkill_register(struct device *dev, return ERR_PTR(-ENOMEM); status = get_device_status(&state, cap); - if (ACPI_SUCCESS(status)) - rfkill_init_sw_state(rfkill_dev, !state); err = rfkill_register(rfkill_dev); if (err) { rfkill_destroy(rfkill_dev); return ERR_PTR(err); } + + if (ACPI_SUCCESS(status)) + rfkill_set_sw_state(rfkill_dev, !state); + return rfkill_dev; } @@ -1229,6 +1236,8 @@ static int acer_rfkill_init(struct device *dev) } } + rfkill_inited = true; + schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ)); return 0; diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 114d95247cdf8..21b101899baee 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -459,6 +459,8 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event) if (test_bit(vpc_bit, &vpc1)) { if (vpc_bit == 9) ideapad_sync_rfk_state(adevice); + else if (vpc_bit == 4) + read_ec_data(handle, 0x12, &vpc2); else ideapad_input_report(priv, vpc_bit); } diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c index 1294a39373bab..85c8ad43c0c58 100644 --- a/drivers/platform/x86/intel_ips.c +++ b/drivers/platform/x86/intel_ips.c @@ -1111,7 +1111,7 @@ static int ips_monitor(void *data) last_msecs = jiffies_to_msecs(jiffies); expire = jiffies + msecs_to_jiffies(IPS_SAMPLE_PERIOD); - __set_current_state(TASK_UNINTERRUPTIBLE); + __set_current_state(TASK_INTERRUPTIBLE); mod_timer(&timer, expire); schedule(); diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index eb9922385ef8f..125d8912ed8a0 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -128,7 +128,8 @@ enum { }; /* ACPI HIDs */ -#define TPACPI_ACPI_HKEY_HID "IBM0068" +#define TPACPI_ACPI_IBM_HKEY_HID "IBM0068" +#define TPACPI_ACPI_LENOVO_HKEY_HID "LEN0068" #define TPACPI_ACPI_EC_HID "PNP0C09" /* Input 
IDs */ @@ -3879,7 +3880,8 @@ static int hotkey_write(char *buf) } static const struct acpi_device_id ibm_htk_device_ids[] = { - {TPACPI_ACPI_HKEY_HID, 0}, + {TPACPI_ACPI_IBM_HKEY_HID, 0}, + {TPACPI_ACPI_LENOVO_HKEY_HID, 0}, {"", 0}, }; diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig index 61bf5d724139c..ae211c3ce9d09 100644 --- a/drivers/power/Kconfig +++ b/drivers/power/Kconfig @@ -75,6 +75,12 @@ config BATTERY_DS2782 Say Y here to enable support for the DS2782/DS2786 standalone battery gas-gauge. +config BATTERY_DS2784 + tristate "DS2784 battery driver " + select W1 + help + Say Y here to enable support for batteries with ds2784 chip. + config BATTERY_PMU tristate "Apple PMU battery" depends on PPC32 && ADB_PMU @@ -205,4 +211,10 @@ config CHARGER_GPIO This driver can be build as a module. If so, the module will be called gpio-charger. +config CHARGER_PM8058 + bool "Qualcomm PM8058 pmic charger driver" + depends on PM8058 + help + Say Y to include support for the pm8058 charge controller + endif # POWER_SUPPLY diff --git a/drivers/power/Makefile b/drivers/power/Makefile index 8385bfae87283..b0d77944cd230 100644 --- a/drivers/power/Makefile +++ b/drivers/power/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_TEST_POWER) += test_power.o obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o obj-$(CONFIG_BATTERY_DS2782) += ds2782_battery.o +obj-$(CONFIG_BATTERY_DS2784) += ds2784_battery.o obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o @@ -34,3 +35,4 @@ obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o +obj-$(CONFIG_CHARGER_PM8058) += pm8058-charger.o diff --git a/drivers/power/ds2784_battery.c b/drivers/power/ds2784_battery.c new file mode 100644 index 0000000000000..533552cc4aa52 --- /dev/null +++ b/drivers/power/ds2784_battery.c @@ -0,0 +1,1163 @@ +/* drivers/power/ds2784_battery.c + * + * Copyright (C) 2009 HTC Corporation + * Copyright (C) 2009 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * additional code by XDA members RogerPodacter and theloginwithnoname, 2010 + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "../w1/w1.h" +#include "w1_ds2784.h" + +extern int is_ac_power_supplied(void); + +struct battery_status { + int timestamp; + + int voltage_uV; /* units of uV */ + int current_uA; /* units of uA */ + int current_avg_uA; + int charge_uAh; + + u16 temp_C; /* units of 0.1 C */ + + u8 percentage; /* battery percentage */ + u8 charge_source; + u8 status_reg; + u8 battery_full; /* battery full (don't charge) */ + + u8 cooldown; /* was overtemp */ + u8 charge_mode; +} __attribute__((packed)); + + +#define SOURCE_NONE 0 +#define SOURCE_USB 1 +#define SOURCE_AC 2 + +#define CHARGE_OFF 0 +#define CHARGE_SLOW 1 +#define CHARGE_FAST 2 +#define CHARGE_BATT_DISABLE 3 /* disable charging at battery */ + +#define TEMP_CRITICAL 600 /* no charging at all */ +#define TEMP_HOT 500 /* no fast charge, no charge > 4.1v */ +#define TEMP_WARM 450 /* no fast charge above this */ + +#define TEMP_HOT_MAX_MV 4100 /* stop charging here when hot */ +#define TEMP_HOT_MIN_MV 3800 /* resume charging here when hot */ +#define CE_DISABLE_MIN_MV 4100 + +#define BATTERY_LOG_MAX 1024 +#define BATTERY_LOG_MASK (BATTERY_LOG_MAX - 1) + +/* When we're awake or running on wall power, sample the battery + * gauge every FAST_POLL seconds. If we're asleep and on battery + * power, sample every SLOW_POLL seconds + */ +#define FAST_POLL (1 * 60) +#define SLOW_POLL (10 * 60) + +static DEFINE_MUTEX(battery_log_lock); +static struct battery_status battery_log[BATTERY_LOG_MAX]; +static unsigned battery_log_head; +static unsigned battery_log_tail; + +void battery_log_status(struct battery_status *s) +{ + unsigned n; + mutex_lock(&battery_log_lock); + n = battery_log_head; + memcpy(battery_log + n, s, sizeof(struct battery_status)); + n = (n + 1) & BATTERY_LOG_MASK; + if (n == battery_log_tail) + battery_log_tail = (battery_log_tail + 1) & BATTERY_LOG_MASK; + battery_log_head = n; + mutex_unlock(&battery_log_lock); +} + +static const char *battery_source[3] = { "none", " usb", " ac" }; +static const char *battery_mode[4] = { " off", "slow", "fast", "full" }; + +static int battery_log_print(struct seq_file *sf, void *private) +{ + unsigned n; + mutex_lock(&battery_log_lock); + seq_printf(sf, "timestamp mV mA avg mA uAh dC %% src mode reg full\n"); + for (n = battery_log_tail; n != battery_log_head; n = (n + 1) & BATTERY_LOG_MASK) { + struct battery_status *s = battery_log + n; + seq_printf(sf, "%9d %5d %6d %6d %8d %4d %3d %s %s 0x%02x %d\n", + s->timestamp, s->voltage_uV / 1000, + s->current_uA / 1000, s->current_avg_uA / 1000, + s->charge_uAh, s->temp_C, + s->percentage, + battery_source[s->charge_source], + battery_mode[s->charge_mode], + s->status_reg, s->battery_full); + } + mutex_unlock(&battery_log_lock); + return 0; +} + + +struct ds2784_device_info { + struct device *dev; + + /* DS2784 data, valid after calling ds2784_battery_read_status() */ + char raw[DS2784_DATA_SIZE]; /* raw DS2784 data */ + + struct battery_status status; + + struct power_supply bat; + struct workqueue_struct *monitor_wqueue; + struct work_struct monitor_work; + struct alarm alarm; + struct wake_lock work_wake_lock; + + int (*charge)(int on, int fast); + struct w1_slave *w1_slave; + + u8 dummy; /* dummy battery flag */ + u8 last_charge_mode; /* previous charger state */ + u8 
slow_poll; + + ktime_t last_poll; + ktime_t last_charge_seen; +}; + +#define psy_to_dev_info(x) container_of((x), struct ds2784_device_info, bat) + +static struct wake_lock vbus_wake_lock; + +#define BATT_RSNSP (67) /*Passion battery source 1*/ + +static enum power_supply_property battery_properties[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CURRENT_AVG, + POWER_SUPPLY_PROP_CHARGE_COUNTER, +}; + +static int battery_initial; + +static int battery_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val); + +static void battery_ext_power_changed(struct power_supply *psy); + +#define to_ds2784_device_info(x) container_of((x), struct ds2784_device_info, \ + bat); + +static void ds2784_parse_data(u8 *raw, struct battery_status *s) +{ + short n; + + /* Get status reg */ + s->status_reg = raw[DS2784_REG_STS]; + + /* Get Level */ + s->percentage = raw[DS2784_REG_RARC]; + + /* Get Voltage: Unit=4.886mV, range is 0V to 4.99V */ + n = (((raw[DS2784_REG_VOLT_MSB] << 8) | + (raw[DS2784_REG_VOLT_LSB])) >> 5); + + s->voltage_uV = n * 4886; + + /* Get Current: Unit= 1.5625uV x Rsnsp(67)=104.68 */ + n = ((raw[DS2784_REG_CURR_MSB]) << 8) | + raw[DS2784_REG_CURR_LSB]; + s->current_uA = ((n * 15625) / 10000) * 67; + + n = ((raw[DS2784_REG_AVG_CURR_MSB]) << 8) | + raw[DS2784_REG_AVG_CURR_LSB]; + s->current_avg_uA = ((n * 15625) / 10000) * 67; + + /* Get Temperature: + * 11 bit signed result in Unit=0.125 degree C. + * Convert to integer tenths of degree C. + */ + n = ((raw[DS2784_REG_TEMP_MSB] << 8) | + (raw[DS2784_REG_TEMP_LSB])) >> 5; + + s->temp_C = (n * 10) / 8; + + /* RAAC is in units of 1.6mAh */ + s->charge_uAh = ((raw[DS2784_REG_RAAC_MSB] << 8) | + raw[DS2784_REG_RAAC_LSB]) * 1600; +} + +static int w1_ds2784_io(struct w1_slave *sl, char *buf, int addr, size_t count, int io) +{ + if (!sl) + return 0; + + mutex_lock(&sl->master->mutex); + + if (addr > DS2784_DATA_SIZE || addr < 0) { + count = 0; + goto out; + } + if (addr + count > DS2784_DATA_SIZE) + count = DS2784_DATA_SIZE - addr; + + if (!w1_reset_select_slave(sl)) { + if (!io) { + w1_write_8(sl->master, W1_DS2784_READ_DATA); + w1_write_8(sl->master, addr); + count = w1_read_block(sl->master, buf, count); + } else { + w1_write_8(sl->master, W1_DS2784_WRITE_DATA); + w1_write_8(sl->master, addr); + w1_write_block(sl->master, buf, count); + /* XXX w1_write_block returns void, not n_written */ + } + } + +out: + mutex_unlock(&sl->master->mutex); + + return count; +} + +static int w1_ds2784_read(struct w1_slave *sl, char *buf, int addr, size_t count) +{ + return w1_ds2784_io(sl, buf, addr, count, 0); +} + +static int w1_ds2784_write(struct w1_slave *sl, char *buf, int addr, size_t count) +{ + return w1_ds2784_io(sl, buf, addr, count, 1); +} + +static int ds2784_set_cc(struct ds2784_device_info *di, bool enable) +{ + int ret; + + if (enable) + di->raw[DS2784_REG_PORT] |= 0x02; + else + di->raw[DS2784_REG_PORT] &= ~0x02; + ret = w1_ds2784_write(di->w1_slave, di->raw + DS2784_REG_PORT, + DS2784_REG_PORT, 1); + if (ret != 1) { + dev_warn(di->dev, "call to w1_ds2784_write failed (0x%p)\n", + di->w1_slave); + return 1; + } + return 0; +} + +static int ds2784_battery_read_status(struct ds2784_device_info *di) +{ + int ret, start, count; + + /* The first time we read the entire contents of 
SRAM/EEPROM, + * but after that we just read the interesting bits that change. */ + if (di->raw[DS2784_REG_RSNSP] == 0x00) { + start = 0; + count = DS2784_DATA_SIZE; + } else { + start = DS2784_REG_PORT; + count = DS2784_REG_CURR_LSB - start + 1; + } + + ret = w1_ds2784_read(di->w1_slave, di->raw + start, start, count); + if (ret != count) { + dev_warn(di->dev, "call to w1_ds2784_read failed (0x%p)\n", + di->w1_slave); + return 1; + } + + if (battery_initial == 0) { + if (!memcmp(di->raw + 0x20, "DUMMY!", 6)) { + unsigned char acr[2]; + + di->dummy = 1; + pr_info("batt: dummy battery detected\n"); + + /* reset ACC register to ~500mAh, since it may have zeroed out */ + acr[0] = 0x05; + acr[1] = 0x06; + w1_ds2784_write(di->w1_slave, acr, DS2784_REG_ACCUMULATE_CURR_MSB, 2); + } + battery_initial = 1; + } + + ds2784_parse_data(di->raw, &di->status); + + pr_info("batt: %3d%%, %d mV, %d mA (%d avg), %d.%d C, %d mAh\n", + di->status.percentage, + di->status.voltage_uV / 1000, di->status.current_uA / 1000, + di->status.current_avg_uA / 1000, + di->status.temp_C / 10, di->status.temp_C % 10, + di->status.charge_uAh / 1000); + + return 0; +} + +static int battery_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct ds2784_device_info *di = psy_to_dev_info(psy); + + switch (psp) { + case POWER_SUPPLY_PROP_STATUS: + switch (di->status.charge_source) { + case CHARGE_OFF: + val->intval = POWER_SUPPLY_STATUS_DISCHARGING; + break; + case CHARGE_FAST: + case CHARGE_SLOW: + if (di->status.battery_full) + val->intval = POWER_SUPPLY_STATUS_FULL; + else if (di->status.charge_mode == CHARGE_OFF || + di->status.charge_mode == CHARGE_BATT_DISABLE) + val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; + else + val->intval = POWER_SUPPLY_STATUS_CHARGING; + break; + default: + val->intval = POWER_SUPPLY_STATUS_UNKNOWN; + break; + } + break; + case POWER_SUPPLY_PROP_HEALTH: + if (di->status.temp_C >= TEMP_HOT) + val->intval = POWER_SUPPLY_HEALTH_OVERHEAT; + else + val->intval = POWER_SUPPLY_HEALTH_GOOD; + break; + case POWER_SUPPLY_PROP_PRESENT: + /* XXX todo */ + val->intval = 1; + break; + case POWER_SUPPLY_PROP_TECHNOLOGY: + if (di->dummy) + val->intval = POWER_SUPPLY_TECHNOLOGY_UNKNOWN; + else + val->intval = POWER_SUPPLY_TECHNOLOGY_LION; + break; + case POWER_SUPPLY_PROP_CAPACITY: + if (di->dummy) + val->intval = 75; + else + val->intval = di->status.percentage; + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + val->intval = di->status.voltage_uV; + break; + case POWER_SUPPLY_PROP_TEMP: + val->intval = di->status.temp_C; + break; + case POWER_SUPPLY_PROP_CURRENT_NOW: + val->intval = di->status.current_uA; + break; + case POWER_SUPPLY_PROP_CURRENT_AVG: + val->intval = di->status.current_avg_uA; + break; + case POWER_SUPPLY_PROP_CHARGE_COUNTER: + val->intval = di->status.charge_uAh; + break; + default: + return -EINVAL; + } + + return 0; +} + +static ssize_t set_reg(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + struct ds2784_device_info *di = dev_get_drvdata(dev); + int reg; + int val; + int check; + + sscanf(buf, "%x %x", ®, &val); + + if (reg < 0 || reg > 255) + return -EINVAL; + if (val < 0 || val > 255) + return -EINVAL; + + di->raw[reg] = val; + check = w1_ds2784_write(di->w1_slave, di->raw + reg, reg, 1); + + if (check != 1) { + dev_warn(di->dev, "w1_ds2784_write register failed (ox%p)\n", di->w1_slave); + } + + pr_info("batt: register 0x%02x changed to 0x%02x by user\n", reg, val); + + return count; +} + 
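+/* The 'setreg' attribute takes "<addr> <val>" as two hex bytes and writes
+ * a single raw DS2784 register over the 1-wire bus. */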
+static DEVICE_ATTR(setreg, 0666, NULL, set_reg); + +static ssize_t dump_regs(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct ds2784_device_info *di = dev_get_drvdata(dev); + + int addr; + int ret = 0; + int val; + int printbytes = 0; + + for (addr = 0; addr <= 0xb1; addr++, printbytes++) { + if (addr == 0 || addr == 0x30 || addr == 0x80) { + if (addr == 0x30) { + addr = 0x60; + } + if (addr == 0x80) { + addr = 0xb0; + } + + if (PAGE_SIZE-ret > 2) { + ret+= snprintf(&buf[ret], PAGE_SIZE-ret, "\n%02x:", addr); + } + + w1_ds2784_read(di->w1_slave, di->raw + addr, addr, 1); + val = di->raw[addr]; + printbytes = 0; + } + + if (printbytes >= 16) { + if (PAGE_SIZE-ret > 2) { + ret+= snprintf(&buf[ret], PAGE_SIZE-ret, "\n%02x:", addr); + } + printbytes = 0; + } + + if (PAGE_SIZE-ret > 2) { + val = di->raw[addr]; + ret+= snprintf(&buf[ret], PAGE_SIZE-ret, " %02x", val); + } + else + { + break; + } + } + if (PAGE_SIZE-ret > 2) { + ret+= snprintf(&buf[ret], PAGE_SIZE-ret, "\n"); + } + + return ret; +} + +static DEVICE_ATTR(dumpreg, 0444, dump_regs, NULL); + +static ssize_t show_status_reg(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct ds2784_device_info *di = dev_get_drvdata(dev); + int ret; + int statusreg; + int check; + + check = w1_ds2784_read(di->w1_slave, di->raw + DS2784_REG_STS, DS2784_REG_STS, 1); + + if (check != 1) { + dev_warn(di->dev, "w1_ds2784_read Status Register failed (ox%p)\n", di->w1_slave); + } + + statusreg = di->raw[DS2784_REG_STS]; + + ret = sprintf(buf, "0x%02x\n", statusreg); + return ret; +} + +static ssize_t store_status_reg(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + struct ds2784_device_info *di = dev_get_drvdata(dev); + int val; + int check; + + sscanf(buf, "%x", &val); + + if (val < 0 || val > 255) + return -EINVAL; + + di->raw[DS2784_REG_STS] = val; + check = w1_ds2784_write(di->w1_slave, di->raw + DS2784_REG_STS, DS2784_REG_STS, 1); + + if (check != 1) { + dev_warn(di->dev, "w1_ds2784_write Status Register failed (ox%p)\n", di->w1_slave); + } + + pr_info("batt: Status Register set to: 0x%02x \n", val); + + return count; +} + +static DEVICE_ATTR(statusreg, 0666, show_status_reg, store_status_reg); + +static ssize_t show_voltage(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct ds2784_device_info *di = dev_get_drvdata(dev); + int ret; + int getvoltage; + int check; + + check = w1_ds2784_read(di->w1_slave, di->raw + DS2784_REG_VOLT_MSB, DS2784_REG_VOLT_MSB, 2); + + getvoltage = (((di->raw[DS2784_REG_VOLT_MSB]<<8) | (di->raw[DS2784_REG_VOLT_LSB]))>>5)*4886; + + if (check != 2) { + dev_warn(di->dev, "w1_ds2784_read Voltage failed (ox%p)\n", di->w1_slave); + } + + ret = sprintf(buf, "%d\n", getvoltage); + return ret; +} + +static DEVICE_ATTR(getvoltage, 0644, show_voltage, NULL); // deprecated +static DEVICE_ATTR(voltageNow, 0444, show_voltage, NULL); + +static ssize_t show_current(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct ds2784_device_info *di = dev_get_drvdata(dev); + short n; + int ret; + int getcurrent; + int check; + + check = w1_ds2784_read(di->w1_slave, di->raw + DS2784_REG_CURR_MSB, DS2784_REG_CURR_MSB, 2); + + if (check != 2) { + dev_warn(di->dev, "w1_ds2784_read Current failed (ox%p)\n", di->w1_slave); + } + + n = ((di->raw[DS2784_REG_CURR_MSB]) << 8) | di->raw[DS2784_REG_CURR_LSB]; + getcurrent = ((n * 15625) / 10000) * 67; + + ret = sprintf(buf, "%d\n", getcurrent); + return ret; +} + +static 
DEVICE_ATTR(getcurrent, 0644, show_current, NULL); // deprecated +static DEVICE_ATTR(currentNow, 0444, show_current, NULL); + +static ssize_t show_avgcurrent(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct ds2784_device_info *di = dev_get_drvdata(dev); + short n; + int ret; + int getavgcurrent; + int check; + + check = w1_ds2784_read(di->w1_slave, di->raw + DS2784_REG_AVG_CURR_MSB, DS2784_REG_AVG_CURR_MSB, 2); + + if (check != 2) { + dev_warn(di->dev, "w1_ds2784_read Avg Current failed (ox%p)\n", di->w1_slave); + } + + n = ((di->raw[DS2784_REG_AVG_CURR_MSB]) << 8) | di->raw[DS2784_REG_AVG_CURR_LSB]; + getavgcurrent = ((n * 15625) / 10000) * 67; + + ret = sprintf(buf, "%d\n", getavgcurrent); + return ret; +} + +// compatibility til app update +static DEVICE_ATTR(getavgcurrent, 0644, show_avgcurrent, NULL); // deprecated +static DEVICE_ATTR(currentAverage, 0444, show_avgcurrent, NULL); + +static ssize_t show_age(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct ds2784_device_info *di = dev_get_drvdata(dev); + int ret; + int check; + int age; + int ageraw; + + check = w1_ds2784_read(di->w1_slave, di->raw + DS2784_REG_AGE_SCALAR, DS2784_REG_AGE_SCALAR, 1); + + if (check != 1) { + dev_warn(di->dev, "w1_ds2784_read age_scalar failed (ox%p)\n", di->w1_slave); + } + + ageraw = di->raw[DS2784_REG_AGE_SCALAR]; + age = (ageraw * 100) / 128; + + pr_info("%d\n", age); + + ret = sprintf(buf, "%d\n", age); + + return ret; +} + +static ssize_t set_age(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + struct ds2784_device_info *di = dev_get_drvdata(dev); + int age; + int check; + + sscanf(buf, "%d", &age); + + di->raw[DS2784_REG_AGE_SCALAR] = ((age * 128) / 100); + + check = w1_ds2784_write(di->w1_slave, di->raw + DS2784_REG_AGE_SCALAR, DS2784_REG_AGE_SCALAR, 1); + + if (check != 1) { + dev_warn(di->dev, "w1_ds2784_write age_scalar failed (ox%p)\n", di->w1_slave); + } + + pr_info("batt: age_scalar set to: %d percent\n", age); + + return count; +} + +static DEVICE_ATTR(age, 0666, show_age, set_age); + +static ssize_t show_AEvolt(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct ds2784_device_info *di = dev_get_drvdata(dev); + int ret; + int check; + int rawaevolt; + int aevolt; + + check = w1_ds2784_read(di->w1_slave, di->raw + DS2784_REG_ACTIVE_EMPTY_VOLT, DS2784_REG_ACTIVE_EMPTY_VOLT, 1); + + if (check != 1) { + dev_warn(di->dev, "w1_ds2784_read Active Empty Voltage failed (ox%p)\n", di->w1_slave); + } + rawaevolt = di->raw[DS2784_REG_ACTIVE_EMPTY_VOLT]; + aevolt = (rawaevolt * 1952) / 100; + + // pr_info("batt: Active Empty Voltage is: %d volts\n", aevolt); + + ret = sprintf(buf, "%d\n", aevolt); + + return ret; +} + +static ssize_t set_AEvolt (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + struct ds2784_device_info *di = dev_get_drvdata(dev); + int val; + int check; + int temp; + + sscanf(buf, "%d", &val); + + di->raw[DS2784_REG_ACTIVE_EMPTY_VOLT] = ( ( val * 100 ) / 1952 ) ; + + check = w1_ds2784_write(di->w1_slave, di->raw + DS2784_REG_ACTIVE_EMPTY_VOLT, DS2784_REG_ACTIVE_EMPTY_VOLT, 1); + + if (check != 1) { + dev_warn(di->dev, "w1_ds2784_write Active Empty Voltage failed (ox%p)\n", di->w1_slave); + } + + temp = ( ( val * 100 ) / 1952 ) ; + + pr_info("batt: Active Empty Voltage set to: %d volts\n", temp); + + return count; +} + +static DEVICE_ATTR(voltAE, 0644, show_AEvolt, set_AEvolt); // deprecated +static DEVICE_ATTR(voltageActiveEmpty, 0666, 
show_AEvolt, set_AEvolt); + +static ssize_t show_full40(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct ds2784_device_info *di = dev_get_drvdata(dev); + int ret; + int check; + int full40mAh, full40raw; + + check = w1_ds2784_read(di->w1_slave, di->raw + DS2784_REG_FULL_40_MSB, DS2784_REG_FULL_40_MSB, 2); + + if (check != 2) { + dev_warn(di->dev, "w1_ds2784_read Full40 mAh failed (ox%p)\n", di->w1_slave); + } + + full40raw = ((di->raw[DS2784_REG_FULL_40_MSB]) << 8) | di->raw[DS2784_REG_FULL_40_LSB]; + + full40mAh = ((full40raw * 625) / 100) / 15; + + // no need to put this in log, only when writing + // pr_info("batt: Full40 mAh capacity is: %d mAh\n", full40mAh); + + ret = sprintf(buf, "%dmAh\n", full40mAh); + + return ret; +} + +// backwards compatibility removed +// static DEVICE_ATTR(getFull40, 0644, show_full40, NULL); + +// again, backwards compatibility until app updated +static DEVICE_ATTR(getfull40, 0644, show_full40, NULL); // deprecated + +// use "proper" permissions, appropriate to read/write nature of file +// correct the filename - permissions indicate whether value changeable from now on +static DEVICE_ATTR(full40, 0444, show_full40, NULL); + +static ssize_t show_mAh(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct ds2784_device_info *di = dev_get_drvdata(dev); + int ret; + int check; + int mAh; + + check = w1_ds2784_read(di->w1_slave, di->raw + DS2784_REG_RAAC_MSB, DS2784_REG_RAAC_MSB, 2); + + if (check != 2) { + dev_warn(di->dev, "w1_ds2784_read mAh failed (ox%p)\n", di->w1_slave); + } + + mAh = ((di->raw[DS2784_REG_RAAC_MSB] << 8) | di->raw[DS2784_REG_RAAC_LSB]) * 1600; + + ret = sprintf(buf, "%d\n", mAh); + + return ret; +} + +static DEVICE_ATTR(getmAh, 0644, show_mAh, NULL); // deprecated +static DEVICE_ATTR(mAh, 0444, show_mAh, NULL); + +static void ds2784_battery_update_status(struct ds2784_device_info *di) +{ + u8 last_level; + last_level = di->status.percentage; + + ds2784_battery_read_status(di); + + if ((last_level != di->status.percentage) || (di->status.temp_C > 450)) + power_supply_changed(&di->bat); +} + +static DEFINE_MUTEX(charge_state_lock); + +static bool check_timeout(ktime_t now, ktime_t last, int seconds) +{ + ktime_t timeout = ktime_add(last, ktime_set(seconds, 0)); + return ktime_sub(timeout, now).tv64 < 0; +} + +static int battery_adjust_charge_state(struct ds2784_device_info *di) +{ + unsigned source; + int rc = 0; + int temp, volt; + u8 charge_mode; + bool charge_timeout = false; + + mutex_lock(&charge_state_lock); + + temp = di->status.temp_C; + volt = di->status.voltage_uV / 1000; + + source = di->status.charge_source; + + /* initially our charge mode matches our source: + * NONE:OFF, USB:SLOW, AC:FAST + */ + charge_mode = source; + + /* shut off charger when full: + * - CHGTF flag is set + */ + + if (di->status.status_reg & 0x80) { + di->status.battery_full = 1; + charge_mode = CHARGE_BATT_DISABLE; + } + else + di->status.battery_full = 0; + + + if (temp >= TEMP_HOT) { + if (temp >= TEMP_CRITICAL) + charge_mode = CHARGE_BATT_DISABLE; + + /* once we charge to max voltage when hot, disable + * charging until the temp drops or the voltage drops + */ + if (volt >= TEMP_HOT_MAX_MV) + di->status.cooldown = 1; + } + + /* when the battery is warm, only charge in slow charge mode */ + if ((temp >= TEMP_WARM) && (charge_mode == CHARGE_FAST)) + charge_mode = CHARGE_SLOW; + + if (di->status.cooldown) { + if ((temp < TEMP_WARM) || (volt <= TEMP_HOT_MIN_MV)) + di->status.cooldown = 0; + else + charge_mode = 
CHARGE_BATT_DISABLE; + } + + if (di->status.current_uA > 1024) + di->last_charge_seen = di->last_poll; + else if (di->last_charge_mode != CHARGE_OFF && + check_timeout(di->last_poll, di->last_charge_seen, 60 * 60)) { + if (di->last_charge_mode == CHARGE_BATT_DISABLE) { + /* The charger is only powering the phone. Toggle the + * enable line periodically to prevent auto shutdown. + */ + di->last_charge_seen = di->last_poll; + pr_info("batt: charging POKE CHARGER\n"); + di->charge(0, 0); + udelay(10); + di->charge(1, source == CHARGE_FAST); + } else { + /* The charger has probably stopped charging. Turn it + * off until the next sample period. + */ + charge_timeout = true; + charge_mode = CHARGE_OFF; + } + } + + if (source == CHARGE_OFF) + charge_mode = CHARGE_OFF; + + /* Don't use CHARGE_BATT_DISABLE unless the voltage is high since the + * voltage drop over the discharge-path diode can cause a shutdown. + */ + if (charge_mode == CHARGE_BATT_DISABLE && volt < CE_DISABLE_MIN_MV) + charge_mode = CHARGE_OFF; + + if (di->last_charge_mode == charge_mode) + goto done; + + di->last_charge_mode = charge_mode; + di->status.charge_mode = charge_mode; + + switch (charge_mode) { + case CHARGE_OFF: + di->charge(0, 0); + ds2784_set_cc(di, true); + if (temp >= TEMP_CRITICAL) + pr_info("batt: charging OFF [OVERTEMP]\n"); + else if (di->status.cooldown) + pr_info("batt: charging OFF [COOLDOWN]\n"); + else if (di->status.battery_full) + pr_info("batt: charging OFF [FULL]\n"); + else if (charge_timeout) + pr_info("batt: charging OFF [TIMEOUT]\n"); + else + pr_info("batt: charging OFF\n"); + break; + case CHARGE_BATT_DISABLE: + di->last_charge_seen = di->last_poll; + ds2784_set_cc(di, false); + di->charge(1, source == CHARGE_FAST); + if (temp >= TEMP_CRITICAL) + pr_info("batt: charging BATTOFF [OVERTEMP]\n"); + else if (di->status.cooldown) + pr_info("batt: charging BATTOFF [COOLDOWN]\n"); + else if (di->status.battery_full) + pr_info("batt: charging BATTOFF [FULL]\n"); + else + pr_info("batt: charging BATTOFF [UNKNOWN]\n"); + break; + case CHARGE_SLOW: + di->last_charge_seen = di->last_poll; + ds2784_set_cc(di, true); + di->charge(1, 0); + pr_info("batt: charging SLOW\n"); + break; + case CHARGE_FAST: + di->last_charge_seen = di->last_poll; + ds2784_set_cc(di, true); + di->charge(1, 1); + pr_info("batt: charging FAST\n"); + break; + } + rc = 1; +done: + mutex_unlock(&charge_state_lock); + return rc; +} + +static void ds2784_program_alarm(struct ds2784_device_info *di, int seconds) +{ + ktime_t low_interval = ktime_set(seconds - 10, 0); + ktime_t slack = ktime_set(20, 0); + ktime_t next; + + next = ktime_add(di->last_poll, low_interval); + + alarm_start_range(&di->alarm, next, ktime_add(next, slack)); +} + +static void ds2784_battery_work(struct work_struct *work) +{ + struct ds2784_device_info *di = + container_of(work, struct ds2784_device_info, monitor_work); + struct timespec ts; + unsigned long flags; + + ds2784_battery_update_status(di); + + di->last_poll = alarm_get_elapsed_realtime(); + + if (battery_adjust_charge_state(di)) + power_supply_changed(&di->bat); + + ts = ktime_to_timespec(di->last_poll); + di->status.timestamp = ts.tv_sec; + battery_log_status(&di->status); + + /* prevent suspend before starting the alarm */ + local_irq_save(flags); + wake_unlock(&di->work_wake_lock); + ds2784_program_alarm(di, FAST_POLL); + local_irq_restore(flags); +} + +static void ds2784_battery_alarm(struct alarm *alarm) +{ + struct ds2784_device_info *di = + container_of(alarm, struct ds2784_device_info, alarm); + 
wake_lock(&di->work_wake_lock); + queue_work(di->monitor_wqueue, &di->monitor_work); +} + +static void battery_ext_power_changed(struct power_supply *psy) +{ + struct ds2784_device_info *di; + int got_power; + + di = psy_to_dev_info(psy); + got_power = power_supply_am_i_supplied(psy); + + if (got_power) { + if (is_ac_power_supplied()) + di->status.charge_source = SOURCE_AC; + else + di->status.charge_source = SOURCE_USB; + wake_lock(&vbus_wake_lock); + } else { + di->status.charge_source = SOURCE_NONE; + /* give userspace some time to see the uevent and update + * LED state or whatnot... + */ + wake_lock_timeout(&vbus_wake_lock, HZ / 2); + } + battery_adjust_charge_state(di); + power_supply_changed(psy); +} + +static int ds2784_battery_probe(struct platform_device *pdev) +{ + int rc, ret; /*Added "ret" as a check for creating the sysfs files*/ + struct ds2784_device_info *di; + struct ds2784_platform_data *pdata; + + di = kzalloc(sizeof(*di), GFP_KERNEL); + if (!di) + return -ENOMEM; + + platform_set_drvdata(pdev, di); + + pdata = pdev->dev.platform_data; + if (!pdata || !pdata->charge || !pdata->w1_slave) { + pr_err("%s: pdata missing or invalid\n", __func__); + rc = -EINVAL; + goto fail_register; + } + + di->charge = pdata->charge; + di->w1_slave = pdata->w1_slave; + + di->dev = &pdev->dev; + + di->bat.name = "battery"; + di->bat.type = POWER_SUPPLY_TYPE_BATTERY; + di->bat.properties = battery_properties; + di->bat.num_properties = ARRAY_SIZE(battery_properties); + di->bat.external_power_changed = battery_ext_power_changed; + di->bat.get_property = battery_get_property; + di->last_charge_mode = 0xff; + + rc = power_supply_register(&pdev->dev, &di->bat); + if (rc) + goto fail_register; + + ret = device_create_file(&pdev->dev, &dev_attr_setreg); + if(ret < 0) + pr_err("%s: Failed to create sysfs entry for setreg\n", __func__); + + ret = device_create_file(&pdev->dev, &dev_attr_dumpreg); + if(ret < 0) + pr_err("%s: Failed to create sysfs entry for dumpreg\n", __func__); + + ret = device_create_file(&pdev->dev, &dev_attr_statusreg); + if(ret < 0) + pr_err("%s: Failed to create sysfs entry for statusreg\n", __func__); + + ret = device_create_file(&pdev->dev, &dev_attr_getvoltage); // deprecated + + ret = device_create_file(&pdev->dev, &dev_attr_voltageNow); + if(ret < 0) + pr_err("%s: Failed to create sysfs entry for voltage\n", __func__); + + ret = device_create_file(&pdev->dev, &dev_attr_getcurrent); // deprecated + + ret = device_create_file(&pdev->dev, &dev_attr_currentNow); + if(ret < 0) + pr_err("%s: Failed to create sysfs entry for current\n", __func__); + + ret = device_create_file(&pdev->dev, &dev_attr_getavgcurrent); // deprecated + + ret = device_create_file(&pdev->dev, &dev_attr_currentAverage); + if(ret < 0) + pr_err("%s: Failed to create sysfs entry for avg current\n", __func__); + + ret = device_create_file(&pdev->dev, &dev_attr_age); + if(ret < 0) + pr_err("%s: Failed to create sysfs entry for age\n", __func__); + + ret = device_create_file(&pdev->dev, &dev_attr_voltAE); // deprecated + + ret = device_create_file(&pdev->dev, &dev_attr_voltageActiveEmpty); + if(ret < 0) + pr_err("%s: Failed to create sysfs entry for voltAE\n", __func__); + + ret = device_create_file(&pdev->dev, &dev_attr_getfull40); // deprecated + + ret = device_create_file(&pdev->dev, &dev_attr_full40); + if (ret < 0) + pr_err("%s: Failed to create sysfs entry for getfull40\n", __func__); + + ret = device_create_file(&pdev->dev, &dev_attr_getmAh); // deprecated + + ret = device_create_file(&pdev->dev, 
&dev_attr_mAh); + if(ret < 0) + pr_err("%s: Failed to create sysfs entry for mAh\n", __func__); + + INIT_WORK(&di->monitor_work, ds2784_battery_work); + di->monitor_wqueue = create_freezable_workqueue(dev_name(&pdev->dev)); + + /* init to something sane */ + di->last_poll = alarm_get_elapsed_realtime(); + + if (!di->monitor_wqueue) { + rc = -ESRCH; + goto fail_workqueue; + } + wake_lock_init(&di->work_wake_lock, WAKE_LOCK_SUSPEND, + "ds2784-battery"); + alarm_init(&di->alarm, ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP, + ds2784_battery_alarm); + wake_lock(&di->work_wake_lock); + queue_work(di->monitor_wqueue, &di->monitor_work); + return 0; + +fail_workqueue: + power_supply_unregister(&di->bat); +fail_register: + kfree(di); + return rc; +} + +static int ds2784_suspend(struct device *dev) +{ + struct ds2784_device_info *di = dev_get_drvdata(dev); + + /* If we are on battery, reduce our update rate until + * we next resume. + */ + if (di->status.charge_source == SOURCE_NONE) { + ds2784_program_alarm(di, SLOW_POLL); + di->slow_poll = 1; + } + return 0; +} + +static int ds2784_resume(struct device *dev) +{ + struct ds2784_device_info *di = dev_get_drvdata(dev); + + /* We might be on a slow sample cycle. If we're + * resuming we should resample the battery state + * if it's been over a minute since we last did + * so, and move back to sampling every minute until + * we suspend again. + */ + if (di->slow_poll) { + ds2784_program_alarm(di, FAST_POLL); + di->slow_poll = 0; + } + return 0; +} + +static struct dev_pm_ops ds2784_pm_ops = { + .suspend = ds2784_suspend, + .resume = ds2784_resume, +}; + +static struct platform_driver ds2784_battery_driver = { + .driver = { + .name = "ds2784-battery", + .pm = &ds2784_pm_ops, + }, + .probe = ds2784_battery_probe, +}; + +static int battery_log_open(struct inode *inode, struct file *file) +{ + return single_open(file, battery_log_print, NULL); +} + +static struct file_operations battery_log_fops = { + .open = battery_log_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init ds2784_battery_init(void) +{ + debugfs_create_file("battery_log", 0444, NULL, NULL, &battery_log_fops); + wake_lock_init(&vbus_wake_lock, WAKE_LOCK_SUSPEND, "vbus_present"); + return platform_driver_register(&ds2784_battery_driver); +} + +module_init(ds2784_battery_init); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Justin Lin "); +MODULE_DESCRIPTION("ds2784 battery driver"); diff --git a/drivers/power/pm8058-charger.c b/drivers/power/pm8058-charger.c new file mode 100644 index 0000000000000..0642274d52e58 --- /dev/null +++ b/drivers/power/pm8058-charger.c @@ -0,0 +1,320 @@ +/* + * Copyright (c) 2010 Google, Inc. + * + * Author: Dima Zavin + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +struct pm8058_charger { + struct device *pmic_dev; + int chgval_irq; + int fastchg_irq; + + struct power_supply ac_supply; + struct power_supply usb_supply; + + struct pm8058_charger_platform_data *pdata; + + spinlock_t lock; + bool can_charge; + bool is_ac; + bool is_online; + bool vbus_present; + int charge_type; + u32 max_current; +}; + +static struct pm8058_charger *the_pm8058_charger; + +/* TODO: the usb core driver should provide the maximum current draw value to us + * for charging */ + +void pm8058_notify_charger_connected(int status) +{ + struct pm8058_charger *charger = the_pm8058_charger; + u32 max_current = 0; + bool is_ac; + bool is_online; + bool change = false; + unsigned long flags; + + if (!charger) + return; + + printk("### %s(%d) ###\n", __func__, status); + if (status && !charger->vbus_present) + pr_warning("%s: cable status mismatch %d %d\n", __func__, + status, charger->vbus_present); + + switch (status) { + case 1: + /* usb (pc) charging */ + max_current = 500; + is_ac = false; + is_online = true; + break; + case 2: + /* wall charger */ + max_current = 1500; + is_ac = true; + is_online = true; + break; + case 0: + default: + /* disable charging */ + max_current = 0; + is_ac = false; + is_online = false; + break; + } + spin_lock_irqsave(&charger->lock, flags); + if (max_current != charger->max_current || + is_ac != charger->is_ac || is_online != charger->is_online) { + charger->max_current = max_current; + charger->is_ac = is_ac; + charger->is_online = is_online; + change = true; + } + spin_unlock_irqrestore(&charger->lock, flags); + /* for now, charge control is done on the modem side, so we have to + * delegate to the board file. Eventually, all charge control will + * be done in this driver */ + if (change && charger->pdata->charge) + charger->pdata->charge(max_current, is_ac); + + power_supply_changed(&charger->ac_supply); + power_supply_changed(&charger->usb_supply); +} +EXPORT_SYMBOL_GPL(pm8058_notify_charger_connected); + +static void check_chgval(struct pm8058_charger *charger) +{ + int ret; + unsigned long flags; + + ret = pm8058_irq_get_status(charger->pmic_dev, PM8058_CHGVAL_IRQ); + if (ret >= 0) { + spin_lock_irqsave(&charger->lock, flags); + charger->vbus_present = !!ret; + spin_unlock_irqrestore(&charger->lock, flags); + charger->pdata->vbus_present(ret); + } else { + pr_err("%s: can't read status!! ignoring event?!\n", __func__); + } +} + +static irqreturn_t chgval_irq_handler(int irq, void *dev_id) +{ + struct pm8058_charger *charger = dev_id; + + check_chgval(charger); + return IRQ_HANDLED; +} + +/* should only get this irq when we are plugged in */ +static irqreturn_t fastchg_irq_handler(int irq, void *dev_id) +{ + struct pm8058_charger *charger = dev_id; + int ret; + bool fast_charging; + unsigned long flags; + + ret = pm8058_irq_get_status(charger->pmic_dev, PM8058_FASTCHG_IRQ); + if (ret < 0) + return IRQ_HANDLED; + fast_charging = !!ret; + + spin_lock_irqsave(&charger->lock, flags); + if (fast_charging) { + if (!charger->vbus_present) { + pr_err("%s: charging without vbus?!\n", __func__); + goto done; + } + charger->charge_type = POWER_SUPPLY_CHARGE_TYPE_FAST; + } else { + /* charging is either stopped (done/overtemp/etc.), or we + * are trickle charging. 
*/ + /* TODO: detect trickle charging mode */ + if (charger->is_online) + charger->charge_type = POWER_SUPPLY_CHARGE_TYPE_NONE; + else + charger->charge_type = POWER_SUPPLY_CHARGE_TYPE_UNKNOWN; + } + +done: + spin_unlock_irqrestore(&charger->lock, flags); + + power_supply_changed(&charger->ac_supply); + power_supply_changed(&charger->usb_supply); + return IRQ_HANDLED; +} + +static int power_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct pm8058_charger *charger; + + if (psy->type == POWER_SUPPLY_TYPE_MAINS) + charger = container_of(psy, struct pm8058_charger, ac_supply); + else + charger = container_of(psy, struct pm8058_charger, usb_supply); + + switch (psp) { + case POWER_SUPPLY_PROP_ONLINE: + if (psy->type == POWER_SUPPLY_TYPE_MAINS) + val->intval = charger->is_online && charger->is_ac; + else + val->intval = charger->is_online && !charger->is_ac; + break; + case POWER_SUPPLY_PROP_CHARGE_TYPE: + /* for now, fake fast charge all the time if we're on */ + if (psy->type == POWER_SUPPLY_TYPE_MAINS) + val->intval = charger->is_ac ? charger->charge_type : + POWER_SUPPLY_CHARGE_TYPE_UNKNOWN; + else + val->intval = charger->is_online && !charger->is_ac ? + charger->charge_type : + POWER_SUPPLY_CHARGE_TYPE_UNKNOWN; + break; + default: + return -EINVAL; + } + + return 0; +} + +static enum power_supply_property power_properties[] = { + POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_CHARGE_TYPE, +}; + +static int __init pm8058_charger_probe(struct platform_device *pdev) +{ + struct pm8058_charger_platform_data *pdata = pdev->dev.platform_data; + struct pm8058_charger *charger; + int chgval_irq; + int fastchg_irq; + int ret; + + chgval_irq = platform_get_irq_byname(pdev, "chgval_irq"); + fastchg_irq = platform_get_irq_byname(pdev, "fastchg_irq"); + + if (!pdata || chgval_irq < 0 || fastchg_irq < 0) { + pr_err("%s: missing platform data/resources\n", __func__); + return -EINVAL; + } + + charger = kzalloc(sizeof(struct pm8058_charger), GFP_KERNEL); + if (!charger) { + pr_err("%s: can't alloc mem for charger struct\n", __func__); + return -ENOMEM; + } + + charger->pmic_dev = pdev->dev.parent; + charger->pdata = pdata; + platform_set_drvdata(pdev, charger); + spin_lock_init(&charger->lock); + + the_pm8058_charger = charger; + + charger->ac_supply.name = "ac"; + charger->ac_supply.type = POWER_SUPPLY_TYPE_MAINS; + charger->ac_supply.supplied_to = pdata->supplied_to; + charger->ac_supply.num_supplicants = pdata->num_supplicants; + charger->ac_supply.properties = power_properties; + charger->ac_supply.num_properties = ARRAY_SIZE(power_properties); + charger->ac_supply.get_property = power_get_property; + + charger->usb_supply.name = "usb"; + charger->usb_supply.type = POWER_SUPPLY_TYPE_USB; + charger->usb_supply.supplied_to = pdata->supplied_to; + charger->usb_supply.num_supplicants = pdata->num_supplicants; + charger->usb_supply.properties = power_properties; + charger->usb_supply.num_properties = ARRAY_SIZE(power_properties); + charger->usb_supply.get_property = power_get_property; + + ret = power_supply_register(&pdev->dev, &charger->ac_supply); + if (ret) + goto err_reg_ac_supply; + ret = power_supply_register(&pdev->dev, &charger->usb_supply); + if (ret) + goto err_reg_usb_supply; + + ret = request_threaded_irq(chgval_irq, NULL, chgval_irq_handler, + IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, + "pm8058-charger-valid", charger); + if (ret) { + pr_err("%s: can't request chgval_irq\n", __func__); + goto err_req_chgval_irq; + } + 
charger->chgval_irq = chgval_irq; + + ret = request_threaded_irq(fastchg_irq, NULL, fastchg_irq_handler, + IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, + "pm8058-charger-fastchg", charger); + if (ret) { + pr_err("%s: can't request stuck\n", __func__); + goto err_req_fastchg_irq; + } + charger->fastchg_irq = fastchg_irq; + enable_irq_wake(charger->chgval_irq); + + pr_info("%s: driver initialized\n", __func__); + check_chgval(charger); + + return 0; + +err_req_fastchg_irq: + free_irq(chgval_irq, charger); +err_req_chgval_irq: + power_supply_unregister(&charger->usb_supply); +err_reg_usb_supply: + power_supply_unregister(&charger->ac_supply); +err_reg_ac_supply: + platform_set_drvdata(pdev, NULL); + the_pm8058_charger = NULL; + kfree(charger); + return ret; +} + +static struct platform_driver pm8058_charger_driver = { + .probe = pm8058_charger_probe, + .driver = { + .name = "pm8058-charger", + .owner = THIS_MODULE, + }, +}; + +static int __init pm8058_charger_init(void) +{ + return platform_driver_register(&pm8058_charger_driver); +} + +module_init(pm8058_charger_init); +MODULE_DESCRIPTION("PM8058 Charger Driver"); +MODULE_AUTHOR("Dima Zavin "); +MODULE_LICENSE("GPL"); + diff --git a/drivers/power/w1_ds2784.h b/drivers/power/w1_ds2784.h new file mode 100644 index 0000000000000..6822fc02fb852 --- /dev/null +++ b/drivers/power/w1_ds2784.h @@ -0,0 +1,132 @@ +/* drivers/w1/slaves/w1_ds2784.h + * + * Copyright (C) 2009 HTC Corporation + * Author: Justin Lin + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __w1_ds2784_h__ +#define __w1_ds2784_h__ + + +/* Known commands to the DS2784 chip */ +#define W1_DS2784_SWAP 0xAA +#define W1_DS2784_READ_DATA 0x69 +#define W1_DS2784_WRITE_DATA 0x6C +#define W1_DS2784_COPY_DATA 0x48 +#define W1_DS2784_RECALL_DATA 0xB8 +#define W1_DS2784_LOCK 0x6A + +/* Number of valid register addresses */ +#define DS2784_DATA_SIZE 0x80 + +#define DS2784_EEPROM_BLOCK0 0x20 +#define DS2784_ACTIVE_FULL 0x20 +#define DS2784_EEPROM_BLOCK1 0x30 +#define DS2784_RATED_CAPACITY 0x32 +#define DS2784_CURRENT_OFFSET_BIAS 0x33 +#define DS2784_ACTIVE_EMPTY 0x3b + +/** + * The DS2482 registers - there are 3 registers that are addressed by a read + * pointer. The read pointer is set by the last command executed. 
+ * + * To read the data, issue a register read for any address + */ +#define DS2482_CMD_RESET 0xF0 /* No param */ +#define DS2482_CMD_SET_READ_PTR 0xE1 /* Param: DS2482_PTR_CODE_xxx */ +#define DS2482_CMD_CHANNEL_SELECT 0xC3 +#define DS2482_CMD_WRITE_CONFIG 0xD2 /* Param: Config byte */ +#define DS2482_CMD_1WIRE_RESET 0xB4 /* Param: None */ +#define DS2482_CMD_1WIRE_SINGLE_BIT 0x87 /* Param: Bit byte (bit7) */ +#define DS2482_CMD_1WIRE_WRITE_BYTE 0xA5 /* Param: Data byte */ +#define DS2482_CMD_1WIRE_READ_BYTE 0x96 /* Param: None */ +/* Note to read the byte, Set the ReadPtr to Data then read (any addr) */ +#define DS2482_CMD_1WIRE_TRIPLET 0x78 /* Param: Dir byte (bit7) */ + +/* Values for DS2482_CMD_SET_READ_PTR */ +#define DS2482_PTR_CODE_STATUS 0xF0 +#define DS2482_PTR_CODE_DATA 0xE1 +#define DS2482_PTR_CODE_CHANNEL 0xD2 /* DS2482-800 only */ +#define DS2482_PTR_CODE_CONFIG 0xC3 + +/* +DS2784 1-wire slave memory map definitions +*/ +#define DS2784_REG_PORT 0x00 +#define DS2784_REG_STS 0x01 +#define DS2784_REG_RAAC_MSB 0x02 +#define DS2784_REG_RAAC_LSB 0x03 +#define DS2784_REG_RSAC_MSB 0x04 +#define DS2784_REG_RSAC_LSB 0x05 +#define DS2784_REG_RARC 0x06 +#define DS2784_REG_RSRC 0x07 +#define DS2784_REG_AVG_CURR_MSB 0x08 +#define DS2784_REG_AVG_CURR_LSB 0x09 +#define DS2784_REG_TEMP_MSB 0x0A +#define DS2784_REG_TEMP_LSB 0x0B +#define DS2784_REG_VOLT_MSB 0x0C +#define DS2784_REG_VOLT_LSB 0x0D +#define DS2784_REG_CURR_MSB 0x0E +#define DS2784_REG_CURR_LSB 0x0F +#define DS2784_REG_ACCUMULATE_CURR_MSB 0x10 +#define DS2784_REG_ACCUMULATE_CURR_LSB 0x11 +#define DS2784_REG_ACCUMULATE_CURR_LSB1 0x12 +#define DS2784_REG_ACCUMULATE_CURR_LSB2 0x13 +#define DS2784_REG_AGE_SCALAR 0x14 +#define DS2784_REG_SPECIALL_FEATURE 0x15 +#define DS2784_REG_FULL_MSB 0x16 +#define DS2784_REG_FULL_LSB 0x17 +#define DS2784_REG_ACTIVE_EMPTY_MSB 0x18 +#define DS2784_REG_ACTIVE_EMPTY_LSB 0x19 +#define DS2784_REG_STBY_EMPTY_MSB 0x1A +#define DS2784_REG_STBY_EMPTY_LSB 0x1B +#define DS2784_REG_EEPROM 0x1F +#define DS2784_REG_MFG_GAIN_RSGAIN_MSB 0xB0 +#define DS2784_REG_MFG_GAIN_RSGAIN_LSB 0xB1 + +#define DS2784_REG_CTRL 0x60 +#define DS2784_REG_ACCUMULATE_BIAS 0x61 +#define DS2784_REG_AGE_CAPA_MSB 0x62 +#define DS2784_REG_AGE_CAPA_LSB 0x63 +#define DS2784_REG_CHARGE_VOLT 0x64 +#define DS2784_REG_MIN_CHARGE_CURR 0x65 +#define DS2784_REG_ACTIVE_EMPTY_VOLT 0x66 +#define DS2784_REG_ACTIVE_EMPTY_CURR 0x67 +#define DS2784_REG_ACTIVE_EMPTY_40 0x68 +#define DS2784_REG_RSNSP 0x69 +#define DS2784_REG_FULL_40_MSB 0x6A +#define DS2784_REG_FULL_40_LSB 0x6B +#define DS2784_REG_FULL_SEG_4_SLOPE 0x6C +#define DS2784_REG_FULL_SEG_3_SLOPE 0x6D +#define DS2784_REG_FULL_SEG_2_SLOPE 0x6E +#define DS2784_REG_FULL_SEG_1_SLOPE 0x6F +#define DS2784_REG_AE_SEG_4_SLOPE 0x70 +#define DS2784_REG_AE_SEG_3_SLOPE 0x71 +#define DS2784_REG_AE_SEG_2_SLOPE 0x72 +#define DS2784_REG_AE_SEG_1_SLOPE 0x73 +#define DS2784_REG_SE_SEG_4_SLOPE 0x74 +#define DS2784_REG_SE_SEG_3_SLOPE 0x75 +#define DS2784_REG_SE_SEG_2_SLOPE 0x76 +#define DS2784_REG_SE_SEG_1_SLOPE 0x77 +#define DS2784_REG_RSGAIN_MSB 0x78 +#define DS2784_REG_RSGAIN_LSB 0x79 +#define DS2784_REG_RSTC 0x7A +#define DS2784_REG_CURR_OFFSET_BIAS 0x7B +#define DS2784_REG_TBP34 0x7C +#define DS2784_REG_TBP23 0x7D +#define DS2784_REG_TBP12 0x7E +#define DS2784_REG_PROTECTOR_THRESHOLD 0x7F + +#define DS2784_REG_USER_EEPROM_20 0x20 + +#endif /* !__w1_ds2784__ */ diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c index 095016a9dec16..a70f37c4f49ff 100644 --- 
a/drivers/rapidio/switches/idt_gen2.c +++ b/drivers/rapidio/switches/idt_gen2.c @@ -95,6 +95,9 @@ idtg2_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, else table++; + if (route_port == RIO_INVALID_ROUTE) + route_port = IDT_DEFAULT_ROUTE; + rio_mport_write_config_32(mport, destid, hopcount, LOCAL_RTE_CONF_DESTID_SEL, table); @@ -411,6 +414,12 @@ static int idtg2_switch_init(struct rio_dev *rdev, int do_enum) rdev->rswitch->em_handle = idtg2_em_handler; rdev->rswitch->sw_sysfs = idtg2_sysfs; + if (do_enum) { + /* Ensure that default routing is disabled on startup */ + rio_write_config_32(rdev, + RIO_STD_RTE_DEFAULT_PORT, IDT_NO_ROUTE); + } + return 0; } diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c index 3a971077e7bfc..d06ee2d44b447 100644 --- a/drivers/rapidio/switches/idtcps.c +++ b/drivers/rapidio/switches/idtcps.c @@ -26,6 +26,9 @@ idtcps_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, { u32 result; + if (route_port == RIO_INVALID_ROUTE) + route_port = CPS_DEFAULT_ROUTE; + if (table == RIO_GLOBAL_TABLE) { rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); @@ -130,6 +133,9 @@ static int idtcps_switch_init(struct rio_dev *rdev, int do_enum) /* set TVAL = ~50us */ rio_write_config_32(rdev, rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); + /* Ensure that default routing is disabled on startup */ + rio_write_config_32(rdev, + RIO_STD_RTE_DEFAULT_PORT, CPS_NO_ROUTE); } return 0; diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c index 1a62934bfebc5..db8b8028988d3 100644 --- a/drivers/rapidio/switches/tsi57x.c +++ b/drivers/rapidio/switches/tsi57x.c @@ -303,6 +303,12 @@ static int tsi57x_switch_init(struct rio_dev *rdev, int do_enum) rdev->rswitch->em_init = tsi57x_em_init; rdev->rswitch->em_handle = tsi57x_em_handler; + if (do_enum) { + /* Ensure that default routing is disabled on startup */ + rio_write_config_32(rdev, RIO_STD_RTE_DEFAULT_PORT, + RIO_INVALID_ROUTE); + } + return 0; } diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 9fa20957847db..0cf7ce50bde61 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1002,6 +1002,29 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, return 0; } +static void unset_consumer_device_supply(struct regulator_dev *rdev, + const char *consumer_dev_name, struct device *consumer_dev) +{ + struct regulator_map *node, *n; + + if (consumer_dev && !consumer_dev_name) + consumer_dev_name = dev_name(consumer_dev); + + list_for_each_entry_safe(node, n, ®ulator_map_list, list) { + if (rdev != node->regulator) + continue; + + if (consumer_dev_name && node->dev_name && + strcmp(consumer_dev_name, node->dev_name)) + continue; + + list_del(&node->list); + kfree(node->dev_name); + kfree(node); + return; + } +} + static void unset_regulator_supplies(struct regulator_dev *rdev) { struct regulator_map *node, *n; @@ -2565,9 +2588,14 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, init_data->consumer_supplies[i].dev, init_data->consumer_supplies[i].dev_name, init_data->consumer_supplies[i].supply); - if (ret < 0) - goto unset_supplies; + if (ret < 0) { + for (--i; i >= 0; i--) + unset_consumer_device_supply(rdev, + init_data->consumer_supplies[i].dev_name, + init_data->consumer_supplies[i].dev); + goto scrub; } +} list_add(&rdev->list, ®ulator_list); @@ -2576,9 +2604,6 @@ struct regulator_dev *regulator_register(struct 
regulator_desc *regulator_desc, mutex_unlock(®ulator_list_mutex); return rdev; -unset_supplies: - unset_regulator_supplies(rdev); - scrub: device_unregister(&rdev->dev); /* device core frees rdev */ diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c index 60a7ca5409e97..b16963b5c0c6b 100644 --- a/drivers/regulator/tps65023-regulator.c +++ b/drivers/regulator/tps65023-regulator.c @@ -62,6 +62,9 @@ #define TPS65023_REG_CTRL_LDO2_EN BIT(2) #define TPS65023_REG_CTRL_LDO1_EN BIT(1) +/* CON_CTRL2 bitfields */ +#define TPS65023_CON_CTRL2_GO BIT(7) + /* LDO_CTRL bitfields */ #define TPS65023_LDO_CTRL_LDOx_SHIFT(ldo_id) ((ldo_id)*4) #define TPS65023_LDO_CTRL_LDOx_MASK(ldo_id) (0xF0 >> ((ldo_id)*4)) @@ -126,11 +129,38 @@ struct tps_pmic { struct regulator_dev *rdev[TPS65023_NUM_REGULATOR]; const struct tps_info *info[TPS65023_NUM_REGULATOR]; struct mutex io_lock; + unsigned dcdc1_last_uV; }; +static int tps_65023_read_3bytes(struct tps_pmic *tps, u8 reg) +{ + int rv; + u8 txbuf[1]; + u8 rxbuf[3]; + struct i2c_msg msgs[] = { + { + .addr = tps->client->addr, + .flags = 0, + .len = sizeof(txbuf), + .buf = txbuf, + }, + { + .addr = tps->client->addr, + .flags = I2C_M_RD, + .len = sizeof(rxbuf), + .buf = rxbuf, + }, + }; + txbuf[0] = reg; + rv = i2c_transfer(tps->client->adapter, msgs, 2); + if (rv < 0) + return rv; + return rxbuf[0]; +} + static inline int tps_65023_read(struct tps_pmic *tps, u8 reg) { - return i2c_smbus_read_byte_data(tps->client, reg); + return tps_65023_read_3bytes(tps, reg); } static inline int tps_65023_write(struct tps_pmic *tps, u8 reg, u8 val) @@ -327,6 +357,9 @@ static int tps65023_dcdc_set_voltage(struct regulator_dev *dev, struct tps_pmic *tps = rdev_get_drvdata(dev); int dcdc = rdev_get_id(dev); int vsel; + int rv; + int uV = 0; + int delay; if (dcdc != TPS65023_DCDC_1) return -EINVAL; @@ -340,7 +373,7 @@ static int tps65023_dcdc_set_voltage(struct regulator_dev *dev, for (vsel = 0; vsel < tps->info[dcdc]->table_len; vsel++) { int mV = tps->info[dcdc]->table[vsel]; - int uV = mV * 1000; + uV = mV * 1000; /* Break at the first in-range value */ if (min_uV <= uV && uV <= max_uV) @@ -352,10 +385,70 @@ static int tps65023_dcdc_set_voltage(struct regulator_dev *dev, /* write to the register in case we found a match */ if (vsel == tps->info[dcdc]->table_len) return -EINVAL; + + rv = tps_65023_reg_write(tps, TPS65023_REG_DEF_CORE, vsel); + if (!rv) + rv = tps_65023_reg_write(tps, TPS65023_REG_CON_CTRL2, + TPS65023_CON_CTRL2_GO); + + /* Add delay to reach relected voltage (14.4 mV/us default slew rate) */ + if (tps->dcdc1_last_uV) + delay = abs(tps->dcdc1_last_uV - uV); else - return tps_65023_reg_write(tps, TPS65023_REG_DEF_CORE, vsel); + delay = max(uV - 800000, 1600000 - uV); + delay = DIV_ROUND_UP(delay, 14400); + udelay(delay); + tps->dcdc1_last_uV = rv ? 
0 /* Unknown voltage */ : uV; + + return rv; } +/* TPS65023_registers */ +#define TPS65023_VERSION 0 +#define TPS65023_PGOODZ 1 +#define TPS65023_MASK 2 +#define TPS65023_REG_CTRL 3 +#define TPS65023_CON_CTRL 4 +#define TPS65023_CON_CTRL2 5 +#define TPS65023_DEFCORE 6 +#define TPS65023_DEFSLEW 7 +#define TPS65023_LDO_CTRL 8 +#define TPS65023_MAX 9 + +static struct i2c_client *tpsclient; + +int tps65023_set_dcdc1_level(struct regulator_dev *dev, int mvolts) +{ + int val; + int ret; + struct tps_pmic *tps; + + if (!tpsclient) { + tps = rdev_get_drvdata(dev); + tpsclient = tps->client; + } + + if (!tpsclient) + return -ENODEV; + + if (mvolts < 800 || mvolts > 1600) + return -EINVAL; + + if (mvolts == 1600) + val = 0x1F; + else + val = ((mvolts - 800)/25) & 0x1F; + + ret = i2c_smbus_write_byte_data(tpsclient, TPS65023_DEFCORE, val); + + if (!ret) + ret = i2c_smbus_write_byte_data(tpsclient, + TPS65023_CON_CTRL2, 0x80); + + return ret; +} +EXPORT_SYMBOL(tps65023_set_dcdc1_level); + static int tps65023_ldo_get_voltage(struct regulator_dev *dev) { struct tps_pmic *tps = rdev_get_drvdata(dev); @@ -543,6 +636,9 @@ static int __devexit tps_65023_remove(struct i2c_client *client) struct tps_pmic *tps = i2c_get_clientdata(client); int i; + /* clear the client data in i2c */ + i2c_set_clientdata(client, NULL); + for (i = 0; i < TPS65023_NUM_REGULATOR; i++) regulator_unregister(tps->rdev[i]); diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 79b2a635dc3b8..04df80a29c46a 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -707,6 +707,13 @@ config RTC_DRV_DAVINCI This driver can also be built as a module. If so, the module will be called rtc-davinci. +config RTC_DRV_MSM7X00A + tristate "MSM7X00A" + depends on ARCH_MSM + default y + help + RTC driver for Qualcomm MSM7K chipsets + config RTC_DRV_OMAP tristate "TI OMAP1" depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 9b5b85e6e998b..a0cdff96eaf5c 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -67,6 +67,7 @@ obj-$(CONFIG_RTC_DRV_MAX8998) += rtc-max8998.o obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o obj-$(CONFIG_RTC_DRV_MC13XXX) += rtc-mc13xxx.o obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o +obj-$(CONFIG_RTC_DRV_MSM7X00A) += rtc-msm7x00a.o obj-$(CONFIG_RTC_DRV_MPC5121) += rtc-mpc5121.o obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o obj-$(CONFIG_RTC_DRV_NUC900) += rtc-nuc900.o diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c index ec7d9de4d12ee..01a7df5317c1b 100644 --- a/drivers/rtc/class.c +++ b/drivers/rtc/class.c @@ -41,33 +41,42 @@ static void rtc_device_release(struct device *dev) * system's wall clock; restore it on resume(). 
*/ -static struct timespec delta; -static struct timespec delta_delta; -static time_t oldtime; +static struct timespec old_rtc, old_system, old_delta; + static int rtc_suspend(struct device *dev, pm_message_t mesg) { struct rtc_device *rtc = to_rtc_device(dev); struct rtc_time tm; - struct timespec ts; - struct timespec new_delta; - + struct timespec delta, delta_delta; if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0) return 0; - getnstimeofday(&ts); + /* snapshot the current RTC and system time at suspend */ rtc_read_time(rtc, &tm); - rtc_tm_to_time(&tm, &oldtime); + getnstimeofday(&old_system); + rtc_tm_to_time(&tm, &old_rtc.tv_sec); + - /* RTC precision is 1 second; adjust delta for avg 1/2 sec err */ - set_normalized_timespec(&new_delta, - ts.tv_sec - oldtime, - ts.tv_nsec - (NSEC_PER_SEC >> 1)); + /* + * To avoid drift caused by repeated suspend/resumes, + * which each can add ~1 second drift error, + * try to compensate so the difference in system time + * and rtc time stays close to constant. + */ + delta = timespec_sub(old_system, old_rtc); + delta_delta = timespec_sub(delta, old_delta); + if (abs(delta_delta.tv_sec) >= 2) { + /* + * if delta_delta is too large, assume time correction + * has occurred and set old_delta to the current delta. + */ + old_delta = delta; + } else { + /* Otherwise try to adjust old_system to compensate */ + old_system = timespec_sub(old_system, delta_delta); + } - /* prevent 1/2 sec errors from accumulating */ - delta_delta = timespec_sub(new_delta, delta); - if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) - delta = new_delta; return 0; } @@ -75,34 +84,42 @@ static int rtc_resume(struct device *dev) { struct rtc_device *rtc = to_rtc_device(dev); struct rtc_time tm; - time_t newtime; - struct timespec time; + struct timespec new_system, new_rtc; + struct timespec sleep_time; if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0) return 0; + /* snapshot the current rtc and system time at resume */ + getnstimeofday(&new_system); rtc_read_time(rtc, &tm); if (rtc_valid_tm(&tm) != 0) { pr_debug("%s: bogus resume time\n", dev_name(&rtc->dev)); return 0; } - rtc_tm_to_time(&tm, &newtime); - if (delta_delta.tv_sec < -1) - newtime++; - if (newtime <= oldtime) { - if (newtime < oldtime) + rtc_tm_to_time(&tm, &new_rtc.tv_sec); + new_rtc.tv_nsec = 0; + + if (new_rtc.tv_sec <= old_rtc.tv_sec) { + if (new_rtc.tv_sec < old_rtc.tv_sec) pr_debug("%s: time travel!\n", dev_name(&rtc->dev)); return 0; } - /* restore wall clock using delta against this RTC; - * adjust again for avg 1/2 second RTC sampling error + /* calculate the RTC time delta (sleep time) */ + sleep_time = timespec_sub(new_rtc, old_rtc); + + /* + * Since these RTC suspend/resume handlers are not called + * at the very end of suspend or the start of resume, + * some run-time may pass on either side of the sleep time + * so subtract kernel run-time between rtc_suspend and rtc_resume + * to keep things accurate. 
*/ - set_normalized_timespec(&time, - newtime + delta.tv_sec, - (NSEC_PER_SEC >> 1) + delta.tv_nsec); - do_settimeofday(&time); + sleep_time = timespec_sub(sleep_time, + timespec_sub(new_system, old_system)); + timekeeping_inject_sleeptime(&sleep_time); return 0; } @@ -126,6 +143,7 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev, struct module *owner) { struct rtc_device *rtc; + struct rtc_wkalrm alrm; int id, err; if (idr_pre_get(&rtc_idr, GFP_KERNEL) == 0) { @@ -175,6 +193,12 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev, rtc->pie_timer.function = rtc_pie_update_irq; rtc->pie_enabled = 0; + /* Check to see if there is an ALARM already set in hw */ + err = __rtc_read_alarm(rtc, &alrm); + + if (!err && !rtc_valid_tm(&alrm.time)) + rtc_initialize_alarm(rtc, &alrm); + strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE); dev_set_name(&rtc->dev, "rtc%d", id); diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index cb2f0728fd70d..b2fea80dfb65e 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -116,6 +116,186 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs) } EXPORT_SYMBOL_GPL(rtc_set_mmss); +static int rtc_read_alarm_internal(struct rtc_device *rtc, struct rtc_wkalrm *alarm) +{ + int err; + + err = mutex_lock_interruptible(&rtc->ops_lock); + if (err) + return err; + + if (rtc->ops == NULL) + err = -ENODEV; + else if (!rtc->ops->read_alarm) + err = -EINVAL; + else { + memset(alarm, 0, sizeof(struct rtc_wkalrm)); + err = rtc->ops->read_alarm(rtc->dev.parent, alarm); + } + + mutex_unlock(&rtc->ops_lock); + return err; +} + +int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) +{ + int err; + struct rtc_time before, now; + int first_time = 1; + unsigned long t_now, t_alm; + enum { none, day, month, year } missing = none; + unsigned days; + + /* The lower level RTC driver may return -1 in some fields, + * creating invalid alarm->time values, for reasons like: + * + * - The hardware may not be capable of filling them in; + * many alarms match only on time-of-day fields, not + * day/month/year calendar data. + * + * - Some hardware uses illegal values as "wildcard" match + * values, which non-Linux firmware (like a BIOS) may try + * to set up as e.g. "alarm 15 minutes after each hour". + * Linux uses only oneshot alarms. + * + * When we see that here, we deal with it by using values from + * a current RTC timestamp for any missing (-1) values. The + * RTC driver prevents "periodic alarm" modes. + * + * But this can be racey, because some fields of the RTC timestamp + * may have wrapped in the interval since we read the RTC alarm, + * which would lead to us inserting inconsistent values in place + * of the -1 fields. + * + * Reading the alarm and timestamp in the reverse sequence + * would have the same race condition, and not solve the issue. + * + * So, we must first read the RTC timestamp, + * then read the RTC alarm value, + * and then read a second RTC timestamp. + * + * If any fields of the second timestamp have changed + * when compared with the first timestamp, then we know + * our timestamp may be inconsistent with that used by + * the low-level rtc_read_alarm_internal() function. + * + * So, when the two timestamps disagree, we just loop and do + * the process again to get a fully consistent set of values. 
+ * + * This could all instead be done in the lower level driver, + * but since more than one lower level RTC implementation needs it, + * then it's probably best to do it here instead of there.. + */ + + /* Get the "before" timestamp */ + err = rtc_read_time(rtc, &before); + if (err < 0) + return err; + do { + if (!first_time) + memcpy(&before, &now, sizeof(struct rtc_time)); + first_time = 0; + + /* get the RTC alarm values, which may be incomplete */ + err = rtc_read_alarm_internal(rtc, alarm); + if (err) + return err; + + /* full-function RTCs won't have such missing fields */ + if (rtc_valid_tm(&alarm->time) == 0) + return 0; + + /* get the "after" timestamp, to detect wrapped fields */ + err = rtc_read_time(rtc, &now); + if (err < 0) + return err; + + /* note that tm_sec is a "don't care" value here: */ + } while ( before.tm_min != now.tm_min + || before.tm_hour != now.tm_hour + || before.tm_mon != now.tm_mon + || before.tm_year != now.tm_year); + + /* Fill in the missing alarm fields using the timestamp; we + * know there's at least one since alarm->time is invalid. + */ + if (alarm->time.tm_sec == -1) + alarm->time.tm_sec = now.tm_sec; + if (alarm->time.tm_min == -1) + alarm->time.tm_min = now.tm_min; + if (alarm->time.tm_hour == -1) + alarm->time.tm_hour = now.tm_hour; + + /* For simplicity, only support date rollover for now */ + if (alarm->time.tm_mday == -1) { + alarm->time.tm_mday = now.tm_mday; + missing = day; + } + if (alarm->time.tm_mon == -1) { + alarm->time.tm_mon = now.tm_mon; + if (missing == none) + missing = month; + } + if (alarm->time.tm_year == -1) { + alarm->time.tm_year = now.tm_year; + if (missing == none) + missing = year; + } + + /* with luck, no rollover is needed */ + rtc_tm_to_time(&now, &t_now); + rtc_tm_to_time(&alarm->time, &t_alm); + if (t_now < t_alm) + goto done; + + switch (missing) { + + /* 24 hour rollover ... if it's now 10am Monday, an alarm that + * will trigger at 5am will do so at 5am Tuesday, which + * could also be in the next month or year. This is a common + * case, especially for PCs. + */ + case day: + dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day"); + t_alm += 24 * 60 * 60; + rtc_time_to_tm(t_alm, &alarm->time); + break; + + /* Month rollover ... if it's the 31st, an alarm on the 3rd will + * be next month. An alarm matching on the 30th, 29th, or 28th + * may end up in the month after that! Many newer PCs support + * this type of alarm. + */ + case month: + dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month"); + do { + if (alarm->time.tm_mon < 11) + alarm->time.tm_mon++; + else { + alarm->time.tm_mon = 0; + alarm->time.tm_year++; + } + days = rtc_month_days(alarm->time.tm_mon, + alarm->time.tm_year); + } while (days < alarm->time.tm_mday); + break; + + /* Year rollover ... easy except for leap years! 
*/ + case year: + dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year"); + do { + alarm->time.tm_year++; + } while (rtc_valid_tm(&alarm->time) != 0); + break; + + default: + dev_warn(&rtc->dev, "alarm rollover not handled\n"); + } + +done: + return 0; +} + int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) { int err; @@ -195,6 +375,32 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) } EXPORT_SYMBOL_GPL(rtc_set_alarm); +/* Called once per device from rtc_device_register */ +int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) +{ + int err; + + err = rtc_valid_tm(&alarm->time); + if (err != 0) + return err; + + err = mutex_lock_interruptible(&rtc->ops_lock); + if (err) + return err; + + rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time); + rtc->aie_timer.period = ktime_set(0, 0); + if (alarm->enabled) { + rtc->aie_timer.enabled = 1; + timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node); + } + mutex_unlock(&rtc->ops_lock); + return err; +} +EXPORT_SYMBOL_GPL(rtc_initialize_alarm); + + + int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled) { int err = mutex_lock_interruptible(&rtc->ops_lock); diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c index 17971d93354d2..0e61e2dad5d37 100644 --- a/drivers/rtc/rtc-bfin.c +++ b/drivers/rtc/rtc-bfin.c @@ -276,6 +276,8 @@ static int bfin_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) bfin_rtc_int_set_alarm(rtc); else bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)); + + return 0; } static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm) diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c index 37268e97de49e..afeb5469708b8 100644 --- a/drivers/rtc/rtc-ds1511.c +++ b/drivers/rtc/rtc-ds1511.c @@ -485,7 +485,7 @@ ds1511_nvram_write(struct file *filp, struct kobject *kobj, static struct bin_attribute ds1511_nvram_attr = { .attr = { .name = "nvram", - .mode = S_IRUGO | S_IWUGO, + .mode = S_IRUGO | S_IWUSR, }, .size = DS1511_RAM_MAX, .read = ds1511_nvram_read, diff --git a/drivers/rtc/rtc-msm7x00a.c b/drivers/rtc/rtc-msm7x00a.c new file mode 100644 index 0000000000000..f14608a2591a6 --- /dev/null +++ b/drivers/rtc/rtc-msm7x00a.c @@ -0,0 +1,306 @@ +/* drivers/rtc/rtc-msm7x00a.c + * + * Copyright (C) 2008 Google, Inc. + * Author: San Mehat + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include + +#define RTC_DEBUG 0 + +extern void msm_pm_set_max_sleep_time(int64_t sleep_time_ns); + +static const char *rpc_versions[] = { +#if !defined(CONFIG_MSM_LEGACY_7X00A_AMSS) + "rs30000048:00040000", + "rs30000048:00010000", +#else + "rs30000048:0da5b528", +#endif +}; + +#define TIMEREMOTE_PROCEEDURE_SET_JULIAN 6 +#define TIMEREMOTE_PROCEEDURE_GET_JULIAN 7 + +struct rpc_time_julian { + uint32_t year; + uint32_t month; + uint32_t day; + uint32_t hour; + uint32_t minute; + uint32_t second; + uint32_t day_of_week; +}; + +static struct msm_rpc_endpoint *ep; +static struct rtc_device *rtc; +static unsigned long rtcalarm_time; + +static int +msmrtc_timeremote_set_time(struct device *dev, struct rtc_time *tm) +{ + int rc; + + struct timeremote_set_julian_req { + struct rpc_request_hdr hdr; + uint32_t opt_arg; + + struct rpc_time_julian time; + } req; + + struct timeremote_set_julian_rep { + struct rpc_reply_hdr hdr; + } rep; + + if (tm->tm_year < 1900) + tm->tm_year += 1900; + + if (tm->tm_year < 1970) + return -EINVAL; + +#if RTC_DEBUG + printk(KERN_DEBUG "%s: %.2u/%.2u/%.4u %.2u:%.2u:%.2u (%.2u)\n", + __func__, tm->tm_mon, tm->tm_mday, tm->tm_year, + tm->tm_hour, tm->tm_min, tm->tm_sec, tm->tm_wday); +#endif + + req.opt_arg = cpu_to_be32(1); + req.time.year = cpu_to_be32(tm->tm_year); + req.time.month = cpu_to_be32(tm->tm_mon + 1); + req.time.day = cpu_to_be32(tm->tm_mday); + req.time.hour = cpu_to_be32(tm->tm_hour); + req.time.minute = cpu_to_be32(tm->tm_min); + req.time.second = cpu_to_be32(tm->tm_sec); + req.time.day_of_week = cpu_to_be32(tm->tm_wday); + + + rc = msm_rpc_call_reply(ep, TIMEREMOTE_PROCEEDURE_SET_JULIAN, + &req, sizeof(req), + &rep, sizeof(rep), + 5 * HZ); + return rc; +} + +static int +msmrtc_timeremote_read_time(struct device *dev, struct rtc_time *tm) +{ + int rc; + + struct timeremote_get_julian_req { + struct rpc_request_hdr hdr; + uint32_t julian_time_not_null; + } req; + + struct timeremote_get_julian_rep { + struct rpc_reply_hdr hdr; + uint32_t opt_arg; + struct rpc_time_julian time; + } rep; + + req.julian_time_not_null = cpu_to_be32(1); + + rc = msm_rpc_call_reply(ep, TIMEREMOTE_PROCEEDURE_GET_JULIAN, + &req, sizeof(req), + &rep, sizeof(rep), + 5 * HZ); + if (rc < 0) + return rc; + + if (!be32_to_cpu(rep.opt_arg)) { + printk(KERN_ERR "%s: No data from RTC\n", __func__); + return -ENODATA; + } + + tm->tm_year = be32_to_cpu(rep.time.year); + tm->tm_mon = be32_to_cpu(rep.time.month); + tm->tm_mday = be32_to_cpu(rep.time.day); + tm->tm_hour = be32_to_cpu(rep.time.hour); + tm->tm_min = be32_to_cpu(rep.time.minute); + tm->tm_sec = be32_to_cpu(rep.time.second); + tm->tm_wday = be32_to_cpu(rep.time.day_of_week); + +#if RTC_DEBUG + printk(KERN_DEBUG "%s: %.2u/%.2u/%.4u %.2u:%.2u:%.2u (%.2u)\n", + __func__, tm->tm_mon, tm->tm_mday, tm->tm_year, + tm->tm_hour, tm->tm_min, tm->tm_sec, tm->tm_wday); +#endif + + tm->tm_year -= 1900; /* RTC layer expects years to start at 1900 */ + tm->tm_mon--; /* RTC layer expects mons to be 0 based */ + + if (rtc_valid_tm(tm) < 0) { + dev_err(dev, "retrieved date/time is not valid.\n"); + rtc_time_to_tm(0, tm); + } + + return 0; +} + + +static int +msmrtc_virtual_alarm_set(struct device *dev, struct rtc_wkalrm *a) +{ + unsigned long now = get_seconds(); + + if (!a->enabled) { + rtcalarm_time = 0; + return 0; + } else + rtc_tm_to_time(&a->time, &rtcalarm_time); + + if (now > rtcalarm_time) { + printk(KERN_ERR "%s: Attempt to set alarm 
in the past\n", + __func__); + rtcalarm_time = 0; + return -EINVAL; + } + + return 0; +} + +static struct rtc_class_ops msm_rtc_ops = { + .read_time = msmrtc_timeremote_read_time, + .set_time = msmrtc_timeremote_set_time, + .set_alarm = msmrtc_virtual_alarm_set, +}; + +static void +msmrtc_alarmtimer_expired(unsigned long _data) +{ +#if RTC_DEBUG + printk(KERN_DEBUG "%s: Generating alarm event (src %lu)\n", + rtc->name, _data); +#endif + rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF); + rtcalarm_time = 0; +} + +static int +msmrtc_probe(struct platform_device *pdev) +{ + struct rpcsvr_platform_device *rdev = + container_of(pdev, struct rpcsvr_platform_device, base); + + if (rtc) + return -EBUSY; + + ep = msm_rpc_connect(rdev->prog, rdev->vers, 0); + if (IS_ERR(ep)) { + printk(KERN_ERR "%s: init rpc failed! rc = %ld\n", + __func__, PTR_ERR(ep)); + return PTR_ERR(ep); + } + + rtc = rtc_device_register("msm_rtc", + &pdev->dev, + &msm_rtc_ops, + THIS_MODULE); + if (IS_ERR(rtc)) { + printk(KERN_ERR "%s: Can't register RTC device (%ld)\n", + pdev->name, PTR_ERR(rtc)); + return PTR_ERR(rtc); + } + return 0; +} + + +static unsigned long msmrtc_get_seconds(void) +{ + struct rtc_time tm; + unsigned long now; + + msmrtc_timeremote_read_time(NULL, &tm); + rtc_tm_to_time(&tm, &now); + return now; +} + +static int +msmrtc_suspend(struct platform_device *dev, pm_message_t state) +{ + if (rtcalarm_time) { + unsigned long now = msmrtc_get_seconds(); + int diff = rtcalarm_time - now; + if (diff <= 0) { + msmrtc_alarmtimer_expired(1); + msm_pm_set_max_sleep_time(0); + return 0; + } + msm_pm_set_max_sleep_time((int64_t) ((int64_t) diff * NSEC_PER_SEC)); + } else + msm_pm_set_max_sleep_time(0); + return 0; +} + +static int +msmrtc_resume(struct platform_device *dev) +{ + if (rtcalarm_time) { + unsigned long now = msmrtc_get_seconds(); + int diff = rtcalarm_time - now; + if (diff <= 0) + msmrtc_alarmtimer_expired(2); + } + return 0; +} + +static int __init msmrtc_init(void) +{ + int i; + int ret; + struct platform_driver *pdrv[ARRAY_SIZE(rpc_versions)]; + + rtcalarm_time = 0; + + /* register the devices for all the major versions we support, only + * one should match */ + for (i = 0; i < ARRAY_SIZE(rpc_versions); i++) { + pdrv[i] = kzalloc(sizeof(struct platform_driver), GFP_KERNEL); + if (!pdrv[i]) { + ret = -ENOMEM; + goto err; + } + pdrv[i]->probe = msmrtc_probe; + pdrv[i]->suspend = msmrtc_suspend; + pdrv[i]->resume = msmrtc_resume; + pdrv[i]->driver.name = rpc_versions[i]; + pdrv[i]->driver.owner = THIS_MODULE; + ret = platform_driver_register(pdrv[i]); + if (ret) { + kfree(pdrv[i]); + goto err; + } + } + return 0; + +err: + for (--i; i >= 0; i--) + platform_driver_unregister(pdrv[i]); + return ret; +} + +module_init(msmrtc_init); + +MODULE_DESCRIPTION("RTC driver for Qualcomm MSM7x00a chipsets"); +MODULE_AUTHOR("San Mehat "); +MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index b80fa28824083..637b012007aa7 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -46,6 +46,7 @@ static struct clk *rtc_clk; static void __iomem *s3c_rtc_base; static int s3c_rtc_alarmno = NO_IRQ; static int s3c_rtc_tickno = NO_IRQ; +static bool wake_en; static enum s3c_cpu_type s3c_rtc_cpu_type; static DEFINE_SPINLOCK(s3c_rtc_pie_lock); @@ -597,8 +598,12 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state) } s3c_rtc_enable(pdev, 0); - if (device_may_wakeup(&pdev->dev)) - enable_irq_wake(s3c_rtc_alarmno); + if (device_may_wakeup(&pdev->dev) && !wake_en) { + if 
(enable_irq_wake(s3c_rtc_alarmno) == 0) + wake_en = true; + else + dev_err(&pdev->dev, "enable_irq_wake failed\n"); + } return 0; } @@ -614,8 +619,10 @@ static int s3c_rtc_resume(struct platform_device *pdev) writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON); } - if (device_may_wakeup(&pdev->dev)) + if (device_may_wakeup(&pdev->dev) && wake_en) { disable_irq_wake(s3c_rtc_alarmno); + wake_en = false; + } return 0; } diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index 96505e3ab9861..34f28fea22284 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -1221,6 +1221,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba) iscsi_init.dummy_buffer_addr_hi = (u32) ((u64) hba->dummy_buf_dma >> 32); + hba->num_ccell = hba->max_sqes >> 1; hba->ctx_ccell_tasks = ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16)); iscsi_init.num_ccells_per_conn = hba->num_ccell; diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index 72a7b2d4a439e..dd4622eca5639 100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c @@ -227,7 +227,7 @@ void bnx2i_stop(void *handle) wait_event_interruptible_timeout(hba->eh_wait, (list_empty(&hba->ep_ofld_list) && list_empty(&hba->ep_destroy_list)), - 10 * HZ); + 2 * HZ); /* Wait for all endpoints to be torn down, Chip will be reset once * control returns to network driver. So it is required to cleanup and * release all connection resources before returning from this routine. diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index f0dce26593eb9..0a46832f55c2c 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -858,7 +858,7 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic) mutex_init(&hba->net_dev_lock); init_waitqueue_head(&hba->eh_wait); if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { - hba->hba_shutdown_tmo = 20 * HZ; + hba->hba_shutdown_tmo = 30 * HZ; hba->conn_teardown_tmo = 20 * HZ; hba->conn_ctx_destroy_tmo = 6 * HZ; } else { /* 5706/5708/5709 */ @@ -1205,6 +1205,9 @@ static int bnx2i_task_xmit(struct iscsi_task *task) struct bnx2i_cmd *cmd = task->dd_data; struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr; + if (bnx2i_conn->ep->num_active_cmds + 1 > hba->max_sqes) + return -ENOMEM; + /* * If there is no scsi_cmnd this must be a mgmt task */ diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c index b837c5b3c8f9d..1367b919c4937 100644 --- a/drivers/scsi/device_handler/scsi_dh.c +++ b/drivers/scsi/device_handler/scsi_dh.c @@ -437,12 +437,14 @@ int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data) unsigned long flags; struct scsi_device *sdev; struct scsi_device_handler *scsi_dh = NULL; + struct device *dev = NULL; spin_lock_irqsave(q->queue_lock, flags); sdev = q->queuedata; if (sdev && sdev->scsi_dh_data) scsi_dh = sdev->scsi_dh_data->scsi_dh; - if (!scsi_dh || !get_device(&sdev->sdev_gendev) || + dev = get_device(&sdev->sdev_gendev); + if (!scsi_dh || !dev || sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL) err = SCSI_DH_NOSYS; @@ -453,12 +455,13 @@ int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data) if (err) { if (fn) fn(data, err); - return err; + goto out; } if (scsi_dh->activate) err = scsi_dh->activate(sdev, fn, data); - put_device(&sdev->sdev_gendev); +out: + put_device(dev); return err; } EXPORT_SYMBOL_GPL(scsi_dh_activate); diff --git 
a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 6b729324b8d37..30f2b333ccbdb 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -285,7 +285,8 @@ static void stpg_endio(struct request *req, int error) print_alua_state(h->state)); } done: - blk_put_request(req); + req->end_io_data = NULL; + __blk_put_request(req->q, req); if (h->callback_fn) { h->callback_fn(h->callback_data, err); h->callback_fn = h->callback_data = NULL; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 5d6d07bd1cd05..cee1d3bd68dcd 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -4611,6 +4611,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, * For each user buffer, create a mirror buffer and copy in */ for (i = 0; i < ioc->sge_count; i++) { + if (!ioc->sgl[i].iov_len) + continue; + kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev, ioc->sgl[i].iov_len, &buf_handle, GFP_KERNEL); diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index e92b77af54849..3834c95cffa22 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c @@ -688,6 +688,13 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, goto out; } + /* Check for overflow and wraparound */ + if (karg.data_sge_offset * 4 > ioc->request_sz || + karg.data_sge_offset > (UINT_MAX / 4)) { + ret = -EINVAL; + goto out; + } + /* copy in request message frame from user */ if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) { printk(KERN_ERR "failure at %s:%d/%s()!\n", __FILE__, __LINE__, @@ -1963,7 +1970,7 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state) Mpi2DiagBufferPostReply_t *mpi_reply; int rc, i; u8 buffer_type; - unsigned long timeleft; + unsigned long timeleft, request_size, copy_size; u16 smid; u16 ioc_status; u8 issue_reset = 0; @@ -1999,6 +2006,8 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state) return -ENOMEM; } + request_size = ioc->diag_buffer_sz[buffer_type]; + if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) { printk(MPT2SAS_ERR_FMT "%s: either the starting_offset " "or bytes_to_read are not 4 byte aligned\n", ioc->name, @@ -2006,13 +2015,23 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state) return -EINVAL; } + if (karg.starting_offset > request_size) + return -EINVAL; + diag_data = (void *)(request_data + karg.starting_offset); dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: diag_buffer(%p), " "offset(%d), sz(%d)\n", ioc->name, __func__, diag_data, karg.starting_offset, karg.bytes_to_read)); + /* Truncate data on requests that are too large */ + if ((diag_data + karg.bytes_to_read < diag_data) || + (diag_data + karg.bytes_to_read > request_data + request_size)) + copy_size = request_size - karg.starting_offset; + else + copy_size = karg.bytes_to_read; + if (copy_to_user((void __user *)uarg->diagnostic_data, - diag_data, karg.bytes_to_read)) { + diag_data, copy_size)) { printk(MPT2SAS_ERR_FMT "%s: Unable to write " "mpt_diag_read_buffer_t data @ %p\n", ioc->name, __func__, diag_data); diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 5ded3db6e316f..20bcd1ec14cae 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -113,6 +113,7 @@ struct sense_info { }; +#define MPT2SAS_TURN_ON_FAULT_LED (0xFFFC) #define 
MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF) /** @@ -121,6 +122,7 @@ struct sense_info { * @work: work object (ioc->fault_reset_work_q) * @cancel_pending_work: flag set during reset handling * @ioc: per adapter object + * @device_handle: device handle * @VF_ID: virtual function id * @VP_ID: virtual port id * @ignore: flag meaning this event has been marked to ignore @@ -134,6 +136,7 @@ struct fw_event_work { u8 cancel_pending_work; struct delayed_work delayed_work; struct MPT2SAS_ADAPTER *ioc; + u16 device_handle; u8 VF_ID; u8 VP_ID; u8 ignore; @@ -3708,17 +3711,75 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, #endif /** - * _scsih_smart_predicted_fault - illuminate Fault LED + * _scsih_turn_on_fault_led - illuminate Fault LED * @ioc: per adapter object * @handle: device handle + * Context: process * * Return nothing. */ static void -_scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle) +_scsih_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle) { Mpi2SepReply_t mpi_reply; Mpi2SepRequest_t mpi_request; + + memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t)); + mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR; + mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS; + mpi_request.SlotStatus = + cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT); + mpi_request.DevHandle = cpu_to_le16(handle); + mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS; + if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply, + &mpi_request)) != 0) { + printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return; + } + + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { + dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "enclosure_processor: " + "ioc_status (0x%04x), loginfo(0x%08x)\n", ioc->name, + le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo))); + return; + } +} + +/** + * _scsih_send_event_to_turn_on_fault_led - fire delayed event + * @ioc: per adapter object + * @handle: device handle + * Context: interrupt. + * + * Return nothing. + */ +static void +_scsih_send_event_to_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle) +{ + struct fw_event_work *fw_event; + + fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); + if (!fw_event) + return; + fw_event->event = MPT2SAS_TURN_ON_FAULT_LED; + fw_event->device_handle = handle; + fw_event->ioc = ioc; + _scsih_fw_event_add(ioc, fw_event); +} + +/** + * _scsih_smart_predicted_fault - process smart errors + * @ioc: per adapter object + * @handle: device handle + * Context: interrupt. + * + * Return nothing. 
+ */ +static void +_scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle) +{ struct scsi_target *starget; struct MPT2SAS_TARGET *sas_target_priv_data; Mpi2EventNotificationReply_t *event_reply; @@ -3745,30 +3806,8 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle) starget_printk(KERN_WARNING, starget, "predicted fault\n"); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); - if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) { - memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t)); - mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR; - mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS; - mpi_request.SlotStatus = - cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT); - mpi_request.DevHandle = cpu_to_le16(handle); - mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS; - if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply, - &mpi_request)) != 0) { - printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", - ioc->name, __FILE__, __LINE__, __func__); - return; - } - - if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { - dewtprintk(ioc, printk(MPT2SAS_INFO_FMT - "enclosure_processor: ioc_status (0x%04x), " - "loginfo(0x%08x)\n", ioc->name, - le16_to_cpu(mpi_reply.IOCStatus), - le32_to_cpu(mpi_reply.IOCLogInfo))); - return; - } - } + if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) + _scsih_send_event_to_turn_on_fault_led(ioc, handle); /* insert into event log */ sz = offsetof(Mpi2EventNotificationReply_t, EventData) + @@ -6330,6 +6369,9 @@ _firmware_event_work(struct work_struct *work) } switch (fw_event->event) { + case MPT2SAS_TURN_ON_FAULT_LED: + _scsih_turn_on_fault_led(ioc, fw_event->device_handle); + break; case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: _scsih_sas_topology_change_event(ioc, fw_event); break; diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 321cf3ae86308..ce0701d512f42 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -3814,6 +3814,9 @@ static long pmcraid_ioctl_passthrough( rc = -EFAULT; goto out_free_buffer; } + } else if (request_size < 0) { + rc = -EINVAL; + goto out_free_buffer; } /* check if we have any additional command parameters */ diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index d3e58d763b434..c52a0a26f2a7d 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1877,14 +1877,15 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) scsi_remove_host(vha->host); + /* Allow timer to run to drain queued items, when removing vp */ + qla24xx_deallocate_vp_id(vha); + if (vha->timer_active) { qla2x00_vp_stop_timer(vha); DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]" " = %p has stopped\n", vha->host_no, vha->vp_idx, vha)); } - qla24xx_deallocate_vp_id(vha); - /* No pending activities shall be there on the vha now */ DEBUG(msleep(random32()%10)); /* Just to see if something falls on * the net we have placed below */ diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index f27724d76cf66..8f823bd9f027b 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -2359,21 +2359,26 @@ qla2x00_remove_one(struct pci_dev *pdev) base_vha = pci_get_drvdata(pdev); ha = base_vha->hw; - spin_lock_irqsave(&ha->vport_slock, flags); - list_for_each_entry(vha, &ha->vp_list, list) { - atomic_inc(&vha->vref_count); + mutex_lock(&ha->vport_lock); + while (ha->cur_vport_count) { + struct Scsi_Host *scsi_host; - if (vha->fc_vport) { - spin_unlock_irqrestore(&ha->vport_slock, 
flags); + spin_lock_irqsave(&ha->vport_slock, flags); - fc_vport_terminate(vha->fc_vport); + BUG_ON(base_vha->list.next == &ha->vp_list); + /* This assumes first entry in ha->vp_list is always base vha */ + vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list); + scsi_host = scsi_host_get(vha->host); - spin_lock_irqsave(&ha->vport_slock, flags); - } + spin_unlock_irqrestore(&ha->vport_slock, flags); + mutex_unlock(&ha->vport_lock); + + fc_vport_terminate(vha->fc_vport); + scsi_host_put(vha->host); - atomic_dec(&vha->vref_count); + mutex_lock(&ha->vport_lock); } - spin_unlock_irqrestore(&ha->vport_slock, flags); + mutex_unlock(&ha->vport_lock); set_bit(UNLOADING, &base_vha->dpc_flags); @@ -3603,7 +3608,8 @@ qla2x00_timer(scsi_qla_host_t *vha) if (!pci_channel_offline(ha->pdev)) pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); - if (IS_QLA82XX(ha)) { + /* Make sure qla82xx_watchdog is run only for physical port */ + if (!vha->vp_idx && IS_QLA82XX(ha)) { if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) start_dpc++; qla82xx_watchdog(vha); @@ -3674,8 +3680,8 @@ qla2x00_timer(scsi_qla_host_t *vha) atomic_read(&vha->loop_down_timer))); } - /* Check if beacon LED needs to be blinked */ - if (ha->beacon_blink_led == 1) { + /* Check if beacon LED needs to be blinked for physical host only */ + if (!vha->vp_idx && (ha->beacon_blink_led == 1)) { set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags); start_dpc++; } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index fb2bb35c62cbf..415fdf2bd9462 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -400,10 +400,15 @@ static inline int scsi_host_is_busy(struct Scsi_Host *shost) static void scsi_run_queue(struct request_queue *q) { struct scsi_device *sdev = q->queuedata; - struct Scsi_Host *shost = sdev->host; + struct Scsi_Host *shost; LIST_HEAD(starved_list); unsigned long flags; + /* if the device is dead, sdev will be NULL, so no queue to run */ + if (!sdev) + return; + + shost = sdev->host; if (scsi_target(sdev)->single_lun) scsi_single_lun_run(sdev); diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 490ce213204e9..360b7cbadb446 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -322,14 +322,8 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work) kfree(evt); } - if (sdev->request_queue) { - sdev->request_queue->queuedata = NULL; - /* user context needed to free queue */ - scsi_free_queue(sdev->request_queue); - /* temporary expedient, try to catch use of queue lock - * after free of sdev */ - sdev->request_queue = NULL; - } + /* NULL queue means the device can't be used */ + sdev->request_queue = NULL; scsi_target_reap(scsi_target(sdev)); @@ -937,6 +931,12 @@ void __scsi_remove_device(struct scsi_device *sdev) if (sdev->host->hostt->slave_destroy) sdev->host->hostt->slave_destroy(sdev); transport_destroy_device(dev); + + /* cause the request function to reject all I/O requests */ + sdev->request_queue->queuedata = NULL; + + /* Freeing the queue signals to block that we're done */ + scsi_free_queue(sdev->request_queue); put_device(dev); } diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index f905ecb5704d7..01543d297b53a 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -1847,7 +1847,7 @@ store_priv_session_##field(struct device *dev, \ #define iscsi_priv_session_rw_attr(field, format) \ iscsi_priv_session_attr_show(field, format) \ 
iscsi_priv_session_attr_store(field) \ -static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUGO, \ +static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUSR, \ show_priv_session_##field, \ store_priv_session_##field) iscsi_priv_session_rw_attr(recovery_tmo, "%d"); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index e56730214c05e..a63b94c0fba62 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1910,14 +1910,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) int old_rcd = sdkp->RCD; int old_dpofua = sdkp->DPOFUA; - if (sdp->skip_ms_page_8) { - if (sdp->type == TYPE_RBC) - goto defaults; - else { - modepage = 0x3F; - dbd = 0; - } - } else if (sdp->type == TYPE_RBC) { + if (sdp->skip_ms_page_8) + goto defaults; + + if (sdp->type == TYPE_RBC) { modepage = 6; dbd = 8; } else { @@ -1945,11 +1941,13 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) */ if (len < 3) goto bad_sense; - else if (len > SD_BUF_SIZE) { - sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter " - "data from %d to %d bytes\n", len, SD_BUF_SIZE); - len = SD_BUF_SIZE; - } + if (len > 20) + len = 20; + + /* Take headers and block descriptors into account */ + len += data.header_length + data.block_descriptor_length; + if (len > SD_BUF_SIZE) + goto bad_sense; /* Get the data */ res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr); @@ -1957,45 +1955,16 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) if (scsi_status_is_good(res)) { int offset = data.header_length + data.block_descriptor_length; - while (offset < len) { - u8 page_code = buffer[offset] & 0x3F; - u8 spf = buffer[offset] & 0x40; - - if (page_code == 8 || page_code == 6) { - /* We're interested only in the first 3 bytes. - */ - if (len - offset <= 2) { - sd_printk(KERN_ERR, sdkp, "Incomplete " - "mode parameter data\n"); - goto defaults; - } else { - modepage = page_code; - goto Page_found; - } - } else { - /* Go to the next page */ - if (spf && len - offset > 3) - offset += 4 + (buffer[offset+2] << 8) + - buffer[offset+3]; - else if (!spf && len - offset > 1) - offset += 2 + buffer[offset+1]; - else { - sd_printk(KERN_ERR, sdkp, "Incomplete " - "mode parameter data\n"); - goto defaults; - } - } + if (offset >= SD_BUF_SIZE - 2) { + sd_printk(KERN_ERR, sdkp, "Malformed MODE SENSE response\n"); + goto defaults; } - if (modepage == 0x3F) { - sd_printk(KERN_ERR, sdkp, "No Caching mode page " - "present\n"); - goto defaults; - } else if ((buffer[offset] & 0x3f) != modepage) { + if ((buffer[offset] & 0x3f) != modepage) { sd_printk(KERN_ERR, sdkp, "Got wrong page\n"); goto defaults; } - Page_found: + if (modepage == 8) { sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0); diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 7f5a6a86f820f..3b00e907b9174 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -390,9 +390,9 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, len = (desc_ptr[2] << 8) + desc_ptr[3]; /* skip past overall descriptor */ desc_ptr += len + 4; - if (ses_dev->page10) - addl_desc_ptr = ses_dev->page10 + 8; } + if (ses_dev->page10) + addl_desc_ptr = ses_dev->page10 + 8; type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; components = 0; for (i = 0; i < types; i++, type_ptr += 4) { diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index aefadc6a16072..464ee7ba95b7e 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -636,7 +636,7 @@ static int sr_probe(struct device *dev) 
disk->first_minor = minor; sprintf(disk->disk_name, "sr%d", minor); disk->fops = &sr_bdops; - disk->flags = GENHD_FL_CD; + disk->flags = GENHD_FL_CD | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE; disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST; blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT); diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c index 0571ef9639cbc..dc076e028a3ca 100644 --- a/drivers/scsi/ultrastor.c +++ b/drivers/scsi/ultrastor.c @@ -306,7 +306,7 @@ static inline int find_and_clear_bit_16(unsigned long *field) "0: bsfw %1,%w0\n\t" "btr %0,%1\n\t" "jnc 0b" - : "=&r" (rv), "=m" (*field) :); + : "=&r" (rv), "+m" (*field) :); return rv; } diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c index 6172335ae3234..82dd6fb178386 100644 --- a/drivers/sh/clk/cpg.c +++ b/drivers/sh/clk/cpg.c @@ -105,7 +105,7 @@ static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent) /* Rebuild the frequency table */ clk_rate_table_build(clk, clk->freq_table, table->nr_divisors, - table, &clk->arch_flags); + table, NULL); return 0; } diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index bb233a9cbad2f..5451dcd53066a 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -288,6 +288,11 @@ config SPI_PXA2XX config SPI_PXA2XX_PCI def_bool SPI_PXA2XX && X86_32 && PCI +config SPI_QSD + tristate "Qualcomm MSM SPI support" + default n + depends on ARCH_MSM_SCORPION + config SPI_S3C24XX tristate "Samsung S3C24XX series SPI" depends on ARCH_S3C2410 && EXPERIMENTAL diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 86d1b5f9bbd9a..881d84be4267e 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -39,6 +39,7 @@ obj-$(CONFIG_SPI_FSL_LIB) += spi_fsl_lib.o obj-$(CONFIG_SPI_FSL_ESPI) += spi_fsl_espi.o obj-$(CONFIG_SPI_FSL_SPI) += spi_fsl_spi.o obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o +obj-$(CONFIG_SPI_QSD) += spi_qsd.o obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx_hw.o obj-$(CONFIG_SPI_S3C64XX) += spi_s3c64xx.o diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 34bb17f030197..cfc1e33be29b4 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -25,6 +25,7 @@ #include #include #include + #include #include #include @@ -39,7 +40,7 @@ static void spidev_release(struct device *dev) spi->master->cleanup(spi); spi_master_put(spi->master); - kfree(spi); + kfree(dev); } static ssize_t @@ -339,7 +340,6 @@ int spi_add_device(struct spi_device *spi) { static DEFINE_MUTEX(spi_add_lock); struct device *dev = spi->master->dev.parent; - struct device *d; int status; /* Chipselects are numbered 0..max; validate. 
*/ @@ -361,11 +361,10 @@ int spi_add_device(struct spi_device *spi) */ mutex_lock(&spi_add_lock); - d = bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev)); - if (d != NULL) { + if (bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev)) + != NULL) { dev_err(dev, "chipselect %d already in use\n", spi->chip_select); - put_device(d); status = -EBUSY; goto done; } @@ -1101,6 +1100,27 @@ int spi_write_then_read(struct spi_device *spi, } EXPORT_SYMBOL_GPL(spi_write_then_read); +static DEFINE_MUTEX(spi_lock); +int +spi_read_write_lock(struct spi_device *spidev, struct spi_msg *msg, char *buf, int size, int func) +{ + int i = 0, err = 0; + mutex_lock(&spi_lock); + if(func) { + if(!msg) return -EINVAL; + + for(i = 0; i < msg->len + 1; i++) { + err = spi_write(spidev, &msg->buffer[size * i], size); + } + } else { + if(!buf) return -EINVAL; + + err = spi_read(spidev, buf, size); + } + mutex_unlock(&spi_lock); + return err; +} + /*-------------------------------------------------------------------------*/ static int __init spi_init(void) diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c new file mode 100644 index 0000000000000..adce009b8ae78 --- /dev/null +++ b/drivers/spi/spi_qsd.c @@ -0,0 +1,281 @@ +/* linux/driver/spi/spi_qsd.c + * + * Copyright (C) 2009 Solomon Chiu + * + * This is a temporary solution to substitute Qualcomm's SPI. + * Should be replaced by formal SPI driver in the future. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SPI_CONFIG (0x00000000) +#define SPI_IO_CONTROL (0x00000004) +#define SPI_OPERATIONAL (0x00000030) +#define SPI_ERROR_FLAGS_EN (0x00000038) +#define SPI_ERROR_FLAGS (0x00000034) +#define SPI_OUTPUT_FIFO (0x00000100) + +#ifdef CONFIG_ARCH_MSM7X30 + +struct spi_device *spidev; + +extern int samsung_oled_panel_init(struct msm_lcdc_panel_ops *ops); +int qspi_send_16bit(unsigned char id, unsigned data) +{ + unsigned char buffer[4]; + int tmp; + tmp = (id<<13 | data)<<16; + + buffer[0] = tmp >> 24; + buffer[1] = (tmp & 0x00FF0000) >> 16; + buffer[2] = (tmp & 0x0000FF00) >> 8; + buffer[3] = tmp & 0x000000FF; + spi_write(spidev,buffer,4); + return 0; +} +int qspi_send_9bit(struct spi_msg *msg) +{ + int tmp = 0; + spidev->bits_per_word = 9; + tmp = (0x0 <<8 | msg->cmd)<<23; + msg->buffer[0] = tmp >> 24; + msg->buffer[1] = (tmp & 0x00FF0000) >> 16; + + if(msg->len != 0) { + int i = 0, j; + for(j = 2; i < msg->len; i++, j+=2){ + tmp &= 0x00000000; + tmp = (0x1<<8 | *(msg->data+i))<<23; + msg->buffer[j] = tmp >> 24; + msg->buffer[j+1] = (tmp & 0x00FF0000) >> 16; + } + } + + spi_read_write_lock(spidev, msg, NULL, 2, 1); + + return 0; +} + + +int qspi_send(unsigned char id, unsigned data) +{ + unsigned char buffer[2]; + int tmp; + tmp = (0x7000 | id<<9 | data)<<16; + + spidev->bits_per_word = 16; + + buffer[0] = tmp >> 24; + buffer[1] = (tmp & 0x00FF0000) >> 16; + + spi_write(spidev,buffer,2); + return 0; +} + + +static int msm_spi_probe(struct spi_device *spi) +{ + printk(" %s \n", __func__); + spidev = spi; + return 0 ; +} + +static int msm_spi_remove(struct platform_device *pdev) +{ + spidev = NULL; + return 0; +} + + +static struct spi_driver spi_qsd = { + .driver = { + .name = "spi_qsd", + .owner = THIS_MODULE, + }, + .probe = msm_spi_probe, + .remove = __devexit_p(msm_spi_remove), +}; + + +static int __init spi_qsd_init(void) +{ + int rc; + rc = spi_register_driver(&spi_qsd); + return rc; +} 
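(Aside, not part of the patch: the 9-bit transmit path above packs one command word plus one 2-byte encoded word per data byte into msg->buffer before handing the message to spi_read_write_lock(). A caller might drive it roughly as sketched below. The struct spi_msg members (cmd, len, data, buffer) are inferred from the dereferences in qspi_send_9bit() above, and the prototype is assumed to be exported through a header; treat this as an illustrative sketch, not code from the patch.)

/*
 * Illustrative sketch only: feeding qspi_send_9bit() from a panel driver.
 * Assumes struct spi_msg carries the cmd/len/data/buffer members that
 * spi_qsd.c dereferences, and that qspi_send_9bit() is declared in a header.
 */
int example_panel_write(unsigned char cmd, unsigned char *params, int nparams)
{
	/*
	 * qspi_send_9bit() emits one 2-byte word for the command plus one
	 * 2-byte word per data byte, so the scratch buffer must hold at
	 * least 2 * (nparams + 1) bytes.
	 */
	unsigned char buf[2 * (8 + 1)];
	struct spi_msg msg = {
		.cmd    = cmd,
		.len    = nparams,
		.data   = params,
		.buffer = buf,
	};

	if (nparams < 0 || nparams > 8)
		return -EINVAL;

	return qspi_send_9bit(&msg);
}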
+module_init(spi_qsd_init); + +static void __exit spi_qsd_exit(void) +{ + spi_unregister_driver(&spi_qsd); +} +module_exit(spi_qsd_exit); + +#else + +void __iomem *spi_base; +struct clk *spi_clk ; + +int qspi_send_16bit(unsigned char id, unsigned data) +{ + unsigned err ; + + /* bit-5: OUTPUT_FIFO_NOT_EMPTY */ + clk_enable(spi_clk); + while( readl(spi_base+SPI_OPERATIONAL) & (1<<5) ) + { + if( (err=readl(spi_base+SPI_ERROR_FLAGS)) ) + { + printk("\rERROR: SPI_ERROR_FLAGS=%d\r", err); + return -1; + } + } + + writel( (id<<13 | data)<<16, spi_base+SPI_OUTPUT_FIFO );/*AUO*/ + udelay(1000); + clk_disable(spi_clk); + + return 0; +} + +int qspi_send_9bit(unsigned char id, unsigned data) +{ + unsigned err ; + + /* bit-5: OUTPUT_FIFO_NOT_EMPTY */ + clk_enable(spi_clk); + while( readl(spi_base+SPI_OPERATIONAL) & (1<<5) ) + { + if( (err=readl(spi_base+SPI_ERROR_FLAGS)) ) + { + printk("\rERROR: SPI_ERROR_FLAGS=%d\r", err); + return -1; + } + } + + writel( ((id<<8) | data)<<23, spi_base+SPI_OUTPUT_FIFO);/*sharp*/ + + udelay(1000); + clk_disable(spi_clk); + return 0; +} + + +int qspi_send(unsigned char id, unsigned data) +{ + unsigned err ; + + /* bit-5: OUTPUT_FIFO_NOT_EMPTY */ + clk_enable(spi_clk); + while( readl(spi_base+SPI_OPERATIONAL) & (1<<5) ) + { + if( (err=readl(spi_base+SPI_ERROR_FLAGS)) ) + { + printk("\rERROR: SPI_ERROR_FLAGS=%d\r", err); + return -1; + } + } + + writel( (0x7000 | id<<9 | data)<<16, spi_base+SPI_OUTPUT_FIFO ); + udelay(100); + clk_disable(spi_clk); + return 0; +} + +static int __init msm_spi_probe(struct platform_device *pdev) +{ + int rc ; + struct spi_platform_data *pdata = pdev->dev.platform_data; + + spi_base=ioremap(0xA1200000, 4096); + if(!spi_base) + return -1; + + spi_clk = clk_get(&pdev->dev, "spi_clk"); + if (IS_ERR(spi_clk)) { + dev_err(&pdev->dev, "%s: unable to get spi_clk\n", __func__); + rc = PTR_ERR(spi_clk); + goto err_probe_clk_get; + } + rc = clk_enable(spi_clk); + if (rc) { + dev_err(&pdev->dev, "%s: unable to enable spi_clk\n", + __func__); + goto err_probe_clk_enable; + } + + if(pdata == NULL) + clk_set_rate(spi_clk, 4800000); + else + clk_set_rate(spi_clk, pdata->clk_rate); + + printk(KERN_DEBUG "spi clk = 0x%ld\n", clk_get_rate(spi_clk)); + printk("spi: SPI_CONFIG=%x\n", readl(spi_base+SPI_CONFIG)); + printk("spi: SPI_IO_CONTROL=%x\n", readl(spi_base+SPI_IO_CONTROL)); + printk("spi: SPI_OPERATIONAL=%x\n", readl(spi_base+SPI_OPERATIONAL)); + printk("spi: SPI_ERROR_FLAGS_EN=%x\n", readl(spi_base+SPI_ERROR_FLAGS_EN)); + printk("spi: SPI_ERROR_FLAGS=%x\n", readl(spi_base+SPI_ERROR_FLAGS)); + printk("-%s()\n", __FUNCTION__); + clk_disable(spi_clk); + + return 0 ; + +err_probe_clk_get: +err_probe_clk_enable: + return -1 ; +} + +static int __devexit msm_spi_remove(struct platform_device *pdev) +{ + return 0; +} + +static int msm_spi_suspend(struct platform_device *pdev, pm_message_t state) +{ + printk("+%s()\n", __FUNCTION__); + clk_disable(spi_clk); + return 0 ; +} + +static int msm_spi_resume(struct platform_device *pdev) +{ + printk("+%s()\n", __FUNCTION__); + clk_enable(spi_clk); + return 0 ; +} + +static struct platform_driver msm_spi_driver = { + .probe = msm_spi_probe, + .driver = { + .name = "spi_qsd", + .owner = THIS_MODULE, + }, +#if 0 + .suspend = msm_spi_suspend, + .resume = msm_spi_resume, +#endif + .remove = __exit_p(msm_spi_remove), +}; + +static int __init msm_spi_init(void) +{ + return platform_driver_register(&msm_spi_driver); +} + +fs_initcall(msm_spi_init); +#endif diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 
0208ba2fc650c..e98af574e8e11 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -47,6 +47,7 @@ obj-$(CONFIG_DX_SEP) += sep/ obj-$(CONFIG_IIO) += iio/ obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio/ obj-$(CONFIG_ZRAM) += zram/ +obj-$(CONFIG_XVMALLOC) += zram/ obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/ obj-$(CONFIG_WLAGS49_H25) += wlags49_h25/ obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop/ diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c index e13b4c4834076..39e7aa5bb9756 100644 --- a/drivers/staging/android/binder.c +++ b/drivers/staging/android/binder.c @@ -3,6 +3,7 @@ * Android IPC Subsystem * * Copyright (C) 2007-2008 Google, Inc. + * Copyright (c) 2012, Code Aurora Forum. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -38,6 +39,7 @@ static DEFINE_MUTEX(binder_lock); static DEFINE_MUTEX(binder_deferred_lock); +static DEFINE_MUTEX(binder_mmap_lock); static HLIST_HEAD(binder_procs); static HLIST_HEAD(binder_deferred_list); @@ -97,9 +99,9 @@ enum { BINDER_DEBUG_BUFFER_ALLOC = 1U << 13, BINDER_DEBUG_PRIORITY_CAP = 1U << 14, BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15, + BINDER_DEBUG_TOP_ERRORS = 1U << 16, }; -static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | - BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; +static uint32_t binder_debug_mask; module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); static int binder_debug_no_lock; @@ -287,6 +289,7 @@ struct binder_proc { struct rb_root refs_by_node; int pid; struct vm_area_struct *vma; + struct mm_struct *vma_vm_mm; struct task_struct *tsk; struct files_struct *files; struct hlist_node deferred_work_node; @@ -632,13 +635,19 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate, if (mm) { down_write(&mm->mmap_sem); vma = proc->vma; + if (vma && mm != proc->vma_vm_mm) { + pr_err("binder: %d: vma mm and task mm mismatch\n", + proc->pid); + vma = NULL; + } } if (allocate == 0) goto free_range; if (vma == NULL) { - printk(KERN_ERR "binder: %d: binder_alloc_buf failed to " + binder_debug(BINDER_DEBUG_TOP_ERRORS, + "binder: %d: binder_alloc_buf failed to " "map pages in userspace, no vma\n", proc->pid); goto err_no_vma; } @@ -651,7 +660,8 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate, BUG_ON(*page); *page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (*page == NULL) { - printk(KERN_ERR "binder: %d: binder_alloc_buf failed " + binder_debug(BINDER_DEBUG_TOP_ERRORS, + "binder: %d: binder_alloc_buf failed " "for page at %p\n", proc->pid, page_addr); goto err_alloc_page_failed; } @@ -660,7 +670,8 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate, page_array_ptr = page; ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr); if (ret) { - printk(KERN_ERR "binder: %d: binder_alloc_buf failed " + binder_debug(BINDER_DEBUG_TOP_ERRORS, + "binder: %d: binder_alloc_buf failed " "to map page at %p in kernel\n", proc->pid, page_addr); goto err_map_kernel_failed; @@ -669,7 +680,8 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate, (uintptr_t)page_addr + proc->user_buffer_offset; ret = vm_insert_page(vma, user_page_addr, page[0]); if (ret) { - printk(KERN_ERR "binder: %d: binder_alloc_buf failed " + binder_debug(BINDER_DEBUG_TOP_ERRORS, + "binder: %d: binder_alloc_buf failed " "to map page at %lx in userspace\n", proc->pid, user_page_addr); goto 
err_vm_insert_page_failed; @@ -718,7 +730,8 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc, size_t size; if (proc->vma == NULL) { - printk(KERN_ERR "binder: %d: binder_alloc_buf, no vma\n", + binder_debug(BINDER_DEBUG_TOP_ERRORS, + "binder: %d: binder_alloc_buf, no vma\n", proc->pid); return NULL; } @@ -756,7 +769,8 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc, } } if (best_fit == NULL) { - printk(KERN_ERR "binder: %d: binder_alloc_buf size %zd failed, " + binder_debug(BINDER_DEBUG_TOP_ERRORS, + "binder: %d: binder_alloc_buf size %zd failed, " "no address space\n", proc->pid, size); return NULL; } @@ -991,7 +1005,8 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal, node->internal_strong_refs == 0 && !(node == binder_context_mgr_node && node->has_strong_ref)) { - printk(KERN_ERR "binder: invalid inc strong " + binder_debug(BINDER_DEBUG_TOP_ERRORS, + "binder: invalid inc strong " "node for %d\n", node->debug_id); return -EINVAL; } @@ -1007,7 +1022,8 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal, node->local_weak_refs++; if (!node->has_weak_ref && list_empty(&node->work.entry)) { if (target_list == NULL) { - printk(KERN_ERR "binder: invalid inc weak node " + binder_debug(BINDER_DEBUG_TOP_ERRORS, + "binder: invalid inc weak node " "for %d\n", node->debug_id); return -EINVAL; } @@ -1044,7 +1060,7 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal) if (node->proc) { rb_erase(&node->rb_node, &node->proc->nodes); binder_debug(BINDER_DEBUG_INTERNAL_REFS, - "binder: refless node %d deleted\n", + "binder: refless node %d deleted\n", node->debug_id); } else { hlist_del(&node->dead_node); @@ -1263,14 +1279,16 @@ static void binder_send_failed_reply(struct binder_transaction *t, binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, "binder: send failed reply for " "transaction %d to %d:%d\n", - t->debug_id, target_thread->proc->pid, + t->debug_id, + target_thread->proc->pid, target_thread->pid); binder_pop_transaction(target_thread, t); target_thread->return_error = error_code; wake_up_interruptible(&target_thread->wait); } else { - printk(KERN_ERR "binder: reply failed, target " + binder_debug(BINDER_DEBUG_TOP_ERRORS, + "binder: reply failed, target " "thread, %d:%d, has error code %d " "already\n", target_thread->proc->pid, target_thread->pid, @@ -1308,14 +1326,15 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, int debug_id = buffer->debug_id; binder_debug(BINDER_DEBUG_TRANSACTION, - "binder: %d buffer release %d, size %zd-%zd, failed at %p\n", - proc->pid, buffer->debug_id, + "binder: %d buffer release %d, size %zd-%zd, failed at" + " %p\n", proc->pid, buffer->debug_id, buffer->data_size, buffer->offsets_size, failed_at); if (buffer->target_node) binder_dec_node(buffer->target_node, 1, 0); - offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *))); + offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, + sizeof(void *))); if (failed_at) off_end = failed_at; else @@ -1325,7 +1344,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, if (*offp > buffer->data_size - sizeof(*fp) || buffer->data_size < sizeof(*fp) || !IS_ALIGNED(*offp, sizeof(void *))) { - printk(KERN_ERR "binder: transaction release %d bad" + binder_debug(BINDER_DEBUG_TOP_ERRORS, + "binder: transaction release %d bad" "offset %zd, size %zd\n", debug_id, *offp, buffer->data_size); continue; @@ -1334,29 +1354,35 @@ static void 
binder_transaction_buffer_release(struct binder_proc *proc, switch (fp->type) { case BINDER_TYPE_BINDER: case BINDER_TYPE_WEAK_BINDER: { - struct binder_node *node = binder_get_node(proc, fp->binder); + struct binder_node *node = binder_get_node(proc, + fp->binder); if (node == NULL) { - printk(KERN_ERR "binder: transaction release %d" + binder_debug(BINDER_DEBUG_TOP_ERRORS, + "binder: transaction release %d" " bad node %p\n", debug_id, fp->binder); break; } binder_debug(BINDER_DEBUG_TRANSACTION, " node %d u%p\n", node->debug_id, node->ptr); - binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0); + binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, + 0); } break; case BINDER_TYPE_HANDLE: case BINDER_TYPE_WEAK_HANDLE: { - struct binder_ref *ref = binder_get_ref(proc, fp->handle); + struct binder_ref *ref = binder_get_ref(proc, + fp->handle); if (ref == NULL) { - printk(KERN_ERR "binder: transaction release %d" + binder_debug(BINDER_DEBUG_TOP_ERRORS, + "binder: transaction release %d" " bad handle %ld\n", debug_id, fp->handle); break; } binder_debug(BINDER_DEBUG_TRANSACTION, " ref %d desc %d (node %d)\n", - ref->debug_id, ref->desc, ref->node->debug_id); + ref->debug_id, ref->desc, + ref->node->debug_id); binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE); } break; @@ -1368,7 +1394,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, break; default: - printk(KERN_ERR "binder: transaction release %d bad " + binder_debug(BINDER_DEBUG_TOP_ERRORS, + "binder: transaction release %d bad " "object type %lx\n", debug_id, fp->type); break; } @@ -1594,15 +1621,19 @@ static void binder_transaction(struct binder_proc *proc, case BINDER_TYPE_BINDER: case BINDER_TYPE_WEAK_BINDER: { struct binder_ref *ref; - struct binder_node *node = binder_get_node(proc, fp->binder); + struct binder_node *node = binder_get_node(proc, + fp->binder); if (node == NULL) { - node = binder_new_node(proc, fp->binder, fp->cookie); + node = binder_new_node(proc, fp->binder, + fp->cookie); if (node == NULL) { return_error = BR_FAILED_REPLY; goto err_binder_new_node_failed; } - node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK; - node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); + node->min_priority = fp->flags & + FLAT_BINDER_FLAG_PRIORITY_MASK; + node->accept_fds = !!(fp->flags & + FLAT_BINDER_FLAG_ACCEPTS_FDS); } if (fp->cookie != node->cookie) { binder_user_error("binder: %d:%d sending u%p " @@ -1632,7 +1663,8 @@ static void binder_transaction(struct binder_proc *proc, } break; case BINDER_TYPE_HANDLE: case BINDER_TYPE_WEAK_HANDLE: { - struct binder_ref *ref = binder_get_ref(proc, fp->handle); + struct binder_ref *ref = binder_get_ref(proc, + fp->handle); if (ref == NULL) { binder_user_error("binder: %d:%d got " "transaction with invalid " @@ -1648,24 +1680,31 @@ static void binder_transaction(struct binder_proc *proc, fp->type = BINDER_TYPE_WEAK_BINDER; fp->binder = ref->node->ptr; fp->cookie = ref->node->cookie; - binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL); + binder_inc_node(ref->node, fp->type == + BINDER_TYPE_BINDER, 0, NULL); binder_debug(BINDER_DEBUG_TRANSACTION, - " ref %d desc %d -> node %d u%p\n", - ref->debug_id, ref->desc, ref->node->debug_id, - ref->node->ptr); + " ref %d desc %d -> node %d u%p\n", + ref->debug_id, ref->desc, + ref->node->debug_id, + ref->node->ptr); } else { struct binder_ref *new_ref; - new_ref = binder_get_ref_for_node(target_proc, ref->node); + new_ref = binder_get_ref_for_node(target_proc, + ref->node); if 
(new_ref == NULL) { return_error = BR_FAILED_REPLY; goto err_binder_get_ref_for_node_failed; } fp->handle = new_ref->desc; - binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL); + binder_inc_ref(new_ref, fp->type == + BINDER_TYPE_HANDLE, NULL); binder_debug(BINDER_DEBUG_TRANSACTION, - " ref %d desc %d -> ref %d desc %d (node %d)\n", - ref->debug_id, ref->desc, new_ref->debug_id, - new_ref->desc, ref->node->debug_id); + " ref %d desc %d -> ref %d" + " desc %d (node %d)\n", + ref->debug_id, ref->desc, + new_ref->debug_id, + new_ref->desc, + ref->node->debug_id); } } break; @@ -1675,13 +1714,19 @@ static void binder_transaction(struct binder_proc *proc, if (reply) { if (!(in_reply_to->flags & TF_ACCEPT_FDS)) { - binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n", - proc->pid, thread->pid, fp->handle); + binder_user_error("binder: %d:%d got" + " reply with fd, %ld, but" + " target does not allow fds\n", + proc->pid, thread->pid, + fp->handle); return_error = BR_FAILED_REPLY; goto err_fd_not_allowed; } } else if (!target_node->accept_fds) { - binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n", + binder_user_error( + "binder: %d:%d got transaction" + " with fd, %ld, but target does" + " not allow fds\n", proc->pid, thread->pid, fp->handle); return_error = BR_FAILED_REPLY; goto err_fd_not_allowed; @@ -1689,12 +1734,15 @@ static void binder_transaction(struct binder_proc *proc, file = fget(fp->handle); if (file == NULL) { - binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n", + binder_user_error( + "binder: %d:%d got transaction" + " with invalid fd, %ld\n", proc->pid, thread->pid, fp->handle); return_error = BR_FAILED_REPLY; goto err_fget_failed; } - target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); + target_fd = task_get_unused_fd_flags(target_proc, + O_CLOEXEC); if (target_fd < 0) { fput(file); return_error = BR_FAILED_REPLY; @@ -1702,7 +1750,8 @@ static void binder_transaction(struct binder_proc *proc, } task_fd_install(target_proc, target_fd, file); binder_debug(BINDER_DEBUG_TRANSACTION, - " fd %ld -> %d\n", fp->handle, target_fd); + " fd %ld -> %d\n", fp->handle, + target_fd); /* TODO: fput? */ fp->handle = target_fd; } break; @@ -1851,9 +1900,11 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, break; } binder_debug(BINDER_DEBUG_USER_REFS, - "binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n", - proc->pid, thread->pid, debug_string, ref->debug_id, - ref->desc, ref->strong, ref->weak, ref->node->debug_id); + "binder: %d:%d %s ref %d desc %d s %d w %d" + " for node %d\n", proc->pid, thread->pid, + debug_string, ref->debug_id, ref->desc, + ref->strong, ref->weak, + ref->node->debug_id); break; } case BC_INCREFS_DONE: @@ -1914,15 +1965,19 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, binder_debug(BINDER_DEBUG_USER_REFS, "binder: %d:%d %s node %d ls %d lw %d\n", proc->pid, thread->pid, - cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", - node->debug_id, node->local_strong_refs, node->local_weak_refs); + cmd == BC_INCREFS_DONE ? 
"BC_INCREFS_DONE" + : "BC_ACQUIRE_DONE", + node->debug_id, node->local_strong_refs, + node->local_weak_refs); break; } case BC_ATTEMPT_ACQUIRE: - printk(KERN_ERR "binder: BC_ATTEMPT_ACQUIRE not supported\n"); + binder_debug(BINDER_DEBUG_TOP_ERRORS, + "binder: BC_ATTEMPT_ACQUIRE not supported\n"); return -EINVAL; case BC_ACQUIRE_RESULT: - printk(KERN_ERR "binder: BC_ACQUIRE_RESULT not supported\n"); + binder_debug(BINDER_DEBUG_TOP_ERRORS, + "binder: BC_ACQUIRE_RESULT not supported\n"); return -EINVAL; case BC_FREE_BUFFER: { @@ -1948,9 +2003,11 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, break; } binder_debug(BINDER_DEBUG_FREE_BUFFER, - "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n", - proc->pid, thread->pid, data_ptr, buffer->debug_id, - buffer->transaction ? "active" : "finished"); + "binder: %d:%d BC_FREE_BUFFER u%p found" + " buffer %d for %s transaction\n", + proc->pid, thread->pid, data_ptr, + buffer->debug_id, buffer->transaction ? + "active" : "finished"); if (buffer->transaction) { buffer->transaction->buffer = NULL; @@ -2047,13 +2104,15 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, } binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, - "binder: %d:%d %s %p ref %d desc %d s %d w %d for node %d\n", + "binder: %d:%d %s %p ref %d desc %d s %d" + " w %d for node %d\n", proc->pid, thread->pid, cmd == BC_REQUEST_DEATH_NOTIFICATION ? "BC_REQUEST_DEATH_NOTIFICATION" : "BC_CLEAR_DEATH_NOTIFICATION", cookie, ref->debug_id, ref->desc, - ref->strong, ref->weak, ref->node->debug_id); + ref->strong, ref->weak, + ref->node->debug_id); if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { if (ref->death) { @@ -2067,10 +2126,12 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, death = kzalloc(sizeof(*death), GFP_KERNEL); if (death == NULL) { thread->return_error = BR_ERROR; - binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, - "binder: %d:%d " - "BC_REQUEST_DEATH_NOTIFICATION failed\n", - proc->pid, thread->pid); + binder_debug( + BINDER_DEBUG_FAILED_TRANSACTION, + "binder: %d:%d " + "BC_REQUEST_DEATH_NOTIFICATION" + " failed\n", + proc->pid, thread->pid); break; } binder_stats_created(BINDER_STAT_DEATH); @@ -2159,7 +2220,8 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, } break; default: - printk(KERN_ERR "binder: %d:%d unknown command %d\n", + binder_debug(BINDER_DEBUG_TOP_ERRORS, + "binder: %d:%d unknown command %d\n", proc->pid, thread->pid, cmd); return -EINVAL; } @@ -2629,9 +2691,11 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) unsigned int size = _IOC_SIZE(cmd); void __user *ubuf = (void __user *)arg; - /*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/ + /*binder_debug(BINDER_DEBUG_TOP_ERRORS, "binder_ioctl: %d:%d %x %lx\n", + proc->pid, current->pid, cmd, arg);*/ - ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); + ret = wait_event_interruptible(binder_user_error_wait, + binder_stop_on_user_error < 2); if (ret) return ret; @@ -2688,20 +2752,23 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) break; } case BINDER_SET_MAX_THREADS: - if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) { + if (copy_from_user(&proc->max_threads, ubuf, + sizeof(proc->max_threads))) { ret = -EINVAL; goto err; } break; case BINDER_SET_CONTEXT_MGR: if (binder_context_mgr_node != NULL) { - 
printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n"); + binder_debug(BINDER_DEBUG_TOP_ERRORS, + "binder: BINDER_SET_CONTEXT_MGR already set\n"); ret = -EBUSY; goto err; } if (binder_context_mgr_uid != -1) { if (binder_context_mgr_uid != current->cred->euid) { - printk(KERN_ERR "binder: BINDER_SET_" + binder_debug(BINDER_DEBUG_TOP_ERRORS, + "binder: BINDER_SET_" "CONTEXT_MGR bad uid %d != %d\n", current->cred->euid, binder_context_mgr_uid); @@ -2747,7 +2814,9 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) mutex_unlock(&binder_lock); wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); if (ret && ret != -ERESTARTSYS) - printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); + binder_debug(BINDER_DEBUG_TOP_ERRORS, + "binder: %d:%d ioctl %x %lx returned %d\n", + proc->pid, current->pid, cmd, arg, ret); return ret; } @@ -2759,7 +2828,6 @@ static void binder_vma_open(struct vm_area_struct *vma) proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); - dump_stack(); } static void binder_vma_close(struct vm_area_struct *vma) @@ -2771,6 +2839,7 @@ static void binder_vma_close(struct vm_area_struct *vma) (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); proc->vma = NULL; + proc->vma_vm_mm = NULL; binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); } @@ -2803,6 +2872,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) } vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; + mutex_lock(&binder_mmap_lock); if (proc->buffer) { ret = -EBUSY; failure_string = "already mapped"; @@ -2817,11 +2887,14 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) } proc->buffer = area->addr; proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; + mutex_unlock(&binder_mmap_lock); #ifdef CONFIG_CPU_CACHE_VIPT if (cache_is_vipt_aliasing()) { while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) { - printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer); + binder_debug(BINDER_DEBUG_TOP_ERRORS, + "binder_mmap: %d %lx-%lx maps %p bad alignment\n", + proc->pid, vma->vm_start, vma->vm_end, proc->buffer); vma->vm_start += PAGE_SIZE; } } @@ -2849,10 +2922,12 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) binder_insert_free_buffer(proc, buffer); proc->free_async_space = proc->buffer_size / 2; barrier(); - proc->files = get_files_struct(current); + proc->files = get_files_struct(proc->tsk); proc->vma = vma; + proc->vma_vm_mm = vma->vm_mm; - /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n", + /*binder_debug(BINDER_DEBUG_TOP_ERRORS, + "binder_mmap: %d %lx-%lx maps %p\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/ return 0; @@ -2860,12 +2935,15 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) kfree(proc->pages); proc->pages = NULL; err_alloc_pages_failed: + mutex_lock(&binder_mmap_lock); vfree(proc->buffer); proc->buffer = NULL; err_get_vm_area_failed: err_already_mapped: + mutex_unlock(&binder_mmap_lock); err_bad_arg: - printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n", + binder_debug(BINDER_DEBUG_TOP_ERRORS, + "binder_mmap: %d %lx-%lx %s failed %d\n", proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); return ret; } @@ -3020,7 +3098,8 @@ static void 
binder_deferred_release(struct binder_proc *proc) if (t) { t->buffer = NULL; buffer->transaction = NULL; - printk(KERN_ERR "binder: release proc %d, " + binder_debug(BINDER_DEBUG_TOP_ERRORS, + "binder: release proc %d, " "transaction %d, not freed\n", proc->pid, t->debug_id); /*BUG();*/ diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h index 863ae1ad5d558..25ab6f2759e90 100644 --- a/drivers/staging/android/binder.h +++ b/drivers/staging/android/binder.h @@ -84,7 +84,7 @@ struct binder_version { /* This is the current protocol version. */ #define BINDER_CURRENT_PROTOCOL_VERSION 7 -#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read) +#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read) #define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, int64_t) #define BINDER_SET_MAX_THREADS _IOW('b', 5, size_t) #define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, int) diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c index 531bdbeede89f..ffc2d043dd8e4 100644 --- a/drivers/staging/android/logger.c +++ b/drivers/staging/android/logger.c @@ -37,7 +37,7 @@ * mutex 'mutex'. */ struct logger_log { - unsigned char *buffer;/* the ring buffer itself */ + unsigned char *buffer;/* the ring buffer itself */ struct miscdevice misc; /* misc device representing the log */ wait_queue_head_t wq; /* wait queue for readers */ struct list_head readers; /* this log's readers */ @@ -67,9 +67,9 @@ struct logger_reader { * * This isn't aesthetic. We have several goals: * - * 1) Need to quickly obtain the associated log during an I/O operation - * 2) Readers need to maintain state (logger_reader) - * 3) Writers need to be very fast (open() should be a near no-op) + * 1) Need to quickly obtain the associated log during an I/O operation + * 2) Readers need to maintain state (logger_reader) + * 3) Writers need to be very fast (open() should be a near no-op) * * In the reader case, we can trivially go file->logger_reader->logger_log. * For a writer, we don't want to maintain a logger_reader, so we just go @@ -147,9 +147,9 @@ static ssize_t do_read_log_to_user(struct logger_log *log, * * Behavior: * - * - O_NONBLOCK works - * - If there are no log entries to read, blocks until log is written to - * - Atomically reads exactly one log entry + * - O_NONBLOCK works + * - If there are no log entries to read, blocks until log is written to + * - Atomically reads exactly one log entry * * Optimal read size is LOGGER_ENTRY_MAX_LEN. Will set errno to EINVAL if read * buffer is insufficient to hold next entry. 
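(Aside, not part of the patch: the read semantics documented in the hunk above, one complete entry per read() and EINVAL when the supplied buffer is too small, can be exercised from userspace roughly as follows. The /dev/log/main node name and the LOGGER_ENTRY_MAX_LEN value are assumptions about the logger ABI rather than something taken from this hunk.)

/*
 * Illustrative userspace sketch: reading one logger entry per read(),
 * as described in the comment block above.  Device path and buffer
 * size are assumed, not defined by this patch.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define EXAMPLE_ENTRY_MAX_LEN (4 * 1024)	/* assumed LOGGER_ENTRY_MAX_LEN */

int main(void)
{
	char buf[EXAMPLE_ENTRY_MAX_LEN];
	int fd = open("/dev/log/main", O_RDONLY);

	if (fd < 0)
		return 1;
	for (;;) {
		/* Each read() returns one complete entry: a binary
		 * logger_entry header followed by the payload, or blocks
		 * until the log is written to. */
		ssize_t n = read(fd, buf, sizeof(buf));

		if (n <= 0)
			break;
		fwrite(buf, 1, (size_t)n, stdout);
	}
	close(fd);
	return 0;
}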
@@ -555,10 +555,10 @@ static struct logger_log VAR = { \ .size = SIZE, \ }; -DEFINE_LOGGER_DEVICE(log_main, LOGGER_LOG_MAIN, 64*1024) +DEFINE_LOGGER_DEVICE(log_main, LOGGER_LOG_MAIN, 256*1024) DEFINE_LOGGER_DEVICE(log_events, LOGGER_LOG_EVENTS, 256*1024) -DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 64*1024) -DEFINE_LOGGER_DEVICE(log_system, LOGGER_LOG_SYSTEM, 64*1024) +DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 256*1024) +DEFINE_LOGGER_DEVICE(log_system, LOGGER_LOG_SYSTEM, 256*1024) static struct logger_log *get_log_from_minor(int minor) { diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index 897bc3ab96672..5a3b09d7f16ea 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -34,7 +34,10 @@ #include #include #include +#include #include +#include +#include static uint32_t lowmem_debug_level = 2; static int lowmem_adj[6] = { @@ -44,7 +47,7 @@ static int lowmem_adj[6] = { 12, }; static int lowmem_adj_size = 4; -static size_t lowmem_minfree[6] = { +static int lowmem_minfree[6] = { 3 * 512, /* 6MB */ 2 * 1024, /* 8MB */ 4 * 1024, /* 16MB */ @@ -55,6 +58,8 @@ static int lowmem_minfree_size = 4; static struct task_struct *lowmem_deathpending; static unsigned long lowmem_deathpending_timeout; +extern int compact_nodes(bool sync); + #define lowmem_print(level, x...) \ do { \ if (lowmem_debug_level >= (level)) \ @@ -79,9 +84,9 @@ task_notify_func(struct notifier_block *self, unsigned long val, void *data) return NOTIFY_OK; } -static int lowmem_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask) +static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc) { - struct task_struct *p; + struct task_struct *tsk; struct task_struct *selected = NULL; int rem = 0; int tasksize; @@ -116,40 +121,39 @@ static int lowmem_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask) break; } } - if (nr_to_scan > 0) - lowmem_print(3, "lowmem_shrink %d, %x, ofree %d %d, ma %d\n", - nr_to_scan, gfp_mask, other_free, other_file, + if (sc->nr_to_scan > 0) + lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n", + sc->nr_to_scan, sc->gfp_mask, other_free, other_file, min_adj); rem = global_page_state(NR_ACTIVE_ANON) + global_page_state(NR_ACTIVE_FILE) + global_page_state(NR_INACTIVE_ANON) + global_page_state(NR_INACTIVE_FILE); - if (nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) { - lowmem_print(5, "lowmem_shrink %d, %x, return %d\n", - nr_to_scan, gfp_mask, rem); + if (sc->nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) { + lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n", + sc->nr_to_scan, sc->gfp_mask, rem); return rem; } selected_oom_adj = min_adj; - read_lock(&tasklist_lock); - for_each_process(p) { - struct mm_struct *mm; - struct signal_struct *sig; + rcu_read_lock(); + for_each_process(tsk) { + struct task_struct *p; int oom_adj; - task_lock(p); - mm = p->mm; - sig = p->signal; - if (!mm || !sig) { - task_unlock(p); + if (tsk->flags & PF_KTHREAD) continue; - } - oom_adj = sig->oom_adj; + + p = find_lock_task_mm(tsk); + if (!p) + continue; + + oom_adj = p->signal->oom_adj; if (oom_adj < min_adj) { task_unlock(p); continue; } - tasksize = get_mm_rss(mm); + tasksize = get_mm_rss(p->mm); task_unlock(p); if (tasksize <= 0) continue; @@ -167,17 +171,25 @@ static int lowmem_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask) p->pid, p->comm, oom_adj, tasksize); } if (selected) { + if (fatal_signal_pending(selected)) { + pr_warning("process %d is suffering a slow 
death\n", + selected->pid); + read_unlock(&tasklist_lock); + return rem; + } lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n", selected->pid, selected->comm, selected_oom_adj, selected_tasksize); lowmem_deathpending = selected; lowmem_deathpending_timeout = jiffies + HZ; - force_sig(SIGKILL, selected); + send_sig(SIGKILL, selected, 0); rem -= selected_tasksize; } - lowmem_print(4, "lowmem_shrink %d, %x, return %d\n", - nr_to_scan, gfp_mask, rem); - read_unlock(&tasklist_lock); + lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n", + sc->nr_to_scan, sc->gfp_mask, rem); + rcu_read_unlock(); + if (selected) + compact_nodes(false); return rem; } diff --git a/drivers/staging/hv/channel.c b/drivers/staging/hv/channel.c index 45a627d77b417..09e596a506b0e 100644 --- a/drivers/staging/hv/channel.c +++ b/drivers/staging/hv/channel.c @@ -76,14 +76,14 @@ static void vmbus_setevent(struct vmbus_channel *channel) if (channel->offermsg.monitor_allocated) { /* Each u32 represents 32 channels */ - set_bit(channel->offermsg.child_relid & 31, + sync_set_bit(channel->offermsg.child_relid & 31, (unsigned long *) gVmbusConnection.SendInterruptPage + (channel->offermsg.child_relid >> 5)); monitorpage = gVmbusConnection.MonitorPages; monitorpage++; /* Get the child to parent monitor page */ - set_bit(channel->monitor_bit, + sync_set_bit(channel->monitor_bit, (unsigned long *)&monitorpage->trigger_group [channel->monitor_grp].pending); @@ -99,7 +99,7 @@ static void VmbusChannelClearEvent(struct vmbus_channel *channel) if (Channel->offermsg.monitor_allocated) { /* Each u32 represents 32 channels */ - clear_bit(Channel->offermsg.child_relid & 31, + sync_clear_bit(Channel->offermsg.child_relid & 31, (unsigned long *)gVmbusConnection.SendInterruptPage + (Channel->offermsg.child_relid >> 5)); @@ -107,7 +107,7 @@ static void VmbusChannelClearEvent(struct vmbus_channel *channel) (struct hv_monitor_page *)gVmbusConnection.MonitorPages; monitorPage++; /* Get the child to parent monitor page */ - clear_bit(Channel->monitor_bit, + sync_clear_bit(Channel->monitor_bit, (unsigned long *)&monitorPage->trigger_group [Channel->monitor_grp].Pending); } diff --git a/drivers/staging/hv/connection.c b/drivers/staging/hv/connection.c index c2e298ff48344..0739eb7b6ee11 100644 --- a/drivers/staging/hv/connection.c +++ b/drivers/staging/hv/connection.c @@ -281,7 +281,7 @@ void VmbusOnEvents(void) for (dword = 0; dword < maxdword; dword++) { if (recvInterruptPage[dword]) { for (bit = 0; bit < 32; bit++) { - if (test_and_clear_bit(bit, (unsigned long *)&recvInterruptPage[dword])) { + if (sync_test_and_clear_bit(bit, (unsigned long *)&recvInterruptPage[dword])) { relid = (dword << 5) + bit; DPRINT_DBG(VMBUS, "event detected for relid - %d", relid); @@ -320,7 +320,7 @@ int VmbusPostMessage(void *buffer, size_t bufferLen) int VmbusSetEvent(u32 childRelId) { /* Each u32 represents 32 channels */ - set_bit(childRelId & 31, + sync_set_bit(childRelId & 31, (unsigned long *)gVmbusConnection.SendInterruptPage + (childRelId >> 5)); diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c index b41c9640b72dc..f433addaae569 100644 --- a/drivers/staging/hv/netvsc_drv.c +++ b/drivers/staging/hv/netvsc_drv.c @@ -46,6 +46,7 @@ struct net_device_context { /* point back to our device context */ struct vm_device *device_ctx; unsigned long avail; + struct work_struct work; }; struct netvsc_driver_context { @@ -225,6 +226,7 @@ static void netvsc_linkstatus_callback(struct hv_device *device_obj, unsigned int status) { struct 
vm_device *device_ctx = to_vm_device(device_obj); + struct net_device_context *ndev_ctx; struct net_device *net = dev_get_drvdata(&device_ctx->device); if (!net) { @@ -237,6 +239,8 @@ static void netvsc_linkstatus_callback(struct hv_device *device_obj, netif_carrier_on(net); netif_wake_queue(net); netif_notify_peers(net); + ndev_ctx = netdev_priv(net); + schedule_work(&ndev_ctx->work); } else { netif_carrier_off(net); netif_stop_queue(net); @@ -336,6 +340,25 @@ static const struct net_device_ops device_ops = { .ndo_set_mac_address = eth_mac_addr, }; +/* + * Send GARP packet to network peers after migrations. + * After Quick Migration, the network is not immediately operational in the + * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add + * another netif_notify_peers() into a scheduled work, otherwise GARP packet + * will not be sent after quick migration, and cause network disconnection. + */ +static void netvsc_send_garp(struct work_struct *w) +{ + struct net_device_context *ndev_ctx; + struct net_device *net; + + msleep(20); + ndev_ctx = container_of(w, struct net_device_context, work); + net = dev_get_drvdata(&ndev_ctx->device_ctx->device); + netif_notify_peers(net); +} + + static int netvsc_probe(struct device *device) { struct driver_context *driver_ctx = @@ -364,6 +387,7 @@ static int netvsc_probe(struct device *device) net_device_ctx->device_ctx = device_ctx; net_device_ctx->avail = ring_size; dev_set_drvdata(device, net); + INIT_WORK(&net_device_ctx->work, netvsc_send_garp); /* Notify the netvsc driver of the new device */ ret = net_drv_obj->base.OnDeviceAdd(device_obj, &device_info); diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c index 84fdb64d3ceb2..87e6cf2086f08 100644 --- a/drivers/staging/hv/vmbus_drv.c +++ b/drivers/staging/hv/vmbus_drv.c @@ -291,7 +291,7 @@ static int vmbus_on_isr(struct hv_driver *drv) event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT; /* Since we are a child, we only need to check bit 0 */ - if (test_and_clear_bit(0, (unsigned long *) &event->flags32[0])) { + if (sync_test_and_clear_bit(0, (unsigned long *) &event->flags32[0])) { DPRINT_DBG(VMBUS, "received event %d", event->flags32[0]); ret |= 0x2; } diff --git a/drivers/staging/hv/vmbus_private.h b/drivers/staging/hv/vmbus_private.h index 07f6d22eeabb5..c75b2d7fb2f06 100644 --- a/drivers/staging/hv/vmbus_private.h +++ b/drivers/staging/hv/vmbus_private.h @@ -31,6 +31,7 @@ #include "channel_mgmt.h" #include "ring_buffer.h" #include +#include /* diff --git a/drivers/staging/iio/imu/adis16400.h b/drivers/staging/iio/imu/adis16400.h index 6ff33e1ad8c19..90e90f0e65e88 100644 --- a/drivers/staging/iio/imu/adis16400.h +++ b/drivers/staging/iio/imu/adis16400.h @@ -17,7 +17,8 @@ #ifndef SPI_ADIS16400_H_ #define SPI_ADIS16400_H_ -#define ADIS16400_STARTUP_DELAY 220 /* ms */ +#define ADIS16400_STARTUP_DELAY 290 /* ms */ +#define ADIS16400_MTEST_DELAY 90 /* ms */ #define ADIS16400_READ_REG(a) a #define ADIS16400_WRITE_REG(a) ((a) | 0x80) diff --git a/drivers/staging/iio/imu/adis16400_core.c b/drivers/staging/iio/imu/adis16400_core.c index cfb108a1545b3..2107edb3ebce1 100644 --- a/drivers/staging/iio/imu/adis16400_core.c +++ b/drivers/staging/iio/imu/adis16400_core.c @@ -93,7 +93,6 @@ static int adis16400_spi_write_reg_16(struct device *dev, .tx_buf = st->tx + 2, .bits_per_word = 8, .len = 2, - .cs_change = 1, }, }; @@ -137,7 +136,6 @@ static int adis16400_spi_read_reg_16(struct device *dev, .rx_buf = st->rx, .bits_per_word = 8, .len = 2, - 
.cs_change = 1, }, }; @@ -375,7 +373,7 @@ static int adis16400_self_test(struct device *dev) dev_err(dev, "problem starting self test"); goto err_ret; } - + msleep(ADIS16400_MTEST_DELAY); adis16400_check_status(dev); err_ret: @@ -497,12 +495,12 @@ static int adis16400_initial_setup(struct adis16400_state *st) _reg) static ADIS16400_DEV_ATTR_CALIBBIAS(GYRO_X, ADIS16400_XGYRO_OFF); -static ADIS16400_DEV_ATTR_CALIBBIAS(GYRO_Y, ADIS16400_XGYRO_OFF); -static ADIS16400_DEV_ATTR_CALIBBIAS(GYRO_Z, ADIS16400_XGYRO_OFF); +static ADIS16400_DEV_ATTR_CALIBBIAS(GYRO_Y, ADIS16400_YGYRO_OFF); +static ADIS16400_DEV_ATTR_CALIBBIAS(GYRO_Z, ADIS16400_ZGYRO_OFF); static ADIS16400_DEV_ATTR_CALIBBIAS(ACCEL_X, ADIS16400_XACCL_OFF); -static ADIS16400_DEV_ATTR_CALIBBIAS(ACCEL_Y, ADIS16400_XACCL_OFF); -static ADIS16400_DEV_ATTR_CALIBBIAS(ACCEL_Z, ADIS16400_XACCL_OFF); +static ADIS16400_DEV_ATTR_CALIBBIAS(ACCEL_Y, ADIS16400_YACCL_OFF); +static ADIS16400_DEV_ATTR_CALIBBIAS(ACCEL_Z, ADIS16400_ZACCL_OFF); static IIO_DEV_ATTR_IN_NAMED_RAW(0, supply, adis16400_read_14bit_signed, diff --git a/drivers/staging/iio/imu/adis16400_ring.c b/drivers/staging/iio/imu/adis16400_ring.c index 33293fba9bccd..da28cb4288af3 100644 --- a/drivers/staging/iio/imu/adis16400_ring.c +++ b/drivers/staging/iio/imu/adis16400_ring.c @@ -122,12 +122,10 @@ static int adis16400_spi_read_burst(struct device *dev, u8 *rx) .tx_buf = st->tx, .bits_per_word = 8, .len = 2, - .cs_change = 0, }, { .rx_buf = rx, .bits_per_word = 8, .len = 24, - .cs_change = 1, }, }; @@ -162,9 +160,10 @@ static void adis16400_trigger_bh_to_ring(struct work_struct *work_s) work_trigger_to_ring); struct iio_ring_buffer *ring = st->indio_dev->ring; - int i = 0; + int i = 0, j; s16 *data; size_t datasize = ring->access.get_bytes_per_datum(ring); + unsigned long mask = ring->scan_mask; data = kmalloc(datasize , GFP_KERNEL); if (data == NULL) { @@ -174,9 +173,12 @@ static void adis16400_trigger_bh_to_ring(struct work_struct *work_s) if (ring->scan_count) if (adis16400_spi_read_burst(&st->indio_dev->dev, st->rx) >= 0) - for (; i < ring->scan_count; i++) + for (; i < ring->scan_count; i++) { + j = __ffs(mask); + mask &= ~(1 << j); data[i] = be16_to_cpup( - (__be16 *)&(st->rx[i*2])); + (__be16 *)&(st->rx[j*2])); + } /* Guaranteed to be aligned with 8 byte boundary */ if (ring->scan_timestamp) diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c index 56d3a4e5622f2..1edacd0cf4ebd 100644 --- a/drivers/staging/pohmelfs/inode.c +++ b/drivers/staging/pohmelfs/inode.c @@ -890,7 +890,7 @@ static int pohmelfs_fsync(struct file *file, int datasync) { struct inode *inode = file->f_mapping->host; - return sync_inode_metadata(inode, 1); + return sync_inode_metadata(inode, datasync, 1); } ssize_t pohmelfs_write(struct file *file, const char __user *buf, diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_set.c b/drivers/staging/rtl8712/rtl871x_ioctl_set.c index 8b1451d030698..8486eb1503cc0 100644 --- a/drivers/staging/rtl8712/rtl871x_ioctl_set.c +++ b/drivers/staging/rtl8712/rtl871x_ioctl_set.c @@ -68,7 +68,10 @@ static u8 do_join(struct _adapter *padapter) pmlmepriv->fw_state |= _FW_UNDER_LINKING; pmlmepriv->pscanned = plist; pmlmepriv->to_join = true; - if (_queue_empty(queue) == true) { + + /* adhoc mode will start with an empty queue, but skip checking */ + if (!check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) && + _queue_empty(queue)) { if (pmlmepriv->fw_state & _FW_UNDER_LINKING) pmlmepriv->fw_state ^= _FW_UNDER_LINKING; /* when set_ssid/set_bssid for do_join(), but 
scanning queue diff --git a/drivers/staging/tidspbridge/rmgr/proc.c b/drivers/staging/tidspbridge/rmgr/proc.c index b47d7aa747b1d..e2fe165bd797b 100644 --- a/drivers/staging/tidspbridge/rmgr/proc.c +++ b/drivers/staging/tidspbridge/rmgr/proc.c @@ -781,12 +781,14 @@ int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size, (u32)pmpu_addr, ul_size, dir); + mutex_lock(&proc_lock); + /* find requested memory are in cached mapping information */ map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size); if (!map_obj) { pr_err("%s: find_containing_mapping failed\n", __func__); status = -EFAULT; - goto err_out; + goto no_map; } if (memory_give_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) { @@ -795,6 +797,8 @@ int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size, status = -EFAULT; } +no_map: + mutex_unlock(&proc_lock); err_out: return status; @@ -819,21 +823,24 @@ int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size, (u32)pmpu_addr, ul_size, dir); + mutex_lock(&proc_lock); + /* find requested memory are in cached mapping information */ map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size); if (!map_obj) { pr_err("%s: find_containing_mapping failed\n", __func__); status = -EFAULT; - goto err_out; + goto no_map; } if (memory_regain_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) { pr_err("%s: InValid address parameters %p %x\n", __func__, pmpu_addr, ul_size); status = -EFAULT; - goto err_out; } +no_map: + mutex_unlock(&proc_lock); err_out: return status; } @@ -1726,9 +1733,8 @@ int proc_un_map(void *hprocessor, void *map_addr, (p_proc_object->hbridge_context, va_align, size_align); } - mutex_unlock(&proc_lock); if (status) - goto func_end; + goto unmap_failed; /* * A successful unmap should be followed by removal of map_obj @@ -1737,6 +1743,9 @@ int proc_un_map(void *hprocessor, void *map_addr, */ remove_mapping_information(pr_ctxt, (u32) map_addr, size_align); +unmap_failed: + mutex_unlock(&proc_lock); + func_end: dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n", __func__, hprocessor, map_addr, status); diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c index ae6ac82754a4d..8e60332efaef3 100644 --- a/drivers/staging/usbip/stub_rx.c +++ b/drivers/staging/usbip/stub_rx.c @@ -170,33 +170,23 @@ static int tweak_set_configuration_cmd(struct urb *urb) static int tweak_reset_device_cmd(struct urb *urb) { - struct usb_ctrlrequest *req; - __u16 value; - __u16 index; - int ret; - - req = (struct usb_ctrlrequest *) urb->setup_packet; - value = le16_to_cpu(req->wValue); - index = le16_to_cpu(req->wIndex); - - usbip_uinfo("reset_device (port %d) to %s\n", index, - dev_name(&urb->dev->dev)); + struct stub_priv *priv = (struct stub_priv *) urb->context; + struct stub_device *sdev = priv->sdev; - /* all interfaces should be owned by usbip driver, so just reset it. */ - ret = usb_lock_device_for_reset(urb->dev, NULL); - if (ret < 0) { - dev_err(&urb->dev->dev, "lock for reset\n"); - return ret; - } - - /* try to reset the device */ - ret = usb_reset_device(urb->dev); - if (ret < 0) - dev_err(&urb->dev->dev, "device reset\n"); + usbip_uinfo("reset_device %s\n", dev_name(&urb->dev->dev)); - usb_unlock_device(urb->dev); - - return ret; + /* + * usb_lock_device_for_reset caused a deadlock: it causes the driver + * to unbind. In the shutdown the rx thread is signalled to shut down + * but this thread is pending in the usb_lock_device_for_reset. + * + * Instead queue the reset. 
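/*
 * A compact sketch of the locking idiom the proc_begin_dma()/proc_end_dma()
 * hunks above adopt: take the mutex before the lookup and route every failure
 * through one label that drops it exactly once.  map_lock, lookup_mapping()
 * and do_dma() are placeholders, not tidspbridge symbols.
 */
#include <linux/mutex.h>
#include <linux/errno.h>

struct mapping;
extern struct mapping *lookup_mapping(void *addr, unsigned int len);
extern int do_dma(struct mapping *map, void *addr, unsigned int len);

static DEFINE_MUTEX(map_lock);

static int begin_dma(void *addr, unsigned int len)
{
	struct mapping *map;
	int status = 0;

	mutex_lock(&map_lock);

	map = lookup_mapping(addr, len);
	if (!map) {
		status = -EFAULT;
		goto no_map;		/* still holding the lock */
	}

	if (do_dma(map, addr, len))
		status = -EFAULT;

no_map:
	mutex_unlock(&map_lock);
	return status;
}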
+ * + * Unfortunatly an existing usbip connection will be dropped due to + * driver unbinding. + */ + usb_queue_reset_device(sdev->interface); + return 0; } /* diff --git a/drivers/staging/usbip/stub_tx.c b/drivers/staging/usbip/stub_tx.c index d7136e2c86faa..b7a493c1df461 100644 --- a/drivers/staging/usbip/stub_tx.c +++ b/drivers/staging/usbip/stub_tx.c @@ -169,7 +169,6 @@ static int stub_send_ret_submit(struct stub_device *sdev) struct stub_priv *priv, *tmp; struct msghdr msg; - struct kvec iov[3]; size_t txsize; size_t total_size = 0; @@ -179,28 +178,73 @@ static int stub_send_ret_submit(struct stub_device *sdev) struct urb *urb = priv->urb; struct usbip_header pdu_header; void *iso_buffer = NULL; + struct kvec *iov = NULL; + int iovnum = 0; txsize = 0; memset(&pdu_header, 0, sizeof(pdu_header)); memset(&msg, 0, sizeof(msg)); - memset(&iov, 0, sizeof(iov)); - usbip_dbg_stub_tx("setup txdata urb %p\n", urb); + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) + iovnum = 2 + urb->number_of_packets; + else + iovnum = 2; + + iov = kzalloc(iovnum * sizeof(struct kvec), GFP_KERNEL); + if (!iov) { + usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_MALLOC); + return -1; + } + + iovnum = 0; /* 1. setup usbip_header */ setup_ret_submit_pdu(&pdu_header, urb); + usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n", + pdu_header.base.seqnum, urb); + /*usbip_dump_header(pdu_header);*/ usbip_header_correct_endian(&pdu_header, 1); - iov[0].iov_base = &pdu_header; - iov[0].iov_len = sizeof(pdu_header); + iov[iovnum].iov_base = &pdu_header; + iov[iovnum].iov_len = sizeof(pdu_header); + iovnum++; txsize += sizeof(pdu_header); /* 2. setup transfer buffer */ - if (usb_pipein(urb->pipe) && urb->actual_length > 0) { - iov[1].iov_base = urb->transfer_buffer; - iov[1].iov_len = urb->actual_length; + if (usb_pipein(urb->pipe) && + usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS && + urb->actual_length > 0) { + iov[iovnum].iov_base = urb->transfer_buffer; + iov[iovnum].iov_len = urb->actual_length; + iovnum++; txsize += urb->actual_length; + } else if (usb_pipein(urb->pipe) && + usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { + /* + * For isochronous packets: actual length is the sum of + * the actual length of the individual, packets, but as + * the packet offsets are not changed there will be + * padding between the packets. To optimally use the + * bandwidth the padding is not transmitted. + */ + + int i; + for (i = 0; i < urb->number_of_packets; i++) { + iov[iovnum].iov_base = urb->transfer_buffer + urb->iso_frame_desc[i].offset; + iov[iovnum].iov_len = urb->iso_frame_desc[i].actual_length; + iovnum++; + txsize += urb->iso_frame_desc[i].actual_length; + } + + if (txsize != sizeof(pdu_header) + urb->actual_length) { + dev_err(&sdev->interface->dev, + "actual length of urb (%d) does not match iso packet sizes (%d)\n", + urb->actual_length, txsize-sizeof(pdu_header)); + kfree(iov); + usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP); + return -1; + } } /* 3. 
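/*
 * stub_send_ret_submit() above now sizes the kvec array per URB and adds one
 * entry per isochronous packet, so the padding between packets never hits the
 * wire.  The same gather technique in plain user space with writev(); the
 * struct iso_pkt layout and send_iso() are invented for the example.
 */
#include <stdlib.h>
#include <sys/uio.h>

struct iso_pkt {
	size_t offset;		/* where the packet sits in the padded buffer */
	size_t actual_length;	/* bytes it really holds */
};

/* Send header + only the used part of each packet; the gaps are skipped. */
static ssize_t send_iso(int fd, void *hdr, size_t hdrlen,
			char *buf, const struct iso_pkt *pkt, int npkt)
{
	struct iovec *iov = calloc(npkt + 1, sizeof(*iov));
	ssize_t ret;
	int i;

	if (!iov)
		return -1;

	iov[0].iov_base = hdr;
	iov[0].iov_len = hdrlen;
	for (i = 0; i < npkt; i++) {
		iov[i + 1].iov_base = buf + pkt[i].offset;
		iov[i + 1].iov_len = pkt[i].actual_length;
	}

	ret = writev(fd, iov, npkt + 1);
	free(iov);
	return ret;
}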
setup iso_packet_descriptor */ @@ -211,32 +255,34 @@ static int stub_send_ret_submit(struct stub_device *sdev) if (!iso_buffer) { usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_MALLOC); + kfree(iov); return -1; } - iov[2].iov_base = iso_buffer; - iov[2].iov_len = len; + iov[iovnum].iov_base = iso_buffer; + iov[iovnum].iov_len = len; txsize += len; + iovnum++; } - ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, iov, - 3, txsize); + ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, + iov, iovnum, txsize); if (ret != txsize) { dev_err(&sdev->interface->dev, "sendmsg failed!, retval %d for %zd\n", ret, txsize); + kfree(iov); kfree(iso_buffer); usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP); return -1; } + kfree(iov); kfree(iso_buffer); - usbip_dbg_stub_tx("send txdata\n"); total_size += txsize; } - spin_lock_irqsave(&sdev->priv_lock, flags); list_for_each_entry_safe(priv, tmp, &sdev->priv_free, list) { diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c index 210ef16bab8d2..69aa496b19317 100644 --- a/drivers/staging/usbip/usbip_common.c +++ b/drivers/staging/usbip/usbip_common.c @@ -334,10 +334,11 @@ void usbip_dump_header(struct usbip_header *pdu) usbip_udbg("CMD_UNLINK: seq %u\n", pdu->u.cmd_unlink.seqnum); break; case USBIP_RET_SUBMIT: - usbip_udbg("RET_SUBMIT: st %d al %u sf %d ec %d\n", + usbip_udbg("RET_SUBMIT: st %d al %u sf %d #p %d ec %d\n", pdu->u.ret_submit.status, pdu->u.ret_submit.actual_length, pdu->u.ret_submit.start_frame, + pdu->u.ret_submit.number_of_packets, pdu->u.ret_submit.error_count); case USBIP_RET_UNLINK: usbip_udbg("RET_UNLINK: status %d\n", pdu->u.ret_unlink.status); @@ -625,6 +626,7 @@ static void usbip_pack_ret_submit(struct usbip_header *pdu, struct urb *urb, rpdu->status = urb->status; rpdu->actual_length = urb->actual_length; rpdu->start_frame = urb->start_frame; + rpdu->number_of_packets = urb->number_of_packets; rpdu->error_count = urb->error_count; } else { /* vhci_rx.c */ @@ -632,6 +634,7 @@ static void usbip_pack_ret_submit(struct usbip_header *pdu, struct urb *urb, urb->status = rpdu->status; urb->actual_length = rpdu->actual_length; urb->start_frame = rpdu->start_frame; + urb->number_of_packets = rpdu->number_of_packets; urb->error_count = rpdu->error_count; } } @@ -700,11 +703,13 @@ static void correct_endian_ret_submit(struct usbip_header_ret_submit *pdu, cpu_to_be32s(&pdu->status); cpu_to_be32s(&pdu->actual_length); cpu_to_be32s(&pdu->start_frame); + cpu_to_be32s(&pdu->number_of_packets); cpu_to_be32s(&pdu->error_count); } else { be32_to_cpus(&pdu->status); be32_to_cpus(&pdu->actual_length); be32_to_cpus(&pdu->start_frame); + be32_to_cpus(&pdu->number_of_packets); be32_to_cpus(&pdu->error_count); } } @@ -830,6 +835,7 @@ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb) int size = np * sizeof(*iso); int i; int ret; + int total_length = 0; if (!usb_pipeisoc(urb->pipe)) return 0; @@ -859,19 +865,75 @@ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb) return -EPIPE; } + for (i = 0; i < np; i++) { iso = buff + (i * sizeof(*iso)); usbip_iso_pakcet_correct_endian(iso, 0); usbip_pack_iso(iso, &urb->iso_frame_desc[i], 0); + total_length += urb->iso_frame_desc[i].actual_length; } kfree(buff); + if (total_length != urb->actual_length) { + dev_err(&urb->dev->dev, + "total length of iso packets (%d) not equal to actual length of buffer (%d)\n", + total_length, urb->actual_length); + + if (ud->side == USBIP_STUB) + usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); + else + usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); + 
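/*
 * A stand-alone version of the consistency check added to usbip_recv_iso()
 * above: the per-packet actual lengths must add up to the URB's actual_length,
 * otherwise the peer sent a malformed reply.  struct iso_desc and
 * check_iso_lengths() are simplified stand-ins.
 */
#include <stdio.h>

struct iso_desc {
	unsigned int actual_length;
};

/* Returns 0 when the descriptors are consistent with the buffer length. */
static int check_iso_lengths(const struct iso_desc *desc, int np,
			     unsigned int urb_actual_length)
{
	unsigned int total = 0;
	int i;

	for (i = 0; i < np; i++)
		total += desc[i].actual_length;

	if (total != urb_actual_length) {
		fprintf(stderr, "iso packets sum to %u, urb says %u\n",
			total, urb_actual_length);
		return -1;
	}
	return 0;
}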
+ return -EPIPE; + } + return ret; } EXPORT_SYMBOL_GPL(usbip_recv_iso); +/* + * This functions restores the padding which was removed for optimizing + * the bandwidth during transfer over tcp/ip + * + * buffer and iso packets need to be stored and be in propeper endian in urb + * before calling this function + */ +int usbip_pad_iso(struct usbip_device *ud, struct urb *urb) +{ + int np = urb->number_of_packets; + int i; + int ret; + int actualoffset = urb->actual_length; + + if (!usb_pipeisoc(urb->pipe)) + return 0; + + /* if no packets or length of data is 0, then nothing to unpack */ + if (np == 0 || urb->actual_length == 0) + return 0; + + /* + * if actual_length is transfer_buffer_length then no padding is + * present. + */ + if (urb->actual_length == urb->transfer_buffer_length) + return 0; + + /* + * loop over all packets from last to first (to prevent overwritting + * memory when padding) and move them into the proper place + */ + for (i = np-1; i > 0; i--) { + actualoffset -= urb->iso_frame_desc[i].actual_length; + memmove(urb->transfer_buffer + urb->iso_frame_desc[i].offset, + urb->transfer_buffer + actualoffset, + urb->iso_frame_desc[i].actual_length); + } + return ret; +} +EXPORT_SYMBOL_GPL(usbip_pad_iso); /* some members of urb must be substituted before. */ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb) diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h index d280e234e0670..baa4c09bb98c6 100644 --- a/drivers/staging/usbip/usbip_common.h +++ b/drivers/staging/usbip/usbip_common.h @@ -393,6 +393,8 @@ void usbip_header_correct_endian(struct usbip_header *pdu, int send); int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb); /* some members of urb must be substituted before. */ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb); +/* some members of urb must be substituted before. 
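/*
 * usbip_pad_iso() above spreads the tightly packed packets back out to their
 * nominal offsets.  Walking from the last packet to the first is what makes
 * the in-place memmove() safe: a packet's packed source bytes are never
 * overwritten before they are copied.  A user-space rendering of the loop;
 * struct iso_desc and pad_iso() are illustrative.
 */
#include <string.h>

struct iso_desc {
	size_t offset;		/* nominal (padded) position of the packet */
	size_t actual_length;	/* bytes actually received */
};

/*
 * buf currently holds the packets back to back (packed_len bytes in total).
 * Move them, last first, to their nominal offsets; packet 0 already starts
 * at offset 0 and needs no move.
 */
static void pad_iso(char *buf, size_t packed_len,
		    const struct iso_desc *desc, int np)
{
	size_t src = packed_len;
	int i;

	for (i = np - 1; i > 0; i--) {
		src -= desc[i].actual_length;
		memmove(buf + desc[i].offset, buf + src, desc[i].actual_length);
	}
}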
*/ +int usbip_pad_iso(struct usbip_device *ud, struct urb *urb); void *usbip_alloc_iso_desc_pdu(struct urb *urb, ssize_t *bufflen); diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c index a35fe61268de3..7284d0c18a4f6 100644 --- a/drivers/staging/usbip/vhci_hcd.c +++ b/drivers/staging/usbip/vhci_hcd.c @@ -1135,7 +1135,7 @@ static int vhci_hcd_probe(struct platform_device *pdev) usbip_uerr("create hcd failed\n"); return -ENOMEM; } - + hcd->has_tt = 1; /* this is private data for vhci_hcd */ the_controller = hcd_to_vhci(hcd); diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c index bf69914709410..109002a347b9a 100644 --- a/drivers/staging/usbip/vhci_rx.c +++ b/drivers/staging/usbip/vhci_rx.c @@ -99,6 +99,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, if (usbip_recv_iso(ud, urb) < 0) return; + /* restore the padding in iso packets */ + if (usbip_pad_iso(ud, urb) < 0) + return; if (usbip_dbg_flag_vhci_rx) usbip_dump_urb(urb); diff --git a/drivers/staging/winbond/core.h b/drivers/staging/winbond/core.h index d7b3aca5ddeba..6160b2fab833f 100644 --- a/drivers/staging/winbond/core.h +++ b/drivers/staging/winbond/core.h @@ -3,6 +3,7 @@ #include #include +#include #include "wbhal.h" #include "mto.h" diff --git a/drivers/staging/zram/Kconfig b/drivers/staging/zram/Kconfig index da079f8d6e3dd..3bec4dba3fe5d 100644 --- a/drivers/staging/zram/Kconfig +++ b/drivers/staging/zram/Kconfig @@ -1,6 +1,11 @@ +config XVMALLOC + bool + default n + config ZRAM tristate "Compressed RAM block device support" - depends on BLOCK + depends on BLOCK && SYSFS + select XVMALLOC select LZO_COMPRESS select LZO_DECOMPRESS default n @@ -15,3 +20,11 @@ config ZRAM See zram.txt for more information. Project home: http://compcache.googlecode.com/ + +config ZRAM_DEBUG + bool "Compressed RAM block device debug support" + depends on ZRAM + default n + help + This option adds additional debugging code to the compressed + RAM block device driver. diff --git a/drivers/staging/zram/Makefile b/drivers/staging/zram/Makefile index b1709c57f636a..2a6d3213a7564 100644 --- a/drivers/staging/zram/Makefile +++ b/drivers/staging/zram/Makefile @@ -1,3 +1,4 @@ -zram-y := zram_drv.o zram_sysfs.o xvmalloc.o +zram-y := zram_drv.o zram_sysfs.o obj-$(CONFIG_ZRAM) += zram.o +obj-$(CONFIG_XVMALLOC) += xvmalloc.o \ No newline at end of file diff --git a/drivers/staging/zram/xvmalloc.c b/drivers/staging/zram/xvmalloc.c index b64406739d05e..1f9c5082b6d5d 100644 --- a/drivers/staging/zram/xvmalloc.c +++ b/drivers/staging/zram/xvmalloc.c @@ -10,6 +10,12 @@ * Released under the terms of GNU General Public License Version 2.0 */ +#ifdef CONFIG_ZRAM_DEBUG +#define DEBUG +#endif + +#include +#include #include #include #include @@ -46,7 +52,7 @@ static void clear_flag(struct block_header *block, enum blockflags flag) } /* - * Given pair, provide a derefrencable pointer. + * Given pair, provide a dereferencable pointer. * This is called from xv_malloc/xv_free path, so it * needs to be fast. */ @@ -200,61 +206,23 @@ static void insert_block(struct xv_pool *pool, struct page *page, u32 offset, nextblock->link.prev_page = page; nextblock->link.prev_offset = offset; put_ptr_atomic(nextblock, KM_USER1); + /* If there was a next page then the free bits are set. */ + return; } __set_bit(slindex % BITS_PER_LONG, &pool->slbitmap[flindex]); __set_bit(flindex, &pool->flbitmap); } -/* - * Remove block from head of freelist. Index 'slindex' identifies the freelist. 
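/*
 * xvmalloc's freelists (manipulated above) are tracked with a two-level
 * bitmap: flbitmap has one bit per word of slbitmap[], so whole groups of
 * empty size classes can be skipped at once.  A toy model using gcc builtins;
 * NUM_LISTS and the helper names are made up.
 */
#include <limits.h>

#define NUM_LISTS	128
#define BITS		(sizeof(unsigned long) * CHAR_BIT)
#define NUM_WORDS	((NUM_LISTS + BITS - 1) / BITS)

struct two_level {
	unsigned long fl;		/* one bit per slbitmap word */
	unsigned long sl[NUM_WORDS];	/* one bit per freelist */
};

static void mark_nonempty(struct two_level *m, unsigned int idx)
{
	m->sl[idx / BITS] |= 1UL << (idx % BITS);
	m->fl |= 1UL << (idx / BITS);
}

static void mark_empty(struct two_level *m, unsigned int idx)
{
	m->sl[idx / BITS] &= ~(1UL << (idx % BITS));
	if (!m->sl[idx / BITS])		/* last list in this word gone? */
		m->fl &= ~(1UL << (idx / BITS));
}

/* Index of the first non-empty freelist, or -1 if all are empty. */
static int first_nonempty(const struct two_level *m)
{
	unsigned int w;

	if (!m->fl)
		return -1;
	w = __builtin_ctzl(m->fl);
	return w * BITS + __builtin_ctzl(m->sl[w]);
}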
- */ -static void remove_block_head(struct xv_pool *pool, - struct block_header *block, u32 slindex) -{ - struct block_header *tmpblock; - u32 flindex = slindex / BITS_PER_LONG; - - pool->freelist[slindex].page = block->link.next_page; - pool->freelist[slindex].offset = block->link.next_offset; - block->link.prev_page = NULL; - block->link.prev_offset = 0; - - if (!pool->freelist[slindex].page) { - __clear_bit(slindex % BITS_PER_LONG, &pool->slbitmap[flindex]); - if (!pool->slbitmap[flindex]) - __clear_bit(flindex, &pool->flbitmap); - } else { - /* - * DEBUG ONLY: We need not reinitialize freelist head previous - * pointer to 0 - we never depend on its value. But just for - * sanity, lets do it. - */ - tmpblock = get_ptr_atomic(pool->freelist[slindex].page, - pool->freelist[slindex].offset, KM_USER1); - tmpblock->link.prev_page = NULL; - tmpblock->link.prev_offset = 0; - put_ptr_atomic(tmpblock, KM_USER1); - } -} - /* * Remove block from freelist. Index 'slindex' identifies the freelist. */ static void remove_block(struct xv_pool *pool, struct page *page, u32 offset, struct block_header *block, u32 slindex) { - u32 flindex; + u32 flindex = slindex / BITS_PER_LONG; struct block_header *tmpblock; - if (pool->freelist[slindex].page == page - && pool->freelist[slindex].offset == offset) { - remove_block_head(pool, block, slindex); - return; - } - - flindex = slindex / BITS_PER_LONG; - if (block->link.prev_page) { tmpblock = get_ptr_atomic(block->link.prev_page, block->link.prev_offset, KM_USER1); @@ -270,6 +238,35 @@ static void remove_block(struct xv_pool *pool, struct page *page, u32 offset, tmpblock->link.prev_offset = block->link.prev_offset; put_ptr_atomic(tmpblock, KM_USER1); } + + /* Is this block is at the head of the freelist? */ + if (pool->freelist[slindex].page == page + && pool->freelist[slindex].offset == offset) { + + pool->freelist[slindex].page = block->link.next_page; + pool->freelist[slindex].offset = block->link.next_offset; + + if (pool->freelist[slindex].page) { + struct block_header *tmpblock; + tmpblock = get_ptr_atomic(pool->freelist[slindex].page, + pool->freelist[slindex].offset, + KM_USER1); + tmpblock->link.prev_page = NULL; + tmpblock->link.prev_offset = 0; + put_ptr_atomic(tmpblock, KM_USER1); + } else { + /* This freelist bucket is empty */ + __clear_bit(slindex % BITS_PER_LONG, + &pool->slbitmap[flindex]); + if (!pool->slbitmap[flindex]) + __clear_bit(flindex, &pool->flbitmap); + } + } + + block->link.prev_page = NULL; + block->link.prev_offset = 0; + block->link.next_page = NULL; + block->link.next_offset = 0; } /* @@ -320,11 +317,13 @@ struct xv_pool *xv_create_pool(void) return pool; } +EXPORT_SYMBOL_GPL(xv_create_pool); void xv_destroy_pool(struct xv_pool *pool) { kfree(pool); } +EXPORT_SYMBOL_GPL(xv_destroy_pool); /** * xv_malloc - Allocate block of given size from pool. 
@@ -378,7 +377,7 @@ int xv_malloc(struct xv_pool *pool, u32 size, struct page **page, block = get_ptr_atomic(*page, *offset, KM_USER0); - remove_block_head(pool, block, index); + remove_block(pool, *page, *offset, block, index); /* Split the block if required */ tmpoffset = *offset + size + XV_ALIGN; @@ -413,6 +412,7 @@ int xv_malloc(struct xv_pool *pool, u32 size, struct page **page, return 0; } +EXPORT_SYMBOL_GPL(xv_malloc); /* * Free block identified with @@ -489,6 +489,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset) put_ptr_atomic(page_start, KM_USER0); spin_unlock(&pool->lock); } +EXPORT_SYMBOL_GPL(xv_free); u32 xv_get_object_size(void *obj) { @@ -497,6 +498,7 @@ u32 xv_get_object_size(void *obj) blk = (struct block_header *)((char *)(obj) - XV_ALIGN); return blk->size; } +EXPORT_SYMBOL_GPL(xv_get_object_size); /* * Returns total memory used by allocator (userdata + metadata) @@ -505,3 +507,4 @@ u64 xv_get_total_size_bytes(struct xv_pool *pool) { return pool->total_pages << PAGE_SHIFT; } +EXPORT_SYMBOL_GPL(xv_get_total_size_bytes); diff --git a/drivers/staging/zram/xvmalloc_int.h b/drivers/staging/zram/xvmalloc_int.h index e23ed5c8b8e4e..b5f1f7febcf63 100644 --- a/drivers/staging/zram/xvmalloc_int.h +++ b/drivers/staging/zram/xvmalloc_int.h @@ -19,7 +19,11 @@ /* User configurable params */ /* Must be power of two */ +#ifdef CONFIG_64BIT +#define XV_ALIGN_SHIFT 3 +#else #define XV_ALIGN_SHIFT 2 +#endif #define XV_ALIGN (1 << XV_ALIGN_SHIFT) #define XV_ALIGN_MASK (XV_ALIGN - 1) @@ -27,8 +31,16 @@ #define XV_MIN_ALLOC_SIZE 32 #define XV_MAX_ALLOC_SIZE (PAGE_SIZE - XV_ALIGN) -/* Free lists are separated by FL_DELTA bytes */ -#define FL_DELTA_SHIFT 3 +/* + * Free lists are separated by FL_DELTA bytes + * This value is 3 for 4k pages and 4 for 64k pages, for any + * other page size, a conservative (PAGE_SHIFT - 9) is used. 
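/*
 * The constants touched above feed a simple size-to-bucket mapping: requests
 * are rounded up to XV_ALIGN and the freelists sit FL_DELTA bytes apart.
 * A sketch of that arithmetic with the 64-bit / 4 KiB-page values; this
 * mirrors the shape of the mapping, not the exact xvmalloc helpers.
 */
#include <stdio.h>

#define XV_ALIGN_SHIFT	3			/* 8-byte alignment on 64-bit */
#define XV_ALIGN	(1 << XV_ALIGN_SHIFT)
#define XV_ALIGN_MASK	(XV_ALIGN - 1)

#define XV_MIN_ALLOC_SIZE 32
#define FL_DELTA_SHIFT	3			/* PAGE_SHIFT - 9 for 4 KiB pages */
#define FL_DELTA	(1 << FL_DELTA_SHIFT)
#define FL_DELTA_MASK	(FL_DELTA - 1)

static unsigned int align_size(unsigned int size)
{
	return (size + XV_ALIGN_MASK) & ~XV_ALIGN_MASK;
}

static unsigned int freelist_index(unsigned int size)
{
	if (size < XV_MIN_ALLOC_SIZE)
		size = XV_MIN_ALLOC_SIZE;
	size = (size + FL_DELTA_MASK) & ~FL_DELTA_MASK;
	return (size - XV_MIN_ALLOC_SIZE) >> FL_DELTA_SHIFT;
}

int main(void)
{
	printf("41-byte request -> bucket %u\n",
	       freelist_index(align_size(41)));
	return 0;
}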
+ */ +#if PAGE_SHIFT == 16 +#define FL_DELTA_SHIFT 4 +#else +#define FL_DELTA_SHIFT (PAGE_SHIFT - 9) +#endif #define FL_DELTA (1 << FL_DELTA_SHIFT) #define FL_DELTA_MASK (FL_DELTA - 1) #define NUM_FREE_LISTS ((XV_MAX_ALLOC_SIZE - XV_MIN_ALLOC_SIZE) \ @@ -75,12 +87,9 @@ struct block_header { struct xv_pool { ulong flbitmap; ulong slbitmap[MAX_FLI]; - spinlock_t lock; - + u64 total_pages; /* stats */ struct freelist_entry freelist[NUM_FREE_LISTS]; - - /* stats */ - u64 total_pages; + spinlock_t lock; }; #endif diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index 4bd8cbdaee76f..aab4ec482124e 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c @@ -15,6 +15,10 @@ #define KMSG_COMPONENT "zram" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#ifdef CONFIG_ZRAM_DEBUG +#define DEBUG +#endif + #include #include #include @@ -200,19 +204,13 @@ static void handle_uncompressed_page(struct zram *zram, flush_dcache_page(page); } -static int zram_read(struct zram *zram, struct bio *bio) +static void zram_read(struct zram *zram, struct bio *bio) { int i; u32 index; struct bio_vec *bvec; - if (unlikely(!zram->init_done)) { - set_bit(BIO_UPTODATE, &bio->bi_flags); - bio_endio(bio, 0); - return 0; - } - zram_stat64_inc(zram, &zram->stats.num_reads); index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT; @@ -235,7 +233,7 @@ static int zram_read(struct zram *zram, struct bio *bio) if (unlikely(!zram->table[index].page)) { pr_debug("Read before write: sector=%lu, size=%u", (ulong)(bio->bi_sector), bio->bi_size); - /* Do nothing */ + handle_zero_page(page); index++; continue; } @@ -275,29 +273,23 @@ static int zram_read(struct zram *zram, struct bio *bio) set_bit(BIO_UPTODATE, &bio->bi_flags); bio_endio(bio, 0); - return 0; + return; out: bio_io_error(bio); - return 0; } -static int zram_write(struct zram *zram, struct bio *bio) +static void zram_write(struct zram *zram, struct bio *bio) { - int i, ret; + int i; u32 index; struct bio_vec *bvec; - if (unlikely(!zram->init_done)) { - ret = zram_init_device(zram); - if (ret) - goto out; - } - zram_stat64_inc(zram, &zram->stats.num_writes); index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT; bio_for_each_segment(bvec, bio, i) { + int ret; u32 offset; size_t clen; struct zobj_header *zheader; @@ -407,11 +399,10 @@ static int zram_write(struct zram *zram, struct bio *bio) set_bit(BIO_UPTODATE, &bio->bi_flags); bio_endio(bio, 0); - return 0; + return; out: bio_io_error(bio); - return 0; } /* @@ -436,7 +427,6 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio) */ static int zram_make_request(struct request_queue *queue, struct bio *bio) { - int ret = 0; struct zram *zram = queue->queuedata; if (!valid_io_request(zram, bio)) { @@ -445,17 +435,22 @@ static int zram_make_request(struct request_queue *queue, struct bio *bio) return 0; } + if (unlikely(!zram->init_done) && zram_init_device(zram)) { + bio_io_error(bio); + return 0; + } + switch (bio_data_dir(bio)) { case READ: - ret = zram_read(zram, bio); + zram_read(zram, bio); break; case WRITE: - ret = zram_write(zram, bio); + zram_write(zram, bio); break; } - return ret; + return 0; } void zram_reset_device(struct zram *zram) @@ -624,20 +619,19 @@ static int create_device(struct zram *zram, int device_id) * and n*PAGE_SIZED sized I/O requests. 
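/*
 * The zram_read() hunk above makes a read of a never-written sector return a
 * page of zeroes instead of leaving the destination page untouched, i.e. the
 * device behaves as a sparse store.  The same semantics in miniature; struct
 * sparse_store and its sizes are invented for the example.
 */
#include <string.h>

#define PAGE_SIZE 4096

struct sparse_store {
	unsigned char *page[1024];	/* NULL entry: never written */
};

static void store_read(const struct sparse_store *s, unsigned int idx,
		       unsigned char *out)
{
	if (!s->page[idx]) {
		memset(out, 0, PAGE_SIZE);	/* read before write: zero-fill */
		return;
	}
	memcpy(out, s->page[idx], PAGE_SIZE);
}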
*/ blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE); - blk_queue_logical_block_size(zram->disk->queue, PAGE_SIZE); + blk_queue_logical_block_size(zram->disk->queue, + ZRAM_LOGICAL_BLOCK_SIZE); blk_queue_io_min(zram->disk->queue, PAGE_SIZE); blk_queue_io_opt(zram->disk->queue, PAGE_SIZE); add_disk(zram->disk); -#ifdef CONFIG_SYSFS ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj, &zram_disk_attr_group); if (ret < 0) { pr_warning("Error creating sysfs group"); goto out; } -#endif zram->init_done = 0; @@ -647,10 +641,8 @@ static int create_device(struct zram *zram, int device_id) static void destroy_device(struct zram *zram) { -#ifdef CONFIG_SYSFS sysfs_remove_group(&disk_to_dev(zram->disk)->kobj, &zram_disk_attr_group); -#endif if (zram->disk) { del_gendisk(zram->disk); diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h index a48155112b1e6..408b2c067fc9c 100644 --- a/drivers/staging/zram/zram_drv.h +++ b/drivers/staging/zram/zram_drv.h @@ -61,6 +61,7 @@ static const unsigned max_zpage_size = PAGE_SIZE / 4 * 3; #define SECTOR_SIZE (1 << SECTOR_SHIFT) #define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) #define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT) +#define ZRAM_LOGICAL_BLOCK_SIZE 4096 /* Flags for zram pages (table[page_no].flags) */ enum zram_pageflags { diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c index 6b3cf00b0ff42..a70cc010d18da 100644 --- a/drivers/staging/zram/zram_sysfs.c +++ b/drivers/staging/zram/zram_sysfs.c @@ -14,11 +14,10 @@ #include #include +#include #include "zram_drv.h" -#ifdef CONFIG_SYSFS - static u64 zram_stat64_read(struct zram *zram, u64 *v) { u64 val; @@ -67,7 +66,7 @@ static ssize_t disksize_store(struct device *dev, if (ret) return ret; - zram->disksize &= PAGE_MASK; + zram->disksize = PAGE_ALIGN(zram->disksize); set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT); return len; @@ -220,5 +219,3 @@ static struct attribute *zram_disk_attrs[] = { struct attribute_group zram_disk_attr_group = { .attrs = zram_disk_attrs, }; - -#endif /* CONFIG_SYSFS */ diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 366080baf4743..7f19c8b7b84c7 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -667,7 +667,13 @@ target_emulate_readcapacity(struct se_cmd *cmd) { struct se_device *dev = SE_DEV(cmd); unsigned char *buf = cmd->t_task->t_task_buf; - u32 blocks = dev->transport->get_blocks(dev); + unsigned long long blocks_long = dev->transport->get_blocks(dev); + u32 blocks; + + if (blocks_long >= 0x00000000ffffffff) + blocks = 0xffffffff; + else + blocks = (u32)blocks_long; buf[0] = (blocks >> 24) & 0xff; buf[1] = (blocks >> 16) & 0xff; diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 5da051a07fa30..0e0257bf5b78d 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -151,13 +151,13 @@ int transport_get_lun_for_cmd( { struct se_device *dev = se_lun->lun_se_dev; - spin_lock(&dev->stats_lock); + spin_lock_irq(&dev->stats_lock); dev->num_cmds++; if (se_cmd->data_direction == DMA_TO_DEVICE) dev->write_bytes += se_cmd->data_length; else if (se_cmd->data_direction == DMA_FROM_DEVICE) dev->read_bytes += se_cmd->data_length; - spin_unlock(&dev->stats_lock); + spin_unlock_irq(&dev->stats_lock); } /* diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 4a109835e4203..59b8b9c5ad72a 100644 --- 
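/*
 * target_emulate_readcapacity() above clamps the block count because the
 * READ CAPACITY(10) response only carries a 32-bit field; 0xffffffff tells
 * the initiator to retry with READ CAPACITY(16).  The clamp and the
 * big-endian packing in isolation; pack_capacity10() is just for illustration.
 */
#include <stdint.h>

static void pack_capacity10(uint64_t blocks, uint8_t buf[4])
{
	uint32_t v = (blocks >= 0xffffffffULL) ? 0xffffffff : (uint32_t)blocks;

	buf[0] = (v >> 24) & 0xff;	/* SCSI fields are big-endian */
	buf[1] = (v >> 16) & 0xff;
	buf[2] = (v >> 8) & 0xff;
	buf[3] = v & 0xff;
}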
a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -55,7 +55,8 @@ struct se_tmr_req *core_tmr_alloc_req( { struct se_tmr_req *tmr; - tmr = kmem_cache_zalloc(se_tmr_req_cache, GFP_KERNEL); + tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ? + GFP_ATOMIC : GFP_KERNEL); if (!(tmr)) { printk(KERN_ERR "Unable to allocate struct se_tmr_req\n"); return ERR_PTR(-ENOMEM); @@ -398,9 +399,9 @@ int core_tmr_lun_reset( printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n"); } - spin_lock(&dev->stats_lock); + spin_lock_irq(&dev->stats_lock); dev->num_resets++; - spin_unlock(&dev->stats_lock); + spin_unlock_irq(&dev->stats_lock); DEBUG_LR("LUN_RESET: %s for [%s] Complete\n", (preempt_and_abort_list) ? "Preempt" : "TMR", diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 4bbf6c147f896..d2a5768a5d93e 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -765,7 +765,6 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) transport_all_task_dev_remove_state(cmd); spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); - transport_free_dev_tasks(cmd); check_lun: spin_lock_irqsave(&lun->lun_cmd_lock, flags); @@ -1198,6 +1197,7 @@ transport_get_task_from_execute_queue(struct se_device *dev) break; list_del(&task->t_execute_list); + atomic_set(&task->task_execute_queue, 0); atomic_dec(&dev->execute_tasks); return task; @@ -1213,8 +1213,14 @@ void transport_remove_task_from_execute_queue( { unsigned long flags; + if (atomic_read(&task->task_execute_queue) == 0) { + dump_stack(); + return; + } + spin_lock_irqsave(&dev->execute_task_lock, flags); list_del(&task->t_execute_list); + atomic_set(&task->task_execute_queue, 0); atomic_dec(&dev->execute_tasks); spin_unlock_irqrestore(&dev->execute_task_lock, flags); } @@ -2062,6 +2068,13 @@ int transport_generic_handle_tmr( } EXPORT_SYMBOL(transport_generic_handle_tmr); +void transport_generic_free_cmd_intr( + struct se_cmd *cmd) +{ + transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR); +} +EXPORT_SYMBOL(transport_generic_free_cmd_intr); + static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) { struct se_task *task, *task_tmp; @@ -4777,18 +4790,20 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) sg_end_cur->page_link &= ~0x02; sg_chain(sg_head, task_sg_num, sg_head_cur); - sg_count += (task->task_sg_num + 1); - } else sg_count += task->task_sg_num; + task_sg_num = (task->task_sg_num + 1); + } else { + sg_chain(sg_head, task_sg_num, sg_head_cur); + sg_count += task->task_sg_num; + task_sg_num = task->task_sg_num; + } sg_head = sg_head_cur; sg_link = sg_link_cur; - task_sg_num = task->task_sg_num; continue; } sg_head = sg_first = &task->task_sg[0]; sg_link = &task->task_sg[task->task_sg_num]; - task_sg_num = task->task_sg_num; /* * Check for single task.. 
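/*
 * core_tmr_alloc_req() above chooses its allocation flags from the calling
 * context.  The pattern on its own; some_cache is assumed to have been
 * created elsewhere with kmem_cache_create().
 */
#include <linux/slab.h>
#include <linux/hardirq.h>

static struct kmem_cache *some_cache;

static void *alloc_req_any_context(void)
{
	/*
	 * GFP_KERNEL may sleep, which is illegal in interrupt context,
	 * so fall back to the non-sleeping GFP_ATOMIC pool there.
	 */
	return kmem_cache_zalloc(some_cache,
				 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
}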
*/ @@ -4799,9 +4814,12 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) */ sg_end = &task->task_sg[task->task_sg_num - 1]; sg_end->page_link &= ~0x02; - sg_count += (task->task_sg_num + 1); - } else sg_count += task->task_sg_num; + task_sg_num = (task->task_sg_num + 1); + } else { + sg_count += task->task_sg_num; + task_sg_num = task->task_sg_num; + } } /* * Setup the starting pointer and total t_tasks_sg_linked_no including @@ -4810,21 +4828,20 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) T_TASK(cmd)->t_tasks_sg_chained = sg_first; T_TASK(cmd)->t_tasks_sg_chained_no = sg_count; - DEBUG_CMD_M("Setup T_TASK(cmd)->t_tasks_sg_chained: %p and" - " t_tasks_sg_chained_no: %u\n", T_TASK(cmd)->t_tasks_sg_chained, + DEBUG_CMD_M("Setup cmd: %p T_TASK(cmd)->t_tasks_sg_chained: %p and" + " t_tasks_sg_chained_no: %u\n", cmd, T_TASK(cmd)->t_tasks_sg_chained, T_TASK(cmd)->t_tasks_sg_chained_no); for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg, T_TASK(cmd)->t_tasks_sg_chained_no, i) { - DEBUG_CMD_M("SG: %p page: %p length: %d offset: %d\n", - sg, sg_page(sg), sg->length, sg->offset); + DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n", + i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic); if (sg_is_chain(sg)) DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg); if (sg_is_last(sg)) DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg); } - } EXPORT_SYMBOL(transport_do_task_sg_chain); @@ -5298,6 +5315,8 @@ void transport_generic_free_cmd( if (wait_for_tasks && cmd->transport_wait_for_tasks) cmd->transport_wait_for_tasks(cmd, 0, 0); + transport_free_dev_tasks(cmd); + transport_generic_remove(cmd, release_to_pool, session_reinstatement); } @@ -6138,6 +6157,9 @@ static int transport_processing_thread(void *param) case TRANSPORT_REMOVE: transport_generic_remove(cmd, 1, 0); break; + case TRANSPORT_FREE_CMD_INTR: + transport_generic_free_cmd(cmd, 0, 1, 0); + break; case TRANSPORT_PROCESS_TMR: transport_generic_do_tmr(cmd); break; diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index aa2e5d3eb01a4..c4b0ef1a38747 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -1659,8 +1659,12 @@ static void gsm_queue(struct gsm_mux *gsm) if ((gsm->control & ~PF) == UI) gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->len); - /* generate final CRC with received FCS */ - gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->received_fcs); + if (gsm->encoding == 0){ + /* WARNING: gsm->received_fcs is used for gsm->encoding = 0 only. 
+ In this case it contain the last piece of data + required to generate final CRC */ + gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->received_fcs); + } if (gsm->fcs != GOOD_FCS) { gsm->bad_fcs++; if (debug & 4) diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 428f4fe0b5f74..426f9d283c823 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1981,7 +1981,9 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file, tty->ops->flush_chars(tty); } else { while (nr > 0) { + mutex_lock(&tty->output_lock); c = tty->ops->write(tty, b, nr); + mutex_unlock(&tty->output_lock); if (c < 0) { retval = c; goto break_out; diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c index 3975df6f7fdba..b3b881bc4712f 100644 --- a/drivers/tty/serial/8250.c +++ b/drivers/tty/serial/8250.c @@ -954,6 +954,23 @@ static int broken_efr(struct uart_8250_port *up) return 0; } +static inline int ns16550a_goto_highspeed(struct uart_8250_port *up) +{ + unsigned char status; + + status = serial_in(up, 0x04); /* EXCR2 */ +#define PRESL(x) ((x) & 0x30) + if (PRESL(status) == 0x10) { + /* already in high speed mode */ + return 0; + } else { + status &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */ + status |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ + serial_outp(up, 0x04, status); + } + return 1; +} + /* * We know that the chip has FIFOs. Does it have an EFR? The * EFR is located in the same register position as the IIR and @@ -1025,12 +1042,8 @@ static void autoconfig_16550a(struct uart_8250_port *up) quot = serial_dl_read(up); quot <<= 3; - status1 = serial_in(up, 0x04); /* EXCR2 */ - status1 &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */ - status1 |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ - serial_outp(up, 0x04, status1); - - serial_dl_write(up, quot); + if (ns16550a_goto_highspeed(up)) + serial_dl_write(up, quot); serial_outp(up, UART_LCR, 0); @@ -3025,17 +3038,13 @@ void serial8250_resume_port(int line) struct uart_8250_port *up = &serial8250_ports[line]; if (up->capabilities & UART_NATSEMI) { - unsigned char tmp; - /* Ensure it's still in high speed mode */ serial_outp(up, UART_LCR, 0xE0); - tmp = serial_in(up, 0x04); /* EXCR2 */ - tmp &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */ - tmp |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ - serial_outp(up, 0x04, tmp); + ns16550a_goto_highspeed(up); serial_outp(up, UART_LCR, 0); + up->port.uartclk = 921600*16; } uart_resume_port(&serial8250_reg, &up->port); } diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 2b8334601c8b5..4b1067f99d2f6 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -1329,6 +1329,38 @@ config SERIAL_VT8500_CONSOLE depends on SERIAL_VT8500=y select SERIAL_CORE_CONSOLE +config SERIAL_MSM_CLOCK_CONTROL + bool "Allow tty clients to make clock requests to msm uarts." + depends on SERIAL_MSM=y + default y + help + Provides an interface for tty clients to request the msm uart clock + to be turned on or off for power savings. + +config SERIAL_MSM_RX_WAKEUP + bool "Wakeup the msm uart clock on GPIO activity." + depends on SERIAL_MSM_CLOCK_CONTROL + default n + help + Requires SERIAL_MSM_CLOCK_CONTROL. Wake up the uart while the uart + clock is off, using a wakeup GPIO. + +config SERIAL_MSM_HS + tristate "MSM UART High Speed: Serial Driver" + depends on ARM && ARCH_MSM + select SERIAL_CORE + default n + help + Select this module to enable MSM high speed UART driver. 
+ +config SERIAL_BCM_BT_LPM + tristate "Broadcom Bluetooth Low Power Mode" + depends on ARM && ARCH_MSM + select SERIAL_CORE + default n + help + Select this module for Broadcom Bluetooth low power management. + config SERIAL_NETX tristate "NetX serial port support" depends on ARM && ARCH_NETX diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index 8ea92e9c73b04..41757700c465c 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile @@ -35,6 +35,7 @@ obj-$(CONFIG_SERIAL_PXA) += pxa.o obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o obj-$(CONFIG_SERIAL_SA1100) += sa1100.o obj-$(CONFIG_SERIAL_BCM63XX) += bcm63xx_uart.o +obj-$(CONFIG_SERIAL_BCM_BT_LPM) += bcm_bt_lpm.o obj-$(CONFIG_SERIAL_BFIN) += bfin_5xx.o obj-$(CONFIG_SERIAL_BFIN_SPORT) += bfin_sport_uart.o obj-$(CONFIG_SERIAL_SAMSUNG) += samsung.o @@ -76,6 +77,8 @@ obj-$(CONFIG_SERIAL_SGI_IOC3) += ioc3_serial.o obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o obj-$(CONFIG_SERIAL_MSM) += msm_serial.o +obj-$(CONFIG_MSM_SERIAL_DEBUGGER) += msm_serial_debugger.o +obj-$(CONFIG_SERIAL_MSM_HS) += msm_serial_hs.o obj-$(CONFIG_SERIAL_NETX) += netx-serial.o obj-$(CONFIG_SERIAL_OF_PLATFORM) += of_serial.o obj-$(CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL) += nwpserial.o diff --git a/drivers/tty/serial/bcm_bt_lpm.c b/drivers/tty/serial/bcm_bt_lpm.c new file mode 100644 index 0000000000000..bf5ab3320594c --- /dev/null +++ b/drivers/tty/serial/bcm_bt_lpm.c @@ -0,0 +1,176 @@ +/* + * Copyright (C) 2009 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include + +/* + * Manage WAKE and HOST_WAKE low power mode signals for Broadcom + * Bluetooth chipsets. + * + * This driver needs to be tightly coupled with a uart driver that supports + * request_clock_off_locked() and request_clock_on_locked(), to clock off and + * on the uart indepdently of Linux suspend/resume. + * + * The uart driver needs to call bcm_bt_lpm_exit_lpm_locked() every time it + * begins TX, to ensure this driver keeps WAKE asserted during TX. + * + * The callbacks and hijacking of the uart_port struct are not a clean API, + * but the Linux tty and serial core layers do not have a better alternative + * right now: there is no good way to plumb uart clock control through these + * layers. See http://lkml.org/lkml/2008/12/19/213 for more background. 
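/*
 * The bcm_bt_lpm driver introduced here keeps WAKE asserted for a while after
 * the last TX by re-arming an hrtimer, as the enter_lpm_timer code that
 * follows shows.  A reduced sketch of that timer pattern; idle_timer,
 * idle_expired() and the one-second delay are illustrative.
 */
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer idle_timer;
static ktime_t idle_delay;

static enum hrtimer_restart idle_expired(struct hrtimer *t)
{
	/* deassert the wake line / release the uart clock here */
	return HRTIMER_NORESTART;
}

static void lpm_setup(void)
{
	hrtimer_init(&idle_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	idle_timer.function = idle_expired;
	idle_delay = ktime_set(1, 0);		/* 1 s of idle before sleeping */
}

/* Call on every TX: cancel any pending expiry and start counting again. */
static void lpm_touch(void)
{
	hrtimer_try_to_cancel(&idle_timer);
	hrtimer_start(&idle_timer, idle_delay, HRTIMER_MODE_REL);
}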
+ */ + +struct bcm_bt_lpm { + unsigned int gpio_wake; + unsigned int gpio_host_wake; + + int wake; + int host_wake; + + struct hrtimer enter_lpm_timer; + ktime_t enter_lpm_delay; + + struct uart_port *uport; + + void (*request_clock_off_locked)(struct uart_port *uport); + void (*request_clock_on_locked)(struct uart_port *uport); +} bt_lpm; + +static void set_wake_locked(int wake) +{ + if (wake == bt_lpm.wake) + return; + bt_lpm.wake = wake; + + if (wake || bt_lpm.host_wake) + bt_lpm.request_clock_on_locked(bt_lpm.uport); + else + bt_lpm.request_clock_off_locked(bt_lpm.uport); + + gpio_set_value(bt_lpm.gpio_wake, wake); +} + +static enum hrtimer_restart enter_lpm(struct hrtimer *timer) { + unsigned long flags; + + spin_lock_irqsave(&bt_lpm.uport->lock, flags); + set_wake_locked(0); + spin_unlock_irqrestore(&bt_lpm.uport->lock, flags); + + return HRTIMER_NORESTART; +} + +void bcm_bt_lpm_exit_lpm_locked(struct uart_port *uport) { + bt_lpm.uport = uport; + + hrtimer_try_to_cancel(&bt_lpm.enter_lpm_timer); + + set_wake_locked(1); + + hrtimer_start(&bt_lpm.enter_lpm_timer, bt_lpm.enter_lpm_delay, + HRTIMER_MODE_REL); +} +EXPORT_SYMBOL(bcm_bt_lpm_exit_lpm_locked); + +static void update_host_wake_locked(int host_wake) +{ + if (host_wake == bt_lpm.host_wake) + return; + bt_lpm.host_wake = host_wake; + + if (bt_lpm.wake || host_wake) + bt_lpm.request_clock_on_locked(bt_lpm.uport); + else + bt_lpm.request_clock_off_locked(bt_lpm.uport); +} + +static irqreturn_t host_wake_isr(int irq, void *dev) +{ + int host_wake; + unsigned long flags; + + host_wake = gpio_get_value(bt_lpm.gpio_host_wake); + set_irq_type(irq, host_wake ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH); + + if (!bt_lpm.uport) { + bt_lpm.host_wake = host_wake; + return IRQ_HANDLED; + } + + spin_lock_irqsave(&bt_lpm.uport->lock, flags); + + update_host_wake_locked(host_wake); + + spin_unlock_irqrestore(&bt_lpm.uport->lock, flags); + + return IRQ_HANDLED; +} + +static int bcm_bt_lpm_probe(struct platform_device *pdev) +{ + int irq; + int ret; + struct bcm_bt_lpm_platform_data *pdata = pdev->dev.platform_data; + + if (bt_lpm.request_clock_off_locked != NULL) { + printk(KERN_ERR "Cannot register two bcm_bt_lpm drivers\n"); + return -EINVAL; + } + + bt_lpm.gpio_wake = pdata->gpio_wake; + bt_lpm.gpio_host_wake = pdata->gpio_host_wake; + bt_lpm.request_clock_off_locked = pdata->request_clock_off_locked; + bt_lpm.request_clock_on_locked = pdata->request_clock_on_locked; + + hrtimer_init(&bt_lpm.enter_lpm_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + bt_lpm.enter_lpm_delay = ktime_set(1, 0); /* 1 sec */ + bt_lpm.enter_lpm_timer.function = enter_lpm; + + gpio_set_value(bt_lpm.gpio_wake, 0); + bt_lpm.host_wake = 0; + + irq = gpio_to_irq(bt_lpm.gpio_host_wake); + ret = request_irq(irq, host_wake_isr, IRQF_TRIGGER_HIGH, + "bt host_wake", NULL); + if (ret) + return ret; + ret = set_irq_wake(irq, 1); + if (ret) + return ret; + + return 0; +} + +static struct platform_driver bcm_bt_lpm_driver = { + .probe = bcm_bt_lpm_probe, + .driver = { + .name = "bcm_bt_lpm", + .owner = THIS_MODULE, + }, +}; + +static int __init bcm_bt_lpm_init(void) +{ + return platform_driver_register(&bcm_bt_lpm_driver); +} + +module_init(bcm_bt_lpm_init); +MODULE_DESCRIPTION("Broadcom Bluetooth low power mode driver"); +MODULE_AUTHOR("Nick Pelly "); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index dfcf4b1878aa3..0d66751c0cbc0 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -382,12 +382,13 @@ static void 
imx_start_tx(struct uart_port *port) static irqreturn_t imx_rtsint(int irq, void *dev_id) { struct imx_port *sport = dev_id; - unsigned int val = readl(sport->port.membase + USR1) & USR1_RTSS; + unsigned int val; unsigned long flags; spin_lock_irqsave(&sport->port.lock, flags); writel(USR1_RTSD, sport->port.membase + USR1); + val = readl(sport->port.membase + USR1) & USR1_RTSS; uart_handle_cts_change(&sport->port, !!val); wake_up_interruptible(&sport->port.state->port.delta_msr_wait); diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c index b62857bf2fdbd..37e13c3d91d99 100644 --- a/drivers/tty/serial/mrst_max3110.c +++ b/drivers/tty/serial/mrst_max3110.c @@ -51,7 +51,7 @@ struct uart_max3110 { struct uart_port port; struct spi_device *spi; - char name[24]; + char name[SPI_NAME_SIZE]; wait_queue_head_t wq; struct task_struct *main_thread; diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c index 8e43a7b69e646..254939e1fe583 100644 --- a/drivers/tty/serial/msm_serial.c +++ b/drivers/tty/serial/msm_serial.c @@ -34,44 +34,175 @@ #include "msm_serial.h" +#ifdef CONFIG_SERIAL_MSM_CLOCK_CONTROL +enum msm_clk_states_e { + MSM_CLK_PORT_OFF, /* uart port not in use */ + MSM_CLK_OFF, /* clock enabled */ + MSM_CLK_REQUEST_OFF, /* disable after TX flushed */ + MSM_CLK_ON, /* clock disabled */ +}; +#endif + struct msm_port { struct uart_port uart; char name[16]; struct clk *clk; unsigned int imr; +#ifdef CONFIG_SERIAL_MSM_CLOCK_CONTROL + enum msm_clk_states_e clk_state; + struct hrtimer clk_off_timer; + ktime_t clk_off_delay; +#endif }; static void msm_stop_tx(struct uart_port *port) { struct msm_port *msm_port = UART_TO_MSM(port); + clk_enable(msm_port->clk); + msm_port->imr &= ~UART_IMR_TXLEV; msm_write(port, msm_port->imr, UART_IMR); + + clk_disable(msm_port->clk); } static void msm_start_tx(struct uart_port *port) { struct msm_port *msm_port = UART_TO_MSM(port); + clk_enable(msm_port->clk); + msm_port->imr |= UART_IMR_TXLEV; msm_write(port, msm_port->imr, UART_IMR); + + clk_disable(msm_port->clk); } static void msm_stop_rx(struct uart_port *port) { struct msm_port *msm_port = UART_TO_MSM(port); + clk_enable(msm_port->clk); + msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE); msm_write(port, msm_port->imr, UART_IMR); + + clk_disable(msm_port->clk); } static void msm_enable_ms(struct uart_port *port) { struct msm_port *msm_port = UART_TO_MSM(port); + clk_enable(msm_port->clk); + msm_port->imr |= UART_IMR_DELTA_CTS; msm_write(port, msm_port->imr, UART_IMR); + + clk_disable(msm_port->clk); +} + +#ifdef CONFIG_SERIAL_MSM_CLOCK_CONTROL +/* turn clock off if TX buffer is empty, otherwise reschedule */ +static enum hrtimer_restart msm_serial_clock_off(struct hrtimer *timer) { + struct msm_port *msm_port = container_of(timer, struct msm_port, + clk_off_timer); + struct uart_port *port = &msm_port->uart; + struct circ_buf *xmit = &port->state->xmit; + unsigned long flags; + int ret = HRTIMER_NORESTART; + + spin_lock_irqsave(&port->lock, flags); + + if (msm_port->clk_state == MSM_CLK_REQUEST_OFF) { + if (uart_circ_empty(xmit)) { + struct msm_port *msm_port = UART_TO_MSM(port); + clk_disable(msm_port->clk); + msm_port->clk_state = MSM_CLK_OFF; + } else { + hrtimer_forward_now(timer, msm_port->clk_off_delay); + ret = HRTIMER_RESTART; + } + } + + spin_unlock_irqrestore(&port->lock, flags); + + return HRTIMER_NORESTART; +} + +/* request to turn off uart clock once pending TX is flushed */ +void msm_serial_clock_request_off(struct uart_port *port) { + 
unsigned long flags; + struct msm_port *msm_port = UART_TO_MSM(port); + + spin_lock_irqsave(&port->lock, flags); + if (msm_port->clk_state == MSM_CLK_ON) { + msm_port->clk_state = MSM_CLK_REQUEST_OFF; + /* turn off TX later. unfortunately not all msm uart's have a + * TXDONE available, and TXLEV does not wait until completely + * flushed, so a timer is our only option + */ + hrtimer_start(&msm_port->clk_off_timer, + msm_port->clk_off_delay, HRTIMER_MODE_REL); + } + spin_unlock_irqrestore(&port->lock, flags); +} + +/* request to immediately turn on uart clock. + * ignored if there is a pending off request, unless force = 1. + */ +void msm_serial_clock_on(struct uart_port *port, int force) { + unsigned long flags; + struct msm_port *msm_port = UART_TO_MSM(port); + + spin_lock_irqsave(&port->lock, flags); + + switch (msm_port->clk_state) { + case MSM_CLK_OFF: + clk_enable(msm_port->clk); + force = 1; + case MSM_CLK_REQUEST_OFF: + if (force) { + hrtimer_try_to_cancel(&msm_port->clk_off_timer); + msm_port->clk_state = MSM_CLK_ON; + } + break; + case MSM_CLK_ON: break; + case MSM_CLK_PORT_OFF: break; + } + + spin_unlock_irqrestore(&port->lock, flags); +} +#endif + +#ifdef CONFIG_SERIAL_MSM_RX_WAKEUP +#define WAKE_UP_IND 0x32 +static irqreturn_t msm_rx_irq(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + struct msm_port *msm_port = UART_TO_MSM(port); + int inject_wakeup = 0; + + spin_lock(&port->lock); + + if (msm_port->clk_state == MSM_CLK_OFF) + inject_wakeup = 1; + + msm_serial_clock_on(port, 0); + + /* we missed an rx while asleep - it must be a wakeup indicator + */ + if (inject_wakeup) { + struct tty_struct *tty = port->state->port.tty; + tty_insert_flip_char(tty, WAKE_UP_IND, TTY_NORMAL); + tty_flip_buffer_push(tty); + } + + spin_unlock(&port->lock); + return IRQ_HANDLED; } +#endif static void handle_rx(struct uart_port *port) { @@ -148,6 +279,14 @@ static void handle_tx(struct uart_port *port) sent_tx = 1; } +#ifdef CONFIG_SERIAL_MSM_CLOCK_CONTROL + if (sent_tx && msm_port->clk_state == MSM_CLK_REQUEST_OFF) + /* new TX - restart the timer */ + if (hrtimer_try_to_cancel(&msm_port->clk_off_timer) == 1) + hrtimer_start(&msm_port->clk_off_timer, + msm_port->clk_off_delay, HRTIMER_MODE_REL); +#endif + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); } @@ -166,6 +305,7 @@ static irqreturn_t msm_irq(int irq, void *dev_id) unsigned int misr; spin_lock(&port->lock); + clk_enable(msm_port->clk); misr = msm_read(port, UART_MISR); msm_write(port, 0, UART_IMR); /* disable interrupt */ @@ -177,6 +317,7 @@ static irqreturn_t msm_irq(int irq, void *dev_id) handle_delta_cts(port); msm_write(port, msm_port->imr, UART_IMR); /* restore interrupt */ + clk_disable(msm_port->clk); spin_unlock(&port->lock); return IRQ_HANDLED; @@ -184,7 +325,14 @@ static irqreturn_t msm_irq(int irq, void *dev_id) static unsigned int msm_tx_empty(struct uart_port *port) { - return (msm_read(port, UART_SR) & UART_SR_TX_EMPTY) ? TIOCSER_TEMT : 0; + unsigned int ret; + struct msm_port *msm_port = UART_TO_MSM(port); + + clk_enable(msm_port->clk); + ret = (msm_read(port, UART_SR) & UART_SR_TX_EMPTY) ? 
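/*
 * Each register access in the msm_serial changes above is now bracketed by
 * clk_enable()/clk_disable() so the UART block can stay gated while idle.
 * The bracketing reduced to one accessor; uart_reg_read() and uart_clk are
 * stand-ins for msm_read() and msm_port->clk.
 */
#include <linux/clk.h>

extern unsigned int uart_reg_read(unsigned int off);
extern struct clk *uart_clk;

static unsigned int read_status_gated(void)
{
	unsigned int sr;

	clk_enable(uart_clk);		/* ungate the block around the access */
	sr = uart_reg_read(0x0008);	/* e.g. the status register */
	clk_disable(uart_clk);

	return sr;
}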
TIOCSER_TEMT : 0; + clk_disable(msm_port->clk); + + return ret; } static unsigned int msm_get_mctrl(struct uart_port *port) @@ -195,6 +343,9 @@ static unsigned int msm_get_mctrl(struct uart_port *port) static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl) { unsigned int mr; + struct msm_port *msm_port = UART_TO_MSM(port); + + clk_enable(msm_port->clk); mr = msm_read(port, UART_MR1); @@ -206,14 +357,22 @@ static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl) mr |= UART_MR1_RX_RDY_CTL; msm_write(port, mr, UART_MR1); } + + clk_disable(msm_port->clk); } static void msm_break_ctl(struct uart_port *port, int break_ctl) { + struct msm_port *msm_port = UART_TO_MSM(port); + + clk_enable(msm_port->clk); + if (break_ctl) msm_write(port, UART_CR_CMD_START_BREAK, UART_CR); else msm_write(port, UART_CR_CMD_STOP_BREAK, UART_CR); + + clk_disable(msm_port->clk); } static int msm_set_baud_rate(struct uart_port *port, unsigned int baud) @@ -307,6 +466,10 @@ static void msm_init_clock(struct uart_port *port) struct msm_port *msm_port = UART_TO_MSM(port); clk_enable(msm_port->clk); +#ifdef CONFIG_SERIAL_MSM_CLOCK_CONTROL + msm_port->clk_state = MSM_CLK_ON; +#endif + msm_serial_set_mnd_regs(port); } @@ -356,6 +519,17 @@ static int msm_startup(struct uart_port *port) UART_IMR_CURRENT_CTS; msm_write(port, msm_port->imr, UART_IMR); +#ifdef CONFIG_SERIAL_MSM_RX_WAKEUP + /* Apply the RX GPIO wake irq workaround to the bluetooth uart */ + if (port->line == 0) { /* BT is serial device 0 */ + ret = request_irq(MSM_GPIO_TO_INT(45), msm_rx_irq, + IRQF_TRIGGER_FALLING, "msm_serial0_rx", + port); + if (unlikely(ret)) + return ret; + } +#endif + return 0; } @@ -363,12 +537,27 @@ static void msm_shutdown(struct uart_port *port) { struct msm_port *msm_port = UART_TO_MSM(port); + clk_enable(msm_port->clk); + msm_port->imr = 0; msm_write(port, 0, UART_IMR); /* disable interrupts */ clk_disable(msm_port->clk); free_irq(port->irq, port); + +#ifdef CONFIG_SERIAL_MSM_RX_WAKEUP + if (port->line == 0) + free_irq(MSM_GPIO_TO_INT(45), port); +#endif + +#ifdef CONFIG_SERIAL_MSM_CLOCK_CONTROL + if (msm_port->clk_state != MSM_CLK_OFF) + clk_disable(msm_port->clk); + msm_port->clk_state = MSM_CLK_PORT_OFF; +#else + clk_disable(msm_port->clk); +#endif } static void msm_set_termios(struct uart_port *port, struct ktermios *termios, @@ -376,8 +565,10 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios, { unsigned long flags; unsigned int baud, mr; + struct msm_port *msm_port = UART_TO_MSM(port); spin_lock_irqsave(&port->lock, flags); + clk_enable(msm_port->clk); /* calculate and set baud rate */ baud = uart_get_baud_rate(port, termios, old, 300, 115200); @@ -443,6 +634,7 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios, uart_update_timeout(port, termios->c_cflag, baud); + clk_disable(msm_port->clk); spin_unlock_irqrestore(&port->lock, flags); } @@ -510,6 +702,7 @@ static int msm_verify_port(struct uart_port *port, struct serial_struct *ser) static void msm_power(struct uart_port *port, unsigned int state, unsigned int oldstate) { +#ifndef CONFIG_SERIAL_MSM_CLOCK_CONTROL struct msm_port *msm_port = UART_TO_MSM(port); switch (state) { @@ -522,6 +715,7 @@ static void msm_power(struct uart_port *port, unsigned int state, default: printk(KERN_ERR "msm_serial: Unknown PM state %d\n", state); } +#endif } static struct uart_ops msm_uart_pops = { @@ -602,7 +796,9 @@ static void msm_console_write(struct console *co, const char *s, msm_port = UART_TO_MSM(port); 
spin_lock(&port->lock); + clk_enable(msm_port->clk); uart_console_write(port, s, count, msm_console_putchar); + clk_disable(msm_port->clk); spin_unlock(&port->lock); } @@ -704,6 +900,22 @@ static int __init msm_serial_probe(struct platform_device *pdev) platform_set_drvdata(pdev, port); + if (unlikely(set_irq_wake(port->irq, 1))) + return -ENXIO; + +#ifdef CONFIG_SERIAL_MSM_RX_WAKEUP + if (port->line == 0) /* BT is serial device 0 */ + if (unlikely(set_irq_wake(MSM_GPIO_TO_INT(45), 1))) + return -ENXIO; +#endif + +#ifdef CONFIG_SERIAL_MSM_CLOCK_CONTROL + msm_port->clk_state = MSM_CLK_PORT_OFF; + hrtimer_init(&msm_port->clk_off_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + msm_port->clk_off_timer.function = msm_serial_clock_off; + msm_port->clk_off_delay = ktime_set(0, 1000000); /* 1 ms */ +#endif + return uart_add_one_port(&msm_uart_driver, port); } diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h index f6ca9ca79e98f..059a69efc14bd 100644 --- a/drivers/tty/serial/msm_serial.h +++ b/drivers/tty/serial/msm_serial.h @@ -114,6 +114,7 @@ #define UART_MISR 0x0010 #define UART_ISR 0x0014 +#ifndef BUILD_SERIAL_DEBUGGER #define UART_TO_MSM(uart_port) ((struct msm_port *) uart_port) static inline @@ -169,5 +170,6 @@ void msm_serial_set_mnd_regs_from_uartclk(struct uart_port *port) #else #define msm_serial_set_mnd_regs msm_serial_set_mnd_regs_from_uartclk #endif +#endif #endif /* __DRIVERS_SERIAL_MSM_SERIAL_H */ diff --git a/drivers/tty/serial/msm_serial_debugger.c b/drivers/tty/serial/msm_serial_debugger.c new file mode 100644 index 0000000000000..f3214d26c74c6 --- /dev/null +++ b/drivers/tty/serial/msm_serial_debugger.c @@ -0,0 +1,687 @@ +/* + * drivers/serial/msm_serial_debuger.c + * + * Serial Debugger Interface for MSM7K + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#define BUILD_SERIAL_DEBUGGER +#include "msm_serial.h" + +#include + +static void sleep_timer_expired(unsigned long); + +static unsigned int debug_port_base; +static int debug_signal_irq; +static struct clk *debug_clk; +static bool debug_clk_enabled; +static bool ignore_next_wakeup_irq; +#ifdef CONFIG_MSM_SERIAL_DEBUGGER_NO_SLEEP +static int no_sleep = true; +#else +static int no_sleep; +#endif +static DEFINE_TIMER(sleep_timer, sleep_timer_expired, 0, 0); +static int debug_enable; +static int debugger_enable; +static struct wake_lock debugger_wake_lock; +static struct { + unsigned int base; + int irq; + struct device *clk_device; + int signal_irq; + int wakeup_irq; +} init_data; + +module_param(no_sleep, bool, 0644); + +#ifdef CONFIG_MSM_SERIAL_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON +static inline void enable_wakeup_irq(unsigned int irq) {} +static inline void disable_wakeup_irq(unsigned int irq) {} +#else +static inline void enable_wakeup_irq(unsigned int irq) {enable_irq(irq);} +static inline void disable_wakeup_irq(unsigned int irq) + {disable_irq_nosync(irq);} +#endif + + +static inline void msm_write(unsigned int val, unsigned int off) +{ + __raw_writel(val, debug_port_base + off); +} + +static inline unsigned int msm_read(unsigned int off) +{ + return __raw_readl(debug_port_base + off); +} + +static void debug_port_init(void) +{ + /* reset everything */ + msm_write(UART_CR_CMD_RESET_RX, UART_CR); + msm_write(UART_CR_CMD_RESET_TX, UART_CR); + msm_write(UART_CR_CMD_RESET_ERR, UART_CR); + msm_write(UART_CR_CMD_RESET_BREAK_INT, UART_CR); + msm_write(UART_CR_CMD_RESET_CTS, UART_CR); + msm_write(UART_CR_CMD_SET_RFR, UART_CR); + + /* setup clock dividers */ +#ifdef CONFIG_ARCH_MSM_SCORPION + if (clk_get_rate(debug_clk) == 19200000) { + /* clock is TCXO (19.2MHz) */ + msm_write(0x06, UART_MREG); + msm_write(0xF1, UART_NREG); + msm_write(0x0F, UART_DREG); + msm_write(0x1A, UART_MNDREG); + } else +#endif + { + /* clock must be TCXO/4 */ + msm_write(0xC0, UART_MREG); + msm_write(0xB2, UART_NREG); + msm_write(0x7D, UART_DREG); + msm_write(0x1C, UART_MNDREG); + } + + msm_write(UART_CSR_115200, UART_CSR); + + /* rx interrupt on every character -- keep it simple */ + msm_write(0, UART_RFWR); + + /* enable TX and RX */ + msm_write(0x05, UART_CR); + + /* enable RX interrupt */ + msm_write(UART_IMR_RXLEV, UART_IMR); +} + +static inline int debug_getc(void) +{ + if (msm_read(UART_SR) & UART_SR_RX_READY) { + return msm_read(UART_RF); + } else { + return -1; + } +} + +static inline void debug_putc(unsigned int c) +{ + while (!(msm_read(UART_SR) & UART_SR_TX_READY)) ; + msm_write(c, UART_TF); +} + +static inline void debug_flush(void) +{ + while (!(msm_read(UART_SR) & UART_SR_TX_EMPTY)) ; +} + +static void debug_puts(char *s) +{ + unsigned c; + while ((c = *s++)) { + if (c == '\n') + debug_putc('\r'); + debug_putc(c); + } +} + +static void debug_prompt(void) +{ + debug_puts("debug> "); +} + +int log_buf_copy(char *dest, int idx, int len); +static void dump_kernel_log(void) +{ + char buf[1024]; + int idx = 0; + int ret; + int saved_oip; + + /* setting oops_in_progress prevents log_buf_copy() + * from trying to take a spinlock which will make it + * very unhappy in some cases... 
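/*
 * The debugger above does all of its console output by polling the FIFO
 * status.  The essence of debug_putc()/debug_puts() with stand-in helpers:
 * uart_status(), uart_write_char() and TX_READY are illustrative, not the
 * real msm_serial register names.
 */
extern unsigned int uart_status(void);
extern void uart_write_char(unsigned int c);

#define TX_READY	(1u << 2)	/* illustrative status bit */

static void poll_putc(unsigned int c)
{
	while (!(uart_status() & TX_READY))
		;			/* busy-wait for FIFO space */
	uart_write_char(c);
}

static void poll_puts(const char *s)
{
	unsigned int c;

	while ((c = *s++)) {
		if (c == '\n')
			poll_putc('\r');	/* raw UART: CR before LF */
		poll_putc(c);
	}
}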
+ */ + saved_oip = oops_in_progress; + oops_in_progress = 1; + for (;;) { + ret = log_buf_copy(buf, idx, 1023); + if (ret <= 0) + break; + buf[ret] = 0; + debug_puts(buf); + idx += ret; + } + oops_in_progress = saved_oip; +} + +static char *mode_name(unsigned cpsr) +{ + switch (cpsr & MODE_MASK) { + case USR_MODE: return "USR"; + case FIQ_MODE: return "FIQ"; + case IRQ_MODE: return "IRQ"; + case SVC_MODE: return "SVC"; + case ABT_MODE: return "ABT"; + case UND_MODE: return "UND"; + case SYSTEM_MODE: return "SYS"; + default: return "???"; + } +} + +#define DEBUG_MAX 64 +static char debug_cmd[DEBUG_MAX]; +static int debug_busy; +static int debug_abort; + +static int debug_printf(void *cookie, const char *fmt, ...) +{ + char buf[256]; + va_list ap; + + va_start(ap, fmt); + vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + + debug_puts(buf); + return debug_abort; +} + +/* Safe outside fiq context */ +static int debug_printf_nfiq(void *cookie, const char *fmt, ...) +{ + char buf[256]; + va_list ap; + unsigned long irq_flags; + + va_start(ap, fmt); + vsnprintf(buf, 128, fmt, ap); + va_end(ap); + + local_irq_save(irq_flags); + debug_puts(buf); + debug_flush(); + local_irq_restore(irq_flags); + return debug_abort; +} + +#define dprintf(fmt...) debug_printf(0, fmt) + +unsigned int last_irqs[NR_IRQS]; + +static void dump_regs(unsigned *regs) +{ + dprintf(" r0 %08x r1 %08x r2 %08x r3 %08x\n", + regs[0], regs[1], regs[2], regs[3]); + dprintf(" r4 %08x r5 %08x r6 %08x r7 %08x\n", + regs[4], regs[5], regs[6], regs[7]); + dprintf(" r8 %08x r9 %08x r10 %08x r11 %08x mode %s\n", + regs[8], regs[9], regs[10], regs[11], + mode_name(regs[16])); + if ((regs[16] & MODE_MASK) == USR_MODE) + dprintf(" ip %08x sp %08x lr %08x pc %08x cpsr %08x\n", + regs[12], regs[13], regs[14], regs[15], regs[16]); + else + dprintf(" ip %08x sp %08x lr %08x pc %08x cpsr %08x " + "spsr %08x\n", regs[12], regs[13], regs[14], regs[15], + regs[16], regs[17]); +} + +struct mode_regs { + unsigned long sp_svc; + unsigned long lr_svc; + unsigned long spsr_svc; + + unsigned long sp_abt; + unsigned long lr_abt; + unsigned long spsr_abt; + + unsigned long sp_und; + unsigned long lr_und; + unsigned long spsr_und; + + unsigned long sp_irq; + unsigned long lr_irq; + unsigned long spsr_irq; + + unsigned long r8_fiq; + unsigned long r9_fiq; + unsigned long r10_fiq; + unsigned long r11_fiq; + unsigned long r12_fiq; + unsigned long sp_fiq; + unsigned long lr_fiq; + unsigned long spsr_fiq; +}; + +void __naked get_mode_regs(struct mode_regs *regs) +{ + asm volatile ( + "mrs r1, cpsr\n" + "msr cpsr_c, #0xd3 @(SVC_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r13 - r14}\n" + "mrs r2, spsr\n" + "msr cpsr_c, #0xd7 @(ABT_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r2, r13 - r14}\n" + "mrs r2, spsr\n" + "msr cpsr_c, #0xdb @(UND_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r2, r13 - r14}\n" + "mrs r2, spsr\n" + "msr cpsr_c, #0xd2 @(IRQ_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r2, r13 - r14}\n" + "mrs r2, spsr\n" + "msr cpsr_c, #0xd1 @(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r2, r8 - r14}\n" + "mrs r2, spsr\n" + "stmia r0!, {r2}\n" + "msr cpsr_c, r1\n" + "bx lr\n"); +} + + +static void dump_allregs(unsigned *regs) +{ + struct mode_regs mode_regs; + dump_regs(regs); + get_mode_regs(&mode_regs); + dprintf(" svc: sp %08x lr %08x spsr %08x\n", + mode_regs.sp_svc, mode_regs.lr_svc, mode_regs.spsr_svc); + dprintf(" abt: sp %08x lr %08x spsr %08x\n", + mode_regs.sp_abt, mode_regs.lr_abt, mode_regs.spsr_abt); + dprintf(" und: sp %08x 
lr %08x spsr %08x\n", + mode_regs.sp_und, mode_regs.lr_und, mode_regs.spsr_und); + dprintf(" irq: sp %08x lr %08x spsr %08x\n", + mode_regs.sp_irq, mode_regs.lr_irq, mode_regs.spsr_irq); + dprintf(" fiq: r8 %08x r9 %08x r10 %08x r11 %08x r12 %08x\n", + mode_regs.r8_fiq, mode_regs.r9_fiq, mode_regs.r10_fiq, + mode_regs.r11_fiq, mode_regs.r12_fiq); + dprintf(" fiq: sp %08x lr %08x spsr %08x\n", + mode_regs.sp_fiq, mode_regs.lr_fiq, mode_regs.spsr_fiq); +} + +static void dump_irqs(void) +{ + int n; + dprintf("irqnr total since-last status name\n"); + for (n = 1; n < NR_IRQS; n++) { + struct irqaction *act = irq_desc[n].action; + if (!act && !kstat_irqs(n)) + continue; + dprintf("%5d: %10u %11u %8x %s\n", n, + kstat_irqs(n), + kstat_irqs(n) - last_irqs[n], + irq_desc[n].status, + (act && act->name) ? act->name : "???"); + last_irqs[n] = kstat_irqs(n); + } +} + +static int report_trace(struct stackframe *frame, void *d) +{ + unsigned int *depth = d; + + if (*depth) { + dprintf(" pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n", + frame->pc, frame->pc, frame->lr, frame->lr, + frame->sp, frame->fp); + (*depth)--; + return 0; + } + dprintf(" ...\n"); + + return *depth == 0; +} + +struct frame_tail { + struct frame_tail *fp; + unsigned long sp; + unsigned long lr; +} __attribute__((packed)); + +static struct frame_tail *user_backtrace(struct frame_tail *tail) +{ + struct frame_tail buftail[2]; + + /* Also check accessibility of one struct frame_tail beyond */ + if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) { + dprintf(" invalid frame pointer %p\n", tail); + return NULL; + } + if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) { + dprintf(" failed to copy frame pointer %p\n", tail); + return NULL; + } + + dprintf(" %p\n", buftail[0].lr); + + /* frame pointers should strictly progress back up the stack + * (towards higher addresses) */ + if (tail >= buftail[0].fp) + return NULL; + + return buftail[0].fp-1; +} + +void dump_stacktrace(struct pt_regs * const regs, unsigned int depth, void *ssp) +{ + struct frame_tail *tail; + struct thread_info *real_thread_info = (struct thread_info *) + ((unsigned long)ssp & ~(THREAD_SIZE - 1)); + + *current_thread_info() = *real_thread_info; + + if (!current) + dprintf("current NULL\n"); + else + dprintf("pid: %d comm: %s\n", current->pid, current->comm); + dump_regs((unsigned *)regs); + + if (!user_mode(regs)) { + struct stackframe frame; + frame.fp = regs->ARM_fp; + frame.sp = regs->ARM_sp; + frame.lr = regs->ARM_lr; + frame.pc = regs->ARM_pc; + dprintf(" pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n", + regs->ARM_pc, regs->ARM_pc, regs->ARM_lr, regs->ARM_lr, + regs->ARM_sp, regs->ARM_fp); + walk_stackframe(&frame, report_trace, &depth); + return; + } + + tail = ((struct frame_tail *) regs->ARM_fp) - 1; + while (depth-- && tail && !((unsigned long) tail & 3)) + tail = user_backtrace(tail); +} + +static void debug_exec(const char *cmd, unsigned *regs, void *svc_sp) +{ + if (!strcmp(cmd, "pc")) { + dprintf(" pc %08x cpsr %08x mode %s\n", + regs[15], regs[16], mode_name(regs[16])); + } else if (!strcmp(cmd, "regs")) { + dump_regs(regs); + } else if (!strcmp(cmd, "allregs")) { + dump_allregs(regs); + } else if (!strcmp(cmd, "bt")) { + dump_stacktrace((struct pt_regs *)regs, 100, svc_sp); + } else if (!strcmp(cmd, "reboot")) { + if (msm_hw_reset_hook) + msm_hw_reset_hook(); + } else if (!strcmp(cmd, "irqs")) { + dump_irqs(); + } else if (!strcmp(cmd, "kmsg")) { + dump_kernel_log(); + } else if (!strcmp(cmd, "version")) { + dprintf("%s\n", linux_banner); + } else 
if (!strcmp(cmd, "sleep")) { + no_sleep = false; + } else if (!strcmp(cmd, "nosleep")) { + no_sleep = true; + } else { + if (debug_busy) { + dprintf("command processor busy. trying to abort.\n"); + debug_abort = -1; + } else { + strcpy(debug_cmd, cmd); + debug_busy = 1; + } + msm_trigger_irq(debug_signal_irq); + return; + } + debug_prompt(); +} + +static void sleep_timer_expired(unsigned long data) +{ + if (debug_clk_enabled && !no_sleep) { + if (debug_enable) { + debug_enable = 0; + debug_printf_nfiq(NULL, + "suspending fiq debugger\n"); + } + ignore_next_wakeup_irq = true; + clk_disable(debug_clk); + debug_clk_enabled = false; + enable_wakeup_irq(init_data.wakeup_irq); + set_irq_wake(init_data.wakeup_irq, 1); + } + wake_unlock(&debugger_wake_lock); +} + +static irqreturn_t wakeup_irq_handler(int irq, void *dev) +{ + if (ignore_next_wakeup_irq) + ignore_next_wakeup_irq = false; + else if (!debug_clk_enabled) { + wake_lock(&debugger_wake_lock); + clk_enable(debug_clk); + debug_clk_enabled = true; + set_irq_wake(irq, 0); + disable_wakeup_irq(irq); + mod_timer(&sleep_timer, jiffies + HZ / 2); + } + return IRQ_HANDLED; +} + +static irqreturn_t debug_irq(int irq, void *dev) +{ + if (!no_sleep) { + wake_lock(&debugger_wake_lock); + mod_timer(&sleep_timer, jiffies + HZ * 5); + } + if (debug_busy) { + struct kdbg_ctxt ctxt; + + ctxt.printf = debug_printf_nfiq; + kernel_debugger(&ctxt, debug_cmd); + debug_prompt(); + + debug_busy = 0; + } + return IRQ_HANDLED; +} + +static char debug_buf[DEBUG_MAX]; +static int debug_count; + +static void debug_fiq(void *data, void *regs, void *svc_sp) +{ + int c; + static int last_c; + + while ((c = debug_getc()) != -1) { + if (!debug_enable) { + if ((c == 13) || (c == 10)) { + debug_enable = true; + debug_count = 0; + debug_prompt(); + } + } else if ((c >= ' ') && (c < 127)) { + if (debug_count < (DEBUG_MAX - 1)) { + debug_buf[debug_count++] = c; + debug_putc(c); + } + } else if ((c == 8) || (c == 127)) { + if (debug_count > 0) { + debug_count--; + debug_putc(8); + debug_putc(' '); + debug_putc(8); + } + } else if ((c == 13) || (c == 10)) { + if (c == '\r' || (c == '\n' && last_c != '\r')) { + debug_putc('\r'); + debug_putc('\n'); + } + if (debug_count) { + debug_buf[debug_count] = 0; + debug_count = 0; + debug_exec(debug_buf, regs, svc_sp); + } else { + debug_prompt(); + } + } + last_c = c; + } + debug_flush(); + if (debug_enable && !no_sleep) + msm_trigger_irq(debug_signal_irq); /* poke sleep timer */ +} + +#if defined(CONFIG_MSM_SERIAL_DEBUGGER_CONSOLE) +static void debug_console_write(struct console *co, + const char *s, unsigned int count) +{ + unsigned long irq_flags; + + /* disable irq's while TXing outside of FIQ context */ + local_irq_save(irq_flags); + while (count--) { + if (*s == '\n') + debug_putc('\r'); + debug_putc(*s++); + } + debug_flush(); + local_irq_restore(irq_flags); +} + +static struct console msm_serial_debug_console = { + .name = "debug_console", + .write = debug_console_write, + .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_ENABLED, +}; +#endif + +void msm_serial_debug_enable(int enable) { + debug_enable = enable; +} + +void msm_serial_debug_init(unsigned int base, int irq, + struct device *clk_device, int signal_irq, int wakeup_irq) +{ + int ret; + void *port; + + debug_clk = clk_get(clk_device, "uart_clk"); + if (!debug_clk) + return; + + port = ioremap(base, 4096); + if (!port) + return; + + wake_lock_init(&debugger_wake_lock, WAKE_LOCK_SUSPEND, "serial-debug"); + + init_data.base = base; + init_data.irq = irq; + init_data.clk_device 
= clk_device; + init_data.signal_irq = signal_irq; + init_data.wakeup_irq = wakeup_irq; + debug_port_base = (unsigned int) port; + debug_signal_irq = signal_irq; + clk_enable(debug_clk); + debug_port_init(); + + debug_printf_nfiq(NULL, "\n", + no_sleep ? "" : "twice "); + ignore_next_wakeup_irq = !no_sleep; + + msm_fiq_select(irq); + msm_fiq_set_handler(debug_fiq, 0); + msm_fiq_enable(irq); + clk_disable(debug_clk); + + ret = request_irq(signal_irq, debug_irq, + IRQF_TRIGGER_RISING, "debug", 0); + if (ret) + printk(KERN_ERR + "serial_debugger: could not install signal_irq"); + + ret = set_irq_wake(wakeup_irq, 1); + if (ret) + pr_err("serial_debugger: could not enable wakeup\n"); + ret = request_irq(wakeup_irq, wakeup_irq_handler, + IRQF_TRIGGER_FALLING | IRQF_DISABLED, + "debug-wakeup", 0); + if (ret) + pr_err("serial_debugger: could not install wakeup irq\n"); + if (no_sleep) + wakeup_irq_handler(wakeup_irq, 0); + +#if defined(CONFIG_MSM_SERIAL_DEBUGGER_CONSOLE) + register_console(&msm_serial_debug_console); + clk_enable(debug_clk); +#endif + debugger_enable = 1; +} +static int msm_serial_debug_remove(const char *val, struct kernel_param *kp) +{ + int ret; + static int pre_stat = 1; + ret = param_set_bool(val, kp); + if (ret) + return ret; + + if (pre_stat == *(int *)kp->arg) + return 0; + + pre_stat = *(int *)kp->arg; + + if (*(int *)kp->arg) { + msm_serial_debug_init(init_data.base, init_data.irq, + init_data.clk_device, init_data.signal_irq, + init_data.wakeup_irq); + printk(KERN_INFO "enable FIQ serial debugger\n"); + return 0; + } + +#if defined(CONFIG_MSM_SERIAL_DEBUGGER_CONSOLE) + unregister_console(&msm_serial_debug_console); + clk_disable(debug_clk); +#endif + free_irq(init_data.wakeup_irq, 0); + free_irq(init_data.signal_irq, 0); + msm_fiq_set_handler(NULL, 0); + msm_fiq_disable(init_data.irq); + msm_fiq_unselect(init_data.irq); + if (debug_clk_enabled) + clk_disable(debug_clk); + wake_lock_destroy(&debugger_wake_lock); + printk(KERN_INFO "disable FIQ serial debugger\n"); + return 0; +} +module_param_call(enable, msm_serial_debug_remove, param_get_bool, + &debugger_enable, S_IWUSR | S_IRUGO); diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c new file mode 100644 index 0000000000000..305b8b885cad7 --- /dev/null +++ b/drivers/tty/serial/msm_serial_hs.c @@ -0,0 +1,1555 @@ +/* drivers/serial/msm_serial_hs.c + * + * MSM 7k/8k High speed uart driver + * + * Copyright (c) 2007-2008 QUALCOMM Incorporated. + * Copyright (c) 2008 QUALCOMM USA, INC. + * Copyright (c) 2008 Google Inc. + * Modified: Nick Pelly + * + * All source code in this file is licensed under the following license + * except where indicated. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can find it at http://www.fsf.org + */ + +/* + * MSM 7k/8k High speed uart driver + * + * Has optional support for uart power management independent of linux + * suspend/resume: + * + * RX wakeup. + * UART wakeup can be triggered by RX activity (using a wakeup GPIO on the + * UART RX pin). 
This should only be used if there is not a wakeup + * GPIO on the UART CTS, and the first RX byte is known (for example, with the + * Bluetooth Texas Instruments HCILL protocol), since the first RX byte will + * always be lost. RTS will be asserted even while the UART is off in this mode + * of operation. See msm_serial_hs_platform_data.rx_wakeup_irq. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include "msm_serial_hs_hwreg.h" + +enum flush_reason { + FLUSH_NONE, + FLUSH_DATA_READY, + FLUSH_DATA_INVALID, /* values after this indicate invalid data */ + FLUSH_IGNORE = FLUSH_DATA_INVALID, + FLUSH_STOP, + FLUSH_SHUTDOWN, +}; + +enum msm_hs_clk_states_e { + MSM_HS_CLK_PORT_OFF, /* port not in use */ + MSM_HS_CLK_OFF, /* clock disabled */ + MSM_HS_CLK_REQUEST_OFF, /* disable after TX and RX flushed */ + MSM_HS_CLK_ON, /* clock enabled */ +}; + +/* Track the forced RXSTALE flush during clock off sequence. + * These states are only valid during MSM_HS_CLK_REQUEST_OFF */ +enum msm_hs_clk_req_off_state_e { + CLK_REQ_OFF_START, + CLK_REQ_OFF_RXSTALE_ISSUED, + CLK_REQ_OFF_FLUSH_ISSUED, + CLK_REQ_OFF_RXSTALE_FLUSHED, +}; + +struct msm_hs_tx { + unsigned int tx_ready_int_en; /* ok to dma more tx */ + unsigned int dma_in_flight; /* tx dma in progress */ + struct msm_dmov_cmd xfer; + dmov_box *command_ptr; + u32 *command_ptr_ptr; + dma_addr_t mapped_cmd_ptr; + dma_addr_t mapped_cmd_ptr_ptr; + int tx_count; + dma_addr_t dma_base; +}; + +struct msm_hs_rx { + enum flush_reason flush; + struct msm_dmov_cmd xfer; + dma_addr_t cmdptr_dmaaddr; + dmov_box *command_ptr; + u32 *command_ptr_ptr; + dma_addr_t mapped_cmd_ptr; + wait_queue_head_t wait; + dma_addr_t rbuffer; + unsigned char *buffer; + struct dma_pool *pool; + struct wake_lock wake_lock; + struct work_struct tty_work; +}; + +/* optional RX GPIO IRQ low power wakeup */ +struct msm_hs_rx_wakeup { + int irq; /* < 0 indicates low power wakeup disabled */ + unsigned char ignore; /* bool */ + + /* bool: inject char into rx tty on wakeup */ + unsigned char inject_rx; + char rx_to_inject; +}; + +struct msm_hs_port { + struct uart_port uport; + unsigned long imr_reg; /* shadow value of UARTDM_IMR */ + struct clk *clk; + struct msm_hs_tx tx; + struct msm_hs_rx rx; + + int dma_tx_channel; + int dma_rx_channel; + int dma_tx_crci; + int dma_rx_crci; + + struct hrtimer clk_off_timer; /* to poll TXEMT before clock off */ + ktime_t clk_off_delay; + enum msm_hs_clk_states_e clk_state; + enum msm_hs_clk_req_off_state_e clk_req_off_state; + + struct msm_hs_rx_wakeup rx_wakeup; + /* optional callback to exit low power mode */ + void (*exit_lpm_cb)(struct uart_port *); + + struct wake_lock dma_wake_lock; /* held while any DMA active */ +}; + +#define MSM_UARTDM_BURST_SIZE 16 /* DM burst size (in bytes) */ +#define UARTDM_TX_BUF_SIZE UART_XMIT_SIZE +#define UARTDM_RX_BUF_SIZE 512 + +#define UARTDM_NR 2 + +static struct msm_hs_port q_uart_port[UARTDM_NR]; +static struct platform_driver msm_serial_hs_platform_driver; +static struct uart_driver msm_hs_driver; +static struct uart_ops msm_hs_ops; +static struct workqueue_struct *msm_hs_workqueue; + +#define UARTDM_TO_MSM(uart_port) \ + container_of((uart_port), struct msm_hs_port, uport) + +static inline unsigned int use_low_power_rx_wakeup(struct msm_hs_port *msm_uport) +{ + return (msm_uport->rx_wakeup.irq >= 0); +} + 
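The rx_wakeup and exit_lpm_cb members above are filled in from msm_serial_hs_platform_data by msm_hs_probe() later in this file. The sketch below is only an illustration of how a board file could feed that probe routine and is not part of this patch: every address, IRQ, GPIO, DMA channel and CRCI number is a placeholder, and MSM_GPIO_TO_INT() together with the platform-data structure definition come from mach-msm headers that are not shown here.

/*
 * Illustrative board wiring for one UARTDM port used as a Bluetooth
 * HCILL UART -- all numbers below are placeholders.
 */
#include <linux/platform_device.h>
#include <linux/ioport.h>

static struct resource bt_uart_resources[] = {
	{
		.start = 0xa0200000,		/* UARTDM register base (placeholder) */
		.end   = 0xa0200fff,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = 22,			/* UARTDM interrupt (placeholder) */
		.end   = 22,
		.flags = IORESOURCE_IRQ,
	},
	{
		.name  = "uartdm_channels",
		.start = 4,			/* dma_tx_channel (placeholder) */
		.end   = 5,			/* dma_rx_channel (placeholder) */
		.flags = IORESOURCE_DMA,
	},
	{
		.name  = "uartdm_crci",
		.start = 6,			/* dma_tx_crci (placeholder) */
		.end   = 7,			/* dma_rx_crci (placeholder) */
		.flags = IORESOURCE_DMA,
	},
};

static struct msm_serial_hs_platform_data bt_uart_pdata = {
	.rx_wakeup_irq       = MSM_GPIO_TO_INT(45),	/* wakeup GPIO on UART RX (placeholder) */
	.inject_rx_on_wakeup = 1,
	.rx_to_inject        = 0x32,	/* e.g. an HCILL wake-up indication byte */
	.exit_lpm_cb         = NULL,
};

static struct platform_device bt_uart_device = {
	.name          = "msm_serial_hs",
	.id            = 0,		/* becomes uport->line, i.e. ttyHS0 */
	.num_resources = ARRAY_SIZE(bt_uart_resources),
	.resource      = bt_uart_resources,
	.dev           = {
		.platform_data = &bt_uart_pdata,
	},
};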
+static inline unsigned int msm_hs_read(struct uart_port *uport, + unsigned int offset) +{ + return ioread32(uport->membase + offset); +} + +static inline void msm_hs_write(struct uart_port *uport, unsigned int offset, + unsigned int value) +{ + iowrite32(value, uport->membase + offset); +} + +static void msm_hs_release_port(struct uart_port *port) +{ +} + +static int msm_hs_request_port(struct uart_port *port) +{ + return 0; +} + +static int __devexit msm_hs_remove(struct platform_device *pdev) +{ + + struct msm_hs_port *msm_uport; + struct device *dev; + + if (pdev->id < 0 || pdev->id >= UARTDM_NR) { + printk(KERN_ERR "Invalid plaform device ID = %d\n", pdev->id); + return -EINVAL; + } + + msm_uport = &q_uart_port[pdev->id]; + dev = msm_uport->uport.dev; + + dma_unmap_single(dev, msm_uport->rx.mapped_cmd_ptr, sizeof(dmov_box), + DMA_TO_DEVICE); + dma_pool_free(msm_uport->rx.pool, msm_uport->rx.buffer, + msm_uport->rx.rbuffer); + dma_pool_destroy(msm_uport->rx.pool); + + dma_unmap_single(dev, msm_uport->rx.cmdptr_dmaaddr, sizeof(u32 *), + DMA_TO_DEVICE); + dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr_ptr, sizeof(u32 *), + DMA_TO_DEVICE); + dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr, sizeof(dmov_box), + DMA_TO_DEVICE); + + wake_lock_destroy(&msm_uport->rx.wake_lock); + wake_lock_destroy(&msm_uport->dma_wake_lock); + + uart_remove_one_port(&msm_hs_driver, &msm_uport->uport); + clk_put(msm_uport->clk); + + /* Free the tx resources */ + kfree(msm_uport->tx.command_ptr); + kfree(msm_uport->tx.command_ptr_ptr); + + /* Free the rx resources */ + kfree(msm_uport->rx.command_ptr); + kfree(msm_uport->rx.command_ptr_ptr); + + iounmap(msm_uport->uport.membase); + + return 0; +} + +static int msm_hs_init_clk_locked(struct uart_port *uport) +{ + int ret; + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + wake_lock(&msm_uport->dma_wake_lock); + ret = clk_enable(msm_uport->clk); + if (ret) { + printk(KERN_ERR "Error could not turn on UART clk\n"); + return ret; + } + + /* Set up the MREG/NREG/DREG/MNDREG */ + ret = clk_set_rate(msm_uport->clk, uport->uartclk); + if (ret) { + printk(KERN_WARNING "Error setting clock rate on UART\n"); + return ret; + } + + msm_uport->clk_state = MSM_HS_CLK_ON; + return 0; +} + +/* Enable and Disable clocks (Used for power management) */ +static void msm_hs_pm(struct uart_port *uport, unsigned int state, + unsigned int oldstate) +{ + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + if (use_low_power_rx_wakeup(msm_uport) || msm_uport->exit_lpm_cb) + return; /* ignore linux PM states, use msm_hs_request_clock API */ + + switch (state) { + case 0: + clk_enable(msm_uport->clk); + break; + case 3: + clk_disable(msm_uport->clk); + break; + default: + printk(KERN_ERR "msm_serial: Unknown PM state %d\n", state); + } +} + +/* + * programs the UARTDM_CSR register with correct bit rates + * + * Interrupts should be disabled before we are called, as + * we modify Set Baud rate + * Set receive stale interrupt level, dependant on Bit Rate + * Goal is to have around 8 ms before indicate stale. 
+ * roundup (((Bit Rate * .008) / 10) + 1 + */ +static void msm_hs_set_bps_locked(struct uart_port *uport, + unsigned int bps) +{ + unsigned long rxstale; + unsigned long data; + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + switch (bps) { + case 300: + msm_hs_write(uport, UARTDM_CSR_ADDR, 0x00); + rxstale = 1; + break; + case 600: + msm_hs_write(uport, UARTDM_CSR_ADDR, 0x11); + rxstale = 1; + break; + case 1200: + msm_hs_write(uport, UARTDM_CSR_ADDR, 0x22); + rxstale = 1; + break; + case 2400: + msm_hs_write(uport, UARTDM_CSR_ADDR, 0x33); + rxstale = 1; + break; + case 4800: + msm_hs_write(uport, UARTDM_CSR_ADDR, 0x44); + rxstale = 1; + break; + case 9600: + msm_hs_write(uport, UARTDM_CSR_ADDR, 0x55); + rxstale = 2; + break; + case 14400: + msm_hs_write(uport, UARTDM_CSR_ADDR, 0x66); + rxstale = 3; + break; + case 19200: + msm_hs_write(uport, UARTDM_CSR_ADDR, 0x77); + rxstale = 4; + break; + case 28800: + msm_hs_write(uport, UARTDM_CSR_ADDR, 0x88); + rxstale = 6; + break; + case 38400: + msm_hs_write(uport, UARTDM_CSR_ADDR, 0x99); + rxstale = 8; + break; + case 57600: + msm_hs_write(uport, UARTDM_CSR_ADDR, 0xaa); + rxstale = 16; + break; + case 76800: + msm_hs_write(uport, UARTDM_CSR_ADDR, 0xbb); + rxstale = 16; + break; + case 115200: + msm_hs_write(uport, UARTDM_CSR_ADDR, 0xcc); + rxstale = 31; + break; + case 230400: + msm_hs_write(uport, UARTDM_CSR_ADDR, 0xee); + rxstale = 31; + break; + case 460800: + msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff); + rxstale = 31; + break; + case 4000000: + case 3686400: + case 3200000: + case 3500000: + case 3000000: + case 2500000: + case 1500000: + case 1152000: + case 1000000: + case 921600: + msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff); + rxstale = 31; + break; + default: + msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff); + /* default to 9600 */ + bps = 9600; + rxstale = 2; + break; + } + if (bps > 460800) { + uport->uartclk = bps * 16; + } else { + uport->uartclk = 7372800; + } + if (clk_set_rate(msm_uport->clk, uport->uartclk)) { + printk(KERN_WARNING "Error setting clock rate on UART\n"); + return; + } + + data = rxstale & UARTDM_IPR_STALE_LSB_BMSK; + data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2); + + msm_hs_write(uport, UARTDM_IPR_ADDR, data); +} + +/* + * termios : new ktermios + * oldtermios: old ktermios previous setting + * + * Configure the serial port + */ +static void msm_hs_set_termios(struct uart_port *uport, + struct ktermios *termios, + struct ktermios *oldtermios) +{ + unsigned int bps; + unsigned long data; + unsigned long flags; + unsigned int c_cflag = termios->c_cflag; + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + spin_lock_irqsave(&uport->lock, flags); + clk_enable(msm_uport->clk); + + /* 300 is the minimum baud support by the driver */ + bps = uart_get_baud_rate(uport, termios, oldtermios, 200, 4000000); + + /* Temporary remapping 200 BAUD to 3.2 mbps */ + if (bps == 200) + bps = 3200000; + + msm_hs_set_bps_locked(uport, bps); + + data = msm_hs_read(uport, UARTDM_MR2_ADDR); + data &= ~UARTDM_MR2_PARITY_MODE_BMSK; + /* set parity */ + if (PARENB == (c_cflag & PARENB)) { + if (PARODD == (c_cflag & PARODD)) { + data |= ODD_PARITY; + } else if (CMSPAR == (c_cflag & CMSPAR)) { + data |= SPACE_PARITY; + } else { + data |= EVEN_PARITY; + } + } + + /* Set bits per char */ + data &= ~UARTDM_MR2_BITS_PER_CHAR_BMSK; + + switch (c_cflag & CSIZE) { + case CS5: + data |= FIVE_BPC; + break; + case CS6: + data |= SIX_BPC; + break; + case CS7: + data |= SEVEN_BPC; + break; + default: + data |= EIGHT_BPC; + 
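+ /* CS8 and any unrecognized CSIZE setting fall back to 8 bits per character */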
break; + } + /* stop bits */ + if (c_cflag & CSTOPB) { + data |= STOP_BIT_TWO; + } else { + /* otherwise 1 stop bit */ + data |= STOP_BIT_ONE; + } + data |= UARTDM_MR2_ERROR_MODE_BMSK; + /* write parity/bits per char/stop bit configuration */ + msm_hs_write(uport, UARTDM_MR2_ADDR, data); + + /* Configure HW flow control */ + data = msm_hs_read(uport, UARTDM_MR1_ADDR); + + data &= ~(UARTDM_MR1_CTS_CTL_BMSK | UARTDM_MR1_RX_RDY_CTL_BMSK); + + if (c_cflag & CRTSCTS) { + data |= UARTDM_MR1_CTS_CTL_BMSK; + data |= UARTDM_MR1_RX_RDY_CTL_BMSK; + } + + msm_hs_write(uport, UARTDM_MR1_ADDR, data); + + uport->ignore_status_mask = termios->c_iflag & INPCK; + uport->ignore_status_mask |= termios->c_iflag & IGNPAR; + uport->read_status_mask = (termios->c_cflag & CREAD); + + msm_hs_write(uport, UARTDM_IMR_ADDR, 0); + + /* Set Transmit software time out */ + uart_update_timeout(uport, c_cflag, bps); + + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX); + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX); + + if (msm_uport->rx.flush == FLUSH_NONE) { + wake_lock(&msm_uport->rx.wake_lock); + msm_uport->rx.flush = FLUSH_IGNORE; + msm_dmov_flush(msm_uport->dma_rx_channel); + } + + msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg); + + clk_disable(msm_uport->clk); + spin_unlock_irqrestore(&uport->lock, flags); +} + +/* + * Standard API, Transmitter + * Any character in the transmit shift register is sent + */ +static unsigned int msm_hs_tx_empty(struct uart_port *uport) +{ + unsigned int data; + unsigned int ret = 0; + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + clk_enable(msm_uport->clk); + + data = msm_hs_read(uport, UARTDM_SR_ADDR); + if (data & UARTDM_SR_TXEMT_BMSK) + ret = TIOCSER_TEMT; + + clk_disable(msm_uport->clk); + + return ret; +} + +/* + * Standard API, Stop transmitter. + * Any character in the transmit shift register is sent as + * well as the current data mover transfer . + */ +static void msm_hs_stop_tx_locked(struct uart_port *uport) +{ + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + msm_uport->tx.tx_ready_int_en = 0; +} + +/* + * Standard API, Stop receiver as soon as possible. + * + * Function immediately terminates the operation of the + * channel receiver and any incoming characters are lost. None + * of the receiver status bits are affected by this command and + * characters that are already in the receive FIFO there. 
+ */ +static void msm_hs_stop_rx_locked(struct uart_port *uport) +{ + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + unsigned int data; + + clk_enable(msm_uport->clk); + + /* disable dlink */ + data = msm_hs_read(uport, UARTDM_DMEN_ADDR); + data &= ~UARTDM_RX_DM_EN_BMSK; + msm_hs_write(uport, UARTDM_DMEN_ADDR, data); + + /* Disable the receiver */ + if (msm_uport->rx.flush == FLUSH_NONE) { + wake_lock(&msm_uport->rx.wake_lock); + msm_dmov_flush(msm_uport->dma_rx_channel); + } + if (msm_uport->rx.flush != FLUSH_SHUTDOWN) + msm_uport->rx.flush = FLUSH_STOP; + + clk_disable(msm_uport->clk); +} + +/* Transmit the next chunk of data */ +static void msm_hs_submit_tx_locked(struct uart_port *uport) +{ + int left; + int tx_count; + dma_addr_t src_addr; + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + struct msm_hs_tx *tx = &msm_uport->tx; + struct circ_buf *tx_buf = &msm_uport->uport.state->xmit; + + if (uart_circ_empty(tx_buf) || uport->state->port.tty->stopped) { + msm_hs_stop_tx_locked(uport); + return; + } + + tx->dma_in_flight = 1; + + tx_count = uart_circ_chars_pending(tx_buf); + + if (UARTDM_TX_BUF_SIZE < tx_count) + tx_count = UARTDM_TX_BUF_SIZE; + + left = UART_XMIT_SIZE - tx_buf->tail; + + if (tx_count > left) + tx_count = left; + + src_addr = tx->dma_base + tx_buf->tail; + dma_sync_single_for_device(uport->dev, src_addr, tx_count, + DMA_TO_DEVICE); + + tx->command_ptr->num_rows = (((tx_count + 15) >> 4) << 16) | + ((tx_count + 15) >> 4); + tx->command_ptr->src_row_addr = src_addr; + + dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr, + sizeof(dmov_box), DMA_TO_DEVICE); + + *tx->command_ptr_ptr = CMD_PTR_LP | DMOV_CMD_ADDR(tx->mapped_cmd_ptr); + + dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr_ptr, + sizeof(u32 *), DMA_TO_DEVICE); + + /* Save tx_count to use in Callback */ + tx->tx_count = tx_count; + msm_hs_write(uport, UARTDM_NCF_TX_ADDR, tx_count); + + /* Disable the tx_ready interrupt */ + msm_uport->imr_reg &= ~UARTDM_ISR_TX_READY_BMSK; + msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg); + msm_dmov_enqueue_cmd(msm_uport->dma_tx_channel, &tx->xfer); +} + +/* Start to receive the next chunk of data */ +static void msm_hs_start_rx_locked(struct uart_port *uport) +{ + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT); + msm_hs_write(uport, UARTDM_DMRX_ADDR, UARTDM_RX_BUF_SIZE); + msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_ENABLE); + msm_uport->imr_reg |= UARTDM_ISR_RXLEV_BMSK; + msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg); + + msm_uport->rx.flush = FLUSH_NONE; + msm_dmov_enqueue_cmd(msm_uport->dma_rx_channel, &msm_uport->rx.xfer); + + /* might have finished RX and be ready to clock off */ + hrtimer_start(&msm_uport->clk_off_timer, msm_uport->clk_off_delay, + HRTIMER_MODE_REL); +} + +/* Enable the transmitter Interrupt */ +static void msm_hs_start_tx_locked(struct uart_port *uport ) +{ + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + clk_enable(msm_uport->clk); + + if (msm_uport->exit_lpm_cb) + msm_uport->exit_lpm_cb(uport); + + if (msm_uport->tx.tx_ready_int_en == 0) { + msm_uport->tx.tx_ready_int_en = 1; + msm_hs_submit_tx_locked(uport); + } + + clk_disable(msm_uport->clk); +} + +/* + * This routine is called when we are done with a DMA transfer + * + * This routine is registered with Data mover when we set + * up a Data Mover transfer. It is called from Data mover ISR + * when the DMA transfer is done. 
+ */ +static void msm_hs_dmov_tx_callback(struct msm_dmov_cmd *cmd_ptr, + unsigned int result, + struct msm_dmov_errdata *err) +{ + unsigned long flags; + struct msm_hs_port *msm_uport; + + WARN_ON(result != 0x80000002); /* DMA did not finish properly */ + msm_uport = container_of(cmd_ptr, struct msm_hs_port, tx.xfer); + + spin_lock_irqsave(&msm_uport->uport.lock, flags); + clk_enable(msm_uport->clk); + + msm_uport->imr_reg |= UARTDM_ISR_TX_READY_BMSK; + msm_hs_write(&msm_uport->uport, UARTDM_IMR_ADDR, msm_uport->imr_reg); + + clk_disable(msm_uport->clk); + spin_unlock_irqrestore(&msm_uport->uport.lock, flags); +} + +/* + * This routine is called when we are done with a DMA transfer or the + * a flush has been sent to the data mover driver. + * + * This routine is registered with Data mover when we set up a Data Mover + * transfer. It is called from Data mover ISR when the DMA transfer is done. + */ +static void msm_hs_dmov_rx_callback(struct msm_dmov_cmd *cmd_ptr, + unsigned int result, + struct msm_dmov_errdata *err) +{ + int retval; + int rx_count; + unsigned long status; + unsigned int error_f = 0; + unsigned long flags; + unsigned int flush; + struct tty_struct *tty; + struct uart_port *uport; + struct msm_hs_port *msm_uport; + + msm_uport = container_of(cmd_ptr, struct msm_hs_port, rx.xfer); + uport = &msm_uport->uport; + + spin_lock_irqsave(&uport->lock, flags); + clk_enable(msm_uport->clk); + + tty = uport->state->port.tty; + + msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE); + + status = msm_hs_read(uport, UARTDM_SR_ADDR); + + /* overflow is not connect to data in a FIFO */ + if (unlikely((status & UARTDM_SR_OVERRUN_BMSK) && + (uport->read_status_mask & CREAD))) { + tty_insert_flip_char(tty, 0, TTY_OVERRUN); + uport->icount.buf_overrun++; + error_f = 1; + } + + if (!(uport->ignore_status_mask & INPCK)) + status = status & ~(UARTDM_SR_PAR_FRAME_BMSK); + + if (unlikely(status & UARTDM_SR_PAR_FRAME_BMSK)) { + /* Can not tell difference between parity & frame error */ + uport->icount.parity++; + error_f = 1; + if (uport->ignore_status_mask & IGNPAR) + tty_insert_flip_char(tty, 0, TTY_PARITY); + } + + if (error_f) + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_ERROR_STATUS); + + if (msm_uport->clk_req_off_state == CLK_REQ_OFF_FLUSH_ISSUED) + msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_FLUSHED; + + flush = msm_uport->rx.flush; + if (flush == FLUSH_IGNORE) + msm_hs_start_rx_locked(uport); + if (flush == FLUSH_STOP) + msm_uport->rx.flush = FLUSH_SHUTDOWN; + if (flush >= FLUSH_DATA_INVALID) + goto out; + + rx_count = msm_hs_read(uport, UARTDM_RX_TOTAL_SNAP_ADDR); + + if (0 != (uport->read_status_mask & CREAD)) { + retval = tty_insert_flip_string(tty, msm_uport->rx.buffer, + rx_count); + BUG_ON(retval != rx_count); + } + + msm_hs_start_rx_locked(uport); + +out: + clk_disable(msm_uport->clk); + /* release wakelock in 500ms, not immediately, because higher layers + * don't always take wakelocks when they should */ + wake_lock_timeout(&msm_uport->rx.wake_lock, HZ / 2); + spin_unlock_irqrestore(&uport->lock, flags); + + if (flush < FLUSH_DATA_INVALID) + queue_work(msm_hs_workqueue, &msm_uport->rx.tty_work); +} + +static void msm_hs_tty_flip_buffer_work(struct work_struct *work) +{ + struct msm_hs_port *msm_uport = + container_of(work, struct msm_hs_port, rx.tty_work); + struct tty_struct *tty = msm_uport->uport.state->port.tty; + + tty_flip_buffer_push(tty); +} + +/* + * Standard API, Current states of modem control inputs + * + * Since CTS can be handled entirely by HARDWARE we 
always + * indicate clear to send and count on the TX FIFO to block when + * it fills up. + * + * - TIOCM_DCD + * - TIOCM_CTS + * - TIOCM_DSR + * - TIOCM_RI + * (Unsupported) DCD and DSR will return them high. RI will return low. + */ +static unsigned int msm_hs_get_mctrl_locked(struct uart_port *uport) +{ + return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS; +} + +/* + * True enables UART auto RFR, which indicates we are ready for data if the RX + * buffer is not full. False disables auto RFR, and deasserts RFR to indicate + * we are not ready for data. Must be called with UART clock on. + */ +static void set_rfr_locked(struct uart_port *uport, int auto_rfr) { + unsigned int data; + + data = msm_hs_read(uport, UARTDM_MR1_ADDR); + + if (auto_rfr) { + /* enable auto ready-for-receiving */ + data |= UARTDM_MR1_RX_RDY_CTL_BMSK; + msm_hs_write(uport, UARTDM_MR1_ADDR, data); + } else { + /* disable auto ready-for-receiving */ + data &= ~UARTDM_MR1_RX_RDY_CTL_BMSK; + msm_hs_write(uport, UARTDM_MR1_ADDR, data); + /* RFR is active low, set high */ + msm_hs_write(uport, UARTDM_CR_ADDR, RFR_HIGH); + } +} + +/* + * Standard API, used to set or clear RFR + */ +static void msm_hs_set_mctrl_locked(struct uart_port *uport, + unsigned int mctrl) +{ + unsigned int auto_rfr; + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + clk_enable(msm_uport->clk); + + auto_rfr = TIOCM_RTS & mctrl ? 1 : 0; + set_rfr_locked(uport, auto_rfr); + + clk_disable(msm_uport->clk); +} + +/* Standard API, Enable modem status (CTS) interrupt */ +static void msm_hs_enable_ms_locked(struct uart_port *uport) +{ + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + clk_enable(msm_uport->clk); + + /* Enable DELTA_CTS Interrupt */ + msm_uport->imr_reg |= UARTDM_ISR_DELTA_CTS_BMSK; + msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg); + + clk_disable(msm_uport->clk); + +} + +/* + * Standard API, Break Signal + * + * Control the transmission of a break signal. ctl eq 0 => break + * signal terminate ctl ne 0 => start break signal + */ +static void msm_hs_break_ctl(struct uart_port *uport, int ctl) +{ + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + clk_enable(msm_uport->clk); + msm_hs_write(uport, UARTDM_CR_ADDR, ctl ? 
START_BREAK : STOP_BREAK); + clk_disable(msm_uport->clk); +} + +static void msm_hs_config_port(struct uart_port *uport, int cfg_flags) +{ + unsigned long flags; + + spin_lock_irqsave(&uport->lock, flags); + if (cfg_flags & UART_CONFIG_TYPE) { + uport->type = PORT_MSM; + msm_hs_request_port(uport); + } + spin_unlock_irqrestore(&uport->lock, flags); +} + +/* Handle CTS changes (Called from interrupt handler) */ +static void msm_hs_handle_delta_cts(struct uart_port *uport) +{ + unsigned long flags; + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + spin_lock_irqsave(&uport->lock, flags); + clk_enable(msm_uport->clk); + + /* clear interrupt */ + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_CTS); + uport->icount.cts++; + + clk_disable(msm_uport->clk); + spin_unlock_irqrestore(&uport->lock, flags); + + /* clear the IOCTL TIOCMIWAIT if called */ + wake_up_interruptible(&uport->state->port.delta_msr_wait); +} + +/* check if the TX path is flushed, and if so clock off + * returns 0 did not clock off, need to retry (still sending final byte) + * -1 did not clock off, do not retry + * 1 if we clocked off + */ +static int msm_hs_check_clock_off_locked(struct uart_port *uport) +{ + unsigned long sr_status; + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + struct circ_buf *tx_buf = &uport->state->xmit; + + /* Cancel if tx tty buffer is not empty, dma is in flight, + * or tx fifo is not empty, or rx fifo is not empty */ + if (msm_uport->clk_state != MSM_HS_CLK_REQUEST_OFF || + !uart_circ_empty(tx_buf) || msm_uport->tx.dma_in_flight || + (msm_uport->imr_reg & UARTDM_ISR_TXLEV_BMSK) || + !(msm_uport->imr_reg & UARTDM_ISR_RXLEV_BMSK)) { + return -1; + } + + /* Make sure the uart is finished with the last byte */ + sr_status = msm_hs_read(uport, UARTDM_SR_ADDR); + if (!(sr_status & UARTDM_SR_TXEMT_BMSK)) + return 0; /* retry */ + + /* Make sure forced RXSTALE flush complete */ + switch (msm_uport->clk_req_off_state) { + case CLK_REQ_OFF_START: + msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_ISSUED; + msm_hs_write(uport, UARTDM_CR_ADDR, FORCE_STALE_EVENT); + return 0; /* RXSTALE flush not complete - retry */ + case CLK_REQ_OFF_RXSTALE_ISSUED: + case CLK_REQ_OFF_FLUSH_ISSUED: + return 0; /* RXSTALE flush not complete - retry */ + case CLK_REQ_OFF_RXSTALE_FLUSHED: + break; /* continue */ + } + + if (msm_uport->rx.flush != FLUSH_SHUTDOWN) { + if (msm_uport->rx.flush == FLUSH_NONE) + msm_hs_stop_rx_locked(uport); + return 0; /* come back later to really clock off */ + } + + /* we really want to clock off */ + clk_disable(msm_uport->clk); + msm_uport->clk_state = MSM_HS_CLK_OFF; + wake_unlock(&msm_uport->dma_wake_lock); + if (use_low_power_rx_wakeup(msm_uport)) { + msm_uport->rx_wakeup.ignore = 1; + enable_irq(msm_uport->rx_wakeup.irq); + } + return 1; +} + +static enum hrtimer_restart msm_hs_clk_off_retry(struct hrtimer *timer) { + unsigned long flags; + int ret = HRTIMER_NORESTART; + struct msm_hs_port *msm_uport = container_of(timer, struct msm_hs_port, + clk_off_timer); + struct uart_port *uport = &msm_uport->uport; + + spin_lock_irqsave(&uport->lock, flags); + + if (!msm_hs_check_clock_off_locked(uport)) { + hrtimer_forward_now(timer, msm_uport->clk_off_delay); + ret = HRTIMER_RESTART; + } + + spin_unlock_irqrestore(&uport->lock, flags); + + return ret; +} + +static irqreturn_t msm_hs_isr(int irq, void *dev) +{ + unsigned long flags; + unsigned long isr_status; + struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev; + struct uart_port *uport = &msm_uport->uport; + struct circ_buf 
*tx_buf = &uport->state->xmit; + struct msm_hs_tx *tx = &msm_uport->tx; + struct msm_hs_rx *rx = &msm_uport->rx; + + spin_lock_irqsave(&uport->lock, flags); + + isr_status = msm_hs_read(uport, UARTDM_MISR_ADDR); + + /* Uart RX starting */ + if (isr_status & UARTDM_ISR_RXLEV_BMSK) { + wake_lock(&rx->wake_lock); /* hold wakelock while rx dma */ + msm_uport->imr_reg &= ~UARTDM_ISR_RXLEV_BMSK; + msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg); + } + /* Stale rx interrupt */ + if (isr_status & UARTDM_ISR_RXSTALE_BMSK) { + msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE); + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT); + + if (msm_uport->clk_req_off_state == CLK_REQ_OFF_RXSTALE_ISSUED) + msm_uport->clk_req_off_state = + CLK_REQ_OFF_FLUSH_ISSUED; + if (rx->flush == FLUSH_NONE) { + rx->flush = FLUSH_DATA_READY; + msm_dmov_flush(msm_uport->dma_rx_channel); + } + } + /* tx ready interrupt */ + if (isr_status & UARTDM_ISR_TX_READY_BMSK) { + /* Clear TX Ready */ + msm_hs_write(uport, UARTDM_CR_ADDR, CLEAR_TX_READY); + + if (msm_uport->clk_state == MSM_HS_CLK_REQUEST_OFF) { + msm_uport->imr_reg |= UARTDM_ISR_TXLEV_BMSK; + msm_hs_write(uport, UARTDM_IMR_ADDR, + msm_uport->imr_reg); + } + + /* Complete DMA TX transactions and submit new transactions */ + tx_buf->tail = (tx_buf->tail + tx->tx_count) & ~UART_XMIT_SIZE; + + tx->dma_in_flight = 0; + + uport->icount.tx += tx->tx_count; + if (tx->tx_ready_int_en) + msm_hs_submit_tx_locked(uport); + + if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS) + uart_write_wakeup(uport); + } + if (isr_status & UARTDM_ISR_TXLEV_BMSK) { + /* TX FIFO is empty */ + msm_uport->imr_reg &= ~UARTDM_ISR_TXLEV_BMSK; + msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg); + if (!msm_hs_check_clock_off_locked(uport)) + hrtimer_start(&msm_uport->clk_off_timer, + msm_uport->clk_off_delay, + HRTIMER_MODE_REL); + } + + /* Change in CTS interrupt */ + if (isr_status & UARTDM_ISR_DELTA_CTS_BMSK) + msm_hs_handle_delta_cts(uport); + + spin_unlock_irqrestore(&uport->lock, flags); + + return IRQ_HANDLED; +} + +void msm_hs_request_clock_off_locked(struct uart_port *uport) { + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + if (msm_uport->clk_state == MSM_HS_CLK_ON) { + msm_uport->clk_state = MSM_HS_CLK_REQUEST_OFF; + msm_uport->clk_req_off_state = CLK_REQ_OFF_START; + if (!use_low_power_rx_wakeup(msm_uport)) + set_rfr_locked(uport, 0); + msm_uport->imr_reg |= UARTDM_ISR_TXLEV_BMSK; + msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg); + } +} +EXPORT_SYMBOL(msm_hs_request_clock_off_locked); + +/* request to turn off uart clock once pending TX is flushed */ +void msm_hs_request_clock_off(struct uart_port *uport) { + unsigned long flags; + + spin_lock_irqsave(&uport->lock, flags); + msm_hs_request_clock_off_locked(uport); + spin_unlock_irqrestore(&uport->lock, flags); +} +EXPORT_SYMBOL(msm_hs_request_clock_off); + +void msm_hs_request_clock_on_locked(struct uart_port *uport) { + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + unsigned int data; + + switch (msm_uport->clk_state) { + case MSM_HS_CLK_OFF: + wake_lock(&msm_uport->dma_wake_lock); + clk_enable(msm_uport->clk); + disable_irq_nosync(msm_uport->rx_wakeup.irq); + /* fall-through */ + case MSM_HS_CLK_REQUEST_OFF: + if (msm_uport->rx.flush == FLUSH_STOP || + msm_uport->rx.flush == FLUSH_SHUTDOWN) { + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX); + data = msm_hs_read(uport, UARTDM_DMEN_ADDR); + data |= UARTDM_RX_DM_EN_BMSK; + msm_hs_write(uport, UARTDM_DMEN_ADDR, data); + } + 
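+ /* Whether the clock was merely pending off or already off, cancel any
+ * outstanding clock-off retry timer; if RX was flushed while the clock
+ * was going down it is restarted below before the state goes back to
+ * MSM_HS_CLK_ON. */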
hrtimer_try_to_cancel(&msm_uport->clk_off_timer); + if (msm_uport->rx.flush == FLUSH_SHUTDOWN) + msm_hs_start_rx_locked(uport); + if (!use_low_power_rx_wakeup(msm_uport)) + set_rfr_locked(uport, 1); + if (msm_uport->rx.flush == FLUSH_STOP) + msm_uport->rx.flush = FLUSH_IGNORE; + msm_uport->clk_state = MSM_HS_CLK_ON; + break; + case MSM_HS_CLK_ON: break; + case MSM_HS_CLK_PORT_OFF: break; + } +} +EXPORT_SYMBOL(msm_hs_request_clock_on_locked); + +void msm_hs_request_clock_on(struct uart_port *uport) { + unsigned long flags; + spin_lock_irqsave(&uport->lock, flags); + msm_hs_request_clock_on_locked(uport); + spin_unlock_irqrestore(&uport->lock, flags); +} +EXPORT_SYMBOL(msm_hs_request_clock_on); + +static irqreturn_t msm_hs_rx_wakeup_isr(int irq, void *dev) +{ + unsigned int wakeup = 0; + unsigned long flags; + struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev; + struct uart_port *uport = &msm_uport->uport; + struct tty_struct *tty = NULL; + + spin_lock_irqsave(&uport->lock, flags); + if (msm_uport->clk_state == MSM_HS_CLK_OFF) { + /* ignore the first irq - it is a pending irq that occured + * before enable_irq() */ + if (msm_uport->rx_wakeup.ignore) + msm_uport->rx_wakeup.ignore = 0; + else + wakeup = 1; + } + + if (wakeup) { + /* the uart was clocked off during an rx, wake up and + * optionally inject char into tty rx */ + msm_hs_request_clock_on_locked(uport); + if (msm_uport->rx_wakeup.inject_rx) { + tty = uport->state->port.tty; + tty_insert_flip_char(tty, + msm_uport->rx_wakeup.rx_to_inject, + TTY_NORMAL); + queue_work(msm_hs_workqueue, &msm_uport->rx.tty_work); + } + } + + spin_unlock_irqrestore(&uport->lock, flags); + + return IRQ_HANDLED; +} + +static const char *msm_hs_type(struct uart_port *port) +{ + return ("MSM HS UART"); +} + +/* Called when port is opened */ +static int msm_hs_startup(struct uart_port *uport) +{ + int ret; + int rfr_level; + unsigned long flags; + unsigned int data; + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + struct circ_buf *tx_buf = &uport->state->xmit; + struct msm_hs_tx *tx = &msm_uport->tx; + struct msm_hs_rx *rx = &msm_uport->rx; + + rfr_level = uport->fifosize; + if (rfr_level > 16) + rfr_level -= 16; + + tx->dma_base = dma_map_single(uport->dev, tx_buf->buf, UART_XMIT_SIZE, + DMA_TO_DEVICE); + + /* do not let tty layer execute RX in global workqueue, use a + * dedicated workqueue managed by this driver */ + uport->state->port.tty->low_latency = 1; + + /* turn on uart clk */ + ret = msm_hs_init_clk_locked(uport); + if (unlikely(ret)) + return ret; + + /* Set auto RFR Level */ + data = msm_hs_read(uport, UARTDM_MR1_ADDR); + data &= ~UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK; + data &= ~UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK; + data |= (UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK & (rfr_level << 2)); + data |= (UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK & rfr_level); + msm_hs_write(uport, UARTDM_MR1_ADDR, data); + + /* Make sure RXSTALE count is non-zero */ + data = msm_hs_read(uport, UARTDM_IPR_ADDR); + if (!data) { + data |= 0x1f & UARTDM_IPR_STALE_LSB_BMSK; + msm_hs_write(uport, UARTDM_IPR_ADDR, data); + } + + /* Enable Data Mover Mode */ + data = UARTDM_TX_DM_EN_BMSK | UARTDM_RX_DM_EN_BMSK; + msm_hs_write(uport, UARTDM_DMEN_ADDR, data); + + /* Reset TX */ + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX); + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX); + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_ERROR_STATUS); + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_BREAK_INT); + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT); + msm_hs_write(uport, UARTDM_CR_ADDR, RESET_CTS); 
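+ /* The channel commands above reset the transmitter and receiver and
+ * clear any latched error, break, stale and CTS state before RFR is
+ * driven and the receiver and transmitter are enabled below. */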
+ msm_hs_write(uport, UARTDM_CR_ADDR, RFR_LOW); + /* Turn on Uart Receiver */ + msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_RX_EN_BMSK); + + /* Turn on Uart Transmitter */ + msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_TX_EN_BMSK); + + /* Initialize the tx */ + tx->tx_ready_int_en = 0; + tx->dma_in_flight = 0; + + tx->xfer.complete_func = msm_hs_dmov_tx_callback; + tx->xfer.execute_func = NULL; + + tx->command_ptr->cmd = CMD_LC | + CMD_DST_CRCI(msm_uport->dma_tx_crci) | CMD_MODE_BOX; + + tx->command_ptr->src_dst_len = (MSM_UARTDM_BURST_SIZE << 16) + | (MSM_UARTDM_BURST_SIZE); + + tx->command_ptr->row_offset = (MSM_UARTDM_BURST_SIZE << 16); + + tx->command_ptr->dst_row_addr = + msm_uport->uport.mapbase + UARTDM_TF_ADDR; + + + /* Turn on Uart Receive */ + rx->xfer.complete_func = msm_hs_dmov_rx_callback; + rx->xfer.execute_func = NULL; + + rx->command_ptr->cmd = CMD_LC | + CMD_SRC_CRCI(msm_uport->dma_rx_crci) | CMD_MODE_BOX; + + rx->command_ptr->src_dst_len = (MSM_UARTDM_BURST_SIZE << 16) + | (MSM_UARTDM_BURST_SIZE); + rx->command_ptr->row_offset = MSM_UARTDM_BURST_SIZE; + rx->command_ptr->src_row_addr = uport->mapbase + UARTDM_RF_ADDR; + + + msm_uport->imr_reg |= UARTDM_ISR_RXSTALE_BMSK; + /* Enable reading the current CTS, no harm even if CTS is ignored */ + msm_uport->imr_reg |= UARTDM_ISR_CURRENT_CTS_BMSK; + + msm_hs_write(uport, UARTDM_TFWR_ADDR, 0); /* TXLEV on empty TX fifo */ + + + ret = request_irq(uport->irq, msm_hs_isr, IRQF_TRIGGER_HIGH, + "msm_hs_uart", msm_uport); + if (unlikely(ret)) + return ret; + if (use_low_power_rx_wakeup(msm_uport)) { + ret = request_irq(msm_uport->rx_wakeup.irq, + msm_hs_rx_wakeup_isr, + IRQF_TRIGGER_FALLING, + "msm_hs_rx_wakeup", msm_uport); + if (unlikely(ret)) + return ret; + disable_irq(msm_uport->rx_wakeup.irq); + } + + spin_lock_irqsave(&uport->lock, flags); + + msm_hs_write(uport, UARTDM_RFWR_ADDR, 0); + msm_hs_start_rx_locked(uport); + + spin_unlock_irqrestore(&uport->lock, flags); + + return 0; +} + +/* Initialize tx and rx data structures */ +static int uartdm_init_port(struct uart_port *uport) +{ + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + struct msm_hs_tx *tx = &msm_uport->tx; + struct msm_hs_rx *rx = &msm_uport->rx; + + /* Allocate the command pointer. Needs to be 64 bit aligned */ + tx->command_ptr = kmalloc(sizeof(dmov_box), GFP_KERNEL | __GFP_DMA); + + tx->command_ptr_ptr = kmalloc(sizeof(u32 *), GFP_KERNEL | __GFP_DMA); + + if (!tx->command_ptr || !tx->command_ptr_ptr) + return -ENOMEM; + + tx->mapped_cmd_ptr = dma_map_single(uport->dev, tx->command_ptr, + sizeof(dmov_box), DMA_TO_DEVICE); + tx->mapped_cmd_ptr_ptr = dma_map_single(uport->dev, + tx->command_ptr_ptr, + sizeof(u32 *), DMA_TO_DEVICE); + tx->xfer.cmdptr = DMOV_CMD_ADDR(tx->mapped_cmd_ptr_ptr); + + init_waitqueue_head(&rx->wait); + wake_lock_init(&rx->wake_lock, WAKE_LOCK_SUSPEND, "msm_serial_hs_rx"); + wake_lock_init(&msm_uport->dma_wake_lock, WAKE_LOCK_SUSPEND, + "msm_serial_hs_dma"); + + rx->pool = dma_pool_create("rx_buffer_pool", uport->dev, + UARTDM_RX_BUF_SIZE, 16, 0); + + rx->buffer = dma_pool_alloc(rx->pool, GFP_KERNEL, &rx->rbuffer); + + /* Allocate the command pointer. 
Needs to be 64 bit aligned */ + rx->command_ptr = kmalloc(sizeof(dmov_box), GFP_KERNEL | __GFP_DMA); + + rx->command_ptr_ptr = kmalloc(sizeof(u32 *), GFP_KERNEL | __GFP_DMA); + + if (!rx->command_ptr || !rx->command_ptr_ptr || !rx->pool || + !rx->buffer) + return -ENOMEM; + + rx->command_ptr->num_rows = ((UARTDM_RX_BUF_SIZE >> 4) << 16) | + (UARTDM_RX_BUF_SIZE >> 4); + + rx->command_ptr->dst_row_addr = rx->rbuffer; + + rx->mapped_cmd_ptr = dma_map_single(uport->dev, rx->command_ptr, + sizeof(dmov_box), DMA_TO_DEVICE); + + *rx->command_ptr_ptr = CMD_PTR_LP | DMOV_CMD_ADDR(rx->mapped_cmd_ptr); + + rx->cmdptr_dmaaddr = dma_map_single(uport->dev, rx->command_ptr_ptr, + sizeof(u32 *), DMA_TO_DEVICE); + rx->xfer.cmdptr = DMOV_CMD_ADDR(rx->cmdptr_dmaaddr); + + INIT_WORK(&rx->tty_work, msm_hs_tty_flip_buffer_work); + + return 0; +} + +static int msm_hs_probe(struct platform_device *pdev) +{ + int ret; + struct uart_port *uport; + struct msm_hs_port *msm_uport; + struct resource *resource; + struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data; + + if (pdev->id < 0 || pdev->id >= UARTDM_NR) { + printk(KERN_ERR "Invalid plaform device ID = %d\n", pdev->id); + return -EINVAL; + } + + msm_uport = &q_uart_port[pdev->id]; + uport = &msm_uport->uport; + + uport->dev = &pdev->dev; + + resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (unlikely(!resource)) + return -ENXIO; + uport->mapbase = resource->start; /* virtual address */ + + uport->membase = ioremap(uport->mapbase, PAGE_SIZE); + if (unlikely(!uport->membase)) + return -ENOMEM; + + uport->irq = platform_get_irq(pdev, 0); + if (unlikely(uport->irq < 0)) + return -ENXIO; + if (unlikely(set_irq_wake(uport->irq, 1))) + return -ENXIO; + + if (pdata == NULL || pdata->rx_wakeup_irq < 0) + msm_uport->rx_wakeup.irq = -1; + else { + msm_uport->rx_wakeup.irq = pdata->rx_wakeup_irq; + msm_uport->rx_wakeup.ignore = 1; + msm_uport->rx_wakeup.inject_rx = pdata->inject_rx_on_wakeup; + msm_uport->rx_wakeup.rx_to_inject = pdata->rx_to_inject; + + if (unlikely(msm_uport->rx_wakeup.irq < 0)) + return -ENXIO; + if (unlikely(set_irq_wake(msm_uport->rx_wakeup.irq, 1))) + return -ENXIO; + } + + if (pdata == NULL) + msm_uport->exit_lpm_cb = NULL; + else + msm_uport->exit_lpm_cb = pdata->exit_lpm_cb; + + resource = platform_get_resource_byname(pdev, IORESOURCE_DMA, + "uartdm_channels"); + if (unlikely(!resource)) + return -ENXIO; + msm_uport->dma_tx_channel = resource->start; + msm_uport->dma_rx_channel = resource->end; + + resource = platform_get_resource_byname(pdev, IORESOURCE_DMA, + "uartdm_crci"); + if (unlikely(!resource)) + return -ENXIO; + msm_uport->dma_tx_crci = resource->start; + msm_uport->dma_rx_crci = resource->end; + + uport->iotype = UPIO_MEM; + uport->fifosize = 64; + uport->ops = &msm_hs_ops; + uport->flags = UPF_BOOT_AUTOCONF; + uport->uartclk = 7372800; + msm_uport->imr_reg = 0x0; + msm_uport->clk = clk_get(&pdev->dev, "uartdm_clk"); + if (IS_ERR(msm_uport->clk)) + return PTR_ERR(msm_uport->clk); + + ret = uartdm_init_port(uport); + if (unlikely(ret)) + return ret; + + /* configure the CR Protection to Enable */ + msm_hs_write(uport, UARTDM_CR_ADDR, CR_PROTECTION_EN); + + msm_uport->clk_state = MSM_HS_CLK_PORT_OFF; + hrtimer_init(&msm_uport->clk_off_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + msm_uport->clk_off_timer.function = msm_hs_clk_off_retry; + msm_uport->clk_off_delay = ktime_set(0, 1000000); /* 1ms */ + + uport->line = pdev->id; + return uart_add_one_port(&msm_hs_driver, uport); +} + +static int __init 
msm_serial_hs_init(void) +{ + int ret; + int i; + + /* Init all UARTS as non-configured */ + for (i = 0; i < UARTDM_NR; i++) + q_uart_port[i].uport.type = PORT_UNKNOWN; + + msm_hs_workqueue = create_singlethread_workqueue("msm_serial_hs"); + + ret = uart_register_driver(&msm_hs_driver); + if (unlikely(ret)) { + printk(KERN_ERR "%s failed to load\n", __FUNCTION__); + return ret; + } + ret = platform_driver_register(&msm_serial_hs_platform_driver); + if (ret) { + printk(KERN_ERR "%s failed to load\n", __FUNCTION__); + uart_unregister_driver(&msm_hs_driver); + return ret; + } + + printk(KERN_INFO "msm_serial_hs module loaded\n"); + return ret; +} + +/* + * Called by the upper layer when port is closed. + * - Disables the port + * - Unhook the ISR + */ +static void msm_hs_shutdown(struct uart_port *uport) +{ + unsigned long flags; + struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + + BUG_ON(msm_uport->rx.flush < FLUSH_STOP); + + spin_lock_irqsave(&uport->lock, flags); + clk_enable(msm_uport->clk); + + /* Disable the transmitter */ + msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_TX_DISABLE_BMSK); + /* Disable the receiver */ + msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_RX_DISABLE_BMSK); + + /* Free the interrupt */ + free_irq(uport->irq, msm_uport); + if (use_low_power_rx_wakeup(msm_uport)) + free_irq(msm_uport->rx_wakeup.irq, msm_uport); + + msm_uport->imr_reg = 0; + msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg); + + wait_event(msm_uport->rx.wait, msm_uport->rx.flush == FLUSH_SHUTDOWN); + + clk_disable(msm_uport->clk); /* to balance local clk_enable() */ + if (msm_uport->clk_state != MSM_HS_CLK_OFF) { + wake_unlock(&msm_uport->dma_wake_lock); + clk_disable(msm_uport->clk); /* to balance clk_state */ + } + msm_uport->clk_state = MSM_HS_CLK_PORT_OFF; + + dma_unmap_single(uport->dev, msm_uport->tx.dma_base, + UART_XMIT_SIZE, DMA_TO_DEVICE); + + spin_unlock_irqrestore(&uport->lock, flags); + + if (cancel_work_sync(&msm_uport->rx.tty_work)) + msm_hs_tty_flip_buffer_work(&msm_uport->rx.tty_work); +} + +static void __exit msm_serial_hs_exit(void) +{ + printk(KERN_INFO "msm_serial_hs module removed\n"); + platform_driver_unregister(&msm_serial_hs_platform_driver); + uart_unregister_driver(&msm_hs_driver); + destroy_workqueue(msm_hs_workqueue); +} + +static struct platform_driver msm_serial_hs_platform_driver = { + .probe = msm_hs_probe, + .remove = msm_hs_remove, + .driver = { + .name = "msm_serial_hs", + }, +}; + +static struct uart_driver msm_hs_driver = { + .owner = THIS_MODULE, + .driver_name = "msm_serial_hs", + .dev_name = "ttyHS", + .nr = UARTDM_NR, + .cons = 0, +}; + +static struct uart_ops msm_hs_ops = { + .tx_empty = msm_hs_tx_empty, + .set_mctrl = msm_hs_set_mctrl_locked, + .get_mctrl = msm_hs_get_mctrl_locked, + .stop_tx = msm_hs_stop_tx_locked, + .start_tx = msm_hs_start_tx_locked, + .stop_rx = msm_hs_stop_rx_locked, + .enable_ms = msm_hs_enable_ms_locked, + .break_ctl = msm_hs_break_ctl, + .startup = msm_hs_startup, + .shutdown = msm_hs_shutdown, + .set_termios = msm_hs_set_termios, + .pm = msm_hs_pm, + .type = msm_hs_type, + .config_port = msm_hs_config_port, + .release_port = msm_hs_release_port, + .request_port = msm_hs_request_port, +}; + +module_init(msm_serial_hs_init); +module_exit(msm_serial_hs_exit); +MODULE_DESCRIPTION("High Speed UART Driver for the MSM chipset"); +MODULE_VERSION("1.2"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/tty/serial/msm_serial_hs.h b/drivers/tty/serial/msm_serial_hs.h new file mode 100644 index 0000000000000..fe17eeccdb0a4 --- 
/dev/null +++ b/drivers/tty/serial/msm_serial_hs.h @@ -0,0 +1,34 @@ +/* drivers/serial/msm_serial_hs.h + * + * MSM High speed uart driver + * + * Copyright (c) 2007-2008 QUALCOMM Incorporated. + * Copyright (c) 2008 QUALCOMM USA, INC. + * + * All source code in this file is licensed under the following license + * except where indicated. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can find it at http://www.fsf.org + */ + +#ifndef __MSM_UART_HS_H__ +#define __MSM_UART_HS_H__ + +enum msm_uart_hs_pm_e { + MSM_UART_HS_POWER_DOWN, + MSM_UART_HS_POWER_UP +}; +unsigned int msm_uart_hs_tx_empty(int line); +void msm_uart_hs_safe_pm(int line, enum msm_uart_hs_pm_e mode); + +#endif diff --git a/drivers/tty/serial/msm_serial_hs_hwreg.h b/drivers/tty/serial/msm_serial_hs_hwreg.h new file mode 100644 index 0000000000000..df596781a5ab8 --- /dev/null +++ b/drivers/tty/serial/msm_serial_hs_hwreg.h @@ -0,0 +1,149 @@ +/* drivers/serial/msm_serial_hs_hwreg.h + * + * Copyright (c) 2007-2008 QUALCOMM Incorporated. + * Copyright (c) 2008 QUALCOMM USA, INC. + * + * All source code in this file is licensed under the following license + * except where indicated. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. 
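/*
 * Hypothetical in-kernel client of the msm_serial_hs.h interface declared
 * above (for example a Bluetooth low-power driver).  The caller, the port
 * line number and the polling loop are illustrative assumptions, not code
 * from this patch; msm_uart_hs_tx_empty() is assumed to return nonzero once
 * the transmitter has drained.
 */
#include <linux/delay.h>
#include "msm_serial_hs.h"

static void example_uart_sleep(int uart_line, int sleeping)
{
        if (sleeping) {
                /* let the TX FIFO drain before powering the port down */
                while (!msm_uart_hs_tx_empty(uart_line))
                        msleep(1);
                msm_uart_hs_safe_pm(uart_line, MSM_UART_HS_POWER_DOWN);
        } else {
                msm_uart_hs_safe_pm(uart_line, MSM_UART_HS_POWER_UP);
        }
}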
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can find it at http://www.fsf.org + */ + +#ifndef MSM_SERIAL_HS_HWREG_H +#define MSM_SERIAL_HS_HWREG_H + +#define UARTDM_MR1_ADDR 0x0 +#define UARTDM_MR2_ADDR 0x4 + +/* write only register */ +#define UARTDM_CSR_ADDR 0x8 + +/* write only register */ +#define UARTDM_TF_ADDR 0x70 +#define UARTDM_TF2_ADDR 0x74 +#define UARTDM_TF3_ADDR 0x78 +#define UARTDM_TF4_ADDR 0x7C + +/* write only register */ +#define UARTDM_CR_ADDR 0x10 +/* write only register */ +#define UARTDM_IMR_ADDR 0x14 + +#define UARTDM_IPR_ADDR 0x18 +#define UARTDM_TFWR_ADDR 0x1c +#define UARTDM_RFWR_ADDR 0x20 +#define UARTDM_HCR_ADDR 0x24 +#define UARTDM_DMRX_ADDR 0x34 +#define UARTDM_IRDA_ADDR 0x38 +#define UARTDM_DMEN_ADDR 0x3c + +/* UART_DM_NO_CHARS_FOR_TX */ +#define UARTDM_NCF_TX_ADDR 0x40 + +#define UARTDM_BADR_ADDR 0x44 + +#define UARTDM_SIM_CFG_ADDR 0x80 + +/* Read Only register */ +#define UARTDM_SR_ADDR 0x8 + +/* Read Only register */ +#define UARTDM_RF_ADDR 0x70 +#define UARTDM_RF2_ADDR 0x74 +#define UARTDM_RF3_ADDR 0x78 +#define UARTDM_RF4_ADDR 0x7C + +/* Read Only register */ +#define UARTDM_MISR_ADDR 0x10 + +/* Read Only register */ +#define UARTDM_ISR_ADDR 0x14 +#define UARTDM_RX_TOTAL_SNAP_ADDR 0x38 + +#define UARTDM_RXFS_ADDR 0x50 + +/* Register field Mask Mapping */ +#define UARTDM_SR_PAR_FRAME_BMSK BIT(5) +#define UARTDM_SR_OVERRUN_BMSK BIT(4) +#define UARTDM_SR_TXEMT_BMSK BIT(3) +#define UARTDM_SR_TXRDY_BMSK BIT(2) +#define UARTDM_SR_RXRDY_BMSK BIT(0) + +#define UARTDM_CR_TX_DISABLE_BMSK BIT(3) +#define UARTDM_CR_RX_DISABLE_BMSK BIT(1) +#define UARTDM_CR_TX_EN_BMSK BIT(2) +#define UARTDM_CR_RX_EN_BMSK BIT(0) + +/* UARTDM_CR channel_comman bit value (register field is bits 8:4) */ +#define RESET_RX 0x10 +#define RESET_TX 0x20 +#define RESET_ERROR_STATUS 0x30 +#define RESET_BREAK_INT 0x40 +#define START_BREAK 0x50 +#define STOP_BREAK 0x60 +#define RESET_CTS 0x70 +#define RESET_STALE_INT 0x80 +#define RFR_LOW 0xD0 +#define RFR_HIGH 0xE0 +#define CR_PROTECTION_EN 0x100 +#define STALE_EVENT_ENABLE 0x500 +#define STALE_EVENT_DISABLE 0x600 +#define FORCE_STALE_EVENT 0x400 +#define CLEAR_TX_READY 0x300 +#define RESET_TX_ERROR 0x800 +#define RESET_TX_DONE 0x810 + +#define UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK 0xffffff00 +#define UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK 0x3f +#define UARTDM_MR1_CTS_CTL_BMSK 0x40 +#define UARTDM_MR1_RX_RDY_CTL_BMSK 0x80 + +#define UARTDM_MR2_ERROR_MODE_BMSK 0x40 +#define UARTDM_MR2_BITS_PER_CHAR_BMSK 0x30 + +/* bits per character configuration */ +#define FIVE_BPC (0 << 4) +#define SIX_BPC (1 << 4) +#define SEVEN_BPC (2 << 4) +#define EIGHT_BPC (3 << 4) + +#define UARTDM_MR2_STOP_BIT_LEN_BMSK 0xc +#define STOP_BIT_ONE (1 << 2) +#define STOP_BIT_TWO (3 << 2) + +#define UARTDM_MR2_PARITY_MODE_BMSK 0x3 + +/* Parity configuration */ +#define NO_PARITY 0x0 +#define EVEN_PARITY 0x1 +#define ODD_PARITY 0x2 +#define SPACE_PARITY 0x3 + +#define UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK 0xffffff80 +#define UARTDM_IPR_STALE_LSB_BMSK 0x1f + +/* These can be used for both ISR and IMR register */ +#define UARTDM_ISR_TX_READY_BMSK BIT(7) +#define UARTDM_ISR_CURRENT_CTS_BMSK BIT(6) +#define UARTDM_ISR_DELTA_CTS_BMSK BIT(5) +#define UARTDM_ISR_RXLEV_BMSK BIT(4) +#define UARTDM_ISR_RXSTALE_BMSK BIT(3) +#define UARTDM_ISR_RXBREAK_BMSK BIT(2) +#define UARTDM_ISR_RXHUNT_BMSK BIT(1) +#define UARTDM_ISR_TXLEV_BMSK BIT(0) + +/* Field definitions for UART_DM_DMEN*/ +#define UARTDM_TX_DM_EN_BMSK 0x1 +#define 
UARTDM_RX_DM_EN_BMSK 0x2 + +#endif /* MSM_SERIAL_HS_HWREG_H */ diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 4ab49d4eebf4a..83589f49efde5 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -297,6 +297,8 @@ static void acm_ctrl_irq(struct urb *urb) if (!ACM_READY(acm)) goto exit; + usb_mark_last_busy(acm->dev); + data = (unsigned char *)(dr + 1); switch (dr->bNotificationType) { case USB_CDC_NOTIFY_NETWORK_CONNECTION: @@ -336,7 +338,6 @@ static void acm_ctrl_irq(struct urb *urb) break; } exit: - usb_mark_last_busy(acm->dev); retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(&urb->dev->dev, "%s - usb_submit_urb failed with " @@ -533,6 +534,8 @@ static void acm_softint(struct work_struct *work) if (!ACM_READY(acm)) return; tty = tty_port_tty_get(&acm->port); + if (!tty) + return; tty_wakeup(tty); tty_kref_put(tty); } @@ -646,8 +649,10 @@ static void acm_port_down(struct acm *acm) usb_kill_urb(acm->ctrlurb); for (i = 0; i < ACM_NW; i++) usb_kill_urb(acm->wb[i].urb); + tasklet_disable(&acm->urb_task); for (i = 0; i < nr; i++) usb_kill_urb(acm->ru[i].urb); + tasklet_enable(&acm->urb_task); acm->control->needs_remote_wakeup = 0; usb_autopm_put_interface(acm->control); } @@ -941,7 +946,7 @@ static int acm_probe(struct usb_interface *intf, u8 ac_management_function = 0; u8 call_management_function = 0; int call_interface_num = -1; - int data_interface_num; + int data_interface_num = -1; unsigned long quirks; int num_rx_buf; int i; @@ -1025,7 +1030,11 @@ static int acm_probe(struct usb_interface *intf, if (!union_header) { if (call_interface_num > 0) { dev_dbg(&intf->dev, "No union descriptor, using call management descriptor\n"); - data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = call_interface_num)); + /* quirks for Droids MuIn LCD */ + if (quirks & NO_DATA_INTERFACE) + data_interface = usb_ifnum_to_if(usb_dev, 0); + else + data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = call_interface_num)); control_interface = intf; } else { if (intf->cur_altsetting->desc.bNumEndpoints != 3) { @@ -1617,6 +1626,11 @@ static const struct usb_device_id acm_ids[] = { .driver_info = NOT_A_MODEM, }, + /* Support for Droids MuIn LCD */ + { USB_DEVICE(0x04d8, 0x000b), + .driver_info = NO_DATA_INTERFACE, + }, + /* control interfaces without any protocol set */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, USB_CDC_PROTO_NONE) }, diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h index 5eeb570b9a617..a2446d6caf05e 100644 --- a/drivers/usb/class/cdc-acm.h +++ b/drivers/usb/class/cdc-acm.h @@ -137,3 +137,4 @@ struct acm { #define SINGLE_RX_URB 2 #define NO_CAP_LINE 4 #define NOT_A_MODEM 8 +#define NO_DATA_INTERFACE 16 diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 47085e5879abb..a97c018dd4198 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -281,7 +281,7 @@ static void cleanup(struct wdm_device *desc) desc->sbuf, desc->validity->transfer_dma); usb_free_coherent(interface_to_usbdev(desc->intf), - desc->wMaxCommand, + desc->bMaxPacketSize0, desc->inbuf, desc->response->transfer_dma); kfree(desc->orq); diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c index a3d2e2399655b..96fdfb815f895 100644 --- a/drivers/usb/core/devices.c +++ b/drivers/usb/core/devices.c @@ -221,7 +221,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end, break; case USB_ENDPOINT_XFER_INT: type = "Int."; - if (speed == 
USB_SPEED_HIGH) + if (speed == USB_SPEED_HIGH || speed == USB_SPEED_SUPER) interval = 1 << (desc->bInterval - 1); else interval = desc->bInterval; @@ -229,7 +229,8 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end, default: /* "can't happen" */ return start; } - interval *= (speed == USB_SPEED_HIGH) ? 125 : 1000; + interval *= (speed == USB_SPEED_HIGH || + speed == USB_SPEED_SUPER) ? 125 : 1000; if (interval % 1000) unit = 'u'; else { @@ -542,8 +543,9 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes, if (level == 0) { int max; - /* high speed reserves 80%, full/low reserves 90% */ - if (usbdev->speed == USB_SPEED_HIGH) + /* super/high speed reserves 80%, full/low reserves 90% */ + if (usbdev->speed == USB_SPEED_HIGH || + usbdev->speed == USB_SPEED_SUPER) max = 800; else max = FRAME_TIME_MAX_USECS_ALLOC; diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index a7131ad630f9a..37518dfdeb987 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -802,7 +802,7 @@ static int proc_control(struct dev_state *ps, void __user *arg) tbuf, ctrl.wLength, tmo); usb_lock_device(dev); snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE, - tbuf, i); + tbuf, max(i, 0)); if ((i > 0) && ctrl.wLength) { if (copy_to_user(ctrl.data, tbuf, i)) { free_page((unsigned long)tbuf); diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index f71e8e307e0f0..d37088591d9af 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c @@ -363,8 +363,7 @@ static int check_root_hub_suspended(struct device *dev) struct pci_dev *pci_dev = to_pci_dev(dev); struct usb_hcd *hcd = pci_get_drvdata(pci_dev); - if (!(hcd->state == HC_STATE_SUSPENDED || - hcd->state == HC_STATE_HALT)) { + if (HCD_RH_RUNNING(hcd)) { dev_warn(dev, "Root hub is not suspended\n"); return -EBUSY; } @@ -386,7 +385,7 @@ static int suspend_common(struct device *dev, bool do_wakeup) if (retval) return retval; - if (hcd->driver->pci_suspend) { + if (hcd->driver->pci_suspend && !HCD_DEAD(hcd)) { /* Optimization: Don't suspend if a root-hub wakeup is * pending and it would cause the HCD to wake up anyway. */ @@ -427,7 +426,7 @@ static int resume_common(struct device *dev, int event) struct usb_hcd *hcd = pci_get_drvdata(pci_dev); int retval; - if (hcd->state != HC_STATE_SUSPENDED) { + if (HCD_RH_RUNNING(hcd)) { dev_dbg(dev, "can't resume, not suspended!\n"); return 0; } @@ -442,7 +441,7 @@ static int resume_common(struct device *dev, int event) clear_bit(HCD_FLAG_SAW_IRQ, &hcd->flags); - if (hcd->driver->pci_resume) { + if (hcd->driver->pci_resume && !HCD_DEAD(hcd)) { if (event != PM_EVENT_AUTO_RESUME) wait_for_companions(pci_dev, hcd); @@ -475,10 +474,10 @@ static int hcd_pci_suspend_noirq(struct device *dev) pci_save_state(pci_dev); - /* If the root hub is HALTed rather than SUSPENDed, + /* If the root hub is dead rather than suspended, * disallow remote wakeup. */ - if (hcd->state == HC_STATE_HALT) + if (HCD_DEAD(hcd)) device_set_wakeup_enable(dev, 0); dev_dbg(dev, "wakeup: %d\n", device_may_wakeup(dev)); diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index e935f71d7a346..a27dd223a3334 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -983,7 +983,7 @@ static int register_root_hub(struct usb_hcd *hcd) spin_unlock_irq (&hcd_root_hub_lock); /* Did the HC die before the root hub was registered? 
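/*
 * Orientation note for the hcd-pci.c and hcd.c hunks above and below: the
 * hcd->state comparisons are replaced by the flag helpers from
 * <linux/usb/hcd.h>, which test bits in hcd->flags (roughly
 * (hcd)->flags & (1U << HCD_FLAG_DEAD) and the RH_RUNNING equivalent,
 * paraphrased rather than quoted).  A suspend-path check written against
 * those helpers looks like this illustrative sketch:
 */
#include <linux/usb/hcd.h>

static int example_ok_to_suspend(struct usb_hcd *hcd)
{
        /* a dead controller is simply skipped; a running root hub blocks */
        if (HCD_DEAD(hcd))
                return 1;
        return !HCD_RH_RUNNING(hcd);
}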
*/ - if (hcd->state == HC_STATE_HALT) + if (HCD_DEAD(hcd)) usb_hc_died (hcd); /* This time clean up */ } @@ -1089,13 +1089,10 @@ int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb) * Check the host controller's state and add the URB to the * endpoint's queue. */ - switch (hcd->state) { - case HC_STATE_RUNNING: - case HC_STATE_RESUMING: + if (HCD_RH_RUNNING(hcd)) { urb->unlinked = 0; list_add_tail(&urb->urb_list, &urb->ep->urb_list); - break; - default: + } else { rc = -ESHUTDOWN; goto done; } @@ -1888,7 +1885,7 @@ void usb_free_streams(struct usb_interface *interface, /* Streams only apply to bulk endpoints. */ for (i = 0; i < num_eps; i++) - if (!usb_endpoint_xfer_bulk(&eps[i]->desc)) + if (!eps[i] || !usb_endpoint_xfer_bulk(&eps[i]->desc)) return; hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags); @@ -1913,7 +1910,7 @@ int usb_hcd_get_frame_number (struct usb_device *udev) { struct usb_hcd *hcd = bus_to_hcd(udev->bus); - if (!HC_IS_RUNNING (hcd->state)) + if (!HCD_RH_RUNNING(hcd)) return -ESHUTDOWN; return hcd->driver->get_frame_number (hcd); } @@ -1930,9 +1927,15 @@ int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg) dev_dbg(&rhdev->dev, "bus %s%s\n", (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "suspend"); + if (HCD_DEAD(hcd)) { + dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "suspend"); + return 0; + } + if (!hcd->driver->bus_suspend) { status = -ENOENT; } else { + clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); hcd->state = HC_STATE_QUIESCING; status = hcd->driver->bus_suspend(hcd); } @@ -1940,7 +1943,12 @@ int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg) usb_set_device_state(rhdev, USB_STATE_SUSPENDED); hcd->state = HC_STATE_SUSPENDED; } else { - hcd->state = old_state; + spin_lock_irq(&hcd_root_hub_lock); + if (!HCD_DEAD(hcd)) { + set_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); + hcd->state = old_state; + } + spin_unlock_irq(&hcd_root_hub_lock); dev_dbg(&rhdev->dev, "bus %s fail, err %d\n", "suspend", status); } @@ -1955,9 +1963,13 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg) dev_dbg(&rhdev->dev, "usb %s%s\n", (msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume"); + if (HCD_DEAD(hcd)) { + dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "resume"); + return 0; + } if (!hcd->driver->bus_resume) return -ENOENT; - if (hcd->state == HC_STATE_RUNNING) + if (HCD_RH_RUNNING(hcd)) return 0; hcd->state = HC_STATE_RESUMING; @@ -1966,10 +1978,15 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg) if (status == 0) { /* TRSMRCY = 10 msec */ msleep(10); - usb_set_device_state(rhdev, rhdev->actconfig - ? USB_STATE_CONFIGURED - : USB_STATE_ADDRESS); - hcd->state = HC_STATE_RUNNING; + spin_lock_irq(&hcd_root_hub_lock); + if (!HCD_DEAD(hcd)) { + usb_set_device_state(rhdev, rhdev->actconfig + ? 
USB_STATE_CONFIGURED + : USB_STATE_ADDRESS); + set_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); + hcd->state = HC_STATE_RUNNING; + } + spin_unlock_irq(&hcd_root_hub_lock); } else { hcd->state = old_state; dev_dbg(&rhdev->dev, "bus %s fail, err %d\n", @@ -2080,15 +2097,12 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd) */ local_irq_save(flags); - if (unlikely(hcd->state == HC_STATE_HALT || !HCD_HW_ACCESSIBLE(hcd))) { + if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd))) { rc = IRQ_NONE; } else if (hcd->driver->irq(hcd) == IRQ_NONE) { rc = IRQ_NONE; } else { set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags); - - if (unlikely(hcd->state == HC_STATE_HALT)) - usb_hc_died(hcd); rc = IRQ_HANDLED; } @@ -2114,6 +2128,8 @@ void usb_hc_died (struct usb_hcd *hcd) dev_err (hcd->self.controller, "HC died; cleaning up\n"); spin_lock_irqsave (&hcd_root_hub_lock, flags); + clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); + set_bit(HCD_FLAG_DEAD, &hcd->flags); if (hcd->rh_registered) { clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); @@ -2256,6 +2272,12 @@ int usb_add_hcd(struct usb_hcd *hcd, */ device_init_wakeup(&rhdev->dev, 1); + /* HCD_FLAG_RH_RUNNING doesn't matter until the root hub is + * registered. But since the controller can die at any time, + * let's initialize the flag before touching the hardware. + */ + set_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); + /* "reset" is misnamed; its role is now one-time init. the controller * should already have been reset (and boot firmware kicked off etc). */ @@ -2323,6 +2345,7 @@ int usb_add_hcd(struct usb_hcd *hcd, return retval; error_create_attr_group: + clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); if (HC_IS_RUNNING(hcd->state)) hcd->state = HC_STATE_QUIESCING; spin_lock_irq(&hcd_root_hub_lock); @@ -2375,6 +2398,7 @@ void usb_remove_hcd(struct usb_hcd *hcd) usb_get_dev(rhdev); sysfs_remove_group(&rhdev->dev.kobj, &usb_bus_attr_group); + clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); if (HC_IS_RUNNING (hcd->state)) hcd->state = HC_STATE_QUIESCING; diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index c14fc082864f1..ae334b067c13e 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c @@ -366,7 +366,16 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) if (xfertype == USB_ENDPOINT_XFER_ISOC) { int n, len; - /* FIXME SuperSpeed isoc endpoints have up to 16 bursts */ + /* SuperSpeed isoc endpoints have up to 16 bursts of up to + * 3 packets each + */ + if (dev->speed == USB_SPEED_SUPER) { + int burst = 1 + ep->ss_ep_comp.bMaxBurst; + int mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes); + max *= burst; + max *= mult; + } + /* "high bandwidth" mode, 1-3 packets/uframe? */ if (dev->speed == USB_SPEED_HIGH) { int mult = 1 + ((max >> 11) & 0x03); diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 1eeae810a65ae..3af4afce538ed 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -563,6 +563,28 @@ config USB_CI13XXX_MSM # LAST -- dummy/emulated controller # +config USB_GADGET_MSM_72K + boolean "MSM 72K Device Controller" + depends on ARCH_MSM + select USB_GADGET_SELECTED + select USB_GADGET_DUALSPEED + help + USB gadget driver for Qualcomm MSM 72K architecture. + + Say "y" to link the driver statically, or "m" to build a + dynamically linked module called "msm72k" and force all + gadget drivers to also be dynamically linked. 
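/*
 * The usb_submit_urb() hunk above caps each isochronous frame descriptor
 * at wMaxPacketSize * (bMaxBurst + 1) * mult for SuperSpeed endpoints,
 * where mult is taken from the low two bits of bmAttributes plus one
 * (USB_SS_MULT).  Stand-alone arithmetic sketch with made-up descriptor
 * values:
 */
static unsigned int example_ss_isoc_limit(unsigned int maxp,
                                          unsigned int bmaxburst,
                                          unsigned int mult_bits)
{
        /* e.g. 1024 * (15 + 1) * (2 + 1) = 49152 bytes per service interval */
        return maxp * (bmaxburst + 1) * (mult_bits + 1);
}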
+ +config USB_MSM_72K + tristate + depends on USB_GADGET_MSM_72K + default USB_GADGET + select USB_GADGET_SELECTED + +config USB_MSM_72K_HTC + boolean "Use HTC driver variant" + depends on USB_MSM_72K + config USB_GADGET_DUMMY_HCD boolean "Dummy HCD (DEVELOPMENT)" depends on USB=y || (USB=m && USB_GADGET=m) @@ -923,6 +945,12 @@ config USB_ANDROID_ADB help Provides adb function for android gadget driver. +config USB_ANDROID_DIAG + boolean "USB MSM7K Diag Function" + depends on USB_ANDROID + help + Qualcomm diagnostics interface support. + config USB_ANDROID_MASS_STORAGE boolean "Android gadget mass storage function" depends on USB_ANDROID && SWITCH @@ -951,6 +979,22 @@ config USB_ANDROID_RNDIS_WCEIS If you enable this option, the device is no longer CDC ethernet compatible. + +config USB_ANDROID_ACCESSORY + boolean "Android USB accessory function" + depends on USB_ANDROID + help + Provides Android USB Accessory support for android gadget driver. + +config USB_CSW_HACK + boolean "USB Mass storage csw hack Feature" + depends on USB_ANDROID + help + This csw hack feature is for increasing the performance of the mass + storage + + default y + config USB_CDC_COMPOSITE tristate "CDC Composite Device (Ethernet and ACM)" depends on NET @@ -1072,4 +1116,28 @@ config USB_G_WEBCAM endchoice +if USB_MSM_72K_HTC + +config USB_ACCESSORY_DETECT + boolean "USB ACCESSORY DETECT" + depends on USB_ANDROID + default n + +config USB_ACCESSORY_DETECT_BY_ADC + boolean "DETECT USB ACCESSORY BY PMIC ADC" + depends on USB_ANDROID && USB_ACCESSORY_DETECT + default n + +config DOCK_ACCESSORY_DETECT + boolean "DOCK ACCESSORY DETECT" + depends on USB_ANDROID + default n + +config USB_BYPASS_VBUS_NOTIFY + boolean "USB BYPASS VBUS NOTIFY" + depends on USB_ANDROID + default n + +endif # USB_MSM_72K_HTC + endif # USB_GADGET diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile index aa4669477a0d3..2d00a918b5718 100644 --- a/drivers/usb/gadget/Makefile +++ b/drivers/usb/gadget/Makefile @@ -21,13 +21,14 @@ fsl_usb2_udc-$(CONFIG_ARCH_MXC) += fsl_mxc_udc.o obj-$(CONFIG_USB_M66592) += m66592-udc.o obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o obj-$(CONFIG_USB_FSL_QE) += fsl_qe_udc.o -obj-$(CONFIG_USB_CI13XXX_PCI) += ci13xxx_pci.o +obj-$(CONFIG_USB_CI13XXX) += ci13xxx_udc.o obj-$(CONFIG_USB_S3C_HSOTG) += s3c-hsotg.o obj-$(CONFIG_USB_LANGWELL) += langwell_udc.o -obj-$(CONFIG_USB_EG20T) += pch_udc.o -obj-$(CONFIG_USB_PXA_U2O) += mv_udc.o -mv_udc-y := mv_udc_core.o mv_udc_phy.o -obj-$(CONFIG_USB_CI13XXX_MSM) += ci13xxx_msm.o +ifdef CONFIG_USB_MSM_72K_HTC + obj-$(CONFIG_USB_MSM_72K_HTC) += msm72k_udc_htc.o +else + obj-$(CONFIG_USB_MSM_72K) += msm72k_udc.o +endif # # USB gadget drivers @@ -72,3 +73,7 @@ obj-$(CONFIG_USB_ANDROID_ADB) += f_adb.o obj-$(CONFIG_USB_ANDROID_MASS_STORAGE) += f_mass_storage.o obj-$(CONFIG_USB_ANDROID_MTP) += f_mtp.o obj-$(CONFIG_USB_ANDROID_RNDIS) += f_rndis.o u_ether.o +obj-$(CONFIG_USB_ANDROID_ACCESSORY) += f_accessory.o + +# MSM specific +obj-$(CONFIG_USB_ANDROID_DIAG) += diag.o diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c index 3d3fc795c4c72..61fd129acfaf5 100644 --- a/drivers/usb/gadget/android.c +++ b/drivers/usb/gadget/android.c @@ -65,6 +65,7 @@ struct android_dev { int num_functions; char **functions; + int vendor_id; int product_id; int version; }; @@ -254,6 +255,22 @@ static int product_matches_functions(struct android_usb_product *p) return 1; } +static int get_vendor_id(struct android_dev *dev) +{ + struct android_usb_product *p = dev->products; + int 
count = dev->num_products; + int i; + + if (p) { + for (i = 0; i < count; i++, p++) { + if (p->vendor_id && product_matches_functions(p)) + return p->vendor_id; + } + } + /* use default vendor ID */ + return dev->vendor_id; +} + static int get_product_id(struct android_dev *dev) { struct android_usb_product *p = dev->products; @@ -274,7 +291,7 @@ static int android_bind(struct usb_composite_dev *cdev) { struct android_dev *dev = _android_dev; struct usb_gadget *gadget = cdev->gadget; - int gcnum, id, product_id, ret; + int gcnum, id, ret; printk(KERN_INFO "android_bind\n"); @@ -324,8 +341,9 @@ static int android_bind(struct usb_composite_dev *cdev) usb_gadget_set_selfpowered(gadget); dev->cdev = cdev; - product_id = get_product_id(dev); - device_desc.idProduct = __constant_cpu_to_le16(product_id); + device_desc.idVendor = __constant_cpu_to_le16(get_vendor_id(dev)); + device_desc.idProduct = __constant_cpu_to_le16(get_product_id(dev)); + cdev->desc.idVendor = device_desc.idVendor; cdev->desc.idProduct = device_desc.idProduct; return 0; @@ -394,7 +412,6 @@ void android_enable_function(struct usb_function *f, int enable) { struct android_dev *dev = _android_dev; int disable = !enable; - int product_id; if (!!f->disabled != disable) { usb_function_set_enabled(f, !disable); @@ -413,17 +430,43 @@ void android_enable_function(struct usb_function *f, int enable) } } #endif +#ifdef CONFIG_USB_ANDROID_ACCESSORY + if (!strcmp(f->name, "accessory") && enable) { + struct usb_function *func; + + /* disable everything else (and keep adb for now) */ + list_for_each_entry(func, &android_config_driver.functions, list) { + if (strcmp(func->name, "accessory") + && strcmp(func->name, "adb")) { + usb_function_set_enabled(func, 0); + } + } + } +#endif update_dev_desc(dev); - product_id = get_product_id(dev); - device_desc.idProduct = __constant_cpu_to_le16(product_id); - if (dev->cdev) + device_desc.idVendor = __constant_cpu_to_le16(get_vendor_id(dev)); + device_desc.idProduct = __constant_cpu_to_le16(get_product_id(dev)); + if (dev->cdev) { + dev->cdev->desc.idVendor = device_desc.idVendor; dev->cdev->desc.idProduct = device_desc.idProduct; + } usb_composite_force_reset(dev->cdev); } } +void android_set_serialno(char *serialno) +{ + strings_dev[STRING_SERIAL_IDX].s = serialno; +} + +int android_get_model_id(void) +{ + struct android_dev *dev = _android_dev; + return dev->product_id; +} + static int android_probe(struct platform_device *pdev) { struct android_usb_platform_data *pdata = pdev->dev.platform_data; @@ -436,9 +479,11 @@ static int android_probe(struct platform_device *pdev) dev->num_products = pdata->num_products; dev->functions = pdata->functions; dev->num_functions = pdata->num_functions; - if (pdata->vendor_id) + if (pdata->vendor_id) { + dev->vendor_id = pdata->vendor_id; device_desc.idVendor = __constant_cpu_to_le16(pdata->vendor_id); + } if (pdata->product_id) { dev->product_id = pdata->product_id; device_desc.idProduct = diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c index bdec36acd0fa4..c3bb5ec393aaf 100644 --- a/drivers/usb/gadget/at91_udc.c +++ b/drivers/usb/gadget/at91_udc.c @@ -1767,7 +1767,7 @@ static int __init at91udc_probe(struct platform_device *pdev) } /* newer chips have more FIFO memory than rm9200 */ - if (cpu_is_at91sam9260()) { + if (cpu_is_at91sam9260() || cpu_is_at91sam9g20()) { udc->ep[0].maxpacket = 64; udc->ep[3].maxpacket = 64; udc->ep[4].maxpacket = 512; diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 
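/*
 * Hypothetical board data showing what the new get_vendor_id() above picks
 * from: entries without .vendor_id fall back to the platform default, while
 * an entry such as an accessory-mode product can override it.  The struct
 * and field names follow their use in android.c; the header path and the
 * numeric IDs are illustrative assumptions, not taken from this patch.
 */
#include <linux/kernel.h>
#include <linux/usb/android_composite.h>

static char *example_default_functions[]   = { "usb_mass_storage", "adb" };
static char *example_accessory_functions[] = { "accessory" };

static struct android_usb_product example_products[] = {
        {
                /* .vendor_id left 0: get_vendor_id() uses dev->vendor_id */
                .product_id     = 0x0c01,       /* illustrative PID */
                .num_functions  = ARRAY_SIZE(example_default_functions),
                .functions      = example_default_functions,
        },
        {
                .vendor_id      = 0x18d1,       /* per-product override */
                .product_id     = 0x2d00,
                .num_functions  = ARRAY_SIZE(example_accessory_functions),
                .functions      = example_accessory_functions,
        },
};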
b793304d7968c..05b7bf13a7c06 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1132,9 +1132,6 @@ static void composite_disconnect(struct usb_gadget *gadget) if (cdev->config) reset_config(cdev); - if (composite->disconnect) - composite->disconnect(cdev); - cdev->connected = 0; schedule_work(&cdev->switch_work); spin_unlock_irqrestore(&cdev->lock, flags); @@ -1200,7 +1197,6 @@ composite_unbind(struct usb_gadget *gadget) } switch_dev_unregister(&cdev->sw_connected); switch_dev_unregister(&cdev->sw_config); - device_remove_file(&gadget->dev, &dev_attr_suspended); kfree(cdev); set_gadget_data(gadget, NULL); composite = NULL; diff --git a/drivers/usb/gadget/diag.c b/drivers/usb/gadget/diag.c new file mode 100644 index 0000000000000..c100ed40d7678 --- /dev/null +++ b/drivers/usb/gadget/diag.c @@ -0,0 +1,935 @@ +/* + * Diag Function Device - Route DIAG frames between SMD and USB + * + * Copyright (C) 2007 Google, Inc. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#define NO_HDLC 1 +#define ROUTE_TO_USERSPACE 1 + +#if 1 +#define TRACE(tag,data,len,decode) do {} while(0) +#else +static void TRACE(const char *tag, const void *_data, int len, int decode) +{ + const unsigned char *data = _data; + int escape = 0; + + printk(KERN_INFO "%s", tag); + if (decode) { + while (len-- > 0) { + unsigned x = *data++; + if (x == 0x7e) { + printk(" $$"); + escape = 0; + continue; + } + if (x == 0x7d) { + escape = 1; + continue; + } + if (escape) { + escape = 0; + printk(" %02x", x ^ 0x20); + } else { + printk(" %02x", x); + } + } + } else { + while (len-- > 0) { + printk(" %02x", *data++); + } + printk(" $$"); + } + printk("\n"); +} +#endif + +#define HDLC_MAX 4096 + +#define TX_REQ_BUF_SZ 8192 +#define RX_REQ_BUF_SZ 8192 + +/* number of tx/rx requests to allocate */ +#define TX_REQ_NUM 4 +#define RX_REQ_NUM 4 + +struct diag_context +{ + struct usb_function function; + struct usb_composite_dev *cdev; + struct usb_ep *out; + struct usb_ep *in; + struct list_head tx_req_idle; + struct list_head rx_req_idle; + spinlock_t req_lock; +#if ROUTE_TO_USERSPACE + struct mutex user_lock; +#define ID_TABLE_SZ 10 /* keep this small */ + struct list_head rx_req_user; + wait_queue_head_t read_wq; + wait_queue_head_t write_wq; + char *user_read_buf; + uint32_t user_read_len; + char *user_readp; + bool opened; + + /* list of registered command ids to be routed to userspace */ + unsigned char id_table[ID_TABLE_SZ]; +#endif + smd_channel_t *ch; + int in_busy; +#ifdef CONFIG_ARCH_QSD8X50 + smd_channel_t *ch_dsp; + int in_busy_dsp; +#endif + int online; + + /* assembly buffer for USB->A9 HDLC frames */ + unsigned char hdlc_buf[HDLC_MAX]; + unsigned hdlc_count; + unsigned hdlc_escape; + + u64 tx_count; /* to smd */ + u64 rx_count; /* from smd */ + + int function_enable; +}; + +static struct usb_interface_descriptor diag_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bInterfaceNumber = 0, + 
.bNumEndpoints = 2, + .bInterfaceClass = 0xFF, + .bInterfaceSubClass = 0xFF, + .bInterfaceProtocol = 0xFF, +}; + +static struct usb_endpoint_descriptor diag_highspeed_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor diag_highspeed_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor diag_fullspeed_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_endpoint_descriptor diag_fullspeed_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_descriptor_header *fs_diag_descs[] = { + (struct usb_descriptor_header *) &diag_interface_desc, + (struct usb_descriptor_header *) &diag_fullspeed_in_desc, + (struct usb_descriptor_header *) &diag_fullspeed_out_desc, + NULL, +}; + +static struct usb_descriptor_header *hs_diag_descs[] = { + (struct usb_descriptor_header *) &diag_interface_desc, + (struct usb_descriptor_header *) &diag_highspeed_in_desc, + (struct usb_descriptor_header *) &diag_highspeed_out_desc, + NULL, +}; + +static struct diag_context _context; + +static inline struct diag_context *func_to_dev(struct usb_function *f) +{ + return container_of(f, struct diag_context, function); +} + +static void smd_try_to_send(struct diag_context *ctxt); +#ifdef CONFIG_ARCH_QSD8X50 +static void dsp_try_to_send(struct diag_context *ctxt); +#endif + +static void diag_queue_out(struct diag_context *ctxt); + +/* add a request to the tail of a list */ +static void req_put(struct diag_context *ctxt, struct list_head *head, + struct usb_request *req) +{ + unsigned long flags; + + spin_lock_irqsave(&ctxt->req_lock, flags); + list_add_tail(&req->list, head); + spin_unlock_irqrestore(&ctxt->req_lock, flags); +} + +/* remove a request from the head of a list */ +static struct usb_request *req_get(struct diag_context *ctxt, + struct list_head *head) +{ + struct usb_request *req = 0; + unsigned long flags; + + spin_lock_irqsave(&ctxt->req_lock, flags); + if (!list_empty(head)) { + req = list_first_entry(head, struct usb_request, list); + list_del(&req->list); + } + spin_unlock_irqrestore(&ctxt->req_lock, flags); + + return req; +} + +static void reqs_free(struct diag_context *ctxt, struct usb_ep *ep, + struct list_head *head) +{ + struct usb_request *req; + while ((req = req_get(ctxt, head))) { + kfree(req->buf); + usb_ep_free_request(ep, req); + } +} + +#if ROUTE_TO_USERSPACE +#define USB_DIAG_IOC_MAGIC 0xFF +#define USB_DIAG_FUNC_IOC_REGISTER_SET _IOW(USB_DIAG_IOC_MAGIC, 3, char *) + +static long diag_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct diag_context *ctxt = &_context; + void __user *argp = (void __user *)arg; + unsigned long flags; + unsigned char temp_id_table[ID_TABLE_SZ]; + + if (cmd != USB_DIAG_FUNC_IOC_REGISTER_SET) { + pr_err("%s: invalid cmd %d\n", __func__, _IOC_NR(cmd)); + return -EINVAL; + } + + if (copy_from_user(temp_id_table, (unsigned char *)argp, ID_TABLE_SZ)) + return -EFAULT; + + 
spin_lock_irqsave(&ctxt->req_lock, flags); + memcpy(ctxt->id_table, temp_id_table, ID_TABLE_SZ); + spin_unlock_irqrestore(&ctxt->req_lock, flags); + + return 0; +} + +static ssize_t diag_read(struct file *fp, char __user *buf, + size_t count, loff_t *pos) +{ + struct diag_context *ctxt = &_context; + struct usb_request *req = 0; + int ret = 0; + + mutex_lock(&ctxt->user_lock); + + if (ctxt->user_read_len && ctxt->user_readp) { + if (count > ctxt->user_read_len) + count = ctxt->user_read_len; + if (copy_to_user(buf, ctxt->user_readp, count)) + ret = -EFAULT; + else { + ctxt->user_readp += count; + ctxt->user_read_len -= count; + ret = count; + } + goto end; + } + + mutex_unlock(&ctxt->user_lock); + ret = wait_event_interruptible(ctxt->read_wq, + (req = req_get(ctxt, &ctxt->rx_req_user)) || !ctxt->online); + mutex_lock(&ctxt->user_lock); + if (ret < 0) { + pr_err("%s: wait_event_interruptible error %d\n", + __func__, ret); + goto end; + } + if (!ctxt->online) { + pr_err("%s: offline\n", __func__); + ret = -EIO; + goto end; + } + if (req) { + if (req->actual == 0) { + pr_info("%s: no data\n", __func__); + goto end; + } + if (count > req->actual) + count = req->actual; + if (copy_to_user(buf, req->buf, count)) { + ret = -EFAULT; + goto end; + } + req->actual -= count; + if (req->actual) { + memcpy(ctxt->user_read_buf, req->buf + count, req->actual); + ctxt->user_read_len = req->actual; + ctxt->user_readp = ctxt->user_read_buf; + } + ret = count; + } + +end: + if (req) + req_put(ctxt, &ctxt->rx_req_idle, req); + + mutex_unlock(&ctxt->user_lock); + return ret; +} + +static ssize_t diag_write(struct file *fp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct diag_context *ctxt = &_context; + struct usb_request *req = 0; + int ret = 0; + + ret = wait_event_interruptible(ctxt->write_wq, + ((req = req_get(ctxt, &ctxt->tx_req_idle)) || !ctxt->online)); + + mutex_lock(&ctxt->user_lock); + if (ret < 0) { + pr_err("%s: wait_event_interruptible error %d\n", + __func__, ret); + goto end; + } + + if (!ctxt->online) { + pr_err("%s: offline\n", __func__); + ret = -EIO; + goto end; + } + + if (count > TX_REQ_BUF_SZ) + count = TX_REQ_BUF_SZ; + + if (req) { + if (copy_from_user(req->buf, buf, count)) { + ret = -EFAULT; + goto end; + } + + req->length = count; + ret = usb_ep_queue(ctxt->in, req, GFP_ATOMIC); + if (ret < 0) { + pr_err("%s: usb_ep_queue error %d\n", __func__, ret); + goto end; + } + + ret = req->length; + } + +end: + if (req) + req_put(ctxt, &ctxt->tx_req_idle, req); + + mutex_unlock(&ctxt->user_lock); + return ret; +} + +static int diag_open(struct inode *ip, struct file *fp) +{ + struct diag_context *ctxt = &_context; + int rc = 0; + + mutex_lock(&ctxt->user_lock); + if (ctxt->opened) { + pr_err("%s: already opened\n", __func__); + rc = -EBUSY; + goto done; + } + ctxt->user_read_len = 0; + ctxt->user_readp = 0; + if (!ctxt->user_read_buf) { + ctxt->user_read_buf = kmalloc(RX_REQ_BUF_SZ, GFP_KERNEL); + if (!ctxt->user_read_buf) { + rc = -ENOMEM; + goto done; + } + } + ctxt->opened = true; + +done: + mutex_unlock(&ctxt->user_lock); + return rc; +} + +static int diag_release(struct inode *ip, struct file *fp) +{ + struct diag_context *ctxt = &_context; + + mutex_lock(&ctxt->user_lock); + ctxt->opened = false; + ctxt->user_read_len = 0; + ctxt->user_readp = 0; + if (ctxt->user_read_buf) { + kfree(ctxt->user_read_buf); + ctxt->user_read_buf = 0; + } + mutex_unlock(&ctxt->user_lock); + + return 0; +} + +static struct file_operations diag_fops = { + .owner = THIS_MODULE, + .read = 
diag_read, + .write = diag_write, + .open = diag_open, + .release = diag_release, + .unlocked_ioctl = diag_ioctl, +}; + +static struct miscdevice diag_device_fops = { + .minor = MISC_DYNAMIC_MINOR, + .name = "diag", + .fops = &diag_fops, +}; +#endif + +static void diag_in_complete(struct usb_ep *ept, struct usb_request *req) +{ + struct diag_context *ctxt = req->context; +#if ROUTE_TO_USERSPACE + char c; +#endif + + ctxt->in_busy = 0; + req_put(ctxt, &ctxt->tx_req_idle, req); + +#if ROUTE_TO_USERSPACE + c = *((char *)req->buf + req->actual - 1); + if (c == 0x7e) + wake_up(&ctxt->write_wq); +#endif + + smd_try_to_send(ctxt); +} + +#ifdef CONFIG_ARCH_QSD8X50 +static void diag_dsp_in_complete(struct usb_ep *ept, struct usb_request *req) +{ + struct diag_context *ctxt = req->context; + + ctxt->in_busy_dsp = 0; + req_put(ctxt, &ctxt->tx_req_idle, req); + dsp_try_to_send(ctxt); + wake_up(&ctxt->write_wq); +} +#endif + +static void diag_process_hdlc(struct diag_context *ctxt, void *_data, unsigned len) +{ + unsigned char *data = _data; + unsigned count = ctxt->hdlc_count; + unsigned escape = ctxt->hdlc_escape; + unsigned char *hdlc = ctxt->hdlc_buf; + + while (len-- > 0) { + unsigned char x = *data++; + if (x == 0x7E) { + if (count > 2) { + /* we're just ignoring the crc here */ + TRACE("PC>", hdlc, count - 2, 0); + if (ctxt->ch) + smd_write(ctxt->ch, hdlc, count - 2); + } + count = 0; + escape = 0; + } else if (x == 0x7D) { + escape = 1; + } else { + if (escape) { + x = x ^ 0x20; + escape = 0; + } + hdlc[count++] = x; + + /* discard frame if we overflow */ + if (count == HDLC_MAX) + count = 0; + } + } + + ctxt->hdlc_count = count; + ctxt->hdlc_escape = escape; +} + +#if ROUTE_TO_USERSPACE +static int if_route_to_userspace(struct diag_context *ctxt, unsigned int cmd_id) +{ + unsigned long flags; + int i; + + if (!ctxt->opened || cmd_id == 0) + return 0; + + /* command ids 0xfb..0xff are not used by msm diag; we steal these ids + * for communication between userspace tool and host test tool. + */ + if (cmd_id >= 0xfb && cmd_id <= 0xff) + return 1; + + spin_lock_irqsave(&ctxt->req_lock, flags); + for (i = 0; i < ARRAY_SIZE(ctxt->id_table); i++) + if (ctxt->id_table[i] == cmd_id) { + /* if the command id equals to any of registered ids, + * route to userspace to handle. 
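/*
 * Complement to diag_process_hdlc() above: the 0x7d/xor-0x20 escaping and
 * 0x7e delimiting that the decoder reverses.  CRC generation (the trailing
 * two bytes the decoder strips and ignores) is left out of this sketch;
 * this is stand-alone illustration, not code from the patch.
 */
#include <stddef.h>

static size_t example_hdlc_escape(const unsigned char *in, size_t len,
                                  unsigned char *out, size_t out_max)
{
        size_t n = 0;

        while (len-- > 0) {
                unsigned char c = *in++;

                if (c == 0x7e || c == 0x7d) {
                        if (n + 2 > out_max)
                                return 0;
                        out[n++] = 0x7d;        /* escape marker */
                        out[n++] = c ^ 0x20;
                } else {
                        if (n >= out_max)
                                return 0;
                        out[n++] = c;
                }
        }
        if (n >= out_max)
                return 0;
        out[n++] = 0x7e;                        /* frame terminator */
        return n;
}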
+ */ + spin_unlock_irqrestore(&ctxt->req_lock, flags); + return 1; + } + spin_unlock_irqrestore(&ctxt->req_lock, flags); + + return 0; +} +#endif + +static void diag_out_complete(struct usb_ep *ept, struct usb_request *req) +{ + struct diag_context *ctxt = req->context; + + if (req->status == 0) { +#if ROUTE_TO_USERSPACE + unsigned int cmd_id = *((unsigned char *)req->buf); + if (if_route_to_userspace(ctxt, cmd_id)) { + req_put(ctxt, &ctxt->rx_req_user, req); + wake_up(&ctxt->read_wq); + diag_queue_out(ctxt); + return; + } +#endif + +#if NO_HDLC + TRACE("PC>", req->buf, req->actual, 0); + if (ctxt->ch) + smd_write(ctxt->ch, req->buf, req->actual); +#else + diag_process_hdlc(ctxt, req->buf, req->actual); +#endif + ctxt->tx_count += req->actual; + } + + req_put(ctxt, &ctxt->rx_req_idle, req); + diag_queue_out(ctxt); +} + +static void diag_queue_out(struct diag_context *ctxt) +{ + struct usb_request *req; + int rc; + + req = req_get(ctxt, &ctxt->rx_req_idle); + if (!req) { + pr_err("%s: rx req queue - out of buffer\n", __func__); + return; + } + + req->complete = diag_out_complete; + req->context = ctxt; + req->length = RX_REQ_BUF_SZ; + + rc = usb_ep_queue(ctxt->out, req, GFP_ATOMIC); + if (rc < 0) { + pr_err("%s: usb_ep_queue failed: %d\n", __func__, rc); + req_put(ctxt, &ctxt->rx_req_idle, req); + } +} + +static void smd_try_to_send(struct diag_context *ctxt) +{ +again: + if (ctxt->ch && (!ctxt->in_busy)) { + int r = smd_read_avail(ctxt->ch); + + if (r > TX_REQ_BUF_SZ) { + return; + } + if (r > 0) { + struct usb_request *req; + req = req_get(ctxt, &ctxt->tx_req_idle); + if (!req) { + pr_err("%s: tx req queue is out of buffers\n", + __func__); + return; + } + smd_read(ctxt->ch, req->buf, r); + ctxt->rx_count += r; + + if (!ctxt->online) { +// printk("$$$ discard %d\n", r); + req_put(ctxt, &ctxt->tx_req_idle, req); + goto again; + } + req->complete = diag_in_complete; + req->context = ctxt; + req->length = r; + + TRACE("A9>", req->buf, r, 1); + ctxt->in_busy = 1; + r = usb_ep_queue(ctxt->in, req, GFP_ATOMIC); + if (r < 0) { + pr_err("%s: usb_ep_queue failed: %d\n", + __func__, r); + req_put(ctxt, &ctxt->tx_req_idle, req); + ctxt->in_busy = 0; + } + } + } +} + +static void smd_diag_notify(void *priv, unsigned event) +{ + struct diag_context *ctxt = priv; + smd_try_to_send(ctxt); +} + +#ifdef CONFIG_ARCH_QSD8X50 +static void dsp_try_to_send(struct diag_context *ctxt) +{ +again: + if (ctxt->ch_dsp && (!ctxt->in_busy_dsp)) { + int r = smd_read_avail(ctxt->ch_dsp); + + if (r > TX_REQ_BUF_SZ) { + return; + } + if (r > 0) { + struct usb_request *req; + req = req_get(ctxt, &ctxt->tx_req_idle); + if (!req) { + pr_err("%s: tx req queue is out of buffers\n", + __func__); + return; + } + smd_read(ctxt->ch_dsp, req->buf, r); + + if (!ctxt->online) { +// printk("$$$ discard %d\n", r); + req_put(ctxt, &ctxt->tx_req_idle, req); + goto again; + } + req->complete = diag_dsp_in_complete; + req->context = ctxt; + req->length = r; + + TRACE("Q6>", req->buf, r, 1); + ctxt->in_busy_dsp = 1; + r = usb_ep_queue(ctxt->in, req, GFP_ATOMIC); + if (r < 0) { + pr_err("%s: usb_ep_queue failed: %d\n", + __func__, r); + req_put(ctxt, &ctxt->tx_req_idle, req); + ctxt->in_busy_dsp = 0; + } + } + } +} + +static void dsp_diag_notify(void *priv, unsigned event) +{ + struct diag_context *ctxt = priv; + dsp_try_to_send(ctxt); +} +#endif + +static int __init create_bulk_endpoints(struct diag_context *ctxt, + struct usb_endpoint_descriptor *in_desc, + struct usb_endpoint_descriptor *out_desc) +{ + struct usb_composite_dev *cdev = 
ctxt->cdev; + struct usb_ep *ep; + struct usb_request *req; + int n; + + ep = usb_ep_autoconfig(cdev->gadget, in_desc); + if (!ep) { + DBG(cdev, "usb_ep_autoconfig for ep_in failed\n"); + return -ENODEV; + } + ctxt->in = ep; + + ep = usb_ep_autoconfig(cdev->gadget, out_desc); + if (!ep) { + return -ENODEV; + } + ctxt->out = ep; + + ctxt->tx_count = ctxt->rx_count = 0; + + for (n = 0; n < RX_REQ_NUM; n++) { + req = usb_ep_alloc_request(ctxt->out, GFP_KERNEL); + if (!req) { + DBG(cdev, "%s: usb_ep_alloc_request out of memory\n", + __func__); + goto rx_fail; + } + req->buf = kmalloc(RX_REQ_BUF_SZ, GFP_KERNEL); + if (!req->buf) { + DBG(cdev, "%s: kmalloc out of memory\n", __func__); + goto rx_fail; + } + req->context = ctxt; + req->complete = diag_out_complete; + req_put(ctxt, &ctxt->rx_req_idle, req); + } + + for (n = 0; n < TX_REQ_NUM; n++) { + req = usb_ep_alloc_request(ctxt->in, GFP_KERNEL); + if (!req) { + DBG(cdev, "%s: usb_ep_alloc_request out of memory\n", + __func__); + goto tx_fail; + } + req->buf = kmalloc(TX_REQ_BUF_SZ, GFP_KERNEL); + if (!req->buf) { + DBG(cdev, "%s: kmalloc out of memory\n", __func__); + goto tx_fail; + } + req->context = ctxt; + req->complete = diag_in_complete; + req_put(ctxt, &ctxt->tx_req_idle, req); + } + + return 0; + +tx_fail: + reqs_free(ctxt, ctxt->in, &ctxt->tx_req_idle); +rx_fail: + reqs_free(ctxt, ctxt->out, &ctxt->rx_req_idle); + return -ENOMEM; +} + +static int +diag_function_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct diag_context *ctxt = func_to_dev(f); + int id; + int ret; + + ctxt->cdev = cdev; + + /* allocate interface ID(s) */ + id = usb_interface_id(c, f); + if (id < 0) + return id; + diag_interface_desc.bInterfaceNumber = id; + + /* allocate endpoints */ + ret = create_bulk_endpoints(ctxt, &diag_fullspeed_in_desc, + &diag_fullspeed_out_desc); + if (ret) + return ret; + + /* support high speed hardware */ + if (gadget_is_dualspeed(c->cdev->gadget)) { + diag_highspeed_in_desc.bEndpointAddress = + diag_fullspeed_in_desc.bEndpointAddress; + diag_highspeed_out_desc.bEndpointAddress = + diag_fullspeed_out_desc.bEndpointAddress; + } + +#if ROUTE_TO_USERSPACE + misc_register(&diag_device_fops); +#endif + + return 0; +} + +static void +diag_function_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct diag_context *ctxt = func_to_dev(f); + reqs_free(ctxt, ctxt->out, &ctxt->rx_req_idle); + reqs_free(ctxt, ctxt->in, &ctxt->tx_req_idle); + +#if ROUTE_TO_USERSPACE + misc_deregister(&diag_device_fops); +#endif + + ctxt->tx_count = ctxt->rx_count = 0; +} + +static int diag_function_set_alt(struct usb_function *f, + unsigned intf, unsigned alt) +{ + struct diag_context *ctxt = func_to_dev(f); + struct usb_composite_dev *cdev = f->config->cdev; +#if ROUTE_TO_USERSPACE + struct usb_request *req; +#endif + int ret; + + ret = usb_ep_enable(ctxt->in, + ep_choose(cdev->gadget, + &diag_highspeed_in_desc, + &diag_fullspeed_in_desc)); + if (ret) + return ret; + ret = usb_ep_enable(ctxt->out, + ep_choose(cdev->gadget, + &diag_highspeed_out_desc, + &diag_fullspeed_out_desc)); + if (ret) { + usb_ep_disable(ctxt->in); + return ret; + } + ctxt->online = 1; + +#if ROUTE_TO_USERSPACE + /* recycle unhandled rx reqs to user if any */ + while ((req = req_get(ctxt, &ctxt->rx_req_user))) + req_put(ctxt, &ctxt->rx_req_idle, req); +#endif + + diag_queue_out(ctxt); + smd_try_to_send(ctxt); +#ifdef CONFIG_ARCH_QSD8X50 + dsp_try_to_send(ctxt); +#endif + +#if ROUTE_TO_USERSPACE + 
wake_up(&ctxt->read_wq); + wake_up(&ctxt->write_wq); +#endif + + return 0; +} + +static void diag_function_disable(struct usb_function *f) +{ + struct diag_context *ctxt = func_to_dev(f); + + ctxt->online = 0; +#if ROUTE_TO_USERSPACE + wake_up(&ctxt->read_wq); + wake_up(&ctxt->write_wq); +#endif + usb_ep_disable(ctxt->in); + usb_ep_disable(ctxt->out); +} + +static int diag_set_enabled(const char *val, struct kernel_param *kp) +{ + int enabled = simple_strtol(val, NULL, 0); + if (_context.cdev) + android_enable_function(&_context.function, enabled); + _context.function_enable = !!enabled; + return 0; +} + +static int diag_get_tx_rx_count(char *buffer, struct kernel_param *kp) +{ + struct diag_context *ctxt = &_context; + + return sprintf(buffer, "tx: %llu bytes, rx: %llu bytes", + ctxt->tx_count, ctxt->rx_count); +} +module_param_call(tx_rx_count, NULL, diag_get_tx_rx_count, NULL, 0444); + +static int diag_get_enabled(char *buffer, struct kernel_param *kp) +{ + buffer[0] = '0' + !_context.function.disabled; + return 1; +} +module_param_call(enabled, diag_set_enabled, diag_get_enabled, NULL, 0664); + + +int diag_bind_config(struct usb_configuration *c) +{ + struct diag_context *ctxt = &_context; + int ret; + + printk(KERN_INFO "diag_bind_config\n"); + + ret = smd_open("SMD_DIAG", &ctxt->ch, ctxt, smd_diag_notify); + if (ret) + return ret; + +#ifdef CONFIG_ARCH_QSD8X50 + ret = smd_open("DSP_DIAG", &ctxt->ch_dsp, ctxt, dsp_diag_notify); + if (ret) { + pr_err("%s: smd_open failed (DSP_DIAG)\n", __func__); + return ret; + } +#endif + + ctxt->cdev = c->cdev; + ctxt->function.name = "diag"; + ctxt->function.descriptors = fs_diag_descs; + ctxt->function.hs_descriptors = hs_diag_descs; + ctxt->function.bind = diag_function_bind; + ctxt->function.unbind = diag_function_unbind; + ctxt->function.set_alt = diag_function_set_alt; + ctxt->function.disable = diag_function_disable; + + ctxt->function.disabled = !_context.function_enable; + + return usb_add_function(c, &ctxt->function); +} + +static struct android_usb_function diag_function = { + .name = "diag", + .bind_config = diag_bind_config, +}; + +static int __init init(void) +{ + struct diag_context *ctxt = &_context; + + printk(KERN_INFO "diag init\n"); + spin_lock_init(&ctxt->req_lock); + INIT_LIST_HEAD(&ctxt->rx_req_idle); + INIT_LIST_HEAD(&ctxt->tx_req_idle); +#if ROUTE_TO_USERSPACE + mutex_init(&ctxt->user_lock); + INIT_LIST_HEAD(&ctxt->rx_req_user); + init_waitqueue_head(&ctxt->read_wq); + init_waitqueue_head(&ctxt->write_wq); +#endif + + android_register_function(&diag_function); + return 0; +} +module_init(init); diff --git a/drivers/usb/gadget/f_accessory.c b/drivers/usb/gadget/f_accessory.c new file mode 100644 index 0000000000000..ad3c1738336f9 --- /dev/null +++ b/drivers/usb/gadget/f_accessory.c @@ -0,0 +1,808 @@ +/* + * Gadget Function Driver for Android USB accessories + * + * Copyright (C) 2011 Google, Inc. + * Author: Mike Lockwood + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
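/*
 * Hypothetical userspace client for the /dev/diag node registered by the
 * diag function above.  The ioctl number is re-declared exactly as in
 * diag.c; the command ids used (0xfb-0xff) are the range the driver
 * reserves for userspace/host test tools.  Error handling is minimal.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <unistd.h>

#define ID_TABLE_SZ                     10
#define USB_DIAG_IOC_MAGIC              0xFF
#define USB_DIAG_FUNC_IOC_REGISTER_SET  _IOW(USB_DIAG_IOC_MAGIC, 3, char *)

int main(void)
{
        unsigned char ids[ID_TABLE_SZ] = { 0xfb, 0xfc }; /* rest stay 0 */
        unsigned char frame[8192];
        ssize_t n;
        int fd = open("/dev/diag", O_RDWR);

        if (fd < 0)
                return 1;
        if (ioctl(fd, USB_DIAG_FUNC_IOC_REGISTER_SET, ids) < 0)
                return 1;

        /* read() blocks until the function is online and a registered
         * command id arrives from the host; write() queues the reply. */
        n = read(fd, frame, sizeof(frame));
        if (n > 0)
                write(fd, frame, (size_t)n);

        close(fd);
        return 0;
}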
+ * + */ + +/* #define DEBUG */ +/* #define VERBOSE_DEBUG */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include + +#define BULK_BUFFER_SIZE 16384 +#define ACC_STRING_SIZE 256 + +#define PROTOCOL_VERSION 1 + +/* String IDs */ +#define INTERFACE_STRING_INDEX 0 + +/* number of tx and rx requests to allocate */ +#define TX_REQ_MAX 4 +#define RX_REQ_MAX 2 + +struct acc_dev { + struct usb_function function; + struct usb_composite_dev *cdev; + spinlock_t lock; + + struct usb_ep *ep_in; + struct usb_ep *ep_out; + + /* set to 1 when we connect */ + int online:1; + /* Set to 1 when we disconnect. + * Not cleared until our file is closed. + */ + int disconnected:1; + + /* strings sent by the host */ + char manufacturer[ACC_STRING_SIZE]; + char model[ACC_STRING_SIZE]; + char description[ACC_STRING_SIZE]; + char version[ACC_STRING_SIZE]; + char uri[ACC_STRING_SIZE]; + char serial[ACC_STRING_SIZE]; + + /* for acc_complete_set_string */ + int string_index; + + /* synchronize access to our device file */ + atomic_t open_excl; + + struct list_head tx_idle; + + wait_queue_head_t read_wq; + wait_queue_head_t write_wq; + struct usb_request *rx_req[RX_REQ_MAX]; + int rx_done; + struct delayed_work work; +}; + +static struct usb_interface_descriptor acc_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bInterfaceNumber = 0, + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, + .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC, + .bInterfaceProtocol = 0, +}; + +static struct usb_endpoint_descriptor acc_highspeed_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor acc_highspeed_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor acc_fullspeed_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_endpoint_descriptor acc_fullspeed_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_descriptor_header *fs_acc_descs[] = { + (struct usb_descriptor_header *) &acc_interface_desc, + (struct usb_descriptor_header *) &acc_fullspeed_in_desc, + (struct usb_descriptor_header *) &acc_fullspeed_out_desc, + NULL, +}; + +static struct usb_descriptor_header *hs_acc_descs[] = { + (struct usb_descriptor_header *) &acc_interface_desc, + (struct usb_descriptor_header *) &acc_highspeed_in_desc, + (struct usb_descriptor_header *) &acc_highspeed_out_desc, + NULL, +}; + +static struct usb_string acc_string_defs[] = { + [INTERFACE_STRING_INDEX].s = "Android Accessory Interface", + { }, /* end of list */ +}; + +static struct usb_gadget_strings acc_string_table = { + .language = 0x0409, /* en-US */ + .strings = acc_string_defs, +}; + +static struct usb_gadget_strings *acc_strings[] = { + &acc_string_table, + NULL, +}; + +/* temporary variable used between acc_open() and acc_gadget_bind() */ +static 
struct acc_dev *_acc_dev; + +static inline struct acc_dev *func_to_dev(struct usb_function *f) +{ + return container_of(f, struct acc_dev, function); +} + +static struct usb_request *acc_request_new(struct usb_ep *ep, int buffer_size) +{ + struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL); + if (!req) + return NULL; + + /* now allocate buffers for the requests */ + req->buf = kmalloc(buffer_size, GFP_KERNEL); + if (!req->buf) { + usb_ep_free_request(ep, req); + return NULL; + } + + return req; +} + +static void acc_request_free(struct usb_request *req, struct usb_ep *ep) +{ + if (req) { + kfree(req->buf); + usb_ep_free_request(ep, req); + } +} + +/* add a request to the tail of a list */ +static void req_put(struct acc_dev *dev, struct list_head *head, + struct usb_request *req) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + list_add_tail(&req->list, head); + spin_unlock_irqrestore(&dev->lock, flags); +} + +/* remove a request from the head of a list */ +static struct usb_request *req_get(struct acc_dev *dev, struct list_head *head) +{ + unsigned long flags; + struct usb_request *req; + + spin_lock_irqsave(&dev->lock, flags); + if (list_empty(head)) { + req = 0; + } else { + req = list_first_entry(head, struct usb_request, list); + list_del(&req->list); + } + spin_unlock_irqrestore(&dev->lock, flags); + return req; +} + +static void acc_set_disconnected(struct acc_dev *dev) +{ + dev->online = 0; + dev->disconnected = 1; +} + +static void acc_complete_in(struct usb_ep *ep, struct usb_request *req) +{ + struct acc_dev *dev = _acc_dev; + + if (req->status != 0) + acc_set_disconnected(dev); + + req_put(dev, &dev->tx_idle, req); + + wake_up(&dev->write_wq); +} + +static void acc_complete_out(struct usb_ep *ep, struct usb_request *req) +{ + struct acc_dev *dev = _acc_dev; + + dev->rx_done = 1; + if (req->status != 0) + acc_set_disconnected(dev); + + wake_up(&dev->read_wq); +} + +static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req) +{ + struct acc_dev *dev = ep->driver_data; + struct usb_composite_dev *cdev = dev->cdev; + char *string_dest = NULL; + int length = req->actual; + + if (req->status != 0) { + DBG(cdev, "acc_complete_set_string, err %d\n", req->status); + return; + } + + switch (dev->string_index) { + case ACCESSORY_STRING_MANUFACTURER: + string_dest = dev->manufacturer; + break; + case ACCESSORY_STRING_MODEL: + string_dest = dev->model; + break; + case ACCESSORY_STRING_DESCRIPTION: + string_dest = dev->description; + break; + case ACCESSORY_STRING_VERSION: + string_dest = dev->version; + break; + case ACCESSORY_STRING_URI: + string_dest = dev->uri; + break; + case ACCESSORY_STRING_SERIAL: + string_dest = dev->serial; + break; + } + if (string_dest) { + unsigned long flags; + + if (length >= ACC_STRING_SIZE) + length = ACC_STRING_SIZE - 1; + + spin_lock_irqsave(&dev->lock, flags); + memcpy(string_dest, cdev->req->buf, length); + /* ensure zero termination */ + string_dest[length] = 0; + spin_unlock_irqrestore(&dev->lock, flags); + } else { + DBG(cdev, "unknown accessory string index %d\n", + dev->string_index); + } +} + +static int __init create_bulk_endpoints(struct acc_dev *dev, + struct usb_endpoint_descriptor *in_desc, + struct usb_endpoint_descriptor *out_desc) +{ + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req; + struct usb_ep *ep; + int i; + + DBG(cdev, "create_bulk_endpoints dev: %p\n", dev); + + ep = usb_ep_autoconfig(cdev->gadget, in_desc); + if (!ep) { + DBG(cdev, "usb_ep_autoconfig for 
ep_in failed\n"); + return -ENODEV; + } + DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name); + ep->driver_data = dev; /* claim the endpoint */ + dev->ep_in = ep; + + ep = usb_ep_autoconfig(cdev->gadget, out_desc); + if (!ep) { + DBG(cdev, "usb_ep_autoconfig for ep_out failed\n"); + return -ENODEV; + } + DBG(cdev, "usb_ep_autoconfig for ep_out got %s\n", ep->name); + ep->driver_data = dev; /* claim the endpoint */ + dev->ep_out = ep; + + ep = usb_ep_autoconfig(cdev->gadget, out_desc); + if (!ep) { + DBG(cdev, "usb_ep_autoconfig for ep_out failed\n"); + return -ENODEV; + } + DBG(cdev, "usb_ep_autoconfig for ep_out got %s\n", ep->name); + ep->driver_data = dev; /* claim the endpoint */ + dev->ep_out = ep; + + /* now allocate requests for our endpoints */ + for (i = 0; i < TX_REQ_MAX; i++) { + req = acc_request_new(dev->ep_in, BULK_BUFFER_SIZE); + if (!req) + goto fail; + req->complete = acc_complete_in; + req_put(dev, &dev->tx_idle, req); + } + for (i = 0; i < RX_REQ_MAX; i++) { + req = acc_request_new(dev->ep_out, BULK_BUFFER_SIZE); + if (!req) + goto fail; + req->complete = acc_complete_out; + dev->rx_req[i] = req; + } + + return 0; + +fail: + printk(KERN_ERR "acc_bind() could not allocate requests\n"); + while ((req = req_get(dev, &dev->tx_idle))) + acc_request_free(req, dev->ep_in); + for (i = 0; i < RX_REQ_MAX; i++) + acc_request_free(dev->rx_req[i], dev->ep_out); + return -1; +} + +static ssize_t acc_read(struct file *fp, char __user *buf, + size_t count, loff_t *pos) +{ + struct acc_dev *dev = fp->private_data; + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req; + int r = count, xfer; + int ret = 0; + + DBG(cdev, "acc_read(%d)\n", count); + + if (dev->disconnected) + return -ENODEV; + + if (count > BULK_BUFFER_SIZE) + count = BULK_BUFFER_SIZE; + + /* we will block until we're online */ + DBG(cdev, "acc_read: waiting for online\n"); + ret = wait_event_interruptible(dev->read_wq, dev->online); + if (ret < 0) { + r = ret; + goto done; + } + +requeue_req: + /* queue a request */ + req = dev->rx_req[0]; + req->length = count; + dev->rx_done = 0; + ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL); + if (ret < 0) { + r = -EIO; + goto done; + } else { + DBG(cdev, "rx %p queue\n", req); + } + + /* wait for a request to complete */ + ret = wait_event_interruptible(dev->read_wq, dev->rx_done); + if (ret < 0) { + r = ret; + usb_ep_dequeue(dev->ep_out, req); + goto done; + } + if (dev->online) { + /* If we got a 0-len packet, throw it back and try again. */ + if (req->actual == 0) + goto requeue_req; + + DBG(cdev, "rx %p %d\n", req, req->actual); + xfer = (req->actual < count) ? 
req->actual : count; + r = xfer; + if (copy_to_user(buf, req->buf, xfer)) + r = -EFAULT; + } else + r = -EIO; + +done: + DBG(cdev, "acc_read returning %d\n", r); + return r; +} + +static ssize_t acc_write(struct file *fp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct acc_dev *dev = fp->private_data; + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req = 0; + int r = count, xfer; + int ret; + + DBG(cdev, "acc_write(%d)\n", count); + + if (!dev->online || dev->disconnected) + return -ENODEV; + + while (count > 0) { + if (!dev->online) { + DBG(cdev, "acc_write dev->error\n"); + r = -EIO; + break; + } + + /* get an idle tx request to use */ + req = 0; + ret = wait_event_interruptible(dev->write_wq, + ((req = req_get(dev, &dev->tx_idle)) || !dev->online)); + if (!req) { + r = ret; + break; + } + + if (count > BULK_BUFFER_SIZE) + xfer = BULK_BUFFER_SIZE; + else + xfer = count; + if (copy_from_user(req->buf, buf, xfer)) { + r = -EFAULT; + break; + } + + req->length = xfer; + ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL); + if (ret < 0) { + DBG(cdev, "acc_write: xfer error %d\n", ret); + r = -EIO; + break; + } + + buf += xfer; + count -= xfer; + + /* zero this so we don't try to free it on error exit */ + req = 0; + } + + if (req) + req_put(dev, &dev->tx_idle, req); + + DBG(cdev, "acc_write returning %d\n", r); + return r; +} + +static long acc_ioctl(struct file *fp, unsigned code, unsigned long value) +{ + struct acc_dev *dev = fp->private_data; + char *src = NULL; + int ret; + + if (dev->function.disabled) + return -ENODEV; + + switch (code) { + case ACCESSORY_GET_STRING_MANUFACTURER: + src = dev->manufacturer; + break; + case ACCESSORY_GET_STRING_MODEL: + src = dev->model; + break; + case ACCESSORY_GET_STRING_DESCRIPTION: + src = dev->description; + break; + case ACCESSORY_GET_STRING_VERSION: + src = dev->version; + break; + case ACCESSORY_GET_STRING_URI: + src = dev->uri; + break; + case ACCESSORY_GET_STRING_SERIAL: + src = dev->serial; + break; + } + if (!src) + return -EINVAL; + + ret = strlen(src) + 1; + if (copy_to_user((void __user *)value, src, ret)) + ret = -EFAULT; + return ret; +} + +static int acc_open(struct inode *ip, struct file *fp) +{ + printk(KERN_INFO "acc_open\n"); + if (atomic_xchg(&_acc_dev->open_excl, 1)) + return -EBUSY; + + _acc_dev->disconnected = 0; + fp->private_data = _acc_dev; + return 0; +} + +static int acc_release(struct inode *ip, struct file *fp) +{ + printk(KERN_INFO "acc_release\n"); + + WARN_ON(!atomic_xchg(&_acc_dev->open_excl, 0)); + _acc_dev->disconnected = 0; + return 0; +} + +/* file operations for /dev/acc_usb */ +static const struct file_operations acc_fops = { + .owner = THIS_MODULE, + .read = acc_read, + .write = acc_write, + .unlocked_ioctl = acc_ioctl, + .open = acc_open, + .release = acc_release, +}; + +static struct miscdevice acc_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "usb_accessory", + .fops = &acc_fops, +}; + +static int +acc_function_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct acc_dev *dev = func_to_dev(f); + int id; + int ret; + + dev->cdev = cdev; + DBG(cdev, "acc_function_bind dev: %p\n", dev); + + /* allocate interface ID(s) */ + id = usb_interface_id(c, f); + if (id < 0) + return id; + acc_interface_desc.bInterfaceNumber = id; + + /* allocate endpoints */ + ret = create_bulk_endpoints(dev, &acc_fullspeed_in_desc, + &acc_fullspeed_out_desc); + if (ret) + return ret; + + /* support high speed hardware */ + if 
(gadget_is_dualspeed(c->cdev->gadget)) { + acc_highspeed_in_desc.bEndpointAddress = + acc_fullspeed_in_desc.bEndpointAddress; + acc_highspeed_out_desc.bEndpointAddress = + acc_fullspeed_out_desc.bEndpointAddress; + } + + DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n", + gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", + f->name, dev->ep_in->name, dev->ep_out->name); + return 0; +} + +static void +acc_function_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct acc_dev *dev = func_to_dev(f); + struct usb_request *req; + int i; + + spin_lock_irq(&dev->lock); + while ((req = req_get(dev, &dev->tx_idle))) + acc_request_free(req, dev->ep_in); + for (i = 0; i < RX_REQ_MAX; i++) + acc_request_free(dev->rx_req[i], dev->ep_out); + dev->online = 0; + spin_unlock_irq(&dev->lock); + + misc_deregister(&acc_device); + kfree(_acc_dev); + _acc_dev = NULL; +} + +static void acc_work(struct work_struct *data) +{ + struct delayed_work *delayed = to_delayed_work(data); + struct acc_dev *dev = + container_of(delayed, struct acc_dev, work); + android_enable_function(&dev->function, 1); +} + +static int acc_function_setup(struct usb_function *f, + const struct usb_ctrlrequest *ctrl) +{ + struct acc_dev *dev = func_to_dev(f); + struct usb_composite_dev *cdev = dev->cdev; + int value = -EOPNOTSUPP; + u8 b_requestType = ctrl->bRequestType; + u8 b_request = ctrl->bRequest; + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + +/* + printk(KERN_INFO "acc_function_setup " + "%02x.%02x v%04x i%04x l%u\n", + b_requestType, b_request, + w_value, w_index, w_length); +*/ + + if (dev->function.disabled) { + if (b_requestType == (USB_DIR_OUT | USB_TYPE_VENDOR)) { + if (b_request == ACCESSORY_START) { + schedule_delayed_work( + &dev->work, msecs_to_jiffies(10)); + value = 0; + } else if (b_request == ACCESSORY_SEND_STRING) { + dev->string_index = w_index; + cdev->gadget->ep0->driver_data = dev; + cdev->req->complete = acc_complete_set_string; + value = w_length; + } + } else if (b_requestType == (USB_DIR_IN | USB_TYPE_VENDOR)) { + if (b_request == ACCESSORY_GET_PROTOCOL) { + *((u16 *)cdev->req->buf) = PROTOCOL_VERSION; + value = sizeof(u16); + + /* clear any strings left over from a previous session */ + memset(dev->manufacturer, 0, sizeof(dev->manufacturer)); + memset(dev->model, 0, sizeof(dev->model)); + memset(dev->description, 0, sizeof(dev->description)); + memset(dev->version, 0, sizeof(dev->version)); + memset(dev->uri, 0, sizeof(dev->uri)); + memset(dev->serial, 0, sizeof(dev->serial)); + } + } + } + + if (value >= 0) { + cdev->req->zero = 0; + cdev->req->length = value; + value = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC); + if (value < 0) + ERROR(cdev, "%s setup response queue error\n", + __func__); + } + + if (value == -EOPNOTSUPP) + VDBG(cdev, + "unknown class-specific control req " + "%02x.%02x v%04x i%04x l%u\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + return value; +} + +static int acc_function_set_alt(struct usb_function *f, + unsigned intf, unsigned alt) +{ + struct acc_dev *dev = func_to_dev(f); + struct usb_composite_dev *cdev = f->config->cdev; + int ret; + + DBG(cdev, "acc_function_set_alt intf: %d alt: %d\n", intf, alt); + ret = usb_ep_enable(dev->ep_in, + ep_choose(cdev->gadget, + &acc_highspeed_in_desc, + &acc_fullspeed_in_desc)); + if (ret) + return ret; + ret = usb_ep_enable(dev->ep_out, + ep_choose(cdev->gadget, + &acc_highspeed_out_desc, + 
&acc_fullspeed_out_desc)); + if (ret) { + usb_ep_disable(dev->ep_in); + return ret; + } + if (!dev->function.disabled) + dev->online = 1; + + /* readers may be blocked waiting for us to go online */ + wake_up(&dev->read_wq); + return 0; +} + +static void acc_function_disable(struct usb_function *f) +{ + struct acc_dev *dev = func_to_dev(f); + struct usb_composite_dev *cdev = dev->cdev; + + DBG(cdev, "acc_function_disable\n"); + acc_set_disconnected(dev); + usb_ep_disable(dev->ep_in); + usb_ep_disable(dev->ep_out); + + /* readers may be blocked waiting for us to go online */ + wake_up(&dev->read_wq); + + VDBG(cdev, "%s disabled\n", dev->function.name); +} + +static int acc_bind_config(struct usb_configuration *c) +{ + struct acc_dev *dev; + int ret; + + printk(KERN_INFO "acc_bind_config\n"); + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + /* allocate a string ID for our interface */ + if (acc_string_defs[INTERFACE_STRING_INDEX].id == 0) { + ret = usb_string_id(c->cdev); + if (ret < 0) + return ret; + acc_string_defs[INTERFACE_STRING_INDEX].id = ret; + acc_interface_desc.iInterface = ret; + } + + spin_lock_init(&dev->lock); + init_waitqueue_head(&dev->read_wq); + init_waitqueue_head(&dev->write_wq); + atomic_set(&dev->open_excl, 0); + INIT_LIST_HEAD(&dev->tx_idle); + INIT_DELAYED_WORK(&dev->work, acc_work); + + dev->cdev = c->cdev; + dev->function.name = "accessory"; + dev->function.strings = acc_strings, + dev->function.descriptors = fs_acc_descs; + dev->function.hs_descriptors = hs_acc_descs; + dev->function.bind = acc_function_bind; + dev->function.unbind = acc_function_unbind; + dev->function.setup = acc_function_setup; + dev->function.set_alt = acc_function_set_alt; + dev->function.disable = acc_function_disable; + dev->function.disabled = 1; + + /* _acc_dev must be set before calling usb_gadget_register_driver */ + _acc_dev = dev; + + ret = misc_register(&acc_device); + if (ret) + goto err1; + + ret = usb_add_function(c, &dev->function); + if (ret) + goto err2; + + return 0; + +err2: + misc_deregister(&acc_device); +err1: + kfree(dev); + printk(KERN_ERR "USB accessory gadget driver failed to initialize\n"); + return ret; +} + +static struct android_usb_function acc_function = { + .name = "accessory", + .bind_config = acc_bind_config, +}; + +static int __init init(void) +{ + printk(KERN_INFO "f_accessory init\n"); + android_register_function(&acc_function); + return 0; +} +module_init(init); diff --git a/drivers/usb/gadget/f_adb.c b/drivers/usb/gadget/f_adb.c index 15a5387dd3478..105399f917714 100644 --- a/drivers/usb/gadget/f_adb.c +++ b/drivers/usb/gadget/f_adb.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c index f7489fc4ffb01..5de5494a184fb 100644 --- a/drivers/usb/gadget/f_mass_storage.c +++ b/drivers/usb/gadget/f_mass_storage.c @@ -318,7 +318,10 @@ static const char fsg_string_interface[] = "Mass Storage"; #include "storage_common.c" - +#ifdef CONFIG_USB_CSW_HACK +static int write_error_after_csw_sent; +static int csw_hack_sent; +#endif /*-------------------------------------------------------------------------*/ struct fsg_dev; @@ -478,6 +481,7 @@ static inline struct fsg_dev *fsg_from_func(struct usb_function *f) } typedef void (*fsg_routine_t)(struct fsg_dev *); +static int send_status(struct fsg_common *common); static int exception_in_progress(struct fsg_common *common) { @@ -885,6 +889,9 @@ static int do_write(struct fsg_common 
*common) ssize_t nwritten; int rc; +#ifdef CONFIG_USB_CSW_HACK + int i; +#endif if (curlun->ro) { curlun->sense_data = SS_WRITE_PROTECTED; return -EINVAL; @@ -998,7 +1005,17 @@ static int do_write(struct fsg_common *common) bh = common->next_buffhd_to_drain; if (bh->state == BUF_STATE_EMPTY && !get_some_more) break; /* We stopped early */ +#ifdef CONFIG_USB_CSW_HACK + /* + * If the csw packet has already been submitted to the hardware + * (its buffer is marked as full), then by checking + * the residue we make sure that this csw packet is not + * written onto the storage media. + */ + if (bh->state == BUF_STATE_FULL && common->residue) { +#else if (bh->state == BUF_STATE_FULL) { +#endif smp_rmb(); common->next_buffhd_to_drain = bh->next; bh->state = BUF_STATE_EMPTY; @@ -1049,9 +1066,36 @@ static int do_write(struct fsg_common *common) curlun->sense_data = SS_WRITE_ERROR; curlun->sense_data_info = file_offset >> 9; curlun->info_valid = 1; +#ifdef CONFIG_USB_CSW_HACK + write_error_after_csw_sent = 1; + goto write_error; +#endif break; } +#ifdef CONFIG_USB_CSW_HACK +write_error: + if ((nwritten == amount) && !csw_hack_sent) { + if (write_error_after_csw_sent) + break; + /* + * Check if any of the buffers is in the + * busy state; a busy buffer means the complete + * data has not been received from the host yet, + * so there is no point in sending the csw + * before the complete data arrives. + */ + for (i = 0; i < FSG_NUM_BUFFERS; i++) { + if (common->buffhds[i].state == + BUF_STATE_BUSY) + break; + } + if (!amount_left_to_req && i == FSG_NUM_BUFFERS) { + csw_hack_sent = 1; + send_status(common); + } + } +#endif /* Did the host decide to stop early? */ if (bh->outreq->actual != bh->outreq->length) { common->short_packet_received = 1; @@ -1400,7 +1444,7 @@ static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh) memset(buf+2, 0, 10); /* None of the fields are changeable */ if (!changeable_values) { - buf[2] = 0x04; /* Write cache enable, */ + buf[2] = 0x00; /* Write cache disable, */ /* Read cache not disabled */ /* No cache retention priorities */ put_unaligned_be16(0xffff, &buf[4]); @@ -1825,7 +1869,19 @@ static int send_status(struct fsg_common *common) csw->Signature = cpu_to_le32(USB_BULK_CS_SIG); csw->Tag = common->tag; +#ifdef CONFIG_USB_CSW_HACK + /* Since the csw is sent early, before + * writing onto the storage media, the residue needs + * to be set to zero, assuming that the write will succeed. + */ + if (write_error_after_csw_sent) { + write_error_after_csw_sent = 0; + csw->Residue = cpu_to_le32(common->residue); + } else + csw->Residue = 0; +#else csw->Residue = cpu_to_le32(common->residue); +#endif csw->Status = status; bh->inreq->length = USB_BULK_CS_WRAP_LEN; @@ -2688,6 +2744,16 @@ static int fsg_main_thread(void *common_) common->state = FSG_STATE_STATUS_PHASE; spin_unlock_irq(&common->lock); +#ifdef CONFIG_USB_CSW_HACK + /* Since status has already been sent for a write scsi command, + * we need to skip sending status once again if it is a + * write scsi command. + */ + if (csw_hack_sent) { + csw_hack_sent = 0; + continue; + } +#endif if (send_status(common)) continue; @@ -3033,6 +3099,17 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f) fsg_intf_desc.bInterfaceNumber = i; fsg->interface_number = i; +#ifdef CONFIG_USB_ANDROID_MASS_STORAGE + /* HACK!! Android doesn't rebind on new configurations, instead it + * separates functionality in different products. 
Thus config_buf() + * in composite.c is set to rewrite bInterfaceNumber to match the + * actual function configuration of the active product. Since that + * number is checked in fsg_setup, we need to know it. So we cheat, + * knowing that UMS is the first function in all of our "products". + */ + fsg->interface_number = 0; +#endif + /* Find all the endpoints we will use */ ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc); if (!ep) diff --git a/drivers/usb/gadget/f_mtp.c b/drivers/usb/gadget/f_mtp.c index d560fbcf4f58b..8128b203e76f4 100644 --- a/drivers/usb/gadget/f_mtp.c +++ b/drivers/usb/gadget/f_mtp.c @@ -92,7 +92,6 @@ struct mtp_dev { wait_queue_head_t read_wq; wait_queue_head_t write_wq; - wait_queue_head_t intr_wq; struct usb_request *rx_req[RX_REQ_MAX]; struct usb_request *intr_req; int rx_done; @@ -373,12 +372,11 @@ static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req) { struct mtp_dev *dev = _mtp_dev; - DBG(dev->cdev, "mtp_complete_intr status: %d actual: %d\n", req->status, req->actual); + DBG(dev->cdev, "mtp_complete_intr status: %d actual: %d\n", + req->status, req->actual); dev->intr_busy = 0; if (req->status != 0) dev->state = STATE_ERROR; - - wake_up(&dev->intr_wq); } static int __init create_bulk_endpoints(struct mtp_dev *dev, @@ -798,13 +796,15 @@ static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event) if (length < 0 || length > INTR_BUFFER_SIZE) return -EINVAL; - - /* wait for a request to complete */ - ret = wait_event_interruptible(dev->intr_wq, !dev->intr_busy || dev->state == STATE_OFFLINE); - if (ret < 0) - return ret; if (dev->state == STATE_OFFLINE) return -ENODEV; + /* unfortunately an interrupt request might hang indefinitely if the host + * is not listening on the interrupt endpoint, so instead of waiting, + * we just fail if the endpoint is busy. 
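+ * The caller just sees -EBUSY in that case and can retry the event
+ * later, once mtp_complete_intr() has cleared intr_busy for the
+ * in-flight request.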
+ */ + if (dev->intr_busy) + return -EBUSY; + req = dev->intr_req; if (copy_from_user(req->buf, (void __user *)event->data, length)) return -EFAULT; @@ -1016,7 +1016,6 @@ mtp_function_unbind(struct usb_configuration *c, struct usb_function *f) mtp_request_free(dev->intr_req, dev->ep_intr); dev->state = STATE_OFFLINE; spin_unlock_irq(&dev->lock); - wake_up(&dev->intr_wq); misc_deregister(&mtp_device); kfree(_mtp_dev); @@ -1180,7 +1179,6 @@ static void mtp_function_disable(struct usb_function *f) /* readers may be blocked waiting for us to go online */ wake_up(&dev->read_wq); - wake_up(&dev->intr_wq); VDBG(cdev, "%s disabled\n", dev->function.name); } @@ -1208,7 +1206,6 @@ static int mtp_bind_config(struct usb_configuration *c) spin_lock_init(&dev->lock); init_waitqueue_head(&dev->read_wq); init_waitqueue_head(&dev->write_wq); - init_waitqueue_head(&dev->intr_wq); atomic_set(&dev->open_excl, 0); atomic_set(&dev->ioctl_excl, 0); INIT_LIST_HEAD(&dev->tx_idle); diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c index af60922d6713b..64faf659cafaa 100644 --- a/drivers/usb/gadget/f_rndis.c +++ b/drivers/usb/gadget/f_rndis.c @@ -439,8 +439,7 @@ rndis_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) */ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_SEND_ENCAPSULATED_COMMAND: - if (w_length > req->length || w_value - || w_index != rndis->ctrl_id) + if (w_value || w_index != rndis->ctrl_id) goto invalid; /* read the request; process it later */ value = w_length; diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h index 5c2720d64ffa4..4d1412f259371 100644 --- a/drivers/usb/gadget/gadget_chips.h +++ b/drivers/usb/gadget/gadget_chips.h @@ -126,6 +126,12 @@ #define gadget_is_ci13xxx_pci(g) 0 #endif +#ifdef CONFIG_USB_GADGET_MSM_72K +#define gadget_is_msm72k(g) !strcmp("msm72k_udc", (g)->name) +#else +#define gadget_is_msm72k(g) 0 +#endif + // CONFIG_USB_GADGET_SX2 // CONFIG_USB_GADGET_AU1X00 // ... @@ -215,6 +221,8 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget) return 0x27; else if (gadget_is_ci13xxx_msm(gadget)) return 0x28; + else if (gadget_is_msm72k(gadget)) + return 0x29; return -ENOENT; } diff --git a/drivers/usb/gadget/msm72k_udc.c b/drivers/usb/gadget/msm72k_udc.c new file mode 100644 index 0000000000000..9740bd8cd6f4e --- /dev/null +++ b/drivers/usb/gadget/msm72k_udc.c @@ -0,0 +1,1938 @@ +/* + * Driver for HighSpeed USB Client Controller in MSM7K + * + * Copyright (C) 2008 Google, Inc. + * Author: Mike Lockwood + * Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include +#include +#include +#include + +static const char driver_name[] = "msm72k_udc"; + +/* #define DEBUG */ +/* #define VERBOSE */ + +#define MSM_USB_BASE ((unsigned) ui->addr) + +#define DRIVER_DESC "MSM 72K USB Peripheral Controller" + +#define EPT_FLAG_IN 0x0001 + +#define SETUP_BUF_SIZE 4096 + +typedef void (*completion_func)(struct usb_ep *ep, struct usb_request *req); + +static const char *const ep_name[] = { + "ep0out", "ep1out", "ep2out", "ep3out", + "ep4out", "ep5out", "ep6out", "ep7out", + "ep8out", "ep9out", "ep10out", "ep11out", + "ep12out", "ep13out", "ep14out", "ep15out", + "ep0in", "ep1in", "ep2in", "ep3in", + "ep4in", "ep5in", "ep6in", "ep7in", + "ep8in", "ep9in", "ep10in", "ep11in", + "ep12in", "ep13in", "ep14in", "ep15in" +}; + +/* current state of VBUS */ +static int vbus; + +struct msm_request { + struct usb_request req; + + /* saved copy of req.complete */ + completion_func gadget_complete; + + struct usb_info *ui; + struct msm_request *next; + + unsigned busy:1; + unsigned live:1; + unsigned alloced:1; + + dma_addr_t dma; + dma_addr_t item_dma; + + struct ept_queue_item *item; +}; + +#define to_msm_request(r) container_of(r, struct msm_request, req) +#define to_msm_endpoint(r) container_of(r, struct msm_endpoint, ep) + +struct msm_endpoint { + struct usb_ep ep; + struct usb_info *ui; + struct msm_request *req; /* head of pending requests */ + struct msm_request *last; + unsigned flags; + + /* bit number (0-31) in various status registers + ** as well as the index into the usb_info's array + ** of all endpoints + */ + unsigned char bit; + unsigned char num; + + /* pointers to DMA transfer list area */ + /* these are allocated from the usb_info dma space */ + struct ept_queue_head *head; +}; + +static void usb_do_work(struct work_struct *w); + + +#define USB_STATE_IDLE 0 +#define USB_STATE_ONLINE 1 +#define USB_STATE_OFFLINE 2 + +#define USB_FLAG_START 0x0001 +#define USB_FLAG_VBUS_ONLINE 0x0002 +#define USB_FLAG_VBUS_OFFLINE 0x0004 +#define USB_FLAG_RESET 0x0008 + +struct usb_info { + /* lock for register/queue/device state changes */ + spinlock_t lock; + + /* single request used for handling setup transactions */ + struct usb_request *setup_req; + + struct platform_device *pdev; + int irq; + void *addr; + + unsigned state; + unsigned flags; + + unsigned online:1; + unsigned running:1; + + struct dma_pool *pool; + + /* dma page to back the queue heads and items */ + unsigned char *buf; + dma_addr_t dma; + + struct ept_queue_head *head; + + /* used for allocation */ + unsigned next_item; + unsigned next_ifc_num; + + /* endpoints are ordered based on their status bits, + ** so they are OUT0, OUT1, ... OUT15, IN0, IN1, ... 
IN15 + */ + struct msm_endpoint ept[32]; + + int *phy_init_seq; + void (*phy_reset)(void); + void (*hw_reset)(bool en); + + /* for notification when USB is connected or disconnected */ + void (*usb_connected)(int); + + struct work_struct work; + unsigned phy_status; + unsigned phy_fail_count; + + struct usb_gadget gadget; + struct usb_gadget_driver *driver; + +#define ep0out ept[0] +#define ep0in ept[16] + + struct clk *clk; + struct clk *coreclk; + struct clk *pclk; + struct clk *otgclk; + struct clk *ebi1clk; + + unsigned int ep0_dir; + u16 test_mode; + + u8 remote_wakeup; +}; + +static const struct usb_ep_ops msm72k_ep_ops; + + +static int msm72k_pullup(struct usb_gadget *_gadget, int is_active); +static int msm72k_set_halt(struct usb_ep *_ep, int value); +static void flush_endpoint(struct msm_endpoint *ept); + +static int usb_ep_get_stall(struct msm_endpoint *ept) +{ + unsigned int n; + struct usb_info *ui = ept->ui; + + n = readl(USB_ENDPTCTRL(ept->num)); + if (ept->flags & EPT_FLAG_IN) + return (CTRL_TXS & n) ? 1 : 0; + else + return (CTRL_RXS & n) ? 1 : 0; +} + +static unsigned ulpi_read(struct usb_info *ui, unsigned reg) +{ + unsigned timeout = 100000; + + /* initiate read operation */ + writel(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg), + USB_ULPI_VIEWPORT); + + /* wait for completion */ + while ((readl(USB_ULPI_VIEWPORT) & ULPI_RUN) && (--timeout)) ; + + if (timeout == 0) { + ERROR("ulpi_read: timeout %08x\n", readl(USB_ULPI_VIEWPORT)); + return 0xffffffff; + } + return ULPI_DATA_READ(readl(USB_ULPI_VIEWPORT)); +} + +static int ulpi_write(struct usb_info *ui, unsigned val, unsigned reg) +{ + unsigned timeout = 10000; + + /* initiate write operation */ + writel(ULPI_RUN | ULPI_WRITE | + ULPI_ADDR(reg) | ULPI_DATA(val), + USB_ULPI_VIEWPORT); + + /* wait for completion */ + while((readl(USB_ULPI_VIEWPORT) & ULPI_RUN) && (--timeout)) ; + + if (timeout == 0) { + printk(KERN_ERR "ulpi_write: timeout\n"); + return -1; + } + + return 0; +} + +static void ulpi_init(struct usb_info *ui) +{ + int *seq = ui->phy_init_seq; + + if (!seq) + return; + + while (seq[0] >= 0) { +// INFO("ulpi: write 0x%02x to 0x%02x\n", seq[0], seq[1]); + ulpi_write(ui, seq[0], seq[1]); + seq += 2; + } +} + +static void init_endpoints(struct usb_info *ui) +{ + unsigned n; + + for (n = 0; n < 32; n++) { + struct msm_endpoint *ept = ui->ept + n; + + ept->ui = ui; + ept->bit = n; + ept->num = n & 15; + ept->ep.name = ep_name[n]; + ept->ep.ops = &msm72k_ep_ops; + + if (ept->bit > 15) { + /* IN endpoint */ + ept->head = ui->head + (ept->num << 1) + 1; + ept->flags = EPT_FLAG_IN; + } else { + /* OUT endpoint */ + ept->head = ui->head + (ept->num << 1); + ept->flags = 0; + } + + } +} + +static void config_ept(struct msm_endpoint *ept) +{ + unsigned cfg = CONFIG_MAX_PKT(ept->ep.maxpacket) | CONFIG_ZLT; + + if (ept->bit == 0) + /* ep0 out needs interrupt-on-setup */ + cfg |= CONFIG_IOS; + + ept->head->config = cfg; + ept->head->next = TERMINATE; + +#if 0 + if (ept->ep.maxpacket) + INFO("ept #%d %s max:%d head:%p bit:%d\n", + ept->num, (ept->flags & EPT_FLAG_IN) ? 
"in" : "out", + ept->ep.maxpacket, ept->head, ept->bit); +#endif +} + +static void configure_endpoints(struct usb_info *ui) +{ + unsigned n; + + for (n = 0; n < 32; n++) + config_ept(ui->ept + n); +} + +struct usb_request *usb_ept_alloc_req(struct msm_endpoint *ept, + unsigned bufsize, gfp_t gfp_flags) +{ + struct usb_info *ui = ept->ui; + struct msm_request *req; + + req = kzalloc(sizeof(*req), gfp_flags); + if (!req) + goto fail1; + + req->item = dma_pool_alloc(ui->pool, gfp_flags, &req->item_dma); + if (!req->item) + goto fail2; + + if (bufsize) { + req->req.buf = kmalloc(bufsize, gfp_flags); + if (!req->req.buf) + goto fail3; + req->alloced = 1; + } + + return &req->req; + +fail3: + dma_pool_free(ui->pool, req->item, req->item_dma); +fail2: + kfree(req); +fail1: + return 0; +} + +static void usb_ept_enable(struct msm_endpoint *ept, int yes, + unsigned char ep_type) +{ + struct usb_info *ui = ept->ui; + int in = ept->flags & EPT_FLAG_IN; + unsigned n; + + n = readl(USB_ENDPTCTRL(ept->num)); + + if (in) { + n = (n & (~CTRL_TXT_MASK)); + if (yes) { + n |= CTRL_TXE | CTRL_TXR; + } else { + n &= (~CTRL_TXE); + } + if (yes) { + switch (ep_type) { + case USB_ENDPOINT_XFER_BULK: + n |= CTRL_TXT_BULK; + break; + case USB_ENDPOINT_XFER_INT: + n |= CTRL_TXT_INT; + break; + case USB_ENDPOINT_XFER_ISOC: + n |= CTRL_TXT_ISOCH; + break; + default: + pr_err("%s: unsupported ep_type %d for %s\n", + __func__, ep_type, ept->ep.name); + break; + } + } + } else { + n = (n & (~CTRL_RXT_MASK)); + if (yes) { + n |= CTRL_RXE | CTRL_RXR; + } else { + n &= ~(CTRL_RXE); + } + if (yes) { + switch (ep_type) { + case USB_ENDPOINT_XFER_BULK: + n |= CTRL_RXT_BULK; + break; + case USB_ENDPOINT_XFER_INT: + n |= CTRL_RXT_INT; + break; + case USB_ENDPOINT_XFER_ISOC: + n |= CTRL_RXT_ISOCH; + break; + default: + pr_err("%s: unsupported ep_type %d for %s\n", + __func__, ep_type, ept->ep.name); + break; + } + } + } + writel(n, USB_ENDPTCTRL(ept->num)); + +#if 0 + INFO("ept %d %s %s\n", + ept->num, in ? "in" : "out", yes ? "enabled" : "disabled"); +#endif +} + +static void usb_ept_start(struct msm_endpoint *ept) +{ + struct usb_info *ui = ept->ui; + struct msm_request *req = ept->req; + + BUG_ON(req->live); + + /* link the hw queue head to the request's transaction item */ + ept->head->next = req->item_dma; + ept->head->info = 0; + + /* start the endpoint */ + writel(1 << ept->bit, USB_ENDPTPRIME); + + /* mark this chain of requests as live */ + while (req) { + req->live = 1; + req = req->next; + } +} + +int usb_ept_queue_xfer(struct msm_endpoint *ept, struct usb_request *_req) +{ + unsigned long flags; + struct msm_request *req = to_msm_request(_req); + struct msm_request *last; + struct usb_info *ui = ept->ui; + struct ept_queue_item *item = req->item; + unsigned length = req->req.length; + + if (length > 0x4000) + return -EMSGSIZE; + + spin_lock_irqsave(&ui->lock, flags); + + if (req->busy) { + req->req.status = -EBUSY; + spin_unlock_irqrestore(&ui->lock, flags); + INFO("usb_ept_queue_xfer() tried to queue busy request\n"); + return -EBUSY; + } + + if (!ui->online && (ept->num != 0)) { + req->req.status = -ESHUTDOWN; + spin_unlock_irqrestore(&ui->lock, flags); + INFO("usb_ept_queue_xfer() called while offline\n"); + return -ESHUTDOWN; + } + + req->busy = 1; + req->live = 0; + req->next = 0; + req->req.status = -EBUSY; + + req->dma = dma_map_single(NULL, req->req.buf, length, + (ept->flags & EPT_FLAG_IN) ? 
+ DMA_TO_DEVICE : DMA_FROM_DEVICE); + + /* prepare the transaction descriptor item for the hardware */ + item->next = TERMINATE; + item->info = INFO_BYTES(length) | INFO_IOC | INFO_ACTIVE; + item->page0 = req->dma; + item->page1 = (req->dma + 0x1000) & 0xfffff000; + item->page2 = (req->dma + 0x2000) & 0xfffff000; + item->page3 = (req->dma + 0x3000) & 0xfffff000; + + /* Add the new request to the end of the queue */ + last = ept->last; + if (last) { + /* Already requests in the queue. add us to the + * end, but let the completion interrupt actually + * start things going, to avoid hw issues + */ + last->next = req; + + /* only modify the hw transaction next pointer if + * that request is not live + */ + if (!last->live) + last->item->next = req->item_dma; + } else { + /* queue was empty -- kick the hardware */ + ept->req = req; + usb_ept_start(ept); + } + ept->last = req; + + spin_unlock_irqrestore(&ui->lock, flags); + return 0; +} + +/* --- endpoint 0 handling --- */ + +static void ep0_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct msm_request *r = to_msm_request(req); + struct msm_endpoint *ept = to_msm_endpoint(ep); + struct usb_info *ui = ept->ui; + + req->complete = r->gadget_complete; + r->gadget_complete = NULL; + if (req->complete) + req->complete(&ui->ep0in.ep, req); +} + +static void ep0_queue_ack_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct msm_endpoint *ept = to_msm_endpoint(ep); + struct msm_request *r = to_msm_request(req); + completion_func gadget_complete = r->gadget_complete; + + if (gadget_complete) { + r->gadget_complete = NULL; + gadget_complete(ep, req); + } + + /* queue up the receive of the ACK response from the host */ + if (req->status == 0) { + struct usb_info *ui = ept->ui; + req->length = 0; + req->complete = ep0_complete; + if (ui->ep0_dir == USB_DIR_IN) + usb_ept_queue_xfer(&ui->ep0out, req); + else + usb_ept_queue_xfer(&ui->ep0in, req); + } else + ep0_complete(ep, req); +} + +static void ep0_setup_ack_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct msm_endpoint *ept = to_msm_endpoint(ep); + struct usb_info *ui = ept->ui; + unsigned int temp; + + if (!ui->test_mode) + return; + + switch (ui->test_mode) { + case J_TEST: + pr_info("usb electrical test mode: (J)\n"); + temp = readl(USB_PORTSC) & (~PORTSC_PTC); + writel(temp | PORTSC_PTC_J_STATE, USB_PORTSC); + break; + + case K_TEST: + pr_info("usb electrical test mode: (K)\n"); + temp = readl(USB_PORTSC) & (~PORTSC_PTC); + writel(temp | PORTSC_PTC_K_STATE, USB_PORTSC); + break; + + case SE0_NAK_TEST: + pr_info("usb electrical test mode: (SE0-NAK)\n"); + temp = readl(USB_PORTSC) & (~PORTSC_PTC); + writel(temp | PORTSC_PTC_SE0_NAK, USB_PORTSC); + break; + + case TST_PKT_TEST: + pr_info("usb electrical test mode: (TEST_PKT)\n"); + temp = readl(USB_PORTSC) & (~PORTSC_PTC); + writel(temp | PORTSC_PTC_TST_PKT, USB_PORTSC); + break; + } +} + +static void ep0_setup_ack(struct usb_info *ui) +{ + struct usb_request *req = ui->setup_req; + req->length = 0; + req->complete = ep0_setup_ack_complete; + usb_ept_queue_xfer(&ui->ep0in, req); +} + +static void ep0_setup_stall(struct usb_info *ui) +{ + writel((1<<16) | (1<<0), USB_ENDPTCTRL(0)); +} + +static void ep0_setup_send(struct usb_info *ui, unsigned length) +{ + struct usb_request *req = ui->setup_req; + struct msm_request *r = to_msm_request(req); + struct msm_endpoint *ept = &ui->ep0in; + + req->length = length; + req->complete = ep0_queue_ack_complete; + r->gadget_complete = NULL; + usb_ept_queue_xfer(ept, req); +} + 
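+/*
+ * handle_setup() runs from the USB interrupt whenever a SETUP packet is
+ * latched for ep0: it copies the request out of the ep0-out queue head,
+ * cancels any pending ep0 transactions, answers the standard GET_STATUS,
+ * CLEAR_FEATURE/SET_FEATURE, SET_CONFIGURATION and SET_ADDRESS requests
+ * locally, and delegates everything else to the gadget driver's setup()
+ * callback, stalling ep0 if no one handles the request.
+ */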
+static void handle_setup(struct usb_info *ui) +{ + struct usb_ctrlrequest ctl; + struct usb_request *req = ui->setup_req; + int ret; + + memcpy(&ctl, ui->ep0out.head->setup_data, sizeof(ctl)); + writel(EPT_RX(0), USB_ENDPTSETUPSTAT); + + if (ctl.bRequestType & USB_DIR_IN) + ui->ep0_dir = USB_DIR_IN; + else + ui->ep0_dir = USB_DIR_OUT; + + /* any pending ep0 transactions must be canceled */ + flush_endpoint(&ui->ep0out); + flush_endpoint(&ui->ep0in); + +#if 0 + INFO("setup: type=%02x req=%02x val=%04x idx=%04x len=%04x\n", + ctl.bRequestType, ctl.bRequest, ctl.wValue, + ctl.wIndex, ctl.wLength); +#endif + + if ((ctl.bRequestType & (USB_DIR_IN | USB_TYPE_MASK)) == + (USB_DIR_IN | USB_TYPE_STANDARD)) { + if (ctl.bRequest == USB_REQ_GET_STATUS) { + if (ctl.wLength != 2) + goto stall; + switch (ctl.bRequestType & USB_RECIP_MASK) { + case USB_RECIP_ENDPOINT: + { + struct msm_endpoint *ept; + unsigned num = + ctl.wIndex & USB_ENDPOINT_NUMBER_MASK; + u16 temp = 0; + + if (num == 0) { + memset(req->buf, 0, 2); + break; + } + if (ctl.wIndex & USB_ENDPOINT_DIR_MASK) + num += 16; + ept = &ui->ep0out + num; + temp = usb_ep_get_stall(ept); + temp = temp << USB_ENDPOINT_HALT; + memcpy(req->buf, &temp, 2); + break; + } + case USB_RECIP_DEVICE: + { + u16 temp = 0; + + temp = 1 << USB_DEVICE_SELF_POWERED; + temp |= (ui->remote_wakeup << + USB_DEVICE_REMOTE_WAKEUP); + memcpy(req->buf, &temp, 2); + break; + } + case USB_RECIP_INTERFACE: + memset(req->buf, 0, 2); + break; + default: + goto stall; + } + ep0_setup_send(ui, 2); + return; + } + } + if (ctl.bRequestType == + (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)) { + if ((ctl.bRequest == USB_REQ_CLEAR_FEATURE) || + (ctl.bRequest == USB_REQ_SET_FEATURE)) { + if ((ctl.wValue == 0) && (ctl.wLength == 0)) { + unsigned num = ctl.wIndex & 0x0f; + + if (num != 0) { + struct msm_endpoint *ept; + + if (ctl.wIndex & 0x80) + num += 16; + ept = &ui->ep0out + num; + + if (ctl.bRequest == USB_REQ_SET_FEATURE) + msm72k_set_halt(&ept->ep, 1); + else + msm72k_set_halt(&ept->ep, 0); + } + goto ack; + } + } + } + if (ctl.bRequestType == (USB_DIR_OUT | USB_TYPE_STANDARD)) { + if (ctl.bRequest == USB_REQ_SET_CONFIGURATION) { + ui->online = !!ctl.wValue; + if (ui->online && ui->usb_connected) + ui->usb_connected(1); + } else if (ctl.bRequest == USB_REQ_SET_ADDRESS) { + /* write address delayed (will take effect + ** after the next IN txn) + */ + writel((ctl.wValue << 25) | (1 << 24), USB_DEVICEADDR); + goto ack; + } else if (ctl.bRequest == USB_REQ_SET_FEATURE) { + switch (ctl.wValue) { + case USB_DEVICE_TEST_MODE: + switch (ctl.wIndex) { + case J_TEST: + case K_TEST: + case SE0_NAK_TEST: + case TST_PKT_TEST: + ui->test_mode = ctl.wIndex; + goto ack; + } + goto stall; + case USB_DEVICE_REMOTE_WAKEUP: + ui->remote_wakeup = 1; + goto ack; + } + } else if ((ctl.bRequest == USB_REQ_CLEAR_FEATURE) && + (ctl.wValue == USB_DEVICE_REMOTE_WAKEUP)) { + ui->remote_wakeup = 0; + goto ack; + } + } + + /* delegate if we get here */ + if (ui->driver) { + ret = ui->driver->setup(&ui->gadget, &ctl); + if (ret >= 0) + return; + } + +stall: + /* stall ep0 on error */ + ep0_setup_stall(ui); + return; + +ack: + ep0_setup_ack(ui); +} + +static void handle_endpoint(struct usb_info *ui, unsigned bit) +{ + struct msm_endpoint *ept = ui->ept + bit; + struct msm_request *req; + unsigned long flags; + unsigned info; + +#if 0 + INFO("handle_endpoint() %d %s req=%p(%08x)\n", + ept->num, (ept->flags & EPT_FLAG_IN) ? "in" : "out", + ept->req, ept->req ? 
ept->req->item_dma : 0); +#endif + + /* expire all requests that are no longer active */ + spin_lock_irqsave(&ui->lock, flags); + while ((req = ept->req)) { + info = req->item->info; + + /* if we've processed all live requests, time to + * restart the hardware on the next non-live request + */ + if (!req->live) { + usb_ept_start(ept); + break; + } + + /* if the transaction is still in-flight, stop here */ + if (info & INFO_ACTIVE) + break; + + /* advance ept queue to the next request */ + ept->req = req->next; + if (ept->req == 0) + ept->last = 0; + + dma_unmap_single(NULL, req->dma, req->req.length, + (ept->flags & EPT_FLAG_IN) ? + DMA_TO_DEVICE : DMA_FROM_DEVICE); + + if (info & (INFO_HALTED | INFO_BUFFER_ERROR | INFO_TXN_ERROR)) { + /* XXX pass on more specific error code */ + req->req.status = -EIO; + req->req.actual = 0; + INFO("msm72k_udc: ept %d %s error. info=%08x\n", + ept->num, + (ept->flags & EPT_FLAG_IN) ? "in" : "out", + info); + } else { + req->req.status = 0; + req->req.actual = + req->req.length - ((info >> 16) & 0x7FFF); + } + req->busy = 0; + req->live = 0; + + if (req->req.complete) { + spin_unlock_irqrestore(&ui->lock, flags); + req->req.complete(&ept->ep, &req->req); + spin_lock_irqsave(&ui->lock, flags); + } + } + spin_unlock_irqrestore(&ui->lock, flags); +} + +#define FLUSH_WAIT_US 5 +#define FLUSH_TIMEOUT (2 * (USEC_PER_SEC / FLUSH_WAIT_US)) +static void flush_endpoint_hw(struct usb_info *ui, unsigned bits) +{ + uint32_t unflushed = 0; + uint32_t stat = 0; + int cnt = 0; + + /* flush endpoint, canceling transactions + ** - this can take a "large amount of time" (per databook) + ** - the flush can fail in some cases, thus we check STAT + ** and repeat if we're still operating + ** (does the fact that this doesn't use the tripwire matter?!) + */ + while (cnt < FLUSH_TIMEOUT) { + writel(bits, USB_ENDPTFLUSH); + while (((unflushed = readl(USB_ENDPTFLUSH)) & bits) && + cnt < FLUSH_TIMEOUT) { + cnt++; + udelay(FLUSH_WAIT_US); + } + + stat = readl(USB_ENDPTSTAT); + if (cnt >= FLUSH_TIMEOUT) + goto err; + if (!(stat & bits)) + goto done; + cnt++; + udelay(FLUSH_WAIT_US); + } + +err: + pr_warning("%s: Could not complete flush! NOT GOOD! 
" + "stat: %x unflushed: %x bits: %x\n", __func__, + stat, unflushed, bits); +done: + return; +} + +static void flush_endpoint_sw(struct msm_endpoint *ept) +{ + struct usb_info *ui = ept->ui; + struct msm_request *req; + unsigned long flags; + + /* inactive endpoints have nothing to do here */ + if (ept->ep.maxpacket == 0) + return; + + /* put the queue head in a sane state */ + ept->head->info = 0; + ept->head->next = TERMINATE; + + /* cancel any pending requests */ + spin_lock_irqsave(&ui->lock, flags); + req = ept->req; + ept->req = 0; + ept->last = 0; + while (req != 0) { + req->busy = 0; + req->live = 0; + req->req.status = -ECONNRESET; + req->req.actual = 0; + if (req->req.complete) { + spin_unlock_irqrestore(&ui->lock, flags); + req->req.complete(&ept->ep, &req->req); + spin_lock_irqsave(&ui->lock, flags); + } + req = req->next; + } + spin_unlock_irqrestore(&ui->lock, flags); +} + +static void flush_endpoint(struct msm_endpoint *ept) +{ + flush_endpoint_hw(ept->ui, (1 << ept->bit)); + flush_endpoint_sw(ept); +} + +static void flush_all_endpoints(struct usb_info *ui) +{ + unsigned n; + + flush_endpoint_hw(ui, 0xffffffff); + + for (n = 0; n < 32; n++) + flush_endpoint_sw(ui->ept + n); +} + + +static irqreturn_t usb_interrupt(int irq, void *data) +{ + struct usb_info *ui = data; + unsigned n; + + n = readl(USB_USBSTS); + writel(n, USB_USBSTS); + + /* somehow we got an IRQ while in the reset sequence: ignore it */ + if (ui->running == 0) + return IRQ_HANDLED; + + if (n & STS_PCI) { + switch (readl(USB_PORTSC) & PORTSC_PSPD_MASK) { + case PORTSC_PSPD_FS: + INFO("msm72k_udc: portchange USB_SPEED_FULL\n"); + ui->gadget.speed = USB_SPEED_FULL; + break; + case PORTSC_PSPD_LS: + INFO("msm72k_udc: portchange USB_SPEED_LOW\n"); + ui->gadget.speed = USB_SPEED_LOW; + break; + case PORTSC_PSPD_HS: + INFO("msm72k_udc: portchange USB_SPEED_HIGH\n"); + ui->gadget.speed = USB_SPEED_HIGH; + break; + } + } + + if (n & STS_URI) { + INFO("msm72k_udc: reset\n"); + + writel(readl(USB_ENDPTSETUPSTAT), USB_ENDPTSETUPSTAT); + writel(readl(USB_ENDPTCOMPLETE), USB_ENDPTCOMPLETE); + writel(0xffffffff, USB_ENDPTFLUSH); + writel(0, USB_ENDPTCTRL(1)); + + if (ui->online != 0) { + /* marking us offline will cause ept queue attempts + ** to fail + */ + ui->online = 0; + + flush_all_endpoints(ui); + + /* XXX: we can't seem to detect going offline, + * XXX: so deconfigure on reset for the time being + */ + if (ui->driver) { + printk(KERN_INFO "usb: notify offline\n"); + ui->driver->disconnect(&ui->gadget); + } + } + } + + if (n & STS_SLI) + INFO("msm72k_udc: suspend\n"); + + if (n & STS_UI) { + n = readl(USB_ENDPTSETUPSTAT); + if (n & EPT_RX(0)) + handle_setup(ui); + + n = readl(USB_ENDPTCOMPLETE); + writel(n, USB_ENDPTCOMPLETE); + while (n) { + unsigned bit = __ffs(n); + handle_endpoint(ui, bit); + n = n & (~(1 << bit)); + } + } + return IRQ_HANDLED; +} + +static void usb_prepare(struct usb_info *ui) +{ + spin_lock_init(&ui->lock); + + memset(ui->buf, 0, 4096); + ui->head = (void *) (ui->buf + 0); + + /* only important for reset/reinit */ + memset(ui->ept, 0, sizeof(ui->ept)); + ui->next_item = 0; + ui->next_ifc_num = 0; + + init_endpoints(ui); + + ui->ep0in.ep.maxpacket = 64; + ui->ep0out.ep.maxpacket = 64; + + ui->setup_req = + usb_ept_alloc_req(&ui->ep0in, SETUP_BUF_SIZE, GFP_KERNEL); + + INIT_WORK(&ui->work, usb_do_work); +} + +static void usb_suspend_phy(struct usb_info *ui) +{ +#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM7X30) + /* clear VBusValid and SessionEnd rising interrupts */ + 
ulpi_write(ui, (1 << 1) | (1 << 3), 0x0f); + /* clear VBusValid and SessionEnd falling interrupts */ + ulpi_write(ui, (1 << 1) | (1 << 3), 0x12); + + /* Disable 60MHz CLKOUT in serial or carkit mode */ + ulpi_write(ui, 0x08, 0x09); + + /* Enable PHY Low Power Suspend - Clock Disable (PLPSCD) */ + writel(readl(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC); + mdelay(1); +#else + /* clear VBusValid and SessionEnd rising interrupts */ + ulpi_write(ui, (1 << 1) | (1 << 3), 0x0f); + /* clear VBusValid and SessionEnd falling interrupts */ + ulpi_write(ui, (1 << 1) | (1 << 3), 0x12); + /* disable interface protect circuit to drop current consumption */ + ulpi_write(ui, (1 << 7), 0x08); + /* clear the SuspendM bit -> suspend the PHY */ + ulpi_write(ui, 1 << 6, 0x06); +#endif +} + +/* If this function returns < 0, the phy reset failed and we cannot + * continue at this point. The only solution is to wait until the next + * cable disconnect/reconnect to bring the phy back */ +static int usb_phy_reset(struct usb_info *ui) +{ + u32 val; + int ret; + int retries; + + if (!ui->phy_reset) + return 0; + + if (ui->hw_reset) + ui->hw_reset(1); + ui->phy_reset(); + if (ui->hw_reset) + ui->hw_reset(0); + +#if defined(CONFIG_ARCH_QSD8X50) + val = readl(USB_PORTSC) & ~PORTSC_PTS_MASK; + writel(val | PORTSC_PTS_ULPI, USB_PORTSC); + + /* XXX: only necessary for pre-45nm internal PHYs. */ + for (retries = 3; retries > 0; retries--) { + ret = ulpi_write(ui, ULPI_FUNC_SUSPENDM, ULPI_FUNC_CTRL_CLR); + if (!ret) + break; + ui->phy_reset(); + } + if (!retries) + return -1; + + /* this reset calibrates the phy, if the above write succeeded */ + ui->phy_reset(); + + /* XXX: pre-45nm internal phys have a known issue which can cause them + * to lockup on reset. If ULPI accesses fail, try resetting the phy + * again */ + for (retries = 3; retries > 0; retries--) { + ret = ulpi_read(ui, ULPI_DEBUG_REG); + if (ret != 0xffffffff) + break; + ui->phy_reset(); + } + if (!retries) + return -1; +#endif + pr_info("msm_hsusb_phy_reset: success\n"); + return 0; +} + +static void usb_reset(struct usb_info *ui) +{ + unsigned long flags; + printk(KERN_INFO "hsusb: reset controller\n"); + + spin_lock_irqsave(&ui->lock, flags); + ui->running = 0; + spin_unlock_irqrestore(&ui->lock, flags); + + /* To prevent phantom packets being received by the usb core on + * some devices, put the controller into reset prior to + * resetting the phy. */ + writel(2, USB_USBCMD); + msleep(10); + +#if 0 + /* we should flush and shutdown cleanly if already running */ + writel(0xffffffff, USB_ENDPTFLUSH); + msleep(2); +#endif + + if (usb_phy_reset(ui) < 0) + pr_err("%s: Phy reset failed!\n", __func__); + + msleep(100); + + /* toggle non-driving mode after phy reset to ensure that + * we cause a disconnect event to the host */ + ulpi_write(ui, 0x18, 0x6); + msleep(1); + ulpi_write(ui, 0x8, 0x5); + msleep(1); + + /* RESET */ + writel(2, USB_USBCMD); + msleep(10); + +#ifdef CONFIG_ARCH_MSM7X00A + /* INCR4 BURST mode */ + writel(0x01, USB_SBUSCFG); +#else + /* bursts of unspecified length. 
*/ + writel(0, USB_AHBBURST); + /* Use the AHB transactor */ + writel(0, USB_AHBMODE); +#endif + + /* select DEVICE mode */ + writel(0x12, USB_USBMODE); + msleep(1); + + /* select ULPI phy */ + writel(0x80000000, USB_PORTSC); + + ulpi_init(ui); + + writel(ui->dma, USB_ENDPOINTLISTADDR); + + configure_endpoints(ui); + + /* marking us offline will cause ept queue attempts to fail */ + ui->online = 0; + + /* terminate any pending transactions */ + flush_all_endpoints(ui); + + if (ui->driver) { + printk(KERN_INFO "usb: notify offline\n"); + ui->driver->disconnect(&ui->gadget); + } + + /* enable interrupts */ + writel(STS_URI | STS_SLI | STS_UI | STS_PCI, USB_USBINTR); + + /* go to RUN mode (D+ pullup enable) */ + msm72k_pullup(&ui->gadget, 1); + + spin_lock_irqsave(&ui->lock, flags); + ui->running = 1; + spin_unlock_irqrestore(&ui->lock, flags); +} + +static void usb_start(struct usb_info *ui) +{ + unsigned long flags; + + spin_lock_irqsave(&ui->lock, flags); + ui->flags |= USB_FLAG_START; + schedule_work(&ui->work); + spin_unlock_irqrestore(&ui->lock, flags); +} + +static struct usb_info *the_usb_info; + +static int usb_free(struct usb_info *ui, int ret) +{ + INFO("usb_free(%d)\n", ret); + + if (ui->irq) + free_irq(ui->irq, 0); + if (ui->pool) + dma_pool_destroy(ui->pool); + if (ui->dma) + dma_free_coherent(&ui->pdev->dev, 4096, ui->buf, ui->dma); + if (ui->addr) + iounmap(ui->addr); + if (ui->clk) + clk_put(ui->clk); + if (ui->pclk) + clk_put(ui->pclk); + if (ui->otgclk) + clk_put(ui->otgclk); + if (ui->coreclk) + clk_put(ui->coreclk); + if (ui->ebi1clk) + clk_put(ui->ebi1clk); + kfree(ui); + return ret; +} + +static void usb_do_work_check_vbus(struct usb_info *ui) +{ + unsigned long iflags; + + spin_lock_irqsave(&ui->lock, iflags); + if (vbus) { + ui->flags |= USB_FLAG_VBUS_ONLINE; + } else { + ui->flags |= USB_FLAG_VBUS_OFFLINE; + } + spin_unlock_irqrestore(&ui->lock, iflags); +} + +static void usb_do_work(struct work_struct *w) +{ + struct usb_info *ui = container_of(w, struct usb_info, work); + unsigned long iflags; + unsigned flags, _vbus; + + for (;;) { + spin_lock_irqsave(&ui->lock, iflags); + flags = ui->flags; + ui->flags = 0; + _vbus = vbus; + spin_unlock_irqrestore(&ui->lock, iflags); + + /* give up if we have nothing to do */ + if (flags == 0) + break; + + switch (ui->state) { + case USB_STATE_IDLE: + if (flags & USB_FLAG_START) { + pr_info("msm72k_udc: IDLE -> ONLINE\n"); + clk_set_rate(ui->ebi1clk, 128000000); + udelay(10); + if (ui->coreclk) + clk_enable(ui->coreclk); + clk_enable(ui->clk); + clk_enable(ui->pclk); + if (ui->otgclk) + clk_enable(ui->otgclk); + usb_reset(ui); + + ui->state = USB_STATE_ONLINE; + usb_do_work_check_vbus(ui); + } + break; + case USB_STATE_ONLINE: + /* If at any point when we were online, we received + * the signal to go offline, we must honor it + */ + if (flags & USB_FLAG_VBUS_OFFLINE) { + pr_info("msm72k_udc: ONLINE -> OFFLINE\n"); + + /* synchronize with irq context */ + spin_lock_irqsave(&ui->lock, iflags); + ui->running = 0; + ui->online = 0; + msm72k_pullup(&ui->gadget, 0); + spin_unlock_irqrestore(&ui->lock, iflags); + + if (ui->usb_connected) + ui->usb_connected(0); + + /* terminate any transactions, etc */ + flush_all_endpoints(ui); + + if (ui->driver) { + printk(KERN_INFO "usb: notify offline\n"); + ui->driver->disconnect(&ui->gadget); + } + + usb_phy_reset(ui); + + /* power down phy, clock down usb */ + spin_lock_irqsave(&ui->lock, iflags); + usb_suspend_phy(ui); + clk_disable(ui->pclk); + clk_disable(ui->clk); + if (ui->otgclk) + 
clk_disable(ui->otgclk); + if (ui->coreclk) + clk_disable(ui->coreclk); + clk_set_rate(ui->ebi1clk, 0); + spin_unlock_irqrestore(&ui->lock, iflags); + + ui->state = USB_STATE_OFFLINE; + usb_do_work_check_vbus(ui); + break; + } + if (flags & USB_FLAG_RESET) { + pr_info("msm72k_udc: ONLINE -> RESET\n"); + usb_reset(ui); + pr_info("msm72k_udc: RESET -> ONLINE\n"); + break; + } + break; + case USB_STATE_OFFLINE: + /* If we were signaled to go online and vbus is still + * present when we received the signal, go online. + */ + if ((flags & USB_FLAG_VBUS_ONLINE) && _vbus) { + pr_info("msm72k_udc: OFFLINE -> ONLINE\n"); + clk_set_rate(ui->ebi1clk, 128000000); + udelay(10); + if (ui->coreclk) + clk_enable(ui->coreclk); + clk_enable(ui->clk); + clk_enable(ui->pclk); + if (ui->otgclk) + clk_enable(ui->otgclk); + usb_reset(ui); + + /* detect shorted D+/D-, indicating AC power */ + msleep(10); + if ((readl(USB_PORTSC) & PORTSC_LS) == PORTSC_LS) + if (ui->usb_connected) + ui->usb_connected(2); + + ui->state = USB_STATE_ONLINE; + usb_do_work_check_vbus(ui); + } + break; + } + } +} + +/* FIXME - the callers of this function should use a gadget API instead. + * This is called from htc_battery.c and board-halibut.c + * WARNING - this can get called before this driver is initialized. + */ +void msm_hsusb_set_vbus_state(int online) +{ + unsigned long flags = 0; + struct usb_info *ui = the_usb_info; + + if (ui) + spin_lock_irqsave(&ui->lock, flags); + if (vbus != online) { + vbus = online; + if (ui) { + if (online) { + ui->flags |= USB_FLAG_VBUS_ONLINE; + } else { + ui->flags |= USB_FLAG_VBUS_OFFLINE; + } + schedule_work(&ui->work); + } + } + if (ui) + spin_unlock_irqrestore(&ui->lock, flags); +} + +#if defined(CONFIG_DEBUG_FS) && 0 + +void usb_function_reenumerate(void) +{ + struct usb_info *ui = the_usb_info; + + /* disable and re-enable the D+ pullup */ + msm72k_pullup(&ui->gadget, false); + msleep(10); + msm72k_pullup(&ui->gadget, true); +} + +static char debug_buffer[PAGE_SIZE]; + +static ssize_t debug_read_status(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct usb_info *ui = file->private_data; + char *buf = debug_buffer; + unsigned long flags; + struct msm_endpoint *ept; + struct msm_request *req; + int n; + int i = 0; + + spin_lock_irqsave(&ui->lock, flags); + + i += scnprintf(buf + i, PAGE_SIZE - i, + "regs: setup=%08x prime=%08x stat=%08x done=%08x\n", + readl(USB_ENDPTSETUPSTAT), + readl(USB_ENDPTPRIME), + readl(USB_ENDPTSTAT), + readl(USB_ENDPTCOMPLETE)); + i += scnprintf(buf + i, PAGE_SIZE - i, + "regs: cmd=%08x sts=%08x intr=%08x port=%08x\n\n", + readl(USB_USBCMD), + readl(USB_USBSTS), + readl(USB_USBINTR), + readl(USB_PORTSC)); + + + for (n = 0; n < 32; n++) { + ept = ui->ept + n; + if (ept->ep.maxpacket == 0) + continue; + + i += scnprintf(buf + i, PAGE_SIZE - i, + "ept%d %s cfg=%08x active=%08x next=%08x info=%08x\n", + ept->num, (ept->flags & EPT_FLAG_IN) ? "in " : "out", + ept->head->config, ept->head->active, + ept->head->next, ept->head->info); + + for (req = ept->req; req; req = req->next) + i += scnprintf(buf + i, PAGE_SIZE - i, + " req @%08x next=%08x info=%08x page0=%08x %c %c\n", + req->item_dma, req->item->next, + req->item->info, req->item->page0, + req->busy ? 'B' : ' ', + req->live ? 
'L' : ' ' + ); + } + + i += scnprintf(buf + i, PAGE_SIZE - i, + "phy failure count: %d\n", ui->phy_fail_count); + + spin_unlock_irqrestore(&ui->lock, flags); + + return simple_read_from_buffer(ubuf, count, ppos, buf, i); +} + +static ssize_t debug_write_reset(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct usb_info *ui = file->private_data; + unsigned long flags; + + spin_lock_irqsave(&ui->lock, flags); + ui->flags |= USB_FLAG_RESET; + schedule_work(&ui->work); + spin_unlock_irqrestore(&ui->lock, flags); + + return count; +} + +static ssize_t debug_write_cycle(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + usb_function_reenumerate(); + return count; +} + +static int debug_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +const struct file_operations debug_stat_ops = { + .open = debug_open, + .read = debug_read_status, +}; + +const struct file_operations debug_reset_ops = { + .open = debug_open, + .write = debug_write_reset, +}; + +const struct file_operations debug_cycle_ops = { + .open = debug_open, + .write = debug_write_cycle, +}; + +static void usb_debugfs_init(struct usb_info *ui) +{ + struct dentry *dent; + dent = debugfs_create_dir("usb", 0); + if (IS_ERR(dent)) + return; + + debugfs_create_file("status", 0444, dent, ui, &debug_stat_ops); + debugfs_create_file("reset", 0220, dent, ui, &debug_reset_ops); + debugfs_create_file("cycle", 0220, dent, ui, &debug_cycle_ops); +} +#else +static void usb_debugfs_init(struct usb_info *ui) {} +#endif + +static int +msm72k_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) +{ + struct msm_endpoint *ept = to_msm_endpoint(_ep); + unsigned char ep_type = + desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; + + if (ep_type == USB_ENDPOINT_XFER_BULK) + _ep->maxpacket = le16_to_cpu(desc->wMaxPacketSize); + else + _ep->maxpacket = le16_to_cpu(64); + config_ept(ept); + usb_ept_enable(ept, 1, ep_type); + return 0; +} + +static int msm72k_disable(struct usb_ep *_ep) +{ + struct msm_endpoint *ept = to_msm_endpoint(_ep); + + usb_ept_enable(ept, 0, 0); + return 0; +} + +static struct usb_request * +msm72k_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) +{ + return usb_ept_alloc_req(to_msm_endpoint(_ep), 0, gfp_flags); +} + +static void +msm72k_free_request(struct usb_ep *_ep, struct usb_request *_req) +{ + struct msm_request *req = to_msm_request(_req); + struct msm_endpoint *ept = to_msm_endpoint(_ep); + struct usb_info *ui = ept->ui; + + /* request should not be busy */ + BUG_ON(req->busy); + + if (req->alloced) + kfree(req->req.buf); + dma_pool_free(ui->pool, req->item, req->item_dma); + kfree(req); +} + +static int +msm72k_queue(struct usb_ep *_ep, struct usb_request *req, gfp_t gfp_flags) +{ + struct msm_endpoint *ep = to_msm_endpoint(_ep); + struct usb_info *ui = ep->ui; + + if (ep == &ui->ep0in) { + struct msm_request *r = to_msm_request(req); + if (!req->length) + goto ep_queue_done; + else { + if (ui->ep0_dir == USB_DIR_OUT) { + ep = &ui->ep0out; + ep->ep.driver_data = ui->ep0in.ep.driver_data; + } + /* ep0_queue_ack_complete queue a receive for ACK before + ** calling req->complete + */ + r->gadget_complete = req->complete; + req->complete = ep0_queue_ack_complete; + } + } +ep_queue_done: + return usb_ept_queue_xfer(ep, req); +} + +static int msm72k_dequeue(struct usb_ep *_ep, struct usb_request *_req) +{ + struct msm_endpoint *ep = to_msm_endpoint(_ep); + struct msm_request *req = to_msm_request(_req); + 
struct usb_info *ui = ep->ui; + + struct msm_request *cur, *prev; + unsigned long flags; + + if (!_ep || !_req) + return -EINVAL; + + spin_lock_irqsave(&ui->lock, flags); + cur = ep->req; + prev = NULL; + + while (cur != 0) { + if (cur == req) { + req->busy = 0; + req->live = 0; + req->req.status = -ECONNRESET; + req->req.actual = 0; + if (req->req.complete) { + spin_unlock_irqrestore(&ui->lock, flags); + req->req.complete(&ep->ep, &req->req); + spin_lock_irqsave(&ui->lock, flags); + } + /* remove from linked list */ + if (prev) + prev->next = cur->next; + else + ep->req = cur->next; + if (ep->last == cur) + ep->last = prev; + /* break from loop */ + cur = NULL; + } else { + prev = cur; + cur = cur->next; + } + } + spin_unlock_irqrestore(&ui->lock, flags); + + return 0; +} + +static int +msm72k_set_halt(struct usb_ep *_ep, int value) +{ + struct msm_endpoint *ept = to_msm_endpoint(_ep); + struct usb_info *ui = ept->ui; + unsigned int in = ept->flags & EPT_FLAG_IN; + unsigned int n; + unsigned long flags; + + spin_lock_irqsave(&ui->lock, flags); + n = readl(USB_ENDPTCTRL(ept->num)); + + if (in) { + if (value) + n |= CTRL_TXS; + else { + n &= ~CTRL_TXS; + n |= CTRL_TXR; + } + } else { + if (value) + n |= CTRL_RXS; + else { + n &= ~CTRL_RXS; + n |= CTRL_RXR; + } + } + writel(n, USB_ENDPTCTRL(ept->num)); + spin_unlock_irqrestore(&ui->lock, flags); + + return 0; +} + +static int +msm72k_fifo_status(struct usb_ep *_ep) +{ + return -EOPNOTSUPP; +} + +static void +msm72k_fifo_flush(struct usb_ep *_ep) +{ + flush_endpoint(to_msm_endpoint(_ep)); +} + +static const struct usb_ep_ops msm72k_ep_ops = { + .enable = msm72k_enable, + .disable = msm72k_disable, + + .alloc_request = msm72k_alloc_request, + .free_request = msm72k_free_request, + + .queue = msm72k_queue, + .dequeue = msm72k_dequeue, + + .set_halt = msm72k_set_halt, + .fifo_status = msm72k_fifo_status, + .fifo_flush = msm72k_fifo_flush, +}; + +static int msm72k_get_frame(struct usb_gadget *_gadget) +{ + struct usb_info *ui = container_of(_gadget, struct usb_info, gadget); + + /* frame number is in bits 13:3 */ + return (readl(USB_FRINDEX) >> 3) & 0x000007FF; +} + +/* VBUS reporting logically comes from a transceiver */ +static int msm72k_udc_vbus_session(struct usb_gadget *_gadget, int is_active) +{ + msm_hsusb_set_vbus_state(is_active); + return 0; +} + +/* drivers may have software control over D+ pullup */ +static int msm72k_pullup(struct usb_gadget *_gadget, int is_active) +{ + struct usb_info *ui = container_of(_gadget, struct usb_info, gadget); + + u32 cmd = (8 << 16); + + /* disable/enable D+ pullup */ + if (is_active) { + pr_info("msm_hsusb: enable pullup\n"); + writel(cmd | 1, USB_USBCMD); + } else { + pr_info("msm_hsusb: disable pullup\n"); + writel(cmd, USB_USBCMD); + +#if defined(CONFIG_ARCH_QSD8X50) || defined(CONFIG_ARCH_MSM7X30) + ulpi_write(ui, 0x48, 0x04); +#endif + } + + return 0; +} + +static int msm72k_wakeup(struct usb_gadget *_gadget) +{ + struct usb_info *ui = container_of(_gadget, struct usb_info, gadget); + unsigned long flags; + + if (!ui->remote_wakeup) { + pr_err("%s: remote wakeup not supported\n", __func__); + return -ENOTSUPP; + } + + if (!ui->online) { + pr_err("%s: device is not configured\n", __func__); + return -ENODEV; + } + + spin_lock_irqsave(&ui->lock, flags); + if ((readl(USB_PORTSC) & PORTSC_SUSP) == PORTSC_SUSP) { + pr_info("%s: enabling force resume\n", __func__); + writel(readl(USB_PORTSC) | PORTSC_FPR, USB_PORTSC); + } + spin_unlock_irqrestore(&ui->lock, flags); + + return 0; +} + +static const 
struct usb_gadget_ops msm72k_ops = { + .get_frame = msm72k_get_frame, + .vbus_session = msm72k_udc_vbus_session, + .pullup = msm72k_pullup, + .wakeup = msm72k_wakeup, +}; + +static ssize_t usb_remote_wakeup(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct usb_info *ui = the_usb_info; + + msm72k_wakeup(&ui->gadget); + + return count; +} +static DEVICE_ATTR(wakeup, S_IWUSR, 0, usb_remote_wakeup); + +static int msm72k_probe(struct platform_device *pdev) +{ + struct resource *res; + struct usb_info *ui; + int irq; + int ret; + + INFO("msm72k_probe\n"); + ui = kzalloc(sizeof(struct usb_info), GFP_KERNEL); + if (!ui) + return -ENOMEM; + + spin_lock_init(&ui->lock); + ui->pdev = pdev; + + if (pdev->dev.platform_data) { + struct msm_hsusb_platform_data *pdata = pdev->dev.platform_data; + ui->phy_reset = pdata->phy_reset; + ui->phy_init_seq = pdata->phy_init_seq; + ui->usb_connected = pdata->usb_connected; + } + + irq = platform_get_irq(pdev, 0); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res || (irq < 0)) + return usb_free(ui, -ENODEV); + + ui->addr = ioremap(res->start, 4096); + if (!ui->addr) + return usb_free(ui, -ENOMEM); + + ui->buf = dma_alloc_coherent(&pdev->dev, 4096, &ui->dma, GFP_KERNEL); + if (!ui->buf) + return usb_free(ui, -ENOMEM); + + ui->pool = dma_pool_create("msm72k_udc", NULL, 32, 32, 0); + if (!ui->pool) + return usb_free(ui, -ENOMEM); + + INFO("msm72k_probe() io=%p, irq=%d, dma=%p(%x)\n", + ui->addr, irq, ui->buf, ui->dma); + + ui->clk = clk_get(&pdev->dev, "usb_hs_clk"); + if (IS_ERR(ui->clk)) + return usb_free(ui, PTR_ERR(ui->clk)); + + ui->pclk = clk_get(&pdev->dev, "usb_hs_pclk"); + if (IS_ERR(ui->pclk)) + return usb_free(ui, PTR_ERR(ui->pclk)); + + ui->otgclk = clk_get(&pdev->dev, "usb_otg_clk"); + if (IS_ERR(ui->otgclk)) + ui->otgclk = NULL; + + ui->coreclk = clk_get(&pdev->dev, "usb_hs_core_clk"); + if (IS_ERR(ui->coreclk)) + ui->coreclk = NULL; + + ui->ebi1clk = clk_get(NULL, "ebi1_clk"); + if (IS_ERR(ui->ebi1clk)) + return usb_free(ui, PTR_ERR(ui->ebi1clk)); + + /* clear interrupts before requesting irq */ + if (ui->coreclk) + clk_enable(ui->coreclk); + clk_enable(ui->clk); + clk_enable(ui->pclk); + if (ui->otgclk) + clk_enable(ui->otgclk); + writel(0, USB_USBINTR); + writel(0, USB_OTGSC); + if (ui->coreclk) + clk_disable(ui->coreclk); + if (ui->otgclk) + clk_disable(ui->otgclk); + clk_disable(ui->pclk); + clk_disable(ui->clk); + + ret = request_irq(irq, usb_interrupt, 0, pdev->name, ui); + if (ret) + return usb_free(ui, ret); + enable_irq_wake(irq); + ui->irq = irq; + + ui->gadget.ops = &msm72k_ops; + ui->gadget.is_dualspeed = 1; + device_initialize(&ui->gadget.dev); + dev_set_name(&ui->gadget.dev, "gadget"); + ui->gadget.dev.parent = &pdev->dev; + ui->gadget.dev.dma_mask = pdev->dev.dma_mask; + + the_usb_info = ui; + + usb_debugfs_init(ui); + + usb_prepare(ui); + + return 0; +} + +int usb_gadget_probe_driver(struct usb_gadget_driver *driver, + int (*bind)(struct usb_gadget *)) +{ + struct usb_info *ui = the_usb_info; + int retval, n; + + if (!driver + || driver->speed < USB_SPEED_FULL + || !bind + || !driver->disconnect + || !driver->setup) + return -EINVAL; + if (!ui) + return -ENODEV; + if (ui->driver) + return -EBUSY; + + /* first hook up the driver ... 
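+	 * (state set up from here on is unwound in the fail: path below)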
*/ + ui->driver = driver; + ui->gadget.dev.driver = &driver->driver; + ui->gadget.name = driver_name; + INIT_LIST_HEAD(&ui->gadget.ep_list); + ui->gadget.ep0 = &ui->ep0in.ep; + INIT_LIST_HEAD(&ui->gadget.ep0->ep_list); + ui->gadget.speed = USB_SPEED_UNKNOWN; + + for (n = 1; n < 16; n++) { + struct msm_endpoint *ept = ui->ept + n; + list_add_tail(&ept->ep.ep_list, &ui->gadget.ep_list); + ept->ep.maxpacket = 512; + } + for (n = 17; n < 32; n++) { + struct msm_endpoint *ept = ui->ept + n; + list_add_tail(&ept->ep.ep_list, &ui->gadget.ep_list); + ept->ep.maxpacket = 512; + } + + retval = device_add(&ui->gadget.dev); + if (retval) + goto fail; + + retval = bind(&ui->gadget); + if (retval) { + INFO("bind to driver %s --> error %d\n", + driver->driver.name, retval); + device_del(&ui->gadget.dev); + goto fail; + } + + /* create sysfs node for remote wakeup */ + retval = device_create_file(&ui->gadget.dev, &dev_attr_wakeup); + if (retval != 0) + INFO("failed to create sysfs entry: (wakeup) error: (%d)\n", + retval); + INFO("msm72k_udc: registered gadget driver '%s'\n", + driver->driver.name); + usb_start(ui); + + return 0; + +fail: + ui->driver = NULL; + ui->gadget.dev.driver = NULL; + return retval; +} +EXPORT_SYMBOL(usb_gadget_probe_driver); + +int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) +{ + struct usb_info *dev = the_usb_info; + + if (!dev) + return -ENODEV; + if (!driver || driver != dev->driver || !driver->unbind) + return -EINVAL; + + device_remove_file(&dev->gadget.dev, &dev_attr_wakeup); + driver->unbind(&dev->gadget); + dev->gadget.dev.driver = NULL; + dev->driver = NULL; + + device_del(&dev->gadget.dev); + + VDEBUG("unregistered gadget driver '%s'\n", driver->driver.name); + return 0; +} +EXPORT_SYMBOL(usb_gadget_unregister_driver); + + +static struct platform_driver usb_driver = { + .probe = msm72k_probe, + .driver = { .name = "msm_hsusb", }, +}; + +static int __init init(void) +{ + return platform_driver_register(&usb_driver); +} +module_init(init); + +static void __exit cleanup(void) +{ + platform_driver_unregister(&usb_driver); +} +module_exit(cleanup); + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_AUTHOR("Mike Lockwood, Brian Swetland"); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/msm72k_udc_htc.c b/drivers/usb/gadget/msm72k_udc_htc.c new file mode 100644 index 0000000000000..80905c54b1463 --- /dev/null +++ b/drivers/usb/gadget/msm72k_udc_htc.c @@ -0,0 +1,2849 @@ +/* + * Driver for HighSpeed USB Client Controller in MSM7K + * + * Copyright (C) 2008 Google, Inc. + * Author: Mike Lockwood + * Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#ifdef CONFIG_ARCH_MSM7X30 +#include +#endif +#ifdef CONFIG_USB_ACCESSORY_DETECT_BY_ADC +#include +#endif +#include + +static const char driver_name[] = "msm72k_udc"; + +/* #define DEBUG */ +/* #define VERBOSE */ + +#define MSM_USB_BASE ((unsigned) ui->addr) + +#define DRIVER_DESC "MSM 72K USB Peripheral Controller" + +#define EPT_FLAG_IN 0x0001 + +#define SETUP_BUF_SIZE 4096 + +typedef void (*completion_func)(struct usb_ep *ep, struct usb_request *req); + +static const char *const ep_name[] = { + "ep0out", "ep1out", "ep2out", "ep3out", + "ep4out", "ep5out", "ep6out", "ep7out", + "ep8out", "ep9out", "ep10out", "ep11out", + "ep12out", "ep13out", "ep14out", "ep15out", + "ep0in", "ep1in", "ep2in", "ep3in", + "ep4in", "ep5in", "ep6in", "ep7in", + "ep8in", "ep9in", "ep10in", "ep11in", + "ep12in", "ep13in", "ep14in", "ep15in" +}; + +static struct usb_info *the_usb_info; +/* current state of VBUS */ +static int vbus; +static int use_mfg_serialno; +static char mfg_df_serialno[16]; +static int disable_charger; + +#if defined (CONFIG_DOCK_ACCESSORY_DETECT) || defined(CONFIG_USB_ACCESSORY_DETECT) +#ifdef CONFIG_USB_ACCESSORY_DETECT_BY_ADC +extern int htc_get_usb_accessory_adc_level(uint32_t *buffer); +#endif + + +static struct switch_dev dock_switch = { + .name = "dock", +}; + +#define DOCK_STATE_UNDOCKED 0 +#define DOCK_STATE_DESK (1 << 0) +#define DOCK_STATE_CAR (1 << 1) +#endif + +#include +#include + +static struct wake_lock vbus_idle_wake_lock; +static struct perf_lock usb_perf_lock; + +struct msm_request { + struct usb_request req; + + /* saved copy of req.complete */ + completion_func gadget_complete; + + + struct usb_info *ui; + struct msm_request *next; + + unsigned busy:1; + unsigned live:1; + unsigned alloced:1; + unsigned dead:1; + + dma_addr_t dma; + dma_addr_t item_dma; + + struct ept_queue_item *item; +}; + +#define to_msm_request(r) container_of(r, struct msm_request, req) +#define to_msm_endpoint(r) container_of(r, struct msm_endpoint, ep) + +struct msm_endpoint { + struct usb_ep ep; + struct usb_info *ui; + struct msm_request *req; /* head of pending requests */ + struct msm_request *last; + unsigned flags; + + /* bit number (0-31) in various status registers + ** as well as the index into the usb_info's array + ** of all endpoints + */ + unsigned char bit; + unsigned char num; + + /* pointers to DMA transfer list area */ + /* these are allocated from the usb_info dma space */ + struct ept_queue_head *head; +}; + +static void usb_do_work(struct work_struct *w); +static void check_charger(struct work_struct *w); +#ifdef CONFIG_USB_ACCESSORY_DETECT +static void accessory_detect_work(struct work_struct *w); +#endif +#ifdef CONFIG_DOCK_ACCESSORY_DETECT +static void dock_detect_work(struct work_struct *w); +static void dock_detect_init(struct usb_info *ui); +#endif +extern int android_switch_function(unsigned func); +extern int android_show_function(char *buf); +extern void android_set_serialno(char *serialno); + +#define USB_STATE_IDLE 0 +#define USB_STATE_ONLINE 1 +#define USB_STATE_OFFLINE 2 + +#define USB_FLAG_START 0x0001 +#define USB_FLAG_VBUS_ONLINE 0x0002 +#define USB_FLAG_VBUS_OFFLINE 0x0004 +#define USB_FLAG_RESET 0x0008 + +enum usb_connect_type { + CONNECT_TYPE_NONE = 0, + CONNECT_TYPE_USB, + CONNECT_TYPE_AC, + 
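+	/* cable attached but charger type not resolved; the notifier reports it as CONNECT_TYPE_USB (slow charge) */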
CONNECT_TYPE_UNKNOWN, +}; + +struct usb_info { + /* lock for register/queue/device state changes */ + spinlock_t lock; + + /* single request used for handling setup transactions */ + struct usb_request *setup_req; + + struct platform_device *pdev; + int irq; + void *addr; + + unsigned state; + unsigned flags; + + unsigned online:1; + unsigned running:1; + + struct dma_pool *pool; + + /* dma page to back the queue heads and items */ + unsigned char *buf; + dma_addr_t dma; + + struct ept_queue_head *head; + + /* used for allocation */ + unsigned next_item; + unsigned next_ifc_num; + + /* endpoints are ordered based on their status bits, + ** so they are OUT0, OUT1, ... OUT15, IN0, IN1, ... IN15 + */ + struct msm_endpoint ept[32]; + + int *phy_init_seq; + void (*phy_reset)(void); + void (*hw_reset)(bool en); + void (*usb_uart_switch)(int); + void (*serial_debug_gpios)(int); + void (*usb_hub_enable)(bool); + int (*china_ac_detect)(void); + void (*disable_usb_charger)(void); + + /* for notification when USB is connected or disconnected */ + void (*usb_connected)(int); + + struct workqueue_struct *usb_wq; + struct work_struct work; + struct delayed_work chg_work; + struct work_struct detect_work; + struct work_struct dock_work; + struct work_struct notifier_work; + unsigned phy_status; + unsigned phy_fail_count; + + struct usb_gadget gadget; + struct usb_gadget_driver *driver; + +#define ep0out ept[0] +#define ep0in ept[16] + + struct clk *clk; + struct clk *coreclk; + struct clk *pclk; + struct clk *otgclk; + struct clk *ebi1clk; + + unsigned int ep0_dir; + u16 test_mode; + + u8 remote_wakeup; + enum usb_connect_type connect_type; + u8 in_lpm; + + /* for accessory detection */ + bool dock_detect; + u8 accessory_detect; + u8 mfg_usb_carkit_enable; + int idpin_irq; + int dockpin_irq; + int usb_id_pin_gpio; + int dock_pin_gpio; + void (*config_usb_id_gpios)(bool output_enable); + /* 0: none, 1: carkit, 2: usb headset */ + u8 accessory_type; +}; + +static const struct usb_ep_ops msm72k_ep_ops; + + +static int msm72k_pullup(struct usb_gadget *_gadget, int is_active); +static int msm72k_set_halt(struct usb_ep *_ep, int value); +static void flush_endpoint(struct msm_endpoint *ept); + +static DEFINE_MUTEX(notify_sem); +static void send_usb_connect_notify(struct work_struct *w) +{ + static struct t_usb_status_notifier *notifier; + struct usb_info *ui = container_of(w, struct usb_info, + notifier_work); + if (!ui) + return; + + printk(KERN_INFO "usb: send connect type %d\n", ui->connect_type); + mutex_lock(¬ify_sem); + list_for_each_entry(notifier, + &g_lh_usb_notifier_list, + notifier_link) { + if (notifier->func != NULL) { + /* Notify other drivers about connect type. */ + /* use slow charging for unknown type*/ + if (ui->connect_type == CONNECT_TYPE_UNKNOWN) + notifier->func(CONNECT_TYPE_USB); + else + notifier->func(ui->connect_type); + } + } + mutex_unlock(¬ify_sem); +} + +int usb_register_notifier(struct t_usb_status_notifier *notifier) +{ + if (!notifier || !notifier->name || !notifier->func) + return -EINVAL; + + mutex_lock(¬ify_sem); + list_add(¬ifier->notifier_link, + &g_lh_usb_notifier_list); + mutex_unlock(¬ify_sem); + return 0; +} + +static int usb_ep_get_stall(struct msm_endpoint *ept) +{ + unsigned int n; + struct usb_info *ui = ept->ui; + + n = readl(USB_ENDPTCTRL(ept->num)); + if (ept->flags & EPT_FLAG_IN) + return (CTRL_TXS & n) ? 1 : 0; + else + return (CTRL_RXS & n) ? 
1 : 0; +} + +static unsigned ulpi_read(struct usb_info *ui, unsigned reg) +{ + unsigned timeout = 100000; + + /* initiate read operation */ + writel(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg), + USB_ULPI_VIEWPORT); + + /* wait for completion */ + while ((readl(USB_ULPI_VIEWPORT) & ULPI_RUN) && (--timeout)) ; + + if (timeout == 0) { + ERROR("ulpi_read: timeout %08x\n", readl(USB_ULPI_VIEWPORT)); + return 0xffffffff; + } + return ULPI_DATA_READ(readl(USB_ULPI_VIEWPORT)); +} + +static int ulpi_write(struct usb_info *ui, unsigned val, unsigned reg) +{ + unsigned timeout = 10000; + + /* initiate write operation */ + writel(ULPI_RUN | ULPI_WRITE | + ULPI_ADDR(reg) | ULPI_DATA(val), + USB_ULPI_VIEWPORT); + + /* wait for completion */ + while ((readl(USB_ULPI_VIEWPORT) & ULPI_RUN) && (--timeout)) ; + + if (timeout == 0) { + printk(KERN_ERR "ulpi_write: timeout\n"); + return -1; + } + + return 0; +} + +static void ulpi_init(struct usb_info *ui) +{ + int *seq = ui->phy_init_seq; + + if (!seq) + return; + + while (seq[0] >= 0) { + INFO("ulpi: write 0x%02x to 0x%02x\n", seq[0], seq[1]); + ulpi_write(ui, seq[0], seq[1]); + seq += 2; + } +} + +static void init_endpoints(struct usb_info *ui) +{ + unsigned n; + + for (n = 0; n < 32; n++) { + struct msm_endpoint *ept = ui->ept + n; + + ept->ui = ui; + ept->bit = n; + ept->num = n & 15; + ept->ep.name = ep_name[n]; + ept->ep.ops = &msm72k_ep_ops; + + if (ept->bit > 15) { + /* IN endpoint */ + ept->head = ui->head + (ept->num << 1) + 1; + ept->flags = EPT_FLAG_IN; + } else { + /* OUT endpoint */ + ept->head = ui->head + (ept->num << 1); + ept->flags = 0; + } + + } +} + +static void config_ept(struct msm_endpoint *ept) +{ + unsigned cfg = CONFIG_MAX_PKT(ept->ep.maxpacket) | CONFIG_ZLT; + + if (ept->bit == 0) + /* ep0 out needs interrupt-on-setup */ + cfg |= CONFIG_IOS; + + ept->head->config = cfg; + ept->head->next = TERMINATE; +#if 0 + if (ept->ep.maxpacket) + INFO("ept #%d %s max:%d head:%p bit:%d\n", + ept->num, (ept->flags & EPT_FLAG_IN) ? 
"in" : "out", + ept->ep.maxpacket, ept->head, ept->bit); +#endif +} + +static void configure_endpoints(struct usb_info *ui) +{ + unsigned n; + + for (n = 0; n < 32; n++) + config_ept(ui->ept + n); +} + +struct usb_request *usb_ept_alloc_req(struct msm_endpoint *ept, + unsigned bufsize, gfp_t gfp_flags) +{ + struct usb_info *ui = ept->ui; + struct msm_request *req; + + req = kzalloc(sizeof(*req), gfp_flags); + if (!req) + goto fail1; + + req->item = dma_pool_alloc(ui->pool, gfp_flags, &req->item_dma); + if (!req->item) + goto fail2; + + if (bufsize) { + req->req.buf = kmalloc(bufsize, gfp_flags); + if (!req->req.buf) + goto fail3; + req->alloced = 1; + } + + return &req->req; + +fail3: + dma_pool_free(ui->pool, req->item, req->item_dma); +fail2: + kfree(req); +fail1: + return 0; +} + +static void do_free_req(struct usb_info *ui, struct msm_request *req) +{ + if (req->alloced) + kfree(req->req.buf); + + dma_pool_free(ui->pool, req->item, req->item_dma); + kfree(req); +} + + +static void usb_ept_enable(struct msm_endpoint *ept, int yes, + unsigned char ep_type) +{ + struct usb_info *ui = ept->ui; + int in = ept->flags & EPT_FLAG_IN; + unsigned n; + + n = readl(USB_ENDPTCTRL(ept->num)); + + if (in) { + n = (n & (~CTRL_TXT_MASK)); + if (yes) { + n |= CTRL_TXE | CTRL_TXR; + } else { + n &= (~CTRL_TXE); + } + if (yes) { + switch (ep_type) { + case USB_ENDPOINT_XFER_BULK: + n |= CTRL_TXT_BULK; + break; + case USB_ENDPOINT_XFER_INT: + n |= CTRL_TXT_INT; + break; + case USB_ENDPOINT_XFER_ISOC: + n |= CTRL_TXT_ISOCH; + break; + default: + pr_err("%s: unsupported ep_type %d for %s\n", + __func__, ep_type, ept->ep.name); + break; + } + } + } else { + n = (n & (~CTRL_RXT_MASK)); + if (yes) { + n |= CTRL_RXE | CTRL_RXR; + } else { + n &= ~(CTRL_RXE); + } + if (yes) { + switch (ep_type) { + case USB_ENDPOINT_XFER_BULK: + n |= CTRL_RXT_BULK; + break; + case USB_ENDPOINT_XFER_INT: + n |= CTRL_RXT_INT; + break; + case USB_ENDPOINT_XFER_ISOC: + n |= CTRL_RXT_ISOCH; + break; + default: + pr_err("%s: unsupported ep_type %d for %s\n", + __func__, ep_type, ept->ep.name); + break; + } + } + } + writel(n, USB_ENDPTCTRL(ept->num)); + +#if 0 + INFO("ept %d %s %s\n", + ept->num, in ? "in" : "out", yes ? "enabled" : "disabled"); +#endif +} + +static void usb_ept_start(struct msm_endpoint *ept) +{ + struct usb_info *ui = ept->ui; + struct msm_request *req = ept->req; + int i, cnt; + unsigned n = 1 << ept->bit; + + BUG_ON(req->live); + + /* link the hw queue head to the request's transaction item */ + ept->head->next = req->item_dma; + ept->head->info = 0; + + /* during high throughput testing it is observed that + * ept stat bit is not set even thoguh all the data + * structures are updated properly and ept prime bit + * is set. To workaround the issue, try to check if + * ept stat bit otherwise try to re-prime the ept + */ + for (i = 0; i < 5; i++) { + writel(n, USB_ENDPTPRIME); + for (cnt = 0; cnt < 3000; cnt++) { + if (!(readl(USB_ENDPTPRIME) & n) && + (readl(USB_ENDPTSTAT) & n)) + goto DONE; + udelay(1); + } + } + + if (!(readl(USB_ENDPTSTAT) & n)) { + pr_err("Unable to prime the ept%d%s\n", + ept->num, + ept->flags & EPT_FLAG_IN ? 
"in" : "out"); + return; + } + +DONE: + /* mark this chain of requests as live */ + while (req) { + req->live = 1; + req = req->next; + } + +} + +int usb_ept_queue_xfer(struct msm_endpoint *ept, struct usb_request *_req) +{ + unsigned long flags; + struct msm_request *req = to_msm_request(_req); + struct msm_request *last; + struct usb_info *ui = ept->ui; + struct ept_queue_item *item = req->item; + unsigned length = req->req.length; + + if (length > 0x4000) + return -EMSGSIZE; + + spin_lock_irqsave(&ui->lock, flags); + + if (req->busy) { + req->req.status = -EBUSY; + spin_unlock_irqrestore(&ui->lock, flags); + INFO("usb_ept_queue_xfer() tried to queue busy request\n"); + return -EBUSY; + } + + if (!ui->online && (ept->num != 0)) { + req->req.status = -ESHUTDOWN; + spin_unlock_irqrestore(&ui->lock, flags); + INFO("usb_ept_queue_xfer() called while offline\n"); + return -ESHUTDOWN; + } + + req->busy = 1; + req->live = 0; + req->next = 0; + req->req.status = -EBUSY; + + req->dma = dma_map_single(NULL, req->req.buf, length, + (ept->flags & EPT_FLAG_IN) ? + DMA_TO_DEVICE : DMA_FROM_DEVICE); + + /* prepare the transaction descriptor item for the hardware */ + item->next = TERMINATE; + item->info = INFO_BYTES(length) | INFO_IOC | INFO_ACTIVE; + item->page0 = req->dma; + item->page1 = (req->dma + 0x1000) & 0xfffff000; + item->page2 = (req->dma + 0x2000) & 0xfffff000; + item->page3 = (req->dma + 0x3000) & 0xfffff000; + + /* Add the new request to the end of the queue */ + last = ept->last; + if (last) { + /* Already requests in the queue. add us to the + * end, but let the completion interrupt actually + * start things going, to avoid hw issues + */ + last->next = req; + + /* only modify the hw transaction next pointer if + * that request is not live + */ + if (!last->live) + last->item->next = req->item_dma; + } else { + /* queue was empty -- kick the hardware */ + ept->req = req; + usb_ept_start(ept); + } + ept->last = req; + + spin_unlock_irqrestore(&ui->lock, flags); + return 0; +} + +/* --- endpoint 0 handling --- */ + +static void ep0_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct msm_request *r = to_msm_request(req); + struct msm_endpoint *ept = to_msm_endpoint(ep); + struct usb_info *ui = ept->ui; + + req->complete = r->gadget_complete; + r->gadget_complete = NULL; + if (req->complete) + req->complete(&ui->ep0in.ep, req); +} + +static void ep0_queue_ack_complete(struct usb_ep *ep, + struct usb_request *_req) +{ + struct msm_endpoint *ept = to_msm_endpoint(ep); + struct usb_info *ui = ept->ui; + struct usb_request *req = ui->setup_req; + struct msm_request *r = to_msm_request(req); + completion_func gadget_complete = r->gadget_complete; + + if (gadget_complete) { + r->gadget_complete = NULL; + gadget_complete(ep, req); + } + + /* queue up the receive of the ACK response from the host */ + if (_req->status == 0 && _req->actual == _req->length) { + req->length = 0; + if (ui->ep0_dir == USB_DIR_IN) + usb_ept_queue_xfer(&ui->ep0out, req); + else + usb_ept_queue_xfer(&ui->ep0in, req); + _req->complete = r->gadget_complete; + r->gadget_complete = NULL; + if (_req->complete) + _req->complete(&ui->ep0in.ep, _req); + } else + ep0_complete(ep, _req); +} + +static void ep0_setup_ack_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct msm_endpoint *ept = to_msm_endpoint(ep); + struct usb_info *ui = ept->ui; + unsigned int temp; + + if (!ui->test_mode) + return; + + switch (ui->test_mode) { + case J_TEST: + pr_info("usb electrical test mode: (J)\n"); + temp = 
readl(USB_PORTSC) & (~PORTSC_PTC); + writel(temp | PORTSC_PTC_J_STATE, USB_PORTSC); + break; + + case K_TEST: + pr_info("usb electrical test mode: (K)\n"); + temp = readl(USB_PORTSC) & (~PORTSC_PTC); + writel(temp | PORTSC_PTC_K_STATE, USB_PORTSC); + break; + + case SE0_NAK_TEST: + pr_info("usb electrical test mode: (SE0-NAK)\n"); + temp = readl(USB_PORTSC) & (~PORTSC_PTC); + writel(temp | PORTSC_PTC_SE0_NAK, USB_PORTSC); + break; + + case TST_PKT_TEST: + pr_info("usb electrical test mode: (TEST_PKT)\n"); + temp = readl(USB_PORTSC) & (~PORTSC_PTC); + writel(temp | PORTSC_PTC_TST_PKT, USB_PORTSC); + break; + } +} + +static void ep0_setup_ack(struct usb_info *ui) +{ + struct usb_request *req = ui->setup_req; + req->length = 0; + req->complete = ep0_setup_ack_complete; + usb_ept_queue_xfer(&ui->ep0in, req); +} + +static void ep0_setup_stall(struct usb_info *ui) +{ + writel((1<<16) | (1<<0), USB_ENDPTCTRL(0)); +} + +static void ep0_setup_send(struct usb_info *ui, unsigned length) +{ + struct usb_request *req = ui->setup_req; + struct msm_request *r = to_msm_request(req); + struct msm_endpoint *ept = &ui->ep0in; + + req->length = length; + req->complete = ep0_queue_ack_complete; + r->gadget_complete = NULL; + usb_ept_queue_xfer(ept, req); +} + +static void handle_setup(struct usb_info *ui) +{ + struct usb_ctrlrequest ctl; + struct usb_request *req = ui->setup_req; + int ret; + + memcpy(&ctl, ui->ep0out.head->setup_data, sizeof(ctl)); + writel(EPT_RX(0), USB_ENDPTSETUPSTAT); + + if (ctl.bRequestType & USB_DIR_IN) + ui->ep0_dir = USB_DIR_IN; + else + ui->ep0_dir = USB_DIR_OUT; + + /* any pending ep0 transactions must be canceled */ + flush_endpoint(&ui->ep0out); + flush_endpoint(&ui->ep0in); + +#if 0 + INFO("setup: type=%02x req=%02x val=%04x idx=%04x len=%04x\n", + ctl.bRequestType, ctl.bRequest, ctl.wValue, + ctl.wIndex, ctl.wLength); +#endif + + if ((ctl.bRequestType & (USB_DIR_IN | USB_TYPE_MASK)) == + (USB_DIR_IN | USB_TYPE_STANDARD)) { + if (ctl.bRequest == USB_REQ_GET_STATUS) { + if (ctl.wLength != 2) + goto stall; + switch (ctl.bRequestType & USB_RECIP_MASK) { + case USB_RECIP_ENDPOINT: + { + struct msm_endpoint *ept; + unsigned num = + ctl.wIndex & USB_ENDPOINT_NUMBER_MASK; + u16 temp = 0; + + if (num == 0) { + memset(req->buf, 0, 2); + break; + } + if (ctl.wIndex & USB_ENDPOINT_DIR_MASK) + num += 16; + ept = &ui->ep0out + num; + temp = usb_ep_get_stall(ept); + temp = temp << USB_ENDPOINT_HALT; + memcpy(req->buf, &temp, 2); + break; + } + case USB_RECIP_DEVICE: + { + u16 temp = 0; + + temp |= (ui->remote_wakeup << + USB_DEVICE_REMOTE_WAKEUP); + memcpy(req->buf, &temp, 2); + break; + } + case USB_RECIP_INTERFACE: + memset(req->buf, 0, 2); + break; + default: + goto stall; + } + ep0_setup_send(ui, 2); + return; + } + } + if (ctl.bRequestType == + (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)) { + if ((ctl.bRequest == USB_REQ_CLEAR_FEATURE) || + (ctl.bRequest == USB_REQ_SET_FEATURE)) { + if ((ctl.wValue == 0) && (ctl.wLength == 0)) { + unsigned num = ctl.wIndex & 0x0f; + + if (num != 0) { + struct msm_endpoint *ept; + + if (ctl.wIndex & 0x80) + num += 16; + ept = &ui->ep0out + num; + + if (ctl.bRequest == USB_REQ_SET_FEATURE) + msm72k_set_halt(&ept->ep, 1); + else + msm72k_set_halt(&ept->ep, 0); + } + goto ack; + } + } + } + if (ctl.bRequestType == (USB_DIR_OUT | USB_TYPE_STANDARD)) { + if (ctl.bRequest == USB_REQ_SET_CONFIGURATION) + ui->online = !!ctl.wValue; + else if (ctl.bRequest == USB_REQ_SET_ADDRESS) { + /* write address delayed (will take effect + ** after the next IN txn) 
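+			** (bits 31:25 hold the new address, bit 24 defers the update)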
+ */ + writel((ctl.wValue << 25) | (1 << 24), USB_DEVICEADDR); + goto ack; + } else if (ctl.bRequest == USB_REQ_SET_FEATURE) { + switch (ctl.wValue) { + case USB_DEVICE_TEST_MODE: + switch (ctl.wIndex) { + case J_TEST: + case K_TEST: + case SE0_NAK_TEST: + if (!ui->test_mode) { + disable_charger = 1; + queue_delayed_work(ui->usb_wq, &ui->chg_work, 0); + } + case TST_PKT_TEST: + ui->test_mode = ctl.wIndex; + goto ack; + } + goto stall; + case USB_DEVICE_REMOTE_WAKEUP: + ui->remote_wakeup = 1; + goto ack; + } + } else if ((ctl.bRequest == USB_REQ_CLEAR_FEATURE) && + (ctl.wValue == USB_DEVICE_REMOTE_WAKEUP)) { + ui->remote_wakeup = 0; + goto ack; + } + } + + /* delegate if we get here */ + if (ui->driver) { + ret = ui->driver->setup(&ui->gadget, &ctl); + if (ret >= 0) + return; + } + +stall: + /* stall ep0 on error */ + ep0_setup_stall(ui); + return; + +ack: + ep0_setup_ack(ui); +} + +static void handle_endpoint(struct usb_info *ui, unsigned bit) +{ + struct msm_endpoint *ept = ui->ept + bit; + struct msm_request *req; + unsigned long flags; + unsigned info; + +#if 0 + INFO("handle_endpoint() %d %s req=%p(%08x)\n", + ept->num, (ept->flags & EPT_FLAG_IN) ? "in" : "out", + ept->req, ept->req ? ept->req->item_dma : 0); +#endif + + /* expire all requests that are no longer active */ + spin_lock_irqsave(&ui->lock, flags); + while ((req = ept->req)) { + info = req->item->info; + + /* if we've processed all live requests, time to + * restart the hardware on the next non-live request + */ + if (!req->live) { + usb_ept_start(ept); + break; + } + + /* if the transaction is still in-flight, stop here */ + if (info & INFO_ACTIVE) + break; + + /* advance ept queue to the next request */ + ept->req = req->next; + if (ept->req == 0) + ept->last = 0; + + dma_unmap_single(NULL, req->dma, req->req.length, + (ept->flags & EPT_FLAG_IN) ? + DMA_TO_DEVICE : DMA_FROM_DEVICE); + + if (info & (INFO_HALTED | INFO_BUFFER_ERROR | INFO_TXN_ERROR)) { + /* XXX pass on more specific error code */ + req->req.status = -EIO; + req->req.actual = 0; + INFO("msm72k_udc: ept %d %s error. info=%08x\n", + ept->num, + (ept->flags & EPT_FLAG_IN) ? "in" : "out", + info); + } else { + req->req.status = 0; + req->req.actual = + req->req.length - ((info >> 16) & 0x7FFF); + } + req->busy = 0; + req->live = 0; + if (req->dead) + do_free_req(ui, req); + + if (req->req.complete) { + spin_unlock_irqrestore(&ui->lock, flags); + req->req.complete(&ept->ep, &req->req); + spin_lock_irqsave(&ui->lock, flags); + } + } + spin_unlock_irqrestore(&ui->lock, flags); +} + +#define FLUSH_WAIT_US 5 +#define FLUSH_TIMEOUT (2 * (USEC_PER_SEC / FLUSH_WAIT_US)) +static void flush_endpoint_hw(struct usb_info *ui, unsigned bits) +{ + uint32_t unflushed = 0; + uint32_t stat = 0; + int cnt = 0; + + /* flush endpoint, canceling transactions + ** - this can take a "large amount of time" (per databook) + ** - the flush can fail in some cases, thus we check STAT + ** and repeat if we're still operating + ** (does the fact that this doesn't use the tripwire matter?!) + */ + while (cnt < FLUSH_TIMEOUT) { + writel(bits, USB_ENDPTFLUSH); + while (((unflushed = readl(USB_ENDPTFLUSH)) & bits) && + cnt < FLUSH_TIMEOUT) { + cnt++; + udelay(FLUSH_WAIT_US); + } + + stat = readl(USB_ENDPTSTAT); + if (cnt >= FLUSH_TIMEOUT) + goto err; + if (!(stat & bits)) + goto done; + cnt++; + udelay(FLUSH_WAIT_US); + } + +err: + pr_warning("%s: Could not complete flush! NOT GOOD! 
" + "stat: %x unflushed: %x bits: %x\n", __func__, + stat, unflushed, bits); +done: + return; +} + +static void flush_endpoint_sw(struct msm_endpoint *ept) +{ + struct usb_info *ui = ept->ui; + struct msm_request *req; + unsigned long flags; + + /* inactive endpoints have nothing to do here */ + if (ept->ep.maxpacket == 0) + return; + + /* put the queue head in a sane state */ + ept->head->info = 0; + ept->head->next = TERMINATE; + + /* cancel any pending requests */ + spin_lock_irqsave(&ui->lock, flags); + req = ept->req; + ept->req = 0; + ept->last = 0; + while (req != 0) { + req->busy = 0; + req->live = 0; + req->req.status = -ECONNRESET; + req->req.actual = 0; + if (req->req.complete) { + spin_unlock_irqrestore(&ui->lock, flags); + req->req.complete(&ept->ep, &req->req); + spin_lock_irqsave(&ui->lock, flags); + } + if (req->dead) + do_free_req(ui, req); + req = req->next; + } + spin_unlock_irqrestore(&ui->lock, flags); +} + +static void flush_endpoint(struct msm_endpoint *ept) +{ + flush_endpoint_hw(ept->ui, (1 << ept->bit)); + flush_endpoint_sw(ept); +} + +static void flush_all_endpoints(struct usb_info *ui) +{ + unsigned n; + + flush_endpoint_hw(ui, 0xffffffff); + + for (n = 0; n < 32; n++) + flush_endpoint_sw(ui->ept + n); +} + + +static irqreturn_t usb_interrupt(int irq, void *data) +{ + struct usb_info *ui = data; + unsigned n; + + n = readl(USB_USBSTS); + writel(n, USB_USBSTS); + + /* somehow we got an IRQ while in the reset sequence: ignore it */ + if (ui->running == 0) + return IRQ_HANDLED; + + if (n & STS_PCI) { + switch (readl(USB_PORTSC) & PORTSC_PSPD_MASK) { + case PORTSC_PSPD_FS: + INFO("usb: portchange USB_SPEED_FULL\n"); + ui->gadget.speed = USB_SPEED_FULL; + break; + case PORTSC_PSPD_LS: + INFO("usb: portchange USB_SPEED_LOW\n"); + ui->gadget.speed = USB_SPEED_LOW; + break; + case PORTSC_PSPD_HS: + INFO("usb: portchange USB_SPEED_HIGH\n"); + ui->gadget.speed = USB_SPEED_HIGH; + break; + } + } + + if (n & STS_URI) { + INFO("usb: reset\n"); + + writel(readl(USB_ENDPTSETUPSTAT), USB_ENDPTSETUPSTAT); + writel(readl(USB_ENDPTCOMPLETE), USB_ENDPTCOMPLETE); + writel(0xffffffff, USB_ENDPTFLUSH); + writel(0, USB_ENDPTCTRL(1)); + + if (ui->online != 0) { + /* marking us offline will cause ept queue attempts + ** to fail + */ + ui->online = 0; + + flush_all_endpoints(ui); + + /* XXX: we can't seem to detect going offline, + * XXX: so deconfigure on reset for the time being + */ + if (ui->driver) { + printk(KERN_INFO "usb: notify offline\n"); + ui->driver->disconnect(&ui->gadget); + } + } + if (ui->connect_type != CONNECT_TYPE_USB) { + ui->connect_type = CONNECT_TYPE_USB; + queue_work(ui->usb_wq, &ui->notifier_work); + } + } + + if (n & STS_SLI) + INFO("usb: suspend\n"); + + if (n & STS_UI) { + n = readl(USB_ENDPTSETUPSTAT); + if (n & EPT_RX(0)) + handle_setup(ui); + + n = readl(USB_ENDPTCOMPLETE); + writel(n, USB_ENDPTCOMPLETE); + while (n) { + unsigned bit = __ffs(n); + handle_endpoint(ui, bit); + n = n & (~(1 << bit)); + } + } + return IRQ_HANDLED; +} + +int usb_get_connect_type(void) +{ + if (!the_usb_info) + return 0; + return the_usb_info->connect_type; +} +EXPORT_SYMBOL(usb_get_connect_type); + +void msm_hsusb_request_reset(void) +{ + struct usb_info *ui = the_usb_info; + unsigned long flags; + if (!ui) + return; + spin_lock_irqsave(&ui->lock, flags); + ui->flags |= USB_FLAG_RESET; + queue_work(ui->usb_wq, &ui->work); + spin_unlock_irqrestore(&ui->lock, flags); +} + +static ssize_t show_usb_cable_connect(struct device *dev, + struct device_attribute *attr, char *buf) +{ + 
unsigned length; + if (!the_usb_info) + return 0; + length = sprintf(buf, "%d\n", + (the_usb_info->connect_type == CONNECT_TYPE_USB)?1:0); + return length; +} + +static DEVICE_ATTR(usb_cable_connect, 0444, show_usb_cable_connect, NULL); + +#if 0 +static ssize_t show_usb_function_switch(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return android_show_function(buf); +} + +static ssize_t store_usb_function_switch(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned u; + ssize_t ret; + + u = simple_strtoul(buf, NULL, 10); + ret = android_switch_function(u); + + if (ret == 0) + return count; + else + return 0; +} + +static DEVICE_ATTR(usb_function_switch, 0666, + show_usb_function_switch, store_usb_function_switch); +#endif + +static ssize_t show_usb_serial_number(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned length; + struct msm_hsusb_platform_data *pdata = dev->platform_data; + + length = sprintf(buf, "%s", pdata->serial_number); + return length; +} + +static ssize_t store_usb_serial_number(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct msm_hsusb_platform_data *pdata = dev->platform_data; + + if (buf[0] == '0' || buf[0] == '1') { + memset(mfg_df_serialno, 0x0, sizeof(mfg_df_serialno)); + if (buf[0] == '0') { + strncpy(mfg_df_serialno, "000000000000", + strlen("000000000000")); + use_mfg_serialno = 1; + android_set_serialno(mfg_df_serialno); + } else { + strncpy(mfg_df_serialno, pdata->serial_number, + strlen(pdata->serial_number)); + use_mfg_serialno = 0; + android_set_serialno(pdata->serial_number); + } + /* reset_device */ + msm_hsusb_request_reset(); + } + + return count; +} + +static DEVICE_ATTR(usb_serial_number, 0644, + show_usb_serial_number, store_usb_serial_number); + +static ssize_t show_dummy_usb_serial_number(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned length; + struct msm_hsusb_platform_data *pdata = dev->platform_data; + + if (use_mfg_serialno) + length = sprintf(buf, "%s", mfg_df_serialno); /* dummy */ + else + length = sprintf(buf, "%s", pdata->serial_number); /* Real */ + return length; +} + +static ssize_t store_dummy_usb_serial_number(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int data_buff_size = (sizeof(mfg_df_serialno) > strlen(buf))? 
+ strlen(buf):sizeof(mfg_df_serialno); + int loop_i; + + /* avoid overflow, mfg_df_serialno[16] always is 0x0 */ + if (data_buff_size == 16) + data_buff_size--; + + for (loop_i = 0; loop_i < data_buff_size; loop_i++) { + if (buf[loop_i] >= 0x30 && buf[loop_i] <= 0x39) /* 0-9 */ + continue; + else if (buf[loop_i] >= 0x41 && buf[loop_i] <= 0x5A) /* A-Z */ + continue; + if (buf[loop_i] == 0x0A) /* Line Feed */ + continue; + else { + printk(KERN_WARNING "%s(): get invaild char (0x%2.2X)\n", + __func__, buf[loop_i]); + return -EINVAL; + } + } + + use_mfg_serialno = 1; + memset(mfg_df_serialno, 0x0, sizeof(mfg_df_serialno)); + strncpy(mfg_df_serialno, buf, data_buff_size); + android_set_serialno(mfg_df_serialno); + /*device_reset */ + msm_hsusb_request_reset(); + + return count; +} + +static DEVICE_ATTR(dummy_usb_serial_number, 0644, + show_dummy_usb_serial_number, store_dummy_usb_serial_number); + +static ssize_t show_USB_ID_status(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct usb_info *ui = the_usb_info; + int value = 1; + unsigned length; + + if (!ui) + return 0; + if (ui->usb_id_pin_gpio != 0) { + value = gpio_get_value(ui->usb_id_pin_gpio); + printk(KERN_INFO "usb: id pin status %d\n", value); + } + length = sprintf(buf, "%d", value); + return length; +} + +static DEVICE_ATTR(USB_ID_status, 0444, + show_USB_ID_status, NULL); + +static ssize_t show_usb_car_kit_enable(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct usb_info *ui = the_usb_info; + int value = 1; + unsigned length; + + if (!ui) + return 0; + if (ui->accessory_detect == 0) { + value = 0; + } + printk(KERN_INFO "usb: USB_car_kit_enable %d\n", ui->accessory_detect); + length = sprintf(buf, "%d", value); + return length; +} + +static DEVICE_ATTR(usb_car_kit_enable, 0444, + show_usb_car_kit_enable, NULL);/*for kar kit AP check if car kit enable*/ + +static ssize_t show_usb_phy_setting(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct usb_info *ui = the_usb_info; + unsigned length = 0; + int i; + + for (i = 0; i <= 0x14; i++) + length += sprintf(buf + length, "0x%x = 0x%x\n", i, ulpi_read(ui, i)); + + for (i = 0x30; i <= 0x37; i++) + length += sprintf(buf + length, "0x%x = 0x%x\n", i, ulpi_read(ui, i)); + + return length; +} + +static ssize_t store_usb_phy_setting(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct usb_info *ui = the_usb_info; + char *token[10]; + unsigned reg; + unsigned value; + int i; + + printk(KERN_INFO "%s\n", buf); + for (i = 0; i < 2; i++) + token[i] = strsep((char **)&buf, " "); + + reg = simple_strtoul(token[0], NULL, 16); + value = simple_strtoul(token[1], NULL, 16); + printk(KERN_INFO "Set 0x%x = 0x%x\n", reg, value); + + ulpi_write(ui, value, reg); + + return 0; +} + +static DEVICE_ATTR(usb_phy_setting, 0666, + show_usb_phy_setting, store_usb_phy_setting); + +#ifdef CONFIG_USB_ACCESSORY_DETECT +static ssize_t show_mfg_carkit_enable(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned length; + struct usb_info *ui = the_usb_info; + + length = sprintf(buf, "%d", ui->mfg_usb_carkit_enable); + printk(KERN_INFO "%s: %d\n", __func__, + ui->mfg_usb_carkit_enable); + return length; + +} + +static ssize_t store_mfg_carkit_enable(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct usb_info *ui = the_usb_info; + unsigned char uc; + + if (buf[0] != '0' && buf[0] != '1') { + printk(KERN_ERR "Can't enable/disable 
carkit\n"); + return -EINVAL; + } + uc = buf[0] - '0'; + printk(KERN_INFO "%s: %d\n", __func__, uc); + ui->mfg_usb_carkit_enable = uc; + if (uc == 1 && ui->accessory_type == 1 && + board_mfg_mode() == 1) { + switch_set_state(&dock_switch, DOCK_STATE_CAR); + printk(KERN_INFO "carkit: set state %d\n", DOCK_STATE_CAR); + } + return count; +} + +static DEVICE_ATTR(usb_mfg_carkit_enable, 0644, + show_mfg_carkit_enable, store_mfg_carkit_enable); +#endif + +#if defined (CONFIG_DOCK_ACCESSORY_DETECT) || defined(CONFIG_USB_ACCESSORY_DETECT) +static ssize_t dock_status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct usb_info *ui = the_usb_info; + if (ui->accessory_type == 1) + return sprintf(buf, "online\n"); + else if (ui->accessory_type == 3) /*desk dock*/ + return sprintf(buf, "online\n"); + else + return sprintf(buf, "offline\n"); +} +static DEVICE_ATTR(status, S_IRUGO | S_IWUSR, dock_status_show, NULL); +#endif + +static void usb_prepare(struct usb_info *ui) +{ + int ret; + spin_lock_init(&ui->lock); + + memset(ui->buf, 0, 4096); + ui->head = (void *) (ui->buf + 0); + + /* only important for reset/reinit */ + memset(ui->ept, 0, sizeof(ui->ept)); + ui->next_item = 0; + ui->next_ifc_num = 0; + + init_endpoints(ui); + + ui->ep0in.ep.maxpacket = 64; + ui->ep0out.ep.maxpacket = 64; + + ui->setup_req = + usb_ept_alloc_req(&ui->ep0in, SETUP_BUF_SIZE, GFP_KERNEL); + + ui->usb_wq = create_singlethread_workqueue("msm_hsusb"); + if (ui->usb_wq == 0) { + printk(KERN_ERR "usb: fail to create workqueue\n"); + return; + } + INIT_WORK(&ui->work, usb_do_work); +#ifdef CONFIG_USB_ACCESSORY_DETECT + INIT_WORK(&ui->detect_work, accessory_detect_work); +#endif +#ifdef CONFIG_DOCK_ACCESSORY_DETECT + if (ui->dock_detect) { + INIT_WORK(&ui->dock_work, dock_detect_work); + dock_detect_init(ui); + } +#endif + + INIT_WORK(&ui->notifier_work, send_usb_connect_notify); + INIT_DELAYED_WORK(&ui->chg_work, check_charger); + + ret = device_create_file(&ui->pdev->dev, + &dev_attr_usb_cable_connect); + if (ret != 0) + printk(KERN_WARNING "dev_attr_usb_cable_connect failed\n"); + +#if 0 + ret = device_create_file(&ui->pdev->dev, + &dev_attr_usb_function_switch); + if (ret != 0) + printk(KERN_WARNING "dev_attr_usb_function_switch failed\n"); +#endif + + ret = device_create_file(&ui->pdev->dev, + &dev_attr_usb_serial_number); + if (ret != 0) + printk(KERN_WARNING "dev_attr_usb_serial_number failed\n"); + + ret = device_create_file(&ui->pdev->dev, + &dev_attr_dummy_usb_serial_number); + if (ret != 0) + printk(KERN_WARNING "dev_attr_dummy_usb_serial_number failed\n"); + + ret = device_create_file(&ui->pdev->dev, + &dev_attr_USB_ID_status); + if (ret != 0) + printk(KERN_WARNING "dev_attr_USB_ID_status failed\n"); + + ret = device_create_file(&ui->pdev->dev, + &dev_attr_usb_phy_setting); + if (ret != 0) + printk(KERN_WARNING "dev_attr_usb_phy_setting failed\n"); + +#ifdef CONFIG_USB_ACCESSORY_DETECT + ret = device_create_file(&ui->pdev->dev, + &dev_attr_usb_mfg_carkit_enable); + if (ret != 0) + printk(KERN_WARNING "dev_attr_usb_mfg_carkit_enable failed\n"); +#endif + ret = device_create_file(&ui->pdev->dev, + &dev_attr_usb_car_kit_enable);/*for kar kit AP check if car kit enable*/ + if (ret != 0) + printk(KERN_WARNING "dev_attr_usb_car_kit_enable failed\n"); + +} + +static int usb_wakeup_phy(struct usb_info *ui) +{ + int i; + + /*writel(readl(USB_USBCMD) & ~ULPI_STP_CTRL, USB_USBCMD);*/ + + /* some circuits automatically clear PHCD bit */ + for (i = 0; i < 5 && (readl(USB_PORTSC) & PORTSC_PHCD); i++) { 
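+		/* clear PHCD to take the PHY out of low-power mode, wait 1 ms, retry up to 5 times */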
+ writel(readl(USB_PORTSC) & ~PORTSC_PHCD, USB_PORTSC); + mdelay(1); + } + + if ((readl(USB_PORTSC) & PORTSC_PHCD)) { + pr_err("%s: cannot clear phcd bit\n", __func__); + return -1; + } + + return 0; +} + +static void usb_suspend_phy(struct usb_info *ui) +{ + printk(KERN_INFO "%s\n", __func__); +#ifdef CONFIG_ARCH_MSM7X00A + /* disable unused interrupt */ + ulpi_write(ui, 0x01, 0x0d); + ulpi_write(ui, 0x01, 0x10); + + /* disable interface protect circuit to drop current consumption */ + ulpi_write(ui, (1 << 7), 0x08); + /* clear the SuspendM bit -> suspend the PHY */ + ulpi_write(ui, 1 << 6, 0x06); +#else +#ifdef CONFIG_ARCH_MSM7X30 + ulpi_write(ui, 0x0, 0x0D); + ulpi_write(ui, 0x0, 0x10); +#endif + /* clear VBusValid and SessionEnd rising interrupts */ + ulpi_write(ui, (1 << 1) | (1 << 3), 0x0f); + /* clear VBusValid and SessionEnd falling interrupts */ + ulpi_write(ui, (1 << 1) | (1 << 3), 0x12); + ulpi_read(ui, 0x14); /* clear PHY interrupt latch register*/ + + ulpi_write(ui, 0x08, 0x09);/* turn off PLL on integrated phy */ + + /* set phy to be in lpm */ + writel(readl(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC); + mdelay(1); + if (!(readl(USB_PORTSC) & PORTSC_PHCD)) + printk(KERN_INFO "%s: unable to set lpm\n", __func__); +#endif +} + +static void usb_reset(struct usb_info *ui) +{ + unsigned long flags; + printk(KERN_INFO "hsusb: reset controller\n"); + + spin_lock_irqsave(&ui->lock, flags); + ui->running = 0; + spin_unlock_irqrestore(&ui->lock, flags); + + /* disable usb interrupts */ + writel(0, USB_USBINTR); + + /* wait for a while after enable usb clk*/ + msleep(5); + + /* To prevent phantom packets being received by the usb core on + * some devices, put the controller into reset prior to + * resetting the phy. */ + writel(2, USB_USBCMD); + msleep(10); + +#if 0 + /* we should flush and shutdown cleanly if already running */ + writel(0xffffffff, USB_ENDPTFLUSH); + msleep(2); +#endif + + if (ui->phy_reset) + ui->phy_reset(); + + msleep(100); + + /* RESET */ + writel(2, USB_USBCMD); + msleep(10); + +#ifdef CONFIG_ARCH_MSM7X00A + /* INCR4 BURST mode */ + writel(0x01, USB_SBUSCFG); +#else + /* bursts of unspecified length. 
*/ + writel(0, USB_AHBBURST); + /* Use the AHB transactor */ + writel(0, USB_AHBMODE); +#endif + + /* select DEVICE mode */ + writel(0x12, USB_USBMODE); + msleep(1); + + /* select ULPI phy */ + writel(0x80000000, USB_PORTSC); + + ulpi_init(ui); + + writel(ui->dma, USB_ENDPOINTLISTADDR); + + configure_endpoints(ui); + + /* marking us offline will cause ept queue attempts to fail */ + ui->online = 0; + + /* terminate any pending transactions */ + flush_all_endpoints(ui); + + if (ui->driver) { + printk(KERN_INFO "usb: notify offline\n"); + ui->driver->disconnect(&ui->gadget); + } + + /* enable interrupts */ + writel(STS_URI | STS_SLI | STS_UI | STS_PCI, USB_USBINTR); + + /* go to RUN mode (D+ pullup enable) */ + msm72k_pullup(&ui->gadget, 1); + + spin_lock_irqsave(&ui->lock, flags); + ui->running = 1; + spin_unlock_irqrestore(&ui->lock, flags); +} + +static void usb_start(struct usb_info *ui) +{ + unsigned long flags; + + spin_lock_irqsave(&ui->lock, flags); + ui->flags |= USB_FLAG_START; +/*if msm_hsusb_set_vbus_state set 1, but usb did not init, the ui =NULL, */ +/*it would cause reboot with usb, it did not swith to USB and ADB fail*/ +/*So when USB start, check again*/ + if (vbus) { + ui->flags |= USB_FLAG_VBUS_ONLINE; + } else { + ui->flags |= USB_FLAG_VBUS_OFFLINE; + } + /* online->switch to USB, offline->switch to uart */ + if (ui->usb_uart_switch) + ui->usb_uart_switch(!vbus); + + queue_work(ui->usb_wq, &ui->work); + spin_unlock_irqrestore(&ui->lock, flags); +} + +static int usb_free(struct usb_info *ui, int ret) +{ + INFO("usb_free(%d)\n", ret); + + if (ui->irq) + free_irq(ui->irq, 0); + if (ui->pool) + dma_pool_destroy(ui->pool); + if (ui->dma) + dma_free_coherent(&ui->pdev->dev, 4096, ui->buf, ui->dma); + if (ui->addr) + iounmap(ui->addr); + if (ui->clk) + clk_put(ui->clk); + if (ui->pclk) + clk_put(ui->pclk); + if (ui->otgclk) + clk_put(ui->otgclk); + if (ui->coreclk) + clk_put(ui->coreclk); + if (ui->ebi1clk) + clk_put(ui->ebi1clk); + kfree(ui); + return ret; +} + +static void usb_do_work_check_vbus(struct usb_info *ui) +{ + unsigned long iflags; + + spin_lock_irqsave(&ui->lock, iflags); +#if defined(CONFIG_USB_BYPASS_VBUS_NOTIFY) + ui->flags |= USB_FLAG_VBUS_ONLINE; + pr_info("usb: fake vbus\n"); +#else + if (vbus) + ui->flags |= USB_FLAG_VBUS_ONLINE; + else + ui->flags |= USB_FLAG_VBUS_OFFLINE; +#endif + spin_unlock_irqrestore(&ui->lock, iflags); +} + +static void usb_lpm_enter(struct usb_info *ui) +{ + unsigned long iflags; + if (ui->in_lpm) + return; + printk(KERN_INFO "usb: lpm enter\n"); + spin_lock_irqsave(&ui->lock, iflags); + usb_suspend_phy(ui); + if (ui->otgclk) + clk_disable(ui->otgclk); + clk_disable(ui->pclk); + clk_disable(ui->clk); + if (ui->coreclk) + clk_disable(ui->coreclk); + clk_set_rate(ui->ebi1clk, 0); + ui->in_lpm = 1; + spin_unlock_irqrestore(&ui->lock, iflags); +} + +static void usb_lpm_exit(struct usb_info *ui) +{ + if (!ui->in_lpm) + return; + printk(KERN_INFO "usb: lpm exit\n"); + clk_set_rate(ui->ebi1clk, acpuclk_get_max_axi_rate()); + udelay(10); + if (ui->coreclk) + clk_enable(ui->coreclk); + clk_enable(ui->clk); + clk_enable(ui->pclk); + if (ui->otgclk) + clk_enable(ui->otgclk); + usb_wakeup_phy(ui); + ui->in_lpm = 0; +} + +#ifdef CONFIG_DOCK_ACCESSORY_DETECT +static irqreturn_t dock_interrupt(int irq, void *data) +{ + struct usb_info *ui = data; + disable_irq_nosync(ui->dockpin_irq); + queue_work(ui->usb_wq, &ui->dock_work); + return IRQ_HANDLED; +} +static void dock_detect_work(struct work_struct *w) +{ + struct usb_info *ui = container_of(w, 
struct usb_info, dock_work); + int value; + + value = gpio_get_value(ui->dock_pin_gpio); + + if (value == 0 && vbus) { + set_irq_type(ui->dockpin_irq, IRQF_TRIGGER_HIGH); + switch_set_state(&dock_switch, DOCK_STATE_DESK); + ui->accessory_type = 3; + printk(KERN_INFO "usb:dock: set state %d\n", DOCK_STATE_DESK); + } else { + set_irq_type(ui->dockpin_irq, IRQF_TRIGGER_LOW); + switch_set_state(&dock_switch, DOCK_STATE_UNDOCKED); + ui->accessory_type = 0; + printk(KERN_INFO "usb:dock: set state %d\n", DOCK_STATE_UNDOCKED); + } + enable_irq(ui->dockpin_irq); + + +} +static void dock_detect_init(struct usb_info *ui) +{ + int ret; + + if (ui->dock_pin_gpio == 0) + return; + if (ui->dockpin_irq == 0) + ui->dockpin_irq = gpio_to_irq(ui->dock_pin_gpio); + + ret = request_irq(ui->dockpin_irq, dock_interrupt, + IRQF_TRIGGER_LOW, + "dock_irq", ui); + if (ret < 0) { + printk(KERN_ERR "%s: request_irq failed\n", __func__); + return; + } + printk(KERN_INFO "%s: dock irq %d\n", __func__, + ui->dockpin_irq); + ret = set_irq_wake(ui->dockpin_irq, 1); + if (ret < 0) { + printk(KERN_ERR "%s: set_irq_wake failed\n", __func__); + goto err; + } + + if (switch_dev_register(&dock_switch) < 0) { + printk(KERN_ERR "usb: fail to register dock switch!\n"); + goto err; + } + + ret = device_create_file(dock_switch.dev, &dev_attr_status); + if (ret != 0) + printk(KERN_WARNING "dev_attr_status failed\n"); + + return; + +err: + free_irq(ui->dockpin_irq, 0); +} +#endif + + +#ifdef CONFIG_USB_ACCESSORY_DETECT +static void carkit_detect(struct usb_info *ui) +{ + unsigned n; + int value; + unsigned in_lpm; + + msleep(100); + value = gpio_get_value(ui->usb_id_pin_gpio); + printk(KERN_INFO "usb: usb ID pin = %d\n", value); + in_lpm = ui->in_lpm; + if (value == 0) { + if (in_lpm) + usb_lpm_exit(ui); + + n = readl(USB_OTGSC); + /* ID pull-up register */ + writel(n | OTGSC_IDPU, USB_OTGSC); + + msleep(100); + n = readl(USB_OTGSC); + + if (n & OTGSC_ID) { + printk(KERN_INFO "usb: carkit inserted\n"); + if ((board_mfg_mode() == 0) || (board_mfg_mode() == 1 && + ui->mfg_usb_carkit_enable == 1)) { + switch_set_state(&dock_switch, DOCK_STATE_CAR); + printk(KERN_INFO "carkit: set state %d\n", DOCK_STATE_CAR); + } + ui->accessory_type = 1; + } else + ui->accessory_type = 0; + if (in_lpm) + usb_lpm_enter(ui); + } else { + if (ui->accessory_type == 1) + printk(KERN_INFO "usb: carkit removed\n"); + switch_set_state(&dock_switch, DOCK_STATE_UNDOCKED); + printk(KERN_INFO "carkit: set state %d\n", DOCK_STATE_UNDOCKED); + ui->accessory_type = 0; + } +} + +#ifdef CONFIG_USB_ACCESSORY_DETECT_BY_ADC +static void accessory_detect_by_adc(struct usb_info *ui) +{ + int value; + msleep(100); + value = gpio_get_value(ui->usb_id_pin_gpio); + printk(KERN_INFO "usb: usb ID pin = %d\n", value); + if (value == 0) { + uint32_t adc_value = 0xffffffff; + htc_get_usb_accessory_adc_level(&adc_value); + printk(KERN_INFO "usb: accessory adc = 0x%x\n", adc_value); + if (adc_value >= 0x2112 && adc_value <= 0x3D53) { + printk(KERN_INFO "usb: headset inserted\n"); + ui->accessory_type = 2; + headset_ext_detect(USB_AUDIO_OUT); + } else if (adc_value >= 0x88A && adc_value <= 0x1E38) { + printk(KERN_INFO "usb: carkit inserted\n"); + ui->accessory_type = 1; + if ((board_mfg_mode() == 0) || (board_mfg_mode() == 1 && + ui->mfg_usb_carkit_enable == 1)) { + switch_set_state(&dock_switch, DOCK_STATE_CAR); + printk(KERN_INFO "carkit: set state %d\n", DOCK_STATE_CAR); + } + } else + ui->accessory_type = 0; + } else { + if (ui->accessory_type == 2) { + printk(KERN_INFO "usb: headset 
removed\n"); + headset_ext_detect(USB_NO_HEADSET); + } else if (ui->accessory_type == 1) { + printk(KERN_INFO "usb: carkit removed\n"); + switch_set_state(&dock_switch, DOCK_STATE_UNDOCKED); + } + ui->accessory_type = 0; + } + +} +#endif + +static void accessory_detect_work(struct work_struct *w) +{ + struct usb_info *ui = container_of(w, struct usb_info, detect_work); + int value; + + if (!ui->accessory_detect) + return; + + if (ui->accessory_detect == 1) + carkit_detect(ui); +#ifdef CONFIG_USB_ACCESSORY_DETECT_BY_ADC + else if (ui->accessory_detect == 2) + accessory_detect_by_adc(ui); +#endif + + value = gpio_get_value(ui->usb_id_pin_gpio); + if (value == 0) + set_irq_type(ui->idpin_irq, IRQF_TRIGGER_HIGH); + else + set_irq_type(ui->idpin_irq, IRQF_TRIGGER_LOW); + enable_irq(ui->idpin_irq); +} + +static irqreturn_t usbid_interrupt(int irq, void *data) +{ + struct usb_info *ui = data; + + disable_irq_nosync(ui->idpin_irq); + printk(KERN_INFO "usb: id interrupt\n"); + queue_work(ui->usb_wq, &ui->detect_work); + return IRQ_HANDLED; +} + +static void accessory_detect_init(struct usb_info *ui) +{ + int ret; + printk(KERN_INFO "%s: id pin %d\n", __func__, + ui->usb_id_pin_gpio); + + if (ui->usb_id_pin_gpio == 0) + return; + if (ui->idpin_irq == 0) + ui->idpin_irq = gpio_to_irq(ui->usb_id_pin_gpio); + + ret = request_irq(ui->idpin_irq, usbid_interrupt, + IRQF_TRIGGER_LOW, + "car_kit_irq", ui); + if (ret < 0) { + printk(KERN_ERR "%s: request_irq failed\n", __func__); + return; + } + + ret = set_irq_wake(ui->idpin_irq, 1); + if (ret < 0) { + printk(KERN_ERR "%s: set_irq_wake failed\n", __func__); + goto err; + } + + if (switch_dev_register(&dock_switch) < 0) { + printk(KERN_ERR "usb: fail to register dock switch!\n"); + goto err; + } + + ret = device_create_file(dock_switch.dev, &dev_attr_status); + if (ret != 0) + printk(KERN_WARNING "dev_attr_status failed\n"); + + return; +err: + free_irq(ui->idpin_irq, 0); +} + +#endif + +#define DELAY_FOR_CHECK_CHG msecs_to_jiffies(300) + +static void charger_detect_by_uart(struct usb_info *ui) +{ + int is_china_ac; + + /*UART*/ + if (ui->usb_uart_switch) + ui->usb_uart_switch(1); + + is_china_ac = ui->china_ac_detect(); + + if (is_china_ac) { + ui->connect_type = CONNECT_TYPE_AC; + queue_work(ui->usb_wq, &ui->notifier_work); + usb_lpm_enter(ui); + printk(KERN_INFO "usb: AC charger\n"); + } else { + ui->connect_type = CONNECT_TYPE_UNKNOWN; + queue_delayed_work(ui->usb_wq, &ui->chg_work, + DELAY_FOR_CHECK_CHG); + printk(KERN_INFO "usb: not AC charger\n"); + + /*set uart to gpo*/ + if (ui->serial_debug_gpios) + ui->serial_debug_gpios(0); + /*turn on USB HUB*/ + if (ui->usb_hub_enable) + ui->usb_hub_enable(1); + + /*USB*/ + if (ui->usb_uart_switch) + ui->usb_uart_switch(0); + + usb_lpm_exit(ui); + usb_reset(ui); + } +} + +static void charger_detect(struct usb_info *ui) +{ + if (!vbus) + return; + msleep(10); + /* detect shorted D+/D-, indicating AC power */ + if ((readl(USB_PORTSC) & PORTSC_LS) != PORTSC_LS) { + printk(KERN_INFO "usb: not AC charger\n"); + ui->connect_type = CONNECT_TYPE_UNKNOWN; + queue_delayed_work(ui->usb_wq, &ui->chg_work, + DELAY_FOR_CHECK_CHG); + } else { + printk(KERN_INFO "usb: AC charger\n"); + ui->connect_type = CONNECT_TYPE_AC; + queue_work(ui->usb_wq, &ui->notifier_work); + writel(0x00080000, USB_USBCMD); + msleep(10); + usb_lpm_enter(ui); + } +} + +static void check_charger(struct work_struct *w) +{ + struct usb_info *ui = container_of(w, struct usb_info, chg_work.work); + if (disable_charger) { + printk(KERN_INFO "usb: disable 
charger\n"); + if (ui->disable_usb_charger) + ui->disable_usb_charger(); + disable_charger = 0; + return; + } + /* unknown charger */ + if (vbus && ui->connect_type == CONNECT_TYPE_UNKNOWN) + queue_work(ui->usb_wq, &ui->notifier_work); +} + +static void usb_do_work(struct work_struct *w) +{ + struct usb_info *ui = container_of(w, struct usb_info, work); + unsigned long iflags; + unsigned flags, _vbus; + + + for (;;) { + spin_lock_irqsave(&ui->lock, iflags); + flags = ui->flags; + ui->flags = 0; + _vbus = vbus; + spin_unlock_irqrestore(&ui->lock, iflags); + + /* give up if we have nothing to do */ + if (flags == 0) + break; + switch (ui->state) { + case USB_STATE_IDLE: + if (flags & USB_FLAG_START) { + pr_info("hsusb: IDLE -> ONLINE\n"); + + if (ui->china_ac_detect) + charger_detect_by_uart(ui); + else { + usb_lpm_exit(ui); + usb_reset(ui); + + charger_detect(ui); + } + + ui->state = USB_STATE_ONLINE; +#ifdef CONFIG_USB_ACCESSORY_DETECT + if (ui->accessory_detect) + accessory_detect_init(ui); +#endif + usb_do_work_check_vbus(ui); + } + break; + case USB_STATE_ONLINE: + /* If at any point when we were online, we received + * the signal to go offline, we must honor it + */ + if (flags & USB_FLAG_VBUS_OFFLINE) { + pr_info("hsusb: ONLINE -> OFFLINE\n"); + + /* synchronize with irq context */ + spin_lock_irqsave(&ui->lock, iflags); + ui->running = 0; + ui->online = 0; + writel(0x00080000, USB_USBCMD); + spin_unlock_irqrestore(&ui->lock, iflags); + + if (ui->connect_type != CONNECT_TYPE_NONE) { + ui->connect_type = CONNECT_TYPE_NONE; + queue_work(ui->usb_wq, &ui->notifier_work); + } + if (ui->in_lpm) { + usb_lpm_exit(ui); + msleep(5); + } + + /* terminate any transactions, etc */ + flush_all_endpoints(ui); + + if (ui->driver) { + printk(KERN_INFO "usb: notify offline\n"); + ui->driver->disconnect(&ui->gadget); + } + + if (ui->phy_reset) + ui->phy_reset(); + + /* power down phy, clock down usb */ + usb_lpm_enter(ui); + + ui->state = USB_STATE_OFFLINE; + usb_do_work_check_vbus(ui); + break; + } + if (flags & USB_FLAG_RESET) { + pr_info("hsusb: ONLINE -> RESET\n"); + if (ui->connect_type == CONNECT_TYPE_AC) { + pr_info("hsusb: RESET -> ONLINE\n"); + break; + } + spin_lock_irqsave(&ui->lock, iflags); + ui->online = 0; + msm72k_pullup(&ui->gadget, 0); + spin_unlock_irqrestore(&ui->lock, iflags); + usb_reset(ui); + pr_info("hsusb: RESET -> ONLINE\n"); + break; + } + break; + case USB_STATE_OFFLINE: + /* If we were signaled to go online and vbus is still + * present when we received the signal, go online. + */ + if ((flags & USB_FLAG_VBUS_ONLINE) && _vbus) { + pr_info("hsusb: OFFLINE -> ONLINE\n"); + + if (ui->china_ac_detect) + charger_detect_by_uart(ui); + else { + usb_lpm_exit(ui); + usb_reset(ui); + charger_detect(ui); + } + + ui->state = USB_STATE_ONLINE; + usb_do_work_check_vbus(ui); + } + break; + } + } +} + +/* FIXME - the callers of this function should use a gadget API instead. + * This is called from htc_battery.c and board-halibut.c + * WARNING - this can get called before this driver is initialized. 
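+ * (the_usb_info may still be NULL at that point, hence the ui checks below)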
+ */ +void msm_hsusb_set_vbus_state(int online) +{ + unsigned long flags = 0; + struct usb_info *ui = the_usb_info; + printk(KERN_INFO "%s: %d\n", __func__, online); + + if (ui) + spin_lock_irqsave(&ui->lock, flags); + if (vbus != online) { + vbus = online; + if (ui) { + if (online) { + ui->flags |= USB_FLAG_VBUS_ONLINE; + } else { + ui->flags |= USB_FLAG_VBUS_OFFLINE; + } + + if (online) { + /*USB*/ + if (ui->usb_uart_switch) + ui->usb_uart_switch(0); + } else { + /*turn off USB HUB*/ + if (ui->usb_hub_enable) + ui->usb_hub_enable(0); + + /*UART*/ + if (ui->usb_uart_switch) + ui->usb_uart_switch(1); + /*configure uart pin to alternate function*/ + if (ui->serial_debug_gpios) + ui->serial_debug_gpios(1); +#ifdef CONFIG_DOCK_ACCESSORY_DETECT + if (ui->accessory_type == 3) { + set_irq_type(ui->dockpin_irq, IRQF_TRIGGER_LOW); + switch_set_state(&dock_switch, DOCK_STATE_UNDOCKED); + ui->accessory_type = 0; + printk(KERN_INFO "usb:dock: vbus offline\n"); + enable_irq(ui->dockpin_irq); + } +#endif + } + + queue_work(ui->usb_wq, &ui->work); + } + } + if (ui) + spin_unlock_irqrestore(&ui->lock, flags); +} + +#if defined(CONFIG_DEBUG_FS) && 0 + +void usb_function_reenumerate(void) +{ + struct usb_info *ui = the_usb_info; + + /* disable and re-enable the D+ pullup */ + msm72k_pullup(&ui->gadget, false); + msleep(10); + msm72k_pullup(&ui->gadget, true); +} + +static char debug_buffer[PAGE_SIZE]; + +static ssize_t debug_read_status(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct usb_info *ui = file->private_data; + char *buf = debug_buffer; + unsigned long flags; + struct msm_endpoint *ept; + struct msm_request *req; + int n; + int i = 0; + + spin_lock_irqsave(&ui->lock, flags); + + i += scnprintf(buf + i, PAGE_SIZE - i, + "regs: setup=%08x prime=%08x stat=%08x done=%08x\n", + readl(USB_ENDPTSETUPSTAT), + readl(USB_ENDPTPRIME), + readl(USB_ENDPTSTAT), + readl(USB_ENDPTCOMPLETE)); + i += scnprintf(buf + i, PAGE_SIZE - i, + "regs: cmd=%08x sts=%08x intr=%08x port=%08x\n\n", + readl(USB_USBCMD), + readl(USB_USBSTS), + readl(USB_USBINTR), + readl(USB_PORTSC)); + + + for (n = 0; n < 32; n++) { + ept = ui->ept + n; + if (ept->ep.maxpacket == 0) + continue; + + i += scnprintf(buf + i, PAGE_SIZE - i, + "ept%d %s cfg=%08x active=%08x next=%08x info=%08x\n", + ept->num, (ept->flags & EPT_FLAG_IN) ? "in " : "out", + ept->head->config, ept->head->active, + ept->head->next, ept->head->info); + + for (req = ept->req; req; req = req->next) + i += scnprintf(buf + i, PAGE_SIZE - i, + " req @%08x next=%08x info=%08x page0=%08x %c %c\n", + req->item_dma, req->item->next, + req->item->info, req->item->page0, + req->busy ? 'B' : ' ', + req->live ? 
'L' : ' ' + ); + } + + i += scnprintf(buf + i, PAGE_SIZE - i, + "phy failure count: %d\n", ui->phy_fail_count); + + spin_unlock_irqrestore(&ui->lock, flags); + + return simple_read_from_buffer(ubuf, count, ppos, buf, i); +} + +static ssize_t debug_write_reset(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct usb_info *ui = file->private_data; + unsigned long flags; + + spin_lock_irqsave(&ui->lock, flags); + ui->flags |= USB_FLAG_RESET; + queue_work(ui->usb_wq, &ui->work); + spin_unlock_irqrestore(&ui->lock, flags); + + return count; +} + +static ssize_t debug_write_cycle(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + usb_function_reenumerate(); + return count; +} + +static int debug_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +const struct file_operations debug_stat_ops = { + .open = debug_open, + .read = debug_read_status, +}; + +const struct file_operations debug_reset_ops = { + .open = debug_open, + .write = debug_write_reset, +}; + +const struct file_operations debug_cycle_ops = { + .open = debug_open, + .write = debug_write_cycle, +}; + +static void usb_debugfs_init(struct usb_info *ui) +{ + struct dentry *dent; + dent = debugfs_create_dir("usb", 0); + if (IS_ERR(dent)) + return; + + debugfs_create_file("status", 0444, dent, ui, &debug_stat_ops); + debugfs_create_file("reset", 0220, dent, ui, &debug_reset_ops); + debugfs_create_file("cycle", 0220, dent, ui, &debug_cycle_ops); +} +#else +static void usb_debugfs_init(struct usb_info *ui) {} +#endif + +static int +msm72k_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) +{ + struct msm_endpoint *ept = to_msm_endpoint(_ep); + unsigned char ep_type = + desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; + + if (ep_type == USB_ENDPOINT_XFER_BULK) + _ep->maxpacket = le16_to_cpu(desc->wMaxPacketSize); + else + _ep->maxpacket = le16_to_cpu(64); + config_ept(ept); + usb_ept_enable(ept, 1, ep_type); + return 0; +} + +static int msm72k_disable(struct usb_ep *_ep) +{ + struct msm_endpoint *ept = to_msm_endpoint(_ep); + + usb_ept_enable(ept, 0, 0); + return 0; +} + +static struct usb_request * +msm72k_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) +{ + return usb_ept_alloc_req(to_msm_endpoint(_ep), 0, gfp_flags); +} + +static void +msm72k_free_request(struct usb_ep *_ep, struct usb_request *_req) +{ + struct msm_request *req = to_msm_request(_req); + struct msm_endpoint *ept = to_msm_endpoint(_ep); + struct usb_info *ui = ept->ui; + unsigned long flags; + int dead = 0; + + spin_lock_irqsave(&ui->lock, flags); + /* defer freeing resources if request is still busy */ + if (req->busy) + dead = req->dead = 1; + spin_unlock_irqrestore(&ui->lock, flags); + + /* if req->dead, then we will clean up when the request finishes */ + if (!dead) + do_free_req(ui, req); +} + +static int +msm72k_queue(struct usb_ep *_ep, struct usb_request *req, gfp_t gfp_flags) +{ + struct msm_endpoint *ep = to_msm_endpoint(_ep); + struct usb_info *ui = ep->ui; + + if (ep == &ui->ep0in) { + struct msm_request *r = to_msm_request(req); + if (!req->length) + goto ep_queue_done; + else { + if (ui->ep0_dir == USB_DIR_OUT) { + ep = &ui->ep0out; + ep->ep.driver_data = ui->ep0in.ep.driver_data; + } + /* ep0_queue_ack_complete queue a receive for ACK before + ** calling req->complete + */ + r->gadget_complete = req->complete; + req->complete = ep0_queue_ack_complete; + } + } +ep_queue_done: + return usb_ept_queue_xfer(ep, req); +} + +static 
int msm72k_dequeue(struct usb_ep *_ep, struct usb_request *_req) +{ + struct msm_endpoint *ep = to_msm_endpoint(_ep); + struct msm_request *req = to_msm_request(_req); + struct usb_info *ui = ep->ui; + + struct msm_request *cur, *prev; + unsigned long flags; + + if (!_ep || !_req) + return -EINVAL; + + spin_lock_irqsave(&ui->lock, flags); + cur = ep->req; + prev = NULL; + + while (cur != 0) { + if (cur == req) { + req->busy = 0; + req->live = 0; + req->req.status = -ECONNRESET; + req->req.actual = 0; + if (req->req.complete) { + spin_unlock_irqrestore(&ui->lock, flags); + req->req.complete(&ep->ep, &req->req); + spin_lock_irqsave(&ui->lock, flags); + } + if (req->dead) + do_free_req(ui, req); + /* remove from linked list */ + if (prev) + prev->next = cur->next; + else + ep->req = cur->next; + if (ep->last == cur) + ep->last = prev; + /* break from loop */ + cur = NULL; + } else { + prev = cur; + cur = cur->next; + } + } + spin_unlock_irqrestore(&ui->lock, flags); + + return 0; +} + +static int +msm72k_set_halt(struct usb_ep *_ep, int value) +{ + struct msm_endpoint *ept = to_msm_endpoint(_ep); + struct usb_info *ui = ept->ui; + unsigned int in = ept->flags & EPT_FLAG_IN; + unsigned int n; + unsigned long flags; + + spin_lock_irqsave(&ui->lock, flags); + n = readl(USB_ENDPTCTRL(ept->num)); + + if (in) { + if (value) + n |= CTRL_TXS; + else { + n &= ~CTRL_TXS; + n |= CTRL_TXR; + } + } else { + if (value) + n |= CTRL_RXS; + else { + n &= ~CTRL_RXS; + n |= CTRL_RXR; + } + } + writel(n, USB_ENDPTCTRL(ept->num)); + spin_unlock_irqrestore(&ui->lock, flags); + + return 0; +} + +static int +msm72k_fifo_status(struct usb_ep *_ep) +{ + return -EOPNOTSUPP; +} + +static void +msm72k_fifo_flush(struct usb_ep *_ep) +{ + flush_endpoint(to_msm_endpoint(_ep)); +} + +static const struct usb_ep_ops msm72k_ep_ops = { + .enable = msm72k_enable, + .disable = msm72k_disable, + + .alloc_request = msm72k_alloc_request, + .free_request = msm72k_free_request, + + .queue = msm72k_queue, + .dequeue = msm72k_dequeue, + + .set_halt = msm72k_set_halt, + .fifo_status = msm72k_fifo_status, + .fifo_flush = msm72k_fifo_flush, +}; + +static int msm72k_get_frame(struct usb_gadget *_gadget) +{ + struct usb_info *ui = container_of(_gadget, struct usb_info, gadget); + + /* frame number is in bits 13:3 */ + return (readl(USB_FRINDEX) >> 3) & 0x000007FF; +} + +/* VBUS reporting logically comes from a transceiver */ +static int msm72k_udc_vbus_session(struct usb_gadget *_gadget, int is_active) +{ + msm_hsusb_set_vbus_state(is_active); + return 0; +} + +/* drivers may have software control over D+ pullup */ +static int msm72k_pullup(struct usb_gadget *_gadget, int is_active) +{ + struct usb_info *ui = container_of(_gadget, struct usb_info, gadget); + + u32 cmd = (8 << 16); + + /* disable/enable D+ pullup */ + if (is_active) { + pr_info("msm_hsusb: enable pullup\n"); + writel(cmd | 1, USB_USBCMD); + } else { + pr_info("msm_hsusb: disable pullup\n"); + writel(cmd, USB_USBCMD); + +#ifndef CONFIG_ARCH_MSM7X00A + ulpi_write(ui, 0x48, 0x04); +#endif + } + + return 0; +} + +static int msm72k_wakeup(struct usb_gadget *_gadget) +{ + struct usb_info *ui = container_of(_gadget, struct usb_info, gadget); + unsigned long flags; + + if (!ui->remote_wakeup) { + pr_err("%s: remote wakeup not supported\n", __func__); + return -ENOTSUPP; + } + + if (!ui->online) { + pr_err("%s: device is not configured\n", __func__); + return -ENODEV; + } + + spin_lock_irqsave(&ui->lock, flags); + if ((readl(USB_PORTSC) & PORTSC_SUSP) == PORTSC_SUSP) { + pr_info("%s: 
enabling force resume\n", __func__); + writel(readl(USB_PORTSC) | PORTSC_FPR, USB_PORTSC); + } + spin_unlock_irqrestore(&ui->lock, flags); + + return 0; +} + +static const struct usb_gadget_ops msm72k_ops = { + .get_frame = msm72k_get_frame, + .vbus_session = msm72k_udc_vbus_session, + .pullup = msm72k_pullup, + /* .wakeup = msm72k_wakeup, */ +}; + +static ssize_t usb_remote_wakeup(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct usb_info *ui = the_usb_info; + + msm72k_wakeup(&ui->gadget); + + return count; +} +static DEVICE_ATTR(wakeup, S_IWUSR, 0, usb_remote_wakeup); + +static int msm72k_probe(struct platform_device *pdev) +{ + struct resource *res; + struct usb_info *ui; + int irq; + int ret; + + INFO("msm72k_probe\n"); + ui = kzalloc(sizeof(struct usb_info), GFP_KERNEL); + if (!ui) + return -ENOMEM; + + spin_lock_init(&ui->lock); + ui->pdev = pdev; + + if (pdev->dev.platform_data) { + struct msm_hsusb_platform_data *pdata = pdev->dev.platform_data; + ui->phy_reset = pdata->phy_reset; + ui->phy_init_seq = pdata->phy_init_seq; + ui->usb_connected = pdata->usb_connected; + ui->usb_uart_switch = pdata->usb_uart_switch; + ui->serial_debug_gpios = pdata->serial_debug_gpios; + ui->usb_hub_enable = pdata->usb_hub_enable; + ui->china_ac_detect = pdata->china_ac_detect; + ui->disable_usb_charger = pdata->disable_usb_charger; + + ui->accessory_detect = pdata->accessory_detect; + printk(KERN_INFO "usb: accessory detect %d\n", + ui->accessory_detect); + ui->usb_id_pin_gpio = pdata->usb_id_pin_gpio; + printk(KERN_INFO "usb: id_pin_gpio %d\n", + pdata->usb_id_pin_gpio); + + ui->dock_detect = pdata->dock_detect; + printk(KERN_INFO "usb: dock detect %d\n", + ui->dock_detect); + ui->dock_pin_gpio = pdata->dock_pin_gpio; + printk(KERN_INFO "usb: dock pin gpio %d\n", + ui->dock_pin_gpio); + + ui->idpin_irq = pdata->id_pin_irq; + if (pdata->config_usb_id_gpios) + ui->config_usb_id_gpios = pdata->config_usb_id_gpios; + } + + irq = platform_get_irq(pdev, 0); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res || (irq < 0)) + return usb_free(ui, -ENODEV); + + ui->addr = ioremap(res->start, 4096); + if (!ui->addr) + return usb_free(ui, -ENOMEM); + + ui->buf = dma_alloc_coherent(&pdev->dev, 4096, &ui->dma, GFP_KERNEL); + if (!ui->buf) + return usb_free(ui, -ENOMEM); + + ui->pool = dma_pool_create("msm72k_udc", NULL, 32, 32, 0); + if (!ui->pool) + return usb_free(ui, -ENOMEM); + + INFO("msm72k_probe() io=%p, irq=%d, dma=%p(%x)\n", + ui->addr, irq, ui->buf, ui->dma); + +#ifdef CONFIG_ARCH_MSM7X30 + msm_hsusb_rpc_connect(); +#endif + ui->clk = clk_get(&pdev->dev, "usb_hs_clk"); + if (IS_ERR(ui->clk)) + return usb_free(ui, PTR_ERR(ui->clk)); + + ui->pclk = clk_get(&pdev->dev, "usb_hs_pclk"); + if (IS_ERR(ui->pclk)) + return usb_free(ui, PTR_ERR(ui->pclk)); + + ui->otgclk = clk_get(&pdev->dev, "usb_otg_clk"); + if (IS_ERR(ui->otgclk)) + ui->otgclk = NULL; + + ui->coreclk = clk_get(&pdev->dev, "usb_hs_core_clk"); + if (IS_ERR(ui->coreclk)) + ui->coreclk = NULL; + + ui->ebi1clk = clk_get(NULL, "ebi1_clk"); + if (IS_ERR(ui->ebi1clk)) + return usb_free(ui, PTR_ERR(ui->ebi1clk)); + + /* clear interrupts before requesting irq */ + if (ui->coreclk) + clk_enable(ui->coreclk); + clk_enable(ui->clk); + clk_enable(ui->pclk); + if (ui->otgclk) + clk_enable(ui->otgclk); + writel(0, USB_USBINTR); + writel(0, USB_OTGSC); + if (ui->coreclk) + clk_disable(ui->coreclk); + if (ui->otgclk) + clk_disable(ui->otgclk); + clk_disable(ui->pclk); + clk_disable(ui->clk); + + 
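/*
 * Editorial sketch, not part of the original patch: the probe code above
 * briefly enables the USB clocks only so that USBINTR and OTGSC can be
 * zeroed, then drops the clocks again before request_irq() is called
 * just below.  That ordering prevents a stale, already-latched interrupt
 * from firing into a half-initialized driver.  The helper below is a
 * minimal sketch of the same "clock-bracketed quiesce, then request the
 * IRQ" pattern; struct quiesce_dev, QUIESCE_INTR_EN and quiesce_isr are
 * hypothetical names, not part of this driver.
 */
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>

#define QUIESCE_INTR_EN	0x10	/* hypothetical interrupt-enable register offset */

struct quiesce_dev {
	struct clk	*clk;
	void __iomem	*base;
};

static irqreturn_t quiesce_isr(int irq, void *data)
{
	/* a real handler would inspect and clear status here */
	return IRQ_HANDLED;
}

static int quiesce_then_request_irq(struct quiesce_dev *d, int irq)
{
	clk_enable(d->clk);			/* power up the register interface */
	writel(0, d->base + QUIESCE_INTR_EN);	/* mask every interrupt source */
	clk_disable(d->clk);			/* clocks stay off until first use */

	/* only now is it safe to install the handler */
	return request_irq(irq, quiesce_isr, 0, "quiesce_dev", d);
}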
ui->in_lpm = 1; + ret = request_irq(irq, usb_interrupt, 0, pdev->name, ui); + if (ret) + return usb_free(ui, ret); + enable_irq_wake(irq); + ui->irq = irq; + + ui->gadget.ops = &msm72k_ops; + ui->gadget.is_dualspeed = 1; + device_initialize(&ui->gadget.dev); + dev_set_name(&ui->gadget.dev, "gadget"); + ui->gadget.dev.parent = &pdev->dev; + ui->gadget.dev.dma_mask = pdev->dev.dma_mask; + + the_usb_info = ui; + + usb_debugfs_init(ui); + + usb_prepare(ui); + + /* initialize mfg serial number */ + + if (board_mfg_mode() == 1) { + use_mfg_serialno = 1; + wake_lock_init(&vbus_idle_wake_lock, WAKE_LOCK_IDLE, "usb_idle_lock"); + perf_lock_init(&usb_perf_lock, PERF_LOCK_HIGHEST, "usb"); + } else + use_mfg_serialno = 0; + strncpy(mfg_df_serialno, "000000000000", strlen("000000000000")); + + return 0; +} + +int usb_gadget_probe_driver(struct usb_gadget_driver *driver, int (*bind)(struct usb_gadget *)) +{ + struct usb_info *ui = the_usb_info; + int retval, n; + + if (!driver + || driver->speed < USB_SPEED_FULL + || !bind + || !driver->disconnect + || !driver->setup) + return -EINVAL; + if (!ui) + return -ENODEV; + if (ui->driver) + return -EBUSY; + + /* first hook up the driver ... */ + ui->driver = driver; + ui->gadget.dev.driver = &driver->driver; + ui->gadget.name = driver_name; + INIT_LIST_HEAD(&ui->gadget.ep_list); + ui->gadget.ep0 = &ui->ep0in.ep; + INIT_LIST_HEAD(&ui->gadget.ep0->ep_list); + ui->gadget.speed = USB_SPEED_UNKNOWN; + + for (n = 1; n < 16; n++) { + struct msm_endpoint *ept = ui->ept + n; + list_add_tail(&ept->ep.ep_list, &ui->gadget.ep_list); + ept->ep.maxpacket = 512; + } + for (n = 17; n < 32; n++) { + struct msm_endpoint *ept = ui->ept + n; + list_add_tail(&ept->ep.ep_list, &ui->gadget.ep_list); + ept->ep.maxpacket = 512; + } + + retval = device_add(&ui->gadget.dev); + if (retval) + goto fail; + + retval = bind(&ui->gadget); + if (retval) { + INFO("bind to driver %s --> error %d\n", + driver->driver.name, retval); + device_del(&ui->gadget.dev); + goto fail; + } + + /* create sysfs node for remote wakeup */ + retval = device_create_file(&ui->gadget.dev, &dev_attr_wakeup); + if (retval != 0) + INFO("failed to create sysfs entry: (wakeup) error: (%d)\n", + retval); + INFO("msm72k_udc: registered gadget driver '%s'\n", + driver->driver.name); + usb_start(ui); + + return 0; + +fail: + ui->driver = NULL; + ui->gadget.dev.driver = NULL; + return retval; +} +EXPORT_SYMBOL(usb_gadget_probe_driver); + +int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) +{ + struct usb_info *dev = the_usb_info; + + if (!dev) + return -ENODEV; + if (!driver || driver != dev->driver || !driver->unbind) + return -EINVAL; + + device_remove_file(&dev->gadget.dev, &dev_attr_wakeup); + driver->unbind(&dev->gadget); + dev->gadget.dev.driver = NULL; + dev->driver = NULL; + + device_del(&dev->gadget.dev); + + VDEBUG("unregistered gadget driver '%s'\n", driver->driver.name); + return 0; +} +EXPORT_SYMBOL(usb_gadget_unregister_driver); + + +static struct platform_driver usb_driver = { + .probe = msm72k_probe, + .driver = { .name = "msm_hsusb", }, +}; + +static int __init init(void) +{ + return platform_driver_register(&usb_driver); +} +module_init(init); + +static void __exit cleanup(void) +{ + platform_driver_unregister(&usb_driver); +} +module_exit(cleanup); + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_AUTHOR("Mike Lockwood, Brian Swetland"); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c index 1c610a0aeb654..8f836337c3055 100644 --- 
a/drivers/usb/gadget/storage_common.c +++ b/drivers/usb/gadget/storage_common.c @@ -262,8 +262,12 @@ static struct fsg_lun *fsg_lun_from_dev(struct device *dev) #define EP0_BUFSIZE 256 #define DELAYED_STATUS (EP0_BUFSIZE + 999) /* An impossibly large value */ -/* Number of buffers we will use. 2 is enough for double-buffering */ +/* Number of buffers for CBW, DATA and CSW */ +#ifdef CONFIG_USB_CSW_HACK +#define FSG_NUM_BUFFERS 4 +#else #define FSG_NUM_BUFFERS 2 +#endif /* Default size of buffer length. */ #define FSG_BUFLEN ((u32)16384) diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 74dcf49bd015a..44849262b85e5 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -776,8 +776,9 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd) goto dead; } + /* Shared IRQ? */ masked_status = status & INTR_MASK; - if (!masked_status) { /* irq sharing? */ + if (!masked_status || unlikely(hcd->state == HC_STATE_HALT)) { spin_unlock(&ehci->lock); return IRQ_NONE; } @@ -872,6 +873,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd) dead: ehci_reset(ehci); ehci_writel(ehci, 0, &ehci->regs->configured_flag); + usb_hc_died(hcd); /* generic layer kills/unlinks all urbs, then * uses ehci_stop to clean up the rest */ diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 8a515f0d59880..72ae77c7b7c08 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -106,6 +106,27 @@ static void ehci_handover_companion_ports(struct ehci_hcd *ehci) ehci->owned_ports = 0; } +static int ehci_port_change(struct ehci_hcd *ehci) +{ + int i = HCS_N_PORTS(ehci->hcs_params); + + /* First check if the controller indicates a change event */ + + if (ehci_readl(ehci, &ehci->regs->status) & STS_PCD) + return 1; + + /* + * Not all controllers appear to update this while going from D3 to D0, + * so check the individual port status registers as well + */ + + while (i--) + if (ehci_readl(ehci, &ehci->regs->port_status[i]) & PORT_CSC) + return 1; + + return 0; +} + static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, bool suspending, bool do_wakeup) { @@ -173,7 +194,7 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, } /* Does the root hub have a port wakeup pending? */ - if (!suspending && (ehci_readl(ehci, &ehci->regs->status) & STS_PCD)) + if (!suspending && ehci_port_change(ehci)) usb_hcd_resume_root_hub(ehci_to_hcd(ehci)); spin_unlock_irqrestore(&ehci->lock, flags); diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 233c288e3f931..baf7362b0e444 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -315,7 +315,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) int stopped; unsigned count = 0; u8 state; - const __le32 halt = HALT_BIT(ehci); struct ehci_qh_hw *hw = qh->hw; if (unlikely (list_empty (&qh->qtd_list))) @@ -422,7 +421,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) && !(qtd->hw_alt_next & EHCI_LIST_END(ehci))) { stopped = 1; - goto halt; } /* stop scanning when we reach qtds the hc is using */ @@ -456,16 +454,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) */ ehci_clear_tt_buffer(ehci, qh, urb, token); } - - /* force halt for unlinked or blocked qh, so we'll - * patch the qh later and so that completions can't - * activate it while we "know" it's stopped. 
- */ - if ((halt & hw->hw_token) == 0) { -halt: - hw->hw_token |= halt; - wmb (); - } } /* unless we already know the urb's status, collect qtd status @@ -1257,24 +1245,27 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh) static void scan_async (struct ehci_hcd *ehci) { + bool stopped; struct ehci_qh *qh; enum ehci_timer_action action = TIMER_IO_WATCHDOG; ehci->stamp = ehci_readl(ehci, &ehci->regs->frame_index); timer_action_done (ehci, TIMER_ASYNC_SHRINK); rescan: + stopped = !HC_IS_RUNNING(ehci_to_hcd(ehci)->state); qh = ehci->async->qh_next.qh; if (likely (qh != NULL)) { do { /* clean any finished work for this qh */ - if (!list_empty (&qh->qtd_list) - && qh->stamp != ehci->stamp) { + if (!list_empty(&qh->qtd_list) && (stopped || + qh->stamp != ehci->stamp)) { int temp; /* unlinks could happen here; completion * reporting drops the lock. rescan using * the latest schedule, but don't rescan - * qhs we already finished (no looping). + * qhs we already finished (no looping) + * unless the controller is stopped. */ qh = qh_get (qh); qh->stamp = ehci->stamp; @@ -1295,9 +1286,9 @@ static void scan_async (struct ehci_hcd *ehci) */ if (list_empty(&qh->qtd_list) && qh->qh_state == QH_STATE_LINKED) { - if (!ehci->reclaim - && ((ehci->stamp - qh->stamp) & 0x1fff) - >= (EHCI_SHRINK_FRAMES * 8)) + if (!ehci->reclaim && (stopped || + ((ehci->stamp - qh->stamp) & 0x1fff) + >= EHCI_SHRINK_FRAMES * 8)) start_unlink_async(ehci, qh); else action = TIMER_ASYNC_SHRINK; diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index aa46f57f9ec8f..9dc7c19a8204e 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -471,8 +471,10 @@ static int enable_periodic (struct ehci_hcd *ehci) */ status = handshake_on_error_set_halt(ehci, &ehci->regs->status, STS_PSS, 0, 9 * 125); - if (status) + if (status) { + usb_hc_died(ehci_to_hcd(ehci)); return status; + } cmd = ehci_readl(ehci, &ehci->regs->command) | CMD_PSE; ehci_writel(ehci, cmd, &ehci->regs->command); @@ -510,8 +512,10 @@ static int disable_periodic (struct ehci_hcd *ehci) */ status = handshake_on_error_set_halt(ehci, &ehci->regs->status, STS_PSS, STS_PSS, 9 * 125); - if (status) + if (status) { + usb_hc_died(ehci_to_hcd(ehci)); return status; + } cmd = ehci_readl(ehci, &ehci->regs->command) & ~CMD_PSE; ehci_writel(ehci, cmd, &ehci->regs->command); diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c index 0da7fc05f4537..9e3ed9a670630 100644 --- a/drivers/usb/host/isp116x-hcd.c +++ b/drivers/usb/host/isp116x-hcd.c @@ -612,6 +612,7 @@ static irqreturn_t isp116x_irq(struct usb_hcd *hcd) /* IRQ's are off, we do no DMA, perfectly ready to die ... 
*/ hcd->state = HC_STATE_HALT; + usb_hc_died(hcd); ret = IRQ_HANDLED; goto done; } diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c index bdba8c5d844aa..c470cc83dbb01 100644 --- a/drivers/usb/host/isp1760-hcd.c +++ b/drivers/usb/host/isp1760-hcd.c @@ -33,6 +33,7 @@ struct isp1760_hcd { struct inter_packet_info atl_ints[32]; struct inter_packet_info int_ints[32]; struct memory_chunk memory_pool[BLOCKS]; + u32 atl_queued; /* periodic schedule support */ #define DEFAULT_I_TDPS 1024 @@ -850,6 +851,11 @@ static void enqueue_an_ATL_packet(struct usb_hcd *hcd, struct isp1760_qh *qh, skip_map &= ~queue_entry; isp1760_writel(skip_map, hcd->regs + HC_ATL_PTD_SKIPMAP_REG); + priv->atl_queued++; + if (priv->atl_queued == 2) + isp1760_writel(INTERRUPT_ENABLE_SOT_MASK, + hcd->regs + HC_INTERRUPT_ENABLE); + buffstatus = isp1760_readl(hcd->regs + HC_BUFFER_STATUS_REG); buffstatus |= ATL_BUFFER; isp1760_writel(buffstatus, hcd->regs + HC_BUFFER_STATUS_REG); @@ -992,6 +998,7 @@ static void do_atl_int(struct usb_hcd *usb_hcd) u32 dw3; status = 0; + priv->atl_queued--; queue_entry = __ffs(done_map); done_map &= ~(1 << queue_entry); @@ -1054,11 +1061,6 @@ static void do_atl_int(struct usb_hcd *usb_hcd) * device is not able to send data fast enough. * This happens mostly on slower hardware. */ - printk(KERN_NOTICE "Reloading ptd %p/%p... qh %p read: " - "%d of %zu done: %08x cur: %08x\n", qtd, - urb, qh, PTD_XFERRED_LENGTH(dw3), - qtd->length, done_map, - (1 << queue_entry)); /* RL counter = ERR counter */ dw3 &= ~(0xf << 19); @@ -1086,6 +1088,11 @@ static void do_atl_int(struct usb_hcd *usb_hcd) priv_write_copy(priv, (u32 *)&ptd, usb_hcd->regs + atl_regs, sizeof(ptd)); + priv->atl_queued++; + if (priv->atl_queued == 2) + isp1760_writel(INTERRUPT_ENABLE_SOT_MASK, + usb_hcd->regs + HC_INTERRUPT_ENABLE); + buffstatus = isp1760_readl(usb_hcd->regs + HC_BUFFER_STATUS_REG); buffstatus |= ATL_BUFFER; @@ -1191,6 +1198,9 @@ static void do_atl_int(struct usb_hcd *usb_hcd) skip_map = isp1760_readl(usb_hcd->regs + HC_ATL_PTD_SKIPMAP_REG); } + if (priv->atl_queued <= 1) + isp1760_writel(INTERRUPT_ENABLE_MASK, + usb_hcd->regs + HC_INTERRUPT_ENABLE); } static void do_intl_int(struct usb_hcd *usb_hcd) @@ -1770,7 +1780,7 @@ static irqreturn_t isp1760_irq(struct usb_hcd *usb_hcd) goto leave; isp1760_writel(imask, usb_hcd->regs + HC_INTERRUPT_REG); - if (imask & HC_ATL_INT) + if (imask & (HC_ATL_INT | HC_SOT_INT)) do_atl_int(usb_hcd); if (imask & HC_INTL_INT) diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h index 6931ef5c96509..612bce5dce03c 100644 --- a/drivers/usb/host/isp1760-hcd.h +++ b/drivers/usb/host/isp1760-hcd.h @@ -69,6 +69,7 @@ void deinit_kmem_cache(void); #define HC_INTERRUPT_ENABLE 0x314 #define INTERRUPT_ENABLE_MASK (HC_INTL_INT | HC_ATL_INT | HC_EOT_INT) +#define INTERRUPT_ENABLE_SOT_MASK (HC_INTL_INT | HC_SOT_INT | HC_EOT_INT) #define HC_ISO_INT (1 << 9) #define HC_ATL_INT (1 << 8) diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 759a12ff8048b..46b884aa3fbae 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -773,6 +773,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd) if (ints == ~(u32)0) { disable (ohci); ohci_dbg (ohci, "device removed!\n"); + usb_hc_died(hcd); return IRQ_HANDLED; } @@ -780,7 +781,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd) ints &= ohci_readl(ohci, ®s->intrenable); /* interrupt for some other device? 
*/ - if (ints == 0) + if (ints == 0 || unlikely(hcd->state == HC_STATE_HALT)) return IRQ_NOTMINE; if (ints & OHCI_INTR_UE) { @@ -797,6 +798,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd) } else { disable (ohci); ohci_err (ohci, "OHCI Unrecoverable Error, disabled\n"); + usb_hc_died(hcd); } ohci_dump (ohci, 1); diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index 36ee9a666e937..702f4c74dc97a 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c @@ -207,10 +207,18 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd) */ static int ohci_quirk_nvidia_shutdown(struct usb_hcd *hcd) { + struct pci_dev *pdev = to_pci_dev(hcd->self.controller); struct ohci_hcd *ohci = hcd_to_ohci(hcd); - ohci->flags |= OHCI_QUIRK_SHUTDOWN; - ohci_dbg(ohci, "enabled nVidia shutdown quirk\n"); + /* Evidently nVidia fixed their later hardware; this is a guess at + * the changeover point. + */ +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB 0x026d + + if (pdev->device < PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB) { + ohci->flags |= OHCI_QUIRK_SHUTDOWN; + ohci_dbg(ohci, "enabled nVidia shutdown quirk\n"); + } return 0; } diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c index e0cb12b573f91..6d6e2b38a98a1 100644 --- a/drivers/usb/host/oxu210hp-hcd.c +++ b/drivers/usb/host/oxu210hp-hcd.c @@ -1884,6 +1884,7 @@ static int enable_periodic(struct oxu_hcd *oxu) status = handshake(oxu, &oxu->regs->status, STS_PSS, 0, 9 * 125); if (status != 0) { oxu_to_hcd(oxu)->state = HC_STATE_HALT; + usb_hc_died(oxu_to_hcd(oxu)); return status; } @@ -1909,6 +1910,7 @@ static int disable_periodic(struct oxu_hcd *oxu) status = handshake(oxu, &oxu->regs->status, STS_PSS, STS_PSS, 9 * 125); if (status != 0) { oxu_to_hcd(oxu)->state = HC_STATE_HALT; + usb_hc_died(oxu_to_hcd(oxu)); return status; } @@ -2449,8 +2451,9 @@ static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd) goto dead; } + /* Shared IRQ? */ status &= INTR_MASK; - if (!status) { /* irq sharing? */ + if (!status || unlikely(hcd->state == HC_STATE_HALT)) { spin_unlock(&oxu->lock); return IRQ_NONE; } @@ -2516,6 +2519,7 @@ static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd) dead: ehci_reset(oxu); writel(0, &oxu->regs->configured_flag); + usb_hc_died(hcd); /* generic layer kills/unlinks all urbs, then * uses oxu_stop to clean up the rest */ diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index a9534396e85ba..175434ddc2168 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -207,14 +207,13 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci, rings_cached = virt_dev->num_rings_cached; if (rings_cached < XHCI_MAX_RINGS_CACHED) { - virt_dev->num_rings_cached++; - rings_cached = virt_dev->num_rings_cached; virt_dev->ring_cache[rings_cached] = virt_dev->eps[ep_index].ring; + virt_dev->num_rings_cached++; xhci_dbg(xhci, "Cached old ring, " "%d ring%s cached\n", - rings_cached, - (rings_cached > 1) ? "s" : ""); + virt_dev->num_rings_cached, + (virt_dev->num_rings_cached > 1) ? "s" : ""); } else { xhci_ring_free(xhci, virt_dev->eps[ep_index].ring); xhci_dbg(xhci, "Ring cache full (%d rings), " @@ -920,6 +919,47 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud return 0; } +/* + * Convert interval expressed as 2^(bInterval - 1) == interval into + * straight exponent value 2^n == interval. 
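 *
 * Editorial worked example, not part of the original patch: a high-speed
 * interrupt endpoint with bInterval = 4 advertises a period of
 * 2^(4 - 1) = 8 microframes (1 ms).  xhci_parse_exponent_interval()
 * below computes clamp_val(4, 1, 16) - 1 = 3, and an Interval field of 3
 * is decoded by the controller as 2^3 = 8 microframes, so no rounding
 * warning is printed.  Only out-of-range descriptors (bInterval of 0 or
 * greater than 16) are clamped and trigger the dev_warn().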
+ * + */ +static unsigned int xhci_parse_exponent_interval(struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + unsigned int interval; + + interval = clamp_val(ep->desc.bInterval, 1, 16) - 1; + if (interval != ep->desc.bInterval - 1) + dev_warn(&udev->dev, + "ep %#x - rounding interval to %d microframes\n", + ep->desc.bEndpointAddress, + 1 << interval); + + return interval; +} + +/* + * Convert bInterval expressed in frames (in 1-255 range) to exponent of + * microframes, rounded down to nearest power of 2. + */ +static unsigned int xhci_parse_frame_interval(struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + unsigned int interval; + + interval = fls(8 * ep->desc.bInterval) - 1; + interval = clamp_val(interval, 3, 10); + if ((1 << interval) != 8 * ep->desc.bInterval) + dev_warn(&udev->dev, + "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n", + ep->desc.bEndpointAddress, + 1 << interval, + 8 * ep->desc.bInterval); + + return interval; +} + /* Return the polling or NAK interval. * * The polling interval is expressed in "microframes". If xHCI's Interval field @@ -937,45 +977,38 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev, case USB_SPEED_HIGH: /* Max NAK rate */ if (usb_endpoint_xfer_control(&ep->desc) || - usb_endpoint_xfer_bulk(&ep->desc)) + usb_endpoint_xfer_bulk(&ep->desc)) { interval = ep->desc.bInterval; + break; + } /* Fall through - SS and HS isoc/int have same decoding */ + case USB_SPEED_SUPER: if (usb_endpoint_xfer_int(&ep->desc) || - usb_endpoint_xfer_isoc(&ep->desc)) { - if (ep->desc.bInterval == 0) - interval = 0; - else - interval = ep->desc.bInterval - 1; - if (interval > 15) - interval = 15; - if (interval != ep->desc.bInterval + 1) - dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n", - ep->desc.bEndpointAddress, 1 << interval); + usb_endpoint_xfer_isoc(&ep->desc)) { + interval = xhci_parse_exponent_interval(udev, ep); } break; - /* Convert bInterval (in 1-255 frames) to microframes and round down to - * nearest power of 2. - */ + case USB_SPEED_FULL: + if (usb_endpoint_xfer_isoc(&ep->desc)) { + interval = xhci_parse_exponent_interval(udev, ep); + break; + } + /* + * Fall through for interrupt endpoint interval decoding + * since it uses the same rules as low speed interrupt + * endpoints. + */ + case USB_SPEED_LOW: if (usb_endpoint_xfer_int(&ep->desc) || - usb_endpoint_xfer_isoc(&ep->desc)) { - interval = fls(8*ep->desc.bInterval) - 1; - if (interval > 10) - interval = 10; - if (interval < 3) - interval = 3; - if ((1 << interval) != 8*ep->desc.bInterval) - dev_warn(&udev->dev, - "ep %#x - rounding interval" - " to %d microframes, " - "ep desc says %d microframes\n", - ep->desc.bEndpointAddress, - 1 << interval, - 8*ep->desc.bInterval); + usb_endpoint_xfer_isoc(&ep->desc)) { + + interval = xhci_parse_frame_interval(udev, ep); } break; + default: BUG(); } diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 3289bf4832c9a..2d7fa995ea39b 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -500,15 +500,26 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, state->new_cycle_state = ~(state->new_cycle_state) & 0x1; next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); + /* + * If there is only one segment in a ring, find_trb_seg()'s while loop + * will not run, and it will return before it has a chance to see if it + * needs to toggle the cycle bit. 
It can't tell if the stalled transfer + * ended just before the link TRB on a one-segment ring, or if the TD + * wrapped around the top of the ring, because it doesn't have the TD in + * question. Look for the one-segment case where stalled TRB's address + * is greater than the new dequeue pointer address. + */ + if (ep_ring->first_seg == ep_ring->first_seg->next && + state->new_deq_ptr < dev->eps[ep_index].stopped_trb) + state->new_cycle_state ^= 0x1; + xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state); + /* Don't update the ring cycle state for the producer (us). */ xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n", state->new_deq_seg); addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr); xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n", (unsigned long long) addr); - xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n"); - ep_ring->dequeue = state->new_deq_ptr; - ep_ring->deq_seg = state->new_deq_seg; } static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, @@ -951,9 +962,26 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, } else { xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n", ep_ctx->deq); + if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg, + dev->eps[ep_index].queued_deq_ptr) == + (ep_ctx->deq & ~(EP_CTX_CYCLE_MASK))) { + /* Update the ring's dequeue segment and dequeue pointer + * to reflect the new position. + */ + ep_ring->deq_seg = dev->eps[ep_index].queued_deq_seg; + ep_ring->dequeue = dev->eps[ep_index].queued_deq_ptr; + } else { + xhci_warn(xhci, "Mismatch between completed Set TR Deq " + "Ptr command & xHCI internal state.\n"); + xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n", + dev->eps[ep_index].queued_deq_seg, + dev->eps[ep_index].queued_deq_ptr); + } } dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING; + dev->eps[ep_index].queued_deq_seg = NULL; + dev->eps[ep_index].queued_deq_ptr = NULL; /* Restart any rings with pending URBs */ ring_doorbell_for_active_rings(xhci, slot_id, ep_index); } @@ -1504,6 +1532,9 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, else *status = 0; break; + case COMP_STOP_INVAL: + case COMP_STOP: + return finish_td(xhci, td, event_trb, event, ep, status, false); default: if (!xhci_requires_manual_halt_cleanup(xhci, ep_ctx, trb_comp_code)) @@ -1548,15 +1579,12 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, } } else { /* Maybe the event was for the data stage? 
*/ - if (trb_comp_code != COMP_STOP_INVAL) { - /* We didn't stop on a link TRB in the middle */ - td->urb->actual_length = - td->urb->transfer_buffer_length - - TRB_LEN(event->transfer_len); - xhci_dbg(xhci, "Waiting for status " - "stage event\n"); - return 0; - } + td->urb->actual_length = + td->urb->transfer_buffer_length - + TRB_LEN(le32_to_cpu(event->transfer_len)); + xhci_dbg(xhci, "Waiting for status " + "stage event\n"); + return 0; } } @@ -3229,6 +3257,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id); u32 type = TRB_TYPE(TRB_SET_DEQ); + struct xhci_virt_ep *ep; addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr); if (addr == 0) { @@ -3237,6 +3266,14 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, deq_seg, deq_ptr); return 0; } + ep = &xhci->devs[slot_id]->eps[ep_index]; + if ((ep->ep_state & SET_DEQ_PENDING)) { + xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); + xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n"); + return 0; + } + ep->queued_deq_seg = deq_seg; + ep->queued_deq_ptr = deq_ptr; return queue_command(xhci, lower_32_bits(addr) | cycle_state, upper_32_bits(addr), trb_stream_id, trb_slot_id | trb_ep_index | type, false); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 2083fc2179b2a..c39f12fe95898 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1636,8 +1636,17 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) xhci_dbg_ctx(xhci, virt_dev->out_ctx, LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); + /* Free any rings that were dropped, but not changed. */ + for (i = 1; i < 31; ++i) { + if ((ctrl_ctx->drop_flags & (1 << (i + 1))) && + !(ctrl_ctx->add_flags & (1 << (i + 1)))) + xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); + } xhci_zero_in_ctx(xhci, virt_dev); - /* Install new rings and free or cache any old rings */ + /* + * Install any rings for completely new endpoints or changed endpoints, + * and free or cache any old rings from changed endpoints. + */ for (i = 1; i < 31; ++i) { if (!virt_dev->eps[i].new_ring) continue; @@ -2335,10 +2344,18 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) /* Everything but endpoint 0 is disabled, so free or cache the rings. */ last_freed_endpoint = 1; for (i = 1; i < 31; ++i) { - if (!virt_dev->eps[i].ring) - continue; - xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); - last_freed_endpoint = i; + struct xhci_virt_ep *ep = &virt_dev->eps[i]; + + if (ep->ep_state & EP_HAS_STREAMS) { + xhci_free_stream_info(xhci, ep->stream_info); + ep->stream_info = NULL; + ep->ep_state &= ~EP_HAS_STREAMS; + } + + if (ep->ring) { + xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); + last_freed_endpoint = i; + } } xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 7f127df6dd553..19040c54be0dc 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -232,7 +232,7 @@ struct xhci_op_regs { * notification type that matches a bit set in this bit field. */ #define DEV_NOTE_MASK (0xffff) -#define ENABLE_DEV_NOTE(x) (1 << x) +#define ENABLE_DEV_NOTE(x) (1 << (x)) /* Most of the device notification types should only be used for debug. * SW does need to pay attention to function wake notifications. 
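 *
 * Editorial note, not part of the original patch: the parenthesization
 * fixes in this header (ENABLE_DEV_NOTE above, EP_MULT and EP_INTERVAL
 * just below) are plain macro hygiene.  With the old definition, a call
 * such as ENABLE_DEV_NOTE(i | FLAG) (hypothetical arguments) expanded to
 * (1 << i | FLAG), which C parses as ((1 << i) | FLAG) because << binds
 * tighter than |; the (1 << (x)) form shifts by the whole argument, as
 * intended.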
*/ @@ -601,11 +601,11 @@ struct xhci_ep_ctx { #define EP_STATE_STOPPED 3 #define EP_STATE_ERROR 4 /* Mult - Max number of burtst within an interval, in EP companion desc. */ -#define EP_MULT(p) ((p & 0x3) << 8) +#define EP_MULT(p) (((p) & 0x3) << 8) /* bits 10:14 are Max Primary Streams */ /* bit 15 is Linear Stream Array */ /* Interval - period between requests to an endpoint - 125u increments. */ -#define EP_INTERVAL(p) ((p & 0xff) << 16) +#define EP_INTERVAL(p) (((p) & 0xff) << 16) #define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff)) #define EP_MAXPSTREAMS_MASK (0x1f << 10) #define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK) @@ -644,6 +644,9 @@ struct xhci_ep_ctx { #define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff) #define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16) +/* deq bitmasks */ +#define EP_CTX_CYCLE_MASK (1 << 0) + /** * struct xhci_input_control_context @@ -746,6 +749,12 @@ struct xhci_virt_ep { struct timer_list stop_cmd_timer; int stop_cmds_pending; struct xhci_hcd *xhci; + /* Dequeue pointer and dequeue segment for a submitted Set TR Dequeue + * command. We'll need to update the ring's dequeue segment and dequeue + * pointer after the command completes. + */ + struct xhci_segment *queued_deq_seg; + union xhci_trb *queued_deq_ptr; /* * Sometimes the xHC can not process isochronous endpoint ring quickly * enough, and it will miss some isoc tds on the ring and generate diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c index f7a2057380321..8b1d94a769140 100644 --- a/drivers/usb/misc/uss720.c +++ b/drivers/usb/misc/uss720.c @@ -177,12 +177,11 @@ static struct uss720_async_request *submit_async_request(struct parport_uss720_p spin_lock_irqsave(&priv->asynclock, flags); list_add_tail(&rq->asynclist, &priv->asynclist); spin_unlock_irqrestore(&priv->asynclock, flags); + kref_get(&rq->ref_count); ret = usb_submit_urb(rq->urb, mem_flags); - if (!ret) { - kref_get(&rq->ref_count); + if (!ret) return rq; - } - kref_put(&rq->ref_count, destroy_async); + destroy_async(&rq->ref_count); err("submit_async_request submit_urb failed with %d", ret); return NULL; } diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index 4cbb7e4b368d2..74073b363c30f 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig @@ -14,7 +14,7 @@ config USB_MUSB_HDRC select TWL4030_USB if MACH_OMAP_3430SDP select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA select USB_OTG_UTILS - tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' + bool 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' help Say Y here if your system has a dual role high speed USB controller based on the Mentor Graphics silicon IP. Then @@ -30,8 +30,8 @@ config USB_MUSB_HDRC If you do not know what this is, please say N. - To compile this driver as a module, choose M here; the - module will be called "musb-hdrc". +# To compile this driver as a module, choose M here; the +# module will be called "musb-hdrc". 
choice prompt "Platform Glue Layer" diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c index 9d49d1cd7ce23..52312e8af213a 100644 --- a/drivers/usb/musb/blackfin.c +++ b/drivers/usb/musb/blackfin.c @@ -322,7 +322,7 @@ static void bfin_musb_try_idle(struct musb *musb, unsigned long timeout) mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY); } -static int bfin_musb_get_vbus_status(struct musb *musb) +static int bfin_musb_vbus_status(struct musb *musb) { return 0; } @@ -540,7 +540,7 @@ static struct dev_pm_ops bfin_pm_ops = { .resume = bfin_resume, }; -#define DEV_PM_OPS &bfin_pm_op, +#define DEV_PM_OPS &bfin_pm_ops #else #define DEV_PM_OPS NULL #endif @@ -548,7 +548,7 @@ static struct dev_pm_ops bfin_pm_ops = { static struct platform_driver bfin_driver = { .remove = __exit_p(bfin_remove), .driver = { - .name = "musb-bfin", + .name = "musb-blackfin", .pm = DEV_PM_OPS, }, }; diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 7b8815ddf3688..14ac87ee9251b 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -75,6 +75,7 @@ static int debug; static const struct usb_device_id id_table[] = { { USB_DEVICE(0x4348, 0x5523) }, { USB_DEVICE(0x1a86, 0x7523) }, + { USB_DEVICE(0x1a86, 0x5523) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 735ea03157aba..66767ce0d7bef 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -113,6 +113,10 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */ { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ + { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ + { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ + { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ + { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index f349a3629d00c..3c2d9d685c7b1 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -151,6 +151,8 @@ static struct ftdi_sio_quirk ftdi_stmclite_quirk = { * /sys/bus/usb/ftdi_sio/new_id, then send patch/report! 
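 *
 * Editorial note, not part of the original patch: the new_id file named
 * above lets a VID/PID pair be bound at run time without rebuilding the
 * driver.  For example, writing "0403 f608" (FTDI_VID plus the
 * FTDI_CTI_MINI_PID added further down in this patch) to that file
 * attaches ftdi_sio to the CTI USB-485-Mini before its ID is compiled in.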
*/ static struct usb_device_id id_table_combined [] = { + { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) }, { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, @@ -525,6 +527,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_8_PID) }, { USB_DEVICE(IDTECH_VID, IDTECH_IDT1221U_PID) }, { USB_DEVICE(OCT_VID, OCT_US101_PID) }, + { USB_DEVICE(OCT_VID, OCT_DK201_PID) }, { USB_DEVICE(FTDI_VID, FTDI_HE_TIRA1_PID), .driver_info = (kernel_ulong_t)&ftdi_HE_TIRA1_quirk }, { USB_DEVICE(FTDI_VID, FTDI_USB_UIRT_PID), @@ -563,6 +566,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_IBS_APP70_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) }, /* * ELV devices: */ @@ -785,6 +789,8 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) }, + { USB_DEVICE(FTDI_VID, HAMEG_HO720_PID) }, + { USB_DEVICE(FTDI_VID, HAMEG_HO730_PID) }, { USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) }, { USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) }, { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 117e8e6f93c68..f8e3f0dc34e83 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -300,6 +300,8 @@ * Hameg HO820 and HO870 interface (using VID 0x0403) */ #define HAMEG_HO820_PID 0xed74 +#define HAMEG_HO730_PID 0xed73 +#define HAMEG_HO720_PID 0xed72 #define HAMEG_HO870_PID 0xed71 /* @@ -489,6 +491,11 @@ /* www.canusb.com Lawicel CANUSB device (FTDI_VID) */ #define FTDI_CANUSB_PID 0xFFA8 /* Product Id */ +/* + * TavIR AVR product ids (FTDI_VID) + */ +#define FTDI_TAVIR_STK500_PID 0xFA33 /* STK500 AVR programmer */ + /********************************/ @@ -572,6 +579,7 @@ /* Note: OCT US101 is also rebadged as Dick Smith Electronics (NZ) XH6381 */ /* Also rebadged as Dick Smith Electronics (Aus) XH6451 */ /* Also rebadged as SIIG Inc. model US2308 hardware version 1 */ +#define OCT_DK201_PID 0x0103 /* OCT DK201 USB docking station */ #define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */ /* @@ -1140,3 +1148,12 @@ #define QIHARDWARE_VID 0x20B7 #define MILKYMISTONE_JTAGSERIAL_PID 0x0713 +/* + * CTI GmbH RS485 Converter http://www.cti-lean.com/ + */ +/* USB-485-Mini*/ +#define FTDI_CTI_MINI_PID 0xF608 +/* USB-Nano-485*/ +#define FTDI_CTI_NANO_PID 0xF60B + + diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c index 26710b1899185..456447e03383c 100644 --- a/drivers/usb/serial/garmin_gps.c +++ b/drivers/usb/serial/garmin_gps.c @@ -1,7 +1,7 @@ /* * Garmin GPS driver * - * Copyright (C) 2006-2009 Hermann Kneissel herkne@users.sourceforge.net + * Copyright (C) 2006-2011 Hermann Kneissel herkne@gmx.de * * The latest version of the driver can be found at * http://sourceforge.net/projects/garmin-gps/ @@ -51,7 +51,7 @@ static int debug; */ #define VERSION_MAJOR 0 -#define VERSION_MINOR 33 +#define VERSION_MINOR 36 #define _STR(s) #s #define _DRIVER_VERSION(a, b) "v" _STR(a) "." 
_STR(b) @@ -410,6 +410,7 @@ static int gsp_send_ack(struct garmin_data *garmin_data_p, __u8 pkt_id) */ static int gsp_rec_packet(struct garmin_data *garmin_data_p, int count) { + unsigned long flags; const __u8 *recpkt = garmin_data_p->inbuffer+GSP_INITIAL_OFFSET; __le32 *usbdata = (__le32 *) garmin_data_p->inbuffer; @@ -458,7 +459,9 @@ static int gsp_rec_packet(struct garmin_data *garmin_data_p, int count) /* if this was an abort-transfer command, flush all queued data. */ if (isAbortTrfCmnd(garmin_data_p->inbuffer)) { + spin_lock_irqsave(&garmin_data_p->lock, flags); garmin_data_p->flags |= FLAGS_DROP_DATA; + spin_unlock_irqrestore(&garmin_data_p->lock, flags); pkt_clear(garmin_data_p); } @@ -943,7 +946,7 @@ static int garmin_open(struct tty_struct *tty, struct usb_serial_port *port) spin_lock_irqsave(&garmin_data_p->lock, flags); garmin_data_p->mode = initial_mode; garmin_data_p->count = 0; - garmin_data_p->flags = 0; + garmin_data_p->flags &= FLAGS_SESSION_REPLY1_SEEN; spin_unlock_irqrestore(&garmin_data_p->lock, flags); /* shutdown any bulk reads that might be going on */ @@ -1178,7 +1181,8 @@ static int garmin_write_room(struct tty_struct *tty) static void garmin_read_process(struct garmin_data *garmin_data_p, - unsigned char *data, unsigned data_length) + unsigned char *data, unsigned data_length, + int bulk_data) { unsigned long flags; @@ -1193,7 +1197,8 @@ static void garmin_read_process(struct garmin_data *garmin_data_p, send it directly to the tty port */ if (garmin_data_p->flags & FLAGS_QUEUING) { pkt_add(garmin_data_p, data, data_length); - } else if (getLayerId(data) == GARMIN_LAYERID_APPL) { + } else if (bulk_data || + getLayerId(data) == GARMIN_LAYERID_APPL) { spin_lock_irqsave(&garmin_data_p->lock, flags); garmin_data_p->flags |= APP_RESP_SEEN; @@ -1237,7 +1242,7 @@ static void garmin_read_bulk_callback(struct urb *urb) usb_serial_debug_data(debug, &port->dev, __func__, urb->actual_length, data); - garmin_read_process(garmin_data_p, data, urb->actual_length); + garmin_read_process(garmin_data_p, data, urb->actual_length, 1); if (urb->actual_length == 0 && 0 != (garmin_data_p->flags & FLAGS_BULK_IN_RESTART)) { @@ -1346,7 +1351,7 @@ static void garmin_read_int_callback(struct urb *urb) __func__, garmin_data_p->serial_num); } - garmin_read_process(garmin_data_p, data, urb->actual_length); + garmin_read_process(garmin_data_p, data, urb->actual_length, 0); port->interrupt_in_urb->dev = port->serial->dev; retval = usb_submit_urb(urb, GFP_ATOMIC); @@ -1461,6 +1466,7 @@ static int garmin_attach(struct usb_serial *serial) garmin_data_p->timer.function = timeout_handler; garmin_data_p->port = port; garmin_data_p->state = 0; + garmin_data_p->flags = 0; garmin_data_p->count = 0; usb_set_serial_port_data(port, garmin_data_p); diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c index bd5bd8589e04c..b382d9a0274d5 100644 --- a/drivers/usb/serial/kobil_sct.c +++ b/drivers/usb/serial/kobil_sct.c @@ -372,7 +372,7 @@ static void kobil_read_int_callback(struct urb *urb) } tty = tty_port_tty_get(&port->port); - if (urb->actual_length) { + if (tty && urb->actual_length) { /* BEGIN DEBUG */ /* diff --git a/drivers/usb/serial/moto_modem.c b/drivers/usb/serial/moto_modem.c index 653465f61d4a9..e2bfecc464024 100644 --- a/drivers/usb/serial/moto_modem.c +++ b/drivers/usb/serial/moto_modem.c @@ -25,6 +25,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x05c6, 0x3197) }, /* unknown Motorola phone */ { USB_DEVICE(0x0c44, 0x0022) }, /* unknown Mororola phone */ 
{ USB_DEVICE(0x22b8, 0x2a64) }, /* Motorola KRZR K1m */ + { USB_DEVICE(0x22b8, 0x2c84) }, /* Motorola VE240 phone */ { USB_DEVICE(0x22b8, 0x2c64) }, /* Motorola V950 phone */ { }, }; diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 5f46838dfee5d..318dd00040a3f 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -149,6 +149,7 @@ static void option_instat_callback(struct urb *urb); #define HUAWEI_PRODUCT_K3765 0x1465 #define HUAWEI_PRODUCT_E14AC 0x14AC #define HUAWEI_PRODUCT_ETS1220 0x1803 +#define HUAWEI_PRODUCT_E353 0x1506 #define QUANTA_VENDOR_ID 0x0408 #define QUANTA_PRODUCT_Q101 0xEA02 @@ -407,6 +408,10 @@ static void option_instat_callback(struct urb *urb); /* ONDA MT825UP HSDPA 14.2 modem */ #define ONDA_MT825UP 0x000b +/* Samsung products */ +#define SAMSUNG_VENDOR_ID 0x04e8 +#define SAMSUNG_PRODUCT_GT_B3730 0x6889 + /* some devices interfaces need special handling due to a number of reasons */ enum option_blacklist_reason { OPTION_BLACKLIST_NONE = 0, @@ -528,6 +533,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, @@ -652,7 +658,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, + 0xff, 0xff), .driver_info = (kernel_ulong_t)&four_g_w14_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) }, @@ -967,6 +974,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */ + { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); @@ -1103,6 +1111,12 @@ static int option_probe(struct usb_serial *serial, serial->interface->cur_altsetting->desc.bInterfaceNumber == 1) return -ENODEV; + /* Don't bind network interface on Samsung GT-B3730, it is handled by a separate module */ + if (serial->dev->descriptor.idVendor == SAMSUNG_VENDOR_ID && + serial->dev->descriptor.idProduct == SAMSUNG_PRODUCT_GT_B3730 && + serial->interface->cur_altsetting->desc.bInterfaceClass != USB_CLASS_CDC_DATA) + return -ENODEV; + data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL); if (!data) diff --git 
a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 8858201eb1d39..54a9dab1f33b4 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -111,7 +111,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) ifnum = intf->desc.bInterfaceNumber; dbg("This Interface = %d", ifnum); - data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private), + data = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL); if (!data) return -ENOMEM; @@ -134,8 +134,10 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) { dbg("QDL port found"); - if (serial->interface->num_altsetting == 1) - return 0; + if (serial->interface->num_altsetting == 1) { + retval = 0; /* Success */ + break; + } retval = usb_set_interface(serial->dev, ifnum, 1); if (retval < 0) { @@ -145,7 +147,6 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) retval = -ENODEV; kfree(data); } - return retval; } break; @@ -166,6 +167,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) "Could not set interface, error %d\n", retval); retval = -ENODEV; + kfree(data); } } else if (ifnum == 2) { dbg("Modem port found"); @@ -177,7 +179,6 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) retval = -ENODEV; kfree(data); } - return retval; } else if (ifnum==3) { /* * NMEA (serial line 9600 8N1) @@ -191,6 +192,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) "Could not set interface, error %d\n", retval); retval = -ENODEV; + kfree(data); } } break; @@ -199,12 +201,27 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) dev_err(&serial->dev->dev, "unknown number of interfaces: %d\n", nintf); kfree(data); - return -ENODEV; + retval = -ENODEV; } + /* Set serial->private if not returning -ENODEV */ + if (retval != -ENODEV) + usb_set_serial_data(serial, data); return retval; } +static void qc_release(struct usb_serial *serial) +{ + struct usb_wwan_intf_private *priv = usb_get_serial_data(serial); + + dbg("%s", __func__); + + /* Call usb_wwan release & free the private data allocated in qcprobe */ + usb_wwan_release(serial); + usb_set_serial_data(serial, NULL); + kfree(priv); +} + static struct usb_serial_driver qcdevice = { .driver = { .owner = THIS_MODULE, @@ -222,7 +239,7 @@ static struct usb_serial_driver qcdevice = { .chars_in_buffer = usb_wwan_chars_in_buffer, .attach = usb_wwan_startup, .disconnect = usb_wwan_disconnect, - .release = usb_wwan_release, + .release = qc_release, #ifdef CONFIG_PM .suspend = usb_wwan_suspend, .resume = usb_wwan_resume, diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 546a52179becb..2ff90a9c8f474 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -911,9 +911,8 @@ int usb_serial_probe(struct usb_interface *interface, dev_err(&interface->dev, "No free urbs available\n"); goto probe_error; } - buffer_size = serial->type->bulk_in_size; - if (!buffer_size) - buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); + buffer_size = max_t(int, serial->type->bulk_in_size, + le16_to_cpu(endpoint->wMaxPacketSize)); port->bulk_in_size = buffer_size; port->bulk_in_endpointAddress = endpoint->bEndpointAddress; port->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL); diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 6bafb51bb4373..3f402e4587b00 100644 --- 
a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -26,6 +26,10 @@ source "drivers/gpu/drm/Kconfig" source "drivers/gpu/stub/Kconfig" +source "drivers/gpu/ion/Kconfig" + +source "drivers/gpu/msm/Kconfig" + config VGASTATE tristate default n @@ -2323,13 +2327,6 @@ config FB_PRE_INIT_FB Select this option if display contents should be inherited as set by the bootloader. -config FB_MSM - tristate "MSM Framebuffer support" - depends on FB && ARCH_MSM - select FB_CFB_FILLRECT - select FB_CFB_COPYAREA - select FB_CFB_IMAGEBLIT - config FB_MX3 tristate "MX3 Framebuffer support" depends on FB && MX3_IPU @@ -2365,6 +2362,8 @@ config FB_JZ4740 help Framebuffer support for the JZ4740 SoC. +source "drivers/video/msm/Kconfig" + source "drivers/video/omap/Kconfig" source "drivers/video/omap2/Kconfig" diff --git a/drivers/video/console/tileblit.c b/drivers/video/console/tileblit.c index 0056a41e5c35c..15e8e1a89c45d 100644 --- a/drivers/video/console/tileblit.c +++ b/drivers/video/console/tileblit.c @@ -83,7 +83,7 @@ static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode, int softback_lines, int fg, int bg) { struct fb_tilecursor cursor; - int use_sw = (vc->vc_cursor_type & 0x01); + int use_sw = (vc->vc_cursor_type & 0x10); cursor.sx = vc->vc_x; cursor.sy = vc->vc_y; diff --git a/drivers/video/msm/Kconfig b/drivers/video/msm/Kconfig new file mode 100644 index 0000000000000..defabe5a65591 --- /dev/null +++ b/drivers/video/msm/Kconfig @@ -0,0 +1,43 @@ +config FB_MSM + tristate "MSM Framebuffer" + depends on FB && ARCH_MSM + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + default y + +config FB_MSM_LEGACY_MDP + bool "MSM Legacy MDP (qsd8k)" + depends on FB_MSM && (MSM_MDP31 || MSM_MDP22) + default y + +config FB_MSM_MDP_PPP + bool "MSM MDP PPP" + depends on FB_MSM_LEGACY_MDP + default y + +config FB_MSM_LCDC + bool "Support for integrated LCD controller in MDP3/4" + depends on FB_MSM && (MSM_MDP31 || MSM_MDP40) + default y + +config FB_MSM_MDDI + bool "Support for MSM MDDI controllers" + depends on FB_MSM + default y + +config FB_MSM_MDDI_EPSON + bool "Support for Epson MDDI panels" + depends on FB_MSM_MDDI + default n + +config FB_MSM_MDDI_NOVTEC + bool "Support for Novtec MDDI panels" + depends on FB_MSM_MDDI + default n + +config MSM_HDMI + bool "Support for HDMI in QCT platform" + depends on MSM_MDP31 + default n + diff --git a/drivers/video/msm/Makefile b/drivers/video/msm/Makefile index 802d6ae523fb3..696960dbadd82 100644 --- a/drivers/video/msm/Makefile +++ b/drivers/video/msm/Makefile @@ -2,18 +2,34 @@ # core framebuffer # obj-y := msm_fb.o +ifeq ($(CONFIG_FB_MSM_LOGO),y) +obj-y += logo.o +endif # MDP DMA/PPP engine # -obj-y += mdp.o mdp_scale_tables.o mdp_ppp.o +obj-y += mdp.o + +obj-$(CONFIG_MSM_MDP40) += mdp_hw40.o + +obj-$(CONFIG_FB_MSM_LEGACY_MDP) += mdp_hw_legacy.o + +obj-$(CONFIG_FB_MSM_MDP_PPP) += mdp_ppp.o +obj-$(CONFIG_MSM_MDP22) += mdp_ppp22.o +obj-$(CONFIG_MSM_MDP31) += mdp_ppp31.o # MDDI interface # -obj-y += mddi.o +obj-$(CONFIG_FB_MSM_MDDI) += mddi.o # MDDI client/panel drivers # -obj-y += mddi_client_dummy.o -obj-y += mddi_client_toshiba.o -obj-y += mddi_client_nt35399.o +obj-$(CONFIG_FB_MSM_MDDI) += mddi_client_simple.o +obj-$(CONFIG_FB_MSM_MDDI) += mddi_client_toshiba.o +obj-$(CONFIG_FB_MSM_MDDI_NOVTEC) += mddi_client_novb9f6_5582.o +obj-$(CONFIG_FB_MSM_MDDI_EPSON) += mddi_client_epson.o + +# MDP LCD controller driver +obj-$(CONFIG_FB_MSM_LCDC) += mdp_lcdc.o +obj-$(CONFIG_MSM_HDMI) += hdmi/ diff --git 
a/drivers/video/msm/hdmi/Makefile b/drivers/video/msm/hdmi/Makefile new file mode 100644 index 0000000000000..a6734d5dbb19f --- /dev/null +++ b/drivers/video/msm/hdmi/Makefile @@ -0,0 +1,9 @@ +msm_hdmi-objs = \ + transmitter.o \ + hdmi_lcdc.o \ + fb-hdmi.o \ + edid.o + +obj-$(CONFIG_MSM_HDMI) += msm_hdmi.o + +obj-$(CONFIG_MSM_HDMI) += silicon-image/ diff --git a/drivers/video/msm/hdmi/edid.c b/drivers/video/msm/hdmi/edid.c new file mode 100644 index 0000000000000..06ee29af56d6b --- /dev/null +++ b/drivers/video/msm/hdmi/edid.c @@ -0,0 +1,740 @@ +/* + * Copyright (C) 2009 HTC + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + + * Common function for accessing/debugging EDID data. + * Reference: + http://en.wikipedia.org/wiki/Extended_display_identification_data +*/ + +#include +#include +#include + +#include "include/fb-hdmi.h" +#include "include/sil902x.h" + +#if 1 +#define EDID_DBG(s...) printk("[hdmi/edid]" s) +#else +#define EDID_DBG(s...) do {} while (0) +#endif + +static struct video_mode established_timing_db[] = { + {800, 600, 60, ASPECT(4, 3), PROGRESSIVE, false, "800x600 @ 60 Hz"}, + {800, 600, 56, ASPECT(4, 3), PROGRESSIVE, false, "800x600 @ 56 Hz"}, + {640, 480, 75, ASPECT(4, 3), PROGRESSIVE, false, "800x600 @ 75 Hz"}, + {640, 480, 72, ASPECT(4, 3), PROGRESSIVE, false, "640x480 @ 72 Hz"}, + {640, 480, 67, ASPECT(4, 3), PROGRESSIVE, false, "640x480 @ 67 Hz"}, + {640, 480, 60, ASPECT(4, 3), PROGRESSIVE, false, "640x480 @ 60 Hz"}, + {720, 400, 88, ASPECT(4, 3), PROGRESSIVE, false, "720x400 @ 88 Hz"}, + {720, 400, 70, ASPECT(4, 3), PROGRESSIVE, false, "720x400 @ 70 Hz"}, + + {1280, 1024, 75, ASPECT(4, 3), PROGRESSIVE, false, "1280x1024@75 Hz"}, + {1024, 768, 75, ASPECT(4, 3), PROGRESSIVE, false, "1024x768@75 Hz"}, + {1024, 768, 70, ASPECT(4, 3), PROGRESSIVE, false, "1024x768@70 Hz"}, + {1024, 768, 60, ASPECT(4, 3), PROGRESSIVE, false, "1024x768@60 Hz"}, + {1024, 768, 87, ASPECT(4, 3), INTERLACE, false, "1024x768@87 Hz (Interlaced)"}, + {832, 624, 75, ASPECT(4, 3), PROGRESSIVE, false, "832x624@75 Hz"}, + {800, 600, 75, ASPECT(4, 3), PROGRESSIVE, false, "800x600@75 Hz"}, + {800, 600, 72, ASPECT(4, 3), PROGRESSIVE, false, "800x600@72 Hz"}, + + {1152, 870, 75, ASPECT(4, 3), PROGRESSIVE, false, "1152x870 @ 75 Hz"}, +}; + +static struct video_mode standard_timing_db[8]; + +static struct video_mode additional_timing_db[] = { + {640, 480, 60, ASPECT(4, 3), PROGRESSIVE, false, + " 1 DMT0659 4:3 640x480p @ 59.94/60Hz"}, + {720, 480, 60, ASPECT(4, 3), PROGRESSIVE, false, + " 2 480p 4:3 720x480p @ 59.94/60Hz"}, + {720, 480, 60, ASPECT(16, 9), PROGRESSIVE, false, + " 3 480pH 16:9 720x480p @ 59.94/60Hz"}, + {1280, 720, 60, ASPECT(16, 9), PROGRESSIVE, false, + " 4 720p 16:9 1280x720p @ 59.94/60Hz"}, + {1920, 1080, 60, ASPECT(4, 3), INTERLACE, false, + " 5 1080i 16:9 1920x1080i @ 59.94/60Hz"}, + {720, 480, 60, ASPECT(4, 3), INTERLACE, false, + " 6 480i 4:3 720(1440)x480i @ 59.94/60Hz"}, + {720, 480, 60, ASPECT(16, 9), INTERLACE, false, + " 7 480iH 16:9 720(1440)x480i @ 59.94/60Hz"}, + {720, 240, 60, ASPECT(4, 3), PROGRESSIVE, false, + " 8 240p 4:3 720(1440)x240p @ 59.94/60Hz"}, + {720, 
480, 60, ASPECT(16, 9), PROGRESSIVE, false, + " 9 240pH 16:9 720(1440)x240p @ 59.94/60Hz"}, + {2880, 480, 60, ASPECT(4, 3), INTERLACE, false, + "10 480i4x 4:3 (2880)x480i @ 59.94/60Hz"}, + {2880, 480, 60, ASPECT(16, 9), INTERLACE, false, + "11 480i4xH 16:9 (2880)x480i @ 59.94/60Hz"}, + {2880, 240, 60, ASPECT(4, 3), PROGRESSIVE, false, + "12 240p4x 4:3 (2880)x240p @ 59.94/60Hz"}, + {2880, 240, 60, ASPECT(16, 9), PROGRESSIVE, false, + "13 240p4xH 16:9 (2880)x240p @ 59.94/60Hz"}, + {1440, 480, 60, ASPECT(4, 3), PROGRESSIVE, false, + "14 480p2x 4:3 1440x480p @ 59.94/60Hz"}, + {1440, 480, 60, ASPECT(16, 9), PROGRESSIVE, false, + "15 480p2xH 16:9 1440x480p @ 59.94/60Hz"}, + {1920, 1080, 60, ASPECT(16, 9), PROGRESSIVE, false, + "16 1080p 16:9 1920x1080p @ 59.94/60Hz"}, + {720, 576, 50, ASPECT(4, 3), PROGRESSIVE, false, + "17 576p 4:3 720x576p @ 50Hz"}, + {720, 576, 50, ASPECT(16, 9), PROGRESSIVE, false, + "18 576pH 16:9 720x576p @ 50Hz"}, + {1280, 720, 50, ASPECT(16, 9), PROGRESSIVE, false, + "19 720p50 16:9 1280x720p @ 50Hz"}, + {1920, 1080, 50, ASPECT(16, 9), INTERLACE, false, + "20 1080i25 16:9 1920x1080i @ 50Hz*"}, + {1440, 576, 50, ASPECT(4, 3), INTERLACE, false, + "21 576i 4:3 720(1440)x576i @ 50Hz"}, + {1440, 576, 50, ASPECT(4, 3), PROGRESSIVE, false, + "22 576iH 16:9 720(1440)x576i @ 50Hz"}, + {720, 288, 50, ASPECT(4, 3), PROGRESSIVE, false, + "23 288p 4:3 720(1440)x288p @ 50Hz"}, + {720, 288, 50, ASPECT(16, 9), PROGRESSIVE, false, + "24 288pH 16:9 720(1440)x288p @ 50Hz"}, + {2880, 576, 50, ASPECT(4, 3), INTERLACE, false, + "25 576i4x 4:3 (2880)x576i @ 50Hz"}, + {2880, 576, 50, ASPECT(16, 9), INTERLACE, false, + "26 576i4xH 16:9 (2880)x576i @ 50Hz"}, + {2880, 288, 50, ASPECT(4, 3), PROGRESSIVE, false, + "27 288p4x 4:3 (2880)x288p @ 50Hz"}, + {2880, 288, 50, ASPECT(16, 9), PROGRESSIVE, false, + "28 288p4xH 16:9 (2880)x288p @ 50Hz"}, + {1440, 576, 50, ASPECT(4, 3), PROGRESSIVE, false, + "29 576p2x 4:3 1440x576p @ 50Hz"}, + {1440, 576, 50, ASPECT(16, 9), PROGRESSIVE, false, + "30 576p2xH 16:9 1440x576p @ 50Hz"}, + {1920, 1080, 50, ASPECT(16, 9), PROGRESSIVE, false, + "31 1080p50 16:9 1920x1080p @ 50Hz"}, + {1920, 1080, 24, ASPECT(16, 9), PROGRESSIVE, false, + "32 1080p24 16:9 1920x1080p @ 23.98/24Hz"}, + {1920, 1080, 25, ASPECT(16, 9), PROGRESSIVE, false, + "33 1080p25 16:9 1920x1080p @ 25Hz"}, + {1920, 1080, 30, ASPECT(16, 9), PROGRESSIVE, false, + "34 1080p30 16:9 1920x1080p @ 29.97/30Hz"}, + {2880, 480, 60, ASPECT(4, 3), PROGRESSIVE, false, + "35 480p4x 4:3 (2880)x480p @ 59.94/60Hz"}, + {2880, 480, 60, ASPECT(16, 9), PROGRESSIVE, false, + "36 480p4xH 16:9 (2880)x480p @ 59.94/60Hz"}, + {2880, 576, 50, ASPECT(4, 3), PROGRESSIVE, false, + "37 576p4x 4:3 (2880)x576p @ 50Hz"}, + {2880, 576, 50, ASPECT(16, 9), PROGRESSIVE, false, + "38 576p4xH 16:9 (2880)x576p @ 50Hz"}, + {1920, 1080, 50, ASPECT(16, 9), INTERLACE, false, + "39 108Oi25 16:9 1920x1080i(1250 Total) @ 50Hz*"}, + {1920, 1080, 100, ASPECT(16, 9), INTERLACE, false, + "40 1080i50 16:9 1920x1080i @ 100Hz"}, + {1280, 720, 100, ASPECT(16, 9), PROGRESSIVE, false, + "41 720p100 16:9 1280x720p @ 100Hz"}, + {720, 576, 100, ASPECT(4, 3), PROGRESSIVE, false, + "42 576p100 4:3 720x576p @ 100Hz"}, + {720, 576, 100, ASPECT(16, 9), PROGRESSIVE, false, + "43 576p100H 16:9 720x576p @ 100Hz"}, + {720, 576, 100, ASPECT(4, 3), INTERLACE, false, + "44 576i50 4:3 720(1440)x576i @ 100Hz"}, + {720, 576, 100, ASPECT(16, 9), INTERLACE, false, + "45 576i50H 16:9 720(1440)x576i @ 100Hz"}, + {1920, 1080, 120, ASPECT(16, 9), INTERLACE, false, + "46 1080i60 16:9 
1920x1080i @ 119.88/120Hz"}, + {1280, 720, 120, ASPECT(16, 9), PROGRESSIVE, false, + "47 720p120 16:9 1280x720p @ 119.88/120Hz"}, + {720, 480, 120, ASPECT(4, 3), PROGRESSIVE, false, + "48 480p119 4:3 720x480p @ 119.88/120Hz"}, + {720, 480, 120, ASPECT(16, 9), PROGRESSIVE, false, + "49 480p119H 16:9 720x480p @ 119.88/120Hz"}, + {720, 480, 120, ASPECT(4, 3), INTERLACE, false, + "50 480i59 4:3 720(1440)x480i @ 119.88/120Hz"}, + {720, 480, 120, ASPECT(16, 9), INTERLACE, false, + "51 480i59H 16:9 720(1440)x480i @ 119.88/120Hz"}, + {720, 576, 200, ASPECT(4, 3), PROGRESSIVE, false, + "52 576p200 4:3 720x576p @ 200Hz"}, + {720, 576, 200, ASPECT(16, 9), PROGRESSIVE, false, + "53 576p200H 16:9 720x576p @ 200Hz"}, + {720, 576, 200, ASPECT(4, 3), INTERLACE, false, + "54 576i100 4:3 720(1440)x576i @ 200Hz"}, + {720, 576, 200, ASPECT(16, 9), INTERLACE, false, + "55 576i100H 16:9 720(1440)x576i @ 200Hz"}, + {720, 480, 240, ASPECT(4, 3), PROGRESSIVE, false, + "56 480p239 4:3 720x480p @ 239.76/240Hz"}, + {720, 480, 240, ASPECT(16, 9), PROGRESSIVE, false, + "57 480p239H 16:9 720x480p @ 239.76/240Hz"}, + {720, 480, 240, ASPECT(4, 3), INTERLACE, false, + "58 480i119 4:3 720(1440)x480i @ 239.76/240Hz"}, + {720, 480, 240, ASPECT(16, 9), INTERLACE, false, + "59 480i119H 16:9 720(1440)x480i @ 239.76/240Hz"}, + {1280, 720, 24, ASPECT(16, 9), PROGRESSIVE, false, + "60 720p24 16:9 1280x720p @ 23.98/24Hz"}, + {1280, 720, 25, ASPECT(16, 9), PROGRESSIVE, false, + "61 720p25 16:9 1280x720p @ 25Hz"}, + {1280, 720, 30, ASPECT(16, 9), PROGRESSIVE, false, + "62 720p30 16:9 1280x720p @ 29.97/30Hz"}, + {1920, 1080, 120, ASPECT(16, 9), PROGRESSIVE, false, + "63 1080p120 16:9 1920x1080 @ 119.88/120Hz"}, +}; + +/* device supported modes in CEA */ +enum { + CEA_MODE_640X480P_60HZ_4_3 = 0, + CEA_MODE_720X480P_60HZ_4_3 = 1, + CEA_MODE_720X480P_60HZ_16_9 = 2, + CEA_MODE_1280X720P_60HZ_16_9 = 3, + CEA_MODE_720X576P_50HZ_4_3 = 16, + CEA_MODE_720X576P_50HZ_16_9 = 17, +}; + +/* device supported modes in established timing */ +enum { + ESTABLISHED_MODE_800X600_60HZ = 0, + ESTABLISHED_MODE_640X480_60HZ = 5, +}; + +int init_edid_info(struct edid_info_struct *edid_info) +{ + edid_info->is_valid = false; + mutex_init(&edid_info->access_lock); + + return 0; +} + +/* Byte 35-37 of block-0 */ +static char *established_timing_str[] = { + "800x600 @ 60 Hz", + "800x600 @ 56 Hz", + "640x480 @ 75 Hz", + "640x480 @ 72 Hz", + "640x480 @ 67 Hz", + "640x480 @ 60 Hz", + "720x400 @ 88 Hz", + "720x400 @ 70 Hz", + + "1280x1024@75 Hz", + "1024x768@75 Hz", + "1024x768@70 Hz", + "1024x768@60 Hz", + "1024x768@87 Hz (Interlaced)", + "832x624@75 Hz", + "800x600@75 Hz", + "800x600@72 Hz", + + "", + "", + "", + "", + "", + "", + "", + "1152x870 @ 75 Hz", +}; + +/* E-EDID Video data block: */ +static char *vdb_modes_str[] = { + " 1 DMT0659 4:3 640x480p @ 59.94/60Hz", + " 2 480p 4:3 720x480p @ 59.94/60Hz", + " 3 480pH 16:9 720x480p @ 59.94/60Hz", + " 4 720p 16:9 1280x720p @ 59.94/60Hz", + " 5 1080i 16:9 1920x1080i @ 59.94/60Hz", + " 6 480i 4:3 720(1440)x480i @ 59.94/60Hz", + " 7 480iH 16:9 720(1440)x480i @ 59.94/60Hz", + " 8 240p 4:3 720(1440)x240p @ 59.94/60Hz", + " 9 240pH 16:9 720(1440)x240p @ 59.94/60Hz", + "10 480i4x 4:3 (2880)x480i @ 59.94/60Hz", + "11 480i4xH 16:9 (2880)x480i @ 59.94/60Hz", + "12 240p4x 4:3 (2880)x240p @ 59.94/60Hz", + "13 240p4xH 16:9 (2880)x240p @ 59.94/60Hz", + "14 480p2x 4:3 1440x480p @ 59.94/60Hz", + "15 480p2xH 16:9 1440x480p @ 59.94/60Hz", + "16 1080p 16:9 1920x1080p @ 59.94/60Hz", + "17 576p 4:3 720x576p @ 50Hz", + "18 576pH 16:9 
720x576p @ 50Hz", + "19 720p50 16:9 1280x720p @ 50Hz", + "20 1080i25 16:9 1920x1080i @ 50Hz*", + "21 576i 4:3 720(1440)x576i @ 50Hz", + "22 576iH 16:9 720(1440)x576i @ 50Hz", + "23 288p 4:3 720(1440)x288p @ 50Hz", + "24 288pH 16:9 720(1440)x288p @ 50Hz", + "25 576i4x 4:3 (2880)x576i @ 50Hz", + "26 576i4xH 16:9 (2880)x576i @ 50Hz", + "27 288p4x 4:3 (2880)x288p @ 50Hz", + "28 288p4xH 16:9 (2880)x288p @ 50Hz", + "29 576p2x 4:3 1440x576p @ 50Hz", + "30 576p2xH 16:9 1440x576p @ 50Hz", + "31 1080p50 16:9 1920x1080p @ 50Hz", + "32 1080p24 16:9 1920x1080p @ 23.98/24Hz", + "33 1080p25 16:9 1920x1080p @ 25Hz", + "34 1080p30 16:9 1920x1080p @ 29.97/30Hz", + "35 480p4x 4:3 (2880)x480p @ 59.94/60Hz", + "36 480p4xH 16:9 (2880)x480p @ 59.94/60Hz", + "37 576p4x 4:3 (2880)x576p @ 50Hz", + "38 576p4xH 16:9 (2880)x576p @ 50Hz", + "39 108Oi25 16:9 1920x1080i(1250 Total) @ 50Hz*", + "40 1080i50 16:9 1920x1080i @ 100Hz", + "41 720p100 16:9 1280x720p @ 100Hz", + "42 576p100 4:3 720x576p @ 100Hz", + "43 576p100H 16:9 720x576p @ 100Hz", + "44 576i50 4:3 720(1440)x576i @ 100Hz", + "45 576i50H 16:9 720(1440)x576i @ 100Hz", + "46 1080i60 16:9 1920x1080i @ 119.88/120Hz", + "47 720p120 16:9 1280x720p @ 119.88/120Hz", + "48 480p119 4:3 720x480p @ 119.88/120Hz", + "49 480p119H 16:9 720x480p @ 119.88/120Hz", + "50 480i59 4:3 720(1440)x480i @ 119.88/120Hz", + "51 480i59H 16:9 720(1440)x480i @ 119.88/120Hz", + "52 576p200 4:3 720x576p @ 200Hz", + "53 576p200H 16:9 720x576p @ 200Hz", + "54 576i100 4:3 720(1440)x576i @ 200Hz", + "55 576i100H 16:9 720(1440)x576i @ 200Hz", + "56 480p239 4:3 720x480p @ 239.76/240Hz", + "57 480p239H 16:9 720x480p @ 239.76/240Hz", + "58 480i119 4:3 720(1440)x480i @ 239.76/240Hz", + "59 480i119H 16:9 720(1440)x480i @ 239.76/240Hz", + "60 720p24 16:9 1280x720p @ 23.98/24Hz", + "61 720p25 16:9 1280x720p @ 25Hz", + "62 720p30 16:9 1280x720p @ 29.97/30Hz", + "63 1080p120 16:9 1920x1080 @ 119.88/120Hz", +}; + +int edid_dump_video_modes(u8 *edid_buf) +{ + int i, v1, v2, width, height, ret, aspect; + char *str_aspect[] = { "16:10", "4:3", "5:4", "16:9" }; + + switch (edid_buf[0]) { + case 0: + pr_info("block type 0: supported mode:\n"); + v1 = edid_buf[35] | edid_buf[36] << 8 | edid_buf[37] << 16; + /* Established timing */ + pr_info("established timing: {%02x, %02x, %02x}\n", + edid_buf[35], edid_buf[36], edid_buf[37]); + for (i = 0 ; i < 18; i++ ) { + v1 >>= 1; + if (v1 & 1) pr_info("%s\n", established_timing_str[i]); + }; + + pr_info("Standard timing identification:\n"); + /* Standard timing identification */ + for (i = 0; i < 8; i++) { + v1 = edid_buf[38+i*2]; + v2 = edid_buf[38+i*2+1]; + width = v1 * 8 + 248; + aspect = v2 >> 6; + switch (aspect) { + case 0: height = width * 10 / 16; break; + case 1: height = width * 3 / 4; break; + case 2: height = width * 4 / 5; break; + case 3: height = width * 9 / 16; break; + } + pr_info("%dx%d, %s, %d Hz\n", width, height, + str_aspect[aspect], (v2 & ~(3 << 6)) + 60); + } + ret = 0; + break; + case 2: + pr_info("block type 2: supported mode:\n"); + pr_info("edid_buf[4]=%x\n", edid_buf[4]); + for( i = 0; i < (edid_buf[4] & 0x1f); i++) { + pr_info("%s\n", vdb_modes_str[edid_buf[5+i] & 0x7f]); + } + ret = 0; + break; + + default: + ret = -EINVAL; + break; + } + return ret; +} + +bool edid_do_checksum(u8 *data) +{ + int i; + u8 sum = 0; + + for (i = 0; i < EDID_BLOCK_SIZE; i++) + sum += data[i]; + EDID_DBG("%s: result=%s\n", __func__, sum ? "fail" : "pass"); + return sum ? 
false : true; +} + +static bool edid_check_header(u8 *data) +{ + int ret = true; + + /* EDID 8 bytes header */ + static const u8 header[] = {0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0}; + + if (memcmp(data, header, 8) != 0) + ret = false; + + EDID_DBG("%s: result=%s\n", __func__, ret ? "pass" : "fail"); + if (!edid_do_checksum(data)) + { + pr_err("%s: checksum error\n", __func__); + + // Not all monitors have a proper checksum, so we'll ignore this error and just log the problem. + // ret = false; + } + return ret; +} + +struct edid_black_list_info { + u8 mfr_model[4]; + u8 prefer_modes[3]; +}; + +struct edid_black_list_info edid_black_list[] = { + { {0x4c, 0x2d, 0xa5, 0x02}, {0, 0, 0x40} }, // 720p only + { {0x4c, 0x2d, 0x0d, 0x05}, {0, 0, 0x40} }, // 720p only + + //{ {0x5a, 0x63, 0x20, 0x2b}, {0, 0, 0x40} }, // Viewsonic test +}; + +/* By comparing the Manufacture(0x8, 0x9) and Model field(0xa, 0xb) of EDID, + * to check if the attached TV is a known less-compatibile one. + */ +int edid_fixup_compatibility_list(struct hdmi_info *hdmi) +{ + int i, ret = -1; + + /* FIXME: magic numbers...*/ + for (i = 0; i < ARRAY_SIZE(edid_black_list); i++) { + if (!memcmp(hdmi->edid_buf + 8, edid_black_list[i].mfr_model, 4)){ +#if 0 + EDID_DBG("%s: found in blacklist %d\n", __func__, i); + EDID_DBG("%s: old timing = {%02x, %02x, %02x}\n", + __func__, + hdmi->edid_buf[35], hdmi->edid_buf[36], + hdmi->edid_buf[37]); + memcpy(hdmi->edid_buf + 35, + edid_black_list[i].prefer_modes, 3); + EDID_DBG("%s: new timing = {%02x, %02x, %02x}\n", + __func__, + hdmi->edid_buf[35], hdmi->edid_buf[36], + hdmi->edid_buf[37]); +#else + EDID_DBG("%s: found in compatibility %d\n", __func__, i); + memcpy(hdmi->edid_buf + 35, + edid_black_list[i].prefer_modes, 3); +#endif + ret = i; + break; + } + } + + return ret; +} + +u8 edid_simple_parsing(struct hdmi_info *hdmi) +{ + u8 *edid_buf = hdmi->edid_buf; + int i, index, ret = -EINVAL; + struct edid_info_struct *edid_info = &hdmi->edid_info; + unsigned v1, width, height, aspect; + unsigned extensions; + unsigned extension; + + EDID_DBG("%s\n", __func__); + if (!edid_check_header(edid_buf)) + { + pr_err("%s: incorrect header\n", __func__); + return INCORRECT_EDID_HEADER; + } + + // Retrieve the number of extensions in this EDID + extensions = edid_buf[126]; + EDID_DBG("%s: extensions=%d\n", __func__, extensions); + if (!extensions) + { + hdmi->edid_info.hdmi_sink = false; + return NO_861_EXTENSIONS; + } + + /* reset all supported */ + for (i = 0 ; i < ARRAY_SIZE(additional_timing_db); i++) + additional_timing_db[i].supported = false; + for (i = 0 ; i < ARRAY_SIZE(established_timing_db); i++) + established_timing_db[i].supported = false; + + /* Block 0: established timing */ + pr_info("established timing: {%02x, %02x, %02x}\n", + edid_buf[35], edid_buf[36], edid_buf[37]); + + v1 = edid_buf[35] | edid_buf[36] << 8; + if (edid_buf[37] & 0x80) + v1 |= 0x00010000; + + for (i = 0 ; i < 17; i++ ) // 17 bits defined in established timing + established_timing_db[i].supported = ((v1 >>= 1) & 1) ; + + /* standard timing identification */ + for (i = 0; i < 8; i++) { + width = (edid_buf[38 + (i * 2)] * 8) + 248; + v1 = edid_buf[38 + (i * 2) + 1]; + switch (v1 >> 6) { + case 0: height = width * 10 / 16; aspect = ASPECT(16, 10); break; + case 1: height = width * 3 / 4; aspect = ASPECT(4, 3); break; + case 2: height = width * 4 / 5; aspect = ASPECT(5, 4); break; + case 3: height = width * 9 / 16; aspect = ASPECT(16, 9); break; + } + standard_timing_db[i].width = width; + 
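/* EDID standard timing: width = (byte + 31) * 8; the second byte's top two bits select the aspect ratio and its low six bits give (refresh rate - 60), matching the decode above. */ +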
standard_timing_db[i].height = height; + standard_timing_db[i].aspect = aspect; + standard_timing_db[i].refresh_rate = (v1 & 0x3F) + 60; + standard_timing_db[i].supported = true; + } + + for (extension = 0; extension < extensions; extension++) + { + unsigned baseOffset = (extension + 1) * 0x80; + unsigned dtdStart = 0; + unsigned nativeFormats = 0; + unsigned dbcOffset; + + EDID_DBG("Extension %d\n", extension +1); + if (edid_buf[baseOffset] != 0x02) + { + EDID_DBG(" Extension is not CEA EDID format. Skipping.\n"); + continue; + } + + if (edid_buf[baseOffset+1] < 0x03) + { + EDID_DBG(" CEA EDID Extension is below version 3. Skipping.\n"); + continue; + } + + dtdStart = edid_buf[baseOffset+2]; + + edid_info->under_scan = edid_buf[baseOffset+3] & EDID_BIT(7); + edid_info->basic_audio = edid_buf[baseOffset+3] & EDID_BIT(6); + edid_info->ycbcr_4_4_4 = edid_buf[baseOffset+3] & EDID_BIT(5); + edid_info->ycbcr_4_2_2 = edid_buf[baseOffset+3] & EDID_BIT(4); + + nativeFormats = edid_buf[baseOffset+3] & 0x04; + dbcOffset = 4; + + while (dbcOffset < dtdStart) + { + unsigned blockType = edid_buf[baseOffset + dbcOffset] >> 5; + unsigned blockLen = edid_buf[baseOffset + dbcOffset] & 0x1f; + unsigned byte; + + EDID_DBG(" Block Type: %d Block Length: %d\n", blockType, blockLen); + + // Check for an audio data block + if (blockType == AUDIO_D_BLOCK) + { + edid_info->basic_audio = true; + EDID_DBG(" CEA3 Audio Data Block found.\n"); + dbcOffset += blockLen + 1; + continue; + } + + // Check for a vendor data block + if (blockType == VENDOR_SPEC_D_BLOCK) + { + EDID_DBG(" CEA3 Vendor Block found.\n"); + // This may be an HDMI vendor block, and if so, we need to parse it + if (edid_buf[baseOffset + dbcOffset + 1] == 0x03 && + edid_buf[baseOffset + dbcOffset + 2] == 0x0C && + edid_buf[baseOffset + dbcOffset + 3] == 0x00 ) + { + // We found the HDMI block + EDID_DBG(" CEA3 HDMI Vendor Block found.\n"); + hdmi->edid_info.hdmi_sink = true; + } + dbcOffset += blockLen + 1; + continue; + } + if (blockType != VIDEO_D_BLOCK) + { + dbcOffset += blockLen + 1; + continue; + } + + // The block will be an array of indexes + for (byte = 1; byte < blockLen; byte++) + { + index = edid_buf[baseOffset + dbcOffset + byte] & 0x7f; + + if (index > 63) + { + EDID_DBG("Invalid index in EDID Video block. Ignoring.\n"); + } + else + { + additional_timing_db[index-1].supported = true; + EDID_DBG("%s\n", additional_timing_db[index-1].descrption); + } + } + + dbcOffset += blockLen + 1; + } + } + + // As a cheat, we're replacing the existing timings with our own "custom" + // definition of these bytes. 
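+ // After this rewrite, bytes 35-37 only advertise the modes handled below: 480p, 576p, 720p, 800x600 and 640x480.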
+ edid_buf[35] = 0; + edid_buf[36] = 0; + edid_buf[37] = 0; + + /* edid_buf[37] bit4: 480p, bit5: 576p, bit6: 720p */ + if (additional_timing_db[CEA_MODE_720X480P_60HZ_4_3].supported || + additional_timing_db[CEA_MODE_720X480P_60HZ_16_9].supported) { + EDID_DBG("decide to support 480P\n"); + edid_buf[37] |= (1<<4); + } + + if (additional_timing_db[CEA_MODE_720X576P_50HZ_4_3].supported || + additional_timing_db[CEA_MODE_720X576P_50HZ_16_9].supported) { + EDID_DBG("decide to support 576P\n"); + edid_buf[37] |= (1<<5); + } + + if (additional_timing_db[CEA_MODE_1280X720P_60HZ_16_9].supported) { + EDID_DBG("decide to support 720P\n"); + edid_buf[37] |= (1<<6); + } + + if (established_timing_db[ESTABLISHED_MODE_800X600_60HZ].supported) { + EDID_DBG("decide to support 800x600\n"); + edid_buf[36] |= (1<<6); + } + + if (established_timing_db[ESTABLISHED_MODE_640X480_60HZ].supported) { + EDID_DBG("decide to support 640x480\n"); + edid_buf[35] |= (1<<5); + } + edid_fixup_compatibility_list(hdmi); + + return ret; +} + +// FIXME: modify the checking routines into inline function. +bool edid_is_video_mode_supported(struct video_mode *vmode) +{ + int i; + struct video_mode *vmode_db; + + vmode_db = established_timing_db; + for (i = 0, vmode_db = established_timing_db; + i < ARRAY_SIZE(established_timing_db); i++) + if ( (vmode->width == vmode_db[i].width) && + (vmode->height == vmode_db[i].height) && + (vmode->refresh_rate == vmode_db[i].refresh_rate ) && + (vmode_db[i].interlaced == PROGRESSIVE) && + (vmode_db[i].supported == true )) + return true; + for (i = 0, vmode_db = standard_timing_db; + i < ARRAY_SIZE(standard_timing_db); i++) + if ( (vmode->width == vmode_db[i].width) && + (vmode->height == vmode_db[i].height) && + (vmode->refresh_rate == vmode_db[i].refresh_rate ) && + (vmode_db[i].interlaced == PROGRESSIVE) && + (vmode_db[i].supported == true )) + return true; + for (i = 0, vmode_db = additional_timing_db; + i < ARRAY_SIZE(additional_timing_db); i++) + if ( (vmode->width == vmode_db[i].width) && + (vmode->height == vmode_db[i].height) && + (vmode->refresh_rate == vmode_db[i].refresh_rate ) && + (vmode_db[i].interlaced == PROGRESSIVE) && + (vmode_db[i].supported == true )) + return true; + return false; +} + +bool edid_check_sink_type(struct hdmi_info *hdmi) +{ + EDID_DBG("%s: ret=%d\n", __func__, hdmi->edid_info.hdmi_sink); + return hdmi->edid_info.hdmi_sink; +} + +bool edid_check_audio_support(struct hdmi_info *hdmi) +{ + EDID_DBG("%s: ret=%d\n", __func__, hdmi->edid_info.basic_audio); + return hdmi->edid_info.basic_audio; +} + +int edid_dump_hex(u8 *src, int src_size, char *output, int output_size) +{ + char line[80]; + static char hextab[] = "0123456789abcdef"; + int i, j, n = 0, v, len, offset, line_size; + + len = src_size; + memset(line, ' ', 79); + line[79] = '\0'; + offset = strlen("0000 | "); + line_size = offset + 3 * 16 + 1; + for (i = 0; i < len / 16 ; i++) { + scnprintf(line, offset + 1, "%04x | ", (i << 4)); + for (j = 0; j < 16 ; j++) { + v = src[i * 16 + j]; + line[offset + j * 3] = hextab[v / 16]; + line[offset + j * 3 + 1] = hextab[v % 16]; + } + line[line_size - 1] = '\n'; + strncpy(output+ n, line, line_size); + if ((n + line_size) > output_size) + break; + else + n += line_size; + } + + return n; +} + +/*============================================================================*/ +static ssize_t edid_dbg_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static char hex_buff[2048]; + +static ssize_t 
edid_buffered_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + int n; + struct hdmi_info *hdmi = (struct hdmi_info*)filp->private_data; + + edid_simple_parsing(hdmi); + n = edid_dump_hex(hdmi->edid_buf, (hdmi->edid_buf[0x7e] + 1) * 128, + hex_buff, 2048); + return simple_read_from_buffer(buf, count, ppos, hex_buff, n); +} + +static struct file_operations edid_debugfs_fops[] = { + { + .open = edid_dbg_open, + .read = edid_buffered_read, + .write = NULL, + } +}; + +// TODO: error handling +int edid_debugfs_init(struct hdmi_info *hdmi) +{ + //int ret; + struct dentry *edid_dent; + + edid_dent = debugfs_create_dir("edid", hdmi->debug_dir); + if (IS_ERR(edid_dent)) + return PTR_ERR(edid_dent); + + debugfs_create_file("hex_dump", 0444, edid_dent, hdmi, + &edid_debugfs_fops[0]); + + return 0; +} + diff --git a/drivers/video/msm/hdmi/fb-hdmi.c b/drivers/video/msm/hdmi/fb-hdmi.c new file mode 100644 index 0000000000000..ec3bc0ccadcbd --- /dev/null +++ b/drivers/video/msm/hdmi/fb-hdmi.c @@ -0,0 +1,914 @@ +/* + * Copyright (C) 2009 HTC + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Referenced from drivers/video/msm/msm_fb.c, Google Incorporated. + * + */ +#define DEBUG +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_HTC_HEADSET_MGR +#include +#endif + +#include "include/fb-hdmi.h" +#include "include/sil902x.h" + +#if 1 +#define HDMI_DBG(s...) printk("[hdmi/fb]" s) +#else +#define HDMI_DBG(s...) 
do {} while (0) +#endif + +#define BITS_PER_PIXEL 16 + +struct update_info_t { + int left; + int top; + int eright; /* exclusive */ + int ebottom; /* exclusive */ + unsigned yoffset; +}; + +struct hdmifb_info { + struct fb_info *fb; + struct msm_panel_data *panel; + struct notifier_block fb_hdmi_event; + struct msmfb_callback dma_callback; + struct msmfb_callback vsync_callback; + struct update_info_t update_info; + struct early_suspend earlier_suspend; + struct early_suspend early_suspend; + spinlock_t update_lock; + int xres; + int yres; + unsigned long state; + atomic_t use_count; + int vsyncMode; + int mirroring; + int doubleBuffering; + struct mdp_blit_req* vsyncBlitReq; + struct mdp_blit_req mirrorReq; + struct mirror_statistics mirror_stats; +}; + +static struct mdp_device *mdp; +static struct hdmi_device *hdmi; + +static unsigned PP[16]; + +void hdmi_pre_change(struct hdmi_info *hdmi); +void hdmi_post_change(struct hdmi_info *info, struct fb_var_screeninfo *var); + +static int hdmifb_open(struct fb_info *info, int user) +{ + return 0; +} + +static int hdmifb_release(struct fb_info *info, int user) +{ + return 0; +} + +static int hdmifb_read(struct fb_info *info, int user) +{ + pr_info("%s\n", __func__); + return 0; +} +static int hdmifb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) +{ + u32 size; + + if (mdp->check_output_format(mdp, var->bits_per_pixel)) + return -EINVAL; + if (hdmi->check_res(hdmi, var)) + return -EINVAL; + + size = var->xres_virtual * var->yres_virtual * + (var->bits_per_pixel >> 3); + if (size > info->fix.smem_len) + return -EINVAL; + return 0; +} + +static int hdmifb_set_par(struct fb_info *info) +{ + struct fb_var_screeninfo *var = &info->var; + struct fb_fix_screeninfo *fix = &info->fix; + struct hdmifb_info *hdmi_fb = info->par; + struct msm_panel_data *panel = hdmi_fb->panel; + struct msm_lcdc_timing *timing; + struct hdmi_info *hinfo = container_of(hdmi, struct hdmi_info, + hdmi_dev); + + HDMI_DBG("%s\n", __func__); + /* we only support RGB ordering for now */ + if (var->bits_per_pixel == 32 || var->bits_per_pixel == 24) { + var->red.offset = 0; + var->red.length = 8; + var->green.offset = 8; + var->green.length = 8; + var->blue.offset = 16; + var->blue.length = 8; + } else if (var->bits_per_pixel == 16) { + var->red.offset = 11; + var->red.length = 5; + var->green.offset = 5; + var->green.length = 6; + var->blue.offset = 0; + var->blue.length = 5; + } else + return -1; + + HDMI_DBG("set res (%d, %d)\n", var->xres, var->yres); + timing = hdmi->set_res(hdmi, var); + panel->adjust_timing(panel, timing, var->xres, var->yres); + hdmi_post_change(hinfo, var); + + mdp->set_output_format(mdp, var->bits_per_pixel); + + hdmi_fb->xres = var->xres; + hdmi_fb->yres = var->yres; + fix->line_length = var->xres * var->bits_per_pixel / 8; + return 0; +} + +/* core update function */ +static void +hdmifb_pan_update(struct fb_info *info, uint32_t left, uint32_t top, + uint32_t eright, uint32_t ebottom, uint32_t yoffset) +{ + struct hdmifb_info *hdmi_fb = info->par; + struct msm_panel_data *panel = hdmi_fb->panel; + unsigned long irq_flags; + + /* printk(KERN_DEBUG "%s\n", __func__); */ + if ((test_bit(fb_enabled, &hdmi_fb->state) == 0) || + (test_bit(hdmi_enabled, &hdmi_fb->state) == 0)) + return; + + spin_lock_irqsave(&hdmi_fb->update_lock, irq_flags); + hdmi_fb->update_info.left = left; + hdmi_fb->update_info.top = top; + hdmi_fb->update_info.eright = eright; + hdmi_fb->update_info.ebottom = ebottom; + hdmi_fb->update_info.yoffset = yoffset; + 
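/* the DMA itself is kicked later from hdmi_handle_vsync() once the panel reports vsync */ +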
spin_unlock_irqrestore(&hdmi_fb->update_lock, irq_flags); + panel->request_vsync(panel, &hdmi_fb->vsync_callback); +} + +/* fb ops, fb_pan_display */ +static int +hdmifb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) +{ + /* full update */ + hdmifb_pan_update(info, 0, 0, info->var.xres, info->var.yres, + var->yoffset); + return 0; +} + +static void hdmifb_fillrect(struct fb_info *p, const struct fb_fillrect *rect) +{ + cfb_fillrect(p, rect); + hdmifb_pan_update(p, rect->dx, rect->dy, rect->dx + rect->width, + rect->dy + rect->height, 0); +} + +static void hdmifb_copyarea(struct fb_info *p, const struct fb_copyarea *area) +{ + cfb_copyarea(p, area); + hdmifb_pan_update(p, area->dx, area->dy, area->dx + area->width, + area->dy + area->height, 0); +} + +static void hdmifb_imageblit(struct fb_info *p, const struct fb_image *image) +{ + cfb_imageblit(p, image); + hdmifb_pan_update(p, image->dx, image->dy, image->dx + image->width, + image->dy + image->height, 0); +} + +static int hdmifb_change_mode(struct fb_info *info, unsigned int mode) +{ + struct hdmifb_info *hdmi_fb = info->par; + + /* printk(KERN_DEBUG "%s mode = %d\n", __func__, mode); */ + + if (mode) + set_bit(hdmi_mode, &hdmi_fb->state); + else + clear_bit(hdmi_mode, &hdmi_fb->state); + return 0; +} + +struct hdmifb_info *_hdmi_fb; +int hdmifb_get_mode(void) +{ + return test_bit(hdmi_mode, &_hdmi_fb->state); +} + +bool hdmifb_suspending = false; + +static int hdmifb_pause(struct fb_info *fb, unsigned int mode) +{ + int ret = 0; + struct hdmifb_info *hdmi_fb = fb->par; + struct msm_panel_data *panel = hdmi_fb->panel; + struct hdmi_info *info = container_of(hdmi, struct hdmi_info, + hdmi_dev); + + pr_info("%s: %d %s\n", __func__, atomic_read(&hdmi_fb->use_count), + mode == 1 ? "pause" : "resume"); + + if (mode == 1) { + hdmifb_suspending = false; + HDMI_DBG("%s: hdmifb_suspending = false\n", __func__); + /* pause */ + if (atomic_read(&hdmi_fb->use_count) == 0) + goto done; + if (atomic_dec_return(&hdmi_fb->use_count) == 0) { + hdmi_pre_change(info); + ret = panel->blank(panel); + clear_bit(hdmi_enabled, &hdmi_fb->state); +#ifdef CONFIG_HTC_HEADSET_MGR + switch_send_event(BIT_HDMI_AUDIO, 0); +#endif + } + } else if (mode == 0) { + /* resume */ + if (atomic_inc_return(&hdmi_fb->use_count) == 1) { + hdmi_pre_change(info); + ret = panel->unblank(panel); +/* + // set timing again to prevent TV been out of range + var = &fb->var; + timing = hdmi->set_res(hdmi, var); + panel->adjust_timing(panel, timing, var->xres, var->yres); + hdmi_post_change(info, var); +*/ + set_bit(hdmi_enabled, &hdmi_fb->state); +#ifdef CONFIG_HTC_HEADSET_MGR + switch_send_event(BIT_HDMI_AUDIO, 1); +#endif + } + } else + ret = -EINVAL; +done: + return ret; +} + +static int hdmifb_blit(struct fb_info *info, void __user *p) +{ + struct mdp_blit_req req; + struct mdp_blit_req_list req_list; + int i; + int ret; + + if (copy_from_user(&req_list, p, sizeof(req_list))) + return -EFAULT; + + for (i = 0; i < req_list.count; i++) { + struct mdp_blit_req_list *list = + (struct mdp_blit_req_list *)p; + if (copy_from_user(&req, &list->req[i], sizeof(req))) + return -EFAULT; + req.flags |= MDP_DITHER; + + /* Copy the requested blit in case we're mirroring */ + if (_hdmi_fb->mirroring) + { + int previousValue = _hdmi_fb->doubleBuffering; + + memcpy(&_hdmi_fb->mirrorReq, &req, sizeof(struct mdp_blit_req)); + + /* Default double-buffering off */ + _hdmi_fb->doubleBuffering = 0; + + /* Are we rotating? 
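MDP_ROT_90 / MDP_ROT_270 live in the low flag bits of req.flags, hence the 0x7 mask below.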
*/ + if ((req.flags & 0x7) == MDP_ROT_90 || (req.flags & 0x7) == MDP_ROT_270) + { + /* Are we scaling at the same time? */ + if (req.src_rect.w != req.dst_rect.w || req.src_rect.h != req.dst_rect.h) + { + _hdmi_fb->doubleBuffering = 1; + } + } + + if (previousValue == 1 && _hdmi_fb->doubleBuffering == 0) + { + // Switch us back to buffer 0 + mdp->dma(mdp, _hdmi_fb->fb->fix.smem_start, _hdmi_fb->fb->var.xres * 2, + _hdmi_fb->fb->var.xres, _hdmi_fb->fb->var.yres, 0, 0, + &_hdmi_fb->dma_callback, _hdmi_fb->panel->interface_type); + } + } + + ret = mdp->blit(mdp, info, &req); + if (ret) + return ret; + } + return 0; +} + +void reportUnderflow(void) +{ + if (_hdmi_fb) _hdmi_fb->mirror_stats.underflows++; +} + +int hdmi_usePanelSync(void) +{ + if (_hdmi_fb->mirroring && + (_hdmi_fb->vsyncMode == VSYNC_NONE || _hdmi_fb->vsyncMode == VSYNC_HDMI_ONLY)) + return 0; + return 1; +} + +int hdmi_useHdmiSync(void) +{ + if (_hdmi_fb->mirroring && + (_hdmi_fb->vsyncMode == VSYNC_NONE || _hdmi_fb->vsyncMode == VSYNC_PANEL_ONLY)) + return 0; + return 1; +} + +void blit_on_vsync(struct mdp_blit_req* blitReq) +{ + struct msm_panel_data *panel = _hdmi_fb->panel; + + /* Request mirror blit and vsync callback */ + _hdmi_fb->vsyncBlitReq = blitReq; + panel->request_vsync(panel, &_hdmi_fb->vsync_callback); + return; +} + +void hdmi_DoBlit(int fb0Offset) +{ + int yoffset = 0; + + /* Always track frame counts, so we can measure FPS of panel-only mode */ + _hdmi_fb->mirror_stats.frames++; + + /* This handles the disabled case */ + if (_hdmi_fb->mirroring == 0 || _hdmi_fb->mirrorReq.src.width == 0) + return; + + /* Verify HDMI is enabled */ + if ((test_bit(fb_enabled, &_hdmi_fb->state) == 0) || + (test_bit(hdmi_enabled, &_hdmi_fb->state) == 0)) + return; + + /* Set the proper offsets */ + _hdmi_fb->mirrorReq.src.offset = fb0Offset; + + if (_hdmi_fb->doubleBuffering) + { + if (_hdmi_fb->mirrorReq.dst.offset == 0) + { + yoffset = _hdmi_fb->mirrorReq.dst.height; + _hdmi_fb->mirrorReq.dst.offset = yoffset * _hdmi_fb->mirrorReq.dst.width * BITS_PER_PIXEL / 8; + } + else + { + _hdmi_fb->mirrorReq.dst.offset = 0; + yoffset = 0; + } + } + else + { + if (_hdmi_fb->mirrorReq.dst.offset != 0) + { + // Force us back to the primary buffer + _hdmi_fb->mirrorReq.dst.offset = 0; + mdp->dma(mdp, _hdmi_fb->fb->fix.smem_start, _hdmi_fb->fb->var.xres * 2, + _hdmi_fb->fb->var.xres, _hdmi_fb->fb->var.yres, 0, 0, + &_hdmi_fb->dma_callback, _hdmi_fb->panel->interface_type); + } + } + + /* Either schedule the blit on vsync, or do it now */ + if (hdmi_useHdmiSync() && !_hdmi_fb->doubleBuffering) + { + blit_on_vsync(&_hdmi_fb->mirrorReq); + } + else + { + mdp->blit(mdp, _hdmi_fb->fb, &_hdmi_fb->mirrorReq); + } + + if (_hdmi_fb->doubleBuffering) + { + if (hdmi_useHdmiSync()) + { + hdmifb_pan_update(_hdmi_fb->fb, 0, 0, + _hdmi_fb->fb->var.xres, _hdmi_fb->fb->var.yres, yoffset); + } + else + { + mdp->dma(mdp, _hdmi_fb->mirrorReq.dst.offset + _hdmi_fb->fb->fix.smem_start, + _hdmi_fb->fb->var.xres * 2, _hdmi_fb->fb->var.xres, _hdmi_fb->fb->var.yres, 0, 0, + &_hdmi_fb->dma_callback, _hdmi_fb->panel->interface_type); + } + } + + return; +} + +enum ioctl_cmd_index { + CMD_SET_MODE, + CMD_GET_MODE, + CMD_DISABLE, + CMD_ENABLE, + CMD_GET_STATE, + CMD_BLIT, + CMD_CABLE_STAT, + CMD_ESTABLISH_TIMING, +}; + +static char *cmd_str[] = { + "HDMI_SET_MODE", + "HDMI_GET_MODE", + "HDMI_DISABLE", + "HDMI_ENABLE", + "HDMI_GET_STATE", + "HDMI_BLIT", + "HDMI_CABLE_STAT", + "HDMI_ESTABLISH_TIMING", +}; + +static int hdmifb_ioctl(struct fb_info *p, unsigned int cmd, 
unsigned long arg) +{ + struct hdmifb_info *hdmi_fb = p->par; + void __user *argp = (void __user *)arg; + unsigned int val; + int ret = -EINVAL; + struct hdmi_info *hinfo = container_of(hdmi, struct hdmi_info, + hdmi_dev); + +/* + if (cmd != HDMI_BLIT) + HDMI_DBG("%s, cmd=%d=%s\n", __func__, cmd - HDMI_SET_MODE, + cmd_str[cmd-HDMI_SET_MODE]); +*/ + + switch (cmd) { + case HDMI_SET_MODE: + get_user(val, (unsigned __user *) arg); + //pr_info("[hdmi] SET_MODE: %d\n", val); + ret = hdmifb_change_mode(p, val); + _hdmi_fb->mirroring = 0; + break; + case HDMI_GET_MODE: +/* + pr_info("[hdmi] GET_MODE: %d\n", + test_bit(hdmi_mode, &hdmi_fb->state)); +*/ + ret = put_user(test_bit(hdmi_mode, &hdmi_fb->state), + (unsigned __user *) arg); + break; + case HDMI_DISABLE: + get_user(val, (unsigned __user *) arg); + _hdmi_fb->mirroring = 0; + ret = hdmifb_pause(p, 1); + break; + case HDMI_ENABLE: + get_user(val, (unsigned __user *) arg); + if (val == 0x03040601) + { + ret = hdmifb_pause(p, 0); + } + else + { + ret = hdmifb_pause(p, 1); + } + // Always disable mirroring if someone sends an HDMI_ENABLE request + hdmi_fb->mirroring = 0; + break; + case HDMI_GET_STATE: + ret = put_user(test_bit(hdmi_enabled, &hdmi_fb->state), + (unsigned __user *) arg); + break; + case HDMI_BLIT: + if (test_bit(hdmi_enabled, &hdmi_fb->state)) + ret = hdmifb_blit(p, argp); + else + ret = -EPERM; + break; + case HDMI_CABLE_STAT: { + int connect; + ret = hdmi->get_cable_state(hdmi, &connect); + ret = put_user(connect, (unsigned __user *) arg); + break; + } + case HDMI_ESTABLISH_TIMING: { + u8 tmp[3]; + hdmi->get_establish_timing(hdmi, tmp); + ret = copy_to_user((unsigned __user *) arg, tmp, 3); + if (ret) + ret = -EFAULT; + break; + } + case HDMI_GET_EDID: + ret = copy_to_user((unsigned __user *) arg, + hinfo->edid_buf, 512); + break; + case HDMI_GET_DISPLAY_INFO: { + struct display_info dinfo; + u8 *ptr = hinfo->edid_buf; + dinfo.visible_width = + (((u32)ptr[68] & 0xf0) << 4) | ptr[66]; + dinfo.visible_height = + (((u32)ptr[68] & 0x0f) << 8) | ptr[67]; + dinfo.resolution_width = + (((u32)ptr[58] & 0xf0) << 4) | ptr[56]; + dinfo.resolution_height = + (((u32)ptr[61] & 0xf0) << 4) | ptr[59]; + ret = copy_to_user((unsigned __user *) arg, + &dinfo, sizeof(dinfo)); + break; + } + + case HDMI_GET_MIRRORING: + ret = put_user(hdmi_fb->mirroring, (unsigned __user *) arg); + break; + case HDMI_SET_MIRRORING: + get_user(val, (unsigned __user *) arg); + hdmi_fb->mirroring = val; + memset(&hdmi_fb->mirror_stats, 0, sizeof(struct mirror_statistics)); + hdmi_fb->mirror_stats.statisticsTime = ktime_to_ns(ktime_get()); + break; + case HDMI_GET_STATISTICS: + { + struct mirror_statistics temp; + + memcpy(&temp, &hdmi_fb->mirror_stats, sizeof(struct mirror_statistics)); + temp.statisticsTime = ktime_to_ns(ktime_get()) - hdmi_fb->mirror_stats.statisticsTime; + ret = copy_to_user((unsigned __user *) arg, &temp, sizeof(struct mirror_statistics)); + } + break; + case HDMI_CLEAR_STATISTICS: + memset(&hdmi_fb->mirror_stats, 0, sizeof(struct mirror_statistics)); + hdmi_fb->mirror_stats.statisticsTime = ktime_to_ns(ktime_get()); + break; + case HDMI_GET_VSYNC_MODE: + ret = put_user(hdmi_fb->vsyncMode, (unsigned __user *) arg); + break; + case HDMI_SET_VSYNC_MODE: + get_user(val, (unsigned __user *) arg); + hdmi_fb->vsyncMode = val; + ret = 0; + break; + default: + printk(KERN_ERR "hdmi: unknown cmd, cmd = %d\n", cmd); + } + return ret; +} + +static struct fb_ops hdmi_fb_ops = { + .owner = THIS_MODULE, + .fb_open = hdmifb_open, + .fb_release = hdmifb_release, 
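+ /* mode-set and update paths: check_var/set_par reprogram the HDMI timing, while pan_display and the cfb drawing hooks all funnel into hdmifb_pan_update() */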
+ .fb_check_var = hdmifb_check_var, + .fb_set_par = hdmifb_set_par, + .fb_pan_display = hdmifb_pan_display, + .fb_fillrect = hdmifb_fillrect, + .fb_copyarea = hdmifb_copyarea, + .fb_imageblit = hdmifb_imageblit, + .fb_ioctl = hdmifb_ioctl, + .fb_read = hdmifb_read, +}; + +#ifdef CONFIG_HAS_EARLYSUSPEND +static void hdmifb_suspend(struct early_suspend *h) +{ + struct hdmifb_info *hdmi_fb = container_of(h, struct hdmifb_info, + early_suspend); + struct msm_panel_data *panel = hdmi_fb->panel; + + HDMI_DBG("%s, use_count=%d\n", __func__, + atomic_read(&hdmi_fb->use_count)); + hdmifb_suspending = true; + HDMI_DBG("%s: hdmifb_suspending = true\n", __func__); + if (atomic_read(&hdmi_fb->use_count) && + false == test_bit(hdmi_enabled, &hdmi_fb->state) + ) { + if (panel->blank) + panel->blank(panel); + } + + if (panel->suspend) + panel->suspend(panel); + + clear_bit(hdmi_enabled, &hdmi_fb->state); + clear_bit(fb_enabled, &hdmi_fb->state); +} + +static void hdmifb_resume(struct early_suspend *h) +{ + struct hdmifb_info *hdmi_fb = container_of(h, struct hdmifb_info, + early_suspend); + struct msm_panel_data *panel = hdmi_fb->panel; + + HDMI_DBG("%s\n", __func__); + if (panel->resume) + panel->resume(panel); + + atomic_set(&hdmi_fb->use_count, 0); + set_bit(fb_enabled, &hdmi_fb->state); +} +#endif + +static void setup_fb_info(struct hdmifb_info *hdmi_fb) +{ + struct fb_info *fb_info = hdmi_fb->fb; + int r; + + /* finish setting up the fb_info struct */ + strncpy(fb_info->fix.id, "hdmi_fb", 16); + fb_info->fix.ypanstep = 1; + + fb_info->fbops = &hdmi_fb_ops; + fb_info->flags = FBINFO_DEFAULT; + + fb_info->fix.type = FB_TYPE_PACKED_PIXELS; + fb_info->fix.visual = FB_VISUAL_TRUECOLOR; + fb_info->fix.line_length = hdmi_fb->xres * 2; + + fb_info->var.xres = hdmi_fb->xres; + fb_info->var.yres = hdmi_fb->yres; + fb_info->var.width = hdmi_fb->panel->fb_data->width; + fb_info->var.height = hdmi_fb->panel->fb_data->height; + fb_info->var.xres_virtual = hdmi_fb->xres; + fb_info->var.yres_virtual = hdmi_fb->yres * 2; + fb_info->var.bits_per_pixel = BITS_PER_PIXEL; + fb_info->var.accel_flags = 0; + fb_info->var.yoffset = 0; + + fb_info->var.red.offset = 11; + fb_info->var.red.length = 5; + fb_info->var.red.msb_right = 0; + fb_info->var.green.offset = 5; + fb_info->var.green.length = 6; + fb_info->var.green.msb_right = 0; + fb_info->var.blue.offset = 0; + fb_info->var.blue.length = 5; + fb_info->var.blue.msb_right = 0; + + r = fb_alloc_cmap(&fb_info->cmap, 16, 0); + fb_info->pseudo_palette = PP; + + PP[0] = 0; + for (r = 1; r < 16; r++) + PP[r] = 0xffffffff; +} + +static int +setup_fbmem(struct hdmifb_info *hdmi_fb, struct platform_device *pdev) +{ + struct fb_info *fb = hdmi_fb->fb; + struct resource *res; + unsigned long size = hdmi_fb->xres * hdmi_fb->yres * + (BITS_PER_PIXEL >> 3) * 2; + unsigned char *fbram; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + + /* check the resource is large enough to fit the fb */ + if (resource_size(res) < size) { + printk(KERN_ERR "allocated resource(%d) is too small(%lu)" + "for fb\n", resource_size(res), size); + return -ENOMEM; + } + + fb->fix.smem_start = res->start; + fb->fix.smem_len = resource_size(res); + + fbram = ioremap(res->start, resource_size(res)); + if (fbram == 0) { + printk(KERN_ERR "hdmi_fb: cannot allocate fbram!\n"); + return -ENOMEM; + } + + fb->screen_base = fbram; + memset(fbram, 0, resource_size(res)); + + printk(KERN_DEBUG "HDMI FB: 0x%x 0x%x\n", res->start, res->end); + return 0; +} + +/* Called from dma 
interrupt handler, must not sleep */ +static void hdmi_handle_dma(struct msmfb_callback *callback) +{ + /* printk(KERN_DEBUG "%s\n", __func__); */ +} + +/* Called from vsync interrupt handler, must not sleep */ +static void hdmi_handle_vsync(struct msmfb_callback *callback) +{ + uint32_t x, y, w, h; + unsigned yoffset; + unsigned addr; + unsigned long irq_flags; + struct fb_info *mirror_fb = registered_fb[0], *fb_hdmi; + struct hdmifb_info *hdmi = container_of(callback, struct hdmifb_info, + vsync_callback); + struct msm_panel_data *panel = hdmi->panel; + + /* Handle blitting requests */ + if (hdmi->vsyncBlitReq) + { + mdp->blit(mdp, _hdmi_fb->fb, hdmi->vsyncBlitReq); + hdmi->vsyncBlitReq = NULL; + return; + } + + spin_lock_irqsave(&hdmi->update_lock, irq_flags); + x = hdmi->update_info.left; + y = hdmi->update_info.top; + w = hdmi->update_info.eright - x; + h = hdmi->update_info.ebottom - y; + yoffset = hdmi->update_info.yoffset; + hdmi->update_info.left = hdmi->xres + 1; + hdmi->update_info.top = hdmi->yres + 1; + hdmi->update_info.eright = 0; + hdmi->update_info.ebottom = 0; + if (unlikely(w > hdmi->xres || h > hdmi->yres || + w == 0 || h == 0)) { + printk(KERN_INFO "invalid update: %d %d %d " + "%d\n", x, y, w, h); + goto error; + } + spin_unlock_irqrestore(&hdmi->update_lock, irq_flags); + + addr = ((hdmi->xres * (yoffset + y) + x) * 2); + if (test_bit(hdmi_mode, &hdmi->state) == 0) { + mdp->dma(mdp, addr + mirror_fb->fix.smem_start, + hdmi->xres * 2, w, h, x, y, &hdmi->dma_callback, + panel->interface_type); + } else { + fb_hdmi = hdmi->fb; + mdp->dma(mdp, addr + fb_hdmi->fix.smem_start, + hdmi->xres * 2, w, h, x, y, &hdmi->dma_callback, + panel->interface_type); + } + return; +error: + spin_unlock_irqrestore(&hdmi->update_lock, irq_flags); +} + +static int hdmifb_probe(struct platform_device *pdev) +{ + struct fb_info *info; + struct hdmifb_info *hdmi_fb; + struct msm_panel_data *panel = pdev->dev.platform_data; + int ret; + + printk(KERN_DEBUG "%s\n", __func__); + + if (!panel) { + pr_err("hdmi_fb_probe: no platform data\n"); + return -EINVAL; + } + + if (!panel->fb_data) { + pr_err("hdmi_fb_probe: no fb_data\n"); + return -EINVAL; + } + + info = framebuffer_alloc(sizeof(struct hdmifb_info), &pdev->dev); + if (!info) + return -ENOMEM; + + /* Zero out the structure before using it */ + memset(info, 0, sizeof(struct hdmifb_info)); + + hdmi_fb = info->par; + _hdmi_fb = hdmi_fb; + hdmi_fb->fb = info; + hdmi_fb->panel = panel; + set_bit(hdmi_mode, &hdmi_fb->state); + hdmi_fb->dma_callback.func = hdmi_handle_dma; + hdmi_fb->vsync_callback.func = hdmi_handle_vsync; + hdmi_fb->xres = panel->fb_data->xres; + hdmi_fb->yres = panel->fb_data->yres; + spin_lock_init(&hdmi_fb->update_lock); + + ret = setup_fbmem(hdmi_fb, pdev); + if (ret) + goto error_setup_fbmem; + + setup_fb_info(hdmi_fb); + + ret = register_framebuffer(info); + if (ret) + goto error_register_fb; + + printk(KERN_INFO "hdmi_fb %d * %d initialed\n", + hdmi_fb->xres, hdmi_fb->yres); + +#ifdef CONFIG_HAS_EARLYSUSPEND + hdmi_fb->early_suspend.suspend = hdmifb_suspend; + hdmi_fb->early_suspend.resume = hdmifb_resume; + hdmi_fb->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1; + register_early_suspend(&hdmi_fb->early_suspend); +#endif + /* blank panel explicitly because we turn on clk on initial */ + if (panel->blank) + panel->blank(panel); + set_bit(fb_enabled, &hdmi_fb->state); + return 0; + +error_register_fb: +error_setup_fbmem: + framebuffer_release(hdmi_fb->fb); + printk(KERN_ERR "msm probe fail with %d\n", ret); + 
return ret; +} + +static struct platform_driver hdmi_frame_buffer = { + .probe = hdmifb_probe, + .driver = {.name = "msm_hdmi"}, +}; + +static int hdmifb_add_mdp_device(struct device *dev, + struct class_interface *class_intf) +{ + /* might need locking if multiple mdp devices */ + if (mdp) + return 0; + mdp = container_of(dev, struct mdp_device, dev); + return platform_driver_register(&hdmi_frame_buffer); +} + +static void hdmifb_remove_mdp_device(struct device *dev, + struct class_interface *class_intf) +{ + /* might need locking if multiple mdp devices */ + if (dev != &mdp->dev) + return; + platform_driver_unregister(&hdmi_frame_buffer); + mdp = NULL; +} + +static struct class_interface hdmi_fb_interface = { + .add_dev = &hdmifb_add_mdp_device, + .remove_dev = &hdmifb_remove_mdp_device, +}; + +static int hdmifb_add_hdmi_device(struct device *dev, + struct class_interface *class_intf) +{ + dev_dbg(dev, "%s\n", __func__); + + if (hdmi) + return 0; + hdmi = container_of(dev, struct hdmi_device, dev); + return 0; +} + +static struct class_interface hdmi_interface = { + .add_dev = hdmifb_add_hdmi_device, +}; + +static int __init hdmifb_init(void) +{ + int rc; + + rc = register_mdp_client(&hdmi_fb_interface); + if (rc) + return rc; + + rc = register_hdmi_client(&hdmi_interface); + if (rc) + return rc; + return 0; +} + +module_init(hdmifb_init); diff --git a/drivers/video/msm/hdmi/hdmi_lcdc.c b/drivers/video/msm/hdmi/hdmi_lcdc.c new file mode 100644 index 0000000000000..32dbc0c868444 --- /dev/null +++ b/drivers/video/msm/hdmi/hdmi_lcdc.c @@ -0,0 +1,606 @@ +/* drivers/video/msm/hdmi_lcdc.c + * + * Copyright (c) 2009 Google Inc. + * Copyright (c) 2009 HTC + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include "../mdp_hw.h" +#include "../../../../arch/arm/mach-msm/proc_comm.h" +#include "../../../../arch/arm/mach-msm/clock-pcom.h" + + +#if 1 +#define HDMI_DBG(s...) printk("[hdmi/lcdc]" s) +#else +#define HDMI_DBG(s...) do {} while (0) +#endif + +struct mdp_lcdc_info *_lcdc; + +static struct mdp_device *mdp_dev; + +#define panel_to_lcdc(p) container_of((p), struct mdp_lcdc_info, fb_panel_data) + +/* FIXME: move the clock manipulation to its proper place and + integrate it with the fb_hdmi use counter +*/ +int lcdc_enable_video(void) +{ + //struct mdp_lcdc_info *lcdc = panel_to_lcdc(fb_panel); + struct mdp_lcdc_info *lcdc = _lcdc; + struct msm_lcdc_panel_ops *panel_ops = lcdc->pdata->panel_ops; + + mutex_lock(&lcdc->blank_lock); + if (atomic_read(&lcdc->blank_count)) + goto end_enable_video; + HDMI_DBG("%s: enable clocks\n", __func__); + clk_enable(lcdc->mdp_clk); + clk_enable(lcdc->pclk); + clk_enable(lcdc->pad_pclk); + + /* TODO: needs a pre-test to see whether it makes any difference to HDCP + * if ebi1_clk is not enabled here.
+ */ + //panel_ops->unblank(panel_ops); + + mdp_writel(lcdc->mdp, 1, MDP_LCDC_EN); + atomic_inc(&lcdc->blank_count); + HDMI_DBG("%s, blank_count=%d\n", __func__, + atomic_read(&lcdc->blank_count)); +end_enable_video: + mutex_unlock(&lcdc->blank_lock); + + return 0; +} + +int lcdc_disable_video(void) +{ + struct mdp_lcdc_info *lcdc = _lcdc; + struct msm_lcdc_panel_ops *panel_ops = lcdc->pdata->panel_ops; + + mutex_lock(&lcdc->blank_lock); + if (atomic_read(&lcdc->blank_count) == 0) + goto disable_video_done; + if (atomic_dec_return(&lcdc->blank_count) == 0) { + HDMI_DBG("%s: disable clocks\n", __func__); + panel_ops->blank(panel_ops); + mdp_writel(lcdc->mdp, 0, MDP_LCDC_EN); + clk_disable(lcdc->pclk); + clk_disable(lcdc->pad_pclk); + clk_disable(lcdc->mdp_clk); + } +disable_video_done: + mutex_unlock(&lcdc->blank_lock); + HDMI_DBG("%s, blank_count=%d\n", __func__, + atomic_read(&lcdc->blank_count)); + return 0; +} + +static int lcdc_unblank(struct msm_panel_data *fb_panel) +{ + struct mdp_lcdc_info *lcdc = panel_to_lcdc(fb_panel); + struct msm_lcdc_panel_ops *panel_ops = lcdc->pdata->panel_ops; + + HDMI_DBG("%s\n", __func__); + +#if 0 + HDMI_DBG("%s: enable clocks\n", __func__); + clk_enable(lcdc->mdp_clk); + clk_enable(lcdc->pclk); + clk_enable(lcdc->pad_pclk); + + panel_ops->unblank(panel_ops); + + mdp_writel(lcdc->mdp, 1, MDP_LCDC_EN); + atomic_set(&lcdc->blank_count, 1); +#else + lcdc_enable_video(); + /* TODO: need pre-test to see if it make any influence to HDCP, + * if ebi1_clk enabled here. + */ + panel_ops->unblank(panel_ops); +#endif + return 0; +} + +static int lcdc_blank(struct msm_panel_data *fb_panel) +{ + struct mdp_lcdc_info *lcdc = panel_to_lcdc(fb_panel); + //struct msm_lcdc_panel_ops *panel_ops = lcdc->pdata->panel_ops; + +#if 0 + mutex_lock(&lcdc->blank_lock); + if (atomic_read(&lcdc->blank_count) == 0) + goto blank_done; + if (atomic_dec_return(&lcdc->blank_count) == 0) { + HDMI_DBG("%s: disable clocks\n", __func__); + panel_ops->blank(panel_ops); + mdp_writel(lcdc->mdp, 0, MDP_LCDC_EN); + clk_disable(lcdc->pclk); + clk_disable(lcdc->pad_pclk); + clk_disable(lcdc->mdp_clk); + } +blank_done: + mutex_unlock(&lcdc->blank_lock); + HDMI_DBG("%s, blank_count=%d\n", __func__, + atomic_read(&lcdc->blank_count)); +#else + lcdc_disable_video(); +#endif + return 0; +} + +static int lcdc_suspend(struct msm_panel_data *fb_panel) +{ + struct mdp_lcdc_info *lcdc = panel_to_lcdc(fb_panel); + struct msm_lcdc_panel_ops *panel_ops = lcdc->pdata->panel_ops; + + //pr_info("%s: suspending\n", __func__); + HDMI_DBG("%s\n", __func__); + + if (panel_ops->uninit) + panel_ops->uninit(panel_ops); + lcdc_disable_video(); + + return 0; +} + +static int lcdc_resume(struct msm_panel_data *fb_panel) +{ + struct mdp_lcdc_info *lcdc = panel_to_lcdc(fb_panel); + struct msm_lcdc_panel_ops *panel_ops = lcdc->pdata->panel_ops; + + //pr_info("%s: resuming\n", __func__); + HDMI_DBG("%s\n", __func__); + + if (panel_ops->init) { + if (panel_ops->init(panel_ops) < 0) + printk(KERN_ERR "LCD init fail!\n"); + } + + return 0; +} + +static int +lcdc_adjust_timing(struct msm_panel_data *fb_panel, + struct msm_lcdc_timing *timing, u32 xres, u32 yres) +{ + struct mdp_lcdc_info *lcdc = panel_to_lcdc(fb_panel); + unsigned int hsync_period; + unsigned int hsync_start_x; + unsigned int hsync_end_x; + unsigned int vsync_period; + unsigned int display_vstart; + unsigned int display_vend; + uint32_t dma_cfg; + + clk_set_rate(lcdc->pclk, timing->clk_rate); + clk_set_rate(lcdc->pad_pclk, timing->clk_rate); + HDMI_DBG("%s, 
clk=%d, xres=%d, yres=%d,\n", __func__, + clk_get_rate(lcdc->pclk), xres, yres); + + hsync_period = (timing->hsync_pulse_width + timing->hsync_back_porch + + xres + timing->hsync_front_porch); + hsync_start_x = (timing->hsync_pulse_width + timing->hsync_back_porch); + hsync_end_x = hsync_period - timing->hsync_front_porch - 1; + + vsync_period = (timing->vsync_pulse_width + timing->vsync_back_porch + + yres + timing->vsync_front_porch); + vsync_period *= hsync_period; + + display_vstart = timing->vsync_pulse_width + timing->vsync_back_porch; + display_vstart *= hsync_period; + display_vstart += timing->hsync_skew; + + display_vend = timing->vsync_front_porch * hsync_period; + display_vend = vsync_period - display_vend + timing->hsync_skew - 1; + + /* register values we pre-compute at init time from the timing + * information in the panel info */ + lcdc->parms.hsync_ctl = (((hsync_period & 0xfff) << 16) | + (timing->hsync_pulse_width & 0xfff)); + lcdc->parms.vsync_period = vsync_period & 0xffffff; + lcdc->parms.vsync_pulse_width = (timing->vsync_pulse_width * + hsync_period) & 0xffffff; + + lcdc->parms.display_hctl = (((hsync_end_x & 0xfff) << 16) | + (hsync_start_x & 0xfff)); + lcdc->parms.display_vstart = display_vstart & 0xffffff; + lcdc->parms.display_vend = display_vend & 0xffffff; + lcdc->parms.hsync_skew = timing->hsync_skew & 0xfff; + lcdc->parms.polarity = ((timing->hsync_act_low << 0) | + (timing->vsync_act_low << 1) | + (timing->den_act_low << 2)); + lcdc->parms.clk_rate = timing->clk_rate; + + mdp_writel(lcdc->mdp, lcdc->parms.hsync_ctl, MDP_LCDC_HSYNC_CTL); + mdp_writel(lcdc->mdp, lcdc->parms.vsync_period, MDP_LCDC_VSYNC_PERIOD); + mdp_writel(lcdc->mdp, lcdc->parms.vsync_pulse_width, + MDP_LCDC_VSYNC_PULSE_WIDTH); + mdp_writel(lcdc->mdp, lcdc->parms.display_hctl, MDP_LCDC_DISPLAY_HCTL); + mdp_writel(lcdc->mdp, lcdc->parms.display_vstart, + MDP_LCDC_DISPLAY_V_START); + mdp_writel(lcdc->mdp, lcdc->parms.display_vend, MDP_LCDC_DISPLAY_V_END); + mdp_writel(lcdc->mdp, lcdc->parms.hsync_skew, MDP_LCDC_HSYNC_SKEW); + + mdp_writel(lcdc->mdp, 0, MDP_LCDC_BORDER_CLR); + mdp_writel(lcdc->mdp, 0x0, MDP_LCDC_UNDERFLOW_CTL); + mdp_writel(lcdc->mdp, 0, MDP_LCDC_ACTIVE_HCTL); + mdp_writel(lcdc->mdp, 0, MDP_LCDC_ACTIVE_V_START); + mdp_writel(lcdc->mdp, 0, MDP_LCDC_ACTIVE_V_END); + mdp_writel(lcdc->mdp, lcdc->parms.polarity, MDP_LCDC_CTL_POLARITY); + printk("solomon: polarity=%04x\n", mdp_readl(lcdc->mdp, MDP_LCDC_CTL_POLARITY)); + + /* config the dma_p block that drives the lcdc data */ + mdp_writel(lcdc->mdp, lcdc->fb_start, MDP_DMA_P_IBUF_ADDR); + mdp_writel(lcdc->mdp, (((yres & 0x7ff) << 16) | + (xres & 0x7ff)), + MDP_DMA_P_SIZE); + /* TODO: pull in the bpp info from somewhere else? 
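+ *       (the "xres * 2" stride written below assumes a 16bpp RGB565
+ *       framebuffer, matching the DMA_IBUF_FORMAT_RGB565 bit set in
+ *       dma_cfg a few lines further down)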
*/ + mdp_writel(lcdc->mdp, xres * 2, + MDP_DMA_P_IBUF_Y_STRIDE); + mdp_writel(lcdc->mdp, 0, MDP_DMA_P_OUT_XY); + + dma_cfg = (DMA_PACK_ALIGN_LSB | + DMA_PACK_PATTERN_RGB | + DMA_DITHER_EN); + dma_cfg |= DMA_OUT_SEL_LCDC; + dma_cfg |= DMA_IBUF_FORMAT_RGB565; + dma_cfg |= DMA_DSTC0G_8BITS | DMA_DSTC1B_8BITS | DMA_DSTC2R_8BITS; + + mdp_writel(lcdc->mdp, dma_cfg, MDP_DMA_P_CONFIG); + return 0; +} + +static int lcdc_hw_init(struct mdp_lcdc_info *lcdc) +{ + struct msm_panel_data *fb_panel = &lcdc->fb_panel_data; + uint32_t dma_cfg; + unsigned int clk_id, clk_rate; + + clk_enable(lcdc->mdp_clk); + clk_enable(lcdc->pclk); + clk_enable(lcdc->pad_pclk); + + clk_set_rate(lcdc->pclk, lcdc->parms.clk_rate); + clk_set_rate(lcdc->pad_pclk, lcdc->parms.clk_rate); + printk(KERN_DEBUG "pclk = %ld, pad_pclk = %ld\n", + clk_get_rate(lcdc->pclk), + clk_get_rate(lcdc->pad_pclk)); + + /* write the lcdc params */ + mdp_writel(lcdc->mdp, lcdc->parms.hsync_ctl, MDP_LCDC_HSYNC_CTL); + mdp_writel(lcdc->mdp, lcdc->parms.vsync_period, MDP_LCDC_VSYNC_PERIOD); + mdp_writel(lcdc->mdp, lcdc->parms.vsync_pulse_width, + MDP_LCDC_VSYNC_PULSE_WIDTH); + mdp_writel(lcdc->mdp, lcdc->parms.display_hctl, MDP_LCDC_DISPLAY_HCTL); + mdp_writel(lcdc->mdp, lcdc->parms.display_vstart, + MDP_LCDC_DISPLAY_V_START); + mdp_writel(lcdc->mdp, lcdc->parms.display_vend, MDP_LCDC_DISPLAY_V_END); + mdp_writel(lcdc->mdp, lcdc->parms.hsync_skew, MDP_LCDC_HSYNC_SKEW); + + mdp_writel(lcdc->mdp, 0, MDP_LCDC_BORDER_CLR); + mdp_writel(lcdc->mdp, 0, MDP_LCDC_UNDERFLOW_CTL); + mdp_writel(lcdc->mdp, 0, MDP_LCDC_ACTIVE_HCTL); + mdp_writel(lcdc->mdp, 0, MDP_LCDC_ACTIVE_V_START); + mdp_writel(lcdc->mdp, 0, MDP_LCDC_ACTIVE_V_END); + mdp_writel(lcdc->mdp, lcdc->parms.polarity, MDP_LCDC_CTL_POLARITY); + printk("solomon: polarity=%04x\n", mdp_readl(lcdc->mdp, MDP_LCDC_CTL_POLARITY)); + + /* config the dma_p block that drives the lcdc data */ + mdp_writel(lcdc->mdp, lcdc->fb_start, MDP_DMA_P_IBUF_ADDR); + mdp_writel(lcdc->mdp, (((fb_panel->fb_data->yres & 0x7ff) << 16) | + (fb_panel->fb_data->xres & 0x7ff)), + MDP_DMA_P_SIZE); + /* TODO: pull in the bpp info from somewhere else? */ + mdp_writel(lcdc->mdp, fb_panel->fb_data->xres * 2, + MDP_DMA_P_IBUF_Y_STRIDE); + mdp_writel(lcdc->mdp, 0, MDP_DMA_P_OUT_XY); + + dma_cfg = (DMA_PACK_ALIGN_LSB | + DMA_PACK_PATTERN_RGB | + DMA_DITHER_EN); + dma_cfg |= DMA_OUT_SEL_LCDC; + dma_cfg |= DMA_IBUF_FORMAT_RGB565; + dma_cfg |= DMA_DSTC0G_8BITS | DMA_DSTC1B_8BITS | DMA_DSTC2R_8BITS; + + mdp_writel(lcdc->mdp, dma_cfg, MDP_DMA_P_CONFIG); + + /* Send customized command to ARM9 for escalating DMA_P as tier-1 + * of AXI bus. 
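+ * (Note: the P_USB_PHY_CLK id and rate of 1 passed below appear to be used
+ * only as an opaque token for this PCOM_CLKCTL_RPC_MIN_RATE proc_comm call,
+ * not to actually reconfigure the USB PHY clock.)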
+ * Ref: SR#272509 + */ + clk_id = P_USB_PHY_CLK; + clk_rate = 0x1; + msm_proc_comm(PCOM_CLKCTL_RPC_MIN_RATE, &clk_id, &clk_rate); + + return 0; +} + +static void lcdc_wait_vsync(struct msm_panel_data *panel) +{ + struct mdp_lcdc_info *lcdc = panel_to_lcdc(panel); + int ret; + + ret = wait_event_timeout(lcdc->vsync_waitq, lcdc->got_vsync, HZ / 2); + if (ret == 0) + pr_err("%s: timeout waiting for VSYNC\n", __func__); + lcdc->got_vsync = 0; +} + +static void lcdc_request_vsync(struct msm_panel_data *fb_panel, + struct msmfb_callback *vsync_cb) +{ + struct mdp_lcdc_info *lcdc = panel_to_lcdc(fb_panel); + + /* the vsync callback will start the dma */ + vsync_cb->func(vsync_cb); + lcdc->got_vsync = 0; + mdp_out_if_req_irq(mdp_dev, MSM_LCDC_INTERFACE, MDP_LCDC_FRAME_START, + &lcdc->frame_start_cb); + lcdc_wait_vsync(fb_panel); +} + +static void lcdc_clear_vsync(struct msm_panel_data *fb_panel) +{ + struct mdp_lcdc_info *lcdc = panel_to_lcdc(fb_panel); + lcdc->got_vsync = 0; + mdp_out_if_req_irq(mdp_dev, MSM_LCDC_INTERFACE, 0, NULL); +} + +/* called in irq context with mdp lock held, when mdp gets the + * MDP_LCDC_FRAME_START interrupt */ +static void lcdc_frame_start(struct msmfb_callback *cb) +{ + struct mdp_lcdc_info *lcdc; + + lcdc = container_of(cb, struct mdp_lcdc_info, frame_start_cb); + + lcdc->got_vsync = 1; + wake_up(&lcdc->vsync_waitq); +} + +static void lcdc_dma_start(void *priv, uint32_t addr, uint32_t stride, + uint32_t width, uint32_t height, uint32_t x, + uint32_t y) +{ + struct mdp_lcdc_info *lcdc = priv; + struct mdp_info *mdp = lcdc->mdp; + +#if 0 + if (mdp->dma_config_dirty) { + mdp_writel(lcdc->mdp, 0, MDP_LCDC_EN); + mdelay(20); + mdp_dev->configure_dma(mdp_dev); + mdp_writel(lcdc->mdp, 1, MDP_LCDC_EN); + } +#endif + mdp_writel(lcdc->mdp, stride, MDP_DMA_P_IBUF_Y_STRIDE); + mdp_writel(lcdc->mdp, addr, MDP_DMA_P_IBUF_ADDR); +} + +static void precompute_timing_parms(struct mdp_lcdc_info *lcdc) +{ + struct msm_lcdc_timing *timing = lcdc->pdata->timing; + struct msm_fb_data *fb_data = lcdc->pdata->fb_data; + unsigned int hsync_period; + unsigned int hsync_start_x; + unsigned int hsync_end_x; + unsigned int vsync_period; + unsigned int display_vstart; + unsigned int display_vend; + + hsync_period = (timing->hsync_pulse_width + timing->hsync_back_porch + + fb_data->xres + timing->hsync_front_porch); + hsync_start_x = (timing->hsync_pulse_width + timing->hsync_back_porch); + hsync_end_x = hsync_period - timing->hsync_front_porch - 1; + + vsync_period = (timing->vsync_pulse_width + timing->vsync_back_porch + + fb_data->yres + timing->vsync_front_porch); + vsync_period *= hsync_period; + + display_vstart = timing->vsync_pulse_width + timing->vsync_back_porch; + display_vstart *= hsync_period; + display_vstart += timing->hsync_skew; + + display_vend = timing->vsync_front_porch * hsync_period; + display_vend = vsync_period - display_vend + timing->hsync_skew - 1; + + /* register values we pre-compute at init time from the timing + * information in the panel info */ + lcdc->parms.hsync_ctl = (((hsync_period & 0xfff) << 16) | + (timing->hsync_pulse_width & 0xfff)); + lcdc->parms.vsync_period = vsync_period & 0xffffff; + lcdc->parms.vsync_pulse_width = (timing->vsync_pulse_width * + hsync_period) & 0xffffff; + + lcdc->parms.display_hctl = (((hsync_end_x & 0xfff) << 16) | + (hsync_start_x & 0xfff)); + lcdc->parms.display_vstart = display_vstart & 0xffffff; + lcdc->parms.display_vend = display_vend & 0xffffff; + lcdc->parms.hsync_skew = timing->hsync_skew & 0xfff; + lcdc->parms.polarity = 
((timing->hsync_act_low << 0) | + (timing->vsync_act_low << 1) | + (timing->den_act_low << 2)); + lcdc->parms.clk_rate = timing->clk_rate; +} + +static int hdmi_lcdc_probe(struct platform_device *pdev) +{ + struct msm_lcdc_platform_data *pdata = pdev->dev.platform_data; + struct mdp_lcdc_info *lcdc; + int ret = 0; + + printk(KERN_DEBUG "%s\n", __func__); + + if (!pdata) { + pr_err("%s: no LCDC platform data found\n", __func__); + return -EINVAL; + } + + _lcdc = lcdc = kzalloc(sizeof(struct mdp_lcdc_info), GFP_KERNEL); + if (!lcdc) + return -ENOMEM; + + /* We don't actually own the clocks, the mdp does. */ + lcdc->mdp_clk = clk_get(mdp_dev->dev.parent, "mdp_clk"); + if (IS_ERR(lcdc->mdp_clk)) { + pr_err("%s: failed to get mdp_clk\n", __func__); + ret = PTR_ERR(lcdc->mdp_clk); + goto err_get_mdp_clk; + } + + lcdc->pclk = clk_get(mdp_dev->dev.parent, "lcdc_pclk_clk"); + if (IS_ERR(lcdc->pclk)) { + pr_err("%s: failed to get lcdc_pclk\n", __func__); + ret = PTR_ERR(lcdc->pclk); + goto err_get_pclk; + } + + lcdc->pad_pclk = clk_get(mdp_dev->dev.parent, "lcdc_pad_pclk_clk"); + if (IS_ERR(lcdc->pad_pclk)) { + pr_err("%s: failed to get lcdc_pad_pclk\n", __func__); + ret = PTR_ERR(lcdc->pad_pclk); + goto err_get_pad_pclk; + } + + init_waitqueue_head(&lcdc->vsync_waitq); + mutex_init(&lcdc->blank_lock); + lcdc->pdata = pdata; + lcdc->frame_start_cb.func = lcdc_frame_start; + + platform_set_drvdata(pdev, lcdc); + + mdp_out_if_register(mdp_dev, MSM_LCDC_INTERFACE, lcdc, MDP_DMA_P_DONE, + lcdc_dma_start); + + precompute_timing_parms(lcdc); + + lcdc->fb_start = pdata->fb_resource->start; + lcdc->mdp = container_of(mdp_dev, struct mdp_info, mdp_dev); + + lcdc->fb_panel_data.suspend = lcdc_suspend; + lcdc->fb_panel_data.resume = lcdc_resume; + lcdc->fb_panel_data.wait_vsync = lcdc_wait_vsync; + lcdc->fb_panel_data.request_vsync = lcdc_request_vsync; + lcdc->fb_panel_data.clear_vsync = lcdc_clear_vsync; + lcdc->fb_panel_data.blank = lcdc_blank; + lcdc->fb_panel_data.unblank = lcdc_unblank; + lcdc->fb_panel_data.adjust_timing = lcdc_adjust_timing; + lcdc->fb_panel_data.fb_data = pdata->fb_data; + lcdc->fb_panel_data.interface_type = MSM_LCDC_INTERFACE; + + ret = lcdc_hw_init(lcdc); + atomic_set(&lcdc->blank_count, 1); + if (ret) { + pr_err("%s: Cannot initialize the mdp_lcdc\n", __func__); + goto err_hw_init; + } + + lcdc->fb_pdev.name = "msm_hdmi"; + lcdc->fb_pdev.id = pdata->fb_id; + lcdc->fb_pdev.resource = pdata->fb_resource; + lcdc->fb_pdev.num_resources = 1; + lcdc->fb_pdev.dev.platform_data = &lcdc->fb_panel_data; + + ret = platform_device_register(&lcdc->fb_pdev); + if (ret) { + pr_err("%s: Cannot register msm_panel pdev\n", __func__); + goto err_plat_dev_reg; + } + + pr_info("%s: initialized\n", __func__); + + return 0; + +err_plat_dev_reg: +err_hw_init: + platform_set_drvdata(pdev, NULL); + clk_put(lcdc->pad_pclk); +err_get_pad_pclk: + clk_put(lcdc->pclk); +err_get_pclk: + clk_put(lcdc->mdp_clk); +err_get_mdp_clk: + kfree(lcdc); + return ret; +} + +static int hdmi_lcdc_remove(struct platform_device *pdev) +{ + struct mdp_lcdc_info *lcdc = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + + clk_put(lcdc->pclk); + clk_put(lcdc->pad_pclk); + kfree(lcdc); + + return 0; +} + +static struct platform_driver mdp_lcdc_driver = { + .probe = hdmi_lcdc_probe, + .remove = hdmi_lcdc_remove, + .driver = { + .name = "msm_mdp_hdmi", + .owner = THIS_MODULE, + }, +}; + +static int mdp_lcdc_add_mdp_device(struct device *dev, + struct class_interface *class_intf) +{ + /* might need locking if mulitple 
mdp devices */ + if (mdp_dev) + return 0; + mdp_dev = container_of(dev, struct mdp_device, dev); + return platform_driver_register(&mdp_lcdc_driver); +} + +static void mdp_lcdc_remove_mdp_device(struct device *dev, + struct class_interface *class_intf) +{ + /* might need locking if mulitple mdp devices */ + if (dev != &mdp_dev->dev) + return; + platform_driver_unregister(&mdp_lcdc_driver); + mdp_dev = NULL; +} + +static struct class_interface mdp_lcdc_interface = { + .add_dev = &mdp_lcdc_add_mdp_device, + .remove_dev = &mdp_lcdc_remove_mdp_device, +}; + +static int __init mdp_lcdc_init(void) +{ + return register_mdp_client(&mdp_lcdc_interface); +} + +module_init(mdp_lcdc_init); diff --git a/drivers/video/msm/hdmi/include/edid.h b/drivers/video/msm/hdmi/include/edid.h new file mode 100644 index 0000000000000..4a6f9ac436c52 --- /dev/null +++ b/drivers/video/msm/hdmi/include/edid.h @@ -0,0 +1,85 @@ +#ifndef _EDID_H_ +#define _EDID_H_ + +#define MAX_VIDEO_MODES 32 +struct edid_info_struct { + bool is_valid; + struct mutex access_lock; + bool under_scan; + bool basic_audio; + bool ycbcr_4_4_4; + bool ycbcr_4_2_2; + bool hdmi_sink; +}; + +#define EDID_BLOCK_SIZE 128 +#define EDID_HDR_NO_OF_FF 0x06 + +#define EDID_BLOCK_0_OFFSET 0x00 +#define EDID_BLOCK_1_OFFSET 0x80 +#define EDID_BLOCK_SIZE 128 +#define EDID_HDR_NO_OF_FF 0x06 +#define NUM_OF_EXTEN_ADDR 0x7E + +#define EDID_TAG_ADDR 0x00 +#define EDID_REV_ADDR 0x01 +#define EDID_TAG_IDX 0x02 +#define LONG_DESCR_PTR_IDX 0x02 +#define MISC_SUPPORT_IDX 0x03 + +#define ESTABLISHED_TIMING_INDEX 35 +#define NUM_OF_STANDARD_TIMINGS 8 +#define STANDARD_TIMING_OFFSET 38 +#define LONG_DESCR_LENi 18 +#define NUM_OF_DETAILED_DESCRIPTORS 4 + +#define DETAILED_TIMING_OFFSET 0x36 + +/* Offsets within a Long Descriptors Block */ +#define PIX_CLK_OFFSET 0 +#define H_ACTIVE_OFFSET 2 +#define H_BLANKING_OFFSET 3 +#define V_ACTIVE_OFFSET 5 +#define V_BLANKING_OFFSET 6 +#define H_SYNC_OFFSET 8 +#define H_SYNC_PW_OFFSET 9 +#define V_SYNC_OFFSET 10 +#define V_SYNC_PW_OFFSET 10 +#define H_IMAGE_SIZE_OFFSET 12 +#define V_IMAGE_SIZE_OFFSET 13 +#define H_BORDER_OFFSET 15 +#define V_BORDER_OFFSET 16 +#define FLAGS_OFFSET 17 + +#define AR16_10 0 +#define AR4_3 1 +#define AR5_4 2 +#define AR16_9 3 + +#define EDID_EXTENSION_TAG 0x02 +#define EDID_REV_THREE 0x03 +#define EDID_DATA_START 0x04 + +#define EDID_BLOCK_0 0x00 +#define EDID_BLOCK_2_3 0x01 + +#define AUDIO_DESCR_SIZE 3 + +/* Data Block Tag Codes */ +#define AUDIO_D_BLOCK 0x01 +#define VIDEO_D_BLOCK 0x02 +#define VENDOR_SPEC_D_BLOCK 0x03 +#define SPKR_ALLOC_D_BLOCK 0x04 +#define USE_EXTENDED_TAG 0x07 + +/* Extended Data Block Tag Codes */ +#define COLORIMETRY_D_BLOCK 0x05 + +#define VIDEO_CAPABILITY_D_BLOCK 0x00 +#define HDMI_SIGNATURE_LEN 0x03 + +#define CEC_PHYS_ADDR_LEN 0x02 + +#define EDID_BIT(b) (1 << b) + +#endif diff --git a/drivers/video/msm/hdmi/include/fb-hdmi.h b/drivers/video/msm/hdmi/include/fb-hdmi.h new file mode 100644 index 0000000000000..3252ec125f337 --- /dev/null +++ b/drivers/video/msm/hdmi/include/fb-hdmi.h @@ -0,0 +1,27 @@ +#ifndef _FB_HDMI_H_ +#define _FB_HDMI_H_ + +#include +#include + +enum hd_res { + hd_720p = 0, /* 1280 * 720 */ + svga, /* 800 * 600 */ + pal, /* 720 * 576 */ + edtv, /* 720 * 480 */ + vga, /* 640 * 480 */ +}; + +struct msm_lcdc_timing; +struct hdmi_device { + struct device dev; + int (*check_res)(struct hdmi_device *, struct fb_var_screeninfo *); + struct msm_lcdc_timing *(*set_res)(struct hdmi_device *, + struct fb_var_screeninfo *); + int (*get_cable_state)(struct 
hdmi_device *, int *); + int (*get_establish_timing)(struct hdmi_device *, u8 *); +}; + +struct class_interface; +int register_hdmi_client(struct class_interface *class_intf); +#endif diff --git a/drivers/video/msm/hdmi/include/sil902x.h b/drivers/video/msm/hdmi/include/sil902x.h new file mode 100644 index 0000000000000..17ab291fcfc20 --- /dev/null +++ b/drivers/video/msm/hdmi/include/sil902x.h @@ -0,0 +1,401 @@ +#ifndef __SIL902X_H_ +#define __SIL902X_H_ + +#include +#include +#include "edid.h" + +struct hdmi_info { + struct hdmi_device hdmi_dev; + struct i2c_client *client; + struct msm_lcdc_panel_ops hdmi_lcdc_ops; + struct work_struct work; + struct delayed_work hdmi_delay_work; + struct mutex lock; + struct mutex lock2; + struct clk *ebi1_clk; + int (*power)(int on); + void (*hdmi_gpio_on)(void); + void (*hdmi_gpio_off)(void); + enum hd_res res; + // FIXME: move to edid_info_struct + u8 edid_buf[128 * 4]; + enum { + SLEEP, + AWAKE, + } sleeping; + bool polling; + bool cable_connected; + bool isr_enabled; + bool first; + struct completion hotplug_completion; + struct timer_list timer; + struct work_struct polling_work; + struct dentry *debug_dir; + struct edid_info_struct edid_info; + struct mutex polling_lock; + bool suspending; + bool user_playing; + bool video_streaming; +}; + +enum { + HDMI_PIXEL_DATA = 0x08, + HDMI_AVI_INFO_FRAME = 0x0c, + HDMI_AUDIO_INFO_FRAME = 0xbf, + HDMI_SYS_CTL = 0x1a, + HDMI_POWER = 0x1e, + HDMI_IDENTIFY = 0x1b, + HDMI_INT_EN = 0x3c, + HDMI_INT_STAT = 0x3d, + HDMI_EN_REG = 0xc7, +}; + +/* flag bitmap for register HDMI_INT_STAT */ +enum { + HOT_PLUG_PENDING = (1U << 0), + RX_PENDING = (1U << 1), + HOT_PLUG_STATE = (1U << 2), + RX_STATE = (1U << 3), + AUDIO_ERR = (1U << 4), + SECURITY_STATE = (1U << 5), + HDCP_VALUE = (1U << 6), + HDCP_AUTH = (1U << 7), +}; + +enum ErrorMessages { + INIT_SYSTEM_SUCCESSFUL, // 0 + + BLACK_BOX_OPEN_FAILURE, + BLACK_BOX_OPENED_SUCCESSFULLY, + HW_RESET_FAILURE, + TPI_ENABLE_FAILURE, + INTERRUPT_EN_FAILURE, + INTERRUPT_POLLING_FAILED, + + NO_SINK_CONNECTED, + DDC_BUS_REQ_FAILURE, + HDCP_FAILURE, + HDCP_OK, // 10 + RX_AUTHENTICATED, + SINK_DOES_NOT_SUPPORT_HDCP, + TX_DOES_NOT_SUPPORT_HDCP, + + ILLEGAL_AKSV, + SET_PROTECTION_FAILURE, + REVOKED_KEYS_FOUND, + REPEATER_AUTHENTICATED, + INT_STATUS_READ_FAILURE, + + PROTECTION_OFF_FAILED, + PROTECTION_ON_FAILED, // 20 + INTERRUPT_POLLING_OK, + + EDID_PARSING_FAILURE, + VIDEO_SETUP_FAILURE, + TPI_READ_FAILURE, + TPI_WRITE_FAILURE, + + INIT_VIDEO_FAILURE, + DE_CANNOT_BE_SET_WITH_EMBEDDED_SYNC, + SET_EMBEDDED_SYC_FAILURE, + V_MODE_NOT_SUPPORTED, + + AUD_MODE_NOT_SUPPORTED, // 30 + I2S_NOT_SET, + + EDID_READ_FAILURE, + EDID_CHECKSUM_ERROR, + INCORRECT_EDID_HEADER, + EDID_EXT_TAG_ERROR, + + EDID_REV_ADDR_ERROR, + EDID_V_DESCR_OVERFLOW, + INCORRECT_EDID_FILE, + UNKNOWN_EDID_TAG_CODE, + NO_DETAILED_DESCRIPTORS_IN_THIS_EDID_BLOCK, // 40 + CONFIG_DATA_VALID, + CONFIG_DATA_INVALID, + + GPIO_ACCESS_FAILED, + GPIO_CONFIG_ERROR, + + HP_EVENT_GOING_TO_SERVICE_LOOP, + EDID_PARSED_OK, + VIDEO_MODE_SET_OK, + AUDIO_MODE_SET_OK, + + I2S_MAPPING_SUCCESSFUL, + I2S_INPUT_CONFIG_SUCCESSFUL, // 50 + I2S_HEADER_SET_SUCCESSFUL, + INTERRUPT_POLLING_SUCCESSFUL, + + HPD_LOOP_EXITED_SUCCESSFULY, + HPD_LOOP_FAILED, + SINK_CONNECTED, + HP_EVENT_RETURNING_FROM_SERVICE_LOOP, + AVI_INFOFRAMES_SETTING_FAILED, + + TMDS_ENABLING_FAILED, + DE_SET_OK, + DE_SET_FAILED, + NO_861_EXTENSIONS, + + GO_OK, + IMAGE_PKTS_UPDATED_OK, + MONITORING_BLOCKED, + LINK_NORMAL, + LINK_LOST, + RENEGOTIATION_REQUIRED, + + LINK_SUSPENDED, + 
EDID_SHORT_DESCRIPTORS_PARSED_OK, + EDID_LONG_DESCRIPTORS_PARSED_OK, + DDC_BUS_RELEASE_FAILURE, + FAILED_GETTING_BKSV, + + PLL_SETUP_FAILUE, + ERR_RX_QUEUE_FULL, + ERR_TX_QUEUE_FULL, + GBD_SET_SUCCESSFULLY, + BACKDOOR_SETTING_FAILED, + ERR_TX_QUEUE_EMPTY +}; + +#define BIT_0 0x01 +#define BIT_1 0x02 +#define BIT_2 0x04 +#define BIT_3 0x08 +#define BIT_4 0x10 +#define BIT_5 0x20 +#define BIT_6 0x40 +#define BIT_7 0x80 + +#define INTERVAL_HDCP_POLLING (HZ / 25) + +#define REQUEST_RELEASE_DDC_BEFORE_HDCP +#define T_HDCP_ACTIVATION 500 +#define T_HDCP_DEACTIVATION 200 + +#define T_HPD_DELAY 10 +#define TPI_INTERRUPT_EN 0x3c +#define ALL 0xff +#define DUMMY 0xFD + +#define SiI_DEVICE_ID 0xB0 + +#define T_DDC_ACCESS 50 +// TPI Control Masks +// ================= +#define BIT_OUTPUT_MODE 0x01 +#define BIT_DDC_BUS_GRANT 0x02 +#define BIT_DDC_BUS_REQ 0x04 +#define BIT_TMDS_OUTPUT 0x10 + +#define TPI_INTERNAL_PAGE_REG 0xBC +#define TPI_REGISTER_OFFSET_REG 0xBD +#define TPI_REGISTER_VALUE_REG 0xBE + +/* HDCP Control Masks */ +#define BIT_PROTECT_LEVEL 0x01 +#define BIT_PROTECT_TYPE 0x02 +#define BIT_REPEATER 0x08 +#define BIT_LOCAL_PROTECT 0x40 +#define BIT_EXT_PROTECT 0x80 + +#define BITS_LINK_LOST 0x10 +#define BITS_RENEGOTIATION 0x20 + +#define BIT_TMDS_OUTPUT 0x10 + +#define BIT_AUDIO_MUTE 0x10 + +#define TPI_HDCP_REVISION_DATA_REG (0x30) +#define HDCP_MAJOR_REVISION_MASK (BIT_7 | BIT_6 | BIT_5 | BIT_4) +#define HDCP_MAJOR_REVISION_VALUE (0x10) + +#define HDCP_MINOR_REVISION_MASK (BIT_3 | BIT_2 | BIT_1 | BIT_0) +#define HDCP_MINOR_REVISION_VALUE (0x02) + +#define HDCP_REVISION 0x12 +#define SET_PROT_ATTEMPTS 0x05 + +#define AKSV_SIZE 5 +#define BYTE_SIZE 8 +#define NUM_OF_ONES_IN_KSV 20 + +// Interrupt Masks +//================ +#define HOT_PLUG_EVENT 0x01 +#define RX_SENSE_EVENT 0x02 +#define TPI_HOT_PLUG_STATE 0x04 +#define RX_SENSE_STATE 0x08 + +#define AUDIO_ERROR_EVENT 0x10 +#define SECURITY_CHANGE_EVENT 0x20 +#define V_READY_EVENT 0x40 +#define HDCP_CHANGE_EVENT 0x80 + +#define NON_MASKABLE_INT 0xFF + +/* Protection Levels */ +#define NO_PROTECTION 0x00 +#define LOCAL_PROTECTION 0x01 +#define EXTENDED_PROTECTION 0x03 + +#define LINK_NORMAL 0 +#define MAX_V_DESCRIPTORS 20 +#define MAX_A_DESCRIPTORS 10 +#define MAX_SPEAKER_CONFIGURATIONS 4 + +#define HDMI_DEBUGFS_ROOT "hdmi" + +/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// +/*\ +| | HDCP Implementation +| | +| | HDCP link security logic is implemented in certain transmitters; unique +| | keys are embedded in each chip as part of the solution. The security +| | scheme is fully automatic and handled completely by the hardware. 
+\*/ +/// HDCP Query Data Register ============================================== /// + +#define TPI_HDCP_QUERY_DATA_REG (0x29) + +#define EXTENDED_LINK_PROTECTION_MASK (BIT_7) +#define EXTENDED_LINK_PROTECTION_NONE (0x00) +#define EXTENDED_LINK_PROTECTION_SECURE (0x80) + +#define LOCAL_LINK_PROTECTION_MASK (BIT_6) +#define LOCAL_LINK_PROTECTION_NONE (0x00) +#define LOCAL_LINK_PROTECTION_SECURE (0x40) + +#define LINK_STATUS_MASK (BIT_5 | BIT_4) +#define LINK_STATUS_NORMAL (0x00) +#define LINK_STATUS_LINK_LOST (0x10) +#define LINK_STATUS_RENEGOTIATION_REQ (0x20) +#define LINK_STATUS_LINK_SUSPENDED (0x30) + +#define HDCP_REPEATER_MASK (BIT_3) +#define HDCP_REPEATER_NO (0x00) +#define HDCP_REPEATER_YES (0x08) + +#define CONNECTOR_TYPE_MASK (BIT_2 | BIT_0) +#define CONNECTOR_TYPE_DVI (0x00) +#define CONNECTOR_TYPE_RSVD (0x01) +#define CONNECTOR_TYPE_HDMI (0x04) +#define CONNECTOR_TYPE_FUTURE (0x05) + +#define PROTECTION_TYPE_MASK (BIT_1) +#define PROTECTION_TYPE_NONE (0x00) +#define PROTECTION_TYPE_HDCP (0x02) + +/// HDCP Control Data Register ============================================ /// + +#define TPI_HDCP_CONTROL_DATA_REG (0x2A) + +#define PROTECTION_LEVEL_MASK (BIT_0) +#define PROTECTION_LEVEL_MIN (0x00) +#define PROTECTION_LEVEL_MAX (0x01) + +/*---------------------------------------------------------------------------*/ + +#if 0 +/* Caller: ChangeVideoMode(), HDCP_Poll(), HotPlugServiceLoop(), RestartHDCP() + */ +#define EnableTMDS(hdmi) ReadClearWriteTPI(hdmi, TPI_SYSTEM_CONTROL, BIT_TMDS_OUTPUT) // 0x1A[4] = 0 + +/* Caller: ChangeVideoMode(), HDCP_Poll(), TPI_Poll(), RestartHDCP(), + * OnHdmiCableDisconnected() + */ +#define DisableTMDS(hdmi) ReadSetWriteTPI(hdmi, TPI_SYSTEM_CONTROL, BIT_TMDS_OUTPUT) // 0x1A[4] = 1 +#else + +void EnableTMDS(struct hdmi_info *hdmi); +void DisableTMDS(struct hdmi_info *hdmi); + +#endif + +// FIXME: fix the global variables +extern u8 pvid_mode, vid_mode; +extern u8 LinkProtectionLevel; +extern u8 systemInitialized; +/*---------------------------------------------------------------------------*/ +int hdmi_read(struct i2c_client *client, u8 cmd); +int hdmi_write_byte(struct i2c_client *client, u8 reg, u8 val); +int hdmi_enable_int(struct i2c_client *client); +int hdmi_disable_int(struct i2c_client *client); +int hdmi_read_edid(struct hdmi_info *info, struct i2c_client *client); +int hdmi_standby(struct hdmi_info *hdmi); +int hdmi_wakeup(struct hdmi_info *hdmi); +int read_backdoor_register(struct hdmi_info *hdmi, u8 PageNum, u8 RegOffset); +void ReadSetWriteTPI(struct hdmi_info *hdmi, u8 Offset, u8 Pattern); +void ReadModifyWriteTPI(struct hdmi_info *hdmi, u8 Offset, u8 Mask, u8 Value); +void tpi_clear_interrupt(struct hdmi_info *hdmi, u8 pattern); +bool tpi_init(struct hdmi_info *hdmi); +void ReadClearWriteTPI(struct hdmi_info *hdmi, u8 Offset, u8 Pattern); +s32 ReadBlockTPI(struct hdmi_info *hdmi, u8 TPI_Offset, u16 NBytes, u8 *pData); +bool IsRepeater(struct hdmi_info *hdmi); +bool GetDDC_Access(struct hdmi_info *hdmi, u8* SysCtrlRegVal); +bool ReleaseDDC(struct hdmi_info *hdmi, u8 SysCtrlRegVal); +int tpi_read_backdoor_register(struct hdmi_info *hdmi, u8 PageNum, u8 RegOffset) +; +void tpi_write_backdoor_register(struct hdmi_info *hdmi, u8 PageNum, u8 RegOffset, u8 RegValue); + +int HotPlugServiceLoop(struct hdmi_info *hdmi); + +int hdmi_active9022(struct i2c_client *client); +int hdmi_active9022_dup(struct i2c_client *client); +bool avc_send_avi_info_frames(struct hdmi_info *hdmi); +bool avc_init_video(struct hdmi_info *hdmi, u8 mode, u8 TclkSel, 
bool Init); +//void hdcp_on(struct hdmi_info *hdmi); +void hdcp_off(struct hdmi_info *hdmi); +void hdcp_check_status(struct hdmi_info *hdmi, u8 InterruptStatusImage); +void hdcp_init(struct hdmi_info *hdmi); +int hdcp_debugfs_init(struct hdmi_info *hdmi); + +extern u8 EDID_TempData[]; +u8 edid_simple_parsing(struct hdmi_info *hdmi); + +int edid_dump_hex(u8 *src, int src_size, char *output, int output_size); +bool edid_is_video_mode_supported(struct video_mode *vmode); +int edid_debugfs_init(struct hdmi_info *hdmi); +bool edid_check_sink_type(struct hdmi_info *hdmi); +bool edid_check_audio_support(struct hdmi_info *hdmi); +int HotPlugServiceLoop(struct hdmi_info *hdmi); +int tpi_prepare(struct hdmi_info *hdmi); + +//bool InitVideo(struct hdmi_info *hdmi, u8 Mode, u8 TclkSel, bool Init); +void avc_set_basic_audio(struct hdmi_info *hdmi); + +int hdmi_debugfs_init(struct hdmi_info *hdmi); +int tpi_debugfs_init(struct hdmi_info *hdmi); +ssize_t hdmi_dbgfs_open(struct inode *inode, struct file *file); + +void SetAudioMute(struct hdmi_info *hdmi, u8 audioMute); +void SetInputColorSpace(struct hdmi_info *hdmi, u8 inputColorSpace); + +void WriteBackDoorRegister(struct hdmi_info *hdmi, u8 PageNum, u8 RegOffset, u8 RegValue); + +#define TPI_INPUT_FORMAT 0x09 +#define TPI_OUTPUT_FORMAT 0x0A + +#define TPI_SYSTEM_CONTROL 0x1A + +#define TPI_DEVICE_POWER_STATE_CTRL_REG (0x1E) + +#define CTRL_PIN_CONTROL_MASK (BIT_4) +#define CTRL_PIN_TRISTATE (0x00) +#define CTRL_PIN_DRIVEN_TX_BRIDGE (0x10) + +#define TX_POWER_STATE_D0 (0x00) +#define TX_POWER_STATE_D1 (0x01) +#define TX_POWER_STATE_D2 (0x02) +#define TX_POWER_STATE_D3 (0x03) + +#define TPI_AUDIO_INTERFACE 0x26 + +#define TPI_HDCP_QUERY_DATA 0x29 +#define TPI_HDCP_CTRL 0x2A + +#endif diff --git a/drivers/video/msm/hdmi/include/tpi.h b/drivers/video/msm/hdmi/include/tpi.h new file mode 100644 index 0000000000000..db0271fd5ed1b --- /dev/null +++ b/drivers/video/msm/hdmi/include/tpi.h @@ -0,0 +1,411 @@ +#ifndef _TPI_H_ +#define _TPI_H_ + +#define TPI_PIX_CLK_LSB (0x00) +#define TPI_PIX_CLK_MSB (0x01) + +#define TPI_VERT_FREQ_LSB (0x02) +#define TPI_VERT_FREQ_MSB (0x03) + +#define TPI_TOTAL_PIX_LSB (0x04) +#define TPI_TOTAL_PIX_MSB (0x05) + +#define TPI_TOTAL_LINES_LSB (0x06) +#define TPI_TOTAL_LINES_MSB (0x07) + +// Pixel Repetition Data +//====================== + +#define TPI_PIX_REPETITION (0x08) + +// TPI AVI Input and Output Format Data +//===================================== + +/// AVI Input Format Data ================================================= /// + +#define TPI_INPUT_FORMAT_REG (0x09) + +//Finish this... 
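+// (Only the color space field in bits [1:0] is filled in so far; the driver
+// writes INPUT_COLOR_SPACE_RGB here from avc_init_video(), and hdcp_off()
+// presumably lands on the same field via SetInputColorSpace().)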
+ +#define INPUT_COLOR_SPACE_MASK (BIT_1 | BIT_0) +#define INPUT_COLOR_SPACE_RGB (0x00) +#define INPUT_COLOR_SPACE_YCBCR444 (0x01) +#define INPUT_COLOR_SPACE_YCBCR422 (0x02) +#define INPUT_COLOR_SPACE_BLACK_MODE (0x03) + +/// AVI Output Format Data ================================================ /// + +#define TPI_OUTPUT_FORMAT_REG (0x0A) + +#define TPI_YC_Input_Mode (0x0B) + +// TPI AVI InfoFrame Data +//======================= + +#define TPI_AVI_BYTE_0 (0x0C) +#define TPI_AVI_BYTE_1 (0x0D) +#define TPI_AVI_BYTE_2 (0x0E) +#define TPI_AVI_BYTE_3 (0x0F) +#define TPI_AVI_BYTE_4 (0x10) +#define TPI_AVI_BYTE_5 (0x11) + +#define TPI_AUDIO_BYTE_0 (0xBF) + +#define TPI_INFO_FRM_DBYTE5 0xC8 +#define TPI_INFO_FRM_DBYTE6 0xC9 + +#define TPI_END_TOP_BAR_LSB (0x12) +#define TPI_END_TOP_BAR_MSB (0x13) + +#define TPI_START_BTM_BAR_LSB (0x14) +#define TPI_START_BTM_BAR_MSB (0x15) + +#define TPI_END_LEFT_BAR_LSB (0x16) +#define TPI_END_LEFT_BAR_MSB (0x17) + +#define TPI_END_RIGHT_BAR_LSB (0x18) +#define TPI_END_RIGHT_BAR_MSB (0x19) + +// Colorimetry +//============ +#define SET_EX_COLORIMETRY 0x0C // Set TPI_AVI_BYTE_2 to extended colorimetry and use + //TPI_AVI_BYTE_3 + +// ===================================================== // + +#define TPI_SYSTEM_CONTROL_DATA_REG (0x1A) + +#define LINK_INTEGRITY_MODE_MASK (BIT_6) +#define LINK_INTEGRITY_STATIC (0x00) +#define LINK_INTEGRITY_DYNAMIC (0x40) + +#define TMDS_OUTPUT_CONTROL_MASK (BIT_4) +#define TMDS_OUTPUT_CONTROL_ACTIVE (0x00) +#define TMDS_OUTPUT_CONTROL_POWER_DOWN (0x10) + +#define AV_MUTE_MASK (BIT_3) +#define AV_MUTE_NORMAL (0x00) +#define AV_MUTE_MUTED (0x08) + +#define DDC_BUS_REQUEST_MASK (BIT_2) +#define DDC_BUS_REQUEST_NOT_USING (0x00) +#define DDC_BUS_REQUEST_REQUESTED (0x04) + +#define DDC_BUS_GRANT_MASK (BIT_1) +#define DDC_BUS_GRANT_NOT_AVAILABLE (0x00) +#define DDC_BUS_GRANT_GRANTED (0x02) + +#define OUTPUT_MODE_MASK (BIT_0) +#define OUTPUT_MODE_DVI (0x00) +#define OUTPUT_MODE_HDMI (0x01) + + +// TPI Identification Registers +//============================= + +#define TPI_DEVICE_ID (0x1B) +#define TPI_DEVICE_REV_ID (0x1C) + +#define TPI_RESERVED2 (0x1D) + +// ===================================================== // + +#define TPI_DEVICE_POWER_STATE_CTRL_REG (0x1E) + +#define CTRL_PIN_CONTROL_MASK (BIT_4) +#define CTRL_PIN_TRISTATE (0x00) +#define CTRL_PIN_DRIVEN_TX_BRIDGE (0x10) + +#define TX_POWER_STATE_MASK (BIT_1 | BIT_0) +#define TX_POWER_STATE_D0 (0x00) +#define TX_POWER_STATE_D1 (0x01) +#define TX_POWER_STATE_D2 (0x02) +#define TX_POWER_STATE_D3 (0x03) + +// Configuration of I2S Interface +//=============================== + +#define TPI_I2S_EN (0x1F) +#define TPI_I2S_IN_CFG (0x20) + +// Available only when TPI 0x26[7:6]=10 to select I2S input +//========================================================= +#define TPI_I2S_CHST_0 (0x21) +#define TPI_I2S_CHST_1 (0x22) +#define TPI_I2S_CHST_2 (0x23) +#define TPI_I2S_CHST_3 (0x24) +#define TPI_I2S_CHST_4 (0x25) + + +// Available only when 0x26[7:6]=01 +//================================= +#define TPI_SPDIF_HEADER (0x24) +#define TPI_AUDIO_HANDLING (0x25) + + +// Audio Configuration Regiaters +//============================== +#define TPI_AUDIO_INTERFACE_REG (0x26) + +// Finish this... 
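+// (Only the mute bit is filled in so far; SetAudioMute() presumably toggles
+// this register between AUDIO_MUTE_NORMAL and AUDIO_MUTE_MUTED.)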
+ +#define AUDIO_MUTE_MASK (BIT_4) +#define AUDIO_MUTE_NORMAL (0x00) +#define AUDIO_MUTE_MUTED (0x10) + + + + + + +#define TPI_AUDIO_SAMPLE_CTRL (0x27) + +#define TPI_SPEAKER_CFG (0xC7) +#define TPI_CHANNEL_COUNT (0xC4) + +/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// + +/*\ +| | HDCP Implementation +| | +| | HDCP link security logic is implemented in certain transmitters; unique +| | keys are embedded in each chip as part of the solution. The security +| | scheme is fully automatic and handled completely by the hardware. +\*/ + +/// HDCP Query Data Register ============================================== /// + +#define TPI_HDCP_QUERY_DATA_REG (0x29) + +#define EXTENDED_LINK_PROTECTION_MASK (BIT_7) +#define EXTENDED_LINK_PROTECTION_NONE (0x00) +#define EXTENDED_LINK_PROTECTION_SECURE (0x80) + +#define LOCAL_LINK_PROTECTION_MASK (BIT_6) +#define LOCAL_LINK_PROTECTION_NONE (0x00) +#define LOCAL_LINK_PROTECTION_SECURE (0x40) + +#define LINK_STATUS_MASK (BIT_5 | BIT_4) +#define LINK_STATUS_NORMAL (0x00) +#define LINK_STATUS_LINK_LOST (0x10) +#define LINK_STATUS_RENEGOTIATION_REQ (0x20) +#define LINK_STATUS_LINK_SUSPENDED (0x30) + +#define HDCP_REPEATER_MASK (BIT_3) +#define HDCP_REPEATER_NO (0x00) +#define HDCP_REPEATER_YES (0x08) + +#define CONNECTOR_TYPE_MASK (BIT_2 | BIT_0) +#define CONNECTOR_TYPE_DVI (0x00) +#define CONNECTOR_TYPE_RSVD (0x01) +#define CONNECTOR_TYPE_HDMI (0x04) +#define CONNECTOR_TYPE_FUTURE (0x05) + +#define PROTECTION_TYPE_MASK (BIT_1) +#define PROTECTION_TYPE_NONE (0x00) +#define PROTECTION_TYPE_HDCP (0x02) + +/// HDCP Control Data Register ============================================ /// + +#define TPI_HDCP_CONTROL_DATA_REG (0x2A) + +#define PROTECTION_LEVEL_MASK (BIT_0) +#define PROTECTION_LEVEL_MIN (0x00) +#define PROTECTION_LEVEL_MAX (0x01) + +/// HDCP BKSV Registers =================================================== /// + +#define TPI_BKSV_1_REG (0x2B) +#define TPI_BKSV_2_REG (0x2C) +#define TPI_BKSV_3_REG (0x2D) +#define TPI_BKSV_4_REG (0x2E) +#define TPI_BKSV_5_REG (0x2F) + +/// HDCP Revision Data Register =========================================== /// + +#define TPI_HDCP_REVISION_DATA_REG (0x30) + +#define HDCP_MAJOR_REVISION_MASK (BIT_7 | BIT_6 | BIT_5 | BIT_4) +#define HDCP_MAJOR_REVISION_VALUE (0x10) + +#define HDCP_MINOR_REVISION_MASK (BIT_3 | BIT_2 | BIT_1 | BIT_0) +#define HDCP_MINOR_REVISION_VALUE (0x02) + +/// HDCP KSV and V' Value Data Register =================================== /// + +#define TPI_V_PRIME_SELECTOR_REG (0x31) + +/// V' Value Readback Registers =========================================== /// + +#define TPI_V_PRIME_7_0_REG (0x32) +#define TPI_V_PRIME_15_9_REG (0x33) +#define TPI_V_PRIME_23_16_REG (0x34) +#define TPI_V_PRIME_31_24_REG (0x35) + +/// HDCP AKSV Registers =================================================== /// + +#define TPI_AKSV_1_REG (0x36) +#define TPI_AKSV_2_REG (0x37) +#define TPI_AKSV_3_REG (0x38) +#define TPI_AKSV_4_REG (0x39) +#define TPI_AKSV_5_REG (0x3A) + +/*\ +| | Interrupt Service +| | +| | TPI can be configured to generate an interrupt to the host to notify it of +| | various events. The host can either poll for activity or use an interrupt +| | handler routine. TPI generates on a single interrupt (INT) to the host. 
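+| |
+| | (This driver keeps both options available: struct hdmi_info carries a
+| | polling flag and polling_work as well as an isr_enabled flag.)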
+\*/ + +/// Interrupt Enable Register ============================================= /// + +#define TPI_INTERRUPT_ENABLE_REG (0x3C) + +#define HDCP_AUTH_STATUS_CHANGE_EN_MASK (BIT_7) +#define HDCP_AUTH_STATUS_CHANGE_DISABLE (0x00) +#define HDCP_AUTH_STATUS_CHANGE_ENABLE (0x80) + +#define HDCP_VPRIME_VALUE_READY_EN_MASK (BIT_6) +#define HDCP_VPRIME_VALUE_READY_DISABLE (0x00) +#define HDCP_VPRIME_VALUE_READY_ENABLE (0x40) + +#define HDCP_SECURITY_CHANGE_EN_MASK (BIT_5) +#define HDCP_SECURITY_CHANGE_DISABLE (0x00) +#define HDCP_SECURITY_CHANGE_ENABLE (0x20) + +#define AUDIO_ERROR_EVENT_EN_MASK (BIT_4) +#define AUDIO_ERROR_EVENT_DISABLE (0x00) +#define AUDIO_ERROR_EVENT_ENABLE (0x10) + +#define CPI_EVENT_NO_RX_SENSE_MASK (BIT_3) +#define CPI_EVENT_NO_RX_SENSE_DISABLE (0x00) +#define CPI_EVENT_NO_RX_SENSE_ENABLE (0x08) + +#define RECEIVER_SENSE_EVENT_EN_MASK (BIT_1) +#define RECEIVER_SENSE_EVENT_DISABLE (0x00) +#define RECEIVER_SENSE_EVENT_ENABLE (0x02) + +#define HOT_PLUG_EVENT_EN_MASK (BIT_0) +#define HOT_PLUG_EVENT_DISABLE (0x00) +#define HOT_PLUG_EVENT_ENABLE (0x01) + +/// Interrupt Status Register ============================================= /// + +#define TPI_INTERRUPT_STATUS_REG (0x3D) + +#define HDCP_AUTH_STATUS_CHANGE_EVENT_MASK (BIT_7) +#define HDCP_AUTH_STATUS_CHANGE_EVENT_NO (0x00) +#define HDCP_AUTH_STATUS_CHANGE_EVENT_YES (0x80) + +#define HDCP_VPRIME_VALUE_READY_EVENT_MASK (BIT_6) +#define HDCP_VPRIME_VALUE_READY_EVENT_NO (0x00) +#define HDCP_VPRIME_VALUE_READY_EVENT_YES (0x40) + +#define HDCP_SECURITY_CHANGE_EVENT_MASK (BIT_5) +#define HDCP_SECURITY_CHANGE_EVENT_NO (0x00) +#define HDCP_SECURITY_CHANGE_EVENT_YES (0x20) + +#define AUDIO_ERROR_EVENT_MASK (BIT_4) +#define AUDIO_ERROR_EVENT_NO (0x00) +#define AUDIO_ERROR_EVENT_YES (0x10) + +#define CPI_EVENT_MASK (BIT_3) +#define CPI_EVENT_NO (0x00) +#define CPI_EVENT_YES (0x08) +#define RX_SENSE_MASK (BIT_3) // This bit is dual purpose depending on the value of 0x3C[3] +#define RX_SENSE_NOT_ATTACHED (0x00) +#define RX_SENSE_ATTACHED (0x08) + +#define HOT_PLUG_PIN_STATE_MASK (BIT_2) +#define HOT_PLUG_PIN_STATE_LOW (0x00) +#define HOT_PLUG_PIN_STATE_HIGH (0x04) + +#define RECEIVER_SENSE_EVENT_MASK (BIT_1) +#define RECEIVER_SENSE_EVENT_NO (0x00) +#define RECEIVER_SENSE_EVENT_YES (0x02) + +#define HOT_PLUG_EVENT_MASK (BIT_0) +#define HOT_PLUG_EVENT_NO (0x00) +#define HOT_PLUG_EVENT_YES (0x01) + +/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /// + +// Sync Register Configuration and Sync Monitoring Registers +//========================================================== + +#define TPI_SYNC_GEN_CTRL (0x60) +#define TPI_SYNC_POLAR_DETECT (0x61) + +// Explicit Sync DE Generator Registers (TPI 0x60[7]=0) +//===================================================== + +#define TPI_DE_DLY (0x62) +#define TPI_DE_CTRL (0x63) +#define TPI_DE_TOP (0x64) + +#define TPI_RESERVED4 (0x65) + +#define TPI_DE_CNT_7_0 (0x66) +#define TPI_DE_CNT_11_8 (0x67) + +#define TPI_DE_LIN_7_0 (0x68) +#define TPI_DE_LIN_10_8 (0x69) + +#define TPI_DE_H_RES_7_0 (0x6A) +#define TPI_DE_H_RES_10_8 (0x6B) + +#define TPI_DE_V_RES_7_0 (0x6C) +#define TPI_DE_V_RES_10_8 (0x6D) + +// Embedded Sync Register Set (TPI 0x60[7]=1) +//=========================================== + +#define TPI_HBIT_TO_HSYNC_7_0 (0x62) +#define TPI_HBIT_TO_HSYNC_9_8 (0x63) +#define TPI_FIELD_2_OFFSET_7_0 (0x64) +#define TPI_FIELD_2_OFFSET_11_8 (0x65) +#define TPI_HWIDTH_7_0 (0x66) +#define TPI_HWIDTH_8_9 (0x67) +#define TPI_VBIT_TO_VSYNC (0x68) +#define TPI_VWIDTH (0x69) + +// 
TPI Enable Register +//==================== + +#define TPI_ENABLE (0xC7) + +// Misc InfoFrames +//================ +#define MISC_INFO_FRAMES_CTRL (0xBF) +#define MISC_INFO_FRAMES_TYPE (0xC0) +#define MISC_INFO_FRAMES_VER (0xC1) +#define MISC_INFO_FRAMES_LEN (0xC2) +#define MISC_INFO_FRAMES_CHKSUM (0xC3) + +// Backdoor Register Offsets +//========================== +#define INTERNAL_PAGE_0 0x00 +#define INTERNAL_PAGE_1 0x01 +#define DEVICE_ID_LOW_BYTE 0x02 +#define DEVICE_ID_HI_BYTE 0x03 +#define AUDIO_INPUT_LENGTH 0x24 + + +#define SW_RESET 0x05 +#define POWER_DOWN 0x6F + + +// Backdoor constants +//=================== + +#define DIV_BY_2 0x00 +#define MULT_BY_1 0x01 +#define MULT_BY_2 0x02 +#define MULT_BY_4 0x03 + +#define INTERNAL_PAGE_1 0x01 +#define INTERNAL_PAGE_2 0x02 +#define TMDS_CONT_REG 0x82 + +#endif diff --git a/drivers/video/msm/hdmi/silicon-image/Makefile b/drivers/video/msm/hdmi/silicon-image/Makefile new file mode 100644 index 0000000000000..37cda94e17518 --- /dev/null +++ b/drivers/video/msm/hdmi/silicon-image/Makefile @@ -0,0 +1,7 @@ +sil-objs = \ + tpi.o \ + hdcp.o \ + av_config.o \ + debug-sil902x.o + +obj-$(CONFIG_MSM_HDMI) += sil.o diff --git a/drivers/video/msm/hdmi/silicon-image/av_config.c b/drivers/video/msm/hdmi/silicon-image/av_config.c new file mode 100644 index 0000000000000..22be4b67d3953 --- /dev/null +++ b/drivers/video/msm/hdmi/silicon-image/av_config.c @@ -0,0 +1,373 @@ +/* + * Copyright (C) 2009 HTC + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* TODO: + * 1. Mutex while active + */ +#include +#include +#include "../include/fb-hdmi.h" +#include "../include/sil902x.h" +#include "../include/tpi.h" + +#if 1 +#define HDMI_DBG(s...) printk("[hdmi/avc]" s) +#else +#define HDMI_DBG(s...) 
do {} while (0) +#endif + +static u8 video_param[][8] = { + [hd_720p] = {0x01, 0x1d, 0x70, 0x17, 0x72, 0x06, 0xee, 0x02}, + [svga] = {0xa0, 0x0f, 0x70, 0x17, 0x20, 0x04, 0x74, 0x02}, + [pal] = {0x8c, 0x0a, 0x88, 0x13, 0x60, 0x03, 0x71, 0x02}, /* 576p50 */ + [edtv] = {0x8c, 0x0a, 0x70, 0x17, 0x5a, 0x03, 0x0d, 0x02},/* 480p60 */ + [vga] = {0x8c, 0x0a, 0x70, 0x17, 0x20, 0x03, 0x0d, 0x02}, +}; + +#if 0 +static u8 avi_info_frame[][14] = { + [hd_720p] = {0x32, 0x0d, 0xa8, 0x84, 0x04, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00}, + [svga] = {0xd1, 0x0e, 0x08, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00}, + [pal] = {0x64, 0x0d, 0x68, 0x84, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00}, /* 576p50 */ + [edtv] = {0x73, 0x0d, 0x68, 0x84, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00}, + [vga] = {0x5E, 0x00, 0x10, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00}, +}; +#else +static u8 avi_info_frame[][14] = { + [hd_720p] = {0x43, 0x00, 0x28, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00}, + [svga] = {0x6f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00}, + [pal] = {0x46, 0x00, 0x18, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00}, /* 576p50 */ + [edtv] = {0x55, 0x00, 0x18, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00}, + [vga] = {0x5E, 0x00, 0x10, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00}, +}; +#endif + +static u8 audio_info_frame[] = + { 0xc2, 0x84, 0x01, 0x0a, 0x71, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + +#if 0 +int hdmi_active9022(struct i2c_client *client) +{ + int i, ret = -EIO; + struct hdmi_info *info = i2c_get_clientdata(client); + u8 *video_parm = &video_param[info->res][0]; + + HDMI_DBG("%s+\n", __func__); + mutex_lock(&info->polling_lock); + mutex_lock(&info->lock); + + //hdcp_off(info); + DisableTMDS(info); + msleep(128); + + /* choose video mode */ + ret = i2c_smbus_write_i2c_block_data(client, 0, 8, video_parm); + + /* wakeup Sil9022 to D0 state */ + ret = hdmi_write_byte(client, HDMI_POWER, 0); + + if (edid_check_sink_type(info)) { + /* HDMI Output */ + ReadModifyWriteTPI(info, TPI_SYSTEM_CONTROL, + OUTPUT_MODE_MASK, OUTPUT_MODE_HDMI); + } else { + ReadModifyWriteTPI(info, TPI_SYSTEM_CONTROL, + OUTPUT_MODE_MASK, OUTPUT_MODE_DVI); + } + + if (edid_check_audio_support(info)) { + /* audio configuration */ + ret = hdmi_write_byte(client, 0x26, 0x91); + ret = hdmi_write_byte(client, 0x25, 0x03); + ret = hdmi_write_byte(client, 0x27, 0x59); + ret = hdmi_write_byte(client, 0x28, 0x00); + ret = hdmi_write_byte(client, 0x1f, 0x80); + ret = hdmi_write_byte(client, 0x20, 0x90); + ret = hdmi_write_byte(client, 0x21, 0x00); + //ret = hdmi_write_byte(client, 0x24, 0x00);//0x00 for 44.1k + ret = hdmi_write_byte(client, 0x24, 0x02);//0x02 for 48k + ret = hdmi_write_byte(client, 0x25, 0x00); + ret = hdmi_write_byte(client, 0xbc, 0x02); + ret = hdmi_write_byte(client, 0xbd, 0x24); + ret = hdmi_write_byte(client, 0xbe, 0x92); + ret = hdmi_write_byte(client, 0xbc, 0x02); + ret = hdmi_write_byte(client, 0xbd, 0x2f); + ret = hdmi_write_byte(client, 0xbe, 0); + ret = hdmi_write_byte(client, 0x26, 0x81); + } else { + SetAudioMute(info, AUDIO_MUTE_MUTED); + } + ret = hdmi_write_byte(client, HDMI_PIXEL_DATA, 0x60); + + ret = i2c_smbus_write_i2c_block_data(client, HDMI_AUDIO_INFO_FRAME, + 15, audio_info_frame); + hdmi_write_byte(client, 0x09, 0); + hdmi_write_byte(client, 0x0a, 0); + for (i = 0; i < 14 ;i++) + 
hdmi_write_byte(client, 0xc + i, avi_info_frame[info->res][i]); + + EnableTMDS(info); + HDMI_DBG("%s-\n", __func__); + mutex_unlock(&info->lock); + mutex_unlock(&info->polling_lock); + + return ret; +} +#endif + +int avc_set_video_parm(struct hdmi_info *hdmi) +{ + int ret; + u8 *video_parm = video_param[hdmi->res]; + + ret = i2c_smbus_write_i2c_block_data(hdmi->client, 0, 8, video_parm); + + return ret; +} + +int avc_set_blank_screen(struct hdmi_info *hdmi) +{ + HDMI_DBG("%s+\n", __func__); + hdmi_write_byte(hdmi->client, 0x09, 0x03); + hdmi_write_byte(hdmi->client, 0x19, 0x00); + hdmi_write_byte(hdmi->client, 0x26, + hdmi_read(hdmi->client, 0x26) | 0x10); +} + +int hdmi_active9022_dup(struct i2c_client *client) +{ + int i, ret = -EIO; + struct hdmi_info *info = i2c_get_clientdata(client); + u8 *video_parm = &video_param[info->res][0]; + + HDMI_DBG("%s+\n", __func__); + + //hdcp_off(info); + DisableTMDS(info); + msleep(128); + + /* choose video mode */ + ret = i2c_smbus_write_i2c_block_data(client, 0, 8, video_parm); + + /* wakeup Sil9022 to D0 state */ + ret = hdmi_write_byte(client, HDMI_POWER, 0); + + if (edid_check_sink_type(info)) { + /* HDMI Output */ + ReadModifyWriteTPI(info, TPI_SYSTEM_CONTROL, + OUTPUT_MODE_MASK, OUTPUT_MODE_HDMI); + } else { + ReadModifyWriteTPI(info, TPI_SYSTEM_CONTROL, + OUTPUT_MODE_MASK, OUTPUT_MODE_DVI); + } + + if (edid_check_audio_support(info)) { + /* audio configuration */ + ret = hdmi_write_byte(client, 0x26, 0x91); + ret = hdmi_write_byte(client, 0x25, 0x03); + ret = hdmi_write_byte(client, 0x27, 0x59); + ret = hdmi_write_byte(client, 0x28, 0x00); + ret = hdmi_write_byte(client, 0x1f, 0x80); + ret = hdmi_write_byte(client, 0x20, 0x90); + ret = hdmi_write_byte(client, 0x21, 0x00); + //ret = hdmi_write_byte(client, 0x24, 0x00);//0x00 for 44.1k + ret = hdmi_write_byte(client, 0x24, 0x02);//0x02 for 48k + ret = hdmi_write_byte(client, 0x25, 0x00); + ret = hdmi_write_byte(client, 0xbc, 0x02); + ret = hdmi_write_byte(client, 0xbd, 0x24); + ret = hdmi_write_byte(client, 0xbe, 0x92); + ret = hdmi_write_byte(client, 0xbc, 0x02); + ret = hdmi_write_byte(client, 0xbd, 0x2f); + ret = hdmi_write_byte(client, 0xbe, 0); + ret = hdmi_write_byte(client, 0x26, 0x81); + } else { + SetAudioMute(info, AUDIO_MUTE_MUTED); + } + ret = hdmi_write_byte(client, HDMI_PIXEL_DATA, 0x60); + + ret = i2c_smbus_write_i2c_block_data(client, HDMI_AUDIO_INFO_FRAME, + 15, audio_info_frame); + hdmi_write_byte(client, 0x09, 0x03); + hdmi_write_byte(client, 0x0a, 0); + for (i = 0; i < 14 ;i++) + hdmi_write_byte(client, 0xc + i, avi_info_frame[info->res][i]); + + EnableTMDS(info); + + HDMI_DBG("%s-\n", __func__); + return ret; +} + +bool avc_send_avi_info_frames(struct hdmi_info *hdmi) +{ + int i; + + HDMI_DBG("%s res=%d\n", __func__, hdmi->res); + for (i = 0; i < 14 ;i++) + hdmi_write_byte(hdmi->client, 0xc + i, + avi_info_frame[hdmi->res][i]); + + return true; +} + +#if 0 +/* FIXME: intergrate with active9022 */ +bool InitVideo(struct hdmi_info *hdmi, u8 Mode, u8 TclkSel, bool Init) +{ + int Pattern, ret; + u8 *video_parm = &video_param[hdmi->res][0]; + + /* Use TPI 0x08[7:6] for 9022A/24A video clock multiplier */ + Pattern = (1 << 6) & 0xc0; + ReadSetWriteTPI(hdmi, TPI_PIX_REPETITION, Pattern); + + ret = i2c_smbus_write_block_data(hdmi->client, 0, 8, video_parm); + + /* input format */ + hdmi_write_byte(hdmi->client, 0x09, 0); + hdmi_write_byte(hdmi->client, 0x0a, 0); + if(Init) { + hdmi_write_byte(hdmi->client, 0x08, 0x60); + hdmi_write_byte(hdmi->client, 0x09, 0); + 
hdmi_write_byte(hdmi->client, 0x0a, 0); + hdmi_write_byte(hdmi->client, 0x60, 0x4); + } + + // termination ??? + //ret = (read_back_door_register(0x1, 0x82) & 0x3f ) | 0x25; + //write_back_door_register(); +#if 0 + for (i = 0; i < 14 ;i++) { + hdmi_write_byte(hdmi->client, 0xc + i, avi_info_frame[info->res][i]); + } +#endif + + return true; +} +#endif + +bool avc_init_video(struct hdmi_info *hdmi, u8 mode, u8 TclkSel, bool Init) +{ + int ret; + u8 *video_parm = &video_param[hdmi->res][0]; + + HDMI_DBG("%s\n", __func__); + /* Use TPI 0x08[7:6] for 9022A/24A video clock multiplier */ + hdmi_write_byte(hdmi->client, HDMI_PIXEL_DATA, 0x60); + + ret = i2c_smbus_write_i2c_block_data(hdmi->client, 0, 8, video_parm); + + /* input format */ + hdmi_write_byte(hdmi->client, TPI_INPUT_FORMAT_REG, 0x00); + hdmi_write_byte(hdmi->client, TPI_OUTPUT_FORMAT_REG, 0x00); +#if 0 + if (Init) { + hdmi_write_byte(hdmi->client, 0x08, 0x60); + hdmi_write_byte(hdmi->client, 0x09, 0); + hdmi_write_byte(hdmi->client, 0x0a, 0); + /* Default to External Sync mode + disable VSync adjustment */ + hdmi_write_byte(hdmi->client, 0x60, 0x4); + /* Disable DE generator by default */ + hdmi_write_byte(hdmi->client, 0x63, 0x0); + } + +#endif + ret = (tpi_read_backdoor_register(hdmi, INTERNAL_PAGE_1, TMDS_CONT_REG) + & 0x3f ) | 0x25; + tpi_write_backdoor_register(hdmi, INTERNAL_PAGE_1, TMDS_CONT_REG, ret); + + return true; +} + +void avc_set_basic_audio(struct hdmi_info *hdmi) +{ + int ret; + struct i2c_client *client = hdmi->client; + HDMI_DBG("%s\n", __func__); + + ret = hdmi_write_byte(client, 0x26, 0x91); + ret = hdmi_write_byte(client, 0x25, 0x03); + ret = hdmi_write_byte(client, 0x27, 0x59); + ret = hdmi_write_byte(client, 0x28, 0x00); + ret = hdmi_write_byte(client, 0x1f, 0x80); + ret = hdmi_write_byte(client, 0x20, 0x90); + ret = hdmi_write_byte(client, 0x21, 0x00); + //ret = hdmi_write_byte(client, 0x24, 0x00);//0x00 for 44.1k + ret = hdmi_write_byte(client, 0x24, 0x02);//0x02 for 48k + ret = hdmi_write_byte(client, 0x25, 0x00); + ret = hdmi_write_byte(client, 0xbc, 0x02); + ret = hdmi_write_byte(client, 0xbd, 0x24); + ret = hdmi_write_byte(client, 0xbe, 0x92); + ret = hdmi_write_byte(client, 0xbc, 0x02); + ret = hdmi_write_byte(client, 0xbd, 0x2f); + ret = hdmi_write_byte(client, 0xbe, 0); + ret = hdmi_write_byte(client, 0x26, 0x81); + + ret = i2c_smbus_write_i2c_block_data(client, HDMI_AUDIO_INFO_FRAME, + 15, audio_info_frame); +} + +/* simplifier version of ChangeVideoMode() */ +u8 avc_change_video_mode(struct hdmi_info *hdmi, int *resolution) +{ + HDMI_DBG("%s\n", __func__); + + hdcp_off(hdmi); + DisableTMDS(hdmi); + /* allow control InfoFrames to pass through to the sink device. 
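+ * (TMDS output stays disabled across the 128 ms delay below while the video
+ * mode and AVI InfoFrame are reprogrammed; EnableTMDS() is only called again
+ * at the end of this function.)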
*/ + mdelay(128); + + // FIXME: video mode + avc_init_video(hdmi, vid_mode, 0, 0); + + hdmi_write_byte(hdmi->client, TPI_PIX_REPETITION, 0x60); + hdmi_write_byte(hdmi->client, TPI_INPUT_FORMAT, 0); + + if (edid_check_sink_type(hdmi)) + ReadSetWriteTPI(hdmi, 0x1a, 0x01); + else + ReadSetWriteTPI(hdmi, 0x1a, 0x00); + + /* FIXME: 720p/480p ?*/ + hdmi_write_byte(hdmi->client, TPI_OUTPUT_FORMAT, 0); + + /* set 0x60[7] = 0 for External Sync */ + ReadClearWriteTPI(hdmi, TPI_SYNC_GEN_CTRL, 0x80); + /* clear 0x63[6] = 0 to disable internal DE */ + ReadClearWriteTPI(hdmi, 0x63, 1 << 6); + + /* InfoFrames - only if output mode is HDMI */ + if (edid_check_sink_type(hdmi)) + avc_send_avi_info_frames(hdmi); + + /* SETTING UP AVI InfoFrames CLEARS 0x63 and 0x60[5] */ + hdmi_write_byte(hdmi->client, TPI_SYNC_GEN_CTRL, 1 << 2); + /* SETTING UP AVI InfoFrames CLEARS 0x63 */ + hdmi_write_byte(hdmi->client, 0x63, 0); + + /* YC Input Mode Select */ + hdmi_write_byte(hdmi->client, 0x0b, 0); // 0x0b + EnableTMDS(hdmi); + + return VIDEO_MODE_SET_OK; +} diff --git a/drivers/video/msm/hdmi/silicon-image/debug-sil902x.c b/drivers/video/msm/hdmi/silicon-image/debug-sil902x.c new file mode 100644 index 0000000000000..1c61b30546993 --- /dev/null +++ b/drivers/video/msm/hdmi/silicon-image/debug-sil902x.c @@ -0,0 +1,109 @@ +#include +#include + +#include +#include + +#include "../include/fb-hdmi.h" +#include "../include/sil902x.h" + +//#define HDMI_DEBUGFS + +#if defined(HDMI_DEBUGFS) +static spinlock_t hdmi_dbgfs_lock; +ssize_t hdmi_dbgfs_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t hdmi_dbgfs_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + static char line[80], buffer[80*8*4]; + static char hextab[] = "0123456789abcdefg"; + int i, j, n = 0, v, len, offset, line_size; + unsigned long irq_flags; + struct hdmi_info *hdmi = (struct hdmi_info*)filp->private_data; + + len = ((int)hdmi->edid_buf[0x7e]+1) * 128; + spin_lock_irqsave(&hdmi_dbgfs_lock, irq_flags); + memset(line, ' ', 79); + line[79] = '\0'; + offset = strlen("0000 | "); + line_size = offset + 3 * 16 + 1; + + for (i = 0; i < len / 16 ; i++) { + scnprintf(line, offset + 1, "%04x | ", (i << 4)); + for (j = 0; j < 16 ; j++) { + v = hdmi->edid_buf[i * 16 + j]; + line[offset + j * 3] = hextab[v / 16]; + line[offset + j * 3 + 1] = hextab[v % 16]; + } + line[line_size - 1] = '\n'; + strncpy(buffer + i * line_size, line, line_size); + n += line_size; + } + spin_unlock_irqrestore(&hdmi_dbgfs_lock, irq_flags); + return simple_read_from_buffer(buf, count, ppos, buffer, n); +} + +#if 0 +static ssize_t hdmi_dbgfs_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned long v; + unsigned long irq_flags; + char buff[80]; + struct tv_reg_data *trd = (struct tv_reg_data *)filp->private_data; + + if (count >= sizeof(buff)) + return -EINVAL; + if (copy_from_user(&buff, buf, 80)) + return -EFAULT; + buff[count] = 0; + +#if 0 + spin_lock_irqsave(&hdmi_dbgfs_lock, irq_flags); + strict_strtoul(buff, 16, &v); + buff[strlen(buff)]=0; + writel(v, tvenc_base+trd->offset); + spin_unlock_irqrestore(&hdmi_dbgfs_lock, irq_flags); +#endif + + return count; +} +#endif + +static struct file_operations hdmi_fops[] = { + { + .open = hdmi_dbgfs_open, + .read = hdmi_dbgfs_read, + } +}; + +int hdmi_debugfs_init(struct hdmi_info *hdmi) +{ + HDMI_DBG("%s\n", __func__); + spin_lock_init(&hdmi_dbgfs_lock); + hdmi->debug_dir = debugfs_create_dir(HDMI_DEBUGFS_ROOT, 
0); + if (IS_ERR(hdmi->debug_dir)) + return PTR_ERR(hdmi->debug_dir); + + // FIXME: error handling + debugfs_create_file("dummy", 0644, hdmi->debug_dir, hdmi, + &hdmi_fops[0]); + edid_debugfs_init(hdmi); + tpi_debugfs_init(hdmi); + hdcp_debugfs_init(hdmi); +/* + int ret; + if (!ret) { + pr_err("%s: failure on debugfs_create_file()\n", __func__); + return -1; + } +*/ + + return 0; +} +#endif diff --git a/drivers/video/msm/hdmi/silicon-image/hdcp.c b/drivers/video/msm/hdmi/silicon-image/hdcp.c new file mode 100644 index 0000000000000..474a84c56a332 --- /dev/null +++ b/drivers/video/msm/hdmi/silicon-image/hdcp.c @@ -0,0 +1,354 @@ +#include +#include +#include +#include + +#include "../include/fb-hdmi.h" +#include "../include/sil902x.h" +#include "../include/tpi.h" + +#if 1 +#define HDCP_DBG(fmt, arg...) printk( "[hdmi/hdcp]%s: " fmt, __func__, ##arg) +#else +#define HDCP_DBG(fmt...) do {} while (0) +#endif + +#define hdcp_err(fmt, arg...) pr_err( "[hdmi/hdcp]%s: " fmt, __func__, ##arg) + +//#define HDCP_DEBUG /* Remove this definition when releasing!!! */ +#if defined(HDCP_DEBUG) +#define HDCP_OVERWRITE_CONTROL 0x1 +#define HDCP_SUPPORT 0x2 +#define HDCP_AVALABLE 0x4 +static int hdcp_control = 0x7; +module_param_named(hdcp_control, hdcp_control, int, + S_IRUGO | S_IWUSR | S_IWGRP); +#endif + +bool HDCP_TxSupports; +bool HDCP_Started; +u8 HDCP_LinkProtectionLevel; + +////////////////////////////////////////////////////////////////////////////// +// FUNCTION : IsHDCP_Supported() +// PURPOSE : Check Tx revision number to find if this Tx supports HDCP +// by reading the HDCP revision number from TPI register 0x30. +// RETURNS : true if Tx supports HDCP. false if not. +////////////////////////////////////////////////////////////////////////////// +bool hdcp_check_support(struct hdmi_info *hdmi) +{ + u8 HDCP_Rev; + bool HDCP_Supported; + + HDCP_Supported = true; + /* Check Device ID */ + HDCP_Rev = hdmi_read(hdmi->client, TPI_HDCP_REVISION_DATA_REG); + if (HDCP_Rev != + (HDCP_MAJOR_REVISION_VALUE | HDCP_MINOR_REVISION_VALUE)) + HDCP_Supported = false; + + HDCP_DBG("ret=%d\n", HDCP_Supported); + return HDCP_Supported; +} + +////////////////////////////////////////////////////////////////////////////// +// FUNCTION : AreAKSV_OK() +// PURPOSE : Check if AKSVs contain 20 '0' and 20 '1' +// INPUT PARAMS : None +// OUTPUT PARAMS : None +// GLOBALS USED : TBD +// RETURNS : true if 20 zeros and 20 ones found in AKSV. false OTHERWISE +////////////////////////////////////////////////////////////////////////////// +static bool hdcp_check_aksv(struct hdmi_info *hdmi) +{ + int ret; + u8 B_Data[AKSV_SIZE]; + u8 i, j, NumOfOnes = 0; + + memset(B_Data, 0, AKSV_SIZE); +#if 0 + ReadBlockTPI(hdmi, TPI_AKSV_1_REG, AKSV_SIZE, B_Data); +#else + for (i = 0; i < 5; i++) { + B_Data[i] = hdmi_read(hdmi->client, TPI_AKSV_1_REG+i); + } +#endif + HDCP_DBG(" askv={%02x, %02x, %02x, %02x, %02x}\n", + B_Data[0], B_Data[1], B_Data[2], B_Data[3], B_Data[4]); + for (i=0; i < AKSV_SIZE; i++) + for (j=0; j < BYTE_SIZE; j++) { + if (B_Data[i] & 0x01) + NumOfOnes++; + B_Data[i] >>= 1; + } + if (NumOfOnes != NUM_OF_ONES_IN_KSV) + ret = false; + else ret = true; + + HDCP_DBG(":ret=%s\n", ret ? "true" : "false"); + + return true; +} + +////////////////////////////////////////////////////////////////////////////// +// FUNCTION : HDCP_On() +// PURPOSE : Switch hdcp on. 
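+// NOTE         :  The "caller" argument is only used for the debug trace
+//                  printed on entry.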
+// INPUT PARAMS : None +// OUTPUT PARAMS: None +// GLOBALS USED : HDCP_Started set to true +// RETURNS : None +////////////////////////////////////////////////////////////////////////////// +//void hdcp_on(struct hdmi_info *hdmi) +void hdcp_on(struct hdmi_info *hdmi, const char *caller) +{ + HDCP_DBG(", caller=%s\n", caller); + hdmi_write_byte(hdmi->client, TPI_HDCP_CONTROL_DATA_REG, PROTECTION_LEVEL_MAX); + HDCP_Started = true; +} + +////////////////////////////////////////////////////////////////////////////// +// FUNCTION : HDCP_Off() +// PURPOSE : Switch hdcp off. +// GLOBALS USED : HDCP_Started set to false +// RETURNS : None +////////////////////////////////////////////////////////////////////////////// +void hdcp_off(struct hdmi_info *hdmi) +{ + HDCP_DBG("\n"); + + SetInputColorSpace(hdmi, INPUT_COLOR_SPACE_BLACK_MODE); + SetAudioMute(hdmi, AUDIO_MUTE_MUTED); + hdmi_write_byte(hdmi->client, TPI_HDCP_CONTROL_DATA_REG, PROTECTION_LEVEL_MIN); + HDCP_Started = false; + HDCP_LinkProtectionLevel = EXTENDED_LINK_PROTECTION_NONE | LOCAL_LINK_PROTECTION_NONE; +} + +void hdcp_init(struct hdmi_info *hdmi) +{ + HDCP_DBG("\n"); + + HDCP_TxSupports = false; + HDCP_Started = false; + HDCP_LinkProtectionLevel = EXTENDED_LINK_PROTECTION_NONE | LOCAL_LINK_PROTECTION_NONE; + + /* TX-related... need only be done once. */ + if (!hdcp_check_support(hdmi)) { + hdcp_err("TX does not support HDCP\n"); + return; + } + if (!hdcp_check_aksv(hdmi)) { + hdcp_err("Illegal AKSV\n"); + return; + } + HDCP_TxSupports = true; +} + +void hdcp_restart(struct hdmi_info *hdmi) +{ + HDCP_DBG("\n"); + DisableTMDS(hdmi); + hdcp_off(hdmi); + EnableTMDS(hdmi); +} + +void hdcp_check_status(struct hdmi_info *hdmi, u8 InterruptStatusImage) +{ + u8 QueryData, LinkStatus, RegImage, NewLinkProtectionLevel; + + if (HDCP_TxSupports == false) + return; + + if ((HDCP_LinkProtectionLevel == + (EXTENDED_LINK_PROTECTION_NONE | LOCAL_LINK_PROTECTION_NONE)) && + (HDCP_Started == false)) { + QueryData = hdmi_read(hdmi->client, TPI_HDCP_QUERY_DATA_REG); + /* Is HDCP avaialable */ + if (QueryData & PROTECTION_TYPE_MASK) { + hdcp_on(hdmi, __func__); + } + } + /* Check if Link Status has changed: */ + if (InterruptStatusImage & SECURITY_CHANGE_EVENT) { + HDCP_DBG("SECURITY_CHANGE_EVENT\n"); + LinkStatus = hdmi_read(hdmi->client, TPI_HDCP_QUERY_DATA_REG); + LinkStatus &= LINK_STATUS_MASK; + tpi_clear_interrupt(hdmi, SECURITY_CHANGE_EVENT); + switch (LinkStatus) { + case LINK_STATUS_NORMAL: + HDCP_DBG("Link = Normal\n"); + break; + case LINK_STATUS_LINK_LOST: + HDCP_DBG("Link = Lost\n"); + hdcp_restart(hdmi); + break; + case LINK_STATUS_RENEGOTIATION_REQ: + HDCP_DBG("Link = Renegotiation Required\n"); + hdcp_off(hdmi); + hdcp_on(hdmi, __func__); + break; + case LINK_STATUS_LINK_SUSPENDED: + HDCP_DBG("Link = Suspended\n"); + hdcp_on(hdmi, __func__); + break; + } + } + /* Check if HDCP state has changed: */ + if (InterruptStatusImage & HDCP_CHANGE_EVENT) { + HDCP_DBG("HDCP_CHANGE_EVENT\n"); + RegImage = hdmi_read(hdmi->client, TPI_HDCP_QUERY_DATA_REG); + NewLinkProtectionLevel = RegImage & + (EXTENDED_LINK_PROTECTION_MASK | LOCAL_LINK_PROTECTION_MASK); + if (NewLinkProtectionLevel != HDCP_LinkProtectionLevel) { + HDCP_LinkProtectionLevel = NewLinkProtectionLevel; + switch (HDCP_LinkProtectionLevel) { + case (EXTENDED_LINK_PROTECTION_NONE | LOCAL_LINK_PROTECTION_NONE): + HDCP_DBG("Protection = None\n"); + hdcp_restart(hdmi); + break; + case LOCAL_LINK_PROTECTION_SECURE: + SetAudioMute(hdmi, AUDIO_MUTE_NORMAL); + //SetInputColorSpace (hdmi, 
INPUT_COLOR_SPACE_YCBCR422); + SetInputColorSpace (hdmi, INPUT_COLOR_SPACE_RGB); + HDCP_DBG("Protection = Local, Video Unmuted\n"); + break; + case (EXTENDED_LINK_PROTECTION_SECURE | LOCAL_LINK_PROTECTION_SECURE): + HDCP_DBG("Protection = Extended\n"); + break; + default: + HDCP_DBG("Protection = Extended but not Local?\n"); + hdcp_restart(hdmi); + break; + } + } + tpi_clear_interrupt(hdmi, HDCP_CHANGE_EVENT); + } +} + +/*----------------------------------------------------------------------------*/ +#if defined(HDMI_DEBUGFS) +static ssize_t hdcp_supported_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + int n=0; + char buffer[80]; + //struct hdmi_info *hdmi = (struct hdmi_info*)filp->private_data; + + HDMI_DBG("%s\n", __func__); + //n = scnprintf(buffer, 80, "%d\n", is_hdcp_supported(hdmi)); + n++; + buffer[n] = 0; + return simple_read_from_buffer(buf, count, ppos, buffer, n); +} + +static ssize_t hdcp_available_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + int n=0; + char buffer[80]; + //struct hdmi_info *hdmi = (struct hdmi_info*)filp->private_data; + + HDMI_DBG("%s\n", __func__); + //n = scnprintf(buffer, 80, "%d\n", is_hdcp_available(hdmi)); + n++; + buffer[n] = 0; + return simple_read_from_buffer(buf, count, ppos, buffer, n); +} + +static ssize_t hdcp_on_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + struct hdmi_info *hdmi = (struct hdmi_info*)filp->private_data; + hdcp_on(hdmi, __func__); + return 0; +} + +static ssize_t hdcp_off_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + struct hdmi_info *hdmi = (struct hdmi_info*)filp->private_data; + hdcp_off(hdmi); + return 0; +} + +static ssize_t hdcp_restart_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + struct hdmi_info *hdmi = (struct hdmi_info*)filp->private_data; + hdcp_restart(hdmi); + return 0; +} + +static ssize_t hdcp_handle_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + //struct hdmi_info *hdmi = (struct hdmi_info*)filp->private_data; + //handle_hdcp(hdmi); + return 0; +} + +static ssize_t hdcp_poll_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + //struct hdmi_info *hdmi = (struct hdmi_info*)filp->private_data; + //hdcp_poll(hdmi); + return 0; +} + +static ssize_t hdcp_aksv_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + struct hdmi_info *hdmi = (struct hdmi_info*)filp->private_data; + hdcp_check_aksv(hdmi); + return 0; +} + +static ssize_t hdcp_bksv_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + //struct hdmi_info *hdmi = (struct hdmi_info*)filp->private_data; + //hdcp_check_bksv(hdmi); + return 0; +} + +static struct file_operations hdcp_debugfs_fops[] = { + { .open = hdmi_dbgfs_open, .read = hdcp_supported_read, }, + { .open = hdmi_dbgfs_open, .read = hdcp_available_read, }, + { .open = hdmi_dbgfs_open, .read = hdcp_on_read, }, + { .open = hdmi_dbgfs_open, .read = hdcp_off_read, }, + { .open = hdmi_dbgfs_open, .read = hdcp_restart_read, }, + { .open = hdmi_dbgfs_open, .read = hdcp_poll_read, }, + { .open = hdmi_dbgfs_open, .read = hdcp_aksv_read, }, + { .open = hdmi_dbgfs_open, .read = hdcp_bksv_read, }, +}; + +// mutex ? 
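Note: the AKSV sanity test in hdcp_check_aksv() above reduces to a population count: a Key Selection Vector is plausible only when exactly 20 of its 40 bits are set. A minimal standalone sketch of that rule follows (hypothetical helper, not part of this patch; the 5-byte size and the count of 20 mirror AKSV_SIZE and NUM_OF_ONES_IN_KSV used above). Unlike hdcp_check_aksv(), which computes ret but returns true unconditionally, the sketch returns the computed result.

	#include <stdbool.h>
	#include <stdint.h>

	#define AKSV_SIZE		5	/* 5 bytes = 40 bits */
	#define NUM_OF_ONES_IN_KSV	20

	/* A KSV is well formed only if exactly 20 of its 40 bits are set. */
	static bool ksv_is_valid(const uint8_t ksv[AKSV_SIZE])
	{
		int i, ones = 0;

		for (i = 0; i < AKSV_SIZE; i++) {
			uint8_t b = ksv[i];

			while (b) {
				ones += b & 1;
				b >>= 1;
			}
		}
		return ones == NUM_OF_ONES_IN_KSV;
	}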
+int hdcp_debugfs_init(struct hdmi_info *hdmi) +{ + struct dentry *hdcp_dent; + + hdcp_dent = debugfs_create_dir("hdcp", hdmi->debug_dir); + if (IS_ERR(hdcp_dent)) + return PTR_ERR(hdcp_dent); + + //FIXME: error handling + debugfs_create_file("supported", 0444, hdcp_dent, hdmi, + &hdcp_debugfs_fops[0]); + debugfs_create_file("available", 0444, hdcp_dent, hdmi, + &hdcp_debugfs_fops[1]); + debugfs_create_file("on", 0444, hdcp_dent, hdmi, + &hdcp_debugfs_fops[2]); + debugfs_create_file("off", 0444, hdcp_dent, hdmi, + &hdcp_debugfs_fops[3]); + debugfs_create_file("restart", 0444, hdcp_dent, hdmi, + &hdcp_debugfs_fops[4]); + debugfs_create_file("poll", 0444, hdcp_dent, hdmi, + &hdcp_debugfs_fops[5]); + debugfs_create_file("aksv", 0444, hdcp_dent, hdmi, + &hdcp_debugfs_fops[6]); + debugfs_create_file("bksv", 0444, hdcp_dent, hdmi, + &hdcp_debugfs_fops[7]); + + return 0; +} +#endif + diff --git a/drivers/video/msm/hdmi/silicon-image/tpi.c b/drivers/video/msm/hdmi/silicon-image/tpi.c new file mode 100644 index 0000000000000..36ec47ed9d781 --- /dev/null +++ b/drivers/video/msm/hdmi/silicon-image/tpi.c @@ -0,0 +1,952 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +// FIXME: remove this if unnecessary in the future +#ifdef CONFIG_HTC_HEADSET_MGR +#include +#endif + +#if 1 +#define HDMI_DBG(s...) printk("[hdmi/tpi]" s) +#else +#define HDMI_DBG(s...) do {} while (0) +#endif + +#include "../include/fb-hdmi.h" +#include "../include/sil902x.h" +#include "../include/tpi.h" + +#define NEW_INTEGRATE +#define DBG_POLLING 0x1 +static int debug_mask; +module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); + +#define DLOG(mask, fmt, args...) \ +do { \ + if (debug_mask & mask) \ + printk(KERN_INFO "[hdmi/sil]: "fmt, ##args); \ +} while (0) + +#define X1 0x01 +#define AFTER_INIT 1 + +void HotPlugService (struct hdmi_info *hdmi); +// FIXME: should be decide by detection +static bool dsRxPoweredUp; +static bool edidDataValid; +static bool tmdsPoweredUp; +u8 pvid_mode, vid_mode = 16; +u8 systemInitialized; + +void tpi_clear_interrupt(struct hdmi_info *hdmi, u8 pattern) +{ + /* write "1" to clear interrupt bit, and 0 won't effect origin value. 
*/ + hdmi_write_byte(hdmi->client, TPI_INTERRUPT_STATUS_REG, pattern); +} + +////////////////////////////////////////////////////////////////////////////// +// FUNCTION : EnableInterrupts() +// PURPOSE : Enable the interrupts specified in the input parameter +// INPUT PARAMS : A bit pattern with "1" for each interrupt that needs to be +// set in the Interrupt Enable Register (TPI offset 0x3C) +// OUTPUT PARAMS : void +// GLOBALS USED : None +// RETURNS : TRUE +////////////////////////////////////////////////////////////////////////////// +bool tpi_enable_interrupts(struct hdmi_info *hdmi, u8 Interrupt_Pattern) +{ + HDMI_DBG("%s, reg=%02x, pat=%02x\n", __func__, TPI_INTERRUPT_EN, Interrupt_Pattern); + ReadSetWriteTPI(hdmi, TPI_INTERRUPT_EN, Interrupt_Pattern); + return true; +} + +static void tpi_disable_interrupts(struct hdmi_info *hdmi, u8 pattern) +{ +/* + HDMI_DBG("%s, reg=%02x, pat=%02x\n", __func__, + TPI_INTERRUPT_EN, pattern); +*/ + ReadClearWriteTPI(hdmi, TPI_INTERRUPT_EN, pattern); +} + +static void tpi_clear_pending_event(struct hdmi_info *hdmi) +{ + int retry = 100; + + if (hdmi->sleeping == SLEEP) return; + while (retry--) { + hdmi_write_byte(hdmi->client, 0x3c, 1); + hdmi_write_byte(hdmi->client, 0x3d, 1); + if (hdmi_read(hdmi->client, 0x3d) & 0x01) + msleep(1); + else + break; + } + if (retry < 19) HDMI_DBG("%s: retry=%d\n", __func__, 19 - retry); +} + +////////////////////////////////////////////////////////////////////////////// +// FUNCTION : ReadBackDoorRegister() +// PURPOSE : Read a 922x register value from a backdoor register +// Write: +// 1. 0xBC => Internal page num +// 2. 0xBD => Backdoor register offset +// Read: +// 3. 0xBE => Returns the backdoor register value +// INPUT PARAMS : Internal page number, backdoor register offset, pointer to +// buffer to store read value +// OUTPUT PARAMS: Buffer that stores the read value +// RETURNS : TRUE +// NOTE : This workaround is needed for the 9220/2 only. 
+////////////////////////////////////////////////////////////////////////////// +int tpi_read_backdoor_register(struct hdmi_info *hdmi, u8 PageNum, u8 RegOffset) +{ + // FIXME: error handling + struct i2c_client *client = hdmi->client; + + /* Internal page */ + hdmi_write_byte(client, TPI_INTERNAL_PAGE_REG, PageNum); + /* Indexed register */ + hdmi_write_byte(client, TPI_REGISTER_OFFSET_REG, RegOffset); + /* Read value into buffer */ + return hdmi_read(client, TPI_REGISTER_VALUE_REG); +} + +void tpi_write_backdoor_register(struct hdmi_info *hdmi, u8 PageNum, u8 RegOffset, u8 RegValue) { + /* Internal page */ + hdmi_write_byte(hdmi->client, TPI_INTERNAL_PAGE_REG, PageNum); + /* Indexed register */ + hdmi_write_byte(hdmi->client, TPI_REGISTER_OFFSET_REG, RegOffset); + /* Read value into buffer */ + hdmi_write_byte(hdmi->client, TPI_REGISTER_VALUE_REG, RegValue); +} + +#define TPI_INTERNAL_PAGE_REG 0xBC +#define TPI_INDEXED_OFFSET_REG 0xBD +#define TPI_INDEXED_VALUE_REG 0xBE +#define INDEXED_PAGE_0 0x01 +#define INDEXED_PAGE_1 0x02 +#define INDEXED_PAGE_2 0x03 + +void ReadModifyWriteIndexedRegister(struct hdmi_info *hdmi, u8 PageNum, u8 RegOffset, u8 Mask, u8 Value) +{ + u8 Tmp; + + hdmi_write_byte(hdmi->client, TPI_INTERNAL_PAGE_REG, PageNum); + hdmi_write_byte(hdmi->client, TPI_INDEXED_OFFSET_REG, RegOffset); + Tmp = hdmi_read(hdmi->client, TPI_INDEXED_VALUE_REG); + + Tmp &= ~Mask; + Tmp |= (Value & Mask); + + hdmi_write_byte(hdmi->client, TPI_INDEXED_VALUE_REG, Tmp); +} + +void ReadSetWriteTPI(struct hdmi_info *hdmi, u8 Offset, u8 Pattern) +{ + u8 Tmp; + struct i2c_client *client = hdmi->client; + + Tmp = hdmi_read(client, Offset); + Tmp |= Pattern; + hdmi_write_byte(client, Offset, Tmp); +} + +int tpi_set_bit(struct hdmi_info *hdmi, u8 reg, u8 pattern) +{ + return hdmi_write_byte(hdmi->client, reg, + hdmi_read(hdmi->client, reg) | pattern); +} +//// +void ReadModifyWriteTPI(struct hdmi_info *hdmi, u8 Offset, u8 Mask, u8 Value) +{ + u8 Tmp; + struct i2c_client *client = hdmi->client; + + Tmp = hdmi_read(client, Offset); + Tmp &= ~Mask; + Tmp |= (Value & Mask); + hdmi_write_byte(client, Offset, Tmp); +} +//// +void ReadClearWriteTPI(struct hdmi_info *hdmi, u8 Offset, u8 Pattern) +{ + u8 Tmp; + + Tmp = hdmi_read(hdmi->client, Offset); + Tmp &= ~Pattern; + hdmi_write_byte(hdmi->client, Offset, Tmp); +} +void tpi_clear_bit(struct hdmi_info *hdmi, u8 reg, u8 pattern) +{ + hdmi_write_byte(hdmi->client, reg, + hdmi_read(hdmi->client, reg) & pattern); +} +//// + +/* Caller: ChangeVideoMode(), HDCP_Poll(), HotPlugServiceLoop(), RestartHDCP() + */ + +void EnableTMDS(struct hdmi_info *hdmi) +{ + u8 val; +#if 1 + /* 0x1A[4] = 0 */ + ReadClearWriteTPI(hdmi, TPI_SYSTEM_CONTROL, BIT_TMDS_OUTPUT); + + if (edid_check_sink_type(hdmi)) + hdmi_write_byte(hdmi->client, 0x26, + hdmi_read(hdmi->client, 0x26) & ~0x10); + +#else + struct i2c_client *client = hdmi->i2c_client; + + val = hdmi_read(client, TPI_SYSTEM_CONTROL); + hdmi_write_byte(client, TPI_SYSTEM_CONTROL, val & ~BIT_TMDS_OUTPUT); + HDMI_DBG("%s, reg 0x1a: %02x->%02x\n", __func__, + val, val & ~BIT_TMDS_OUTPUT); +#endif + +} + +/* Caller: ChangeVideoMode(), HDCP_Poll(), TPI_Poll(), RestartHDCP(), + * OnHdmiCableDisconnected() + */ + +void DisableTMDS(struct hdmi_info *hdmi) +{ + /* 0x1A[4] = 1 */ + //ReadClearWriteTPI(hdmi, TPI_SYSTEM_CONTROL, BIT_TMDS_OUTPUT); + ReadSetWriteTPI(hdmi, TPI_SYSTEM_CONTROL, BIT_TMDS_OUTPUT); +} +static void OnDownstreamRxPoweredDown(struct hdmi_info *hdmi) +{ + HDMI_DBG("%s\n", __func__); + dsRxPoweredUp = false; + 
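+	/* Downstream RX lost power: switch HDCP off; hdcp_check_status() re-enables it once RX sense returns. */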
hdcp_off(hdmi); +} + +static void OnDownstreamRxPoweredUp(struct hdmi_info *hdmi) +{ + HDMI_DBG("%s\n", __func__); + dsRxPoweredUp = true; + HotPlugService(hdmi); +#ifdef CONFIG_HTC_HEADSET_MGR + /* send cable in event */ + switch_send_event(BIT_HDMI_CABLE, 1); + HDMI_DBG("Cable inserted.\n"); +#endif + pvid_mode = vid_mode; + hdmi_active9022_dup(hdmi->client); +} + +bool GetDDC_Access(struct hdmi_info *hdmi, u8* SysCtrlRegVal) +{ + u8 sysCtrl, TPI_ControlImage, DDCReqTimeout = T_DDC_ACCESS; + + HDMI_DBG("%s\n", __func__); + /* Read and store original value. Will be passed into ReleaseDDC() */ + sysCtrl = hdmi_read(hdmi->client, TPI_SYSTEM_CONTROL); + *SysCtrlRegVal = sysCtrl; + + sysCtrl |= BIT_DDC_BUS_REQ; + hdmi_write_byte(hdmi->client, TPI_SYSTEM_CONTROL, sysCtrl); + + /* Loop till 0x1A[1] reads "1" */ + while (DDCReqTimeout--) { + TPI_ControlImage = hdmi_read(hdmi->client, TPI_SYSTEM_CONTROL); + + /* When 0x1A[1] reads "1" */ + if (TPI_ControlImage & BIT_DDC_BUS_GRANT) { + sysCtrl |= BIT_DDC_BUS_GRANT; + /* lock host DDC bus access (0x1A[2:1] = 11) */ + hdmi_write_byte(hdmi->client, TPI_SYSTEM_CONTROL, sysCtrl); + return true; + } + /* 0x1A[2] = "1" - Requst the DDC bus */ + hdmi_write_byte(hdmi->client, TPI_SYSTEM_CONTROL, sysCtrl); + mdelay(200); + } + + /* Failure... restore original value. */ + hdmi_write_byte(hdmi->client, TPI_SYSTEM_CONTROL, sysCtrl); + return false; +} + +bool ReleaseDDC(struct hdmi_info *hdmi, u8 SysCtrlRegVal) +{ + u8 DDCReqTimeout = T_DDC_ACCESS, TPI_ControlImage; + + HDMI_DBG("%s\n", __func__); + /* Just to be sure bits [2:1] are 0 before it is written */ + SysCtrlRegVal &= ~(0x6); + /* Loop till 0x1A[1] reads "0" */ + while (DDCReqTimeout--) { + /* Cannot use ReadClearWriteTPI() here. A read of + * TPI_SYSTEM_CONTROL is invalid while DDC is granted. + * Doing so will return 0xFF, and cause an invalid value to be + * written back. 
+ */ + /* 0x1A[2:1] = "0" - release the DDC bus */ + //ReadClearWriteTPI(TPI_SYSTEM_CONTROL,BITS_2_1); + + hdmi_write_byte(hdmi->client, TPI_SYSTEM_CONTROL, SysCtrlRegVal); + TPI_ControlImage = hdmi_read(hdmi->client, TPI_SYSTEM_CONTROL); + /* When 0x1A[2:1] read "0" */ + if (!(TPI_ControlImage & 0x6)) + return true; + } + + /* Failed to release DDC bus control */ + return false; +} + +int tpi_read_edid(struct hdmi_info *hdmi) +{ + u8 SysCtrlReg; + int ret, edid_blocks = 0; + struct i2c_msg msg; + u8 i2c_buff[2]; + u8 pbuf[] = {1, 0, 1, 128} ; + + struct i2c_msg paging_msg[] = { + { + .addr = 0x30, .flags = 0, .len = 1, .buf = &pbuf[0], + }, + { + .addr = 0x50, .flags = 0, .len = 1, .buf = &pbuf[1], + }, + { //Block-2 + .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = &hdmi->edid_buf[256], + }, + { + .addr = 0x30, .flags = 0, .len = 1, .buf = &pbuf[2], + }, + { + .addr = 0x50, .flags = 0, .len = 1, .buf = &pbuf[3], + }, + { //Block-3 + .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = &hdmi->edid_buf[384], + }, + }; + + HDMI_DBG("%s\n", __func__); +#if 0 + DisableTMDS(hdmi); +#else + u8 val; + val = hdmi_read(hdmi->client, TPI_SYSTEM_CONTROL); + //hdmi_write_byte(hdmi->client, TPI_SYSTEM_CONTROL, val|BIT_4|BIT_6); + hdmi_write_byte(hdmi->client, TPI_SYSTEM_CONTROL, val|BIT_4); +#endif + + if (!GetDDC_Access(hdmi, &SysCtrlReg)) { + pr_err("%s: DDC bus request failed\n", __func__); + return DDC_BUS_REQ_FAILURE; + } + + // Block-0 + memset(hdmi->edid_buf, 0, 512); + + msg.addr = 0x50; + msg.flags = 0; + msg.len = 1; + msg.buf = hdmi->edid_buf; + ret = i2c_transfer(hdmi->client->adapter, &msg, 1); + if (ret < 0) + dev_err(&hdmi->client->dev, "%s: i2c transfer error\n", __func__); + + msg.addr = 0x50; + msg.flags = I2C_M_RD; + msg.len = 256; + msg.buf = hdmi->edid_buf; + ret = i2c_transfer(hdmi->client->adapter, &msg, 1); + if (ret < 0) { + dev_err(&hdmi->client->dev, "%s: i2c transfer error\n", __func__); + goto end_read_edid; + } else { + if (hdmi->edid_buf[0x7e] <= 3) + edid_blocks = hdmi->edid_buf[0x7e] ; + + dev_info(&hdmi->client->dev, "EDID blocks = %d\n", edid_blocks); + } + + if (edid_blocks > 1) { + // block 2/3 + i2c_transfer(hdmi->client->adapter, paging_msg, 3); + i2c_transfer(hdmi->client->adapter, &paging_msg[3], 3); + } + +end_read_edid: + if (!ReleaseDDC(hdmi, SysCtrlReg)) { + pr_err("%s: DDC bus release failed\n", __func__); + return DDC_BUS_REQ_FAILURE; + } + + edid_simple_parsing(hdmi); + + return 0; +} + +////////////////////////////////////////////////////////////////////////////// +// FUNCTION : HotPlugService() +// PURPOSE : Implement Hot Plug Service Loop activities +// INPUT PARAMS : None +// OUTPUT PARAMS: void +// GLOBALS USED : LinkProtectionLevel +// RETURNS : An error code that indicates success or cause of failure +////////////////////////////////////////////////////////////////////////////// + +extern bool HDCP_TxSupports; +static bool tmdsPoweredUp; +void HotPlugService (struct hdmi_info *hdmi) +{ + HDMI_DBG("%s\n", __func__); + + mutex_lock(&hdmi->lock); + tpi_disable_interrupts(hdmi, 0xFF); + +/* + // use 1st mode supported by sink + //vid_mode = EDID_Data.VideoDescriptor[0]; + vid_mode = 0; +*/ + avc_init_video(hdmi, vid_mode, X1, AFTER_INIT); + + hdmi_write_byte(hdmi->client, HDMI_POWER, 0); + if (edid_check_sink_type(hdmi)) + avc_send_avi_info_frames(hdmi); + + /* This check needs to be changed to if HDCP is required by the content + once support has been added by RX-side library. 
*/ + if (HDCP_TxSupports == true) { + HDMI_DBG("TMDS -> Enabled\n"); + /* turn on black mode will lost around 3 secs frames thus remove it */ + //SetInputColorSpace(hdmi, INPUT_COLOR_SPACE_BLACK_MODE); +#if 1 + ReadModifyWriteTPI(hdmi, TPI_SYSTEM_CONTROL, + LINK_INTEGRITY_MODE_MASK | TMDS_OUTPUT_CONTROL_MASK, + LINK_INTEGRITY_DYNAMIC | TMDS_OUTPUT_CONTROL_ACTIVE); +#else + ReadModifyWriteTPI(hdmi, TPI_SYSTEM_CONTROL, + LINK_INTEGRITY_MODE_MASK | TMDS_OUTPUT_CONTROL_MASK, + LINK_INTEGRITY_DYNAMIC); +#endif + tmdsPoweredUp = true; + } else { + EnableTMDS(hdmi); + } + + if (edid_check_audio_support(hdmi)) + avc_set_basic_audio(hdmi); + else + SetAudioMute(hdmi, AUDIO_MUTE_MUTED); + + tpi_enable_interrupts(hdmi, HOT_PLUG_EVENT | RX_SENSE_EVENT | + AUDIO_ERROR_EVENT | SECURITY_CHANGE_EVENT | + V_READY_EVENT | HDCP_CHANGE_EVENT); + + //complete(&hdmi->hotplug_completion); + mutex_unlock(&hdmi->lock); +} + +static bool tpi_start(struct hdmi_info *hdmi) +{ + u8 devID = 0x00; + u16 wID = 0x0000; + + hdmi_write_byte(hdmi->client, TPI_ENABLE, 0x00); // Write "0" to 72:C7 to start HW TPI mode + mdelay(100); + + devID = tpi_read_backdoor_register(hdmi, 0x00, 0x03); + wID = devID; + wID <<= 8; + devID = tpi_read_backdoor_register(hdmi, 0x00, 0x02); + wID |= devID; + devID = hdmi_read(hdmi->client, TPI_DEVICE_ID); + HDMI_DBG("%s, ID=%04X\n", __func__, (u32)wID); + + if (devID == SiI_DEVICE_ID) { + return true; + } + + pr_err("%s: Unsupported TX\n", __func__); + return false; +} + +bool tpi_init(struct hdmi_info *hdmi) +{ + tmdsPoweredUp = false; + hdmi->cable_connected = false; + dsRxPoweredUp = false; + edidDataValid = false; + + /* Enable HW TPI mode, check device ID */ + if (tpi_start(hdmi)) { + hdcp_init(hdmi); + return true; + } + return 0; +} + +void SetAudioMute(struct hdmi_info *hdmi, u8 audioMute) +{ + ReadModifyWriteTPI(hdmi, TPI_AUDIO_INTERFACE_REG, AUDIO_MUTE_MASK, audioMute); +} + +void SetInputColorSpace(struct hdmi_info *hdmi, u8 inputColorSpace) +{ + ReadModifyWriteTPI(hdmi, TPI_INPUT_FORMAT_REG, INPUT_COLOR_SPACE_MASK, inputColorSpace); + /* Must be written for previous write to take effect. Just write read value unmodified. 
*/ + ReadModifyWriteTPI(hdmi, TPI_END_RIGHT_BAR_MSB, 0x00, 0x00); +} + +static char edid_hex_buff[2048]; +int lcdc_enable_video(void); +int lcdc_disable_video(void); +void tpi_cable_conn(struct hdmi_info *hdmi) +{ + HDMI_DBG("%s\n", __func__); + + hdmi->cable_connected = true; + tpi_write_backdoor_register(hdmi, INTERNAL_PAGE_0, 0xCE, 0x00); // Clear BStatus + tpi_write_backdoor_register(hdmi, INTERNAL_PAGE_0, 0xCF, 0x00); + +//----------------------------------------------- + hdmi_write_byte(hdmi->client, 0x09, 0x03); + hdmi_write_byte(hdmi->client, 0x19, 0x00); // go to blank mode, avoid screen noise + +/* + HDMI_DBG("solomon: H/V total=%02x, %02x, %02x, %02x\n", + hdmi_read(hdmi->client, 0x6a), + hdmi_read(hdmi->client, 0x6b), + hdmi_read(hdmi->client, 0x6c), + hdmi_read(hdmi->client, 0x6d) + ); +*/ + + lcdc_enable_video(); + msleep(160); +/* + //clk_set_rate(hdmi->ebi1_clk, 120000000); + HDMI_DBG("solomon: H/V total=%02x, %02x, %02x, %02x\n", + hdmi_read(hdmi->client, 0x6a), + hdmi_read(hdmi->client, 0x6b), + hdmi_read(hdmi->client, 0x6c), + hdmi_read(hdmi->client, 0x6d) + ); +*/ + EnableTMDS(hdmi); + +//----------------------------------------------- + + tpi_read_edid(hdmi); + memset(edid_hex_buff, 0, 2048); + edid_dump_hex(hdmi->edid_buf, 128, edid_hex_buff, 2048); + printk("Base EDID:\n%s\n=====\n", edid_hex_buff); + edid_dump_hex(hdmi->edid_buf + 128, 256, edid_hex_buff, 2048); + printk("Extended EDID blocks:\n%s\n=====\n", edid_hex_buff); + /* select output mode (HDMI/DVI) according to sink capabilty */ + if (edid_check_sink_type(hdmi)) + ReadModifyWriteTPI(hdmi, TPI_SYSTEM_CONTROL, OUTPUT_MODE_MASK, OUTPUT_MODE_HDMI); + else + ReadModifyWriteTPI(hdmi, TPI_SYSTEM_CONTROL, OUTPUT_MODE_MASK, OUTPUT_MODE_DVI); + + hdmi->first = false; +#if 0 +#ifdef CONFIG_HTC_HEADSET_MGR + /* send cable in event */ + switch_send_event(BIT_HDMI_CABLE, 1); + HDMI_DBG("Cable inserted.\n"); +#endif +#endif +} + +void tpi_cable_disconn(struct hdmi_info *hdmi, bool into_d3) +{ + HDMI_DBG("%s, into_d3=%d\n", __func__, into_d3); + +#if 0 + hdmi->cable_connected = false; + dsRxPoweredUp = false; + edidDataValid = false; + hdcp_off(hdmi); + DisableTMDS(hdmi); +#endif + +#if 1 + /* wait for debounce */ + msleep(20); + tpi_clear_pending_event(hdmi); +#else + reg = hdmi_read(hdmi->client, 0x3d); + if (!(reg & 0x0c)) + tpi_clear_pending_event(hdmi); +#endif + +#if 0 + if (into_d3) { + mutex_lock(&hdmi->lock); + HDMI_DBG("%s, playing=%d\n", __func__, hdmi->user_playing); + if (false == hdmi->user_playing) + lcdc_disable_video(); + clk_set_rate(hdmi->ebi1_clk, 0); + hdmi_standby(hdmi); + hdmi->power(2); + memset(hdmi->edid_buf, 0, 512); + mutex_unlock(&hdmi->lock); + } +#ifdef CONFIG_HTC_HEADSET_MGR + HDMI_DBG("Cable unplugged.\n"); + switch_send_event(BIT_HDMI_CABLE, 0); +#endif +#endif +} + +static char *str_debug_interrupt[] = { + "HOT_PLUG_EVENT\t\t\t", + "RECEIVER_SENSE_EVENT\t\t", + "HOT_PLUG_PIN_STATE\t\t", + "RX_SENSE_MASK\t\t\t", + "AUDIO_ERROR_EVENT\t\t", + "HDCP_SECURITY_CHANGE_EVENT\t", + "HDCP_VPRIME_VALUE_READY_EVENT\t", + "HDCP_AUTH_STATUS_CHANGE_EVENT\t", +}; + +void tpi_debug_interrupt(struct hdmi_info *hdmi, u8 old_status, u8 new_status) +{ + int i, diff, on_off; + HDMI_DBG("%s: status changed, %02x to %02x\n", __func__, + old_status, new_status); + for (i = 7; i >= 0; i--) { + diff = (old_status ^ new_status) & (1 << i); + if (!diff) + continue; + on_off = new_status & (1 << i); + HDMI_DBG("%d-%s->%s\n", i, str_debug_interrupt[i], + on_off ? 
"on" : "off"); + } +} +////////////////////////////////////////////////////////////////////////////// +// FUNCTION : TPI_Poll () +// PURPOSE : Poll Interrupt Status register for new interrupts +// INPUT PARAMS : None +// OUTPUT PARAMS: None +// GLOBALS USED : LinkProtectionLevel +// RETURNS : None +////////////////////////////////////////////////////////////////////////////// +static u8 last_status = 0; +static void tpi_poll(struct hdmi_info *hdmi) +{ + u8 status, orig_status; + int retry = 20; + + mutex_lock(&hdmi->polling_lock); + orig_status = status = hdmi_read(hdmi->client, TPI_INTERRUPT_STATUS_REG); + if (last_status != status) { + tpi_debug_interrupt(hdmi, last_status, status); + } + last_status = status; + DLOG(DBG_POLLING, "%s, INT_STAT=%02x\n", __func__, status); +#if 0 + if (status & HOT_PLUG_EVENT) { +#else + if (hdmi->first || status & HOT_PLUG_EVENT) { + if (hdmi->first) hdmi->first = false; +#endif + // Enable HPD interrupt bit + ReadSetWriteTPI(hdmi, TPI_INTERRUPT_ENABLE_REG, HOT_PLUG_EVENT); + // Repeat this loop while cable is bouncing: + do { + DLOG(DBG_POLLING, "TPI: Interrupt status image - 2= %02x\n", status); + hdmi_write_byte(hdmi->client, TPI_INTERRUPT_STATUS_REG, HOT_PLUG_EVENT); + // Delay for metastability protection and to help filter out connection bouncing + mdelay(T_HPD_DELAY); + // Read Interrupt status register + status = hdmi_read(hdmi->client, TPI_INTERRUPT_STATUS_REG); + DLOG(DBG_POLLING, "TPI: Interrupt status image - 3= %02x\n", status); + if (!retry--) { + HDMI_DBG("%s: retry failed\n", __func__); + break; + } + + } while (status & HOT_PLUG_EVENT);// loop as long as HP interrupts recur + DLOG(DBG_POLLING, "int status: %02x, after debouncing: %02x\n", + orig_status, status); + + DLOG(DBG_POLLING, "TPI->hdmiCableConnected = %d\n", hdmi->cable_connected); + if (((status & HOT_PLUG_STATE) >> 2) != hdmi->cable_connected) { + DLOG(DBG_POLLING, "cable status changed: from %d to %d\n", + hdmi->cable_connected, !!(status & HOT_PLUG_STATE)); + DLOG(DBG_POLLING, "TPI-> CONDITION\n"); + if (hdmi->cable_connected == true) + tpi_cable_disconn(hdmi, status & 0x8 ? false : true); + else { + tpi_cable_conn(hdmi); + ReadModifyWriteIndexedRegister(hdmi, INDEXED_PAGE_0, 0x0A, 0x08, 0x08); + } + if (hdmi->cable_connected == false) { + mutex_unlock(&hdmi->polling_lock); + return; + } + } else if ( false == hdmi->cable_connected) { + /* only occur while booting without cable attached. */ + tpi_cable_disconn(hdmi, true); + } + } + + // Check rx power + if (((status & RX_SENSE_STATE) >> 3) != dsRxPoweredUp) + { + if (hdmi->cable_connected == true) { + if (dsRxPoweredUp == true) + OnDownstreamRxPoweredDown(hdmi); + else + OnDownstreamRxPoweredUp(hdmi); + } + tpi_clear_interrupt(hdmi, RX_SENSE_EVENT); + } + + // Check if Audio Error event has occurred: + if (status & AUDIO_ERROR_EVENT) + // The hardware handles the event without need for host intervention (PR, p. 
31) + tpi_clear_interrupt(hdmi, AUDIO_ERROR_EVENT); + + if (hdmi->video_streaming) { + if ((hdmi->cable_connected == true) && (dsRxPoweredUp == true)) + hdcp_check_status(hdmi, status); + } + mutex_unlock(&hdmi->polling_lock); +} + +static void tpi_work_func(struct work_struct *work) +{ + u8 reg = 0; + struct hdmi_info *hdmi = + container_of(work, struct hdmi_info, polling_work); + + if (hdmi->sleeping == SLEEP) { + mutex_lock(&hdmi->lock); + hdmi->power(3); + hdmi_wakeup(hdmi); + tpi_init(hdmi); + hdcp_off(hdmi); + mutex_unlock(&hdmi->lock); + } + + tpi_poll(hdmi); +#if 1 + mutex_lock(&hdmi->lock); + if (hdmi->sleeping == AWAKE) + reg = hdmi_read(hdmi->client, 0x3d) & 0x0c; + if (hdmi->cable_connected || reg) { + hdmi->polling = true; + mod_timer(&hdmi->timer, jiffies + INTERVAL_HDCP_POLLING); + } else { + enable_irq(hdmi->client->irq); + hdmi->isr_enabled = true; + hdmi->polling = false; + } + mutex_unlock(&hdmi->lock); +#else + if (hdmi->sleeping == AWAKE) { + reg = hdmi_read(hdmi->client, 0x3d); + if (reg & 0x0c) { + hdmi->polling = true; + mod_timer(&hdmi->timer, jiffies + INTERVAL_HDCP_POLLING); + } else { + tpi_clear_pending_event(hdmi); + } + } + + if (hdmi->cable_connected ) { + hdmi->polling = true; + mod_timer(&hdmi->timer, jiffies + INTERVAL_HDCP_POLLING); + } else { + enable_irq(hdmi->client->irq); + hdmi->isr_enabled = true; + hdmi->polling = false; + } +#endif +/* + HDMI_DBG("after polling: reg=%02x, conn=%d, isr=%d, polling=%d\n", + reg, hdmi->cable_connected, hdmi->isr_enabled, hdmi->polling); +*/ +} + +static void tpi_timer_func(unsigned long arg) +{ + struct hdmi_info *hdmi = (struct hdmi_info *) arg; + + schedule_work(&hdmi->polling_work); +} + +int tpi_prepare(struct hdmi_info *hdmi) +{ + HDMI_DBG("%s\n", __func__); + init_timer(&hdmi->timer); + hdmi->timer.data = (unsigned long)hdmi; + hdmi->timer.function = tpi_timer_func; + hdmi->cable_connected = false; + + init_completion(&hdmi->hotplug_completion); + INIT_WORK(&hdmi->polling_work, tpi_work_func); + + return 0; +} + +/*============================================================================*/ +#if defined(HDMI_DEBUGFS) +static ssize_t tpi_dbg_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t tpi_ddc_request_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + //struct hdmi_info *hdmi = (struct hdmi_info*)filp->private_data; + return 0; +} + +static ssize_t tpi_ddc_request_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + return 0; +} + +static ssize_t tpi_isr_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + int n = 0; + char buffer[4]; + struct hdmi_info *hdmi = (struct hdmi_info*)filp->private_data; + + HDMI_DBG("%s\n", __func__); + n = scnprintf(buffer, 4, "%d\n", hdmi->isr_enabled); + n++; + buffer[n] = 0; + return simple_read_from_buffer(buf, count, ppos, buffer, n); +} + +static ssize_t tpi_polling_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + int n = 0; + char buffer[4]; + struct hdmi_info *hdmi = (struct hdmi_info*)filp->private_data; + + HDMI_DBG("%s\n", __func__); + n = scnprintf(buffer, 4, "%d\n", hdmi->polling); + n++; + buffer[n] = 0; + return simple_read_from_buffer(buf, count, ppos, buffer, n); +} + +static ssize_t tpi_int_status_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + int n = 0; + char buffer[8]; + struct hdmi_info *hdmi = (struct hdmi_info*)filp->private_data; + + 
HDMI_DBG("%s\n", __func__); + n = scnprintf(buffer, 8, "%02x\n", hdmi_read(hdmi->client, 0x3d)); + n++; + buffer[n] = 0; + return simple_read_from_buffer(buf, count, ppos, buffer, n); +} + +static ssize_t tpi_int_enable_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + int n = 0; + char buffer[8]; + struct hdmi_info *hdmi = (struct hdmi_info*)filp->private_data; + + HDMI_DBG("%s\n", __func__); + n = scnprintf(buffer, 8, "%02x\n", hdmi_read(hdmi->client, 0x3c)); + n++; + buffer[n] = 0; + return simple_read_from_buffer(buf, count, ppos, buffer, n); +} + +static ssize_t tpi_avc_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + int n = 0; + char buffer[8]; + struct hdmi_info *hdmi = (struct hdmi_info*)filp->private_data; + + HDMI_DBG("%s\n", __func__); +/* + n = scnprintf(buffer, 8, "%02x\n", hdmi_read(hdmi->client, 0x3c)); + n++; + buffer[n] = 0; + return simple_read_from_buffer(buf, count, ppos, buffer, n); +*/ + hdmi_active9022(hdmi->client); + return 0; +} + +static struct file_operations tpi_debugfs_fops[] = { + { + .open = tpi_dbg_open, + .read = tpi_ddc_request_read, + .write = tpi_ddc_request_write, + }, + { + .open = tpi_dbg_open, + .read = tpi_isr_read, + }, + { + .open = tpi_dbg_open, + .read = tpi_polling_read, + }, + { + .open = tpi_dbg_open, + .read = tpi_int_status_read, + }, + { + .open = tpi_dbg_open, + .read = tpi_int_enable_read, + }, + { + .open = tpi_dbg_open, + .read = tpi_avc_read, + }, +}; + +int tpi_debugfs_init(struct hdmi_info *hdmi) +{ + struct dentry *tpi_dent; + + tpi_dent = debugfs_create_dir("tpi", hdmi->debug_dir); + if (IS_ERR(tpi_dent)) + return PTR_ERR(tpi_dent); + + //FIXME: error handling + debugfs_create_file("ddc_request", 0644, tpi_dent, hdmi, + &tpi_debugfs_fops[0]); + debugfs_create_file("isr_enabled", 0444, tpi_dent, hdmi, + &tpi_debugfs_fops[1]); + debugfs_create_file("polling", 0444, tpi_dent, hdmi, + &tpi_debugfs_fops[2]); + debugfs_create_file("int_stat", 0444, tpi_dent, hdmi, + &tpi_debugfs_fops[3]); + debugfs_create_file("int_ena", 0444, tpi_dent, hdmi, + &tpi_debugfs_fops[4]); + debugfs_create_file("avc", 0444, tpi_dent, hdmi, + &tpi_debugfs_fops[5]); + + return 0; +} +#endif diff --git a/drivers/video/msm/hdmi/transmitter.c b/drivers/video/msm/hdmi/transmitter.c new file mode 100644 index 0000000000000..0f2a7da805886 --- /dev/null +++ b/drivers/video/msm/hdmi/transmitter.c @@ -0,0 +1,1020 @@ +/* + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "include/fb-hdmi.h" +#include "include/sil902x.h" + +#ifdef CONFIG_HTC_HEADSET_MGR +#include +#endif + +#if 1 +#define HDMI_DBG(s...) printk("[hdmi/tx]" s) +#else +#define HDMI_DBG(s...) 
do {} while (0) +#endif + +#define HDMI_NAME "SiL902x-hdmi" +//#define HDMI_DEBUGFS + +static struct class *hdmi_class; + +enum { + ESTABLISHED_TIMING_OFFSET = 35, + LONG_DESCR_LEN = 18, + NUM_DETAILED_DESC = 4, + NUM_STANDARD_TIMING = 8, +}; + +#if 1 +int hdmi_read(struct i2c_client *client, u8 cmd) +#else +#define hdmi_read(client, cmd) _hdmi_read(client, cmd, __func__) +int _hdmi_read(struct i2c_client *client, u8 cmd, const char *caller) +#endif +{ + int ret = -EIO, retry = 10; + + while (retry--) { + ret = i2c_smbus_read_byte_data(client, cmd); + if (ret >= 0) + break; + msleep(1); + } +/* + if (retry!=9) + HDMI_DBG("%s, retry=%d, caller=%s\n", __func__, 10-retry, + caller); +*/ + + return ret; +} + +int tpi_readb(struct hdmi_info *hdmi, u8 reg) +{ + int i, ret = -EIO, retrial = 10, timeout = 1; + struct i2c_client *client = hdmi->client; + + for (i = 1 ; i < retrial ; i++) { + ret = i2c_smbus_read_byte_data(client, reg); + if (ret >= 0) + break; + msleep(timeout++); + } + return ret; +} + +int tpi_readb_oneshoot(struct hdmi_info *hdmi, u8 reg) +{ + return i2c_smbus_read_byte_data(hdmi->client, reg); +} + +#if 1 +int hdmi_write_byte(struct i2c_client *client, u8 reg, u8 val) +#else +#define hdmi_write_byte(client, reg, val) \ + _hdmi_write_byte(client, reg, val, __func__) +int _hdmi_write_byte(struct i2c_client *client, u8 reg, u8 val, const char *caller) +#endif +{ + int ret = -EIO, retry = 10; + + while (retry--) { + ret = i2c_smbus_write_byte_data(client, reg, val); + if (ret == 0) + break; + msleep(1); + } +/* + if (retry!=9) HDMI_DBG("%s, retry=%d, caller=%s\n", __func__, + 10 - retry, caller); +*/ + + return ret; +} + +int tpi_writeb(struct hdmi_info *hdmi, u8 reg, u8 val) +{ + int i, ret = -EIO, retrial = 10, timeout = 1; + struct i2c_client *client = hdmi->client; + + for (i = 1 ; i < retrial ; i++) { + ret = i2c_smbus_write_byte_data(client, reg, val); + if (ret == 0) + break; + msleep(timeout); + } + return ret; +} + +int tpi_writeb_oneshot(struct hdmi_info *hdmi, u8 reg, u8 val) +{ + return i2c_smbus_write_byte_data(hdmi->client, reg, val); +} + +int hdmi_enable_int(struct i2c_client *client) +{ + u8 data; + + HDMI_DBG("%s\n", __func__); + data = hdmi_read(client, HDMI_INT_EN); + return hdmi_write_byte(client, HDMI_INT_EN, data | 0x01); +} + +int hdmi_disable_int(struct i2c_client *client) +{ + u8 data; + + HDMI_DBG("%s\n", __func__); + data = hdmi_read(client, HDMI_INT_EN); + return hdmi_write_byte(client, HDMI_INT_EN, data & 0xfe); +} + +/* + * Tx is brought to low-power state, off audio codec. + * i2c alive. Still be able to response to INT. 
+ */ +int hdmi_standby(struct hdmi_info *hdmi) +{ + u8 data; + int ret; + struct i2c_client *client = hdmi->client; + + HDMI_DBG("%s\n", __func__); +#if 0 + /* D2 sleep mode */ + data = hdmi_read(client, HDMI_POWER); + return hdmi_write_byte(client, HDMI_POWER, (data & 0xfc) | 0x02); +#else + if (SLEEP == hdmi->sleeping) + return 0; + /* D3 sleep mode */ + hdmi->sleeping = SLEEP; + hdmi->cable_connected = false; + data = hdmi_write_byte(client, 0x3c, hdmi_read(client, 0x3c) | 1); + data = hdmi_write_byte(client, 0x3c, hdmi_read(client, 0x3c) & ~2); + HDMI_DBG("%s: INT_EN=%02x\n", __func__, hdmi_read(client, 0x3c)); + data = hdmi_read(client, HDMI_POWER); + data |= 4; + ret = hdmi_write_byte(client, HDMI_POWER, data ); + if (ret) + dev_err(&client->dev, + "error on entering D3 sleep mode: into cold mode\n"); +#if 0 + ret = hdmi_write_byte(client, HDMI_POWER, 7); +#else + tpi_writeb_oneshot(hdmi, HDMI_POWER, 7); +#endif +/* + if (ret) + dev_err(&client->dev, + "error on entering D3 sleep mode: set D3 mode\n"); +*/ +#endif + return ret; +} + +int hdmi_wakeup(struct hdmi_info *hdmi) +{ + int err = -EIO; + int ret; + u8 data; + struct i2c_client *client = hdmi->client; + + HDMI_DBG("%s\n", __func__); +#if 0 + data = hdmi_read(client, HDMI_POWER); + err = hdmi_write_byte(client, HDMI_POWER, data & 0xfc); + if (err) + goto exit; +#else + /* Exiting D3 sleep mode */ + ret = hdmi_write_byte(client, 0xc7, 0); + if (ret) + dev_err(&client->dev, + "error on exiting D3 sleep mode: 0xc7=0\n"); + + data = hdmi_read(client, HDMI_POWER); + data = ( data & 0xfc ) ; + ret = hdmi_write_byte(client, HDMI_POWER, data ); + if (ret) + dev_err(&client->dev, + "error on exiting D3 sleep mode: 0x1e=0\n"); + /* Enable insternal TMDS source termination */ + hdmi_write_byte(client, 0xbc, 0x01); + hdmi_write_byte(client, 0xbd, 0x82); + data = hdmi_read(client, 0xbe); + hdmi_write_byte(client, 0xbe, data | 0x01); + + hdmi->sleeping = AWAKE; +#endif + +/* + data = hdmi_read(client, HDMI_SYS_CTL); + dev_info(&client->dev, "%s, HDMI_SYS_CTL=0x%x\n", __func__, data); + err = hdmi_write_byte(client, HDMI_SYS_CTL, 0x01); + if (err) + goto exit; +*/ + return 0; +exit: + dev_err(&client->dev, "%s: fail, err = %d\n", __func__, err); + return err; +} + +static int +hdmi_check_res(struct hdmi_device *hdmi_device, struct fb_var_screeninfo *var) +{ + if (((var->xres == 1280) && (var->yres == 720)) || + ((var->xres == 800) && (var->yres == 600)) || + ((var->xres == 720) && (var->yres == 576)) || + ((var->xres == 720) && (var->yres == 480)) || + ((var->xres == 640) && (var->yres == 480))) { + dev_info(&hdmi_device->dev, "resolution check successfully\n"); + /* check pixel clock also */ + return 0; + } + + return -EINVAL; +} + +static struct msm_lcdc_timing hdmi_lcdc_timing[] = { + [hd_720p] = { + .clk_rate = 74250000, + .hsync_pulse_width = 40, + .hsync_back_porch = 220, + .hsync_front_porch = 110, + .hsync_skew = 0, + .vsync_pulse_width = 5, + .vsync_back_porch = 20, + .vsync_front_porch = 5, + .vsync_act_low = 0, + .hsync_act_low = 0, + .den_act_low = 0, + }, + [svga] = { + .clk_rate = 40000000, + .hsync_pulse_width = 128, + .hsync_back_porch = 88, + .hsync_front_porch = 40, + .hsync_skew = 0, + .vsync_pulse_width = 4, + .vsync_back_porch = 23, + .vsync_front_porch = 1, + .vsync_act_low = 0, + .hsync_act_low = 0, + .den_act_low = 0, + }, + [pal] = { + .clk_rate = 27027000, + .hsync_pulse_width = 64, + .hsync_back_porch = 68, + .hsync_front_porch = 12, + .hsync_skew = 0, + .vsync_pulse_width = 5, + .vsync_back_porch = 39, + 
.vsync_front_porch = 5, + .vsync_act_low = 1, + .hsync_act_low = 1, + .den_act_low = 0, + }, + [edtv] = { + .clk_rate = 27027000, + .hsync_pulse_width = 62, + .hsync_back_porch = 60, + .hsync_front_porch = 16, + .hsync_skew = 0, + .vsync_pulse_width = 6, + .vsync_back_porch = 30, + .vsync_front_porch = 9, +#if 1 + .vsync_act_low = 1, + .hsync_act_low = 1, +#else + .vsync_act_low = 0, + .hsync_act_low = 0, +#endif + + .den_act_low = 0, + }, + [vga] = { + .clk_rate = 25175000, + .hsync_pulse_width = 96, + .hsync_back_porch = 48, + .hsync_front_porch = 16, + .hsync_skew = 0, + .vsync_pulse_width = 2, + //.vsync_pulse_width = 3, + .vsync_back_porch = 33, + .vsync_front_porch = 10, + .vsync_act_low = 1, + .hsync_act_low = 1, + .den_act_low = 0, + }, +}; + +static struct msm_lcdc_timing * +hdmi_set_res(struct hdmi_device *hdmi_device, struct fb_var_screeninfo *var) +{ + struct hdmi_info *info = container_of(hdmi_device, struct hdmi_info, + hdmi_dev); + + printk(KERN_DEBUG "%s, info->res=%d=(%d x %d)\n", + __func__, info->res, var->xres, var->yres); + if ((var->xres == 1280) && (var->yres == 720)) + info->res = hd_720p; + else if ((var->xres == 800) && (var->yres == 600)) + info->res = svga; + else if ((var->xres == 720) && (var->yres == 576)) + info->res = pal; + else if ((var->xres == 720) && (var->yres == 480)) + info->res = edtv; + else if ((var->xres == 640) && (var->yres == 480)) + info->res = vga; + else + return ERR_PTR(-EINVAL); +/* + if (info->user_playing) + avc_send_avi_info_frames(info); +*/ + return &hdmi_lcdc_timing[info->res]; +} + +static int hdmi_get_cable_state(struct hdmi_device *hdmi_device, int *connect) +{ +#if 0 + struct hdmi_info *info = container_of(hdmi_device, struct hdmi_info, + hdmi_dev); + struct i2c_client *client = info->client; + u8 status; + + *connect = 0; + status = hdmi_read(client, HDMI_INT_STAT); + if (status & HOT_PLUG_STATE) + *connect = 1; +#else + struct hdmi_info *hdmi = + container_of(hdmi_device, struct hdmi_info, hdmi_dev); + *connect = hdmi->cable_connected; +#endif +// HDMI_DBG("%s, state=%s\n", __func__, *connect ? "on" : "off" ); + return 0; +} + +static int +hdmi_get_established_timing(struct hdmi_device *hdmi_device, u8 *byte) +{ + struct hdmi_info *info = container_of(hdmi_device, struct hdmi_info, + hdmi_dev); + + HDMI_DBG("%s\n", __func__); + memcpy(byte, &info->edid_buf[ESTABLISHED_TIMING_OFFSET], 3); + return 0; +} + +#if 0 +// FIXME: remove the parameter: data +static u8 hdmi_request_ddc(struct i2c_client *client, int request, u8 data) +{ + int retry = 10; + + HDMI_DBG("%s, request=%d\n", __func__, request); + if (request) { + data = hdmi_read(client, HDMI_SYS_CTL); + hdmi_write_byte(client, HDMI_SYS_CTL, (data | 0x04)); + msleep(1); + hdmi_write_byte(client, HDMI_SYS_CTL, (data | 0x06)); + msleep(1); + } else { + hdmi_write_byte(client, HDMI_SYS_CTL, (data & 0xf9)); + hdmi_write_byte(client, HDMI_SYS_CTL, (data & 0xf9)); + /* make sure bit [2:1] = 00 */ + data = hdmi_read(client, HDMI_SYS_CTL); + while ((data & 0x03) & retry--) + msleep(1); + } + return data; +} + +#else +// FIXME: remove the static varible. if caller need to presev the reg, +// it should use hdmi_read() first. 
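Note, referring back to the hdmi_lcdc_timing table above: summing a mode's active size with its sync pulse and porches gives the line/frame totals, and multiplying by the refresh rate recovers the pixel clock; hdmi_post_change() below does the same h_total/v_total arithmetic while waiting for the video stream to settle. A small standalone cross-check for the 720p entry (timing values copied from the table; the 60 Hz refresh rate is an assumption, not taken from this patch):

	#include <stdio.h>

	int main(void)
	{
		/* hdmi_lcdc_timing[hd_720p]: 1280x720 active area */
		unsigned h_total = 1280 + 40 /* hsync */ + 220 /* h back porch */ + 110 /* h front porch */;
		unsigned v_total =  720 +  5 /* vsync */ +  20 /* v back porch */ +   5 /* v front porch */;

		/* 1650 * 750 * 60 = 74250000, matching .clk_rate = 74250000 */
		printf("h_total=%u v_total=%u pixclk=%u\n",
		       h_total, v_total, h_total * v_total * 60);
		return 0;
	}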
+static u8 hdmi_request_ddc(struct i2c_client *client, int request) +{ + int retry = 10; + static u8 val = 0; + u8 tmp; + + HDMI_DBG("%s, request=%d\n", __func__, request); + + if (request) { + val = hdmi_read(client, HDMI_SYS_CTL); + hdmi_write_byte(client, HDMI_SYS_CTL, (val | 0x04)); + msleep(1); + hdmi_write_byte(client, HDMI_SYS_CTL, (val | 0x06)); + msleep(1); + + } else { + do { + hdmi_write_byte(client, HDMI_SYS_CTL, (val & 0xf9)); + tmp = hdmi_read(client, HDMI_SYS_CTL); + msleep(1); + /* make sure bit [2:1] = 00 */ + } while ((tmp & 0x06) & retry--) ; + } + + return 0; +} +#endif + +static uint8_t timing_id[][3] = { + { 0x81, 0xc0, 1 << 6 }, /* 1280x720 */ + { 0x3b, 0x80, 1 << 5 }, /* 720x576 */ + { 0x3b, 0x00, 1 << 4 }, /* 720x480 */ +}; + +//---------------------------------------------------------------------- +static irqreturn_t hdmi_irq_handler(int irq, void *data) +{ + struct hdmi_info *hdmi = (struct hdmi_info *) data; + HDMI_DBG("%s\n", __func__); + + disable_irq_nosync(hdmi->client->irq); + hdmi->isr_enabled = false; + hdmi->first = true; + if (!hdmi->cable_connected) { + hdmi->timer.expires = jiffies + INTERVAL_HDCP_POLLING; + add_timer(&hdmi->timer); + } + + return IRQ_HANDLED; +} +/* ---------------------------------------------------------------- */ +extern bool hdmifb_suspending; +static int hdmi_panel_blank(struct msm_lcdc_panel_ops *ops) +{ + struct hdmi_info *info = container_of(ops, struct hdmi_info, + hdmi_lcdc_ops); + + HDMI_DBG("%s\n", __func__); + + info->user_playing = false; + info->video_streaming= false; +#if 0 + /* if called from suspending */ + if (hdmifb_suspending) { + /* to avoid timer been revoked after standby */ + HDMI_DBG("suspending=true, disable timer\n"); + cancel_work_sync(&info->polling_work); + del_timer(&info->timer); + + HDMI_DBG("%s\n", __func__); + mutex_lock(&info->lock); + hdmi_standby(info); + info->power(2); + mutex_unlock(&info->lock); + } +#endif + return 0; +} + +static int hdmi_panel_unblank(struct msm_lcdc_panel_ops *ops) +{ + struct hdmi_info *info = container_of(ops, struct hdmi_info, + hdmi_lcdc_ops); + struct i2c_client *client = info->client; + + HDMI_DBG("%s\n", __func__); + clk_set_rate(info->ebi1_clk, 120000000); + if (info->suspending == true) { + HDMI_DBG("%s :actived before panel_init\n", __func__); + msleep(500); + } + + info->user_playing = true; + + return 0; +} + +static int hdmi_panel_init(struct msm_lcdc_panel_ops *ops) +{ + u8 conn; + struct hdmi_info *hd = container_of(ops, struct hdmi_info, + hdmi_lcdc_ops); + struct i2c_client *client = hd->client; + + HDMI_DBG("%s\n", __func__); + + if (hd->hdmi_gpio_on) + hd->hdmi_gpio_on(); + /* Turn-on 5V to ensure hot-plug detection */ + hd->power(5); + +#if 0 + /* For D2 sleep mode */ + hd->power(1); + + ret = hdmi_write_byte(client, HDMI_EN_REG, 0x00); + if (ret < 0) + goto fail; + + hdmi_disable_int(client); + + data = hdmi_read(client, HDMI_POWER); + if (data & 0xfc) { + dev_info(&client->dev, "power state = %d\n", data & 0xfc); + } else { + dev_info(&client->dev, "bring HDMI back\n"); + hdmi_enable_int(client); + HDMI_DBG("hotplug state=%d\n", hd->cable_connected); + } +#else + if (hd->polling) { + mutex_lock(&hd->lock); + hd->power(3); + hdmi_wakeup(hd); + hd->first = true; + mod_timer(&hd->timer, jiffies + INTERVAL_HDCP_POLLING); + conn = hdmi_read(client, HDMI_INT_STAT) & HOT_PLUG_STATE; + tpi_init(hd); +#ifdef CONFIG_HTC_HEADSET_MGR + switch_send_event(BIT_HDMI_AUDIO, conn); +#endif + mutex_unlock(&hd->lock); + } + hd->suspending = false; +#endif + return 0; 
+/* +fail: + return ret; +*/ +} + +static int hdmi_panel_uninit(struct msm_lcdc_panel_ops *ops) +{ + struct hdmi_info *info = container_of(ops, struct hdmi_info, + hdmi_lcdc_ops); + HDMI_DBG("%s\n", __func__); + + if (info->hdmi_gpio_off) + info->hdmi_gpio_off(); +#if 0 + /* For D2 sleep mode */ + info->power(0); +#endif + + if (hdmifb_suspending) { + /* to avoid timer been revoked after standby */ + HDMI_DBG("suspending=true, disable timer\n"); + cancel_work_sync(&info->polling_work); + del_timer(&info->timer); + flush_scheduled_work(); + + HDMI_DBG("%s\n", __func__); + mutex_lock(&info->lock); + hdmi_standby(info); + info->power(4); + mutex_unlock(&info->lock); + } + info->suspending = true; + + return 0; +} + +int avc_set_video_parm(struct hdmi_info *hdmi); +int avc_set_blank_screen(struct hdmi_info *hdmi); +void hdmi_pre_change(struct hdmi_info *hdmi) { + if (hdmi->sleeping == SLEEP) + return; + mutex_lock(&hdmi->polling_lock); + HDMI_DBG("%s\n", __func__); + avc_set_blank_screen(hdmi); + hdcp_off(hdmi); + mutex_unlock(&hdmi->polling_lock); +} + +void hdmi_post_change(struct hdmi_info *info, struct fb_var_screeninfo *var) +{ + u8 data[4]; + int i, ret, retry = 10; + struct msm_lcdc_timing *timing; + unsigned h_total, v_total, h_curr, v_curr; + + if (info->sleeping == SLEEP) + return; + + mutex_lock(&info->polling_lock); + HDMI_DBG("%s\n", __func__); + timing = &hdmi_lcdc_timing[info->res]; + + h_total = var->xres + timing->hsync_pulse_width + + timing->hsync_back_porch + timing->hsync_front_porch; + v_total = var->yres + timing->vsync_pulse_width + + timing->vsync_back_porch + timing->vsync_front_porch; + /* Waiting for video stream until steady */ + for (i = 0; i < retry ; i++) { + /* TODO: error handling. */ + /* Read current horizontal/vertical info of video */ + data[0] = hdmi_read(info->client, 0x6a); + data[1] = hdmi_read(info->client, 0x6b); + data[2] = hdmi_read(info->client, 0x6c); + data[3] = hdmi_read(info->client, 0x6d); + h_curr = ((int)data[1]) << 8 | data[0]; + v_curr = ((int)data[3]) << 8 | data[2]; + if (h_curr == h_total && v_curr == v_total) + break; + msleep(17); + } + + avc_set_video_parm(info); + avc_send_avi_info_frames(info); + info->video_streaming = true; + + mutex_unlock(&info->polling_lock); +} + +static struct msm_fb_data hdmi_lcdc_fb_data = { +#if 1 + .xres = 1280, + .yres = 720, +#else + .xres = 720, + .yres = 480, +#endif + .width = 94, + .height = 57, + .output_format = 0, +}; + +static struct msm_lcdc_platform_data hdmi_lcdc_platform_data = { + .timing = &hdmi_lcdc_timing[hd_720p], + .fb_id = 0, + .fb_data = &hdmi_lcdc_fb_data, +}; + +static struct platform_device hdmi_lcdc_device = { + .name = "msm_mdp_hdmi", + .id = -1, +}; + +int register_hdmi_client(struct class_interface *interface) +{ + if (!hdmi_class) { + pr_err("mdp: no hdmi_class when register hdmi client\n"); + return -ENODEV; + } + interface->class = hdmi_class; + return class_interface_register(interface); +} + +#if defined(OLD_DEBUGFS) +static spinlock_t hdmi_dbgfs_lock; +ssize_t hdmi_dbgfs_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t hdmi_edid_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + static char line[80], buffer[80*8*4]; + static char hextab[] = "0123456789abcdef"; + int i, j, n = 0, v, len, offset, line_size; + unsigned long irq_flags; + struct hdmi_info *hdmi = (struct hdmi_info*)filp->private_data; + + len = ((int)hdmi->edid_buf[0x7e]+1) * 128; + 
spin_lock_irqsave(&hdmi_dbgfs_lock, irq_flags); + memset(line, ' ', 79); + line[79] = '\0'; + offset = strlen("0000 | "); + line_size = offset + 3 * 16 + 1; + + for (i = 0; i < len / 16 ; i++) { + scnprintf(line, offset + 1, "%04x | ", (i << 4)); + for (j = 0; j < 16 ; j++) { + v = hdmi->edid_buf[i * 16 + j]; + line[offset + j * 3] = hextab[v / 16]; + line[offset + j * 3 + 1] = hextab[v % 16]; + } + line[line_size - 1] = '\n'; + strncpy(buffer + i * line_size, line, line_size); + n += line_size; + } + spin_unlock_irqrestore(&hdmi_dbgfs_lock, irq_flags); + return simple_read_from_buffer(buf, count, ppos, buffer, n); +} + +#if 0 +static ssize_t hdmi_dbgfs_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned long v; + unsigned long irq_flags; + char buff[80]; + struct tv_reg_data *trd = (struct tv_reg_data *)filp->private_data; + + if (count >= sizeof(buff)) + return -EINVAL; + if (copy_from_user(&buff, buf, 80)) + return -EFAULT; + buff[count] = 0; + + spin_lock_irqsave(&hdmi_dbgfs_lock, irq_flags); + strict_strtoul(buff, 16, &v); + buff[strlen(buff)]=0; + writel(v, tvenc_base+trd->offset); + spin_unlock_irqrestore(&hdmi_dbgfs_lock, irq_flags); + + return count; +} +#endif + +static ssize_t hdmi_cable_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + int n; + char buffer[80]; + struct hdmi_info *hdmi = (struct hdmi_info*)filp->private_data; + + n = scnprintf(buffer, 80, "%d\n", hdmi->cable_connected); + n++; + buffer[n] = 0; + return simple_read_from_buffer(buf, count, ppos, buffer, n); +} + +static ssize_t hdmi_sleeping_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + int n; + char buffer[80]; + struct hdmi_info *hdmi = (struct hdmi_info*)filp->private_data; + + n = scnprintf(buffer, 80, "%d\n", hdmi->sleeping); + n++; + buffer[n] = 0; + return simple_read_from_buffer(buf, count, ppos, buffer, n); +} + +int hdmifb_get_mode(void); +static ssize_t hdmi_fb_mode_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + int n; + char buffer[80]; + struct hdmi_info *hdmi = (struct hdmi_info*)filp->private_data; + + n = scnprintf(buffer, 80, "%d\n", hdmifb_get_mode()); + n++; + buffer[n] = 0; + return simple_read_from_buffer(buf, count, ppos, buffer, n); +} + +static ssize_t hdmi_isr_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + int n; + char buffer[80]; + struct hdmi_info *hdmi = (struct hdmi_info*)filp->private_data; + + n = scnprintf(buffer, 80, "%d\n", hdmi->isr_enabled); + n++; + buffer[n] = 0; + return simple_read_from_buffer(buf, count, ppos, buffer, n); +} + +static struct file_operations hdmi_fops[] = { + { + .open = hdmi_dbgfs_open, + .read = hdmi_edid_read, + }, + { /* cable*/ + .open = hdmi_dbgfs_open, + .read = hdmi_cable_read, + }, + { /* sleeping */ + .open = hdmi_dbgfs_open, + .read = hdmi_sleeping_read, + }, + { /* fb_mode */ + .open = hdmi_dbgfs_open, + .read = hdmi_fb_mode_read, + }, + { /* isr_enabled */ + .open = hdmi_dbgfs_open, + .read = hdmi_isr_read, + }, + +}; + +static int hdmi_debugfs_init(struct hdmi_info *hdmi) +{ + struct dentry *dent_hdmi; + int ret; + + spin_lock_init(&hdmi_dbgfs_lock); + dent_hdmi = debugfs_create_dir("hdmi", 0); + if (IS_ERR(dent_hdmi)) + return PTR_ERR(dent_hdmi); + debugfs_create_file("edid", 0644, dent_hdmi, hdmi, &hdmi_fops[0]); + debugfs_create_file("cable", 0444, dent_hdmi, hdmi, &hdmi_fops[1]); + debugfs_create_file("sleeping", 0444, dent_hdmi, hdmi, &hdmi_fops[2]); + 
debugfs_create_file("fb_mode", 0444, dent_hdmi, hdmi, &hdmi_fops[3]); + debugfs_create_file("isr", 0444, dent_hdmi, hdmi, &hdmi_fops[4]); + + return 0; +} +#endif + +static int __init hdmi_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct hdmi_info *hd; + struct hdmi_platform_data *pdata; + int ret = -EIO; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + dev_err(&client->dev, "No supported I2C func\n"); + ret = -ENOTSUPP; + goto exit; + } + + hd = kzalloc(sizeof(*hd), GFP_KERNEL); + if (hd == NULL) { + ret = -ENOMEM; + goto exit; + } + + hd->client = client; + i2c_set_clientdata(client, hd); + mutex_init(&hd->lock); + mutex_init(&hd->lock2); + mutex_init(&hd->polling_lock); + + hd->ebi1_clk = clk_get(NULL, "ebi1_clk"); + if (IS_ERR(hd->ebi1_clk)) { + dev_err(&client->dev, "get ebi1 clk fail\n"); + goto fail_get_ebi1; + } + + pdata = client->dev.platform_data; + if (unlikely(!pdata) || unlikely(!pdata->power)) { + dev_err(&client->dev, "No platform data\n"); + ret = -ENXIO; + goto fail_power; + } else { + if (pdata->hdmi_gpio_on) + pdata->hdmi_gpio_on(); + hd->power = pdata->power; + ret = hd->power(1); + if (ret) { + dev_err(&client->dev, "hdmi power on failed\n"); + ret = -EIO; + goto fail_power; + } + } + + ret = hdmi_write_byte(client, HDMI_EN_REG, 0x00); + if (ret < 0) { + ret = -EIO; + goto fail_hdmi_init; + } + + ret = hdmi_read(client, HDMI_IDENTIFY); + if (ret < 0) { + ret = -EIO; + goto fail_hdmi_init; + } else if (ret != 0xb0) { + dev_err(&client->dev, "can not recognize, 0x%x\n", ret); + ret = -ENXIO; + goto fail_hdmi_init; + } + + hdmi_disable_int(client); + + hd->user_playing = false; + tpi_prepare(hd); + ret = request_irq(client->irq, hdmi_irq_handler, IRQF_TRIGGER_LOW, + client->name, hd); + if (ret) { + /* HDMI did not care if interrupt fail */ + dev_err(&client->dev, "request irq fail, err = %d\n", ret); + } else { + ret = hdmi_enable_int(client); + if (ret) { + free_irq(client->irq, hd); + ret = -ENOTSUPP; + } + } + + dev_info(&client->dev, "hdmi is on line with irq %s\n", + ret ? 
"Disabled" : "Enabled"); + + /* set up "panel" */ + hd->hdmi_lcdc_ops.init = hdmi_panel_init; + hd->hdmi_lcdc_ops.uninit = hdmi_panel_uninit; + hd->hdmi_lcdc_ops.blank = hdmi_panel_blank; + hd->hdmi_lcdc_ops.unblank = hdmi_panel_unblank; + hd->hdmi_gpio_on = pdata->hdmi_gpio_on; + hd->hdmi_gpio_off = pdata->hdmi_gpio_off; + + hdmi_lcdc_platform_data.panel_ops = &hd->hdmi_lcdc_ops; + hdmi_lcdc_platform_data.fb_resource = &pdata->hdmi_res; + hdmi_lcdc_device.dev.platform_data = &hdmi_lcdc_platform_data; + ret = platform_device_register(&hdmi_lcdc_device); + if (ret) + goto fail_hdmi_init; + + hd->hdmi_dev.check_res = hdmi_check_res; + hd->hdmi_dev.set_res = hdmi_set_res; + hd->hdmi_dev.get_cable_state = hdmi_get_cable_state; + hd->hdmi_dev.get_establish_timing = hdmi_get_established_timing; + + hd->hdmi_dev.dev.parent = &client->dev; + hd->hdmi_dev.dev.class = hdmi_class; + //snprintf(hd->hdmi_dev.dev.bus_id, BUS_ID_SIZE, "hdmi%d", 0); + dev_set_name(&hd->hdmi_dev.dev, "hdmi%d", 0); + ret = device_register(&hd->hdmi_dev.dev); + if (ret) + dev_err(&client->dev, "device register fail\n"); + +#if defined(HDMI_DEBUGFS) + hdmi_debugfs_init(hd); +#endif + /* check any pending interrupt */ + hdmi_irq_handler(client->irq, hd); + return 0; + +fail_hdmi_init: +fail_get_ebi1: + clk_put(hd->ebi1_clk); +fail_power: + kfree(hd); +exit: + dev_err(&client->dev, "%s fail, err = %d\n", __func__, ret); + return ret; +} + +/* -------------------------------------------------------------------- */ + +static const struct i2c_device_id hdmi_id[] = { + {HDMI_NAME, 0}, + { } +}; + +static struct i2c_driver hdmi_driver = { + .probe = hdmi_probe, + /*.remove = hdmi_remove,*/ + .id_table = hdmi_id, + .driver = { + .name = HDMI_NAME, + }, +}; + +static int __init hdmi_init(void) +{ + hdmi_class = class_create(THIS_MODULE, "msm_hdmi"); + if (IS_ERR(hdmi_class)) { + printk(KERN_ERR "Error creating hdmi class\n"); + return PTR_ERR(hdmi_class); + } + return i2c_add_driver(&hdmi_driver); +} + +static void __exit hdmi_exit(void) +{ + i2c_del_driver(&hdmi_driver); +} + +module_init(hdmi_init); +module_exit(hdmi_exit); + +MODULE_DESCRIPTION("Sil902x hdmi driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/video/msm/logo.c b/drivers/video/msm/logo.c new file mode 100644 index 0000000000000..7272765f48cd0 --- /dev/null +++ b/drivers/video/msm/logo.c @@ -0,0 +1,98 @@ +/* drivers/video/msm/logo.c + * + * Show Logo in RLE 565 format + * + * Copyright (C) 2008 Google Incorporated + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include +#include +#include +#include +#include +#include + +#include +#include + +#define fb_width(fb) ((fb)->var.xres) +#define fb_height(fb) ((fb)->var.yres) +#define fb_size(fb) ((fb)->var.xres * (fb)->var.yres * 2) + +static void memset16(void *_ptr, unsigned short val, unsigned count) +{ + unsigned short *ptr = _ptr; + count >>= 1; + while (count--) + *ptr++ = val; +} + +/* 565RLE image format: [count(2 bytes), rle(2 bytes)] */ +int load_565rle_image(char *filename) +{ + struct fb_info *info; + int fd, err = 0; + unsigned count, max; + unsigned short *data, *bits, *ptr; + + info = registered_fb[0]; + if (!info) { + printk(KERN_WARNING "%s: Can not access framebuffer\n", + __func__); + return -ENODEV; + } + + fd = sys_open(filename, O_RDONLY, 0); + if (fd < 0) { + printk(KERN_WARNING "%s: Can not open %s\n", + __func__, filename); + return -ENOENT; + } + count = (unsigned)sys_lseek(fd, (off_t)0, 2); + if (count == 0) { + sys_close(fd); + err = -EIO; + goto err_logo_close_file; + } + sys_lseek(fd, (off_t)0, 0); + data = kmalloc(count, GFP_KERNEL); + if (!data) { + printk(KERN_WARNING "%s: Can not alloc data\n", __func__); + err = -ENOMEM; + goto err_logo_close_file; + } + if ((unsigned)sys_read(fd, (char *)data, count) != count) { + err = -EIO; + goto err_logo_free_data; + } + + max = fb_width(info) * fb_height(info); + ptr = data; + bits = (unsigned short *)(info->screen_base); + while (count > 3) { + unsigned n = ptr[0]; + if (n > max) + break; + memset16(bits, ptr[1], n << 1); + bits += n; + max -= n; + ptr += 2; + count -= 4; + } + +err_logo_free_data: + kfree(data); +err_logo_close_file: + sys_close(fd); + return err; +} +EXPORT_SYMBOL(load_565rle_image); diff --git a/drivers/video/msm/mddi.c b/drivers/video/msm/mddi.c index b66d86ac7cea6..15d4b725b084e 100644 --- a/drivers/video/msm/mddi.c +++ b/drivers/video/msm/mddi.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include @@ -29,6 +28,11 @@ #include #include #include +#include +#include +#include +#include +#include #include #include "mddi_hw.h" @@ -40,6 +44,10 @@ #define CMD_GET_CLIENT_CAP 0x0601 #define CMD_GET_CLIENT_STATUS 0x0602 +static uint32_t mddi_debug_flags; +#ifdef CONFIG_MSM_MDP40 +static struct clk *mdp_clk; +#endif union mddi_rev { unsigned char raw[MDDI_REV_BUFFER_SIZE]; struct mddi_rev_packet hdr; @@ -84,6 +92,9 @@ struct mddi_info { struct mddi_client_caps caps; struct mddi_client_status status; + struct wake_lock idle_lock; + struct wake_lock link_active_idle_lock; + void (*power_client)(struct msm_mddi_client_data *, int); /* client device published to bind us to the @@ -92,9 +103,14 @@ struct mddi_info { char client_name[20]; struct platform_device client_pdev; + unsigned type; + char debugfs_buf[32]; }; static void mddi_init_rev_encap(struct mddi_info *mddi); +/* FIXME: Workaround for Novatek +static void mddi_skew_calibration(struct mddi_info *mddi); +*/ #define mddi_readl(r) readl(mddi->base + (MDDI_##r)) #define mddi_writel(v, r) writel((v), mddi->base + (MDDI_##r)) @@ -103,7 +119,7 @@ void mddi_activate_link(struct msm_mddi_client_data *cdata) { struct mddi_info *mddi = container_of(cdata, struct mddi_info, client_data); - + wake_lock(&mddi->link_active_idle_lock); mddi_writel(MDDI_CMD_LINK_ACTIVE, CMD); } @@ -127,6 +143,8 @@ static void mddi_handle_rev_data(struct mddi_info *mddi, union mddi_rev *rev) if ((rev->hdr.length <= MDDI_REV_BUFFER_SIZE - 2) && (rev->hdr.length >= sizeof(struct mddi_rev_packet) - 2)) { + /* printk(KERN_INFO "rev: len=%04x type=%04x\n", + * 
rev->hdr.length, rev->hdr.type); */ switch (rev->hdr.type) { case TYPE_CLIENT_CAPS: @@ -142,12 +160,15 @@ static void mddi_handle_rev_data(struct mddi_info *mddi, union mddi_rev *rev) wake_up(&mddi->int_wait); break; case TYPE_REGISTER_ACCESS: + /* printk(KERN_INFO "rev: reg %x = %x\n", + * rev->reg.register_address, + * rev->reg.register_data_list); */ ri = mddi->reg_read; if (ri == 0) { printk(KERN_INFO "rev: got reg %x = %x without " " pending read\n", rev->reg.register_address, - rev->reg.register_data_list); + rev->reg.u.reg_data); break; } if (ri->reg != rev->reg.register_address) { @@ -155,12 +176,12 @@ static void mddi_handle_rev_data(struct mddi_info *mddi, union mddi_rev *rev) "wrong register, expected " "%x\n", rev->reg.register_address, - rev->reg.register_data_list, ri->reg); + rev->reg.u.reg_data, ri->reg); break; } mddi->reg_read = NULL; ri->status = 0; - ri->result = rev->reg.register_data_list; + ri->result = rev->reg.u.reg_data; complete(&ri->done); break; default: @@ -201,6 +222,8 @@ static void mddi_handle_rev_data_avail(struct mddi_info *mddi) rev_crc_err_count = mddi_readl(REV_CRC_ERR); if (rev_data_count > 1) printk(KERN_INFO "rev_data_count %d\n", rev_data_count); + /* printk(KERN_INFO "rev_data_count %d, INT %x\n", rev_data_count, + * mddi_readl(INT)); */ if (rev_crc_err_count) { printk(KERN_INFO "rev_crc_err_count %d, INT %x\n", @@ -220,6 +243,22 @@ static void mddi_handle_rev_data_avail(struct mddi_info *mddi) if (rev_data_count == 0) return; + if (mddi_debug_flags & 1) { + int i; + union mddi_rev *rev = mddi->rev_data; + printk(KERN_INFO "INT %x, STAT %x, CURR_REV_PTR %x\n", + mddi_readl(INT), mddi_readl(STAT), + mddi_readl(CURR_REV_PTR)); + for (i = 0; i < MDDI_REV_BUFFER_SIZE; i++) { + if ((i % 16) == 0) + printk(KERN_INFO "\n"); + printk(KERN_INFO " %02x", rev->raw[i]); + } + printk(KERN_INFO "\n"); + } + + /* printk(KERN_INFO "rev_data_curr %d + %d\n", mddi->rev_data_curr, + * crev->hdr.length); */ prev_offset = mddi->rev_data_curr; length = *((uint8_t *)mddi->rev_data + mddi->rev_data_curr); @@ -245,12 +284,23 @@ static void mddi_handle_rev_data_avail(struct mddi_info *mddi) memcpy(&tmprev.raw[0], mddi->rev_data + prev_offset, rem); memcpy(&tmprev.raw[rem], mddi->rev_data, 2 + length - rem); mddi_handle_rev_data(mddi, &tmprev); + if (mddi_debug_flags & 2) { + memset(mddi->rev_data + prev_offset, 0xee, rem); + memset(mddi->rev_data, 0xee, mddi->rev_data_curr); + } } else { mddi_handle_rev_data(mddi, crev); + if (mddi_debug_flags & 2) + memset(mddi->rev_data + prev_offset, 0xee, + mddi->rev_data_curr - prev_offset); } + /* if(mddi->rev_data_curr + MDDI_MAX_REV_PKT_SIZE >= + * MDDI_REV_BUFFER_SIZE) { */ if (prev_offset < MDDI_REV_BUFFER_SIZE / 2 && mddi->rev_data_curr >= MDDI_REV_BUFFER_SIZE / 2) { + /* printk(KERN_INFO "passed buffer half full: rev_data_curr + * %d\n", mddi->rev_data_curr); */ mddi_writel(mddi->rev_addr, REV_PTR); } } @@ -269,6 +319,9 @@ static irqreturn_t mddi_isr(int irq, void *data) mddi_writel(active, INT); + /* printk(KERN_INFO "%s: isr a=%08x e=%08x s=%08x\n", + mddi->name, active, mddi->int_enable, status); */ + /* ignore any interrupts we have disabled */ active &= mddi->int_enable; @@ -288,11 +341,13 @@ static irqreturn_t mddi_isr(int irq, void *data) if (active & MDDI_INT_LINK_ACTIVE) { mddi->int_enable &= (~MDDI_INT_LINK_ACTIVE); mddi->int_enable |= MDDI_INT_IN_HIBERNATION; + wake_lock(&mddi->link_active_idle_lock); } if (active & MDDI_INT_IN_HIBERNATION) { mddi->int_enable &= (~MDDI_INT_IN_HIBERNATION); mddi->int_enable |= 
MDDI_INT_LINK_ACTIVE; + wake_unlock(&mddi->link_active_idle_lock); } mddi_writel(mddi->int_enable, INTEN); @@ -304,7 +359,7 @@ static irqreturn_t mddi_isr(int irq, void *data) static long mddi_wait_interrupt_timeout(struct mddi_info *mddi, uint32_t intmask, int timeout) { - unsigned long irq_flags; + unsigned long irq_flags=0; spin_lock_irqsave(&mddi->int_lock, irq_flags); mddi->got_int &= ~intmask; @@ -348,11 +403,11 @@ static uint16_t mddi_init_registers(struct mddi_info *mddi) mddi_writel(0x0001, VERSION); mddi_writel(MDDI_HOST_BYTES_PER_SUBFRAME, BPS); mddi_writel(0x0003, SPM); /* subframes per media */ - mddi_writel(0x0005, TA1_LEN); + if (mddi->type == MSM_MDP_MDDI_TYPE_II) + mddi_writel(0x00C8, TA1_LEN); + else + mddi_writel(0x0005, TA1_LEN); mddi_writel(MDDI_HOST_TA2_LEN, TA2_LEN); - mddi_writel(0x0096, DRIVE_HI); - /* 0x32 normal, 0x50 for Toshiba display */ - mddi_writel(0x0050, DRIVE_LO); mddi_writel(0x003C, DISP_WAKE); /* wakeup counter */ mddi_writel(MDDI_HOST_REV_RATE_DIV, REV_RATE_DIV); @@ -371,15 +426,37 @@ static uint16_t mddi_init_registers(struct mddi_info *mddi) } /* Recommendation from PAD hw team */ - mddi_writel(0xa850f, PAD_CTL); - + if (mddi->type == MSM_MDP_MDDI_TYPE_II) + mddi_writel(0x402a850f, PAD_CTL); + else + mddi_writel(0xa850f, PAD_CTL); + +#if defined (CONFIG_ARCH_QSD8X50) || defined (CONFIG_ARCH_MSM7X30) + /* Only for novatek driver IC*/ + mddi_writel(0x00C8, DRIVE_HI); + /* 0x32 normal, 0x50 for Toshiba display */ + mddi_writel(0x0050, DRIVE_LO); + mddi_writel(0x00320000, PAD_IO_CTL); + if (mddi->type == MSM_MDP_MDDI_TYPE_II) + mddi_writel(0x40880020, PAD_CAL); + else + mddi_writel(0x00220020, PAD_CAL); +#else + mddi_writel(0x0096, DRIVE_HI); + /* 0x32 normal, 0x50 for Toshiba display */ + mddi_writel(0x0050, DRIVE_LO); +#endif /* Need an even number for counts */ mddi_writel(0x60006, DRIVER_START_CNT); mddi_set_auto_hibernate(&mddi->client_data, 0); +#if 1 /* ignore listen */ mddi_writel(MDDI_CMD_DISP_IGNORE, CMD); +#else + mddi_writel(MDDI_CMD_DISP_LISTEN, CMD); +#endif mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND); mddi_init_rev_encap(mddi); @@ -390,6 +467,7 @@ static void mddi_suspend(struct msm_mddi_client_data *cdata) { struct mddi_info *mddi = container_of(cdata, struct mddi_info, client_data); + wake_lock(&mddi->idle_lock); /* turn off the client */ if (mddi->power_client) mddi->power_client(&mddi->client_data, 0); @@ -398,26 +476,40 @@ static void mddi_suspend(struct msm_mddi_client_data *cdata) mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND); /* turn off the clock */ clk_disable(mddi->clk); +#ifdef CONFIG_MSM_MDP40 + clk_disable(mdp_clk); +#endif + wake_unlock(&mddi->idle_lock); } static void mddi_resume(struct msm_mddi_client_data *cdata) { struct mddi_info *mddi = container_of(cdata, struct mddi_info, client_data); + wake_lock(&mddi->idle_lock); mddi_set_auto_hibernate(&mddi->client_data, 0); /* turn on the client */ if (mddi->power_client) mddi->power_client(&mddi->client_data, 1); +#ifdef CONFIG_MSM_MDP40 + clk_enable(mdp_clk); +#endif /* turn on the clock */ clk_enable(mddi->clk); /* set up the local registers */ mddi->rev_data_curr = 0; mddi_init_registers(mddi); +/* FIXME: Workaround for Novatek + if (mddi->type == MSM_MDP_MDDI_TYPE_II) + mddi_skew_calibration(mddi); +*/ mddi_writel(mddi->int_enable, INTEN); mddi_writel(MDDI_CMD_LINK_ACTIVE, CMD); - mddi_writel(MDDI_CMD_SEND_RTD, CMD); + if (mddi->type == MSM_MDP_MDDI_TYPE_I) + mddi_writel(MDDI_CMD_SEND_RTD, CMD); mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND); 
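/*
 * Illustrative aside, not part of this patch: mddi_suspend() and
 * mddi_resume() bracket the clock and register reprogramming with the
 * idle wake lock added above, so the CPU cannot enter idle power
 * collapse while the link is half-configured.  A minimal sketch of the
 * same bracket pattern, with hypothetical names, assuming the Android
 * wakelock API (<linux/wakelock.h>) already used in this file:
 */
#if 0	/* example only */
static struct wake_lock example_idle_lock;

static void example_init(void)
{
	wake_lock_init(&example_idle_lock, WAKE_LOCK_IDLE, "example_idle");
}

static void example_reconfigure(void (*reprogram)(void))
{
	wake_lock(&example_idle_lock);		/* block idle power collapse */
	reprogram();				/* clocks + MDDI registers   */
	wake_unlock(&example_idle_lock);	/* idle is allowed again     */
}
#endif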
mddi_set_auto_hibernate(&mddi->client_data, 1); + wake_unlock(&mddi->idle_lock); } static int __init mddi_get_client_caps(struct mddi_info *mddi) @@ -438,36 +530,44 @@ static int __init mddi_get_client_caps(struct mddi_info *mddi) mddi_writel(MDDI_CMD_LINK_ACTIVE, CMD); mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND); + /*FIXME: mddi host can't get caps on MDDI type 2*/ + if (mddi->type == MSM_MDP_MDDI_TYPE_I) { + for (j = 0; j < 3; j++) { + /* the toshiba vga panel does not respond to get + * caps unless you SEND_RTD, but the first SEND_RTD + * will fail... + */ + for (i = 0; i < 4; i++) { + uint32_t stat; + + mddi_writel(MDDI_CMD_SEND_RTD, CMD); + mdelay(1); + mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND); + stat = mddi_readl(STAT); + printk(KERN_INFO "mddi cmd send rtd: int %x, stat %x, " + "rtd val %x\n", mddi_readl(INT), stat, + mddi_readl(RTD_VAL)); + if ((stat & MDDI_STAT_RTD_MEAS_FAIL) == 0) { + mdelay(1); + break; + } + msleep(1); + } - for (j = 0; j < 3; j++) { - /* the toshiba vga panel does not respond to get - * caps unless you SEND_RTD, but the first SEND_RTD - * will fail... - */ - for (i = 0; i < 4; i++) { - uint32_t stat; - - mddi_writel(MDDI_CMD_SEND_RTD, CMD); + mddi_writel(CMD_GET_CLIENT_CAP, CMD); + mdelay(1); mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND); - stat = mddi_readl(STAT); - printk(KERN_INFO "mddi cmd send rtd: int %x, stat %x, " - "rtd val %x\n", mddi_readl(INT), stat, - mddi_readl(RTD_VAL)); - if ((stat & MDDI_STAT_RTD_MEAS_FAIL) == 0) - break; - msleep(1); - } - - mddi_writel(CMD_GET_CLIENT_CAP, CMD); - mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND); - wait_event_timeout(mddi->int_wait, mddi->flags & FLAG_HAVE_CAPS, + wait_event_timeout(mddi->int_wait, mddi->flags & FLAG_HAVE_CAPS, HZ / 100); - if (mddi->flags & FLAG_HAVE_CAPS) - break; - printk(KERN_INFO "mddi_init, timeout waiting for caps\n"); - } - return mddi->flags & FLAG_HAVE_CAPS; + if (mddi->flags & FLAG_HAVE_CAPS) + break; + printk(KERN_INFO KERN_ERR "mddi_init, timeout waiting for " + "caps\n"); + } + return (mddi->flags & FLAG_HAVE_CAPS); + } else + return 1; } /* link must be active when this is called */ @@ -508,42 +608,86 @@ int mddi_check_status(struct mddi_info *mddi) } -void mddi_remote_write(struct msm_mddi_client_data *cdata, uint32_t val, - uint32_t reg) +/* + * mddi_remote_write_vals - send the register access packet + * + * @cdata: mddi layer dedicated structure, holding info needed by mddi + * @val : parameters + * @reg : cmd + * @nr_bytes: size of parameters in bytes + * + * jay, Nov 13, 08' + * extend the single parameter to multiple. 
+ */ +void mddi_remote_write_vals(struct msm_mddi_client_data *cdata, uint8_t * val, + uint32_t reg, unsigned int nr_bytes) { struct mddi_info *mddi = container_of(cdata, struct mddi_info, client_data); struct mddi_llentry *ll; struct mddi_register_access *ra; + dma_addr_t bus_addr = 0; mutex_lock(&mddi->reg_write_lock); ll = mddi->reg_write_data; ra = &(ll->u.r); - ra->length = 14 + 4; + ra->length = 14 + nr_bytes; ra->type = TYPE_REGISTER_ACCESS; ra->client_id = 0; - ra->read_write_info = MDDI_WRITE | 1; + ra->read_write_info = MDDI_WRITE | (nr_bytes / 4); ra->crc16 = 0; ra->register_address = reg; - ra->register_data_list = val; ll->flags = 1; + /* register access packet header occupies 14 bytes */ ll->header_count = 14; - ll->data_count = 4; - ll->data = mddi->reg_write_addr + offsetof(struct mddi_llentry, - u.r.register_data_list); + ll->data_count = nr_bytes; /* num of bytes in the data field */ + + if (nr_bytes == 4) { + uint32_t *prm = (uint32_t *)val; + + ll->data = mddi->reg_write_addr + + offsetof(struct mddi_llentry, u.r.u.reg_data); + ra->u.reg_data = *prm; + } else { + int dma_retry = 5; + + while (dma_retry--) { + bus_addr = dma_map_single(NULL, (void *)val, nr_bytes, + DMA_TO_DEVICE); + if (dma_mapping_error(NULL, bus_addr) == 0) + break; + msleep(1); + } + if (dma_retry == 0) { + printk(KERN_ERR "%s: dma map fail!\n", __func__); + return; + } + + ll->data = bus_addr; + ra->u.reg_data_list = (uint32_t *)bus_addr; + } ll->next = 0; ll->reserved = 0; + /* inform mddi to start */ mddi_writel(mddi->reg_write_addr, PRI_PTR); - mddi_wait_interrupt(mddi, MDDI_INT_PRI_LINK_LIST_DONE); + if (bus_addr) + dma_unmap_single(NULL, bus_addr, nr_bytes, DMA_TO_DEVICE); mutex_unlock(&mddi->reg_write_lock); } +void mddi_remote_write(struct msm_mddi_client_data *cdata, uint32_t val, + uint32_t reg) +{ + uint8_t * p = (uint8_t *)&val; + mddi_remote_write_vals(cdata, p, reg, 4); +} + uint32_t mddi_remote_read(struct msm_mddi_client_data *cdata, uint32_t reg) { struct mddi_info *mddi = container_of(cdata, struct mddi_info, @@ -553,7 +697,7 @@ uint32_t mddi_remote_read(struct msm_mddi_client_data *cdata, uint32_t reg) struct reg_read_info ri; unsigned s; int retry_count = 2; - unsigned long irq_flags; + unsigned long irq_flags=0; mutex_lock(&mddi->reg_read_lock); @@ -576,20 +720,37 @@ uint32_t mddi_remote_read(struct msm_mddi_client_data *cdata, uint32_t reg) ll->reserved = 0; s = mddi_readl(STAT); + /* printk(KERN_INFO "mddi_remote_read(%x), stat = %x\n", reg, s); */ ri.reg = reg; ri.status = -1; + ri.result = -1; do { init_completion(&ri.done); + if (mddi->type == MSM_MDP_MDDI_TYPE_II) + mddi_set_auto_hibernate(&mddi->client_data, 0); + mddi_writel(MDDI_CMD_SEND_RTD, CMD); mddi->reg_read = &ri; mddi_writel(mddi->reg_read_addr, PRI_PTR); mddi_wait_interrupt(mddi, MDDI_INT_PRI_LINK_LIST_DONE); - - /* Enable Periodic Reverse Encapsulation. */ - mddi_writel(MDDI_CMD_PERIODIC_REV_ENCAP | 1, CMD); - mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND); + /* s = mddi_readl(STAT); */ + /* printk(KERN_INFO "mddi_remote_read(%x) sent, stat = %x\n", + * reg, s); */ + + /* s = mddi_readl(STAT); */ + /* while((s & MDDI_STAT_PRI_LINK_LIST_DONE) == 0){ */ + /* s = mddi_readl(STAT); */ + /* } */ + if (mddi->type == MSM_MDP_MDDI_TYPE_II) { + mddi_writel(MDDI_CMD_SEND_REV_ENCAP, CMD); + mddi_wait_interrupt(mddi, MDDI_INT_REV_DATA_AVAIL); + } else { + /* Enable Periodic Reverse Encapsulation. 
*/ + mddi_writel(MDDI_CMD_PERIODIC_REV_ENCAP | 1, CMD); + mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND); + } if (wait_for_completion_timeout(&ri.done, HZ/10) == 0 && !ri.done.done) { printk(KERN_INFO "mddi_remote_read(%x) timeout " @@ -604,17 +765,27 @@ uint32_t mddi_remote_read(struct msm_mddi_client_data *cdata, uint32_t reg) if (ri.status == 0) break; + /* printk(KERN_INFO "mddi_remote_read: failed, sent + * MDDI_CMD_SEND_RTD: int %x, stat %x, rtd val %x\n", + * mddi_readl(INT), mddi_readl(STAT), mddi_readl(RTD_VAL)); */ mddi_writel(MDDI_CMD_SEND_RTD, CMD); mddi_writel(MDDI_CMD_LINK_ACTIVE, CMD); + /* printk(KERN_INFO "mddi_remote_read: failed, sent + * MDDI_CMD_SEND_RTD: int %x, stat %x, rtd val %x\n", + * mddi_readl(INT), mddi_readl(STAT), mddi_readl(RTD_VAL)); */ mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND); printk(KERN_INFO "mddi_remote_read: failed, sent " "MDDI_CMD_SEND_RTD: int %x, stat %x, rtd val %x " "curr_rev_ptr %x\n", mddi_readl(INT), mddi_readl(STAT), mddi_readl(RTD_VAL), mddi_readl(CURR_REV_PTR)); + if (mddi->type == MSM_MDP_MDDI_TYPE_II) + mddi_set_auto_hibernate(&mddi->client_data, 1); } while (retry_count-- > 0); /* Disable Periodic Reverse Encapsulation. */ mddi_writel(MDDI_CMD_PERIODIC_REV_ENCAP | 0, CMD); mddi_wait_interrupt(mddi, MDDI_INT_NO_CMD_PKTS_PEND); + /* printk(KERN_INFO "mddi_remote_read(%x) done, stat = %x, + * return %x\n", reg, s, ri.result); */ mddi->reg_read = NULL; mutex_unlock(&mddi->reg_read_lock); return ri.result; @@ -627,7 +798,16 @@ static int __init mddi_clk_setup(struct platform_device *pdev, unsigned long clk_rate) { int ret; - +#ifdef CONFIG_MSM_MDP40 + mdp_clk = clk_get(&pdev->dev, "mdp_clk"); + if (IS_ERR(mdp_clk)) { + printk(KERN_INFO "mddi: failed to get mdp clk"); + return PTR_ERR(mdp_clk); + } + ret = clk_enable(mdp_clk); + if (ret) + goto fail; +#endif /* set up the clocks */ mddi->clk = clk_get(&pdev->dev, "mddi_clk"); if (IS_ERR(mddi->clk)) { @@ -640,6 +820,7 @@ static int __init mddi_clk_setup(struct platform_device *pdev, ret = clk_set_rate(mddi->clk, clk_rate); if (ret) goto fail; + printk(KERN_DEBUG "mddi runs at %ld\n", clk_get_rate(mddi->clk)); return 0; fail: @@ -666,8 +847,98 @@ static int __init mddi_rev_data_setup(struct mddi_info *mddi) sizeof(*mddi->reg_write_data); return 0; } +/* FIXME: Workaround for Novatek +static void mddi_skew_calibration(struct mddi_info *mddi) +{ + struct msm_mddi_platform_data *pdata = mddi->client_pdev.dev.platform_data; + + clk_set_rate( mddi->clk, 50000000); + mdelay(1); + mddi_writel(MDDI_CMD_SKEW_CALIBRATION, CMD); + mdelay(1); + clk_set_rate( mddi->clk, pdata->clk_rate); + mdelay(1); +} +*/ + +static ssize_t mddi_reg_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} -static int __devinit mddi_probe(struct platform_device *pdev) +static ssize_t mddi_reg_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct mddi_info *mddi = (struct mddi_info*)file->private_data; + + return simple_read_from_buffer(user_buf, count, ppos, mddi->debugfs_buf, strlen(mddi->debugfs_buf)); +} + +static ssize_t mddi_reg_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + unsigned int reg, data; + char debug_buf[32], type; + int cnt, len; + struct mddi_info *mddi = file->private_data; + + memset(debug_buf, 0x00, sizeof(debug_buf)); + + if (count > sizeof(debug_buf)) + return -EFAULT; + + if (copy_from_user(debug_buf, user_buf, count)) + return -EFAULT; + + debug_buf[count] = 
0; /* end of string */ + + if (debug_buf[0] == 'w') { + cnt = sscanf(debug_buf, "%s %x %x", &type ,®, &data); + mddi_set_auto_hibernate(&mddi->client_data, 0); + mddi_remote_write(&mddi->client_data, data, reg); + mddi_set_auto_hibernate(&mddi->client_data, 1); + + len = snprintf(mddi->debugfs_buf, sizeof(mddi->debugfs_buf), + "[W] reg=0x%x val=0x%x\n", reg, data); + printk(KERN_INFO "%s: reg=%x val=%x\n", __func__, reg, data); + } else { + cnt = sscanf(debug_buf, "%s %x", &type ,®); + + len = snprintf(mddi->debugfs_buf, sizeof(mddi->debugfs_buf), + "[R] reg=0x%x val=0x%x\n", reg, + mddi_remote_read(&mddi->client_data, reg)); + + printk(KERN_INFO "%s: reg=%x val=%x buf=%s\n", __func__, reg, + mddi_remote_read(&mddi->client_data, reg), debug_buf); + } + + return count; +} + +static struct file_operations mddi_reg_debugfs_fops[] = { + { + .open = mddi_reg_open, + .read = mddi_reg_read, + .write = mddi_reg_write, + } +}; + +int mddi_reg_debugfs_init(struct mddi_info *mddi) +{ + struct dentry *mddi_reg_dent; + + mddi_reg_dent = debugfs_create_dir("mddi", 0); + if (IS_ERR(mddi_reg_dent)) + return PTR_ERR(mddi_reg_dent); + + debugfs_create_file("reg", 0666, mddi_reg_dent, mddi, + &mddi_reg_debugfs_fops[0]); + + return 0; +} + +static int __init mddi_probe(struct platform_device *pdev) { struct msm_mddi_platform_data *pdata = pdev->dev.platform_data; struct mddi_info *mddi = &mddi_info[pdev->id]; @@ -695,12 +966,18 @@ static int __devinit mddi_probe(struct platform_device *pdev) printk(KERN_INFO "mddi: init() base=0x%p irq=%d\n", mddi->base, mddi->irq); mddi->power_client = pdata->power_client; + if (pdata->type != MSM_MDP_MDDI_TYPE_I) + mddi->type = pdata->type; mutex_init(&mddi->reg_write_lock); mutex_init(&mddi->reg_read_lock); spin_lock_init(&mddi->int_lock); init_waitqueue_head(&mddi->int_wait); + wake_lock_init(&mddi->idle_lock, WAKE_LOCK_IDLE, "mddi_idle_lock"); + wake_lock_init(&mddi->link_active_idle_lock, WAKE_LOCK_IDLE, + "mddi_link_active_idle_lock"); + ret = mddi_clk_setup(pdev, mddi, pdata->clk_rate); if (ret) { printk(KERN_ERR "mddi: failed to setup clock!\n"); @@ -723,9 +1000,10 @@ static int __devinit mddi_probe(struct platform_device *pdev) } /* turn on the mddi client bridge chip */ + #if 0 /*advised by SKY*/ if (mddi->power_client) mddi->power_client(&mddi->client_data, 1); - + #endif /* initialize the mddi registers */ mddi_set_auto_hibernate(&mddi->client_data, 0); mddi_writel(MDDI_CMD_RESET, CMD); @@ -746,11 +1024,19 @@ static int __devinit mddi_probe(struct platform_device *pdev) printk(KERN_INFO "mddi powerdown: stat %x\n", mddi_readl(STAT)); msleep(100); printk(KERN_INFO "mddi powerdown: stat %x\n", mddi_readl(STAT)); - return 0; + goto dummy_client; } + mddi_set_auto_hibernate(&mddi->client_data, 1); - if (mddi->caps.Mfr_Name == 0 && mddi->caps.Product_Code == 0) + /* + * FIXME: User kernel defconfig to link dedicated mddi client driver. 
+ */ +#if 0 + if ( mddi->caps.Mfr_Name == 0 && mddi->caps.Product_Code == 0) +#else + if (mddi->caps.Mfr_Name == 0 ) +#endif pdata->fixup(&mddi->caps.Mfr_Name, &mddi->caps.Product_Code); mddi->client_pdev.id = 0; @@ -768,8 +1054,17 @@ static int __devinit mddi_probe(struct platform_device *pdev) } } - if (i >= pdata->num_clients) + if (i >= pdata->num_clients) { +dummy_client: + mddi->client_data.private_client_data = + pdata->client_platform_data[0].client_data; + mddi->client_pdev.name = + pdata->client_platform_data[0].name; + mddi->client_pdev.id = + pdata->client_platform_data[0].id; mddi->client_pdev.name = "mddi_c_dummy"; + clk_disable(mddi->clk); + } printk(KERN_INFO "mddi: registering panel %s\n", mddi->client_pdev.name); @@ -777,6 +1072,7 @@ static int __devinit mddi_probe(struct platform_device *pdev) mddi->client_data.resume = mddi_resume; mddi->client_data.activate_link = mddi_activate_link; mddi->client_data.remote_write = mddi_remote_write; + mddi->client_data.remote_write_vals = mddi_remote_write_vals; mddi->client_data.remote_read = mddi_remote_read; mddi->client_data.auto_hibernate = mddi_set_auto_hibernate; mddi->client_data.fb_resource = pdata->fb_resource; @@ -794,6 +1090,8 @@ static int __devinit mddi_probe(struct platform_device *pdev) mddi->client_pdev.dev.platform_data = &mddi->client_data; printk(KERN_INFO "mddi: publish: %s\n", mddi->client_name); platform_device_register(&mddi->client_pdev); + mddi_reg_debugfs_init(mddi); + return 0; error_mddi_interface: @@ -803,6 +1101,8 @@ static int __devinit mddi_probe(struct platform_device *pdev) dma_free_coherent(NULL, 0x1000, mddi->rev_data, mddi->rev_addr); error_rev_data: error_clk_setup: + wake_lock_destroy(&mddi->idle_lock); + wake_lock_destroy(&mddi->link_active_idle_lock); error_get_irq_resource: iounmap(mddi->base); error_ioremap: @@ -811,6 +1111,37 @@ static int __devinit mddi_probe(struct platform_device *pdev) return ret; } +#if 0 /* read/write mddi registers from userspace */ +module_param_named(debug, mddi_debug_flags, uint, 0644); + +static uint32_t selected_register; +module_param_named(reg, selected_register, uint, 0644); + +static int set_reg(const char *val, struct kernel_param *kp) +{ + char *endp; + uint32_t l; + + if (!val) + return -EINVAL; + l = simple_strtoul(val, &endp, 0); + if (endp == val || ((uint32_t)l != l)) + return -EINVAL; + mddi_remote_write(kp->arg, l, selected_register); + return 0; +} + +static int get_reg(char *buffer, struct kernel_param *kp) +{ + int val; + val = mddi_remote_read(kp->arg, selected_register); + return sprintf(buffer, "%x", val); +} + +module_param_call(pmdh_val, set_reg, get_reg, &mddi_info[0], 0644); +module_param_call(emdh_val, set_reg, get_reg, &mddi_info[1], 0644); + +#endif static struct platform_driver mddi_driver = { .probe = mddi_probe, diff --git a/drivers/video/msm/mddi_client_dummy.c b/drivers/video/msm/mddi_client_dummy.c deleted file mode 100644 index d2a091cebe2c3..0000000000000 --- a/drivers/video/msm/mddi_client_dummy.c +++ /dev/null @@ -1,98 +0,0 @@ -/* drivers/video/msm_fb/mddi_client_dummy.c - * - * Support for "dummy" mddi client devices which require no - * special initialization code. - * - * Copyright (C) 2007 Google Incorporated - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include -#include -#include -#include - -#include - -struct panel_info { - struct platform_device pdev; - struct msm_panel_data panel_data; -}; - -static int mddi_dummy_suspend(struct msm_panel_data *panel_data) -{ - return 0; -} - -static int mddi_dummy_resume(struct msm_panel_data *panel_data) -{ - return 0; -} - -static int mddi_dummy_blank(struct msm_panel_data *panel_data) -{ - return 0; -} - -static int mddi_dummy_unblank(struct msm_panel_data *panel_data) -{ - return 0; -} - -static int mddi_dummy_probe(struct platform_device *pdev) -{ - struct msm_mddi_client_data *client_data = pdev->dev.platform_data; - struct panel_info *panel = - kzalloc(sizeof(struct panel_info), GFP_KERNEL); - int ret; - if (!panel) - return -ENOMEM; - platform_set_drvdata(pdev, panel); - panel->panel_data.suspend = mddi_dummy_suspend; - panel->panel_data.resume = mddi_dummy_resume; - panel->panel_data.blank = mddi_dummy_blank; - panel->panel_data.unblank = mddi_dummy_unblank; - panel->panel_data.caps = MSMFB_CAP_PARTIAL_UPDATES; - panel->pdev.name = "msm_panel"; - panel->pdev.id = pdev->id; - platform_device_add_resources(&panel->pdev, - client_data->fb_resource, 1); - panel->panel_data.fb_data = client_data->private_client_data; - panel->pdev.dev.platform_data = &panel->panel_data; - ret = platform_device_register(&panel->pdev); - if (ret) { - kfree(panel); - return ret; - } - return 0; -} - -static int mddi_dummy_remove(struct platform_device *pdev) -{ - struct panel_info *panel = platform_get_drvdata(pdev); - kfree(panel); - return 0; -} - -static struct platform_driver mddi_client_dummy = { - .probe = mddi_dummy_probe, - .remove = mddi_dummy_remove, - .driver = { .name = "mddi_c_dummy" }, -}; - -static int __init mddi_client_dummy_init(void) -{ - platform_driver_register(&mddi_client_dummy); - return 0; -} - -module_init(mddi_client_dummy_init); - diff --git a/drivers/video/msm/mddi_client_nt35399.c b/drivers/video/msm/mddi_client_epson.c similarity index 56% rename from drivers/video/msm/mddi_client_nt35399.c rename to drivers/video/msm/mddi_client_epson.c index f239f4a25e014..d5ea5e3e9d223 100644 --- a/drivers/video/msm/mddi_client_nt35399.c +++ b/drivers/video/msm/mddi_client_epson.c @@ -1,9 +1,5 @@ -/* drivers/video/msm_fb/mddi_client_nt35399.c - * - * Support for Novatek NT35399 MDDI client of Sapphire - * - * Copyright (C) 2008 HTC Incorporated - * Author: Solomon Chiu (solomon_chiu@htc.com) +/* + * Copyright (C) 2008 HTC Corporation. 
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -18,59 +14,71 @@ #include #include #include +#include #include -#include #include -#include +#include #include +#include +#include -static DECLARE_WAIT_QUEUE_HEAD(nt35399_vsync_wait); +static DECLARE_WAIT_QUEUE_HEAD(epson_vsync_wait); struct panel_info { struct msm_mddi_client_data *client_data; struct platform_device pdev; struct msm_panel_data panel_data; - struct msmfb_callback *fb_callback; - struct work_struct panel_work; - struct workqueue_struct *fb_wq; - int nt35399_got_int; + struct msmfb_callback *epson_callback; + struct wake_lock idle_lock; + int epson_got_int; +}; + +static struct platform_device mddi_eps_cabc = { + .name = "eps_cabc", + .id = 0, }; -static void -nt35399_request_vsync(struct msm_panel_data *panel_data, - struct msmfb_callback *callback) +static void epson_request_vsync(struct msm_panel_data *panel_data, + struct msmfb_callback *callback) { struct panel_info *panel = container_of(panel_data, struct panel_info, panel_data); struct msm_mddi_client_data *client_data = panel->client_data; - panel->fb_callback = callback; - if (panel->nt35399_got_int) { - panel->nt35399_got_int = 0; - client_data->activate_link(client_data); /* clears interrupt */ + panel->epson_callback = callback; + if (panel->epson_got_int) { + panel->epson_got_int = 0; + client_data->activate_link(client_data); } } -static void nt35399_wait_vsync(struct msm_panel_data *panel_data) +static void epson_clear_vsync(struct msm_panel_data *panel_data) { struct panel_info *panel = container_of(panel_data, struct panel_info, panel_data); struct msm_mddi_client_data *client_data = panel->client_data; - if (panel->nt35399_got_int) { - panel->nt35399_got_int = 0; + client_data->activate_link(client_data); +} + +static void epson_wait_vsync(struct msm_panel_data *panel_data) +{ + struct panel_info *panel = container_of(panel_data, struct panel_info, + panel_data); + struct msm_mddi_client_data *client_data = panel->client_data; + + if (panel->epson_got_int) { + panel->epson_got_int = 0; client_data->activate_link(client_data); /* clears interrupt */ } - - if (wait_event_timeout(nt35399_vsync_wait, panel->nt35399_got_int, + if (wait_event_timeout(epson_vsync_wait, panel->epson_got_int, HZ/2) == 0) printk(KERN_ERR "timeout waiting for VSYNC\n"); - - panel->nt35399_got_int = 0; + panel->epson_got_int = 0; /* interrupt clears when screen dma starts */ } -static int nt35399_suspend(struct msm_panel_data *panel_data) +static int epson_suspend(struct msm_panel_data *panel_data) { struct panel_info *panel = container_of(panel_data, struct panel_info, panel_data); @@ -80,9 +88,11 @@ static int nt35399_suspend(struct msm_panel_data *panel_data) client_data->private_client_data; int ret; + wake_lock(&panel->idle_lock); ret = bridge_data->uninit(bridge_data, client_data); + wake_unlock(&panel->idle_lock); if (ret) { - printk(KERN_INFO "mddi nt35399 client: non zero return from " + printk(KERN_INFO "mddi epson client: non zero return from " "uninit\n"); return ret; } @@ -90,7 +100,7 @@ static int nt35399_suspend(struct msm_panel_data *panel_data) return 0; } -static int nt35399_resume(struct msm_panel_data *panel_data) +static int epson_resume(struct msm_panel_data *panel_data) { struct panel_info *panel = container_of(panel_data, struct panel_info, panel_data); @@ -100,14 +110,16 @@ static int nt35399_resume(struct msm_panel_data *panel_data) client_data->private_client_data; 
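/*
 * Illustrative aside, not part of this patch: the epson and novatek
 * clients in this file share one vsync handshake -- the IRQ handler sets
 * a flag, fires any registered msmfb callback and wakes a waitqueue,
 * while wait_vsync() sleeps on that queue with the same HZ/2 timeout
 * seen above.  A stripped-down sketch with hypothetical names:
 */
#if 0	/* example only */
static DECLARE_WAIT_QUEUE_HEAD(example_vsync_wait);
static int example_got_vsync;

static irqreturn_t example_vsync_irq(int irq, void *data)
{
	example_got_vsync = 1;
	wake_up(&example_vsync_wait);	/* release any waiter */
	return IRQ_HANDLED;
}

static void example_wait_vsync(void)
{
	if (wait_event_timeout(example_vsync_wait, example_got_vsync,
			       HZ / 2) == 0)
		printk(KERN_ERR "example: timeout waiting for vsync\n");
	example_got_vsync = 0;	/* consumed; cleared for the next frame */
}
#endif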
int ret; + wake_lock(&panel->idle_lock); client_data->resume(client_data); + wake_unlock(&panel->idle_lock); ret = bridge_data->init(bridge_data, client_data); if (ret) return ret; return 0; } -static int nt35399_blank(struct msm_panel_data *panel_data) +static int epson_blank(struct msm_panel_data *panel_data) { struct panel_info *panel = container_of(panel_data, struct panel_info, panel_data); @@ -118,7 +130,7 @@ static int nt35399_blank(struct msm_panel_data *panel_data) return bridge_data->blank(bridge_data, client_data); } -static int nt35399_unblank(struct msm_panel_data *panel_data) +static int epson_unblank(struct msm_panel_data *panel_data) { struct panel_info *panel = container_of(panel_data, struct panel_info, panel_data); @@ -129,26 +141,24 @@ static int nt35399_unblank(struct msm_panel_data *panel_data) return bridge_data->unblank(bridge_data, client_data); } -irqreturn_t nt35399_vsync_interrupt(int irq, void *data) +static irqreturn_t epson_vsync_interrupt(int irq, void *data) { struct panel_info *panel = data; - panel->nt35399_got_int = 1; - - if (panel->fb_callback) { - panel->fb_callback->func(panel->fb_callback); - panel->fb_callback = NULL; + panel->epson_got_int = 1; + if (panel->epson_callback) { + panel->epson_callback->func(panel->epson_callback); + panel->epson_callback = 0; } - - wake_up(&nt35399_vsync_wait); - + wake_up(&epson_vsync_wait); return IRQ_HANDLED; } -static int setup_vsync(struct panel_info *panel, int init) +static int setup_vsync(struct panel_info *panel, + int init) { int ret; - int gpio = 97; + int gpio = 98; unsigned int irq; if (!init) { @@ -167,17 +177,16 @@ static int setup_vsync(struct panel_info *panel, int init) if (ret < 0) goto err_get_irq_num_failed; - ret = request_irq(irq, nt35399_vsync_interrupt, IRQF_TRIGGER_RISING, + ret = request_irq(irq, epson_vsync_interrupt, IRQF_TRIGGER_FALLING, "vsync", panel); if (ret) goto err_request_irq_failed; - printk(KERN_INFO "vsync on gpio %d now %d\n", gpio, gpio_get_value(gpio)); return 0; uninit: - free_irq(gpio_to_irq(gpio), panel->client_data); + free_irq(gpio_to_irq(gpio), panel); err_request_irq_failed: err_get_irq_num_failed: err_gpio_direction_input_failed: @@ -186,54 +195,56 @@ static int setup_vsync(struct panel_info *panel, int init) return ret; } -static int mddi_nt35399_probe(struct platform_device *pdev) +static int mddi_epson_probe(struct platform_device *pdev) { + int ret; struct msm_mddi_client_data *client_data = pdev->dev.platform_data; struct msm_mddi_bridge_platform_data *bridge_data = client_data->private_client_data; - - int ret; - - struct panel_info *panel = kzalloc(sizeof(struct panel_info), - GFP_KERNEL); - - printk(KERN_DEBUG "%s: enter.\n", __func__); - + struct panel_data *panel_data = &bridge_data->panel_conf; + struct panel_info *panel = + kzalloc(sizeof(struct panel_info), GFP_KERNEL); if (!panel) return -ENOMEM; platform_set_drvdata(pdev, panel); + printk(KERN_DEBUG "%s\n", __func__); + + if (panel_data->caps & MSMFB_CAP_CABC) { + printk(KERN_INFO "CABC enabled\n"); + mddi_eps_cabc.dev.platform_data = client_data; + platform_device_register(&mddi_eps_cabc); + } + ret = setup_vsync(panel, 1); if (ret) { - dev_err(&pdev->dev, "mddi_nt35399_setup_vsync failed\n"); + dev_err(&pdev->dev, "mddi_bridge_setup_vsync failed\n"); return ret; } panel->client_data = client_data; - panel->panel_data.suspend = nt35399_suspend; - panel->panel_data.resume = nt35399_resume; - panel->panel_data.wait_vsync = nt35399_wait_vsync; - panel->panel_data.request_vsync = nt35399_request_vsync; - 
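/*
 * Illustrative aside, not part of this patch: setup_vsync() above follows
 * the usual GPIO-to-IRQ idiom -- claim the pin, make it an input, map it
 * to an interrupt and install the handler, unwinding on failure.  The pin
 * number (98 here) and the falling edge are board/panel specific.  A
 * compact sketch with hypothetical names:
 */
#if 0	/* example only */
static int example_setup_vsync_irq(int gpio, irq_handler_t handler, void *data)
{
	int irq, ret;

	ret = gpio_request(gpio, "vsync");
	if (ret)
		return ret;

	ret = gpio_direction_input(gpio);
	if (ret)
		goto err_free;

	irq = gpio_to_irq(gpio);
	if (irq < 0) {
		ret = irq;
		goto err_free;
	}

	ret = request_irq(irq, handler, IRQF_TRIGGER_FALLING, "vsync", data);
	if (ret)
		goto err_free;

	return 0;

err_free:
	gpio_free(gpio);
	return ret;
}
#endif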
panel->panel_data.blank = nt35399_blank; - panel->panel_data.unblank = nt35399_unblank; - panel->panel_data.fb_data = &bridge_data->fb_data; - panel->panel_data.caps = 0; + panel->panel_data.suspend = epson_suspend; + panel->panel_data.resume = epson_resume; + panel->panel_data.wait_vsync = epson_wait_vsync; + panel->panel_data.request_vsync = epson_request_vsync; + panel->panel_data.clear_vsync = epson_clear_vsync; + panel->panel_data.blank = epson_blank; + panel->panel_data.unblank = epson_unblank; + panel->panel_data.fb_data = &bridge_data->fb_data; + panel->panel_data.caps = ~MSMFB_CAP_PARTIAL_UPDATES; panel->pdev.name = "msm_panel"; panel->pdev.id = pdev->id; panel->pdev.resource = client_data->fb_resource; panel->pdev.num_resources = 1; panel->pdev.dev.platform_data = &panel->panel_data; - - if (bridge_data->init) - bridge_data->init(bridge_data, client_data); - platform_device_register(&panel->pdev); + wake_lock_init(&panel->idle_lock, WAKE_LOCK_IDLE, "eps_idle_lock"); return 0; } -static int mddi_nt35399_remove(struct platform_device *pdev) +static int mddi_epson_remove(struct platform_device *pdev) { struct panel_info *panel = platform_get_drvdata(pdev); @@ -242,16 +253,17 @@ static int mddi_nt35399_remove(struct platform_device *pdev) return 0; } -static struct platform_driver mddi_client_0bda_8a47 = { - .probe = mddi_nt35399_probe, - .remove = mddi_nt35399_remove, - .driver = { .name = "mddi_c_0bda_8a47" }, +static struct platform_driver mddi_client_d263_0000 = { + .probe = mddi_epson_probe, + .remove = mddi_epson_remove, + .driver = { .name = "mddi_c_4ca3_0000" }, }; -static int __init mddi_client_nt35399_init(void) +static int __init mddi_client_epson_init(void) { - return platform_driver_register(&mddi_client_0bda_8a47); + platform_driver_register(&mddi_client_d263_0000); + return 0; } -module_init(mddi_client_nt35399_init); +module_init(mddi_client_epson_init); diff --git a/drivers/video/msm/mddi_client_novb9f6_5582.c b/drivers/video/msm/mddi_client_novb9f6_5582.c new file mode 100644 index 0000000000000..0229530a27efb --- /dev/null +++ b/drivers/video/msm/mddi_client_novb9f6_5582.c @@ -0,0 +1,312 @@ +/* + * Copyright (C) 2008 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static DECLARE_WAIT_QUEUE_HEAD(novtec_vsync_wait); + +struct panel_info { + struct msm_mddi_client_data *client_data; + struct platform_device pdev; + struct msm_panel_data panel_data; + struct msmfb_callback *novtec_callback; + struct wake_lock idle_lock; + int novtec_got_int; +}; + +static struct platform_device mddi_nov_cabc = { + .name = "nov_cabc", + .id = 0, +}; + +static void novtec_request_vsync(struct msm_panel_data *panel_data, + struct msmfb_callback *callback) +{ + struct panel_info *panel = container_of(panel_data, struct panel_info, + panel_data); + struct msm_mddi_client_data *client_data = panel->client_data; + + panel->novtec_callback = callback; + if (panel->novtec_got_int) { + panel->novtec_got_int = 0; + client_data->activate_link(client_data); + } +} + +static void novtec_clear_vsync(struct msm_panel_data *panel_data) +{ + struct panel_info *panel = container_of(panel_data, struct panel_info, + panel_data); + struct msm_mddi_client_data *client_data = panel->client_data; + + client_data->activate_link(client_data); +} + +static void novtec_wait_vsync(struct msm_panel_data *panel_data) +{ + struct panel_info *panel = container_of(panel_data, struct panel_info, + panel_data); + struct msm_mddi_client_data *client_data = panel->client_data; + + if (panel->novtec_got_int) { + panel->novtec_got_int = 0; + client_data->activate_link(client_data); /* clears interrupt */ + } + if (wait_event_timeout(novtec_vsync_wait, panel->novtec_got_int, + HZ/2) == 0) + printk(KERN_ERR "timeout waiting for VSYNC\n"); + panel->novtec_got_int = 0; + /* interrupt clears when screen dma starts */ +} + +static int novtec_suspend(struct msm_panel_data *panel_data) +{ + struct panel_info *panel = container_of(panel_data, struct panel_info, + panel_data); + struct msm_mddi_client_data *client_data = panel->client_data; + + struct msm_mddi_bridge_platform_data *bridge_data = + client_data->private_client_data; + int ret; + + wake_lock(&panel->idle_lock); + ret = bridge_data->uninit(bridge_data, client_data); + wake_unlock(&panel->idle_lock); + if (ret) { + printk(KERN_INFO "mddi novtec client: non zero return from " + "uninit\n"); + return ret; + } + client_data->suspend(client_data); + return 0; +} + +static int novtec_resume(struct msm_panel_data *panel_data) +{ + struct panel_info *panel = container_of(panel_data, struct panel_info, + panel_data); + struct msm_mddi_client_data *client_data = panel->client_data; + + struct msm_mddi_bridge_platform_data *bridge_data = + client_data->private_client_data; + int ret; + + wake_lock(&panel->idle_lock); + client_data->resume(client_data); + wake_unlock(&panel->idle_lock); + ret = bridge_data->init(bridge_data, client_data); + if (ret) + return ret; + return 0; +} + +static int novtec_blank(struct msm_panel_data *panel_data) +{ + struct panel_info *panel = container_of(panel_data, struct panel_info, + panel_data); + struct msm_mddi_client_data *client_data = panel->client_data; + struct msm_mddi_bridge_platform_data *bridge_data = + client_data->private_client_data; + + return bridge_data->blank(bridge_data, client_data); +} + +static int novtec_unblank(struct msm_panel_data *panel_data) +{ + struct panel_info *panel = container_of(panel_data, struct panel_info, + panel_data); + struct msm_mddi_client_data *client_data = panel->client_data; + struct msm_mddi_bridge_platform_data *bridge_data = + client_data->private_client_data; + + return 
bridge_data->unblank(bridge_data, client_data); +} + +static irqreturn_t novtec_vsync_interrupt(int irq, void *data) +{ + struct panel_info *panel = data; + + panel->novtec_got_int = 1; + if (panel->novtec_callback) { +//XXX T2 Fix For Supersonic +// mdelay(3); + panel->novtec_callback->func(panel->novtec_callback); + panel->novtec_callback = 0; + } + wake_up(&novtec_vsync_wait); + return IRQ_HANDLED; +} + +static int setup_vsync(struct panel_info *panel, + int init) +{ + int ret; + int gpio = 98; + unsigned int irq; + + if (!init) { + ret = 0; + goto uninit; + } + ret = gpio_request(gpio, "vsync"); + if (ret) + goto err_request_gpio_failed; + + ret = gpio_direction_input(gpio); + if (ret) + goto err_gpio_direction_input_failed; + + ret = irq = gpio_to_irq(gpio); + if (ret < 0) + goto err_get_irq_num_failed; + + ret = request_irq(irq, novtec_vsync_interrupt, IRQF_TRIGGER_FALLING, + "vsync", panel); + if (ret) + goto err_request_irq_failed; + printk(KERN_INFO "vsync on gpio %d now %d\n", + gpio, gpio_get_value(gpio)); + return 0; + +uninit: + free_irq(gpio_to_irq(gpio), panel); +err_request_irq_failed: +err_get_irq_num_failed: +err_gpio_direction_input_failed: + gpio_free(gpio); +err_request_gpio_failed: + return ret; +} + +/* maejrep's T2 interface - start */ +/* Allows for changing of the T2 register on the fly */ +static ssize_t nov_t2_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct msm_mddi_client_data *client_data = dev->platform_data; + int ret; + unsigned val; + + val = 0; + val |= client_data->remote_read(client_data, 0xb101) << 8; + val |= client_data->remote_read(client_data, 0xb102); + + ret = sprintf(buf, "T2: d%u, 0x%04xh\n", val, val); + + return ret; +} + +static ssize_t nov_t2_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct msm_mddi_client_data *client_data = dev->platform_data; + unsigned val; + + sscanf(buf, "%u", &val); + + if (val <= 245 || val > 999) { + printk(KERN_WARNING "%s: invalid value for t2: %u\n", __func__, val); + return -EINVAL; + } + + client_data->remote_write(client_data, (0xff00 & val) >> 8, 0xb101); + client_data->remote_write(client_data, (0x00ff & val), 0xb102); + + return count; +} + +DEVICE_ATTR(t2, 0644, nov_t2_show, nov_t2_store); +/* maejrep's T2 interface - end */ + +static int mddi_novtec_probe(struct platform_device *pdev) +{ + int ret; + struct msm_mddi_client_data *client_data = pdev->dev.platform_data; + struct msm_mddi_bridge_platform_data *bridge_data = + client_data->private_client_data; + struct panel_data *panel_data = &bridge_data->panel_conf; + struct panel_info *panel = + kzalloc(sizeof(struct panel_info), GFP_KERNEL); + + if (!panel) + return -ENOMEM; + platform_set_drvdata(pdev, panel); + + printk(KERN_DEBUG "%s\n", __func__); + + if (panel_data->caps & MSMFB_CAP_CABC) { + printk(KERN_INFO "CABC enabled\n"); + mddi_nov_cabc.dev.platform_data = client_data; + platform_device_register(&mddi_nov_cabc); + } + + ret = setup_vsync(panel, 1); + if (ret) { + dev_err(&pdev->dev, "mddi_bridge_setup_vsync failed\n"); + return ret; + } + + panel->client_data = client_data; + panel->panel_data.suspend = novtec_suspend; + panel->panel_data.resume = novtec_resume; + panel->panel_data.wait_vsync = novtec_wait_vsync; + panel->panel_data.request_vsync = novtec_request_vsync; + panel->panel_data.clear_vsync = novtec_clear_vsync; + panel->panel_data.blank = novtec_blank; + panel->panel_data.unblank = novtec_unblank; + panel->panel_data.fb_data = 
&bridge_data->fb_data; + panel->panel_data.caps = MSMFB_CAP_PARTIAL_UPDATES; + + panel->pdev.name = "msm_panel"; + panel->pdev.id = pdev->id; + panel->pdev.resource = client_data->fb_resource; + panel->pdev.num_resources = 1; + panel->pdev.dev.platform_data = &panel->panel_data; + platform_device_register(&panel->pdev); + wake_lock_init(&panel->idle_lock, WAKE_LOCK_IDLE, "nov_idle_lock"); + + return 0; +} + +static int mddi_novtec_remove(struct platform_device *pdev) +{ + struct panel_info *panel = platform_get_drvdata(pdev); + + setup_vsync(panel, 0); + kfree(panel); + return 0; +} + +static struct platform_driver mddi_client_d263_0000 = { + .probe = mddi_novtec_probe, + .remove = mddi_novtec_remove, + .driver = { .name = "mddi_c_b9f6_5582" }, +}; + +static int __init mddi_client_novtec_init(void) +{ + platform_driver_register(&mddi_client_d263_0000); + return 0; +} + +module_init(mddi_client_novtec_init); + diff --git a/drivers/video/msm/mddi_client_simple.c b/drivers/video/msm/mddi_client_simple.c new file mode 100644 index 0000000000000..b27a809dd14ca --- /dev/null +++ b/drivers/video/msm/mddi_client_simple.c @@ -0,0 +1,236 @@ +/* drivers/video/msm_fb/mddi_client_simple.c + * + * Support for simple mddi client devices which require no special + * initialization code except for what may be provided in the board file. + * If the clients do not provide board specific code, this driver's + * panel operations are no-ops. + * + * Copyright (C) 2007-2010, Google Incorporated + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +struct panel_info { + struct platform_device pdev; + struct msm_mddi_client_data *client_data; + struct msm_panel_data panel_data; + struct msmfb_callback *fb_callback; + wait_queue_head_t vsync_wait; + int got_vsync; + int irq; +}; + +#define to_panel_info(pd) container_of((pd), struct panel_info, panel_data) + +static void mddi_simple_request_vsync(struct msm_panel_data *panel_data, + struct msmfb_callback *callback) +{ + struct panel_info *panel = to_panel_info(panel_data); + struct msm_mddi_client_data *client_data = panel->client_data; + + panel->fb_callback = callback; + if (panel->got_vsync) { + panel->got_vsync = 0; + client_data->activate_link(client_data); /* clears interrupt */ + } +} + +static void mddi_simple_wait_vsync(struct msm_panel_data *panel_data) +{ + struct panel_info *panel = to_panel_info(panel_data); + struct msm_mddi_client_data *client_data = panel->client_data; + int ret; + + if (panel->got_vsync) { + panel->got_vsync = 0; + client_data->activate_link(client_data); /* clears interrupt */ + } + + ret = wait_event_timeout(panel->vsync_wait, panel->got_vsync, HZ/2); + if (!ret && !panel->got_vsync) + pr_err("mddi_client_simple: timeout waiting for vsync\n"); + + panel->got_vsync = 0; + /* interrupt clears when screen dma starts */ +} + + +static int mddi_simple_suspend(struct msm_panel_data *panel_data) +{ + struct panel_info *panel = to_panel_info(panel_data); + struct msm_mddi_client_data *client_data = panel->client_data; + struct msm_mddi_bridge_platform_data *bridge_data = + client_data->private_client_data; + int ret; + + if (!bridge_data->uninit) + return 0; + + ret = bridge_data->uninit(bridge_data, client_data); + if (ret) { + pr_info("%s: non zero return from uninit\n", __func__); + return ret; + } + client_data->suspend(client_data); + return 0; +} + +static int mddi_simple_resume(struct msm_panel_data *panel_data) +{ + struct panel_info *panel = to_panel_info(panel_data); + struct msm_mddi_client_data *client_data = panel->client_data; + struct msm_mddi_bridge_platform_data *bridge_data = + client_data->private_client_data; + + if (!bridge_data->init) + return 0; + + client_data->resume(client_data); + return bridge_data->init(bridge_data, client_data); +} + +static int mddi_simple_blank(struct msm_panel_data *panel_data) +{ + struct panel_info *panel = to_panel_info(panel_data); + struct msm_mddi_client_data *client_data = panel->client_data; + struct msm_mddi_bridge_platform_data *bridge_data = + client_data->private_client_data; + + if (!bridge_data->blank) + return 0; + return bridge_data->blank(bridge_data, client_data); +} + +static int mddi_simple_unblank(struct msm_panel_data *panel_data) +{ + struct panel_info *panel = to_panel_info(panel_data); + struct msm_mddi_client_data *client_data = panel->client_data; + struct msm_mddi_bridge_platform_data *bridge_data = + client_data->private_client_data; + + if (!bridge_data->unblank) + return 0; + return bridge_data->unblank(bridge_data, client_data); +} + +static irqreturn_t handle_vsync_irq(int irq, void *data) +{ + struct panel_info *panel = data; + + panel->got_vsync = 1; + if (panel->fb_callback) { + panel->fb_callback->func(panel->fb_callback); + panel->fb_callback = NULL; + } + + wake_up(&panel->vsync_wait); + return IRQ_HANDLED; +} + +static int mddi_simple_probe(struct platform_device *pdev) +{ + struct msm_mddi_client_data *client_data = pdev->dev.platform_data; + struct msm_mddi_bridge_platform_data 
*bridge_data = + client_data->private_client_data; + struct panel_info *panel; + int ret; + + pr_debug("%s()\n", __func__); + + panel = kzalloc(sizeof(struct panel_info), GFP_KERNEL); + if (!panel) + return -ENOMEM; + + platform_set_drvdata(pdev, panel); + + init_waitqueue_head(&panel->vsync_wait); + + panel->irq = platform_get_irq_byname(pdev, "vsync"); + if (panel->irq >= 0) { + ret = request_irq(panel->irq, handle_vsync_irq, + IRQF_TRIGGER_RISING, "mddi_c_simple_vsync", + panel); + if (ret) { + pr_err("%s: request vsync irq %d failed (%d)\n", + __func__, panel->irq, ret); + goto err_req_irq; + } + + panel->panel_data.wait_vsync = mddi_simple_wait_vsync; + panel->panel_data.request_vsync = mddi_simple_request_vsync; + } + + panel->client_data = client_data; + panel->panel_data.suspend = mddi_simple_suspend; + panel->panel_data.resume = mddi_simple_resume; + panel->panel_data.blank = mddi_simple_blank; + panel->panel_data.unblank = mddi_simple_unblank; + panel->panel_data.caps = bridge_data->caps; + panel->panel_data.fb_data = &bridge_data->fb_data; + + panel->pdev.name = "msm_panel"; + panel->pdev.id = pdev->id; + platform_device_add_resources(&panel->pdev, + client_data->fb_resource, 1); + panel->pdev.dev.platform_data = &panel->panel_data; + + if (bridge_data->init) + bridge_data->init(bridge_data, client_data); + + ret = platform_device_register(&panel->pdev); + if (ret) { + pr_err("%s: Can't register platform device\n", __func__); + goto err_plat_dev_reg; + } + + return 0; + +err_plat_dev_reg: + if (panel->irq >= 0) + free_irq(panel->irq, panel); +err_req_irq: + platform_set_drvdata(pdev, NULL); + kfree(panel); + return ret; +} + +static int mddi_simple_remove(struct platform_device *pdev) +{ + struct panel_info *panel = platform_get_drvdata(pdev); + kfree(panel); + return 0; +} + +static struct platform_driver mddi_client_simple = { + .probe = mddi_simple_probe, + .remove = mddi_simple_remove, + .driver = { + .owner = THIS_MODULE, + .name = "mddi_c_simple" + }, +}; + +static int __init mddi_client_simple_init(void) +{ + platform_driver_register(&mddi_client_simple); + return 0; +} + +module_init(mddi_client_simple_init); diff --git a/drivers/video/msm/mddi_client_toshiba.c b/drivers/video/msm/mddi_client_toshiba.c index f9bc932ac46b3..21e0ac73c4e06 100644 --- a/drivers/video/msm/mddi_client_toshiba.c +++ b/drivers/video/msm/mddi_client_toshiba.c @@ -60,6 +60,7 @@ struct panel_info { struct msm_panel_data panel_data; struct msmfb_callback *toshiba_callback; int toshiba_got_int; + int irq; }; @@ -175,47 +176,6 @@ irqreturn_t toshiba_vsync_interrupt(int irq, void *data) return IRQ_HANDLED; } -static int setup_vsync(struct panel_info *panel, - int init) -{ - int ret; - int gpio = 97; - unsigned int irq; - - if (!init) { - ret = 0; - goto uninit; - } - ret = gpio_request(gpio, "vsync"); - if (ret) - goto err_request_gpio_failed; - - ret = gpio_direction_input(gpio); - if (ret) - goto err_gpio_direction_input_failed; - - ret = irq = gpio_to_irq(gpio); - if (ret < 0) - goto err_get_irq_num_failed; - - ret = request_irq(irq, toshiba_vsync_interrupt, IRQF_TRIGGER_RISING, - "vsync", panel); - if (ret) - goto err_request_irq_failed; - printk(KERN_INFO "vsync on gpio %d now %d\n", - gpio, gpio_get_value(gpio)); - return 0; - -uninit: - free_irq(gpio_to_irq(gpio), panel); -err_request_irq_failed: -err_get_irq_num_failed: -err_gpio_direction_input_failed: - gpio_free(gpio); -err_request_gpio_failed: - return ret; -} - static int mddi_toshiba_probe(struct platform_device *pdev) { int ret; @@ 
-232,10 +192,16 @@ static int mddi_toshiba_probe(struct platform_device *pdev) client_data->remote_write(client_data, GPIOSEL_VWAKEINT, GPIOSEL); client_data->remote_write(client_data, INTMASK_VWAKEOUT, INTMASK); - ret = setup_vsync(panel, 1); + ret = platform_get_irq_byname(pdev, "vsync"); + if (ret < 0) + goto err_plat_get_irq; + + panel->irq = ret; + ret = request_irq(panel->irq, toshiba_vsync_interrupt, + IRQF_TRIGGER_RISING, "vsync", panel); if (ret) { dev_err(&pdev->dev, "mddi_bridge_setup_vsync failed\n"); - return ret; + goto err_req_irq; } panel->client_data = client_data; @@ -258,13 +224,19 @@ static int mddi_toshiba_probe(struct platform_device *pdev) platform_device_register(&panel->pdev); return 0; + +err_req_irq: +err_plat_get_irq: + kfree(panel); + return ret; } static int mddi_toshiba_remove(struct platform_device *pdev) { struct panel_info *panel = platform_get_drvdata(pdev); - setup_vsync(panel, 0); + platform_set_drvdata(pdev, NULL); + free_irq(panel->irq, panel); kfree(panel); return 0; } diff --git a/drivers/video/msm/mddi_hw.h b/drivers/video/msm/mddi_hw.h index 45cc01fc1e7fc..7858b0e16e977 100644 --- a/drivers/video/msm/mddi_hw.h +++ b/drivers/video/msm/mddi_hw.h @@ -53,6 +53,9 @@ #define MDDI_MF_CNT 0x0084 #define MDDI_CURR_REV_PTR 0x0088 #define MDDI_CORE_VER 0x008c +#define MDDI_SF_LEN_CTL_REG 0x0094 +#define MDDI_PAD_IO_CTL 0x00a0 +#define MDDI_PAD_CAL 0x00a4 #define MDDI_INT_PRI_PTR_READ 0x0001 #define MDDI_INT_SEC_PTR_READ 0x0002 @@ -112,9 +115,7 @@ #define MDDI_CMD_LINK_ACTIVE 0x0900 #define MDDI_CMD_PERIODIC_REV_ENCAP 0x0A00 #define MDDI_CMD_FORCE_NEW_REV_PTR 0x0C00 - - - +#define MDDI_CMD_SKEW_CALIBRATION 0x0D00 #define MDDI_VIDEO_REV_PKT_SIZE 0x40 #define MDDI_CLIENT_CAPABILITY_REV_PKT_SIZE 0x60 #define MDDI_MAX_REV_PKT_SIZE 0x60 @@ -125,9 +126,19 @@ /* MDP sends 256 pixel packets, so lower value hibernates more without * significantly increasing latency of waiting for next subframe */ #define MDDI_HOST_BYTES_PER_SUBFRAME 0x3C00 +#if defined (CONFIG_ARCH_QSD8X50) || defined (CONFIG_ARCH_MSM7X30) +#define MDDI_HOST_TA2_LEN 0x001a +#else #define MDDI_HOST_TA2_LEN 0x000c -#define MDDI_HOST_REV_RATE_DIV 0x0002 +#endif +#if defined (CONFIG_ARCH_QSD8X50) +#define MDDI_HOST_REV_RATE_DIV 0x0004 +#elif defined (CONFIG_ARCH_MSM7X30) +#define MDDI_HOST_REV_RATE_DIV 0x0010 +#else +#define MDDI_HOST_REV_RATE_DIV 0x0002 +#endif struct __attribute__((packed)) mddi_rev_packet { uint16_t length; @@ -284,8 +295,12 @@ struct __attribute__((packed)) mddi_register_access { uint16_t crc16; - uint32_t register_data_list; - /* list of 4-byte register data values for/from client registers */ + union { + uint32_t reg_data; + uint32_t *reg_data_list; + } u; + + uint16_t crc_data; }; struct __attribute__((packed)) mddi_llentry { diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c index c3636d55a3c5a..72fa9bb4dde39 100644 --- a/drivers/video/msm/mdp.c +++ b/drivers/video/msm/mdp.c @@ -21,59 +21,102 @@ #include #include #include +#include #include #include -#include #include #include #include #include "mdp_hw.h" +#include "mdp_ppp.h" +#include struct class *mdp_class; -#define MDP_CMD_DEBUG_ACCESS_BASE (0x10000) +#ifdef CONFIG_MSM_HDMI +/* Used to report LCDC underflows */ +void reportUnderflow(void); +#endif -static uint16_t mdp_default_ccs[] = { - 0x254, 0x000, 0x331, 0x254, 0xF38, 0xE61, 0x254, 0x409, 0x000, - 0x010, 0x080, 0x080 -}; +#define MDP_CMD_DEBUG_ACCESS_BASE (0x10000) -static DECLARE_WAIT_QUEUE_HEAD(mdp_dma2_waitqueue); static 
DECLARE_WAIT_QUEUE_HEAD(mdp_ppp_waitqueue); -static struct msmfb_callback *dma_callback; -static struct clk *clk; static unsigned int mdp_irq_mask; -static DEFINE_SPINLOCK(mdp_lock); -DEFINE_MUTEX(mdp_mutex); - -static int enable_mdp_irq(struct mdp_info *mdp, uint32_t mask) +static unsigned int mdp_dma_timer_enable = 0; +struct clk *mdp_clk_to_disable_later = 0; +static struct mdp_blit_req *timeout_req; +#ifdef CONFIG_FB_MSM_OVERLAY +extern int mdp4_overlay_get(struct mdp_device *mdp_dev, struct fb_info *info, struct mdp_overlay *req); +extern int mdp4_overlay_set(struct mdp_device *mdp_dev, struct fb_info *info, struct mdp_overlay *req); +extern int mdp4_overlay_unset(struct mdp_device *mdp_dev, struct fb_info *info, int ndx); +extern int mdp4_overlay_play(struct mdp_device *mdp_dev, struct fb_info *info, struct msmfb_overlay_data *req, + struct file **pp_src_file); +extern void mdp4_mddi_overlay(void *priv, uint32_t addr, uint32_t stride, + uint32_t width, uint32_t height, uint32_t x, + uint32_t y); +#include "mdp4.h" +#endif + + +static void mdp_do_standby_timer(unsigned long data) { - unsigned long irq_flags; - int ret = 0; + struct mdp_info *mdp = (struct mdp_info *) data; + if (!mdp_irq_mask) { + clk_set_rate(mdp->ebi1_clk, 0); + mdp->state |= MDP_STATE_STANDBY; + } else { + mod_timer(&mdp->standby_timer, + jiffies + msecs_to_jiffies(200)); + } +} +static int locked_enable_mdp_irq(struct mdp_info *mdp, uint32_t mask) +{ BUG_ON(!mask); - spin_lock_irqsave(&mdp_lock, irq_flags); /* if the mask bits are already set return an error, this interrupt * is already enabled */ if (mdp_irq_mask & mask) { - printk(KERN_ERR "mdp irq already on already on %x %x\n", - mdp_irq_mask, mask); - ret = -1; + pr_err("mdp irq already on %x %x\n", mdp_irq_mask, mask); + return -1; } /* if the mdp irq is not already enabled enable it */ if (!mdp_irq_mask) { - if (clk) - clk_enable(clk); + clk_enable(mdp->clk); enable_irq(mdp->irq); + if (mdp->state & MDP_STATE_STANDBY) { +#ifdef CONFIG_MSM_MDP40 + clk_set_rate(mdp->ebi1_clk, 153000000); +#else + clk_set_rate(mdp->ebi1_clk, 128000000); +#endif + mdp->state &= ~MDP_STATE_STANDBY; + } else { + del_timer_sync(&mdp->standby_timer); + barrier(); + } } + /* clear out any previous irqs for the requested mask*/ + mdp_writel(mdp, mask, MDP_INTR_CLEAR); + /* update the irq mask to reflect the fact that the interrupt is * enabled */ mdp_irq_mask |= mask; - spin_unlock_irqrestore(&mdp_lock, irq_flags); + mdp_writel(mdp, mdp_irq_mask, MDP_INTR_ENABLE); + return 0; +} + +static int enable_mdp_irq(struct mdp_info *mdp, uint32_t mask) +{ + unsigned long flags=0; + int ret; + + spin_lock_irqsave(&mdp->lock, flags); + ret = locked_enable_mdp_irq(mdp, mask); + spin_unlock_irqrestore(&mdp->lock, flags); return ret; } @@ -88,93 +131,202 @@ static int locked_disable_mdp_irq(struct mdp_info *mdp, uint32_t mask) /* update the irq mask to reflect the fact that the interrupt is * disabled */ mdp_irq_mask &= ~(mask); + mdp_writel(mdp, mdp_irq_mask, MDP_INTR_ENABLE); + /* if no one is waiting on the interrupt, disable it */ if (!mdp_irq_mask) { disable_irq_nosync(mdp->irq); - if (clk) - clk_disable(clk); + if (mdp->clk) + clk_disable(mdp->clk); + if (!(mdp->state & MDP_STATE_STANDBY)) + mod_timer(&mdp->standby_timer, + jiffies + msecs_to_jiffies(200)); } return 0; } -static int disable_mdp_irq(struct mdp_info *mdp, uint32_t mask) +int disable_mdp_irq(struct mdp_info *mdp, uint32_t mask) { - unsigned long irq_flags; + unsigned long irq_flags=0; int ret; - spin_lock_irqsave(&mdp_lock, 
irq_flags); + spin_lock_irqsave(&mdp->lock, irq_flags); ret = locked_disable_mdp_irq(mdp, mask); - spin_unlock_irqrestore(&mdp_lock, irq_flags); + spin_unlock_irqrestore(&mdp->lock, irq_flags); return ret; } static irqreturn_t mdp_isr(int irq, void *data) { uint32_t status; - unsigned long irq_flags; + unsigned long irq_flags=0; struct mdp_info *mdp = data; + int i; - spin_lock_irqsave(&mdp_lock, irq_flags); + spin_lock_irqsave(&mdp->lock, irq_flags); status = mdp_readl(mdp, MDP_INTR_STATUS); mdp_writel(mdp, status, MDP_INTR_CLEAR); +// pr_info("%s: status=%08x (irq_mask=%08x)\n", __func__, status, +// mdp_irq_mask); + + if (mdp_dma_timer_enable) { + del_timer_sync(&mdp->dma_timer); + mdp_dma_timer_enable = 0; + } + +#ifdef CONFIG_MSM_HDMI + if (status & MDP_LCDC_UNDERFLOW) + { + pr_err("%s: LCDC Underflow\n", __func__); + reportUnderflow(); + } +#endif + status &= mdp_irq_mask; - if (status & DL0_DMA2_TERM_DONE) { - if (dma_callback) { - dma_callback->func(dma_callback); - dma_callback = NULL; +#ifdef CONFIG_MSM_MDP40 + if (mdp->mdp_dev.overrides & MSM_MDP4_MDDI_DMA_SWITCH) { + if(status && mdp->out_if[MSM_MDDI_PMDH_INTERFACE].dma_cb != NULL) + status |= (INTR_OVERLAY0_DONE | MDP_DMA_S_DONE); + } +#endif + for (i = 0; i < MSM_MDP_NUM_INTERFACES; ++i) { + struct mdp_out_interface *out_if = &mdp->out_if[i]; + if (status & out_if->dma_mask) { + if (out_if->dma_cb) { + out_if->dma_cb->func(out_if->dma_cb); + out_if->dma_cb = NULL; + } + wake_up(&out_if->dma_waitqueue); + } + if (status & out_if->irq_mask) { + out_if->irq_cb->func(out_if->irq_cb); + out_if->irq_cb = NULL; } - wake_up(&mdp_dma2_waitqueue); } - if (status & DL0_ROI_DONE) - wake_up(&mdp_ppp_waitqueue); - +#ifndef CONFIG_MSM_MDP40 + mdp_ppp_handle_isr(mdp, status); +#endif if (status) locked_disable_mdp_irq(mdp, status); - spin_unlock_irqrestore(&mdp_lock, irq_flags); + spin_unlock_irqrestore(&mdp->lock, irq_flags); return IRQ_HANDLED; } -static uint32_t mdp_check_mask(uint32_t mask) +static void mdp_do_dma_timer(unsigned long data) +{ + uint32_t status; + struct mdp_info *mdp = (struct mdp_info *) data; + unsigned long irq_flags=0; + int i; + + spin_lock_irqsave(&mdp->lock, irq_flags); + + status = mdp_readl(mdp, MDP_INTR_STATUS); + mdp_writel(mdp, mdp_irq_mask, MDP_INTR_CLEAR); + + for (i = 0; i < MSM_MDP_NUM_INTERFACES; ++i) { + struct mdp_out_interface *out_if = &mdp->out_if[i]; + if (mdp_irq_mask & out_if->dma_mask) { + if (out_if->dma_cb) { + out_if->dma_cb->func(out_if->dma_cb); + out_if->dma_cb = NULL; + } + wake_up(&out_if->dma_waitqueue); + } + if (mdp_irq_mask & out_if->irq_mask) { + out_if->irq_cb->func(out_if->irq_cb); + out_if->irq_cb = NULL; + } + } + + locked_disable_mdp_irq(mdp, mdp_irq_mask); + + spin_unlock_irqrestore(&mdp->lock, irq_flags); + +} + +static uint32_t mdp_check_mask(struct mdp_info *mdp, uint32_t mask) { uint32_t ret; - unsigned long irq_flags; + unsigned long irq_flags=0; - spin_lock_irqsave(&mdp_lock, irq_flags); + spin_lock_irqsave(&mdp->lock, irq_flags); ret = mdp_irq_mask & mask; - spin_unlock_irqrestore(&mdp_lock, irq_flags); + spin_unlock_irqrestore(&mdp->lock, irq_flags); return ret; } -static int mdp_wait(struct mdp_info *mdp, uint32_t mask, wait_queue_head_t *wq) +void mdp_dump_blit(struct mdp_blit_req *req) +{ + pr_info("%s: src: w=%d h=%d f=0x%x offs=0x%x mem_id=%d\n", __func__, + req->src.width, req->src.height, req->src.format, + req->src.offset, req->src.memory_id); + pr_info("%s: dst: w=%d h=%d f=0x%x offs=0x%x mem_id=%d\n", __func__, + req->dst.width, req->dst.height, 
req->dst.format, + req->dst.offset, req->dst.memory_id); + pr_info("%s: src_rect: x=%d y=%d w=%d h=%d\n", __func__, + req->src_rect.x, req->src_rect.y, req->src_rect.w, + req->src_rect.h); + pr_info("%s: dst_rect: x=%d y=%d w=%d h=%d\n", __func__, + req->dst_rect.x, req->dst_rect.y, req->dst_rect.w, + req->dst_rect.h); + pr_info("%s: alpha=0x%08x\n", __func__, req->alpha); + pr_info("%s: transp_max=0x%08x\n", __func__, req->transp_mask); + pr_info("%s: flags=%08x\n", __func__, req->flags); +} + +int mdp_wait(struct mdp_info *mdp, uint32_t mask, wait_queue_head_t *wq) { int ret = 0; - unsigned long irq_flags; + unsigned long irq_flags=0; - wait_event_timeout(*wq, !mdp_check_mask(mask), HZ); +// pr_info("%s: WAITING for 0x%x\n", __func__, mask); + wait_event_timeout(*wq, !mdp_check_mask(mdp, mask), 5*HZ); - spin_lock_irqsave(&mdp_lock, irq_flags); + spin_lock_irqsave(&mdp->lock, irq_flags); if (mdp_irq_mask & mask) { locked_disable_mdp_irq(mdp, mask); - printk(KERN_WARNING "timeout waiting for mdp to complete %x\n", - mask); + pr_warning("%s: timeout waiting for mdp to complete 0x%x\n", + __func__, mask); + if(timeout_req) + mdp_dump_blit(timeout_req); + ret = -ETIMEDOUT; + } else { +// pr_info("%s: SUCCESS waiting for 0x%x\n", __func__, mask); } - spin_unlock_irqrestore(&mdp_lock, irq_flags); + spin_unlock_irqrestore(&mdp->lock, irq_flags); return ret; } -void mdp_dma_wait(struct mdp_device *mdp_dev) +static void mdp_dma_wait(struct mdp_device *mdp_dev, int interface) { #define MDP_MAX_TIMEOUTS 20 static int timeout_count; struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev); + unsigned int mask = 0; + wait_queue_head_t *wq; + + switch (interface) { + case MSM_MDDI_PMDH_INTERFACE: + case MSM_MDDI_EMDH_INTERFACE: + case MSM_LCDC_INTERFACE: + case MSM_TV_INTERFACE: + BUG_ON(!mdp->out_if[interface].registered); + mask = mdp->out_if[interface].dma_mask; + wq = &mdp->out_if[interface].dma_waitqueue; + break; + default: + pr_err("%s: Unknown interface %d\n", __func__, interface); + BUG(); + } - if (mdp_wait(mdp, DL0_DMA2_TERM_DONE, &mdp_dma2_waitqueue) == -ETIMEDOUT) + if (mdp_wait(mdp, mask, wq) == -ETIMEDOUT) timeout_count++; else timeout_count = 0; @@ -185,181 +337,336 @@ void mdp_dma_wait(struct mdp_device *mdp_dev) BUG(); } } - +/* static int mdp_ppp_wait(struct mdp_info *mdp) { return mdp_wait(mdp, DL0_ROI_DONE, &mdp_ppp_waitqueue); } - -void mdp_dma_to_mddi(struct mdp_info *mdp, uint32_t addr, uint32_t stride, - uint32_t width, uint32_t height, uint32_t x, uint32_t y, - struct msmfb_callback *callback) +*/ +#ifndef CONFIG_MSM_MDP40 +static void mdp_dmas_to_mddi(void *priv, uint32_t addr, uint32_t stride, + uint32_t width, uint32_t height, uint32_t x, uint32_t y) { + struct mdp_info *mdp = priv; uint32_t dma2_cfg; - uint16_t ld_param = 0; /* 0=PRIM, 1=SECD, 2=EXT */ + uint32_t video_packet_parameter = 0; + uint16_t ld_param = 1; - if (enable_mdp_irq(mdp, DL0_DMA2_TERM_DONE)) { - printk(KERN_ERR "mdp_dma_to_mddi: busy\n"); - return; + dma2_cfg = DMA_PACK_TIGHT | + DMA_PACK_ALIGN_LSB | + DMA_OUT_SEL_AHB | + DMA_IBUF_NONCONTIGUOUS; + + dma2_cfg |= mdp->dma_format; + +#if defined CONFIG_MSM_MDP22 || defined CONFIG_MSM_MDP30 + if (mdp->dma_format == DMA_IBUF_FORMAT_RGB888_OR_ARGB8888) +#else + if (mdp->dma_format == DMA_IBUF_FORMAT_XRGB8888) +#endif + dma2_cfg |= DMA_PACK_PATTERN_BGR; + else + dma2_cfg |= DMA_PACK_PATTERN_RGB; + + dma2_cfg |= DMA_OUT_SEL_MDDI; + + dma2_cfg |= DMA_MDDI_DMAOUT_LCD_SEL_PRIMARY; + + dma2_cfg |= DMA_DITHER_EN; + + if (mdp->mdp_dev.color_format == 
MSM_MDP_OUT_IF_FMT_RGB565) { + dma2_cfg |= DMA_DSTC0G_6BITS | DMA_DSTC1B_5BITS | DMA_DSTC2R_5BITS; + video_packet_parameter = MDDI_VDO_PACKET_DESC_RGB565; + } else if (mdp->mdp_dev.color_format == MSM_MDP_OUT_IF_FMT_RGB666) { + dma2_cfg |= DMA_DSTC0G_6BITS | DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS; + video_packet_parameter = MDDI_VDO_PACKET_DESC_RGB666; + } + + /* setup size, address, and stride */ + mdp_writel(mdp, (height << 16) | (width), MDP_DMA_S_SIZE); + mdp_writel(mdp, addr, MDP_DMA_S_IBUF_ADDR); + mdp_writel(mdp, stride, MDP_DMA_S_IBUF_Y_STRIDE); + + /* set y & x offset and MDDI transaction parameters */ + mdp_writel(mdp, (y << 16) | (x), MDP_DMA_S_OUT_XY); + mdp_writel(mdp, ld_param, MDP_MDDI_PARAM_WR_SEL); + if (mdp->mdp_dev.overrides & MSM_MDP_PANEL_IGNORE_PIXEL_DATA) { + mdp_writel(mdp, (video_packet_parameter << 16) | 0xE3, + MDP_MDDI_PARAM); + } + else { + mdp_writel(mdp, (video_packet_parameter << 16) | MDDI_VDO_PACKET_PRIM, + MDP_MDDI_PARAM); } - dma_callback = callback; + mdp_writel(mdp, dma2_cfg, MDP_DMA_S_CONFIG); + mdp_writel(mdp, 0, MDP_DMA_S_START); +} + +static void mdp_dma_to_mddi(void *priv, uint32_t addr, uint32_t stride, + uint32_t width, uint32_t height, uint32_t x, + uint32_t y) +{ + struct mdp_info *mdp = priv; + uint32_t dma2_cfg = 0; + uint32_t video_packet_parameter = 0; + uint16_t ld_param = 0; /* 0=PRIM, 1=SECD, 2=EXT */ +#if !defined(CONFIG_MSM_MDP30) dma2_cfg = DMA_PACK_TIGHT | DMA_PACK_ALIGN_LSB | - DMA_PACK_PATTERN_RGB | DMA_OUT_SEL_AHB | DMA_IBUF_NONCONTIGUOUS; - dma2_cfg |= DMA_IBUF_FORMAT_RGB565; +#endif + dma2_cfg |= mdp->dma_format; + +#if defined CONFIG_MSM_MDP22 || defined CONFIG_MSM_MDP30 + if (mdp->dma_format == DMA_IBUF_FORMAT_RGB888_OR_ARGB8888) +#else + if (mdp->dma_format == DMA_IBUF_FORMAT_XRGB8888) +#endif + dma2_cfg |= DMA_PACK_PATTERN_BGR; + else + dma2_cfg |= DMA_PACK_PATTERN_RGB; dma2_cfg |= DMA_OUT_SEL_MDDI; dma2_cfg |= DMA_MDDI_DMAOUT_LCD_SEL_PRIMARY; +#if !defined(CONFIG_MSM_MDP30) dma2_cfg |= DMA_DITHER_EN; +#endif + + if (mdp->mdp_dev.color_format == MSM_MDP_OUT_IF_FMT_RGB565) { + dma2_cfg |= DMA_DSTC0G_6BITS | DMA_DSTC1B_5BITS | DMA_DSTC2R_5BITS; + video_packet_parameter = MDDI_VDO_PACKET_DESC_RGB565; + } else if (mdp->mdp_dev.color_format == MSM_MDP_OUT_IF_FMT_RGB666) { + dma2_cfg |= DMA_DSTC0G_6BITS | DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS; + video_packet_parameter = MDDI_VDO_PACKET_DESC_RGB666; + } + +#if defined(CONFIG_MSM_MDP30) || defined(CONFIG_MSM_MDP302) + writel(height << 16 | width, mdp->base + 0x90004); + writel(addr, mdp->base + 0x90008); + writel(stride, mdp->base + 0x9000c); + + /* set y & x offset and MDDI transaction parameters */ + writel(y << 16 | x, mdp->base + 0x90010); + writel(ld_param, mdp->base + 0x00090); + writel((video_packet_parameter << 16) | MDDI_VDO_PACKET_PRIM, + mdp->base + 0x00094); + + writel(dma2_cfg, mdp->base + 0x90000); + + /* start DMA2 */ + writel(0, mdp->base + 0x0044); +#elif defined(CONFIG_MSM_MDP22) /* setup size, address, and stride */ mdp_writel(mdp, (height << 16) | (width), MDP_CMD_DEBUG_ACCESS_BASE + 0x0184); mdp_writel(mdp, addr, MDP_CMD_DEBUG_ACCESS_BASE + 0x0188); mdp_writel(mdp, stride, MDP_CMD_DEBUG_ACCESS_BASE + 0x018C); - /* 666 18BPP */ - dma2_cfg |= DMA_DSTC0G_6BITS | DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS; - /* set y & x offset and MDDI transaction parameters */ mdp_writel(mdp, (y << 16) | (x), MDP_CMD_DEBUG_ACCESS_BASE + 0x0194); mdp_writel(mdp, ld_param, MDP_CMD_DEBUG_ACCESS_BASE + 0x01a0); - mdp_writel(mdp, (MDDI_VDO_PACKET_DESC << 16) | MDDI_VDO_PACKET_PRIM, + 
mdp_writel(mdp, (video_packet_parameter << 16) | MDDI_VDO_PACKET_PRIM, MDP_CMD_DEBUG_ACCESS_BASE + 0x01a4); mdp_writel(mdp, dma2_cfg, MDP_CMD_DEBUG_ACCESS_BASE + 0x0180); /* start DMA2 */ mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0044); +#else + /* setup size, address, and stride */ + mdp_writel(mdp, (height << 16) | (width), MDP_DMA_P_SIZE); + mdp_writel(mdp, addr, MDP_DMA_P_IBUF_ADDR); + mdp_writel(mdp, stride, MDP_DMA_P_IBUF_Y_STRIDE); + + /* set y & x offset and MDDI transaction parameters */ + mdp_writel(mdp, (y << 16) | (x), MDP_DMA_P_OUT_XY); + mdp_writel(mdp, ld_param, MDP_MDDI_PARAM_WR_SEL); + mdp_writel(mdp, (video_packet_parameter << 16) | MDDI_VDO_PACKET_PRIM, + MDP_MDDI_PARAM); + + mdp_writel(mdp, dma2_cfg, MDP_DMA_P_CONFIG); + mdp_writel(mdp, 0, MDP_DMA_P_START); +#endif } +#endif /* ifndef CONFIG_MSM_MDP40 */ void mdp_dma(struct mdp_device *mdp_dev, uint32_t addr, uint32_t stride, uint32_t width, uint32_t height, uint32_t x, uint32_t y, struct msmfb_callback *callback, int interface) { struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev); + struct mdp_out_interface *out_if; + unsigned long flags; - if (interface == MSM_MDDI_PMDH_INTERFACE) { - mdp_dma_to_mddi(mdp, addr, stride, width, height, x, y, - callback); + if (interface < 0 || interface >= MSM_MDP_NUM_INTERFACES || + !mdp->out_if[interface].registered) { + pr_err("%s: Unknown interface: %d\n", __func__, interface); + BUG(); } -} + out_if = &mdp->out_if[interface]; -int get_img(struct mdp_img *img, struct fb_info *info, - unsigned long *start, unsigned long *len, - struct file **filep) -{ - int put_needed, ret = 0; - struct file *file; + spin_lock_irqsave(&mdp->lock, flags); + if (locked_enable_mdp_irq(mdp, out_if->dma_mask)) { + /* something wrong in dma, workaround it */ + mdp_dma_timer_enable = 1; + pr_err("%s: busy\n", __func__); + } - file = fget_light(img->memory_id, &put_needed); - if (file == NULL) - return -1; + out_if->dma_cb = callback; + out_if->dma_start(out_if->priv, addr, stride, width, height, x, y); - if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) { - *start = info->fix.smem_start; - *len = info->fix.smem_len; - } else - ret = -1; - fput_light(file, put_needed); + if (mdp_dma_timer_enable) + mod_timer(&mdp->dma_timer, + jiffies + msecs_to_jiffies(17)); - return ret; + spin_unlock_irqrestore(&mdp->lock, flags); } -void put_img(struct file *src_file, struct file *dst_file) -{ -} -int mdp_blit(struct mdp_device *mdp_dev, struct fb_info *fb, - struct mdp_blit_req *req) +void mdp_configure_dma(struct mdp_device *mdp_dev) { - int ret; - unsigned long src_start = 0, src_len = 0, dst_start = 0, dst_len = 0; struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev); - struct file *src_file = 0, *dst_file = 0; + uint32_t dma_cfg; - /* WORKAROUND FOR HARDWARE BUG IN BG TILE FETCH */ - if (unlikely(req->src_rect.h == 0 || - req->src_rect.w == 0)) { - printk(KERN_ERR "mpd_ppp: src img of zero size!\n"); - return -EINVAL; - } - if (unlikely(req->dst_rect.h == 0 || - req->dst_rect.w == 0)) - return -EINVAL; + if (!mdp->dma_config_dirty) + return; + dma_cfg = mdp_readl(mdp, MDP_DMA_P_CONFIG); + dma_cfg &= ~DMA_IBUF_FORMAT_MASK; + dma_cfg &= ~DMA_PACK_PATTERN_MASK; + dma_cfg |= (mdp->dma_format | mdp->dma_pack_pattern); + mdp_writel(mdp, dma_cfg, MDP_DMA_P_CONFIG); + mdp->dma_config_dirty = false; + + return; +} - /* do this first so that if this fails, the caller can always - * safely call put_img */ - if (unlikely(get_img(&req->src, fb, &src_start, &src_len, &src_file))) 
{ - printk(KERN_ERR "mpd_ppp: could not retrieve src image from " - "memory\n"); +int mdp_check_output_format(struct mdp_device *mdp_dev, int bpp) +{ + switch (bpp) { + case 16: + case 24: + case 32: + break; + default: return -EINVAL; } + return 0; +} + +void mdp_set_panel_size(struct mdp_device *mdp_dev, int width, int height) +{ + struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev); + mdp->mdp_dev.width = width; + mdp->mdp_dev.height = height; +} - if (unlikely(get_img(&req->dst, fb, &dst_start, &dst_len, &dst_file))) { - printk(KERN_ERR "mpd_ppp: could not retrieve dst image from " - "memory\n"); +int mdp_set_output_format(struct mdp_device *mdp_dev, int bpp) +{ + struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev); + uint32_t format, pack_pattern = DMA_PACK_PATTERN_RGB; + + switch (bpp) { + case 16: + format = DMA_IBUF_FORMAT_RGB565; + pack_pattern = DMA_PACK_PATTERN_RGB; + break; +#if defined CONFIG_MSM_MDP22 || defined CONFIG_MSM_MDP30 + case 24: + case 32: + format = DMA_IBUF_FORMAT_RGB888_OR_ARGB8888; + break; +#else + case 24: + format = DMA_IBUF_FORMAT_RGB888; + pack_pattern = DMA_PACK_PATTERN_BGR; + break; + case 32: + format = DMA_IBUF_FORMAT_XRGB8888; + pack_pattern = DMA_PACK_PATTERN_BGR; + break; +#endif + default: return -EINVAL; } - mutex_lock(&mdp_mutex); - - /* transp_masking unimplemented */ - req->transp_mask = MDP_TRANSP_NOP; - if (unlikely((req->transp_mask != MDP_TRANSP_NOP || - req->alpha != MDP_ALPHA_NOP || - HAS_ALPHA(req->src.format)) && - (req->flags & MDP_ROT_90 && - req->dst_rect.w <= 16 && req->dst_rect.h >= 16))) { - int i; - unsigned int tiles = req->dst_rect.h / 16; - unsigned int remainder = req->dst_rect.h % 16; - req->src_rect.w = 16*req->src_rect.w / req->dst_rect.h; - req->dst_rect.h = 16; - for (i = 0; i < tiles; i++) { - enable_mdp_irq(mdp, DL0_ROI_DONE); - ret = mdp_ppp_blit(mdp, req, src_file, src_start, - src_len, dst_file, dst_start, - dst_len); - if (ret) - goto err_bad_blit; - ret = mdp_ppp_wait(mdp); - if (ret) - goto err_wait_failed; - req->dst_rect.y += 16; - req->src_rect.x += req->src_rect.w; - } - if (!remainder) - goto end; - req->src_rect.w = remainder*req->src_rect.w / req->dst_rect.h; - req->dst_rect.h = remainder; + if (format != mdp->dma_format || pack_pattern != mdp->dma_pack_pattern) { + mdp->dma_format = format; + mdp->dma_pack_pattern = pack_pattern; + mdp->dma_config_dirty = true; } + + return 0; +} +/* +static void dump_req(struct mdp_blit_req *req, + unsigned long src_start, unsigned long src_len, + unsigned long dst_start, unsigned long dst_len) +{ + pr_err("flags: 0x%x\n", req->flags); + pr_err("src_start: 0x%08lx\n", src_start); + pr_err("src_len: 0x%08lx\n", src_len); + pr_err("src.offset: 0x%x\n", req->src.offset); + pr_err("src.format: 0x%x\n", req->src.format); + pr_err("src.width: %d\n", req->src.width); + pr_err("src.height: %d\n", req->src.height); + pr_err("src_rect.x: %d\n", req->src_rect.x); + pr_err("src_rect.y: %d\n", req->src_rect.y); + pr_err("src_rect.w: %d\n", req->src_rect.w); + pr_err("src_rect.h: %d\n", req->src_rect.h); + + pr_err("dst_start: 0x%08lx\n", dst_start); + pr_err("dst_len: 0x%08lx\n", dst_len); + pr_err("dst.offset: 0x%x\n", req->dst.offset); + pr_err("dst.format: 0x%x\n", req->dst.format); + pr_err("dst.width: %d\n", req->dst.width); + pr_err("dst.height: %d\n", req->dst.height); + pr_err("dst_rect.x: %d\n", req->dst_rect.x); + pr_err("dst_rect.y: %d\n", req->dst_rect.y); + pr_err("dst_rect.w: %d\n", req->dst_rect.w); + pr_err("dst_rect.h: %d\n", 
req->dst_rect.h); +} + +int mdp_blit_and_wait(struct mdp_info *mdp, struct mdp_blit_req *req, + struct file *src_file, unsigned long src_start, unsigned long src_len, + struct file *dst_file, unsigned long dst_start, unsigned long dst_len) +{ + int ret; enable_mdp_irq(mdp, DL0_ROI_DONE); - ret = mdp_ppp_blit(mdp, req, src_file, src_start, src_len, dst_file, - dst_start, - dst_len); - if (ret) - goto err_bad_blit; + ret = mdp_ppp_blit(mdp, req, + src_file, src_start, src_len, + dst_file, dst_start, dst_len); + if (unlikely(ret)) { + disable_mdp_irq(mdp, DL0_ROI_DONE); + return ret; + } ret = mdp_ppp_wait(mdp); - if (ret) - goto err_wait_failed; -end: - put_img(src_file, dst_file); - mutex_unlock(&mdp_mutex); + if (unlikely(ret)) { + printk(KERN_ERR "%s: failed!\n", __func__); + pr_err("original request:\n"); + dump_req(mdp->req, src_start, src_len, dst_start, dst_len); + pr_err("dead request:\n"); + dump_req(req, src_start, src_len, dst_start, dst_len); + BUG(); + return ret; + } return 0; -err_bad_blit: - disable_mdp_irq(mdp, DL0_ROI_DONE); -err_wait_failed: - put_img(src_file, dst_file); - mutex_unlock(&mdp_mutex); - return ret; } +*/ +int mdp_blit(struct mdp_device *mdp_dev, struct fb_info *fb, + struct mdp_blit_req *req) +{ + struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev); + timeout_req = req; + return mdp_ppp_blit(mdp, fb, req); +} + + void mdp_set_grp_disp(struct mdp_device *mdp_dev, unsigned disp_id) { @@ -369,6 +676,78 @@ void mdp_set_grp_disp(struct mdp_device *mdp_dev, unsigned disp_id) mdp_writel(mdp, disp_id, MDP_FULL_BYPASS_WORD43); } +/* used by output interface drivers like mddi and lcdc */ +int mdp_out_if_register(struct mdp_device *mdp_dev, int interface, + void *private_data, uint32_t dma_mask, + mdp_dma_start_func_t dma_start) +{ + struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev); + unsigned long flags=0; + int ret = 0; + + if (interface < 0 || interface >= MSM_MDP_NUM_INTERFACES) { + pr_err("%s: invalid interface (%d)\n", __func__, interface); + return -EINVAL; + } + + spin_lock_irqsave(&mdp->lock, flags); + + if (mdp->out_if[interface].registered) { + pr_err("%s: interface (%d) already registered\n", __func__, + interface); + ret = -EINVAL; + goto done; + } + + init_waitqueue_head(&mdp->out_if[interface].dma_waitqueue); + mdp->out_if[interface].registered = 1; + mdp->out_if[interface].priv = private_data; + mdp->out_if[interface].dma_mask = dma_mask; + mdp->out_if[interface].dma_start = dma_start; + mdp->out_if[interface].dma_cb = NULL; + +done: + spin_unlock_irqrestore(&mdp->lock, flags); + return ret; +} + +int mdp_out_if_req_irq(struct mdp_device *mdp_dev, int interface, + uint32_t mask, struct msmfb_callback *cb) +{ + struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev); + unsigned long flags=0; + int ret = 0; + + if (interface < 0 || interface >= MSM_MDP_NUM_INTERFACES) { + pr_err("%s: invalid interface (%d)\n", __func__, interface); + BUG(); + } else if (!mdp->out_if[interface].registered) { + pr_err("%s: interface (%d) not registered\n", __func__, + interface); + BUG(); + } + + spin_lock_irqsave(&mdp->lock, flags); + + if (mask) { + ret = locked_enable_mdp_irq(mdp, mask); + if (ret) { + pr_err("%s: busy\n", __func__); + goto done; + } + mdp->out_if[interface].irq_mask = mask; + mdp->out_if[interface].irq_cb = cb; + } else { + locked_disable_mdp_irq(mdp, mask); + mdp->out_if[interface].irq_mask = 0; + mdp->out_if[interface].irq_cb = NULL; + } + +done: + spin_unlock_irqrestore(&mdp->lock, flags); + 
return ret; +} + int register_mdp_client(struct class_interface *cint) { if (!mdp_class) { @@ -379,15 +758,12 @@ int register_mdp_client(struct class_interface *cint) return class_interface_register(cint); } -#include "mdp_csc_table.h" -#include "mdp_scale_tables.h" - int mdp_probe(struct platform_device *pdev) { struct resource *resource; - int ret; - int n; + int ret = -EINVAL; struct mdp_info *mdp; + struct msm_mdp_platform_data *pdata = pdev->dev.platform_data; resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!resource) { @@ -399,6 +775,8 @@ int mdp_probe(struct platform_device *pdev) if (!mdp) return -ENOMEM; + spin_lock_init(&mdp->lock); + mdp->irq = platform_get_irq(pdev, 0); if (mdp->irq < 0) { pr_err("mdp: can not get mdp irq\n"); @@ -417,68 +795,99 @@ int mdp_probe(struct platform_device *pdev) mdp->mdp_dev.dma = mdp_dma; mdp->mdp_dev.dma_wait = mdp_dma_wait; mdp->mdp_dev.blit = mdp_blit; +#ifdef CONFIG_FB_MSM_OVERLAY + mdp->mdp_dev.overlay_get = mdp4_overlay_get; + mdp->mdp_dev.overlay_set = mdp4_overlay_set; + mdp->mdp_dev.overlay_unset = mdp4_overlay_unset; + mdp->mdp_dev.overlay_play = mdp4_overlay_play; +#endif mdp->mdp_dev.set_grp_disp = mdp_set_grp_disp; + mdp->mdp_dev.set_output_format = mdp_set_output_format; + mdp->mdp_dev.set_panel_size = mdp_set_panel_size; + mdp->mdp_dev.check_output_format = mdp_check_output_format; + mdp->mdp_dev.configure_dma = mdp_configure_dma; + + mdp->enable_irq = enable_mdp_irq; + mdp->disable_irq = disable_mdp_irq; + + if (pdata == NULL || pdata->overrides == 0) + mdp->mdp_dev.overrides = 0; + else if(pdata->overrides) + mdp->mdp_dev.overrides = pdata->overrides; + + if (pdata == NULL || pdata->color_format == 0) + mdp->mdp_dev.color_format = MSM_MDP_OUT_IF_FMT_RGB565; + else if(pdata->color_format) + mdp->mdp_dev.color_format = pdata->color_format; + +#ifdef CONFIG_MSM_MDP40 + if (mdp->mdp_dev.overrides & MSM_MDP4_MDDI_DMA_SWITCH) { + ret = mdp_out_if_register(&mdp->mdp_dev, + MSM_MDDI_PMDH_INTERFACE, mdp, INTR_OVERLAY0_DONE + | MDP_DMA_S_DONE, mdp4_mddi_overlay); + } else { + ret = mdp_out_if_register(&mdp->mdp_dev, + MSM_MDDI_PMDH_INTERFACE, mdp, INTR_OVERLAY0_DONE, + mdp4_mddi_overlay); + } +#else + if (pdata == NULL || pdata->dma_channel == MDP_DMA_P) { + ret = mdp_out_if_register(&mdp->mdp_dev, + MSM_MDDI_PMDH_INTERFACE, mdp, MDP_DMA_P_DONE, + mdp_dma_to_mddi); + } else if (pdata->dma_channel == MDP_DMA_S) { + ret = mdp_out_if_register(&mdp->mdp_dev, + MSM_MDDI_PMDH_INTERFACE, mdp, MDP_DMA_S_DONE, + mdp_dmas_to_mddi); + } +#endif + + if (ret) + goto error_mddi_pmdh_register; - clk = clk_get(&pdev->dev, "mdp_clk"); - if (IS_ERR(clk)) { + mdp->clk = clk_get(&pdev->dev, "mdp_clk"); + if (IS_ERR(mdp->clk)) { printk(KERN_INFO "mdp: failed to get mdp clk"); - return PTR_ERR(clk); + ret = PTR_ERR(mdp->clk); + goto error_get_mdp_clk; } + mdp->ebi1_clk = clk_get(NULL, "ebi1_clk"); + if (IS_ERR(mdp->ebi1_clk)) { + pr_err("mdp: failed to get ebi1 clk\n"); + ret = PTR_ERR(mdp->ebi1_clk); + goto error_get_ebi1_clk; + } + ret = request_irq(mdp->irq, mdp_isr, IRQF_DISABLED, "msm_mdp", mdp); if (ret) goto error_request_irq; disable_irq(mdp->irq); - mdp_irq_mask = 0; - - /* debug interface write access */ - mdp_writel(mdp, 1, 0x60); - - mdp_writel(mdp, MDP_ANY_INTR_MASK, MDP_INTR_ENABLE); - mdp_writel(mdp, 1, MDP_EBI2_PORTMAP_MODE); - - mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01f8); - mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01fc); - - for (n = 0; n < ARRAY_SIZE(csc_table); n++) - mdp_writel(mdp, csc_table[n].val, 
csc_table[n].reg); - - /* clear up unused fg/main registers */ - /* comp.plane 2&3 ystride */ - mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0120); - - /* unpacked pattern */ - mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x012c); - mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0130); - mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0134); - mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0158); - mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x015c); - mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0160); - mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0170); - mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0174); - mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x017c); - - /* comp.plane 2 & 3 */ - mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0114); - mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0118); - - /* clear unused bg registers */ - mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01c8); - mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01d0); - mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01dc); - mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e0); - mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e4); - - for (n = 0; n < ARRAY_SIZE(mdp_upscale_table); n++) - mdp_writel(mdp, mdp_upscale_table[n].val, - mdp_upscale_table[n].reg); - - for (n = 0; n < 9; n++) - mdp_writel(mdp, mdp_default_ccs[n], 0x40440 + 4 * n); - mdp_writel(mdp, mdp_default_ccs[9], 0x40500 + 4 * 0); - mdp_writel(mdp, mdp_default_ccs[10], 0x40500 + 4 * 0); - mdp_writel(mdp, mdp_default_ccs[11], 0x40500 + 4 * 0); + clk_enable(mdp->clk); + mdp_clk_to_disable_later = mdp->clk; + +#ifdef CONFIG_MSM_MDP40 + //MDP_DISP_INTF_SEL + if (mdp_readl(mdp, 0xc0000)) + mdp_writel(mdp, 0x8, 0x0038); + else + mdp_writel(mdp, 0xa, 0x0038); //mddi + //FIXME: should select mddi or lcdc interface + //mdp_writel(mdp, 0x8, 0x0038); //lcdc +#endif + +#ifdef CONFIG_MSM_MDP40 + mdp4_hw_init(mdp); +#else + mdp_hw_init(mdp); +#endif + +#if defined CONFIG_MSM_MDP302 + /* enable the tearing check in MDP */ + if(pdata != NULL && pdata->tearing_check) + mdp_check_tearing(mdp, pdata); +#endif /* register mdp device */ mdp->mdp_dev.dev.parent = &pdev->dev; mdp->mdp_dev.dev.class = mdp_class; @@ -491,14 +900,26 @@ int mdp_probe(struct platform_device *pdev) ret = device_register(&mdp->mdp_dev.dev); if (ret) goto error_device_register; + + setup_timer(&mdp->standby_timer, mdp_do_standby_timer, (unsigned long )mdp); + setup_timer(&mdp->dma_timer, mdp_do_dma_timer, (unsigned long )mdp); + + + pr_info("%s: initialized\n", __func__); + return 0; error_device_register: free_irq(mdp->irq, mdp); error_request_irq: + clk_put(mdp->ebi1_clk); +error_get_ebi1_clk: + clk_put(mdp->clk); +error_get_mdp_clk: +error_mddi_pmdh_register: iounmap(mdp->base); -error_get_irq: error_ioremap: +error_get_irq: kfree(mdp); return ret; } @@ -508,6 +929,13 @@ static struct platform_driver msm_mdp_driver = { .driver = {.name = "msm_mdp"}, }; +static int __init mdp_lateinit(void) +{ + if (mdp_clk_to_disable_later) + clk_disable(mdp_clk_to_disable_later); + return 0; +} + static int __init mdp_init(void) { mdp_class = class_create(THIS_MODULE, "msm_mdp"); @@ -519,3 +947,4 @@ static int __init mdp_init(void) } subsys_initcall(mdp_init); +late_initcall(mdp_lateinit); diff --git a/drivers/video/msm/mdp_csc_table.h b/drivers/video/msm/mdp_csc_table.h index d1cde30ead52f..a0f72c0ebd4f7 100644 --- a/drivers/video/msm/mdp_csc_table.h +++ b/drivers/video/msm/mdp_csc_table.h @@ -1,4 +1,4 @@ -/* drivers/video/msm_fb/mdp_csc_table.h +/* 
drivers/video/msm/mdp_csc_table.h * * Copyright (C) 2007 QUALCOMM Incorporated * Copyright (C) 2007 Google Incorporated @@ -16,57 +16,116 @@ static struct { uint32_t reg; uint32_t val; -} csc_table[] = { - { 0x40400, 0x83 }, - { 0x40404, 0x102 }, - { 0x40408, 0x32 }, - { 0x4040c, 0xffffffb5 }, - { 0x40410, 0xffffff6c }, - { 0x40414, 0xe1 }, - { 0x40418, 0xe1 }, - { 0x4041c, 0xffffff45 }, - { 0x40420, 0xffffffdc }, - { 0x40440, 0x254 }, - { 0x40444, 0x0 }, - { 0x40448, 0x331 }, - { 0x4044c, 0x254 }, - { 0x40450, 0xffffff38 }, - { 0x40454, 0xfffffe61 }, - { 0x40458, 0x254 }, - { 0x4045c, 0x409 }, - { 0x40460, 0x0 }, - { 0x40480, 0x5d }, - { 0x40484, 0x13a }, - { 0x40488, 0x20 }, - { 0x4048c, 0xffffffcd }, - { 0x40490, 0xffffff54 }, - { 0x40494, 0xe1 }, - { 0x40498, 0xe1 }, - { 0x4049c, 0xffffff35 }, - { 0x404a0, 0xffffffec }, - { 0x404c0, 0x254 }, - { 0x404c4, 0x0 }, - { 0x404c8, 0x396 }, - { 0x404cc, 0x254 }, - { 0x404d0, 0xffffff94 }, - { 0x404d4, 0xfffffef0 }, - { 0x404d8, 0x254 }, - { 0x404dc, 0x43a }, - { 0x404e0, 0x0 }, - { 0x40500, 0x10 }, - { 0x40504, 0x80 }, - { 0x40508, 0x80 }, - { 0x40540, 0x10 }, - { 0x40544, 0x80 }, - { 0x40548, 0x80 }, - { 0x40580, 0x10 }, - { 0x40584, 0xeb }, - { 0x40588, 0x10 }, - { 0x4058c, 0xf0 }, - { 0x405c0, 0x10 }, - { 0x405c4, 0xeb }, - { 0x405c8, 0x10 }, - { 0x405cc, 0xf0 }, +} csc_matrix_config_table[] = { + /* RGB -> YUV primary forward matrix (set1). */ + { MDP_CSC_PFMVn(0), 0x83 }, + { MDP_CSC_PFMVn(1), 0x102 }, + { MDP_CSC_PFMVn(2), 0x32 }, + { MDP_CSC_PFMVn(3), 0xffffffb5 }, + { MDP_CSC_PFMVn(4), 0xffffff6c }, + { MDP_CSC_PFMVn(5), 0xe1 }, + { MDP_CSC_PFMVn(6), 0xe1 }, + { MDP_CSC_PFMVn(7), 0xffffff45 }, + { MDP_CSC_PFMVn(8), 0xffffffdc }, + + /* YUV -> RGB primary reverse matrix (set2) */ + { MDP_CSC_PRMVn(0), 0x254 }, + { MDP_CSC_PRMVn(1), 0x0 }, + { MDP_CSC_PRMVn(2), 0x331 }, + { MDP_CSC_PRMVn(3), 0x254 }, + { MDP_CSC_PRMVn(4), 0xffffff38 }, + { MDP_CSC_PRMVn(5), 0xfffffe61 }, + { MDP_CSC_PRMVn(6), 0x254 }, + { MDP_CSC_PRMVn(7), 0x409 }, + { MDP_CSC_PRMVn(8), 0x0 }, + +#ifndef CONFIG_MSM_MDP31 + /* For MDP 2.2/3.0 */ + + /* primary limit vector */ + { MDP_CSC_PLVn(0), 0x10 }, + { MDP_CSC_PLVn(1), 0xeb }, + { MDP_CSC_PLVn(2), 0x10 }, + { MDP_CSC_PLVn(3), 0xf0 }, + + /* primary bias vector */ + { MDP_CSC_PBVn(0), 0x10 }, + { MDP_CSC_PBVn(1), 0x80 }, + { MDP_CSC_PBVn(2), 0x80 }, + +#else /* CONFIG_MSM_MDP31 */ + + /* limit vectors configuration */ + /* rgb -> yuv (set1) pre-limit vector */ + { MDP_PPP_CSC_PRE_LV1n(0), 0x10 }, + { MDP_PPP_CSC_PRE_LV1n(1), 0xeb }, + { MDP_PPP_CSC_PRE_LV1n(2), 0x10 }, + { MDP_PPP_CSC_PRE_LV1n(3), 0xf0 }, + { MDP_PPP_CSC_PRE_LV1n(4), 0x10 }, + { MDP_PPP_CSC_PRE_LV1n(5), 0xf0 }, + + /* rgb -> yuv (set1) post-limit vector */ + { MDP_PPP_CSC_POST_LV1n(0), 0x0 }, + { MDP_PPP_CSC_POST_LV1n(1), 0xff }, + { MDP_PPP_CSC_POST_LV1n(2), 0x0 }, + { MDP_PPP_CSC_POST_LV1n(3), 0xff }, + { MDP_PPP_CSC_POST_LV1n(4), 0x0 }, + { MDP_PPP_CSC_POST_LV1n(5), 0xff }, + + /* yuv -> rgb (set2) pre-limit vector */ + { MDP_PPP_CSC_PRE_LV2n(0), 0x0 }, + { MDP_PPP_CSC_PRE_LV2n(1), 0xff }, + { MDP_PPP_CSC_PRE_LV2n(2), 0x0 }, + { MDP_PPP_CSC_PRE_LV2n(3), 0xff }, + { MDP_PPP_CSC_PRE_LV2n(4), 0x0 }, + { MDP_PPP_CSC_PRE_LV2n(5), 0xff }, + + /* yuv -> rgb (set2) post-limit vector */ + { MDP_PPP_CSC_POST_LV2n(0), 0x10 }, + { MDP_PPP_CSC_POST_LV2n(1), 0xeb }, + { MDP_PPP_CSC_POST_LV2n(2), 0x10 }, + { MDP_PPP_CSC_POST_LV2n(3), 0xf0 }, + { MDP_PPP_CSC_POST_LV2n(4), 0x10 }, + { MDP_PPP_CSC_POST_LV2n(5), 0xf0 }, + + /* bias vectors configuration */ + + /* 
XXX: why is set2 used for rgb->yuv, but set1 */ + /* used for yuv -> rgb??!? Seems to be the reverse of the + * other vectors. */ + + /* RGB -> YUV pre-bias vector... */ + { MDP_PPP_CSC_PRE_BV2n(0), 0 }, + { MDP_PPP_CSC_PRE_BV2n(1), 0 }, + { MDP_PPP_CSC_PRE_BV2n(2), 0 }, + + /* RGB -> YUV post-bias vector */ + { MDP_PPP_CSC_POST_BV2n(0), 0x10 }, + { MDP_PPP_CSC_POST_BV2n(1), 0x80 }, + { MDP_PPP_CSC_POST_BV2n(2), 0x80 }, + + /* YUV -> RGB pre-bias vector... */ + { MDP_PPP_CSC_PRE_BV1n(0), 0x1f0 }, + { MDP_PPP_CSC_PRE_BV1n(1), 0x180 }, + { MDP_PPP_CSC_PRE_BV1n(2), 0x180 }, + + /* YUV -> RGB post-bias vector */ + { MDP_PPP_CSC_POST_BV1n(0), 0 }, + { MDP_PPP_CSC_POST_BV1n(1), 0 }, + { MDP_PPP_CSC_POST_BV1n(2), 0 }, + + /* luma filter coefficients */ + { MDP_PPP_DEINT_COEFFn(0), 0x3e0 }, + { MDP_PPP_DEINT_COEFFn(1), 0x360 }, + { MDP_PPP_DEINT_COEFFn(2), 0x120 }, + { MDP_PPP_DEINT_COEFFn(3), 0x140 }, +#endif +}; + +static struct { + uint32_t reg; + uint32_t val; +} csc_color_lut[] = { { 0x40800, 0x0 }, { 0x40804, 0x151515 }, { 0x40808, 0x1d1d1d }, diff --git a/drivers/video/msm/mdp_hw.h b/drivers/video/msm/mdp_hw.h index 4e3deb4e592b3..d5c6ddc27a92d 100644 --- a/drivers/video/msm/mdp_hw.h +++ b/drivers/video/msm/mdp_hw.h @@ -15,43 +15,175 @@ #ifndef _MDP_HW_H_ #define _MDP_HW_H_ +#include +#include #include #include +typedef void (*mdp_dma_start_func_t)(void *private_data, uint32_t addr, + uint32_t stride, uint32_t width, + uint32_t height, uint32_t x, uint32_t y); + +struct mdp_out_interface { + uint32_t registered:1; + void *priv; + + /* If the interface client wants to get DMA_DONE events */ + uint32_t dma_mask; + mdp_dma_start_func_t dma_start; + + struct msmfb_callback *dma_cb; + wait_queue_head_t dma_waitqueue; + + /* If the interface client wants to be notified of non-DMA irqs, + * e.g. 
LCDC/TV-out frame start */ + uint32_t irq_mask; + struct msmfb_callback *irq_cb; +}; + struct mdp_info { + spinlock_t lock; struct mdp_device mdp_dev; char * __iomem base; int irq; + struct clk *clk; + struct clk *pclk; + struct clk *ebi1_clk; + struct mdp_out_interface out_if[MSM_MDP_NUM_INTERFACES]; + int dma_format; + int dma_pack_pattern; + bool dma_config_dirty; + struct mdp_blit_req *req; + uint32_t state; + struct timer_list standby_timer; + struct timer_list dma_timer; + + int (*enable_irq)(struct mdp_info *mdp, uint32_t mask); + int (*disable_irq)(struct mdp_info *mdp, uint32_t mask); +}; + +struct mdp_lcdc_info { + struct mdp_info *mdp; + struct clk *mdp_clk; + struct clk *mdp_pclk; + struct clk *pclk; + struct clk *pad_pclk; + struct msm_panel_data fb_panel_data; + struct platform_device fb_pdev; + struct msm_lcdc_platform_data *pdata; + uint32_t fb_start; + + struct msmfb_callback frame_start_cb; + wait_queue_head_t vsync_waitq; + int got_vsync; + unsigned color_format; + struct { + uint32_t clk_rate; + uint32_t hsync_ctl; + uint32_t vsync_period; + uint32_t vsync_pulse_width; + uint32_t display_hctl; + uint32_t display_vstart; + uint32_t display_vend; + uint32_t hsync_skew; + uint32_t polarity; + } parms; + atomic_t blank_count; + struct mutex blank_lock; +}; + +struct panel_icm_info { + bool icm_mode; + bool icm_doable; + bool clock_enabled; + int panel_update; + struct mutex icm_lock; + struct mdp_lcdc_info *lcdc; + spinlock_t lock; + void (*force_leave)(void); }; + +extern int mdp_out_if_register(struct mdp_device *mdp_dev, int interface, + void *private_data, uint32_t dma_mask, + mdp_dma_start_func_t dma_start); + +extern int mdp_out_if_req_irq(struct mdp_device *mdp_dev, int interface, + uint32_t mask, struct msmfb_callback *cb); + struct mdp_blit_req; struct mdp_device; -int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req, - struct file *src_file, unsigned long src_start, - unsigned long src_len, struct file *dst_file, - unsigned long dst_start, unsigned long dst_len); + +void mdp_ppp_dump_debug(const struct mdp_info *mdp); +int mdp_hw_init(struct mdp_info *mdp); +void mdp_check_tearing(struct mdp_info *mdp, struct msm_mdp_platform_data *pdata); +void mdp_dump_blit(struct mdp_blit_req *req); +int mdp_wait(struct mdp_info *mdp, uint32_t mask, wait_queue_head_t *wq); + #define mdp_writel(mdp, value, offset) writel(value, mdp->base + offset) #define mdp_readl(mdp, offset) readl(mdp->base + offset) +#define panel_to_lcdc(p) container_of((p), struct mdp_lcdc_info, fb_panel_data) + +/* define mdp state for multi purpose */ +#define MDP_STATE_STANDBY (1 << 0) + + +#ifdef CONFIG_MSM_MDP302 +#define MDP_SYNC_CONFIG_0 ( 0x00300) +#define MDP_SYNC_CONFIG_1 ( 0x00304) +#define MDP_SYNC_CONFIG_2 ( 0x00308) +#else +#define MDP_SYNC_CONFIG_0 ( 0x00000) +#define MDP_SYNC_CONFIG_1 ( 0x00004) +#define MDP_SYNC_CONFIG_2 ( 0x00008) +#endif + +#define MDP_SYNC_STATUS_0 ( 0x0000c) +#define MDP_SYNC_STATUS_1 ( 0x00010) +#define MDP_SYNC_STATUS_2 ( 0x00014) -#define MDP_SYNC_CONFIG_0 (0x00000) -#define MDP_SYNC_CONFIG_1 (0x00004) -#define MDP_SYNC_CONFIG_2 (0x00008) -#define MDP_SYNC_STATUS_0 (0x0000c) -#define MDP_SYNC_STATUS_1 (0x00010) -#define MDP_SYNC_STATUS_2 (0x00014) -#define MDP_SYNC_THRESH_0 (0x00018) -#define MDP_SYNC_THRESH_1 (0x0001c) -#define MDP_INTR_ENABLE (0x00020) -#define MDP_INTR_STATUS (0x00024) -#define MDP_INTR_CLEAR (0x00028) -#define MDP_DISPLAY0_START (0x00030) -#define MDP_DISPLAY1_START (0x00034) -#define MDP_DISPLAY_STATUS (0x00038) -#define 
MDP_EBI2_LCD0 (0x0003c) -#define MDP_EBI2_LCD1 (0x00040) +#ifdef CONFIG_MSM_MDP302 +#define MDP_SYNC_THRESH_0 ( 0x00200) +#define MDP_SYNC_THRESH_1 ( 0x00204) +#else +#define MDP_SYNC_THRESH_0 ( 0x00018) +#define MDP_SYNC_THRESH_1 ( 0x0001c) +#endif +#ifdef CONFIG_MSM_MDP40 +#define MDP_INTR_ENABLE ( 0x0050) +#define MDP_INTR_STATUS ( 0x0054) +#define MDP_INTR_CLEAR ( 0x0058) +#define MDP_EBI2_LCD0 ( 0x0060) +#define MDP_EBI2_LCD1 ( 0x0064) +#define MDP_EBI2_PORTMAP_MODE ( 0x0070) + +#define MDP_DMA_P_HIST_INTR_STATUS ( 0x95014) +#define MDP_DMA_P_HIST_INTR_CLEAR ( 0x95018) +#define MDP_DMA_P_HIST_INTR_ENABLE ( 0x9501C) +#else +#define MDP_INTR_ENABLE ( 0x00020) +#define MDP_INTR_STATUS ( 0x00024) +#define MDP_INTR_CLEAR ( 0x00028) +#define MDP_EBI2_LCD0 ( 0x0003c) +#define MDP_EBI2_LCD1 ( 0x00040) +#define MDP_EBI2_PORTMAP_MODE ( 0x0005c) +#endif +#define MDP_DISPLAY0_START ( 0x00030) +#define MDP_DISPLAY1_START ( 0x00034) +#define MDP_DISPLAY_STATUS ( 0x00038) +/* CONFIG_MSM_MDP302 */ +#define MDP_TEAR_CHECK_EN ( 0x0020c) +#define MDP_PRIM_START_POS ( 0x00210) + +#ifndef CONFIG_MSM_MDP31 #define MDP_DISPLAY0_ADDR (0x00054) #define MDP_DISPLAY1_ADDR (0x00058) -#define MDP_EBI2_PORTMAP_MODE (0x0005c) -#define MDP_MODE (0x00060) +#define MDP_PPP_CMD_MODE (0x00060) +#else +#define MDP_DISPLAY0_ADDR (0x10000) +#define MDP_DISPLAY1_ADDR (0x10004) +#define MDP_PPP_CMD_MODE (0x10060) +#endif + #define MDP_TV_OUT_STATUS (0x00064) #define MDP_HW_VERSION (0x00070) #define MDP_SW_RESET (0x00074) @@ -61,7 +193,43 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req, #define MDP_SECONDARY_VSYNC_OUT_CTRL (0x00084) #define MDP_EXTERNAL_VSYNC_OUT_CTRL (0x00088) #define MDP_VSYNC_CTRL (0x0008c) +#define MDP_MDDI_PARAM_WR_SEL (0x00090) +#define MDP_MDDI_PARAM (0x00094) +#define MDP_MDDI_DATA_XFR (0x00098) + + +#if defined(CONFIG_MSM_MDP40) +#define MDP_LAYERMIXER_IN_CFG (0x10100) +#define MDP_OVERLAY_REG_FLUSH (0x18000) +#define MDP_OVERLAYPROC_START(x) (0x00004 + ((x) * 0x4)) +#define MDP_OVERLAYPROC_CFG(x) (0x10004 + ((x) * 0x8000)) +#define MDP_OVERLAYPROC_OUT_SIZE(x) (0x10008 + ((x) * 0x8000)) +#define MDP_OVERLAYPROC_FB_ADDR(x) (0x1000c + ((x) * 0x8000)) +#define MDP_OVERLAYPROC_FB_ADDR2(x) (0x1001c + ((x) * 0x8000)) +#define MDP_OVERLAYPROC_FB_Y_STRIDE(x) (0x10010 + ((x) * 0x8000)) +#define MDP_OVERLAYPROC_OPMODE(x) (0x10014 + ((x) * 0x8000)) + +#define MDP_PIPE_RGB_SRC_SIZE(x) (0x40000 + ((x) * 0x10000)) +#define MDP_PIPE_RGB_SRC_XY(x) (0x40004 + ((x) * 0x10000)) +#define MDP_PIPE_RGB_OUT_SIZE(x) (0x40008 + ((x) * 0x10000)) +#define MDP_PIPE_RGB_OUT_XY(x) (0x4000c + ((x) * 0x10000)) +#define MDP_PIPE_RGB_SRC_ADDR(x) (0x40010 + ((x) * 0x10000)) +#define MDP_PIPE_RGB_SRC_Y_STRIDE(x) (0x40040 + ((x) * 0x10000)) +#define MDP_PIPE_RGB_SRC_FORMAT(x) (0x40050 + ((x) * 0x10000)) +#define MDP_PIPE_RGB_SRC_UNPACK_PATTERN(x) (0x40054 + ((x) * 0x10000)) +#define MDP_PIPE_RGB_OP_MODE(x) (0x40058 + ((x) * 0x10000)) +#define MDP_PIPE_RGB_SCALE_PHASEX_STEP(x) (0x4005c + ((x) * 0x10000)) +#define MDP_PIPE_RGB_SCALE_PHASEY_STEP(x) (0x40060 + ((x) * 0x10000)) + +#define MDP_PIPE_RGB_FETCH_CFG(x) (0x41004 + ((x) * 0x10000)) +#endif + +#if defined(CONFIG_MSM_MDP40) +#define MDP_CGC_EN (0x00040) +#else #define MDP_CGC_EN (0x00100) +#endif + #define MDP_CMD_STATUS (0x10008) #define MDP_PROFILE_EN (0x10010) #define MDP_PROFILE_COUNT (0x10014) @@ -107,6 +275,7 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req, #define MDP_FULL_BYPASS_WORD35 (0x1018c) #define MDP_FULL_BYPASS_WORD37 
(0x10194) #define MDP_FULL_BYPASS_WORD39 (0x1019c) +#define MDP_PPP_OUT_XY (0x1019c) #define MDP_FULL_BYPASS_WORD40 (0x101a0) #define MDP_FULL_BYPASS_WORD41 (0x101a4) #define MDP_FULL_BYPASS_WORD43 (0x101ac) @@ -129,11 +298,27 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req, #define MDP_FULL_BYPASS_WORD61 (0x101f4) #define MDP_FULL_BYPASS_WORD62 (0x101f8) #define MDP_FULL_BYPASS_WORD63 (0x101fc) + +#ifdef CONFIG_MSM_MDP31 +#define MDP_PPP_SRC_XY (0x10200) +#define MDP_PPP_BG_XY (0x10204) +#define MDP_PPP_SRC_IMAGE_SIZE (0x10208) +#define MDP_PPP_BG_IMAGE_SIZE (0x1020c) +#define MDP_PPP_SCALE_CONFIG (0x10230) +#define MDP_PPP_CSC_CONFIG (0x10240) +#define MDP_PPP_BLEND_BG_ALPHA_SEL (0x70010) +#endif + #define MDP_TFETCH_TEST_MODE (0x20004) #define MDP_TFETCH_STATUS (0x20008) #define MDP_TFETCH_TILE_COUNT (0x20010) #define MDP_TFETCH_FETCH_COUNT (0x20014) #define MDP_TFETCH_CONSTANT_COLOR (0x20040) +#define MDP_BGTFETCH_TEST_MODE (0x28004) +#define MDP_BGTFETCH_STATUS (0x28008) +#define MDP_BGTFETCH_TILE_COUNT (0x28010) +#define MDP_BGTFETCH_FETCH_COUNT (0x28014) +#define MDP_BGTFETCH_CONSTANT_COLOR (0x28040) #define MDP_CSC_BYPASS (0x40004) #define MDP_SCALE_COEFF_LSB (0x5fffc) #define MDP_TV_OUT_CTL (0xc0000) @@ -158,55 +343,98 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req, #define MDP_TEST_MISR_CURR_VAL_DCLK (0xd020c) #define MDP_TEST_CAPTURED_DCLK (0xd0210) #define MDP_TEST_MISR_CAPT_VAL_DCLK (0xd0214) -#define MDP_LCDC_CTL (0xe0000) + +#ifdef CONFIG_MSM_MDP40 +#define MDP_DMA_P_START (0x000c) +#else +#define MDP_DMA_P_START (0x00044) +#endif +#define MDP_DMA_P_CONFIG (0x90000) +#define MDP_DMA_P_SIZE (0x90004) +#define MDP_DMA_P_IBUF_ADDR (0x90008) +#define MDP_DMA_P_IBUF_Y_STRIDE (0x9000c) +#define MDP_DMA_P_OUT_XY (0x90010) +#define MDP_DMA_P_COLOR_CORRECT_CONFIG (0x90070) + +#define MDP_DMA_S_START (0x00048) +#define MDP_DMA_S_CONFIG (0xa0000) +#define MDP_DMA_S_SIZE (0xa0004) +#define MDP_DMA_S_IBUF_ADDR (0xa0008) +#define MDP_DMA_S_IBUF_Y_STRIDE (0xa000c) +#define MDP_DMA_S_OUT_XY (0xa0010) + +#ifdef CONFIG_MSM_MDP40 +#define MDP_LCDC_EN (0xc0000) +#define MDP_LCDC_HSYNC_CTL (0xc0004) +#define MDP_LCDC_VSYNC_PERIOD (0xc0008) +#define MDP_LCDC_VSYNC_PULSE_WIDTH (0xc000c) +#define MDP_LCDC_DISPLAY_HCTL (0xc0010) +#define MDP_LCDC_DISPLAY_V_START (0xc0014) +#define MDP_LCDC_DISPLAY_V_END (0xc0018) +#define MDP_LCDC_ACTIVE_HCTL (0xc001c) +#define MDP_LCDC_ACTIVE_V_START (0xc0020) +#define MDP_LCDC_ACTIVE_V_END (0xc0024) +#define MDP_LCDC_BORDER_CLR (0xc0028) +#define MDP_LCDC_UNDERFLOW_CTL (0xc002c) +#define MDP_LCDC_HSYNC_SKEW (0xc0030) +#define MDP_LCDC_TEST_CTL (0xc0034) +#define MDP_LCDC_CTL_POLARITY (0xc0038) +#else +#define MDP_LCDC_EN (0xe0000) #define MDP_LCDC_HSYNC_CTL (0xe0004) -#define MDP_LCDC_VSYNC_CTL (0xe0008) -#define MDP_LCDC_ACTIVE_HCTL (0xe000c) -#define MDP_LCDC_ACTIVE_VCTL (0xe0010) -#define MDP_LCDC_BORDER_CLR (0xe0014) -#define MDP_LCDC_H_BLANK (0xe0018) -#define MDP_LCDC_V_BLANK (0xe001c) -#define MDP_LCDC_UNDERFLOW_CLR (0xe0020) -#define MDP_LCDC_HSYNC_SKEW (0xe0024) -#define MDP_LCDC_TEST_CTL (0xe0028) -#define MDP_LCDC_LINE_IRQ (0xe002c) -#define MDP_LCDC_CTL_POLARITY (0xe0030) -#define MDP_LCDC_DMA_CONFIG (0xe1000) -#define MDP_LCDC_DMA_SIZE (0xe1004) -#define MDP_LCDC_DMA_IBUF_ADDR (0xe1008) -#define MDP_LCDC_DMA_IBUF_Y_STRIDE (0xe100c) - - -#define MDP_DMA2_TERM 0x1 -#define MDP_DMA3_TERM 0x2 -#define MDP_PPP_TERM 0x3 +#define MDP_LCDC_VSYNC_PERIOD (0xe0008) +#define MDP_LCDC_VSYNC_PULSE_WIDTH (0xe000c) 
+#define MDP_LCDC_DISPLAY_HCTL (0xe0010) +#define MDP_LCDC_DISPLAY_V_START (0xe0014) +#define MDP_LCDC_DISPLAY_V_END (0xe0018) +#define MDP_LCDC_ACTIVE_HCTL (0xe001c) +#define MDP_LCDC_ACTIVE_V_START (0xe0020) +#define MDP_LCDC_ACTIVE_V_END (0xe0024) +#define MDP_LCDC_BORDER_CLR (0xe0028) +#define MDP_LCDC_UNDERFLOW_CTL (0xe002c) +#define MDP_LCDC_HSYNC_SKEW (0xe0030) +#define MDP_LCDC_TEST_CTL (0xe0034) +#define MDP_LCDC_CTL_POLARITY (0xe0038) +#endif + +#define MDP_PPP_SCALE_STATUS (0x50000) +#define MDP_PPP_BLEND_STATUS (0x70000) + +/* MDP_SW_RESET */ +#define MDP_PPP_SW_RESET (1<<4) /* MDP_INTR_ENABLE */ -#define DL0_ROI_DONE (1<<0) -#define DL1_ROI_DONE (1<<1) -#define DL0_DMA2_TERM_DONE (1<<2) -#define DL1_DMA2_TERM_DONE (1<<3) -#define DL0_PPP_TERM_DONE (1<<4) -#define DL1_PPP_TERM_DONE (1<<5) -#define TV_OUT_DMA3_DONE (1<<6) -#define TV_ENC_UNDERRUN (1<<7) -#define DL0_FETCH_DONE (1<<11) -#define DL1_FETCH_DONE (1<<12) - -#define MDP_PPP_BUSY_STATUS (DL0_ROI_DONE| \ - DL1_ROI_DONE| \ - DL0_PPP_TERM_DONE| \ - DL1_PPP_TERM_DONE) - -#define MDP_ANY_INTR_MASK (DL0_ROI_DONE| \ - DL1_ROI_DONE| \ - DL0_DMA2_TERM_DONE| \ - DL1_DMA2_TERM_DONE| \ - DL0_PPP_TERM_DONE| \ - DL1_PPP_TERM_DONE| \ - DL0_FETCH_DONE| \ - DL1_FETCH_DONE| \ - TV_ENC_UNDERRUN) +#define DL0_ROI_DONE (1<<0) +#define TV_OUT_DMA3_DONE (1<<6) +#define TV_ENC_UNDERRUN (1<<7) +#define TV_OUT_FRAME_START (1<<13) + +#if defined(CONFIG_MSM_MDP40) +#define MDP_OVERLAYPROC0_DONE (1 << 0) +#define MDP_OVERLAYPROC1_DONE (1 << 1) +#define MDP_DMA_P_DONE (1 << 4) +#define MDP_LCDC_FRAME_START (1 << 7) +#elif defined(CONFIG_MSM_MDP22) +#define MDP_DMA_P_DONE (1 << 2) +#define MDP_DMA_S_DONE (1 << 3) +#else /* CONFIG_MSM_MDP31 */ + +#ifdef CONFIG_MSM_MDP40 +#define MDP_DMA_P_DONE (1 << 4) +#else +#define MDP_DMA_P_DONE (1 << 14) +#endif + +#define MDP_DMA_S_DONE (1 << 2) +#define MDP_LCDC_UNDERFLOW (1 << 16) + +#ifdef CONFIG_MSM_MDP40 +#define MDP_LCDC_FRAME_START (1 << 7) +#else +#define MDP_LCDC_FRAME_START (1 << 15) +#endif + +#endif #define MDP_TOP_LUMA 16 #define MDP_TOP_CHROMA 0 @@ -231,9 +459,15 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req, (((a)<<(bit*3))|((x)<<(bit*2))|((y)< + * + * Based on code from Code Aurora Forum. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include + +#include "mdp_hw.h" + +static void mdp_dma_to_mddi(void *priv, uint32_t addr, uint32_t stride, + uint32_t width, uint32_t height, uint32_t x, + uint32_t y) +{ + struct mdp_info *mdp = priv; + uint32_t dma2_cfg; + uint16_t ld_param = 0; /* 0=PRIM, 1=SECD, 2=EXT */ + uint32_t fmt, pattern; + + /* XXX: HACK! 
hardcode to do mddi on primary */ + mdp_writel(mdp, 0x2, MDP_DISP_INTF_SEL); + + /* configure source, base layer */ + mdp_writel(mdp, ((height << 16) | (width)), MDP_PIPE_RGB_SRC_SIZE(0)); + mdp_writel(mdp, 0, MDP_PIPE_RGB_SRC_XY(0)); + mdp_writel(mdp, addr, MDP_PIPE_RGB_SRC_ADDR(0)); + mdp_writel(mdp, stride, MDP_PIPE_RGB_SRC_Y_STRIDE(0)); + + switch (mdp->dma_format) { + case DMA_IBUF_FORMAT_XRGB8888: + fmt = PPP_CFG_MDP_XRGB_8888(SRC); + pattern = PPP_PACK_PATTERN_MDP_BGRA_8888; + break; + case DMA_IBUF_FORMAT_RGB565: + fmt = PPP_CFG_MDP_RGB_565(SRC); + pattern = PPP_PACK_PATTERN_MDP_RGB_565; + break; + default: + BUG(); + break; + } + + mdp_writel(mdp, fmt, MDP_PIPE_RGB_SRC_FORMAT(0)); + mdp_writel(mdp, pattern, MDP_PIPE_RGB_SRC_UNPACK_PATTERN(0)); + + /* configure destination */ + /* setup size, address, and stride in the overlay engine */ + mdp_writel(mdp, (height << 16) | (width), MDP_OVERLAYPROC_OUT_SIZE(0)); + mdp_writel(mdp, addr, MDP_OVERLAYPROC_FB_ADDR(0)); + mdp_writel(mdp, stride, MDP_OVERLAYPROC_FB_Y_STRIDE(0)); + + /* output i/f config is in dma_p */ + dma2_cfg = DMA_PACK_ALIGN_LSB; + + dma2_cfg |= mdp->dma_format; + dma2_cfg |= mdp->dma_pack_pattern; + dma2_cfg |= DMA_DITHER_EN; + + /* 666 18BPP */ + dma2_cfg |= DMA_DSTC0G_6BITS | DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS; + + /* set y & x offset and MDDI transaction parameters */ + mdp_writel(mdp, (y << 16) | (x), MDP_DMA_P_OUT_XY); + mdp_writel(mdp, ld_param, MDP_MDDI_PARAM_WR_SEL); + mdp_writel(mdp, (MDDI_VDO_PACKET_DESC << 16) | MDDI_VDO_PACKET_PRIM, + MDP_MDDI_PARAM); + + mdp_writel(mdp, 0x1, MDP_MDDI_DATA_XFR); + mdp_writel(mdp, dma2_cfg, MDP_DMA_P_CONFIG); + + /* start the overlay fetch */ + mdp_writel(mdp, 0, MDP_OVERLAYPROC_START(0)); +} + +int mdp_hw_init(struct mdp_info *mdp) +{ + int ret; + + ret = mdp_out_if_register(&mdp->mdp_dev, MSM_MDDI_PMDH_INTERFACE, mdp, + MDP_DMA_P_DONE, mdp_dma_to_mddi); + if (ret) + return ret; + + mdp_writel(mdp, 0, MDP_INTR_ENABLE); + mdp_writel(mdp, 0, MDP_DMA_P_HIST_INTR_ENABLE); + mdp_writel(mdp, 0, MDP_LCDC_EN); + mdp_writel(mdp, 0xffffffff, MDP_CGC_EN); + + /* XXX: why set this? QCT says it should be > mdp_pclk, + * but they never set the clkrate of pclk */ + clk_set_rate(mdp->clk, 122880000); /* 122.88 Mhz */ + pr_info("%s: mdp_clk=%lu\n", __func__, clk_get_rate(mdp->clk)); + + /* this should work for any mdp_clk freq. + * TODO: use different value for mdp_clk freqs >= 90Mhz */ + /* 8 bytes-burst x 8 req */ + mdp_writel(mdp, 0x27, MDP_DMA_P_FETCH_CFG); + /* 16 bytes-burst x 4 req */ + /* TODO: do same for vg pipes */ + mdp_writel(mdp, 0xc3, MDP_PIPE_RGB_FETCH_CFG(0)); + mdp_writel(mdp, 0xc3, MDP_PIPE_RGB_FETCH_CFG(1)); + + mdp_writel(mdp, 0x3, MDP_EBI2_PORTMAP_MODE); + + /* 3 pending requests */ + mdp_writel(mdp, 0x02222, MDP_MAX_RD_PENDING_CMD_CONFIG); + + /* RGB1 -> Layer 0 base */ + mdp_writel(mdp, 1 << 8, MDP_LAYERMIXER_IN_CFG); + + mdp_writel(mdp, 1, MDP_OVERLAYPROC_CFG(0)); + mdp_writel(mdp, 0, MDP_OVERLAYPROC_CFG(1)); + + mdp_writel(mdp, 0, MDP_OVERLAYPROC_OPMODE(0)); + + return 0; +} + diff --git a/drivers/video/msm/mdp_hw_legacy.c b/drivers/video/msm/mdp_hw_legacy.c new file mode 100644 index 0000000000000..b3cba027533bf --- /dev/null +++ b/drivers/video/msm/mdp_hw_legacy.c @@ -0,0 +1,242 @@ +/* + * Copyright (C) 2010 Google, Inc. + * Author: Dima Zavin + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include + +#include "mdp_hw.h" +#include "mdp_ppp.h" +#include "mdp_csc_table.h" + +#define MDP_CMD_DEBUG_ACCESS_BASE (0x10000) +static unsigned int mdp_irq_mask; +#if 0 +static void mdp_dma_to_mddi(void *priv, uint32_t addr, uint32_t stride, + uint32_t width, uint32_t height, uint32_t x, + uint32_t y) +{ + struct mdp_info *mdp = priv; + uint32_t dma2_cfg; + uint16_t ld_param = 0; /* 0=PRIM, 1=SECD, 2=EXT */ + + dma2_cfg = DMA_PACK_TIGHT | + DMA_PACK_ALIGN_LSB | + DMA_OUT_SEL_AHB | + DMA_IBUF_NONCONTIGUOUS; + + dma2_cfg |= mdp->dma_format; + dma2_cfg |= mdp->dma_pack_pattern; + + dma2_cfg |= DMA_OUT_SEL_MDDI; + + dma2_cfg |= DMA_MDDI_DMAOUT_LCD_SEL_PRIMARY; + + dma2_cfg |= DMA_DITHER_EN; + + /* 666 18BPP */ + dma2_cfg |= DMA_DSTC0G_6BITS | DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS; + +#ifdef CONFIG_MSM_MDP22 + /* setup size, address, and stride */ + mdp_writel(mdp, (height << 16) | (width), + MDP_CMD_DEBUG_ACCESS_BASE + 0x0184); + mdp_writel(mdp, addr, MDP_CMD_DEBUG_ACCESS_BASE + 0x0188); + mdp_writel(mdp, stride, MDP_CMD_DEBUG_ACCESS_BASE + 0x018C); + + /* set y & x offset and MDDI transaction parameters */ + mdp_writel(mdp, (y << 16) | (x), MDP_CMD_DEBUG_ACCESS_BASE + 0x0194); + mdp_writel(mdp, ld_param, MDP_CMD_DEBUG_ACCESS_BASE + 0x01a0); + if (mdp->mdp_dev.color_format == MSM_MDP_OUT_IF_FMT_RGB565) + mdp_writel(mdp, (MDDI_VDO_PACKET_DESC_RGB565 << 16) | MDDI_VDO_PACKET_PRIM, + MDP_CMD_DEBUG_ACCESS_BASE + 0x01a4); + else + mdp_writel(mdp, (MDDI_VDO_PACKET_DESC_RGB666 << 16) | MDDI_VDO_PACKET_PRIM, + MDP_CMD_DEBUG_ACCESS_BASE + 0x01a4); + mdp_writel(mdp, dma2_cfg, MDP_CMD_DEBUG_ACCESS_BASE + 0x0180); + + /* start DMA2 */ + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0044); +#else + /* setup size, address, and stride */ + mdp_writel(mdp, (height << 16) | (width), MDP_DMA_P_SIZE); + mdp_writel(mdp, addr, MDP_DMA_P_IBUF_ADDR); + mdp_writel(mdp, stride, MDP_DMA_P_IBUF_Y_STRIDE); + + /* set y & x offset and MDDI transaction parameters */ + mdp_writel(mdp, (y << 16) | (x), MDP_DMA_P_OUT_XY); + mdp_writel(mdp, ld_param, MDP_MDDI_PARAM_WR_SEL); + if (mdp->mdp_dev.color_format == MSM_MDP_OUT_IF_FMT_RGB565) + mdp_writel(mdp, (MDDI_VDO_PACKET_DESC_RGB565 << 16) | MDDI_VDO_PACKET_PRIM, + MDP_MDDI_PARAM); + else + mdp_writel(mdp, (MDDI_VDO_PACKET_DESC_RGB666 << 16) | MDDI_VDO_PACKET_PRIM, + MDP_MDDI_PARAM); + + mdp_writel(mdp, 0x1, MDP_MDDI_DATA_XFR); + mdp_writel(mdp, dma2_cfg, MDP_DMA_P_CONFIG); + mdp_writel(mdp, 0, MDP_DMA_P_START); +#endif +} +#endif +#if defined CONFIG_MSM_MDP302 +void mdp_check_tearing(struct mdp_info *mdp, struct msm_mdp_platform_data *pdata) +{ mdp_writel(mdp, pdata->sync_config, MDP_SYNC_CONFIG_0); + mdp_writel(mdp, 1, MDP_TEAR_CHECK_EN); + mdp_writel(mdp, pdata->sync_thresh, MDP_SYNC_THRESH_0); + mdp_writel(mdp, pdata->sync_start_pos, MDP_PRIM_START_POS); +} +#endif +#if 0 +int mdp_hw_init(struct mdp_info *mdp) +{ + int n; + + n = mdp_out_if_register(&mdp->mdp_dev, MSM_MDDI_PMDH_INTERFACE, mdp, + MDP_DMA_P_DONE, mdp_dma_to_mddi); + if (n) + return n; + + mdp_writel(mdp, 0, MDP_INTR_ENABLE); + + /* debug interface write access */ + mdp_writel(mdp, 1, 0x60); + mdp_writel(mdp, 1, MDP_EBI2_PORTMAP_MODE); + +#ifndef CONFIG_MSM_MDP22 + /* disable lcdc */ + mdp_writel(mdp, 0, MDP_LCDC_EN); + /* enable auto clock gating 
for all blocks by default */ + mdp_writel(mdp, 0xffffffff, MDP_CGC_EN); + /* reset color/gamma correct parms */ + mdp_writel(mdp, 0, MDP_DMA_P_COLOR_CORRECT_CONFIG); +#endif + + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01f8); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01fc); + mdp_writel(mdp, 1, 0x60); + + for (n = 0; n < ARRAY_SIZE(csc_color_lut); n++) + mdp_writel(mdp, csc_color_lut[n].val, csc_color_lut[n].reg); + + /* clear up unused fg/main registers */ + /* comp.plane 2&3 ystride */ + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0120); + + /* unpacked pattern */ + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x012c); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0130); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0134); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0158); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x015c); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0160); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0170); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0174); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x017c); + + /* comp.plane 2 & 3 */ + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0114); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0118); + + /* clear unused bg registers */ + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01c8); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01d0); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01dc); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e0); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e4); + + for (n = 0; n < ARRAY_SIZE(csc_matrix_config_table); n++) + mdp_writel(mdp, csc_matrix_config_table[n].val, + csc_matrix_config_table[n].reg); + + mdp_ppp_init_scale(mdp); + +#ifndef CONFIG_MSM_MDP31 + mdp_writel(mdp, 0x04000400, MDP_COMMAND_CONFIG); +#endif + + return 0; +} +#endif + +int mdp_hw_init(struct mdp_info *mdp) +{ + int n; + int lcdc_enabled; + + mdp_irq_mask = 0; + + mdp_writel(mdp, 0, MDP_INTR_ENABLE); + + /* debug interface write access */ + mdp_writel(mdp, 1, 0x60); + mdp_writel(mdp, 1, MDP_EBI2_PORTMAP_MODE); + +#ifndef CONFIG_MSM_MDP22 + lcdc_enabled = mdp_readl(mdp, MDP_LCDC_EN); + /* disable lcdc */ + mdp_writel(mdp, 0, MDP_LCDC_EN); + /* enable auto clock gating for all blocks by default */ + mdp_writel(mdp, 0xffffffff, MDP_CGC_EN); + /* reset color/gamma correct parms */ + mdp_writel(mdp, 0, MDP_DMA_P_COLOR_CORRECT_CONFIG); +#endif + + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01f8); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01fc); + mdp_writel(mdp, 1, 0x60); + + for (n = 0; n < ARRAY_SIZE(csc_color_lut); n++) + mdp_writel(mdp, csc_color_lut[n].val, csc_color_lut[n].reg); + + /* clear up unused fg/main registers */ + /* comp.plane 2&3 ystride */ + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0120); + + /* unpacked pattern */ + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x012c); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0130); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0134); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0158); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x015c); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0160); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0170); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0174); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x017c); + + /* comp.plane 2 & 3 */ + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0114); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0118); + + /* clear 
unused bg registers */ + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01c8); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01d0); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01dc); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e0); + mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e4); + + for (n = 0; n < ARRAY_SIZE(csc_matrix_config_table); n++) + mdp_writel(mdp, csc_matrix_config_table[n].val, + csc_matrix_config_table[n].reg); + + mdp_ppp_init_scale(mdp); + +#ifndef CONFIG_MSM_MDP31 + mdp_writel(mdp, 0x04000400, MDP_COMMAND_CONFIG); +#endif +#ifndef CONFIG_MSM_MDP22 + if (lcdc_enabled) + mdp_writel(mdp, 1, MDP_LCDC_EN); +#endif + return 0; +} \ No newline at end of file diff --git a/drivers/video/msm/mdp_lcdc.c b/drivers/video/msm/mdp_lcdc.c new file mode 100644 index 0000000000000..b0b2f3787f7ef --- /dev/null +++ b/drivers/video/msm/mdp_lcdc.c @@ -0,0 +1,814 @@ +/* drivers/video/msm/mdp_lcdc.c + * + * Copyright (c) 2009 Google Inc. + * Copyright (c) 2009 QUALCOMM Incorporated + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Author: Dima Zavin + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mdp_hw.h" +#ifdef CONFIG_MSM_MDP40 +#include "mdp4.h" +#endif +#ifdef CONFIG_PANEL_SELF_REFRESH +#include +#endif + +#if 0 +#define D(fmt, args...) printk(KERN_INFO "Dispaly: " fmt, ##args) +#else +#define D(fmt, args...) do {} while (0) +#endif + +#if defined(CONFIG_ARCH_MSM7227) +#define LCDC_MUX_CTL (MSM_TGPIO1_BASE + 0x278) +#endif + +static struct mdp_device *mdp_dev; + +#ifdef CONFIG_MSM_MDP40 +static struct mdp4_overlay_pipe *lcdc_pipe; +#endif + +#ifdef CONFIG_PANEL_SELF_REFRESH +#if 0 +#define ICM_DBG(s...) printk("[icm]" s) +#else +#define ICM_DBG(s...) 
do {} while (0) +#endif + +/* set the timeout to 200 milliseconds */ +#define PANEL_ENTER_IDLE_TIMEOUT HZ/5 +/* Afetr setting ICM=1, we need to keep sending the RGB signal more than 2-frame */ +#define PANEL_IDLE_STABLE_TIMEOUT 48 + +static struct task_struct *th_display; +struct panel_icm_info *panel_icm; +DECLARE_WAIT_QUEUE_HEAD(panel_update_wait_queue); +#endif + +#ifdef CONFIG_PANEL_SELF_REFRESH +static int icm_check_panel_update(void) +{ + int ret; + unsigned long irq_flags = 0; + + spin_lock_irqsave(&panel_icm->lock, irq_flags); + ret = panel_icm->panel_update; + spin_unlock_irqrestore(&panel_icm->lock, irq_flags); + return ret; +} + +static int icm_thread(void *data) +{ + struct mdp_lcdc_info *lcdc; + struct msm_lcdc_panel_ops *panel_ops; + int rc; + unsigned long irq_flags = 0; + + lcdc = data; + panel_ops = lcdc->pdata->panel_ops; + while (1) { + rc = wait_event_timeout(panel_update_wait_queue, icm_check_panel_update() == 1, PANEL_ENTER_IDLE_TIMEOUT); + ICM_DBG("ICM Thread:wake up rc=%d \n", rc); + mutex_lock(&panel_icm->icm_lock); + if (rc == 0 && icm_check_panel_update() != 1) {/* wait_timeout */ + ICM_DBG("EnterICM: icm_mode=%d icm_doable=%d \n", panel_icm->icm_mode, panel_icm->icm_doable); + if (panel_icm->icm_mode == false && panel_icm->icm_doable == true) { + + if (panel_ops->refresh_enable) + panel_ops->refresh_enable(panel_ops); + + panel_icm->icm_mode = true; + msleep(PANEL_IDLE_STABLE_TIMEOUT); + + mdp_writel(lcdc->mdp, 0, MDP_LCDC_EN); + clk_disable(lcdc->pad_pclk); + clk_disable(lcdc->pclk); + clk_disable(lcdc->mdp_clk); + panel_icm->clock_enabled = false; + pr_info("EnterICM: enter ICM MODE done!!!\n"); + } + } else {/* get update event, no timeout */ + ICM_DBG("Leave ICM: icm_mode=%d icm_doable=%d \n", panel_icm->icm_mode, panel_icm->icm_doable); + if (panel_icm->icm_mode == true && panel_icm->icm_doable == true) { + clk_enable(lcdc->mdp_clk); + clk_enable(lcdc->pclk); + clk_enable(lcdc->pad_pclk); + mdp_writel(lcdc->mdp, 1, MDP_LCDC_EN); + panel_icm->clock_enabled = true; + + if (panel_ops->refresh_disable) + panel_ops->refresh_disable(panel_ops); + + panel_icm->icm_mode = false; + pr_info("LeaveICM: leave ICM MODE done !!!\n"); + } + spin_lock_irqsave(&panel_icm->lock, irq_flags); + panel_icm->panel_update = 0; + spin_unlock_irqrestore(&panel_icm->lock, irq_flags); + } + mutex_unlock(&panel_icm->icm_lock); + } /* end while */ + return 0; +} + +static void icm_force_leave(void) +{ + struct msm_lcdc_panel_ops *panel_ops; + unsigned long irq_flags = 0; + + panel_ops = panel_icm->lcdc->pdata->panel_ops; + + mutex_lock(&panel_icm->icm_lock); + ICM_DBG("Force Leave ICM: icm_mode=%d icm_doable=%d \n", panel_icm->icm_mode, panel_icm->icm_doable); + if (panel_icm->icm_mode == true) { + clk_enable(panel_icm->lcdc->mdp_clk); + clk_enable(panel_icm->lcdc->pclk); + clk_enable(panel_icm->lcdc->pad_pclk); + mdp_writel(panel_icm->lcdc->mdp, 1, MDP_LCDC_EN); + panel_icm->clock_enabled = true; + if (panel_ops->refresh_disable) + panel_ops->refresh_disable(panel_ops); + panel_icm->icm_mode = false; + panel_icm->icm_doable = true; + pr_info("ForceLeaveICM: leave ICM MODE done !!!\n"); + } + spin_lock_irqsave(&panel_icm->lock, irq_flags); + panel_icm->panel_update = 0; + spin_unlock_irqrestore(&panel_icm->lock, irq_flags); + mutex_unlock(&panel_icm->icm_lock); +} + +static int icm_init(struct mdp_lcdc_info *lcdc) +{ + int ret = 0; + + /* init panel_icm_info */ + panel_icm = kzalloc(sizeof(struct panel_icm_info), GFP_KERNEL); + if (!panel_icm) + return -ENOMEM; + panel_icm->icm_doable = 
1; + panel_icm->clock_enabled = true; + panel_icm->lcdc = lcdc; + panel_icm->force_leave = icm_force_leave; + mutex_init(&panel_icm->icm_lock); + th_display = kthread_run(icm_thread, lcdc, "panel-enterIdle"); + if (IS_ERR(th_display)) { + ret = PTR_ERR(th_display); + pr_err("%s: panel_icm_thread create fail:%d!!!\n", __func__, ret); + goto error_create_thread; + } + return ret; +error_create_thread: + kfree(panel_icm); + return ret; +} +#endif + +static int lcdc_unblank(struct msm_panel_data *fb_panel) +{ + struct mdp_lcdc_info *lcdc = panel_to_lcdc(fb_panel); + struct msm_lcdc_panel_ops *panel_ops = lcdc->pdata->panel_ops; + + pr_info("%s: ()\n", __func__); + + if (panel_ops->unblank) + panel_ops->unblank(panel_ops); + + return 0; +} + +static int lcdc_blank(struct msm_panel_data *fb_panel) +{ + struct mdp_lcdc_info *lcdc = panel_to_lcdc(fb_panel); + struct msm_lcdc_panel_ops *panel_ops = lcdc->pdata->panel_ops; + + pr_info("%s: ()\n", __func__); + + if (panel_ops->blank) + panel_ops->blank(panel_ops); + + return 0; +} + +static int lcdc_shutdown(struct msm_panel_data *fb_panel) +{ + struct mdp_lcdc_info *lcdc = panel_to_lcdc(fb_panel); + struct msm_lcdc_panel_ops *panel_ops = lcdc->pdata->panel_ops; + + pr_info("%s: ()\n", __func__); + + if (panel_ops->shutdown) + panel_ops->shutdown(panel_ops); + + return 0; +} + +static int lcdc_suspend(struct msm_panel_data *fb_panel) +{ + struct mdp_lcdc_info *lcdc = panel_to_lcdc(fb_panel); + struct msm_lcdc_panel_ops *panel_ops = lcdc->pdata->panel_ops; + + pr_info("%s: suspending\n", __func__); + +#if defined(CONFIG_ARCH_MSM7227) + writel(0x0, LCDC_MUX_CTL); + D("suspend_lcdc_mux_ctl = %x\n", readl(LCDC_MUX_CTL)); +#endif +#ifdef CONFIG_PANEL_SELF_REFRESH + if (lcdc->mdp->mdp_dev.overrides & MSM_MDP_RGB_PANEL_SELE_REFRESH) { + mutex_lock(&panel_icm->icm_lock); + panel_icm->icm_doable = false; + pr_info("[ICM %s]: icm mode=%d, clock_enabled=%d\n", __func__, panel_icm->icm_mode, panel_icm->clock_enabled); + if (panel_icm->icm_mode == true && panel_icm->clock_enabled == false) { + if (panel_ops->refresh_disable) + panel_ops->refresh_disable(panel_ops); + panel_icm->icm_mode = false; + } else { + mdp_writel(lcdc->mdp, 0, MDP_LCDC_EN); + clk_disable(lcdc->pad_pclk); + clk_disable(lcdc->pclk); + clk_disable(lcdc->mdp_clk); + } + panel_icm->clock_enabled = false; + mutex_unlock(&panel_icm->icm_lock); + } else { + mdp_writel(lcdc->mdp, 0, MDP_LCDC_EN); + clk_disable(lcdc->pad_pclk); + clk_disable(lcdc->pclk); + clk_disable(lcdc->mdp_clk); + } +#else + mdp_writel(lcdc->mdp, 0, MDP_LCDC_EN); + clk_disable(lcdc->pad_pclk); + clk_disable(lcdc->pclk); + if (lcdc->mdp_pclk) + clk_disable(lcdc->mdp_pclk); + clk_disable(lcdc->mdp_clk); +#endif + if (panel_ops->uninit) + panel_ops->uninit(panel_ops); + + return 0; +} + +static int lcdc_resume(struct msm_panel_data *fb_panel) +{ + struct mdp_lcdc_info *lcdc = panel_to_lcdc(fb_panel); + struct msm_lcdc_panel_ops *panel_ops = lcdc->pdata->panel_ops; + + pr_info("%s: resuming\n", __func__); + + if (panel_ops->init) { + if (panel_ops->init(panel_ops) < 0) + printk(KERN_ERR "LCD init fail!\n"); + } + clk_enable(lcdc->mdp_clk); + if (lcdc->mdp_pclk) + clk_enable(lcdc->mdp_pclk); + clk_enable(lcdc->pclk); + clk_enable(lcdc->pad_pclk); +#if defined(CONFIG_ARCH_MSM7227) + writel(0x1, LCDC_MUX_CTL); + D("resume_lcdc_mux_ctl = %x\n", readl(LCDC_MUX_CTL)); +#endif + + mdp_writel(lcdc->mdp, 1, MDP_LCDC_EN); +#ifdef CONFIG_PANEL_SELF_REFRESH + if (lcdc->mdp->mdp_dev.overrides & MSM_MDP_RGB_PANEL_SELE_REFRESH) { + 
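/* added note (not in original patch): the display is active again after resume, so re-allow idle self-refresh (ICM) and mark the panel clocks as enabled */ +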
mutex_lock(&panel_icm->icm_lock); + panel_icm->icm_doable = true; + panel_icm->clock_enabled = true; + mutex_unlock(&panel_icm->icm_lock); + } +#endif + + return 0; +} + +static int lcdc_hw_init(struct mdp_lcdc_info *lcdc) +{ + struct msm_panel_data *fb_panel = &lcdc->fb_panel_data; + uint32_t dma_cfg; + uint32_t fb_size; + + clk_enable(lcdc->mdp_clk); + if (lcdc->mdp_pclk) + clk_enable(lcdc->mdp_pclk); + clk_enable(lcdc->pclk); + clk_enable(lcdc->pad_pclk); + +#ifdef CONFIG_MSM_MDP40 + mdp_writel(lcdc->mdp, 0, MDP_LCDC_EN); + mdelay(10); +#endif + clk_set_rate(lcdc->pclk, lcdc->parms.clk_rate); + clk_set_rate(lcdc->pad_pclk, lcdc->parms.clk_rate); + /* write the lcdc params */ + mdp_writel(lcdc->mdp, lcdc->parms.hsync_ctl, MDP_LCDC_HSYNC_CTL); + mdp_writel(lcdc->mdp, lcdc->parms.vsync_period, MDP_LCDC_VSYNC_PERIOD); + mdp_writel(lcdc->mdp, lcdc->parms.vsync_pulse_width, + MDP_LCDC_VSYNC_PULSE_WIDTH); + mdp_writel(lcdc->mdp, lcdc->parms.display_hctl, MDP_LCDC_DISPLAY_HCTL); + mdp_writel(lcdc->mdp, lcdc->parms.display_vstart, + MDP_LCDC_DISPLAY_V_START); + mdp_writel(lcdc->mdp, lcdc->parms.display_vend, MDP_LCDC_DISPLAY_V_END); + mdp_writel(lcdc->mdp, lcdc->parms.hsync_skew, MDP_LCDC_HSYNC_SKEW); + + mdp_writel(lcdc->mdp, 0, MDP_LCDC_BORDER_CLR); + mdp_writel(lcdc->mdp, 0x80000000 | 0xff, MDP_LCDC_UNDERFLOW_CTL); + mdp_writel(lcdc->mdp, 0, MDP_LCDC_ACTIVE_HCTL); + mdp_writel(lcdc->mdp, 0, MDP_LCDC_ACTIVE_V_START); + mdp_writel(lcdc->mdp, 0, MDP_LCDC_ACTIVE_V_END); + mdp_writel(lcdc->mdp, lcdc->parms.polarity, MDP_LCDC_CTL_POLARITY); + + fb_size = ((fb_panel->fb_data->yres & 0x7ff) << 16) | + (fb_panel->fb_data->xres & 0x7ff); + + /* config the dma_p block that drives the lcdc data */ + mdp_writel(lcdc->mdp, lcdc->fb_start, MDP_DMA_P_IBUF_ADDR); + mdp_writel(lcdc->mdp, fb_size, MDP_DMA_P_SIZE); + mdp_writel(lcdc->mdp, 0, MDP_DMA_P_OUT_XY); + +#ifdef CONFIG_MSM_MDP40 + mdp_writel(lcdc->mdp, lcdc->fb_start, MDP_PIPE_RGB_SRC_ADDR(0)); + mdp_writel(lcdc->mdp, fb_size, MDP_PIPE_RGB_SRC_SIZE(0)); + mdp_writel(lcdc->mdp, 0, MDP_PIPE_RGB_SRC_XY(0)); +#endif + + dma_cfg = mdp_readl(lcdc->mdp, MDP_DMA_P_CONFIG); + dma_cfg &= ~(DMA_PACK_PATTERN_MASK | DMA_PACK_ALIGN_MASK); + dma_cfg |= (DMA_PACK_ALIGN_MSB | + DMA_PACK_PATTERN_RGB | + DMA_DITHER_EN); + dma_cfg |= DMA_OUT_SEL_LCDC; + dma_cfg &= ~DMA_DST_BITS_MASK; + + if (fb_panel->fb_data->output_format == MSM_MDP_OUT_IF_FMT_RGB666) + dma_cfg |= DMA_DSTC0G_6BITS | + DMA_DSTC1B_6BITS | + DMA_DSTC2R_6BITS; + else if (fb_panel->fb_data->output_format == MSM_MDP_OUT_IF_FMT_RGB888) + dma_cfg |= DMA_DSTC0G_8BITS | + DMA_DSTC1B_8BITS | + DMA_DSTC2R_8BITS; + else + dma_cfg |= DMA_DSTC0G_6BITS | + DMA_DSTC1B_5BITS | + DMA_DSTC2R_5BITS; + + mdp_writel(lcdc->mdp, dma_cfg, MDP_DMA_P_CONFIG); + +#ifdef CONFIG_MSM_MDP40 + mdp_writel(lcdc->mdp, 0, MDP_DISP_INTF_SEL); +#endif + + /* enable the lcdc timing generation */ + mdp_writel(lcdc->mdp, 1, MDP_LCDC_EN); + + return 0; +} + +static void lcdc_wait_vsync(struct msm_panel_data *panel) +{ + struct mdp_lcdc_info *lcdc = panel_to_lcdc(panel); + int ret; + + ret = wait_event_timeout(lcdc->vsync_waitq, lcdc->got_vsync, HZ / 2); + if (!ret && !lcdc->got_vsync) + pr_err("%s: timeout waiting for VSYNC\n", __func__); + lcdc->got_vsync = 0; +} + +static void lcdc_request_vsync(struct msm_panel_data *fb_panel, + struct msmfb_callback *vsync_cb) +{ + struct mdp_lcdc_info *lcdc = panel_to_lcdc(fb_panel); + + /* the vsync callback will start the dma */ + vsync_cb->func(vsync_cb); + lcdc->got_vsync = 0; + mdp_out_if_req_irq(mdp_dev, 
MSM_LCDC_INTERFACE, MDP_LCDC_FRAME_START, + &lcdc->frame_start_cb); + lcdc_wait_vsync(fb_panel); +} + +static void lcdc_clear_vsync(struct msm_panel_data *fb_panel) +{ + struct mdp_lcdc_info *lcdc = panel_to_lcdc(fb_panel); + lcdc->got_vsync = 0; + mdp_out_if_req_irq(mdp_dev, MSM_LCDC_INTERFACE, 0, NULL); +} + +/* called in irq context with mdp lock held, when mdp gets the + * MDP_LCDC_FRAME_START interrupt */ +static void lcdc_frame_start(struct msmfb_callback *cb) +{ + struct mdp_lcdc_info *lcdc; + + lcdc = container_of(cb, struct mdp_lcdc_info, frame_start_cb); + + lcdc->got_vsync = 1; + wake_up(&lcdc->vsync_waitq); +} + +#ifdef CONFIG_MSM_MDP40 +static void lcdc_overlay_start(void *priv, uint32_t addr, uint32_t stride, + uint32_t width, uint32_t height, uint32_t x, + uint32_t y) +{ + struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev); + + struct mdp4_overlay_pipe *pipe; + pipe = lcdc_pipe; + pipe->srcp0_addr = addr; + + if (mdp->dma_config_dirty) + { + if(mdp->dma_format == DMA_IBUF_FORMAT_RGB565) { + pipe->src_format = MDP_RGB_565; + pipe->srcp0_ystride = pipe->src_width * 2; + } else if(mdp->dma_format == DMA_IBUF_FORMAT_XRGB8888) { + pipe->src_format = MDP_RGBA_8888; + pipe->srcp0_ystride = pipe->src_width * 4; + } + mdp4_overlay_format2pipe(pipe); + mdp_writel(pipe->mdp, 0, MDP_LCDC_EN); + mdelay(30); + mdp4_overlay_dmap_xy(pipe); + mdp4_overlay_dmap_cfg(pipe, 1); + mdp4_overlayproc_cfg(pipe); + mdp4_overlay_rgb_setup(pipe); + mdp4_overlay_reg_flush(pipe, 1); /* rgb1 and mixer0 */ + mdp_writel(pipe->mdp, 1, MDP_LCDC_EN); + mdp->dma_config_dirty = false; + } else { + mdp4_overlay_rgb_setup(pipe); + mdp4_overlay_reg_flush(pipe, 1); /* rgb1 and mixer0 */ + } + +} +#else +static void lcdc_dma_start(void *priv, uint32_t addr, uint32_t stride, + uint32_t width, uint32_t height, uint32_t x, + uint32_t y) +{ + struct mdp_lcdc_info *lcdc = priv; + + struct mdp_info *mdp = lcdc->mdp; + uint32_t dma2_cfg; + +#ifdef CONFIG_MSM_MDP31 + if (lcdc->mdp->dma_config_dirty) { + + mdp_writel(lcdc->mdp, 0, MDP_LCDC_EN); + mdelay(30); + mdp_dev->configure_dma(mdp_dev); + mdp_writel(lcdc->mdp, 1, MDP_LCDC_EN); + } + + mdp_writel(lcdc->mdp, stride, MDP_DMA_P_IBUF_Y_STRIDE); + mdp_writel(lcdc->mdp, addr, MDP_DMA_P_IBUF_ADDR); +#else + if (lcdc->mdp->dma_config_dirty) { + uint32_t fmt; + uint32_t dma_fmt; + uint32_t dma_ptrn; + uint32_t pattern; + + switch (mdp->dma_format) { + case DMA_IBUF_FORMAT_XRGB8888: + dma_fmt = DMA_IBUF_FORMAT_RGB888; + dma_ptrn = DMA_PACK_PATTERN_BGR; + fmt = PPP_CFG_MDP_XRGB_8888(SRC); + pattern = PPP_PACK_PATTERN_MDP_BGRA_8888; + break; + case DMA_IBUF_FORMAT_RGB565: + dma_fmt = DMA_IBUF_FORMAT_RGB565; + dma_ptrn = DMA_PACK_PATTERN_RGB; + fmt = PPP_CFG_MDP_RGB_565(SRC); + pattern = PPP_PACK_PATTERN_MDP_RGB_565; + break; + default: + BUG(); + break; + } + + mdp_writel(mdp, fmt, MDP_PIPE_RGB_SRC_FORMAT(0)); + mdp_writel(mdp, pattern, MDP_PIPE_RGB_SRC_UNPACK_PATTERN(0)); + + dma2_cfg = mdp_readl(lcdc->mdp, MDP_DMA_P_CONFIG); + dma2_cfg &= ~(DMA_PACK_PATTERN_MASK | DMA_IBUF_FORMAT_MASK | + DMA_PACK_ALIGN_MASK); + dma2_cfg |= dma_ptrn | dma_fmt | DMA_PACK_ALIGN_MSB; + mdp_writel(mdp, dma2_cfg, MDP_DMA_P_CONFIG); + lcdc->mdp->dma_config_dirty = false; + } + + mdp_writel(mdp, addr, MDP_PIPE_RGB_SRC_ADDR(0)); + mdp_writel(mdp, stride, MDP_PIPE_RGB_SRC_Y_STRIDE(0)); + + /* flush the new pipe config */ + mdp_writel(mdp, 0x11, MDP_OVERLAY_REG_FLUSH); +#endif +} +#endif + +static void precompute_timing_parms(struct mdp_lcdc_info *lcdc) +{ + struct msm_lcdc_timing *timing 
= lcdc->pdata->timing; + struct msm_fb_data *fb_data = lcdc->pdata->fb_data; + unsigned int hsync_period; + unsigned int hsync_start_x; + unsigned int hsync_end_x; + unsigned int vsync_period; + unsigned int display_vstart; + unsigned int display_vend; + +#ifdef CONFIG_MACH_SUPERSONIC + hsync_period = (timing->hsync_pulse_width + timing->hsync_back_porch + + fb_data->xres + timing->hsync_front_porch); + hsync_start_x = (timing->hsync_pulse_width + timing->hsync_back_porch); + hsync_end_x = hsync_start_x + fb_data->xres - 1; + + vsync_period = (timing->vsync_pulse_width + timing->vsync_back_porch + + fb_data->yres + timing->vsync_front_porch); + vsync_period *= hsync_period; + + display_vstart = timing->vsync_pulse_width + timing->vsync_back_porch; + display_vstart *= hsync_period; + display_vstart += timing->hsync_skew; + + display_vend = (timing->vsync_pulse_width + timing->vsync_back_porch + + fb_data->yres) * hsync_period; +#else + hsync_period = (timing->hsync_back_porch + + fb_data->xres + timing->hsync_front_porch); + hsync_start_x = timing->hsync_back_porch; + hsync_end_x = hsync_start_x + fb_data->xres - 1; + + vsync_period = (timing->vsync_back_porch + + fb_data->yres + timing->vsync_front_porch); + vsync_period *= hsync_period; + + display_vstart = timing->vsync_back_porch; + display_vstart *= hsync_period; + display_vstart += timing->hsync_skew; + + display_vend = (timing->vsync_back_porch + fb_data->yres) * + hsync_period; +#endif + display_vend += timing->hsync_skew - 1; + + /* register values we pre-compute at init time from the timing + * information in the panel info */ + lcdc->parms.hsync_ctl = (((hsync_period & 0xfff) << 16) | + (timing->hsync_pulse_width & 0xfff)); + lcdc->parms.vsync_period = vsync_period & 0xffffff; + lcdc->parms.vsync_pulse_width = (timing->vsync_pulse_width * + hsync_period) & 0xffffff; + + lcdc->parms.display_hctl = (((hsync_end_x & 0xfff) << 16) | + (hsync_start_x & 0xfff)); + lcdc->parms.display_vstart = display_vstart & 0xffffff; + lcdc->parms.display_vend = display_vend & 0xffffff; + lcdc->parms.hsync_skew = timing->hsync_skew & 0xfff; + lcdc->parms.polarity = ((timing->hsync_act_low << 0) | + (timing->vsync_act_low << 1) | + (timing->den_act_low << 2)); + lcdc->parms.clk_rate = timing->clk_rate; +} + +static int mdp_lcdc_probe(struct platform_device *pdev) +{ + struct msm_lcdc_platform_data *pdata = pdev->dev.platform_data; + struct mdp_lcdc_info *lcdc; + int ret = 0; +#ifdef CONFIG_MSM_MDP40 + struct mdp4_overlay_pipe *pipe; + int ptype; +#endif + + if (!pdata) { + pr_err("%s: no LCDC platform data found\n", __func__); + return -EINVAL; + } + + lcdc = kzalloc(sizeof(struct mdp_lcdc_info), GFP_KERNEL); + if (!lcdc) + return -ENOMEM; + + /* We don't actually own the clocks, the mdp does. 
*/ + lcdc->mdp_clk = clk_get(mdp_dev->dev.parent, "mdp_clk"); + if (IS_ERR(lcdc->mdp_clk)) { + pr_err("%s: failed to get mdp_clk\n", __func__); + ret = PTR_ERR(lcdc->mdp_clk); + goto err_get_mdp_clk; + } + + lcdc->mdp_pclk = clk_get(mdp_dev->dev.parent, "mdp_pclk"); + if (IS_ERR(lcdc->mdp_pclk)) + lcdc->mdp_pclk = NULL; + + lcdc->pclk = clk_get(mdp_dev->dev.parent, "lcdc_pclk_clk"); + if (IS_ERR(lcdc->pclk)) { + pr_err("%s: failed to get lcdc_pclk\n", __func__); + ret = PTR_ERR(lcdc->pclk); + goto err_get_pclk; + } + + lcdc->pad_pclk = clk_get(mdp_dev->dev.parent, "lcdc_pad_pclk_clk"); + if (IS_ERR(lcdc->pad_pclk)) { + pr_err("%s: failed to get lcdc_pad_pclk\n", __func__); + ret = PTR_ERR(lcdc->pad_pclk); + goto err_get_pad_pclk; + } + + init_waitqueue_head(&lcdc->vsync_waitq); + lcdc->pdata = pdata; + lcdc->frame_start_cb.func = lcdc_frame_start; + + platform_set_drvdata(pdev, lcdc); +#ifdef CONFIG_MSM_MDP40 + mdp_out_if_register(mdp_dev, MSM_LCDC_INTERFACE, lcdc, INTR_OVERLAY0_DONE, + lcdc_overlay_start); +#else + mdp_out_if_register(mdp_dev, MSM_LCDC_INTERFACE, lcdc, MDP_DMA_P_DONE, + lcdc_dma_start); +#endif + precompute_timing_parms(lcdc); + + lcdc->fb_start = pdata->fb_resource->start; + lcdc->mdp = container_of(mdp_dev, struct mdp_info, mdp_dev); + if(lcdc->mdp->mdp_dev.color_format) + lcdc->color_format = lcdc->mdp->mdp_dev.color_format; + else + lcdc->color_format = MSM_MDP_OUT_IF_FMT_RGB565; + +#ifdef CONFIG_MSM_MDP40 + if (lcdc_pipe == NULL) { + ptype = mdp4_overlay_format2type(MDP_RGB_565); + pipe = mdp4_overlay_pipe_alloc(ptype); + if (!pipe) + goto err_mdp4_overlay_pipe_alloc; + pipe->mixer_stage = MDP4_MIXER_STAGE_BASE; + pipe->mixer_num = MDP4_MIXER0; + pipe->src_format = MDP_RGB_565; + mdp4_overlay_format2pipe(pipe); + pipe->mdp = lcdc->mdp; + + lcdc_pipe = pipe; /* keep it */ + } else { + pipe = lcdc_pipe; + } + + pipe->src_height = pdata->fb_data->yres; + pipe->src_width = pdata->fb_data->xres; + pipe->src_h = pdata->fb_data->yres; + pipe->src_w = pdata->fb_data->xres; + pipe->src_y = 0; + pipe->src_x = 0; + pipe->srcp0_addr = (uint32_t) lcdc->fb_start; + pipe->srcp0_ystride = pdata->fb_data->xres * 2; + + mdp4_overlay_rgb_setup(pipe); + mdp4_mixer_stage_up(pipe); +#endif + + lcdc->fb_panel_data.suspend = lcdc_suspend; + lcdc->fb_panel_data.resume = lcdc_resume; + lcdc->fb_panel_data.wait_vsync = lcdc_wait_vsync; + lcdc->fb_panel_data.request_vsync = lcdc_request_vsync; + lcdc->fb_panel_data.clear_vsync = lcdc_clear_vsync; + lcdc->fb_panel_data.blank = lcdc_blank; + lcdc->fb_panel_data.unblank = lcdc_unblank; + lcdc->fb_panel_data.fb_data = pdata->fb_data; + lcdc->fb_panel_data.interface_type = MSM_LCDC_INTERFACE; + lcdc->fb_panel_data.shutdown = lcdc_shutdown; + ret = lcdc_hw_init(lcdc); + if (ret) { + pr_err("%s: Cannot initialize the mdp_lcdc\n", __func__); + goto err_hw_init; + } + lcdc->fb_pdev.name = "msm_panel"; + lcdc->fb_pdev.id = pdata->fb_id; + lcdc->fb_pdev.resource = pdata->fb_resource; + lcdc->fb_pdev.num_resources = 1; + lcdc->fb_pdev.dev.platform_data = &lcdc->fb_panel_data; + + + ret = platform_device_register(&lcdc->fb_pdev); + if (ret) { + pr_err("%s: Cannot register msm_panel pdev\n", __func__); + goto err_plat_dev_reg; + } + + pr_info("%s: initialized\n", __func__); +#ifdef CONFIG_PANEL_SELF_REFRESH + if (lcdc->mdp->mdp_dev.overrides & MSM_MDP_RGB_PANEL_SELE_REFRESH) { + ret = icm_init(lcdc); + if (ret) { + pr_err("%s: Cannot init dispaly selfrefresh \n", __func__); + goto err_plat_dev_reg; + } + } +#endif + + return 0; + +err_plat_dev_reg: 
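+ /* added note (not in original patch): the labels below share one unwind path that clears drvdata, releases the clocks in reverse order of acquisition, and frees lcdc */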
+err_hw_init: +#ifdef CONFIG_MSM_MDP40 +err_mdp4_overlay_pipe_alloc: +#endif + platform_set_drvdata(pdev, NULL); + clk_put(lcdc->pad_pclk); +err_get_pad_pclk: + clk_put(lcdc->pclk); +err_get_pclk: + if (lcdc->mdp_pclk) + clk_put(lcdc->mdp_pclk); + clk_put(lcdc->mdp_clk); +err_get_mdp_clk: + kfree(lcdc); + return ret; +} + +static int mdp_lcdc_remove(struct platform_device *pdev) +{ + struct mdp_lcdc_info *lcdc = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + + clk_put(lcdc->pclk); + clk_put(lcdc->pad_pclk); + kfree(lcdc); + + return 0; +} + +static struct platform_driver mdp_lcdc_driver = { + .probe = mdp_lcdc_probe, + .remove = mdp_lcdc_remove, + .driver = { + .name = "msm_mdp_lcdc", + .owner = THIS_MODULE, + }, +}; + +static int mdp_lcdc_add_mdp_device(struct device *dev, + struct class_interface *class_intf) +{ + /* might need locking if mulitple mdp devices */ + if (mdp_dev) + return 0; + mdp_dev = container_of(dev, struct mdp_device, dev); + return platform_driver_register(&mdp_lcdc_driver); +} + +static void mdp_lcdc_remove_mdp_device(struct device *dev, + struct class_interface *class_intf) +{ + /* might need locking if mulitple mdp devices */ + if (dev != &mdp_dev->dev) + return; + platform_driver_unregister(&mdp_lcdc_driver); + mdp_dev = NULL; +} + +static struct class_interface mdp_lcdc_interface = { + .add_dev = &mdp_lcdc_add_mdp_device, + .remove_dev = &mdp_lcdc_remove_mdp_device, +}; + +static int __init mdp_lcdc_init(void) +{ + return register_mdp_client(&mdp_lcdc_interface); +} + +module_init(mdp_lcdc_init); diff --git a/drivers/video/msm/mdp_ppp.c b/drivers/video/msm/mdp_ppp.c index 4ff001f4cbbdd..6aa960a43e31d 100644 --- a/drivers/video/msm/mdp_ppp.c +++ b/drivers/video/msm/mdp_ppp.c @@ -15,40 +15,36 @@ #include #include #include +#include #include +#include +#include +#include +#include #include #include "mdp_hw.h" -#include "mdp_scale_tables.h" +#include "mdp_ppp.h" +#define PPP_DUMP_BLITS 0 + +#define PPP_DEBUG_MSGS 1 +#if PPP_DEBUG_MSGS +#define DLOG(fmt,args...) \ + do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, \ + __LINE__, ##args); } \ + while (0) +#else #define DLOG(x...) do {} while (0) +#endif -#define MDP_DOWNSCALE_BLUR (MDP_DOWNSCALE_MAX + 1) -static int downscale_y_table = MDP_DOWNSCALE_MAX; -static int downscale_x_table = MDP_DOWNSCALE_MAX; - -struct mdp_regs { - uint32_t src0; - uint32_t src1; - uint32_t dst0; - uint32_t dst1; - uint32_t src_cfg; - uint32_t dst_cfg; - uint32_t src_pack; - uint32_t dst_pack; - uint32_t src_rect; - uint32_t dst_rect; - uint32_t src_ystride; - uint32_t dst_ystride; - uint32_t op; - uint32_t src_bpp; - uint32_t dst_bpp; - uint32_t edge; - uint32_t phasex_init; - uint32_t phasey_init; - uint32_t phasex_step; - uint32_t phasey_step; -}; +#define IMG_LEN(rect_h, w, rect_w, bpp) (((rect_h) * w) * bpp) + +#define Y_TO_CRCB_RATIO(format) \ + ((format == MDP_Y_CBCR_H2V2 || format == MDP_Y_CRCB_H2V2) ? 2 :\ + (format == MDP_Y_CBCR_H2V1 || format == MDP_Y_CRCB_H2V1) ? 
1 : 1) + +static struct ion_client *ppp_display_iclient; static uint32_t pack_pattern[] = { PPP_ARRAY0(PACK_PATTERN) @@ -62,18 +58,19 @@ static uint32_t dst_img_cfg[] = { PPP_ARRAY1(CFG, DST) }; -static uint32_t bytes_per_pixel[] = { +static const uint32_t bytes_per_pixel[] = { [MDP_RGB_565] = 2, - [MDP_RGB_888] = 3, [MDP_XRGB_8888] = 4, + [MDP_Y_CBCR_H2V2] = 1, [MDP_ARGB_8888] = 4, + [MDP_RGB_888] = 3, + [MDP_Y_CRCB_H2V2] = 1, + [MDP_YCRYCB_H2V1] = 2, + [MDP_Y_CRCB_H2V1] = 1, + [MDP_Y_CBCR_H2V1] = 1, [MDP_RGBA_8888] = 4, [MDP_BGRA_8888] = 4, - [MDP_Y_CBCR_H2V1] = 1, - [MDP_Y_CBCR_H2V2] = 1, - [MDP_Y_CRCB_H2V1] = 1, - [MDP_Y_CRCB_H2V2] = 1, - [MDP_YCRYCB_H2V1] = 2 + [MDP_RGBX_8888] = 4, }; static uint32_t dst_op_chroma[] = { @@ -88,26 +85,108 @@ static uint32_t bg_op_chroma[] = { PPP_ARRAY1(CHROMA_SAMP, BG) }; -static void rotate_dst_addr_x(struct mdp_blit_req *req, struct mdp_regs *regs) +static DECLARE_WAIT_QUEUE_HEAD(mdp_ppp_waitqueue); +DEFINE_MUTEX(mdp_mutex); + +static uint32_t get_luma_offset(struct mdp_img *img, + struct mdp_rect *rect, uint32_t bpp) +{ +#ifndef CONFIG_MSM_MDP31 + return (rect->x + (rect->y * img->width)) * bpp; +#else + return 0; +#endif +} + +static uint32_t get_chroma_offset(struct mdp_img *img, + struct mdp_rect *rect, uint32_t bpp) +{ +#ifndef CONFIG_MSM_MDP31 + uint32_t compress_v = Y_TO_CRCB_RATIO(img->format); + uint32_t compress_h = 2; + uint32_t offset = 0; + + if (IS_PSEUDOPLNR(img->format)) { + offset = (rect->x / compress_h) * compress_h; + offset += rect->y == 0 ? 0 : + ((rect->y + 1) / compress_v) * img->width; + offset *= bpp; + } + return offset; +#else + return 0; +#endif +} + +static void set_src_region(struct mdp_img *img, struct mdp_rect *rect, + struct ppp_regs *regs) { + regs->src_rect = (rect->h << 16) | (rect->w & 0x1fff); + +#ifdef CONFIG_MSM_MDP31 + regs->src_xy = (rect->y << 16) | (rect->x & 0x1fff); + regs->src_img_sz = (img->height << 16) | (img->width & 0x1fff); +#endif +} + +static inline void set_dst_region(struct mdp_rect *rect, struct ppp_regs *regs) +{ + regs->dst_rect = (rect->h << 16) | (rect->w & 0xfff); + +#ifdef CONFIG_MSM_MDP31 + regs->dst_xy = (rect->y << 16) | (rect->x & 0x1fff); +#endif +} + +static void set_blend_region(struct mdp_img *img, struct mdp_rect *rect, + struct ppp_regs *regs) +{ +#ifdef CONFIG_MSM_MDP31 + uint32_t rect_x = rect->x; + uint32_t rect_y = rect->y; + uint32_t img_w = img->width; + uint32_t img_h = img->height; + + /* HW bug workaround */ + if (img->format == MDP_YCRYCB_H2V1) { + regs->bg0 += (rect_x + (rect_y * img_w)) * regs->bg_bpp; + rect_x = 0; + rect_y = 0; + img_w = rect->w; + img_h = rect->h; + } + + regs->bg_xy = (rect_y << 16) | (rect_x & 0x1fff); + regs->bg_img_sz = (img_h << 16) | (img_w & 0x1fff); +#endif +} + +static void rotate_dst_addr_x(struct mdp_blit_req *req, + struct ppp_regs *regs) +{ +#ifndef CONFIG_MSM_MDP31 regs->dst0 += (req->dst_rect.w - min((uint32_t)16, req->dst_rect.w)) * regs->dst_bpp; regs->dst1 += (req->dst_rect.w - min((uint32_t)16, req->dst_rect.w)) * regs->dst_bpp; +#endif } -static void rotate_dst_addr_y(struct mdp_blit_req *req, struct mdp_regs *regs) +static void rotate_dst_addr_y(struct mdp_blit_req *req, + struct ppp_regs *regs) { +#ifndef CONFIG_MSM_MDP31 regs->dst0 += (req->dst_rect.h - min((uint32_t)16, req->dst_rect.h)) * regs->dst_ystride; regs->dst1 += (req->dst_rect.h - min((uint32_t)16, req->dst_rect.h)) * regs->dst_ystride; +#endif } static void blit_rotate(struct mdp_blit_req *req, - struct mdp_regs *regs) + struct ppp_regs *regs) { if (req->flags 
== MDP_ROT_NOP) return; @@ -126,16 +205,24 @@ static void blit_rotate(struct mdp_blit_req *req, regs->op |= PPP_OP_FLIP_LR; } -static void blit_convert(struct mdp_blit_req *req, struct mdp_regs *regs) +static void blit_convert(struct mdp_blit_req *req, struct ppp_regs *regs) { if (req->src.format == req->dst.format) return; if (IS_RGB(req->src.format) && IS_YCRCB(req->dst.format)) { regs->op |= PPP_OP_CONVERT_RGB2YCBCR | PPP_OP_CONVERT_ON; +#ifdef CONFIG_MSM_MDP31 + /* primary really means set1 */ + regs->op |= PPP_OP_CONVERT_MATRIX_PRIMARY; + regs->csc_cfg = 0x1e; +#endif } else if (IS_YCRCB(req->src.format) && IS_RGB(req->dst.format)) { regs->op |= PPP_OP_CONVERT_YCBCR2RGB | PPP_OP_CONVERT_ON; - if (req->dst.format == MDP_RGB_565) - regs->op |= PPP_OP_CONVERT_MATRIX_SECONDARY; +#ifdef CONFIG_MSM_MDP31 + /* secondary really means set2 */ + regs->op |= PPP_OP_CONVERT_MATRIX_SECONDARY; + regs->csc_cfg = 0; +#endif } } @@ -165,7 +252,7 @@ static uint32_t transp_convert(struct mdp_blit_req *req) } #undef GET_BIT_RANGE -static void blit_blend(struct mdp_blit_req *req, struct mdp_regs *regs) +static void blit_blend(struct mdp_blit_req *req, struct ppp_regs *regs) { /* TRANSP BLEND */ if (req->transp_mask != MDP_TRANSP_NOP) { @@ -190,8 +277,22 @@ static void blit_blend(struct mdp_blit_req *req, struct mdp_regs *regs) req->alpha &= 0xff; /* ALPHA BLEND */ if (HAS_ALPHA(req->src.format)) { - regs->op |= PPP_OP_ROT_ON | PPP_OP_BLEND_ON | - PPP_OP_BLEND_SRCPIXEL_ALPHA; + regs->op |= PPP_OP_ROT_ON | PPP_OP_BLEND_ON; + if (req->flags & MDP_BLEND_FG_PREMULT) { +#ifdef CONFIG_MSM_MDP31 + /* premultiplied alpha: + * bg_alpha = (1 - fg_alpha) + * fg_alpha = 0xff + */ + regs->bg_alpha_sel = PPP_BLEND_BG_USE_ALPHA_SEL | + PPP_BLEND_BG_ALPHA_REVERSE | + PPP_BLEND_BG_SRCPIXEL_ALPHA; + regs->op |= PPP_OP_BLEND_CONSTANT_ALPHA; + req->alpha = 0xff; +#endif + } else { + regs->op |= PPP_OP_BLEND_SRCPIXEL_ALPHA; + } } else if (req->alpha < MDP_ALPHA_NOP) { /* just blend by alpha */ regs->op |= PPP_OP_ROT_ON | PPP_OP_BLEND_ON | @@ -200,254 +301,31 @@ static void blit_blend(struct mdp_blit_req *req, struct mdp_regs *regs) } regs->op |= bg_op_chroma[req->dst.format]; -} -#define ONE_HALF (1LL << 32) -#define ONE (1LL << 33) -#define TWO (2LL << 33) -#define THREE (3LL << 33) -#define FRAC_MASK (ONE - 1) -#define INT_MASK (~FRAC_MASK) - -static int scale_params(uint32_t dim_in, uint32_t dim_out, uint32_t origin, - uint32_t *phase_init, uint32_t *phase_step) -{ - /* to improve precicsion calculations are done in U31.33 and converted - * to U3.29 at the end */ - int64_t k1, k2, k3, k4, tmp; - uint64_t n, d, os, os_p, od, od_p, oreq; - unsigned rpa = 0; - int64_t ip64, delta; - - if (dim_out % 3 == 0) - rpa = !(dim_in % (dim_out / 3)); - - n = ((uint64_t)dim_out) << 34; - d = dim_in; - if (!d) - return -1; - do_div(n, d); - k3 = (n + 1) >> 1; - if ((k3 >> 4) < (1LL << 27) || (k3 >> 4) > (1LL << 31)) { - DLOG("crap bad scale\n"); - return -1; - } - n = ((uint64_t)dim_in) << 34; - d = (uint64_t)dim_out; - if (!d) - return -1; - do_div(n, d); - k1 = (n + 1) >> 1; - k2 = (k1 - ONE) >> 1; - - *phase_init = (int)(k2 >> 4); - k4 = (k3 - ONE) >> 1; - - if (rpa) { - os = ((uint64_t)origin << 33) - ONE_HALF; - tmp = (dim_out * os) + ONE_HALF; - if (!dim_in) - return -1; - do_div(tmp, dim_in); - od = tmp - ONE_HALF; - } else { - os = ((uint64_t)origin << 1) - 1; - od = (((k3 * os) >> 1) + k4); - } - - od_p = od & INT_MASK; - if (od_p != od) - od_p += ONE; - - if (rpa) { - tmp = (dim_in * od_p) + ONE_HALF; - if (!dim_in) - return -1; 
- do_div(tmp, dim_in); - os_p = tmp - ONE_HALF; - } else { - os_p = ((k1 * (od_p >> 33)) + k2); - } - - oreq = (os_p & INT_MASK) - ONE; - - ip64 = os_p - oreq; - delta = ((int64_t)(origin) << 33) - oreq; - ip64 -= delta; - /* limit to valid range before the left shift */ - delta = (ip64 & (1LL << 63)) ? 4 : -4; - delta <<= 33; - while (abs((int)(ip64 >> 33)) > 4) - ip64 += delta; - *phase_init = (int)(ip64 >> 4); - *phase_step = (uint32_t)(k1 >> 4); - return 0; + /* since we always blend src + dst -> dst, copy most of the + * configuration from dest to bg */ + regs->bg0 = regs->dst0; + regs->bg1 = regs->dst1; + regs->bg_cfg = src_img_cfg[req->dst.format]; + regs->bg_bpp = regs->dst_bpp; + regs->bg_pack = pack_pattern[req->dst.format]; + regs->bg_ystride = regs->dst_ystride; + set_blend_region(&req->dst, &req->dst_rect, regs); } -static void load_scale_table(const struct mdp_info *mdp, - struct mdp_table_entry *table, int len) +static int blit_scale(struct mdp_info *mdp, struct mdp_blit_req *req, + struct ppp_regs *regs) { - int i; - for (i = 0; i < len; i++) - mdp_writel(mdp, table[i].val, table[i].reg); -} - -enum { -IMG_LEFT, -IMG_RIGHT, -IMG_TOP, -IMG_BOTTOM, -}; - -static void get_edge_info(uint32_t src, uint32_t src_coord, uint32_t dst, - uint32_t *interp1, uint32_t *interp2, - uint32_t *repeat1, uint32_t *repeat2) { - if (src > 3 * dst) { - *interp1 = 0; - *interp2 = src - 1; - *repeat1 = 0; - *repeat2 = 0; - } else if (src == 3 * dst) { - *interp1 = 0; - *interp2 = src; - *repeat1 = 0; - *repeat2 = 1; - } else if (src > dst && src < 3 * dst) { - *interp1 = -1; - *interp2 = src; - *repeat1 = 1; - *repeat2 = 1; - } else if (src == dst) { - *interp1 = -1; - *interp2 = src + 1; - *repeat1 = 1; - *repeat2 = 2; - } else { - *interp1 = -2; - *interp2 = src + 1; - *repeat1 = 2; - *repeat2 = 2; - } - *interp1 += src_coord; - *interp2 += src_coord; -} - -static int get_edge_cond(struct mdp_blit_req *req, struct mdp_regs *regs) -{ - int32_t luma_interp[4]; - int32_t luma_repeat[4]; - int32_t chroma_interp[4]; - int32_t chroma_bound[4]; - int32_t chroma_repeat[4]; - uint32_t dst_w, dst_h; - - memset(&luma_interp, 0, sizeof(int32_t) * 4); - memset(&luma_repeat, 0, sizeof(int32_t) * 4); - memset(&chroma_interp, 0, sizeof(int32_t) * 4); - memset(&chroma_bound, 0, sizeof(int32_t) * 4); - memset(&chroma_repeat, 0, sizeof(int32_t) * 4); - regs->edge = 0; + struct mdp_rect dst_rect; + memcpy(&dst_rect, &req->dst_rect, sizeof(dst_rect)); if (req->flags & MDP_ROT_90) { - dst_w = req->dst_rect.h; - dst_h = req->dst_rect.w; - } else { - dst_w = req->dst_rect.w; - dst_h = req->dst_rect.h; - } - - if (regs->op & (PPP_OP_SCALE_Y_ON | PPP_OP_SCALE_X_ON)) { - get_edge_info(req->src_rect.h, req->src_rect.y, dst_h, - &luma_interp[IMG_TOP], &luma_interp[IMG_BOTTOM], - &luma_repeat[IMG_TOP], &luma_repeat[IMG_BOTTOM]); - get_edge_info(req->src_rect.w, req->src_rect.x, dst_w, - &luma_interp[IMG_LEFT], &luma_interp[IMG_RIGHT], - &luma_repeat[IMG_LEFT], &luma_repeat[IMG_RIGHT]); - } else { - luma_interp[IMG_LEFT] = req->src_rect.x; - luma_interp[IMG_RIGHT] = req->src_rect.x + req->src_rect.w - 1; - luma_interp[IMG_TOP] = req->src_rect.y; - luma_interp[IMG_BOTTOM] = req->src_rect.y + req->src_rect.h - 1; - luma_repeat[IMG_LEFT] = 0; - luma_repeat[IMG_TOP] = 0; - luma_repeat[IMG_RIGHT] = 0; - luma_repeat[IMG_BOTTOM] = 0; - } - - chroma_interp[IMG_LEFT] = luma_interp[IMG_LEFT]; - chroma_interp[IMG_RIGHT] = luma_interp[IMG_RIGHT]; - chroma_interp[IMG_TOP] = luma_interp[IMG_TOP]; - chroma_interp[IMG_BOTTOM] = 
luma_interp[IMG_BOTTOM]; - - chroma_bound[IMG_LEFT] = req->src_rect.x; - chroma_bound[IMG_RIGHT] = req->src_rect.x + req->src_rect.w - 1; - chroma_bound[IMG_TOP] = req->src_rect.y; - chroma_bound[IMG_BOTTOM] = req->src_rect.y + req->src_rect.h - 1; - - if (IS_YCRCB(req->src.format)) { - chroma_interp[IMG_LEFT] = chroma_interp[IMG_LEFT] >> 1; - chroma_interp[IMG_RIGHT] = (chroma_interp[IMG_RIGHT] + 1) >> 1; - - chroma_bound[IMG_LEFT] = chroma_bound[IMG_LEFT] >> 1; - chroma_bound[IMG_RIGHT] = chroma_bound[IMG_RIGHT] >> 1; - } - - if (req->src.format == MDP_Y_CBCR_H2V2 || - req->src.format == MDP_Y_CRCB_H2V2) { - chroma_interp[IMG_TOP] = (chroma_interp[IMG_TOP] - 1) >> 1; - chroma_interp[IMG_BOTTOM] = (chroma_interp[IMG_BOTTOM] + 1) - >> 1; - chroma_bound[IMG_TOP] = (chroma_bound[IMG_TOP] + 1) >> 1; - chroma_bound[IMG_BOTTOM] = chroma_bound[IMG_BOTTOM] >> 1; + dst_rect.w = req->dst_rect.h; + dst_rect.h = req->dst_rect.w; } - chroma_repeat[IMG_LEFT] = chroma_bound[IMG_LEFT] - - chroma_interp[IMG_LEFT]; - chroma_repeat[IMG_RIGHT] = chroma_interp[IMG_RIGHT] - - chroma_bound[IMG_RIGHT]; - chroma_repeat[IMG_TOP] = chroma_bound[IMG_TOP] - - chroma_interp[IMG_TOP]; - chroma_repeat[IMG_BOTTOM] = chroma_interp[IMG_BOTTOM] - - chroma_bound[IMG_BOTTOM]; - - if (chroma_repeat[IMG_LEFT] < 0 || chroma_repeat[IMG_LEFT] > 3 || - chroma_repeat[IMG_RIGHT] < 0 || chroma_repeat[IMG_RIGHT] > 3 || - chroma_repeat[IMG_TOP] < 0 || chroma_repeat[IMG_TOP] > 3 || - chroma_repeat[IMG_BOTTOM] < 0 || chroma_repeat[IMG_BOTTOM] > 3 || - luma_repeat[IMG_LEFT] < 0 || luma_repeat[IMG_LEFT] > 3 || - luma_repeat[IMG_RIGHT] < 0 || luma_repeat[IMG_RIGHT] > 3 || - luma_repeat[IMG_TOP] < 0 || luma_repeat[IMG_TOP] > 3 || - luma_repeat[IMG_BOTTOM] < 0 || luma_repeat[IMG_BOTTOM] > 3) - return -1; - - regs->edge |= (chroma_repeat[IMG_LEFT] & 3) << MDP_LEFT_CHROMA; - regs->edge |= (chroma_repeat[IMG_RIGHT] & 3) << MDP_RIGHT_CHROMA; - regs->edge |= (chroma_repeat[IMG_TOP] & 3) << MDP_TOP_CHROMA; - regs->edge |= (chroma_repeat[IMG_BOTTOM] & 3) << MDP_BOTTOM_CHROMA; - regs->edge |= (luma_repeat[IMG_LEFT] & 3) << MDP_LEFT_LUMA; - regs->edge |= (luma_repeat[IMG_RIGHT] & 3) << MDP_RIGHT_LUMA; - regs->edge |= (luma_repeat[IMG_TOP] & 3) << MDP_TOP_LUMA; - regs->edge |= (luma_repeat[IMG_BOTTOM] & 3) << MDP_BOTTOM_LUMA; - return 0; -} - -static int blit_scale(const struct mdp_info *mdp, struct mdp_blit_req *req, - struct mdp_regs *regs) -{ - uint32_t phase_init_x, phase_init_y, phase_step_x, phase_step_y; - uint32_t scale_factor_x, scale_factor_y; - uint32_t downscale; - uint32_t dst_w, dst_h; - - if (req->flags & MDP_ROT_90) { - dst_w = req->dst_rect.h; - dst_h = req->dst_rect.w; - } else { - dst_w = req->dst_rect.w; - dst_h = req->dst_rect.h; - } - if ((req->src_rect.w == dst_w) && (req->src_rect.h == dst_h) && - !(req->flags & MDP_BLUR)) { + if ((req->src_rect.w == dst_rect.w) && (req->src_rect.h == dst_rect.h) + && !(req->flags & MDP_BLUR)) { regs->phasex_init = 0; regs->phasey_init = 0; regs->phasex_step = 0; @@ -455,73 +333,30 @@ static int blit_scale(const struct mdp_info *mdp, struct mdp_blit_req *req, return 0; } - if (scale_params(req->src_rect.w, dst_w, 1, &phase_init_x, - &phase_step_x) || - scale_params(req->src_rect.h, dst_h, 1, &phase_init_y, - &phase_step_y)) + if (mdp_ppp_cfg_scale(mdp, regs, &req->src_rect, &dst_rect, + req->src.format, req->dst.format)) { + DLOG("crap, bad scale\n"); return -1; - - scale_factor_x = (dst_w * 10) / req->src_rect.w; - scale_factor_y = (dst_h * 10) / req->src_rect.h; - - if (scale_factor_x > 8) - 
downscale = MDP_DOWNSCALE_PT8TO1; - else if (scale_factor_x > 6) - downscale = MDP_DOWNSCALE_PT6TOPT8; - else if (scale_factor_x > 4) - downscale = MDP_DOWNSCALE_PT4TOPT6; - else - downscale = MDP_DOWNSCALE_PT2TOPT4; - if (downscale != downscale_x_table) { - load_scale_table(mdp, mdp_downscale_x_table[downscale], 64); - downscale_x_table = downscale; - } - - if (scale_factor_y > 8) - downscale = MDP_DOWNSCALE_PT8TO1; - else if (scale_factor_y > 6) - downscale = MDP_DOWNSCALE_PT6TOPT8; - else if (scale_factor_y > 4) - downscale = MDP_DOWNSCALE_PT4TOPT6; - else - downscale = MDP_DOWNSCALE_PT2TOPT4; - if (downscale != downscale_y_table) { - load_scale_table(mdp, mdp_downscale_y_table[downscale], 64); - downscale_y_table = downscale; } - regs->phasex_init = phase_init_x; - regs->phasey_init = phase_init_y; - regs->phasex_step = phase_step_x; - regs->phasey_step = phase_step_y; regs->op |= (PPP_OP_SCALE_Y_ON | PPP_OP_SCALE_X_ON); return 0; - } -static void blit_blur(const struct mdp_info *mdp, struct mdp_blit_req *req, - struct mdp_regs *regs) +static void blit_blur(struct mdp_info *mdp, struct mdp_blit_req *req, + struct ppp_regs *regs) { + int ret; if (!(req->flags & MDP_BLUR)) return; - if (!(downscale_x_table == MDP_DOWNSCALE_BLUR && - downscale_y_table == MDP_DOWNSCALE_BLUR)) { - load_scale_table(mdp, mdp_gaussian_blur_table, 128); - downscale_x_table = MDP_DOWNSCALE_BLUR; - downscale_y_table = MDP_DOWNSCALE_BLUR; - } + ret = mdp_ppp_load_blur(mdp); + if (ret) + return; regs->op |= (PPP_OP_SCALE_Y_ON | PPP_OP_SCALE_X_ON); } - -#define IMG_LEN(rect_h, w, rect_w, bpp) (((rect_h) * w) * bpp) - -#define Y_TO_CRCB_RATIO(format) \ - ((format == MDP_Y_CBCR_H2V2 || format == MDP_Y_CRCB_H2V2) ? 2 :\ - (format == MDP_Y_CBCR_H2V1 || format == MDP_Y_CRCB_H2V1) ? 
1 : 1) - static void get_len(struct mdp_img *img, struct mdp_rect *rect, uint32_t bpp, uint32_t *len0, uint32_t *len1) { @@ -534,7 +369,7 @@ static void get_len(struct mdp_img *img, struct mdp_rect *rect, uint32_t bpp, static int valid_src_dst(unsigned long src_start, unsigned long src_len, unsigned long dst_start, unsigned long dst_len, - struct mdp_blit_req *req, struct mdp_regs *regs) + struct mdp_blit_req *req, struct ppp_regs *regs) { unsigned long src_min_ok = src_start; unsigned long src_max_ok = src_start + src_len; @@ -574,83 +409,153 @@ static int valid_src_dst(unsigned long src_start, unsigned long src_len, return 1; } - -static void flush_imgs(struct mdp_blit_req *req, struct mdp_regs *regs, +static void flush_imgs(struct mdp_blit_req *req, struct ppp_regs *regs, struct file *src_file, struct file *dst_file) { +#ifdef CONFIG_ANDROID_PMEM + uint32_t src0_len, src1_len, dst0_len, dst1_len; + + if (!(req->flags & MDP_BLIT_NON_CACHED)) { + /* flush src images to memory before dma to mdp */ + get_len(&req->src, &req->src_rect, regs->src_bpp, &src0_len, + &src1_len); + flush_pmem_file(src_file, req->src.offset, src0_len); + if (IS_PSEUDOPLNR(req->src.format)) + flush_pmem_file(src_file, req->src.offset + src0_len, + src1_len); + + /* flush dst images */ + get_len(&req->dst, &req->dst_rect, regs->dst_bpp, &dst0_len, + &dst1_len); + flush_pmem_file(dst_file, req->dst.offset, dst0_len); + if (IS_PSEUDOPLNR(req->dst.format)) + flush_pmem_file(dst_file, req->dst.offset + dst0_len, + dst1_len); + } +#endif } -static void get_chroma_addr(struct mdp_img *img, struct mdp_rect *rect, - uint32_t base, uint32_t bpp, uint32_t cfg, - uint32_t *addr, uint32_t *ystride) +static uint32_t get_chroma_base(struct mdp_img *img, uint32_t base, + uint32_t bpp) { - uint32_t compress_v = Y_TO_CRCB_RATIO(img->format); - uint32_t compress_h = 2; - uint32_t offset; + uint32_t addr = 0; - if (IS_PSEUDOPLNR(img->format)) { - offset = (rect->x / compress_h) * compress_h; - offset += rect->y == 0 ? 
0 : - ((rect->y + 1) / compress_v) * img->width; - *addr = base + (img->width * img->height * bpp); - *addr += offset * bpp; - *ystride |= *ystride << 16; - } else { - *addr = 0; - } + if (IS_PSEUDOPLNR(img->format)) + addr = base + (img->width * img->height * bpp); + return addr; +} + +int mdp_get_bytes_per_pixel(int format) +{ + if (format < 0 || format >= MDP_IMGTYPE_LIMIT) + return -1; + return bytes_per_pixel[format]; } +#if PPP_DUMP_BLITS +#define mdp_writel_dbg(mdp, val, reg) do { \ + pr_info("%s: writing 0x%08x=0x%08x\n", __func__, (reg), (val));\ + mdp_writel((mdp), (val), (reg)); \ + } while (0) +#else +#define mdp_writel_dbg(mdp, val, reg) mdp_writel((mdp), (val), (reg)) +#endif + + static int send_blit(const struct mdp_info *mdp, struct mdp_blit_req *req, - struct mdp_regs *regs, struct file *src_file, + struct ppp_regs *regs, struct file *src_file, struct file *dst_file) { - mdp_writel(mdp, 1, 0x060); - mdp_writel(mdp, regs->src_rect, PPP_ADDR_SRC_ROI); - mdp_writel(mdp, regs->src0, PPP_ADDR_SRC0); - mdp_writel(mdp, regs->src1, PPP_ADDR_SRC1); - mdp_writel(mdp, regs->src_ystride, PPP_ADDR_SRC_YSTRIDE); - mdp_writel(mdp, regs->src_cfg, PPP_ADDR_SRC_CFG); - mdp_writel(mdp, regs->src_pack, PPP_ADDR_SRC_PACK_PATTERN); - - mdp_writel(mdp, regs->op, PPP_ADDR_OPERATION); - mdp_writel(mdp, regs->phasex_init, PPP_ADDR_PHASEX_INIT); - mdp_writel(mdp, regs->phasey_init, PPP_ADDR_PHASEY_INIT); - mdp_writel(mdp, regs->phasex_step, PPP_ADDR_PHASEX_STEP); - mdp_writel(mdp, regs->phasey_step, PPP_ADDR_PHASEY_STEP); - - mdp_writel(mdp, (req->alpha << 24) | (req->transp_mask & 0xffffff), +#if 0 + mdp_writel_dbg(mdp, 1, MDP_PPP_CMD_MODE); +#endif + mdp_writel_dbg(mdp, regs->src_rect, PPP_ADDR_SRC_ROI); + mdp_writel_dbg(mdp, regs->src0, PPP_ADDR_SRC0); + mdp_writel_dbg(mdp, regs->src1, PPP_ADDR_SRC1); + mdp_writel_dbg(mdp, regs->src_ystride, PPP_ADDR_SRC_YSTRIDE); + mdp_writel_dbg(mdp, regs->src_cfg, PPP_ADDR_SRC_CFG); + mdp_writel_dbg(mdp, regs->src_pack, PPP_ADDR_SRC_PACK_PATTERN); + + mdp_writel_dbg(mdp, regs->op, PPP_ADDR_OPERATION); + mdp_writel_dbg(mdp, regs->phasex_init, PPP_ADDR_PHASEX_INIT); + mdp_writel_dbg(mdp, regs->phasey_init, PPP_ADDR_PHASEY_INIT); + mdp_writel_dbg(mdp, regs->phasex_step, PPP_ADDR_PHASEX_STEP); + mdp_writel_dbg(mdp, regs->phasey_step, PPP_ADDR_PHASEY_STEP); + +#ifdef CONFIG_MSM_MDP31 + mdp_writel_dbg(mdp, regs->scale_cfg, MDP_PPP_SCALE_CONFIG); + mdp_writel_dbg(mdp, regs->csc_cfg, MDP_PPP_CSC_CONFIG); + mdp_writel_dbg(mdp, regs->src_xy, MDP_PPP_SRC_XY); + mdp_writel_dbg(mdp, regs->src_img_sz, MDP_PPP_SRC_IMAGE_SIZE); + mdp_writel_dbg(mdp, regs->dst_xy, MDP_PPP_OUT_XY); +#else + /* no edge conditions to set for MDP 3.1 */ + mdp_writel_dbg(mdp, regs->edge, PPP_ADDR_EDGE); +#endif + + mdp_writel_dbg(mdp, (req->alpha << 24) | (req->transp_mask & 0xffffff), PPP_ADDR_ALPHA_TRANSP); - mdp_writel(mdp, regs->dst_cfg, PPP_ADDR_DST_CFG); - mdp_writel(mdp, regs->dst_pack, PPP_ADDR_DST_PACK_PATTERN); - mdp_writel(mdp, regs->dst_rect, PPP_ADDR_DST_ROI); - mdp_writel(mdp, regs->dst0, PPP_ADDR_DST0); - mdp_writel(mdp, regs->dst1, PPP_ADDR_DST1); - mdp_writel(mdp, regs->dst_ystride, PPP_ADDR_DST_YSTRIDE); + mdp_writel_dbg(mdp, regs->dst_cfg, PPP_ADDR_DST_CFG); + mdp_writel_dbg(mdp, regs->dst_pack, PPP_ADDR_DST_PACK_PATTERN); + mdp_writel_dbg(mdp, regs->dst_rect, PPP_ADDR_DST_ROI); + mdp_writel_dbg(mdp, regs->dst0, PPP_ADDR_DST0); + mdp_writel_dbg(mdp, regs->dst1, PPP_ADDR_DST1); + mdp_writel_dbg(mdp, regs->dst_ystride, PPP_ADDR_DST_YSTRIDE); - mdp_writel(mdp, regs->edge, 
PPP_ADDR_EDGE); if (regs->op & PPP_OP_BLEND_ON) { - mdp_writel(mdp, regs->dst0, PPP_ADDR_BG0); - mdp_writel(mdp, regs->dst1, PPP_ADDR_BG1); - mdp_writel(mdp, regs->dst_ystride, PPP_ADDR_BG_YSTRIDE); - mdp_writel(mdp, src_img_cfg[req->dst.format], PPP_ADDR_BG_CFG); - mdp_writel(mdp, pack_pattern[req->dst.format], - PPP_ADDR_BG_PACK_PATTERN); + mdp_writel_dbg(mdp, regs->bg0, PPP_ADDR_BG0); + mdp_writel_dbg(mdp, regs->bg1, PPP_ADDR_BG1); + mdp_writel_dbg(mdp, regs->bg_ystride, PPP_ADDR_BG_YSTRIDE); + mdp_writel_dbg(mdp, regs->bg_cfg, PPP_ADDR_BG_CFG); + mdp_writel_dbg(mdp, regs->bg_pack, PPP_ADDR_BG_PACK_PATTERN); +#ifdef CONFIG_MSM_MDP31 + mdp_writel_dbg(mdp, regs->bg_xy, MDP_PPP_BG_XY); + mdp_writel_dbg(mdp, regs->bg_img_sz, MDP_PPP_BG_IMAGE_SIZE); + mdp_writel_dbg(mdp, regs->bg_alpha_sel, + MDP_PPP_BLEND_BG_ALPHA_SEL); +#endif } flush_imgs(req, regs, src_file, dst_file); - mdp_writel(mdp, 0x1000, MDP_DISPLAY0_START); + mdp_writel_dbg(mdp, 0x1000, MDP_DISPLAY0_START); return 0; } -int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req, +#if PPP_DUMP_BLITS +static void mdp_dump_blit(struct mdp_blit_req *req) +{ + pr_info("%s: src: w=%d h=%d f=0x%x offs=0x%x mem_id=%d\n", __func__, + req->src.width, req->src.height, req->src.format, + req->src.offset, req->src.memory_id); + pr_info("%s: dst: w=%d h=%d f=0x%x offs=0x%x mem_id=%d\n", __func__, + req->dst.width, req->dst.height, req->dst.format, + req->dst.offset, req->dst.memory_id); + pr_info("%s: src_rect: x=%d y=%d w=%d h=%d\n", __func__, + req->src_rect.x, req->src_rect.y, req->src_rect.w, + req->src_rect.h); + pr_info("%s: dst_rect: x=%d y=%d w=%d h=%d\n", __func__, + req->dst_rect.x, req->dst_rect.y, req->dst_rect.w, + req->dst_rect.h); + pr_info("%s: alpha=0x%08x\n", __func__, req->alpha); + pr_info("%s: transp_max=0x%08x\n", __func__, req->transp_mask); + pr_info("%s: flags=%08x\n", __func__, req->flags); +} +#endif + +static int process_blit(struct mdp_info *mdp, struct mdp_blit_req *req, struct file *src_file, unsigned long src_start, unsigned long src_len, struct file *dst_file, unsigned long dst_start, unsigned long dst_len) { - struct mdp_regs regs = {0}; + struct ppp_regs regs = {0}; + uint32_t luma_base; + +#if PPP_DUMP_BLITS + mdp_dump_blit(req); +#endif if (unlikely(req->src.format >= MDP_IMGTYPE_LIMIT || req->dst.format >= MDP_IMGTYPE_LIMIT)) { - printk(KERN_ERR "mpd_ppp: img is of wrong format\n"); + printk(KERN_ERR "mdp_ppp: img is of wrong format\n"); return -EINVAL; } @@ -658,7 +563,15 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req, req->src_rect.y > req->src.height || req->dst_rect.x > req->dst.width || req->dst_rect.y > req->dst.height)) { - printk(KERN_ERR "mpd_ppp: img rect is outside of img!\n"); + printk(KERN_ERR "mdp_ppp: img rect is outside of img!\n"); + return -EINVAL; + } + + if (unlikely(req->src_rect.x + req->src_rect.w > req->src.width || + req->src_rect.y + req->src_rect.h > req->src.height || + req->dst_rect.x + req->dst_rect.w > req->dst.width || + req->dst_rect.y + req->dst_rect.h > req->dst.height)) { + printk(KERN_ERR "mdp_ppp: img rect extends outside of img!\n"); return -EINVAL; } @@ -666,35 +579,35 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req, regs.src_cfg = src_img_cfg[req->src.format]; regs.src_cfg |= (req->src_rect.x & 0x1) ? PPP_SRC_BPP_ROI_ODD_X : 0; regs.src_cfg |= (req->src_rect.y & 0x1) ? 
PPP_SRC_BPP_ROI_ODD_Y : 0; - regs.src_rect = (req->src_rect.h << 16) | req->src_rect.w; regs.src_pack = pack_pattern[req->src.format]; /* set the dest image configuration */ regs.dst_cfg = dst_img_cfg[req->dst.format] | PPP_DST_OUT_SEL_AXI; - regs.dst_rect = (req->dst_rect.h << 16) | req->dst_rect.w; regs.dst_pack = pack_pattern[req->dst.format]; /* set src, bpp, start pixel and ystride */ - regs.src_bpp = bytes_per_pixel[req->src.format]; - regs.src0 = src_start + req->src.offset; + regs.src_bpp = mdp_get_bytes_per_pixel(req->src.format); + luma_base = src_start + req->src.offset; + regs.src0 = luma_base + + get_luma_offset(&req->src, &req->src_rect, regs.src_bpp); + regs.src1 = get_chroma_base(&req->src, luma_base, regs.src_bpp); + regs.src1 += get_chroma_offset(&req->src, &req->src_rect, regs.src_bpp); regs.src_ystride = req->src.width * regs.src_bpp; - get_chroma_addr(&req->src, &req->src_rect, regs.src0, regs.src_bpp, - regs.src_cfg, ®s.src1, ®s.src_ystride); - regs.src0 += (req->src_rect.x + (req->src_rect.y * req->src.width)) * - regs.src_bpp; + set_src_region(&req->src, &req->src_rect, ®s); /* set dst, bpp, start pixel and ystride */ - regs.dst_bpp = bytes_per_pixel[req->dst.format]; - regs.dst0 = dst_start + req->dst.offset; + regs.dst_bpp = mdp_get_bytes_per_pixel(req->dst.format); + luma_base = dst_start + req->dst.offset; + regs.dst0 = luma_base + + get_luma_offset(&req->dst, &req->dst_rect, regs.dst_bpp); + regs.dst1 = get_chroma_base(&req->dst, luma_base, regs.dst_bpp); + regs.dst1 += get_chroma_offset(&req->dst, &req->dst_rect, regs.dst_bpp); regs.dst_ystride = req->dst.width * regs.dst_bpp; - get_chroma_addr(&req->dst, &req->dst_rect, regs.dst0, regs.dst_bpp, - regs.dst_cfg, ®s.dst1, ®s.dst_ystride); - regs.dst0 += (req->dst_rect.x + (req->dst_rect.y * req->dst.width)) * - regs.dst_bpp; + set_dst_region(&req->dst_rect, ®s); if (!valid_src_dst(src_start, src_len, dst_start, dst_len, req, ®s)) { - printk(KERN_ERR "mpd_ppp: final src or dst location is " + printk(KERN_ERR "mdp_ppp: final src or dst location is " "invalid, are you trying to make an image too large " "or to place it outside the screen?\n"); return -EINVAL; @@ -708,7 +621,7 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req, regs.op |= PPP_OP_DITHER_EN; blit_blend(req, ®s); if (blit_scale(mdp, req, ®s)) { - printk(KERN_ERR "mpd_ppp: error computing scale for img.\n"); + printk(KERN_ERR "mdp_ppp: error computing scale for img.\n"); return -EINVAL; } blit_blur(mdp, req, ®s); @@ -722,9 +635,220 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req, req->dst_rect.x = req->dst_rect.x & (~0x1); req->dst_rect.w = req->dst_rect.w & (~0x1); } - if (get_edge_cond(req, ®s)) + + if (mdp_ppp_cfg_edge_cond(req, ®s)) return -EINVAL; + /* for simplicity, always write the chroma stride */ + regs.src_ystride &= 0x3fff; + regs.src_ystride |= regs.src_ystride << 16; + regs.dst_ystride &= 0x3fff; + regs.dst_ystride |= regs.dst_ystride << 16; + regs.bg_ystride &= 0x3fff; + regs.bg_ystride |= regs.bg_ystride << 16; + +#if PPP_DUMP_BLITS + pr_info("%s: sending blit\n", __func__); +#endif send_blit(mdp, req, ®s, src_file, dst_file); return 0; } + +#define mdp_dump_register(mdp, reg) \ + printk(# reg ": %08x\n", mdp_readl((mdp), (reg))) + +void mdp_ppp_dump_debug(const struct mdp_info *mdp) +{ + mdp_dump_register(mdp, MDP_TFETCH_STATUS); + mdp_dump_register(mdp, MDP_TFETCH_TILE_COUNT); + mdp_dump_register(mdp, MDP_TFETCH_FETCH_COUNT); + mdp_dump_register(mdp, MDP_BGTFETCH_STATUS); + 
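/* this dump runs from mdp_ppp_wait() when a blit fails to complete;
+	 * the reads below cover the background fetch, scaler/blender and
+	 * MDP interrupt state */
+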
mdp_dump_register(mdp, MDP_BGTFETCH_TILE_COUNT); + mdp_dump_register(mdp, MDP_BGTFETCH_FETCH_COUNT); + mdp_dump_register(mdp, MDP_PPP_SCALE_STATUS); + mdp_dump_register(mdp, MDP_PPP_BLEND_STATUS); + mdp_dump_register(mdp, MDP_INTR_STATUS); + mdp_dump_register(mdp, MDP_INTR_ENABLE); +} + +static int mdp_ppp_wait(struct mdp_info *mdp) +{ + int ret; + + ret = mdp_wait(mdp, DL0_ROI_DONE, &mdp_ppp_waitqueue); + if (ret) + mdp_ppp_dump_debug(mdp); + return ret; +} + +int get_img(struct mdp_img *img, struct mdp_blit_req *req, + struct fb_info *info, + unsigned long *start, unsigned long *len, + struct file** filep, struct ion_handle **ihdlp) +{ + int put_needed, ret = 0; + struct file *file; +#ifdef CONFIG_ION_MSM + struct msmfb_info *msmfb = (struct msmfb_info *)info->par; +#else + unsigned long vstart; +#endif + + if (img->memory_id & 0x40000000) + { + struct fb_info *fb = registered_fb[img->memory_id & 0x0000FFFF]; + if (fb) + { + *start = fb->fix.smem_start; + *len = fb->fix.smem_len; + } + *filep = NULL; + return 0; + } + + if (req->flags & MDP_MEMORY_ID_TYPE_FB) { + file = fget_light(img->memory_id, &put_needed); + if (file == NULL) + return -1; + + if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) { + *start = info->fix.smem_start; + *len = info->fix.smem_len; + fput_light(file, put_needed); + return 0; + } else + fput_light(file, put_needed); + } + +#ifdef CONFIG_ION_MSM + *ihdlp = ion_import_fd(msmfb->iclient, img->memory_id); + if (IS_ERR_OR_NULL(*ihdlp)) + return -1; + + if (!ion_phys(msmfb->iclient, *ihdlp, start, (size_t *) len)) + return 0; + else + return -1; +#else + if (!get_pmem_file(img->memory_id, start, &vstart, len, filep)) + return 0; + else + return -1; +#endif +} + +void put_img(struct file *p_src_file, struct ion_handle *p_ihdl) +{ +#ifdef CONFIG_ION_MSM + if (!IS_ERR_OR_NULL(p_ihdl)) + ion_free(ppp_display_iclient, p_ihdl); +#else + if (p_src_file) + put_pmem_file(p_src_file); +#endif +} + +static void dump_req(struct mdp_blit_req *req, + unsigned long src_start, unsigned long src_len, + unsigned long dst_start, unsigned long dst_len) +{ + pr_err("flags: 0x%x\n", req->flags); + pr_err("src_start: 0x%08lx\n", src_start); + pr_err("src_len: 0x%08lx\n", src_len); + pr_err("src.offset: 0x%x\n", req->src.offset); + pr_err("src.format: 0x%x\n", req->src.format); + pr_err("src.width: %d\n", req->src.width); + pr_err("src.height: %d\n", req->src.height); + pr_err("src_rect.x: %d\n", req->src_rect.x); + pr_err("src_rect.y: %d\n", req->src_rect.y); + pr_err("src_rect.w: %d\n", req->src_rect.w); + pr_err("src_rect.h: %d\n", req->src_rect.h); + + pr_err("dst_start: 0x%08lx\n", dst_start); + pr_err("dst_len: 0x%08lx\n", dst_len); + pr_err("dst.offset: 0x%x\n", req->dst.offset); + pr_err("dst.format: 0x%x\n", req->dst.format); + pr_err("dst.width: %d\n", req->dst.width); + pr_err("dst.height: %d\n", req->dst.height); + pr_err("dst_rect.x: %d\n", req->dst_rect.x); + pr_err("dst_rect.y: %d\n", req->dst_rect.y); + pr_err("dst_rect.w: %d\n", req->dst_rect.w); + pr_err("dst_rect.h: %d\n", req->dst_rect.h); +} + +int mdp_ppp_blit_and_wait(struct mdp_info *mdp, struct mdp_blit_req *req, + struct file *src_file, unsigned long src_start, unsigned long src_len, + struct file *dst_file, unsigned long dst_start, unsigned long dst_len) +{ + int ret; + mdp->enable_irq(mdp, DL0_ROI_DONE); + ret = process_blit(mdp, req, src_file, src_start, src_len, + dst_file, dst_start, dst_len); + if (unlikely(ret)) { + mdp->disable_irq(mdp, DL0_ROI_DONE); + return ret; + } + ret = mdp_ppp_wait(mdp); + if 
(unlikely(ret)) { + printk(KERN_ERR "%s: failed!\n", __func__); + pr_err("original request:\n"); + dump_req(mdp->req, src_start, src_len, dst_start, dst_len); + pr_err("dead request:\n"); + dump_req(req, src_start, src_len, dst_start, dst_len); + BUG(); + return ret; + } + return 0; +} + +int mdp_ppp_blit(struct mdp_info *mdp, struct fb_info *fb, + struct mdp_blit_req *req) +{ + int ret; + unsigned long src_start = 0, src_len = 0, dst_start = 0, dst_len = 0; + struct file *src_file = 0, *dst_file = 0; + struct ion_handle *src_ihdl = NULL; + struct ion_handle *dst_ihdl = NULL; + struct msmfb_info *msmfb = fb->par; + ppp_display_iclient = msmfb->iclient; + + ret = mdp_ppp_validate_blit(mdp, req); + if (ret) + return ret; + + /* do this first so that if this fails, the caller can always + * safely call put_img */ + if (unlikely(get_img(&req->src, req, fb, &src_start, &src_len, &src_file, &src_ihdl))) { + printk(KERN_ERR "mdp_ppp: could not retrieve src image from " + "memory\n"); + return -EINVAL; + } + + if (unlikely(get_img(&req->dst, req, fb, &dst_start, &dst_len, &dst_file, &dst_ihdl))) { + printk(KERN_ERR "mdp_ppp: could not retrieve dst image from " + "memory\n"); + put_img(src_file, src_ihdl); + return -EINVAL; + } + mutex_lock(&mdp_mutex); + + /* transp_masking unimplemented */ + req->transp_mask = MDP_TRANSP_NOP; + mdp->req = req; + + ret = mdp_ppp_do_blit(mdp, req, src_file, src_start, src_len, + dst_file, dst_start, dst_len); + + put_img(src_file, src_ihdl); + put_img(dst_file, dst_ihdl); + mutex_unlock(&mdp_mutex); + return ret; +} + +void mdp_ppp_handle_isr(struct mdp_info *mdp, uint32_t mask) +{ + if (mask & DL0_ROI_DONE) + wake_up(&mdp_ppp_waitqueue); +} + + diff --git a/drivers/video/msm/mdp_ppp.h b/drivers/video/msm/mdp_ppp.h new file mode 100644 index 0000000000000..03a1506807b98 --- /dev/null +++ b/drivers/video/msm/mdp_ppp.h @@ -0,0 +1,118 @@ +/* drivers/video/msm/mdp_ppp.h + * + * Copyright (C) 2009 Google Incorporated + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _VIDEO_MSM_MDP_PPP_H_ +#define _VIDEO_MSM_MDP_PPP_H_ + +#include + +struct ppp_regs { + uint32_t src0; + uint32_t src1; + uint32_t dst0; + uint32_t dst1; + uint32_t src_cfg; + uint32_t dst_cfg; + uint32_t src_pack; + uint32_t dst_pack; + uint32_t src_rect; + uint32_t dst_rect; + uint32_t src_ystride; + uint32_t dst_ystride; + uint32_t op; + uint32_t src_bpp; + uint32_t dst_bpp; + uint32_t edge; + uint32_t phasex_init; + uint32_t phasey_init; + uint32_t phasex_step; + uint32_t phasey_step; + + uint32_t bg0; + uint32_t bg1; + uint32_t bg_cfg; + uint32_t bg_bpp; + uint32_t bg_pack; + uint32_t bg_ystride; + +#ifdef CONFIG_MSM_MDP31 + uint32_t src_xy; + uint32_t src_img_sz; + uint32_t dst_xy; + uint32_t bg_xy; + uint32_t bg_img_sz; + uint32_t bg_alpha_sel; + + uint32_t scale_cfg; + uint32_t csc_cfg; +#endif +}; + +struct mdp_info; +struct mdp_rect; +struct mdp_blit_req; +struct fb_info; + +#ifdef CONFIG_FB_MSM_MDP_PPP +int mdp_get_bytes_per_pixel(int format); +int mdp_ppp_blit(struct mdp_info *mdp, struct fb_info *fb, + struct mdp_blit_req *req); +void mdp_ppp_handle_isr(struct mdp_info *mdp, uint32_t mask); +int mdp_ppp_blit_and_wait(struct mdp_info *mdp, struct mdp_blit_req *req, + struct file *src_file, unsigned long src_start, + unsigned long src_len, struct file *dst_file, + unsigned long dst_start, unsigned long dst_len); + +/* these must be provided by h/w specific ppp files */ +void mdp_ppp_init_scale(struct mdp_info *mdp); +int mdp_ppp_cfg_scale(struct mdp_info *mdp, struct ppp_regs *regs, + struct mdp_rect *src_rect, struct mdp_rect *dst_rect, + uint32_t src_format, uint32_t dst_format); +int mdp_ppp_load_blur(struct mdp_info *mdp); +int mdp_ppp_cfg_edge_cond(struct mdp_blit_req *req, struct ppp_regs *regs); +int mdp_ppp_validate_blit(struct mdp_info *mdp, struct mdp_blit_req *req); +int mdp_ppp_do_blit(struct mdp_info *mdp, struct mdp_blit_req *req, + struct file *src_file, unsigned long src_start, + unsigned long src_len, struct file *dst_file, + unsigned long dst_start, unsigned long dst_len); + +#else + +static inline int mdp_get_bytes_per_pixel(int format) { return -1; } +static inline int mdp_ppp_blit(struct mdp_info *mdp, struct fb_info *fb, + struct mdp_blit_req *req) { return -EINVAL; } +static inline void mdp_ppp_handle_isr(struct mdp_info *mdp, uint32_t mask) {} +static inline int mdp_ppp_blit_and_wait(struct mdp_info *mdp, + struct mdp_blit_req *req, struct file *src_file, + unsigned long src_start, unsigned long src_len, + struct file *dst_file, unsigned long dst_start, + unsigned long dst_len) { return 0; } + +static inline void mdp_ppp_init_scale(struct mdp_info *mdp) {} +static inline int mdp_ppp_cfg_scale(struct mdp_info *mdp, struct ppp_regs *regs, + struct mdp_rect *src_rect, struct mdp_rect *dst_rect, + uint32_t src_format, uint32_t dst_format) { return 0; } +static inline int mdp_ppp_load_blur(struct mdp_info *mdp) { return 0; } +static inline int mdp_ppp_cfg_edge_cond(struct mdp_blit_req *req, struct ppp_regs *regs) { return 0; } +static inline int mdp_ppp_validate_blit(struct mdp_info *mdp, struct mdp_blit_req *req) { return -EINVAL; } +static inline int mdp_ppp_do_blit(struct mdp_info *mdp, + struct mdp_blit_req *req, + struct file *src_file, unsigned long src_start, + unsigned long src_len, struct file *dst_file, + unsigned long dst_start, unsigned long dst_len) { return 0; } + + +#endif /* CONFIG_FB_MSM_MDP_PPP */ + +#endif /* _VIDEO_MSM_MDP_PPP_H_ */ diff --git a/drivers/video/msm/mdp_scale_tables.c b/drivers/video/msm/mdp_ppp22.c similarity 
index 65% rename from drivers/video/msm/mdp_scale_tables.c rename to drivers/video/msm/mdp_ppp22.c index 604783b2e17c9..dc4cc2794c052 100644 --- a/drivers/video/msm/mdp_scale_tables.c +++ b/drivers/video/msm/mdp_ppp22.c @@ -1,4 +1,4 @@ -/* drivers/video/msm_fb/mdp_scale_tables.c +/* drivers/video/msm/mdp_ppp22.c * * Copyright (C) 2007 QUALCOMM Incorporated * Copyright (C) 2007 Google Incorporated @@ -13,10 +13,33 @@ * GNU General Public License for more details. */ -#include "mdp_scale_tables.h" +#include +#include +#include + #include "mdp_hw.h" +#include "mdp_ppp.h" + +struct mdp_table_entry { + uint32_t reg; + uint32_t val; +}; + +enum { + MDP_DOWNSCALE_PT2TOPT4, + MDP_DOWNSCALE_PT4TOPT6, + MDP_DOWNSCALE_PT6TOPT8, + MDP_DOWNSCALE_PT8TO1, + MDP_DOWNSCALE_MAX, + + /* not technically in the downscale table list */ + MDP_DOWNSCALE_BLUR, +}; + +static int downscale_x_table; +static int downscale_y_table; -struct mdp_table_entry mdp_upscale_table[] = { +static struct mdp_table_entry mdp_upscale_table[] = { { 0x5fffc, 0x0 }, { 0x50200, 0x7fc00000 }, { 0x5fffc, 0xff80000d }, @@ -764,3 +787,359 @@ struct mdp_table_entry mdp_gaussian_blur_table[] = { { 0x5fffc, 0x20000080 }, { 0x5037c, 0x20000080 }, }; + +static void load_table(const struct mdp_info *mdp, + struct mdp_table_entry *table, int len) +{ + int i; + for (i = 0; i < len; i++) + mdp_writel(mdp, table[i].val, table[i].reg); +} + +enum { + IMG_LEFT, + IMG_RIGHT, + IMG_TOP, + IMG_BOTTOM, +}; + +static void get_edge_info(uint32_t src, uint32_t src_coord, uint32_t dst, + uint32_t *interp1, uint32_t *interp2, + uint32_t *repeat1, uint32_t *repeat2) { + if (src > 3 * dst) { + *interp1 = 0; + *interp2 = src - 1; + *repeat1 = 0; + *repeat2 = 0; + } else if (src == 3 * dst) { + *interp1 = 0; + *interp2 = src; + *repeat1 = 0; + *repeat2 = 1; + } else if (src > dst && src < 3 * dst) { + *interp1 = -1; + *interp2 = src; + *repeat1 = 1; + *repeat2 = 1; + } else if (src == dst) { + *interp1 = -1; + *interp2 = src + 1; + *repeat1 = 1; + *repeat2 = 2; + } else { + *interp1 = -2; + *interp2 = src + 1; + *repeat1 = 2; + *repeat2 = 2; + } + *interp1 += src_coord; + *interp2 += src_coord; +} + +int mdp_ppp_cfg_edge_cond(struct mdp_blit_req *req, struct ppp_regs *regs) +{ + int32_t luma_interp[4]; + int32_t luma_repeat[4]; + int32_t chroma_interp[4]; + int32_t chroma_bound[4]; + int32_t chroma_repeat[4]; + uint32_t dst_w, dst_h; + + memset(&luma_interp, 0, sizeof(int32_t) * 4); + memset(&luma_repeat, 0, sizeof(int32_t) * 4); + memset(&chroma_interp, 0, sizeof(int32_t) * 4); + memset(&chroma_bound, 0, sizeof(int32_t) * 4); + memset(&chroma_repeat, 0, sizeof(int32_t) * 4); + regs->edge = 0; + + if (req->flags & MDP_ROT_90) { + dst_w = req->dst_rect.h; + dst_h = req->dst_rect.w; + } else { + dst_w = req->dst_rect.w; + dst_h = req->dst_rect.h; + } + + if (regs->op & (PPP_OP_SCALE_Y_ON | PPP_OP_SCALE_X_ON)) { + get_edge_info(req->src_rect.h, req->src_rect.y, dst_h, + &luma_interp[IMG_TOP], &luma_interp[IMG_BOTTOM], + &luma_repeat[IMG_TOP], &luma_repeat[IMG_BOTTOM]); + get_edge_info(req->src_rect.w, req->src_rect.x, dst_w, + &luma_interp[IMG_LEFT], &luma_interp[IMG_RIGHT], + &luma_repeat[IMG_LEFT], &luma_repeat[IMG_RIGHT]); + } else { + luma_interp[IMG_LEFT] = req->src_rect.x; + luma_interp[IMG_RIGHT] = req->src_rect.x + req->src_rect.w - 1; + luma_interp[IMG_TOP] = req->src_rect.y; + luma_interp[IMG_BOTTOM] = req->src_rect.y + req->src_rect.h - 1; + luma_repeat[IMG_LEFT] = 0; + luma_repeat[IMG_TOP] = 0; + luma_repeat[IMG_RIGHT] = 0; + luma_repeat[IMG_BOTTOM] = 0; 
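+		/* no scaling: the interpolation window is exactly the source
+		 * ROI, so no luma edge pixels need to be repeated */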
+ } + + chroma_interp[IMG_LEFT] = luma_interp[IMG_LEFT]; + chroma_interp[IMG_RIGHT] = luma_interp[IMG_RIGHT]; + chroma_interp[IMG_TOP] = luma_interp[IMG_TOP]; + chroma_interp[IMG_BOTTOM] = luma_interp[IMG_BOTTOM]; + + chroma_bound[IMG_LEFT] = req->src_rect.x; + chroma_bound[IMG_RIGHT] = req->src_rect.x + req->src_rect.w - 1; + chroma_bound[IMG_TOP] = req->src_rect.y; + chroma_bound[IMG_BOTTOM] = req->src_rect.y + req->src_rect.h - 1; + + if (IS_YCRCB(req->src.format)) { + chroma_interp[IMG_LEFT] = chroma_interp[IMG_LEFT] >> 1; + chroma_interp[IMG_RIGHT] = (chroma_interp[IMG_RIGHT] + 1) >> 1; + + chroma_bound[IMG_LEFT] = chroma_bound[IMG_LEFT] >> 1; + chroma_bound[IMG_RIGHT] = chroma_bound[IMG_RIGHT] >> 1; + } + + if (req->src.format == MDP_Y_CBCR_H2V2 || + req->src.format == MDP_Y_CRCB_H2V2) { + chroma_interp[IMG_TOP] = (chroma_interp[IMG_TOP] - 1) >> 1; + chroma_interp[IMG_BOTTOM] = (chroma_interp[IMG_BOTTOM] + 1) + >> 1; + chroma_bound[IMG_TOP] = (chroma_bound[IMG_TOP] + 1) >> 1; + chroma_bound[IMG_BOTTOM] = chroma_bound[IMG_BOTTOM] >> 1; + } + + chroma_repeat[IMG_LEFT] = chroma_bound[IMG_LEFT] - + chroma_interp[IMG_LEFT]; + chroma_repeat[IMG_RIGHT] = chroma_interp[IMG_RIGHT] - + chroma_bound[IMG_RIGHT]; + chroma_repeat[IMG_TOP] = chroma_bound[IMG_TOP] - + chroma_interp[IMG_TOP]; + chroma_repeat[IMG_BOTTOM] = chroma_interp[IMG_BOTTOM] - + chroma_bound[IMG_BOTTOM]; + + if (chroma_repeat[IMG_LEFT] < 0 || chroma_repeat[IMG_LEFT] > 3 || + chroma_repeat[IMG_RIGHT] < 0 || chroma_repeat[IMG_RIGHT] > 3 || + chroma_repeat[IMG_TOP] < 0 || chroma_repeat[IMG_TOP] > 3 || + chroma_repeat[IMG_BOTTOM] < 0 || chroma_repeat[IMG_BOTTOM] > 3 || + luma_repeat[IMG_LEFT] < 0 || luma_repeat[IMG_LEFT] > 3 || + luma_repeat[IMG_RIGHT] < 0 || luma_repeat[IMG_RIGHT] > 3 || + luma_repeat[IMG_TOP] < 0 || luma_repeat[IMG_TOP] > 3 || + luma_repeat[IMG_BOTTOM] < 0 || luma_repeat[IMG_BOTTOM] > 3) + return -1; + + regs->edge |= (chroma_repeat[IMG_LEFT] & 3) << MDP_LEFT_CHROMA; + regs->edge |= (chroma_repeat[IMG_RIGHT] & 3) << MDP_RIGHT_CHROMA; + regs->edge |= (chroma_repeat[IMG_TOP] & 3) << MDP_TOP_CHROMA; + regs->edge |= (chroma_repeat[IMG_BOTTOM] & 3) << MDP_BOTTOM_CHROMA; + regs->edge |= (luma_repeat[IMG_LEFT] & 3) << MDP_LEFT_LUMA; + regs->edge |= (luma_repeat[IMG_RIGHT] & 3) << MDP_RIGHT_LUMA; + regs->edge |= (luma_repeat[IMG_TOP] & 3) << MDP_TOP_LUMA; + regs->edge |= (luma_repeat[IMG_BOTTOM] & 3) << MDP_BOTTOM_LUMA; + return 0; +} + +#define ONE_HALF (1LL << 32) +#define ONE (1LL << 33) +#define TWO (2LL << 33) +#define THREE (3LL << 33) +#define FRAC_MASK (ONE - 1) +#define INT_MASK (~FRAC_MASK) + +static int scale_params(uint32_t dim_in, uint32_t dim_out, uint32_t origin, + uint32_t *phase_init, uint32_t *phase_step) +{ + /* to improve precicsion calculations are done in U31.33 and converted + * to U3.29 at the end */ + int64_t k1, k2, k3, k4, tmp; + uint64_t n, d, os, os_p, od, od_p, oreq; + unsigned rpa = 0; + int64_t ip64, delta; + + if (dim_out % 3 == 0) + rpa = !(dim_in % (dim_out / 3)); + + n = ((uint64_t)dim_out) << 34; + d = dim_in; + if (!d) + return -1; + do_div(n, d); + k3 = (n + 1) >> 1; + if ((k3 >> 4) < (1LL << 27) || (k3 >> 4) > (1LL << 31)) + return -1; + + n = ((uint64_t)dim_in) << 34; + d = (uint64_t)dim_out; + if (!d) + return -1; + do_div(n, d); + k1 = (n + 1) >> 1; + k2 = (k1 - ONE) >> 1; + + *phase_init = (int)(k2 >> 4); + k4 = (k3 - ONE) >> 1; + + if (rpa) { + os = ((uint64_t)origin << 33) - ONE_HALF; + tmp = (dim_out * os) + ONE_HALF; + if (!dim_in) + return -1; + do_div(tmp, dim_in); + od = 
tmp - ONE_HALF; + } else { + os = ((uint64_t)origin << 1) - 1; + od = (((k3 * os) >> 1) + k4); + } + + od_p = od & INT_MASK; + if (od_p != od) + od_p += ONE; + + if (rpa) { + tmp = (dim_in * od_p) + ONE_HALF; + if (!dim_in) + return -1; + do_div(tmp, dim_in); + os_p = tmp - ONE_HALF; + } else { + os_p = ((k1 * (od_p >> 33)) + k2); + } + + oreq = (os_p & INT_MASK) - ONE; + + ip64 = os_p - oreq; + delta = ((int64_t)(origin) << 33) - oreq; + ip64 -= delta; + /* limit to valid range before the left shift */ + delta = (ip64 & (1LL << 63)) ? 4 : -4; + delta <<= 33; + while (abs((int)(ip64 >> 33)) > 4) + ip64 += delta; + *phase_init = (int)(ip64 >> 4); + *phase_step = (uint32_t)(k1 >> 4); + return 0; +} + +int mdp_ppp_cfg_scale(struct mdp_info *mdp, struct ppp_regs *regs, + struct mdp_rect *src_rect, struct mdp_rect *dst_rect, + uint32_t src_format, uint32_t dst_format) +{ + int downscale; + uint32_t phase_init_x, phase_init_y, phase_step_x, phase_step_y; + uint32_t scale_factor_x, scale_factor_y; + + if (scale_params(src_rect->w, dst_rect->w, 1, &phase_init_x, + &phase_step_x) || + scale_params(src_rect->h, dst_rect->h, 1, &phase_init_y, + &phase_step_y)) + return -1; + + regs->phasex_init = phase_init_x; + regs->phasey_init = phase_init_y; + regs->phasex_step = phase_step_x; + regs->phasey_step = phase_step_y; + + scale_factor_x = (dst_rect->w * 10) / src_rect->w; + scale_factor_y = (dst_rect->h * 10) / src_rect->h; + + if (scale_factor_x > 8) + downscale = MDP_DOWNSCALE_PT8TO1; + else if (scale_factor_x > 6) + downscale = MDP_DOWNSCALE_PT6TOPT8; + else if (scale_factor_x > 4) + downscale = MDP_DOWNSCALE_PT4TOPT6; + else + downscale = MDP_DOWNSCALE_PT2TOPT4; + + if (downscale != downscale_x_table) { + load_table(mdp, mdp_downscale_x_table[downscale], 64); + downscale_x_table = downscale; + } + + if (scale_factor_y > 8) + downscale = MDP_DOWNSCALE_PT8TO1; + else if (scale_factor_y > 6) + downscale = MDP_DOWNSCALE_PT6TOPT8; + else if (scale_factor_y > 4) + downscale = MDP_DOWNSCALE_PT4TOPT6; + else + downscale = MDP_DOWNSCALE_PT2TOPT4; + + if (downscale != downscale_y_table) { + load_table(mdp, mdp_downscale_y_table[downscale], 64); + downscale_y_table = downscale; + } + + return 0; +} + + +int mdp_ppp_load_blur(struct mdp_info *mdp) +{ + if (!(downscale_x_table == MDP_DOWNSCALE_BLUR && + downscale_y_table == MDP_DOWNSCALE_BLUR)) { + load_table(mdp, mdp_gaussian_blur_table, 128); + downscale_x_table = MDP_DOWNSCALE_BLUR; + downscale_y_table = MDP_DOWNSCALE_BLUR; + } + + return 0; +} + +void mdp_ppp_init_scale(struct mdp_info *mdp) +{ + downscale_x_table = MDP_DOWNSCALE_MAX; + downscale_y_table = MDP_DOWNSCALE_MAX; + + load_table(mdp, mdp_upscale_table, ARRAY_SIZE(mdp_upscale_table)); +} + +int mdp_ppp_validate_blit(struct mdp_info *mdp, struct mdp_blit_req *req) +{ + /* WORKAROUND FOR HARDWARE BUG IN BG TILE FETCH */ + if (unlikely(req->src_rect.h == 0 || + req->src_rect.w == 0)) { + pr_info("mdp_ppp: src img of zero size!\n"); + return -EINVAL; + } + if (unlikely(req->dst_rect.h == 0 || + req->dst_rect.w == 0)) + return -EINVAL; + + return 0; +} + +int mdp_ppp_do_blit(struct mdp_info *mdp, struct mdp_blit_req *req, + struct file *src_file, unsigned long src_start, + unsigned long src_len, struct file *dst_file, + unsigned long dst_start, unsigned long dst_len) +{ + int ret; + + if (unlikely((req->transp_mask != MDP_TRANSP_NOP || + req->alpha != MDP_ALPHA_NOP || + HAS_ALPHA(req->src.format)) && + (req->flags & MDP_ROT_90 && + req->dst_rect.w <= 16 && req->dst_rect.h >= 16))) { + int i; + unsigned 
int tiles = req->dst_rect.h / 16; + unsigned int remainder = req->dst_rect.h % 16; + req->src_rect.w = 16*req->src_rect.w / req->dst_rect.h; + req->dst_rect.h = 16; + for (i = 0; i < tiles; i++) { + ret = mdp_ppp_blit_and_wait(mdp, req, + src_file, src_start, src_len, + dst_file, dst_start, dst_len); + if (ret) + goto end; + req->dst_rect.y += 16; + req->src_rect.x += req->src_rect.w; + } + if (!remainder) + goto end; + req->src_rect.w = remainder*req->src_rect.w / req->dst_rect.h; + req->dst_rect.h = remainder; + } + + ret = mdp_ppp_blit_and_wait(mdp, req, + src_file, src_start, src_len, + dst_file, dst_start, dst_len); +end: + return ret; +} diff --git a/drivers/video/msm/mdp_ppp31.c b/drivers/video/msm/mdp_ppp31.c new file mode 100644 index 0000000000000..fa36002ebe374 --- /dev/null +++ b/drivers/video/msm/mdp_ppp31.c @@ -0,0 +1,637 @@ +/* drivers/video/msm/mdp_ppp31.c + * + * Copyright (C) 2009 QUALCOMM Incorporated + * Copyright (C) 2009 Google Incorporated + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include + +#include "mdp_hw.h" +#include "mdp_ppp.h" + +#define NUM_COEFFS 32 + +struct mdp_scale_coeffs { + uint16_t c[4][NUM_COEFFS]; +}; + +struct mdp_scale_tbl_info { + uint16_t offset; + uint32_t set:2; + int use_pr; + struct mdp_scale_coeffs coeffs; +}; + +enum { + MDP_SCALE_PT2TOPT4, + MDP_SCALE_PT4TOPT6, + MDP_SCALE_PT6TOPT8, + MDP_SCALE_PT8TO8, + MDP_SCALE_MAX, +}; + +static struct mdp_scale_coeffs mdp_scale_pr_coeffs = { + .c = { + [0] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + }, + [1] = { + 511, 511, 511, 511, 511, 511, 511, 511, + 511, 511, 511, 511, 511, 511, 511, 511, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + }, + [2] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 511, 511, 511, 511, 511, 511, 511, 511, + 511, 511, 511, 511, 511, 511, 511, 511, + }, + [3] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + }, + }, +}; + +static struct mdp_scale_tbl_info mdp_scale_tbl[MDP_SCALE_MAX] = { + [ MDP_SCALE_PT2TOPT4 ] = { + .offset = 0, + .set = MDP_PPP_SCALE_COEFF_D0_SET, + .use_pr = -1, + .coeffs.c = { + [0] = { + 131, 131, 130, 129, 128, 127, 127, 126, + 125, 125, 124, 123, 123, 121, 120, 119, + 119, 118, 117, 117, 116, 115, 115, 114, + 113, 112, 111, 110, 109, 109, 108, 107, + }, + [1] = { + 141, 140, 140, 140, 140, 139, 138, 138, + 138, 137, 137, 137, 136, 137, 137, 137, + 136, 136, 136, 135, 135, 135, 134, 134, + 134, 134, 134, 133, 133, 132, 132, 132, + }, + [2] = { + 132, 132, 132, 133, 133, 134, 134, 134, + 134, 134, 135, 135, 135, 136, 136, 136, + 137, 137, 137, 136, 137, 137, 137, 138, + 138, 138, 139, 140, 140, 140, 140, 141, + }, + [3] = { + 107, 108, 109, 109, 110, 111, 112, 113, + 114, 115, 115, 116, 117, 117, 118, 119, + 119, 120, 121, 123, 123, 124, 125, 125, + 126, 127, 127, 128, 129, 130, 131, 131, + } + }, + }, + [ MDP_SCALE_PT4TOPT6 ] = { + .offset = 32, + .set = MDP_PPP_SCALE_COEFF_D1_SET, + .use_pr = -1, + .coeffs.c = { + [0] = { + 136, 132, 128, 123, 119, 115, 
111, 107, + 103, 98, 95, 91, 87, 84, 80, 76, + 73, 69, 66, 62, 59, 57, 54, 50, + 47, 44, 41, 39, 36, 33, 32, 29, + }, + [1] = { + 206, 205, 204, 204, 201, 200, 199, 197, + 196, 194, 191, 191, 189, 185, 184, 182, + 180, 178, 176, 173, 170, 168, 165, 162, + 160, 157, 155, 152, 148, 146, 142, 140, + }, + [2] = { + 140, 142, 146, 148, 152, 155, 157, 160, + 162, 165, 168, 170, 173, 176, 178, 180, + 182, 184, 185, 189, 191, 191, 194, 196, + 197, 199, 200, 201, 204, 204, 205, 206, + }, + [3] = { + 29, 32, 33, 36, 39, 41, 44, 47, + 50, 54, 57, 59, 62, 66, 69, 73, + 76, 80, 84, 87, 91, 95, 98, 103, + 107, 111, 115, 119, 123, 128, 132, 136, + }, + }, + }, + [ MDP_SCALE_PT6TOPT8 ] = { + .offset = 64, + .set = MDP_PPP_SCALE_COEFF_D2_SET, + .use_pr = -1, + .coeffs.c = { + [0] = { + 104, 96, 89, 82, 75, 68, 61, 55, + 49, 43, 38, 33, 28, 24, 20, 16, + 12, 9, 6, 4, 2, 0, -2, -4, + -5, -6, -7, -7, -8, -8, -8, -8, + }, + [1] = { + 303, 303, 302, 300, 298, 296, 293, 289, + 286, 281, 276, 270, 265, 258, 252, 245, + 238, 230, 223, 214, 206, 197, 189, 180, + 172, 163, 154, 145, 137, 128, 120, 112, + }, + [2] = { + 112, 120, 128, 137, 145, 154, 163, 172, + 180, 189, 197, 206, 214, 223, 230, 238, + 245, 252, 258, 265, 270, 276, 281, 286, + 289, 293, 296, 298, 300, 302, 303, 303, + }, + [3] = { + -8, -8, -8, -8, -7, -7, -6, -5, + -4, -2, 0, 2, 4, 6, 9, 12, + 16, 20, 24, 28, 33, 38, 43, 49, + 55, 61, 68, 75, 82, 89, 96, 104, + }, + }, + }, + [ MDP_SCALE_PT8TO8 ] = { + .offset = 96, + .set = MDP_PPP_SCALE_COEFF_U1_SET, + .use_pr = -1, + .coeffs.c = { + [0] = { + 0, -7, -13, -19, -24, -28, -32, -34, + -37, -39, -40, -41, -41, -41, -40, -40, + -38, -37, -35, -33, -31, -29, -26, -24, + -21, -18, -15, -13, -10, -7, -5, -2, + }, + [1] = { + 511, 507, 501, 494, 485, 475, 463, 450, + 436, 422, 405, 388, 370, 352, 333, 314, + 293, 274, 253, 233, 213, 193, 172, 152, + 133, 113, 95, 77, 60, 43, 28, 13, + }, + [2] = { + 0, 13, 28, 43, 60, 77, 95, 113, + 133, 152, 172, 193, 213, 233, 253, 274, + 294, 314, 333, 352, 370, 388, 405, 422, + 436, 450, 463, 475, 485, 494, 501, 507, + }, + [3] = { + 0, -2, -5, -7, -10, -13, -15, -18, + -21, -24, -26, -29, -31, -33, -35, -37, + -38, -40, -40, -41, -41, -41, -40, -39, + -37, -34, -32, -28, -24, -19, -13, -7, + }, + }, + }, +}; + +static void load_table(const struct mdp_info *mdp, int scale, int use_pr) +{ + int i; + uint32_t val; + struct mdp_scale_coeffs *coeffs; + struct mdp_scale_tbl_info *tbl = &mdp_scale_tbl[scale]; + + if (use_pr == tbl->use_pr) + return; + + tbl->use_pr = use_pr; + if (!use_pr) + coeffs = &tbl->coeffs; + else + coeffs = &mdp_scale_pr_coeffs; + + for (i = 0; i < NUM_COEFFS; ++i) { + val = ((coeffs->c[1][i] & 0x3ff) << 16) | + (coeffs->c[0][i] & 0x3ff); + mdp_writel(mdp, val, MDP_PPP_SCALE_COEFF_LSBn(tbl->offset + i)); + + val = ((coeffs->c[3][i] & 0x3ff) << 16) | + (coeffs->c[2][i] & 0x3ff); + mdp_writel(mdp, val, MDP_PPP_SCALE_COEFF_MSBn(tbl->offset + i)); + } +} + +#define SCALER_PHASE_BITS 29 +static void scale_params(uint32_t dim_in, uint32_t dim_out, uint32_t scaler, + uint32_t *phase_init, uint32_t *phase_step) +{ + uint64_t src = dim_in; + uint64_t dst = dim_out; + uint64_t numer; + uint64_t denom; + + *phase_init = 0; + + if (dst == 1) { + /* if destination is 1 pixel wide, the value of phase_step + * is unimportant. 
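Only the FIR scaler uses phase_init in this case, and it is simply pointed at the last source pixel.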
*/ + *phase_step = (uint32_t) (src << SCALER_PHASE_BITS); + if (scaler == MDP_PPP_SCALER_FIR) + *phase_init = + (uint32_t) ((src - 1) << SCALER_PHASE_BITS); + return; + } + + if (scaler == MDP_PPP_SCALER_FIR) { + numer = (src - 1) << SCALER_PHASE_BITS; + denom = dst - 1; + /* we want to round up the result*/ + numer += denom - 1; + } else { + numer = src << SCALER_PHASE_BITS; + denom = dst; + } + + do_div(numer, denom); + *phase_step = (uint32_t) numer; +} + +static int scale_idx(int factor) +{ + int idx; + + if (factor > 80) + idx = MDP_SCALE_PT8TO8; + else if (factor > 60) + idx = MDP_SCALE_PT6TOPT8; + else if (factor > 40) + idx = MDP_SCALE_PT4TOPT6; + else + idx = MDP_SCALE_PT2TOPT4; + + return idx; +} + +int mdp_ppp_cfg_scale(struct mdp_info *mdp, struct ppp_regs *regs, + struct mdp_rect *src_rect, struct mdp_rect *dst_rect, + uint32_t src_format, uint32_t dst_format) +{ + uint32_t x_fac; + uint32_t y_fac; + uint32_t scaler_x = MDP_PPP_SCALER_FIR; + uint32_t scaler_y = MDP_PPP_SCALER_FIR; + // Don't use pixel repeat mode, it looks bad + int use_pr = 0; + int x_idx; + int y_idx; + + if (unlikely(src_rect->w > 2048 || src_rect->h > 2048)) + return -ENOTSUPP; + + x_fac = (dst_rect->w * 100) / src_rect->w; + y_fac = (dst_rect->h * 100) / src_rect->h; + + /* if down-scaling by a factor smaller than 1/4, use M/N */ + scaler_x = x_fac <= 25 ? MDP_PPP_SCALER_MN : MDP_PPP_SCALER_FIR; + scaler_y = y_fac <= 25 ? MDP_PPP_SCALER_MN : MDP_PPP_SCALER_FIR; + scale_params(src_rect->w, dst_rect->w, scaler_x, ®s->phasex_init, + ®s->phasex_step); + scale_params(src_rect->h, dst_rect->h, scaler_y, ®s->phasey_init, + ®s->phasey_step); + + x_idx = scale_idx(x_fac); + y_idx = scale_idx(y_fac); + load_table(mdp, x_idx, use_pr); + load_table(mdp, y_idx, use_pr); + + regs->scale_cfg = 0; + // Enable SVI when source or destination is YUV + if (!IS_RGB(src_format) && !IS_RGB(dst_format)) + regs->scale_cfg |= (1 << 6); + regs->scale_cfg |= (mdp_scale_tbl[x_idx].set << 2) | + (mdp_scale_tbl[x_idx].set << 4); + regs->scale_cfg |= (scaler_x << 0) | (scaler_y << 1); + + return 0; +} + +int mdp_ppp_load_blur(struct mdp_info *mdp) +{ + return -ENOTSUPP; +} + +int mdp_ppp_cfg_edge_cond(struct mdp_blit_req *req, struct ppp_regs *regs) +{ + return 0; +} + +void mdp_ppp_init_scale(struct mdp_info *mdp) +{ + int scale; + for (scale = 0; scale < MDP_SCALE_MAX; ++scale) + load_table(mdp, scale, 0); +} + +/* Splits a blit into two horizontal stripes. 
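The destination ROI is cut roughly in half along its height and the source ROI is divided in proportion (along its width when the blit is rotated by 90 degrees).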
Used to work around MDP bugs */ +static int blit_split_height(struct mdp_info *mdp, const struct mdp_blit_req *req, + struct file *src_file, unsigned long src_start, unsigned long src_len, + struct file *dst_file, unsigned long dst_start, unsigned long dst_len) +{ + int ret; + struct mdp_blit_req splitreq; + int s_x_0, s_x_1, s_w_0, s_w_1, s_y_0, s_y_1, s_h_0, s_h_1; + int d_x_0, d_x_1, d_w_0, d_w_1, d_y_0, d_y_1, d_h_0, d_h_1; + + splitreq = *req; + /* break dest roi at height*/ + d_x_0 = d_x_1 = req->dst_rect.x; + d_w_0 = d_w_1 = req->dst_rect.w; + d_y_0 = req->dst_rect.y; + if (req->dst_rect.h % 32 == 3) + d_h_1 = (req->dst_rect.h - 3) / 2 - 1; + else + d_h_1 = (req->dst_rect.h - 1) / 2 - 1; + d_h_0 = req->dst_rect.h - d_h_1; + d_y_1 = d_y_0 + d_h_0; + if (req->dst_rect.h == 3) { + d_h_1 = 2; + d_h_0 = 2; + d_y_1 = d_y_0 + 1; + } + /* break source roi */ + if (splitreq.flags & MDP_ROT_90) { + s_y_0 = s_y_1 = req->src_rect.y; + s_h_0 = s_h_1 = req->src_rect.h; + s_x_0 = req->src_rect.x; + s_w_1 = (req->src_rect.w * d_h_1) / req->dst_rect.h; + s_w_0 = req->src_rect.w - s_w_1; + s_x_1 = s_x_0 + s_w_0; + if (d_h_1 >= 8 * s_w_1) { + s_w_1++; + s_x_1--; + } + } else { + s_x_0 = s_x_1 = req->src_rect.x; + s_w_0 = s_w_1 = req->src_rect.w; + s_y_0 = req->src_rect.y; + s_h_1 = (req->src_rect.h * d_h_1) / req->dst_rect.h; + s_h_0 = req->src_rect.h - s_h_1; + s_y_1 = s_y_0 + s_h_0; + if (d_h_1 >= 8 * s_h_1) { + s_h_1++; + s_y_1--; + } + } + + /* blit first region */ + if (((splitreq.flags & 0x07) == MDP_ROT_90) || + ((splitreq.flags & 0x07) == 0x0)) { + splitreq.src_rect.h = s_h_0; + splitreq.src_rect.y = s_y_0; + splitreq.dst_rect.h = d_h_0; + splitreq.dst_rect.y = d_y_0; + splitreq.src_rect.x = s_x_0; + splitreq.src_rect.w = s_w_0; + splitreq.dst_rect.x = d_x_0; + splitreq.dst_rect.w = d_w_0; + } else { + splitreq.src_rect.h = s_h_0; + splitreq.src_rect.y = s_y_0; + splitreq.dst_rect.h = d_h_1; + splitreq.dst_rect.y = d_y_1; + splitreq.src_rect.x = s_x_0; + splitreq.src_rect.w = s_w_0; + splitreq.dst_rect.x = d_x_1; + splitreq.dst_rect.w = d_w_1; + } + ret = mdp_ppp_blit_and_wait(mdp, &splitreq, + src_file, src_start, src_len, + dst_file, dst_start, dst_len); + if (ret) + return ret; + + /* blit second region */ + if (((splitreq.flags & 0x07) == MDP_ROT_90) || + ((splitreq.flags & 0x07) == 0x0)) { + splitreq.src_rect.h = s_h_1; + splitreq.src_rect.y = s_y_1; + splitreq.dst_rect.h = d_h_1; + splitreq.dst_rect.y = d_y_1; + splitreq.src_rect.x = s_x_1; + splitreq.src_rect.w = s_w_1; + splitreq.dst_rect.x = d_x_1; + splitreq.dst_rect.w = d_w_1; + } else { + splitreq.src_rect.h = s_h_1; + splitreq.src_rect.y = s_y_1; + splitreq.dst_rect.h = d_h_0; + splitreq.dst_rect.y = d_y_0; + splitreq.src_rect.x = s_x_1; + splitreq.src_rect.w = s_w_1; + splitreq.dst_rect.x = d_x_0; + splitreq.dst_rect.w = d_w_0; + } + ret = mdp_ppp_blit_and_wait(mdp, &splitreq, + src_file, src_start, src_len, + dst_file, dst_start, dst_len); + return ret; +} + +/* Splits a blit into two vertical stripes. 
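The destination ROI is split along its width; a stripe whose height still hits a problematic value is split again via blit_split_height().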
Used to work around MDP bugs */ +static int blit_split_width(struct mdp_info *mdp, const struct mdp_blit_req *req, + struct file *src_file, unsigned long src_start, unsigned long src_len, + struct file *dst_file, unsigned long dst_start, unsigned long dst_len) +{ + int ret; + struct mdp_blit_req splitreq; + int s_x_0, s_x_1, s_w_0, s_w_1, s_y_0, s_y_1, s_h_0, s_h_1; + int d_x_0, d_x_1, d_w_0, d_w_1, d_y_0, d_y_1, d_h_0, d_h_1; + splitreq = *req; + + /* break dest roi at width*/ + d_y_0 = d_y_1 = req->dst_rect.y; + d_h_0 = d_h_1 = req->dst_rect.h; + d_x_0 = req->dst_rect.x; + if (req->dst_rect.w % 32 == 6) + d_w_1 = req->dst_rect.w / 2 - 1; + else if (req->dst_rect.w % 2 == 0) + d_w_1 = req->dst_rect.w / 2; + else if (req->dst_rect.w % 32 == 3) + d_w_1 = (req->dst_rect.w - 3) / 2 - 1; + else + d_w_1 = (req->dst_rect.w - 1) / 2 - 1; + d_w_0 = req->dst_rect.w - d_w_1; + d_x_1 = d_x_0 + d_w_0; + if (req->dst_rect.w == 3) { + d_w_1 = 2; + d_w_0 = 2; + d_x_1 = d_x_0 + 1; + } + + /* break src roi at height or width*/ + if (splitreq.flags & MDP_ROT_90) { + s_x_0 = s_x_1 = req->src_rect.x; + s_w_0 = s_w_1 = req->src_rect.w; + s_y_0 = req->src_rect.y; + s_h_1 = (req->src_rect.h * d_w_1) / req->dst_rect.w; + s_h_0 = req->src_rect.h - s_h_1; + s_y_1 = s_y_0 + s_h_0; + if (d_w_1 >= 8 * s_h_1) { + s_h_1++; + s_y_1--; + } + } else { + s_y_0 = s_y_1 = req->src_rect.y; + s_h_0 = s_h_1 = req->src_rect.h; + s_x_0 = req->src_rect.x; + s_w_1 = (req->src_rect.w * d_w_1) / req->dst_rect.w; + s_w_0 = req->src_rect.w - s_w_1; + s_x_1 = s_x_0 + s_w_0; + if (d_w_1 >= 8 * s_w_1) { + s_w_1++; + s_x_1--; + } + } + + /* blit first region */ + if (((splitreq.flags & 0x07) == MDP_ROT_270) || + ((splitreq.flags & 0x07) == 0x0)) { + splitreq.src_rect.h = s_h_0; + splitreq.src_rect.y = s_y_0; + splitreq.dst_rect.h = d_h_0; + splitreq.dst_rect.y = d_y_0; + splitreq.src_rect.x = s_x_0; + splitreq.src_rect.w = s_w_0; + splitreq.dst_rect.x = d_x_0; + splitreq.dst_rect.w = d_w_0; + } else { + splitreq.src_rect.h = s_h_0; + splitreq.src_rect.y = s_y_0; + splitreq.dst_rect.h = d_h_1; + splitreq.dst_rect.y = d_y_1; + splitreq.src_rect.x = s_x_0; + splitreq.src_rect.w = s_w_0; + splitreq.dst_rect.x = d_x_1; + splitreq.dst_rect.w = d_w_1; + } + + if (unlikely((splitreq.dst_rect.h != 1) && + ((splitreq.dst_rect.h % 32 == 3) || + (splitreq.dst_rect.h % 32) == 1))) + ret = blit_split_height(mdp, &splitreq, + src_file, src_start, src_len, + dst_file, dst_start, dst_len); + else + ret = mdp_ppp_blit_and_wait(mdp, &splitreq, + src_file, src_start, src_len, + dst_file, dst_start, dst_len); + if (ret) + return ret; + + /* blit second region */ + if (((splitreq.flags & 0x07) == MDP_ROT_270) || + ((splitreq.flags & 0x07) == 0x0)) { + splitreq.src_rect.h = s_h_1; + splitreq.src_rect.y = s_y_1; + splitreq.dst_rect.h = d_h_1; + splitreq.dst_rect.y = d_y_1; + splitreq.src_rect.x = s_x_1; + splitreq.src_rect.w = s_w_1; + splitreq.dst_rect.x = d_x_1; + splitreq.dst_rect.w = d_w_1; + } else { + splitreq.src_rect.h = s_h_1; + splitreq.src_rect.y = s_y_1; + splitreq.dst_rect.h = d_h_0; + splitreq.dst_rect.y = d_y_0; + splitreq.src_rect.x = s_x_1; + splitreq.src_rect.w = s_w_1; + splitreq.dst_rect.x = d_x_0; + splitreq.dst_rect.w = d_w_0; + } + + if (unlikely((splitreq.dst_rect.h != 1) && + ((splitreq.dst_rect.h % 32 == 3) || + (splitreq.dst_rect.h % 32) == 1))) + ret = blit_split_height(mdp, &splitreq, + src_file, src_start, src_len, + dst_file, dst_start, dst_len); + else + ret = mdp_ppp_blit_and_wait(mdp, &splitreq, + src_file, src_start, src_len, + 
dst_file, dst_start, dst_len); + return ret; +} + + +int mdp_ppp_validate_blit(struct mdp_info *mdp, struct mdp_blit_req *req) +{ + if (req->flags & MDP_ROT_90) { + if (unlikely(((req->dst_rect.h == 1) && + ((req->src_rect.w != 1) || + (req->dst_rect.w != req->src_rect.h))) || + ((req->dst_rect.w == 1) && ((req->src_rect.h != 1) || + (req->dst_rect.h != req->src_rect.w))))) { + pr_err("mpd_ppp: error scaling when size is 1!\n"); + return -EINVAL; + } + } else { + if (unlikely(((req->dst_rect.w == 1) && + ((req->src_rect.w != 1) || + (req->dst_rect.h != req->src_rect.h))) || + ((req->dst_rect.h == 1) && ((req->src_rect.h != 1) || + (req->dst_rect.h != req->src_rect.h))))) { + pr_err("mpd_ppp: error scaling when size is 1!\n"); + return -EINVAL; + } + } + + /* WORKAROUND FOR HARDWARE BUG IN BG TILE FETCH */ + if (unlikely(req->src_rect.h == 0 || + req->src_rect.w == 0)) { + pr_info("mdp_ppp: src img of zero size!\n"); + return -EINVAL; + } + if (unlikely(req->dst_rect.h == 0 || + req->dst_rect.w == 0)) + return -EINVAL; + + return 0; +} + +int mdp_ppp_do_blit(struct mdp_info *mdp, struct mdp_blit_req *req, + struct file *src_file, unsigned long src_start, + unsigned long src_len, struct file *dst_file, + unsigned long dst_start, unsigned long dst_len) +{ + int ret; + + /* Workarounds for MDP 3.1 hardware bugs */ + if (unlikely((mdp_get_bytes_per_pixel(req->dst.format) == 4) && + (req->dst_rect.w != 1) && + (((req->dst_rect.w % 8) == 6) || + ((req->dst_rect.w % 32) == 3) || + ((req->dst_rect.w % 32) == 1)))) { + ret = blit_split_width(mdp, req, + src_file, src_start, src_len, + dst_file, dst_start, dst_len); + goto end; + } else if (unlikely((req->dst_rect.w != 1) && (req->dst_rect.h != 1) && + ((req->dst_rect.h % 32) == 3 || + (req->dst_rect.h % 32) == 1))) { + ret = blit_split_height(mdp, req, + src_file, src_start, src_len, + dst_file, dst_start, dst_len); + goto end; + } + + ret = mdp_ppp_blit_and_wait(mdp, req, + src_file, src_start, src_len, + dst_file, dst_start, dst_len); +end: + return ret; +} diff --git a/drivers/video/msm/mdp_scale_tables.h b/drivers/video/msm/mdp_scale_tables.h deleted file mode 100644 index 34077b1af6039..0000000000000 --- a/drivers/video/msm/mdp_scale_tables.h +++ /dev/null @@ -1,38 +0,0 @@ -/* drivers/video/msm_fb/mdp_scale_tables.h - * - * Copyright (C) 2007 QUALCOMM Incorporated - * Copyright (C) 2007 Google Incorporated - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ -#ifndef _MDP_SCALE_TABLES_H_ -#define _MDP_SCALE_TABLES_H_ - -#include -struct mdp_table_entry { - uint32_t reg; - uint32_t val; -}; - -extern struct mdp_table_entry mdp_upscale_table[64]; - -enum { - MDP_DOWNSCALE_PT2TOPT4, - MDP_DOWNSCALE_PT4TOPT6, - MDP_DOWNSCALE_PT6TOPT8, - MDP_DOWNSCALE_PT8TO1, - MDP_DOWNSCALE_MAX, -}; - -extern struct mdp_table_entry *mdp_downscale_x_table[MDP_DOWNSCALE_MAX]; -extern struct mdp_table_entry *mdp_downscale_y_table[MDP_DOWNSCALE_MAX]; -extern struct mdp_table_entry mdp_gaussian_blur_table[]; - -#endif diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c index debe5933fd2e6..c54ced3186178 100644 --- a/drivers/video/msm/msm_fb.c +++ b/drivers/video/msm/msm_fb.c @@ -17,11 +17,12 @@ #include #include #include -#include #include #include #include +#include +#include #include #include #include @@ -31,6 +32,24 @@ #include #include #include +#include +#include +#include "mdp_hw.h" + +extern void start_drawing_late_resume(struct early_suspend *h); +static void msmfb_resume_handler(struct early_suspend *h); +static void msmfb_resume(struct work_struct *work); + +#ifdef CONFIG_MSM_HDMI +void hdmi_DoBlit(int offset); +int hdmi_usePanelSync(void); +#endif + +#define MSMFB_DEBUG 1 +#ifdef CONFIG_FB_MSM_LOGO +#define INIT_IMAGE_FILE "/logo.rle" +extern int load_565rle_image(char *filename); +#endif #define PRINT_FPS 0 #define PRINT_BLIT_TIME 0 @@ -47,47 +66,70 @@ #define BLIT_TIME 0x4 #define SHOW_UPDATES 0x8 +#ifdef CONFIG_PANEL_SELF_REFRESH +extern struct panel_icm_info *panel_icm; +extern wait_queue_head_t panel_update_wait_queue; +#endif + #define DLOG(mask, fmt, args...) \ do { \ - if (msmfb_debug_mask & mask) \ + if ((msmfb_debug_mask | SUSPEND_RESUME) & mask) \ printk(KERN_INFO "msmfb: "fmt, ##args); \ } while (0) +#define BITS_PER_PIXEL(info) (info->fb->var.bits_per_pixel) +#define BYTES_PER_PIXEL(info) (info->fb->var.bits_per_pixel >> 3) static int msmfb_debug_mask; module_param_named(msmfb_debug_mask, msmfb_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); struct mdp_device *mdp; +static atomic_t mdpclk_on = ATOMIC_INIT(1); -struct msmfb_info { - struct fb_info *fb; - struct msm_panel_data *panel; - int xres; - int yres; - unsigned output_format; - unsigned yoffset; - unsigned frame_requested; - unsigned frame_done; - int sleeping; - unsigned update_frame; - struct { - int left; - int top; - int eright; /* exclusive */ - int ebottom; /* exclusive */ - } update_info; - char *black; - - spinlock_t update_lock; - struct mutex panel_init_lock; - wait_queue_head_t frame_wq; - struct workqueue_struct *resume_workqueue; - struct work_struct resume_work; - struct msmfb_callback dma_callback; - struct msmfb_callback vsync_callback; - struct hrtimer fake_vsync; - ktime_t vsync_request_time; +#ifdef CONFIG_FB_MSM_OVERLAY +#define USE_OVERLAY 1 +struct overlay_waitevent{ + uint32_t waked_up; + wait_queue_head_t event_wait; }; +static struct overlay_waitevent overlay_event; +DEFINE_MUTEX(overlay_event_lock); +#endif + +#if (defined(CONFIG_USB_FUNCTION_PROJECTOR) || defined(CONFIG_USB_ANDROID_PROJECTOR)) +static spinlock_t fb_data_lock = SPIN_LOCK_UNLOCKED; +static struct msm_fb_info msm_fb_data; +int msmfb_get_var(struct msm_fb_info *tmp) +{ + unsigned long flags; + spin_lock_irqsave(&fb_data_lock, flags); + memcpy(tmp, &msm_fb_data, sizeof(msm_fb_data)); + spin_unlock_irqrestore(&fb_data_lock, flags); + return 0; +} + +/* projector need this, and very much */ +int msmfb_get_fb_area(void) +{ + int area; + unsigned long flags; + 
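/* msm_fb_data is refreshed from msmfb_set_var() on every pan, so take
+	 * fb_data_lock for a consistent snapshot (used by the projector code) */
+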
spin_lock_irqsave(&fb_data_lock, flags); + area = msm_fb_data.msmfb_area; + spin_unlock_irqrestore(&fb_data_lock, flags); + return area; +} + +static void msmfb_set_var(unsigned char *addr, int area) +{ + unsigned long flags; + + spin_lock_irqsave(&fb_data_lock, flags); + msm_fb_data.fb_addr = addr; + msm_fb_data.msmfb_area = area; + spin_unlock_irqrestore(&fb_data_lock, flags); + +} +#endif static int msmfb_open(struct fb_info *info, int user) { @@ -102,17 +144,36 @@ static int msmfb_release(struct fb_info *info, int user) /* Called from dma interrupt handler, must not sleep */ static void msmfb_handle_dma_interrupt(struct msmfb_callback *callback) { - unsigned long irq_flags; + unsigned long irq_flags=0; struct msmfb_info *msmfb = container_of(callback, struct msmfb_info, dma_callback); +#if PRINT_FPS + int64_t dt; + ktime_t now; + static int64_t frame_count; + static ktime_t last_sec; +#endif spin_lock_irqsave(&msmfb->update_lock, irq_flags); msmfb->frame_done = msmfb->frame_requested; + if (msmfb->sleeping == UPDATING && msmfb->frame_done == msmfb->update_frame) { DLOG(SUSPEND_RESUME, "full update completed\n"); queue_work(msmfb->resume_workqueue, &msmfb->resume_work); } +#if PRINT_FPS + now = ktime_get(); + dt = ktime_to_ns(ktime_sub(now, last_sec)); + frame_count++; + if (dt > NSEC_PER_SEC) { + int64_t fps = frame_count * NSEC_PER_SEC * 100; + frame_count = 0; + last_sec = ktime_get(); + do_div(fps, dt); + DLOG(FPS, "fps * 100: %llu\n", fps); + } +#endif spin_unlock_irqrestore(&msmfb->update_lock, irq_flags); wake_up(&msmfb->frame_wq); } @@ -121,7 +182,7 @@ static int msmfb_start_dma(struct msmfb_info *msmfb) { uint32_t x, y, w, h; unsigned addr; - unsigned long irq_flags; + unsigned long irq_flags=0; uint32_t yoffset; s64 time_since_request; struct msm_panel_data *panel = msmfb->panel; @@ -162,9 +223,10 @@ static int msmfb_start_dma(struct msmfb_info *msmfb) } spin_unlock_irqrestore(&msmfb->update_lock, irq_flags); - addr = ((msmfb->xres * (yoffset + y) + x) * 2); + addr = ((msmfb->xres * (yoffset + y) + x) * BYTES_PER_PIXEL(msmfb)); mdp->dma(mdp, addr + msmfb->fb->fix.smem_start, - msmfb->xres * 2, w, h, x, y, &msmfb->dma_callback, + msmfb->xres * BYTES_PER_PIXEL(msmfb), w, h, x, y, + &msmfb->dma_callback, panel->interface_type); return 0; error: @@ -181,6 +243,7 @@ static void msmfb_handle_vsync_interrupt(struct msmfb_callback *callback) { struct msmfb_info *msmfb = container_of(callback, struct msmfb_info, vsync_callback); + wake_unlock(&msmfb->idle_lock); msmfb_start_dma(msmfb); } @@ -198,12 +261,38 @@ static void msmfb_pan_update(struct fb_info *info, uint32_t left, uint32_t top, { struct msmfb_info *msmfb = info->par; struct msm_panel_data *panel = msmfb->panel; - unsigned long irq_flags; +#ifdef CONFIG_PANEL_SELF_REFRESH + struct mdp_lcdc_info *lcdc = panel_to_lcdc(panel); +#endif + unsigned long irq_flags=0; int sleeping; int retry = 1; +#if PRINT_FPS + ktime_t t1, t2; + static uint64_t pans; + static uint64_t dt; + t1 = ktime_get(); +#endif DLOG(SHOW_UPDATES, "update %d %d %d %d %d %d\n", left, top, eright, ebottom, yoffset, pan_display); + +#ifdef CONFIG_PANEL_SELF_REFRESH + if (lcdc->mdp->mdp_dev.overrides & MSM_MDP_RGB_PANEL_SELE_REFRESH) { + spin_lock_irqsave(&panel_icm->lock, irq_flags); + panel_icm->panel_update = 1; + spin_unlock_irqrestore(&panel_icm->lock, irq_flags); + wake_up(&panel_update_wait_queue); + } +#endif + +#if (defined(CONFIG_USB_FUNCTION_PROJECTOR) || defined(CONFIG_USB_ANDROID_PROJECTOR)) + /* Jay, 8/1/09' */ + msmfb_set_var(msmfb->fb->screen_base, 
yoffset); +#endif + if (msmfb->sleeping != AWAKE) + DLOG(SUSPEND_RESUME, "pan_update in state(%d)\n", msmfb->sleeping); + restart: spin_lock_irqsave(&msmfb->update_lock, irq_flags); @@ -231,6 +320,7 @@ static void msmfb_pan_update(struct fb_info *info, uint32_t left, uint32_t top, msmfb->sleeping == UPDATING)) { if (retry && panel->request_vsync && (sleeping == AWAKE)) { + wake_lock_timeout(&msmfb->idle_lock, HZ/4); panel->request_vsync(panel, &msmfb->vsync_callback); retry = 0; @@ -247,6 +337,21 @@ static void msmfb_pan_update(struct fb_info *info, uint32_t left, uint32_t top, goto restart; } +#if PRINT_FPS + t2 = ktime_get(); + if (pan_display) { + uint64_t temp = ktime_to_ns(ktime_sub(t2, t1)); + do_div(temp, 1000); + dt += temp; + pans++; + if (pans > 1000) { + do_div(dt, pans); + DLOG(FPS, "ave_wait_time: %lld\n", dt); + dt = 0; + pans = 0; + } + } +#endif msmfb->frame_requested++; /* if necessary, update the y offset, if this is the @@ -278,18 +383,35 @@ static void msmfb_pan_update(struct fb_info *info, uint32_t left, uint32_t top, msmfb->yoffset); spin_unlock_irqrestore(&msmfb->update_lock, irq_flags); - /* if the panel is all the way on wait for vsync, otherwise sleep - * for 16 ms (long enough for the dma to panel) and then begin dma */ - msmfb->vsync_request_time = ktime_get(); - if (panel->request_vsync && (sleeping == AWAKE)) { - panel->request_vsync(panel, &msmfb->vsync_callback); - } else { - if (!hrtimer_active(&msmfb->fake_vsync)) { - hrtimer_start(&msmfb->fake_vsync, - ktime_set(0, NSEC_PER_SEC/60), - HRTIMER_MODE_REL); - } - } +#ifdef CONFIG_MSM_HDMI + if (!hdmi_usePanelSync()) + { + msmfb->vsync_request_time = ktime_get(); + msmfb_start_dma(msmfb); + } + else + { +#endif + /* if the panel is all the way on wait for vsync, otherwise sleep + * for 16 ms (long enough for the dma to panel) and then begin dma */ + msmfb->vsync_request_time = ktime_get(); + if (panel->request_vsync && (sleeping == AWAKE)) { + wake_lock_timeout(&msmfb->idle_lock, HZ/4); + panel->request_vsync(panel, &msmfb->vsync_callback); + } else { + if (!hrtimer_active(&msmfb->fake_vsync)) { + hrtimer_start(&msmfb->fake_vsync, + ktime_set(0, NSEC_PER_SEC/60), + HRTIMER_MODE_REL); + } + } +#ifdef CONFIG_MSM_HDMI + } + + /* We did the DMA, now blit the data to the other display */ + hdmi_DoBlit(msmfb->xres * msmfb->yoffset * BYTES_PER_PIXEL(msmfb)); +#endif + return; } static void msmfb_update(struct fb_info *info, uint32_t left, uint32_t top, @@ -303,16 +425,17 @@ static void power_on_panel(struct work_struct *work) struct msmfb_info *msmfb = container_of(work, struct msmfb_info, resume_work); struct msm_panel_data *panel = msmfb->panel; - unsigned long irq_flags; - + unsigned long irq_flags=0; mutex_lock(&msmfb->panel_init_lock); DLOG(SUSPEND_RESUME, "turning on panel\n"); if (msmfb->sleeping == UPDATING) { + wake_lock_timeout(&msmfb->idle_lock, HZ); if (panel->unblank(panel)) { printk(KERN_INFO "msmfb: panel unblank failed," "not starting drawing\n"); goto error; } + wake_unlock(&msmfb->idle_lock); spin_lock_irqsave(&msmfb->update_lock, irq_flags); msmfb->sleeping = AWAKE; wake_up(&msmfb->frame_wq); @@ -322,17 +445,218 @@ static void power_on_panel(struct work_struct *work) mutex_unlock(&msmfb->panel_init_lock); } +static BLOCKING_NOTIFIER_HEAD(display_chain_head); +int register_display_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&display_chain_head, nb); +} +static int display_notifier_callback(struct notifier_block *nfb, + unsigned long action, + void *ignored) +{ + 
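/* display notifier callback: only NOTIFY_MSM_FB and NOTIFY_POWER are
+	 * handled, anything else is rejected with NOTIFY_BAD */
+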
//struct msmfb_info *msm_fb = (struct msmfb_info *)ignored; + + switch (action) { + case NOTIFY_MSM_FB: + printk(KERN_DEBUG "NOTIFY_MSM_FB\n"); + //msmfb_resume(&msm_fb->early_suspend); + break; + case NOTIFY_POWER: + /* nothing to do */ + break; + default: + printk(KERN_ERR "%s: unknown action in 0x%lx\n", + __func__, action); + return NOTIFY_BAD; + } + return NOTIFY_OK; +} + +#ifdef CONFIG_HAS_EARLYSUSPEND +/* turn off the panel */ +static void msmfb_earlier_suspend(struct early_suspend *h) +{ + struct msmfb_info *msmfb = container_of(h, struct msmfb_info, + earlier_suspend); + struct msm_panel_data *panel = msmfb->panel; + unsigned long irq_flags=0; + + mutex_lock(&msmfb->panel_init_lock); + msmfb->sleeping = SLEEPING; + wake_up(&msmfb->frame_wq); + spin_lock_irqsave(&msmfb->update_lock, irq_flags); + spin_unlock_irqrestore(&msmfb->update_lock, irq_flags); + wait_event_timeout(msmfb->frame_wq, + msmfb->frame_requested == msmfb->frame_done, HZ/10); +#ifndef CONFIG_MSM_MDP40 + mdp->dma(mdp, virt_to_phys(msmfb->black), 0, + msmfb->fb->var.xres, msmfb->fb->var.yres, 0, 0, + NULL, panel->interface_type); + mdp->dma_wait(mdp, panel->interface_type); +#endif + /* turn off the panel */ + panel->blank(panel); +} + +static void msmfb_suspend(struct early_suspend *h) +{ + struct msmfb_info *msmfb = container_of(h, struct msmfb_info, + early_suspend); + struct msm_panel_data *panel = msmfb->panel; + /* suspend the panel */ +#ifdef CONFIG_FB_MSM_OVERLAY + /*check whether overlay done*/ + wait_event_interruptible_timeout( + overlay_event.event_wait, + (overlay_event.waked_up == ~USE_OVERLAY), + 10*HZ); + /*wait until USE_OVERLAY flag is off and set mdpclk_on as off*/ + atomic_set(&mdpclk_on, 0); + pr_info("wait event : %X\n", overlay_event.waked_up); +#endif + panel->suspend(panel); + msmfb->fb_resumed = 0; + mutex_unlock(&msmfb->panel_init_lock); +} + +static void msmfb_resume_handler(struct early_suspend *h) +{ + struct msmfb_info *msmfb = container_of(h, struct msmfb_info, + early_suspend); +#ifdef CONFIG_HTC_ONMODE_CHARGING + if (msmfb->fb_resumed == 1) { + DLOG(SUSPEND_RESUME, "fb is resumed by onchg. 
skip resume\n"); + return; + } +#endif + queue_work(msmfb->resume_workqueue, &msmfb->msmfb_resume_work); + wait_event_interruptible_timeout(msmfb->frame_wq, msmfb->fb_resumed==1,HZ/2); +} + +#ifdef CONFIG_HTC_ONMODE_CHARGING +static void msmfb_onchg_earlier_suspend(struct early_suspend *h) +{ + struct msmfb_info *msmfb = container_of(h, struct msmfb_info, + onchg_earlier_suspend); + + struct msm_panel_data *panel = msmfb->panel; + unsigned long irq_flags=0; + + mutex_lock(&msmfb->panel_init_lock); + msmfb->sleeping = SLEEPING; + wake_up(&msmfb->frame_wq); + spin_lock_irqsave(&msmfb->update_lock, irq_flags); + spin_unlock_irqrestore(&msmfb->update_lock, irq_flags); + +#ifndef CONFIG_MSM_MDP40 + mdp->dma(mdp, virt_to_phys(msmfb->black), 0, + msmfb->fb->var.xres, msmfb->fb->var.yres, 0, 0, + NULL, panel->interface_type); + mdp->dma_wait(mdp, panel->interface_type); +#endif + wait_event_timeout(msmfb->frame_wq, + msmfb->frame_requested == msmfb->frame_done, HZ/10); + + /* turn off the panel */ + panel->blank(panel); +} + +static void msmfb_onchg_suspend(struct early_suspend *h) +{ + struct msmfb_info *msmfb = container_of(h, struct msmfb_info, + onchg_suspend); + struct msm_panel_data *panel = msmfb->panel; + /* suspend the panel */ +#ifdef CONFIG_FB_MSM_OVERLAY + /*check whether overlay done*/ + wait_event_interruptible_timeout( + overlay_event.event_wait, + (overlay_event.waked_up == ~USE_OVERLAY), + 10*HZ); + /*wait until USE_OVERLAY flag is off and set mdpclk_on as off*/ + atomic_set(&mdpclk_on, 0); + pr_info("wait event : %X\n", overlay_event.waked_up); +#endif + panel->suspend(panel); + msmfb->fb_resumed = 0; + mutex_unlock(&msmfb->panel_init_lock); +} + +static void msmfb_onchg_resume_handler(struct early_suspend *h) +{ + struct msmfb_info *msmfb = container_of(h, struct msmfb_info, + onchg_suspend); + queue_work(msmfb->resume_workqueue, &msmfb->msmfb_resume_work); + wait_event_interruptible_timeout(msmfb->frame_wq, msmfb->fb_resumed == 1, HZ/2); +} +#endif + +static void msmfb_resume(struct work_struct *work) +{ + struct msmfb_info *msmfb = + container_of(work, struct msmfb_info, msmfb_resume_work); + struct msm_panel_data *panel = msmfb->panel; + unsigned long irq_flags=0; + + if (panel->resume(panel)) { + printk(KERN_INFO "msmfb: panel resume failed, not resuming " + "fb\n"); + return; + } + spin_lock_irqsave(&msmfb->update_lock, irq_flags); + msmfb->frame_requested = msmfb->frame_done = msmfb->update_frame = 0; + msmfb->sleeping = WAKING; + DLOG(SUSPEND_RESUME, "ready, waiting for full update\n"); + spin_unlock_irqrestore(&msmfb->update_lock, irq_flags); + msmfb->fb_resumed = 1; + wake_up(&msmfb->frame_wq); + + atomic_set(&mdpclk_on, 1); +} +#endif static int msmfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { + uint32_t size; + if ((var->xres != info->var.xres) || (var->yres != info->var.yres) || - (var->xres_virtual != info->var.xres_virtual) || - (var->yres_virtual != info->var.yres_virtual) || (var->xoffset != info->var.xoffset) || - (var->bits_per_pixel != info->var.bits_per_pixel) || + (mdp->check_output_format(mdp, var->bits_per_pixel)) || (var->grayscale != info->var.grayscale)) return -EINVAL; + + size = var->xres_virtual * var->yres_virtual * + (var->bits_per_pixel >> 3); + if (size > info->fix.smem_len) + return -EINVAL; + return 0; +} + +static int msmfb_set_par(struct fb_info *info) +{ + struct fb_var_screeninfo *var = &info->var; + struct fb_fix_screeninfo *fix = &info->fix; + + /* we only support RGB ordering for now */ + if (var->bits_per_pixel == 
32 || var->bits_per_pixel == 24) { + var->red.offset = 0; + var->red.length = 8; + var->green.offset = 8; + var->green.length = 8; + var->blue.offset = 16; + var->blue.length = 8; + } else if (var->bits_per_pixel == 16) { + var->red.offset = 11; + var->red.length = 5; + var->green.offset = 5; + var->green.length = 6; + var->blue.offset = 0; + var->blue.length = 5; + } else + return -1; + mdp->set_output_format(mdp, var->bits_per_pixel); + fix->line_length = var->xres * var->bits_per_pixel / 8; return 0; } @@ -344,6 +668,13 @@ int msmfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) /* "UPDT" */ if ((panel->caps & MSMFB_CAP_PARTIAL_UPDATES) && (var->reserved[0] == 0x54445055)) { +#if 0 + printk(KERN_INFO "pan frame %d-%d, rect %d %d %d %d\n", + msmfb->frame_requested, msmfb->frame_done, + var->reserved[1] & 0xffff, + var->reserved[1] >> 16, var->reserved[2] & 0xffff, + var->reserved[2] >> 16); +#endif msmfb_pan_update(info, var->reserved[1] & 0xffff, var->reserved[1] >> 16, var->reserved[2] & 0xffff, @@ -399,6 +730,92 @@ static int msmfb_blit(struct fb_info *info, } return 0; } +#ifdef CONFIG_FB_MSM_OVERLAY +static int msmfb_overlay_get(struct fb_info *info, void __user *p) +{ + struct mdp_overlay req; + int ret; + + if (copy_from_user(&req, p, sizeof(req))) + return -EFAULT; + + ret = mdp->overlay_get(mdp, info, &req); + + if (ret) { + printk(KERN_ERR "%s: ioctl failed \n", + __func__); + return ret; + } + if (copy_to_user(p, &req, sizeof(req))) { + printk(KERN_ERR "%s: copy2user failed \n", + __func__); + return -EFAULT; + } + + return 0; +} + +static int msmfb_overlay_set(struct fb_info *info, void __user *p) +{ + struct mdp_overlay req; + int ret; + + if (copy_from_user(&req, p, sizeof(req))) + return -EFAULT; + + printk(KERN_INFO "%s(%d) dst rect info w=%d h=%d x=%d y=%d rotator=%d\n", __func__, __LINE__, req.dst_rect.w, req.dst_rect.h, req.dst_rect.x, req.dst_rect.y, req.user_data[0]); + ret = mdp->overlay_set(mdp, info, &req); + if (ret) { + printk(KERN_ERR "%s:ioctl failed \n", + __func__); + return ret; + } + + if (copy_to_user(p, &req, sizeof(req))) { + printk(KERN_ERR "%s: copy2user failed \n", + __func__); + return -EFAULT; + } + + return 0; +} + +static int msmfb_overlay_unset(struct fb_info *info, unsigned long *argp) +{ + int ret, ndx; + + ret = copy_from_user(&ndx, argp, sizeof(ndx)); + if (ret) { + printk(KERN_ERR "%s:msmfb_overlay_unset ioctl failed \n", + __func__); + return ret; + } + + return mdp->overlay_unset(mdp, info, ndx); +} + +static int msmfb_overlay_play(struct fb_info *info, unsigned long *argp) +{ + int ret; + struct msmfb_overlay_data req; + struct file *p_src_file = 0; + + ret = copy_from_user(&req, argp, sizeof(req)); + if (ret) { + printk(KERN_ERR "%s:msmfb_overlay_play ioctl failed \n", + __func__); + return ret; + } + + ret = mdp->overlay_play(mdp, info, &req, &p_src_file); + + if (p_src_file) + put_pmem_file(p_src_file); + + return ret; +} + +#endif DEFINE_MUTEX(mdp_ppp_lock); @@ -406,22 +823,65 @@ DEFINE_MUTEX(mdp_ppp_lock); static int msmfb_ioctl(struct fb_info *p, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; - int ret; + int ret = 0; +#if PRINT_BLIT_TIME + ktime_t t1, t2; +#endif switch (cmd) { case MSMFB_GRP_DISP: mdp->set_grp_disp(mdp, arg); break; case MSMFB_BLIT: +#if PRINT_BLIT_TIME + t1 = ktime_get(); +#endif ret = msmfb_blit(p, argp); if (ret) return ret; +#if PRINT_BLIT_TIME + t2 = ktime_get(); + DLOG(BLIT_TIME, "total %lld\n", + ktime_to_ns(t2) - ktime_to_ns(t1)); +#endif + break; +#ifdef 
CONFIG_FB_MSM_OVERLAY + case MSMFB_OVERLAY_GET: + ret = msmfb_overlay_get(p, argp); + break; + case MSMFB_OVERLAY_SET: + if(!atomic_read(&mdpclk_on)) { + printk(KERN_ERR "MSMFB_OVERLAY_SET during suspend\n"); + ret = -EINVAL; + } else { + mutex_lock(&overlay_event_lock); + overlay_event.waked_up = USE_OVERLAY; + mutex_unlock(&overlay_event_lock); + ret = msmfb_overlay_set(p, argp); + } + printk(KERN_INFO "MSMFB_OVERLAY_SET ret=%d\n", ret); + break; + case MSMFB_OVERLAY_UNSET: + ret = msmfb_overlay_unset(p, argp); + mutex_lock(&overlay_event_lock); + overlay_event.waked_up = ~USE_OVERLAY; + wake_up(&overlay_event.event_wait); + mutex_unlock(&overlay_event_lock); + printk(KERN_INFO "MSMFB_OVERLAY_UNSET ret=%d\n", ret); break; + case MSMFB_OVERLAY_PLAY: + if(!atomic_read(&mdpclk_on)) { + printk(KERN_ERR "MSMFB_OVERLAY_PLAY during suspend\n"); + ret = -EINVAL; + } else + ret = msmfb_overlay_play(p, argp); + break; +#endif default: printk(KERN_INFO "msmfb unknown ioctl: %d\n", cmd); return -EINVAL; } - return 0; + return ret; } static struct fb_ops msmfb_ops = { @@ -429,6 +889,7 @@ static struct fb_ops msmfb_ops = { .fb_open = msmfb_open, .fb_release = msmfb_release, .fb_check_var = msmfb_check_var, + .fb_set_par = msmfb_set_par, .fb_pan_display = msmfb_pan_display, .fb_fillrect = msmfb_fillrect, .fb_copyarea = msmfb_copyarea, @@ -439,8 +900,46 @@ static struct fb_ops msmfb_ops = { static unsigned PP[16]; +#if MSMFB_DEBUG +static ssize_t debug_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + + +static ssize_t debug_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + const int debug_bufmax = 4096; + static char buffer[4096]; + int n = 0; + struct msmfb_info *msmfb = (struct msmfb_info *)file->private_data; + unsigned long irq_flags=0; -#define BITS_PER_PIXEL 16 + spin_lock_irqsave(&msmfb->update_lock, irq_flags); + n = scnprintf(buffer, debug_bufmax, "yoffset %d\n", msmfb->yoffset); + n += scnprintf(buffer + n, debug_bufmax, "frame_requested %d\n", + msmfb->frame_requested); + n += scnprintf(buffer + n, debug_bufmax, "frame_done %d\n", + msmfb->frame_done); + n += scnprintf(buffer + n, debug_bufmax, "sleeping %d\n", + msmfb->sleeping); + n += scnprintf(buffer + n, debug_bufmax, "update_frame %d\n", + msmfb->update_frame); + spin_unlock_irqrestore(&msmfb->update_lock, irq_flags); + n++; + buffer[n] = 0; + return simple_read_from_buffer(buf, count, ppos, buffer, n); +} + +static struct file_operations debug_fops = { + .read = debug_read, + .open = debug_open, +}; +#endif + +#define BITS_PER_PIXEL_DEF 32 static void setup_fb_info(struct msmfb_info *msmfb) { @@ -448,7 +947,7 @@ static void setup_fb_info(struct msmfb_info *msmfb) int r; /* finish setting up the fb_info struct */ - strncpy(fb_info->fix.id, "msmfb", 16); + strncpy(fb_info->fix.id, "msmfb31_0", 16); fb_info->fix.ypanstep = 1; fb_info->fbops = &msmfb_ops; @@ -464,12 +963,24 @@ static void setup_fb_info(struct msmfb_info *msmfb) fb_info->var.height = msmfb->panel->fb_data->height; fb_info->var.xres_virtual = msmfb->xres; fb_info->var.yres_virtual = msmfb->yres * 2; - fb_info->var.bits_per_pixel = BITS_PER_PIXEL; + fb_info->var.bits_per_pixel = BITS_PER_PIXEL_DEF; fb_info->var.accel_flags = 0; + fb_info->var.reserved[3] = 60; fb_info->var.yoffset = 0; if (msmfb->panel->caps & MSMFB_CAP_PARTIAL_UPDATES) { + /* set the param in the fixed screen, so userspace can't + * change it. This will be used to check for the + * capability. 
*/ + + /* FIX ME: every panel support partial update? + fb_info->fix.reserved[0] = 0x5444; + fb_info->fix.reserved[1] = 0x5055; + */ + + /* This preloads the value so that if userspace doesn't + * change it, it will be a full update */ fb_info->var.reserved[0] = 0x54445055; fb_info->var.reserved[1] = 0; fb_info->var.reserved[2] = (uint16_t)msmfb->xres | @@ -486,12 +997,23 @@ static void setup_fb_info(struct msmfb_info *msmfb) fb_info->var.blue.length = 5; fb_info->var.blue.msb_right = 0; + mdp->set_output_format(mdp, fb_info->var.bits_per_pixel); + mdp->set_panel_size(mdp, msmfb->xres, msmfb->yres); + r = fb_alloc_cmap(&fb_info->cmap, 16, 0); fb_info->pseudo_palette = PP; PP[0] = 0; for (r = 1; r < 16; r++) PP[r] = 0xffffffff; + + /* Jay add, 7/1/09' */ +#if (defined(CONFIG_USB_FUNCTION_PROJECTOR) || defined(CONFIG_USB_ANDROID_PROJECTOR)) + msm_fb_data.xres = msmfb->xres; + msm_fb_data.yres = msmfb->yres; + printk(KERN_INFO "setup_fb_info msmfb->xres %d, msmfb->yres %d\n", + msmfb->xres,msmfb->yres); +#endif } static int setup_fbmem(struct msmfb_info *msmfb, struct platform_device *pdev) @@ -499,28 +1021,30 @@ static int setup_fbmem(struct msmfb_info *msmfb, struct platform_device *pdev) struct fb_info *fb = msmfb->fb; struct resource *resource; unsigned long size = msmfb->xres * msmfb->yres * - (BITS_PER_PIXEL >> 3) * 2; + BYTES_PER_PIXEL(msmfb) * 2; + unsigned long resource_size; unsigned char *fbram; /* board file might have attached a resource describing an fb */ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!resource) return -EINVAL; + resource_size = resource->end - resource->start + 1; /* check the resource is large enough to fit the fb */ - if (resource->end - resource->start < size) { - printk(KERN_ERR "allocated resource is too small for " + if (resource_size < size) { + printk(KERN_ERR "msmfb: allocated resource is too small for " "fb\n"); return -ENOMEM; } fb->fix.smem_start = resource->start; - fb->fix.smem_len = resource->end - resource->start; - fbram = ioremap(resource->start, - resource->end - resource->start); + fb->fix.smem_len = resource_size; + fbram = ioremap(resource->start, resource_size); if (fbram == 0) { printk(KERN_ERR "msmfb: cannot allocate fbram!\n"); return -ENOMEM; } + fb->screen_base = fbram; return 0; } @@ -550,10 +1074,19 @@ static int msmfb_probe(struct platform_device *pdev) msmfb->xres = panel->fb_data->xres; msmfb->yres = panel->fb_data->yres; +#ifdef CONFIG_ION_MSM + msmfb->iclient = msm_ion_client_create(-1, pdev->name); +#endif + ret = setup_fbmem(msmfb, pdev); if (ret) goto error_setup_fbmem; +#if (defined(CONFIG_USB_FUNCTION_PROJECTOR) || defined(CONFIG_USB_ANDROID_PROJECTOR)) + /* Jay, 8/1/09' */ + msmfb_set_var(msmfb->fb->screen_base, 0); +#endif + setup_fb_info(msmfb); spin_lock_init(&msmfb->update_lock); @@ -569,6 +1102,35 @@ static int msmfb_probe(struct platform_device *pdev) msmfb->black = kzalloc(msmfb->fb->var.bits_per_pixel*msmfb->xres, GFP_KERNEL); + wake_lock_init(&msmfb->idle_lock, WAKE_LOCK_IDLE, "msmfb_idle_lock"); + +#ifdef CONFIG_HAS_EARLYSUSPEND + INIT_WORK(&msmfb->msmfb_resume_work, msmfb_resume); + msmfb->early_suspend.suspend = msmfb_suspend; + msmfb->early_suspend.resume = msmfb_resume_handler; + msmfb->early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB; + register_early_suspend(&msmfb->early_suspend); + + msmfb->earlier_suspend.suspend = msmfb_earlier_suspend; + msmfb->earlier_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN; + register_early_suspend(&msmfb->earlier_suspend); +#ifdef 
CONFIG_HTC_ONMODE_CHARGING + msmfb->onchg_suspend.suspend = msmfb_onchg_suspend; + msmfb->onchg_suspend.resume = msmfb_onchg_resume_handler; + msmfb->onchg_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB; + register_onchg_suspend(&msmfb->onchg_suspend); + + msmfb->onchg_earlier_suspend.suspend = msmfb_onchg_earlier_suspend; + msmfb->onchg_earlier_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN; + register_onchg_suspend(&msmfb->onchg_earlier_suspend); +#endif +#endif + +#if MSMFB_DEBUG + debugfs_create_file("msm_fb", S_IFREG | S_IRUGO, NULL, + (void *)fb->par, &debug_fops); +#endif + printk(KERN_INFO "msmfb_probe() installing %d x %d panel\n", msmfb->xres, msmfb->yres); @@ -577,7 +1139,6 @@ static int msmfb_probe(struct platform_device *pdev) hrtimer_init(&msmfb->fake_vsync, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - msmfb->fake_vsync.function = msmfb_fake_vsync; ret = register_framebuffer(fb); @@ -586,9 +1147,30 @@ static int msmfb_probe(struct platform_device *pdev) msmfb->sleeping = WAKING; +#ifdef CONFIG_FB_MSM_OVERLAY + /*init wait event*/ + init_waitqueue_head(&overlay_event.event_wait); + /*init waked_up value*/ + overlay_event.waked_up = ~USE_OVERLAY; +#endif + +#ifdef CONFIG_FB_MSM_LOGO + if (!load_565rle_image(INIT_IMAGE_FILE)) { + /* Flip buffer */ + msmfb->update_info.left = 0; + msmfb->update_info.top = 0; + msmfb->update_info.eright = info->var.xres; + msmfb->update_info.ebottom = info->var.yres; + msmfb_pan_update(info, 0, 0, fb->var.xres, + fb->var.yres, 0, 1); + } +#endif + /* Jay, 29/12/08' */ + display_notifier(display_notifier_callback, NOTIFY_MSM_FB); return 0; error_register_framebuffer: + wake_lock_destroy(&msmfb->idle_lock); destroy_workqueue(msmfb->resume_workqueue); error_create_workqueue: iounmap(fb->screen_base); @@ -597,9 +1179,35 @@ static int msmfb_probe(struct platform_device *pdev) return ret; } +static void msmfb_shutdown(struct platform_device *pdev) +{ + struct msm_panel_data *panel = pdev->dev.platform_data; + struct fb_info *fb; + struct msmfb_info *msmfb; + + printk(KERN_INFO "%s\n", __func__); + fb = registered_fb[0]; + if (!fb) { + printk(KERN_ERR "fb0 unavailable.\n"); + return; + } + msmfb = fb->par; + + mdp->dma(mdp, virt_to_phys(msmfb->black), 0, + msmfb->fb->var.xres, msmfb->fb->var.yres, 0, 0, + NULL, panel->interface_type); + + if (panel->blank) + panel->blank(panel); + + if (panel->shutdown) + panel->shutdown(panel); +} + static struct platform_driver msm_panel_driver = { /* need to write remove */ .probe = msmfb_probe, + .shutdown = msmfb_shutdown, .driver = {.name = "msm_panel"}, }; diff --git a/drivers/video/via/viafbdev.h b/drivers/video/via/viafbdev.h index d66f963e930e3..137996dc547e9 100644 --- a/drivers/video/via/viafbdev.h +++ b/drivers/video/via/viafbdev.h @@ -94,9 +94,6 @@ extern int viafb_LCD_ON; extern int viafb_DVI_ON; extern int viafb_hotplug; -extern int strict_strtoul(const char *cp, unsigned int base, - unsigned long *res); - u8 viafb_gpio_i2c_read_lvds(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info, u8 index); diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index cc2f73e03475b..b0043fb26a4d5 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -371,6 +371,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq) /* detach_buf clears data, so grab it now. 
 	 */
 	buf = vq->data[i];
 	detach_buf(vq, i);
+	vq->vring.avail->idx--;
 	END_USE(vq);
 	return buf;
 }
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index e5f74416d4b76..62a3702c10a71 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -84,7 +84,9 @@ static const u8 ds2482_chan_rd[8] =
 static int ds2482_probe(struct i2c_client *client,
 			const struct i2c_device_id *id);
 static int ds2482_remove(struct i2c_client *client);
-
+static int ds2482_suspend(struct i2c_client *client,
+			  pm_message_t mesg);
+static int ds2482_resume(struct i2c_client *client);
 
 /**
  * Driver data (common to all clients)
@@ -101,6 +103,8 @@ static struct i2c_driver ds2482_driver = {
 	},
 	.probe		= ds2482_probe,
 	.remove		= ds2482_remove,
+	.suspend	= ds2482_suspend,
+	.resume		= ds2482_resume,
 	.id_table	= ds2482_id,
 };
@@ -130,6 +134,52 @@ struct ds2482_data {
 	u8			reg_config;
 };
 
+static int ds2482_write_byte(struct ds2482_data *pdev, u8 cmd)
+{
+	int ret;
+	int retry = 5;
+
+	do {
+		ret = i2c_smbus_write_byte(pdev->client, cmd);
+		if (ret >= 0)
+			break;
+		dev_warn(&pdev->client->dev,
+			 "i2c write %x failed, %d, retries left %d\n",
+			 cmd, ret, retry);
+	} while(retry--);
+	return ret;
+}
+
+static int ds2482_write_byte_data(struct ds2482_data *pdev, u8 cmd, u8 byte)
+{
+	int ret;
+	int retry = 5;
+
+	do {
+		ret = i2c_smbus_write_byte_data(pdev->client, cmd, byte);
+		if (ret >= 0)
+			break;
+		dev_warn(&pdev->client->dev,
+			 "i2c write %x %x failed, %d, retries left %d\n",
+			 cmd, byte, ret, retry);
+	} while(retry--);
+	return ret;
+}
+
+static int ds2482_read_byte(struct ds2482_data *pdev)
+{
+	int ret;
+	int retry = 5;
+
+	do {
+		ret = i2c_smbus_read_byte(pdev->client);
+		if (ret >= 0)
+			break;
+		dev_warn(&pdev->client->dev,
+			 "i2c read failed, %d, retries left %d\n", ret, retry);
+	} while(retry--);
+	return ret;
+}
 
 /**
  * Sets the read pointer.
@@ -140,8 +190,7 @@ struct ds2482_data { static inline int ds2482_select_register(struct ds2482_data *pdev, u8 read_ptr) { if (pdev->read_prt != read_ptr) { - if (i2c_smbus_write_byte_data(pdev->client, - DS2482_CMD_SET_READ_PTR, + if (ds2482_write_byte_data(pdev, DS2482_CMD_SET_READ_PTR, read_ptr) < 0) return -1; @@ -160,7 +209,7 @@ static inline int ds2482_select_register(struct ds2482_data *pdev, u8 read_ptr) */ static inline int ds2482_send_cmd(struct ds2482_data *pdev, u8 cmd) { - if (i2c_smbus_write_byte(pdev->client, cmd) < 0) + if (ds2482_write_byte(pdev, cmd) < 0) return -1; pdev->read_prt = DS2482_PTR_CODE_STATUS; @@ -180,7 +229,7 @@ static inline int ds2482_send_cmd(struct ds2482_data *pdev, u8 cmd) static inline int ds2482_send_cmd_data(struct ds2482_data *pdev, u8 cmd, u8 byte) { - if (i2c_smbus_write_byte_data(pdev->client, cmd, byte) < 0) + if (ds2482_write_byte_data(pdev, cmd, byte) < 0) return -1; /* all cmds leave in STATUS, except CONFIG */ @@ -231,13 +280,13 @@ static int ds2482_wait_1wire_idle(struct ds2482_data *pdev) */ static int ds2482_set_channel(struct ds2482_data *pdev, u8 channel) { - if (i2c_smbus_write_byte_data(pdev->client, DS2482_CMD_CHANNEL_SELECT, + if (ds2482_write_byte_data(pdev, DS2482_CMD_CHANNEL_SELECT, ds2482_chan_wr[channel]) < 0) return -1; pdev->read_prt = DS2482_PTR_CODE_CHANNEL; pdev->channel = -1; - if (i2c_smbus_read_byte(pdev->client) == ds2482_chan_rd[channel]) { + if (ds2482_read_byte(pdev) == ds2482_chan_rd[channel]) { pdev->channel = channel; return 0; } @@ -361,7 +410,7 @@ static u8 ds2482_w1_read_byte(void *data) ds2482_select_register(pdev, DS2482_PTR_CODE_DATA); /* Read the data byte */ - result = i2c_smbus_read_byte(pdev->client); + result = ds2482_read_byte(pdev); mutex_unlock(&pdev->access_lock); @@ -408,6 +457,31 @@ static u8 ds2482_w1_reset_bus(void *data) } +static int ds2482_suspend(struct i2c_client *client, + pm_message_t mesg) +{ + void (*set_slp_n)(int n) = + client->dev.platform_data; + + if (set_slp_n) + set_slp_n(0); + + return 0; +} + +static int ds2482_resume(struct i2c_client *client) +{ + void (*set_slp_n)(int n) = + client->dev.platform_data; + + if (set_slp_n) { + set_slp_n(1); + udelay(100); + } + + return 0; +} + static int ds2482_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -439,7 +513,7 @@ static int ds2482_probe(struct i2c_client *client, ndelay(525); /* Read the status byte - only reset bit and line should be set */ - temp1 = i2c_smbus_read_byte(client); + temp1 = ds2482_read_byte(data); if (temp1 != (DS2482_REG_STS_LL | DS2482_REG_STS_RST)) { dev_warn(&client->dev, "DS2482 reset status " "0x%02X - not a DS2482\n", temp1); diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h index f3b636d7cafe7..4c425c07db7f8 100644 --- a/drivers/w1/w1_family.h +++ b/drivers/w1/w1_family.h @@ -36,6 +36,7 @@ #define W1_THERM_DS18B20 0x28 #define W1_EEPROM_DS2431 0x2D #define W1_FAMILY_DS2760 0x30 +#define W1_FAMILY_DS2784 0x32 #define MAXNAMELEN 32 diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c index 596ba604e78d1..51b5551b4e3fe 100644 --- a/drivers/watchdog/davinci_wdt.c +++ b/drivers/watchdog/davinci_wdt.c @@ -202,7 +202,6 @@ static struct miscdevice davinci_wdt_miscdev = { static int __devinit davinci_wdt_probe(struct platform_device *pdev) { int ret = 0, size; - struct resource *res; struct device *dev = &pdev->dev; wdt_clk = clk_get(dev, NULL); @@ -216,31 +215,31 @@ static int __devinit davinci_wdt_probe(struct platform_device *pdev) dev_info(dev, "heartbeat 
%d sec\n", heartbeat); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res == NULL) { + wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (wdt_mem == NULL) { dev_err(dev, "failed to get memory region resource\n"); return -ENOENT; } - size = resource_size(res); - wdt_mem = request_mem_region(res->start, size, pdev->name); - - if (wdt_mem == NULL) { + size = resource_size(wdt_mem); + if (!request_mem_region(wdt_mem->start, size, pdev->name)) { dev_err(dev, "failed to get memory region\n"); return -ENOENT; } - wdt_base = ioremap(res->start, size); + wdt_base = ioremap(wdt_mem->start, size); if (!wdt_base) { dev_err(dev, "failed to map memory region\n"); + release_mem_region(wdt_mem->start, size); + wdt_mem = NULL; return -ENOMEM; } ret = misc_register(&davinci_wdt_miscdev); if (ret < 0) { dev_err(dev, "cannot register misc device\n"); - release_resource(wdt_mem); - kfree(wdt_mem); + release_mem_region(wdt_mem->start, size); + wdt_mem = NULL; } else { set_bit(WDT_DEVICE_INITED, &wdt_status); } @@ -253,8 +252,7 @@ static int __devexit davinci_wdt_remove(struct platform_device *pdev) { misc_deregister(&davinci_wdt_miscdev); if (wdt_mem) { - release_resource(wdt_mem); - kfree(wdt_mem); + release_mem_region(wdt_mem->start, resource_size(wdt_mem)); wdt_mem = NULL; } diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c index 3053ff05ca410..1fe9bc5a96517 100644 --- a/drivers/watchdog/max63xx_wdt.c +++ b/drivers/watchdog/max63xx_wdt.c @@ -270,7 +270,6 @@ static int __devinit max63xx_wdt_probe(struct platform_device *pdev) { int ret = 0; int size; - struct resource *res; struct device *dev = &pdev->dev; struct max63xx_timeout *table; @@ -294,21 +293,19 @@ static int __devinit max63xx_wdt_probe(struct platform_device *pdev) max63xx_pdev = pdev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res == NULL) { + wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (wdt_mem == NULL) { dev_err(dev, "failed to get memory region resource\n"); return -ENOENT; } - size = resource_size(res); - wdt_mem = request_mem_region(res->start, size, pdev->name); - - if (wdt_mem == NULL) { + size = resource_size(wdt_mem); + if (!request_mem_region(wdt_mem->start, size, pdev->name)) { dev_err(dev, "failed to get memory region\n"); return -ENOENT; } - wdt_base = ioremap(res->start, size); + wdt_base = ioremap(wdt_mem->start, size); if (!wdt_base) { dev_err(dev, "failed to map memory region\n"); ret = -ENOMEM; @@ -326,8 +323,8 @@ static int __devinit max63xx_wdt_probe(struct platform_device *pdev) out_unmap: iounmap(wdt_base); out_request: - release_resource(wdt_mem); - kfree(wdt_mem); + release_mem_region(wdt_mem->start, size); + wdt_mem = NULL; return ret; } @@ -336,8 +333,7 @@ static int __devexit max63xx_wdt_remove(struct platform_device *pdev) { misc_deregister(&max63xx_wdt_miscdev); if (wdt_mem) { - release_resource(wdt_mem); - kfree(wdt_mem); + release_mem_region(wdt_mem->start, resource_size(wdt_mem)); wdt_mem = NULL; } diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c index bf5b97c546eb4..8c8c7d54497c4 100644 --- a/drivers/watchdog/pnx4008_wdt.c +++ b/drivers/watchdog/pnx4008_wdt.c @@ -254,7 +254,6 @@ static struct miscdevice pnx4008_wdt_miscdev = { static int __devinit pnx4008_wdt_probe(struct platform_device *pdev) { int ret = 0, size; - struct resource *res; if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT) heartbeat = DEFAULT_HEARTBEAT; @@ -262,42 +261,42 @@ static int __devinit pnx4008_wdt_probe(struct 
platform_device *pdev) printk(KERN_INFO MODULE_NAME "PNX4008 Watchdog Timer: heartbeat %d sec\n", heartbeat); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res == NULL) { + wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (wdt_mem == NULL) { printk(KERN_INFO MODULE_NAME "failed to get memory region resouce\n"); return -ENOENT; } - size = resource_size(res); - wdt_mem = request_mem_region(res->start, size, pdev->name); + size = resource_size(wdt_mem); - if (wdt_mem == NULL) { + if (!request_mem_region(wdt_mem->start, size, pdev->name)) { printk(KERN_INFO MODULE_NAME "failed to get memory region\n"); return -ENOENT; } - wdt_base = (void __iomem *)IO_ADDRESS(res->start); + wdt_base = (void __iomem *)IO_ADDRESS(wdt_mem->start); wdt_clk = clk_get(&pdev->dev, NULL); if (IS_ERR(wdt_clk)) { ret = PTR_ERR(wdt_clk); - release_resource(wdt_mem); - kfree(wdt_mem); + release_mem_region(wdt_mem->start, size); + wdt_mem = NULL; goto out; } ret = clk_enable(wdt_clk); if (ret) { - release_resource(wdt_mem); - kfree(wdt_mem); + release_mem_region(wdt_mem->start, size); + wdt_mem = NULL; + clk_put(wdt_clk); goto out; } ret = misc_register(&pnx4008_wdt_miscdev); if (ret < 0) { printk(KERN_ERR MODULE_NAME "cannot register misc device\n"); - release_resource(wdt_mem); - kfree(wdt_mem); + release_mem_region(wdt_mem->start, size); + wdt_mem = NULL; clk_disable(wdt_clk); clk_put(wdt_clk); } else { @@ -320,8 +319,7 @@ static int __devexit pnx4008_wdt_remove(struct platform_device *pdev) clk_put(wdt_clk); if (wdt_mem) { - release_resource(wdt_mem); - kfree(wdt_mem); + release_mem_region(wdt_mem->start, resource_size(wdt_mem)); wdt_mem = NULL; } return 0; diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c index ae53662c29bce..8303c576c57e0 100644 --- a/drivers/watchdog/s3c2410_wdt.c +++ b/drivers/watchdog/s3c2410_wdt.c @@ -402,7 +402,6 @@ static inline void s3c2410wdt_cpufreq_deregister(void) static int __devinit s3c2410wdt_probe(struct platform_device *pdev) { - struct resource *res; struct device *dev; unsigned int wtcon; int started = 0; @@ -416,20 +415,19 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev) /* get the memory region for the watchdog timer */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res == NULL) { + wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (wdt_mem == NULL) { dev_err(dev, "no memory resource specified\n"); return -ENOENT; } - size = resource_size(res); - wdt_mem = request_mem_region(res->start, size, pdev->name); - if (wdt_mem == NULL) { + size = resource_size(wdt_mem); + if (!request_mem_region(wdt_mem->start, size, pdev->name)) { dev_err(dev, "failed to get memory region\n"); return -EBUSY; } - wdt_base = ioremap(res->start, size); + wdt_base = ioremap(wdt_mem->start, size); if (wdt_base == NULL) { dev_err(dev, "failed to ioremap() region\n"); ret = -EINVAL; @@ -524,8 +522,8 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev) iounmap(wdt_base); err_req: - release_resource(wdt_mem); - kfree(wdt_mem); + release_mem_region(wdt_mem->start, size); + wdt_mem = NULL; return ret; } @@ -545,8 +543,7 @@ static int __devexit s3c2410wdt_remove(struct platform_device *dev) iounmap(wdt_base); - release_resource(wdt_mem); - kfree(wdt_mem); + release_mem_region(wdt_mem->start, resource_size(wdt_mem)); wdt_mem = NULL; return 0; } diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c index 808372883e88e..c7ea4bedfe63e 100644 --- a/drivers/watchdog/sp5100_tco.c 
+++ b/drivers/watchdog/sp5100_tco.c
@@ -42,6 +42,7 @@
 #define PFX TCO_MODULE_NAME ": "
 
 /* internal variables */
+static u32 tcobase_phys;
 static void __iomem *tcobase;
 static unsigned int pm_iobase;
 static DEFINE_SPINLOCK(tco_lock);	/* Guards the hardware */
@@ -305,10 +306,18 @@ static unsigned char __devinit sp5100_tco_setupdevice(void)
 	/* Low three bits of BASE0 are reserved. */
 	val = val << 8 | (inb(SP5100_IO_PM_DATA_REG) & 0xf8);
 
+	if (!request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE,
+								"SP5100 TCO")) {
+		printk(KERN_ERR PFX "mmio address 0x%04x already in use\n",
+			val);
+		goto unreg_region;
+	}
+	tcobase_phys = val;
+
 	tcobase = ioremap(val, SP5100_WDT_MEM_MAP_SIZE);
 	if (tcobase == 0) {
 		printk(KERN_ERR PFX "failed to get tcobase address\n");
-		goto unreg_region;
+		goto unreg_mem_region;
 	}
 
 	/* Enable watchdog decode bit */
@@ -346,7 +355,8 @@ static unsigned char __devinit sp5100_tco_setupdevice(void)
 	/* Done */
 	return 1;
 
-	iounmap(tcobase);
+unreg_mem_region:
+	release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
 unreg_region:
 	release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
 exit:
@@ -401,6 +411,7 @@ static int __devinit sp5100_tco_init(struct platform_device *dev)
 
 exit:
 	iounmap(tcobase);
+	release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
 	release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
 	return ret;
 }
@@ -414,6 +425,7 @@ static void __devexit sp5100_tco_cleanup(void)
 	/* Deregister */
 	misc_deregister(&sp5100_tco_miscdev);
 	iounmap(tcobase);
+	release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
 	release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
 }
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 24177272bcb84..0b5366b5be201 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -34,74 +35,68 @@ enum shutdown_state {
 /* Ignore multiple shutdown requests.
*/ static enum shutdown_state shutting_down = SHUTDOWN_INVALID; -#ifdef CONFIG_PM_SLEEP -static int xen_hvm_suspend(void *data) -{ - int err; - struct sched_shutdown r = { .reason = SHUTDOWN_suspend }; - int *cancelled = data; - - BUG_ON(!irqs_disabled()); - - err = sysdev_suspend(PMSG_SUSPEND); - if (err) { - printk(KERN_ERR "xen_hvm_suspend: sysdev_suspend failed: %d\n", - err); - return err; - } - - *cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r); +struct suspend_info { + int cancelled; + unsigned long arg; /* extra hypercall argument */ + void (*pre)(void); + void (*post)(int cancelled); +}; - xen_hvm_post_suspend(*cancelled); +static void xen_hvm_post_suspend(int cancelled) +{ + xen_arch_hvm_post_suspend(cancelled); gnttab_resume(); +} - if (!*cancelled) { - xen_irq_resume(); - xen_console_resume(); - xen_timer_resume(); - } - - sysdev_resume(); +static void xen_pre_suspend(void) +{ + xen_mm_pin_all(); + gnttab_suspend(); + xen_arch_pre_suspend(); +} - return 0; +static void xen_post_suspend(int cancelled) +{ + xen_arch_post_suspend(cancelled); + gnttab_resume(); + xen_mm_unpin_all(); } +#ifdef CONFIG_HIBERNATE_CALLBACKS static int xen_suspend(void *data) { + struct suspend_info *si = data; int err; - int *cancelled = data; BUG_ON(!irqs_disabled()); - err = sysdev_suspend(PMSG_SUSPEND); + err = syscore_suspend(); if (err) { - printk(KERN_ERR "xen_suspend: sysdev_suspend failed: %d\n", + printk(KERN_ERR "xen_suspend: system core suspend failed: %d\n", err); return err; } - xen_mm_pin_all(); - gnttab_suspend(); - xen_pre_suspend(); + if (si->pre) + si->pre(); /* * This hypercall returns 1 if suspend was cancelled * or the domain was merely checkpointed, and 0 if it * is resuming in a new domain. */ - *cancelled = HYPERVISOR_suspend(virt_to_mfn(xen_start_info)); + si->cancelled = HYPERVISOR_suspend(si->arg); - xen_post_suspend(*cancelled); - gnttab_resume(); - xen_mm_unpin_all(); + if (si->post) + si->post(si->cancelled); - if (!*cancelled) { + if (!si->cancelled) { xen_irq_resume(); xen_console_resume(); xen_timer_resume(); } - sysdev_resume(); + syscore_resume(); return 0; } @@ -109,7 +104,7 @@ static int xen_suspend(void *data) static void do_suspend(void) { int err; - int cancelled = 1; + struct suspend_info si; shutting_down = SHUTDOWN_SUSPEND; @@ -124,7 +119,7 @@ static void do_suspend(void) } #endif - err = dpm_suspend_start(PMSG_SUSPEND); + err = dpm_suspend_start(PMSG_FREEZE); if (err) { printk(KERN_ERR "xen suspend: dpm_suspend_start %d\n", err); goto out_thaw; @@ -133,32 +128,41 @@ static void do_suspend(void) printk(KERN_DEBUG "suspending xenstore...\n"); xs_suspend(); - err = dpm_suspend_noirq(PMSG_SUSPEND); + err = dpm_suspend_noirq(PMSG_FREEZE); if (err) { printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err); goto out_resume; } - if (xen_hvm_domain()) - err = stop_machine(xen_hvm_suspend, &cancelled, cpumask_of(0)); - else - err = stop_machine(xen_suspend, &cancelled, cpumask_of(0)); + si.cancelled = 1; - dpm_resume_noirq(PMSG_RESUME); + if (xen_hvm_domain()) { + si.arg = 0UL; + si.pre = NULL; + si.post = &xen_hvm_post_suspend; + } else { + si.arg = virt_to_mfn(xen_start_info); + si.pre = &xen_pre_suspend; + si.post = &xen_post_suspend; + } + + err = stop_machine(xen_suspend, &si, cpumask_of(0)); + + dpm_resume_noirq(si.cancelled ? 
PMSG_THAW : PMSG_RESTORE); if (err) { printk(KERN_ERR "failed to start xen_suspend: %d\n", err); - cancelled = 1; + si.cancelled = 1; } out_resume: - if (!cancelled) { + if (!si.cancelled) { xen_arch_resume(); xs_resume(); } else xs_suspend_cancel(); - dpm_resume_end(PMSG_RESUME); + dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE); /* Make sure timer events get retriggered on all CPUs */ clock_was_set(); @@ -170,7 +174,24 @@ static void do_suspend(void) #endif shutting_down = SHUTDOWN_INVALID; } -#endif /* CONFIG_PM_SLEEP */ +#endif /* CONFIG_HIBERNATE_CALLBACKS */ + +struct shutdown_handler { + const char *command; + void (*cb)(void); +}; + +static void do_poweroff(void) +{ + shutting_down = SHUTDOWN_POWEROFF; + orderly_poweroff(false); +} + +static void do_reboot(void) +{ + shutting_down = SHUTDOWN_POWEROFF; /* ? */ + ctrl_alt_del(); +} static void shutdown_handler(struct xenbus_watch *watch, const char **vec, unsigned int len) @@ -178,6 +199,16 @@ static void shutdown_handler(struct xenbus_watch *watch, char *str; struct xenbus_transaction xbt; int err; + static struct shutdown_handler handlers[] = { + { "poweroff", do_poweroff }, + { "halt", do_poweroff }, + { "reboot", do_reboot }, +#ifdef CONFIG_HIBERNATE_CALLBACKS + { "suspend", do_suspend }, +#endif + {NULL, NULL}, + }; + static struct shutdown_handler *handler; if (shutting_down != SHUTDOWN_INVALID) return; @@ -194,7 +225,14 @@ static void shutdown_handler(struct xenbus_watch *watch, return; } - xenbus_write(xbt, "control", "shutdown", ""); + for (handler = &handlers[0]; handler->command; handler++) { + if (strcmp(str, handler->command) == 0) + break; + } + + /* Only acknowledge commands which we are prepared to handle. */ + if (handler->cb) + xenbus_write(xbt, "control", "shutdown", ""); err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) { @@ -202,17 +240,8 @@ static void shutdown_handler(struct xenbus_watch *watch, goto again; } - if (strcmp(str, "poweroff") == 0 || - strcmp(str, "halt") == 0) { - shutting_down = SHUTDOWN_POWEROFF; - orderly_poweroff(false); - } else if (strcmp(str, "reboot") == 0) { - shutting_down = SHUTDOWN_POWEROFF; /* ? 
*/ - ctrl_alt_del(); -#ifdef CONFIG_PM_SLEEP - } else if (strcmp(str, "suspend") == 0) { - do_suspend(); -#endif + if (handler->cb) { + handler->cb(); } else { printk(KERN_INFO "Ignoring shutdown request: %s\n", str); shutting_down = SHUTDOWN_INVALID; @@ -291,27 +320,18 @@ static int shutdown_event(struct notifier_block *notifier, return NOTIFY_DONE; } -static int __init __setup_shutdown_event(void) -{ - /* Delay initialization in the PV on HVM case */ - if (xen_hvm_domain()) - return 0; - - if (!xen_pv_domain()) - return -ENODEV; - - return xen_setup_shutdown_event(); -} - int xen_setup_shutdown_event(void) { static struct notifier_block xenstore_notifier = { .notifier_call = shutdown_event }; + + if (!xen_domain()) + return -ENODEV; register_xenstore_notifier(&xenstore_notifier); return 0; } EXPORT_SYMBOL_GPL(xen_setup_shutdown_event); -subsys_initcall(__setup_shutdown_event); +subsys_initcall(xen_setup_shutdown_event); diff --git a/fs/Kconfig b/fs/Kconfig index 663d3389952f9..a219eb3794cbb 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -99,6 +99,7 @@ menu "DOS/FAT/NT Filesystems" source "fs/fat/Kconfig" source "fs/ntfs/Kconfig" +source "fs/exfat/Kconfig" endmenu endif # BLOCK @@ -256,4 +257,10 @@ endif source "fs/nls/Kconfig" source "fs/dlm/Kconfig" +config DYNAMIC_FSYNC + bool "dynamic file sync control" + default n + help + An experimental file sync control using Android's early suspend / late resume drivers + endmenu diff --git a/fs/Makefile b/fs/Makefile index 7cb19ebff625b..cc6cd29f222e3 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -92,6 +92,7 @@ obj-$(CONFIG_CIFS) += cifs/ obj-$(CONFIG_NCP_FS) += ncpfs/ obj-$(CONFIG_HPFS_FS) += hpfs/ obj-$(CONFIG_NTFS_FS) += ntfs/ +obj-$(CONFIG_EXFAT_FS) += exfat/ obj-$(CONFIG_UFS_FS) += ufs/ obj-$(CONFIG_EFS_FS) += efs/ obj-$(CONFIG_JFFS2_FS) += jffs2/ @@ -124,3 +125,5 @@ obj-$(CONFIG_CEPH_FS) += ceph/ # Patched by YAFFS obj-$(CONFIG_YAFFS_FS) += yaffs2/ + +obj-$(CONFIG_DYNAMIC_FSYNC) += dyn_sync_cntrl.o diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c index 65794b8fe79eb..615c8d33bea81 100644 --- a/fs/adfs/inode.c +++ b/fs/adfs/inode.c @@ -383,7 +383,7 @@ int adfs_write_inode(struct inode *inode, struct writeback_control *wbc) obj.attr = ADFS_I(inode)->attr; obj.size = inode->i_size; - ret = adfs_dir_update(sb, &obj, wbc->sync_mode == WB_SYNC_ALL); + ret = adfs_dir_update(sb, &obj, 1 /* XXX: fix fsync and use 'wbc->sync_mode == WB_SYNC_ALL' */); unlock_kernel(); return ret; } diff --git a/fs/affs/file.c b/fs/affs/file.c index 0a90dcd46de28..465d7ae54c599 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -931,6 +931,7 @@ int affs_file_fsync(struct file *filp, int datasync) int ret, err; ret = write_inode_now(inode, 0); + /* XXX: could just sync the buffer been dirtied by write_inode */ err = sync_blockdev(inode->i_sb->s_bdev); if (!ret) ret = err; diff --git a/fs/aio.c b/fs/aio.c index 26869cde39535..88f0ed5144252 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -520,7 +520,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req) ctx->reqs_active--; if (unlikely(!ctx->reqs_active && ctx->dead)) - wake_up(&ctx->wait); + wake_up_all(&ctx->wait); } static void aio_fput_routine(struct work_struct *data) @@ -1229,7 +1229,7 @@ static void io_destroy(struct kioctx *ioctx) * by other CPUs at this point. Right now, we rely on the * locking done by the above calls to ensure this consistency. 
 	 */
-	wake_up(&ioctx->wait);
+	wake_up_all(&ioctx->wait);
 	put_ioctx(ioctx);	/* once for the lookup */
 }
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index a8e37f81d097b..692b2700b8b64 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -151,7 +151,7 @@ static int bfs_write_inode(struct inode *inode, struct writeback_control *wbc)
 	di->i_eoffset = cpu_to_le32(i_sblock * BFS_BSIZE + inode->i_size - 1);
 	mark_buffer_dirty(bh);
-	if (wbc->sync_mode == WB_SYNC_ALL) {
+	if (1 /* XXX: fix fsync and use wbc->sync_mode == WB_SYNC_ALL */ ) {
 		sync_dirty_buffer(bh);
 		if (buffer_req(bh) && !buffer_uptodate(bh))
 			err = -EIO;
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index d5b640ba6cb1a..e7d78eff65128 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -133,6 +133,25 @@ static int padzero(unsigned long elf_bss)
 #define ELF_BASE_PLATFORM NULL
 #endif
 
+/*
+ * Use get_random_int() to implement AT_RANDOM while avoiding depletion
+ * of the entropy pool.
+ */
+static void get_atrandom_bytes(unsigned char *buf, size_t nbytes)
+{
+	unsigned char *p = buf;
+
+	while (nbytes) {
+		unsigned int random_variable;
+		size_t chunk = min(nbytes, sizeof(random_variable));
+
+		random_variable = get_random_int();
+		memcpy(p, &random_variable, chunk);
+		p += chunk;
+		nbytes -= chunk;
+	}
+}
+
 static int
 create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 		unsigned long load_addr, unsigned long interp_load_addr)
@@ -194,7 +213,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
 	/*
 	 * Generate 16 random bytes for userspace PRNG seeding.
 	 */
-	get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
+	get_atrandom_bytes(k_rand_bytes, sizeof(k_rand_bytes));
 	u_rand_bytes = (elf_addr_t __user *)
 		       STACK_ALLOC(p, sizeof(k_rand_bytes));
 	if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
@@ -941,9 +960,13 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 	current->mm->start_stack = bprm->p;
 
 #ifdef arch_randomize_brk
-	if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1))
+	if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
 		current->mm->brk = current->mm->start_brk =
 			arch_randomize_brk(current->mm);
+#ifdef CONFIG_COMPAT_BRK
+		current->brk_randomized = 1;
+#endif
+	}
 #endif
 
 	if (current->personality & MMAP_PAGE_ZERO) {
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 63039ed9576f7..2bc5dc644b4cb 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1864,6 +1864,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
 	kfree(psinfo);
 	kfree(notes);
 	kfree(fpu);
+	kfree(shdr4extnum);
 #ifdef ELF_CORE_COPY_XFPREGS
 	kfree(xfpu);
 #endif
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 889287019599a..59277ba8d2903 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1099,6 +1099,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 			if (!bdev->bd_part)
 				goto out_clear;
 
+			ret = 0;
 			if (disk->fops->open) {
 				ret = disk->fops->open(bdev, mode);
 				if (ret == -ERESTARTSYS) {
@@ -1114,18 +1115,26 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 					mutex_unlock(&bdev->bd_mutex);
 					goto restart;
 				}
-				if (ret)
-					goto out_clear;
 			}
-			if (!bdev->bd_openers) {
+
+			if (!ret && !bdev->bd_openers) {
 				bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
 				bdi = blk_get_backing_dev_info(bdev);
 				if (bdi == NULL)
 					bdi = &default_backing_dev_info;
 				bdev_inode_switch_bdi(bdev->bd_inode, bdi);
 			}
-			if (bdev->bd_invalidated)
+
+			/*
+			 * If the device is invalidated, rescan partition
+			 * if open
succeeded or failed with -ENOMEDIUM. + * The latter is necessary to prevent ghost + * partitions on a removed medium. + */ + if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM)) rescan_partitions(disk, bdev); + if (ret) + goto out_clear; } else { struct block_device *whole; whole = bdget_disk(disk, 0); @@ -1152,13 +1161,14 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) put_disk(disk); disk = NULL; if (bdev->bd_contains == bdev) { - if (bdev->bd_disk->fops->open) { + ret = 0; + if (bdev->bd_disk->fops->open) ret = bdev->bd_disk->fops->open(bdev, mode); - if (ret) - goto out_unlock_bdev; - } - if (bdev->bd_invalidated) + /* the same as first opener case, read comment there */ + if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM)) rescan_partitions(bdev->bd_disk, bdev); + if (ret) + goto out_unlock_bdev; } } bdev->bd_openers++; @@ -1223,6 +1233,8 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) res = __blkdev_get(bdev, mode, 0); if (whole) { + struct gendisk *disk = whole->bd_disk; + /* finish claiming */ mutex_lock(&bdev->bd_mutex); spin_lock(&bdev_lock); @@ -1249,15 +1261,16 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) spin_unlock(&bdev_lock); /* - * Block event polling for write claims. Any write - * holder makes the write_holder state stick until all - * are released. This is good enough and tracking - * individual writeable reference is too fragile given - * the way @mode is used in blkdev_get/put(). + * Block event polling for write claims if requested. Any + * write holder makes the write_holder state stick until + * all are released. This is good enough and tracking + * individual writeable reference is too fragile given the + * way @mode is used in blkdev_get/put(). */ - if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) { + if ((disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE) && + !res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) { bdev->bd_write_holder = true; - disk_block_events(bdev->bd_disk); + disk_block_events(disk); } mutex_unlock(&bdev->bd_mutex); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 7f78cc78fdd0a..bd64b4101f5f1 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1284,6 +1284,8 @@ struct btrfs_root { #define BTRFS_INODE_NOATIME (1 << 9) #define BTRFS_INODE_DIRSYNC (1 << 10) +#define BTRFS_INODE_ROOT_ITEM_INIT (1 << 31) + /* some macros to generate set/get funcs for the struct fields. 
This * assumes there is a lefoo_to_cpu for every type, so lets make a simple * one for u8: @@ -2355,6 +2357,8 @@ int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid); int btrfs_find_orphan_roots(struct btrfs_root *tree_root); int btrfs_set_root_node(struct btrfs_root_item *item, struct extent_buffer *node); +void btrfs_check_and_init_root_item(struct btrfs_root_item *item); + /* dir-item.c */ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index e1aa8d607bc7d..edd9efa515675 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1184,8 +1184,10 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, root->commit_root = btrfs_root_node(root); BUG_ON(!root->node); out: - if (location->objectid != BTRFS_TREE_LOG_OBJECTID) + if (location->objectid != BTRFS_TREE_LOG_OBJECTID) { root->ref_cows = 1; + btrfs_check_and_init_root_item(&root->root_item); + } return root; } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 5fdb2abc4fa78..2ff51e69dea83 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -294,6 +294,10 @@ static noinline int create_subvol(struct btrfs_root *root, inode_item->nbytes = cpu_to_le64(root->leafsize); inode_item->mode = cpu_to_le32(S_IFDIR | 0755); + root_item.flags = 0; + root_item.byte_limit = 0; + inode_item->flags = cpu_to_le64(BTRFS_INODE_ROOT_ITEM_INIT); + btrfs_set_root_bytenr(&root_item, leaf->start); btrfs_set_root_generation(&root_item, trans->transid); btrfs_set_root_level(&root_item, 0); diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 6a1086e83ffc8..3e45c3206e7f5 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -471,3 +471,21 @@ int btrfs_add_root_ref(struct btrfs_trans_handle *trans, btrfs_free_path(path); return 0; } + +/* + * Old btrfs forgets to init root_item->flags and root_item->byte_limit + * for subvolumes. To work around this problem, we steal a bit from + * root_item->inode_item->flags, and use it to indicate if those fields + * have been properly initialized. 
+ */ +void btrfs_check_and_init_root_item(struct btrfs_root_item *root_item) +{ + u64 inode_flags = le64_to_cpu(root_item->inode.flags); + + if (!(inode_flags & BTRFS_INODE_ROOT_ITEM_INIT)) { + inode_flags |= BTRFS_INODE_ROOT_ITEM_INIT; + root_item->inode.flags = cpu_to_le64(inode_flags); + root_item->flags = 0; + root_item->byte_limit = 0; + } +} diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 3d73c8d93bbb1..f3d66819025c4 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -970,6 +970,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, record_root_in_trans(trans, root); btrfs_set_root_last_snapshot(&root->root_item, trans->transid); memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); + btrfs_check_and_init_root_item(new_root_item); root_flags = btrfs_root_flags(new_root_item); if (pending->readonly) diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index fc0fd4fde306b..1b2e180b018dd 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c @@ -90,7 +90,7 @@ cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp, case UNI_COLON: *target = ':'; break; - case UNI_ASTERIK: + case UNI_ASTERISK: *target = '*'; break; case UNI_QUESTION: @@ -264,40 +264,41 @@ cifs_strndup_from_ucs(const char *src, const int maxlen, const bool is_unicode, * names are little endian 16 bit Unicode on the wire */ int -cifsConvertToUCS(__le16 *target, const char *source, int maxlen, +cifsConvertToUCS(__le16 *target, const char *source, int srclen, const struct nls_table *cp, int mapChars) { int i, j, charlen; - int len_remaining = maxlen; char src_char; - __u16 temp; + __le16 dst_char; + wchar_t tmp; if (!mapChars) return cifs_strtoUCS(target, source, PATH_MAX, cp); - for (i = 0, j = 0; i < maxlen; j++) { + for (i = 0, j = 0; i < srclen; j++) { src_char = source[i]; + charlen = 1; switch (src_char) { case 0: - put_unaligned_le16(0, &target[j]); + put_unaligned(0, &target[j]); goto ctoUCS_out; case ':': - temp = UNI_COLON; + dst_char = cpu_to_le16(UNI_COLON); break; case '*': - temp = UNI_ASTERIK; + dst_char = cpu_to_le16(UNI_ASTERISK); break; case '?': - temp = UNI_QUESTION; + dst_char = cpu_to_le16(UNI_QUESTION); break; case '<': - temp = UNI_LESSTHAN; + dst_char = cpu_to_le16(UNI_LESSTHAN); break; case '>': - temp = UNI_GRTRTHAN; + dst_char = cpu_to_le16(UNI_GRTRTHAN); break; case '|': - temp = UNI_PIPE; + dst_char = cpu_to_le16(UNI_PIPE); break; /* * FIXME: We can not handle remapping backslash (UNI_SLASH) @@ -305,28 +306,24 @@ cifsConvertToUCS(__le16 *target, const char *source, int maxlen, * as they use backslash as separator. 
*/ default: - charlen = cp->char2uni(source+i, len_remaining, - &temp); + charlen = cp->char2uni(source + i, srclen - i, &tmp); + dst_char = cpu_to_le16(tmp); + /* * if no match, use question mark, which at least in * some cases serves as wild card */ if (charlen < 1) { - temp = 0x003f; + dst_char = cpu_to_le16(0x003f); charlen = 1; } - len_remaining -= charlen; - /* - * character may take more than one byte in the source - * string, but will take exactly two bytes in the - * target string - */ - i += charlen; - continue; } - put_unaligned_le16(temp, &target[j]); - i++; /* move to next char in source string */ - len_remaining--; + /* + * character may take more than one byte in the source string, + * but will take exactly two bytes in the target string + */ + i += charlen; + put_unaligned(dst_char, &target[j]); } ctoUCS_out: diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h index 7fe6b52df5076..644dd882a5604 100644 --- a/fs/cifs/cifs_unicode.h +++ b/fs/cifs/cifs_unicode.h @@ -44,7 +44,7 @@ * reserved symbols (along with \ and /), otherwise illegal to store * in filenames in NTFS */ -#define UNI_ASTERIK (__u16) ('*' + 0xF000) +#define UNI_ASTERISK (__u16) ('*' + 0xF000) #define UNI_QUESTION (__u16) ('?' + 0xF000) #define UNI_COLON (__u16) (':' + 0xF000) #define UNI_GRTRTHAN (__u16) ('>' + 0xF000) diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index a51585f9852b4..96b9a34db48b5 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -30,12 +30,13 @@ #include #include -/* Calculate and return the CIFS signature based on the mac key and SMB PDU */ -/* the 16 byte signature must be allocated by the caller */ -/* Note we only use the 1st eight bytes */ -/* Note that the smb header signature field on input contains the - sequence number before this function is called */ - +/* + * Calculate and return the CIFS signature based on the mac key and SMB PDU. + * The 16 byte signature must be allocated by the caller. Note we only use the + * 1st eight bytes and that the smb header signature field on input contains + * the sequence number before this function is called. Also, this function + * should be called with the server->srv_mutex held. 
+ */ static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server, char *signature) { @@ -209,8 +210,10 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu, cpu_to_le32(expected_sequence_number); cifs_pdu->Signature.Sequence.Reserved = 0; + mutex_lock(&server->srv_mutex); rc = cifs_calculate_signature(cifs_pdu, server, what_we_think_sig_should_be); + mutex_unlock(&server->srv_mutex); if (rc) return rc; diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index f2970136d17d0..c0def4f5602a7 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -127,6 +127,7 @@ cifs_read_super(struct super_block *sb, void *data, kfree(cifs_sb); return rc; } + cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages; #ifdef CONFIG_CIFS_DFS_UPCALL /* copy mount params to sb for use in submounts */ diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 8d6c17ab593da..fbe54a4f36328 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -275,7 +275,8 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) char *data_area_of_target; char *data_area_of_buf2; int remaining; - __u16 byte_count, total_data_size, total_in_buf, total_in_buf2; + unsigned int byte_count, total_in_buf; + __u16 total_data_size, total_in_buf2; total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount); @@ -288,7 +289,7 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) remaining = total_data_size - total_in_buf; if (remaining < 0) - return -EINVAL; + return -EPROTO; if (remaining == 0) /* nothing to do, ignore */ return 0; @@ -309,20 +310,29 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) data_area_of_target += total_in_buf; /* copy second buffer into end of first buffer */ - memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2); total_in_buf += total_in_buf2; + /* is the result too big for the field? */ + if (total_in_buf > USHRT_MAX) + return -EPROTO; put_unaligned_le16(total_in_buf, &pSMBt->t2_rsp.DataCount); + + /* fix up the BCC */ byte_count = get_bcc_le(pTargetSMB); byte_count += total_in_buf2; + /* is the result too big for the field? 
*/ + if (byte_count > USHRT_MAX) + return -EPROTO; put_bcc_le(byte_count, pTargetSMB); byte_count = pTargetSMB->smb_buf_length; byte_count += total_in_buf2; - - /* BB also add check that we are not beyond maximum buffer size */ - + /* don't allow buffer to overflow */ + if (byte_count > CIFSMaxBufSize) + return -ENOBUFS; pTargetSMB->smb_buf_length = byte_count; + memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2); + if (remaining == total_in_buf2) { cFYI(1, "found the last secondary response"); return 0; /* we are done */ @@ -608,59 +618,63 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { mid_entry = list_entry(tmp, struct mid_q_entry, qhead); - if ((mid_entry->mid == smb_buffer->Mid) && - (mid_entry->midState == MID_REQUEST_SUBMITTED) && - (mid_entry->command == smb_buffer->Command)) { - if (length == 0 && - check2ndT2(smb_buffer, server->maxBuf) > 0) { - /* We have a multipart transact2 resp */ - isMultiRsp = true; - if (mid_entry->resp_buf) { - /* merge response - fix up 1st*/ - if (coalesce_t2(smb_buffer, - mid_entry->resp_buf)) { - mid_entry->multiRsp = - true; - break; - } else { - /* all parts received */ - mid_entry->multiEnd = - true; - goto multi_t2_fnd; - } + if (mid_entry->mid != smb_buffer->Mid || + mid_entry->midState != MID_REQUEST_SUBMITTED || + mid_entry->command != smb_buffer->Command) { + mid_entry = NULL; + continue; + } + + if (length == 0 && + check2ndT2(smb_buffer, server->maxBuf) > 0) { + /* We have a multipart transact2 resp */ + isMultiRsp = true; + if (mid_entry->resp_buf) { + /* merge response - fix up 1st*/ + length = coalesce_t2(smb_buffer, + mid_entry->resp_buf); + if (length > 0) { + length = 0; + mid_entry->multiRsp = true; + break; } else { - if (!isLargeBuf) { - cERROR(1, "1st trans2 resp needs bigbuf"); - /* BB maybe we can fix this up, switch - to already allocated large buffer? */ - } else { - /* Have first buffer */ - mid_entry->resp_buf = - smb_buffer; - mid_entry->largeBuf = - true; - bigbuf = NULL; - } + /* all parts received or + * packet is malformed + */ + mid_entry->multiEnd = true; + goto multi_t2_fnd; + } + } else { + if (!isLargeBuf) { + /* + * FIXME: switch to already + * allocated largebuf? 
+ */ + cERROR(1, "1st trans2 resp " + "needs bigbuf"); + } else { + /* Have first buffer */ + mid_entry->resp_buf = + smb_buffer; + mid_entry->largeBuf = true; + bigbuf = NULL; } - break; } - mid_entry->resp_buf = smb_buffer; - mid_entry->largeBuf = isLargeBuf; + break; + } + mid_entry->resp_buf = smb_buffer; + mid_entry->largeBuf = isLargeBuf; multi_t2_fnd: - if (length == 0) - mid_entry->midState = - MID_RESPONSE_RECEIVED; - else - mid_entry->midState = - MID_RESPONSE_MALFORMED; + if (length == 0) + mid_entry->midState = MID_RESPONSE_RECEIVED; + else + mid_entry->midState = MID_RESPONSE_MALFORMED; #ifdef CONFIG_CIFS_STATS2 - mid_entry->when_received = jiffies; + mid_entry->when_received = jiffies; #endif - list_del_init(&mid_entry->qhead); - mid_entry->callback(mid_entry); - break; - } - mid_entry = NULL; + list_del_init(&mid_entry->qhead); + mid_entry->callback(mid_entry); + break; } spin_unlock(&GlobalMid_Lock); @@ -808,8 +822,7 @@ static int cifs_parse_mount_options(char *options, const char *devname, struct smb_vol *vol) { - char *value; - char *data; + char *value, *data, *end; unsigned int temp_len, i, j; char separator[2]; short int override_uid = -1; @@ -852,6 +865,7 @@ cifs_parse_mount_options(char *options, const char *devname, if (!options) return 1; + end = options + strlen(options); if (strncmp(options, "sep=", 4) == 0) { if (options[4] != 0) { separator[0] = options[4]; @@ -916,6 +930,7 @@ cifs_parse_mount_options(char *options, const char *devname, the only illegal character in a password is null */ if ((value[temp_len] == 0) && + (value + temp_len < end) && (value[temp_len+1] == separator[0])) { /* reinsert comma */ value[temp_len] = separator[0]; @@ -2416,7 +2431,7 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon, if (!CIFSSMBQFSUnixInfo(xid, tcon)) { __u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability); - + cFYI(1, "unix caps which server supports %lld", cap); /* check for reconnect case in which we do not want to change the mount behavior if we can avoid it */ if (vol_info == NULL) { @@ -2434,6 +2449,9 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon, } } + if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP) + cERROR(1, "per-share encryption not supported yet"); + cap &= CIFS_UNIX_CAP_MASK; if (vol_info && vol_info->no_psx_acl) cap &= ~CIFS_UNIX_POSIX_ACL_CAP; @@ -2482,6 +2500,10 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon, cFYI(1, "very large read cap"); if (cap & CIFS_UNIX_LARGE_WRITE_CAP) cFYI(1, "very large write cap"); + if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP) + cFYI(1, "transport encryption cap"); + if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP) + cFYI(1, "mandatory transport encryption cap"); #endif /* CIFS_DEBUG2 */ if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) { if (vol_info == NULL) { @@ -2642,6 +2664,11 @@ is_path_accessible(int xid, struct cifsTconInfo *tcon, 0 /* not legacy */, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); + + if (rc == -EOPNOTSUPP || rc == -EINVAL) + rc = SMBQueryInformation(xid, tcon, full_path, pfile_info, + cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & + CIFS_MOUNT_MAP_SPECIAL_CHR); kfree(pfile_info); return rc; } @@ -2795,20 +2822,26 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, goto remote_path_check; } - /* do not care if following two calls succeed - informational */ - if (!tcon->ipc) { - CIFSSMBQFSDeviceInfo(xid, tcon); - CIFSSMBQFSAttributeInfo(xid, tcon); - } - /* tell server which Unix caps we support */ - if 
(tcon->ses->capabilities & CAP_UNIX) + if (tcon->ses->capabilities & CAP_UNIX) { /* reset of caps checks mount to see if unix extensions disabled for just this mount */ reset_cifs_unix_caps(xid, tcon, sb, volume_info); - else + if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) && + (le64_to_cpu(tcon->fsUnixInfo.Capability) & + CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) { + rc = -EACCES; + goto mount_fail_check; + } + } else tcon->unix_ext = 0; /* server does not support them */ + /* do not care if following two calls succeed - informational */ + if (!tcon->ipc) { + CIFSSMBQFSDeviceInfo(xid, tcon); + CIFSSMBQFSAttributeInfo(xid, tcon); + } + /* convert forward to back slashes in prepath here if needed */ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) == 0) convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb)); @@ -2826,7 +2859,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, remote_path_check: /* check if a whole path (including prepath) is not remote */ - if (!rc && cifs_sb->prepathlen && tcon) { + if (!rc && tcon) { /* build_path_to_root works only when we have a valid tcon */ full_path = cifs_build_path_to_root(cifs_sb, tcon); if (full_path == NULL) { diff --git a/fs/cifs/file.c b/fs/cifs/file.c index e964b1cd5dd09..7b2e8ec2709b3 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -575,8 +575,10 @@ static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush) int cifs_close(struct inode *inode, struct file *file) { - cifsFileInfo_put(file->private_data); - file->private_data = NULL; + if (file->private_data != NULL) { + cifsFileInfo_put(file->private_data); + file->private_data = NULL; + } /* return code from the ->release op is always ignored */ return 0; diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 16765703131be..894076fbb76c8 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -277,7 +277,7 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses, } static void -decode_unicode_ssetup(char **pbcc_area, __u16 bleft, struct cifsSesInfo *ses, +decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses, const struct nls_table *nls_cp) { int len; @@ -285,19 +285,6 @@ decode_unicode_ssetup(char **pbcc_area, __u16 bleft, struct cifsSesInfo *ses, cFYI(1, "bleft %d", bleft); - /* - * Windows servers do not always double null terminate their final - * Unicode string. Check to see if there are an uneven number of bytes - * left. If so, then add an extra NULL pad byte to the end of the - * response. 
- * - * See section 2.7.2 in "Implementing CIFS" for details - */ - if (bleft % 2) { - data[bleft] = 0; - ++bleft; - } - kfree(ses->serverOS); ses->serverOS = cifs_strndup_from_ucs(data, bleft, true, nls_cp); cFYI(1, "serverOS=%s", ses->serverOS); @@ -930,7 +917,9 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, } /* BB check if Unicode and decode strings */ - if (smb_buf->Flags2 & SMBFLG2_UNICODE) { + if (bytes_remaining == 0) { + /* no string area to decode, do nothing */ + } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) { /* unicode string area must be word-aligned */ if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) { ++bcc_ptr; diff --git a/fs/dcache.c b/fs/dcache.c index 611ffe928c03c..407cb296c4e14 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -75,7 +75,7 @@ * dentry1->d_lock * dentry2->d_lock */ -int sysctl_vfs_cache_pressure __read_mostly = 100; +int sysctl_vfs_cache_pressure __read_mostly = 10; EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock); @@ -296,8 +296,12 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent) __releases(parent->d_lock) __releases(dentry->d_inode->i_lock) { - dentry->d_parent = NULL; list_del(&dentry->d_u.d_child); + /* + * Inform try_to_ascend() that we are no longer attached to the + * dentry tree + */ + dentry->d_flags |= DCACHE_DISCONNECTED; if (parent) spin_unlock(&parent->d_lock); dentry_iput(dentry); @@ -1011,6 +1015,35 @@ void shrink_dcache_for_umount(struct super_block *sb) } } +/* + * This tries to ascend one level of parenthood, but + * we can race with renaming, so we need to re-check + * the parenthood after dropping the lock and check + * that the sequence number still matches. + */ +static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq) +{ + struct dentry *new = old->d_parent; + + rcu_read_lock(); + spin_unlock(&old->d_lock); + spin_lock(&new->d_lock); + + /* + * might go back up the wrong parent if we have had a rename + * or deletion + */ + if (new != old->d_parent || + (old->d_flags & DCACHE_DISCONNECTED) || + (!locked && read_seqretry(&rename_lock, seq))) { + spin_unlock(&new->d_lock); + new = NULL; + } + rcu_read_unlock(); + return new; +} + + /* * Search for at least 1 mount point in the dentry's subdirs. * We descend to the next level whenever the d_subdirs @@ -1066,24 +1099,10 @@ int have_submounts(struct dentry *parent) * All done at this level ... ascend and resume the search. */ if (this_parent != parent) { - struct dentry *tmp; - struct dentry *child; - - tmp = this_parent->d_parent; - rcu_read_lock(); - spin_unlock(&this_parent->d_lock); - child = this_parent; - this_parent = tmp; - spin_lock(&this_parent->d_lock); - /* might go back up the wrong parent if we have had a rename - * or deletion */ - if (this_parent != child->d_parent || - (!locked && read_seqretry(&rename_lock, seq))) { - spin_unlock(&this_parent->d_lock); - rcu_read_unlock(); + struct dentry *child = this_parent; + this_parent = try_to_ascend(this_parent, locked, seq); + if (!this_parent) goto rename_retry; - } - rcu_read_unlock(); next = child->d_u.d_child.next; goto resume; } @@ -1181,24 +1200,10 @@ static int select_parent(struct dentry * parent) * All done at this level ... ascend and resume the search. 
*/ if (this_parent != parent) { - struct dentry *tmp; - struct dentry *child; - - tmp = this_parent->d_parent; - rcu_read_lock(); - spin_unlock(&this_parent->d_lock); - child = this_parent; - this_parent = tmp; - spin_lock(&this_parent->d_lock); - /* might go back up the wrong parent if we have had a rename - * or deletion */ - if (this_parent != child->d_parent || - (!locked && read_seqretry(&rename_lock, seq))) { - spin_unlock(&this_parent->d_lock); - rcu_read_unlock(); + struct dentry *child = this_parent; + this_parent = try_to_ascend(this_parent, locked, seq); + if (!this_parent) goto rename_retry; - } - rcu_read_unlock(); next = child->d_u.d_child.next; goto resume; } @@ -1236,7 +1241,7 @@ void shrink_dcache_parent(struct dentry * parent) EXPORT_SYMBOL(shrink_dcache_parent); /* - * Scan `nr' dentries and return the number which remain. + * Scan `sc->nr_slab_to_reclaim' dentries and return the number which remain. * * We need to avoid reentering the filesystem if the caller is performing a * GFP_NOFS allocation attempt. One example deadlock is: @@ -1247,8 +1252,12 @@ EXPORT_SYMBOL(shrink_dcache_parent); * * In this case we return -1 to tell the caller that we baled. */ -static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) +static int shrink_dcache_memory(struct shrinker *shrink, + struct shrink_control *sc) { + int nr = sc->nr_to_scan; + gfp_t gfp_mask = sc->gfp_mask; + if (nr) { if (!(gfp_mask & __GFP_FS)) return -1; @@ -1607,10 +1616,13 @@ struct dentry *d_obtain_alias(struct inode *inode) __bit_spin_unlock(0, (unsigned long *)&tmp->d_sb->s_anon.first); spin_unlock(&tmp->d_lock); spin_unlock(&inode->i_lock); + security_d_instantiate(tmp, inode); return tmp; out_iput: + if (res && !IS_ERR(res)) + security_d_instantiate(res, inode); iput(inode); return res; } @@ -2942,28 +2954,14 @@ void d_genocide(struct dentry *root) spin_unlock(&dentry->d_lock); } if (this_parent != root) { - struct dentry *tmp; - struct dentry *child; - - tmp = this_parent->d_parent; + struct dentry *child = this_parent; if (!(this_parent->d_flags & DCACHE_GENOCIDE)) { this_parent->d_flags |= DCACHE_GENOCIDE; this_parent->d_count--; } - rcu_read_lock(); - spin_unlock(&this_parent->d_lock); - child = this_parent; - this_parent = tmp; - spin_lock(&this_parent->d_lock); - /* might go back up the wrong parent if we have had a rename - * or deletion */ - if (this_parent != child->d_parent || - (!locked && read_seqretry(&rename_lock, seq))) { - spin_unlock(&this_parent->d_lock); - rcu_read_unlock(); + this_parent = try_to_ascend(this_parent, locked, seq); + if (!this_parent) goto rename_retry; - } - rcu_read_unlock(); next = child->d_u.d_child.next; goto resume; } diff --git a/fs/drop_caches.c b/fs/drop_caches.c index 2195c213ab2f5..0891072d600c2 100644 --- a/fs/drop_caches.c +++ b/fs/drop_caches.c @@ -36,9 +36,12 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused) static void drop_slab(void) { int nr_objects; + struct shrink_control shrink = { + .gfp_mask = GFP_KERNEL, + }; do { - nr_objects = shrink_slab(1000, GFP_KERNEL, 1000); + nr_objects = shrink_slab(&shrink, 1000, 1000); } while (nr_objects > 10); } diff --git a/fs/dyn_sync_cntrl.c b/fs/dyn_sync_cntrl.c new file mode 100644 index 0000000000000..3d6a0e3fafde8 --- /dev/null +++ b/fs/dyn_sync_cntrl.c @@ -0,0 +1,155 @@ +/* + * Author: Paul Reioux aka Faux123 + * + * Copyright 2012 Paul Reioux + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free 
Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/module.h> +#include <linux/kobject.h> +#include <linux/sysfs.h> +#include <linux/earlysuspend.h> +#include <linux/mutex.h> + +#include <linux/writeback.h> + +#define DYN_FSYNC_VERSION 1 + +/* + * fsync_mutex protects dyn_fsync_active during early suspend / late resume transitions + */ +static DEFINE_MUTEX(fsync_mutex); + +bool early_suspend_active = false; +bool dyn_fsync_active = false; + +static ssize_t dyn_fsync_active_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", (dyn_fsync_active ? 1 : 0)); +} + +static ssize_t dyn_fsync_active_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) +{ + unsigned int data; + + if(sscanf(buf, "%u\n", &data) == 1) { + if (data == 1) { + pr_info("%s: dynamic fsync enabled\n", __FUNCTION__); + dyn_fsync_active = true; + } + else if (data == 0) { + pr_info("%s: dynamic fsync disabled\n", __FUNCTION__); + dyn_fsync_active = false; + } + else + pr_info("%s: bad value: %u\n", __FUNCTION__, data); + } else + pr_info("%s: unknown input!\n", __FUNCTION__); + + return count; +} + +static ssize_t dyn_fsync_version_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "version: %u\n", DYN_FSYNC_VERSION); +} + +static ssize_t dyn_fsync_earlysuspend_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "early suspend active: %u\n", early_suspend_active); +} + +static struct kobj_attribute dyn_fsync_active_attribute = + __ATTR(Dyn_fsync_active, 0666, dyn_fsync_active_show, dyn_fsync_active_store); + +static struct kobj_attribute dyn_fsync_version_attribute = + __ATTR(Dyn_fsync_version, 0444 , dyn_fsync_version_show, NULL); + +static struct kobj_attribute dyn_fsync_earlysuspend_attribute = + __ATTR(Dyn_fsync_earlysuspend, 0444 , dyn_fsync_earlysuspend_show, NULL); + +static struct attribute *dyn_fsync_active_attrs[] = + { + &dyn_fsync_active_attribute.attr, + &dyn_fsync_version_attribute.attr, + &dyn_fsync_earlysuspend_attribute.attr, + NULL, + }; + +static struct attribute_group dyn_fsync_active_attr_group = + { + .attrs = dyn_fsync_active_attrs, + }; + +static struct kobject *dyn_fsync_kobj; + +static void dyn_fsync_early_suspend(struct early_suspend *h) +{ + mutex_lock(&fsync_mutex); + if (dyn_fsync_active) { + early_suspend_active = true; +#if 1 + /* flush all outstanding buffers */ + wakeup_flusher_threads(0); + sync_filesystems(0); + sync_filesystems(1); +#endif + } + mutex_unlock(&fsync_mutex); +} + +static void dyn_fsync_late_resume(struct early_suspend *h) +{ + mutex_lock(&fsync_mutex); + early_suspend_active = false; + mutex_unlock(&fsync_mutex); +} + +static struct early_suspend dyn_fsync_early_suspend_handler = + { + .level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN, + .suspend = dyn_fsync_early_suspend, + .resume = dyn_fsync_late_resume, + }; + +static int dyn_fsync_init(void) +{ + int sysfs_result; + + register_early_suspend(&dyn_fsync_early_suspend_handler); + + dyn_fsync_kobj = kobject_create_and_add("dyn_fsync", kernel_kobj); + if (!dyn_fsync_kobj) { + pr_err("%s dyn_fsync kobject create failed!\n", __FUNCTION__); + return -ENOMEM; + } + + sysfs_result = sysfs_create_group(dyn_fsync_kobj, &dyn_fsync_active_attr_group); + + if (sysfs_result) {
pr_info("%s dyn_fsync sysfs create failed!\n", __FUNCTION__); + kobject_put(dyn_fsync_kobj); + } + return sysfs_result; +} + +static void dyn_fsync_exit(void) +{ + unregister_early_suspend(&dyn_fsync_early_suspend_handler); + + if (dyn_fsync_kobj != NULL) + kobject_put(dyn_fsync_kobj); +} + +module_init(dyn_fsync_init); +module_exit(dyn_fsync_exit); + diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index c1436cff6f2d7..7ed2ef30b8a86 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c @@ -492,8 +492,8 @@ struct ecryptfs_write_tag_70_packet_silly_stack { struct mutex *tfm_mutex; char *block_aligned_filename; struct ecryptfs_auth_tok *auth_tok; - struct scatterlist src_sg; - struct scatterlist dst_sg; + struct scatterlist src_sg[2]; + struct scatterlist dst_sg[2]; struct blkcipher_desc desc; char iv[ECRYPTFS_MAX_IV_BYTES]; char hash[ECRYPTFS_TAG_70_DIGEST_SIZE]; @@ -709,23 +709,21 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes, memcpy(&s->block_aligned_filename[s->num_rand_bytes], filename, filename_size); rc = virt_to_scatterlist(s->block_aligned_filename, - s->block_aligned_filename_size, &s->src_sg, 1); - if (rc != 1) { + s->block_aligned_filename_size, s->src_sg, 2); + if (rc < 1) { printk(KERN_ERR "%s: Internal error whilst attempting to " - "convert filename memory to scatterlist; " - "expected rc = 1; got rc = [%d]. " + "convert filename memory to scatterlist; rc = [%d]. " "block_aligned_filename_size = [%zd]\n", __func__, rc, s->block_aligned_filename_size); goto out_release_free_unlock; } rc = virt_to_scatterlist(&dest[s->i], s->block_aligned_filename_size, - &s->dst_sg, 1); - if (rc != 1) { + s->dst_sg, 2); + if (rc < 1) { printk(KERN_ERR "%s: Internal error whilst attempting to " "convert encrypted filename memory to scatterlist; " - "expected rc = 1; got rc = [%d]. " - "block_aligned_filename_size = [%zd]\n", __func__, rc, - s->block_aligned_filename_size); + "rc = [%d]. block_aligned_filename_size = [%zd]\n", + __func__, rc, s->block_aligned_filename_size); goto out_release_free_unlock; } /* The characters in the first block effectively do the job @@ -748,7 +746,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes, mount_crypt_stat->global_default_fn_cipher_key_bytes); goto out_release_free_unlock; } - rc = crypto_blkcipher_encrypt_iv(&s->desc, &s->dst_sg, &s->src_sg, + rc = crypto_blkcipher_encrypt_iv(&s->desc, s->dst_sg, s->src_sg, s->block_aligned_filename_size); if (rc) { printk(KERN_ERR "%s: Error attempting to encrypt filename; " @@ -782,8 +780,8 @@ struct ecryptfs_parse_tag_70_packet_silly_stack { struct mutex *tfm_mutex; char *decrypted_filename; struct ecryptfs_auth_tok *auth_tok; - struct scatterlist src_sg; - struct scatterlist dst_sg; + struct scatterlist src_sg[2]; + struct scatterlist dst_sg[2]; struct blkcipher_desc desc; char fnek_sig_hex[ECRYPTFS_SIG_SIZE_HEX + 1]; char iv[ECRYPTFS_MAX_IV_BYTES]; @@ -890,13 +888,12 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size, } mutex_lock(s->tfm_mutex); rc = virt_to_scatterlist(&data[(*packet_size)], - s->block_aligned_filename_size, &s->src_sg, 1); - if (rc != 1) { + s->block_aligned_filename_size, s->src_sg, 2); + if (rc < 1) { printk(KERN_ERR "%s: Internal error whilst attempting to " "convert encrypted filename memory to scatterlist; " - "expected rc = 1; got rc = [%d]. " - "block_aligned_filename_size = [%zd]\n", __func__, rc, - s->block_aligned_filename_size); + "rc = [%d]. 
block_aligned_filename_size = [%zd]\n", + __func__, rc, s->block_aligned_filename_size); goto out_unlock; } (*packet_size) += s->block_aligned_filename_size; @@ -910,13 +907,12 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size, goto out_unlock; } rc = virt_to_scatterlist(s->decrypted_filename, - s->block_aligned_filename_size, &s->dst_sg, 1); - if (rc != 1) { + s->block_aligned_filename_size, s->dst_sg, 2); + if (rc < 1) { printk(KERN_ERR "%s: Internal error whilst attempting to " "convert decrypted filename memory to scatterlist; " - "expected rc = 1; got rc = [%d]. " - "block_aligned_filename_size = [%zd]\n", __func__, rc, - s->block_aligned_filename_size); + "rc = [%d]. block_aligned_filename_size = [%zd]\n", + __func__, rc, s->block_aligned_filename_size); goto out_free_unlock; } /* The characters in the first block effectively do the job of @@ -956,7 +952,7 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size, mount_crypt_stat->global_default_fn_cipher_key_bytes); goto out_free_unlock; } - rc = crypto_blkcipher_decrypt_iv(&s->desc, &s->dst_sg, &s->src_sg, + rc = crypto_blkcipher_decrypt_iv(&s->desc, s->dst_sg, s->src_sg, s->block_aligned_filename_size); if (rc) { printk(KERN_ERR "%s: Error attempting to decrypt filename; " @@ -1563,6 +1559,7 @@ int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key, printk(KERN_ERR "Could not find key with description: [%s]\n", sig); rc = process_request_key_err(PTR_ERR(*auth_tok_key)); + (*auth_tok_key) = NULL; goto out; } (*auth_tok) = ecryptfs_get_key_payload_data(*auth_tok_key); diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index cc64fca89f8dc..eb9d9672ebd85 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -374,6 +374,11 @@ static int ecryptfs_write_begin(struct file *file, && (pos != 0)) zero_user(page, 0, PAGE_CACHE_SIZE); out: + if (unlikely(rc)) { + unlock_page(page); + page_cache_release(page); + *pagep = NULL; + } return rc; } diff --git a/fs/exfat/Kconfig b/fs/exfat/Kconfig new file mode 100644 index 0000000000000..144b6eccabebf --- /dev/null +++ b/fs/exfat/Kconfig @@ -0,0 +1,19 @@ +config EXFAT_FS + tristate "exFAT filesystem support" + select NLS + help + exFAT driver from Samsung + +config EXFAT_DEFAULT_CODEPAGE + int "Default codepage for exFAT" + depends on EXFAT_FS + default 437 + help + This option should be set to the codepage of your exFAT filesystems. + +config EXFAT_DEFAULT_IOCHARSET + string "Default iocharset for exFAT" + depends on EXFAT_FS + default "utf8" + help + Set this to the default input/output character set you'd like exFAT to use. 
diff --git a/fs/exfat/Makefile b/fs/exfat/Makefile new file mode 100644 index 0000000000000..77dc36c6183b9 --- /dev/null +++ b/fs/exfat/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_EXFAT_FS) += exfat.o + +exfat-y := exfat_core.o exfat_api.o exfat_blkdev.o exfat_cache.o exfat_super.o \ + exfat_data.o exfat_global.o exfat_nls.o exfat_oal.o exfat_upcase.o diff --git a/fs/exfat/Makefile.module b/fs/exfat/Makefile.module new file mode 100644 index 0000000000000..2e97826d059d5 --- /dev/null +++ b/fs/exfat/Makefile.module @@ -0,0 +1,20 @@ +EXTRA_FLAGS += -I$(PWD) + +#KDIR := /usr/src/linux/ +KDIR := /lib/modules/$(shell uname -r)/build +PWD := $(shell pwd) + +export CONFIG_EXFAT_FS := m + +all: + $(MAKE) -C $(KDIR) M=$(PWD) modules + +clean: + $(MAKE) -C $(KDIR) M=$(PWD) clean + +help: + $(MAKE) -C $(KDIR) M=$(PWD) help + +.PHONY : install +install : all + sudo $(MAKE) -C $(KDIR) M=$(PWD) modules_install; sudo depmod diff --git a/fs/exfat/README.md b/fs/exfat/README.md new file mode 100644 index 0000000000000..9ab089f2fa8a5 --- /dev/null +++ b/fs/exfat/README.md @@ -0,0 +1,71 @@ +exfat-nofuse +============ + +Linux non-fuse read/write kernel driver for the exFAT file system.
+Originally ported from the Android 3.0 kernel. + + +Kudos to ksv1986 for the mutex patch!
+Thanks to JackNorris for being awesome and providing the clear_inode() patch.
+
+Big thanks to lqs for completing the driver! +Big thanks to benpicco for fixing 3.11.y compatibility! + + +Special thanks to github user AndreiLux for spreading the word about the leak!
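
Before building anything, it can help to check whether the running kernel already registers exFAT support. A tiny userspace check (plain C, nothing driver-specific; the helper name below is just for illustration) is to scan /proc/filesystems:

    #include <stdio.h>
    #include <string.h>

    /* Returns 1 if the running kernel lists "exfat" in /proc/filesystems. */
    static int kernel_has_exfat(void)
    {
        char line[128];
        FILE *fp = fopen("/proc/filesystems", "r");

        if (!fp)
            return 0;

        while (fgets(line, sizeof(line), fp)) {
            if (strstr(line, "exfat")) {
                fclose(fp);
                return 1;
            }
        }
        fclose(fp);
        return 0;
    }

    int main(void)
    {
        printf("exfat support: %s\n", kernel_has_exfat() ? "present" : "absent");
        return 0;
    }

Note that /proc/filesystems only lists exfat once the module is loaded (or built in), so "absent" simply means the steps below are still needed.
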
+ + +Installation as a standalone module: +==================================== + + make -f Makefile.module KDIR="path to kernel source" CROSS_COMPILE="path to Android cross-toolchain (such as Linaro)/bin/SOMETHING- (see your folder for clues)" + +An example of how it works for me: + + make -f Makefile.module CROSS_COMPILE=../dorimanx-SG2-I9100-Kernel/android-toolchain/bin/arm-eabi- KDIR=../dorimanx-SG2-I9100-Kernel/ + +The exfat.ko module file will be created in the exfat source folder and will work with the kernel source you used. + + make -f Makefile.module install + +To load the driver manually, run this as root: + + modprobe exfat + +To add it to the kernel, you need to do this: +====================================== + +cd your kernel source dir + +mkdir fs/exfat + +copy all files (except .git) from exfat-nofuse to your kernel source fs/exfat/ + +see +https://github.com/dorimanx/Dorimanx-SG2-I9100-Kernel/commit/e8fc728a68096db9ffcebff40244ebfb60a3de18 + +edit fs/Kconfig +edit fs/Makefile + +cd your kernel source +make menuconfig + +Go to: +> File systems > DOS/FAT/NT > check exfat as MODULE (M) +> (437) Default codepage for exFAT +> (utf8) Default iocharset for exFAT + +> ESC to main menu +> Save an Alternate Configuration File +> ESC ESC + +build your kernel. + +and you will have the new module! + +exfat.ko + +have fun. + +Free Software for the Free Minds! +===================================== diff --git a/fs/exfat/exfat.h b/fs/exfat/exfat.h new file mode 100644 index 0000000000000..a37f4b154066d --- /dev/null +++ b/fs/exfat/exfat.h @@ -0,0 +1,675 @@ +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */ + +/************************************************************************/ +/* */ +/* PROJECT : exFAT & FAT12/16/32 File System */ +/* FILE : exfat.h */ +/* PURPOSE : Header File for exFAT File Manager */ +/* */ +/*----------------------------------------------------------------------*/ +/* NOTES */ +/* */ +/*----------------------------------------------------------------------*/ +/* REVISION HISTORY (Ver 0.9) */ +/* */ +/* - 2010.11.15 [Joosun Hahn] : first writing */ +/* */ +/************************************************************************/ + +#ifndef _EXFAT_H +#define _EXFAT_H + +#include "exfat_config.h" +#include "exfat_global.h" +#include "exfat_data.h" +#include "exfat_oal.h" + +#include "exfat_blkdev.h" +#include "exfat_cache.h" +#include "exfat_nls.h" +#include "exfat_api.h" +#include "exfat_cache.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#if EXFAT_CONFIG_KERNEL_DEBUG + /* For Debugging Purpose */ + /* IOCTL code 'f' used by + * - file systems typically #0~0x1F + * - embedded terminal devices #128~ + * - exts for debugging purpose #99 + * number 100 and 101 is availble now but has possible conflicts + */ +#define EXFAT_IOC_GET_DEBUGFLAGS _IOR('f', 100, long) +#define EXFAT_IOC_SET_DEBUGFLAGS _IOW('f', 101, long) + +#define EXFAT_DEBUGFLAGS_INVALID_UMOUNT 0x01 +#define EXFAT_DEBUGFLAGS_ERROR_RW 0x02 +#endif /* EXFAT_CONFIG_KERNEL_DEBUG */ + + /*----------------------------------------------------------------------*/ + /* Constant & Macro Definitions */ + /*----------------------------------------------------------------------*/ + +#define MAX_VOLUME 4 // max num of volumes per device + +#define DENTRY_SIZE 32 // dir entry size +#define DENTRY_SIZE_BITS 5 + + /* PBR entries */ +#define PBR_SIGNATURE 0xAA55 +#define EXT_SIGNATURE 0xAA550000 +#define VOL_LABEL "NO NAME " // size should be 11 +#define OEM_NAME "MSWIN4.1" // size should be 8 +#define STR_FAT12 "FAT12 " // size should be 8 +#define STR_FAT16 "FAT16 " // size should be 8 +#define STR_FAT32 "FAT32 " // size should be 8 +#define STR_EXFAT "EXFAT " // size should be 8 +#define VOL_CLEAN 0x0000 +#define VOL_DIRTY 0x0002 + + /* max number of clusters */ +#define FAT12_THRESHOLD 4087 // 2^12 - 1 + 2 (clu 0 & 1) +#define FAT16_THRESHOLD 65527 // 2^16 - 1 + 2 +#define FAT32_THRESHOLD 268435457 // 2^28 - 1 + 2 +#define EXFAT_THRESHOLD 268435457 // 2^28 - 1 + 2 + + /* file types */ +#define TYPE_UNUSED 0x0000 +#define TYPE_DELETED 0x0001 +#define TYPE_INVALID 0x0002 +#define TYPE_CRITICAL_PRI 0x0100 +#define TYPE_BITMAP 0x0101 +#define TYPE_UPCASE 0x0102 +#define TYPE_VOLUME 0x0103 +#define TYPE_DIR 0x0104 +#define TYPE_FILE 0x011F +#define TYPE_SYMLINK 0x015F +#define TYPE_CRITICAL_SEC 0x0200 +#define TYPE_STREAM 0x0201 +#define TYPE_EXTEND 0x0202 +#define TYPE_ACL 0x0203 +#define TYPE_BENIGN_PRI 0x0400 +#define TYPE_GUID 0x0401 +#define TYPE_PADDING 0x0402 +#define TYPE_ACLTAB 0x0403 +#define TYPE_BENIGN_SEC 0x0800 +#define TYPE_ALL 0x0FFF + + /* time modes */ +#define TM_CREATE 0 +#define TM_MODIFY 1 +#define TM_ACCESS 2 + + /* checksum types */ +#define CS_DIR_ENTRY 0 +#define CS_PBR_SECTOR 1 +#define CS_DEFAULT 2 + +#define CLUSTER_16(x) ((UINT16)(x)) +#define CLUSTER_32(x) ((UINT32)(x)) + +#define START_SECTOR(x) \ + ( (((x)-2) << p_fs->sectors_per_clu_bits) + p_fs->data_start_sector ) + +#define IS_LAST_SECTOR_IN_CLUSTER(sec) \ + ( (((sec) - p_fs->data_start_sector + 1) & ((1 << p_fs->sectors_per_clu_bits) -1)) == 0) + +#define GET_CLUSTER_FROM_SECTOR(sec) \ + ((((sec) - 
p_fs->data_start_sector) >> p_fs->sectors_per_clu_bits) +2) + +#define GET16(p_src) \ + ( ((UINT16)(p_src)[0]) | (((UINT16)(p_src)[1]) << 8) ) +#define GET32(p_src) \ + ( ((UINT32)(p_src)[0]) | (((UINT32)(p_src)[1]) << 8) | \ + (((UINT32)(p_src)[2]) << 16) | (((UINT32)(p_src)[3]) << 24) ) +#define GET64(p_src) \ + ( ((UINT64)(p_src)[0]) | (((UINT64)(p_src)[1]) << 8) | \ + (((UINT64)(p_src)[2]) << 16) | (((UINT64)(p_src)[3]) << 24) | \ + (((UINT64)(p_src)[4]) << 32) | (((UINT64)(p_src)[5]) << 40) | \ + (((UINT64)(p_src)[6]) << 48) | (((UINT64)(p_src)[7]) << 56) ) + + +#define SET16(p_dst,src) \ + do { \ + (p_dst)[0]=(UINT8)(src); \ + (p_dst)[1]=(UINT8)(((UINT16)(src)) >> 8); \ + } while (0) +#define SET32(p_dst,src) \ + do { \ + (p_dst)[0]=(UINT8)(src); \ + (p_dst)[1]=(UINT8)(((UINT32)(src)) >> 8); \ + (p_dst)[2]=(UINT8)(((UINT32)(src)) >> 16); \ + (p_dst)[3]=(UINT8)(((UINT32)(src)) >> 24); \ + } while (0) +#define SET64(p_dst,src) \ + do { \ + (p_dst)[0]=(UINT8)(src); \ + (p_dst)[1]=(UINT8)(((UINT64)(src)) >> 8); \ + (p_dst)[2]=(UINT8)(((UINT64)(src)) >> 16); \ + (p_dst)[3]=(UINT8)(((UINT64)(src)) >> 24); \ + (p_dst)[4]=(UINT8)(((UINT64)(src)) >> 32); \ + (p_dst)[5]=(UINT8)(((UINT64)(src)) >> 40); \ + (p_dst)[6]=(UINT8)(((UINT64)(src)) >> 48); \ + (p_dst)[7]=(UINT8)(((UINT64)(src)) >> 56); \ + } while (0) + +#ifdef __LITTLE_ENDIAN +#define GET16_A(p_src) (*((UINT16 *)(p_src))) +#define GET32_A(p_src) (*((UINT32 *)(p_src))) +#define GET64_A(p_src) (*((UINT64 *)(p_src))) +#define SET16_A(p_dst,src) *((UINT16 *)(p_dst)) = (UINT16)(src) +#define SET32_A(p_dst,src) *((UINT32 *)(p_dst)) = (UINT32)(src) +#define SET64_A(p_dst,src) *((UINT64 *)(p_dst)) = (UINT64)(src) +#else +#define GET16_A(p_src) GET16(p_src) +#define GET32_A(p_src) GET32(p_src) +#define GET64_A(p_src) GET64(p_src) +#define SET16_A(p_dst,src) SET16(p_dst, src) +#define SET32_A(p_dst,src) SET32(p_dst, src) +#define SET64_A(p_dst,src) SET64(p_dst, src) +#endif + + /* Upcase table macros */ +#define HIGH_INDEX_BIT (8) +#define HIGH_INDEX_MASK (0xFF00) +#define LOW_INDEX_BIT (16-HIGH_INDEX_BIT) +#define UTBL_ROW_COUNT (1<<LOW_INDEX_BIT) +#define UTBL_COL_COUNT (1<<HIGH_INDEX_BIT) + + static inline UINT16 get_col_index(UINT16 i) + { + return i >> LOW_INDEX_BIT; + } + static inline UINT16 get_row_index(UINT16 i) + { + return i & ~HIGH_INDEX_MASK; + } + /*----------------------------------------------------------------------*/ + /* Type Definitions */ + /*----------------------------------------------------------------------*/ + + /* MS_DOS FAT partition boot record (512 bytes) */ + typedef struct { + UINT8 jmp_boot[3]; + UINT8 oem_name[8]; + UINT8 bpb[109]; + UINT8 boot_code[390]; + UINT8 signature[2]; + } PBR_SECTOR_T; + + /* MS-DOS FAT12/16 BIOS parameter block (51 bytes) */ + typedef struct { + UINT8 sector_size[2]; + UINT8 sectors_per_clu; + UINT8 num_reserved[2]; + UINT8 num_fats; + UINT8 num_root_entries[2]; + UINT8 num_sectors[2]; + UINT8 media_type; + UINT8 num_fat_sectors[2]; + UINT8 sectors_in_track[2]; + UINT8 num_heads[2]; + UINT8 num_hid_sectors[4]; + UINT8 num_huge_sectors[4]; + + UINT8 phy_drv_no; + UINT8 reserved; + UINT8 ext_signature; + UINT8 vol_serial[4]; + UINT8 vol_label[11]; + UINT8 vol_type[8]; + } BPB16_T; + + /* MS-DOS FAT32 BIOS parameter block (79 bytes) */ + typedef struct { + UINT8 sector_size[2]; + UINT8 sectors_per_clu; + UINT8 num_reserved[2]; + UINT8 num_fats; + UINT8 num_root_entries[2]; + UINT8 num_sectors[2]; + UINT8 media_type; + UINT8 num_fat_sectors[2]; + UINT8 sectors_in_track[2]; + UINT8 num_heads[2]; + UINT8 num_hid_sectors[4]; + UINT8 num_huge_sectors[4]; + UINT8 num_fat32_sectors[4]; + UINT8 ext_flags[2]; + UINT8
fs_version[2]; + UINT8 root_cluster[4]; + UINT8 fsinfo_sector[2]; + UINT8 backup_sector[2]; + UINT8 reserved[12]; + + UINT8 phy_drv_no; + UINT8 ext_reserved; + UINT8 ext_signature; + UINT8 vol_serial[4]; + UINT8 vol_label[11]; + UINT8 vol_type[8]; + } BPB32_T; + + /* MS-DOS EXFAT BIOS parameter block (109 bytes) */ + typedef struct { + UINT8 reserved1[53]; + UINT8 vol_offset[8]; + UINT8 vol_length[8]; + UINT8 fat_offset[4]; + UINT8 fat_length[4]; + UINT8 clu_offset[4]; + UINT8 clu_count[4]; + UINT8 root_cluster[4]; + UINT8 vol_serial[4]; + UINT8 fs_version[2]; + UINT8 vol_flags[2]; + UINT8 sector_size_bits; + UINT8 sectors_per_clu_bits; + UINT8 num_fats; + UINT8 phy_drv_no; + UINT8 perc_in_use; + UINT8 reserved2[7]; + } BPBEX_T; + + /* MS-DOS FAT file system information sector (512 bytes) */ + typedef struct { + UINT8 signature1[4]; // aligned + UINT8 reserved1[480]; + UINT8 signature2[4]; // aligned + UINT8 free_cluster[4]; // aligned + UINT8 next_cluster[4]; // aligned + UINT8 reserved2[14]; + UINT8 signature3[2]; + } FSI_SECTOR_T; + + /* MS-DOS FAT directory entry (32 bytes) */ + typedef struct { + UINT8 dummy[32]; + } DENTRY_T; + + typedef struct { + UINT8 name[DOS_NAME_LENGTH]; + UINT8 attr; + UINT8 lcase; + UINT8 create_time_ms; + UINT8 create_time[2]; // aligned + UINT8 create_date[2]; // aligned + UINT8 access_date[2]; // aligned + UINT8 start_clu_hi[2]; // aligned + UINT8 modify_time[2]; // aligned + UINT8 modify_date[2]; // aligned + UINT8 start_clu_lo[2]; // aligned + UINT8 size[4]; // aligned + } DOS_DENTRY_T; + + /* MS-DOS FAT extended directory entry (32 bytes) */ + typedef struct { + UINT8 order; + UINT8 unicode_0_4[10]; + UINT8 attr; + UINT8 sysid; + UINT8 checksum; + UINT8 unicode_5_10[12]; // aligned + UINT8 start_clu[2]; // aligned + UINT8 unicode_11_12[4]; // aligned + } EXT_DENTRY_T; + + /* MS-DOS EXFAT file directory entry (32 bytes) */ + typedef struct { + UINT8 type; + UINT8 num_ext; + UINT8 checksum[2]; // aligned + UINT8 attr[2]; // aligned + UINT8 reserved1[2]; + UINT8 create_time[2]; // aligned + UINT8 create_date[2]; // aligned + UINT8 modify_time[2]; // aligned + UINT8 modify_date[2]; // aligned + UINT8 access_time[2]; // aligned + UINT8 access_date[2]; // aligned + UINT8 create_time_ms; + UINT8 modify_time_ms; + UINT8 access_time_ms; + UINT8 reserved2[9]; + } FILE_DENTRY_T; + + /* MS-DOS EXFAT stream extension directory entry (32 bytes) */ + typedef struct { + UINT8 type; + UINT8 flags; + UINT8 reserved1; + UINT8 name_len; + UINT8 name_hash[2]; // aligned + UINT8 reserved2[2]; + UINT8 valid_size[8]; // aligned + UINT8 reserved3[4]; // aligned + UINT8 start_clu[4]; // aligned + UINT8 size[8]; // aligned + } STRM_DENTRY_T; + + /* MS-DOS EXFAT file name directory entry (32 bytes) */ + typedef struct { + UINT8 type; + UINT8 flags; + UINT8 unicode_0_14[30]; // aligned + } NAME_DENTRY_T; + + /* MS-DOS EXFAT allocation bitmap directory entry (32 bytes) */ + typedef struct { + UINT8 type; + UINT8 flags; + UINT8 reserved[18]; + UINT8 start_clu[4]; // aligned + UINT8 size[8]; // aligned + } BMAP_DENTRY_T; + + /* MS-DOS EXFAT up-case table directory entry (32 bytes) */ + typedef struct { + UINT8 type; + UINT8 reserved1[3]; + UINT8 checksum[4]; // aligned + UINT8 reserved2[12]; + UINT8 start_clu[4]; // aligned + UINT8 size[8]; // aligned + } CASE_DENTRY_T; + + /* MS-DOS EXFAT volume label directory entry (32 bytes) */ + typedef struct { + UINT8 type; + UINT8 label_len; + UINT8 unicode_0_10[22]; // aligned + UINT8 reserved[8]; + } VOLM_DENTRY_T; + + /* unused entry hint 
information */ + typedef struct { + UINT32 dir; + INT32 entry; + CHAIN_T clu; + } UENTRY_T; + + /* file system volume information structure */ + typedef struct __FS_STRUCT_T { + UINT32 mounted; + struct super_block *sb; + struct semaphore v_sem; + } FS_STRUCT_T; + + typedef struct { + INT32 (*alloc_cluster)(struct super_block *sb, INT32 num_alloc, CHAIN_T *p_chain); + void (*free_cluster)(struct super_block *sb, CHAIN_T *p_chain, INT32 do_relse); + INT32 (*count_used_clusters)(struct super_block *sb); + + INT32 (*init_dir_entry)(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, UINT32 type, + UINT32 start_clu, UINT64 size); + INT32 (*init_ext_entry)(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, INT32 num_entries, + UNI_NAME_T *p_uniname, DOS_NAME_T *p_dosname); + INT32 (*find_dir_entry)(struct super_block *sb, CHAIN_T *p_dir, UNI_NAME_T *p_uniname, INT32 num_entries, DOS_NAME_T *p_dosname, UINT32 type); + void (*delete_dir_entry)(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, INT32 offset, INT32 num_entries); + void (*get_uni_name_from_ext_entry)(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, UINT16 *uniname); + INT32 (*count_ext_entries)(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, DENTRY_T *p_entry); + INT32 (*calc_num_entries)(UNI_NAME_T *p_uniname); + + UINT32 (*get_entry_type)(DENTRY_T *p_entry); + void (*set_entry_type)(DENTRY_T *p_entry, UINT32 type); + UINT32 (*get_entry_attr)(DENTRY_T *p_entry); + void (*set_entry_attr)(DENTRY_T *p_entry, UINT32 attr); + UINT8 (*get_entry_flag)(DENTRY_T *p_entry); + void (*set_entry_flag)(DENTRY_T *p_entry, UINT8 flag); + UINT32 (*get_entry_clu0)(DENTRY_T *p_entry); + void (*set_entry_clu0)(DENTRY_T *p_entry, UINT32 clu0); + UINT64 (*get_entry_size)(DENTRY_T *p_entry); + void (*set_entry_size)(DENTRY_T *p_entry, UINT64 size); + void (*get_entry_time)(DENTRY_T *p_entry, TIMESTAMP_T *tp, UINT8 mode); + void (*set_entry_time)(DENTRY_T *p_entry, TIMESTAMP_T *tp, UINT8 mode); + } FS_FUNC_T; + + typedef struct __FS_INFO_T { + UINT32 drv; // drive ID + UINT32 vol_type; // volume FAT type + UINT32 vol_id; // volume serial number + + UINT32 num_sectors; // num of sectors in volume + UINT32 num_clusters; // num of clusters in volume + UINT32 cluster_size; // cluster size in bytes + UINT32 cluster_size_bits; + UINT32 sectors_per_clu; // cluster size in sectors + UINT32 sectors_per_clu_bits; + + UINT32 PBR_sector; // PBR sector + UINT32 FAT1_start_sector; // FAT1 start sector + UINT32 FAT2_start_sector; // FAT2 start sector + UINT32 root_start_sector; // root dir start sector + UINT32 data_start_sector; // data area start sector + UINT32 num_FAT_sectors; // num of FAT sectors + + UINT32 root_dir; // root dir cluster + UINT32 dentries_in_root; // num of dentries in root dir + UINT32 dentries_per_clu; // num of dentries per cluster + + UINT32 vol_flag; // volume dirty flag + struct buffer_head *pbr_bh; // PBR sector + + UINT32 map_clu; // allocation bitmap start cluster + UINT32 map_sectors; // num of allocation bitmap sectors + struct buffer_head **vol_amap; // allocation bitmap + + UINT16 **vol_utbl; // upcase table + + UINT32 clu_srch_ptr; // cluster search pointer + UINT32 used_clusters; // number of used clusters + UENTRY_T hint_uentry; // unused entry hint information + + UINT32 dev_ejected; // block device operation error flag + + FS_FUNC_T *fs_func; + + /* FAT cache */ + BUF_CACHE_T FAT_cache_array[FAT_CACHE_SIZE]; + BUF_CACHE_T FAT_cache_lru_list; + BUF_CACHE_T FAT_cache_hash_list[FAT_CACHE_HASH_SIZE]; + + /* buf cache */ 
+ BUF_CACHE_T buf_cache_array[BUF_CACHE_SIZE]; + BUF_CACHE_T buf_cache_lru_list; + BUF_CACHE_T buf_cache_hash_list[BUF_CACHE_HASH_SIZE]; + } FS_INFO_T; + +#define ES_2_ENTRIES 2 +#define ES_3_ENTRIES 3 +#define ES_ALL_ENTRIES 0 + + typedef struct { + UINT32 sector; // sector number that contains file_entry + INT32 offset; // byte offset in the sector + INT32 alloc_flag; // flag in stream entry. 01 for cluster chain, 03 for contig. clusteres. + UINT32 num_entries; + + // __buf should be the last member + void *__buf; + } ENTRY_SET_CACHE_T; + + /*----------------------------------------------------------------------*/ + /* External Function Declarations */ + /*----------------------------------------------------------------------*/ + + /* file system initialization & shutdown functions */ + INT32 ffsInit(void); + INT32 ffsShutdown(void); + + /* volume management functions */ + INT32 ffsMountVol(struct super_block *sb, INT32 drv); + INT32 ffsUmountVol(struct super_block *sb); + INT32 ffsCheckVol(struct super_block *sb); + INT32 ffsGetVolInfo(struct super_block *sb, VOL_INFO_T *info); + INT32 ffsSyncVol(struct super_block *sb, INT32 do_sync); + + /* file management functions */ + INT32 ffsLookupFile(struct inode *inode, UINT8 *path, FILE_ID_T *fid); + INT32 ffsCreateFile(struct inode *inode, UINT8 *path, UINT8 mode, FILE_ID_T *fid); + INT32 ffsReadFile(struct inode *inode, FILE_ID_T *fid, void *buffer, UINT64 count, UINT64 *rcount); + INT32 ffsWriteFile(struct inode *inode, FILE_ID_T *fid, void *buffer, UINT64 count, UINT64 *wcount); + INT32 ffsTruncateFile(struct inode *inode, UINT64 old_size, UINT64 new_size); + INT32 ffsMoveFile(struct inode *old_parent_inode, FILE_ID_T *fid, struct inode *new_parent_inode, struct dentry *new_dentry); + INT32 ffsRemoveFile(struct inode *inode, FILE_ID_T *fid); + INT32 ffsSetAttr(struct inode *inode, UINT32 attr); + INT32 ffsGetStat(struct inode *inode, DIR_ENTRY_T *info); + INT32 ffsSetStat(struct inode *inode, DIR_ENTRY_T *info); + INT32 ffsMapCluster(struct inode *inode, INT32 clu_offset, UINT32 *clu); + + /* directory management functions */ + INT32 ffsCreateDir(struct inode *inode, UINT8 *path, FILE_ID_T *fid); + INT32 ffsReadDir(struct inode *inode, DIR_ENTRY_T *dir_ent); + INT32 ffsRemoveDir(struct inode *inode, FILE_ID_T *fid); + + /*----------------------------------------------------------------------*/ + /* External Function Declarations (NOT TO UPPER LAYER) */ + /*----------------------------------------------------------------------*/ + + /* fs management functions */ + INT32 fs_init(void); + INT32 fs_shutdown(void); + void fs_set_vol_flags(struct super_block *sb, UINT32 new_flag); + void fs_sync(struct super_block *sb, INT32 do_sync); + void fs_error(struct super_block *sb); + + /* cluster management functions */ + INT32 clear_cluster(struct super_block *sb, UINT32 clu); + INT32 fat_alloc_cluster(struct super_block *sb, INT32 num_alloc, CHAIN_T *p_chain); + INT32 exfat_alloc_cluster(struct super_block *sb, INT32 num_alloc, CHAIN_T *p_chain); + void fat_free_cluster(struct super_block *sb, CHAIN_T *p_chain, INT32 do_relse); + void exfat_free_cluster(struct super_block *sb, CHAIN_T *p_chain, INT32 do_relse); + UINT32 find_last_cluster(struct super_block *sb, CHAIN_T *p_chain); + INT32 count_num_clusters(struct super_block *sb, CHAIN_T *dir); + INT32 fat_count_used_clusters(struct super_block *sb); + INT32 exfat_count_used_clusters(struct super_block *sb); + void exfat_chain_cont_cluster(struct super_block *sb, UINT32 chain, INT32 len); + + /* 
allocation bitmap management functions */ + INT32 load_alloc_bitmap(struct super_block *sb); + void free_alloc_bitmap(struct super_block *sb); + INT32 set_alloc_bitmap(struct super_block *sb, UINT32 clu); + INT32 clr_alloc_bitmap(struct super_block *sb, UINT32 clu); + UINT32 test_alloc_bitmap(struct super_block *sb, UINT32 clu); + void sync_alloc_bitmap(struct super_block *sb); + + /* upcase table management functions */ + INT32 load_upcase_table(struct super_block *sb); + void free_upcase_table(struct super_block *sb); + + /* dir entry management functions */ + UINT32 fat_get_entry_type(DENTRY_T *p_entry); + UINT32 exfat_get_entry_type(DENTRY_T *p_entry); + void fat_set_entry_type(DENTRY_T *p_entry, UINT32 type); + void exfat_set_entry_type(DENTRY_T *p_entry, UINT32 type); + UINT32 fat_get_entry_attr(DENTRY_T *p_entry); + UINT32 exfat_get_entry_attr(DENTRY_T *p_entry); + void fat_set_entry_attr(DENTRY_T *p_entry, UINT32 attr); + void exfat_set_entry_attr(DENTRY_T *p_entry, UINT32 attr); + UINT8 fat_get_entry_flag(DENTRY_T *p_entry); + UINT8 exfat_get_entry_flag(DENTRY_T *p_entry); + void fat_set_entry_flag(DENTRY_T *p_entry, UINT8 flag); + void exfat_set_entry_flag(DENTRY_T *p_entry, UINT8 flag); + UINT32 fat_get_entry_clu0(DENTRY_T *p_entry); + UINT32 exfat_get_entry_clu0(DENTRY_T *p_entry); + void fat_set_entry_clu0(DENTRY_T *p_entry, UINT32 start_clu); + void exfat_set_entry_clu0(DENTRY_T *p_entry, UINT32 start_clu); + UINT64 fat_get_entry_size(DENTRY_T *p_entry); + UINT64 exfat_get_entry_size(DENTRY_T *p_entry); + void fat_set_entry_size(DENTRY_T *p_entry, UINT64 size); + void exfat_set_entry_size(DENTRY_T *p_entry, UINT64 size); + void fat_get_entry_time(DENTRY_T *p_entry, TIMESTAMP_T *tp, UINT8 mode); + void exfat_get_entry_time(DENTRY_T *p_entry, TIMESTAMP_T *tp, UINT8 mode); + void fat_set_entry_time(DENTRY_T *p_entry, TIMESTAMP_T *tp, UINT8 mode); + void exfat_set_entry_time(DENTRY_T *p_entry, TIMESTAMP_T *tp, UINT8 mode); + INT32 fat_init_dir_entry(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, UINT32 type, UINT32 start_clu, UINT64 size); + INT32 exfat_init_dir_entry(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, UINT32 type, UINT32 start_clu, UINT64 size); + INT32 fat_init_ext_dir_entry(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, INT32 num_entries, UNI_NAME_T *p_uniname, DOS_NAME_T *p_dosname); + INT32 exfat_init_ext_dir_entry(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, INT32 num_entries, UNI_NAME_T *p_uniname, DOS_NAME_T *p_dosname); + void init_dos_entry(DOS_DENTRY_T *ep, UINT32 type, UINT32 start_clu); + void init_ext_entry(EXT_DENTRY_T *ep, INT32 order, UINT8 chksum, UINT16 *uniname); + void init_file_entry(FILE_DENTRY_T *ep, UINT32 type); + void init_strm_entry(STRM_DENTRY_T *ep, UINT8 flags, UINT32 start_clu, UINT64 size); + void init_name_entry(NAME_DENTRY_T *ep, UINT16 *uniname); + void fat_delete_dir_entry(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, INT32 order, INT32 num_entries); + void exfat_delete_dir_entry(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, INT32 order, INT32 num_entries); + + INT32 find_location(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, UINT32 *sector, INT32 *offset); + DENTRY_T *get_entry_with_sector(struct super_block *sb, UINT32 sector, INT32 offset); + DENTRY_T *get_entry_in_dir(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, UINT32 *sector); + ENTRY_SET_CACHE_T *get_entry_set_in_dir (struct super_block *sb, CHAIN_T *p_dir, INT32 entry, UINT32 type, DENTRY_T **file_ep); + void 
release_entry_set (ENTRY_SET_CACHE_T *es); + INT32 write_whole_entry_set (struct super_block *sb, ENTRY_SET_CACHE_T *es); + INT32 write_partial_entries_in_entry_set (struct super_block *sb, ENTRY_SET_CACHE_T *es, DENTRY_T *ep, UINT32 count); + INT32 search_deleted_or_unused_entry(struct super_block *sb, CHAIN_T *p_dir, INT32 num_entries); + INT32 find_empty_entry(struct inode *inode, CHAIN_T *p_dir, INT32 num_entries); + INT32 fat_find_dir_entry(struct super_block *sb, CHAIN_T *p_dir, UNI_NAME_T *p_uniname, INT32 num_entries, DOS_NAME_T *p_dosname, UINT32 type); + INT32 exfat_find_dir_entry(struct super_block *sb, CHAIN_T *p_dir, UNI_NAME_T *p_uniname, INT32 num_entries, DOS_NAME_T *p_dosname, UINT32 type); + INT32 fat_count_ext_entries(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, DENTRY_T *p_entry); + INT32 exfat_count_ext_entries(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, DENTRY_T *p_entry); + INT32 count_dos_name_entries(struct super_block *sb, CHAIN_T *p_dir, UINT32 type); + void update_dir_checksum(struct super_block *sb, CHAIN_T *p_dir, INT32 entry); + void update_dir_checksum_with_entry_set (struct super_block *sb, ENTRY_SET_CACHE_T *es); + BOOL is_dir_empty(struct super_block *sb, CHAIN_T *p_dir); + + /* name conversion functions */ + INT32 get_num_entries_and_dos_name(struct super_block *sb, CHAIN_T *p_dir, UNI_NAME_T *p_uniname, INT32 *entries, DOS_NAME_T *p_dosname); + void get_uni_name_from_dos_entry(struct super_block *sb, DOS_DENTRY_T *ep, UNI_NAME_T *p_uniname, UINT8 mode); + void fat_get_uni_name_from_ext_entry(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, UINT16 *uniname); + void exfat_get_uni_name_from_ext_entry(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, UINT16 *uniname); + INT32 extract_uni_name_from_ext_entry(EXT_DENTRY_T *ep, UINT16 *uniname, INT32 order); + INT32 extract_uni_name_from_name_entry(NAME_DENTRY_T *ep, UINT16 *uniname, INT32 order); + INT32 fat_generate_dos_name(struct super_block *sb, CHAIN_T *p_dir, DOS_NAME_T *p_dosname); + void fat_attach_count_to_dos_name(UINT8 *dosname, INT32 count); + INT32 fat_calc_num_entries(UNI_NAME_T *p_uniname); + INT32 exfat_calc_num_entries(UNI_NAME_T *p_uniname); + UINT8 calc_checksum_1byte(void *data, INT32 len, UINT8 chksum); + UINT16 calc_checksum_2byte(void *data, INT32 len, UINT16 chksum, INT32 type); + UINT32 calc_checksum_4byte(void *data, INT32 len, UINT32 chksum, INT32 type); + + /* name resolution functions */ + INT32 resolve_path(struct inode *inode, UINT8 *path, CHAIN_T *p_dir, UNI_NAME_T *p_uniname); + INT32 resolve_name(UINT8 *name, UINT8 **arg); + + /* file operation functions */ + INT32 fat16_mount(struct super_block *sb, PBR_SECTOR_T *p_pbr); + INT32 fat32_mount(struct super_block *sb, PBR_SECTOR_T *p_pbr); + INT32 exfat_mount(struct super_block *sb, PBR_SECTOR_T *p_pbr); + INT32 create_dir(struct inode *inode, CHAIN_T *p_dir, UNI_NAME_T *p_uniname, FILE_ID_T *fid); + INT32 create_file(struct inode *inode, CHAIN_T *p_dir, UNI_NAME_T *p_uniname, UINT8 mode, FILE_ID_T *fid); + void remove_file(struct inode *inode, CHAIN_T *p_dir, INT32 entry); + INT32 rename_file(struct inode *inode, CHAIN_T *p_dir, INT32 old_entry, UNI_NAME_T *p_uniname, FILE_ID_T *fid); + INT32 move_file(struct inode *inode, CHAIN_T *p_olddir, INT32 oldentry, CHAIN_T *p_newdir, UNI_NAME_T *p_uniname, FILE_ID_T *fid); + + /* sector read/write functions */ + INT32 sector_read(struct super_block *sb, UINT32 sec, struct buffer_head **bh, INT32 read); + INT32 sector_write(struct super_block *sb, UINT32 sec, 
struct buffer_head *bh, INT32 sync); + INT32 multi_sector_read(struct super_block *sb, UINT32 sec, struct buffer_head **bh, INT32 num_secs, INT32 read); + INT32 multi_sector_write(struct super_block *sb, UINT32 sec, struct buffer_head *bh, INT32 num_secs, INT32 sync); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _EXFAT_H */ + +/* end of exfat.h */ diff --git a/fs/exfat/exfat_api.c b/fs/exfat/exfat_api.c new file mode 100644 index 0000000000000..5423181260b75 --- /dev/null +++ b/fs/exfat/exfat_api.c @@ -0,0 +1,563 @@ +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/************************************************************************/ +/* */ +/* PROJECT : exFAT & FAT12/16/32 File System */ +/* FILE : exfat_api.c */ +/* PURPOSE : exFAT API Glue Layer */ +/* */ +/*----------------------------------------------------------------------*/ +/* NOTES */ +/* */ +/*----------------------------------------------------------------------*/ +/* REVISION HISTORY (Ver 0.9) */ +/* */ +/* - 2010.11.15 [Joosun Hahn] : first writing */ +/* */ +/************************************************************************/ + +#include +#include +#include + +#include "exfat_version.h" +#include "exfat_config.h" +#include "exfat_global.h" +#include "exfat_data.h" +#include "exfat_oal.h" + +#include "exfat_part.h" +#include "exfat_nls.h" +#include "exfat_api.h" +#include "exfat_super.h" +#include "exfat.h" + +/*----------------------------------------------------------------------*/ +/* Constant & Macro Definitions */ +/*----------------------------------------------------------------------*/ + +/*----------------------------------------------------------------------*/ +/* Global Variable Definitions */ +/*----------------------------------------------------------------------*/ + +extern FS_STRUCT_T fs_struct[]; + +extern struct semaphore z_sem; + +/*----------------------------------------------------------------------*/ +/* Local Variable Definitions */ +/*----------------------------------------------------------------------*/ + +/*----------------------------------------------------------------------*/ +/* Local Function Declarations */ +/*----------------------------------------------------------------------*/ + +/*======================================================================*/ +/* Global Function Definitions */ +/* - All functions for global use have same return value format, */ +/* that is, FFS_SUCCESS on success and several FS error code on */ +/* various error condition. 
*/ +/*======================================================================*/ + +/*----------------------------------------------------------------------*/ +/* exFAT Filesystem Init & Exit Functions */ +/*----------------------------------------------------------------------*/ + +INT32 FsInit(void) +{ + INT32 i; + + /* initialize all volumes as un-mounted */ + for (i = 0; i < MAX_DRIVE; i++) { + fs_struct[i].mounted = FALSE; + fs_struct[i].sb = NULL; + sm_init(&(fs_struct[i].v_sem)); + } + + return(ffsInit()); +} + +INT32 FsShutdown(void) +{ + INT32 i; + + /* unmount all volumes */ + for (i = 0; i < MAX_DRIVE; i++) { + if (!fs_struct[i].mounted) continue; + + ffsUmountVol(fs_struct[i].sb); + } + + return(ffsShutdown()); +} + +/*----------------------------------------------------------------------*/ +/* Volume Management Functions */ +/*----------------------------------------------------------------------*/ + +/* FsMountVol : mount the file system volume */ +INT32 FsMountVol(struct super_block *sb) +{ + INT32 err, drv; + + sm_P(&z_sem); + + for (drv = 0; drv < MAX_DRIVE; drv++) { + if (!fs_struct[drv].mounted) break; + } + + if (drv >= MAX_DRIVE) return(FFS_ERROR); + + /* acquire the lock for file system critical section */ + sm_P(&(fs_struct[drv].v_sem)); + + err = buf_init(sb); + if (!err) { + err = ffsMountVol(sb, drv); + } + + /* release the lock for file system critical section */ + sm_V(&(fs_struct[drv].v_sem)); + + if (!err) { + fs_struct[drv].mounted = TRUE; + fs_struct[drv].sb = sb; + } else { + buf_shutdown(sb); + } + + sm_V(&z_sem); + + return(err); +} /* end of FsMountVol */ + +/* FsUmountVol : unmount the file system volume */ +INT32 FsUmountVol(struct super_block *sb) +{ + INT32 err; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + sm_P(&z_sem); + + /* acquire the lock for file system critical section */ + sm_P(&(fs_struct[p_fs->drv].v_sem)); + + err = ffsUmountVol(sb); + buf_shutdown(sb); + + /* release the lock for file system critical section */ + sm_V(&(fs_struct[p_fs->drv].v_sem)); + + fs_struct[p_fs->drv].mounted = FALSE; + fs_struct[p_fs->drv].sb = NULL; + + sm_V(&z_sem); + + return(err); +} /* end of FsUmountVol */ + +/* FsGetVolInfo : get the information of a file system volume */ +INT32 FsGetVolInfo(struct super_block *sb, VOL_INFO_T *info) +{ + INT32 err; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + /* check the validity of pointer parameters */ + if (info == NULL) return(FFS_ERROR); + + /* acquire the lock for file system critical section */ + sm_P(&(fs_struct[p_fs->drv].v_sem)); + + err = ffsGetVolInfo(sb, info); + + /* release the lock for file system critical section */ + sm_V(&(fs_struct[p_fs->drv].v_sem)); + + return(err); +} /* end of FsGetVolInfo */ + +/* FsSyncVol : synchronize a file system volume */ +INT32 FsSyncVol(struct super_block *sb, INT32 do_sync) +{ + INT32 err; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + /* acquire the lock for file system critical section */ + sm_P(&(fs_struct[p_fs->drv].v_sem)); + + err = ffsSyncVol(sb, do_sync); + + /* release the lock for file system critical section */ + sm_V(&(fs_struct[p_fs->drv].v_sem)); + + return(err); +} /* end of FsSyncVol */ + + +/*----------------------------------------------------------------------*/ +/* File Operation Functions */ +/*----------------------------------------------------------------------*/ + +/* FsCreateFile : create a file */ +INT32 FsLookupFile(struct inode *inode, UINT8 *path, FILE_ID_T *fid) +{ + INT32 err; + struct super_block *sb = inode->i_sb; + FS_INFO_T 
*p_fs = &(EXFAT_SB(sb)->fs_info); + + /* check the validity of pointer parameters */ + if ((fid == NULL) || (path == NULL) || (*path == '\0')) + return(FFS_ERROR); + + /* acquire the lock for file system critical section */ + sm_P(&(fs_struct[p_fs->drv].v_sem)); + + err = ffsLookupFile(inode, path, fid); + + /* release the lock for file system critical section */ + sm_V(&(fs_struct[p_fs->drv].v_sem)); + + return(err); +} /* end of FsLookupFile */ + +/* FsCreateFile : create a file */ +INT32 FsCreateFile(struct inode *inode, UINT8 *path, UINT8 mode, FILE_ID_T *fid) +{ + INT32 err; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + /* check the validity of pointer parameters */ + if ((fid == NULL) || (path == NULL) || (*path == '\0')) + return(FFS_ERROR); + + /* acquire the lock for file system critical section */ + sm_P(&(fs_struct[p_fs->drv].v_sem)); + + err = ffsCreateFile(inode, path, mode, fid); + + /* release the lock for file system critical section */ + sm_V(&(fs_struct[p_fs->drv].v_sem)); + + return(err); +} /* end of FsCreateFile */ + +INT32 FsReadFile(struct inode *inode, FILE_ID_T *fid, void *buffer, UINT64 count, UINT64 *rcount) +{ + INT32 err; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + /* check the validity of the given file id */ + if (fid == NULL) return(FFS_INVALIDFID); + + /* check the validity of pointer parameters */ + if (buffer == NULL) return(FFS_ERROR); + + /* acquire the lock for file system critical section */ + sm_P(&(fs_struct[p_fs->drv].v_sem)); + + err = ffsReadFile(inode, fid, buffer, count, rcount); + + /* release the lock for file system critical section */ + sm_V(&(fs_struct[p_fs->drv].v_sem)); + + return(err); +} /* end of FsReadFile */ + +INT32 FsWriteFile(struct inode *inode, FILE_ID_T *fid, void *buffer, UINT64 count, UINT64 *wcount) +{ + INT32 err; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + /* check the validity of the given file id */ + if (fid == NULL) return(FFS_INVALIDFID); + + /* check the validity of pointer parameters */ + if (buffer == NULL) return(FFS_ERROR); + + /* acquire the lock for file system critical section */ + sm_P(&(fs_struct[p_fs->drv].v_sem)); + + err = ffsWriteFile(inode, fid, buffer, count, wcount); + + /* release the lock for file system critical section */ + sm_V(&(fs_struct[p_fs->drv].v_sem)); + + return(err); +} /* end of FsWriteFile */ + +/* FsTruncateFile : resize the file length */ +INT32 FsTruncateFile(struct inode *inode, UINT64 old_size, UINT64 new_size) +{ + INT32 err; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + /* acquire the lock for file system critical section */ + sm_P(&(fs_struct[p_fs->drv].v_sem)); + + PRINTK("FsTruncateFile entered (inode %p size %llu)\n", inode, new_size); + + err = ffsTruncateFile(inode, old_size, new_size); + + PRINTK("FsTruncateFile exitted (%d)\n", err); + + /* release the lock for file system critical section */ + sm_V(&(fs_struct[p_fs->drv].v_sem)); + + return(err); +} /* end of FsTruncateFile */ + +/* FsMoveFile : move(rename) a old file into a new file */ +INT32 FsMoveFile(struct inode *old_parent_inode, FILE_ID_T *fid, struct inode *new_parent_inode, struct dentry *new_dentry) +{ + INT32 err; + struct super_block *sb = old_parent_inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + /* check the validity of the given file id */ + if (fid == NULL) return(FFS_INVALIDFID); + + /* acquire the lock for 
file system critical section */ + sm_P(&(fs_struct[p_fs->drv].v_sem)); + + err = ffsMoveFile(old_parent_inode, fid, new_parent_inode, new_dentry); + + /* release the lock for file system critical section */ + sm_V(&(fs_struct[p_fs->drv].v_sem)); + + return(err); +} /* end of FsMoveFile */ + +/* FsRemoveFile : remove a file */ +INT32 FsRemoveFile(struct inode *inode, FILE_ID_T *fid) +{ + INT32 err; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + /* check the validity of the given file id */ + if (fid == NULL) return(FFS_INVALIDFID); + + /* acquire the lock for file system critical section */ + sm_P(&(fs_struct[p_fs->drv].v_sem)); + + err = ffsRemoveFile(inode, fid); + + /* release the lock for file system critical section */ + sm_V(&(fs_struct[p_fs->drv].v_sem)); + + return(err); +} /* end of FsRemoveFile */ + +/* FsSetAttr : set the attribute of a given file */ +INT32 FsSetAttr(struct inode *inode, UINT32 attr) +{ + INT32 err; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + /* acquire the lock for file system critical section */ + sm_P(&(fs_struct[p_fs->drv].v_sem)); + + err = ffsSetAttr(inode, attr); + + /* release the lock for file system critical section */ + sm_V(&(fs_struct[p_fs->drv].v_sem)); + + return(err); +} /* end of FsSetAttr */ + +/* FsReadStat : get the information of a given file */ +INT32 FsReadStat(struct inode *inode, DIR_ENTRY_T *info) +{ + INT32 err; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + /* acquire the lock for file system critical section */ + sm_P(&(fs_struct[p_fs->drv].v_sem)); + + err = ffsGetStat(inode, info); + + /* release the lock for file system critical section */ + sm_V(&(fs_struct[p_fs->drv].v_sem)); + + return(err); +} /* end of FsReadStat */ + +/* FsWriteStat : set the information of a given file */ +INT32 FsWriteStat(struct inode *inode, DIR_ENTRY_T *info) +{ + INT32 err; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + /* acquire the lock for file system critical section */ + sm_P(&(fs_struct[p_fs->drv].v_sem)); + + PRINTK("FsWriteStat entered (inode %p info %p\n", inode, info); + + err = ffsSetStat(inode, info); + + /* release the lock for file system critical section */ + sm_V(&(fs_struct[p_fs->drv].v_sem)); + + PRINTK("FsWriteStat exited (%d)\n", err); + + return(err); +} /* end of FsWriteStat */ + +/* FsMapCluster : return the cluster number in the given cluster offset */ +INT32 FsMapCluster(struct inode *inode, INT32 clu_offset, UINT32 *clu) +{ + INT32 err; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + /* check the validity of pointer parameters */ + if (clu == NULL) return(FFS_ERROR); + + /* acquire the lock for file system critical section */ + sm_P(&(fs_struct[p_fs->drv].v_sem)); + + err = ffsMapCluster(inode, clu_offset, clu); + + /* release the lock for file system critical section */ + sm_V(&(fs_struct[p_fs->drv].v_sem)); + + return(err); +} /* end of FsMapCluster */ + +/*----------------------------------------------------------------------*/ +/* Directory Operation Functions */ +/*----------------------------------------------------------------------*/ + +/* FsCreateDir : create(make) a directory */ +INT32 FsCreateDir(struct inode *inode, UINT8 *path, FILE_ID_T *fid) +{ + INT32 err; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + /* check the validity of pointer parameters */ + if 
((fid == NULL) || (path == NULL) || (*path == '\0')) + return(FFS_ERROR); + + /* acquire the lock for file system critical section */ + sm_P(&(fs_struct[p_fs->drv].v_sem)); + + err = ffsCreateDir(inode, path, fid); + + /* release the lock for file system critical section */ + sm_V(&(fs_struct[p_fs->drv].v_sem)); + + return(err); +} /* end of FsCreateDir */ + +/* FsReadDir : read a directory entry from the opened directory */ +INT32 FsReadDir(struct inode *inode, DIR_ENTRY_T *dir_entry) +{ + INT32 err; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + /* check the validity of pointer parameters */ + if (dir_entry == NULL) return(FFS_ERROR); + + /* acquire the lock for file system critical section */ + sm_P(&(fs_struct[p_fs->drv].v_sem)); + + err = ffsReadDir(inode, dir_entry); + + /* release the lock for file system critical section */ + sm_V(&(fs_struct[p_fs->drv].v_sem)); + + return(err); +} /* end of FsReadDir */ + +/* FsRemoveDir : remove a directory */ +INT32 FsRemoveDir(struct inode *inode, FILE_ID_T *fid) +{ + INT32 err; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + /* check the validity of the given file id */ + if (fid == NULL) return(FFS_INVALIDFID); + + /* acquire the lock for file system critical section */ + sm_P(&(fs_struct[p_fs->drv].v_sem)); + + err = ffsRemoveDir(inode, fid); + + /* release the lock for file system critical section */ + sm_V(&(fs_struct[p_fs->drv].v_sem)); + + return(err); +} /* end of FsRemoveDir */ + +EXPORT_SYMBOL(FsMountVol); +EXPORT_SYMBOL(FsUmountVol); +EXPORT_SYMBOL(FsGetVolInfo); +EXPORT_SYMBOL(FsSyncVol); +EXPORT_SYMBOL(FsLookupFile); +EXPORT_SYMBOL(FsCreateFile); +EXPORT_SYMBOL(FsReadFile); +EXPORT_SYMBOL(FsWriteFile); +EXPORT_SYMBOL(FsTruncateFile); +EXPORT_SYMBOL(FsMoveFile); +EXPORT_SYMBOL(FsRemoveFile); +EXPORT_SYMBOL(FsSetAttr); +EXPORT_SYMBOL(FsReadStat); +EXPORT_SYMBOL(FsWriteStat); +EXPORT_SYMBOL(FsMapCluster); +EXPORT_SYMBOL(FsCreateDir); +EXPORT_SYMBOL(FsReadDir); +EXPORT_SYMBOL(FsRemoveDir); + +#if EXFAT_CONFIG_KERNEL_DEBUG +/* FsReleaseCache: Release FAT & buf cache */ +INT32 FsReleaseCache(struct super_block *sb) +{ + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + /* acquire the lock for file system critical section */ + sm_P(&(fs_struct[p_fs->drv].v_sem)); + + FAT_release_all(sb); + buf_release_all(sb); + + /* release the lock for file system critical section */ + sm_V(&(fs_struct[p_fs->drv].v_sem)); + + return 0; +} +/* FsReleaseCache */ + +EXPORT_SYMBOL(FsReleaseCache); +#endif /* EXFAT_CONFIG_KERNEL_DEBUG */ + +/*======================================================================*/ +/* Local Function Definitions */ +/*======================================================================*/ + +/* end of exfat_api.c */ diff --git a/fs/exfat/exfat_api.h b/fs/exfat/exfat_api.h new file mode 100644 index 0000000000000..d89b8fed596fb --- /dev/null +++ b/fs/exfat/exfat_api.h @@ -0,0 +1,221 @@ +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/************************************************************************/ +/* */ +/* PROJECT : exFAT & FAT12/16/32 File System */ +/* FILE : exfat_api.h */ +/* PURPOSE : Header File for exFAT API Glue Layer */ +/* */ +/*----------------------------------------------------------------------*/ +/* NOTES */ +/* */ +/*----------------------------------------------------------------------*/ +/* REVISION HISTORY (Ver 0.9) */ +/* */ +/* - 2010.11.15 [Joosun Hahn] : first writing */ +/* */ +/************************************************************************/ + +#ifndef _EXFAT_API_H +#define _EXFAT_API_H + +#include "exfat_config.h" +#include "exfat_global.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + + /*----------------------------------------------------------------------*/ + /* Constant & Macro Definitions */ + /*----------------------------------------------------------------------*/ + +#define EXFAT_SUPER_MAGIC (0x2011BAB0L) +#define EXFAT_ROOT_INO 1 + + /* FAT types */ +#define FAT12 0x01 // FAT12 +#define FAT16 0x0E // Win95 FAT16 (LBA) +#define FAT32 0x0C // Win95 FAT32 (LBA) +#define EXFAT 0x07 // exFAT + + /* file name lengths */ +#define MAX_CHARSET_SIZE 3 // max size of multi-byte character +#define MAX_PATH_DEPTH 15 // max depth of path name +#define MAX_NAME_LENGTH 256 // max len of file name including NULL +#define MAX_PATH_LENGTH 260 // max len of path name including NULL +#define DOS_NAME_LENGTH 11 // DOS file name length excluding NULL +#define DOS_PATH_LENGTH 80 // DOS path name length excluding NULL + + /* file attributes */ +#define ATTR_NORMAL 0x0000 +#define ATTR_READONLY 0x0001 +#define ATTR_HIDDEN 0x0002 +#define ATTR_SYSTEM 0x0004 +#define ATTR_VOLUME 0x0008 +#define ATTR_SUBDIR 0x0010 +#define ATTR_ARCHIVE 0x0020 +#define ATTR_SYMLINK 0x0040 +#define ATTR_EXTEND 0x000F +#define ATTR_RWMASK 0x007E + + /* file creation modes */ +#define FM_REGULAR 0x00 +#define FM_SYMLINK 0x40 + + /* return values */ +#define FFS_SUCCESS 0 +#define FFS_MEDIAERR 1 +#define FFS_FORMATERR 2 +#define FFS_MOUNTED 3 +#define FFS_NOTMOUNTED 4 +#define FFS_ALIGNMENTERR 5 +#define FFS_SEMAPHOREERR 6 +#define FFS_INVALIDPATH 7 +#define FFS_INVALIDFID 8 +#define FFS_NOTFOUND 9 +#define FFS_FILEEXIST 10 +#define FFS_PERMISSIONERR 11 +#define FFS_NOTOPENED 12 +#define FFS_MAXOPENED 13 +#define FFS_FULL 14 +#define FFS_EOF 15 +#define FFS_DIRBUSY 16 +#define FFS_MEMORYERR 17 +#define FFS_NAMETOOLONG 18 +#define FFS_ERROR 19 // generic error code + + /*----------------------------------------------------------------------*/ + /* Type Definitions */ + /*----------------------------------------------------------------------*/ + + typedef struct { + UINT16 Year; + UINT16 Month; + UINT16 Day; + UINT16 Hour; + UINT16 Minute; + UINT16 Second; + UINT16 MilliSecond; + } DATE_TIME_T; + + typedef struct { + UINT32 Offset; // start sector number of the partition + UINT32 Size; // in sectors + } PART_INFO_T; + + typedef struct { + UINT32 SecSize; // sector size in bytes + UINT32 DevSize; // block device size in sectors + } DEV_INFO_T; + + typedef struct { + UINT32 FatType; + UINT32 ClusterSize; + UINT32 NumClusters; + UINT32 FreeClusters; + UINT32 UsedClusters; + } VOL_INFO_T; + + /* directory structure */ + 
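 /*
  * Illustrative note on the two handle types that follow: CHAIN_T names a
  * cluster chain by its start cluster, its size and its chain flags, while
  * FILE_ID_T is the per-file handle that FsLookupFile()/FsCreateFile() fill
  * in and that the read/write/move/remove calls consume. A hypothetical
  * caller-side sketch, assuming inode, path and buf are supplied by the VFS
  * glue code:
  *
  *     FILE_ID_T fid;
  *     UINT64 got = 0;
  *
  *     if (FsLookupFile(inode, path, &fid) == FFS_SUCCESS)
  *         FsReadFile(inode, &fid, buf, sizeof(buf), &got);
  */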
typedef struct { + UINT32 dir; + INT32 size; + UINT8 flags; + } CHAIN_T; + + /* file id structure */ + typedef struct { + CHAIN_T dir; + UINT8 flags; + INT32 entry; + UINT32 type; + UINT32 attr; + UINT32 start_clu; + INT32 hint_last_off; + UINT32 hint_last_clu; + INT64 rwoffset; + UINT64 size; + } FILE_ID_T; + + typedef struct { + INT8 Name[MAX_NAME_LENGTH *MAX_CHARSET_SIZE]; + INT8 ShortName[DOS_NAME_LENGTH + 2]; // used only for FAT12/16/32, not used for exFAT + UINT32 Attr; + UINT64 Size; + UINT32 NumSubdirs; + DATE_TIME_T CreateTimestamp; + DATE_TIME_T ModifyTimestamp; + DATE_TIME_T AccessTimestamp; + } DIR_ENTRY_T; + + /*======================================================================*/ + /* */ + /* API FUNCTION DECLARATIONS */ + /* (CHANGE THIS PART IF REQUIRED) */ + /* */ + /*======================================================================*/ + + /*----------------------------------------------------------------------*/ + /* External Function Declarations */ + /*----------------------------------------------------------------------*/ + + /* file system initialization & shutdown functions */ + INT32 FsInit(void); + INT32 FsShutdown(void); + + /* volume management functions */ + INT32 FsMountVol(struct super_block *sb); + INT32 FsUmountVol(struct super_block *sb); + INT32 FsGetVolInfo(struct super_block *sb, VOL_INFO_T *info); + INT32 FsSyncVol(struct super_block *sb, INT32 do_sync); + + /* file management functions */ + INT32 FsLookupFile(struct inode *inode, UINT8 *path, FILE_ID_T *fid); + INT32 FsCreateFile(struct inode *inode, UINT8 *path, UINT8 mode, FILE_ID_T *fid); + INT32 FsReadFile(struct inode *inode, FILE_ID_T *fid, void *buffer, UINT64 count, UINT64 *rcount); + INT32 FsWriteFile(struct inode *inode, FILE_ID_T *fid, void *buffer, UINT64 count, UINT64 *wcount); + INT32 FsTruncateFile(struct inode *inode, UINT64 old_size, UINT64 new_size); + INT32 FsMoveFile(struct inode *old_parent_inode, FILE_ID_T *fid, struct inode *new_parent_inode, struct dentry *new_dentry); + INT32 FsRemoveFile(struct inode *inode, FILE_ID_T *fid); + INT32 FsSetAttr(struct inode *inode, UINT32 attr); + INT32 FsReadStat(struct inode *inode, DIR_ENTRY_T *info); + INT32 FsWriteStat(struct inode *inode, DIR_ENTRY_T *info); + INT32 FsMapCluster(struct inode *inode, INT32 clu_offset, UINT32 *clu); + + /* directory management functions */ + INT32 FsCreateDir(struct inode *inode, UINT8 *path, FILE_ID_T *fid); + INT32 FsReadDir(struct inode *inode, DIR_ENTRY_T *dir_entry); + INT32 FsRemoveDir(struct inode *inode, FILE_ID_T *fid); + + /* debug functions */ + INT32 FsReleaseCache(struct super_block *sb); + + /* partition management functions */ +//INT32 FsSetPartition(INT32 dev, INT32 num_vol, PART_INFO_T *vol_spec); +//INT32 FsGetPartition(INT32 dev, INT32 *num_vol, PART_INFO_T *vol_spec); +//INT32 FsGetDevInfo(INT32 dev, DEV_INFO_T *info); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _EXFAT_API_H */ + +/* end of exfat_api.h */ diff --git a/fs/exfat/exfat_blkdev.c b/fs/exfat/exfat_blkdev.c new file mode 100644 index 0000000000000..154b444b26f07 --- /dev/null +++ b/fs/exfat/exfat_blkdev.c @@ -0,0 +1,190 @@ +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/************************************************************************/ +/* */ +/* PROJECT : exFAT & FAT12/16/32 File System */ +/* FILE : exfat_blkdev.c */ +/* PURPOSE : exFAT Block Device Driver Glue Layer */ +/* */ +/*----------------------------------------------------------------------*/ +/* NOTES */ +/* */ +/*----------------------------------------------------------------------*/ +/* REVISION HISTORY (Ver 0.9) */ +/* */ +/* - 2010.11.15 [Joosun Hahn] : first writing */ +/* */ +/************************************************************************/ + +#include + +#include "exfat_config.h" +#include "exfat_global.h" +#include "exfat_blkdev.h" +#include "exfat_data.h" +#include "exfat_api.h" +#include "exfat_super.h" + +/*----------------------------------------------------------------------*/ +/* Constant & Macro Definitions */ +/*----------------------------------------------------------------------*/ + +/*----------------------------------------------------------------------*/ +/* Global Variable Definitions */ +/*----------------------------------------------------------------------*/ + +/*----------------------------------------------------------------------*/ +/* Local Variable Definitions */ +/*----------------------------------------------------------------------*/ + +/*======================================================================*/ +/* Function Definitions */ +/*======================================================================*/ + +INT32 bdev_init(void) +{ + return(FFS_SUCCESS); +} + +INT32 bdev_shutdown(void) +{ + return(FFS_SUCCESS); +} + +INT32 bdev_open(struct super_block *sb) +{ + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); + + if (p_bd->opened) return(FFS_SUCCESS); + + p_bd->sector_size = bdev_logical_block_size(sb->s_bdev); + p_bd->sector_size_bits = my_log2(p_bd->sector_size); + p_bd->sector_size_mask = p_bd->sector_size - 1; + p_bd->num_sectors = i_size_read(sb->s_bdev->bd_inode) >> p_bd->sector_size_bits; + + p_bd->opened = TRUE; + + return(FFS_SUCCESS); +} + +INT32 bdev_close(struct super_block *sb) +{ + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); + + if (!p_bd->opened) return(FFS_SUCCESS); + + p_bd->opened = FALSE; + return(FFS_SUCCESS); +} + +INT32 bdev_read(struct super_block *sb, UINT32 secno, struct buffer_head **bh, UINT32 num_secs, INT32 read) +{ + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); +#if EXFAT_CONFIG_KERNEL_DEBUG + struct exfat_sb_info *sbi = EXFAT_SB(sb); + long flags = sbi->debug_flags; + + if (flags & EXFAT_DEBUGFLAGS_ERROR_RW) return (FFS_MEDIAERR); +#endif /* EXFAT_CONFIG_KERNEL_DEBUG */ + + if (!p_bd->opened) return(FFS_MEDIAERR); + + if (*bh) __brelse(*bh); + + if (read) + *bh = __bread(sb->s_bdev, secno, num_secs << p_bd->sector_size_bits); + else + *bh = __getblk(sb->s_bdev, secno, num_secs << p_bd->sector_size_bits); + + if (*bh) return(FFS_SUCCESS); + + WARN(!p_fs->dev_ejected, + "[EXFAT] No bh, device seems wrong or to be ejected.\n"); + + return(FFS_MEDIAERR); +} + +INT32 bdev_write(struct 
super_block *sb, UINT32 secno, struct buffer_head *bh, UINT32 num_secs, INT32 sync) +{ + INT32 count; + struct buffer_head *bh2; + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); +#if EXFAT_CONFIG_KERNEL_DEBUG + struct exfat_sb_info *sbi = EXFAT_SB(sb); + long flags = sbi->debug_flags; + + if (flags & EXFAT_DEBUGFLAGS_ERROR_RW) return (FFS_MEDIAERR); +#endif /* EXFAT_CONFIG_KERNEL_DEBUG */ + + if (!p_bd->opened) return(FFS_MEDIAERR); + + if (secno == bh->b_blocknr) { + lock_buffer(bh); + set_buffer_uptodate(bh); + mark_buffer_dirty(bh); + unlock_buffer(bh); + if (sync && (sync_dirty_buffer(bh) != 0)) + return (FFS_MEDIAERR); + } else { + count = num_secs << p_bd->sector_size_bits; + + bh2 = __getblk(sb->s_bdev, secno, count); + + if (bh2 == NULL) + goto no_bh; + + lock_buffer(bh2); + MEMCPY(bh2->b_data, bh->b_data, count); + set_buffer_uptodate(bh2); + mark_buffer_dirty(bh2); + unlock_buffer(bh2); + if (sync && (sync_dirty_buffer(bh2) != 0)) { + __brelse(bh2); + goto no_bh; + } + __brelse(bh2); + } + + return(FFS_SUCCESS); + +no_bh: + WARN(!p_fs->dev_ejected, + "[EXFAT] No bh, device seems wrong or to be ejected.\n"); + + return (FFS_MEDIAERR); +} + +INT32 bdev_sync(struct super_block *sb) +{ + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); +#if EXFAT_CONFIG_KERNEL_DEBUG + struct exfat_sb_info *sbi = EXFAT_SB(sb); + long flags = sbi->debug_flags; + + if (flags & EXFAT_DEBUGFLAGS_ERROR_RW) return (FFS_MEDIAERR); +#endif /* EXFAT_CONFIG_KERNEL_DEBUG */ + + if (!p_bd->opened) return(FFS_MEDIAERR); + + return sync_blockdev(sb->s_bdev); +} + +/* end of exfat_blkdev.c */ diff --git a/fs/exfat/exfat_blkdev.h b/fs/exfat/exfat_blkdev.h new file mode 100644 index 0000000000000..c1f0c28cacb29 --- /dev/null +++ b/fs/exfat/exfat_blkdev.h @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +/************************************************************************/ +/* */ +/* PROJECT : exFAT & FAT12/16/32 File System */ +/* FILE : exfat_blkdev.h */ +/* PURPOSE : Header File for exFAT Block Device Driver Glue Layer */ +/* */ +/*----------------------------------------------------------------------*/ +/* NOTES */ +/* */ +/*----------------------------------------------------------------------*/ +/* REVISION HISTORY (Ver 0.9) */ +/* */ +/* - 2010.11.15 [Joosun Hahn] : first writing */ +/* */ +/************************************************************************/ + +#ifndef _EXFAT_BLKDEV_H +#define _EXFAT_BLKDEV_H + +#include "exfat_config.h" +#include "exfat_global.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + + /*----------------------------------------------------------------------*/ + /* Constant & Macro Definitions (Non-Configurable) */ + /*----------------------------------------------------------------------*/ + + /*----------------------------------------------------------------------*/ + /* Type Definitions */ + /*----------------------------------------------------------------------*/ + + typedef struct __BD_INFO_T { + INT32 sector_size; // in bytes + INT32 sector_size_bits; + INT32 sector_size_mask; + INT32 num_sectors; // total number of sectors in this block device + BOOL opened; // opened or not + } BD_INFO_T; + + /*----------------------------------------------------------------------*/ + /* External Variable Declarations */ + /*----------------------------------------------------------------------*/ + + /*----------------------------------------------------------------------*/ + /* External Function Declarations */ + /*----------------------------------------------------------------------*/ + + INT32 bdev_init(void); + INT32 bdev_shutdown(void); + INT32 bdev_open(struct super_block *sb); + INT32 bdev_close(struct super_block *sb); + INT32 bdev_read(struct super_block *sb, UINT32 secno, struct buffer_head **bh, UINT32 num_secs, INT32 read); + INT32 bdev_write(struct super_block *sb, UINT32 secno, struct buffer_head *bh, UINT32 num_secs, INT32 sync); + INT32 bdev_sync(struct super_block *sb); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _EXFAT_BLKDEV_H */ + +/* end of exfat_blkdev.h */ diff --git a/fs/exfat/exfat_cache.c b/fs/exfat/exfat_cache.c new file mode 100644 index 0000000000000..05c613653010c --- /dev/null +++ b/fs/exfat/exfat_cache.c @@ -0,0 +1,785 @@ +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +/************************************************************************/ +/* */ +/* PROJECT : exFAT & FAT12/16/32 File System */ +/* FILE : exfat_cache.c */ +/* PURPOSE : exFAT Cache Manager */ +/* (FAT Cache & Buffer Cache) */ +/* */ +/*----------------------------------------------------------------------*/ +/* NOTES */ +/* */ +/*----------------------------------------------------------------------*/ +/* REVISION HISTORY (Ver 0.9) */ +/* */ +/* - 2010.11.15 [Sung-Kwan Kim] : first writing */ +/* */ +/************************************************************************/ + +#include "exfat_config.h" +#include "exfat_global.h" +#include "exfat_data.h" + +#include "exfat_cache.h" +#include "exfat_super.h" +#include "exfat.h" + +/*----------------------------------------------------------------------*/ +/* Global Variable Definitions */ +/*----------------------------------------------------------------------*/ + +extern FS_STRUCT_T fs_struct[]; + +#define sm_P(s) +#define sm_V(s) + +static INT32 __FAT_read(struct super_block *sb, UINT32 loc, UINT32 *content); +static INT32 __FAT_write(struct super_block *sb, UINT32 loc, UINT32 content); + +static BUF_CACHE_T *FAT_cache_find(struct super_block *sb, UINT32 sec); +static BUF_CACHE_T *FAT_cache_get(struct super_block *sb, UINT32 sec); +static void FAT_cache_insert_hash(struct super_block *sb, BUF_CACHE_T *bp); +static void FAT_cache_remove_hash(BUF_CACHE_T *bp); + +static UINT8 *__buf_getblk(struct super_block *sb, UINT32 sec); + +static BUF_CACHE_T *buf_cache_find(struct super_block *sb, UINT32 sec); +static BUF_CACHE_T *buf_cache_get(struct super_block *sb, UINT32 sec); +static void buf_cache_insert_hash(struct super_block *sb, BUF_CACHE_T *bp); +static void buf_cache_remove_hash(BUF_CACHE_T *bp); + +static void push_to_mru(BUF_CACHE_T *bp, BUF_CACHE_T *list); +static void push_to_lru(BUF_CACHE_T *bp, BUF_CACHE_T *list); +static void move_to_mru(BUF_CACHE_T *bp, BUF_CACHE_T *list); +static void move_to_lru(BUF_CACHE_T *bp, BUF_CACHE_T *list); + +/*======================================================================*/ +/* Cache Initialization Functions */ +/*======================================================================*/ + +INT32 buf_init(struct super_block *sb) +{ + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + INT32 i; + + /* LRU list */ + p_fs->FAT_cache_lru_list.next = p_fs->FAT_cache_lru_list.prev = &p_fs->FAT_cache_lru_list; + + for (i = 0; i < FAT_CACHE_SIZE; i++) { + p_fs->FAT_cache_array[i].drv = -1; + p_fs->FAT_cache_array[i].sec = ~0; + p_fs->FAT_cache_array[i].flag = 0; + p_fs->FAT_cache_array[i].buf_bh = NULL; + p_fs->FAT_cache_array[i].prev = p_fs->FAT_cache_array[i].next = NULL; + push_to_mru(&(p_fs->FAT_cache_array[i]), &p_fs->FAT_cache_lru_list); + } + + p_fs->buf_cache_lru_list.next = p_fs->buf_cache_lru_list.prev = &p_fs->buf_cache_lru_list; + + for (i = 0; i < BUF_CACHE_SIZE; i++) { + p_fs->buf_cache_array[i].drv = -1; + p_fs->buf_cache_array[i].sec = ~0; + p_fs->buf_cache_array[i].flag = 0; + p_fs->buf_cache_array[i].buf_bh = NULL; + p_fs->buf_cache_array[i].prev = p_fs->buf_cache_array[i].next = NULL; + push_to_mru(&(p_fs->buf_cache_array[i]), &p_fs->buf_cache_lru_list); + } + + /* HASH list */ + for (i = 0; i < FAT_CACHE_HASH_SIZE; i++) { + p_fs->FAT_cache_hash_list[i].drv = -1; + p_fs->FAT_cache_hash_list[i].sec = ~0; + p_fs->FAT_cache_hash_list[i].hash_next = p_fs->FAT_cache_hash_list[i].hash_prev = &(p_fs->FAT_cache_hash_list[i]); + } + + for (i = 0; i < FAT_CACHE_SIZE; i++) { + 
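		/* every pre-allocated FAT cache entry starts out on a hash chain;
		 * FAT_cache_find() later locates a cached sector by (drv, sec)
		 * through these chains instead of scanning the whole LRU list */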
FAT_cache_insert_hash(sb, &(p_fs->FAT_cache_array[i])); + } + + for (i = 0; i < BUF_CACHE_HASH_SIZE; i++) { + p_fs->buf_cache_hash_list[i].drv = -1; + p_fs->buf_cache_hash_list[i].sec = ~0; + p_fs->buf_cache_hash_list[i].hash_next = p_fs->buf_cache_hash_list[i].hash_prev = &(p_fs->buf_cache_hash_list[i]); + } + + for (i = 0; i < BUF_CACHE_SIZE; i++) { + buf_cache_insert_hash(sb, &(p_fs->buf_cache_array[i])); + } + + return(FFS_SUCCESS); +} /* end of buf_init */ + +INT32 buf_shutdown(struct super_block *sb) +{ + return(FFS_SUCCESS); +} /* end of buf_shutdown */ + +/*======================================================================*/ +/* FAT Read/Write Functions */ +/*======================================================================*/ + +/* in : sb, loc + * out: content + * returns 0 on success + * -1 on error + */ +INT32 FAT_read(struct super_block *sb, UINT32 loc, UINT32 *content) +{ + INT32 ret; + + sm_P(&f_sem); + + ret = __FAT_read(sb, loc, content); + + sm_V(&f_sem); + + return(ret); +} /* end of FAT_read */ + +INT32 FAT_write(struct super_block *sb, UINT32 loc, UINT32 content) +{ + INT32 ret; + + sm_P(&f_sem); + + ret = __FAT_write(sb, loc, content); + + sm_V(&f_sem); + + return(ret); +} /* end of FAT_write */ + +static INT32 __FAT_read(struct super_block *sb, UINT32 loc, UINT32 *content) +{ + INT32 off; + UINT32 sec, _content; + UINT8 *fat_sector, *fat_entry; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); + + if (p_fs->vol_type == FAT12) { + sec = p_fs->FAT1_start_sector + ((loc + (loc >> 1)) >> p_bd->sector_size_bits); + off = (loc + (loc >> 1)) & p_bd->sector_size_mask; + + if (off == (p_bd->sector_size-1)) { + fat_sector = FAT_getblk(sb, sec); + if (!fat_sector) + return -1; + + _content = (UINT32) fat_sector[off]; + + fat_sector = FAT_getblk(sb, ++sec); + if (!fat_sector) + return -1; + + _content |= (UINT32) fat_sector[0] << 8; + } else { + fat_sector = FAT_getblk(sb, sec); + if (!fat_sector) + return -1; + + fat_entry = &(fat_sector[off]); + _content = GET16(fat_entry); + } + + if (loc & 1) _content >>= 4; + + _content &= 0x00000FFF; + + if (_content >= CLUSTER_16(0x0FF8)) { + *content = CLUSTER_32(~0); + return 0; + } else { + *content = CLUSTER_32(_content); + return 0; + } + } else if (p_fs->vol_type == FAT16) { + sec = p_fs->FAT1_start_sector + (loc >> (p_bd->sector_size_bits-1)); + off = (loc << 1) & p_bd->sector_size_mask; + + fat_sector = FAT_getblk(sb, sec); + if (!fat_sector) + return -1; + + fat_entry = &(fat_sector[off]); + + _content = GET16_A(fat_entry); + + _content &= 0x0000FFFF; + + if (_content >= CLUSTER_16(0xFFF8)) { + *content = CLUSTER_32(~0); + return 0; + } else { + *content = CLUSTER_32(_content); + return 0; + } + } else if (p_fs->vol_type == FAT32) { + sec = p_fs->FAT1_start_sector + (loc >> (p_bd->sector_size_bits-2)); + off = (loc << 2) & p_bd->sector_size_mask; + + fat_sector = FAT_getblk(sb, sec); + if (!fat_sector) + return -1; + + fat_entry = &(fat_sector[off]); + + _content = GET32_A(fat_entry); + + _content &= 0x0FFFFFFF; + + if (_content >= CLUSTER_32(0x0FFFFFF8)) { + *content = CLUSTER_32(~0); + return 0; + } else { + *content = CLUSTER_32(_content); + return 0; + } + } else { + sec = p_fs->FAT1_start_sector + (loc >> (p_bd->sector_size_bits-2)); + off = (loc << 2) & p_bd->sector_size_mask; + + fat_sector = FAT_getblk(sb, sec); + if (!fat_sector) + return -1; + + fat_entry = &(fat_sector[off]); + _content = GET32_A(fat_entry); + + if (_content >= CLUSTER_32(0xFFFFFFF8)) { + *content = 
CLUSTER_32(~0); + return 0; + } else { + *content = CLUSTER_32(_content); + return 0; + } + } + + *content = CLUSTER_32(~0); + return 0; +} /* end of __FAT_read */ + +static INT32 __FAT_write(struct super_block *sb, UINT32 loc, UINT32 content) +{ + INT32 off; + UINT32 sec; + UINT8 *fat_sector, *fat_entry; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); + + if (p_fs->vol_type == FAT12) { + + content &= 0x00000FFF; + + sec = p_fs->FAT1_start_sector + ((loc + (loc >> 1)) >> p_bd->sector_size_bits); + off = (loc + (loc >> 1)) & p_bd->sector_size_mask; + + fat_sector = FAT_getblk(sb, sec); + if (!fat_sector) + return -1; + + if (loc & 1) { /* odd */ + + content <<= 4; + + if (off == (p_bd->sector_size-1)) { + fat_sector[off] = (UINT8)(content | (fat_sector[off] & 0x0F)); + FAT_modify(sb, sec); + + fat_sector = FAT_getblk(sb, ++sec); + if (!fat_sector) + return -1; + + fat_sector[0] = (UINT8)(content >> 8); + } else { + fat_entry = &(fat_sector[off]); + content |= GET16(fat_entry) & 0x000F; + + SET16(fat_entry, content); + } + } else { /* even */ + fat_sector[off] = (UINT8)(content); + + if (off == (p_bd->sector_size-1)) { + fat_sector[off] = (UINT8)(content); + FAT_modify(sb, sec); + + fat_sector = FAT_getblk(sb, ++sec); + fat_sector[0] = (UINT8)((fat_sector[0] & 0xF0) | (content >> 8)); + } else { + fat_entry = &(fat_sector[off]); + content |= GET16(fat_entry) & 0xF000; + + SET16(fat_entry, content); + } + } + } + + else if (p_fs->vol_type == FAT16) { + + content &= 0x0000FFFF; + + sec = p_fs->FAT1_start_sector + (loc >> (p_bd->sector_size_bits-1)); + off = (loc << 1) & p_bd->sector_size_mask; + + fat_sector = FAT_getblk(sb, sec); + if (!fat_sector) + return -1; + + fat_entry = &(fat_sector[off]); + + SET16_A(fat_entry, content); + } + + else if (p_fs->vol_type == FAT32) { + + content &= 0x0FFFFFFF; + + sec = p_fs->FAT1_start_sector + (loc >> (p_bd->sector_size_bits-2)); + off = (loc << 2) & p_bd->sector_size_mask; + + fat_sector = FAT_getblk(sb, sec); + if (!fat_sector) + return -1; + + fat_entry = &(fat_sector[off]); + + content |= GET32_A(fat_entry) & 0xF0000000; + + SET32_A(fat_entry, content); + } + + else { /* p_fs->vol_type == EXFAT */ + + sec = p_fs->FAT1_start_sector + (loc >> (p_bd->sector_size_bits-2)); + off = (loc << 2) & p_bd->sector_size_mask; + + fat_sector = FAT_getblk(sb, sec); + if (!fat_sector) + return -1; + + fat_entry = &(fat_sector[off]); + + SET32_A(fat_entry, content); + } + + FAT_modify(sb, sec); + return 0; +} /* end of __FAT_write */ + +UINT8 *FAT_getblk(struct super_block *sb, UINT32 sec) +{ + BUF_CACHE_T *bp; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + bp = FAT_cache_find(sb, sec); + if (bp != NULL) { + move_to_mru(bp, &p_fs->FAT_cache_lru_list); + return(bp->buf_bh->b_data); + } + + bp = FAT_cache_get(sb, sec); + + FAT_cache_remove_hash(bp); + + bp->drv = p_fs->drv; + bp->sec = sec; + bp->flag = 0; + + FAT_cache_insert_hash(sb, bp); + + if (sector_read(sb, sec, &(bp->buf_bh), 1) != FFS_SUCCESS) { + FAT_cache_remove_hash(bp); + bp->drv = -1; + bp->sec = ~0; + bp->flag = 0; + bp->buf_bh = NULL; + + move_to_lru(bp, &p_fs->FAT_cache_lru_list); + return NULL; + } + + return(bp->buf_bh->b_data); +} /* end of FAT_getblk */ + +void FAT_modify(struct super_block *sb, UINT32 sec) +{ + BUF_CACHE_T *bp; + + bp = FAT_cache_find(sb, sec); + if (bp != NULL) { + sector_write(sb, sec, bp->buf_bh, 0); + } +} /* end of FAT_modify */ + +void FAT_release_all(struct super_block *sb) +{ + BUF_CACHE_T *bp; + FS_INFO_T *p_fs = 
&(EXFAT_SB(sb)->fs_info); + + sm_P(&f_sem); + + bp = p_fs->FAT_cache_lru_list.next; + while (bp != &p_fs->FAT_cache_lru_list) { + if (bp->drv == p_fs->drv) { + bp->drv = -1; + bp->sec = ~0; + bp->flag = 0; + + if(bp->buf_bh) { + __brelse(bp->buf_bh); + bp->buf_bh = NULL; + } + } + bp = bp->next; + } + + sm_V(&f_sem); +} /* end of FAT_release_all */ + +void FAT_sync(struct super_block *sb) +{ + BUF_CACHE_T *bp; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + sm_P(&f_sem); + + bp = p_fs->FAT_cache_lru_list.next; + while (bp != &p_fs->FAT_cache_lru_list) { + if ((bp->drv == p_fs->drv) && (bp->flag & DIRTYBIT)) { + sync_dirty_buffer(bp->buf_bh); + bp->flag &= ~(DIRTYBIT); + } + bp = bp->next; + } + + sm_V(&f_sem); +} /* end of FAT_sync */ + +static BUF_CACHE_T *FAT_cache_find(struct super_block *sb, UINT32 sec) +{ + INT32 off; + BUF_CACHE_T *bp, *hp; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + off = (sec + (sec >> p_fs->sectors_per_clu_bits)) & (FAT_CACHE_HASH_SIZE - 1); + + hp = &(p_fs->FAT_cache_hash_list[off]); + for (bp = hp->hash_next; bp != hp; bp = bp->hash_next) { + if ((bp->drv == p_fs->drv) && (bp->sec == sec)) { + + WARN(!bp->buf_bh, "[EXFAT] FAT_cache has no bh. " + "It will make system panic.\n"); + + touch_buffer(bp->buf_bh); + return(bp); + } + } + return(NULL); +} /* end of FAT_cache_find */ + +static BUF_CACHE_T *FAT_cache_get(struct super_block *sb, UINT32 sec) +{ + BUF_CACHE_T *bp; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + bp = p_fs->FAT_cache_lru_list.prev; + + + move_to_mru(bp, &p_fs->FAT_cache_lru_list); + return(bp); +} /* end of FAT_cache_get */ + +static void FAT_cache_insert_hash(struct super_block *sb, BUF_CACHE_T *bp) +{ + INT32 off; + BUF_CACHE_T *hp; + FS_INFO_T *p_fs; + + p_fs = &(EXFAT_SB(sb)->fs_info); + off = (bp->sec + (bp->sec >> p_fs->sectors_per_clu_bits)) & (FAT_CACHE_HASH_SIZE-1); + + hp = &(p_fs->FAT_cache_hash_list[off]); + bp->hash_next = hp->hash_next; + bp->hash_prev = hp; + hp->hash_next->hash_prev = bp; + hp->hash_next = bp; +} /* end of FAT_cache_insert_hash */ + +static void FAT_cache_remove_hash(BUF_CACHE_T *bp) +{ + (bp->hash_prev)->hash_next = bp->hash_next; + (bp->hash_next)->hash_prev = bp->hash_prev; +} /* end of FAT_cache_remove_hash */ + +/*======================================================================*/ +/* Buffer Read/Write Functions */ +/*======================================================================*/ + +UINT8 *buf_getblk(struct super_block *sb, UINT32 sec) +{ + UINT8 *buf; + + sm_P(&b_sem); + + buf = __buf_getblk(sb, sec); + + sm_V(&b_sem); + + return(buf); +} /* end of buf_getblk */ + +static UINT8 *__buf_getblk(struct super_block *sb, UINT32 sec) +{ + BUF_CACHE_T *bp; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + bp = buf_cache_find(sb, sec); + if (bp != NULL) { + move_to_mru(bp, &p_fs->buf_cache_lru_list); + return(bp->buf_bh->b_data); + } + + bp = buf_cache_get(sb, sec); + + buf_cache_remove_hash(bp); + + bp->drv = p_fs->drv; + bp->sec = sec; + bp->flag = 0; + + buf_cache_insert_hash(sb, bp); + + if (sector_read(sb, sec, &(bp->buf_bh), 1) != FFS_SUCCESS) { + buf_cache_remove_hash(bp); + bp->drv = -1; + bp->sec = ~0; + bp->flag = 0; + bp->buf_bh = NULL; + + move_to_lru(bp, &p_fs->buf_cache_lru_list); + return NULL; + } + + return(bp->buf_bh->b_data); + +} /* end of __buf_getblk */ + +void buf_modify(struct super_block *sb, UINT32 sec) +{ + BUF_CACHE_T *bp; + + sm_P(&b_sem); + + bp = buf_cache_find(sb, sec); + if (likely(bp != NULL)) { + sector_write(sb, sec, bp->buf_bh, 0); + } + + WARN(!bp, 
"[EXFAT] failed to find buffer_cache(sector:%u).\n", sec); + + sm_V(&b_sem); +} /* end of buf_modify */ + +void buf_lock(struct super_block *sb, UINT32 sec) +{ + BUF_CACHE_T *bp; + + sm_P(&b_sem); + + bp = buf_cache_find(sb, sec); + if (likely(bp != NULL)) bp->flag |= LOCKBIT; + + WARN(!bp, "[EXFAT] failed to find buffer_cache(sector:%u).\n", sec); + + sm_V(&b_sem); +} /* end of buf_lock */ + +void buf_unlock(struct super_block *sb, UINT32 sec) +{ + BUF_CACHE_T *bp; + + sm_P(&b_sem); + + bp = buf_cache_find(sb, sec); + if (likely(bp != NULL)) bp->flag &= ~(LOCKBIT); + + WARN(!bp, "[EXFAT] failed to find buffer_cache(sector:%u).\n", sec); + + sm_V(&b_sem); +} /* end of buf_unlock */ + +void buf_release(struct super_block *sb, UINT32 sec) +{ + BUF_CACHE_T *bp; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + sm_P(&b_sem); + + bp = buf_cache_find(sb, sec); + if (likely(bp != NULL)) { + bp->drv = -1; + bp->sec = ~0; + bp->flag = 0; + + if(bp->buf_bh) { + __brelse(bp->buf_bh); + bp->buf_bh = NULL; + } + + move_to_lru(bp, &p_fs->buf_cache_lru_list); + } + + sm_V(&b_sem); +} /* end of buf_release */ + +void buf_release_all(struct super_block *sb) +{ + BUF_CACHE_T *bp; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + sm_P(&b_sem); + + bp = p_fs->buf_cache_lru_list.next; + while (bp != &p_fs->buf_cache_lru_list) { + if (bp->drv == p_fs->drv) { + bp->drv = -1; + bp->sec = ~0; + bp->flag = 0; + + if(bp->buf_bh) { + __brelse(bp->buf_bh); + bp->buf_bh = NULL; + } + } + bp = bp->next; + } + + sm_V(&b_sem); +} /* end of buf_release_all */ + +void buf_sync(struct super_block *sb) +{ + BUF_CACHE_T *bp; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + sm_P(&b_sem); + + bp = p_fs->buf_cache_lru_list.next; + while (bp != &p_fs->buf_cache_lru_list) { + if ((bp->drv == p_fs->drv) && (bp->flag & DIRTYBIT)) { + sync_dirty_buffer(bp->buf_bh); + bp->flag &= ~(DIRTYBIT); + } + bp = bp->next; + } + + sm_V(&b_sem); +} /* end of buf_sync */ + +static BUF_CACHE_T *buf_cache_find(struct super_block *sb, UINT32 sec) +{ + INT32 off; + BUF_CACHE_T *bp, *hp; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + off = (sec + (sec >> p_fs->sectors_per_clu_bits)) & (BUF_CACHE_HASH_SIZE - 1); + + hp = &(p_fs->buf_cache_hash_list[off]); + for (bp = hp->hash_next; bp != hp; bp = bp->hash_next) { + if ((bp->drv == p_fs->drv) && (bp->sec == sec)) { + touch_buffer(bp->buf_bh); + return(bp); + } + } + return(NULL); +} /* end of buf_cache_find */ + +static BUF_CACHE_T *buf_cache_get(struct super_block *sb, UINT32 sec) +{ + BUF_CACHE_T *bp; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + bp = p_fs->buf_cache_lru_list.prev; + while (bp->flag & LOCKBIT) bp = bp->prev; + + + move_to_mru(bp, &p_fs->buf_cache_lru_list); + return(bp); +} /* end of buf_cache_get */ + +static void buf_cache_insert_hash(struct super_block *sb, BUF_CACHE_T *bp) +{ + INT32 off; + BUF_CACHE_T *hp; + FS_INFO_T *p_fs; + + p_fs = &(EXFAT_SB(sb)->fs_info); + off = (bp->sec + (bp->sec >> p_fs->sectors_per_clu_bits)) & (BUF_CACHE_HASH_SIZE-1); + + hp = &(p_fs->buf_cache_hash_list[off]); + bp->hash_next = hp->hash_next; + bp->hash_prev = hp; + hp->hash_next->hash_prev = bp; + hp->hash_next = bp; +} /* end of buf_cache_insert_hash */ + +static void buf_cache_remove_hash(BUF_CACHE_T *bp) +{ + (bp->hash_prev)->hash_next = bp->hash_next; + (bp->hash_next)->hash_prev = bp->hash_prev; +} /* end of buf_cache_remove_hash */ + +/*======================================================================*/ +/* Local Function Definitions */ 
+/*======================================================================*/ + +static void push_to_mru(BUF_CACHE_T *bp, BUF_CACHE_T *list) +{ + bp->next = list->next; + bp->prev = list; + list->next->prev = bp; + list->next = bp; +} /* end of buf_cache_push_to_mru */ + +static void push_to_lru(BUF_CACHE_T *bp, BUF_CACHE_T *list) +{ + bp->prev = list->prev; + bp->next = list; + list->prev->next = bp; + list->prev = bp; +} /* end of buf_cache_push_to_lru */ + +static void move_to_mru(BUF_CACHE_T *bp, BUF_CACHE_T *list) +{ + bp->prev->next = bp->next; + bp->next->prev = bp->prev; + push_to_mru(bp, list); +} /* end of buf_cache_move_to_mru */ + +static void move_to_lru(BUF_CACHE_T *bp, BUF_CACHE_T *list) +{ + bp->prev->next = bp->next; + bp->next->prev = bp->prev; + push_to_lru(bp, list); +} /* end of buf_cache_move_to_lru */ + +/* end of exfat_cache.c */ diff --git a/fs/exfat/exfat_cache.h b/fs/exfat/exfat_cache.h new file mode 100644 index 0000000000000..056636b4e4837 --- /dev/null +++ b/fs/exfat/exfat_cache.h @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +/************************************************************************/ +/* */ +/* PROJECT : exFAT & FAT12/16/32 File System */ +/* FILE : exfat_cache.h */ +/* PURPOSE : Header File for exFAT Cache Manager */ +/* (FAT Cache & Buffer Cache) */ +/* */ +/*----------------------------------------------------------------------*/ +/* NOTES */ +/* */ +/*----------------------------------------------------------------------*/ +/* REVISION HISTORY (Ver 0.9) */ +/* */ +/* - 2010.11.15 [Sung-Kwan Kim] : first writing */ +/* */ +/************************************************************************/ + +#ifndef _EXFAT_CACHE_H +#define _EXFAT_CACHE_H + +#include "exfat_config.h" +#include "exfat_global.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + + /*----------------------------------------------------------------------*/ + /* Constant & Macro Definitions */ + /*----------------------------------------------------------------------*/ + +#define LOCKBIT 0x01 +#define DIRTYBIT 0x02 + + /*----------------------------------------------------------------------*/ + /* Type Definitions */ + /*----------------------------------------------------------------------*/ + + typedef struct __BUF_CACHE_T { + struct __BUF_CACHE_T *next; + struct __BUF_CACHE_T *prev; + struct __BUF_CACHE_T *hash_next; + struct __BUF_CACHE_T *hash_prev; + INT32 drv; + UINT32 sec; + UINT32 flag; + struct buffer_head *buf_bh; + } BUF_CACHE_T; + + /*----------------------------------------------------------------------*/ + /* External Function Declarations */ + /*----------------------------------------------------------------------*/ + + INT32 buf_init(struct super_block *sb); + INT32 buf_shutdown(struct super_block *sb); + INT32 FAT_read(struct super_block *sb, UINT32 loc, UINT32 *content); + INT32 FAT_write(struct super_block *sb, UINT32 loc, UINT32 content); + UINT8 *FAT_getblk(struct super_block *sb, UINT32 sec); + void FAT_modify(struct super_block *sb, UINT32 sec); + void FAT_release_all(struct super_block *sb); + void FAT_sync(struct super_block *sb); + UINT8 *buf_getblk(struct super_block *sb, UINT32 sec); + void buf_modify(struct super_block *sb, UINT32 sec); + void buf_lock(struct super_block *sb, UINT32 sec); + void buf_unlock(struct super_block *sb, UINT32 sec); + void buf_release(struct super_block *sb, UINT32 sec); + void buf_release_all(struct super_block *sb); + void buf_sync(struct super_block *sb); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _EXFAT_CACHE_H */ + +/* end of exfat_cache.h */ diff --git a/fs/exfat/exfat_config.h b/fs/exfat/exfat_config.h new file mode 100644 index 0000000000000..d16c882f88ca6 --- /dev/null +++ b/fs/exfat/exfat_config.h @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +/************************************************************************/ +/* */ +/* PROJECT : exFAT & FAT12/16/32 File System */ +/* FILE : exfat_config.h */ +/* PURPOSE : Header File for exFAT Configuable Policies */ +/* */ +/*----------------------------------------------------------------------*/ +/* NOTES */ +/* */ +/*----------------------------------------------------------------------*/ +/* REVISION HISTORY (Ver 0.9) */ +/* */ +/* - 2010.11.15 [Joosun Hahn] : first writing */ +/* */ +/************************************************************************/ + +#ifndef _EXFAT_CONFIG_H +#define _EXFAT_CONFIG_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/*======================================================================*/ +/* */ +/* FFS CONFIGURATIONS */ +/* (CHANGE THIS PART IF REQUIRED) */ +/* */ +/*======================================================================*/ + +/*----------------------------------------------------------------------*/ +/* Target OS Platform */ +/*----------------------------------------------------------------------*/ + +#define OS_NONOS 1 +#define OS_LINUX 2 + +#define FFS_CONFIG_OS OS_LINUX + +/*----------------------------------------------------------------------*/ +/* Set this definition to 1 to support APIs with pointer parameters */ +/* to 32-bit variables (e.g. read, write, seek, get_filesize) */ +/*----------------------------------------------------------------------*/ +#define FFS_CONFIG_LEGACY_32BIT_API 0 + +/*----------------------------------------------------------------------*/ +/* Set this definition to 1 to support APIs with pointer parameters */ +/* to 32-bit variables (e.g. read, write, seek, get_filesize) */ +/*----------------------------------------------------------------------*/ +#define FFS_CONFIG_LEGACY_32BIT_API 0 + +/*----------------------------------------------------------------------*/ +/* Set appropriate definitions to 1's to support the languages */ +/*----------------------------------------------------------------------*/ +#define FFS_CONFIG_SUPPORT_CP1250 1 // Central Europe +#define FFS_CONFIG_SUPPORT_CP1251 1 // Cyrillic +#define FFS_CONFIG_SUPPORT_CP1252 1 // Latin I +#define FFS_CONFIG_SUPPORT_CP1253 1 // Greek +#define FFS_CONFIG_SUPPORT_CP1254 1 // Turkish +#define FFS_CONFIG_SUPPORT_CP1255 1 // Hebrew +#define FFS_CONFIG_SUPPORT_CP1256 1 // Arabic +#define FFS_CONFIG_SUPPORT_CP1257 1 // Baltic +#define FFS_CONFIG_SUPPORT_CP1258 1 // Vietnamese +#define FFS_CONFIG_SUPPORT_CP874 1 // Thai +#define FFS_CONFIG_SUPPORT_CP932 1 // Japanese +#define FFS_CONFIG_SUPPORT_CP936 1 // Simplified Chinese +#define FFS_CONFIG_SUPPORT_CP949 1 // Korean +#define FFS_CONFIG_SUPPORT_CP950 1 // Traditional Chinese +#define FFS_CONFIG_SUPPORT_UTF8 1 // UTF8 encoding + +/*----------------------------------------------------------------------*/ +/* Feature Config */ +/*----------------------------------------------------------------------*/ +#define EXFAT_CONFIG_DISCARD 1 // mount option -o discard support +#define EXFAT_CONFIG_KERNEL_DEBUG 1 // kernel debug features via ioctl +#define EXFAT_CONFIG_DEBUG_MSG 0 // debugging message on/off + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _EXFAT_CONFIG_H */ + +/* end of exfat_config.h */ diff --git a/fs/exfat/exfat_core.c b/fs/exfat/exfat_core.c new file mode 100644 index 0000000000000..2ab28dc32da21 --- /dev/null +++ b/fs/exfat/exfat_core.c @@ -0,0 +1,5187 @@ +/* Some of the source code in this file came from "linux/fs/fat/misc.c". 
*/ +/* + * linux/fs/fat/misc.c + * + * Written 1992,1993 by Werner Almesberger + * 22/11/2000 - Fixed fat_date_unix2dos for dates earlier than 01/01/1980 + * and date_dos2unix for date==0 by Igor Zhbanov(bsg@uniyar.ac.ru) + */ + +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/************************************************************************/ +/* */ +/* PROJECT : exFAT & FAT12/16/32 File System */ +/* FILE : exfat.c */ +/* PURPOSE : exFAT File Manager */ +/* */ +/*----------------------------------------------------------------------*/ +/* NOTES */ +/* */ +/*----------------------------------------------------------------------*/ +/* REVISION HISTORY (Ver 0.9) */ +/* */ +/* - 2010.11.15 [Joosun Hahn] : first writing */ +/* */ +/************************************************************************/ + +#include + +#include "exfat_config.h" +#include "exfat_global.h" +#include "exfat_data.h" +#include "exfat_oal.h" + +#include "exfat_blkdev.h" +#include "exfat_cache.h" +#include "exfat_nls.h" +#include "exfat_api.h" +#include "exfat_super.h" +#include "exfat.h" + +#include + +/*----------------------------------------------------------------------*/ +/* Constant & Macro Definitions */ +/*----------------------------------------------------------------------*/ + +#define THERE_IS_MBR 0 /* if there is no MBR (e.g. 
memory card), +set this macro to 0 */ + +#if (THERE_IS_MBR == 1) +#include "exfat_part.h" +#endif + +#define DELAYED_SYNC 0 + +#define ELAPSED_TIME 0 + +#if (ELAPSED_TIME == 1) +#include + +static UINT32 __t1, __t2; +static UINT32 get_current_msec(void) +{ + struct timeval tm; + do_gettimeofday(&tm); + return (UINT32)(tm.tv_sec*1000000 + tm.tv_usec); +} +#define TIME_START() do {__t1 = get_current_msec(); } while (0) +#define TIME_END() do {__t2 = get_current_msec(); } while (0) +#define PRINT_TIME(n) do {printk("[EXFAT] Elapsed time %d = %d (usec)\n", n, (__t2 - __t1)); } while (0) +#else +#define TIME_START() +#define TIME_END() +#define PRINT_TIME(n) +#endif + +static void __set_sb_dirty(struct super_block *sb) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) + sb->s_dirt = 1; +#else + struct exfat_sb_info *sbi = EXFAT_SB(sb); + sbi->s_dirt = 1; +#endif +} + +/*----------------------------------------------------------------------*/ +/* Global Variable Definitions */ +/*----------------------------------------------------------------------*/ + +extern UINT8 uni_upcase[]; + +/*----------------------------------------------------------------------*/ +/* Local Variable Definitions */ +/*----------------------------------------------------------------------*/ + +static UINT8 name_buf[MAX_PATH_LENGTH *MAX_CHARSET_SIZE]; + +static INT8 *reserved_names[] = { + "AUX ", "CON ", "NUL ", "PRN ", + "COM1 ", "COM2 ", "COM3 ", "COM4 ", + "COM5 ", "COM6 ", "COM7 ", "COM8 ", "COM9 ", + "LPT1 ", "LPT2 ", "LPT3 ", "LPT4 ", + "LPT5 ", "LPT6 ", "LPT7 ", "LPT8 ", "LPT9 ", + NULL +}; + +static UINT8 free_bit[] = { + 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, /* 0 ~ 19 */ + 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, /* 20 ~ 39 */ + 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, /* 40 ~ 59 */ + 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, /* 60 ~ 79 */ + 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, /* 80 ~ 99 */ + 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, /* 100 ~ 119 */ + 0, 1, 0, 2, 0, 1, 0, 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, /* 120 ~ 139 */ + 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, /* 140 ~ 159 */ + 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, /* 160 ~ 179 */ + 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, /* 180 ~ 199 */ + 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, /* 200 ~ 219 */ + 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, /* 220 ~ 239 */ + 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 /* 240 ~ 254 */ +}; + +static UINT8 used_bit[] = { + 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, /* 0 ~ 19 */ + 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, /* 20 ~ 39 */ + 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, /* 40 ~ 59 */ + 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, /* 60 ~ 79 */ + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, /* 80 ~ 99 */ + 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, /* 100 ~ 119 */ + 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, /* 120 ~ 139 */ + 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, /* 140 ~ 159 */ + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, /* 160 ~ 179 */ + 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, /* 180 ~ 199 */ + 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, /* 200 ~ 219 */ + 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 
7, /* 220 ~ 239 */ + 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 /* 240 ~ 255 */ +}; + +/*======================================================================*/ +/* Global Function Definitions */ +/*======================================================================*/ + +/* ffsInit : roll back to the initial state of the file system */ +INT32 ffsInit(void) +{ + INT32 ret; + + ret = bdev_init(); + if (ret) + return ret; + + ret = fs_init(); + if (ret) + return ret; + + return FFS_SUCCESS; +} /* end of ffsInit */ + +/* ffsShutdown : make free all memory-alloced global buffers */ +INT32 ffsShutdown(void) +{ + INT32 ret; + ret = fs_shutdown(); + if (ret) + return ret; + + ret = bdev_shutdown(); + if (ret) + return ret; + + return FFS_SUCCESS; +} /* end of ffsShutdown */ + +/* ffsMountVol : mount the file system volume */ +INT32 ffsMountVol(struct super_block *sb, INT32 drv) +{ + INT32 i, ret; +#if (THERE_IS_MBR == 1) + MBR_SECTOR_T *p_mbr; + PART_ENTRY_T *p_pte; +#endif + PBR_SECTOR_T *p_pbr; + struct buffer_head *tmp_bh = NULL; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); + + PRINTK("[EXFAT] trying to mount...\n"); + + p_fs->drv = drv; + p_fs->dev_ejected = FALSE; + + /* open the block device */ + if (bdev_open(sb)) + return FFS_MEDIAERR; + + if (p_bd->sector_size < sb->s_blocksize) + return FFS_MEDIAERR; + if (p_bd->sector_size > sb->s_blocksize) + sb_set_blocksize(sb, p_bd->sector_size); + + /* read Sector 0 */ + if (sector_read(sb, 0, &tmp_bh, 1) != FFS_SUCCESS) + return FFS_MEDIAERR; + +#if (THERE_IS_MBR == 1) + if (buf[0] != 0xEB) { + /* MBR is read */ + p_mbr = (MBR_SECTOR_T *) tmp_bh->b_data; + + /* check the validity of MBR */ + if (GET16_A(p_mbr->signature) != MBR_SIGNATURE) { + brelse(tmp_bh); + bdev_close(sb); + return FFS_FORMATERR; + } + + p_pte = (PART_ENTRY_T *) p_mbr->partition + 0; + p_fs->PBR_sector = GET32(p_pte->start_sector); + p_fs->num_sectors = GET32(p_pte->num_sectors); + + if (p_fs->num_sectors == 0) { + brelse(tmp_bh); + bdev_close(sb); + return FFS_ERROR; + } + + /* read PBR */ + if (sector_read(sb, p_fs->PBR_sector, &tmp_bh, 1) != FFS_SUCCESS) { + bdev_close(sb); + return FFS_MEDIAERR; + } + } else { +#endif + /* PRB is read */ + p_fs->PBR_sector = 0; +#if (THERE_IS_MBR == 1) + } +#endif + + p_pbr = (PBR_SECTOR_T *) tmp_bh->b_data; + + /* check the validity of PBR */ + if (GET16_A(p_pbr->signature) != PBR_SIGNATURE) { + brelse(tmp_bh); + bdev_close(sb); + return FFS_FORMATERR; + } + + /* fill fs_stuct */ + for (i = 0; i < 53; i++) + if (p_pbr->bpb[i]) + break; + + if (i < 53) { + if (GET16(p_pbr->bpb+11)) /* num_fat_sectors */ + ret = fat16_mount(sb, p_pbr); + else + ret = fat32_mount(sb, p_pbr); + } else { + ret = exfat_mount(sb, p_pbr); + } + + brelse(tmp_bh); + + if (ret) { + bdev_close(sb); + return ret; + } + + if (p_fs->vol_type == EXFAT) { + ret = load_alloc_bitmap(sb); + if (ret) { + bdev_close(sb); + return ret; + } + ret = load_upcase_table(sb); + if (ret) { + free_alloc_bitmap(sb); + bdev_close(sb); + return ret; + } + } + + if (p_fs->dev_ejected) { + if (p_fs->vol_type == EXFAT) { + free_upcase_table(sb); + free_alloc_bitmap(sb); + } + bdev_close(sb); + return FFS_MEDIAERR; + } + + PRINTK("[EXFAT] mounted successfully\n"); + return FFS_SUCCESS; +} /* end of ffsMountVol */ + +/* ffsUmountVol : umount the file system volume */ +INT32 ffsUmountVol(struct super_block *sb) +{ + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + PRINTK("[EXFAT] trying to unmount...\n"); + + fs_sync(sb, 0); + 
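	/* flush pending FAT/buffer updates, mark the volume clean, and only
	 * then release the caches and close the block device */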
fs_set_vol_flags(sb, VOL_CLEAN); + + if (p_fs->vol_type == EXFAT) { + free_upcase_table(sb); + free_alloc_bitmap(sb); + } + + FAT_release_all(sb); + buf_release_all(sb); + + /* close the block device */ + bdev_close(sb); + + if (p_fs->dev_ejected) { + PRINTK( "[EXFAT] unmounted with media errors. " + "device's already ejected.\n"); + return FFS_MEDIAERR; + } + + PRINTK("[EXFAT] unmounted successfully\n"); + return FFS_SUCCESS; +} /* end of ffsUmountVol */ + +/* ffsGetVolInfo : get the information of a file system volume */ +INT32 ffsGetVolInfo(struct super_block *sb, VOL_INFO_T *info) +{ + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + if (p_fs->used_clusters == (UINT32) ~0) + p_fs->used_clusters = p_fs->fs_func->count_used_clusters(sb); + + info->FatType = p_fs->vol_type; + info->ClusterSize = p_fs->cluster_size; + info->NumClusters = p_fs->num_clusters - 2; /* clu 0 & 1 */ + info->UsedClusters = p_fs->used_clusters; + info->FreeClusters = info->NumClusters - info->UsedClusters; + + if (p_fs->dev_ejected) + return FFS_MEDIAERR; + + return FFS_SUCCESS; +} /* end of ffsGetVolInfo */ + +/* ffsSyncVol : synchronize all file system volumes */ +INT32 ffsSyncVol(struct super_block *sb, INT32 do_sync) +{ + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + /* synchronize the file system */ + fs_sync(sb, do_sync); + fs_set_vol_flags(sb, VOL_CLEAN); + + if (p_fs->dev_ejected) + return FFS_MEDIAERR; + + return FFS_SUCCESS; +} /* end of ffsSyncVol */ + +/*----------------------------------------------------------------------*/ +/* File Operation Functions */ +/*----------------------------------------------------------------------*/ + +/* ffsLookupFile : lookup a file */ +INT32 ffsLookupFile(struct inode *inode, UINT8 *path, FILE_ID_T *fid) +{ + INT32 ret, dentry, num_entries; + CHAIN_T dir; + UNI_NAME_T uni_name; + DOS_NAME_T dos_name; + DENTRY_T *ep, *ep2; + ENTRY_SET_CACHE_T *es=NULL; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + PRINTK("ffsLookupFile entered\n"); + + /* check the validity of directory name in the given pathname */ + ret = resolve_path(inode, path, &dir, &uni_name); + if (ret) + return ret; + + ret = get_num_entries_and_dos_name(sb, &dir, &uni_name, &num_entries, &dos_name); + if (ret) + return ret; + + /* search the file name for directories */ + dentry = p_fs->fs_func->find_dir_entry(sb, &dir, &uni_name, num_entries, &dos_name, TYPE_ALL); + if (dentry < -1) + return FFS_NOTFOUND; + + fid->dir.dir = dir.dir; + fid->dir.size = dir.size; + fid->dir.flags = dir.flags; + fid->entry = dentry; + + if (dentry == -1) { + fid->type = TYPE_DIR; + fid->rwoffset = 0; + fid->hint_last_off = -1; + + fid->attr = ATTR_SUBDIR; + fid->flags = 0x01; + fid->size = 0; + fid->start_clu = p_fs->root_dir; + } else { + if (p_fs->vol_type == EXFAT) { + es = get_entry_set_in_dir(sb, &dir, dentry, ES_2_ENTRIES, &ep); + if (!es) + return FFS_MEDIAERR; + ep2 = ep+1; + } else { + ep = get_entry_in_dir(sb, &dir, dentry, NULL); + if (!ep) + return FFS_MEDIAERR; + ep2 = ep; + } + + fid->type = p_fs->fs_func->get_entry_type(ep); + fid->rwoffset = 0; + fid->hint_last_off = -1; + fid->attr = p_fs->fs_func->get_entry_attr(ep); + + fid->size = p_fs->fs_func->get_entry_size(ep2); + if ((fid->type == TYPE_FILE) && (fid->size == 0)) { + fid->flags = (p_fs->vol_type == EXFAT) ? 
0x03 : 0x01; + fid->start_clu = CLUSTER_32(~0); + } else { + fid->flags = p_fs->fs_func->get_entry_flag(ep2); + fid->start_clu = p_fs->fs_func->get_entry_clu0(ep2); + } + + if (p_fs->vol_type == EXFAT) + release_entry_set(es); + } + + if (p_fs->dev_ejected) + return FFS_MEDIAERR; + + PRINTK("ffsLookupFile exited successfully\n"); + + return FFS_SUCCESS; +} /* end of ffsLookupFile */ + +/* ffsCreateFile : create a file */ +INT32 ffsCreateFile(struct inode *inode, UINT8 *path, UINT8 mode, FILE_ID_T *fid) +{ + INT32 ret/*, dentry*/; + CHAIN_T dir; + UNI_NAME_T uni_name; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + /* check the validity of directory name in the given pathname */ + ret = resolve_path(inode, path, &dir, &uni_name); + if (ret) + return ret; + + fs_set_vol_flags(sb, VOL_DIRTY); + + /* create a new file */ + ret = create_file(inode, &dir, &uni_name, mode, fid); + +#if (DELAYED_SYNC == 0) + fs_sync(sb, 0); + fs_set_vol_flags(sb, VOL_CLEAN); +#endif + + if (p_fs->dev_ejected) + return FFS_MEDIAERR; + + return ret; +} /* end of ffsCreateFile */ + +/* ffsReadFile : read data from a opened file */ +INT32 ffsReadFile(struct inode *inode, FILE_ID_T *fid, void *buffer, UINT64 count, UINT64 *rcount) +{ + INT32 offset, sec_offset, clu_offset; + UINT32 clu, LogSector; + UINT64 oneblkread, read_bytes; + struct buffer_head *tmp_bh = NULL; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); + + /* check if the given file ID is opened */ + if (fid->type != TYPE_FILE) + return FFS_PERMISSIONERR; + + if (fid->rwoffset > fid->size) + fid->rwoffset = fid->size; + + if (count > (fid->size - fid->rwoffset)) + count = fid->size - fid->rwoffset; + + if (count == 0) { + if (rcount != NULL) + *rcount = 0; + return FFS_EOF; + } + + read_bytes = 0; + + while (count > 0) { + clu_offset = (INT32)(fid->rwoffset >> p_fs->cluster_size_bits); + clu = fid->start_clu; + + if (fid->flags == 0x03) { + clu += clu_offset; + } else { + /* hint information */ + if ((clu_offset > 0) && (fid->hint_last_off > 0) && + (clu_offset >= fid->hint_last_off)) { + clu_offset -= fid->hint_last_off; + clu = fid->hint_last_clu; + } + + while (clu_offset > 0) { + /* clu = FAT_read(sb, clu); */ + if (FAT_read(sb, clu, &clu) == -1) + return FFS_MEDIAERR; + + clu_offset--; + } + } + + /* hint information */ + fid->hint_last_off = (INT32)(fid->rwoffset >> p_fs->cluster_size_bits); + fid->hint_last_clu = clu; + + offset = (INT32)(fid->rwoffset & (p_fs->cluster_size-1)); /* byte offset in cluster */ + sec_offset = offset >> p_bd->sector_size_bits; /* sector offset in cluster */ + offset &= p_bd->sector_size_mask; /* byte offset in sector */ + + LogSector = START_SECTOR(clu) + sec_offset; + + oneblkread = (UINT64)(p_bd->sector_size - offset); + if (oneblkread > count) + oneblkread = count; + + if ((offset == 0) && (oneblkread == p_bd->sector_size)) { + if (sector_read(sb, LogSector, &tmp_bh, 1) != FFS_SUCCESS) + goto err_out; + MEMCPY(((INT8 *) buffer)+read_bytes, ((INT8 *) tmp_bh->b_data), (INT32) oneblkread); + } else { + if (sector_read(sb, LogSector, &tmp_bh, 1) != FFS_SUCCESS) + goto err_out; + MEMCPY(((INT8 *) buffer)+read_bytes, ((INT8 *) tmp_bh->b_data)+offset, (INT32) oneblkread); + } + count -= oneblkread; + read_bytes += oneblkread; + fid->rwoffset += oneblkread; + } + brelse(tmp_bh); + +err_out: + /* set the size of read bytes */ + if (rcount != NULL) + *rcount = read_bytes; + + if (p_fs->dev_ejected) + 
return FFS_MEDIAERR; + + return FFS_SUCCESS; +} /* end of ffsReadFile */ + +/* ffsWriteFile : write data into a opened file */ +INT32 ffsWriteFile(struct inode *inode, FILE_ID_T *fid, void *buffer, UINT64 count, UINT64 *wcount) +{ + INT32 modified = FALSE, offset, sec_offset, clu_offset; + INT32 num_clusters, num_alloc, num_alloced = (INT32) ~0; + UINT32 clu, last_clu, LogSector, sector = 0; + UINT64 oneblkwrite, write_bytes; + CHAIN_T new_clu; + TIMESTAMP_T tm; + DENTRY_T *ep, *ep2; + ENTRY_SET_CACHE_T *es = NULL; + struct buffer_head *tmp_bh = NULL; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); + + /* check if the given file ID is opened */ + if (fid->type != TYPE_FILE) + return FFS_PERMISSIONERR; + + if (fid->rwoffset > fid->size) + fid->rwoffset = fid->size; + + if (count == 0) { + if (wcount != NULL) + *wcount = 0; + return FFS_SUCCESS; + } + + fs_set_vol_flags(sb, VOL_DIRTY); + + if (fid->size == 0) + num_clusters = 0; + else + num_clusters = (INT32)((fid->size-1) >> p_fs->cluster_size_bits) + 1; + + write_bytes = 0; + + while (count > 0) { + clu_offset = (INT32)(fid->rwoffset >> p_fs->cluster_size_bits); + clu = last_clu = fid->start_clu; + + if (fid->flags == 0x03) { + if ((clu_offset > 0) && (clu != CLUSTER_32(~0))) { + last_clu += clu_offset - 1; + + if (clu_offset == num_clusters) + clu = CLUSTER_32(~0); + else + clu += clu_offset; + } + } else { + /* hint information */ + if ((clu_offset > 0) && (fid->hint_last_off > 0) && + (clu_offset >= fid->hint_last_off)) { + clu_offset -= fid->hint_last_off; + clu = fid->hint_last_clu; + } + + while ((clu_offset > 0) && (clu != CLUSTER_32(~0))) { + last_clu = clu; + /* clu = FAT_read(sb, clu); */ + if (FAT_read(sb, clu, &clu) == -1) + return FFS_MEDIAERR; + + clu_offset--; + } + } + + if (clu == CLUSTER_32(~0)) { + num_alloc = (INT32)((count-1) >> p_fs->cluster_size_bits) + 1; + new_clu.dir = (last_clu == CLUSTER_32(~0)) ? 
CLUSTER_32(~0) : last_clu+1; + new_clu.size = 0; + new_clu.flags = fid->flags; + + /* (1) allocate a chain of clusters */ + num_alloced = p_fs->fs_func->alloc_cluster(sb, num_alloc, &new_clu); + if (num_alloced == 0) + break; + + /* (2) append to the FAT chain */ + if (last_clu == CLUSTER_32(~0)) { + if (new_clu.flags == 0x01) + fid->flags = 0x01; + fid->start_clu = new_clu.dir; + modified = TRUE; + } else { + if (new_clu.flags != fid->flags) { + exfat_chain_cont_cluster(sb, fid->start_clu, num_clusters); + fid->flags = 0x01; + modified = TRUE; + } + if (new_clu.flags == 0x01) + FAT_write(sb, last_clu, new_clu.dir); + } + + num_clusters += num_alloced; + clu = new_clu.dir; + } + + /* hint information */ + fid->hint_last_off = (INT32)(fid->rwoffset >> p_fs->cluster_size_bits); + fid->hint_last_clu = clu; + + offset = (INT32)(fid->rwoffset & (p_fs->cluster_size-1)); /* byte offset in cluster */ + sec_offset = offset >> p_bd->sector_size_bits; /* sector offset in cluster */ + offset &= p_bd->sector_size_mask; /* byte offset in sector */ + + LogSector = START_SECTOR(clu) + sec_offset; + + oneblkwrite = (UINT64)(p_bd->sector_size - offset); + if (oneblkwrite > count) + oneblkwrite = count; + + if ((offset == 0) && (oneblkwrite == p_bd->sector_size)) { + if (sector_read(sb, LogSector, &tmp_bh, 0) != FFS_SUCCESS) + goto err_out; + MEMCPY(((INT8 *) tmp_bh->b_data), ((INT8 *) buffer)+write_bytes, (INT32) oneblkwrite); + if (sector_write(sb, LogSector, tmp_bh, 0) != FFS_SUCCESS) { + brelse(tmp_bh); + goto err_out; + } + } else { + if ((offset > 0) || ((fid->rwoffset+oneblkwrite) < fid->size)) { + if (sector_read(sb, LogSector, &tmp_bh, 1) != FFS_SUCCESS) + goto err_out; + } else { + if (sector_read(sb, LogSector, &tmp_bh, 0) != FFS_SUCCESS) + goto err_out; + } + + MEMCPY(((INT8 *) tmp_bh->b_data)+offset, ((INT8 *) buffer)+write_bytes, (INT32) oneblkwrite); + if (sector_write(sb, LogSector, tmp_bh, 0) != FFS_SUCCESS) { + brelse(tmp_bh); + goto err_out; + } + } + + count -= oneblkwrite; + write_bytes += oneblkwrite; + fid->rwoffset += oneblkwrite; + + fid->attr |= ATTR_ARCHIVE; + + if (fid->size < fid->rwoffset) { + fid->size = fid->rwoffset; + modified = TRUE; + } + } + + brelse(tmp_bh); + + /* (3) update the direcoty entry */ + if (p_fs->vol_type == EXFAT) { + es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry, ES_ALL_ENTRIES, &ep); + if (es == NULL) + goto err_out; + ep2 = ep+1; + } else { + ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, §or); + if (!ep) + goto err_out; + ep2 = ep; + } + + p_fs->fs_func->set_entry_time(ep, tm_current(&tm), TM_MODIFY); + p_fs->fs_func->set_entry_attr(ep, fid->attr); + + if (p_fs->vol_type != EXFAT) + buf_modify(sb, sector); + + if (modified) { + if (p_fs->fs_func->get_entry_flag(ep2) != fid->flags) + p_fs->fs_func->set_entry_flag(ep2, fid->flags); + + if (p_fs->fs_func->get_entry_size(ep2) != fid->size) + p_fs->fs_func->set_entry_size(ep2, fid->size); + + if (p_fs->fs_func->get_entry_clu0(ep2) != fid->start_clu) + p_fs->fs_func->set_entry_clu0(ep2, fid->start_clu); + + if (p_fs->vol_type != EXFAT) + buf_modify(sb, sector); + } + + if (p_fs->vol_type == EXFAT) { + update_dir_checksum_with_entry_set(sb, es); + release_entry_set(es); + } + +#if (DELAYED_SYNC == 0) + fs_sync(sb, 0); + fs_set_vol_flags(sb, VOL_CLEAN); +#endif + +err_out: + /* set the size of written bytes */ + if (wcount != NULL) + *wcount = write_bytes; + + if (num_alloced == 0) + return FFS_FULL; + + if (p_fs->dev_ejected) + return FFS_MEDIAERR; + + return FFS_SUCCESS; +} /* end of ffsWriteFile 
*/ + +/* ffsTruncateFile : resize the file length */ +INT32 ffsTruncateFile(struct inode *inode, UINT64 old_size, UINT64 new_size) +{ + INT32 num_clusters; + UINT32 last_clu = CLUSTER_32(0), sector = 0; + CHAIN_T clu; + TIMESTAMP_T tm; + DENTRY_T *ep, *ep2; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + FILE_ID_T *fid = &(EXFAT_I(inode)->fid); + ENTRY_SET_CACHE_T *es=NULL; + + /* check if the given file ID is opened */ + if (fid->type != TYPE_FILE) + return FFS_PERMISSIONERR; + + if (fid->size != old_size) { + printk(KERN_ERR "[EXFAT] truncate : can't skip it because of " + "size-mismatch(old:%lld->fid:%lld).\n" + ,old_size, fid->size); + } + + if (old_size <= new_size) + return FFS_SUCCESS; + + fs_set_vol_flags(sb, VOL_DIRTY); + + clu.dir = fid->start_clu; + clu.size = (INT32)((old_size-1) >> p_fs->cluster_size_bits) + 1; + clu.flags = fid->flags; + + if (new_size > 0) { + num_clusters = (INT32)((new_size-1) >> p_fs->cluster_size_bits) + 1; + + if (clu.flags == 0x03) { + clu.dir += num_clusters; + } else { + while (num_clusters > 0) { + last_clu = clu.dir; + if (FAT_read(sb, clu.dir, &(clu.dir)) == -1) + return FFS_MEDIAERR; + num_clusters--; + } + } + + clu.size -= num_clusters; + } + + fid->size = new_size; + fid->attr |= ATTR_ARCHIVE; + if (new_size == 0) { + fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01; + fid->start_clu = CLUSTER_32(~0); + } + + /* (1) update the directory entry */ + if (p_fs->vol_type == EXFAT) { + es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry, ES_ALL_ENTRIES, &ep); + if (es == NULL) + return FFS_MEDIAERR; + ep2 = ep+1; + } else { + ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, §or); + if (!ep) + return FFS_MEDIAERR; + ep2 = ep; + } + + p_fs->fs_func->set_entry_time(ep, tm_current(&tm), TM_MODIFY); + p_fs->fs_func->set_entry_attr(ep, fid->attr); + + p_fs->fs_func->set_entry_size(ep2, new_size); + if (new_size == 0) { + p_fs->fs_func->set_entry_flag(ep2, 0x01); + p_fs->fs_func->set_entry_clu0(ep2, CLUSTER_32(0)); + } + + if (p_fs->vol_type != EXFAT) + buf_modify(sb, sector); + else { + update_dir_checksum_with_entry_set(sb, es); + release_entry_set(es); + } + + /* (2) cut off from the FAT chain */ + if (last_clu != CLUSTER_32(0)) { + if (fid->flags == 0x01) + FAT_write(sb, last_clu, CLUSTER_32(~0)); + } + + /* (3) free the clusters */ + p_fs->fs_func->free_cluster(sb, &clu, 0); + + /* hint information */ + fid->hint_last_off = -1; + if (fid->rwoffset > fid->size) { + fid->rwoffset = fid->size; + } + +#if (DELAYED_SYNC == 0) + fs_sync(sb, 0); + fs_set_vol_flags(sb, VOL_CLEAN); +#endif + + if (p_fs->dev_ejected) + return FFS_MEDIAERR; + + return FFS_SUCCESS; +} /* end of ffsTruncateFile */ + +static void update_parent_info( FILE_ID_T *fid, struct inode *parent_inode) +{ + FS_INFO_T *p_fs = &(EXFAT_SB(parent_inode->i_sb)->fs_info); + FILE_ID_T *parent_fid = &(EXFAT_I(parent_inode)->fid); + + if (unlikely((parent_fid->flags != fid->dir.flags) + || (parent_fid->size != (fid->dir.size<cluster_size_bits)) + || (parent_fid->start_clu != fid->dir.dir))) { + + fid->dir.dir = parent_fid->start_clu; + fid->dir.flags = parent_fid->flags; + fid->dir.size = ((parent_fid->size + (p_fs->cluster_size-1)) + >> p_fs->cluster_size_bits); + } +} + +/* ffsMoveFile : move(rename) a old file into a new file */ +INT32 ffsMoveFile(struct inode *old_parent_inode, FILE_ID_T *fid, struct inode *new_parent_inode, struct dentry *new_dentry) +{ + INT32 ret; + INT32 dentry; + CHAIN_T olddir, newdir; + CHAIN_T *p_dir=NULL; + UNI_NAME_T 
uni_name; + DENTRY_T *ep; + struct super_block *sb = old_parent_inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + UINT8 *new_path = (UINT8 *) new_dentry->d_name.name; + struct inode *new_inode = new_dentry->d_inode; + int num_entries; + FILE_ID_T *new_fid = NULL; + INT32 new_entry=0; + + /* check the validity of pointer parameters */ + if ((new_path == NULL) || (*new_path == '\0')) + return FFS_ERROR; + + update_parent_info(fid, old_parent_inode); + + olddir.dir = fid->dir.dir; + olddir.size = fid->dir.size; + olddir.flags = fid->dir.flags; + + dentry = fid->entry; + + /* check if the old file is "." or ".." */ + if (p_fs->vol_type != EXFAT) { + if ((olddir.dir != p_fs->root_dir) && (dentry < 2)) + return FFS_PERMISSIONERR; + } + + ep = get_entry_in_dir(sb, &olddir, dentry, NULL); + if (!ep) + return FFS_MEDIAERR; + + if (p_fs->fs_func->get_entry_attr(ep) & ATTR_READONLY) + return FFS_PERMISSIONERR; + + /* check whether new dir is existing directory and empty */ + if (new_inode) { + UINT32 entry_type; + + ret = FFS_MEDIAERR; + new_fid = &EXFAT_I(new_inode)->fid; + + update_parent_info(new_fid, new_parent_inode); + + p_dir = &(new_fid->dir); + new_entry = new_fid->entry; + ep = get_entry_in_dir(sb, p_dir, new_entry, NULL); + if (!ep) + goto out; + + entry_type = p_fs->fs_func->get_entry_type(ep); + + if (entry_type == TYPE_DIR) { + CHAIN_T new_clu; + new_clu.dir = new_fid->start_clu; + new_clu.size = (INT32)((new_fid->size-1) >> p_fs->cluster_size_bits) + 1; + new_clu.flags = new_fid->flags; + + if (!is_dir_empty(sb, &new_clu)) + return FFS_FILEEXIST; + } + } + + /* check the validity of directory name in the given new pathname */ + ret = resolve_path(new_parent_inode, new_path, &newdir, &uni_name); + if (ret) + return ret; + + fs_set_vol_flags(sb, VOL_DIRTY); + + if (olddir.dir == newdir.dir) + ret = rename_file(new_parent_inode, &olddir, dentry, &uni_name, fid); + else + ret = move_file(new_parent_inode, &olddir, dentry, &newdir, &uni_name, fid); + + if ((ret == FFS_SUCCESS) && new_inode) { + /* delete entries of new_dir */ + ep = get_entry_in_dir(sb, p_dir, new_entry, NULL); + if (!ep) + goto out; + + num_entries = p_fs->fs_func->count_ext_entries(sb, p_dir, new_entry, ep); + if (num_entries < 0) + goto out; + p_fs->fs_func->delete_dir_entry(sb, p_dir, new_entry, 0, num_entries+1); + } +out: +#if (DELAYED_SYNC == 0) + fs_sync(sb, 0); + fs_set_vol_flags(sb, VOL_CLEAN); +#endif + + if (p_fs->dev_ejected) + return FFS_MEDIAERR; + + return ret; +} /* end of ffsMoveFile */ + +/* ffsRemoveFile : remove a file */ +INT32 ffsRemoveFile(struct inode *inode, FILE_ID_T *fid) +{ + INT32 dentry; + CHAIN_T dir, clu_to_free; + DENTRY_T *ep; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + dir.dir = fid->dir.dir; + dir.size = fid->dir.size; + dir.flags = fid->dir.flags; + + dentry = fid->entry; + + ep = get_entry_in_dir(sb, &dir, dentry, NULL); + if (!ep) + return FFS_MEDIAERR; + + if (p_fs->fs_func->get_entry_attr(ep) & ATTR_READONLY) + return FFS_PERMISSIONERR; + + fs_set_vol_flags(sb, VOL_DIRTY); + + /* (1) update the directory entry */ + remove_file(inode, &dir, dentry); + + clu_to_free.dir = fid->start_clu; + clu_to_free.size = (INT32)((fid->size-1) >> p_fs->cluster_size_bits) + 1; + clu_to_free.flags = fid->flags; + + /* (2) free the clusters */ + p_fs->fs_func->free_cluster(sb, &clu_to_free, 0); + + fid->size = 0; + fid->start_clu = CLUSTER_32(~0); + fid->flags = (p_fs->vol_type == EXFAT)? 
0x03: 0x01; + +#if (DELAYED_SYNC == 0) + fs_sync(sb, 0); + fs_set_vol_flags(sb, VOL_CLEAN); +#endif + + if (p_fs->dev_ejected) + return FFS_MEDIAERR; + + return FFS_SUCCESS; +} /* end of ffsRemoveFile */ + +/* ffsSetAttr : set the attribute of a given file */ +INT32 ffsSetAttr(struct inode *inode, UINT32 attr) +{ + UINT32 type, sector = 0; + DENTRY_T *ep; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + FILE_ID_T *fid = &(EXFAT_I(inode)->fid); + UINT8 is_dir = (fid->type == TYPE_DIR) ? 1 : 0; + ENTRY_SET_CACHE_T *es = NULL; + + if (fid->attr == attr) { + if (p_fs->dev_ejected) + return FFS_MEDIAERR; + return FFS_SUCCESS; + } + + if (is_dir) { + if ((fid->dir.dir == p_fs->root_dir) && + (fid->entry == -1)) { + if (p_fs->dev_ejected) + return FFS_MEDIAERR; + return FFS_SUCCESS; + } + } + + /* get the directory entry of given file */ + if (p_fs->vol_type == EXFAT) { + es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry, ES_ALL_ENTRIES, &ep); + if (es == NULL) + return FFS_MEDIAERR; + } else { + ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, §or); + if (!ep) + return FFS_MEDIAERR; + } + + type = p_fs->fs_func->get_entry_type(ep); + + if (((type == TYPE_FILE) && (attr & ATTR_SUBDIR)) || + ((type == TYPE_DIR) && (!(attr & ATTR_SUBDIR)))) { + INT32 err; + if (p_fs->dev_ejected) + err = FFS_MEDIAERR; + else + err = FFS_ERROR; + + if (p_fs->vol_type == EXFAT) + release_entry_set(es); + return err; + } + + fs_set_vol_flags(sb, VOL_DIRTY); + + /* set the file attribute */ + fid->attr = attr; + p_fs->fs_func->set_entry_attr(ep, attr); + + if (p_fs->vol_type != EXFAT) + buf_modify(sb, sector); + else { + update_dir_checksum_with_entry_set(sb, es); + release_entry_set(es); + } + +#if (DELAYED_SYNC == 0) + fs_sync(sb, 0); + fs_set_vol_flags(sb, VOL_CLEAN); +#endif + + if (p_fs->dev_ejected) + return FFS_MEDIAERR; + + return FFS_SUCCESS; +} /* end of ffsSetAttr */ + +/* ffsGetStat : get the information of a given file */ +INT32 ffsGetStat(struct inode *inode, DIR_ENTRY_T *info) +{ + UINT32 sector = 0; + INT32 count; + CHAIN_T dir; + UNI_NAME_T uni_name; + TIMESTAMP_T tm; + DENTRY_T *ep, *ep2; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + FILE_ID_T *fid = &(EXFAT_I(inode)->fid); + ENTRY_SET_CACHE_T *es=NULL; + UINT8 is_dir = (fid->type == TYPE_DIR) ? 
1 : 0; + + PRINTK("ffsGetStat entered\n"); + + if (is_dir) { + if ((fid->dir.dir == p_fs->root_dir) && + (fid->entry == -1)) { + info->Attr = ATTR_SUBDIR; + MEMSET((INT8 *) &info->CreateTimestamp, 0, sizeof(DATE_TIME_T)); + MEMSET((INT8 *) &info->ModifyTimestamp, 0, sizeof(DATE_TIME_T)); + MEMSET((INT8 *) &info->AccessTimestamp, 0, sizeof(DATE_TIME_T)); + STRCPY(info->ShortName, "."); + STRCPY(info->Name, "."); + + dir.dir = p_fs->root_dir; + dir.flags = 0x01; + + if (p_fs->root_dir == CLUSTER_32(0)) /* FAT16 root_dir */ + info->Size = p_fs->dentries_in_root << DENTRY_SIZE_BITS; + else + info->Size = count_num_clusters(sb, &dir) << p_fs->cluster_size_bits; + + count = count_dos_name_entries(sb, &dir, TYPE_DIR); + if (count < 0) + return FFS_MEDIAERR; + info->NumSubdirs = count; + + if (p_fs->dev_ejected) + return FFS_MEDIAERR; + return FFS_SUCCESS; + } + } + + /* get the directory entry of given file or directory */ + if (p_fs->vol_type == EXFAT) { + es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry, ES_2_ENTRIES, &ep); + if (es == NULL) + return FFS_MEDIAERR; + ep2 = ep+1; + } else { + ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, §or); + if (!ep) + return FFS_MEDIAERR; + ep2 = ep; + buf_lock(sb, sector); + } + + /* set FILE_INFO structure using the acquired DENTRY_T */ + info->Attr = p_fs->fs_func->get_entry_attr(ep); + + p_fs->fs_func->get_entry_time(ep, &tm, TM_CREATE); + info->CreateTimestamp.Year = tm.year; + info->CreateTimestamp.Month = tm.mon; + info->CreateTimestamp.Day = tm.day; + info->CreateTimestamp.Hour = tm.hour; + info->CreateTimestamp.Minute = tm.min; + info->CreateTimestamp.Second = tm.sec; + info->CreateTimestamp.MilliSecond = 0; + + p_fs->fs_func->get_entry_time(ep, &tm, TM_MODIFY); + info->ModifyTimestamp.Year = tm.year; + info->ModifyTimestamp.Month = tm.mon; + info->ModifyTimestamp.Day = tm.day; + info->ModifyTimestamp.Hour = tm.hour; + info->ModifyTimestamp.Minute = tm.min; + info->ModifyTimestamp.Second = tm.sec; + info->ModifyTimestamp.MilliSecond = 0; + + MEMSET((INT8 *) &info->AccessTimestamp, 0, sizeof(DATE_TIME_T)); + + *(uni_name.name) = 0x0; + /* XXX this is very bad for exfat cuz name is already included in es. 
+ API should be revised */ + p_fs->fs_func->get_uni_name_from_ext_entry(sb, &(fid->dir), fid->entry, uni_name.name); + if (*(uni_name.name) == 0x0) + get_uni_name_from_dos_entry(sb, (DOS_DENTRY_T *) ep, &uni_name, 0x1); + nls_uniname_to_cstring(sb, info->Name, &uni_name); + + if (p_fs->vol_type == EXFAT) { + info->NumSubdirs = 2; + } else { + buf_unlock(sb, sector); + get_uni_name_from_dos_entry(sb, (DOS_DENTRY_T *) ep, &uni_name, 0x0); + nls_uniname_to_cstring(sb, info->ShortName, &uni_name); + info->NumSubdirs = 0; + } + + info->Size = p_fs->fs_func->get_entry_size(ep2); + + if (p_fs->vol_type == EXFAT) + release_entry_set(es); + + if (is_dir) { + dir.dir = fid->start_clu; + dir.flags = 0x01; + + if (info->Size == 0) + info->Size = (UINT64) count_num_clusters(sb, &dir) << p_fs->cluster_size_bits; + + count = count_dos_name_entries(sb, &dir, TYPE_DIR); + if (count < 0) + return FFS_MEDIAERR; + info->NumSubdirs += count; + } + + if (p_fs->dev_ejected) + return FFS_MEDIAERR; + + PRINTK("ffsGetStat exited successfully\n"); + return FFS_SUCCESS; +} /* end of ffsGetStat */ + +/* ffsSetStat : set the information of a given file */ +INT32 ffsSetStat(struct inode *inode, DIR_ENTRY_T *info) +{ + UINT32 sector = 0; + TIMESTAMP_T tm; + DENTRY_T *ep, *ep2; + ENTRY_SET_CACHE_T *es=NULL; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + FILE_ID_T *fid = &(EXFAT_I(inode)->fid); + UINT8 is_dir = (fid->type == TYPE_DIR) ? 1 : 0; + + if (is_dir) { + if ((fid->dir.dir == p_fs->root_dir) && + (fid->entry == -1)) { + if (p_fs->dev_ejected) + return FFS_MEDIAERR; + return FFS_SUCCESS; + } + } + + fs_set_vol_flags(sb, VOL_DIRTY); + + /* get the directory entry of given file or directory */ + if (p_fs->vol_type == EXFAT) { + es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry, ES_ALL_ENTRIES, &ep); + if (es == NULL) + return FFS_MEDIAERR; + ep2 = ep+1; + } else { + /* for other than exfat */ + ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, §or); + if (!ep) + return FFS_MEDIAERR; + ep2 = ep; + } + + + p_fs->fs_func->set_entry_attr(ep, info->Attr); + + /* set FILE_INFO structure using the acquired DENTRY_T */ + tm.sec = info->CreateTimestamp.Second; + tm.min = info->CreateTimestamp.Minute; + tm.hour = info->CreateTimestamp.Hour; + tm.day = info->CreateTimestamp.Day; + tm.mon = info->CreateTimestamp.Month; + tm.year = info->CreateTimestamp.Year; + p_fs->fs_func->set_entry_time(ep, &tm, TM_CREATE); + + tm.sec = info->ModifyTimestamp.Second; + tm.min = info->ModifyTimestamp.Minute; + tm.hour = info->ModifyTimestamp.Hour; + tm.day = info->ModifyTimestamp.Day; + tm.mon = info->ModifyTimestamp.Month; + tm.year = info->ModifyTimestamp.Year; + p_fs->fs_func->set_entry_time(ep, &tm, TM_MODIFY); + + + p_fs->fs_func->set_entry_size(ep2, info->Size); + + if (p_fs->vol_type != EXFAT) { + buf_modify(sb, sector); + } else { + update_dir_checksum_with_entry_set(sb, es); + release_entry_set(es); + } + + if (p_fs->dev_ejected) + return FFS_MEDIAERR; + + return FFS_SUCCESS; +} /* end of ffsSetStat */ + +INT32 ffsMapCluster(struct inode *inode, INT32 clu_offset, UINT32 *clu) +{ + INT32 num_clusters, num_alloced, modified = FALSE; + UINT32 last_clu, sector = 0; + CHAIN_T new_clu; + DENTRY_T *ep; + ENTRY_SET_CACHE_T *es = NULL; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + FILE_ID_T *fid = &(EXFAT_I(inode)->fid); + + fid->rwoffset = (INT64)(clu_offset) << p_fs->cluster_size_bits; + + if (EXFAT_I(inode)->mmu_private == 0) + num_clusters = 0; + else + 
num_clusters = (INT32)((EXFAT_I(inode)->mmu_private-1) >> p_fs->cluster_size_bits) + 1; + + *clu = last_clu = fid->start_clu; + + if (fid->flags == 0x03) { + if ((clu_offset > 0) && (*clu != CLUSTER_32(~0))) { + last_clu += clu_offset - 1; + + if (clu_offset == num_clusters) + *clu = CLUSTER_32(~0); + else + *clu += clu_offset; + } + } else { + /* hint information */ + if ((clu_offset > 0) && (fid->hint_last_off > 0) && + (clu_offset >= fid->hint_last_off)) { + clu_offset -= fid->hint_last_off; + *clu = fid->hint_last_clu; + } + + while ((clu_offset > 0) && (*clu != CLUSTER_32(~0))) { + last_clu = *clu; + if (FAT_read(sb, *clu, clu) == -1) + return FFS_MEDIAERR; + clu_offset--; + } + } + + if (*clu == CLUSTER_32(~0)) { + fs_set_vol_flags(sb, VOL_DIRTY); + + new_clu.dir = (last_clu == CLUSTER_32(~0)) ? CLUSTER_32(~0) : last_clu+1; + new_clu.size = 0; + new_clu.flags = fid->flags; + + /* (1) allocate a cluster */ + num_alloced = p_fs->fs_func->alloc_cluster(sb, 1, &new_clu); + if (num_alloced < 1) + return FFS_FULL; + + /* (2) append to the FAT chain */ + if (last_clu == CLUSTER_32(~0)) { + if (new_clu.flags == 0x01) + fid->flags = 0x01; + fid->start_clu = new_clu.dir; + modified = TRUE; + } else { + if (new_clu.flags != fid->flags) { + exfat_chain_cont_cluster(sb, fid->start_clu, num_clusters); + fid->flags = 0x01; + modified = TRUE; + } + if (new_clu.flags == 0x01) + FAT_write(sb, last_clu, new_clu.dir); + } + + *clu = new_clu.dir; + + if (p_fs->vol_type == EXFAT) { + es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry, ES_ALL_ENTRIES, &ep); + if (es == NULL) + return FFS_MEDIAERR; + /* get stream entry */ + ep++; + } + + /* (3) update directory entry */ + if (modified) { + if (p_fs->vol_type != EXFAT) { + ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, §or); + if (!ep) + return FFS_MEDIAERR; + } + + if (p_fs->fs_func->get_entry_flag(ep) != fid->flags) + p_fs->fs_func->set_entry_flag(ep, fid->flags); + + if (p_fs->fs_func->get_entry_clu0(ep) != fid->start_clu) + p_fs->fs_func->set_entry_clu0(ep, fid->start_clu); + + if (p_fs->vol_type != EXFAT) + buf_modify(sb, sector); + } + + if (p_fs->vol_type == EXFAT) { + update_dir_checksum_with_entry_set(sb, es); + release_entry_set(es); + } + + /* add number of new blocks to inode */ + inode->i_blocks += num_alloced << (p_fs->cluster_size_bits - 9); + } + + /* hint information */ + fid->hint_last_off = (INT32)(fid->rwoffset >> p_fs->cluster_size_bits); + fid->hint_last_clu = *clu; + + if (p_fs->dev_ejected) + return FFS_MEDIAERR; + + return FFS_SUCCESS; +} /* end of ffsMapCluster */ + +/*----------------------------------------------------------------------*/ +/* Directory Operation Functions */ +/*----------------------------------------------------------------------*/ + +/* ffsCreateDir : create(make) a directory */ +INT32 ffsCreateDir(struct inode *inode, UINT8 *path, FILE_ID_T *fid) +{ + INT32 ret/*, dentry*/; + CHAIN_T dir; + UNI_NAME_T uni_name; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + PRINTK("ffsCreateDir entered\n"); + + /* check the validity of directory name in the given old pathname */ + ret = resolve_path(inode, path, &dir, &uni_name); + if (ret) + return ret; + + fs_set_vol_flags(sb, VOL_DIRTY); + + ret = create_dir(inode, &dir, &uni_name, fid); + +#if (DELAYED_SYNC == 0) + fs_sync(sb, 0); + fs_set_vol_flags(sb, VOL_CLEAN); +#endif + + if (p_fs->dev_ejected) + return FFS_MEDIAERR; + + return ret; +} /* end of ffsCreateDir */ + +/* ffsReadDir : read a directory entry from the opened 
directory */ +INT32 ffsReadDir(struct inode *inode, DIR_ENTRY_T *dir_entry) +{ + INT32 i, dentry, clu_offset; + INT32 dentries_per_clu, dentries_per_clu_bits = 0; + UINT32 type, sector; + CHAIN_T dir, clu; + UNI_NAME_T uni_name; + TIMESTAMP_T tm; + DENTRY_T *ep; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + FILE_ID_T *fid = &(EXFAT_I(inode)->fid); + + /* check if the given file ID is opened */ + if (fid->type != TYPE_DIR) + return FFS_PERMISSIONERR; + + if (fid->entry == -1) { + dir.dir = p_fs->root_dir; + dir.flags = 0x01; + } else { + dir.dir = fid->start_clu; + dir.size = (INT32)(fid->size >> p_fs->cluster_size_bits); + dir.flags = fid->flags; + } + + dentry = (INT32) fid->rwoffset; + + if (dir.dir == CLUSTER_32(0)) { /* FAT16 root_dir */ + dentries_per_clu = p_fs->dentries_in_root; + + if (dentry == dentries_per_clu) { + clu.dir = CLUSTER_32(~0); + } else { + clu.dir = dir.dir; + clu.size = dir.size; + clu.flags = dir.flags; + } + } else { + dentries_per_clu = p_fs->dentries_per_clu; + dentries_per_clu_bits = my_log2(dentries_per_clu); + + clu_offset = dentry >> dentries_per_clu_bits; + clu.dir = dir.dir; + clu.size = dir.size; + clu.flags = dir.flags; + + if (clu.flags == 0x03) { + clu.dir += clu_offset; + clu.size -= clu_offset; + } else { + /* hint_information */ + if ((clu_offset > 0) && (fid->hint_last_off > 0) && + (clu_offset >= fid->hint_last_off)) { + clu_offset -= fid->hint_last_off; + clu.dir = fid->hint_last_clu; + } + + while (clu_offset > 0) { + /* clu.dir = FAT_read(sb, clu.dir); */ + if (FAT_read(sb, clu.dir, &(clu.dir)) == -1) + return FFS_MEDIAERR; + + clu_offset--; + } + } + } + + while (clu.dir != CLUSTER_32(~0)) { + if (p_fs->dev_ejected) + break; + + if (dir.dir == CLUSTER_32(0)) /* FAT16 root_dir */ + i = dentry % dentries_per_clu; + else + i = dentry & (dentries_per_clu-1); + + for ( ; i < dentries_per_clu; i++, dentry++) { + ep = get_entry_in_dir(sb, &clu, i, §or); + if (!ep) + return FFS_MEDIAERR; + + type = p_fs->fs_func->get_entry_type(ep); + + if (type == TYPE_UNUSED) + break; + + if ((type != TYPE_FILE) && (type != TYPE_DIR)) + continue; + + buf_lock(sb, sector); + dir_entry->Attr = p_fs->fs_func->get_entry_attr(ep); + + p_fs->fs_func->get_entry_time(ep, &tm, TM_CREATE); + dir_entry->CreateTimestamp.Year = tm.year; + dir_entry->CreateTimestamp.Month = tm.mon; + dir_entry->CreateTimestamp.Day = tm.day; + dir_entry->CreateTimestamp.Hour = tm.hour; + dir_entry->CreateTimestamp.Minute = tm.min; + dir_entry->CreateTimestamp.Second = tm.sec; + dir_entry->CreateTimestamp.MilliSecond = 0; + + p_fs->fs_func->get_entry_time(ep, &tm, TM_MODIFY); + dir_entry->ModifyTimestamp.Year = tm.year; + dir_entry->ModifyTimestamp.Month = tm.mon; + dir_entry->ModifyTimestamp.Day = tm.day; + dir_entry->ModifyTimestamp.Hour = tm.hour; + dir_entry->ModifyTimestamp.Minute = tm.min; + dir_entry->ModifyTimestamp.Second = tm.sec; + dir_entry->ModifyTimestamp.MilliSecond = 0; + + MEMSET((INT8 *) &dir_entry->AccessTimestamp, 0, sizeof(DATE_TIME_T)); + + *(uni_name.name) = 0x0; + p_fs->fs_func->get_uni_name_from_ext_entry(sb, &dir, dentry, uni_name.name); + if (*(uni_name.name) == 0x0) + get_uni_name_from_dos_entry(sb, (DOS_DENTRY_T *) ep, &uni_name, 0x1); + nls_uniname_to_cstring(sb, dir_entry->Name, &uni_name); + buf_unlock(sb, sector); + + if (p_fs->vol_type == EXFAT) { + ep = get_entry_in_dir(sb, &clu, i+1, NULL); + if (!ep) + return FFS_MEDIAERR; + } else { + get_uni_name_from_dos_entry(sb, (DOS_DENTRY_T *) ep, &uni_name, 0x0); + 
nls_uniname_to_cstring(sb, dir_entry->ShortName, &uni_name); + } + + dir_entry->Size = p_fs->fs_func->get_entry_size(ep); + + /* hint information */ + if (dir.dir == CLUSTER_32(0)) { /* FAT16 root_dir */ + } else { + fid->hint_last_off = dentry >> dentries_per_clu_bits; + fid->hint_last_clu = clu.dir; + } + + fid->rwoffset = (INT64) ++dentry; + + if (p_fs->dev_ejected) + return FFS_MEDIAERR; + + return FFS_SUCCESS; + } + + if (dir.dir == CLUSTER_32(0)) + break; /* FAT16 root_dir */ + + if (clu.flags == 0x03) { + if ((--clu.size) > 0) + clu.dir++; + else + clu.dir = CLUSTER_32(~0); + } else { + /* clu.dir = FAT_read(sb, clu.dir); */ + if (FAT_read(sb, clu.dir, &(clu.dir)) == -1) + return FFS_MEDIAERR; + } + } + + *(dir_entry->Name) = '\0'; + + fid->rwoffset = (INT64) ++dentry; + + if (p_fs->dev_ejected) + return FFS_MEDIAERR; + + return FFS_SUCCESS; +} /* end of ffsReadDir */ + +/* ffsRemoveDir : remove a directory */ +INT32 ffsRemoveDir(struct inode *inode, FILE_ID_T *fid) +{ + INT32 dentry; + CHAIN_T dir, clu_to_free; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + dir.dir = fid->dir.dir; + dir.size = fid->dir.size; + dir.flags = fid->dir.flags; + + dentry = fid->entry; + + /* check if the file is "." or ".." */ + if (p_fs->vol_type != EXFAT) { + if ((dir.dir != p_fs->root_dir) && (dentry < 2)) + return FFS_PERMISSIONERR; + } + + clu_to_free.dir = fid->start_clu; + clu_to_free.size = (INT32)((fid->size-1) >> p_fs->cluster_size_bits) + 1; + clu_to_free.flags = fid->flags; + + if (!is_dir_empty(sb, &clu_to_free)) + return FFS_FILEEXIST; + + fs_set_vol_flags(sb, VOL_DIRTY); + + /* (1) update the directory entry */ + remove_file(inode, &dir, dentry); + + /* (2) free the clusters */ + p_fs->fs_func->free_cluster(sb, &clu_to_free, 1); + + fid->size = 0; + fid->start_clu = CLUSTER_32(~0); + fid->flags = (p_fs->vol_type == EXFAT)? 
0x03: 0x01; + +#if (DELAYED_SYNC == 0) + fs_sync(sb, 0); + fs_set_vol_flags(sb, VOL_CLEAN); +#endif + + if (p_fs->dev_ejected) + return FFS_MEDIAERR; + + return FFS_SUCCESS; +} /* end of ffsRemoveDir */ + +/*======================================================================*/ +/* Local Function Definitions */ +/*======================================================================*/ + +/* + * File System Management Functions + */ + +INT32 fs_init(void) +{ + /* critical check for system requirement on size of DENTRY_T structure */ + if (sizeof(DENTRY_T) != DENTRY_SIZE) { + return FFS_ALIGNMENTERR; + } + + if (sizeof(DOS_DENTRY_T) != DENTRY_SIZE) { + return FFS_ALIGNMENTERR; + } + + if (sizeof(EXT_DENTRY_T) != DENTRY_SIZE) { + return FFS_ALIGNMENTERR; + } + + if (sizeof(FILE_DENTRY_T) != DENTRY_SIZE) { + return FFS_ALIGNMENTERR; + } + + if (sizeof(STRM_DENTRY_T) != DENTRY_SIZE) { + return FFS_ALIGNMENTERR; + } + + if (sizeof(NAME_DENTRY_T) != DENTRY_SIZE) { + return FFS_ALIGNMENTERR; + } + + if (sizeof(BMAP_DENTRY_T) != DENTRY_SIZE) { + return FFS_ALIGNMENTERR; + } + + if (sizeof(CASE_DENTRY_T) != DENTRY_SIZE) { + return FFS_ALIGNMENTERR; + } + + if (sizeof(VOLM_DENTRY_T) != DENTRY_SIZE) { + return FFS_ALIGNMENTERR; + } + + return FFS_SUCCESS; +} /* end of fs_init */ + +INT32 fs_shutdown(void) +{ + return FFS_SUCCESS; +} /* end of fs_shutdown */ + +void fs_set_vol_flags(struct super_block *sb, UINT32 new_flag) +{ + PBR_SECTOR_T *p_pbr; + BPBEX_T *p_bpb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + if (p_fs->vol_flag == new_flag) + return; + + p_fs->vol_flag = new_flag; + + if (p_fs->vol_type == EXFAT) { + if (p_fs->pbr_bh == NULL) { + if (sector_read(sb, p_fs->PBR_sector, &(p_fs->pbr_bh), 1) != FFS_SUCCESS) + return; + } + + p_pbr = (PBR_SECTOR_T *) p_fs->pbr_bh->b_data; + p_bpb = (BPBEX_T *) p_pbr->bpb; + SET16(p_bpb->vol_flags, (UINT16) new_flag); + + /* XXX duyoung + what can we do here? 
(cuz fs_set_vol_flags() is void) */ + if ((new_flag == VOL_DIRTY) && (!buffer_dirty(p_fs->pbr_bh))) + sector_write(sb, p_fs->PBR_sector, p_fs->pbr_bh, 1); + else + sector_write(sb, p_fs->PBR_sector, p_fs->pbr_bh, 0); + } +} /* end of fs_set_vol_flags */ + +void fs_sync(struct super_block *sb, INT32 do_sync) +{ + if (do_sync) + bdev_sync(sb); +} /* end of fs_sync */ + +void fs_error(struct super_block *sb) +{ + struct exfat_mount_options *opts = &EXFAT_SB(sb)->options; + + if (opts->errors == EXFAT_ERRORS_PANIC) + panic("[EXFAT] Filesystem panic from previous error\n"); + else if ((opts->errors == EXFAT_ERRORS_RO) && !(sb->s_flags & MS_RDONLY)) { + sb->s_flags |= MS_RDONLY; + printk(KERN_ERR "[EXFAT] Filesystem has been set read-only\n"); + } +} + +/* + * Cluster Management Functions + */ + +INT32 clear_cluster(struct super_block *sb, UINT32 clu) +{ + UINT32 s, n; + INT32 ret = FFS_SUCCESS; + struct buffer_head *tmp_bh = NULL; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); + + if (clu == CLUSTER_32(0)) { /* FAT16 root_dir */ + s = p_fs->root_start_sector; + n = p_fs->data_start_sector; + } else { + s = START_SECTOR(clu); + n = s + p_fs->sectors_per_clu; + } + + for ( ; s < n; s++) { + if ((ret = sector_read(sb, s, &tmp_bh, 0)) != FFS_SUCCESS) + return ret; + + MEMSET((INT8 *) tmp_bh->b_data, 0x0, p_bd->sector_size); + if ((ret = sector_write(sb, s, tmp_bh, 0)) !=FFS_SUCCESS) + break; + } + + brelse(tmp_bh); + return ret; +} /* end of clear_cluster */ + +INT32 fat_alloc_cluster(struct super_block *sb, INT32 num_alloc, CHAIN_T *p_chain) +{ + INT32 i, num_clusters = 0; + UINT32 new_clu, last_clu = CLUSTER_32(~0), read_clu; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + new_clu = p_chain->dir; + if (new_clu == CLUSTER_32(~0)) + new_clu = p_fs->clu_srch_ptr; + else if (new_clu >= p_fs->num_clusters) + new_clu = 2; + + __set_sb_dirty(sb); + + p_chain->dir = CLUSTER_32(~0); + + for (i = 2; i < p_fs->num_clusters; i++) { + if (FAT_read(sb, new_clu, &read_clu) != 0) + return 0; + + if (read_clu == CLUSTER_32(0)) { + FAT_write(sb, new_clu, CLUSTER_32(~0)); + num_clusters++; + + if (p_chain->dir == CLUSTER_32(~0)) + p_chain->dir = new_clu; + else + FAT_write(sb, last_clu, new_clu); + + last_clu = new_clu; + + if ((--num_alloc) == 0) { + p_fs->clu_srch_ptr = new_clu; + if (p_fs->used_clusters != (UINT32) ~0) + p_fs->used_clusters += num_clusters; + + return(num_clusters); + } + } + if ((++new_clu) >= p_fs->num_clusters) + new_clu = 2; + } + + p_fs->clu_srch_ptr = new_clu; + if (p_fs->used_clusters != (UINT32) ~0) + p_fs->used_clusters += num_clusters; + + return(num_clusters); +} /* end of fat_alloc_cluster */ + +INT32 exfat_alloc_cluster(struct super_block *sb, INT32 num_alloc, CHAIN_T *p_chain) +{ + INT32 num_clusters = 0; + UINT32 hint_clu, new_clu, last_clu = CLUSTER_32(~0); + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + hint_clu = p_chain->dir; + if (hint_clu == CLUSTER_32(~0)) { + hint_clu = test_alloc_bitmap(sb, p_fs->clu_srch_ptr-2); + if (hint_clu == CLUSTER_32(~0)) + return 0; + } else if (hint_clu >= p_fs->num_clusters) { + hint_clu = 2; + p_chain->flags = 0x01; + } + + __set_sb_dirty(sb); + + p_chain->dir = CLUSTER_32(~0); + + while ((new_clu = test_alloc_bitmap(sb, hint_clu-2)) != CLUSTER_32(~0)) { + if (new_clu != hint_clu) { + if (p_chain->flags == 0x03) { + exfat_chain_cont_cluster(sb, p_chain->dir, num_clusters); + p_chain->flags = 0x01; + } + } + + if (set_alloc_bitmap(sb, new_clu-2) != FFS_SUCCESS) + return 0; + + num_clusters++; + + 
if (p_chain->flags == 0x01) + FAT_write(sb, new_clu, CLUSTER_32(~0)); + + if (p_chain->dir == CLUSTER_32(~0)) { + p_chain->dir = new_clu; + } else { + if (p_chain->flags == 0x01) + FAT_write(sb, last_clu, new_clu); + } + last_clu = new_clu; + + if ((--num_alloc) == 0) { + p_fs->clu_srch_ptr = hint_clu; + if (p_fs->used_clusters != (UINT32) ~0) + p_fs->used_clusters += num_clusters; + + p_chain->size += num_clusters; + return(num_clusters); + } + + hint_clu = new_clu + 1; + if (hint_clu >= p_fs->num_clusters) { + hint_clu = 2; + + if (p_chain->flags == 0x03) { + exfat_chain_cont_cluster(sb, p_chain->dir, num_clusters); + p_chain->flags = 0x01; + } + } + } + + p_fs->clu_srch_ptr = hint_clu; + if (p_fs->used_clusters != (UINT32) ~0) + p_fs->used_clusters += num_clusters; + + p_chain->size += num_clusters; + return(num_clusters); +} /* end of exfat_alloc_cluster */ + +void fat_free_cluster(struct super_block *sb, CHAIN_T *p_chain, INT32 do_relse) +{ + INT32 num_clusters = 0; + UINT32 clu, prev; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + INT32 i; + UINT32 sector; + + if ((p_chain->dir == CLUSTER_32(0)) || (p_chain->dir == CLUSTER_32(~0))) + return; + __set_sb_dirty(sb); + clu = p_chain->dir; + + if (p_chain->size <= 0) + return; + + do { + if (p_fs->dev_ejected) + break; + + if (do_relse) { + sector = START_SECTOR(clu); + for (i = 0; i < p_fs->sectors_per_clu; i++) { + buf_release(sb, sector+i); + } + } + + prev = clu; + if (FAT_read(sb, clu, &clu) == -1) + break; + + FAT_write(sb, prev, CLUSTER_32(0)); + num_clusters++; + + } while (clu != CLUSTER_32(~0)); + + if (p_fs->used_clusters != (UINT32) ~0) + p_fs->used_clusters -= num_clusters; +} /* end of fat_free_cluster */ + +void exfat_free_cluster(struct super_block *sb, CHAIN_T *p_chain, INT32 do_relse) +{ + INT32 num_clusters = 0; + UINT32 clu; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + INT32 i; + UINT32 sector; + + if ((p_chain->dir == CLUSTER_32(0)) || (p_chain->dir == CLUSTER_32(~0))) + return; + + if (p_chain->size <= 0) { + printk(KERN_ERR "[EXFAT] free_cluster : skip free-req clu:%u, " + "because of zero-size truncation\n" + ,p_chain->dir); + return; + } + + __set_sb_dirty(sb); + clu = p_chain->dir; + + if (p_chain->flags == 0x03) { + do { + if (do_relse) { + sector = START_SECTOR(clu); + for (i = 0; i < p_fs->sectors_per_clu; i++) { + buf_release(sb, sector+i); + } + } + + if (clr_alloc_bitmap(sb, clu-2) != FFS_SUCCESS) + break; + clu++; + + num_clusters++; + } while (num_clusters < p_chain->size); + } else { + do { + if (p_fs->dev_ejected) + break; + + if (do_relse) { + sector = START_SECTOR(clu); + for (i = 0; i < p_fs->sectors_per_clu; i++) { + buf_release(sb, sector+i); + } + } + + if (clr_alloc_bitmap(sb, clu-2) != FFS_SUCCESS) + break; + + if (FAT_read(sb, clu, &clu) == -1) + break; + num_clusters++; + } while ((clu != CLUSTER_32(0)) && (clu != CLUSTER_32(~0))); + } + + if (p_fs->used_clusters != (UINT32) ~0) + p_fs->used_clusters -= num_clusters; +} /* end of exfat_free_cluster */ + +UINT32 find_last_cluster(struct super_block *sb, CHAIN_T *p_chain) +{ + UINT32 clu, next; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + clu = p_chain->dir; + + if (p_chain->flags == 0x03) { + clu += p_chain->size - 1; + } else { + while((FAT_read(sb, clu, &next) == 0) && (next != CLUSTER_32(~0))) { + if (p_fs->dev_ejected) + break; + clu = next; + } + } + + return(clu); +} /* end of find_last_cluster */ + +INT32 count_num_clusters(struct super_block *sb, CHAIN_T *p_chain) +{ + INT32 i, count = 0; + UINT32 clu; + FS_INFO_T *p_fs = 
&(EXFAT_SB(sb)->fs_info); + + if ((p_chain->dir == CLUSTER_32(0)) || (p_chain->dir == CLUSTER_32(~0))) + return 0; + + clu = p_chain->dir; + + if (p_chain->flags == 0x03) { + count = p_chain->size; + } else { + for (i = 2; i < p_fs->num_clusters; i++) { + count++; + if (FAT_read(sb, clu, &clu) != 0) + return 0; + if (clu == CLUSTER_32(~0)) + break; + } + } + + return(count); +} /* end of count_num_clusters */ + +INT32 fat_count_used_clusters(struct super_block *sb) +{ + INT32 i, count = 0; + UINT32 clu; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + for (i = 2; i < p_fs->num_clusters; i++) { + if (FAT_read(sb, i, &clu) != 0) + break; + if (clu != CLUSTER_32(0)) + count++; + } + + return(count); +} /* end of fat_count_used_clusters */ + +INT32 exfat_count_used_clusters(struct super_block *sb) +{ + INT32 i, map_i, map_b, count = 0; + UINT8 k; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); + + map_i = map_b = 0; + + for (i = 2; i < p_fs->num_clusters; i += 8) { + k = *(((UINT8 *) p_fs->vol_amap[map_i]->b_data) + map_b); + count += used_bit[k]; + + if ((++map_b) >= p_bd->sector_size) { + map_i++; + map_b = 0; + } + } + + return(count); +} /* end of exfat_count_used_clusters */ + +void exfat_chain_cont_cluster(struct super_block *sb, UINT32 chain, INT32 len) +{ + if (len == 0) + return; + + while (len > 1) { + FAT_write(sb, chain, chain+1); + chain++; + len--; + } + FAT_write(sb, chain, CLUSTER_32(~0)); +} /* end of exfat_chain_cont_cluster */ + +/* + * Allocation Bitmap Management Functions + */ + +INT32 load_alloc_bitmap(struct super_block *sb) +{ + INT32 i, j, ret; + UINT32 map_size; + UINT32 type, sector; + CHAIN_T clu; + BMAP_DENTRY_T *ep; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); + + clu.dir = p_fs->root_dir; + clu.flags = 0x01; + + while (clu.dir != CLUSTER_32(~0)) { + if (p_fs->dev_ejected) + break; + + for (i = 0; i < p_fs->dentries_per_clu; i++) { + ep = (BMAP_DENTRY_T *) get_entry_in_dir(sb, &clu, i, NULL); + if (!ep) + return FFS_MEDIAERR; + + type = p_fs->fs_func->get_entry_type((DENTRY_T *) ep); + + if (type == TYPE_UNUSED) + break; + if (type != TYPE_BITMAP) + continue; + + if (ep->flags == 0x0) { + p_fs->map_clu = GET32_A(ep->start_clu); + map_size = (UINT32) GET64_A(ep->size); + + p_fs->map_sectors = ((map_size-1) >> p_bd->sector_size_bits) + 1; + + p_fs->vol_amap = (struct buffer_head **) MALLOC(sizeof(struct buffer_head *) * p_fs->map_sectors); + if (p_fs->vol_amap == NULL) + return FFS_MEMORYERR; + + sector = START_SECTOR(p_fs->map_clu); + + for (j = 0; j < p_fs->map_sectors; j++) { + p_fs->vol_amap[j] = NULL; + ret = sector_read(sb, sector+j, &(p_fs->vol_amap[j]), 1); + if (ret != FFS_SUCCESS) { + /* release all buffers and free vol_amap */ + i=0; + while (i < j) + brelse(p_fs->vol_amap[i++]); + + FREE(p_fs->vol_amap); + p_fs->vol_amap = NULL; + return ret; + } + } + + p_fs->pbr_bh = NULL; + return FFS_SUCCESS; + } + } + + if (FAT_read(sb, clu.dir, &(clu.dir)) != 0) + return FFS_MEDIAERR; + } + + return FFS_FORMATERR; +} /* end of load_alloc_bitmap */ + +void free_alloc_bitmap(struct super_block *sb) +{ + INT32 i; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + brelse(p_fs->pbr_bh); + + for (i = 0; i < p_fs->map_sectors; i++) { + __brelse(p_fs->vol_amap[i]); + } + + FREE(p_fs->vol_amap); + p_fs->vol_amap = NULL; +} /* end of free_alloc_bitmap */ + +INT32 set_alloc_bitmap(struct super_block *sb, UINT32 clu) +{ + INT32 i, b; + UINT32 sector; + FS_INFO_T *p_fs = 
&(EXFAT_SB(sb)->fs_info); + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); + + i = clu >> (p_bd->sector_size_bits + 3); + b = clu & ((p_bd->sector_size << 3) - 1); + + sector = START_SECTOR(p_fs->map_clu) + i; + + Bitmap_set((UINT8 *) p_fs->vol_amap[i]->b_data, b); + + return (sector_write(sb, sector, p_fs->vol_amap[i], 0)); +} /* end of set_alloc_bitmap */ + +INT32 clr_alloc_bitmap(struct super_block *sb, UINT32 clu) +{ + INT32 i, b; + UINT32 sector; +#if EXFAT_CONFIG_DISCARD + struct exfat_sb_info *sbi = EXFAT_SB(sb); + struct exfat_mount_options *opts = &sbi->options; + int ret; +#endif /* EXFAT_CONFIG_DISCARD */ + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); + + i = clu >> (p_bd->sector_size_bits + 3); + b = clu & ((p_bd->sector_size << 3) - 1); + + sector = START_SECTOR(p_fs->map_clu) + i; + + Bitmap_clear((UINT8 *) p_fs->vol_amap[i]->b_data, b); + + return (sector_write(sb, sector, p_fs->vol_amap[i], 0)); + +#if EXFAT_CONFIG_DISCARD + if (opts->discard) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) + ret = sb_issue_discard(sb, START_SECTOR(clu), (1 << p_fs->sectors_per_clu_bits)); +#else + ret = sb_issue_discard(sb, START_SECTOR(clu), (1 << p_fs->sectors_per_clu_bits), GFP_NOFS, 0); +#endif + if (ret == -EOPNOTSUPP) { + printk(KERN_WARNING "discard not supported by device, disabling"); + opts->discard = 0; + } + } +#endif /* EXFAT_CONFIG_DISCARD */ +} /* end of clr_alloc_bitmap */ + +UINT32 test_alloc_bitmap(struct super_block *sb, UINT32 clu) +{ + INT32 i, map_i, map_b; + UINT32 clu_base, clu_free; + UINT8 k, clu_mask; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); + + clu_base = (clu & ~(0x7)) + 2; + clu_mask = (1 << (clu - clu_base + 2)) - 1; + + map_i = clu >> (p_bd->sector_size_bits + 3); + map_b = (clu >> 3) & p_bd->sector_size_mask; + + for (i = 2; i < p_fs->num_clusters; i += 8) { + k = *(((UINT8 *) p_fs->vol_amap[map_i]->b_data) + map_b); + if (clu_mask > 0) { + k |= clu_mask; + clu_mask = 0; + } + if (k < 0xFF) { + clu_free = clu_base + free_bit[k]; + if (clu_free < p_fs->num_clusters) + return(clu_free); + } + clu_base += 8; + + if (((++map_b) >= p_bd->sector_size) || (clu_base >= p_fs->num_clusters)) { + if ((++map_i) >= p_fs->map_sectors) { + clu_base = 2; + map_i = 0; + } + map_b = 0; + } + } + + return(CLUSTER_32(~0)); +} /* end of test_alloc_bitmap */ + +void sync_alloc_bitmap(struct super_block *sb) +{ + INT32 i; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + if (p_fs->vol_amap == NULL) + return; + + for (i = 0; i < p_fs->map_sectors; i++) { + sync_dirty_buffer(p_fs->vol_amap[i]); + } +} /* end of sync_alloc_bitmap */ + +/* + * Upcase table Management Functions + */ +INT32 __load_upcase_table(struct super_block *sb, UINT32 sector, UINT32 num_sectors, UINT32 utbl_checksum) +{ + INT32 i, ret = FFS_ERROR; + UINT32 j; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); + struct buffer_head *tmp_bh = NULL; + + UINT8 skip = FALSE; + UINT32 index = 0; + UINT16 uni = 0; + UINT16 **upcase_table; + + UINT32 checksum = 0; + + upcase_table = p_fs->vol_utbl = (UINT16 **) MALLOC(UTBL_COL_COUNT * sizeof(UINT16 *)); + if(upcase_table == NULL) + return FFS_MEMORYERR; + MEMSET(upcase_table, 0, UTBL_COL_COUNT * sizeof(UINT16 *)); + + num_sectors += sector; + + while(sector < num_sectors) { + ret = sector_read(sb, sector, &tmp_bh, 1); + if (ret != FFS_SUCCESS) { + PRINTK("sector read (0x%X)fail\n", sector); + goto error; + } + sector++; + + for(i = 0; i 
< p_bd->sector_size && index <= 0xFFFF; i += 2) { + uni = GET16(((UINT8 *) tmp_bh->b_data)+i); + + checksum = ((checksum & 1) ? 0x80000000 : 0 ) + (checksum >> 1) + *(((UINT8 *) tmp_bh->b_data)+i); + checksum = ((checksum & 1) ? 0x80000000 : 0 ) + (checksum >> 1) + *(((UINT8 *) tmp_bh->b_data)+(i+1)); + + if(skip) { + PRINTK("skip from 0x%X ", index); + index += uni; + PRINTK("to 0x%X (amount of 0x%X)\n", index, uni); + skip = FALSE; + } else if(uni == index) + index++; + else if(uni == 0xFFFF) + skip = TRUE; + else { /* uni != index , uni != 0xFFFF */ + UINT16 col_index = get_col_index(index); + + if(upcase_table[col_index]== NULL) { + PRINTK("alloc = 0x%X\n", col_index); + upcase_table[col_index] = (UINT16 *) MALLOC(UTBL_ROW_COUNT * sizeof(UINT16)); + if(upcase_table[col_index] == NULL) { + ret = FFS_MEMORYERR; + goto error; + } + + for(j = 0 ; j < UTBL_ROW_COUNT ; j++) + upcase_table[col_index][j] = (col_index << LOW_INDEX_BIT) | j; + } + + upcase_table[col_index][get_row_index(index)] = uni; + index++; + } + } + } + if(index >= 0xFFFF && utbl_checksum == checksum) { + if(tmp_bh) + brelse(tmp_bh); + return FFS_SUCCESS; + } + ret = FFS_ERROR; +error: + if(tmp_bh) + brelse(tmp_bh); + free_upcase_table(sb); + return ret; +} + +INT32 __load_default_upcase_table(struct super_block *sb) +{ + INT32 i, ret = FFS_ERROR; + UINT32 j; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + UINT8 skip = FALSE; + UINT32 index = 0; + UINT16 uni = 0; + UINT16 **upcase_table; + + upcase_table = p_fs->vol_utbl = (UINT16 **) MALLOC(UTBL_COL_COUNT * sizeof(UINT16 *)); + if(upcase_table == NULL) + return FFS_MEMORYERR; + MEMSET(upcase_table, 0, UTBL_COL_COUNT * sizeof(UINT16 *)); + + for(i = 0; index <= 0xFFFF && i < NUM_UPCASE*2; i += 2) { + uni = GET16(uni_upcase + i); + if(skip) { + PRINTK("skip from 0x%X ", index); + index += uni; + PRINTK("to 0x%X (amount of 0x%X)\n", index, uni); + skip = FALSE; + } else if(uni == index) + index++; + else if(uni == 0xFFFF) + skip = TRUE; + else { /* uni != index , uni != 0xFFFF */ + UINT16 col_index = get_col_index(index); + + if(upcase_table[col_index]== NULL) { + PRINTK("alloc = 0x%X\n", col_index); + upcase_table[col_index] = (UINT16 *) MALLOC(UTBL_ROW_COUNT * sizeof(UINT16)); + if(upcase_table[col_index] == NULL) { + ret = FFS_MEMORYERR; + goto error; + } + + for(j = 0 ; j < UTBL_ROW_COUNT ; j++) + upcase_table[col_index][j] = (col_index << LOW_INDEX_BIT) | j; + } + + upcase_table[col_index][get_row_index(index)] = uni; + index ++; + } + } + + if(index >= 0xFFFF) + return FFS_SUCCESS; + +error: + /* FATAL error: default upcase table has error */ + free_upcase_table(sb); + return ret; +} + +INT32 load_upcase_table(struct super_block *sb) +{ + INT32 i; + UINT32 tbl_clu, tbl_size; + UINT32 type, sector, num_sectors; + CHAIN_T clu; + CASE_DENTRY_T *ep; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); + + clu.dir = p_fs->root_dir; + clu.flags = 0x01; + + if (p_fs->dev_ejected) + return FFS_MEDIAERR; + + while (clu.dir != CLUSTER_32(~0)) { + for (i = 0; i < p_fs->dentries_per_clu; i++) { + ep = (CASE_DENTRY_T *) get_entry_in_dir(sb, &clu, i, NULL); + if (!ep) + return FFS_MEDIAERR; + + type = p_fs->fs_func->get_entry_type((DENTRY_T *) ep); + + if (type == TYPE_UNUSED) + break; + if (type != TYPE_UPCASE) + continue; + + tbl_clu = GET32_A(ep->start_clu); + tbl_size = (UINT32) GET64_A(ep->size); + + sector = START_SECTOR(tbl_clu); + num_sectors = ((tbl_size-1) >> p_bd->sector_size_bits) + 1; + if(__load_upcase_table(sb, sector, 
num_sectors, GET32_A(ep->checksum)) != FFS_SUCCESS) + break; + else + return FFS_SUCCESS; + } + if (FAT_read(sb, clu.dir, &(clu.dir)) != 0) + return FFS_MEDIAERR; + } + /* load default upcase table */ + return __load_default_upcase_table(sb); +} /* end of load_upcase_table */ + +void free_upcase_table(struct super_block *sb) +{ + UINT32 i; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + UINT16 **upcase_table; + + upcase_table = p_fs->vol_utbl; + for(i = 0 ; i < UTBL_COL_COUNT ; i ++) + FREE(upcase_table[i]); + + FREE(p_fs->vol_utbl); + + p_fs->vol_utbl = NULL; +} /* end of free_upcase_table */ + +/* + * Directory Entry Management Functions + */ + +UINT32 fat_get_entry_type(DENTRY_T *p_entry) +{ + DOS_DENTRY_T *ep = (DOS_DENTRY_T *) p_entry; + + if (*(ep->name) == 0x0) + return TYPE_UNUSED; + + else if (*(ep->name) == 0xE5) + return TYPE_DELETED; + + else if (ep->attr == ATTR_EXTEND) + return TYPE_EXTEND; + + else if ((ep->attr & (ATTR_SUBDIR|ATTR_VOLUME)) == ATTR_VOLUME) + return TYPE_VOLUME; + + else if ((ep->attr & (ATTR_SUBDIR|ATTR_VOLUME)) == ATTR_SUBDIR) + return TYPE_DIR; + + return TYPE_FILE; +} /* end of fat_get_entry_type */ + +UINT32 exfat_get_entry_type(DENTRY_T *p_entry) +{ + FILE_DENTRY_T *ep = (FILE_DENTRY_T *) p_entry; + + if (ep->type == 0x0) { + return TYPE_UNUSED; + } else if (ep->type < 0x80) { + return TYPE_DELETED; + } else if (ep->type == 0x80) { + return TYPE_INVALID; + } else if (ep->type < 0xA0) { + if (ep->type == 0x81) { + return TYPE_BITMAP; + } else if (ep->type == 0x82) { + return TYPE_UPCASE; + } else if (ep->type == 0x83) { + return TYPE_VOLUME; + } else if (ep->type == 0x85) { + if (GET16_A(ep->attr) & ATTR_SUBDIR) + return TYPE_DIR; + else + return TYPE_FILE; + } + return TYPE_CRITICAL_PRI; + } else if (ep->type < 0xC0) { + if (ep->type == 0xA0) { + return TYPE_GUID; + } else if (ep->type == 0xA1) { + return TYPE_PADDING; + } else if (ep->type == 0xA2) { + return TYPE_ACLTAB; + } + return TYPE_BENIGN_PRI; + } else if (ep->type < 0xE0) { + if (ep->type == 0xC0) { + return TYPE_STREAM; + } else if (ep->type == 0xC1) { + return TYPE_EXTEND; + } else if (ep->type == 0xC2) { + return TYPE_ACL; + } + return TYPE_CRITICAL_SEC; + } + + return TYPE_BENIGN_SEC; +} /* end of exfat_get_entry_type */ + +void fat_set_entry_type(DENTRY_T *p_entry, UINT32 type) +{ + DOS_DENTRY_T *ep = (DOS_DENTRY_T *) p_entry; + + if (type == TYPE_UNUSED) + *(ep->name) = 0x0; + + else if (type == TYPE_DELETED) + *(ep->name) = 0xE5; + + else if (type == TYPE_EXTEND) + ep->attr = ATTR_EXTEND; + + else if (type == TYPE_DIR) + ep->attr = ATTR_SUBDIR; + + else if (type == TYPE_FILE) + ep->attr = ATTR_ARCHIVE; + + else if (type == TYPE_SYMLINK) + ep->attr = ATTR_ARCHIVE | ATTR_SYMLINK; +} /* end of fat_set_entry_type */ + +void exfat_set_entry_type(DENTRY_T *p_entry, UINT32 type) +{ + FILE_DENTRY_T *ep = (FILE_DENTRY_T *) p_entry; + + if (type == TYPE_UNUSED) { + ep->type = 0x0; + } else if (type == TYPE_DELETED) { + ep->type &= ~0x80; + } else if (type == TYPE_STREAM) { + ep->type = 0xC0; + } else if (type == TYPE_EXTEND) { + ep->type = 0xC1; + } else if (type == TYPE_BITMAP) { + ep->type = 0x81; + } else if (type == TYPE_UPCASE) { + ep->type = 0x82; + } else if (type == TYPE_VOLUME) { + ep->type = 0x83; + } else if (type == TYPE_DIR) { + ep->type = 0x85; + SET16_A(ep->attr, ATTR_SUBDIR); + } else if (type == TYPE_FILE) { + ep->type = 0x85; + SET16_A(ep->attr, ATTR_ARCHIVE); + } else if (type == TYPE_SYMLINK) { + ep->type = 0x85; + SET16_A(ep->attr, ATTR_ARCHIVE | ATTR_SYMLINK); + } +} /* end 
of exfat_set_entry_type */ + +UINT32 fat_get_entry_attr(DENTRY_T *p_entry) +{ + DOS_DENTRY_T *ep = (DOS_DENTRY_T *) p_entry; + return((UINT32) ep->attr); +} /* end of fat_get_entry_attr */ + +UINT32 exfat_get_entry_attr(DENTRY_T *p_entry) +{ + FILE_DENTRY_T *ep = (FILE_DENTRY_T *) p_entry; + return((UINT32) GET16_A(ep->attr)); +} /* end of exfat_get_entry_attr */ + +void fat_set_entry_attr(DENTRY_T *p_entry, UINT32 attr) +{ + DOS_DENTRY_T *ep = (DOS_DENTRY_T *) p_entry; + ep->attr = (UINT8) attr; +} /* end of fat_set_entry_attr */ + +void exfat_set_entry_attr(DENTRY_T *p_entry, UINT32 attr) +{ + FILE_DENTRY_T *ep = (FILE_DENTRY_T *) p_entry; + SET16_A(ep->attr, (UINT16) attr); +} /* end of exfat_set_entry_attr */ + +UINT8 fat_get_entry_flag(DENTRY_T *p_entry) +{ + return 0x01; +} /* end of fat_get_entry_flag */ + +UINT8 exfat_get_entry_flag(DENTRY_T *p_entry) +{ + STRM_DENTRY_T *ep = (STRM_DENTRY_T *) p_entry; + return(ep->flags); +} /* end of exfat_get_entry_flag */ + +void fat_set_entry_flag(DENTRY_T *p_entry, UINT8 flags) +{ +} /* end of fat_set_entry_flag */ + +void exfat_set_entry_flag(DENTRY_T *p_entry, UINT8 flags) +{ + STRM_DENTRY_T *ep = (STRM_DENTRY_T *) p_entry; + ep->flags = flags; +} /* end of exfat_set_entry_flag */ + +UINT32 fat_get_entry_clu0(DENTRY_T *p_entry) +{ + DOS_DENTRY_T *ep = (DOS_DENTRY_T *) p_entry; + return((GET32_A(ep->start_clu_hi) << 16) | GET16_A(ep->start_clu_lo)); +} /* end of fat_get_entry_clu0 */ + +UINT32 exfat_get_entry_clu0(DENTRY_T *p_entry) +{ + STRM_DENTRY_T *ep = (STRM_DENTRY_T *) p_entry; + return(GET32_A(ep->start_clu)); +} /* end of exfat_get_entry_clu0 */ + +void fat_set_entry_clu0(DENTRY_T *p_entry, UINT32 start_clu) +{ + DOS_DENTRY_T *ep = (DOS_DENTRY_T *) p_entry; + SET16_A(ep->start_clu_lo, CLUSTER_16(start_clu)); + SET16_A(ep->start_clu_hi, CLUSTER_16(start_clu >> 16)); +} /* end of fat_set_entry_clu0 */ + +void exfat_set_entry_clu0(DENTRY_T *p_entry, UINT32 start_clu) +{ + STRM_DENTRY_T *ep = (STRM_DENTRY_T *) p_entry; + SET32_A(ep->start_clu, start_clu); +} /* end of exfat_set_entry_clu0 */ + +UINT64 fat_get_entry_size(DENTRY_T *p_entry) +{ + DOS_DENTRY_T *ep = (DOS_DENTRY_T *) p_entry; + return((UINT64) GET32_A(ep->size)); +} /* end of fat_get_entry_size */ + +UINT64 exfat_get_entry_size(DENTRY_T *p_entry) +{ + STRM_DENTRY_T *ep = (STRM_DENTRY_T *) p_entry; + return(GET64_A(ep->valid_size)); +} /* end of exfat_get_entry_size */ + +void fat_set_entry_size(DENTRY_T *p_entry, UINT64 size) +{ + DOS_DENTRY_T *ep = (DOS_DENTRY_T *) p_entry; + SET32_A(ep->size, (UINT32) size); +} /* end of fat_set_entry_size */ + +void exfat_set_entry_size(DENTRY_T *p_entry, UINT64 size) +{ + STRM_DENTRY_T *ep = (STRM_DENTRY_T *) p_entry; + SET64_A(ep->valid_size, size); + SET64_A(ep->size, size); +} /* end of exfat_set_entry_size */ + +void fat_get_entry_time(DENTRY_T *p_entry, TIMESTAMP_T *tp, UINT8 mode) +{ + UINT16 t = 0x00, d = 0x21; + DOS_DENTRY_T *ep = (DOS_DENTRY_T *) p_entry; + + switch (mode) { + case TM_CREATE: + t = GET16_A(ep->create_time); + d = GET16_A(ep->create_date); + break; + case TM_MODIFY: + t = GET16_A(ep->modify_time); + d = GET16_A(ep->modify_date); + break; + } + + tp->sec = (t & 0x001F) << 1; + tp->min = (t >> 5) & 0x003F; + tp->hour = (t >> 11); + tp->day = (d & 0x001F); + tp->mon = (d >> 5) & 0x000F; + tp->year = (d >> 9); +} /* end of fat_get_entry_time */ + +void exfat_get_entry_time(DENTRY_T *p_entry, TIMESTAMP_T *tp, UINT8 mode) +{ + UINT16 t = 0x00, d = 0x21; + FILE_DENTRY_T *ep = (FILE_DENTRY_T *) p_entry; + + switch (mode) 
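+ /* The 16-bit FAT time and date words are packed as
+ * time = hours<<11 | minutes<<5 | seconds/2 and
+ * date = year<<9 | month<<5 | day, which is what the
+ * shift/mask code in these get/set helpers implements. */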
{ + case TM_CREATE: + t = GET16_A(ep->create_time); + d = GET16_A(ep->create_date); + break; + case TM_MODIFY: + t = GET16_A(ep->modify_time); + d = GET16_A(ep->modify_date); + break; + case TM_ACCESS: + t = GET16_A(ep->access_time); + d = GET16_A(ep->access_date); + break; + } + + tp->sec = (t & 0x001F) << 1; + tp->min = (t >> 5) & 0x003F; + tp->hour = (t >> 11); + tp->day = (d & 0x001F); + tp->mon = (d >> 5) & 0x000F; + tp->year = (d >> 9); +} /* end of exfat_get_entry_time */ + +void fat_set_entry_time(DENTRY_T *p_entry, TIMESTAMP_T *tp, UINT8 mode) +{ + UINT16 t, d; + DOS_DENTRY_T *ep = (DOS_DENTRY_T *) p_entry; + + t = (tp->hour << 11) | (tp->min << 5) | (tp->sec >> 1); + d = (tp->year << 9) | (tp->mon << 5) | tp->day; + + switch (mode) { + case TM_CREATE: + SET16_A(ep->create_time, t); + SET16_A(ep->create_date, d); + break; + case TM_MODIFY: + SET16_A(ep->modify_time, t); + SET16_A(ep->modify_date, d); + break; + } +} /* end of fat_set_entry_time */ + +void exfat_set_entry_time(DENTRY_T *p_entry, TIMESTAMP_T *tp, UINT8 mode) +{ + UINT16 t, d; + FILE_DENTRY_T *ep = (FILE_DENTRY_T *) p_entry; + + t = (tp->hour << 11) | (tp->min << 5) | (tp->sec >> 1); + d = (tp->year << 9) | (tp->mon << 5) | tp->day; + + switch (mode) { + case TM_CREATE: + SET16_A(ep->create_time, t); + SET16_A(ep->create_date, d); + break; + case TM_MODIFY: + SET16_A(ep->modify_time, t); + SET16_A(ep->modify_date, d); + break; + case TM_ACCESS: + SET16_A(ep->access_time, t); + SET16_A(ep->access_date, d); + break; + } +} /* end of exfat_set_entry_time */ + +INT32 fat_init_dir_entry(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, UINT32 type, + UINT32 start_clu, UINT64 size) +{ + UINT32 sector; + DOS_DENTRY_T *dos_ep; + + dos_ep = (DOS_DENTRY_T *) get_entry_in_dir(sb, p_dir, entry, §or); + if (!dos_ep) + return FFS_MEDIAERR; + + init_dos_entry(dos_ep, type, start_clu); + buf_modify(sb, sector); + + return FFS_SUCCESS; +} /* end of fat_init_dir_entry */ + +INT32 exfat_init_dir_entry(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, UINT32 type, + UINT32 start_clu, UINT64 size) +{ + UINT32 sector; + UINT8 flags; + FILE_DENTRY_T *file_ep; + STRM_DENTRY_T *strm_ep; + + flags = (type == TYPE_FILE) ? 
0x01 : 0x03; + + /* we cannot use get_entry_set_in_dir here because file ep is not initialized yet */ + file_ep = (FILE_DENTRY_T *) get_entry_in_dir(sb, p_dir, entry, &sector); + if (!file_ep) + return FFS_MEDIAERR; + + strm_ep = (STRM_DENTRY_T *) get_entry_in_dir(sb, p_dir, entry+1, &sector); + if (!strm_ep) + return FFS_MEDIAERR; + + init_file_entry(file_ep, type); + buf_modify(sb, sector); + + init_strm_entry(strm_ep, flags, start_clu, size); + buf_modify(sb, sector); + + return FFS_SUCCESS; +} /* end of exfat_init_dir_entry */ + +INT32 fat_init_ext_entry(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, INT32 num_entries, + UNI_NAME_T *p_uniname, DOS_NAME_T *p_dosname) +{ + INT32 i; + UINT32 sector; + UINT8 chksum; + UINT16 *uniname = p_uniname->name; + DOS_DENTRY_T *dos_ep; + EXT_DENTRY_T *ext_ep; + + dos_ep = (DOS_DENTRY_T *) get_entry_in_dir(sb, p_dir, entry, &sector); + if (!dos_ep) + return FFS_MEDIAERR; + + dos_ep->lcase = p_dosname->name_case; + MEMCPY(dos_ep->name, p_dosname->name, DOS_NAME_LENGTH); + buf_modify(sb, sector); + + if ((--num_entries) > 0) { + chksum = calc_checksum_1byte((void *) dos_ep->name, DOS_NAME_LENGTH, 0); + + for (i = 1; i < num_entries; i++) { + ext_ep = (EXT_DENTRY_T *) get_entry_in_dir(sb, p_dir, entry-i, &sector); + if (!ext_ep) + return FFS_MEDIAERR; + + init_ext_entry(ext_ep, i, chksum, uniname); + buf_modify(sb, sector); + uniname += 13; + } + + ext_ep = (EXT_DENTRY_T *) get_entry_in_dir(sb, p_dir, entry-i, &sector); + if (!ext_ep) + return FFS_MEDIAERR; + + init_ext_entry(ext_ep, i+0x40, chksum, uniname); + buf_modify(sb, sector); + } + + return FFS_SUCCESS; +} /* end of fat_init_ext_entry */ + +INT32 exfat_init_ext_entry(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, INT32 num_entries, + UNI_NAME_T *p_uniname, DOS_NAME_T *p_dosname) +{ + INT32 i; + UINT32 sector; + UINT16 *uniname = p_uniname->name; + FILE_DENTRY_T *file_ep; + STRM_DENTRY_T *strm_ep; + NAME_DENTRY_T *name_ep; + + file_ep = (FILE_DENTRY_T *) get_entry_in_dir(sb, p_dir, entry, &sector); + if (!file_ep) + return FFS_MEDIAERR; + + file_ep->num_ext = (UINT8)(num_entries - 1); + buf_modify(sb, sector); + + strm_ep = (STRM_DENTRY_T *) get_entry_in_dir(sb, p_dir, entry+1, &sector); + if (!strm_ep) + return FFS_MEDIAERR; + + strm_ep->name_len = p_uniname->name_len; + SET16_A(strm_ep->name_hash, p_uniname->name_hash); + buf_modify(sb, sector); + + for (i = 2; i < num_entries; i++) { + name_ep = (NAME_DENTRY_T *) get_entry_in_dir(sb, p_dir, entry+i, &sector); + if (!name_ep) + return FFS_MEDIAERR; + + init_name_entry(name_ep, uniname); + buf_modify(sb, sector); + uniname += 15; + } + + update_dir_checksum(sb, p_dir, entry); + + return FFS_SUCCESS; +} /* end of exfat_init_ext_entry */ + +void init_dos_entry(DOS_DENTRY_T *ep, UINT32 type, UINT32 start_clu) +{ + TIMESTAMP_T tm, *tp; + + fat_set_entry_type((DENTRY_T *) ep, type); + SET16_A(ep->start_clu_lo, CLUSTER_16(start_clu)); + SET16_A(ep->start_clu_hi, CLUSTER_16(start_clu >> 16)); + SET32_A(ep->size, 0); + + tp = tm_current(&tm); + fat_set_entry_time((DENTRY_T *) ep, tp, TM_CREATE); + fat_set_entry_time((DENTRY_T *) ep, tp, TM_MODIFY); + SET16_A(ep->access_date, 0); + ep->create_time_ms = 0; +} /* end of init_dos_entry */ + +void init_ext_entry(EXT_DENTRY_T *ep, INT32 order, UINT8 chksum, UINT16 *uniname) +{ + INT32 i; + UINT8 end = FALSE; + + fat_set_entry_type((DENTRY_T *) ep, TYPE_EXTEND); + ep->order = (UINT8) order; + ep->sysid = 0; + ep->checksum = chksum; + SET16_A(ep->start_clu, 0); + + for (i = 0; i < 10; i += 2) { + if (!end) { + 
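+ /* A VFAT extended (long-name) entry stores 13 UTF-16 code units split
+ * across three fields (5 + 6 + 2 characters); once the terminating NUL
+ * has been copied the remaining slots are padded with 0xFFFF, which is
+ * what these three copy loops implement. */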
SET16(ep->unicode_0_4+i, *uniname); + if (*uniname == 0x0) + end = TRUE; + else + uniname++; + } else { + SET16(ep->unicode_0_4+i, 0xFFFF); + } + } + + for (i = 0; i < 12; i += 2) { + if (!end) { + SET16_A(ep->unicode_5_10+i, *uniname); + if (*uniname == 0x0) + end = TRUE; + else + uniname++; + } else { + SET16_A(ep->unicode_5_10+i, 0xFFFF); + } + } + + for (i = 0; i < 4; i += 2) { + if (!end) { + SET16_A(ep->unicode_11_12+i, *uniname); + if (*uniname == 0x0) + end = TRUE; + else + uniname++; + } else { + SET16_A(ep->unicode_11_12+i, 0xFFFF); + } + } +} /* end of init_ext_entry */ + +void init_file_entry(FILE_DENTRY_T *ep, UINT32 type) +{ + TIMESTAMP_T tm, *tp; + + exfat_set_entry_type((DENTRY_T *) ep, type); + + tp = tm_current(&tm); + exfat_set_entry_time((DENTRY_T *) ep, tp, TM_CREATE); + exfat_set_entry_time((DENTRY_T *) ep, tp, TM_MODIFY); + exfat_set_entry_time((DENTRY_T *) ep, tp, TM_ACCESS); + ep->create_time_ms = 0; + ep->modify_time_ms = 0; + ep->access_time_ms = 0; +} /* end of init_file_entry */ + +void init_strm_entry(STRM_DENTRY_T *ep, UINT8 flags, UINT32 start_clu, UINT64 size) +{ + exfat_set_entry_type((DENTRY_T *) ep, TYPE_STREAM); + ep->flags = flags; + SET32_A(ep->start_clu, start_clu); + SET64_A(ep->valid_size, size); + SET64_A(ep->size, size); +} /* end of init_strm_entry */ + +void init_name_entry(NAME_DENTRY_T *ep, UINT16 *uniname) +{ + INT32 i; + + exfat_set_entry_type((DENTRY_T *) ep, TYPE_EXTEND); + ep->flags = 0x0; + + for (i = 0; i < 30; i++, i++) { + SET16_A(ep->unicode_0_14+i, *uniname); + if (*uniname == 0x0) + break; + uniname++; + } +} /* end of init_name_entry */ + +void fat_delete_dir_entry(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, INT32 order, INT32 num_entries) +{ + INT32 i; + UINT32 sector; + DENTRY_T *ep; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + for (i = num_entries-1; i >= order; i--) { + ep = get_entry_in_dir(sb, p_dir, entry-i, §or); + if (!ep) + return; + + p_fs->fs_func->set_entry_type(ep, TYPE_DELETED); + buf_modify(sb, sector); + } +} /* end of fat_delete_dir_entry */ + +void exfat_delete_dir_entry(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, INT32 order, INT32 num_entries) +{ + INT32 i; + UINT32 sector; + DENTRY_T *ep; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + for (i = order; i < num_entries; i++) { + ep = get_entry_in_dir(sb, p_dir, entry+i, §or); + if (!ep) + return; + + p_fs->fs_func->set_entry_type(ep, TYPE_DELETED); + buf_modify(sb, sector); + } +} /* end of exfat_delete_dir_entry */ + +void update_dir_checksum(struct super_block *sb, CHAIN_T *p_dir, INT32 entry) +{ + INT32 i, num_entries; + UINT32 sector; + UINT16 chksum; + FILE_DENTRY_T *file_ep; + DENTRY_T *ep; + + file_ep = (FILE_DENTRY_T *) get_entry_in_dir(sb, p_dir, entry, §or); + if (!file_ep) + return; + + buf_lock(sb, sector); + + num_entries = (INT32) file_ep->num_ext + 1; + chksum = calc_checksum_2byte((void *) file_ep, DENTRY_SIZE, 0, CS_DIR_ENTRY); + + for (i = 1; i < num_entries; i++) { + ep = get_entry_in_dir(sb, p_dir, entry+i, NULL); + if (!ep) { + buf_unlock(sb, sector); + return; + } + + chksum = calc_checksum_2byte((void *) ep, DENTRY_SIZE, chksum, CS_DEFAULT); + } + + SET16_A(file_ep->checksum, chksum); + buf_modify(sb, sector); + buf_unlock(sb, sector); +} /* end of update_dir_checksum */ + +void update_dir_checksum_with_entry_set (struct super_block *sb, ENTRY_SET_CACHE_T *es) +{ + DENTRY_T *ep; + UINT16 chksum = 0; + INT32 chksum_type = CS_DIR_ENTRY, i; + + ep = (DENTRY_T *)&(es->__buf); + for (i=0; i < es->num_entries; i++) { + 
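+ /* The exFAT entry-set checksum is a 16-bit rotate-right-by-one sum over
+ * every byte of every entry in the set; CS_DIR_ENTRY makes
+ * calc_checksum_2byte() skip bytes 2 and 3 of the first (file) entry,
+ * i.e. the checksum field itself. */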
PRINTK ("update_dir_checksum_with_entry_set ep %p\n", ep); + chksum = calc_checksum_2byte((void *) ep, DENTRY_SIZE, chksum, chksum_type); + ep++; + chksum_type = CS_DEFAULT; + } + + ep = (DENTRY_T *)&(es->__buf); + SET16_A(((FILE_DENTRY_T *)ep)->checksum, chksum); + write_whole_entry_set(sb, es); +} + +static INT32 _walk_fat_chain (struct super_block *sb, CHAIN_T *p_dir, INT32 byte_offset, UINT32 *clu) +{ + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + INT32 clu_offset; + UINT32 cur_clu; + + clu_offset = byte_offset >> p_fs->cluster_size_bits; + cur_clu = p_dir->dir; + + if (p_dir->flags == 0x03) { + cur_clu += clu_offset; + } else { + while (clu_offset > 0) { + if (FAT_read(sb, cur_clu, &cur_clu) == -1) + return FFS_MEDIAERR; + clu_offset--; + } + } + + if (clu) + *clu = cur_clu; + return FFS_SUCCESS; +} +INT32 find_location(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, UINT32 *sector, INT32 *offset) +{ + INT32 off, ret; + UINT32 clu=0; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); + + off = entry << DENTRY_SIZE_BITS; + + if (p_dir->dir == CLUSTER_32(0)) { /* FAT16 root_dir */ + *offset = off & p_bd->sector_size_mask; + *sector = off >> p_bd->sector_size_bits; + *sector += p_fs->root_start_sector; + } else { + ret =_walk_fat_chain(sb, p_dir, off, &clu); + if (ret != FFS_SUCCESS) + return ret; + + off &= p_fs->cluster_size - 1; /* byte offset in cluster */ + + *offset = off & p_bd->sector_size_mask; /* byte offset in sector */ + *sector = off >> p_bd->sector_size_bits; /* sector offset in cluster */ + *sector += START_SECTOR(clu); + } + return FFS_SUCCESS; +} /* end of find_location */ + +DENTRY_T *get_entry_with_sector(struct super_block *sb, UINT32 sector, INT32 offset) +{ + UINT8 *buf; + + buf = buf_getblk(sb, sector); + + if (buf == NULL) + return NULL; + + return((DENTRY_T *)(buf + offset)); +} /* end of get_entry_with_sector */ + +DENTRY_T *get_entry_in_dir(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, UINT32 *sector) +{ + INT32 off; + UINT32 sec; + UINT8 *buf; + + if (find_location(sb, p_dir, entry, &sec, &off) != FFS_SUCCESS) + return NULL; + + buf = buf_getblk(sb, sec); + + if (buf == NULL) + return NULL; + + if (sector != NULL) + *sector = sec; + return((DENTRY_T *)(buf + off)); +} /* end of get_entry_in_dir */ + + +/* returns a set of dentries for a file or dir. + * Note that this is a copy (dump) of dentries so that user should call write_entry_set() + * to apply changes made in this entry set to the real device. + * in: + * sb+p_dir+entry: indicates a file/dir + * type: specifies how many dentries should be included. + * out: + * file_ep: will point the first dentry(= file dentry) on success + * return: + * pointer of entry set on success, + * NULL on failure. 
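+ * (ES_ALL_ENTRIES pulls in the whole set, i.e. num_ext + 1 entries:
+ * the file entry, the stream entry and every name entry)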
+ */ + +#define ES_MODE_STARTED 0 +#define ES_MODE_GET_FILE_ENTRY 1 +#define ES_MODE_GET_STRM_ENTRY 2 +#define ES_MODE_GET_NAME_ENTRY 3 +#define ES_MODE_GET_CRITICAL_SEC_ENTRY 4 +ENTRY_SET_CACHE_T *get_entry_set_in_dir (struct super_block *sb, CHAIN_T *p_dir, INT32 entry, UINT32 type, DENTRY_T **file_ep) +{ + INT32 off, ret, byte_offset; + UINT32 clu=0; + UINT32 sec, entry_type; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); + ENTRY_SET_CACHE_T *es = NULL; + DENTRY_T *ep, *pos; + UINT8 *buf; + UINT8 num_entries; + INT32 mode = ES_MODE_STARTED; + + PRINTK("get_entry_set_in_dir entered\n"); + PRINTK("p_dir dir %u flags %x size %d\n", p_dir->dir, p_dir->flags, p_dir->size); + + byte_offset = entry << DENTRY_SIZE_BITS; + ret = _walk_fat_chain(sb, p_dir, byte_offset, &clu); + if (ret != FFS_SUCCESS) + return NULL; + + + byte_offset &= p_fs->cluster_size - 1; /* byte offset in cluster */ + + off = byte_offset & p_bd->sector_size_mask; /* byte offset in sector */ + sec = byte_offset >> p_bd->sector_size_bits; /* sector offset in cluster */ + sec += START_SECTOR(clu); + + buf = buf_getblk(sb, sec); + if (buf == NULL) + goto err_out; + + + ep = (DENTRY_T *)(buf + off); + entry_type = p_fs->fs_func->get_entry_type(ep); + + if ((entry_type != TYPE_FILE) + && (entry_type != TYPE_DIR)) + goto err_out; + + if (type == ES_ALL_ENTRIES) + num_entries = ((FILE_DENTRY_T *)ep)->num_ext+1; + else + num_entries = type; + + PRINTK("trying to malloc %x bytes for %d entries\n", offsetof(ENTRY_SET_CACHE_T, __buf) + (num_entries) * sizeof(DENTRY_T), num_entries); + es = MALLOC(offsetof(ENTRY_SET_CACHE_T, __buf) + (num_entries) * sizeof(DENTRY_T)); + if (es == NULL) + goto err_out; + + es->num_entries = num_entries; + es->sector = sec; + es->offset = off; + es->alloc_flag = p_dir->flags; + + pos = (DENTRY_T *) &(es->__buf); + + while(num_entries) { + /* instead of copying whole sector, we will check every entry. + * this will provide minimum stability and consistency. 
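+ * The switch below walks a small state machine over the set:
+ * FILE/DIR -> STREAM -> one or more NAME (TYPE_EXTEND) entries,
+ * optionally followed by other critical secondary entries; any
+ * unexpected entry type aborts the copy.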
+ */ + + entry_type = p_fs->fs_func->get_entry_type(ep); + + if ((entry_type == TYPE_UNUSED) || (entry_type == TYPE_DELETED)) + goto err_out; + + switch(mode) { + case ES_MODE_STARTED: + if ((entry_type == TYPE_FILE) || (entry_type == TYPE_DIR)) + mode = ES_MODE_GET_FILE_ENTRY; + else + goto err_out; + break; + case ES_MODE_GET_FILE_ENTRY: + if (entry_type == TYPE_STREAM) + mode = ES_MODE_GET_STRM_ENTRY; + else + goto err_out; + break; + case ES_MODE_GET_STRM_ENTRY: + if (entry_type == TYPE_EXTEND) + mode = ES_MODE_GET_NAME_ENTRY; + else + goto err_out; + break; + case ES_MODE_GET_NAME_ENTRY: + if (entry_type == TYPE_EXTEND) + break; + else if (entry_type == TYPE_STREAM) + goto err_out; + else if (entry_type & TYPE_CRITICAL_SEC) + mode = ES_MODE_GET_CRITICAL_SEC_ENTRY; + else + goto err_out; + break; + case ES_MODE_GET_CRITICAL_SEC_ENTRY: + if ((entry_type == TYPE_EXTEND) || (entry_type == TYPE_STREAM)) + goto err_out; + else if ((entry_type & TYPE_CRITICAL_SEC) != TYPE_CRITICAL_SEC) + goto err_out; + break; + } + + COPY_DENTRY(pos, ep); + + if (--num_entries == 0) + break; + + if (((off + DENTRY_SIZE) & p_bd->sector_size_mask) < (off & p_bd->sector_size_mask)) { + /* get the next sector */ + if (IS_LAST_SECTOR_IN_CLUSTER(sec)) { + if (es->alloc_flag == 0x03) { + clu++; + } else { + if (FAT_read(sb, clu, &clu) == -1) + goto err_out; + } + sec = START_SECTOR(clu); + } else { + sec++; + } + buf = buf_getblk(sb, sec); + if (buf == NULL) + goto err_out; + off = 0; + ep = (DENTRY_T *)(buf); + } else { + ep++; + off += DENTRY_SIZE; + } + pos++; + } + + if (file_ep) + *file_ep = (DENTRY_T *)&(es->__buf); + + PRINTK("es sec %u offset %d flags %d, num_entries %u buf ptr %p\n", + es->sector, es->offset, es->alloc_flag, es->num_entries, &(es->__buf)); + PRINTK("get_entry_set_in_dir exited %p\n", es); + return es; +err_out: + PRINTK("get_entry_set_in_dir exited NULL (es %p)\n", es); + FREE(es); + return NULL; +} + +void release_entry_set (ENTRY_SET_CACHE_T *es) +{ + PRINTK("release_entry_set %p\n", es); + FREE(es); +} + + +static INT32 __write_partial_entries_in_entry_set (struct super_block *sb, ENTRY_SET_CACHE_T *es, UINT32 sec, INT32 off, UINT32 count) +{ + INT32 num_entries, buf_off = (off - es->offset); + UINT32 remaining_byte_in_sector, copy_entries; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); + UINT32 clu; + UINT8 *buf, *esbuf = (UINT8 *)&(es->__buf); + + PRINTK("__write_partial_entries_in_entry_set entered\n"); + PRINTK("es %p sec %u off %d count %d\n", es, sec, off, count); + num_entries = count; + + while(num_entries) { + /* white per sector base */ + remaining_byte_in_sector = (1 << p_bd->sector_size_bits) - off; + copy_entries = MIN(remaining_byte_in_sector>> DENTRY_SIZE_BITS , num_entries); + buf = buf_getblk(sb, sec); + if (buf == NULL) + goto err_out; + PRINTK("es->buf %p buf_off %u\n", esbuf, buf_off); + PRINTK("copying %d entries from %p to sector %u\n", copy_entries, (esbuf + buf_off), sec); + MEMCPY(buf + off, esbuf + buf_off, copy_entries << DENTRY_SIZE_BITS); + buf_modify(sb, sec); + num_entries -= copy_entries; + + if (num_entries) { + /* get next sector */ + if (IS_LAST_SECTOR_IN_CLUSTER(sec)) { + clu = GET_CLUSTER_FROM_SECTOR(sec); + if (es->alloc_flag == 0x03) { + clu++; + } else { + if (FAT_read(sb, clu, &clu) == -1) + goto err_out; + } + sec = START_SECTOR(clu); + } else { + sec++; + } + off = 0; + buf_off += copy_entries << DENTRY_SIZE_BITS; + } + } + + PRINTK("__write_partial_entries_in_entry_set exited successfully\n"); + 
return FFS_SUCCESS; +err_out: + PRINTK("__write_partial_entries_in_entry_set failed\n"); + return FFS_ERROR; +} + +/* write back all entries in entry set */ +INT32 write_whole_entry_set (struct super_block *sb, ENTRY_SET_CACHE_T *es) +{ + return (__write_partial_entries_in_entry_set(sb, es, es->sector,es->offset, es->num_entries)); +} + +/* write back some entries in entry set */ +INT32 write_partial_entries_in_entry_set (struct super_block *sb, ENTRY_SET_CACHE_T *es, DENTRY_T *ep, UINT32 count) +{ + INT32 ret, byte_offset, off; + UINT32 clu=0, sec; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); + CHAIN_T dir; + + /* vaidity check */ + if (ep + count > ((DENTRY_T *)&(es->__buf)) + es->num_entries) + return FFS_ERROR; + + dir.dir = GET_CLUSTER_FROM_SECTOR(es->sector); + dir.flags = es->alloc_flag; + dir.size = 0xffffffff; /* XXX */ + + byte_offset = (es->sector - START_SECTOR(dir.dir)) << p_bd->sector_size_bits; + byte_offset += ((INT32 *)ep - (INT32 *)&(es->__buf)) + es->offset; + + ret =_walk_fat_chain(sb, &dir, byte_offset, &clu); + if (ret != FFS_SUCCESS) + return ret; + byte_offset &= p_fs->cluster_size - 1; /* byte offset in cluster */ + off = byte_offset & p_bd->sector_size_mask; /* byte offset in sector */ + sec = byte_offset >> p_bd->sector_size_bits; /* sector offset in cluster */ + sec += START_SECTOR(clu); + return (__write_partial_entries_in_entry_set(sb, es, sec, off, count)); +} + +/* search EMPTY CONTINUOUS "num_entries" entries */ +INT32 search_deleted_or_unused_entry(struct super_block *sb, CHAIN_T *p_dir, INT32 num_entries) +{ + INT32 i, dentry, num_empty = 0; + INT32 dentries_per_clu; + UINT32 type; + CHAIN_T clu; + DENTRY_T *ep; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */ + dentries_per_clu = p_fs->dentries_in_root; + else + dentries_per_clu = p_fs->dentries_per_clu; + + if (p_fs->hint_uentry.dir == p_dir->dir) { + if (p_fs->hint_uentry.entry == -1) + return -1; + + clu.dir = p_fs->hint_uentry.clu.dir; + clu.size = p_fs->hint_uentry.clu.size; + clu.flags = p_fs->hint_uentry.clu.flags; + + dentry = p_fs->hint_uentry.entry; + } else { + p_fs->hint_uentry.entry = -1; + + clu.dir = p_dir->dir; + clu.size = p_dir->size; + clu.flags = p_dir->flags; + + dentry = 0; + } + + while (clu.dir != CLUSTER_32(~0)) { + if (p_fs->dev_ejected) + break; + + if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */ + i = dentry % dentries_per_clu; + else + i = dentry & (dentries_per_clu-1); + + for ( ; i < dentries_per_clu; i++, dentry++) { + ep = get_entry_in_dir(sb, &clu, i, NULL); + if (!ep) + return -1; + + type = p_fs->fs_func->get_entry_type(ep); + + if (type == TYPE_UNUSED) { + num_empty++; + if (p_fs->hint_uentry.entry == -1) { + p_fs->hint_uentry.dir = p_dir->dir; + p_fs->hint_uentry.entry = dentry; + + p_fs->hint_uentry.clu.dir = clu.dir; + p_fs->hint_uentry.clu.size = clu.size; + p_fs->hint_uentry.clu.flags = clu.flags; + } + } else if (type == TYPE_DELETED) { + num_empty++; + } else { + num_empty = 0; + } + + if (num_empty >= num_entries) { + p_fs->hint_uentry.dir = CLUSTER_32(~0); + p_fs->hint_uentry.entry = -1; + + if (p_fs->vol_type == EXFAT) + return(dentry - (num_entries-1)); + else + return(dentry); + } + } + + if (p_dir->dir == CLUSTER_32(0)) + break; /* FAT16 root_dir */ + + if (clu.flags == 0x03) { + if ((--clu.size) > 0) + clu.dir++; + else + clu.dir = CLUSTER_32(~0); + } else { + if (FAT_read(sb, clu.dir, &(clu.dir)) != 0) + return -1; + } + } + + return -1; +} /* end 
of search_deleted_or_unused_entry */ + +INT32 find_empty_entry(struct inode *inode, CHAIN_T *p_dir, INT32 num_entries) +{ + INT32 ret, dentry; + UINT32 last_clu, sector; + UINT64 size = 0; + CHAIN_T clu; + DENTRY_T *ep = NULL; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + FILE_ID_T *fid = &(EXFAT_I(inode)->fid); + + if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */ + return(search_deleted_or_unused_entry(sb, p_dir, num_entries)); + + while ((dentry = search_deleted_or_unused_entry(sb, p_dir, num_entries)) < 0) { + if (p_fs->dev_ejected) + break; + + if (p_fs->vol_type == EXFAT) { + if (p_dir->dir != p_fs->root_dir) { + size = i_size_read(inode); + } + } + + last_clu = find_last_cluster(sb, p_dir); + clu.dir = last_clu + 1; + clu.size = 0; + clu.flags = p_dir->flags; + + /* (1) allocate a cluster */ + ret = p_fs->fs_func->alloc_cluster(sb, 1, &clu); + if (ret < 1) + return -1; + + if (clear_cluster(sb, clu.dir) != FFS_SUCCESS) + return -1; + + /* (2) append to the FAT chain */ + if (clu.flags != p_dir->flags) { + exfat_chain_cont_cluster(sb, p_dir->dir, p_dir->size); + p_dir->flags = 0x01; + p_fs->hint_uentry.clu.flags = 0x01; + } + if (clu.flags == 0x01) + FAT_write(sb, last_clu, clu.dir); + + if (p_fs->hint_uentry.entry == -1) { + p_fs->hint_uentry.dir = p_dir->dir; + p_fs->hint_uentry.entry = p_dir->size << (p_fs->cluster_size_bits - DENTRY_SIZE_BITS); + + p_fs->hint_uentry.clu.dir = clu.dir; + p_fs->hint_uentry.clu.size = 0; + p_fs->hint_uentry.clu.flags = clu.flags; + } + p_fs->hint_uentry.clu.size++; + p_dir->size++; + + /* (3) update the directory entry */ + if (p_fs->vol_type == EXFAT) { + if (p_dir->dir != p_fs->root_dir) { + size += p_fs->cluster_size; + + ep = get_entry_in_dir(sb, &(fid->dir), fid->entry+1, §or); + if (!ep) + return -1; + p_fs->fs_func->set_entry_size(ep, size); + p_fs->fs_func->set_entry_flag(ep, p_dir->flags); + buf_modify(sb, sector); + + update_dir_checksum(sb, &(fid->dir), fid->entry); + } + } + + i_size_write(inode, i_size_read(inode)+p_fs->cluster_size); + EXFAT_I(inode)->mmu_private += p_fs->cluster_size; + EXFAT_I(inode)->fid.size += p_fs->cluster_size; + EXFAT_I(inode)->fid.flags = p_dir->flags; + inode->i_blocks += 1 << (p_fs->cluster_size_bits - 9); + } + + return(dentry); +} /* end of find_empty_entry */ + +/* return values of fat_find_dir_entry() + >= 0 : return dir entiry position with the name in dir + -1 : (root dir, ".") it is the root dir itself + -2 : entry with the name does not exist */ +INT32 fat_find_dir_entry(struct super_block *sb, CHAIN_T *p_dir, UNI_NAME_T *p_uniname, INT32 num_entries, DOS_NAME_T *p_dosname, UINT32 type) +{ + INT32 i, dentry = 0, len; + INT32 order = 0, is_feasible_entry = TRUE, has_ext_entry = FALSE; + INT32 dentries_per_clu; + UINT32 entry_type; + UINT16 entry_uniname[14], *uniname = NULL, unichar; + CHAIN_T clu; + DENTRY_T *ep; + DOS_DENTRY_T *dos_ep; + EXT_DENTRY_T *ext_ep; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + if (p_dir->dir == p_fs->root_dir) { + if ((!nls_uniname_cmp(sb, p_uniname->name, (UINT16 *) UNI_CUR_DIR_NAME)) || + (!nls_uniname_cmp(sb, p_uniname->name, (UINT16 *) UNI_PAR_DIR_NAME))) + return -1; // special case, root directory itself + } + + if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */ + dentries_per_clu = p_fs->dentries_in_root; + else + dentries_per_clu = p_fs->dentries_per_clu; + + clu.dir = p_dir->dir; + clu.flags = p_dir->flags; + + while (clu.dir != CLUSTER_32(~0)) { + if (p_fs->dev_ejected) + break; + + for (i = 0; i < 
dentries_per_clu; i++, dentry++) { + ep = get_entry_in_dir(sb, &clu, i, NULL); + if (!ep) + return -2; + + entry_type = p_fs->fs_func->get_entry_type(ep); + + if ((entry_type == TYPE_FILE) || (entry_type == TYPE_DIR)) { + if ((type == TYPE_ALL) || (type == entry_type)) { + if (is_feasible_entry && has_ext_entry) + return(dentry); + + dos_ep = (DOS_DENTRY_T *) ep; + if (!nls_dosname_cmp(sb, p_dosname->name, dos_ep->name)) + return(dentry); + } + is_feasible_entry = TRUE; + has_ext_entry = FALSE; + } else if (entry_type == TYPE_EXTEND) { + if (is_feasible_entry) { + ext_ep = (EXT_DENTRY_T *) ep; + if (ext_ep->order > 0x40) { + order = (INT32)(ext_ep->order - 0x40); + uniname = p_uniname->name + 13 * (order-1); + } else { + order = (INT32) ext_ep->order; + uniname -= 13; + } + + len = extract_uni_name_from_ext_entry(ext_ep, entry_uniname, order); + + unichar = *(uniname+len); + *(uniname+len) = 0x0; + + if (nls_uniname_cmp(sb, uniname, entry_uniname)) { + is_feasible_entry = FALSE; + } + + *(uniname+len) = unichar; + } + has_ext_entry = TRUE; + } else if (entry_type == TYPE_UNUSED) { + return -2; + } else { + is_feasible_entry = TRUE; + has_ext_entry = FALSE; + } + } + + if (p_dir->dir == CLUSTER_32(0)) + break; /* FAT16 root_dir */ + + if (FAT_read(sb, clu.dir, &(clu.dir)) != 0) + return -2; + } + + return -2; +} /* end of fat_find_dir_entry */ + +/* return values of exfat_find_dir_entry() + >= 0 : return dir entiry position with the name in dir + -1 : (root dir, ".") it is the root dir itself + -2 : entry with the name does not exist */ +INT32 exfat_find_dir_entry(struct super_block *sb, CHAIN_T *p_dir, UNI_NAME_T *p_uniname, INT32 num_entries, DOS_NAME_T *p_dosname, UINT32 type) +{ + INT32 i, dentry = 0, num_ext_entries = 0, len; + INT32 order = 0, is_feasible_entry = FALSE; + INT32 dentries_per_clu, num_empty = 0; + UINT32 entry_type; + UINT16 entry_uniname[16], *uniname = NULL, unichar; + CHAIN_T clu; + DENTRY_T *ep; + FILE_DENTRY_T *file_ep; + STRM_DENTRY_T *strm_ep; + NAME_DENTRY_T *name_ep; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + if (p_dir->dir == p_fs->root_dir) { + if ((!nls_uniname_cmp(sb, p_uniname->name, (UINT16 *) UNI_CUR_DIR_NAME)) || + (!nls_uniname_cmp(sb, p_uniname->name, (UINT16 *) UNI_PAR_DIR_NAME))) + return -1; // special case, root directory itself + } + + if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */ + dentries_per_clu = p_fs->dentries_in_root; + else + dentries_per_clu = p_fs->dentries_per_clu; + + clu.dir = p_dir->dir; + clu.size = p_dir->size; + clu.flags = p_dir->flags; + + p_fs->hint_uentry.dir = p_dir->dir; + p_fs->hint_uentry.entry = -1; + + while (clu.dir != CLUSTER_32(~0)) { + if (p_fs->dev_ejected) + break; + + for (i = 0; i < dentries_per_clu; i++, dentry++) { + ep = get_entry_in_dir(sb, &clu, i, NULL); + if (!ep) + return -2; + + entry_type = p_fs->fs_func->get_entry_type(ep); + + if ((entry_type == TYPE_UNUSED) || (entry_type == TYPE_DELETED)) { + is_feasible_entry = FALSE; + + if (p_fs->hint_uentry.entry == -1) { + num_empty++; + + if (num_empty == 1) { + p_fs->hint_uentry.clu.dir = clu.dir; + p_fs->hint_uentry.clu.size = clu.size; + p_fs->hint_uentry.clu.flags = clu.flags; + } + if ((num_empty >= num_entries) || (entry_type == TYPE_UNUSED)) { + p_fs->hint_uentry.entry = dentry - (num_empty-1); + } + } + + if (entry_type == TYPE_UNUSED) { + return -2; + } + } else { + num_empty = 0; + + if ((entry_type == TYPE_FILE) || (entry_type == TYPE_DIR)) { + if ((type == TYPE_ALL) || (type == entry_type)) { + file_ep = (FILE_DENTRY_T *) ep; + 
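+ /* exFAT name match: the file entry gives the number of secondary
+ * entries (num_ext), the stream entry is checked by name length, and
+ * each name entry contributes up to 15 UTF-16 characters which are
+ * compared chunk by chunk below. */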
num_ext_entries = file_ep->num_ext; + is_feasible_entry = TRUE; + } else { + is_feasible_entry = FALSE; + } + } else if (entry_type == TYPE_STREAM) { + if (is_feasible_entry) { + strm_ep = (STRM_DENTRY_T *) ep; + if (p_uniname->name_len == strm_ep->name_len) { + order = 1; + } else { + is_feasible_entry = FALSE; + } + } + } else if (entry_type == TYPE_EXTEND) { + if (is_feasible_entry) { + name_ep = (NAME_DENTRY_T *) ep; + + if ((++order) == 2) + uniname = p_uniname->name; + else + uniname += 15; + + len = extract_uni_name_from_name_entry(name_ep, entry_uniname, order); + + unichar = *(uniname+len); + *(uniname+len) = 0x0; + + if (nls_uniname_cmp(sb, uniname, entry_uniname)) { + is_feasible_entry = FALSE; + } else if (order == num_ext_entries) { + p_fs->hint_uentry.dir = CLUSTER_32(~0); + p_fs->hint_uentry.entry = -1; + return(dentry - (num_ext_entries)); + } + + *(uniname+len) = unichar; + } + } else { + is_feasible_entry = FALSE; + } + } + } + + if (p_dir->dir == CLUSTER_32(0)) + break; /* FAT16 root_dir */ + + if (clu.flags == 0x03) { + if ((--clu.size) > 0) + clu.dir++; + else + clu.dir = CLUSTER_32(~0); + } else { + if (FAT_read(sb, clu.dir, &(clu.dir)) != 0) + return -2; + } + } + + return -2; +} /* end of exfat_find_dir_entry */ + +/* returns -1 on error */ +INT32 fat_count_ext_entries(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, DENTRY_T *p_entry) +{ + INT32 count = 0; + UINT8 chksum; + DOS_DENTRY_T *dos_ep = (DOS_DENTRY_T *) p_entry; + EXT_DENTRY_T *ext_ep; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + chksum = calc_checksum_1byte((void *) dos_ep->name, DOS_NAME_LENGTH, 0); + + for (entry--; entry >= 0; entry--) { + ext_ep = (EXT_DENTRY_T *) get_entry_in_dir(sb, p_dir, entry, NULL); + if (!ext_ep) + return -1; + + if ((p_fs->fs_func->get_entry_type((DENTRY_T *) ext_ep) == TYPE_EXTEND) && + (ext_ep->checksum == chksum)) { + count++; + if (ext_ep->order > 0x40) + return(count); + } else { + return(count); + } + } + + return(count); +} /* end of fat_count_ext_entries */ + +/* returns -1 on error */ +INT32 exfat_count_ext_entries(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, DENTRY_T *p_entry) +{ + INT32 i, count = 0; + UINT32 type; + FILE_DENTRY_T *file_ep = (FILE_DENTRY_T *) p_entry; + DENTRY_T *ext_ep; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + for (i = 0, entry++; i < file_ep->num_ext; i++, entry++) { + ext_ep = get_entry_in_dir(sb, p_dir, entry, NULL); + if (!ext_ep) + return -1; + + type = p_fs->fs_func->get_entry_type(ext_ep); + if ((type == TYPE_EXTEND) || (type == TYPE_STREAM)) { + count++; + } else { + return(count); + } + } + + return(count); +} /* end of exfat_count_ext_entries */ + +/* returns -1 on error */ +INT32 count_dos_name_entries(struct super_block *sb, CHAIN_T *p_dir, UINT32 type) +{ + INT32 i, count = 0; + INT32 dentries_per_clu; + UINT32 entry_type; + CHAIN_T clu; + DENTRY_T *ep; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */ + dentries_per_clu = p_fs->dentries_in_root; + else + dentries_per_clu = p_fs->dentries_per_clu; + + clu.dir = p_dir->dir; + clu.size = p_dir->size; + clu.flags = p_dir->flags; + + while (clu.dir != CLUSTER_32(~0)) { + if (p_fs->dev_ejected) + break; + + for (i = 0; i < dentries_per_clu; i++) { + ep = get_entry_in_dir(sb, &clu, i, NULL); + if (!ep) + return -1; + + entry_type = p_fs->fs_func->get_entry_type(ep); + + if (entry_type == TYPE_UNUSED) + return(count); + if (!(type & TYPE_CRITICAL_PRI) && !(type & TYPE_BENIGN_PRI)) + continue; + + if ((type == 
TYPE_ALL) || (type == entry_type)) + count++; + } + + if (p_dir->dir == CLUSTER_32(0)) + break; /* FAT16 root_dir */ + + if (clu.flags == 0x03) { + if ((--clu.size) > 0) + clu.dir++; + else + clu.dir = CLUSTER_32(~0); + } else { + if (FAT_read(sb, clu.dir, &(clu.dir)) != 0) + return -1; + } + } + + return(count); +} /* end of count_dos_name_entries */ + +BOOL is_dir_empty(struct super_block *sb, CHAIN_T *p_dir) +{ + INT32 i, count = 0; + INT32 dentries_per_clu; + UINT32 type; + CHAIN_T clu; + DENTRY_T *ep; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */ + dentries_per_clu = p_fs->dentries_in_root; + else + dentries_per_clu = p_fs->dentries_per_clu; + + clu.dir = p_dir->dir; + clu.size = p_dir->size; + clu.flags = p_dir->flags; + + while (clu.dir != CLUSTER_32(~0)) { + if (p_fs->dev_ejected) + break; + + for (i = 0; i < dentries_per_clu; i++) { + ep = get_entry_in_dir(sb, &clu, i, NULL); + if (!ep) + break; + + type = p_fs->fs_func->get_entry_type(ep); + + if (type == TYPE_UNUSED) + return TRUE; + if ((type != TYPE_FILE) && (type != TYPE_DIR)) + continue; + + if (p_dir->dir == CLUSTER_32(0)) { /* FAT16 root_dir */ + return FALSE; + } else { + if (p_fs->vol_type == EXFAT) + return FALSE; + if ((p_dir->dir == p_fs->root_dir) || ((++count) > 2)) + return FALSE; + } + } + + if (p_dir->dir == CLUSTER_32(0)) + break; /* FAT16 root_dir */ + + if (clu.flags == 0x03) { + if ((--clu.size) > 0) + clu.dir++; + else + clu.dir = CLUSTER_32(~0); + } else { + if (FAT_read(sb, clu.dir, &(clu.dir)) != 0) + break; + } + } + + return TRUE; +} /* end of is_dir_empty */ + +/* + * Name Conversion Functions + */ + +/* input : dir, uni_name + output : num_of_entry, dos_name(format : aaaaaa~1.bbb) */ +INT32 get_num_entries_and_dos_name(struct super_block *sb, CHAIN_T *p_dir, UNI_NAME_T *p_uniname, INT32 *entries, DOS_NAME_T *p_dosname) +{ + INT32 ret, num_entries, lossy = FALSE; + INT8 **r; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + num_entries = p_fs->fs_func->calc_num_entries(p_uniname); + if (num_entries == 0) + return FFS_INVALIDPATH; + + if (p_fs->vol_type != EXFAT) { + nls_uniname_to_dosname(sb, p_dosname, p_uniname, &lossy); + + if (lossy) { + ret = fat_generate_dos_name(sb, p_dir, p_dosname); + if (ret) + return ret; + } else { + for (r = reserved_names; *r; r++) { + if (!STRNCMP((void *) p_dosname->name, *r, 8)) + return FFS_INVALIDPATH; + } + + if (p_dosname->name_case != 0xFF) + num_entries = 1; + } + + if (num_entries > 1) + p_dosname->name_case = 0x0; + } + + *entries = num_entries; + + return FFS_SUCCESS; +} /* end of get_num_entries_and_dos_name */ + +void get_uni_name_from_dos_entry(struct super_block *sb, DOS_DENTRY_T *ep, UNI_NAME_T *p_uniname, UINT8 mode) +{ + DOS_NAME_T dos_name; + + if (mode == 0x0) + dos_name.name_case = 0x0; + else + dos_name.name_case = ep->lcase; + + MEMCPY(dos_name.name, ep->name, DOS_NAME_LENGTH); + nls_dosname_to_uniname(sb, p_uniname, &dos_name); +} /* end of get_uni_name_from_dos_entry */ + +void fat_get_uni_name_from_ext_entry(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, UINT16 *uniname) +{ + INT32 i; + EXT_DENTRY_T *ep; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + for (entry--, i = 1; entry >= 0; entry--, i++) { + ep = (EXT_DENTRY_T *) get_entry_in_dir(sb, p_dir, entry, NULL); + if (!ep) + return; + + if (p_fs->fs_func->get_entry_type((DENTRY_T *) ep) == TYPE_EXTEND) { + extract_uni_name_from_ext_entry(ep, uniname, i); + if (ep->order > 0x40) + return; + } else { + return; + } + + uniname += 
13; + } +} /* end of fat_get_uni_name_from_ext_entry */ + +void exfat_get_uni_name_from_ext_entry(struct super_block *sb, CHAIN_T *p_dir, INT32 entry, UINT16 *uniname) +{ + INT32 i; + DENTRY_T *ep; + ENTRY_SET_CACHE_T *es; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + es = get_entry_set_in_dir(sb, p_dir, entry, ES_ALL_ENTRIES, &ep); + if (es == NULL || es->num_entries < 3) { + if(es) { + release_entry_set(es); + } + return; + } + + ep += 2; + + /* + * First entry : file entry + * Second entry : stream-extension entry + * Third entry : first file-name entry + * So, the index of first file-name dentry should start from 2. + */ + for (i = 2; i < es->num_entries; i++, ep++) { + if (p_fs->fs_func->get_entry_type(ep) == TYPE_EXTEND) { + extract_uni_name_from_name_entry((NAME_DENTRY_T *)ep, uniname, i); + } else { + /* end of name entry */ + goto out; + } + uniname += 15; + } + +out: + release_entry_set(es); +} /* end of exfat_get_uni_name_from_ext_entry */ + +INT32 extract_uni_name_from_ext_entry(EXT_DENTRY_T *ep, UINT16 *uniname, INT32 order) +{ + INT32 i, len = 0; + + for (i = 0; i < 10; i += 2) { + *uniname = GET16(ep->unicode_0_4+i); + if (*uniname == 0x0) + return(len); + uniname++; + len++; + } + + if (order < 20) { + for (i = 0; i < 12; i += 2) { + *uniname = GET16_A(ep->unicode_5_10+i); + if (*uniname == 0x0) + return(len); + uniname++; + len++; + } + } else { + for (i = 0; i < 8; i += 2) { + *uniname = GET16_A(ep->unicode_5_10+i); + if (*uniname == 0x0) + return(len); + uniname++; + len++; + } + *uniname = 0x0; /* uniname[MAX_NAME_LENGTH-1] */ + return(len); + } + + for (i = 0; i < 4; i += 2) { + *uniname = GET16_A(ep->unicode_11_12+i); + if (*uniname == 0x0) + return(len); + uniname++; + len++; + } + + *uniname = 0x0; + return(len); + +} /* end of extract_uni_name_from_ext_entry */ + +INT32 extract_uni_name_from_name_entry(NAME_DENTRY_T *ep, UINT16 *uniname, INT32 order) +{ + INT32 i, len = 0; + + for (i = 0; i < 30; i += 2) { + *uniname = GET16_A(ep->unicode_0_14+i); + if (*uniname == 0x0) + return(len); + uniname++; + len++; + } + + *uniname = 0x0; + return(len); + +} /* end of extract_uni_name_from_name_entry */ + +INT32 fat_generate_dos_name(struct super_block *sb, CHAIN_T *p_dir, DOS_NAME_T *p_dosname) +{ + INT32 i, j, count = 0, count_begin = FALSE; + INT32 dentries_per_clu; + UINT32 type; + UINT8 bmap[128/* 1 ~ 1023 */]; + CHAIN_T clu; + DOS_DENTRY_T *ep; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + Bitmap_clear_all(bmap, 128); + Bitmap_set(bmap, 0); + + if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */ + dentries_per_clu = p_fs->dentries_in_root; + else + dentries_per_clu = p_fs->dentries_per_clu; + + clu.dir = p_dir->dir; + clu.flags = p_dir->flags; + + while (clu.dir != CLUSTER_32(~0)) { + if (p_fs->dev_ejected) + break; + + for (i = 0; i < dentries_per_clu; i++) { + ep = (DOS_DENTRY_T *) get_entry_in_dir(sb, &clu, i, NULL); + if (!ep) + return FFS_MEDIAERR; + + type = p_fs->fs_func->get_entry_type((DENTRY_T *) ep); + + if (type == TYPE_UNUSED) + break; + if ((type != TYPE_FILE) && (type != TYPE_DIR)) + continue; + + count = 0; + count_begin = FALSE; + + for (j = 0; j < 8; j++) { + if (ep->name[j] == ' ') + break; + + if (ep->name[j] == '~') { + count_begin = TRUE; + } else if (count_begin) { + if ((ep->name[j] >= '0') && (ep->name[j] <= '9')) { + count = count * 10 + (ep->name[j] - '0'); + } else { + count = 0; + count_begin = FALSE; + } + } + } + + if ((count > 0) && (count < 1024)) + Bitmap_set(bmap, count); + } + + if (p_dir->dir == CLUSTER_32(0)) + break; /* 
FAT16 root_dir */ + + if (FAT_read(sb, clu.dir, &(clu.dir)) != 0) + return FFS_MEDIAERR; + } + + count = 0; + for (i = 0; i < 128; i++) { + if (bmap[i] != 0xFF) { + for (j = 0; j < 8; j++) { + if (Bitmap_test(&(bmap[i]), j) == 0) { + count = (i << 3) + j; + break; + } + } + if (count != 0) + break; + } + } + + if ((count == 0) || (count >= 1024)) + return FFS_FILEEXIST; + else + fat_attach_count_to_dos_name(p_dosname->name, count); + + /* Now dos_name has DOS~????.EXT */ + return FFS_SUCCESS; +} /* end of generate_dos_name */ + +void fat_attach_count_to_dos_name(UINT8 *dosname, INT32 count) +{ + INT32 i, j, length; + INT8 str_count[6]; + + str_count[0] = '~'; + str_count[1] = '\0'; + my_itoa(&(str_count[1]), count); + length = STRLEN(str_count); + + i = j = 0; + while (j <= (8 - length)) { + i = j; + if (dosname[j] == ' ') + break; + if (dosname[j] & 0x80) + j += 2; + else + j++; + } + + for (j = 0; j < length; i++, j++) + dosname[i] = (UINT8) str_count[j]; + + if (i == 7) + dosname[7] = ' '; + +} /* end of attach_count_to_dos_name */ + +INT32 fat_calc_num_entries(UNI_NAME_T *p_uniname) +{ + INT32 len; + + len = p_uniname->name_len; + if (len == 0) + return 0; + + /* 1 dos name entry + extended entries */ + return((len-1) / 13 + 2); + +} /* end of calc_num_enties */ + +INT32 exfat_calc_num_entries(UNI_NAME_T *p_uniname) +{ + INT32 len; + + len = p_uniname->name_len; + if (len == 0) + return 0; + + /* 1 file entry + 1 stream entry + name entries */ + return((len-1) / 15 + 3); + +} /* end of exfat_calc_num_enties */ + +UINT8 calc_checksum_1byte(void *data, INT32 len, UINT8 chksum) +{ + INT32 i; + UINT8 *c = (UINT8 *) data; + + for (i = 0; i < len; i++, c++) + chksum = (((chksum & 1) << 7) | ((chksum & 0xFE) >> 1)) + *c; + + return(chksum); +} /* end of calc_checksum_1byte */ + +UINT16 calc_checksum_2byte(void *data, INT32 len, UINT16 chksum, INT32 type) +{ + INT32 i; + UINT8 *c = (UINT8 *) data; + + switch (type) { + case CS_DIR_ENTRY: + for (i = 0; i < len; i++, c++) { + if ((i == 2) || (i == 3)) + continue; + chksum = (((chksum & 1) << 15) | ((chksum & 0xFFFE) >> 1)) + (UINT16) *c; + } + break; + default + : + for (i = 0; i < len; i++, c++) { + chksum = (((chksum & 1) << 15) | ((chksum & 0xFFFE) >> 1)) + (UINT16) *c; + } + } + + return(chksum); +} /* end of calc_checksum_2byte */ + +UINT32 calc_checksum_4byte(void *data, INT32 len, UINT32 chksum, INT32 type) +{ + INT32 i; + UINT8 *c = (UINT8 *) data; + + switch (type) { + case CS_PBR_SECTOR: + for (i = 0; i < len; i++, c++) { + if ((i == 106) || (i == 107) || (i == 112)) + continue; + chksum = (((chksum & 1) << 31) | ((chksum & 0xFFFFFFFE) >> 1)) + (UINT32) *c; + } + break; + default + : + for (i = 0; i < len; i++, c++) { + chksum = (((chksum & 1) << 31) | ((chksum & 0xFFFFFFFE) >> 1)) + (UINT32) *c; + } + } + + return(chksum); +} /* end of calc_checksum_4byte */ + +/* + * Name Resolution Functions + */ + +/* return values of resolve_path() + > 0 : return the length of the path + < 0 : return error */ +INT32 resolve_path(struct inode *inode, UINT8 *path, CHAIN_T *p_dir, UNI_NAME_T *p_uniname) +{ + INT32 lossy = FALSE; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + FILE_ID_T *fid = &(EXFAT_I(inode)->fid); + + if (STRLEN(path) >= (MAX_NAME_LENGTH * MAX_CHARSET_SIZE)) + return(FFS_INVALIDPATH); + + STRCPY(name_buf, path); + + nls_cstring_to_uniname(sb, p_uniname, name_buf, &lossy); + if (lossy) + return(FFS_INVALIDPATH); + + fid->size = i_size_read(inode); + + p_dir->dir = fid->start_clu; + p_dir->size 
= (INT32)(fid->size >> p_fs->cluster_size_bits); + p_dir->flags = fid->flags; + + return(FFS_SUCCESS); +} + +/* + * File Operation Functions + */ +static FS_FUNC_T fat_fs_func = { + .alloc_cluster = fat_alloc_cluster, + .free_cluster = fat_free_cluster, + .count_used_clusters = fat_count_used_clusters, + + .init_dir_entry = fat_init_dir_entry, + .init_ext_entry = fat_init_ext_entry, + .find_dir_entry = fat_find_dir_entry, + .delete_dir_entry = fat_delete_dir_entry, + .get_uni_name_from_ext_entry = fat_get_uni_name_from_ext_entry, + .count_ext_entries = fat_count_ext_entries, + .calc_num_entries = fat_calc_num_entries, + + .get_entry_type = fat_get_entry_type, + .set_entry_type = fat_set_entry_type, + .get_entry_attr = fat_get_entry_attr, + .set_entry_attr = fat_set_entry_attr, + .get_entry_flag = fat_get_entry_flag, + .set_entry_flag = fat_set_entry_flag, + .get_entry_clu0 = fat_get_entry_clu0, + .set_entry_clu0 = fat_set_entry_clu0, + .get_entry_size = fat_get_entry_size, + .set_entry_size = fat_set_entry_size, + .get_entry_time = fat_get_entry_time, + .set_entry_time = fat_set_entry_time, +}; + + +INT32 fat16_mount(struct super_block *sb, PBR_SECTOR_T *p_pbr) +{ + INT32 num_reserved, num_root_sectors; + BPB16_T *p_bpb = (BPB16_T *) p_pbr->bpb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); + + if (p_bpb->num_fats == 0) + return FFS_FORMATERR; + + num_root_sectors = GET16(p_bpb->num_root_entries) << DENTRY_SIZE_BITS; + num_root_sectors = ((num_root_sectors-1) >> p_bd->sector_size_bits) + 1; + + p_fs->sectors_per_clu = p_bpb->sectors_per_clu; + p_fs->sectors_per_clu_bits = my_log2(p_bpb->sectors_per_clu); + p_fs->cluster_size_bits = p_fs->sectors_per_clu_bits + p_bd->sector_size_bits; + p_fs->cluster_size = 1 << p_fs->cluster_size_bits; + + p_fs->num_FAT_sectors = GET16(p_bpb->num_fat_sectors); + + p_fs->FAT1_start_sector = p_fs->PBR_sector + GET16(p_bpb->num_reserved); + if (p_bpb->num_fats == 1) + p_fs->FAT2_start_sector = p_fs->FAT1_start_sector; + else + p_fs->FAT2_start_sector = p_fs->FAT1_start_sector + p_fs->num_FAT_sectors; + + p_fs->root_start_sector = p_fs->FAT2_start_sector + p_fs->num_FAT_sectors; + p_fs->data_start_sector = p_fs->root_start_sector + num_root_sectors; + + p_fs->num_sectors = GET16(p_bpb->num_sectors); + if (p_fs->num_sectors == 0) + p_fs->num_sectors = GET32(p_bpb->num_huge_sectors); + + num_reserved = p_fs->data_start_sector - p_fs->PBR_sector; + p_fs->num_clusters = ((p_fs->num_sectors - num_reserved) >> p_fs->sectors_per_clu_bits) + 2; + /* because the cluster index starts with 2 */ + + if (p_fs->num_clusters < FAT12_THRESHOLD) + p_fs->vol_type = FAT12; + else + p_fs->vol_type = FAT16; + p_fs->vol_id = GET32(p_bpb->vol_serial); + + p_fs->root_dir = 0; + p_fs->dentries_in_root = GET16(p_bpb->num_root_entries); + p_fs->dentries_per_clu = 1 << (p_fs->cluster_size_bits - DENTRY_SIZE_BITS); + + p_fs->vol_flag = VOL_CLEAN; + p_fs->clu_srch_ptr = 2; + p_fs->used_clusters = (UINT32) ~0; + + p_fs->fs_func = &fat_fs_func; + + return FFS_SUCCESS; +} /* end of fat16_mount */ + +INT32 fat32_mount(struct super_block *sb, PBR_SECTOR_T *p_pbr) +{ + INT32 num_reserved; + BPB32_T *p_bpb = (BPB32_T *) p_pbr->bpb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); + + if (p_bpb->num_fats == 0) + return FFS_FORMATERR; + + p_fs->sectors_per_clu = p_bpb->sectors_per_clu; + p_fs->sectors_per_clu_bits = my_log2(p_bpb->sectors_per_clu); + p_fs->cluster_size_bits = p_fs->sectors_per_clu_bits + 
p_bd->sector_size_bits; + p_fs->cluster_size = 1 << p_fs->cluster_size_bits; + + p_fs->num_FAT_sectors = GET32(p_bpb->num_fat32_sectors); + + p_fs->FAT1_start_sector = p_fs->PBR_sector + GET16(p_bpb->num_reserved); + if (p_bpb->num_fats == 1) + p_fs->FAT2_start_sector = p_fs->FAT1_start_sector; + else + p_fs->FAT2_start_sector = p_fs->FAT1_start_sector + p_fs->num_FAT_sectors; + + p_fs->root_start_sector = p_fs->FAT2_start_sector + p_fs->num_FAT_sectors; + p_fs->data_start_sector = p_fs->root_start_sector; + + p_fs->num_sectors = GET32(p_bpb->num_huge_sectors); + num_reserved = p_fs->data_start_sector - p_fs->PBR_sector; + + p_fs->num_clusters = ((p_fs->num_sectors-num_reserved) >> p_fs->sectors_per_clu_bits) + 2; + /* because the cluster index starts with 2 */ + + p_fs->vol_type = FAT32; + p_fs->vol_id = GET32(p_bpb->vol_serial); + + p_fs->root_dir = GET32(p_bpb->root_cluster); + p_fs->dentries_in_root = 0; + p_fs->dentries_per_clu = 1 << (p_fs->cluster_size_bits - DENTRY_SIZE_BITS); + + p_fs->vol_flag = VOL_CLEAN; + p_fs->clu_srch_ptr = 2; + p_fs->used_clusters = (UINT32) ~0; + + p_fs->fs_func = &fat_fs_func; + + return FFS_SUCCESS; +} /* end of fat32_mount */ + +static FS_FUNC_T exfat_fs_func = { + .alloc_cluster = exfat_alloc_cluster, + .free_cluster = exfat_free_cluster, + .count_used_clusters = exfat_count_used_clusters, + + .init_dir_entry = exfat_init_dir_entry, + .init_ext_entry = exfat_init_ext_entry, + .find_dir_entry = exfat_find_dir_entry, + .delete_dir_entry = exfat_delete_dir_entry, + .get_uni_name_from_ext_entry = exfat_get_uni_name_from_ext_entry, + .count_ext_entries = exfat_count_ext_entries, + .calc_num_entries = exfat_calc_num_entries, + + .get_entry_type = exfat_get_entry_type, + .set_entry_type = exfat_set_entry_type, + .get_entry_attr = exfat_get_entry_attr, + .set_entry_attr = exfat_set_entry_attr, + .get_entry_flag = exfat_get_entry_flag, + .set_entry_flag = exfat_set_entry_flag, + .get_entry_clu0 = exfat_get_entry_clu0, + .set_entry_clu0 = exfat_set_entry_clu0, + .get_entry_size = exfat_get_entry_size, + .set_entry_size = exfat_set_entry_size, + .get_entry_time = exfat_get_entry_time, + .set_entry_time = exfat_set_entry_time, +}; + +INT32 exfat_mount(struct super_block *sb, PBR_SECTOR_T *p_pbr) +{ + BPBEX_T *p_bpb = (BPBEX_T *) p_pbr->bpb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); + + if (p_bpb->num_fats == 0) + return FFS_FORMATERR; + + p_fs->sectors_per_clu = 1 << p_bpb->sectors_per_clu_bits; + p_fs->sectors_per_clu_bits = p_bpb->sectors_per_clu_bits; + p_fs->cluster_size_bits = p_fs->sectors_per_clu_bits + p_bd->sector_size_bits; + p_fs->cluster_size = 1 << p_fs->cluster_size_bits; + + p_fs->num_FAT_sectors = GET32(p_bpb->fat_length); + + p_fs->FAT1_start_sector = p_fs->PBR_sector + GET32(p_bpb->fat_offset); + if (p_bpb->num_fats == 1) + p_fs->FAT2_start_sector = p_fs->FAT1_start_sector; + else + p_fs->FAT2_start_sector = p_fs->FAT1_start_sector + p_fs->num_FAT_sectors; + + p_fs->root_start_sector = p_fs->PBR_sector + GET32(p_bpb->clu_offset); + p_fs->data_start_sector = p_fs->root_start_sector; + + p_fs->num_sectors = GET64(p_bpb->vol_length); + p_fs->num_clusters = GET32(p_bpb->clu_count) + 2; + /* because the cluster index starts with 2 */ + + p_fs->vol_type = EXFAT; + p_fs->vol_id = GET32(p_bpb->vol_serial); + + p_fs->root_dir = GET32(p_bpb->root_cluster); + p_fs->dentries_in_root = 0; + p_fs->dentries_per_clu = 1 << (p_fs->cluster_size_bits - DENTRY_SIZE_BITS); + + p_fs->vol_flag = (UINT32) 
GET16(p_bpb->vol_flags); + p_fs->clu_srch_ptr = 2; + p_fs->used_clusters = (UINT32) ~0; + + p_fs->fs_func = &exfat_fs_func; + + return FFS_SUCCESS; +} /* end of exfat_mount */ + +INT32 create_dir(struct inode *inode, CHAIN_T *p_dir, UNI_NAME_T *p_uniname, FILE_ID_T *fid) +{ + INT32 ret, dentry, num_entries; + UINT64 size; + CHAIN_T clu; + DOS_NAME_T dos_name, dot_name; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname, &num_entries, &dos_name); + if (ret) + return ret; + + /* find_empty_entry must be called before alloc_cluster */ + dentry = find_empty_entry(inode, p_dir, num_entries); + if (dentry < 0) + return FFS_FULL; + + clu.dir = CLUSTER_32(~0); + clu.size = 0; + clu.flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01; + + /* (1) allocate a cluster */ + ret = p_fs->fs_func->alloc_cluster(sb, 1, &clu); + if (ret < 1) + return FFS_FULL; + + ret = clear_cluster(sb, clu.dir); + if (ret != FFS_SUCCESS) + return ret; + + if (p_fs->vol_type == EXFAT) { + size = p_fs->cluster_size; + } else { + size = 0; + + /* initialize the . and .. entry + Information for . points to itself + Information for .. points to parent dir */ + + dot_name.name_case = 0x0; + MEMCPY(dot_name.name, DOS_CUR_DIR_NAME, DOS_NAME_LENGTH); + + ret = p_fs->fs_func->init_dir_entry(sb, &clu, 0, TYPE_DIR, clu.dir, 0); + if (ret != FFS_SUCCESS) + return ret; + + ret = p_fs->fs_func->init_ext_entry(sb, &clu, 0, 1, NULL, &dot_name); + if (ret != FFS_SUCCESS) + return ret; + + MEMCPY(dot_name.name, DOS_PAR_DIR_NAME, DOS_NAME_LENGTH); + + if (p_dir->dir == p_fs->root_dir) + ret = p_fs->fs_func->init_dir_entry(sb, &clu, 1, TYPE_DIR, CLUSTER_32(0), 0); + else + ret = p_fs->fs_func->init_dir_entry(sb, &clu, 1, TYPE_DIR, p_dir->dir, 0); + + if (ret != FFS_SUCCESS) + return ret; + + ret = p_fs->fs_func->init_ext_entry(sb, &clu, 1, 1, NULL, &dot_name); + if (ret != FFS_SUCCESS) + return ret; + } + + /* (2) update the directory entry */ + /* make sub-dir entry in parent directory */ + ret = p_fs->fs_func->init_dir_entry(sb, p_dir, dentry, TYPE_DIR, clu.dir, size); + if (ret != FFS_SUCCESS) + return ret; + + ret = p_fs->fs_func->init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname, &dos_name); + if (ret != FFS_SUCCESS) + return ret; + + fid->dir.dir = p_dir->dir; + fid->dir.size = p_dir->size; + fid->dir.flags = p_dir->flags; + fid->entry = dentry; + + fid->attr = ATTR_SUBDIR; + fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01; + fid->size = size; + fid->start_clu = clu.dir; + + fid->type= TYPE_DIR; + fid->rwoffset = 0; + fid->hint_last_off = -1; + + return FFS_SUCCESS; +} /* end of create_dir */ + +INT32 create_file(struct inode *inode, CHAIN_T *p_dir, UNI_NAME_T *p_uniname, UINT8 mode, FILE_ID_T *fid) +{ + INT32 ret, dentry, num_entries; + DOS_NAME_T dos_name; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname, &num_entries, &dos_name); + if (ret) + return ret; + + /* find_empty_entry must be called before alloc_cluster() */ + dentry = find_empty_entry(inode, p_dir, num_entries); + if (dentry < 0) + return FFS_FULL; + + /* (1) update the directory entry */ + /* fill the dos name directory entry information of the created file. + the first cluster is not determined yet. 
(0) */ + ret = p_fs->fs_func->init_dir_entry(sb, p_dir, dentry, TYPE_FILE | mode, CLUSTER_32(0), 0); + if (ret != FFS_SUCCESS) + return ret; + + ret = p_fs->fs_func->init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname, &dos_name); + if (ret != FFS_SUCCESS) + return ret; + + fid->dir.dir = p_dir->dir; + fid->dir.size = p_dir->size; + fid->dir.flags = p_dir->flags; + fid->entry = dentry; + + fid->attr = ATTR_ARCHIVE | mode; + fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01; + fid->size = 0; + fid->start_clu = CLUSTER_32(~0); + + fid->type= TYPE_FILE; + fid->rwoffset = 0; + fid->hint_last_off = -1; + + return FFS_SUCCESS; +} /* end of create_file */ + +void remove_file(struct inode *inode, CHAIN_T *p_dir, INT32 entry) +{ + INT32 num_entries; + UINT32 sector; + DENTRY_T *ep; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + ep = get_entry_in_dir(sb, p_dir, entry, §or); + if (!ep) + return; + + buf_lock(sb, sector); + + /* buf_lock() before call count_ext_entries() */ + num_entries = p_fs->fs_func->count_ext_entries(sb, p_dir, entry, ep); + if (num_entries < 0) { + buf_unlock(sb, sector); + return; + } + num_entries++; + + buf_unlock(sb, sector); + + /* (1) update the directory entry */ + p_fs->fs_func->delete_dir_entry(sb, p_dir, entry, 0, num_entries); +} /* end of remove_file */ + +INT32 rename_file(struct inode *inode, CHAIN_T *p_dir, INT32 oldentry, UNI_NAME_T *p_uniname, FILE_ID_T *fid) +{ + INT32 ret, newentry = -1, num_old_entries, num_new_entries; + UINT32 sector_old, sector_new; + DOS_NAME_T dos_name; + DENTRY_T *epold, *epnew; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + epold = get_entry_in_dir(sb, p_dir, oldentry, §or_old); + if (!epold) + return FFS_MEDIAERR; + + buf_lock(sb, sector_old); + + /* buf_lock() before call count_ext_entries() */ + num_old_entries = p_fs->fs_func->count_ext_entries(sb, p_dir, oldentry, epold); + if (num_old_entries < 0) { + buf_unlock(sb, sector_old); + return FFS_MEDIAERR; + } + num_old_entries++; + + ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname, &num_new_entries, &dos_name); + if (ret) { + buf_unlock(sb, sector_old); + return ret; + } + + if (num_old_entries < num_new_entries) { + newentry = find_empty_entry(inode, p_dir, num_new_entries); + if (newentry < 0) { + buf_unlock(sb, sector_old); + return FFS_FULL; + } + + epnew = get_entry_in_dir(sb, p_dir, newentry, §or_new); + if (!epnew) { + buf_unlock(sb, sector_old); + return FFS_MEDIAERR; + } + + MEMCPY((void *) epnew, (void *) epold, DENTRY_SIZE); + if (p_fs->fs_func->get_entry_type(epnew) == TYPE_FILE) { + p_fs->fs_func->set_entry_attr(epnew, p_fs->fs_func->get_entry_attr(epnew) | ATTR_ARCHIVE); + fid->attr |= ATTR_ARCHIVE; + } + buf_modify(sb, sector_new); + buf_unlock(sb, sector_old); + + if (p_fs->vol_type == EXFAT) { + epold = get_entry_in_dir(sb, p_dir, oldentry+1, §or_old); + buf_lock(sb, sector_old); + epnew = get_entry_in_dir(sb, p_dir, newentry+1, §or_new); + + if (!epold || !epnew) { + buf_unlock(sb, sector_old); + return FFS_MEDIAERR; + } + + MEMCPY((void *) epnew, (void *) epold, DENTRY_SIZE); + buf_modify(sb, sector_new); + buf_unlock(sb, sector_old); + } + + ret = p_fs->fs_func->init_ext_entry(sb, p_dir, newentry, num_new_entries, p_uniname, &dos_name); + if (ret != FFS_SUCCESS) + return ret; + + p_fs->fs_func->delete_dir_entry(sb, p_dir, oldentry, 0, num_old_entries); + fid->entry = newentry; + } else { + if (p_fs->fs_func->get_entry_type(epold) == TYPE_FILE) { + 
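+ /* The new name needs no more dentries than the old one, so the file is
+ * renamed in place: refresh ATTR_ARCHIVE for regular files, rewrite the
+ * extension entries with the new name, and delete any surplus old
+ * entries. */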
p_fs->fs_func->set_entry_attr(epold, p_fs->fs_func->get_entry_attr(epold) | ATTR_ARCHIVE); + fid->attr |= ATTR_ARCHIVE; + } + buf_modify(sb, sector_old); + buf_unlock(sb, sector_old); + + ret = p_fs->fs_func->init_ext_entry(sb, p_dir, oldentry, num_new_entries, p_uniname, &dos_name); + if (ret != FFS_SUCCESS) + return ret; + + p_fs->fs_func->delete_dir_entry(sb, p_dir, oldentry, num_new_entries, num_old_entries); + } + + return FFS_SUCCESS; +} /* end of rename_file */ + +INT32 move_file(struct inode *inode, CHAIN_T *p_olddir, INT32 oldentry, CHAIN_T *p_newdir, UNI_NAME_T *p_uniname, FILE_ID_T *fid) +{ + INT32 ret, newentry, num_new_entries, num_old_entries; + UINT32 sector_mov, sector_new; + CHAIN_T clu; + DOS_NAME_T dos_name; + DENTRY_T *epmov, *epnew; + struct super_block *sb = inode->i_sb; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + epmov = get_entry_in_dir(sb, p_olddir, oldentry, §or_mov); + if (!epmov) + return FFS_MEDIAERR; + + /* check if the source and target directory is the same */ + if (p_fs->fs_func->get_entry_type(epmov) == TYPE_DIR && + p_fs->fs_func->get_entry_clu0(epmov) == p_newdir->dir) + return FFS_INVALIDPATH; + + buf_lock(sb, sector_mov); + + /* buf_lock() before call count_ext_entries() */ + num_old_entries = p_fs->fs_func->count_ext_entries(sb, p_olddir, oldentry, epmov); + if (num_old_entries < 0) { + buf_unlock(sb, sector_mov); + return FFS_MEDIAERR; + } + num_old_entries++; + + ret = get_num_entries_and_dos_name(sb, p_newdir, p_uniname, &num_new_entries, &dos_name); + if (ret) { + buf_unlock(sb, sector_mov); + return ret; + } + + newentry = find_empty_entry(inode, p_newdir, num_new_entries); + if (newentry < 0) { + buf_unlock(sb, sector_mov); + return FFS_FULL; + } + + epnew = get_entry_in_dir(sb, p_newdir, newentry, §or_new); + if (!epnew) { + buf_unlock(sb, sector_mov); + return FFS_MEDIAERR; + } + + MEMCPY((void *) epnew, (void *) epmov, DENTRY_SIZE); + if (p_fs->fs_func->get_entry_type(epnew) == TYPE_FILE) { + p_fs->fs_func->set_entry_attr(epnew, p_fs->fs_func->get_entry_attr(epnew) | ATTR_ARCHIVE); + fid->attr |= ATTR_ARCHIVE; + } + buf_modify(sb, sector_new); + buf_unlock(sb, sector_mov); + + if (p_fs->vol_type == EXFAT) { + epmov = get_entry_in_dir(sb, p_olddir, oldentry+1, §or_mov); + buf_lock(sb, sector_mov); + epnew = get_entry_in_dir(sb, p_newdir, newentry+1, §or_new); + if (!epmov || !epnew) { + buf_unlock(sb, sector_mov); + return FFS_MEDIAERR; + } + + MEMCPY((void *) epnew, (void *) epmov, DENTRY_SIZE); + buf_modify(sb, sector_new); + buf_unlock(sb, sector_mov); + } else if (p_fs->fs_func->get_entry_type(epnew) == TYPE_DIR) { + /* change ".." 
pointer to new parent dir */ + clu.dir = p_fs->fs_func->get_entry_clu0(epnew); + clu.flags = 0x01; + + epnew = get_entry_in_dir(sb, &clu, 1, §or_new); + if (!epnew) + return FFS_MEDIAERR; + + if (p_newdir->dir == p_fs->root_dir) + p_fs->fs_func->set_entry_clu0(epnew, CLUSTER_32(0)); + else + p_fs->fs_func->set_entry_clu0(epnew, p_newdir->dir); + buf_modify(sb, sector_new); + } + + ret = p_fs->fs_func->init_ext_entry(sb, p_newdir, newentry, num_new_entries, p_uniname, &dos_name); + if (ret != FFS_SUCCESS) + return ret; + + p_fs->fs_func->delete_dir_entry(sb, p_olddir, oldentry, 0, num_old_entries); + + fid->dir.dir = p_newdir->dir; + fid->dir.size = p_newdir->size; + fid->dir.flags = p_newdir->flags; + + fid->entry = newentry; + + return FFS_SUCCESS; +} /* end of move_file */ + +/* + * Sector Read/Write Functions + */ + +INT32 sector_read(struct super_block *sb, UINT32 sec, struct buffer_head **bh, INT32 read) +{ + INT32 ret = FFS_MEDIAERR; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + if ((sec >= (p_fs->PBR_sector+p_fs->num_sectors)) && (p_fs->num_sectors > 0)) { + PRINT("[EXFAT] sector_read: out of range error! (sec = %d)\n", sec); + fs_error(sb); + return ret; + } + + if (!p_fs->dev_ejected) { + ret = bdev_read(sb, sec, bh, 1, read); + if (ret != FFS_SUCCESS) + p_fs->dev_ejected = TRUE; + } + + return ret; +} /* end of sector_read */ + +INT32 sector_write(struct super_block *sb, UINT32 sec, struct buffer_head *bh, INT32 sync) +{ + INT32 ret = FFS_MEDIAERR; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + if (sec >= (p_fs->PBR_sector+p_fs->num_sectors) && (p_fs->num_sectors > 0)) { + PRINT("[EXFAT] sector_write: out of range error! (sec = %d)\n", sec); + fs_error(sb); + return ret; + } + if (bh == NULL) { + PRINT("[EXFAT] sector_write: bh is NULL!\n"); + fs_error(sb); + return ret; + } + + if (!p_fs->dev_ejected) { + ret = bdev_write(sb, sec, bh, 1, sync); + if (ret != FFS_SUCCESS) + p_fs->dev_ejected = TRUE; + } + + return ret; +} /* end of sector_write */ + +INT32 multi_sector_read(struct super_block *sb, UINT32 sec, struct buffer_head **bh, INT32 num_secs, INT32 read) +{ + INT32 ret = FFS_MEDIAERR; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + if (((sec+num_secs) > (p_fs->PBR_sector+p_fs->num_sectors)) && (p_fs->num_sectors > 0)) { + PRINT("[EXFAT] multi_sector_read: out of range error! (sec = %d, num_secs = %d)\n", sec, num_secs); + fs_error(sb); + return ret; + } + + if (!p_fs->dev_ejected) { + ret = bdev_read(sb, sec, bh, num_secs, read); + if (ret != FFS_SUCCESS) + p_fs->dev_ejected = TRUE; + } + + return ret; +} /* end of multi_sector_read */ + +INT32 multi_sector_write(struct super_block *sb, UINT32 sec, struct buffer_head *bh, INT32 num_secs, INT32 sync) +{ + INT32 ret = FFS_MEDIAERR; + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + if ((sec+num_secs) > (p_fs->PBR_sector+p_fs->num_sectors) && (p_fs->num_sectors > 0)) { + PRINT("[EXFAT] multi_sector_write: out of range error! 
(sec = %d, num_secs = %d)\n", sec, num_secs); + fs_error(sb); + return ret; + } + if (bh == NULL) { + PRINT("[EXFAT] multi_sector_write: bh is NULL!\n"); + fs_error(sb); + return ret; + } + + if (!p_fs->dev_ejected) { + ret = bdev_write(sb, sec, bh, num_secs, sync); + if (ret != FFS_SUCCESS) + p_fs->dev_ejected = TRUE; + } + + return ret; +} /* end of multi_sector_write */ + +/* end of exfat_core.c */ diff --git a/fs/exfat/exfat_data.c b/fs/exfat/exfat_data.c new file mode 100644 index 0000000000000..37e1932109b51 --- /dev/null +++ b/fs/exfat/exfat_data.c @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/************************************************************************/ +/* */ +/* PROJECT : exFAT & FAT12/16/32 File System */ +/* FILE : exfat_data.c */ +/* PURPOSE : exFAT Configuable Data Definitions */ +/* */ +/*----------------------------------------------------------------------*/ +/* NOTES */ +/* */ +/*----------------------------------------------------------------------*/ +/* REVISION HISTORY (Ver 0.9) */ +/* */ +/* - 2010.11.15 [Joosun Hahn] : first writing */ +/* */ +/************************************************************************/ + +#include "exfat_config.h" +#include "exfat_global.h" +#include "exfat_data.h" +#include "exfat_oal.h" + +#include "exfat_blkdev.h" +#include "exfat_cache.h" +#include "exfat_nls.h" +#include "exfat_super.h" +#include "exfat.h" + +/*======================================================================*/ +/* */ +/* GLOBAL VARIABLE DEFINITIONS */ +/* */ +/*======================================================================*/ + +/*----------------------------------------------------------------------*/ +/* File Manager */ +/*----------------------------------------------------------------------*/ + +/* file system volume table */ +FS_STRUCT_T fs_struct[MAX_DRIVE]; + +#if 0 +/*----------------------------------------------------------------------*/ +/* Buffer Manager */ +/*----------------------------------------------------------------------*/ + +/* FAT cache */ +DECLARE_MUTEX(f_sem); +BUF_CACHE_T FAT_cache_array[FAT_CACHE_SIZE]; +BUF_CACHE_T FAT_cache_lru_list; +BUF_CACHE_T FAT_cache_hash_list[FAT_CACHE_HASH_SIZE]; + +/* buf cache */ +DECLARE_MUTEX(b_sem); +BUF_CACHE_T buf_cache_array[BUF_CACHE_SIZE]; +BUF_CACHE_T buf_cache_lru_list; +BUF_CACHE_T buf_cache_hash_list[BUF_CACHE_HASH_SIZE]; +#endif + +/* end of exfat_data.c */ diff --git a/fs/exfat/exfat_data.h b/fs/exfat/exfat_data.h new file mode 100644 index 0000000000000..f1a0332c961bd --- /dev/null +++ b/fs/exfat/exfat_data.h @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/************************************************************************/ +/* */ +/* PROJECT : exFAT & FAT12/16/32 File System */ +/* FILE : exfat_data.h */ +/* PURPOSE : Header File for exFAT Configuable Constants */ +/* */ +/*----------------------------------------------------------------------*/ +/* NOTES */ +/* */ +/*----------------------------------------------------------------------*/ +/* REVISION HISTORY (Ver 0.9) */ +/* */ +/* - 2010.11.15 [Joosun Hahn] : first writing */ +/* */ +/************************************************************************/ + +#ifndef _EXFAT_DATA_H +#define _EXFAT_DATA_H + +#include "exfat_config.h" +#include "exfat_global.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + + /*======================================================================*/ + /* */ + /* FFS CONFIGURATIONS */ + /* (CHANGE THIS PART IF REQUIRED) */ + /* */ + /*======================================================================*/ + + /* max number of block devices */ +#define MAX_DEVICE 2 + + /* max number of volumes on all block devices */ +#define MAX_DRIVE 2 + + /* max number of open files */ +#define MAX_OPEN 20 + + /* max number of root directory entries in FAT12/16 */ + /* (should be an exponential value of 2) */ +#define MAX_DENTRY 512 + + /* cache size (in number of sectors) */ + /* (should be an exponential value of 2) */ +#define FAT_CACHE_SIZE 128 +#define FAT_CACHE_HASH_SIZE 64 +#define BUF_CACHE_SIZE 256 +#define BUF_CACHE_HASH_SIZE 64 + +#ifndef CONFIG_EXFAT_DEFAULT_CODEPAGE +#define CONFIG_EXFAT_DEFAULT_CODEPAGE 437 +#define CONFIG_EXFAT_DEFAULT_IOCHARSET "utf8" +#endif + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _EXFAT_DATA_H */ + +/* end of exfat_data.h */ diff --git a/fs/exfat/exfat_global.c b/fs/exfat/exfat_global.c new file mode 100644 index 0000000000000..036f08eb0a94d --- /dev/null +++ b/fs/exfat/exfat_global.c @@ -0,0 +1,168 @@ +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +/************************************************************************/ +/* */ +/* PROJECT : exFAT & FAT12/16/32 File System */ +/* FILE : exfat_global.c */ +/* PURPOSE : exFAT Miscellaneous Functions */ +/* */ +/*----------------------------------------------------------------------*/ +/* NOTES */ +/* */ +/*----------------------------------------------------------------------*/ +/* REVISION HISTORY (Ver 0.9) */ +/* */ +/* - 2010.11.15 [Joosun Hahn] : first writing */ +/* */ +/************************************************************************/ + +#include "exfat_config.h" +#include "exfat_global.h" + +/*----------------------------------------------------------------------*/ +/* Global Variable Definitions */ +/*----------------------------------------------------------------------*/ + +/*======================================================================*/ +/* */ +/* LIBRARY FUNCTION DEFINITIONS -- WELL-KNOWN FUNCTIONS */ +/* */ +/*======================================================================*/ + +/*----------------------------------------------------------------------*/ +/* String Manipulation Functions */ +/* (defined if no system memory functions are available) */ +/*----------------------------------------------------------------------*/ + +INT32 __wstrchr(UINT16 *str, UINT16 wchar) +{ + while (*str) { + if (*(str++) == wchar) return(1); + } + return(0); +} + +INT32 __wstrlen(UINT16 *str) +{ + INT32 length = 0; + + while (*(str++)) length++; + return(length); +} + +/*======================================================================*/ +/* */ +/* LIBRARY FUNCTION DEFINITIONS -- OTHER UTILITY FUNCTIONS */ +/* */ +/*======================================================================*/ + +/*----------------------------------------------------------------------*/ +/* Bitmap Manipulation Functions */ +/*----------------------------------------------------------------------*/ + +#define BITMAP_LOC(v) ((v) >> 3) +#define BITMAP_SHIFT(v) ((v) & 0x07) + +void Bitmap_set_all(UINT8 *bitmap, INT32 mapsize) +{ + MEMSET(bitmap, 0xFF, mapsize); +} /* end of Bitmap_set_all */ + +void Bitmap_clear_all(UINT8 *bitmap, INT32 mapsize) +{ + MEMSET(bitmap, 0x0, mapsize); +} /* end of Bitmap_clear_all */ + +INT32 Bitmap_test(UINT8 *bitmap, INT32 i) +{ + UINT8 data; + + data = bitmap[BITMAP_LOC(i)]; + if ((data >> BITMAP_SHIFT(i)) & 0x01) return(1); + return(0); +} /* end of Bitmap_test */ + +void Bitmap_set(UINT8 *bitmap, INT32 i) +{ + bitmap[BITMAP_LOC(i)] |= (0x01 << BITMAP_SHIFT(i)); +} /* end of Bitmap_set */ + +void Bitmap_clear(UINT8 *bitmap, INT32 i) +{ + bitmap[BITMAP_LOC(i)] &= ~(0x01 << BITMAP_SHIFT(i)); +} /* end of Bitmap_clear */ + +void Bitmap_nbits_set(UINT8 *bitmap, INT32 offset, INT32 nbits) +{ + INT32 i; + + for (i = 0; i < nbits; i++) { + Bitmap_set(bitmap, offset+i); + } +} /* end of Bitmap_nbits_set */ + +void Bitmap_nbits_clear(UINT8 *bitmap, INT32 offset, INT32 nbits) +{ + INT32 i; + + for (i = 0; i < nbits; i++) { + Bitmap_clear(bitmap, offset+i); + } +} /* end of Bitmap_nbits_clear */ + +/*----------------------------------------------------------------------*/ +/* Miscellaneous Library Functions */ +/*----------------------------------------------------------------------*/ + +/* integer to ascii conversion */ +void my_itoa(INT8 *buf, INT32 v) +{ + INT32 mod[10]; + INT32 i; + + for (i = 0; i < 10; i++) { + mod[i] = (v % 10); + v = v / 10; + if (v == 0) break; + } + + if (i == 10) + i--; + + for (; i >= 0; i--) { + *buf = (UINT8) ('0' + mod[i]); + 
buf++; + } + *buf = '\0'; +} /* end of my_itoa */ + +/* value to base 2 log conversion */ +INT32 my_log2(UINT32 v) +{ + UINT32 bits = 0; + + while (v > 1) { + if (v & 0x01) return(-1); + v >>= 1; + bits++; + } + return(bits); +} /* end of my_log2 */ + +/* end of exfat_global.c */ diff --git a/fs/exfat/exfat_global.h b/fs/exfat/exfat_global.h new file mode 100644 index 0000000000000..890bd390de350 --- /dev/null +++ b/fs/exfat/exfat_global.h @@ -0,0 +1,214 @@ +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/************************************************************************/ +/* */ +/* PROJECT : exFAT & FAT12/16/32 File System */ +/* FILE : exfat_global.h */ +/* PURPOSE : Header File for exFAT Global Definitions & Misc Functions */ +/* */ +/*----------------------------------------------------------------------*/ +/* NOTES */ +/* */ +/*----------------------------------------------------------------------*/ +/* REVISION HISTORY (Ver 0.9) */ +/* */ +/* - 2010.11.15 [Joosun Hahn] : first writing */ +/* */ +/************************************************************************/ + +#ifndef _EXFAT_GLOBAL_H +#define _EXFAT_GLOBAL_H + +#include +#include +#include +#include +#include + +#include "exfat_config.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + + /*======================================================================*/ + /* */ + /* CONSTANT & MACRO DEFINITIONS */ + /* */ + /*======================================================================*/ + + /*----------------------------------------------------------------------*/ + /* Well-Known Constants (DO NOT CHANGE THIS PART !!) */ + /*----------------------------------------------------------------------*/ + +#ifndef TRUE +#define TRUE 1 +#endif +#ifndef FALSE +#define FALSE 0 +#endif +#ifndef OK +#define OK 0 +#endif +#ifndef FAIL +#define FAIL 1 +#endif +#ifndef NULL +#define NULL 0 +#endif + + /* Min/Max macro */ +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#define MAX(a, b) (((a) > (b)) ? (a) : (b)) + + /*======================================================================*/ + /* */ + /* TYPE DEFINITIONS */ + /* (CHANGE THIS PART IF REQUIRED) */ + /* */ + /*======================================================================*/ + + /* type definitions for primitive types; + these should be re-defined to meet its size for each OS platform; + these should be used instead of primitive types for portability. 
*/ + + typedef char INT8; // 1 byte signed integer + typedef short INT16; // 2 byte signed integer + typedef int INT32; // 4 byte signed integer + typedef long long INT64; // 8 byte signed integer + + typedef unsigned char UINT8; // 1 byte unsigned integer + typedef unsigned short UINT16; // 2 byte unsigned integer + typedef unsigned int UINT32; // 4 byte unsigned integer + typedef unsigned long long UINT64; // 8 byte ussigned integer + + typedef unsigned char BOOL; + + + /*======================================================================*/ + /* */ + /* LIBRARY FUNCTION DECLARATIONS -- WELL-KNOWN FUNCTIONS */ + /* (CHANGE THIS PART IF REQUIRED) */ + /* */ + /*======================================================================*/ + + /*----------------------------------------------------------------------*/ + /* Memory Manipulation Macros & Functions */ + /*----------------------------------------------------------------------*/ + +#ifdef MALLOC +#undef MALLOC +#endif +#ifdef FREE +#undef FREE +#endif +#ifdef MEMSET +#undef MEMSET +#endif +#ifdef MEMCPY +#undef MEMCPY +#endif +#ifdef MEMCMP +#undef MEMCMP +#endif + +#define MALLOC(size) kmalloc(size, GFP_KERNEL) +#define FREE(mem) if (mem) kfree(mem) +#define MEMSET(mem, value, size) memset(mem, value, size) +#define MEMCPY(dest, src, size) memcpy(dest, src, size) +#define MEMCMP(mem1, mem2, size) memcmp(mem1, mem2, size) +#define COPY_DENTRY(dest, src) memcpy(dest, src, sizeof(DENTRY_T)) + + /*----------------------------------------------------------------------*/ + /* String Manipulation Macros & Functions */ + /*----------------------------------------------------------------------*/ + +#define STRCPY(dest, src) strcpy(dest, src) +#define STRNCPY(dest, src, n) strncpy(dest, src, n) +#define STRCAT(str1, str2) strcat(str1, str2) +#define STRCMP(str1, str2) strcmp(str1, str2) +#define STRNCMP(str1, str2, n) strncmp(str1, str2, n) +#define STRLEN(str) strlen(str) + + INT32 __wstrchr(UINT16 *str, UINT16 wchar); + INT32 __wstrlen(UINT16 *str); + +#define WSTRCHR(str, wchar) __wstrchr(str, wchar) +#define WSTRLEN(str) __wstrlen(str) + + /*----------------------------------------------------------------------*/ + /* Debugging Macros & Functions */ + /* EXFAT_CONFIG_DEBUG_MSG is configured in exfat_config.h */ + /*----------------------------------------------------------------------*/ +#if EXFAT_CONFIG_DEBUG_MSG +#define PRINTK(...) \ + do { \ + printk("[EXFAT] " __VA_ARGS__); \ + } while(0) +#else +#define PRINTK(...) +#endif + + /*======================================================================*/ + /* */ + /* LIBRARY FUNCTION DECLARATIONS -- OTHER UTILITY FUNCTIONS */ + /* (DO NOT CHANGE THIS PART !!) 
*/ + /* */ + /*======================================================================*/ + + /*----------------------------------------------------------------------*/ + /* Bitmap Manipulation Functions */ + /*----------------------------------------------------------------------*/ + + void Bitmap_set_all(UINT8 *bitmap, INT32 mapsize); + void Bitmap_clear_all(UINT8 *bitmap, INT32 mapsize); + INT32 Bitmap_test(UINT8 *bitmap, INT32 i); + void Bitmap_set(UINT8 *bitmap, INT32 i); + void Bitmap_clear(UINT8 *bitmpa, INT32 i); + void Bitmap_nbits_set(UINT8 *bitmap, INT32 offset, INT32 nbits); + void Bitmap_nbits_clear(UINT8 *bitmap, INT32 offset, INT32 nbits); + + /*----------------------------------------------------------------------*/ + /* Miscellaneous Library Functions */ + /*----------------------------------------------------------------------*/ + + void my_itoa(INT8 *buf, INT32 v); + INT32 my_log2(UINT32 v); + + /*======================================================================*/ + /* */ + /* DEFINITIONS FOR DEBUGGING */ + /* (CHANGE THIS PART IF REQUIRED) */ + /* */ + /*======================================================================*/ + + /* debug message ouput macro */ +#ifdef PRINT +#undef PRINT +#endif + +#define PRINT printk + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _EXFAT_GLOBAL_H */ + +/* end of exfat_global.h */ diff --git a/fs/exfat/exfat_nls.c b/fs/exfat/exfat_nls.c new file mode 100644 index 0000000000000..f80af8b186b0b --- /dev/null +++ b/fs/exfat/exfat_nls.c @@ -0,0 +1,394 @@ +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +/************************************************************************/ +/* */ +/* PROJECT : exFAT & FAT12/16/32 File System */ +/* FILE : exfat_nls.c */ +/* PURPOSE : exFAT NLS Manager */ +/* */ +/*----------------------------------------------------------------------*/ +/* NOTES */ +/* */ +/*----------------------------------------------------------------------*/ +/* REVISION HISTORY (Ver 0.9) */ +/* */ +/* - 2010.11.15 [Joosun Hahn] : first writing */ +/* */ +/************************************************************************/ + +#include "exfat_config.h" +#include "exfat_global.h" +#include "exfat_data.h" + +#include "exfat_nls.h" +#include "exfat_api.h" +#include "exfat_super.h" +#include "exfat.h" + +#include + +/*----------------------------------------------------------------------*/ +/* Global Variable Definitions */ +/*----------------------------------------------------------------------*/ + +/*----------------------------------------------------------------------*/ +/* Local Variable Definitions */ +/*----------------------------------------------------------------------*/ + +static UINT16 bad_dos_chars[] = { + /* + , ; = [ ] */ + 0x002B, 0x002C, 0x003B, 0x003D, 0x005B, 0x005D, + 0xFF0B, 0xFF0C, 0xFF1B, 0xFF1D, 0xFF3B, 0xFF3D, + 0 +}; + +static UINT16 bad_uni_chars[] = { + /* " * / : < > ? \ | */ + 0x0022, 0x002A, 0x002F, 0x003A, + 0x003C, 0x003E, 0x003F, 0x005C, 0x007C, + 0 +}; + +/*----------------------------------------------------------------------*/ +/* Local Function Declarations */ +/*----------------------------------------------------------------------*/ + +static INT32 convert_uni_to_ch(struct nls_table *nls, UINT8 *ch, UINT16 uni, INT32 *lossy); +static INT32 convert_ch_to_uni(struct nls_table *nls, UINT16 *uni, UINT8 *ch, INT32 *lossy); + +/*======================================================================*/ +/* Global Function Definitions */ +/*======================================================================*/ + +UINT16 nls_upper(struct super_block *sb, UINT16 a) +{ + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + + if (EXFAT_SB(sb)->options.casesensitive) + return(a); + if (p_fs->vol_utbl != NULL && (p_fs->vol_utbl)[get_col_index(a)] != NULL) + return (p_fs->vol_utbl)[get_col_index(a)][get_row_index(a)]; + else + return a; +} + +INT32 nls_dosname_cmp(struct super_block *sb, UINT8 *a, UINT8 *b) +{ + return(STRNCMP((void *) a, (void *) b, DOS_NAME_LENGTH)); +} /* end of nls_dosname_cmp */ + +INT32 nls_uniname_cmp(struct super_block *sb, UINT16 *a, UINT16 *b) +{ + INT32 i; + + for (i = 0; i < MAX_NAME_LENGTH; i++, a++, b++) { + if (nls_upper(sb, *a) != nls_upper(sb, *b)) return(1); + if (*a == 0x0) return(0); + } + return(0); +} /* end of nls_uniname_cmp */ + +void nls_uniname_to_dosname(struct super_block *sb, DOS_NAME_T *p_dosname, UNI_NAME_T *p_uniname, INT32 *p_lossy) +{ + INT32 i, j, len, lossy = FALSE; + UINT8 buf[MAX_CHARSET_SIZE]; + UINT8 lower = 0, upper = 0; + UINT8 *dosname = p_dosname->name; + UINT16 *uniname = p_uniname->name; + UINT16 *p, *last_period; + struct nls_table *nls = EXFAT_SB(sb)->nls_disk; + + for (i = 0; i < DOS_NAME_LENGTH; i++) { + *(dosname+i) = ' '; + } + + if (!nls_uniname_cmp(sb, uniname, (UINT16 *) UNI_CUR_DIR_NAME)) { + *(dosname) = '.'; + p_dosname->name_case = 0x0; + if (p_lossy != NULL) *p_lossy = FALSE; + return; + } + + if (!nls_uniname_cmp(sb, uniname, (UINT16 *) UNI_PAR_DIR_NAME)) { + *(dosname) = '.'; + *(dosname+1) = '.'; + p_dosname->name_case = 0x0; + if (p_lossy != NULL) *p_lossy = FALSE; + 
return; + } + + /* search for the last embedded period */ + last_period = NULL; + for (p = uniname; *p; p++) { + if (*p == (UINT16) '.') last_period = p; + } + + i = 0; + while (i < DOS_NAME_LENGTH) { + if (i == 8) { + if (last_period == NULL) break; + + if (uniname <= last_period) { + if (uniname < last_period) lossy = TRUE; + uniname = last_period + 1; + } + } + + if (*uniname == (UINT16) '\0') { + break; + } else if (*uniname == (UINT16) ' ') { + lossy = TRUE; + } else if (*uniname == (UINT16) '.') { + if (uniname < last_period) lossy = TRUE; + else i = 8; + } else if (WSTRCHR(bad_dos_chars, *uniname)) { + lossy = TRUE; + *(dosname+i) = '_'; + i++; + } else { + len = convert_uni_to_ch(nls, buf, *uniname, &lossy); + + if (len > 1) { + if ((i >= 8) && ((i+len) > DOS_NAME_LENGTH)) { + break; + } + if ((i < 8) && ((i+len) > 8)) { + i = 8; + continue; + } + + lower = 0xFF; + + for (j = 0; j < len; j++, i++) { + *(dosname+i) = *(buf+j); + } + } else { /* len == 1 */ + if ((*buf >= 'a') && (*buf <= 'z')) { + *(dosname+i) = *buf - ('a' - 'A'); + + if (i < 8) lower |= 0x08; + else lower |= 0x10; + } else if ((*buf >= 'A') && (*buf <= 'Z')) { + *(dosname+i) = *buf; + + if (i < 8) upper |= 0x08; + else upper |= 0x10; + } else { + *(dosname+i) = *buf; + } + i++; + } + } + + uniname++; + } + + if (*dosname == 0xE5) *dosname = 0x05; + if (*uniname != 0x0) lossy = TRUE; + + if (upper & lower) p_dosname->name_case = 0xFF; + else p_dosname->name_case = lower; + + if (p_lossy != NULL) *p_lossy = lossy; +} /* end of nls_uniname_to_dosname */ + +void nls_dosname_to_uniname(struct super_block *sb, UNI_NAME_T *p_uniname, DOS_NAME_T *p_dosname) +{ + INT32 i = 0, j, n = 0; + UINT8 buf[DOS_NAME_LENGTH+2]; + UINT8 *dosname = p_dosname->name; + UINT16 *uniname = p_uniname->name; + struct nls_table *nls = EXFAT_SB(sb)->nls_disk; + + if (*dosname == 0x05) { + *buf = 0xE5; + i++; + n++; + } + + for ( ; i < 8; i++, n++) { + if (*(dosname+i) == ' ') break; + + if ((*(dosname+i) >= 'A') && (*(dosname+i) <= 'Z') && (p_dosname->name_case & 0x08)) + *(buf+n) = *(dosname+i) + ('a' - 'A'); + else + *(buf+n) = *(dosname+i); + } + if (*(dosname+8) != ' ') { + *(buf+n) = '.'; + n++; + } + + for (i = 8; i < DOS_NAME_LENGTH; i++, n++) { + if (*(dosname+i) == ' ') break; + + if ((*(dosname+i) >= 'A') && (*(dosname+i) <= 'Z') && (p_dosname->name_case & 0x10)) + *(buf+n) = *(dosname+i) + ('a' - 'A'); + else + *(buf+n) = *(dosname+i); + } + *(buf+n) = '\0'; + + i = j = 0; + while (j < (MAX_NAME_LENGTH-1)) { + if (*(buf+i) == '\0') break; + + i += convert_ch_to_uni(nls, uniname, (buf+i), NULL); + + uniname++; + j++; + } + + *uniname = (UINT16) '\0'; +} /* end of nls_dosname_to_uniname */ + +void nls_uniname_to_cstring(struct super_block *sb, UINT8 *p_cstring, UNI_NAME_T *p_uniname) +{ + INT32 i, j, len; + UINT8 buf[MAX_CHARSET_SIZE]; + UINT16 *uniname = p_uniname->name; + struct nls_table *nls = EXFAT_SB(sb)->nls_io; + + i = 0; + while (i < (MAX_NAME_LENGTH-1)) { + if (*uniname == (UINT16) '\0') break; + + len = convert_uni_to_ch(nls, buf, *uniname, NULL); + + if (len > 1) { + for (j = 0; j < len; j++) + *p_cstring++ = (INT8) *(buf+j); + } else { /* len == 1 */ + *p_cstring++ = (INT8) *buf; + } + + uniname++; + i++; + } + + *p_cstring = '\0'; +} /* end of nls_uniname_to_cstring */ + +void nls_cstring_to_uniname(struct super_block *sb, UNI_NAME_T *p_uniname, UINT8 *p_cstring, INT32 *p_lossy) +{ + INT32 i, j, lossy = FALSE; + UINT8 *end_of_name; + UINT8 upname[MAX_NAME_LENGTH * 2]; + UINT16 *uniname = p_uniname->name; + struct 
nls_table *nls = EXFAT_SB(sb)->nls_io; + + + /* strip all trailing spaces */ + end_of_name = p_cstring + STRLEN((INT8 *) p_cstring); + + while (*(--end_of_name) == ' ') { + if (end_of_name < p_cstring) break; + } + *(++end_of_name) = '\0'; + + if (STRCMP((INT8 *) p_cstring, ".") && STRCMP((INT8 *) p_cstring, "..")) { + + /* strip all trailing periods */ + while (*(--end_of_name) == '.') { + if (end_of_name < p_cstring) break; + } + *(++end_of_name) = '\0'; + } + + if (*p_cstring == '\0') + lossy = TRUE; + + i = j = 0; + while (j < (MAX_NAME_LENGTH-1)) { + if (*(p_cstring+i) == '\0') break; + + i += convert_ch_to_uni(nls, uniname, (UINT8 *)(p_cstring+i), &lossy); + + if ((*uniname < 0x0020) || WSTRCHR(bad_uni_chars, *uniname)) + lossy = TRUE; + + SET16_A(upname + j * 2, nls_upper(sb, *uniname)); + + uniname++; + j++; + } + + if (*(p_cstring+i) != '\0') + lossy = TRUE; + *uniname = (UINT16) '\0'; + + p_uniname->name_len = j; + p_uniname->name_hash = calc_checksum_2byte((void *) upname, j<<1, 0, CS_DEFAULT); + + if (p_lossy != NULL) + *p_lossy = lossy; +} /* end of nls_cstring_to_uniname */ + +/*======================================================================*/ +/* Local Function Definitions */ +/*======================================================================*/ + +static INT32 convert_ch_to_uni(struct nls_table *nls, UINT16 *uni, UINT8 *ch, INT32 *lossy) +{ + int len; + + *uni = 0x0; + + if (ch[0] < 0x80) { + *uni = (UINT16) ch[0]; + return(1); + } + + if ((len = nls->char2uni(ch, NLS_MAX_CHARSET_SIZE, uni)) < 0) { + /* conversion failed */ + printk("%s: fail to use nls \n", __func__); + if (lossy != NULL) + *lossy = TRUE; + *uni = (UINT16) '_'; + if (!strcmp(nls->charset, "utf8")) return(1); + else return(2); + } + + return(len); +} /* end of convert_ch_to_uni */ + +static INT32 convert_uni_to_ch(struct nls_table *nls, UINT8 *ch, UINT16 uni, INT32 *lossy) +{ + int len; + + ch[0] = 0x0; + + if (uni < 0x0080) { + ch[0] = (UINT8) uni; + return(1); + } + + if ((len = nls->uni2char(uni, ch, NLS_MAX_CHARSET_SIZE)) < 0) { + /* conversion failed */ + printk("%s: fail to use nls \n", __func__); + if (lossy != NULL) *lossy = TRUE; + ch[0] = '_'; + return(1); + } + + return(len); + +} /* end of convert_uni_to_ch */ + +/* end of exfat_nls.c */ diff --git a/fs/exfat/exfat_nls.h b/fs/exfat/exfat_nls.h new file mode 100644 index 0000000000000..5b14cef02463c --- /dev/null +++ b/fs/exfat/exfat_nls.h @@ -0,0 +1,101 @@ +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +/************************************************************************/ +/* */ +/* PROJECT : exFAT & FAT12/16/32 File System */ +/* FILE : exfat_nls.h */ +/* PURPOSE : Header File for exFAT NLS Manager */ +/* */ +/*----------------------------------------------------------------------*/ +/* NOTES */ +/* */ +/*----------------------------------------------------------------------*/ +/* REVISION HISTORY (Ver 0.9) */ +/* */ +/* - 2010.11.15 [Joosun Hahn] : first writing */ +/* */ +/************************************************************************/ + +#ifndef _EXFAT_NLS_H +#define _EXFAT_NLS_H + +#include + +#include "exfat_config.h" +#include "exfat_global.h" +#include "exfat_api.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + + /*----------------------------------------------------------------------*/ + /* Constant & Macro Definitions */ + /*----------------------------------------------------------------------*/ + +#define NUM_UPCASE 2918 + +#define DOS_CUR_DIR_NAME ". " +#define DOS_PAR_DIR_NAME ".. " + +#ifdef __LITTLE_ENDIAN +#define UNI_CUR_DIR_NAME ".\0" +#define UNI_PAR_DIR_NAME ".\0.\0" +#else +#define UNI_CUR_DIR_NAME "\0." +#define UNI_PAR_DIR_NAME "\0.\0." +#endif + +/*----------------------------------------------------------------------*/ +/* Type Definitions */ +/*----------------------------------------------------------------------*/ + +/* DOS name stucture */ +typedef struct { + UINT8 name[DOS_NAME_LENGTH]; + UINT8 name_case; +} DOS_NAME_T; + +/* unicode name stucture */ +typedef struct { + UINT16 name[MAX_NAME_LENGTH]; + UINT16 name_hash; + UINT8 name_len; +} UNI_NAME_T; + +/*----------------------------------------------------------------------*/ +/* External Function Declarations */ +/*----------------------------------------------------------------------*/ + +/* NLS management function */ +UINT16 nls_upper(struct super_block *sb, UINT16 a); +INT32 nls_dosname_cmp(struct super_block *sb, UINT8 *a, UINT8 *b); +INT32 nls_uniname_cmp(struct super_block *sb, UINT16 *a, UINT16 *b); +void nls_uniname_to_dosname(struct super_block *sb, DOS_NAME_T *p_dosname, UNI_NAME_T *p_uniname, INT32 *p_lossy); +void nls_dosname_to_uniname(struct super_block *sb, UNI_NAME_T *p_uniname, DOS_NAME_T *p_dosname); +void nls_uniname_to_cstring(struct super_block *sb, UINT8 *p_cstring, UNI_NAME_T *p_uniname); +void nls_cstring_to_uniname(struct super_block *sb, UNI_NAME_T *p_uniname, UINT8 *p_cstring, INT32 *p_lossy); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _EXFAT_NLS_H */ + +/* end of exfat_nls.h */ diff --git a/fs/exfat/exfat_oal.c b/fs/exfat/exfat_oal.c new file mode 100644 index 0000000000000..8d1a9b93bcc71 --- /dev/null +++ b/fs/exfat/exfat_oal.c @@ -0,0 +1,189 @@ +/* Some of the source code in this file came from "linux/fs/fat/misc.c". */ +/* + * linux/fs/fat/misc.c + * + * Written 1992,1993 by Werner Almesberger + * 22/11/2000 - Fixed fat_date_unix2dos for dates earlier than 01/01/1980 + * and date_dos2unix for date==0 by Igor Zhbanov(bsg@uniyar.ac.ru) + */ + +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/************************************************************************/ +/* */ +/* PROJECT : exFAT & FAT12/16/32 File System */ +/* FILE : exfat_oal.c */ +/* PURPOSE : exFAT OS Adaptation Layer */ +/* (Semaphore Functions & Real-Time Clock Functions) */ +/* */ +/*----------------------------------------------------------------------*/ +/* NOTES */ +/* */ +/*----------------------------------------------------------------------*/ +/* REVISION HISTORY (Ver 0.9) */ +/* */ +/* - 2010.11.15 [Joosun Hahn] : first writing */ +/* */ +/************************************************************************/ + +#include +#include + +#include "exfat_config.h" +#include "exfat_global.h" +#include "exfat_api.h" +#include "exfat_oal.h" + +/*======================================================================*/ +/* */ +/* SEMAPHORE FUNCTIONS */ +/* */ +/*======================================================================*/ + +DECLARE_MUTEX(z_sem); + +INT32 sm_init(struct semaphore *sm) +{ + sema_init(sm, 1); + return(0); +} /* end of sm_init */ + +INT32 sm_P(struct semaphore *sm) +{ + down(sm); + return 0; +} /* end of sm_P */ + +void sm_V(struct semaphore *sm) +{ + up(sm); +} /* end of sm_V */ + + +/*======================================================================*/ +/* */ +/* REAL-TIME CLOCK FUNCTIONS */ +/* */ +/*======================================================================*/ + +extern struct timezone sys_tz; + +/* + * The epoch of FAT timestamp is 1980. + * : bits : value + * date: 0 - 4: day (1 - 31) + * date: 5 - 8: month (1 - 12) + * date: 9 - 15: year (0 - 127) from 1980 + * time: 0 - 4: sec (0 - 29) 2sec counts + * time: 5 - 10: min (0 - 59) + * time: 11 - 15: hour (0 - 23) + */ +#define UNIX_SECS_1980 315532800L + +#if BITS_PER_LONG == 64 +#define UNIX_SECS_2108 4354819200L +#endif +/* days between 1.1.70 and 1.1.80 (2 leap days) */ +#define DAYS_DELTA_DECADE (365 * 10 + 2) +/* 120 (2100 - 1980) isn't leap year */ +#define NO_LEAP_YEAR_2100 (120) +#define IS_LEAP_YEAR(y) (!((y) & 3) && (y) != NO_LEAP_YEAR_2100) + +#define SECS_PER_MIN (60) +#define SECS_PER_HOUR (60 * SECS_PER_MIN) +#define SECS_PER_DAY (24 * SECS_PER_HOUR) + +#define MAKE_LEAP_YEAR(leap_year, year) \ + do { \ + if (unlikely(year > NO_LEAP_YEAR_2100)) \ + leap_year = ((year + 3) / 4) - 1; \ + else \ + leap_year = ((year + 3) / 4); \ + } while(0) + +/* Linear day numbers of the respective 1sts in non-leap years. */ +static time_t accum_days_in_year[] = { + /* Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec */ + 0, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0, +}; + +TIMESTAMP_T *tm_current(TIMESTAMP_T *tp) +{ + struct timespec ts = CURRENT_TIME_SEC; + time_t second = ts.tv_sec; + time_t day, leap_day, month, year; + + second -= sys_tz.tz_minuteswest * SECS_PER_MIN; + + /* Jan 1 GMT 00:00:00 1980. But what about another time zone? 
*/ + if (second < UNIX_SECS_1980) { + tp->sec = 0; + tp->min = 0; + tp->hour = 0; + tp->day = 1; + tp->mon = 1; + tp->year = 0; + return(tp); + } +#if BITS_PER_LONG == 64 + if (second >= UNIX_SECS_2108) { + tp->sec = 59; + tp->min = 59; + tp->hour = 23; + tp->day = 31; + tp->mon = 12; + tp->year = 127; + return(tp); + } +#endif + + day = second / SECS_PER_DAY - DAYS_DELTA_DECADE; + year = day / 365; + + MAKE_LEAP_YEAR(leap_day, year); + if (year * 365 + leap_day > day) + year--; + + MAKE_LEAP_YEAR(leap_day, year); + + day -= year * 365 + leap_day; + + if (IS_LEAP_YEAR(year) && day == accum_days_in_year[3]) { + month = 2; + } else { + if (IS_LEAP_YEAR(year) && day > accum_days_in_year[3]) + day--; + for (month = 1; month < 12; month++) { + if (accum_days_in_year[month + 1] > day) + break; + } + } + day -= accum_days_in_year[month]; + + tp->sec = second % SECS_PER_MIN; + tp->min = (second / SECS_PER_MIN) % 60; + tp->hour = (second / SECS_PER_HOUR) % 24; + tp->day = day + 1; + tp->mon = month; + tp->year = year; + + return(tp); +} /* end of tm_current */ + +/* end of exfat_oal.c */ diff --git a/fs/exfat/exfat_oal.h b/fs/exfat/exfat_oal.h new file mode 100644 index 0000000000000..ca3f956eeadcc --- /dev/null +++ b/fs/exfat/exfat_oal.h @@ -0,0 +1,88 @@ +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +/************************************************************************/ +/* */ +/* PROJECT : exFAT & FAT12/16/32 File System */ +/* FILE : exfat_oal.h */ +/* PURPOSE : Header File for exFAT OS Adaptation Layer */ +/* (Semaphore Functions & Real-Time Clock Functions) */ +/* */ +/*----------------------------------------------------------------------*/ +/* NOTES */ +/* */ +/*----------------------------------------------------------------------*/ +/* REVISION HISTORY (Ver 0.9) */ +/* */ +/* - 2010.11.15 [Joosun Hahn] : first writing */ +/* */ +/************************************************************************/ + +#ifndef _EXFAT_OAL_H +#define _EXFAT_OAL_H + +#include "exfat_config.h" +#include "exfat_global.h" +#include + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + + /*----------------------------------------------------------------------*/ + /* Constant & Macro Definitions (Configurable) */ + /*----------------------------------------------------------------------*/ + + /*----------------------------------------------------------------------*/ + /* Constant & Macro Definitions (Non-Configurable) */ + /*----------------------------------------------------------------------*/ + + /*----------------------------------------------------------------------*/ + /* Type Definitions */ + /*----------------------------------------------------------------------*/ + + typedef struct { + UINT16 sec; /* 0 ~ 59 */ + UINT16 min; /* 0 ~ 59 */ + UINT16 hour; /* 0 ~ 23 */ + UINT16 day; /* 1 ~ 31 */ + UINT16 mon; /* 1 ~ 12 */ + UINT16 year; /* 0 ~ 127 (since 1980) */ + } TIMESTAMP_T; + + /*----------------------------------------------------------------------*/ + /* External Function Declarations */ + /*----------------------------------------------------------------------*/ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) +#define DECLARE_MUTEX(m) DEFINE_SEMAPHORE(m) +#endif + + INT32 sm_init(struct semaphore *sm); + INT32 sm_P(struct semaphore *sm); + void sm_V(struct semaphore *sm); + + TIMESTAMP_T *tm_current(TIMESTAMP_T *tm); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _EXFAT_OAL_H */ + +/* end of exfat_oal.h */ diff --git a/fs/exfat/exfat_part.h b/fs/exfat/exfat_part.h new file mode 100644 index 0000000000000..94dd8862f5e12 --- /dev/null +++ b/fs/exfat/exfat_part.h @@ -0,0 +1,92 @@ +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +/************************************************************************/ +/* */ +/* PROJECT : exFAT & FAT12/16/32 File System */ +/* FILE : exfat_part.h */ +/* PURPOSE : Header File for exFAT Partition Manager */ +/* */ +/*----------------------------------------------------------------------*/ +/* NOTES */ +/* */ +/*----------------------------------------------------------------------*/ +/* REVISION HISTORY (Ver 0.9) */ +/* */ +/* - 2010.11.15 [Joosun Hahn] : first writing */ +/* */ +/************************************************************************/ + +#ifndef _EXFAT_PART_H +#define _EXFAT_PART_H + +#include "exfat_config.h" +#include "exfat_global.h" +#include "exfat_api.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + + /*----------------------------------------------------------------------*/ + /* Constant & Macro Definitions */ + /*----------------------------------------------------------------------*/ + +#define MBR_SIGNATURE 0xAA55 + + /*----------------------------------------------------------------------*/ + /* Type Definitions */ + /*----------------------------------------------------------------------*/ + + /* MS-DOS FAT master boot record (512 bytes) */ + typedef struct { + UINT8 boot_code[446]; + UINT8 partition[64]; + UINT8 signature[2]; + } MBR_SECTOR_T; + + /* MS-DOS FAT partition table (64 bytes) */ + typedef struct { + UINT8 def_boot; + UINT8 bgn_chs[3]; + UINT8 sys_type; + UINT8 end_chs[3]; + UINT8 start_sector[4]; + UINT8 num_sectors[4]; + } PART_ENTRY_T; + + /*----------------------------------------------------------------------*/ + /* External Function Declarations */ + /*----------------------------------------------------------------------*/ + + /* volume management functions */ + INT32 ffsSetPartition(INT32 dev, INT32 num_vol, PART_INFO_T *vol_spec); + INT32 ffsGetPartition(INT32 dev, INT32 *num_vol, PART_INFO_T *vol_spec); + INT32 ffsGetDevInfo(INT32 dev, DEV_INFO_T *info); + + /*----------------------------------------------------------------------*/ + /* External Function Declarations (NOT TO UPPER LAYER) */ + /*----------------------------------------------------------------------*/ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _EXFAT_PART_H */ + +/* end of exfat_part.h */ diff --git a/fs/exfat/exfat_super.c b/fs/exfat/exfat_super.c new file mode 100644 index 0000000000000..6524508739ba1 --- /dev/null +++ b/fs/exfat/exfat_super.c @@ -0,0 +1,2301 @@ +/* Some of the source code in this file came from "linux/fs/fat/file.c","linux/fs/fat/inode.c" and "linux/fs/fat/misc.c". */ +/* + * linux/fs/fat/file.c + * + * Written 1992,1993 by Werner Almesberger + * + * regular file handling primitives for fat-based filesystems + */ + +/* + * linux/fs/fat/inode.c + * + * Written 1992,1993 by Werner Almesberger + * VFAT extensions by Gordon Chaffee, merged with msdos fs by Henrik Storner + * Rewritten for the constant inumbers support by Al Viro + * + * Fixes: + * + * Max Cohan: Fixed invalid FSINFO offset when info_sector is 0 + */ + +/* + * linux/fs/fat/misc.c + * + * Written 1992,1993 by Werner Almesberger + * 22/11/2000 - Fixed fat_date_unix2dos for dates earlier than 01/01/1980 + * and date_dos2unix for date==0 by Igor Zhbanov(bsg@uniyar.ac.ru) + */ + +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) +#include +#endif + +#include "exfat_version.h" +#include "exfat_config.h" +#include "exfat_global.h" +#include "exfat_data.h" +#include "exfat_oal.h" + +#include "exfat_blkdev.h" +#include "exfat_cache.h" +#include "exfat_part.h" +#include "exfat_nls.h" +#include "exfat_api.h" +#include "exfat.h" + +#include "exfat_super.h" + +static struct kmem_cache *exfat_inode_cachep; + +static int exfat_default_codepage = CONFIG_EXFAT_DEFAULT_CODEPAGE; +static char exfat_default_iocharset[] = CONFIG_EXFAT_DEFAULT_IOCHARSET; + +extern struct timezone sys_tz; + +#define CHECK_ERR(x) BUG_ON(x) +#define ELAPSED_TIME 0 + +#if (ELAPSED_TIME == 1) +#include + +static UINT32 __t1, __t2; +static UINT32 get_current_msec(void) +{ + struct timeval tm; + do_gettimeofday(&tm); + return((UINT32)(tm.tv_sec*1000000 + tm.tv_usec)); +} +#define TIME_START() do {__t1 = get_current_msec();} while (0) +#define TIME_END() do {__t2 = get_current_msec();} while (0) +#define PRINT_TIME(n) do {printk("[EXFAT] Elapsed time %d = %d (usec)\n", n, (__t2 - __t1));} while (0) +#else +#define TIME_START() +#define TIME_END() +#define PRINT_TIME(n) +#endif + +#define UNIX_SECS_1980 315532800L + +#if BITS_PER_LONG == 64 +#define UNIX_SECS_2108 4354819200L +#endif +/* days between 1.1.70 and 1.1.80 (2 leap days) */ +#define DAYS_DELTA_DECADE (365 * 10 + 2) +/* 120 (2100 - 1980) isn't leap year */ +#define NO_LEAP_YEAR_2100 (120) +#define IS_LEAP_YEAR(y) (!((y) & 0x3) && (y) != NO_LEAP_YEAR_2100) + +#define SECS_PER_MIN (60) +#define SECS_PER_HOUR (60 * SECS_PER_MIN) +#define SECS_PER_DAY (24 * SECS_PER_HOUR) + +#define MAKE_LEAP_YEAR(leap_year, year) \ + do { \ + if (unlikely(year > NO_LEAP_YEAR_2100)) \ + leap_year = ((year + 3) / 4) - 1; \ + else \ + leap_year = ((year + 3) / 4); \ + } while(0) + +/* Linear day numbers of the respective 1sts in non-leap years. */ +static time_t accum_days_in_year[] = { + /* Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec */ + 0, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0, +}; + +static void _exfat_truncate(struct inode *inode, loff_t old_size); + +/* Convert a FAT time/date pair to a UNIX date (seconds since 1 1 70). 
*/ +void exfat_time_fat2unix(struct exfat_sb_info *sbi, struct timespec *ts, + DATE_TIME_T *tp) +{ + time_t year = tp->Year; + time_t ld; + + MAKE_LEAP_YEAR(ld, year); + + if (IS_LEAP_YEAR(year) && (tp->Month) > 2) + ld++; + + ts->tv_sec = tp->Second + tp->Minute * SECS_PER_MIN + + tp->Hour * SECS_PER_HOUR + + (year * 365 + ld + accum_days_in_year[(tp->Month)] + (tp->Day - 1) + DAYS_DELTA_DECADE) * SECS_PER_DAY + + sys_tz.tz_minuteswest * SECS_PER_MIN; + ts->tv_nsec = 0; +} + +/* Convert linear UNIX date to a FAT time/date pair. */ +void exfat_time_unix2fat(struct exfat_sb_info *sbi, struct timespec *ts, + DATE_TIME_T *tp) +{ + time_t second = ts->tv_sec; + time_t day, month, year; + time_t ld; + + second -= sys_tz.tz_minuteswest * SECS_PER_MIN; + + /* Jan 1 GMT 00:00:00 1980. But what about another time zone? */ + if (second < UNIX_SECS_1980) { + tp->Second = 0; + tp->Minute = 0; + tp->Hour = 0; + tp->Day = 1; + tp->Month = 1; + tp->Year = 0; + return; + } +#if (BITS_PER_LONG == 64) + if (second >= UNIX_SECS_2108) { + tp->Second = 59; + tp->Minute = 59; + tp->Hour = 23; + tp->Day = 31; + tp->Month = 12; + tp->Year = 127; + return; + } +#endif + day = second / SECS_PER_DAY - DAYS_DELTA_DECADE; + year = day / 365; + MAKE_LEAP_YEAR(ld, year); + if (year * 365 + ld > day) + year--; + + MAKE_LEAP_YEAR(ld, year); + day -= year * 365 + ld; + + if (IS_LEAP_YEAR(year) && day == accum_days_in_year[3]) { + month = 2; + } else { + if (IS_LEAP_YEAR(year) && day > accum_days_in_year[3]) + day--; + for (month = 1; month < 12; month++) { + if (accum_days_in_year[month + 1] > day) + break; + } + } + day -= accum_days_in_year[month]; + + tp->Second = second % SECS_PER_MIN; + tp->Minute = (second / SECS_PER_MIN) % 60; + tp->Hour = (second / SECS_PER_HOUR) % 24; + tp->Day = day + 1; + tp->Month = month; + tp->Year = year; +} + +static struct inode *exfat_iget(struct super_block *sb, loff_t i_pos); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) +static int exfat_generic_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); +#else +static long exfat_generic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +#endif +static int exfat_sync_inode(struct inode *inode); +static struct inode *exfat_build_inode(struct super_block *sb, FILE_ID_T *fid, loff_t i_pos); +static void exfat_detach(struct inode *inode); +static void exfat_attach(struct inode *inode, loff_t i_pos); +static inline unsigned long exfat_hash(loff_t i_pos); +static int exfat_write_inode(struct inode *inode, struct writeback_control *wbc); +static void exfat_write_super(struct super_block *sb); + +static void __lock_super(struct super_block *sb) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) + lock_super(sb); +#else + struct exfat_sb_info *sbi = EXFAT_SB(sb); + mutex_lock(&sbi->s_lock); +#endif +} + +static void __unlock_super(struct super_block *sb) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) + unlock_super(sb); +#else + struct exfat_sb_info *sbi = EXFAT_SB(sb); + mutex_unlock(&sbi->s_lock); +#endif +} + +static int __is_sb_dirty(struct super_block *sb) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) + return sb->s_dirt; +#else + struct exfat_sb_info *sbi = EXFAT_SB(sb); + return sbi->s_dirt; +#endif +} + +static void __set_sb_clean(struct super_block *sb) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) + sb->s_dirt = 0; +#else + struct exfat_sb_info *sbi = EXFAT_SB(sb); + sbi->s_dirt = 0; +#endif +} + +/*======================================================================*/ +/* 
Directory Entry Operations */ +/*======================================================================*/ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0) +static int exfat_readdir(struct file *filp, struct dir_context *ctx) +#else +static int exfat_readdir(struct file *filp, void *dirent, filldir_t filldir) +#endif +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0) + struct inode *inode = file_inode(filp); +#else + struct inode *inode = filp->f_path.dentry->d_inode; +#endif + struct super_block *sb = inode->i_sb; + struct exfat_sb_info *sbi = EXFAT_SB(sb); + FS_INFO_T *p_fs = &(sbi->fs_info); + BD_INFO_T *p_bd = &(EXFAT_SB(sb)->bd_info); + DIR_ENTRY_T de; + unsigned long inum; + loff_t cpos; + int err = 0; + + __lock_super(sb); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0) + cpos = ctx->pos; +#else + cpos = filp->f_pos; +#endif + /* Fake . and .. for the root directory. */ + if ((p_fs->vol_type == EXFAT) || (inode->i_ino == EXFAT_ROOT_INO)) { + while (cpos < 2) { + if (inode->i_ino == EXFAT_ROOT_INO) + inum = EXFAT_ROOT_INO; + else if (cpos == 0) + inum = inode->i_ino; + else /* (cpos == 1) */ + inum = parent_ino(filp->f_path.dentry); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0) + if (!dir_emit_dots(filp, ctx)) +#else + if (filldir(dirent, "..", cpos+1, cpos, inum, DT_DIR) < 0) +#endif + goto out; + cpos++; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0) + ctx->pos++; +#else + filp->f_pos++; +#endif + } + if (cpos == 2) { + cpos = 0; + } + } + if (cpos & (DENTRY_SIZE - 1)) { + err = -ENOENT; + goto out; + } + +get_new: + EXFAT_I(inode)->fid.size = i_size_read(inode); + EXFAT_I(inode)->fid.rwoffset = cpos >> DENTRY_SIZE_BITS; + + err = FsReadDir(inode, &de); + if (err) { + /* at least we tried to read a sector + * move cpos to next sector position (should be aligned) + */ + if (err == FFS_MEDIAERR) { + cpos += 1 << p_bd->sector_size_bits; + cpos &= ~((1 << p_bd->sector_size_bits)-1); + } + + err = -EIO; + goto end_of_dir; + } + + cpos = EXFAT_I(inode)->fid.rwoffset << DENTRY_SIZE_BITS; + + if (!de.Name[0]) + goto end_of_dir; + + if (!memcmp(de.ShortName, DOS_CUR_DIR_NAME, DOS_NAME_LENGTH)) { + inum = inode->i_ino; + } else if (!memcmp(de.ShortName, DOS_PAR_DIR_NAME, DOS_NAME_LENGTH)) { + inum = parent_ino(filp->f_path.dentry); + } else { + loff_t i_pos = ((loff_t) EXFAT_I(inode)->fid.start_clu << 32) | + ((EXFAT_I(inode)->fid.rwoffset-1) & 0xffffffff); + + struct inode *tmp = exfat_iget(sb, i_pos); + if (tmp) { + inum = tmp->i_ino; + iput(tmp); + } else { + inum = iunique(sb, EXFAT_ROOT_INO); + } + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0) + if (!dir_emit(ctx, de.Name, strlen(de.Name), inum, + (de.Attr & ATTR_SUBDIR) ? DT_DIR : DT_REG)) +#else + if (filldir(dirent, de.Name, strlen(de.Name), cpos-1, inum, + (de.Attr & ATTR_SUBDIR) ? 
DT_DIR : DT_REG) < 0)
+#endif
+ goto out;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)
+ ctx->pos = cpos;
+#else
+ filp->f_pos = cpos;
+#endif
+ goto get_new;
+
+end_of_dir:
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)
+ ctx->pos = cpos;
+#else
+ filp->f_pos = cpos;
+#endif
+out:
+ __unlock_super(sb);
+ return err;
+}
+
+static int exfat_ioctl_volume_id(struct inode *dir)
+{
+ struct super_block *sb = dir->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ FS_INFO_T *p_fs = &(sbi->fs_info);
+
+ return p_fs->vol_id;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
+static int exfat_generic_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+#else
+static long exfat_generic_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+#endif
+{
+#if !(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36))
+ /* not passed in as an argument on newer kernels, but still needed by
+ * EXFAT_IOCTL_GET_VOLUME_ID, so it must not depend on EXFAT_CONFIG_KERNEL_DEBUG */
+ struct inode *inode = filp->f_dentry->d_inode;
+#endif
+#if EXFAT_CONFIG_KERNEL_DEBUG
+ unsigned int flags;
+#endif /* EXFAT_CONFIG_KERNEL_DEBUG */
+
+ switch (cmd) {
+ case EXFAT_IOCTL_GET_VOLUME_ID:
+ return exfat_ioctl_volume_id(inode);
+#if EXFAT_CONFIG_KERNEL_DEBUG
+ case EXFAT_IOC_GET_DEBUGFLAGS: {
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ flags = sbi->debug_flags;
+ return put_user(flags, (int __user *)arg);
+ }
+ case EXFAT_IOC_SET_DEBUGFLAGS: {
+ struct super_block *sb = inode->i_sb;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (get_user(flags, (int __user *) arg))
+ return -EFAULT;
+
+ __lock_super(sb);
+ sbi->debug_flags = flags;
+ __unlock_super(sb);
+
+ return 0;
+ }
+#endif /* EXFAT_CONFIG_KERNEL_DEBUG */
+ default:
+ return -ENOTTY; /* Inappropriate ioctl for device */
+ }
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
+static int exfat_file_fsync(struct file *filp, int datasync)
+{
+ struct inode *inode = filp->f_mapping->host;
+ struct super_block *sb = inode->i_sb;
+ int res, err;
+
+ res = generic_file_fsync(filp, datasync);
+ err = FsSyncVol(sb, 1);
+
+ return res ?
res : err; +} +#endif + +const struct file_operations exfat_dir_operations = { + .llseek = generic_file_llseek, + .read = generic_read_dir, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0) + .iterate = exfat_readdir, +#else + .readdir = exfat_readdir, +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) + .ioctl = exfat_generic_ioctl, + .fsync = exfat_file_fsync, +#else + .unlocked_ioctl = exfat_generic_ioctl, + .fsync = generic_file_fsync, +#endif +}; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,00) +static int exfat_create(struct inode *dir, struct dentry *dentry, umode_t mode, + bool excl) +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,00) +static int exfat_create(struct inode *dir, struct dentry *dentry, umode_t mode, + struct nameidata *nd) +#else +static int exfat_create(struct inode *dir, struct dentry *dentry, int mode, + struct nameidata *nd) +#endif +{ + struct super_block *sb = dir->i_sb; + struct inode *inode; + struct timespec ts; + FILE_ID_T fid; + loff_t i_pos; + int err; + + __lock_super(sb); + + PRINTK("exfat_create entered\n"); + + ts = CURRENT_TIME_SEC; + + err = FsCreateFile(dir, (UINT8 *) dentry->d_name.name, FM_REGULAR, &fid); + if (err) { + if (err == FFS_INVALIDPATH) + err = -EINVAL; + else if (err == FFS_FILEEXIST) + err = -EEXIST; + else if (err == FFS_FULL) + err = -ENOSPC; + else if (err == FFS_NAMETOOLONG) + err = -ENAMETOOLONG; + else + err = -EIO; + goto out; + } + dir->i_version++; + dir->i_ctime = dir->i_mtime = dir->i_atime = ts; + if (IS_DIRSYNC(dir)) + (void) exfat_sync_inode(dir); + else + mark_inode_dirty(dir); + + i_pos = ((loff_t) fid.dir.dir << 32) | (fid.entry & 0xffffffff); + + inode = exfat_build_inode(sb, &fid, i_pos); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + goto out; + } + inode->i_version++; + inode->i_mtime = inode->i_atime = inode->i_ctime = ts; + /* timestamp is already written, so mark_inode_dirty() is unnecessary. 
*/ + + dentry->d_time = dentry->d_parent->d_inode->i_version; + d_instantiate(dentry, inode); + +out: + __unlock_super(sb); + PRINTK("exfat_create exited\n"); + return err; +} + +static int exfat_find(struct inode *dir, struct qstr *qname, + FILE_ID_T *fid) +{ + int err; + + if (qname->len == 0) + return -ENOENT; + + err = FsLookupFile(dir, (UINT8 *) qname->name, fid); + if (err) + return -ENOENT; + + return 0; +} + +static int exfat_d_anon_disconn(struct dentry *dentry) +{ + return IS_ROOT(dentry) && (dentry->d_flags & DCACHE_DISCONNECTED); +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,00) +static struct dentry *exfat_lookup(struct inode *dir, struct dentry *dentry, + unsigned int flags) +#else +static struct dentry *exfat_lookup(struct inode *dir, struct dentry *dentry, + struct nameidata *nd) +#endif +{ + struct super_block *sb = dir->i_sb; + struct inode *inode; + struct dentry *alias; + int err; + FILE_ID_T fid; + loff_t i_pos; + UINT64 ret; + mode_t i_mode; + + __lock_super(sb); + PRINTK("exfat_lookup entered\n"); + err = exfat_find(dir, &dentry->d_name, &fid); + if (err) { + if (err == -ENOENT) { + inode = NULL; + goto out; + } + goto error; + } + + i_pos = ((loff_t) fid.dir.dir << 32) | (fid.entry & 0xffffffff); + inode = exfat_build_inode(sb, &fid, i_pos); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + goto error; + } + + i_mode = inode->i_mode; + if (S_ISLNK(i_mode)) { + EXFAT_I(inode)->target = MALLOC(i_size_read(inode)+1); + if (!EXFAT_I(inode)->target) { + err = -ENOMEM; + goto error; + } + FsReadFile(dir, &fid, EXFAT_I(inode)->target, i_size_read(inode), &ret); + *(EXFAT_I(inode)->target + i_size_read(inode)) = '\0'; + } + + alias = d_find_alias(inode); + if (alias && !exfat_d_anon_disconn(alias)) { + CHECK_ERR(d_unhashed(alias)); + if (!S_ISDIR(i_mode)) + d_move(alias, dentry); + iput(inode); + __unlock_super(sb); + PRINTK("exfat_lookup exited 1\n"); + return alias; + } else { + dput(alias); + } +out: + __unlock_super(sb); + dentry->d_time = dentry->d_parent->d_inode->i_version; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) + dentry->d_op = sb->s_root->d_op; + dentry = d_splice_alias(inode, dentry); + if (dentry) { + dentry->d_op = sb->s_root->d_op; + dentry->d_time = dentry->d_parent->d_inode->i_version; + } +#else + dentry = d_splice_alias(inode, dentry); + if (dentry) + dentry->d_time = dentry->d_parent->d_inode->i_version; +#endif + PRINTK("exfat_lookup exited 2\n"); + return dentry; + +error: + __unlock_super(sb); + PRINTK("exfat_lookup exited 3\n"); + return ERR_PTR(err); +} + +static int exfat_unlink(struct inode *dir, struct dentry *dentry) +{ + struct inode *inode = dentry->d_inode; + struct super_block *sb = dir->i_sb; + struct timespec ts; + int err; + + __lock_super(sb); + + PRINTK("exfat_unlink entered\n"); + + ts = CURRENT_TIME_SEC; + + EXFAT_I(inode)->fid.size = i_size_read(inode); + + err = FsRemoveFile(dir, &(EXFAT_I(inode)->fid)); + if (err) { + if (err == FFS_PERMISSIONERR) + err = -EPERM; + else + err = -EIO; + goto out; + } + dir->i_version++; + dir->i_mtime = dir->i_atime = ts; + if (IS_DIRSYNC(dir)) + (void) exfat_sync_inode(dir); + else + mark_inode_dirty(dir); + + clear_nlink(inode); + inode->i_mtime = inode->i_atime = ts; + exfat_detach(inode); + remove_inode_hash(inode); + +out: + __unlock_super(sb); + PRINTK("exfat_unlink exited\n"); + return err; +} + +static int exfat_symlink(struct inode *dir, struct dentry *dentry, const char *target) +{ + struct super_block *sb = dir->i_sb; + struct inode *inode; + struct timespec ts; + FILE_ID_T 
fid; + loff_t i_pos; + int err; + UINT64 len = (UINT64) strlen(target); + UINT64 ret; + + __lock_super(sb); + + PRINTK("exfat_symlink entered\n"); + + ts = CURRENT_TIME_SEC; + + err = FsCreateFile(dir, (UINT8 *) dentry->d_name.name, FM_SYMLINK, &fid); + if (err) { + if (err == FFS_INVALIDPATH) + err = -EINVAL; + else if (err == FFS_FILEEXIST) + err = -EEXIST; + else if (err == FFS_FULL) + err = -ENOSPC; + else + err = -EIO; + goto out; + } + + err = FsWriteFile(dir, &fid, (char *) target, len, &ret); + + if (err) { + FsRemoveFile(dir, &fid); + + if (err == FFS_FULL) + err = -ENOSPC; + else + err = -EIO; + goto out; + } + + dir->i_version++; + dir->i_ctime = dir->i_mtime = dir->i_atime = ts; + if (IS_DIRSYNC(dir)) + (void) exfat_sync_inode(dir); + else + mark_inode_dirty(dir); + + i_pos = ((loff_t) fid.dir.dir << 32) | (fid.entry & 0xffffffff); + + inode = exfat_build_inode(sb, &fid, i_pos); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + goto out; + } + inode->i_version++; + inode->i_mtime = inode->i_atime = inode->i_ctime = ts; + /* timestamp is already written, so mark_inode_dirty() is unneeded. */ + + EXFAT_I(inode)->target = MALLOC(len+1); + if (!EXFAT_I(inode)->target) { + err = -ENOMEM; + goto out; + } + MEMCPY(EXFAT_I(inode)->target, target, len+1); + + dentry->d_time = dentry->d_parent->d_inode->i_version; + d_instantiate(dentry, inode); + +out: + __unlock_super(sb); + PRINTK("exfat_symlink exited\n"); + return err; +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,00) +static int exfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) +#else +static int exfat_mkdir(struct inode *dir, struct dentry *dentry, int mode) +#endif +{ + struct super_block *sb = dir->i_sb; + struct inode *inode; + struct timespec ts; + FILE_ID_T fid; + loff_t i_pos; + int err; + + __lock_super(sb); + + PRINTK("exfat_mkdir entered\n"); + + ts = CURRENT_TIME_SEC; + + err = FsCreateDir(dir, (UINT8 *) dentry->d_name.name, &fid); + if (err) { + if (err == FFS_INVALIDPATH) + err = -EINVAL; + else if (err == FFS_FILEEXIST) + err = -EEXIST; + else if (err == FFS_FULL) + err = -ENOSPC; + else if (err == FFS_NAMETOOLONG) + err = -ENAMETOOLONG; + else + err = -EIO; + goto out; + } + dir->i_version++; + dir->i_ctime = dir->i_mtime = dir->i_atime = ts; + if (IS_DIRSYNC(dir)) + (void) exfat_sync_inode(dir); + else + mark_inode_dirty(dir); + inc_nlink(dir); + + i_pos = ((loff_t) fid.dir.dir << 32) | (fid.entry & 0xffffffff); + + inode = exfat_build_inode(sb, &fid, i_pos); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + goto out; + } + inode->i_version++; + inode->i_mtime = inode->i_atime = inode->i_ctime = ts; + /* timestamp is already written, so mark_inode_dirty() is unneeded. 
*/ + + dentry->d_time = dentry->d_parent->d_inode->i_version; + d_instantiate(dentry, inode); + +out: + __unlock_super(sb); + PRINTK("exfat_mkdir exited\n"); + return err; +} + +static int exfat_rmdir(struct inode *dir, struct dentry *dentry) +{ + struct inode *inode = dentry->d_inode; + struct super_block *sb = dir->i_sb; + struct timespec ts; + int err; + + __lock_super(sb); + + PRINTK("exfat_rmdir entered\n"); + + ts = CURRENT_TIME_SEC; + + EXFAT_I(inode)->fid.size = i_size_read(inode); + + err = FsRemoveDir(dir, &(EXFAT_I(inode)->fid)); + if (err) { + if (err == FFS_INVALIDPATH) + err = -EINVAL; + else if (err == FFS_FILEEXIST) + err = -ENOTEMPTY; + else if (err == FFS_NOTFOUND) + err = -ENOENT; + else if (err == FFS_DIRBUSY) + err = -EBUSY; + else + err = -EIO; + goto out; + } + dir->i_version++; + dir->i_mtime = dir->i_atime = ts; + if (IS_DIRSYNC(dir)) + (void) exfat_sync_inode(dir); + else + mark_inode_dirty(dir); + drop_nlink(dir); + + clear_nlink(inode); + inode->i_mtime = inode->i_atime = ts; + exfat_detach(inode); + remove_inode_hash(inode); + +out: + __unlock_super(sb); + PRINTK("exfat_rmdir exited\n"); + return err; +} + +static int exfat_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry) +{ + struct inode *old_inode, *new_inode; + struct super_block *sb = old_dir->i_sb; + struct timespec ts; + loff_t i_pos; + int err; + + __lock_super(sb); + + PRINTK("exfat_rename entered\n"); + + old_inode = old_dentry->d_inode; + new_inode = new_dentry->d_inode; + + ts = CURRENT_TIME_SEC; + + EXFAT_I(old_inode)->fid.size = i_size_read(old_inode); + + err = FsMoveFile(old_dir, &(EXFAT_I(old_inode)->fid), new_dir, new_dentry); + if (err) { + if (err == FFS_PERMISSIONERR) + err = -EPERM; + else if (err == FFS_INVALIDPATH) + err = -EINVAL; + else if (err == FFS_FILEEXIST) + err = -EEXIST; + else if (err == FFS_NOTFOUND) + err = -ENOENT; + else if (err == FFS_FULL) + err = -ENOSPC; + else + err = -EIO; + goto out; + } + new_dir->i_version++; + new_dir->i_ctime = new_dir->i_mtime = new_dir->i_atime = ts; + if (IS_DIRSYNC(new_dir)) + (void) exfat_sync_inode(new_dir); + else + mark_inode_dirty(new_dir); + + i_pos = ((loff_t) EXFAT_I(old_inode)->fid.dir.dir << 32) | + (EXFAT_I(old_inode)->fid.entry & 0xffffffff); + + exfat_detach(old_inode); + exfat_attach(old_inode, i_pos); + if (IS_DIRSYNC(new_dir)) + (void) exfat_sync_inode(old_inode); + else + mark_inode_dirty(old_inode); + + if ((S_ISDIR(old_inode->i_mode)) && (old_dir != new_dir)) { + drop_nlink(old_dir); + if (!new_inode) inc_nlink(new_dir); + } + + old_dir->i_version++; + old_dir->i_ctime = old_dir->i_mtime = ts; + if (IS_DIRSYNC(old_dir)) + (void) exfat_sync_inode(old_dir); + else + mark_inode_dirty(old_dir); + + if (new_inode) { + exfat_detach(new_inode); + drop_nlink(new_inode); + if (S_ISDIR(new_inode->i_mode)) + drop_nlink(new_inode); + new_inode->i_ctime = ts; + } + +out: + __unlock_super(sb); + PRINTK("exfat_rename exited\n"); + return err; +} + +static int exfat_cont_expand(struct inode *inode, loff_t size) +{ + struct address_space *mapping = inode->i_mapping; + loff_t start = i_size_read(inode), count = size - i_size_read(inode); + int err, err2; + + if ((err = generic_cont_expand_simple(inode, size)) != 0) + return err; + + inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC; + mark_inode_dirty(inode); + + if (IS_SYNC(inode)) { + err = filemap_fdatawrite_range(mapping, start, start + count - 1); + err2 = sync_mapping_buffers(mapping); + err = (err)?(err):(err2); + err2 = 
write_inode_now(inode, 1); + err = (err)?(err):(err2); + if (!err) { + err = filemap_fdatawait_range(mapping, start, start + count - 1); + } + } + return err; +} + +static int exfat_allow_set_time(struct exfat_sb_info *sbi, struct inode *inode) +{ + mode_t allow_utime = sbi->options.allow_utime; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) + if (!uid_eq(current_fsuid(), inode->i_uid)) +#else + if (current_fsuid() != inode->i_uid) +#endif + { + if (in_group_p(inode->i_gid)) + allow_utime >>= 3; + if (allow_utime & MAY_WRITE) + return 1; + } + + /* use a default check */ + return 0; +} + +static int exfat_sanitize_mode(const struct exfat_sb_info *sbi, + struct inode *inode, umode_t *mode_ptr) +{ + mode_t i_mode, mask, perm; + + i_mode = inode->i_mode; + + if (S_ISREG(i_mode) || S_ISLNK(i_mode)) + mask = sbi->options.fs_fmask; + else + mask = sbi->options.fs_dmask; + + perm = *mode_ptr & ~(S_IFMT | mask); + + /* Of the r and x bits, all (subject to umask) must be present.*/ + if ((perm & (S_IRUGO | S_IXUGO)) != (i_mode & (S_IRUGO|S_IXUGO))) + return -EPERM; + + if (exfat_mode_can_hold_ro(inode)) { + /* Of the w bits, either all (subject to umask) or none must be present. */ + if ((perm & S_IWUGO) && ((perm & S_IWUGO) != (S_IWUGO & ~mask))) + return -EPERM; + } else { + /* If exfat_mode_can_hold_ro(inode) is false, can't change w bits. */ + if ((perm & S_IWUGO) != (S_IWUGO & ~mask)) + return -EPERM; + } + + *mode_ptr &= S_IFMT | perm; + + return 0; +} + +static int exfat_setattr(struct dentry *dentry, struct iattr *attr) +{ + + struct exfat_sb_info *sbi = EXFAT_SB(dentry->d_sb); + struct inode *inode = dentry->d_inode; + unsigned int ia_valid; + int error; + loff_t old_size; + + PRINTK("exfat_setattr entered\n"); + + if ((attr->ia_valid & ATTR_SIZE) + && (attr->ia_size > i_size_read(inode))) { + error = exfat_cont_expand(inode, attr->ia_size); + if (error || attr->ia_valid == ATTR_SIZE) + return error; + attr->ia_valid &= ~ATTR_SIZE; + } + + ia_valid = attr->ia_valid; + + if ((ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) + && exfat_allow_set_time(sbi, inode)) { + attr->ia_valid &= ~(ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET); + } + + error = inode_change_ok(inode, attr); + attr->ia_valid = ia_valid; + if (error) { + return error; + } + + if (((attr->ia_valid & ATTR_UID) && +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) + (!uid_eq(attr->ia_uid, sbi->options.fs_uid))) || + ((attr->ia_valid & ATTR_GID) && + (!gid_eq(attr->ia_gid, sbi->options.fs_gid))) || +#else + (attr->ia_uid != sbi->options.fs_uid)) || + ((attr->ia_valid & ATTR_GID) && + (attr->ia_gid != sbi->options.fs_gid)) || +#endif + ((attr->ia_valid & ATTR_MODE) && + (attr->ia_mode & ~(S_IFREG | S_IFLNK | S_IFDIR | S_IRWXUGO)))) { + return -EPERM; + } + + /* + * We don't return -EPERM here. Yes, strange, but this is too + * old behavior. 
+ */ + if (attr->ia_valid & ATTR_MODE) { + if (exfat_sanitize_mode(sbi, inode, &attr->ia_mode) < 0) + attr->ia_valid &= ~ATTR_MODE; + } + + EXFAT_I(inode)->fid.size = i_size_read(inode); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) + if (attr->ia_valid) + error = inode_setattr(inode, attr); +#else + if (attr->ia_valid & ATTR_SIZE) { + old_size = i_size_read(inode); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,00) + down_write(&EXFAT_I(inode)->truncate_lock); + truncate_setsize(inode, attr->ia_size); + _exfat_truncate(inode, old_size); + up_write(&EXFAT_I(inode)->truncate_lock); +#else + truncate_setsize(inode, attr->ia_size); + _exfat_truncate(inode, old_size); +#endif + } + setattr_copy(inode, attr); + mark_inode_dirty(inode); +#endif + + PRINTK("exfat_setattr exited\n"); + return error; +} + +static int exfat_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) +{ + struct inode *inode = dentry->d_inode; + + PRINTK("exfat_getattr entered\n"); + + generic_fillattr(inode, stat); + stat->blksize = EXFAT_SB(inode->i_sb)->fs_info.cluster_size; + + PRINTK("exfat_getattr exited\n"); + return 0; +} + +const struct inode_operations exfat_dir_inode_operations = { + .create = exfat_create, + .lookup = exfat_lookup, + .unlink = exfat_unlink, + .symlink = exfat_symlink, + .mkdir = exfat_mkdir, + .rmdir = exfat_rmdir, + .rename = exfat_rename, + .setattr = exfat_setattr, + .getattr = exfat_getattr, +}; + +/*======================================================================*/ +/* File Operations */ +/*======================================================================*/ + +static void *exfat_follow_link(struct dentry *dentry, struct nameidata *nd) +{ + struct exfat_inode_info *ei = EXFAT_I(dentry->d_inode); + nd_set_link(nd, (char *)(ei->target)); + return NULL; +} + +const struct inode_operations exfat_symlink_inode_operations = { + .readlink = generic_readlink, + .follow_link = exfat_follow_link, +}; + +static int exfat_file_release(struct inode *inode, struct file *filp) +{ + struct super_block *sb = inode->i_sb; + + EXFAT_I(inode)->fid.size = i_size_read(inode); + FsSyncVol(sb, 0); + return 0; +} + +const struct file_operations exfat_file_operations = { + .llseek = generic_file_llseek, + .read = do_sync_read, + .write = do_sync_write, + .aio_read = generic_file_aio_read, + .aio_write = generic_file_aio_write, + .mmap = generic_file_mmap, + .release = exfat_file_release, +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) + .ioctl = exfat_generic_ioctl, + .fsync = exfat_file_fsync, +#else + .unlocked_ioctl = exfat_generic_ioctl, + .fsync = generic_file_fsync, +#endif + .splice_read = generic_file_splice_read, +}; + +static void _exfat_truncate(struct inode *inode, loff_t old_size) +{ + struct super_block *sb = inode->i_sb; + struct exfat_sb_info *sbi = EXFAT_SB(sb); + FS_INFO_T *p_fs = &(sbi->fs_info); + int err; + + __lock_super(sb); + + /* + * This protects against truncating a file bigger than it was then + * trying to write into the hole. 
+ */ + if (EXFAT_I(inode)->mmu_private > i_size_read(inode)) + EXFAT_I(inode)->mmu_private = i_size_read(inode); + + if (EXFAT_I(inode)->fid.start_clu == 0) goto out; + + err = FsTruncateFile(inode, old_size, i_size_read(inode)); + if (err) goto out; + + inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC; + if (IS_DIRSYNC(inode)) + (void) exfat_sync_inode(inode); + else + mark_inode_dirty(inode); + + inode->i_blocks = ((i_size_read(inode) + (p_fs->cluster_size - 1)) + & ~((loff_t)p_fs->cluster_size - 1)) >> 9; +out: + __unlock_super(sb); +} + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,36) +static void exfat_truncate(struct inode *inode) +{ + _exfat_truncate(inode, i_size_read(inode)); +} +#endif + +const struct inode_operations exfat_file_inode_operations = { +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,36) + .truncate = exfat_truncate, +#endif + .setattr = exfat_setattr, + .getattr = exfat_getattr, +}; + +/*======================================================================*/ +/* Address Space Operations */ +/*======================================================================*/ + +static int exfat_bmap(struct inode *inode, sector_t sector, sector_t *phys, + unsigned long *mapped_blocks, int *create) +{ + struct super_block *sb = inode->i_sb; + struct exfat_sb_info *sbi = EXFAT_SB(sb); + FS_INFO_T *p_fs = &(sbi->fs_info); + BD_INFO_T *p_bd = &(sbi->bd_info); + const unsigned long blocksize = sb->s_blocksize; + const unsigned char blocksize_bits = sb->s_blocksize_bits; + sector_t last_block; + int err, clu_offset, sec_offset; + unsigned int cluster; + + *phys = 0; + *mapped_blocks = 0; + + if ((p_fs->vol_type == FAT12) || (p_fs->vol_type == FAT16)) { + if (inode->i_ino == EXFAT_ROOT_INO) { + if (sector < (p_fs->dentries_in_root >> (p_bd->sector_size_bits-DENTRY_SIZE_BITS))) { + *phys = sector + p_fs->root_start_sector; + *mapped_blocks = 1; + } + return 0; + } + } + + last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits; + if (sector >= last_block) { + if (*create == 0) return 0; + } else { + *create = 0; + } + + clu_offset = sector >> p_fs->sectors_per_clu_bits; /* cluster offset */ + sec_offset = sector & (p_fs->sectors_per_clu - 1); /* sector offset in cluster */ + + EXFAT_I(inode)->fid.size = i_size_read(inode); + + err = FsMapCluster(inode, clu_offset, &cluster); + + if (err) { + if (err == FFS_FULL) + return -ENOSPC; + else + return -EIO; + } else if (cluster != CLUSTER_32(~0)) { + *phys = START_SECTOR(cluster) + sec_offset; + *mapped_blocks = p_fs->sectors_per_clu - sec_offset; + } + + return 0; +} + +static int exfat_get_block(struct inode *inode, sector_t iblock, + struct buffer_head *bh_result, int create) +{ + struct super_block *sb = inode->i_sb; + unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits; + int err; + unsigned long mapped_blocks; + sector_t phys; + + __lock_super(sb); + + err = exfat_bmap(inode, iblock, &phys, &mapped_blocks, &create); + if (err) { + __unlock_super(sb); + return err; + } + + if (phys) { + max_blocks = min(mapped_blocks, max_blocks); + if (create) { + EXFAT_I(inode)->mmu_private += max_blocks << sb->s_blocksize_bits; + set_buffer_new(bh_result); + } + map_bh(bh_result, sb, phys); + } + + bh_result->b_size = max_blocks << sb->s_blocksize_bits; + __unlock_super(sb); + + return 0; +} + +static int exfat_readpage(struct file *file, struct page *page) +{ + int ret; + ret = mpage_readpage(page, exfat_get_block); + return ret; +} + +static int exfat_readpages(struct file *file, struct address_space *mapping, + struct 
list_head *pages, unsigned nr_pages) +{ + int ret; + ret = mpage_readpages(mapping, pages, nr_pages, exfat_get_block); + return ret; +} + +static int exfat_writepage(struct page *page, struct writeback_control *wbc) +{ + int ret; + ret = block_write_full_page(page, exfat_get_block, wbc); + return ret; +} + +static int exfat_writepages(struct address_space *mapping, + struct writeback_control *wbc) +{ + int ret; + ret = mpage_writepages(mapping, wbc, exfat_get_block); + return ret; +} + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,34) +static void exfat_write_failed(struct address_space *mapping, loff_t to) +{ + struct inode *inode = mapping->host; + if (to > i_size_read(inode)) { + truncate_pagecache(inode, to, i_size_read(inode)); + EXFAT_I(inode)->fid.size = i_size_read(inode); + _exfat_truncate(inode, i_size_read(inode)); + } +} +#endif + + +static int exfat_write_begin(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata) +{ + int ret; + *pagep = NULL; + ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, + exfat_get_block, + &EXFAT_I(mapping->host)->mmu_private); + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,34) + if (ret < 0) + exfat_write_failed(mapping, pos+len); +#endif + return ret; +} + +static int exfat_write_end(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *pagep, void *fsdata) +{ + struct inode *inode = mapping->host; + FILE_ID_T *fid = &(EXFAT_I(inode)->fid); + int err; + + err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata); + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,34) + if (err < len) + exfat_write_failed(mapping, pos+len); +#endif + + if (!(err < 0) && !(fid->attr & ATTR_ARCHIVE)) { + inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; + fid->attr |= ATTR_ARCHIVE; + mark_inode_dirty(inode); + } + return err; +} + +static ssize_t exfat_direct_IO(int rw, struct kiocb *iocb, + const struct iovec *iov, + loff_t offset, unsigned long nr_segs) +{ + struct inode *inode = iocb->ki_filp->f_mapping->host; +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,34) + struct address_space *mapping = iocb->ki_filp->f_mapping; +#endif + ssize_t ret; + + if (rw == WRITE) { + if (EXFAT_I(inode)->mmu_private < (offset + iov_length(iov, nr_segs))) + return 0; + } +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,00) + ret = blockdev_direct_IO(rw, iocb, inode, iov, + offset, nr_segs, exfat_get_block); +#else + ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, + offset, nr_segs, exfat_get_block, NULL); +#endif + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,34) + if ((ret < 0) && (rw & WRITE)) + exfat_write_failed(mapping, offset+iov_length(iov, nr_segs)); +#endif + return ret; +} + +static sector_t _exfat_bmap(struct address_space *mapping, sector_t block) +{ + sector_t blocknr; + + /* exfat_get_cluster() assumes the requested blocknr isn't truncated. 
*/ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,00) + down_read(&EXFAT_I(mapping->host)->truncate_lock); + blocknr = generic_block_bmap(mapping, block, exfat_get_block); + up_read(&EXFAT_I(mapping->host)->truncate_lock); +#else + down_read(&EXFAT_I(mapping->host)->i_alloc_sem); + blocknr = generic_block_bmap(mapping, block, exfat_get_block); + up_read(&EXFAT_I(mapping->host)->i_alloc_sem); +#endif + + return blocknr; +} + +const struct address_space_operations exfat_aops = { + .readpage = exfat_readpage, + .readpages = exfat_readpages, + .writepage = exfat_writepage, + .writepages = exfat_writepages, +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) + .sync_page = block_sync_page, +#endif + .write_begin = exfat_write_begin, + .write_end = exfat_write_end, + .direct_IO = exfat_direct_IO, + .bmap = _exfat_bmap +}; + +/*======================================================================*/ +/* Super Operations */ +/*======================================================================*/ + +static inline unsigned long exfat_hash(loff_t i_pos) +{ + return hash_32(i_pos, EXFAT_HASH_BITS); +} + +static struct inode *exfat_iget(struct super_block *sb, loff_t i_pos) { + struct exfat_sb_info *sbi = EXFAT_SB(sb); + struct exfat_inode_info *info; + struct hlist_head *head = sbi->inode_hashtable + exfat_hash(i_pos); + struct inode *inode = NULL; +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) + struct hlist_node *node; + + spin_lock(&sbi->inode_hash_lock); + hlist_for_each_entry(info, node, head, i_hash_fat) { +#else + spin_lock(&sbi->inode_hash_lock); + hlist_for_each_entry(info, head, i_hash_fat) { +#endif + CHECK_ERR(info->vfs_inode.i_sb != sb); + + if (i_pos != info->i_pos) + continue; + inode = igrab(&info->vfs_inode); + if (inode) + break; + } + spin_unlock(&sbi->inode_hash_lock); + return inode; +} + +static void exfat_attach(struct inode *inode, loff_t i_pos) +{ + struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb); + struct hlist_head *head = sbi->inode_hashtable + exfat_hash(i_pos); + + spin_lock(&sbi->inode_hash_lock); + EXFAT_I(inode)->i_pos = i_pos; + hlist_add_head(&EXFAT_I(inode)->i_hash_fat, head); + spin_unlock(&sbi->inode_hash_lock); +} + +static void exfat_detach(struct inode *inode) +{ + struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb); + + spin_lock(&sbi->inode_hash_lock); + hlist_del_init(&EXFAT_I(inode)->i_hash_fat); + EXFAT_I(inode)->i_pos = 0; + spin_unlock(&sbi->inode_hash_lock); +} + +/* doesn't deal with root inode */ +static int exfat_fill_inode(struct inode *inode, FILE_ID_T *fid) +{ + struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb); + FS_INFO_T *p_fs = &(sbi->fs_info); + DIR_ENTRY_T info; + + memcpy(&(EXFAT_I(inode)->fid), fid, sizeof(FILE_ID_T)); + + FsReadStat(inode, &info); + + EXFAT_I(inode)->i_pos = 0; + EXFAT_I(inode)->target = NULL; + inode->i_uid = sbi->options.fs_uid; + inode->i_gid = sbi->options.fs_gid; + inode->i_version++; + inode->i_generation = get_seconds(); + + if (info.Attr & ATTR_SUBDIR) { /* directory */ + inode->i_generation &= ~1; + inode->i_mode = exfat_make_mode(sbi, info.Attr, S_IRWXUGO); + inode->i_op = &exfat_dir_inode_operations; + inode->i_fop = &exfat_dir_operations; + + i_size_write(inode, info.Size); + EXFAT_I(inode)->mmu_private = i_size_read(inode); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,00) + set_nlink(inode,info.NumSubdirs); +#else + inode->i_nlink = info.NumSubdirs; +#endif + } else if (info.Attr & ATTR_SYMLINK) { /* symbolic link */ + inode->i_generation |= 1; + inode->i_mode = exfat_make_mode(sbi, info.Attr, S_IRWXUGO); + 
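/* the symlink target is kept in EXFAT_I(inode)->target, filled by exfat_symlink() and exfat_lookup() */
+ 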
inode->i_op = &exfat_symlink_inode_operations; + + i_size_write(inode, info.Size); + EXFAT_I(inode)->mmu_private = i_size_read(inode); + } else { /* regular file */ + inode->i_generation |= 1; + inode->i_mode = exfat_make_mode(sbi, info.Attr, S_IRWXUGO); + inode->i_op = &exfat_file_inode_operations; + inode->i_fop = &exfat_file_operations; + inode->i_mapping->a_ops = &exfat_aops; + inode->i_mapping->nrpages = 0; + + i_size_write(inode, info.Size); + EXFAT_I(inode)->mmu_private = i_size_read(inode); + } + exfat_save_attr(inode, info.Attr); + + inode->i_blocks = ((i_size_read(inode) + (p_fs->cluster_size - 1)) + & ~((loff_t)p_fs->cluster_size - 1)) >> 9; + + exfat_time_fat2unix(sbi, &inode->i_mtime, &info.ModifyTimestamp); + exfat_time_fat2unix(sbi, &inode->i_ctime, &info.CreateTimestamp); + exfat_time_fat2unix(sbi, &inode->i_atime, &info.AccessTimestamp); + + return 0; +} + +static struct inode *exfat_build_inode(struct super_block *sb, + FILE_ID_T *fid, loff_t i_pos) { + struct inode *inode; + int err; + + inode = exfat_iget(sb, i_pos); + if (inode) + goto out; + inode = new_inode(sb); + if (!inode) { + inode = ERR_PTR(-ENOMEM); + goto out; + } + inode->i_ino = iunique(sb, EXFAT_ROOT_INO); + inode->i_version = 1; + err = exfat_fill_inode(inode, fid); + if (err) { + iput(inode); + inode = ERR_PTR(err); + goto out; + } + exfat_attach(inode, i_pos); + insert_inode_hash(inode); +out: + return inode; +} + +static int exfat_sync_inode(struct inode *inode) +{ + return exfat_write_inode(inode, NULL); +} + +static struct inode *exfat_alloc_inode(struct super_block *sb) { + struct exfat_inode_info *ei; + + ei = kmem_cache_alloc(exfat_inode_cachep, GFP_NOFS); + if (!ei) + return NULL; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,00) + init_rwsem(&ei->truncate_lock); +#endif + + return &ei->vfs_inode; +} + +static void exfat_destroy_inode(struct inode *inode) +{ + FREE(EXFAT_I(inode)->target); + EXFAT_I(inode)->target = NULL; + + kmem_cache_free(exfat_inode_cachep, EXFAT_I(inode)); +} + +static int exfat_write_inode(struct inode *inode, struct writeback_control *wbc) +{ + struct super_block *sb = inode->i_sb; + struct exfat_sb_info *sbi = EXFAT_SB(sb); + DIR_ENTRY_T info; + + if (inode->i_ino == EXFAT_ROOT_INO) + return 0; + + info.Attr = exfat_make_attr(inode); + info.Size = i_size_read(inode); + + exfat_time_unix2fat(sbi, &inode->i_mtime, &info.ModifyTimestamp); + exfat_time_unix2fat(sbi, &inode->i_ctime, &info.CreateTimestamp); + exfat_time_unix2fat(sbi, &inode->i_atime, &info.AccessTimestamp); + + FsWriteStat(inode, &info); + + return 0; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) +static void exfat_delete_inode(struct inode *inode) +{ + truncate_inode_pages(&inode->i_data, 0); + clear_inode(inode); +} + +static void exfat_clear_inode(struct inode *inode) +{ + exfat_detach(inode); + remove_inode_hash(inode); +} +#else +static void exfat_evict_inode(struct inode *inode) +{ + truncate_inode_pages(&inode->i_data, 0); + + if (!inode->i_nlink) + i_size_write(inode, 0); + invalidate_inode_buffers(inode); +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,7,00) + end_writeback(inode); +#else + clear_inode(inode); +#endif + exfat_detach(inode); + + remove_inode_hash(inode); +} +#endif + +static void exfat_free_super(struct exfat_sb_info *sbi) +{ + if (sbi->nls_disk) + unload_nls(sbi->nls_disk); + if (sbi->nls_io) + unload_nls(sbi->nls_io); + if (sbi->options.iocharset != exfat_default_iocharset) + kfree(sbi->options.iocharset); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) + 
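/* s_lock exists only on kernels without lock_super(); it was set up in exfat_fill_super() */
+ 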
mutex_destroy(&sbi->s_lock); +#endif + kfree(sbi); +} + +static void exfat_put_super(struct super_block *sb) +{ + struct exfat_sb_info *sbi = EXFAT_SB(sb); + if (__is_sb_dirty(sb)) + exfat_write_super(sb); + + FsUmountVol(sb); + + sb->s_fs_info = NULL; + exfat_free_super(sbi); +} + +static void exfat_write_super(struct super_block *sb) +{ + __lock_super(sb); + + __set_sb_clean(sb); + + if (!(sb->s_flags & MS_RDONLY)) + FsSyncVol(sb, 1); + + __unlock_super(sb); +} + +static int exfat_sync_fs(struct super_block *sb, int wait) +{ + int err = 0; + + if (__is_sb_dirty(sb)) { + __lock_super(sb); + __set_sb_clean(sb); + err = FsSyncVol(sb, 1); + __unlock_super(sb); + } + + return err; +} + +static int exfat_statfs(struct dentry *dentry, struct kstatfs *buf) +{ + struct super_block *sb = dentry->d_sb; + u64 id = huge_encode_dev(sb->s_bdev->bd_dev); + FS_INFO_T *p_fs = &(EXFAT_SB(sb)->fs_info); + VOL_INFO_T info; + + if (p_fs->used_clusters == (UINT32) ~0) { + if (FFS_MEDIAERR == FsGetVolInfo(sb, &info)) + return -EIO; + + } else { + info.FatType = p_fs->vol_type; + info.ClusterSize = p_fs->cluster_size; + info.NumClusters = p_fs->num_clusters - 2; + info.UsedClusters = p_fs->used_clusters; + info.FreeClusters = info.NumClusters - info.UsedClusters; + + if (p_fs->dev_ejected) + return -EIO; + } + + buf->f_type = sb->s_magic; + buf->f_bsize = info.ClusterSize; + buf->f_blocks = info.NumClusters; + buf->f_bfree = info.FreeClusters; + buf->f_bavail = info.FreeClusters; + buf->f_fsid.val[0] = (u32)id; + buf->f_fsid.val[1] = (u32)(id >> 32); + buf->f_namelen = 260; + + return 0; +} + +static int exfat_remount(struct super_block *sb, int *flags, char *data) +{ + *flags |= MS_NODIRATIME; + return 0; +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,00) +static int exfat_show_options(struct seq_file *m, struct dentry *root) +{ + struct exfat_sb_info *sbi = EXFAT_SB(root->d_sb); +#else +static int exfat_show_options(struct seq_file *m, struct vfsmount *mnt) +{ + struct exfat_sb_info *sbi = EXFAT_SB(mnt->mnt_sb); +#endif + struct exfat_mount_options *opts = &sbi->options; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) + if (__kuid_val(opts->fs_uid)) + seq_printf(m, ",uid=%u", __kuid_val(opts->fs_uid)); + if (__kgid_val(opts->fs_gid)) + seq_printf(m, ",gid=%u", __kgid_val(opts->fs_gid)); +#else + if (opts->fs_uid != 0) + seq_printf(m, ",uid=%u", opts->fs_uid); + if (opts->fs_gid != 0) + seq_printf(m, ",gid=%u", opts->fs_gid); +#endif + seq_printf(m, ",fmask=%04o", opts->fs_fmask); + seq_printf(m, ",dmask=%04o", opts->fs_dmask); + if (opts->allow_utime) + seq_printf(m, ",allow_utime=%04o", opts->allow_utime); + if (sbi->nls_disk) + seq_printf(m, ",codepage=%s", sbi->nls_disk->charset); + if (sbi->nls_io) + seq_printf(m, ",iocharset=%s", sbi->nls_io->charset); + seq_printf(m, ",namecase=%u", opts->casesensitive); + if (opts->errors == EXFAT_ERRORS_CONT) + seq_puts(m, ",errors=continue"); + else if (opts->errors == EXFAT_ERRORS_PANIC) + seq_puts(m, ",errors=panic"); + else + seq_puts(m, ",errors=remount-ro"); +#if EXFAT_CONFIG_DISCARD + if (opts->discard) + seq_printf(m, ",discard"); +#endif + return 0; +} + +const struct super_operations exfat_sops = { + .alloc_inode = exfat_alloc_inode, + .destroy_inode = exfat_destroy_inode, + .write_inode = exfat_write_inode, +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) + .delete_inode = exfat_delete_inode, + .clear_inode = exfat_clear_inode, +#else + .evict_inode = exfat_evict_inode, +#endif + .put_super = exfat_put_super, +#if LINUX_VERSION_CODE < 
KERNEL_VERSION(3,7,0) + .write_super = exfat_write_super, +#endif + .sync_fs = exfat_sync_fs, + .statfs = exfat_statfs, + .remount_fs = exfat_remount, + .show_options = exfat_show_options, +}; + +/*======================================================================*/ +/* Super Block Read Operations */ +/*======================================================================*/ + +enum { + Opt_uid, + Opt_gid, + Opt_umask, + Opt_dmask, + Opt_fmask, + Opt_allow_utime, + Opt_codepage, + Opt_charset, + Opt_namecase, + Opt_debug, + Opt_err_cont, + Opt_err_panic, + Opt_err_ro, + Opt_err, +#if EXFAT_CONFIG_DISCARD + Opt_discard, +#endif /* EXFAT_CONFIG_DISCARD */ +}; + +static const match_table_t exfat_tokens = { + {Opt_uid, "uid=%u"}, + {Opt_gid, "gid=%u"}, + {Opt_umask, "umask=%o"}, + {Opt_dmask, "dmask=%o"}, + {Opt_fmask, "fmask=%o"}, + {Opt_allow_utime, "allow_utime=%o"}, + {Opt_codepage, "codepage=%u"}, + {Opt_charset, "iocharset=%s"}, + {Opt_namecase, "namecase=%u"}, + {Opt_debug, "debug"}, + {Opt_err_cont, "errors=continue"}, + {Opt_err_panic, "errors=panic"}, + {Opt_err_ro, "errors=remount-ro"}, +#if EXFAT_CONFIG_DISCARD + {Opt_discard, "discard"}, +#endif /* EXFAT_CONFIG_DISCARD */ + {Opt_err, NULL} +}; + +static int parse_options(char *options, int silent, int *debug, + struct exfat_mount_options *opts) +{ + char *p; + substring_t args[MAX_OPT_ARGS]; + int option; + char *iocharset; + + opts->fs_uid = current_uid(); + opts->fs_gid = current_gid(); + opts->fs_fmask = opts->fs_dmask = current->fs->umask; + opts->allow_utime = (unsigned short) -1; + opts->codepage = exfat_default_codepage; + opts->iocharset = exfat_default_iocharset; + opts->casesensitive = 0; + opts->errors = EXFAT_ERRORS_RO; +#if EXFAT_CONFIG_DISCARD + opts->discard = 0; +#endif + *debug = 0; + + if (!options) + goto out; + + while ((p = strsep(&options, ",")) != NULL) { + int token; + if (!*p) + continue; + + token = match_token(p, exfat_tokens, args); + switch (token) { + case Opt_uid: + if (match_int(&args[0], &option)) + return 0; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) + opts->fs_uid = KUIDT_INIT(option); +#else + opts->fs_uid = option; +#endif + break; + case Opt_gid: + if (match_int(&args[0], &option)) + return 0; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) + opts->fs_gid = KGIDT_INIT(option); +#else + opts->fs_gid = option; +#endif + break; + case Opt_umask: + case Opt_dmask: + case Opt_fmask: + if (match_octal(&args[0], &option)) + return 0; + if (token != Opt_dmask) + opts->fs_fmask = option; + if (token != Opt_fmask) + opts->fs_dmask = option; + break; + case Opt_allow_utime: + if (match_octal(&args[0], &option)) + return 0; + opts->allow_utime = option & (S_IWGRP | S_IWOTH); + break; + case Opt_codepage: + if (match_int(&args[0], &option)) + return 0; + opts->codepage = option; + break; + case Opt_charset: + if (opts->iocharset != exfat_default_iocharset) + kfree(opts->iocharset); + iocharset = match_strdup(&args[0]); + if (!iocharset) + return -ENOMEM; + opts->iocharset = iocharset; + break; + case Opt_namecase: + if (match_int(&args[0], &option)) + return 0; + opts->casesensitive = option; + break; + case Opt_err_cont: + opts->errors = EXFAT_ERRORS_CONT; + break; + case Opt_err_panic: + opts->errors = EXFAT_ERRORS_PANIC; + break; + case Opt_err_ro: + opts->errors = EXFAT_ERRORS_RO; + break; + case Opt_debug: + *debug = 1; + break; +#if EXFAT_CONFIG_DISCARD + case Opt_discard: + opts->discard = 1; + break; +#endif /* EXFAT_CONFIG_DISCARD */ + default: + if (!silent) { + printk(KERN_ERR 
"[EXFAT] Unrecognized mount option %s or missing value\n", p); + } + return -EINVAL; + } + } + +out: + if (opts->allow_utime == (unsigned short) -1) + opts->allow_utime = ~opts->fs_dmask & (S_IWGRP | S_IWOTH); + + return 0; +} + +static void exfat_hash_init(struct super_block *sb) +{ + struct exfat_sb_info *sbi = EXFAT_SB(sb); + int i; + + spin_lock_init(&sbi->inode_hash_lock); + for (i = 0; i < EXFAT_HASH_SIZE; i++) + INIT_HLIST_HEAD(&sbi->inode_hashtable[i]); +} + +static int exfat_read_root(struct inode *inode) +{ + struct super_block *sb = inode->i_sb; + struct exfat_sb_info *sbi = EXFAT_SB(sb); + struct timespec ts; + FS_INFO_T *p_fs = &(sbi->fs_info); + DIR_ENTRY_T info; + + ts = CURRENT_TIME_SEC; + + EXFAT_I(inode)->fid.dir.dir = p_fs->root_dir; + EXFAT_I(inode)->fid.dir.flags = 0x01; + EXFAT_I(inode)->fid.entry = -1; + EXFAT_I(inode)->fid.start_clu = p_fs->root_dir; + EXFAT_I(inode)->fid.flags = 0x01; + EXFAT_I(inode)->fid.type = TYPE_DIR; + EXFAT_I(inode)->fid.rwoffset = 0; + EXFAT_I(inode)->fid.hint_last_off = -1; + + EXFAT_I(inode)->target = NULL; + + FsReadStat(inode, &info); + + inode->i_uid = sbi->options.fs_uid; + inode->i_gid = sbi->options.fs_gid; + inode->i_version++; + inode->i_generation = 0; + inode->i_mode = exfat_make_mode(sbi, ATTR_SUBDIR, S_IRWXUGO); + inode->i_op = &exfat_dir_inode_operations; + inode->i_fop = &exfat_dir_operations; + + i_size_write(inode, info.Size); + inode->i_blocks = ((i_size_read(inode) + (p_fs->cluster_size - 1)) + & ~((loff_t)p_fs->cluster_size - 1)) >> 9; + EXFAT_I(inode)->i_pos = ((loff_t) p_fs->root_dir << 32) | 0xffffffff; + EXFAT_I(inode)->mmu_private = i_size_read(inode); + + exfat_save_attr(inode, ATTR_SUBDIR); + inode->i_mtime = inode->i_atime = inode->i_ctime = ts; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,00) + set_nlink(inode,info.NumSubdirs + 2); +#else + inode->i_nlink = info.NumSubdirs + 2; +#endif + + return 0; +} + +static int exfat_fill_super(struct super_block *sb, void *data, int silent) +{ + struct inode *root_inode = NULL; + struct exfat_sb_info *sbi; + int debug, ret; + long error; + char buf[50]; + + /* + * GFP_KERNEL is ok here, because while we do hold the + * supeblock lock, memory pressure can't call back into + * the filesystem, since we're only just about to mount + * it and have no inodes etc active! + */ + sbi = kzalloc(sizeof(struct exfat_sb_info), GFP_KERNEL); + if (!sbi) + return -ENOMEM; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) + mutex_init(&sbi->s_lock); +#endif + sb->s_fs_info = sbi; + + sb->s_flags |= MS_NODIRATIME; + sb->s_magic = EXFAT_SUPER_MAGIC; + sb->s_op = &exfat_sops; + + error = parse_options(data, silent, &debug, &sbi->options); + if (error) + goto out_fail; + + error = -EIO; + sb_min_blocksize(sb, 512); + sb->s_maxbytes = 0x7fffffffffffffffLL; // maximum file size + + ret = FsMountVol(sb); + if (ret) { + if (!silent) + printk(KERN_ERR "[EXFAT] FsMountVol failed\n"); + + goto out_fail; + } + + /* set up enough so that it can read an inode */ + exfat_hash_init(sb); + + /* + * The low byte of FAT's first entry must have same value with + * media-field. But in real world, too many devices is + * writing wrong value. So, removed that validity check. 
+ * + * if (FAT_FIRST_ENT(sb, media) != first) + */ + + if (sbi->fs_info.vol_type != EXFAT) { + error = -EINVAL; + sprintf(buf, "cp%d", sbi->options.codepage); + sbi->nls_disk = load_nls(buf); + if (!sbi->nls_disk) { + printk(KERN_ERR "[EXFAT] Codepage %s not found\n", buf); + goto out_fail2; + } + } + + sbi->nls_io = load_nls(sbi->options.iocharset); + if (!sbi->nls_io) { + printk(KERN_ERR "[EXFAT] IO charset %s not found\n", + sbi->options.iocharset); + goto out_fail2; + } + + error = -ENOMEM; + root_inode = new_inode(sb); + if (!root_inode) + goto out_fail2; + root_inode->i_ino = EXFAT_ROOT_INO; + root_inode->i_version = 1; + error = exfat_read_root(root_inode); + if (error < 0) + goto out_fail2; + error = -ENOMEM; + exfat_attach(root_inode, EXFAT_I(root_inode)->i_pos); + insert_inode_hash(root_inode); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,00) + sb->s_root = d_make_root(root_inode); +#else + sb->s_root = d_alloc_root(root_inode); +#endif + if (!sb->s_root) { + printk(KERN_ERR "[EXFAT] Getting the root inode failed\n"); + goto out_fail2; + } + + return 0; + +out_fail2: + FsUmountVol(sb); +out_fail: + if (root_inode) + iput(root_inode); + sb->s_fs_info = NULL; + exfat_free_super(sbi); + return error; +} +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) +static int exfat_get_sb(struct file_system_type *fs_type, + int flags, const char *dev_name, + void *data, struct vfsmount *mnt) +{ + return get_sb_bdev(fs_type, flags, dev_name, data, exfat_fill_super, mnt); +} +#else +static struct dentry *exfat_fs_mount(struct file_system_type *fs_type, + int flags, const char *dev_name, + void *data) { + return mount_bdev(fs_type, flags, dev_name, data, exfat_fill_super); +} +#endif + +static void init_once(void *foo) +{ + struct exfat_inode_info *ei = (struct exfat_inode_info *)foo; + + INIT_HLIST_NODE(&ei->i_hash_fat); + inode_init_once(&ei->vfs_inode); +} + +static int __init exfat_init_inodecache(void) +{ + exfat_inode_cachep = kmem_cache_create("exfat_inode_cache", + sizeof(struct exfat_inode_info), + 0, (SLAB_RECLAIM_ACCOUNT| + SLAB_MEM_SPREAD), + init_once); + if (exfat_inode_cachep == NULL) + return -ENOMEM; + return 0; +} + +static void __exit exfat_destroy_inodecache(void) +{ + kmem_cache_destroy(exfat_inode_cachep); +} + +#if EXFAT_CONFIG_KERNEL_DEBUG +static void exfat_debug_kill_sb(struct super_block *sb) +{ + struct exfat_sb_info *sbi = EXFAT_SB(sb); + struct block_device *bdev = sb->s_bdev; + + long flags; + + if (sbi) { + flags = sbi->debug_flags; + + if (flags & EXFAT_DEBUGFLAGS_INVALID_UMOUNT) { + /* invalidate_bdev drops all device cache include dirty. 
+ we use this to simulate device removal */
+ FsReleaseCache(sb);
+ invalidate_bdev(bdev);
+ }
+ }
+
+ kill_block_super(sb);
+}
+#endif /* EXFAT_CONFIG_KERNEL_DEBUG */
+
+static struct file_system_type exfat_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "exfat",
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
+ .get_sb = exfat_get_sb,
+#else
+ .mount = exfat_fs_mount,
+#endif
+#if EXFAT_CONFIG_KERNEL_DEBUG
+ .kill_sb = exfat_debug_kill_sb,
+#else
+ .kill_sb = kill_block_super,
+#endif /* EXFAT_CONFIG_KERNEL_DEBUG */
+ .fs_flags = FS_REQUIRES_DEV,
+};
+
+static int __init init_exfat(void)
+{
+ int err;
+
+ err = FsInit();
+ if (err) {
+ if (err == FFS_MEMORYERR)
+ return -ENOMEM;
+ else
+ return -EIO;
+ }
+
+ printk(KERN_INFO "exFAT: Version %s\n", EXFAT_VERSION);
+
+ err = exfat_init_inodecache();
+ if (err)
+ goto out;
+
+ err = register_filesystem(&exfat_fs_type);
+ if (err) {
+ /* don't leak the inode cache if registration fails */
+ exfat_destroy_inodecache();
+ goto out;
+ }
+
+ return 0;
+out:
+ FsShutdown();
+ return err;
+}
+
+static void __exit exit_exfat(void)
+{
+ /* unregister first so no new inodes can be allocated from the cache */
+ unregister_filesystem(&exfat_fs_type);
+ exfat_destroy_inodecache();
+ FsShutdown();
+}
+
+module_init(init_exfat);
+module_exit(exit_exfat);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("exFAT Filesystem Driver");
+#ifdef MODULE_ALIAS_FS
+MODULE_ALIAS_FS("exfat");
+#endif
diff --git a/fs/exfat/exfat_super.h b/fs/exfat/exfat_super.h
new file mode 100644
index 0000000000000..c9201dd79250a
--- /dev/null
+++ b/fs/exfat/exfat_super.h
@@ -0,0 +1,172 @@
+/* Some of the source code in this file came from "linux/fs/fat/fat.h". */
+
+/*
+ * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _EXFAT_LINUX_H
+#define _EXFAT_LINUX_H
+
+#include <linux/buffer_head.h>
+#include <linux/string.h>
+#include <linux/nls.h>
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include <linux/swap.h>
+
+#include "exfat_config.h"
+#include "exfat_global.h"
+#include "exfat_data.h"
+#include "exfat_oal.h"
+
+#include "exfat_blkdev.h"
+#include "exfat_cache.h"
+#include "exfat_part.h"
+#include "exfat_nls.h"
+#include "exfat_api.h"
+#include "exfat.h"
+
+#define EXFAT_ERRORS_CONT 1 /* ignore error and continue */
+#define EXFAT_ERRORS_PANIC 2 /* panic on error */
+#define EXFAT_ERRORS_RO 3 /* remount r/o on error */
+
+/* ioctl command */
+#define EXFAT_IOCTL_GET_VOLUME_ID _IOR('r', 0x12, __u32)
+
+struct exfat_mount_options {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
+ kuid_t fs_uid;
+ kgid_t fs_gid;
+#else
+ uid_t fs_uid;
+ gid_t fs_gid;
+#endif
+ unsigned short fs_fmask;
+ unsigned short fs_dmask;
+ unsigned short allow_utime; /* permission for setting the [am]time */
+ unsigned short codepage; /* codepage for shortname conversions */
+ char *iocharset; /* charset for filename input/display */
+ unsigned char casesensitive;
+ unsigned char errors; /* on error: continue, panic, remount-ro */
+#if EXFAT_CONFIG_DISCARD
+ unsigned char discard; /* set if -o discard was given and the device supports discard() */
+#endif /* EXFAT_CONFIG_DISCARD */
+};
+
+#define EXFAT_HASH_BITS 8
+#define EXFAT_HASH_SIZE (1UL << EXFAT_HASH_BITS)
+
+/*
+ * EXFAT file system in-core superblock data
+ */
+struct exfat_sb_info {
+ FS_INFO_T fs_info;
+ BD_INFO_T bd_info;
+
+ struct exfat_mount_options options;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,00)
+ int s_dirt;
+ struct mutex s_lock;
+#endif
+ struct nls_table *nls_disk; /* Codepage used on disk */
+ struct nls_table *nls_io; /* Charset used for input and display */
+
+ struct inode *fat_inode;
+
+ spinlock_t inode_hash_lock;
+ struct hlist_head inode_hashtable[EXFAT_HASH_SIZE];
+#if EXFAT_CONFIG_KERNEL_DEBUG
+ long debug_flags;
+#endif /* EXFAT_CONFIG_KERNEL_DEBUG */
+};
+
+/*
+ * EXFAT file system inode data in memory
+ */
+struct exfat_inode_info {
+ FILE_ID_T fid;
+ char *target;
+ /* NOTE: mmu_private is 64bits, so must hold ->i_mutex to access */
+ loff_t mmu_private; /* physically allocated size */
+ loff_t i_pos; /* on-disk position of directory entry or 0 */
+ struct hlist_node i_hash_fat; /* hash by i_location */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,00)
+ struct rw_semaphore truncate_lock;
+#endif
+ struct inode vfs_inode;
+ struct rw_semaphore i_alloc_sem; /* protect bmap against truncate */
+};
+
+#define EXFAT_SB(sb) ((struct exfat_sb_info *)((sb)->s_fs_info))
+
+static inline struct exfat_inode_info *EXFAT_I(struct inode *inode) {
+ return container_of(inode, struct exfat_inode_info, vfs_inode);
+}
+
+/*
+ * If ->i_mode can't hold S_IWUGO (i.e. ATTR_RO), we use ->i_attrs to
+ * save ATTR_RO instead of ->i_mode.
+ *
+ * If it's directory and !sbi->options.rodir, ATTR_RO isn't read-only
+ * bit, it's just used as flag for app.
+ */
+static inline int exfat_mode_can_hold_ro(struct inode *inode)
+{
+ struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+
+ if (S_ISDIR(inode->i_mode))
+ return 0;
+
+ if ((~sbi->options.fs_fmask) & S_IWUGO)
+ return 1;
+ return 0;
+}
+
+/* Convert attribute bits and a mask to the UNIX mode.
*/ +static inline mode_t exfat_make_mode(struct exfat_sb_info *sbi, + u32 attr, mode_t mode) +{ + if ((attr & ATTR_READONLY) && !(attr & ATTR_SUBDIR)) + mode &= ~S_IWUGO; + + if (attr & ATTR_SUBDIR) + return (mode & ~sbi->options.fs_dmask) | S_IFDIR; + else if (attr & ATTR_SYMLINK) + return (mode & ~sbi->options.fs_dmask) | S_IFLNK; + else + return (mode & ~sbi->options.fs_fmask) | S_IFREG; +} + +/* Return the FAT attribute byte for this inode */ +static inline u32 exfat_make_attr(struct inode *inode) +{ + if (exfat_mode_can_hold_ro(inode) && !(inode->i_mode & S_IWUGO)) + return ((EXFAT_I(inode)->fid.attr) | ATTR_READONLY); + else + return (EXFAT_I(inode)->fid.attr); +} + +static inline void exfat_save_attr(struct inode *inode, u32 attr) +{ + if (exfat_mode_can_hold_ro(inode)) + EXFAT_I(inode)->fid.attr = attr & ATTR_RWMASK; + else + EXFAT_I(inode)->fid.attr = attr & (ATTR_RWMASK | ATTR_READONLY); +} + +#endif /* _EXFAT_LINUX_H */ diff --git a/fs/exfat/exfat_upcase.c b/fs/exfat/exfat_upcase.c new file mode 100644 index 0000000000000..10e48d28b2949 --- /dev/null +++ b/fs/exfat/exfat_upcase.c @@ -0,0 +1,408 @@ +/* + * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +/************************************************************************/ +/* */ +/* PROJECT : exFAT & FAT12/16/32 File System */ +/* FILE : exfat_upcase.c */ +/* PURPOSE : exFAT Up-case Table */ +/* */ +/*----------------------------------------------------------------------*/ +/* NOTES */ +/* */ +/*----------------------------------------------------------------------*/ +/* REVISION HISTORY (Ver 0.9) */ +/* */ +/* - 2010.11.15 [Joosun Hahn] : first writing */ +/* */ +/************************************************************************/ + +#include "exfat_config.h" +#include "exfat_global.h" + +#include "exfat_nls.h" + +UINT8 uni_upcase[NUM_UPCASE<<1] = { + 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x03, 0x00, 0x04, 0x00, 0x05, 0x00, 0x06, 0x00, 0x07, 0x00, + 0x08, 0x00, 0x09, 0x00, 0x0A, 0x00, 0x0B, 0x00, 0x0C, 0x00, 0x0D, 0x00, 0x0E, 0x00, 0x0F, 0x00, + 0x10, 0x00, 0x11, 0x00, 0x12, 0x00, 0x13, 0x00, 0x14, 0x00, 0x15, 0x00, 0x16, 0x00, 0x17, 0x00, + 0x18, 0x00, 0x19, 0x00, 0x1A, 0x00, 0x1B, 0x00, 0x1C, 0x00, 0x1D, 0x00, 0x1E, 0x00, 0x1F, 0x00, + 0x20, 0x00, 0x21, 0x00, 0x22, 0x00, 0x23, 0x00, 0x24, 0x00, 0x25, 0x00, 0x26, 0x00, 0x27, 0x00, + 0x28, 0x00, 0x29, 0x00, 0x2A, 0x00, 0x2B, 0x00, 0x2C, 0x00, 0x2D, 0x00, 0x2E, 0x00, 0x2F, 0x00, + 0x30, 0x00, 0x31, 0x00, 0x32, 0x00, 0x33, 0x00, 0x34, 0x00, 0x35, 0x00, 0x36, 0x00, 0x37, 0x00, + 0x38, 0x00, 0x39, 0x00, 0x3A, 0x00, 0x3B, 0x00, 0x3C, 0x00, 0x3D, 0x00, 0x3E, 0x00, 0x3F, 0x00, + 0x40, 0x00, 0x41, 0x00, 0x42, 0x00, 0x43, 0x00, 0x44, 0x00, 0x45, 0x00, 0x46, 0x00, 0x47, 0x00, + 0x48, 0x00, 0x49, 0x00, 0x4A, 0x00, 0x4B, 0x00, 0x4C, 0x00, 0x4D, 0x00, 0x4E, 0x00, 0x4F, 0x00, + 0x50, 0x00, 0x51, 0x00, 0x52, 0x00, 0x53, 0x00, 0x54, 0x00, 0x55, 0x00, 0x56, 0x00, 0x57, 0x00, + 0x58, 0x00, 0x59, 0x00, 0x5A, 0x00, 0x5B, 0x00, 0x5C, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x5F, 0x00, + 0x60, 0x00, 0x41, 0x00, 0x42, 0x00, 0x43, 0x00, 0x44, 0x00, 0x45, 0x00, 0x46, 0x00, 0x47, 0x00, + 0x48, 0x00, 0x49, 0x00, 0x4A, 0x00, 0x4B, 0x00, 0x4C, 0x00, 0x4D, 0x00, 0x4E, 0x00, 0x4F, 0x00, + 0x50, 0x00, 0x51, 0x00, 0x52, 0x00, 0x53, 0x00, 0x54, 0x00, 0x55, 0x00, 0x56, 0x00, 0x57, 0x00, + 0x58, 0x00, 0x59, 0x00, 0x5A, 0x00, 0x7B, 0x00, 0x7C, 0x00, 0x7D, 0x00, 0x7E, 0x00, 0x7F, 0x00, + 0x80, 0x00, 0x81, 0x00, 0x82, 0x00, 0x83, 0x00, 0x84, 0x00, 0x85, 0x00, 0x86, 0x00, 0x87, 0x00, + 0x88, 0x00, 0x89, 0x00, 0x8A, 0x00, 0x8B, 0x00, 0x8C, 0x00, 0x8D, 0x00, 0x8E, 0x00, 0x8F, 0x00, + 0x90, 0x00, 0x91, 0x00, 0x92, 0x00, 0x93, 0x00, 0x94, 0x00, 0x95, 0x00, 0x96, 0x00, 0x97, 0x00, + 0x98, 0x00, 0x99, 0x00, 0x9A, 0x00, 0x9B, 0x00, 0x9C, 0x00, 0x9D, 0x00, 0x9E, 0x00, 0x9F, 0x00, + 0xA0, 0x00, 0xA1, 0x00, 0xA2, 0x00, 0xA3, 0x00, 0xA4, 0x00, 0xA5, 0x00, 0xA6, 0x00, 0xA7, 0x00, + 0xA8, 0x00, 0xA9, 0x00, 0xAA, 0x00, 0xAB, 0x00, 0xAC, 0x00, 0xAD, 0x00, 0xAE, 0x00, 0xAF, 0x00, + 0xB0, 0x00, 0xB1, 0x00, 0xB2, 0x00, 0xB3, 0x00, 0xB4, 0x00, 0xB5, 0x00, 0xB6, 0x00, 0xB7, 0x00, + 0xB8, 0x00, 0xB9, 0x00, 0xBA, 0x00, 0xBB, 0x00, 0xBC, 0x00, 0xBD, 0x00, 0xBE, 0x00, 0xBF, 0x00, + 0xC0, 0x00, 0xC1, 0x00, 0xC2, 0x00, 0xC3, 0x00, 0xC4, 0x00, 0xC5, 0x00, 0xC6, 0x00, 0xC7, 0x00, + 0xC8, 0x00, 0xC9, 0x00, 0xCA, 0x00, 0xCB, 0x00, 0xCC, 0x00, 0xCD, 0x00, 0xCE, 0x00, 0xCF, 0x00, + 0xD0, 0x00, 0xD1, 0x00, 0xD2, 0x00, 0xD3, 0x00, 0xD4, 0x00, 0xD5, 0x00, 0xD6, 0x00, 0xD7, 0x00, + 0xD8, 0x00, 0xD9, 0x00, 0xDA, 0x00, 0xDB, 0x00, 0xDC, 0x00, 0xDD, 0x00, 0xDE, 0x00, 0xDF, 0x00, + 0xC0, 0x00, 0xC1, 0x00, 0xC2, 0x00, 0xC3, 0x00, 0xC4, 0x00, 0xC5, 0x00, 0xC6, 0x00, 0xC7, 0x00, + 0xC8, 0x00, 0xC9, 0x00, 0xCA, 
0x00, 0xCB, 0x00, 0xCC, 0x00, 0xCD, 0x00, 0xCE, 0x00, 0xCF, 0x00, + 0xD0, 0x00, 0xD1, 0x00, 0xD2, 0x00, 0xD3, 0x00, 0xD4, 0x00, 0xD5, 0x00, 0xD6, 0x00, 0xF7, 0x00, + 0xD8, 0x00, 0xD9, 0x00, 0xDA, 0x00, 0xDB, 0x00, 0xDC, 0x00, 0xDD, 0x00, 0xDE, 0x00, 0x78, 0x01, + 0x00, 0x01, 0x00, 0x01, 0x02, 0x01, 0x02, 0x01, 0x04, 0x01, 0x04, 0x01, 0x06, 0x01, 0x06, 0x01, + 0x08, 0x01, 0x08, 0x01, 0x0A, 0x01, 0x0A, 0x01, 0x0C, 0x01, 0x0C, 0x01, 0x0E, 0x01, 0x0E, 0x01, + 0x10, 0x01, 0x10, 0x01, 0x12, 0x01, 0x12, 0x01, 0x14, 0x01, 0x14, 0x01, 0x16, 0x01, 0x16, 0x01, + 0x18, 0x01, 0x18, 0x01, 0x1A, 0x01, 0x1A, 0x01, 0x1C, 0x01, 0x1C, 0x01, 0x1E, 0x01, 0x1E, 0x01, + 0x20, 0x01, 0x20, 0x01, 0x22, 0x01, 0x22, 0x01, 0x24, 0x01, 0x24, 0x01, 0x26, 0x01, 0x26, 0x01, + 0x28, 0x01, 0x28, 0x01, 0x2A, 0x01, 0x2A, 0x01, 0x2C, 0x01, 0x2C, 0x01, 0x2E, 0x01, 0x2E, 0x01, + 0x30, 0x01, 0x31, 0x01, 0x32, 0x01, 0x32, 0x01, 0x34, 0x01, 0x34, 0x01, 0x36, 0x01, 0x36, 0x01, + 0x38, 0x01, 0x39, 0x01, 0x39, 0x01, 0x3B, 0x01, 0x3B, 0x01, 0x3D, 0x01, 0x3D, 0x01, 0x3F, 0x01, + 0x3F, 0x01, 0x41, 0x01, 0x41, 0x01, 0x43, 0x01, 0x43, 0x01, 0x45, 0x01, 0x45, 0x01, 0x47, 0x01, + 0x47, 0x01, 0x49, 0x01, 0x4A, 0x01, 0x4A, 0x01, 0x4C, 0x01, 0x4C, 0x01, 0x4E, 0x01, 0x4E, 0x01, + 0x50, 0x01, 0x50, 0x01, 0x52, 0x01, 0x52, 0x01, 0x54, 0x01, 0x54, 0x01, 0x56, 0x01, 0x56, 0x01, + 0x58, 0x01, 0x58, 0x01, 0x5A, 0x01, 0x5A, 0x01, 0x5C, 0x01, 0x5C, 0x01, 0x5E, 0x01, 0x5E, 0x01, + 0x60, 0x01, 0x60, 0x01, 0x62, 0x01, 0x62, 0x01, 0x64, 0x01, 0x64, 0x01, 0x66, 0x01, 0x66, 0x01, + 0x68, 0x01, 0x68, 0x01, 0x6A, 0x01, 0x6A, 0x01, 0x6C, 0x01, 0x6C, 0x01, 0x6E, 0x01, 0x6E, 0x01, + 0x70, 0x01, 0x70, 0x01, 0x72, 0x01, 0x72, 0x01, 0x74, 0x01, 0x74, 0x01, 0x76, 0x01, 0x76, 0x01, + 0x78, 0x01, 0x79, 0x01, 0x79, 0x01, 0x7B, 0x01, 0x7B, 0x01, 0x7D, 0x01, 0x7D, 0x01, 0x7F, 0x01, + 0x43, 0x02, 0x81, 0x01, 0x82, 0x01, 0x82, 0x01, 0x84, 0x01, 0x84, 0x01, 0x86, 0x01, 0x87, 0x01, + 0x87, 0x01, 0x89, 0x01, 0x8A, 0x01, 0x8B, 0x01, 0x8B, 0x01, 0x8D, 0x01, 0x8E, 0x01, 0x8F, 0x01, + 0x90, 0x01, 0x91, 0x01, 0x91, 0x01, 0x93, 0x01, 0x94, 0x01, 0xF6, 0x01, 0x96, 0x01, 0x97, 0x01, + 0x98, 0x01, 0x98, 0x01, 0x3D, 0x02, 0x9B, 0x01, 0x9C, 0x01, 0x9D, 0x01, 0x20, 0x02, 0x9F, 0x01, + 0xA0, 0x01, 0xA0, 0x01, 0xA2, 0x01, 0xA2, 0x01, 0xA4, 0x01, 0xA4, 0x01, 0xA6, 0x01, 0xA7, 0x01, + 0xA7, 0x01, 0xA9, 0x01, 0xAA, 0x01, 0xAB, 0x01, 0xAC, 0x01, 0xAC, 0x01, 0xAE, 0x01, 0xAF, 0x01, + 0xAF, 0x01, 0xB1, 0x01, 0xB2, 0x01, 0xB3, 0x01, 0xB3, 0x01, 0xB5, 0x01, 0xB5, 0x01, 0xB7, 0x01, + 0xB8, 0x01, 0xB8, 0x01, 0xBA, 0x01, 0xBB, 0x01, 0xBC, 0x01, 0xBC, 0x01, 0xBE, 0x01, 0xF7, 0x01, + 0xC0, 0x01, 0xC1, 0x01, 0xC2, 0x01, 0xC3, 0x01, 0xC4, 0x01, 0xC5, 0x01, 0xC4, 0x01, 0xC7, 0x01, + 0xC8, 0x01, 0xC7, 0x01, 0xCA, 0x01, 0xCB, 0x01, 0xCA, 0x01, 0xCD, 0x01, 0xCD, 0x01, 0xCF, 0x01, + 0xCF, 0x01, 0xD1, 0x01, 0xD1, 0x01, 0xD3, 0x01, 0xD3, 0x01, 0xD5, 0x01, 0xD5, 0x01, 0xD7, 0x01, + 0xD7, 0x01, 0xD9, 0x01, 0xD9, 0x01, 0xDB, 0x01, 0xDB, 0x01, 0x8E, 0x01, 0xDE, 0x01, 0xDE, 0x01, + 0xE0, 0x01, 0xE0, 0x01, 0xE2, 0x01, 0xE2, 0x01, 0xE4, 0x01, 0xE4, 0x01, 0xE6, 0x01, 0xE6, 0x01, + 0xE8, 0x01, 0xE8, 0x01, 0xEA, 0x01, 0xEA, 0x01, 0xEC, 0x01, 0xEC, 0x01, 0xEE, 0x01, 0xEE, 0x01, + 0xF0, 0x01, 0xF1, 0x01, 0xF2, 0x01, 0xF1, 0x01, 0xF4, 0x01, 0xF4, 0x01, 0xF6, 0x01, 0xF7, 0x01, + 0xF8, 0x01, 0xF8, 0x01, 0xFA, 0x01, 0xFA, 0x01, 0xFC, 0x01, 0xFC, 0x01, 0xFE, 0x01, 0xFE, 0x01, + 0x00, 0x02, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x04, 0x02, 0x04, 0x02, 0x06, 0x02, 0x06, 0x02, + 0x08, 0x02, 0x08, 0x02, 0x0A, 0x02, 0x0A, 0x02, 0x0C, 
0x02, 0x0C, 0x02, 0x0E, 0x02, 0x0E, 0x02, + 0x10, 0x02, 0x10, 0x02, 0x12, 0x02, 0x12, 0x02, 0x14, 0x02, 0x14, 0x02, 0x16, 0x02, 0x16, 0x02, + 0x18, 0x02, 0x18, 0x02, 0x1A, 0x02, 0x1A, 0x02, 0x1C, 0x02, 0x1C, 0x02, 0x1E, 0x02, 0x1E, 0x02, + 0x20, 0x02, 0x21, 0x02, 0x22, 0x02, 0x22, 0x02, 0x24, 0x02, 0x24, 0x02, 0x26, 0x02, 0x26, 0x02, + 0x28, 0x02, 0x28, 0x02, 0x2A, 0x02, 0x2A, 0x02, 0x2C, 0x02, 0x2C, 0x02, 0x2E, 0x02, 0x2E, 0x02, + 0x30, 0x02, 0x30, 0x02, 0x32, 0x02, 0x32, 0x02, 0x34, 0x02, 0x35, 0x02, 0x36, 0x02, 0x37, 0x02, + 0x38, 0x02, 0x39, 0x02, 0x65, 0x2C, 0x3B, 0x02, 0x3B, 0x02, 0x3D, 0x02, 0x66, 0x2C, 0x3F, 0x02, + 0x40, 0x02, 0x41, 0x02, 0x41, 0x02, 0x43, 0x02, 0x44, 0x02, 0x45, 0x02, 0x46, 0x02, 0x46, 0x02, + 0x48, 0x02, 0x48, 0x02, 0x4A, 0x02, 0x4A, 0x02, 0x4C, 0x02, 0x4C, 0x02, 0x4E, 0x02, 0x4E, 0x02, + 0x50, 0x02, 0x51, 0x02, 0x52, 0x02, 0x81, 0x01, 0x86, 0x01, 0x55, 0x02, 0x89, 0x01, 0x8A, 0x01, + 0x58, 0x02, 0x8F, 0x01, 0x5A, 0x02, 0x90, 0x01, 0x5C, 0x02, 0x5D, 0x02, 0x5E, 0x02, 0x5F, 0x02, + 0x93, 0x01, 0x61, 0x02, 0x62, 0x02, 0x94, 0x01, 0x64, 0x02, 0x65, 0x02, 0x66, 0x02, 0x67, 0x02, + 0x97, 0x01, 0x96, 0x01, 0x6A, 0x02, 0x62, 0x2C, 0x6C, 0x02, 0x6D, 0x02, 0x6E, 0x02, 0x9C, 0x01, + 0x70, 0x02, 0x71, 0x02, 0x9D, 0x01, 0x73, 0x02, 0x74, 0x02, 0x9F, 0x01, 0x76, 0x02, 0x77, 0x02, + 0x78, 0x02, 0x79, 0x02, 0x7A, 0x02, 0x7B, 0x02, 0x7C, 0x02, 0x64, 0x2C, 0x7E, 0x02, 0x7F, 0x02, + 0xA6, 0x01, 0x81, 0x02, 0x82, 0x02, 0xA9, 0x01, 0x84, 0x02, 0x85, 0x02, 0x86, 0x02, 0x87, 0x02, + 0xAE, 0x01, 0x44, 0x02, 0xB1, 0x01, 0xB2, 0x01, 0x45, 0x02, 0x8D, 0x02, 0x8E, 0x02, 0x8F, 0x02, + 0x90, 0x02, 0x91, 0x02, 0xB7, 0x01, 0x93, 0x02, 0x94, 0x02, 0x95, 0x02, 0x96, 0x02, 0x97, 0x02, + 0x98, 0x02, 0x99, 0x02, 0x9A, 0x02, 0x9B, 0x02, 0x9C, 0x02, 0x9D, 0x02, 0x9E, 0x02, 0x9F, 0x02, + 0xA0, 0x02, 0xA1, 0x02, 0xA2, 0x02, 0xA3, 0x02, 0xA4, 0x02, 0xA5, 0x02, 0xA6, 0x02, 0xA7, 0x02, + 0xA8, 0x02, 0xA9, 0x02, 0xAA, 0x02, 0xAB, 0x02, 0xAC, 0x02, 0xAD, 0x02, 0xAE, 0x02, 0xAF, 0x02, + 0xB0, 0x02, 0xB1, 0x02, 0xB2, 0x02, 0xB3, 0x02, 0xB4, 0x02, 0xB5, 0x02, 0xB6, 0x02, 0xB7, 0x02, + 0xB8, 0x02, 0xB9, 0x02, 0xBA, 0x02, 0xBB, 0x02, 0xBC, 0x02, 0xBD, 0x02, 0xBE, 0x02, 0xBF, 0x02, + 0xC0, 0x02, 0xC1, 0x02, 0xC2, 0x02, 0xC3, 0x02, 0xC4, 0x02, 0xC5, 0x02, 0xC6, 0x02, 0xC7, 0x02, + 0xC8, 0x02, 0xC9, 0x02, 0xCA, 0x02, 0xCB, 0x02, 0xCC, 0x02, 0xCD, 0x02, 0xCE, 0x02, 0xCF, 0x02, + 0xD0, 0x02, 0xD1, 0x02, 0xD2, 0x02, 0xD3, 0x02, 0xD4, 0x02, 0xD5, 0x02, 0xD6, 0x02, 0xD7, 0x02, + 0xD8, 0x02, 0xD9, 0x02, 0xDA, 0x02, 0xDB, 0x02, 0xDC, 0x02, 0xDD, 0x02, 0xDE, 0x02, 0xDF, 0x02, + 0xE0, 0x02, 0xE1, 0x02, 0xE2, 0x02, 0xE3, 0x02, 0xE4, 0x02, 0xE5, 0x02, 0xE6, 0x02, 0xE7, 0x02, + 0xE8, 0x02, 0xE9, 0x02, 0xEA, 0x02, 0xEB, 0x02, 0xEC, 0x02, 0xED, 0x02, 0xEE, 0x02, 0xEF, 0x02, + 0xF0, 0x02, 0xF1, 0x02, 0xF2, 0x02, 0xF3, 0x02, 0xF4, 0x02, 0xF5, 0x02, 0xF6, 0x02, 0xF7, 0x02, + 0xF8, 0x02, 0xF9, 0x02, 0xFA, 0x02, 0xFB, 0x02, 0xFC, 0x02, 0xFD, 0x02, 0xFE, 0x02, 0xFF, 0x02, + 0x00, 0x03, 0x01, 0x03, 0x02, 0x03, 0x03, 0x03, 0x04, 0x03, 0x05, 0x03, 0x06, 0x03, 0x07, 0x03, + 0x08, 0x03, 0x09, 0x03, 0x0A, 0x03, 0x0B, 0x03, 0x0C, 0x03, 0x0D, 0x03, 0x0E, 0x03, 0x0F, 0x03, + 0x10, 0x03, 0x11, 0x03, 0x12, 0x03, 0x13, 0x03, 0x14, 0x03, 0x15, 0x03, 0x16, 0x03, 0x17, 0x03, + 0x18, 0x03, 0x19, 0x03, 0x1A, 0x03, 0x1B, 0x03, 0x1C, 0x03, 0x1D, 0x03, 0x1E, 0x03, 0x1F, 0x03, + 0x20, 0x03, 0x21, 0x03, 0x22, 0x03, 0x23, 0x03, 0x24, 0x03, 0x25, 0x03, 0x26, 0x03, 0x27, 0x03, + 0x28, 0x03, 0x29, 0x03, 0x2A, 0x03, 0x2B, 0x03, 0x2C, 0x03, 0x2D, 0x03, 0x2E, 
0x03, 0x2F, 0x03, + 0x30, 0x03, 0x31, 0x03, 0x32, 0x03, 0x33, 0x03, 0x34, 0x03, 0x35, 0x03, 0x36, 0x03, 0x37, 0x03, + 0x38, 0x03, 0x39, 0x03, 0x3A, 0x03, 0x3B, 0x03, 0x3C, 0x03, 0x3D, 0x03, 0x3E, 0x03, 0x3F, 0x03, + 0x40, 0x03, 0x41, 0x03, 0x42, 0x03, 0x43, 0x03, 0x44, 0x03, 0x45, 0x03, 0x46, 0x03, 0x47, 0x03, + 0x48, 0x03, 0x49, 0x03, 0x4A, 0x03, 0x4B, 0x03, 0x4C, 0x03, 0x4D, 0x03, 0x4E, 0x03, 0x4F, 0x03, + 0x50, 0x03, 0x51, 0x03, 0x52, 0x03, 0x53, 0x03, 0x54, 0x03, 0x55, 0x03, 0x56, 0x03, 0x57, 0x03, + 0x58, 0x03, 0x59, 0x03, 0x5A, 0x03, 0x5B, 0x03, 0x5C, 0x03, 0x5D, 0x03, 0x5E, 0x03, 0x5F, 0x03, + 0x60, 0x03, 0x61, 0x03, 0x62, 0x03, 0x63, 0x03, 0x64, 0x03, 0x65, 0x03, 0x66, 0x03, 0x67, 0x03, + 0x68, 0x03, 0x69, 0x03, 0x6A, 0x03, 0x6B, 0x03, 0x6C, 0x03, 0x6D, 0x03, 0x6E, 0x03, 0x6F, 0x03, + 0x70, 0x03, 0x71, 0x03, 0x72, 0x03, 0x73, 0x03, 0x74, 0x03, 0x75, 0x03, 0x76, 0x03, 0x77, 0x03, + 0x78, 0x03, 0x79, 0x03, 0x7A, 0x03, 0xFD, 0x03, 0xFE, 0x03, 0xFF, 0x03, 0x7E, 0x03, 0x7F, 0x03, + 0x80, 0x03, 0x81, 0x03, 0x82, 0x03, 0x83, 0x03, 0x84, 0x03, 0x85, 0x03, 0x86, 0x03, 0x87, 0x03, + 0x88, 0x03, 0x89, 0x03, 0x8A, 0x03, 0x8B, 0x03, 0x8C, 0x03, 0x8D, 0x03, 0x8E, 0x03, 0x8F, 0x03, + 0x90, 0x03, 0x91, 0x03, 0x92, 0x03, 0x93, 0x03, 0x94, 0x03, 0x95, 0x03, 0x96, 0x03, 0x97, 0x03, + 0x98, 0x03, 0x99, 0x03, 0x9A, 0x03, 0x9B, 0x03, 0x9C, 0x03, 0x9D, 0x03, 0x9E, 0x03, 0x9F, 0x03, + 0xA0, 0x03, 0xA1, 0x03, 0xA2, 0x03, 0xA3, 0x03, 0xA4, 0x03, 0xA5, 0x03, 0xA6, 0x03, 0xA7, 0x03, + 0xA8, 0x03, 0xA9, 0x03, 0xAA, 0x03, 0xAB, 0x03, 0x86, 0x03, 0x88, 0x03, 0x89, 0x03, 0x8A, 0x03, + 0xB0, 0x03, 0x91, 0x03, 0x92, 0x03, 0x93, 0x03, 0x94, 0x03, 0x95, 0x03, 0x96, 0x03, 0x97, 0x03, + 0x98, 0x03, 0x99, 0x03, 0x9A, 0x03, 0x9B, 0x03, 0x9C, 0x03, 0x9D, 0x03, 0x9E, 0x03, 0x9F, 0x03, + 0xA0, 0x03, 0xA1, 0x03, 0xA3, 0x03, 0xA3, 0x03, 0xA4, 0x03, 0xA5, 0x03, 0xA6, 0x03, 0xA7, 0x03, + 0xA8, 0x03, 0xA9, 0x03, 0xAA, 0x03, 0xAB, 0x03, 0x8C, 0x03, 0x8E, 0x03, 0x8F, 0x03, 0xCF, 0x03, + 0xD0, 0x03, 0xD1, 0x03, 0xD2, 0x03, 0xD3, 0x03, 0xD4, 0x03, 0xD5, 0x03, 0xD6, 0x03, 0xD7, 0x03, + 0xD8, 0x03, 0xD8, 0x03, 0xDA, 0x03, 0xDA, 0x03, 0xDC, 0x03, 0xDC, 0x03, 0xDE, 0x03, 0xDE, 0x03, + 0xE0, 0x03, 0xE0, 0x03, 0xE2, 0x03, 0xE2, 0x03, 0xE4, 0x03, 0xE4, 0x03, 0xE6, 0x03, 0xE6, 0x03, + 0xE8, 0x03, 0xE8, 0x03, 0xEA, 0x03, 0xEA, 0x03, 0xEC, 0x03, 0xEC, 0x03, 0xEE, 0x03, 0xEE, 0x03, + 0xF0, 0x03, 0xF1, 0x03, 0xF9, 0x03, 0xF3, 0x03, 0xF4, 0x03, 0xF5, 0x03, 0xF6, 0x03, 0xF7, 0x03, + 0xF7, 0x03, 0xF9, 0x03, 0xFA, 0x03, 0xFA, 0x03, 0xFC, 0x03, 0xFD, 0x03, 0xFE, 0x03, 0xFF, 0x03, + 0x00, 0x04, 0x01, 0x04, 0x02, 0x04, 0x03, 0x04, 0x04, 0x04, 0x05, 0x04, 0x06, 0x04, 0x07, 0x04, + 0x08, 0x04, 0x09, 0x04, 0x0A, 0x04, 0x0B, 0x04, 0x0C, 0x04, 0x0D, 0x04, 0x0E, 0x04, 0x0F, 0x04, + 0x10, 0x04, 0x11, 0x04, 0x12, 0x04, 0x13, 0x04, 0x14, 0x04, 0x15, 0x04, 0x16, 0x04, 0x17, 0x04, + 0x18, 0x04, 0x19, 0x04, 0x1A, 0x04, 0x1B, 0x04, 0x1C, 0x04, 0x1D, 0x04, 0x1E, 0x04, 0x1F, 0x04, + 0x20, 0x04, 0x21, 0x04, 0x22, 0x04, 0x23, 0x04, 0x24, 0x04, 0x25, 0x04, 0x26, 0x04, 0x27, 0x04, + 0x28, 0x04, 0x29, 0x04, 0x2A, 0x04, 0x2B, 0x04, 0x2C, 0x04, 0x2D, 0x04, 0x2E, 0x04, 0x2F, 0x04, + 0x10, 0x04, 0x11, 0x04, 0x12, 0x04, 0x13, 0x04, 0x14, 0x04, 0x15, 0x04, 0x16, 0x04, 0x17, 0x04, + 0x18, 0x04, 0x19, 0x04, 0x1A, 0x04, 0x1B, 0x04, 0x1C, 0x04, 0x1D, 0x04, 0x1E, 0x04, 0x1F, 0x04, + 0x20, 0x04, 0x21, 0x04, 0x22, 0x04, 0x23, 0x04, 0x24, 0x04, 0x25, 0x04, 0x26, 0x04, 0x27, 0x04, + 0x28, 0x04, 0x29, 0x04, 0x2A, 0x04, 0x2B, 0x04, 0x2C, 0x04, 0x2D, 0x04, 0x2E, 0x04, 0x2F, 0x04, + 0x00, 
0x04, 0x01, 0x04, 0x02, 0x04, 0x03, 0x04, 0x04, 0x04, 0x05, 0x04, 0x06, 0x04, 0x07, 0x04, + 0x08, 0x04, 0x09, 0x04, 0x0A, 0x04, 0x0B, 0x04, 0x0C, 0x04, 0x0D, 0x04, 0x0E, 0x04, 0x0F, 0x04, + 0x60, 0x04, 0x60, 0x04, 0x62, 0x04, 0x62, 0x04, 0x64, 0x04, 0x64, 0x04, 0x66, 0x04, 0x66, 0x04, + 0x68, 0x04, 0x68, 0x04, 0x6A, 0x04, 0x6A, 0x04, 0x6C, 0x04, 0x6C, 0x04, 0x6E, 0x04, 0x6E, 0x04, + 0x70, 0x04, 0x70, 0x04, 0x72, 0x04, 0x72, 0x04, 0x74, 0x04, 0x74, 0x04, 0x76, 0x04, 0x76, 0x04, + 0x78, 0x04, 0x78, 0x04, 0x7A, 0x04, 0x7A, 0x04, 0x7C, 0x04, 0x7C, 0x04, 0x7E, 0x04, 0x7E, 0x04, + 0x80, 0x04, 0x80, 0x04, 0x82, 0x04, 0x83, 0x04, 0x84, 0x04, 0x85, 0x04, 0x86, 0x04, 0x87, 0x04, + 0x88, 0x04, 0x89, 0x04, 0x8A, 0x04, 0x8A, 0x04, 0x8C, 0x04, 0x8C, 0x04, 0x8E, 0x04, 0x8E, 0x04, + 0x90, 0x04, 0x90, 0x04, 0x92, 0x04, 0x92, 0x04, 0x94, 0x04, 0x94, 0x04, 0x96, 0x04, 0x96, 0x04, + 0x98, 0x04, 0x98, 0x04, 0x9A, 0x04, 0x9A, 0x04, 0x9C, 0x04, 0x9C, 0x04, 0x9E, 0x04, 0x9E, 0x04, + 0xA0, 0x04, 0xA0, 0x04, 0xA2, 0x04, 0xA2, 0x04, 0xA4, 0x04, 0xA4, 0x04, 0xA6, 0x04, 0xA6, 0x04, + 0xA8, 0x04, 0xA8, 0x04, 0xAA, 0x04, 0xAA, 0x04, 0xAC, 0x04, 0xAC, 0x04, 0xAE, 0x04, 0xAE, 0x04, + 0xB0, 0x04, 0xB0, 0x04, 0xB2, 0x04, 0xB2, 0x04, 0xB4, 0x04, 0xB4, 0x04, 0xB6, 0x04, 0xB6, 0x04, + 0xB8, 0x04, 0xB8, 0x04, 0xBA, 0x04, 0xBA, 0x04, 0xBC, 0x04, 0xBC, 0x04, 0xBE, 0x04, 0xBE, 0x04, + 0xC0, 0x04, 0xC1, 0x04, 0xC1, 0x04, 0xC3, 0x04, 0xC3, 0x04, 0xC5, 0x04, 0xC5, 0x04, 0xC7, 0x04, + 0xC7, 0x04, 0xC9, 0x04, 0xC9, 0x04, 0xCB, 0x04, 0xCB, 0x04, 0xCD, 0x04, 0xCD, 0x04, 0xC0, 0x04, + 0xD0, 0x04, 0xD0, 0x04, 0xD2, 0x04, 0xD2, 0x04, 0xD4, 0x04, 0xD4, 0x04, 0xD6, 0x04, 0xD6, 0x04, + 0xD8, 0x04, 0xD8, 0x04, 0xDA, 0x04, 0xDA, 0x04, 0xDC, 0x04, 0xDC, 0x04, 0xDE, 0x04, 0xDE, 0x04, + 0xE0, 0x04, 0xE0, 0x04, 0xE2, 0x04, 0xE2, 0x04, 0xE4, 0x04, 0xE4, 0x04, 0xE6, 0x04, 0xE6, 0x04, + 0xE8, 0x04, 0xE8, 0x04, 0xEA, 0x04, 0xEA, 0x04, 0xEC, 0x04, 0xEC, 0x04, 0xEE, 0x04, 0xEE, 0x04, + 0xF0, 0x04, 0xF0, 0x04, 0xF2, 0x04, 0xF2, 0x04, 0xF4, 0x04, 0xF4, 0x04, 0xF6, 0x04, 0xF6, 0x04, + 0xF8, 0x04, 0xF8, 0x04, 0xFA, 0x04, 0xFA, 0x04, 0xFC, 0x04, 0xFC, 0x04, 0xFE, 0x04, 0xFE, 0x04, + 0x00, 0x05, 0x00, 0x05, 0x02, 0x05, 0x02, 0x05, 0x04, 0x05, 0x04, 0x05, 0x06, 0x05, 0x06, 0x05, + 0x08, 0x05, 0x08, 0x05, 0x0A, 0x05, 0x0A, 0x05, 0x0C, 0x05, 0x0C, 0x05, 0x0E, 0x05, 0x0E, 0x05, + 0x10, 0x05, 0x10, 0x05, 0x12, 0x05, 0x12, 0x05, 0x14, 0x05, 0x15, 0x05, 0x16, 0x05, 0x17, 0x05, + 0x18, 0x05, 0x19, 0x05, 0x1A, 0x05, 0x1B, 0x05, 0x1C, 0x05, 0x1D, 0x05, 0x1E, 0x05, 0x1F, 0x05, + 0x20, 0x05, 0x21, 0x05, 0x22, 0x05, 0x23, 0x05, 0x24, 0x05, 0x25, 0x05, 0x26, 0x05, 0x27, 0x05, + 0x28, 0x05, 0x29, 0x05, 0x2A, 0x05, 0x2B, 0x05, 0x2C, 0x05, 0x2D, 0x05, 0x2E, 0x05, 0x2F, 0x05, + 0x30, 0x05, 0x31, 0x05, 0x32, 0x05, 0x33, 0x05, 0x34, 0x05, 0x35, 0x05, 0x36, 0x05, 0x37, 0x05, + 0x38, 0x05, 0x39, 0x05, 0x3A, 0x05, 0x3B, 0x05, 0x3C, 0x05, 0x3D, 0x05, 0x3E, 0x05, 0x3F, 0x05, + 0x40, 0x05, 0x41, 0x05, 0x42, 0x05, 0x43, 0x05, 0x44, 0x05, 0x45, 0x05, 0x46, 0x05, 0x47, 0x05, + 0x48, 0x05, 0x49, 0x05, 0x4A, 0x05, 0x4B, 0x05, 0x4C, 0x05, 0x4D, 0x05, 0x4E, 0x05, 0x4F, 0x05, + 0x50, 0x05, 0x51, 0x05, 0x52, 0x05, 0x53, 0x05, 0x54, 0x05, 0x55, 0x05, 0x56, 0x05, 0x57, 0x05, + 0x58, 0x05, 0x59, 0x05, 0x5A, 0x05, 0x5B, 0x05, 0x5C, 0x05, 0x5D, 0x05, 0x5E, 0x05, 0x5F, 0x05, + 0x60, 0x05, 0x31, 0x05, 0x32, 0x05, 0x33, 0x05, 0x34, 0x05, 0x35, 0x05, 0x36, 0x05, 0x37, 0x05, + 0x38, 0x05, 0x39, 0x05, 0x3A, 0x05, 0x3B, 0x05, 0x3C, 0x05, 0x3D, 0x05, 0x3E, 0x05, 0x3F, 0x05, + 0x40, 0x05, 0x41, 0x05, 0x42, 
0x05, 0x43, 0x05, 0x44, 0x05, 0x45, 0x05, 0x46, 0x05, 0x47, 0x05, + 0x48, 0x05, 0x49, 0x05, 0x4A, 0x05, 0x4B, 0x05, 0x4C, 0x05, 0x4D, 0x05, 0x4E, 0x05, 0x4F, 0x05, + 0x50, 0x05, 0x51, 0x05, 0x52, 0x05, 0x53, 0x05, 0x54, 0x05, 0x55, 0x05, 0x56, 0x05, 0xFF, 0xFF, + 0xF6, 0x17, 0x63, 0x2C, 0x7E, 0x1D, 0x7F, 0x1D, 0x80, 0x1D, 0x81, 0x1D, 0x82, 0x1D, 0x83, 0x1D, + 0x84, 0x1D, 0x85, 0x1D, 0x86, 0x1D, 0x87, 0x1D, 0x88, 0x1D, 0x89, 0x1D, 0x8A, 0x1D, 0x8B, 0x1D, + 0x8C, 0x1D, 0x8D, 0x1D, 0x8E, 0x1D, 0x8F, 0x1D, 0x90, 0x1D, 0x91, 0x1D, 0x92, 0x1D, 0x93, 0x1D, + 0x94, 0x1D, 0x95, 0x1D, 0x96, 0x1D, 0x97, 0x1D, 0x98, 0x1D, 0x99, 0x1D, 0x9A, 0x1D, 0x9B, 0x1D, + 0x9C, 0x1D, 0x9D, 0x1D, 0x9E, 0x1D, 0x9F, 0x1D, 0xA0, 0x1D, 0xA1, 0x1D, 0xA2, 0x1D, 0xA3, 0x1D, + 0xA4, 0x1D, 0xA5, 0x1D, 0xA6, 0x1D, 0xA7, 0x1D, 0xA8, 0x1D, 0xA9, 0x1D, 0xAA, 0x1D, 0xAB, 0x1D, + 0xAC, 0x1D, 0xAD, 0x1D, 0xAE, 0x1D, 0xAF, 0x1D, 0xB0, 0x1D, 0xB1, 0x1D, 0xB2, 0x1D, 0xB3, 0x1D, + 0xB4, 0x1D, 0xB5, 0x1D, 0xB6, 0x1D, 0xB7, 0x1D, 0xB8, 0x1D, 0xB9, 0x1D, 0xBA, 0x1D, 0xBB, 0x1D, + 0xBC, 0x1D, 0xBD, 0x1D, 0xBE, 0x1D, 0xBF, 0x1D, 0xC0, 0x1D, 0xC1, 0x1D, 0xC2, 0x1D, 0xC3, 0x1D, + 0xC4, 0x1D, 0xC5, 0x1D, 0xC6, 0x1D, 0xC7, 0x1D, 0xC8, 0x1D, 0xC9, 0x1D, 0xCA, 0x1D, 0xCB, 0x1D, + 0xCC, 0x1D, 0xCD, 0x1D, 0xCE, 0x1D, 0xCF, 0x1D, 0xD0, 0x1D, 0xD1, 0x1D, 0xD2, 0x1D, 0xD3, 0x1D, + 0xD4, 0x1D, 0xD5, 0x1D, 0xD6, 0x1D, 0xD7, 0x1D, 0xD8, 0x1D, 0xD9, 0x1D, 0xDA, 0x1D, 0xDB, 0x1D, + 0xDC, 0x1D, 0xDD, 0x1D, 0xDE, 0x1D, 0xDF, 0x1D, 0xE0, 0x1D, 0xE1, 0x1D, 0xE2, 0x1D, 0xE3, 0x1D, + 0xE4, 0x1D, 0xE5, 0x1D, 0xE6, 0x1D, 0xE7, 0x1D, 0xE8, 0x1D, 0xE9, 0x1D, 0xEA, 0x1D, 0xEB, 0x1D, + 0xEC, 0x1D, 0xED, 0x1D, 0xEE, 0x1D, 0xEF, 0x1D, 0xF0, 0x1D, 0xF1, 0x1D, 0xF2, 0x1D, 0xF3, 0x1D, + 0xF4, 0x1D, 0xF5, 0x1D, 0xF6, 0x1D, 0xF7, 0x1D, 0xF8, 0x1D, 0xF9, 0x1D, 0xFA, 0x1D, 0xFB, 0x1D, + 0xFC, 0x1D, 0xFD, 0x1D, 0xFE, 0x1D, 0xFF, 0x1D, 0x00, 0x1E, 0x00, 0x1E, 0x02, 0x1E, 0x02, 0x1E, + 0x04, 0x1E, 0x04, 0x1E, 0x06, 0x1E, 0x06, 0x1E, 0x08, 0x1E, 0x08, 0x1E, 0x0A, 0x1E, 0x0A, 0x1E, + 0x0C, 0x1E, 0x0C, 0x1E, 0x0E, 0x1E, 0x0E, 0x1E, 0x10, 0x1E, 0x10, 0x1E, 0x12, 0x1E, 0x12, 0x1E, + 0x14, 0x1E, 0x14, 0x1E, 0x16, 0x1E, 0x16, 0x1E, 0x18, 0x1E, 0x18, 0x1E, 0x1A, 0x1E, 0x1A, 0x1E, + 0x1C, 0x1E, 0x1C, 0x1E, 0x1E, 0x1E, 0x1E, 0x1E, 0x20, 0x1E, 0x20, 0x1E, 0x22, 0x1E, 0x22, 0x1E, + 0x24, 0x1E, 0x24, 0x1E, 0x26, 0x1E, 0x26, 0x1E, 0x28, 0x1E, 0x28, 0x1E, 0x2A, 0x1E, 0x2A, 0x1E, + 0x2C, 0x1E, 0x2C, 0x1E, 0x2E, 0x1E, 0x2E, 0x1E, 0x30, 0x1E, 0x30, 0x1E, 0x32, 0x1E, 0x32, 0x1E, + 0x34, 0x1E, 0x34, 0x1E, 0x36, 0x1E, 0x36, 0x1E, 0x38, 0x1E, 0x38, 0x1E, 0x3A, 0x1E, 0x3A, 0x1E, + 0x3C, 0x1E, 0x3C, 0x1E, 0x3E, 0x1E, 0x3E, 0x1E, 0x40, 0x1E, 0x40, 0x1E, 0x42, 0x1E, 0x42, 0x1E, + 0x44, 0x1E, 0x44, 0x1E, 0x46, 0x1E, 0x46, 0x1E, 0x48, 0x1E, 0x48, 0x1E, 0x4A, 0x1E, 0x4A, 0x1E, + 0x4C, 0x1E, 0x4C, 0x1E, 0x4E, 0x1E, 0x4E, 0x1E, 0x50, 0x1E, 0x50, 0x1E, 0x52, 0x1E, 0x52, 0x1E, + 0x54, 0x1E, 0x54, 0x1E, 0x56, 0x1E, 0x56, 0x1E, 0x58, 0x1E, 0x58, 0x1E, 0x5A, 0x1E, 0x5A, 0x1E, + 0x5C, 0x1E, 0x5C, 0x1E, 0x5E, 0x1E, 0x5E, 0x1E, 0x60, 0x1E, 0x60, 0x1E, 0x62, 0x1E, 0x62, 0x1E, + 0x64, 0x1E, 0x64, 0x1E, 0x66, 0x1E, 0x66, 0x1E, 0x68, 0x1E, 0x68, 0x1E, 0x6A, 0x1E, 0x6A, 0x1E, + 0x6C, 0x1E, 0x6C, 0x1E, 0x6E, 0x1E, 0x6E, 0x1E, 0x70, 0x1E, 0x70, 0x1E, 0x72, 0x1E, 0x72, 0x1E, + 0x74, 0x1E, 0x74, 0x1E, 0x76, 0x1E, 0x76, 0x1E, 0x78, 0x1E, 0x78, 0x1E, 0x7A, 0x1E, 0x7A, 0x1E, + 0x7C, 0x1E, 0x7C, 0x1E, 0x7E, 0x1E, 0x7E, 0x1E, 0x80, 0x1E, 0x80, 0x1E, 0x82, 0x1E, 0x82, 0x1E, + 0x84, 0x1E, 0x84, 0x1E, 0x86, 0x1E, 0x86, 0x1E, 0x88, 
0x1E, 0x88, 0x1E, 0x8A, 0x1E, 0x8A, 0x1E, + 0x8C, 0x1E, 0x8C, 0x1E, 0x8E, 0x1E, 0x8E, 0x1E, 0x90, 0x1E, 0x90, 0x1E, 0x92, 0x1E, 0x92, 0x1E, + 0x94, 0x1E, 0x94, 0x1E, 0x96, 0x1E, 0x97, 0x1E, 0x98, 0x1E, 0x99, 0x1E, 0x9A, 0x1E, 0x9B, 0x1E, + 0x9C, 0x1E, 0x9D, 0x1E, 0x9E, 0x1E, 0x9F, 0x1E, 0xA0, 0x1E, 0xA0, 0x1E, 0xA2, 0x1E, 0xA2, 0x1E, + 0xA4, 0x1E, 0xA4, 0x1E, 0xA6, 0x1E, 0xA6, 0x1E, 0xA8, 0x1E, 0xA8, 0x1E, 0xAA, 0x1E, 0xAA, 0x1E, + 0xAC, 0x1E, 0xAC, 0x1E, 0xAE, 0x1E, 0xAE, 0x1E, 0xB0, 0x1E, 0xB0, 0x1E, 0xB2, 0x1E, 0xB2, 0x1E, + 0xB4, 0x1E, 0xB4, 0x1E, 0xB6, 0x1E, 0xB6, 0x1E, 0xB8, 0x1E, 0xB8, 0x1E, 0xBA, 0x1E, 0xBA, 0x1E, + 0xBC, 0x1E, 0xBC, 0x1E, 0xBE, 0x1E, 0xBE, 0x1E, 0xC0, 0x1E, 0xC0, 0x1E, 0xC2, 0x1E, 0xC2, 0x1E, + 0xC4, 0x1E, 0xC4, 0x1E, 0xC6, 0x1E, 0xC6, 0x1E, 0xC8, 0x1E, 0xC8, 0x1E, 0xCA, 0x1E, 0xCA, 0x1E, + 0xCC, 0x1E, 0xCC, 0x1E, 0xCE, 0x1E, 0xCE, 0x1E, 0xD0, 0x1E, 0xD0, 0x1E, 0xD2, 0x1E, 0xD2, 0x1E, + 0xD4, 0x1E, 0xD4, 0x1E, 0xD6, 0x1E, 0xD6, 0x1E, 0xD8, 0x1E, 0xD8, 0x1E, 0xDA, 0x1E, 0xDA, 0x1E, + 0xDC, 0x1E, 0xDC, 0x1E, 0xDE, 0x1E, 0xDE, 0x1E, 0xE0, 0x1E, 0xE0, 0x1E, 0xE2, 0x1E, 0xE2, 0x1E, + 0xE4, 0x1E, 0xE4, 0x1E, 0xE6, 0x1E, 0xE6, 0x1E, 0xE8, 0x1E, 0xE8, 0x1E, 0xEA, 0x1E, 0xEA, 0x1E, + 0xEC, 0x1E, 0xEC, 0x1E, 0xEE, 0x1E, 0xEE, 0x1E, 0xF0, 0x1E, 0xF0, 0x1E, 0xF2, 0x1E, 0xF2, 0x1E, + 0xF4, 0x1E, 0xF4, 0x1E, 0xF6, 0x1E, 0xF6, 0x1E, 0xF8, 0x1E, 0xF8, 0x1E, 0xFA, 0x1E, 0xFB, 0x1E, + 0xFC, 0x1E, 0xFD, 0x1E, 0xFE, 0x1E, 0xFF, 0x1E, 0x08, 0x1F, 0x09, 0x1F, 0x0A, 0x1F, 0x0B, 0x1F, + 0x0C, 0x1F, 0x0D, 0x1F, 0x0E, 0x1F, 0x0F, 0x1F, 0x08, 0x1F, 0x09, 0x1F, 0x0A, 0x1F, 0x0B, 0x1F, + 0x0C, 0x1F, 0x0D, 0x1F, 0x0E, 0x1F, 0x0F, 0x1F, 0x18, 0x1F, 0x19, 0x1F, 0x1A, 0x1F, 0x1B, 0x1F, + 0x1C, 0x1F, 0x1D, 0x1F, 0x16, 0x1F, 0x17, 0x1F, 0x18, 0x1F, 0x19, 0x1F, 0x1A, 0x1F, 0x1B, 0x1F, + 0x1C, 0x1F, 0x1D, 0x1F, 0x1E, 0x1F, 0x1F, 0x1F, 0x28, 0x1F, 0x29, 0x1F, 0x2A, 0x1F, 0x2B, 0x1F, + 0x2C, 0x1F, 0x2D, 0x1F, 0x2E, 0x1F, 0x2F, 0x1F, 0x28, 0x1F, 0x29, 0x1F, 0x2A, 0x1F, 0x2B, 0x1F, + 0x2C, 0x1F, 0x2D, 0x1F, 0x2E, 0x1F, 0x2F, 0x1F, 0x38, 0x1F, 0x39, 0x1F, 0x3A, 0x1F, 0x3B, 0x1F, + 0x3C, 0x1F, 0x3D, 0x1F, 0x3E, 0x1F, 0x3F, 0x1F, 0x38, 0x1F, 0x39, 0x1F, 0x3A, 0x1F, 0x3B, 0x1F, + 0x3C, 0x1F, 0x3D, 0x1F, 0x3E, 0x1F, 0x3F, 0x1F, 0x48, 0x1F, 0x49, 0x1F, 0x4A, 0x1F, 0x4B, 0x1F, + 0x4C, 0x1F, 0x4D, 0x1F, 0x46, 0x1F, 0x47, 0x1F, 0x48, 0x1F, 0x49, 0x1F, 0x4A, 0x1F, 0x4B, 0x1F, + 0x4C, 0x1F, 0x4D, 0x1F, 0x4E, 0x1F, 0x4F, 0x1F, 0x50, 0x1F, 0x59, 0x1F, 0x52, 0x1F, 0x5B, 0x1F, + 0x54, 0x1F, 0x5D, 0x1F, 0x56, 0x1F, 0x5F, 0x1F, 0x58, 0x1F, 0x59, 0x1F, 0x5A, 0x1F, 0x5B, 0x1F, + 0x5C, 0x1F, 0x5D, 0x1F, 0x5E, 0x1F, 0x5F, 0x1F, 0x68, 0x1F, 0x69, 0x1F, 0x6A, 0x1F, 0x6B, 0x1F, + 0x6C, 0x1F, 0x6D, 0x1F, 0x6E, 0x1F, 0x6F, 0x1F, 0x68, 0x1F, 0x69, 0x1F, 0x6A, 0x1F, 0x6B, 0x1F, + 0x6C, 0x1F, 0x6D, 0x1F, 0x6E, 0x1F, 0x6F, 0x1F, 0xBA, 0x1F, 0xBB, 0x1F, 0xC8, 0x1F, 0xC9, 0x1F, + 0xCA, 0x1F, 0xCB, 0x1F, 0xDA, 0x1F, 0xDB, 0x1F, 0xF8, 0x1F, 0xF9, 0x1F, 0xEA, 0x1F, 0xEB, 0x1F, + 0xFA, 0x1F, 0xFB, 0x1F, 0x7E, 0x1F, 0x7F, 0x1F, 0x88, 0x1F, 0x89, 0x1F, 0x8A, 0x1F, 0x8B, 0x1F, + 0x8C, 0x1F, 0x8D, 0x1F, 0x8E, 0x1F, 0x8F, 0x1F, 0x88, 0x1F, 0x89, 0x1F, 0x8A, 0x1F, 0x8B, 0x1F, + 0x8C, 0x1F, 0x8D, 0x1F, 0x8E, 0x1F, 0x8F, 0x1F, 0x98, 0x1F, 0x99, 0x1F, 0x9A, 0x1F, 0x9B, 0x1F, + 0x9C, 0x1F, 0x9D, 0x1F, 0x9E, 0x1F, 0x9F, 0x1F, 0x98, 0x1F, 0x99, 0x1F, 0x9A, 0x1F, 0x9B, 0x1F, + 0x9C, 0x1F, 0x9D, 0x1F, 0x9E, 0x1F, 0x9F, 0x1F, 0xA8, 0x1F, 0xA9, 0x1F, 0xAA, 0x1F, 0xAB, 0x1F, + 0xAC, 0x1F, 0xAD, 0x1F, 0xAE, 0x1F, 0xAF, 0x1F, 0xA8, 0x1F, 0xA9, 0x1F, 0xAA, 
0x1F, 0xAB, 0x1F, + 0xAC, 0x1F, 0xAD, 0x1F, 0xAE, 0x1F, 0xAF, 0x1F, 0xB8, 0x1F, 0xB9, 0x1F, 0xB2, 0x1F, 0xBC, 0x1F, + 0xB4, 0x1F, 0xB5, 0x1F, 0xB6, 0x1F, 0xB7, 0x1F, 0xB8, 0x1F, 0xB9, 0x1F, 0xBA, 0x1F, 0xBB, 0x1F, + 0xBC, 0x1F, 0xBD, 0x1F, 0xBE, 0x1F, 0xBF, 0x1F, 0xC0, 0x1F, 0xC1, 0x1F, 0xC2, 0x1F, 0xC3, 0x1F, + 0xC4, 0x1F, 0xC5, 0x1F, 0xC6, 0x1F, 0xC7, 0x1F, 0xC8, 0x1F, 0xC9, 0x1F, 0xCA, 0x1F, 0xCB, 0x1F, + 0xC3, 0x1F, 0xCD, 0x1F, 0xCE, 0x1F, 0xCF, 0x1F, 0xD8, 0x1F, 0xD9, 0x1F, 0xD2, 0x1F, 0xD3, 0x1F, + 0xD4, 0x1F, 0xD5, 0x1F, 0xD6, 0x1F, 0xD7, 0x1F, 0xD8, 0x1F, 0xD9, 0x1F, 0xDA, 0x1F, 0xDB, 0x1F, + 0xDC, 0x1F, 0xDD, 0x1F, 0xDE, 0x1F, 0xDF, 0x1F, 0xE8, 0x1F, 0xE9, 0x1F, 0xE2, 0x1F, 0xE3, 0x1F, + 0xE4, 0x1F, 0xEC, 0x1F, 0xE6, 0x1F, 0xE7, 0x1F, 0xE8, 0x1F, 0xE9, 0x1F, 0xEA, 0x1F, 0xEB, 0x1F, + 0xEC, 0x1F, 0xED, 0x1F, 0xEE, 0x1F, 0xEF, 0x1F, 0xF0, 0x1F, 0xF1, 0x1F, 0xF2, 0x1F, 0xF3, 0x1F, + 0xF4, 0x1F, 0xF5, 0x1F, 0xF6, 0x1F, 0xF7, 0x1F, 0xF8, 0x1F, 0xF9, 0x1F, 0xFA, 0x1F, 0xFB, 0x1F, + 0xF3, 0x1F, 0xFD, 0x1F, 0xFE, 0x1F, 0xFF, 0x1F, 0x00, 0x20, 0x01, 0x20, 0x02, 0x20, 0x03, 0x20, + 0x04, 0x20, 0x05, 0x20, 0x06, 0x20, 0x07, 0x20, 0x08, 0x20, 0x09, 0x20, 0x0A, 0x20, 0x0B, 0x20, + 0x0C, 0x20, 0x0D, 0x20, 0x0E, 0x20, 0x0F, 0x20, 0x10, 0x20, 0x11, 0x20, 0x12, 0x20, 0x13, 0x20, + 0x14, 0x20, 0x15, 0x20, 0x16, 0x20, 0x17, 0x20, 0x18, 0x20, 0x19, 0x20, 0x1A, 0x20, 0x1B, 0x20, + 0x1C, 0x20, 0x1D, 0x20, 0x1E, 0x20, 0x1F, 0x20, 0x20, 0x20, 0x21, 0x20, 0x22, 0x20, 0x23, 0x20, + 0x24, 0x20, 0x25, 0x20, 0x26, 0x20, 0x27, 0x20, 0x28, 0x20, 0x29, 0x20, 0x2A, 0x20, 0x2B, 0x20, + 0x2C, 0x20, 0x2D, 0x20, 0x2E, 0x20, 0x2F, 0x20, 0x30, 0x20, 0x31, 0x20, 0x32, 0x20, 0x33, 0x20, + 0x34, 0x20, 0x35, 0x20, 0x36, 0x20, 0x37, 0x20, 0x38, 0x20, 0x39, 0x20, 0x3A, 0x20, 0x3B, 0x20, + 0x3C, 0x20, 0x3D, 0x20, 0x3E, 0x20, 0x3F, 0x20, 0x40, 0x20, 0x41, 0x20, 0x42, 0x20, 0x43, 0x20, + 0x44, 0x20, 0x45, 0x20, 0x46, 0x20, 0x47, 0x20, 0x48, 0x20, 0x49, 0x20, 0x4A, 0x20, 0x4B, 0x20, + 0x4C, 0x20, 0x4D, 0x20, 0x4E, 0x20, 0x4F, 0x20, 0x50, 0x20, 0x51, 0x20, 0x52, 0x20, 0x53, 0x20, + 0x54, 0x20, 0x55, 0x20, 0x56, 0x20, 0x57, 0x20, 0x58, 0x20, 0x59, 0x20, 0x5A, 0x20, 0x5B, 0x20, + 0x5C, 0x20, 0x5D, 0x20, 0x5E, 0x20, 0x5F, 0x20, 0x60, 0x20, 0x61, 0x20, 0x62, 0x20, 0x63, 0x20, + 0x64, 0x20, 0x65, 0x20, 0x66, 0x20, 0x67, 0x20, 0x68, 0x20, 0x69, 0x20, 0x6A, 0x20, 0x6B, 0x20, + 0x6C, 0x20, 0x6D, 0x20, 0x6E, 0x20, 0x6F, 0x20, 0x70, 0x20, 0x71, 0x20, 0x72, 0x20, 0x73, 0x20, + 0x74, 0x20, 0x75, 0x20, 0x76, 0x20, 0x77, 0x20, 0x78, 0x20, 0x79, 0x20, 0x7A, 0x20, 0x7B, 0x20, + 0x7C, 0x20, 0x7D, 0x20, 0x7E, 0x20, 0x7F, 0x20, 0x80, 0x20, 0x81, 0x20, 0x82, 0x20, 0x83, 0x20, + 0x84, 0x20, 0x85, 0x20, 0x86, 0x20, 0x87, 0x20, 0x88, 0x20, 0x89, 0x20, 0x8A, 0x20, 0x8B, 0x20, + 0x8C, 0x20, 0x8D, 0x20, 0x8E, 0x20, 0x8F, 0x20, 0x90, 0x20, 0x91, 0x20, 0x92, 0x20, 0x93, 0x20, + 0x94, 0x20, 0x95, 0x20, 0x96, 0x20, 0x97, 0x20, 0x98, 0x20, 0x99, 0x20, 0x9A, 0x20, 0x9B, 0x20, + 0x9C, 0x20, 0x9D, 0x20, 0x9E, 0x20, 0x9F, 0x20, 0xA0, 0x20, 0xA1, 0x20, 0xA2, 0x20, 0xA3, 0x20, + 0xA4, 0x20, 0xA5, 0x20, 0xA6, 0x20, 0xA7, 0x20, 0xA8, 0x20, 0xA9, 0x20, 0xAA, 0x20, 0xAB, 0x20, + 0xAC, 0x20, 0xAD, 0x20, 0xAE, 0x20, 0xAF, 0x20, 0xB0, 0x20, 0xB1, 0x20, 0xB2, 0x20, 0xB3, 0x20, + 0xB4, 0x20, 0xB5, 0x20, 0xB6, 0x20, 0xB7, 0x20, 0xB8, 0x20, 0xB9, 0x20, 0xBA, 0x20, 0xBB, 0x20, + 0xBC, 0x20, 0xBD, 0x20, 0xBE, 0x20, 0xBF, 0x20, 0xC0, 0x20, 0xC1, 0x20, 0xC2, 0x20, 0xC3, 0x20, + 0xC4, 0x20, 0xC5, 0x20, 0xC6, 0x20, 0xC7, 0x20, 0xC8, 0x20, 0xC9, 0x20, 0xCA, 0x20, 0xCB, 0x20, + 0xCC, 
0x20, 0xCD, 0x20, 0xCE, 0x20, 0xCF, 0x20, 0xD0, 0x20, 0xD1, 0x20, 0xD2, 0x20, 0xD3, 0x20, + 0xD4, 0x20, 0xD5, 0x20, 0xD6, 0x20, 0xD7, 0x20, 0xD8, 0x20, 0xD9, 0x20, 0xDA, 0x20, 0xDB, 0x20, + 0xDC, 0x20, 0xDD, 0x20, 0xDE, 0x20, 0xDF, 0x20, 0xE0, 0x20, 0xE1, 0x20, 0xE2, 0x20, 0xE3, 0x20, + 0xE4, 0x20, 0xE5, 0x20, 0xE6, 0x20, 0xE7, 0x20, 0xE8, 0x20, 0xE9, 0x20, 0xEA, 0x20, 0xEB, 0x20, + 0xEC, 0x20, 0xED, 0x20, 0xEE, 0x20, 0xEF, 0x20, 0xF0, 0x20, 0xF1, 0x20, 0xF2, 0x20, 0xF3, 0x20, + 0xF4, 0x20, 0xF5, 0x20, 0xF6, 0x20, 0xF7, 0x20, 0xF8, 0x20, 0xF9, 0x20, 0xFA, 0x20, 0xFB, 0x20, + 0xFC, 0x20, 0xFD, 0x20, 0xFE, 0x20, 0xFF, 0x20, 0x00, 0x21, 0x01, 0x21, 0x02, 0x21, 0x03, 0x21, + 0x04, 0x21, 0x05, 0x21, 0x06, 0x21, 0x07, 0x21, 0x08, 0x21, 0x09, 0x21, 0x0A, 0x21, 0x0B, 0x21, + 0x0C, 0x21, 0x0D, 0x21, 0x0E, 0x21, 0x0F, 0x21, 0x10, 0x21, 0x11, 0x21, 0x12, 0x21, 0x13, 0x21, + 0x14, 0x21, 0x15, 0x21, 0x16, 0x21, 0x17, 0x21, 0x18, 0x21, 0x19, 0x21, 0x1A, 0x21, 0x1B, 0x21, + 0x1C, 0x21, 0x1D, 0x21, 0x1E, 0x21, 0x1F, 0x21, 0x20, 0x21, 0x21, 0x21, 0x22, 0x21, 0x23, 0x21, + 0x24, 0x21, 0x25, 0x21, 0x26, 0x21, 0x27, 0x21, 0x28, 0x21, 0x29, 0x21, 0x2A, 0x21, 0x2B, 0x21, + 0x2C, 0x21, 0x2D, 0x21, 0x2E, 0x21, 0x2F, 0x21, 0x30, 0x21, 0x31, 0x21, 0x32, 0x21, 0x33, 0x21, + 0x34, 0x21, 0x35, 0x21, 0x36, 0x21, 0x37, 0x21, 0x38, 0x21, 0x39, 0x21, 0x3A, 0x21, 0x3B, 0x21, + 0x3C, 0x21, 0x3D, 0x21, 0x3E, 0x21, 0x3F, 0x21, 0x40, 0x21, 0x41, 0x21, 0x42, 0x21, 0x43, 0x21, + 0x44, 0x21, 0x45, 0x21, 0x46, 0x21, 0x47, 0x21, 0x48, 0x21, 0x49, 0x21, 0x4A, 0x21, 0x4B, 0x21, + 0x4C, 0x21, 0x4D, 0x21, 0x32, 0x21, 0x4F, 0x21, 0x50, 0x21, 0x51, 0x21, 0x52, 0x21, 0x53, 0x21, + 0x54, 0x21, 0x55, 0x21, 0x56, 0x21, 0x57, 0x21, 0x58, 0x21, 0x59, 0x21, 0x5A, 0x21, 0x5B, 0x21, + 0x5C, 0x21, 0x5D, 0x21, 0x5E, 0x21, 0x5F, 0x21, 0x60, 0x21, 0x61, 0x21, 0x62, 0x21, 0x63, 0x21, + 0x64, 0x21, 0x65, 0x21, 0x66, 0x21, 0x67, 0x21, 0x68, 0x21, 0x69, 0x21, 0x6A, 0x21, 0x6B, 0x21, + 0x6C, 0x21, 0x6D, 0x21, 0x6E, 0x21, 0x6F, 0x21, 0x60, 0x21, 0x61, 0x21, 0x62, 0x21, 0x63, 0x21, + 0x64, 0x21, 0x65, 0x21, 0x66, 0x21, 0x67, 0x21, 0x68, 0x21, 0x69, 0x21, 0x6A, 0x21, 0x6B, 0x21, + 0x6C, 0x21, 0x6D, 0x21, 0x6E, 0x21, 0x6F, 0x21, 0x80, 0x21, 0x81, 0x21, 0x82, 0x21, 0x83, 0x21, + 0x83, 0x21, 0xFF, 0xFF, 0x4B, 0x03, 0xB6, 0x24, 0xB7, 0x24, 0xB8, 0x24, 0xB9, 0x24, 0xBA, 0x24, + 0xBB, 0x24, 0xBC, 0x24, 0xBD, 0x24, 0xBE, 0x24, 0xBF, 0x24, 0xC0, 0x24, 0xC1, 0x24, 0xC2, 0x24, + 0xC3, 0x24, 0xC4, 0x24, 0xC5, 0x24, 0xC6, 0x24, 0xC7, 0x24, 0xC8, 0x24, 0xC9, 0x24, 0xCA, 0x24, + 0xCB, 0x24, 0xCC, 0x24, 0xCD, 0x24, 0xCE, 0x24, 0xCF, 0x24, 0xFF, 0xFF, 0x46, 0x07, 0x00, 0x2C, + 0x01, 0x2C, 0x02, 0x2C, 0x03, 0x2C, 0x04, 0x2C, 0x05, 0x2C, 0x06, 0x2C, 0x07, 0x2C, 0x08, 0x2C, + 0x09, 0x2C, 0x0A, 0x2C, 0x0B, 0x2C, 0x0C, 0x2C, 0x0D, 0x2C, 0x0E, 0x2C, 0x0F, 0x2C, 0x10, 0x2C, + 0x11, 0x2C, 0x12, 0x2C, 0x13, 0x2C, 0x14, 0x2C, 0x15, 0x2C, 0x16, 0x2C, 0x17, 0x2C, 0x18, 0x2C, + 0x19, 0x2C, 0x1A, 0x2C, 0x1B, 0x2C, 0x1C, 0x2C, 0x1D, 0x2C, 0x1E, 0x2C, 0x1F, 0x2C, 0x20, 0x2C, + 0x21, 0x2C, 0x22, 0x2C, 0x23, 0x2C, 0x24, 0x2C, 0x25, 0x2C, 0x26, 0x2C, 0x27, 0x2C, 0x28, 0x2C, + 0x29, 0x2C, 0x2A, 0x2C, 0x2B, 0x2C, 0x2C, 0x2C, 0x2D, 0x2C, 0x2E, 0x2C, 0x5F, 0x2C, 0x60, 0x2C, + 0x60, 0x2C, 0x62, 0x2C, 0x63, 0x2C, 0x64, 0x2C, 0x65, 0x2C, 0x66, 0x2C, 0x67, 0x2C, 0x67, 0x2C, + 0x69, 0x2C, 0x69, 0x2C, 0x6B, 0x2C, 0x6B, 0x2C, 0x6D, 0x2C, 0x6E, 0x2C, 0x6F, 0x2C, 0x70, 0x2C, + 0x71, 0x2C, 0x72, 0x2C, 0x73, 0x2C, 0x74, 0x2C, 0x75, 0x2C, 0x75, 0x2C, 0x77, 0x2C, 0x78, 0x2C, + 0x79, 0x2C, 0x7A, 0x2C, 0x7B, 
0x2C, 0x7C, 0x2C, 0x7D, 0x2C, 0x7E, 0x2C, 0x7F, 0x2C, 0x80, 0x2C, + 0x80, 0x2C, 0x82, 0x2C, 0x82, 0x2C, 0x84, 0x2C, 0x84, 0x2C, 0x86, 0x2C, 0x86, 0x2C, 0x88, 0x2C, + 0x88, 0x2C, 0x8A, 0x2C, 0x8A, 0x2C, 0x8C, 0x2C, 0x8C, 0x2C, 0x8E, 0x2C, 0x8E, 0x2C, 0x90, 0x2C, + 0x90, 0x2C, 0x92, 0x2C, 0x92, 0x2C, 0x94, 0x2C, 0x94, 0x2C, 0x96, 0x2C, 0x96, 0x2C, 0x98, 0x2C, + 0x98, 0x2C, 0x9A, 0x2C, 0x9A, 0x2C, 0x9C, 0x2C, 0x9C, 0x2C, 0x9E, 0x2C, 0x9E, 0x2C, 0xA0, 0x2C, + 0xA0, 0x2C, 0xA2, 0x2C, 0xA2, 0x2C, 0xA4, 0x2C, 0xA4, 0x2C, 0xA6, 0x2C, 0xA6, 0x2C, 0xA8, 0x2C, + 0xA8, 0x2C, 0xAA, 0x2C, 0xAA, 0x2C, 0xAC, 0x2C, 0xAC, 0x2C, 0xAE, 0x2C, 0xAE, 0x2C, 0xB0, 0x2C, + 0xB0, 0x2C, 0xB2, 0x2C, 0xB2, 0x2C, 0xB4, 0x2C, 0xB4, 0x2C, 0xB6, 0x2C, 0xB6, 0x2C, 0xB8, 0x2C, + 0xB8, 0x2C, 0xBA, 0x2C, 0xBA, 0x2C, 0xBC, 0x2C, 0xBC, 0x2C, 0xBE, 0x2C, 0xBE, 0x2C, 0xC0, 0x2C, + 0xC0, 0x2C, 0xC2, 0x2C, 0xC2, 0x2C, 0xC4, 0x2C, 0xC4, 0x2C, 0xC6, 0x2C, 0xC6, 0x2C, 0xC8, 0x2C, + 0xC8, 0x2C, 0xCA, 0x2C, 0xCA, 0x2C, 0xCC, 0x2C, 0xCC, 0x2C, 0xCE, 0x2C, 0xCE, 0x2C, 0xD0, 0x2C, + 0xD0, 0x2C, 0xD2, 0x2C, 0xD2, 0x2C, 0xD4, 0x2C, 0xD4, 0x2C, 0xD6, 0x2C, 0xD6, 0x2C, 0xD8, 0x2C, + 0xD8, 0x2C, 0xDA, 0x2C, 0xDA, 0x2C, 0xDC, 0x2C, 0xDC, 0x2C, 0xDE, 0x2C, 0xDE, 0x2C, 0xE0, 0x2C, + 0xE0, 0x2C, 0xE2, 0x2C, 0xE2, 0x2C, 0xE4, 0x2C, 0xE5, 0x2C, 0xE6, 0x2C, 0xE7, 0x2C, 0xE8, 0x2C, + 0xE9, 0x2C, 0xEA, 0x2C, 0xEB, 0x2C, 0xEC, 0x2C, 0xED, 0x2C, 0xEE, 0x2C, 0xEF, 0x2C, 0xF0, 0x2C, + 0xF1, 0x2C, 0xF2, 0x2C, 0xF3, 0x2C, 0xF4, 0x2C, 0xF5, 0x2C, 0xF6, 0x2C, 0xF7, 0x2C, 0xF8, 0x2C, + 0xF9, 0x2C, 0xFA, 0x2C, 0xFB, 0x2C, 0xFC, 0x2C, 0xFD, 0x2C, 0xFE, 0x2C, 0xFF, 0x2C, 0xA0, 0x10, + 0xA1, 0x10, 0xA2, 0x10, 0xA3, 0x10, 0xA4, 0x10, 0xA5, 0x10, 0xA6, 0x10, 0xA7, 0x10, 0xA8, 0x10, + 0xA9, 0x10, 0xAA, 0x10, 0xAB, 0x10, 0xAC, 0x10, 0xAD, 0x10, 0xAE, 0x10, 0xAF, 0x10, 0xB0, 0x10, + 0xB1, 0x10, 0xB2, 0x10, 0xB3, 0x10, 0xB4, 0x10, 0xB5, 0x10, 0xB6, 0x10, 0xB7, 0x10, 0xB8, 0x10, + 0xB9, 0x10, 0xBA, 0x10, 0xBB, 0x10, 0xBC, 0x10, 0xBD, 0x10, 0xBE, 0x10, 0xBF, 0x10, 0xC0, 0x10, + 0xC1, 0x10, 0xC2, 0x10, 0xC3, 0x10, 0xC4, 0x10, 0xC5, 0x10, 0xFF, 0xFF, 0x1B, 0xD2, 0x21, 0xFF, + 0x22, 0xFF, 0x23, 0xFF, 0x24, 0xFF, 0x25, 0xFF, 0x26, 0xFF, 0x27, 0xFF, 0x28, 0xFF, 0x29, 0xFF, + 0x2A, 0xFF, 0x2B, 0xFF, 0x2C, 0xFF, 0x2D, 0xFF, 0x2E, 0xFF, 0x2F, 0xFF, 0x30, 0xFF, 0x31, 0xFF, + 0x32, 0xFF, 0x33, 0xFF, 0x34, 0xFF, 0x35, 0xFF, 0x36, 0xFF, 0x37, 0xFF, 0x38, 0xFF, 0x39, 0xFF, + 0x3A, 0xFF, 0x5B, 0xFF, 0x5C, 0xFF, 0x5D, 0xFF, 0x5E, 0xFF, 0x5F, 0xFF, 0x60, 0xFF, 0x61, 0xFF, + 0x62, 0xFF, 0x63, 0xFF, 0x64, 0xFF, 0x65, 0xFF, 0x66, 0xFF, 0x67, 0xFF, 0x68, 0xFF, 0x69, 0xFF, + 0x6A, 0xFF, 0x6B, 0xFF, 0x6C, 0xFF, 0x6D, 0xFF, 0x6E, 0xFF, 0x6F, 0xFF, 0x70, 0xFF, 0x71, 0xFF, + 0x72, 0xFF, 0x73, 0xFF, 0x74, 0xFF, 0x75, 0xFF, 0x76, 0xFF, 0x77, 0xFF, 0x78, 0xFF, 0x79, 0xFF, + 0x7A, 0xFF, 0x7B, 0xFF, 0x7C, 0xFF, 0x7D, 0xFF, 0x7E, 0xFF, 0x7F, 0xFF, 0x80, 0xFF, 0x81, 0xFF, + 0x82, 0xFF, 0x83, 0xFF, 0x84, 0xFF, 0x85, 0xFF, 0x86, 0xFF, 0x87, 0xFF, 0x88, 0xFF, 0x89, 0xFF, + 0x8A, 0xFF, 0x8B, 0xFF, 0x8C, 0xFF, 0x8D, 0xFF, 0x8E, 0xFF, 0x8F, 0xFF, 0x90, 0xFF, 0x91, 0xFF, + 0x92, 0xFF, 0x93, 0xFF, 0x94, 0xFF, 0x95, 0xFF, 0x96, 0xFF, 0x97, 0xFF, 0x98, 0xFF, 0x99, 0xFF, + 0x9A, 0xFF, 0x9B, 0xFF, 0x9C, 0xFF, 0x9D, 0xFF, 0x9E, 0xFF, 0x9F, 0xFF, 0xA0, 0xFF, 0xA1, 0xFF, + 0xA2, 0xFF, 0xA3, 0xFF, 0xA4, 0xFF, 0xA5, 0xFF, 0xA6, 0xFF, 0xA7, 0xFF, 0xA8, 0xFF, 0xA9, 0xFF, + 0xAA, 0xFF, 0xAB, 0xFF, 0xAC, 0xFF, 0xAD, 0xFF, 0xAE, 0xFF, 0xAF, 0xFF, 0xB0, 0xFF, 0xB1, 0xFF, + 0xB2, 0xFF, 0xB3, 0xFF, 0xB4, 0xFF, 0xB5, 0xFF, 0xB6, 
0xFF, 0xB7, 0xFF, 0xB8, 0xFF, 0xB9, 0xFF, + 0xBA, 0xFF, 0xBB, 0xFF, 0xBC, 0xFF, 0xBD, 0xFF, 0xBE, 0xFF, 0xBF, 0xFF, 0xC0, 0xFF, 0xC1, 0xFF, + 0xC2, 0xFF, 0xC3, 0xFF, 0xC4, 0xFF, 0xC5, 0xFF, 0xC6, 0xFF, 0xC7, 0xFF, 0xC8, 0xFF, 0xC9, 0xFF, + 0xCA, 0xFF, 0xCB, 0xFF, 0xCC, 0xFF, 0xCD, 0xFF, 0xCE, 0xFF, 0xCF, 0xFF, 0xD0, 0xFF, 0xD1, 0xFF, + 0xD2, 0xFF, 0xD3, 0xFF, 0xD4, 0xFF, 0xD5, 0xFF, 0xD6, 0xFF, 0xD7, 0xFF, 0xD8, 0xFF, 0xD9, 0xFF, + 0xDA, 0xFF, 0xDB, 0xFF, 0xDC, 0xFF, 0xDD, 0xFF, 0xDE, 0xFF, 0xDF, 0xFF, 0xE0, 0xFF, 0xE1, 0xFF, + 0xE2, 0xFF, 0xE3, 0xFF, 0xE4, 0xFF, 0xE5, 0xFF, 0xE6, 0xFF, 0xE7, 0xFF, 0xE8, 0xFF, 0xE9, 0xFF, + 0xEA, 0xFF, 0xEB, 0xFF, 0xEC, 0xFF, 0xED, 0xFF, 0xEE, 0xFF, 0xEF, 0xFF, 0xF0, 0xFF, 0xF1, 0xFF, + 0xF2, 0xFF, 0xF3, 0xFF, 0xF4, 0xFF, 0xF5, 0xFF, 0xF6, 0xFF, 0xF7, 0xFF, 0xF8, 0xFF, 0xF9, 0xFF, + 0xFA, 0xFF, 0xFB, 0xFF, 0xFC, 0xFF, 0xFD, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF +}; + +/* end of exfat_upcase.c */ diff --git a/fs/exfat/exfat_version.h b/fs/exfat/exfat_version.h new file mode 100644 index 0000000000000..44289678311dc --- /dev/null +++ b/fs/exfat/exfat_version.h @@ -0,0 +1,19 @@ +/************************************************************************/ +/* */ +/* PROJECT : exFAT & FAT12/16/32 File System */ +/* FILE : exfat_version.h */ +/* PURPOSE : exFAT File Manager */ +/* */ +/*----------------------------------------------------------------------*/ +/* NOTES */ +/* */ +/*----------------------------------------------------------------------*/ +/* REVISION HISTORY */ +/* */ +/* - 2012.02.10 : Release Version 1.1.0 */ +/* - 2012.04.02 : P1 : Change Module License to Samsung Proprietary */ +/* - 2012.06.07 : P2 : Fixed incorrect filename problem */ +/* */ +/************************************************************************/ + +#define EXFAT_VERSION "1.2.5" diff --git a/fs/exofs/file.c b/fs/exofs/file.c index b905c79b4f0af..1f089e0411bf7 100644 --- a/fs/exofs/file.c +++ b/fs/exofs/file.c @@ -48,12 +48,7 @@ static int exofs_file_fsync(struct file *filp, int datasync) struct inode *inode = filp->f_mapping->host; struct super_block *sb; - if (!(inode->i_state & I_DIRTY)) - return 0; - if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) - return 0; - - ret = sync_inode_metadata(inode, 1); + ret = sync_inode_metadata(inode, datasync, 1); /* This is a good place to write the sb */ /* TODO: Sechedule an sb-sync on create */ diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c index a7555238c41aa..007364605488c 100644 --- a/fs/exofs/inode.c +++ b/fs/exofs/inode.c @@ -1271,7 +1271,7 @@ static int exofs_update_inode(struct inode *inode, int do_sync) int exofs_write_inode(struct inode *inode, struct writeback_control *wbc) { - return exofs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL); + return exofs_update_inode(inode, 1 /* XXX: fix fsync and use wbc->sync_mode == WB_SYNC_ALL */ ); } /* diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c index 47cda410b548a..60c67072c7651 100644 --- a/fs/ext2/dir.c +++ b/fs/ext2/dir.c @@ -107,7 +107,7 @@ static int ext2_commit_chunk(struct page *page, loff_t pos, unsigned len) if (IS_DIRSYNC(dir)) { err = write_one_page(page, 1); if (!err) - err = sync_inode_metadata(dir, 1); + err = sync_inode_metadata(dir, 0, 1); } else { unlock_page(page); } diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h index 6346a2acf3260..8fdbe05061ae9 100644 --- a/fs/ext2/ext2.h +++ b/fs/ext2/ext2.h @@ -124,6 +124,8 @@ extern int ext2_get_block(struct inode *, sector_t, struct buffer_head *, int); extern int ext2_setattr (struct dentry *, struct iattr *); extern void 
ext2_set_inode_flags(struct inode *inode); extern void ext2_get_inode_flags(struct ext2_inode_info *); +extern struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino, + struct buffer_head **p); extern int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len); diff --git a/fs/ext2/file.c b/fs/ext2/file.c index 49eec9456c5b3..4ccb040f28f58 100644 --- a/fs/ext2/file.c +++ b/fs/ext2/file.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "ext2.h" #include "xattr.h" #include "acl.h" @@ -43,16 +44,33 @@ static int ext2_release_file (struct inode * inode, struct file * filp) int ext2_fsync(struct file *file, int datasync) { int ret; - struct super_block *sb = file->f_mapping->host->i_sb; - struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping; + struct inode *inode = file->f_mapping->host; + ino_t ino = inode->i_ino; + struct super_block *sb = inode->i_sb; + struct address_space *sb_mapping = sb->s_bdev->bd_inode->i_mapping; + struct buffer_head *bh; + struct ext2_inode *raw_inode; ret = generic_file_fsync(file, datasync); - if (ret == -EIO || test_and_clear_bit(AS_EIO, &mapping->flags)) { + if (ret == -EIO || test_and_clear_bit(AS_EIO, &sb_mapping->flags)) { /* We don't really know where the IO error happened... */ ext2_error(sb, __func__, "detected IO error when writing metadata buffers"); + return -EIO; + } + + raw_inode = ext2_get_inode(sb, ino, &bh); + if (IS_ERR(raw_inode)) + return -EIO; + + sync_dirty_buffer(bh); + if (buffer_req(bh) && !buffer_uptodate(bh)) { + printk ("IO error syncing ext2 inode [%s:%08lx]\n", + sb->s_id, (unsigned long) ino); ret = -EIO; } + brelse (bh); + return ret; } diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 40ad210a5049a..be07143cd045f 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -1203,7 +1203,7 @@ static int ext2_setsize(struct inode *inode, loff_t newsize) inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; if (inode_needs_sync(inode)) { sync_mapping_buffers(inode->i_mapping); - sync_inode_metadata(inode, 1); + sync_inode_metadata(inode, 0, 1); } else { mark_inode_dirty(inode); } @@ -1211,7 +1211,7 @@ static int ext2_setsize(struct inode *inode, loff_t newsize) return 0; } -static struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino, +struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino, struct buffer_head **p) { struct buffer_head * bh; @@ -1505,16 +1505,8 @@ static int __ext2_write_inode(struct inode *inode, int do_sync) } else for (n = 0; n < EXT2_N_BLOCKS; n++) raw_inode->i_block[n] = ei->i_data[n]; mark_buffer_dirty(bh); - if (do_sync) { - sync_dirty_buffer(bh); - if (buffer_req(bh) && !buffer_uptodate(bh)) { - printk ("IO error syncing ext2 inode [%s:%08lx]\n", - sb->s_id, (unsigned long) ino); - err = -EIO; - } - } - ei->i_state &= ~EXT2_STATE_NEW; brelse (bh); + ei->i_state &= ~EXT2_STATE_NEW; return err; } diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c index c2e4dce984d2a..02ef139bc85fc 100644 --- a/fs/ext2/xattr.c +++ b/fs/ext2/xattr.c @@ -691,7 +691,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh, EXT2_I(inode)->i_file_acl = new_bh ? 
new_bh->b_blocknr : 0; inode->i_ctime = CURRENT_TIME_SEC; if (IS_SYNC(inode)) { - error = sync_inode_metadata(inode, 1); + error = sync_inode_metadata(inode, 0, 1); /* In case sync failed due to ENOSPC the inode was actually * written (only some dirty data were not) so we just proceed * as if nothing happened and cleanup the unused block */ diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index b27ba71810ecd..d5d35d7d2bd0b 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -1416,10 +1416,19 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, frame->at = entries; frame->bh = bh; bh = bh2; + /* + * Mark buffers dirty here so that if do_split() fails we write a + * consistent set of buffers to disk. + */ + ext3_journal_dirty_metadata(handle, frame->bh); + ext3_journal_dirty_metadata(handle, bh); de = do_split(handle,dir, &bh, frame, &hinfo, &retval); - dx_release (frames); - if (!(de)) + if (!de) { + ext3_mark_inode_dirty(handle, dir); + dx_release(frames); return retval; + } + dx_release(frames); return add_dirent_to_buf(handle, dentry, inode, de, bh); } @@ -1540,8 +1549,8 @@ static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry, goto cleanup; node2 = (struct dx_node *)(bh2->b_data); entries2 = node2->entries; + memset(&node2->fake, 0, sizeof(struct fake_dirent)); node2->fake.rec_len = ext3_rec_len_to_disk(sb->s_blocksize); - node2->fake.inode = 0; BUFFER_TRACE(frame->bh, "get_write_access"); err = ext3_journal_get_write_access(handle, frame->bh); if (err) diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 85c8cc8f24732..0d62f29f213a1 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -1464,6 +1464,13 @@ static void ext3_orphan_cleanup (struct super_block * sb, return; } + /* Check if feature set allows readwrite operations */ + if (EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP)) { + ext3_msg(sb, KERN_INFO, "Skipping orphan cleanup due to " + "unknown ROCOMPAT features"); + return; + } + if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) { if (es->s_last_orphan) jbd_debug(1, "Errors on filesystem, " diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c index 32e6cc23bd9ad..d565759d82eee 100644 --- a/fs/ext3/xattr.c +++ b/fs/ext3/xattr.c @@ -803,8 +803,16 @@ ext3_xattr_block_set(handle_t *handle, struct inode *inode, /* We need to allocate a new block */ ext3_fsblk_t goal = ext3_group_first_block_no(sb, EXT3_I(inode)->i_block_group); - ext3_fsblk_t block = ext3_new_block(handle, inode, - goal, &error); + ext3_fsblk_t block; + + /* + * Protect us against concurrent allocations to the + * same inode from ext3_..._writepage(). Reservation + * code does not expect racing allocations.
+ */ + mutex_lock(&EXT3_I(inode)->truncate_mutex); + block = ext3_new_block(handle, inode, goal, &error); + mutex_unlock(&EXT3_I(inode)->truncate_mutex); if (error) goto cleanup; ea_idebug(inode, "creating block %d", block); diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 3aa0b72b3b94b..92ad5a9d6ea53 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1351,6 +1351,21 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei) #define EXT4_FEATURE_INCOMPAT_EA_INODE 0x0400 /* EA in inode */ #define EXT4_FEATURE_INCOMPAT_DIRDATA 0x1000 /* data in dirent */ +#define EXT2_FEATURE_COMPAT_SUPP EXT4_FEATURE_COMPAT_EXT_ATTR +#define EXT2_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \ + EXT4_FEATURE_INCOMPAT_META_BG) +#define EXT2_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \ + EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \ + EXT4_FEATURE_RO_COMPAT_BTREE_DIR) + +#define EXT3_FEATURE_COMPAT_SUPP EXT4_FEATURE_COMPAT_EXT_ATTR +#define EXT3_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \ + EXT4_FEATURE_INCOMPAT_RECOVER| \ + EXT4_FEATURE_INCOMPAT_META_BG) +#define EXT3_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \ + EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \ + EXT4_FEATURE_RO_COMPAT_BTREE_DIR) + #define EXT4_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR #define EXT4_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \ EXT4_FEATURE_INCOMPAT_RECOVER| \ @@ -1590,12 +1605,8 @@ void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr, */ struct ext4_lazy_init { unsigned long li_state; - - wait_queue_head_t li_wait_daemon; wait_queue_head_t li_wait_task; - struct timer_list li_timer; struct task_struct *li_task; - struct list_head li_request_list; struct mutex li_list_mtx; }; diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h index d8b992e658c15..be8c636289872 100644 --- a/fs/ext4/ext4_jbd2.h +++ b/fs/ext4/ext4_jbd2.h @@ -301,10 +301,10 @@ static inline int ext4_should_order_data(struct inode *inode) static inline int ext4_should_writeback_data(struct inode *inode) { - if (!S_ISREG(inode->i_mode)) - return 0; if (EXT4_JOURNAL(inode) == NULL) return 1; + if (!S_ISREG(inode->i_mode)) + return 0; if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA)) return 0; if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA) diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 9f7f9e49914fa..bd1e6acff2147 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1846,6 +1846,8 @@ static int ext4_journalled_write_end(struct file *file, from = pos & (PAGE_CACHE_SIZE - 1); to = from + len; + BUG_ON(!ext4_handle_valid(handle)); + if (copied < len) { if (!PageUptodate(page)) copied = 0; @@ -2659,6 +2661,8 @@ static int __ext4_journalled_writepage(struct page *page, goto out; } + BUG_ON(!ext4_handle_valid(handle)); + ret = walk_page_buffers(handle, page_bufs, 0, len, NULL, do_journal_get_write_access); @@ -5274,7 +5278,7 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) err = __ext4_get_inode_loc(inode, &iloc, 0); if (err) return err; - if (wbc->sync_mode == WB_SYNC_ALL) + if (1 /* XXX: fix fsync and use wbc->sync_mode == WB_SYNC_ALL */) sync_dirty_buffer(iloc.bh); if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) { EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr, @@ -5460,13 +5464,12 @@ static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks, /* if nrblocks are contiguous */ if (chunk) { /* - * With N contiguous data blocks, it need at most - * N/EXT4_ADDR_PER_BLOCK(inode->i_sb)
indirect blocks - * 2 dindirect blocks - * 1 tindirect block + * With N contiguous data blocks, we need at most + * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks, + * 2 dindirect blocks, and 1 tindirect block */ - indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb); - return indirects + 3; + return DIV_ROUND_UP(nrblocks, + EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4; } /* * if nrblocks are not contiguous, worse case, each block touch diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index d1fe09aea73dc..1738236a51060 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -1268,6 +1268,8 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, return 0; err: + if (page) + page_cache_release(page); if (e4b->bd_bitmap_page) page_cache_release(e4b->bd_bitmap_page); if (e4b->bd_buddy_page) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index f6a318f836b2c..b8c891cbf1b75 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -75,10 +75,27 @@ static void ext4_write_super(struct super_block *sb); static int ext4_freeze(struct super_block *sb); static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data); +static inline int ext2_feature_set_ok(struct super_block *sb); +static inline int ext3_feature_set_ok(struct super_block *sb); +static int ext4_feature_set_ok(struct super_block *sb, int readonly); static void ext4_destroy_lazyinit_thread(void); static void ext4_unregister_li_request(struct super_block *sb); static void ext4_clear_request_list(void); +#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) +static struct file_system_type ext2_fs_type = { + .owner = THIS_MODULE, + .name = "ext2", + .mount = ext4_mount, + .kill_sb = kill_block_super, + .fs_flags = FS_REQUIRES_DEV, +}; +#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type) +#else +#define IS_EXT2_SB(sb) (0) +#endif + + #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) static struct file_system_type ext3_fs_type = { .owner = THIS_MODULE, @@ -2120,6 +2137,13 @@ static void ext4_orphan_cleanup(struct super_block *sb, return; } + /* Check if feature set would not allow a r/w mount */ + if (!ext4_feature_set_ok(sb, 0)) { + ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to " + "unknown ROCOMPAT features"); + return; + } + if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { if (es->s_last_orphan) jbd_debug(1, "Errors on filesystem, " @@ -2637,12 +2661,6 @@ static void print_daily_error_info(unsigned long arg) mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */ } -static void ext4_lazyinode_timeout(unsigned long data) -{ - struct task_struct *p = (struct task_struct *)data; - wake_up_process(p); -} - /* Find next suitable group and run ext4_init_inode_table */ static int ext4_run_li_request(struct ext4_li_request *elr) { @@ -2690,7 +2708,7 @@ static int ext4_run_li_request(struct ext4_li_request *elr) /* * Remove lr_request from the list_request and free the - * request tructure. Should be called with li_list_mtx held + * request structure. 
Should be called with li_list_mtx held */ static void ext4_remove_li_request(struct ext4_li_request *elr) { @@ -2708,14 +2726,16 @@ static void ext4_remove_li_request(struct ext4_li_request *elr) static void ext4_unregister_li_request(struct super_block *sb) { - struct ext4_li_request *elr = EXT4_SB(sb)->s_li_request; - - if (!ext4_li_info) + mutex_lock(&ext4_li_mtx); + if (!ext4_li_info) { + mutex_unlock(&ext4_li_mtx); return; + } mutex_lock(&ext4_li_info->li_list_mtx); - ext4_remove_li_request(elr); + ext4_remove_li_request(EXT4_SB(sb)->s_li_request); mutex_unlock(&ext4_li_info->li_list_mtx); + mutex_unlock(&ext4_li_mtx); } static struct task_struct *ext4_lazyinit_task; @@ -2734,14 +2754,10 @@ static int ext4_lazyinit_thread(void *arg) struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg; struct list_head *pos, *n; struct ext4_li_request *elr; - unsigned long next_wakeup; - DEFINE_WAIT(wait); + unsigned long next_wakeup, cur; BUG_ON(NULL == eli); - eli->li_timer.data = (unsigned long)current; - eli->li_timer.function = ext4_lazyinode_timeout; - eli->li_task = current; wake_up(&eli->li_wait_task); @@ -2775,19 +2791,15 @@ static int ext4_lazyinit_thread(void *arg) if (freezing(current)) refrigerator(); - if ((time_after_eq(jiffies, next_wakeup)) || + cur = jiffies; + if ((time_after_eq(cur, next_wakeup)) || (MAX_JIFFY_OFFSET == next_wakeup)) { cond_resched(); continue; } - eli->li_timer.expires = next_wakeup; - add_timer(&eli->li_timer); - prepare_to_wait(&eli->li_wait_daemon, &wait, - TASK_INTERRUPTIBLE); - if (time_before(jiffies, next_wakeup)) - schedule(); - finish_wait(&eli->li_wait_daemon, &wait); + schedule_timeout_interruptible(next_wakeup - cur); + if (kthread_should_stop()) { ext4_clear_request_list(); goto exit_thread; @@ -2811,12 +2823,10 @@ static int ext4_lazyinit_thread(void *arg) goto cont_thread; } mutex_unlock(&eli->li_list_mtx); - del_timer_sync(&ext4_li_info->li_timer); eli->li_task = NULL; wake_up(&eli->li_wait_task); kfree(ext4_li_info); - ext4_lazyinit_task = NULL; ext4_li_info = NULL; mutex_unlock(&ext4_li_mtx); @@ -2844,7 +2854,6 @@ static int ext4_run_lazyinit_thread(void) if (IS_ERR(ext4_lazyinit_task)) { int err = PTR_ERR(ext4_lazyinit_task); ext4_clear_request_list(); - del_timer_sync(&ext4_li_info->li_timer); kfree(ext4_li_info); ext4_li_info = NULL; printk(KERN_CRIT "EXT4: error %d creating inode table " @@ -2893,9 +2902,7 @@ static int ext4_li_info_new(void) INIT_LIST_HEAD(&eli->li_request_list); mutex_init(&eli->li_list_mtx); - init_waitqueue_head(&eli->li_wait_daemon); init_waitqueue_head(&eli->li_wait_task); - init_timer(&eli->li_timer); eli->li_state |= EXT4_LAZYINIT_QUIT; ext4_li_info = eli; @@ -2970,6 +2977,12 @@ static int ext4_register_li_request(struct super_block *sb, mutex_unlock(&ext4_li_info->li_list_mtx); sbi->s_li_request = elr; + /* + * set elr to NULL here since it has been inserted to + * the request_list and the removal and free of it is + * handled by ext4_clear_request_list from now on. 
+ */ + elr = NULL; if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) { ret = ext4_run_lazyinit_thread(); @@ -3159,6 +3172,28 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) "feature flags set on rev 0 fs, " "running e2fsck is recommended"); + if (IS_EXT2_SB(sb)) { + if (ext2_feature_set_ok(sb)) + ext4_msg(sb, KERN_INFO, "mounting ext2 file system " + "using the ext4 subsystem"); + else { + ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due " + "to feature incompatibilities"); + goto failed_mount; + } + } + + if (IS_EXT3_SB(sb)) { + if (ext3_feature_set_ok(sb)) + ext4_msg(sb, KERN_INFO, "mounting ext3 file system " + "using the ext4 subsystem"); + else { + ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due " + "to feature incompatibilities"); + goto failed_mount; + } + } + /* * Check feature flags regardless of the revision level, since we * previously didn't change the revision level when setting the flags, @@ -4717,14 +4752,6 @@ static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags, } #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) -static struct file_system_type ext2_fs_type = { - .owner = THIS_MODULE, - .name = "ext2", - .mount = ext4_mount, - .kill_sb = kill_block_super, - .fs_flags = FS_REQUIRES_DEV, -}; - static inline void register_as_ext2(void) { int err = register_filesystem(&ext2_fs_type); @@ -4737,10 +4764,22 @@ static inline void unregister_as_ext2(void) { unregister_filesystem(&ext2_fs_type); } + +static inline int ext2_feature_set_ok(struct super_block *sb) +{ + if (EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP)) + return 0; + if (sb->s_flags & MS_RDONLY) + return 1; + if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP)) + return 0; + return 1; +} MODULE_ALIAS("ext2"); #else static inline void register_as_ext2(void) { } static inline void unregister_as_ext2(void) { } +static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; } #endif #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) @@ -4756,10 +4795,24 @@ static inline void unregister_as_ext3(void) { unregister_filesystem(&ext3_fs_type); } + +static inline int ext3_feature_set_ok(struct super_block *sb) +{ + if (EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT3_FEATURE_INCOMPAT_SUPP)) + return 0; + if (!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) + return 0; + if (sb->s_flags & MS_RDONLY) + return 1; + if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP)) + return 0; + return 1; +} MODULE_ALIAS("ext3"); #else static inline void register_as_ext3(void) { } static inline void unregister_as_ext3(void) { } +static inline int ext3_feature_set_ok(struct super_block *sb) { return 0; } #endif static struct file_system_type ext4_fs_type = { @@ -4843,8 +4896,8 @@ static int __init ext4_init_fs(void) err = init_inodecache(); if (err) goto out1; - register_as_ext2(); register_as_ext3(); + register_as_ext2(); err = register_filesystem(&ext4_fs_type); if (err) goto out; diff --git a/fs/fat/inode.c b/fs/fat/inode.c index b147d434b3f2c..3a74cefca2732 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -652,7 +652,7 @@ static int __fat_write_inode(struct inode *inode, int wait) spin_unlock(&sbi->inode_hash_lock); mark_buffer_dirty(bh); err = 0; - if (wait) + if (1 /* XXX: fix fsync and use wait */) err = sync_dirty_buffer(bh); brelse(bh); return err; diff --git a/fs/file.c b/fs/file.c index 0be344755c020..4c6992d8f3ba1 100644 
--- a/fs/file.c +++ b/fs/file.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -39,14 +40,17 @@ int sysctl_nr_open_max = 1024 * 1024; /* raised later */ */ static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list); -static inline void *alloc_fdmem(unsigned int size) +static void *alloc_fdmem(unsigned int size) { - void *data; - - data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN); - if (data != NULL) - return data; - + /* + * Very large allocations can stress page reclaim, so fall back to + * vmalloc() if the allocation size will be considered "large" by the VM. + */ + if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { + void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN); + if (data != NULL) + return data; + } return vmalloc(size); } diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 0fd1195867164..96fde5ef1b971 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -313,26 +313,25 @@ static void inode_wait_for_writeback(struct inode *inode) } } -/* - * Write out an inode's dirty pages. Called under inode_lock. Either the - * caller has ref on the inode (either via __iget or via syscall against an fd) - * or the inode has I_WILL_FREE set (via generic_forget_inode) +/** + * inode_writeback_begin -- prepare to writeback an inode + * @inode: inode to write back + * @wait: synch writeout or not + * @Returns: 0 if wait == 0 and this call would block (due to other writeback). + * otherwise returns 1. * - * If `wait' is set, wait on the writeout. + * Context: inode_lock must be held, may be dropped. Returns with it held. * - * The whole writeout design is quite complex and fragile. We want to avoid - * starvation of particular inodes when others are being redirtied, prevent - * livelocks, etc. + * inode_writeback_begin sets up an inode to be written back (data and/or + * metadata). This must be called before examining I_DIRTY state of the + * inode, and should be called at least before any data integrity writeout. * - * Called under inode_lock. + * If inode_writeback_begin returns 1, it must be followed by a call to + * inode_writeback_end. */ -static int -writeback_single_inode(struct inode *inode, struct writeback_control *wbc) +int inode_writeback_begin(struct inode *inode, int wait) { - struct address_space *mapping = inode->i_mapping; - unsigned dirty; - int ret; - + assert_spin_locked(&inode_lock); if (!atomic_read(&inode->i_count)) WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING))); else @@ -341,16 +340,10 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) if (inode->i_state & I_SYNC) { /* * If this inode is locked for writeback and we are not doing - * writeback-for-data-integrity, move it to b_more_io so that - * writeback can proceed with the other inodes on s_io. - * - * We'll have another go at writing back this inode when we - * completed a full scan of b_io. + * writeback-for-data-integrity, skip it. */ - if (wbc->sync_mode != WB_SYNC_ALL) { - requeue_io(inode); + if (!wait) return 0; - } /* * It's a data-integrity sync. We must wait. @@ -360,9 +353,91 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) BUG_ON(inode->i_state & I_SYNC); - /* Set I_SYNC, reset I_DIRTY_PAGES */ inode->i_state |= I_SYNC; inode->i_state &= ~I_DIRTY_PAGES; + + return 1; +} +EXPORT_SYMBOL(inode_writeback_begin); + +/** + * inode_writeback_end - end a writeback section opened by inode_writeback_begin + * @inode: inode in question + * @Returns: 0 if the inode still has dirty pagecache, otherwise 1.
+ * + * Context: inode_lock must be held, not dropped. + * + * inode_writeback_end must follow a successful call to inode_writeback_begin + * after we have finished submitting writeback to the inode. + */ +int inode_writeback_end(struct inode *inode) +{ + int ret = 1; + + assert_spin_locked(&inode_lock); + BUG_ON(!(inode->i_state & I_SYNC)); + + if (!(inode->i_state & I_FREEING)) { + if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) { + /* + * We didn't write back all the pages. nfs_writepages() + * sometimes bales out without doing anything. + */ + inode->i_state |= I_DIRTY_PAGES; + ret = 0; + } else if (inode->i_state & I_DIRTY) { + /* + * Filesystems can dirty the inode during writeback + * operations, such as delayed allocation during + * submission or metadata updates after data IO + * completion. + */ + redirty_tail(inode); + } else { + /* + * The inode is clean. At this point we either have + * a reference to the inode or it's on it's way out. + * No need to add it back to the LRU. + */ + list_del_init(&inode->i_wb_list); + } + } + inode->i_state &= ~I_SYNC; + inode_sync_complete(inode); + + return ret; +} +EXPORT_SYMBOL(inode_writeback_end); + +/* + * Write out an inode's dirty pages. Called under inode_lock. Either the + * caller has ref on the inode (either via __iget or via syscall against an fd) + * or the inode has I_WILL_FREE set (via generic_forget_inode) + * + * If `wait' is set, wait on the writeout. + * + * The whole writeout design is quite complex and fragile. We want to avoid + * starvation of particular inodes when others are being redirtied, prevent + * livelocks, etc. + * + * Called under inode_lock. + */ +static int +writeback_single_inode(struct inode *inode, struct writeback_control *wbc) +{ + struct address_space *mapping = inode->i_mapping; + unsigned dirty; + int ret; + + if (!inode_writeback_begin(inode, wbc->sync_mode == WB_SYNC_ALL)) { + /* + * We'll have another go at writing back this inode when we + * completed a full scan of b_io. + */ + requeue_io(inode); + return 0; + } + spin_unlock(&inode_lock); ret = do_writepages(mapping, wbc); @@ -386,56 +461,43 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) spin_lock(&inode_lock); dirty = inode->i_state & I_DIRTY; inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC); - spin_unlock(&inode_lock); /* Don't write the inode if only I_DIRTY_PAGES was set */ if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) { - int err = write_inode(inode, wbc); + int err; + + spin_unlock(&inode_lock); + err = write_inode(inode, wbc); if (ret == 0) ret = err; - } - - spin_lock(&inode_lock); - inode->i_state &= ~I_SYNC; - if (!(inode->i_state & I_FREEING)) { - if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { + spin_lock(&inode_lock); + if (err) { /* - * We didn't write back all the pages. nfs_writepages() - * sometimes bales out without doing anything. + * Inode writeout failed, restore inode metadata + * dirty bits. */ - inode->i_state |= I_DIRTY_PAGES; - if (wbc->nr_to_write <= 0) { - /* - * slice used up: queue for next turn - */ - requeue_io(inode); - } else { - /* - * Writeback blocked by something other than - * congestion. Delay the inode for some time to - * avoid spinning on the CPU (100% iowait) - * retrying writeback of the dirty page/inode - * that cannot be performed immediately. 
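/*
 * Illustrative sketch of the calling convention for the
 * inode_writeback_begin()/inode_writeback_end() pair introduced above: a
 * successful begin must be paired with an end, both under inode_lock, and
 * the lock is dropped while I/O is submitted.  example_write_one_inode()
 * is a stand-in caller, not a function added by this patch; it assumes
 * this kernel's global inode_lock.
 */
static int example_write_one_inode(struct inode *inode, int wait)
{
        int ret = 0;

        spin_lock(&inode_lock);
        if (!inode_writeback_begin(inode, wait)) {
                /* already under writeback and we chose not to wait */
                spin_unlock(&inode_lock);
                return 0;
        }
        spin_unlock(&inode_lock);

        /* submit data and/or metadata writeback here, inode_lock dropped */

        spin_lock(&inode_lock);
        if (!inode_writeback_end(inode))
                ret = -EAGAIN;  /* pagecache still dirty; caller may retry */
        spin_unlock(&inode_lock);
        return ret;
}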
- */ - redirty_tail(inode); - } - } else if (inode->i_state & I_DIRTY) { + inode->i_state |= dirty & + (I_DIRTY_SYNC | I_DIRTY_DATASYNC); + } + } + + if (!inode_writeback_end(inode)) { + if (wbc->nr_to_write <= 0) { /* - * Filesystems can dirty the inode during writeback - * operations, such as delayed allocation during - * submission or metadata updates after data IO - * completion. + * slice used up: queue for next turn */ - redirty_tail(inode); + requeue_io(inode); } else { /* - * The inode is clean. At this point we either have - * a reference to the inode or it's on it's way out. - * No need to add it back to the LRU. + * Writeback blocked by something other than + * congestion. Delay the inode for some time to + * avoid spinning on the CPU (100% iowait) + * retrying writeback of the dirty page/inode + * that cannot be performed immediately. */ - list_del_init(&inode->i_wb_list); + redirty_tail(inode); } } - inode_sync_complete(inode); + return ret; } @@ -994,6 +1056,15 @@ void __mark_inode_dirty(struct inode *inode, int flags) struct backing_dev_info *bdi = NULL; bool wakeup_bdi = false; + /* + * Make sure that changes are seen by all cpus before we test i_state + * or mark anything as being dirty. Ie. all dirty state should be + * written to the inode and visible. Like an "unlock" operation, the + * mark_inode_dirty call must "release" our ordering window that is + * opened when we started modifying the inode. + */ + smp_mb(); + /* * Don't do this for I_DIRTY_PAGES - that doesn't actually * dirty the inode itself @@ -1003,12 +1074,6 @@ void __mark_inode_dirty(struct inode *inode, int flags) sb->s_op->dirty_inode(inode); } - /* - * make sure that changes are seen by all cpus before we test i_state - * -- mikulas - */ - smp_mb(); - /* avoid the locking if we can */ if ((inode->i_state & flags) == flags) return; @@ -1311,13 +1376,50 @@ EXPORT_SYMBOL(sync_inode); * * Note: only writes the actual inode, no associated data or other metadata. */ -int sync_inode_metadata(struct inode *inode, int wait) +int sync_inode_metadata(struct inode *inode, int datasync, int wait) { + struct address_space *mapping = inode->i_mapping; struct writeback_control wbc = { .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE, .nr_to_write = 0, /* metadata-only */ }; + unsigned dirty, mask; + int ret = 0; + + /* + * This is a similar implementation to writeback_single_inode. + * Keep them in sync. + */ + spin_lock(&inode_lock); + if (!inode_writeback_begin(inode, wait)) + goto out; + + if (datasync) + mask = I_DIRTY_DATASYNC; + else + mask = I_DIRTY_SYNC | I_DIRTY_DATASYNC; + dirty = inode->i_state & mask; + if (!dirty) + goto out_wb_end; + /* + * Generic write_inode doesn't distinguish between sync and datasync, + * so even a datasync can clear the sync state. Filesystems which + * distiguish these cases must only clear 'mask' in their metadata + * sync code. 
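/*
 * Illustrative sketch: how a simple ->fsync() implementation would use the
 * three-argument sync_inode_metadata() introduced here, mirroring the
 * generic_file_fsync() conversion later in this patch.  example_fsync()
 * is a stand-in name.
 */
#include <linux/fs.h>
#include <linux/buffer_head.h>

static int example_fsync(struct file *file, int datasync)
{
        struct inode *inode = file->f_mapping->host;
        int ret, err;

        ret = sync_mapping_buffers(inode->i_mapping);
        /* write the inode itself; a datasync may skip pure timestamp updates */
        err = sync_inode_metadata(inode, datasync, 1);
        if (ret == 0)
                ret = err;
        return ret;
}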
+ */ + inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC); + + spin_unlock(&inode_lock); + ret = write_inode(inode, &wbc); + spin_lock(&inode_lock); + if (ret) + inode->i_state |= dirty; /* couldn't write out inode */ - return sync_inode(inode, &wbc); +out_wb_end: + inode_writeback_end(inode); + +out: + spin_unlock(&inode_lock); + return ret; } EXPORT_SYMBOL(sync_inode_metadata); diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 7cd9a5a68d59d..793239839e3a8 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -1382,11 +1382,14 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret) } -static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) +static int gfs2_shrink_glock_memory(struct shrinker *shrink, + struct shrink_control *sc) { struct gfs2_glock *gl; int may_demote; int nr_skipped = 0; + int nr = sc->nr_to_scan; + gfp_t gfp_mask = sc->gfp_mask; LIST_HEAD(skipped); if (nr == 0) diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index a689901963dea..9bc65a26535a4 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -38,6 +38,7 @@ #include #include +#include #include #include #include @@ -77,19 +78,20 @@ static LIST_HEAD(qd_lru_list); static atomic_t qd_lru_count = ATOMIC_INIT(0); static DEFINE_SPINLOCK(qd_lru_lock); -int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) +int gfs2_shrink_qd_memory(struct shrinker *shrink, struct shrink_control *sc) { struct gfs2_quota_data *qd; struct gfs2_sbd *sdp; + int nr_to_scan = sc->nr_to_scan; - if (nr == 0) + if (nr_to_scan == 0) goto out; - if (!(gfp_mask & __GFP_FS)) + if (!(sc->gfp_mask & __GFP_FS)) return -1; spin_lock(&qd_lru_lock); - while (nr && !list_empty(&qd_lru_list)) { + while (nr_to_scan && !list_empty(&qd_lru_list)) { qd = list_entry(qd_lru_list.next, struct gfs2_quota_data, qd_reclaim); sdp = qd->qd_gl->gl_sbd; @@ -110,7 +112,7 @@ int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) spin_unlock(&qd_lru_lock); kmem_cache_free(gfs2_quotad_cachep, qd); spin_lock(&qd_lru_lock); - nr--; + nr_to_scan--; } spin_unlock(&qd_lru_lock); diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h index e7d236ca48bd2..90bf1c302a983 100644 --- a/fs/gfs2/quota.h +++ b/fs/gfs2/quota.h @@ -12,6 +12,7 @@ struct gfs2_inode; struct gfs2_sbd; +struct shrink_control; #define NO_QUOTA_CHANGE ((u32)-1) @@ -51,7 +52,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip) return ret; } -extern int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask); +extern int gfs2_shrink_qd_memory(struct shrinker *shrink, + struct shrink_control *sc); extern const struct quotactl_ops gfs2_quotactl_ops; #endif /* __QUOTA_DOT_H__ */ diff --git a/fs/inode.c b/fs/inode.c index 0647d80accf6f..e0520542ced8b 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -702,8 +702,12 @@ static void prune_icache(int nr_to_scan) * This function is passed the number of inodes to scan, and it returns the * total number of remaining possibly-reclaimable inodes. */ -static int shrink_icache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) +static int shrink_icache_memory(struct shrinker *shrink, + struct shrink_control *sc) { + int nr = sc->nr_to_scan; + gfp_t gfp_mask = sc->gfp_mask; + if (nr) { /* * Nasty deadlock avoidance. 
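/*
 * Illustrative sketch of the shrinker interface these hunks convert to: the
 * scan count and gfp mask now arrive packed in a struct shrink_control
 * instead of as separate arguments.  The cache counter and the helper names
 * below are placeholders for whatever a real shrinker protects.
 */
#include <linux/mm.h>

static int example_cache_count;

static int example_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
        int nr_to_scan = sc->nr_to_scan;

        if (nr_to_scan == 0)
                return example_cache_count;     /* just report the count */
        if (!(sc->gfp_mask & __GFP_FS))
                return -1;      /* refuse to recurse into filesystem code */

        while (nr_to_scan-- && example_cache_count)
                example_cache_count--;          /* drop one cached object */

        return example_cache_count;             /* objects remaining */
}

static struct shrinker example_shrinker = {
        .shrink = example_shrink,
        .seeks  = DEFAULT_SEEKS,
};
/* register_shrinker(&example_shrinker) wires it up; unregister on teardown. */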
We may hold various FS locks, diff --git a/fs/ioprio.c b/fs/ioprio.c index 7da2a06508e54..95a6c2b04e0db 100644 --- a/fs/ioprio.c +++ b/fs/ioprio.c @@ -30,7 +30,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio) { - int err; + int err, i; struct io_context *ioc; const struct cred *cred = current_cred(), *tcred; @@ -60,12 +60,17 @@ int set_task_ioprio(struct task_struct *task, int ioprio) err = -ENOMEM; break; } + /* let other ioc users see the new values */ + smp_wmb(); task->io_context = ioc; } while (1); if (!err) { ioc->ioprio = ioprio; - ioc->ioprio_changed = 1; + /* make sure schedulers see the new ioprio value */ + wmb(); + for (i = 0; i < IOC_IOPRIO_CHANGED_BITS; i++) + set_bit(i, ioc->ioprio_changed); } task_unlock(task); diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c index 34a4861c14b85..eea335e5323f0 100644 --- a/fs/jbd/commit.c +++ b/fs/jbd/commit.c @@ -722,8 +722,13 @@ void journal_commit_transaction(journal_t *journal) required. */ JBUFFER_TRACE(jh, "file as BJ_Forget"); journal_file_buffer(jh, commit_transaction, BJ_Forget); - /* Wake up any transactions which were waiting for this - IO to complete */ + /* + * Wake up any transactions which were waiting for this + * IO to complete. The barrier must be here so that changes + * by journal_file_buffer() take effect before wake_up_bit() + * does the waitqueue check. + */ + smp_mb(); wake_up_bit(&bh->b_state, BH_Unshadow); JBUFFER_TRACE(jh, "brelse shadowed buffer"); __brelse(bh); diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index da1b5e4ffce12..f23188c30ba5a 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c @@ -437,9 +437,12 @@ int __log_space_left(journal_t *journal) int __log_start_commit(journal_t *journal, tid_t target) { /* - * Are we already doing a recent enough commit? + * The only transaction we can possibly wait upon is the + * currently running transaction (if it exists). Otherwise, + * the target tid must be an old one. */ - if (!tid_geq(journal->j_commit_request, target)) { + if (journal->j_running_transaction && + journal->j_running_transaction->t_tid == target) { /* * We want a new commit: OK, mark the request and wakeup the * commit thread. We do _not_ do the commit ourselves. @@ -451,7 +454,14 @@ int __log_start_commit(journal_t *journal, tid_t target) journal->j_commit_sequence); wake_up(&journal->j_wait_commit); return 1; - } + } else if (!tid_geq(journal->j_commit_request, target)) + /* This should never happen, but if it does, preserve + the evidence before kjournald goes into a loop and + increments j_commit_sequence beyond all recognition. */ + WARN_ONCE(1, "jbd: bad log_start_commit: %u %u %u %u\n", + journal->j_commit_request, journal->j_commit_sequence, + target, journal->j_running_transaction ? 
+ journal->j_running_transaction->t_tid : 0); return 0; } diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 9978803ceedc5..a99e77d270783 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -123,7 +123,7 @@ int jfs_commit_inode(struct inode *inode, int wait) int jfs_write_inode(struct inode *inode, struct writeback_control *wbc) { - int wait = wbc->sync_mode == WB_SYNC_ALL; + int wait = 1; /* XXX fix fsync and use wbc->sync_mode == WB_SYNC_ALL; */ if (test_cflag(COMMIT_Nolink, inode)) return 0; diff --git a/fs/libfs.c b/fs/libfs.c index c88eab55aec95..dfea85e9139b5 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -912,12 +912,7 @@ int generic_file_fsync(struct file *file, int datasync) int ret; ret = sync_mapping_buffers(inode->i_mapping); - if (!(inode->i_state & I_DIRTY)) - return ret; - if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) - return ret; - - err = sync_inode_metadata(inode, 1); + err = sync_inode_metadata(inode, datasync, 1); if (ret == 0) ret = err; return ret; diff --git a/fs/mbcache.c b/fs/mbcache.c index a25444ab2baf8..e6196feccc1e4 100644 --- a/fs/mbcache.c +++ b/fs/mbcache.c @@ -90,7 +90,8 @@ static DEFINE_SPINLOCK(mb_cache_spinlock); * What the mbcache registers as to get shrunk dynamically. */ -static int mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask); +static int mb_cache_shrink_fn(struct shrinker *shrink, + struct shrink_control *sc); static struct shrinker mb_cache_shrinker = { .shrink = mb_cache_shrink_fn, @@ -156,18 +157,19 @@ __mb_cache_entry_release_unlock(struct mb_cache_entry *ce) * gets low. * * @shrink: (ignored) - * @nr_to_scan: Number of objects to scan - * @gfp_mask: (ignored) + * @sc: shrink_control passed from reclaim * * Returns the number of objects which are present in the cache. 
*/ static int -mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) +mb_cache_shrink_fn(struct shrinker *shrink, struct shrink_control *sc) { LIST_HEAD(free_list); struct mb_cache *cache; struct mb_cache_entry *entry, *tmp; int count = 0; + int nr_to_scan = sc->nr_to_scan; + gfp_t gfp_mask = sc->gfp_mask; mb_debug("trying to free %d entries", nr_to_scan); spin_lock(&mb_cache_spinlock); diff --git a/fs/minix/inode.c b/fs/minix/inode.c index ae0b83f476a63..c47e397d0da59 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -583,7 +583,7 @@ static int minix_write_inode(struct inode *inode, struct writeback_control *wbc) bh = V2_minix_update_inode(inode); if (!bh) return -EIO; - if (wbc->sync_mode == WB_SYNC_ALL && buffer_dirty(bh)) { + if (1 /* XXX: fix fsync and use wbc->sync_mode == WB_SYNC_ALL */ && buffer_dirty(bh)) { sync_dirty_buffer(bh); if (buffer_req(bh) && !buffer_uptodate(bh)) { printk("IO error syncing minix inode [%s:%08lx]\n", diff --git a/fs/namei.c b/fs/namei.c index a4689eb2df285..3095ca8a31adf 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -712,6 +712,7 @@ static __always_inline void set_root_rcu(struct nameidata *nd) do { seq = read_seqcount_begin(&fs->seq); nd->root = fs->root; + nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq); } while (read_seqcount_retry(&fs->seq, seq)); } } diff --git a/fs/namespace.c b/fs/namespace.c index d1edf26025dcb..445534be02432 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -2469,9 +2469,6 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, error = user_path_dir(new_root, &new); if (error) goto out0; - error = -EINVAL; - if (!check_mnt(new.mnt)) - goto out1; error = user_path_dir(put_old, &old); if (error) @@ -2491,7 +2488,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, IS_MNT_SHARED(new.mnt->mnt_parent) || IS_MNT_SHARED(root.mnt->mnt_parent)) goto out2; - if (!check_mnt(root.mnt)) + if (!check_mnt(root.mnt) || !check_mnt(new.mnt)) goto out2; error = -ENOENT; if (cant_mount(old.dentry)) @@ -2515,19 +2512,19 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, goto out2; /* not attached */ /* make sure we can reach put_old from new_root */ tmp = old.mnt; - br_write_lock(vfsmount_lock); if (tmp != new.mnt) { for (;;) { if (tmp->mnt_parent == tmp) - goto out3; /* already mounted on put_old */ + goto out2; /* already mounted on put_old */ if (tmp->mnt_parent == new.mnt) break; tmp = tmp->mnt_parent; } if (!is_subdir(tmp->mnt_mountpoint, new.dentry)) - goto out3; + goto out2; } else if (!is_subdir(old.dentry, new.dentry)) - goto out3; + goto out2; + br_write_lock(vfsmount_lock); detach_mnt(new.mnt, &parent_path); detach_mnt(root.mnt, &root_parent); /* mount old root on put_old */ @@ -2550,9 +2547,6 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, path_put(&new); out0: return error; -out3: - br_write_unlock(vfsmount_lock); - goto out2; } static void __init init_mount_tree(void) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 2c3eb33b904dc..242d7012c5b1f 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1962,11 +1962,14 @@ static void nfs_access_free_list(struct list_head *head) } } -int nfs_access_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) +int nfs_access_cache_shrinker(struct shrinker *shrink, + struct shrink_control *sc) { LIST_HEAD(head); struct nfs_inode *nfsi, *next; struct nfs_access_entry *cache; + int nr_to_scan = sc->nr_to_scan; + gfp_t gfp_mask = sc->gfp_mask; if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL) return (nr_to_scan == 0) ? 
0 : -1; diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index cf9fdbdabc675..fabb489c2b85e 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -218,7 +218,7 @@ void nfs_close_context(struct nfs_open_context *ctx, int is_sync); /* dir.c */ extern int nfs_access_cache_shrinker(struct shrinker *shrink, - int nr_to_scan, gfp_t gfp_mask); + struct shrink_control *sc); /* inode.c */ extern struct workqueue_struct *nfsiod_workqueue; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 0a07e353a9613..31c1ad7e9f5b3 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -258,9 +258,11 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, break; nfs4_schedule_stateid_recovery(server, state); goto wait_on_recovery; + case -NFS4ERR_EXPIRED: + if (state != NULL) + nfs4_schedule_stateid_recovery(server, state); case -NFS4ERR_STALE_STATEID: case -NFS4ERR_STALE_CLIENTID: - case -NFS4ERR_EXPIRED: nfs4_schedule_lease_recovery(clp); goto wait_on_recovery; #if defined(CONFIG_NFS_V4_1) @@ -3504,9 +3506,11 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, break; nfs4_schedule_stateid_recovery(server, state); goto wait_on_recovery; + case -NFS4ERR_EXPIRED: + if (state != NULL) + nfs4_schedule_stateid_recovery(server, state); case -NFS4ERR_STALE_STATEID: case -NFS4ERR_STALE_CLIENTID: - case -NFS4ERR_EXPIRED: nfs4_schedule_lease_recovery(clp); goto wait_on_recovery; #if defined(CONFIG_NFS_V4_1) @@ -4397,6 +4401,7 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl) case -ESTALE: goto out; case -NFS4ERR_EXPIRED: + nfs4_schedule_stateid_recovery(server, state); case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_STALE_STATEID: nfs4_schedule_lease_recovery(server->nfs_client); diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 0592288f9f067..6eea1a62c8d75 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1446,7 +1446,10 @@ static int nfs4_reclaim_lease(struct nfs_client *clp) #ifdef CONFIG_NFS_V4_1 void nfs4_schedule_session_recovery(struct nfs4_session *session) { - nfs4_schedule_lease_recovery(session->clp); + struct nfs_client *clp = session->clp; + + set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); + nfs4_schedule_lease_recovery(clp); } void nfs41_handle_recall_slot(struct nfs_client *clp) @@ -1528,6 +1531,7 @@ static int nfs4_reset_session(struct nfs_client *clp) status = nfs4_recovery_handle_error(clp, status); goto out; } + clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); /* create_session negotiated new slot table */ clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); @@ -1600,7 +1604,7 @@ static void nfs4_state_manager(struct nfs_client *clp) int status = 0; /* Ensure exclusive access to NFSv4 state */ - for(;;) { + do { if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) { /* We're going to have to re-establish a clientid */ status = nfs4_reclaim_lease(clp); @@ -1684,7 +1688,7 @@ static void nfs4_state_manager(struct nfs_client *clp) break; if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) break; - } + } while (atomic_read(&clp->cl_count) > 1); return; out_error: printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s" diff --git a/fs/nfs/super.c b/fs/nfs/super.c index b68c8607770fc..6a2ec5043a0c3 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -2077,6 +2077,15 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data) if (error < 0) goto out; + /* + * noac is a special case. 
It implies -o sync, but that's not + * necessarily reflected in the mtab options. do_remount_sb + * will clear MS_SYNCHRONOUS if -o sync wasn't specified in the + * remount options, so we have to explicitly reset it. + */ + if (data->flags & NFS_MOUNT_NOAC) + *flags |= MS_SYNCHRONOUS; + /* compare new mount options with old ones */ error = nfs_compare_remount_data(nfss, data); out: diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 42b92d7a9cc4e..b5fcbf7da6fc3 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1214,13 +1214,17 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data) #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait) { + int ret; + if (!test_and_set_bit(NFS_INO_COMMIT, &nfsi->flags)) return 1; - if (may_wait && !out_of_line_wait_on_bit_lock(&nfsi->flags, - NFS_INO_COMMIT, nfs_wait_bit_killable, - TASK_KILLABLE)) - return 1; - return 0; + if (!may_wait) + return 0; + ret = out_of_line_wait_on_bit_lock(&nfsi->flags, + NFS_INO_COMMIT, + nfs_wait_bit_killable, + TASK_KILLABLE); + return (ret < 0) ? ret : 1; } static void nfs_commit_clear_lock(struct nfs_inode *nfsi) @@ -1396,9 +1400,10 @@ int nfs_commit_inode(struct inode *inode, int how) { LIST_HEAD(head); int may_wait = how & FLUSH_SYNC; - int res = 0; + int res; - if (!nfs_commit_set_lock(NFS_I(inode), may_wait)) + res = nfs_commit_set_lock(NFS_I(inode), may_wait); + if (res <= 0) goto out_mark_dirty; spin_lock(&inode->i_lock); res = nfs_scan_commit(inode, &head, 0, 0); @@ -1407,12 +1412,14 @@ int nfs_commit_inode(struct inode *inode, int how) int error = nfs_commit_list(inode, &head, how); if (error < 0) return error; - if (may_wait) - wait_on_bit(&NFS_I(inode)->flags, NFS_INO_COMMIT, - nfs_wait_bit_killable, - TASK_KILLABLE); - else + if (!may_wait) goto out_mark_dirty; + error = wait_on_bit(&NFS_I(inode)->flags, + NFS_INO_COMMIT, + nfs_wait_bit_killable, + TASK_KILLABLE); + if (error < 0) + return error; } else nfs_commit_clear_lock(NFS_I(inode)); return res; diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c index 0c6d816701374..7c831a2731fa1 100644 --- a/fs/nfsd/lockd.c +++ b/fs/nfsd/lockd.c @@ -38,7 +38,6 @@ nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp) exp_readlock(); nfserr = nfsd_open(rqstp, &fh, S_IFREG, NFSD_MAY_LOCK, filp); fh_put(&fh); - rqstp->rq_client = NULL; exp_readunlock(); /* We return nlm error codes as nlm doesn't know * about nfsd, but nfsd does know about nlm.. 
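/*
 * Illustrative sketch of the pattern the nfs_commit_set_lock() change above
 * moves to: take the flag bit if it is free, otherwise wait killably and
 * propagate the negative error instead of reporting plain failure, so a
 * fatal signal does not leave the caller looping.  The helper and its
 * arguments are stand-ins.
 */
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/sched.h>

static int example_take_bit_or_wait(unsigned long *flags, int bit,
                                    int may_wait, int (*action)(void *))
{
        int ret;

        if (!test_and_set_bit(bit, flags))
                return 1;                       /* got the bit without waiting */
        if (!may_wait)
                return 0;                       /* caller did not ask to block */
        ret = wait_on_bit_lock(flags, bit, action, TASK_KILLABLE);
        return ret < 0 ? ret : 1;               /* e.g. -ERESTARTSYS if killed */
}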
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index db52546143d12..5fcb1396a7e32 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -984,8 +984,8 @@ typedef __be32(*nfsd4op_func)(struct svc_rqst *, struct nfsd4_compound_state *, void *); enum nfsd4_op_flags { ALLOWED_WITHOUT_FH = 1 << 0, /* No current filehandle required */ - ALLOWED_ON_ABSENT_FS = 2 << 0, /* ops processed on absent fs */ - ALLOWED_AS_FIRST_OP = 3 << 0, /* ops reqired first in compound */ + ALLOWED_ON_ABSENT_FS = 1 << 1, /* ops processed on absent fs */ + ALLOWED_AS_FIRST_OP = 1 << 2, /* ops reqired first in compound */ }; struct nfsd4_operation { diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 7b566ec14e183..18c356cd50d8a 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -258,6 +258,7 @@ static void nfs4_put_deleg_lease(struct nfs4_file *fp) if (atomic_dec_and_test(&fp->fi_delegees)) { vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease); fp->fi_lease = NULL; + fput(fp->fi_deleg_file); fp->fi_deleg_file = NULL; } } @@ -316,64 +317,6 @@ static struct list_head unconf_id_hashtbl[CLIENT_HASH_SIZE]; static struct list_head client_lru; static struct list_head close_lru; -static void unhash_generic_stateid(struct nfs4_stateid *stp) -{ - list_del(&stp->st_hash); - list_del(&stp->st_perfile); - list_del(&stp->st_perstateowner); -} - -static void free_generic_stateid(struct nfs4_stateid *stp) -{ - put_nfs4_file(stp->st_file); - kmem_cache_free(stateid_slab, stp); -} - -static void release_lock_stateid(struct nfs4_stateid *stp) -{ - struct file *file; - - unhash_generic_stateid(stp); - file = find_any_file(stp->st_file); - if (file) - locks_remove_posix(file, (fl_owner_t)stp->st_stateowner); - free_generic_stateid(stp); -} - -static void unhash_lockowner(struct nfs4_stateowner *sop) -{ - struct nfs4_stateid *stp; - - list_del(&sop->so_idhash); - list_del(&sop->so_strhash); - list_del(&sop->so_perstateid); - while (!list_empty(&sop->so_stateids)) { - stp = list_first_entry(&sop->so_stateids, - struct nfs4_stateid, st_perstateowner); - release_lock_stateid(stp); - } -} - -static void release_lockowner(struct nfs4_stateowner *sop) -{ - unhash_lockowner(sop); - nfs4_put_stateowner(sop); -} - -static void -release_stateid_lockowners(struct nfs4_stateid *open_stp) -{ - struct nfs4_stateowner *lock_sop; - - while (!list_empty(&open_stp->st_lockowners)) { - lock_sop = list_entry(open_stp->st_lockowners.next, - struct nfs4_stateowner, so_perstateid); - /* list_del(&open_stp->st_lockowners); */ - BUG_ON(lock_sop->so_is_open_owner); - release_lockowner(lock_sop); - } -} - /* * We store the NONE, READ, WRITE, and BOTH bits separately in the * st_{access,deny}_bmap field of the stateid, in order to track not @@ -446,13 +389,74 @@ static int nfs4_access_bmap_to_omode(struct nfs4_stateid *stp) return nfs4_access_to_omode(access); } -static void release_open_stateid(struct nfs4_stateid *stp) +static void unhash_generic_stateid(struct nfs4_stateid *stp) +{ + list_del(&stp->st_hash); + list_del(&stp->st_perfile); + list_del(&stp->st_perstateowner); +} + +static void free_generic_stateid(struct nfs4_stateid *stp) +{ + int oflag; + + if (stp->st_access_bmap) { + oflag = nfs4_access_bmap_to_omode(stp); + nfs4_file_put_access(stp->st_file, oflag); + } + put_nfs4_file(stp->st_file); + kmem_cache_free(stateid_slab, stp); +} + +static void release_lock_stateid(struct nfs4_stateid *stp) +{ + struct file *file; + + unhash_generic_stateid(stp); + file = find_any_file(stp->st_file); + if (file) + 
locks_remove_posix(file, (fl_owner_t)stp->st_stateowner); + free_generic_stateid(stp); +} + +static void unhash_lockowner(struct nfs4_stateowner *sop) +{ + struct nfs4_stateid *stp; + + list_del(&sop->so_idhash); + list_del(&sop->so_strhash); + list_del(&sop->so_perstateid); + while (!list_empty(&sop->so_stateids)) { + stp = list_first_entry(&sop->so_stateids, + struct nfs4_stateid, st_perstateowner); + release_lock_stateid(stp); + } +} + +static void release_lockowner(struct nfs4_stateowner *sop) { - int oflag = nfs4_access_bmap_to_omode(stp); + unhash_lockowner(sop); + nfs4_put_stateowner(sop); +} + +static void +release_stateid_lockowners(struct nfs4_stateid *open_stp) +{ + struct nfs4_stateowner *lock_sop; + while (!list_empty(&open_stp->st_lockowners)) { + lock_sop = list_entry(open_stp->st_lockowners.next, + struct nfs4_stateowner, so_perstateid); + /* list_del(&open_stp->st_lockowners); */ + BUG_ON(lock_sop->so_is_open_owner); + release_lockowner(lock_sop); + } +} + +static void release_open_stateid(struct nfs4_stateid *stp) +{ unhash_generic_stateid(stp); release_stateid_lockowners(stp); - nfs4_file_put_access(stp->st_file, oflag); free_generic_stateid(stp); } @@ -3735,6 +3739,7 @@ alloc_init_lock_stateid(struct nfs4_stateowner *sop, struct nfs4_file *fp, struc stp->st_stateid.si_stateownerid = sop->so_id; stp->st_stateid.si_fileid = fp->fi_id; stp->st_stateid.si_generation = 0; + stp->st_access_bmap = 0; stp->st_deny_bmap = open_stp->st_deny_bmap; stp->st_openstp = open_stp; @@ -3749,6 +3754,17 @@ check_lock_length(u64 offset, u64 length) LOFF_OVERFLOW(offset, length))); } +static void get_lock_access(struct nfs4_stateid *lock_stp, u32 access) +{ + struct nfs4_file *fp = lock_stp->st_file; + int oflag = nfs4_access_to_omode(access); + + if (test_bit(access, &lock_stp->st_access_bmap)) + return; + nfs4_file_get_access(fp, oflag); + __set_bit(access, &lock_stp->st_access_bmap); +} + /* * LOCK operation */ @@ -3765,7 +3781,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct file_lock conflock; __be32 status = 0; unsigned int strhashval; - unsigned int cmd; int err; dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n", @@ -3847,22 +3862,18 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, switch (lock->lk_type) { case NFS4_READ_LT: case NFS4_READW_LT: - if (find_readable_file(lock_stp->st_file)) { - nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_READ); - filp = find_readable_file(lock_stp->st_file); - } + filp = find_readable_file(lock_stp->st_file); + if (filp) + get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ); file_lock.fl_type = F_RDLCK; - cmd = F_SETLK; - break; + break; case NFS4_WRITE_LT: case NFS4_WRITEW_LT: - if (find_writeable_file(lock_stp->st_file)) { - nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_WRITE); - filp = find_writeable_file(lock_stp->st_file); - } + filp = find_writeable_file(lock_stp->st_file); + if (filp) + get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE); file_lock.fl_type = F_WRLCK; - cmd = F_SETLK; - break; + break; default: status = nfserr_inval; goto out; @@ -3886,7 +3897,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, * Note: locks.c uses the BKL to protect the inode's lock list. */ - err = vfs_lock_file(filp, cmd, &file_lock, &conflock); + err = vfs_lock_file(filp, F_SETLK, &file_lock, &conflock); switch (-err) { case 0: /* success! 
*/ update_stateid(&lock_stp->st_stateid); diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 615f0a9f06008..c6766af00d983 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -1142,7 +1142,7 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp, u32 dummy; char *machine_name; - int i, j; + int i; int nr_secflavs; READ_BUF(16); @@ -1215,8 +1215,6 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp, READ_BUF(4); READ32(dummy); READ_BUF(dummy * 4); - for (j = 0; j < dummy; ++j) - READ32(dummy); break; case RPC_AUTH_GSS: dprintk("RPC_AUTH_GSS callback secflavor " @@ -1232,7 +1230,6 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp, READ_BUF(4); READ32(dummy); READ_BUF(dummy); - p += XDR_QUADLEN(dummy); break; default: dprintk("Illegal callback secflavor\n"); diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index da1d9701f8e44..afd5895ccf05d 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -294,7 +294,7 @@ commit_metadata(struct svc_fh *fhp) if (export_ops->commit_metadata) return export_ops->commit_metadata(inode); - return sync_inode_metadata(inode, 1); + return sync_inode_metadata(inode, 0, 1); } /* @@ -1363,7 +1363,7 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp, goto out; if (!(iap->ia_valid & ATTR_MODE)) iap->ia_mode = 0; - err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE); + err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC); if (err) goto out; @@ -1385,6 +1385,13 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp, if (IS_ERR(dchild)) goto out_nfserr; + /* If file doesn't exist, check for permissions to create one */ + if (!dchild->d_inode) { + err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE); + if (err) + goto out; + } + err = fh_compose(resfhp, fhp->fh_export, dchild, fhp); if (err) goto out; diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c index 2f560c9fb8081..f49e6287c879b 100644 --- a/fs/nilfs2/file.c +++ b/fs/nilfs2/file.c @@ -72,10 +72,9 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) /* * check to see if the page is mapped already (no holes) */ - if (PageMappedToDisk(page)) { - unlock_page(page); + if (PageMappedToDisk(page)) goto mapped; - } + if (page_has_buffers(page)) { struct buffer_head *bh, *head; int fully_mapped = 1; @@ -90,7 +89,6 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) if (fully_mapped) { SetPageMappedToDisk(page); - unlock_page(page); goto mapped; } } @@ -105,16 +103,17 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) return VM_FAULT_SIGBUS; ret = block_page_mkwrite(vma, vmf, nilfs_get_block); - if (unlikely(ret)) { + if (ret != VM_FAULT_LOCKED) { nilfs_transaction_abort(inode->i_sb); return ret; } + nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits)); nilfs_transaction_commit(inode->i_sb); mapped: SetPageChecked(page); wait_on_page_writeback(page); - return 0; + return VM_FAULT_LOCKED; } static const struct vm_operations_struct nilfs_file_vm_ops = { diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c index a91b69a6a291b..0348d0c8f65e8 100644 --- a/fs/notify/inotify/inotify_fsnotify.c +++ b/fs/notify/inotify/inotify_fsnotify.c @@ -198,6 +198,7 @@ static void inotify_free_group_priv(struct fsnotify_group *group) idr_for_each(&group->inotify_data.idr, idr_callback, group); idr_remove_all(&group->inotify_data.idr); idr_destroy(&group->inotify_data.idr); + atomic_dec(&group->inotify_data.user->inotify_devs); 
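/*
 * Illustrative sketch of the ->page_mkwrite() convention the fs/nilfs2
 * hunk above converts to: on success the handler keeps the faulted page
 * locked and returns VM_FAULT_LOCKED rather than 0.  example_page_mkwrite()
 * is a stand-in, and the block-allocation step is elided.
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

static int example_page_mkwrite(struct vm_area_struct *vma,
                                struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct inode *inode = vma->vm_file->f_mapping->host;

        lock_page(page);
        if (page->mapping != inode->i_mapping) {
                unlock_page(page);
                return VM_FAULT_NOPAGE;         /* raced with truncate */
        }

        /* ... map blocks / mark the page dirty here ... */

        wait_on_page_writeback(page);
        return VM_FAULT_LOCKED;                 /* page handed back still locked */
}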
free_uid(group->inotify_data.user); } diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index 4cd5d5d78f9fa..aec9b4a4ed112 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -290,7 +290,6 @@ static int inotify_fasync(int fd, struct file *file, int on) static int inotify_release(struct inode *ignored, struct file *file) { struct fsnotify_group *group = file->private_data; - struct user_struct *user = group->inotify_data.user; pr_debug("%s: group=%p\n", __func__, group); @@ -299,8 +298,6 @@ static int inotify_release(struct inode *ignored, struct file *file) /* free this group, matching get was inotify_init->fsnotify_obtain_group */ fsnotify_put_group(group); - atomic_dec(&user->inotify_devs); - return 0; } @@ -697,7 +694,7 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod return ret; } -static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events) +static struct fsnotify_group *inotify_new_group(unsigned int max_events) { struct fsnotify_group *group; @@ -710,8 +707,14 @@ static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsign spin_lock_init(&group->inotify_data.idr_lock); idr_init(&group->inotify_data.idr); group->inotify_data.last_wd = 0; - group->inotify_data.user = user; group->inotify_data.fa = NULL; + group->inotify_data.user = get_current_user(); + + if (atomic_inc_return(&group->inotify_data.user->inotify_devs) > + inotify_max_user_instances) { + fsnotify_put_group(group); + return ERR_PTR(-EMFILE); + } return group; } @@ -721,7 +724,6 @@ static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsign SYSCALL_DEFINE1(inotify_init1, int, flags) { struct fsnotify_group *group; - struct user_struct *user; int ret; /* Check the IN_* constants for consistency. */ @@ -731,31 +733,16 @@ SYSCALL_DEFINE1(inotify_init1, int, flags) if (flags & ~(IN_CLOEXEC | IN_NONBLOCK)) return -EINVAL; - user = get_current_user(); - if (unlikely(atomic_read(&user->inotify_devs) >= - inotify_max_user_instances)) { - ret = -EMFILE; - goto out_free_uid; - } - /* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */ - group = inotify_new_group(user, inotify_max_queued_events); - if (IS_ERR(group)) { - ret = PTR_ERR(group); - goto out_free_uid; - } - - atomic_inc(&user->inotify_devs); + group = inotify_new_group(inotify_max_queued_events); + if (IS_ERR(group)) + return PTR_ERR(group); ret = anon_inode_getfd("inotify", &inotify_fops, group, O_RDONLY | flags); - if (ret >= 0) - return ret; + if (ret < 0) + fsnotify_put_group(group); - fsnotify_put_group(group); - atomic_dec(&user->inotify_devs); -out_free_uid: - free_uid(user); return ret; } diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 1fbb0e20131bf..bbba782cce264 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -1026,6 +1026,12 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno, ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos, &cluster_start, &cluster_end); + /* treat the write as new if the a hole/lseek spanned across + * the page boundary. 
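/*
 * Illustrative user-space demonstration of the accounting the
 * inotify_new_group() change above enforces: once a user owns
 * /proc/sys/fs/inotify/max_user_instances instances, inotify_init1()
 * fails with EMFILE.  This is a throwaway test program, not kernel code.
 */
#include <sys/inotify.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        int fd, count = 0;

        while ((fd = inotify_init1(IN_CLOEXEC)) >= 0)
                count++;        /* intentionally keep every instance open */
        printf("created %d inotify instances, then: %s\n",
               count, strerror(errno));
        return 0;
}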
+ */ + new = new | ((i_size_read(inode) <= page_offset(page)) && + (page_offset(page) <= user_pos)); + if (page == wc->w_target_page) { map_from = user_pos & (PAGE_CACHE_SIZE - 1); map_to = map_from + user_len; diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index d417b3f9b0c73..f97b6f1c61dd4 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c @@ -354,7 +354,7 @@ static inline int ocfs2_match(int len, /* * Returns 0 if not found, -1 on failure, and 1 on success */ -static int inline ocfs2_search_dirblock(struct buffer_head *bh, +static inline int ocfs2_search_dirblock(struct buffer_head *bh, struct inode *dir, const char *name, int namelen, unsigned long offset, diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c index e043c4cb9a972..2606b49e592a6 100644 --- a/fs/omfs/inode.c +++ b/fs/omfs/inode.c @@ -169,7 +169,7 @@ static int __omfs_write_inode(struct inode *inode, int wait) static int omfs_write_inode(struct inode *inode, struct writeback_control *wbc) { - return __omfs_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL); + return __omfs_write_inode(inode, 1 /* XXX: fix fsync and use wbc->sync_mode == WB_SYNC_ALL */); } int omfs_sync_inode(struct inode *inode) diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c index ac0ccb5026a2d..19d6750d1d6ce 100644 --- a/fs/partitions/efi.c +++ b/fs/partitions/efi.c @@ -348,6 +348,12 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba, goto fail; } + /* Check that sizeof_partition_entry has the correct value */ + if (le32_to_cpu((*gpt)->sizeof_partition_entry) != sizeof(gpt_entry)) { + pr_debug("GUID Partitition Entry Size check failed.\n"); + goto fail; + } + if (!(*ptes = alloc_read_gpt_entries(state, *gpt))) goto fail; diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c index b10e3540d5b71..a29d5ccf3d54a 100644 --- a/fs/partitions/ldm.c +++ b/fs/partitions/ldm.c @@ -1299,6 +1299,11 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags) BUG_ON (!data || !frags); + if (size < 2 * VBLK_SIZE_HEAD) { + ldm_error("Value of size is to small."); + return false; + } + group = get_unaligned_be32(data + 0x08); rec = get_unaligned_be16(data + 0x0C); num = get_unaligned_be16(data + 0x0E); @@ -1306,6 +1311,10 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags) ldm_error ("A VBLK claims to have %d parts.", num); return false; } + if (rec >= num) { + ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num); + return false; + } list_for_each (item, frags) { f = list_entry (item, struct frag, list); @@ -1326,6 +1335,11 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags) list_add_tail (&f->list, frags); found: + if (rec >= f->num) { + ldm_error("REC value (%d) exceeds NUM value (%d)", rec, f->num); + return false; + } + if (f->map & (1 << rec)) { ldm_error ("Duplicate VBLK, part %d.", rec); f->map &= 0x7F; /* Mark the group as broken */ @@ -1334,10 +1348,9 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags) f->map |= (1 << rec); - if (num > 0) { - data += VBLK_SIZE_HEAD; - size -= VBLK_SIZE_HEAD; - } + data += VBLK_SIZE_HEAD; + size -= VBLK_SIZE_HEAD; + memcpy (f->data+rec*(size-VBLK_SIZE_HEAD)+VBLK_SIZE_HEAD, data, size); return true; diff --git a/fs/partitions/osf.c b/fs/partitions/osf.c index be03a0b08b47a..764b86a01965a 100644 --- a/fs/partitions/osf.c +++ b/fs/partitions/osf.c @@ -10,7 +10,7 @@ #include "check.h" #include "osf.h" -#define MAX_OSF_PARTITIONS 8 +#define MAX_OSF_PARTITIONS 18 int osf_partition(struct parsed_partitions *state) 
{ diff --git a/fs/proc/array.c b/fs/proc/array.c index 7c99c1cf7e5c4..0a8d96340491d 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -337,6 +337,12 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task) seq_putc(m, '\n'); } +static void task_vpid(struct seq_file *m, struct task_struct *task) +{ + struct pid_namespace *ns = task_active_pid_ns(task); + seq_printf(m, "Vpid:\t%d\n", ns ? task_pid_nr_ns(task, ns) : 0); +} + int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { @@ -354,6 +360,7 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, task_cpus_allowed(m, task); cpuset_task_status_allowed(m, task); task_context_switch_counts(m, task); + task_vpid(m, task); return 0; } @@ -489,8 +496,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, vsize, mm ? get_mm_rss(mm) : 0, rsslim, - mm ? mm->start_code : 0, - mm ? mm->end_code : 0, + mm ? (permitted ? mm->start_code : 1) : 0, + mm ? (permitted ? mm->end_code : 1) : 0, (permitted && mm) ? mm->start_stack : 0, esp, eip, diff --git a/fs/proc/base.c b/fs/proc/base.c index 4147a124cf49e..ca581edcb3b5a 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -363,7 +363,7 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns, static int proc_pid_schedstat(struct task_struct *task, char *buffer) { return sprintf(buffer, "%llu %llu %lu\n", - (unsigned long long)task->se.sum_exec_runtime, + (unsigned long long)tsk_seruntime(task), (unsigned long long)task->sched_info.run_delay, task->sched_info.pcount); } @@ -3106,11 +3106,16 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi /* for the /proc/ directory itself, after non-process stuff has been done */ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir) { - unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY; - struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode); + unsigned int nr; + struct task_struct *reaper; struct tgid_iter iter; struct pid_namespace *ns; + if (filp->f_pos >= PID_MAX_LIMIT + TGID_OFFSET) + goto out_no_task; + nr = filp->f_pos - FIRST_PROCESS_ENTRY; + + reaper = get_proc_task(filp->f_path.dentry->d_inode); if (!reaper) goto out_no_task; diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 60b914860f815..c4bec29cf7d11 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -211,7 +211,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) int flags = vma->vm_flags; unsigned long ino = 0; unsigned long long pgoff = 0; - unsigned long start; + unsigned long start, end; dev_t dev = 0; int len; @@ -224,13 +224,15 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) /* We don't show the stack guard page in /proc/maps */ start = vma->vm_start; - if (vma->vm_flags & VM_GROWSDOWN) - if (!vma_stack_continue(vma->vm_prev, vma->vm_start)) - start += PAGE_SIZE; + if (stack_guard_page_start(vma, start)) + start += PAGE_SIZE; + end = vma->vm_end; + if (stack_guard_page_end(vma, end)) + end -= PAGE_SIZE; seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", start, - vma->vm_end, + end, flags & VM_READ ? 'r' : '-', flags & VM_WRITE ? 'w' : '-', flags & VM_EXEC ? 
'x' : '-', @@ -249,8 +251,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) const char *name = arch_vma_name(vma); if (!name) { if (mm) { - if (vma->vm_start <= mm->start_brk && - vma->vm_end >= mm->brk) { + if (vma->vm_start <= mm->brk && + vma->vm_end >= mm->start_brk) { name = "[heap]"; } else if (vma->vm_start <= mm->start_stack && vma->vm_end >= mm->start_stack) { diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index a2a622e079f08..afac03212ad47 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -442,7 +442,7 @@ EXPORT_SYMBOL(dquot_acquire); */ int dquot_commit(struct dquot *dquot) { - int ret = 0, ret2 = 0; + int ret = 0; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); mutex_lock(&dqopt->dqio_mutex); @@ -454,15 +454,10 @@ int dquot_commit(struct dquot *dquot) spin_unlock(&dq_list_lock); /* Inactive dquot can be only if there was error during read/init * => we have better not writing it */ - if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { + if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); - if (info_dirty(&dqopt->info[dquot->dq_type])) { - ret2 = dqopt->ops[dquot->dq_type]->write_file_info( - dquot->dq_sb, dquot->dq_type); - } - if (ret >= 0) - ret = ret2; - } + else + ret = -EIO; out_sem: mutex_unlock(&dqopt->dqio_mutex); return ret; @@ -696,8 +691,11 @@ static void prune_dqcache(int count) * This is called from kswapd when we think we need some * more memory */ -static int shrink_dqcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) +static int shrink_dqcache_memory(struct shrinker *shrink, + struct shrink_control *sc) { + int nr = sc->nr_to_scan; + if (nr) { spin_lock(&dq_list_lock); prune_dqcache(nr); diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index 9eead2c796b7f..fbb0b478a346f 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c @@ -112,6 +112,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) SetPageDirty(page); unlock_page(page); + put_page(page); } return 0; diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 0bae036831e2c..615a996ef77c9 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -1635,6 +1635,8 @@ int reiserfs_write_inode(struct inode *inode, struct writeback_control *wbc) ** these cases are just when the system needs ram, not when the ** inode needs to reach disk for safety, and they can safely be ** ignored because the altered inode has already been logged. + ** XXX: is this really OK? The caller clears the inode dirty bit, so + ** a subsequent sync for integrity might never reach here. */ if (wbc->sync_mode == WB_SYNC_ALL && !(current->flags & PF_MEMALLOC)) { reiserfs_write_lock(inode->i_sb); diff --git a/fs/squashfs/dir.c b/fs/squashfs/dir.c index 0dc340aa2be97..3f79cd1d0c197 100644 --- a/fs/squashfs/dir.c +++ b/fs/squashfs/dir.c @@ -172,6 +172,11 @@ static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir) length += sizeof(dirh); dir_count = le32_to_cpu(dirh.count) + 1; + + /* dir_count should never be larger than 256 */ + if (dir_count > 256) + goto failed_read; + while (dir_count--) { /* * Read directory entry. 
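/*
 * Illustrative sketch of the sanity checks the squashfs hunks here add:
 * counts and sizes read from the medium are untrusted and must be bounded
 * before they drive a loop or a metadata read.  The limits mirror the ones
 * used in the hunks; the helper name and constant spellings are stand-ins
 * (EXAMPLE_MAX_NAME_LEN plays the role of SQUASHFS_NAME_LEN).
 */
#include <linux/errno.h>

#define EXAMPLE_MAX_DIR_ENTRIES 256     /* per directory header */
#define EXAMPLE_MAX_NAME_LEN    256     /* stand-in for SQUASHFS_NAME_LEN */

static int example_check_dir_entry(unsigned int dir_count, unsigned int size)
{
        if (dir_count > EXAMPLE_MAX_DIR_ENTRIES)
                return -EIO;            /* corrupted directory header */
        if (size > EXAMPLE_MAX_NAME_LEN)
                return -EIO;            /* corrupted directory entry */
        return 0;
}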
@@ -183,6 +188,10 @@ static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir) size = le16_to_cpu(dire->size) + 1; + /* size should never be larger than SQUASHFS_NAME_LEN */ + if (size > SQUASHFS_NAME_LEN) + goto failed_read; + err = squashfs_read_metadata(inode->i_sb, dire->name, &block, &offset, size); if (err < 0) diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c index 7a9464d08cf63..5d922a6701ab7 100644 --- a/fs/squashfs/namei.c +++ b/fs/squashfs/namei.c @@ -176,6 +176,11 @@ static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry, length += sizeof(dirh); dir_count = le32_to_cpu(dirh.count) + 1; + + /* dir_count should never be larger than 256 */ + if (dir_count > 256) + goto data_error; + while (dir_count--) { /* * Read directory entry. @@ -187,6 +192,10 @@ static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry, size = le16_to_cpu(dire->size) + 1; + /* size should never be larger than SQUASHFS_NAME_LEN */ + if (size > SQUASHFS_NAME_LEN) + goto data_error; + err = squashfs_read_metadata(dir->i_sb, dire->name, &block, &offset, size); if (err < 0) @@ -228,6 +237,9 @@ static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry, d_add(dentry, inode); return ERR_PTR(0); +data_error: + err = -EIO; + read_failure: ERROR("Unable to read directory block [%llx:%x]\n", squashfs_i(dir)->start + msblk->directory_table, diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c index 4661ae2b1cec8..04ae9a5b70a66 100644 --- a/fs/squashfs/zlib_wrapper.c +++ b/fs/squashfs/zlib_wrapper.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "squashfs_fs.h" #include "squashfs_fs_sb.h" @@ -37,8 +38,7 @@ static void *zlib_init(struct squashfs_sb_info *dummy) z_stream *stream = kmalloc(sizeof(z_stream), GFP_KERNEL); if (stream == NULL) goto failed; - stream->workspace = kmalloc(zlib_inflate_workspacesize(), - GFP_KERNEL); + stream->workspace = vmalloc(zlib_inflate_workspacesize()); if (stream->workspace == NULL) goto failed; @@ -56,7 +56,7 @@ static void zlib_free(void *strm) z_stream *stream = strm; if (stream) - kfree(stream->workspace); + vfree(stream->workspace); kfree(stream); } diff --git a/fs/super.c b/fs/super.c index 7e9dd4cc2c011..0d89e93f654e1 100644 --- a/fs/super.c +++ b/fs/super.c @@ -71,6 +71,7 @@ static struct super_block *alloc_super(struct file_system_type *type) #else INIT_LIST_HEAD(&s->s_files); #endif + s->s_bdi = &default_backing_dev_info; INIT_LIST_HEAD(&s->s_instances); INIT_HLIST_BL_HEAD(&s->s_anon); INIT_LIST_HEAD(&s->s_inodes); @@ -1003,6 +1004,7 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void } BUG_ON(!mnt->mnt_sb); WARN_ON(!mnt->mnt_sb->s_bdi); + WARN_ON(mnt->mnt_sb->s_bdi == &default_backing_dev_info); mnt->mnt_sb->s_flags |= MS_BORN; error = security_sb_kern_mount(mnt->mnt_sb, flags, secdata); diff --git a/fs/sync.c b/fs/sync.c index ba76b9623e7e8..12ce4845f8df1 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -17,6 +17,11 @@ #include #include "internal.h" +#ifdef CONFIG_DYNAMIC_FSYNC +extern bool early_suspend_active; +extern bool dyn_fsync_active; +#endif + #define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \ SYNC_FILE_RANGE_WAIT_AFTER) @@ -33,7 +38,7 @@ static int __sync_filesystem(struct super_block *sb, int wait) * This should be safe, as we require bdi backing to actually * write out data in the first place */ - if (!sb->s_bdi || sb->s_bdi == &noop_backing_dev_info) + if (sb->s_bdi == &noop_backing_dev_info) 
return 0; if (sb->s_qcop && sb->s_qcop->quota_sync) @@ -79,14 +84,14 @@ EXPORT_SYMBOL_GPL(sync_filesystem); static void sync_one_sb(struct super_block *sb, void *arg) { - if (!(sb->s_flags & MS_RDONLY) && sb->s_bdi) + if (!(sb->s_flags & MS_RDONLY)) __sync_filesystem(sb, *(int *)arg); } /* * Sync all the data for all the filesystems (called by sys_sync() and * emergency sync) */ -static void sync_filesystems(int wait) +void sync_filesystems(int wait) { iterate_supers(sync_one_sb, &wait); } @@ -141,6 +146,11 @@ void emergency_sync(void) */ int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync) { +#ifdef CONFIG_DYNAMIC_FSYNC + if (dyn_fsync_active && !early_suspend_active) + return 0; + else { +#endif struct address_space *mapping = file->f_mapping; int err, ret; @@ -163,6 +173,9 @@ int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync) out: return ret; +#ifdef CONFIG_DYNAMIC_FSYNC + } +#endif } EXPORT_SYMBOL(vfs_fsync_range); @@ -195,11 +208,21 @@ static int do_fsync(unsigned int fd, int datasync) SYSCALL_DEFINE1(fsync, unsigned int, fd) { +#ifdef CONFIG_DYNAMIC_FSYNC + if (dyn_fsync_active && !early_suspend_active) + return 0; + else +#endif return do_fsync(fd, 0); } SYSCALL_DEFINE1(fdatasync, unsigned int, fd) { +#ifdef CONFIG_DYNAMIC_FSYNC + if (dyn_fsync_active && !early_suspend_active) + return 0; + else +#endif return do_fsync(fd, 1); } @@ -270,6 +293,12 @@ EXPORT_SYMBOL(generic_write_sync); SYSCALL_DEFINE(sync_file_range)(int fd, loff_t offset, loff_t nbytes, unsigned int flags) { +#ifdef CONFIG_DYNAMIC_FSYNC + if (dyn_fsync_active && !early_suspend_active) + return 0; + else { +#endif + int ret; struct file *file; struct address_space *mapping; @@ -349,6 +378,9 @@ SYSCALL_DEFINE(sync_file_range)(int fd, loff_t offset, loff_t nbytes, fput_light(file, fput_needed); out: return ret; +#ifdef CONFIG_DYNAMIC_FSYNC + } +#endif } #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS asmlinkage long SyS_sync_file_range(long fd, loff_t offset, loff_t nbytes, @@ -365,6 +397,11 @@ SYSCALL_ALIAS(sys_sync_file_range, SyS_sync_file_range); SYSCALL_DEFINE(sync_file_range2)(int fd, unsigned int flags, loff_t offset, loff_t nbytes) { +#ifdef CONFIG_DYNAMIC_FSYNC + if (dyn_fsync_active && !early_suspend_active) + return 0; + else +#endif return sys_sync_file_range(fd, offset, nbytes, flags); } #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c index 0630eb969a280..9f7e214fff135 100644 --- a/fs/sysv/inode.c +++ b/fs/sysv/inode.c @@ -286,7 +286,7 @@ static int __sysv_write_inode(struct inode *inode, int wait) write3byte(sbi, (u8 *)&si->i_data[block], &raw_inode->i_data[3*block]); mark_buffer_dirty(bh); - if (wait) { + if (1 /* XXX: fix fsync and use wait */) { sync_dirty_buffer(bh); if (buffer_req(bh) && !buffer_uptodate(bh)) { printk ("IO error syncing sysv inode [%s:%08x]\n", diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c index 02429d81ca337..32bcb2c467e7b 100644 --- a/fs/ubifs/commit.c +++ b/fs/ubifs/commit.c @@ -521,7 +521,7 @@ int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot) size_t sz; if (!(ubifs_chk_flags & UBIFS_CHK_OLD_IDX)) - goto out; + return 0; INIT_LIST_HEAD(&list); diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 0bee4dbffc31f..b67ed36f16a12 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -961,11 +961,39 @@ void dbg_dump_index(struct ubifs_info *c) void dbg_save_space_info(struct ubifs_info *c) { struct ubifs_debug_info *d = c->dbg; - - ubifs_get_lp_stats(c, &d->saved_lst); + int 
freeable_cnt; spin_lock(&c->space_lock); + memcpy(&d->saved_lst, &c->lst, sizeof(struct ubifs_lp_stats)); + + /* + * We use a dirty hack here and zero out @c->freeable_cnt, because it + * affects the free space calculations, and UBIFS might not know about + * all freeable eraseblocks. Indeed, we know about freeable eraseblocks + * only when we read their lprops, and we do this only lazily, upon the + * need. So at any given point of time @c->freeable_cnt might be not + * exactly accurate. + * + * Just one example about the issue we hit when we did not zero + * @c->freeable_cnt. + * 1. The file-system is mounted R/O, c->freeable_cnt is %0. We save the + * amount of free space in @d->saved_free + * 2. We re-mount R/W, which makes UBIFS to read the "lsave" + * information from flash, where we cache LEBs from various + * categories ('ubifs_remount_fs()' -> 'ubifs_lpt_init()' + * -> 'lpt_init_wr()' -> 'read_lsave()' -> 'ubifs_lpt_lookup()' + * -> 'ubifs_get_pnode()' -> 'update_cats()' + * -> 'ubifs_add_to_cat()'). + * 3. Lsave contains a freeable eraseblock, and @c->freeable_cnt + * becomes %1. + * 4. We calculate the amount of free space when the re-mount is + * finished in 'dbg_check_space_info()' and it does not match + * @d->saved_free. + */ + freeable_cnt = c->freeable_cnt; + c->freeable_cnt = 0; d->saved_free = ubifs_get_free_space_nolock(c); + c->freeable_cnt = freeable_cnt; spin_unlock(&c->space_lock); } @@ -982,12 +1010,15 @@ int dbg_check_space_info(struct ubifs_info *c) { struct ubifs_debug_info *d = c->dbg; struct ubifs_lp_stats lst; - long long avail, free; + long long free; + int freeable_cnt; spin_lock(&c->space_lock); - avail = ubifs_calc_available(c, c->min_idx_lebs); + freeable_cnt = c->freeable_cnt; + c->freeable_cnt = 0; + free = ubifs_get_free_space_nolock(c); + c->freeable_cnt = freeable_cnt; spin_unlock(&c->space_lock); - free = ubifs_get_free_space(c); if (free != d->saved_free) { ubifs_err("free space changed from %lld to %lld", @@ -2813,19 +2844,19 @@ int dbg_debugfs_init_fs(struct ubifs_info *c) } fname = "dump_lprops"; - dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops); + dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops); if (IS_ERR(dent)) goto out_remove; d->dfs_dump_lprops = dent; fname = "dump_budg"; - dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops); + dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops); if (IS_ERR(dent)) goto out_remove; d->dfs_dump_budg = dent; fname = "dump_tnc"; - dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops); + dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops); if (IS_ERR(dent)) goto out_remove; d->dfs_dump_tnc = dent; diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index d77db7e36484e..fe14f4df4ca0b 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -1309,6 +1309,9 @@ int ubifs_fsync(struct file *file, int datasync) dbg_gen("syncing inode %lu", inode->i_ino); + if (inode->i_sb->s_flags & MS_RDONLY) + return 0; + /* * VFS has already synchronized dirty pages for this inode. Synchronize * the inode unless this is a 'datasync()' call. diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c index 4d0cb1241460f..40fa780ebea7d 100644 --- a/fs/ubifs/log.c +++ b/fs/ubifs/log.c @@ -174,26 +174,6 @@ void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud) spin_unlock(&c->buds_lock); } -/** - * ubifs_create_buds_lists - create journal head buds lists for remount rw. 
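/*
 * Illustrative sketch of the "save, zero, compute, restore" trick used by
 * dbg_save_space_info()/dbg_check_space_info() above so that both sides of
 * the comparison ignore @c->freeable_cnt.  The wrapper below is a stand-in,
 * not a function added by this patch.
 */
static long long example_free_space_ignoring_freeable(struct ubifs_info *c)
{
        long long free;
        int saved_cnt;

        spin_lock(&c->space_lock);
        saved_cnt = c->freeable_cnt;
        c->freeable_cnt = 0;            /* temporarily ignore freeable LEBs */
        free = ubifs_get_free_space_nolock(c);
        c->freeable_cnt = saved_cnt;
        spin_unlock(&c->space_lock);

        return free;
}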
- * @c: UBIFS file-system description object - */ -void ubifs_create_buds_lists(struct ubifs_info *c) -{ - struct rb_node *p; - - spin_lock(&c->buds_lock); - p = rb_first(&c->buds); - while (p) { - struct ubifs_bud *bud = rb_entry(p, struct ubifs_bud, rb); - struct ubifs_jhead *jhead = &c->jheads[bud->jhead]; - - list_add_tail(&bud->list, &jhead->buds_list); - p = rb_next(p); - } - spin_unlock(&c->buds_lock); -} - /** * ubifs_add_bud_to_log - add a new bud to the log. * @c: UBIFS file-system description object diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c index 72775d35b99e5..ef5155e109a27 100644 --- a/fs/ubifs/lpt.c +++ b/fs/ubifs/lpt.c @@ -1270,10 +1270,9 @@ static int read_pnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip) lnum = branch->lnum; offs = branch->offs; pnode = kzalloc(sizeof(struct ubifs_pnode), GFP_NOFS); - if (!pnode) { - err = -ENOMEM; - goto out; - } + if (!pnode) + return -ENOMEM; + if (lnum == 0) { /* * This pnode was not written which just means that the LEB diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c index 77e9b874b6c22..c0c590feabe1a 100644 --- a/fs/ubifs/recovery.c +++ b/fs/ubifs/recovery.c @@ -300,6 +300,32 @@ int ubifs_recover_master_node(struct ubifs_info *c) goto out_free; } memcpy(c->rcvrd_mst_node, c->mst_node, UBIFS_MST_NODE_SZ); + + /* + * We had to recover the master node, which means there was an + * unclean reboot. However, it is possible that the master node + * is clean at this point, i.e., %UBIFS_MST_DIRTY is not set. + * E.g., consider the following chain of events: + * + * 1. UBIFS was cleanly unmounted, so the master node is clean + * 2. UBIFS is being mounted R/W and starts changing the master + * node in the first (%UBIFS_MST_LNUM). A power cut happens, + * so this LEB ends up with some amount of garbage at the + * end. + * 3. UBIFS is being mounted R/O. We reach this place and + * recover the master node from the second LEB + * (%UBIFS_MST_LNUM + 1). But we cannot update the media + * because we are being mounted R/O. We have to defer the + * operation. + * 4. However, this master node (@c->mst_node) is marked as + * clean (since step 1). And if we just return, the + * mount code will be confused and won't recover the master + * node when it is re-mounted R/W later. + * + * Thus, we force recovery by marking the master node as + * dirty. + */ + c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY); } else { /* Write the recovered master node */ c->max_sqnum = le64_to_cpu(mst->ch.sqnum) - 1; diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c index eed0fcff8d731..d3d6d365bfc11 100644 --- a/fs/ubifs/replay.c +++ b/fs/ubifs/replay.c @@ -59,6 +59,7 @@ enum { * @new_size: truncation new size * @free: amount of free space in a bud * @dirty: amount of dirty space in a bud from padding and deletion nodes + * @jhead: journal head number of the bud * * UBIFS journal replay must compare node sequence numbers, which means it must * build a tree of node information to insert into the TNC.
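/*
 * [Editor's illustration -- not part of the patch above.]  The replay comment
 * just quoted says nodes must be applied in sequence-number order.  Below is a
 * minimal, self-contained user-space sketch of that idea; the struct and names
 * are simplified stand-ins, not UBIFS's real struct replay_entry.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_replay_entry {
	unsigned long long sqnum;	/* node sequence number */
	int lnum;			/* LEB the node was found in */
};

static int cmp_sqnum(const void *a, const void *b)
{
	const struct toy_replay_entry *x = a, *y = b;

	if (x->sqnum < y->sqnum)
		return -1;
	return x->sqnum > y->sqnum;
}

int main(void)
{
	struct toy_replay_entry log[] = {
		{ .sqnum = 42, .lnum = 7 },
		{ .sqnum = 17, .lnum = 3 },
		{ .sqnum = 99, .lnum = 5 },
	};
	size_t i, n = sizeof(log) / sizeof(log[0]);

	/* Replay applies nodes oldest-first, i.e. by ascending sqnum. */
	qsort(log, n, sizeof(log[0]), cmp_sqnum);
	for (i = 0; i < n; i++)
		printf("replay sqnum %llu from LEB %d\n", log[i].sqnum, log[i].lnum);
	return 0;
}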
@@ -80,6 +81,7 @@ struct replay_entry { struct { int free; int dirty; + int jhead; }; }; }; @@ -159,6 +161,11 @@ static int set_bud_lprops(struct ubifs_info *c, struct replay_entry *r) err = PTR_ERR(lp); goto out; } + + /* Make sure the journal head points to the latest bud */ + err = ubifs_wbuf_seek_nolock(&c->jheads[r->jhead].wbuf, r->lnum, + c->leb_size - r->free, UBI_SHORTTERM); + out: ubifs_release_lprops(c); return err; @@ -627,10 +634,6 @@ static int replay_bud(struct ubifs_info *c, int lnum, int offs, int jhead, ubifs_assert(sleb->endpt - offs >= used); ubifs_assert(sleb->endpt % c->min_io_size == 0); - if (sleb->endpt + c->min_io_size <= c->leb_size && !c->ro_mount) - err = ubifs_wbuf_seek_nolock(&c->jheads[jhead].wbuf, lnum, - sleb->endpt, UBI_SHORTTERM); - *dirty = sleb->endpt - offs - used; *free = c->leb_size - sleb->endpt; @@ -653,12 +656,14 @@ static int replay_bud(struct ubifs_info *c, int lnum, int offs, int jhead, * @sqnum: sequence number * @free: amount of free space in bud * @dirty: amount of dirty space from padding and deletion nodes + * @jhead: journal head number for the bud * * This function inserts a reference node to the replay tree and returns zero * in case of success or a negative error code in case of failure. */ static int insert_ref_node(struct ubifs_info *c, int lnum, int offs, - unsigned long long sqnum, int free, int dirty) + unsigned long long sqnum, int free, int dirty, + int jhead) { struct rb_node **p = &c->replay_tree.rb_node, *parent = NULL; struct replay_entry *r; @@ -688,6 +693,7 @@ static int insert_ref_node(struct ubifs_info *c, int lnum, int offs, r->flags = REPLAY_REF; r->free = free; r->dirty = dirty; + r->jhead = jhead; rb_link_node(&r->rb, parent, p); rb_insert_color(&r->rb, &c->replay_tree); @@ -712,7 +718,7 @@ static int replay_buds(struct ubifs_info *c) if (err) return err; err = insert_ref_node(c, b->bud->lnum, b->bud->start, b->sqnum, - free, dirty); + free, dirty, b->bud->jhead); if (err) return err; } diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c index bf31b4729e51c..cad60b51f7c4a 100644 --- a/fs/ubifs/sb.c +++ b/fs/ubifs/sb.c @@ -475,7 +475,8 @@ static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup) * @c: UBIFS file-system description object * * This function returns a pointer to the superblock node or a negative error - * code. + * code. Note, the user of this function is responsible of kfree()'ing the + * returned superblock buffer. 
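/*
 * [Editor's illustration -- not part of the patch.]  The documentation fix
 * just above makes the ownership rule explicit: the caller of
 * ubifs_read_sb_node() must free the returned buffer, which is why the remount
 * path below gains a kfree(sup).  A generic user-space sketch of the same
 * "allocate in the callee, free in the caller" contract; the toy_* names are
 * made up for the example.
 */
#include <stdlib.h>
#include <string.h>

struct toy_sb_node {
	unsigned int leb_cnt;
};

/* Callee allocates; ownership passes to the caller. */
static struct toy_sb_node *toy_read_sb_node(void)
{
	struct toy_sb_node *sup = malloc(sizeof(*sup));

	if (sup)
		memset(sup, 0, sizeof(*sup));
	return sup;
}

int main(void)
{
	struct toy_sb_node *sup = toy_read_sb_node();

	if (!sup)
		return 1;
	sup->leb_cnt = 1024;	/* ... use or update the node ... */
	free(sup);		/* caller frees, mirroring the kfree(sup) added below */
	return 0;
}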
*/ struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c) { diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c index 46961c0032362..ca953a945029a 100644 --- a/fs/ubifs/shrinker.c +++ b/fs/ubifs/shrinker.c @@ -277,8 +277,9 @@ static int kick_a_thread(void) return 0; } -int ubifs_shrinker(struct shrinker *shrink, int nr, gfp_t gfp_mask) +int ubifs_shrinker(struct shrinker *shrink, struct shrink_control *sc) { + int nr = sc->nr_to_scan; int freed, contention = 0; long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt); diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 6e11c2975dcf5..38749e76dda56 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1235,12 +1235,12 @@ static int mount_ubifs(struct ubifs_info *c) goto out_free; } + err = alloc_wbufs(c); + if (err) + goto out_cbuf; + sprintf(c->bgt_name, BGT_NAME_PATTERN, c->vi.ubi_num, c->vi.vol_id); if (!c->ro_mount) { - err = alloc_wbufs(c); - if (err) - goto out_cbuf; - /* Create background thread */ c->bgt = kthread_create(ubifs_bg_thread, c, "%s", c->bgt_name); if (IS_ERR(c->bgt)) { @@ -1543,6 +1543,7 @@ static int ubifs_remount_rw(struct ubifs_info *c) mutex_lock(&c->umount_mutex); dbg_save_space_info(c); c->remounting_rw = 1; + c->ro_mount = 0; c->always_chk_crc = 1; err = check_free_space(c); @@ -1559,6 +1560,7 @@ static int ubifs_remount_rw(struct ubifs_info *c) } sup->leb_cnt = cpu_to_le32(c->leb_cnt); err = ubifs_write_sb_node(c, sup); + kfree(sup); if (err) goto out; } @@ -1602,12 +1604,6 @@ static int ubifs_remount_rw(struct ubifs_info *c) if (err) goto out; - err = alloc_wbufs(c); - if (err) - goto out; - - ubifs_create_buds_lists(c); - /* Create background thread */ c->bgt = kthread_create(ubifs_bg_thread, c, "%s", c->bgt_name); if (IS_ERR(c->bgt)) { @@ -1642,20 +1638,32 @@ static int ubifs_remount_rw(struct ubifs_info *c) if (err) goto out; + dbg_gen("re-mounted read-write"); + c->remounting_rw = 0; + if (c->need_recovery) { c->need_recovery = 0; ubifs_msg("deferred recovery completed"); + } else { + /* + * Do not run the debugging space check if we were doing + * recovery, because when we saved the information we had the + * file-system in a state where the TNC and lprops have been + * modified in memory, but all the I/O operations (including a + * commit) were deferred. So the file-system was in + * "non-committed" state. Now the file-system is in committed + * state, and of course the amount of free space will change + * because, for example, the old index size was imprecise.
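/*
 * [Editor's illustration -- not part of the patch.]  Several shrinkers in this
 * series (ubifs_shrinker above, the XFS buffer/inode/quota shrinkers further
 * down) switch from the old (shrinker, nr, gfp_mask) callback to one taking a
 * struct shrink_control.  The struct below is a local mock with only the two
 * fields the patch actually reads (nr_to_scan, gfp_mask) so the snippet builds
 * on its own; it is not the kernel's definition.
 */
#include <stdio.h>

typedef unsigned int toy_gfp_t;

struct toy_shrink_control {
	unsigned long nr_to_scan;	/* 0 means "just report object count" */
	toy_gfp_t gfp_mask;		/* allocation context of the caller */
};

static int toy_shrinker(struct toy_shrink_control *sc)
{
	/* New-style callbacks unpack their arguments from the control struct. */
	unsigned long nr_to_scan = sc->nr_to_scan;

	if (!nr_to_scan)
		return 100;	/* pretend 100 objects are reclaimable */
	printf("scanning %lu objects (gfp mask %#x)\n", nr_to_scan, sc->gfp_mask);
	return 0;
}

int main(void)
{
	struct toy_shrink_control sc = { .nr_to_scan = 0, .gfp_mask = 0 };

	toy_shrinker(&sc);		/* query pass */
	sc.nr_to_scan = 32;
	return toy_shrinker(&sc);	/* reclaim pass */
}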
+ */ + err = dbg_check_space_info(c); } - dbg_gen("re-mounted read-write"); - c->ro_mount = 0; - c->remounting_rw = 0; c->always_chk_crc = 0; - err = dbg_check_space_info(c); mutex_unlock(&c->umount_mutex); return err; out: + c->ro_mount = 1; vfree(c->orph_buf); c->orph_buf = NULL; if (c->bgt) { @@ -1704,7 +1712,6 @@ static void ubifs_remount_ro(struct ubifs_info *c) if (err) ubifs_ro_mode(c, err); - free_wbufs(c); vfree(c->orph_buf); c->orph_buf = NULL; vfree(c->ileb_buf); diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 381d6b207a525..e9e694e2a18f5 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -1586,7 +1586,7 @@ int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot); int ubifs_tnc_end_commit(struct ubifs_info *c); /* shrinker.c */ -int ubifs_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask); +int ubifs_shrinker(struct shrinker *shrink, struct shrink_control *sc); /* commit.c */ int ubifs_bg_thread(void *info); diff --git a/fs/udf/inode.c b/fs/udf/inode.c index c6a2e782b97b1..ed51a4c347432 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -1591,7 +1591,7 @@ static int udf_update_inode(struct inode *inode, int do_sync) /* write the data blocks */ mark_buffer_dirty(bh); - if (do_sync) { + if (1 /* XXX fix fsync and use do_sync */) { sync_dirty_buffer(bh); if (buffer_write_io_error(bh)) { printk(KERN_WARNING "IO error syncing udf inode " diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index 2b251f2093afc..b2119db47cb20 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c @@ -889,7 +889,7 @@ static int ufs_update_inode(struct inode * inode, int do_sync) } mark_buffer_dirty(bh); - if (do_sync) + if (1 /* XXX: fix fsync and use do_sync */) sync_dirty_buffer(bh); brelse (bh); diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index ac1c7e8378ddd..8321493bf8f44 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -1543,12 +1543,12 @@ xfs_wait_buftarg( int xfs_buftarg_shrink( struct shrinker *shrink, - int nr_to_scan, - gfp_t mask) + struct shrink_control *sc) { struct xfs_buftarg *btp = container_of(shrink, struct xfs_buftarg, bt_shrinker); struct xfs_buf *bp; + int nr_to_scan = sc->nr_to_scan; LIST_HEAD(dispose); if (!nr_to_scan) diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 9731898083ae8..ad485b60340fb 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -1551,10 +1551,14 @@ xfs_fs_fill_super( if (error) goto out_free_sb; - error = xfs_mountfs(mp); - if (error) - goto out_filestream_unmount; - + /* + * we must configure the block size in the superblock before we run the + * full mount process as the mount process can lookup and cache inodes. + * For the same reason we must also initialise the syncd and register + * the inode cache shrinker so that inodes can be reclaimed during + * operations like a quotacheck that iterate all inodes in the + * filesystem. 
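/*
 * [Editor's illustration -- not part of the patch.]  The xfs_fs_fill_super()
 * rework above is the usual "initialise in order, unwind in reverse order with
 * gotos" pattern: syncd and the inode shrinker are now brought up before the
 * mount step, which may rely on them, and the error path tears them down
 * again.  A self-contained sketch of that shape with made-up step names:
 */
#include <stdio.h>

static int setup_syncd(void)		{ puts("syncd up");	   return 0; }
static void stop_syncd(void)		{ puts("syncd stopped");   }
static int register_reclaimer(void)	{ puts("shrinker on");	   return 0; }
static void unregister_reclaimer(void)	{ puts("shrinker off");	   }
static int do_mount(void)		{ puts("mount failed");	   return -1; }

static int toy_fill_super(void)
{
	int error;

	error = setup_syncd();
	if (error)
		goto out;
	error = register_reclaimer();
	if (error)
		goto out_syncd_stop;
	error = do_mount();	/* needs syncd + shrinker already running */
	if (error)
		goto out_reclaimer;
	return 0;

out_reclaimer:
	unregister_reclaimer();
out_syncd_stop:
	stop_syncd();
out:
	return error;
}

int main(void)
{
	return toy_fill_super() ? 1 : 0;
}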
+ */ sb->s_magic = XFS_SB_MAGIC; sb->s_blocksize = mp->m_sb.sb_blocksize; sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1; @@ -1562,6 +1566,16 @@ xfs_fs_fill_super( sb->s_time_gran = 1; set_posix_acl_flag(sb); + error = xfs_syncd_init(mp); + if (error) + goto out_filestream_unmount; + + xfs_inode_shrinker_register(mp); + + error = xfs_mountfs(mp); + if (error) + goto out_syncd_stop; + root = igrab(VFS_I(mp->m_rootip)); if (!root) { error = ENOENT; @@ -1577,14 +1591,11 @@ xfs_fs_fill_super( goto fail_vnrele; } - error = xfs_syncd_init(mp); - if (error) - goto fail_vnrele; - - xfs_inode_shrinker_register(mp); - return 0; + out_syncd_stop: + xfs_inode_shrinker_unregister(mp); + xfs_syncd_stop(mp); out_filestream_unmount: xfs_filestream_unmount(mp); out_free_sb: @@ -1608,6 +1619,9 @@ xfs_fs_fill_super( } fail_unmount: + xfs_inode_shrinker_unregister(mp); + xfs_syncd_stop(mp); + /* * Blow away any referenced inode in the filestreams cache. * This can and will cause log traffic as inodes go inactive diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index e22f0057d21fa..aaa4cf738a5df 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -999,13 +999,14 @@ xfs_reclaim_inodes( static int xfs_reclaim_inode_shrink( struct shrinker *shrink, - int nr_to_scan, - gfp_t gfp_mask) + struct shrink_control *sc) { struct xfs_mount *mp; struct xfs_perag *pag; xfs_agnumber_t ag; int reclaimable; + int nr_to_scan = sc->nr_to_scan; + gfp_t gfp_mask = sc->gfp_mask; mp = container_of(shrink, struct xfs_mount, m_inode_shrink); if (nr_to_scan) { diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index 206a2815ced67..471f911c7d9e6 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c @@ -60,7 +60,7 @@ STATIC void xfs_qm_list_destroy(xfs_dqlist_t *); STATIC int xfs_qm_init_quotainos(xfs_mount_t *); STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); -STATIC int xfs_qm_shake(struct shrinker *, int, gfp_t); +STATIC int xfs_qm_shake(struct shrinker *, struct shrink_control *); static struct shrinker xfs_qm_shaker = { .shrink = xfs_qm_shake, @@ -2027,10 +2027,10 @@ xfs_qm_shake_freelist( STATIC int xfs_qm_shake( struct shrinker *shrink, - int nr_to_scan, - gfp_t gfp_mask) + struct shrink_control *sc) { int ndqused, nfree, n; + gfp_t gfp_mask = sc->gfp_mask; if (!kmem_shake_allow(gfp_mask)) return 0; diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h index 3c2344f48136d..01f227e14254f 100644 --- a/include/asm-generic/futex.h +++ b/include/asm-generic/futex.h @@ -6,7 +6,7 @@ #include static inline int -futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -16,7 +16,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) + if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -48,7 +48,8 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { return -ENOSYS; } diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index fe77e3395b40b..91ee08bb48579 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -274,70 +274,70 @@ /* Kernel symbol table: Normal symbols */ \ __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___ksymtab) = .; \ - *(__ksymtab) \ + *(SORT(___ksymtab+*)) \ VMLINUX_SYMBOL(__stop___ksymtab) = .; \ } \ \ /* Kernel symbol table: GPL-only symbols */ \ __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \ - *(__ksymtab_gpl) \ + *(SORT(___ksymtab_gpl+*)) \ VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \ } \ \ /* Kernel symbol table: Normal unused symbols */ \ __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \ - *(__ksymtab_unused) \ + *(SORT(___ksymtab_unused+*)) \ VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \ } \ \ /* Kernel symbol table: GPL-only unused symbols */ \ __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \ - *(__ksymtab_unused_gpl) \ + *(SORT(___ksymtab_unused_gpl+*)) \ VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \ } \ \ /* Kernel symbol table: GPL-future-only symbols */ \ __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \ - *(__ksymtab_gpl_future) \ + *(SORT(___ksymtab_gpl_future+*)) \ VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \ } \ \ /* Kernel symbol table: Normal symbols */ \ __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___kcrctab) = .; \ - *(__kcrctab) \ + *(SORT(___kcrctab+*)) \ VMLINUX_SYMBOL(__stop___kcrctab) = .; \ } \ \ /* Kernel symbol table: GPL-only symbols */ \ __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \ - *(__kcrctab_gpl) \ + *(SORT(___kcrctab_gpl+*)) \ VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \ } \ \ /* Kernel symbol table: Normal unused symbols */ \ __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \ - *(__kcrctab_unused) \ + *(SORT(___kcrctab_unused+*)) \ VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \ } \ \ /* Kernel symbol table: GPL-only unused symbols */ \ __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \ - *(__kcrctab_unused_gpl) \ + *(SORT(___kcrctab_unused_gpl+*)) \ VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \ } \ \ /* Kernel symbol table: GPL-future-only symbols */ \ __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \ - *(__kcrctab_gpl_future) \ + *(SORT(___kcrctab_gpl_future+*)) \ VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \ } \ \ diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 5ff1194dc2ea1..e893a90a74c55 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -153,6 +153,7 @@ {0x1002, 0x6729, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ 
{0x1002, 0x6738, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6739, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x673e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6740, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6741, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ @@ -185,6 +186,7 @@ {0x1002, 0x688D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6898, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6899, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x689b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x689c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \ {0x1002, 0x689d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \ {0x1002, 0x689e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ @@ -195,7 +197,9 @@ {0x1002, 0x68b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x68b8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ {0x1002, 0x68b9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68ba, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ {0x1002, 0x68be, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68bf, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ {0x1002, 0x68c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x68c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x68c7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ @@ -458,6 +462,8 @@ {0x1002, 0x9803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9804, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0, 0, 0} #define r128_PCI_IDS \ diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h index e5c607a02d57c..53f9ea216774d 100644 --- a/include/drm/radeon_drm.h +++ b/include/drm/radeon_drm.h @@ -908,6 +908,7 @@ struct drm_radeon_cs { #define RADEON_INFO_WANT_HYPERZ 0x07 #define RADEON_INFO_WANT_CMASK 0x08 /* get access to CMASK on r300 */ #define RADEON_INFO_CLOCK_CRYSTAL_FREQ 0x09 /* clock crystal frequency */ +#define RADEON_INFO_FUSION_GART_WORKING 0x0c /* fusion writes to GTT were broken before this */ struct drm_radeon_info { uint32_t request; diff --git a/include/linux/Kbuild b/include/linux/Kbuild index b0ada6f37dd65..bd15415f6a15c 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -17,6 +17,7 @@ header-y += netfilter_ipv4/ header-y += netfilter_ipv6/ header-y += usb/ header-y += wimax/ +header-y += genlock.h objhdr-y += version.h @@ -44,6 +45,7 @@ header-y += agpgart.h header-y += aio_abi.h header-y += apm_bios.h header-y += arcfb.h +header-y += ashmem.h header-y += atalk.h header-y += atm.h header-y += atm_eni.h @@ -362,6 +364,7 @@ header-y += 
tty.h header-y += types.h header-y += udf_fs_i.h header-y += udp.h +header-y += uhid.h header-y += uinput.h header-y += uio.h header-y += ultrasound.h @@ -392,3 +395,13 @@ header-y += wireless.h header-y += x25.h header-y += xattr.h header-y += xfrm.h +header-y += android_pmem.h +header-y += ashmem.h +header-y += genlock.h +header-y += ion.h +header-y += msm_kgsl.h +header-y += msm_mdp.h +header-y += msm_q6vdec.h +header-y += msm_q6venc.h +header-y += msm_ion.h +header-y += msm_rotator.h diff --git a/include/linux/a1026.h b/include/linux/a1026.h new file mode 100644 index 0000000000000..5b02240df6209 --- /dev/null +++ b/include/linux/a1026.h @@ -0,0 +1,234 @@ +/* include/linux/a1026.h - a1026 voice processor driver + * + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __LINUX_A1026_H +#define __LINUX_A1026_H + +#include + +#define A1026_MAX_FW_SIZE (32*1024) +struct a1026img { + unsigned char *buf; + unsigned img_size; +}; + +enum A1026_PathID { + A1026_PATH_SUSPEND, + A1026_PATH_INCALL_RECEIVER, + A1026_PATH_INCALL_HEADSET, + A1026_PATH_INCALL_SPEAKER, + A1026_PATH_INCALL_BT, + A1026_PATH_VR_NO_NS_RECEIVER, + A1026_PATH_VR_NO_NS_HEADSET, + A1026_PATH_VR_NO_NS_SPEAKER, + A1026_PATH_VR_NO_NS_BT, + A1026_PATH_VR_NS_RECEIVER, + A1026_PATH_VR_NS_HEADSET, + A1026_PATH_VR_NS_SPEAKER, + A1026_PATH_VR_NS_BT, + A1026_PATH_RECORD_RECEIVER, + A1026_PATH_RECORD_HEADSET, + A1026_PATH_RECORD_SPEAKER, + A1026_PATH_RECORD_BT, + A1026_PATH_CAMCORDER, + A1026_PATH_INCALL_TTY, + A1026_PATH_MAX +}; + +/* noise suppression states */ +enum A1026_NS_states { + A1026_NS_STATE_AUTO, /* leave mode as selected by driver */ + A1026_NS_STATE_OFF, /* disable noise suppression */ + A1026_NS_STATE_CT, /* force close talk mode */ + A1026_NS_STATE_FT, /* force far talk mode */ + A1026_NS_NUM_STATES +}; + +/* indicates if a1026_set_config() performs a full configuration or only + * a voice processing algorithm configuration */ +/* IOCTLs for Audience A1026 */ +#define A1026_IOCTL_MAGIC 'u' + +#define A1026_BOOTUP_INIT _IOW(A1026_IOCTL_MAGIC, 0x01, struct a1026img *) +#define A1026_SET_CONFIG _IOW(A1026_IOCTL_MAGIC, 0x02, enum A1026_PathID) +#define A1026_SET_NS_STATE _IOW(A1026_IOCTL_MAGIC, 0x03, enum A1026_NS_states) + +/* For Diag */ +#define A1026_SET_MIC_ONOFF _IOW(A1026_IOCTL_MAGIC, 0x50, unsigned) +#define A1026_SET_MICSEL_ONOFF _IOW(A1026_IOCTL_MAGIC, 0x51, unsigned) +#define A1026_READ_DATA _IOR(A1026_IOCTL_MAGIC, 0x52, unsigned) +#define A1026_WRITE_MSG _IOW(A1026_IOCTL_MAGIC, 0x53, unsigned) +#define A1026_SYNC_CMD _IO(A1026_IOCTL_MAGIC, 0x54) +#define A1026_SET_CMD_FILE _IOW(A1026_IOCTL_MAGIC, 0x55, unsigned) + +#ifdef __KERNEL__ + +/* A1026 Command codes */ +#define CtrlMode_LAL 0x0001 /* Level Active Low */ +#define CtrlMode_LAH 0x0002 /* Level Active High */ +#define CtrlMode_FE 0x0003 /* Falling Edge */ +#define CtrlMode_RE 0x0004 /* Rising Edge */ +#define A100_msg_Sync 0x80000000 +#define A100_msg_Sync_Ack 0x80000000 + +#define A100_msg_Reset 0x8002 +#define RESET_IMMEDIATE 0x0000 +#define RESET_DELAYED 0x0001 + +#define 
A100_msg_BootloadInitiate 0x8003 +#define A100_msg_GetDeviceParm 0x800B +#define A100_msg_SetDeviceParmID 0x800C +#define A100_msg_SetDeviceParm 0x800D + +/* Get/Set PCM Device Parameter ID List */ +/* PCM-0 */ +#define PCM0WordLength 0x0100 +#define PCM0DelFromFsTx 0x0101 +#define PCM0DelFromFsRx 0x0102 +#define PCM0LatchEdge 0x0103 +#define PCM0Endianness 0x0105 +#define PCM0TristateEnable 0x0107 + +/* PCM-1 */ +#define PCM1WordLength 0x0200 +#define PCM1DelFromFsTx 0x0201 +#define PCM1DelFromFsRx 0x0202 +#define PCM1LatchEdge 0x0203 +#define PCM1Endianness 0x0205 +#define PCM1TristateEnable 0x0207 + +/* Possible setting values for PCM I/F */ +#define PCMWordLength_16bit 0x10 /* Default */ +#define PCMWordLength_24bit 0x18 +#define PCMWordLength_32bit 0x20 +#define PCMLatchEdge_Tx_F_Rx_R 0x00 /* Tx/Rx on falling/rising edge */ +#define PCMLatchEdge_Tx_R_Rx_F 0x03 /* Tx/Rx on falling/rising edge */ +#define PCMEndianness_Little 0x00 +#define PCMEndianness_Big 0x01 /* Default */ +#define PCMTristate_Disable 0x00 /* Default */ +#define PCMTristate_Enable 0x01 + +/* Get/Set ADC Device Parameter ID List */ +/* ADC-0 */ +#define ADC0Gain 0x0300 +#define ADC0Rate 0x0301 +#define ADC0CutoffFreq 0x0302 + +/* ADC-1 */ +#define ADC1Gain 0x0400 +#define ADC1Rate 0x0401 +#define ADC1CutoffFreq 0x0402 + +/* Possible setting values for ADC I/F */ +#define ADC_Gain_0db 0x00 +#define ADC_Gain_6db 0x01 +#define ADC_Gain_12db 0x02 +#define ADC_Gain_18db 0x03 +#define ADC_Gain_24db 0x04 /* Default */ +#define ADC_Gain_30db 0x05 +#define ADC_Rate_8kHz 0x00 /* Default */ +#define ADC_Rate_16kHz 0x01 +#define ADC_CutoffFreq_NO_DC_Filter 0x00 +#define ADC_CutoffFreq_59p68Hz 0x01 /* Default */ +#define ADC_CutoffFreq_7p46Hz 0x02 +#define ADC_CutoffFreq_3p73Hz 0x03 + +/* Set Power State */ +#define A100_msg_Sleep 0x80100001 + +/* Get/Set Algorithm Parameter command codes list */ +#define A100_msg_GetAlgorithmParm 0x8016 +#define A100_msg_SetAlgorithmParmID 0x8017 +#define A100_msg_SetAlgorithmParm 0x8018 + +/* Get/Set Algorithm Parameter ID List (Transmit Feature) */ +#define AIS_Global_Supression_Level 0x0000 +#define Mic_Config 0x0002 +#define AEC_Mode 0x0003 +#define AEC_CNG 0x0023 +#define Output_AGC 0x0004 +#define Output_AGC_Target_Level 0x0005 +#define Output_AGC_Noise_Floor 0x0006 +#define Output_AGC_SNR_Improvement 0x0007 +#define Comfort_Noise 0x001A +#define Comfort_Noise_Level 0x001B + +/* Get/Set Algorithm Parameter ID List (Receive Feature) */ +#define Speaker_Volume 0x0012 +#define VEQ_Mode 0x0009 +#define VEQ_Max_FarEnd_Limiter_Level 0x000D +#define VEQ_Noise_Estimation_Adj 0x0025 +#define Receive_NS 0x000E +#define Receive_NS_Level 0x000F +#define SideTone 0x0015 +#define SideTone_Gain 0x0016 + +/* Audio Path Commands */ +/* Get/Set Transmit Digital Input Gain */ +#define A100_msg_GetTxDigitalInputGain 0x801A +#define A100_msg_SetTxDigitalInputGain 0x801B + +/* Get/Set Receive Digital Input Gain */ +#define A100_msg_GetRcvDigitalInputGain 0x8022 +#define A100_msg_SetRcvDigitalInputGain 0x8023 + +/* Get/Set Transmit Digital Output Gain */ +#define A100_msg_GetTxDigitalOutputGain 0x801D +#define A100_msg_SetTxDigitalOutputGain 0x8015 + +/* Bypass */ +#define A100_msg_Bypass 0x801C /* 0ff = 0x0000; on = 0x0001 (Default) */ +#define A1026_msg_VP_ON 0x801C0001 +#define A1026_msg_VP_OFF 0x801C0000 + +/* Diagnostic API Commands */ +#define A100_msg_GetMicRMS 0x8013 +#define A100_msg_GetMicPeak 0x8014 +#define DiagPath_Pri_Input_Mic 0x0000 +#define DiagPath_Sec_Input_Mic 0x0001 +#define 
DiagPath_Output_Mic 0x0002 +#define DiagPath_Far_End_Input 0x0003 +#define DiagPath_Far_End_Output 0x0004 +#define A100_msg_SwapInputCh 0x8019 +#define A100_msg_OutputKnownSig 0x801E + +#define A1026_msg_BOOT 0x0001 +#define A1026_msg_BOOT_ACK 0x01 + +/* general definitions */ +#define TIMEOUT 20 /* ms */ +#define RETRY_CNT 5 +#define POLLING_RETRY_CNT 3 +#define A1026_ERROR_CODE 0xffff +#define A1026_SLEEP 0 +#define A1026_ACTIVE 1 +#define A1026_CMD_FIFO_DEPTH 64 + +enum A1026_config_mode { + A1026_CONFIG_FULL, + A1026_CONFIG_VP +}; + +struct a1026_platform_data { + uint32_t gpio_a1026_micsel; + uint32_t gpio_a1026_wakeup; + uint32_t gpio_a1026_reset; + uint32_t gpio_a1026_int; + uint32_t gpio_a1026_clk; +}; + + +#endif /* __KERNEL__ */ +#endif /* __LINUX_A1026_H */ diff --git a/include/linux/akm8973.h b/include/linux/akm8973.h new file mode 100644 index 0000000000000..2afa3bacddacf --- /dev/null +++ b/include/linux/akm8973.h @@ -0,0 +1,61 @@ +/* + * Definitions for akm8973 compass chip. + */ +#ifndef AKM8973_H +#define AKM8973_H + +#include + +#define AKM8973_I2C_NAME "akm8973" + +/* Compass device dependent definition */ +#define AKECS_MODE_MEASURE 0x00 /* Starts measurement. Please use AKECS_MODE_MEASURE_SNG */ + /* or AKECS_MODE_MEASURE_SEQ instead of this. */ +#define AKECS_MODE_E2P_READ 0x02 /* E2P access mode (read). */ +#define AKECS_MODE_POWERDOWN 0x03 /* Power down mode */ + +#define RBUFF_SIZE 4 /* Rx buffer size */ + +/* AK8973 register address */ +#define AKECS_REG_ST 0xC0 +#define AKECS_REG_TMPS 0xC1 +#define AKECS_REG_MS1 0xE0 + +#define AKMIO 0xA1 + +/* IOCTLs for AKM library */ +#define ECS_IOCTL_WRITE _IOW(AKMIO, 0x01, char[5]) +#define ECS_IOCTL_READ _IOWR(AKMIO, 0x02, char[5]) +#define ECS_IOCTL_RESET _IO(AKMIO, 0x03) +#define ECS_IOCTL_SET_MODE _IOW(AKMIO, 0x04, short) +#define ECS_IOCTL_GETDATA _IOR(AKMIO, 0x05, char[RBUFF_SIZE+1]) +#define ECS_IOCTL_SET_YPR _IOW(AKMIO, 0x06, short[12]) +#define ECS_IOCTL_GET_OPEN_STATUS _IOR(AKMIO, 0x07, int) +#define ECS_IOCTL_GET_CLOSE_STATUS _IOR(AKMIO, 0x08, int) +#define ECS_IOCTL_GET_DELAY _IOR(AKMIO, 0x30, short) +#define ECS_IOCTL_GET_PROJECT_NAME _IOR(AKMIO, 0x0D, char[64]) +#define ECS_IOCTL_GET_MATRIX _IOR(AKMIO, 0x0E, short [4][3][3]) + +/* IOCTLs for APPs */ +#define ECS_IOCTL_APP_SET_MODE _IOW(AKMIO, 0x10, short) +#define ECS_IOCTL_APP_SET_MFLAG _IOW(AKMIO, 0x11, short) +#define ECS_IOCTL_APP_GET_MFLAG _IOW(AKMIO, 0x12, short) +#define ECS_IOCTL_APP_SET_AFLAG _IOW(AKMIO, 0x13, short) +#define ECS_IOCTL_APP_GET_AFLAG _IOR(AKMIO, 0x14, short) +#define ECS_IOCTL_APP_SET_TFLAG _IOR(AKMIO, 0x15, short) +#define ECS_IOCTL_APP_GET_TFLAG _IOR(AKMIO, 0x16, short) +#define ECS_IOCTL_APP_RESET_PEDOMETER _IO(AKMIO, 0x17) +#define ECS_IOCTL_APP_SET_DELAY _IOW(AKMIO, 0x18, short) +#define ECS_IOCTL_APP_GET_DELAY ECS_IOCTL_GET_DELAY +#define ECS_IOCTL_APP_SET_MVFLAG _IOW(AKMIO, 0x19, short) /* Set raw magnetic vector flag */ +#define ECS_IOCTL_APP_GET_MVFLAG _IOR(AKMIO, 0x1A, short) /* Get raw magnetic vector flag */ + +struct akm8973_platform_data { + short layouts[4][3][3]; + char project_name[64]; + int reset; + int intr; +}; + +#endif + diff --git a/include/linux/akm8976.h b/include/linux/akm8976.h new file mode 100644 index 0000000000000..8f6a2bbc8b94c --- /dev/null +++ b/include/linux/akm8976.h @@ -0,0 +1,90 @@ +/* + * Definitions for akm8976 compass chip. + */ +#ifndef AKM8976_H +#define AKM8976_H + +#include + +/* Compass device dependent definition */ +#define AKECS_MODE_MEASURE 0x00 /* Starts measurement. 
Please use AKECS_MODE_MEASURE_SNG */ + /* or AKECS_MODE_MEASURE_SEQ instead of this. */ +#define AKECS_MODE_PFFD 0x01 /* Start pedometer and free fall detect. */ +#define AKECS_MODE_E2P_READ 0x02 /* E2P access mode (read). */ +#define AKECS_MODE_POWERDOWN 0x03 /* Power down mode */ + +#define AKECS_MODE_MEASURE_SNG 0x10 /* Starts single measurement */ +#define AKECS_MODE_MEASURE_SEQ 0x11 /* Starts sequential measurement */ + +/* Default register settings */ +#define CSPEC_AINT 0x01 /* Amplification for acceleration sensor */ +#define CSPEC_SNG_NUM 0x01 /* Single measurement mode */ +#define CSPEC_SEQ_NUM 0x02 /* Sequential measurement mode */ +#define CSPEC_SFRQ_32 0x00 /* Measurement frequency: 32Hz */ +#define CSPEC_SFRQ_64 0x01 /* Measurement frequency: 64Hz */ +#define CSPEC_MCS 0x07 /* Clock frequency */ +#define CSPEC_MKS 0x01 /* Clock type: CMOS level */ +#define CSPEC_INTEN 0x01 /* Interruption pin enable: Enable */ + +#define RBUFF_SIZE 31 /* Rx buffer size */ +#define MAX_CALI_SIZE 0x1000U /* calibration buffer size */ + +/* AK8976A register address */ +#define AKECS_REG_ST 0xC0 +#define AKECS_REG_TMPS 0xC1 +#define AKECS_REG_MS1 0xE0 +#define AKECS_REG_MS2 0xE1 +#define AKECS_REG_MS3 0xE2 + +#define AKMIO 0xA1 + +/* IOCTLs for AKM library */ +#define ECS_IOCTL_INIT _IO(AKMIO, 0x01) +#define ECS_IOCTL_WRITE _IOW(AKMIO, 0x02, char[5]) +#define ECS_IOCTL_READ _IOWR(AKMIO, 0x03, char[5]) +#define ECS_IOCTL_RESET _IO(AKMIO, 0x04) +#define ECS_IOCTL_INT_STATUS _IO(AKMIO, 0x05) +#define ECS_IOCTL_FFD_STATUS _IO(AKMIO, 0x06) +#define ECS_IOCTL_SET_MODE _IOW(AKMIO, 0x07, short) +#define ECS_IOCTL_GETDATA _IOR(AKMIO, 0x08, char[RBUFF_SIZE+1]) +#define ECS_IOCTL_GET_NUMFRQ _IOR(AKMIO, 0x09, char[2]) +#define ECS_IOCTL_SET_PERST _IO(AKMIO, 0x0A) +#define ECS_IOCTL_SET_G0RST _IO(AKMIO, 0x0B) +#define ECS_IOCTL_SET_YPR _IOW(AKMIO, 0x0C, short[12]) +#define ECS_IOCTL_GET_OPEN_STATUS _IOR(AKMIO, 0x0D, int) +#define ECS_IOCTL_GET_CLOSE_STATUS _IOR(AKMIO, 0x0E, int) +#define ECS_IOCTL_GET_CALI_DATA _IOR(AKMIO, 0x0F, char[MAX_CALI_SIZE]) +#define ECS_IOCTL_GET_DELAY _IOR(AKMIO, 0x30, short) + +/* IOCTLs for APPs */ +#define ECS_IOCTL_APP_SET_MODE _IOW(AKMIO, 0x10, short) +#define ECS_IOCTL_APP_SET_MFLAG _IOW(AKMIO, 0x11, short) +#define ECS_IOCTL_APP_GET_MFLAG _IOW(AKMIO, 0x12, short) +#define ECS_IOCTL_APP_SET_AFLAG _IOW(AKMIO, 0x13, short) +#define ECS_IOCTL_APP_GET_AFLAG _IOR(AKMIO, 0x14, short) +#define ECS_IOCTL_APP_SET_TFLAG _IOR(AKMIO, 0x15, short) +#define ECS_IOCTL_APP_GET_TFLAG _IOR(AKMIO, 0x16, short) +#define ECS_IOCTL_APP_RESET_PEDOMETER _IO(AKMIO, 0x17) +#define ECS_IOCTL_APP_SET_DELAY _IOW(AKMIO, 0x18, short) +#define ECS_IOCTL_APP_GET_DELAY ECS_IOCTL_GET_DELAY +#define ECS_IOCTL_APP_SET_MVFLAG _IOW(AKMIO, 0x19, short) /* Set raw magnetic vector flag */ +#define ECS_IOCTL_APP_GET_MVFLAG _IOR(AKMIO, 0x1A, short) /* Get raw magnetic vector flag */ + +/* IOCTLs for pedometer */ +#define ECS_IOCTL_SET_STEP_CNT _IOW(AKMIO, 0x20, short) + +/* Default GPIO setting */ +#define ECS_RST 146 /*MISC4, bit2 */ +#define ECS_CLK_ON 155 /*MISC5, bit3 */ +#define ECS_INTR 161 /*INT2, bit1 */ + +struct akm8976_platform_data { + int reset; + int clk_on; + int intr; +}; + +extern char *get_akm_cal_ram(void); + +#endif + diff --git a/include/linux/android_aid.h b/include/linux/android_aid.h index 7f16a14c0fe71..0f904b3ba7f07 100644 --- a/include/linux/android_aid.h +++ b/include/linux/android_aid.h @@ -22,5 +22,7 @@ #define AID_INET 3003 #define AID_NET_RAW 3004 #define AID_NET_ADMIN 3005 +#define 
AID_NET_BW_STATS 3006 /* read bandwidth statistics */ +#define AID_NET_BW_ACCT 3007 /* change bandwidth statistics accounting */ #endif diff --git a/include/linux/android_pmem.h b/include/linux/android_pmem.h index f633621f5be3d..09edec114b4ed 100644 --- a/include/linux/android_pmem.h +++ b/include/linux/android_pmem.h @@ -1,6 +1,7 @@ /* include/linux/android_pmem.h * * Copyright (C) 2007 Google, Inc. + * Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -16,6 +17,20 @@ #ifndef _ANDROID_PMEM_H_ #define _ANDROID_PMEM_H_ +#include + +#define PMEM_KERNEL_TEST_MAGIC 0xc0 +#define PMEM_KERNEL_TEST_NOMINAL_TEST_IOCTL \ + _IO(PMEM_KERNEL_TEST_MAGIC, 1) +#define PMEM_KERNEL_TEST_ADVERSARIAL_TEST_IOCTL \ + _IO(PMEM_KERNEL_TEST_MAGIC, 2) +#define PMEM_KERNEL_TEST_HUGE_ALLOCATION_TEST_IOCTL \ + _IO(PMEM_KERNEL_TEST_MAGIC, 3) +#define PMEM_KERNEL_TEST_FREE_UNALLOCATED_TEST_IOCTL \ + _IO(PMEM_KERNEL_TEST_MAGIC, 4) +#define PMEM_KERNEL_TEST_LARGE_REGION_NUMBER_TEST_IOCTL \ + _IO(PMEM_KERNEL_TEST_MAGIC, 5) + #define PMEM_IOCTL_MAGIC 'p' #define PMEM_GET_PHYS _IOW(PMEM_IOCTL_MAGIC, 1, unsigned int) #define PMEM_MAP _IOW(PMEM_IOCTL_MAGIC, 2, unsigned int) @@ -33,7 +48,90 @@ * struct (with offset set to 0). */ #define PMEM_GET_TOTAL_SIZE _IOW(PMEM_IOCTL_MAGIC, 7, unsigned int) -#define PMEM_CACHE_FLUSH _IOW(PMEM_IOCTL_MAGIC, 8, unsigned int) +/* Revokes gpu registers and resets the gpu. Pass a pointer to the + * start of the mapped gpu regs (the vaddr returned by mmap) as the argument. + */ +#define HW3D_REVOKE_GPU _IOW(PMEM_IOCTL_MAGIC, 8, unsigned int) +#define HW3D_GRANT_GPU _IOW(PMEM_IOCTL_MAGIC, 9, unsigned int) +#define HW3D_WAIT_FOR_INTERRUPT _IOW(PMEM_IOCTL_MAGIC, 10, unsigned int) + +#define PMEM_CLEAN_INV_CACHES _IOW(PMEM_IOCTL_MAGIC, 11, unsigned int) +#define PMEM_CLEAN_CACHES _IOW(PMEM_IOCTL_MAGIC, 12, unsigned int) +#define PMEM_INV_CACHES _IOW(PMEM_IOCTL_MAGIC, 13, unsigned int) + +#define PMEM_GET_FREE_SPACE _IOW(PMEM_IOCTL_MAGIC, 14, unsigned int) +#define PMEM_ALLOCATE_ALIGNED _IOW(PMEM_IOCTL_MAGIC, 15, unsigned int) +struct pmem_region { + unsigned long offset; + unsigned long len; +}; + +struct pmem_addr { + unsigned long vaddr; + unsigned long offset; + unsigned long length; +}; + +struct pmem_freespace { + unsigned long total; + unsigned long largest; +}; + +struct pmem_allocation { + unsigned long size; + unsigned int align; +}; + +#ifdef __KERNEL__ +int get_pmem_file(unsigned int fd, unsigned long *start, unsigned long *vstart, + unsigned long *end, struct file **filp); +int get_pmem_fd(int fd, unsigned long *start, unsigned long *end); +int get_pmem_user_addr(struct file *file, unsigned long *start, + unsigned long *end); +void put_pmem_file(struct file* file); +void put_pmem_fd(int fd); +void flush_pmem_fd(int fd, unsigned long start, unsigned long len); +void flush_pmem_file(struct file *file, unsigned long start, unsigned long len); +int pmem_cache_maint(struct file *file, unsigned int cmd, + struct pmem_addr *pmem_addr); + +enum pmem_allocator_type { + /* Zero is a default in platform PMEM structures in the board files, + * when the "allocator_type" structure element is not explicitly + * defined + */ + PMEM_ALLOCATORTYPE_BITMAP = 0, /* forced to be zero here */ + PMEM_ALLOCATORTYPE_SYSTEM, + + PMEM_ALLOCATORTYPE_ALLORNOTHING, + PMEM_ALLOCATORTYPE_BUDDYBESTFIT, + + PMEM_ALLOCATORTYPE_MAX, +}; + +#define PMEM_MEMTYPE_MASK 
0x7 +#define PMEM_INVALID_MEMTYPE 0x0 +#define PMEM_MEMTYPE_EBI1 0x1 +#define PMEM_MEMTYPE_SMI 0x2 +#define PMEM_MEMTYPE_RESERVED_INVALID2 0x3 +#define PMEM_MEMTYPE_RESERVED_INVALID3 0x4 +#define PMEM_MEMTYPE_RESERVED_INVALID4 0x5 +#define PMEM_MEMTYPE_RESERVED_INVALID5 0x6 +#define PMEM_MEMTYPE_RESERVED_INVALID6 0x7 + +#define PMEM_ALIGNMENT_MASK 0x18 +#define PMEM_ALIGNMENT_RESERVED_INVALID1 0x0 +#define PMEM_ALIGNMENT_4K 0x8 /* the default */ +#define PMEM_ALIGNMENT_1M 0x10 +#define PMEM_ALIGNMENT_RESERVED_INVALID2 0x18 + +/* flags in the following function defined as above. */ +int32_t pmem_kalloc(const size_t size, const uint32_t flags); +int32_t pmem_kfree(const int32_t physaddr); + +/* kernel api names for board specific data structures */ +#define PMEM_KERNEL_EBI1_DATA_NAME "pmem_kernel_ebi1" +#define PMEM_KERNEL_SMI_DATA_NAME "pmem_kernel_smi" struct android_pmem_platform_data { @@ -42,52 +140,31 @@ struct android_pmem_platform_data unsigned long start; /* size of memory region */ unsigned long size; - /* set to indicate the region should not be managed with an allocator */ - unsigned no_allocator; + + enum pmem_allocator_type allocator_type; + /* treated as a 'hidden' variable in the board files. Can be + * set, but default is the system init value of 0 which becomes a + * quantum of 4K pages. + */ + unsigned int quantum; + /* set to indicate maps of this region should be cached, if a mix of * cached and uncached is desired, set this and open the device with * O_SYNC to get an uncached region */ unsigned cached; /* The MSM7k has bits to enable a write buffer in the bus controller*/ unsigned buffered; + /* This PMEM is on memory that may be powered off */ + unsigned unstable; }; -struct pmem_region { - unsigned long offset; - unsigned long len; -}; - -#ifdef CONFIG_ANDROID_PMEM -int is_pmem_file(struct file *file); -int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart, - unsigned long *end, struct file **filp); -int get_pmem_user_addr(struct file *file, unsigned long *start, - unsigned long *end); -void put_pmem_file(struct file* file); -void flush_pmem_file(struct file *file, unsigned long start, unsigned long len); int pmem_setup(struct android_pmem_platform_data *pdata, long (*ioctl)(struct file *, unsigned int, unsigned long), int (*release)(struct inode *, struct file *)); + int pmem_remap(struct pmem_region *region, struct file *file, unsigned operation); - -#else -static inline int is_pmem_file(struct file *file) { return 0; } -static inline int get_pmem_file(int fd, unsigned long *start, - unsigned long *vstart, unsigned long *end, - struct file **filp) { return -ENOSYS; } -static inline int get_pmem_user_addr(struct file *file, unsigned long *start, - unsigned long *end) { return -ENOSYS; } -static inline void put_pmem_file(struct file* file) { return; } -static inline void flush_pmem_file(struct file *file, unsigned long start, - unsigned long len) { return; } -static inline int pmem_setup(struct android_pmem_platform_data *pdata, - long (*ioctl)(struct file *, unsigned int, unsigned long), - int (*release)(struct inode *, struct file *)) { return -ENOSYS; } - -static inline int pmem_remap(struct pmem_region *region, struct file *file, - unsigned operation) { return -ENOSYS; } -#endif +#endif /* __KERNEL__ */ #endif //_ANDROID_PPP_H_ diff --git a/include/linux/ashmem.h b/include/linux/ashmem.h index 1976b10ef93eb..25a190e12ab79 100644 --- a/include/linux/ashmem.h +++ b/include/linux/ashmem.h @@ -44,5 +44,12 @@ struct ashmem_pin { #define ASHMEM_UNPIN 
_IOW(__ASHMEMIOC, 8, struct ashmem_pin) #define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9) #define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10) +#define ASHMEM_CACHE_FLUSH_RANGE _IO(__ASHMEMIOC, 11) +#define ASHMEM_CACHE_CLEAN_RANGE _IO(__ASHMEMIOC, 12) +#define ASHMEM_CACHE_INV_RANGE _IO(__ASHMEMIOC, 13) + +int get_ashmem_file(int fd, struct file **filp, struct file **vm_file, + unsigned long *len); +void put_ashmem_file(struct file *file); #endif /* _LINUX_ASHMEM_H */ diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h index 475f8c42c0e92..381f4cec82602 100644 --- a/include/linux/atmdev.h +++ b/include/linux/atmdev.h @@ -443,6 +443,7 @@ void atm_dev_signal_change(struct atm_dev *dev, char signal); void vcc_insert_socket(struct sock *sk); +void atm_dev_release_vccs(struct atm_dev *dev); /* * This is approximately the algorithm used by alloc_skb. diff --git a/include/linux/atmel_qt602240.h b/include/linux/atmel_qt602240.h new file mode 100644 index 0000000000000..8d468aadd25ce --- /dev/null +++ b/include/linux/atmel_qt602240.h @@ -0,0 +1,337 @@ +#ifndef _LINUX_ATMEL_H +#define _LINUX_ATMEL_H + +#include + +#define ATMEL_QT602240_NAME "atmel_qt602240" + +#define INFO_BLK_FID 0 +#define INFO_BLK_VID 1 +#define INFO_BLK_VER 2 +#define INFO_BLK_BUILD 3 +#define INFO_BLK_XSIZE 4 +#define INFO_BLK_YSIZE 5 +#define INFO_BLK_OBJS 6 + +#define OBJ_TABLE_TYPE 0 +#define OBJ_TABLE_LSB 1 +#define OBJ_TABLE_MSB 2 +#define OBJ_TABLE_SIZE 3 +#define OBJ_TABLE_INSTANCES 4 +#define OBJ_TABLE_RIDS 5 + +#define RESERVED_T0 0u +#define RESERVED_T1 1u +#define DEBUG_DELTAS_T2 2u +#define DEBUG_REFERENCES_T3 3u +#define DEBUG_SIGNALS_T4 4u +#define GEN_MESSAGEPROCESSOR_T5 5u +#define GEN_COMMANDPROCESSOR_T6 6u +#define GEN_POWERCONFIG_T7 7u +#define GEN_ACQUISITIONCONFIG_T8 8u +#define TOUCH_MULTITOUCHSCREEN_T9 9u +#define TOUCH_SINGLETOUCHSCREEN_T10 10u +#define TOUCH_XSLIDER_T11 11u +#define TOUCH_YSLIDER_T12 12u +#define TOUCH_XWHEEL_T13 13u +#define TOUCH_YWHEEL_T14 14u +#define TOUCH_KEYARRAY_T15 15u +#define PROCG_SIGNALFILTER_T16 16u +#define PROCI_LINEARIZATIONTABLE_T17 17u +#define SPT_COMCONFIG_T18 18u +#define SPT_GPIOPWM_T19 19u +#define PROCI_GRIPFACESUPPRESSION_T20 20u +#define RESERVED_T21 21u +#define PROCG_NOISESUPPRESSION_T22 22u +#define TOUCH_PROXIMITY_T23 23u +#define PROCI_ONETOUCHGESTUREPROCESSOR_T24 24u +#define SPT_SELFTEST_T25 25u +#define DEBUG_CTERANGE_T26 26u +#define PROCI_TWOTOUCHGESTUREPROCESSOR_T27 27u +#define SPT_CTECONFIG_T28 28u +#define SPT_GPI_T29 29u +#define SPT_GATE_T30 30u +#define TOUCH_KEYSET_T31 31u +#define TOUCH_XSLIDERSET_T32 32u +#define DIAGNOSTIC_T37 37u + +#define T37_PAGE_SIZE 128 + +#define T37_TCH_FLAG_SIZE 80 +#define T37_TCH_FLAG_IDX 0 +#define T37_ATCH_FLAG_IDX 40 + +#define T37_MODE 0 +#define T37_PAGE 1 +#define T37_DATA 2 /* n bytes */ + +#define T37_PAGE_NUM0 0 +#define T37_PAGE_NUM1 1 +#define T37_PAGE_NUM2 2 +#define T37_PAGE_NUM3 3 + +#define MSG_RID 0 + +#define T6_CFG_RESET 0 +#define T6_CFG_BACKUPNV 1 +#define T6_CFG_CALIBRATE 2 +#define T6_CFG_REPORTALL 3 +/* Reserved */ +#define T6_CFG_DIAG 5 + +#define T6_CFG_DIAG_CMD_PAGEUP 0x01 +#define T6_CFG_DIAG_CMD_PAGEDOWN 0x02 +#define T6_CFG_DIAG_CMD_DELTAS 0x10 +#define T6_CFG_DIAG_CMD_REF 0x11 +#define T6_CFG_DIAG_CMD_CTE 0x31 +#define T6_CFG_DIAG_CMD_TCH 0xF3 + +#define T6_MSG_STATUS 1 +#define T6_MSG_CHECKSUM 2 /* three bytes */ + +#define T6_MSG_STATUS_COMSERR BIT(2) +#define T6_MSG_STATUS_CFGERR BIT(3) +#define T6_MSG_STATUS_CAL BIT(4) +#define T6_MSG_STATUS_SIGERR BIT(5) +#define 
T6_MSG_STATUS_OFL BIT(6) +#define T6_MSG_STATUS_RESET BIT(7) + +#define T7_CFG_IDLEACQINT 0 +#define T7_CFG_ACTVACQINT 1 +#define T7_CFG_ACTV2IDLETO 2 + +#define T8_CFG_CHRGTIME 0 +/* Reserved */ +#define T8_CFG_TCHDRIFT 2 +#define T8_CFG_DRIFTST 3 +#define T8_CFG_TCHAUTOCAL 4 +#define T8_CFG_SYNC 5 +#define T8_CFG_ATCHCALST 6 +#define T8_CFG_ATCHCALSTHR 7 +#define T8_CFG_ATCHFRCCALTHR 8 /* FW v2.x */ +#define T8_CFG_ATCHFRCCALRATIO 9 /* FW v2.x */ + +#define T9_CFG_CTRL 0 +#define T9_CFG_XORIGIN 1 +#define T9_CFG_YORIGIN 2 +#define T9_CFG_XSIZE 3 +#define T9_CFG_YSIZE 4 +#define T9_CFG_AKSCFG 5 +#define T9_CFG_BLEN 6 +#define T9_CFG_TCHTHR 7 +#define T9_CFG_TCHDI 8 +#define T9_CFG_ORIENT 9 +#define T9_CFG_MRGTIMEOUT 10 +#define T9_CFG_MOVHYSTI 11 +#define T9_CFG_MOVHYSTN 12 +#define T9_CFG_MOVFILTER 13 +#define T9_CFG_NUMTOUCH 14 +#define T9_CFG_MRGHYST 15 +#define T9_CFG_MRGTHR 16 +#define T9_CFG_AMPHYST 17 +#define T9_CFG_XRANGE 18 /* two bytes */ +#define T9_CFG_YRANGE 20 /* two bytes */ +#define T9_CFG_XLOCLIP 22 +#define T9_CFG_XHICLIP 23 +#define T9_CFG_YLOCLIP 24 +#define T9_CFG_YHICLIP 25 +#define T9_CFG_XEDGECTRL 26 +#define T9_CFG_XEDGEDIST 27 +#define T9_CFG_YEDGECTRL 28 +#define T9_CFG_YEDGEDIST 29 +#define T9_CFG_JUMPLIMIT 30 +#define T9_CFG_TCHHYST 31 /* FW v2.x */ + +#define T9_MSG_STATUS 1 +#define T9_MSG_XPOSMSB 2 +#define T9_MSG_YPOSMSB 3 +#define T9_MSG_XYPOSLSB 4 +#define T9_MSG_TCHAREA 5 +#define T9_MSG_TCHAMPLITUDE 6 +#define T9_MSG_TCHVECTOR 7 + +#define T9_MSG_STATUS_UNGRIP BIT(0) /* FW v2.x */ +#define T9_MSG_STATUS_SUPPRESS BIT(1) +#define T9_MSG_STATUS_AMP BIT(2) +#define T9_MSG_STATUS_VECTOR BIT(3) +#define T9_MSG_STATUS_MOVE BIT(4) +#define T9_MSG_STATUS_RELEASE BIT(5) +#define T9_MSG_STATUS_PRESS BIT(6) +#define T9_MSG_STATUS_DETECT BIT(7) + +#define T20_CFG_CTRL 0 +#define T20_CFG_XLOGRIP 1 +#define T20_CFG_XHIGRIP 2 +#define T20_CFG_YLOGRIP 3 +#define T20_CFG_YHIGRIP 4 +#define T20_CFG_MAXTCHS 5 +/* Reserved */ +#define T20_CFG_SZTHR1 7 +#define T20_CFG_SZTHR2 8 +#define T20_CFG_SHPTHR1 9 +#define T20_CFG_SHPTHR2 10 +#define T20_CFG_SHPEXTTO 11 + +#define T20_MSG_STATUS 1 + +#define T20_MSG_STATUS_FACESUP BIT(0) + +#define T22_CFG_CTRL 0 +/* Reserved */ +#define T22_CFG_GCAFUL 3 /* two bytes */ +#define T22_CFG_GCAFLL 5 /* two bytes */ +#define T22_CFG_ACTVGCAFVALID 7 +#define T22_CFG_NOISETHR 8 +/* Reserved */ +#define T22_CFG_FREQHOPSCALE 10 +#define T22_CFG_FREQ 11 /* five bytes */ +#define T22_CFG_IDLEGCAFVAILD 16 + +#define T22_MSG_STATUS 1 +#define T22_MSG_GCAFDEPTH 2 +#define T22_MSG_FREQINDEX 3 + +#define T22_MSG_STATUS_FHCHG BIT(0) +#define T22_MSG_STATUS_GCAFERR BIT(2) +#define T22_MSG_STATUS_FHERR BIT(3) +#define T22_MSG_STATUS_GCAFCHG BIT(4) + +#define T19_CFG_CTRL 0 +#define T19_CFG_REPORTMASK 1 +#define T19_CFG_DIR 2 +#define T19_CFG_INTPULLUP 3 +#define T19_CFG_OUT 4 +#define T19_CFG_WAKE 5 +#define T19_CFG_PWM 6 +#define T19_CFG_PERIOD 7 +#define T19_CFG_DUTY0 8 +#define T19_CFG_DUTY1 9 +#define T19_CFG_DUTY2 10 +#define T19_CFG_DUTY3 11 +#define T19_CFG_TRIGGER0 12 +#define T19_CFG_TRIGGER1 13 +#define T19_CFG_TRIGGER2 14 +#define T19_CFG_TRIGGER3 15 + +#define T19_CFG_CTRL_ENABLE BIT(0) +#define T19_CFG_CTRL_RPTEN BIT(1) +#define T19_CFG_CTRL_FORCERPT BIT(2) + +#define T19_MSG_STATUS 1 + +#define T25_CFG_CTRL 0 +#define T25_CFG_CMD 1 + +#define T25_MSG_STATUS 1 +#define T25_MSG_INFO 2 /* five bytes */ + +#define T28_CFG_CTRL 0 +#define T28_CFG_CMD 1 +#define T28_CFG_MODE 2 +#define T28_CFG_IDLEGCAFDEPTH 3 +#define T28_CFG_ACTVGCAFDEPTH 4 
+#define T28_CFG_VOLTAGE 5 + +#define T28_CFG_MODE0_X 16 +#define T28_CFG_MODE0_Y 14 + +#define T28_MSG_STATUS 1 + +/* cable_config[] of atmel_i2c_platform_data */ +/* config[] of atmel_config_data */ +#define CB_TCHTHR 0 +#define CB_NOISETHR 1 +#define CB_IDLEGCAFDEPTH 2 +#define CB_ACTVGCAFDEPTH 3 + +#define NC_TCHTHR 0 +#define NC_TCHDI 1 +#define NC_NOISETHR 2 + +/* filter_level */ +#define FL_XLOGRIPMIN 0 +#define FL_XLOGRIPMAX 1 +#define FL_XHIGRIPMIN 2 +#define FL_XHIGRIPMAX 3 + +struct info_id_t { + uint8_t family_id; + uint8_t variant_id; + uint8_t version; + uint8_t build; + uint8_t matrix_x_size; + uint8_t matrix_y_size; + uint8_t num_declared_objects; +}; + +struct object_t { + uint8_t object_type; + uint16_t i2c_address; + uint8_t size; + uint8_t instances; + uint8_t num_report_ids; + uint8_t report_ids; +}; + +struct atmel_virtual_key { + int keycode; + int range_min; + int range_max; +}; + +struct atmel_finger_data { + int x; + int y; + int w; + int z; +}; + +struct atmel_i2c_platform_data { + uint16_t version; + uint16_t source; + uint16_t abs_x_min; + uint16_t abs_x_max; + uint16_t abs_y_min; + uint16_t abs_y_max; + uint8_t abs_pressure_min; + uint8_t abs_pressure_max; + uint8_t abs_width_min; + uint8_t abs_width_max; + int gpio_irq; + int (*power)(int on); + int8_t config_T6[6]; + int8_t config_T7[3]; + int8_t config_T8[10]; + int8_t config_T9[32]; + int8_t config_T15[11]; + int8_t config_T19[12]; + int8_t config_T20[12]; + int8_t config_T22[17]; + int8_t config_T23[13]; + int8_t config_T24[19]; + int8_t config_T25[14]; + int8_t config_T27[7]; + int8_t config_T28[6]; + uint8_t object_crc[3]; + int8_t cable_config[4]; + int8_t cable_config_T7[3]; + int8_t cable_config_T8[10]; + int8_t cable_config_T9[32]; + int8_t cable_config_T22[17]; + int8_t cable_config_T28[6]; + int8_t noise_config[3]; + uint16_t filter_level[4]; + uint8_t GCAF_level[5]; +}; + +struct atmel_config_data { + int8_t config[4]; + int8_t *config_T7; + int8_t *config_T8; + int8_t *config_T9; + int8_t *config_T22; + int8_t *config_T28; +}; + +#endif + diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index daf8c480c7867..50e2c16b20f64 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -45,6 +45,7 @@ * bitmap_set(dst, pos, nbits) Set specified bit area * bitmap_clear(dst, pos, nbits) Clear specified bit area * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area + * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) @@ -55,7 +56,8 @@ * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf * bitmap_scnlistprintf(buf, len, src, nbits) Print bitmap src as list to buf - * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from list + * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf + * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region * bitmap_release_region(bitmap, pos, order) Free specified bit region * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region @@ -113,11 +115,24 @@ extern int __bitmap_weight(const unsigned long *bitmap, int bits); extern void bitmap_set(unsigned long *map, int i, int len); extern void bitmap_clear(unsigned long *map, int 
start, int nr); -extern unsigned long bitmap_find_next_zero_area(unsigned long *map, - unsigned long size, - unsigned long start, - unsigned int nr, - unsigned long align_mask); + +extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned int nr, + unsigned long align_mask, + unsigned long align_offset); + +static inline unsigned long +bitmap_find_next_zero_area(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned int nr, + unsigned long align_mask) +{ + return bitmap_find_next_zero_area_off(map, size, start, nr, + align_mask, 0); +} extern int bitmap_scnprintf(char *buf, unsigned int len, const unsigned long *src, int nbits); @@ -129,6 +144,8 @@ extern int bitmap_scnlistprintf(char *buf, unsigned int len, const unsigned long *src, int nbits); extern int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits); +extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen, + unsigned long *dst, int nbits); extern void bitmap_remap(unsigned long *dst, const unsigned long *src, const unsigned long *old, const unsigned long *new, int bits); extern int bitmap_bitremap(int oldbit, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index d5063e1b55559..65661479a232c 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -252,7 +252,7 @@ struct queue_limits { unsigned char misaligned; unsigned char discard_misaligned; unsigned char cluster; - signed char discard_zeroes_data; + unsigned char discard_zeroes_data; }; struct request_queue @@ -1032,13 +1032,16 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector { unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1); + if (!lim->max_discard_sectors) + return 0; + return (lim->discard_granularity + lim->discard_alignment - alignment) & (lim->discard_granularity - 1); } static inline unsigned int queue_discard_zeroes_data(struct request_queue *q) { - if (q->limits.discard_zeroes_data == 1) + if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1) return 1; return 0; diff --git a/include/linux/bma150.h b/include/linux/bma150.h new file mode 100644 index 0000000000000..dd6ee393d2dcf --- /dev/null +++ b/include/linux/bma150.h @@ -0,0 +1,84 @@ +/* + * Definitions for BMA150 G-sensor chip. 
+ */ +#ifndef BMA150_H +#define BMA150_H + +#include + +#define BMA150_I2C_NAME "bma150" +#ifdef CONFIG_SPI_QSD +#define BMA150_G_SENSOR_NAME "bma150_uP_spi" +#else +#define BMA150_G_SENSOR_NAME "bma150" +#endif + +#define BMAIO 0xA1 + +/* BMA150 register address */ +#define CHIP_ID_REG 0x00 +#define VERSION_REG 0x01 +#define X_AXIS_LSB_REG 0x02 +#define X_AXIS_MSB_REG 0x03 +#define Y_AXIS_LSB_REG 0x04 +#define Y_AXIS_MSB_REG 0x05 +#define Z_AXIS_LSB_REG 0x06 +#define Z_AXIS_MSB_REG 0x07 +#define TEMP_RD_REG 0x08 +#define SMB150_STATUS_REG 0x09 +#define SMB150_CTRL_REG 0x0a +#define SMB150_CONF1_REG 0x0b +#define LG_THRESHOLD_REG 0x0c +#define LG_DURATION_REG 0x0d +#define HG_THRESHOLD_REG 0x0e +#define HG_DURATION_REG 0x0f +#define MOTION_THRS_REG 0x10 +#define HYSTERESIS_REG 0x11 +#define CUSTOMER1_REG 0x12 +#define CUSTOMER2_REG 0x13 +#define RANGE_BWIDTH_REG 0x14 +#define SMB150_CONF2_REG 0x15 + +#define OFFS_GAIN_X_REG 0x16 +#define OFFS_GAIN_Y_REG 0x17 +#define OFFS_GAIN_Z_REG 0x18 +#define OFFS_GAIN_T_REG 0x19 +#define OFFSET_X_REG 0x1a +#define OFFSET_Y_REG 0x1b +#define OFFSET_Z_REG 0x1c +#define OFFSET_T_REG 0x1d + + +/* IOCTLs*/ +#define BMA_IOCTL_INIT _IO(BMAIO, 0x31) +#define BMA_IOCTL_WRITE _IOW(BMAIO, 0x32, char[5]) +#define BMA_IOCTL_READ _IOWR(BMAIO, 0x33, char[5]) +#define BMA_IOCTL_READ_ACCELERATION _IOWR(BMAIO, 0x34, short[7]) +#define BMA_IOCTL_SET_MODE _IOW(BMAIO, 0x35, short) +#define BMA_IOCTL_GET_INT _IOR(BMAIO, 0x36, short) +#define BMA_IOCTL_GET_CHIP_LAYOUT _IOR(BMAIO, 0x37, short) + +/* range and bandwidth */ +#define BMA_RANGE_2G 0 +#define BMA_RANGE_4G 1 +#define BMA_RANGE_8G 2 + +#define BMA_BW_25HZ 0 +#define BMA_BW_50HZ 1 +#define BMA_BW_100HZ 2 +#define BMA_BW_190HZ 3 +#define BMA_BW_375HZ 4 +#define BMA_BW_750HZ 5 +#define BMA_BW_1500HZ 6 + +/* mode settings */ +#define BMA_MODE_NORMAL 0 +#define BMA_MODE_SLEEP 1 + +struct bma150_platform_data { + int intr; + int microp_new_cmd; + int chip_layout; +}; + +#endif diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 499dfe982a0e8..552875fae0f04 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -115,6 +115,8 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_node(pgdat, x) \ __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_node_nopanic(pgdat, x) \ + __alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_pages_node(pgdat, x) \ __alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_pages_node_nopanic(pgdat, x) \ diff --git a/include/linux/capella_cm3602.h b/include/linux/capella_cm3602.h new file mode 100644 index 0000000000000..cf09da0c18aa9 --- /dev/null +++ b/include/linux/capella_cm3602.h @@ -0,0 +1,38 @@ +/* include/linux/capella_cm3602.h + * + * Copyright (C) 2009 Google, Inc. + * Author: Iliyan Malchev + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __LINUX_CAPELLA_CM3602_H +#define __LINUX_CAPELLA_CM3602_H + +#include +#include + +#define CAPELLA_CM3602_IOCTL_MAGIC 'c' +#define CAPELLA_CM3602_IOCTL_GET_ENABLED \ + _IOR(CAPELLA_CM3602_IOCTL_MAGIC, 1, int *) +#define CAPELLA_CM3602_IOCTL_ENABLE \ + _IOW(CAPELLA_CM3602_IOCTL_MAGIC, 2, int *) + +#ifdef __KERNEL__ +#define CAPELLA_CM3602 "capella_cm3602" +struct capella_cm3602_platform_data { + int (*power)(int); /* power to the chip */ + int p_out; /* proximity-sensor output */ + int p_en; +}; +#endif /* __KERNEL__ */ + +#endif diff --git a/include/linux/capella_cm3602_htc.h b/include/linux/capella_cm3602_htc.h new file mode 100644 index 0000000000000..0aaf5c9c8bc4f --- /dev/null +++ b/include/linux/capella_cm3602_htc.h @@ -0,0 +1,42 @@ +/* include/linux/capella_cm3602.h + * + * Copyright (C) 2009 Google, Inc. + * Author: Iliyan Malchev + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __LINUX_CAPELLA_CM3602_H +#define __LINUX_CAPELLA_CM3602_H + +#include +#include + +#define CAPELLA_CM3602_IOCTL_MAGIC 'c' +#define CAPELLA_CM3602_IOCTL_GET_ENABLED \ + _IOR(CAPELLA_CM3602_IOCTL_MAGIC, 1, int *) +#define CAPELLA_CM3602_IOCTL_ENABLE \ + _IOW(CAPELLA_CM3602_IOCTL_MAGIC, 2, int *) + +#ifdef __KERNEL__ +#define CAPELLA_CM3602 "capella_cm3602" +#define LS_PWR_ON (1 << 0) +#define PS_PWR_ON (1 << 1) +struct capella_cm3602_platform_data { + int (*power)(int, uint8_t); /* power to the chip */ + int (*enable)(uint8_t); /* enable to the chip */ + int p_out; /* proximity-sensor output */ + int p_en; /* proximity-sensor enable */ + int irq; +}; +#endif /* __KERNEL__ */ + +#endif diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index ccefff02b6cb0..37f523b85dd6d 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h @@ -66,3 +66,9 @@ SUBSYS(blkio) #endif /* */ + +#ifdef CONFIG_CGROUP_BFQIO +SUBSYS(bfqio) +#endif + +/* */ diff --git a/include/linux/compaction.h b/include/linux/compaction.h index dfa2ed4c0d26a..32e34242d8635 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -11,9 +11,6 @@ /* The full zone was compacted */ #define COMPACT_COMPLETE 3 -#define COMPACT_MODE_DIRECT_RECLAIM 0 -#define COMPACT_MODE_KSWAPD 1 - #ifdef CONFIG_COMPACTION extern int sysctl_compact_memory; extern int sysctl_compaction_handler(struct ctl_table *table, int write, @@ -28,8 +25,7 @@ extern unsigned long try_to_compact_pages(struct zonelist *zonelist, bool sync); extern unsigned long compaction_suitable(struct zone *zone, int order); extern unsigned long compact_zone_order(struct zone *zone, int order, - gfp_t gfp_mask, bool sync, - int compact_mode); + gfp_t gfp_mask, bool sync); /* Do not skip compaction more than 64 times */ #define COMPACT_MAX_DEFER_SHIFT 6 @@ -74,8 +70,7 @@ static inline unsigned long compaction_suitable(struct zone *zone, int order) } static inline unsigned long compact_zone_order(struct zone *zone, int order, - gfp_t gfp_mask, bool sync, - int compact_mode) + gfp_t gfp_mask, bool sync) { return COMPACT_CONTINUE; } @@ -89,6 +84,11 @@ static inline bool 
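A board-file sketch of wiring up the capella_cm3602 proximity sensor through the platform data above. The GPIO numbers, the power callback body, and the use of a platform device for registration are placeholders/assumptions, not mandated by this header.

#include <linux/platform_device.h>
#include <linux/capella_cm3602.h>

static int example_proximity_power(int on)
{
	/* board-specific regulator/GPIO handling would go here */
	return 0;
}

static struct capella_cm3602_platform_data example_cm3602_pdata = {
	.power = example_proximity_power,
	.p_out = 123,	/* placeholder GPIO: sensor output */
	.p_en  = 124,	/* placeholder GPIO: enable line */
};

static struct platform_device example_cm3602_device = {
	.name = CAPELLA_CM3602,
	.id   = -1,
	.dev  = {
		.platform_data = &example_cm3602_pdata,
	},
};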
compaction_deferred(struct zone *zone) return 1; } +static inline int compact_nodes(bool sync) +{ + return COMPACT_CONTINUE; +} + #endif /* CONFIG_COMPACTION */ #if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) diff --git a/include/linux/completion.h b/include/linux/completion.h index 51494e6b55487..a5b2e1ca5ceb1 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -77,6 +77,7 @@ static inline void init_completion(struct completion *x) } extern void wait_for_completion(struct completion *); +extern void wait_for_completion_io(struct completion *); extern int wait_for_completion_interruptible(struct completion *x); extern int wait_for_completion_killable(struct completion *x); extern unsigned long wait_for_completion_timeout(struct completion *x, diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 5f09323ee8808..97f1ca76b4aa4 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -174,4 +174,11 @@ static inline int disable_nonboot_cpus(void) { return 0; } static inline void enable_nonboot_cpus(void) {} #endif /* !CONFIG_PM_SLEEP_SMP */ +#define IDLE_START 1 +#define IDLE_END 2 + +void idle_notifier_register(struct notifier_block *n); +void idle_notifier_unregister(struct notifier_block *n); +void idle_notifier_call_chain(unsigned long val); + #endif /* _LINUX_CPU_H_ */ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index e71e0f6ecd58e..9bcfa569984ee 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -3,7 +3,7 @@ * * Copyright (C) 2001 Russell King * (C) 2002 - 2003 Dominik Brodowski - * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. @@ -56,9 +56,13 @@ static inline int cpufreq_unregister_notifier(struct notifier_block *nb, #define CPUFREQ_POLICY_POWERSAVE (1) #define CPUFREQ_POLICY_PERFORMANCE (2) -/* Frequency values here are CPU kHz so that hardware which doesn't run - * with some frequencies can complain without having to guess what per - * cent / per mille means. +/* Minimum frequency cutoff to notify the userspace about cpu utilization + * changes */ +#define MIN_CPU_UTIL_NOTIFY 40 + +/* Frequency values here are CPU kHz so that hardware which doesn't run + * with some frequencies can complain without having to guess what per + * cent / per mille means. * Maximum transition latency is in nanoseconds - if it's unknown, * CPUFREQ_ETERNAL shall be used. 
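A minimal sketch of using the idle notifier hooks added to linux/cpu.h above. The notifier receives IDLE_START just before the CPU enters idle and IDLE_END when it leaves; the callback name and the work done in each branch are placeholders.

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/notifier.h>

static int example_idle_notifier(struct notifier_block *nb,
				 unsigned long val, void *data)
{
	switch (val) {
	case IDLE_START:
		/* CPU about to idle: pause cheap housekeeping */
		break;
	case IDLE_END:
		/* CPU left idle: resume normal operation */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_idle_nb = {
	.notifier_call = example_idle_notifier,
};

static int __init example_idle_init(void)
{
	idle_notifier_register(&example_idle_nb);
	return 0;
}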
*/ @@ -72,13 +76,15 @@ extern struct kobject *cpufreq_global_kobject; struct cpufreq_cpuinfo { unsigned int max_freq; unsigned int min_freq; - unsigned int transition_latency; /* in 10^(-9) s = nanoseconds */ + + /* in 10^(-9) s = nanoseconds */ + unsigned int transition_latency; }; struct cpufreq_real_policy { unsigned int min; /* in kHz */ unsigned int max; /* in kHz */ - unsigned int policy; /* see above */ + unsigned int policy; /* see above */ struct cpufreq_governor *governor; /* see below */ }; @@ -94,7 +100,8 @@ struct cpufreq_policy { unsigned int max; /* in kHz */ unsigned int cur; /* in kHz, only needed if cpufreq * governors are used */ - unsigned int policy; /* see above */ + unsigned int util; /* CPU utilization at max frequency */ + unsigned int policy; /* see above */ struct cpufreq_governor *governor; /* see below */ struct work_struct update; /* if update_policy() needs to be @@ -167,11 +174,11 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, u_int mu struct cpufreq_governor { char name[CPUFREQ_NAME_LEN]; - int (*governor) (struct cpufreq_policy *policy, + int (*governor) (struct cpufreq_policy *policy, unsigned int event); ssize_t (*show_setspeed) (struct cpufreq_policy *policy, char *buf); - int (*store_setspeed) (struct cpufreq_policy *policy, + int (*store_setspeed) (struct cpufreq_policy *policy, unsigned int freq); unsigned int max_transition_latency; /* HW must be able to switch to next freq faster than this value in nano secs or we @@ -180,7 +187,8 @@ struct cpufreq_governor { struct module *owner; }; -/* pass a target to the cpufreq driver +/* + * Pass a target to the cpufreq driver. */ extern int cpufreq_driver_target(struct cpufreq_policy *policy, unsigned int target_freq, @@ -196,6 +204,8 @@ extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy, int cpufreq_register_governor(struct cpufreq_governor *governor); void cpufreq_unregister_governor(struct cpufreq_governor *governor); +int lock_policy_rwsem_write(int cpu); +void unlock_policy_rwsem_write(int cpu); /********************************************************************* * CPUFREQ DRIVER INTERFACE * @@ -230,16 +240,16 @@ struct cpufreq_driver { int (*bios_limit) (int cpu, unsigned int *limit); int (*exit) (struct cpufreq_policy *policy); - int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg); + int (*suspend) (struct cpufreq_policy *policy); int (*resume) (struct cpufreq_policy *policy); struct freq_attr **attr; }; /* flags */ -#define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if +#define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if * all ->init() calls failed */ -#define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel +#define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel * "constants" aren't affected by * frequency transitions */ #define CPUFREQ_PM_NO_WARN 0x04 /* don't warn on suspend/resume speed @@ -250,9 +260,10 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state); +void cpufreq_notify_utilization(struct cpufreq_policy *policy, + unsigned int load); - -static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max) +static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max) { if (policy->min < min) policy->min = min; @@ -281,19 +292,10 @@ __ATTR(_name, 0444, show_##_name, NULL) static struct freq_attr 
_name = \ __ATTR(_name, _perm, show_##_name, NULL) -#define cpufreq_freq_attr_ro_old(_name) \ -static struct freq_attr _name##_old = \ -__ATTR(_name, 0444, show_##_name##_old, NULL) - #define cpufreq_freq_attr_rw(_name) \ static struct freq_attr _name = \ __ATTR(_name, 0644, show_##_name, store_##_name) -#define cpufreq_freq_attr_rw_old(_name) \ -static struct freq_attr _name##_old = \ -__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old) - - struct global_attr { struct attribute attr; ssize_t (*show)(struct kobject *kobj, @@ -367,6 +369,9 @@ extern struct cpufreq_governor cpufreq_gov_conservative; #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE) extern struct cpufreq_governor cpufreq_gov_interactive; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_interactive) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2) +extern struct cpufreq_governor cpufreq_gov_smartass2; +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_smartass2) #endif @@ -398,34 +403,15 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, /* the following 3 funtions are for cpufreq core use only */ struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu); struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); -void cpufreq_cpu_put (struct cpufreq_policy *data); +void cpufreq_cpu_put(struct cpufreq_policy *data); /* the following are really really optional */ extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; -void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, +void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, unsigned int cpu); void cpufreq_frequency_table_put_attr(unsigned int cpu); -/********************************************************************* - * UNIFIED DEBUG HELPERS * - *********************************************************************/ - -#define CPUFREQ_DEBUG_CORE 1 -#define CPUFREQ_DEBUG_DRIVER 2 -#define CPUFREQ_DEBUG_GOVERNOR 4 - -#ifdef CONFIG_CPU_FREQ_DEBUG - -extern void cpufreq_debug_printk(unsigned int type, const char *prefix, - const char *fmt, ...); - -#else - -#define cpufreq_debug_printk(msg...) 
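A governor-side sketch of feeding the new per-policy utilization value via cpufreq_notify_utilization(), declared above. The load computation and the scaling to the maximum frequency (matching the "util: CPU utilization at max frequency" field comment) are illustrative assumptions.

#include <linux/cpufreq.h>

static void example_report_load(struct cpufreq_policy *policy,
				unsigned int busy_time, unsigned int total_time)
{
	unsigned int load;

	if (!total_time)
		return;

	load = (unsigned int)(100ULL * busy_time / total_time); /* % at cur freq */
	load = load * policy->cur / policy->max;		 /* % at max freq */
	cpufreq_notify_utilization(policy, load);
}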
do { } while(0) - -#endif /* CONFIG_CPU_FREQ_DEBUG */ - #endif /* _LINUX_CPUFREQ_H */ diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h index ec78a4bbe1d5b..f9452185b0190 100644 --- a/include/linux/cryptohash.h +++ b/include/linux/cryptohash.h @@ -3,7 +3,7 @@ #define SHA_DIGEST_WORDS 5 #define SHA_MESSAGE_BYTES (512 /*bits*/ / 8) -#define SHA_WORKSPACE_WORDS 80 +#define SHA_WORKSPACE_WORDS 16 void sha_init(__u32 *buf); void sha_transform(__u32 *digest, const char *data, __u32 *W); diff --git a/include/linux/curcial_oj.h b/include/linux/curcial_oj.h new file mode 100644 index 0000000000000..0a65cb7b6c501 --- /dev/null +++ b/include/linux/curcial_oj.h @@ -0,0 +1,43 @@ +#ifndef _CURCIAL_OJ_H +#define _CURCIAL_OJ_H +#include + +#define CURCIAL_OJ_NAME "curcial_oj" + +struct curcial_oj_platform_data { + struct input_dev *input_dev; + struct work_struct work; + bool click; + uint8_t key; + uint32_t last_key_time; + bool ap_code; + uint8_t degree; + uint8_t debugflag; + uint32_t last_click_time; + uint16_t interval; + uint8_t mdelay_time; + int8_t normal_th; + int8_t xy_ratio; + void (*oj_shutdown)(int); + int (*oj_poweron)(int); + void(*oj_adjust_xy)(uint8_t *, int16_t *, int16_t *); + int microp_version; + bool share_power; + bool swap; + int x; + int y; + uint8_t Xsteps[30]; + uint8_t Ysteps[30]; + uint16_t sht_tbl[10]; + uint8_t pxsum_tbl[10]; + int irq; +#ifdef CONFIG_HAS_EARLYSUSPEND + struct early_suspend early_suspend; +#endif + + unsigned irq_gpio; + +}; +void curcial_oj_send_key(unsigned int code, int value); + +#endif \ No newline at end of file diff --git a/include/linux/cyttsp.h b/include/linux/cyttsp.h new file mode 100644 index 0000000000000..a525a9f81812e --- /dev/null +++ b/include/linux/cyttsp.h @@ -0,0 +1,658 @@ +/* Header file for: + * Cypress TrueTouch(TM) Standard Product touchscreen drivers. + * include/linux/cyttsp.h + * + * Copyright (C) 2009, 2010 Cypress Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2, and only version 2, as published by the + * Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Cypress reserves the right to make changes without further notice + * to the materials described herein. Cypress does not assume any + * liability arising out of the application described herein. 
+ * + * Contact Cypress Semiconductor at www.cypress.com + * + */ + + +#ifndef __CYTTSP_H__ +#define __CYTTSP_H__ + +#include +#include +#include +#include +#include + +#include + +#define CYPRESS_TTSP_NAME "cyttsp" +#define CY_I2C_NAME "cyttsp-i2c" +#define CY_SPI_NAME "cyttsp-spi" + +#ifdef CY_DECLARE_GLOBALS + uint32_t cyttsp_tsdebug; + module_param_named(tsdebug, cyttsp_tsdebug, uint, 0664); + uint32_t cyttsp_tsxdebug; + module_param_named(tsxdebug, cyttsp_tsxdebug, uint, 0664); + + uint32_t cyttsp_disable_touch; + module_param_named(disable_touch, cyttsp_disable_touch, uint, 0664); +#else + extern uint32_t cyttsp_tsdebug; + extern uint32_t cyttsp_tsxdebug; + extern uint32_t cyttsp_disable_touch; +#endif + + + +/****************************************************************************** + * Global Control, Used to control the behavior of the driver + */ + +/* defines for Gen2 (Txx2xx); Gen3 (Txx3xx) + * use these defines to set cyttsp_platform_data.gen in board config file + */ +#define CY_GEN2 2 +#define CY_GEN3 3 + +/* define for using I2C driver + */ +#define CY_USE_I2C_DRIVER + +/* defines for using SPI driver */ +/* +#define CY_USE_SPI_DRIVER + */ +#define CY_SPI_DFLT_SPEED_HZ 1000000 +#define CY_SPI_MAX_SPEED_HZ 4000000 +#define CY_SPI_SPEED_HZ CY_SPI_DFLT_SPEED_HZ +#define CY_SPI_BITS_PER_WORD 8 +#define CY_SPI_DAV 139 /* set correct gpio id */ +#define CY_SPI_BUFSIZE 512 + + +/* define for inclusion of TTSP App Update Load File + * use this define if update to the TTSP Device is desired + */ +/* +#define CY_INCLUDE_LOAD_FILE +*/ + +/* define if force new load file for bootloader load */ +/* +#define CY_FORCE_FW_UPDATE +*/ + +/* undef for production use */ +/* +#define CY_USE_DEBUG +*/ + +/* undef for irq use; use this define in the board configuration file */ +/* +#define CY_USE_TIMER + */ + +/* undef to allow use of extra debug capability */ +/* +#define CY_ALLOW_EXTRA_DEBUG +*/ + +/* undef to remove additional debug prints */ +/* +#define CY_USE_EXTRA_DEBUG +*/ + +/* undef to remove additional debug prints */ +/* +#define CY_USE_EXTRA_DEBUG1 + */ + +/* undef to use operational touch timer jiffies; else use test jiffies */ +/* + */ +#define CY_USE_TIMER_DEBUG + +/* define to use canned test data */ +/* +#define CY_USE_TEST_DATA + */ + +/* define to activate power management */ +/* +#define CY_USE_LOW_POWER + */ + +/* define if wake on i2c addr is activated */ +/* +#define CY_USE_DEEP_SLEEP + */ + +/* define if gesture signaling is used + * and which gesture groups to use + */ +/* +#define CY_USE_GEST +#define CY_USE_GEST_GRP1 +#define CY_USE_GEST_GRP2 +#define CY_USE_GEST_GRP3 +#define CY_USE_GEST_GRP4 + */ +/* Active distance in pixels for a gesture to be reported + * if set to 0, then all gesture movements are reported + */ +#define CY_ACT_DIST_DFLT 8 +#define CY_ACT_DIST CY_ACT_DIST_DFLT + +/* define if MT signals are desired */ +/* +*/ +#define CY_USE_MT_SIGNALS + +/* define if MT tracking id signals are used */ +/* +#define CY_USE_MT_TRACK_ID + */ + +/* define if ST signals are required */ +/* +*/ +#define CY_USE_ST_SIGNALS + +/* define to send handshake to device */ +/* +*/ +#define CY_USE_HNDSHK + +/* define if log all raw motion signals to a sysfs file */ +/* +#define CY_LOG_TO_FILE +*/ + + +/* End of the Global Control section + ****************************************************************************** + */ +#define CY_DIFF(m, n) ((m) != (n)) + +#ifdef CY_LOG_TO_FILE + #define cyttsp_openlog() /* use sysfs */ +#else + #define cyttsp_openlog() +#endif /* 
CY_LOG_TO_FILE */ + +/* see kernel.h for pr_xxx def'ns */ +#define cyttsp_info(f, a...) pr_info("%s:" f, __func__ , ## a) +#define cyttsp_error(f, a...) pr_err("%s:" f, __func__ , ## a) +#define cyttsp_alert(f, a...) pr_alert("%s:" f, __func__ , ## a) + +#ifdef CY_USE_DEBUG + #define cyttsp_debug(f, a...) pr_alert("%s:" f, __func__ , ## a) +#else + #define cyttsp_debug(f, a...) {if (cyttsp_tsdebug) \ + pr_alert("%s:" f, __func__ , ## a); } +#endif /* CY_USE_DEBUG */ + +#ifdef CY_ALLOW_EXTRA_DEBUG +#ifdef CY_USE_EXTRA_DEBUG + #define cyttsp_xdebug(f, a...) pr_alert("%s:" f, __func__ , ## a) +#else + #define cyttsp_xdebug(f, a...) {if (cyttsp_tsxdebug) \ + pr_alert("%s:" f, __func__ , ## a); } +#endif /* CY_USE_EXTRA_DEBUG */ + +#ifdef CY_USE_EXTRA_DEBUG1 + #define cyttsp_xdebug1(f, a...) pr_alert("%s:" f, __func__ , ## a) +#else + #define cyttsp_xdebug1(f, a...) +#endif /* CY_USE_EXTRA_DEBUG1 */ +#else + #define cyttsp_xdebug(f, a...) + #define cyttsp_xdebug1(f, a...) +#endif /* CY_ALLOW_EXTRA_DEBUG */ + +#ifdef CY_USE_TIMER_DEBUG + #define TOUCHSCREEN_TIMEOUT (msecs_to_jiffies(1000)) +#else + #define TOUCHSCREEN_TIMEOUT (msecs_to_jiffies(28)) +#endif + +/* reduce extra signals in MT only build + * be careful not to lose backward compatibility for pre-MT apps + */ +#ifdef CY_USE_ST_SIGNALS + #define CY_USE_ST 1 +#else + #define CY_USE_ST 0 +#endif /* CY_USE_ST_SIGNALS */ + +/* rely on kernel input.h to define Multi-Touch capability */ +/* if input.h defines the Multi-Touch signals, then use MT */ +#if defined(ABS_MT_TOUCH_MAJOR) && defined(CY_USE_MT_SIGNALS) + #define CY_USE_MT 1 + #define CY_MT_SYNC(input) input_mt_sync(input) +#else + #define CY_USE_MT 0 + #define CY_MT_SYNC(input) + /* the following includes are provided to ensure a compile; + * the code that compiles with these defines will not be executed if + * the CY_USE_MT is properly used in the platform structure init + */ + #ifndef ABS_MT_TOUCH_MAJOR + #define ABS_MT_TOUCH_MAJOR 0x30 /* touching ellipse */ + #define ABS_MT_TOUCH_MINOR 0x31 /* (omit if circular) */ + #define ABS_MT_WIDTH_MAJOR 0x32 /* approaching ellipse */ + #define ABS_MT_WIDTH_MINOR 0x33 /* (omit if circular) */ + #define ABS_MT_ORIENTATION 0x34 /* Ellipse orientation */ + #define ABS_MT_POSITION_X 0x35 /* Center X ellipse position */ + #define ABS_MT_POSITION_Y 0x36 /* Center Y ellipse position */ + #define ABS_MT_TOOL_TYPE 0x37 /* Type of touching device */ + #define ABS_MT_BLOB_ID 0x38 /* Group set of pkts as blob */ + #endif /* ABS_MT_TOUCH_MAJOR */ +#endif /* ABS_MT_TOUCH_MAJOR and CY_USE_MT_SIGNALS */ +#if defined(ABS_MT_TRACKING_ID) && defined(CY_USE_MT_TRACK_ID) + #define CY_USE_TRACKING_ID 1 +#else + #define CY_USE_TRACKING_ID 0 +/* define only if not defined already by system; + * value based on linux kernel 2.6.30.10 + */ +#ifndef ABS_MT_TRACKING_ID + #define ABS_MT_TRACKING_ID (ABS_MT_BLOB_ID+1) +#endif +#endif /* ABS_MT_TRACKING_ID */ + +#ifdef CY_USE_DEEP_SLEEP + #define CY_USE_DEEP_SLEEP_SEL 0x80 +#else + #define CY_USE_DEEP_SLEEP_SEL 0x00 +#endif +#ifdef CY_USE_LOW_POWER + #define CY_USE_SLEEP (CY_USE_DEEP_SLEEP_SEL | 0x01) +#else + #define CY_USE_SLEEP 0x00 +#endif /* CY_USE_LOW_POWER */ + +#ifdef CY_USE_TEST_DATA + #define cyttsp_testdat(ray1, ray2, sizeofray) \ + { \ + int i; \ + u8 *up1 = (u8 *)ray1; \ + u8 *up2 = (u8 *)ray2; \ + for (i = 0; i < sizeofray; i++) { \ + up1[i] = up2[i]; \ + } \ + } +#else + #define cyttsp_testdat(xy, test_xy, sizeofray) +#endif /* CY_USE_TEST_DATA */ + +/* helper macros */ +#define GET_NUM_TOUCHES(x) ((x) & 
0x0F) +#define GET_TOUCH1_ID(x) (((x) & 0xF0) >> 4) +#define GET_TOUCH2_ID(x) ((x) & 0x0F) +#define GET_TOUCH3_ID(x) (((x) & 0xF0) >> 4) +#define GET_TOUCH4_ID(x) ((x) & 0x0F) +#define IS_LARGE_AREA(x) (((x) & 0x10) >> 4) +#define FLIP_DATA_FLAG 0x01 +#define REVERSE_X_FLAG 0x02 +#define REVERSE_Y_FLAG 0x04 +#define FLIP_DATA(flags) ((flags) & FLIP_DATA_FLAG) +#define REVERSE_X(flags) ((flags) & REVERSE_X_FLAG) +#define REVERSE_Y(flags) ((flags) & REVERSE_Y_FLAG) +#define FLIP_XY(x, y) { \ + u16 tmp; \ + tmp = (x); \ + (x) = (y); \ + (y) = tmp; \ + } +#define INVERT_X(x, xmax) ((xmax) - (x)) +#define INVERT_Y(y, maxy) ((maxy) - (y)) +#define SET_HSTMODE(reg, mode) ((reg) & (mode)) +#define GET_HSTMODE(reg) ((reg & 0x70) >> 4) +#define GET_BOOTLOADERMODE(reg) ((reg & 0x10) >> 4) + +/* constant definitions */ +/* maximum number of concurrent ST track IDs */ +#define CY_NUM_ST_TCH_ID 2 + +/* maximum number of concurrent MT track IDs */ +#define CY_NUM_MT_TCH_ID 4 + +/* maximum number of track IDs */ +#define CY_NUM_TRK_ID 16 + +#define CY_NTCH 0 /* no touch (lift off) */ +#define CY_TCH 1 /* active touch (touchdown) */ +#define CY_ST_FNGR1_IDX 0 +#define CY_ST_FNGR2_IDX 1 +#define CY_MT_TCH1_IDX 0 +#define CY_MT_TCH2_IDX 1 +#define CY_MT_TCH3_IDX 2 +#define CY_MT_TCH4_IDX 3 +#define CY_XPOS 0 +#define CY_YPOS 1 +#define CY_IGNR_TCH (-1) +#define CY_SMALL_TOOL_WIDTH 10 +#define CY_LARGE_TOOL_WIDTH 255 +#define CY_REG_BASE 0x00 +#define CY_REG_GEST_SET 0x1E +#define CY_REG_ACT_INTRVL 0x1D +#define CY_REG_TCH_TMOUT (CY_REG_ACT_INTRVL+1) +#define CY_REG_LP_INTRVL (CY_REG_TCH_TMOUT+1) +#define CY_SOFT_RESET ((1 << 0)) +#define CY_DEEP_SLEEP ((1 << 1)) +#define CY_LOW_POWER ((1 << 2)) +#define CY_MAXZ 255 +#define CY_OK 0 +#define CY_INIT 1 +#define CY_DLY_DFLT 10 /* ms */ +#define CY_DLY_SYSINFO 20 /* ms */ +#define CY_DLY_BL 300 +#define CY_DLY_DNLOAD 100 /* ms */ +#define CY_NUM_RETRY 4 /* max num touch data read */ + +/* handshake bit in the hst_mode reg */ +#define CY_HNDSHK_BIT 0x80 +#ifdef CY_USE_HNDSHK + #define CY_SEND_HNDSHK 1 +#else + #define CY_SEND_HNDSHK 0 +#endif + +/* Bootloader File 0 offset */ +#define CY_BL_FILE0 0x00 + +/* Bootloader command directive */ +#define CY_BL_CMD 0xFF + +/* Bootloader Initiate Bootload */ +#define CY_BL_INIT_LOAD 0x38 + +/* Bootloader Write a Block */ +#define CY_BL_WRITE_BLK 0x39 + +/* Bootloader Terminate Bootload */ +#define CY_BL_TERMINATE 0x3B + +/* Bootloader Exit and Verify Checksum command */ +#define CY_BL_EXIT 0xA5 + +/* Bootloader default keys */ +#define CY_BL_KEY0 0x00 +#define CY_BL_KEY1 0x01 +#define CY_BL_KEY2 0x02 +#define CY_BL_KEY3 0x03 +#define CY_BL_KEY4 0x04 +#define CY_BL_KEY5 0x05 +#define CY_BL_KEY6 0x06 +#define CY_BL_KEY7 0x07 + +/* Active Power state scanning/processing refresh interval */ +#define CY_ACT_INTRVL_DFLT 0x00 + +/* touch timeout for the Active power */ +#define CY_TCH_TMOUT_DFLT 0xFF + +/* Low Power state scanning/processing refresh interval */ +#define CY_LP_INTRVL_DFLT 0x0A + +#define CY_IDLE_STATE 0 +#define CY_ACTIVE_STATE 1 +#define CY_LOW_PWR_STATE 2 +#define CY_SLEEP_STATE 3 + +/* device mode bits */ +#define CY_OP_MODE 0x00 +#define CY_SYSINFO_MODE 0x10 + +/* power mode select bits */ +#define CY_SOFT_RESET_MODE 0x01 /* return to Bootloader mode */ +#define CY_DEEP_SLEEP_MODE 0x02 +#define CY_LOW_PWR_MODE 0x04 + +#define CY_NUM_KEY 8 + +#ifdef CY_USE_GEST + #define CY_USE_GESTURES 1 +#else + #define CY_USE_GESTURES 0 +#endif /* CY_USE_GESTURE_SIGNALS */ + +#ifdef CY_USE_GEST_GRP1 + #define CY_GEST_GRP1 
0x10 +#else + #define CY_GEST_GRP1 0x00 +#endif /* CY_USE_GEST_GRP1 */ +#ifdef CY_USE_GEST_GRP2 + #define CY_GEST_GRP2 0x20 +#else + #define CY_GEST_GRP2 0x00 +#endif /* CY_USE_GEST_GRP2 */ +#ifdef CY_USE_GEST_GRP3 + #define CY_GEST_GRP3 0x40 +#else + #define CY_GEST_GRP3 0x00 +#endif /* CY_USE_GEST_GRP3 */ +#ifdef CY_USE_GEST_GRP4 + #define CY_GEST_GRP4 0x80 +#else + #define CY_GEST_GRP4 0x00 +#endif /* CY_USE_GEST_GRP4 */ + + +struct cyttsp_platform_data { + u32 panel_maxx; + u32 panel_maxy; + u32 disp_resx; + u32 disp_resy; + u32 disp_minx; + u32 disp_miny; + u32 disp_maxx; + u32 disp_maxy; + u32 flags; + u8 gen; + u8 use_st; + u8 use_mt; + u8 use_hndshk; + u8 use_trk_id; + u8 use_sleep; + u8 use_gestures; + u8 gest_set; + u8 act_intrvl; + u8 tch_tmout; + u8 lp_intrvl; + u8 power_state; + bool wakeup; +#ifdef CY_USE_I2C_DRIVER + s32 (*init)(struct i2c_client *client); + s32 (*resume)(struct i2c_client *client); +#endif +#ifdef CY_USE_SPI_DRIVER + s32 (*init)(struct spi_device *spi); + s32 (*resume)(struct spi_device *spi); +#endif +}; + +/* TrueTouch Standard Product Gen3 (Txx3xx) interface definition */ +struct cyttsp_gen3_xydata_t { + u8 hst_mode; + u8 tt_mode; + u8 tt_stat; + u16 x1 __attribute__ ((packed)); + u16 y1 __attribute__ ((packed)); + u8 z1; + u8 touch12_id; + u16 x2 __attribute__ ((packed)); + u16 y2 __attribute__ ((packed)); + u8 z2; + u8 gest_cnt; + u8 gest_id; + u16 x3 __attribute__ ((packed)); + u16 y3 __attribute__ ((packed)); + u8 z3; + u8 touch34_id; + u16 x4 __attribute__ ((packed)); + u16 y4 __attribute__ ((packed)); + u8 z4; + u8 tt_undef[3]; + u8 gest_set; + u8 tt_reserved; +}; + +/* TrueTouch Standard Product Gen2 (Txx2xx) interface definition */ +#define CY_GEN2_NOTOUCH 0x03 /* Both touches removed */ +#define CY_GEN2_GHOST 0x02 /* ghost */ +#define CY_GEN2_2TOUCH 0x03 /* 2 touch; no ghost */ +#define CY_GEN2_1TOUCH 0x01 /* 1 touch only */ +#define CY_GEN2_TOUCH2 0x01 /* 1st touch removed; + * 2nd touch remains */ +struct cyttsp_gen2_xydata_t { + u8 hst_mode; + u8 tt_mode; + u8 tt_stat; + u16 x1 __attribute__ ((packed)); + u16 y1 __attribute__ ((packed)); + u8 z1; + u8 evnt_idx; + u16 x2 __attribute__ ((packed)); + u16 y2 __attribute__ ((packed)); + u8 tt_undef1; + u8 gest_cnt; + u8 gest_id; + u8 tt_undef[14]; + u8 gest_set; + u8 tt_reserved; +}; + +/* TTSP System Information interface definition */ +struct cyttsp_sysinfo_data_t { + u8 hst_mode; + u8 mfg_cmd; + u8 mfg_stat; + u8 cid[3]; + u8 tt_undef1; + u8 uid[8]; + u8 bl_verh; + u8 bl_verl; + u8 tts_verh; + u8 tts_verl; + u8 app_idh; + u8 app_idl; + u8 app_verh; + u8 app_verl; + u8 tt_undef[6]; + u8 act_intrvl; + u8 tch_tmout; + u8 lp_intrvl; +}; + +/* TTSP Bootloader Register Map interface definition */ +#define CY_BL_CHKSUM_OK 0x01 +struct cyttsp_bootloader_data_t { + u8 bl_file; + u8 bl_status; + u8 bl_error; + u8 blver_hi; + u8 blver_lo; + u8 bld_blver_hi; + u8 bld_blver_lo; + u8 ttspver_hi; + u8 ttspver_lo; + u8 appid_hi; + u8 appid_lo; + u8 appver_hi; + u8 appver_lo; + u8 cid_0; + u8 cid_1; + u8 cid_2; +}; + +#define cyttsp_wake_data_t cyttsp_gen3_xydata_t +#ifdef CY_DECLARE_GLOBALS + #ifdef CY_INCLUDE_LOAD_FILE + /* this file declares: + * firmware download block array (cyttsp_fw[]), + * the number of command block records (cyttsp_fw_records), + * and the version variables + */ + #include "cyttsp_fw.h" /* imports cyttsp_fw[] array */ + #define cyttsp_app_load() 1 + #ifdef CY_FORCE_FW_UPDATE + #define cyttsp_force_fw_load() 1 + #else + #define cyttsp_force_fw_load() 0 + #endif + + #else + /* the 
following declarations are to allow + * some debugging capability + */ + unsigned char cyttsp_fw_tts_verh = 0x00; + unsigned char cyttsp_fw_tts_verl = 0x01; + unsigned char cyttsp_fw_app_idh = 0x02; + unsigned char cyttsp_fw_app_idl = 0x03; + unsigned char cyttsp_fw_app_verh = 0x04; + unsigned char cyttsp_fw_app_verl = 0x05; + unsigned char cyttsp_fw_cid_0 = 0x06; + unsigned char cyttsp_fw_cid_1 = 0x07; + unsigned char cyttsp_fw_cid_2 = 0x08; + #define cyttsp_app_load() 0 + #define cyttsp_force_fw_load() 0 + #endif + #define cyttsp_tts_verh() cyttsp_fw_tts_verh + #define cyttsp_tts_verl() cyttsp_fw_tts_verl + #define cyttsp_app_idh() cyttsp_fw_app_idh + #define cyttsp_app_idl() cyttsp_fw_app_idl + #define cyttsp_app_verh() cyttsp_fw_app_verh + #define cyttsp_app_verl() cyttsp_fw_app_verl + #define cyttsp_cid_0() cyttsp_fw_cid_0 + #define cyttsp_cid_1() cyttsp_fw_cid_1 + #define cyttsp_cid_2() cyttsp_fw_cid_2 + #ifdef CY_USE_TEST_DATA + static struct cyttsp_gen2_xydata_t tt_gen2_testray[] = { + {0x00}, {0x00}, {0x04}, + {0x4000}, {0x8000}, {0x80}, + {0x03}, + {0x2000}, {0x1000}, {0x00}, + {0x00}, + {0x00}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x00}, + {0x00} + }; + + static struct cyttsp_gen3_xydata_t tt_gen3_testray[] = { + {0x00}, {0x00}, {0x04}, + {0x4000}, {0x8000}, {0x80}, + {0x12}, + {0x2000}, {0x1000}, {0xA0}, + {0x00}, {0x00}, + {0x8000}, {0x4000}, {0xB0}, + {0x34}, + {0x4000}, {0x1000}, {0xC0}, + {0x00, 0x00, 0x00}, + {0x00}, + {0x00} + }; + #endif /* CY_USE_TEST_DATA */ + +#else + extern u8 g_appload_ray[]; +#endif + +#endif /* __CYTTSP_H__ */ diff --git a/include/linux/device.h b/include/linux/device.h index 1bf5cf0b45131..c69f45d1e50f7 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -634,9 +634,6 @@ static inline int devtmpfs_mount(const char *mountpoint) { return 0; } /* drivers/base/power/shutdown.c */ extern void device_shutdown(void); -/* drivers/base/sys.c */ -extern void sysdev_shutdown(void); - /* debugging and troubleshooting/diagnostic helpers. */ extern const char *dev_driver_string(const struct device *dev); diff --git a/include/linux/ds2784_battery.h b/include/linux/ds2784_battery.h new file mode 100644 index 0000000000000..7ca10f6fc193e --- /dev/null +++ b/include/linux/ds2784_battery.h @@ -0,0 +1,29 @@ +/* include/linux/ds2784_battery.h + * + * Copyright (C) 2009 HTC Corporation + * Copyright (C) 2009 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
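A board-file sketch of describing a Gen3 panel to the cyttsp I2C driver with the platform data structure above. The I2C bus/address, the resolution values, and the gesture group selection are placeholders; only the macro names come from this header.

#include <linux/i2c.h>
#include <linux/cyttsp.h>

static struct cyttsp_platform_data example_cyttsp_pdata = {
	.panel_maxx   = 479,			/* placeholder resolution */
	.panel_maxy   = 799,
	.flags        = 0,
	.gen          = CY_GEN3,
	.use_st       = CY_USE_ST,
	.use_mt       = CY_USE_MT,
	.use_trk_id   = CY_USE_TRACKING_ID,
	.use_hndshk   = CY_SEND_HNDSHK,
	.use_sleep    = CY_USE_SLEEP,
	.use_gestures = CY_USE_GESTURES,
	.gest_set     = CY_GEST_GRP1 | CY_GEST_GRP2 | CY_ACT_DIST,
	.act_intrvl   = CY_ACT_INTRVL_DFLT,
	.tch_tmout    = CY_TCH_TMOUT_DFLT,
	.lp_intrvl    = CY_LP_INTRVL_DFLT,
};

static struct i2c_board_info example_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO(CY_I2C_NAME, 0x24),	/* address is a placeholder */
		.platform_data = &example_cyttsp_pdata,
	},
};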
+ * + */ + +#ifndef __LINUX_DS2784_BATTERY_H +#define __LINUX_DS2784_BATTERY_H + +#ifdef __KERNEL__ + +struct ds2784_platform_data { + int (*charge)(int on, int fast); + void *w1_slave; +}; + +#endif /* __KERNEL__ */ + +#endif /* __LINUX_DS2784_BATTERY_H */ diff --git a/include/linux/earlysuspend.h b/include/linux/earlysuspend.h index 8343b817af31b..772b94f562053 100755 --- a/include/linux/earlysuspend.h +++ b/include/linux/earlysuspend.h @@ -47,6 +47,10 @@ struct early_suspend { #ifdef CONFIG_HAS_EARLYSUSPEND void register_early_suspend(struct early_suspend *handler); void unregister_early_suspend(struct early_suspend *handler); +#ifdef CONFIG_HTC_ONMODE_CHARGING +void register_onchg_suspend(struct early_suspend *handler); +void unregister_onchg_suspend(struct early_suspend *handler); +#endif #else #define register_early_suspend(handler) do { } while (0) #define unregister_early_suspend(handler) do { } while (0) diff --git a/include/linux/elan_i2c.h b/include/linux/elan_i2c.h new file mode 100644 index 0000000000000..41a993613891f --- /dev/null +++ b/include/linux/elan_i2c.h @@ -0,0 +1,17 @@ +#ifndef ELAN_I2C_H +#define ELAN_I2C_H + +#define ELAN_8232_I2C_NAME "elan-touch" + +struct elan_i2c_platform_data { + uint16_t version; + int abs_x_min; + int abs_x_max; + int abs_y_min; + int abs_y_max; + int intr_gpio; + int (*power)(int on); +}; + +#endif + diff --git a/include/linux/err.h b/include/linux/err.h index 448afc12c78af..f2edce25a76b6 100644 --- a/include/linux/err.h +++ b/include/linux/err.h @@ -52,6 +52,14 @@ static inline void * __must_check ERR_CAST(const void *ptr) return (void *) ptr; } +static inline int __must_check PTR_RET(const void *ptr) +{ + if (IS_ERR(ptr)) + return PTR_ERR(ptr); + else + return 0; +} + #endif #endif /* _LINUX_ERR_H */ diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 1908929204a9e..a04b6cee1cb43 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -13,6 +13,9 @@ #ifndef _LINUX_ETHTOOL_H #define _LINUX_ETHTOOL_H +#ifdef __KERNEL__ +#include +#endif #include #include @@ -449,6 +452,37 @@ struct ethtool_rxnfc { __u32 rule_locs[0]; }; +#ifdef __KERNEL__ +#ifdef CONFIG_COMPAT + +struct compat_ethtool_rx_flow_spec { + u32 flow_type; + union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_tcpip4_spec udp_ip4_spec; + struct ethtool_tcpip4_spec sctp_ip4_spec; + struct ethtool_ah_espip4_spec ah_ip4_spec; + struct ethtool_ah_espip4_spec esp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + struct ethhdr ether_spec; + u8 hdata[72]; + } h_u, m_u; + compat_u64 ring_cookie; + u32 location; +}; + +struct compat_ethtool_rxnfc { + u32 cmd; + u32 flow_type; + compat_u64 data; + struct compat_ethtool_rx_flow_spec fs; + u32 rule_cnt; + u32 rule_locs[0]; +}; + +#endif /* CONFIG_COMPAT */ +#endif /* __KERNEL__ */ + /** * struct ethtool_rxfh_indir - command to get or set RX flow hash indirection * @cmd: Specific command number - %ETHTOOL_GRXFHINDIR or %ETHTOOL_SRXFHINDIR @@ -557,6 +591,7 @@ int ethtool_op_set_ufo(struct net_device *dev, u32 data); u32 ethtool_op_get_flags(struct net_device *dev); int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported); void ethtool_ntuple_flush(struct net_device *dev); +bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported); /** * ðtool_ops - Alter and report network device settings diff --git a/include/linux/export.h b/include/linux/export.h new file mode 100644 index 0000000000000..696c0f48afc71 --- /dev/null +++ b/include/linux/export.h @@ -0,0 +1,89 
@@ +#ifndef _LINUX_EXPORT_H +#define _LINUX_EXPORT_H +/* + * Export symbols from the kernel to modules. Forked from module.h + * to reduce the amount of pointless cruft we feed to gcc when only + * exporting a simple symbol or two. + * + * If you feel the need to add #include to this file + * then you are doing something wrong and should go away silently. + */ + +/* Some toolchains use a `_' prefix for all user symbols. */ +#ifdef CONFIG_SYMBOL_PREFIX +#define MODULE_SYMBOL_PREFIX CONFIG_SYMBOL_PREFIX +#else +#define MODULE_SYMBOL_PREFIX "" +#endif + +struct kernel_symbol +{ + unsigned long value; + const char *name; +}; + +#ifdef MODULE +extern struct module __this_module; +#define THIS_MODULE (&__this_module) +#else +#define THIS_MODULE ((struct module *)0) +#endif + +#ifdef CONFIG_MODULES + +#ifndef __GENKSYMS__ +#ifdef CONFIG_MODVERSIONS +/* Mark the CRC weak since genksyms apparently decides not to + * generate a checksums for some symbols */ +#define __CRC_SYMBOL(sym, sec) \ + extern void *__crc_##sym __attribute__((weak)); \ + static const unsigned long __kcrctab_##sym \ + __used \ + __attribute__((section("___kcrctab" sec "+" #sym), unused)) \ + = (unsigned long) &__crc_##sym; +#else +#define __CRC_SYMBOL(sym, sec) +#endif + +/* For every exported symbol, place a struct in the __ksymtab section */ +#define __EXPORT_SYMBOL(sym, sec) \ + extern typeof(sym) sym; \ + __CRC_SYMBOL(sym, sec) \ + static const char __kstrtab_##sym[] \ + __attribute__((section("__ksymtab_strings"), aligned(1))) \ + = MODULE_SYMBOL_PREFIX #sym; \ + static const struct kernel_symbol __ksymtab_##sym \ + __used \ + __attribute__((section("___ksymtab" sec "+" #sym), unused)) \ + = { (unsigned long)&sym, __kstrtab_##sym } + +#define EXPORT_SYMBOL(sym) \ + __EXPORT_SYMBOL(sym, "") + +#define EXPORT_SYMBOL_GPL(sym) \ + __EXPORT_SYMBOL(sym, "_gpl") + +#define EXPORT_SYMBOL_GPL_FUTURE(sym) \ + __EXPORT_SYMBOL(sym, "_gpl_future") + +#ifdef CONFIG_UNUSED_SYMBOLS +#define EXPORT_UNUSED_SYMBOL(sym) __EXPORT_SYMBOL(sym, "_unused") +#define EXPORT_UNUSED_SYMBOL_GPL(sym) __EXPORT_SYMBOL(sym, "_unused_gpl") +#else +#define EXPORT_UNUSED_SYMBOL(sym) +#define EXPORT_UNUSED_SYMBOL_GPL(sym) +#endif + +#endif /* __GENKSYMS__ */ + +#else /* !CONFIG_MODULES... 
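A minimal usage sketch of the export macros above, pulling in only the lightweight header rather than all of module.h. The function name is a placeholder.

#include <linux/export.h>

int example_do_work(int arg)
{
	return arg * 2;
}
EXPORT_SYMBOL_GPL(example_do_work);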
*/ + +#define EXPORT_SYMBOL(sym) +#define EXPORT_SYMBOL_GPL(sym) +#define EXPORT_SYMBOL_GPL_FUTURE(sym) +#define EXPORT_UNUSED_SYMBOL(sym) +#define EXPORT_UNUSED_SYMBOL_GPL(sym) + +#endif /* CONFIG_MODULES */ + +#endif /* _LINUX_EXPORT_H */ diff --git a/include/linux/fb.h b/include/linux/fb.h index 68ba85a00c063..c7df155a97419 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -169,7 +169,8 @@ struct fb_fix_screeninfo { __u32 mmio_len; /* Length of Memory Mapped I/O */ __u32 accel; /* Indicate to driver which */ /* specific chip/card we have */ - __u16 reserved[3]; /* Reserved for future compatibility */ + __u16 capabilities; /* see FB_CAP_* */ + __u16 reserved[2]; /* Reserved for future compatibility */ }; /* Interpretation of offset for color fields: All offsets are from the right, @@ -271,7 +272,8 @@ struct fb_var_screeninfo { __u32 sync; /* see FB_SYNC_* */ __u32 vmode; /* see FB_VMODE_* */ __u32 rotate; /* angle we rotate counter clockwise */ - __u32 reserved[5]; /* Reserved for future compatibility */ + __u32 colorspace; /* colorspace for FOURCC-based modes */ + __u32 reserved[4]; /* Reserved for future compatibility */ }; struct fb_cmap { diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h index 70e4efabe0fb6..ebeb2f3ad068d 100644 --- a/include/linux/flex_array.h +++ b/include/linux/flex_array.h @@ -61,7 +61,7 @@ struct flex_array { struct flex_array *flex_array_alloc(int element_size, unsigned int total, gfp_t flags); int flex_array_prealloc(struct flex_array *fa, unsigned int start, - unsigned int end, gfp_t flags); + unsigned int nr_elements, gfp_t flags); void flex_array_free(struct flex_array *fa); void flex_array_free_parts(struct flex_array *fa); int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src, diff --git a/include/linux/fmem.h b/include/linux/fmem.h new file mode 100644 index 0000000000000..cda4a0fe13d68 --- /dev/null +++ b/include/linux/fmem.h @@ -0,0 +1,62 @@ +/* + * + * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef _FMEM_H_ +#define _FMEM_H_ + +#include + +struct fmem_platform_data { + unsigned long phys; + unsigned long size; + unsigned long reserved_size_low; + unsigned long reserved_size_high; + unsigned long align; +}; + +struct fmem_data { + unsigned long phys; + void *virt; + struct vm_struct *area; + unsigned long size; + unsigned long reserved_size_low; + unsigned long reserved_size_high; +}; + +enum fmem_state { + FMEM_UNINITIALIZED = 0, + FMEM_C_STATE, + FMEM_T_STATE, + FMEM_O_STATE, +}; + +#ifdef CONFIG_QCACHE +struct fmem_data *fmem_get_info(void); +int fmem_set_state(enum fmem_state); +void lock_fmem_state(void); +void unlock_fmem_state(void); +void *fmem_map_virtual_area(int cacheability); +void fmem_unmap_virtual_area(void); +#else +static inline struct fmem_data *fmem_get_info(void) { return NULL; } +static inline int fmem_set_state(enum fmem_state f) { return -ENODEV; } +static inline void lock_fmem_state(void) { return; } +static inline void unlock_fmem_state(void) { return; } +static inline void *fmem_map_virtual_area(int cacheability) { return NULL; } +static inline void fmem_unmap_virtual_area(void) { return; } +#endif + +int request_fmem_c_region(void *unused); +int release_fmem_c_region(void *unused); +#endif diff --git a/include/linux/fs.h b/include/linux/fs.h index e38b50a4b9d25..f05c0162880e6 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1779,7 +1779,9 @@ static inline void file_accessed(struct file *file) } int sync_inode(struct inode *inode, struct writeback_control *wbc); -int sync_inode_metadata(struct inode *inode, int wait); +int sync_inode_metadata(struct inode *inode, int datasync, int wait); +int inode_writeback_begin(struct inode *inode, int wait); +int inode_writeback_end(struct inode *inode); struct file_system_type { const char *name; @@ -2044,6 +2046,7 @@ static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb) } #endif extern int sync_filesystem(struct super_block *); +extern void sync_filesystems(int wait); extern const struct file_operations def_blk_fops; extern const struct file_operations def_chr_fops; extern const struct file_operations bad_sock_fops; diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index dcd6a7c3a4358..ca29e03c1fac0 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -428,6 +428,7 @@ extern void unregister_ftrace_graph(void); extern void ftrace_graph_init_task(struct task_struct *t); extern void ftrace_graph_exit_task(struct task_struct *t); +extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu); static inline int task_curr_ret_stack(struct task_struct *t) { @@ -451,6 +452,7 @@ static inline void unpause_graph_tracing(void) static inline void ftrace_graph_init_task(struct task_struct *t) { } static inline void ftrace_graph_exit_task(struct task_struct *t) { } +static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { } static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc, trace_func_graph_ent_t entryfunc) diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index 9869ef3674acb..3bc7dd7394bf0 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h @@ -8,29 +8,53 @@ * Version 2. See the file COPYING for more details. */ +#ifndef __GENALLOC_H__ +#define __GENALLOC_H__ -/* - * General purpose special memory pool descriptor. 
+struct gen_pool; + +struct gen_pool *__must_check gen_pool_create(unsigned order, int nid); + +void gen_pool_destroy(struct gen_pool *pool); + +unsigned long __must_check +gen_pool_alloc_aligned(struct gen_pool *pool, size_t size, + unsigned alignment_order); + +/** + * gen_pool_alloc() - allocate special memory from the pool + * @pool: Pool to allocate from. + * @size: Number of bytes to allocate from the pool. + * + * Allocate the requested number of bytes from the specified pool. + * Uses a first-fit algorithm. */ -struct gen_pool { - rwlock_t lock; - struct list_head chunks; /* list of chunks in this pool */ - int min_alloc_order; /* minimum allocation order */ -}; +static inline unsigned long __must_check +gen_pool_alloc(struct gen_pool *pool, size_t size) +{ + return gen_pool_alloc_aligned(pool, size, 0); +} -/* - * General purpose special memory pool chunk descriptor. +void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size); + +extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long); +extern int gen_pool_add_virt(struct gen_pool *, unsigned long, phys_addr_t, + size_t, int); +/** + * gen_pool_add - add a new chunk of special memory to the pool + * @pool: pool to add new memory chunk to + * @addr: starting address of memory chunk to add to pool + * @size: size in bytes of the memory chunk to add to pool + * @nid: node id of the node the chunk structure and bitmap should be + * allocated on, or -1 + * + * Add a new chunk of special memory to the specified pool. + * + * Returns 0 on success or a -ve errno on failure. */ -struct gen_pool_chunk { - spinlock_t lock; - struct list_head next_chunk; /* next chunk in pool */ - unsigned long start_addr; /* starting address of memory chunk */ - unsigned long end_addr; /* ending address of memory chunk */ - unsigned long bits[0]; /* bitmap for allocating memory chunk */ -}; - -extern struct gen_pool *gen_pool_create(int, int); -extern int gen_pool_add(struct gen_pool *, unsigned long, size_t, int); -extern void gen_pool_destroy(struct gen_pool *); -extern unsigned long gen_pool_alloc(struct gen_pool *, size_t); -extern void gen_pool_free(struct gen_pool *, unsigned long, size_t); +static inline int __must_check gen_pool_add(struct gen_pool *pool, unsigned long addr, + size_t size, int nid) +{ + return gen_pool_add_virt(pool, addr, -1, size, nid); +} +#endif /* __GENALLOC_H__ */ diff --git a/include/linux/genhd.h b/include/linux/genhd.h index c0d5f6945c1eb..035bc67051ce5 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -127,6 +127,7 @@ struct hd_struct { #define GENHD_FL_SUPPRESS_PARTITION_INFO 32 #define GENHD_FL_EXT_DEVT 64 /* allow extended devt */ #define GENHD_FL_NATIVE_CAPACITY 128 +#define GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE 256 enum { DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */ diff --git a/include/linux/genlock.h b/include/linux/genlock.h new file mode 100644 index 0000000000000..587c49df7444a --- /dev/null +++ b/include/linux/genlock.h @@ -0,0 +1,52 @@ +#ifndef _GENLOCK_H_ +#define _GENLOCK_H_ + +#ifdef __KERNEL__ + +struct genlock; +struct genlock_handle; + +struct genlock_handle *genlock_get_handle(void); +struct genlock_handle *genlock_get_handle_fd(int fd); +void genlock_put_handle(struct genlock_handle *handle); +struct genlock *genlock_create_lock(struct genlock_handle *); +struct genlock *genlock_attach_lock(struct genlock_handle *, int fd); +int genlock_wait(struct genlock_handle *handle, u32 timeout); +/* genlock_release_lock was deprecated */ +int 
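A minimal sketch of the reworked gen_pool API above: create a pool, hand it a chunk of special memory, then carve out an aligned allocation. The base address, size, and granule order are placeholders.

#include <linux/genalloc.h>
#include <linux/errno.h>

static struct gen_pool *example_pool;

static int example_pool_setup(unsigned long base, size_t size)
{
	unsigned long addr;

	example_pool = gen_pool_create(8, -1);	/* 256-byte minimum granule */
	if (!example_pool)
		return -ENOMEM;

	if (gen_pool_add(example_pool, base, size, -1))
		return -ENOMEM;

	/* grab 1 KiB, aligned to 4 KiB (alignment order 12) */
	addr = gen_pool_alloc_aligned(example_pool, 1024, 12);
	if (!addr)
		return -ENOMEM;

	gen_pool_free(example_pool, addr, 1024);
	return 0;
}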
genlock_lock(struct genlock_handle *handle, int op, int flags, + u32 timeout); +#endif + +#define GENLOCK_UNLOCK 0 +#define GENLOCK_WRLOCK 1 +#define GENLOCK_RDLOCK 2 + +#define GENLOCK_NOBLOCK (1 << 0) +#define GENLOCK_WRITE_TO_READ (1 << 1) + +struct genlock_lock { + int fd; + int op; + int flags; + int timeout; +}; + +#define GENLOCK_IOC_MAGIC 'G' + +#define GENLOCK_IOC_NEW _IO(GENLOCK_IOC_MAGIC, 0) +#define GENLOCK_IOC_EXPORT _IOR(GENLOCK_IOC_MAGIC, 1, \ + struct genlock_lock) +#define GENLOCK_IOC_ATTACH _IOW(GENLOCK_IOC_MAGIC, 2, \ + struct genlock_lock) + +/* Deprecated */ +#define GENLOCK_IOC_LOCK _IOW(GENLOCK_IOC_MAGIC, 3, \ + struct genlock_lock) + +/* Deprecated */ +#define GENLOCK_IOC_RELEASE _IO(GENLOCK_IOC_MAGIC, 4) +#define GENLOCK_IOC_WAIT _IOW(GENLOCK_IOC_MAGIC, 5, \ + struct genlock_lock) +#define GENLOCK_IOC_DREADLOCK _IOW(GENLOCK_IOC_MAGIC, 6, \ + struct genlock_lock) +#endif diff --git a/include/linux/gpio_event.h b/include/linux/gpio_event.h index 360b4ddb46a6b..fc6aa099d8314 100644 --- a/include/linux/gpio_event.h +++ b/include/linux/gpio_event.h @@ -37,6 +37,9 @@ struct gpio_event_info { void **data, unsigned int dev, unsigned int type, unsigned int code, int value); /* out events */ bool no_suspend; +#ifdef CONFIG_OPTICALJOYSTICK_CRUCIAL + bool oj_btn; +#endif }; struct gpio_event_platform_data { diff --git a/include/linux/hid.h b/include/linux/hid.h index d91c25e253c87..bb29bb1dbd2fd 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -504,6 +504,9 @@ struct hid_device { /* device report descriptor */ struct hid_usage *, __s32); void (*hiddev_report_event) (struct hid_device *, struct hid_report *); + /* handler for raw input (Get_Report) data, used by hidraw */ + int (*hid_get_raw_report) (struct hid_device *, unsigned char, __u8 *, size_t, unsigned char); + /* handler for raw output data, used by hidraw */ int (*hid_output_raw_report) (struct hid_device *, __u8 *, size_t, unsigned char); @@ -638,7 +641,7 @@ struct hid_driver { struct hid_input *hidinput, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max); void (*feature_mapping)(struct hid_device *hdev, - struct hid_input *hidinput, struct hid_field *field, + struct hid_field *field, struct hid_usage *usage); #ifdef CONFIG_PM int (*suspend)(struct hid_device *hdev, pm_message_t message); diff --git a/include/linux/hidraw.h b/include/linux/hidraw.h index dd8d69269176f..4b88e697c4e9b 100644 --- a/include/linux/hidraw.h +++ b/include/linux/hidraw.h @@ -35,6 +35,9 @@ struct hidraw_devinfo { #define HIDIOCGRAWINFO _IOR('H', 0x03, struct hidraw_devinfo) #define HIDIOCGRAWNAME(len) _IOC(_IOC_READ, 'H', 0x04, len) #define HIDIOCGRAWPHYS(len) _IOC(_IOC_READ, 'H', 0x05, len) +/* The first byte of SFEATURE and GFEATURE is the report number */ +#define HIDIOCSFEATURE(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x06, len) +#define HIDIOCGFEATURE(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x07, len) #define HIDRAW_FIRST_MINOR 0 #define HIDRAW_MAX_DEVICES 64 diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index f376ddc64c4dd..b3785ab060e73 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -308,6 +308,7 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer) extern ktime_t ktime_get(void); extern ktime_t ktime_get_real(void); +extern ktime_t ktime_get_boottime(void); DECLARE_PER_CPU(struct tick_device, tick_cpu_device); diff --git a/include/linux/htc_hdmi.h b/include/linux/htc_hdmi.h new file mode 100644 index 0000000000000..315ee5f2312ac --- /dev/null +++ 
b/include/linux/htc_hdmi.h @@ -0,0 +1,79 @@ +/* include/linux/htc_hdmi.h + * + * Copyright (c) 2010 HTC + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _HTC_HDMI_H_ +#define _HTC_HDMI_H_ + +enum { + hdmi_mode = 0, /* 0: mirror, 1: presentation */ + hdmi_enabled, + fb_enabled, +}; + +#define HDMI_IOCTL_MAGIC 'h' +#define HDMI_SET_MODE _IOW(HDMI_IOCTL_MAGIC, 1, unsigned) +#define HDMI_GET_MODE _IOR(HDMI_IOCTL_MAGIC, 2, unsigned) +#define HDMI_DISABLE _IOW(HDMI_IOCTL_MAGIC, 3, unsigned) +#define HDMI_ENABLE _IOW(HDMI_IOCTL_MAGIC, 4, unsigned) +#define HDMI_GET_STATE _IOR(HDMI_IOCTL_MAGIC, 5, unsigned) +#define HDMI_BLIT _IOW(HDMI_IOCTL_MAGIC, 6, unsigned) +#define HDMI_CABLE_STAT _IOR(HDMI_IOCTL_MAGIC, 7, unsigned) +#define HDMI_ESTABLISH_TIMING _IOR(HDMI_IOCTL_MAGIC, 8, unsigned) +#define HDMI_GET_EDID _IOR(HDMI_IOCTL_MAGIC, 9, unsigned) +#define HDMI_GET_DISPLAY_INFO _IOR(HDMI_IOCTL_MAGIC, 10, unsigned) + +#define HDMI_GET_MIRRORING _IOR(HDMI_IOCTL_MAGIC, 30, unsigned) +#define HDMI_SET_MIRRORING _IOW(HDMI_IOCTL_MAGIC, 31, unsigned) +#define HDMI_GET_STATISTICS _IOR(HDMI_IOCTL_MAGIC, 32, unsigned) +#define HDMI_CLEAR_STATISTICS _IOW(HDMI_IOCTL_MAGIC, 33, unsigned) +#define HDMI_GET_VSYNC_MODE _IOR(HDMI_IOCTL_MAGIC, 34, unsigned) +#define HDMI_SET_VSYNC_MODE _IOW(HDMI_IOCTL_MAGIC, 35, unsigned) + +#define ASPECT(w, h) (w << 8 | h) +struct video_mode { + unsigned short width, height, refresh_rate, aspect; + bool interlaced, supported; + char *descrption; +}; + +enum { + PROGRESSIVE, + INTERLACE, +}; + +struct display_info { + unsigned int visible_width; /* in mm */ + unsigned int visible_height; + unsigned int resolution_width; /* in pixel */ + unsigned int resolution_height; +}; + +/* Gathered statistics for mirroring */ +struct mirror_statistics { + unsigned int frames; /* Number of panel frames requested */ + unsigned int underflows; /* Number of times we underflowed the LCDC */ + s64 statisticsTime; /* Mirror time, in ns */ +}; + +/* Panel state while mirroring */ +enum { + VSYNC_ALL = 0, + VSYNC_PANEL_ONLY, + VSYNC_HDMI_ONLY, + VSYNC_NONE +}; + +#endif + diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index df29c8fde36be..8847c8c29791c 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -117,7 +117,7 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long end, long adjust_next) { - if (!vma->anon_vma || vma->vm_ops || vma->vm_file) + if (!vma->anon_vma || vma->vm_ops) return; __vma_adjust_trans_huge(vma, start, end, adjust_next); } diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 903576df88dcd..e7559e1f8dd57 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -34,7 +34,6 @@ #include /* for struct device */ #include /* for completion */ #include -#include /* for struct device_node */ extern struct bus_type i2c_bus_type; extern struct device_type i2c_adapter_type; @@ -258,9 +257,6 @@ struct i2c_board_info { unsigned short addr; void *platform_data; struct dev_archdata *archdata; -#ifdef CONFIG_OF - struct device_node *of_node; -#endif int irq; }; diff --git 
a/include/linux/if_pppox.h b/include/linux/if_pppox.h index 3e837352b4e1e..999ccd3fff378 100644 --- a/include/linux/if_pppox.h +++ b/include/linux/if_pppox.h @@ -173,19 +173,21 @@ struct pptp_opt { }; struct pppolac_opt { - __u32 local; - __u32 remote; - __u16 sequence; - __u8 sequencing; - int (*backlog_rcv)(struct sock *sk_udp, struct sk_buff *skb); + __u32 local; + __u32 remote; + __u32 recv_sequence; + __u32 xmit_sequence; + atomic_t sequencing; + int (*backlog_rcv)(struct sock *sk_udp, struct sk_buff *skb); }; struct pppopns_opt { - __u16 local; - __u16 remote; - __u32 sequence; - void (*data_ready)(struct sock *sk_raw, int length); - int (*backlog_rcv)(struct sock *sk_raw, struct sk_buff *skb); + __u16 local; + __u16 remote; + __u32 recv_sequence; + __u32 xmit_sequence; + void (*data_ready)(struct sock *sk_raw, int length); + int (*backlog_rcv)(struct sock *sk_raw, struct sk_buff *skb); }; #include diff --git a/include/linux/init_task.h b/include/linux/init_task.h index caa151fbebb74..9ff59ddf0ec0b 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -128,6 +128,67 @@ extern struct cred init_cred; * INIT_TASK is used to set up the first task table, touch at * your own risk!. Base=0, limit=0x1fffff (=2MB) */ +#ifdef CONFIG_SCHED_BFS +#define INIT_TASK(tsk) \ +{ \ + .state = 0, \ + .stack = &init_thread_info, \ + .usage = ATOMIC_INIT(2), \ + .flags = PF_KTHREAD, \ + .lock_depth = -1, \ + .prio = NORMAL_PRIO, \ + .static_prio = MAX_PRIO-20, \ + .normal_prio = NORMAL_PRIO, \ + .deadline = 0, \ + .policy = SCHED_NORMAL, \ + .cpus_allowed = CPU_MASK_ALL, \ + .mm = NULL, \ + .active_mm = &init_mm, \ + .run_list = LIST_HEAD_INIT(tsk.run_list), \ + .time_slice = HZ, \ + .tasks = LIST_HEAD_INIT(tsk.tasks), \ + INIT_PUSHABLE_TASKS(tsk) \ + .ptraced = LIST_HEAD_INIT(tsk.ptraced), \ + .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \ + .real_parent = &tsk, \ + .parent = &tsk, \ + .children = LIST_HEAD_INIT(tsk.children), \ + .sibling = LIST_HEAD_INIT(tsk.sibling), \ + .group_leader = &tsk, \ + RCU_INIT_POINTER(.real_cred, &init_cred), \ + RCU_INIT_POINTER(.cred, &init_cred), \ + .comm = "swapper", \ + .thread = INIT_THREAD, \ + .fs = &init_fs, \ + .files = &init_files, \ + .signal = &init_signals, \ + .sighand = &init_sighand, \ + .nsproxy = &init_nsproxy, \ + .pending = { \ + .list = LIST_HEAD_INIT(tsk.pending.list), \ + .signal = {{0}}}, \ + .blocked = {{0}}, \ + .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \ + .journal_info = NULL, \ + .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ + .fs_excl = ATOMIC_INIT(0), \ + .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ + .timer_slack_ns = 50000, /* 50 usec default slack */ \ + .pids = { \ + [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ + [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ + [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \ + }, \ + .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ + INIT_IDS \ + INIT_PERF_EVENTS(tsk) \ + INIT_TRACE_IRQFLAGS \ + INIT_LOCKDEP \ + INIT_FTRACE_GRAPH \ + INIT_TRACE_RECURSION \ + INIT_TASK_RCU_PREEMPT(tsk) \ +} +#else /* CONFIG_SCHED_BFS */ #define INIT_TASK(tsk) \ { \ .state = 0, \ @@ -193,7 +254,7 @@ extern struct cred init_cred; INIT_TRACE_RECURSION \ INIT_TASK_RCU_PREEMPT(tsk) \ } - +#endif /* CONFIG_SCHED_BFS */ #define INIT_CPU_TIMERS(cpu_timers) \ { \ diff --git a/include/linux/input.h b/include/linux/input.h index e428382ca28a5..c49e04de0df05 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -760,6 +760,8 @@ struct input_keymap_entry { #define ABS_VOLUME 0x20 
#define ABS_MISC 0x28 +#define ABS_MT_POSITION 0x2a /* Group a set of X and Y */ +#define ABS_MT_AMPLITUDE 0x2b /* Group a set of Z and W */ #define ABS_MT_SLOT 0x2f /* MT slot being modified */ #define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */ diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index b2eee896dcbc7..7cbace3e14f7b 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h @@ -1,14 +1,14 @@ #ifndef IOCONTEXT_H #define IOCONTEXT_H +#include #include #include -struct cfq_queue; struct cfq_io_context { void *key; - struct cfq_queue *cfqq[2]; + void *cfqq[2]; struct io_context *ioc; @@ -27,6 +27,16 @@ struct cfq_io_context { struct rcu_head rcu_head; }; +/* + * Indexes into the ioprio_changed bitmap. A bit set indicates that + * the corresponding I/O scheduler needs to see a ioprio update. + */ +enum { + IOC_CFQ_IOPRIO_CHANGED, + IOC_BFQ_IOPRIO_CHANGED, + IOC_IOPRIO_CHANGED_BITS +}; + /* * I/O subsystem state of the associated processes. It is refcounted * and kmalloc'ed. These could be shared between processes. @@ -39,7 +49,7 @@ struct io_context { spinlock_t lock; unsigned short ioprio; - unsigned short ioprio_changed; + DECLARE_BITMAP(ioprio_changed, IOC_IOPRIO_CHANGED_BITS); #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE) unsigned short cgroup_changed; @@ -53,6 +63,8 @@ struct io_context { struct radix_tree_root radix_root; struct hlist_head cic_list; + struct radix_tree_root bfq_radix_root; + struct hlist_head bfq_cic_list; void __rcu *ioc_data; }; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 0a2ba4098996d..d5330973a9fea 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -19,6 +19,8 @@ #ifndef __LINUX_IOMMU_H #define __LINUX_IOMMU_H +#include + #define IOMMU_READ (1) #define IOMMU_WRITE (2) #define IOMMU_CACHE (4) /* DMA cache coherency */ @@ -33,7 +35,7 @@ struct iommu_domain { #define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */ struct iommu_ops { - int (*domain_init)(struct iommu_domain *domain); + int (*domain_init)(struct iommu_domain *domain, int flags); void (*domain_destroy)(struct iommu_domain *domain); int (*attach_dev)(struct iommu_domain *domain, struct device *dev); void (*detach_dev)(struct iommu_domain *domain, struct device *dev); @@ -41,6 +43,10 @@ struct iommu_ops { phys_addr_t paddr, int gfp_order, int prot); int (*unmap)(struct iommu_domain *domain, unsigned long iova, int gfp_order); + int (*map_range)(struct iommu_domain *domain, unsigned int iova, + struct scatterlist *sg, unsigned int len, int prot); + int (*unmap_range)(struct iommu_domain *domain, unsigned int iova, + unsigned int len); phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, unsigned long iova); int (*domain_has_cap)(struct iommu_domain *domain, @@ -51,7 +57,7 @@ struct iommu_ops { extern void register_iommu(struct iommu_ops *ops); extern bool iommu_found(void); -extern struct iommu_domain *iommu_domain_alloc(void); +extern struct iommu_domain *iommu_domain_alloc(int flags); extern void iommu_domain_free(struct iommu_domain *domain); extern int iommu_attach_device(struct iommu_domain *domain, struct device *dev); @@ -61,6 +67,10 @@ extern int iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, int gfp_order, int prot); extern int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order); +extern int iommu_map_range(struct iommu_domain *domain, unsigned int iova, + struct scatterlist *sg, unsigned int len, int prot); +extern int 
iommu_unmap_range(struct iommu_domain *domain, unsigned int iova, + unsigned int len); extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, unsigned long iova); extern int iommu_domain_has_cap(struct iommu_domain *domain, @@ -77,7 +87,7 @@ static inline bool iommu_found(void) return false; } -static inline struct iommu_domain *iommu_domain_alloc(void) +static inline struct iommu_domain *iommu_domain_alloc(int flags) { return NULL; } @@ -109,6 +119,19 @@ static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova, return -ENODEV; } +static inline int iommu_map_range(struct iommu_domain *domain, + unsigned int iova, struct scatterlist *sg, + unsigned int len, int prot) +{ + return -ENODEV; +} + +static inline int iommu_unmap_range(struct iommu_domain *domain, + unsigned int iova, unsigned int len) +{ + return -ENODEV; +} + static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, unsigned long iova) { diff --git a/include/linux/ion.h b/include/linux/ion.h new file mode 100644 index 0000000000000..0848591e794cd --- /dev/null +++ b/include/linux/ion.h @@ -0,0 +1,869 @@ +/* + * include/linux/ion.h + * + * Copyright (C) 2011 Google, Inc. + * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_ION_H +#define _LINUX_ION_H + +#include +#include + + +struct ion_handle; +/** + * enum ion_heap_types - list of all possible types of heaps + * @ION_HEAP_TYPE_SYSTEM: memory allocated via vmalloc + * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc + * @ION_HEAP_TYPE_CARVEOUT: memory allocated from a prereserved + * carveout heap, allocations are physically + * contiguous + * @ION_HEAP_TYPE_IOMMU: IOMMU memory + * @ION_HEAP_TYPE_CP: memory allocated from a prereserved + * carveout heap, allocations are physically + * contiguous. Used for content protection. + * @ION_HEAP_END: helper for iterating over heaps + */ +enum ion_heap_type { + ION_HEAP_TYPE_SYSTEM, + ION_HEAP_TYPE_SYSTEM_CONTIG, + ION_HEAP_TYPE_CARVEOUT, + ION_HEAP_TYPE_IOMMU, + ION_HEAP_TYPE_CP, + ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always + are at the end of this enum */ + ION_NUM_HEAPS, +}; + +#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM) +#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG) +#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT) +#define ION_HEAP_CP_MASK (1 << ION_HEAP_TYPE_CP) + +/** + * heap flags - the lower 16 bits are used by core ion, the upper 16 + * bits are reserved for use by the heaps themselves. + */ +#define ION_FLAG_CACHED 1 /* mappings of this buffer should be + cached, ion will do cache + maintenance when the buffer is + mapped for dma */ + +/** + * These are the only ids that should be used for Ion heap ids. + * The ids listed are the order in which allocation will be attempted + * if specified. Don't swap the order of heap ids unless you know what + * you are doing! 
+ * Id's are spaced by purpose to allow new Id's to be inserted in-between (for + * possible fallbacks) + */ + +enum ion_heap_ids { + INVALID_HEAP_ID = -1, + /* In a system with the "Mini Ion Upgrade" (such as this one) + * the heap_mask and caching flag end up sharing a spot in + * ion_allocation_data.flags. We should make sure to never use + * the 0th bit for a heap because that's where the caching bit + * ends up. + */ + ION_BOGUS_HEAP_DO_NOT_USE = 0, + ION_CP_MM_HEAP_ID = 8, + ION_CP_MFC_HEAP_ID = 12, + ION_CP_WB_HEAP_ID = 16, /* 8660 only */ + ION_CAMERA_HEAP_ID = 20, /* 8660 only */ + ION_SF_HEAP_ID = 24, + ION_IOMMU_HEAP_ID = 25, + ION_QSECOM_HEAP_ID = 27, + ION_AUDIO_HEAP_ID = 28, + + ION_MM_FIRMWARE_HEAP_ID = 29, + ION_SYSTEM_HEAP_ID = 30, + + ION_HEAP_ID_RESERVED = 31 /** Bit reserved for ION_SECURE flag */ +}; + +enum ion_fixed_position { + NOT_FIXED, + FIXED_LOW, + FIXED_MIDDLE, + FIXED_HIGH, +}; + +/** + * Flag to use when allocating to indicate that a heap is secure. + */ +#define ION_SECURE (1 << ION_HEAP_ID_RESERVED) + +/** + * Macro should be used with ion_heap_ids defined above. + */ +#define ION_HEAP(bit) (1 << (bit)) + +#define ION_VMALLOC_HEAP_NAME "vmalloc" +#define ION_AUDIO_HEAP_NAME "audio" +#define ION_SF_HEAP_NAME "sf" +#define ION_MM_HEAP_NAME "mm" +#define ION_CAMERA_HEAP_NAME "camera_preview" +#define ION_IOMMU_HEAP_NAME "iommu" +#define ION_MFC_HEAP_NAME "mfc" +#define ION_WB_HEAP_NAME "wb" +#define ION_MM_FIRMWARE_HEAP_NAME "mm_fw" +#define ION_QSECOM_HEAP_NAME "qsecom" +#define ION_FMEM_HEAP_NAME "fmem" + +#define CACHED 1 +#define UNCACHED 0 + +#define ION_CACHE_SHIFT 0 + +#define ION_SET_CACHE(__cache) ((__cache) << ION_CACHE_SHIFT) + +#define ION_IS_CACHED(__flags) ((__flags) & (1 << ION_CACHE_SHIFT)) + +/* + * This flag allows clients when mapping into the IOMMU to specify to + * defer un-mapping from the IOMMU until the buffer memory is freed. + */ +#define ION_IOMMU_UNMAP_DELAYED 1 + +#ifdef __KERNEL__ +#include +#include +struct ion_device; +struct ion_heap; +struct ion_mapper; +struct ion_client; +struct ion_buffer; + +/* This should be removed some day when phys_addr_t's are fully + plumbed in the kernel, and all instances of ion_phys_addr_t should + be converted to phys_addr_t. For the time being many kernel interfaces + do not accept phys_addr_t's that would have to */ +#define ion_phys_addr_t unsigned long +#define ion_virt_addr_t unsigned long + +/** + * struct ion_platform_heap - defines a heap in the given platform + * @type: type of the heap from ion_heap_type enum + * @id: unique identifier for heap. When allocating (lower numbers + * will be allocated from first) + * @name: used for debug purposes + * @base: base address of heap in physical memory if applicable + * @size: size of the heap in bytes if applicable + * @memory_type:Memory type used for the heap + * @has_outer_cache: set to 1 if outer cache is used, 0 otherwise. + * @extra_data: Extra data specific to each heap type + */ +struct ion_platform_heap { + enum ion_heap_type type; + unsigned int id; + const char *name; + ion_phys_addr_t base; + size_t size; + enum ion_memory_types memory_type; + unsigned int has_outer_cache; + void *extra_data; +}; + +/** + * struct ion_cp_heap_pdata - defines a content protection heap in the given + * platform + * @permission_type: Memory ID used to identify the memory to TZ + * @align: Alignment requirement for the memory + * @secure_base: Base address for securing the heap. 
+ * Note: This might be different from actual base address + * of this heap in the case of a shared heap. + * @secure_size: Memory size for securing the heap. + * Note: This might be different from actual size + * of this heap in the case of a shared heap. + * @reusable: Flag indicating whether this heap is reusable or not. + * (see FMEM) + * @mem_is_fmem: Flag indicating whether this memory is coming from fmem + * or not. + * @fixed_position: If nonzero, position in the fixed area. + * @virt_addr: Virtual address used when using fmem. + * @iommu_map_all: Indicates whether we should map whole heap into IOMMU. + * @iommu_2x_map_domain: Indicates the domain to use for overmapping. + * @request_region: function to be called when the number of allocations + * goes from 0 -> 1 + * @release_region: function to be called when the number of allocations + * goes from 1 -> 0 + * @setup_region: function to be called upon ion registration + * + */ +struct ion_cp_heap_pdata { + enum ion_permission_type permission_type; + unsigned int align; + ion_phys_addr_t secure_base; /* Base addr used when heap is shared */ + size_t secure_size; /* Size used for securing heap when heap is shared*/ + int reusable; + int mem_is_fmem; + enum ion_fixed_position fixed_position; + int iommu_map_all; + int iommu_2x_map_domain; + ion_virt_addr_t *virt_addr; + int (*request_region)(void *); + int (*release_region)(void *); + void *(*setup_region)(void); +}; + +/** + * struct ion_co_heap_pdata - defines a carveout heap in the given platform + * @adjacent_mem_id: Id of heap that this heap must be adjacent to. + * @align: Alignment requirement for the memory + * @mem_is_fmem: Flag indicating whether this memory is coming from fmem + * or not. + * @fixed_position: If nonzero, position in the fixed area. + * @request_region: function to be called when the number of allocations + * goes from 0 -> 1 + * @release_region: function to be called when the number of allocations + * goes from 1 -> 0 + * @setup_region: function to be called upon ion registration + * + */ +struct ion_co_heap_pdata { + int adjacent_mem_id; + unsigned int align; + int mem_is_fmem; + enum ion_fixed_position fixed_position; + int (*request_region)(void *); + int (*release_region)(void *); + void *(*setup_region)(void); +}; + +/** + * struct ion_platform_data - array of platform heaps passed from board file + * @has_outer_cache: set to 1 if outer cache is used, 0 otherwise. + * @nr: number of structures in the array + * @request_region: function to be called when the number of allocations goes + * from 0 -> 1 + * @release_region: function to be called when the number of allocations goes + * from 1 -> 0 + * @setup_region: function to be called upon ion registration + * @heaps: array of platform_heap structures + * + * Provided by the board file in the form of platform data to a platform device.
+ */ +struct ion_platform_data { + unsigned int has_outer_cache; + int nr; + int (*request_region)(void *); + int (*release_region)(void *); + void *(*setup_region)(void); + struct ion_platform_heap heaps[]; +}; + +#ifdef CONFIG_ION + +/** + * ion_client_create() - allocate a client and return it + * @dev: the global ion device + * @heap_mask: mask of heaps this client can allocate from + * @name: used for debugging + */ +struct ion_client *ion_client_create(struct ion_device *dev, + unsigned int heap_mask, const char *name); + +/** + * msm_ion_client_create - allocate a client using the ion_device specified in + * drivers/gpu/ion/msm/msm_ion.c + * + * heap_mask and name are the same as ion_client_create, return values + * are the same as ion_client_create. + */ + +struct ion_client *msm_ion_client_create(unsigned int heap_mask, + const char *name); + +/** + * ion_client_destroy() - frees a client and all its handles + * @client: the client + * + * Free the provided client and all its resources including + * any handles it is holding. + */ +void ion_client_destroy(struct ion_client *client); + +/** + * ion_alloc - allocate ion memory + * @client: the client + * @len: size of the allocation + * @align: requested allocation alignment, lots of hardware blocks have + * alignment requirements of some kind + * @flags: mask of heaps to allocate from, if multiple bits are set + * heaps will be tried in order from lowest to highest order bit + * + * Allocate memory in one of the heaps provided in heap mask and return + * an opaque handle to it. + */ +struct ion_handle *ion_alloc(struct ion_client *client, size_t len, + size_t align, unsigned int flags); + +/** + * ion_free - free a handle + * @client: the client + * @handle: the handle to free + * + * Free the provided handle. + */ +void ion_free(struct ion_client *client, struct ion_handle *handle); + +/** + * ion_phys - returns the physical address and len of a handle + * @client: the client + * @handle: the handle + * @addr: a pointer to put the address in + * @len: a pointer to put the length in + * + * This function queries the heap for a particular handle to get the + * handle's physical address. Its output is only correct if + * a heap returns physically contiguous memory -- in other cases + * this api should not be implemented -- ion_map_dma should be used + * instead. Returns -EINVAL if the handle is invalid. This has + * no implications on the reference counting of the handle -- + * the returned value may not be valid if the caller is not + * holding a reference. + */ +int ion_phys(struct ion_client *client, struct ion_handle *handle, + ion_phys_addr_t *addr, size_t *len); + +/** + * ion_map_kernel - create mapping for the given handle + * @client: the client + * @handle: handle to map + * @flags: flags for this mapping + * + * Map the given handle into the kernel and return a kernel address that + * can be used to access this address. If no flags are specified, this + * will return a non-secure uncached mapping.
+ */ +void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle, + unsigned long flags); + +/** + * ion_unmap_kernel() - destroy a kernel mapping for a handle + * @client: the client + * @handle: handle to unmap + */ +void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle); + +/** + * ion_map_dma - create a dma mapping for a given handle + * @client: the client + * @handle: handle to map + * + * Return an sglist describing the given handle + */ +struct scatterlist *ion_map_dma(struct ion_client *client, + struct ion_handle *handle, + unsigned long flags); + +/** + * ion_unmap_dma() - destroy a dma mapping for a handle + * @client: the client + * @handle: handle to unmap + */ +void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle); + +/** + * ion_share() - given a handle, obtain a buffer to pass to other clients + * @client: the client + * @handle: the handle to share + * + * Given a handle, return a buffer, which exists in a global name + * space, and can be passed to other clients. Should be passed into ion_import + * to obtain a new handle for this buffer. + * + * NOTE: This function does not take an extra reference. The burden is on the + * caller to make sure the buffer doesn't go away while it's being passed to + * another client. That is, ion_free should not be called on this handle until + * the buffer has been imported into the other client. + */ +struct ion_buffer *ion_share(struct ion_client *client, + struct ion_handle *handle); + +/** + * ion_import() - given a buffer in another client, import it + * @client: the client + * @buffer: the buffer to import (as obtained from ion_share) + * + * Given a buffer, add it to the client and return the handle to use to refer + * to it further. This is called to share a handle from one kernel client to + * another. + */ +struct ion_handle *ion_import(struct ion_client *client, + struct ion_buffer *buffer); + +/** + * ion_import_fd() - given an fd obtained via ION_IOC_SHARE ioctl, import it + * @client: the client + * @fd: the fd + * + * A helper function for drivers that will be receiving ion buffers shared + * with them from userspace. These buffers are represented by a file + * descriptor obtained as the return from the ION_IOC_SHARE ioctl. + * This function converts that fd into the underlying buffer, and returns + * the handle to use to refer to it further. + */ +struct ion_handle *ion_import_fd(struct ion_client *client, int fd); + +/** + * ion_handle_get_flags - get the flags for a given handle + * + * @client - client who allocated the handle + * @handle - handle to get the flags + * @flags - pointer to store the flags + * + * Gets the current flags for a handle. These flags indicate various options + * of the buffer (caching, security, etc.) + */ +int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle, + unsigned long *flags); + + +/** + * ion_map_iommu - map the given handle into an iommu + * + * @client - client who allocated the handle + * @handle - handle to map + * @domain_num - domain number to map to + * @partition_num - partition number to allocate iova from + * @align - alignment for the iova + * @iova_length - length of iova to map. If the iova length is + * greater than the handle length, the remaining + * address space will be mapped to a dummy buffer.
+ * @iova - pointer to store the iova address + * @buffer_size - pointer to store the size of the buffer + * @flags - flags for options to map + * @iommu_flags - flags specific to the iommu. + * + * Maps the handle into the iova space specified via domain number. Iova + * will be allocated from the partition specified via partition_num. + * Returns 0 on success, negative value on error. + */ +int ion_map_iommu(struct ion_client *client, struct ion_handle *handle, + int domain_num, int partition_num, unsigned long align, + unsigned long iova_length, unsigned long *iova, + unsigned long *buffer_size, + unsigned long flags, unsigned long iommu_flags); + + +/** + * ion_handle_get_size - get the allocated size of a given handle + * + * @client - client who allocated the handle + * @handle - handle to get the size + * @size - pointer to store the size + * + * gives the allocated size of a handle. returns 0 on success, negative + * value on error + * + * NOTE: This is intended to be used only to get a size to pass to map_iommu. + * You should *NOT* rely on this for any other usage. + */ + +int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle, + unsigned long *size); + +/** + * ion_unmap_iommu - unmap the handle from an iommu + * + * @client - client who allocated the handle + * @handle - handle to unmap + * @domain_num - domain to unmap from + * @partition_num - partition to unmap from + * + * Decrement the reference count on the iommu mapping. If the count is + * 0, the mapping will be removed from the iommu. + */ +void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle, + int domain_num, int partition_num); + + +/** + * ion_secure_heap - secure a heap + * + * @client - a client that has allocated from the heap heap_id + * @heap_id - heap id to secure. + * + * Secure a heap + * Returns 0 on success + */ +int ion_secure_heap(struct ion_device *dev, int heap_id); + +/** + * ion_unsecure_heap - un-secure a heap + * + * @client - a client that has allocated from the heap heap_id + * @heap_id - heap id to un-secure. + * + * Un-secure a heap + * Returns 0 on success + */ +int ion_unsecure_heap(struct ion_device *dev, int heap_id); + +/** + * msm_ion_secure_heap - secure a heap. Wrapper around ion_secure_heap. + * + * @heap_id - heap id to secure. + * + * Secure a heap + * Returns 0 on success + */ +int msm_ion_secure_heap(int heap_id); + +/** + * msm_ion_unsecure_heap - unsecure a heap. Wrapper around ion_unsecure_heap. + * + * @heap_id - heap id to secure. + * + * Un-secure a heap + * Returns 0 on success + */ +int msm_ion_unsecure_heap(int heap_id); + +/** + * msm_ion_do_cache_op - do cache operations. + * + * @client - pointer to ION client. + * @handle - pointer to buffer handle. + * @vaddr - virtual address to operate on. + * @len - Length of data to do cache operation on. 
+ * @cmd - Cache operation to perform: + * ION_IOC_CLEAN_CACHES + * ION_IOC_INV_CACHES + * ION_IOC_CLEAN_INV_CACHES + * + * Returns 0 on success + */ +int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle, + void *vaddr, unsigned long len, unsigned int cmd); + +#else +static inline struct ion_client *ion_client_create(struct ion_device *dev, + unsigned int heap_mask, const char *name) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct ion_client *msm_ion_client_create(unsigned int heap_mask, + const char *name) +{ + return ERR_PTR(-ENODEV); +} + +static inline void ion_client_destroy(struct ion_client *client) { } + +static inline struct ion_handle *ion_alloc(struct ion_client *client, + size_t len, size_t align, unsigned int flags) +{ + return ERR_PTR(-ENODEV); +} + +static inline void ion_free(struct ion_client *client, + struct ion_handle *handle) { } + + +static inline int ion_phys(struct ion_client *client, + struct ion_handle *handle, ion_phys_addr_t *addr, size_t *len) +{ + return -ENODEV; +} + +static inline void *ion_map_kernel(struct ion_client *client, + struct ion_handle *handle, unsigned long flags) +{ + return ERR_PTR(-ENODEV); +} + +static inline void ion_unmap_kernel(struct ion_client *client, + struct ion_handle *handle) { } + +static inline struct scatterlist *ion_map_dma(struct ion_client *client, + struct ion_handle *handle, unsigned long flags) +{ + return ERR_PTR(-ENODEV); +} + +static inline void ion_unmap_dma(struct ion_client *client, + struct ion_handle *handle) { } + +static inline struct ion_buffer *ion_share(struct ion_client *client, + struct ion_handle *handle) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct ion_handle *ion_import(struct ion_client *client, + struct ion_buffer *buffer) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct ion_handle *ion_import_fd(struct ion_client *client, + int fd) +{ + return ERR_PTR(-ENODEV); +} + +static inline int ion_handle_get_flags(struct ion_client *client, + struct ion_handle *handle, unsigned long *flags) +{ + return -ENODEV; +} + +static inline int ion_map_iommu(struct ion_client *client, + struct ion_handle *handle, int domain_num, + int partition_num, unsigned long align, + unsigned long iova_length, unsigned long *iova, + unsigned long *buffer_size, + unsigned long flags, + unsigned long iommu_flags) +{ + return -ENODEV; +} + +static inline void ion_unmap_iommu(struct ion_client *client, + struct ion_handle *handle, int domain_num, + int partition_num) +{ + return; +} + +static inline int ion_secure_heap(struct ion_device *dev, int heap_id) +{ + return -ENODEV; + +} + +static inline int ion_unsecure_heap(struct ion_device *dev, int heap_id) +{ + return -ENODEV; +} + +static inline int msm_ion_secure_heap(int heap_id) +{ + return -ENODEV; + +} + +static inline int msm_ion_unsecure_heap(int heap_id) +{ + return -ENODEV; +} + +static inline int msm_ion_do_cache_op(struct ion_client *client, + struct ion_handle *handle, void *vaddr, + unsigned long len, unsigned int cmd) +{ + return -ENODEV; +} + +#endif /* CONFIG_ION */ +#endif /* __KERNEL__ */ + +/** + * DOC: Ion Userspace API + * + * create a client by opening /dev/ion + * most operations handled via following ioctls + * + */ + +/** + * struct ion_allocation_data - metadata passed from userspace for allocations + * @len: size of the allocation + * @align: required alignment of the allocation + * @flags: flags passed to heap + * @handle: pointer that will be populated with a cookie to use to refer + * to this allocation + 
* + * Provided by userspace as an argument to the ioctl + */ +struct ion_allocation_data { + size_t len; + size_t align; + unsigned int heap_mask; + unsigned int flags; + struct ion_handle *handle; +}; + +/** + * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair + * @handle: a handle + * @fd: a file descriptor representing that handle + * + * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with + * the handle returned from ion alloc, and the kernel returns the file + * descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace + * provides the file descriptor and the kernel returns the handle. + */ +struct ion_fd_data { + struct ion_handle *handle; + int fd; +}; + +/** + * struct ion_handle_data - a handle passed to/from the kernel + * @handle: a handle + */ +struct ion_handle_data { + struct ion_handle *handle; +}; + +/** + * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl + * @cmd: the custom ioctl function to call + * @arg: additional data to pass to the custom ioctl, typically a user + * pointer to a predefined structure + * + * This works just like the regular cmd and arg fields of an ioctl. + */ +struct ion_custom_data { + unsigned int cmd; + unsigned long arg; +}; + + +/* struct ion_flush_data - data passed to ion for flushing caches + * + * @handle: handle with data to flush + * @fd: fd to flush + * @vaddr: userspace virtual address mapped with mmap + * @offset: offset into the handle to flush + * @length: length of handle to flush + * + * Performs cache operations on the handle. If p is the start address + * of the handle, p + offset through p + offset + length will have + * the cache operations performed + */ +struct ion_flush_data { + struct ion_handle *handle; + int fd; + void *vaddr; + unsigned int offset; + unsigned int length; +}; + +/* struct ion_flag_data - information about flags for this buffer + * + * @handle: handle to get flags from + * @flags: flags of this handle + * + * Takes handle as an input and outputs the flags from the handle + * in the flag field. + */ +struct ion_flag_data { + struct ion_handle *handle; + unsigned long flags; +}; + +#define ION_IOC_MAGIC 'I' + +/** + * DOC: ION_IOC_ALLOC - allocate memory + * + * Takes an ion_allocation_data struct and returns it with the handle field + * populated with the opaque handle for the allocation. + */ +#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \ + struct ion_allocation_data) + +/** + * DOC: ION_IOC_FREE - free memory + * + * Takes an ion_handle_data struct and frees the handle. + */ +#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data) + +/** + * DOC: ION_IOC_MAP - get a file descriptor to mmap + * + * Takes an ion_fd_data struct with the handle field populated with a valid + * opaque handle. Returns the struct with the fd field set to a file + * descriptor open in the current address space. This file descriptor + * can then be used as an argument to mmap. + */ +#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data) + +/** + * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation + * + * Takes an ion_fd_data struct with the handle field populated with a valid + * opaque handle. Returns the struct with the fd field set to a file + * descriptor open in the current address space. This file descriptor + * can then be passed to another process. The corresponding opaque handle can + * be retrieved via ION_IOC_IMPORT. 
+ */ +#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data) + +/** + * DOC: ION_IOC_IMPORT - imports a shared file descriptor + * + * Takes an ion_fd_data struct with the fd field populated with a valid file + * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle + * filed set to the corresponding opaque handle. + */ +#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data) + +/** + * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl + * + * Takes the argument of the architecture specific ioctl to call and + * passes appropriate userdata for that ioctl + */ +#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data) + + +/** + * DOC: ION_IOC_CLEAN_CACHES - clean the caches + * + * Clean the caches of the handle specified. + */ +#define ION_IOC_CLEAN_CACHES _IOWR(ION_IOC_MAGIC, 20, \ + struct ion_flush_data) +/** + * DOC: ION_MSM_IOC_INV_CACHES - invalidate the caches + * + * Invalidate the caches of the handle specified. + */ +#define ION_IOC_INV_CACHES _IOWR(ION_IOC_MAGIC, 21, \ + struct ion_flush_data) +/** + * DOC: ION_MSM_IOC_CLEAN_CACHES - clean and invalidate the caches + * + * Clean and invalidate the caches of the handle specified. + */ +#define ION_IOC_CLEAN_INV_CACHES _IOWR(ION_IOC_MAGIC, 22, \ + struct ion_flush_data) + +/** + * DOC: ION_IOC_GET_FLAGS - get the flags of the handle + * + * Gets the flags of the current handle which indicate cachability, + * secure state etc. + */ +#define ION_IOC_GET_FLAGS _IOWR(ION_IOC_MAGIC, 23, \ + struct ion_flag_data) + + +/** + * DOC: ION_IOC_SYNC - BOGUS + * + * NOT SUPPORTED + */ +#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 42, \ + struct ion_flag_data) +#endif /* _LINUX_ION_H */ diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h index 76dad48088474..72324720a973a 100644 --- a/include/linux/ioprio.h +++ b/include/linux/ioprio.h @@ -64,6 +64,8 @@ static inline int task_ioprio_class(struct io_context *ioc) static inline int task_nice_ioprio(struct task_struct *task) { + if (iso_task(task)) + return 0; return (task_nice(task) + 20) / 5; } diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 6811f4bfc6e7a..cf4e278945bc9 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -164,7 +164,7 @@ static inline u64 get_jiffies_64(void) * Have the 32 bit jiffies value wrap 5 minutes after boot * so jiffies wrap bugs show up earlier. */ -#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) +#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-10*HZ)) /* * Change timeval to jiffies, trying to avoid the diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 2fe6e84894a4b..00cec4dc0ae25 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -187,14 +187,76 @@ NORET_TYPE void do_exit(long error_code) ATTRIB_NORET; NORET_TYPE void complete_and_exit(struct completion *, long) ATTRIB_NORET; + +/* Internal, do not use. */ +int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res); +int __must_check _kstrtol(const char *s, unsigned int base, long *res); + +int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res); +int __must_check kstrtoll(const char *s, unsigned int base, long long *res); +static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res) +{ + /* + * We want to shortcut function call, but + * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0. 
+ */ + if (sizeof(unsigned long) == sizeof(unsigned long long) && + __alignof__(unsigned long) == __alignof__(unsigned long long)) + return kstrtoull(s, base, (unsigned long long *)res); + else + return _kstrtoul(s, base, res); +} + +static inline int __must_check kstrtol(const char *s, unsigned int base, long *res) +{ + /* + * We want to shortcut function call, but + * __builtin_types_compatible_p(long, long long) = 0. + */ + if (sizeof(long) == sizeof(long long) && + __alignof__(long) == __alignof__(long long)) + return kstrtoll(s, base, (long long *)res); + else + return _kstrtol(s, base, res); +} + +int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res); +int __must_check kstrtoint(const char *s, unsigned int base, int *res); + +static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res) +{ + return kstrtoull(s, base, res); +} + +static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res) +{ + return kstrtoll(s, base, res); +} + +static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res) +{ + return kstrtouint(s, base, res); +} + +static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res) +{ + return kstrtoint(s, base, res); +} + +int __must_check kstrtou16(const char *s, unsigned int base, u16 *res); +int __must_check kstrtos16(const char *s, unsigned int base, s16 *res); +int __must_check kstrtou8(const char *s, unsigned int base, u8 *res); +int __must_check kstrtos8(const char *s, unsigned int base, s8 *res); + extern unsigned long simple_strtoul(const char *,char **,unsigned int); extern long simple_strtol(const char *,char **,unsigned int); extern unsigned long long simple_strtoull(const char *,char **,unsigned int); extern long long simple_strtoll(const char *,char **,unsigned int); -extern int __must_check strict_strtoul(const char *, unsigned int, unsigned long *); -extern int __must_check strict_strtol(const char *, unsigned int, long *); -extern int __must_check strict_strtoull(const char *, unsigned int, unsigned long long *); -extern int __must_check strict_strtoll(const char *, unsigned int, long long *); +#define strict_strtoul kstrtoul +#define strict_strtol kstrtol +#define strict_strtoull kstrtoull +#define strict_strtoll kstrtoll + extern int sprintf(char * buf, const char * fmt, ...) __attribute__ ((format (printf, 2, 3))); extern int vsprintf(char *buf, const char *, va_list) diff --git a/include/linux/libata.h b/include/linux/libata.h index c9c5d7ad1a2bc..1f000807847d2 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -203,6 +203,7 @@ enum { * management */ ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity * led */ + ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */ /* bits 24:31 of ap->flags are reserved for LLD specific flags */ diff --git a/include/linux/lightsensor.h b/include/linux/lightsensor.h new file mode 100644 index 0000000000000..f133cb684c227 --- /dev/null +++ b/include/linux/lightsensor.h @@ -0,0 +1,35 @@ +/* include/linux/lightsensor.h + * + * Copyright (C) 2009 Google, Inc. + * Author: Iliyan Malchev + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __LINUX_LIGHTSENSOR_H +#define __LINUX_LIGHTSENSOR_H + +#include +#include + +#define LIGHTSENSOR_IOCTL_MAGIC 'l' + +#define LIGHTSENSOR_IOCTL_GET_ENABLED _IOR(LIGHTSENSOR_IOCTL_MAGIC, 1, int *) +#define LIGHTSENSOR_IOCTL_ENABLE _IOW(LIGHTSENSOR_IOCTL_MAGIC, 2, int *) + +struct lightsensor_smd_platform_data { + const char *name; + uint16_t levels[10]; + uint16_t golden_adc; + int (*ls_power)(int, uint8_t); +}; + +#endif diff --git a/include/linux/memcopy.h b/include/linux/memcopy.h new file mode 100644 index 0000000000000..18bc86091dd3d --- /dev/null +++ b/include/linux/memcopy.h @@ -0,0 +1,226 @@ +/* + * memcopy.h -- definitions for memory copy functions. Generic C version. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + * Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * The code is derived from the GNU C Library. + * Copyright (C) 1991, 1992, 1993, 1997, 2004 Free Software Foundation, Inc. + */ +#ifndef _LINUX_MEMCOPY_H_ +#define _LINUX_MEMCOPY_H_ + +/* + * The strategy of the memory functions is: + * + * 1. Copy bytes until the destination pointer is aligned. + * + * 2. Copy words in unrolled loops. If the source and destination + * are not aligned in the same way, use word memory operations, + * but shift and merge two read words before writing. + * + * 3. Copy the few remaining bytes. + * + * This is fast on processors that have at least 10 registers for + * allocation by GCC, and that can access memory at reg+const in one + * instruction. + */ + +#include +#include +#include + +/* + * The macros defined in this file are: + * + * BYTE_COPY_FWD(dst_beg_ptr, src_beg_ptr, nbytes_to_copy) + * + * BYTE_COPY_BWD(dst_end_ptr, src_end_ptr, nbytes_to_copy) + * + * WORD_COPY_FWD(dst_beg_ptr, src_beg_ptr, nbytes_remaining, nbytes_to_copy) + * + * WORD_COPY_BWD(dst_end_ptr, src_end_ptr, nbytes_remaining, nbytes_to_copy) + * + * MERGE(old_word, sh_1, new_word, sh_2) + * + * MEM_COPY_FWD(dst_beg_ptr, src_beg_ptr, nbytes_to_copy) + * + * MEM_COPY_BWD(dst_end_ptr, src_end_ptr, nbytes_to_copy) + */ + +#define OP_T_THRESHOLD 16 + +/* + * Type to use for aligned memory operations. + * This should normally be the biggest type supported by a single load + * and store. + */ +#define op_t unsigned long int +#define OPSIZ (sizeof(op_t)) + +/* Type to use for unaligned operations. */ +typedef unsigned char byte; + +#ifndef MERGE +# ifdef __LITTLE_ENDIAN +# define MERGE(w0, sh_1, w1, sh_2) (((w0) >> (sh_1)) | ((w1) << (sh_2))) +# elif defined(__BIG_ENDIAN) +# define MERGE(w0, sh_1, w1, sh_2) (((w0) << (sh_1)) | ((w1) >> (sh_2))) +# else +# error "Macro MERGE() hasn't defined!" 
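+/* + * Worked example (illustrative only; the exact shift values used by the + * word-copy helpers are an assumption, derived from the MERGE() definition + * above): with a 32-bit op_t on a little-endian machine, producing one + * destination word from a source misaligned by one byte reads two aligned + * words w0 and w1 and stores MERGE(w0, 8, w1, 24), i.e. the upper three + * bytes of w0 combined with the lowest byte of w1; in general the shifts + * would be 8 * offset and 8 * (OPSIZ - offset). + */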
+# endif +#endif + +/* + * Copy exactly NBYTES bytes from SRC_BP to DST_BP, + * without any assumptions about alignment of the pointers. + */ +#ifndef BYTE_COPY_FWD +#define BYTE_COPY_FWD(dst_bp, src_bp, nbytes) \ +do { \ + size_t __nbytes = (nbytes); \ + while (__nbytes > 0) { \ + byte __x = ((byte *) src_bp)[0]; \ + src_bp += 1; \ + __nbytes -= 1; \ + ((byte *) dst_bp)[0] = __x; \ + dst_bp += 1; \ + } \ +} while (0) +#endif + +/* + * Copy exactly NBYTES_TO_COPY bytes from SRC_END_PTR to DST_END_PTR, + * beginning at the bytes right before the pointers and continuing towards + * smaller addresses. Don't assume anything about alignment of the + * pointers. + */ +#ifndef BYTE_COPY_BWD +#define BYTE_COPY_BWD(dst_ep, src_ep, nbytes) \ +do { \ + size_t __nbytes = (nbytes); \ + while (__nbytes > 0) { \ + byte __x; \ + src_ep -= 1; \ + __x = ((byte *) src_ep)[0]; \ + dst_ep -= 1; \ + __nbytes -= 1; \ + ((byte *) dst_ep)[0] = __x; \ + } \ +} while (0) +#endif +/* + * Copy *up to* NBYTES bytes from SRC_BP to DST_BP, with + * the assumption that DST_BP is aligned on an OPSIZ multiple. If + * not all bytes could be easily copied, store remaining number of bytes + * in NBYTES_LEFT, otherwise store 0. + */ +extern void _wordcopy_fwd_aligned(long int, long int, size_t); +extern void _wordcopy_fwd_dest_aligned(long int, long int, size_t); +#ifndef WORD_COPY_FWD +#define WORD_COPY_FWD(dst_bp, src_bp, nbytes_left, nbytes) \ +do { \ + if (src_bp % OPSIZ == 0) \ + _wordcopy_fwd_aligned (dst_bp, src_bp, (nbytes) / OPSIZ); \ + else \ + _wordcopy_fwd_dest_aligned (dst_bp, src_bp, (nbytes) / OPSIZ);\ + \ + src_bp += (nbytes) & -OPSIZ; \ + dst_bp += (nbytes) & -OPSIZ; \ + (nbytes_left) = (nbytes) % OPSIZ; \ +} while (0) +#endif + +/* + * Copy *up to* NBYTES_TO_COPY bytes from SRC_END_PTR to DST_END_PTR, + * beginning at the words (of type op_t) right before the pointers and + * continuing towards smaller addresses. May take advantage of that + * DST_END_PTR is aligned on an OPSIZ multiple. If not all bytes could be + * easily copied, store remaining number of bytes in NBYTES_REMAINING, + * otherwise store 0. + */ +extern void _wordcopy_bwd_aligned(long int, long int, size_t); +extern void _wordcopy_bwd_dest_aligned(long int, long int, size_t); +#ifndef WORD_COPY_BWD +#define WORD_COPY_BWD(dst_ep, src_ep, nbytes_left, nbytes) \ +do { \ + if (src_ep % OPSIZ == 0) \ + _wordcopy_bwd_aligned (dst_ep, src_ep, (nbytes) / OPSIZ); \ + else \ + _wordcopy_bwd_dest_aligned (dst_ep, src_ep, (nbytes) / OPSIZ);\ + \ + src_ep -= (nbytes) & -OPSIZ; \ + dst_ep -= (nbytes) & -OPSIZ; \ + (nbytes_left) = (nbytes) % OPSIZ; \ +} while (0) +#endif + +/* Copy memory from the beginning to the end */ +#ifndef MEM_COPY_FWD +static __always_inline void mem_copy_fwd(unsigned long dstp, + unsigned long srcp, + size_t count) +{ + /* If there not too few bytes to copy, use word copy. */ + if (count >= OP_T_THRESHOLD) { + /* Copy just a few bytes to make dstp aligned. */ + count -= (-dstp) % OPSIZ; + BYTE_COPY_FWD(dstp, srcp, (-dstp) % OPSIZ); + + /* + * Copy from srcp to dstp taking advantage of the known + * alignment of dstp. Number if bytes remaining is put in + * the third argument. + */ + WORD_COPY_FWD(dstp, srcp, count, count); + + /* Fall out and copy the tail. */ + } + + /* There are just a few bytes to copy. Use byte memory operations. */ + BYTE_COPY_FWD(dstp, srcp, count); +} +#endif + +/* Copy memory from the end to the beginning. 
*/ +#ifndef MEM_COPY_BWD +static __always_inline void mem_copy_bwd(unsigned long dstp, + unsigned long srcp, + size_t count) +{ + srcp += count; + dstp += count; + + /* If there not too few bytes to copy, use word copy. */ + if (count >= OP_T_THRESHOLD) { + /* Copy just a few bytes to make dstp aligned. */ + count -= dstp % OPSIZ; + BYTE_COPY_BWD(dstp, srcp, dstp % OPSIZ); + + /* + * Copy from srcp to dstp taking advantage of the known + * alignment of dstp. Number if bytes remaining is put in + * the third argument. + */ + WORD_COPY_BWD(dstp, srcp, count, count); + + /* Fall out and copy the tail. */ + } + + /* There are just a few bytes to copy. Use byte memory operations. */ + BYTE_COPY_BWD (dstp, srcp, count); +} +#endif + +#endif diff --git a/include/linux/memory_alloc.h b/include/linux/memory_alloc.h new file mode 100644 index 0000000000000..e7049f8a355ea --- /dev/null +++ b/include/linux/memory_alloc.h @@ -0,0 +1,58 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _LINUX_MEMALLOC_H +#define _LINUX_MEMALLOC_H + +#include +#include +#include + +struct mem_pool { + struct mutex pool_mutex; + struct gen_pool *gpool; + unsigned long paddr; + unsigned long size; + unsigned long free; + unsigned int id; +}; + +struct alloc { + struct rb_node rb_node; + void *vaddr; + unsigned long paddr; + struct mem_pool *mpool; + unsigned long len; + void *caller; +}; + +struct mem_pool *initialize_memory_pool(unsigned long start, + unsigned long size, int mem_type); + +void *allocate_contiguous_memory(unsigned long size, + int mem_type, unsigned long align, int cached); + +unsigned long _allocate_contiguous_memory_nomap(unsigned long size, + int mem_type, unsigned long align, void *caller); + +unsigned long allocate_contiguous_memory_nomap(unsigned long size, + int mem_type, unsigned long align); + +void free_contiguous_memory(void *addr); +void free_contiguous_memory_by_paddr(unsigned long paddr); + +unsigned long memory_pool_node_paddr(void *vaddr); + +unsigned long memory_pool_node_len(void *vaddr); + +int memory_pool_init(void); +#endif /* _LINUX_MEMALLOC_H */ diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 8122018d30002..2d2414be44c46 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -10,6 +10,9 @@ struct zone; struct pglist_data; struct mem_section; +extern unsigned long movable_reserved_start, movable_reserved_size; +extern unsigned long low_power_memory_start, low_power_memory_size; + #ifdef CONFIG_MEMORY_HOTPLUG /* @@ -232,4 +235,14 @@ extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms) extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum); +extern void reserve_hotplug_pages(unsigned long start_pfn, + unsigned long nr_pages); +extern void unreserve_hotplug_pages(unsigned long start_pfn, + unsigned long nr_pages); #endif /* __LINUX_MEMORY_HOTPLUG_H */ +extern int physical_remove_memory(u64 start, u64 size); +extern int arch_physical_remove_memory(u64 start, u64 size); 
+extern int physical_low_power_memory(u64 start, u64 size); +extern int arch_physical_low_power_memory(u64 start, u64 size); +extern int physical_active_memory(u64 start, u64 size); +extern int arch_physical_active_memory(u64 start, u64 size); diff --git a/include/linux/mfd/pm8058.h b/include/linux/mfd/pm8058.h new file mode 100644 index 0000000000000..4e0ffdd8ea1e7 --- /dev/null +++ b/include/linux/mfd/pm8058.h @@ -0,0 +1,186 @@ +/* include/linux/mfd/pm8058.h + * + * Copyright (C) 2010 Google, Inc. + * Author: Dima Zavin + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __LINUX_MFD_PM8058_CORE_H +#define __LINUX_MFD_PM8058_CORE_H + +#include + +#define PM8058_NUM_GPIO_IRQS 40 +#define PM8058_NUM_MPP_IRQS 12 +#define PM8058_NUM_KEYPAD_IRQS 2 +#define PM8058_NUM_CHARGER_IRQS 7 +#define PM8058_NUM_IRQS (PM8058_NUM_GPIO_IRQS + \ + PM8058_NUM_MPP_IRQS + \ + PM8058_NUM_KEYPAD_IRQS + \ + PM8058_NUM_CHARGER_IRQS) + +/* be careful if you change this since this is used to map irq <-> gpio */ +#define PM8058_FIRST_GPIO_IRQ 0 +#define PM8058_FIRST_MPP_IRQ (PM8058_FIRST_GPIO_IRQ + \ + PM8058_NUM_GPIO_IRQS) +#define PM8058_FIRST_KEYPAD_IRQ (PM8058_FIRST_MPP_IRQ + \ + PM8058_NUM_MPP_IRQS) +#define PM8058_FIRST_CHARGER_IRQ (PM8058_FIRST_KEYPAD_IRQ + \ + PM8058_NUM_KEYPAD_IRQS) + +#define PM8058_KEYPAD_IRQ (PM8058_FIRST_KEYPAD_IRQ + 0) +#define PM8058_KEYPAD_STUCK_IRQ (PM8058_FIRST_KEYPAD_IRQ + 1) + +#define PM8058_CHGVAL_IRQ (PM8058_FIRST_CHARGER_IRQ + 0) +#define PM8058_CHGEND_IRQ (PM8058_FIRST_CHARGER_IRQ + 1) +#define PM8058_FASTCHG_IRQ (PM8058_FIRST_CHARGER_IRQ + 2) +#define PM8058_CHGFAIL_IRQ (PM8058_FIRST_CHARGER_IRQ + 5) +#define PM8058_CHGDONE_IRQ (PM8058_FIRST_CHARGER_IRQ + 6) + +#define PM8058_GPIO_TO_IRQ(base,gpio) (PM8058_FIRST_GPIO_IRQ + \ + (base) + (gpio)) + +/* these need to match the irq counts/offsets above above */ +#define PM8058_FIRST_GPIO PM8058_FIRST_GPIO_IRQ +#define PM8058_NUM_GPIOS PM8058_NUM_GPIO_IRQS +#define PM8058_FIRST_MPP PM8058_FIRST_MPP_IRQ +#define PM8058_NUM_MPP PM8058_NUM_MPP_IRQS + +#define PM8058_GPIO(base,gpio) ((base) + (gpio) + PM8058_FIRST_GPIO) +/*#define PM8058_MPP(base,mpp) ((base) + (mpp) + PM8058_FIRST_MPP)*/ + +struct pm8058_keypad_platform_data { + const char *name; + int num_drv; + int num_sns; + /* delay in ms = 1 << scan_delay_shift, 0-7 */ + int scan_delay_shift; + /* # of 32kHz clock cycles, 1-4 */ + int drv_hold_clks; + /* in increments of 5ms, max 20ms */ + int debounce_ms; + + /* size must be num_drv * num_sns + * index is (drv * num_sns + sns) */ + const unsigned short *keymap; + + int (*init)(struct device *dev); +}; + +struct pm8058_charger_platform_data { + /* function to call on vbus detect */ + void (*vbus_present)(bool present); + + int (*charge)(u32 max_current, bool is_ac); + + char **supplied_to; + int num_supplicants; +}; + +struct pm8058_platform_data { + unsigned int irq_base; + unsigned int gpio_base; + int (*init)(struct device *dev); + + /* child devices */ + struct pm8058_keypad_platform_data *keypad_pdata; + struct pm8058_charger_platform_data *charger_pdata; +}; + +#define PM8058_GPIO_VIN_SRC_VPH_PWR 
0x0 /* VDD_L6_L7 */ +#define PM8058_GPIO_VIN_SRC_VREG_BB 0x1 /* VDD_L3_L4_L5 */ +#define PM8058_GPIO_VIN_SRC_VREG_S3 0x2 /* VDD_L0_L1_LVS, 1.8V */ +#define PM8058_GPIO_VIN_SRC_VREG_L3 0x3 /* 1.8V or 2.85 */ +#define PM8058_GPIO_VIN_SRC_VREG_L7 0x4 /* 1.8V */ +#define PM8058_GPIO_VIN_SRC_VREG_L6 0x5 /* 3.3V */ +#define PM8058_GPIO_VIN_SRC_VREG_L5 0x6 /* 2.85V */ +#define PM8058_GPIO_VIN_SRC_VREG_L2 0x7 /* 2.6V */ + +#define PM8058_GPIO_INPUT 0x01 +#define PM8058_GPIO_OUTPUT 0x02 +#define PM8058_GPIO_OUTPUT_HIGH 0x04 + +#define PM8058_GPIO_STRENGTH_OFF 0x0 +#define PM8058_GPIO_STRENGTH_HIGH 0x1 +#define PM8058_GPIO_STRENGTH_MED 0x2 +#define PM8058_GPIO_STRENGTH_LOW 0x3 + +#define PM8058_GPIO_PULL_UP_30 0x0 +#define PM8058_GPIO_PULL_UP_1P5 0x1 +#define PM8058_GPIO_PULL_UP_31P5 0x2 +#define PM8058_GPIO_PULL_UP_1P5_30 0x3 +#define PM8058_GPIO_PULL_DOWN 0x4 +#define PM8058_GPIO_PULL_NONE 0x5 + +#define PM8058_GPIO_FUNC_NORMAL 0x0 +#define PM8058_GPIO_FUNC_PAIRED 0x1 +#define PM8058_GPIO_FUNC_1 0x2 +#define PM8058_GPIO_FUNC_2 0x3 + +/* gpio pin flags */ +#define PM8058_GPIO_OPEN_DRAIN 0x10 +#define PM8058_GPIO_HIGH_Z 0x20 +#define PM8058_GPIO_INV_IRQ_POL 0x40 +#define PM8058_GPIO_CONFIGURED 0x80 /* FOR INTERNAL USE ONLY */ + +struct pm8058_pin_config { + u8 vin_src; + u8 dir; + u8 pull_up; + u8 strength; + u8 func; + u8 flags; +}; + +#define PM8058_GPIO_PIN_CONFIG(v,d,p,s,fn,fl) \ + { \ + .vin_src = (v), \ + .dir = (d), \ + .pull_up = (p), \ + .strength = (s), \ + .func = (fn), \ + .flags = (fl), \ + } + +#ifdef CONFIG_PM8058 +int pm8058_readb(struct device *dev, u16 addr, u8 *val); +int pm8058_writeb(struct device *dev, u16 addr, u8 val); +int pm8058_write_buf(struct device *dev, u16 addr, u8 *buf, int cnt); +int pm8058_read_buf(struct device *dev, u16 addr, u8 *buf, int cnt); +int pm8058_gpio_mux_cfg(struct device *dev, unsigned int gpio, + struct pm8058_pin_config *cfg); +int pm8058_gpio_mux(unsigned int gpio, struct pm8058_pin_config *cfg); +int pm8058_irq_get_status(struct device *dev, unsigned int irq); +#else +static inline int pm8058_readb(struct device *dev, u16 addr, u8 *val) +{ return 0; } +static inline int pm8058_writeb(struct device *dev, u16 addr, u8 val) +{ return 0; } +static inline int pm8058_write_buf(struct device *dev, u16 addr, u8 *buf, + int cnt) { return 0; } +static inline int pm8058_read_buf(struct device *dev, u16 addr, u8 *buf, + int cnt) { return 0; } +static inline int pm8058_gpio_mux_cfg(struct device *dev, unsigned int gpio, + struct pm8058_pin_config *cfg) { return 0; } +static inline int pm8058_gpio_mux(unsigned int gpio, + struct pm8058_pin_config *cfg) { return 0; } +static inline int pm8058_irq_get_status(struct device *dev, unsigned int irq) +{ return 0; } +#endif + +#ifdef CONFIG_CHARGER_PM8058 +void pm8058_notify_charger_connected(int status); +#else +static inline void pm8058_notify_charger_connected(int status) {} +#endif + +#endif diff --git a/include/linux/mm.h b/include/linux/mm.h index cdd44722cbc21..fe144f570a33c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -137,7 +137,8 @@ extern unsigned int kobjsize(const void *objp); #define VM_RandomReadHint(v) ((v)->vm_flags & VM_RAND_READ) /* - * special vmas that are non-mergable, non-mlock()able + * Special vmas that are non-mergable, non-mlock()able. + * Note: mm/huge_memory.c VM_NO_THP depends on this definition. 
*/ #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP) @@ -402,16 +403,23 @@ static inline void init_page_count(struct page *page) /* * PageBuddy() indicate that the page is free and in the buddy system * (see mm/page_alloc.c). + * + * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to + * -2 so that an underflow of the page_mapcount() won't be mistaken + * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very + * efficiently by most CPU architectures. */ +#define PAGE_BUDDY_MAPCOUNT_VALUE (-128) + static inline int PageBuddy(struct page *page) { - return atomic_read(&page->_mapcount) == -2; + return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE; } static inline void __SetPageBuddy(struct page *page) { VM_BUG_ON(atomic_read(&page->_mapcount) != -1); - atomic_set(&page->_mapcount, -2); + atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE); } static inline void __ClearPageBuddy(struct page *page) @@ -952,6 +960,8 @@ int invalidate_inode_page(struct page *page); #ifdef CONFIG_MMU extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags); +extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, + unsigned long address, unsigned int fault_flags); #else static inline int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, @@ -961,6 +971,14 @@ static inline int handle_mm_fault(struct mm_struct *mm, BUG(); return VM_FAULT_SIGBUS; } +static inline int fixup_user_fault(struct task_struct *tsk, + struct mm_struct *mm, unsigned long address, + unsigned int fault_flags) +{ + /* should never happen if there's no MMU */ + BUG(); + return -EFAULT; +} #endif extern int make_pages_present(unsigned long addr, unsigned long end); @@ -987,11 +1005,33 @@ int set_page_dirty_lock(struct page *page); int clear_page_dirty_for_io(struct page *page); /* Is the vma a continuation of the stack vma above it? */ -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr) +static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) { return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); } +static inline int stack_guard_page_start(struct vm_area_struct *vma, + unsigned long addr) +{ + return (vma->vm_flags & VM_GROWSDOWN) && + (vma->vm_start == addr) && + !vma_growsdown(vma->vm_prev, addr); +} + +/* Is the vma a continuation of the stack vma below it? */ +static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr) +{ + return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP); +} + +static inline int stack_guard_page_end(struct vm_area_struct *vma, + unsigned long addr) +{ + return (vma->vm_flags & VM_GROWSUP) && + (vma->vm_end == addr) && + !vma_growsup(vma->vm_next, addr); +} + extern unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, unsigned long new_addr, unsigned long len); @@ -1116,14 +1156,25 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm) } #endif +/* + * This struct is used to pass information from page reclaim to the shrinkers. + * We consolidate the values for easier extention later. + */ +struct shrink_control { + gfp_t gfp_mask; + + /* How many slab objects shrinker() should scan and try to reclaim */ + unsigned long nr_to_scan; +}; + /* * A callback you can register to apply pressure to ageable caches. 
* - * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'. It should - * look through the least-recently-used 'nr_to_scan' entries and - * attempt to free them up. It should return the number of objects - * which remain in the cache. If it returns -1, it means it cannot do - * any scanning at this time (eg. there is a risk of deadlock). + * 'sc' is passed shrink_control which includes a count 'nr_to_scan' + * and a 'gfpmask'. It should look through the least-recently-used + * 'nr_to_scan' entries and attempt to free them up. It should return + * the number of objects which remain in the cache. If it returns -1, it means + * it cannot do any scanning at this time (eg. there is a risk of deadlock). * * The 'gfpmask' refers to the allocation we are currently trying to * fulfil. @@ -1132,7 +1183,7 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm) * querying the cache size, so a fastpath for that case is appropriate. */ struct shrinker { - int (*shrink)(struct shrinker *, int nr_to_scan, gfp_t gfp_mask); + int (*shrink)(struct shrinker *, struct shrink_control *sc); int seeks; /* seeks to recreate an obj */ /* These are for internal use */ @@ -1449,7 +1500,7 @@ int write_one_page(struct page *page, int wait); void task_dirty_inc(struct task_struct *tsk); /* readahead.c */ -#define VM_MAX_READAHEAD 128 /* kbytes */ +#define VM_MAX_READAHEAD 2048 /* kbytes */ #define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */ int force_page_cache_readahead(struct address_space *mapping, struct file *filp, @@ -1580,8 +1631,9 @@ int in_gate_area_no_task(unsigned long addr); int drop_caches_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); -unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, - unsigned long lru_pages); +unsigned long shrink_slab(struct shrink_control *shrink, + unsigned long nr_pages_scanned, + unsigned long lru_pages); #ifndef CONFIG_MMU #define randomize_va_space 0 diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 8ce082781ccb4..f6e87b401d2e3 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -11,6 +11,7 @@ #define LINUX_MMC_CARD_H #include +#include struct mmc_cid { unsigned int manfid; @@ -109,6 +110,8 @@ struct mmc_card { #define MMC_TYPE_SD 1 /* SD card */ #define MMC_TYPE_SDIO 2 /* SDIO card */ #define MMC_TYPE_SD_COMBO 3 /* SD combo (IO+mem) card */ +#define MMC_TYPE_SDIO_WIMAX 4 /* SDIO card of WIMAX */ + unsigned int state; /* (our) card state */ #define MMC_STATE_PRESENT (1<<0) /* present in sysfs */ #define MMC_STATE_READONLY (1<<1) /* card is read-only */ @@ -121,6 +124,7 @@ struct mmc_card { /* for byte mode */ #define MMC_QUIRK_NONSTD_SDIO (1<<2) /* non-standard SDIO card attached */ /* (missing CIA registers) */ +#define MMC_QUIRK_INAND_CMD38 (1<<3) /* iNAND devices have broken CMD38 */ unsigned int erase_size; /* erase size in sectors */ unsigned int erase_shift; /* if erase unit is power 2 */ @@ -146,8 +150,97 @@ struct mmc_card { struct sdio_func_tuple *tuples; /* unknown common tuples */ struct dentry *debugfs_root; + unsigned int removed; }; +/* + * The world is not perfect and supplies us with broken mmc/sdio devices. + * For at least a part of these bugs we need a work-around + */ + +struct mmc_fixup { + + /* CID-specific fields. */ + const char *name; + + /* Valid revision range */ + u64 rev_start, rev_end; + + unsigned int manfid; + unsigned short oemid; + + /* SDIO-specfic fields. 
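A sketch of a cache shrinker written against the shrink_control interface defined in the mm.h hunk above; the cache, its reclaim routine and its object counter are hypothetical:

static int example_shrink(struct shrinker *s, struct shrink_control *sc)
{
        if (sc->nr_to_scan)
                example_cache_reclaim(sc->nr_to_scan, sc->gfp_mask);
        /* return how many objects remain, or -1 if scanning is not possible */
        return example_cache_count();
}

static struct shrinker example_shrinker = {
        .shrink = example_shrink,
        .seeks  = DEFAULT_SEEKS,
};
/* registered with register_shrinker(&example_shrinker) at init time */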
You can use SDIO_ANY_ID here of course */ + u16 cis_vendor, cis_device; + + void (*vendor_fixup)(struct mmc_card *card, int data); + int data; +}; + +#define CID_MANFID_ANY (-1ul) +#define CID_OEMID_ANY ((unsigned short) -1) +#define CID_NAME_ANY (NULL) + +#define END_FIXUP { 0 } + +#define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end, \ + _cis_vendor, _cis_device, \ + _fixup, _data) \ + { \ + .name = (_name), \ + .manfid = (_manfid), \ + .oemid = (_oemid), \ + .rev_start = (_rev_start), \ + .rev_end = (_rev_end), \ + .cis_vendor = (_cis_vendor), \ + .cis_device = (_cis_device), \ + .vendor_fixup = (_fixup), \ + .data = (_data), \ + } + +#define MMC_FIXUP_REV(_name, _manfid, _oemid, _rev_start, _rev_end, \ + _fixup, _data) \ + _FIXUP_EXT(_name, _manfid, \ + _oemid, _rev_start, _rev_end, \ + SDIO_ANY_ID, SDIO_ANY_ID, \ + _fixup, _data) \ + +#define MMC_FIXUP(_name, _manfid, _oemid, _fixup, _data) \ + MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data) + +#define SDIO_FIXUP(_vendor, _device, _fixup, _data) \ + _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY, \ + CID_OEMID_ANY, 0, -1ull, \ + _vendor, _device, \ + _fixup, _data) \ + +#define cid_rev(hwrev, fwrev, year, month) \ + (((u64) hwrev) << 40 | \ + ((u64) fwrev) << 32 | \ + ((u64) year) << 16 | \ + ((u64) month)) + +#define cid_rev_card(card) \ + cid_rev(card->cid.hwrev, \ + card->cid.fwrev, \ + card->cid.year, \ + card->cid.month) + +/* + * This hook just adds a quirk unconditionnally + */ +static inline void __maybe_unused add_quirk(struct mmc_card *card, int data) +{ + card->quirks |= data; +} + +/* + * This hook just removes a quirk unconditionnally + */ +static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data) +{ + card->quirks &= ~data; +} + #define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC) #define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD) #define mmc_card_sdio(c) ((c)->type == MMC_TYPE_SDIO) @@ -197,4 +290,7 @@ struct mmc_driver { extern int mmc_register_driver(struct mmc_driver *); extern void mmc_unregister_driver(struct mmc_driver *); +extern void mmc_fixup_device(struct mmc_card *card, + const struct mmc_fixup *table); + #endif diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 64e013f1cfb82..c11bef9ca5443 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -135,6 +135,7 @@ extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *); extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int); extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *, struct mmc_command *, int); +extern int mmc_switch(struct mmc_card *, u8, u8, u8); #define MMC_ERASE_ARG 0x00000000 #define MMC_SECURE_ERASE_ARG 0x80000000 diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index fd81f2a26b8a4..374cc632093fd 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -183,7 +183,6 @@ struct mmc_host { struct work_struct clk_gate_work; /* delayed clock gate */ unsigned int clk_old; /* old clock value cache */ spinlock_t clk_lock; /* lock for clk fields */ - struct mutex clk_gate_mutex; /* mutex for clock gating */ #endif /* host specific block data */ @@ -223,6 +222,7 @@ struct mmc_host { int claim_cnt; /* "claim" nesting count */ struct delayed_work detect; + struct delayed_work remove; const struct mmc_bus_ops *bus_ops; /* current bus driver */ unsigned int bus_refs; /* reference counter */ @@ -295,7 +295,7 @@ static inline void mmc_set_bus_resume_policy(struct mmc_host *host, int manual) extern int 
mmc_resume_bus(struct mmc_host *host); -extern int mmc_suspend_host(struct mmc_host *); +extern int mmc_suspend_host(struct mmc_host *, pm_message_t); extern int mmc_resume_host(struct mmc_host *); extern int mmc_power_save_host(struct mmc_host *host); diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index 0a99c8936a055..612301f85d144 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h @@ -268,7 +268,6 @@ struct _mmc_csd { #define EXT_CSD_SEC_ERASE_MULT 230 /* RO */ #define EXT_CSD_SEC_FEATURE_SUPPORT 231 /* RO */ #define EXT_CSD_TRIM_MULT 232 /* RO */ -#define EXT_CSD_BOOT_SIZE_MULTI 226 /* RO */ /* * EXT_CSD field definitions diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h index 245cdacee5443..b544861617632 100644 --- a/include/linux/mmc/sdio.h +++ b/include/linux/mmc/sdio.h @@ -96,6 +96,8 @@ #define SDIO_BUS_WIDTH_1BIT 0x00 #define SDIO_BUS_WIDTH_4BIT 0x02 +#define SDIO_BUS_WIDTH_8BIT 0x03 + #define SDIO_BUS_ECSI 0x20 /* Enable continuous SPI interrupt */ #define SDIO_BUS_SCSI 0x40 /* Support continuous SPI interrupt */ diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h index 557acae8cf95b..10c026ebc1f85 100755 --- a/include/linux/mmc/sdio_func.h +++ b/include/linux/mmc/sdio_func.h @@ -88,6 +88,8 @@ struct sdio_driver { int (*probe)(struct sdio_func *, const struct sdio_device_id *); void (*remove)(struct sdio_func *); + int (*suspend)(struct sdio_func *, pm_message_t state); + int (*resume)(struct sdio_func *); struct device_driver drv; }; diff --git a/include/linux/module.h b/include/linux/module.h index 5de42043dff0c..cca49b3410d66 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -25,21 +26,8 @@ /* Not Yet Implemented */ #define MODULE_SUPPORTED_DEVICE(name) -/* Some toolchains use a `_' prefix for all user symbols. 
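A sketch of how a card or block driver might use the fixup-table macros from the mmc/card.h hunk above to flag an iNAND part with broken CMD38; the name, manfid and oemid values here are placeholders, not real card identifiers:

static const struct mmc_fixup example_fixups[] = {
        /* placeholder CID match: name "XYZ04G", manfid 0x2, oemid 0x100 */
        MMC_FIXUP("XYZ04G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
        END_FIXUP
};

/* called once the CID has been read, e.g. from the block driver probe: */
/*      mmc_fixup_device(card, example_fixups);                         */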
*/ -#ifdef CONFIG_SYMBOL_PREFIX -#define MODULE_SYMBOL_PREFIX CONFIG_SYMBOL_PREFIX -#else -#define MODULE_SYMBOL_PREFIX "" -#endif - #define MODULE_NAME_LEN MAX_PARAM_PREFIX_LEN -struct kernel_symbol -{ - unsigned long value; - const char *name; -}; - struct modversion_info { unsigned long crc; @@ -93,11 +81,8 @@ void trim_init_extable(struct module *m); extern const struct gtype##_id __mod_##gtype##_table \ __attribute__ ((unused, alias(__stringify(name)))) -extern struct module __this_module; -#define THIS_MODULE (&__this_module) #else /* !MODULE */ #define MODULE_GENERIC_TABLE(gtype,name) -#define THIS_MODULE ((struct module *)0) #endif /* Generic info of form tag = "info" */ @@ -215,52 +200,6 @@ struct module_use { struct module *source, *target; }; -#ifndef __GENKSYMS__ -#ifdef CONFIG_MODVERSIONS -/* Mark the CRC weak since genksyms apparently decides not to - * generate a checksums for some symbols */ -#define __CRC_SYMBOL(sym, sec) \ - extern void *__crc_##sym __attribute__((weak)); \ - static const unsigned long __kcrctab_##sym \ - __used \ - __attribute__((section("__kcrctab" sec), unused)) \ - = (unsigned long) &__crc_##sym; -#else -#define __CRC_SYMBOL(sym, sec) -#endif - -/* For every exported symbol, place a struct in the __ksymtab section */ -#define __EXPORT_SYMBOL(sym, sec) \ - extern typeof(sym) sym; \ - __CRC_SYMBOL(sym, sec) \ - static const char __kstrtab_##sym[] \ - __attribute__((section("__ksymtab_strings"), aligned(1))) \ - = MODULE_SYMBOL_PREFIX #sym; \ - static const struct kernel_symbol __ksymtab_##sym \ - __used \ - __attribute__((section("__ksymtab" sec), unused)) \ - = { (unsigned long)&sym, __kstrtab_##sym } - -#define EXPORT_SYMBOL(sym) \ - __EXPORT_SYMBOL(sym, "") - -#define EXPORT_SYMBOL_GPL(sym) \ - __EXPORT_SYMBOL(sym, "_gpl") - -#define EXPORT_SYMBOL_GPL_FUTURE(sym) \ - __EXPORT_SYMBOL(sym, "_gpl_future") - - -#ifdef CONFIG_UNUSED_SYMBOLS -#define EXPORT_UNUSED_SYMBOL(sym) __EXPORT_SYMBOL(sym, "_unused") -#define EXPORT_UNUSED_SYMBOL_GPL(sym) __EXPORT_SYMBOL(sym, "_unused_gpl") -#else -#define EXPORT_UNUSED_SYMBOL(sym) -#define EXPORT_UNUSED_SYMBOL_GPL(sym) -#endif - -#endif - enum module_state { MODULE_STATE_LIVE, @@ -579,11 +518,6 @@ extern void module_update_tracepoints(void); extern int module_get_iter_tracepoints(struct tracepoint_iter *iter); #else /* !CONFIG_MODULES... */ -#define EXPORT_SYMBOL(sym) -#define EXPORT_SYMBOL_GPL(sym) -#define EXPORT_SYMBOL_GPL_FUTURE(sym) -#define EXPORT_UNUSED_SYMBOL(sym) -#define EXPORT_UNUSED_SYMBOL_GPL(sym) /* Given an address, look for it in the exception tables. */ static inline const struct exception_table_entry * diff --git a/include/linux/msm_adsp.h b/include/linux/msm_adsp.h new file mode 100644 index 0000000000000..12f219ea63d27 --- /dev/null +++ b/include/linux/msm_adsp.h @@ -0,0 +1,84 @@ +/* include/linux/msm_adsp.h + * + * Copyright (c) QUALCOMM Incorporated + * Copyright (C) 2007 Google, Inc. + * Author: Iliyan Malchev + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef __LINUX_MSM_ADSP_H +#define __LINUX_MSM_ADSP_H + +#include +#include + +#define ADSP_IOCTL_MAGIC 'q' + +/* ADSP_IOCTL_WRITE_COMMAND */ +struct adsp_command_t { + uint16_t queue; + uint32_t len; /* bytes */ + uint8_t *data; +}; + +/* ADSP_IOCTL_GET_EVENT */ +struct adsp_event_t { + uint16_t type; /* 1 == event (RPC), 0 == message (adsp) */ + uint32_t timeout_ms; /* -1 for infinite, 0 for immediate return */ + uint16_t msg_id; + uint16_t flags; /* 1 == 16--bit event, 0 == 32-bit event */ + uint32_t len; /* size in, number of bytes out */ + uint8_t *data; +}; + +#define ADSP_IOCTL_ENABLE \ + _IOR(ADSP_IOCTL_MAGIC, 1, unsigned) + +#define ADSP_IOCTL_DISABLE \ + _IOR(ADSP_IOCTL_MAGIC, 2, unsigned) + +#define ADSP_IOCTL_DISABLE_ACK \ + _IOR(ADSP_IOCTL_MAGIC, 3, unsigned) + +#define ADSP_IOCTL_WRITE_COMMAND \ + _IOR(ADSP_IOCTL_MAGIC, 4, struct adsp_command_t *) + +#define ADSP_IOCTL_GET_EVENT \ + _IOWR(ADSP_IOCTL_MAGIC, 5, struct adsp_event_data_t *) + +#define ADSP_IOCTL_SET_CLKRATE \ + _IOR(ADSP_IOCTL_MAGIC, 6, unsigned) + +#define ADSP_IOCTL_DISABLE_EVENT_RSP \ + _IOR(ADSP_IOCTL_MAGIC, 10, unsigned) + +struct adsp_pmem_info { + int fd; + void *vaddr; +}; + +#define ADSP_IOCTL_REGISTER_PMEM \ + _IOW(ADSP_IOCTL_MAGIC, 13, unsigned) + +#define ADSP_IOCTL_UNREGISTER_PMEM \ + _IOW(ADSP_IOCTL_MAGIC, 14, unsigned) + +/* Cause any further GET_EVENT ioctls to fail (-ENODEV) + * until the device is closed and reopened. Useful for + * terminating event dispatch threads + */ +#define ADSP_IOCTL_ABORT_EVENT_READ \ + _IOW(ADSP_IOCTL_MAGIC, 15, unsigned) + +#define ADSP_IOCTL_LINK_TASK \ + _IOW(ADSP_IOCTL_MAGIC, 16, unsigned) + +#endif diff --git a/include/linux/msm_audio.h b/include/linux/msm_audio.h new file mode 100644 index 0000000000000..4f7562d89fb75 --- /dev/null +++ b/include/linux/msm_audio.h @@ -0,0 +1,215 @@ +/* include/linux/msm_audio.h + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
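A user-space sketch of the ADSP command path defined in msm_adsp.h above; the device node name, queue id, command payload and ioctl arguments are assumptions for illustration:

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/msm_adsp.h>

int example_adsp_send(void)
{
        uint8_t payload[8] = { 0 };             /* placeholder command words */
        struct adsp_command_t cmd = {
                .queue = 0,                     /* placeholder queue id */
                .len   = sizeof(payload),
                .data  = payload,
        };
        int fd = open("/dev/adsp/AUDPPTASK", O_RDWR);   /* path assumed */

        if (fd < 0)
                return -1;
        ioctl(fd, ADSP_IOCTL_ENABLE, 0);
        ioctl(fd, ADSP_IOCTL_WRITE_COMMAND, &cmd);
        return fd;
}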
+ * + */ + +#ifndef __LINUX_MSM_AUDIO_H +#define __LINUX_MSM_AUDIO_H + +#include +#include +#include + +/* PCM Audio */ + +#define AUDIO_IOCTL_MAGIC 'a' + +#define AUDIO_START _IOW(AUDIO_IOCTL_MAGIC, 0, unsigned) +#define AUDIO_STOP _IOW(AUDIO_IOCTL_MAGIC, 1, unsigned) +#define AUDIO_FLUSH _IOW(AUDIO_IOCTL_MAGIC, 2, unsigned) +#define AUDIO_GET_CONFIG _IOR(AUDIO_IOCTL_MAGIC, 3, unsigned) +#define AUDIO_SET_CONFIG _IOW(AUDIO_IOCTL_MAGIC, 4, unsigned) +#define AUDIO_GET_STATS _IOR(AUDIO_IOCTL_MAGIC, 5, unsigned) +#define AUDIO_ENABLE_AUDPP _IOW(AUDIO_IOCTL_MAGIC, 6, unsigned) +#define AUDIO_SET_ADRC _IOW(AUDIO_IOCTL_MAGIC, 7, unsigned) +#define AUDIO_SET_EQ _IOW(AUDIO_IOCTL_MAGIC, 8, unsigned) +#define AUDIO_SET_RX_IIR _IOW(AUDIO_IOCTL_MAGIC, 9, unsigned) +#define AUDIO_SET_VOLUME _IOW(AUDIO_IOCTL_MAGIC, 10, unsigned) +#define AUDIO_ENABLE_AUDPRE _IOW(AUDIO_IOCTL_MAGIC, 11, unsigned) +#define AUDIO_SET_AGC _IOW(AUDIO_IOCTL_MAGIC, 12, unsigned) +#define AUDIO_SET_NS _IOW(AUDIO_IOCTL_MAGIC, 13, unsigned) +#define AUDIO_SET_TX_IIR _IOW(AUDIO_IOCTL_MAGIC, 14, unsigned) +#define AUDIO_PAUSE _IOW(AUDIO_IOCTL_MAGIC, 15, unsigned) +#define AUDIO_SET_AAC_CONFIG _IOW(AUDIO_IOCTL_MAGIC, 15, unsigned) +#define AUDIO_WAIT_ADSP_DONE _IOR(AUDIO_IOCTL_MAGIC, 16, unsigned) +#define AUDIO_ADSP_PAUSE _IOR(AUDIO_IOCTL_MAGIC, 17, unsigned) +#define AUDIO_ADSP_RESUME _IOR(AUDIO_IOCTL_MAGIC, 18, unsigned) +#define AUDIO_PLAY_DTMF _IOW(AUDIO_IOCTL_MAGIC, 19, unsigned) +#define AUDIO_GET_AAC_CONFIG _IOR(AUDIO_IOCTL_MAGIC, 20, unsigned) +#define AUDIO_GET_AMRNB_ENC_CONFIG _IOW(AUDIO_IOCTL_MAGIC, 21, unsigned) +#define AUDIO_SET_AMRNB_ENC_CONFIG _IOR(AUDIO_IOCTL_MAGIC, 22, unsigned) +#define AUDIO_GET_PCM_CONFIG _IOR(AUDIO_IOCTL_MAGIC, 30, unsigned) +#define AUDIO_SET_PCM_CONFIG _IOW(AUDIO_IOCTL_MAGIC, 31, unsigned) +#define AUDIO_SWITCH_DEVICE _IOW(AUDIO_IOCTL_MAGIC, 32, unsigned) +#define AUDIO_SET_MUTE _IOW(AUDIO_IOCTL_MAGIC, 33, unsigned) +#define AUDIO_UPDATE_ACDB _IOW(AUDIO_IOCTL_MAGIC, 34, unsigned) +#define AUDIO_START_VOICE _IOW(AUDIO_IOCTL_MAGIC, 35, unsigned) +#define AUDIO_STOP_VOICE _IOW(AUDIO_IOCTL_MAGIC, 36, unsigned) +#define AUDIO_START_FM _IOW(AUDIO_IOCTL_MAGIC, 37, unsigned) +#define AUDIO_STOP_FM _IOW(AUDIO_IOCTL_MAGIC, 38, unsigned) +#define AUDIO_REINIT_ACDB _IOW(AUDIO_IOCTL_MAGIC, 39, unsigned) +#define AUDIO_ENABLE_AUXPGA_LOOPBACK _IOW(AUDIO_IOCTL_MAGIC, 40, unsigned) +#define AUDIO_SET_AUXPGA_GAIN _IOW(AUDIO_IOCTL_MAGIC, 41, unsigned) +#define AUDIO_SET_RX_MUTE _IOW(AUDIO_IOCTL_MAGIC, 42, unsigned) + +#define AUDIO_MAX_COMMON_IOCTL_NUM 100 + +#define AUDIO_MAX_COMMON_IOCTL_NUM 100 + +struct msm_audio_config { + uint32_t buffer_size; + uint32_t buffer_count; + uint32_t channel_count; + uint32_t sample_rate; + uint32_t type; + uint32_t unused[3]; +}; + +struct msm_audio_stats { + uint32_t byte_count; + uint32_t sample_count; + uint32_t unused[2]; +}; + +struct msm_mute_info { + uint32_t mute; + uint32_t path; +}; + +#define AUDIO_AAC_FORMAT_ADTS -1 +#define AUDIO_AAC_FORMAT_RAW 0x0000 +#define AUDIO_AAC_FORMAT_PSUEDO_RAW 0x0001 +#define AUDIO_AAC_FORMAT_LOAS 0x0002 + +#define AUDIO_AAC_OBJECT_LC 0x0002 +#define AUDIO_AAC_OBJECT_LTP 0x0004 +#define AUDIO_AAC_OBJECT_ERLC 0x0011 + +#define AUDIO_AAC_SEC_DATA_RES_ON 0x0001 +#define AUDIO_AAC_SEC_DATA_RES_OFF 0x0000 + +#define AUDIO_AAC_SCA_DATA_RES_ON 0x0001 +#define AUDIO_AAC_SCA_DATA_RES_OFF 0x0000 + +#define AUDIO_AAC_SPEC_DATA_RES_ON 0x0001 +#define AUDIO_AAC_SPEC_DATA_RES_OFF 0x0000 + +#define AUDIO_AAC_SBR_ON_FLAG_ON 0x0001 +#define 
AUDIO_AAC_SBR_ON_FLAG_OFF 0x0000 + +#define AUDIO_AAC_SBR_PS_ON_FLAG_ON 0x0001 +#define AUDIO_AAC_SBR_PS_ON_FLAG_OFF 0x0000 + +/* Primary channel on both left and right channels */ +#define AUDIO_AAC_DUAL_MONO_PL_PR 0 +/* Secondary channel on both left and right channels */ +#define AUDIO_AAC_DUAL_MONO_SL_SR 1 +/* Primary channel on right channel and 2nd on left channel */ +#define AUDIO_AAC_DUAL_MONO_SL_PR 2 +/* 2nd channel on right channel and primary on left channel */ +#define AUDIO_AAC_DUAL_MONO_PL_SR 3 + +#define AAC_OBJECT_ER_LC 17 +#define AAC_OBJECT_ER_LTP 19 +#define AAC_OBJECT_ER_SCALABLE 20 +#define AAC_OBJECT_BSAC 22 +#define AAC_OBJECT_ER_LD 23 + +struct aac_format { + uint16_t sample_rate; + uint16_t channel_config; + uint16_t block_formats; + uint16_t audio_object_type; + uint16_t ep_config; + uint16_t aac_section_data_resilience_flag; + uint16_t aac_scalefactor_data_resilience_flag; + uint16_t aac_spectral_data_resilience_flag; + uint16_t sbr_on_flag; + uint16_t sbr_ps_on_flag; + uint32_t bit_rate; +}; + +struct msm_audio_aac_config { + signed short format; + unsigned short audio_object; + unsigned short ep_config; /* 0 ~ 3 useful only obj = ERLC */ + unsigned short aac_section_data_resilience_flag; + unsigned short aac_scalefactor_data_resilience_flag; + unsigned short aac_spectral_data_resilience_flag; + unsigned short sbr_on_flag; + unsigned short sbr_ps_on_flag; + unsigned short dual_mono_mode; + unsigned short channel_configuration; +}; + +struct msm_audio_amrnb_enc_config { + unsigned short voicememoencweight1; + unsigned short voicememoencweight2; + unsigned short voicememoencweight3; + unsigned short voicememoencweight4; + unsigned short dtx_mode_enable; /* 0xFFFF - enable, 0- disable */ + unsigned short test_mode_enable; /* 0xFFFF - enable, 0- disable */ + unsigned short enc_mode; /* 0-MR475,1-MR515,2-MR59,3-MR67,4-MR74 + 5-MR795, 6- MR102, 7- MR122(default) */ +}; + +/* Audio routing */ + +#define SND_IOCTL_MAGIC 's' + +#define SND_MUTE_UNMUTED 0 +#define SND_MUTE_MUTED 1 + +struct msm_snd_device_config { + uint32_t device; + uint32_t ear_mute; + uint32_t mic_mute; +}; + +#define SND_SET_DEVICE _IOW(SND_IOCTL_MAGIC, 2, struct msm_device_config *) + +#define SND_METHOD_VOICE 0 + +struct msm_snd_volume_config { + uint32_t device; + uint32_t method; + uint32_t volume; +}; + +#define SND_SET_VOLUME _IOW(SND_IOCTL_MAGIC, 3, struct msm_snd_volume_config *) + +/* Returns the number of SND endpoints supported. */ + +#define SND_GET_NUM_ENDPOINTS _IOR(SND_IOCTL_MAGIC, 4, unsigned *) + +struct msm_snd_endpoint { + int id; /* input and output */ + char name[64]; /* output only */ +}; + +/* Takes an index between 0 and one less than the number returned by + * SND_GET_NUM_ENDPOINTS, and returns the SND index and name of a + * SND endpoint. On input, the .id field contains the number of the + * endpoint, and on exit it contains the SND index, while .name contains + * the description of the endpoint. 
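A user-space sketch of the PCM configuration ioctls declared earlier in this msm_audio.h hunk; the device node name and the chosen rate/channel values are assumptions:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/msm_audio.h>

int example_pcm_out_start(void)
{
        struct msm_audio_config cfg;
        int fd = open("/dev/msm_pcm_out", O_RDWR);      /* node name assumed */

        if (fd < 0)
                return -1;
        if (ioctl(fd, AUDIO_GET_CONFIG, &cfg) == 0) {
                cfg.channel_count = 2;
                cfg.sample_rate   = 44100;
                ioctl(fd, AUDIO_SET_CONFIG, &cfg);
        }
        ioctl(fd, AUDIO_START, 0);
        /* PCM frames are then write()n in cfg.buffer_size chunks */
        return fd;
}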
+ */ + +#define SND_GET_ENDPOINT _IOWR(SND_IOCTL_MAGIC, 5, struct msm_snd_endpoint *) + +struct msm_audio_pcm_config { + uint32_t pcm_feedback; /* 0 - disable > 0 - enable */ + uint32_t buffer_count; /* Number of buffers to allocate */ + uint32_t buffer_size; /* Size of buffer for capturing of + PCM samples */ +}; +#endif diff --git a/include/linux/msm_ion.h b/include/linux/msm_ion.h new file mode 100644 index 0000000000000..5434a05229737 --- /dev/null +++ b/include/linux/msm_ion.h @@ -0,0 +1 @@ +#include diff --git a/include/linux/msm_kgsl.h b/include/linux/msm_kgsl.h new file mode 100644 index 0000000000000..1c789691c63bf --- /dev/null +++ b/include/linux/msm_kgsl.h @@ -0,0 +1,485 @@ +#ifndef _MSM_KGSL_H +#define _MSM_KGSL_H + +#define KGSL_VERSION_MAJOR 3 +#define KGSL_VERSION_MINOR 8 + +/*context flags */ +#define KGSL_CONTEXT_SAVE_GMEM 1 +#define KGSL_CONTEXT_NO_GMEM_ALLOC 2 +#define KGSL_CONTEXT_SUBMIT_IB_LIST 4 +#define KGSL_CONTEXT_CTX_SWITCH 8 +#define KGSL_CONTEXT_PREAMBLE 16 + +/* Memory allocayion flags */ +#define KGSL_MEMFLAGS_GPUREADONLY 0x01000000 + +/* generic flag values */ +#define KGSL_FLAGS_NORMALMODE 0x00000000 +#define KGSL_FLAGS_SAFEMODE 0x00000001 +#define KGSL_FLAGS_INITIALIZED0 0x00000002 +#define KGSL_FLAGS_INITIALIZED 0x00000004 +#define KGSL_FLAGS_STARTED 0x00000008 +#define KGSL_FLAGS_ACTIVE 0x00000010 +#define KGSL_FLAGS_RESERVED0 0x00000020 +#define KGSL_FLAGS_RESERVED1 0x00000040 +#define KGSL_FLAGS_RESERVED2 0x00000080 +#define KGSL_FLAGS_SOFT_RESET 0x00000100 + +/* Clock flags to show which clocks should be controled by a given platform */ +#define KGSL_CLK_SRC 0x00000001 +#define KGSL_CLK_CORE 0x00000002 +#define KGSL_CLK_IFACE 0x00000004 +#define KGSL_CLK_MEM 0x00000008 +#define KGSL_CLK_MEM_IFACE 0x00000010 +#define KGSL_CLK_AXI 0x00000020 + +/* Arbitrary defines for 8x50 devices using google clock naming / implementation */ +#define KGSL_CLK_GRP 0x00001000 +#define KGSL_CLK_IMEM 0x00010000 + +/* + * Reset status values for context + */ +enum kgsl_ctx_reset_stat { + KGSL_CTX_STAT_NO_ERROR = 0x00000000, + KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT = 0x00000001, + KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT = 0x00000002, + KGSL_CTX_STAT_UNKNOWN_CONTEXT_RESET_EXT = 0x00000003 +}; + +#define KGSL_MAX_PWRLEVELS 5 + +#define KGSL_CONVERT_TO_MBPS(val) \ + (val*1000*1000U) + +/* device id */ +enum kgsl_deviceid { + KGSL_DEVICE_3D0 = 0x00000000, + KGSL_DEVICE_2D0 = 0x00000001, + KGSL_DEVICE_2D1 = 0x00000002, + KGSL_DEVICE_MAX = 0x00000003 +}; + +enum kgsl_user_mem_type { + KGSL_USER_MEM_TYPE_PMEM = 0x00000000, + KGSL_USER_MEM_TYPE_ASHMEM = 0x00000001, + KGSL_USER_MEM_TYPE_ADDR = 0x00000002, + KGSL_USER_MEM_TYPE_ION = 0x00000003, + KGSL_USER_MEM_TYPE_MAX = 0x00000004, +}; + +struct kgsl_devinfo { + + unsigned int device_id; + /* chip revision id + * coreid:8 majorrev:8 minorrev:8 patch:8 + */ + unsigned int chip_id; + unsigned int mmu_enabled; + unsigned int gmem_gpubaseaddr; + /* + * This field contains the adreno revision + * number 200, 205, 220, etc... + */ + unsigned int gpu_id; + unsigned int gmem_sizebytes; +}; + +/* this structure defines the region of memory that can be mmap()ed from this + driver. 
The timestamp fields are volatile because they are written by the + GPU +*/ +struct kgsl_devmemstore { + volatile unsigned int soptimestamp; + unsigned int sbz; + volatile unsigned int eoptimestamp; + unsigned int sbz2; + volatile unsigned int ts_cmp_enable; + unsigned int sbz3; + volatile unsigned int ref_wait_ts; + unsigned int sbz4; + unsigned int current_context; + unsigned int sbz5; +}; + +#define KGSL_DEVICE_MEMSTORE_OFFSET(field) \ + offsetof(struct kgsl_devmemstore, field) + + +/* timestamp id*/ +enum kgsl_timestamp_type { + KGSL_TIMESTAMP_CONSUMED = 0x00000001, /* start-of-pipeline timestamp */ + KGSL_TIMESTAMP_RETIRED = 0x00000002, /* end-of-pipeline timestamp*/ + KGSL_TIMESTAMP_MAX = 0x00000002, +}; + +/* property types - used with kgsl_device_getproperty */ +enum kgsl_property_type { + KGSL_PROP_DEVICE_INFO = 0x00000001, + KGSL_PROP_DEVICE_SHADOW = 0x00000002, + KGSL_PROP_DEVICE_POWER = 0x00000003, + KGSL_PROP_SHMEM = 0x00000004, + KGSL_PROP_SHMEM_APERTURES = 0x00000005, + KGSL_PROP_MMU_ENABLE = 0x00000006, + KGSL_PROP_INTERRUPT_WAITS = 0x00000007, + KGSL_PROP_VERSION = 0x00000008, + KGSL_PROP_GPU_RESET_STAT = 0x00000009 +}; + +struct kgsl_shadowprop { + unsigned int gpuaddr; + unsigned int size; + unsigned int flags; /* contains KGSL_FLAGS_ values */ +}; + +struct kgsl_pwrlevel { + unsigned int gpu_freq; + unsigned int bus_freq; + unsigned int io_fraction; +}; + +struct kgsl_version { + unsigned int drv_major; + unsigned int drv_minor; + unsigned int dev_major; + unsigned int dev_minor; +}; + +#ifdef __KERNEL__ + +#define KGSL_3D0_REG_MEMORY "kgsl_3d0_reg_memory" +#define KGSL_3D0_IRQ "kgsl_3d0_irq" +#define KGSL_2D0_REG_MEMORY "kgsl_2d0_reg_memory" +#define KGSL_2D0_IRQ "kgsl_2d0_irq" +#define KGSL_2D1_REG_MEMORY "kgsl_2d1_reg_memory" +#define KGSL_2D1_IRQ "kgsl_2d1_irq" + +struct kgsl_device_platform_data { + struct kgsl_pwrlevel pwrlevel[KGSL_MAX_PWRLEVELS]; + int init_level; + int num_levels; + int (*set_grp_async)(void); + unsigned int idle_timeout; + unsigned int nap_allowed; + unsigned int clk_map; + unsigned int idle_needed; + struct msm_bus_scale_pdata *bus_scale_table; + const char *iommu_user_ctx_name; + const char *iommu_priv_ctx_name; +}; + +#endif + +/* structure holds list of ibs */ +struct kgsl_ibdesc { + unsigned int gpuaddr; + void *hostptr; + unsigned int sizedwords; + unsigned int ctrl; +}; + +/* ioctls */ +#define KGSL_IOC_TYPE 0x09 + +/* get misc info about the GPU + type should be a value from enum kgsl_property_type + value points to a structure that varies based on type + sizebytes is sizeof() that structure + for KGSL_PROP_DEVICE_INFO, use struct kgsl_devinfo + this structure contaings hardware versioning info. + for KGSL_PROP_DEVICE_SHADOW, use struct kgsl_shadowprop + this is used to find mmap() offset and sizes for mapping + struct kgsl_memstore into userspace. +*/ +struct kgsl_device_getproperty { + unsigned int type; + void *value; + unsigned int sizebytes; +}; + +#define IOCTL_KGSL_DEVICE_GETPROPERTY \ + _IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty) + + +/* read a GPU register. + offsetwords it the 32 bit word offset from the beginning of the + GPU register space. + */ +struct kgsl_device_regread { + unsigned int offsetwords; + unsigned int value; /* output param */ +}; + +#define IOCTL_KGSL_DEVICE_REGREAD \ + _IOWR(KGSL_IOC_TYPE, 0x3, struct kgsl_device_regread) + + +/* block until the GPU has executed past a given timestamp + * timeout is in milliseconds. 
+ */ +struct kgsl_device_waittimestamp { + unsigned int timestamp; + unsigned int timeout; +}; + +#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP \ + _IOW(KGSL_IOC_TYPE, 0x6, struct kgsl_device_waittimestamp) + + +/* issue indirect commands to the GPU. + * drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE + * ibaddr and sizedwords must specify a subset of a buffer created + * with IOCTL_KGSL_SHAREDMEM_FROM_PMEM + * flags may be a mask of KGSL_CONTEXT_ values + * timestamp is a returned counter value which can be passed to + * other ioctls to determine when the commands have been executed by + * the GPU. + */ +struct kgsl_ringbuffer_issueibcmds { + unsigned int drawctxt_id; + unsigned int ibdesc_addr; + unsigned int numibs; + unsigned int timestamp; /*output param */ + unsigned int flags; +}; + +#define IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS \ + _IOWR(KGSL_IOC_TYPE, 0x10, struct kgsl_ringbuffer_issueibcmds) + +/* read the most recently executed timestamp value + * type should be a value from enum kgsl_timestamp_type + */ +struct kgsl_cmdstream_readtimestamp { + unsigned int type; + unsigned int timestamp; /*output param */ +}; + +#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD \ + _IOR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp) + +#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP \ + _IOWR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp) + +/* free memory when the GPU reaches a given timestamp. + * gpuaddr specify a memory region created by a + * IOCTL_KGSL_SHAREDMEM_FROM_PMEM call + * type should be a value from enum kgsl_timestamp_type + */ +struct kgsl_cmdstream_freememontimestamp { + unsigned int gpuaddr; + unsigned int type; + unsigned int timestamp; +}; + +#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP \ + _IOW(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp) + +/* Previous versions of this header had incorrectly defined + IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP as a read-only ioctl instead + of a write only ioctl. To ensure binary compatability, the following + #define will be used to intercept the incorrect ioctl +*/ + +#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD \ + _IOR(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp) + +/* create a draw context, which is used to preserve GPU state. 
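A sketch of the submit-then-wait flow implied by the two ioctls above; the file descriptor, draw context id and IB descriptor are assumed to have been created already (context creation follows below), and the timeout is an arbitrary example value:

#include <sys/ioctl.h>
#include <linux/msm_kgsl.h>

int example_submit_and_wait(int fd, unsigned int drawctxt_id,
                            struct kgsl_ibdesc *ib)
{
        struct kgsl_ringbuffer_issueibcmds cmds = {
                .drawctxt_id = drawctxt_id,
                .ibdesc_addr = (unsigned int)ib,        /* 32-bit address */
                .numibs      = 1,
                .flags       = KGSL_CONTEXT_SUBMIT_IB_LIST,
        };
        struct kgsl_device_waittimestamp wait;

        if (ioctl(fd, IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS, &cmds) < 0)
                return -1;
        wait.timestamp = cmds.timestamp;        /* filled in by the submit */
        wait.timeout   = 1000;                  /* milliseconds */
        return ioctl(fd, IOCTL_KGSL_DEVICE_WAITTIMESTAMP, &wait);
}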
+ * The flags field may contain a mask KGSL_CONTEXT_* values + */ +struct kgsl_drawctxt_create { + unsigned int flags; + unsigned int drawctxt_id; /*output param */ +}; + +#define IOCTL_KGSL_DRAWCTXT_CREATE \ + _IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create) + +/* destroy a draw context */ +struct kgsl_drawctxt_destroy { + unsigned int drawctxt_id; +}; + +#define IOCTL_KGSL_DRAWCTXT_DESTROY \ + _IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy) + +/* add a block of pmem, fb, ashmem or user allocated address + * into the GPU address space */ +struct kgsl_map_user_mem { + int fd; + unsigned int gpuaddr; /*output param */ + unsigned int len; + unsigned int offset; + unsigned int hostptr; /*input param */ + enum kgsl_user_mem_type memtype; + unsigned int reserved; /* May be required to add + params for another mem type */ +}; + +#define IOCTL_KGSL_MAP_USER_MEM \ + _IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem) + +/* add a block of pmem or fb into the GPU address space */ +struct kgsl_sharedmem_from_pmem { + int pmem_fd; + unsigned int gpuaddr; /*output param */ + unsigned int len; + unsigned int offset; +}; + +#define IOCTL_KGSL_SHAREDMEM_FROM_PMEM \ + _IOWR(KGSL_IOC_TYPE, 0x20, struct kgsl_sharedmem_from_pmem) + +/* remove memory from the GPU's address space */ +struct kgsl_sharedmem_free { + unsigned int gpuaddr; +}; + +#define IOCTL_KGSL_SHAREDMEM_FREE \ + _IOW(KGSL_IOC_TYPE, 0x21, struct kgsl_sharedmem_free) + +struct kgsl_cff_user_event { + unsigned char cff_opcode; + unsigned int op1; + unsigned int op2; + unsigned int op3; + unsigned int op4; + unsigned int op5; + unsigned int __pad[2]; +}; + +#define IOCTL_KGSL_CFF_USER_EVENT \ + _IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_cff_user_event) + +struct kgsl_gmem_desc { + unsigned int x; + unsigned int y; + unsigned int width; + unsigned int height; + unsigned int pitch; +}; + +struct kgsl_buffer_desc { + void *hostptr; + unsigned int gpuaddr; + int size; + unsigned int format; + unsigned int pitch; + unsigned int enabled; +}; + +struct kgsl_bind_gmem_shadow { + unsigned int drawctxt_id; + struct kgsl_gmem_desc gmem_desc; + unsigned int shadow_x; + unsigned int shadow_y; + struct kgsl_buffer_desc shadow_buffer; + unsigned int buffer_id; +}; + +#define IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW \ + _IOW(KGSL_IOC_TYPE, 0x22, struct kgsl_bind_gmem_shadow) + +/* add a block of memory into the GPU address space */ +struct kgsl_sharedmem_from_vmalloc { + unsigned int gpuaddr; /*output param */ + unsigned int hostptr; + unsigned int flags; +}; + +#define IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC \ + _IOWR(KGSL_IOC_TYPE, 0x23, struct kgsl_sharedmem_from_vmalloc) + +#define IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE \ + _IOW(KGSL_IOC_TYPE, 0x24, struct kgsl_sharedmem_free) + +struct kgsl_drawctxt_set_bin_base_offset { + unsigned int drawctxt_id; + unsigned int offset; +}; + +#define IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET \ + _IOW(KGSL_IOC_TYPE, 0x25, struct kgsl_drawctxt_set_bin_base_offset) + +enum kgsl_cmdwindow_type { + KGSL_CMDWINDOW_MIN = 0x00000000, + KGSL_CMDWINDOW_2D = 0x00000000, + KGSL_CMDWINDOW_3D = 0x00000001, /* legacy */ + KGSL_CMDWINDOW_MMU = 0x00000002, + KGSL_CMDWINDOW_ARBITER = 0x000000FF, + KGSL_CMDWINDOW_MAX = 0x000000FF, +}; + +/* write to the command window */ +struct kgsl_cmdwindow_write { + enum kgsl_cmdwindow_type target; + unsigned int addr; + unsigned int data; +}; + +#define IOCTL_KGSL_CMDWINDOW_WRITE \ + _IOW(KGSL_IOC_TYPE, 0x2e, struct kgsl_cmdwindow_write) + +struct kgsl_gpumem_alloc { + unsigned long gpuaddr; + size_t size; + 
unsigned int flags; +}; + +#define IOCTL_KGSL_GPUMEM_ALLOC \ + _IOWR(KGSL_IOC_TYPE, 0x2f, struct kgsl_gpumem_alloc) + +struct kgsl_cff_syncmem { + unsigned int gpuaddr; + unsigned int len; + unsigned int __pad[2]; /* For future binary compatibility */ +}; + +#define IOCTL_KGSL_CFF_SYNCMEM \ + _IOW(KGSL_IOC_TYPE, 0x30, struct kgsl_cff_syncmem) + +/* + * A timestamp event allows the user space to register an action following an + * expired timestamp. Note IOCTL_KGSL_TIMESTAMP_EVENT has been redefined to + * _IOWR to support fences which need to return a fd for the priv parameter. + */ + +struct kgsl_timestamp_event { + int type; /* Type of event (see list below) */ + unsigned int timestamp; /* Timestamp to trigger event on */ + unsigned int context_id; /* Context for the timestamp */ + void *priv; /* Pointer to the event specific blob */ + size_t len; /* Size of the event specific blob */ +}; + +#define IOCTL_KGSL_TIMESTAMP_EVENT_OLD \ + _IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_timestamp_event) + +/* A genlock timestamp event releases an existing lock on timestamp expire */ + +#define KGSL_TIMESTAMP_EVENT_GENLOCK 1 + +struct kgsl_timestamp_event_genlock { + int handle; /* Handle of the genlock lock to release */ +}; + +/* A fence timestamp event releases an existing lock on timestamp expire */ + +#define KGSL_TIMESTAMP_EVENT_FENCE 2 + +struct kgsl_timestamp_event_fence { + int fence_fd; /* Fence to signal */ +}; + +#define IOCTL_KGSL_TIMESTAMP_EVENT \ + _IOWR(KGSL_IOC_TYPE, 0x33, struct kgsl_timestamp_event) + +#ifdef __KERNEL__ +#ifdef CONFIG_MSM_KGSL_DRM +int kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start, + unsigned long *len); +#else +#define kgsl_gem_obj_addr(...) 0 +#endif +#endif +#endif /* _MSM_KGSL_H */ diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h index d11fe0f2f956f..c024ffbdd5f6b 100644 --- a/include/linux/msm_mdp.h +++ b/include/linux/msm_mdp.h @@ -1,6 +1,7 @@ /* include/linux/msm_mdp.h * * Copyright (C) 2007 Google Incorporated + * Copyright (c) 2013, The Linux Foundation. All rights reserved. 
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -15,24 +16,105 @@ #define _MSM_MDP_H_ #include +#include #define MSMFB_IOCTL_MAGIC 'm' #define MSMFB_GRP_DISP _IOW(MSMFB_IOCTL_MAGIC, 1, unsigned int) #define MSMFB_BLIT _IOW(MSMFB_IOCTL_MAGIC, 2, unsigned int) +#define MSMFB_SUSPEND_SW_REFRESHER _IOW(MSMFB_IOCTL_MAGIC, 128, unsigned int) +#define MSMFB_RESUME_SW_REFRESHER _IOW(MSMFB_IOCTL_MAGIC, 129, unsigned int) +#define MSMFB_CURSOR _IOW(MSMFB_IOCTL_MAGIC, 130, struct fb_cursor) +#define MSMFB_SET_LUT _IOW(MSMFB_IOCTL_MAGIC, 131, struct fb_cmap) +#define MSMFB_HISTOGRAM _IOWR(MSMFB_IOCTL_MAGIC, 132, struct mdp_histogram_data) +/* new ioctls's for set/get ccs matrix */ +#define MSMFB_GET_CCS_MATRIX _IOWR(MSMFB_IOCTL_MAGIC, 133, struct mdp_ccs) +#define MSMFB_SET_CCS_MATRIX _IOW(MSMFB_IOCTL_MAGIC, 134, struct mdp_ccs) +#define MSMFB_OVERLAY_SET _IOWR(MSMFB_IOCTL_MAGIC, 135, \ + struct mdp_overlay) +#define MSMFB_OVERLAY_UNSET _IOW(MSMFB_IOCTL_MAGIC, 136, unsigned int) + +#define MSMFB_OVERLAY_PLAY _IOW(MSMFB_IOCTL_MAGIC, 137, \ + struct msmfb_overlay_data) +#define MSMFB_OVERLAY_QUEUE MSMFB_OVERLAY_PLAY + +#define MSMFB_GET_PAGE_PROTECTION _IOR(MSMFB_IOCTL_MAGIC, 138, \ + struct mdp_page_protection) +#define MSMFB_SET_PAGE_PROTECTION _IOW(MSMFB_IOCTL_MAGIC, 139, \ + struct mdp_page_protection) +#define MSMFB_OVERLAY_GET _IOR(MSMFB_IOCTL_MAGIC, 140, \ + struct mdp_overlay) +#define MSMFB_OVERLAY_PLAY_ENABLE _IOW(MSMFB_IOCTL_MAGIC, 141, unsigned int) +#define MSMFB_OVERLAY_BLT _IOWR(MSMFB_IOCTL_MAGIC, 142, \ + struct msmfb_overlay_blt) +#define MSMFB_OVERLAY_BLT_OFFSET _IOW(MSMFB_IOCTL_MAGIC, 143, unsigned int) +#define MSMFB_HISTOGRAM_START _IOR(MSMFB_IOCTL_MAGIC, 144, \ + struct mdp_histogram_start_req) +#define MSMFB_HISTOGRAM_STOP _IOR(MSMFB_IOCTL_MAGIC, 145, unsigned int) +#define MSMFB_NOTIFY_UPDATE _IOW(MSMFB_IOCTL_MAGIC, 146, unsigned int) + +#define MSMFB_OVERLAY_3D _IOWR(MSMFB_IOCTL_MAGIC, 147, \ + struct msmfb_overlay_3d) + +#define MSMFB_MIXER_INFO _IOWR(MSMFB_IOCTL_MAGIC, 148, \ + struct msmfb_mixer_info_req) +#define MSMFB_OVERLAY_PLAY_WAIT _IOWR(MSMFB_IOCTL_MAGIC, 149, \ + struct msmfb_overlay_data) +#define MSMFB_WRITEBACK_INIT _IO(MSMFB_IOCTL_MAGIC, 150) +#define MSMFB_WRITEBACK_START _IO(MSMFB_IOCTL_MAGIC, 151) +#define MSMFB_WRITEBACK_STOP _IO(MSMFB_IOCTL_MAGIC, 152) +#define MSMFB_WRITEBACK_QUEUE_BUFFER _IOW(MSMFB_IOCTL_MAGIC, 153, \ + struct msmfb_data) +#define MSMFB_WRITEBACK_DEQUEUE_BUFFER _IOW(MSMFB_IOCTL_MAGIC, 154, \ + struct msmfb_data) +#define MSMFB_WRITEBACK_TERMINATE _IO(MSMFB_IOCTL_MAGIC, 155) +#define MSMFB_MDP_PP _IOWR(MSMFB_IOCTL_MAGIC, 156, struct msmfb_mdp_pp) + +#define MSMFB_OVERLAY_VSYNC_CTRL _IOW(MSMFB_IOCTL_MAGIC, 160, unsigned int) +#define MSMFB_VSYNC_CTRL _IOW(MSMFB_IOCTL_MAGIC, 161, unsigned int) + +#define MSMFB_BUFFER_SYNC _IOW(MSMFB_IOCTL_MAGIC, 165, struct mdp_buf_sync) +#define MSMFB_METADATA_SET _IOW(MSMFB_IOCTL_MAGIC, 166, struct msmfb_metadata) +#define MSMFB_DISPLAY_COMMIT _IOW(MSMFB_IOCTL_MAGIC, 164, \ + struct mdp_display_commit) +#define MSMFB_OVERLAY_COMMIT _IO(MSMFB_IOCTL_MAGIC, 163) + +#define FB_TYPE_3D_PANEL 0x10101010 +#define MDP_IMGTYPE2_START 0x10000 +#define MSMFB_DRIVER_VERSION 0xF9E8D701 enum { - MDP_RGB_565, /* RGB 565 planar */ - MDP_XRGB_8888, /* RGB 888 padded */ - MDP_Y_CBCR_H2V2, /* Y and CbCr, pseudo planar w/ Cb is in MSB */ - MDP_ARGB_8888, /* ARGB 888 */ - MDP_RGB_888, /* RGB 888 planar */ - MDP_Y_CRCB_H2V2, /* Y and 
CrCb, pseudo planar w/ Cr is in MSB */ - MDP_YCRYCB_H2V1, /* YCrYCb interleave */ - MDP_Y_CRCB_H2V1, /* Y and CrCb, pseduo planar w/ Cr is in MSB */ - MDP_Y_CBCR_H2V1, /* Y and CrCb, pseduo planar w/ Cr is in MSB */ - MDP_RGBA_8888, /* ARGB 888 */ - MDP_BGRA_8888, /* ABGR 888 */ - MDP_IMGTYPE_LIMIT /* Non valid image type after this enum */ + NOTIFY_UPDATE_START, + NOTIFY_UPDATE_STOP, +}; + +enum { + MDP_RGB_565, /* RGB 565 planer */ + MDP_XRGB_8888, /* RGB 888 padded */ + MDP_Y_CBCR_H2V2, /* Y and CbCr, pseudo planer w/ Cb is in MSB */ + MDP_Y_CBCR_H2V2_ADRENO, + MDP_ARGB_8888, /* ARGB 888 */ + MDP_RGB_888, /* RGB 888 planer */ + MDP_Y_CRCB_H2V2, /* Y and CrCb, pseudo planer w/ Cr is in MSB */ + MDP_YCRYCB_H2V1, /* YCrYCb interleave */ + MDP_Y_CRCB_H2V1, /* Y and CrCb, pseduo planer w/ Cr is in MSB */ + MDP_Y_CBCR_H2V1, /* Y and CrCb, pseduo planer w/ Cr is in MSB */ + MDP_RGBA_8888, /* ARGB 888 */ + MDP_BGRA_8888, /* ABGR 888 */ + MDP_RGBX_8888, /* RGBX 888 */ + MDP_Y_CRCB_H2V2_TILE, /* Y and CrCb, pseudo planer tile */ + MDP_Y_CBCR_H2V2_TILE, /* Y and CbCr, pseudo planer tile */ + MDP_Y_CR_CB_H2V2, /* Y, Cr and Cb, planar */ + MDP_Y_CR_CB_GH2V2, /* Y, Cr and Cb, planar aligned to Android YV12 */ + MDP_Y_CB_CR_H2V2, /* Y, Cb and Cr, planar */ + MDP_Y_CRCB_H1V1, /* Y and CrCb, pseduo planer w/ Cr is in MSB */ + MDP_Y_CBCR_H1V1, /* Y and CbCr, pseduo planer w/ Cb is in MSB */ + MDP_YCRCB_H1V1, /* YCrCb interleave */ + MDP_YCBCR_H1V1, /* YCbCr interleave */ + MDP_IMGTYPE_LIMIT, + MDP_RGB_BORDERFILL, /* border fill pipe */ + MDP_BGR_565 = MDP_IMGTYPE2_START, /* BGR 565 planer */ + MDP_FB_FORMAT, /* framebuffer format */ + MDP_IMGTYPE_LIMIT2 /* Non valid image type after this enum */ }; enum { @@ -40,39 +122,492 @@ enum { FB_IMG, }; -/* flag values */ -#define MDP_ROT_NOP 0 -#define MDP_FLIP_LR 0x1 -#define MDP_FLIP_UD 0x2 -#define MDP_ROT_90 0x4 -#define MDP_ROT_180 (MDP_FLIP_UD|MDP_FLIP_LR) -#define MDP_ROT_270 (MDP_ROT_90|MDP_FLIP_UD|MDP_FLIP_LR) -#define MDP_DITHER 0x8 -#define MDP_BLUR 0x10 +enum { + HSIC_HUE = 0, + HSIC_SAT, + HSIC_INT, + HSIC_CON, + NUM_HSIC_PARAM, +}; + +/* mdp_blit_req flag values */ +#define MDP_ROT_NOP 0 +#define MDP_FLIP_LR 0x1 +#define MDP_FLIP_UD 0x2 +#define MDP_ROT_90 0x4 +#define MDP_ROT_180 (MDP_FLIP_UD|MDP_FLIP_LR) +#define MDP_ROT_270 (MDP_ROT_90|MDP_FLIP_UD|MDP_FLIP_LR) +#define MDP_DITHER 0x8 +#define MDP_BLUR 0x10 +#define MDP_BLEND_FG_PREMULT 0x20000 +#define MDP_DEINTERLACE 0x80000000 +#define MDP_SHARPENING 0x40000000 +#define MDP_NO_DMA_BARRIER_START 0x20000000 +#define MDP_NO_DMA_BARRIER_END 0x10000000 +#define MDP_NO_BLIT 0x08000000 +#define MDP_BLIT_WITH_DMA_BARRIERS 0x000 +#define MDP_BLIT_WITH_NO_DMA_BARRIERS \ + (MDP_NO_DMA_BARRIER_START | MDP_NO_DMA_BARRIER_END) +#define MDP_BLIT_SRC_GEM 0x04000000 +#define MDP_BLIT_DST_GEM 0x02000000 +#define MDP_BLIT_NON_CACHED 0x01000000 +#define MDP_OV_PIPE_SHARE 0x00800000 +#define MDP_DEINTERLACE_ODD 0x00400000 +#define MDP_OV_PLAY_NOWAIT 0x00200000 +#define MDP_SOURCE_ROTATED_90 0x00100000 +#define MDP_OVERLAY_PP_CFG_EN 0x00080000 +#define MDP_BACKEND_COMPOSITION 0x00040000 +#define MDP_BORDERFILL_SUPPORTED 0x00010000 +#define MDP_SECURE_OVERLAY_SESSION 0x00008000 +#define MDP_MEMORY_ID_TYPE_FB 0x00001000 -#define MDP_TRANSP_NOP 0xffffffff -#define MDP_ALPHA_NOP 0xff +#define MDP_TRANSP_NOP 0xffffffff +#define MDP_ALPHA_NOP 0xff + +#define MDP_FB_PAGE_PROTECTION_NONCACHED (0) +#define MDP_FB_PAGE_PROTECTION_WRITECOMBINE (1) +#define MDP_FB_PAGE_PROTECTION_WRITETHROUGHCACHE (2) +#define 
MDP_FB_PAGE_PROTECTION_WRITEBACKCACHE (3) +#define MDP_FB_PAGE_PROTECTION_WRITEBACKWACACHE (4) +/* Sentinel: Don't use! */ +#define MDP_FB_PAGE_PROTECTION_INVALID (5) +/* Count of the number of MDP_FB_PAGE_PROTECTION_... values. */ +#define MDP_NUM_FB_PAGE_PROTECTION_VALUES (5) struct mdp_rect { - u32 x, y, w, h; + uint32_t x; + uint32_t y; + uint32_t w; + uint32_t h; }; struct mdp_img { - u32 width, height, format, offset; + uint32_t width; + uint32_t height; + uint32_t format; + uint32_t offset; int memory_id; /* the file descriptor */ + uint32_t priv; +}; + +/* + * {3x3} + {3} ccs matrix + */ + +#define MDP_CCS_RGB2YUV 0 +#define MDP_CCS_YUV2RGB 1 + +#define MDP_CCS_SIZE 9 +#define MDP_BV_SIZE 3 + +struct mdp_ccs { + int direction; /* MDP_CCS_RGB2YUV or YUV2RGB */ + uint16_t ccs[MDP_CCS_SIZE]; /* 3x3 color coefficients */ + uint16_t bv[MDP_BV_SIZE]; /* 1x3 bias vector */ +}; + +struct mdp_csc { + int id; + uint32_t csc_mv[9]; + uint32_t csc_pre_bv[3]; + uint32_t csc_post_bv[3]; + uint32_t csc_pre_lv[6]; + uint32_t csc_post_lv[6]; }; +/* The version of the mdp_blit_req structure so that + * user applications can selectively decide which functionality + * to include + */ + +#define MDP_BLIT_REQ_VERSION 2 + struct mdp_blit_req { struct mdp_img src; struct mdp_img dst; struct mdp_rect src_rect; struct mdp_rect dst_rect; - u32 alpha, transp_mask, flags; + uint32_t alpha; + uint32_t transp_mask; + uint32_t flags; + int sharpening_strength; /* -127 <--> 127, default 64 */ }; struct mdp_blit_req_list { - u32 count; + uint32_t count; struct mdp_blit_req req[]; }; -#endif /* _MSM_MDP_H_ */ +#define MSMFB_DATA_VERSION 2 + +struct msmfb_data { + uint32_t offset; + int memory_id; + int id; + uint32_t flags; + uint32_t priv; + uint32_t iova; +}; + +#define MSMFB_NEW_REQUEST -1 + +struct msmfb_overlay_data { + uint32_t id; + struct msmfb_data data; + uint32_t version_key; + struct msmfb_data plane1_data; + struct msmfb_data plane2_data; + struct msmfb_data dst_data; +}; + +struct msmfb_img { + uint32_t width; + uint32_t height; + uint32_t format; +}; + +#define MSMFB_WRITEBACK_DEQUEUE_BLOCKING 0x1 +struct msmfb_writeback_data { + struct msmfb_data buf_info; + struct msmfb_img img; +}; + +#define MDP_PP_OPS_ENABLE 0x1 +#define MDP_PP_OPS_READ 0x2 +#define MDP_PP_OPS_WRITE 0x4 +#define MDP_PP_OPS_DISABLE 0x8 + +struct mdp_qseed_cfg { + uint32_t table_num; + uint32_t ops; + uint32_t len; + uint32_t *data; +}; + +struct mdp_qseed_cfg_data { + uint32_t block; + struct mdp_qseed_cfg qseed_data; +}; + +struct mdp_sharp_cfg { + uint32_t flags; + uint32_t strength; + uint32_t edge_thr; + uint32_t smooth_thr; + uint32_t noise_thr; +}; + +#define MDP_OVERLAY_PP_CSC_CFG 0x1 +#define MDP_OVERLAY_PP_QSEED_CFG 0x2 +#define MDP_OVERLAY_PP_PA_CFG 0x4 +#define MDP_OVERLAY_PP_IGC_CFG 0x8 +#define MDP_OVERLAY_PP_SHARP_CFG 0x10 + +#define MDP_CSC_FLAG_ENABLE 0x1 +#define MDP_CSC_FLAG_YUV_IN 0x2 +#define MDP_CSC_FLAG_YUV_OUT 0x4 + +struct mdp_csc_cfg { + /* flags for enable CSC, toggling RGB,YUV input/output */ + uint32_t flags; + uint32_t csc_mv[9]; + uint32_t csc_pre_bv[3]; + uint32_t csc_post_bv[3]; + uint32_t csc_pre_lv[6]; + uint32_t csc_post_lv[6]; +}; + +struct mdp_csc_cfg_data { + uint32_t block; + struct mdp_csc_cfg csc_data; +}; + +struct mdp_pa_cfg { + uint32_t flags; + uint32_t hue_adj; + uint32_t sat_adj; + uint32_t val_adj; + uint32_t cont_adj; +}; + +struct mdp_igc_lut_data { + uint32_t block; + uint32_t len, ops; + uint32_t *c0_c1_data; + uint32_t *c2_data; +}; + +struct mdp_overlay_pp_params { + uint32_t 
config_ops; + struct mdp_csc_cfg csc_cfg; + struct mdp_qseed_cfg qseed_cfg[2]; + struct mdp_pa_cfg pa_cfg; + struct mdp_igc_lut_data igc_cfg; + struct mdp_sharp_cfg sharp_cfg; +}; + +struct mdp_overlay { + struct msmfb_img src; + struct mdp_rect src_rect; + struct mdp_rect dst_rect; + uint32_t z_order; /* stage number */ + uint32_t is_fg; /* control alpha & transp */ + uint32_t alpha; + uint32_t transp_mask; + uint32_t flags; + uint32_t id; + uint32_t user_data[8]; + struct mdp_overlay_pp_params overlay_pp_cfg; +}; + +struct msmfb_overlay_3d { + uint32_t is_3d; + uint32_t width; + uint32_t height; +}; + + +struct msmfb_overlay_blt { + uint32_t enable; + uint32_t offset; + uint32_t width; + uint32_t height; + uint32_t bpp; +}; + +struct mdp_histogram { + uint32_t frame_cnt; + uint32_t bin_cnt; + uint32_t *r; + uint32_t *g; + uint32_t *b; +}; + + +/* + + mdp_block_type defines the identifiers for pipes in MDP 4.3 and up + + MDP_BLOCK_RESERVED is provided for backward compatibility and is + deprecated. It corresponds to DMA_P. So MDP_BLOCK_DMA_P should be used + instead. + + MDP_LOGICAL_BLOCK_DISP_0 identifies the display pipe which fb0 uses, + same for others. + +*/ + +enum { + MDP_BLOCK_RESERVED = 0, + MDP_BLOCK_OVERLAY_0, + MDP_BLOCK_OVERLAY_1, + MDP_BLOCK_VG_1, + MDP_BLOCK_VG_2, + MDP_BLOCK_RGB_1, + MDP_BLOCK_RGB_2, + MDP_BLOCK_DMA_P, + MDP_BLOCK_DMA_S, + MDP_BLOCK_DMA_E, + MDP_BLOCK_OVERLAY_2, + MDP_LOGICAL_BLOCK_DISP_0 = 0x1000, + MDP_LOGICAL_BLOCK_DISP_1, + MDP_LOGICAL_BLOCK_DISP_2, + MDP_BLOCK_MAX, +}; + +/* + * mdp_histogram_start_req is used to provide the parameters for + *histogram start request + */ + +struct mdp_histogram_start_req { + uint32_t block; + uint8_t frame_cnt; + uint8_t bit_mask; + uint8_t num_bins; +}; + + +/* + * mdp_histogram_data is used to return the histogram data, once + * the histogram is done/stopped/cance + */ + + +struct mdp_histogram_data { + uint32_t block; + uint8_t bin_cnt; + uint32_t *c0; + uint32_t *c1; + uint32_t *c2; + uint32_t *extra_info; +}; + +struct mdp_pcc_coeff { + uint32_t c, r, g, b, rr, gg, bb, rg, gb, rb, rgb_0, rgb_1; +}; + +struct mdp_pcc_cfg_data { + uint32_t block; + uint32_t ops; + struct mdp_pcc_coeff r, g, b; +}; + +enum { + mdp_lut_igc, + mdp_lut_pgc, + mdp_lut_hist, + mdp_lut_max, +}; + +struct mdp_ar_gc_lut_data { + uint32_t x_start; + uint32_t slope; + uint32_t offset; +}; + +struct mdp_pgc_lut_data { + uint32_t block; + uint32_t flags; + uint8_t num_r_stages; + uint8_t num_g_stages; + uint8_t num_b_stages; + struct mdp_ar_gc_lut_data *r_data; + struct mdp_ar_gc_lut_data *g_data; + struct mdp_ar_gc_lut_data *b_data; +}; + + +struct mdp_hist_lut_data { + uint32_t block; + uint32_t ops; + uint32_t len; + uint32_t *data; +}; + +struct mdp_lut_cfg_data { + uint32_t lut_type; + union { + struct mdp_igc_lut_data igc_lut_data; + struct mdp_pgc_lut_data pgc_lut_data; + struct mdp_hist_lut_data hist_lut_data; + } data; +}; + +struct mdp_bl_scale_data { + uint32_t min_lvl; + uint32_t scale; +}; + +struct mdp_pa_cfg_data { + uint32_t block; + struct mdp_pa_cfg pa_data; +}; + +enum { + mdp_op_pcc_cfg, + mdp_op_csc_cfg, + mdp_op_lut_cfg, + mdp_bl_scale_cfg, + mdp_op_qseed_cfg, + mdp_op_pa_cfg, + mdp_op_max, +}; + +struct msmfb_mdp_pp { + uint32_t op; + union { + struct mdp_pcc_cfg_data pcc_cfg_data; + struct mdp_csc_cfg_data csc_cfg_data; + struct mdp_lut_cfg_data lut_cfg_data; + struct mdp_bl_scale_data bl_scale_data; + struct mdp_qseed_cfg_data qseed_cfg_data; + struct mdp_pa_cfg_data pa_cfg_data; + } data; +}; + +enum { + 
metadata_op_none, + metadata_op_base_blend, + metadata_op_max +}; + +#define MDP_MAX_FENCE_FD 10 +#define MDP_BUF_SYNC_FLAG_WAIT 1 + +struct mdp_buf_sync { + uint32_t flags; + uint32_t acq_fen_fd_cnt; + int *acq_fen_fd; + int *rel_fen_fd; +}; + +struct mdp_blend_cfg { + uint32_t is_premultiplied; +}; + +struct msmfb_metadata { + uint32_t op; + uint32_t flags; + union { + struct mdp_blend_cfg blend_cfg; + } data; +}; +struct mdp_buf_fence { + uint32_t flags; + uint32_t acq_fen_fd_cnt; + int acq_fen_fd[MDP_MAX_FENCE_FD]; + int rel_fen_fd[MDP_MAX_FENCE_FD]; +}; + +#define MDP_DISPLAY_COMMIT_OVERLAY 0x00000001 + +struct mdp_display_commit { + uint32_t flags; + uint32_t wait_for_finish; + struct fb_var_screeninfo var; +}; + +struct mdp_page_protection { + uint32_t page_protection; +}; + + +struct mdp_mixer_info { + int pndx; + int pnum; + int ptype; + int mixer_num; + int z_order; +}; + +#define MAX_PIPE_PER_MIXER 4 + +struct msmfb_mixer_info_req { + int mixer_num; + int cnt; + struct mdp_mixer_info info[MAX_PIPE_PER_MIXER]; +}; + +enum { + DISPLAY_SUBSYSTEM_ID, + ROTATOR_SUBSYSTEM_ID, +}; + +#ifdef __KERNEL__ + +/* get the framebuffer physical address information */ +int get_fb_phys_info(unsigned long *start, unsigned long *len, int fb_num, + int subsys_id); +struct fb_info *msm_fb_get_writeback_fb(void); +int msm_fb_writeback_init(struct fb_info *info); +int msm_fb_writeback_start(struct fb_info *info); +int msm_fb_writeback_queue_buffer(struct fb_info *info, + struct msmfb_data *data); +int msm_fb_writeback_dequeue_buffer(struct fb_info *info, + struct msmfb_data *data); +int msm_fb_writeback_stop(struct fb_info *info); +int msm_fb_writeback_terminate(struct fb_info *info); +#endif + +#endif /*_MSM_MDP_H_*/ diff --git a/include/linux/msm_q6vdec.h b/include/linux/msm_q6vdec.h new file mode 100644 index 0000000000000..ed7cf78241927 --- /dev/null +++ b/include/linux/msm_q6vdec.h @@ -0,0 +1,241 @@ +/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Code Aurora nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
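A user-space sketch of the MSMFB_OVERLAY_SET / MSMFB_OVERLAY_PLAY pair from the msm_mdp.h hunk above; the framebuffer descriptor, buffer file descriptor and pixel format are assumptions:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/msm_mdp.h>

int example_overlay_play(int fb_fd, int buf_fd, uint32_t w, uint32_t h)
{
        struct mdp_overlay ov = {
                .src         = { .width = w, .height = h,
                                 .format = MDP_RGBA_8888 },
                .src_rect    = { 0, 0, w, h },
                .dst_rect    = { 0, 0, w, h },
                .z_order     = 0,
                .alpha       = MDP_ALPHA_NOP,
                .transp_mask = MDP_TRANSP_NOP,
                .id          = MSMFB_NEW_REQUEST,
        };
        struct msmfb_overlay_data od = { 0 };

        if (ioctl(fb_fd, MSMFB_OVERLAY_SET, &ov) < 0)
                return -1;
        od.id             = ov.id;      /* pipe id assigned by the driver */
        od.data.memory_id = buf_fd;     /* pmem/ION buffer holding pixels */
        od.data.offset    = 0;
        return ioctl(fb_fd, MSMFB_OVERLAY_PLAY, &od);
}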
+ * + */ + +#ifndef _MSM_VDEC_H_ +#define _MSM_VDEC_H_ + +#include + +#define VDEC_IOCTL_MAGIC 'v' + +#define VDEC_IOCTL_INITIALIZE _IOWR(VDEC_IOCTL_MAGIC, 1, struct vdec_init) +#define VDEC_IOCTL_SETBUFFERS _IOW(VDEC_IOCTL_MAGIC, 2, struct vdec_buffer) +#define VDEC_IOCTL_QUEUE _IOWR(VDEC_IOCTL_MAGIC, 3, \ + struct vdec_input_buf) +#define VDEC_IOCTL_REUSEFRAMEBUFFER _IOW(VDEC_IOCTL_MAGIC, 4, unsigned int) +#define VDEC_IOCTL_FLUSH _IOW(VDEC_IOCTL_MAGIC, 5, unsigned int) +#define VDEC_IOCTL_EOS _IO(VDEC_IOCTL_MAGIC, 6) +#define VDEC_IOCTL_GETMSG _IOR(VDEC_IOCTL_MAGIC, 7, struct vdec_msg) +#define VDEC_IOCTL_CLOSE _IO(VDEC_IOCTL_MAGIC, 8) +#define VDEC_IOCTL_FREEBUFFERS _IOW(VDEC_IOCTL_MAGIC, 9, struct vdec_buf_info) +#define VDEC_IOCTL_GETDECATTRIBUTES _IOR(VDEC_IOCTL_MAGIC, 10, \ + struct vdec_dec_attributes) +#define VDEC_IOCTL_GETVERSION _IOR(VDEC_IOCTL_MAGIC, 11, struct vdec_version) + +enum { + VDEC_FRAME_DECODE_OK, + VDEC_FRAME_DECODE_ERR, + VDEC_FATAL_ERR, + VDEC_FLUSH_FINISH, + VDEC_EOS, + VDEC_FRAME_FLUSH, + VDEC_STREAM_SWITCH, + VDEC_SUSPEND_FINISH, + VDEC_BUFFER_CONSUMED +}; + +enum { + VDEC_FLUSH_INPUT, + VDEC_FLUSH_OUTPUT, + VDEC_FLUSH_ALL +}; + +enum { + VDEC_BUFFER_TYPE_INPUT, + VDEC_BUFFER_TYPE_OUTPUT, + VDEC_BUFFER_TYPE_INTERNAL1, + VDEC_BUFFER_TYPE_INTERNAL2, +}; + +enum { + VDEC_QUEUE_SUCCESS, + VDEC_QUEUE_FAILED, + VDEC_QUEUE_BADSTATE, +}; + +enum { + VDEC_COLOR_FORMAT_NV21 = 0x01, + VDEC_COLOR_FORMAT_NV21_YAMOTO = 0x02 + }; + +struct vdec_input_buf_info { + u32 offset; + u32 data; + u32 size; + int timestamp_lo; + int timestamp_hi; + int avsync_state; + u32 flags; +}; + +struct vdec_buf_desc { + u32 bufsize; + u32 num_min_buffers; + u32 num_max_buffers; +}; + +struct vdec_buf_req { + u32 max_input_queue_size; + struct vdec_buf_desc input; + struct vdec_buf_desc output; + struct vdec_buf_desc dec_req1; + struct vdec_buf_desc dec_req2; +}; + +struct vdec_region_info { + u32 src_id; + u32 offset; + u32 size; +}; + +struct vdec_config { + u32 fourcc; /* video format */ + u32 width; /* source width */ + u32 height; /* source height */ + u32 order; /* render decoder order */ + u32 notify_enable; /* enable notify input buffer done event */ + u32 vc1_rowbase; + u32 h264_startcode_detect; + u32 h264_nal_len_size; + u32 postproc_flag; + u32 fruc_enable; + u32 color_format; /* used to set YUV color format */ +}; + +struct vdec_vc1_panscan_regions { + int num; + int width[4]; + int height[4]; + int xoffset[4]; + int yoffset[4]; +}; + +struct vdec_cropping_window { + u32 x1; + u32 y1; + u32 x2; + u32 y2; +}; + +struct vdec_frame_info { + u32 status; /* video decode status */ + u32 offset; /* buffer offset */ + u32 data1; /* user data field 1 */ + u32 data2; /* user data field 2 */ + int timestamp_lo; /* lower 32 bits timestamp, in msec */ + int timestamp_hi; /* higher 32 bits timestamp, in msec */ + int cal_timestamp_lo; /* lower 32 bits cal timestamp, in msec */ + int cal_timestamp_hi; /* higher 32 bits cal timestamp, in msec */ + u32 dec_width; /* frame roi width */ + u32 dec_height; /* frame roi height */ + struct vdec_cropping_window cwin; /* The frame cropping window */ + u32 picture_type[2]; /* picture coding type */ + u32 picture_format; /* picture coding format */ + u32 vc1_rangeY; /* luma range mapping */ + u32 vc1_rangeUV; /* chroma range mapping */ + u32 picture_resolution; /* scaling factor */ + u32 frame_disp_repeat; /* how often repeated by disp */ + u32 repeat_first_field; /* repeat 1st field after 2nd */ + u32 top_field_first; /* top field displayed first */ + u32 
interframe_interp; /* not for inter-frame interp */ + struct vdec_vc1_panscan_regions panscan; /* pan region */ + u32 concealed_macblk_num; /* number of concealed macro blk */ + u32 flags; /* input flags */ + u32 performance_stats; /* performance statistics returned by decoder */ + u32 data3; /* user data field 3 */ +}; + +struct vdec_buf_info { + u32 buf_type; + struct vdec_region_info region; + u32 num_buf; + u32 islast; +}; + +struct vdec_buffer { + u32 pmem_id; + struct vdec_buf_info buf; +}; + +struct vdec_sequence { + u8 *header; + u32 len; +}; + +struct vdec_config_sps { + struct vdec_config cfg; + struct vdec_sequence seq; +}; + +#define VDEC_MSG_REUSEINPUTBUFFER 1 +#define VDEC_MSG_FRAMEDONE 2 + +struct vdec_msg { + u32 id; + + union { + /* id = VDEC_MSG_REUSEINPUTBUFFER */ + u32 buf_id; + /* id = VDEC_MSG_FRAMEDONE */ + struct vdec_frame_info vfr_info; + }; +}; + +struct vdec_init { + struct vdec_config_sps sps_cfg; + struct vdec_buf_req *buf_req; +}; + +struct vdec_input_buf { + u32 pmem_id; + struct vdec_input_buf_info buffer; + struct vdec_queue_status *queue_status; +}; + +struct vdec_queue_status { + u32 status; +}; + +struct vdec_dec_attributes { + u32 fourcc; + u32 profile; + u32 level; + u32 dec_pic_width; + u32 dec_pic_height; + struct vdec_buf_desc input; + struct vdec_buf_desc output; + struct vdec_buf_desc dec_req1; + struct vdec_buf_desc dec_req2; +}; + +struct vdec_version { + u32 major; + u32 minor; +}; + +#endif /* _MSM_VDEC_H_ */ diff --git a/include/linux/msm_q6venc.h b/include/linux/msm_q6venc.h new file mode 100755 index 0000000000000..097e2db869c36 --- /dev/null +++ b/include/linux/msm_q6venc.h @@ -0,0 +1,331 @@ +/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Code Aurora nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#ifndef _MSM_VENC_H_ +#define _MSM_VENC_H_ + +#include + +#define VENC_MAX_RECON_BUFFERS 2 + +#define VENC_FLAG_EOS 0x00000001 +#define VENC_FLAG_END_OF_FRAME 0x00000010 +#define VENC_FLAG_SYNC_FRAME 0x00000020 +#define VENC_FLAG_EXTRA_DATA 0x00000040 +#define VENC_FLAG_CODEC_CONFIG 0x00000080 + +enum venc_flush_type { + VENC_FLUSH_INPUT, + VENC_FLUSH_OUTPUT, + VENC_FLUSH_ALL +}; + +enum venc_state_type { + VENC_STATE_PAUSE = 0x1, + VENC_STATE_START = 0x2, + VENC_STATE_STOP = 0x4 +}; + +enum venc_event_type_enum { + VENC_EVENT_START_STATUS, + VENC_EVENT_STOP_STATUS, + VENC_EVENT_SUSPEND_STATUS, + VENC_EVENT_RESUME_STATUS, + VENC_EVENT_FLUSH_STATUS, + VENC_EVENT_RELEASE_INPUT, + VENC_EVENT_DELIVER_OUTPUT, + VENC_EVENT_UNKNOWN_STATUS +}; + +enum venc_status_code { + VENC_STATUS_SUCCESS, + VENC_STATUS_ERROR, + VENC_STATUS_INVALID_STATE, + VENC_STATUS_FLUSHING, + VENC_STATUS_INVALID_PARAM, + VENC_STATUS_CMD_QUEUE_FULL, + VENC_STATUS_CRITICAL, + VENC_STATUS_INSUFFICIENT_RESOURCES, + VENC_STATUS_TIMEOUT +}; + +enum venc_msg_code { + VENC_MSG_INDICATION, + VENC_MSG_INPUT_BUFFER_DONE, + VENC_MSG_OUTPUT_BUFFER_DONE, + VENC_MSG_NEED_OUTPUT_BUFFER, + VENC_MSG_FLUSH, + VENC_MSG_START, + VENC_MSG_STOP, + VENC_MSG_PAUSE, + VENC_MSG_RESUME, + VENC_MSG_STOP_READING_MSG +}; + +enum venc_error_code { + VENC_S_SUCCESS, + VENC_S_EFAIL, + VENC_S_EFATAL, + VENC_S_EBADPARAM, + VENC_S_EINVALSTATE, + VENC_S_ENOSWRES, + VENC_S_ENOHWRES, + VENC_S_EBUFFREQ, + VENC_S_EINVALCMD, + VENC_S_ETIMEOUT, + VENC_S_ENOREATMPT, + VENC_S_ENOPREREQ, + VENC_S_ECMDQFULL, + VENC_S_ENOTSUPP, + VENC_S_ENOTIMPL, + VENC_S_ENOTPMEM, + VENC_S_EFLUSHED, + VENC_S_EINSUFBUF, + VENC_S_ESAMESTATE, + VENC_S_EINVALTRANS +}; + +enum venc_mem_region_enum { + VENC_PMEM_EBI1, + VENC_PMEM_SMI +}; + +struct venc_buf_type { + unsigned int region; + unsigned int phys; + unsigned int size; + int offset; +}; + +struct venc_qp_range { + unsigned int min_qp; + unsigned int max_qp; +}; + +struct venc_frame_rate { + unsigned int frame_rate_num; + unsigned int frame_rate_den; +}; + +struct venc_slice_info { + unsigned int slice_mode; + unsigned int units_per_slice; +}; + +struct venc_extra_data { + unsigned int slice_extra_data_flag; + unsigned int slice_client_data1; + unsigned int slice_client_data2; + unsigned int slice_client_data3; + unsigned int none_extra_data_flag; + unsigned int none_client_data1; + unsigned int none_client_data2; + unsigned int none_client_data3; +}; + +struct venc_common_config { + unsigned int standard; + unsigned int input_frame_height; + unsigned int input_frame_width; + unsigned int output_frame_height; + unsigned int output_frame_width; + unsigned int rotation_angle; + unsigned int intra_period; + unsigned int rate_control; + struct venc_frame_rate frame_rate; + unsigned int bitrate; + struct venc_qp_range qp_range; + unsigned int iframe_qp; + unsigned int pframe_qp; + struct venc_slice_info slice_config; + struct venc_extra_data extra_data; +}; + +struct venc_nonio_buf_config { + struct venc_buf_type recon_buf1; + struct venc_buf_type recon_buf2; + struct venc_buf_type wb_buf; + struct venc_buf_type cmd_buf; + struct venc_buf_type vlc_buf; +}; + +struct venc_mpeg4_config { + unsigned int profile; + unsigned int level; + unsigned int time_resolution; + unsigned int ac_prediction; + unsigned int hec_interval; + unsigned int data_partition; + unsigned int short_header; + unsigned int rvlc_enable; +}; + +struct venc_h263_config { + unsigned int profile; + unsigned int level; +}; + +struct venc_h264_config { + unsigned 
int profile; + unsigned int level; + unsigned int max_nal; + unsigned int idr_period; +}; + +struct venc_pmem { + int src; + int fd; + unsigned int offset; + void *virt; + void *phys; + unsigned int size; +}; + +struct venc_buffer { + unsigned char *ptr_buffer; + unsigned int size; + unsigned int len; + unsigned int offset; + long long time_stamp; + unsigned int flags; + unsigned int client_data; + +}; + +struct venc_buffers { + struct venc_pmem recon_buf[VENC_MAX_RECON_BUFFERS]; + struct venc_pmem wb_buf; + struct venc_pmem cmd_buf; + struct venc_pmem vlc_buf; +}; + +struct venc_buffer_flush { + unsigned int flush_mode; +}; + +union venc_msg_data { + struct venc_buffer buf; + struct venc_buffer_flush flush_ret; + +}; + +struct venc_msg { + unsigned int status_code; + unsigned int msg_code; + union venc_msg_data msg_data; + unsigned int msg_data_size; +}; + +union venc_codec_config { + struct venc_mpeg4_config mpeg4_params; + struct venc_h263_config h263_params; + struct venc_h264_config h264_params; +}; + +struct venc_q6_config { + struct venc_common_config config_params; + union venc_codec_config codec_params; + struct venc_nonio_buf_config buf_params; + void *callback_event; +}; + +struct venc_hdr_config { + struct venc_common_config config_params; + union venc_codec_config codec_params; +}; + +struct venc_init_config { + struct venc_q6_config q6_config; + struct venc_buffers q6_bufs; +}; + +struct venc_seq_config { + int size; + struct venc_pmem buf; + struct venc_q6_config q6_config; +}; + +struct venc_version { + u32 major; + u32 minor; +}; + +#define VENC_IOCTL_MAGIC 'V' + +#define VENC_IOCTL_CMD_READ_NEXT_MSG \ + _IOWR(VENC_IOCTL_MAGIC, 1, struct venc_msg) + +#define VENC_IOCTL_CMD_STOP_READ_MSG _IO(VENC_IOCTL_MAGIC, 2) + +#define VENC_IOCTL_SET_INPUT_BUFFER \ + _IOW(VENC_IOCTL_MAGIC, 3, struct venc_pmem) + +#define VENC_IOCTL_SET_OUTPUT_BUFFER \ + _IOW(VENC_IOCTL_MAGIC, 4, struct venc_pmem) + +#define VENC_IOCTL_CMD_START _IOW(VENC_IOCTL_MAGIC, 5, struct venc_init_config) + +#define VENC_IOCTL_CMD_ENCODE_FRAME \ + _IOW(VENC_IOCTL_MAGIC, 6, struct venc_buffer) + +#define VENC_IOCTL_CMD_FILL_OUTPUT_BUFFER \ + _IOW(VENC_IOCTL_MAGIC, 7, struct venc_buffer) + +#define VENC_IOCTL_CMD_FLUSH \ + _IOW(VENC_IOCTL_MAGIC, 8, struct venc_buffer_flush) + +#define VENC_IOCTL_CMD_PAUSE _IO(VENC_IOCTL_MAGIC, 9) + +#define VENC_IOCTL_CMD_RESUME _IO(VENC_IOCTL_MAGIC, 10) + +#define VENC_IOCTL_CMD_STOP _IO(VENC_IOCTL_MAGIC, 11) + +#define VENC_IOCTL_SET_INTRA_PERIOD \ + _IOW(VENC_IOCTL_MAGIC, 12, int) + +#define VENC_IOCTL_CMD_REQUEST_IFRAME _IO(VENC_IOCTL_MAGIC, 13) + +#define VENC_IOCTL_GET_SEQUENCE_HDR \ + _IOWR(VENC_IOCTL_MAGIC, 14, struct venc_seq_config) + +#define VENC_IOCTL_SET_INTRA_REFRESH \ + _IOW(VENC_IOCTL_MAGIC, 15, int) + +#define VENC_IOCTL_SET_FRAME_RATE \ + _IOW(VENC_IOCTL_MAGIC, 16, struct venc_frame_rate) + +#define VENC_IOCTL_SET_TARGET_BITRATE \ + _IOW(VENC_IOCTL_MAGIC, 17, int) + +#define VENC_IOCTL_SET_QP_RANGE \ + _IOW(VENC_IOCTL_MAGIC, 18, struct venc_qp_range) + +#define VENC_IOCTL_GET_VERSION \ + _IOR(VENC_IOCTL_MAGIC, 19, struct venc_version) + +#endif diff --git a/include/linux/msm_rotator.h b/include/linux/msm_rotator.h new file mode 100644 index 0000000000000..2e57191b00cd2 --- /dev/null +++ b/include/linux/msm_rotator.h @@ -0,0 +1,76 @@ +/**************************************************************************** + **************************************************************************** + *** + *** This header was automatically generated from a Linux kernel 
header + *** of the same name, to make information necessary for userspace to + *** call into the kernel available to libc. It contains only constants, + *** structures, and macros generated from the original header, and thus, + *** contains no copyrightable information. + *** + *** To edit the content of this header, modify the corresponding + *** source file (e.g. under external/kernel-headers/original/) then + *** run bionic/libc/kernel/tools/update_all.py + *** + *** Any manual change here will be lost the next time this script will + *** be run. You've been warned! + *** + **************************************************************************** + ****************************************************************************/ +#ifndef __MSM_ROTATOR_H__ +#define __MSM_ROTATOR_H__ +#include +#include +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define MSM_ROTATOR_IOCTL_MAGIC 'R' +#define MSM_ROTATOR_IOCTL_START _IOWR(MSM_ROTATOR_IOCTL_MAGIC, 1, struct msm_rotator_img_info) +#define MSM_ROTATOR_IOCTL_ROTATE _IOW(MSM_ROTATOR_IOCTL_MAGIC, 2, struct msm_rotator_data_info) +#define MSM_ROTATOR_IOCTL_FINISH _IOW(MSM_ROTATOR_IOCTL_MAGIC, 3, int) +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +#define ROTATOR_VERSION_01 0xA5B4C301 +enum rotator_clk_type { + ROTATOR_CORE_CLK, + ROTATOR_PCLK, +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + ROTATOR_IMEM_CLK +}; +struct msm_rotator_img_info { + unsigned int session_id; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + struct msmfb_img src; + struct msmfb_img dst; + struct mdp_rect src_rect; + unsigned int dst_x; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + unsigned int dst_y; + unsigned char rotations; + int enable; + unsigned int downscale_ratio; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + unsigned int secure; +}; +struct msm_rotator_data_info { + int session_id; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + struct msmfb_data src; + struct msmfb_data dst; + unsigned int version_key; + struct msmfb_data src_chroma; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + struct msmfb_data dst_chroma; +}; +struct msm_rot_clocks { + const char *clk_name; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + enum rotator_clk_type clk_type; + unsigned int clk_rate; +}; +struct msm_rotator_platform_data { +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ + unsigned int number_of_clocks; + unsigned int hardware_version_number; + struct msm_rot_clocks *rotator_clks; + char rot_iommu_split_domain; +/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ +}; +#endif + diff --git a/include/linux/msm_rpcrouter.h b/include/linux/msm_rpcrouter.h new file mode 100644 index 0000000000000..62141a04e002f --- /dev/null +++ b/include/linux/msm_rpcrouter.h @@ -0,0 +1,47 @@ +/* include/linux/msm_rpcrouter.h + * + * Copyright (c) QUALCOMM Incorporated + * Copyright (C) 2007 Google, Inc. + * Author: San Mehat + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __LINUX_MSM_RPCROUTER_H +#define __LINUX_MSM_RPCROUTER_H + +#include +#include + +#define RPC_ROUTER_VERSION_V1 0x00010000 + +struct rpcrouter_ioctl_server_args { + uint32_t prog; + uint32_t vers; +}; + +#define RPC_ROUTER_IOCTL_MAGIC (0xC1) + +#define RPC_ROUTER_IOCTL_GET_VERSION \ + _IOR(RPC_ROUTER_IOCTL_MAGIC, 0, unsigned int) + +#define RPC_ROUTER_IOCTL_GET_MTU \ + _IOR(RPC_ROUTER_IOCTL_MAGIC, 1, unsigned int) + +#define RPC_ROUTER_IOCTL_REGISTER_SERVER \ + _IOWR(RPC_ROUTER_IOCTL_MAGIC, 2, unsigned int) + +#define RPC_ROUTER_IOCTL_UNREGISTER_SERVER \ + _IOWR(RPC_ROUTER_IOCTL_MAGIC, 3, unsigned int) + +#define RPC_ROUTER_IOCTL_GET_MINOR_VERSION \ + _IOW(RPC_ROUTER_IOCTL_MAGIC, 4, unsigned int) + +#endif diff --git a/include/linux/msm_vidc_dec.h b/include/linux/msm_vidc_dec.h new file mode 100644 index 0000000000000..5d6233a12290b --- /dev/null +++ b/include/linux/msm_vidc_dec.h @@ -0,0 +1,526 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef _MSM_VIDC_DEC_H_ +#define _MSM_VIDC_DEC_H_ + +#include +#include + +/* STATUS CODES */ +/* Base value for status codes */ +#define VDEC_S_BASE 0x40000000 +/* Success */ +#define VDEC_S_SUCCESS (VDEC_S_BASE) +/* General failure */ +#define VDEC_S_EFAIL (VDEC_S_BASE + 1) +/* Fatal irrecoverable failure. Need to tear down session. */ +#define VDEC_S_EFATAL (VDEC_S_BASE + 2) +/* Error detected in the passed parameters */ +#define VDEC_S_EBADPARAM (VDEC_S_BASE + 3) +/* Command called in invalid state. */ +#define VDEC_S_EINVALSTATE (VDEC_S_BASE + 4) + /* Insufficient OS resources - thread, memory etc. */ +#define VDEC_S_ENOSWRES (VDEC_S_BASE + 5) + /* Insufficient HW resources - core capacity maxed out. 
*/ +#define VDEC_S_ENOHWRES (VDEC_S_BASE + 6) +/* Invalid command called */ +#define VDEC_S_EINVALCMD (VDEC_S_BASE + 7) +/* Command timeout. */ +#define VDEC_S_ETIMEOUT (VDEC_S_BASE + 8) +/* Pre-requirement is not met for API. */ +#define VDEC_S_ENOPREREQ (VDEC_S_BASE + 9) +/* Command queue is full. */ +#define VDEC_S_ECMDQFULL (VDEC_S_BASE + 10) +/* Command is not supported by this driver */ +#define VDEC_S_ENOTSUPP (VDEC_S_BASE + 11) +/* Command is not implemented by thedriver. */ +#define VDEC_S_ENOTIMPL (VDEC_S_BASE + 12) +/* Command is not implemented by the driver. */ +#define VDEC_S_BUSY (VDEC_S_BASE + 13) + +#define VDEC_INTF_VER 1 +#define VDEC_MSG_BASE 0x0000000 +/* Codes to identify asynchronous message responses and events that driver + wants to communicate to the app.*/ +#define VDEC_MSG_INVALID (VDEC_MSG_BASE + 0) +#define VDEC_MSG_RESP_INPUT_BUFFER_DONE (VDEC_MSG_BASE + 1) +#define VDEC_MSG_RESP_OUTPUT_BUFFER_DONE (VDEC_MSG_BASE + 2) +#define VDEC_MSG_RESP_INPUT_FLUSHED (VDEC_MSG_BASE + 3) +#define VDEC_MSG_RESP_OUTPUT_FLUSHED (VDEC_MSG_BASE + 4) +#define VDEC_MSG_RESP_FLUSH_INPUT_DONE (VDEC_MSG_BASE + 5) +#define VDEC_MSG_RESP_FLUSH_OUTPUT_DONE (VDEC_MSG_BASE + 6) +#define VDEC_MSG_RESP_START_DONE (VDEC_MSG_BASE + 7) +#define VDEC_MSG_RESP_STOP_DONE (VDEC_MSG_BASE + 8) +#define VDEC_MSG_RESP_PAUSE_DONE (VDEC_MSG_BASE + 9) +#define VDEC_MSG_RESP_RESUME_DONE (VDEC_MSG_BASE + 10) +#define VDEC_MSG_RESP_RESOURCE_LOADED (VDEC_MSG_BASE + 11) +#define VDEC_EVT_RESOURCES_LOST (VDEC_MSG_BASE + 12) +#define VDEC_MSG_EVT_CONFIG_CHANGED (VDEC_MSG_BASE + 13) +#define VDEC_MSG_EVT_HW_ERROR (VDEC_MSG_BASE + 14) + +/*Buffer flags bits masks.*/ +#define VDEC_BUFFERFLAG_EOS 0x00000001 +#define VDEC_BUFFERFLAG_DECODEONLY 0x00000004 +#define VDEC_BUFFERFLAG_DATACORRUPT 0x00000008 +#define VDEC_BUFFERFLAG_ENDOFFRAME 0x00000010 +#define VDEC_BUFFERFLAG_SYNCFRAME 0x00000020 +#define VDEC_BUFFERFLAG_EXTRADATA 0x00000040 +#define VDEC_BUFFERFLAG_CODECCONFIG 0x00000080 + +/*Post processing flags bit masks*/ +#define VDEC_EXTRADATA_QP 0x00000001 +#define VDEC_EXTRADATA_SEI 0x00000002 +#define VDEC_EXTRADATA_VUI 0x00000004 +#define VDEC_EXTRADATA_MB_ERROR_MAP 0x00000008 + +#define VDEC_CMDBASE 0x800 +#define VDEC_CMD_SET_INTF_VERSION (VDEC_CMDBASE) + +#define VDEC_IOCTL_MAGIC 'v' + +struct vdec_ioctl_msg { + void __user *in; + void __user *out; +}; + +/* CMD params: InputParam:enum vdec_codec + OutputParam: struct vdec_profile_level*/ +#define VDEC_IOCTL_GET_PROFILE_LEVEL_SUPPORTED \ + _IOWR(VDEC_IOCTL_MAGIC, 0, struct vdec_ioctl_msg) + +/*CMD params:InputParam: NULL + OutputParam: uint32_t(bitmask)*/ +#define VDEC_IOCTL_GET_INTERLACE_FORMAT \ + _IOR(VDEC_IOCTL_MAGIC, 1, struct vdec_ioctl_msg) + +/* CMD params: InputParam: enum vdec_codec + OutputParam: struct vdec_profile_level*/ +#define VDEC_IOCTL_GET_CURRENT_PROFILE_LEVEL \ + _IOWR(VDEC_IOCTL_MAGIC, 2, struct vdec_ioctl_msg) + +/*CMD params: SET: InputParam: enum vdec_output_fromat OutputParam: NULL + GET: InputParam: NULL OutputParam: enum vdec_output_fromat*/ +#define VDEC_IOCTL_SET_OUTPUT_FORMAT \ + _IOWR(VDEC_IOCTL_MAGIC, 3, struct vdec_ioctl_msg) +#define VDEC_IOCTL_GET_OUTPUT_FORMAT \ + _IOWR(VDEC_IOCTL_MAGIC, 4, struct vdec_ioctl_msg) + +/*CMD params: SET: InputParam: enum vdec_codec OutputParam: NULL + GET: InputParam: NULL OutputParam: enum vdec_codec*/ +#define VDEC_IOCTL_SET_CODEC \ + _IOW(VDEC_IOCTL_MAGIC, 5, struct vdec_ioctl_msg) +#define VDEC_IOCTL_GET_CODEC \ + _IOR(VDEC_IOCTL_MAGIC, 6, struct vdec_ioctl_msg) + +/*CMD params: SET: 
InputParam: struct vdec_picsize outputparam: NULL + GET: InputParam: NULL outputparam: struct vdec_picsize*/ +#define VDEC_IOCTL_SET_PICRES \ + _IOW(VDEC_IOCTL_MAGIC, 7, struct vdec_ioctl_msg) +#define VDEC_IOCTL_GET_PICRES \ + _IOR(VDEC_IOCTL_MAGIC, 8, struct vdec_ioctl_msg) + +#define VDEC_IOCTL_SET_EXTRADATA \ + _IOW(VDEC_IOCTL_MAGIC, 9, struct vdec_ioctl_msg) +#define VDEC_IOCTL_GET_EXTRADATA \ + _IOR(VDEC_IOCTL_MAGIC, 10, struct vdec_ioctl_msg) + +#define VDEC_IOCTL_SET_SEQUENCE_HEADER \ + _IOW(VDEC_IOCTL_MAGIC, 11, struct vdec_ioctl_msg) + +/* CMD params: SET: InputParam - vdec_allocatorproperty, OutputParam - NULL + GET: InputParam - NULL, OutputParam - vdec_allocatorproperty*/ +#define VDEC_IOCTL_SET_BUFFER_REQ \ + _IOW(VDEC_IOCTL_MAGIC, 12, struct vdec_ioctl_msg) +#define VDEC_IOCTL_GET_BUFFER_REQ \ + _IOR(VDEC_IOCTL_MAGIC, 13, struct vdec_ioctl_msg) +/* CMD params: InputParam - vdec_buffer, OutputParam - uint8_t** */ +#define VDEC_IOCTL_ALLOCATE_BUFFER \ + _IOWR(VDEC_IOCTL_MAGIC, 14, struct vdec_ioctl_msg) +/* CMD params: InputParam - uint8_t *, OutputParam - NULL.*/ +#define VDEC_IOCTL_FREE_BUFFER \ + _IOW(VDEC_IOCTL_MAGIC, 15, struct vdec_ioctl_msg) + +/*CMD params: CMD: InputParam - struct vdec_setbuffer_cmd, OutputParam - NULL*/ +#define VDEC_IOCTL_SET_BUFFER \ + _IOW(VDEC_IOCTL_MAGIC, 16, struct vdec_ioctl_msg) + +/* CMD params: InputParam - struct vdec_fillbuffer_cmd, OutputParam - NULL*/ +#define VDEC_IOCTL_FILL_OUTPUT_BUFFER \ + _IOW(VDEC_IOCTL_MAGIC, 17, struct vdec_ioctl_msg) + +/*CMD params: InputParam - struct vdec_frameinfo , OutputParam - NULL*/ +#define VDEC_IOCTL_DECODE_FRAME \ + _IOW(VDEC_IOCTL_MAGIC, 18, struct vdec_ioctl_msg) + +#define VDEC_IOCTL_LOAD_RESOURCES _IO(VDEC_IOCTL_MAGIC, 19) +#define VDEC_IOCTL_CMD_START _IO(VDEC_IOCTL_MAGIC, 20) +#define VDEC_IOCTL_CMD_STOP _IO(VDEC_IOCTL_MAGIC, 21) +#define VDEC_IOCTL_CMD_PAUSE _IO(VDEC_IOCTL_MAGIC, 22) +#define VDEC_IOCTL_CMD_RESUME _IO(VDEC_IOCTL_MAGIC, 23) + +/*CMD params: InputParam - enum vdec_bufferflush , OutputParam - NULL */ +#define VDEC_IOCTL_CMD_FLUSH _IOW(VDEC_IOCTL_MAGIC, 24, struct vdec_ioctl_msg) + +/* ======================================================== + * IOCTL for getting asynchronous notification from driver + * ========================================================*/ + +/*IOCTL params: InputParam - NULL, OutputParam - struct vdec_msginfo*/ +#define VDEC_IOCTL_GET_NEXT_MSG \ + _IOR(VDEC_IOCTL_MAGIC, 25, struct vdec_ioctl_msg) + +#define VDEC_IOCTL_STOP_NEXT_MSG _IO(VDEC_IOCTL_MAGIC, 26) + +#define VDEC_IOCTL_GET_NUMBER_INSTANCES \ + _IOR(VDEC_IOCTL_MAGIC, 27, struct vdec_ioctl_msg) + +enum vdec_picture { + PICTURE_TYPE_I, + PICTURE_TYPE_P, + PICTURE_TYPE_B, + PICTURE_TYPE_BI, + PICTURE_TYPE_SKIP, + PICTURE_TYPE_UNKNOWN +}; + +enum vdec_buffer { + VDEC_BUFFER_TYPE_INPUT, + VDEC_BUFFER_TYPE_OUTPUT +}; + +struct vdec_allocatorproperty { + enum vdec_buffer buffer_type; + uint32_t mincount; + uint32_t maxcount; + uint32_t actualcount; + uint32_t buffer_size; + uint32_t alignment; + uint32_t buf_poolid; +}; + +struct vdec_bufferpayload { + void __user *addr; + size_t sz; + int pmem_fd; + size_t offset; + size_t mmaped_sz; +}; + +struct vdec_setbuffer_cmd { + enum vdec_buffer buffer_type; + struct vdec_bufferpayload buffer; +}; + +struct vdec_fillbuffer_cmd { + struct vdec_bufferpayload buffer; + void *client_data; +}; + +enum vdec_bufferflush { + VDEC_FLUSH_TYPE_INPUT, + VDEC_FLUSH_TYPE_OUTPUT, + VDEC_FLUSH_TYPE_ALL +}; + +enum vdec_codec { + VDEC_CODECTYPE_H264 = 0x1, + VDEC_CODECTYPE_H263 = 
0x2, + VDEC_CODECTYPE_MPEG4 = 0x3, + VDEC_CODECTYPE_DIVX_3 = 0x4, + VDEC_CODECTYPE_DIVX_4 = 0x5, + VDEC_CODECTYPE_DIVX_5 = 0x6, + VDEC_CODECTYPE_DIVX_6 = 0x7, + VDEC_CODECTYPE_XVID = 0x8, + VDEC_CODECTYPE_MPEG1 = 0x9, + VDEC_CODECTYPE_MPEG2 = 0xa, + VDEC_CODECTYPE_VC1 = 0xb, + VDEC_CODECTYPE_VC1_RCV = 0xc +}; + +enum vdec_mpeg2_profile { + VDEC_MPEG2ProfileSimple = 0x1, + VDEC_MPEG2ProfileMain = 0x2, + VDEC_MPEG2Profile422 = 0x4, + VDEC_MPEG2ProfileSNR = 0x8, + VDEC_MPEG2ProfileSpatial = 0x10, + VDEC_MPEG2ProfileHigh = 0x20, + VDEC_MPEG2ProfileKhronosExtensions = 0x6F000000, + VDEC_MPEG2ProfileVendorStartUnused = 0x7F000000, + VDEC_MPEG2ProfileMax = 0x7FFFFFFF +}; + +enum vdec_mpeg2_level { + + VDEC_MPEG2LevelLL = 0x1, + VDEC_MPEG2LevelML = 0x2, + VDEC_MPEG2LevelH14 = 0x4, + VDEC_MPEG2LevelHL = 0x8, + VDEC_MPEG2LevelKhronosExtensions = 0x6F000000, + VDEC_MPEG2LevelVendorStartUnused = 0x7F000000, + VDEC_MPEG2LevelMax = 0x7FFFFFFF +}; + +enum vdec_mpeg4_profile { + VDEC_MPEG4ProfileSimple = 0x01, + VDEC_MPEG4ProfileSimpleScalable = 0x02, + VDEC_MPEG4ProfileCore = 0x04, + VDEC_MPEG4ProfileMain = 0x08, + VDEC_MPEG4ProfileNbit = 0x10, + VDEC_MPEG4ProfileScalableTexture = 0x20, + VDEC_MPEG4ProfileSimpleFace = 0x40, + VDEC_MPEG4ProfileSimpleFBA = 0x80, + VDEC_MPEG4ProfileBasicAnimated = 0x100, + VDEC_MPEG4ProfileHybrid = 0x200, + VDEC_MPEG4ProfileAdvancedRealTime = 0x400, + VDEC_MPEG4ProfileCoreScalable = 0x800, + VDEC_MPEG4ProfileAdvancedCoding = 0x1000, + VDEC_MPEG4ProfileAdvancedCore = 0x2000, + VDEC_MPEG4ProfileAdvancedScalable = 0x4000, + VDEC_MPEG4ProfileAdvancedSimple = 0x8000, + VDEC_MPEG4ProfileKhronosExtensions = 0x6F000000, + VDEC_MPEG4ProfileVendorStartUnused = 0x7F000000, + VDEC_MPEG4ProfileMax = 0x7FFFFFFF +}; + +enum vdec_mpeg4_level { + VDEC_MPEG4Level0 = 0x01, + VDEC_MPEG4Level0b = 0x02, + VDEC_MPEG4Level1 = 0x04, + VDEC_MPEG4Level2 = 0x08, + VDEC_MPEG4Level3 = 0x10, + VDEC_MPEG4Level4 = 0x20, + VDEC_MPEG4Level4a = 0x40, + VDEC_MPEG4Level5 = 0x80, + VDEC_MPEG4LevelKhronosExtensions = 0x6F000000, + VDEC_MPEG4LevelVendorStartUnused = 0x7F000000, + VDEC_MPEG4LevelMax = 0x7FFFFFFF +}; + +enum vdec_avc_profile { + VDEC_AVCProfileBaseline = 0x01, + VDEC_AVCProfileMain = 0x02, + VDEC_AVCProfileExtended = 0x04, + VDEC_AVCProfileHigh = 0x08, + VDEC_AVCProfileHigh10 = 0x10, + VDEC_AVCProfileHigh422 = 0x20, + VDEC_AVCProfileHigh444 = 0x40, + VDEC_AVCProfileKhronosExtensions = 0x6F000000, + VDEC_AVCProfileVendorStartUnused = 0x7F000000, + VDEC_AVCProfileMax = 0x7FFFFFFF +}; + +enum vdec_avc_level { + VDEC_AVCLevel1 = 0x01, + VDEC_AVCLevel1b = 0x02, + VDEC_AVCLevel11 = 0x04, + VDEC_AVCLevel12 = 0x08, + VDEC_AVCLevel13 = 0x10, + VDEC_AVCLevel2 = 0x20, + VDEC_AVCLevel21 = 0x40, + VDEC_AVCLevel22 = 0x80, + VDEC_AVCLevel3 = 0x100, + VDEC_AVCLevel31 = 0x200, + VDEC_AVCLevel32 = 0x400, + VDEC_AVCLevel4 = 0x800, + VDEC_AVCLevel41 = 0x1000, + VDEC_AVCLevel42 = 0x2000, + VDEC_AVCLevel5 = 0x4000, + VDEC_AVCLevel51 = 0x8000, + VDEC_AVCLevelKhronosExtensions = 0x6F000000, + VDEC_AVCLevelVendorStartUnused = 0x7F000000, + VDEC_AVCLevelMax = 0x7FFFFFFF +}; + +enum vdec_divx_profile { + VDEC_DIVXProfile_qMobile = 0x01, + VDEC_DIVXProfile_Mobile = 0x02, + VDEC_DIVXProfile_HD = 0x04, + VDEC_DIVXProfile_Handheld = 0x08, + VDEC_DIVXProfile_Portable = 0x10, + VDEC_DIVXProfile_HomeTheater = 0x20 +}; + +enum vdec_xvid_profile { + VDEC_XVIDProfile_Simple = 0x1, + VDEC_XVIDProfile_Advanced_Realtime_Simple = 0x2, + VDEC_XVIDProfile_Advanced_Simple = 0x4 +}; + +enum vdec_xvid_level { + VDEC_XVID_LEVEL_S_L0 = 0x1, + 
VDEC_XVID_LEVEL_S_L1 = 0x2, + VDEC_XVID_LEVEL_S_L2 = 0x4, + VDEC_XVID_LEVEL_S_L3 = 0x8, + VDEC_XVID_LEVEL_ARTS_L1 = 0x10, + VDEC_XVID_LEVEL_ARTS_L2 = 0x20, + VDEC_XVID_LEVEL_ARTS_L3 = 0x40, + VDEC_XVID_LEVEL_ARTS_L4 = 0x80, + VDEC_XVID_LEVEL_AS_L0 = 0x100, + VDEC_XVID_LEVEL_AS_L1 = 0x200, + VDEC_XVID_LEVEL_AS_L2 = 0x400, + VDEC_XVID_LEVEL_AS_L3 = 0x800, + VDEC_XVID_LEVEL_AS_L4 = 0x1000 +}; + +enum vdec_h263profile { + VDEC_H263ProfileBaseline = 0x01, + VDEC_H263ProfileH320Coding = 0x02, + VDEC_H263ProfileBackwardCompatible = 0x04, + VDEC_H263ProfileISWV2 = 0x08, + VDEC_H263ProfileISWV3 = 0x10, + VDEC_H263ProfileHighCompression = 0x20, + VDEC_H263ProfileInternet = 0x40, + VDEC_H263ProfileInterlace = 0x80, + VDEC_H263ProfileHighLatency = 0x100, + VDEC_H263ProfileKhronosExtensions = 0x6F000000, + VDEC_H263ProfileVendorStartUnused = 0x7F000000, + VDEC_H263ProfileMax = 0x7FFFFFFF +}; + +enum vdec_h263level { + VDEC_H263Level10 = 0x01, + VDEC_H263Level20 = 0x02, + VDEC_H263Level30 = 0x04, + VDEC_H263Level40 = 0x08, + VDEC_H263Level45 = 0x10, + VDEC_H263Level50 = 0x20, + VDEC_H263Level60 = 0x40, + VDEC_H263Level70 = 0x80, + VDEC_H263LevelKhronosExtensions = 0x6F000000, + VDEC_H263LevelVendorStartUnused = 0x7F000000, + VDEC_H263LevelMax = 0x7FFFFFFF +}; + +enum vdec_wmv_format { + VDEC_WMVFormatUnused = 0x01, + VDEC_WMVFormat7 = 0x02, + VDEC_WMVFormat8 = 0x04, + VDEC_WMVFormat9 = 0x08, + VDEC_WMFFormatKhronosExtensions = 0x6F000000, + VDEC_WMFFormatVendorStartUnused = 0x7F000000, + VDEC_WMVFormatMax = 0x7FFFFFFF +}; + +enum vdec_vc1_profile { + VDEC_VC1ProfileSimple = 0x1, + VDEC_VC1ProfileMain = 0x2, + VDEC_VC1ProfileAdvanced = 0x4 +}; + +enum vdec_vc1_level { + VDEC_VC1_LEVEL_S_Low = 0x1, + VDEC_VC1_LEVEL_S_Medium = 0x2, + VDEC_VC1_LEVEL_M_Low = 0x4, + VDEC_VC1_LEVEL_M_Medium = 0x8, + VDEC_VC1_LEVEL_M_High = 0x10, + VDEC_VC1_LEVEL_A_L0 = 0x20, + VDEC_VC1_LEVEL_A_L1 = 0x40, + VDEC_VC1_LEVEL_A_L2 = 0x80, + VDEC_VC1_LEVEL_A_L3 = 0x100, + VDEC_VC1_LEVEL_A_L4 = 0x200 +}; + +struct vdec_profile_level { + uint32_t profiles; + uint32_t levels; +}; + +enum vdec_interlaced_format { + VDEC_InterlaceFrameProgressive = 0x1, + VDEC_InterlaceInterleaveFrameTopFieldFirst = 0x2, + VDEC_InterlaceInterleaveFrameBottomFieldFirst = 0x4 +}; + +enum vdec_output_format { + VDEC_YUV_FORMAT_NV12 = 0x1, + VDEC_YUV_FORMAT_TILE_4x2 = 0x2 +}; + +struct vdec_picsize { + uint32_t frame_width; + uint32_t frame_height; + uint32_t stride; + uint32_t scan_lines; +}; + +struct vdec_seqheader { + void *addr; + size_t sz; + int pmem_fd; + size_t pmem_offset; +}; + +struct vdec_mberror { + uint8_t *ptr_errormap; + uint32_t err_mapsize; +}; + +struct vdec_input_frameinfo { + void __user *user_addr; + size_t offset; + size_t data_len; + uint32_t flags; + int64_t timestamp; + void *client_data; + int pmem_fd; + size_t pmem_offset; +}; + +struct vdec_framesize { + uint32_t left; + uint32_t top; + uint32_t right; + uint32_t bottom; +}; + +struct vdec_output_frameinfo { + phys_addr_t phys_addr; + void __user *user_addr; + uint32_t offset; + uint32_t len; + uint32_t flags; + int64_t time_stamp; + void *client_data; + void *input_frame_clientdata; + struct vdec_framesize framesize; +}; + +union vdec_msgdata { + struct vdec_output_frameinfo output_frame; + void *input_frame_clientdata; +}; + +struct vdec_msginfo { + uint32_t status_code; + uint32_t msgcode; + union vdec_msgdata msgdata; + uint32_t msgdatasize; +}; +#endif /* end of macro _VDECDECODER_H_ */ diff --git a/include/linux/msm_vidc_enc.h b/include/linux/msm_vidc_enc.h new file mode 
100644 index 0000000000000..f7f398c297728 --- /dev/null +++ b/include/linux/msm_vidc_enc.h @@ -0,0 +1,592 @@ +/* Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#ifndef _MSM_VIDC_ENC_H_ +#define _MSM_VIDC_ENC_H_ + +#include +#include + +/** STATUS CODES*/ +/* Base value for status codes */ +#define VEN_S_BASE 0x00000000 +#define VEN_S_SUCCESS (VEN_S_BASE)/* Success */ +#define VEN_S_EFAIL (VEN_S_BASE+1)/* General failure */ +#define VEN_S_EFATAL (VEN_S_BASE+2)/* Fatal irrecoverable failure*/ +#define VEN_S_EBADPARAM (VEN_S_BASE+3)/* Error passed parameters*/ +/*Command called in invalid state*/ +#define VEN_S_EINVALSTATE (VEN_S_BASE+4) +#define VEN_S_ENOSWRES (VEN_S_BASE+5)/* Insufficient OS resources*/ +#define VEN_S_ENOHWRES (VEN_S_BASE+6)/*Insufficient HW resources */ +#define VEN_S_EBUFFREQ (VEN_S_BASE+7)/* Buffer requirements were not met*/ +#define VEN_S_EINVALCMD (VEN_S_BASE+8)/* Invalid command called */ +#define VEN_S_ETIMEOUT (VEN_S_BASE+9)/* Command timeout. 
*/ +/*Re-attempt was made when multiple invocation not supported for API.*/ +#define VEN_S_ENOREATMPT (VEN_S_BASE+10) +#define VEN_S_ENOPREREQ (VEN_S_BASE+11)/*Pre-requirement is not met for API*/ +#define VEN_S_ECMDQFULL (VEN_S_BASE+12)/*Command queue is full*/ +#define VEN_S_ENOTSUPP (VEN_S_BASE+13)/*Command not supported*/ +#define VEN_S_ENOTIMPL (VEN_S_BASE+14)/*Command not implemented.*/ +#define VEN_S_ENOTPMEM (VEN_S_BASE+15)/*Buffer is not from PMEM*/ +#define VEN_S_EFLUSHED (VEN_S_BASE+16)/*returned buffer was flushed*/ +#define VEN_S_EINSUFBUF (VEN_S_BASE+17)/*provided buffer size insufficient*/ +#define VEN_S_ESAMESTATE (VEN_S_BASE+18) +#define VEN_S_EINVALTRANS (VEN_S_BASE+19) + +#define VEN_INTF_VER 1 + +/*Asynchronous messages from driver*/ +#define VEN_MSG_INDICATION 0 +#define VEN_MSG_INPUT_BUFFER_DONE 1 +#define VEN_MSG_OUTPUT_BUFFER_DONE 2 +#define VEN_MSG_NEED_OUTPUT_BUFFER 3 +#define VEN_MSG_FLUSH_INPUT_DONE 4 +#define VEN_MSG_FLUSH_OUPUT_DONE 5 +#define VEN_MSG_START 6 +#define VEN_MSG_STOP 7 +#define VEN_MSG_PAUSE 8 +#define VEN_MSG_RESUME 9 +#define VEN_MSG_STOP_READING_MSG 10 + +/*Buffer flags bits masks*/ +#define VEN_BUFFLAG_EOS 0x00000001 +#define VEN_BUFFLAG_ENDOFFRAME 0x00000010 +#define VEN_BUFFLAG_SYNCFRAME 0x00000020 +#define VEN_BUFFLAG_EXTRADATA 0x00000040 +#define VEN_BUFFLAG_CODECCONFIG 0x00000080 + +/*ENCODER CONFIGURATION CONSTANTS*/ + +/*Encoded video frame types*/ +#define VEN_FRAME_TYPE_I 1/* I frame type */ +#define VEN_FRAME_TYPE_P 2/* P frame type */ +#define VEN_FRAME_TYPE_B 3/* B frame type */ + +/*Video codec types*/ +#define VEN_CODEC_MPEG4 1/* MPEG4 Codec */ +#define VEN_CODEC_H264 2/* H.264 Codec */ +#define VEN_CODEC_H263 3/* H.263 Codec */ + +/*Video codec profile types.*/ +#define VEN_PROFILE_MPEG4_SP 1/* 1 - MPEG4 SP profile */ +#define VEN_PROFILE_MPEG4_ASP 2/* 2 - MPEG4 ASP profile */ +#define VEN_PROFILE_H264_BASELINE 3/* 3 - H264 Baseline profile */ +#define VEN_PROFILE_H264_MAIN 4/* 4 - H264 Main profile*/ +#define VEN_PROFILE_H264_HIGH 5/* 5 - H264 High profile*/ +#define VEN_PROFILE_H263_BASELINE 6/* 6 - H263 Baseline profile */ + +/*Video codec profile level types.*/ +#define VEN_LEVEL_MPEG4_0 0x1/* MPEG4 Level 0 */ +#define VEN_LEVEL_MPEG4_1 0x2/* MPEG4 Level 1 */ +#define VEN_LEVEL_MPEG4_2 0x3/* MPEG4 Level 2 */ +#define VEN_LEVEL_MPEG4_3 0x4/* MPEG4 Level 3 */ +#define VEN_LEVEL_MPEG4_4 0x5/* MPEG4 Level 4 */ +#define VEN_LEVEL_MPEG4_5 0x6/* MPEG4 Level 5 */ +#define VEN_LEVEL_MPEG4_3b 0x7/* MPEG4 Level 3b */ +#define VEN_LEVEL_MPEG4_6 0x8/* MPEG4 Level 6 */ + +#define VEN_LEVEL_H264_1 0x9/* H.264 Level 1 */ +#define VEN_LEVEL_H264_1b 0xA/* H.264 Level 1b */ +#define VEN_LEVEL_H264_1p1 0xB/* H.264 Level 1.1 */ +#define VEN_LEVEL_H264_1p2 0xC/* H.264 Level 1.2 */ +#define VEN_LEVEL_H264_1p3 0xD/* H.264 Level 1.3 */ +#define VEN_LEVEL_H264_2 0xE/* H.264 Level 2 */ +#define VEN_LEVEL_H264_2p1 0xF/* H.264 Level 2.1 */ +#define VEN_LEVEL_H264_2p2 0x10/* H.264 Level 2.2 */ +#define VEN_LEVEL_H264_3 0x11/* H.264 Level 3 */ +#define VEN_LEVEL_H264_3p1 0x12/* H.264 Level 3.1 */ + +#define VEN_LEVEL_H263_10 0x13/* H.263 Level 10 */ +#define VEN_LEVEL_H263_20 0x14/* H.263 Level 20 */ +#define VEN_LEVEL_H263_30 0x15/* H.263 Level 30 */ +#define VEN_LEVEL_H263_40 0x16/* H.263 Level 40 */ +#define VEN_LEVEL_H263_45 0x17/* H.263 Level 45 */ +#define VEN_LEVEL_H263_50 0x18/* H.263 Level 50 */ +#define VEN_LEVEL_H263_60 0x19/* H.263 Level 60 */ +#define VEN_LEVEL_H263_70 0x1A/* H.263 Level 70 */ + +/*Entropy coding model selection for H.264 
encoder.*/ +#define VEN_ENTROPY_MODEL_CAVLC 1 +#define VEN_ENTROPY_MODEL_CABAC 2 +/*Cabac model number (0,1,2) for encoder.*/ +#define VEN_CABAC_MODEL_0 1/* CABAC Model 0. */ +#define VEN_CABAC_MODEL_1 2/* CABAC Model 1. */ +#define VEN_CABAC_MODEL_2 3/* CABAC Model 2. */ + +/*Deblocking filter control type for encoder.*/ +#define VEN_DB_DISABLE 1/* 1 - Disable deblocking filter*/ +#define VEN_DB_ALL_BLKG_BNDRY 2/* 2 - All blocking boundary filtering*/ +#define VEN_DB_SKIP_SLICE_BNDRY 3/* 3 - Filtering except sliceboundary*/ + +/*Different methods of Multi slice selection.*/ +#define VEN_MSLICE_OFF 1 +#define VEN_MSLICE_CNT_MB 2 /*number of MBscount per slice*/ +#define VEN_MSLICE_CNT_BYTE 3 /*number of bytes count per slice.*/ +#define VEN_MSLICE_GOB 4 /*Multi slice by GOB for H.263 only.*/ + +/*Different modes for Rate Control.*/ +#define VEN_RC_OFF 1 +#define VEN_RC_VBR_VFR 2 +#define VEN_RC_VBR_CFR 3 +#define VEN_RC_CBR_VFR 4 + +/*Different modes for flushing buffers*/ +#define VEN_FLUSH_INPUT 1 +#define VEN_FLUSH_OUTPUT 2 +#define VEN_FLUSH_ALL 3 + +/*Different input formats for YUV data.*/ +#define VEN_INPUTFMT_NV12 1/* NV12 Linear */ +#define VEN_INPUTFMT_NV21 2/* NV21 Linear */ + +/*Different allowed rotation modes.*/ +#define VEN_ROTATION_0 1/* 0 degrees */ +#define VEN_ROTATION_90 2/* 90 degrees */ +#define VEN_ROTATION_180 3/* 180 degrees */ +#define VEN_ROTATION_270 4/* 270 degrees */ + +/*IOCTL timeout values*/ +#define VEN_TIMEOUT_INFINITE 0xffffffff + +/*Different allowed intra refresh modes.*/ +#define VEN_IR_OFF 1 +#define VEN_IR_CYCLIC 2 +#define VEN_IR_RANDOM 3 + +/*IOCTL BASE CODES Not to be used directly by the client.*/ +/* Base value for ioctls that are not related to encoder configuration.*/ +#define VEN_IOCTLBASE_NENC 0x800 +/* Base value for encoder configuration ioctls*/ +#define VEN_IOCTLBASE_ENC 0x850 + +struct venc_ioctl_msg { + void __user *in; + void __user *out; +}; + +/*NON ENCODER CONFIGURATION IOCTLs*/ + +/*IOCTL params:SET: InputData - unsigned long, OutputData - NULL*/ +#define VEN_IOCTL_SET_INTF_VERSION \ + _IOW(VEN_IOCTLBASE_NENC, 0, struct venc_ioctl_msg) + +/*IOCTL params:CMD: InputData - venc_timeout, OutputData - venc_msg*/ +#define VEN_IOCTL_CMD_READ_NEXT_MSG \ + _IOWR(VEN_IOCTLBASE_NENC, 1, struct venc_ioctl_msg) + +/*IOCTL params:CMD: InputData - NULL, OutputData - NULL*/ +#define VEN_IOCTL_CMD_STOP_READ_MSG _IO(VEN_IOCTLBASE_NENC, 2) + +/*IOCTL params:SET: InputData - venc_allocatorproperty, OutputData - NULL + GET: InputData - NULL, OutputData - venc_allocatorproperty*/ +#define VEN_IOCTL_SET_INPUT_BUFFER_REQ \ + _IOW(VEN_IOCTLBASE_NENC, 3, struct venc_ioctl_msg) +#define VEN_IOCTL_GET_INPUT_BUFFER_REQ \ + _IOR(VEN_IOCTLBASE_NENC, 4, struct venc_ioctl_msg) + +/*IOCTL params:CMD: InputData - venc_bufferpayload, OutputData - NULL*/ +#define VEN_IOCTL_CMD_ALLOC_INPUT_BUFFER \ + _IOW(VEN_IOCTLBASE_NENC, 5, struct venc_ioctl_msg) + +/*IOCTL params:CMD: InputData - venc_bufferpayload, OutputData - NULL*/ +#define VEN_IOCTL_SET_INPUT_BUFFER \ + _IOW(VEN_IOCTLBASE_NENC, 6, struct venc_ioctl_msg) + +/*IOCTL params: CMD: InputData - venc_bufferpayload, OutputData - NULL*/ +#define VEN_IOCTL_CMD_FREE_INPUT_BUFFER \ + _IOW(VEN_IOCTLBASE_NENC, 7, struct venc_ioctl_msg) + +/*IOCTL params:SET: InputData - venc_allocatorproperty, OutputData - NULL + GET: InputData - NULL, OutputData - venc_allocatorproperty*/ +#define VEN_IOCTL_SET_OUTPUT_BUFFER_REQ \ + _IOW(VEN_IOCTLBASE_NENC, 8, struct venc_ioctl_msg) +#define VEN_IOCTL_GET_OUTPUT_BUFFER_REQ \ + 
_IOR(VEN_IOCTLBASE_NENC, 9, struct venc_ioctl_msg) + +/*IOCTL params:CMD: InputData - venc_bufferpayload, OutputData - NULL*/ +#define VEN_IOCTL_CMD_ALLOC_OUTPUT_BUFFER \ + _IOW(VEN_IOCTLBASE_NENC, 10, struct venc_ioctl_msg) + + +/*IOCTL params:CMD: InputData - venc_bufferpayload, OutputData - NULL*/ +#define VEN_IOCTL_SET_OUTPUT_BUFFER \ + _IOW(VEN_IOCTLBASE_NENC, 11, struct venc_ioctl_msg) + +/*IOCTL params:CMD: InputData - venc_bufferpayload, OutputData - NULL.*/ +#define VEN_IOCTL_CMD_FREE_OUTPUT_BUFFER \ + _IOW(VEN_IOCTLBASE_NENC, 12, struct venc_ioctl_msg) + + +/* Asynchronous respone message code:* VEN_MSG_START*/ +#define VEN_IOCTL_CMD_START _IO(VEN_IOCTLBASE_NENC, 13) + + +/*IOCTL params:CMD: InputData - venc_buffer, OutputData - NULL + Asynchronous respone message code:VEN_MSG_INPUT_BUFFER_DONE*/ +#define VEN_IOCTL_CMD_ENCODE_FRAME \ + _IOW(VEN_IOCTLBASE_NENC, 14, struct venc_ioctl_msg) + + +/*IOCTL params:CMD: InputData - venc_buffer, OutputData - NULL + Asynchronous response message code:VEN_MSG_OUTPUT_BUFFER_DONE*/ +#define VEN_IOCTL_CMD_FILL_OUTPUT_BUFFER \ + _IOW(VEN_IOCTLBASE_NENC, 15, struct venc_ioctl_msg) + +/*IOCTL params:CMD: InputData - venc_bufferflush, OutputData - NULL + * Asynchronous response message code:VEN_MSG_INPUT_BUFFER_DONE*/ +#define VEN_IOCTL_CMD_FLUSH \ + _IOW(VEN_IOCTLBASE_NENC, 16, struct venc_ioctl_msg) + + +/*Asynchronous respone message code:VEN_MSG_PAUSE*/ +#define VEN_IOCTL_CMD_PAUSE _IO(VEN_IOCTLBASE_NENC, 17) + +/*Asynchronous respone message code:VEN_MSG_RESUME*/ +#define VEN_IOCTL_CMD_RESUME _IO(VEN_IOCTLBASE_NENC, 18) + +/* Asynchronous respone message code:VEN_MSG_STOP*/ +#define VEN_IOCTL_CMD_STOP _IO(VEN_IOCTLBASE_NENC, 19) + + +/*ENCODER PROPERTY CONFIGURATION & CAPABILITY IOCTLs*/ + +/*IOCTL params:SET: InputData - venc_basecfg, OutputData - NULL + GET: InputData - NULL, OutputData - venc_basecfg*/ +#define VEN_IOCTL_SET_BASE_CFG \ + _IOW(VEN_IOCTLBASE_ENC, 1, struct venc_ioctl_msg) +#define VEN_IOCTL_GET_BASE_CFG \ + _IOR(VEN_IOCTLBASE_ENC, 2, struct venc_ioctl_msg) + +/*IOCTL params:SET: InputData - venc_switch, OutputData - NULL + GET: InputData - NULL, OutputData - venc_switch*/ +#define VEN_IOCTL_SET_LIVE_MODE \ + _IOW(VEN_IOCTLBASE_ENC, 3, struct venc_ioctl_msg) +#define VEN_IOCTL_GET_LIVE_MODE \ + _IOR(VEN_IOCTLBASE_ENC, 4, struct venc_ioctl_msg) + + +/*IOCTL params:SET: InputData - venc_profile, OutputData - NULL + GET: InputData - NULL, OutputData - venc_profile*/ +#define VEN_IOCTL_SET_CODEC_PROFILE \ + _IOW(VEN_IOCTLBASE_ENC, 5, struct venc_ioctl_msg) +#define VEN_IOCTL_GET_CODEC_PROFILE \ + _IOR(VEN_IOCTLBASE_ENC, 6, struct venc_ioctl_msg) + + +/*IOCTL params:SET: InputData - ven_profilelevel, OutputData - NULL + GET: InputData - NULL, OutputData - ven_profilelevel*/ +#define VEN_IOCTL_SET_PROFILE_LEVEL \ + _IOW(VEN_IOCTLBASE_ENC, 7, struct venc_ioctl_msg) + +#define VEN_IOCTL_GET_PROFILE_LEVEL \ + _IOR(VEN_IOCTLBASE_ENC, 8, struct venc_ioctl_msg) + +/*IOCTL params:SET: InputData - venc_switch, OutputData - NULL + GET: InputData - NULL, OutputData - venc_switch*/ +#define VEN_IOCTL_SET_SHORT_HDR \ + _IOW(VEN_IOCTLBASE_ENC, 9, struct venc_ioctl_msg) +#define VEN_IOCTL_GET_SHORT_HDR \ + _IOR(VEN_IOCTLBASE_ENC, 10, struct venc_ioctl_msg) + + +/*IOCTL params: SET: InputData - venc_sessionqp, OutputData - NULL + GET: InputData - NULL, OutputData - venc_sessionqp*/ +#define VEN_IOCTL_SET_SESSION_QP \ + _IOW(VEN_IOCTLBASE_ENC, 11, struct venc_ioctl_msg) +#define VEN_IOCTL_GET_SESSION_QP \ + _IOR(VEN_IOCTLBASE_ENC, 12, struct 
venc_ioctl_msg) + + +/*IOCTL params:SET: InputData - venc_intraperiod, OutputData - NULL + GET: InputData - NULL, OutputData - venc_intraperiod*/ +#define VEN_IOCTL_SET_INTRA_PERIOD \ + _IOW(VEN_IOCTLBASE_ENC, 13, struct venc_ioctl_msg) +#define VEN_IOCTL_GET_INTRA_PERIOD \ + _IOR(VEN_IOCTLBASE_ENC, 14, struct venc_ioctl_msg) + + +/* Request an Iframe*/ +#define VEN_IOCTL_CMD_REQUEST_IFRAME _IO(VEN_IOCTLBASE_ENC, 15) + +/*IOCTL params:GET: InputData - NULL, OutputData - venc_capability*/ +#define VEN_IOCTL_GET_CAPABILITY \ + _IOR(VEN_IOCTLBASE_ENC, 16, struct venc_ioctl_msg) + + +/*IOCTL params:GET: InputData - NULL, OutputData - venc_seqheader*/ +#define VEN_IOCTL_GET_SEQUENCE_HDR \ + _IOR(VEN_IOCTLBASE_ENC, 17, struct venc_ioctl_msg) + +/*IOCTL params:SET: InputData - venc_entropycfg, OutputData - NULL + GET: InputData - NULL, OutputData - venc_entropycfg*/ +#define VEN_IOCTL_SET_ENTROPY_CFG \ + _IOW(VEN_IOCTLBASE_ENC, 18, struct venc_ioctl_msg) +#define VEN_IOCTL_GET_ENTROPY_CFG \ + _IOR(VEN_IOCTLBASE_ENC, 19, struct venc_ioctl_msg) + +/*IOCTL params:SET: InputData - venc_dbcfg, OutputData - NULL + GET: InputData - NULL, OutputData - venc_dbcfg*/ +#define VEN_IOCTL_SET_DEBLOCKING_CFG \ + _IOW(VEN_IOCTLBASE_ENC, 20, struct venc_ioctl_msg) +#define VEN_IOCTL_GET_DEBLOCKING_CFG \ + _IOR(VEN_IOCTLBASE_ENC, 21, struct venc_ioctl_msg) + + +/*IOCTL params:SET: InputData - venc_intrarefresh, OutputData - NULL + GET: InputData - NULL, OutputData - venc_intrarefresh*/ +#define VEN_IOCTL_SET_INTRA_REFRESH \ + _IOW(VEN_IOCTLBASE_ENC, 22, struct venc_ioctl_msg) +#define VEN_IOCTL_GET_INTRA_REFRESH \ + _IOR(VEN_IOCTLBASE_ENC, 23, struct venc_ioctl_msg) + + +/*IOCTL params:SET: InputData - venc_multiclicecfg, OutputData - NULL + GET: InputData - NULL, OutputData - venc_multiclicecfg*/ +#define VEN_IOCTL_SET_MULTI_SLICE_CFG \ + _IOW(VEN_IOCTLBASE_ENC, 24, struct venc_ioctl_msg) +#define VEN_IOCTL_GET_MULTI_SLICE_CFG \ + _IOR(VEN_IOCTLBASE_ENC, 25, struct venc_ioctl_msg) + +/*IOCTL params:SET: InputData - venc_ratectrlcfg, OutputData - NULL + GET: InputData - NULL, OutputData - venc_ratectrlcfg*/ +#define VEN_IOCTL_SET_RATE_CTRL_CFG \ + _IOW(VEN_IOCTLBASE_ENC, 26, struct venc_ioctl_msg) +#define VEN_IOCTL_GET_RATE_CTRL_CFG \ + _IOR(VEN_IOCTLBASE_ENC, 27, struct venc_ioctl_msg) + + +/*IOCTL params:SET: InputData - venc_voptimingcfg, OutputData - NULL + GET: InputData - NULL, OutputData - venc_voptimingcfg*/ +#define VEN_IOCTL_SET_VOP_TIMING_CFG \ + _IOW(VEN_IOCTLBASE_ENC, 28, struct venc_ioctl_msg) +#define VEN_IOCTL_GET_VOP_TIMING_CFG \ + _IOR(VEN_IOCTLBASE_ENC, 29, struct venc_ioctl_msg) + + +/*IOCTL params:SET: InputData - venc_framerate, OutputData - NULL + GET: InputData - NULL, OutputData - venc_framerate*/ +#define VEN_IOCTL_SET_FRAME_RATE \ + _IOW(VEN_IOCTLBASE_ENC, 30, struct venc_ioctl_msg) +#define VEN_IOCTL_GET_FRAME_RATE \ + _IOR(VEN_IOCTLBASE_ENC, 31, struct venc_ioctl_msg) + + +/*IOCTL params:SET: InputData - venc_targetbitrate, OutputData - NULL + GET: InputData - NULL, OutputData - venc_targetbitrate*/ +#define VEN_IOCTL_SET_TARGET_BITRATE \ + _IOW(VEN_IOCTLBASE_ENC, 32, struct venc_ioctl_msg) +#define VEN_IOCTL_GET_TARGET_BITRATE \ + _IOR(VEN_IOCTLBASE_ENC, 33, struct venc_ioctl_msg) + + +/*IOCTL params:SET: InputData - venc_rotation, OutputData - NULL + GET: InputData - NULL, OutputData - venc_rotation*/ +#define VEN_IOCTL_SET_ROTATION \ + _IOW(VEN_IOCTLBASE_ENC, 34, struct venc_ioctl_msg) +#define VEN_IOCTL_GET_ROTATION \ + _IOR(VEN_IOCTLBASE_ENC, 35, struct venc_ioctl_msg) + + 
+/*IOCTL params:SET: InputData - venc_headerextension, OutputData - NULL + GET: InputData - NULL, OutputData - venc_headerextension*/ +#define VEN_IOCTL_SET_HEC \ + _IOW(VEN_IOCTLBASE_ENC, 36, struct venc_ioctl_msg) +#define VEN_IOCTL_GET_HEC \ + _IOR(VEN_IOCTLBASE_ENC, 37, struct venc_ioctl_msg) + +/*IOCTL params:SET: InputData - venc_switch, OutputData - NULL + GET: InputData - NULL, OutputData - venc_switch*/ +#define VEN_IOCTL_SET_DATA_PARTITION \ + _IOW(VEN_IOCTLBASE_ENC, 38, struct venc_ioctl_msg) +#define VEN_IOCTL_GET_DATA_PARTITION \ + _IOR(VEN_IOCTLBASE_ENC, 39, struct venc_ioctl_msg) + +/*IOCTL params:SET: InputData - venc_switch, OutputData - NULL + GET: InputData - NULL, OutputData - venc_switch*/ +#define VEN_IOCTL_SET_RVLC \ + _IOW(VEN_IOCTLBASE_ENC, 40, struct venc_ioctl_msg) +#define VEN_IOCTL_GET_RVLC \ + _IOR(VEN_IOCTLBASE_ENC, 41, struct venc_ioctl_msg) + + +/*IOCTL params:SET: InputData - venc_switch, OutputData - NULL + GET: InputData - NULL, OutputData - venc_switch*/ +#define VEN_IOCTL_SET_AC_PREDICTION \ + _IOW(VEN_IOCTLBASE_ENC, 42, struct venc_ioctl_msg) +#define VEN_IOCTL_GET_AC_PREDICTION \ + _IOR(VEN_IOCTLBASE_ENC, 43, struct venc_ioctl_msg) + + +/*IOCTL params:SET: InputData - venc_qprange, OutputData - NULL + GET: InputData - NULL, OutputData - venc_qprange*/ +#define VEN_IOCTL_SET_QP_RANGE \ + _IOW(VEN_IOCTLBASE_ENC, 44, struct venc_ioctl_msg) +#define VEN_IOCTL_GET_QP_RANGE \ + _IOR(VEN_IOCTLBASE_ENC, 45, struct venc_ioctl_msg) + +struct venc_switch { + unsigned char status; +}; + +struct venc_allocatorproperty { + u32 mincount; + u32 maxcount; + u32 actualcount; + u32 datasize; + u32 suffixsize; + u32 alignment; + u32 bufpoolid; +}; + +struct venc_bufferpayload { + void __user *buffer; + size_t sz; + int fd; + size_t offset; + unsigned int maped_size; + unsigned long filled_len; +}; + +struct venc_buffer { + void __user *addr; + size_t sz; + size_t len; + size_t offset; + long long timestamp; + u32 flags; + void *clientdata; +}; + +struct venc_basecfg { + u32 input_width; + u32 input_height; + u32 dvs_width; + u32 dvs_height; + u32 codectype; + u32 fps_num; + u32 fps_den; + u32 targetbitrate; + u32 inputformat; +}; + +struct venc_profile { + unsigned long profile; +}; +struct ven_profilelevel { + unsigned long level; +}; + +struct venc_sessionqp { + unsigned long iframeqp; + unsigned long pframqp; +}; + +struct venc_qprange { + u32 maxqp; + u32 minqp; +}; +struct venc_intraperiod { + unsigned long num_pframes; +}; +struct venc_seqheader { + void *buf; + size_t buf_sz; + size_t hdr_len; +}; + +struct venc_capability { + unsigned long codec_types; + unsigned long maxframe_width; + unsigned long maxframe_height; + unsigned long maxtarget_bitrate; + unsigned long maxframe_rate; + unsigned long input_formats; + unsigned char dvs; +}; + +struct venc_entropycfg { + unsigned longentropysel; + unsigned long cabacmodel; +}; + +struct venc_dbcfg { + u32 db_mode; + u32 slicealpha_offset; + u32 slicebeta_offset; +}; + +struct venc_intrarefresh { + unsigned long irmode; + unsigned long mbcount; +}; + +struct venc_multiclicecfg { + unsigned long mslice_mode; + unsigned long mslice_size; +}; + +struct venc_bufferflush { + unsigned long flush_mode; +}; + +struct venc_ratectrlcfg { + unsigned long rcmode; +}; + +struct venc_voptimingcfg { + u32 voptime_resolution; +}; +struct venc_framerate { + u32 fps_denominator; + u32 fps_numerator; +}; + +//TODO remove these stupid structs +struct venc_targetbitrate{ + u32 target_bitrate; +}; + +struct venc_rotation { + u32 rotation; 
+}; + +struct venc_timeout { + u32 millisec; +}; + +struct venc_headerextension { + unsigned long header_extension; +}; + +struct venc_msg { + unsigned long statuscode; + unsigned long msgcode; + struct venc_buffer buf; + size_t msgdata_size; +}; +#endif /* _MSM_VIDC_ENC_H_ */ diff --git a/include/linux/mt9t013.h b/include/linux/mt9t013.h new file mode 100644 index 0000000000000..543923a136007 --- /dev/null +++ b/include/linux/mt9t013.h @@ -0,0 +1,141 @@ +/* + * Copyright (C) 2007-2008 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef CAMERA_MT9T013_H +#define CAMERA_MT9T013_H +#include +#include +#include +#include + +/************************************************************* +* IOCTL define +*************************************************************/ + +#define MT9T013_I2C_IOCTL_MAGIC 'm' + +#define MT9T013_I2C_IOCTL_W \ + _IOW(MT9T013_I2C_IOCTL_MAGIC, 0, unsigned) + +#define MT9T013_I2C_IOCTL_R \ + _IOR(MT9T013_I2C_IOCTL_MAGIC, 1, unsigned) + +#define MT9T013_I2C_IOCTL_AF_W \ + _IOW(MT9T013_I2C_IOCTL_MAGIC, 2, unsigned) + +#define MT9T013_I2C_IOCTL_CAMIF_PAD_REG_RESET \ + _IO(MT9T013_I2C_IOCTL_MAGIC, 3) + +#define MT9T013_I2C_IOCTL_CAMIF_PAD_REG_RESET_2 \ + _IO(MT9T013_I2C_IOCTL_MAGIC, 4) + +#define CAMERA_CONFIGURE_GPIOS \ + _IO(MT9T013_I2C_IOCTL_MAGIC, 7) + +#define CAMERA_UNCONFIGURE_GPIOS \ + _IO(MT9T013_I2C_IOCTL_MAGIC, 8) + +#define CAMERA_LENS_POWER_ON \ + _IO(MT9T013_I2C_IOCTL_MAGIC, 9) + +#define CAMERA_LENS_POWER_OFF \ + _IO(MT9T013_I2C_IOCTL_MAGIC, 10) + +#define MT9T013_I2C_IOCTL_CAMIF_APPS_RESET \ + _IO(MT9T013_I2C_IOCTL_MAGIC, 11) + +/* Replacement ioctls() for the clkrgm_sec RPCs. 
*/ + +#define CAMIO_VFE_MDC_CLK 1 /* enable, disable */ +#define CAMIO_MDC_CLK 2 /* enable, disable */ +#define CAMIO_VFE_CLK 3 /* clk_select, freq_prog */ + +#define MT9T013_I2C_IOCTL_CLK_ENABLE \ + _IOW(MT9T013_I2C_IOCTL_MAGIC, 12, unsigned) + +#define MT9T013_I2C_IOCTL_CLK_DISABLE \ + _IOW(MT9T013_I2C_IOCTL_MAGIC, 13, unsigned) + +#define MT9T013_I2C_IOCTL_CLK_SELECT \ + _IOW(MT9T013_I2C_IOCTL_MAGIC, 14, unsigned) + +#define MT9T013_I2C_IOCTL_CLK_FREQ_PROG \ + _IOW(MT9T013_I2C_IOCTL_MAGIC, 15, unsigned) + +#define CAMSENSOR_REG_INIT 0<<0 +#define CAMSENSOR_REG_UPDATE_PERIODIC 1<<0 +#define CAMSENSOR_TYPE_PREVIEW 0<<1 +#define CAMSENSOR_TYPE_SNAPSHOT 1<<1 + +#define MT9T013_I2C_IOCTL_SENSOR_SETTING \ + _IOW(MT9T013_I2C_IOCTL_MAGIC, 16, uint32_t) + +struct mt9t013_reg_struct +{ + uint16_t vt_pix_clk_div; /* 0x0300 */ + uint16_t vt_sys_clk_div; /* 0x0302 */ + uint16_t pre_pll_clk_div; /* 0x0304 */ + uint16_t pll_multiplier; /* 0x0306 */ + uint16_t op_pix_clk_div; /* 0x0308 */ + uint16_t op_sys_clk_div; /* 0x030A */ + uint16_t scale_m; /* 0x0404 */ + uint16_t row_speed; /* 0x3016 */ + uint16_t x_addr_start; /* 0x3004 */ + uint16_t x_addr_end; /* 0x3008 */ + uint16_t y_addr_start; /* 0x3002 */ + uint16_t y_addr_end; /* 0x3006 */ + uint16_t read_mode; /* 0x3040 */ + uint16_t x_output_size ; /* 0x034C */ + uint16_t y_output_size; /* 0x034E */ + uint16_t line_length_pck; /* 0x300C */ + uint16_t frame_length_lines; /* 0x300A */ + uint16_t coarse_integration_time; /* 0x3012 */ + uint16_t fine_integration_time; /* 0x3014 */ +}; + +struct mt9t013_reg_pat { + struct mt9t013_reg_struct reg[2]; +}; + +#define MT9T013_I2C_IOCTL_GET_REGISTERS \ + _IOR(MT9T013_I2C_IOCTL_MAGIC, 17, struct mt9t013_reg_pat *) + +struct mt9t013_exposure_gain { + uint16_t gain; + uint16_t line; + uint32_t mode; +}; + +#define MT9T013_I2C_IOCTL_EXPOSURE_GAIN \ + _IOW(MT9T013_I2C_IOCTL_MAGIC, 18, struct exposure_gain *) + +#define MT9T013_I2C_IOCTL_MOVE_FOCUS \ + _IOW(MT9T013_I2C_IOCTL_MAGIC, 19, uint32_t) + +#define MT9T013_I2C_IOCTL_SET_DEFAULT_FOCUS \ + _IOW(MT9T013_I2C_IOCTL_MAGIC, 20, uint32_t) + +#define MT9T013_I2C_IOCTL_POWER_DOWN \ + _IO(MT9T013_I2C_IOCTL_MAGIC, 21) + +struct mt9t013_init { + int preview; /* in: 1 for preview, 0 for capture */ + uint16_t chipid; /* out: chip id */ +}; + +#define MT9T013_I2C_IOCTL_INIT \ + _IOWR(MT9T013_I2C_IOCTL_MAGIC, 22, struct mt9t013_init *) + +#endif + diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h index 50cdc2559a5aa..0d3dd66322ecb 100644 --- a/include/linux/netfilter/nf_conntrack_common.h +++ b/include/linux/netfilter/nf_conntrack_common.h @@ -18,6 +18,9 @@ enum ip_conntrack_info { /* >= this indicates reply direction */ IP_CT_IS_REPLY, + IP_CT_ESTABLISHED_REPLY = IP_CT_ESTABLISHED + IP_CT_IS_REPLY, + IP_CT_RELATED_REPLY = IP_CT_RELATED + IP_CT_IS_REPLY, + IP_CT_NEW_REPLY = IP_CT_NEW + IP_CT_IS_REPLY, /* Number of distinct IP_CT types (no NEW in reply dirn). */ IP_CT_NUMBER = IP_CT_IS_REPLY * 2 - 1 }; diff --git a/include/linux/netfilter/xt_qtaguid.h b/include/linux/netfilter/xt_qtaguid.h new file mode 100644 index 0000000000000..ca60fbdec2f3b --- /dev/null +++ b/include/linux/netfilter/xt_qtaguid.h @@ -0,0 +1,13 @@ +#ifndef _XT_QTAGUID_MATCH_H +#define _XT_QTAGUID_MATCH_H + +/* For now we just replace the xt_owner. + * FIXME: make iptables aware of qtaguid. 
*/ +#include + +#define XT_QTAGUID_UID XT_OWNER_UID +#define XT_QTAGUID_GID XT_OWNER_GID +#define XT_QTAGUID_SOCKET XT_OWNER_SOCKET +#define xt_qtaguid_match_info xt_owner_match_info + +#endif /* _XT_QTAGUID_MATCH_H */ diff --git a/include/linux/netfilter/xt_quota2.h b/include/linux/netfilter/xt_quota2.h new file mode 100644 index 0000000000000..eadc6903314e7 --- /dev/null +++ b/include/linux/netfilter/xt_quota2.h @@ -0,0 +1,25 @@ +#ifndef _XT_QUOTA_H +#define _XT_QUOTA_H + +enum xt_quota_flags { + XT_QUOTA_INVERT = 1 << 0, + XT_QUOTA_GROW = 1 << 1, + XT_QUOTA_PACKET = 1 << 2, + XT_QUOTA_NO_CHANGE = 1 << 3, + XT_QUOTA_MASK = 0x0F, +}; + +struct xt_quota_counter; + +struct xt_quota_mtinfo2 { + char name[15]; + u_int8_t flags; + + /* Comparison-invariant */ + aligned_u64 quota; + + /* Used internally by the kernel */ + struct xt_quota_counter *master __attribute__((aligned(8))); +}; + +#endif /* _XT_QUOTA_H */ diff --git a/include/linux/netfilter/xt_socket.h b/include/linux/netfilter/xt_socket.h index 6f475b8ff34be..b21f2c23cd24f 100644 --- a/include/linux/netfilter/xt_socket.h +++ b/include/linux/netfilter/xt_socket.h @@ -9,4 +9,10 @@ struct xt_socket_mtinfo1 { __u8 flags; }; +void xt_socket_put_sk(struct sock *sk); +struct sock *xt_socket_get4_sk(const struct sk_buff *skb, + struct xt_action_param *par); +struct sock *xt_socket_get6_sk(const struct sk_buff *skb, + struct xt_action_param *par); + #endif /* _XT_SOCKET_H */ diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h index 821ffb954f147..ad7fc6b818963 100644 --- a/include/linux/nl80211.h +++ b/include/linux/nl80211.h @@ -1221,6 +1221,36 @@ enum nl80211_rate_info { NL80211_RATE_INFO_MAX = __NL80211_RATE_INFO_AFTER_LAST - 1 }; +/** + * enum nl80211_sta_bss_param - BSS information collected by STA + * + * These attribute types are used with %NL80211_STA_INFO_BSS_PARAM + * when getting information about the bitrate of a station. + * + * @__NL80211_STA_BSS_PARAM_INVALID: attribute number 0 is reserved + * @NL80211_STA_BSS_PARAM_CTS_PROT: whether CTS protection is enabled (flag) + * @NL80211_STA_BSS_PARAM_SHORT_PREAMBLE: whether short preamble is enabled + * (flag) + * @NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME: whether short slot time is enabled + * (flag) + * @NL80211_STA_BSS_PARAM_DTIM_PERIOD: DTIM period for beaconing (u8) + * @NL80211_STA_BSS_PARAM_BEACON_INTERVAL: Beacon interval (u16) + * @NL80211_STA_BSS_PARAM_MAX: highest sta_bss_param number currently defined + * @__NL80211_STA_BSS_PARAM_AFTER_LAST: internal use + */ +enum nl80211_sta_bss_param { + __NL80211_STA_BSS_PARAM_INVALID, + NL80211_STA_BSS_PARAM_CTS_PROT, + NL80211_STA_BSS_PARAM_SHORT_PREAMBLE, + NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME, + NL80211_STA_BSS_PARAM_DTIM_PERIOD, + NL80211_STA_BSS_PARAM_BEACON_INTERVAL, + + /* keep last */ + __NL80211_STA_BSS_PARAM_AFTER_LAST, + NL80211_STA_BSS_PARAM_MAX = __NL80211_STA_BSS_PARAM_AFTER_LAST - 1 +}; + /** * enum nl80211_sta_info - station information * @@ -1233,7 +1263,7 @@ enum nl80211_rate_info { * @NL80211_STA_INFO_TX_BYTES: total transmitted bytes (u32, to this station) * @NL80211_STA_INFO_SIGNAL: signal strength of last received PPDU (u8, dBm) * @NL80211_STA_INFO_TX_BITRATE: current unicast tx rate, nested attribute - * containing info as possible, see &enum nl80211_sta_info_txrate. 
+ * containing info as possible, see &enum nl80211_rate_info * @NL80211_STA_INFO_RX_PACKETS: total received packet (u32, from this station) * @NL80211_STA_INFO_TX_PACKETS: total transmitted packets (u32, to this * station) @@ -1243,6 +1273,11 @@ enum nl80211_rate_info { * @NL80211_STA_INFO_LLID: the station's mesh LLID * @NL80211_STA_INFO_PLID: the station's mesh PLID * @NL80211_STA_INFO_PLINK_STATE: peer link state for the station + * @NL80211_STA_INFO_RX_BITRATE: last unicast data frame rx rate, nested + * attribute, like NL80211_STA_INFO_TX_BITRATE. + * @NL80211_STA_INFO_BSS_PARAM: current station's view of BSS, nested attribute + * containing info as possible, see &enum nl80211_sta_bss_param + * @NL80211_STA_INFO_CONNECTED_TIME: time since the station is last connected * @__NL80211_STA_INFO_AFTER_LAST: internal * @NL80211_STA_INFO_MAX: highest possible station info attribute */ @@ -1261,6 +1296,9 @@ enum nl80211_sta_info { NL80211_STA_INFO_TX_RETRIES, NL80211_STA_INFO_TX_FAILED, NL80211_STA_INFO_SIGNAL_AVG, + NL80211_STA_INFO_RX_BITRATE, + NL80211_STA_INFO_BSS_PARAM, + NL80211_STA_INFO_CONNECTED_TIME, /* keep last */ __NL80211_STA_INFO_AFTER_LAST, diff --git a/include/linux/oom.h b/include/linux/oom.h index 5e3aa8311c5ed..4952fb874ad3d 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -40,6 +40,8 @@ enum oom_constraint { CONSTRAINT_MEMCG, }; +extern int test_set_oom_score_adj(int new_val); + extern unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, const nodemask_t *nodemask, unsigned long totalpages); extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags); diff --git a/include/linux/pci.h b/include/linux/pci.h index 559d028970752..6002bcade0801 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1002,12 +1002,11 @@ extern bool pcie_ports_auto; #endif #ifndef CONFIG_PCIEASPM -static inline int pcie_aspm_enabled(void) -{ - return 0; -} +static inline int pcie_aspm_enabled(void) { return 0; } +static inline bool pcie_aspm_support_enabled(void) { return false; } #else extern int pcie_aspm_enabled(void); +extern bool pcie_aspm_support_enabled(void); #endif #ifdef CONFIG_PCIEAER diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 3adb06ebf8418..9f36491a18eb0 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -607,6 +607,8 @@ #define PCI_DEVICE_ID_MATROX_G550 0x2527 #define PCI_DEVICE_ID_MATROX_VIA 0x4536 +#define PCI_VENDOR_ID_MOBILITY_ELECTRONICS 0x14f2 + #define PCI_VENDOR_ID_CT 0x102c #define PCI_DEVICE_ID_CT_69000 0x00c0 #define PCI_DEVICE_ID_CT_65545 0x00d8 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index dda5b0a3ff601..a9bd9c0af44ba 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1052,7 +1052,7 @@ void perf_event_task_sched_out(struct task_struct *task, struct task_struct *nex { perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0); - COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next)); + __perf_event_task_sched_out(task, next); } extern void perf_event_mmap(struct vm_area_struct *vma); diff --git a/include/linux/pid.h b/include/linux/pid.h index 49f1c2f66e951..ec9f2df57f1b8 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -117,7 +117,7 @@ extern struct pid *find_vpid(int nr); */ extern struct pid *find_get_pid(int nr); extern struct pid *find_ge_pid(int nr, struct pid_namespace *); -int next_pidmap(struct pid_namespace *pid_ns, int last); +int next_pidmap(struct pid_namespace *pid_ns, unsigned 
int last); extern struct pid *alloc_pid(struct pid_namespace *ns); extern void free_pid(struct pid *pid); diff --git a/include/linux/pm.h b/include/linux/pm.h index 21415cc91cbb4..6fedd1c2a5a2a 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -522,12 +522,10 @@ extern void update_pm_runtime_accounting(struct device *dev); #ifdef CONFIG_PM_SLEEP extern void device_pm_lock(void); -extern int sysdev_resume(void); extern void dpm_resume_noirq(pm_message_t state); extern void dpm_resume_end(pm_message_t state); extern void device_pm_unlock(void); -extern int sysdev_suspend(pm_message_t state); extern int dpm_suspend_noirq(pm_message_t state); extern int dpm_suspend_start(pm_message_t state); diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h index 77cbddb3784cf..a7d87f911cabb 100644 --- a/include/linux/pm_qos_params.h +++ b/include/linux/pm_qos_params.h @@ -16,6 +16,10 @@ #define PM_QOS_NUM_CLASSES 4 #define PM_QOS_DEFAULT_VALUE -1 +#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) +#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) +#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0 + struct pm_qos_request_list { struct plist_node list; int pm_qos_class; diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index 3e23844a6990c..b2c14cbd47a6f 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -69,7 +69,8 @@ struct k_itimer { struct k_clock { int res; /* in nanoseconds */ int (*clock_getres) (const clockid_t which_clock, struct timespec *tp); - int (*clock_set) (const clockid_t which_clock, struct timespec * tp); + int (*clock_set) (const clockid_t which_clock, + const struct timespec *tp); int (*clock_get) (const clockid_t which_clock, struct timespec * tp); int (*timer_create) (struct k_itimer *timer); int (*nsleep) (const clockid_t which_clock, int flags, @@ -89,7 +90,7 @@ void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock); /* error handlers for timer_create, nanosleep and settime */ int do_posix_clock_nonanosleep(const clockid_t, int flags, struct timespec *, struct timespec __user *); -int do_posix_clock_nosettime(const clockid_t, struct timespec *tp); +int do_posix_clock_nosettime(const clockid_t, const struct timespec *tp); /* function to call to trigger timer event */ int posix_timer_event(struct k_itimer *timr, int si_private); diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index a1147e5dd245e..9178d5cc0b014 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -189,6 +189,10 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace) child->ptrace = current->ptrace; __ptrace_link(child, current->parent); } + +#ifdef CONFIG_HAVE_HW_BREAKPOINT + atomic_set(&child->ptrace_bp_refcnt, 1); +#endif } /** @@ -350,6 +354,13 @@ extern int task_current_syscall(struct task_struct *target, long *callno, unsigned long args[6], unsigned int maxargs, unsigned long *sp, unsigned long *pc); -#endif +#ifdef CONFIG_HAVE_HW_BREAKPOINT +extern int ptrace_get_breakpoints(struct task_struct *tsk); +extern void ptrace_put_breakpoints(struct task_struct *tsk); +#else +static inline void ptrace_put_breakpoints(struct task_struct *tsk) { } +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ + +#endif /* __KERNEL */ #endif diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index 7066acb2c5307..033b507b33b17 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -136,6 +136,14 @@ static inline void rb_set_color(struct rb_node 
*rb, int color) #define RB_EMPTY_NODE(node) (rb_parent(node) == node) #define RB_CLEAR_NODE(node) (rb_set_parent(node, node)) +static inline void rb_init_node(struct rb_node *rb) +{ + rb->rb_parent_color = 0; + rb->rb_right = NULL; + rb->rb_left = NULL; + RB_CLEAR_NODE(rb); +} + extern void rb_insert_color(struct rb_node *, struct rb_root *); extern void rb_erase(struct rb_node *, struct rb_root *); diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index af5614856285d..70c932fa675c0 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -777,4 +777,60 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head) } #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ +static __always_inline bool __is_kfree_rcu_offset(unsigned long offset) +{ + return offset < 4096; +} + +static __always_inline +void __kfree_rcu(struct rcu_head *head, unsigned long offset) +{ + typedef void (*rcu_callback)(struct rcu_head *); + + BUILD_BUG_ON(!__builtin_constant_p(offset)); + + /* See the kfree_rcu() header comment. */ + BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); + + call_rcu(head, (rcu_callback)offset); +} + +extern void kfree(const void *); + +static inline void __rcu_reclaim(struct rcu_head *head) +{ + unsigned long offset = (unsigned long)head->func; + + if (__is_kfree_rcu_offset(offset)) + kfree((void *)head - offset); + else + head->func(head); +} + +/** + * kfree_rcu() - kfree an object after a grace period. + * @ptr: pointer to kfree + * @rcu_head: the name of the struct rcu_head within the type of @ptr. + * + * Many rcu callbacks functions just call kfree() on the base structure. + * These functions are trivial, but their size adds up, and furthermore + * when they are used in a kernel module, that module must invoke the + * high-latency rcu_barrier() function at module-unload time. + * + * The kfree_rcu() function handles this issue. Rather than encoding a + * function address in the embedded rcu_head structure, kfree_rcu() instead + * encodes the offset of the rcu_head structure within the base structure. + * Because the functions are not allowed in the low-order 4096 bytes of + * kernel virtual memory, offsets up to 4095 bytes can be accommodated. + * If the offset is larger than 4095 bytes, a compile-time error will + * be generated in __kfree_rcu(). If this error is triggered, you can + * either fall back to use of call_rcu() or rearrange the structure to + * position the rcu_head structure into the first 4096 bytes. + * + * Note that the allowable offset might decrease in the future, for example, + * to allow something like kmem_cache_free_rcu(). 
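
As a usage sketch of the kfree_rcu() macro defined just below (the structure and function names are illustrative): a caller with an embedded rcu_head no longer needs a trivial callback whose only job is kfree().

  #include <linux/rcupdate.h>
  #include <linux/slab.h>

  struct foo {
          int data;
          struct rcu_head rcu;
  };

  static void foo_release(struct foo *fp)
  {
          /* Equivalent to call_rcu() with a callback that just kfree()s the
           * structure, but no per-structure callback has to be written and,
           * as noted above, module unload does not need rcu_barrier() for
           * this structure. */
          kfree_rcu(fp, rcu);
  }
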
+ */ +#define kfree_rcu(ptr, rcu_head) \ + __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) + #endif /* __LINUX_RCUPDATE_H */ diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index b8ed16a33c47f..cf302297eeded 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -217,4 +217,6 @@ int regulator_mode_to_status(unsigned int); void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data); +extern int tps65023_set_dcdc1_level(struct regulator_dev *dev, int mvolts); + #endif diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h index c6c608482cba4..3d049abf9ae61 100644 --- a/include/linux/rfkill.h +++ b/include/linux/rfkill.h @@ -199,8 +199,11 @@ void rfkill_pause_polling(struct rfkill *rfkill); * NOTE: not necessary for suspend/resume -- in that case the * core stops polling anyway */ +#ifdef CONFIG_RFKILL_PM void rfkill_resume_polling(struct rfkill *rfkill); - +#else +static inline void rfkill_resume_polling(struct rfkill *rfkill) { } +#endif /** * rfkill_unregister - Unregister a rfkill structure. diff --git a/include/linux/rmap.h b/include/linux/rmap.h index e9fd04ca1e518..b9b23ddca63af 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -87,7 +87,7 @@ static inline void get_anon_vma(struct anon_vma *anon_vma) atomic_inc(&anon_vma->external_refcount); } -void drop_anon_vma(struct anon_vma *); +void put_anon_vma(struct anon_vma *); #else static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma) { @@ -102,7 +102,7 @@ static inline void get_anon_vma(struct anon_vma *anon_vma) { } -static inline void drop_anon_vma(struct anon_vma *anon_vma) +static inline void put_anon_vma(struct anon_vma *anon_vma) { } #endif /* CONFIG_KSM */ diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 89c3e51829911..020847fbe22b3 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -227,10 +227,13 @@ extern void rtc_device_unregister(struct rtc_device *rtc); extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm); extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm); extern int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs); +int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm); extern int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alrm); extern int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alrm); +extern int rtc_initialize_alarm(struct rtc_device *rtc, + struct rtc_wkalrm *alrm); extern void rtc_update_irq(struct rtc_device *rtc, unsigned long num, unsigned long events); diff --git a/include/linux/sched.h b/include/linux/sched.h index 536014c6b4f2e..aa02252c5ca2a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -37,8 +37,15 @@ #define SCHED_FIFO 1 #define SCHED_RR 2 #define SCHED_BATCH 3 -/* SCHED_ISO: reserved but not implemented yet */ +/* SCHED_ISO: Implemented on BFS only */ #define SCHED_IDLE 5 +#ifdef CONFIG_SCHED_BFS +#define SCHED_ISO 4 +#define SCHED_IDLEPRIO SCHED_IDLE +#define SCHED_MAX (SCHED_IDLEPRIO) +#define SCHED_RANGE(policy) ((policy) <= SCHED_MAX) +#endif + /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */ #define SCHED_RESET_ON_FORK 0x40000000 @@ -267,8 +274,6 @@ extern asmlinkage void schedule_tail(struct task_struct *prev); extern void init_idle(struct task_struct *idle, int cpu); extern void init_idle_bootup_task(struct task_struct *idle); -extern int runqueue_is_locked(int cpu); - extern cpumask_var_t 
nohz_cpu_mask; #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) extern void select_nohz_load_balancer(int stop_tick); @@ -1199,17 +1204,33 @@ struct task_struct { int lock_depth; /* BKL lock depth */ +#ifndef CONFIG_SCHED_BFS #ifdef CONFIG_SMP #ifdef __ARCH_WANT_UNLOCKED_CTXSW int oncpu; #endif +#endif +#else /* CONFIG_SCHED_BFS */ + int oncpu; #endif int prio, static_prio, normal_prio; unsigned int rt_priority; +#ifdef CONFIG_SCHED_BFS + int time_slice; + u64 deadline; + struct list_head run_list; + u64 last_ran; + u64 sched_time; /* sched_clock time spent running */ +#ifdef CONFIG_SMP + int sticky; /* Soft affined flag */ +#endif + unsigned long rt_timeout; +#else /* CONFIG_SCHED_BFS */ const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; +#endif #ifdef CONFIG_PREEMPT_NOTIFIERS /* list of struct preempt_notifier: */ @@ -1254,6 +1275,9 @@ struct task_struct { #endif struct mm_struct *mm, *active_mm; +#ifdef CONFIG_COMPAT_BRK + unsigned brk_randomized:1; +#endif #if defined(SPLIT_RSS_COUNTING) struct task_rss_stat rss_stat; #endif @@ -1311,6 +1335,9 @@ struct task_struct { int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ cputime_t utime, stime, utimescaled, stimescaled; +#ifdef CONFIG_SCHED_BFS + unsigned long utime_pc, stime_pc; +#endif cputime_t gtime; #ifndef CONFIG_VIRT_CPU_ACCOUNTING cputime_t prev_utime, prev_stime; @@ -1528,8 +1555,67 @@ struct task_struct { unsigned long memsw_bytes; /* uncharged mem+swap usage */ } memcg_batch; #endif +#ifdef CONFIG_HAVE_HW_BREAKPOINT + atomic_t ptrace_bp_refcnt; +#endif }; +#ifdef CONFIG_SCHED_BFS +extern int grunqueue_is_locked(void); +extern void grq_unlock_wait(void); +extern void cpu_scaling(int cpu); +extern void cpu_nonscaling(int cpu); +#define tsk_seruntime(t) ((t)->sched_time) +#define tsk_rttimeout(t) ((t)->rt_timeout) + +static inline void tsk_cpus_current(struct task_struct *p) +{ +} + +#define runqueue_is_locked(cpu) grunqueue_is_locked() + +static inline void print_scheduler_version(void) +{ + printk(KERN_INFO"BFS CPU scheduler v0.404 by Con Kolivas.\n"); +} + +static inline int iso_task(struct task_struct *p) +{ + return (p->policy == SCHED_ISO); +} +extern void remove_cpu(unsigned long cpu); +#else /* CFS */ +extern int runqueue_is_locked(int cpu); +static inline void cpu_scaling(int cpu) +{ +} + +static inline void cpu_nonscaling(int cpu) +{ +} +#define tsk_seruntime(t) ((t)->se.sum_exec_runtime) +#define tsk_rttimeout(t) ((t)->rt.timeout) + +static inline void tsk_cpus_current(struct task_struct *p) +{ + p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed; +} + +static inline void print_scheduler_version(void) +{ + printk(KERN_INFO"CFS CPU scheduler.\n"); +} + +static inline int iso_task(struct task_struct *p) +{ + return 0; +} + +static inline void remove_cpu(unsigned long cpu) +{ +} +#endif /* CONFIG_SCHED_BFS */ + /* Future-safe accessor for struct task_struct's cpus_allowed. 
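
For the SCHED_ISO policy added in the BFS hunk above, a minimal userspace sketch of requesting the class, assuming a BFS kernel (the fallback value 4 mirrors the definition above; on a CFS kernel the call is expected to fail with EINVAL):

  #include <sched.h>
  #include <stdio.h>

  #ifndef SCHED_ISO
  #define SCHED_ISO 4     /* matches the kernel definition above */
  #endif

  int make_self_iso(void)
  {
          struct sched_param sp = { .sched_priority = 0 };

          /* Unprivileged soft-realtime class on BFS kernels. */
          if (sched_setscheduler(0, SCHED_ISO, &sp) != 0) {
                  perror("sched_setscheduler");
                  return -1;
          }
          return 0;
  }
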
*/ #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) @@ -1547,10 +1633,20 @@ struct task_struct { */ #define MAX_USER_RT_PRIO 100 -#define MAX_RT_PRIO MAX_USER_RT_PRIO +#define MAX_RT_PRIO (MAX_USER_RT_PRIO + 1) +#define DEFAULT_PRIO (MAX_RT_PRIO + 20) +#ifdef CONFIG_SCHED_BFS +#define PRIO_RANGE (40) +#define MAX_PRIO (MAX_RT_PRIO + PRIO_RANGE) +#define ISO_PRIO (MAX_RT_PRIO) +#define NORMAL_PRIO (MAX_RT_PRIO + 1) +#define IDLE_PRIO (MAX_RT_PRIO + 2) +#define PRIO_LIMIT ((IDLE_PRIO) + 1) +#else /* CONFIG_SCHED_BFS */ #define MAX_PRIO (MAX_RT_PRIO + 40) -#define DEFAULT_PRIO (MAX_RT_PRIO + 20) +#define NORMAL_PRIO DEFAULT_PRIO +#endif /* CONFIG_SCHED_BFS */ static inline int rt_prio(int prio) { @@ -1736,7 +1832,6 @@ extern int task_free_unregister(struct notifier_block *n); #define PF_FROZEN 0x00010000 /* frozen for system suspend */ #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ #define PF_KSWAPD 0x00040000 /* I am kswapd */ -#define PF_OOM_ORIGIN 0x00080000 /* Allocating much memory to others */ #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ @@ -1885,7 +1980,7 @@ task_sched_runtime(struct task_struct *task); extern unsigned long long thread_group_sched_runtime(struct task_struct *task); /* sched_exec is called by processes performing an exec */ -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_BFS) extern void sched_exec(void); #else #define sched_exec() {} @@ -2053,6 +2148,7 @@ extern void release_uids(struct user_namespace *ns); #include extern void do_timer(unsigned long ticks); +extern void xtime_update(unsigned long ticks); extern int wake_up_state(struct task_struct *tsk, unsigned int state); extern int wake_up_process(struct task_struct *tsk); diff --git a/include/linux/security.h b/include/linux/security.h index b2b7f9749f5eb..debbd97db7abc 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -53,7 +53,7 @@ struct audit_krule; */ extern int cap_capable(struct task_struct *tsk, const struct cred *cred, int cap, int audit); -extern int cap_settime(struct timespec *ts, struct timezone *tz); +extern int cap_settime(const struct timespec *ts, const struct timezone *tz); extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode); extern int cap_ptrace_traceme(struct task_struct *parent); extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); @@ -1387,7 +1387,7 @@ struct security_operations { int (*quotactl) (int cmds, int type, int id, struct super_block *sb); int (*quota_on) (struct dentry *dentry); int (*syslog) (int type); - int (*settime) (struct timespec *ts, struct timezone *tz); + int (*settime) (const struct timespec *ts, const struct timezone *tz); int (*vm_enough_memory) (struct mm_struct *mm, long pages); int (*bprm_set_creds) (struct linux_binprm *bprm); @@ -1669,7 +1669,7 @@ int security_sysctl(struct ctl_table *table, int op); int security_quotactl(int cmds, int type, int id, struct super_block *sb); int security_quota_on(struct dentry *dentry); int security_syslog(int type); -int security_settime(struct timespec *ts, struct timezone *tz); +int security_settime(const struct timespec *ts, const struct timezone *tz); int security_vm_enough_memory(long pages); int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); int security_vm_enough_memory_kern(long 
pages); @@ -1904,7 +1904,8 @@ static inline int security_syslog(int type) return 0; } -static inline int security_settime(struct timespec *ts, struct timezone *tz) +static inline int security_settime(const struct timespec *ts, + const struct timezone *tz) { return cap_settime(ts, tz); } diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index e98cd2e571944..06d69648fc86c 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -88,12 +88,12 @@ static __always_inline unsigned read_seqbegin(const seqlock_t *sl) unsigned ret; repeat: - ret = sl->sequence; - smp_rmb(); + ret = ACCESS_ONCE(sl->sequence); if (unlikely(ret & 1)) { cpu_relax(); goto repeat; } + smp_rmb(); return ret; } diff --git a/include/linux/smb329.h b/include/linux/smb329.h new file mode 100644 index 0000000000000..8ce3440e0bc0e --- /dev/null +++ b/include/linux/smb329.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2007 HTC Incorporated + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef _SMB329_H_ +#define _SMB329_H_ +#include +#include + +#ifdef CONFIG_SMB329 +extern int set_charger_ctrl(u32 ctl); +#else +static int set_charger_ctrl(u32 ctl) {return 0 ; } +#endif +#endif diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index b4d7710bc38d2..26456a49f5a8c 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -101,6 +101,14 @@ struct spi_device { */ }; +struct spi_msg +{ + u8 cmd; + u8 *data; + int len; + unsigned char buffer[128]; +}; + static inline struct spi_device *to_spi_device(struct device *dev) { return dev ? container_of(dev, struct spi_device, dev) : NULL; @@ -622,7 +630,15 @@ spi_read(struct spi_device *spi, u8 *buf, size_t len) extern int spi_write_then_read(struct spi_device *spi, const u8 *txbuf, unsigned n_tx, u8 *rxbuf, unsigned n_rx); - +/* + * htc workaround to support multiple clients: add mutex lock to avoid SPI commands conflict. + * @func: true for spi write, false for spi read + * @msg: spi write commands struct + * @buf: spi read buffer + * @size: read/wirte length + */ +extern int +spi_read_write_lock(struct spi_device *spidev, struct spi_msg * msg, char *buf, int size, int func); /** * spi_w8r8 - SPI synchronous 8 bit write followed by 8 bit read * @spi: device with which data will be exchanged @@ -763,6 +779,10 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n) { return 0; } #endif +struct spi_platform_data { + int clk_rate; +}; + /* If you're hotplugging an adapter with devices (parport, usb, etc) * use spi_new_device() to describe each device. You can also call diff --git a/include/linux/sw_sync.h b/include/linux/sw_sync.h new file mode 100644 index 0000000000000..bd6f2089e77f3 --- /dev/null +++ b/include/linux/sw_sync.h @@ -0,0 +1,58 @@ +/* + * include/linux/sw_sync.h + * + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_SW_SYNC_H +#define _LINUX_SW_SYNC_H + +#include + +#ifdef __KERNEL__ + +#include + +struct sw_sync_timeline { + struct sync_timeline obj; + + u32 value; +}; + +struct sw_sync_pt { + struct sync_pt pt; + + u32 value; +}; + +struct sw_sync_timeline *sw_sync_timeline_create(const char *name); +void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc); + +struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value); + +#endif /* __KERNEL __ */ + +struct sw_sync_create_fence_data { + __u32 value; + char name[32]; + __s32 fence; /* fd of new fence */ +}; + +#define SW_SYNC_IOC_MAGIC 'W' + +#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\ + struct sw_sync_create_fence_data) +#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32) + + +#endif /* _LINUX_SW_SYNC_H */ diff --git a/include/linux/synaptics_i2c_rmi.h b/include/linux/synaptics_i2c_rmi.h index 5539cc5207796..206ba75669810 100644 --- a/include/linux/synaptics_i2c_rmi.h +++ b/include/linux/synaptics_i2c_rmi.h @@ -50,6 +50,8 @@ struct synaptics_i2c_rmi_platform_data { int fuzz_p; int fuzz_w; int8_t sensitivity_adjust; + uint32_t dup_threshold; + uint32_t margin_inactive_pixel[4]; }; #endif /* _LINUX_SYNAPTICS_I2C_RMI_H */ diff --git a/include/linux/sync.h b/include/linux/sync.h new file mode 100755 index 0000000000000..31ba6ec0819d4 --- /dev/null +++ b/include/linux/sync.h @@ -0,0 +1,427 @@ +/* + * include/linux/sync.h + * + * Copyright (C) 2012 Google, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_SYNC_H +#define _LINUX_SYNC_H + +#include +#ifdef __KERNEL__ + +#include +#include +#include +#include +#include + +struct sync_timeline; +struct sync_pt; +struct sync_fence; +struct seq_file; + +/** + * struct sync_timeline_ops - sync object implementation ops + * @driver_name: name of the implentation + * @dup: duplicate a sync_pt + * @has_signaled: returns: + * 1 if pt has signaled + * 0 if pt has not signaled + * <0 on error + * @compare: returns: + * 1 if b will signal before a + * 0 if a and b will signal at the same time + * -1 if a will signabl before b + * @free_pt: called before sync_pt is freed + * @release_obj: called before sync_timeline is freed + * @print_obj: deprecated + * @print_pt: deprecated + * @fill_driver_data: write implmentation specific driver data to data. + * should return an error if there is not enough room + * as specified by size. This information is returned + * to userspace by SYNC_IOC_FENCE_INFO. 
+ * @timeline_value_str: fill str with the value of the sync_timeline's counter + * @pt_value_str: fill str with the value of the sync_pt + */ +struct sync_timeline_ops { + const char *driver_name; + + /* required */ + struct sync_pt *(*dup)(struct sync_pt *pt); + + /* required */ + int (*has_signaled)(struct sync_pt *pt); + + /* required */ + int (*compare)(struct sync_pt *a, struct sync_pt *b); + + /* optional */ + void (*free_pt)(struct sync_pt *sync_pt); + + /* optional */ + void (*release_obj)(struct sync_timeline *sync_timeline); + + /* deprecated */ + void (*print_obj)(struct seq_file *s, + struct sync_timeline *sync_timeline); + + /* deprecated */ + void (*print_pt)(struct seq_file *s, struct sync_pt *sync_pt); + + /* optional */ + int (*fill_driver_data)(struct sync_pt *syncpt, void *data, int size); + + /* optional */ + void (*timeline_value_str)(struct sync_timeline *timeline, char *str, + int size); + + /* optional */ + void (*pt_value_str)(struct sync_pt *pt, char *str, int size); +}; + +/** + * struct sync_timeline - sync object + * @kref: reference count on fence. + * @ops: ops that define the implementaiton of the sync_timeline + * @name: name of the sync_timeline. Useful for debugging + * @destoryed: set when sync_timeline is destroyed + * @child_list_head: list of children sync_pts for this sync_timeline + * @child_list_lock: lock protecting @child_list_head, destroyed, and + * sync_pt.status + * @active_list_head: list of active (unsignaled/errored) sync_pts + * @sync_timeline_list: membership in global sync_timeline_list + */ +struct sync_timeline { + struct kref kref; + const struct sync_timeline_ops *ops; + char name[32]; + + /* protected by child_list_lock */ + bool destroyed; + + struct list_head child_list_head; + spinlock_t child_list_lock; + + struct list_head active_list_head; + spinlock_t active_list_lock; + + struct list_head sync_timeline_list; +}; + +/** + * struct sync_pt - sync point + * @parent: sync_timeline to which this sync_pt belongs + * @child_list: membership in sync_timeline.child_list_head + * @active_list: membership in sync_timeline.active_list_head + * @signaled_list: membership in temorary signaled_list on stack + * @fence: sync_fence to which the sync_pt belongs + * @pt_list: membership in sync_fence.pt_list_head + * @status: 1: signaled, 0:active, <0: error + * @timestamp: time which sync_pt status transitioned from active to + * singaled or error. + */ +struct sync_pt { + struct sync_timeline *parent; + struct list_head child_list; + + struct list_head active_list; + struct list_head signaled_list; + + struct sync_fence *fence; + struct list_head pt_list; + + /* protected by parent->active_list_lock */ + int status; + + ktime_t timestamp; +}; + +/** + * struct sync_fence - sync fence + * @file: file representing this fence + * @kref: referenace count on fence. + * @name: name of sync_fence. Useful for debugging + * @pt_list_head: list of sync_pts in ths fence. 
immutable once fence + * is created + * @waiter_list_head: list of asynchronous waiters on this fence + * @waiter_list_lock: lock protecting @waiter_list_head and @status + * @status: 1: signaled, 0:active, <0: error + * + * @wq: wait queue for fence signaling + * @sync_fence_list: membership in global fence list + */ +struct sync_fence { + struct file *file; + struct kref kref; + char name[32]; + + /* this list is immutable once the fence is created */ + struct list_head pt_list_head; + + struct list_head waiter_list_head; + spinlock_t waiter_list_lock; /* also protects status */ + int status; + + wait_queue_head_t wq; + + struct list_head sync_fence_list; +}; + +struct sync_fence_waiter; +typedef void (*sync_callback_t)(struct sync_fence *fence, + struct sync_fence_waiter *waiter); + +/** + * struct sync_fence_waiter - metadata for asynchronous waiter on a fence + * @waiter_list: membership in sync_fence.waiter_list_head + * @callback: function pointer to call when fence signals + * @callback_data: pointer to pass to @callback + */ +struct sync_fence_waiter { + struct list_head waiter_list; + + sync_callback_t callback; +}; + +static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter, + sync_callback_t callback) +{ + waiter->callback = callback; +} + +/* + * API for sync_timeline implementers + */ + +/** + * sync_timeline_create() - creates a sync object + * @ops: specifies the implemention ops for the object + * @size: size to allocate for this obj + * @name: sync_timeline name + * + * Creates a new sync_timeline which will use the implemetation specified by + * @ops. @size bytes will be allocated allowing for implemntation specific + * data to be kept after the generic sync_timeline stuct. + */ +struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, + int size, const char *name); + +/** + * sync_timeline_destory() - destorys a sync object + * @obj: sync_timeline to destroy + * + * A sync implemntation should call this when the @obj is going away + * (i.e. module unload.) @obj won't actually be freed until all its childern + * sync_pts are freed. + */ +void sync_timeline_destroy(struct sync_timeline *obj); + +/** + * sync_timeline_signal() - signal a status change on a sync_timeline + * @obj: sync_timeline to signal + * + * A sync implemntation should call this any time one of it's sync_pts + * has signaled or has an error condition. + */ +void sync_timeline_signal(struct sync_timeline *obj); + +/** + * sync_pt_create() - creates a sync pt + * @parent: sync_pt's parent sync_timeline + * @size: size to allocate for this pt + * + * Creates a new sync_pt as a chiled of @parent. @size bytes will be + * allocated allowing for implemntation specific data to be kept after + * the generic sync_timeline struct. + */ +struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size); + +/** + * sync_pt_free() - frees a sync pt + * @pt: sync_pt to free + * + * This should only be called on sync_pts which have been created but + * not added to a fence. + */ +void sync_pt_free(struct sync_pt *pt); + +/** + * sync_fence_create() - creates a sync fence + * @name: name of fence to create + * @pt: sync_pt to add to the fence + * + * Creates a fence containg @pt. Once this is called, the fence takes + * ownership of @pt. 
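
Tying the creation helpers together, a minimal driver-side sketch built on the sw_sync helpers declared earlier: create a timeline, hang a sync_pt and fence off it, hand the fence to userspace as an fd, and later signal it by advancing the timeline (all names prefixed "example_" are illustrative):

  #include <linux/errno.h>
  #include <linux/file.h>
  #include <linux/types.h>
  #include <linux/sw_sync.h>
  #include <linux/sync.h>

  static struct sw_sync_timeline *example_tl;

  static int example_timeline_init(void)
  {
          example_tl = sw_sync_timeline_create("example-timeline");
          return example_tl ? 0 : -ENOMEM;
  }

  /* Create a fence that signals when the timeline reaches @value and
   * wrap it in an fd that can be returned to userspace. */
  static int example_export_fence(u32 value)
  {
          struct sync_pt *pt;
          struct sync_fence *fence;
          int fd = get_unused_fd();

          if (fd < 0)
                  return fd;

          pt = sw_sync_pt_create(example_tl, value);
          if (!pt) {
                  put_unused_fd(fd);
                  return -ENOMEM;
          }

          fence = sync_fence_create("example-fence", pt);
          if (!fence) {
                  sync_pt_free(pt);
                  put_unused_fd(fd);
                  return -ENOMEM;
          }

          sync_fence_install(fence, fd);  /* fence now owned by the fd */
          return fd;
  }

  /* Completion side: advancing the counter signals pending fences. */
  static void example_work_done(void)
  {
          sw_sync_timeline_inc(example_tl, 1);
  }
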
+ */ +struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt); + +/* + * API for sync_fence consumers + */ + +/** + * sync_fence_merge() - merge two fences + * @name: name of new fence + * @a: fence a + * @b: fence b + * + * Creates a new fence which contains copies of all the sync_pts in both + * @a and @b. @a and @b remain valid, independent fences. + */ +struct sync_fence *sync_fence_merge(const char *name, + struct sync_fence *a, struct sync_fence *b); + +/** + * sync_fence_fdget() - get a fence from an fd + * @fd: fd referencing a fence + * + * Ensures @fd references a valid fence, increments the refcount of the backing + * file, and returns the fence. + */ +struct sync_fence *sync_fence_fdget(int fd); + +/** + * sync_fence_put() - puts a refernnce of a sync fence + * @fence: fence to put + * + * Puts a reference on @fence. If this is the last reference, the fence and + * all it's sync_pts will be freed + */ +void sync_fence_put(struct sync_fence *fence); + +/** + * sync_fence_install() - installs a fence into a file descriptor + * @fence: fence to instal + * @fd: file descriptor in which to install the fence + * + * Installs @fence into @fd. @fd's should be acquired through get_unused_fd(). + */ +void sync_fence_install(struct sync_fence *fence, int fd); + +/** + * sync_fence_wait_async() - registers and async wait on the fence + * @fence: fence to wait on + * @waiter: waiter callback struck + * + * Returns 1 if @fence has already signaled. + * + * Registers a callback to be called when @fence signals or has an error. + * @waiter should be initialized with sync_fence_waiter_init(). + */ +int sync_fence_wait_async(struct sync_fence *fence, + struct sync_fence_waiter *waiter); + +/** + * sync_fence_cancel_async() - cancels an async wait + * @fence: fence to wait on + * @waiter: waiter callback struck + * + * returns 0 if waiter was removed from fence's async waiter list. + * returns -ENOENT if waiter was not found on fence's async waiter list. + * + * Cancels a previously registered async wait. Will fail gracefully if + * @waiter was never registered or if @fence has already signaled @waiter. + */ +int sync_fence_cancel_async(struct sync_fence *fence, + struct sync_fence_waiter *waiter); + +/** + * sync_fence_wait() - wait on fence + * @fence: fence to wait on + * @tiemout: timeout in ms + * + * Wait for @fence to be signaled or have an error. 
Waits indefinitely + * if @timeout < 0 + */ +int sync_fence_wait(struct sync_fence *fence, long timeout); + +#endif /* __KERNEL__ */ + +/** + * struct sync_merge_data - data passed to merge ioctl + * @fd2: file descriptor of second fence + * @name: name of new fence + * @fence: returns the fd of the new fence to userspace + */ +struct sync_merge_data { + __s32 fd2; /* fd of second fence */ + char name[32]; /* name of new fence */ + __s32 fence; /* fd on newly created fence */ +}; + +/** + * struct sync_pt_info - detailed sync_pt information + * @len: length of sync_pt_info including any driver_data + * @obj_name: name of parent sync_timeline + * @driver_name: name of driver implmenting the parent + * @status: status of the sync_pt 0:active 1:signaled <0:error + * @timestamp_ns: timestamp of status change in nanoseconds + * @driver_data: any driver dependant data + */ +struct sync_pt_info { + __u32 len; + char obj_name[32]; + char driver_name[32]; + __s32 status; + __u64 timestamp_ns; + + __u8 driver_data[0]; +}; + +/** + * struct sync_fence_info_data - data returned from fence info ioctl + * @len: ioctl caller writes the size of the buffer its passing in. + * ioctl returns length of sync_fence_data reutnred to userspace + * including pt_info. + * @name: name of fence + * @status: status of fence. 1: signaled 0:active <0:error + * @pt_info: a sync_pt_info struct for every sync_pt in the fence + */ +struct sync_fence_info_data { + __u32 len; + char name[32]; + __s32 status; + + __u8 pt_info[0]; +}; + +#define SYNC_IOC_MAGIC '>' + +/** + * DOC: SYNC_IOC_WAIT - wait for a fence to signal + * + * pass timeout in milliseconds. Waits indefinitely timeout < 0. + */ +#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __s32) + +/** + * DOC: SYNC_IOC_MERGE - merge two fences + * + * Takes a struct sync_merge_data. Creates a new fence containing copies of + * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the + * new fence's fd in sync_merge_data.fence + */ +#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data) + +/** + * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence + * + * Takes a struct sync_fence_info_data with extra space allocated for pt_info. + * Caller should write the size of the buffer into len. On return, len is + * updated to reflect the total size of the sync_fence_info_data including + * pt_info. + * + * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence. + * To itterate over the sync_pt_infos, use the sync_pt_info.len field. + */ +#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\ + struct sync_fence_info_data) + +#endif /* _LINUX_SYNC_H */ diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h new file mode 100644 index 0000000000000..27b3b0bc41a94 --- /dev/null +++ b/include/linux/syscore_ops.h @@ -0,0 +1,29 @@ +/* + * syscore_ops.h - System core operations. + * + * Copyright (C) 2011 Rafael J. Wysocki , Novell Inc. + * + * This file is released under the GPLv2. 
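
A minimal sketch of registering with the syscore interface declared in this new header (callback names are illustrative); suspend runs late with a single CPU online and interrupts disabled, and a non-zero return aborts the transition:

  #include <linux/init.h>
  #include <linux/syscore_ops.h>

  static int example_syscore_suspend(void)
  {
          /* save any state that must survive the low-power transition */
          return 0;
  }

  static void example_syscore_resume(void)
  {
          /* undo whatever suspend() did, still with interrupts off */
  }

  static struct syscore_ops example_syscore_ops = {
          .suspend = example_syscore_suspend,
          .resume  = example_syscore_resume,
  };

  static int __init example_syscore_init(void)
  {
          register_syscore_ops(&example_syscore_ops);
          return 0;
  }
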
+ */ + +#ifndef _LINUX_SYSCORE_OPS_H +#define _LINUX_SYSCORE_OPS_H + +#include + +struct syscore_ops { + struct list_head node; + int (*suspend)(void); + void (*resume)(void); + void (*shutdown)(void); +}; + +extern void register_syscore_ops(struct syscore_ops *ops); +extern void unregister_syscore_ops(struct syscore_ops *ops); +#ifdef CONFIG_PM_SLEEP +extern int syscore_suspend(void); +extern void syscore_resume(void); +#endif +extern void syscore_shutdown(void); + +#endif diff --git a/include/linux/sysdev.h b/include/linux/sysdev.h index 1154c29f41018..6555679b415bc 100644 --- a/include/linux/sysdev.h +++ b/include/linux/sysdev.h @@ -33,11 +33,6 @@ struct sysdev_class { const char *name; struct list_head drivers; struct sysdev_class_attribute **attrs; - - /* Default operations for these types of devices */ - int (*shutdown)(struct sys_device *); - int (*suspend)(struct sys_device *, pm_message_t state); - int (*resume)(struct sys_device *); struct kset kset; }; @@ -76,9 +71,6 @@ struct sysdev_driver { struct list_head entry; int (*add)(struct sys_device *); int (*remove)(struct sys_device *); - int (*shutdown)(struct sys_device *); - int (*suspend)(struct sys_device *, pm_message_t state); - int (*resume)(struct sys_device *); }; diff --git a/include/linux/tcp.h b/include/linux/tcp.h index e64f4c67d0ef7..053b24bd899f7 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -378,6 +378,10 @@ struct tcp_sock { u32 snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */ u32 snd_cwnd_used; u32 snd_cwnd_stamp; + u32 prior_cwnd; /* Congestion window at start of Recovery. */ + u32 prr_delivered; /* Number of newly delivered packets to + * receiver in Recovery. */ + u32 prr_out; /* Total number of pkts sent during Recovery. */ u32 rcv_wnd; /* Current receiver window */ u32 write_seq; /* Tail(+1) of data held in tcp send buffer */ diff --git a/include/linux/time.h b/include/linux/time.h index 1e6d3b59238d3..a9c186e6f95cf 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -125,8 +125,10 @@ extern int timekeeping_suspended; unsigned long get_seconds(void); struct timespec current_kernel_time(void); struct timespec __current_kernel_time(void); /* does not take xtime_lock */ -struct timespec __get_wall_to_monotonic(void); /* does not take xtime_lock */ struct timespec get_monotonic_coarse(void); +void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, + struct timespec *wtom, struct timespec *sleep); +void timekeeping_inject_sleeptime(struct timespec *delta); #define CURRENT_TIME (current_kernel_time()) #define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) @@ -147,8 +149,9 @@ static inline u32 arch_gettimeoffset(void) { return 0; } #endif extern void do_gettimeofday(struct timeval *tv); -extern int do_settimeofday(struct timespec *tv); -extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz); +extern int do_settimeofday(const struct timespec *tv); +extern int do_sys_settimeofday(const struct timespec *tv, + const struct timezone *tz); #define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts) extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags); struct itimerval; @@ -162,12 +165,13 @@ extern void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real); extern void getboottime(struct timespec *ts); extern void monotonic_to_bootbased(struct timespec *ts); +extern void get_monotonic_boottime(struct timespec *ts); extern struct timespec timespec_trunc(struct timespec t, 
unsigned gran); extern int timekeeping_valid_for_hres(void); extern u64 timekeeping_max_deferment(void); -extern void update_wall_time(void); extern void timekeeping_leap_insert(int leapsecond); +extern int timekeeping_inject_offset(struct timespec *ts); struct tms; extern void do_sys_times(struct tms *); diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h index d24aabaca4741..98b0a6c3c21cf 100644 --- a/include/linux/timerqueue.h +++ b/include/linux/timerqueue.h @@ -39,7 +39,7 @@ struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head) static inline void timerqueue_init(struct timerqueue_node *node) { - RB_CLEAR_NODE(&node->node); + rb_init_node(&node->node); } static inline void timerqueue_init_head(struct timerqueue_head *head) diff --git a/include/linux/tpa2018d1.h b/include/linux/tpa2018d1.h new file mode 100644 index 0000000000000..26f608bca1e6c --- /dev/null +++ b/include/linux/tpa2018d1.h @@ -0,0 +1,36 @@ +/* include/linux/tpa2018d1.h - tpa2018d1 speaker amplifier driver + * + * Copyright (C) 2009 HTC Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_TPA2018D1_H +#define _LINUX_TPA2018D1_H + +#include + +enum tpa2018d1_mode { + TPA2018_MODE_OFF, + TPA2018_MODE_PLAYBACK, + TPA2018_MODE_RINGTONE, + TPA2018_MODE_VOICE_CALL, + TPA2018_NUM_MODES, +}; + +#define TPA2018_IOCTL_MAGIC 'a' +#define TPA2018_SET_CONFIG _IOW(TPA2018_IOCTL_MAGIC, 1, unsigned) +#define TPA2018_READ_CONFIG _IOR(TPA2018_IOCTL_MAGIC, 2, unsigned) +#define TPA2018_SET_PARAM _IOW(TPA2018_IOCTL_MAGIC, 3, unsigned) +#define TPA2018_SET_MODE _IOW(TPA2018_IOCTL_MAGIC, 4, unsigned) + +#endif + diff --git a/include/linux/tps65200.h b/include/linux/tps65200.h new file mode 100644 index 0000000000000..6048cecb4f930 --- /dev/null +++ b/include/linux/tps65200.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2007 HTC Incorporated + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef _TPS65200_H_ +#define _TPS65200_H_ +#include +#include + +#ifdef CONFIG_TPS65200 +extern int tps_set_charger_ctrl(u32 ctl); +#else +static int tps_set_charger_ctrl(u32 ctl) {return 0 ; } +#endif +#endif diff --git a/include/linux/uhid.h b/include/linux/uhid.h new file mode 100644 index 0000000000000..9c6974f169661 --- /dev/null +++ b/include/linux/uhid.h @@ -0,0 +1,104 @@ +#ifndef __UHID_H_ +#define __UHID_H_ + +/* + * User-space I/O driver support for HID subsystem + * Copyright (c) 2012 David Herrmann + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +/* + * Public header for user-space communication. We try to keep every structure + * aligned but to be safe we also use __attribute__((__packed__)). Therefore, + * the communication should be ABI compatible even between architectures. + */ + +#include +#include + +enum uhid_event_type { + UHID_CREATE, + UHID_DESTROY, + UHID_START, + UHID_STOP, + UHID_OPEN, + UHID_CLOSE, + UHID_OUTPUT, + UHID_OUTPUT_EV, + UHID_INPUT, + UHID_FEATURE, + UHID_FEATURE_ANSWER, +}; + +struct uhid_create_req { + __u8 name[128]; + __u8 phys[64]; + __u8 uniq[64]; + __u8 __user *rd_data; + __u16 rd_size; + + __u16 bus; + __u32 vendor; + __u32 product; + __u32 version; + __u32 country; +} __attribute__((__packed__)); + +#define UHID_DATA_MAX 4096 + +enum uhid_report_type { + UHID_FEATURE_REPORT, + UHID_OUTPUT_REPORT, + UHID_INPUT_REPORT, +}; + +struct uhid_input_req { + __u8 data[UHID_DATA_MAX]; + __u16 size; +} __attribute__((__packed__)); + +struct uhid_output_req { + __u8 data[UHID_DATA_MAX]; + __u16 size; + __u8 rtype; +} __attribute__((__packed__)); + +struct uhid_output_ev_req { + __u16 type; + __u16 code; + __s32 value; +} __attribute__((__packed__)); + +struct uhid_feature_req { + __u32 id; + __u8 rnum; + __u8 rtype; +} __attribute__((__packed__)); + +struct uhid_feature_answer_req { + __u32 id; + __u16 err; + __u16 size; + __u8 data[UHID_DATA_MAX]; +}; + +struct uhid_event { + __u32 type; + + union { + struct uhid_create_req create; + struct uhid_input_req input; + struct uhid_output_req output; + struct uhid_output_ev_req output_ev; + struct uhid_feature_req feature; + struct uhid_feature_answer_req feature_answer; + } u; +} __attribute__((__packed__)); + +#endif /* __UHID_H_ */ diff --git a/include/linux/usb/android_composite.h b/include/linux/usb/android_composite.h index 62e72e3bd2b6e..7f9000711f4e3 100644 --- a/include/linux/usb/android_composite.h +++ b/include/linux/usb/android_composite.h @@ -27,7 +27,12 @@ struct android_usb_function { }; struct android_usb_product { - /* Default product ID. */ + /* Vendor ID for this set of functions. + * Default vendor_id in platform data will be used if this is zero. + */ + __u16 vendor_id; + + /* Product ID for this set of functions. */ __u16 product_id; /* List of function names associated with this product. 
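
For the uhid interface above, userspace creates a virtual HID device by writing a UHID_CREATE event to the uhid character device. A minimal sketch, assuming the conventional node name "/dev/uhid" and an already-built HID report descriptor:

  #include <fcntl.h>
  #include <string.h>
  #include <unistd.h>
  #include <linux/uhid.h>

  static int example_uhid_create(unsigned char *rdesc, size_t rdesc_len)
  {
          struct uhid_event ev;
          int fd = open("/dev/uhid", O_RDWR | O_CLOEXEC);

          if (fd < 0)
                  return -1;

          memset(&ev, 0, sizeof(ev));
          ev.type = UHID_CREATE;
          strcpy((char *)ev.u.create.name, "example-uhid-device");
          ev.u.create.rd_data = rdesc;         /* report descriptor */
          ev.u.create.rd_size = rdesc_len;     /* must fit in __u16 */
          ev.u.create.bus = 0x03;              /* BUS_USB */
          ev.u.create.vendor = 0x0001;         /* illustrative IDs */
          ev.u.create.product = 0x0001;

          if (write(fd, &ev, sizeof(ev)) != (ssize_t)sizeof(ev)) {
                  close(fd);
                  return -1;
          }
          return fd;      /* keep open; closing destroys the device */
  }
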
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h index ab461948b579e..76d896cc842e9 100644 --- a/include/linux/usb/ch9.h +++ b/include/linux/usb/ch9.h @@ -585,6 +585,8 @@ struct usb_ss_ep_comp_descriptor { #define USB_DT_SS_EP_COMP_SIZE 6 /* Bits 4:0 of bmAttributes if this is a bulk endpoint */ #define USB_SS_MAX_STREAMS(p) (1 << (p & 0x1f)) +/* Bits 1:0 of bmAttributes if this is an isoc endpoint */ +#define USB_SS_MULT(p) (1 + ((p) & 0x3)) /*-------------------------------------------------------------------------*/ diff --git a/include/linux/usb/f_accessory.h b/include/linux/usb/f_accessory.h new file mode 100644 index 0000000000000..ebcc5f34f554d --- /dev/null +++ b/include/linux/usb/f_accessory.h @@ -0,0 +1,81 @@ +/* + * Gadget Function Driver for Android USB accessories + * + * Copyright (C) 2011 Google, Inc. + * Author: Mike Lockwood + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __LINUX_USB_F_ACCESSORY_H +#define __LINUX_USB_F_ACCESSORY_H + +/* Use Google Vendor ID when in accessory mode */ +#define USB_ACCESSORY_VENDOR_ID 0x18D1 + + +/* Product ID to use when in accessory mode */ +#define USB_ACCESSORY_PRODUCT_ID 0x2D00 + +/* Product ID to use when in accessory mode and adb is enabled */ +#define USB_ACCESSORY_ADB_PRODUCT_ID 0x2D01 + +/* Indexes for strings sent by the host via ACCESSORY_SEND_STRING */ +#define ACCESSORY_STRING_MANUFACTURER 0 +#define ACCESSORY_STRING_MODEL 1 +#define ACCESSORY_STRING_DESCRIPTION 2 +#define ACCESSORY_STRING_VERSION 3 +#define ACCESSORY_STRING_URI 4 +#define ACCESSORY_STRING_SERIAL 5 + +/* Control request for retrieving device's protocol version (currently 1) + * + * requestType: USB_DIR_IN | USB_TYPE_VENDOR + * request: ACCESSORY_GET_PROTOCOL + * value: 0 + * index: 0 + * data version number (16 bits little endian) + */ +#define ACCESSORY_GET_PROTOCOL 51 + +/* Control request for host to send a string to the device + * + * requestType: USB_DIR_OUT | USB_TYPE_VENDOR + * request: ACCESSORY_SEND_STRING + * value: 0 + * index: string ID + * data zero terminated UTF8 string + * + * The device can later retrieve these strings via the + * ACCESSORY_GET_STRING_* ioctls + */ +#define ACCESSORY_SEND_STRING 52 + +/* Control request for starting device in accessory mode. + * The host sends this after setting all its strings to the device. 
+ * + * requestType: USB_DIR_OUT | USB_TYPE_VENDOR + * request: ACCESSORY_START + * value: 0 + * index: 0 + * data none + */ +#define ACCESSORY_START 53 + +/* ioctls for retrieving strings set by the host */ +#define ACCESSORY_GET_STRING_MANUFACTURER _IOW('M', 1, char[256]) +#define ACCESSORY_GET_STRING_MODEL _IOW('M', 2, char[256]) +#define ACCESSORY_GET_STRING_DESCRIPTION _IOW('M', 3, char[256]) +#define ACCESSORY_GET_STRING_VERSION _IOW('M', 4, char[256]) +#define ACCESSORY_GET_STRING_URI _IOW('M', 5, char[256]) +#define ACCESSORY_GET_STRING_SERIAL _IOW('M', 6, char[256]) + +#endif /* __LINUX_USB_F_ACCESSORY_H */ diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index a854fe89484e4..f21f5996fa61f 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -99,6 +99,8 @@ struct usb_hcd { #define HCD_FLAG_POLL_RH 2 /* poll for rh status? */ #define HCD_FLAG_POLL_PENDING 3 /* status has changed? */ #define HCD_FLAG_WAKEUP_PENDING 4 /* root hub is resuming? */ +#define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */ +#define HCD_FLAG_DEAD 6 /* controller has died? */ /* The flags can be tested using these macros; they are likely to * be slightly faster than test_bit(). @@ -108,6 +110,8 @@ struct usb_hcd { #define HCD_POLL_RH(hcd) ((hcd)->flags & (1U << HCD_FLAG_POLL_RH)) #define HCD_POLL_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_POLL_PENDING)) #define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING)) +#define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING)) +#define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD)) /* Flags that get set only during HCD registration or removal. */ unsigned rh_registered:1;/* is root hub registered? */ diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index c9049139a7a5a..45f3b9db42582 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -191,7 +191,8 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data) * @id_table: pointer to a list of usb_device_id structures that define all * of the devices this structure can support. * @num_ports: the number of different ports this device will have. - * @bulk_in_size: bytes to allocate for bulk-in buffer (0 = end-point size) + * @bulk_in_size: minimum number of bytes to allocate for bulk-in buffer + * (0 = end-point size) * @bulk_out_size: bytes to allocate for bulk-out buffer (0 = end-point size) * @calc_num_ports: pointer to a function to determine how many ports this * device has dynamically. It will be called after the probe() diff --git a/include/linux/wait.h b/include/linux/wait.h index 3efc9f3f43a08..945ca7d83650f 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -346,6 +346,75 @@ do { \ __ret; \ }) +/** + * wait_io_event_interruptible - sleep until an io condition gets true + * @wq: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * + * The process is put to sleep (TASK_INTERRUPTIBLE) until the + * @condition evaluates to true or a signal is received. + * The @condition is checked each time the waitqueue @wq is woken up. + * + * wake_up() has to be called after changing any variable that could + * change the result of the wait condition. + * + * The function will return -ERESTARTSYS if it was interrupted by a + * signal and 0 if @condition evaluated to true. 
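
A minimal driver-side sketch of the io-accounted wait helper defined just below, assuming a request structure with a completion flag and a wait queue (names are illustrative):

  #include <linux/wait.h>

  struct example_req {
          wait_queue_head_t wq;
          int done;
  };

  static void example_req_init(struct example_req *req)
  {
          init_waitqueue_head(&req->wq);
          req->done = 0;
  }

  /* Sleeps interruptibly and is accounted as iowait, unlike plain
   * wait_event_interruptible(); returns 0 once req->done is set,
   * -ERESTARTSYS if interrupted by a signal. */
  static int example_wait_for_req(struct example_req *req)
  {
          return wait_io_event_interruptible(req->wq, req->done);
  }

  /* Completion path, e.g. from an interrupt handler. */
  static void example_complete_req(struct example_req *req)
  {
          req->done = 1;
          wake_up(&req->wq);
  }
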
+ */ +#define wait_io_event_interruptible(wq, condition) \ +({ \ + int __ret = 0; \ + if (!(condition)) \ + __wait_io_event_interruptible(wq, condition, __ret); \ + __ret; \ +}) + +#define __wait_io_event_interruptible_timeout(wq, condition, ret) \ +do { \ + DEFINE_WAIT(__wait); \ + \ + for (;;) { \ + prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \ + if (condition) \ + break; \ + if (!signal_pending(current)) { \ + ret = io_schedule_timeout(ret); \ + if (!ret) \ + break; \ + continue; \ + } \ + ret = -ERESTARTSYS; \ + break; \ + } \ + finish_wait(&wq, &__wait); \ +} while (0) + +/** + * wait_io_event_interruptible_timeout - sleep until an io condition gets true or a timeout elapses + * @wq: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * @timeout: timeout, in jiffies + * + * The process is put to sleep (TASK_INTERRUPTIBLE) until the + * @condition evaluates to true or a signal is received. + * The @condition is checked each time the waitqueue @wq is woken up. + * + * wake_up() has to be called after changing any variable that could + * change the result of the wait condition. + * + * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it + * was interrupted by a signal, and the remaining jiffies otherwise + * if the condition evaluated to true before the timeout elapsed. + */ + +#define wait_io_event_interruptible_timeout(wq, condition, timeout) \ +({ \ + long __ret = timeout; \ + if (!(condition)) \ + __wait_io_event_interruptible_timeout(wq, condition, __ret); \ + __ret; \ +}) + #define __wait_event_interruptible_exclusive(wq, condition, ret) \ do { \ DEFINE_WAIT(__wait); \ diff --git a/include/linux/wakelock.h b/include/linux/wakelock.h index a096d24ada1db..c5fcd1e85618e 100755 --- a/include/linux/wakelock.h +++ b/include/linux/wakelock.h @@ -43,6 +43,7 @@ struct wake_lock { int count; int expire_count; int wakeup_count; + int wakeup_last_generation; ktime_t total_time; ktime_t prevent_suspend_time; ktime_t max_time; diff --git a/include/linux/wlan_plat.h b/include/linux/wlan_plat.h index 3b1e2e054fd52..40ec3482d1efb 100644 --- a/include/linux/wlan_plat.h +++ b/include/linux/wlan_plat.h @@ -21,6 +21,7 @@ struct wifi_platform_data { int (*set_carddetect)(int val); void *(*mem_prealloc)(int section, unsigned long size); int (*get_mac_addr)(unsigned char *buf); + void *(*get_country_code)(char *ccode); }; #endif diff --git a/include/media/msm_camera.h b/include/media/msm_camera.h new file mode 100644 index 0000000000000..db7172eda5f6b --- /dev/null +++ b/include/media/msm_camera.h @@ -0,0 +1,606 @@ +/* + * Copyright (C) 2008-2009 QUALCOMM Incorporated. 
+ */ + +#ifndef __LINUX_MSM_CAMERA_H +#define __LINUX_MSM_CAMERA_H + +#include +#include +#include + +#define MSM_CAM_IOCTL_MAGIC 'm' + +#define MSM_CAM_IOCTL_GET_SENSOR_INFO \ + _IOR(MSM_CAM_IOCTL_MAGIC, 1, struct msm_camsensor_info *) + +#define MSM_CAM_IOCTL_REGISTER_PMEM \ + _IOW(MSM_CAM_IOCTL_MAGIC, 2, struct msm_pmem_info *) + +#define MSM_CAM_IOCTL_UNREGISTER_PMEM \ + _IOW(MSM_CAM_IOCTL_MAGIC, 3, unsigned) + +#define MSM_CAM_IOCTL_CTRL_COMMAND \ + _IOW(MSM_CAM_IOCTL_MAGIC, 4, struct msm_ctrl_cmd *) + +#define MSM_CAM_IOCTL_CONFIG_VFE \ + _IOW(MSM_CAM_IOCTL_MAGIC, 5, struct msm_camera_vfe_cfg_cmd *) + +#define MSM_CAM_IOCTL_GET_STATS \ + _IOR(MSM_CAM_IOCTL_MAGIC, 6, struct msm_camera_stats_event_ctrl *) + +#define MSM_CAM_IOCTL_GETFRAME \ + _IOR(MSM_CAM_IOCTL_MAGIC, 7, struct msm_camera_get_frame *) + +#define MSM_CAM_IOCTL_ENABLE_VFE \ + _IOW(MSM_CAM_IOCTL_MAGIC, 8, struct camera_enable_cmd *) + +#define MSM_CAM_IOCTL_CTRL_CMD_DONE \ + _IOW(MSM_CAM_IOCTL_MAGIC, 9, struct camera_cmd *) + +#define MSM_CAM_IOCTL_CONFIG_CMD \ + _IOW(MSM_CAM_IOCTL_MAGIC, 10, struct camera_cmd *) + +#define MSM_CAM_IOCTL_DISABLE_VFE \ + _IOW(MSM_CAM_IOCTL_MAGIC, 11, struct camera_enable_cmd *) + +#define MSM_CAM_IOCTL_PAD_REG_RESET2 \ + _IOW(MSM_CAM_IOCTL_MAGIC, 12, struct camera_enable_cmd *) + +#define MSM_CAM_IOCTL_VFE_APPS_RESET \ + _IOW(MSM_CAM_IOCTL_MAGIC, 13, struct camera_enable_cmd *) + +#define MSM_CAM_IOCTL_RELEASE_FRAME_BUFFER \ + _IOW(MSM_CAM_IOCTL_MAGIC, 14, struct camera_enable_cmd *) + +#define MSM_CAM_IOCTL_RELEASE_STATS_BUFFER \ + _IOW(MSM_CAM_IOCTL_MAGIC, 15, struct msm_stats_buf *) + +#define MSM_CAM_IOCTL_AXI_CONFIG \ + _IOW(MSM_CAM_IOCTL_MAGIC, 16, struct msm_camera_vfe_cfg_cmd *) + +#define MSM_CAM_IOCTL_GET_PICTURE \ + _IOW(MSM_CAM_IOCTL_MAGIC, 17, struct msm_camera_ctrl_cmd *) + +#define MSM_CAM_IOCTL_SET_CROP \ + _IOW(MSM_CAM_IOCTL_MAGIC, 18, struct crop_info *) + +#define MSM_CAM_IOCTL_PP \ + _IOW(MSM_CAM_IOCTL_MAGIC, 19, uint8_t *) + +#define MSM_CAM_IOCTL_PP_DONE \ + _IOW(MSM_CAM_IOCTL_MAGIC, 20, struct msm_snapshot_pp_status *) + +#define MSM_CAM_IOCTL_SENSOR_IO_CFG \ + _IOW(MSM_CAM_IOCTL_MAGIC, 21, struct sensor_cfg_data *) + +#define MSM_CAMERA_LED_OFF 0 +#define MSM_CAMERA_LED_LOW 1 +#define MSM_CAMERA_LED_HIGH 2 +#define MSM_CAMERA_LED_LOW_FOR_SNAPSHOT 3 +#define MSM_CAMERA_LED_DEATH_RAY 4 + +#define MSM_CAM_IOCTL_FLASH_LED_CFG \ + _IOW(MSM_CAM_IOCTL_MAGIC, 22, unsigned *) + +#define MSM_CAM_IOCTL_UNBLOCK_POLL_FRAME \ + _IO(MSM_CAM_IOCTL_MAGIC, 23) + +#define MSM_CAM_IOCTL_CTRL_COMMAND_2 \ + _IOW(MSM_CAM_IOCTL_MAGIC, 24, struct msm_ctrl_cmd *) + +#define MSM_CAM_IOCTL_ENABLE_OUTPUT_IND \ + _IOW(MSM_CAM_IOCTL_MAGIC, 25, uint32_t *) + +#define MAX_SENSOR_NUM 3 +#define MAX_SENSOR_NAME 32 + +#define PP_SNAP 1 +#define PP_RAW_SNAP (1<<1) +#define PP_PREV (1<<2) +#define PP_MASK (PP_SNAP|PP_RAW_SNAP|PP_PREV) + +#define MSM_CAM_CTRL_CMD_DONE 0 +#define MSM_CAM_SENSOR_VFE_CMD 1 + +/***************************************************** + * structure + *****************************************************/ + +/* define five type of structures for userspace <==> kernel + * space communication: + * command 1 - 2 are from userspace ==> kernel + * command 3 - 4 are from kernel ==> userspace + * + * 1. 
control command: control command(from control thread), + * control status (from config thread); + */ +struct msm_ctrl_cmd { + uint16_t type; + uint16_t length; + void *value; + uint16_t status; + uint32_t timeout_ms; + int resp_fd; /* FIXME: to be used by the kernel, pass-through for now */ +}; + +struct msm_vfe_evt_msg { + unsigned short type; /* 1 == event (RPC), 0 == message (adsp) */ + unsigned short msg_id; + unsigned int len; /* size in, number of bytes out */ + void *data; + unsigned short exttype; +}; + +#define MSM_CAM_RESP_CTRL 0 +#define MSM_CAM_RESP_STAT_EVT_MSG 1 +#define MSM_CAM_RESP_V4L2 2 +#define MSM_CAM_RESP_MAX 3 + +/* this one is used to send ctrl/status up to config thread */ +struct msm_stats_event_ctrl { + /* 0 - ctrl_cmd from control thread, + * 1 - stats/event kernel, + * 2 - V4L control or read request */ + int resptype; + int timeout_ms; + struct msm_ctrl_cmd ctrl_cmd; + /* struct vfe_event_t stats_event; */ + struct msm_vfe_evt_msg stats_event; +}; + +/* 2. config command: config command(from config thread); */ +struct msm_camera_cfg_cmd { + /* what to config: + * 1 - sensor config, 2 - vfe config */ + uint16_t cfg_type; + + /* sensor config type */ + uint16_t cmd_type; + uint16_t queue; + uint16_t length; + void *value; +}; + +#ifdef CONFIG_720P_CAMERA +#define CMD_GENERAL 0 +#define CMD_AXI_CFG_SNAP 1 +#define CMD_AXI_CFG_PREVIEW 2 +#define CMD_AXI_CFG_VIDEO 3 +#define CMD_RAW_PICT_AXI_CFG 4 + +#define CMD_FRAME_BUF_RELEASE 5 +#define CMD_PREV_BUF_CFG 6 +#define CMD_SNAP_BUF_RELEASE 7 +#define CMD_SNAP_BUF_CFG 8 +#define CMD_STATS_DISABLE 9 +#define CMD_STATS_AEC_AWB_ENABLE 10 +#define CMD_STATS_AF_ENABLE 11 +#define CMD_STATS_AEC_ENABLE 12 +#define CMD_STATS_AWB_ENABLE 13 +#define CMD_STATS_ENABLE 14 + +#define CMD_STATS_AXI_CFG 15 +#define CMD_STATS_AEC_AXI_CFG 16 +#define CMD_STATS_AF_AXI_CFG 17 +#define CMD_STATS_AWB_AXI_CFG 18 +#define CMD_STATS_RS_AXI_CFG 19 +#define CMD_STATS_CS_AXI_CFG 20 +#define CMD_STATS_IHIST_AXI_CFG 21 +#define CMD_STATS_SKIN_AXI_CFG 22 + +#define CMD_STATS_BUF_RELEASE 23 +#define CMD_STATS_AEC_BUF_RELEASE 24 +#define CMD_STATS_AF_BUF_RELEASE 25 +#define CMD_STATS_AWB_BUF_RELEASE 26 +#define CMD_STATS_RS_BUF_RELEASE 27 +#define CMD_STATS_CS_BUF_RELEASE 28 +#define CMD_STATS_IHIST_BUF_RELEASE 29 +#define CMD_STATS_SKIN_BUF_RELEASE 30 + +#define UPDATE_STATS_INVALID 31 +#else + +//Just for build pass (Horng test) +//------------------------------------ +#define CMD_AXI_CFG_SNAP 1 +#define CMD_AXI_CFG_PREVIEW 2 +#define CMD_AXI_CFG_VIDEO 3 +//------------------------------------ + +#define CMD_GENERAL 0 +#define CMD_AXI_CFG_OUT1 1 +#define CMD_AXI_CFG_SNAP_O1_AND_O2 2 +#define CMD_AXI_CFG_OUT2 3 +#define CMD_PICT_T_AXI_CFG 4 +#define CMD_PICT_M_AXI_CFG 5 +#define CMD_RAW_PICT_AXI_CFG 6 +#define CMD_STATS_AXI_CFG 7 +#define CMD_STATS_AF_AXI_CFG 8 +#define CMD_FRAME_BUF_RELEASE 9 +#define CMD_PREV_BUF_CFG 10 +#define CMD_SNAP_BUF_RELEASE 11 +#define CMD_SNAP_BUF_CFG 12 +#define CMD_STATS_DISABLE 13 +#define CMD_STATS_AEC_AWB_ENABLE 14 +#define CMD_STATS_AF_ENABLE 15 +#define CMD_STATS_BUF_RELEASE 16 +#define CMD_STATS_AF_BUF_RELEASE 17 +#define CMD_STATS_ENABLE 18 +#define UPDATE_STATS_INVALID 19 + +#endif + +/* vfe config command: config command(from config thread)*/ +struct msm_vfe_cfg_cmd { + int cmd_type; + uint16_t length; + void *value; +}; + +#define MAX_CAMERA_ENABLE_NAME_LEN 32 +struct camera_enable_cmd { + char name[MAX_CAMERA_ENABLE_NAME_LEN]; +}; + +#ifdef CONFIG_720P_CAMERA + +#define MSM_PMEM_VIDEO 0 +#define 
MSM_PMEM_PREVIEW 1 +#define MSM_PMEM_THUMBNAIL 2 +#define MSM_PMEM_MAINIMG 3 +#define MSM_PMEM_RAW_MAINIMG 4 +#define MSM_PMEM_AEC_AWB 5 +#define MSM_PMEM_AF 6 +#define MSM_PMEM_AEC 7 +#define MSM_PMEM_AWB 8 +#define MSM_PMEM_RS 9 +#define MSM_PMEM_CS 10 +#define MSM_PMEM_IHIST 11 +#define MSM_PMEM_SKIN 12 +#define MSM_PMEM_MAX 13 + +#else + +//Just for build pass (Horng test) +//------------------------------------ +#define MSM_PMEM_VIDEO 0 +#define MSM_PMEM_PREVIEW 1 +//------------------------------------ + +#define MSM_PMEM_OUTPUT1 0 +#define MSM_PMEM_OUTPUT2 1 +#define MSM_PMEM_OUTPUT1_OUTPUT2 2 +#define MSM_PMEM_THUMBNAIL 3 +#define MSM_PMEM_MAINIMG 4 +#define MSM_PMEM_RAW_MAINIMG 5 +#define MSM_PMEM_AEC_AWB 6 +#define MSM_PMEM_AF 7 +#define MSM_PMEM_MAX 8 + +#endif + +#define FRAME_PREVIEW_OUTPUT1 0 +#define FRAME_PREVIEW_OUTPUT2 1 +#define FRAME_SNAPSHOT 2 +#define FRAME_THUMBAIL 3 +#define FRAME_RAW_SNAPSHOT 4 +#define FRAME_MAX 5 + +struct msm_pmem_info { + int type; + int fd; + void *vaddr; + uint32_t offset; + uint32_t len; + uint32_t y_off; /* relative to offset */ + uint32_t cbcr_off; /* relative to offset */ + uint8_t vfe_can_write; +}; + +struct outputCfg { + uint32_t height; + uint32_t width; + + uint32_t window_height_firstline; + uint32_t window_height_lastline; +}; + +#ifndef CONFIG_720P_CAMERA + +#define OUTPUT_1 0 +#define OUTPUT_2 1 +#define OUTPUT_1_AND_2 2 +#define CAMIF_TO_AXI_VIA_OUTPUT_2 3 +#define OUTPUT_1_AND_CAMIF_TO_AXI_VIA_OUTPUT_2 4 +#define OUTPUT_2_AND_CAMIF_TO_AXI_VIA_OUTPUT_1 5 +#define LAST_AXI_OUTPUT_MODE_ENUM = OUTPUT_2_AND_CAMIF_TO_AXI_VIA_OUTPUT_1 6 + +#define MSM_FRAME_PREV_1 0 +#define MSM_FRAME_PREV_2 1 +#define MSM_FRAME_ENC 2 + +#else + +#define OUTPUT_1 0 +#define OUTPUT_2 1 +#define OUTPUT_1_AND_2 2 +#define OUTPUT_1_AND_3 3 +#define CAMIF_TO_AXI_VIA_OUTPUT_2 4 +#define OUTPUT_1_AND_CAMIF_TO_AXI_VIA_OUTPUT_2 5 +#define OUTPUT_2_AND_CAMIF_TO_AXI_VIA_OUTPUT_1 6 +#define LAST_AXI_OUTPUT_MODE_ENUM = OUTPUT_2_AND_CAMIF_TO_AXI_VIA_OUTPUT_1 7 + +#define MSM_FRAME_PREV_1 0 +#define MSM_FRAME_PREV_2 1 +#define MSM_FRAME_ENC 2 + +#define OUTPUT_TYPE_P 1 +#define OUTPUT_TYPE_T 2 +#define OUTPUT_TYPE_S 3 +#define OUTPUT_TYPE_V 4 + +#endif + +struct msm_frame { + int path; + unsigned long buffer; + uint32_t y_off; + uint32_t cbcr_off; + int fd; + + void *cropinfo; + int croplen; +}; + +#define STAT_AEAW 0 +#define STAT_AF 1 +#define STAT_MAX 2 + +struct msm_stats_buf { + int type; + unsigned long buffer; + int fd; +}; + +#define MSM_V4L2_VID_CAP_TYPE 0 +#define MSM_V4L2_STREAM_ON 1 +#define MSM_V4L2_STREAM_OFF 2 +#define MSM_V4L2_SNAPSHOT 3 +#define MSM_V4L2_QUERY_CTRL 4 +#define MSM_V4L2_GET_CTRL 5 +#define MSM_V4L2_SET_CTRL 6 +#define MSM_V4L2_QUERY 7 +#define MSM_V4L2_GET_CROP 8 +#define MSM_V4L2_SET_CROP 9 +#define MSM_V4L2_MAX 10 + +#define V4L2_CAMERA_EXIT 43 +struct crop_info { + void *info; + int len; +}; + +struct msm_postproc { + int ftnum; + struct msm_frame fthumnail; + int fmnum; + struct msm_frame fmain; +}; + +struct msm_snapshot_pp_status { + void *status; +}; + +#define CFG_SET_MODE 0 +#define CFG_SET_EFFECT 1 +#define CFG_START 2 +#define CFG_PWR_UP 3 +#define CFG_PWR_DOWN 4 +#define CFG_WRITE_EXPOSURE_GAIN 5 +#define CFG_SET_DEFAULT_FOCUS 6 +#define CFG_MOVE_FOCUS 7 +#define CFG_REGISTER_TO_REAL_GAIN 8 +#define CFG_REAL_TO_REGISTER_GAIN 9 +#define CFG_SET_FPS 10 +#define CFG_SET_PICT_FPS 11 +#define CFG_SET_BRIGHTNESS 12 +#define CFG_SET_CONTRAST 13 +#define CFG_SET_ZOOM 14 +#define CFG_SET_EXPOSURE_MODE 15 +#define CFG_SET_WB 16 
+#define CFG_SET_ANTIBANDING 17 +#define CFG_SET_EXP_GAIN 18 +#define CFG_SET_PICT_EXP_GAIN 19 +#define CFG_SET_LENS_SHADING 20 +#define CFG_GET_PICT_FPS 21 +#define CFG_GET_PREV_L_PF 22 +#define CFG_GET_PREV_P_PL 23 +#define CFG_GET_PICT_L_PF 24 +#define CFG_GET_PICT_P_PL 25 +#define CFG_GET_AF_MAX_STEPS 26 +#define CFG_GET_PICT_MAX_EXP_LC 27 +#define CFG_I2C_IOCTL_R_OTP 28 +#define CFG_SET_OV_LSC 29 /*vincent for LSC calibration*/ +#define CFG_SET_SHARPNESS 30 +#define CFG_SET_SATURATION 31 +#define CFG_SET_OV_LSC_RAW_CAPTURE 32/*20100330 vincent for LSC calibration*/ +#define CFG_SET_ISO 33 +#define CFG_SET_COORDINATE 34 +#define CFG_RUN_AUTO_FOCUS 35 +#define CFG_CANCEL_AUTO_FOCUS 36 +#define CFG_GET_EXP_FOR_LED 37 +#define CFG_UPDATE_AEC_FOR_LED 38 +#define CFG_SET_FRONT_CAMERA_MODE 39 +#define CFG_SET_QCT_LSC_RAW_CAPTURE 40 /* 20101011 QCT mesh LSC Calibration */ +#define CFG_MAX 41 + +#define MOVE_NEAR 0 +#define MOVE_FAR 1 + +#define SENSOR_PREVIEW_MODE 0 +#define SENSOR_SNAPSHOT_MODE 1 +#define SENSOR_RAW_SNAPSHOT_MODE 2 +#define SENSOR_GET_EXP 3 + +#define SENSOR_QTR_SIZE 0 +#define SENSOR_FULL_SIZE 1 +#define SENSOR_INVALID_SIZE 2 + +#define CAMERA_EFFECT_OFF 0 +#define CAMERA_EFFECT_MONO 1 +#define CAMERA_EFFECT_NEGATIVE 2 +#define CAMERA_EFFECT_SOLARIZE 3 +#define CAMERA_EFFECT_PASTEL 4 +#define CAMERA_EFFECT_MOSAIC 5 +#define CAMERA_EFFECT_RESIZE 6 +#define CAMERA_EFFECT_SEPIA 7 +#define CAMERA_EFFECT_POSTERIZE 8 +#define CAMERA_EFFECT_WHITEBOARD 9 +#define CAMERA_EFFECT_BLACKBOARD 10 +#define CAMERA_EFFECT_AQUA 11 +#define CAMERA_EFFECT_MAX 12 + +struct sensor_pict_fps { + uint16_t prevfps; + uint16_t pictfps; +}; + +struct exp_gain_cfg { + uint16_t gain; + uint32_t line; + uint16_t mul; +}; + +struct focus_cfg { + int32_t steps; + int dir; + int coarse_delay; + int fine_delay; + int step_dir; + int init_code_offset_max; +}; + +struct fps_cfg { + uint16_t f_mult; + uint16_t fps_div; + uint32_t pict_fps_div; +}; + +/*Becker for AWB calibration*/ +struct fuse_id{ + uint32_t fuse_id_word1; + uint32_t fuse_id_word2; + uint32_t fuse_id_word3; + uint32_t fuse_id_word4; +}; + +/*Vincent for LSC calibration*/ +struct reg_addr_val_pair_struct { + uint16_t reg_addr; + uint8_t reg_val; +}; + +struct lsc_cfg{ + struct reg_addr_val_pair_struct lsc_table[144]; /*OV LSC table*/ +}; + +enum antibanding_mode{ + CAMERA_ANTI_BANDING_50HZ, + CAMERA_ANTI_BANDING_60HZ, + CAMERA_ANTI_BANDING_AUTO, +}; + +enum brightness_t{ + CAMERA_BRIGHTNESS_N3, + CAMERA_BRIGHTNESS_N2, + CAMERA_BRIGHTNESS_N1, + CAMERA_BRIGHTNESS_D, + CAMERA_BRIGHTNESS_P1, + CAMERA_BRIGHTNESS_P2, + CAMERA_BRIGHTNESS_P3, + CAMERA_BRIGHTNESS_P4, + CAMERA_BRIGHTNESS_N4, +}; + +enum frontcam_t{ + CAMERA_MIRROR, + CAMERA_REVERSE, +}; + +enum wb_mode{ + CAMERA_AWB_AUTO,/*auto*/ + CAMERA_AWB_CLOUDY,/*Cloudy*/ + CAMERA_AWB_INDOOR_HOME,/*Fluorescent*/ + CAMERA_AWB_INDOOR_OFFICE,/*Incandescent*/ + CAMERA_AWB_SUNNY,/*daylight*/ +}; + +enum sharpness_mode{ + CAMERA_SHARPNESS_X0, + CAMERA_SHARPNESS_X1, + CAMERA_SHARPNESS_X2, + CAMERA_SHARPNESS_X3, + CAMERA_SHARPNESS_X4, +}; + +enum saturation_mode{ + CAMERA_SATURATION_X0, + CAMERA_SATURATION_X05, + CAMERA_SATURATION_X1, + CAMERA_SATURATION_X15, + CAMERA_SATURATION_X2, +}; + +enum contrast_mode{ + CAMERA_CONTRAST_P2, + CAMERA_CONTRAST_P1, + CAMERA_CONTRAST_D, + CAMERA_CONTRAST_N1, + CAMERA_CONTRAST_N2, +}; + +struct sensor_cfg_data { + int cfgtype; + int mode; + int rs; + uint8_t max_steps; + + union { + int8_t effect; + uint8_t lens_shading; + uint16_t prevl_pf; + uint16_t prevp_pl; + uint16_t 
pictl_pf; + uint16_t pictp_pl; + uint32_t pict_max_exp_lc; + uint16_t p_fps; + struct sensor_pict_fps gfps; + struct exp_gain_cfg exp_gain; + struct focus_cfg focus; + struct fps_cfg fps; + struct fuse_id fuse; + struct lsc_cfg lsctable;/*Vincent for LSC calibration*/ + enum antibanding_mode antibanding_value; + enum brightness_t brightness_value; + enum frontcam_t frontcam_value; + enum wb_mode wb_value; + enum sharpness_mode sharpness_value; + enum saturation_mode saturation_value; + enum contrast_mode contrast_value; + } cfg; +}; + +#define GET_NAME 0 +#define GET_PREVIEW_LINE_PER_FRAME 1 +#define GET_PREVIEW_PIXELS_PER_LINE 2 +#define GET_SNAPSHOT_LINE_PER_FRAME 3 +#define GET_SNAPSHOT_PIXELS_PER_LINE 4 +#define GET_SNAPSHOT_FPS 5 +#define GET_SNAPSHOT_MAX_EP_LINE_CNT 6 + +struct msm_camsensor_info { + char name[MAX_SENSOR_NAME]; + uint8_t flash_enabled; +}; +#endif /* __LINUX_MSM_CAMERA_H */ diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 339b2ea173ddf..dc74690112e5e 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -76,6 +76,8 @@ enum { HCI_INQUIRY, HCI_RAW, + + HCI_RESET, }; /* HCI ioctl defines */ diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 1322695beb529..981c92f031bdf 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -413,7 +413,7 @@ struct station_parameters { * @STATION_INFO_PLID: @plid filled * @STATION_INFO_PLINK_STATE: @plink_state filled * @STATION_INFO_SIGNAL: @signal filled - * @STATION_INFO_TX_BITRATE: @tx_bitrate fields are filled + * @STATION_INFO_TX_BITRATE: @txrate fields are filled * (tx_bitrate, tx_bitrate_flags and tx_bitrate_mcs) * @STATION_INFO_RX_PACKETS: @rx_packets filled * @STATION_INFO_TX_PACKETS: @tx_packets filled @@ -421,6 +421,10 @@ struct station_parameters { * @STATION_INFO_TX_FAILED: @tx_failed filled * @STATION_INFO_RX_DROP_MISC: @rx_dropped_misc filled * @STATION_INFO_SIGNAL_AVG: @signal_avg filled + * @STATION_INFO_RX_BITRATE: @rxrate fields are filled + * @STATION_INFO_BSS_PARAM: @bss_param filled + * @STATION_INFO_CONNECTED_TIME: @connected_time filled + * @STATION_INFO_ASSOC_REQ_IES: @assoc_req_ies filled */ enum station_info_flags { STATION_INFO_INACTIVE_TIME = 1<<0, @@ -437,6 +441,10 @@ enum station_info_flags { STATION_INFO_TX_FAILED = 1<<11, STATION_INFO_RX_DROP_MISC = 1<<12, STATION_INFO_SIGNAL_AVG = 1<<13, + STATION_INFO_RX_BITRATE = 1<<14, + STATION_INFO_BSS_PARAM = 1<<15, + STATION_INFO_CONNECTED_TIME = 1<<16, + STATION_INFO_ASSOC_REQ_IES = 1<<17 }; /** @@ -470,12 +478,44 @@ struct rate_info { u16 legacy; }; +/** + * enum station_info_rate_flags - bitrate info flags + * + * Used by the driver to indicate the specific rate transmission + * type for 802.11n transmissions. 
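To make the intended use of the msm_camera.h ioctls defined above concrete, a user-space configuration daemon would query the sensor roughly as follows. The device node path is a placeholder for whatever node the MSM camera driver registers on a given board; only the ioctl number and struct msm_camsensor_info come from this header.

/* User-space sketch: fetch the sensor name and flash capability via
 * MSM_CAM_IOCTL_GET_SENSOR_INFO.  The device node path is a placeholder. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <media/msm_camera.h>

int query_msm_sensor(const char *devnode)
{
	struct msm_camsensor_info info;
	int fd = open(devnode, O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, MSM_CAM_IOCTL_GET_SENSOR_INFO, &info) < 0) {
		close(fd);
		return -1;
	}
	printf("sensor %s, flash %s\n", info.name,
	       info.flash_enabled ? "supported" : "not supported");
	close(fd);
	return 0;
}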
+ * + * @BSS_PARAM_FLAGS_CTS_PROT: whether CTS protection is enabled + * @BSS_PARAM_FLAGS_SHORT_PREAMBLE: whether short preamble is enabled + * @BSS_PARAM_FLAGS_SHORT_SLOT_TIME: whether short slot time is enabled + */ +enum bss_param_flags { + BSS_PARAM_FLAGS_CTS_PROT = 1<<0, + BSS_PARAM_FLAGS_SHORT_PREAMBLE = 1<<1, + BSS_PARAM_FLAGS_SHORT_SLOT_TIME = 1<<2, +}; + +/** + * struct sta_bss_parameters - BSS parameters for the attached station + * + * Information about the currently associated BSS + * + * @flags: bitflag of flags from &enum bss_param_flags + * @dtim_period: DTIM period for the BSS + * @beacon_interval: beacon interval + */ +struct sta_bss_parameters { + u8 flags; + u8 dtim_period; + u16 beacon_interval; +}; + /** * struct station_info - station information * * Station information filled by driver for get_station() and dump_station. * * @filled: bitflag of flags from &enum station_info_flags + * @connected_time: time(in secs) since a station is last connected * @inactive_time: time since last station activity (tx/rx) in milliseconds * @rx_bytes: bytes received from this station * @tx_bytes: bytes transmitted to this station @@ -494,9 +534,15 @@ struct rate_info { * This number should increase every time the list of stations * changes, i.e. when a station is added or removed, so that * userspace can tell whether it got a consistent snapshot. + * @assoc_req_ies: IEs from (Re)Association Request. + * This is used only when in AP mode with drivers that do not use + * user space MLME/SME implementation. The information is provided for + * the cfg80211_new_sta() calls to notify user space of the IEs. + * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets. */ struct station_info { u32 filled; + u32 connected_time; u32 inactive_time; u32 rx_bytes; u32 tx_bytes; @@ -506,13 +552,18 @@ struct station_info { s8 signal; s8 signal_avg; struct rate_info txrate; + struct rate_info rxrate; u32 rx_packets; u32 tx_packets; u32 tx_retries; u32 tx_failed; u32 rx_dropped_misc; + struct sta_bss_parameters bss_param; int generation; + + const u8 *assoc_req_ies; + size_t assoc_req_ies_len; }; /** @@ -2653,6 +2704,15 @@ void cfg80211_remain_on_channel_expired(struct net_device *dev, void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr, struct station_info *sinfo, gfp_t gfp); +/** + * cfg80211_del_sta - notify userspace about deletion of a station + * + * @dev: the netdev + * @mac_addr: the station's address + * @gfp: allocation flags + */ +void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp); + /** * cfg80211_rx_mgmt - notification of received, unprocessed management frame * @dev: network device diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 85867dcde3352..bfd36ff14c991 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -461,7 +461,7 @@ static inline int scsi_device_qas(struct scsi_device *sdev) } static inline int scsi_device_enclosure(struct scsi_device *sdev) { - return sdev->inquiry[6] & (1<<6); + return sdev->inquiry ? 
(sdev->inquiry[6] & (1<<6)) : 1; } static inline int scsi_device_protection(struct scsi_device *sdev) diff --git a/include/sound/pcm.h b/include/sound/pcm.h index e731f8d719347..ec2678131e112 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -1030,9 +1030,7 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_s #define snd_pcm_lib_mmap_iomem NULL #endif -int snd_pcm_lib_mmap_noncached(struct snd_pcm_substream *substream, - struct vm_area_struct *area); -#define snd_pcm_lib_mmap_vmalloc snd_pcm_lib_mmap_noncached +#define snd_pcm_lib_mmap_vmalloc NULL static inline void snd_pcm_limit_isa_dma_size(int dma, size_t *max) { diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index 8031769ac4858..60f94fbd3a10d 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -45,25 +45,25 @@ /* platform domain */ #define SND_SOC_DAPM_INPUT(wname) \ { .id = snd_soc_dapm_input, .name = wname, .kcontrols = NULL, \ - .num_kcontrols = 0} + .num_kcontrols = 0, .reg = SND_SOC_NOPM } #define SND_SOC_DAPM_OUTPUT(wname) \ { .id = snd_soc_dapm_output, .name = wname, .kcontrols = NULL, \ - .num_kcontrols = 0} + .num_kcontrols = 0, .reg = SND_SOC_NOPM } #define SND_SOC_DAPM_MIC(wname, wevent) \ { .id = snd_soc_dapm_mic, .name = wname, .kcontrols = NULL, \ - .num_kcontrols = 0, .event = wevent, \ + .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD} #define SND_SOC_DAPM_HP(wname, wevent) \ { .id = snd_soc_dapm_hp, .name = wname, .kcontrols = NULL, \ - .num_kcontrols = 0, .event = wevent, \ + .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ .event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD} #define SND_SOC_DAPM_SPK(wname, wevent) \ { .id = snd_soc_dapm_spk, .name = wname, .kcontrols = NULL, \ - .num_kcontrols = 0, .event = wevent, \ + .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ .event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD} #define SND_SOC_DAPM_LINE(wname, wevent) \ { .id = snd_soc_dapm_line, .name = wname, .kcontrols = NULL, \ - .num_kcontrols = 0, .event = wevent, \ + .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ .event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD} /* path domain */ @@ -177,11 +177,11 @@ /* events that are pre and post DAPM */ #define SND_SOC_DAPM_PRE(wname, wevent) \ { .id = snd_soc_dapm_pre, .name = wname, .kcontrols = NULL, \ - .num_kcontrols = 0, .event = wevent, \ + .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD} #define SND_SOC_DAPM_POST(wname, wevent) \ { .id = snd_soc_dapm_post, .name = wname, .kcontrols = NULL, \ - .num_kcontrols = 0, .event = wevent, \ + .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ .event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD} /* stream domain */ diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 0828b6c8610ae..95c9d9810dc2f 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -98,6 +98,7 @@ enum transport_state_table { TRANSPORT_REMOVE = 14, TRANSPORT_FREE = 15, TRANSPORT_NEW_CMD_MAP = 16, + TRANSPORT_FREE_CMD_INTR = 17, }; /* Used for struct se_cmd->se_cmd_flags */ diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index 2e8ec51f06155..379ae61912699 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ 
-170,6 +170,7 @@ extern int transport_generic_handle_cdb_map(struct se_cmd *); extern int transport_generic_handle_data(struct se_cmd *); extern void transport_new_cmd_failure(struct se_cmd *); extern int transport_generic_handle_tmr(struct se_cmd *); +extern void transport_generic_free_cmd_intr(struct se_cmd *); extern void __transport_stop_task_timer(struct se_task *, unsigned long *); extern unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]); extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32, diff --git a/include/trace/events/cpufreq_interactive.h b/include/trace/events/cpufreq_interactive.h new file mode 100644 index 0000000000000..951e6ca12da81 --- /dev/null +++ b/include/trace/events/cpufreq_interactive.h @@ -0,0 +1,112 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM cpufreq_interactive + +#if !defined(_TRACE_CPUFREQ_INTERACTIVE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_CPUFREQ_INTERACTIVE_H + +#include + +DECLARE_EVENT_CLASS(set, + TP_PROTO(u32 cpu_id, unsigned long targfreq, + unsigned long actualfreq), + TP_ARGS(cpu_id, targfreq, actualfreq), + + TP_STRUCT__entry( + __field( u32, cpu_id ) + __field(unsigned long, targfreq ) + __field(unsigned long, actualfreq ) + ), + + TP_fast_assign( + __entry->cpu_id = (u32) cpu_id; + __entry->targfreq = targfreq; + __entry->actualfreq = actualfreq; + ), + + TP_printk("cpu=%u targ=%lu actual=%lu", + __entry->cpu_id, __entry->targfreq, + __entry->actualfreq) +); + +DEFINE_EVENT(set, cpufreq_interactive_setspeed, + TP_PROTO(u32 cpu_id, unsigned long targfreq, + unsigned long actualfreq), + TP_ARGS(cpu_id, targfreq, actualfreq) +); + +DECLARE_EVENT_CLASS(loadeval, + TP_PROTO(unsigned long cpu_id, unsigned long load, + unsigned long curtarg, unsigned long curactual, + unsigned long newtarg), + TP_ARGS(cpu_id, load, curtarg, curactual, newtarg), + + TP_STRUCT__entry( + __field(unsigned long, cpu_id ) + __field(unsigned long, load ) + __field(unsigned long, curtarg ) + __field(unsigned long, curactual ) + __field(unsigned long, newtarg ) + ), + + TP_fast_assign( + __entry->cpu_id = cpu_id; + __entry->load = load; + __entry->curtarg = curtarg; + __entry->curactual = curactual; + __entry->newtarg = newtarg; + ), + + TP_printk("cpu=%lu load=%lu cur=%lu actual=%lu targ=%lu", + __entry->cpu_id, __entry->load, __entry->curtarg, + __entry->curactual, __entry->newtarg) +); + +DEFINE_EVENT(loadeval, cpufreq_interactive_target, + TP_PROTO(unsigned long cpu_id, unsigned long load, + unsigned long curtarg, unsigned long curactual, + unsigned long newtarg), + TP_ARGS(cpu_id, load, curtarg, curactual, newtarg) +); + +DEFINE_EVENT(loadeval, cpufreq_interactive_already, + TP_PROTO(unsigned long cpu_id, unsigned long load, + unsigned long curtarg, unsigned long curactual, + unsigned long newtarg), + TP_ARGS(cpu_id, load, curtarg, curactual, newtarg) +); + +DEFINE_EVENT(loadeval, cpufreq_interactive_notyet, + TP_PROTO(unsigned long cpu_id, unsigned long load, + unsigned long curtarg, unsigned long curactual, + unsigned long newtarg), + TP_ARGS(cpu_id, load, curtarg, curactual, newtarg) +); + +TRACE_EVENT(cpufreq_interactive_boost, + TP_PROTO(const char *s), + TP_ARGS(s), + TP_STRUCT__entry( + __string(s, s) + ), + TP_fast_assign( + __assign_str(s, s); + ), + TP_printk("%s", __get_str(s)) +); + +TRACE_EVENT(cpufreq_interactive_unboost, + TP_PROTO(const char *s), + TP_ARGS(s), + TP_STRUCT__entry( + __string(s, s) + ), + TP_fast_assign( + __assign_str(s, s); + ), + TP_printk("%s", __get_str(s)) +); + 
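The trace events declared above are meant to be emitted by the interactive cpufreq governor. The fragment below is a hedged sketch of where such calls would sit; the governor function and its arguments are invented for the illustration and are not part of this patch.

/* Hypothetical governor fragment showing the trace events above in use.
 * CREATE_TRACE_POINTS must be defined in exactly one translation unit
 * before including the trace header. */
#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static void example_choose_speed(unsigned int cpu, unsigned long load,
				 unsigned long cur, unsigned long actual,
				 unsigned long target)
{
	if (target == cur) {
		/* already at the wanted frequency */
		trace_cpufreq_interactive_already(cpu, load, cur, actual, target);
		return;
	}
	trace_cpufreq_interactive_target(cpu, load, cur, actual, target);
	/* ... request the frequency change from cpufreq here ... */
	trace_cpufreq_interactive_setspeed(cpu, target, actual);
}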
+#endif /* _TRACE_CPUFREQ_INTERACTIVE_H */ + +/* This part must be outside protection */ +#include diff --git a/include/trace/events/sync.h b/include/trace/events/sync.h new file mode 100755 index 0000000000000..f31bc63ca65d3 --- /dev/null +++ b/include/trace/events/sync.h @@ -0,0 +1,82 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM sync + +#if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_SYNC_H + +#include +#include + +TRACE_EVENT(sync_timeline, + TP_PROTO(struct sync_timeline *timeline), + + TP_ARGS(timeline), + + TP_STRUCT__entry( + __string(name, timeline->name) + __array(char, value, 32) + ), + + TP_fast_assign( + __assign_str(name, timeline->name); + if (timeline->ops->timeline_value_str) { + timeline->ops->timeline_value_str(timeline, + __entry->value, + sizeof(__entry->value)); + } else { + __entry->value[0] = '\0'; + } + ), + + TP_printk("name=%s value=%s", __get_str(name), __entry->value) +); + +TRACE_EVENT(sync_wait, + TP_PROTO(struct sync_fence *fence, int begin), + + TP_ARGS(fence, begin), + + TP_STRUCT__entry( + __string(name, fence->name) + __field(s32, status) + __field(u32, begin) + ), + + TP_fast_assign( + __assign_str(name, fence->name); + __entry->status = fence->status; + __entry->begin = begin; + ), + + TP_printk("%s name=%s state=%d", __entry->begin ? "begin" : "end", + __get_str(name), __entry->status) +); + +TRACE_EVENT(sync_pt, + TP_PROTO(struct sync_pt *pt), + + TP_ARGS(pt), + + TP_STRUCT__entry( + __string(timeline, pt->parent->name) + __array(char, value, 32) + ), + + TP_fast_assign( + __assign_str(timeline, pt->parent->name); + if (pt->parent->ops->pt_value_str) { + pt->parent->ops->pt_value_str(pt, + __entry->value, + sizeof(__entry->value)); + } else { + __entry->value[0] = '\0'; + } + ), + + TP_printk("name=%s value=%s", __get_str(timeline), __entry->value) + ); + +#endif /* if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) */ + +/* This part must be outside protection */ +#include diff --git a/init/Kconfig b/init/Kconfig index ce8838396fdf2..c1cbe97c33c66 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -30,6 +30,19 @@ config IRQ_WORK menu "General setup" +config SCHED_BFS + bool "BFS cpu scheduler" + ---help--- + The Brain Fuck CPU Scheduler for excellent interactivity and + responsiveness on the desktop and solid scalability on normal + hardware. Not recommended for 4096 CPUs. + + Currently incompatible with the Group CPU scheduler, and RCU TORTURE + TEST so these options are disabled. + + Say Y here. + default y + config EXPERIMENTAL bool "Prompt for development and/or incomplete code/drivers" ---help--- @@ -619,6 +632,7 @@ config PROC_PID_CPUSET config CGROUP_CPUACCT bool "Simple CPU accounting cgroup subsystem" + depends on !SCHED_BFS help Provides a simple Resource Controller for monitoring the total CPU consumed by the tasks in a cgroup. @@ -685,7 +699,7 @@ config CGROUP_MEM_RES_CTLR_SWAP_ENABLED menuconfig CGROUP_SCHED bool "Group CPU scheduler" - depends on EXPERIMENTAL + depends on EXPERIMENTAL && !SCHED_BFS default n help This feature lets CPU scheduler recognize task groups and control CPU @@ -799,6 +813,7 @@ endif # NAMESPACES config SCHED_AUTOGROUP bool "Automatic process group scheduling" + depends on !SCHED_BFS select EVENTFD select CGROUPS select CGROUP_SCHED @@ -1224,6 +1239,7 @@ config SLAB per cpu and per node queues. 
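Returning to the sync trace events introduced above, they are designed to bracket fence waits and to snapshot timeline values. The sketch below shows the intended call sites; the struct sync_fence and struct sync_timeline types and the <linux/sync.h> header come from the Android sync driver and are assumptions as far as this trace header is concerned.

/* Illustrative only: bracket a fence wait with sync_wait and report a
 * timeline update with sync_timeline. */
#include <linux/sync.h>
#include <trace/events/sync.h>

static void example_trace_fence_wait(struct sync_fence *fence)
{
	trace_sync_wait(fence, 1);	/* begin of the wait */
	/* ... block until the fence signals or the wait times out ... */
	trace_sync_wait(fence, 0);	/* end of the wait */
}

static void example_trace_timeline_signal(struct sync_timeline *obj)
{
	trace_sync_timeline(obj);	/* record the current timeline value */
}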
config SLUB + depends on BROKEN || NUMA || !DISCONTIGMEM bool "SLUB (Unqueued Allocator)" help SLUB is a slab allocator that minimizes cache line usage diff --git a/init/main.c b/init/main.c index 33c37c379e964..37a275cf99f15 100644 --- a/init/main.c +++ b/init/main.c @@ -668,8 +668,8 @@ asmlinkage void __init start_kernel(void) #endif page_cgroup_init(); enable_debug_pagealloc(); - kmemleak_init(); debug_objects_mem_init(); + kmemleak_init(); setup_per_cpu_pageset(); numa_policy_init(); if (late_time_init) @@ -832,6 +832,7 @@ static noinline int init_post(void) system_state = SYSTEM_RUNNING; numa_default_policy(); + print_scheduler_version(); current->signal->flags |= SIGNAL_UNKILLABLE; diff --git a/kernel/cgroup.c b/kernel/cgroup.c index d96bd1eb5627d..d83723e7ee05c 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1820,10 +1820,8 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) /* Update the css_set linked lists if we're using them */ write_lock(&css_set_lock); - if (!list_empty(&tsk->cg_list)) { - list_del(&tsk->cg_list); - list_add(&tsk->cg_list, &newcg->tasks); - } + if (!list_empty(&tsk->cg_list)) + list_move(&tsk->cg_list, &newcg->tasks); write_unlock(&css_set_lock); for_each_subsys(root, ss) { @@ -3670,12 +3668,12 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry) spin_lock(&release_list_lock); set_bit(CGRP_REMOVED, &cgrp->flags); if (!list_empty(&cgrp->release_list)) - list_del(&cgrp->release_list); + list_del_init(&cgrp->release_list); spin_unlock(&release_list_lock); cgroup_lock_hierarchy(cgrp->root); /* delete this cgroup from parent->children */ - list_del(&cgrp->sibling); + list_del_init(&cgrp->sibling); cgroup_unlock_hierarchy(cgrp->root); d = dget(cgrp->dentry); @@ -3893,7 +3891,7 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss) subsys[ss->subsys_id] = NULL; /* remove subsystem from rootnode's list of subsystems */ - list_del(&ss->sibling); + list_del_init(&ss->sibling); /* * disentangle the css from all css_sets attached to the dummytop. 
as diff --git a/kernel/cpu.c b/kernel/cpu.c index 156cc55561408..559457492dc04 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -595,3 +595,23 @@ void init_cpu_online(const struct cpumask *src) { cpumask_copy(to_cpumask(cpu_online_bits), src); } + +static ATOMIC_NOTIFIER_HEAD(idle_notifier); + +void idle_notifier_register(struct notifier_block *n) +{ + atomic_notifier_chain_register(&idle_notifier, n); +} +EXPORT_SYMBOL_GPL(idle_notifier_register); + +void idle_notifier_unregister(struct notifier_block *n) +{ + atomic_notifier_chain_unregister(&idle_notifier, n); +} +EXPORT_SYMBOL_GPL(idle_notifier_unregister); + +void idle_notifier_call_chain(unsigned long val) +{ + atomic_notifier_call_chain(&idle_notifier, val, NULL); +} +EXPORT_SYMBOL_GPL(idle_notifier_call_chain); diff --git a/kernel/cpuset.c b/kernel/cpuset.c index e210b3d30085f..6c41a147c473c 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -1382,9 +1382,10 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont, int ret; struct cpuset *cs = cgroup_cs(cont); - if ((current != task) && (!capable(CAP_SYS_ADMIN))) { + if ((current != tsk) && (!capable(CAP_SYS_ADMIN))) { const struct cred *cred = current_cred(), *tcred; + tcred = __task_cred(tsk); if (cred->euid != tcred->uid && cred->euid != tcred->suid) return -EPERM; } diff --git a/kernel/delayacct.c b/kernel/delayacct.c index ead9b610aa71a..44cc3d77e6498 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c @@ -128,7 +128,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) */ t1 = tsk->sched_info.pcount; t2 = tsk->sched_info.run_delay; - t3 = tsk->se.sum_exec_runtime; + t3 = tsk_seruntime(tsk); d->cpu_count += t1; diff --git a/kernel/exit.c b/kernel/exit.c index f9a45ebcc7b17..9a3c75f226a4a 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -132,7 +132,7 @@ static void __exit_signal(struct task_struct *tsk) sig->inblock += task_io_get_inblock(tsk); sig->oublock += task_io_get_oublock(tsk); task_io_accounting_add(&sig->ioac, &tsk->ioac); - sig->sum_sched_runtime += tsk->se.sum_exec_runtime; + sig->sum_sched_runtime += tsk_seruntime(tsk); } sig->nr_threads--; @@ -1015,7 +1015,7 @@ NORET_TYPE void do_exit(long code) /* * FIXME: do that only when needed, using sched_exit tracepoint */ - flush_ptrace_hw_breakpoint(tsk); + ptrace_put_breakpoints(tsk); exit_notify(tsk, group_dead); #ifdef CONFIG_NUMA diff --git a/kernel/futex.c b/kernel/futex.c index b766d28accd6b..902fc39067a29 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -235,6 +235,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key) struct mm_struct *mm = current->mm; struct page *page, *page_head; int err; + struct vm_area_struct *vma; /* * The futex address must be "naturally" aligned. @@ -260,6 +261,37 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key) return 0; } + /* + * The futex is hashed differently depending on whether + * it's in a shared or private mapping. So check vma first. + */ + vma = find_extend_vma(mm, address); + if (unlikely(!vma)) + return -EFAULT; + + /* + * Permissions. + */ + if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ)) + return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES; + + /* + * Private mappings are handled in a simple way. + * + * NOTE: When userspace waits on a MAP_SHARED mapping, even if + * it's a read-only handle, it's expected that futexes attach to + * the object not the particular process. 
Therefore we use + * VM_MAYSHARE here, not VM_SHARED which is restricted to shared + * mappings of _writable_ handles. + */ + if (likely(!(vma->vm_flags & VM_MAYSHARE))) { + key->both.offset |= FUT_OFF_MMSHARED; /* reference taken on mm */ + key->private.mm = mm; + key->private.address = address; + get_futex_key_refs(key); + return 0; + } + again: err = get_user_pages_fast(address, 1, 1, &page); if (err < 0) @@ -381,15 +413,16 @@ static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, return NULL; } -static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval) +static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr, + u32 uval, u32 newval) { - u32 curval; + int ret; pagefault_disable(); - curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); + ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval); pagefault_enable(); - return curval; + return ret; } static int get_futex_value_locked(u32 *dest, u32 __user *from) @@ -674,7 +707,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, struct task_struct *task, int set_waiters) { int lock_taken, ret, ownerdied = 0; - u32 uval, newval, curval; + u32 uval, newval, curval, vpid = task_pid_vnr(task); retry: ret = lock_taken = 0; @@ -684,19 +717,17 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, * (by doing a 0 -> TID atomic cmpxchg), while holding all * the locks. It will most likely not succeed. */ - newval = task_pid_vnr(task); + newval = vpid; if (set_waiters) newval |= FUTEX_WAITERS; - curval = cmpxchg_futex_value_locked(uaddr, 0, newval); - - if (unlikely(curval == -EFAULT)) + if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval))) return -EFAULT; /* * Detect deadlocks. */ - if ((unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(task)))) + if ((unlikely((curval & FUTEX_TID_MASK) == vpid))) return -EDEADLK; /* @@ -723,14 +754,12 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, */ if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) { /* Keep the OWNER_DIED bit */ - newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(task); + newval = (curval & ~FUTEX_TID_MASK) | vpid; ownerdied = 0; lock_taken = 1; } - curval = cmpxchg_futex_value_locked(uaddr, uval, newval); - - if (unlikely(curval == -EFAULT)) + if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))) return -EFAULT; if (unlikely(curval != uval)) goto retry; @@ -843,9 +872,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) newval = FUTEX_WAITERS | task_pid_vnr(new_owner); - curval = cmpxchg_futex_value_locked(uaddr, uval, newval); - - if (curval == -EFAULT) + if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) ret = -EFAULT; else if (curval != uval) ret = -EINVAL; @@ -880,10 +907,8 @@ static int unlock_futex_pi(u32 __user *uaddr, u32 uval) * There is no waiter, so we unlock the futex. The owner died * bit has not to be preserved here. 
We are the owner: */ - oldval = cmpxchg_futex_value_locked(uaddr, uval, 0); - - if (oldval == -EFAULT) - return oldval; + if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0)) + return -EFAULT; if (oldval != uval) return -EAGAIN; @@ -1578,9 +1603,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, while (1) { newval = (uval & FUTEX_OWNER_DIED) | newtid; - curval = cmpxchg_futex_value_locked(uaddr, uval, newval); - - if (curval == -EFAULT) + if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) goto handle_fault; if (curval == uval) break; @@ -1886,7 +1909,7 @@ static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, restart->futex.val = val; restart->futex.time = abs_time->tv64; restart->futex.bitset = bitset; - restart->futex.flags = flags; + restart->futex.flags = flags | FLAGS_HAS_TIMEOUT; ret = -ERESTART_RESTARTBLOCK; @@ -2046,9 +2069,9 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) { struct futex_hash_bucket *hb; struct futex_q *this, *next; - u32 uval; struct plist_head *head; union futex_key key = FUTEX_KEY_INIT; + u32 uval, vpid = task_pid_vnr(current); int ret; retry: @@ -2057,7 +2080,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) /* * We release only a lock we actually own: */ - if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current)) + if ((uval & FUTEX_TID_MASK) != vpid) return -EPERM; ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key); @@ -2072,17 +2095,14 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) * again. If it succeeds then we can return without waking * anyone else up: */ - if (!(uval & FUTEX_OWNER_DIED)) - uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0); - - - if (unlikely(uval == -EFAULT)) + if (!(uval & FUTEX_OWNER_DIED) && + cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0)) goto pi_faulted; /* * Rare case: we managed to release the lock atomically, * no need to wake anyone else up: */ - if (unlikely(uval == task_pid_vnr(current))) + if (unlikely(uval == vpid)) goto out_unlock; /* @@ -2463,9 +2483,7 @@ int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) * userspace. */ mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; - nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval); - - if (nval == -EFAULT) + if (futex_atomic_cmpxchg_inatomic(&nval, uaddr, uval, mval)) return -1; if (nval != uval) @@ -2678,8 +2696,7 @@ static int __init futex_init(void) * implementation, the non-functional ones will return * -ENOSYS. 
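The futex hunks above change cmpxchg_futex_value_locked() so that its return value only signals a fault, while the old futex word is written through the new first argument. Condensed into one helper, the calling pattern now used throughout kernel/futex.c looks like this (the helper itself is just an illustration):

/* Sketch of the new calling convention: a non-zero return means the
 * user-space access faulted; otherwise curval holds the old value. */
static int example_update_futex(u32 __user *uaddr, u32 expected, u32 newval)
{
	u32 curval;

	if (cmpxchg_futex_value_locked(&curval, uaddr, expected, newval))
		return -EFAULT;		/* faulted on the futex word */
	if (curval != expected)
		return -EAGAIN;		/* raced with another task */
	return 0;			/* exchange succeeded */
}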
*/ - curval = cmpxchg_futex_value_locked(NULL, 0, 0); - if (curval == -EFAULT) + if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT) futex_cmpxchg_enabled = 1; for (i = 0; i < ARRAY_SIZE(futex_queues); i++) { diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 0c8d7c0486154..5afa04ce856b9 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -84,14 +84,9 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base) { ktime_t xtim, tomono; - struct timespec xts, tom; - unsigned long seq; + struct timespec xts, tom, slp; - do { - seq = read_seqbegin(&xtime_lock); - xts = __current_kernel_time(); - tom = __get_wall_to_monotonic(); - } while (read_seqretry(&xtime_lock, seq)); + get_xtime_and_monotonic_and_sleep_offset(&xts, &tom, &slp); xtim = timespec_to_ktime(xts); tomono = timespec_to_ktime(tom); @@ -611,16 +606,13 @@ static int hrtimer_reprogram(struct hrtimer *timer, static void retrigger_next_event(void *arg) { struct hrtimer_cpu_base *base; - struct timespec realtime_offset, wtm; - unsigned long seq; + struct timespec realtime_offset, wtm, sleep; if (!hrtimer_hres_active()) return; - do { - seq = read_seqbegin(&xtime_lock); - wtm = __get_wall_to_monotonic(); - } while (read_seqretry(&xtime_lock, seq)); + get_xtime_and_monotonic_and_sleep_offset(&realtime_offset, &wtm, + &sleep); set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec); base = &__get_cpu_var(hrtimer_bases); diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c index 1df62ef4713bf..bc56498e7b90d 100644 --- a/kernel/irq/pm.c +++ b/kernel/irq/pm.c @@ -50,7 +50,7 @@ void resume_device_irqs(void) struct irq_desc *desc; int irq; - for_each_irq_desc(irq, desc) { + for_each_irq_desc_reverse(irq, desc) { unsigned long flags; if (!(desc->status & IRQ_SUSPENDED)) diff --git a/kernel/kexec.c b/kernel/kexec.c index ec19b92c7ebda..1914fd5daf236 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include @@ -1529,8 +1530,7 @@ int kernel_kexec(void) if (error) goto Enable_cpus; local_irq_disable(); - /* Suspend system devices */ - error = sysdev_suspend(PMSG_FREEZE); + error = syscore_suspend(); if (error) goto Enable_irqs; } else @@ -1545,7 +1545,7 @@ int kernel_kexec(void) #ifdef CONFIG_KEXEC_JUMP if (kexec_image->preserve_context) { - sysdev_resume(); + syscore_resume(); Enable_irqs: local_irq_enable(); Enable_cpus: diff --git a/kernel/kthread.c b/kernel/kthread.c index c55afba990a38..426f7b896af4f 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -184,7 +184,9 @@ void kthread_bind(struct task_struct *p, unsigned int cpu) } p->cpus_allowed = cpumask_of_cpu(cpu); +#ifndef CONFIG_SCHED_BFS p->rt.nr_cpus_allowed = 1; +#endif p->flags |= PF_THREAD_BOUND; } EXPORT_SYMBOL(kthread_bind); diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 656222fcf767e..b2536bd2b6b53 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -62,7 +62,8 @@ static struct srcu_struct pmus_srcu; */ int sysctl_perf_event_paranoid __read_mostly = 1; -int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */ +/* Minimum for 512 kiB + 1 user control page */ +int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */ /* * max perf event sample rate @@ -4567,7 +4568,7 @@ static int perf_exclude_event(struct perf_event *event, struct pt_regs *regs) { if (event->hw.state & PERF_HES_STOPPED) - return 0; + return 1; if (regs) { if 
(event->attr.exclude_user && user_mode(regs)) @@ -4923,6 +4924,8 @@ static int perf_tp_event_match(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs) { + if (event->hw.state & PERF_HES_STOPPED) + return 0; /* * All tracepoints are from kernel-space. */ @@ -5914,6 +5917,11 @@ SYSCALL_DEFINE5(perf_event_open, goto err_alloc; } + if (task) { + put_task_struct(task); + task = NULL; + } + /* * Look up the group leader (we will attach this event to it): */ @@ -6113,17 +6121,20 @@ __perf_event_exit_task(struct perf_event *child_event, struct perf_event_context *child_ctx, struct task_struct *child) { - struct perf_event *parent_event; + if (child_event->parent) { + raw_spin_lock_irq(&child_ctx->lock); + perf_group_detach(child_event); + raw_spin_unlock_irq(&child_ctx->lock); + } perf_event_remove_from_context(child_event); - parent_event = child_event->parent; /* - * It can happen that parent exits first, and has events + * It can happen that the parent exits first, and has events * that are still around due to the child reference. These - * events need to be zapped - but otherwise linger. + * events need to be zapped. */ - if (parent_event) { + if (child_event->parent) { sync_child_event(child_event, child); free_event(child_event); } diff --git a/kernel/pid.c b/kernel/pid.c index 39b65b69584f5..6aeebc20d34b2 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -217,11 +217,14 @@ static int alloc_pidmap(struct pid_namespace *pid_ns) return -1; } -int next_pidmap(struct pid_namespace *pid_ns, int last) +int next_pidmap(struct pid_namespace *pid_ns, unsigned int last) { int offset; struct pidmap *map, *end; + if (last >= PID_MAX_LIMIT) + return -1; + offset = (last + 1) & BITS_PER_PAGE_MASK; map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE]; end = &pid_ns->pidmap[PIDMAP_ENTRIES]; diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c index aeaa7f8468216..6a8fad82a3ad4 100644 --- a/kernel/pm_qos_params.c +++ b/kernel/pm_qos_params.c @@ -53,11 +53,17 @@ enum pm_qos_type { PM_QOS_MIN /* return the smallest value */ }; +/* + * Note: The lockless read path depends on the CPU accessing + * target_value atomically. 
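The pm_qos hunk that follows caches the aggregated target in pm_qos_object so that pm_qos_request() becomes a plain aligned 32-bit read instead of taking the spinlock. That makes it cheap enough to call from hot paths such as idle entry; the check below is a hypothetical example with an arbitrary 200 us threshold.

/* Hypothetical idle-path check built on the lockless pm_qos_request().
 * The threshold, the function name and the include path are assumptions
 * made for the example. */
#include <linux/pm_qos_params.h>

static int example_deep_idle_allowed(void)
{
	return pm_qos_request(PM_QOS_CPU_DMA_LATENCY) > 200;
}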
Atomic access is only guaranteed on all CPU + * types linux supports for 32 bit quantites + */ struct pm_qos_object { struct plist_head requests; struct blocking_notifier_head *notifiers; struct miscdevice pm_qos_power_miscdev; char *name; + s32 target_value; /* Do not change to 64 bit */ s32 default_value; enum pm_qos_type type; }; @@ -70,7 +76,8 @@ static struct pm_qos_object cpu_dma_pm_qos = { .requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests, pm_qos_lock), .notifiers = &cpu_dma_lat_notifier, .name = "cpu_dma_latency", - .default_value = 2000 * USEC_PER_SEC, + .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE, + .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE, .type = PM_QOS_MIN, }; @@ -79,7 +86,8 @@ static struct pm_qos_object network_lat_pm_qos = { .requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests, pm_qos_lock), .notifiers = &network_lat_notifier, .name = "network_latency", - .default_value = 2000 * USEC_PER_SEC, + .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE, + .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE, .type = PM_QOS_MIN }; @@ -89,7 +97,8 @@ static struct pm_qos_object network_throughput_pm_qos = { .requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests, pm_qos_lock), .notifiers = &network_throughput_notifier, .name = "network_throughput", - .default_value = 0, + .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE, + .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE, .type = PM_QOS_MAX, }; @@ -132,6 +141,16 @@ static inline int pm_qos_get_value(struct pm_qos_object *o) } } +static inline s32 pm_qos_read_value(struct pm_qos_object *o) +{ + return o->target_value; +} + +static inline void pm_qos_set_value(struct pm_qos_object *o, s32 value) +{ + o->target_value = value; +} + static void update_target(struct pm_qos_object *o, struct plist_node *node, int del, int value) { @@ -156,6 +175,7 @@ static void update_target(struct pm_qos_object *o, struct plist_node *node, plist_add(node, &o->requests); } curr_value = pm_qos_get_value(o); + pm_qos_set_value(o, curr_value); spin_unlock_irqrestore(&pm_qos_lock, flags); if (prev_value != curr_value) @@ -190,18 +210,11 @@ static int find_pm_qos_object_by_minor(int minor) * pm_qos_request - returns current system wide qos expectation * @pm_qos_class: identification of which qos value is requested * - * This function returns the current target value in an atomic manner. + * This function returns the current target value. 
*/ int pm_qos_request(int pm_qos_class) { - unsigned long flags; - int value; - - spin_lock_irqsave(&pm_qos_lock, flags); - value = pm_qos_get_value(pm_qos_array[pm_qos_class]); - spin_unlock_irqrestore(&pm_qos_lock, flags); - - return value; + return pm_qos_read_value(pm_qos_array[pm_qos_class]); } EXPORT_SYMBOL_GPL(pm_qos_request); diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 05bb7173850e0..a61830b1af48c 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -248,7 +248,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) do { times->utime = cputime_add(times->utime, t->utime); times->stime = cputime_add(times->stime, t->stime); - times->sum_exec_runtime += t->se.sum_exec_runtime; + times->sum_exec_runtime += tsk_seruntime(t); } while_each_thread(tsk, t); out: rcu_read_unlock(); @@ -508,7 +508,7 @@ static void cleanup_timers(struct list_head *head, void posix_cpu_timers_exit(struct task_struct *tsk) { cleanup_timers(tsk->cpu_timers, - tsk->utime, tsk->stime, tsk->se.sum_exec_runtime); + tsk->utime, tsk->stime, tsk_seruntime(tsk)); } void posix_cpu_timers_exit_group(struct task_struct *tsk) @@ -518,7 +518,7 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk) cleanup_timers(tsk->signal->cpu_timers, cputime_add(tsk->utime, sig->utime), cputime_add(tsk->stime, sig->stime), - tsk->se.sum_exec_runtime + sig->sum_sched_runtime); + tsk_seruntime(tsk) + sig->sum_sched_runtime); } static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now) @@ -949,7 +949,7 @@ static void check_thread_timers(struct task_struct *tsk, struct cpu_timer_list *t = list_first_entry(timers, struct cpu_timer_list, entry); - if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) { + if (!--maxfire || tsk_seruntime(tsk) < t->expires.sched) { tsk->cputime_expires.sched_exp = t->expires.sched; break; } @@ -966,7 +966,7 @@ static void check_thread_timers(struct task_struct *tsk, ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max); if (hard != RLIM_INFINITY && - tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) { + tsk_rttimeout(tsk) > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) { /* * At the hard limit, we just die. * No need to calculate anything else now. @@ -974,7 +974,7 @@ static void check_thread_timers(struct task_struct *tsk, __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk); return; } - if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) { + if (tsk_rttimeout(tsk) > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) { /* * At the soft limit, send a SIGXCPU every second. */ @@ -1276,7 +1276,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk) struct task_cputime task_sample = { .utime = tsk->utime, .stime = tsk->stime, - .sum_exec_runtime = tsk->se.sum_exec_runtime + .sum_exec_runtime = tsk_seruntime(tsk) }; if (task_cputime_expired(&task_sample, &tsk->cputime_expires)) diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index 93bd2eb2bc53e..21b7ca205f382 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -192,7 +192,7 @@ static int common_clock_get(clockid_t which_clock, struct timespec *tp) } static inline int common_clock_set(const clockid_t which_clock, - struct timespec *tp) + const struct timespec *tp) { return do_sys_settimeofday(tp, NULL); } @@ -928,7 +928,7 @@ void exit_itimers(struct signal_struct *sig) } /* Not available / possible... 
functions */ -int do_posix_clock_nosettime(const clockid_t clockid, struct timespec *tp) +int do_posix_clock_nosettime(const clockid_t clockid, const struct timespec *tp) { return -EINVAL; } diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index d4d3d180abb44..871e458e1ffc8 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -325,3 +325,10 @@ config PM_OPP representing individual voltage domains and provides SOC implementations a ready to use framework to manage OPPs. For more information, read + +config SUSPEND_TIME + bool "Log time spent in suspend" + ---help--- + Prints the time spent in suspend in the kernel log, and + keeps statistics on the time spent in suspend in + /sys/kernel/debug/suspend_time diff --git a/kernel/power/Makefile b/kernel/power/Makefile index d128ccea88648..bd4e091c411b8 100644 --- a/kernel/power/Makefile +++ b/kernel/power/Makefile @@ -12,5 +12,6 @@ obj-$(CONFIG_USER_WAKELOCK) += userwakelock.o obj-$(CONFIG_EARLYSUSPEND) += earlysuspend.o obj-$(CONFIG_CONSOLE_EARLYSUSPEND) += consoleearlysuspend.o obj-$(CONFIG_FB_EARLYSUSPEND) += fbearlysuspend.o +obj-$(CONFIG_SUSPEND_TIME) += suspend_time.o obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o diff --git a/kernel/power/earlysuspend.c b/kernel/power/earlysuspend.c index 84bed51dcdce7..1c26be4e9015b 100644 --- a/kernel/power/earlysuspend.c +++ b/kernel/power/earlysuspend.c @@ -38,11 +38,22 @@ static DECLARE_WORK(early_suspend_work, early_suspend); static DECLARE_WORK(late_resume_work, late_resume); static DEFINE_SPINLOCK(state_lock); enum { + SUSPENDED_ON = 0x0, SUSPEND_REQUESTED = 0x1, SUSPENDED = 0x2, SUSPEND_REQUESTED_AND_SUSPENDED = SUSPEND_REQUESTED | SUSPENDED, }; static int state; +#ifdef CONFIG_HTC_ONMODE_CHARGING +static LIST_HEAD(onchg_suspend_handlers); +static void onchg_suspend(struct work_struct *work); +static void onchg_resume(struct work_struct *work); + +static DECLARE_WORK(onchg_suspend_work, onchg_suspend); +static DECLARE_WORK(onchg_resume_work, onchg_resume); + +static int state_onchg; +#endif void register_early_suspend(struct early_suspend *handler) { @@ -76,10 +87,15 @@ static void early_suspend(struct work_struct *work) unsigned long irqflags; int abort = 0; + pr_info("[R] early_suspend start\n"); mutex_lock(&early_suspend_lock); spin_lock_irqsave(&state_lock, irqflags); - if (state == SUSPEND_REQUESTED) + if (state == SUSPEND_REQUESTED) { state |= SUSPENDED; +#ifdef CONFIG_HTC_ONMODE_CHARGING + state_onchg = SUSPEND_REQUESTED_AND_SUSPENDED; +#endif + } else abort = 1; spin_unlock_irqrestore(&state_lock, irqflags); @@ -102,12 +118,14 @@ static void early_suspend(struct work_struct *work) if (debug_mask & DEBUG_SUSPEND) pr_info("early_suspend: sync\n"); + pr_info("[R] early_suspend: sync\n"); sys_sync(); abort: spin_lock_irqsave(&state_lock, irqflags); if (state == SUSPEND_REQUESTED_AND_SUSPENDED) wake_unlock(&main_wake_lock); spin_unlock_irqrestore(&state_lock, irqflags); + pr_info("[R] early_suspend end\n"); } static void late_resume(struct work_struct *work) @@ -116,10 +134,15 @@ static void late_resume(struct work_struct *work) unsigned long irqflags; int abort = 0; + pr_info("[R] late_resume start\n"); mutex_lock(&early_suspend_lock); spin_lock_irqsave(&state_lock, irqflags); - if (state == SUSPENDED) + if (state == SUSPENDED) { state &= ~SUSPENDED; +#ifdef CONFIG_HTC_ONMODE_CHARGING + state_onchg &= ~SUSPEND_REQUESTED_AND_SUSPENDED; +#endif + } else abort = 1; spin_unlock_irqrestore(&state_lock, irqflags); @@ -138,7 +161,143 @@ static void late_resume(struct work_struct *work) 
pr_info("late_resume: done\n"); abort: mutex_unlock(&early_suspend_lock); + pr_info("[R] late_resume end\n"); +} + +#ifdef CONFIG_HTC_ONMODE_CHARGING +void register_onchg_suspend(struct early_suspend *handler) +{ + struct list_head *pos; + mutex_lock(&early_suspend_lock); + list_for_each(pos, &onchg_suspend_handlers) { + struct early_suspend *e; + e = list_entry(pos, struct early_suspend, link); + if (e->level > handler->level) + break; + } + list_add_tail(&handler->link, pos); + mutex_unlock(&early_suspend_lock); +} +EXPORT_SYMBOL(register_onchg_suspend); + +void unregister_onchg_suspend(struct early_suspend *handler) +{ + mutex_lock(&early_suspend_lock); + list_del(&handler->link); + mutex_unlock(&early_suspend_lock); +} +EXPORT_SYMBOL(unregister_onchg_suspend); + + +static void onchg_suspend(struct work_struct *work) +{ + struct early_suspend *pos; + unsigned long irqflags; + int abort = 0; + + pr_info("[R] onchg_suspend start\n"); + mutex_lock(&early_suspend_lock); + spin_lock_irqsave(&state_lock, irqflags); + if (state == SUSPEND_REQUESTED_AND_SUSPENDED && + state_onchg == SUSPEND_REQUESTED) + state_onchg |= SUSPENDED; + else + abort = 1; + spin_unlock_irqrestore(&state_lock, irqflags); + + if (abort) { + if (debug_mask & DEBUG_SUSPEND) + pr_info("onchg_suspend: abort, state %d, state_onchg: %d\n", state, state_onchg); + mutex_unlock(&early_suspend_lock); + goto abort; + } + + if (debug_mask & DEBUG_SUSPEND) + pr_info("onchg_suspend: call handlers\n"); + + list_for_each_entry(pos, &onchg_suspend_handlers, link) { + if (pos->suspend != NULL) + pos->suspend(pos); + } + mutex_unlock(&early_suspend_lock); + +abort: + pr_info("[R] onchg_suspend end\n"); +} + +static void onchg_resume(struct work_struct *work) +{ + struct early_suspend *pos; + unsigned long irqflags; + int abort = 0; + + pr_info("[R] onchg_resume start\n"); + mutex_lock(&early_suspend_lock); + spin_lock_irqsave(&state_lock, irqflags); + if ( state == SUSPEND_REQUESTED_AND_SUSPENDED && + state_onchg == SUSPENDED) + state_onchg &= ~SUSPENDED; + else + abort = 1; + spin_unlock_irqrestore(&state_lock, irqflags); + + if (abort) { + if (debug_mask & DEBUG_SUSPEND) + pr_info("onchg_resume: abort, state %d, state_onchg: %d\n", state, state_onchg); + goto abort; + } + if (debug_mask & DEBUG_SUSPEND) + pr_info("onchg_resume: call handlers\n"); + list_for_each_entry_reverse(pos, &onchg_suspend_handlers, link) + if (pos->resume != NULL) + pos->resume(pos); + if (debug_mask & DEBUG_SUSPEND) + pr_info("onchg_resume: done\n"); +abort: + mutex_unlock(&early_suspend_lock); + pr_info("[R] onchg_resume end\n"); +} + +void request_onchg_state(int on) +{ + unsigned long irqflags; + int old_sleep; + + spin_lock_irqsave(&state_lock, irqflags); + if (debug_mask & DEBUG_USER_STATE) { + struct timespec ts; + struct rtc_time tm; + getnstimeofday(&ts); + rtc_time_to_tm(ts.tv_sec, &tm); + pr_info("request_onchg_state: %s (%d.%d)->%d at %lld " + "(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", + on == 1 ? 
"on" : "off", + state, + !(state_onchg & SUSPEND_REQUESTED), + on, + ktime_to_ns(ktime_get()), + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); + } + if (state == SUSPEND_REQUESTED_AND_SUSPENDED) { + old_sleep = state_onchg & SUSPEND_REQUESTED; + if (!old_sleep && on == 0) { + state_onchg |= SUSPEND_REQUESTED; + queue_work(suspend_work_queue, &onchg_suspend_work); + } + else if (old_sleep && on ==1) { + state_onchg &= ~SUSPEND_REQUESTED; + queue_work(suspend_work_queue, &onchg_resume_work); + } + } + spin_unlock_irqrestore(&state_lock, irqflags); +} + +int get_onchg_state(void) +{ + return state_onchg; } +#endif void request_suspend_state(suspend_state_t new_state) { diff --git a/kernel/power/fbearlysuspend.c b/kernel/power/fbearlysuspend.c index 15137650149c2..83104fe471038 100644 --- a/kernel/power/fbearlysuspend.c +++ b/kernel/power/fbearlysuspend.c @@ -47,7 +47,7 @@ static void stop_drawing_early_suspend(struct early_suspend *h) } /* tell userspace to start drawing */ -static void start_drawing_late_resume(struct early_suspend *h) +void start_drawing_late_resume(struct early_suspend *h) { unsigned long irq_flags; diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 1832bd2642192..554d3b049f353 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -271,7 +272,7 @@ static int create_image(int platform_mode) local_irq_disable(); - error = sysdev_suspend(PMSG_FREEZE); + error = syscore_suspend(); if (error) { printk(KERN_ERR "PM: Some system devices failed to power down, " "aborting hibernation\n"); @@ -295,7 +296,7 @@ static int create_image(int platform_mode) } Power_up: - sysdev_resume(); + syscore_resume(); /* NOTE: dpm_resume_noirq() is just a resume() for devices * that suspended with irqs off ... no overall powerup. */ @@ -402,7 +403,7 @@ static int resume_target_kernel(bool platform_mode) local_irq_disable(); - error = sysdev_suspend(PMSG_QUIESCE); + error = syscore_suspend(); if (error) goto Enable_irqs; @@ -429,7 +430,7 @@ static int resume_target_kernel(bool platform_mode) restore_processor_state(); touch_softlockup_watchdog(); - sysdev_resume(); + syscore_resume(); Enable_irqs: local_irq_enable(); @@ -515,7 +516,7 @@ int hibernation_platform_enter(void) goto Platform_finish; local_irq_disable(); - sysdev_suspend(PMSG_HIBERNATE); + syscore_suspend(); if (pm_wakeup_pending()) { error = -EAGAIN; goto Power_up; @@ -526,7 +527,7 @@ int hibernation_platform_enter(void) while (1); Power_up: - sysdev_resume(); + syscore_resume(); local_irq_enable(); enable_nonboot_cpus(); diff --git a/kernel/power/main.c b/kernel/power/main.c index bd70a6f21befb..50f7f4441bd88 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -3,7 +3,7 @@ * * Copyright (c) 2003 Patrick Mochel * Copyright (c) 2003 Open Source Development Lab - * + * * This file is released under the GPLv2 * */ @@ -144,7 +144,7 @@ struct kobject *power_kobj; * 'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and * 'disk' (Suspend-to-Disk). * - * store() accepts one of those strings, translates it into the + * store() accepts one of those strings, translates it into the * proper enumerated value, and initiates a suspend transition. 
*/ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr, @@ -316,6 +316,46 @@ power_attr(wake_lock); power_attr(wake_unlock); #endif +#ifdef CONFIG_HTC_ONMODE_CHARGING +static ssize_t state_onchg_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + char *s = buf; + if (get_onchg_state()) + s += sprintf(s, "chgoff "); + else + s += sprintf(s, "chgon "); + + if (s != buf) + /* convert the last space to a newline */ + *(s-1) = '\n'; + + return (s - buf); +} + +static ssize_t +state_onchg_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t n) +{ + char *p; + int len; + + p = memchr(buf, '\n', n); + len = p ? p - buf : n; + + if (len == 5 || len == 6 || len == 7) { + if (!strncmp(buf, "chgon", len)) + request_onchg_state(1); + else if (!strncmp(buf, "chgoff", len)) + request_onchg_state(0); + } + + return 0; +} + +power_attr(state_onchg); +#endif + static struct attribute * g[] = { &state_attr.attr, #ifdef CONFIG_PM_TRACE @@ -332,6 +372,9 @@ static struct attribute * g[] = { &wake_lock_attr.attr, &wake_unlock_attr.attr, #endif +#ifdef CONFIG_HTC_ONMODE_CHARGING + &state_onchg_attr.attr, +#endif #endif NULL, }; diff --git a/kernel/power/power.h b/kernel/power/power.h index 3237ad456e77d..c37fbfbe0b734 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -264,4 +264,8 @@ ssize_t wake_unlock_store(struct kobject *kobj, struct kobj_attribute *attr, /* kernel/power/earlysuspend.c */ void request_suspend_state(suspend_state_t state); suspend_state_t get_suspend_state(void); +#ifdef CONFIG_HTC_ONMODE_CHARGING +void request_onchg_state(int on); +int get_onchg_state(void); +#endif #endif diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 7c4cd6a648419..067fae0f9b997 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include "power.h" @@ -165,13 +166,13 @@ static int suspend_enter(suspend_state_t state) arch_suspend_disable_irqs(); BUG_ON(!irqs_disabled()); - error = sysdev_suspend(PMSG_SUSPEND); + error = syscore_suspend(); if (!error) { if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) { error = suspend_ops->enter(state); events_check_enabled = false; } - sysdev_resume(); + syscore_resume(); } arch_suspend_enable_irqs(); @@ -212,7 +213,6 @@ int suspend_devices_and_enter(suspend_state_t state) goto Close; } suspend_console(); - pm_restrict_gfp_mask(); suspend_test_start(); error = dpm_suspend_start(PMSG_SUSPEND); if (error) { @@ -229,7 +229,6 @@ int suspend_devices_and_enter(suspend_state_t state) suspend_test_start(); dpm_resume_end(PMSG_RESUME); suspend_test_finish("resume devices"); - pm_restore_gfp_mask(); resume_console(); Close: if (suspend_ops->end) @@ -290,7 +289,9 @@ int enter_state(suspend_state_t state) goto Finish; pr_debug("PM: Entering %s sleep\n", pm_states[state]); + pm_restrict_gfp_mask(); error = suspend_devices_and_enter(state); + pm_restore_gfp_mask(); Finish: pr_debug("PM: Finishing wakeup.\n"); diff --git a/kernel/power/suspend_time.c b/kernel/power/suspend_time.c new file mode 100644 index 0000000000000..d2a65da9f22c1 --- /dev/null +++ b/kernel/power/suspend_time.c @@ -0,0 +1,111 @@ +/* + * debugfs file to track time spent in suspend + * + * Copyright (c) 2011, Google, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include +#include +#include +#include +#include +#include + +static struct timespec suspend_time_before; +static unsigned int time_in_suspend_bins[32]; + +#ifdef CONFIG_DEBUG_FS +static int suspend_time_debug_show(struct seq_file *s, void *data) +{ + int bin; + seq_printf(s, "time (secs) count\n"); + seq_printf(s, "------------------\n"); + for (bin = 0; bin < 32; bin++) { + if (time_in_suspend_bins[bin] == 0) + continue; + seq_printf(s, "%4d - %4d %4u\n", + bin ? 1 << (bin - 1) : 0, 1 << bin, + time_in_suspend_bins[bin]); + } + return 0; +} + +static int suspend_time_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, suspend_time_debug_show, NULL); +} + +static const struct file_operations suspend_time_debug_fops = { + .open = suspend_time_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init suspend_time_debug_init(void) +{ + struct dentry *d; + + d = debugfs_create_file("suspend_time", 0755, NULL, NULL, + &suspend_time_debug_fops); + if (!d) { + pr_err("Failed to create suspend_time debug file\n"); + return -ENOMEM; + } + + return 0; +} + +late_initcall(suspend_time_debug_init); +#endif + +static int suspend_time_syscore_suspend(void) +{ + read_persistent_clock(&suspend_time_before); + + return 0; +} + +static void suspend_time_syscore_resume(void) +{ + struct timespec after; + + read_persistent_clock(&after); + + after = timespec_sub(after, suspend_time_before); + + time_in_suspend_bins[fls(after.tv_sec)]++; + + pr_info("Suspended for %lu.%03lu seconds\n", after.tv_sec, + after.tv_nsec / NSEC_PER_MSEC); +} + +static struct syscore_ops suspend_time_syscore_ops = { + .suspend = suspend_time_syscore_suspend, + .resume = suspend_time_syscore_resume, +}; + +static int suspend_time_syscore_init(void) +{ + register_syscore_ops(&suspend_time_syscore_ops); + + return 0; +} + +static void suspend_time_syscore_exit(void) +{ + unregister_syscore_ops(&suspend_time_syscore_ops); +} +module_init(suspend_time_syscore_init); +module_exit(suspend_time_syscore_exit); diff --git a/kernel/power/user.c b/kernel/power/user.c index c36c3b9e8a84f..7d02d33be699f 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c @@ -135,8 +135,10 @@ static int snapshot_release(struct inode *inode, struct file *filp) free_basic_memory_bitmaps(); data = filp->private_data; free_all_swap_pages(data->swap); - if (data->frozen) + if (data->frozen) { + pm_restore_gfp_mask(); thaw_processes(); + } pm_notifier_call_chain(data->mode == O_RDONLY ? 
PM_POST_HIBERNATION : PM_POST_RESTORE); atomic_inc(&snapshot_device_available); @@ -379,6 +381,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, * PM_HIBERNATION_PREPARE */ error = suspend_devices_and_enter(PM_SUSPEND_MEM); + data->ready = 0; break; case SNAPSHOT_PLATFORM_SUPPORT: diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c index ee9781c5adb28..b054e9cee3463 100644 --- a/kernel/power/wakelock.c +++ b/kernel/power/wakelock.c @@ -48,11 +48,20 @@ struct workqueue_struct *suspend_work_queue; struct wake_lock main_wake_lock; suspend_state_t requested_suspend_state = PM_SUSPEND_MEM; static struct wake_lock unknown_wakeup; +static struct wake_lock suspend_backoff_lock; + +#define SUSPEND_BACKOFF_FAILURES 10 +#define SUSPEND_BACKOFF_INTERVAL 5000 + +static unsigned suspend_backoff_count; +static unsigned suspend_fail_count; #ifdef CONFIG_WAKELOCK_STAT +static bool resuming_devices; +static int suspend_generation; static struct wake_lock deleted_wake_locks; static ktime_t last_sleep_time_update; -static int wait_for_wakeup; + int get_expired_time(struct wake_lock *lock, ktime_t *expire_time) { @@ -60,19 +69,15 @@ int get_expired_time(struct wake_lock *lock, ktime_t *expire_time) struct timespec kt; struct timespec tomono; struct timespec delta; - unsigned long seq; + struct timespec sleep; long timeout; if (!(lock->flags & WAKE_LOCK_AUTO_EXPIRE)) return 0; - do { - seq = read_seqbegin(&xtime_lock); - timeout = lock->expires - jiffies; - if (timeout > 0) - return 0; - kt = current_kernel_time(); - tomono = __get_wall_to_monotonic(); - } while (read_seqretry(&xtime_lock, seq)); + get_xtime_and_monotonic_and_sleep_offset(&kt, &tomono, &sleep); + timeout = lock->expires - jiffies; + if (timeout > 0) + return 0; jiffies_to_timespec(-timeout, &delta); set_normalized_timespec(&ts, kt.tv_sec + tomono.tv_sec - delta.tv_sec, kt.tv_nsec + tomono.tv_nsec - delta.tv_nsec); @@ -259,10 +264,20 @@ long has_wake_lock(int type) return ret; } +static void suspend_backoff(void) +{ + pr_info("suspend: too many immediate wakeups, back off\n"); + ++suspend_backoff_count; + wake_lock_timeout(&suspend_backoff_lock, + msecs_to_jiffies(suspend_backoff_count * + SUSPEND_BACKOFF_INTERVAL)); +} + static void suspend(struct work_struct *work) { int ret; int entry_event_num; + struct timespec ts_entry, ts_exit; if (has_wake_lock(WAKE_LOCK_SUSPEND)) { if (debug_mask & DEBUG_SUSPEND) @@ -271,23 +286,63 @@ static void suspend(struct work_struct *work) } entry_event_num = current_event_num; + +#ifdef CONFIG_WAKELOCK_STAT + suspend_generation++; +#endif + sys_sync(); if (debug_mask & DEBUG_SUSPEND) pr_info("suspend: enter suspend\n"); + getnstimeofday(&ts_entry); ret = pm_suspend(requested_suspend_state); + getnstimeofday(&ts_exit); + +#ifdef CONFIG_WAKELOCK_STAT + if (debug_mask & DEBUG_WAKEUP) { + unsigned long irqflags; + struct wake_lock *lock; + int type; + + spin_lock_irqsave(&list_lock, irqflags); + pr_info("wakeup wake lock:"); + + for (type = 0; type < WAKE_LOCK_TYPE_COUNT; type++) { + list_for_each_entry(lock, &active_wake_locks[type], + link) + if (lock->stat.wakeup_last_generation == + suspend_generation) + pr_cont(" %s", lock->name); + } + pr_cont("\n"); + spin_unlock_irqrestore(&list_lock, irqflags); + } +#endif + if (debug_mask & DEBUG_EXIT_SUSPEND) { - struct timespec ts; struct rtc_time tm; - getnstimeofday(&ts); - rtc_time_to_tm(ts.tv_sec, &tm); + rtc_time_to_tm(ts_exit.tv_sec, &tm); pr_info("suspend: exit suspend, ret = %d " "(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", ret, 
tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, - tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); + tm.tm_hour, tm.tm_min, tm.tm_sec, ts_exit.tv_nsec); } + + if (ts_exit.tv_sec - ts_entry.tv_sec <= 1) { + ++suspend_fail_count; + + if (suspend_fail_count == SUSPEND_BACKOFF_FAILURES) { + suspend_backoff(); + suspend_fail_count = 0; + } + } else { + suspend_fail_count = 0; + suspend_backoff_count = 0; + } + if (current_event_num == entry_event_num) { - if (debug_mask & DEBUG_SUSPEND) - pr_info("suspend: pm_suspend returned with no event\n"); + if (debug_mask & DEBUG_WAKEUP) + pr_info("suspend: no wakelock for wakeup source\n"); wake_lock_timeout(&unknown_wakeup, HZ / 2); } } @@ -314,16 +369,24 @@ static DEFINE_TIMER(expire_timer, expire_wake_locks, 0, 0); static int power_suspend_late(struct device *dev) { int ret = has_wake_lock(WAKE_LOCK_SUSPEND) ? -EAGAIN : 0; -#ifdef CONFIG_WAKELOCK_STAT - wait_for_wakeup = 1; -#endif + if (debug_mask & DEBUG_SUSPEND) pr_info("power_suspend_late return %d\n", ret); + + if (!ret) + resuming_devices = true; + return ret; } +static void wakelocks_resume_complete(struct device *dev) +{ + resuming_devices = false; +} + static struct dev_pm_ops power_driver_pm_ops = { .suspend_noirq = power_suspend_late, + .complete = wakelocks_resume_complete, }; static struct platform_driver power_driver = { @@ -348,6 +411,7 @@ void wake_lock_init(struct wake_lock *lock, int type, const char *name) lock->stat.count = 0; lock->stat.expire_count = 0; lock->stat.wakeup_count = 0; + lock->stat.wakeup_last_generation = 0; lock->stat.total_time = ktime_set(0, 0); lock->stat.prevent_suspend_time = ktime_set(0, 0); lock->stat.max_time = ktime_set(0, 0); @@ -401,11 +465,10 @@ static void wake_lock_internal( BUG_ON(type >= WAKE_LOCK_TYPE_COUNT); BUG_ON(!(lock->flags & WAKE_LOCK_INITIALIZED)); #ifdef CONFIG_WAKELOCK_STAT - if (type == WAKE_LOCK_SUSPEND && wait_for_wakeup) { - if (debug_mask & DEBUG_WAKEUP) - pr_info("wakeup wake lock: %s\n", lock->name); - wait_for_wakeup = 0; + if (type == WAKE_LOCK_SUSPEND && resuming_devices && + lock->stat.wakeup_last_generation != suspend_generation) { lock->stat.wakeup_count++; + lock->stat.wakeup_last_generation = suspend_generation; } if ((lock->flags & WAKE_LOCK_AUTO_EXPIRE) && (long)(lock->expires - jiffies) <= 0) { @@ -551,6 +614,8 @@ static int __init wakelocks_init(void) wake_lock_init(&main_wake_lock, WAKE_LOCK_SUSPEND, "main"); wake_lock(&main_wake_lock); wake_lock_init(&unknown_wakeup, WAKE_LOCK_SUSPEND, "unknown_wakeups"); + wake_lock_init(&suspend_backoff_lock, WAKE_LOCK_SUSPEND, + "suspend_backoff"); ret = platform_device_register(&power_device); if (ret) { @@ -580,6 +645,7 @@ static int __init wakelocks_init(void) err_platform_driver_register: platform_device_unregister(&power_device); err_platform_device_register: + wake_lock_destroy(&suspend_backoff_lock); wake_lock_destroy(&unknown_wakeup); wake_lock_destroy(&main_wake_lock); #ifdef CONFIG_WAKELOCK_STAT @@ -596,6 +662,7 @@ static void __exit wakelocks_exit(void) destroy_workqueue(suspend_work_queue); platform_driver_unregister(&power_driver); platform_device_unregister(&power_device); + wake_lock_destroy(&suspend_backoff_lock); wake_lock_destroy(&unknown_wakeup); wake_lock_destroy(&main_wake_lock); #ifdef CONFIG_WAKELOCK_STAT diff --git a/kernel/ptrace.c b/kernel/ptrace.c index e2302e40b3600..254ad5b525123 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -22,6 +22,7 @@ #include #include #include +#include /* @@ -876,3 +877,19 @@ asmlinkage long compat_sys_ptrace(compat_long_t 
request, compat_long_t pid, return ret; } #endif /* CONFIG_COMPAT */ + +#ifdef CONFIG_HAVE_HW_BREAKPOINT +int ptrace_get_breakpoints(struct task_struct *tsk) +{ + if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt)) + return 0; + + return -1; +} + +void ptrace_put_breakpoints(struct task_struct *tsk) +{ + if (atomic_dec_and_test(&tsk->ptrace_bp_refcnt)) + flush_ptrace_hw_breakpoint(tsk); +} +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c index 0c343b9a46d56..421abfd3641d2 100644 --- a/kernel/rcutiny.c +++ b/kernel/rcutiny.c @@ -40,10 +40,10 @@ static struct task_struct *rcu_kthread_task; static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq); static unsigned long have_rcu_kthread_work; -static void invoke_rcu_kthread(void); /* Forward declarations for rcutiny_plugin.h. */ struct rcu_ctrlblk; +static void invoke_rcu_kthread(void); static void rcu_process_callbacks(struct rcu_ctrlblk *rcp); static int rcu_kthread(void *arg); static void __call_rcu(struct rcu_head *head, @@ -79,26 +79,31 @@ void rcu_exit_nohz(void) #endif /* #ifdef CONFIG_NO_HZ */ /* - * Helper function for rcu_qsctr_inc() and rcu_bh_qsctr_inc(). - * Also disable irqs to avoid confusion due to interrupt handlers + * Helper function for rcu_sched_qs() and rcu_bh_qs(). + * Also irqs are disabled to avoid confusion due to interrupt handlers * invoking call_rcu(). */ static int rcu_qsctr_help(struct rcu_ctrlblk *rcp) { - unsigned long flags; - - local_irq_save(flags); if (rcp->rcucblist != NULL && rcp->donetail != rcp->curtail) { rcp->donetail = rcp->curtail; - local_irq_restore(flags); return 1; } - local_irq_restore(flags); return 0; } +/* + * Wake up rcu_kthread() to process callbacks now eligible for invocation + * or to boost readers. + */ +static void invoke_rcu_kthread(void) +{ + have_rcu_kthread_work = 1; + wake_up(&rcu_kthread_wq); +} + /* * Record an rcu quiescent state. And an rcu_bh quiescent state while we * are at it, given that any rcu quiescent state is also an rcu_bh @@ -106,9 +111,13 @@ static int rcu_qsctr_help(struct rcu_ctrlblk *rcp) */ void rcu_sched_qs(int cpu) { + unsigned long flags; + + local_irq_save(flags); if (rcu_qsctr_help(&rcu_sched_ctrlblk) + rcu_qsctr_help(&rcu_bh_ctrlblk)) invoke_rcu_kthread(); + local_irq_restore(flags); } /* @@ -116,8 +125,12 @@ void rcu_sched_qs(int cpu) */ void rcu_bh_qs(int cpu) { + unsigned long flags; + + local_irq_save(flags); if (rcu_qsctr_help(&rcu_bh_ctrlblk)) invoke_rcu_kthread(); + local_irq_restore(flags); } /* @@ -167,7 +180,7 @@ static void rcu_process_callbacks(struct rcu_ctrlblk *rcp) prefetch(next); debug_rcu_head_unqueue(list); local_bh_disable(); - list->func(list); + __rcu_reclaim(list); local_bh_enable(); list = next; RCU_TRACE(cb_count++); @@ -207,20 +220,6 @@ static int rcu_kthread(void *arg) return 0; /* Not reached, but needed to shut gcc up. */ } -/* - * Wake up rcu_kthread() to process callbacks now eligible for invocation - * or to boost readers. - */ -static void invoke_rcu_kthread(void) -{ - unsigned long flags; - - local_irq_save(flags); - have_rcu_kthread_work = 1; - wake_up(&rcu_kthread_wq); - local_irq_restore(flags); -} - /* * Wait for a grace period to elapse. But it is illegal to invoke * synchronize_sched() from within an RCU read-side critical section. 
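Both the rcutiny.c hunk above and the rcutree.c hunk below replace the direct callback invocation list->func(list) with __rcu_reclaim(list). The helper itself is not shown anywhere in this patch; to the best of my knowledge it lives in kernel/rcu.h in this kernel generation and folds kfree_rcu() support into the callback-processing loops. A minimal sketch of what it roughly does, assuming the usual struct rcu_head from <linux/rcupdate.h> and kfree() from <linux/slab.h>; the _sketch suffix is mine, not a real kernel symbol:

	/*
	 * Sketch of the shared reclaim helper the hunks switch to.
	 * See kernel/rcu.h of this era for the real definition.
	 */
	static inline void __rcu_reclaim_sketch(struct rcu_head *head)
	{
		unsigned long offset = (unsigned long)head->func;

		if (offset < 4096) {
			/*
			 * kfree_rcu(): ->func actually encodes the offset of
			 * the rcu_head within the enclosing object, so free
			 * that object directly.
			 */
			kfree((void *)head - offset);
		} else {
			/* Ordinary call_rcu() callback: invoke it. */
			head->func(head);
		}
	}

Routing both the tiny and tree RCU flavours through one helper keeps the kfree_rcu() offset check in a single place instead of duplicating it in each callback loop.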
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index dd4aea806f8ef..b3c1aede66d57 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -1143,7 +1143,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) next = list->next; prefetch(next); debug_rcu_head_unqueue(list); - list->func(list); + __rcu_reclaim(list); list = next; if (++count >= rdp->blimit) break; diff --git a/kernel/sched.c b/kernel/sched.c index b294a1882ffca..71691e5b81bdc 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1,3 +1,6 @@ +#ifdef CONFIG_SCHED_BFS +#include "sched_bfs.c" +#else /* * kernel/sched.c * @@ -4304,7 +4307,7 @@ void complete_all(struct completion *x) EXPORT_SYMBOL(complete_all); static inline long __sched -do_wait_for_common(struct completion *x, long timeout, int state) +do_wait_for_common(struct completion *x, long timeout, int state, int iowait) { if (!x->done) { DECLARE_WAITQUEUE(wait, current); @@ -4317,7 +4320,10 @@ do_wait_for_common(struct completion *x, long timeout, int state) } __set_current_state(state); spin_unlock_irq(&x->wait.lock); - timeout = schedule_timeout(timeout); + if (iowait) + timeout = io_schedule_timeout(timeout); + else + timeout = schedule_timeout(timeout); spin_lock_irq(&x->wait.lock); } while (!x->done && timeout); __remove_wait_queue(&x->wait, &wait); @@ -4329,12 +4335,12 @@ do_wait_for_common(struct completion *x, long timeout, int state) } static long __sched -wait_for_common(struct completion *x, long timeout, int state) +wait_for_common(struct completion *x, long timeout, int state, int iowait) { might_sleep(); spin_lock_irq(&x->wait.lock); - timeout = do_wait_for_common(x, timeout, state); + timeout = do_wait_for_common(x, timeout, state, iowait); spin_unlock_irq(&x->wait.lock); return timeout; } @@ -4351,10 +4357,23 @@ wait_for_common(struct completion *x, long timeout, int state) */ void __sched wait_for_completion(struct completion *x) { - wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); + wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE, 0); } EXPORT_SYMBOL(wait_for_completion); +/** + * wait_for_completion_io: - waits for completion of a task + * @x: holds the state of this particular completion + * + * This waits for completion of a specific task to be signaled. Treats any + * sleeping as waiting for IO for the purposes of process accounting. 
+ */ +void __sched wait_for_completion_io(struct completion *x) +{ + wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE, 1); +} +EXPORT_SYMBOL(wait_for_completion_io); + /** * wait_for_completion_timeout: - waits for completion of a task (w/timeout) * @x: holds the state of this particular completion @@ -4367,7 +4386,7 @@ EXPORT_SYMBOL(wait_for_completion); unsigned long __sched wait_for_completion_timeout(struct completion *x, unsigned long timeout) { - return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE); + return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE, 0); } EXPORT_SYMBOL(wait_for_completion_timeout); @@ -4380,7 +4399,8 @@ EXPORT_SYMBOL(wait_for_completion_timeout); */ int __sched wait_for_completion_interruptible(struct completion *x) { - long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); + long t = + wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE, 0); if (t == -ERESTARTSYS) return t; return 0; @@ -4399,7 +4419,7 @@ long __sched wait_for_completion_interruptible_timeout(struct completion *x, unsigned long timeout) { - return wait_for_common(x, timeout, TASK_INTERRUPTIBLE); + return wait_for_common(x, timeout, TASK_INTERRUPTIBLE, 0); } EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); @@ -4412,7 +4432,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); */ int __sched wait_for_completion_killable(struct completion *x) { - long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE); + long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE, 0); if (t == -ERESTARTSYS) return t; return 0; @@ -4432,7 +4452,7 @@ long __sched wait_for_completion_killable_timeout(struct completion *x, unsigned long timeout) { - return wait_for_common(x, timeout, TASK_KILLABLE); + return wait_for_common(x, timeout, TASK_KILLABLE, 0); } EXPORT_SYMBOL(wait_for_completion_killable_timeout); @@ -5573,7 +5593,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) * The idle tasks have their own, simple scheduling class: */ idle->sched_class = &idle_sched_class; - ftrace_graph_init_task(idle); + ftrace_graph_init_idle_task(idle, cpu); } /* @@ -9343,4 +9363,4 @@ struct cgroup_subsys cpuacct_subsys = { .subsys_id = cpuacct_subsys_id, }; #endif /* CONFIG_CGROUP_CPUACCT */ - +#endif /* CONFIG_SCHED_BFS */ diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c index 9fb6562831570..5946ac5156024 100644 --- a/kernel/sched_autogroup.c +++ b/kernel/sched_autogroup.c @@ -12,7 +12,6 @@ static atomic_t autogroup_seq_nr; static void __init autogroup_init(struct task_struct *init_task) { autogroup_default.tg = &root_task_group; - root_task_group.autogroup = &autogroup_default; kref_init(&autogroup_default.kref); init_rwsem(&autogroup_default.lock); init_task->signal->autogroup = &autogroup_default; @@ -130,7 +129,7 @@ task_wants_autogroup(struct task_struct *p, struct task_group *tg) static inline bool task_group_is_autogroup(struct task_group *tg) { - return tg != &root_task_group && tg->autogroup; + return !!tg->autogroup; } static inline struct task_group * @@ -161,11 +160,15 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag) p->signal->autogroup = autogroup_kref_get(ag); + if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled)) + goto out; + t = p; do { sched_move_task(t); } while_each_thread(p, t); +out: unlock_task_sighand(p, &flags); autogroup_kref_put(prev); } @@ -247,10 +250,14 @@ void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m) { struct autogroup *ag = 
autogroup_task_get(p); + if (!task_group_is_autogroup(ag->tg)) + goto out; + down_read(&ag->lock); seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice); up_read(&ag->lock); +out: autogroup_kref_put(ag); } #endif /* CONFIG_PROC_FS */ @@ -258,9 +265,7 @@ void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m) #ifdef CONFIG_SCHED_DEBUG static inline int autogroup_path(struct task_group *tg, char *buf, int buflen) { - int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled); - - if (!enabled || !tg->autogroup) + if (!task_group_is_autogroup(tg)) return 0; return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id); diff --git a/kernel/sched_autogroup.h b/kernel/sched_autogroup.h index 7b859ffe5dadd..05577055cfcaa 100644 --- a/kernel/sched_autogroup.h +++ b/kernel/sched_autogroup.h @@ -1,6 +1,11 @@ #ifdef CONFIG_SCHED_AUTOGROUP struct autogroup { + /* + * reference doesn't mean how many thread attach to this + * autogroup now. It just stands for the number of task + * could use this autogroup. + */ struct kref kref; struct task_group *tg; struct rw_semaphore lock; diff --git a/kernel/sched_bfs.c b/kernel/sched_bfs.c new file mode 100644 index 0000000000000..f3cb030a219d8 --- /dev/null +++ b/kernel/sched_bfs.c @@ -0,0 +1,7294 @@ +/* + * kernel/sched_bfs.c, was sched.c + * + * Kernel scheduler and related syscalls + * + * Copyright (C) 1991-2002 Linus Torvalds + * + * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and + * make semaphores SMP safe + * 1998-11-19 Implemented schedule_timeout() and related stuff + * by Andrea Arcangeli + * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar: + * hybrid priority-list and round-robin design with + * an array-switch method of distributing timeslices + * and per-CPU runqueues. Cleanups and useful suggestions + * by Davide Libenzi, preemptible kernel bits by Robert Love. + * 2003-09-03 Interactivity tuning by Con Kolivas. + * 2004-04-02 Scheduler domains code by Nick Piggin + * 2007-04-15 Work begun on replacing all interactivity tuning with a + * fair scheduling design by Con Kolivas. + * 2007-05-05 Load balancing (smp-nice) and other improvements + * by Peter Williams + * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith + * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri + * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins, + * Thomas Gleixner, Mike Kravetz + * now Brainfuck deadline scheduling policy by Con Kolivas deletes + * a whole lot of those previous things. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "sched_cpupri.h" +#include "workqueue_sched.h" + +#define CREATE_TRACE_POINTS +#include + +#define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO) +#define rt_task(p) rt_prio((p)->prio) +#define rt_queue(rq) rt_prio((rq)->rq_prio) +#define batch_task(p) (unlikely((p)->policy == SCHED_BATCH)) +#define is_rt_policy(policy) ((policy) == SCHED_FIFO || \ + (policy) == SCHED_RR) +#define has_rt_policy(p) unlikely(is_rt_policy((p)->policy)) +#define idleprio_task(p) unlikely((p)->policy == SCHED_IDLEPRIO) +#define iso_task(p) unlikely((p)->policy == SCHED_ISO) +#define iso_queue(rq) unlikely((rq)->rq_policy == SCHED_ISO) +#define ISO_PERIOD ((5 * HZ * grq.noc) + 1) + +/* + * Convert user-nice values [ -20 ... 0 ... 19 ] + * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], + * and back. + */ +#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20) +#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20) +#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio) + +/* + * 'User priority' is the nice value converted to something we + * can work with better when scaling various scheduler parameters, + * it's a [ 0 ... 39 ] range. + */ +#define USER_PRIO(p) ((p) - MAX_RT_PRIO) +#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio) +#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO)) +#define SCHED_PRIO(p) ((p) + MAX_RT_PRIO) +#define STOP_PRIO (MAX_RT_PRIO - 1) + +/* + * Some helpers for converting to/from various scales. Use shifts to get + * approximate multiples of ten for less overhead. + */ +#define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ)) +#define JIFFY_NS (1000000000 / HZ) +#define HALF_JIFFY_NS (1000000000 / HZ / 2) +#define HALF_JIFFY_US (1000000 / HZ / 2) +#define MS_TO_NS(TIME) ((TIME) << 20) +#define MS_TO_US(TIME) ((TIME) << 10) +#define NS_TO_MS(TIME) ((TIME) >> 20) +#define NS_TO_US(TIME) ((TIME) >> 10) + +#define RESCHED_US (100) /* Reschedule if less than this many μs left */ + +/* + * This is the time all tasks within the same priority round robin. + * Value is in ms and set to a minimum of 6ms. Scales with number of cpus. + * Tunable via /proc interface. + */ +int rr_interval __read_mostly = 6; + +/* + * sched_iso_cpu - sysctl which determines the cpu percentage SCHED_ISO tasks + * are allowed to run five seconds as real time tasks. This is the total over + * all online cpus. + */ +int sched_iso_cpu __read_mostly = 70; + +/* + * The relative length of deadline for each priority(nice) level. + */ +static int prio_ratios[PRIO_RANGE] __read_mostly; + +/* + * The quota handed out to tasks of all priority levels when refilling their + * time_slice. + */ +static inline int timeslice(void) +{ + return MS_TO_US(rr_interval); +} + +/* + * The global runqueue data that all CPUs work off. Data is protected either + * by the global grq lock, or the discrete lock that precedes the data in this + * struct. 
+ */ +struct global_rq { + raw_spinlock_t lock; + unsigned long nr_running; + unsigned long nr_uninterruptible; + unsigned long long nr_switches; + struct list_head queue[PRIO_LIMIT]; + DECLARE_BITMAP(prio_bitmap, PRIO_LIMIT + 1); +#ifdef CONFIG_SMP + unsigned long qnr; /* queued not running */ + cpumask_t cpu_idle_map; + int idle_cpus; +#endif + int noc; /* num_online_cpus stored and updated when it changes */ + u64 niffies; /* Nanosecond jiffies */ + unsigned long last_jiffy; /* Last jiffy we updated niffies */ + + raw_spinlock_t iso_lock; + int iso_ticks; + int iso_refractory; +}; + +/* There can be only one */ +static struct global_rq grq; + +/* + * This is the main, per-CPU runqueue data structure. + * This data should only be modified by the local cpu. + */ +struct rq { +#ifdef CONFIG_SMP +#ifdef CONFIG_NO_HZ + u64 nohz_stamp; + unsigned char in_nohz_recently; +#endif +#endif + + struct task_struct *curr, *idle, *stop; + struct mm_struct *prev_mm; + + /* Stored data about rq->curr to work outside grq lock */ + u64 rq_deadline; + unsigned int rq_policy; + int rq_time_slice; + u64 rq_last_ran; + int rq_prio; + int rq_running; /* There is a task running */ + + /* Accurate timekeeping data */ + u64 timekeep_clock; + unsigned long user_pc, nice_pc, irq_pc, softirq_pc, system_pc, + iowait_pc, idle_pc; + long account_pc; + atomic_t nr_iowait; + +#ifdef CONFIG_SMP + int cpu; /* cpu of this runqueue */ + int online; + int scaling; /* This CPU is managed by a scaling CPU freq governor */ + struct task_struct *sticky_task; + + struct root_domain *rd; + struct sched_domain *sd; + unsigned long *cpu_locality; /* CPU relative cache distance */ +#ifdef CONFIG_SCHED_SMT + int (*siblings_idle)(unsigned long cpu); + /* See if all smt siblings are idle */ + cpumask_t smt_siblings; +#endif +#ifdef CONFIG_SCHED_MC + int (*cache_idle)(unsigned long cpu); + /* See if all cache siblings are idle */ + cpumask_t cache_siblings; +#endif + u64 last_niffy; /* Last time this RQ updated grq.niffies */ +#endif +#ifdef CONFIG_IRQ_TIME_ACCOUNTING + u64 prev_irq_time; +#endif + u64 clock, old_clock, last_tick; + u64 clock_task; + int dither; + +#ifdef CONFIG_SCHEDSTATS + + /* latency stats */ + struct sched_info rq_sched_info; + unsigned long long rq_cpu_time; + /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */ + + /* sys_sched_yield() stats */ + unsigned int yld_count; + + /* schedule() stats */ + unsigned int sched_switch; + unsigned int sched_count; + unsigned int sched_goidle; + + /* try_to_wake_up() stats */ + unsigned int ttwu_count; + unsigned int ttwu_local; + + /* BKL stats */ + unsigned int bkl_count; +#endif +}; + +static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp; +static DEFINE_MUTEX(sched_hotcpu_mutex); + +#ifdef CONFIG_SMP +/* + * sched_domains_mutex serializes calls to arch_init_sched_domains, + * detach_destroy_domains and partition_sched_domains. + */ +static DEFINE_MUTEX(sched_domains_mutex); + +/* + * By default the system creates a single root-domain with all cpus as + * members (mimicking the global state we have today). + */ +static struct root_domain def_root_domain; + +int __weak arch_sd_sibling_asym_packing(void) +{ + return 0*SD_ASYM_PACKING; +} +#endif + +/* + * We add the notion of a root-domain which will be used to define per-domain + * variables. Each exclusive cpuset essentially defines an island domain by + * fully partitioning the member cpus from any other cpuset. 
Whenever a new + * exclusive cpuset is created, we also create and attach a new root-domain + * object. + * + */ +struct root_domain { + atomic_t refcount; + cpumask_var_t span; + cpumask_var_t online; + + /* + * The "RT overload" flag: it gets set if a CPU has more than + * one runnable RT task. + */ + cpumask_var_t rto_mask; + atomic_t rto_count; +#ifdef CONFIG_SMP + struct cpupri cpupri; +#endif +}; + +#define rcu_dereference_check_sched_domain(p) \ + rcu_dereference_check((p), \ + rcu_read_lock_sched_held() || \ + lockdep_is_held(&sched_domains_mutex)) + +/* + * The domain tree (rq->sd) is protected by RCU's quiescent state transition. + * See detach_destroy_domains: synchronize_sched for details. + * + * The domain tree of any CPU may only be accessed from within + * preempt-disabled sections. + */ +#define for_each_domain(cpu, __sd) \ + for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent) + +static inline void update_rq_clock(struct rq *rq); + +/* + * Sanity check should sched_clock return bogus values. We make sure it does + * not appear to go backwards, and use jiffies to determine the maximum and + * minimum it could possibly have increased, and round down to the nearest + * jiffy when it falls outside this. + */ +static inline void niffy_diff(s64 *niff_diff, int jiff_diff) +{ + unsigned long min_diff, max_diff; + + if (jiff_diff > 1) + min_diff = JIFFIES_TO_NS(jiff_diff - 1); + else + min_diff = 1; + /* Round up to the nearest tick for maximum */ + max_diff = JIFFIES_TO_NS(jiff_diff + 1); + + if (unlikely(*niff_diff < min_diff || *niff_diff > max_diff)) + *niff_diff = min_diff; +} + +#ifdef CONFIG_SMP +#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) +#define this_rq() (&__get_cpu_var(runqueues)) +#define task_rq(p) cpu_rq(task_cpu(p)) +#define cpu_curr(cpu) (cpu_rq(cpu)->curr) +static inline int cpu_of(struct rq *rq) +{ + return rq->cpu; +} + +/* + * Niffies are a globally increasing nanosecond counter. Whenever a runqueue + * clock is updated with the grq.lock held, it is an opportunity to update the + * niffies value. Any CPU can update it by adding how much its clock has + * increased since it last updated niffies, minus any added niffies by other + * CPUs. + */ +static inline void update_clocks(struct rq *rq) +{ + s64 ndiff; + long jdiff; + + update_rq_clock(rq); + ndiff = rq->clock - rq->old_clock; + /* old_clock is only updated when we are updating niffies */ + rq->old_clock = rq->clock; + ndiff -= grq.niffies - rq->last_niffy; + jdiff = jiffies - grq.last_jiffy; + niffy_diff(&ndiff, jdiff); + grq.last_jiffy += jdiff; + grq.niffies += ndiff; + rq->last_niffy = grq.niffies; +} +#else /* CONFIG_SMP */ +static struct rq *uprq; +#define cpu_rq(cpu) (uprq) +#define this_rq() (uprq) +#define task_rq(p) (uprq) +#define cpu_curr(cpu) ((uprq)->curr) +static inline int cpu_of(struct rq *rq) +{ + return 0; +} + +static inline void update_clocks(struct rq *rq) +{ + s64 ndiff; + long jdiff; + + update_rq_clock(rq); + ndiff = rq->clock - rq->old_clock; + rq->old_clock = rq->clock; + jdiff = jiffies - grq.last_jiffy; + niffy_diff(&ndiff, jdiff); + grq.last_jiffy += jdiff; + grq.niffies += ndiff; +} +#endif +#define raw_rq() (&__raw_get_cpu_var(runqueues)) + +#include "sched_stats.h" + +#ifndef prepare_arch_switch +# define prepare_arch_switch(next) do { } while (0) +#endif +#ifndef finish_arch_switch +# define finish_arch_switch(prev) do { } while (0) +#endif + +/* + * All common locking functions performed on grq.lock. 
rq->clock is local to + * the CPU accessing it so it can be modified just with interrupts disabled + * when we're not updating niffies. + * Looking up task_rq must be done under grq.lock to be safe. + */ +static void update_rq_clock_task(struct rq *rq, s64 delta); + +static inline void update_rq_clock(struct rq *rq) +{ + s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; + + rq->clock += delta; + update_rq_clock_task(rq, delta); +} + +static inline int task_running(struct task_struct *p) +{ + return p->oncpu; +} + +static inline void grq_lock(void) + __acquires(grq.lock) +{ + raw_spin_lock(&grq.lock); +} + +static inline void grq_unlock(void) + __releases(grq.lock) +{ + raw_spin_unlock(&grq.lock); +} + +static inline void grq_lock_irq(void) + __acquires(grq.lock) +{ + raw_spin_lock_irq(&grq.lock); +} + +static inline void time_lock_grq(struct rq *rq) + __acquires(grq.lock) +{ + grq_lock(); + update_clocks(rq); +} + +static inline void grq_unlock_irq(void) + __releases(grq.lock) +{ + raw_spin_unlock_irq(&grq.lock); +} + +static inline void grq_lock_irqsave(unsigned long *flags) + __acquires(grq.lock) +{ + raw_spin_lock_irqsave(&grq.lock, *flags); +} + +static inline void grq_unlock_irqrestore(unsigned long *flags) + __releases(grq.lock) +{ + raw_spin_unlock_irqrestore(&grq.lock, *flags); +} + +static inline struct rq +*task_grq_lock(struct task_struct *p, unsigned long *flags) + __acquires(grq.lock) +{ + grq_lock_irqsave(flags); + return task_rq(p); +} + +static inline struct rq +*time_task_grq_lock(struct task_struct *p, unsigned long *flags) + __acquires(grq.lock) +{ + struct rq *rq = task_grq_lock(p, flags); + update_clocks(rq); + return rq; +} + +static inline struct rq *task_grq_lock_irq(struct task_struct *p) + __acquires(grq.lock) +{ + grq_lock_irq(); + return task_rq(p); +} + +static inline void time_task_grq_lock_irq(struct task_struct *p) + __acquires(grq.lock) +{ + struct rq *rq = task_grq_lock_irq(p); + update_clocks(rq); +} + +static inline void task_grq_unlock_irq(void) + __releases(grq.lock) +{ + grq_unlock_irq(); +} + +static inline void task_grq_unlock(unsigned long *flags) + __releases(grq.lock) +{ + grq_unlock_irqrestore(flags); +} + +/** + * grunqueue_is_locked + * + * Returns true if the global runqueue is locked. + * This interface allows printk to be called with the runqueue lock + * held and know whether or not it is OK to wake up the klogd. 
+ */ +inline int grunqueue_is_locked(void) +{ + return raw_spin_is_locked(&grq.lock); +} + +inline void grq_unlock_wait(void) + __releases(grq.lock) +{ + smp_mb(); /* spin-unlock-wait is not a full memory barrier */ + raw_spin_unlock_wait(&grq.lock); +} + +static inline void time_grq_lock(struct rq *rq, unsigned long *flags) + __acquires(grq.lock) +{ + local_irq_save(*flags); + time_lock_grq(rq); +} + +static inline struct rq *__task_grq_lock(struct task_struct *p) + __acquires(grq.lock) +{ + grq_lock(); + return task_rq(p); +} + +static inline void __task_grq_unlock(void) + __releases(grq.lock) +{ + grq_unlock(); +} + +#ifndef __ARCH_WANT_UNLOCKED_CTXSW +static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) +{ +} + +static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) +{ +#ifdef CONFIG_DEBUG_SPINLOCK + /* this is a valid case when another task releases the spinlock */ + grq.lock.owner = current; +#endif + /* + * If we are tracking spinlock dependencies then we have to + * fix up the runqueue lock - which gets 'carried over' from + * prev into current: + */ + spin_acquire(&grq.lock.dep_map, 0, 0, _THIS_IP_); + + grq_unlock_irq(); +} + +#else /* __ARCH_WANT_UNLOCKED_CTXSW */ + +static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) +{ +#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW + grq_unlock_irq(); +#else + grq_unlock(); +#endif +} + +static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) +{ + smp_wmb(); +#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW + local_irq_enable(); +#endif +} +#endif /* __ARCH_WANT_UNLOCKED_CTXSW */ + +static inline int deadline_before(u64 deadline, u64 time) +{ + return (deadline < time); +} + +static inline int deadline_after(u64 deadline, u64 time) +{ + return (deadline > time); +} + +/* + * A task that is queued but not running will be on the grq run list. + * A task that is not running or queued will not be on the grq run list. + * A task that is currently running will have ->oncpu set but not on the + * grq run list. + */ +static inline int task_queued(struct task_struct *p) +{ + return (!list_empty(&p->run_list)); +} + +/* + * Removing from the global runqueue. Enter with grq locked. + */ +static void dequeue_task(struct task_struct *p) +{ + list_del_init(&p->run_list); + if (list_empty(grq.queue + p->prio)) + __clear_bit(p->prio, grq.prio_bitmap); +} + +/* + * To determine if it's safe for a task of SCHED_IDLEPRIO to actually run as + * an idle task, we ensure none of the following conditions are met. + */ +static int idleprio_suitable(struct task_struct *p) +{ + return (!freezing(p) && !signal_pending(p) && + !(task_contributes_to_load(p)) && !(p->flags & (PF_EXITING))); +} + +/* + * To determine if a task of SCHED_ISO can run in pseudo-realtime, we check + * that the iso_refractory flag is not set. + */ +static int isoprio_suitable(void) +{ + return !grq.iso_refractory; +} + +/* + * Adding to the global runqueue. Enter with grq locked. 
+ */ +static void enqueue_task(struct task_struct *p) +{ + if (!rt_task(p)) { + /* Check it hasn't gotten rt from PI */ + if ((idleprio_task(p) && idleprio_suitable(p)) || + (iso_task(p) && isoprio_suitable())) + p->prio = p->normal_prio; + else + p->prio = NORMAL_PRIO; + } + __set_bit(p->prio, grq.prio_bitmap); + list_add_tail(&p->run_list, grq.queue + p->prio); + sched_info_queued(p); +} + +/* Only idle task does this as a real time task*/ +static inline void enqueue_task_head(struct task_struct *p) +{ + __set_bit(p->prio, grq.prio_bitmap); + list_add(&p->run_list, grq.queue + p->prio); + sched_info_queued(p); +} + +static inline void requeue_task(struct task_struct *p) +{ + sched_info_queued(p); +} + +/* + * Returns the relative length of deadline all compared to the shortest + * deadline which is that of nice -20. + */ +static inline int task_prio_ratio(struct task_struct *p) +{ + return prio_ratios[TASK_USER_PRIO(p)]; +} + +/* + * task_timeslice - all tasks of all priorities get the exact same timeslice + * length. CPU distribution is handled by giving different deadlines to + * tasks of different priorities. Use 128 as the base value for fast shifts. + */ +static inline int task_timeslice(struct task_struct *p) +{ + return (rr_interval * task_prio_ratio(p) / 128); +} + +#ifdef CONFIG_SMP +/* + * qnr is the "queued but not running" count which is the total number of + * tasks on the global runqueue list waiting for cpu time but not actually + * currently running on a cpu. + */ +static inline void inc_qnr(void) +{ + grq.qnr++; +} + +static inline void dec_qnr(void) +{ + grq.qnr--; +} + +static inline int queued_notrunning(void) +{ + return grq.qnr; +} + +/* + * The cpu_idle_map stores a bitmap of all the CPUs currently idle to + * allow easy lookup of whether any suitable idle CPUs are available. + * It's cheaper to maintain a binary yes/no if there are any idle CPUs on the + * idle_cpus variable than to do a full bitmask check when we are busy. + */ +static inline void set_cpuidle_map(unsigned long cpu) +{ + if (likely(cpu_online(cpu))) { + cpu_set(cpu, grq.cpu_idle_map); + grq.idle_cpus = 1; + } +} + +static inline void clear_cpuidle_map(unsigned long cpu) +{ + cpu_clear(cpu, grq.cpu_idle_map); + if (cpus_empty(grq.cpu_idle_map)) + grq.idle_cpus = 0; +} + +static int suitable_idle_cpus(struct task_struct *p) +{ + if (!grq.idle_cpus) + return 0; + return (cpus_intersects(p->cpus_allowed, grq.cpu_idle_map)); +} + +static void resched_task(struct task_struct *p); + +#define CPUIDLE_DIFF_THREAD (1) +#define CPUIDLE_DIFF_CORE (2) +#define CPUIDLE_CACHE_BUSY (4) +#define CPUIDLE_DIFF_CPU (8) +#define CPUIDLE_THREAD_BUSY (16) +#define CPUIDLE_DIFF_NODE (32) + +/* + * The best idle CPU is chosen according to the CPUIDLE ranking above where the + * lowest value would give the most suitable CPU to schedule p onto next. The + * order works out to be the following: + * + * Same core, idle or busy cache, idle threads + * Other core, same cache, idle or busy cache, idle threads. + * Same node, other CPU, idle cache, idle threads. + * Same node, other CPU, busy cache, idle threads. + * Same core, busy threads. + * Other core, same cache, busy threads. + * Same node, other CPU, busy threads. + * Other node, other CPU, idle cache, idle threads. + * Other node, other CPU, busy cache, idle threads. + * Other node, other CPU, busy threads. 
+ */ +static void +resched_best_mask(unsigned long best_cpu, struct rq *rq, cpumask_t *tmpmask) +{ + unsigned long cpu_tmp, best_ranking; + + best_ranking = ~0UL; + + for_each_cpu_mask(cpu_tmp, *tmpmask) { + unsigned long ranking; + struct rq *tmp_rq; + + ranking = 0; + tmp_rq = cpu_rq(cpu_tmp); + +#ifdef CONFIG_NUMA + if (rq->cpu_locality[cpu_tmp] > 3) + ranking |= CPUIDLE_DIFF_NODE; + else +#endif + if (rq->cpu_locality[cpu_tmp] > 2) + ranking |= CPUIDLE_DIFF_CPU; +#ifdef CONFIG_SCHED_MC + if (rq->cpu_locality[cpu_tmp] == 2) + ranking |= CPUIDLE_DIFF_CORE; + if (!(tmp_rq->cache_idle(cpu_tmp))) + ranking |= CPUIDLE_CACHE_BUSY; +#endif +#ifdef CONFIG_SCHED_SMT + if (rq->cpu_locality[cpu_tmp] == 1) + ranking |= CPUIDLE_DIFF_THREAD; + if (!(tmp_rq->siblings_idle(cpu_tmp))) + ranking |= CPUIDLE_THREAD_BUSY; +#endif + if (ranking < best_ranking) { + best_cpu = cpu_tmp; + if (ranking == 0) + break; + best_ranking = ranking; + } + } + + resched_task(cpu_rq(best_cpu)->curr); +} + +static void resched_best_idle(struct task_struct *p) +{ + cpumask_t tmpmask; + + cpus_and(tmpmask, p->cpus_allowed, grq.cpu_idle_map); + resched_best_mask(task_cpu(p), task_rq(p), &tmpmask); +} + +static inline void resched_suitable_idle(struct task_struct *p) +{ + if (suitable_idle_cpus(p)) + resched_best_idle(p); +} +/* + * Flags to tell us whether this CPU is running a CPU frequency governor that + * has slowed its speed or not. No locking required as the very rare wrongly + * read value would be harmless. + */ +void cpu_scaling(int cpu) +{ + cpu_rq(cpu)->scaling = 1; +} + +void cpu_nonscaling(int cpu) +{ + cpu_rq(cpu)->scaling = 0; +} + +static inline int scaling_rq(struct rq *rq) +{ + return rq->scaling; +} +#else /* CONFIG_SMP */ +static inline void inc_qnr(void) +{ +} + +static inline void dec_qnr(void) +{ +} + +static inline int queued_notrunning(void) +{ + return grq.nr_running; +} + +static inline void set_cpuidle_map(unsigned long cpu) +{ +} + +static inline void clear_cpuidle_map(unsigned long cpu) +{ +} + +static inline int suitable_idle_cpus(struct task_struct *p) +{ + return uprq->curr == uprq->idle; +} + +static inline void resched_suitable_idle(struct task_struct *p) +{ +} + +void cpu_scaling(int __unused) +{ +} + +void cpu_nonscaling(int __unused) +{ +} + +/* + * Although CPUs can scale in UP, there is nowhere else for tasks to go so this + * always returns 0. + */ +static inline int scaling_rq(struct rq *rq) +{ + return 0; +} +#endif /* CONFIG_SMP */ +EXPORT_SYMBOL_GPL(cpu_scaling); +EXPORT_SYMBOL_GPL(cpu_nonscaling); + +/* + * activate_idle_task - move idle task to the _front_ of runqueue. + */ +static inline void activate_idle_task(struct task_struct *p) +{ + enqueue_task_head(p); + grq.nr_running++; + inc_qnr(); +} + +static inline int normal_prio(struct task_struct *p) +{ + if (has_rt_policy(p)) + return MAX_RT_PRIO - 1 - p->rt_priority; + if (idleprio_task(p)) + return IDLE_PRIO; + if (iso_task(p)) + return ISO_PRIO; + return NORMAL_PRIO; +} + +/* + * Calculate the current priority, i.e. the priority + * taken into account by the scheduler. This value might + * be boosted by RT tasks as it will be RT if the task got + * RT-boosted. If not then it returns p->normal_prio. + */ +static int effective_prio(struct task_struct *p) +{ + p->normal_prio = normal_prio(p); + /* + * If we are RT tasks or we were boosted to RT priority, + * keep the priority unchanged. 
Otherwise, update priority + * to the normal priority: + */ + if (!rt_prio(p->prio)) + return p->normal_prio; + return p->prio; +} + +/* + * activate_task - move a task to the runqueue. Enter with grq locked. + */ +static void activate_task(struct task_struct *p, struct rq *rq) +{ + update_clocks(rq); + + /* + * Sleep time is in units of nanosecs, so shift by 20 to get a + * milliseconds-range estimation of the amount of time that the task + * spent sleeping: + */ + if (unlikely(prof_on == SLEEP_PROFILING)) { + if (p->state == TASK_UNINTERRUPTIBLE) + profile_hits(SLEEP_PROFILING, (void *)get_wchan(p), + (rq->clock - p->last_ran) >> 20); + } + + p->prio = effective_prio(p); + if (task_contributes_to_load(p)) + grq.nr_uninterruptible--; + enqueue_task(p); + grq.nr_running++; + inc_qnr(); +} + +/* + * deactivate_task - If it's running, it's not on the grq and we can just + * decrement the nr_running. Enter with grq locked. + */ +static inline void deactivate_task(struct task_struct *p) +{ + if (task_contributes_to_load(p)) + grq.nr_uninterruptible++; + grq.nr_running--; +} + +#ifdef CONFIG_SMP +void set_task_cpu(struct task_struct *p, unsigned int cpu) +{ + trace_sched_migrate_task(p, cpu); + if (task_cpu(p) != cpu) + perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0); + + /* + * After ->cpu is set up to a new value, task_grq_lock(p, ...) can be + * successfuly executed on another CPU. We must ensure that updates of + * per-task data have been completed by this moment. + */ + smp_wmb(); + task_thread_info(p)->cpu = cpu; +} + +static inline void clear_sticky(struct task_struct *p) +{ + p->sticky = 0; +} + +static inline int task_sticky(struct task_struct *p) +{ + return p->sticky; +} + +/* Reschedule the best idle CPU that is not this one. */ +static void +resched_closest_idle(struct rq *rq, unsigned long cpu, struct task_struct *p) +{ + cpumask_t tmpmask; + + cpus_and(tmpmask, p->cpus_allowed, grq.cpu_idle_map); + cpu_clear(cpu, tmpmask); + if (cpus_empty(tmpmask)) + return; + resched_best_mask(cpu, rq, &tmpmask); +} + +/* + * We set the sticky flag on a task that is descheduled involuntarily meaning + * it is awaiting further CPU time. If the last sticky task is still sticky + * but unlucky enough to not be the next task scheduled, we unstick it and try + * to find it an idle CPU. Realtime tasks do not stick to minimise their + * latency at all times. + */ +static inline void +swap_sticky(struct rq *rq, unsigned long cpu, struct task_struct *p) +{ + if (rq->sticky_task) { + if (rq->sticky_task == p) { + p->sticky = 1; + return; + } + if (task_sticky(rq->sticky_task)) { + clear_sticky(rq->sticky_task); + resched_closest_idle(rq, cpu, rq->sticky_task); + } + } + if (!rt_task(p)) { + p->sticky = 1; + rq->sticky_task = p; + } else { + resched_closest_idle(rq, cpu, p); + rq->sticky_task = NULL; + } +} + +static inline void unstick_task(struct rq *rq, struct task_struct *p) +{ + rq->sticky_task = NULL; + clear_sticky(p); +} +#else +static inline void clear_sticky(struct task_struct *p) +{ +} + +static inline int task_sticky(struct task_struct *p) +{ + return 0; +} + +static inline void +swap_sticky(struct rq *rq, unsigned long cpu, struct task_struct *p) +{ +} + +static inline void unstick_task(struct rq *rq, struct task_struct *p) +{ +} +#endif + +/* + * Move a task off the global queue and take it to a cpu for it will + * become the running task. 
+ */ +static inline void take_task(struct rq *rq, struct task_struct *p) +{ + set_task_cpu(p, cpu_of(rq)); + dequeue_task(p); + clear_sticky(p); + dec_qnr(); +} + +/* + * Returns a descheduling task to the grq runqueue unless it is being + * deactivated. + */ +static inline void return_task(struct task_struct *p, int deactivate) +{ + if (deactivate) + deactivate_task(p); + else { + inc_qnr(); + enqueue_task(p); + } +} + +/* + * resched_task - mark a task 'to be rescheduled now'. + * + * On UP this means the setting of the need_resched flag, on SMP it + * might also involve a cross-CPU call to trigger the scheduler on + * the target CPU. + */ +#ifdef CONFIG_SMP + +#ifndef tsk_is_polling +#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) +#endif + +static void resched_task(struct task_struct *p) +{ + int cpu; + + assert_raw_spin_locked(&grq.lock); + + if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED))) + return; + + set_tsk_thread_flag(p, TIF_NEED_RESCHED); + + cpu = task_cpu(p); + if (cpu == smp_processor_id()) + return; + + /* NEED_RESCHED must be visible before we test polling */ + smp_mb(); + if (!tsk_is_polling(p)) + smp_send_reschedule(cpu); +} + +#else +static inline void resched_task(struct task_struct *p) +{ + assert_raw_spin_locked(&grq.lock); + set_tsk_need_resched(p); +} +#endif + +/** + * task_curr - is this task currently executing on a CPU? + * @p: the task in question. + */ +inline int task_curr(const struct task_struct *p) +{ + return cpu_curr(task_cpu(p)) == p; +} + +#ifdef CONFIG_SMP +struct migration_req { + struct task_struct *task; + int dest_cpu; +}; + +/* + * wait_task_inactive - wait for a thread to unschedule. + * + * If @match_state is nonzero, it's the @p->state value just checked and + * not expected to change. If it changes, i.e. @p might have woken up, + * then return zero. When we succeed in waiting for @p to be off its CPU, + * we return a positive number (its total switch count). If a second call + * a short while later returns the same number, the caller can be sure that + * @p has remained unscheduled the whole time. + * + * The caller must ensure that the task *will* unschedule sometime soon, + * else this function might spin for a *long* time. This function can't + * be called with interrupts off, or it may introduce deadlock with + * smp_call_function() if an IPI is sent by the same process we are + * waiting to become inactive. + */ +unsigned long wait_task_inactive(struct task_struct *p, long match_state) +{ + unsigned long flags; + int running, on_rq; + unsigned long ncsw; + struct rq *rq; + + for (;;) { + /* + * We do the initial early heuristics without holding + * any task-queue locks at all. We'll only try to get + * the runqueue lock when things look like they will + * work out! In the unlikely event rq is dereferenced + * since we're lockless, grab it again. + */ +#ifdef CONFIG_SMP +retry_rq: + rq = task_rq(p); + if (unlikely(!rq)) + goto retry_rq; +#else /* CONFIG_SMP */ + rq = task_rq(p); +#endif + /* + * If the task is actively running on another CPU + * still, just relax and busy-wait without holding + * any locks. + * + * NOTE! Since we don't hold any locks, it's not + * even sure that "rq" stays as the right runqueue! + * But we don't care, since this will return false + * if the runqueue has changed and p is actually now + * running somewhere else! 
+ */ + while (task_running(p) && p == rq->curr) { + if (match_state && unlikely(p->state != match_state)) + return 0; + cpu_relax(); + } + + /* + * Ok, time to look more closely! We need the grq + * lock now, to be *sure*. If we're wrong, we'll + * just go back and repeat. + */ + rq = task_grq_lock(p, &flags); + trace_sched_wait_task(p); + running = task_running(p); + on_rq = task_queued(p); + ncsw = 0; + if (!match_state || p->state == match_state) + ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ + task_grq_unlock(&flags); + + /* + * If it changed from the expected state, bail out now. + */ + if (unlikely(!ncsw)) + break; + + /* + * Was it really running after all now that we + * checked with the proper locks actually held? + * + * Oops. Go back and try again.. + */ + if (unlikely(running)) { + cpu_relax(); + continue; + } + + /* + * It's not enough that it's not actively running, + * it must be off the runqueue _entirely_, and not + * preempted! + * + * So if it was still runnable (but just not actively + * running right now), it's preempted, and we should + * yield - it could be a while. + */ + if (unlikely(on_rq)) { + schedule_timeout_uninterruptible(1); + continue; + } + + /* + * Ahh, all good. It wasn't running, and it wasn't + * runnable, which means that it will never become + * running in the future either. We're all done! + */ + break; + } + + return ncsw; +} + +/*** + * kick_process - kick a running thread to enter/exit the kernel + * @p: the to-be-kicked thread + * + * Cause a process which is running on another CPU to enter + * kernel-mode, without any delay. (to get signals handled.) + * + * NOTE: this function doesnt have to take the runqueue lock, + * because all it wants to ensure is that the remote task enters + * the kernel. If the IPI races and the task has been migrated + * to another CPU then no harm is done and the purpose has been + * achieved as well. + */ +void kick_process(struct task_struct *p) +{ + int cpu; + + preempt_disable(); + cpu = task_cpu(p); + if ((cpu != smp_processor_id()) && task_curr(p)) + smp_send_reschedule(cpu); + preempt_enable(); +} +EXPORT_SYMBOL_GPL(kick_process); +#endif + +#define rq_idle(rq) ((rq)->rq_prio == PRIO_LIMIT) + +/* + * RT tasks preempt purely on priority. SCHED_NORMAL tasks preempt on the + * basis of earlier deadlines. SCHED_IDLEPRIO don't preempt anything else or + * between themselves, they cooperatively multitask. An idle rq scores as + * prio PRIO_LIMIT so it is always preempted. + */ +static inline int +can_preempt(struct task_struct *p, int prio, u64 deadline) +{ + /* Better static priority RT task or better policy preemption */ + if (p->prio < prio) + return 1; + if (p->prio > prio) + return 0; + /* SCHED_NORMAL, BATCH and ISO will preempt based on deadline */ + if (!deadline_before(p->deadline, deadline)) + return 0; + return 1; +} +#ifdef CONFIG_SMP +#ifdef CONFIG_HOTPLUG_CPU +/* + * Check to see if there is a task that is affined only to offline CPUs but + * still wants runtime. This happens to kernel threads during suspend/halt and + * disabling of CPUs. + */ +static inline int online_cpus(struct task_struct *p) +{ + return (likely(cpus_intersects(cpu_online_map, p->cpus_allowed))); +} +#else /* CONFIG_HOTPLUG_CPU */ +/* All available CPUs are always online without hotplug. */ +static inline int online_cpus(struct task_struct *p) +{ + return 1; +} +#endif + +/* + * Check to see if p can run on cpu, and if not, whether there are any online + * CPUs it can run on instead. 
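+ * needs_other_cpu() itself only tests p->cpus_allowed; whether any of
+ * those CPUs are actually online is what online_cpus() above answers.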
+ */ +static inline int needs_other_cpu(struct task_struct *p, int cpu) +{ + if (unlikely(!cpu_isset(cpu, p->cpus_allowed))) + return 1; + return 0; +} + +/* + * latest_deadline and highest_prio_rq are initialised only to silence the + * compiler. When all else is equal, still prefer this_rq. + */ +static void try_preempt(struct task_struct *p, struct rq *this_rq) +{ + struct rq *highest_prio_rq = this_rq; + u64 latest_deadline; + unsigned long cpu; + int highest_prio; + cpumask_t tmp; + + /* + * We clear the sticky flag here because for a task to have called + * try_preempt with the sticky flag enabled means some complicated + * re-scheduling has occurred and we should ignore the sticky flag. + */ + clear_sticky(p); + + if (suitable_idle_cpus(p)) { + resched_best_idle(p); + return; + } + + /* IDLEPRIO tasks never preempt anything */ + if (p->policy == SCHED_IDLEPRIO) + return; + + if (likely(online_cpus(p))) + cpus_and(tmp, cpu_online_map, p->cpus_allowed); + else + return; + + latest_deadline = 0; + highest_prio = -1; + + for_each_cpu_mask(cpu, tmp) { + struct rq *rq; + int rq_prio; + + rq = cpu_rq(cpu); + rq_prio = rq->rq_prio; + if (rq_prio < highest_prio) + continue; + + if (rq_prio > highest_prio || + deadline_after(rq->rq_deadline, latest_deadline)) { + latest_deadline = rq->rq_deadline; + highest_prio = rq_prio; + highest_prio_rq = rq; + } + } + + if (!can_preempt(p, highest_prio, highest_prio_rq->rq_deadline)) + return; + + resched_task(highest_prio_rq->curr); +} +#else /* CONFIG_SMP */ +static inline int needs_other_cpu(struct task_struct *p, int cpu) +{ + return 0; +} + +static void try_preempt(struct task_struct *p, struct rq *this_rq) +{ + if (p->policy == SCHED_IDLEPRIO) + return; + if (can_preempt(p, uprq->rq_prio, uprq->rq_deadline)) + resched_task(uprq->curr); +} +#endif /* CONFIG_SMP */ + +/** + * task_oncpu_function_call - call a function on the cpu on which a task runs + * @p: the task to evaluate + * @func: the function to be called + * @info: the function call argument + * + * Calls the function @func when the task is currently running. This might + * be on the current CPU, which just calls the function directly + */ +void task_oncpu_function_call(struct task_struct *p, + void (*func) (void *info), void *info) +{ + int cpu; + + preempt_disable(); + cpu = task_cpu(p); + if (task_curr(p)) + smp_call_function_single(cpu, func, info, 1); + preempt_enable(); +} + +static inline void ttwu_activate(struct task_struct *p, struct rq *rq, + bool is_sync) +{ + activate_task(p, rq); + + /* + * Sync wakeups (i.e. those types of wakeups where the waker + * has indicated that it will leave the CPU in short order) + * don't trigger a preemption if there are no idle cpus, + * instead waiting for current to deschedule. + */ + if (!is_sync || suitable_idle_cpus(p)) + try_preempt(p, rq); +} + +static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq, + bool success) +{ + trace_sched_wakeup(p, success); + p->state = TASK_RUNNING; + + /* + * if a worker is waking up, notify workqueue. Note that on BFS, we + * don't really know what cpu it will be, so we fake it for + * wq_worker_waking_up :/ + */ + if ((p->flags & PF_WQ_WORKER) && success) + wq_worker_waking_up(p, cpu_of(rq)); +} + +/*** + * try_to_wake_up - wake up a thread + * @p: the thread to be awakened + * @state: the mask of task states that can be woken + * @wake_flags: wake modifier flags (WF_*) + * + * Put it on the run-queue if it's not already there. 
The "current" + * thread is always on the run-queue (except when the actual + * re-schedule is in progress), and as such you're allowed to do + * the simpler "current->state = TASK_RUNNING" to mark yourself + * runnable without the overhead of this. + * + * Returns %true if @p was woken up, %false if it was already running + * or @state didn't match @p's state. + */ +static int try_to_wake_up(struct task_struct *p, unsigned int state, + int wake_flags) +{ + unsigned long flags; + int success = 0; + struct rq *rq; + + get_cpu(); + + /* This barrier is undocumented, probably for p->state? くそ */ + smp_wmb(); + + /* + * No need to do time_lock_grq as we only need to update the rq clock + * if we activate the task + */ + rq = task_grq_lock(p, &flags); + + /* state is a volatile long, どうして、分からない */ + if (!((unsigned int)p->state & state)) + goto out_unlock; + + if (task_queued(p) || task_running(p)) + goto out_running; + + ttwu_activate(p, rq, wake_flags & WF_SYNC); + success = true; + +out_running: + ttwu_post_activation(p, rq, success); +out_unlock: + task_grq_unlock(&flags); + put_cpu(); + + return success; +} + +/** + * try_to_wake_up_local - try to wake up a local task with grq lock held + * @p: the thread to be awakened + * + * Put @p on the run-queue if it's not already there. The caller must + * ensure that grq is locked and, @p is not the current task. + * grq stays locked over invocation. + */ +static void try_to_wake_up_local(struct task_struct *p) +{ + struct rq *rq = task_rq(p); + bool success = false; + + lockdep_assert_held(&grq.lock); + + if (!(p->state & TASK_NORMAL)) + return; + + if (!task_queued(p)) { + if (likely(!task_running(p))) { + schedstat_inc(rq, ttwu_count); + schedstat_inc(rq, ttwu_local); + } + ttwu_activate(p, rq, false); + success = true; + } + ttwu_post_activation(p, rq, success); +} + +/** + * wake_up_process - Wake up a specific process + * @p: The process to be woken up. + * + * Attempt to wake up the nominated process and move it to the set of runnable + * processes. Returns 1 if the process was woken up, 0 if it was already + * running. + * + * It may be assumed that this function implies a write memory barrier before + * changing the task state if and only if any tasks are woken up. + */ +int wake_up_process(struct task_struct *p) +{ + return try_to_wake_up(p, TASK_ALL, 0); +} +EXPORT_SYMBOL(wake_up_process); + +int wake_up_state(struct task_struct *p, unsigned int state) +{ + return try_to_wake_up(p, state, 0); +} + +static void time_slice_expired(struct task_struct *p); + +/* + * Perform scheduler related setup for a newly forked process p. + * p is forked by current. + */ +void sched_fork(struct task_struct *p, int clone_flags) +{ + struct task_struct *curr; + int cpu = get_cpu(); + struct rq *rq; + +#ifdef CONFIG_PREEMPT_NOTIFIERS + INIT_HLIST_HEAD(&p->preempt_notifiers); +#endif + /* + * We mark the process as running here. This guarantees that + * nobody will actually run it, and a signal or other external + * event cannot wake it up and insert it on the runqueue either. + */ + p->state = TASK_RUNNING; + set_task_cpu(p, cpu); + + /* Should be reset in fork.c but done here for ease of bfs patching */ + p->sched_time = p->stime_pc = p->utime_pc = 0; + + /* + * Revert to default priority/policy on fork if requested. 
+ */ + if (unlikely(p->sched_reset_on_fork)) { + if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) { + p->policy = SCHED_NORMAL; + p->normal_prio = normal_prio(p); + } + + if (PRIO_TO_NICE(p->static_prio) < 0) { + p->static_prio = NICE_TO_PRIO(0); + p->normal_prio = p->static_prio; + } + + /* + * We don't need the reset flag anymore after the fork. It has + * fulfilled its duty: + */ + p->sched_reset_on_fork = 0; + } + + curr = current; + /* + * Make sure we do not leak PI boosting priority to the child. + */ + p->prio = curr->normal_prio; + + INIT_LIST_HEAD(&p->run_list); +#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) + if (unlikely(sched_info_on())) + memset(&p->sched_info, 0, sizeof(p->sched_info)); +#endif + + p->oncpu = 0; + clear_sticky(p); + +#ifdef CONFIG_PREEMPT + /* Want to start with kernel preemption disabled. */ + task_thread_info(p)->preempt_count = 1; +#endif + if (unlikely(p->policy == SCHED_FIFO)) + goto out; + /* + * Share the timeslice between parent and child, thus the + * total amount of pending timeslices in the system doesn't change, + * resulting in more scheduling fairness. If it's negative, it won't + * matter since that's the same as being 0. current's time_slice is + * actually in rq_time_slice when it's running, as is its last_ran + * value. rq->rq_deadline is only modified within schedule() so it + * is always equal to current->deadline. + */ + rq = task_grq_lock_irq(curr); + if (likely(rq->rq_time_slice >= RESCHED_US * 2)) { + rq->rq_time_slice /= 2; + p->time_slice = rq->rq_time_slice; + } else { + /* + * Forking task has run out of timeslice. Reschedule it and + * start its child with a new time slice and deadline. The + * child will end up running first because its deadline will + * be slightly earlier. + */ + rq->rq_time_slice = 0; + set_tsk_need_resched(curr); + time_slice_expired(p); + } + p->last_ran = rq->rq_last_ran; + task_grq_unlock_irq(); +out: + put_cpu(); +} + +/* + * wake_up_new_task - wake up a newly created task for the first time. + * + * This function will do some initial scheduler statistics housekeeping + * that must be done for every newly created context, then puts the task + * on the runqueue and wakes it. + */ +void wake_up_new_task(struct task_struct *p, unsigned long clone_flags) +{ + struct task_struct *parent; + unsigned long flags; + struct rq *rq; + + rq = task_grq_lock(p, &flags); + p->state = TASK_RUNNING; + parent = p->parent; + /* Unnecessary but small chance that the parent changed CPU */ + set_task_cpu(p, task_cpu(parent)); + activate_task(p, rq); + trace_sched_wakeup_new(p, 1); + if (!(clone_flags & CLONE_VM) && rq->curr == parent && + !suitable_idle_cpus(p)) { + /* + * The VM isn't cloned, so we're in a good position to + * do child-runs-first in anticipation of an exec. This + * usually avoids a lot of COW overhead. + */ + resched_task(parent); + } else + try_preempt(p, rq); + task_grq_unlock(&flags); +} + +#ifdef CONFIG_PREEMPT_NOTIFIERS + +/** + * preempt_notifier_register - tell me when current is being preempted & rescheduled + * @notifier: notifier struct to register + */ +void preempt_notifier_register(struct preempt_notifier *notifier) +{ + hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); +} +EXPORT_SYMBOL_GPL(preempt_notifier_register); + +/** + * preempt_notifier_unregister - no longer interested in preemption notifications + * @notifier: notifier struct to unregister + * + * This is safe to call from within a preemption notifier. 
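+ * It simply unlinks the notifier from the list it was registered on.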
+ */ +void preempt_notifier_unregister(struct preempt_notifier *notifier) +{ + hlist_del(¬ifier->link); +} +EXPORT_SYMBOL_GPL(preempt_notifier_unregister); + +static void fire_sched_in_preempt_notifiers(struct task_struct *curr) +{ + struct preempt_notifier *notifier; + struct hlist_node *node; + + hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link) + notifier->ops->sched_in(notifier, raw_smp_processor_id()); +} + +static void +fire_sched_out_preempt_notifiers(struct task_struct *curr, + struct task_struct *next) +{ + struct preempt_notifier *notifier; + struct hlist_node *node; + + hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link) + notifier->ops->sched_out(notifier, next); +} + +#else /* !CONFIG_PREEMPT_NOTIFIERS */ + +static void fire_sched_in_preempt_notifiers(struct task_struct *curr) +{ +} + +static void +fire_sched_out_preempt_notifiers(struct task_struct *curr, + struct task_struct *next) +{ +} + +#endif /* CONFIG_PREEMPT_NOTIFIERS */ + +/** + * prepare_task_switch - prepare to switch tasks + * @rq: the runqueue preparing to switch + * @next: the task we are going to switch to. + * + * This is called with the rq lock held and interrupts off. It must + * be paired with a subsequent finish_task_switch after the context + * switch. + * + * prepare_task_switch sets up locking and calls architecture specific + * hooks. + */ +static inline void +prepare_task_switch(struct rq *rq, struct task_struct *prev, + struct task_struct *next) +{ + fire_sched_out_preempt_notifiers(prev, next); + prepare_lock_switch(rq, next); + prepare_arch_switch(next); +} + +/** + * finish_task_switch - clean up after a task-switch + * @rq: runqueue associated with task-switch + * @prev: the thread we just switched away from. + * + * finish_task_switch must be called after the context switch, paired + * with a prepare_task_switch call before the context switch. + * finish_task_switch will reconcile locking set up by prepare_task_switch, + * and do any other architecture-specific cleanup actions. + * + * Note that we may have delayed dropping an mm in context_switch(). If + * so, we finish that here outside of the runqueue lock. (Doing it + * with the lock held can cause deadlocks; see schedule() for + * details.) + */ +static inline void finish_task_switch(struct rq *rq, struct task_struct *prev) + __releases(grq.lock) +{ + struct mm_struct *mm = rq->prev_mm; + long prev_state; + + rq->prev_mm = NULL; + + /* + * A task struct has one reference for the use as "current". + * If a task dies, then it sets TASK_DEAD in tsk->state and calls + * schedule one last time. The schedule call will never return, and + * the scheduled task must drop that reference. + * The test for TASK_DEAD must occur while the runqueue locks are + * still held, otherwise prev could be scheduled on another cpu, die + * there before we look at prev->state, and then the reference would + * be dropped twice. + * Manfred Spraul + */ + prev_state = prev->state; + finish_arch_switch(prev); +#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW + local_irq_disable(); +#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ + perf_event_task_sched_in(current); +#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW + local_irq_enable(); +#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ + finish_lock_switch(rq, prev); + + fire_sched_in_preempt_notifiers(current); + if (mm) + mmdrop(mm); + if (unlikely(prev_state == TASK_DEAD)) { + /* + * Remove function-return probe instances associated with this + * task and put them back on the free list. 
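+ * This also drops the reference the dead task was holding for its use
+ * as "current", via the put_task_struct() below.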
+ */ + kprobe_flush_task(prev); + put_task_struct(prev); + } +} + +/** + * schedule_tail - first thing a freshly forked thread must call. + * @prev: the thread we just switched away from. + */ +asmlinkage void schedule_tail(struct task_struct *prev) + __releases(grq.lock) +{ + struct rq *rq = this_rq(); + + finish_task_switch(rq, prev); +#ifdef __ARCH_WANT_UNLOCKED_CTXSW + /* In this case, finish_task_switch does not reenable preemption */ + preempt_enable(); +#endif + if (current->set_child_tid) + put_user(current->pid, current->set_child_tid); +} + +/* + * context_switch - switch to the new MM and the new + * thread's register state. + */ +static inline void +context_switch(struct rq *rq, struct task_struct *prev, + struct task_struct *next) +{ + struct mm_struct *mm, *oldmm; + + prepare_task_switch(rq, prev, next); + trace_sched_switch(prev, next); + mm = next->mm; + oldmm = prev->active_mm; + /* + * For paravirt, this is coupled with an exit in switch_to to + * combine the page table reload and the switch backend into + * one hypercall. + */ + arch_start_context_switch(prev); + + if (!mm) { + next->active_mm = oldmm; + atomic_inc(&oldmm->mm_count); + enter_lazy_tlb(oldmm, next); + } else + switch_mm(oldmm, mm, next); + + if (!prev->mm) { + prev->active_mm = NULL; + rq->prev_mm = oldmm; + } + /* + * Since the runqueue lock will be released by the next + * task (which is an invalid locking op but in the case + * of the scheduler it's an obvious special-case), so we + * do an early lockdep release here: + */ +#ifndef __ARCH_WANT_UNLOCKED_CTXSW + spin_release(&grq.lock.dep_map, 1, _THIS_IP_); +#endif + + /* Here we just switch the register state and the stack. */ + switch_to(prev, next, prev); + + barrier(); + /* + * this_rq must be evaluated again because prev may have moved + * CPUs since it called schedule(), thus the 'rq' on its stack + * frame will be invalid. + */ + finish_task_switch(this_rq(), prev); +} + +/* + * nr_running, nr_uninterruptible and nr_context_switches: + * + * externally visible scheduler statistics: current number of runnable + * threads, current number of uninterruptible-sleeping threads, total + * number of context switches performed since bootup. All are measured + * without grabbing the grq lock but the occasional inaccurate result + * doesn't matter so long as it's positive. 
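+ * Transiently negative values are clamped below so the unsigned return
+ * never appears as a huge bogus count.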
+ */ +unsigned long nr_running(void) +{ + long nr = grq.nr_running; + + if (unlikely(nr < 0)) + nr = 0; + return (unsigned long)nr; +} + +unsigned long nr_uninterruptible(void) +{ + long nu = grq.nr_uninterruptible; + + if (unlikely(nu < 0)) + nu = 0; + return nu; +} + +unsigned long long nr_context_switches(void) +{ + long long ns = grq.nr_switches; + + /* This is of course impossible */ + if (unlikely(ns < 0)) + ns = 1; + return (unsigned long long)ns; +} + +unsigned long nr_iowait(void) +{ + unsigned long i, sum = 0; + + for_each_possible_cpu(i) + sum += atomic_read(&cpu_rq(i)->nr_iowait); + + return sum; +} + +unsigned long nr_iowait_cpu(int cpu) +{ + struct rq *this = cpu_rq(cpu); + return atomic_read(&this->nr_iowait); +} + +unsigned long nr_active(void) +{ + return nr_running() + nr_uninterruptible(); +} + +/* Beyond a task running on this CPU, load is equal everywhere on BFS */ +unsigned long this_cpu_load(void) +{ + return this_rq()->rq_running + + ((queued_notrunning() + nr_uninterruptible()) / grq.noc); +} + +/* Variables and functions for calc_load */ +static unsigned long calc_load_update; +unsigned long avenrun[3]; +EXPORT_SYMBOL(avenrun); + +/** + * get_avenrun - get the load average array + * @loads: pointer to dest load array + * @offset: offset to add + * @shift: shift count to shift the result left + * + * These values are estimates at best, so no need for locking. + */ +void get_avenrun(unsigned long *loads, unsigned long offset, int shift) +{ + loads[0] = (avenrun[0] + offset) << shift; + loads[1] = (avenrun[1] + offset) << shift; + loads[2] = (avenrun[2] + offset) << shift; +} + +static unsigned long +calc_load(unsigned long load, unsigned long exp, unsigned long active) +{ + load *= exp; + load += active * (FIXED_1 - exp); + return load >> FSHIFT; +} + +/* + * calc_load - update the avenrun load estimates every LOAD_FREQ seconds. + */ +void calc_global_load(unsigned long ticks) +{ + long active; + + if (time_before(jiffies, calc_load_update)) + return; + active = nr_active() * FIXED_1; + + avenrun[0] = calc_load(avenrun[0], EXP_1, active); + avenrun[1] = calc_load(avenrun[1], EXP_5, active); + avenrun[2] = calc_load(avenrun[2], EXP_15, active); + + calc_load_update = jiffies + LOAD_FREQ; +} + +DEFINE_PER_CPU(struct kernel_stat, kstat); + +EXPORT_PER_CPU_SYMBOL(kstat); + +#ifdef CONFIG_IRQ_TIME_ACCOUNTING + +/* + * There are no locks covering percpu hardirq/softirq time. + * They are only modified in account_system_vtime, on corresponding CPU + * with interrupts disabled. So, writes are safe. + * They are read and saved off onto struct rq in update_rq_clock(). + * This may result in other CPU reading this CPU's irq time and can + * race with irq/account_system_vtime on this CPU. We would either get old + * or new value with a side effect of accounting a slice of irq time to wrong + * task when irq is in progress while we read rq->clock. That is a worthy + * compromise in place of having locks on each irq in account_system_time. 
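+ * On 32-bit a per-cpu seqcount guards readers against torn 64-bit
+ * totals; the 64-bit build reads the per-cpu counters directly.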
+ */ +static DEFINE_PER_CPU(u64, cpu_hardirq_time); +static DEFINE_PER_CPU(u64, cpu_softirq_time); + +static DEFINE_PER_CPU(u64, irq_start_time); +static int sched_clock_irqtime; + +void enable_sched_clock_irqtime(void) +{ + sched_clock_irqtime = 1; +} + +void disable_sched_clock_irqtime(void) +{ + sched_clock_irqtime = 0; +} + +#ifndef CONFIG_64BIT +static DEFINE_PER_CPU(seqcount_t, irq_time_seq); + +static inline void irq_time_write_begin(void) +{ + __this_cpu_inc(irq_time_seq.sequence); + smp_wmb(); +} + +static inline void irq_time_write_end(void) +{ + smp_wmb(); + __this_cpu_inc(irq_time_seq.sequence); +} + +static inline u64 irq_time_read(int cpu) +{ + u64 irq_time; + unsigned seq; + + do { + seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu)); + irq_time = per_cpu(cpu_softirq_time, cpu) + + per_cpu(cpu_hardirq_time, cpu); + } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq)); + + return irq_time; +} +#else /* CONFIG_64BIT */ +static inline void irq_time_write_begin(void) +{ +} + +static inline void irq_time_write_end(void) +{ +} + +static inline u64 irq_time_read(int cpu) +{ + return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu); +} +#endif /* CONFIG_64BIT */ + +/* + * Called before incrementing preempt_count on {soft,}irq_enter + * and before decrementing preempt_count on {soft,}irq_exit. + */ +void account_system_vtime(struct task_struct *curr) +{ + unsigned long flags; + s64 delta; + int cpu; + + if (!sched_clock_irqtime) + return; + + local_irq_save(flags); + + cpu = smp_processor_id(); + delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time); + __this_cpu_add(irq_start_time, delta); + + irq_time_write_begin(); + /* + * We do not account for softirq time from ksoftirqd here. + * We want to continue accounting softirq time to ksoftirqd thread + * in that case, so as not to confuse scheduler with a special task + * that do not consume any time, but still wants to run. + */ + if (hardirq_count()) + __this_cpu_add(cpu_hardirq_time, delta); + else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD)) + __this_cpu_add(cpu_softirq_time, delta); + + irq_time_write_end(); + local_irq_restore(flags); +} +EXPORT_SYMBOL_GPL(account_system_vtime); + +static void update_rq_clock_task(struct rq *rq, s64 delta) +{ + s64 irq_delta; + + irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; + + /* + * Since irq_time is only updated on {soft,}irq_exit, we might run into + * this case when a previous update_rq_clock() happened inside a + * {soft,}irq region. + * + * When this happens, we stop ->clock_task and only update the + * prev_irq_time stamp to account for the part that fit, so that a next + * update will consume the rest. This ensures ->clock_task is + * monotonic. + * + * It does however cause some slight miss-attribution of {soft,}irq + * time, a more accurate solution would be to update the irq_time using + * the current rq->clock timestamp, except that would require using + * atomic ops. + */ + if (irq_delta > delta) + irq_delta = delta; + + rq->prev_irq_time += irq_delta; + delta -= irq_delta; + rq->clock_task += delta; +} + +#else /* CONFIG_IRQ_TIME_ACCOUNTING */ + +static void update_rq_clock_task(struct rq *rq, s64 delta) +{ + rq->clock_task += delta; +} + +#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ + +/* + * On each tick, see what percentage of that tick was attributed to each + * component and add the percentage to the _pc values. Once a _pc value has + * accumulated one tick's worth, account for that. 
This means the total + * percentage of load components will always be 100 per tick. + */ +static void pc_idle_time(struct rq *rq, unsigned long pc) +{ + struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; + cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy); + + if (atomic_read(&rq->nr_iowait) > 0) { + rq->iowait_pc += pc; + if (rq->iowait_pc >= 100) { + rq->iowait_pc -= 100; + cpustat->iowait = cputime64_add(cpustat->iowait, tmp); + } + } else { + rq->idle_pc += pc; + if (rq->idle_pc >= 100) { + rq->idle_pc -= 100; + cpustat->idle = cputime64_add(cpustat->idle, tmp); + } + } +} + +static void +pc_system_time(struct rq *rq, struct task_struct *p, int hardirq_offset, + unsigned long pc, unsigned long ns) +{ + struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; + cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); + cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy); + + p->stime_pc += pc; + if (p->stime_pc >= 100) { + p->stime_pc -= 100; + p->stime = cputime_add(p->stime, cputime_one_jiffy); + p->stimescaled = cputime_add(p->stimescaled, one_jiffy_scaled); + account_group_system_time(p, cputime_one_jiffy); + acct_update_integrals(p); + } + p->sched_time += ns; + + if (hardirq_count() - hardirq_offset) { + rq->irq_pc += pc; + if (rq->irq_pc >= 100) { + rq->irq_pc -= 100; + cpustat->irq = cputime64_add(cpustat->irq, tmp); + } + } else if (in_serving_softirq()) { + rq->softirq_pc += pc; + if (rq->softirq_pc >= 100) { + rq->softirq_pc -= 100; + cpustat->softirq = cputime64_add(cpustat->softirq, tmp); + } + } else { + rq->system_pc += pc; + if (rq->system_pc >= 100) { + rq->system_pc -= 100; + cpustat->system = cputime64_add(cpustat->system, tmp); + } + } +} + +static void pc_user_time(struct rq *rq, struct task_struct *p, + unsigned long pc, unsigned long ns) +{ + struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; + cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); + cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy); + + p->utime_pc += pc; + if (p->utime_pc >= 100) { + p->utime_pc -= 100; + p->utime = cputime_add(p->utime, cputime_one_jiffy); + p->utimescaled = cputime_add(p->utimescaled, one_jiffy_scaled); + account_group_user_time(p, cputime_one_jiffy); + acct_update_integrals(p); + } + p->sched_time += ns; + + if (TASK_NICE(p) > 0 || idleprio_task(p)) { + rq->nice_pc += pc; + if (rq->nice_pc >= 100) { + rq->nice_pc %= 100; + cpustat->nice = cputime64_add(cpustat->nice, tmp); + } + } else { + rq->user_pc += pc; + if (rq->user_pc >= 100) { + rq->user_pc %= 100; + cpustat->user = cputime64_add(cpustat->user, tmp); + } + } +} + +/* Convert nanoseconds to percentage of one tick. */ +#define NS_TO_PC(NS) (NS * 100 / JIFFY_NS) + +/* + * This is called on clock ticks and on context switches. + * Bank in p->sched_time the ns elapsed since the last tick or switch. + * CPU scheduler quota accounting is also performed here in microseconds. 
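+ * On a tick the accumulated percentage is rounded up to a full 100 so
+ * the per-tick components always total one tick; between ticks the
+ * elapsed fraction is accumulated and attributed as-is.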
+ */ +static void +update_cpu_clock(struct rq *rq, struct task_struct *p, int tick) +{ + long account_ns = rq->clock - rq->timekeep_clock; + struct task_struct *idle = rq->idle; + unsigned long account_pc; + + if (unlikely(account_ns < 0)) + account_ns = 0; + + account_pc = NS_TO_PC(account_ns); + + if (tick) { + int user_tick = user_mode(get_irq_regs()); + + /* Accurate tick timekeeping */ + rq->account_pc += account_pc - 100; + if (rq->account_pc < 0) { + /* + * Small errors in micro accounting may not make the + * accounting add up to 100% each tick so we keep track + * of the percentage and round it up when less than 100 + */ + account_pc += -rq->account_pc; + rq->account_pc = 0; + } + if (user_tick) + pc_user_time(rq, p, account_pc, account_ns); + else if (p != idle || (irq_count() != HARDIRQ_OFFSET)) + pc_system_time(rq, p, HARDIRQ_OFFSET, + account_pc, account_ns); + else + pc_idle_time(rq, account_pc); + } else { + /* Accurate subtick timekeeping */ + rq->account_pc += account_pc; + if (p == idle) + pc_idle_time(rq, account_pc); + else + pc_user_time(rq, p, account_pc, account_ns); + } + + /* time_slice accounting is done in usecs to avoid overflow on 32bit */ + if (rq->rq_policy != SCHED_FIFO && p != idle) { + s64 time_diff = rq->clock - rq->rq_last_ran; + + niffy_diff(&time_diff, 1); + rq->rq_time_slice -= NS_TO_US(time_diff); + } + rq->rq_last_ran = rq->timekeep_clock = rq->clock; +} + +/* + * Return any ns on the sched_clock that have not yet been accounted in + * @p in case that task is currently running. + * + * Called with task_grq_lock() held. + */ +static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq) +{ + u64 ns = 0; + + if (p == rq->curr) { + update_clocks(rq); + ns = rq->clock_task - rq->rq_last_ran; + if (unlikely((s64)ns < 0)) + ns = 0; + } + + return ns; +} + +unsigned long long task_delta_exec(struct task_struct *p) +{ + unsigned long flags; + struct rq *rq; + u64 ns; + + rq = task_grq_lock(p, &flags); + ns = do_task_delta_exec(p, rq); + task_grq_unlock(&flags); + + return ns; +} + +/* + * Return accounted runtime for the task. + * In case the task is currently running, return the runtime plus current's + * pending runtime that have not been accounted yet. + */ +unsigned long long task_sched_runtime(struct task_struct *p) +{ + unsigned long flags; + struct rq *rq; + u64 ns; + + rq = task_grq_lock(p, &flags); + ns = p->sched_time + do_task_delta_exec(p, rq); + task_grq_unlock(&flags); + + return ns; +} + +/* + * Return sum_exec_runtime for the thread group. + * In case the task is currently running, return the sum plus current's + * pending runtime that have not been accounted yet. + * + * Note that the thread group might have other running tasks as well, + * so the return value not includes other pending runtime that other + * running tasks might have. + */ +unsigned long long thread_group_sched_runtime(struct task_struct *p) +{ + struct task_cputime totals; + unsigned long flags; + struct rq *rq; + u64 ns; + + rq = task_grq_lock(p, &flags); + thread_group_cputime(p, &totals); + ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq); + task_grq_unlock(&flags); + + return ns; +} + +/* Compatibility crap for removal */ +void account_user_time(struct task_struct *p, cputime_t cputime, + cputime_t cputime_scaled) +{ +} + +void account_idle_time(cputime_t cputime) +{ +} + +/* + * Account guest cpu time to a process. 
+ * @p: the process that the cpu time gets accounted to + * @cputime: the cpu time spent in virtual machine since the last update + * @cputime_scaled: cputime scaled by cpu frequency + */ +static void account_guest_time(struct task_struct *p, cputime_t cputime, + cputime_t cputime_scaled) +{ + cputime64_t tmp; + struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; + + tmp = cputime_to_cputime64(cputime); + + /* Add guest time to process. */ + p->utime = cputime_add(p->utime, cputime); + p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); + account_group_user_time(p, cputime); + p->gtime = cputime_add(p->gtime, cputime); + + /* Add guest time to cpustat. */ + if (TASK_NICE(p) > 0) { + cpustat->nice = cputime64_add(cpustat->nice, tmp); + cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp); + } else { + cpustat->user = cputime64_add(cpustat->user, tmp); + cpustat->guest = cputime64_add(cpustat->guest, tmp); + } +} + +/* + * Account system cpu time to a process. + * @p: the process that the cpu time gets accounted to + * @hardirq_offset: the offset to subtract from hardirq_count() + * @cputime: the cpu time spent in kernel space since the last update + * @cputime_scaled: cputime scaled by cpu frequency + * This is for guest only now. + */ +void account_system_time(struct task_struct *p, int hardirq_offset, + cputime_t cputime, cputime_t cputime_scaled) +{ + + if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) + account_guest_time(p, cputime, cputime_scaled); +} + +/* + * Account for involuntary wait time. + * @steal: the cpu time spent in involuntary wait + */ +void account_steal_time(cputime_t cputime) +{ + struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; + cputime64_t cputime64 = cputime_to_cputime64(cputime); + + cpustat->steal = cputime64_add(cpustat->steal, cputime64); +} + +/* + * Account for idle time. + * @cputime: the cpu time spent in idle wait + */ +static void account_idle_times(cputime_t cputime) +{ + struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; + cputime64_t cputime64 = cputime_to_cputime64(cputime); + struct rq *rq = this_rq(); + + if (atomic_read(&rq->nr_iowait) > 0) + cpustat->iowait = cputime64_add(cpustat->iowait, cputime64); + else + cpustat->idle = cputime64_add(cpustat->idle, cputime64); +} + +#ifndef CONFIG_VIRT_CPU_ACCOUNTING + +void account_process_tick(struct task_struct *p, int user_tick) +{ +} + +/* + * Account multiple ticks of steal time. + * @p: the process from which the cpu time has been stolen + * @ticks: number of stolen ticks + */ +void account_steal_ticks(unsigned long ticks) +{ + account_steal_time(jiffies_to_cputime(ticks)); +} + +/* + * Account multiple ticks of idle time. + * @ticks: number of stolen ticks + */ +void account_idle_ticks(unsigned long ticks) +{ + account_idle_times(jiffies_to_cputime(ticks)); +} +#endif + +static inline void grq_iso_lock(void) + __acquires(grq.iso_lock) +{ + raw_spin_lock(&grq.iso_lock); +} + +static inline void grq_iso_unlock(void) + __releases(grq.iso_lock) +{ + raw_spin_unlock(&grq.iso_lock); +} + +/* + * Functions to test for when SCHED_ISO tasks have used their allocated + * quota as real time scheduling and convert them back to SCHED_NORMAL. + * Where possible, the data is tested lockless, to avoid grabbing iso_lock + * because the occasional inaccurate result won't matter. However the + * tick data is only ever modified under lock. iso_refractory is only simply + * set to 0 or 1 so it's not worth grabbing the lock yet again for that. 
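+ * While iso_refractory is set, SCHED_ISO tasks are scheduled as normal
+ * tasks until grq.iso_ticks decays back below the threshold (with 10%
+ * hysteresis).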
+ */ +static void set_iso_refractory(void) +{ + grq.iso_refractory = 1; +} + +static void clear_iso_refractory(void) +{ + grq.iso_refractory = 0; +} + +/* + * Test if SCHED_ISO tasks have run longer than their alloted period as RT + * tasks and set the refractory flag if necessary. There is 10% hysteresis + * for unsetting the flag. 115/128 is ~90/100 as a fast shift instead of a + * slow division. + */ +static unsigned int test_ret_isorefractory(struct rq *rq) +{ + if (likely(!grq.iso_refractory)) { + if (grq.iso_ticks > ISO_PERIOD * sched_iso_cpu) + set_iso_refractory(); + } else { + if (grq.iso_ticks < ISO_PERIOD * (sched_iso_cpu * 115 / 128)) + clear_iso_refractory(); + } + return grq.iso_refractory; +} + +static void iso_tick(void) +{ + grq_iso_lock(); + grq.iso_ticks += 100; + grq_iso_unlock(); +} + +/* No SCHED_ISO task was running so decrease rq->iso_ticks */ +static inline void no_iso_tick(void) +{ + if (grq.iso_ticks) { + grq_iso_lock(); + grq.iso_ticks -= grq.iso_ticks / ISO_PERIOD + 1; + if (unlikely(grq.iso_refractory && grq.iso_ticks < + ISO_PERIOD * (sched_iso_cpu * 115 / 128))) + clear_iso_refractory(); + grq_iso_unlock(); + } +} + +static int rq_running_iso(struct rq *rq) +{ + return rq->rq_prio == ISO_PRIO; +} + +/* This manages tasks that have run out of timeslice during a scheduler_tick */ +static void task_running_tick(struct rq *rq) +{ + struct task_struct *p; + + /* + * If a SCHED_ISO task is running we increment the iso_ticks. In + * order to prevent SCHED_ISO tasks from causing starvation in the + * presence of true RT tasks we account those as iso_ticks as well. + */ + if ((rt_queue(rq) || (iso_queue(rq) && !grq.iso_refractory))) { + if (grq.iso_ticks <= (ISO_PERIOD * 100) - 100) + iso_tick(); + } else + no_iso_tick(); + + if (iso_queue(rq)) { + if (unlikely(test_ret_isorefractory(rq))) { + if (rq_running_iso(rq)) { + /* + * SCHED_ISO task is running as RT and limit + * has been hit. Force it to reschedule as + * SCHED_NORMAL by zeroing its time_slice + */ + rq->rq_time_slice = 0; + } + } + } + + /* SCHED_FIFO tasks never run out of timeslice. */ + if (rq->rq_policy == SCHED_FIFO) + return; + /* + * Tasks that were scheduled in the first half of a tick are not + * allowed to run into the 2nd half of the next tick if they will + * run out of time slice in the interim. Otherwise, if they have + * less than RESCHED_US μs of time slice left they will be rescheduled. + */ + if (rq->dither) { + if (rq->rq_time_slice > HALF_JIFFY_US) + return; + else + rq->rq_time_slice = 0; + } else if (rq->rq_time_slice >= RESCHED_US) + return; + + /* p->time_slice < RESCHED_US. We only modify task_struct under grq lock */ + p = rq->curr; + requeue_task(p); + grq_lock(); + set_tsk_need_resched(p); + grq_unlock(); +} + +void wake_up_idle_cpu(int cpu); + +/* + * This function gets called by the timer code, with HZ frequency. + * We call it with interrupts disabled. The data modified is all + * local to struct rq so we don't need to grab grq lock. 
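+ * Only the per-rq clock is updated from here; update_clocks() is only
+ * ever called from paths that hold the grq lock.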
+ */ +void scheduler_tick(void) +{ + int cpu __maybe_unused = smp_processor_id(); + struct rq *rq = cpu_rq(cpu); + + sched_clock_tick(); + /* grq lock not grabbed, so only update rq clock */ + update_rq_clock(rq); + update_cpu_clock(rq, rq->curr, 1); + if (!rq_idle(rq)) + task_running_tick(rq); + else + no_iso_tick(); + rq->last_tick = rq->clock; + perf_event_task_tick(); +} + +notrace unsigned long get_parent_ip(unsigned long addr) +{ + if (in_lock_functions(addr)) { + addr = CALLER_ADDR2; + if (in_lock_functions(addr)) + addr = CALLER_ADDR3; + } + return addr; +} + +#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ + defined(CONFIG_PREEMPT_TRACER)) +void __kprobes add_preempt_count(int val) +{ +#ifdef CONFIG_DEBUG_PREEMPT + /* + * Underflow? + */ + if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) + return; +#endif + preempt_count() += val; +#ifdef CONFIG_DEBUG_PREEMPT + /* + * Spinlock count overflowing soon? + */ + DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= + PREEMPT_MASK - 10); +#endif + if (preempt_count() == val) + trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); +} +EXPORT_SYMBOL(add_preempt_count); + +void __kprobes sub_preempt_count(int val) +{ +#ifdef CONFIG_DEBUG_PREEMPT + /* + * Underflow? + */ + if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) + return; + /* + * Is the spinlock portion underflowing? + */ + if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && + !(preempt_count() & PREEMPT_MASK))) + return; +#endif + + if (preempt_count() == val) + trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); + preempt_count() -= val; +} +EXPORT_SYMBOL(sub_preempt_count); +#endif + +/* + * Deadline is "now" in niffies + (offset by priority). Setting the deadline + * is the key to everything. It distributes cpu fairly amongst tasks of the + * same nice value, it proportions cpu according to nice level, it means the + * task that last woke up the longest ago has the earliest deadline, thus + * ensuring that interactive tasks get low latency on wake up. The CPU + * proportion works out to the square of the virtual deadline difference, so + * this equation will give nice 19 3% CPU compared to nice 0. + */ +static inline u64 prio_deadline_diff(int user_prio) +{ + return (prio_ratios[user_prio] * rr_interval * (MS_TO_NS(1) / 128)); +} + +static inline u64 task_deadline_diff(struct task_struct *p) +{ + return prio_deadline_diff(TASK_USER_PRIO(p)); +} + +static inline u64 static_deadline_diff(int static_prio) +{ + return prio_deadline_diff(USER_PRIO(static_prio)); +} + +static inline int longest_deadline_diff(void) +{ + return prio_deadline_diff(39); +} + +static inline int ms_longest_deadline_diff(void) +{ + return NS_TO_MS(longest_deadline_diff()); +} + +/* + * The time_slice is only refilled when it is empty and that is when we set a + * new deadline. + */ +static void time_slice_expired(struct task_struct *p) +{ + p->time_slice = timeslice(); + p->deadline = grq.niffies + task_deadline_diff(p); +} + +/* + * Timeslices below RESCHED_US are considered as good as expired as there's no + * point rescheduling when there's so little time left. SCHED_BATCH tasks + * have been flagged be not latency sensitive and likely to be fully CPU + * bound so every time they're rescheduled they have their time_slice + * refilled, but get a new later deadline to have little effect on + * SCHED_NORMAL tasks. 
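+ * check_deadline() below applies both rules to the outgoing task in
+ * schedule().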
+ + */ +static inline void check_deadline(struct task_struct *p) +{ + if (p->time_slice < RESCHED_US || batch_task(p)) + time_slice_expired(p); +} + +/* + * O(n) lookup of all tasks in the global runqueue. The real brainfuck + * of lock contention and O(n). It's not really O(n) as only the queued, + * but not running tasks are scanned, and is O(n) queued in the worst case + * scenario only because the right task can be found before scanning all of + * them. + * Tasks are selected in this order: + * Real time tasks are selected purely by their static priority and in the + * order they were queued, so the lowest value idx, and the first queued task + * of that priority value is chosen. + * If no real time tasks are found, the SCHED_ISO priority is checked, and + * all SCHED_ISO tasks have the same priority value, so they're selected by + * the earliest deadline value. + * If no SCHED_ISO tasks are found, SCHED_NORMAL tasks are selected by the + * earliest deadline. + * Finally if no SCHED_NORMAL tasks are found, SCHED_IDLEPRIO tasks are + * selected by the earliest deadline. + */ +static inline struct +task_struct *earliest_deadline_task(struct rq *rq, struct task_struct *idle) +{ + u64 dl, earliest_deadline = 0; /* Initialise to silence compiler */ + struct task_struct *p, *edt = idle; + unsigned int cpu = cpu_of(rq); + struct list_head *queue; + int idx = 0; + +retry: + idx = find_next_bit(grq.prio_bitmap, PRIO_LIMIT, idx); + if (idx >= PRIO_LIMIT) + goto out; + queue = grq.queue + idx; + + if (idx < MAX_RT_PRIO) { + /* We found an rt task */ + list_for_each_entry(p, queue, run_list) { + /* Make sure cpu affinity is ok */ + if (needs_other_cpu(p, cpu)) + continue; + edt = p; + goto out_take; + } + /* None of the RT tasks at this priority can run on this cpu */ + ++idx; + goto retry; + } + + list_for_each_entry(p, queue, run_list) { + /* Make sure cpu affinity is ok */ + if (needs_other_cpu(p, cpu)) + continue; + + /* + * Soft affinity happens here by not scheduling a task with + * its sticky flag set that ran on a different CPU last when + * the CPU is scaling, or by greatly biasing against its + * deadline when not. + */ + if (task_rq(p) != rq && task_sticky(p)) { + if (scaling_rq(rq)) + continue; + else + dl = p->deadline + longest_deadline_diff(); + } else + dl = p->deadline; + + /* + * No rt tasks. Find the earliest deadline task. Now we're in + * O(n) territory. This is what we silenced the compiler for: + * edt will always start as idle. + */ + if (edt == idle || + deadline_before(dl, earliest_deadline)) { + earliest_deadline = dl; + edt = p; + } + } + if (edt == idle) { + if (++idx < PRIO_LIMIT) + goto retry; + goto out; + } +out_take: + take_task(rq, edt); +out: + return edt; +} + +/* + * Print scheduling while atomic bug: + */ +static noinline void __schedule_bug(struct task_struct *prev) +{ + struct pt_regs *regs = get_irq_regs(); + + printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", + prev->comm, prev->pid, preempt_count()); + + debug_show_held_locks(prev); + print_modules(); + if (irqs_disabled()) + print_irqtrace_events(prev); + + if (regs) + show_regs(regs); + else + dump_stack(); +} + +/* + * Various schedule()-time debugging checks and statistics: + */ +static inline void schedule_debug(struct task_struct *prev) +{ + /* + * Test if we are atomic. Since do_exit() needs to call into + * schedule() atomically, we ignore that path for now. + * Otherwise, whine if we are scheduling when we should not be. 
+ */ + if (unlikely(in_atomic_preempt_off() && !prev->exit_state)) + __schedule_bug(prev); + + profile_hit(SCHED_PROFILING, __builtin_return_address(0)); + + schedstat_inc(this_rq(), sched_count); +#ifdef CONFIG_SCHEDSTATS + if (unlikely(prev->lock_depth >= 0)) { + schedstat_inc(this_rq(), rq_sched_info.bkl_count); + schedstat_inc(prev, sched_info.bkl_count); + } +#endif +} + +/* + * The currently running task's information is all stored in rq local data + * which is only modified by the local CPU, thereby allowing the data to be + * changed without grabbing the grq lock. + */ +static inline void set_rq_task(struct rq *rq, struct task_struct *p) +{ + rq->rq_time_slice = p->time_slice; + rq->rq_deadline = p->deadline; + rq->rq_last_ran = p->last_ran = rq->clock; + rq->rq_policy = p->policy; + rq->rq_prio = p->prio; + if (p != rq->idle) + rq->rq_running = 1; + else + rq->rq_running = 0; +} + +static void reset_rq_task(struct rq *rq, struct task_struct *p) +{ + rq->rq_policy = p->policy; + rq->rq_prio = p->prio; +} + +/* + * schedule() is the main scheduler function. + */ +asmlinkage void __sched schedule(void) +{ + struct task_struct *prev, *next, *idle; + unsigned long *switch_count; + int deactivate, cpu; + struct rq *rq; + +need_resched: + preempt_disable(); + + cpu = smp_processor_id(); + rq = cpu_rq(cpu); + idle = rq->idle; + rcu_note_context_switch(cpu); + prev = rq->curr; + + release_kernel_lock(prev); +need_resched_nonpreemptible: + + deactivate = 0; + schedule_debug(prev); + + grq_lock_irq(); + update_clocks(rq); + update_cpu_clock(rq, prev, 0); + if (rq->clock - rq->last_tick > HALF_JIFFY_NS) + rq->dither = 0; + else + rq->dither = 1; + + clear_tsk_need_resched(prev); + + switch_count = &prev->nivcsw; + if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { + if (unlikely(signal_pending_state(prev->state, prev))) { + prev->state = TASK_RUNNING; + } else { + deactivate = 1; + /* + * If a worker is going to sleep, notify and + * ask workqueue whether it wants to wake up a + * task to maintain concurrency. If so, wake + * up the task. + */ + if (prev->flags & PF_WQ_WORKER) { + struct task_struct *to_wakeup; + + to_wakeup = wq_worker_sleeping(prev, cpu); + if (to_wakeup) { + /* This shouldn't happen, but does */ + if (unlikely(to_wakeup == prev)) + deactivate = 0; + else + try_to_wake_up_local(to_wakeup); + } + } + } + switch_count = &prev->nvcsw; + } + + if (prev != idle) { + /* Update all the information stored on struct rq */ + prev->time_slice = rq->rq_time_slice; + prev->deadline = rq->rq_deadline; + check_deadline(prev); + prev->last_ran = rq->clock; + + /* Task changed affinity off this CPU */ + if (needs_other_cpu(prev, cpu)) + resched_suitable_idle(prev); + else if (!deactivate) { + if (!queued_notrunning()) { + /* + * We now know prev is the only thing that is + * awaiting CPU so we can bypass rechecking for + * the earliest deadline task and just run it + * again. + */ + grq_unlock_irq(); + goto rerun_prev_unlocked; + } else + swap_sticky(rq, cpu, prev); + } + return_task(prev, deactivate); + } + + if (unlikely(!queued_notrunning())) { + /* + * This CPU is now truly idle as opposed to when idle is + * scheduled as a high priority task in its own right. 
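+ * Flag it in the cpu idle map below so wakeups elsewhere can pick it as
+ * a suitable idle target.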
+ */ + next = idle; + schedstat_inc(rq, sched_goidle); + set_cpuidle_map(cpu); + } else { + next = earliest_deadline_task(rq, idle); + if (likely(next->prio != PRIO_LIMIT)) { + prefetch(next); + prefetch_stack(next); + clear_cpuidle_map(cpu); + } else + set_cpuidle_map(cpu); + } + + if (likely(prev != next)) { + /* + * Don't stick tasks when a real time task is going to run as + * they may literally get stuck. + */ + if (rt_task(next)) + unstick_task(rq, prev); + sched_info_switch(prev, next); + perf_event_task_sched_out(prev, next); + + set_rq_task(rq, next); + grq.nr_switches++; + prev->oncpu = 0; + next->oncpu = 1; + rq->curr = next; + ++*switch_count; + + context_switch(rq, prev, next); /* unlocks the grq */ + /* + * The context switch have flipped the stack from under us + * and restored the local variables which were saved when + * this task called schedule() in the past. prev == current + * is still correct, but it can be moved to another cpu/rq. + */ + cpu = smp_processor_id(); + rq = cpu_rq(cpu); + idle = rq->idle; + } else + grq_unlock_irq(); + +rerun_prev_unlocked: + if (unlikely(reacquire_kernel_lock(prev))) + goto need_resched_nonpreemptible; + + preempt_enable_no_resched(); + if (need_resched()) + goto need_resched; +} +EXPORT_SYMBOL(schedule); + +#ifdef CONFIG_SMP +int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner) +{ + unsigned int cpu; + struct rq *rq; + +#ifdef CONFIG_DEBUG_PAGEALLOC + /* + * Need to access the cpu field knowing that + * DEBUG_PAGEALLOC could have unmapped it if + * the mutex owner just released it and exited. + */ + if (probe_kernel_address(&owner->cpu, cpu)) + return 0; +#else + cpu = owner->cpu; +#endif + + /* + * Even if the access succeeded (likely case), + * the cpu field may no longer be valid. + */ + if (cpu >= nr_cpumask_bits) + return 0; + + /* + * We need to validate that we can do a + * get_cpu() and that we have the percpu area. + */ + if (!cpu_online(cpu)) + return 0; + + rq = cpu_rq(cpu); + + for (;;) { + /* + * Owner changed, break to re-assess state. + */ + if (lock->owner != owner) + break; + + /* + * Is that owner really running on that cpu? + */ + if (task_thread_info(rq->curr) != owner || need_resched()) + return 0; + + arch_mutex_cpu_relax(); + } + + return 1; +} +#endif + +#ifdef CONFIG_PREEMPT +/* + * this is the entry point to schedule() from in-kernel preemption + * off of preempt_enable. Kernel preemptions off return from interrupt + * occur there and call schedule directly. + */ +asmlinkage void __sched notrace preempt_schedule(void) +{ + struct thread_info *ti = current_thread_info(); + + /* + * If there is a non-zero preempt_count or interrupts are disabled, + * we do not want to preempt the current task. Just return.. + */ + if (likely(ti->preempt_count || irqs_disabled())) + return; + + do { + add_preempt_count_notrace(PREEMPT_ACTIVE); + schedule(); + sub_preempt_count_notrace(PREEMPT_ACTIVE); + + /* + * Check again in case we missed a preemption opportunity + * between schedule and now. + */ + barrier(); + } while (need_resched()); +} +EXPORT_SYMBOL(preempt_schedule); + +/* + * this is the entry point to schedule() from kernel preemption + * off of irq context. + * Note, that this is called and return with irqs disabled. This will + * protect us against recursive calling from irq. 
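+ * Interrupts are re-enabled across the call to schedule() and disabled
+ * again before returning.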
+ */ +asmlinkage void __sched preempt_schedule_irq(void) +{ + struct thread_info *ti = current_thread_info(); + + /* Catch callers which need to be fixed */ + BUG_ON(ti->preempt_count || !irqs_disabled()); + + do { + add_preempt_count(PREEMPT_ACTIVE); + local_irq_enable(); + schedule(); + local_irq_disable(); + sub_preempt_count(PREEMPT_ACTIVE); + + /* + * Check again in case we missed a preemption opportunity + * between schedule and now. + */ + barrier(); + } while (need_resched()); +} + +#endif /* CONFIG_PREEMPT */ + +int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, + void *key) +{ + return try_to_wake_up(curr->private, mode, wake_flags); +} +EXPORT_SYMBOL(default_wake_function); + +/* + * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just + * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve + * number) then we wake all the non-exclusive tasks and one exclusive task. + * + * There are circumstances in which we can try to wake a task which has already + * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns + * zero in this (rare) case, and we handle it by continuing to scan the queue. + */ +static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, + int nr_exclusive, int wake_flags, void *key) +{ + struct list_head *tmp, *next; + + list_for_each_safe(tmp, next, &q->task_list) { + wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list); + unsigned int flags = curr->flags; + + if (curr->func(curr, mode, wake_flags, key) && + (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive) + break; + } +} + +/** + * __wake_up - wake up threads blocked on a waitqueue. + * @q: the waitqueue + * @mode: which threads + * @nr_exclusive: how many wake-one or wake-many threads to wake up + * @key: is directly passed to the wakeup function + * + * It may be assumed that this function implies a write memory barrier before + * changing the task state if and only if any tasks are woken up. + */ +void __wake_up(wait_queue_head_t *q, unsigned int mode, + int nr_exclusive, void *key) +{ + unsigned long flags; + + spin_lock_irqsave(&q->lock, flags); + __wake_up_common(q, mode, nr_exclusive, 0, key); + spin_unlock_irqrestore(&q->lock, flags); +} +EXPORT_SYMBOL(__wake_up); + +/* + * Same as __wake_up but called with the spinlock in wait_queue_head_t held. + */ +void __wake_up_locked(wait_queue_head_t *q, unsigned int mode) +{ + __wake_up_common(q, mode, 1, 0, NULL); +} +EXPORT_SYMBOL_GPL(__wake_up_locked); + +void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key) +{ + __wake_up_common(q, mode, 1, 0, key); +} +EXPORT_SYMBOL_GPL(__wake_up_locked_key); + +/** + * __wake_up_sync_key - wake up threads blocked on a waitqueue. + * @q: the waitqueue + * @mode: which threads + * @nr_exclusive: how many wake-one or wake-many threads to wake up + * @key: opaque value to be passed to wakeup targets + * + * The sync wakeup differs that the waker knows that it will schedule + * away soon, so while the target thread will be woken up, it will not + * be migrated to another CPU - ie. the two threads are 'synchronised' + * with each other. This can prevent needless bouncing between CPUs. + * + * On UP it can prevent extra preemption. + * + * It may be assumed that this function implies a write memory barrier before + * changing the task state if and only if any tasks are woken up. 
+ */ +void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, + int nr_exclusive, void *key) +{ + unsigned long flags; + int wake_flags = WF_SYNC; + + if (unlikely(!q)) + return; + + if (unlikely(!nr_exclusive)) + wake_flags = 0; + + spin_lock_irqsave(&q->lock, flags); + __wake_up_common(q, mode, nr_exclusive, wake_flags, key); + spin_unlock_irqrestore(&q->lock, flags); +} +EXPORT_SYMBOL_GPL(__wake_up_sync_key); + +/** + * __wake_up_sync - wake up threads blocked on a waitqueue. + * @q: the waitqueue + * @mode: which threads + * @nr_exclusive: how many wake-one or wake-many threads to wake up + * + * The sync wakeup differs that the waker knows that it will schedule + * away soon, so while the target thread will be woken up, it will not + * be migrated to another CPU - ie. the two threads are 'synchronised' + * with each other. This can prevent needless bouncing between CPUs. + * + * On UP it can prevent extra preemption. + */ +void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) +{ + unsigned long flags; + int sync = 1; + + if (unlikely(!q)) + return; + + if (unlikely(!nr_exclusive)) + sync = 0; + + spin_lock_irqsave(&q->lock, flags); + __wake_up_common(q, mode, nr_exclusive, sync, NULL); + spin_unlock_irqrestore(&q->lock, flags); +} +EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ + +/** + * complete: - signals a single thread waiting on this completion + * @x: holds the state of this particular completion + * + * This will wake up a single thread waiting on this completion. Threads will be + * awakened in the same order in which they were queued. + * + * See also complete_all(), wait_for_completion() and related routines. + * + * It may be assumed that this function implies a write memory barrier before + * changing the task state if and only if any tasks are woken up. + */ +void complete(struct completion *x) +{ + unsigned long flags; + + spin_lock_irqsave(&x->wait.lock, flags); + x->done++; + __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL); + spin_unlock_irqrestore(&x->wait.lock, flags); +} +EXPORT_SYMBOL(complete); + +/** + * complete_all: - signals all threads waiting on this completion + * @x: holds the state of this particular completion + * + * This will wake up all threads waiting on this particular completion event. + * + * It may be assumed that this function implies a write memory barrier before + * changing the task state if and only if any tasks are woken up. 
+ */ +void complete_all(struct completion *x) +{ + unsigned long flags; + + spin_lock_irqsave(&x->wait.lock, flags); + x->done += UINT_MAX/2; + __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL); + spin_unlock_irqrestore(&x->wait.lock, flags); +} +EXPORT_SYMBOL(complete_all); + +static inline long __sched +do_wait_for_common(struct completion *x, long timeout, int state) +{ + if (!x->done) { + DECLARE_WAITQUEUE(wait, current); + + __add_wait_queue_tail_exclusive(&x->wait, &wait); + do { + if (signal_pending_state(state, current)) { + timeout = -ERESTARTSYS; + break; + } + __set_current_state(state); + spin_unlock_irq(&x->wait.lock); + timeout = schedule_timeout(timeout); + spin_lock_irq(&x->wait.lock); + } while (!x->done && timeout); + __remove_wait_queue(&x->wait, &wait); + if (!x->done) + return timeout; + } + x->done--; + return timeout ?: 1; +} + +static long __sched +wait_for_common(struct completion *x, long timeout, int state) +{ + might_sleep(); + + spin_lock_irq(&x->wait.lock); + timeout = do_wait_for_common(x, timeout, state); + spin_unlock_irq(&x->wait.lock); + return timeout; +} + +/** + * wait_for_completion: - waits for completion of a task + * @x: holds the state of this particular completion + * + * This waits to be signaled for completion of a specific task. It is NOT + * interruptible and there is no timeout. + * + * See also similar routines (i.e. wait_for_completion_timeout()) with timeout + * and interrupt capability. Also see complete(). + */ +void __sched wait_for_completion(struct completion *x) +{ + wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); +} +EXPORT_SYMBOL(wait_for_completion); + +/** + * wait_for_completion_timeout: - waits for completion of a task (w/timeout) + * @x: holds the state of this particular completion + * @timeout: timeout value in jiffies + * + * This waits for either a completion of a specific task to be signaled or for a + * specified timeout to expire. The timeout is in jiffies. It is not + * interruptible. + */ +unsigned long __sched +wait_for_completion_timeout(struct completion *x, unsigned long timeout) +{ + return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE); +} +EXPORT_SYMBOL(wait_for_completion_timeout); + +/** + * wait_for_completion_interruptible: - waits for completion of a task (w/intr) + * @x: holds the state of this particular completion + * + * This waits for completion of a specific task to be signaled. It is + * interruptible. + */ +int __sched wait_for_completion_interruptible(struct completion *x) +{ + long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); + if (t == -ERESTARTSYS) + return t; + return 0; +} +EXPORT_SYMBOL(wait_for_completion_interruptible); + +/** + * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr)) + * @x: holds the state of this particular completion + * @timeout: timeout value in jiffies + * + * This waits for either a completion of a specific task to be signaled or for a + * specified timeout to expire. It is interruptible. The timeout is in jiffies. + */ +long __sched +wait_for_completion_interruptible_timeout(struct completion *x, + unsigned long timeout) +{ + return wait_for_common(x, timeout, TASK_INTERRUPTIBLE); +} +EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); + +/** + * wait_for_completion_killable: - waits for completion of a task (killable) + * @x: holds the state of this particular completion + * + * This waits to be signaled for completion of a specific task. It can be + * interrupted by a kill signal. 
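+ *
+ * A minimal round trip between two threads might look like this
+ * (illustrative sketch, names hypothetical):
+ *
+ *    DECLARE_COMPLETION_ONSTACK(done);
+ *
+ *    waiter:  ret = wait_for_completion_killable(&done);
+ *             if (ret)          // -ERESTARTSYS: fatal signal arrived first
+ *                     return ret;
+ *    waker:   complete(&done);  // releases exactly one waiter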
+ */ +int __sched wait_for_completion_killable(struct completion *x) +{ + long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE); + if (t == -ERESTARTSYS) + return t; + return 0; +} +EXPORT_SYMBOL(wait_for_completion_killable); + +/** + * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable)) + * @x: holds the state of this particular completion + * @timeout: timeout value in jiffies + * + * This waits for either a completion of a specific task to be + * signaled or for a specified timeout to expire. It can be + * interrupted by a kill signal. The timeout is in jiffies. + */ +long __sched +wait_for_completion_killable_timeout(struct completion *x, + unsigned long timeout) +{ + return wait_for_common(x, timeout, TASK_KILLABLE); +} +EXPORT_SYMBOL(wait_for_completion_killable_timeout); + +/** + * try_wait_for_completion - try to decrement a completion without blocking + * @x: completion structure + * + * Returns: 0 if a decrement cannot be done without blocking + * 1 if a decrement succeeded. + * + * If a completion is being used as a counting completion, + * attempt to decrement the counter without blocking. This + * enables us to avoid waiting if the resource the completion + * is protecting is not available. + */ +bool try_wait_for_completion(struct completion *x) +{ + unsigned long flags; + int ret = 1; + + spin_lock_irqsave(&x->wait.lock, flags); + if (!x->done) + ret = 0; + else + x->done--; + spin_unlock_irqrestore(&x->wait.lock, flags); + return ret; +} +EXPORT_SYMBOL(try_wait_for_completion); + +/** + * completion_done - Test to see if a completion has any waiters + * @x: completion structure + * + * Returns: 0 if there are waiters (wait_for_completion() in progress) + * 1 if there are no waiters. + * + */ +bool completion_done(struct completion *x) +{ + unsigned long flags; + int ret = 1; + + spin_lock_irqsave(&x->wait.lock, flags); + if (!x->done) + ret = 0; + spin_unlock_irqrestore(&x->wait.lock, flags); + return ret; +} +EXPORT_SYMBOL(completion_done); + +static long __sched +sleep_on_common(wait_queue_head_t *q, int state, long timeout) +{ + unsigned long flags; + wait_queue_t wait; + + init_waitqueue_entry(&wait, current); + + __set_current_state(state); + + spin_lock_irqsave(&q->lock, flags); + __add_wait_queue(q, &wait); + spin_unlock(&q->lock); + timeout = schedule_timeout(timeout); + spin_lock_irq(&q->lock); + __remove_wait_queue(q, &wait); + spin_unlock_irqrestore(&q->lock, flags); + + return timeout; +} + +void __sched interruptible_sleep_on(wait_queue_head_t *q) +{ + sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); +} +EXPORT_SYMBOL(interruptible_sleep_on); + +long __sched +interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) +{ + return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout); +} +EXPORT_SYMBOL(interruptible_sleep_on_timeout); + +void __sched sleep_on(wait_queue_head_t *q) +{ + sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); +} +EXPORT_SYMBOL(sleep_on); + +long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout) +{ + return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout); +} +EXPORT_SYMBOL(sleep_on_timeout); + +#ifdef CONFIG_RT_MUTEXES + +/* + * rt_mutex_setprio - set the current priority of a task + * @p: task + * @prio: prio value (kernel-internal form) + * + * This function changes the 'effective' priority of a task. It does + * not touch ->normal_prio like __setscheduler(). + * + * Used by the rt_mutex code to implement priority inheritance logic. 
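+ *
+ * Worked example (illustrative, with the usual MAX_RT_PRIO of 100): if a
+ * SCHED_FIFO waiter with rt_priority 50 blocks on an rt_mutex owned by a
+ * SCHED_NORMAL task, the rt_mutex code boosts the owner with
+ *
+ *    rt_mutex_setprio(owner, MAX_RT_PRIO - 1 - 50);   // kernel prio 49
+ *
+ * and restores the owner's normal priority once the lock is released.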
+ */ +void rt_mutex_setprio(struct task_struct *p, int prio) +{ + unsigned long flags; + int queued, oldprio; + struct rq *rq; + + BUG_ON(prio < 0 || prio > MAX_PRIO); + + rq = task_grq_lock(p, &flags); + + trace_sched_pi_setprio(p, prio); + oldprio = p->prio; + queued = task_queued(p); + if (queued) + dequeue_task(p); + p->prio = prio; + if (task_running(p) && prio > oldprio) + resched_task(p); + if (queued) { + enqueue_task(p); + try_preempt(p, rq); + } + + task_grq_unlock(&flags); +} + +#endif + +/* + * Adjust the deadline for when the priority is to change, before it's + * changed. + */ +static inline void adjust_deadline(struct task_struct *p, int new_prio) +{ + p->deadline += static_deadline_diff(new_prio) - task_deadline_diff(p); +} + +void set_user_nice(struct task_struct *p, long nice) +{ + int queued, new_static, old_static; + unsigned long flags; + struct rq *rq; + + if (TASK_NICE(p) == nice || nice < -20 || nice > 19) + return; + new_static = NICE_TO_PRIO(nice); + /* + * We have to be careful, if called from sys_setpriority(), + * the task might be in the middle of scheduling on another CPU. + */ + rq = time_task_grq_lock(p, &flags); + /* + * The RT priorities are set via sched_setscheduler(), but we still + * allow the 'normal' nice value to be set - but as expected + * it wont have any effect on scheduling until the task is + * not SCHED_NORMAL/SCHED_BATCH: + */ + if (has_rt_policy(p)) { + p->static_prio = new_static; + goto out_unlock; + } + queued = task_queued(p); + if (queued) + dequeue_task(p); + + adjust_deadline(p, new_static); + old_static = p->static_prio; + p->static_prio = new_static; + p->prio = effective_prio(p); + + if (queued) { + enqueue_task(p); + if (new_static < old_static) + try_preempt(p, rq); + } else if (task_running(p)) { + reset_rq_task(rq, p); + if (old_static < new_static) + resched_task(p); + } +out_unlock: + task_grq_unlock(&flags); +} +EXPORT_SYMBOL(set_user_nice); + +/* + * can_nice - check if a task can reduce its nice value + * @p: task + * @nice: nice value + */ +int can_nice(const struct task_struct *p, const int nice) +{ + /* convert nice value [19,-20] to rlimit style value [1,40] */ + int nice_rlim = 20 - nice; + + return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || + capable(CAP_SYS_NICE)); +} + +#ifdef __ARCH_WANT_SYS_NICE + +/* + * sys_nice - change the priority of the current process. + * @increment: priority increment + * + * sys_setpriority is a more generic, but much slower function that + * does similar things. + */ +SYSCALL_DEFINE1(nice, int, increment) +{ + long nice, retval; + + /* + * Setpriority might change our priority at the same moment. + * We don't have to worry. Conceptually one call occurs first + * and we have a single winner. + */ + if (increment < -40) + increment = -40; + if (increment > 40) + increment = 40; + + nice = TASK_NICE(current) + increment; + if (nice < -20) + nice = -20; + if (nice > 19) + nice = 19; + + if (increment < 0 && !can_nice(current, nice)) + return -EPERM; + + retval = security_task_setnice(current, nice); + if (retval) + return retval; + + set_user_nice(current, nice); + return 0; +} + +#endif + +/** + * task_prio - return the priority value of a given task. + * @p: the task in question. + * + * This is the priority value as seen by users in /proc. + * RT tasks are offset by -100. Normal tasks are centered around 1, value goes + * from 0 (SCHED_ISO) up to 82 (nice +19 SCHED_IDLEPRIO). 
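+ *
+ * Worked example (with the usual MAX_RT_PRIO of 100): a SCHED_FIFO task
+ * with rt_priority 50 has kernel prio 49 and is reported here as
+ * 49 - 100 = -51.  Similarly, can_nice() above converts a requested nice
+ * of -10 into the rlimit-style value 20 - (-10) = 30 before comparing it
+ * with RLIMIT_NICE.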
+ */ +int task_prio(const struct task_struct *p) +{ + int delta, prio = p->prio - MAX_RT_PRIO; + + /* rt tasks and iso tasks */ + if (prio <= 0) + goto out; + + /* Convert to ms to avoid overflows */ + delta = NS_TO_MS(p->deadline - grq.niffies); + delta = delta * 40 / ms_longest_deadline_diff(); + if (delta > 0 && delta <= 80) + prio += delta; + if (idleprio_task(p)) + prio += 40; +out: + return prio; +} + +/** + * task_nice - return the nice value of a given task. + * @p: the task in question. + */ +int task_nice(const struct task_struct *p) +{ + return TASK_NICE(p); +} +EXPORT_SYMBOL_GPL(task_nice); + +/** + * idle_cpu - is a given cpu idle currently? + * @cpu: the processor in question. + */ +int idle_cpu(int cpu) +{ + return cpu_curr(cpu) == cpu_rq(cpu)->idle; +} + +/** + * idle_task - return the idle task for a given cpu. + * @cpu: the processor in question. + */ +struct task_struct *idle_task(int cpu) +{ + return cpu_rq(cpu)->idle; +} + +/** + * find_process_by_pid - find a process with a matching PID value. + * @pid: the pid in question. + */ +static inline struct task_struct *find_process_by_pid(pid_t pid) +{ + return pid ? find_task_by_vpid(pid) : current; +} + +/* Actually do priority change: must hold grq lock. */ +static void +__setscheduler(struct task_struct *p, struct rq *rq, int policy, int prio) +{ + int oldrtprio, oldprio; + + BUG_ON(task_queued(p)); + + p->policy = policy; + oldrtprio = p->rt_priority; + p->rt_priority = prio; + p->normal_prio = normal_prio(p); + oldprio = p->prio; + /* we are holding p->pi_lock already */ + p->prio = rt_mutex_getprio(p); + if (task_running(p)) { + reset_rq_task(rq, p); + /* Resched only if we might now be preempted */ + if (p->prio > oldprio || p->rt_priority > oldrtprio) + resched_task(p); + } +} + +/* + * check the target process has a UID that matches the current process's + */ +static bool check_same_owner(struct task_struct *p) +{ + const struct cred *cred = current_cred(), *pcred; + bool match; + + rcu_read_lock(); + pcred = __task_cred(p); + match = (cred->euid == pcred->euid || + cred->euid == pcred->uid); + rcu_read_unlock(); + return match; +} + +static int __sched_setscheduler(struct task_struct *p, int policy, + const struct sched_param *param, bool user) +{ + struct sched_param zero_param = { .sched_priority = 0 }; + int queued, retval, oldpolicy = -1; + unsigned long flags, rlim_rtprio = 0; + int reset_on_fork; + struct rq *rq; + + /* may grab non-irq protected spin_locks */ + BUG_ON(in_interrupt()); + + if (is_rt_policy(policy) && !capable(CAP_SYS_NICE)) { + unsigned long lflags; + + if (!lock_task_sighand(p, &lflags)) + return -ESRCH; + rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO); + unlock_task_sighand(p, &lflags); + if (rlim_rtprio) + goto recheck; + /* + * If the caller requested an RT policy without having the + * necessary rights, we downgrade the policy to SCHED_ISO. + * We also set the parameter to zero to pass the checks. + */ + policy = SCHED_ISO; + param = &zero_param; + } +recheck: + /* double check policy once rq lock held */ + if (policy < 0) { + reset_on_fork = p->sched_reset_on_fork; + policy = oldpolicy = p->policy; + } else { + reset_on_fork = !!(policy & SCHED_RESET_ON_FORK); + policy &= ~SCHED_RESET_ON_FORK; + + if (!SCHED_RANGE(policy)) + return -EINVAL; + } + + /* + * Valid priorities for SCHED_FIFO and SCHED_RR are + * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and + * SCHED_BATCH is 0. 
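+ *
+ * With the common values MAX_RT_PRIO = MAX_USER_RT_PRIO = 100, the checks
+ * below accept sched_priority 1..99 for SCHED_FIFO/SCHED_RR (for user
+ * tasks and kernel threads alike) and exactly 0 for all non-RT policies.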
+ */ + if (param->sched_priority < 0 || + (p->mm && param->sched_priority > MAX_USER_RT_PRIO - 1) || + (!p->mm && param->sched_priority > MAX_RT_PRIO - 1)) + return -EINVAL; + if (is_rt_policy(policy) != (param->sched_priority != 0)) + return -EINVAL; + + /* + * Allow unprivileged RT tasks to decrease priority: + */ + if (user && !capable(CAP_SYS_NICE)) { + if (is_rt_policy(policy)) { + unsigned long rlim_rtprio = + task_rlimit(p, RLIMIT_RTPRIO); + + /* can't set/change the rt policy */ + if (policy != p->policy && !rlim_rtprio) + return -EPERM; + + /* can't increase priority */ + if (param->sched_priority > p->rt_priority && + param->sched_priority > rlim_rtprio) + return -EPERM; + } else { + switch (p->policy) { + /* + * Can only downgrade policies but not back to + * SCHED_NORMAL + */ + case SCHED_ISO: + if (policy == SCHED_ISO) + goto out; + if (policy == SCHED_NORMAL) + return -EPERM; + break; + case SCHED_BATCH: + if (policy == SCHED_BATCH) + goto out; + if (policy != SCHED_IDLEPRIO) + return -EPERM; + break; + case SCHED_IDLEPRIO: + if (policy == SCHED_IDLEPRIO) + goto out; + return -EPERM; + default: + break; + } + } + + /* can't change other user's priorities */ + if (!check_same_owner(p)) + return -EPERM; + + /* Normal users shall not reset the sched_reset_on_fork flag */ + if (p->sched_reset_on_fork && !reset_on_fork) + return -EPERM; + } + + if (user) { + retval = security_task_setscheduler(p); + if (retval) + return retval; + } + + /* + * make sure no PI-waiters arrive (or leave) while we are + * changing the priority of the task: + */ + raw_spin_lock_irqsave(&p->pi_lock, flags); + /* + * To be able to change p->policy safely, the apropriate + * runqueue lock must be held. + */ + rq = __task_grq_lock(p); + + /* + * Changing the policy of the stop threads its a very bad idea + */ + if (p == rq->stop) { + __task_grq_unlock(); + raw_spin_unlock_irqrestore(&p->pi_lock, flags); + return -EINVAL; + } + + /* recheck policy now with rq lock held */ + if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { + policy = oldpolicy = -1; + __task_grq_unlock(); + raw_spin_unlock_irqrestore(&p->pi_lock, flags); + goto recheck; + } + update_clocks(rq); + p->sched_reset_on_fork = reset_on_fork; + + queued = task_queued(p); + if (queued) + dequeue_task(p); + __setscheduler(p, rq, policy, param->sched_priority); + if (queued) { + enqueue_task(p); + try_preempt(p, rq); + } + __task_grq_unlock(); + raw_spin_unlock_irqrestore(&p->pi_lock, flags); + + rt_mutex_adjust_pi(p); +out: + return 0; +} + +/** + * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. + * @p: the task in question. + * @policy: new policy. + * @param: structure containing the new RT priority. + * + * NOTE that the task may be already dead. + */ +int sched_setscheduler(struct task_struct *p, int policy, + const struct sched_param *param) +{ + return __sched_setscheduler(p, policy, param, true); +} + +EXPORT_SYMBOL_GPL(sched_setscheduler); + +/** + * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. + * @p: the task in question. + * @policy: new policy. + * @param: structure containing the new RT priority. + * + * Just like sched_setscheduler, only don't bother checking if the + * current context has permission. For example, this is needed in + * stop_machine(): we create temporary high priority worker threads, + * but our caller might not have that capability. 
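+ *
+ * A minimal in-kernel usage sketch (hypothetical kthread "worker"):
+ *
+ *    struct sched_param sp = { .sched_priority = 1 };
+ *
+ *    sched_setscheduler_nocheck(worker, SCHED_FIFO, &sp);
+ *
+ * which is how per-cpu helper threads are typically given a fixed RT
+ * priority without the caller needing CAP_SYS_NICE.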
+ */ +int sched_setscheduler_nocheck(struct task_struct *p, int policy, + const struct sched_param *param) +{ + return __sched_setscheduler(p, policy, param, false); +} + +static int +do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) +{ + struct sched_param lparam; + struct task_struct *p; + int retval; + + if (!param || pid < 0) + return -EINVAL; + if (copy_from_user(&lparam, param, sizeof(struct sched_param))) + return -EFAULT; + + rcu_read_lock(); + retval = -ESRCH; + p = find_process_by_pid(pid); + if (p != NULL) + retval = sched_setscheduler(p, policy, &lparam); + rcu_read_unlock(); + + return retval; +} + +/** + * sys_sched_setscheduler - set/change the scheduler policy and RT priority + * @pid: the pid in question. + * @policy: new policy. + * @param: structure containing the new RT priority. + */ +asmlinkage long sys_sched_setscheduler(pid_t pid, int policy, + struct sched_param __user *param) +{ + /* negative values for policy are not valid */ + if (policy < 0) + return -EINVAL; + + return do_sched_setscheduler(pid, policy, param); +} + +/** + * sys_sched_setparam - set/change the RT priority of a thread + * @pid: the pid in question. + * @param: structure containing the new RT priority. + */ +SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) +{ + return do_sched_setscheduler(pid, -1, param); +} + +/** + * sys_sched_getscheduler - get the policy (scheduling class) of a thread + * @pid: the pid in question. + */ +SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) +{ + struct task_struct *p; + int retval = -EINVAL; + + if (pid < 0) + goto out_nounlock; + + retval = -ESRCH; + rcu_read_lock(); + p = find_process_by_pid(pid); + if (p) { + retval = security_task_getscheduler(p); + if (!retval) + retval = p->policy; + } + rcu_read_unlock(); + +out_nounlock: + return retval; +} + +/** + * sys_sched_getscheduler - get the RT priority of a thread + * @pid: the pid in question. + * @param: structure containing the RT priority. + */ +SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) +{ + struct sched_param lp; + struct task_struct *p; + int retval = -EINVAL; + + if (!param || pid < 0) + goto out_nounlock; + + rcu_read_lock(); + p = find_process_by_pid(pid); + retval = -ESRCH; + if (!p) + goto out_unlock; + + retval = security_task_getscheduler(p); + if (retval) + goto out_unlock; + + lp.sched_priority = p->rt_priority; + rcu_read_unlock(); + + /* + * This one might sleep, we cannot do it with a spinlock held ... + */ + retval = copy_to_user(param, &lp, sizeof(*param)) ? 
-EFAULT : 0; + +out_nounlock: + return retval; + +out_unlock: + rcu_read_unlock(); + return retval; +} + +long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) +{ + cpumask_var_t cpus_allowed, new_mask; + struct task_struct *p; + int retval; + + get_online_cpus(); + rcu_read_lock(); + + p = find_process_by_pid(pid); + if (!p) { + rcu_read_unlock(); + put_online_cpus(); + return -ESRCH; + } + + /* Prevent p going away */ + get_task_struct(p); + rcu_read_unlock(); + + if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { + retval = -ENOMEM; + goto out_put_task; + } + if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { + retval = -ENOMEM; + goto out_free_cpus_allowed; + } + retval = -EPERM; + if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) + goto out_unlock; + + retval = security_task_setscheduler(p); + if (retval) + goto out_unlock; + + cpuset_cpus_allowed(p, cpus_allowed); + cpumask_and(new_mask, in_mask, cpus_allowed); +again: + retval = set_cpus_allowed_ptr(p, new_mask); + + if (!retval) { + cpuset_cpus_allowed(p, cpus_allowed); + if (!cpumask_subset(new_mask, cpus_allowed)) { + /* + * We must have raced with a concurrent cpuset + * update. Just reset the cpus_allowed to the + * cpuset's cpus_allowed + */ + cpumask_copy(new_mask, cpus_allowed); + goto again; + } + } +out_unlock: + free_cpumask_var(new_mask); +out_free_cpus_allowed: + free_cpumask_var(cpus_allowed); +out_put_task: + put_task_struct(p); + put_online_cpus(); + return retval; +} + +static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, + cpumask_t *new_mask) +{ + if (len < sizeof(cpumask_t)) { + memset(new_mask, 0, sizeof(cpumask_t)); + } else if (len > sizeof(cpumask_t)) { + len = sizeof(cpumask_t); + } + return copy_from_user(new_mask, user_mask_ptr, len) ? 
-EFAULT : 0; +} + + +/** + * sys_sched_setaffinity - set the cpu affinity of a process + * @pid: pid of the process + * @len: length in bytes of the bitmask pointed to by user_mask_ptr + * @user_mask_ptr: user-space pointer to the new cpu mask + */ +SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, + unsigned long __user *, user_mask_ptr) +{ + cpumask_var_t new_mask; + int retval; + + if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) + return -ENOMEM; + + retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); + if (retval == 0) + retval = sched_setaffinity(pid, new_mask); + free_cpumask_var(new_mask); + return retval; +} + +long sched_getaffinity(pid_t pid, cpumask_t *mask) +{ + struct task_struct *p; + unsigned long flags; + int retval; + + get_online_cpus(); + rcu_read_lock(); + + retval = -ESRCH; + p = find_process_by_pid(pid); + if (!p) + goto out_unlock; + + retval = security_task_getscheduler(p); + if (retval) + goto out_unlock; + + grq_lock_irqsave(&flags); + cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); + grq_unlock_irqrestore(&flags); + +out_unlock: + rcu_read_unlock(); + put_online_cpus(); + + return retval; +} + +/** + * sys_sched_getaffinity - get the cpu affinity of a process + * @pid: pid of the process + * @len: length in bytes of the bitmask pointed to by user_mask_ptr + * @user_mask_ptr: user-space pointer to hold the current cpu mask + */ +SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, + unsigned long __user *, user_mask_ptr) +{ + int ret; + cpumask_var_t mask; + + if ((len * BITS_PER_BYTE) < nr_cpu_ids) + return -EINVAL; + if (len & (sizeof(unsigned long)-1)) + return -EINVAL; + + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) + return -ENOMEM; + + ret = sched_getaffinity(pid, mask); + if (ret == 0) { + size_t retlen = min_t(size_t, len, cpumask_size()); + + if (copy_to_user(user_mask_ptr, mask, retlen)) + ret = -EFAULT; + else + ret = retlen; + } + free_cpumask_var(mask); + + return ret; +} + +/** + * sys_sched_yield - yield the current processor to other threads. + * + * This function yields the current CPU to other tasks. It does this by + * scheduling away the current task. If it still has the earliest deadline + * it will be scheduled again as the next task. + */ +SYSCALL_DEFINE0(sched_yield) +{ + struct task_struct *p; + + p = current; + grq_lock_irq(); + schedstat_inc(task_rq(p), yld_count); + requeue_task(p); + + /* + * Since we are going to call schedule() anyway, there's + * no need to preempt or enable interrupts: + */ + __release(grq.lock); + spin_release(&grq.lock.dep_map, 1, _THIS_IP_); + do_raw_spin_unlock(&grq.lock); + preempt_enable_no_resched(); + + schedule(); + + return 0; +} + +static inline int should_resched(void) +{ + return need_resched() && !(preempt_count() & PREEMPT_ACTIVE); +} + +static void __cond_resched(void) +{ + /* NOT a real fix but will make voluntary preempt work. 馬鹿な事 */ + if (unlikely(system_state != SYSTEM_RUNNING)) + return; + + add_preempt_count(PREEMPT_ACTIVE); + schedule(); + sub_preempt_count(PREEMPT_ACTIVE); +} + +int __sched _cond_resched(void) +{ + if (should_resched()) { + __cond_resched(); + return 1; + } + return 0; +} +EXPORT_SYMBOL(_cond_resched); + +/* + * __cond_resched_lock() - if a reschedule is pending, drop the given lock, + * call schedule, and on return reacquire the lock. + * + * This works OK both with and without CONFIG_PREEMPT. We do strange low-level + * operations here to prevent schedule() from being called twice (once via + * spin_unlock(), once by hand). 
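+ *
+ * Typical usage is via the cond_resched_lock() wrapper inside a long loop
+ * held under a spinlock (illustrative sketch, "q" is hypothetical):
+ *
+ *    spin_lock(&q->lock);
+ *    while (!list_empty(&q->items)) {
+ *            process_one_item(q);              // potentially long loop
+ *            cond_resched_lock(&q->lock);      // may drop, schedule, re-take
+ *    }
+ *    spin_unlock(&q->lock);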
+ */ +int __cond_resched_lock(spinlock_t *lock) +{ + int resched = should_resched(); + int ret = 0; + + lockdep_assert_held(lock); + + if (spin_needbreak(lock) || resched) { + spin_unlock(lock); + if (resched) + __cond_resched(); + else + cpu_relax(); + ret = 1; + spin_lock(lock); + } + return ret; +} +EXPORT_SYMBOL(__cond_resched_lock); + +int __sched __cond_resched_softirq(void) +{ + BUG_ON(!in_softirq()); + + if (should_resched()) { + local_bh_enable(); + __cond_resched(); + local_bh_disable(); + return 1; + } + return 0; +} +EXPORT_SYMBOL(__cond_resched_softirq); + +/** + * yield - yield the current processor to other threads. + * + * This is a shortcut for kernel-space yielding - it marks the + * thread runnable and calls sys_sched_yield(). + */ +void __sched yield(void) +{ + set_current_state(TASK_RUNNING); + sys_sched_yield(); +} +EXPORT_SYMBOL(yield); + +/* + * This task is about to go to sleep on IO. Increment rq->nr_iowait so + * that process accounting knows that this is a task in IO wait state. + * + * But don't do that if it is a deliberate, throttling IO wait (this task + * has set its backing_dev_info: the queue against which it should throttle) + */ +void __sched io_schedule(void) +{ + struct rq *rq = raw_rq(); + + delayacct_blkio_start(); + atomic_inc(&rq->nr_iowait); + current->in_iowait = 1; + schedule(); + current->in_iowait = 0; + atomic_dec(&rq->nr_iowait); + delayacct_blkio_end(); +} +EXPORT_SYMBOL(io_schedule); + +long __sched io_schedule_timeout(long timeout) +{ + struct rq *rq = raw_rq(); + long ret; + + delayacct_blkio_start(); + atomic_inc(&rq->nr_iowait); + current->in_iowait = 1; + ret = schedule_timeout(timeout); + current->in_iowait = 0; + atomic_dec(&rq->nr_iowait); + delayacct_blkio_end(); + return ret; +} + +/** + * sys_sched_get_priority_max - return maximum RT priority. + * @policy: scheduling class. + * + * this syscall returns the maximum rt_priority that can be used + * by a given scheduling class. + */ +SYSCALL_DEFINE1(sched_get_priority_max, int, policy) +{ + int ret = -EINVAL; + + switch (policy) { + case SCHED_FIFO: + case SCHED_RR: + ret = MAX_USER_RT_PRIO-1; + break; + case SCHED_NORMAL: + case SCHED_BATCH: + case SCHED_ISO: + case SCHED_IDLEPRIO: + ret = 0; + break; + } + return ret; +} + +/** + * sys_sched_get_priority_min - return minimum RT priority. + * @policy: scheduling class. + * + * this syscall returns the minimum rt_priority that can be used + * by a given scheduling class. + */ +SYSCALL_DEFINE1(sched_get_priority_min, int, policy) +{ + int ret = -EINVAL; + + switch (policy) { + case SCHED_FIFO: + case SCHED_RR: + ret = 1; + break; + case SCHED_NORMAL: + case SCHED_BATCH: + case SCHED_ISO: + case SCHED_IDLEPRIO: + ret = 0; + break; + } + return ret; +} + +/** + * sys_sched_rr_get_interval - return the default timeslice of a process. + * @pid: pid of the process. + * @interval: userspace pointer to the timeslice value. + * + * this syscall writes the default timeslice value of a given process + * into the user-space timespec buffer. A value of '0' means infinity. 
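+ *
+ * From userspace this is reached as, for example,
+ *
+ *    struct timespec ts;
+ *    sched_rr_get_interval(0, &ts);   // 0 selects the calling process
+ *
+ * and under this scheduler the value filled in below is
+ * MS_TO_NS(task_timeslice(p)), or 0 for SCHED_FIFO tasks.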
+ */ +SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, + struct timespec __user *, interval) +{ + struct task_struct *p; + unsigned int time_slice; + unsigned long flags; + int retval; + struct timespec t; + + if (pid < 0) + return -EINVAL; + + retval = -ESRCH; + rcu_read_lock(); + p = find_process_by_pid(pid); + if (!p) + goto out_unlock; + + retval = security_task_getscheduler(p); + if (retval) + goto out_unlock; + + grq_lock_irqsave(&flags); + time_slice = p->policy == SCHED_FIFO ? 0 : MS_TO_NS(task_timeslice(p)); + grq_unlock_irqrestore(&flags); + + rcu_read_unlock(); + t = ns_to_timespec(time_slice); + retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; + return retval; + +out_unlock: + rcu_read_unlock(); + return retval; +} + +static const char stat_nam[] = TASK_STATE_TO_CHAR_STR; + +void sched_show_task(struct task_struct *p) +{ + unsigned long free = 0; + unsigned state; + + state = p->state ? __ffs(p->state) + 1 : 0; + printk(KERN_INFO "%-15.15s %c", p->comm, + state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); +#if BITS_PER_LONG == 32 + if (state == TASK_RUNNING) + printk(KERN_CONT " running "); + else + printk(KERN_CONT " %08lx ", thread_saved_pc(p)); +#else + if (state == TASK_RUNNING) + printk(KERN_CONT " running task "); + else + printk(KERN_CONT " %016lx ", thread_saved_pc(p)); +#endif +#ifdef CONFIG_DEBUG_STACK_USAGE + free = stack_not_used(p); +#endif + printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, + task_pid_nr(p), task_pid_nr(p->real_parent), + (unsigned long)task_thread_info(p)->flags); + + show_stack(p, NULL); +} + +void show_state_filter(unsigned long state_filter) +{ + struct task_struct *g, *p; + +#if BITS_PER_LONG == 32 + printk(KERN_INFO + " task PC stack pid father\n"); +#else + printk(KERN_INFO + " task PC stack pid father\n"); +#endif + read_lock(&tasklist_lock); + do_each_thread(g, p) { + /* + * reset the NMI-timeout, listing all files on a slow + * console might take alot of time: + */ + touch_nmi_watchdog(); + if (!state_filter || (p->state & state_filter)) + sched_show_task(p); + } while_each_thread(g, p); + + touch_all_softlockup_watchdogs(); + + read_unlock(&tasklist_lock); + /* + * Only show locks if all tasks are dumped: + */ + if (!state_filter) + debug_show_all_locks(); +} + +/** + * init_idle - set up an idle thread for a given CPU + * @idle: task in question + * @cpu: cpu the idle task belongs to + * + * NOTE: this function does not set the idle thread's NEED_RESCHED + * flag, to make booting more robust. + */ +void init_idle(struct task_struct *idle, int cpu) +{ + struct rq *rq = cpu_rq(cpu); + unsigned long flags; + + time_grq_lock(rq, &flags); + idle->last_ran = rq->clock; + idle->state = TASK_RUNNING; + /* Setting prio to illegal value shouldn't matter when never queued */ + idle->prio = PRIO_LIMIT; + set_rq_task(rq, idle); + idle->cpus_allowed = cpumask_of_cpu(cpu); + /* Silence PROVE_RCU */ + rcu_read_lock(); + set_task_cpu(idle, cpu); + rcu_read_unlock(); + rq->curr = rq->idle = idle; + idle->oncpu = 1; + grq_unlock_irqrestore(&flags); + + /* Set the preempt count _outside_ the spinlocks! */ +#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL) + task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0); +#else + task_thread_info(idle)->preempt_count = 0; +#endif + ftrace_graph_init_task(idle); +} + +/* + * In a system that switches off the HZ timer nohz_cpu_mask + * indicates which cpus entered this state. This is used + * in the rcu update to wait only for active cpus. 
For system + * which do not switch off the HZ timer nohz_cpu_mask should + * always be CPU_BITS_NONE. + */ +cpumask_var_t nohz_cpu_mask; + +#ifdef CONFIG_SMP +#ifdef CONFIG_NO_HZ +void select_nohz_load_balancer(int stop_tick) +{ +} +#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) +/** + * lowest_flag_domain - Return lowest sched_domain containing flag. + * @cpu: The cpu whose lowest level of sched domain is to + * be returned. + * @flag: The flag to check for the lowest sched_domain + * for the given cpu. + * + * Returns the lowest sched_domain of a cpu which contains the given flag. + */ +static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) +{ + struct sched_domain *sd; + + for_each_domain(cpu, sd) + if (sd && (sd->flags & flag)) + break; + + return sd; +} + +/** + * for_each_flag_domain - Iterates over sched_domains containing the flag. + * @cpu: The cpu whose domains we're iterating over. + * @sd: variable holding the value of the power_savings_sd + * for cpu. + * @flag: The flag to filter the sched_domains to be iterated. + * + * Iterates over all the scheduler domains for a given cpu that has the 'flag' + * set, starting from the lowest sched_domain to the highest. + */ +#define for_each_flag_domain(cpu, sd, flag) \ + for (sd = lowest_flag_domain(cpu, flag); \ + (sd && (sd->flags & flag)); sd = sd->parent) + +#endif /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */ + +static inline void resched_cpu(int cpu) +{ + unsigned long flags; + + grq_lock_irqsave(&flags); + resched_task(cpu_curr(cpu)); + grq_unlock_irqrestore(&flags); +} + +/* + * In the semi idle case, use the nearest busy cpu for migrating timers + * from an idle cpu. This is good for power-savings. + * + * We don't do similar optimization for completely idle system, as + * selecting an idle cpu will add more delays to the timers than intended + * (as that cpu's timer base may not be uptodate wrt jiffies etc). + */ +int get_nohz_timer_target(void) +{ + int cpu = smp_processor_id(); + int i; + struct sched_domain *sd; + + for_each_domain(cpu, sd) { + for_each_cpu(i, sched_domain_span(sd)) + if (!idle_cpu(i)) + return i; + } + return cpu; +} + +/* + * When add_timer_on() enqueues a timer into the timer wheel of an + * idle CPU then this timer might expire before the next timer event + * which is scheduled to wake up that CPU. In case of a completely + * idle system the next event might even be infinite time into the + * future. wake_up_idle_cpu() ensures that the CPU is woken up and + * leaves the inner idle loop so the newly added timer is taken into + * account when the CPU goes back to idle and evaluates the timer + * wheel for the next timer event. + */ +void wake_up_idle_cpu(int cpu) +{ + struct task_struct *idle; + struct rq *rq; + + if (cpu == smp_processor_id()) + return; + + rq = cpu_rq(cpu); + idle = rq->idle; + + /* + * This is safe, as this function is called with the timer + * wheel base lock of (cpu) held. When the CPU is on the way + * to idle and has not yet set rq->curr to idle then it will + * be serialised on the timer wheel base lock and take the new + * timer into account automatically. + */ + if (unlikely(rq->curr != idle)) + return; + + /* + * We can set TIF_RESCHED on the idle task of the other CPU + * lockless. 
The worst case is that the other CPU runs the + * idle task through an additional NOOP schedule() + */ + set_tsk_need_resched(idle); + + /* NEED_RESCHED must be visible before we test polling */ + smp_mb(); + if (!tsk_is_polling(idle)) + smp_send_reschedule(cpu); +} + +#endif /* CONFIG_NO_HZ */ + +/* + * Change a given task's CPU affinity. Migrate the thread to a + * proper CPU and schedule it away if the CPU it's executing on + * is removed from the allowed bitmask. + * + * NOTE: the caller must have a valid reference to the task, the + * task must not exit() & deallocate itself prematurely. The + * call is not atomic; no spinlocks may be held. + */ +int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) +{ + unsigned long flags; + int running_wrong = 0; + int queued = 0; + struct rq *rq; + int ret = 0; + + rq = task_grq_lock(p, &flags); + + if (!cpumask_intersects(new_mask, cpu_active_mask)) { + ret = -EINVAL; + goto out; + } + + if (unlikely((p->flags & PF_THREAD_BOUND) && p != current && + !cpumask_equal(&p->cpus_allowed, new_mask))) { + ret = -EINVAL; + goto out; + } + + queued = task_queued(p); + + cpumask_copy(&p->cpus_allowed, new_mask); + + /* Can the task run on the task's current CPU? If so, we're done */ + if (cpumask_test_cpu(task_cpu(p), new_mask)) + goto out; + + if (task_running(p)) { + /* Task is running on the wrong cpu now, reschedule it. */ + if (rq == this_rq()) { + set_tsk_need_resched(p); + running_wrong = 1; + } else + resched_task(p); + } else + set_task_cpu(p, cpumask_any_and(cpu_active_mask, new_mask)); + +out: + if (queued) + try_preempt(p, rq); + task_grq_unlock(&flags); + + if (running_wrong) + _cond_resched(); + + return ret; +} +EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); + +#ifdef CONFIG_HOTPLUG_CPU +/* Run through task list and find tasks affined to just the dead cpu, then + * allocate a new affinity */ +static void break_sole_affinity(int src_cpu, struct task_struct *idle) +{ + struct task_struct *p, *t; + + do_each_thread(t, p) { + if (p != idle && !online_cpus(p)) { + cpumask_copy(&p->cpus_allowed, cpu_possible_mask); + /* + * Don't tell them about moving exiting tasks or + * kernel threads (both mm NULL), since they never + * leave kernel. + */ + if (p->mm && printk_ratelimit()) { + printk(KERN_INFO "process %d (%s) no " + "longer affine to cpu %d\n", + task_pid_nr(p), p->comm, src_cpu); + } + } + clear_sticky(p); + } while_each_thread(t, p); +} + +/* + * Schedules idle task to be the next runnable task on current CPU. + * It does so by boosting its priority to highest possible. + * Used by CPU offline code. + */ +void sched_idle_next(struct rq *rq, int this_cpu, struct task_struct *idle) +{ + /* cpu has to be offline */ + BUG_ON(cpu_online(this_cpu)); + + break_sole_affinity(this_cpu, idle); + + __setscheduler(idle, rq, SCHED_FIFO, STOP_PRIO); + + activate_idle_task(idle); + set_tsk_need_resched(rq->curr); +} + +/* + * Ensures that the idle task is using init_mm right before its cpu goes + * offline. 
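+ *
+ * (As an aside on the affinity helper above: a caller pinning a task to a
+ * single CPU would typically do
+ *
+ *    set_cpus_allowed_ptr(p, cpumask_of(cpu));
+ *
+ * and gets -EINVAL back if that CPU is not in cpu_active_mask.)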
+ */ +void idle_task_exit(void) +{ + struct mm_struct *mm = current->active_mm; + + BUG_ON(cpu_online(smp_processor_id())); + + if (mm != &init_mm) + switch_mm(mm, &init_mm, current); + mmdrop(mm); +} +#endif /* CONFIG_HOTPLUG_CPU */ +void sched_set_stop_task(int cpu, struct task_struct *stop) +{ + struct sched_param stop_param = { .sched_priority = STOP_PRIO }; + struct sched_param start_param = { .sched_priority = MAX_USER_RT_PRIO - 1 }; + struct task_struct *old_stop = cpu_rq(cpu)->stop; + + if (stop) { + /* + * Make it appear like a SCHED_FIFO task, its something + * userspace knows about and won't get confused about. + * + * Also, it will make PI more or less work without too + * much confusion -- but then, stop work should not + * rely on PI working anyway. + */ + sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param); + } + + cpu_rq(cpu)->stop = stop; + + if (old_stop) { + /* + * Reset it back to a normal rt scheduling prio so that + * it can die in pieces. + */ + sched_setscheduler_nocheck(old_stop, SCHED_FIFO, &start_param); + } +} + + +#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) + +static struct ctl_table sd_ctl_dir[] = { + { + .procname = "sched_domain", + .mode = 0555, + }, + {} +}; + +static struct ctl_table sd_ctl_root[] = { + { + .procname = "kernel", + .mode = 0555, + .child = sd_ctl_dir, + }, + {} +}; + +static struct ctl_table *sd_alloc_ctl_entry(int n) +{ + struct ctl_table *entry = + kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL); + + return entry; +} + +static void sd_free_ctl_entry(struct ctl_table **tablep) +{ + struct ctl_table *entry; + + /* + * In the intermediate directories, both the child directory and + * procname are dynamically allocated and could fail but the mode + * will always be set. In the lowest directory the names are + * static strings and all have proc handlers. 
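+ *
+ * The resulting tree appears under procfs as entries such as
+ *
+ *    /proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
+ *
+ * with one cpuN directory per possible CPU and one domainN directory per
+ * sched_domain level above it.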
+ */ + for (entry = *tablep; entry->mode; entry++) { + if (entry->child) + sd_free_ctl_entry(&entry->child); + if (entry->proc_handler == NULL) + kfree(entry->procname); + } + + kfree(*tablep); + *tablep = NULL; +} + +static void +set_table_entry(struct ctl_table *entry, + const char *procname, void *data, int maxlen, + mode_t mode, proc_handler *proc_handler) +{ + entry->procname = procname; + entry->data = data; + entry->maxlen = maxlen; + entry->mode = mode; + entry->proc_handler = proc_handler; +} + +static struct ctl_table * +sd_alloc_ctl_domain_table(struct sched_domain *sd) +{ + struct ctl_table *table = sd_alloc_ctl_entry(13); + + if (table == NULL) + return NULL; + + set_table_entry(&table[0], "min_interval", &sd->min_interval, + sizeof(long), 0644, proc_doulongvec_minmax); + set_table_entry(&table[1], "max_interval", &sd->max_interval, + sizeof(long), 0644, proc_doulongvec_minmax); + set_table_entry(&table[2], "busy_idx", &sd->busy_idx, + sizeof(int), 0644, proc_dointvec_minmax); + set_table_entry(&table[3], "idle_idx", &sd->idle_idx, + sizeof(int), 0644, proc_dointvec_minmax); + set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, + sizeof(int), 0644, proc_dointvec_minmax); + set_table_entry(&table[5], "wake_idx", &sd->wake_idx, + sizeof(int), 0644, proc_dointvec_minmax); + set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, + sizeof(int), 0644, proc_dointvec_minmax); + set_table_entry(&table[7], "busy_factor", &sd->busy_factor, + sizeof(int), 0644, proc_dointvec_minmax); + set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, + sizeof(int), 0644, proc_dointvec_minmax); + set_table_entry(&table[9], "cache_nice_tries", + &sd->cache_nice_tries, + sizeof(int), 0644, proc_dointvec_minmax); + set_table_entry(&table[10], "flags", &sd->flags, + sizeof(int), 0644, proc_dointvec_minmax); + set_table_entry(&table[11], "name", sd->name, + CORENAME_MAX_SIZE, 0444, proc_dostring); + /* &table[12] is terminator */ + + return table; +} + +static ctl_table *sd_alloc_ctl_cpu_table(int cpu) +{ + struct ctl_table *entry, *table; + struct sched_domain *sd; + int domain_num = 0, i; + char buf[32]; + + for_each_domain(cpu, sd) + domain_num++; + entry = table = sd_alloc_ctl_entry(domain_num + 1); + if (table == NULL) + return NULL; + + i = 0; + for_each_domain(cpu, sd) { + snprintf(buf, 32, "domain%d", i); + entry->procname = kstrdup(buf, GFP_KERNEL); + entry->mode = 0555; + entry->child = sd_alloc_ctl_domain_table(sd); + entry++; + i++; + } + return table; +} + +static struct ctl_table_header *sd_sysctl_header; +static void register_sched_domain_sysctl(void) +{ + int i, cpu_num = num_possible_cpus(); + struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); + char buf[32]; + + WARN_ON(sd_ctl_dir[0].child); + sd_ctl_dir[0].child = entry; + + if (entry == NULL) + return; + + for_each_possible_cpu(i) { + snprintf(buf, 32, "cpu%d", i); + entry->procname = kstrdup(buf, GFP_KERNEL); + entry->mode = 0555; + entry->child = sd_alloc_ctl_cpu_table(i); + entry++; + } + + WARN_ON(sd_sysctl_header); + sd_sysctl_header = register_sysctl_table(sd_ctl_root); +} + +/* may be called multiple times per register */ +static void unregister_sched_domain_sysctl(void) +{ + if (sd_sysctl_header) + unregister_sysctl_table(sd_sysctl_header); + sd_sysctl_header = NULL; + if (sd_ctl_dir[0].child) + sd_free_ctl_entry(&sd_ctl_dir[0].child); +} +#else +static void register_sched_domain_sysctl(void) +{ +} +static void unregister_sched_domain_sysctl(void) +{ +} +#endif + +static void set_rq_online(struct 
rq *rq) +{ + if (!rq->online) { + cpumask_set_cpu(cpu_of(rq), rq->rd->online); + rq->online = 1; + } +} + +static void set_rq_offline(struct rq *rq) +{ + if (rq->online) { + cpumask_clear_cpu(cpu_of(rq), rq->rd->online); + rq->online = 0; + } +} + +/* + * migration_call - callback that gets triggered when a CPU is added. + */ +static int __cpuinit +migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) +{ + int cpu = (long)hcpu; + unsigned long flags; + struct rq *rq = cpu_rq(cpu); +#ifdef CONFIG_HOTPLUG_CPU + struct task_struct *idle = rq->idle; +#endif + + switch (action & ~CPU_TASKS_FROZEN) { + + case CPU_UP_PREPARE: + break; + + case CPU_ONLINE: + /* Update our root-domain */ + grq_lock_irqsave(&flags); + if (rq->rd) { + BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); + + set_rq_online(rq); + } + grq.noc = num_online_cpus(); + grq_unlock_irqrestore(&flags); + break; + +#ifdef CONFIG_HOTPLUG_CPU + case CPU_DEAD: + /* Idle task back to normal (off runqueue, low prio) */ + grq_lock_irq(); + return_task(idle, 1); + idle->static_prio = MAX_PRIO; + __setscheduler(idle, rq, SCHED_NORMAL, 0); + idle->prio = PRIO_LIMIT; + set_rq_task(rq, idle); + update_clocks(rq); + grq_unlock_irq(); + break; + + case CPU_DYING: + /* Update our root-domain */ + grq_lock_irqsave(&flags); + sched_idle_next(rq, cpu, idle); + if (rq->rd) { + BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); + set_rq_offline(rq); + } + grq.noc = num_online_cpus(); + grq_unlock_irqrestore(&flags); + break; +#endif + } + return NOTIFY_OK; +} + +/* + * Register at high priority so that task migration (migrate_all_tasks) + * happens before everything else. This has to be lower priority than + * the notifier in the perf_counter subsystem, though. + */ +static struct notifier_block __cpuinitdata migration_notifier = { + .notifier_call = migration_call, + .priority = CPU_PRI_MIGRATION, +}; + +static int __cpuinit sched_cpu_active(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_ONLINE: + case CPU_DOWN_FAILED: + set_cpu_active((long)hcpu, true); + return NOTIFY_OK; + default: + return NOTIFY_DONE; + } +} + +static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_DOWN_PREPARE: + set_cpu_active((long)hcpu, false); + return NOTIFY_OK; + default: + return NOTIFY_DONE; + } +} + +int __init migration_init(void) +{ + void *cpu = (void *)(long)smp_processor_id(); + int err; + + /* Initialise migration for the boot CPU */ + err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); + BUG_ON(err == NOTIFY_BAD); + migration_call(&migration_notifier, CPU_ONLINE, cpu); + register_cpu_notifier(&migration_notifier); + + /* Register cpu active notifiers */ + cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE); + cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE); + + return 0; +} +early_initcall(migration_init); +#endif + +#ifdef CONFIG_SMP + +#ifdef CONFIG_SCHED_DEBUG + +static __read_mostly int sched_domain_debug_enabled; + +static int __init sched_domain_debug_setup(char *str) +{ + sched_domain_debug_enabled = 1; + + return 0; +} +early_param("sched_debug", sched_domain_debug_setup); + +static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, + struct cpumask *groupmask) +{ + struct sched_group *group = sd->groups; + char str[256]; + + cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd)); + cpumask_clear(groupmask); + + 
printk(KERN_DEBUG "%*s domain %d: ", level, "", level); + + if (!(sd->flags & SD_LOAD_BALANCE)) { + printk("does not load-balance\n"); + if (sd->parent) + printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" + " has parent"); + return -1; + } + + printk(KERN_CONT "span %s level %s\n", str, sd->name); + + if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { + printk(KERN_ERR "ERROR: domain->span does not contain " + "CPU%d\n", cpu); + } + if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { + printk(KERN_ERR "ERROR: domain->groups does not contain" + " CPU%d\n", cpu); + } + + printk(KERN_DEBUG "%*s groups:", level + 1, ""); + do { + if (!group) { + printk("\n"); + printk(KERN_ERR "ERROR: group is NULL\n"); + break; + } + + if (!group->cpu_power) { + printk(KERN_CONT "\n"); + printk(KERN_ERR "ERROR: domain->cpu_power not " + "set\n"); + break; + } + + if (!cpumask_weight(sched_group_cpus(group))) { + printk(KERN_CONT "\n"); + printk(KERN_ERR "ERROR: empty group\n"); + break; + } + + if (cpumask_intersects(groupmask, sched_group_cpus(group))) { + printk(KERN_CONT "\n"); + printk(KERN_ERR "ERROR: repeated CPUs\n"); + break; + } + + cpumask_or(groupmask, groupmask, sched_group_cpus(group)); + + cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group)); + + printk(KERN_CONT " %s", str); + if (group->cpu_power != SCHED_LOAD_SCALE) { + printk(KERN_CONT " (cpu_power = %d)", + group->cpu_power); + } + + group = group->next; + } while (group != sd->groups); + printk(KERN_CONT "\n"); + + if (!cpumask_equal(sched_domain_span(sd), groupmask)) + printk(KERN_ERR "ERROR: groups don't span domain->span\n"); + + if (sd->parent && + !cpumask_subset(groupmask, sched_domain_span(sd->parent))) + printk(KERN_ERR "ERROR: parent span is not a superset " + "of domain->span\n"); + return 0; +} + +static void sched_domain_debug(struct sched_domain *sd, int cpu) +{ + cpumask_var_t groupmask; + int level = 0; + + if (!sched_domain_debug_enabled) + return; + + if (!sd) { + printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); + return; + } + + printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); + + if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) { + printk(KERN_DEBUG "Cannot load-balance (out of memory)\n"); + return; + } + + for (;;) { + if (sched_domain_debug_one(sd, cpu, level, groupmask)) + break; + level++; + sd = sd->parent; + if (!sd) + break; + } + free_cpumask_var(groupmask); +} +#else /* !CONFIG_SCHED_DEBUG */ +# define sched_domain_debug(sd, cpu) do { } while (0) +#endif /* CONFIG_SCHED_DEBUG */ + +static int sd_degenerate(struct sched_domain *sd) +{ + if (cpumask_weight(sched_domain_span(sd)) == 1) + return 1; + + /* Following flags need at least 2 groups */ + if (sd->flags & (SD_LOAD_BALANCE | + SD_BALANCE_NEWIDLE | + SD_BALANCE_FORK | + SD_BALANCE_EXEC | + SD_SHARE_CPUPOWER | + SD_SHARE_PKG_RESOURCES)) { + if (sd->groups != sd->groups->next) + return 0; + } + + /* Following flags don't use groups */ + if (sd->flags & (SD_WAKE_AFFINE)) + return 0; + + return 1; +} + +static int +sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) +{ + unsigned long cflags = sd->flags, pflags = parent->flags; + + if (sd_degenerate(parent)) + return 1; + + if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) + return 0; + + /* Flags needing groups don't count if only 1 group in parent */ + if (parent->groups == parent->groups->next) { + pflags &= ~(SD_LOAD_BALANCE | + SD_BALANCE_NEWIDLE | + SD_BALANCE_FORK | + SD_BALANCE_EXEC | + SD_SHARE_CPUPOWER | + 
SD_SHARE_PKG_RESOURCES); + if (nr_node_ids == 1) + pflags &= ~SD_SERIALIZE; + } + if (~cflags & pflags) + return 0; + + return 1; +} + +static void free_rootdomain(struct root_domain *rd) +{ + synchronize_sched(); + + free_cpumask_var(rd->rto_mask); + free_cpumask_var(rd->online); + free_cpumask_var(rd->span); + kfree(rd); +} + +static void rq_attach_root(struct rq *rq, struct root_domain *rd) +{ + struct root_domain *old_rd = NULL; + unsigned long flags; + + grq_lock_irqsave(&flags); + + if (rq->rd) { + old_rd = rq->rd; + + if (cpumask_test_cpu(cpu_of(rq), old_rd->online)) + set_rq_offline(rq); + + cpumask_clear_cpu(cpu_of(rq), old_rd->span); + + /* + * If we dont want to free the old_rt yet then + * set old_rd to NULL to skip the freeing later + * in this function: + */ + if (!atomic_dec_and_test(&old_rd->refcount)) + old_rd = NULL; + } + + atomic_inc(&rd->refcount); + rq->rd = rd; + + cpumask_set_cpu(cpu_of(rq), rd->span); + if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) + set_rq_online(rq); + + grq_unlock_irqrestore(&flags); + + if (old_rd) + free_rootdomain(old_rd); +} + +static int init_rootdomain(struct root_domain *rd) +{ + memset(rd, 0, sizeof(*rd)); + + if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) + goto out; + if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) + goto free_span; + if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) + goto free_online; + + if (cpupri_init(&rd->cpupri) != 0) + goto free_rto_mask; + return 0; + +free_rto_mask: + free_cpumask_var(rd->rto_mask); +free_online: + free_cpumask_var(rd->online); +free_span: + free_cpumask_var(rd->span); +out: + return -ENOMEM; +} + +static void init_defrootdomain(void) +{ + init_rootdomain(&def_root_domain); + + atomic_set(&def_root_domain.refcount, 1); +} + +static struct root_domain *alloc_rootdomain(void) +{ + struct root_domain *rd; + + rd = kmalloc(sizeof(*rd), GFP_KERNEL); + if (!rd) + return NULL; + + if (init_rootdomain(rd) != 0) { + kfree(rd); + return NULL; + } + + return rd; +} + +/* + * Attach the domain 'sd' to 'cpu' as its base domain. Callers must + * hold the hotplug lock. + */ +static void +cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) +{ + struct rq *rq = cpu_rq(cpu); + struct sched_domain *tmp; + + for (tmp = sd; tmp; tmp = tmp->parent) + tmp->span_weight = cpumask_weight(sched_domain_span(tmp)); + + /* Remove the sched domains which do not contribute to scheduling. */ + for (tmp = sd; tmp; ) { + struct sched_domain *parent = tmp->parent; + if (!parent) + break; + + if (sd_parent_degenerate(tmp, parent)) { + tmp->parent = parent->parent; + if (parent->parent) + parent->parent->child = tmp; + } else + tmp = tmp->parent; + } + + if (sd && sd_degenerate(sd)) { + sd = sd->parent; + if (sd) + sd->child = NULL; + } + + sched_domain_debug(sd, cpu); + + rq_attach_root(rq, rd); + rcu_assign_pointer(rq->sd, sd); +} + +/* cpus with isolated domains */ +static cpumask_var_t cpu_isolated_map; + +/* Setup the mask of cpus configured for isolated domains */ +static int __init isolated_cpu_setup(char *str) +{ + alloc_bootmem_cpumask_var(&cpu_isolated_map); + cpulist_parse(str, cpu_isolated_map); + return 1; +} + +__setup("isolcpus=", isolated_cpu_setup); + +/* + * init_sched_build_groups takes the cpumask we wish to span, and a pointer + * to a function which identifies what group(along with sched group) a CPU + * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids + * (due to the fact that we keep track of groups covered with a struct cpumask). 
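+ *
+ * (Aside on the isolated-CPU setup just above: booting with a parameter
+ * such as
+ *
+ *    isolcpus=2,3
+ *
+ * keeps CPUs 2 and 3 out of the domains built here, so only explicitly
+ * affined tasks run on them.)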
+ * + * init_sched_build_groups will build a circular linked list of the groups + * covered by the given span, and will set each group's ->cpumask correctly, + * and ->cpu_power to 0. + */ +static void +init_sched_build_groups(const struct cpumask *span, + const struct cpumask *cpu_map, + int (*group_fn)(int cpu, const struct cpumask *cpu_map, + struct sched_group **sg, + struct cpumask *tmpmask), + struct cpumask *covered, struct cpumask *tmpmask) +{ + struct sched_group *first = NULL, *last = NULL; + int i; + + cpumask_clear(covered); + + for_each_cpu(i, span) { + struct sched_group *sg; + int group = group_fn(i, cpu_map, &sg, tmpmask); + int j; + + if (cpumask_test_cpu(i, covered)) + continue; + + cpumask_clear(sched_group_cpus(sg)); + sg->cpu_power = 0; + + for_each_cpu(j, span) { + if (group_fn(j, cpu_map, NULL, tmpmask) != group) + continue; + + cpumask_set_cpu(j, covered); + cpumask_set_cpu(j, sched_group_cpus(sg)); + } + if (!first) + first = sg; + if (last) + last->next = sg; + last = sg; + } + last->next = first; +} + +#define SD_NODES_PER_DOMAIN 16 + +#ifdef CONFIG_NUMA + +/** + * find_next_best_node - find the next node to include in a sched_domain + * @node: node whose sched_domain we're building + * @used_nodes: nodes already in the sched_domain + * + * Find the next node to include in a given scheduling domain. Simply + * finds the closest node not already in the @used_nodes map. + * + * Should use nodemask_t. + */ +static int find_next_best_node(int node, nodemask_t *used_nodes) +{ + int i, n, val, min_val, best_node = 0; + + min_val = INT_MAX; + + for (i = 0; i < nr_node_ids; i++) { + /* Start at @node */ + n = (node + i) % nr_node_ids; + + if (!nr_cpus_node(n)) + continue; + + /* Skip already used nodes */ + if (node_isset(n, *used_nodes)) + continue; + + /* Simple min distance search */ + val = node_distance(node, n); + + if (val < min_val) { + min_val = val; + best_node = n; + } + } + + node_set(best_node, *used_nodes); + return best_node; +} + +/** + * sched_domain_node_span - get a cpumask for a node's sched_domain + * @node: node whose cpumask we're constructing + * @span: resulting cpumask + * + * Given a node, construct a good cpumask for its sched_domain to span. It + * should be one that prevents unnecessary balancing, but also spreads tasks + * out optimally. + */ +static void sched_domain_node_span(int node, struct cpumask *span) +{ + nodemask_t used_nodes; + int i; + + cpumask_clear(span); + nodes_clear(used_nodes); + + cpumask_or(span, span, cpumask_of_node(node)); + node_set(node, used_nodes); + + for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { + int next_node = find_next_best_node(node, &used_nodes); + + cpumask_or(span, span, cpumask_of_node(next_node)); + } +} +#endif /* CONFIG_NUMA */ + +int sched_smt_power_savings = 0, sched_mc_power_savings = 0; + +/* + * The cpus mask in sched_group and sched_domain hangs off the end. + * + * ( See the the comments in include/linux/sched.h:struct sched_group + * and struct sched_domain. 
) + */ +struct static_sched_group { + struct sched_group sg; + DECLARE_BITMAP(cpus, CONFIG_NR_CPUS); +}; + +struct static_sched_domain { + struct sched_domain sd; + DECLARE_BITMAP(span, CONFIG_NR_CPUS); +}; + +struct s_data { +#ifdef CONFIG_NUMA + int sd_allnodes; + cpumask_var_t domainspan; + cpumask_var_t covered; + cpumask_var_t notcovered; +#endif + cpumask_var_t nodemask; + cpumask_var_t this_sibling_map; + cpumask_var_t this_core_map; + cpumask_var_t this_book_map; + cpumask_var_t send_covered; + cpumask_var_t tmpmask; + struct sched_group **sched_group_nodes; + struct root_domain *rd; +}; + +enum s_alloc { + sa_sched_groups = 0, + sa_rootdomain, + sa_tmpmask, + sa_send_covered, + sa_this_book_map, + sa_this_core_map, + sa_this_sibling_map, + sa_nodemask, + sa_sched_group_nodes, +#ifdef CONFIG_NUMA + sa_notcovered, + sa_covered, + sa_domainspan, +#endif + sa_none, +}; + +/* + * SMT sched-domains: + */ +#ifdef CONFIG_SCHED_SMT +static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains); +static DEFINE_PER_CPU(struct static_sched_group, sched_groups); + +static int +cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, + struct sched_group **sg, struct cpumask *unused) +{ + if (sg) + *sg = &per_cpu(sched_groups, cpu).sg; + return cpu; +} +#endif /* CONFIG_SCHED_SMT */ + +/* + * multi-core sched-domains: + */ +#ifdef CONFIG_SCHED_MC +static DEFINE_PER_CPU(struct static_sched_domain, core_domains); +static DEFINE_PER_CPU(struct static_sched_group, sched_group_core); + +static int +cpu_to_core_group(int cpu, const struct cpumask *cpu_map, + struct sched_group **sg, struct cpumask *mask) +{ + int group; +#ifdef CONFIG_SCHED_SMT + cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map); + group = cpumask_first(mask); +#else + group = cpu; +#endif + if (sg) + *sg = &per_cpu(sched_group_core, group).sg; + return group; +} +#endif /* CONFIG_SCHED_MC */ + +/* + * book sched-domains: + */ +#ifdef CONFIG_SCHED_BOOK +static DEFINE_PER_CPU(struct static_sched_domain, book_domains); +static DEFINE_PER_CPU(struct static_sched_group, sched_group_book); + +static int +cpu_to_book_group(int cpu, const struct cpumask *cpu_map, + struct sched_group **sg, struct cpumask *mask) +{ + int group = cpu; +#ifdef CONFIG_SCHED_MC + cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map); + group = cpumask_first(mask); +#elif defined(CONFIG_SCHED_SMT) + cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map); + group = cpumask_first(mask); +#endif + if (sg) + *sg = &per_cpu(sched_group_book, group).sg; + return group; +} +#endif /* CONFIG_SCHED_BOOK */ + +static DEFINE_PER_CPU(struct static_sched_domain, phys_domains); +static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys); + +static int +cpu_to_phys_group(int cpu, const struct cpumask *cpu_map, + struct sched_group **sg, struct cpumask *mask) +{ + int group; +#ifdef CONFIG_SCHED_BOOK + cpumask_and(mask, cpu_book_mask(cpu), cpu_map); + group = cpumask_first(mask); +#elif defined(CONFIG_SCHED_MC) + cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map); + group = cpumask_first(mask); +#elif defined(CONFIG_SCHED_SMT) + cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map); + group = cpumask_first(mask); +#else + group = cpu; +#endif + if (sg) + *sg = &per_cpu(sched_group_phys, group).sg; + return group; +} + +/** + * group_first_cpu - Returns the first cpu in the cpumask of a sched_group. + * @group: The group whose first cpu is to be returned. 
+ */ +static inline unsigned int group_first_cpu(struct sched_group *group) +{ + return cpumask_first(sched_group_cpus(group)); +} + +#ifdef CONFIG_NUMA +/* + * The init_sched_build_groups can't handle what we want to do with node + * groups, so roll our own. Now each node has its own list of groups which + * gets dynamically allocated. + */ +static DEFINE_PER_CPU(struct static_sched_domain, node_domains); +static struct sched_group ***sched_group_nodes_bycpu; + +static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains); +static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes); + +static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map, + struct sched_group **sg, + struct cpumask *nodemask) +{ + int group; + + cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map); + group = cpumask_first(nodemask); + + if (sg) + *sg = &per_cpu(sched_group_allnodes, group).sg; + return group; +} + +static void init_numa_sched_groups_power(struct sched_group *group_head) +{ + struct sched_group *sg = group_head; + int j; + + if (!sg) + return; + do { + for_each_cpu(j, sched_group_cpus(sg)) { + struct sched_domain *sd; + + sd = &per_cpu(phys_domains, j).sd; + if (j != group_first_cpu(sd->groups)) { + /* + * Only add "power" once for each + * physical package. + */ + continue; + } + + sg->cpu_power += sd->groups->cpu_power; + } + sg = sg->next; + } while (sg != group_head); +} + +static int build_numa_sched_groups(struct s_data *d, + const struct cpumask *cpu_map, int num) +{ + struct sched_domain *sd; + struct sched_group *sg, *prev; + int n, j; + + cpumask_clear(d->covered); + cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map); + if (cpumask_empty(d->nodemask)) { + d->sched_group_nodes[num] = NULL; + goto out; + } + + sched_domain_node_span(num, d->domainspan); + cpumask_and(d->domainspan, d->domainspan, cpu_map); + + sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), + GFP_KERNEL, num); + if (!sg) { + printk(KERN_WARNING "Can not alloc domain group for node %d\n", + num); + return -ENOMEM; + } + d->sched_group_nodes[num] = sg; + + for_each_cpu(j, d->nodemask) { + sd = &per_cpu(node_domains, j).sd; + sd->groups = sg; + } + + sg->cpu_power = 0; + cpumask_copy(sched_group_cpus(sg), d->nodemask); + sg->next = sg; + cpumask_or(d->covered, d->covered, d->nodemask); + + prev = sg; + for (j = 0; j < nr_node_ids; j++) { + n = (num + j) % nr_node_ids; + cpumask_complement(d->notcovered, d->covered); + cpumask_and(d->tmpmask, d->notcovered, cpu_map); + cpumask_and(d->tmpmask, d->tmpmask, d->domainspan); + if (cpumask_empty(d->tmpmask)) + break; + cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n)); + if (cpumask_empty(d->tmpmask)) + continue; + sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), + GFP_KERNEL, num); + if (!sg) { + printk(KERN_WARNING + "Can not alloc domain group for node %d\n", j); + return -ENOMEM; + } + sg->cpu_power = 0; + cpumask_copy(sched_group_cpus(sg), d->tmpmask); + sg->next = prev->next; + cpumask_or(d->covered, d->covered, d->tmpmask); + prev->next = sg; + prev = sg; + } +out: + return 0; +} +#endif /* CONFIG_NUMA */ + +#ifdef CONFIG_NUMA +/* Free memory allocated for various sched_group structures */ +static void free_sched_groups(const struct cpumask *cpu_map, + struct cpumask *nodemask) +{ + int cpu, i; + + for_each_cpu(cpu, cpu_map) { + struct sched_group **sched_group_nodes + = sched_group_nodes_bycpu[cpu]; + + if (!sched_group_nodes) + continue; + + for (i = 0; i < nr_node_ids; i++) { + struct 
sched_group *oldsg, *sg = sched_group_nodes[i]; + + cpumask_and(nodemask, cpumask_of_node(i), cpu_map); + if (cpumask_empty(nodemask)) + continue; + + if (sg == NULL) + continue; + sg = sg->next; +next_sg: + oldsg = sg; + sg = sg->next; + kfree(oldsg); + if (oldsg != sched_group_nodes[i]) + goto next_sg; + } + kfree(sched_group_nodes); + sched_group_nodes_bycpu[cpu] = NULL; + } +} +#else /* !CONFIG_NUMA */ +static void free_sched_groups(const struct cpumask *cpu_map, + struct cpumask *nodemask) +{ +} +#endif /* CONFIG_NUMA */ + +/* + * Initialise sched groups cpu_power. + * + * cpu_power indicates the capacity of sched group, which is used while + * distributing the load between different sched groups in a sched domain. + * Typically cpu_power for all the groups in a sched domain will be same unless + * there are asymmetries in the topology. If there are asymmetries, group + * having more cpu_power will pickup more load compared to the group having + * less cpu_power. + * + * cpu_power will be a multiple of SCHED_LOAD_SCALE. This multiple represents + * the maximum number of tasks a group can handle in the presence of other idle + * or lightly loaded groups in the same sched domain. + */ +static void init_sched_groups_power(int cpu, struct sched_domain *sd) +{ + struct sched_domain *child; + struct sched_group *group; + long power; + int weight; + + WARN_ON(!sd || !sd->groups); + + if (cpu != group_first_cpu(sd->groups)) + return; + + sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups)); + + child = sd->child; + + sd->groups->cpu_power = 0; + + if (!child) { + power = SCHED_LOAD_SCALE; + weight = cpumask_weight(sched_domain_span(sd)); + /* + * SMT siblings share the power of a single core. + * Usually multiple threads get a better yield out of + * that one core than a single thread would have, + * reflect that in sd->smt_gain. 
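+ * Each sibling is then credited with roughly smt_gain / weight of
+ * capacity rather than a full SCHED_LOAD_SCALE.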
+ */ + if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) { + power *= sd->smt_gain; + power /= weight; + power >>= SCHED_LOAD_SHIFT; + } + sd->groups->cpu_power += power; + return; + } + + /* + * Add cpu_power of each child group to this groups cpu_power + */ + group = child->groups; + do { + sd->groups->cpu_power += group->cpu_power; + group = group->next; + } while (group != child->groups); +} + +/* + * Initialisers for schedule domains + * Non-inlined to reduce accumulated stack pressure in build_sched_domains() + */ + +#ifdef CONFIG_SCHED_DEBUG +# define SD_INIT_NAME(sd, type) sd->name = #type +#else +# define SD_INIT_NAME(sd, type) do { } while (0) +#endif + +#define SD_INIT(sd, type) sd_init_##type(sd) + +#define SD_INIT_FUNC(type) \ +static noinline void sd_init_##type(struct sched_domain *sd) \ +{ \ + memset(sd, 0, sizeof(*sd)); \ + *sd = SD_##type##_INIT; \ + sd->level = SD_LV_##type; \ + SD_INIT_NAME(sd, type); \ +} + +SD_INIT_FUNC(CPU) +#ifdef CONFIG_NUMA + SD_INIT_FUNC(ALLNODES) + SD_INIT_FUNC(NODE) +#endif +#ifdef CONFIG_SCHED_SMT + SD_INIT_FUNC(SIBLING) +#endif +#ifdef CONFIG_SCHED_MC + SD_INIT_FUNC(MC) +#endif +#ifdef CONFIG_SCHED_BOOK + SD_INIT_FUNC(BOOK) +#endif + +static int default_relax_domain_level = -1; + +static int __init setup_relax_domain_level(char *str) +{ + unsigned long val; + + val = simple_strtoul(str, NULL, 0); + if (val < SD_LV_MAX) + default_relax_domain_level = val; + + return 1; +} +__setup("relax_domain_level=", setup_relax_domain_level); + +static void set_domain_attribute(struct sched_domain *sd, + struct sched_domain_attr *attr) +{ + int request; + + if (!attr || attr->relax_domain_level < 0) { + if (default_relax_domain_level < 0) + return; + else + request = default_relax_domain_level; + } else + request = attr->relax_domain_level; + if (request < sd->level) { + /* turn off idle balance on this domain */ + sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); + } else { + /* turn on idle balance on this domain */ + sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); + } +} + +static void __free_domain_allocs(struct s_data *d, enum s_alloc what, + const struct cpumask *cpu_map) +{ + switch (what) { + case sa_sched_groups: + free_sched_groups(cpu_map, d->tmpmask); /* fall through */ + d->sched_group_nodes = NULL; + case sa_rootdomain: + free_rootdomain(d->rd); /* fall through */ + case sa_tmpmask: + free_cpumask_var(d->tmpmask); /* fall through */ + case sa_send_covered: + free_cpumask_var(d->send_covered); /* fall through */ + case sa_this_book_map: + free_cpumask_var(d->this_book_map); /* fall through */ + case sa_this_core_map: + free_cpumask_var(d->this_core_map); /* fall through */ + case sa_this_sibling_map: + free_cpumask_var(d->this_sibling_map); /* fall through */ + case sa_nodemask: + free_cpumask_var(d->nodemask); /* fall through */ + case sa_sched_group_nodes: +#ifdef CONFIG_NUMA + kfree(d->sched_group_nodes); /* fall through */ + case sa_notcovered: + free_cpumask_var(d->notcovered); /* fall through */ + case sa_covered: + free_cpumask_var(d->covered); /* fall through */ + case sa_domainspan: + free_cpumask_var(d->domainspan); /* fall through */ +#endif + case sa_none: + break; + } +} + +static enum s_alloc __visit_domain_allocation_hell(struct s_data *d, + const struct cpumask *cpu_map) +{ +#ifdef CONFIG_NUMA + if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL)) + return sa_none; + if (!alloc_cpumask_var(&d->covered, GFP_KERNEL)) + return sa_domainspan; + if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL)) + return sa_covered; + /* 
Allocate the per-node list of sched groups */ + d->sched_group_nodes = kcalloc(nr_node_ids, + sizeof(struct sched_group *), GFP_KERNEL); + if (!d->sched_group_nodes) { + printk(KERN_WARNING "Can not alloc sched group node list\n"); + return sa_notcovered; + } + sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes; +#endif + if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL)) + return sa_sched_group_nodes; + if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL)) + return sa_nodemask; + if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL)) + return sa_this_sibling_map; + if (!alloc_cpumask_var(&d->this_book_map, GFP_KERNEL)) + return sa_this_core_map; + if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL)) + return sa_this_book_map; + if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL)) + return sa_send_covered; + d->rd = alloc_rootdomain(); + if (!d->rd) { + printk(KERN_WARNING "Cannot alloc root domain\n"); + return sa_tmpmask; + } + return sa_rootdomain; +} + +static struct sched_domain *__build_numa_sched_domains(struct s_data *d, + const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i) +{ + struct sched_domain *sd = NULL; +#ifdef CONFIG_NUMA + struct sched_domain *parent; + + d->sd_allnodes = 0; + if (cpumask_weight(cpu_map) > + SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) { + sd = &per_cpu(allnodes_domains, i).sd; + SD_INIT(sd, ALLNODES); + set_domain_attribute(sd, attr); + cpumask_copy(sched_domain_span(sd), cpu_map); + cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask); + d->sd_allnodes = 1; + } + parent = sd; + + sd = &per_cpu(node_domains, i).sd; + SD_INIT(sd, NODE); + set_domain_attribute(sd, attr); + sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd)); + sd->parent = parent; + if (parent) + parent->child = sd; + cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map); +#endif + return sd; +} + +static struct sched_domain *__build_cpu_sched_domain(struct s_data *d, + const struct cpumask *cpu_map, struct sched_domain_attr *attr, + struct sched_domain *parent, int i) +{ + struct sched_domain *sd; + sd = &per_cpu(phys_domains, i).sd; + SD_INIT(sd, CPU); + set_domain_attribute(sd, attr); + cpumask_copy(sched_domain_span(sd), d->nodemask); + sd->parent = parent; + if (parent) + parent->child = sd; + cpu_to_phys_group(i, cpu_map, &sd->groups, d->tmpmask); + return sd; +} + +static struct sched_domain *__build_book_sched_domain(struct s_data *d, + const struct cpumask *cpu_map, struct sched_domain_attr *attr, + struct sched_domain *parent, int i) +{ + struct sched_domain *sd = parent; +#ifdef CONFIG_SCHED_BOOK + sd = &per_cpu(book_domains, i).sd; + SD_INIT(sd, BOOK); + set_domain_attribute(sd, attr); + cpumask_and(sched_domain_span(sd), cpu_map, cpu_book_mask(i)); + sd->parent = parent; + parent->child = sd; + cpu_to_book_group(i, cpu_map, &sd->groups, d->tmpmask); +#endif + return sd; +} + +static struct sched_domain *__build_mc_sched_domain(struct s_data *d, + const struct cpumask *cpu_map, struct sched_domain_attr *attr, + struct sched_domain *parent, int i) +{ + struct sched_domain *sd = parent; +#ifdef CONFIG_SCHED_MC + sd = &per_cpu(core_domains, i).sd; + SD_INIT(sd, MC); + set_domain_attribute(sd, attr); + cpumask_and(sched_domain_span(sd), cpu_map, cpu_coregroup_mask(i)); + sd->parent = parent; + parent->child = sd; + cpu_to_core_group(i, cpu_map, &sd->groups, d->tmpmask); +#endif + return sd; +} + +static struct sched_domain *__build_smt_sched_domain(struct s_data *d, + const struct cpumask *cpu_map, struct 
sched_domain_attr *attr, + struct sched_domain *parent, int i) +{ + struct sched_domain *sd = parent; +#ifdef CONFIG_SCHED_SMT + sd = &per_cpu(cpu_domains, i).sd; + SD_INIT(sd, SIBLING); + set_domain_attribute(sd, attr); + cpumask_and(sched_domain_span(sd), cpu_map, topology_thread_cpumask(i)); + sd->parent = parent; + parent->child = sd; + cpu_to_cpu_group(i, cpu_map, &sd->groups, d->tmpmask); +#endif + return sd; +} + +static void build_sched_groups(struct s_data *d, enum sched_domain_level l, + const struct cpumask *cpu_map, int cpu) +{ + switch (l) { +#ifdef CONFIG_SCHED_SMT + case SD_LV_SIBLING: /* set up CPU (sibling) groups */ + cpumask_and(d->this_sibling_map, cpu_map, + topology_thread_cpumask(cpu)); + if (cpu == cpumask_first(d->this_sibling_map)) + init_sched_build_groups(d->this_sibling_map, cpu_map, + &cpu_to_cpu_group, + d->send_covered, d->tmpmask); + break; +#endif +#ifdef CONFIG_SCHED_MC + case SD_LV_MC: /* set up multi-core groups */ + cpumask_and(d->this_core_map, cpu_map, cpu_coregroup_mask(cpu)); + if (cpu == cpumask_first(d->this_core_map)) + init_sched_build_groups(d->this_core_map, cpu_map, + &cpu_to_core_group, + d->send_covered, d->tmpmask); + break; +#endif +#ifdef CONFIG_SCHED_BOOK + case SD_LV_BOOK: /* set up book groups */ + cpumask_and(d->this_book_map, cpu_map, cpu_book_mask(cpu)); + if (cpu == cpumask_first(d->this_book_map)) + init_sched_build_groups(d->this_book_map, cpu_map, + &cpu_to_book_group, + d->send_covered, d->tmpmask); + break; +#endif + case SD_LV_CPU: /* set up physical groups */ + cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map); + if (!cpumask_empty(d->nodemask)) + init_sched_build_groups(d->nodemask, cpu_map, + &cpu_to_phys_group, + d->send_covered, d->tmpmask); + break; +#ifdef CONFIG_NUMA + case SD_LV_ALLNODES: + init_sched_build_groups(cpu_map, cpu_map, &cpu_to_allnodes_group, + d->send_covered, d->tmpmask); + break; +#endif + default: + break; + } +} + +/* + * Build sched domains for a given set of cpus and attach the sched domains + * to the individual cpus + */ +static int __build_sched_domains(const struct cpumask *cpu_map, + struct sched_domain_attr *attr) +{ + enum s_alloc alloc_state = sa_none; + struct s_data d; + struct sched_domain *sd; + int i; +#ifdef CONFIG_NUMA + d.sd_allnodes = 0; +#endif + + alloc_state = __visit_domain_allocation_hell(&d, cpu_map); + if (alloc_state != sa_rootdomain) + goto error; + alloc_state = sa_sched_groups; + + /* + * Set up domains for cpus specified by the cpu_map. 
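+ * Domains are built top-down for every cpu: NUMA (allnodes/node) first,
+ * then physical (CPU), book, multi-core and finally SMT, each new level
+ * being linked in as the child of the previous one.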
+ */ + for_each_cpu(i, cpu_map) { + cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)), + cpu_map); + + sd = __build_numa_sched_domains(&d, cpu_map, attr, i); + sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i); + sd = __build_book_sched_domain(&d, cpu_map, attr, sd, i); + sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i); + sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i); + } + + for_each_cpu(i, cpu_map) { + build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i); + build_sched_groups(&d, SD_LV_BOOK, cpu_map, i); + build_sched_groups(&d, SD_LV_MC, cpu_map, i); + } + + /* Set up physical groups */ + for (i = 0; i < nr_node_ids; i++) + build_sched_groups(&d, SD_LV_CPU, cpu_map, i); + +#ifdef CONFIG_NUMA + /* Set up node groups */ + if (d.sd_allnodes) + build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0); + + for (i = 0; i < nr_node_ids; i++) + if (build_numa_sched_groups(&d, cpu_map, i)) + goto error; +#endif + + /* Calculate CPU power for physical packages and nodes */ +#ifdef CONFIG_SCHED_SMT + for_each_cpu(i, cpu_map) { + sd = &per_cpu(cpu_domains, i).sd; + init_sched_groups_power(i, sd); + } +#endif +#ifdef CONFIG_SCHED_MC + for_each_cpu(i, cpu_map) { + sd = &per_cpu(core_domains, i).sd; + init_sched_groups_power(i, sd); + } +#endif +#ifdef CONFIG_SCHED_BOOK + for_each_cpu(i, cpu_map) { + sd = &per_cpu(book_domains, i).sd; + init_sched_groups_power(i, sd); + } +#endif + + for_each_cpu(i, cpu_map) { + sd = &per_cpu(phys_domains, i).sd; + init_sched_groups_power(i, sd); + } + +#ifdef CONFIG_NUMA + for (i = 0; i < nr_node_ids; i++) + init_numa_sched_groups_power(d.sched_group_nodes[i]); + + if (d.sd_allnodes) { + struct sched_group *sg; + + cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg, + d.tmpmask); + init_numa_sched_groups_power(sg); + } +#endif + + /* Attach the domains */ + for_each_cpu(i, cpu_map) { +#ifdef CONFIG_SCHED_SMT + sd = &per_cpu(cpu_domains, i).sd; +#elif defined(CONFIG_SCHED_MC) + sd = &per_cpu(core_domains, i).sd; +#elif defined(CONFIG_SCHED_BOOK) + sd = &per_cpu(book_domains, i).sd; +#else + sd = &per_cpu(phys_domains, i).sd; +#endif + cpu_attach_domain(sd, d.rd, i); + } + + d.sched_group_nodes = NULL; /* don't free this we still need it */ + __free_domain_allocs(&d, sa_tmpmask, cpu_map); + return 0; + +error: + __free_domain_allocs(&d, alloc_state, cpu_map); + return -ENOMEM; +} + +static int build_sched_domains(const struct cpumask *cpu_map) +{ + return __build_sched_domains(cpu_map, NULL); +} + +static cpumask_var_t *doms_cur; /* current sched domains */ +static int ndoms_cur; /* number of sched domains in 'doms_cur' */ +static struct sched_domain_attr *dattr_cur; + /* attribues of custom domains in 'doms_cur' */ + +/* + * Special case: If a kmalloc of a doms_cur partition (array of + * cpumask) fails, then fallback to a single sched domain, + * as determined by the single cpumask fallback_doms. + */ +static cpumask_var_t fallback_doms; + +/* + * arch_update_cpu_topology lets virtualised architectures update the + * cpu core maps. It is supposed to return 1 if the topology changed + * or 0 if it stayed the same. 
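+ * The weak default implementation below assumes a fixed topology and
+ * always returns 0.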
+ */ +int __attribute__((weak)) arch_update_cpu_topology(void) +{ + return 0; +} + +cpumask_var_t *alloc_sched_domains(unsigned int ndoms) +{ + int i; + cpumask_var_t *doms; + + doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL); + if (!doms) + return NULL; + for (i = 0; i < ndoms; i++) { + if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { + free_sched_domains(doms, i); + return NULL; + } + } + return doms; +} + +void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) +{ + unsigned int i; + for (i = 0; i < ndoms; i++) + free_cpumask_var(doms[i]); + kfree(doms); +} + +/* + * Set up scheduler domains and groups. Callers must hold the hotplug lock. + * For now this just excludes isolated cpus, but could be used to + * exclude other special cases in the future. + */ +static int arch_init_sched_domains(const struct cpumask *cpu_map) +{ + int err; + + arch_update_cpu_topology(); + ndoms_cur = 1; + doms_cur = alloc_sched_domains(ndoms_cur); + if (!doms_cur) + doms_cur = &fallback_doms; + cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); + dattr_cur = NULL; + err = build_sched_domains(doms_cur[0]); + register_sched_domain_sysctl(); + + return err; +} + +static void arch_destroy_sched_domains(const struct cpumask *cpu_map, + struct cpumask *tmpmask) +{ + free_sched_groups(cpu_map, tmpmask); +} + +/* + * Detach sched domains from a group of cpus specified in cpu_map + * These cpus will now be attached to the NULL domain + */ +static void detach_destroy_domains(const struct cpumask *cpu_map) +{ + /* Save because hotplug lock held. */ + static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS); + int i; + + for_each_cpu(i, cpu_map) + cpu_attach_domain(NULL, &def_root_domain, i); + synchronize_sched(); + arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask)); +} + +/* handle null as "default" */ +static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, + struct sched_domain_attr *new, int idx_new) +{ + struct sched_domain_attr tmp; + + /* fast path */ + if (!new && !cur) + return 1; + + tmp = SD_ATTR_INIT; + return !memcmp(cur ? (cur + idx_cur) : &tmp, + new ? (new + idx_new) : &tmp, + sizeof(struct sched_domain_attr)); +} + +/* + * Partition sched domains as specified by the 'ndoms_new' + * cpumasks in the array doms_new[] of cpumasks. This compares + * doms_new[] to the current sched domain partitioning, doms_cur[]. + * It destroys each deleted domain and builds each new domain. + * + * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. + * The masks don't intersect (don't overlap.) We should setup one + * sched domain for each mask. CPUs not in any of the cpumasks will + * not be load balanced. If the same cpumask appears both in the + * current 'doms_cur' domains and in the new 'doms_new', we can leave + * it as it is. + * + * The passed in 'doms_new' should be allocated using + * alloc_sched_domains. This routine takes ownership of it and will + * free_sched_domains it when done with it. If the caller failed the + * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, + * and partition_sched_domains() will fallback to the single partition + * 'fallback_doms', it also forces the domains to be rebuilt. + * + * If doms_new == NULL it will be replaced with cpu_online_mask. + * ndoms_new == 0 is a special case for destroying existing domains, + * and it will not create the default domain. 
+ * + * Call with hotplug lock held + */ +void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], + struct sched_domain_attr *dattr_new) +{ + int i, j, n; + int new_topology; + + mutex_lock(&sched_domains_mutex); + + /* always unregister in case we don't destroy any domains */ + unregister_sched_domain_sysctl(); + + /* Let architecture update cpu core mappings. */ + new_topology = arch_update_cpu_topology(); + + n = doms_new ? ndoms_new : 0; + + /* Destroy deleted domains */ + for (i = 0; i < ndoms_cur; i++) { + for (j = 0; j < n && !new_topology; j++) { + if (cpumask_equal(doms_cur[i], doms_new[j]) + && dattrs_equal(dattr_cur, i, dattr_new, j)) + goto match1; + } + /* no match - a current sched domain not in new doms_new[] */ + detach_destroy_domains(doms_cur[i]); +match1: + ; + } + + if (doms_new == NULL) { + ndoms_cur = 0; + doms_new = &fallback_doms; + cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map); + WARN_ON_ONCE(dattr_new); + } + + /* Build new domains */ + for (i = 0; i < ndoms_new; i++) { + for (j = 0; j < ndoms_cur && !new_topology; j++) { + if (cpumask_equal(doms_new[i], doms_cur[j]) + && dattrs_equal(dattr_new, i, dattr_cur, j)) + goto match2; + } + /* no match - add a new doms_new */ + __build_sched_domains(doms_new[i], + dattr_new ? dattr_new + i : NULL); +match2: + ; + } + + /* Remember the new sched domains */ + if (doms_cur != &fallback_doms) + free_sched_domains(doms_cur, ndoms_cur); + kfree(dattr_cur); /* kfree(NULL) is safe */ + doms_cur = doms_new; + dattr_cur = dattr_new; + ndoms_cur = ndoms_new; + + register_sched_domain_sysctl(); + + mutex_unlock(&sched_domains_mutex); +} + +#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) +static void arch_reinit_sched_domains(void) +{ + get_online_cpus(); + + /* Destroy domains first to force the rebuild */ + partition_sched_domains(0, NULL, NULL); + + rebuild_sched_domains(); + put_online_cpus(); +} + +static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) +{ + unsigned int level = 0; + + if (sscanf(buf, "%u", &level) != 1) + return -EINVAL; + + /* + * level is always be positive so don't check for + * level < POWERSAVINGS_BALANCE_NONE which is 0 + * What happens on 0 or 1 byte write, + * need to check for count as well? 
+ */ + + if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS) + return -EINVAL; + + if (smt) + sched_smt_power_savings = level; + else + sched_mc_power_savings = level; + + arch_reinit_sched_domains(); + + return count; +} + +#ifdef CONFIG_SCHED_MC +static ssize_t sched_mc_power_savings_show(struct sysdev_class *class, + struct sysdev_class_attribute *attr, + char *page) +{ + return sprintf(page, "%u\n", sched_mc_power_savings); +} +static ssize_t sched_mc_power_savings_store(struct sysdev_class *class, + struct sysdev_class_attribute *attr, + const char *buf, size_t count) +{ + return sched_power_savings_store(buf, count, 0); +} +static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644, + sched_mc_power_savings_show, + sched_mc_power_savings_store); +#endif + +#ifdef CONFIG_SCHED_SMT +static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev, + struct sysdev_class_attribute *attr, + char *page) +{ + return sprintf(page, "%u\n", sched_smt_power_savings); +} +static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev, + struct sysdev_class_attribute *attr, + const char *buf, size_t count) +{ + return sched_power_savings_store(buf, count, 1); +} +static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644, + sched_smt_power_savings_show, + sched_smt_power_savings_store); +#endif + +int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) +{ + int err = 0; + +#ifdef CONFIG_SCHED_SMT + if (smt_capable()) + err = sysfs_create_file(&cls->kset.kobj, + &attr_sched_smt_power_savings.attr); +#endif +#ifdef CONFIG_SCHED_MC + if (!err && mc_capable()) + err = sysfs_create_file(&cls->kset.kobj, + &attr_sched_mc_power_savings.attr); +#endif + return err; +} +#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */ + +/* + * Update cpusets according to cpu_active mask. If cpusets are + * disabled, cpuset_update_active_cpus() becomes a simple wrapper + * around partition_sched_domains(). + */ +static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action, + void *hcpu) +{ + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_ONLINE: + case CPU_DOWN_FAILED: + cpuset_update_active_cpus(); + return NOTIFY_OK; + default: + return NOTIFY_DONE; + } +} + +static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action, + void *hcpu) +{ + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_DOWN_PREPARE: + cpuset_update_active_cpus(); + return NOTIFY_OK; + default: + return NOTIFY_DONE; + } +} + +static int update_runtime(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + switch (action) { + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + return NOTIFY_OK; + + case CPU_DOWN_FAILED: + case CPU_DOWN_FAILED_FROZEN: + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + return NOTIFY_OK; + + default: + return NOTIFY_DONE; + } +} + +#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC) +/* + * Cheaper version of the below functions in case support for SMT and MC is + * compiled in but CPUs have no siblings. 
+ */ +static int sole_cpu_idle(unsigned long cpu) +{ + return rq_idle(cpu_rq(cpu)); +} +#endif +#ifdef CONFIG_SCHED_SMT +/* All this CPU's SMT siblings are idle */ +static int siblings_cpu_idle(unsigned long cpu) +{ + return cpumask_subset(&(cpu_rq(cpu)->smt_siblings), + &grq.cpu_idle_map); +} +#endif +#ifdef CONFIG_SCHED_MC +/* All this CPU's shared cache siblings are idle */ +static int cache_cpu_idle(unsigned long cpu) +{ + return cpumask_subset(&(cpu_rq(cpu)->cache_siblings), + &grq.cpu_idle_map); +} +#endif + +void __init sched_init_smp(void) +{ + struct sched_domain *sd; + int cpu; + + cpumask_var_t non_isolated_cpus; + + alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); + alloc_cpumask_var(&fallback_doms, GFP_KERNEL); + +#if defined(CONFIG_NUMA) + sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **), + GFP_KERNEL); + BUG_ON(sched_group_nodes_bycpu == NULL); +#endif + get_online_cpus(); + mutex_lock(&sched_domains_mutex); + arch_init_sched_domains(cpu_active_mask); + cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); + if (cpumask_empty(non_isolated_cpus)) + cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); + mutex_unlock(&sched_domains_mutex); + put_online_cpus(); + + hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); + hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); + + /* RT runtime code needs to handle some hotplug events */ + hotcpu_notifier(update_runtime, 0); + + /* Move init over to a non-isolated CPU */ + if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) + BUG(); + free_cpumask_var(non_isolated_cpus); + + grq_lock_irq(); + /* + * Set up the relative cache distance of each online cpu from each + * other in a simple array for quick lookup. Locality is determined + * by the closest sched_domain that CPUs are separated by. CPUs with + * shared cache in SMT and MC are treated as local. Separate CPUs + * (within the same package or physically) within the same node are + * treated as not local. CPUs not even in the same domain (different + * nodes) are treated as very distant. + */ + for_each_online_cpu(cpu) { + struct rq *rq = cpu_rq(cpu); + for_each_domain(cpu, sd) { + unsigned long locality; + int other_cpu; + +#ifdef CONFIG_SCHED_SMT + if (sd->level == SD_LV_SIBLING) { + for_each_cpu_mask(other_cpu, *sched_domain_span(sd)) + cpumask_set_cpu(other_cpu, &rq->smt_siblings); + } +#endif +#ifdef CONFIG_SCHED_MC + if (sd->level == SD_LV_MC) { + for_each_cpu_mask(other_cpu, *sched_domain_span(sd)) + cpumask_set_cpu(other_cpu, &rq->cache_siblings); + } +#endif + if (sd->level <= SD_LV_SIBLING) + locality = 1; + else if (sd->level <= SD_LV_MC) + locality = 2; + else if (sd->level <= SD_LV_NODE) + locality = 3; + else + continue; + + for_each_cpu_mask(other_cpu, *sched_domain_span(sd)) { + if (locality < rq->cpu_locality[other_cpu]) + rq->cpu_locality[other_cpu] = locality; + } + } + +/* + * Each runqueue has its own function in case it doesn't have + * siblings of its own allowing mixed topologies. 
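+ * The handlers are only switched away from the sole_cpu_idle() default
+ * when the sibling mask really contains more than one cpu.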
+ */ +#ifdef CONFIG_SCHED_SMT + if (cpus_weight(rq->smt_siblings) > 1) + rq->siblings_idle = siblings_cpu_idle; +#endif +#ifdef CONFIG_SCHED_MC + if (cpus_weight(rq->cache_siblings) > 1) + rq->cache_idle = cache_cpu_idle; +#endif + } + grq_unlock_irq(); +} +#else +void __init sched_init_smp(void) +{ +} +#endif /* CONFIG_SMP */ + +unsigned int sysctl_timer_migration = 1; + +int in_sched_functions(unsigned long addr) +{ + return in_lock_functions(addr) || + (addr >= (unsigned long)__sched_text_start + && addr < (unsigned long)__sched_text_end); +} + +void __init sched_init(void) +{ + int i; + struct rq *rq; + + prio_ratios[0] = 128; + for (i = 1 ; i < PRIO_RANGE ; i++) + prio_ratios[i] = prio_ratios[i - 1] * 11 / 10; + + raw_spin_lock_init(&grq.lock); + grq.nr_running = grq.nr_uninterruptible = grq.nr_switches = 0; + grq.niffies = 0; + grq.last_jiffy = jiffies; + raw_spin_lock_init(&grq.iso_lock); + grq.iso_ticks = grq.iso_refractory = 0; + grq.noc = 1; +#ifdef CONFIG_SMP + init_defrootdomain(); + grq.qnr = grq.idle_cpus = 0; + cpumask_clear(&grq.cpu_idle_map); +#else + uprq = &per_cpu(runqueues, 0); +#endif + for_each_possible_cpu(i) { + rq = cpu_rq(i); + rq->user_pc = rq->nice_pc = rq->softirq_pc = rq->system_pc = + rq->iowait_pc = rq->idle_pc = 0; + rq->dither = 0; +#ifdef CONFIG_SMP + rq->sticky_task = NULL; + rq->last_niffy = 0; + rq->sd = NULL; + rq->rd = NULL; + rq->online = 0; + rq->cpu = i; + rq_attach_root(rq, &def_root_domain); +#endif + atomic_set(&rq->nr_iowait, 0); + } + +#ifdef CONFIG_SMP + nr_cpu_ids = i; + /* + * Set the base locality for cpu cache distance calculation to + * "distant" (3). Make sure the distance from a CPU to itself is 0. + */ + for_each_possible_cpu(i) { + int j; + + rq = cpu_rq(i); +#ifdef CONFIG_SCHED_SMT + cpumask_clear(&rq->smt_siblings); + cpumask_set_cpu(i, &rq->smt_siblings); + rq->siblings_idle = sole_cpu_idle; + cpumask_set_cpu(i, &rq->smt_siblings); +#endif +#ifdef CONFIG_SCHED_MC + cpumask_clear(&rq->cache_siblings); + cpumask_set_cpu(i, &rq->cache_siblings); + rq->cache_idle = sole_cpu_idle; + cpumask_set_cpu(i, &rq->cache_siblings); +#endif + rq->cpu_locality = kmalloc(nr_cpu_ids * sizeof(unsigned long), + GFP_NOWAIT); + for_each_possible_cpu(j) { + if (i == j) + rq->cpu_locality[j] = 0; + else + rq->cpu_locality[j] = 4; + } + } +#endif + + for (i = 0; i < PRIO_LIMIT; i++) + INIT_LIST_HEAD(grq.queue + i); + /* delimiter for bitsearch */ + __set_bit(PRIO_LIMIT, grq.prio_bitmap); + +#ifdef CONFIG_PREEMPT_NOTIFIERS + INIT_HLIST_HEAD(&init_task.preempt_notifiers); +#endif + +#ifdef CONFIG_RT_MUTEXES + plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock); +#endif + + /* + * The boot idle thread does lazy MMU switching as well: + */ + atomic_inc(&init_mm.mm_count); + enter_lazy_tlb(&init_mm, current); + + /* + * Make us the idle thread. Technically, schedule() should not be + * called from this thread, however somewhere below it might be, + * but because we are the idle thread, we just pick up running again + * when this runqueue becomes "idle". 
+ */ + init_idle(current, smp_processor_id()); + + /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ + zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT); +#ifdef CONFIG_SMP + /* May be allocated at isolcpus cmdline parse time */ + if (cpu_isolated_map == NULL) + zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); +#endif /* SMP */ +} + +#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP +static inline int preempt_count_equals(int preempt_offset) +{ + int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth(); + + return (nested == PREEMPT_INATOMIC_BASE + preempt_offset); +} + +void __might_sleep(const char *file, int line, int preempt_offset) +{ +#ifdef in_atomic + static unsigned long prev_jiffy; /* ratelimiting */ + + if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) || + system_state != SYSTEM_RUNNING || oops_in_progress) + return; + if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) + return; + prev_jiffy = jiffies; + + printk(KERN_ERR + "BUG: sleeping function called from invalid context at %s:%d\n", + file, line); + printk(KERN_ERR + "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", + in_atomic(), irqs_disabled(), + current->pid, current->comm); + + debug_show_held_locks(current); + if (irqs_disabled()) + print_irqtrace_events(current); + dump_stack(); +#endif +} +EXPORT_SYMBOL(__might_sleep); +#endif + +#ifdef CONFIG_MAGIC_SYSRQ +void normalize_rt_tasks(void) +{ + struct task_struct *g, *p; + unsigned long flags; + struct rq *rq; + int queued; + + read_lock_irq(&tasklist_lock); + + do_each_thread(g, p) { + if (!rt_task(p) && !iso_task(p)) + continue; + + raw_spin_lock_irqsave(&p->pi_lock, flags); + rq = __task_grq_lock(p); + + queued = task_queued(p); + if (queued) + dequeue_task(p); + __setscheduler(p, rq, SCHED_NORMAL, 0); + if (queued) { + enqueue_task(p); + try_preempt(p, rq); + } + + __task_grq_unlock(); + raw_spin_unlock_irqrestore(&p->pi_lock, flags); + } while_each_thread(g, p); + + read_unlock_irq(&tasklist_lock); +} +#endif /* CONFIG_MAGIC_SYSRQ */ + +#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) +/* + * These functions are only useful for the IA64 MCA handling, or kdb. + * + * They can only be called when the whole system has been + * stopped - every CPU needs to be quiescent, and no scheduling + * activity can take place. Using them for anything else would + * be a serious bug, and as a result, they aren't even visible + * under any other configuration. + */ + +/** + * curr_task - return the current task for a given cpu. + * @cpu: the processor in question. + * + * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! + */ +struct task_struct *curr_task(int cpu) +{ + return cpu_curr(cpu); +} + +#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ + +#ifdef CONFIG_IA64 +/** + * set_curr_task - set the current task for a given cpu. + * @cpu: the processor in question. + * @p: the task pointer to set. + * + * Description: This function must only be used when non-maskable interrupts + * are serviced on a separate stack. It allows the architecture to switch the + * notion of the current task on a cpu in a non-blocking manner. This function + * must be called with all CPU's synchronised, and interrupts disabled, the + * and caller must save the original value of the current task (see + * curr_task() above) and restore that value before reenabling interrupts and + * re-starting the system. + * + * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 
+ */ +void set_curr_task(int cpu, struct task_struct *p) +{ + cpu_curr(cpu) = p; +} + +#endif + +/* + * Use precise platform statistics if available: + */ +#ifdef CONFIG_VIRT_CPU_ACCOUNTING +void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) +{ + *ut = p->utime; + *st = p->stime; +} + +void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) +{ + struct task_cputime cputime; + + thread_group_cputime(p, &cputime); + + *ut = cputime.utime; + *st = cputime.stime; +} +#else + +#ifndef nsecs_to_cputime +# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs) +#endif + +void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) +{ + cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime); + + rtime = nsecs_to_cputime(p->sched_time); + + if (total) { + u64 temp; + + temp = (u64)(rtime * utime); + do_div(temp, total); + utime = (cputime_t)temp; + } else + utime = rtime; + + /* + * Compare with previous values, to keep monotonicity: + */ + p->prev_utime = max(p->prev_utime, utime); + p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime)); + + *ut = p->prev_utime; + *st = p->prev_stime; +} + +/* + * Must be called with siglock held. + */ +void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) +{ + struct signal_struct *sig = p->signal; + struct task_cputime cputime; + cputime_t rtime, utime, total; + + thread_group_cputime(p, &cputime); + + total = cputime_add(cputime.utime, cputime.stime); + rtime = nsecs_to_cputime(cputime.sum_exec_runtime); + + if (total) { + u64 temp; + + temp = (u64)(rtime * cputime.utime); + do_div(temp, total); + utime = (cputime_t)temp; + } else + utime = rtime; + + sig->prev_utime = max(sig->prev_utime, utime); + sig->prev_stime = max(sig->prev_stime, + cputime_sub(rtime, sig->prev_utime)); + + *ut = sig->prev_utime; + *st = sig->prev_stime; +} +#endif + +inline cputime_t task_gtime(struct task_struct *p) +{ + return p->gtime; +} + +void __cpuinit init_idle_bootup_task(struct task_struct *idle) +{} + +#ifdef CONFIG_SCHED_DEBUG +void proc_sched_show_task(struct task_struct *p, struct seq_file *m) +{} + +void proc_sched_set_task(struct task_struct *p) +{} +#endif + +#ifdef CONFIG_SMP +unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu) +{ + return SCHED_LOAD_SCALE; +} + +unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu) +{ + unsigned long weight = cpumask_weight(sched_domain_span(sd)); + unsigned long smt_gain = sd->smt_gain; + + smt_gain /= weight; + + return smt_gain; +} +#endif diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 8faa29ac949fb..e7787a1edb87a 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -84,8 +84,8 @@ unsigned int __read_mostly sysctl_sched_compat_yield; * and reduces their over-scheduling. Synchronous workloads will still * have immediate wakeup/sleep latencies. 
*/ -unsigned int sysctl_sched_wakeup_granularity = 1000000UL; -unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL; +unsigned int sysctl_sched_wakeup_granularity = 25000UL; +unsigned int normalized_sysctl_sched_wakeup_granularity = 25000UL; const_debug unsigned int sysctl_sched_migration_cost = 500000UL; @@ -2047,21 +2047,20 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, enum cpu_idle_type idle, int *all_pinned, int *this_best_prio, struct cfs_rq *busiest_cfs_rq) { - int loops = 0, pulled = 0, pinned = 0; + int loops = 0, pulled = 0; long rem_load_move = max_load_move; struct task_struct *p, *n; if (max_load_move == 0) goto out; - pinned = 1; - list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) { if (loops++ > sysctl_sched_nr_migrate) break; if ((p->se.load.weight >> 1) > rem_load_move || - !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) + !can_migrate_task(p, busiest, this_cpu, sd, idle, + all_pinned)) continue; pull_task(busiest, p, this_rq, this_cpu); @@ -2096,9 +2095,6 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, */ schedstat_add(sd, lb_gained[idle], pulled); - if (all_pinned) - *all_pinned = pinned; - return max_load_move - rem_load_move; } @@ -3301,6 +3297,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, * still unbalanced. ld_moved simply stays zero, so it is * correctly treated as an imbalance. */ + all_pinned = 1; local_irq_save(flags); double_rq_lock(this_rq, busiest); ld_moved = move_tasks(this_rq, this_cpu, busiest, diff --git a/kernel/sched_features.h b/kernel/sched_features.h index 68e69acc29b95..1b6685acadb6a 100644 --- a/kernel/sched_features.h +++ b/kernel/sched_features.h @@ -3,7 +3,7 @@ * them to run sooner, but does not allow tons of sleepers to * rip the spread apart. */ -SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1) +SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 0) /* * Place new tasks ahead so that they do not starve already running diff --git a/kernel/signal.c b/kernel/signal.c index 4e3cff10fdced..bf11d2697e9e2 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2421,9 +2421,13 @@ SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, return -EFAULT; /* Not even root can pretend to send signals from the kernel. - Nor can they impersonate a kill(), which adds source info. */ - if (info.si_code >= 0) + * Nor can they impersonate a kill()/tgkill(), which adds source info. + */ + if (info.si_code >= 0 || info.si_code == SI_TKILL) { + /* We used to allow any < 0 si_code */ + WARN_ON_ONCE(info.si_code < 0); return -EPERM; + } info.si_signo = sig; /* POSIX.1b doesn't mention process groups. */ @@ -2437,9 +2441,13 @@ long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) return -EINVAL; /* Not even root can pretend to send signals from the kernel. - Nor can they impersonate a kill(), which adds source info. */ - if (info->si_code >= 0) + * Nor can they impersonate a kill()/tgkill(), which adds source info. 
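+ * SI_TKILL is rejected explicitly as well; the WARN_ON_ONCE below flags
+ * callers that still pass a negative si_code, which used to be allowed.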
+ */ + if (info->si_code >= 0 || info->si_code == SI_TKILL) { + /* We used to allow any < 0 si_code */ + WARN_ON_ONCE(info->si_code < 0); return -EPERM; + } info->si_signo = sig; return do_send_specific(tgid, pid, sig, info); diff --git a/kernel/smp.c b/kernel/smp.c index 9910744f0856c..954548906afbe 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -450,7 +450,7 @@ void smp_call_function_many(const struct cpumask *mask, { struct call_function_data *data; unsigned long flags; - int cpu, next_cpu, this_cpu = smp_processor_id(); + int refs, cpu, next_cpu, this_cpu = smp_processor_id(); /* * Can deadlock when called with interrupts disabled. @@ -461,7 +461,7 @@ void smp_call_function_many(const struct cpumask *mask, WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() && !oops_in_progress && !early_boot_irqs_disabled); - /* So, what's a CPU they want? Ignoring this one. */ + /* Try to fastpath. So, what's a CPU they want? Ignoring this one. */ cpu = cpumask_first_and(mask, cpu_online_mask); if (cpu == this_cpu) cpu = cpumask_next_and(cpu, mask, cpu_online_mask); @@ -483,22 +483,49 @@ void smp_call_function_many(const struct cpumask *mask, data = &__get_cpu_var(cfd_data); csd_lock(&data->csd); + + /* This BUG_ON verifies our reuse assertions and can be removed */ BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask)); + /* + * The global call function queue list add and delete are protected + * by a lock, but the list is traversed without any lock, relying + * on the rcu list add and delete to allow safe concurrent traversal. + * We reuse the call function data without waiting for any grace + * period after some other cpu removes it from the global queue. + * This means a cpu might find our data block as it is being + * filled out. + * + * We hold off the interrupt handler on the other cpu by + * ordering our writes to the cpu mask vs our setting of the + * refs counter. We assert only the cpu owning the data block + * will set a bit in cpumask, and each bit will only be cleared + * by the subject cpu. Each cpu must first find its bit is + * set and then check that refs is set indicating the element is + * ready to be processed, otherwise it must skip the entry. + * + * On the previous iteration refs was set to 0 by another cpu. + * To avoid the use of transitivity, set the counter to 0 here + * so the wmb will pair with the rmb in the interrupt handler. + */ + atomic_set(&data->refs, 0); /* convert 3rd to 1st party write */ + data->csd.func = func; data->csd.info = info; - cpumask_and(data->cpumask, mask, cpu_online_mask); - cpumask_clear_cpu(this_cpu, data->cpumask); - /* - * To ensure the interrupt handler gets an complete view - * we order the cpumask and refs writes and order the read - * of them in the interrupt handler. In addition we may - * only clear our own cpu bit from the mask. - */ + /* Ensure 0 refs is visible before mask. 
Also orders func and info */ smp_wmb(); - atomic_set(&data->refs, cpumask_weight(data->cpumask)); + /* We rely on the "and" being processed before the store */ + cpumask_and(data->cpumask, mask, cpu_online_mask); + cpumask_clear_cpu(this_cpu, data->cpumask); + refs = cpumask_weight(data->cpumask); + + /* Some callers race with other cpus changing the passed mask */ + if (unlikely(!refs)) { + csd_unlock(&data->csd); + return; + } raw_spin_lock_irqsave(&call_function.lock, flags); /* @@ -507,6 +534,12 @@ void smp_call_function_many(const struct cpumask *mask, * will not miss any other list entries: */ list_add_rcu(&data->csd.list, &call_function.queue); + /* + * We rely on the wmb() in list_add_rcu to complete our writes + * to the cpumask before this write to refs, which indicates + * data is on the list and is ready to be processed. + */ + atomic_set(&data->refs, refs); raw_spin_unlock_irqrestore(&call_function.lock, flags); /* diff --git a/kernel/sys.c b/kernel/sys.c index 18da702ec813c..b837d4a35fdc0 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -297,7 +298,7 @@ void kernel_restart_prepare(char *cmd) blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); system_state = SYSTEM_RESTART; device_shutdown(); - sysdev_shutdown(); + syscore_shutdown(); } /** @@ -335,7 +336,7 @@ static void kernel_shutdown_prepare(enum system_states state) void kernel_halt(void) { kernel_shutdown_prepare(SYSTEM_HALT); - sysdev_shutdown(); + syscore_shutdown(); printk(KERN_EMERG "System halted.\n"); kmsg_dump(KMSG_DUMP_HALT); machine_halt(); @@ -354,7 +355,7 @@ void kernel_power_off(void) if (pm_power_off_prepare) pm_power_off_prepare(); disable_nonboot_cpus(); - sysdev_shutdown(); + syscore_shutdown(); printk(KERN_EMERG "Power down.\n"); kmsg_dump(KMSG_DUMP_POWEROFF); machine_power_off(); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index a26c37df1b19f..7fc892ecbe73a 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -95,6 +95,7 @@ extern char core_pattern[]; extern unsigned int core_pipe_limit; extern int pid_max; extern int min_free_kbytes; +extern int extra_free_kbytes; extern int min_free_order_shift; extern int pid_max_min, pid_max_max; extern int sysctl_drop_caches; @@ -119,7 +120,12 @@ static int zero; static int __maybe_unused one = 1; static int __maybe_unused two = 2; static unsigned long one_ul = 1; -static int one_hundred = 100; +static int __maybe_unused one_hundred = 100; +#ifdef CONFIG_SCHED_BFS +extern int rr_interval; +extern int sched_iso_cpu; +static int __read_mostly one_thousand = 1000; +#endif #ifdef CONFIG_PRINTK static int ten_thousand = 10000; #endif @@ -170,6 +176,11 @@ static int proc_taint(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #endif +#ifdef CONFIG_PRINTK +static int proc_dmesg_restrict(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); +#endif + #ifdef CONFIG_MAGIC_SYSRQ /* Note: sysrq code uses it's own private copy */ static int __sysrq_enabled = SYSRQ_DEFAULT_ENABLE; @@ -251,7 +262,7 @@ static struct ctl_table root_table[] = { { } }; -#ifdef CONFIG_SCHED_DEBUG +#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_SCHED_BFS) static int min_sched_granularity_ns = 100000; /* 100 usecs */ static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */ static int min_wakeup_granularity_ns; /* 0 usecs */ @@ -266,6 +277,7 @@ static int max_extfrag_threshold = 1000; #endif static struct ctl_table kern_table[] 
= { +#ifndef CONFIG_SCHED_BFS { .procname = "sched_child_runs_first", .data = &sysctl_sched_child_runs_first, @@ -380,6 +392,7 @@ static struct ctl_table kern_table[] = { .extra2 = &one, }, #endif +#endif /* !CONFIG_SCHED_BFS */ #ifdef CONFIG_PROVE_LOCKING { .procname = "prove_locking", @@ -714,7 +727,7 @@ static struct ctl_table kern_table[] = { .data = &kptr_restrict, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec_minmax, + .proc_handler = proc_dmesg_restrict, .extra1 = &zero, .extra2 = &two, }, @@ -822,6 +835,26 @@ static struct ctl_table kern_table[] = { .proc_handler = proc_dointvec, }, #endif +#ifdef CONFIG_SCHED_BFS + { + .procname = "rr_interval", + .data = &rr_interval, + .maxlen = sizeof (int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .extra1 = &one, + .extra2 = &one_thousand, + }, + { + .procname = "iso_cpu", + .data = &sched_iso_cpu, + .maxlen = sizeof (int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one_hundred, + }, +#endif #if defined(CONFIG_S390) && defined(CONFIG_SMP) { .procname = "spin_retry", @@ -1166,6 +1199,14 @@ static struct ctl_table vm_table[] = { .proc_handler = min_free_kbytes_sysctl_handler, .extra1 = &zero, }, + { + .procname = "extra_free_kbytes", + .data = &extra_free_kbytes, + .maxlen = sizeof(extra_free_kbytes), + .mode = 0644, + .proc_handler = min_free_kbytes_sysctl_handler, + .extra1 = &zero, + }, { .procname = "min_free_order_shift", .data = &min_free_order_shift, @@ -2405,6 +2446,17 @@ static int proc_taint(struct ctl_table *table, int write, return err; } +#ifdef CONFIG_PRINTK +static int proc_dmesg_restrict(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + if (write && !capable(CAP_SYS_ADMIN)) + return -EPERM; + + return proc_dointvec_minmax(table, write, buffer, lenp, ppos); +} +#endif + struct do_proc_dointvec_minmax_conv_param { int *min; int *max; diff --git a/kernel/time.c b/kernel/time.c index 32174359576fa..70ca1ee1767ce 100644 --- a/kernel/time.c +++ b/kernel/time.c @@ -150,7 +150,7 @@ static inline void warp_clock(void) * various programs will get confused when the clock gets warped. */ -int do_sys_settimeofday(struct timespec *tv, struct timezone *tz) +int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz) { static int firsttime = 1; int error = 0; diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 6519cf62d9cd9..0e17c10f8a9da 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -685,8 +685,8 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) /* Add clocksource to the clcoksource list */ mutex_lock(&clocksource_mutex); clocksource_enqueue(cs); - clocksource_select(); clocksource_enqueue_watchdog(cs); + clocksource_select(); mutex_unlock(&clocksource_mutex); return 0; } @@ -706,8 +706,8 @@ int clocksource_register(struct clocksource *cs) mutex_lock(&clocksource_mutex); clocksource_enqueue(cs); - clocksource_select(); clocksource_enqueue_watchdog(cs); + clocksource_select(); mutex_unlock(&clocksource_mutex); return 0; } diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index a3b5aff626064..2bb742c964950 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -523,10 +523,11 @@ static void tick_broadcast_init_next_event(struct cpumask *mask, */ void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { + int cpu = smp_processor_id(); + /* Set it up only once ! 
*/ if (bc->event_handler != tick_handle_oneshot_broadcast) { int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; - int cpu = smp_processor_id(); bc->event_handler = tick_handle_oneshot_broadcast; clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); @@ -552,6 +553,15 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc) tick_broadcast_set_event(tick_next_period, 1); } else bc->next_event.tv64 = KTIME_MAX; + } else { + /* + * The first cpu which switches to oneshot mode sets + * the bit for all other cpus which are in the general + * (periodic) broadcast mask. So the bit is set and + * would prevent the first broadcast enter after this + * to program the bc device. + */ + tick_broadcast_clear_oneshot(cpu); } } diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index d27c7562902cb..8e6a05a5915a2 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include #include @@ -353,7 +353,7 @@ EXPORT_SYMBOL(do_gettimeofday); * * Sets the time of day to the new time and update NTP and notify hrtimers */ -int do_settimeofday(struct timespec *tv) +int do_settimeofday(const struct timespec *tv) { struct timespec ts_delta; unsigned long flags; @@ -387,6 +387,42 @@ int do_settimeofday(struct timespec *tv) EXPORT_SYMBOL(do_settimeofday); + +/** + * timekeeping_inject_offset - Adds or subtracts from the current time. + * @tv: pointer to the timespec variable containing the offset + * + * Adds or subtracts an offset value from the current time. + */ +int timekeeping_inject_offset(struct timespec *ts) +{ + unsigned long flags; + + if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) + return -EINVAL; + + write_seqlock_irqsave(&xtime_lock, flags); + + timekeeping_forward_now(); + + xtime = timespec_add(xtime, *ts); + wall_to_monotonic = timespec_sub(wall_to_monotonic, *ts); + + timekeeper.ntp_error = 0; + ntp_clear(); + + update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock, + timekeeper.mult); + + write_sequnlock_irqrestore(&xtime_lock, flags); + + /* signal hrtimers about time change */ + clock_was_set(); + + return 0; +} +EXPORT_SYMBOL(timekeeping_inject_offset); + /** * change_clocksource - Swaps clocksources if a new one is available * @@ -559,15 +595,66 @@ void __init timekeeping_init(void) /* time in seconds when suspend began */ static struct timespec timekeeping_suspend_time; +/** + * __timekeeping_inject_sleeptime - Internal function to add sleep interval + * @delta: pointer to a timespec delta value + * + * Takes a timespec offset measuring a suspend interval and properly + * adds the sleep offset to the timekeeping variables. + */ +static void __timekeeping_inject_sleeptime(struct timespec *delta) +{ + xtime = timespec_add(xtime, *delta); + wall_to_monotonic = timespec_sub(wall_to_monotonic, *delta); + total_sleep_time = timespec_add(total_sleep_time, *delta); +} + + +/** + * timekeeping_inject_sleeptime - Adds suspend interval to timeekeeping values + * @delta: pointer to a timespec delta value + * + * This hook is for architectures that cannot support read_persistent_clock + * because their RTC/persistent clock is only accessible when irqs are enabled. + * + * This function should only be called by rtc_resume(), and allows + * a suspend offset to be injected into the timekeeping values. 
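+ * If read_persistent_clock() reports a non-zero time the function returns
+ * early, so the sleep interval is not accounted twice (timekeeping_resume()
+ * already handles that case).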
+ */ +void timekeeping_inject_sleeptime(struct timespec *delta) +{ + unsigned long flags; + struct timespec ts; + + /* Make sure we don't set the clock twice */ + read_persistent_clock(&ts); + if (!(ts.tv_sec == 0 && ts.tv_nsec == 0)) + return; + + write_seqlock_irqsave(&xtime_lock, flags); + timekeeping_forward_now(); + + __timekeeping_inject_sleeptime(delta); + + timekeeper.ntp_error = 0; + ntp_clear(); + update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock, + timekeeper.mult); + + write_sequnlock_irqrestore(&xtime_lock, flags); + + /* signal hrtimers about time change */ + clock_was_set(); +} + + /** * timekeeping_resume - Resumes the generic timekeeping subsystem. - * @dev: unused * * This is for the generic clocksource timekeeping. * xtime/wall_to_monotonic/jiffies/etc are * still managed by arch specific suspend/resume code. */ -static int timekeeping_resume(struct sys_device *dev) +static void timekeeping_resume(void) { unsigned long flags; struct timespec ts; @@ -580,9 +667,7 @@ static int timekeeping_resume(struct sys_device *dev) if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) { ts = timespec_sub(ts, timekeeping_suspend_time); - xtime = timespec_add(xtime, ts); - wall_to_monotonic = timespec_sub(wall_to_monotonic, ts); - total_sleep_time = timespec_add(total_sleep_time, ts); + __timekeeping_inject_sleeptime(&ts); } /* re-base the last cycle value */ timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock); @@ -596,11 +681,9 @@ static int timekeeping_resume(struct sys_device *dev) /* Resume hrtimers */ hres_timers_resume(); - - return 0; } -static int timekeeping_suspend(struct sys_device *dev, pm_message_t state) +static int timekeeping_suspend(void) { unsigned long flags; @@ -618,26 +701,18 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state) } /* sysfs resume/suspend bits for timekeeping */ -static struct sysdev_class timekeeping_sysclass = { - .name = "timekeeping", +static struct syscore_ops timekeeping_syscore_ops = { .resume = timekeeping_resume, .suspend = timekeeping_suspend, }; -static struct sys_device device_timer = { - .id = 0, - .cls = &timekeeping_sysclass, -}; - -static int __init timekeeping_init_device(void) +static int __init timekeeping_init_ops(void) { - int error = sysdev_class_register(&timekeeping_sysclass); - if (!error) - error = sysdev_register(&device_timer); - return error; + register_syscore_ops(&timekeeping_syscore_ops); + return 0; } -device_initcall(timekeeping_init_device); +device_initcall(timekeeping_init_ops); /* * If the error is already larger, we look ahead even further @@ -779,7 +854,7 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift) * * Called from the timer interrupt, must hold a write on xtime_lock. */ -void update_wall_time(void) +static void update_wall_time(void) { struct clocksource *clock; cycle_t offset; @@ -871,7 +946,7 @@ void update_wall_time(void) * getboottime - Return the real time of system boot. * @ts: pointer to the timespec to be set * - * Returns the time of day in a timespec. + * Returns the wall-time of boot in a timespec. * * This is based on the wall_to_monotonic offset and the total suspend * time. Calls to settimeofday will affect the value returned (which @@ -889,6 +964,55 @@ void getboottime(struct timespec *ts) } EXPORT_SYMBOL_GPL(getboottime); + +/** + * get_monotonic_boottime - Returns monotonic time since boot + * @ts: pointer to the timespec to be set + * + * Returns the monotonic time since boot in a timespec. 
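timekeeping_inject_sleeptime() above exists for platforms whose persistent clock cannot be read with interrupts disabled; if read_persistent_clock() returns a non-zero time the call is a no-op, so the sleep interval cannot be accounted twice. A rough sketch of the intended caller, modelled on rtc_resume() (the example_* names are illustrative, not part of this patch):

#include <linux/rtc.h>
#include <linux/time.h>

/* Sketch: remember the RTC time at suspend, inject the difference at resume. */
static struct timespec example_sleep_start;

static void example_rtc_suspend(struct rtc_device *rtc)
{
	struct rtc_time tm;
	unsigned long secs;

	rtc_read_time(rtc, &tm);
	rtc_tm_to_time(&tm, &secs);
	example_sleep_start = (struct timespec){ .tv_sec = secs };
}

static void example_rtc_resume(struct rtc_device *rtc)
{
	struct rtc_time tm;
	unsigned long secs;
	struct timespec now, delta;

	rtc_read_time(rtc, &tm);
	rtc_tm_to_time(&tm, &secs);
	now = (struct timespec){ .tv_sec = secs };

	delta = timespec_sub(now, example_sleep_start);
	if (delta.tv_sec > 0)
		timekeeping_inject_sleeptime(&delta);
}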
+ * + * This is similar to CLOCK_MONTONIC/ktime_get_ts, but also + * includes the time spent in suspend. + */ +void get_monotonic_boottime(struct timespec *ts) +{ + struct timespec tomono, sleep; + unsigned int seq; + s64 nsecs; + + WARN_ON(timekeeping_suspended); + + do { + seq = read_seqbegin(&xtime_lock); + *ts = xtime; + tomono = wall_to_monotonic; + sleep = total_sleep_time; + nsecs = timekeeping_get_ns(); + + } while (read_seqretry(&xtime_lock, seq)); + + set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec, + ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs); +} +EXPORT_SYMBOL_GPL(get_monotonic_boottime); + +/** + * ktime_get_boottime - Returns monotonic time since boot in a ktime + * + * Returns the monotonic time since boot in a ktime + * + * This is similar to CLOCK_MONTONIC/ktime_get, but also + * includes the time spent in suspend. + */ +ktime_t ktime_get_boottime(void) +{ + struct timespec ts; + + get_monotonic_boottime(&ts); + return timespec_to_ktime(ts); +} +EXPORT_SYMBOL_GPL(ktime_get_boottime); + /** * monotonic_to_bootbased - Convert the monotonic time to boot based. * @ts: pointer to the timespec to be converted @@ -910,11 +1034,6 @@ struct timespec __current_kernel_time(void) return xtime; } -struct timespec __get_wall_to_monotonic(void) -{ - return wall_to_monotonic; -} - struct timespec current_kernel_time(void) { struct timespec now; @@ -946,3 +1065,48 @@ struct timespec get_monotonic_coarse(void) now.tv_nsec + mono.tv_nsec); return now; } + +/* + * The 64-bit jiffies value is not atomic - you MUST NOT read it + * without sampling the sequence number in xtime_lock. + * jiffies is defined in the linker script... + */ +void do_timer(unsigned long ticks) +{ + jiffies_64 += ticks; + update_wall_time(); + calc_global_load(ticks); +} + +/** + * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic, + * and sleep offsets. + * @xtim: pointer to timespec to be set with xtime + * @wtom: pointer to timespec to be set with wall_to_monotonic + * @sleep: pointer to timespec to be set with time in suspend + */ +void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, + struct timespec *wtom, struct timespec *sleep) +{ + unsigned long seq; + + do { + seq = read_seqbegin(&xtime_lock); + *xtim = xtime; + *wtom = wall_to_monotonic; + *sleep = total_sleep_time; + } while (read_seqretry(&xtime_lock, seq)); +} + +/** + * xtime_update() - advances the timekeeping infrastructure + * @ticks: number of ticks, that have elapsed since the last call. + * + * Must be called with interrupts disabled. + */ +void xtime_update(unsigned long ticks) +{ + write_seqlock(&xtime_lock); + do_timer(ticks); + write_sequnlock(&xtime_lock); +} diff --git a/kernel/timer.c b/kernel/timer.c index d6459923d2452..c848cd8abe2ae 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1295,19 +1295,6 @@ void run_local_timers(void) raise_softirq(TIMER_SOFTIRQ); } -/* - * The 64-bit jiffies value is not atomic - you MUST NOT read it - * without sampling the sequence number in xtime_lock. - * jiffies is defined in the linker script... 
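get_monotonic_boottime() and ktime_get_boottime() added above behave like ktime_get_ts()/ktime_get() except that total_sleep_time is folded in, so the clock keeps advancing across suspend. A short kernel-side sketch:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

/* Sketch: measure an interval on a clock that does not stop while suspended. */
static s64 example_elapsed_ns(ktime_t since)
{
	return ktime_to_ns(ktime_sub(ktime_get_boottime(), since));
}

/* Typical use: record ktime_get_boottime() once, call example_elapsed_ns()
 * later; a suspend/resume cycle in between is included in the result.
 */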
- */ - -void do_timer(unsigned long ticks) -{ - jiffies_64 += ticks; - update_wall_time(); - calc_global_load(ticks); -} - #ifdef __ARCH_WANT_SYS_ALARM /* diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index f3dadae83883e..9803b68dbc310 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -2414,14 +2414,16 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable) ftrace_match_records(parser->buffer, parser->idx, enable); } - mutex_lock(&ftrace_lock); - if (ftrace_start_up && ftrace_enabled) - ftrace_run_update_code(FTRACE_ENABLE_CALLS); - mutex_unlock(&ftrace_lock); - trace_parser_put(parser); kfree(iter); + if (file->f_mode & FMODE_WRITE) { + mutex_lock(&ftrace_lock); + if (ftrace_start_up && ftrace_enabled) + ftrace_run_update_code(FTRACE_ENABLE_CALLS); + mutex_unlock(&ftrace_lock); + } + mutex_unlock(&ftrace_regex_lock); return 0; } @@ -3328,7 +3330,7 @@ static int start_graph_tracing(void) /* The cpu_boot init_task->ret_stack will never be freed */ for_each_online_cpu(cpu) { if (!idle_task(cpu)->ret_stack) - ftrace_graph_init_task(idle_task(cpu)); + ftrace_graph_init_idle_task(idle_task(cpu), cpu); } do { @@ -3418,6 +3420,49 @@ void unregister_ftrace_graph(void) mutex_unlock(&ftrace_lock); } +static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack); + +static void +graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack) +{ + atomic_set(&t->tracing_graph_pause, 0); + atomic_set(&t->trace_overrun, 0); + t->ftrace_timestamp = 0; + /* make curr_ret_stack visable before we add the ret_stack */ + smp_wmb(); + t->ret_stack = ret_stack; +} + +/* + * Allocate a return stack for the idle task. May be the first + * time through, or it may be done by CPU hotplug online. + */ +void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) +{ + t->curr_ret_stack = -1; + /* + * The idle task has no parent, it either has its own + * stack or no stack at all. + */ + if (t->ret_stack) + WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu)); + + if (ftrace_graph_active) { + struct ftrace_ret_stack *ret_stack; + + ret_stack = per_cpu(idle_ret_stack, cpu); + if (!ret_stack) { + ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH + * sizeof(struct ftrace_ret_stack), + GFP_KERNEL); + if (!ret_stack) + return; + per_cpu(idle_ret_stack, cpu) = ret_stack; + } + graph_init_task(t, ret_stack); + } +} + /* Allocate a return stack for newly created task */ void ftrace_graph_init_task(struct task_struct *t) { @@ -3433,12 +3478,7 @@ void ftrace_graph_init_task(struct task_struct *t) GFP_KERNEL); if (!ret_stack) return; - atomic_set(&t->tracing_graph_pause, 0); - atomic_set(&t->trace_overrun, 0); - t->ftrace_timestamp = 0; - /* make curr_ret_stack visable before we add the ret_stack */ - smp_wmb(); - t->ret_stack = ret_stack; + graph_init_task(t, ret_stack); } } diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index bd1c35a4fbccf..6ee56b4ad1363 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -669,7 +669,7 @@ static struct list_head *rb_list_head(struct list_head *list) * the reader page). But if the next page is a header page, * its flags will be non zero. 
*/ -static int inline +static inline int rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer, struct buffer_page *page, struct list_head *list) { diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ee6578b578ad3..7b65ae747c6fd 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1283,8 +1283,14 @@ __acquires(&gcwq->lock) return true; spin_unlock_irq(&gcwq->lock); - /* CPU has come up inbetween, retry migration */ + /* + * We've raced with CPU hot[un]plug. Give it a breather + * and retry migration. cond_resched() is required here; + * otherwise, we might deadlock against cpu_stop trying to + * bring down the CPU on non-preemptive kernel. + */ cpu_relax(); + cond_resched(); } } diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 44553231091f8..60942f8f0b219 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -835,7 +835,7 @@ config BOOT_PRINTK_DELAY config RCU_TORTURE_TEST tristate "torture tests for RCU" - depends on DEBUG_KERNEL + depends on DEBUG_KERNEL && !SCHED_BFS default n help This option provides a kernel module that runs torture tests @@ -1237,3 +1237,6 @@ source "samples/Kconfig" source "lib/Kconfig.kgdb" source "lib/Kconfig.kmemcheck" + +config TEST_KSTRTOX + tristate "Test kstrto*() family of functions at runtime" diff --git a/lib/Makefile b/lib/Makefile index cbb774f7d41d0..f2bf6e6f7a049 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -12,7 +12,8 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ idr.o int_sqrt.o extable.o prio_tree.o \ sha1.o irq_regs.o reciprocal_div.o argv_split.o \ proportions.o prio_heap.o ratelimit.o show_mem.o \ - is_single_threaded.o plist.o decompress.o + is_single_threaded.o plist.o decompress.o \ + memory_alloc.o memcopy.o lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o @@ -22,6 +23,8 @@ lib-y += kobject.o kref.o klist.o obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o +obj-y += kstrtox.o +obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o ifeq ($(CONFIG_DEBUG_KOBJECT),y) CFLAGS_kobject.o += -DDEBUG diff --git a/lib/bitmap.c b/lib/bitmap.c index 741fae905ae3b..cf12bb86d7c20 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -315,30 +315,32 @@ void bitmap_clear(unsigned long *map, int start, int nr) } EXPORT_SYMBOL(bitmap_clear); -/* +/** * bitmap_find_next_zero_area - find a contiguous aligned zero area * @map: The address to base the search on * @size: The bitmap size in bits * @start: The bitnumber to start searching at * @nr: The number of zeroed bits we're looking for * @align_mask: Alignment mask for zero area + * @align_offset: Alignment offset for zero area. * * The @align_mask should be one less than a power of 2; the effect is that - * the bit offset of all zero areas this function finds is multiples of that - * power of 2. A @align_mask of 0 means no alignment is required. + * the bit offset of all zero areas this function finds plus @align_offset + * is multiple of that power of 2. 
*/ -unsigned long bitmap_find_next_zero_area(unsigned long *map, - unsigned long size, - unsigned long start, - unsigned int nr, - unsigned long align_mask) +unsigned long bitmap_find_next_zero_area_off(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned int nr, + unsigned long align_mask, + unsigned long align_offset) { unsigned long index, end, i; again: index = find_next_zero_bit(map, size, start); /* Align allocation */ - index = __ALIGN_MASK(index, align_mask); + index = __ALIGN_MASK(index + align_offset, align_mask) - align_offset; end = index + nr; if (end > size) @@ -350,7 +352,7 @@ unsigned long bitmap_find_next_zero_area(unsigned long *map, } return index; } -EXPORT_SYMBOL(bitmap_find_next_zero_area); +EXPORT_SYMBOL(bitmap_find_next_zero_area_off); /* * Bitmap printing & parsing functions: first version by Bill Irwin, @@ -571,8 +573,11 @@ int bitmap_scnlistprintf(char *buf, unsigned int buflen, EXPORT_SYMBOL(bitmap_scnlistprintf); /** - * bitmap_parselist - convert list format ASCII string to bitmap - * @bp: read nul-terminated user string from this buffer + * __bitmap_parselist - convert list format ASCII string to bitmap + * @buf: read nul-terminated user string from this buffer + * @buflen: buffer size in bytes. If string is smaller than this + * then it must be terminated with a \0. + * @is_user: location of buffer, 0 indicates kernel space * @maskp: write resulting mask here * @nmaskbits: number of bits in mask to be written * @@ -587,20 +592,63 @@ EXPORT_SYMBOL(bitmap_scnlistprintf); * %-EINVAL: invalid character in string * %-ERANGE: bit number specified too large for mask */ -int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits) +static int __bitmap_parselist(const char *buf, unsigned int buflen, + int is_user, unsigned long *maskp, + int nmaskbits) { unsigned a, b; + int c, old_c, totaldigits; + const char __user *ubuf = buf; + int exp_digit, in_range; + totaldigits = c = 0; bitmap_zero(maskp, nmaskbits); do { - if (!isdigit(*bp)) - return -EINVAL; - b = a = simple_strtoul(bp, (char **)&bp, BASEDEC); - if (*bp == '-') { - bp++; - if (!isdigit(*bp)) + exp_digit = 1; + in_range = 0; + a = b = 0; + + /* Get the next cpu# or a range of cpu#'s */ + while (buflen) { + old_c = c; + if (is_user) { + if (__get_user(c, ubuf++)) + return -EFAULT; + } else + c = *buf++; + buflen--; + if (isspace(c)) + continue; + + /* + * If the last character was a space and the current + * character isn't '\0', we've got embedded whitespace. + * This is a no-no, so throw an error. 
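bitmap_find_next_zero_area_off() above generalizes bitmap_find_next_zero_area(): the power-of-two alignment is applied to (index + align_offset) rather than to the index itself, which is what the reworked genalloc needs when a chunk's bit 0 does not correspond to an aligned address. A small sketch of the semantics (sizes are illustrative):

#include <linux/bitmap.h>

/*
 * Sketch: find 4 clear bits such that (index + 3) is a multiple of 8,
 * i.e. the returned index satisfies ((index + 3) & 7) == 0.
 * A return value >= bits means no such area exists.
 */
static unsigned long example_find_off(unsigned long *map, unsigned long bits)
{
	return bitmap_find_next_zero_area_off(map, bits, 0, 4,
					      7 /* align_mask */,
					      3 /* align_offset */);
}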
+ */ + if (totaldigits && c && isspace(old_c)) + return -EINVAL; + + /* A '\0' or a ',' signal the end of a cpu# or range */ + if (c == '\0' || c == ',') + break; + + if (c == '-') { + if (exp_digit || in_range) + return -EINVAL; + b = 0; + in_range = 1; + exp_digit = 1; + continue; + } + + if (!isdigit(c)) return -EINVAL; - b = simple_strtoul(bp, (char **)&bp, BASEDEC); + + b = b * 10 + (c - '0'); + if (!in_range) + a = b; + exp_digit = 0; + totaldigits++; } if (!(a <= b)) return -EINVAL; @@ -610,13 +658,52 @@ int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits) set_bit(a, maskp); a++; } - if (*bp == ',') - bp++; - } while (*bp != '\0' && *bp != '\n'); + } while (buflen && c == ','); return 0; } + +int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits) +{ + char *nl = strchr(bp, '\n'); + int len; + + if (nl) + len = nl - bp; + else + len = strlen(bp); + + return __bitmap_parselist(bp, len, 0, maskp, nmaskbits); +} EXPORT_SYMBOL(bitmap_parselist); + +/** + * bitmap_parselist_user() + * + * @ubuf: pointer to user buffer containing string. + * @ulen: buffer size in bytes. If string is smaller than this + * then it must be terminated with a \0. + * @maskp: pointer to bitmap array that will contain result. + * @nmaskbits: size of bitmap, in bits. + * + * Wrapper for bitmap_parselist(), providing it with user buffer. + * + * We cannot have this as an inline function in bitmap.h because it needs + * linux/uaccess.h to get the access_ok() declaration and this causes + * cyclic dependencies. + */ +int bitmap_parselist_user(const char __user *ubuf, + unsigned int ulen, unsigned long *maskp, + int nmaskbits) +{ + if (!access_ok(VERIFY_READ, ubuf, ulen)) + return -EFAULT; + return __bitmap_parselist((const char *)ubuf, + ulen, 1, maskp, nmaskbits); +} +EXPORT_SYMBOL(bitmap_parselist_user); + + /** * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap * @buf: pointer to a bitmap @@ -830,7 +917,7 @@ EXPORT_SYMBOL(bitmap_bitremap); * @orig (i.e. bits 3, 5, 7 and 9) were also set. * * When bit 11 is set in @orig, it means turn on the bit in - * @dst corresponding to whatever is the twelth bit that is + * @dst corresponding to whatever is the twelfth bit that is * turned on in @relmap. In the above example, there were * only ten bits turned on in @relmap (30..39), so that bit * 11 was set in @orig had no affect on @dst. diff --git a/lib/flex_array.c b/lib/flex_array.c index c0ea40ba20828..854b57bd7d9d3 100644 --- a/lib/flex_array.c +++ b/lib/flex_array.c @@ -232,10 +232,10 @@ EXPORT_SYMBOL(flex_array_clear); /** * flex_array_prealloc - guarantee that array space exists - * @fa: the flex array for which to preallocate parts - * @start: index of first array element for which space is allocated - * @end: index of last (inclusive) element for which space is allocated - * @flags: page allocation flags + * @fa: the flex array for which to preallocate parts + * @start: index of first array element for which space is allocated + * @nr_elements: number of elements for which space is allocated + * @flags: page allocation flags * * This will guarantee that no future calls to flex_array_put() * will allocate memory. It can be used if you are expecting to @@ -245,14 +245,24 @@ EXPORT_SYMBOL(flex_array_clear); * Locking must be provided by the caller. 
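The rewritten parser above drops simple_strtoul(), rejects embedded whitespace, and gains bitmap_parselist_user() for buffers that still live in userspace (e.g. cpulist-style writes). The list syntax itself is unchanged; a quick sketch:

#include <linux/bitmap.h>

/* Sketch: "0-3,8" sets bits 0,1,2,3 and 8; returns 0, or -EINVAL/-ERANGE. */
static int example_parse_list(void)
{
	DECLARE_BITMAP(mask, 16);

	return bitmap_parselist("0-3,8", mask, 16);
}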
*/ int flex_array_prealloc(struct flex_array *fa, unsigned int start, - unsigned int end, gfp_t flags) + unsigned int nr_elements, gfp_t flags) { int start_part; int end_part; int part_nr; + unsigned int end; struct flex_array_part *part; - if (start >= fa->total_nr_elements || end >= fa->total_nr_elements) + if (!start && !nr_elements) + return 0; + if (start >= fa->total_nr_elements) + return -ENOSPC; + if (!nr_elements) + return 0; + + end = start + nr_elements - 1; + + if (end >= fa->total_nr_elements) return -ENOSPC; if (elements_fit_in_base(fa)) return 0; @@ -343,6 +353,8 @@ int flex_array_shrink(struct flex_array *fa) int part_nr; int ret = 0; + if (!fa->total_nr_elements) + return 0; if (elements_fit_in_base(fa)) return ret; for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++) { diff --git a/lib/genalloc.c b/lib/genalloc.c index 1923f1490e726..c7b9b9c41b780 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -16,53 +16,86 @@ #include +/* General purpose special memory pool descriptor. */ +struct gen_pool { + rwlock_t lock; /* protects chunks list */ + struct list_head chunks; /* list of chunks in this pool */ + unsigned order; /* minimum allocation order */ +}; + +/* General purpose special memory pool chunk descriptor. */ +struct gen_pool_chunk { + spinlock_t lock; /* protects bits */ + struct list_head next_chunk; /* next chunk in pool */ + phys_addr_t phys_addr; /* physical starting address of memory chunk */ + unsigned long start; /* start of memory chunk */ + unsigned long size; /* number of bits */ + unsigned long bits[0]; /* bitmap for allocating memory chunk */ +}; + /** - * gen_pool_create - create a new special memory pool - * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents - * @nid: node id of the node the pool structure should be allocated on, or -1 + * gen_pool_create() - create a new special memory pool + * @order: Log base 2 of number of bytes each bitmap bit + * represents. + * @nid: Node id of the node the pool structure should be allocated + * on, or -1. This will be also used for other allocations. * * Create a new special memory pool that can be used to manage special purpose * memory not managed by the regular kmalloc/kfree interface. */ -struct gen_pool *gen_pool_create(int min_alloc_order, int nid) +struct gen_pool *__must_check gen_pool_create(unsigned order, int nid) { struct gen_pool *pool; - pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid); - if (pool != NULL) { + if (WARN_ON(order >= BITS_PER_LONG)) + return NULL; + + pool = kmalloc_node(sizeof *pool, GFP_KERNEL, nid); + if (pool) { rwlock_init(&pool->lock); INIT_LIST_HEAD(&pool->chunks); - pool->min_alloc_order = min_alloc_order; + pool->order = order; } return pool; } EXPORT_SYMBOL(gen_pool_create); /** - * gen_pool_add - add a new chunk of special memory to the pool + * gen_pool_add_virt - add a new chunk of special memory to the pool * @pool: pool to add new memory chunk to - * @addr: starting address of memory chunk to add to pool + * @virt: virtual starting address of memory chunk to add to pool + * @phys: physical starting address of memory chunk to add to pool * @size: size in bytes of the memory chunk to add to pool * @nid: node id of the node the chunk structure and bitmap should be * allocated on, or -1 * * Add a new chunk of special memory to the specified pool. + * + * Returns 0 on success or a -ve errno on failure. 
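Note that flex_array_prealloc() above now takes a count of elements rather than an inclusive end index, so a caller that used to pass (start, end) must pass (start, end - start + 1). A sketch of the new convention:

#include <linux/flex_array.h>
#include <linux/gfp.h>

/* Sketch: back elements 10..19 with memory now, so later flex_array_put()
 * calls in that range cannot fail with -ENOMEM.
 */
static int example_prealloc(struct flex_array *fa)
{
	return flex_array_prealloc(fa, 10, 10, GFP_KERNEL);
}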
*/ -int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size, - int nid) +int __must_check gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys, + size_t size, int nid) { struct gen_pool_chunk *chunk; - int nbits = size >> pool->min_alloc_order; - int nbytes = sizeof(struct gen_pool_chunk) + - (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE; + size_t nbytes; - chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid); - if (unlikely(chunk == NULL)) - return -1; + if (WARN_ON(!virt || virt + size < virt || + (virt & ((1 << pool->order) - 1)))) + return -EINVAL; + + size = size >> pool->order; + if (WARN_ON(!size)) + return -EINVAL; + + nbytes = sizeof *chunk + BITS_TO_LONGS(size) * sizeof *chunk->bits; + chunk = kzalloc_node(nbytes, GFP_KERNEL, nid); + if (!chunk) + return -ENOMEM; spin_lock_init(&chunk->lock); - chunk->start_addr = addr; - chunk->end_addr = addr + size; + chunk->phys_addr = phys; + chunk->start = virt >> pool->order; + chunk->size = size; write_lock(&pool->lock); list_add(&chunk->next_chunk, &pool->chunks); @@ -70,118 +103,145 @@ int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size, return 0; } -EXPORT_SYMBOL(gen_pool_add); +EXPORT_SYMBOL(gen_pool_add_virt); /** - * gen_pool_destroy - destroy a special memory pool - * @pool: pool to destroy + * gen_pool_virt_to_phys - return the physical address of memory + * @pool: pool to allocate from + * @addr: starting address of memory + * + * Returns the physical address on success, or -1 on error. + */ +phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr) +{ + struct list_head *_chunk; + struct gen_pool_chunk *chunk; + + read_lock(&pool->lock); + list_for_each(_chunk, &pool->chunks) { + chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); + + if (addr >= chunk->start && + addr < (chunk->start + chunk->size)) + return chunk->phys_addr + addr - chunk->start; + } + read_unlock(&pool->lock); + + return -1; +} +EXPORT_SYMBOL(gen_pool_virt_to_phys); + +/** + * gen_pool_destroy() - destroy a special memory pool + * @pool: Pool to destroy. * * Destroy the specified special memory pool. Verifies that there are no * outstanding allocations. */ void gen_pool_destroy(struct gen_pool *pool) { - struct list_head *_chunk, *_next_chunk; struct gen_pool_chunk *chunk; - int order = pool->min_alloc_order; - int bit, end_bit; - + int bit; - list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { - chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); + while (!list_empty(&pool->chunks)) { + chunk = list_entry(pool->chunks.next, struct gen_pool_chunk, + next_chunk); list_del(&chunk->next_chunk); - end_bit = (chunk->end_addr - chunk->start_addr) >> order; - bit = find_next_bit(chunk->bits, end_bit, 0); - BUG_ON(bit < end_bit); + bit = find_next_bit(chunk->bits, chunk->size, 0); + BUG_ON(bit < chunk->size); kfree(chunk); } kfree(pool); - return; } EXPORT_SYMBOL(gen_pool_destroy); /** - * gen_pool_alloc - allocate special memory from the pool - * @pool: pool to allocate from - * @size: number of bytes to allocate from the pool + * gen_pool_alloc_aligned() - allocate special memory from the pool + * @pool: Pool to allocate from. + * @size: Number of bytes to allocate from the pool. + * @alignment_order: Order the allocated space should be + * aligned to (eg. 20 means allocated space + * must be aligned to 1MiB). * * Allocate the requested number of bytes from the specified pool. * Uses a first-fit algorithm. 
*/ -unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) +unsigned long __must_check +gen_pool_alloc_aligned(struct gen_pool *pool, size_t size, + unsigned alignment_order) { - struct list_head *_chunk; + unsigned long addr, align_mask = 0, flags, start; struct gen_pool_chunk *chunk; - unsigned long addr, flags; - int order = pool->min_alloc_order; - int nbits, start_bit, end_bit; if (size == 0) return 0; - nbits = (size + (1UL << order) - 1) >> order; + if (alignment_order > pool->order) + align_mask = (1 << (alignment_order - pool->order)) - 1; - read_lock(&pool->lock); - list_for_each(_chunk, &pool->chunks) { - chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); + size = (size + (1UL << pool->order) - 1) >> pool->order; - end_bit = (chunk->end_addr - chunk->start_addr) >> order; + read_lock(&pool->lock); + list_for_each_entry(chunk, &pool->chunks, next_chunk) { + if (chunk->size < size) + continue; spin_lock_irqsave(&chunk->lock, flags); - start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0, - nbits, 0); - if (start_bit >= end_bit) { + start = bitmap_find_next_zero_area_off(chunk->bits, chunk->size, + 0, size, align_mask, + chunk->start); + if (start >= chunk->size) { spin_unlock_irqrestore(&chunk->lock, flags); continue; } - addr = chunk->start_addr + ((unsigned long)start_bit << order); - - bitmap_set(chunk->bits, start_bit, nbits); + bitmap_set(chunk->bits, start, size); spin_unlock_irqrestore(&chunk->lock, flags); - read_unlock(&pool->lock); - return addr; + addr = (chunk->start + start) << pool->order; + goto done; } + + addr = 0; +done: read_unlock(&pool->lock); - return 0; + return addr; } -EXPORT_SYMBOL(gen_pool_alloc); +EXPORT_SYMBOL(gen_pool_alloc_aligned); /** - * gen_pool_free - free allocated special memory back to the pool - * @pool: pool to free to - * @addr: starting address of memory to free back to pool - * @size: size in bytes of memory to free + * gen_pool_free() - free allocated special memory back to the pool + * @pool: Pool to free to. + * @addr: Starting address of memory to free back to pool. + * @size: Size in bytes of memory to free. * * Free previously allocated special memory back to the specified pool. 
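Taken together, the reworked genalloc above is used as follows: create a pool with a minimum-allocation order, add chunks with both their virtual and physical start addresses, then allocate with an explicit alignment order and translate back to a physical address when needed. A rough sketch (region addresses and sizes are made up):

#include <linux/genalloc.h>
#include <linux/mm.h>

/* Sketch: manage a 1 MiB carveout in 4 KiB units, hand out one 64 KiB
 * block aligned to 64 KiB, then translate it back to a physical address.
 */
static phys_addr_t example_carveout_alloc(unsigned long virt, phys_addr_t phys)
{
	struct gen_pool *pool;
	unsigned long addr;

	pool = gen_pool_create(PAGE_SHIFT, -1);		/* 4 KiB granularity */
	if (!pool)
		return 0;

	if (gen_pool_add_virt(pool, virt, phys, 1UL << 20, -1)) {
		gen_pool_destroy(pool);
		return 0;
	}

	addr = gen_pool_alloc_aligned(pool, 1UL << 16, 16);	/* 2^16 aligned */
	if (!addr)
		return 0;

	return gen_pool_virt_to_phys(pool, addr);
}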
*/ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size) { - struct list_head *_chunk; struct gen_pool_chunk *chunk; unsigned long flags; - int order = pool->min_alloc_order; - int bit, nbits; - nbits = (size + (1UL << order) - 1) >> order; + if (!size) + return; - read_lock(&pool->lock); - list_for_each(_chunk, &pool->chunks) { - chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); + addr = addr >> pool->order; + size = (size + (1UL << pool->order) - 1) >> pool->order; + + BUG_ON(addr + size < addr); - if (addr >= chunk->start_addr && addr < chunk->end_addr) { - BUG_ON(addr + size > chunk->end_addr); + read_lock(&pool->lock); + list_for_each_entry(chunk, &pool->chunks, next_chunk) + if (addr >= chunk->start && + addr + size <= chunk->start + chunk->size) { spin_lock_irqsave(&chunk->lock, flags); - bit = (addr - chunk->start_addr) >> order; - while (nbits--) - __clear_bit(bit++, chunk->bits); + bitmap_clear(chunk->bits, addr - chunk->start, size); spin_unlock_irqrestore(&chunk->lock, flags); - break; + goto done; } - } - BUG_ON(nbits > 0); + BUG_ON(1); +done: read_unlock(&pool->lock); } EXPORT_SYMBOL(gen_pool_free); diff --git a/lib/kstrtox.c b/lib/kstrtox.c new file mode 100644 index 0000000000000..05672e819f8c4 --- /dev/null +++ b/lib/kstrtox.c @@ -0,0 +1,227 @@ +/* + * Convert integer string representation to an integer. + * If an integer doesn't fit into specified type, -E is returned. + * + * Integer starts with optional sign. + * kstrtou*() functions do not accept sign "-". + * + * Radix 0 means autodetection: leading "0x" implies radix 16, + * leading "0" implies radix 8, otherwise radix is 10. + * Autodetection hints work after optional sign, but not before. + * + * If -E is returned, result is not touched. + */ +#include +#include +#include +#include +#include +#include + +static inline char _tolower(const char c) +{ + return c | 0x20; +} + +static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res) +{ + unsigned long long acc; + int ok; + + if (base == 0) { + if (s[0] == '0') { + if (_tolower(s[1]) == 'x' && isxdigit(s[2])) + base = 16; + else + base = 8; + } else + base = 10; + } + if (base == 16 && s[0] == '0' && _tolower(s[1]) == 'x') + s += 2; + + acc = 0; + ok = 0; + while (*s) { + unsigned int val; + + if ('0' <= *s && *s <= '9') + val = *s - '0'; + else if ('a' <= _tolower(*s) && _tolower(*s) <= 'f') + val = _tolower(*s) - 'a' + 10; + else if (*s == '\n') { + if (*(s + 1) == '\0') + break; + else + return -EINVAL; + } else + return -EINVAL; + + if (val >= base) + return -EINVAL; + if (acc > div_u64(ULLONG_MAX - val, base)) + return -ERANGE; + acc = acc * base + val; + ok = 1; + + s++; + } + if (!ok) + return -EINVAL; + *res = acc; + return 0; +} + +int kstrtoull(const char *s, unsigned int base, unsigned long long *res) +{ + if (s[0] == '+') + s++; + return _kstrtoull(s, base, res); +} +EXPORT_SYMBOL(kstrtoull); + +int kstrtoll(const char *s, unsigned int base, long long *res) +{ + unsigned long long tmp; + int rv; + + if (s[0] == '-') { + rv = _kstrtoull(s + 1, base, &tmp); + if (rv < 0) + return rv; + if ((long long)(-tmp) >= 0) + return -ERANGE; + *res = -tmp; + } else { + rv = kstrtoull(s, base, &tmp); + if (rv < 0) + return rv; + if ((long long)tmp < 0) + return -ERANGE; + *res = tmp; + } + return 0; +} +EXPORT_SYMBOL(kstrtoll); + +/* Internal, do not use. 
*/ +int _kstrtoul(const char *s, unsigned int base, unsigned long *res) +{ + unsigned long long tmp; + int rv; + + rv = kstrtoull(s, base, &tmp); + if (rv < 0) + return rv; + if (tmp != (unsigned long long)(unsigned long)tmp) + return -ERANGE; + *res = tmp; + return 0; +} +EXPORT_SYMBOL(_kstrtoul); + +/* Internal, do not use. */ +int _kstrtol(const char *s, unsigned int base, long *res) +{ + long long tmp; + int rv; + + rv = kstrtoll(s, base, &tmp); + if (rv < 0) + return rv; + if (tmp != (long long)(long)tmp) + return -ERANGE; + *res = tmp; + return 0; +} +EXPORT_SYMBOL(_kstrtol); + +int kstrtouint(const char *s, unsigned int base, unsigned int *res) +{ + unsigned long long tmp; + int rv; + + rv = kstrtoull(s, base, &tmp); + if (rv < 0) + return rv; + if (tmp != (unsigned long long)(unsigned int)tmp) + return -ERANGE; + *res = tmp; + return 0; +} +EXPORT_SYMBOL(kstrtouint); + +int kstrtoint(const char *s, unsigned int base, int *res) +{ + long long tmp; + int rv; + + rv = kstrtoll(s, base, &tmp); + if (rv < 0) + return rv; + if (tmp != (long long)(int)tmp) + return -ERANGE; + *res = tmp; + return 0; +} +EXPORT_SYMBOL(kstrtoint); + +int kstrtou16(const char *s, unsigned int base, u16 *res) +{ + unsigned long long tmp; + int rv; + + rv = kstrtoull(s, base, &tmp); + if (rv < 0) + return rv; + if (tmp != (unsigned long long)(u16)tmp) + return -ERANGE; + *res = tmp; + return 0; +} +EXPORT_SYMBOL(kstrtou16); + +int kstrtos16(const char *s, unsigned int base, s16 *res) +{ + long long tmp; + int rv; + + rv = kstrtoll(s, base, &tmp); + if (rv < 0) + return rv; + if (tmp != (long long)(s16)tmp) + return -ERANGE; + *res = tmp; + return 0; +} +EXPORT_SYMBOL(kstrtos16); + +int kstrtou8(const char *s, unsigned int base, u8 *res) +{ + unsigned long long tmp; + int rv; + + rv = kstrtoull(s, base, &tmp); + if (rv < 0) + return rv; + if (tmp != (unsigned long long)(u8)tmp) + return -ERANGE; + *res = tmp; + return 0; +} +EXPORT_SYMBOL(kstrtou8); + +int kstrtos8(const char *s, unsigned int base, s8 *res) +{ + long long tmp; + int rv; + + rv = kstrtoll(s, base, &tmp); + if (rv < 0) + return rv; + if (tmp != (long long)(s8)tmp) + return -ERANGE; + *res = tmp; + return 0; +} +EXPORT_SYMBOL(kstrtos8); diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c index 619313ed6c46d..507a22fab7380 100644 --- a/lib/locking-selftest.c +++ b/lib/locking-selftest.c @@ -144,7 +144,7 @@ static void init_shared_classes(void) #define HARDIRQ_ENTER() \ local_irq_disable(); \ - irq_enter(); \ + __irq_enter(); \ WARN_ON(!in_irq()); #define HARDIRQ_EXIT() \ diff --git a/lib/memcopy.c b/lib/memcopy.c new file mode 100644 index 0000000000000..92c300faaf713 --- /dev/null +++ b/lib/memcopy.c @@ -0,0 +1,403 @@ +/* + * memcopy.c -- subroutines for memory copy functions. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + * Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
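The kstrto*() helpers above replace open-coded simple_strtoul()/strict_strtoul() conversions with ones that reject trailing garbage and report overflow per destination type; the TEST_KSTRTOX option added to lib/Kconfig.debug earlier in this series builds a self-test module for them. Typical use in a sysfs store handler looks roughly like this:

#include <linux/kernel.h>

/* Sketch: parse an unsigned int from a sysfs write; base 0 would also
 * accept 0x... (hex) and 0... (octal) prefixes.
 */
static ssize_t example_store(const char *buf, size_t count, unsigned int *out)
{
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;		/* -EINVAL or -ERANGE, *out untouched */

	*out = val;
	return count;
}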
+ * + * The code is derived from the GNU C Library. + * Copyright (C) 1991, 1992, 1993, 1997, 2004 Free Software Foundation, Inc. + */ + +/* BE VERY CAREFUL IF YOU CHANGE THIS CODE...! */ + +#include + +/* + * _wordcopy_fwd_aligned -- Copy block beginning at SRCP to block beginning + * at DSTP with LEN `op_t' words (not LEN bytes!). + * Both SRCP and DSTP should be aligned for memory operations on `op_t's. + */ +void _wordcopy_fwd_aligned (long int dstp, long int srcp, size_t len) +{ + op_t a0, a1; + + switch (len % 8) { + case 2: + a0 = ((op_t *) srcp)[0]; + srcp -= 6 * OPSIZ; + dstp -= 7 * OPSIZ; + len += 6; + goto do1; + case 3: + a1 = ((op_t *) srcp)[0]; + srcp -= 5 * OPSIZ; + dstp -= 6 * OPSIZ; + len += 5; + goto do2; + case 4: + a0 = ((op_t *) srcp)[0]; + srcp -= 4 * OPSIZ; + dstp -= 5 * OPSIZ; + len += 4; + goto do3; + case 5: + a1 = ((op_t *) srcp)[0]; + srcp -= 3 * OPSIZ; + dstp -= 4 * OPSIZ; + len += 3; + goto do4; + case 6: + a0 = ((op_t *) srcp)[0]; + srcp -= 2 * OPSIZ; + dstp -= 3 * OPSIZ; + len += 2; + goto do5; + case 7: + a1 = ((op_t *) srcp)[0]; + srcp -= 1 * OPSIZ; + dstp -= 2 * OPSIZ; + len += 1; + goto do6; + case 0: + if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0) + return; + a0 = ((op_t *) srcp)[0]; + srcp -= 0 * OPSIZ; + dstp -= 1 * OPSIZ; + goto do7; + case 1: + a1 = ((op_t *) srcp)[0]; + srcp -=-1 * OPSIZ; + dstp -= 0 * OPSIZ; + len -= 1; + if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0) + goto do0; + goto do8; /* No-op. */ + } + + do { +do8: + a0 = ((op_t *) srcp)[0]; + ((op_t *) dstp)[0] = a1; +do7: + a1 = ((op_t *) srcp)[1]; + ((op_t *) dstp)[1] = a0; +do6: + a0 = ((op_t *) srcp)[2]; + ((op_t *) dstp)[2] = a1; +do5: + a1 = ((op_t *) srcp)[3]; + ((op_t *) dstp)[3] = a0; +do4: + a0 = ((op_t *) srcp)[4]; + ((op_t *) dstp)[4] = a1; +do3: + a1 = ((op_t *) srcp)[5]; + ((op_t *) dstp)[5] = a0; +do2: + a0 = ((op_t *) srcp)[6]; + ((op_t *) dstp)[6] = a1; +do1: + a1 = ((op_t *) srcp)[7]; + ((op_t *) dstp)[7] = a0; + + srcp += 8 * OPSIZ; + dstp += 8 * OPSIZ; + len -= 8; + } while (len != 0); + + /* + * This is the right position for do0. Please don't move it into + * the loop. + */ +do0: + ((op_t *) dstp)[0] = a1; +} + +/* + * _wordcopy_fwd_dest_aligned -- Copy block beginning at SRCP to block + * beginning at DSTP with LEN `op_t' words (not LEN bytes!). DSTP should + * be aligned for memory operations on `op_t's, but SRCP must *not* be aligned. + */ + +void _wordcopy_fwd_dest_aligned (long int dstp, long int srcp, size_t len) +{ + op_t a0, a1, a2, a3; + int sh_1, sh_2; + + /* + * Calculate how to shift a word read at the memory operation aligned + * srcp to make it aligned for copy. + */ + sh_1 = 8 * (srcp % OPSIZ); + sh_2 = 8 * OPSIZ - sh_1; + + /* + * Make SRCP aligned by rounding it down to the beginning of the `op_t' + * it points in the middle of. + */ + srcp &= -OPSIZ; + + switch (len % 4) { + case 2: + a1 = ((op_t *) srcp)[0]; + a2 = ((op_t *) srcp)[1]; + srcp -= 1 * OPSIZ; + dstp -= 3 * OPSIZ; + len += 2; + goto do1; + case 3: + a0 = ((op_t *) srcp)[0]; + a1 = ((op_t *) srcp)[1]; + srcp -= 0 * OPSIZ; + dstp -= 2 * OPSIZ; + len += 1; + goto do2; + case 0: + if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0) + return; + a3 = ((op_t *) srcp)[0]; + a0 = ((op_t *) srcp)[1]; + srcp -=-1 * OPSIZ; + dstp -= 1 * OPSIZ; + len += 0; + goto do3; + case 1: + a2 = ((op_t *) srcp)[0]; + a3 = ((op_t *) srcp)[1]; + srcp -=-2 * OPSIZ; + dstp -= 0 * OPSIZ; + len -= 1; + if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0) + goto do0; + goto do4; /* No-op. 
*/ + } + + do { +do4: + a0 = ((op_t *) srcp)[0]; + ((op_t *) dstp)[0] = MERGE (a2, sh_1, a3, sh_2); +do3: + a1 = ((op_t *) srcp)[1]; + ((op_t *) dstp)[1] = MERGE (a3, sh_1, a0, sh_2); +do2: + a2 = ((op_t *) srcp)[2]; + ((op_t *) dstp)[2] = MERGE (a0, sh_1, a1, sh_2); +do1: + a3 = ((op_t *) srcp)[3]; + ((op_t *) dstp)[3] = MERGE (a1, sh_1, a2, sh_2); + + srcp += 4 * OPSIZ; + dstp += 4 * OPSIZ; + len -= 4; + } while (len != 0); + + /* + * This is the right position for do0. Please don't move it into + * the loop. + */ +do0: + ((op_t *) dstp)[0] = MERGE (a2, sh_1, a3, sh_2); +} + +/* + * _wordcopy_bwd_aligned -- Copy block finishing right before + * SRCP to block finishing right before DSTP with LEN `op_t' words (not LEN + * bytes!). Both SRCP and DSTP should be aligned for memory operations + * on `op_t's. + */ +void _wordcopy_bwd_aligned (long int dstp, long int srcp, size_t len) +{ + op_t a0, a1; + + switch (len % 8) { + case 2: + srcp -= 2 * OPSIZ; + dstp -= 1 * OPSIZ; + a0 = ((op_t *) srcp)[1]; + len += 6; + goto do1; + case 3: + srcp -= 3 * OPSIZ; + dstp -= 2 * OPSIZ; + a1 = ((op_t *) srcp)[2]; + len += 5; + goto do2; + case 4: + srcp -= 4 * OPSIZ; + dstp -= 3 * OPSIZ; + a0 = ((op_t *) srcp)[3]; + len += 4; + goto do3; + case 5: + srcp -= 5 * OPSIZ; + dstp -= 4 * OPSIZ; + a1 = ((op_t *) srcp)[4]; + len += 3; + goto do4; + case 6: + srcp -= 6 * OPSIZ; + dstp -= 5 * OPSIZ; + a0 = ((op_t *) srcp)[5]; + len += 2; + goto do5; + case 7: + srcp -= 7 * OPSIZ; + dstp -= 6 * OPSIZ; + a1 = ((op_t *) srcp)[6]; + len += 1; + goto do6; + case 0: + if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0) + return; + srcp -= 8 * OPSIZ; + dstp -= 7 * OPSIZ; + a0 = ((op_t *) srcp)[7]; + goto do7; + case 1: + srcp -= 9 * OPSIZ; + dstp -= 8 * OPSIZ; + a1 = ((op_t *) srcp)[8]; + len -= 1; + if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0) + goto do0; + goto do8; /* No-op. */ + } + + do { +do8: + a0 = ((op_t *) srcp)[7]; + ((op_t *) dstp)[7] = a1; +do7: + a1 = ((op_t *) srcp)[6]; + ((op_t *) dstp)[6] = a0; +do6: + a0 = ((op_t *) srcp)[5]; + ((op_t *) dstp)[5] = a1; +do5: + a1 = ((op_t *) srcp)[4]; + ((op_t *) dstp)[4] = a0; +do4: + a0 = ((op_t *) srcp)[3]; + ((op_t *) dstp)[3] = a1; +do3: + a1 = ((op_t *) srcp)[2]; + ((op_t *) dstp)[2] = a0; +do2: + a0 = ((op_t *) srcp)[1]; + ((op_t *) dstp)[1] = a1; +do1: + a1 = ((op_t *) srcp)[0]; + ((op_t *) dstp)[0] = a0; + + srcp -= 8 * OPSIZ; + dstp -= 8 * OPSIZ; + len -= 8; + } while (len != 0); + + /* + * This is the right position for do0. Please don't move it into + * the loop. + */ +do0: + ((op_t *) dstp)[7] = a1; +} + +/* + * _wordcopy_bwd_dest_aligned -- Copy block finishing right before SRCP to + * block finishing right before DSTP with LEN `op_t' words (not LEN bytes!). + * DSTP should be aligned for memory operations on `op_t', but SRCP must *not* + * be aligned. + */ +void _wordcopy_bwd_dest_aligned (long int dstp, long int srcp, size_t len) +{ + op_t a0, a1, a2, a3; + int sh_1, sh_2; + + /* + * Calculate how to shift a word read at the memory operation aligned + * srcp to make it aligned for copy. + */ + + sh_1 = 8 * (srcp % OPSIZ); + sh_2 = 8 * OPSIZ - sh_1; + + /* + * Make srcp aligned by rounding it down to the beginning of the op_t + * it points in the middle of. 
+ */ + srcp &= -OPSIZ; + srcp += OPSIZ; + + switch (len % 4) { + case 2: + srcp -= 3 * OPSIZ; + dstp -= 1 * OPSIZ; + a2 = ((op_t *) srcp)[2]; + a1 = ((op_t *) srcp)[1]; + len += 2; + goto do1; + case 3: + srcp -= 4 * OPSIZ; + dstp -= 2 * OPSIZ; + a3 = ((op_t *) srcp)[3]; + a2 = ((op_t *) srcp)[2]; + len += 1; + goto do2; + case 0: + if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0) + return; + srcp -= 5 * OPSIZ; + dstp -= 3 * OPSIZ; + a0 = ((op_t *) srcp)[4]; + a3 = ((op_t *) srcp)[3]; + goto do3; + case 1: + srcp -= 6 * OPSIZ; + dstp -= 4 * OPSIZ; + a1 = ((op_t *) srcp)[5]; + a0 = ((op_t *) srcp)[4]; + len -= 1; + if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0) + goto do0; + goto do4; /* No-op. */ + } + + do { +do4: + a3 = ((op_t *) srcp)[3]; + ((op_t *) dstp)[3] = MERGE (a0, sh_1, a1, sh_2); +do3: + a2 = ((op_t *) srcp)[2]; + ((op_t *) dstp)[2] = MERGE (a3, sh_1, a0, sh_2); +do2: + a1 = ((op_t *) srcp)[1]; + ((op_t *) dstp)[1] = MERGE (a2, sh_1, a3, sh_2); +do1: + a0 = ((op_t *) srcp)[0]; + ((op_t *) dstp)[0] = MERGE (a1, sh_1, a2, sh_2); + + srcp -= 4 * OPSIZ; + dstp -= 4 * OPSIZ; + len -= 4; + } while (len != 0); + + /* + * This is the right position for do0. Please don't move it into + * the loop. + */ +do0: + ((op_t *) dstp)[3] = MERGE (a0, sh_1, a1, sh_2); +} + diff --git a/lib/memory_alloc.c b/lib/memory_alloc.c new file mode 100644 index 0000000000000..d931e148e5b05 --- /dev/null +++ b/lib/memory_alloc.c @@ -0,0 +1,425 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define MAX_MEMPOOLS 8 + +struct mem_pool mpools[MAX_MEMPOOLS]; + +/* The tree contains all allocations over all memory pools */ +static struct rb_root alloc_root; +static struct mutex alloc_mutex; + +static void *s_start(struct seq_file *m, loff_t *pos) + __acquires(&alloc_mutex) +{ + loff_t n = *pos; + struct rb_node *r; + + mutex_lock(&alloc_mutex); + r = rb_first(&alloc_root); + + while (n > 0 && r) { + n--; + r = rb_next(r); + } + if (!n) + return r; + return NULL; +} + +static void *s_next(struct seq_file *m, void *p, loff_t *pos) +{ + struct rb_node *r = p; + ++*pos; + return rb_next(r); +} + +static void s_stop(struct seq_file *m, void *p) + __releases(&alloc_mutex) +{ + mutex_unlock(&alloc_mutex); +} + +static int s_show(struct seq_file *m, void *p) +{ + struct rb_node *r = p; + struct alloc *node = rb_entry(r, struct alloc, rb_node); + + seq_printf(m, "0x%lx 0x%p %ld %u %pS\n", node->paddr, node->vaddr, + node->len, node->mpool->id, node->caller); + return 0; +} + +static const struct seq_operations mempool_op = { + .start = s_start, + .next = s_next, + .stop = s_stop, + .show = s_show, +}; + +static int mempool_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &mempool_op); +} + +static struct alloc *find_alloc(void *addr) +{ + struct rb_root *root = &alloc_root; + struct rb_node *p = root->rb_node; + + mutex_lock(&alloc_mutex); + + while (p) { + struct alloc *node; + + node = rb_entry(p, struct alloc, rb_node); + if (addr < node->vaddr) + p = p->rb_left; + else if (addr > node->vaddr) + p = p->rb_right; + else { + mutex_unlock(&alloc_mutex); + return node; + } + } + mutex_unlock(&alloc_mutex); + return NULL; +} + +static int add_alloc(struct alloc *node) +{ + struct rb_root *root = &alloc_root; + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + + mutex_lock(&alloc_mutex); + while (*p) { + struct alloc *tmp; + parent = *p; + + tmp = rb_entry(parent, struct alloc, rb_node); + + if (node->vaddr < tmp->vaddr) + p = &(*p)->rb_left; + else if (node->vaddr > tmp->vaddr) + p = &(*p)->rb_right; + else { + WARN(1, "memory at %p already allocated", tmp->vaddr); + mutex_unlock(&alloc_mutex); + return -EINVAL; + } + } + rb_link_node(&node->rb_node, parent, p); + rb_insert_color(&node->rb_node, root); + mutex_unlock(&alloc_mutex); + return 0; +} + +static int remove_alloc(struct alloc *victim_node) +{ + struct rb_root *root = &alloc_root; + if (!victim_node) + return -EINVAL; + + mutex_lock(&alloc_mutex); + rb_erase(&victim_node->rb_node, root); + mutex_unlock(&alloc_mutex); + return 0; +} + +static struct gen_pool *initialize_gpool(unsigned long start, + unsigned long size) +{ + struct gen_pool *gpool; + + gpool = gen_pool_create(PAGE_SHIFT, -1); + + if (!gpool) + return NULL; + if (gen_pool_add(gpool, start, size, -1)) { + gen_pool_destroy(gpool); + return NULL; + } + + return gpool; +} + +static void *__alloc(struct mem_pool *mpool, unsigned long size, + unsigned long align, int cached, void *caller) +{ + unsigned long paddr; + void __iomem *vaddr; + + unsigned long aligned_size; + int log_align = ilog2(align); + + struct alloc *node; + + aligned_size = PFN_ALIGN(size); + paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align); + if (!paddr) + return NULL; + + node = kmalloc(sizeof(struct alloc), GFP_KERNEL); + if (!node) + goto out; + + if (cached) + vaddr = ioremap_cached(paddr, aligned_size); + else + vaddr 
= ioremap(paddr, aligned_size); + + if (!vaddr) + goto out_kfree; + + node->vaddr = vaddr; + node->paddr = paddr; + node->len = aligned_size; + node->mpool = mpool; + node->caller = caller; + if (add_alloc(node)) + goto out_kfree; + + mpool->free -= aligned_size; + + return vaddr; +out_kfree: + if (vaddr) + iounmap(vaddr); + kfree(node); +out: + gen_pool_free(mpool->gpool, paddr, aligned_size); + return NULL; +} + +static void __free(void *vaddr, bool unmap) +{ + struct alloc *node = find_alloc(vaddr); + + if (!node) + return; + + if (unmap) + iounmap(node->vaddr); + + gen_pool_free(node->mpool->gpool, node->paddr, node->len); + node->mpool->free += node->len; + + remove_alloc(node); + kfree(node); +} + +static struct mem_pool *mem_type_to_memory_pool(int mem_type) +{ + struct mem_pool *mpool = &mpools[mem_type]; + + if (!mpool->size) + return NULL; + + mutex_lock(&mpool->pool_mutex); + if (!mpool->gpool) + mpool->gpool = initialize_gpool(mpool->paddr, mpool->size); + mutex_unlock(&mpool->pool_mutex); + if (!mpool->gpool) + return NULL; + + return mpool; +} + +struct mem_pool *initialize_memory_pool(unsigned long start, + unsigned long size, int mem_type) +{ + int id = mem_type; + + if (id >= MAX_MEMPOOLS || size <= PAGE_SIZE || size % PAGE_SIZE) + return NULL; + + mutex_lock(&mpools[id].pool_mutex); + + mpools[id].paddr = start; + mpools[id].size = size; + mpools[id].free = size; + mpools[id].id = id; + mutex_unlock(&mpools[id].pool_mutex); + + pr_info("memory pool %d (start %lx size %lx) initialized\n", + id, start, size); + return &mpools[id]; +} +EXPORT_SYMBOL_GPL(initialize_memory_pool); + +void *allocate_contiguous_memory(unsigned long size, + int mem_type, unsigned long align, int cached) +{ + unsigned long aligned_size = PFN_ALIGN(size); + struct mem_pool *mpool; + + mpool = mem_type_to_memory_pool(mem_type); + if (!mpool) + return NULL; + return __alloc(mpool, aligned_size, align, cached, + __builtin_return_address(0)); + +} +EXPORT_SYMBOL_GPL(allocate_contiguous_memory); + +unsigned long _allocate_contiguous_memory_nomap(unsigned long size, + int mem_type, unsigned long align, void *caller) +{ + unsigned long paddr; + unsigned long aligned_size; + + struct alloc *node; + struct mem_pool *mpool; + int log_align = ilog2(align); + + mpool = mem_type_to_memory_pool(mem_type); + if (!mpool || !mpool->gpool) + return 0; + + aligned_size = PFN_ALIGN(size); + paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align); + if (!paddr) + return 0; + + node = kmalloc(sizeof(struct alloc), GFP_KERNEL); + if (!node) + goto out; + + node->paddr = paddr; + + /* We search the tree using node->vaddr, so set + * it to something unique even though we don't + * use it for physical allocation nodes. + * The virtual and physical address ranges + * are disjoint, so there won't be any chance of + * a duplicate node->vaddr value. 
+ */ + node->vaddr = (void *)paddr; + node->len = aligned_size; + node->mpool = mpool; + node->caller = caller; + if (add_alloc(node)) + goto out_kfree; + + mpool->free -= aligned_size; + return paddr; +out_kfree: + kfree(node); +out: + gen_pool_free(mpool->gpool, paddr, aligned_size); + return 0; +} +EXPORT_SYMBOL_GPL(_allocate_contiguous_memory_nomap); + +unsigned long allocate_contiguous_memory_nomap(unsigned long size, + int mem_type, unsigned long align) +{ + return _allocate_contiguous_memory_nomap(size, mem_type, align, + __builtin_return_address(0)); +} +EXPORT_SYMBOL_GPL(allocate_contiguous_memory_nomap); + +void free_contiguous_memory(void *addr) +{ + if (!addr) + return; + __free(addr, true); + return; +} +EXPORT_SYMBOL_GPL(free_contiguous_memory); + +void free_contiguous_memory_by_paddr(unsigned long paddr) +{ + if (!paddr) + return; + __free((void *)paddr, false); + return; +} +EXPORT_SYMBOL_GPL(free_contiguous_memory_by_paddr); + +unsigned long memory_pool_node_paddr(void *vaddr) +{ + struct alloc *node = find_alloc(vaddr); + + if (!node) + return -EINVAL; + + return node->paddr; +} +EXPORT_SYMBOL_GPL(memory_pool_node_paddr); + +unsigned long memory_pool_node_len(void *vaddr) +{ + struct alloc *node = find_alloc(vaddr); + + if (!node) + return -EINVAL; + + return node->len; +} +EXPORT_SYMBOL_GPL(memory_pool_node_len); + +static const struct file_operations mempool_operations = { + .owner = THIS_MODULE, + .open = mempool_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private, +}; + +int __init memory_pool_init(void) +{ + int i; + + alloc_root = RB_ROOT; + mutex_init(&alloc_mutex); + for (i = 0; i < ARRAY_SIZE(mpools); i++) { + mutex_init(&mpools[i].pool_mutex); + mpools[i].gpool = NULL; + } + + return 0; +} + +static int __init debugfs_mempool_init(void) +{ + struct dentry *entry, *dir = debugfs_create_dir("mempool", NULL); + + if (!dir) { + pr_err("Cannot create /sys/kernel/debug/mempool"); + return -EINVAL; + } + + entry = debugfs_create_file("map", S_IRUSR, dir, + NULL, &mempool_operations); + + if (!entry) + pr_err("Cannot create /sys/kernel/debug/mempool/map"); + + return entry ? 0 : -EINVAL; +} + +module_init(debugfs_mempool_init); diff --git a/lib/sha1.c b/lib/sha1.c index 4c45fd50e9136..f33271dd00cbc 100644 --- a/lib/sha1.c +++ b/lib/sha1.c @@ -1,31 +1,72 @@ /* - * SHA transform algorithm, originally taken from code written by - * Peter Gutmann, and placed in the public domain. + * SHA1 routine optimized to do word accesses rather than byte accesses, + * and to avoid unnecessary copies into the context array. + * + * This was based on the git SHA1 implementation. */ #include #include -#include +#include +#include -/* The SHA f()-functions. */ +/* + * If you have 32 registers or more, the compiler can (and should) + * try to change the array[] accesses into registers. However, on + * machines with less than ~25 registers, that won't really work, + * and at least gcc will make an unholy mess of it. + * + * So to avoid that mess which just slows things down, we force + * the stores to memory to actually happen (we might be better off + * with a 'W(t)=(val);asm("":"+m" (W(t))' there instead, as + * suggested by Artur Skawina - that will also make gcc unable to + * try to do the silly "optimize away loads" part because it won't + * see what the value will be). + * + * Ben Herrenschmidt reports that on PPC, the C version comes close + * to the optimized asm with this (ie on PPC you don't want that + * 'volatile', since there are lots of registers). 
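The allocator in lib/memory_alloc.c above is driven in two steps: platform code registers a physical carveout with initialize_memory_pool(), and drivers then call allocate_contiguous_memory() (ioremapped) or allocate_contiguous_memory_nomap() (physical address only) against that pool id. A rough driver-side sketch; the header name and the MEMTYPE_EXAMPLE id are placeholders, the real ids are platform-defined:

#include <linux/errno.h>
#include <linux/memory_alloc.h>	/* assumed header for this allocator */

#define MEMTYPE_EXAMPLE	1	/* placeholder pool id */

/* Sketch: take a 1 MiB, 1 MiB-aligned, uncached mapping from the pool,
 * use it, and give it back.
 */
static int example_use_carveout(void)
{
	void *buf;

	buf = allocate_contiguous_memory(1UL << 20, MEMTYPE_EXAMPLE,
					 1UL << 20, 0 /* uncached */);
	if (!buf)
		return -ENOMEM;

	/* memory_pool_node_paddr(buf) yields the physical address for DMA */

	free_contiguous_memory(buf);
	return 0;
}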
+ * + * On ARM we get the best code generation by forcing a full memory barrier + * between each SHA_ROUND, otherwise gcc happily get wild with spilling and + * the stack frame size simply explode and performance goes down the drain. + */ -#define f1(x,y,z) (z ^ (x & (y ^ z))) /* x ? y : z */ -#define f2(x,y,z) (x ^ y ^ z) /* XOR */ -#define f3(x,y,z) ((x & y) + (z & (x ^ y))) /* majority */ +#ifdef CONFIG_X86 + #define setW(x, val) (*(volatile __u32 *)&W(x) = (val)) +#elif defined(CONFIG_ARM) + #define setW(x, val) do { W(x) = (val); __asm__("":::"memory"); } while (0) +#else + #define setW(x, val) (W(x) = (val)) +#endif -/* The SHA Mysterious Constants */ +/* This "rolls" over the 512-bit array */ +#define W(x) (array[(x)&15]) -#define K1 0x5A827999L /* Rounds 0-19: sqrt(2) * 2^30 */ -#define K2 0x6ED9EBA1L /* Rounds 20-39: sqrt(3) * 2^30 */ -#define K3 0x8F1BBCDCL /* Rounds 40-59: sqrt(5) * 2^30 */ -#define K4 0xCA62C1D6L /* Rounds 60-79: sqrt(10) * 2^30 */ +/* + * Where do we get the source from? The first 16 iterations get it from + * the input data, the next mix it from the 512-bit array. + */ +#define SHA_SRC(t) get_unaligned_be32((__u32 *)data + t) +#define SHA_MIX(t) rol32(W(t+13) ^ W(t+8) ^ W(t+2) ^ W(t), 1) + +#define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \ + __u32 TEMP = input(t); setW(t, TEMP); \ + E += TEMP + rol32(A,5) + (fn) + (constant); \ + B = ror32(B, 2); } while (0) + +#define T_0_15(t, A, B, C, D, E) SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E ) +#define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E ) +#define T_20_39(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0x6ed9eba1, A, B, C, D, E ) +#define T_40_59(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, ((B&C)+(D&(B^C))) , 0x8f1bbcdc, A, B, C, D, E ) +#define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0xca62c1d6, A, B, C, D, E ) /** * sha_transform - single block SHA1 transform * * @digest: 160 bit digest to update * @data: 512 bits of data to hash - * @W: 80 words of workspace (see note) + * @array: 16 words of workspace (see note) * * This function generates a SHA1 digest for a single 512-bit block. * Be warned, it does not handle padding and message digest, do not @@ -36,47 +77,111 @@ * to clear the workspace. This is left to the caller to avoid * unnecessary clears between chained hashing operations. 
*/ -void sha_transform(__u32 *digest, const char *in, __u32 *W) +void sha_transform(__u32 *digest, const char *data, __u32 *array) { - __u32 a, b, c, d, e, t, i; - - for (i = 0; i < 16; i++) - W[i] = be32_to_cpu(((const __be32 *)in)[i]); - - for (i = 0; i < 64; i++) - W[i+16] = rol32(W[i+13] ^ W[i+8] ^ W[i+2] ^ W[i], 1); - - a = digest[0]; - b = digest[1]; - c = digest[2]; - d = digest[3]; - e = digest[4]; - - for (i = 0; i < 20; i++) { - t = f1(b, c, d) + K1 + rol32(a, 5) + e + W[i]; - e = d; d = c; c = rol32(b, 30); b = a; a = t; - } - - for (; i < 40; i ++) { - t = f2(b, c, d) + K2 + rol32(a, 5) + e + W[i]; - e = d; d = c; c = rol32(b, 30); b = a; a = t; - } - - for (; i < 60; i ++) { - t = f3(b, c, d) + K3 + rol32(a, 5) + e + W[i]; - e = d; d = c; c = rol32(b, 30); b = a; a = t; - } - - for (; i < 80; i ++) { - t = f2(b, c, d) + K4 + rol32(a, 5) + e + W[i]; - e = d; d = c; c = rol32(b, 30); b = a; a = t; - } - - digest[0] += a; - digest[1] += b; - digest[2] += c; - digest[3] += d; - digest[4] += e; + __u32 A, B, C, D, E; + + A = digest[0]; + B = digest[1]; + C = digest[2]; + D = digest[3]; + E = digest[4]; + + /* Round 1 - iterations 0-16 take their input from 'data' */ + T_0_15( 0, A, B, C, D, E); + T_0_15( 1, E, A, B, C, D); + T_0_15( 2, D, E, A, B, C); + T_0_15( 3, C, D, E, A, B); + T_0_15( 4, B, C, D, E, A); + T_0_15( 5, A, B, C, D, E); + T_0_15( 6, E, A, B, C, D); + T_0_15( 7, D, E, A, B, C); + T_0_15( 8, C, D, E, A, B); + T_0_15( 9, B, C, D, E, A); + T_0_15(10, A, B, C, D, E); + T_0_15(11, E, A, B, C, D); + T_0_15(12, D, E, A, B, C); + T_0_15(13, C, D, E, A, B); + T_0_15(14, B, C, D, E, A); + T_0_15(15, A, B, C, D, E); + + /* Round 1 - tail. Input from 512-bit mixing array */ + T_16_19(16, E, A, B, C, D); + T_16_19(17, D, E, A, B, C); + T_16_19(18, C, D, E, A, B); + T_16_19(19, B, C, D, E, A); + + /* Round 2 */ + T_20_39(20, A, B, C, D, E); + T_20_39(21, E, A, B, C, D); + T_20_39(22, D, E, A, B, C); + T_20_39(23, C, D, E, A, B); + T_20_39(24, B, C, D, E, A); + T_20_39(25, A, B, C, D, E); + T_20_39(26, E, A, B, C, D); + T_20_39(27, D, E, A, B, C); + T_20_39(28, C, D, E, A, B); + T_20_39(29, B, C, D, E, A); + T_20_39(30, A, B, C, D, E); + T_20_39(31, E, A, B, C, D); + T_20_39(32, D, E, A, B, C); + T_20_39(33, C, D, E, A, B); + T_20_39(34, B, C, D, E, A); + T_20_39(35, A, B, C, D, E); + T_20_39(36, E, A, B, C, D); + T_20_39(37, D, E, A, B, C); + T_20_39(38, C, D, E, A, B); + T_20_39(39, B, C, D, E, A); + + /* Round 3 */ + T_40_59(40, A, B, C, D, E); + T_40_59(41, E, A, B, C, D); + T_40_59(42, D, E, A, B, C); + T_40_59(43, C, D, E, A, B); + T_40_59(44, B, C, D, E, A); + T_40_59(45, A, B, C, D, E); + T_40_59(46, E, A, B, C, D); + T_40_59(47, D, E, A, B, C); + T_40_59(48, C, D, E, A, B); + T_40_59(49, B, C, D, E, A); + T_40_59(50, A, B, C, D, E); + T_40_59(51, E, A, B, C, D); + T_40_59(52, D, E, A, B, C); + T_40_59(53, C, D, E, A, B); + T_40_59(54, B, C, D, E, A); + T_40_59(55, A, B, C, D, E); + T_40_59(56, E, A, B, C, D); + T_40_59(57, D, E, A, B, C); + T_40_59(58, C, D, E, A, B); + T_40_59(59, B, C, D, E, A); + + /* Round 4 */ + T_60_79(60, A, B, C, D, E); + T_60_79(61, E, A, B, C, D); + T_60_79(62, D, E, A, B, C); + T_60_79(63, C, D, E, A, B); + T_60_79(64, B, C, D, E, A); + T_60_79(65, A, B, C, D, E); + T_60_79(66, E, A, B, C, D); + T_60_79(67, D, E, A, B, C); + T_60_79(68, C, D, E, A, B); + T_60_79(69, B, C, D, E, A); + T_60_79(70, A, B, C, D, E); + T_60_79(71, E, A, B, C, D); + T_60_79(72, D, E, A, B, C); + T_60_79(73, C, D, E, A, B); + T_60_79(74, B, C, D, E, A); + T_60_79(75, 
A, B, C, D, E); + T_60_79(76, E, A, B, C, D); + T_60_79(77, D, E, A, B, C); + T_60_79(78, C, D, E, A, B); + T_60_79(79, B, C, D, E, A); + + digest[0] += A; + digest[1] += B; + digest[2] += C; + digest[3] += D; + digest[4] += E; } EXPORT_SYMBOL(sha_transform); @@ -92,4 +197,3 @@ void sha_init(__u32 *buf) buf[3] = 0x10325476; buf[4] = 0xc3d2e1f0; } - diff --git a/lib/string.c b/lib/string.c index f71bead1be3ef..6cbf6d834be12 100644 --- a/lib/string.c +++ b/lib/string.c @@ -23,6 +23,7 @@ #include #include #include +#include #ifndef __HAVE_ARCH_STRNICMP /** @@ -567,11 +568,12 @@ EXPORT_SYMBOL(memset); */ void *memcpy(void *dest, const void *src, size_t count) { - char *tmp = dest; - const char *s = src; + unsigned long dstp = (unsigned long)dest; + unsigned long srcp = (unsigned long)src; + + /* Copy from the beginning to the end */ + mem_copy_fwd(dstp, srcp, count); - while (count--) - *tmp++ = *s++; return dest; } EXPORT_SYMBOL(memcpy); @@ -588,21 +590,15 @@ EXPORT_SYMBOL(memcpy); */ void *memmove(void *dest, const void *src, size_t count) { - char *tmp; - const char *s; - - if (dest <= src) { - tmp = dest; - s = src; - while (count--) - *tmp++ = *s++; + unsigned long dstp = (unsigned long)dest; + unsigned long srcp = (unsigned long)src; + + if (dest - src >= count) { + /* Copy from the beginning to the end */ + mem_copy_fwd(dstp, srcp, count); } else { - tmp = dest; - tmp += count; - s = src; - s += count; - while (count--) - *--tmp = *--s; + /* Copy from the end to the beginning */ + mem_copy_bwd(dstp, srcp, count); } return dest; } diff --git a/lib/test-kstrtox.c b/lib/test-kstrtox.c new file mode 100644 index 0000000000000..325c2f9ecebdc --- /dev/null +++ b/lib/test-kstrtox.c @@ -0,0 +1,739 @@ +#include +#include +#include + +#define for_each_test(i, test) \ + for (i = 0; i < sizeof(test) / sizeof(test[0]); i++) + +struct test_fail { + const char *str; + unsigned int base; +}; + +#define DEFINE_TEST_FAIL(test) \ + const struct test_fail test[] __initdata + +#define DECLARE_TEST_OK(type, test_type) \ + test_type { \ + const char *str; \ + unsigned int base; \ + type expected_res; \ + } + +#define DEFINE_TEST_OK(type, test) \ + const type test[] __initdata + +#define TEST_FAIL(fn, type, fmt, test) \ +{ \ + unsigned int i; \ + \ + for_each_test(i, test) { \ + const struct test_fail *t = &test[i]; \ + type tmp; \ + int rv; \ + \ + tmp = 0; \ + rv = fn(t->str, t->base, &tmp); \ + if (rv >= 0) { \ + WARN(1, "str '%s', base %u, expected -E, got %d/" fmt "\n", \ + t->str, t->base, rv, tmp); \ + continue; \ + } \ + } \ +} + +#define TEST_OK(fn, type, fmt, test) \ +{ \ + unsigned int i; \ + \ + for_each_test(i, test) { \ + const typeof(test[0]) *t = &test[i]; \ + type res; \ + int rv; \ + \ + rv = fn(t->str, t->base, &res); \ + if (rv != 0) { \ + WARN(1, "str '%s', base %u, expected 0/" fmt ", got %d\n", \ + t->str, t->base, t->expected_res, rv); \ + continue; \ + } \ + if (res != t->expected_res) { \ + WARN(1, "str '%s', base %u, expected " fmt ", got " fmt "\n", \ + t->str, t->base, t->expected_res, res); \ + continue; \ + } \ + } \ +} + +static void __init test_kstrtoull_ok(void) +{ + DECLARE_TEST_OK(unsigned long long, struct test_ull); + static DEFINE_TEST_OK(struct test_ull, test_ull_ok) = { + {"0", 10, 0ULL}, + {"1", 10, 1ULL}, + {"127", 10, 127ULL}, + {"128", 10, 128ULL}, + {"129", 10, 129ULL}, + {"255", 10, 255ULL}, + {"256", 10, 256ULL}, + {"257", 10, 257ULL}, + {"32767", 10, 32767ULL}, + {"32768", 10, 32768ULL}, + {"32769", 10, 32769ULL}, + {"65535", 10, 65535ULL}, + {"65536", 10, 
65536ULL}, + {"65537", 10, 65537ULL}, + {"2147483647", 10, 2147483647ULL}, + {"2147483648", 10, 2147483648ULL}, + {"2147483649", 10, 2147483649ULL}, + {"4294967295", 10, 4294967295ULL}, + {"4294967296", 10, 4294967296ULL}, + {"4294967297", 10, 4294967297ULL}, + {"9223372036854775807", 10, 9223372036854775807ULL}, + {"9223372036854775808", 10, 9223372036854775808ULL}, + {"9223372036854775809", 10, 9223372036854775809ULL}, + {"18446744073709551614", 10, 18446744073709551614ULL}, + {"18446744073709551615", 10, 18446744073709551615ULL}, + + {"00", 8, 00ULL}, + {"01", 8, 01ULL}, + {"0177", 8, 0177ULL}, + {"0200", 8, 0200ULL}, + {"0201", 8, 0201ULL}, + {"0377", 8, 0377ULL}, + {"0400", 8, 0400ULL}, + {"0401", 8, 0401ULL}, + {"077777", 8, 077777ULL}, + {"0100000", 8, 0100000ULL}, + {"0100001", 8, 0100001ULL}, + {"0177777", 8, 0177777ULL}, + {"0200000", 8, 0200000ULL}, + {"0200001", 8, 0200001ULL}, + {"017777777777", 8, 017777777777ULL}, + {"020000000000", 8, 020000000000ULL}, + {"020000000001", 8, 020000000001ULL}, + {"037777777777", 8, 037777777777ULL}, + {"040000000000", 8, 040000000000ULL}, + {"040000000001", 8, 040000000001ULL}, + {"0777777777777777777777", 8, 0777777777777777777777ULL}, + {"01000000000000000000000", 8, 01000000000000000000000ULL}, + {"01000000000000000000001", 8, 01000000000000000000001ULL}, + {"01777777777777777777776", 8, 01777777777777777777776ULL}, + {"01777777777777777777777", 8, 01777777777777777777777ULL}, + + {"0x0", 16, 0x0ULL}, + {"0x1", 16, 0x1ULL}, + {"0x7f", 16, 0x7fULL}, + {"0x80", 16, 0x80ULL}, + {"0x81", 16, 0x81ULL}, + {"0xff", 16, 0xffULL}, + {"0x100", 16, 0x100ULL}, + {"0x101", 16, 0x101ULL}, + {"0x7fff", 16, 0x7fffULL}, + {"0x8000", 16, 0x8000ULL}, + {"0x8001", 16, 0x8001ULL}, + {"0xffff", 16, 0xffffULL}, + {"0x10000", 16, 0x10000ULL}, + {"0x10001", 16, 0x10001ULL}, + {"0x7fffffff", 16, 0x7fffffffULL}, + {"0x80000000", 16, 0x80000000ULL}, + {"0x80000001", 16, 0x80000001ULL}, + {"0xffffffff", 16, 0xffffffffULL}, + {"0x100000000", 16, 0x100000000ULL}, + {"0x100000001", 16, 0x100000001ULL}, + {"0x7fffffffffffffff", 16, 0x7fffffffffffffffULL}, + {"0x8000000000000000", 16, 0x8000000000000000ULL}, + {"0x8000000000000001", 16, 0x8000000000000001ULL}, + {"0xfffffffffffffffe", 16, 0xfffffffffffffffeULL}, + {"0xffffffffffffffff", 16, 0xffffffffffffffffULL}, + + {"0\n", 0, 0ULL}, + }; + TEST_OK(kstrtoull, unsigned long long, "%llu", test_ull_ok); +} + +static void __init test_kstrtoull_fail(void) +{ + static DEFINE_TEST_FAIL(test_ull_fail) = { + {"", 0}, + {"", 8}, + {"", 10}, + {"", 16}, + {"\n", 0}, + {"\n", 8}, + {"\n", 10}, + {"\n", 16}, + {"\n0", 0}, + {"\n0", 8}, + {"\n0", 10}, + {"\n0", 16}, + {"+", 0}, + {"+", 8}, + {"+", 10}, + {"+", 16}, + {"-", 0}, + {"-", 8}, + {"-", 10}, + {"-", 16}, + {"0x", 0}, + {"0x", 16}, + {"0X", 0}, + {"0X", 16}, + {"0 ", 0}, + {"1+", 0}, + {"1-", 0}, + {" 2", 0}, + /* base autodetection */ + {"0x0z", 0}, + {"0z", 0}, + {"a", 0}, + /* digit >= base */ + {"2", 2}, + {"8", 8}, + {"a", 10}, + {"A", 10}, + {"g", 16}, + {"G", 16}, + /* overflow */ + {"10000000000000000000000000000000000000000000000000000000000000000", 2}, + {"2000000000000000000000", 8}, + {"18446744073709551616", 10}, + {"10000000000000000", 16}, + /* negative */ + {"-0", 0}, + {"-0", 8}, + {"-0", 10}, + {"-0", 16}, + {"-1", 0}, + {"-1", 8}, + {"-1", 10}, + {"-1", 16}, + /* sign is first character if any */ + {"-+1", 0}, + {"-+1", 8}, + {"-+1", 10}, + {"-+1", 16}, + /* nothing after \n */ + {"0\n0", 0}, + {"0\n0", 8}, + {"0\n0", 10}, + {"0\n0", 16}, + {"0\n+", 0}, + 
{"0\n+", 8}, + {"0\n+", 10}, + {"0\n+", 16}, + {"0\n-", 0}, + {"0\n-", 8}, + {"0\n-", 10}, + {"0\n-", 16}, + {"0\n ", 0}, + {"0\n ", 8}, + {"0\n ", 10}, + {"0\n ", 16}, + }; + TEST_FAIL(kstrtoull, unsigned long long, "%llu", test_ull_fail); +} + +static void __init test_kstrtoll_ok(void) +{ + DECLARE_TEST_OK(long long, struct test_ll); + static DEFINE_TEST_OK(struct test_ll, test_ll_ok) = { + {"0", 10, 0LL}, + {"1", 10, 1LL}, + {"127", 10, 127LL}, + {"128", 10, 128LL}, + {"129", 10, 129LL}, + {"255", 10, 255LL}, + {"256", 10, 256LL}, + {"257", 10, 257LL}, + {"32767", 10, 32767LL}, + {"32768", 10, 32768LL}, + {"32769", 10, 32769LL}, + {"65535", 10, 65535LL}, + {"65536", 10, 65536LL}, + {"65537", 10, 65537LL}, + {"2147483647", 10, 2147483647LL}, + {"2147483648", 10, 2147483648LL}, + {"2147483649", 10, 2147483649LL}, + {"4294967295", 10, 4294967295LL}, + {"4294967296", 10, 4294967296LL}, + {"4294967297", 10, 4294967297LL}, + {"9223372036854775807", 10, 9223372036854775807LL}, + + {"-1", 10, -1LL}, + {"-2", 10, -2LL}, + {"-9223372036854775808", 10, LLONG_MIN}, + }; + TEST_OK(kstrtoll, long long, "%lld", test_ll_ok); +} + +static void __init test_kstrtoll_fail(void) +{ + static DEFINE_TEST_FAIL(test_ll_fail) = { + {"9223372036854775808", 10}, + {"9223372036854775809", 10}, + {"18446744073709551614", 10}, + {"18446744073709551615", 10}, + {"-9223372036854775809", 10}, + {"-18446744073709551614", 10}, + {"-18446744073709551615", 10}, + /* negative zero isn't an integer in Linux */ + {"-0", 0}, + {"-0", 8}, + {"-0", 10}, + {"-0", 16}, + /* sign is first character if any */ + {"-+1", 0}, + {"-+1", 8}, + {"-+1", 10}, + {"-+1", 16}, + }; + TEST_FAIL(kstrtoll, long long, "%lld", test_ll_fail); +} + +static void __init test_kstrtou64_ok(void) +{ + DECLARE_TEST_OK(u64, struct test_u64); + static DEFINE_TEST_OK(struct test_u64, test_u64_ok) = { + {"0", 10, 0}, + {"1", 10, 1}, + {"126", 10, 126}, + {"127", 10, 127}, + {"128", 10, 128}, + {"129", 10, 129}, + {"254", 10, 254}, + {"255", 10, 255}, + {"256", 10, 256}, + {"257", 10, 257}, + {"32766", 10, 32766}, + {"32767", 10, 32767}, + {"32768", 10, 32768}, + {"32769", 10, 32769}, + {"65534", 10, 65534}, + {"65535", 10, 65535}, + {"65536", 10, 65536}, + {"65537", 10, 65537}, + {"2147483646", 10, 2147483646}, + {"2147483647", 10, 2147483647}, + {"2147483648", 10, 2147483648}, + {"2147483649", 10, 2147483649}, + {"4294967294", 10, 4294967294}, + {"4294967295", 10, 4294967295}, + {"4294967296", 10, 4294967296}, + {"4294967297", 10, 4294967297}, + {"9223372036854775806", 10, 9223372036854775806ULL}, + {"9223372036854775807", 10, 9223372036854775807ULL}, + {"9223372036854775808", 10, 9223372036854775808ULL}, + {"9223372036854775809", 10, 9223372036854775809ULL}, + {"18446744073709551614", 10, 18446744073709551614ULL}, + {"18446744073709551615", 10, 18446744073709551615ULL}, + }; + TEST_OK(kstrtou64, u64, "%llu", test_u64_ok); +} + +static void __init test_kstrtou64_fail(void) +{ + static DEFINE_TEST_FAIL(test_u64_fail) = { + {"-2", 10}, + {"-1", 10}, + {"18446744073709551616", 10}, + {"18446744073709551617", 10}, + }; + TEST_FAIL(kstrtou64, u64, "%llu", test_u64_fail); +} + +static void __init test_kstrtos64_ok(void) +{ + DECLARE_TEST_OK(s64, struct test_s64); + static DEFINE_TEST_OK(struct test_s64, test_s64_ok) = { + {"-128", 10, -128}, + {"-127", 10, -127}, + {"-1", 10, -1}, + {"0", 10, 0}, + {"1", 10, 1}, + {"126", 10, 126}, + {"127", 10, 127}, + {"128", 10, 128}, + {"129", 10, 129}, + {"254", 10, 254}, + {"255", 10, 255}, + {"256", 10, 256}, + {"257", 10, 
257}, + {"32766", 10, 32766}, + {"32767", 10, 32767}, + {"32768", 10, 32768}, + {"32769", 10, 32769}, + {"65534", 10, 65534}, + {"65535", 10, 65535}, + {"65536", 10, 65536}, + {"65537", 10, 65537}, + {"2147483646", 10, 2147483646}, + {"2147483647", 10, 2147483647}, + {"2147483648", 10, 2147483648}, + {"2147483649", 10, 2147483649}, + {"4294967294", 10, 4294967294}, + {"4294967295", 10, 4294967295}, + {"4294967296", 10, 4294967296}, + {"4294967297", 10, 4294967297}, + {"9223372036854775806", 10, 9223372036854775806LL}, + {"9223372036854775807", 10, 9223372036854775807LL}, + }; + TEST_OK(kstrtos64, s64, "%lld", test_s64_ok); +} + +static void __init test_kstrtos64_fail(void) +{ + static DEFINE_TEST_FAIL(test_s64_fail) = { + {"9223372036854775808", 10}, + {"9223372036854775809", 10}, + {"18446744073709551614", 10}, + {"18446744073709551615", 10}, + {"18446744073709551616", 10}, + {"18446744073709551617", 10}, + }; + TEST_FAIL(kstrtos64, s64, "%lld", test_s64_fail); +} + +static void __init test_kstrtou32_ok(void) +{ + DECLARE_TEST_OK(u32, struct test_u32); + static DEFINE_TEST_OK(struct test_u32, test_u32_ok) = { + {"0", 10, 0}, + {"1", 10, 1}, + {"126", 10, 126}, + {"127", 10, 127}, + {"128", 10, 128}, + {"129", 10, 129}, + {"254", 10, 254}, + {"255", 10, 255}, + {"256", 10, 256}, + {"257", 10, 257}, + {"32766", 10, 32766}, + {"32767", 10, 32767}, + {"32768", 10, 32768}, + {"32769", 10, 32769}, + {"65534", 10, 65534}, + {"65535", 10, 65535}, + {"65536", 10, 65536}, + {"65537", 10, 65537}, + {"2147483646", 10, 2147483646}, + {"2147483647", 10, 2147483647}, + {"2147483648", 10, 2147483648}, + {"2147483649", 10, 2147483649}, + {"4294967294", 10, 4294967294}, + {"4294967295", 10, 4294967295}, + }; + TEST_OK(kstrtou32, u32, "%u", test_u32_ok); +} + +static void __init test_kstrtou32_fail(void) +{ + static DEFINE_TEST_FAIL(test_u32_fail) = { + {"-2", 10}, + {"-1", 10}, + {"4294967296", 10}, + {"4294967297", 10}, + {"9223372036854775806", 10}, + {"9223372036854775807", 10}, + {"9223372036854775808", 10}, + {"9223372036854775809", 10}, + {"18446744073709551614", 10}, + {"18446744073709551615", 10}, + {"18446744073709551616", 10}, + {"18446744073709551617", 10}, + }; + TEST_FAIL(kstrtou32, u32, "%u", test_u32_fail); +} + +static void __init test_kstrtos32_ok(void) +{ + DECLARE_TEST_OK(s32, struct test_s32); + static DEFINE_TEST_OK(struct test_s32, test_s32_ok) = { + {"-128", 10, -128}, + {"-127", 10, -127}, + {"-1", 10, -1}, + {"0", 10, 0}, + {"1", 10, 1}, + {"126", 10, 126}, + {"127", 10, 127}, + {"128", 10, 128}, + {"129", 10, 129}, + {"254", 10, 254}, + {"255", 10, 255}, + {"256", 10, 256}, + {"257", 10, 257}, + {"32766", 10, 32766}, + {"32767", 10, 32767}, + {"32768", 10, 32768}, + {"32769", 10, 32769}, + {"65534", 10, 65534}, + {"65535", 10, 65535}, + {"65536", 10, 65536}, + {"65537", 10, 65537}, + {"2147483646", 10, 2147483646}, + {"2147483647", 10, 2147483647}, + }; + TEST_OK(kstrtos32, s32, "%d", test_s32_ok); +} + +static void __init test_kstrtos32_fail(void) +{ + static DEFINE_TEST_FAIL(test_s32_fail) = { + {"2147483648", 10}, + {"2147483649", 10}, + {"4294967294", 10}, + {"4294967295", 10}, + {"4294967296", 10}, + {"4294967297", 10}, + {"9223372036854775806", 10}, + {"9223372036854775807", 10}, + {"9223372036854775808", 10}, + {"9223372036854775809", 10}, + {"18446744073709551614", 10}, + {"18446744073709551615", 10}, + {"18446744073709551616", 10}, + {"18446744073709551617", 10}, + }; + TEST_FAIL(kstrtos32, s32, "%d", test_s32_fail); +} + +static void __init test_kstrtou16_ok(void) +{ + 
DECLARE_TEST_OK(u16, struct test_u16); + static DEFINE_TEST_OK(struct test_u16, test_u16_ok) = { + {"0", 10, 0}, + {"1", 10, 1}, + {"126", 10, 126}, + {"127", 10, 127}, + {"128", 10, 128}, + {"129", 10, 129}, + {"254", 10, 254}, + {"255", 10, 255}, + {"256", 10, 256}, + {"257", 10, 257}, + {"32766", 10, 32766}, + {"32767", 10, 32767}, + {"32768", 10, 32768}, + {"32769", 10, 32769}, + {"65534", 10, 65534}, + {"65535", 10, 65535}, + }; + TEST_OK(kstrtou16, u16, "%hu", test_u16_ok); +} + +static void __init test_kstrtou16_fail(void) +{ + static DEFINE_TEST_FAIL(test_u16_fail) = { + {"-2", 10}, + {"-1", 10}, + {"65536", 10}, + {"65537", 10}, + {"2147483646", 10}, + {"2147483647", 10}, + {"2147483648", 10}, + {"2147483649", 10}, + {"4294967294", 10}, + {"4294967295", 10}, + {"4294967296", 10}, + {"4294967297", 10}, + {"9223372036854775806", 10}, + {"9223372036854775807", 10}, + {"9223372036854775808", 10}, + {"9223372036854775809", 10}, + {"18446744073709551614", 10}, + {"18446744073709551615", 10}, + {"18446744073709551616", 10}, + {"18446744073709551617", 10}, + }; + TEST_FAIL(kstrtou16, u16, "%hu", test_u16_fail); +} + +static void __init test_kstrtos16_ok(void) +{ + DECLARE_TEST_OK(s16, struct test_s16); + static DEFINE_TEST_OK(struct test_s16, test_s16_ok) = { + {"-130", 10, -130}, + {"-129", 10, -129}, + {"-128", 10, -128}, + {"-127", 10, -127}, + {"-1", 10, -1}, + {"0", 10, 0}, + {"1", 10, 1}, + {"126", 10, 126}, + {"127", 10, 127}, + {"128", 10, 128}, + {"129", 10, 129}, + {"254", 10, 254}, + {"255", 10, 255}, + {"256", 10, 256}, + {"257", 10, 257}, + {"32766", 10, 32766}, + {"32767", 10, 32767}, + }; + TEST_OK(kstrtos16, s16, "%hd", test_s16_ok); +} + +static void __init test_kstrtos16_fail(void) +{ + static DEFINE_TEST_FAIL(test_s16_fail) = { + {"32768", 10}, + {"32769", 10}, + {"65534", 10}, + {"65535", 10}, + {"65536", 10}, + {"65537", 10}, + {"2147483646", 10}, + {"2147483647", 10}, + {"2147483648", 10}, + {"2147483649", 10}, + {"4294967294", 10}, + {"4294967295", 10}, + {"4294967296", 10}, + {"4294967297", 10}, + {"9223372036854775806", 10}, + {"9223372036854775807", 10}, + {"9223372036854775808", 10}, + {"9223372036854775809", 10}, + {"18446744073709551614", 10}, + {"18446744073709551615", 10}, + {"18446744073709551616", 10}, + {"18446744073709551617", 10}, + }; + TEST_FAIL(kstrtos16, s16, "%hd", test_s16_fail); +} + +static void __init test_kstrtou8_ok(void) +{ + DECLARE_TEST_OK(u8, struct test_u8); + static DEFINE_TEST_OK(struct test_u8, test_u8_ok) = { + {"0", 10, 0}, + {"1", 10, 1}, + {"126", 10, 126}, + {"127", 10, 127}, + {"128", 10, 128}, + {"129", 10, 129}, + {"254", 10, 254}, + {"255", 10, 255}, + }; + TEST_OK(kstrtou8, u8, "%hhu", test_u8_ok); +} + +static void __init test_kstrtou8_fail(void) +{ + static DEFINE_TEST_FAIL(test_u8_fail) = { + {"-2", 10}, + {"-1", 10}, + {"256", 10}, + {"257", 10}, + {"32766", 10}, + {"32767", 10}, + {"32768", 10}, + {"32769", 10}, + {"65534", 10}, + {"65535", 10}, + {"65536", 10}, + {"65537", 10}, + {"2147483646", 10}, + {"2147483647", 10}, + {"2147483648", 10}, + {"2147483649", 10}, + {"4294967294", 10}, + {"4294967295", 10}, + {"4294967296", 10}, + {"4294967297", 10}, + {"9223372036854775806", 10}, + {"9223372036854775807", 10}, + {"9223372036854775808", 10}, + {"9223372036854775809", 10}, + {"18446744073709551614", 10}, + {"18446744073709551615", 10}, + {"18446744073709551616", 10}, + {"18446744073709551617", 10}, + }; + TEST_FAIL(kstrtou8, u8, "%hhu", test_u8_fail); +} + +static void __init test_kstrtos8_ok(void) +{ + 
DECLARE_TEST_OK(s8, struct test_s8); + static DEFINE_TEST_OK(struct test_s8, test_s8_ok) = { + {"-128", 10, -128}, + {"-127", 10, -127}, + {"-1", 10, -1}, + {"0", 10, 0}, + {"1", 10, 1}, + {"126", 10, 126}, + {"127", 10, 127}, + }; + TEST_OK(kstrtos8, s8, "%hhd", test_s8_ok); +} + +static void __init test_kstrtos8_fail(void) +{ + static DEFINE_TEST_FAIL(test_s8_fail) = { + {"-130", 10}, + {"-129", 10}, + {"128", 10}, + {"129", 10}, + {"254", 10}, + {"255", 10}, + {"256", 10}, + {"257", 10}, + {"32766", 10}, + {"32767", 10}, + {"32768", 10}, + {"32769", 10}, + {"65534", 10}, + {"65535", 10}, + {"65536", 10}, + {"65537", 10}, + {"2147483646", 10}, + {"2147483647", 10}, + {"2147483648", 10}, + {"2147483649", 10}, + {"4294967294", 10}, + {"4294967295", 10}, + {"4294967296", 10}, + {"4294967297", 10}, + {"9223372036854775806", 10}, + {"9223372036854775807", 10}, + {"9223372036854775808", 10}, + {"9223372036854775809", 10}, + {"18446744073709551614", 10}, + {"18446744073709551615", 10}, + {"18446744073709551616", 10}, + {"18446744073709551617", 10}, + }; + TEST_FAIL(kstrtos8, s8, "%hhd", test_s8_fail); +} + +static int __init test_kstrtox_init(void) +{ + test_kstrtoull_ok(); + test_kstrtoull_fail(); + test_kstrtoll_ok(); + test_kstrtoll_fail(); + + test_kstrtou64_ok(); + test_kstrtou64_fail(); + test_kstrtos64_ok(); + test_kstrtos64_fail(); + + test_kstrtou32_ok(); + test_kstrtou32_fail(); + test_kstrtos32_ok(); + test_kstrtos32_fail(); + + test_kstrtou16_ok(); + test_kstrtou16_fail(); + test_kstrtos16_ok(); + test_kstrtos16_fail(); + + test_kstrtou8_ok(); + test_kstrtou8_fail(); + test_kstrtos8_ok(); + test_kstrtos8_fail(); + return -EINVAL; +} +module_init(test_kstrtox_init); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/lib/vsprintf.c b/lib/vsprintf.c index d3023df8477f6..f3fd99a6ad628 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -120,147 +120,6 @@ long long simple_strtoll(const char *cp, char **endp, unsigned int base) } EXPORT_SYMBOL(simple_strtoll); -/** - * strict_strtoul - convert a string to an unsigned long strictly - * @cp: The string to be converted - * @base: The number base to use - * @res: The converted result value - * - * strict_strtoul converts a string to an unsigned long only if the - * string is really an unsigned long string, any string containing - * any invalid char at the tail will be rejected and -EINVAL is returned, - * only a newline char at the tail is acceptible because people generally - * change a module parameter in the following way: - * - * echo 1024 > /sys/module/e1000/parameters/copybreak - * - * echo will append a newline to the tail. - * - * It returns 0 if conversion is successful and *res is set to the converted - * value, otherwise it returns -EINVAL and *res is set to 0. - * - * simple_strtoul just ignores the successive invalid characters and - * return the converted value of prefix part of the string. 
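The kstrto*() helpers exercised by this test module give exactly the strict behaviour documented for the strict_strto*() family removed below: the whole string must be a valid number in the given base, at most one trailing newline is tolerated, and the result is written only on success. A sketch of the calling convention; store_copybreak() and its arguments are hypothetical:

	static int store_copybreak(const char *buf, unsigned long *copybreak)
	{
		unsigned long val;
		int ret;

		ret = kstrtoul(buf, 0, &val);	/* accepts "1024" and "1024\n", rejects "1024x" */
		if (ret)
			return ret;		/* -EINVAL on bad syntax, -ERANGE on overflow */

		*copybreak = val;
		return 0;
	}
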
- */ -int strict_strtoul(const char *cp, unsigned int base, unsigned long *res) -{ - char *tail; - unsigned long val; - - *res = 0; - if (!*cp) - return -EINVAL; - - val = simple_strtoul(cp, &tail, base); - if (tail == cp) - return -EINVAL; - - if ((tail[0] == '\0') || (tail[0] == '\n' && tail[1] == '\0')) { - *res = val; - return 0; - } - - return -EINVAL; -} -EXPORT_SYMBOL(strict_strtoul); - -/** - * strict_strtol - convert a string to a long strictly - * @cp: The string to be converted - * @base: The number base to use - * @res: The converted result value - * - * strict_strtol is similiar to strict_strtoul, but it allows the first - * character of a string is '-'. - * - * It returns 0 if conversion is successful and *res is set to the converted - * value, otherwise it returns -EINVAL and *res is set to 0. - */ -int strict_strtol(const char *cp, unsigned int base, long *res) -{ - int ret; - if (*cp == '-') { - ret = strict_strtoul(cp + 1, base, (unsigned long *)res); - if (!ret) - *res = -(*res); - } else { - ret = strict_strtoul(cp, base, (unsigned long *)res); - } - - return ret; -} -EXPORT_SYMBOL(strict_strtol); - -/** - * strict_strtoull - convert a string to an unsigned long long strictly - * @cp: The string to be converted - * @base: The number base to use - * @res: The converted result value - * - * strict_strtoull converts a string to an unsigned long long only if the - * string is really an unsigned long long string, any string containing - * any invalid char at the tail will be rejected and -EINVAL is returned, - * only a newline char at the tail is acceptible because people generally - * change a module parameter in the following way: - * - * echo 1024 > /sys/module/e1000/parameters/copybreak - * - * echo will append a newline to the tail of the string. - * - * It returns 0 if conversion is successful and *res is set to the converted - * value, otherwise it returns -EINVAL and *res is set to 0. - * - * simple_strtoull just ignores the successive invalid characters and - * return the converted value of prefix part of the string. - */ -int strict_strtoull(const char *cp, unsigned int base, unsigned long long *res) -{ - char *tail; - unsigned long long val; - - *res = 0; - if (!*cp) - return -EINVAL; - - val = simple_strtoull(cp, &tail, base); - if (tail == cp) - return -EINVAL; - if ((tail[0] == '\0') || (tail[0] == '\n' && tail[1] == '\0')) { - *res = val; - return 0; - } - - return -EINVAL; -} -EXPORT_SYMBOL(strict_strtoull); - -/** - * strict_strtoll - convert a string to a long long strictly - * @cp: The string to be converted - * @base: The number base to use - * @res: The converted result value - * - * strict_strtoll is similiar to strict_strtoull, but it allows the first - * character of a string is '-'. - * - * It returns 0 if conversion is successful and *res is set to the converted - * value, otherwise it returns -EINVAL and *res is set to 0. 
- */ -int strict_strtoll(const char *cp, unsigned int base, long long *res) -{ - int ret; - if (*cp == '-') { - ret = strict_strtoull(cp + 1, base, (unsigned long long *)res); - if (!ret) - *res = -(*res); - } else { - ret = strict_strtoull(cp, base, (unsigned long long *)res); - } - - return ret; -} -EXPORT_SYMBOL(strict_strtoll); - static noinline_for_stack int skip_atoi(const char **s) { diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c index ea5fa4fe9d678..a6cdc969ea42a 100644 --- a/lib/xz/xz_dec_lzma2.c +++ b/lib/xz/xz_dec_lzma2.c @@ -969,6 +969,9 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s, */ tmp = b->in[b->in_pos++]; + if (tmp == 0x00) + return XZ_STREAM_END; + if (tmp >= 0xE0 || tmp == 0x01) { s->lzma2.need_props = true; s->lzma2.need_dict_reset = false; @@ -1001,9 +1004,6 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s, lzma_reset(s); } } else { - if (tmp == 0x00) - return XZ_STREAM_END; - if (tmp > 0x02) return XZ_DATA_ERROR; diff --git a/lib/xz/xz_dec_stream.c b/lib/xz/xz_dec_stream.c index ac809b1e64f78..9a60cc21964ff 100644 --- a/lib/xz/xz_dec_stream.c +++ b/lib/xz/xz_dec_stream.c @@ -9,6 +9,7 @@ #include "xz_private.h" #include "xz_stream.h" +#include /* Hash used to validate the Index field */ struct xz_dec_hash { diff --git a/mm/ashmem.c b/mm/ashmem.c index f92eb34a14826..d24f0094ca99b 100644 --- a/mm/ashmem.c +++ b/mm/ashmem.c @@ -29,6 +29,7 @@ #include #include #include +#include #define ASHMEM_NAME_PREFIX "dev/ashmem/" #define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1) @@ -45,6 +46,8 @@ struct ashmem_area { struct list_head unpinned_list; /* list of all ashmem areas */ struct file *file; /* the shmem-based backing file */ size_t size; /* size of the mapping, in bytes */ + unsigned long vm_start; /* Start address of vm_area + * which maps this ashmem */ unsigned long prot_mask; /* allowed prot bits, as vm_flags */ }; @@ -326,6 +329,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) vma->vm_file = asma->file; } vma->vm_flags |= VM_CAN_NONLINEAR; + asma->vm_start = vma->vm_start; out: mutex_unlock(&ashmem_mutex); @@ -347,17 +351,18 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan' * pages freed. 
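The callback below is converted to the struct shrink_control interface: the scan target and the allowed gfp mask now arrive in a single argument. The two-step protocol is unchanged, as the ASHMEM_PURGE_ALL_CACHES ioctl later in this file shows; a sketch of a caller, with error handling elided (a negative return means the shrinker could not run):

	struct shrink_control sc = {
		.gfp_mask   = GFP_KERNEL,
		.nr_to_scan = 0,	/* 0 asks "how many LRU pages could you free?" */
	};
	int to_purge;

	to_purge = ashmem_shrink(&ashmem_shrinker, &sc);	/* returns lru_count */
	sc.nr_to_scan = to_purge;
	ashmem_shrink(&ashmem_shrinker, &sc);			/* purge that many unpinned pages */
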
*/ -static int ashmem_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask) +static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc) { struct ashmem_range *range, *next; /* We might recurse into filesystem code, so bail out if necessary */ - if (nr_to_scan && !(gfp_mask & __GFP_FS)) + if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS)) return -1; - if (!nr_to_scan) + if (!sc->nr_to_scan) return lru_count; - mutex_lock(&ashmem_mutex); + if (!mutex_trylock(&ashmem_mutex)) + return -1; list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) { struct inode *inode = range->asma->file->f_dentry->d_inode; loff_t start = range->pgstart * PAGE_SIZE; @@ -367,8 +372,8 @@ static int ashmem_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask) range->purged = ASHMEM_WAS_PURGED; lru_del(range); - nr_to_scan -= range_size(range); - if (nr_to_scan <= 0) + sc->nr_to_scan -= range_size(range); + if (sc->nr_to_scan <= 0) break; } mutex_unlock(&ashmem_mutex); @@ -626,6 +631,69 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, return ret; } +#ifdef CONFIG_OUTER_CACHE +static unsigned int virtaddr_to_physaddr(unsigned int virtaddr) +{ + unsigned int physaddr = 0; + pgd_t *pgd_ptr = NULL; + pmd_t *pmd_ptr = NULL; + pte_t *pte_ptr = NULL, pte; + + spin_lock(¤t->mm->page_table_lock); + pgd_ptr = pgd_offset(current->mm, virtaddr); + if (pgd_none(*pgd) || pgd_bad(*pgd)) { + pr_err("Failed to convert virtaddr %x to pgd_ptr\n", + virtaddr); + goto done; + } + + pmd_ptr = pmd_offset(pgd_ptr, virtaddr); + if (pmd_none(*pmd_ptr) || pmd_bad(*pmd_ptr)) { + pr_err("Failed to convert pgd_ptr %p to pmd_ptr\n", + (void *)pgd_ptr); + goto done; + } + + pte_ptr = pte_offset_map(pmd_ptr, virtaddr); + if (!pte_ptr) { + pr_err("Failed to convert pmd_ptr %p to pte_ptr\n", + (void *)pmd_ptr); + goto done; + } + pte = *pte_ptr; + physaddr = pte_pfn(pte); + pte_unmap(pte_ptr); +done: + spin_unlock(¤t->mm->page_table_lock); + physaddr <<= PAGE_SHIFT; + return physaddr; +} +#endif + +static int ashmem_cache_op(struct ashmem_area *asma, + void (*cache_func)(unsigned long vstart, unsigned long length, + unsigned long pstart)) +{ +#ifdef CONFIG_OUTER_CACHE + unsigned long vaddr; +#endif + mutex_lock(&ashmem_mutex); +#ifndef CONFIG_OUTER_CACHE + cache_func(asma->vm_start, asma->size, 0); +#else + for (vaddr = asma->vm_start; vaddr < asma->vm_start + asma->size; + vaddr += PAGE_SIZE) { + unsigned long physaddr; + physaddr = virtaddr_to_physaddr(vaddr); + if (!physaddr) + return -EINVAL; + cache_func(vaddr, PAGE_SIZE, physaddr); + } +#endif + mutex_unlock(&ashmem_mutex); + return 0; +} + static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct ashmem_area *asma = file->private_data; @@ -662,15 +730,82 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case ASHMEM_PURGE_ALL_CACHES: ret = -EPERM; if (capable(CAP_SYS_ADMIN)) { - ret = ashmem_shrink(&ashmem_shrinker, 0, GFP_KERNEL); - ashmem_shrink(&ashmem_shrinker, ret, GFP_KERNEL); + struct shrink_control sc = { + .gfp_mask = GFP_KERNEL, + .nr_to_scan = 0, + }; + ret = ashmem_shrink(&ashmem_shrinker, &sc); + sc.nr_to_scan = ret; + ashmem_shrink(&ashmem_shrinker, &sc); } break; + case ASHMEM_CACHE_FLUSH_RANGE: + ret = ashmem_cache_op(asma, &clean_and_invalidate_caches); + break; + case ASHMEM_CACHE_CLEAN_RANGE: + ret = ashmem_cache_op(asma, &clean_caches); + break; + case ASHMEM_CACHE_INV_RANGE: + ret = ashmem_cache_op(asma, &invalidate_caches); + break; } return ret; } +static 
int is_ashmem_file(struct file *file) +{ + char fname[256], *name; + name = dentry_path(file->f_dentry, fname, 256); + return strcmp(name, "/ashmem") ? 0 : 1; +} + +int get_ashmem_file(int fd, struct file **filp, struct file **vm_file, + unsigned long *len) +{ + int ret = -1; + struct file *file = fget(fd); + *filp = NULL; + *vm_file = NULL; + if (unlikely(file == NULL)) { + pr_err("ashmem: %s: requested data from file " + "descriptor that doesn't exist.\n", __func__); + } else { + char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1]; + pr_debug("filp %p rdev %d pid %u(%s) file %p(%ld)" + " dev id: %d\n", filp, + file->f_dentry->d_inode->i_rdev, + current->pid, get_task_comm(currtask_name, current), + file, file_count(file), + MINOR(file->f_dentry->d_inode->i_rdev)); + if (is_ashmem_file(file)) { + struct ashmem_area *asma = file->private_data; + *filp = file; + *vm_file = asma->file; + *len = asma->size; + ret = 0; + } else { + pr_err("file descriptor is not an ashmem " + "region fd: %d\n", fd); + fput(file); + } + } + return ret; +} +EXPORT_SYMBOL(get_ashmem_file); + +void put_ashmem_file(struct file *file) +{ + char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1]; + pr_debug("rdev %d pid %u(%s) file %p(%ld)" " dev id: %d\n", + file->f_dentry->d_inode->i_rdev, current->pid, + get_task_comm(currtask_name, current), file, + file_count(file), MINOR(file->f_dentry->d_inode->i_rdev)); + if (file && is_ashmem_file(file)) + fput(file); +} +EXPORT_SYMBOL(put_ashmem_file); + static struct file_operations ashmem_fops = { .owner = THIS_MODULE, .open = ashmem_open, diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 027100d30227f..8e4ed884f198b 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -604,7 +604,7 @@ static void bdi_prune_sb(struct backing_dev_info *bdi) spin_lock(&sb_lock); list_for_each_entry(sb, &super_blocks, s_list) { if (sb->s_bdi == bdi) - sb->s_bdi = NULL; + sb->s_bdi = &default_backing_dev_info; } spin_unlock(&sb_lock); } diff --git a/mm/compaction.c b/mm/compaction.c index 8be430b812def..a5b39dbc71e6c 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -42,8 +42,6 @@ struct compact_control { unsigned int order; /* order a direct compactor needs */ int migratetype; /* MOVABLE, RECLAIMABLE etc */ struct zone *zone; - - int compact_mode; }; static unsigned long release_freepages(struct list_head *freelist) @@ -397,10 +395,7 @@ static int compact_finished(struct zone *zone, return COMPACT_COMPLETE; /* Compaction run is not finished if the watermark is not met */ - if (cc->compact_mode != COMPACT_MODE_KSWAPD) - watermark = low_wmark_pages(zone); - else - watermark = high_wmark_pages(zone); + watermark = low_wmark_pages(zone); watermark += (1 << cc->order); if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0)) @@ -413,15 +408,6 @@ static int compact_finished(struct zone *zone, if (cc->order == -1) return COMPACT_CONTINUE; - /* - * Generating only one page of the right order is not enough - * for kswapd, we must continue until we're above the high - * watermark as a pool for high order GFP_ATOMIC allocations - * too. - */ - if (cc->compact_mode == COMPACT_MODE_KSWAPD) - return COMPACT_CONTINUE; - /* Direct compactor: Is a suitable page free? 
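The get_ashmem_file()/put_ashmem_file() pair added above is exported so that other drivers can resolve an ashmem file descriptor received from userspace into the backing shmem file and its size. A hypothetical client; the function name and the -EINVAL mapping are illustrative:

	static int example_with_ashmem_region(int fd)
	{
		struct file *asma_file, *backing;
		unsigned long size;

		if (get_ashmem_file(fd, &asma_file, &backing, &size))
			return -EINVAL;		/* fd does not refer to an ashmem region */

		/* ... use backing and size while the reference is held ... */

		put_ashmem_file(asma_file);	/* drops the reference taken by fget() */
		return 0;
	}
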
*/ for (order = cc->order; order < MAX_ORDER; order++) { /* Job done if page is free of the right migratetype */ @@ -543,8 +529,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) unsigned long compact_zone_order(struct zone *zone, int order, gfp_t gfp_mask, - bool sync, - int compact_mode) + bool sync) { struct compact_control cc = { .nr_freepages = 0, @@ -553,7 +538,6 @@ unsigned long compact_zone_order(struct zone *zone, .migratetype = allocflags_to_migratetype(gfp_mask), .zone = zone, .sync = sync, - .compact_mode = compact_mode, }; INIT_LIST_HEAD(&cc.freepages); INIT_LIST_HEAD(&cc.migratepages); @@ -599,8 +583,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, nodemask) { int status; - status = compact_zone_order(zone, order, gfp_mask, sync, - COMPACT_MODE_DIRECT_RECLAIM); + status = compact_zone_order(zone, order, gfp_mask, sync); rc = max(status, rc); /* If a normal allocation would succeed, stop compacting */ @@ -613,7 +596,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, /* Compact all zones within a node */ -static int compact_node(int nid) +static int compact_node(int nid, bool sync) { int zoneid; pg_data_t *pgdat; @@ -631,7 +614,7 @@ static int compact_node(int nid) .nr_freepages = 0, .nr_migratepages = 0, .order = -1, - .compact_mode = COMPACT_MODE_DIRECT_RECLAIM, + .sync = sync, }; zone = &pgdat->node_zones[zoneid]; @@ -652,12 +635,12 @@ static int compact_node(int nid) } /* Compact all nodes in the system */ -static int compact_nodes(void) +int compact_nodes(bool sync) { int nid; for_each_online_node(nid) - compact_node(nid); + compact_node(nid, sync); return COMPACT_COMPLETE; } @@ -670,7 +653,7 @@ int sysctl_compaction_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { if (write) - return compact_nodes(); + return compact_nodes(true); return 0; } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 113e35c475020..56cac93f155d1 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -244,24 +244,28 @@ static ssize_t single_flag_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf, enum transparent_hugepage_flag flag) { - if (test_bit(flag, &transparent_hugepage_flags)) - return sprintf(buf, "[yes] no\n"); - else - return sprintf(buf, "yes [no]\n"); + return sprintf(buf, "%d\n", + !!test_bit(flag, &transparent_hugepage_flags)); } + static ssize_t single_flag_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count, enum transparent_hugepage_flag flag) { - if (!memcmp("yes", buf, - min(sizeof("yes")-1, count))) { + unsigned long value; + int ret; + + ret = kstrtoul(buf, 10, &value); + if (ret < 0) + return ret; + if (value > 1) + return -EINVAL; + + if (value) set_bit(flag, &transparent_hugepage_flags); - } else if (!memcmp("no", buf, - min(sizeof("no")-1, count))) { + else clear_bit(flag, &transparent_hugepage_flags); - } else - return -EINVAL; return count; } @@ -1396,6 +1400,9 @@ int split_huge_page(struct page *page) return ret; } +#define VM_NO_THP (VM_SPECIAL|VM_INSERTPAGE|VM_MIXEDMAP|VM_SAO| \ + VM_HUGETLB|VM_SHARED|VM_MAYSHARE) + int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice) { @@ -1404,11 +1411,7 @@ int hugepage_madvise(struct vm_area_struct *vma, /* * Be somewhat over-protective like KSM for now! 
*/ - if (*vm_flags & (VM_HUGEPAGE | - VM_SHARED | VM_MAYSHARE | - VM_PFNMAP | VM_IO | VM_DONTEXPAND | - VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE | - VM_MIXEDMAP | VM_SAO)) + if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP)) return -EINVAL; *vm_flags &= ~VM_NOHUGEPAGE; *vm_flags |= VM_HUGEPAGE; @@ -1424,11 +1427,7 @@ int hugepage_madvise(struct vm_area_struct *vma, /* * Be somewhat over-protective like KSM for now! */ - if (*vm_flags & (VM_NOHUGEPAGE | - VM_SHARED | VM_MAYSHARE | - VM_PFNMAP | VM_IO | VM_DONTEXPAND | - VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE | - VM_MIXEDMAP | VM_SAO)) + if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP)) return -EINVAL; *vm_flags &= ~VM_HUGEPAGE; *vm_flags |= VM_NOHUGEPAGE; @@ -1562,10 +1561,14 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma) * page fault if needed. */ return 0; - if (vma->vm_file || vma->vm_ops) + if (vma->vm_ops) /* khugepaged not yet working on file or special mappings */ return 0; - VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); + /* + * If is_pfn_mapping() is true is_learn_pfn_mapping() must be + * true too, verify it here. + */ + VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP); hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; hend = vma->vm_end & HPAGE_PMD_MASK; if (hstart < hend) @@ -1814,12 +1817,15 @@ static void collapse_huge_page(struct mm_struct *mm, (vma->vm_flags & VM_NOHUGEPAGE)) goto out; - /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ - if (!vma->anon_vma || vma->vm_ops || vma->vm_file) + if (!vma->anon_vma || vma->vm_ops) goto out; if (is_vma_temporary_stack(vma)) goto out; - VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); + /* + * If is_pfn_mapping() is true is_learn_pfn_mapping() must be + * true too, verify it here. + */ + VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP); pgd = pgd_offset(mm, address); if (!pgd_present(*pgd)) @@ -2052,13 +2058,16 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, progress++; continue; } - /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ - if (!vma->anon_vma || vma->vm_ops || vma->vm_file) + if (!vma->anon_vma || vma->vm_ops) goto skip; if (is_vma_temporary_stack(vma)) goto skip; - - VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); + /* + * If is_pfn_mapping() is true is_learn_pfn_mapping() + * must be true too, verify it here. 
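With VM_NO_THP in place, the madvise and khugepaged paths above all reduce to the same question: a vma is a candidate for huge pages only if it is a plain anonymous mapping, is not the temporary stack set up during exec, and carries none of the excluded flags. A hypothetical helper condensing those checks (this patch does not add one):

	static bool example_vma_thp_eligible(struct vm_area_struct *vma)
	{
		if (!vma->anon_vma || vma->vm_ops)
			return false;		/* file-backed or special mappings */
		if (is_vma_temporary_stack(vma))
			return false;		/* stack still being moved by exec */
		return !(vma->vm_flags & (VM_NOHUGEPAGE | VM_NO_THP));
	}
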
+ */ + VM_BUG_ON(is_linear_pfn_mapping(vma) || + vma->vm_flags & VM_NO_THP); hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; hend = vma->vm_end & HPAGE_PMD_MASK; diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 84225f3b71905..a351b680acbd3 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -1414,9 +1414,12 @@ static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos) ++(*pos); list_for_each_continue_rcu(n, &object_list) { - next_obj = list_entry(n, struct kmemleak_object, object_list); - if (get_object(next_obj)) + struct kmemleak_object *obj = + list_entry(n, struct kmemleak_object, object_list); + if (get_object(obj)) { + next_obj = obj; break; + } } put_object(prev_obj); diff --git a/mm/ksm.c b/mm/ksm.c index c2b2a94f9d677..78df7f472a425 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include "internal.h" @@ -301,20 +302,6 @@ static inline int in_stable_tree(struct rmap_item *rmap_item) return rmap_item->address & STABLE_FLAG; } -static void hold_anon_vma(struct rmap_item *rmap_item, - struct anon_vma *anon_vma) -{ - rmap_item->anon_vma = anon_vma; - get_anon_vma(anon_vma); -} - -static void ksm_drop_anon_vma(struct rmap_item *rmap_item) -{ - struct anon_vma *anon_vma = rmap_item->anon_vma; - - drop_anon_vma(anon_vma); -} - /* * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's * page tables after it has passed through ksm_exit() - which, if necessary, @@ -397,7 +384,7 @@ static void break_cow(struct rmap_item *rmap_item) * It is not an accident that whenever we want to break COW * to undo, we also need to drop a reference to the anon_vma. */ - ksm_drop_anon_vma(rmap_item); + put_anon_vma(rmap_item->anon_vma); down_read(&mm->mmap_sem); if (ksm_test_exit(mm)) @@ -466,7 +453,7 @@ static void remove_node_from_stable_tree(struct stable_node *stable_node) ksm_pages_sharing--; else ksm_pages_shared--; - ksm_drop_anon_vma(rmap_item); + put_anon_vma(rmap_item->anon_vma); rmap_item->address &= PAGE_MASK; cond_resched(); } @@ -554,7 +541,7 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item) else ksm_pages_shared--; - ksm_drop_anon_vma(rmap_item); + put_anon_vma(rmap_item->anon_vma); rmap_item->address &= PAGE_MASK; } else if (rmap_item->address & UNSTABLE_FLAG) { @@ -949,7 +936,8 @@ static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item, goto out; /* Must get reference to anon_vma while still holding mmap_sem */ - hold_anon_vma(rmap_item, vma->anon_vma); + rmap_item->anon_vma = vma->anon_vma; + get_anon_vma(vma->anon_vma); out: up_read(&mm->mmap_sem); return err; @@ -1314,6 +1302,12 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page) slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list); ksm_scan.mm_slot = slot; spin_unlock(&ksm_mmlist_lock); + /* + * Although we tested list_empty() above, a racing __ksm_exit + * of the last mm on the list may have removed it since then. 
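The KSM hunks above drop the private hold_anon_vma()/ksm_drop_anon_vma() wrappers and call the generic anon_vma refcounting helpers directly (the migrate.c hunk further down does the same). The discipline itself is unchanged; a condensed sketch, with the function names purely illustrative:

	static void example_pin_anon_vma(struct rmap_item *rmap_item,
					 struct vm_area_struct *vma)
	{
		/* taken while mmap_sem is still held, so vma->anon_vma is stable */
		rmap_item->anon_vma = vma->anon_vma;
		get_anon_vma(vma->anon_vma);
	}

	static void example_unpin_anon_vma(struct rmap_item *rmap_item)
	{
		/* may run long after mmap_sem was dropped; the final put
		 * frees the anon_vma */
		put_anon_vma(rmap_item->anon_vma);
	}
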
+ */ + if (slot == &ksm_mm_head) + return NULL; next_mm: ksm_scan.address = 0; ksm_scan.rmap_list = &slot->rmap_list; @@ -1907,9 +1901,11 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, if (ksm_run != flags) { ksm_run = flags; if (flags & KSM_RUN_UNMERGE) { - current->flags |= PF_OOM_ORIGIN; + int oom_score_adj; + + oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX); err = unmerge_and_remove_all_rmap_items(); - current->flags &= ~PF_OOM_ORIGIN; + test_set_oom_score_adj(oom_score_adj); if (err) { ksm_run = KSM_RUN_STOP; count = err; diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 0207c2f6f8bd7..a0166ab250516 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -239,7 +239,11 @@ void shake_page(struct page *p, int access) if (access) { int nr; do { - nr = shrink_slab(1000, GFP_KERNEL, 1000); + struct shrink_control shrink = { + .gfp_mask = GFP_KERNEL, + }; + + nr = shrink_slab(&shrink, 1000, 1000); if (page_count(p) == 1) break; } while (nr > 10); diff --git a/mm/memory.c b/mm/memory.c index 5823698c2b71a..703392e7cc872 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1359,7 +1359,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address, */ mark_page_accessed(page); } - if (flags & FOLL_MLOCK) { + if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { /* * The preliminary mapping check is mainly to avoid the * pointless overhead of lock_page on the ZERO_PAGE @@ -1410,6 +1410,12 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address, return page; } +static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) +{ + return stack_guard_page_start(vma, addr) || + stack_guard_page_end(vma, addr+PAGE_SIZE); +} + int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, @@ -1439,7 +1445,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, vma = find_extend_vma(mm, start); if (!vma && in_gate_area(tsk, start)) { unsigned long pg = start & PAGE_MASK; - struct vm_area_struct *gate_vma = get_gate_vma(tsk); pgd_t *pgd; pud_t *pud; pmd_t *pmd; @@ -1464,10 +1469,11 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, pte_unmap(pte); return i ? : -EFAULT; } + vma = get_gate_vma(tsk); if (pages) { struct page *page; - page = vm_normal_page(gate_vma, start, *pte); + page = vm_normal_page(vma, start, *pte); if (!page) { if (!(gup_flags & FOLL_DUMP) && is_zero_pfn(pte_pfn(*pte))) @@ -1481,12 +1487,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, get_page(page); } pte_unmap(pte); - if (vmas) - vmas[i] = gate_vma; - i++; - start += PAGE_SIZE; - nr_pages--; - continue; + goto next_page; } if (!vma || @@ -1500,6 +1501,12 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, continue; } + /* + * For mlock, just skip the stack guard page. + */ + if ((gup_flags & FOLL_MLOCK) && stack_guard_page(vma, start)) + goto next_page; + do { struct page *page; unsigned int foll_flags = gup_flags; @@ -1516,6 +1523,11 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, int ret; unsigned int fault_flags = 0; + /* For mlock, just skip the stack guard page. 
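The KSM conversion earlier in this hunk shows what replaces PF_OOM_ORIGIN: instead of flagging the task, a caller temporarily raises its own oom_score_adj and puts the saved value back afterwards. The idiom, using the test_set_oom_score_adj() helper added in the oom killer changes further down (the surrounding caller is hypothetical):

	int old_adj;

	old_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);	/* make current the preferred victim */
	/* ... memory-hungry work, e.g. unmerging every KSM page ... */
	test_set_oom_score_adj(old_adj);			/* restore the saved value */
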
*/ + if (foll_flags & FOLL_MLOCK) { + if (stack_guard_page(vma, start)) + goto next_page; + } if (foll_flags & FOLL_WRITE) fault_flags |= FAULT_FLAG_WRITE; if (nonblocking) @@ -1569,6 +1581,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, flush_anon_page(vma, page, start); flush_dcache_page(page); } +next_page: if (vmas) vmas[i] = vma; i++; @@ -3322,7 +3335,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, * run pte_offset_map on the pmd, if an huge pmd could * materialize from under us from a different thread. */ - if (unlikely(__pte_alloc(mm, vma, pmd, address))) + if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address)) return VM_FAULT_OOM; /* if an huge pmd materialized from under us just retry later */ if (unlikely(pmd_trans_huge(*pmd))) diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 321fc7455df73..7aa5343ba9c38 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -112,9 +112,10 @@ void __ref put_page_bootmem(struct page *page) static void register_page_bootmem_info_section(unsigned long start_pfn) { - unsigned long *usemap, mapsize, section_nr, i; + unsigned long *usemap, mapsize, page_mapsize, section_nr, i, j; struct mem_section *ms; - struct page *page, *memmap; + struct page *page, *memmap, *page_page; + int memmap_page_valid; if (!pfn_valid(start_pfn)) return; @@ -133,9 +134,21 @@ static void register_page_bootmem_info_section(unsigned long start_pfn) mapsize = sizeof(struct page) * PAGES_PER_SECTION; mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT; - /* remember memmap's page */ - for (i = 0; i < mapsize; i++, page++) - get_page_bootmem(section_nr, page, SECTION_INFO); + page_mapsize = PAGE_SIZE/sizeof(struct page); + + /* remember memmap's page, except those that reference only holes */ + for (i = 0; i < mapsize; i++, page++) { + memmap_page_valid = 0; + page_page = __va(page_to_pfn(page) << PAGE_SHIFT); + for (j = 0; j < page_mapsize; j++, page_page++) { + if (early_pfn_valid(page_to_pfn(page_page))) { + memmap_page_valid = 1; + break; + } + } + if (memmap_page_valid) + get_page_bootmem(section_nr, page, SECTION_INFO); + } usemap = __nr_to_section(section_nr)->pageblock_flags; page = virt_to_page(usemap); @@ -591,6 +604,54 @@ int __ref add_memory(int nid, u64 start, u64 size) } EXPORT_SYMBOL_GPL(add_memory); +int __ref physical_remove_memory(u64 start, u64 size) +{ + int ret; + struct resource *res, *res_old; + res = kzalloc(sizeof(struct resource), GFP_KERNEL); + BUG_ON(!res); + + ret = arch_physical_remove_memory(start, size); + if (!ret) { + kfree(res); + return 0; + } + + res->name = "System RAM"; + res->start = start; + res->end = start + size - 1; + res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; + + res_old = locate_resource(&iomem_resource, res); + if (res_old) { + release_resource(res_old); + if (PageSlab(virt_to_head_page(res_old))) + kfree(res_old); + } + kfree(res); + + return ret; +} +EXPORT_SYMBOL_GPL(physical_remove_memory); + +int __ref physical_active_memory(u64 start, u64 size) +{ + int ret; + + ret = arch_physical_active_memory(start, size); + return ret; +} +EXPORT_SYMBOL_GPL(physical_active_memory); + +int __ref physical_low_power_memory(u64 start, u64 size) +{ + int ret; + + ret = arch_physical_low_power_memory(start, size); + return ret; +} +EXPORT_SYMBOL_GPL(physical_low_power_memory); + #ifdef CONFIG_MEMORY_HOTREMOVE /* * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy @@ -927,6 +988,7 @@ int remove_memory(u64 start, u64 size) end_pfn = start_pfn + 
PFN_DOWN(size); return offline_pages(start_pfn, end_pfn, 120 * HZ); } + #else int remove_memory(u64 start, u64 size) { diff --git a/mm/migrate.c b/mm/migrate.c index 352de555626c4..00bb07924da21 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -764,7 +764,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private, /* Drop an anon_vma reference if we took one */ if (anon_vma) - drop_anon_vma(anon_vma); + put_anon_vma(anon_vma); uncharge: if (!charge) @@ -856,7 +856,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, remove_migration_ptes(hpage, hpage); if (anon_vma) - drop_anon_vma(anon_vma); + put_anon_vma(anon_vma); out: unlock_page(hpage); diff --git a/mm/mlock.c b/mm/mlock.c index c3924c7f00bea..c8e77909c04fe 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -135,13 +135,6 @@ void munlock_vma_page(struct page *page) } } -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) -{ - return (vma->vm_flags & VM_GROWSDOWN) && - (vma->vm_start == addr) && - !vma_stack_continue(vma->vm_prev, addr); -} - /** * __mlock_vma_pages_range() - mlock a range of pages in the vma. * @vma: target vma @@ -169,7 +162,7 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma, VM_BUG_ON(end > vma->vm_end); VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); - gup_flags = FOLL_TOUCH; + gup_flags = FOLL_TOUCH | FOLL_MLOCK; /* * We want to touch writable mappings with a write fault in order * to break COW, except for shared mappings because these don't COW @@ -185,15 +178,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma, if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) gup_flags |= FOLL_FORCE; - if (vma->vm_flags & VM_LOCKED) - gup_flags |= FOLL_MLOCK; - - /* We don't try to access the guard page of a stack vma */ - if (stack_guard_page(vma, start)) { - addr += PAGE_SIZE; - nr_pages--; - } - return __get_user_pages(current, mm, addr, nr_pages, gup_flags, NULL, NULL, nonblocking); } diff --git a/mm/mmap.c b/mm/mmap.c index 2ec8eb5a9cdd0..772140c53ab18 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -259,7 +259,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) * randomize_va_space to 2, which will still cause mm->start_brk * to be arbitrarily shifted */ - if (mm->start_brk > PAGE_ALIGN(mm->end_data)) + if (current->brk_randomized) min_brk = mm->start_brk; else min_brk = mm->end_data; @@ -1767,10 +1767,13 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) size = address - vma->vm_start; grow = (address - vma->vm_end) >> PAGE_SHIFT; - error = acct_stack_growth(vma, size, grow); - if (!error) { - vma->vm_end = address; - perf_event_mmap(vma); + error = -ENOMEM; + if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { + error = acct_stack_growth(vma, size, grow); + if (!error) { + vma->vm_end = address; + perf_event_mmap(vma); + } } } vma_unlock_anon_vma(vma); @@ -1814,11 +1817,14 @@ static int expand_downwards(struct vm_area_struct *vma, size = vma->vm_end - address; grow = (vma->vm_start - address) >> PAGE_SHIFT; - error = acct_stack_growth(vma, size, grow); - if (!error) { - vma->vm_start = address; - vma->vm_pgoff -= grow; - perf_event_mmap(vma); + error = -ENOMEM; + if (grow <= vma->vm_pgoff) { + error = acct_stack_growth(vma, size, grow); + if (!error) { + vma->vm_start = address; + vma->vm_pgoff -= grow; + perf_event_mmap(vma); + } } } vma_unlock_anon_vma(vma); diff --git a/mm/mremap.c b/mm/mremap.c index 1de98d492ddcd..a7c1f9f9b9415 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -277,9 +277,16 @@ static struct 
vm_area_struct *vma_to_resize(unsigned long addr, if (old_len > vma->vm_end - addr) goto Efault; - if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) { - if (new_len > old_len) + /* Need to be careful about a growing mapping */ + if (new_len > old_len) { + unsigned long pgoff; + + if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) goto Efault; + pgoff = (addr - vma->vm_start) >> PAGE_SHIFT; + pgoff += vma->vm_pgoff; + if (pgoff + (new_len >> PAGE_SHIFT) < pgoff) + goto Einval; } if (vma->vm_flags & VM_LOCKED) { diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 7dcca55ede7ca..4b34bb0ba75ae 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -31,12 +31,40 @@ #include #include #include +#include int sysctl_panic_on_oom; int sysctl_oom_kill_allocating_task; int sysctl_oom_dump_tasks = 1; static DEFINE_SPINLOCK(zone_scan_lock); +/** + * test_set_oom_score_adj() - set current's oom_score_adj and return old value + * @new_val: new oom_score_adj value + * + * Sets the oom_score_adj value for current to @new_val with proper + * synchronization and returns the old value. Usually used to temporarily + * set a value, save the old value in the caller, and then reinstate it later. + */ +int test_set_oom_score_adj(int new_val) +{ + struct sighand_struct *sighand = current->sighand; + int old_val; + + spin_lock_irq(&sighand->siglock); + old_val = current->signal->oom_score_adj; + if (new_val != old_val) { + if (new_val == OOM_SCORE_ADJ_MIN) + atomic_inc(¤t->mm->oom_disable_count); + else if (old_val == OOM_SCORE_ADJ_MIN) + atomic_dec(¤t->mm->oom_disable_count); + current->signal->oom_score_adj = new_val; + } + spin_unlock_irq(&sighand->siglock); + + return old_val; +} + #ifdef CONFIG_NUMA /** * has_intersects_mems_allowed() - check task eligiblity for kill @@ -82,24 +110,6 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk, } #endif /* CONFIG_NUMA */ -/* - * If this is a system OOM (not a memcg OOM) and the task selected to be - * killed is not already running at high (RT) priorities, speed up the - * recovery by boosting the dying task to the lowest FIFO priority. - * That helps with the recovery and avoids interfering with RT tasks. - */ -static void boost_dying_task_prio(struct task_struct *p, - struct mem_cgroup *mem) -{ - struct sched_param param = { .sched_priority = 1 }; - - if (mem) - return; - - if (!rt_task(p)) - sched_setscheduler_nocheck(p, SCHED_FIFO, ¶m); -} - /* * The process p may have detached its own ->mm while exiting or through * use_mm(), but one or more of its subthreads may still have a valid @@ -171,15 +181,6 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, return 0; } - /* - * When the PF_OOM_ORIGIN bit is set, it indicates the task should have - * priority for oom killing. - */ - if (p->flags & PF_OOM_ORIGIN) { - task_unlock(p); - return 1000; - } - /* * The memory controller may have a limit of 0 bytes, so avoid a divide * by zero, if necessary. @@ -189,10 +190,13 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, /* * The baseline for the badness score is the proportion of RAM that each - * task's rss and swap space use. + * task's rss, pagetable and swap space use. 
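The computation that follows normalises a task's footprint, now counting resident pages, page-table pages and swap entries, against the memory available to the allocation. A worked example with assumed numbers:

	/*
	 * rss = 50,000 pages, nr_ptes = 200, swap entries = 10,000,
	 * totalpages = 1,000,000 (roughly 4 GB of 4 KiB pages):
	 *
	 *	points = (50000 + 200 + 10000) * 1000 / 1000000 = 60
	 *
	 * i.e. about 6% of allowed memory, before the task's
	 * oom_score_adj bias is applied on top.
	 */
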
*/ - points = (get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS)) * 1000 / - totalpages; + points = get_mm_rss(p->mm) + p->mm->nr_ptes; + points += get_mm_counter(p->mm, MM_SWAPENTS); + + points *= 1000; + points /= totalpages; task_unlock(p); /* @@ -292,13 +296,15 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, unsigned long totalpages, struct mem_cgroup *mem, const nodemask_t *nodemask) { - struct task_struct *p; + struct task_struct *g, *p; struct task_struct *chosen = NULL; *ppoints = 0; - for_each_process(p) { + do_each_thread(g, p) { unsigned int points; + if (!p->mm) + continue; if (oom_unkillable_task(p, mem, nodemask)) continue; @@ -314,22 +320,29 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, if (test_tsk_thread_flag(p, TIF_MEMDIE)) return ERR_PTR(-1UL); - /* - * This is in the process of releasing memory so wait for it - * to finish before killing some other task by mistake. - * - * However, if p is the current task, we allow the 'kill' to - * go ahead if it is exiting: this will simply set TIF_MEMDIE, - * which will allow it to gain access to memory reserves in - * the process of exiting and releasing its resources. - * Otherwise we could get an easy OOM deadlock. - */ - if (thread_group_empty(p) && (p->flags & PF_EXITING) && p->mm) { - if (p != current) - return ERR_PTR(-1UL); - - chosen = p; - *ppoints = 1000; + if (p->flags & PF_EXITING) { + /* + * If p is the current task and is in the process of + * releasing memory, we allow the "kill" to set + * TIF_MEMDIE, which will allow it to gain access to + * memory reserves. Otherwise, it may stall forever. + * + * The loop isn't broken here, however, in case other + * threads are found to have already been oom killed. + */ + if (p == current) { + chosen = p; + *ppoints = 1000; + } else { + /* + * If this task is not being ptraced on exit, + * then wait for it to finish before killing + * some other task unnecessarily. + */ + if (!(task_ptrace(p->group_leader) & + PT_TRACE_EXIT)) + return ERR_PTR(-1UL); + } } points = oom_badness(p, mem, nodemask, totalpages); @@ -337,7 +350,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, chosen = p; *ppoints = points; } - } + } while_each_thread(g, p); return chosen; } @@ -442,13 +455,6 @@ static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem) set_tsk_thread_flag(p, TIF_MEMDIE); force_sig(SIGKILL, p); - /* - * We give our sacrificial lamb high priority and access to - * all the memory it needs. That way it should be able to - * exit() and clear out its resources quickly... 
- */ - boost_dying_task_prio(p, mem); - return 0; } #undef K @@ -472,7 +478,6 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, */ if (p->flags & PF_EXITING) { set_tsk_thread_flag(p, TIF_MEMDIE); - boost_dying_task_prio(p, mem); return 0; } @@ -491,6 +496,8 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, list_for_each_entry(child, &t->children, sibling) { unsigned int child_points; + if (child->mm == p->mm) + continue; /* * oom_badness() returns 0 if the thread is unkillable */ @@ -689,7 +696,6 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, */ if (fatal_signal_pending(current)) { set_thread_flag(TIF_MEMDIE); - boost_dying_task_prio(current, NULL); return; } diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 2cb01f6ec5d01..82e523f5dadd3 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -78,7 +78,7 @@ int vm_highmem_is_dirtyable; /* * The generator of dirty data starts writeback at this percentage */ -int vm_dirty_ratio = 20; +int vm_dirty_ratio = 10; /* * vm_dirty_bytes starts at 0 (disabled) so that it is a function of diff --git a/mm/page_alloc.c b/mm/page_alloc.c index c9e283d3ac86c..15ccc938d5faa 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -171,9 +171,21 @@ static char * const zone_names[MAX_NR_ZONES] = { "Movable", }; +/* + * Try to keep at least this much lowmem free. Do not allow normal + * allocations below this point, only high priority ones. Automatically + * tuned according to the amount of memory in the system. + */ int min_free_kbytes = 1024; int min_free_order_shift = 1; +/* + * Extra memory for the system to try freeing. Used to temporarily + * free memory, to make space for new workloads. Anyone can allocate + * down to the min watermarks controlled by min_free_kbytes above. + */ +int extra_free_kbytes = 0; + static unsigned long __meminitdata nr_kernel_pages; static unsigned long __meminitdata nr_all_pages; static unsigned long __meminitdata dma_reserve; @@ -287,7 +299,7 @@ static void bad_page(struct page *page) /* Don't complain about poisoned pages */ if (PageHWPoison(page)) { - __ClearPageBuddy(page); + reset_page_mapcount(page); /* remove PageBuddy */ return; } @@ -318,7 +330,7 @@ static void bad_page(struct page *page) dump_stack(); out: /* Leave bad fields for debug, except PageBuddy could make trouble */ - __ClearPageBuddy(page); + reset_page_mapcount(page); /* remove PageBuddy */ add_taint(TAINT_BAD_PAGE); } @@ -2045,6 +2057,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, first_zones_zonelist(zonelist, high_zoneidx, NULL, &preferred_zone); +rebalance: /* This is the last chance, in general, before the goto nopage. 
*/ page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS, @@ -2052,7 +2065,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, if (page) goto got_pg; -rebalance: /* Allocate without watermarks if the context allows */ if (alloc_flags & ALLOC_NO_WATERMARKS) { page = __alloc_pages_high_priority(gfp_mask, order, @@ -3513,7 +3525,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) if (!slab_is_available()) { zone->wait_table = (wait_queue_head_t *) - alloc_bootmem_node(pgdat, alloc_size); + alloc_bootmem_node_nopanic(pgdat, alloc_size); } else { /* * This case means that a zone whose size was 0 gets new memory @@ -4086,7 +4098,8 @@ static void __init setup_usemap(struct pglist_data *pgdat, unsigned long usemapsize = usemap_size(zonesize); zone->pageblock_flags = NULL; if (usemapsize) - zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize); + zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat, + usemapsize); } #else static inline void setup_usemap(struct pglist_data *pgdat, @@ -4252,7 +4265,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) size = (end - start) * sizeof(struct page); map = alloc_remap(pgdat->node_id, size); if (!map) - map = alloc_bootmem_node(pgdat, size); + map = alloc_bootmem_node_nopanic(pgdat, size); pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); } #ifndef CONFIG_NEED_MULTIPLE_NODES @@ -4953,6 +4966,7 @@ static void setup_per_zone_lowmem_reserve(void) void setup_per_zone_wmarks(void) { unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); + unsigned long pages_low = extra_free_kbytes >> (PAGE_SHIFT - 10); unsigned long lowmem_pages = 0; struct zone *zone; unsigned long flags; @@ -4964,11 +4978,14 @@ void setup_per_zone_wmarks(void) } for_each_zone(zone) { - u64 tmp; + u64 min, low; spin_lock_irqsave(&zone->lock, flags); - tmp = (u64)pages_min * zone->present_pages; - do_div(tmp, lowmem_pages); + min = (u64)pages_min * zone->present_pages; + do_div(min, lowmem_pages); + low = (u64)pages_low * zone->present_pages; + do_div(low, vm_total_pages); + if (is_highmem(zone)) { /* * __GFP_HIGH and PF_MEMALLOC allocations usually don't @@ -4992,11 +5009,13 @@ void setup_per_zone_wmarks(void) * If it's a lowmem zone, reserve a number of pages * proportionate to the zone's size. */ - zone->watermark[WMARK_MIN] = tmp; + zone->watermark[WMARK_MIN] = min; } - zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2); - zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1); + zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + + low + (min >> 2); + zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + + low + (min >> 1); setup_zone_migrate_reserve(zone); spin_unlock_irqrestore(&zone->lock, flags); } @@ -5093,7 +5112,7 @@ module_init(init_per_zone_wmark_min) /* * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so * that we can call two helper functions whenever min_free_kbytes - * changes. + * or extra_free_kbytes changes. 
*/ int min_free_kbytes_sysctl_handler(ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) diff --git a/mm/rmap.c b/mm/rmap.c index 941bf82e89612..ad416afb2061f 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -278,7 +278,7 @@ static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain) if (empty) { /* We no longer need the root anon_vma */ if (anon_vma->root != anon_vma) - drop_anon_vma(anon_vma->root); + put_anon_vma(anon_vma->root); anon_vma_free(anon_vma); } } @@ -1493,7 +1493,7 @@ int try_to_munlock(struct page *page) * we know we are the last user, nobody else can get a reference and we * can do the freeing without the lock. */ -void drop_anon_vma(struct anon_vma *anon_vma) +void put_anon_vma(struct anon_vma *anon_vma) { BUG_ON(atomic_read(&anon_vma->external_refcount) <= 0); if (atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->root->lock)) { diff --git a/mm/shmem.c b/mm/shmem.c index 5cd6194c5df27..03bdf3046264f 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -422,7 +422,8 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long * a waste to allocate index if we cannot allocate data. */ if (sbinfo->max_blocks) { - if (percpu_counter_compare(&sbinfo->used_blocks, (sbinfo->max_blocks - 1)) > 0) + if (percpu_counter_compare(&sbinfo->used_blocks, + sbinfo->max_blocks - 1) >= 0) return ERR_PTR(-ENOSPC); percpu_counter_inc(&sbinfo->used_blocks); spin_lock(&inode->i_lock); @@ -852,7 +853,7 @@ static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page) { - struct inode *inode; + struct address_space *mapping; unsigned long idx; unsigned long size; unsigned long limit; @@ -875,8 +876,10 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s if (size > SHMEM_NR_DIRECT) size = SHMEM_NR_DIRECT; offset = shmem_find_swp(entry, ptr, ptr+size); - if (offset >= 0) + if (offset >= 0) { + shmem_swp_balance_unmap(); goto found; + } if (!info->i_indirect) goto lost2; @@ -917,6 +920,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s shmem_swp_unmap(ptr); if (offset >= 0) { shmem_dir_unmap(dir); + ptr = shmem_swp_map(subdir); goto found; } } @@ -928,8 +932,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s return 0; found: idx += offset; - inode = igrab(&info->vfs_inode); - spin_unlock(&info->lock); + ptr += offset; /* * Move _head_ to start search for next from here. @@ -940,37 +943,18 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s */ if (shmem_swaplist.next != &info->swaplist) list_move_tail(&shmem_swaplist, &info->swaplist); - mutex_unlock(&shmem_swaplist_mutex); - error = 1; - if (!inode) - goto out; /* - * Charge page using GFP_KERNEL while we can wait. - * Charged back to the user(not to caller) when swap account is used. - * add_to_page_cache() will be called with GFP_NOWAIT. + * We rely on shmem_swaplist_mutex, not only to protect the swaplist, + * but also to hold up shmem_evict_inode(): so inode cannot be freed + * beneath us (pagelock doesn't help until the page is in pagecache). 
*/ - error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); - if (error) - goto out; - error = radix_tree_preload(GFP_KERNEL); - if (error) { - mem_cgroup_uncharge_cache_page(page); - goto out; - } - error = 1; - - spin_lock(&info->lock); - ptr = shmem_swp_entry(info, idx, NULL); - if (ptr && ptr->val == entry.val) { - error = add_to_page_cache_locked(page, inode->i_mapping, - idx, GFP_NOWAIT); - /* does mem_cgroup_uncharge_cache_page on error */ - } else /* we must compensate for our precharge above */ - mem_cgroup_uncharge_cache_page(page); + mapping = info->vfs_inode.i_mapping; + error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT); + /* which does mem_cgroup_uncharge_cache_page on error */ if (error == -EEXIST) { - struct page *filepage = find_get_page(inode->i_mapping, idx); + struct page *filepage = find_get_page(mapping, idx); error = 1; if (filepage) { /* @@ -990,14 +974,8 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s swap_free(entry); error = 1; /* not an error, but entry was found */ } - if (ptr) - shmem_swp_unmap(ptr); + shmem_swp_unmap(ptr); spin_unlock(&info->lock); - radix_tree_preload_end(); -out: - unlock_page(page); - page_cache_release(page); - iput(inode); /* allows for NULL */ return error; } @@ -1009,6 +987,26 @@ int shmem_unuse(swp_entry_t entry, struct page *page) struct list_head *p, *next; struct shmem_inode_info *info; int found = 0; + int error; + + /* + * Charge page using GFP_KERNEL while we can wait, before taking + * the shmem_swaplist_mutex which might hold up shmem_writepage(). + * Charged back to the user (not to caller) when swap account is used. + * add_to_page_cache() will be called with GFP_NOWAIT. + */ + error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); + if (error) + goto out; + /* + * Try to preload while we can wait, to not make a habit of + * draining atomic reserves; but don't latch on to this cpu, + * it's okay if sometimes we get rescheduled after this. + */ + error = radix_tree_preload(GFP_KERNEL); + if (error) + goto uncharge; + radix_tree_preload_end(); mutex_lock(&shmem_swaplist_mutex); list_for_each_safe(p, next, &shmem_swaplist) { @@ -1016,17 +1014,19 @@ int shmem_unuse(swp_entry_t entry, struct page *page) found = shmem_unuse_inode(info, entry, page); cond_resched(); if (found) - goto out; + break; } mutex_unlock(&shmem_swaplist_mutex); - /* - * Can some race bring us here? We've been holding page lock, - * so I think not; but would rather try again later than BUG() - */ + +uncharge: + if (!found) + mem_cgroup_uncharge_cache_page(page); + if (found < 0) + error = found; +out: unlock_page(page); page_cache_release(page); -out: - return (found < 0) ? found : 0; + return error; } /* @@ -1064,7 +1064,25 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) else swap.val = 0; + /* + * Add inode to shmem_unuse()'s list of swapped-out inodes, + * if it's not already there. Do it now because we cannot take + * mutex while holding spinlock, and must do so before the page + * is moved to swap cache, when its pagelock no longer protects + * the inode from eviction. But don't unlock the mutex until + * we've taken the spinlock, because shmem_unuse_inode() will + * prune a !swapped inode from the swaplist under both locks. 
+ */ + if (swap.val) { + mutex_lock(&shmem_swaplist_mutex); + if (list_empty(&info->swaplist)) + list_add_tail(&info->swaplist, &shmem_swaplist); + } + spin_lock(&info->lock); + if (swap.val) + mutex_unlock(&shmem_swaplist_mutex); + if (index >= info->next_index) { BUG_ON(!(info->flags & SHMEM_TRUNCATE)); goto unlock; @@ -1084,22 +1102,11 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) remove_from_page_cache(page); shmem_swp_set(info, entry, swap.val); shmem_swp_unmap(entry); - if (list_empty(&info->swaplist)) - inode = igrab(inode); - else - inode = NULL; - spin_unlock(&info->lock); swap_shmem_alloc(swap); + spin_unlock(&info->lock); BUG_ON(page_mapped(page)); page_cache_release(page); /* pagecache ref */ swap_writepage(page, wbc); - if (inode) { - mutex_lock(&shmem_swaplist_mutex); - /* move instead of add in case we're racing */ - list_move_tail(&info->swaplist, &shmem_swaplist); - mutex_unlock(&shmem_swaplist_mutex); - iput(inode); - } return 0; } @@ -1399,21 +1406,16 @@ static int shmem_getpage(struct inode *inode, unsigned long idx, shmem_swp_unmap(entry); sbinfo = SHMEM_SB(inode->i_sb); if (sbinfo->max_blocks) { - if ((percpu_counter_compare(&sbinfo->used_blocks, sbinfo->max_blocks) > 0) || - shmem_acct_block(info->flags)) { - spin_unlock(&info->lock); - error = -ENOSPC; - goto failed; - } + if (percpu_counter_compare(&sbinfo->used_blocks, + sbinfo->max_blocks) >= 0 || + shmem_acct_block(info->flags)) + goto nospace; percpu_counter_inc(&sbinfo->used_blocks); spin_lock(&inode->i_lock); inode->i_blocks += BLOCKS_PER_PAGE; spin_unlock(&inode->i_lock); - } else if (shmem_acct_block(info->flags)) { - spin_unlock(&info->lock); - error = -ENOSPC; - goto failed; - } + } else if (shmem_acct_block(info->flags)) + goto nospace; if (!filepage) { int ret; @@ -1493,6 +1495,24 @@ static int shmem_getpage(struct inode *inode, unsigned long idx, error = 0; goto out; +nospace: + /* + * Perhaps the page was brought in from swap between find_lock_page + * and taking info->lock? We allow for that at add_to_page_cache_lru, + * but must also avoid reporting a spurious ENOSPC while working on a + * full tmpfs. (When filepage has been passed in to shmem_getpage, it + * is already in page cache, which prevents this race from occurring.) + */ + if (!filepage) { + struct page *page = find_get_page(mapping, idx); + if (page) { + spin_unlock(&info->lock); + page_cache_release(page); + goto repeat; + } + } + spin_unlock(&info->lock); + error = -ENOSPC; failed: if (*pagep != filepage) { unlock_page(filepage); @@ -2794,6 +2814,10 @@ int shmem_zero_setup(struct vm_area_struct *vma) file = shmem_file_setup("dev/zero", size, vma->vm_flags); if (IS_ERR(file)) return PTR_ERR(file); - shmem_set_file(vma, file); + if (vma->vm_file) + fput(vma->vm_file); + vma->vm_file = file; + vma->vm_ops = &shmem_vm_ops; + vma->vm_flags |= VM_CAN_NONLINEAR; return 0; } diff --git a/mm/slab.c b/mm/slab.c index 37961d1f584fe..4c6e2e31ced05 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2288,8 +2288,8 @@ kmem_cache_create (const char *name, size_t size, size_t align, if (ralign < align) { ralign = align; } - /* disable debug if not aligning with REDZONE_ALIGN */ - if (ralign & (__alignof__(unsigned long long) - 1)) + /* disable debug if necessary */ + if (ralign > __alignof__(unsigned long long)) flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); /* * 4) Store it. 
@@ -2315,8 +2315,8 @@ kmem_cache_create (const char *name, size_t size, size_t align, */ if (flags & SLAB_RED_ZONE) { /* add space for red zone words */ - cachep->obj_offset += align; - size += align + sizeof(unsigned long long); + cachep->obj_offset += sizeof(unsigned long long); + size += 2 * sizeof(unsigned long long); } if (flags & SLAB_STORE_USER) { /* user store requires one word storage behind the end of diff --git a/mm/swapfile.c b/mm/swapfile.c index 0341c5700e346..7fd98e62cda34 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -1558,6 +1559,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) struct address_space *mapping; struct inode *inode; char *pathname; + int oom_score_adj; int i, type, prev; int err; @@ -1616,9 +1618,9 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) p->flags &= ~SWP_WRITEOK; spin_unlock(&swap_lock); - current->flags |= PF_OOM_ORIGIN; + oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX); err = try_to_unuse(type); - current->flags &= ~PF_OOM_ORIGIN; + test_set_oom_score_adj(oom_score_adj); if (err) { /* re-insert swap space back into swap_list */ @@ -2149,8 +2151,13 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) p->flags = 0; spin_unlock(&swap_lock); vfree(swap_map); - if (swap_file) + if (swap_file) { + if (did_down) { + mutex_unlock(&inode->i_mutex); + did_down = 0; + } filp_close(swap_file, NULL); + } out: if (page && !IS_ERR(page)) { kunmap(page); diff --git a/mm/vmscan.c b/mm/vmscan.c index 6771ea70bfe7e..4f3f059d92b69 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -146,7 +147,7 @@ struct scan_control { /* * From 0 .. 100. Higher means more swappy. */ -int vm_swappiness = 60; +int vm_swappiness = 90; long vm_total_pages; /* The total number of pages which the VM controls */ static LIST_HEAD(shrinker_list); @@ -200,6 +201,14 @@ void unregister_shrinker(struct shrinker *shrinker) } EXPORT_SYMBOL(unregister_shrinker); +static inline int do_shrinker_shrink(struct shrinker *shrinker, + struct shrink_control *sc, + unsigned long nr_to_scan) +{ + sc->nr_to_scan = nr_to_scan; + return (*shrinker->shrink)(shrinker, sc); +} + #define SHRINK_BATCH 128 /* * Call the shrink functions to age shrinkable caches @@ -220,25 +229,29 @@ EXPORT_SYMBOL(unregister_shrinker); * * Returns the number of slab objects which we shrunk. 
*/ -unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, - unsigned long lru_pages) +unsigned long shrink_slab(struct shrink_control *shrink, + unsigned long nr_pages_scanned, + unsigned long lru_pages) { struct shrinker *shrinker; unsigned long ret = 0; - if (scanned == 0) - scanned = SWAP_CLUSTER_MAX; + if (nr_pages_scanned == 0) + nr_pages_scanned = SWAP_CLUSTER_MAX; - if (!down_read_trylock(&shrinker_rwsem)) - return 1; /* Assume we'll be able to shrink next time */ + if (!down_read_trylock(&shrinker_rwsem)) { + /* Assume we'll be able to shrink next time */ + ret = 1; + goto out; + } list_for_each_entry(shrinker, &shrinker_list, list) { unsigned long long delta; unsigned long total_scan; unsigned long max_pass; - max_pass = (*shrinker->shrink)(shrinker, 0, gfp_mask); - delta = (4 * scanned) / shrinker->seeks; + max_pass = do_shrinker_shrink(shrinker, shrink, 0); + delta = (4 * nr_pages_scanned) / shrinker->seeks; delta *= max_pass; do_div(delta, lru_pages + 1); shrinker->nr += delta; @@ -265,9 +278,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, int shrink_ret; int nr_before; - nr_before = (*shrinker->shrink)(shrinker, 0, gfp_mask); - shrink_ret = (*shrinker->shrink)(shrinker, this_scan, - gfp_mask); + nr_before = do_shrinker_shrink(shrinker, shrink, 0); + shrink_ret = do_shrinker_shrink(shrinker, shrink, + this_scan); if (shrink_ret == -1) break; if (shrink_ret < nr_before) @@ -281,6 +294,8 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, shrinker->nr += total_scan; } up_read(&shrinker_rwsem); +out: + cond_resched(); return ret; } @@ -651,6 +666,17 @@ static enum page_references page_check_references(struct page *page, if (referenced_ptes) { if (PageAnon(page)) return PAGEREF_ACTIVATE; + + /* + * Identify referenced, file-backed active pages and move them + * to the active list. We know that this page has been + * referenced since being put on the inactive list. VM_EXEC + * pages are only moved to the inactive list when they have not + * been referenced between scans (see shrink_active_list). + */ + if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) + return PAGEREF_ACTIVATE; + /* * All mapped pages start out with page table * references from the instantiating fault, so we need @@ -1988,17 +2014,12 @@ static bool zone_reclaimable(struct zone *zone) return zone->pages_scanned < zone_reclaimable_pages(zone) * 6; } -/* - * As hibernation is going on, kswapd is freezed so that it can't mark - * the zone into all_unreclaimable. It can't handle OOM during hibernation. - * So let's check zone's unreclaimable in direct reclaim as well as kswapd. - */ +/* All zones in zonelist are unreclaimable? 
*/ static bool all_unreclaimable(struct zonelist *zonelist, struct scan_control *sc) { struct zoneref *z; struct zone *zone; - bool all_unreclaimable = true; for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(sc->gfp_mask), sc->nodemask) { @@ -2006,13 +2027,11 @@ static bool all_unreclaimable(struct zonelist *zonelist, continue; if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) continue; - if (zone_reclaimable(zone)) { - all_unreclaimable = false; - break; - } + if (!zone->all_unreclaimable) + return false; } - return all_unreclaimable; + return true; } /* @@ -2032,7 +2051,8 @@ static bool all_unreclaimable(struct zonelist *zonelist, * else, the number of pages reclaimed */ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, - struct scan_control *sc) + struct scan_control *sc, + struct shrink_control *shrink) { int priority; unsigned long total_scanned = 0; @@ -2066,7 +2086,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, lru_pages += zone_reclaimable_pages(zone); } - shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages); + shrink_slab(shrink, sc->nr_scanned, lru_pages); if (reclaim_state) { sc->nr_reclaimed += reclaim_state->reclaimed_slab; reclaim_state->reclaimed_slab = 0; @@ -2108,6 +2128,14 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, if (sc->nr_reclaimed) return sc->nr_reclaimed; + /* + * As hibernation is going on, kswapd is freezed so that it can't mark + * the zone into all_unreclaimable. Thus bypassing all_unreclaimable + * check. + */ + if (oom_killer_disabled) + return 0; + /* top priority shrink_zones still had more to do? don't OOM, then */ if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc)) return 1; @@ -2130,12 +2158,15 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order, .mem_cgroup = NULL, .nodemask = nodemask, }; + struct shrink_control shrink = { + .gfp_mask = sc.gfp_mask, + }; trace_mm_vmscan_direct_reclaim_begin(order, sc.may_writepage, gfp_mask); - nr_reclaimed = do_try_to_free_pages(zonelist, &sc); + nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); trace_mm_vmscan_direct_reclaim_end(nr_reclaimed); @@ -2195,17 +2226,20 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, .order = 0, .mem_cgroup = mem_cont, .nodemask = NULL, /* we don't care the placement */ + .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | + (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), + }; + struct shrink_control shrink = { + .gfp_mask = sc.gfp_mask, }; - sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | - (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); zonelist = NODE_DATA(numa_node_id())->node_zonelists; trace_mm_vmscan_memcg_reclaim_begin(0, sc.may_writepage, sc.gfp_mask); - nr_reclaimed = do_try_to_free_pages(zonelist, &sc); + nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); @@ -2284,7 +2318,7 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining, * must be balanced */ if (order) - return pgdat_balanced(pgdat, balanced, classzone_idx); + return !pgdat_balanced(pgdat, balanced, classzone_idx); else return !all_zones_ok; } @@ -2333,6 +2367,9 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order, .order = order, .mem_cgroup = NULL, }; + struct shrink_control shrink = { + .gfp_mask = sc.gfp_mask, + }; loop_again: total_scanned = 0; sc.nr_reclaimed = 0; @@ -2397,7 +2434,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order, * cause too much scanning of the 
lower zones. */ for (i = 0; i <= end_zone; i++) { - int compaction; struct zone *zone = pgdat->node_zones + i; int nr_slab; @@ -2423,29 +2459,13 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order, 8*high_wmark_pages(zone), end_zone, 0)) shrink_zone(priority, zone, &sc); reclaim_state->reclaimed_slab = 0; - nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, - lru_pages); + nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages); sc.nr_reclaimed += reclaim_state->reclaimed_slab; total_scanned += sc.nr_scanned; - compaction = 0; - if (order && - zone_watermark_ok(zone, 0, - high_wmark_pages(zone), - end_zone, 0) && - !zone_watermark_ok(zone, order, - high_wmark_pages(zone), - end_zone, 0)) { - compact_zone_order(zone, - order, - sc.gfp_mask, false, - COMPACT_MODE_KSWAPD); - compaction = 1; - } - if (zone->all_unreclaimable) continue; - if (!compaction && nr_slab == 0 && + if (nr_slab == 0 && !zone_reclaimable(zone)) zone->all_unreclaimable = 1; /* @@ -2791,7 +2811,10 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim) .swappiness = vm_swappiness, .order = 0, }; - struct zonelist * zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); + struct shrink_control shrink = { + .gfp_mask = sc.gfp_mask, + }; + struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); struct task_struct *p = current; unsigned long nr_reclaimed; @@ -2800,7 +2823,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim) reclaim_state.reclaimed_slab = 0; p->reclaim_state = &reclaim_state; - nr_reclaimed = do_try_to_free_pages(zonelist, &sc); + nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); p->reclaim_state = NULL; lockdep_clear_current_reclaim_state(); @@ -2975,6 +2998,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) .swappiness = vm_swappiness, .order = order, }; + struct shrink_control shrink = { + .gfp_mask = sc.gfp_mask, + }; unsigned long nr_slab_pages0, nr_slab_pages1; cond_resched(); @@ -3016,7 +3042,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) unsigned long lru_pages = zone_reclaimable_pages(zone); /* No reclaimable slab or very low memory pressure */ - if (!shrink_slab(sc.nr_scanned, gfp_mask, lru_pages)) + if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages)) break; /* Freed enough memory */ diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 6e64f7c6a2e92..8a7de0f13e673 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -124,6 +124,9 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head) grp->nr_vlans--; + if (vlan->flags & VLAN_FLAG_GVRP) + vlan_gvrp_request_leave(dev); + vlan_group_set_device(grp, vlan_id, NULL); if (!grp->killall) synchronize_net(); diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index be737539f34d1..a330b9e83d0b8 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -487,9 +487,6 @@ static int vlan_dev_stop(struct net_device *dev) struct vlan_dev_info *vlan = vlan_dev_info(dev); struct net_device *real_dev = vlan->real_dev; - if (vlan->flags & VLAN_FLAG_GVRP) - vlan_gvrp_request_leave(dev); - dev_mc_unsync(real_dev, dev); dev_uc_unsync(real_dev, dev); if (dev->flags & IFF_ALLMULTI) @@ -707,6 +704,7 @@ static int vlan_dev_init(struct net_device *dev) dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid; #endif + dev->needed_headroom = real_dev->needed_headroom; if (real_dev->features & NETIF_F_HW_VLAN_TX) { dev->header_ops = real_dev->header_ops; dev->hard_header_len = real_dev->hard_header_len; diff --git 
a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c index f7fa67c78766f..f49da5814bc3c 100644 --- a/net/atm/atm_sysfs.c +++ b/net/atm/atm_sysfs.c @@ -59,6 +59,14 @@ static ssize_t show_atmaddress(struct device *cdev, return pos - buf; } +static ssize_t show_atmindex(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct atm_dev *adev = to_atm_dev(cdev); + + return sprintf(buf, "%d\n", adev->number); +} + static ssize_t show_carrier(struct device *cdev, struct device_attribute *attr, char *buf) { @@ -99,6 +107,7 @@ static ssize_t show_link_rate(struct device *cdev, static DEVICE_ATTR(address, S_IRUGO, show_address, NULL); static DEVICE_ATTR(atmaddress, S_IRUGO, show_atmaddress, NULL); +static DEVICE_ATTR(atmindex, S_IRUGO, show_atmindex, NULL); static DEVICE_ATTR(carrier, S_IRUGO, show_carrier, NULL); static DEVICE_ATTR(type, S_IRUGO, show_type, NULL); static DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL); @@ -106,6 +115,7 @@ static DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL); static struct device_attribute *atm_attrs[] = { &dev_attr_atmaddress, &dev_attr_address, + &dev_attr_atmindex, &dev_attr_carrier, &dev_attr_type, &dev_attr_link_rate, diff --git a/net/atm/common.c b/net/atm/common.c index 1b9c52a02cd31..22b963d06a10b 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -252,6 +252,7 @@ void atm_dev_release_vccs(struct atm_dev *dev) } write_unlock_irq(&vcc_sklist_lock); } +EXPORT_SYMBOL(atm_dev_release_vccs); static int adjust_tp(struct atm_trafprm *tp, unsigned char aal) { diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c index 2862f53b66b15..d935da71ab3b5 100644 --- a/net/bluetooth/bnep/sock.c +++ b/net/bluetooth/bnep/sock.c @@ -88,6 +88,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long sockfd_put(nsock); return -EBADFD; } + ca.device[sizeof(ca.device)-1] = 0; err = bnep_add_connection(&ca, nsock); if (!err) { diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 75604cee46b3a..ddfacd5b94e02 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -184,6 +184,7 @@ static void hci_reset_req(struct hci_dev *hdev, unsigned long opt) BT_DBG("%s %ld", hdev->name, opt); /* Reset device */ + set_bit(HCI_RESET, &hdev->flags); hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); } @@ -210,8 +211,10 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt) /* Mandatory initialization */ /* Reset */ - if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) + if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) { + set_bit(HCI_RESET, &hdev->flags); hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); + } /* Read Local Supported Features */ hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); @@ -1638,7 +1641,7 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) if (conn) { register struct hci_proto *hp; - hci_conn_enter_active_mode(conn, 1); + hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active); /* Send to upper protocol */ hp = hci_proto[HCI_PROTO_L2CAP]; diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index f5cd163027b76..67cd8d52dfdbb 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -183,6 +183,8 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) BT_DBG("%s status 0x%x", hdev->name, status); + clear_bit(HCI_RESET, &hdev->flags); + hci_req_complete(hdev, HCI_OP_RESET, status); } @@ -1465,7 +1467,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) break; } - 
if (ev->ncmd) { + if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) { atomic_set(&hdev->cmd_cnt, 1); if (!skb_queue_empty(&hdev->cmd_q)) tasklet_schedule(&hdev->cmd_task); diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 6973b3482c648..813b06e805263 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -720,6 +720,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user break; } + memset(&cinfo, 0, sizeof(cinfo)); cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle; memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3); diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 88e4aa9cb1f9a..90e985b99f31f 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -163,7 +163,7 @@ struct sk_buff *br_handle_frame(struct sk_buff *skb) goto drop; /* If STP is turned off, then forward */ - if (p->br->stp_enabled == BR_NO_STP) + if (p->br->stp_enabled == BR_NO_STP && dest[5] == 0) goto forward; if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev, diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 030a002ff8eee..f61eb2eff3fdd 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -445,9 +445,9 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, ip6h->payload_len = htons(8 + sizeof(*mldq)); ip6h->nexthdr = IPPROTO_HOPOPTS; ip6h->hop_limit = 1; + ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0, &ip6h->saddr); - ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); hopopt = (u8 *)(ip6h + 1); diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 4b5b66d07bba7..e54990e4448b3 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -249,11 +249,9 @@ static int br_parse_ip_options(struct sk_buff *skb) goto drop; } - /* Zero out the CB buffer if no options present */ - if (iph->ihl == 5) { - memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); + memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); + if (iph->ihl == 5) return 0; - } opt->optlen = iph->ihl*4 - sizeof(struct iphdr); if (ip_options_compile(dev_net(dev), opt, skb)) @@ -741,6 +739,9 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb, nf_bridge->mask |= BRNF_PKT_TYPE; } + if (pf == PF_INET && br_parse_ip_options(skb)) + return NF_DROP; + /* The physdev module checks on this */ nf_bridge->mask |= BRNF_BRIDGED; nf_bridge->physoutdev = skb->dev; diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 16df0532d4b9a..47acf4a50efe8 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1107,6 +1107,8 @@ static int do_replace(struct net *net, const void __user *user, if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) return -ENOMEM; + tmp.name[sizeof(tmp.name) - 1] = 0; + countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; newinfo = vmalloc(sizeof(*newinfo) + countersize); if (!newinfo) diff --git a/net/can/bcm.c b/net/can/bcm.c index 092dc88a7c64c..63779ab2b759c 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -1427,9 +1427,14 @@ static int bcm_init(struct sock *sk) static int bcm_release(struct socket *sock) { struct sock *sk = sock->sk; - struct bcm_sock *bo = bcm_sk(sk); + struct bcm_sock *bo; struct bcm_op *op, *next; + if (sk == NULL) + return 0; + + bo = bcm_sk(sk); + /* remove bcm_ops, timer, rx_unregister(), etc. 
*/ unregister_netdevice_notifier(&bo->notifier); diff --git a/net/can/raw.c b/net/can/raw.c index 883e9d74fddf6..241b2b60c7ee8 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -305,7 +305,12 @@ static int raw_init(struct sock *sk) static int raw_release(struct socket *sock) { struct sock *sk = sock->sk; - struct raw_sock *ro = raw_sk(sk); + struct raw_sock *ro; + + if (!sk) + return 0; + + ro = raw_sk(sk); unregister_netdevice_notifier(&ro->notifier); diff --git a/net/core/dev.c b/net/core/dev.c index 6561021d22d1f..2bb4aa68466e5 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1006,7 +1006,7 @@ int dev_change_name(struct net_device *dev, const char *newname) } write_lock_bh(&dev_base_lock); - hlist_del(&dev->name_hlist); + hlist_del_rcu(&dev->name_hlist); write_unlock_bh(&dev_base_lock); synchronize_rcu(); @@ -1332,11 +1332,13 @@ int dev_close_many(struct list_head *head) */ int dev_close(struct net_device *dev) { - LIST_HEAD(single); + if (dev->flags & IFF_UP) { + LIST_HEAD(single); - list_add(&dev->unreg_list, &single); - dev_close_many(&single); - list_del(&single); + list_add(&dev->unreg_list, &single); + dev_close_many(&single); + list_del(&single); + } return 0; } EXPORT_SYMBOL(dev_close); diff --git a/net/core/ethtool.c b/net/core/ethtool.c index ff2302910b5eb..6c7c610866db1 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -146,9 +146,24 @@ u32 ethtool_op_get_flags(struct net_device *dev) } EXPORT_SYMBOL(ethtool_op_get_flags); +/* Check if device can enable (or disable) particular feature coded in "data" + * argument. Flags "supported" describe features that can be toggled by device. + * If feature can not be toggled, it state (enabled or disabled) must match + * hardcoded device features state, otherwise flags are marked as invalid. + */ +bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported) +{ + u32 features = dev->features & flags_dup_features; + /* "data" can contain only flags_dup_features bits, + * see __ethtool_set_flags */ + + return (features & ~supported) != (data & ~supported); +} +EXPORT_SYMBOL(ethtool_invalid_flags); + int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) { - if (data & ~supported) + if (ethtool_invalid_flags(dev, data, supported)) return -EINVAL; dev->features = ((dev->features & ~flags_dup_features) | diff --git a/net/core/skbuff.c b/net/core/skbuff.c index d883dcc78b6b6..e9f924898e6b0 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2997,6 +2997,9 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) skb->destructor = sock_rmem_free; atomic_add(skb->truesize, &sk->sk_rmem_alloc); + /* before exiting rcu section, make sure dst is refcounted */ + skb_dst_force(skb); + skb_queue_tail(&sk->sk_error_queue, skb); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, skb->len); diff --git a/net/dccp/options.c b/net/dccp/options.c index f06ffcfc8d712..4b2ab657ac8e6 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c @@ -123,6 +123,8 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq, case DCCPO_CHANGE_L ... 
DCCPO_CONFIRM_R: if (pkt_type == DCCP_PKT_DATA) /* RFC 4340, 6 */ break; + if (len == 0) + goto out_invalid_option; rc = dccp_feat_parse_options(sk, dreq, mandatory, opt, *value, value + 1, len - 1); if (rc) diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index 0c2826337919a..116d3fd3d6691 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c @@ -435,10 +435,10 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, udpdest.sin_addr.s_addr = htonl(network | addr.station); } + memset(&ah, 0, sizeof(ah)); ah.port = port; ah.cb = cb & 0x7f; ah.code = 2; /* magic */ - ah.pad = 0; /* tack our header on the front of the iovec */ size = sizeof(struct aunhdr); diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile index ce2d33582859d..5761185f884e8 100644 --- a/net/ieee802154/Makefile +++ b/net/ieee802154/Makefile @@ -1,5 +1,3 @@ obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o af_802154-y := af_ieee802154.o raw.o dgram.o - -ccflags-y += -Wall -DDEBUG diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index a5a1050595d18..8a79fbb4641eb 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -616,6 +616,9 @@ choice config DEFAULT_VEGAS bool "Vegas" if TCP_CONG_VEGAS=y + config DEFAULT_YEAH + bool "YeAH" if TCP_CONG_YEAH=y + config DEFAULT_VENO bool "Veno" if TCP_CONG_VENO=y @@ -641,6 +644,7 @@ config DEFAULT_TCP_CONG default "htcp" if DEFAULT_HTCP default "hybla" if DEFAULT_HYBLA default "vegas" if DEFAULT_VEGAS + default "yeah" if DEFAULT_YEAH default "westwood" if DEFAULT_WESTWOOD default "veno" if DEFAULT_VENO default "reno" if DEFAULT_RENO diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 1d2cdd43a878b..8725e783d9cdb 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -1041,6 +1041,7 @@ static void ip_fib_net_exit(struct net *net) fib4_rules_exit(net); #endif + rtnl_lock(); for (i = 0; i < FIB_TABLE_HASHSZ; i++) { struct fib_table *tb; struct hlist_head *head; @@ -1053,6 +1054,7 @@ static void ip_fib_net_exit(struct net *net) fib_free_table(tb); } } + rtnl_unlock(); kfree(net->ipv4.fib_table_hash); } diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index e0e77e297de32..d9d5130a91227 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1172,20 +1172,18 @@ static void igmp_group_dropped(struct ip_mc_list *im) if (!in_dev->dead) { if (IGMP_V1_SEEN(in_dev)) - goto done; + return; if (IGMP_V2_SEEN(in_dev)) { if (reporter) igmp_send_report(in_dev, im, IGMP_HOST_LEAVE_MESSAGE); - goto done; + return; } /* IGMPv3 */ igmpv3_add_delrec(in_dev, im); igmp_ifc_event(in_dev); } -done: #endif - ip_mc_clear_src(im); } static void igmp_group_added(struct ip_mc_list *im) @@ -1322,6 +1320,7 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr) *ip = i->next_rcu; in_dev->mc_count--; igmp_group_dropped(i); + ip_mc_clear_src(i); if (!in_dev->dead) ip_rt_multicast_event(in_dev); @@ -1431,7 +1430,8 @@ void ip_mc_destroy_dev(struct in_device *in_dev) in_dev->mc_list = i->next_rcu; in_dev->mc_count--; - igmp_group_dropped(i); + /* We've dropped the groups in ip_mc_down already */ + ip_mc_clear_src(i); ip_ma_put(i); } } diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 97e5fb7652650..25e318153f143 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -73,7 +73,7 @@ int inet_csk_bind_conflict(const struct sock *sk, !sk2->sk_bound_dev_if || sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { if 
(!reuse || !sk2->sk_reuse || - ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) { + sk2->sk_state == TCP_LISTEN) { const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) || sk2_rcv_saddr == sk_rcv_saddr(sk)) @@ -122,8 +122,7 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) (tb->num_owners < smallest_size || smallest_size == -1)) { smallest_size = tb->num_owners; smallest_rover = rover; - if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 && - !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) { + if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) { spin_unlock(&head->lock); snum = smallest_rover; goto have_snum; diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index a96e65674ac3e..14af1b44121e4 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c @@ -366,7 +366,8 @@ static void inetpeer_free_rcu(struct rcu_head *head) } /* May be called with local BH enabled. */ -static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base) +static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base, + struct inet_peer __rcu **stack[PEER_MAXDEPTH]) { int do_free; @@ -380,7 +381,6 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base) * We use refcnt=-1 to alert lockless readers this entry is deleted. */ if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) { - struct inet_peer __rcu **stack[PEER_MAXDEPTH]; struct inet_peer __rcu ***stackptr, ***delp; if (lookup(&p->daddr, stack, base) != p) BUG(); @@ -435,7 +435,7 @@ static struct inet_peer_base *peer_to_base(struct inet_peer *p) } /* May be called with local BH enabled. */ -static int cleanup_once(unsigned long ttl) +static int cleanup_once(unsigned long ttl, struct inet_peer __rcu **stack[PEER_MAXDEPTH]) { struct inet_peer *p = NULL; @@ -467,7 +467,7 @@ static int cleanup_once(unsigned long ttl) * happen because of entry limits in route cache. */ return -1; - unlink_from_pool(p, peer_to_base(p)); + unlink_from_pool(p, peer_to_base(p), stack); return 0; } @@ -523,7 +523,7 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create) if (base->total >= inet_peer_threshold) /* Remove one less-recently-used entry. 
*/ - cleanup_once(0); + cleanup_once(0, stack); return p; } @@ -539,6 +539,7 @@ static void peer_check_expire(unsigned long dummy) { unsigned long now = jiffies; int ttl, total; + struct inet_peer __rcu **stack[PEER_MAXDEPTH]; total = compute_total(); if (total >= inet_peer_threshold) @@ -547,7 +548,7 @@ static void peer_check_expire(unsigned long dummy) ttl = inet_peer_maxttl - (inet_peer_maxttl - inet_peer_minttl) / HZ * total / inet_peer_threshold * HZ; - while (!cleanup_once(ttl)) { + while (!cleanup_once(ttl, stack)) { if (jiffies != now) break; } diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index a1151b8adf3c6..b1d282f11be7e 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -223,31 +223,30 @@ static void ip_expire(unsigned long arg) if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) { struct sk_buff *head = qp->q.fragments; + const struct iphdr *iph; + int err; rcu_read_lock(); head->dev = dev_get_by_index_rcu(net, qp->iif); if (!head->dev) goto out_rcu_unlock; + /* skb dst is stale, drop it, and perform route lookup again */ + skb_dst_drop(head); + iph = ip_hdr(head); + err = ip_route_input_noref(head, iph->daddr, iph->saddr, + iph->tos, head->dev); + if (err) + goto out_rcu_unlock; + /* - * Only search router table for the head fragment, - * when defraging timeout at PRE_ROUTING HOOK. + * Only an end host needs to send an ICMP + * "Fragment Reassembly Timeout" message, per RFC792. */ - if (qp->user == IP_DEFRAG_CONNTRACK_IN && !skb_dst(head)) { - const struct iphdr *iph = ip_hdr(head); - int err = ip_route_input(head, iph->daddr, iph->saddr, - iph->tos, head->dev); - if (unlikely(err)) - goto out_rcu_unlock; - - /* - * Only an end host needs to send an ICMP - * "Fragment Reassembly Timeout" message, per RFC792. - */ - if (skb_rtable(head)->rt_type != RTN_LOCAL) - goto out_rcu_unlock; + if (qp->user == IP_DEFRAG_CONNTRACK_IN && + skb_rtable(head)->rt_type != RTN_LOCAL) + goto out_rcu_unlock; - } /* Send an ICMP "Fragment Reassembly Timeout" message. 
*/ icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 1906fa35860c8..b0413e300e561 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -329,7 +329,7 @@ int ip_options_compile(struct net *net, pp_ptr = optptr + 2; goto error; } - if (skb) { + if (rt) { memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4); opt->is_changed = 1; } @@ -371,7 +371,7 @@ int ip_options_compile(struct net *net, goto error; } opt->ts = optptr - iph; - if (skb) { + if (rt) { memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4); timeptr = (__be32*)&optptr[optptr[2]+3]; } @@ -603,7 +603,7 @@ int ip_options_rcv_srr(struct sk_buff *skb) unsigned long orefdst; int err; - if (!opt->srr) + if (!opt->srr || !rt) return 0; if (skb->pkt_type != PACKET_HOST) diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index e855fffaed95b..6d79aa10e6299 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -1065,6 +1065,7 @@ static int do_replace(struct net *net, const void __user *user, /* overflow check */ if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; + tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) @@ -1486,6 +1487,7 @@ static int compat_do_replace(struct net *net, void __user *user, return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; + tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) @@ -1738,6 +1740,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len ret = -EFAULT; break; } + rev.name[sizeof(rev.name)-1] = 0; try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name, rev.revision, 1, &ret), diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 652efea013dcc..92fb4c5e5c9bd 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -387,7 +387,7 @@ ipt_do_table(struct sk_buff *skb, verdict = (unsigned)(-v) - 1; break; } - if (*stackptr == 0) { + if (*stackptr <= origptr) { e = get_entry(table_base, private->underflow[hook]); pr_debug("Underflow (this is normal) " @@ -427,10 +427,10 @@ ipt_do_table(struct sk_buff *skb, /* Verdict */ break; } while (!acpar.hotdrop); - xt_info_rdunlock_bh(); pr_debug("Exiting %s; resetting sp from %u to %u\n", __func__, *stackptr, origptr); *stackptr = origptr; + xt_info_rdunlock_bh(); #ifdef DEBUG_ALLOW_ALL return NF_ACCEPT; #else @@ -1261,6 +1261,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len) /* overflow check */ if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; + tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) @@ -1805,6 +1806,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; + tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) @@ -2034,6 +2036,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) ret = -EFAULT; break; } + rev.name[sizeof(rev.name)-1] = 0; if (cmd == IPT_SO_GET_REVISION_TARGET) target = 1; diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 1e26a4897655b..e07341db19246 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -312,7 +312,7 @@ 
clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par) * error messages (RELATED) and information requests (see below) */ if (ip_hdr(skb)->protocol == IPPROTO_ICMP && (ctinfo == IP_CT_RELATED || - ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)) + ctinfo == IP_CT_RELATED_REPLY)) return XT_CONTINUE; /* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO, @@ -326,12 +326,12 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par) ct->mark = hash; break; case IP_CT_RELATED: - case IP_CT_RELATED+IP_CT_IS_REPLY: + case IP_CT_RELATED_REPLY: /* FIXME: we don't handle expectations at the * moment. they can arrive on a different node than * the master connection (e.g. FTP passive mode) */ case IP_CT_ESTABLISHED: - case IP_CT_ESTABLISHED+IP_CT_IS_REPLY: + case IP_CT_ESTABLISHED_REPLY: break; default: break; @@ -669,8 +669,11 @@ static ssize_t clusterip_proc_write(struct file *file, const char __user *input, char buffer[PROC_WRITELEN+1]; unsigned long nodenum; - if (copy_from_user(buffer, input, PROC_WRITELEN)) + if (size > PROC_WRITELEN) + return -EIO; + if (copy_from_user(buffer, input, size)) return -EFAULT; + buffer[size] = 0; if (*buffer == '+') { nodenum = simple_strtoul(buffer+1, NULL, 10); diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c index d2ed9dc74ebc3..9931152a78b54 100644 --- a/net/ipv4/netfilter/ipt_MASQUERADE.c +++ b/net/ipv4/netfilter/ipt_MASQUERADE.c @@ -60,7 +60,7 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par) nat = nfct_nat(ct); NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || - ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)); + ctinfo == IP_CT_RELATED_REPLY)); /* Source address is 0.0.0.0 - locally generated packet that is * probably not supposed to be masqueraded. diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index 5a03c02af999a..db10075dd88e4 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -101,7 +101,7 @@ static unsigned int ipv4_confirm(unsigned int hooknum, /* This is where we call the helper: as the packet goes out. */ ct = nf_ct_get(skb, &ctinfo); - if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY) + if (!ct || ctinfo == IP_CT_RELATED_REPLY) goto out; help = nfct_help(ct); diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c index c04787ce1a712..f9ff19ffeebb7 100644 --- a/net/ipv4/netfilter/nf_nat_core.c +++ b/net/ipv4/netfilter/nf_nat_core.c @@ -428,7 +428,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct, /* Must be RELATED */ NF_CT_ASSERT(skb->nfctinfo == IP_CT_RELATED || - skb->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY); + skb->nfctinfo == IP_CT_RELATED_REPLY); /* Redirects on non-null nats must be dropped, else they'll start talking to each other without our translation, and be diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c index 21c30426480b0..733c9abc1cbd9 100644 --- a/net/ipv4/netfilter/nf_nat_rule.c +++ b/net/ipv4/netfilter/nf_nat_rule.c @@ -53,7 +53,7 @@ ipt_snat_target(struct sk_buff *skb, const struct xt_action_param *par) /* Connection must be valid and new. 
*/ NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || - ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)); + ctinfo == IP_CT_RELATED_REPLY)); NF_CT_ASSERT(par->out != NULL); return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_SRC); diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c index 95481fee8bdbf..771f5a5efbfd4 100644 --- a/net/ipv4/netfilter/nf_nat_standalone.c +++ b/net/ipv4/netfilter/nf_nat_standalone.c @@ -115,7 +115,7 @@ nf_nat_fn(unsigned int hooknum, switch (ctinfo) { case IP_CT_RELATED: - case IP_CT_RELATED+IP_CT_IS_REPLY: + case IP_CT_RELATED_REPLY: if (ip_hdr(skb)->protocol == IPPROTO_ICMP) { if (!nf_nat_icmp_reply_translation(ct, ctinfo, hooknum, skb)) @@ -143,7 +143,7 @@ nf_nat_fn(unsigned int hooknum, default: /* ESTABLISHED */ NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED || - ctinfo == (IP_CT_ESTABLISHED+IP_CT_IS_REPLY)); + ctinfo == IP_CT_ESTABLISHED_REPLY); } return nf_nat_packet(ct, ctinfo, hooknum, skb); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 6ed6603c2f6db..fabfe8168b909 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -171,7 +171,7 @@ static struct dst_ops ipv4_dst_ops = { const __u8 ip_tos2prio[16] = { TC_PRIO_BESTEFFORT, - ECN_OR_COST(FILLER), + ECN_OR_COST(BESTEFFORT), TC_PRIO_BESTEFFORT, ECN_OR_COST(BESTEFFORT), TC_PRIO_BULK, diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 1a456652086b7..321e6e84dbccb 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -311,7 +311,6 @@ static struct ctl_table ipv4_table[] = { .mode = 0644, .proc_handler = proc_do_large_bitmap, }, -#ifdef CONFIG_IP_MULTICAST { .procname = "igmp_max_memberships", .data = &sysctl_igmp_max_memberships, @@ -319,8 +318,6 @@ static struct ctl_table ipv4_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, - -#endif { .procname = "igmp_max_msf", .data = &sysctl_igmp_max_msf, diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 65f6c04062453..e6e900174e817 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2659,7 +2659,7 @@ static void DBGUNDO(struct sock *sk, const char *msg) #define DBGUNDO(x...) do { } while (0) #endif -static void tcp_undo_cwr(struct sock *sk, const int undo) +static void tcp_undo_cwr(struct sock *sk, const int undo_ssthresh) { struct tcp_sock *tp = tcp_sk(sk); @@ -2671,14 +2671,13 @@ static void tcp_undo_cwr(struct sock *sk, const int undo) else tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1); - if (undo && tp->prior_ssthresh > tp->snd_ssthresh) { + if (undo_ssthresh && tp->prior_ssthresh > tp->snd_ssthresh) { tp->snd_ssthresh = tp->prior_ssthresh; TCP_ECN_withdraw_cwr(tp); } } else { tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh); } - tcp_moderate_cwnd(tp); tp->snd_cwnd_stamp = tcp_time_stamp; } @@ -2822,8 +2821,15 @@ static int tcp_try_undo_loss(struct sock *sk) static inline void tcp_complete_cwr(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); - tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); - tp->snd_cwnd_stamp = tcp_time_stamp; + + /* Do not moderate cwnd if it's already undone in cwr or recovery. 
*/ + if (tp->undo_marker) { + if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) + tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); + else /* PRR */ + tp->snd_cwnd = tp->snd_ssthresh; + tp->snd_cwnd_stamp = tcp_time_stamp; + } tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); } @@ -2939,6 +2945,38 @@ void tcp_simple_retransmit(struct sock *sk) } EXPORT_SYMBOL(tcp_simple_retransmit); +/* This function implements the PRR algorithm, specifcally the PRR-SSRB + * (proportional rate reduction with slow start reduction bound) as described in + * http://www.ietf.org/id/draft-mathis-tcpm-proportional-rate-reduction-01.txt. + * It computes the number of packets to send (sndcnt) based on packets newly + * delivered: + * 1) If the packets in flight is larger than ssthresh, PRR spreads the + * cwnd reductions across a full RTT. + * 2) If packets in flight is lower than ssthresh (such as due to excess + * losses and/or application stalls), do not perform any further cwnd + * reductions, but instead slow start up to ssthresh. + */ +static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked, + int fast_rexmit, int flag) +{ + struct tcp_sock *tp = tcp_sk(sk); + int sndcnt = 0; + int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp); + + if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) { + u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered + + tp->prior_cwnd - 1; + sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out; + } else { + sndcnt = min_t(int, delta, + max_t(int, tp->prr_delivered - tp->prr_out, + newly_acked_sacked) + 1); + } + + sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0)); + tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt; +} + /* Process an event, which can update packets-in-flight not trivially. * Main goal of this function is to calculate new estimate for left_out, * taking into account both packets sitting in receiver's buffer and @@ -2950,7 +2988,8 @@ EXPORT_SYMBOL(tcp_simple_retransmit); * It does _not_ decide what to send, it is made in function * tcp_xmit_retransmit_queue(). */ -static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag) +static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, + int newly_acked_sacked, int flag) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); @@ -3100,13 +3139,17 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag) tp->bytes_acked = 0; tp->snd_cwnd_cnt = 0; + tp->prior_cwnd = tp->snd_cwnd; + tp->prr_delivered = 0; + tp->prr_out = 0; tcp_set_ca_state(sk, TCP_CA_Recovery); fast_rexmit = 1; } if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk))) tcp_update_scoreboard(sk, fast_rexmit); - tcp_cwnd_down(sk, flag); + tp->prr_delivered += newly_acked_sacked; + tcp_update_cwnd_in_recovery(sk, newly_acked_sacked, fast_rexmit, flag); tcp_xmit_retransmit_queue(sk); } @@ -3620,6 +3663,8 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) u32 prior_in_flight; u32 prior_fackets; int prior_packets; + int prior_sacked = tp->sacked_out; + int newly_acked_sacked = 0; int frto_cwnd = 0; /* If the ack is older than previous acks @@ -3691,6 +3736,9 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) /* See if we can take anything off of the retransmit queue. 
*/ flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); + newly_acked_sacked = (prior_packets - prior_sacked) - + (tp->packets_out - tp->sacked_out); + if (tp->frto_counter) frto_cwnd = tcp_process_frto(sk, flag); /* Guarantee sacktag reordering detection against wrap-arounds */ @@ -3703,7 +3751,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) tcp_may_raise_cwnd(sk, flag)) tcp_cong_avoid(sk, ack, prior_in_flight); tcp_fastretrans_alert(sk, prior_packets - tp->packets_out, - flag); + newly_acked_sacked, flag); } else { if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) tcp_cong_avoid(sk, ack, prior_in_flight); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index dfa5beb0c1c8c..5464dc4f4fbdd 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1003,7 +1003,8 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, int nlen; u8 flags; - BUG_ON(len > skb->len); + if (WARN_ON(len > skb->len)) + return -EINVAL; nsize = skb_headlen(skb) - len; if (nsize < 0) @@ -1795,11 +1796,13 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, tcp_event_new_data_sent(sk, skb); tcp_minshall_update(tp, mss_now, skb); - sent_pkts++; + sent_pkts += tcp_skb_pcount(skb); if (push_one) break; } + if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery) + tp->prr_out += sent_pkts; if (likely(sent_pkts)) { tcp_cwnd_validate(sk); @@ -2293,6 +2296,9 @@ void tcp_xmit_retransmit_queue(struct sock *sk) return; NET_INC_STATS_BH(sock_net(sk), mib_idx); + if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery) + tp->prr_out += tcp_skb_pcount(skb); + if (skb == tcp_write_queue_head(sk)) inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto, diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index d144e629d2b43..e46305d1815ae 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -44,7 +44,7 @@ int inet6_csk_bind_conflict(const struct sock *sk, !sk2->sk_bound_dev_if || sk->sk_bound_dev_if == sk2->sk_bound_dev_if) && (!sk->sk_reuse || !sk2->sk_reuse || - ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) && + sk2->sk_state == TCP_LISTEN) && ipv6_rcv_saddr_equal(sk, sk2)) break; } diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 633a6c266136e..b53197233709c 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -124,7 +124,7 @@ struct sock *__inet6_lookup_established(struct net *net, } EXPORT_SYMBOL(__inet6_lookup_established); -static int inline compute_score(struct sock *sk, struct net *net, +static inline int compute_score(struct sock *sk, struct net *net, const unsigned short hnum, const struct in6_addr *daddr, const int dif) diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 7d227c644f727..eadafbfc9ef4b 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -410,7 +410,7 @@ ip6t_do_table(struct sk_buff *skb, verdict = (unsigned)(-v) - 1; break; } - if (*stackptr == 0) + if (*stackptr <= origptr) e = get_entry(table_base, private->underflow[hook]); else @@ -441,8 +441,8 @@ ip6t_do_table(struct sk_buff *skb, break; } while (!acpar.hotdrop); - xt_info_rdunlock_bh(); *stackptr = origptr; + xt_info_rdunlock_bh(); #ifdef DEBUG_ALLOW_ALL return NF_ACCEPT; @@ -1274,6 +1274,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len) /* overflow check */ if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; + 
tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) @@ -1820,6 +1821,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; + tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) @@ -2049,6 +2051,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) ret = -EFAULT; break; } + rev.name[sizeof(rev.name)-1] = 0; if (cmd == IP6T_SO_GET_REVISION_TARGET) target = 1; diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index c8af58b225620..4111050a9fc52 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c @@ -160,7 +160,7 @@ static unsigned int ipv6_confirm(unsigned int hooknum, /* This is where we call the helper: as the packet goes out. */ ct = nf_ct_get(skb, &ctinfo); - if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY) + if (!ct || ctinfo == IP_CT_RELATED_REPLY) goto out; help = nfct_help(ct); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index e7db7014e89f9..57c20dc41ccb4 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1970,7 +1970,6 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, rt->dst.output = ip6_output; rt->rt6i_dev = net->loopback_dev; rt->rt6i_idev = idev; - dst_metric_set(&rt->dst, RTAX_HOPLIMIT, -1); rt->dst.obsolete = -1; rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP; diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c index 7cb65ef79f9cd..6dcf5e7d661bd 100644 --- a/net/ipv6/sysctl_net_ipv6.c +++ b/net/ipv6/sysctl_net_ipv6.c @@ -17,6 +17,16 @@ static struct ctl_table empty[1]; +static ctl_table ipv6_static_skeleton[] = { + { + .procname = "neigh", + .maxlen = 0, + .mode = 0555, + .child = empty, + }, + { } +}; + static ctl_table ipv6_table_template[] = { { .procname = "route", @@ -37,12 +47,6 @@ static ctl_table ipv6_table_template[] = { .mode = 0644, .proc_handler = proc_dointvec }, - { - .procname = "neigh", - .maxlen = 0, - .mode = 0555, - .child = empty, - }, { } }; @@ -160,7 +164,7 @@ static struct ctl_table_header *ip6_base; int ipv6_static_sysctl_register(void) { - ip6_base = register_sysctl_paths(net_ipv6_ctl_path, empty); + ip6_base = register_sysctl_paths(net_ipv6_ctl_path, ipv6_static_skeleton); if (ip6_base == NULL) return -ENOMEM; return 0; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 9a009c66c8a3d..6703f8b949267 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1339,7 +1339,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, int features) skb->ip_summed = CHECKSUM_NONE; /* Check if there is enough headroom to insert fragment header. 
*/ - if ((skb_headroom(skb) < frag_hdr_sz) && + if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) && pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC)) goto out; diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index c9890e25cd4c9..cc616974a447d 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -1297,8 +1297,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock, /* Note : socket.c set MSG_EOR on SEQPACKET sockets */ if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT | MSG_NOSIGNAL)) { - err = -EINVAL; - goto out; + return -EINVAL; } lock_sock(sk); diff --git a/net/irda/iriap.c b/net/irda/iriap.c index 5b743bdd89ba2..36477538cea8e 100644 --- a/net/irda/iriap.c +++ b/net/irda/iriap.c @@ -656,10 +656,16 @@ static void iriap_getvaluebyclass_indication(struct iriap_cb *self, n = 1; name_len = fp[n++]; + + IRDA_ASSERT(name_len < IAS_MAX_CLASSNAME + 1, return;); + memcpy(name, fp+n, name_len); n+=name_len; name[name_len] = '\0'; attr_len = fp[n++]; + + IRDA_ASSERT(attr_len < IAS_MAX_ATTRIBNAME + 1, return;); + memcpy(attr, fp+n, attr_len); n+=attr_len; attr[attr_len] = '\0'; diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c index 7c567b8aa89a9..2bb2beb6a373d 100644 --- a/net/irda/irnet/irnet_ppp.c +++ b/net/irda/irnet/irnet_ppp.c @@ -105,6 +105,9 @@ irnet_ctrl_write(irnet_socket * ap, while(isspace(start[length - 1])) length--; + DABORT(length < 5 || length > NICKNAME_MAX_LEN + 5, + -EINVAL, CTRL_ERROR, "Invalid nickname.\n"); + /* Copy the name for later reuse */ memcpy(ap->rname, start + 5, length - 5); ap->rname[length - 5] = '\0'; diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 9cd73b11506e8..77fae489e8b93 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -228,11 +228,11 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev, goto out; if (pairwise) - key = sta->ptk; + key = rcu_dereference(sta->ptk); else if (key_idx < NUM_DEFAULT_KEYS) - key = sta->gtk[key_idx]; + key = rcu_dereference(sta->gtk[key_idx]); } else - key = sdata->keys[key_idx]; + key = rcu_dereference(sdata->keys[key_idx]); if (!key) goto out; @@ -316,9 +316,21 @@ static int ieee80211_config_default_mgmt_key(struct wiphy *wiphy, return 0; } +static void rate_idx_to_bitrate(struct rate_info *rate, struct sta_info *sta, int idx) +{ + if (!(rate->flags & RATE_INFO_FLAGS_MCS)) { + struct ieee80211_supported_band *sband; + sband = sta->local->hw.wiphy->bands[ + sta->local->hw.conf.channel->band]; + rate->legacy = sband->bitrates[idx].bitrate; + } else + rate->mcs = idx; +} + static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) { struct ieee80211_sub_if_data *sdata = sta->sdata; + struct timespec uptime; sinfo->generation = sdata->local->sta_generation; @@ -330,7 +342,13 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) STATION_INFO_TX_RETRIES | STATION_INFO_TX_FAILED | STATION_INFO_TX_BITRATE | - STATION_INFO_RX_DROP_MISC; + STATION_INFO_RX_BITRATE | + STATION_INFO_RX_DROP_MISC | + STATION_INFO_BSS_PARAM | + STATION_INFO_CONNECTED_TIME; + + do_posix_clock_monotonic_gettime(&uptime); + sinfo->connected_time = uptime.tv_sec - sta->last_connected; sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx); sinfo->rx_bytes = sta->rx_bytes; @@ -355,15 +373,16 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH; if (sta->last_tx_rate.flags & IEEE80211_TX_RC_SHORT_GI) sinfo->txrate.flags |= 
RATE_INFO_FLAGS_SHORT_GI; + rate_idx_to_bitrate(&sinfo->txrate, sta, sta->last_tx_rate.idx); - if (!(sta->last_tx_rate.flags & IEEE80211_TX_RC_MCS)) { - struct ieee80211_supported_band *sband; - sband = sta->local->hw.wiphy->bands[ - sta->local->hw.conf.channel->band]; - sinfo->txrate.legacy = - sband->bitrates[sta->last_tx_rate.idx].bitrate; - } else - sinfo->txrate.mcs = sta->last_tx_rate.idx; + sinfo->rxrate.flags = 0; + if (sta->last_rx_rate_flag & RX_FLAG_HT) + sinfo->rxrate.flags |= RATE_INFO_FLAGS_MCS; + if (sta->last_rx_rate_flag & RX_FLAG_40MHZ) + sinfo->rxrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH; + if (sta->last_rx_rate_flag & RX_FLAG_SHORT_GI) + sinfo->rxrate.flags |= RATE_INFO_FLAGS_SHORT_GI; + rate_idx_to_bitrate(&sinfo->rxrate, sta, sta->last_rx_rate_idx); if (ieee80211_vif_is_mesh(&sdata->vif)) { #ifdef CONFIG_MAC80211_MESH @@ -376,6 +395,16 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) sinfo->plink_state = sta->plink_state; #endif } + + sinfo->bss_param.flags = 0; + if (sdata->vif.bss_conf.use_cts_prot) + sinfo->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT; + if (sdata->vif.bss_conf.use_short_preamble) + sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE; + if (sdata->vif.bss_conf.use_short_slot) + sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME; + sinfo->bss_param.dtim_period = sdata->local->hw.conf.ps_dtim_period; + sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int; } @@ -904,8 +933,10 @@ static int ieee80211_change_mpath(struct wiphy *wiphy, static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop, struct mpath_info *pinfo) { - if (mpath->next_hop) - memcpy(next_hop, mpath->next_hop->sta.addr, ETH_ALEN); + struct sta_info *next_hop_sta = rcu_dereference(mpath->next_hop); + + if (next_hop_sta) + memcpy(next_hop, next_hop_sta->sta.addr, ETH_ALEN); else memset(next_hop, 0, ETH_ALEN); @@ -1471,6 +1502,8 @@ int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata, enum ieee80211_smps_mode old_req; int err; + lockdep_assert_held(&sdata->u.mgd.mtx); + old_req = sdata->u.mgd.req_smps; sdata->u.mgd.req_smps = smps_mode; diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 2dabdf7680d06..bae23ad4d8a34 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -172,9 +172,9 @@ static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata, if (sdata->vif.type != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; - mutex_lock(&local->iflist_mtx); + mutex_lock(&sdata->u.mgd.mtx); err = __ieee80211_request_smps(sdata, smps_mode); - mutex_unlock(&local->iflist_mtx); + mutex_unlock(&sdata->u.mgd.mtx); return err; } diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index c04a1396cf8d9..c008232731eb0 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -92,6 +92,31 @@ static ssize_t sta_inactive_ms_read(struct file *file, char __user *userbuf, } STA_OPS(inactive_ms); + +static ssize_t sta_connected_time_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct sta_info *sta = file->private_data; + struct timespec uptime; + struct tm result; + long connected_time_secs; + char buf[100]; + int res; + do_posix_clock_monotonic_gettime(&uptime); + connected_time_secs = uptime.tv_sec - sta->last_connected; + time_to_tm(connected_time_secs, 0, &result); + result.tm_year -= 70; + result.tm_mday -= 1; + res = scnprintf(buf, sizeof(buf), + "years - %d\nmonths - %d\ndays - %d\nclock - %d:%d:%d\n\n", + 
result.tm_year, result.tm_mon, result.tm_mday, + result.tm_hour, result.tm_min, result.tm_sec); + return simple_read_from_buffer(userbuf, count, ppos, buf, res); +} +STA_OPS(connected_time); + + + static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { @@ -324,6 +349,7 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta) DEBUGFS_ADD(flags); DEBUGFS_ADD(num_ps_buf_frames); DEBUGFS_ADD(inactive_ms); + DEBUGFS_ADD(connected_time); DEBUGFS_ADD(last_seq_ctrl); DEBUGFS_ADD(agg_status); DEBUGFS_ADD(dev); diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 165a4518bb48e..cac35ff14b86e 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -639,18 +639,14 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs; struct ieee80211_local *local = hw_to_local(mp->hw); u16 sta_cap = sta->ht_cap.cap; + int n_supported = 0; int ack_dur; int stbc; int i; /* fall back to the old minstrel for legacy stations */ - if (!sta->ht_cap.ht_supported) { - msp->is_ht = false; - memset(&msp->legacy, 0, sizeof(msp->legacy)); - msp->legacy.r = msp->ratelist; - msp->legacy.sample_table = msp->sample_table; - return mac80211_minstrel.rate_init(priv, sband, sta, &msp->legacy); - } + if (!sta->ht_cap.ht_supported) + goto use_legacy; BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) != MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS); @@ -705,7 +701,22 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, mi->groups[i].supported = mcs->rx_mask[minstrel_mcs_groups[i].streams - 1]; + + if (mi->groups[i].supported) + n_supported++; } + + if (!n_supported) + goto use_legacy; + + return; + +use_legacy: + msp->is_ht = false; + memset(&msp->legacy, 0, sizeof(msp->legacy)); + msp->legacy.r = msp->ratelist; + msp->legacy.sample_table = msp->sample_table; + return mac80211_minstrel.rate_init(priv, sband, sta, &msp->legacy); } static void diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index a6701ed87f0d1..b3c0c304c9bf4 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1136,14 +1136,23 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) { u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, NL80211_IFTYPE_ADHOC); - if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0) + if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0) { sta->last_rx = jiffies; + if (ieee80211_is_data(hdr->frame_control)) { + sta->last_rx_rate_idx = status->rate_idx; + sta->last_rx_rate_flag = status->flag; + } + } } else if (!is_multicast_ether_addr(hdr->addr1)) { /* * Mesh beacons will update last_rx when if they are found to * match the current local configuration when processed. 
*/ sta->last_rx = jiffies; + if (ieee80211_is_data(hdr->frame_control)) { + sta->last_rx_rate_idx = status->rate_idx; + sta->last_rx_rate_flag = status->flag; + } } if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index c426504ed1cfe..61b0f14f60695 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -228,6 +228,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, { struct ieee80211_local *local = sdata->local; struct sta_info *sta; + struct timespec uptime; int i; sta = kzalloc(sizeof(*sta) + local->hw.sta_data_size, gfp); @@ -243,7 +244,10 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, memcpy(sta->sta.addr, addr, ETH_ALEN); sta->local = local; sta->sdata = sdata; + sta->last_rx = jiffies; + do_posix_clock_monotonic_gettime(&uptime); + sta->last_connected = uptime.tv_sec; ewma_init(&sta->avg_signal, 1024, 8); if (sta_prepare_rate_control(local, sta, gfp)) { @@ -697,6 +701,8 @@ static int __must_check __sta_info_destroy(struct sta_info *sta) #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ cancel_work_sync(&sta->drv_unblock_wk); + cfg80211_del_sta(sdata->dev, sta->sta.addr, GFP_KERNEL); + rate_control_remove_sta_debugfs(sta); ieee80211_sta_debugfs_remove(sta); diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index bbdd2a86a94b2..cb7809ef9d2e4 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -207,6 +207,8 @@ enum plink_state { * @rate_ctrl_priv: rate control private per-STA pointer * @last_tx_rate: rate used for last transmit, to report to userspace as * "the" transmit rate + * @last_rx_rate_idx: rx status rate index of the last data packet + * @last_rx_rate_flag: rx status flag of the last data packet * @lock: used for locking all fields that require locking, see comments * in the header file. 
* @flaglock: spinlock for flags accesses @@ -222,6 +224,7 @@ enum plink_state { * @rx_bytes: Number of bytes received from this STA * @wep_weak_iv_count: number of weak WEP IVs received from this station * @last_rx: time (in jiffies) when last frame was received from this STA + * @last_connected: time (in seconds) when a station got connected * @num_duplicates: number of duplicate frames received from this STA * @rx_fragments: number of received MPDUs * @rx_dropped: number of dropped MPDUs from this STA @@ -291,6 +294,7 @@ struct sta_info { unsigned long rx_packets, rx_bytes; unsigned long wep_weak_iv_count; unsigned long last_rx; + long last_connected; unsigned long num_duplicates; unsigned long rx_fragments; unsigned long rx_dropped; @@ -309,6 +313,8 @@ struct sta_info { unsigned long tx_bytes; unsigned long tx_fragments; struct ieee80211_tx_rate last_tx_rate; + int last_rx_rate_idx; + int last_rx_rate_flag; u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1]; /* diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index b0beaa58246bb..306533ba0d3df 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, return cpu_to_le16(dur); } -static int inline is_ieee80211_device(struct ieee80211_local *local, +static inline int is_ieee80211_device(struct ieee80211_local *local, struct net_device *dev) { return local == wdev_priv(dev->ieee80211_ptr); diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 1534f2b44cafb..a776cc65bc3eb 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -830,6 +830,8 @@ config NETFILTER_XT_MATCH_OWNER based on who created the socket: the user or group. It is also possible to check whether a socket actually exists. + Conflicts with '"quota, tag, uid" match' + config NETFILTER_XT_MATCH_POLICY tristate 'IPsec "policy" match support' depends on XFRM @@ -863,6 +865,22 @@ config NETFILTER_XT_MATCH_PKTTYPE To compile it as a module, choose M here. If unsure, say N. +config NETFILTER_XT_MATCH_QTAGUID + bool '"quota, tag, owner" match and stats support' + depends on NETFILTER_XT_MATCH_SOCKET + depends on NETFILTER_XT_MATCH_OWNER=n + help + This option replaces the `owner' match. In addition to matching + on uid, it keeps stats based on a tag assigned to a socket. + The full tag is comprised of a UID and an accounting tag. + The tags are assignable to sockets from user space (e.g. a download + manager can assign the socket to another UID for accounting). + Stats and control are done via /proc/net/xt_qtaguid/. + It replaces owner as it takes the same arguments, but should + really be recognized by the iptables tool. + + If unsure, say `N'. + config NETFILTER_XT_MATCH_QUOTA tristate '"quota" match support' depends on NETFILTER_ADVANCED @@ -873,6 +891,30 @@ config NETFILTER_XT_MATCH_QUOTA If you want to compile it as a module, say M here and read . If unsure, say `N'. +config NETFILTER_XT_MATCH_QUOTA2 + tristate '"quota2" match support' + depends on NETFILTER_ADVANCED + help + This option adds a `quota2' match, which allows to match on a + byte counter correctly and not per CPU. + It allows naming the quotas. + This is based on http://xtables-addons.git.sourceforge.net + + If you want to compile it as a module, say M here and read + . If unsure, say `N'. 
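
The NETFILTER_XT_MATCH_QTAGUID help text above refers to a "full tag" built from a UID plus an accounting tag. As a rough illustration only, the C sketch below shows one way a 64-bit tag could pack the two values. It is a sketch under stated assumptions: the real definitions live in xt_qtaguid_internal.h, which is not shown in this excerpt, so the exact widths, masks and helper bodies here are guesses, even though names such as get_uid_from_tag() do appear later in xt_qtaguid.c.

#include <linux/types.h>

typedef u64 tag_t;	/* assumed layout: accounting tag in the upper 32 bits, uid in the lower 32 bits */

/* Turn a user-chosen 32-bit value into an accounting tag (assumed helper). */
static inline tag_t make_atag_from_value(u32 value)
{
	return (tag_t)value << 32;
}

/* Combine an accounting tag with a socket owner's uid into a full tag (assumed helper). */
static inline tag_t combine_atag_with_uid(tag_t acct_tag, uid_t uid)
{
	return acct_tag | uid;
}

/* Recover the uid half of a full tag (assumed body for the helper used later in this patch). */
static inline uid_t get_uid_from_tag(tag_t tag)
{
	return tag & 0xFFFFFFFFULL;
}

Keeping stats per full tag is what lets the /proc/net/xt_qtaguid/ files break traffic down by both the owning UID and the accounting tag assigned from user space.
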
+ +config NETFILTER_XT_MATCH_QUOTA2_LOG + bool '"quota2" Netfilter LOG support' + depends on NETFILTER_XT_MATCH_QUOTA2 + depends on IP_NF_TARGET_ULOG=n # not yes, not module, just no + default n + help + This option allows `quota2' to log ONCE when a quota limit + is passed. It logs via NETLINK using the NETLINK_NFLOG family. + It logs similarly to how ipt_ULOG would without data. + + If unsure, say `N'. + config NETFILTER_XT_MATCH_RATEEST tristate '"rateest" match support' depends on NETFILTER_ADVANCED diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index 441050f31111a..210e607491a2f 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -88,7 +88,9 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_OWNER) += xt_owner.o obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o +obj-$(CONFIG_NETFILTER_XT_MATCH_QTAGUID) += xt_qtaguid_print.o xt_qtaguid.o obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o +obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA2) += xt_quota2.o obj-$(CONFIG_NETFILTER_XT_MATCH_RATEEST) += xt_rateest.o obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o obj-$(CONFIG_NETFILTER_XT_MATCH_RECENT) += xt_recent.o diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 84f4fcc5884be..842f53081f579 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -833,7 +833,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl, /* It exists; we have (non-exclusive) reference. */ if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) { - *ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY; + *ctinfo = IP_CT_ESTABLISHED_REPLY; /* Please set reply bit if this packet OK */ *set_reply = 1; } else { @@ -1126,7 +1126,7 @@ static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb) /* This ICMP is in reverse direction to the packet which caused it */ ct = nf_ct_get(skb, &ctinfo); if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) - ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY; + ctinfo = IP_CT_RELATED_REPLY; else ctinfo = IP_CT_RELATED; diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c index e17cb7c7dd8fd..6f5801eac9992 100644 --- a/net/netfilter/nf_conntrack_ftp.c +++ b/net/netfilter/nf_conntrack_ftp.c @@ -368,7 +368,7 @@ static int help(struct sk_buff *skb, /* Until there's been traffic both ways, don't look in packets. */ if (ctinfo != IP_CT_ESTABLISHED && - ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) { + ctinfo != IP_CT_ESTABLISHED_REPLY) { pr_debug("ftp: Conntrackinfo = %u\n", ctinfo); return NF_ACCEPT; } diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c index 867882313e499..bcd5ed6b71304 100644 --- a/net/netfilter/nf_conntrack_h323_asn1.c +++ b/net/netfilter/nf_conntrack_h323_asn1.c @@ -631,7 +631,7 @@ static int decode_seqof(bitstr_t *bs, const struct field_t *f, CHECK_BOUND(bs, 2); count = *bs->cur++; count <<= 8; - count = *bs->cur++; + count += *bs->cur++; break; case SEMI: BYTE_ALIGN(bs); diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index b969025cf82fe..547c05be7f5f7 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c @@ -571,10 +571,9 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff, int ret; /* Until there's been traffic both ways, don't look in packets. 
*/ - if (ctinfo != IP_CT_ESTABLISHED && - ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) { + if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY) return NF_ACCEPT; - } + pr_debug("nf_ct_h245: skblen = %u\n", skb->len); spin_lock_bh(&nf_h323_lock); @@ -1117,10 +1116,9 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff, int ret; /* Until there's been traffic both ways, don't look in packets. */ - if (ctinfo != IP_CT_ESTABLISHED && - ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) { + if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY) return NF_ACCEPT; - } + pr_debug("nf_ct_q931: skblen = %u\n", skb->len); spin_lock_bh(&nf_h323_lock); diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c index b394aa3187764..4f9390b98697e 100644 --- a/net/netfilter/nf_conntrack_irc.c +++ b/net/netfilter/nf_conntrack_irc.c @@ -125,8 +125,7 @@ static int help(struct sk_buff *skb, unsigned int protoff, return NF_ACCEPT; /* Until there's been traffic both ways, don't look in packets. */ - if (ctinfo != IP_CT_ESTABLISHED && - ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) + if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY) return NF_ACCEPT; /* Not a full tcp header? */ diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c index 088944824e135..2fd4565144def 100644 --- a/net/netfilter/nf_conntrack_pptp.c +++ b/net/netfilter/nf_conntrack_pptp.c @@ -519,8 +519,7 @@ conntrack_pptp_help(struct sk_buff *skb, unsigned int protoff, u_int16_t msg; /* don't do any tracking before tcp handshake complete */ - if (ctinfo != IP_CT_ESTABLISHED && - ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) + if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY) return NF_ACCEPT; nexthdr_off = protoff; diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c index d9e27734b2a22..8501823b3f9b0 100644 --- a/net/netfilter/nf_conntrack_sane.c +++ b/net/netfilter/nf_conntrack_sane.c @@ -78,7 +78,7 @@ static int help(struct sk_buff *skb, ct_sane_info = &nfct_help(ct)->help.ct_sane_info; /* Until there's been traffic both ways, don't look in packets. */ if (ctinfo != IP_CT_ESTABLISHED && - ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY) + ctinfo != IP_CT_ESTABLISHED_REPLY) return NF_ACCEPT; /* Not a full tcp header? */ diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index bcf47eb518eff..95a046a548fd0 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -1422,7 +1422,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff, typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust; if (ctinfo != IP_CT_ESTABLISHED && - ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) + ctinfo != IP_CT_ESTABLISHED_REPLY) return NF_ACCEPT; /* No Data ? */ diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c new file mode 100644 index 0000000000000..88c61bdf1d2de --- /dev/null +++ b/net/netfilter/xt_qtaguid.c @@ -0,0 +1,2991 @@ +/* + * Kernel iptables module to track stats for packets based on user tags. + * + * (C) 2011 Google, Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* + * There are run-time debug flags enabled via the debug_mask module param, or + * via the DEFAULT_DEBUG_MASK. See xt_qtaguid_internal.h. 
+ */
+#define DEBUG
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#include 
+#endif
+
+#include 
+#include "xt_qtaguid_internal.h"
+#include "xt_qtaguid_print.h"
+
+/*
+ * We only use the xt_socket funcs within a similar context to avoid unexpected
+ * return values.
+ */
+#define XT_SOCKET_SUPPORTED_HOOKS \
+	((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN))
+
+
+static const char *module_procdirname = "xt_qtaguid";
+static struct proc_dir_entry *xt_qtaguid_procdir;
+
+static unsigned int proc_iface_perms = S_IRUGO;
+module_param_named(iface_perms, proc_iface_perms, uint, S_IRUGO | S_IWUSR);
+
+static struct proc_dir_entry *xt_qtaguid_stats_file;
+static unsigned int proc_stats_perms = S_IRUGO;
+module_param_named(stats_perms, proc_stats_perms, uint, S_IRUGO | S_IWUSR);
+
+static struct proc_dir_entry *xt_qtaguid_ctrl_file;
+
+/* Everybody can write. But proc_ctrl_write_limited is true by default which
+ * limits what can be controlled. See the can_*() functions.
+ */
+static unsigned int proc_ctrl_perms = S_IRUGO | S_IWUGO;
+module_param_named(ctrl_perms, proc_ctrl_perms, uint, S_IRUGO | S_IWUSR);
+
+/* Limited by default, so the gid of the ctrl and stats proc entries
+ * will limit what can be done. See the can_*() functions.
+ */
+static bool proc_stats_readall_limited = true;
+static bool proc_ctrl_write_limited = true;
+
+module_param_named(stats_readall_limited, proc_stats_readall_limited, bool,
+		   S_IRUGO | S_IWUSR);
+module_param_named(ctrl_write_limited, proc_ctrl_write_limited, bool,
+		   S_IRUGO | S_IWUSR);
+
+/*
+ * Limit the number of active tags (via socket tags) for a given UID.
+ * Multiple processes could share the UID.
+ */
+static int max_sock_tags = DEFAULT_MAX_SOCK_TAGS;
+module_param(max_sock_tags, int, S_IRUGO | S_IWUSR);
+
+/*
+ * After the kernel has initialized this module, it is still possible
+ * to make it passive.
+ * Setting passive to Y:
+ * - the iface stats handling will not act on notifications.
+ * - iptables matches will never match.
+ * - ctrl commands silently succeed.
+ * - stats are always empty.
+ * This is mostly useful when a bug is suspected.
+ */
+static bool module_passive;
+module_param_named(passive, module_passive, bool, S_IRUGO | S_IWUSR);
+
+/*
+ * Control how qtaguid data is tracked per proc/uid.
+ * Setting tag_tracking_passive to Y:
+ * - don't create proc specific structs to track tags
+ * - don't check that active tag stats exceed some limits.
+ * - don't clean up socket tags on process exits.
+ * This is mostly useful when a bug is suspected.
+ */
+static bool qtu_proc_handling_passive;
+module_param_named(tag_tracking_passive, qtu_proc_handling_passive, bool,
+		   S_IRUGO | S_IWUSR);
+
+#define QTU_DEV_NAME "xt_qtaguid"
+
+uint qtaguid_debug_mask = DEFAULT_DEBUG_MASK;
+module_param_named(debug_mask, qtaguid_debug_mask, uint, S_IRUGO | S_IWUSR);
+
+/*---------------------------------------------------------------------------*/
+static const char *iface_stat_procdirname = "iface_stat";
+static struct proc_dir_entry *iface_stat_procdir;
+/*
+ * The iface_stat_all* will go away once userspace gets used to the new fields
+ * that have a format line.
+ */ +static const char *iface_stat_all_procfilename = "iface_stat_all"; +static struct proc_dir_entry *iface_stat_all_procfile; +static const char *iface_stat_fmt_procfilename = "iface_stat_fmt"; +static struct proc_dir_entry *iface_stat_fmt_procfile; + + +/* + * Ordering of locks: + * outer locks: + * iface_stat_list_lock + * sock_tag_list_lock + * inner locks: + * uid_tag_data_tree_lock + * tag_counter_set_list_lock + * Notice how sock_tag_list_lock is held sometimes when uid_tag_data_tree_lock + * is acquired. + * + * Call tree with all lock holders as of 2012-04-27: + * + * iface_stat_fmt_proc_read() + * iface_stat_list_lock + * (struct iface_stat) + * + * qtaguid_ctrl_proc_read() + * sock_tag_list_lock + * (sock_tag_tree) + * (struct proc_qtu_data->sock_tag_list) + * prdebug_full_state() + * sock_tag_list_lock + * (sock_tag_tree) + * uid_tag_data_tree_lock + * (uid_tag_data_tree) + * (proc_qtu_data_tree) + * iface_stat_list_lock + * + * qtaguid_stats_proc_read() + * iface_stat_list_lock + * struct iface_stat->tag_stat_list_lock + * + * qtudev_open() + * uid_tag_data_tree_lock + * + * qtudev_release() + * sock_tag_data_list_lock + * uid_tag_data_tree_lock + * prdebug_full_state() + * sock_tag_list_lock + * uid_tag_data_tree_lock + * iface_stat_list_lock + * + * iface_netdev_event_handler() + * iface_stat_create() + * iface_stat_list_lock + * iface_stat_update() + * iface_stat_list_lock + * + * iface_inetaddr_event_handler() + * iface_stat_create() + * iface_stat_list_lock + * iface_stat_update() + * iface_stat_list_lock + * + * iface_inet6addr_event_handler() + * iface_stat_create_ipv6() + * iface_stat_list_lock + * iface_stat_update() + * iface_stat_list_lock + * + * qtaguid_mt() + * account_for_uid() + * if_tag_stat_update() + * get_sock_stat() + * sock_tag_list_lock + * struct iface_stat->tag_stat_list_lock + * tag_stat_update() + * get_active_counter_set() + * tag_counter_set_list_lock + * tag_stat_update() + * get_active_counter_set() + * tag_counter_set_list_lock + * + * + * qtaguid_ctrl_parse() + * ctrl_cmd_delete() + * sock_tag_list_lock + * tag_counter_set_list_lock + * iface_stat_list_lock + * struct iface_stat->tag_stat_list_lock + * uid_tag_data_tree_lock + * ctrl_cmd_counter_set() + * tag_counter_set_list_lock + * ctrl_cmd_tag() + * sock_tag_list_lock + * (sock_tag_tree) + * get_tag_ref() + * uid_tag_data_tree_lock + * (uid_tag_data_tree) + * uid_tag_data_tree_lock + * (proc_qtu_data_tree) + * ctrl_cmd_untag() + * sock_tag_list_lock + * uid_tag_data_tree_lock + * + */ +static LIST_HEAD(iface_stat_list); +static DEFINE_SPINLOCK(iface_stat_list_lock); + +static struct rb_root sock_tag_tree = RB_ROOT; +static DEFINE_SPINLOCK(sock_tag_list_lock); + +static struct rb_root tag_counter_set_tree = RB_ROOT; +static DEFINE_SPINLOCK(tag_counter_set_list_lock); + +static struct rb_root uid_tag_data_tree = RB_ROOT; +static DEFINE_SPINLOCK(uid_tag_data_tree_lock); + +static struct rb_root proc_qtu_data_tree = RB_ROOT; +/* No proc_qtu_data_tree_lock; use uid_tag_data_tree_lock */ + +static struct qtaguid_event_counts qtu_events; +/*----------------------------------------------*/ +static bool can_manipulate_uids(void) +{ + /* root pwnd */ + return in_egroup_p(xt_qtaguid_ctrl_file->gid) + || unlikely(!current_fsuid()) || unlikely(!proc_ctrl_write_limited) + || unlikely(current_fsuid() == xt_qtaguid_ctrl_file->uid); +} + +static bool can_impersonate_uid(uid_t uid) +{ + return uid == current_fsuid() || can_manipulate_uids(); +} + +static bool can_read_other_uid_stats(uid_t uid) +{ + /* 
root pwnd */ + return in_egroup_p(xt_qtaguid_stats_file->gid) + || unlikely(!current_fsuid()) || uid == current_fsuid() + || unlikely(!proc_stats_readall_limited) + || unlikely(current_fsuid() == xt_qtaguid_ctrl_file->uid); +} + +static inline void dc_add_byte_packets(struct data_counters *counters, int set, + enum ifs_tx_rx direction, + enum ifs_proto ifs_proto, + int bytes, + int packets) +{ + counters->bpc[set][direction][ifs_proto].bytes += bytes; + counters->bpc[set][direction][ifs_proto].packets += packets; +} + +static struct tag_node *tag_node_tree_search(struct rb_root *root, tag_t tag) +{ + struct rb_node *node = root->rb_node; + + while (node) { + struct tag_node *data = rb_entry(node, struct tag_node, node); + int result; + RB_DEBUG("qtaguid: tag_node_tree_search(0x%llx): " + " node=%p data=%p\n", tag, node, data); + result = tag_compare(tag, data->tag); + RB_DEBUG("qtaguid: tag_node_tree_search(0x%llx): " + " data.tag=0x%llx (uid=%u) res=%d\n", + tag, data->tag, get_uid_from_tag(data->tag), result); + if (result < 0) + node = node->rb_left; + else if (result > 0) + node = node->rb_right; + else + return data; + } + return NULL; +} + +static void tag_node_tree_insert(struct tag_node *data, struct rb_root *root) +{ + struct rb_node **new = &(root->rb_node), *parent = NULL; + + /* Figure out where to put new node */ + while (*new) { + struct tag_node *this = rb_entry(*new, struct tag_node, + node); + int result = tag_compare(data->tag, this->tag); + RB_DEBUG("qtaguid: %s(): tag=0x%llx" + " (uid=%u)\n", __func__, + this->tag, + get_uid_from_tag(this->tag)); + parent = *new; + if (result < 0) + new = &((*new)->rb_left); + else if (result > 0) + new = &((*new)->rb_right); + else + BUG(); + } + + /* Add new node and rebalance tree. */ + rb_link_node(&data->node, parent, new); + rb_insert_color(&data->node, root); +} + +static void tag_stat_tree_insert(struct tag_stat *data, struct rb_root *root) +{ + tag_node_tree_insert(&data->tn, root); +} + +static struct tag_stat *tag_stat_tree_search(struct rb_root *root, tag_t tag) +{ + struct tag_node *node = tag_node_tree_search(root, tag); + if (!node) + return NULL; + return rb_entry(&node->node, struct tag_stat, tn.node); +} + +static void tag_counter_set_tree_insert(struct tag_counter_set *data, + struct rb_root *root) +{ + tag_node_tree_insert(&data->tn, root); +} + +static struct tag_counter_set *tag_counter_set_tree_search(struct rb_root *root, + tag_t tag) +{ + struct tag_node *node = tag_node_tree_search(root, tag); + if (!node) + return NULL; + return rb_entry(&node->node, struct tag_counter_set, tn.node); + +} + +static void tag_ref_tree_insert(struct tag_ref *data, struct rb_root *root) +{ + tag_node_tree_insert(&data->tn, root); +} + +static struct tag_ref *tag_ref_tree_search(struct rb_root *root, tag_t tag) +{ + struct tag_node *node = tag_node_tree_search(root, tag); + if (!node) + return NULL; + return rb_entry(&node->node, struct tag_ref, tn.node); +} + +static struct sock_tag *sock_tag_tree_search(struct rb_root *root, + const struct sock *sk) +{ + struct rb_node *node = root->rb_node; + + while (node) { + struct sock_tag *data = rb_entry(node, struct sock_tag, + sock_node); + if (sk < data->sk) + node = node->rb_left; + else if (sk > data->sk) + node = node->rb_right; + else + return data; + } + return NULL; +} + +static void sock_tag_tree_insert(struct sock_tag *data, struct rb_root *root) +{ + struct rb_node **new = &(root->rb_node), *parent = NULL; + + /* Figure out where to put new node */ + while (*new) { + struct 
sock_tag *this = rb_entry(*new, struct sock_tag, + sock_node); + parent = *new; + if (data->sk < this->sk) + new = &((*new)->rb_left); + else if (data->sk > this->sk) + new = &((*new)->rb_right); + else + BUG(); + } + + /* Add new node and rebalance tree. */ + rb_link_node(&data->sock_node, parent, new); + rb_insert_color(&data->sock_node, root); +} + +static void sock_tag_tree_erase(struct rb_root *st_to_free_tree) +{ + struct rb_node *node; + struct sock_tag *st_entry; + + node = rb_first(st_to_free_tree); + while (node) { + st_entry = rb_entry(node, struct sock_tag, sock_node); + node = rb_next(node); + CT_DEBUG("qtaguid: %s(): " + "erase st: sk=%p tag=0x%llx (uid=%u)\n", __func__, + st_entry->sk, + st_entry->tag, + get_uid_from_tag(st_entry->tag)); + rb_erase(&st_entry->sock_node, st_to_free_tree); + sockfd_put(st_entry->socket); + kfree(st_entry); + } +} + +static struct proc_qtu_data *proc_qtu_data_tree_search(struct rb_root *root, + const pid_t pid) +{ + struct rb_node *node = root->rb_node; + + while (node) { + struct proc_qtu_data *data = rb_entry(node, + struct proc_qtu_data, + node); + if (pid < data->pid) + node = node->rb_left; + else if (pid > data->pid) + node = node->rb_right; + else + return data; + } + return NULL; +} + +static void proc_qtu_data_tree_insert(struct proc_qtu_data *data, + struct rb_root *root) +{ + struct rb_node **new = &(root->rb_node), *parent = NULL; + + /* Figure out where to put new node */ + while (*new) { + struct proc_qtu_data *this = rb_entry(*new, + struct proc_qtu_data, + node); + parent = *new; + if (data->pid < this->pid) + new = &((*new)->rb_left); + else if (data->pid > this->pid) + new = &((*new)->rb_right); + else + BUG(); + } + + /* Add new node and rebalance tree. */ + rb_link_node(&data->node, parent, new); + rb_insert_color(&data->node, root); +} + +static void uid_tag_data_tree_insert(struct uid_tag_data *data, + struct rb_root *root) +{ + struct rb_node **new = &(root->rb_node), *parent = NULL; + + /* Figure out where to put new node */ + while (*new) { + struct uid_tag_data *this = rb_entry(*new, + struct uid_tag_data, + node); + parent = *new; + if (data->uid < this->uid) + new = &((*new)->rb_left); + else if (data->uid > this->uid) + new = &((*new)->rb_right); + else + BUG(); + } + + /* Add new node and rebalance tree. */ + rb_link_node(&data->node, parent, new); + rb_insert_color(&data->node, root); +} + +static struct uid_tag_data *uid_tag_data_tree_search(struct rb_root *root, + uid_t uid) +{ + struct rb_node *node = root->rb_node; + + while (node) { + struct uid_tag_data *data = rb_entry(node, + struct uid_tag_data, + node); + if (uid < data->uid) + node = node->rb_left; + else if (uid > data->uid) + node = node->rb_right; + else + return data; + } + return NULL; +} + +/* + * Allocates a new uid_tag_data struct if needed. + * Returns a pointer to the found or allocated uid_tag_data. + * Returns a PTR_ERR on failures, and lock is not held. + * If found is not NULL: + * sets *found to true if not allocated. + * sets *found to false if allocated. 
+ */ +struct uid_tag_data *get_uid_data(uid_t uid, bool *found_res) +{ + struct uid_tag_data *utd_entry; + + /* Look for top level uid_tag_data for the UID */ + utd_entry = uid_tag_data_tree_search(&uid_tag_data_tree, uid); + DR_DEBUG("qtaguid: get_uid_data(%u) utd=%p\n", uid, utd_entry); + + if (found_res) + *found_res = utd_entry; + if (utd_entry) + return utd_entry; + + utd_entry = kzalloc(sizeof(*utd_entry), GFP_ATOMIC); + if (!utd_entry) { + pr_err("qtaguid: get_uid_data(%u): " + "tag data alloc failed\n", uid); + return ERR_PTR(-ENOMEM); + } + + utd_entry->uid = uid; + utd_entry->tag_ref_tree = RB_ROOT; + uid_tag_data_tree_insert(utd_entry, &uid_tag_data_tree); + DR_DEBUG("qtaguid: get_uid_data(%u) new utd=%p\n", uid, utd_entry); + return utd_entry; +} + +/* Never returns NULL. Either PTR_ERR or a valid ptr. */ +static struct tag_ref *new_tag_ref(tag_t new_tag, + struct uid_tag_data *utd_entry) +{ + struct tag_ref *tr_entry; + int res; + + if (utd_entry->num_active_tags + 1 > max_sock_tags) { + pr_info("qtaguid: new_tag_ref(0x%llx): " + "tag ref alloc quota exceeded. max=%d\n", + new_tag, max_sock_tags); + res = -EMFILE; + goto err_res; + + } + + tr_entry = kzalloc(sizeof(*tr_entry), GFP_ATOMIC); + if (!tr_entry) { + pr_err("qtaguid: new_tag_ref(0x%llx): " + "tag ref alloc failed\n", + new_tag); + res = -ENOMEM; + goto err_res; + } + tr_entry->tn.tag = new_tag; + /* tr_entry->num_sock_tags handled by caller */ + utd_entry->num_active_tags++; + tag_ref_tree_insert(tr_entry, &utd_entry->tag_ref_tree); + DR_DEBUG("qtaguid: new_tag_ref(0x%llx): " + " inserted new tag ref %p\n", + new_tag, tr_entry); + return tr_entry; + +err_res: + return ERR_PTR(res); +} + +static struct tag_ref *lookup_tag_ref(tag_t full_tag, + struct uid_tag_data **utd_res) +{ + struct uid_tag_data *utd_entry; + struct tag_ref *tr_entry; + bool found_utd; + uid_t uid = get_uid_from_tag(full_tag); + + DR_DEBUG("qtaguid: lookup_tag_ref(tag=0x%llx (uid=%u))\n", + full_tag, uid); + + utd_entry = get_uid_data(uid, &found_utd); + if (IS_ERR_OR_NULL(utd_entry)) { + if (utd_res) + *utd_res = utd_entry; + return NULL; + } + + tr_entry = tag_ref_tree_search(&utd_entry->tag_ref_tree, full_tag); + if (utd_res) + *utd_res = utd_entry; + DR_DEBUG("qtaguid: lookup_tag_ref(0x%llx) utd_entry=%p tr_entry=%p\n", + full_tag, utd_entry, tr_entry); + return tr_entry; +} + +/* Never returns NULL. Either PTR_ERR or a valid ptr. */ +static struct tag_ref *get_tag_ref(tag_t full_tag, + struct uid_tag_data **utd_res) +{ + struct uid_tag_data *utd_entry; + struct tag_ref *tr_entry; + + DR_DEBUG("qtaguid: get_tag_ref(0x%llx)\n", + full_tag); + spin_lock_bh(&uid_tag_data_tree_lock); + tr_entry = lookup_tag_ref(full_tag, &utd_entry); + BUG_ON(IS_ERR_OR_NULL(utd_entry)); + if (!tr_entry) + tr_entry = new_tag_ref(full_tag, utd_entry); + + spin_unlock_bh(&uid_tag_data_tree_lock); + if (utd_res) + *utd_res = utd_entry; + DR_DEBUG("qtaguid: get_tag_ref(0x%llx) utd=%p tr=%p\n", + full_tag, utd_entry, tr_entry); + return tr_entry; +} + +/* Checks and maybe frees the UID Tag Data entry */ +static void put_utd_entry(struct uid_tag_data *utd_entry) +{ + /* Are we done with the UID tag data entry? 
*/ + if (RB_EMPTY_ROOT(&utd_entry->tag_ref_tree) && + !utd_entry->num_pqd) { + DR_DEBUG("qtaguid: %s(): " + "erase utd_entry=%p uid=%u " + "by pid=%u tgid=%u uid=%u\n", __func__, + utd_entry, utd_entry->uid, + current->pid, current->tgid, current_fsuid()); + BUG_ON(utd_entry->num_active_tags); + rb_erase(&utd_entry->node, &uid_tag_data_tree); + kfree(utd_entry); + } else { + DR_DEBUG("qtaguid: %s(): " + "utd_entry=%p still has %d tags %d proc_qtu_data\n", + __func__, utd_entry, utd_entry->num_active_tags, + utd_entry->num_pqd); + BUG_ON(!(utd_entry->num_active_tags || + utd_entry->num_pqd)); + } +} + +/* + * If no sock_tags are using this tag_ref, + * decrements refcount of utd_entry, removes tr_entry + * from utd_entry->tag_ref_tree and frees. + */ +static void free_tag_ref_from_utd_entry(struct tag_ref *tr_entry, + struct uid_tag_data *utd_entry) +{ + DR_DEBUG("qtaguid: %s(): %p tag=0x%llx (uid=%u)\n", __func__, + tr_entry, tr_entry->tn.tag, + get_uid_from_tag(tr_entry->tn.tag)); + if (!tr_entry->num_sock_tags) { + BUG_ON(!utd_entry->num_active_tags); + utd_entry->num_active_tags--; + rb_erase(&tr_entry->tn.node, &utd_entry->tag_ref_tree); + DR_DEBUG("qtaguid: %s(): erased %p\n", __func__, tr_entry); + kfree(tr_entry); + } +} + +static void put_tag_ref_tree(tag_t full_tag, struct uid_tag_data *utd_entry) +{ + struct rb_node *node; + struct tag_ref *tr_entry; + tag_t acct_tag; + + DR_DEBUG("qtaguid: %s(tag=0x%llx (uid=%u))\n", __func__, + full_tag, get_uid_from_tag(full_tag)); + acct_tag = get_atag_from_tag(full_tag); + node = rb_first(&utd_entry->tag_ref_tree); + while (node) { + tr_entry = rb_entry(node, struct tag_ref, tn.node); + node = rb_next(node); + if (!acct_tag || tr_entry->tn.tag == full_tag) + free_tag_ref_from_utd_entry(tr_entry, utd_entry); + } +} + +static int read_proc_u64(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int len; + uint64_t value; + char *p = page; + uint64_t *iface_entry = data; + + if (!data) + return 0; + + value = *iface_entry; + p += sprintf(p, "%llu\n", value); + len = (p - page) - off; + *eof = (len <= count) ? 1 : 0; + *start = page + off; + return len; +} + +static int read_proc_bool(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int len; + bool value; + char *p = page; + bool *bool_entry = data; + + if (!data) + return 0; + + value = *bool_entry; + p += sprintf(p, "%u\n", value); + len = (p - page) - off; + *eof = (len <= count) ? 1 : 0; + *start = page + off; + return len; +} + +static int get_active_counter_set(tag_t tag) +{ + int active_set = 0; + struct tag_counter_set *tcs; + + MT_DEBUG("qtaguid: get_active_counter_set(tag=0x%llx)" + " (uid=%u)\n", + tag, get_uid_from_tag(tag)); + /* For now we only handle UID tags for active sets */ + tag = get_utag_from_tag(tag); + spin_lock_bh(&tag_counter_set_list_lock); + tcs = tag_counter_set_tree_search(&tag_counter_set_tree, tag); + if (tcs) + active_set = tcs->active_set; + spin_unlock_bh(&tag_counter_set_list_lock); + return active_set; +} + +/* + * Find the entry for tracking the specified interface. 
+ * Caller must hold iface_stat_list_lock + */ +static struct iface_stat *get_iface_entry(const char *ifname) +{ + struct iface_stat *iface_entry; + + /* Find the entry for tracking the specified tag within the interface */ + if (ifname == NULL) { + pr_info("qtaguid: iface_stat: get() NULL device name\n"); + return NULL; + } + + /* Iterate over interfaces */ + list_for_each_entry(iface_entry, &iface_stat_list, list) { + if (!strcmp(ifname, iface_entry->ifname)) + goto done; + } + iface_entry = NULL; +done: + return iface_entry; +} + +/* This is for fmt2 only */ +static int pp_iface_stat_line(bool header, char *outp, + int char_count, struct iface_stat *iface_entry) +{ + int len; + if (header) { + len = snprintf(outp, char_count, + "ifname " + "total_skb_rx_bytes total_skb_rx_packets " + "total_skb_tx_bytes total_skb_tx_packets " + "rx_tcp_bytes rx_tcp_packets " + "rx_udp_bytes rx_udp_packets " + "rx_other_bytes rx_other_packets " + "tx_tcp_bytes tx_tcp_packets " + "tx_udp_bytes tx_udp_packets " + "tx_other_bytes tx_other_packets\n" + ); + } else { + struct data_counters *cnts; + int cnt_set = 0; /* We only use one set for the device */ + cnts = &iface_entry->totals_via_skb; + len = snprintf( + outp, char_count, + "%s " + "%llu %llu %llu %llu %llu %llu %llu %llu " + "%llu %llu %llu %llu %llu %llu %llu %llu\n", + iface_entry->ifname, + dc_sum_bytes(cnts, cnt_set, IFS_RX), + dc_sum_packets(cnts, cnt_set, IFS_RX), + dc_sum_bytes(cnts, cnt_set, IFS_TX), + dc_sum_packets(cnts, cnt_set, IFS_TX), + cnts->bpc[cnt_set][IFS_RX][IFS_TCP].bytes, + cnts->bpc[cnt_set][IFS_RX][IFS_TCP].packets, + cnts->bpc[cnt_set][IFS_RX][IFS_UDP].bytes, + cnts->bpc[cnt_set][IFS_RX][IFS_UDP].packets, + cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].bytes, + cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].packets, + cnts->bpc[cnt_set][IFS_TX][IFS_TCP].bytes, + cnts->bpc[cnt_set][IFS_TX][IFS_TCP].packets, + cnts->bpc[cnt_set][IFS_TX][IFS_UDP].bytes, + cnts->bpc[cnt_set][IFS_TX][IFS_UDP].packets, + cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].bytes, + cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].packets); + } + return len; +} + +static int iface_stat_fmt_proc_read(char *page, char **num_items_returned, + off_t items_to_skip, int char_count, + int *eof, void *data) +{ + char *outp = page; + int item_index = 0; + int len; + int fmt = (int)data; /* The data is just 1 (old) or 2 (uses fmt) */ + struct iface_stat *iface_entry; + struct rtnl_link_stats64 dev_stats, *stats; + struct rtnl_link_stats64 no_dev_stats = {0}; + + if (unlikely(module_passive)) { + *eof = 1; + return 0; + } + + CT_DEBUG("qtaguid:proc iface_stat_fmt " + "pid=%u tgid=%u uid=%u " + "page=%p *num_items_returned=%p off=%ld " + "char_count=%d *eof=%d\n", + current->pid, current->tgid, current_fsuid(), + page, *num_items_returned, + items_to_skip, char_count, *eof); + + if (*eof) + return 0; + + if (fmt == 2 && item_index++ >= items_to_skip) { + len = pp_iface_stat_line(true, outp, char_count, NULL); + if (len >= char_count) { + *outp = '\0'; + return outp - page; + } + outp += len; + char_count -= len; + (*num_items_returned)++; + } + + /* + * This lock will prevent iface_stat_update() from changing active, + * and in turn prevent an interface from unregistering itself. 
+ */ + spin_lock_bh(&iface_stat_list_lock); + list_for_each_entry(iface_entry, &iface_stat_list, list) { + if (item_index++ < items_to_skip) + continue; + + if (iface_entry->active) { + stats = dev_get_stats(iface_entry->net_dev, + &dev_stats); + } else { + stats = &no_dev_stats; + } + /* + * If the meaning of the data changes, then update the fmtX + * string. + */ + if (fmt == 1) { + len = snprintf( + outp, char_count, + "%s %d " + "%llu %llu %llu %llu " + "%llu %llu %llu %llu\n", + iface_entry->ifname, + iface_entry->active, + iface_entry->totals_via_dev[IFS_RX].bytes, + iface_entry->totals_via_dev[IFS_RX].packets, + iface_entry->totals_via_dev[IFS_TX].bytes, + iface_entry->totals_via_dev[IFS_TX].packets, + stats->rx_bytes, stats->rx_packets, + stats->tx_bytes, stats->tx_packets + ); + } else { + len = pp_iface_stat_line(false, outp, char_count, + iface_entry); + } + if (len >= char_count) { + spin_unlock_bh(&iface_stat_list_lock); + *outp = '\0'; + return outp - page; + } + outp += len; + char_count -= len; + (*num_items_returned)++; + } + spin_unlock_bh(&iface_stat_list_lock); + + *eof = 1; + return outp - page; +} + +static void iface_create_proc_worker(struct work_struct *work) +{ + struct proc_dir_entry *proc_entry; + struct iface_stat_work *isw = container_of(work, struct iface_stat_work, + iface_work); + struct iface_stat *new_iface = isw->iface_entry; + + /* iface_entries are not deleted, so safe to manipulate. */ + proc_entry = proc_mkdir(new_iface->ifname, iface_stat_procdir); + if (IS_ERR_OR_NULL(proc_entry)) { + pr_err("qtaguid: iface_stat: create_proc(): alloc failed.\n"); + kfree(isw); + return; + } + + new_iface->proc_ptr = proc_entry; + + create_proc_read_entry("tx_bytes", proc_iface_perms, proc_entry, + read_proc_u64, + &new_iface->totals_via_dev[IFS_TX].bytes); + create_proc_read_entry("rx_bytes", proc_iface_perms, proc_entry, + read_proc_u64, + &new_iface->totals_via_dev[IFS_RX].bytes); + create_proc_read_entry("tx_packets", proc_iface_perms, proc_entry, + read_proc_u64, + &new_iface->totals_via_dev[IFS_TX].packets); + create_proc_read_entry("rx_packets", proc_iface_perms, proc_entry, + read_proc_u64, + &new_iface->totals_via_dev[IFS_RX].packets); + create_proc_read_entry("active", proc_iface_perms, proc_entry, + read_proc_bool, &new_iface->active); + + IF_DEBUG("qtaguid: iface_stat: create_proc(): done " + "entry=%p dev=%s\n", new_iface, new_iface->ifname); + kfree(isw); +} + +/* + * Will set the entry's active state, and + * update the net_dev accordingly also. + */ +static void _iface_stat_set_active(struct iface_stat *entry, + struct net_device *net_dev, + bool activate) +{ + if (activate) { + entry->net_dev = net_dev; + entry->active = true; + IF_DEBUG("qtaguid: %s(%s): " + "enable tracking. rfcnt=%d\n", __func__, + entry->ifname, + percpu_read(*net_dev->pcpu_refcnt)); + } else { + entry->active = false; + entry->net_dev = NULL; + IF_DEBUG("qtaguid: %s(%s): " + "disable tracking. 
rfcnt=%d\n", __func__, + entry->ifname, + percpu_read(*net_dev->pcpu_refcnt)); + + } +} + +/* Caller must hold iface_stat_list_lock */ +static struct iface_stat *iface_alloc(struct net_device *net_dev) +{ + struct iface_stat *new_iface; + struct iface_stat_work *isw; + + new_iface = kzalloc(sizeof(*new_iface), GFP_ATOMIC); + if (new_iface == NULL) { + pr_err("qtaguid: iface_stat: create(%s): " + "iface_stat alloc failed\n", net_dev->name); + return NULL; + } + new_iface->ifname = kstrdup(net_dev->name, GFP_ATOMIC); + if (new_iface->ifname == NULL) { + pr_err("qtaguid: iface_stat: create(%s): " + "ifname alloc failed\n", net_dev->name); + kfree(new_iface); + return NULL; + } + spin_lock_init(&new_iface->tag_stat_list_lock); + new_iface->tag_stat_tree = RB_ROOT; + _iface_stat_set_active(new_iface, net_dev, true); + + /* + * ipv6 notifier chains are atomic :( + * No create_proc_read_entry() for you! + */ + isw = kmalloc(sizeof(*isw), GFP_ATOMIC); + if (!isw) { + pr_err("qtaguid: iface_stat: create(%s): " + "work alloc failed\n", new_iface->ifname); + _iface_stat_set_active(new_iface, net_dev, false); + kfree(new_iface->ifname); + kfree(new_iface); + return NULL; + } + isw->iface_entry = new_iface; + INIT_WORK(&isw->iface_work, iface_create_proc_worker); + schedule_work(&isw->iface_work); + list_add(&new_iface->list, &iface_stat_list); + return new_iface; +} + +static void iface_check_stats_reset_and_adjust(struct net_device *net_dev, + struct iface_stat *iface) +{ + struct rtnl_link_stats64 dev_stats, *stats; + bool stats_rewound; + + stats = dev_get_stats(net_dev, &dev_stats); + /* No empty packets */ + stats_rewound = + (stats->rx_bytes < iface->last_known[IFS_RX].bytes) + || (stats->tx_bytes < iface->last_known[IFS_TX].bytes); + + IF_DEBUG("qtaguid: %s(%s): iface=%p netdev=%p " + "bytes rx/tx=%llu/%llu " + "active=%d last_known=%d " + "stats_rewound=%d\n", __func__, + net_dev ? net_dev->name : "?", + iface, net_dev, + stats->rx_bytes, stats->tx_bytes, + iface->active, iface->last_known_valid, stats_rewound); + + if (iface->active && iface->last_known_valid && stats_rewound) { + pr_warn_once("qtaguid: iface_stat: %s(%s): " + "iface reset its stats unexpectedly\n", __func__, + net_dev->name); + + iface->totals_via_dev[IFS_TX].bytes += + iface->last_known[IFS_TX].bytes; + iface->totals_via_dev[IFS_TX].packets += + iface->last_known[IFS_TX].packets; + iface->totals_via_dev[IFS_RX].bytes += + iface->last_known[IFS_RX].bytes; + iface->totals_via_dev[IFS_RX].packets += + iface->last_known[IFS_RX].packets; + iface->last_known_valid = false; + IF_DEBUG("qtaguid: %s(%s): iface=%p " + "used last known bytes rx/tx=%llu/%llu\n", __func__, + iface->ifname, iface, iface->last_known[IFS_RX].bytes, + iface->last_known[IFS_TX].bytes); + } +} + +/* + * Create a new entry for tracking the specified interface. + * Do nothing if the entry already exists. + * Called when an interface is configured with a valid IP address. + */ +static void iface_stat_create(struct net_device *net_dev, + struct in_ifaddr *ifa) +{ + struct in_device *in_dev = NULL; + const char *ifname; + struct iface_stat *entry; + __be32 ipaddr = 0; + struct iface_stat *new_iface; + + IF_DEBUG("qtaguid: iface_stat: create(%s): ifa=%p netdev=%p\n", + net_dev ? 
net_dev->name : "?", + ifa, net_dev); + if (!net_dev) { + pr_err("qtaguid: iface_stat: create(): no net dev\n"); + return; + } + + ifname = net_dev->name; + if (!ifa) { + in_dev = in_dev_get(net_dev); + if (!in_dev) { + pr_err("qtaguid: iface_stat: create(%s): no inet dev\n", + ifname); + return; + } + IF_DEBUG("qtaguid: iface_stat: create(%s): in_dev=%p\n", + ifname, in_dev); + for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { + IF_DEBUG("qtaguid: iface_stat: create(%s): " + "ifa=%p ifa_label=%s\n", + ifname, ifa, + ifa->ifa_label ? ifa->ifa_label : "(null)"); + if (ifa->ifa_label && !strcmp(ifname, ifa->ifa_label)) + break; + } + } + + if (!ifa) { + IF_DEBUG("qtaguid: iface_stat: create(%s): no matching IP\n", + ifname); + goto done_put; + } + ipaddr = ifa->ifa_local; + + spin_lock_bh(&iface_stat_list_lock); + entry = get_iface_entry(ifname); + if (entry != NULL) { + IF_DEBUG("qtaguid: iface_stat: create(%s): entry=%p\n", + ifname, entry); + iface_check_stats_reset_and_adjust(net_dev, entry); + _iface_stat_set_active(entry, net_dev, true); + IF_DEBUG("qtaguid: %s(%s): " + "tracking now %d on ip=%pI4\n", __func__, + entry->ifname, true, &ipaddr); + goto done_unlock_put; + } + + new_iface = iface_alloc(net_dev); + IF_DEBUG("qtaguid: iface_stat: create(%s): done " + "entry=%p ip=%pI4\n", ifname, new_iface, &ipaddr); +done_unlock_put: + spin_unlock_bh(&iface_stat_list_lock); +done_put: + if (in_dev) + in_dev_put(in_dev); +} + +static void iface_stat_create_ipv6(struct net_device *net_dev, + struct inet6_ifaddr *ifa) +{ + struct in_device *in_dev; + const char *ifname; + struct iface_stat *entry; + struct iface_stat *new_iface; + int addr_type; + + IF_DEBUG("qtaguid: iface_stat: create6(): ifa=%p netdev=%p->name=%s\n", + ifa, net_dev, net_dev ? 
net_dev->name : ""); + if (!net_dev) { + pr_err("qtaguid: iface_stat: create6(): no net dev!\n"); + return; + } + ifname = net_dev->name; + + in_dev = in_dev_get(net_dev); + if (!in_dev) { + pr_err("qtaguid: iface_stat: create6(%s): no inet dev\n", + ifname); + return; + } + + IF_DEBUG("qtaguid: iface_stat: create6(%s): in_dev=%p\n", + ifname, in_dev); + + if (!ifa) { + IF_DEBUG("qtaguid: iface_stat: create6(%s): no matching IP\n", + ifname); + goto done_put; + } + addr_type = ipv6_addr_type(&ifa->addr); + + spin_lock_bh(&iface_stat_list_lock); + entry = get_iface_entry(ifname); + if (entry != NULL) { + IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__, + ifname, entry); + iface_check_stats_reset_and_adjust(net_dev, entry); + _iface_stat_set_active(entry, net_dev, true); + IF_DEBUG("qtaguid: %s(%s): " + "tracking now %d on ip=%pI6c\n", __func__, + entry->ifname, true, &ifa->addr); + goto done_unlock_put; + } + + new_iface = iface_alloc(net_dev); + IF_DEBUG("qtaguid: iface_stat: create6(%s): done " + "entry=%p ip=%pI6c\n", ifname, new_iface, &ifa->addr); + +done_unlock_put: + spin_unlock_bh(&iface_stat_list_lock); +done_put: + in_dev_put(in_dev); +} + +static struct sock_tag *get_sock_stat_nl(const struct sock *sk) +{ + MT_DEBUG("qtaguid: get_sock_stat_nl(sk=%p)\n", sk); + return sock_tag_tree_search(&sock_tag_tree, sk); +} + +static struct sock_tag *get_sock_stat(const struct sock *sk) +{ + struct sock_tag *sock_tag_entry; + MT_DEBUG("qtaguid: get_sock_stat(sk=%p)\n", sk); + if (!sk) + return NULL; + spin_lock_bh(&sock_tag_list_lock); + sock_tag_entry = get_sock_stat_nl(sk); + spin_unlock_bh(&sock_tag_list_lock); + return sock_tag_entry; +} + +static int ipx_proto(const struct sk_buff *skb, + struct xt_action_param *par) +{ + int thoff, tproto; + + switch (par->family) { + case NFPROTO_IPV6: + tproto = ipv6_find_hdr(skb, &thoff, -1, NULL); + if (tproto < 0) + MT_DEBUG("%s(): transport header not found in ipv6" + " skb=%p\n", __func__, skb); + break; + case NFPROTO_IPV4: + tproto = ip_hdr(skb)->protocol; + break; + default: + tproto = IPPROTO_RAW; + } + return tproto; +} + +static void +data_counters_update(struct data_counters *dc, int set, + enum ifs_tx_rx direction, int proto, int bytes) +{ + switch (proto) { + case IPPROTO_TCP: + dc_add_byte_packets(dc, set, direction, IFS_TCP, bytes, 1); + break; + case IPPROTO_UDP: + dc_add_byte_packets(dc, set, direction, IFS_UDP, bytes, 1); + break; + case IPPROTO_IP: + default: + dc_add_byte_packets(dc, set, direction, IFS_PROTO_OTHER, bytes, + 1); + break; + } +} + +/* + * Update stats for the specified interface. Do nothing if the entry + * does not exist (when a device was never configured with an IP address). + * Called when an device is being unregistered. 
+ */ +static void iface_stat_update(struct net_device *net_dev, bool stash_only) +{ + struct rtnl_link_stats64 dev_stats, *stats; + struct iface_stat *entry; + + stats = dev_get_stats(net_dev, &dev_stats); + spin_lock_bh(&iface_stat_list_lock); + entry = get_iface_entry(net_dev->name); + if (entry == NULL) { + IF_DEBUG("qtaguid: iface_stat: update(%s): not tracked\n", + net_dev->name); + spin_unlock_bh(&iface_stat_list_lock); + return; + } + + IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__, + net_dev->name, entry); + if (!entry->active) { + IF_DEBUG("qtaguid: %s(%s): already disabled\n", __func__, + net_dev->name); + spin_unlock_bh(&iface_stat_list_lock); + return; + } + + if (stash_only) { + entry->last_known[IFS_TX].bytes = stats->tx_bytes; + entry->last_known[IFS_TX].packets = stats->tx_packets; + entry->last_known[IFS_RX].bytes = stats->rx_bytes; + entry->last_known[IFS_RX].packets = stats->rx_packets; + entry->last_known_valid = true; + IF_DEBUG("qtaguid: %s(%s): " + "dev stats stashed rx/tx=%llu/%llu\n", __func__, + net_dev->name, stats->rx_bytes, stats->tx_bytes); + spin_unlock_bh(&iface_stat_list_lock); + return; + } + entry->totals_via_dev[IFS_TX].bytes += stats->tx_bytes; + entry->totals_via_dev[IFS_TX].packets += stats->tx_packets; + entry->totals_via_dev[IFS_RX].bytes += stats->rx_bytes; + entry->totals_via_dev[IFS_RX].packets += stats->rx_packets; + /* We don't need the last_known[] anymore */ + entry->last_known_valid = false; + _iface_stat_set_active(entry, net_dev, false); + IF_DEBUG("qtaguid: %s(%s): " + "disable tracking. rx/tx=%llu/%llu\n", __func__, + net_dev->name, stats->rx_bytes, stats->tx_bytes); + spin_unlock_bh(&iface_stat_list_lock); +} + +/* + * Update stats for the specified interface from the skb. + * Do nothing if the entry + * does not exist (when a device was never configured with an IP address). + * Called on each sk. + */ +static void iface_stat_update_from_skb(const struct sk_buff *skb, + struct xt_action_param *par) +{ + struct iface_stat *entry; + const struct net_device *el_dev; + enum ifs_tx_rx direction = par->in ? IFS_RX : IFS_TX; + int bytes = skb->len; + int proto; + + if (!skb->dev) { + MT_DEBUG("qtaguid[%d]: no skb->dev\n", par->hooknum); + el_dev = par->in ? : par->out; + } else { + const struct net_device *other_dev; + el_dev = skb->dev; + other_dev = par->in ? 
: par->out; + if (el_dev != other_dev) { + MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs " + "par->(in/out)=%p %s\n", + par->hooknum, el_dev, el_dev->name, other_dev, + other_dev->name); + } + } + + if (unlikely(!el_dev)) { + pr_err_ratelimited("qtaguid[%d]: %s(): no par->in/out?!!\n", + par->hooknum, __func__); + BUG(); + } else if (unlikely(!el_dev->name)) { + pr_err_ratelimited("qtaguid[%d]: %s(): no dev->name?!!\n", + par->hooknum, __func__); + BUG(); + } else { + proto = ipx_proto(skb, par); + MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n", + par->hooknum, el_dev->name, el_dev->type, + par->family, proto); + } + + spin_lock_bh(&iface_stat_list_lock); + entry = get_iface_entry(el_dev->name); + if (entry == NULL) { + IF_DEBUG("qtaguid: iface_stat: %s(%s): not tracked\n", + __func__, el_dev->name); + spin_unlock_bh(&iface_stat_list_lock); + return; + } + + IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__, + el_dev->name, entry); + + data_counters_update(&entry->totals_via_skb, 0, direction, proto, + bytes); + spin_unlock_bh(&iface_stat_list_lock); +} + +static void tag_stat_update(struct tag_stat *tag_entry, + enum ifs_tx_rx direction, int proto, int bytes) +{ + int active_set; + active_set = get_active_counter_set(tag_entry->tn.tag); + MT_DEBUG("qtaguid: tag_stat_update(tag=0x%llx (uid=%u) set=%d " + "dir=%d proto=%d bytes=%d)\n", + tag_entry->tn.tag, get_uid_from_tag(tag_entry->tn.tag), + active_set, direction, proto, bytes); + data_counters_update(&tag_entry->counters, active_set, direction, + proto, bytes); + if (tag_entry->parent_counters) + data_counters_update(tag_entry->parent_counters, active_set, + direction, proto, bytes); +} + +/* + * Create a new entry for tracking the specified {acct_tag,uid_tag} within + * the interface. + * iface_entry->tag_stat_list_lock should be held. + */ +static struct tag_stat *create_if_tag_stat(struct iface_stat *iface_entry, + tag_t tag) +{ + struct tag_stat *new_tag_stat_entry = NULL; + IF_DEBUG("qtaguid: iface_stat: %s(): ife=%p tag=0x%llx" + " (uid=%u)\n", __func__, + iface_entry, tag, get_uid_from_tag(tag)); + new_tag_stat_entry = kzalloc(sizeof(*new_tag_stat_entry), GFP_ATOMIC); + if (!new_tag_stat_entry) { + pr_err("qtaguid: iface_stat: tag stat alloc failed\n"); + goto done; + } + new_tag_stat_entry->tn.tag = tag; + tag_stat_tree_insert(new_tag_stat_entry, &iface_entry->tag_stat_tree); +done: + return new_tag_stat_entry; +} + +static void if_tag_stat_update(const char *ifname, uid_t uid, + const struct sock *sk, enum ifs_tx_rx direction, + int proto, int bytes) +{ + struct tag_stat *tag_stat_entry; + tag_t tag, acct_tag; + tag_t uid_tag; + struct data_counters *uid_tag_counters; + struct sock_tag *sock_tag_entry; + struct iface_stat *iface_entry; + struct tag_stat *new_tag_stat = NULL; + MT_DEBUG("qtaguid: if_tag_stat_update(ifname=%s " + "uid=%u sk=%p dir=%d proto=%d bytes=%d)\n", + ifname, uid, sk, direction, proto, bytes); + + + iface_entry = get_iface_entry(ifname); + if (!iface_entry) { + pr_err_ratelimited("qtaguid: iface_stat: stat_update() " + "%s not found\n", ifname); + return; + } + /* It is ok to process data when an iface_entry is inactive */ + + MT_DEBUG("qtaguid: iface_stat: stat_update() dev=%s entry=%p\n", + ifname, iface_entry); + + /* + * Look for a tagged sock. + * It will have an acct_uid. 
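+ * If the socket was never tagged, fall back below to
+ * {acct_tag=0, uid_tag=uid} so the traffic is still billed against
+ * the plain per-uid tag.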
+ */ + sock_tag_entry = get_sock_stat(sk); + if (sock_tag_entry) { + tag = sock_tag_entry->tag; + acct_tag = get_atag_from_tag(tag); + uid_tag = get_utag_from_tag(tag); + } else { + acct_tag = make_atag_from_value(0); + tag = combine_atag_with_uid(acct_tag, uid); + uid_tag = make_tag_from_uid(uid); + } + MT_DEBUG("qtaguid: iface_stat: stat_update(): " + " looking for tag=0x%llx (uid=%u) in ife=%p\n", + tag, get_uid_from_tag(tag), iface_entry); + /* Loop over tag list under this interface for {acct_tag,uid_tag} */ + spin_lock_bh(&iface_entry->tag_stat_list_lock); + + tag_stat_entry = tag_stat_tree_search(&iface_entry->tag_stat_tree, + tag); + if (tag_stat_entry) { + /* + * Updating the {acct_tag, uid_tag} entry handles both stats: + * {0, uid_tag} will also get updated. + */ + tag_stat_update(tag_stat_entry, direction, proto, bytes); + spin_unlock_bh(&iface_entry->tag_stat_list_lock); + return; + } + + /* Loop over tag list under this interface for {0,uid_tag} */ + tag_stat_entry = tag_stat_tree_search(&iface_entry->tag_stat_tree, + uid_tag); + if (!tag_stat_entry) { + /* Here: the base uid_tag did not exist */ + /* + * No parent counters. So + * - No {0, uid_tag} stats and no {acc_tag, uid_tag} stats. + */ + new_tag_stat = create_if_tag_stat(iface_entry, uid_tag); + if (!new_tag_stat) + goto unlock; + uid_tag_counters = &new_tag_stat->counters; + } else { + uid_tag_counters = &tag_stat_entry->counters; + } + + if (acct_tag) { + /* Create the child {acct_tag, uid_tag} and hook up parent. */ + new_tag_stat = create_if_tag_stat(iface_entry, tag); + if (!new_tag_stat) + goto unlock; + new_tag_stat->parent_counters = uid_tag_counters; + } else { + /* + * For new_tag_stat to be still NULL here would require: + * {0, uid_tag} exists + * and {acct_tag, uid_tag} doesn't exist + * AND acct_tag == 0. + * Impossible. This reassures us that new_tag_stat + * below will always be assigned. + */ + BUG_ON(!new_tag_stat); + } + tag_stat_update(new_tag_stat, direction, proto, bytes); +unlock: + spin_unlock_bh(&iface_entry->tag_stat_list_lock); +} + +static int iface_netdev_event_handler(struct notifier_block *nb, + unsigned long event, void *ptr) { + struct net_device *dev = ptr; + + if (unlikely(module_passive)) + return NOTIFY_DONE; + + IF_DEBUG("qtaguid: iface_stat: netdev_event(): " + "ev=0x%lx/%s netdev=%p->name=%s\n", + event, netdev_evt_str(event), dev, dev ? 
dev->name : ""); + + switch (event) { + case NETDEV_UP: + iface_stat_create(dev, NULL); + atomic64_inc(&qtu_events.iface_events); + break; + case NETDEV_DOWN: + case NETDEV_UNREGISTER: + iface_stat_update(dev, event == NETDEV_DOWN); + atomic64_inc(&qtu_events.iface_events); + break; + } + return NOTIFY_DONE; +} + +static int iface_inet6addr_event_handler(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct inet6_ifaddr *ifa = ptr; + struct net_device *dev; + + if (unlikely(module_passive)) + return NOTIFY_DONE; + + IF_DEBUG("qtaguid: iface_stat: inet6addr_event(): " + "ev=0x%lx/%s ifa=%p\n", + event, netdev_evt_str(event), ifa); + + switch (event) { + case NETDEV_UP: + BUG_ON(!ifa || !ifa->idev); + dev = (struct net_device *)ifa->idev->dev; + iface_stat_create_ipv6(dev, ifa); + atomic64_inc(&qtu_events.iface_events); + break; + case NETDEV_DOWN: + case NETDEV_UNREGISTER: + BUG_ON(!ifa || !ifa->idev); + dev = (struct net_device *)ifa->idev->dev; + iface_stat_update(dev, event == NETDEV_DOWN); + atomic64_inc(&qtu_events.iface_events); + break; + } + return NOTIFY_DONE; +} + +static int iface_inetaddr_event_handler(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct in_ifaddr *ifa = ptr; + struct net_device *dev; + + if (unlikely(module_passive)) + return NOTIFY_DONE; + + IF_DEBUG("qtaguid: iface_stat: inetaddr_event(): " + "ev=0x%lx/%s ifa=%p\n", + event, netdev_evt_str(event), ifa); + + switch (event) { + case NETDEV_UP: + BUG_ON(!ifa || !ifa->ifa_dev); + dev = ifa->ifa_dev->dev; + iface_stat_create(dev, ifa); + atomic64_inc(&qtu_events.iface_events); + break; + case NETDEV_DOWN: + case NETDEV_UNREGISTER: + BUG_ON(!ifa || !ifa->ifa_dev); + dev = ifa->ifa_dev->dev; + iface_stat_update(dev, event == NETDEV_DOWN); + atomic64_inc(&qtu_events.iface_events); + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block iface_netdev_notifier_blk = { + .notifier_call = iface_netdev_event_handler, +}; + +static struct notifier_block iface_inetaddr_notifier_blk = { + .notifier_call = iface_inetaddr_event_handler, +}; + +static struct notifier_block iface_inet6addr_notifier_blk = { + .notifier_call = iface_inet6addr_event_handler, +}; + +static int __init iface_stat_init(struct proc_dir_entry *parent_procdir) +{ + int err; + + iface_stat_procdir = proc_mkdir(iface_stat_procdirname, parent_procdir); + if (!iface_stat_procdir) { + pr_err("qtaguid: iface_stat: init failed to create proc entry\n"); + err = -1; + goto err; + } + + iface_stat_all_procfile = create_proc_entry(iface_stat_all_procfilename, + proc_iface_perms, + parent_procdir); + if (!iface_stat_all_procfile) { + pr_err("qtaguid: iface_stat: init " + " failed to create stat_old proc entry\n"); + err = -1; + goto err_zap_entry; + } + iface_stat_all_procfile->read_proc = iface_stat_fmt_proc_read; + iface_stat_all_procfile->data = (void *)1; /* fmt1 */ + + iface_stat_fmt_procfile = create_proc_entry(iface_stat_fmt_procfilename, + proc_iface_perms, + parent_procdir); + if (!iface_stat_fmt_procfile) { + pr_err("qtaguid: iface_stat: init " + " failed to create stat_all proc entry\n"); + err = -1; + goto err_zap_all_stats_entry; + } + iface_stat_fmt_procfile->read_proc = iface_stat_fmt_proc_read; + iface_stat_fmt_procfile->data = (void *)2; /* fmt2 */ + + + err = register_netdevice_notifier(&iface_netdev_notifier_blk); + if (err) { + pr_err("qtaguid: iface_stat: init " + "failed to register dev event handler\n"); + goto err_zap_all_stats_entries; + } + err = 
register_inetaddr_notifier(&iface_inetaddr_notifier_blk); + if (err) { + pr_err("qtaguid: iface_stat: init " + "failed to register ipv4 dev event handler\n"); + goto err_unreg_nd; + } + + err = register_inet6addr_notifier(&iface_inet6addr_notifier_blk); + if (err) { + pr_err("qtaguid: iface_stat: init " + "failed to register ipv6 dev event handler\n"); + goto err_unreg_ip4_addr; + } + return 0; + +err_unreg_ip4_addr: + unregister_inetaddr_notifier(&iface_inetaddr_notifier_blk); +err_unreg_nd: + unregister_netdevice_notifier(&iface_netdev_notifier_blk); +err_zap_all_stats_entries: + remove_proc_entry(iface_stat_fmt_procfilename, parent_procdir); +err_zap_all_stats_entry: + remove_proc_entry(iface_stat_all_procfilename, parent_procdir); +err_zap_entry: + remove_proc_entry(iface_stat_procdirname, parent_procdir); +err: + return err; +} + +static struct sock *qtaguid_find_sk(const struct sk_buff *skb, + struct xt_action_param *par) +{ + struct sock *sk; + unsigned int hook_mask = (1 << par->hooknum); + + MT_DEBUG("qtaguid: find_sk(skb=%p) hooknum=%d family=%d\n", skb, + par->hooknum, par->family); + + /* + * Let's not abuse the the xt_socket_get*_sk(), or else it will + * return garbage SKs. + */ + if (!(hook_mask & XT_SOCKET_SUPPORTED_HOOKS)) + return NULL; + + switch (par->family) { + case NFPROTO_IPV6: + sk = xt_socket_get6_sk(skb, par); + break; + case NFPROTO_IPV4: + sk = xt_socket_get4_sk(skb, par); + break; + default: + return NULL; + } + + /* + * Seems to be issues on the file ptr for TCP_TIME_WAIT SKs. + * http://kerneltrap.org/mailarchive/linux-netdev/2010/10/21/6287959 + * Not fixed in 3.0-r3 :( + */ + if (sk) { + MT_DEBUG("qtaguid: %p->sk_proto=%u " + "->sk_state=%d\n", sk, sk->sk_protocol, sk->sk_state); + if (sk->sk_state == TCP_TIME_WAIT) { + xt_socket_put_sk(sk); + sk = NULL; + } + } + return sk; +} + +static void account_for_uid(const struct sk_buff *skb, + const struct sock *alternate_sk, uid_t uid, + struct xt_action_param *par) +{ + const struct net_device *el_dev; + + if (!skb->dev) { + MT_DEBUG("qtaguid[%d]: no skb->dev\n", par->hooknum); + el_dev = par->in ? : par->out; + } else { + const struct net_device *other_dev; + el_dev = skb->dev; + other_dev = par->in ? : par->out; + if (el_dev != other_dev) { + MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs " + "par->(in/out)=%p %s\n", + par->hooknum, el_dev, el_dev->name, other_dev, + other_dev->name); + } + } + + if (unlikely(!el_dev)) { + pr_info("qtaguid[%d]: no par->in/out?!!\n", par->hooknum); + } else if (unlikely(!el_dev->name)) { + pr_info("qtaguid[%d]: no dev->name?!!\n", par->hooknum); + } else { + int proto = ipx_proto(skb, par); + MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n", + par->hooknum, el_dev->name, el_dev->type, + par->family, proto); + + if_tag_stat_update(el_dev->name, uid, + skb->sk ? skb->sk : alternate_sk, + par->in ? 
IFS_RX : IFS_TX, + proto, skb->len); + } +} + +static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par) +{ + const struct xt_qtaguid_match_info *info = par->matchinfo; + const struct file *filp; + bool got_sock = false; + struct sock *sk; + uid_t sock_uid; + bool res; + + if (unlikely(module_passive)) + return (info->match ^ info->invert) == 0; + + MT_DEBUG("qtaguid[%d]: entered skb=%p par->in=%p/out=%p fam=%d\n", + par->hooknum, skb, par->in, par->out, par->family); + + atomic64_inc(&qtu_events.match_calls); + if (skb == NULL) { + res = (info->match ^ info->invert) == 0; + goto ret_res; + } + + switch (par->hooknum) { + case NF_INET_PRE_ROUTING: + case NF_INET_POST_ROUTING: + atomic64_inc(&qtu_events.match_calls_prepost); + iface_stat_update_from_skb(skb, par); + /* + * We are done in pre/post. The skb will get processed + * further alter. + */ + res = (info->match ^ info->invert); + goto ret_res; + break; + /* default: Fall through and do UID releated work */ + } + + sk = skb->sk; + if (sk == NULL) { + /* + * A missing sk->sk_socket happens when packets are in-flight + * and the matching socket is already closed and gone. + */ + sk = qtaguid_find_sk(skb, par); + /* + * If we got the socket from the find_sk(), we will need to put + * it back, as nf_tproxy_get_sock_v4() got it. + */ + got_sock = sk; + if (sk) + atomic64_inc(&qtu_events.match_found_sk_in_ct); + else + atomic64_inc(&qtu_events.match_found_no_sk_in_ct); + } else { + atomic64_inc(&qtu_events.match_found_sk); + } + MT_DEBUG("qtaguid[%d]: sk=%p got_sock=%d fam=%d proto=%d\n", + par->hooknum, sk, got_sock, par->family, ipx_proto(skb, par)); + if (sk != NULL) { + MT_DEBUG("qtaguid[%d]: sk=%p->sk_socket=%p->file=%p\n", + par->hooknum, sk, sk->sk_socket, + sk->sk_socket ? sk->sk_socket->file : (void *)-1LL); + filp = sk->sk_socket ? sk->sk_socket->file : NULL; + MT_DEBUG("qtaguid[%d]: filp...uid=%u\n", + par->hooknum, filp ? filp->f_cred->fsuid : -1); + } + + if (sk == NULL || sk->sk_socket == NULL) { + /* + * Here, the qtaguid_find_sk() using connection tracking + * couldn't find the owner, so for now we just count them + * against the system. + */ + /* + * TODO: unhack how to force just accounting. + * For now we only do iface stats when the uid-owner is not + * requested. + */ + if (!(info->match & XT_QTAGUID_UID)) + account_for_uid(skb, sk, 0, par); + MT_DEBUG("qtaguid[%d]: leaving (sk?sk->sk_socket)=%p\n", + par->hooknum, + sk ? sk->sk_socket : NULL); + res = (info->match ^ info->invert) == 0; + atomic64_inc(&qtu_events.match_no_sk); + goto put_sock_ret_res; + } else if (info->match & info->invert & XT_QTAGUID_SOCKET) { + res = false; + goto put_sock_ret_res; + } + filp = sk->sk_socket->file; + if (filp == NULL) { + MT_DEBUG("qtaguid[%d]: leaving filp=NULL\n", par->hooknum); + account_for_uid(skb, sk, 0, par); + res = ((info->match ^ info->invert) & + (XT_QTAGUID_UID | XT_QTAGUID_GID)) == 0; + atomic64_inc(&qtu_events.match_no_sk_file); + goto put_sock_ret_res; + } + sock_uid = filp->f_cred->fsuid; + /* + * TODO: unhack how to force just accounting. 
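+ * ("Just accounting" here means only updating the iface/tag byte and
+ * packet counters via account_for_uid(), without also using the uid
+ * for match filtering; there is no separate flag to request that.)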
+ * For now we only do iface stats when the uid-owner is not requested + */ + if (!(info->match & XT_QTAGUID_UID)) + account_for_uid(skb, sk, sock_uid, par); + + /* + * The following two tests fail the match when: + * id not in range AND no inverted condition requested + * or id in range AND inverted condition requested + * Thus (!a && b) || (a && !b) == a ^ b + */ + if (info->match & XT_QTAGUID_UID) + if ((filp->f_cred->fsuid >= info->uid_min && + filp->f_cred->fsuid <= info->uid_max) ^ + !(info->invert & XT_QTAGUID_UID)) { + MT_DEBUG("qtaguid[%d]: leaving uid not matching\n", + par->hooknum); + res = false; + goto put_sock_ret_res; + } + if (info->match & XT_QTAGUID_GID) + if ((filp->f_cred->fsgid >= info->gid_min && + filp->f_cred->fsgid <= info->gid_max) ^ + !(info->invert & XT_QTAGUID_GID)) { + MT_DEBUG("qtaguid[%d]: leaving gid not matching\n", + par->hooknum); + res = false; + goto put_sock_ret_res; + } + + MT_DEBUG("qtaguid[%d]: leaving matched\n", par->hooknum); + res = true; + +put_sock_ret_res: + if (got_sock) + xt_socket_put_sk(sk); +ret_res: + MT_DEBUG("qtaguid[%d]: left %d\n", par->hooknum, res); + return res; +} + +#ifdef DDEBUG +/* This function is not in xt_qtaguid_print.c because of locks visibility */ +static void prdebug_full_state(int indent_level, const char *fmt, ...) +{ + va_list args; + char *fmt_buff; + char *buff; + + if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK)) + return; + + fmt_buff = kasprintf(GFP_ATOMIC, + "qtaguid: %s(): %s {\n", __func__, fmt); + BUG_ON(!fmt_buff); + va_start(args, fmt); + buff = kvasprintf(GFP_ATOMIC, + fmt_buff, args); + BUG_ON(!buff); + pr_debug("%s", buff); + kfree(fmt_buff); + kfree(buff); + va_end(args); + + spin_lock_bh(&sock_tag_list_lock); + prdebug_sock_tag_tree(indent_level, &sock_tag_tree); + spin_unlock_bh(&sock_tag_list_lock); + + spin_lock_bh(&sock_tag_list_lock); + spin_lock_bh(&uid_tag_data_tree_lock); + prdebug_uid_tag_data_tree(indent_level, &uid_tag_data_tree); + prdebug_proc_qtu_data_tree(indent_level, &proc_qtu_data_tree); + spin_unlock_bh(&uid_tag_data_tree_lock); + spin_unlock_bh(&sock_tag_list_lock); + + spin_lock_bh(&iface_stat_list_lock); + prdebug_iface_stat_list(indent_level, &iface_stat_list); + spin_unlock_bh(&iface_stat_list_lock); + + pr_debug("qtaguid: %s(): }\n", __func__); +} +#else +static void prdebug_full_state(int indent_level, const char *fmt, ...) 
{} +#endif + +/* + * Procfs reader to get all active socket tags using style "1)" as described in + * fs/proc/generic.c + */ +static int qtaguid_ctrl_proc_read(char *page, char **num_items_returned, + off_t items_to_skip, int char_count, int *eof, + void *data) +{ + char *outp = page; + int len; + uid_t uid; + struct rb_node *node; + struct sock_tag *sock_tag_entry; + int item_index = 0; + int indent_level = 0; + long f_count; + + if (unlikely(module_passive)) { + *eof = 1; + return 0; + } + + if (*eof) + return 0; + + CT_DEBUG("qtaguid: proc ctrl pid=%u tgid=%u uid=%u " + "page=%p off=%ld char_count=%d *eof=%d\n", + current->pid, current->tgid, current_fsuid(), + page, items_to_skip, char_count, *eof); + + spin_lock_bh(&sock_tag_list_lock); + for (node = rb_first(&sock_tag_tree); + node; + node = rb_next(node)) { + if (item_index++ < items_to_skip) + continue; + sock_tag_entry = rb_entry(node, struct sock_tag, sock_node); + uid = get_uid_from_tag(sock_tag_entry->tag); + CT_DEBUG("qtaguid: proc_read(): sk=%p tag=0x%llx (uid=%u) " + "pid=%u\n", + sock_tag_entry->sk, + sock_tag_entry->tag, + uid, + sock_tag_entry->pid + ); + f_count = atomic_long_read( + &sock_tag_entry->socket->file->f_count); + len = snprintf(outp, char_count, + "sock=%p tag=0x%llx (uid=%u) pid=%u " + "f_count=%lu\n", + sock_tag_entry->sk, + sock_tag_entry->tag, uid, + sock_tag_entry->pid, f_count); + if (len >= char_count) { + spin_unlock_bh(&sock_tag_list_lock); + *outp = '\0'; + return outp - page; + } + outp += len; + char_count -= len; + (*num_items_returned)++; + } + spin_unlock_bh(&sock_tag_list_lock); + + if (item_index++ >= items_to_skip) { + len = snprintf(outp, char_count, + "events: sockets_tagged=%llu " + "sockets_untagged=%llu " + "counter_set_changes=%llu " + "delete_cmds=%llu " + "iface_events=%llu " + "match_calls=%llu " + "match_calls_prepost=%llu " + "match_found_sk=%llu " + "match_found_sk_in_ct=%llu " + "match_found_no_sk_in_ct=%llu " + "match_no_sk=%llu " + "match_no_sk_file=%llu\n", + atomic64_read(&qtu_events.sockets_tagged), + atomic64_read(&qtu_events.sockets_untagged), + atomic64_read(&qtu_events.counter_set_changes), + atomic64_read(&qtu_events.delete_cmds), + atomic64_read(&qtu_events.iface_events), + atomic64_read(&qtu_events.match_calls), + atomic64_read(&qtu_events.match_calls_prepost), + atomic64_read(&qtu_events.match_found_sk), + atomic64_read(&qtu_events.match_found_sk_in_ct), + atomic64_read( + &qtu_events.match_found_no_sk_in_ct), + atomic64_read(&qtu_events.match_no_sk), + atomic64_read(&qtu_events.match_no_sk_file)); + if (len >= char_count) { + *outp = '\0'; + return outp - page; + } + outp += len; + char_count -= len; + (*num_items_returned)++; + } + + /* Count the following as part of the last item_index */ + if (item_index > items_to_skip) { + prdebug_full_state(indent_level, "proc ctrl"); + } + + *eof = 1; + return outp - page; +} + +/* + * Delete socket tags, and stat tags associated with a given + * accouting tag and uid. 
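+ * Input format is "d <acct_tag> [<uid>]" (see the sscanf() below);
+ * uid defaults to current_fsuid() when omitted, and an acct_tag of 0
+ * matches every tag owned by that uid.  Illustrative example with a
+ * made-up uid: writing "d 0 10003" to /proc/net/xt_qtaguid/ctrl drops
+ * all socket tags, the counter set and all per-iface tag_stat entries
+ * belonging to uid 10003.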
+ */ +static int ctrl_cmd_delete(const char *input) +{ + char cmd; + uid_t uid; + uid_t entry_uid; + tag_t acct_tag; + tag_t tag; + int res, argc; + struct iface_stat *iface_entry; + struct rb_node *node; + struct sock_tag *st_entry; + struct rb_root st_to_free_tree = RB_ROOT; + struct tag_stat *ts_entry; + struct tag_counter_set *tcs_entry; + struct tag_ref *tr_entry; + struct uid_tag_data *utd_entry; + + argc = sscanf(input, "%c %llu %u", &cmd, &acct_tag, &uid); + CT_DEBUG("qtaguid: ctrl_delete(%s): argc=%d cmd=%c " + "user_tag=0x%llx uid=%u\n", input, argc, cmd, + acct_tag, uid); + if (argc < 2) { + res = -EINVAL; + goto err; + } + if (!valid_atag(acct_tag)) { + pr_info("qtaguid: ctrl_delete(%s): invalid tag\n", input); + res = -EINVAL; + goto err; + } + if (argc < 3) { + uid = current_fsuid(); + } else if (!can_impersonate_uid(uid)) { + pr_info("qtaguid: ctrl_delete(%s): " + "insufficient priv from pid=%u tgid=%u uid=%u\n", + input, current->pid, current->tgid, current_fsuid()); + res = -EPERM; + goto err; + } + + tag = combine_atag_with_uid(acct_tag, uid); + CT_DEBUG("qtaguid: ctrl_delete(%s): " + "looking for tag=0x%llx (uid=%u)\n", + input, tag, uid); + + /* Delete socket tags */ + spin_lock_bh(&sock_tag_list_lock); + node = rb_first(&sock_tag_tree); + while (node) { + st_entry = rb_entry(node, struct sock_tag, sock_node); + entry_uid = get_uid_from_tag(st_entry->tag); + node = rb_next(node); + if (entry_uid != uid) + continue; + + CT_DEBUG("qtaguid: ctrl_delete(%s): st tag=0x%llx (uid=%u)\n", + input, st_entry->tag, entry_uid); + + if (!acct_tag || st_entry->tag == tag) { + rb_erase(&st_entry->sock_node, &sock_tag_tree); + /* Can't sockfd_put() within spinlock, do it later. */ + sock_tag_tree_insert(st_entry, &st_to_free_tree); + tr_entry = lookup_tag_ref(st_entry->tag, NULL); + BUG_ON(tr_entry->num_sock_tags <= 0); + tr_entry->num_sock_tags--; + /* + * TODO: remove if, and start failing. + * This is a hack to work around the fact that in some + * places we have "if (IS_ERR_OR_NULL(pqd_entry))" + * and are trying to work around apps + * that didn't open the /dev/xt_qtaguid. + */ + if (st_entry->list.next && st_entry->list.prev) + list_del(&st_entry->list); + } + } + spin_unlock_bh(&sock_tag_list_lock); + + sock_tag_tree_erase(&st_to_free_tree); + + /* Delete tag counter-sets */ + spin_lock_bh(&tag_counter_set_list_lock); + /* Counter sets are only on the uid tag, not full tag */ + tcs_entry = tag_counter_set_tree_search(&tag_counter_set_tree, tag); + if (tcs_entry) { + CT_DEBUG("qtaguid: ctrl_delete(%s): " + "erase tcs: tag=0x%llx (uid=%u) set=%d\n", + input, + tcs_entry->tn.tag, + get_uid_from_tag(tcs_entry->tn.tag), + tcs_entry->active_set); + rb_erase(&tcs_entry->tn.node, &tag_counter_set_tree); + kfree(tcs_entry); + } + spin_unlock_bh(&tag_counter_set_list_lock); + + /* + * If acct_tag is 0, then all entries belonging to uid are + * erased. 
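+ * Otherwise only the matching {acct_tag, uid_tag} node is removed
+ * from each interface's tag_stat_tree.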
+ */ + spin_lock_bh(&iface_stat_list_lock); + list_for_each_entry(iface_entry, &iface_stat_list, list) { + spin_lock_bh(&iface_entry->tag_stat_list_lock); + node = rb_first(&iface_entry->tag_stat_tree); + while (node) { + ts_entry = rb_entry(node, struct tag_stat, tn.node); + entry_uid = get_uid_from_tag(ts_entry->tn.tag); + node = rb_next(node); + + CT_DEBUG("qtaguid: ctrl_delete(%s): " + "ts tag=0x%llx (uid=%u)\n", + input, ts_entry->tn.tag, entry_uid); + + if (entry_uid != uid) + continue; + if (!acct_tag || ts_entry->tn.tag == tag) { + CT_DEBUG("qtaguid: ctrl_delete(%s): " + "erase ts: %s 0x%llx %u\n", + input, iface_entry->ifname, + get_atag_from_tag(ts_entry->tn.tag), + entry_uid); + rb_erase(&ts_entry->tn.node, + &iface_entry->tag_stat_tree); + kfree(ts_entry); + } + } + spin_unlock_bh(&iface_entry->tag_stat_list_lock); + } + spin_unlock_bh(&iface_stat_list_lock); + + /* Cleanup the uid_tag_data */ + spin_lock_bh(&uid_tag_data_tree_lock); + node = rb_first(&uid_tag_data_tree); + while (node) { + utd_entry = rb_entry(node, struct uid_tag_data, node); + entry_uid = utd_entry->uid; + node = rb_next(node); + + CT_DEBUG("qtaguid: ctrl_delete(%s): " + "utd uid=%u\n", + input, entry_uid); + + if (entry_uid != uid) + continue; + /* + * Go over the tag_refs, and those that don't have + * sock_tags using them are freed. + */ + put_tag_ref_tree(tag, utd_entry); + put_utd_entry(utd_entry); + } + spin_unlock_bh(&uid_tag_data_tree_lock); + + atomic64_inc(&qtu_events.delete_cmds); + res = 0; + +err: + return res; +} + +static int ctrl_cmd_counter_set(const char *input) +{ + char cmd; + uid_t uid = 0; + tag_t tag; + int res, argc; + struct tag_counter_set *tcs; + int counter_set; + + argc = sscanf(input, "%c %d %u", &cmd, &counter_set, &uid); + CT_DEBUG("qtaguid: ctrl_counterset(%s): argc=%d cmd=%c " + "set=%d uid=%u\n", input, argc, cmd, + counter_set, uid); + if (argc != 3) { + res = -EINVAL; + goto err; + } + if (counter_set < 0 || counter_set >= IFS_MAX_COUNTER_SETS) { + pr_info("qtaguid: ctrl_counterset(%s): invalid counter_set range\n", + input); + res = -EINVAL; + goto err; + } + if (!can_manipulate_uids()) { + pr_info("qtaguid: ctrl_counterset(%s): " + "insufficient priv from pid=%u tgid=%u uid=%u\n", + input, current->pid, current->tgid, current_fsuid()); + res = -EPERM; + goto err; + } + + tag = make_tag_from_uid(uid); + spin_lock_bh(&tag_counter_set_list_lock); + tcs = tag_counter_set_tree_search(&tag_counter_set_tree, tag); + if (!tcs) { + tcs = kzalloc(sizeof(*tcs), GFP_ATOMIC); + if (!tcs) { + spin_unlock_bh(&tag_counter_set_list_lock); + pr_err("qtaguid: ctrl_counterset(%s): " + "failed to alloc counter set\n", + input); + res = -ENOMEM; + goto err; + } + tcs->tn.tag = tag; + tag_counter_set_tree_insert(tcs, &tag_counter_set_tree); + CT_DEBUG("qtaguid: ctrl_counterset(%s): added tcs tag=0x%llx " + "(uid=%u) set=%d\n", + input, tag, get_uid_from_tag(tag), counter_set); + } + tcs->active_set = counter_set; + spin_unlock_bh(&tag_counter_set_list_lock); + atomic64_inc(&qtu_events.counter_set_changes); + res = 0; + +err: + return res; +} + +static int ctrl_cmd_tag(const char *input) +{ + char cmd; + int sock_fd = 0; + uid_t uid = 0; + tag_t acct_tag = make_atag_from_value(0); + tag_t full_tag; + struct socket *el_socket; + int res, argc; + struct sock_tag *sock_tag_entry; + struct tag_ref *tag_ref_entry; + struct uid_tag_data *uid_tag_data_entry; + struct proc_qtu_data *pqd_entry; + + /* Unassigned args will get defaulted later. 
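+ * Input format is "t <sock_fd> [<acct_tag> [<uid>]]": acct_tag
+ * defaults to 0 and uid to current_fsuid().  A non-zero acct_tag
+ * carries its value in the upper 32 bits (see make_atag_from_value()
+ * and valid_atag() in xt_qtaguid_internal.h), so a user-level tag of
+ * 1 is passed as 1 << 32.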
*/ + argc = sscanf(input, "%c %d %llu %u", &cmd, &sock_fd, &acct_tag, &uid); + CT_DEBUG("qtaguid: ctrl_tag(%s): argc=%d cmd=%c sock_fd=%d " + "acct_tag=0x%llx uid=%u\n", input, argc, cmd, sock_fd, + acct_tag, uid); + if (argc < 2) { + res = -EINVAL; + goto err; + } + el_socket = sockfd_lookup(sock_fd, &res); /* This locks the file */ + if (!el_socket) { + pr_info("qtaguid: ctrl_tag(%s): failed to lookup" + " sock_fd=%d err=%d pid=%u tgid=%u uid=%u\n", + input, sock_fd, res, current->pid, current->tgid, + current_fsuid()); + goto err; + } + CT_DEBUG("qtaguid: ctrl_tag(%s): socket->...->f_count=%ld ->sk=%p\n", + input, atomic_long_read(&el_socket->file->f_count), + el_socket->sk); + if (argc < 3) { + acct_tag = make_atag_from_value(0); + } else if (!valid_atag(acct_tag)) { + pr_info("qtaguid: ctrl_tag(%s): invalid tag\n", input); + res = -EINVAL; + goto err_put; + } + CT_DEBUG("qtaguid: ctrl_tag(%s): " + "pid=%u tgid=%u uid=%u euid=%u fsuid=%u " + "ctrl.gid=%u in_group()=%d in_egroup()=%d\n", + input, current->pid, current->tgid, current_uid(), + current_euid(), current_fsuid(), + xt_qtaguid_ctrl_file->gid, + in_group_p(xt_qtaguid_ctrl_file->gid), + in_egroup_p(xt_qtaguid_ctrl_file->gid)); + if (argc < 4) { + uid = current_fsuid(); + } else if (!can_impersonate_uid(uid)) { + pr_info("qtaguid: ctrl_tag(%s): " + "insufficient priv from pid=%u tgid=%u uid=%u\n", + input, current->pid, current->tgid, current_fsuid()); + res = -EPERM; + goto err_put; + } + full_tag = combine_atag_with_uid(acct_tag, uid); + + spin_lock_bh(&sock_tag_list_lock); + sock_tag_entry = get_sock_stat_nl(el_socket->sk); + tag_ref_entry = get_tag_ref(full_tag, &uid_tag_data_entry); + if (IS_ERR(tag_ref_entry)) { + res = PTR_ERR(tag_ref_entry); + spin_unlock_bh(&sock_tag_list_lock); + goto err_put; + } + tag_ref_entry->num_sock_tags++; + if (sock_tag_entry) { + struct tag_ref *prev_tag_ref_entry; + + CT_DEBUG("qtaguid: ctrl_tag(%s): retag for sk=%p " + "st@%p ...->f_count=%ld\n", + input, el_socket->sk, sock_tag_entry, + atomic_long_read(&el_socket->file->f_count)); + /* + * This is a re-tagging, so release the sock_fd that was + * locked at the time of the 1st tagging. + * There is still the ref from this call's sockfd_lookup() so + * it can be done within the spinlock. + */ + sockfd_put(sock_tag_entry->socket); + prev_tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag, + &uid_tag_data_entry); + BUG_ON(IS_ERR_OR_NULL(prev_tag_ref_entry)); + BUG_ON(prev_tag_ref_entry->num_sock_tags <= 0); + prev_tag_ref_entry->num_sock_tags--; + sock_tag_entry->tag = full_tag; + } else { + CT_DEBUG("qtaguid: ctrl_tag(%s): newtag for sk=%p\n", + input, el_socket->sk); + sock_tag_entry = kzalloc(sizeof(*sock_tag_entry), + GFP_ATOMIC); + if (!sock_tag_entry) { + pr_err("qtaguid: ctrl_tag(%s): " + "socket tag alloc failed\n", + input); + spin_unlock_bh(&sock_tag_list_lock); + res = -ENOMEM; + goto err_tag_unref_put; + } + sock_tag_entry->sk = el_socket->sk; + sock_tag_entry->socket = el_socket; + sock_tag_entry->pid = current->tgid; + sock_tag_entry->tag = combine_atag_with_uid(acct_tag, + uid); + spin_lock_bh(&uid_tag_data_tree_lock); + pqd_entry = proc_qtu_data_tree_search( + &proc_qtu_data_tree, current->tgid); + /* + * TODO: remove if, and start failing. + * At first, we want to catch user-space code that is not + * opening the /dev/xt_qtaguid. + */ + if (IS_ERR_OR_NULL(pqd_entry)) + pr_warn_once( + "qtaguid: %s(): " + "User space forgot to open /dev/xt_qtaguid? 
" + "pid=%u tgid=%u uid=%u\n", __func__, + current->pid, current->tgid, + current_fsuid()); + else + list_add(&sock_tag_entry->list, + &pqd_entry->sock_tag_list); + spin_unlock_bh(&uid_tag_data_tree_lock); + + sock_tag_tree_insert(sock_tag_entry, &sock_tag_tree); + atomic64_inc(&qtu_events.sockets_tagged); + } + spin_unlock_bh(&sock_tag_list_lock); + /* We keep the ref to the socket (file) until it is untagged */ + CT_DEBUG("qtaguid: ctrl_tag(%s): done st@%p ...->f_count=%ld\n", + input, sock_tag_entry, + atomic_long_read(&el_socket->file->f_count)); + return 0; + +err_tag_unref_put: + BUG_ON(tag_ref_entry->num_sock_tags <= 0); + tag_ref_entry->num_sock_tags--; + free_tag_ref_from_utd_entry(tag_ref_entry, uid_tag_data_entry); +err_put: + CT_DEBUG("qtaguid: ctrl_tag(%s): done. ...->f_count=%ld\n", + input, atomic_long_read(&el_socket->file->f_count) - 1); + /* Release the sock_fd that was grabbed by sockfd_lookup(). */ + sockfd_put(el_socket); + return res; + +err: + CT_DEBUG("qtaguid: ctrl_tag(%s): done.\n", input); + return res; +} + +static int ctrl_cmd_untag(const char *input) +{ + char cmd; + int sock_fd = 0; + struct socket *el_socket; + int res, argc; + struct sock_tag *sock_tag_entry; + struct tag_ref *tag_ref_entry; + struct uid_tag_data *utd_entry; + struct proc_qtu_data *pqd_entry; + + argc = sscanf(input, "%c %d", &cmd, &sock_fd); + CT_DEBUG("qtaguid: ctrl_untag(%s): argc=%d cmd=%c sock_fd=%d\n", + input, argc, cmd, sock_fd); + if (argc < 2) { + res = -EINVAL; + goto err; + } + el_socket = sockfd_lookup(sock_fd, &res); /* This locks the file */ + if (!el_socket) { + pr_info("qtaguid: ctrl_untag(%s): failed to lookup" + " sock_fd=%d err=%d pid=%u tgid=%u uid=%u\n", + input, sock_fd, res, current->pid, current->tgid, + current_fsuid()); + goto err; + } + CT_DEBUG("qtaguid: ctrl_untag(%s): socket->...->f_count=%ld ->sk=%p\n", + input, atomic_long_read(&el_socket->file->f_count), + el_socket->sk); + spin_lock_bh(&sock_tag_list_lock); + sock_tag_entry = get_sock_stat_nl(el_socket->sk); + if (!sock_tag_entry) { + spin_unlock_bh(&sock_tag_list_lock); + res = -EINVAL; + goto err_put; + } + /* + * The socket already belongs to the current process + * so it can do whatever it wants to it. + */ + rb_erase(&sock_tag_entry->sock_node, &sock_tag_tree); + + tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag, &utd_entry); + BUG_ON(!tag_ref_entry); + BUG_ON(tag_ref_entry->num_sock_tags <= 0); + spin_lock_bh(&uid_tag_data_tree_lock); + pqd_entry = proc_qtu_data_tree_search( + &proc_qtu_data_tree, current->tgid); + /* + * TODO: remove if, and start failing. + * At first, we want to catch user-space code that is not + * opening the /dev/xt_qtaguid. + */ + if (IS_ERR_OR_NULL(pqd_entry)) + pr_warn_once("qtaguid: %s(): " + "User space forgot to open /dev/xt_qtaguid? " + "pid=%u tgid=%u uid=%u\n", __func__, + current->pid, current->tgid, current_fsuid()); + else + list_del(&sock_tag_entry->list); + spin_unlock_bh(&uid_tag_data_tree_lock); + /* + * We don't free tag_ref from the utd_entry here, + * only during a cmd_delete(). + */ + tag_ref_entry->num_sock_tags--; + spin_unlock_bh(&sock_tag_list_lock); + /* + * Release the sock_fd that was grabbed at tag time, + * and once more for the sockfd_lookup() here. + */ + sockfd_put(sock_tag_entry->socket); + CT_DEBUG("qtaguid: ctrl_untag(%s): done. 
st@%p ...->f_count=%ld\n", + input, sock_tag_entry, + atomic_long_read(&el_socket->file->f_count) - 1); + sockfd_put(el_socket); + + kfree(sock_tag_entry); + atomic64_inc(&qtu_events.sockets_untagged); + + return 0; + +err_put: + CT_DEBUG("qtaguid: ctrl_untag(%s): done. socket->...->f_count=%ld\n", + input, atomic_long_read(&el_socket->file->f_count) - 1); + /* Release the sock_fd that was grabbed by sockfd_lookup(). */ + sockfd_put(el_socket); + return res; + +err: + CT_DEBUG("qtaguid: ctrl_untag(%s): done.\n", input); + return res; +} + +static int qtaguid_ctrl_parse(const char *input, int count) +{ + char cmd; + int res; + + CT_DEBUG("qtaguid: ctrl(%s): pid=%u tgid=%u uid=%u\n", + input, current->pid, current->tgid, current_fsuid()); + + cmd = input[0]; + /* Collect params for commands */ + switch (cmd) { + case 'd': + res = ctrl_cmd_delete(input); + break; + + case 's': + res = ctrl_cmd_counter_set(input); + break; + + case 't': + res = ctrl_cmd_tag(input); + break; + + case 'u': + res = ctrl_cmd_untag(input); + break; + + default: + res = -EINVAL; + goto err; + } + if (!res) + res = count; +err: + CT_DEBUG("qtaguid: ctrl(%s): res=%d\n", input, res); + return res; +} + +#define MAX_QTAGUID_CTRL_INPUT_LEN 255 +static int qtaguid_ctrl_proc_write(struct file *file, const char __user *buffer, + unsigned long count, void *data) +{ + char input_buf[MAX_QTAGUID_CTRL_INPUT_LEN]; + + if (unlikely(module_passive)) + return count; + + if (count >= MAX_QTAGUID_CTRL_INPUT_LEN) + return -EINVAL; + + if (copy_from_user(input_buf, buffer, count)) + return -EFAULT; + + input_buf[count] = '\0'; + return qtaguid_ctrl_parse(input_buf, count); +} + +struct proc_print_info { + char *outp; + char **num_items_returned; + struct iface_stat *iface_entry; + struct tag_stat *ts_entry; + int item_index; + int items_to_skip; + int char_count; +}; + +static int pp_stats_line(struct proc_print_info *ppi, int cnt_set) +{ + int len; + struct data_counters *cnts; + + if (!ppi->item_index) { + if (ppi->item_index++ < ppi->items_to_skip) + return 0; + len = snprintf(ppi->outp, ppi->char_count, + "idx iface acct_tag_hex uid_tag_int cnt_set " + "rx_bytes rx_packets " + "tx_bytes tx_packets " + "rx_tcp_bytes rx_tcp_packets " + "rx_udp_bytes rx_udp_packets " + "rx_other_bytes rx_other_packets " + "tx_tcp_bytes tx_tcp_packets " + "tx_udp_bytes tx_udp_packets " + "tx_other_bytes tx_other_packets\n"); + } else { + tag_t tag = ppi->ts_entry->tn.tag; + uid_t stat_uid = get_uid_from_tag(tag); + /* Detailed tags are not available to everybody */ + if (get_atag_from_tag(tag) + && !can_read_other_uid_stats(stat_uid)) { + CT_DEBUG("qtaguid: stats line: " + "%s 0x%llx %u: insufficient priv " + "from pid=%u tgid=%u uid=%u stats.gid=%u\n", + ppi->iface_entry->ifname, + get_atag_from_tag(tag), stat_uid, + current->pid, current->tgid, current_fsuid(), + xt_qtaguid_stats_file->gid); + return 0; + } + if (ppi->item_index++ < ppi->items_to_skip) + return 0; + cnts = &ppi->ts_entry->counters; + len = snprintf( + ppi->outp, ppi->char_count, + "%d %s 0x%llx %u %u " + "%llu %llu " + "%llu %llu " + "%llu %llu " + "%llu %llu " + "%llu %llu " + "%llu %llu " + "%llu %llu " + "%llu %llu\n", + ppi->item_index, + ppi->iface_entry->ifname, + get_atag_from_tag(tag), + stat_uid, + cnt_set, + dc_sum_bytes(cnts, cnt_set, IFS_RX), + dc_sum_packets(cnts, cnt_set, IFS_RX), + dc_sum_bytes(cnts, cnt_set, IFS_TX), + dc_sum_packets(cnts, cnt_set, IFS_TX), + cnts->bpc[cnt_set][IFS_RX][IFS_TCP].bytes, + cnts->bpc[cnt_set][IFS_RX][IFS_TCP].packets, + 
cnts->bpc[cnt_set][IFS_RX][IFS_UDP].bytes, + cnts->bpc[cnt_set][IFS_RX][IFS_UDP].packets, + cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].bytes, + cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].packets, + cnts->bpc[cnt_set][IFS_TX][IFS_TCP].bytes, + cnts->bpc[cnt_set][IFS_TX][IFS_TCP].packets, + cnts->bpc[cnt_set][IFS_TX][IFS_UDP].bytes, + cnts->bpc[cnt_set][IFS_TX][IFS_UDP].packets, + cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].bytes, + cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].packets); + } + return len; +} + +static bool pp_sets(struct proc_print_info *ppi) +{ + int len; + int counter_set; + for (counter_set = 0; counter_set < IFS_MAX_COUNTER_SETS; + counter_set++) { + len = pp_stats_line(ppi, counter_set); + if (len >= ppi->char_count) { + *ppi->outp = '\0'; + return false; + } + if (len) { + ppi->outp += len; + ppi->char_count -= len; + (*ppi->num_items_returned)++; + } + } + return true; +} + +/* + * Procfs reader to get all tag stats using style "1)" as described in + * fs/proc/generic.c + * Groups all protocols tx/rx bytes. + */ +static int qtaguid_stats_proc_read(char *page, char **num_items_returned, + off_t items_to_skip, int char_count, int *eof, + void *data) +{ + struct proc_print_info ppi; + int len; + + ppi.outp = page; + ppi.item_index = 0; + ppi.char_count = char_count; + ppi.num_items_returned = num_items_returned; + ppi.items_to_skip = items_to_skip; + + if (unlikely(module_passive)) { + len = pp_stats_line(&ppi, 0); + /* The header should always be shorter than the buffer. */ + BUG_ON(len >= ppi.char_count); + (*num_items_returned)++; + *eof = 1; + return len; + } + + CT_DEBUG("qtaguid:proc stats pid=%u tgid=%u uid=%u " + "page=%p *num_items_returned=%p off=%ld " + "char_count=%d *eof=%d\n", + current->pid, current->tgid, current_fsuid(), + page, *num_items_returned, + items_to_skip, char_count, *eof); + + if (*eof) + return 0; + + /* The idx is there to help debug when things go belly up. */ + len = pp_stats_line(&ppi, 0); + /* Don't advance the outp unless the whole line was printed */ + if (len >= ppi.char_count) { + *ppi.outp = '\0'; + return ppi.outp - page; + } + if (len) { + ppi.outp += len; + ppi.char_count -= len; + (*num_items_returned)++; + } + + spin_lock_bh(&iface_stat_list_lock); + list_for_each_entry(ppi.iface_entry, &iface_stat_list, list) { + struct rb_node *node; + spin_lock_bh(&ppi.iface_entry->tag_stat_list_lock); + for (node = rb_first(&ppi.iface_entry->tag_stat_tree); + node; + node = rb_next(node)) { + ppi.ts_entry = rb_entry(node, struct tag_stat, tn.node); + if (!pp_sets(&ppi)) { + spin_unlock_bh( + &ppi.iface_entry->tag_stat_list_lock); + spin_unlock_bh(&iface_stat_list_lock); + return ppi.outp - page; + } + } + spin_unlock_bh(&ppi.iface_entry->tag_stat_list_lock); + } + spin_unlock_bh(&iface_stat_list_lock); + + *eof = 1; + return ppi.outp - page; +} + +/*------------------------------------------*/ +static int qtudev_open(struct inode *inode, struct file *file) +{ + struct uid_tag_data *utd_entry; + struct proc_qtu_data *pqd_entry; + struct proc_qtu_data *new_pqd_entry; + int res; + bool utd_entry_found; + + if (unlikely(qtu_proc_handling_passive)) + return 0; + + DR_DEBUG("qtaguid: qtudev_open(): pid=%u tgid=%u uid=%u\n", + current->pid, current->tgid, current_fsuid()); + + spin_lock_bh(&uid_tag_data_tree_lock); + + /* Look for existing uid data, or alloc one. 
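+ * Each uid has at most one uid_tag_data node.  Each process (tgid)
+ * may open this device only once and gets a proc_qtu_data entry
+ * pointing back at the uid entry, so its socket tags can be cleaned
+ * up in qtudev_release().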
*/ + utd_entry = get_uid_data(current_fsuid(), &utd_entry_found); + if (IS_ERR_OR_NULL(utd_entry)) { + res = PTR_ERR(utd_entry); + goto err_unlock; + } + + /* Look for existing PID based proc_data */ + pqd_entry = proc_qtu_data_tree_search(&proc_qtu_data_tree, + current->tgid); + if (pqd_entry) { + pr_err("qtaguid: qtudev_open(): %u/%u %u " + "%s already opened\n", + current->pid, current->tgid, current_fsuid(), + QTU_DEV_NAME); + res = -EBUSY; + goto err_unlock_free_utd; + } + + new_pqd_entry = kzalloc(sizeof(*new_pqd_entry), GFP_ATOMIC); + if (!new_pqd_entry) { + pr_err("qtaguid: qtudev_open(): %u/%u %u: " + "proc data alloc failed\n", + current->pid, current->tgid, current_fsuid()); + res = -ENOMEM; + goto err_unlock_free_utd; + } + new_pqd_entry->pid = current->tgid; + INIT_LIST_HEAD(&new_pqd_entry->sock_tag_list); + new_pqd_entry->parent_tag_data = utd_entry; + utd_entry->num_pqd++; + + proc_qtu_data_tree_insert(new_pqd_entry, + &proc_qtu_data_tree); + + spin_unlock_bh(&uid_tag_data_tree_lock); + DR_DEBUG("qtaguid: tracking data for uid=%u in pqd=%p\n", + current_fsuid(), new_pqd_entry); + file->private_data = new_pqd_entry; + return 0; + +err_unlock_free_utd: + if (!utd_entry_found) { + rb_erase(&utd_entry->node, &uid_tag_data_tree); + kfree(utd_entry); + } +err_unlock: + spin_unlock_bh(&uid_tag_data_tree_lock); + return res; +} + +static int qtudev_release(struct inode *inode, struct file *file) +{ + struct proc_qtu_data *pqd_entry = file->private_data; + struct uid_tag_data *utd_entry = pqd_entry->parent_tag_data; + struct sock_tag *st_entry; + struct rb_root st_to_free_tree = RB_ROOT; + struct list_head *entry, *next; + struct tag_ref *tr; + + if (unlikely(qtu_proc_handling_passive)) + return 0; + + /* + * Do not trust the current->pid, it might just be a kworker cleaning + * up after a dead proc. + */ + DR_DEBUG("qtaguid: qtudev_release(): " + "pid=%u tgid=%u uid=%u " + "pqd_entry=%p->pid=%u utd_entry=%p->active_tags=%d\n", + current->pid, current->tgid, pqd_entry->parent_tag_data->uid, + pqd_entry, pqd_entry->pid, utd_entry, + utd_entry->num_active_tags); + + spin_lock_bh(&sock_tag_list_lock); + spin_lock_bh(&uid_tag_data_tree_lock); + + list_for_each_safe(entry, next, &pqd_entry->sock_tag_list) { + st_entry = list_entry(entry, struct sock_tag, list); + DR_DEBUG("qtaguid: %s(): " + "erase sock_tag=%p->sk=%p pid=%u tgid=%u uid=%u\n", + __func__, + st_entry, st_entry->sk, + current->pid, current->tgid, + pqd_entry->parent_tag_data->uid); + + utd_entry = uid_tag_data_tree_search( + &uid_tag_data_tree, + get_uid_from_tag(st_entry->tag)); + BUG_ON(IS_ERR_OR_NULL(utd_entry)); + DR_DEBUG("qtaguid: %s(): " + "looking for tag=0x%llx in utd_entry=%p\n", __func__, + st_entry->tag, utd_entry); + tr = tag_ref_tree_search(&utd_entry->tag_ref_tree, + st_entry->tag); + BUG_ON(!tr); + BUG_ON(tr->num_sock_tags <= 0); + tr->num_sock_tags--; + free_tag_ref_from_utd_entry(tr, utd_entry); + + rb_erase(&st_entry->sock_node, &sock_tag_tree); + list_del(&st_entry->list); + /* Can't sockfd_put() within spinlock, do it later. */ + sock_tag_tree_insert(st_entry, &st_to_free_tree); + + /* + * Try to free the utd_entry if no other proc_qtu_data is + * using it (num_pqd is 0) and it doesn't have active tags + * (num_active_tags is 0). 
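+ * put_utd_entry() below only frees it once both counts are zero;
+ * it is called again further down for pqd_entry's own
+ * parent_tag_data after dropping the num_pqd reference.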
+ */ + put_utd_entry(utd_entry); + } + + rb_erase(&pqd_entry->node, &proc_qtu_data_tree); + BUG_ON(pqd_entry->parent_tag_data->num_pqd < 1); + pqd_entry->parent_tag_data->num_pqd--; + put_utd_entry(pqd_entry->parent_tag_data); + kfree(pqd_entry); + file->private_data = NULL; + + spin_unlock_bh(&uid_tag_data_tree_lock); + spin_unlock_bh(&sock_tag_list_lock); + + + sock_tag_tree_erase(&st_to_free_tree); + + prdebug_full_state(0, "%s(): pid=%u tgid=%u", __func__, + current->pid, current->tgid); + return 0; +} + +/*------------------------------------------*/ +static const struct file_operations qtudev_fops = { + .owner = THIS_MODULE, + .open = qtudev_open, + .release = qtudev_release, +}; + +static struct miscdevice qtu_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = QTU_DEV_NAME, + .fops = &qtudev_fops, + /* How sad it doesn't allow for defaults: .mode = S_IRUGO | S_IWUSR */ +}; + +/*------------------------------------------*/ +static int __init qtaguid_proc_register(struct proc_dir_entry **res_procdir) +{ + int ret; + *res_procdir = proc_mkdir(module_procdirname, init_net.proc_net); + if (!*res_procdir) { + pr_err("qtaguid: failed to create proc/.../xt_qtaguid\n"); + ret = -ENOMEM; + goto no_dir; + } + + xt_qtaguid_ctrl_file = create_proc_entry("ctrl", proc_ctrl_perms, + *res_procdir); + if (!xt_qtaguid_ctrl_file) { + pr_err("qtaguid: failed to create xt_qtaguid/ctrl " + " file\n"); + ret = -ENOMEM; + goto no_ctrl_entry; + } + xt_qtaguid_ctrl_file->read_proc = qtaguid_ctrl_proc_read; + xt_qtaguid_ctrl_file->write_proc = qtaguid_ctrl_proc_write; + + xt_qtaguid_stats_file = create_proc_entry("stats", proc_stats_perms, + *res_procdir); + if (!xt_qtaguid_stats_file) { + pr_err("qtaguid: failed to create xt_qtaguid/stats " + "file\n"); + ret = -ENOMEM; + goto no_stats_entry; + } + xt_qtaguid_stats_file->read_proc = qtaguid_stats_proc_read; + /* + * TODO: add support counter hacking + * xt_qtaguid_stats_file->write_proc = qtaguid_stats_proc_write; + */ + return 0; + +no_stats_entry: + remove_proc_entry("ctrl", *res_procdir); +no_ctrl_entry: + remove_proc_entry("xt_qtaguid", NULL); +no_dir: + return ret; +} + +static struct xt_match qtaguid_mt_reg __read_mostly = { + /* + * This module masquerades as the "owner" module so that iptables + * tools can deal with it. + */ + .name = "owner", + .revision = 1, + .family = NFPROTO_UNSPEC, + .match = qtaguid_mt, + .matchsize = sizeof(struct xt_qtaguid_match_info), + .me = THIS_MODULE, +}; + +static int __init qtaguid_mt_init(void) +{ + if (qtaguid_proc_register(&xt_qtaguid_procdir) + || iface_stat_init(xt_qtaguid_procdir) + || xt_register_match(&qtaguid_mt_reg) + || misc_register(&qtu_device)) + return -1; + return 0; +} + +/* + * TODO: allow unloading of the module. + * For now stats are permanent. + * Kconfig forces'y/n' and never an 'm'. + */ + +module_init(qtaguid_mt_init); +MODULE_AUTHOR("jpa "); +MODULE_DESCRIPTION("Xtables: socket owner+tag matching and associated stats"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("ipt_owner"); +MODULE_ALIAS("ip6t_owner"); +MODULE_ALIAS("ipt_qtaguid"); +MODULE_ALIAS("ip6t_qtaguid"); diff --git a/net/netfilter/xt_qtaguid_internal.h b/net/netfilter/xt_qtaguid_internal.h new file mode 100644 index 0000000000000..6dc14a9c68896 --- /dev/null +++ b/net/netfilter/xt_qtaguid_internal.h @@ -0,0 +1,352 @@ +/* + * Kernel iptables module to track stats for packets based on user tags. 
+ * + * (C) 2011 Google, Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __XT_QTAGUID_INTERNAL_H__ +#define __XT_QTAGUID_INTERNAL_H__ + +#include +#include +#include +#include + +/* Iface handling */ +#define IDEBUG_MASK (1<<0) +/* Iptable Matching. Per packet. */ +#define MDEBUG_MASK (1<<1) +/* Red-black tree handling. Per packet. */ +#define RDEBUG_MASK (1<<2) +/* procfs ctrl/stats handling */ +#define CDEBUG_MASK (1<<3) +/* dev and resource tracking */ +#define DDEBUG_MASK (1<<4) + +/* E.g (IDEBUG_MASK | CDEBUG_MASK | DDEBUG_MASK) */ +#define DEFAULT_DEBUG_MASK 0 + +/* + * (Un)Define these *DEBUG to compile out/in the pr_debug calls. + * All undef: text size ~ 0x3030; all def: ~ 0x4404. + */ +#define IDEBUG +#define MDEBUG +#define RDEBUG +#define CDEBUG +#define DDEBUG + +#define MSK_DEBUG(mask, ...) do { \ + if (unlikely(qtaguid_debug_mask & (mask))) \ + pr_debug(__VA_ARGS__); \ + } while (0) +#ifdef IDEBUG +#define IF_DEBUG(...) MSK_DEBUG(IDEBUG_MASK, __VA_ARGS__) +#else +#define IF_DEBUG(...) no_printk(__VA_ARGS__) +#endif +#ifdef MDEBUG +#define MT_DEBUG(...) MSK_DEBUG(MDEBUG_MASK, __VA_ARGS__) +#else +#define MT_DEBUG(...) no_printk(__VA_ARGS__) +#endif +#ifdef RDEBUG +#define RB_DEBUG(...) MSK_DEBUG(RDEBUG_MASK, __VA_ARGS__) +#else +#define RB_DEBUG(...) no_printk(__VA_ARGS__) +#endif +#ifdef CDEBUG +#define CT_DEBUG(...) MSK_DEBUG(CDEBUG_MASK, __VA_ARGS__) +#else +#define CT_DEBUG(...) no_printk(__VA_ARGS__) +#endif +#ifdef DDEBUG +#define DR_DEBUG(...) MSK_DEBUG(DDEBUG_MASK, __VA_ARGS__) +#else +#define DR_DEBUG(...) no_printk(__VA_ARGS__) +#endif + +extern uint qtaguid_debug_mask; + +/*---------------------------------------------------------------------------*/ +/* + * Tags: + * + * They represent what the data usage counters will be tracked against. + * By default a tag is just based on the UID. + * The UID is used as the base for policing, and can not be ignored. + * So a tag will always at least represent a UID (uid_tag). + * + * A tag can be augmented with an "accounting tag" which is associated + * with a UID. + * User space can set the acct_tag portion of the tag which is then used + * with sockets: all data belonging to that socket will be counted against the + * tag. The policing is then based on the tag's uid_tag portion, + * and stats are collected for the acct_tag portion separately. + * + * There could be + * a: {acct_tag=1, uid_tag=10003} + * b: {acct_tag=2, uid_tag=10003} + * c: {acct_tag=3, uid_tag=10003} + * d: {acct_tag=0, uid_tag=10003} + * a, b, and c represent tags associated with specific sockets. + * d is for the totals for that uid, including all untagged traffic. + * Typically d is used with policing/quota rules. + * + * We want tag_t big enough to distinguish uid_t and acct_tag. + * It might become a struct if needed. + * Nothing should be using it as an int. + */ +typedef uint64_t tag_t; /* Only used via accessors */ + +#define TAG_UID_MASK 0xFFFFFFFFULL +#define TAG_ACCT_MASK (~0xFFFFFFFFULL) + +static inline int tag_compare(tag_t t1, tag_t t2) +{ + return t1 < t2 ? -1 : t1 == t2 ? 
0 : 1; +} + +static inline tag_t combine_atag_with_uid(tag_t acct_tag, uid_t uid) +{ + return acct_tag | uid; +} +static inline tag_t make_tag_from_uid(uid_t uid) +{ + return uid; +} +static inline uid_t get_uid_from_tag(tag_t tag) +{ + return tag & TAG_UID_MASK; +} +static inline tag_t get_utag_from_tag(tag_t tag) +{ + return tag & TAG_UID_MASK; +} +static inline tag_t get_atag_from_tag(tag_t tag) +{ + return tag & TAG_ACCT_MASK; +} + +static inline bool valid_atag(tag_t tag) +{ + return !(tag & TAG_UID_MASK); +} +static inline tag_t make_atag_from_value(uint32_t value) +{ + return (uint64_t)value << 32; +} +/*---------------------------------------------------------------------------*/ + +/* + * Maximum number of socket tags that a UID is allowed to have active. + * Multiple processes belonging to the same UID contribute towards this limit. + * Special UIDs that can impersonate a UID also contribute (e.g. download + * manager, ...) + */ +#define DEFAULT_MAX_SOCK_TAGS 1024 + +/* + * For now we only track 2 sets of counters. + * The default set is 0. + * Userspace can activate another set for a given uid being tracked. + */ +#define IFS_MAX_COUNTER_SETS 2 + +enum ifs_tx_rx { + IFS_TX, + IFS_RX, + IFS_MAX_DIRECTIONS +}; + +/* For now, TCP, UDP, the rest */ +enum ifs_proto { + IFS_TCP, + IFS_UDP, + IFS_PROTO_OTHER, + IFS_MAX_PROTOS +}; + +struct byte_packet_counters { + uint64_t bytes; + uint64_t packets; +}; + +struct data_counters { + struct byte_packet_counters bpc[IFS_MAX_COUNTER_SETS][IFS_MAX_DIRECTIONS][IFS_MAX_PROTOS]; +}; + +static inline uint64_t dc_sum_bytes(struct data_counters *counters, + int set, + enum ifs_tx_rx direction) +{ + return counters->bpc[set][direction][IFS_TCP].bytes + + counters->bpc[set][direction][IFS_UDP].bytes + + counters->bpc[set][direction][IFS_PROTO_OTHER].bytes; +} + +static inline uint64_t dc_sum_packets(struct data_counters *counters, + int set, + enum ifs_tx_rx direction) +{ + return counters->bpc[set][direction][IFS_TCP].packets + + counters->bpc[set][direction][IFS_UDP].packets + + counters->bpc[set][direction][IFS_PROTO_OTHER].packets; +} + + +/* Generic X based nodes used as a base for rb_tree ops */ +struct tag_node { + struct rb_node node; + tag_t tag; +}; + +struct tag_stat { + struct tag_node tn; + struct data_counters counters; + /* + * If this tag is acct_tag based, we need to count against the + * matching parent uid_tag. + */ + struct data_counters *parent_counters; +}; + +struct iface_stat { + struct list_head list; /* in iface_stat_list */ + char *ifname; + bool active; + /* net_dev is only valid for active iface_stat */ + struct net_device *net_dev; + + struct byte_packet_counters totals_via_dev[IFS_MAX_DIRECTIONS]; + struct data_counters totals_via_skb; + /* + * We keep the last_known, because some devices reset their counters + * just before NETDEV_UP, while some will reset just before + * NETDEV_REGISTER (which is more normal). + * So now, if the device didn't do a NETDEV_UNREGISTER and we see + * its current dev stats smaller that what was previously known, we + * assume an UNREGISTER and just use the last_known. + */ + struct byte_packet_counters last_known[IFS_MAX_DIRECTIONS]; + /* last_known is usable when last_known_valid is true */ + bool last_known_valid; + + struct proc_dir_entry *proc_ptr; + + struct rb_root tag_stat_tree; + spinlock_t tag_stat_list_lock; +}; + +/* This is needed to create proc_dir_entries from atomic context. 
*/ +struct iface_stat_work { + struct work_struct iface_work; + struct iface_stat *iface_entry; +}; + +/* + * Track tag that this socket is transferring data for, and not necessarily + * the uid that owns the socket. + * This is the tag against which tag_stat.counters will be billed. + * These structs need to be looked up by sock and pid. + */ +struct sock_tag { + struct rb_node sock_node; + struct sock *sk; /* Only used as a number, never dereferenced */ + /* The socket is needed for sockfd_put() */ + struct socket *socket; + /* Used to associate with a given pid */ + struct list_head list; /* in proc_qtu_data.sock_tag_list */ + pid_t pid; + + tag_t tag; +}; + +struct qtaguid_event_counts { + /* Various successful events */ + atomic64_t sockets_tagged; + atomic64_t sockets_untagged; + atomic64_t counter_set_changes; + atomic64_t delete_cmds; + atomic64_t iface_events; /* Number of NETDEV_* events handled */ + + atomic64_t match_calls; /* Number of times iptables called mt */ + /* Number of times iptables called mt from pre or post routing hooks */ + atomic64_t match_calls_prepost; + /* + * match_found_sk_*: numbers related to the netfilter matching + * function finding a sock for the sk_buff. + * Total skbs processed is sum(match_found*). + */ + atomic64_t match_found_sk; /* An sk was already in the sk_buff. */ + /* The connection tracker had or didn't have the sk. */ + atomic64_t match_found_sk_in_ct; + atomic64_t match_found_no_sk_in_ct; + /* + * No sk could be found. No apparent owner. Could happen with + * unsolicited traffic. + */ + atomic64_t match_no_sk; + /* + * The file ptr in the sk_socket wasn't there. + * This might happen for traffic while the socket is being closed. + */ + atomic64_t match_no_sk_file; +}; + +/* Track the set active_set for the given tag. */ +struct tag_counter_set { + struct tag_node tn; + int active_set; +}; + +/*----------------------------------------------*/ +/* + * The qtu uid data is used to track resources that are created directly or + * indirectly by processes (uid tracked). + * It is shared by the processes with the same uid. + * Some of the resource will be counted to prevent further rogue allocations, + * some will need freeing once the owner process (uid) exits. + */ +struct uid_tag_data { + struct rb_node node; + uid_t uid; + + /* + * For the uid, how many accounting tags have been set. + */ + int num_active_tags; + /* Track the number of proc_qtu_data that reference it */ + int num_pqd; + struct rb_root tag_ref_tree; + /* No tag_node_tree_lock; use uid_tag_data_tree_lock */ +}; + +struct tag_ref { + struct tag_node tn; + + /* + * This tracks the number of active sockets that have a tag on them + * which matches this tag_ref.tn.tag. + * A tag ref can live on after the sockets are untagged. + * A tag ref can only be removed during a tag delete command. + */ + int num_sock_tags; +}; + +struct proc_qtu_data { + struct rb_node node; + pid_t pid; + + struct uid_tag_data *parent_tag_data; + + /* Tracks the sock_tags that need freeing upon this proc's death */ + struct list_head sock_tag_list; + /* No spinlock_t sock_tag_list_lock; use the global one. */ +}; + +/*----------------------------------------------*/ +#endif /* ifndef __XT_QTAGUID_INTERNAL_H__ */ diff --git a/net/netfilter/xt_qtaguid_print.c b/net/netfilter/xt_qtaguid_print.c new file mode 100644 index 0000000000000..f6a00a3520ed5 --- /dev/null +++ b/net/netfilter/xt_qtaguid_print.c @@ -0,0 +1,566 @@ +/* + * Pretty printing Support for iptables xt_qtaguid module. 
+ * + * (C) 2011 Google, Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* + * Most of the functions in this file just waste time if DEBUG is not defined. + * The matching xt_qtaguid_print.h will static inline empty funcs if the needed + * debug flags ore not defined. + * Those funcs that fail to allocate memory will panic as there is no need to + * hobble allong just pretending to do the requested work. + */ + +#define DEBUG + +#include +#include +#include +#include +#include +#include + + +#include "xt_qtaguid_internal.h" +#include "xt_qtaguid_print.h" + +#ifdef DDEBUG + +static void _bug_on_err_or_null(void *ptr) +{ + if (IS_ERR_OR_NULL(ptr)) { + pr_err("qtaguid: kmalloc failed\n"); + BUG(); + } +} + +char *pp_tag_t(tag_t *tag) +{ + char *res; + + if (!tag) + res = kasprintf(GFP_ATOMIC, "tag_t@null{}"); + else + res = kasprintf(GFP_ATOMIC, + "tag_t@%p{tag=0x%llx, uid=%u}", + tag, *tag, get_uid_from_tag(*tag)); + _bug_on_err_or_null(res); + return res; +} + +char *pp_data_counters(struct data_counters *dc, bool showValues) +{ + char *res; + + if (!dc) + res = kasprintf(GFP_ATOMIC, "data_counters@null{}"); + else if (showValues) + res = kasprintf( + GFP_ATOMIC, "data_counters@%p{" + "set0{" + "rx{" + "tcp{b=%llu, p=%llu}, " + "udp{b=%llu, p=%llu}," + "other{b=%llu, p=%llu}}, " + "tx{" + "tcp{b=%llu, p=%llu}, " + "udp{b=%llu, p=%llu}," + "other{b=%llu, p=%llu}}}, " + "set1{" + "rx{" + "tcp{b=%llu, p=%llu}, " + "udp{b=%llu, p=%llu}," + "other{b=%llu, p=%llu}}, " + "tx{" + "tcp{b=%llu, p=%llu}, " + "udp{b=%llu, p=%llu}," + "other{b=%llu, p=%llu}}}}", + dc, + dc->bpc[0][IFS_RX][IFS_TCP].bytes, + dc->bpc[0][IFS_RX][IFS_TCP].packets, + dc->bpc[0][IFS_RX][IFS_UDP].bytes, + dc->bpc[0][IFS_RX][IFS_UDP].packets, + dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].bytes, + dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].packets, + dc->bpc[0][IFS_TX][IFS_TCP].bytes, + dc->bpc[0][IFS_TX][IFS_TCP].packets, + dc->bpc[0][IFS_TX][IFS_UDP].bytes, + dc->bpc[0][IFS_TX][IFS_UDP].packets, + dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].bytes, + dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].packets, + dc->bpc[1][IFS_RX][IFS_TCP].bytes, + dc->bpc[1][IFS_RX][IFS_TCP].packets, + dc->bpc[1][IFS_RX][IFS_UDP].bytes, + dc->bpc[1][IFS_RX][IFS_UDP].packets, + dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].bytes, + dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].packets, + dc->bpc[1][IFS_TX][IFS_TCP].bytes, + dc->bpc[1][IFS_TX][IFS_TCP].packets, + dc->bpc[1][IFS_TX][IFS_UDP].bytes, + dc->bpc[1][IFS_TX][IFS_UDP].packets, + dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].bytes, + dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].packets); + else + res = kasprintf(GFP_ATOMIC, "data_counters@%p{...}", dc); + _bug_on_err_or_null(res); + return res; +} + +char *pp_tag_node(struct tag_node *tn) +{ + char *tag_str; + char *res; + + if (!tn) { + res = kasprintf(GFP_ATOMIC, "tag_node@null{}"); + _bug_on_err_or_null(res); + return res; + } + tag_str = pp_tag_t(&tn->tag); + res = kasprintf(GFP_ATOMIC, + "tag_node@%p{tag=%s}", + tn, tag_str); + _bug_on_err_or_null(res); + kfree(tag_str); + return res; +} + +char *pp_tag_ref(struct tag_ref *tr) +{ + char *tn_str; + char *res; + + if (!tr) { + res = kasprintf(GFP_ATOMIC, "tag_ref@null{}"); + _bug_on_err_or_null(res); + return res; + } + tn_str = pp_tag_node(&tr->tn); + res = kasprintf(GFP_ATOMIC, + "tag_ref@%p{%s, num_sock_tags=%d}", + tr, tn_str, tr->num_sock_tags); + _bug_on_err_or_null(res); + kfree(tn_str); + return 
res; +} + +char *pp_tag_stat(struct tag_stat *ts) +{ + char *tn_str; + char *counters_str; + char *parent_counters_str; + char *res; + + if (!ts) { + res = kasprintf(GFP_ATOMIC, "tag_stat@null{}"); + _bug_on_err_or_null(res); + return res; + } + tn_str = pp_tag_node(&ts->tn); + counters_str = pp_data_counters(&ts->counters, true); + parent_counters_str = pp_data_counters(ts->parent_counters, false); + res = kasprintf(GFP_ATOMIC, + "tag_stat@%p{%s, counters=%s, parent_counters=%s}", + ts, tn_str, counters_str, parent_counters_str); + _bug_on_err_or_null(res); + kfree(tn_str); + kfree(counters_str); + kfree(parent_counters_str); + return res; +} + +char *pp_iface_stat(struct iface_stat *is) +{ + char *res; + if (!is) { + res = kasprintf(GFP_ATOMIC, "iface_stat@null{}"); + } else { + struct data_counters *cnts = &is->totals_via_skb; + res = kasprintf(GFP_ATOMIC, "iface_stat@%p{" + "list=list_head{...}, " + "ifname=%s, " + "total_dev={rx={bytes=%llu, " + "packets=%llu}, " + "tx={bytes=%llu, " + "packets=%llu}}, " + "total_skb={rx={bytes=%llu, " + "packets=%llu}, " + "tx={bytes=%llu, " + "packets=%llu}}, " + "last_known_valid=%d, " + "last_known={rx={bytes=%llu, " + "packets=%llu}, " + "tx={bytes=%llu, " + "packets=%llu}}, " + "active=%d, " + "net_dev=%p, " + "proc_ptr=%p, " + "tag_stat_tree=rb_root{...}}", + is, + is->ifname, + is->totals_via_dev[IFS_RX].bytes, + is->totals_via_dev[IFS_RX].packets, + is->totals_via_dev[IFS_TX].bytes, + is->totals_via_dev[IFS_TX].packets, + dc_sum_bytes(cnts, 0, IFS_RX), + dc_sum_packets(cnts, 0, IFS_RX), + dc_sum_bytes(cnts, 0, IFS_TX), + dc_sum_packets(cnts, 0, IFS_TX), + is->last_known_valid, + is->last_known[IFS_RX].bytes, + is->last_known[IFS_RX].packets, + is->last_known[IFS_TX].bytes, + is->last_known[IFS_TX].packets, + is->active, + is->net_dev, + is->proc_ptr); + } + _bug_on_err_or_null(res); + return res; +} + +char *pp_sock_tag(struct sock_tag *st) +{ + char *tag_str; + char *res; + + if (!st) { + res = kasprintf(GFP_ATOMIC, "sock_tag@null{}"); + _bug_on_err_or_null(res); + return res; + } + tag_str = pp_tag_t(&st->tag); + res = kasprintf(GFP_ATOMIC, "sock_tag@%p{" + "sock_node=rb_node{...}, " + "sk=%p socket=%p (f_count=%lu), list=list_head{...}, " + "pid=%u, tag=%s}", + st, st->sk, st->socket, atomic_long_read( + &st->socket->file->f_count), + st->pid, tag_str); + _bug_on_err_or_null(res); + kfree(tag_str); + return res; +} + +char *pp_uid_tag_data(struct uid_tag_data *utd) +{ + char *res; + + if (!utd) + res = kasprintf(GFP_ATOMIC, "uid_tag_data@null{}"); + else + res = kasprintf(GFP_ATOMIC, "uid_tag_data@%p{" + "uid=%u, num_active_acct_tags=%d, " + "num_pqd=%d, " + "tag_node_tree=rb_root{...}, " + "proc_qtu_data_tree=rb_root{...}}", + utd, utd->uid, + utd->num_active_tags, utd->num_pqd); + _bug_on_err_or_null(res); + return res; +} + +char *pp_proc_qtu_data(struct proc_qtu_data *pqd) +{ + char *parent_tag_data_str; + char *res; + + if (!pqd) { + res = kasprintf(GFP_ATOMIC, "proc_qtu_data@null{}"); + _bug_on_err_or_null(res); + return res; + } + parent_tag_data_str = pp_uid_tag_data(pqd->parent_tag_data); + res = kasprintf(GFP_ATOMIC, "proc_qtu_data@%p{" + "node=rb_node{...}, pid=%u, " + "parent_tag_data=%s, " + "sock_tag_list=list_head{...}}", + pqd, pqd->pid, parent_tag_data_str + ); + _bug_on_err_or_null(res); + kfree(parent_tag_data_str); + return res; +} + +/*------------------------------------------*/ +void prdebug_sock_tag_tree(int indent_level, + struct rb_root *sock_tag_tree) +{ + struct rb_node *node; + struct sock_tag *sock_tag_entry; 
+ char *str; + + if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK)) + return; + + if (RB_EMPTY_ROOT(sock_tag_tree)) { + str = "sock_tag_tree=rb_root{}"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + return; + } + + str = "sock_tag_tree=rb_root{"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + indent_level++; + for (node = rb_first(sock_tag_tree); + node; + node = rb_next(node)) { + sock_tag_entry = rb_entry(node, struct sock_tag, sock_node); + str = pp_sock_tag(sock_tag_entry); + pr_debug("%*d: %s,\n", indent_level*2, indent_level, str); + kfree(str); + } + indent_level--; + str = "}"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); +} + +void prdebug_sock_tag_list(int indent_level, + struct list_head *sock_tag_list) +{ + struct sock_tag *sock_tag_entry; + char *str; + + if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK)) + return; + + if (list_empty(sock_tag_list)) { + str = "sock_tag_list=list_head{}"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + return; + } + + str = "sock_tag_list=list_head{"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + indent_level++; + list_for_each_entry(sock_tag_entry, sock_tag_list, list) { + str = pp_sock_tag(sock_tag_entry); + pr_debug("%*d: %s,\n", indent_level*2, indent_level, str); + kfree(str); + } + indent_level--; + str = "}"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); +} + +void prdebug_proc_qtu_data_tree(int indent_level, + struct rb_root *proc_qtu_data_tree) +{ + char *str; + struct rb_node *node; + struct proc_qtu_data *proc_qtu_data_entry; + + if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK)) + return; + + if (RB_EMPTY_ROOT(proc_qtu_data_tree)) { + str = "proc_qtu_data_tree=rb_root{}"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + return; + } + + str = "proc_qtu_data_tree=rb_root{"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + indent_level++; + for (node = rb_first(proc_qtu_data_tree); + node; + node = rb_next(node)) { + proc_qtu_data_entry = rb_entry(node, + struct proc_qtu_data, + node); + str = pp_proc_qtu_data(proc_qtu_data_entry); + pr_debug("%*d: %s,\n", indent_level*2, indent_level, + str); + kfree(str); + indent_level++; + prdebug_sock_tag_list(indent_level, + &proc_qtu_data_entry->sock_tag_list); + indent_level--; + + } + indent_level--; + str = "}"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); +} + +void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree) +{ + char *str; + struct rb_node *node; + struct tag_ref *tag_ref_entry; + + if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK)) + return; + + if (RB_EMPTY_ROOT(tag_ref_tree)) { + str = "tag_ref_tree{}"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + return; + } + + str = "tag_ref_tree{"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + indent_level++; + for (node = rb_first(tag_ref_tree); + node; + node = rb_next(node)) { + tag_ref_entry = rb_entry(node, + struct tag_ref, + tn.node); + str = pp_tag_ref(tag_ref_entry); + pr_debug("%*d: %s,\n", indent_level*2, indent_level, + str); + kfree(str); + } + indent_level--; + str = "}"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); +} + +void prdebug_uid_tag_data_tree(int indent_level, + struct rb_root *uid_tag_data_tree) +{ + char *str; + struct rb_node *node; + struct uid_tag_data *uid_tag_data_entry; + + if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK)) + return; + + if (RB_EMPTY_ROOT(uid_tag_data_tree)) { + str = "uid_tag_data_tree=rb_root{}"; + 
pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + return; + } + + str = "uid_tag_data_tree=rb_root{"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + indent_level++; + for (node = rb_first(uid_tag_data_tree); + node; + node = rb_next(node)) { + uid_tag_data_entry = rb_entry(node, struct uid_tag_data, + node); + str = pp_uid_tag_data(uid_tag_data_entry); + pr_debug("%*d: %s,\n", indent_level*2, indent_level, str); + kfree(str); + if (!RB_EMPTY_ROOT(&uid_tag_data_entry->tag_ref_tree)) { + indent_level++; + prdebug_tag_ref_tree(indent_level, + &uid_tag_data_entry->tag_ref_tree); + indent_level--; + } + } + indent_level--; + str = "}"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); +} + +void prdebug_tag_stat_tree(int indent_level, + struct rb_root *tag_stat_tree) +{ + char *str; + struct rb_node *node; + struct tag_stat *ts_entry; + + if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK)) + return; + + if (RB_EMPTY_ROOT(tag_stat_tree)) { + str = "tag_stat_tree{}"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + return; + } + + str = "tag_stat_tree{"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + indent_level++; + for (node = rb_first(tag_stat_tree); + node; + node = rb_next(node)) { + ts_entry = rb_entry(node, struct tag_stat, tn.node); + str = pp_tag_stat(ts_entry); + pr_debug("%*d: %s\n", indent_level*2, indent_level, + str); + kfree(str); + } + indent_level--; + str = "}"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); +} + +void prdebug_iface_stat_list(int indent_level, + struct list_head *iface_stat_list) +{ + char *str; + struct iface_stat *iface_entry; + + if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK)) + return; + + if (list_empty(iface_stat_list)) { + str = "iface_stat_list=list_head{}"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + return; + } + + str = "iface_stat_list=list_head{"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + indent_level++; + list_for_each_entry(iface_entry, iface_stat_list, list) { + str = pp_iface_stat(iface_entry); + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); + kfree(str); + + spin_lock_bh(&iface_entry->tag_stat_list_lock); + if (!RB_EMPTY_ROOT(&iface_entry->tag_stat_tree)) { + indent_level++; + prdebug_tag_stat_tree(indent_level, + &iface_entry->tag_stat_tree); + indent_level--; + } + spin_unlock_bh(&iface_entry->tag_stat_list_lock); + } + indent_level--; + str = "}"; + pr_debug("%*d: %s\n", indent_level*2, indent_level, str); +} + +#endif /* ifdef DDEBUG */ +/*------------------------------------------*/ +static const char * const netdev_event_strings[] = { + "netdev_unknown", + "NETDEV_UP", + "NETDEV_DOWN", + "NETDEV_REBOOT", + "NETDEV_CHANGE", + "NETDEV_REGISTER", + "NETDEV_UNREGISTER", + "NETDEV_CHANGEMTU", + "NETDEV_CHANGEADDR", + "NETDEV_GOING_DOWN", + "NETDEV_CHANGENAME", + "NETDEV_FEAT_CHANGE", + "NETDEV_BONDING_FAILOVER", + "NETDEV_PRE_UP", + "NETDEV_PRE_TYPE_CHANGE", + "NETDEV_POST_TYPE_CHANGE", + "NETDEV_POST_INIT", + "NETDEV_UNREGISTER_BATCH", + "NETDEV_RELEASE", + "NETDEV_NOTIFY_PEERS", + "NETDEV_JOIN", +}; + +const char *netdev_evt_str(int netdev_event) +{ + if (netdev_event < 0 + || netdev_event >= ARRAY_SIZE(netdev_event_strings)) + return "bad event num"; + return netdev_event_strings[netdev_event]; +} diff --git a/net/netfilter/xt_qtaguid_print.h b/net/netfilter/xt_qtaguid_print.h new file mode 100644 index 0000000000000..b63871a0be5a7 --- /dev/null +++ b/net/netfilter/xt_qtaguid_print.h @@ -0,0 +1,120 @@ +/* + * Pretty 
printing Support for iptables xt_qtaguid module. + * + * (C) 2011 Google, Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __XT_QTAGUID_PRINT_H__ +#define __XT_QTAGUID_PRINT_H__ + +#include "xt_qtaguid_internal.h" + +#ifdef DDEBUG + +char *pp_tag_t(tag_t *tag); +char *pp_data_counters(struct data_counters *dc, bool showValues); +char *pp_tag_node(struct tag_node *tn); +char *pp_tag_ref(struct tag_ref *tr); +char *pp_tag_stat(struct tag_stat *ts); +char *pp_iface_stat(struct iface_stat *is); +char *pp_sock_tag(struct sock_tag *st); +char *pp_uid_tag_data(struct uid_tag_data *qtd); +char *pp_proc_qtu_data(struct proc_qtu_data *pqd); + +/*------------------------------------------*/ +void prdebug_sock_tag_list(int indent_level, + struct list_head *sock_tag_list); +void prdebug_sock_tag_tree(int indent_level, + struct rb_root *sock_tag_tree); +void prdebug_proc_qtu_data_tree(int indent_level, + struct rb_root *proc_qtu_data_tree); +void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree); +void prdebug_uid_tag_data_tree(int indent_level, + struct rb_root *uid_tag_data_tree); +void prdebug_tag_stat_tree(int indent_level, + struct rb_root *tag_stat_tree); +void prdebug_iface_stat_list(int indent_level, + struct list_head *iface_stat_list); + +#else + +/*------------------------------------------*/ +static inline char *pp_tag_t(tag_t *tag) +{ + return NULL; +} +static inline char *pp_data_counters(struct data_counters *dc, bool showValues) +{ + return NULL; +} +static inline char *pp_tag_node(struct tag_node *tn) +{ + return NULL; +} +static inline char *pp_tag_ref(struct tag_ref *tr) +{ + return NULL; +} +static inline char *pp_tag_stat(struct tag_stat *ts) +{ + return NULL; +} +static inline char *pp_iface_stat(struct iface_stat *is) +{ + return NULL; +} +static inline char *pp_sock_tag(struct sock_tag *st) +{ + return NULL; +} +static inline char *pp_uid_tag_data(struct uid_tag_data *qtd) +{ + return NULL; +} +static inline char *pp_proc_qtu_data(struct proc_qtu_data *pqd) +{ + return NULL; +} + +/*------------------------------------------*/ +static inline +void prdebug_sock_tag_list(int indent_level, + struct list_head *sock_tag_list) +{ +} +static inline +void prdebug_sock_tag_tree(int indent_level, + struct rb_root *sock_tag_tree) +{ +} +static inline +void prdebug_proc_qtu_data_tree(int indent_level, + struct rb_root *proc_qtu_data_tree) +{ +} +static inline +void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree) +{ +} +static inline +void prdebug_uid_tag_data_tree(int indent_level, + struct rb_root *uid_tag_data_tree) +{ +} +static inline +void prdebug_tag_stat_tree(int indent_level, + struct rb_root *tag_stat_tree) +{ +} +static inline +void prdebug_iface_stat_list(int indent_level, + struct list_head *iface_stat_list) +{ +} +#endif +/*------------------------------------------*/ +const char *netdev_evt_str(int netdev_event); +#endif /* ifndef __XT_QTAGUID_PRINT_H__ */ diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c new file mode 100644 index 0000000000000..3c72bea2dd698 --- /dev/null +++ b/net/netfilter/xt_quota2.c @@ -0,0 +1,381 @@ +/* + * xt_quota2 - enhanced xt_quota that can count upwards and in packets + * as a minimal accounting match. 
+ * by Jan Engelhardt , 2008 + * + * Originally based on xt_quota.c: + * netfilter module to enforce network quotas + * Sam Johnston + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License; either + * version 2 of the License, as published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include + +#include +#include +#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG +#include +#endif + +/** + * @lock: lock to protect quota writers from each other + */ +struct xt_quota_counter { + u_int64_t quota; + spinlock_t lock; + struct list_head list; + atomic_t ref; + char name[sizeof(((struct xt_quota_mtinfo2 *)NULL)->name)]; + struct proc_dir_entry *procfs_entry; +}; + +#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG +/* Harald's favorite number +1 :D From ipt_ULOG.C */ +static int qlog_nl_event = 112; +module_param_named(event_num, qlog_nl_event, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(event_num, + "Event number for NETLINK_NFLOG message. 0 disables log." + "111 is what ipt_ULOG uses."); +static struct sock *nflognl; +#endif + +static LIST_HEAD(counter_list); +static DEFINE_SPINLOCK(counter_list_lock); + +static struct proc_dir_entry *proc_xt_quota; +static unsigned int quota_list_perms = S_IRUGO | S_IWUSR; +static unsigned int quota_list_uid = 0; +static unsigned int quota_list_gid = 0; +module_param_named(perms, quota_list_perms, uint, S_IRUGO | S_IWUSR); +module_param_named(uid, quota_list_uid, uint, S_IRUGO | S_IWUSR); +module_param_named(gid, quota_list_gid, uint, S_IRUGO | S_IWUSR); + + +#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG +static void quota2_log(unsigned int hooknum, + const struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + const char *prefix) +{ + ulog_packet_msg_t *pm; + struct sk_buff *log_skb; + size_t size; + struct nlmsghdr *nlh; + + if (!qlog_nl_event) + return; + + size = NLMSG_SPACE(sizeof(*pm)); + size = max(size, (size_t)NLMSG_GOODSIZE); + log_skb = alloc_skb(size, GFP_ATOMIC); + if (!log_skb) { + pr_err("xt_quota2: cannot alloc skb for logging\n"); + return; + } + + /* NLMSG_PUT() uses "goto nlmsg_failure" */ + nlh = NLMSG_PUT(log_skb, /*pid*/0, /*seq*/0, qlog_nl_event, + sizeof(*pm)); + pm = NLMSG_DATA(nlh); + if (skb->tstamp.tv64 == 0) + __net_timestamp((struct sk_buff *)skb); + pm->data_len = 0; + pm->hook = hooknum; + if (prefix != NULL) + strlcpy(pm->prefix, prefix, sizeof(pm->prefix)); + else + *(pm->prefix) = '\0'; + if (in) + strlcpy(pm->indev_name, in->name, sizeof(pm->indev_name)); + else + pm->indev_name[0] = '\0'; + + if (out) + strlcpy(pm->outdev_name, out->name, sizeof(pm->outdev_name)); + else + pm->outdev_name[0] = '\0'; + + NETLINK_CB(log_skb).dst_group = 1; + pr_debug("throwing 1 packets to netlink group 1\n"); + netlink_broadcast(nflognl, log_skb, 0, 1, GFP_ATOMIC); + +nlmsg_failure: /* Used within NLMSG_PUT() */ + pr_debug("xt_quota2: error during NLMSG_PUT\n"); +} +#else +static void quota2_log(unsigned int hooknum, + const struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + const char *prefix) +{ +} +#endif /* if+else CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG */ + +static int quota_proc_read(char *page, char **start, off_t offset, + int count, int *eof, void *data) +{ + struct xt_quota_counter *e = data; + int ret; + + spin_lock_bh(&e->lock); + ret = snprintf(page, PAGE_SIZE, "%llu\n", e->quota); + spin_unlock_bh(&e->lock); + return ret; +} + +static int quota_proc_write(struct file 
*file, const char __user *input, + unsigned long size, void *data) +{ + struct xt_quota_counter *e = data; + char buf[sizeof("18446744073709551616")]; + + if (size > sizeof(buf)) + size = sizeof(buf); + if (copy_from_user(buf, input, size) != 0) + return -EFAULT; + buf[sizeof(buf)-1] = '\0'; + + spin_lock_bh(&e->lock); + e->quota = simple_strtoull(buf, NULL, 0); + spin_unlock_bh(&e->lock); + return size; +} + +static struct xt_quota_counter * +q2_new_counter(const struct xt_quota_mtinfo2 *q, bool anon) +{ + struct xt_quota_counter *e; + unsigned int size; + + /* Do not need all the procfs things for anonymous counters. */ + size = anon ? offsetof(typeof(*e), list) : sizeof(*e); + e = kmalloc(size, GFP_KERNEL); + if (e == NULL) + return NULL; + + e->quota = q->quota; + spin_lock_init(&e->lock); + if (!anon) { + INIT_LIST_HEAD(&e->list); + atomic_set(&e->ref, 1); + strlcpy(e->name, q->name, sizeof(e->name)); + } + return e; +} + +/** + * q2_get_counter - get ref to counter or create new + * @name: name of counter + */ +static struct xt_quota_counter * +q2_get_counter(const struct xt_quota_mtinfo2 *q) +{ + struct proc_dir_entry *p; + struct xt_quota_counter *e = NULL; + struct xt_quota_counter *new_e; + + if (*q->name == '\0') + return q2_new_counter(q, true); + + /* No need to hold a lock while getting a new counter */ + new_e = q2_new_counter(q, false); + if (new_e == NULL) + goto out; + + spin_lock_bh(&counter_list_lock); + list_for_each_entry(e, &counter_list, list) + if (strcmp(e->name, q->name) == 0) { + atomic_inc(&e->ref); + spin_unlock_bh(&counter_list_lock); + kfree(new_e); + pr_debug("xt_quota2: old counter name=%s", e->name); + return e; + } + e = new_e; + pr_debug("xt_quota2: new_counter name=%s", e->name); + list_add_tail(&e->list, &counter_list); + /* The entry having a refcount of 1 is not directly destructible. + * This func has not yet returned the new entry, thus iptables + * has not references for destroying this entry. + * For another rule to try to destroy it, it would 1st need for this + * func* to be re-invoked, acquire a new ref for the same named quota. + * Nobody will access the e->procfs_entry either. + * So release the lock. */ + spin_unlock_bh(&counter_list_lock); + + /* create_proc_entry() is not spin_lock happy */ + p = e->procfs_entry = create_proc_entry(e->name, quota_list_perms, + proc_xt_quota); + + if (IS_ERR_OR_NULL(p)) { + spin_lock_bh(&counter_list_lock); + list_del(&e->list); + spin_unlock_bh(&counter_list_lock); + goto out; + } + p->data = e; + p->read_proc = quota_proc_read; + p->write_proc = quota_proc_write; + p->uid = quota_list_uid; + p->gid = quota_list_gid; + return e; + + out: + kfree(e); + return NULL; +} + +static int quota_mt2_check(const struct xt_mtchk_param *par) +{ + struct xt_quota_mtinfo2 *q = par->matchinfo; + + pr_debug("xt_quota2: check() flags=0x%04x", q->flags); + + if (q->flags & ~XT_QUOTA_MASK) + return -EINVAL; + + q->name[sizeof(q->name)-1] = '\0'; + if (*q->name == '.' 
|| strchr(q->name, '/') != NULL) { + printk(KERN_ERR "xt_quota.3: illegal name\n"); + return -EINVAL; + } + + q->master = q2_get_counter(q); + if (q->master == NULL) { + printk(KERN_ERR "xt_quota.3: memory alloc failure\n"); + return -ENOMEM; + } + + return 0; +} + +static void quota_mt2_destroy(const struct xt_mtdtor_param *par) +{ + struct xt_quota_mtinfo2 *q = par->matchinfo; + struct xt_quota_counter *e = q->master; + + if (*q->name == '\0') { + kfree(e); + return; + } + + spin_lock_bh(&counter_list_lock); + if (!atomic_dec_and_test(&e->ref)) { + spin_unlock_bh(&counter_list_lock); + return; + } + + list_del(&e->list); + remove_proc_entry(e->name, proc_xt_quota); + spin_unlock_bh(&counter_list_lock); + kfree(e); +} + +static bool +quota_mt2(const struct sk_buff *skb, struct xt_action_param *par) +{ + struct xt_quota_mtinfo2 *q = (void *)par->matchinfo; + struct xt_quota_counter *e = q->master; + bool ret = q->flags & XT_QUOTA_INVERT; + + spin_lock_bh(&e->lock); + if (q->flags & XT_QUOTA_GROW) { + /* + * While no_change is pointless in "grow" mode, we will + * implement it here simply to have a consistent behavior. + */ + if (!(q->flags & XT_QUOTA_NO_CHANGE)) { + e->quota += (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len; + } + ret = true; + } else { + if (e->quota >= skb->len) { + if (!(q->flags & XT_QUOTA_NO_CHANGE)) + e->quota -= (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len; + ret = !ret; + } else { + /* We are transitioning, log that fact. */ + if (e->quota) { + quota2_log(par->hooknum, + skb, + par->in, + par->out, + q->name); + } + /* we do not allow even small packets from now on */ + e->quota = 0; + } + } + spin_unlock_bh(&e->lock); + return ret; +} + +static struct xt_match quota_mt2_reg[] __read_mostly = { + { + .name = "quota2", + .revision = 3, + .family = NFPROTO_IPV4, + .checkentry = quota_mt2_check, + .match = quota_mt2, + .destroy = quota_mt2_destroy, + .matchsize = sizeof(struct xt_quota_mtinfo2), + .me = THIS_MODULE, + }, + { + .name = "quota2", + .revision = 3, + .family = NFPROTO_IPV6, + .checkentry = quota_mt2_check, + .match = quota_mt2, + .destroy = quota_mt2_destroy, + .matchsize = sizeof(struct xt_quota_mtinfo2), + .me = THIS_MODULE, + }, +}; + +static int __init quota_mt2_init(void) +{ + int ret; + pr_debug("xt_quota2: init()"); + +#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG + nflognl = netlink_kernel_create(&init_net, + NETLINK_NFLOG, 1, NULL, + NULL, THIS_MODULE); + if (!nflognl) + return -ENOMEM; +#endif + + proc_xt_quota = proc_mkdir("xt_quota", init_net.proc_net); + if (proc_xt_quota == NULL) + return -EACCES; + + ret = xt_register_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg)); + if (ret < 0) + remove_proc_entry("xt_quota", init_net.proc_net); + pr_debug("xt_quota2: init() %d", ret); + return ret; +} + +static void __exit quota_mt2_exit(void) +{ + xt_unregister_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg)); + remove_proc_entry("xt_quota", init_net.proc_net); +} + +module_init(quota_mt2_init); +module_exit(quota_mt2_exit); +MODULE_DESCRIPTION("Xtables: countdown quota match; up counter"); +MODULE_AUTHOR("Sam Johnston "); +MODULE_AUTHOR("Jan Engelhardt "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("ipt_quota2"); +MODULE_ALIAS("ip6t_quota2"); diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index 9cc46356b5773..ddf5e0507f5f5 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c @@ -35,7 +35,7 @@ #include #endif -static void +void xt_socket_put_sk(struct sock *sk) { if (sk->sk_state == TCP_TIME_WAIT) @@ -43,6 +43,7 @@ 
xt_socket_put_sk(struct sock *sk) else sock_put(sk); } +EXPORT_SYMBOL(xt_socket_put_sk); static int extract_icmp4_fields(const struct sk_buff *skb, @@ -101,9 +102,8 @@ extract_icmp4_fields(const struct sk_buff *skb, return 0; } -static bool -socket_match(const struct sk_buff *skb, struct xt_action_param *par, - const struct xt_socket_mtinfo1 *info) +struct sock* +xt_socket_get4_sk(const struct sk_buff *skb, struct xt_action_param *par) { const struct iphdr *iph = ip_hdr(skb); struct udphdr _hdr, *hp = NULL; @@ -120,7 +120,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par, hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr); if (hp == NULL) - return false; + return NULL; protocol = iph->protocol; saddr = iph->saddr; @@ -131,9 +131,9 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par, } else if (iph->protocol == IPPROTO_ICMP) { if (extract_icmp4_fields(skb, &protocol, &saddr, &daddr, &sport, &dport)) - return false; + return NULL; } else { - return false; + return NULL; } #ifdef XT_SOCKET_HAVE_CONNTRACK @@ -143,9 +143,9 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par, ct = nf_ct_get(skb, &ctinfo); if (ct && !nf_ct_is_untracked(ct) && ((iph->protocol != IPPROTO_ICMP && - ctinfo == IP_CT_IS_REPLY + IP_CT_ESTABLISHED) || + ctinfo == IP_CT_ESTABLISHED_REPLY) || (iph->protocol == IPPROTO_ICMP && - ctinfo == IP_CT_IS_REPLY + IP_CT_RELATED)) && + ctinfo == IP_CT_RELATED_REPLY)) && (ct->status & IPS_SRC_NAT_DONE)) { daddr = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip; @@ -157,6 +157,23 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par, sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol, saddr, daddr, sport, dport, par->in, NFT_LOOKUP_ANY); + + pr_debug("proto %hhu %pI4:%hu -> %pI4:%hu (orig %pI4:%hu) sock %p\n", + protocol, &saddr, ntohs(sport), + &daddr, ntohs(dport), + &iph->daddr, hp ? ntohs(hp->dest) : 0, sk); + + return sk; +} +EXPORT_SYMBOL(xt_socket_get4_sk); + +static bool +socket_match(const struct sk_buff *skb, struct xt_action_param *par, + const struct xt_socket_mtinfo1 *info) +{ + struct sock *sk; + + sk = xt_socket_get4_sk(skb, par); if (sk != NULL) { bool wildcard; bool transparent = true; @@ -179,11 +196,6 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par, sk = NULL; } - pr_debug("proto %hhu %pI4:%hu -> %pI4:%hu (orig %pI4:%hu) sock %p\n", - protocol, &saddr, ntohs(sport), - &daddr, ntohs(dport), - &iph->daddr, hp ? 
ntohs(hp->dest) : 0, sk); - return (sk != NULL); } @@ -253,8 +265,8 @@ extract_icmp6_fields(const struct sk_buff *skb, return 0; } -static bool -socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) +struct sock* +xt_socket_get6_sk(const struct sk_buff *skb, struct xt_action_param *par) { struct ipv6hdr *iph = ipv6_hdr(skb); struct udphdr _hdr, *hp = NULL; @@ -262,7 +274,6 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) struct in6_addr *daddr, *saddr; __be16 dport, sport; int thoff, tproto; - const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo; tproto = ipv6_find_hdr(skb, &thoff, -1, NULL); if (tproto < 0) { @@ -274,7 +285,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr); if (hp == NULL) - return false; + return NULL; saddr = &iph->saddr; sport = hp->source; @@ -284,13 +295,30 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) } else if (tproto == IPPROTO_ICMPV6) { if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr, &sport, &dport)) - return false; + return NULL; } else { - return false; + return NULL; } sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto, saddr, daddr, sport, dport, par->in, NFT_LOOKUP_ANY); + pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu " + "(orig %pI6:%hu) sock %p\n", + tproto, saddr, ntohs(sport), + daddr, ntohs(dport), + &iph->daddr, hp ? ntohs(hp->dest) : 0, sk); + return sk; +} +EXPORT_SYMBOL(xt_socket_get6_sk); + +static bool +socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) +{ + struct sock *sk; + const struct xt_socket_mtinfo1 *info; + + info = (struct xt_socket_mtinfo1 *) par->matchinfo; + sk = xt_socket_get6_sk(skb, par); if (sk != NULL) { bool wildcard; bool transparent = true; @@ -313,12 +341,6 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) sk = NULL; } - pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu " - "(orig %pI6:%hu) sock %p\n", - tproto, saddr, ntohs(sport), - daddr, ntohs(dport), - &iph->daddr, hp ? 
ntohs(hp->dest) : 0, sk); - return (sk != NULL); } #endif diff --git a/net/rfkill/core.c b/net/rfkill/core.c index dafeaeb97bc32..df2dae6b27236 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -1028,7 +1028,6 @@ static int rfkill_fop_open(struct inode *inode, struct file *file) * start getting events from elsewhere but hold mtx to get * startup events added first */ - list_add(&data->list, &rfkill_fds); list_for_each_entry(rfkill, &rfkill_list, node) { ev = kzalloc(sizeof(*ev), GFP_KERNEL); @@ -1037,6 +1036,7 @@ static int rfkill_fop_open(struct inode *inode, struct file *file) rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD); list_add_tail(&ev->list, &data->events); } + list_add(&data->list, &rfkill_fds); mutex_unlock(&data->mtx); mutex_unlock(&rfkill_global_mutex); diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c index 1734abba26a29..174d51c9ce377 100644 --- a/net/rose/rose_subr.c +++ b/net/rose/rose_subr.c @@ -290,10 +290,15 @@ static int rose_parse_national(unsigned char *p, struct rose_facilities_struct * facilities->source_ndigis = 0; facilities->dest_ndigis = 0; for (pt = p + 2, lg = 0 ; lg < l ; pt += AX25_ADDR_LEN, lg += AX25_ADDR_LEN) { - if (pt[6] & AX25_HBIT) + if (pt[6] & AX25_HBIT) { + if (facilities->dest_ndigis >= ROSE_MAX_DIGIS) + return -1; memcpy(&facilities->dest_digis[facilities->dest_ndigis++], pt, AX25_ADDR_LEN); - else + } else { + if (facilities->source_ndigis >= ROSE_MAX_DIGIS) + return -1; memcpy(&facilities->source_digis[facilities->source_ndigis++], pt, AX25_ADDR_LEN); + } } } p += l + 2; @@ -333,6 +338,11 @@ static int rose_parse_ccitt(unsigned char *p, struct rose_facilities_struct *fac case 0xC0: l = p[1]; + + /* Prevent overflows*/ + if (l < 10 || l > 20) + return -1; + if (*p == FAC_CCITT_DEST_NSAP) { memcpy(&facilities->source_addr, p + 7, ROSE_ADDR_LEN); memcpy(callsign, p + 12, l - 10); @@ -373,12 +383,16 @@ int rose_parse_facilities(unsigned char *p, switch (*p) { case FAC_NATIONAL: /* National */ len = rose_parse_national(p + 1, facilities, facilities_len - 1); + if (len < 0) + return 0; facilities_len -= len + 1; p += len + 1; break; case FAC_CCITT: /* CCITT */ len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1); + if (len < 0) + return 0; facilities_len -= len + 1; p += len + 1; break; diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index edea8cefec6c9..e852bb1f64296 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -361,7 +361,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct sfq_sched_data *q = qdisc_priv(sch); unsigned int hash; - sfq_index x; + sfq_index x, qlen; struct sfq_slot *slot; int uninitialized_var(ret); @@ -405,20 +405,12 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (++sch->q.qlen <= q->limit) return NET_XMIT_SUCCESS; + qlen = slot->qlen; sfq_drop(sch); - return NET_XMIT_CN; -} - -static struct sk_buff * -sfq_peek(struct Qdisc *sch) -{ - struct sfq_sched_data *q = qdisc_priv(sch); - - /* No active slots */ - if (q->tail == NULL) - return NULL; - - return q->slots[q->tail->next].skblist_next; + /* Return Congestion Notification only if we dropped a packet + * from this flow. + */ + return (qlen != slot->qlen) ? 
NET_XMIT_CN : NET_XMIT_SUCCESS; } static struct sk_buff * @@ -679,7 +671,7 @@ static struct Qdisc_ops sfq_qdisc_ops __read_mostly = { .priv_size = sizeof(struct sfq_sched_data), .enqueue = sfq_enqueue, .dequeue = sfq_dequeue, - .peek = sfq_peek, + .peek = qdisc_peek_dequeued, .drop = sfq_drop, .init = sfq_init, .reset = sfq_reset, diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 5f1fb8bd862de..490f003da84d8 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -64,6 +64,7 @@ /* Forward declarations for internal functions. */ static void sctp_assoc_bh_rcv(struct work_struct *work); static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc); +static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc); /* Keep track of the new idr low so that we don't re-use association id * numbers too fast. It is protected by they idr spin lock is in the @@ -446,6 +447,9 @@ void sctp_association_free(struct sctp_association *asoc) /* Free any cached ASCONF_ACK chunk. */ sctp_assoc_free_asconf_acks(asoc); + /* Free the ASCONF queue. */ + sctp_assoc_free_asconf_queue(asoc); + /* Free any cached ASCONF chunk. */ if (asoc->addip_last_asconf) sctp_chunk_free(asoc->addip_last_asconf); @@ -1576,6 +1580,18 @@ int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp) return error; } +/* Free the ASCONF queue */ +static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc) +{ + struct sctp_chunk *asconf; + struct sctp_chunk *tmp; + + list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) { + list_del_init(&asconf->list); + sctp_chunk_free(asconf); + } +} + /* Free asconf_ack cache */ static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc) { diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c index faf71d179e464..6150ac5cf5ddf 100644 --- a/net/sctp/bind_addr.c +++ b/net/sctp/bind_addr.c @@ -140,14 +140,12 @@ void sctp_bind_addr_init(struct sctp_bind_addr *bp, __u16 port) /* Dispose of the address list. */ static void sctp_bind_addr_clean(struct sctp_bind_addr *bp) { - struct sctp_sockaddr_entry *addr; - struct list_head *pos, *temp; + struct sctp_sockaddr_entry *addr, *temp; /* Empty the bind address list. 
*/ - list_for_each_safe(pos, temp, &bp->address_list) { - addr = list_entry(pos, struct sctp_sockaddr_entry, list); - list_del(pos); - kfree(addr); + list_for_each_entry_safe(addr, temp, &bp->address_list, list) { + list_del_rcu(&addr->list); + call_rcu(&addr->rcu, sctp_local_addr_free); SCTP_DBG_OBJCNT_DEC(addr); } } diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index e58f9476f29c5..dec012d32ff55 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -1204,7 +1204,7 @@ SCTP_STATIC __init int sctp_init(void) if ((sctp_assoc_hashsize > (64 * 1024)) && order > 0) continue; sctp_assoc_hashtable = (struct sctp_hashbucket *) - __get_free_pages(GFP_ATOMIC, order); + __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order); } while (!sctp_assoc_hashtable && --order > 0); if (!sctp_assoc_hashtable) { pr_err("Failed association hash alloc\n"); @@ -1237,7 +1237,7 @@ SCTP_STATIC __init int sctp_init(void) if ((sctp_port_hashsize > (64 * 1024)) && order > 0) continue; sctp_port_hashtable = (struct sctp_bind_hashbucket *) - __get_free_pages(GFP_ATOMIC, order); + __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order); } while (!sctp_port_hashtable && --order > 0); if (!sctp_port_hashtable) { pr_err("Failed bind hash alloc\n"); diff --git a/net/socket.c b/net/socket.c index 06d835d3dab9e..bf86fb4820ff6 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2120,14 +2120,16 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, */ if (MSG_CMSG_COMPAT & flags) { err = __sys_recvmsg(sock, (struct msghdr __user *)compat_entry, - &msg_sys, flags, datagrams); + &msg_sys, flags & ~MSG_WAITFORONE, + datagrams); if (err < 0) break; err = __put_user(err, &compat_entry->msg_len); ++compat_entry; } else { err = __sys_recvmsg(sock, (struct msghdr __user *)entry, - &msg_sys, flags, datagrams); + &msg_sys, flags & ~MSG_WAITFORONE, + datagrams); if (err < 0) break; err = put_user(err, &entry->msg_len); @@ -2586,23 +2588,123 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32) static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) { + struct compat_ethtool_rxnfc __user *compat_rxnfc; + bool convert_in = false, convert_out = false; + size_t buf_size = ALIGN(sizeof(struct ifreq), 8); + struct ethtool_rxnfc __user *rxnfc; struct ifreq __user *ifr; + u32 rule_cnt = 0, actual_rule_cnt; + u32 ethcmd; u32 data; - void __user *datap; + int ret; + + if (get_user(data, &ifr32->ifr_ifru.ifru_data)) + return -EFAULT; - ifr = compat_alloc_user_space(sizeof(*ifr)); + compat_rxnfc = compat_ptr(data); - if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) + if (get_user(ethcmd, &compat_rxnfc->cmd)) return -EFAULT; - if (get_user(data, &ifr32->ifr_ifru.ifru_data)) + /* Most ethtool structures are defined without padding. + * Unfortunately struct ethtool_rxnfc is an exception. 
+ */ + switch (ethcmd) { + default: + break; + case ETHTOOL_GRXCLSRLALL: + /* Buffer size is variable */ + if (get_user(rule_cnt, &compat_rxnfc->rule_cnt)) + return -EFAULT; + if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32)) + return -ENOMEM; + buf_size += rule_cnt * sizeof(u32); + /* fall through */ + case ETHTOOL_GRXRINGS: + case ETHTOOL_GRXCLSRLCNT: + case ETHTOOL_GRXCLSRULE: + convert_out = true; + /* fall through */ + case ETHTOOL_SRXCLSRLDEL: + case ETHTOOL_SRXCLSRLINS: + buf_size += sizeof(struct ethtool_rxnfc); + convert_in = true; + break; + } + + ifr = compat_alloc_user_space(buf_size); + rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8); + + if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) return -EFAULT; - datap = compat_ptr(data); - if (put_user(datap, &ifr->ifr_ifru.ifru_data)) + if (put_user(convert_in ? rxnfc : compat_ptr(data), + &ifr->ifr_ifru.ifru_data)) return -EFAULT; - return dev_ioctl(net, SIOCETHTOOL, ifr); + if (convert_in) { + /* We expect there to be holes between fs.m_u and + * fs.ring_cookie and at the end of fs, but nowhere else. + */ + BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_u) + + sizeof(compat_rxnfc->fs.m_u) != + offsetof(struct ethtool_rxnfc, fs.m_u) + + sizeof(rxnfc->fs.m_u)); + BUILD_BUG_ON( + offsetof(struct compat_ethtool_rxnfc, fs.location) - + offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) != + offsetof(struct ethtool_rxnfc, fs.location) - + offsetof(struct ethtool_rxnfc, fs.ring_cookie)); + + if (copy_in_user(rxnfc, compat_rxnfc, + (void *)(&rxnfc->fs.m_u + 1) - + (void *)rxnfc) || + copy_in_user(&rxnfc->fs.ring_cookie, + &compat_rxnfc->fs.ring_cookie, + (void *)(&rxnfc->fs.location + 1) - + (void *)&rxnfc->fs.ring_cookie) || + copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt, + sizeof(rxnfc->rule_cnt))) + return -EFAULT; + } + + ret = dev_ioctl(net, SIOCETHTOOL, ifr); + if (ret) + return ret; + + if (convert_out) { + if (copy_in_user(compat_rxnfc, rxnfc, + (const void *)(&rxnfc->fs.m_u + 1) - + (const void *)rxnfc) || + copy_in_user(&compat_rxnfc->fs.ring_cookie, + &rxnfc->fs.ring_cookie, + (const void *)(&rxnfc->fs.location + 1) - + (const void *)&rxnfc->fs.ring_cookie) || + copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt, + sizeof(rxnfc->rule_cnt))) + return -EFAULT; + + if (ethcmd == ETHTOOL_GRXCLSRLALL) { + /* As an optimisation, we only copy the actual + * number of rules that the underlying + * function returned. Since Mallory might + * change the rule count in user memory, we + * check that it is less than the rule count + * originally given (as the user buffer size), + * which has been range-checked. + */ + if (get_user(actual_rule_cnt, &rxnfc->rule_cnt)) + return -EFAULT; + if (actual_rule_cnt < rule_cnt) + rule_cnt = actual_rule_cnt; + if (copy_in_user(&compat_rxnfc->rule_locs[0], + &rxnfc->rule_locs[0], + rule_cnt * sizeof(u32))) + return -EFAULT; + } + } + + return 0; } static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32) diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 67e31276682ab..cd6e4aa19dbfc 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -326,10 +326,12 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan) * Run memory cache shrinker. 
*/ static int -rpcauth_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) +rpcauth_cache_shrinker(struct shrinker *shrink, struct shrink_control *sc) { LIST_HEAD(free); int res; + int nr_to_scan = sc->nr_to_scan; + gfp_t gfp_mask = sc->gfp_mask; if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL) return (nr_to_scan == 0) ? 0 : -1; diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index f375decc024b4..778e5dfc51449 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -427,7 +427,7 @@ static int context_derive_keys_rc4(struct krb5_ctx *ctx) { struct crypto_hash *hmac; - static const char sigkeyconstant[] = "signaturekey"; + char sigkeyconstant[] = "signaturekey"; int slen = strlen(sigkeyconstant) + 1; /* include null terminator */ struct hash_desc desc; struct scatterlist sg[1]; diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 57d344cf2256b..35d046b588f3f 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -436,7 +436,9 @@ void rpc_killall_tasks(struct rpc_clnt *clnt) if (!(rovr->tk_flags & RPC_TASK_KILLED)) { rovr->tk_flags |= RPC_TASK_KILLED; rpc_exit(rovr, -EIO); - rpc_wake_up_queued_task(rovr->tk_waitqueue, rovr); + if (RPC_IS_QUEUED(rovr)) + rpc_wake_up_queued_task(rovr->tk_waitqueue, + rovr); } } spin_unlock(&clnt->cl_lock); diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 59e599498e37f..17c3e3aade292 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -637,14 +637,12 @@ static void __rpc_execute(struct rpc_task *task) save_callback = task->tk_callback; task->tk_callback = NULL; save_callback(task); - } - - /* - * Perform the next FSM step. - * tk_action may be NULL when the task has been killed - * by someone else. - */ - if (!RPC_IS_QUEUED(task)) { + } else { + /* + * Perform the next FSM step. + * tk_action may be NULL when the task has been killed + * by someone else. + */ if (task->tk_action == NULL) break; task->tk_action(task); diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index be96d429b475f..3e0b5f146b059 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -710,6 +710,8 @@ static void xs_reset_transport(struct sock_xprt *transport) if (sk == NULL) return; + transport->srcport = 0; + write_lock_bh(&sk->sk_callback_lock); transport->inet = NULL; transport->sock = NULL; @@ -1342,7 +1344,6 @@ static void xs_tcp_state_change(struct sock *sk) case TCP_CLOSE_WAIT: /* The server initiated a shutdown of the socket */ xprt_force_disconnect(xprt); - case TCP_SYN_SENT: xprt->connect_cookie++; case TCP_CLOSING: /* @@ -1756,6 +1757,7 @@ static void xs_tcp_reuse_connection(struct sock_xprt *transport) static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) { struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); + int ret = -ENOTCONN; if (!transport->inet) { struct sock *sk = sock->sk; @@ -1787,12 +1789,22 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) } if (!xprt_bound(xprt)) - return -ENOTCONN; + goto out; /* Tell the socket layer to start connecting... */ xprt->stat.connect_count++; xprt->stat.connect_start = jiffies; - return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK); + ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK); + switch (ret) { + case 0: + case -EINPROGRESS: + /* SYN_SENT! 
*/ + xprt->connect_cookie++; + if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) + xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; + } +out: + return ret; } /** diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 437a99e560e1b..7a79ad0ecf4a7 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -524,6 +524,8 @@ static int unix_dgram_connect(struct socket *, struct sockaddr *, int, int); static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *, struct msghdr *, size_t); +static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *, + struct msghdr *, size_t, int); static const struct proto_ops unix_stream_ops = { .family = PF_UNIX, @@ -583,7 +585,7 @@ static const struct proto_ops unix_seqpacket_ops = { .setsockopt = sock_no_setsockopt, .getsockopt = sock_no_getsockopt, .sendmsg = unix_seqpacket_sendmsg, - .recvmsg = unix_dgram_recvmsg, + .recvmsg = unix_seqpacket_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; @@ -1695,6 +1697,18 @@ static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock, return unix_dgram_sendmsg(kiocb, sock, msg, len); } +static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock, + struct msghdr *msg, size_t size, + int flags) +{ + struct sock *sk = sock->sk; + + if (sk->sk_state != TCP_ESTABLISHED) + return -ENOTCONN; + + return unix_dgram_recvmsg(iocb, sock, msg, size, flags); +} + static void unix_copy_addr(struct msghdr *msg, struct sock *sk) { struct unix_sock *u = unix_sk(sk); diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig index 1f1ef70f34f2d..8e2a668c9230e 100644 --- a/net/wireless/Kconfig +++ b/net/wireless/Kconfig @@ -159,3 +159,14 @@ config LIB80211_DEBUG from lib80211. If unsure, say N. + +config CFG80211_ALLOW_RECONNECT + bool "Allow reconnect while already connected" + depends on CFG80211 + default n + help + cfg80211 stack doesn't allow to connect if you are already + connected. This option allows to make a connection in this case. + + Select this option ONLY for wlan drivers that are specifically + built for such purposes. 
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index aa5df8865ff75..16881fea4ce6e 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -770,6 +770,15 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr, } EXPORT_SYMBOL(cfg80211_new_sta); +void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp) +{ + struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + + nl80211_send_sta_del_event(rdev, dev, mac_addr, gfp); +} +EXPORT_SYMBOL(cfg80211_del_sta); + struct cfg80211_mgmt_registration { struct list_head list; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 9b62710891a2b..bbf42de6be527 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1679,14 +1679,6 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info) if (err) goto out; - if (!(rdev->wiphy.flags & - WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS)) { - if (!key.def_uni || !key.def_multi) { - err = -EOPNOTSUPP; - goto out; - } - } - err = rdev->ops->set_default_key(&rdev->wiphy, dev, key.idx, key.def_uni, key.def_multi); @@ -1968,13 +1960,41 @@ static int parse_station_flags(struct genl_info *info, return 0; } +static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info, + int attr) +{ + struct nlattr *rate; + u16 bitrate; + + rate = nla_nest_start(msg, attr); + if (!rate) + goto nla_put_failure; + + /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */ + bitrate = cfg80211_calculate_bitrate(info); + if (bitrate > 0) + NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate); + + if (info->flags & RATE_INFO_FLAGS_MCS) + NLA_PUT_U8(msg, NL80211_RATE_INFO_MCS, info->mcs); + if (info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) + NLA_PUT_FLAG(msg, NL80211_RATE_INFO_40_MHZ_WIDTH); + if (info->flags & RATE_INFO_FLAGS_SHORT_GI) + NLA_PUT_FLAG(msg, NL80211_RATE_INFO_SHORT_GI); + + nla_nest_end(msg, rate); + return true; + +nla_put_failure: + return false; +} + static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, int flags, struct net_device *dev, const u8 *mac_addr, struct station_info *sinfo) { void *hdr; - struct nlattr *sinfoattr, *txrate; - u16 bitrate; + struct nlattr *sinfoattr, *bss_param; hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION); if (!hdr) @@ -1988,6 +2008,9 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO); if (!sinfoattr) goto nla_put_failure; + if (sinfo->filled & STATION_INFO_CONNECTED_TIME) + NLA_PUT_U32(msg, NL80211_STA_INFO_CONNECTED_TIME, + sinfo->connected_time); if (sinfo->filled & STATION_INFO_INACTIVE_TIME) NLA_PUT_U32(msg, NL80211_STA_INFO_INACTIVE_TIME, sinfo->inactive_time); @@ -2013,24 +2036,14 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL_AVG, sinfo->signal_avg); if (sinfo->filled & STATION_INFO_TX_BITRATE) { - txrate = nla_nest_start(msg, NL80211_STA_INFO_TX_BITRATE); - if (!txrate) + if (!nl80211_put_sta_rate(msg, &sinfo->txrate, + NL80211_STA_INFO_TX_BITRATE)) + goto nla_put_failure; + } + if (sinfo->filled & STATION_INFO_RX_BITRATE) { + if (!nl80211_put_sta_rate(msg, &sinfo->rxrate, + NL80211_STA_INFO_RX_BITRATE)) goto nla_put_failure; - - /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */ - bitrate = cfg80211_calculate_bitrate(&sinfo->txrate); - if (bitrate > 0) - NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate); - - if (sinfo->txrate.flags & 
RATE_INFO_FLAGS_MCS) - NLA_PUT_U8(msg, NL80211_RATE_INFO_MCS, - sinfo->txrate.mcs); - if (sinfo->txrate.flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) - NLA_PUT_FLAG(msg, NL80211_RATE_INFO_40_MHZ_WIDTH); - if (sinfo->txrate.flags & RATE_INFO_FLAGS_SHORT_GI) - NLA_PUT_FLAG(msg, NL80211_RATE_INFO_SHORT_GI); - - nla_nest_end(msg, txrate); } if (sinfo->filled & STATION_INFO_RX_PACKETS) NLA_PUT_U32(msg, NL80211_STA_INFO_RX_PACKETS, @@ -2044,8 +2057,31 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, if (sinfo->filled & STATION_INFO_TX_FAILED) NLA_PUT_U32(msg, NL80211_STA_INFO_TX_FAILED, sinfo->tx_failed); + if (sinfo->filled & STATION_INFO_BSS_PARAM) { + bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM); + if (!bss_param) + goto nla_put_failure; + + if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_CTS_PROT) + NLA_PUT_FLAG(msg, NL80211_STA_BSS_PARAM_CTS_PROT); + if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_PREAMBLE) + NLA_PUT_FLAG(msg, NL80211_STA_BSS_PARAM_SHORT_PREAMBLE); + if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_SLOT_TIME) + NLA_PUT_FLAG(msg, + NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME); + NLA_PUT_U8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD, + sinfo->bss_param.dtim_period); + NLA_PUT_U16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL, + sinfo->bss_param.beacon_interval); + + nla_nest_end(msg, bss_param); + } nla_nest_end(msg, sinfoattr); + if (sinfo->filled & STATION_INFO_ASSOC_REQ_IES) + NLA_PUT(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len, + sinfo->assoc_req_ies); + return genlmsg_end(msg, hdr); nla_put_failure: @@ -5946,6 +5982,40 @@ void nl80211_send_sta_event(struct cfg80211_registered_device *rdev, nl80211_mlme_mcgrp.id, gfp); } +void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev, + struct net_device *dev, const u8 *mac_addr, + gfp_t gfp) +{ + struct sk_buff *msg; + void *hdr; + + msg = nlmsg_new(NLMSG_GOODSIZE, gfp); + if (!msg) + return; + + hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_DEL_STATION); + if (!hdr) { + nlmsg_free(msg); + return; + } + + NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); + NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); + + if (genlmsg_end(msg, hdr) < 0) { + nlmsg_free(msg); + return; + } + + genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, + nl80211_mlme_mcgrp.id, gfp); + return; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + nlmsg_free(msg); +} + int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, struct net_device *netdev, u32 nlpid, int freq, const u8 *buf, size_t len, gfp_t gfp) diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h index e3f7fa886966e..dcac5cd6f0178 100644 --- a/net/wireless/nl80211.h +++ b/net/wireless/nl80211.h @@ -79,6 +79,9 @@ void nl80211_send_remain_on_channel_cancel( void nl80211_send_sta_event(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *mac_addr, struct station_info *sinfo, gfp_t gfp); +void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev, + struct net_device *dev, const u8 *mac_addr, + gfp_t gfp); int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, struct net_device *netdev, u32 nlpid, int freq, diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 37693b6ef23a6..6560d13902da4 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -1747,6 +1747,7 @@ static void restore_alpha2(char *alpha2, bool reset_user) static void restore_regulatory_settings(bool reset_user) { char alpha2[2]; + char world_alpha2[2]; struct reg_beacon *reg_beacon, *btmp; 
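The station-dump refactor above collapses the duplicated TX/RX rate reporting into one helper, nl80211_put_sta_rate(), built around the usual nest-start / put / nest-end sequence. A minimal sketch of that shape follows; it assumes kernel context, and the function name put_rate_nest() and its caller are illustrative placeholders, not part of the patch:

```c
#include <net/netlink.h>
#include <net/cfg80211.h>

/* Sketch only: emit one nested rate blob into an nl80211 message.
 * 'attr' is whichever NL80211_STA_INFO_*_BITRATE attribute the caller
 * wants to fill; on failure the caller cancels and frees the message.
 */
static bool put_rate_nest(struct sk_buff *msg, struct rate_info *info, int attr)
{
	struct nlattr *rate;
	u16 bitrate;

	rate = nla_nest_start(msg, attr);	/* open the nested attribute */
	if (!rate)
		goto nla_put_failure;

	/* reported in units of 100 kbit/s; 0 means "could not compute" */
	bitrate = cfg80211_calculate_bitrate(info);
	if (bitrate > 0)
		NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate);

	/* the NLA_PUT_* macros jump to nla_put_failure if the skb is full */
	if (info->flags & RATE_INFO_FLAGS_SHORT_GI)
		NLA_PUT_FLAG(msg, NL80211_RATE_INFO_SHORT_GI);

	nla_nest_end(msg, rate);		/* patch up the nest length */
	return true;

nla_put_failure:
	return false;
}
```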
mutex_lock(&cfg80211_mutex); @@ -1776,11 +1777,13 @@ static void restore_regulatory_settings(bool reset_user) /* First restore to the basic regulatory settings */ cfg80211_regdomain = cfg80211_world_regdom; + world_alpha2[0] = cfg80211_regdomain->alpha2[0]; + world_alpha2[1] = cfg80211_regdomain->alpha2[1]; mutex_unlock(®_mutex); mutex_unlock(&cfg80211_mutex); - regulatory_hint_core(cfg80211_regdomain->alpha2); + regulatory_hint_core(world_alpha2); /* * This restores the ieee80211_regdom module parameter diff --git a/net/wireless/sme.c b/net/wireless/sme.c index e17b0bee6bdc7..11b45e7203737 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -650,8 +650,10 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) return; +#ifndef CONFIG_CFG80211_ALLOW_RECONNECT if (wdev->sme_state != CFG80211_SME_CONNECTED) return; +#endif if (wdev->current_bss) { cfg80211_unhold_bss(wdev->current_bss); @@ -749,8 +751,10 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev, ASSERT_WDEV_LOCK(wdev); +#ifndef CONFIG_CFG80211_ALLOW_RECONNECT if (wdev->sme_state != CFG80211_SME_IDLE) return -EALREADY; +#endif if (WARN_ON(wdev->connect_keys)) { kfree(wdev->connect_keys); diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 6459588befc33..8da2741d7997b 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1778,6 +1778,8 @@ int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl, goto no_transform; } + dst_hold(&xdst->u.dst); + spin_lock_bh(&xfrm_policy_sk_bundle_lock); xdst->u.dst.next = xfrm_policy_sk_bundles; xfrm_policy_sk_bundles = &xdst->u.dst; diff --git a/samples/uhid/Makefile b/samples/uhid/Makefile new file mode 100644 index 0000000000000..c95a696560a7d --- /dev/null +++ b/samples/uhid/Makefile @@ -0,0 +1,10 @@ +# kbuild trick to avoid linker error. Can be omitted if a module is built. +obj- := dummy.o + +# List of programs to build +hostprogs-y := uhid-example + +# Tell kbuild to always build the programs +always := $(hostprogs-y) + +HOSTCFLAGS_uhid-example.o += -I$(objtree)/usr/include diff --git a/samples/uhid/uhid-example.c b/samples/uhid/uhid-example.c new file mode 100644 index 0000000000000..03ce3c059a5e7 --- /dev/null +++ b/samples/uhid/uhid-example.c @@ -0,0 +1,381 @@ +/* + * UHID Example + * + * Copyright (c) 2012 David Herrmann + * + * The code may be used by anyone for any purpose, + * and can serve as a starting point for developing + * applications using uhid. + */ + +/* UHID Example + * This example emulates a basic 3 buttons mouse with wheel over UHID. Run this + * program as root and then use the following keys to control the mouse: + * q: Quit the application + * 1: Toggle left button (down, up, ...) + * 2: Toggle right button + * 3: Toggle middle button + * a: Move mouse left + * d: Move mouse right + * w: Move mouse up + * s: Move mouse down + * r: Move wheel up + * f: Move wheel down + * + * If uhid is not available as /dev/uhid, then you can pass a different path as + * first argument. + * If is not installed in /usr, then compile this with: + * gcc -o ./uhid_test -Wall -I./include ./samples/uhid/uhid-example.c + * And ignore the warning about kernel headers. However, it is recommended to + * use the installed uhid.h if available. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* HID Report Desciptor + * We emulate a basic 3 button mouse with wheel. 
This is the report-descriptor + * as the kernel will parse it: + * + * INPUT[INPUT] + * Field(0) + * Physical(GenericDesktop.Pointer) + * Application(GenericDesktop.Mouse) + * Usage(3) + * Button.0001 + * Button.0002 + * Button.0003 + * Logical Minimum(0) + * Logical Maximum(1) + * Report Size(1) + * Report Count(3) + * Report Offset(0) + * Flags( Variable Absolute ) + * Field(1) + * Physical(GenericDesktop.Pointer) + * Application(GenericDesktop.Mouse) + * Usage(3) + * GenericDesktop.X + * GenericDesktop.Y + * GenericDesktop.Wheel + * Logical Minimum(-128) + * Logical Maximum(127) + * Report Size(8) + * Report Count(3) + * Report Offset(8) + * Flags( Variable Relative ) + * + * This is the mapping that we expect: + * Button.0001 ---> Key.LeftBtn + * Button.0002 ---> Key.RightBtn + * Button.0003 ---> Key.MiddleBtn + * GenericDesktop.X ---> Relative.X + * GenericDesktop.Y ---> Relative.Y + * GenericDesktop.Wheel ---> Relative.Wheel + * + * This information can be verified by reading /sys/kernel/debug/hid//rdesc + * This file should print the same information as showed above. + */ + +static unsigned char rdesc[] = { + 0x05, 0x01, 0x09, 0x02, 0xa1, 0x01, 0x09, 0x01, + 0xa1, 0x00, 0x05, 0x09, 0x19, 0x01, 0x29, 0x03, + 0x15, 0x00, 0x25, 0x01, 0x95, 0x03, 0x75, 0x01, + 0x81, 0x02, 0x95, 0x01, 0x75, 0x05, 0x81, 0x01, + 0x05, 0x01, 0x09, 0x30, 0x09, 0x31, 0x09, 0x38, + 0x15, 0x80, 0x25, 0x7f, 0x75, 0x08, 0x95, 0x03, + 0x81, 0x06, 0xc0, 0xc0, +}; + +static int uhid_write(int fd, const struct uhid_event *ev) +{ + ssize_t ret; + + ret = write(fd, ev, sizeof(*ev)); + if (ret < 0) { + fprintf(stderr, "Cannot write to uhid: %m\n"); + return -errno; + } else if (ret != sizeof(*ev)) { + fprintf(stderr, "Wrong size written to uhid: %ld != %lu\n", + ret, sizeof(ev)); + return -EFAULT; + } else { + return 0; + } +} + +static int create(int fd) +{ + struct uhid_event ev; + + memset(&ev, 0, sizeof(ev)); + ev.type = UHID_CREATE; + strcpy((char*)ev.u.create.name, "test-uhid-device"); + ev.u.create.rd_data = rdesc; + ev.u.create.rd_size = sizeof(rdesc); + ev.u.create.bus = BUS_USB; + ev.u.create.vendor = 0x15d9; + ev.u.create.product = 0x0a37; + ev.u.create.version = 0; + ev.u.create.country = 0; + + return uhid_write(fd, &ev); +} + +static void destroy(int fd) +{ + struct uhid_event ev; + + memset(&ev, 0, sizeof(ev)); + ev.type = UHID_DESTROY; + + uhid_write(fd, &ev); +} + +static int event(int fd) +{ + struct uhid_event ev; + ssize_t ret; + + memset(&ev, 0, sizeof(ev)); + ret = read(fd, &ev, sizeof(ev)); + if (ret == 0) { + fprintf(stderr, "Read HUP on uhid-cdev\n"); + return -EFAULT; + } else if (ret < 0) { + fprintf(stderr, "Cannot read uhid-cdev: %m\n"); + return -errno; + } else if (ret != sizeof(ev)) { + fprintf(stderr, "Invalid size read from uhid-dev: %ld != %lu\n", + ret, sizeof(ev)); + return -EFAULT; + } + + switch (ev.type) { + case UHID_START: + fprintf(stderr, "UHID_START from uhid-dev\n"); + break; + case UHID_STOP: + fprintf(stderr, "UHID_STOP from uhid-dev\n"); + break; + case UHID_OPEN: + fprintf(stderr, "UHID_OPEN from uhid-dev\n"); + break; + case UHID_CLOSE: + fprintf(stderr, "UHID_CLOSE from uhid-dev\n"); + break; + case UHID_OUTPUT: + fprintf(stderr, "UHID_OUTPUT from uhid-dev\n"); + break; + case UHID_OUTPUT_EV: + fprintf(stderr, "UHID_OUTPUT_EV from uhid-dev\n"); + break; + default: + fprintf(stderr, "Invalid event from uhid-dev: %u\n", ev.type); + } + + return 0; +} + +static bool btn1_down; +static bool btn2_down; +static bool btn3_down; +static signed char abs_hor; +static signed char 
abs_ver; +static signed char wheel; + +static int send_event(int fd) +{ + struct uhid_event ev; + + memset(&ev, 0, sizeof(ev)); + ev.type = UHID_INPUT; + ev.u.input.size = 4; + + if (btn1_down) + ev.u.input.data[0] |= 0x1; + if (btn2_down) + ev.u.input.data[0] |= 0x2; + if (btn3_down) + ev.u.input.data[0] |= 0x4; + + ev.u.input.data[1] = abs_hor; + ev.u.input.data[2] = abs_ver; + ev.u.input.data[3] = wheel; + + return uhid_write(fd, &ev); +} + +static int keyboard(int fd) +{ + char buf[128]; + ssize_t ret, i; + + ret = read(STDIN_FILENO, buf, sizeof(buf)); + if (ret == 0) { + fprintf(stderr, "Read HUP on stdin\n"); + return -EFAULT; + } else if (ret < 0) { + fprintf(stderr, "Cannot read stdin: %m\n"); + return -errno; + } + + for (i = 0; i < ret; ++i) { + switch (buf[i]) { + case '1': + btn1_down = !btn1_down; + ret = send_event(fd); + if (ret) + return ret; + break; + case '2': + btn2_down = !btn2_down; + ret = send_event(fd); + if (ret) + return ret; + break; + case '3': + btn3_down = !btn3_down; + ret = send_event(fd); + if (ret) + return ret; + break; + case 'a': + abs_hor = -20; + ret = send_event(fd); + abs_hor = 0; + if (ret) + return ret; + break; + case 'd': + abs_hor = 20; + ret = send_event(fd); + abs_hor = 0; + if (ret) + return ret; + break; + case 'w': + abs_ver = -20; + ret = send_event(fd); + abs_ver = 0; + if (ret) + return ret; + break; + case 's': + abs_ver = 20; + ret = send_event(fd); + abs_ver = 0; + if (ret) + return ret; + break; + case 'r': + wheel = 1; + ret = send_event(fd); + wheel = 0; + if (ret) + return ret; + break; + case 'f': + wheel = -1; + ret = send_event(fd); + wheel = 0; + if (ret) + return ret; + break; + case 'q': + return -ECANCELED; + default: + fprintf(stderr, "Invalid input: %c\n", buf[i]); + } + } + + return 0; +} + +int main(int argc, char **argv) +{ + int fd; + const char *path = "/dev/uhid"; + struct pollfd pfds[2]; + int ret; + struct termios state; + + ret = tcgetattr(STDIN_FILENO, &state); + if (ret) { + fprintf(stderr, "Cannot get tty state\n"); + } else { + state.c_lflag &= ~ICANON; + state.c_cc[VMIN] = 1; + ret = tcsetattr(STDIN_FILENO, TCSANOW, &state); + if (ret) + fprintf(stderr, "Cannot set tty state\n"); + } + + if (argc >= 2) { + if (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help")) { + fprintf(stderr, "Usage: %s [%s]\n", argv[0], path); + return EXIT_SUCCESS; + } else { + path = argv[1]; + } + } + + fprintf(stderr, "Open uhid-cdev %s\n", path); + fd = open(path, O_RDWR | O_CLOEXEC); + if (fd < 0) { + fprintf(stderr, "Cannot open uhid-cdev %s: %m\n", path); + return EXIT_FAILURE; + } + + fprintf(stderr, "Create uhid device\n"); + ret = create(fd); + if (ret) { + close(fd); + return EXIT_FAILURE; + } + + pfds[0].fd = STDIN_FILENO; + pfds[0].events = POLLIN; + pfds[1].fd = fd; + pfds[1].events = POLLIN; + + fprintf(stderr, "Press 'q' to quit...\n"); + while (1) { + ret = poll(pfds, 2, -1); + if (ret < 0) { + fprintf(stderr, "Cannot poll for fds: %m\n"); + break; + } + if (pfds[0].revents & POLLHUP) { + fprintf(stderr, "Received HUP on stdin\n"); + break; + } + if (pfds[1].revents & POLLHUP) { + fprintf(stderr, "Received HUP on uhid-cdev\n"); + break; + } + + if (pfds[0].revents & POLLIN) { + ret = keyboard(fd); + if (ret) + break; + } + if (pfds[1].revents & POLLIN) { + ret = event(fd); + if (ret) + break; + } + } + + fprintf(stderr, "Destroy uhid device\n"); + destroy(fd); + return EXIT_SUCCESS; +} diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index ed2773edfe71b..ba25c440f9f6a 100644 --- 
a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -118,6 +118,11 @@ cc-option-yn = $(call try-run,\ cc-option-align = $(subst -functions=0,,\ $(call cc-option,-falign-functions=0,-malign-functions=0)) +# cc-disable-warning +# Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable) +cc-disable-warning = $(call try-run,\ + $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -xc /dev/null -o "$$TMP",-Wno-$(strip $(1))) + # cc-version # Usage gcc-ver := $(call cc-version) cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC)) diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 4eb99ab340537..d4ba0734822c9 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -49,6 +49,56 @@ ifeq ($(KBUILD_NOPEDANTIC),) $(error CFLAGS was changed in "$(kbuild-file)". Fix it to use EXTRA_CFLAGS) endif endif + +# +# make W=... settings +# +# W=1 - warnings that may be relevant and does not occur too often +# W=2 - warnings that occur quite often but may still be relevant +# W=3 - the more obscure warnings, can most likely be ignored +# +# $(call cc-option, -W...) handles gcc -W.. options which +# are not supported by all versions of the compiler +ifdef KBUILD_ENABLE_EXTRA_GCC_CHECKS +warning- := $(empty) + +warning-1 := -Wextra -Wunused -Wno-unused-parameter +warning-1 += -Wmissing-declarations +warning-1 += -Wmissing-format-attribute +warning-1 += -Wmissing-prototypes +warning-1 += -Wold-style-definition +warning-1 += $(call cc-option, -Wmissing-include-dirs) +warning-1 += $(call cc-option, -Wunused-but-set-variable) + +warning-2 := -Waggregate-return +warning-2 += -Wcast-align +warning-2 += -Wdisabled-optimization +warning-2 += -Wnested-externs +warning-2 += -Wshadow +warning-2 += $(call cc-option, -Wlogical-op) + +warning-3 := -Wbad-function-cast +warning-3 += -Wcast-qual +warning-3 += -Wconversion +warning-3 += -Wpacked +warning-3 += -Wpadded +warning-3 += -Wpointer-arith +warning-3 += -Wredundant-decls +warning-3 += -Wswitch-default +warning-3 += $(call cc-option, -Wpacked-bitfield-compat) +warning-3 += $(call cc-option, -Wvla) + +warning := $(warning-$(findstring 1, $(KBUILD_ENABLE_EXTRA_GCC_CHECKS))) +warning += $(warning-$(findstring 2, $(KBUILD_ENABLE_EXTRA_GCC_CHECKS))) +warning += $(warning-$(findstring 3, $(KBUILD_ENABLE_EXTRA_GCC_CHECKS))) + +ifeq ("$(strip $(warning))","") + $(error W=$(KBUILD_ENABLE_EXTRA_GCC_CHECKS) is unknown) +endif + +KBUILD_CFLAGS += $(warning) +endif + include scripts/Makefile.lib ifdef host-progs @@ -403,7 +453,6 @@ ifneq ($(cmd_files),) include $(cmd_files) endif - # Declare the contents of the .PHONY variable as phony. We keep that # information in a variable se we can use it in if_changed and friends. diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 4c0383da1c9a2..58434b346b0ff 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -2809,9 +2809,9 @@ sub process { WARN("consider using a completion\n" . $herecurr); } -# recommend strict_strto* over simple_strto* +# recommend kstrto* over simple_strto* if ($line =~ /\bsimple_(strto.*?)\s*\(/) { - WARN("consider using strict_$1 in preference to simple_$1\n" . $herecurr); + WARN("consider using kstrto* in preference to simple_$1\n" . 
$herecurr); } # check for __initcall(), use device_initcall() explicitly please if ($line =~ /^.\s*__initcall\s*\(/) { diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c index 659326c3e8957..006ad817cd5f0 100644 --- a/scripts/kconfig/conf.c +++ b/scripts/kconfig/conf.c @@ -332,7 +332,7 @@ static int conf_choice(struct menu *menu) } if (!child) continue; - if (line[strlen(line) - 1] == '?') { + if (line[0] && line[strlen(line) - 1] == '?') { print_help(child); continue; } diff --git a/scripts/module-common.lds b/scripts/module-common.lds index 47a1f9ae0edeb..0865b3e752be8 100644 --- a/scripts/module-common.lds +++ b/scripts/module-common.lds @@ -5,4 +5,15 @@ */ SECTIONS { /DISCARD/ : { *(.discard) } + + __ksymtab : { *(SORT(___ksymtab+*)) } + __ksymtab_gpl : { *(SORT(___ksymtab_gpl+*)) } + __ksymtab_unused : { *(SORT(___ksymtab_unused+*)) } + __ksymtab_unused_gpl : { *(SORT(___ksymtab_unused_gpl+*)) } + __ksymtab_gpl_future : { *(SORT(___ksymtab_gpl_future+*)) } + __kcrctab : { *(SORT(___kcrctab+*)) } + __kcrctab_gpl : { *(SORT(___kcrctab_gpl+*)) } + __kcrctab_unused : { *(SORT(___kcrctab_unused+*)) } + __kcrctab_unused_gpl : { *(SORT(___kcrctab_unused_gpl+*)) } + __kcrctab_gpl_future : { *(SORT(___kcrctab_gpl_future+*)) } } diff --git a/scripts/setlocalversion b/scripts/setlocalversion index ef8729f48586d..c421b69729176 100755 --- a/scripts/setlocalversion +++ b/scripts/setlocalversion @@ -166,7 +166,6 @@ else # LOCALVERSION= is not specified if test "${LOCALVERSION+set}" != "set"; then scm=$(scm_version --short) - res="$res${scm:++}" fi fi diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index b7106f192b75d..e2e902f39ac2f 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -592,7 +592,8 @@ static int apparmor_setprocattr(struct task_struct *task, char *name, sa.aad.op = OP_SETPROCATTR; sa.aad.info = name; sa.aad.error = -EINVAL; - return aa_audit(AUDIT_APPARMOR_DENIED, NULL, GFP_KERNEL, + return aa_audit(AUDIT_APPARMOR_DENIED, + __aa_current_profile(), GFP_KERNEL, &sa, NULL); } } else if (strcmp(name, "exec") == 0) { diff --git a/security/commoncap.c b/security/commoncap.c index d30c4684654ee..356a415dd2b8d 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -103,7 +103,7 @@ int cap_capable(struct task_struct *tsk, const struct cred *cred, int cap, * Determine whether the current process may set the system clock and timezone * information, returning 0 if permission granted, -ve if denied. 
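The checkpatch.pl change above steers submitters toward the kstrto* helpers instead of simple_strto*: kstrtoul() and friends return 0 or a negative errno and reject trailing garbage, whereas simple_strtoul() silently stops at the first non-digit. A minimal before/after sketch of the conversion being asked for (parse_count(), buf and val are made-up names, not from the patch):

```c
#include <linux/kernel.h>

/* Hypothetical sysfs-style parse helper, only to illustrate the switch. */
static int parse_count(const char *buf, unsigned long *val)
{
	/* old style: no error reporting, "12abc" is quietly accepted as 12 */
	/* *val = simple_strtoul(buf, NULL, 10); return 0; */

	/* new style: 0 on success, -EINVAL/-ERANGE otherwise; a single
	 * trailing newline is tolerated, anything else is rejected
	 */
	return kstrtoul(buf, 10, val);
}
```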
*/ -int cap_settime(struct timespec *ts, struct timezone *tz) +int cap_settime(const struct timespec *ts, const struct timezone *tz) { if (!capable(CAP_SYS_TIME)) return -EPERM; diff --git a/security/security.c b/security/security.c index 7b7308ace8c5b..bb33ecadcf958 100644 --- a/security/security.c +++ b/security/security.c @@ -201,7 +201,7 @@ int security_syslog(int type) return security_ops->syslog(type); } -int security_settime(struct timespec *ts, struct timezone *tz) +int security_settime(const struct timespec *ts, const struct timezone *tz) { return security_ops->settime(ts, tz); } diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index 57363562f0f88..f96f09cdd0366 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c @@ -497,7 +497,7 @@ static int policydb_index(struct policydb *p) goto out; rc = flex_array_prealloc(p->type_val_to_struct_array, 0, - p->p_types.nprim - 1, GFP_KERNEL | __GFP_ZERO); + p->p_types.nprim, GFP_KERNEL | __GFP_ZERO); if (rc) goto out; @@ -514,7 +514,7 @@ static int policydb_index(struct policydb *p) goto out; rc = flex_array_prealloc(p->sym_val_to_name[i], - 0, p->symtab[i].nprim - 1, + 0, p->symtab[i].nprim, GFP_KERNEL | __GFP_ZERO); if (rc) goto out; @@ -2286,7 +2286,7 @@ int policydb_read(struct policydb *p, void *fp) goto bad; /* preallocate so we don't have to worry about the put ever failing */ - rc = flex_array_prealloc(p->type_attr_map_array, 0, p->p_types.nprim - 1, + rc = flex_array_prealloc(p->type_attr_map_array, 0, p->p_types.nprim, GFP_KERNEL | __GFP_ZERO); if (rc) goto bad; diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c index 9d32f182301ee..cb09f1fce9109 100644 --- a/security/tomoyo/file.c +++ b/security/tomoyo/file.c @@ -927,7 +927,7 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain, struct path *path, const int flag) { const u8 acc_mode = ACC_MODE(flag); - int error = -ENOMEM; + int error = 0; struct tomoyo_path_info buf; struct tomoyo_request_info r; int idx; @@ -938,9 +938,6 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain, buf.name = NULL; r.mode = TOMOYO_CONFIG_DISABLED; idx = tomoyo_read_lock(); - if (!tomoyo_get_realpath(&buf, path)) - goto out; - error = 0; /* * If the filename is specified by "deny_rewrite" keyword, * we need to check "allow_rewrite" permission when the filename is not diff --git a/sound/core/init.c b/sound/core/init.c index 3e65da21a08c0..a0080aa45ae96 100644 --- a/sound/core/init.c +++ b/sound/core/init.c @@ -848,6 +848,7 @@ int snd_card_file_add(struct snd_card *card, struct file *file) return -ENOMEM; mfile->file = file; mfile->disconnected_f_op = NULL; + INIT_LIST_HEAD(&mfile->shutdown_list); spin_lock(&card->files_lock); if (card->shutdown) { spin_unlock(&card->files_lock); @@ -883,6 +884,9 @@ int snd_card_file_remove(struct snd_card *card, struct file *file) list_for_each_entry(mfile, &card->files_list, list) { if (mfile->file == file) { list_del(&mfile->list); + spin_lock(&shutdown_lock); + list_del(&mfile->shutdown_list); + spin_unlock(&shutdown_lock); if (mfile->disconnected_f_op) fops_put(mfile->disconnected_f_op); found = mfile; diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index a82e3756a72d8..64449cb8f8737 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -375,6 +375,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream, } if (runtime->no_period_wakeup) { + snd_pcm_sframes_t xrun_threshold; /* * Without regular period interrupts, we have to check * 
the elapsed time to detect xruns. @@ -383,7 +384,8 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream, if (jdelta < runtime->hw_ptr_buffer_jiffies / 2) goto no_delta_check; hdelta = jdelta - delta * HZ / runtime->rate; - while (hdelta > runtime->hw_ptr_buffer_jiffies / 2 + 1) { + xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1; + while (hdelta > xrun_threshold) { delta += runtime->buffer_size; hw_base += runtime->buffer_size; if (hw_base >= runtime->boundary) diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 4be45e7be8ad0..6848dd9c70a91 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -3201,15 +3201,6 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem); #endif /* SNDRV_PCM_INFO_MMAP */ -/* mmap callback with pgprot_noncached */ -int snd_pcm_lib_mmap_noncached(struct snd_pcm_substream *substream, - struct vm_area_struct *area) -{ - area->vm_page_prot = pgprot_noncached(area->vm_page_prot); - return snd_pcm_default_mmap(substream, area); -} -EXPORT_SYMBOL(snd_pcm_lib_mmap_noncached); - /* * mmap DMA buffer */ diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c index 12b44b0b67771..a0da7755fceaa 100644 --- a/sound/drivers/aloop.c +++ b/sound/drivers/aloop.c @@ -482,8 +482,9 @@ static unsigned int loopback_pos_update(struct loopback_cable *cable) cable->streams[SNDRV_PCM_STREAM_CAPTURE]; unsigned long delta_play = 0, delta_capt = 0; unsigned int running; + unsigned long flags; - spin_lock(&cable->lock); + spin_lock_irqsave(&cable->lock, flags); running = cable->running ^ cable->pause; if (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) { delta_play = jiffies - dpcm_play->last_jiffies; @@ -495,10 +496,8 @@ static unsigned int loopback_pos_update(struct loopback_cable *cable) dpcm_capt->last_jiffies += delta_capt; } - if (delta_play == 0 && delta_capt == 0) { - spin_unlock(&cable->lock); - return running; - } + if (delta_play == 0 && delta_capt == 0) + goto unlock; if (delta_play > delta_capt) { loopback_bytepos_update(dpcm_play, delta_play - delta_capt, @@ -510,14 +509,14 @@ static unsigned int loopback_pos_update(struct loopback_cable *cable) delta_capt = delta_play; } - if (delta_play == 0 && delta_capt == 0) { - spin_unlock(&cable->lock); - return running; - } + if (delta_play == 0 && delta_capt == 0) + goto unlock; + /* note delta_capt == delta_play at this moment */ loopback_bytepos_update(dpcm_capt, delta_capt, BYTEPOS_UPDATE_COPY); loopback_bytepos_update(dpcm_play, delta_play, BYTEPOS_UPDATE_POSONLY); - spin_unlock(&cable->lock); + unlock: + spin_unlock_irqrestore(&cable->lock, flags); return running; } diff --git a/sound/oss/dev_table.h b/sound/oss/dev_table.h index b7617bee6388f..0199a317c5a9a 100644 --- a/sound/oss/dev_table.h +++ b/sound/oss/dev_table.h @@ -271,7 +271,7 @@ struct synth_operations void (*reset) (int dev); void (*hw_control) (int dev, unsigned char *event); int (*load_patch) (int dev, int format, const char __user *addr, - int offs, int count, int pmgr_flag); + int count, int pmgr_flag); void (*aftertouch) (int dev, int voice, int pressure); void (*controller) (int dev, int voice, int ctrl_num, int value); void (*panning) (int dev, int voice, int value); diff --git a/sound/oss/midi_synth.c b/sound/oss/midi_synth.c index 3c09374ea5bf1..2292c230d7e6f 100644 --- a/sound/oss/midi_synth.c +++ b/sound/oss/midi_synth.c @@ -476,7 +476,7 @@ EXPORT_SYMBOL(midi_synth_hw_control); int midi_synth_load_patch(int dev, int format, const char __user *addr, - 
int offs, int count, int pmgr_flag) + int count, int pmgr_flag) { int orig_dev = synth_devs[dev]->midi_dev; @@ -491,33 +491,29 @@ midi_synth_load_patch(int dev, int format, const char __user *addr, if (!prefix_cmd(orig_dev, 0xf0)) return 0; + /* Invalid patch format */ if (format != SYSEX_PATCH) - { -/* printk("MIDI Error: Invalid patch format (key) 0x%x\n", format);*/ return -EINVAL; - } + + /* Patch header too short */ if (count < hdr_size) - { -/* printk("MIDI Error: Patch header too short\n");*/ return -EINVAL; - } + count -= hdr_size; /* - * Copy the header from user space but ignore the first bytes which have - * been transferred already. + * Copy the header from user space */ - if(copy_from_user(&((char *) &sysex)[offs], &(addr)[offs], hdr_size - offs)) + if (copy_from_user(&sysex, addr, hdr_size)) return -EFAULT; - - if (count < sysex.len) - { -/* printk(KERN_WARNING "MIDI Warning: Sysex record too short (%d<%d)\n", count, (int) sysex.len);*/ + + /* Sysex record too short */ + if ((unsigned)count < (unsigned)sysex.len) sysex.len = count; - } - left = sysex.len; - src_offs = 0; + + left = sysex.len; + src_offs = 0; for (i = 0; i < left && !signal_pending(current); i++) { diff --git a/sound/oss/midi_synth.h b/sound/oss/midi_synth.h index 6bc9d00bc77c4..b64ddd6c4abc0 100644 --- a/sound/oss/midi_synth.h +++ b/sound/oss/midi_synth.h @@ -8,7 +8,7 @@ int midi_synth_open (int dev, int mode); void midi_synth_close (int dev); void midi_synth_hw_control (int dev, unsigned char *event); int midi_synth_load_patch (int dev, int format, const char __user * addr, - int offs, int count, int pmgr_flag); + int count, int pmgr_flag); void midi_synth_panning (int dev, int channel, int pressure); void midi_synth_aftertouch (int dev, int channel, int pressure); void midi_synth_controller (int dev, int channel, int ctrl_num, int value); diff --git a/sound/oss/opl3.c b/sound/oss/opl3.c index 938c48c43585e..407cd677950bc 100644 --- a/sound/oss/opl3.c +++ b/sound/oss/opl3.c @@ -820,7 +820,7 @@ static void opl3_hw_control(int dev, unsigned char *event) } static int opl3_load_patch(int dev, int format, const char __user *addr, - int offs, int count, int pmgr_flag) + int count, int pmgr_flag) { struct sbi_instrument ins; @@ -830,11 +830,7 @@ static int opl3_load_patch(int dev, int format, const char __user *addr, return -EINVAL; } - /* - * What the fuck is going on here? We leave junk in the beginning - * of ins and then check the field pretty close to that beginning? 
- */ - if(copy_from_user(&((char *) &ins)[offs], addr + offs, sizeof(ins) - offs)) + if (copy_from_user(&ins, addr, sizeof(ins))) return -EFAULT; if (ins.channel < 0 || ins.channel >= SBFM_MAXINSTR) @@ -849,6 +845,10 @@ static int opl3_load_patch(int dev, int format, const char __user *addr, static void opl3_panning(int dev, int voice, int value) { + + if (voice < 0 || voice >= devc->nr_voice) + return; + devc->voc[voice].panning = value; } @@ -1066,8 +1066,15 @@ static int opl3_alloc_voice(int dev, int chn, int note, struct voice_alloc_info static void opl3_setup_voice(int dev, int voice, int chn) { - struct channel_info *info = - &synth_devs[dev]->chn_info[chn]; + struct channel_info *info; + + if (voice < 0 || voice >= devc->nr_voice) + return; + + if (chn < 0 || chn > 15) + return; + + info = &synth_devs[dev]->chn_info[chn]; opl3_set_instr(dev, voice, info->pgm_num); diff --git a/sound/oss/sequencer.c b/sound/oss/sequencer.c index 5ea1098ac427a..30bcfe470f831 100644 --- a/sound/oss/sequencer.c +++ b/sound/oss/sequencer.c @@ -241,7 +241,7 @@ int sequencer_write(int dev, struct file *file, const char __user *buf, int coun return -ENXIO; fmt = (*(short *) &event_rec[0]) & 0xffff; - err = synth_devs[dev]->load_patch(dev, fmt, buf, p + 4, c, 0); + err = synth_devs[dev]->load_patch(dev, fmt, buf + p, c, 0); if (err < 0) return err; diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c index 22dbd91811a4e..448dd01943f50 100644 --- a/sound/pci/asihpi/hpioctl.c +++ b/sound/pci/asihpi/hpioctl.c @@ -155,6 +155,11 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg) goto out; } + if (hm->h.adapter_index >= HPI_MAX_ADAPTERS) { + err = -EINVAL; + goto out; + } + pa = &adapters[hm->h.adapter_index]; hr->h.size = 0; if (hm->h.object == HPI_OBJ_SUBSYSTEM) { diff --git a/sound/pci/au88x0/au88x0.h b/sound/pci/au88x0/au88x0.h index cf46bba563cf9..ecb8f4daf408f 100644 --- a/sound/pci/au88x0/au88x0.h +++ b/sound/pci/au88x0/au88x0.h @@ -211,7 +211,7 @@ static void vortex_adbdma_startfifo(vortex_t * vortex, int adbdma); //static void vortex_adbdma_stopfifo(vortex_t *vortex, int adbdma); static void vortex_adbdma_pausefifo(vortex_t * vortex, int adbdma); static void vortex_adbdma_resumefifo(vortex_t * vortex, int adbdma); -static int inline vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma); +static inline int vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma); static void vortex_adbdma_resetup(vortex_t *vortex, int adbdma); #ifndef CHIP_AU8810 @@ -219,7 +219,7 @@ static void vortex_wtdma_startfifo(vortex_t * vortex, int wtdma); static void vortex_wtdma_stopfifo(vortex_t * vortex, int wtdma); static void vortex_wtdma_pausefifo(vortex_t * vortex, int wtdma); static void vortex_wtdma_resumefifo(vortex_t * vortex, int wtdma); -static int inline vortex_wtdma_getlinearpos(vortex_t * vortex, int wtdma); +static inline int vortex_wtdma_getlinearpos(vortex_t * vortex, int wtdma); #endif /* global stuff. 
*/ diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c index 16c0bdfbb1648..489150380eac0 100644 --- a/sound/pci/au88x0/au88x0_core.c +++ b/sound/pci/au88x0/au88x0_core.c @@ -1249,7 +1249,7 @@ static void vortex_adbdma_resetup(vortex_t *vortex, int adbdma) { } } -static int inline vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma) +static inline int vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma) { stream_t *dma = &vortex->dma_adb[adbdma]; int temp, page, delta; @@ -1506,7 +1506,7 @@ static int vortex_wtdma_getcursubuffer(vortex_t * vortex, int wtdma) POS_SHIFT) & POS_MASK); } #endif -static int inline vortex_wtdma_getlinearpos(vortex_t * vortex, int wtdma) +static inline int vortex_wtdma_getlinearpos(vortex_t * vortex, int wtdma) { stream_t *dma = &vortex->dma_wt[wtdma]; int temp; diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c index 1bff80cde0a2f..b9321544c31c4 100644 --- a/sound/pci/ctxfi/ctatc.c +++ b/sound/pci/ctxfi/ctatc.c @@ -869,7 +869,7 @@ spdif_passthru_playback_setup(struct ct_atc *atc, struct ct_atc_pcm *apcm) mutex_lock(&atc->atc_mutex); dao->ops->get_spos(dao, &status); if (((status >> 24) & IEC958_AES3_CON_FS) != iec958_con_fs) { - status &= ((~IEC958_AES3_CON_FS) << 24); + status &= ~(IEC958_AES3_CON_FS << 24); status |= (iec958_con_fs << 24); dao->ops->set_spos(dao, status); dao->ops->commit_write(dao); diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c index af56eb949bded..47d9ea97de02e 100644 --- a/sound/pci/ctxfi/ctdaio.c +++ b/sound/pci/ctxfi/ctdaio.c @@ -176,6 +176,7 @@ static int dao_set_left_input(struct dao *dao, struct rsc *input) if (!entry) return -ENOMEM; + dao->ops->clear_left_input(dao); /* Program master and conjugate resources */ input->ops->master(input); daio->rscl.ops->master(&daio->rscl); @@ -204,6 +205,7 @@ static int dao_set_right_input(struct dao *dao, struct rsc *input) if (!entry) return -ENOMEM; + dao->ops->clear_right_input(dao); /* Program master and conjugate resources */ input->ops->master(input); daio->rscr.ops->master(&daio->rscr); diff --git a/sound/pci/ctxfi/ctmixer.c b/sound/pci/ctxfi/ctmixer.c index 15c1e7271ea88..c3519ff42fbbf 100644 --- a/sound/pci/ctxfi/ctmixer.c +++ b/sound/pci/ctxfi/ctmixer.c @@ -566,19 +566,6 @@ static int ct_spdif_get_mask(struct snd_kcontrol *kcontrol, return 0; } -static int ct_spdif_default_get(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - unsigned int status = SNDRV_PCM_DEFAULT_CON_SPDIF; - - ucontrol->value.iec958.status[0] = (status >> 0) & 0xff; - ucontrol->value.iec958.status[1] = (status >> 8) & 0xff; - ucontrol->value.iec958.status[2] = (status >> 16) & 0xff; - ucontrol->value.iec958.status[3] = (status >> 24) & 0xff; - - return 0; -} - static int ct_spdif_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { @@ -586,6 +573,10 @@ static int ct_spdif_get(struct snd_kcontrol *kcontrol, unsigned int status; atc->spdif_out_get_status(atc, &status); + + if (status == 0) + status = SNDRV_PCM_DEFAULT_CON_SPDIF; + ucontrol->value.iec958.status[0] = (status >> 0) & 0xff; ucontrol->value.iec958.status[1] = (status >> 8) & 0xff; ucontrol->value.iec958.status[2] = (status >> 16) & 0xff; @@ -629,7 +620,7 @@ static struct snd_kcontrol_new iec958_default_ctl = { .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT), .count = 1, .info = ct_spdif_info, - .get = ct_spdif_default_get, + .get = ct_spdif_get, .put = ct_spdif_put, .private_value = MIXER_IEC958_DEFAULT }; diff --git a/sound/pci/ens1370.c 
b/sound/pci/ens1370.c index 537cfba829a58..863eafea691f2 100644 --- a/sound/pci/ens1370.c +++ b/sound/pci/ens1370.c @@ -229,6 +229,7 @@ MODULE_PARM_DESC(lineio, "Line In to Rear Out (0 = auto, 1 = force)."); #define ES_REG_1371_CODEC 0x14 /* W/R: Codec Read/Write register address */ #define ES_1371_CODEC_RDY (1<<31) /* codec ready */ #define ES_1371_CODEC_WIP (1<<30) /* codec register access in progress */ +#define EV_1938_CODEC_MAGIC (1<<26) #define ES_1371_CODEC_PIRD (1<<23) /* codec read/write select register */ #define ES_1371_CODEC_WRITE(a,d) ((((a)&0x7f)<<16)|(((d)&0xffff)<<0)) #define ES_1371_CODEC_READS(a) ((((a)&0x7f)<<16)|ES_1371_CODEC_PIRD) @@ -603,12 +604,18 @@ static void snd_es1370_codec_write(struct snd_ak4531 *ak4531, #ifdef CHIP1371 +static inline bool is_ev1938(struct ensoniq *ensoniq) +{ + return ensoniq->pci->device == 0x8938; +} + static void snd_es1371_codec_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val) { struct ensoniq *ensoniq = ac97->private_data; - unsigned int t, x; + unsigned int t, x, flag; + flag = is_ev1938(ensoniq) ? EV_1938_CODEC_MAGIC : 0; mutex_lock(&ensoniq->src_mutex); for (t = 0; t < POLL_COUNT; t++) { if (!(inl(ES_REG(ensoniq, 1371_CODEC)) & ES_1371_CODEC_WIP)) { @@ -630,7 +637,8 @@ static void snd_es1371_codec_write(struct snd_ac97 *ac97, 0x00010000) break; } - outl(ES_1371_CODEC_WRITE(reg, val), ES_REG(ensoniq, 1371_CODEC)); + outl(ES_1371_CODEC_WRITE(reg, val) | flag, + ES_REG(ensoniq, 1371_CODEC)); /* restore SRC reg */ snd_es1371_wait_src_ready(ensoniq); outl(x, ES_REG(ensoniq, 1371_SMPRATE)); @@ -647,8 +655,9 @@ static unsigned short snd_es1371_codec_read(struct snd_ac97 *ac97, unsigned short reg) { struct ensoniq *ensoniq = ac97->private_data; - unsigned int t, x, fail = 0; + unsigned int t, x, flag, fail = 0; + flag = is_ev1938(ensoniq) ? EV_1938_CODEC_MAGIC : 0; __again: mutex_lock(&ensoniq->src_mutex); for (t = 0; t < POLL_COUNT; t++) { @@ -671,7 +680,8 @@ static unsigned short snd_es1371_codec_read(struct snd_ac97 *ac97, 0x00010000) break; } - outl(ES_1371_CODEC_READS(reg), ES_REG(ensoniq, 1371_CODEC)); + outl(ES_1371_CODEC_READS(reg) | flag, + ES_REG(ensoniq, 1371_CODEC)); /* restore SRC reg */ snd_es1371_wait_src_ready(ensoniq); outl(x, ES_REG(ensoniq, 1371_SMPRATE)); @@ -683,6 +693,11 @@ static unsigned short snd_es1371_codec_read(struct snd_ac97 *ac97, /* now wait for the stinkin' data (RDY) */ for (t = 0; t < POLL_COUNT; t++) { if ((x = inl(ES_REG(ensoniq, 1371_CODEC))) & ES_1371_CODEC_RDY) { + if (is_ev1938(ensoniq)) { + for (t = 0; t < 100; t++) + inl(ES_REG(ensoniq, CONTROL)); + x = inl(ES_REG(ensoniq, 1371_CODEC)); + } mutex_unlock(&ensoniq->src_mutex); return ES_1371_CODEC_READ(x); } diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index fcedad9a5feff..1de9f2e9998b8 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2346,9 +2346,16 @@ static int __devinit check_position_fix(struct azx *chip, int fix) /* Check VIA/ATI HD Audio Controller exist */ switch (chip->driver_type) { case AZX_DRIVER_VIA: - case AZX_DRIVER_ATI: /* Use link position directly, avoid any transfer problem. 
*/ return POS_FIX_VIACOMBO; + case AZX_DRIVER_ATI: + /* ATI chipsets don't work well with position-buffer */ + return POS_FIX_LPIB; + case AZX_DRIVER_GENERIC: + /* AMD chipsets also don't work with position-buffer */ + if (chip->pci->vendor == PCI_VENDOR_ID_AMD) + return POS_FIX_LPIB; + break; } return POS_FIX_AUTO; diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c index 8dabab798689b..f8363ae869e86 100644 --- a/sound/pci/hda/patch_analog.c +++ b/sound/pci/hda/patch_analog.c @@ -3167,6 +3167,7 @@ static void ad1988_auto_init_analog_input(struct hda_codec *codec) for (i = 0; i < cfg->num_inputs; i++) { hda_nid_t nid = cfg->inputs[i].pin; + int type = cfg->inputs[i].type; switch (nid) { case 0x15: /* port-C */ snd_hda_codec_write(codec, 0x33, 0, AC_VERB_SET_CONNECT_SEL, 0x0); @@ -3176,7 +3177,7 @@ static void ad1988_auto_init_analog_input(struct hda_codec *codec) break; } snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, - i == AUTO_PIN_MIC ? PIN_VREF80 : PIN_IN); + type == AUTO_PIN_MIC ? PIN_VREF80 : PIN_IN); if (nid != AD1988_PIN_CD_NID) snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); @@ -4352,6 +4353,84 @@ static int ad1984a_thinkpad_init(struct hda_codec *codec) return 0; } +/* + * Precision R5500 + * 0x12 - HP/line-out + * 0x13 - speaker (mono) + * 0x15 - mic-in + */ + +static struct hda_verb ad1984a_precision_verbs[] = { + /* Unmute main output path */ + {0x03, AC_VERB_SET_AMP_GAIN_MUTE, 0x27}, /* 0dB */ + {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE + 0x1f}, /* 0dB */ + {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(5) + 0x17}, /* 0dB */ + /* Analog mixer; mute as default */ + {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, + {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)}, + {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)}, + {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)}, + {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)}, + /* Select mic as input */ + {0x0c, AC_VERB_SET_CONNECT_SEL, 0x1}, + {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE + 0x27}, /* 0dB */ + /* Configure as mic */ + {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80}, + {0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0x7002}, /* raise mic as default */ + /* HP unmute */ + {0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, + /* turn on EAPD */ + {0x13, AC_VERB_SET_EAPD_BTLENABLE, 0x02}, + /* unsolicited event for pin-sense */ + {0x12, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_HP_EVENT}, + { } /* end */ +}; + +static struct snd_kcontrol_new ad1984a_precision_mixers[] = { + HDA_CODEC_VOLUME("Master Playback Volume", 0x21, 0x0, HDA_OUTPUT), + HDA_CODEC_MUTE("Master Playback Switch", 0x21, 0x0, HDA_OUTPUT), + HDA_CODEC_VOLUME("PCM Playback Volume", 0x20, 0x5, HDA_INPUT), + HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x01, HDA_INPUT), + HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x01, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost Volume", 0x15, 0x0, HDA_INPUT), + HDA_CODEC_MUTE("Front Playback Switch", 0x12, 0x0, HDA_OUTPUT), + HDA_CODEC_VOLUME("Speaker Playback Volume", 0x13, 0x0, HDA_OUTPUT), + HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT), + HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT), + { } /* end */ +}; + + +/* mute internal speaker if HP is plugged */ +static void ad1984a_precision_automute(struct hda_codec *codec) +{ + unsigned int present; + + present = snd_hda_jack_detect(codec, 0x12); + snd_hda_codec_amp_stereo(codec, 0x13, HDA_OUTPUT, 0, + 
HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0); +} + + +/* unsolicited event for HP jack sensing */ +static void ad1984a_precision_unsol_event(struct hda_codec *codec, + unsigned int res) +{ + if ((res >> 26) != AD1884A_HP_EVENT) + return; + ad1984a_precision_automute(codec); +} + +/* initialize jack-sensing, too */ +static int ad1984a_precision_init(struct hda_codec *codec) +{ + ad198x_init(codec); + ad1984a_precision_automute(codec); + return 0; +} + + /* * HP Touchsmart * port-A (0x11) - front hp-out @@ -4481,6 +4560,7 @@ enum { AD1884A_MOBILE, AD1884A_THINKPAD, AD1984A_TOUCHSMART, + AD1984A_PRECISION, AD1884A_MODELS }; @@ -4490,9 +4570,11 @@ static const char * const ad1884a_models[AD1884A_MODELS] = { [AD1884A_MOBILE] = "mobile", [AD1884A_THINKPAD] = "thinkpad", [AD1984A_TOUCHSMART] = "touchsmart", + [AD1984A_PRECISION] = "precision", }; static struct snd_pci_quirk ad1884a_cfg_tbl[] = { + SND_PCI_QUIRK(0x1028, 0x04ac, "Precision R5500", AD1984A_PRECISION), SND_PCI_QUIRK(0x103c, 0x3030, "HP", AD1884A_MOBILE), SND_PCI_QUIRK(0x103c, 0x3037, "HP 2230s", AD1884A_LAPTOP), SND_PCI_QUIRK(0x103c, 0x3056, "HP", AD1884A_MOBILE), @@ -4586,6 +4668,14 @@ static int patch_ad1884a(struct hda_codec *codec) codec->patch_ops.unsol_event = ad1984a_thinkpad_unsol_event; codec->patch_ops.init = ad1984a_thinkpad_init; break; + case AD1984A_PRECISION: + spec->mixers[0] = ad1984a_precision_mixers; + spec->init_verbs[spec->num_init_verbs++] = + ad1984a_precision_verbs; + spec->multiout.dig_out_nid = 0; + codec->patch_ops.unsol_event = ad1984a_precision_unsol_event; + codec->patch_ops.init = ad1984a_precision_init; + break; case AD1984A_TOUCHSMART: spec->mixers[0] = ad1984a_touchsmart_mixers; spec->init_verbs[0] = ad1984a_touchsmart_verbs; diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 4d5004e693f03..9fbfd3b0ab59d 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -3130,6 +3130,9 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS), SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD), + SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD), + SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD), + SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS), SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS), SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */ {} diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index ec0fa2dd0a279..520f94a411658 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -1276,6 +1276,39 @@ static int simple_playback_pcm_prepare(struct hda_pcm_stream *hinfo, stream_tag, format, substream); } +static void nvhdmi_8ch_7x_set_info_frame_parameters(struct hda_codec *codec, + int channels) +{ + unsigned int chanmask; + int chan = channels ? (channels - 1) : 1; + + switch (channels) { + default: + case 0: + case 2: + chanmask = 0x00; + break; + case 4: + chanmask = 0x08; + break; + case 6: + chanmask = 0x0b; + break; + case 8: + chanmask = 0x13; + break; + } + + /* Set the audio infoframe channel allocation and checksum fields. The + * channel count is computed implicitly by the hardware. 
*/ + snd_hda_codec_write(codec, 0x1, 0, + Nv_VERB_SET_Channel_Allocation, chanmask); + + snd_hda_codec_write(codec, 0x1, 0, + Nv_VERB_SET_Info_Frame_Checksum, + (0x71 - chan - chanmask)); +} + static int nvhdmi_8ch_7x_pcm_close(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) @@ -1294,6 +1327,10 @@ static int nvhdmi_8ch_7x_pcm_close(struct hda_pcm_stream *hinfo, AC_VERB_SET_STREAM_FORMAT, 0); } + /* The audio hardware sends a channel count of 0x7 (8ch) when all the + * streams are disabled. */ + nvhdmi_8ch_7x_set_info_frame_parameters(codec, 8); + return snd_hda_multi_out_dig_close(codec, &spec->multiout); } @@ -1304,37 +1341,16 @@ static int nvhdmi_8ch_7x_pcm_prepare(struct hda_pcm_stream *hinfo, struct snd_pcm_substream *substream) { int chs; - unsigned int dataDCC1, dataDCC2, chan, chanmask, channel_id; + unsigned int dataDCC1, dataDCC2, channel_id; int i; mutex_lock(&codec->spdif_mutex); chs = substream->runtime->channels; - chan = chs ? (chs - 1) : 1; - switch (chs) { - default: - case 0: - case 2: - chanmask = 0x00; - break; - case 4: - chanmask = 0x08; - break; - case 6: - chanmask = 0x0b; - break; - case 8: - chanmask = 0x13; - break; - } dataDCC1 = AC_DIG1_ENABLE | AC_DIG1_COPYRIGHT; dataDCC2 = 0x2; - /* set the Audio InforFrame Channel Allocation */ - snd_hda_codec_write(codec, 0x1, 0, - Nv_VERB_SET_Channel_Allocation, chanmask); - /* turn off SPDIF once; otherwise the IEC958 bits won't be updated */ if (codec->spdif_status_reset && (codec->spdif_ctls & AC_DIG1_ENABLE)) snd_hda_codec_write(codec, @@ -1409,10 +1425,7 @@ static int nvhdmi_8ch_7x_pcm_prepare(struct hda_pcm_stream *hinfo, } } - /* set the Audio Info Frame Checksum */ - snd_hda_codec_write(codec, 0x1, 0, - Nv_VERB_SET_Info_Frame_Checksum, - (0x71 - chan - chanmask)); + nvhdmi_8ch_7x_set_info_frame_parameters(codec, chs); mutex_unlock(&codec->spdif_mutex); return 0; @@ -1508,6 +1521,11 @@ static int patch_nvhdmi_8ch_7x(struct hda_codec *codec) spec->multiout.max_channels = 8; spec->pcm_playback = &nvhdmi_pcm_playback_8ch_7x; codec->patch_ops = nvhdmi_patch_ops_8ch_7x; + + /* Initialize the audio infoframe channel mask and checksum to something + * valid */ + nvhdmi_8ch_7x_set_info_frame_parameters(codec, 8); + return 0; } diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 4261bb8eec1d5..24a3acb63f691 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -394,6 +394,7 @@ struct alc_spec { /* other flags */ unsigned int no_analog :1; /* digital I/O only */ unsigned int dual_adc_switch:1; /* switch ADCs (for ALC275) */ + unsigned int single_input_src:1; int init_amp; int codec_variant; /* flag for other variants */ @@ -1359,7 +1360,7 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type) case 0x10ec0883: case 0x10ec0885: case 0x10ec0887: - case 0x10ec0889: + /*case 0x10ec0889:*/ /* this causes an SPDIF problem */ alc889_coef_init(codec); break; case 0x10ec0888: @@ -1773,11 +1774,11 @@ static void alc_apply_fixup(struct hda_codec *codec, int action) codec->chip_name, fix->type); break; } - if (!fix[id].chained) + if (!fix->chained) break; if (++depth > 10) break; - id = fix[id].chain_id; + id = fix->chain_id; } } @@ -3919,6 +3920,8 @@ static struct hda_amp_list alc880_lg_loopbacks[] = { * Common callbacks */ +static void alc_init_special_input_src(struct hda_codec *codec); + static int alc_init(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; @@ -3929,6 +3932,7 @@ static int alc_init(struct 
hda_codec *codec) for (i = 0; i < spec->num_init_verbs; i++) snd_hda_sequence_write(codec, spec->init_verbs[i]); + alc_init_special_input_src(codec); if (spec->init_hook) spec->init_hook(codec); @@ -5151,7 +5155,9 @@ static const char *alc_get_line_out_pfx(const struct auto_pin_cfg *cfg, switch (cfg->line_out_type) { case AUTO_PIN_SPEAKER_OUT: - return "Speaker"; + if (cfg->line_outs == 1) + return "Speaker"; + break; case AUTO_PIN_HP_OUT: return "Headphone"; default: @@ -5205,16 +5211,19 @@ static int alc880_auto_create_multi_out_ctls(struct alc_spec *spec, return err; } else { const char *name = pfx; - if (!name) + int index = i; + if (!name) { name = chname[i]; + index = 0; + } err = __add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, - name, i, + name, index, HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT)); if (err < 0) return err; err = __add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE, - name, i, + name, index, HDA_COMPOSE_AMP_VAL(nid, 3, 2, HDA_INPUT)); if (err < 0) @@ -5585,6 +5594,7 @@ static void fixup_single_adc(struct hda_codec *codec) spec->capsrc_nids += i; spec->adc_nids += i; spec->num_adc_nids = 1; + spec->single_input_src = 1; } } @@ -5596,6 +5606,16 @@ static void fixup_dual_adc_switch(struct hda_codec *codec) init_capsrc_for_pin(codec, spec->int_mic.pin); } +/* initialize some special cases for input sources */ +static void alc_init_special_input_src(struct hda_codec *codec) +{ + struct alc_spec *spec = codec->spec; + if (spec->dual_adc_switch) + fixup_dual_adc_switch(codec); + else if (spec->single_input_src) + init_capsrc_for_pin(codec, spec->autocfg.inputs[0].pin); +} + static void set_capture_mixer(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; @@ -5611,7 +5631,7 @@ static void set_capture_mixer(struct hda_codec *codec) int mux = 0; int num_adcs = spec->num_adc_nids; if (spec->dual_adc_switch) - fixup_dual_adc_switch(codec); + num_adcs = 1; else if (spec->auto_mic) fixup_automic_adc(codec); else if (spec->input_mux) { @@ -5620,8 +5640,6 @@ static void set_capture_mixer(struct hda_codec *codec) else if (spec->input_mux->num_items == 1) fixup_single_adc(codec); } - if (spec->dual_adc_switch) - num_adcs = 1; spec->cap_mixer = caps[mux][num_adcs - 1]; } } @@ -10748,6 +10766,7 @@ static struct alc_config_preset alc882_presets[] = { */ enum { PINFIX_ABIT_AW9D_MAX, + PINFIX_LENOVO_Y530, PINFIX_PB_M5210, PINFIX_ACER_ASPIRE_7736, }; @@ -10762,6 +10781,14 @@ static const struct alc_fixup alc882_fixups[] = { { } } }, + [PINFIX_LENOVO_Y530] = { + .type = ALC_FIXUP_PINS, + .v.pins = (const struct alc_pincfg[]) { + { 0x15, 0x99130112 }, /* rear int speakers */ + { 0x16, 0x99130111 }, /* subwoofer */ + { } + } + }, [PINFIX_PB_M5210] = { .type = ALC_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { @@ -10777,6 +10804,7 @@ static const struct alc_fixup alc882_fixups[] = { static struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", PINFIX_PB_M5210), + SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", PINFIX_LENOVO_Y530), SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", PINFIX_ABIT_AW9D_MAX), SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", PINFIX_ACER_ASPIRE_7736), {} @@ -10829,23 +10857,28 @@ static void alc882_auto_init_hp_out(struct hda_codec *codec) hda_nid_t pin, dac; int i; - for (i = 0; i < ARRAY_SIZE(spec->autocfg.hp_pins); i++) { - pin = spec->autocfg.hp_pins[i]; - if (!pin) - break; - dac = spec->multiout.hp_nid; - if (!dac) - dac = spec->multiout.dac_nids[0]; /* to front */ - alc882_auto_set_output_and_unmute(codec, pin, PIN_HP, dac); + 
if (spec->autocfg.line_out_type != AUTO_PIN_HP_OUT) { + for (i = 0; i < ARRAY_SIZE(spec->autocfg.hp_pins); i++) { + pin = spec->autocfg.hp_pins[i]; + if (!pin) + break; + dac = spec->multiout.hp_nid; + if (!dac) + dac = spec->multiout.dac_nids[0]; /* to front */ + alc882_auto_set_output_and_unmute(codec, pin, PIN_HP, dac); + } } - for (i = 0; i < ARRAY_SIZE(spec->autocfg.speaker_pins); i++) { - pin = spec->autocfg.speaker_pins[i]; - if (!pin) - break; - dac = spec->multiout.extra_out_nid[0]; - if (!dac) - dac = spec->multiout.dac_nids[0]; /* to front */ - alc882_auto_set_output_and_unmute(codec, pin, PIN_OUT, dac); + + if (spec->autocfg.line_out_type != AUTO_PIN_SPEAKER_OUT) { + for (i = 0; i < ARRAY_SIZE(spec->autocfg.speaker_pins); i++) { + pin = spec->autocfg.speaker_pins[i]; + if (!pin) + break; + dac = spec->multiout.extra_out_nid[0]; + if (!dac) + dac = spec->multiout.dac_nids[0]; /* to front */ + alc882_auto_set_output_and_unmute(codec, pin, PIN_OUT, dac); + } } } @@ -14158,7 +14191,7 @@ static hda_nid_t alc269vb_capsrc_nids[1] = { }; static hda_nid_t alc269_adc_candidates[] = { - 0x08, 0x09, 0x07, + 0x08, 0x09, 0x07, 0x11, }; #define alc269_modes alc260_modes @@ -14904,6 +14937,23 @@ static void alc269_fixup_hweq(struct hda_codec *codec, alc_write_coef_idx(codec, 0x1e, coef | 0x80); } +static void alc271_fixup_dmic(struct hda_codec *codec, + const struct alc_fixup *fix, int action) +{ + static struct hda_verb verbs[] = { + {0x20, AC_VERB_SET_COEF_INDEX, 0x0d}, + {0x20, AC_VERB_SET_PROC_COEF, 0x4000}, + {} + }; + unsigned int cfg; + + if (strcmp(codec->chip_name, "ALC271X")) + return; + cfg = snd_hda_codec_get_pincfg(codec, 0x12); + if (get_defcfg_connect(cfg) == AC_JACK_PORT_FIXED) + snd_hda_sequence_write(codec, verbs); +} + enum { ALC269_FIXUP_SONY_VAIO, ALC275_FIXUP_SONY_VAIO_GPIO2, @@ -14912,6 +14962,7 @@ enum { ALC269_FIXUP_ASUS_G73JW, ALC269_FIXUP_LENOVO_EAPD, ALC275_FIXUP_SONY_HWEQ, + ALC271_FIXUP_DMIC, }; static const struct alc_fixup alc269_fixups[] = { @@ -14965,7 +15016,11 @@ static const struct alc_fixup alc269_fixups[] = { .v.func = alc269_fixup_hweq, .chained = true, .chain_id = ALC275_FIXUP_SONY_VAIO_GPIO2 - } + }, + [ALC271_FIXUP_DMIC] = { + .type = ALC_FIXUP_FUNC, + .v.func = alc271_fixup_dmic, + }, }; static struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -14974,6 +15029,7 @@ static struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO), SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), + SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC), SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE), @@ -16052,9 +16108,12 @@ static int alc861_auto_create_multi_out_ctls(struct hda_codec *codec, return err; } else { const char *name = pfx; - if (!name) + int index = i; + if (!name) { name = chname[i]; - err = __alc861_create_out_sw(codec, name, nid, i, 3); + index = 0; + } + err = __alc861_create_out_sw(codec, name, nid, index, 3); if (err < 0) return err; } @@ -17205,16 +17264,19 @@ static int alc861vd_auto_create_multi_out_ctls(struct alc_spec *spec, return err; } else { const char *name = pfx; - if (!name) + int index = i; + if (!name) { name = chname[i]; + index = 0; + } err = __add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, - name, i, + name, index, 
HDA_COMPOSE_AMP_VAL(nid_v, 3, 0, HDA_OUTPUT)); if (err < 0) return err; err = __add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE, - name, i, + name, index, HDA_COMPOSE_AMP_VAL(nid_s, 3, 2, HDA_INPUT)); if (err < 0) @@ -19263,12 +19325,15 @@ static int alc662_auto_create_multi_out_ctls(struct hda_codec *codec, return err; } else { const char *name = pfx; - if (!name) + int index = i; + if (!name) { name = chname[i]; - err = __alc662_add_vol_ctl(spec, name, nid, i, 3); + index = 0; + } + err = __alc662_add_vol_ctl(spec, name, nid, index, 3); if (err < 0) return err; - err = __alc662_add_sw_ctl(spec, name, mix, i, 3); + err = __alc662_add_sw_ctl(spec, name, mix, index, 3); if (err < 0) return err; } @@ -19484,6 +19549,7 @@ enum { ALC662_FIXUP_IDEAPAD, ALC272_FIXUP_MARIO, ALC662_FIXUP_CZC_P10T, + ALC662_FIXUP_SKU_IGNORE, }; static const struct alc_fixup alc662_fixups[] = { @@ -19512,10 +19578,15 @@ static const struct alc_fixup alc662_fixups[] = { {} } }, + [ALC662_FIXUP_SKU_IGNORE] = { + .type = ALC_FIXUP_SKU, + .v.sku = ALC_FIXUP_SKU_IGNORE, + }, }; static struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE), + SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index bd7b123f64407..295a96a4df9a7 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -94,6 +94,7 @@ enum { STAC_92HD83XXX_REF, STAC_92HD83XXX_PWR_REF, STAC_DELL_S14, + STAC_DELL_E5520M, STAC_92HD83XXX_HP, STAC_HP_DV7_4000, STAC_92HD83XXX_MODELS @@ -757,7 +758,7 @@ static int stac92xx_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e struct sigmatel_spec *spec = codec->spec; unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); const struct hda_input_mux *imux = spec->input_mux; - unsigned int idx, prev_idx; + unsigned int idx, prev_idx, didx; idx = ucontrol->value.enumerated.item[0]; if (idx >= imux->num_items) @@ -769,7 +770,8 @@ static int stac92xx_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e snd_hda_codec_write_cache(codec, spec->mux_nids[adc_idx], 0, AC_VERB_SET_CONNECT_SEL, imux->items[idx].index); - if (prev_idx >= spec->num_analog_muxes) { + if (prev_idx >= spec->num_analog_muxes && + spec->mux_nids[adc_idx] != spec->dmux_nids[adc_idx]) { imux = spec->dinput_mux; /* 0 = analog */ snd_hda_codec_write_cache(codec, @@ -779,9 +781,13 @@ static int stac92xx_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e } } else { imux = spec->dinput_mux; + /* first dimux item is hardcoded to select analog imux, + * so lets skip it + */ + didx = idx - spec->num_analog_muxes + 1; snd_hda_codec_write_cache(codec, spec->dmux_nids[adc_idx], 0, AC_VERB_SET_CONNECT_SEL, - imux->items[idx - 1].index); + imux->items[didx].index); } spec->cur_mux[adc_idx] = idx; return 1; @@ -1628,7 +1634,7 @@ static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = { SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe, "Dell Studio XPS 1645", STAC_DELL_M6_BOTH), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413, - "Dell Studio 1558", STAC_DELL_M6_BOTH), + "Dell Studio 1558", STAC_DELL_M6_DMIC), {} /* terminator */ }; @@ -1652,6 +1658,13 @@ static unsigned int dell_s14_pin_configs[10] = { 0x40f000f0, 0x40f000f0, }; +/* Switch int mic from 0x20 to 
+static unsigned int dell_e5520m_pin_configs[10] = {
+	0x04a11020, 0x0421101f, 0x400000f0, 0x90170110,
+	0x23011050, 0x23a1102e, 0x400000f3, 0xd5a30130,
+	0x400000f0, 0x40f000f0,
+};
+
 static unsigned int hp_dv7_4000_pin_configs[10] = {
 	0x03a12050, 0x0321201f, 0x40f000f0, 0x90170110,
 	0x40f000f0, 0x40f000f0, 0x90170110, 0xd5a30140,
 	0x40f000f0, 0x40f000f0,
 };
 
@@ -1662,6 +1675,7 @@ static unsigned int *stac92hd83xxx_brd_tbl[STAC_92HD83XXX_MODELS] = {
 	[STAC_92HD83XXX_REF] = ref92hd83xxx_pin_configs,
 	[STAC_92HD83XXX_PWR_REF] = ref92hd83xxx_pin_configs,
 	[STAC_DELL_S14] = dell_s14_pin_configs,
+	[STAC_DELL_E5520M] = dell_e5520m_pin_configs,
 	[STAC_HP_DV7_4000] = hp_dv7_4000_pin_configs,
 };
 
@@ -1670,6 +1684,7 @@ static const char * const stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = {
 	[STAC_92HD83XXX_REF] = "ref",
 	[STAC_92HD83XXX_PWR_REF] = "mic-ref",
 	[STAC_DELL_S14] = "dell-s14",
+	[STAC_DELL_E5520M] = "dell-e5520m",
 	[STAC_92HD83XXX_HP] = "hp",
 	[STAC_HP_DV7_4000] = "hp-dv7-4000",
 };
 
@@ -1682,6 +1697,14 @@ static struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = {
 		      "DFI LanParty", STAC_92HD83XXX_REF),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ba,
 		      "unknown Dell", STAC_DELL_S14),
+	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x049a,
+		      "Dell E5520", STAC_DELL_E5520M),
+	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x049b,
+		      "Dell E5420", STAC_DELL_E5520M),
+	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x04eb,
+		      "Dell E5420m", STAC_DELL_E5520M),
+	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x04ec,
+		      "Dell E5520m", STAC_DELL_E5520M),
 	SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xff00, 0x3600,
 			   "HP", STAC_92HD83XXX_HP),
 	{} /* terminator */
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 63b0054200a87..acc45798eeb43 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -159,6 +159,7 @@ struct via_spec {
 #endif
 };
 
+static enum VIA_HDA_CODEC get_codec_type(struct hda_codec *codec);
 static struct via_spec * via_new_spec(struct hda_codec *codec)
 {
 	struct via_spec *spec;
@@ -169,6 +170,10 @@ static struct via_spec * via_new_spec(struct hda_codec *codec)
 	codec->spec = spec;
 	spec->codec = codec;
+	spec->codec_type = get_codec_type(codec);
+	/* VT1708BCE & VT1708S are almost same */
+	if (spec->codec_type == VT1708BCE)
+		spec->codec_type = VT1708S;
 	return spec;
 }
 
@@ -1101,6 +1106,7 @@ static int via_mux_enum_put(struct snd_kcontrol *kcontrol,
 	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
 	struct via_spec *spec = codec->spec;
 	unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
+	int ret;
 
 	if (!spec->mux_nids[adc_idx])
 		return -EINVAL;
@@ -1109,12 +1115,14 @@ static int via_mux_enum_put(struct snd_kcontrol *kcontrol,
 			       AC_VERB_GET_POWER_STATE, 0x00) != AC_PWRST_D0)
 		snd_hda_codec_write(codec, spec->mux_nids[adc_idx], 0,
 				    AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
-	/* update jack power state */
-	set_jack_power_state(codec);
 
-	return snd_hda_input_mux_put(codec, spec->input_mux, ucontrol,
+	ret = snd_hda_input_mux_put(codec, spec->input_mux, ucontrol,
 				     spec->mux_nids[adc_idx],
 				     &spec->cur_mux[adc_idx]);
+	/* update jack power state */
+	set_jack_power_state(codec);
+
+	return ret;
 }
 
 static int via_independent_hp_info(struct snd_kcontrol *kcontrol,
@@ -1188,8 +1196,16 @@ static int via_independent_hp_put(struct snd_kcontrol *kcontrol,
 	/* Get Independent Mode index of headphone pin widget */
 	spec->hp_independent_mode = spec->hp_independent_mode_index == pinsel ?
 		1 : 0;
-	snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CONNECT_SEL, pinsel);
+	if (spec->codec_type == VT1718S)
+		snd_hda_codec_write(codec, nid, 0,
+				    AC_VERB_SET_CONNECT_SEL, pinsel ? 2 : 0);
+	else
+		snd_hda_codec_write(codec, nid, 0,
+				    AC_VERB_SET_CONNECT_SEL, pinsel);
+	if (spec->codec_type == VT1812)
+		snd_hda_codec_write(codec, 0x35, 0,
+				    AC_VERB_SET_CONNECT_SEL, pinsel);
 	if (spec->multiout.hp_nid && spec->multiout.hp_nid !=
 	    spec->multiout.dac_nids[HDA_FRONT])
 		snd_hda_codec_setup_stream(codec, spec->multiout.hp_nid,
@@ -1208,6 +1224,8 @@ static int via_independent_hp_put(struct snd_kcontrol *kcontrol,
 		activate_ctl(codec, "Headphone Playback Switch",
 			     spec->hp_independent_mode);
 	}
+	/* update jack power state */
+	set_jack_power_state(codec);
 	return 0;
 }
 
@@ -1248,9 +1266,12 @@ static int via_hp_build(struct hda_codec *codec)
 		break;
 	}
 
-	nums = snd_hda_get_connections(codec, nid, conn, HDA_MAX_CONNECTIONS);
-	if (nums <= 1)
-		return 0;
+	if (spec->codec_type != VT1708) {
+		nums = snd_hda_get_connections(codec, nid,
+					       conn, HDA_MAX_CONNECTIONS);
+		if (nums <= 1)
+			return 0;
+	}
 
 	knew = via_clone_control(spec, &via_hp_mixer[0]);
 	if (knew == NULL)
@@ -1310,6 +1331,11 @@ static void mute_aa_path(struct hda_codec *codec, int mute)
 		start_idx = 2;
 		end_idx = 4;
 		break;
+	case VT1718S:
+		nid_mixer = 0x21;
+		start_idx = 1;
+		end_idx = 3;
+		break;
 	default:
 		return;
 	}
@@ -2185,10 +2211,6 @@ static int via_init(struct hda_codec *codec)
 	for (i = 0; i < spec->num_iverbs; i++)
 		snd_hda_sequence_write(codec, spec->init_verbs[i]);
 
-	spec->codec_type = get_codec_type(codec);
-	if (spec->codec_type == VT1708BCE)
-		spec->codec_type = VT1708S; /* VT1708BCE & VT1708S are almost
-					       same */
 	/* Lydia Add for EAPD enable */
 	if (!spec->dig_in_nid) { /* No Digital In connection */
 		if (spec->dig_in_pin) {
@@ -2438,7 +2460,14 @@ static int vt_auto_create_analog_input_ctls(struct hda_codec *codec,
 		else
 			type_idx = 0;
 		label = hda_get_autocfg_input_label(codec, cfg, i);
-		err = via_new_analog_input(spec, label, type_idx, idx, cap_nid);
+		if (spec->codec_type == VT1708S ||
+		    spec->codec_type == VT1702 ||
+		    spec->codec_type == VT1716S)
+			err = via_new_analog_input(spec, label, type_idx,
+						   idx+1, cap_nid);
+		else
+			err = via_new_analog_input(spec, label, type_idx,
+						   idx, cap_nid);
 		if (err < 0)
 			return err;
 		snd_hda_add_imux_item(imux, label, idx, NULL);
diff --git a/sound/soc/codecs/jz4740.c b/sound/soc/codecs/jz4740.c
index f7cd346fd7275..f5ccdbf7ebc6e 100644
--- a/sound/soc/codecs/jz4740.c
+++ b/sound/soc/codecs/jz4740.c
@@ -308,8 +308,6 @@ static int jz4740_codec_dev_probe(struct snd_soc_codec *codec)
 	snd_soc_dapm_add_routes(dapm, jz4740_codec_dapm_routes,
 		ARRAY_SIZE(jz4740_codec_dapm_routes));
 
-	snd_soc_dapm_new_widgets(codec);
-
 	jz4740_codec_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 
 	return 0;
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index 2727befd158ec..977d0b64e9c43 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -139,7 +139,7 @@ SOC_DOUBLE_R("Capture Volume", SSM2602_LINVOL, SSM2602_RINVOL, 0, 31, 0),
 SOC_DOUBLE_R("Capture Switch", SSM2602_LINVOL, SSM2602_RINVOL, 7, 1, 1),
 
 SOC_SINGLE("Mic Boost (+20dB)", SSM2602_APANA, 0, 1, 0),
-SOC_SINGLE("Mic Boost2 (+20dB)", SSM2602_APANA, 7, 1, 0),
+SOC_SINGLE("Mic Boost2 (+20dB)", SSM2602_APANA, 8, 1, 0),
 SOC_SINGLE("Mic Switch", SSM2602_APANA, 1, 1, 1),
 
 SOC_SINGLE("Sidetone Playback Volume", SSM2602_APANA, 6, 3, 1),
diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c
index e76847a9438b0..a7b8f301bad39 100644
--- a/sound/soc/codecs/uda134x.c
+++ b/sound/soc/codecs/uda134x.c
@@ -486,7 +486,8 @@ static struct snd_soc_dai_driver uda134x_dai = {
 static int uda134x_soc_probe(struct snd_soc_codec *codec)
 {
 	struct uda134x_priv *uda134x;
-	struct uda134x_platform_data *pd = dev_get_drvdata(codec->card->dev);
+	struct uda134x_platform_data *pd = codec->card->dev->platform_data;
+	int ret;
 
 	printk(KERN_INFO "UDA134X SoC Audio Codec\n");
 
@@ -600,9 +601,7 @@ static struct snd_soc_codec_driver soc_codec_dev_uda134x = {
 	.reg_cache_step = 1,
 	.read = uda134x_read_reg_cache,
 	.write = uda134x_write,
-#ifdef POWER_OFF_ON_STANDBY
 	.set_bias_level = uda134x_set_bias_level,
-#endif
 };
 
 static int __devinit uda134x_codec_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 516892706063d..fce23b3ca1dc5 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -739,12 +739,12 @@ static const struct snd_soc_dapm_route analogue_routes[] = {
 	{ "SPKL", "Input Switch", "MIXINL" },
 	{ "SPKL", "IN1LP Switch", "IN1LP" },
-	{ "SPKL", "Output Switch", "Left Output Mixer" },
+	{ "SPKL", "Output Switch", "Left Output PGA" },
 	{ "SPKL", NULL, "TOCLK" },
 
 	{ "SPKR", "Input Switch", "MIXINR" },
 	{ "SPKR", "IN1RP Switch", "IN1RP" },
-	{ "SPKR", "Output Switch", "Right Output Mixer" },
+	{ "SPKR", "Output Switch", "Right Output PGA" },
 	{ "SPKR", NULL, "TOCLK" },
 
 	{ "SPKL Boost", "Direct Voice Switch", "Direct Voice" },
@@ -766,8 +766,8 @@ static const struct snd_soc_dapm_route analogue_routes[] = {
 	{ "SPKOUTRP", NULL, "SPKR Driver" },
 	{ "SPKOUTRN", NULL, "SPKR Driver" },
 
-	{ "Left Headphone Mux", "Mixer", "Left Output Mixer" },
-	{ "Right Headphone Mux", "Mixer", "Right Output Mixer" },
+	{ "Left Headphone Mux", "Mixer", "Left Output PGA" },
+	{ "Right Headphone Mux", "Mixer", "Right Output PGA" },
 
 	{ "Headphone PGA", NULL, "Left Headphone Mux" },
 	{ "Headphone PGA", NULL, "Right Headphone Mux" },
@@ -786,17 +786,17 @@ static const struct snd_soc_dapm_route analogue_routes[] = {
 static const struct snd_soc_dapm_route lineout1_diff_routes[] = {
 	{ "LINEOUT1 Mixer", "IN1L Switch", "IN1L PGA" },
 	{ "LINEOUT1 Mixer", "IN1R Switch", "IN1R PGA" },
-	{ "LINEOUT1 Mixer", "Output Switch", "Left Output Mixer" },
+	{ "LINEOUT1 Mixer", "Output Switch", "Left Output PGA" },
 
 	{ "LINEOUT1N Driver", NULL, "LINEOUT1 Mixer" },
 	{ "LINEOUT1P Driver", NULL, "LINEOUT1 Mixer" },
 };
 
 static const struct snd_soc_dapm_route lineout1_se_routes[] = {
-	{ "LINEOUT1N Mixer", "Left Output Switch", "Left Output Mixer" },
-	{ "LINEOUT1N Mixer", "Right Output Switch", "Left Output Mixer" },
+	{ "LINEOUT1N Mixer", "Left Output Switch", "Left Output PGA" },
+	{ "LINEOUT1N Mixer", "Right Output Switch", "Right Output PGA" },
 
-	{ "LINEOUT1P Mixer", "Left Output Switch", "Left Output Mixer" },
+	{ "LINEOUT1P Mixer", "Left Output Switch", "Left Output PGA" },
 
 	{ "LINEOUT1N Driver", NULL, "LINEOUT1N Mixer" },
 	{ "LINEOUT1P Driver", NULL, "LINEOUT1P Mixer" },
@@ -805,17 +805,17 @@ static const struct snd_soc_dapm_route lineout1_se_routes[] = {
 static const struct snd_soc_dapm_route lineout2_diff_routes[] = {
 	{ "LINEOUT2 Mixer", "IN2L Switch", "IN2L PGA" },
 	{ "LINEOUT2 Mixer", "IN2R Switch", "IN2R PGA" },
-	{ "LINEOUT2 Mixer", "Output Switch", "Right Output Mixer" },
+	{ "LINEOUT2 Mixer", "Output Switch", "Right Output PGA" },
 
 	{ "LINEOUT2N Driver", NULL, "LINEOUT2 Mixer" },
 	{ "LINEOUT2P Driver", NULL, "LINEOUT2 Mixer" },
 };
 
 static const struct snd_soc_dapm_route lineout2_se_routes[] = {
-	{ "LINEOUT2N Mixer", "Left Output Switch", "Left Output Mixer" },
Output Switch", "Left Output Mixer" }, - { "LINEOUT2N Mixer", "Right Output Switch", "Left Output Mixer" }, + { "LINEOUT2N Mixer", "Left Output Switch", "Left Output PGA" }, + { "LINEOUT2N Mixer", "Right Output Switch", "Right Output PGA" }, - { "LINEOUT2P Mixer", "Right Output Switch", "Right Output Mixer" }, + { "LINEOUT2P Mixer", "Right Output Switch", "Right Output PGA" }, { "LINEOUT2N Driver", NULL, "LINEOUT2N Mixer" }, { "LINEOUT2P Driver", NULL, "LINEOUT2P Mixer" }, @@ -835,17 +835,21 @@ int wm_hubs_add_analogue_controls(struct snd_soc_codec *codec) snd_soc_update_bits(codec, WM8993_RIGHT_LINE_INPUT_3_4_VOLUME, WM8993_IN2_VU, WM8993_IN2_VU); + snd_soc_update_bits(codec, WM8993_SPEAKER_VOLUME_LEFT, + WM8993_SPKOUT_VU, WM8993_SPKOUT_VU); snd_soc_update_bits(codec, WM8993_SPEAKER_VOLUME_RIGHT, WM8993_SPKOUT_VU, WM8993_SPKOUT_VU); snd_soc_update_bits(codec, WM8993_LEFT_OUTPUT_VOLUME, - WM8993_HPOUT1L_ZC, WM8993_HPOUT1L_ZC); + WM8993_HPOUT1_VU | WM8993_HPOUT1L_ZC, + WM8993_HPOUT1_VU | WM8993_HPOUT1L_ZC); snd_soc_update_bits(codec, WM8993_RIGHT_OUTPUT_VOLUME, WM8993_HPOUT1_VU | WM8993_HPOUT1R_ZC, WM8993_HPOUT1_VU | WM8993_HPOUT1R_ZC); snd_soc_update_bits(codec, WM8993_LEFT_OPGA_VOLUME, - WM8993_MIXOUTL_ZC, WM8993_MIXOUTL_ZC); + WM8993_MIXOUTL_ZC | WM8993_MIXOUT_VU, + WM8993_MIXOUTL_ZC | WM8993_MIXOUT_VU); snd_soc_update_bits(codec, WM8993_RIGHT_OPGA_VOLUME, WM8993_MIXOUTR_ZC | WM8993_MIXOUT_VU, WM8993_MIXOUTR_ZC | WM8993_MIXOUT_VU); diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c index 671ef8dd524cb..aab7765f401a0 100644 --- a/sound/soc/imx/imx-pcm-dma-mx2.c +++ b/sound/soc/imx/imx-pcm-dma-mx2.c @@ -110,12 +110,12 @@ static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream, slave_config.direction = DMA_TO_DEVICE; slave_config.dst_addr = dma_params->dma_addr; slave_config.dst_addr_width = buswidth; - slave_config.dst_maxburst = dma_params->burstsize; + slave_config.dst_maxburst = dma_params->burstsize * buswidth; } else { slave_config.direction = DMA_FROM_DEVICE; slave_config.src_addr = dma_params->dma_addr; slave_config.src_addr_width = buswidth; - slave_config.src_maxburst = dma_params->burstsize; + slave_config.src_maxburst = dma_params->burstsize * buswidth; } ret = dmaengine_slave_config(iprtd->dma_chan, &slave_config); @@ -303,6 +303,11 @@ static struct snd_soc_platform_driver imx_soc_platform_mx2 = { static int __devinit imx_soc_platform_probe(struct platform_device *pdev) { + struct imx_ssi *ssi = platform_get_drvdata(pdev); + + ssi->dma_params_tx.burstsize = 6; + ssi->dma_params_rx.burstsize = 4; + return snd_soc_register_platform(&pdev->dev, &imx_soc_platform_mx2); } diff --git a/sound/soc/imx/imx-ssi.h b/sound/soc/imx/imx-ssi.h index a4406a1348927..dc8a87530e3e9 100644 --- a/sound/soc/imx/imx-ssi.h +++ b/sound/soc/imx/imx-ssi.h @@ -234,7 +234,4 @@ void imx_pcm_free(struct snd_pcm *pcm); */ #define IMX_SSI_DMABUF_SIZE (64 * 1024) -#define DMA_RXFIFO_BURST 0x4 -#define DMA_TXFIFO_BURST 0x6 - #endif /* _IMX_SSI_H */ diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c index 784cff5f67e81..9027da466caea 100644 --- a/sound/soc/pxa/corgi.c +++ b/sound/soc/pxa/corgi.c @@ -310,7 +310,7 @@ static struct snd_soc_dai_link corgi_dai = { .cpu_dai_name = "pxa2xx-i2s", .codec_dai_name = "wm8731-hifi", .platform_name = "pxa-pcm-audio", - .codec_name = "wm8731-codec-0.001b", + .codec_name = "wm8731-codec.0-001b", .init = corgi_wm8731_init, .ops = &corgi_ops, }; diff --git a/sound/soc/pxa/z2.c b/sound/soc/pxa/z2.c index 3ceaef68e01de..838a0d540c5a3 
--- a/sound/soc/pxa/z2.c
+++ b/sound/soc/pxa/z2.c
@@ -147,7 +147,7 @@ static int z2_wm8750_init(struct snd_soc_pcm_runtime *rtd)
 	snd_soc_dapm_disable_pin(dapm, "LINPUT3");
 	snd_soc_dapm_disable_pin(dapm, "RINPUT3");
 	snd_soc_dapm_disable_pin(dapm, "OUT3");
-	snd_soc_dapm_disable_pin(dapm, "MONO");
+	snd_soc_dapm_disable_pin(dapm, "MONO1");
 
 	/* Add z2 specific widgets */
 	snd_soc_dapm_new_controls(dapm, wm8750_dapm_widgets,
diff --git a/sound/soc/samsung/s3c24xx_uda134x.c b/sound/soc/samsung/s3c24xx_uda134x.c
index 2c09e93dd566f..86f1dc434e9f0 100644
--- a/sound/soc/samsung/s3c24xx_uda134x.c
+++ b/sound/soc/samsung/s3c24xx_uda134x.c
@@ -226,7 +226,7 @@ static struct snd_soc_ops s3c24xx_uda134x_ops = {
 static struct snd_soc_dai_link s3c24xx_uda134x_dai_link = {
 	.name = "UDA134X",
 	.stream_name = "UDA134X",
-	.codec_name = "uda134x-hifi",
+	.codec_name = "uda134x-codec",
 	.codec_dai_name = "uda134x-hifi",
 	.cpu_dai_name = "s3c24xx-iis",
 	.ops = &s3c24xx_uda134x_ops,
@@ -321,6 +321,7 @@ static int s3c24xx_uda134x_probe(struct platform_device *pdev)
 	platform_set_drvdata(s3c24xx_uda134x_snd_device,
 			     &snd_soc_s3c24xx_uda134x);
+	platform_device_add_data(s3c24xx_uda134x_snd_device, &s3c24xx_uda134x, sizeof(s3c24xx_uda134x));
 
 	ret = platform_device_add(s3c24xx_uda134x_snd_device);
 	if (ret) {
 		printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: Unable to add\n");
diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c
index d9ab3ce446acf..0c7454f8b8a98 100644
--- a/tools/perf/bench/sched-pipe.c
+++ b/tools/perf/bench/sched-pipe.c
@@ -55,7 +55,7 @@ int bench_sched_pipe(int argc, const char **argv,
 	 * discarding returned value of read(), write()
 	 * causes error in building environment for perf
 	 */
-	int ret, wait_stat;
+	int __used ret, wait_stat;
 	pid_t pid, retpid;
 
 	argc = parse_options(argc, argv, options,
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 29acb894e0351..ae7225c53c29d 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -369,11 +369,6 @@ static void process_sched_event(struct task_desc *this_task __used,
 				struct sched_atom *atom)
 {
 	int ret = 0;
-	u64 now;
-	long long delta;
-
-	now = get_nsecs();
-	delta = start_time + atom->timestamp - now;
 
 	switch (atom->type) {
 		case SCHED_EVENT_RUN:
@@ -562,7 +557,7 @@ static void wait_for_tasks(void)
 
 static void run_one_test(void)
 {
-	u64 T0, T1, delta, avg_delta, fluct, std_dev;
+	u64 T0, T1, delta, avg_delta, fluct;
 
 	T0 = get_nsecs();
 	wait_for_tasks();
@@ -578,7 +573,6 @@ static void run_one_test(void)
 	else
 		fluct = delta - avg_delta;
 	sum_fluct += fluct;
-	std_dev = sum_fluct / nr_runs / sqrt(nr_runs);
 	if (!run_avg)
 		run_avg = delta;
 	run_avg = (run_avg*9 + delta)/10;
@@ -799,7 +793,7 @@ replay_switch_event(struct trace_switch_event *switch_event,
 		    u64 timestamp,
 		    struct thread *thread __used)
 {
-	struct task_desc *prev, *next;
+	struct task_desc *prev, __used *next;
 	u64 timestamp0;
 	s64 delta;
 
@@ -1404,7 +1398,7 @@ map_switch_event(struct trace_switch_event *switch_event,
 		 u64 timestamp,
 		 struct thread *thread __used)
 {
-	struct thread *sched_out, *sched_in;
+	struct thread *sched_out __used, *sched_in;
 	int new_shortname;
 	u64 timestamp0;
 	s64 delta;
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 5a29d9cd94862..b0f6925b127d6 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -183,7 +183,6 @@ static int parse_source(struct sym_entry *syme)
 	FILE *file;
 	char command[PATH_MAX*2];
 	const char *path;
-	u64 len;
 
 	if (!syme)
 		return -1;
@@ -212,8 +211,6 @@ static int parse_source(struct sym_entry *syme)
 	}
 
 	path = map->dso->long_name;
-	len = sym->end - sym->start;
-
 	sprintf(command,
 		"objdump --start-address=%#0*" PRIx64 " --stop-address=%#0*" PRIx64 " -dS %s",
 		BITS_PER_LONG / 4, map__rip_2objdump(map, sym->start),
@@ -1296,7 +1293,7 @@ static int __cmd_top(void)
 {
 	pthread_t thread;
 	struct perf_evsel *counter;
-	int i, ret;
+	int i, ret __used;
 	/*
 	 * FIXME: perf_session__new should allow passing a O_MMAP, so that all this
 	 * mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN
index 97d76562a1a09..26d4d3fd6deb2 100755
--- a/tools/perf/util/PERF-VERSION-GEN
+++ b/tools/perf/util/PERF-VERSION-GEN
@@ -23,10 +23,10 @@ if test -d ../../.git -o -f ../../.git &&
 then
 	VN=$(echo "$VN" | sed -e 's/-/./g');
 else
-	eval `grep '^VERSION\s*=' ../../Makefile|tr -d ' '`
-	eval `grep '^PATCHLEVEL\s*=' ../../Makefile|tr -d ' '`
-	eval `grep '^SUBLEVEL\s*=' ../../Makefile|tr -d ' '`
-	eval `grep '^EXTRAVERSION\s*=' ../../Makefile|tr -d ' '`
+	eval $(grep '^VERSION[[:space:]]*=' ../../Makefile|tr -d ' ')
+	eval $(grep '^PATCHLEVEL[[:space:]]*=' ../../Makefile|tr -d ' ')
+	eval $(grep '^SUBLEVEL[[:space:]]*=' ../../Makefile|tr -d ' ')
+	eval $(grep '^EXTRAVERSION[[:space:]]*=' ../../Makefile|tr -d ' ')
 	VN="${VERSION}.${PATCHLEVEL}.${SUBLEVEL}${EXTRAVERSION}"
 fi
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 0866bcdb5e8e7..9721e2fa9ece1 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1144,7 +1144,7 @@ int event__synthesize_tracing_data(int fd, struct list_head *pattrs,
 {
 	event_t ev;
 	ssize_t size = 0, aligned_size = 0, padding;
-	int err = 0;
+	int err __used = 0;
 
 	memset(&ev, 0, sizeof(ev));
 
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index df51560f16f7e..5214b703250e0 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1095,7 +1095,6 @@ int hist_entry__annotate(struct hist_entry *self, struct list_head *head,
 	char command[PATH_MAX * 2];
 	FILE *file;
 	int err = 0;
-	u64 len;
 	char symfs_filename[PATH_MAX];
 
 	if (filename) {
@@ -1140,8 +1139,6 @@ int hist_entry__annotate(struct hist_entry *self, struct list_head *head,
 		 filename, sym->name, map->unmap_ip(map, sym->start),
 		 map->unmap_ip(map, sym->end));
 
-	len = sym->end - sym->start;
-
 	pr_debug("annotating [%p] %30s : [%p] %30s\n",
 		 dso, dso->long_name, sym, sym->name);
 
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index c6d99334bdfa8..2040b85385273 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -248,8 +248,7 @@ static void python_process_event(int cpu, void *data,
 	context = PyCObject_FromVoidPtr(scripting_context, NULL);
 
 	PyTuple_SetItem(t, n++, PyString_FromString(handler_name));
-	PyTuple_SetItem(t, n++,
-			PyCObject_FromVoidPtr(scripting_context, NULL));
+	PyTuple_SetItem(t, n++, context);
 
 	if (handler) {
 		PyTuple_SetItem(t, n++, PyInt_FromLong(cpu));
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index b1bf490aff880..ba6d48949092f 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1525,8 +1525,8 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
 				 symbol_conf.symfs, self->long_name);
 			break;
 		case DSO__ORIG_GUEST_KMODULE:
-			if (map->groups && map->groups->machine)
-				root_dir = map->groups->machine->root_dir;
+			if (map->groups && machine)
+				root_dir = machine->root_dir;
 			else
 				root_dir = "";
 			snprintf(name, size, "%s%s%s", symbol_conf.symfs,
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 73a02223c6292..d8e622dd738aa 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -153,7 +153,7 @@ void parse_proc_kallsyms(char *file, unsigned int size __unused)
 	char *next = NULL;
 	char *addr_str;
 	char ch;
-	int ret;
+	int ret __used;
 	int i;
 
 	line = strtok_r(file, "\n", &next);
diff --git a/tools/perf/util/ui/browsers/map.c b/tools/perf/util/ui/browsers/map.c
index e5158369106ee..8462bffe20bc8 100644
--- a/tools/perf/util/ui/browsers/map.c
+++ b/tools/perf/util/ui/browsers/map.c
@@ -41,7 +41,7 @@ static int ui_entry__read(const char *title, char *bf, size_t size, int width)
 out_free_form:
 	newtPopWindow();
 	newtFormDestroy(form);
-	return 0;
+	return err;
 }
 
 struct map_browser {
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 62a9caf0563c2..58a5e2b5b7e11 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -227,7 +227,7 @@ int kvm_iommu_map_guest(struct kvm *kvm)
 		return -ENODEV;
 	}
 
-	kvm->arch.iommu_domain = iommu_domain_alloc();
+	kvm->arch.iommu_domain = iommu_domain_alloc(0);
 	if (!kvm->arch.iommu_domain)
 		return -ENOMEM;